//===-- TargetLowering.cpp - Implement the TargetLowering class -----------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This implements the TargetLowering class.
//
//===----------------------------------------------------------------------===//

#include "llvm/CodeGen/TargetLowering.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/CodeGen/CallingConvLower.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineJumpTableInfo.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/SelectionDAG.h"
#include "llvm/CodeGen/TargetRegisterInfo.h"
#include "llvm/CodeGen/TargetSubtargetInfo.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/GlobalVariable.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/MC/MCAsmInfo.h"
#include "llvm/MC/MCExpr.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/KnownBits.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/Target/TargetLoweringObjectFile.h"
#include "llvm/Target/TargetMachine.h"
#include <cctype>
using namespace llvm;

/// NOTE: The TargetMachine owns TLOF.
TargetLowering::TargetLowering(const TargetMachine &tm)
    : TargetLoweringBase(tm) {}

const char *TargetLowering::getTargetNodeName(unsigned Opcode) const {
  return nullptr;
}

bool TargetLowering::isPositionIndependent() const {
  return getTargetMachine().isPositionIndependent();
}

/// Check whether a given call node is in tail position within its function. If
/// so, it sets Chain to the input chain of the tail call.
bool TargetLowering::isInTailCallPosition(SelectionDAG &DAG, SDNode *Node,
                                          SDValue &Chain) const {
  const Function &F = DAG.getMachineFunction().getFunction();

  // First, check if tail calls have been disabled in this function.
  if (F.getFnAttribute("disable-tail-calls").getValueAsString() == "true")
    return false;

  // Conservatively require the attributes of the call to match those of
  // the return. Ignore NoAlias and NonNull because they don't affect the
  // call sequence.
  AttributeList CallerAttrs = F.getAttributes();
  if (AttrBuilder(CallerAttrs, AttributeList::ReturnIndex)
          .removeAttribute(Attribute::NoAlias)
          .removeAttribute(Attribute::NonNull)
          .hasAttributes())
    return false;

  // It's not safe to eliminate the sign / zero extension of the return value.
  if (CallerAttrs.hasAttribute(AttributeList::ReturnIndex, Attribute::ZExt) ||
      CallerAttrs.hasAttribute(AttributeList::ReturnIndex, Attribute::SExt))
    return false;

  // Check if the only use is a function return node.
  return isUsedByReturnOnly(Node, Chain);
}

bool TargetLowering::parametersInCSRMatch(
    const MachineRegisterInfo &MRI, const uint32_t *CallerPreservedMask,
    const SmallVectorImpl<CCValAssign> &ArgLocs,
    const SmallVectorImpl<SDValue> &OutVals) const {
  for (unsigned I = 0, E = ArgLocs.size(); I != E; ++I) {
    const CCValAssign &ArgLoc = ArgLocs[I];
    if (!ArgLoc.isRegLoc())
      continue;
    MCRegister Reg = ArgLoc.getLocReg();
    // Only look at callee saved registers.
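    // (clobbersPhysReg returns true when the mask marks Reg as clobbered by
    // the call; such a register is not callee-saved, so it cannot be relied
    // on to carry an argument across the call and needs no further checking.)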
    if (MachineOperand::clobbersPhysReg(CallerPreservedMask, Reg))
      continue;
    // Check that we pass the value used for the caller.
    // (We look for a CopyFromReg reading a virtual register that is used
    // for the function live-in value of register Reg)
    SDValue Value = OutVals[I];
    if (Value->getOpcode() != ISD::CopyFromReg)
      return false;
    Register ArgReg = cast<RegisterSDNode>(Value->getOperand(1))->getReg();
    if (MRI.getLiveInPhysReg(ArgReg) != Reg)
      return false;
  }
  return true;
}

/// Set CallLoweringInfo attribute flags based on a call instruction
/// and called function attributes.
void TargetLoweringBase::ArgListEntry::setAttributes(const CallBase *Call,
                                                     unsigned ArgIdx) {
  IsSExt = Call->paramHasAttr(ArgIdx, Attribute::SExt);
  IsZExt = Call->paramHasAttr(ArgIdx, Attribute::ZExt);
  IsInReg = Call->paramHasAttr(ArgIdx, Attribute::InReg);
  IsSRet = Call->paramHasAttr(ArgIdx, Attribute::StructRet);
  IsNest = Call->paramHasAttr(ArgIdx, Attribute::Nest);
  IsByVal = Call->paramHasAttr(ArgIdx, Attribute::ByVal);
  IsPreallocated = Call->paramHasAttr(ArgIdx, Attribute::Preallocated);
  IsInAlloca = Call->paramHasAttr(ArgIdx, Attribute::InAlloca);
  IsReturned = Call->paramHasAttr(ArgIdx, Attribute::Returned);
  IsSwiftSelf = Call->paramHasAttr(ArgIdx, Attribute::SwiftSelf);
  IsSwiftError = Call->paramHasAttr(ArgIdx, Attribute::SwiftError);
  Alignment = Call->getParamAlign(ArgIdx);
  ByValType = nullptr;
  if (IsByVal)
    ByValType = Call->getParamByValType(ArgIdx);
  PreallocatedType = nullptr;
  if (IsPreallocated)
    PreallocatedType = Call->getParamPreallocatedType(ArgIdx);
}

/// Generate a libcall taking the given operands as arguments and returning a
/// result of type RetVT.
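///
/// For example (illustrative only, with hypothetical LHS/RHS values), a
/// target softening f32 arithmetic could expand an FADD roughly as:
///   TargetLowering::MakeLibCallOptions CallOptions;
///   SDValue Ops[2] = {LHS, RHS};
///   std::pair<SDValue, SDValue> Res =
///       makeLibCall(DAG, RTLIB::ADD_F32, MVT::f32, Ops, CallOptions, dl);
/// where Res.first is the returned value and Res.second the output chain.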
std::pair<SDValue, SDValue>
TargetLowering::makeLibCall(SelectionDAG &DAG, RTLIB::Libcall LC, EVT RetVT,
                            ArrayRef<SDValue> Ops,
                            MakeLibCallOptions CallOptions, const SDLoc &dl,
                            SDValue InChain) const {
  if (!InChain)
    InChain = DAG.getEntryNode();

  TargetLowering::ArgListTy Args;
  Args.reserve(Ops.size());

  TargetLowering::ArgListEntry Entry;
  for (unsigned i = 0; i < Ops.size(); ++i) {
    SDValue NewOp = Ops[i];
    Entry.Node = NewOp;
    Entry.Ty = Entry.Node.getValueType().getTypeForEVT(*DAG.getContext());
    Entry.IsSExt =
        shouldSignExtendTypeInLibCall(NewOp.getValueType(), CallOptions.IsSExt);
    Entry.IsZExt = !Entry.IsSExt;

    if (CallOptions.IsSoften &&
        !shouldExtendTypeInLibCall(CallOptions.OpsVTBeforeSoften[i])) {
      Entry.IsSExt = Entry.IsZExt = false;
    }
    Args.push_back(Entry);
  }

  if (LC == RTLIB::UNKNOWN_LIBCALL)
    report_fatal_error("Unsupported library call operation!");
  SDValue Callee = DAG.getExternalSymbol(getLibcallName(LC),
                                         getPointerTy(DAG.getDataLayout()));

  Type *RetTy = RetVT.getTypeForEVT(*DAG.getContext());
  TargetLowering::CallLoweringInfo CLI(DAG);
  bool signExtend = shouldSignExtendTypeInLibCall(RetVT, CallOptions.IsSExt);
  bool zeroExtend = !signExtend;

  if (CallOptions.IsSoften &&
      !shouldExtendTypeInLibCall(CallOptions.RetVTBeforeSoften)) {
    signExtend = zeroExtend = false;
  }

  CLI.setDebugLoc(dl)
      .setChain(InChain)
      .setLibCallee(getLibcallCallingConv(LC), RetTy, Callee, std::move(Args))
      .setNoReturn(CallOptions.DoesNotReturn)
      .setDiscardResult(!CallOptions.IsReturnValueUsed)
      .setIsPostTypeLegalization(CallOptions.IsPostTypeLegalization)
      .setSExtResult(signExtend)
      .setZExtResult(zeroExtend);
  return LowerCallTo(CLI);
}

bool TargetLowering::findOptimalMemOpLowering(
    std::vector<EVT> &MemOps, unsigned Limit, const MemOp &Op, unsigned DstAS,
    unsigned SrcAS, const AttributeList &FuncAttributes) const {
  if (Op.isMemcpyWithFixedDstAlign() && Op.getSrcAlign() < Op.getDstAlign())
    return false;

  EVT VT = getOptimalMemOpType(Op, FuncAttributes);

  if (VT == MVT::Other) {
    // Use the largest integer type whose alignment constraints are satisfied.
    // We only need to check DstAlign here as SrcAlign is always greater or
    // equal to DstAlign (or zero).
    VT = MVT::i64;
    if (Op.isFixedDstAlign())
      while (Op.getDstAlign() < (VT.getSizeInBits() / 8) &&
             !allowsMisalignedMemoryAccesses(VT, DstAS,
                                             Op.getDstAlign().value()))
        VT = (MVT::SimpleValueType)(VT.getSimpleVT().SimpleTy - 1);
    assert(VT.isInteger());

    // Find the largest legal integer type.
    MVT LVT = MVT::i64;
    while (!isTypeLegal(LVT))
      LVT = (MVT::SimpleValueType)(LVT.SimpleTy - 1);
    assert(LVT.isInteger());

    // If the type we've chosen is larger than the largest legal integer type
    // then use that instead.
    if (VT.bitsGT(LVT))
      VT = LVT;
  }

  unsigned NumMemOps = 0;
  uint64_t Size = Op.size();
  while (Size) {
    unsigned VTSize = VT.getSizeInBits() / 8;
    while (VTSize > Size) {
      // For now, only use non-vector loads / stores for the left-over pieces.
      EVT NewVT = VT;
      unsigned NewVTSize;

      bool Found = false;
      if (VT.isVector() || VT.isFloatingPoint()) {
        NewVT = (VT.getSizeInBits() > 64) ? MVT::i64 : MVT::i32;
        if (isOperationLegalOrCustom(ISD::STORE, NewVT) &&
            isSafeMemOpType(NewVT.getSimpleVT()))
          Found = true;
        else if (NewVT == MVT::i64 &&
                 isOperationLegalOrCustom(ISD::STORE, MVT::f64) &&
                 isSafeMemOpType(MVT::f64)) {
          // i64 is usually not legal on 32-bit targets, but f64 may be.
          NewVT = MVT::f64;
          Found = true;
        }
      }

      if (!Found) {
        do {
          NewVT = (MVT::SimpleValueType)(NewVT.getSimpleVT().SimpleTy - 1);
          if (NewVT == MVT::i8)
            break;
        } while (!isSafeMemOpType(NewVT.getSimpleVT()));
      }
      NewVTSize = NewVT.getSizeInBits() / 8;

      // If the new VT cannot cover all of the remaining bits, then consider
      // issuing one (or a pair of) unaligned and overlapping loads / stores.
      bool Fast;
      if (NumMemOps && Op.allowOverlap() && NewVTSize < Size &&
          allowsMisalignedMemoryAccesses(
              VT, DstAS, Op.isFixedDstAlign() ? Op.getDstAlign().value() : 1,
              MachineMemOperand::MONone, &Fast) &&
          Fast)
        VTSize = Size;
      else {
        VT = NewVT;
        VTSize = NewVTSize;
      }
    }

    if (++NumMemOps > Limit)
      return false;

    MemOps.push_back(VT);
    Size -= VTSize;
  }

  return true;
}

/// Soften the operands of a comparison. This code is shared among BR_CC,
/// SELECT_CC, and SETCC handlers.
void TargetLowering::softenSetCCOperands(SelectionDAG &DAG, EVT VT,
                                         SDValue &NewLHS, SDValue &NewRHS,
                                         ISD::CondCode &CCCode,
                                         const SDLoc &dl, const SDValue OldLHS,
                                         const SDValue OldRHS) const {
  SDValue Chain;
  return softenSetCCOperands(DAG, VT, NewLHS, NewRHS, CCCode, dl, OldLHS,
                             OldRHS, Chain);
}

void TargetLowering::softenSetCCOperands(SelectionDAG &DAG, EVT VT,
                                         SDValue &NewLHS, SDValue &NewRHS,
                                         ISD::CondCode &CCCode,
                                         const SDLoc &dl, const SDValue OldLHS,
                                         const SDValue OldRHS, SDValue &Chain,
                                         bool IsSignaling) const {
  // FIXME: Currently we cannot really respect all IEEE predicates due to
  // libgcc not supporting them. We can update this code when libgcc provides
  // such functions.

  assert((VT == MVT::f32 || VT == MVT::f64 || VT == MVT::f128 ||
          VT == MVT::ppcf128) &&
         "Unsupported setcc type!");

  // Expand into one or more soft-fp libcall(s).
  RTLIB::Libcall LC1 = RTLIB::UNKNOWN_LIBCALL, LC2 = RTLIB::UNKNOWN_LIBCALL;
  bool ShouldInvertCC = false;
  switch (CCCode) {
  case ISD::SETEQ:
  case ISD::SETOEQ:
    LC1 = (VT == MVT::f32) ? RTLIB::OEQ_F32 :
          (VT == MVT::f64) ? RTLIB::OEQ_F64 :
          (VT == MVT::f128) ? RTLIB::OEQ_F128 : RTLIB::OEQ_PPCF128;
    break;
  case ISD::SETNE:
  case ISD::SETUNE:
    LC1 = (VT == MVT::f32) ? RTLIB::UNE_F32 :
          (VT == MVT::f64) ? RTLIB::UNE_F64 :
          (VT == MVT::f128) ? RTLIB::UNE_F128 : RTLIB::UNE_PPCF128;
    break;
  case ISD::SETGE:
  case ISD::SETOGE:
    LC1 = (VT == MVT::f32) ? RTLIB::OGE_F32 :
          (VT == MVT::f64) ? RTLIB::OGE_F64 :
          (VT == MVT::f128) ? RTLIB::OGE_F128 : RTLIB::OGE_PPCF128;
    break;
  case ISD::SETLT:
  case ISD::SETOLT:
    LC1 = (VT == MVT::f32) ? RTLIB::OLT_F32 :
          (VT == MVT::f64) ? RTLIB::OLT_F64 :
          (VT == MVT::f128) ? RTLIB::OLT_F128 : RTLIB::OLT_PPCF128;
    break;
  case ISD::SETLE:
  case ISD::SETOLE:
    LC1 = (VT == MVT::f32) ? RTLIB::OLE_F32 :
          (VT == MVT::f64) ? RTLIB::OLE_F64 :
          (VT == MVT::f128) ? RTLIB::OLE_F128 : RTLIB::OLE_PPCF128;
    break;
  case ISD::SETGT:
  case ISD::SETOGT:
    LC1 = (VT == MVT::f32) ? RTLIB::OGT_F32 :
          (VT == MVT::f64) ? RTLIB::OGT_F64 :
          (VT == MVT::f128) ? RTLIB::OGT_F128 : RTLIB::OGT_PPCF128;
    break;
  case ISD::SETO:
    ShouldInvertCC = true;
    LLVM_FALLTHROUGH;
  case ISD::SETUO:
    LC1 = (VT == MVT::f32) ? RTLIB::UO_F32 :
          (VT == MVT::f64) ? RTLIB::UO_F64 :
          (VT == MVT::f128) ? RTLIB::UO_F128 : RTLIB::UO_PPCF128;
    break;
  case ISD::SETONE:
    // SETONE = O && UNE
    ShouldInvertCC = true;
    LLVM_FALLTHROUGH;
  case ISD::SETUEQ:
    LC1 = (VT == MVT::f32) ? RTLIB::UO_F32 :
          (VT == MVT::f64) ? RTLIB::UO_F64 :
          (VT == MVT::f128) ? RTLIB::UO_F128 : RTLIB::UO_PPCF128;
    LC2 = (VT == MVT::f32) ? RTLIB::OEQ_F32 :
          (VT == MVT::f64) ? RTLIB::OEQ_F64 :
          (VT == MVT::f128) ? RTLIB::OEQ_F128 : RTLIB::OEQ_PPCF128;
    break;
  default:
    // Invert CC for unordered comparisons.
    ShouldInvertCC = true;
    switch (CCCode) {
    case ISD::SETULT:
      LC1 = (VT == MVT::f32) ? RTLIB::OGE_F32 :
            (VT == MVT::f64) ? RTLIB::OGE_F64 :
            (VT == MVT::f128) ? RTLIB::OGE_F128 : RTLIB::OGE_PPCF128;
      break;
    case ISD::SETULE:
      LC1 = (VT == MVT::f32) ? RTLIB::OGT_F32 :
            (VT == MVT::f64) ? RTLIB::OGT_F64 :
            (VT == MVT::f128) ? RTLIB::OGT_F128 : RTLIB::OGT_PPCF128;
      break;
    case ISD::SETUGT:
      LC1 = (VT == MVT::f32) ? RTLIB::OLE_F32 :
            (VT == MVT::f64) ? RTLIB::OLE_F64 :
            (VT == MVT::f128) ? RTLIB::OLE_F128 : RTLIB::OLE_PPCF128;
      break;
    case ISD::SETUGE:
      LC1 = (VT == MVT::f32) ? RTLIB::OLT_F32 :
            (VT == MVT::f64) ? RTLIB::OLT_F64 :
            (VT == MVT::f128) ? RTLIB::OLT_F128 : RTLIB::OLT_PPCF128;
      break;
    default: llvm_unreachable("Do not know how to soften this setcc!");
    }
  }

  // Use the target-specific return value for comparison lib calls.
  EVT RetVT = getCmpLibcallReturnType();
  SDValue Ops[2] = {NewLHS, NewRHS};
  TargetLowering::MakeLibCallOptions CallOptions;
  EVT OpsVT[2] = {OldLHS.getValueType(), OldRHS.getValueType()};
  CallOptions.setTypeListBeforeSoften(OpsVT, RetVT, true);
  auto Call = makeLibCall(DAG, LC1, RetVT, Ops, CallOptions, dl, Chain);
  NewLHS = Call.first;
  NewRHS = DAG.getConstant(0, dl, RetVT);

  CCCode = getCmpLibcallCC(LC1);
  if (ShouldInvertCC) {
    assert(RetVT.isInteger());
    CCCode = getSetCCInverse(CCCode, RetVT);
  }

  if (LC2 == RTLIB::UNKNOWN_LIBCALL) {
    // Update Chain.
    Chain = Call.second;
  } else {
    EVT SetCCVT =
        getSetCCResultType(DAG.getDataLayout(), *DAG.getContext(), RetVT);
    SDValue Tmp = DAG.getSetCC(dl, SetCCVT, NewLHS, NewRHS, CCCode);
    auto Call2 = makeLibCall(DAG, LC2, RetVT, Ops, CallOptions, dl, Chain);
    CCCode = getCmpLibcallCC(LC2);
    if (ShouldInvertCC)
      CCCode = getSetCCInverse(CCCode, RetVT);
    NewLHS = DAG.getSetCC(dl, SetCCVT, Call2.first, NewRHS, CCCode);
    if (Chain)
      Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, Call.second,
                          Call2.second);
    NewLHS = DAG.getNode(ShouldInvertCC ? ISD::AND : ISD::OR, dl,
                         Tmp.getValueType(), Tmp, NewLHS);
    NewRHS = SDValue();
  }
}

/// Return the entry encoding for a jump table in the current function. The
/// returned value is a member of the MachineJumpTableInfo::JTEntryKind enum.
unsigned TargetLowering::getJumpTableEncoding() const {
  // In non-pic modes, just use the address of a block.
  if (!isPositionIndependent())
    return MachineJumpTableInfo::EK_BlockAddress;

  // In PIC mode, if the target supports a GPRel32 directive, use it.
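  // (For example, MIPS-style targets expose a .gpword/.gprel32 directive,
  // which lets each entry be a 32-bit offset from the GP register rather
  // than an absolute block address.)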
  if (getTargetMachine().getMCAsmInfo()->getGPRel32Directive() != nullptr)
    return MachineJumpTableInfo::EK_GPRel32BlockAddress;

  // Otherwise, use a label difference.
  return MachineJumpTableInfo::EK_LabelDifference32;
}

SDValue TargetLowering::getPICJumpTableRelocBase(SDValue Table,
                                                 SelectionDAG &DAG) const {
  // If our PIC model is GP relative, use the global offset table as the base.
  unsigned JTEncoding = getJumpTableEncoding();

  if ((JTEncoding == MachineJumpTableInfo::EK_GPRel64BlockAddress) ||
      (JTEncoding == MachineJumpTableInfo::EK_GPRel32BlockAddress))
    return DAG.getGLOBAL_OFFSET_TABLE(getPointerTy(DAG.getDataLayout()));

  return Table;
}

/// This returns the relocation base for the given PIC jumptable, the same as
/// getPICJumpTableRelocBase, but as an MCExpr.
const MCExpr *
TargetLowering::getPICJumpTableRelocBaseExpr(const MachineFunction *MF,
                                             unsigned JTI,
                                             MCContext &Ctx) const {
  // The normal PIC reloc base is the label at the start of the jump table.
  return MCSymbolRefExpr::create(MF->getJTISymbol(JTI, Ctx), Ctx);
}

bool
TargetLowering::isOffsetFoldingLegal(const GlobalAddressSDNode *GA) const {
  const TargetMachine &TM = getTargetMachine();
  const GlobalValue *GV = GA->getGlobal();

  // If the address is not even local to this DSO we will have to load it from
  // a GOT and then add the offset.
  if (!TM.shouldAssumeDSOLocal(*GV->getParent(), GV))
    return false;

  // If the code is position independent we will have to add a base register.
  if (isPositionIndependent())
    return false;

  // Otherwise we can do it.
  return true;
}

//===----------------------------------------------------------------------===//
//  Optimization Methods
//===----------------------------------------------------------------------===//

/// If the specified instruction has a constant integer operand and there are
/// bits set in that constant that are not demanded, then clear those bits and
/// return true.
bool TargetLowering::ShrinkDemandedConstant(SDValue Op,
                                            const APInt &DemandedBits,
                                            const APInt &DemandedElts,
                                            TargetLoweringOpt &TLO) const {
  SDLoc DL(Op);
  unsigned Opcode = Op.getOpcode();

  // Do target-specific constant optimization.
  if (targetShrinkDemandedConstant(Op, DemandedBits, DemandedElts, TLO))
    return TLO.New.getNode();

  // FIXME: ISD::SELECT, ISD::SELECT_CC
  switch (Opcode) {
  default:
    break;
  case ISD::XOR:
  case ISD::AND:
  case ISD::OR: {
    auto *Op1C = dyn_cast<ConstantSDNode>(Op.getOperand(1));
    if (!Op1C)
      return false;

    // If this is a 'not' op, don't touch it because that's a canonical form.
    const APInt &C = Op1C->getAPIntValue();
    if (Opcode == ISD::XOR && DemandedBits.isSubsetOf(C))
      return false;

    if (!C.isSubsetOf(DemandedBits)) {
      EVT VT = Op.getValueType();
      SDValue NewC = TLO.DAG.getConstant(DemandedBits & C, DL, VT);
      SDValue NewOp = TLO.DAG.getNode(Opcode, DL, VT, Op.getOperand(0), NewC);
      return TLO.CombineTo(Op, NewOp);
    }

    break;
  }
  }

  return false;
}

bool TargetLowering::ShrinkDemandedConstant(SDValue Op,
                                            const APInt &DemandedBits,
                                            TargetLoweringOpt &TLO) const {
  EVT VT = Op.getValueType();
  APInt DemandedElts = VT.isVector()
                           ? APInt::getAllOnesValue(VT.getVectorNumElements())
                           : APInt(1, 1);
  return ShrinkDemandedConstant(Op, DemandedBits, DemandedElts, TLO);
}

/// Convert x+y to (VT)((SmallVT)x+(SmallVT)y) if the casts are free.
/// This uses isZExtFree and ZERO_EXTEND for the widening cast, but it could be
/// generalized for targets with other types of implicit widening casts.
bool TargetLowering::ShrinkDemandedOp(SDValue Op, unsigned BitWidth,
                                      const APInt &Demanded,
                                      TargetLoweringOpt &TLO) const {
  assert(Op.getNumOperands() == 2 &&
         "ShrinkDemandedOp only supports binary operators!");
  assert(Op.getNode()->getNumValues() == 1 &&
         "ShrinkDemandedOp only supports nodes with one result!");

  SelectionDAG &DAG = TLO.DAG;
  SDLoc dl(Op);

  // Early return, as this function cannot handle vector types.
  if (Op.getValueType().isVector())
    return false;

  // Don't do this if the node has another user, which may require the
  // full value.
  if (!Op.getNode()->hasOneUse())
    return false;

  // Search for the smallest integer type with free casts to and from
  // Op's type. For expedience, just check power-of-2 integer types.
  const TargetLowering &TLI = DAG.getTargetLoweringInfo();
  unsigned DemandedSize = Demanded.getActiveBits();
  unsigned SmallVTBits = DemandedSize;
  if (!isPowerOf2_32(SmallVTBits))
    SmallVTBits = NextPowerOf2(SmallVTBits);
  for (; SmallVTBits < BitWidth; SmallVTBits = NextPowerOf2(SmallVTBits)) {
    EVT SmallVT = EVT::getIntegerVT(*DAG.getContext(), SmallVTBits);
    if (TLI.isTruncateFree(Op.getValueType(), SmallVT) &&
        TLI.isZExtFree(SmallVT, Op.getValueType())) {
      // We found a type with free casts.
      SDValue X = DAG.getNode(
          Op.getOpcode(), dl, SmallVT,
          DAG.getNode(ISD::TRUNCATE, dl, SmallVT, Op.getOperand(0)),
          DAG.getNode(ISD::TRUNCATE, dl, SmallVT, Op.getOperand(1)));
      assert(DemandedSize <= SmallVTBits && "Narrowed below demanded bits?");
      SDValue Z = DAG.getNode(ISD::ANY_EXTEND, dl, Op.getValueType(), X);
      return TLO.CombineTo(Op, Z);
    }
  }
  return false;
}

bool TargetLowering::SimplifyDemandedBits(SDValue Op,
                                          const APInt &DemandedBits,
                                          DAGCombinerInfo &DCI) const {
  SelectionDAG &DAG = DCI.DAG;
  TargetLoweringOpt TLO(DAG, !DCI.isBeforeLegalize(),
                        !DCI.isBeforeLegalizeOps());
  KnownBits Known;

  bool Simplified = SimplifyDemandedBits(Op, DemandedBits, Known, TLO);
  if (Simplified) {
    DCI.AddToWorklist(Op.getNode());
    DCI.CommitTargetLoweringOpt(TLO);
  }
  return Simplified;
}

bool TargetLowering::SimplifyDemandedBits(SDValue Op,
                                          const APInt &DemandedBits,
                                          KnownBits &Known,
                                          TargetLoweringOpt &TLO,
                                          unsigned Depth,
                                          bool AssumeSingleUse) const {
  EVT VT = Op.getValueType();

  // TODO: We can probably do more work on calculating the known bits and
  // simplifying the operations for scalable vectors, but for now we just
  // bail out.
  if (VT.isScalableVector()) {
    // Pretend we don't know anything for now.
    Known = KnownBits(DemandedBits.getBitWidth());
    return false;
  }

  APInt DemandedElts = VT.isVector()
                           ? APInt::getAllOnesValue(VT.getVectorNumElements())
                           : APInt(1, 1);
  return SimplifyDemandedBits(Op, DemandedBits, DemandedElts, Known, TLO,
                              Depth, AssumeSingleUse);
}

// TODO: Can we merge SelectionDAG::GetDemandedBits into this?
// TODO: Under what circumstances can we create nodes? Constant folding?
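// Unlike SimplifyDemandedBits below, this routine does not rewrite Op in
// place; it looks for an existing, already-computed value (or a cheap bitcast
// of one) that produces every demanded bit and element, e.g. the LHS of an
// AND whose RHS is known to be all ones in the demanded bit positions.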
SDValue TargetLowering::SimplifyMultipleUseDemandedBits(
    SDValue Op, const APInt &DemandedBits, const APInt &DemandedElts,
    SelectionDAG &DAG, unsigned Depth) const {
  // Limit search depth.
  if (Depth >= SelectionDAG::MaxRecursionDepth)
    return SDValue();

  // Ignore UNDEFs.
  if (Op.isUndef())
    return SDValue();

  // Not demanding any bits/elts from Op.
  if (DemandedBits == 0 || DemandedElts == 0)
    return DAG.getUNDEF(Op.getValueType());

  unsigned NumElts = DemandedElts.getBitWidth();
  unsigned BitWidth = DemandedBits.getBitWidth();
  KnownBits LHSKnown, RHSKnown;
  switch (Op.getOpcode()) {
  case ISD::BITCAST: {
    SDValue Src = peekThroughBitcasts(Op.getOperand(0));
    EVT SrcVT = Src.getValueType();
    EVT DstVT = Op.getValueType();
    if (SrcVT == DstVT)
      return Src;

    unsigned NumSrcEltBits = SrcVT.getScalarSizeInBits();
    unsigned NumDstEltBits = DstVT.getScalarSizeInBits();
    if (NumSrcEltBits == NumDstEltBits)
      if (SDValue V = SimplifyMultipleUseDemandedBits(
              Src, DemandedBits, DemandedElts, DAG, Depth + 1))
        return DAG.getBitcast(DstVT, V);

    // TODO - bigendian once we have test coverage.
    if (SrcVT.isVector() && (NumDstEltBits % NumSrcEltBits) == 0 &&
        DAG.getDataLayout().isLittleEndian()) {
      unsigned Scale = NumDstEltBits / NumSrcEltBits;
      unsigned NumSrcElts = SrcVT.getVectorNumElements();
      APInt DemandedSrcBits = APInt::getNullValue(NumSrcEltBits);
      APInt DemandedSrcElts = APInt::getNullValue(NumSrcElts);
      for (unsigned i = 0; i != Scale; ++i) {
        unsigned Offset = i * NumSrcEltBits;
        APInt Sub = DemandedBits.extractBits(NumSrcEltBits, Offset);
        if (!Sub.isNullValue()) {
          DemandedSrcBits |= Sub;
          for (unsigned j = 0; j != NumElts; ++j)
            if (DemandedElts[j])
              DemandedSrcElts.setBit((j * Scale) + i);
        }
      }

      if (SDValue V = SimplifyMultipleUseDemandedBits(
              Src, DemandedSrcBits, DemandedSrcElts, DAG, Depth + 1))
        return DAG.getBitcast(DstVT, V);
    }

    // TODO - bigendian once we have test coverage.
    if ((NumSrcEltBits % NumDstEltBits) == 0 &&
        DAG.getDataLayout().isLittleEndian()) {
      unsigned Scale = NumSrcEltBits / NumDstEltBits;
      unsigned NumSrcElts = SrcVT.isVector() ? SrcVT.getVectorNumElements() : 1;
      APInt DemandedSrcBits = APInt::getNullValue(NumSrcEltBits);
      APInt DemandedSrcElts = APInt::getNullValue(NumSrcElts);
      for (unsigned i = 0; i != NumElts; ++i)
        if (DemandedElts[i]) {
          unsigned Offset = (i % Scale) * NumDstEltBits;
          DemandedSrcBits.insertBits(DemandedBits, Offset);
          DemandedSrcElts.setBit(i / Scale);
        }

      if (SDValue V = SimplifyMultipleUseDemandedBits(
              Src, DemandedSrcBits, DemandedSrcElts, DAG, Depth + 1))
        return DAG.getBitcast(DstVT, V);
    }

    break;
  }
  case ISD::AND: {
    LHSKnown = DAG.computeKnownBits(Op.getOperand(0), DemandedElts, Depth + 1);
    RHSKnown = DAG.computeKnownBits(Op.getOperand(1), DemandedElts, Depth + 1);

    // If all of the demanded bits are known 1 on one side, return the other.
    // These bits cannot contribute to the result of the 'and' in this
    // context.
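    // e.g. with DemandedBits = 0x0F, (and X, 0xFF) can be folded to X: the
    // mask is known one in every demanded bit position.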
    if (DemandedBits.isSubsetOf(LHSKnown.Zero | RHSKnown.One))
      return Op.getOperand(0);
    if (DemandedBits.isSubsetOf(RHSKnown.Zero | LHSKnown.One))
      return Op.getOperand(1);
    break;
  }
  case ISD::OR: {
    LHSKnown = DAG.computeKnownBits(Op.getOperand(0), DemandedElts, Depth + 1);
    RHSKnown = DAG.computeKnownBits(Op.getOperand(1), DemandedElts, Depth + 1);

    // If all of the demanded bits are known zero on one side, return the
    // other. These bits cannot contribute to the result of the 'or' in this
    // context.
    if (DemandedBits.isSubsetOf(LHSKnown.One | RHSKnown.Zero))
      return Op.getOperand(0);
    if (DemandedBits.isSubsetOf(RHSKnown.One | LHSKnown.Zero))
      return Op.getOperand(1);
    break;
  }
  case ISD::XOR: {
    LHSKnown = DAG.computeKnownBits(Op.getOperand(0), DemandedElts, Depth + 1);
    RHSKnown = DAG.computeKnownBits(Op.getOperand(1), DemandedElts, Depth + 1);

    // If all of the demanded bits are known zero on one side, return the
    // other.
    if (DemandedBits.isSubsetOf(RHSKnown.Zero))
      return Op.getOperand(0);
    if (DemandedBits.isSubsetOf(LHSKnown.Zero))
      return Op.getOperand(1);
    break;
  }
  case ISD::SHL: {
    // If we are only demanding sign bits then we can use the shift source
    // directly.
    if (const APInt *MaxSA =
            DAG.getValidMaximumShiftAmountConstant(Op, DemandedElts)) {
      SDValue Op0 = Op.getOperand(0);
      unsigned ShAmt = MaxSA->getZExtValue();
      unsigned NumSignBits =
          DAG.ComputeNumSignBits(Op0, DemandedElts, Depth + 1);
      unsigned UpperDemandedBits = BitWidth - DemandedBits.countTrailingZeros();
      if (NumSignBits > ShAmt && (NumSignBits - ShAmt) >= (UpperDemandedBits))
        return Op0;
    }
    break;
  }
  case ISD::SETCC: {
    SDValue Op0 = Op.getOperand(0);
    SDValue Op1 = Op.getOperand(1);
    ISD::CondCode CC = cast<CondCodeSDNode>(Op.getOperand(2))->get();
    // If (1) we only need the sign-bit, (2) the setcc operands are the same
    // width as the setcc result, and (3) the result of a setcc conforms to 0
    // or -1, we may be able to bypass the setcc.
    if (DemandedBits.isSignMask() &&
        Op0.getScalarValueSizeInBits() == BitWidth &&
        getBooleanContents(Op0.getValueType()) ==
            BooleanContent::ZeroOrNegativeOneBooleanContent) {
      // If we're testing X < 0, then this compare isn't needed - just use X!
      // FIXME: We're limiting to integer types here, but this should also work
      // if we don't care about FP signed-zero. The use of SETLT with FP means
      // that we don't care about NaNs.
      if (CC == ISD::SETLT && Op1.getValueType().isInteger() &&
          (isNullConstant(Op1) || ISD::isBuildVectorAllZeros(Op1.getNode())))
        return Op0;
    }
    break;
  }
  case ISD::SIGN_EXTEND_INREG: {
    // If none of the extended bits are demanded, eliminate the sextinreg.
    SDValue Op0 = Op.getOperand(0);
    EVT ExVT = cast<VTSDNode>(Op.getOperand(1))->getVT();
    unsigned ExBits = ExVT.getScalarSizeInBits();
    if (DemandedBits.getActiveBits() <= ExBits)
      return Op0;
    // If the input is already sign extended, just drop the extension.
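    // (A value that is already sign-extended from ExBits has at least
    // BitWidth - ExBits + 1 known sign bits: the copies plus the original
    // sign bit itself.)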
    unsigned NumSignBits =
        DAG.ComputeNumSignBits(Op0, DemandedElts, Depth + 1);
    if (NumSignBits >= (BitWidth - ExBits + 1))
      return Op0;
    break;
  }
  case ISD::ANY_EXTEND_VECTOR_INREG:
  case ISD::SIGN_EXTEND_VECTOR_INREG:
  case ISD::ZERO_EXTEND_VECTOR_INREG: {
    // If we only want the lowest element and none of the extended bits, then
    // we can return the bitcasted source vector.
    SDValue Src = Op.getOperand(0);
    EVT SrcVT = Src.getValueType();
    EVT DstVT = Op.getValueType();
    if (DemandedElts == 1 && DstVT.getSizeInBits() == SrcVT.getSizeInBits() &&
        DAG.getDataLayout().isLittleEndian() &&
        DemandedBits.getActiveBits() <= SrcVT.getScalarSizeInBits()) {
      return DAG.getBitcast(DstVT, Src);
    }
    break;
  }
  case ISD::INSERT_VECTOR_ELT: {
    // If we don't demand the inserted element, return the base vector.
    SDValue Vec = Op.getOperand(0);
    auto *CIdx = dyn_cast<ConstantSDNode>(Op.getOperand(2));
    EVT VecVT = Vec.getValueType();
    if (CIdx && CIdx->getAPIntValue().ult(VecVT.getVectorNumElements()) &&
        !DemandedElts[CIdx->getZExtValue()])
      return Vec;
    break;
  }
  case ISD::INSERT_SUBVECTOR: {
    // If we don't demand the inserted subvector, return the base vector.
    SDValue Vec = Op.getOperand(0);
    SDValue Sub = Op.getOperand(1);
    uint64_t Idx = Op.getConstantOperandVal(2);
    unsigned NumSubElts = Sub.getValueType().getVectorNumElements();
    if (DemandedElts.extractBits(NumSubElts, Idx) == 0)
      return Vec;
    break;
  }
  case ISD::VECTOR_SHUFFLE: {
    ArrayRef<int> ShuffleMask = cast<ShuffleVectorSDNode>(Op)->getMask();

    // If all the demanded elts are from one operand and are inline,
    // then we can use the operand directly.
    bool AllUndef = true, IdentityLHS = true, IdentityRHS = true;
    for (unsigned i = 0; i != NumElts; ++i) {
      int M = ShuffleMask[i];
      if (M < 0 || !DemandedElts[i])
        continue;
      AllUndef = false;
      IdentityLHS &= (M == (int)i);
      IdentityRHS &= ((M - NumElts) == i);
    }

    if (AllUndef)
      return DAG.getUNDEF(Op.getValueType());
    if (IdentityLHS)
      return Op.getOperand(0);
    if (IdentityRHS)
      return Op.getOperand(1);
    break;
  }
  default:
    if (Op.getOpcode() >= ISD::BUILTIN_OP_END)
      if (SDValue V = SimplifyMultipleUseDemandedBitsForTargetNode(
              Op, DemandedBits, DemandedElts, DAG, Depth))
        return V;
    break;
  }
  return SDValue();
}

SDValue TargetLowering::SimplifyMultipleUseDemandedBits(
    SDValue Op, const APInt &DemandedBits, SelectionDAG &DAG,
    unsigned Depth) const {
  EVT VT = Op.getValueType();
  APInt DemandedElts = VT.isVector()
                           ? APInt::getAllOnesValue(VT.getVectorNumElements())
                           : APInt(1, 1);
  return SimplifyMultipleUseDemandedBits(Op, DemandedBits, DemandedElts, DAG,
                                         Depth);
}

SDValue TargetLowering::SimplifyMultipleUseDemandedVectorElts(
    SDValue Op, const APInt &DemandedElts, SelectionDAG &DAG,
    unsigned Depth) const {
  APInt DemandedBits = APInt::getAllOnesValue(Op.getScalarValueSizeInBits());
  return SimplifyMultipleUseDemandedBits(Op, DemandedBits, DemandedElts, DAG,
                                         Depth);
}

/// Look at Op. At this point, we know that only the OriginalDemandedBits of
/// the result of Op are ever used downstream. If we can use this information
/// to simplify Op, create a new simplified DAG node and return true,
/// returning the original and new nodes in Old and New.
/// Otherwise, analyze the expression and return a mask of Known bits for the
/// expression (used to simplify the caller). The Known bits may only be
/// accurate for those bits in the OriginalDemandedBits and
/// OriginalDemandedElts.
bool TargetLowering::SimplifyDemandedBits(
    SDValue Op, const APInt &OriginalDemandedBits,
    const APInt &OriginalDemandedElts, KnownBits &Known, TargetLoweringOpt &TLO,
    unsigned Depth, bool AssumeSingleUse) const {
  unsigned BitWidth = OriginalDemandedBits.getBitWidth();
  assert(Op.getScalarValueSizeInBits() == BitWidth &&
         "Mask size mismatches value type size!");

  // Don't know anything.
  Known = KnownBits(BitWidth);

  // TODO: We can probably do more work on calculating the known bits and
  // simplifying the operations for scalable vectors, but for now we just
  // bail out.
  if (Op.getValueType().isScalableVector())
    return false;

  unsigned NumElts = OriginalDemandedElts.getBitWidth();
  assert((!Op.getValueType().isVector() ||
          NumElts == Op.getValueType().getVectorNumElements()) &&
         "Unexpected vector size");

  APInt DemandedBits = OriginalDemandedBits;
  APInt DemandedElts = OriginalDemandedElts;
  SDLoc dl(Op);
  auto &DL = TLO.DAG.getDataLayout();

  // Undef operand.
  if (Op.isUndef())
    return false;

  if (Op.getOpcode() == ISD::Constant) {
    // We know all of the bits for a constant!
    Known.One = cast<ConstantSDNode>(Op)->getAPIntValue();
    Known.Zero = ~Known.One;
    return false;
  }

  if (Op.getOpcode() == ISD::ConstantFP) {
    // We know all of the bits for a floating point constant!
    Known.One = cast<ConstantFPSDNode>(Op)->getValueAPF().bitcastToAPInt();
    Known.Zero = ~Known.One;
    return false;
  }

  // Other users may use these bits.
  EVT VT = Op.getValueType();
  if (!Op.getNode()->hasOneUse() && !AssumeSingleUse) {
    if (Depth != 0) {
      // If not at the root, just compute the Known bits to
      // simplify things downstream.
      Known = TLO.DAG.computeKnownBits(Op, DemandedElts, Depth);
      return false;
    }
    // If this is the root being simplified, allow it to have multiple uses,
    // just set the DemandedBits/Elts to all bits.
    DemandedBits = APInt::getAllOnesValue(BitWidth);
    DemandedElts = APInt::getAllOnesValue(NumElts);
  } else if (OriginalDemandedBits == 0 || OriginalDemandedElts == 0) {
    // Not demanding any bits/elts from Op.
    return TLO.CombineTo(Op, TLO.DAG.getUNDEF(VT));
  } else if (Depth >= SelectionDAG::MaxRecursionDepth) {
    // Limit search depth.
    return false;
  }

  KnownBits Known2;
  switch (Op.getOpcode()) {
  case ISD::TargetConstant:
    llvm_unreachable("Can't simplify this node");
  case ISD::SCALAR_TO_VECTOR: {
    if (!DemandedElts[0])
      return TLO.CombineTo(Op, TLO.DAG.getUNDEF(VT));

    KnownBits SrcKnown;
    SDValue Src = Op.getOperand(0);
    unsigned SrcBitWidth = Src.getScalarValueSizeInBits();
    APInt SrcDemandedBits = DemandedBits.zextOrSelf(SrcBitWidth);
    if (SimplifyDemandedBits(Src, SrcDemandedBits, SrcKnown, TLO, Depth + 1))
      return true;

    // Upper elements are undef, so only get the knownbits if we just demand
    // the bottom element.
    if (DemandedElts == 1)
      Known = SrcKnown.anyextOrTrunc(BitWidth);
    break;
  }
  case ISD::BUILD_VECTOR:
    // Collect the known bits that are shared by every demanded element.
    // TODO: Call SimplifyDemandedBits for non-constant demanded elements.
    Known = TLO.DAG.computeKnownBits(Op, DemandedElts, Depth);
    return false; // Don't fall through, will infinitely loop.
  case ISD::LOAD: {
    LoadSDNode *LD = cast<LoadSDNode>(Op);
    if (getTargetConstantFromLoad(LD)) {
      Known = TLO.DAG.computeKnownBits(Op, DemandedElts, Depth);
      return false; // Don't fall through, will infinitely loop.
    } else if (ISD::isZEXTLoad(Op.getNode()) && Op.getResNo() == 0) {
      // If this is a ZEXTLoad and we are looking at the loaded value.
      EVT MemVT = LD->getMemoryVT();
      unsigned MemBits = MemVT.getScalarSizeInBits();
      Known.Zero.setBitsFrom(MemBits);
      return false; // Don't fall through, will infinitely loop.
    }
    break;
  }
  case ISD::INSERT_VECTOR_ELT: {
    SDValue Vec = Op.getOperand(0);
    SDValue Scl = Op.getOperand(1);
    auto *CIdx = dyn_cast<ConstantSDNode>(Op.getOperand(2));
    EVT VecVT = Vec.getValueType();

    // If index isn't constant, assume we need all vector elements AND the
    // inserted element.
    APInt DemandedVecElts(DemandedElts);
    if (CIdx && CIdx->getAPIntValue().ult(VecVT.getVectorNumElements())) {
      unsigned Idx = CIdx->getZExtValue();
      DemandedVecElts.clearBit(Idx);

      // Inserted element is not required.
      if (!DemandedElts[Idx])
        return TLO.CombineTo(Op, Vec);
    }

    KnownBits KnownScl;
    unsigned NumSclBits = Scl.getScalarValueSizeInBits();
    APInt DemandedSclBits = DemandedBits.zextOrTrunc(NumSclBits);
    if (SimplifyDemandedBits(Scl, DemandedSclBits, KnownScl, TLO, Depth + 1))
      return true;

    Known = KnownScl.anyextOrTrunc(BitWidth);

    KnownBits KnownVec;
    if (SimplifyDemandedBits(Vec, DemandedBits, DemandedVecElts, KnownVec, TLO,
                             Depth + 1))
      return true;

    if (!!DemandedVecElts) {
      Known.One &= KnownVec.One;
      Known.Zero &= KnownVec.Zero;
    }

    return false;
  }
  case ISD::INSERT_SUBVECTOR: {
    // Demand any elements from the subvector and the remainder from the src
    // it's inserted into.
    SDValue Src = Op.getOperand(0);
    SDValue Sub = Op.getOperand(1);
    uint64_t Idx = Op.getConstantOperandVal(2);
    unsigned NumSubElts = Sub.getValueType().getVectorNumElements();
    APInt DemandedSubElts = DemandedElts.extractBits(NumSubElts, Idx);
    APInt DemandedSrcElts = DemandedElts;
    DemandedSrcElts.insertBits(APInt::getNullValue(NumSubElts), Idx);

    KnownBits KnownSub, KnownSrc;
    if (SimplifyDemandedBits(Sub, DemandedBits, DemandedSubElts, KnownSub, TLO,
                             Depth + 1))
      return true;
    if (SimplifyDemandedBits(Src, DemandedBits, DemandedSrcElts, KnownSrc, TLO,
                             Depth + 1))
      return true;

    Known.Zero.setAllBits();
    Known.One.setAllBits();
    if (!!DemandedSubElts) {
      Known.One &= KnownSub.One;
      Known.Zero &= KnownSub.Zero;
    }
    if (!!DemandedSrcElts) {
      Known.One &= KnownSrc.One;
      Known.Zero &= KnownSrc.Zero;
    }

    // Attempt to avoid multi-use src if we don't need anything from it.
    if (!DemandedBits.isAllOnesValue() || !DemandedSubElts.isAllOnesValue() ||
        !DemandedSrcElts.isAllOnesValue()) {
      SDValue NewSub = SimplifyMultipleUseDemandedBits(
          Sub, DemandedBits, DemandedSubElts, TLO.DAG, Depth + 1);
      SDValue NewSrc = SimplifyMultipleUseDemandedBits(
          Src, DemandedBits, DemandedSrcElts, TLO.DAG, Depth + 1);
      if (NewSub || NewSrc) {
        NewSub = NewSub ? NewSub : Sub;
        NewSrc = NewSrc ? NewSrc : Src;
        SDValue NewOp = TLO.DAG.getNode(Op.getOpcode(), dl, VT, NewSrc, NewSub,
                                        Op.getOperand(2));
        return TLO.CombineTo(Op, NewOp);
      }
    }
    break;
  }
  case ISD::EXTRACT_SUBVECTOR: {
    // Offset the demanded elts by the subvector index.
    SDValue Src = Op.getOperand(0);
    if (Src.getValueType().isScalableVector())
      break;
    uint64_t Idx = Op.getConstantOperandVal(1);
    unsigned NumSrcElts = Src.getValueType().getVectorNumElements();
    APInt DemandedSrcElts = DemandedElts.zextOrSelf(NumSrcElts).shl(Idx);

    if (SimplifyDemandedBits(Src, DemandedBits, DemandedSrcElts, Known, TLO,
                             Depth + 1))
      return true;

    // Attempt to avoid multi-use src if we don't need anything from it.
    if (!DemandedBits.isAllOnesValue() || !DemandedSrcElts.isAllOnesValue()) {
      SDValue DemandedSrc = SimplifyMultipleUseDemandedBits(
          Src, DemandedBits, DemandedSrcElts, TLO.DAG, Depth + 1);
      if (DemandedSrc) {
        SDValue NewOp = TLO.DAG.getNode(Op.getOpcode(), dl, VT, DemandedSrc,
                                        Op.getOperand(1));
        return TLO.CombineTo(Op, NewOp);
      }
    }
    break;
  }
  case ISD::CONCAT_VECTORS: {
    Known.Zero.setAllBits();
    Known.One.setAllBits();
    EVT SubVT = Op.getOperand(0).getValueType();
    unsigned NumSubVecs = Op.getNumOperands();
    unsigned NumSubElts = SubVT.getVectorNumElements();
    for (unsigned i = 0; i != NumSubVecs; ++i) {
      APInt DemandedSubElts =
          DemandedElts.extractBits(NumSubElts, i * NumSubElts);
      if (SimplifyDemandedBits(Op.getOperand(i), DemandedBits, DemandedSubElts,
                               Known2, TLO, Depth + 1))
        return true;
      // Known bits are shared by every demanded subvector element.
      if (!!DemandedSubElts) {
        Known.One &= Known2.One;
        Known.Zero &= Known2.Zero;
      }
    }
    break;
  }
  case ISD::VECTOR_SHUFFLE: {
    ArrayRef<int> ShuffleMask = cast<ShuffleVectorSDNode>(Op)->getMask();

    // Collect demanded elements from shuffle operands.
    APInt DemandedLHS(NumElts, 0);
    APInt DemandedRHS(NumElts, 0);
    for (unsigned i = 0; i != NumElts; ++i) {
      if (!DemandedElts[i])
        continue;
      int M = ShuffleMask[i];
      if (M < 0) {
        // For UNDEF elements, we don't know anything about the common state of
        // the shuffle result.
        DemandedLHS.clearAllBits();
        DemandedRHS.clearAllBits();
        break;
      }
      assert(0 <= M && M < (int)(2 * NumElts) && "Shuffle index out of range");
      if (M < (int)NumElts)
        DemandedLHS.setBit(M);
      else
        DemandedRHS.setBit(M - NumElts);
    }

    if (!!DemandedLHS || !!DemandedRHS) {
      SDValue Op0 = Op.getOperand(0);
      SDValue Op1 = Op.getOperand(1);

      Known.Zero.setAllBits();
      Known.One.setAllBits();
      if (!!DemandedLHS) {
        if (SimplifyDemandedBits(Op0, DemandedBits, DemandedLHS, Known2, TLO,
                                 Depth + 1))
          return true;
        Known.One &= Known2.One;
        Known.Zero &= Known2.Zero;
      }
      if (!!DemandedRHS) {
        if (SimplifyDemandedBits(Op1, DemandedBits, DemandedRHS, Known2, TLO,
                                 Depth + 1))
          return true;
        Known.One &= Known2.One;
        Known.Zero &= Known2.Zero;
      }

      // Attempt to avoid multi-use ops if we don't need anything from them.
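      // (A hit here does not create extra work: the returned values already
      // exist in the DAG, so rebuilding the shuffle over them leaves the
      // original multi-use operands and their other users untouched.)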
      SDValue DemandedOp0 = SimplifyMultipleUseDemandedBits(
          Op0, DemandedBits, DemandedLHS, TLO.DAG, Depth + 1);
      SDValue DemandedOp1 = SimplifyMultipleUseDemandedBits(
          Op1, DemandedBits, DemandedRHS, TLO.DAG, Depth + 1);
      if (DemandedOp0 || DemandedOp1) {
        Op0 = DemandedOp0 ? DemandedOp0 : Op0;
        Op1 = DemandedOp1 ? DemandedOp1 : Op1;
        SDValue NewOp = TLO.DAG.getVectorShuffle(VT, dl, Op0, Op1, ShuffleMask);
        return TLO.CombineTo(Op, NewOp);
      }
    }
    break;
  }
  case ISD::AND: {
    SDValue Op0 = Op.getOperand(0);
    SDValue Op1 = Op.getOperand(1);

    // If the RHS is a constant, check to see if the LHS would be zero without
    // using the bits from the RHS. Below, we use knowledge about the RHS to
    // simplify the LHS; here we're using information from the LHS to simplify
    // the RHS.
    if (ConstantSDNode *RHSC = isConstOrConstSplat(Op1)) {
      // Do not increment Depth here; that can cause an infinite loop.
      KnownBits LHSKnown = TLO.DAG.computeKnownBits(Op0, DemandedElts, Depth);
      // If the LHS already has zeros where RHSC does, this 'and' is dead.
      if ((LHSKnown.Zero & DemandedBits) ==
          (~RHSC->getAPIntValue() & DemandedBits))
        return TLO.CombineTo(Op, Op0);

      // If any of the set bits in the RHS are known zero on the LHS, shrink
      // the constant.
      if (ShrinkDemandedConstant(Op, ~LHSKnown.Zero & DemandedBits,
                                 DemandedElts, TLO))
        return true;

      // Bitwise-not (xor X, -1) is a special case: we don't usually shrink its
      // constant, but if this 'and' is only clearing bits that were just set by
      // the xor, then this 'and' can be eliminated by shrinking the mask of
      // the xor. For example, for a 32-bit X:
      // and (xor (srl X, 31), -1), 1 --> xor (srl X, 31), 1
      if (isBitwiseNot(Op0) && Op0.hasOneUse() &&
          LHSKnown.One == ~RHSC->getAPIntValue()) {
        SDValue Xor = TLO.DAG.getNode(ISD::XOR, dl, VT, Op0.getOperand(0), Op1);
        return TLO.CombineTo(Op, Xor);
      }
    }

    if (SimplifyDemandedBits(Op1, DemandedBits, DemandedElts, Known, TLO,
                             Depth + 1))
      return true;
    assert(!Known.hasConflict() && "Bits known to be one AND zero?");
    if (SimplifyDemandedBits(Op0, ~Known.Zero & DemandedBits, DemandedElts,
                             Known2, TLO, Depth + 1))
      return true;
    assert(!Known2.hasConflict() && "Bits known to be one AND zero?");

    // Attempt to avoid multi-use ops if we don't need anything from them.
    if (!DemandedBits.isAllOnesValue() || !DemandedElts.isAllOnesValue()) {
      SDValue DemandedOp0 = SimplifyMultipleUseDemandedBits(
          Op0, DemandedBits, DemandedElts, TLO.DAG, Depth + 1);
      SDValue DemandedOp1 = SimplifyMultipleUseDemandedBits(
          Op1, DemandedBits, DemandedElts, TLO.DAG, Depth + 1);
      if (DemandedOp0 || DemandedOp1) {
        Op0 = DemandedOp0 ? DemandedOp0 : Op0;
        Op1 = DemandedOp1 ? DemandedOp1 : Op1;
        SDValue NewOp = TLO.DAG.getNode(Op.getOpcode(), dl, VT, Op0, Op1);
        return TLO.CombineTo(Op, NewOp);
      }
    }

    // If all of the demanded bits are known one on one side, return the other.
    // These bits cannot contribute to the result of the 'and'.
    if (DemandedBits.isSubsetOf(Known2.Zero | Known.One))
      return TLO.CombineTo(Op, Op0);
    if (DemandedBits.isSubsetOf(Known.Zero | Known2.One))
      return TLO.CombineTo(Op, Op1);
    // If all of the demanded bits in the inputs are known zeros, return zero.
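    // e.g. with DemandedBits = 0xF0, (and X, 0x0F) is known zero in every
    // demanded bit, since each such bit is zero on at least one side.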
    if (DemandedBits.isSubsetOf(Known.Zero | Known2.Zero))
      return TLO.CombineTo(Op, TLO.DAG.getConstant(0, dl, VT));
    // If the RHS is a constant, see if we can simplify it.
    if (ShrinkDemandedConstant(Op, ~Known2.Zero & DemandedBits, DemandedElts,
                               TLO))
      return true;
    // If the operation can be done in a smaller type, do so.
    if (ShrinkDemandedOp(Op, BitWidth, DemandedBits, TLO))
      return true;

    Known &= Known2;
    break;
  }
  case ISD::OR: {
    SDValue Op0 = Op.getOperand(0);
    SDValue Op1 = Op.getOperand(1);

    if (SimplifyDemandedBits(Op1, DemandedBits, DemandedElts, Known, TLO,
                             Depth + 1))
      return true;
    assert(!Known.hasConflict() && "Bits known to be one AND zero?");
    if (SimplifyDemandedBits(Op0, ~Known.One & DemandedBits, DemandedElts,
                             Known2, TLO, Depth + 1))
      return true;
    assert(!Known2.hasConflict() && "Bits known to be one AND zero?");

    // Attempt to avoid multi-use ops if we don't need anything from them.
    if (!DemandedBits.isAllOnesValue() || !DemandedElts.isAllOnesValue()) {
      SDValue DemandedOp0 = SimplifyMultipleUseDemandedBits(
          Op0, DemandedBits, DemandedElts, TLO.DAG, Depth + 1);
      SDValue DemandedOp1 = SimplifyMultipleUseDemandedBits(
          Op1, DemandedBits, DemandedElts, TLO.DAG, Depth + 1);
      if (DemandedOp0 || DemandedOp1) {
        Op0 = DemandedOp0 ? DemandedOp0 : Op0;
        Op1 = DemandedOp1 ? DemandedOp1 : Op1;
        SDValue NewOp = TLO.DAG.getNode(Op.getOpcode(), dl, VT, Op0, Op1);
        return TLO.CombineTo(Op, NewOp);
      }
    }

    // If all of the demanded bits are known zero on one side, return the
    // other. These bits cannot contribute to the result of the 'or'.
    if (DemandedBits.isSubsetOf(Known2.One | Known.Zero))
      return TLO.CombineTo(Op, Op0);
    if (DemandedBits.isSubsetOf(Known.One | Known2.Zero))
      return TLO.CombineTo(Op, Op1);
    // If the RHS is a constant, see if we can simplify it.
    if (ShrinkDemandedConstant(Op, DemandedBits, DemandedElts, TLO))
      return true;
    // If the operation can be done in a smaller type, do so.
    if (ShrinkDemandedOp(Op, BitWidth, DemandedBits, TLO))
      return true;

    Known |= Known2;
    break;
  }
  case ISD::XOR: {
    SDValue Op0 = Op.getOperand(0);
    SDValue Op1 = Op.getOperand(1);

    if (SimplifyDemandedBits(Op1, DemandedBits, DemandedElts, Known, TLO,
                             Depth + 1))
      return true;
    assert(!Known.hasConflict() && "Bits known to be one AND zero?");
    if (SimplifyDemandedBits(Op0, DemandedBits, DemandedElts, Known2, TLO,
                             Depth + 1))
      return true;
    assert(!Known2.hasConflict() && "Bits known to be one AND zero?");

    // Attempt to avoid multi-use ops if we don't need anything from them.
    if (!DemandedBits.isAllOnesValue() || !DemandedElts.isAllOnesValue()) {
      SDValue DemandedOp0 = SimplifyMultipleUseDemandedBits(
          Op0, DemandedBits, DemandedElts, TLO.DAG, Depth + 1);
      SDValue DemandedOp1 = SimplifyMultipleUseDemandedBits(
          Op1, DemandedBits, DemandedElts, TLO.DAG, Depth + 1);
      if (DemandedOp0 || DemandedOp1) {
        Op0 = DemandedOp0 ? DemandedOp0 : Op0;
        Op1 = DemandedOp1 ? DemandedOp1 : Op1;
        SDValue NewOp = TLO.DAG.getNode(Op.getOpcode(), dl, VT, Op0, Op1);
        return TLO.CombineTo(Op, NewOp);
      }
    }

    // If all of the demanded bits are known zero on one side, return the
    // other. These bits cannot contribute to the result of the 'xor'.
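    // e.g. (xor X, (shl Y, 8)) with DemandedBits = 0xFF simplifies to X,
    // because the shifted operand is known zero in the whole low byte.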
    if (DemandedBits.isSubsetOf(Known.Zero))
      return TLO.CombineTo(Op, Op0);
    if (DemandedBits.isSubsetOf(Known2.Zero))
      return TLO.CombineTo(Op, Op1);
    // If the operation can be done in a smaller type, do so.
    if (ShrinkDemandedOp(Op, BitWidth, DemandedBits, TLO))
      return true;

    // If all of the unknown bits are known to be zero on one side or the
    // other, turn this into an *inclusive* or.
    // e.g. (A & C1)^(B & C2) -> (A & C1)|(B & C2) iff C1&C2 == 0
    if (DemandedBits.isSubsetOf(Known.Zero | Known2.Zero))
      return TLO.CombineTo(Op, TLO.DAG.getNode(ISD::OR, dl, VT, Op0, Op1));

    ConstantSDNode *C = isConstOrConstSplat(Op1, DemandedElts);
    if (C) {
      // If one side is a constant, and all of the set bits in the constant are
      // also known set on the other side, turn this into an AND, as we know
      // the bits will be cleared.
      // e.g. (X | C1) ^ C2 --> (X | C1) & ~C2 iff (C1&C2) == C2
      // NB: it is okay if more bits are known than are requested
      if (C->getAPIntValue() == Known2.One) {
        SDValue ANDC =
            TLO.DAG.getConstant(~C->getAPIntValue() & DemandedBits, dl, VT);
        return TLO.CombineTo(Op, TLO.DAG.getNode(ISD::AND, dl, VT, Op0, ANDC));
      }

      // If the RHS is a constant, see if we can change it. Don't alter a -1
      // constant because that's a 'not' op, and that is better for combining
      // and codegen.
      if (!C->isAllOnesValue() && DemandedBits.isSubsetOf(C->getAPIntValue())) {
        // We're flipping all demanded bits. Flip the undemanded bits too.
        SDValue New = TLO.DAG.getNOT(dl, Op0, VT);
        return TLO.CombineTo(Op, New);
      }
    }

    // If we can't turn this into a 'not', try to shrink the constant.
    if (!C || !C->isAllOnesValue())
      if (ShrinkDemandedConstant(Op, DemandedBits, DemandedElts, TLO))
        return true;

    Known ^= Known2;
    break;
  }
  case ISD::SELECT:
    if (SimplifyDemandedBits(Op.getOperand(2), DemandedBits, Known, TLO,
                             Depth + 1))
      return true;
    if (SimplifyDemandedBits(Op.getOperand(1), DemandedBits, Known2, TLO,
                             Depth + 1))
      return true;
    assert(!Known.hasConflict() && "Bits known to be one AND zero?");
    assert(!Known2.hasConflict() && "Bits known to be one AND zero?");

    // If the operands are constants, see if we can simplify them.
    if (ShrinkDemandedConstant(Op, DemandedBits, DemandedElts, TLO))
      return true;

    // Only known if known in both the LHS and RHS.
    Known.One &= Known2.One;
    Known.Zero &= Known2.Zero;
    break;
  case ISD::SELECT_CC:
    if (SimplifyDemandedBits(Op.getOperand(3), DemandedBits, Known, TLO,
                             Depth + 1))
      return true;
    if (SimplifyDemandedBits(Op.getOperand(2), DemandedBits, Known2, TLO,
                             Depth + 1))
      return true;
    assert(!Known.hasConflict() && "Bits known to be one AND zero?");
    assert(!Known2.hasConflict() && "Bits known to be one AND zero?");

    // If the operands are constants, see if we can simplify them.
    if (ShrinkDemandedConstant(Op, DemandedBits, DemandedElts, TLO))
      return true;

    // Only known if known in both the LHS and RHS.
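    // (Either operand may be selected at run time, so a bit is known only
    // when both possible results agree on it.)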
    Known.One &= Known2.One;
    Known.Zero &= Known2.Zero;
    break;
  case ISD::SETCC: {
    SDValue Op0 = Op.getOperand(0);
    SDValue Op1 = Op.getOperand(1);
    ISD::CondCode CC = cast<CondCodeSDNode>(Op.getOperand(2))->get();
    // If (1) we only need the sign-bit, (2) the setcc operands are the same
    // width as the setcc result, and (3) the result of a setcc conforms to 0
    // or -1, we may be able to bypass the setcc.
    if (DemandedBits.isSignMask() &&
        Op0.getScalarValueSizeInBits() == BitWidth &&
        getBooleanContents(Op0.getValueType()) ==
            BooleanContent::ZeroOrNegativeOneBooleanContent) {
      // If we're testing X < 0, then this compare isn't needed - just use X!
      // FIXME: We're limiting to integer types here, but this should also work
      // if we don't care about FP signed-zero. The use of SETLT with FP means
      // that we don't care about NaNs.
      if (CC == ISD::SETLT && Op1.getValueType().isInteger() &&
          (isNullConstant(Op1) || ISD::isBuildVectorAllZeros(Op1.getNode())))
        return TLO.CombineTo(Op, Op0);

      // TODO: Should we check for other forms of sign-bit comparisons?
      // Examples: X <= -1, X >= 0
    }
    if (getBooleanContents(Op0.getValueType()) ==
            TargetLowering::ZeroOrOneBooleanContent &&
        BitWidth > 1)
      Known.Zero.setBitsFrom(1);
    break;
  }
  case ISD::SHL: {
    SDValue Op0 = Op.getOperand(0);
    SDValue Op1 = Op.getOperand(1);
    EVT ShiftVT = Op1.getValueType();

    if (const APInt *SA =
            TLO.DAG.getValidShiftAmountConstant(Op, DemandedElts)) {
      unsigned ShAmt = SA->getZExtValue();
      if (ShAmt == 0)
        return TLO.CombineTo(Op, Op0);

      // If this is ((X >>u C1) << ShAmt), see if we can simplify this into a
      // single shift. We can do this if the bottom bits (which are shifted
      // out) are never demanded.
      // TODO - support non-uniform vector amounts.
      if (Op0.getOpcode() == ISD::SRL) {
        if (!DemandedBits.intersects(APInt::getLowBitsSet(BitWidth, ShAmt))) {
          if (const APInt *SA2 =
                  TLO.DAG.getValidShiftAmountConstant(Op0, DemandedElts)) {
            unsigned C1 = SA2->getZExtValue();
            unsigned Opc = ISD::SHL;
            int Diff = ShAmt - C1;
            if (Diff < 0) {
              Diff = -Diff;
              Opc = ISD::SRL;
            }
            SDValue NewSA = TLO.DAG.getConstant(Diff, dl, ShiftVT);
            return TLO.CombineTo(
                Op, TLO.DAG.getNode(Opc, dl, VT, Op0.getOperand(0), NewSA));
          }
        }
      }

      // Convert (shl (anyext x, c)) to (anyext (shl x, c)) if the high bits
      // are not demanded. This will likely allow the anyext to be folded away.
      // TODO - support non-uniform vector amounts.
      if (Op0.getOpcode() == ISD::ANY_EXTEND) {
        SDValue InnerOp = Op0.getOperand(0);
        EVT InnerVT = InnerOp.getValueType();
        unsigned InnerBits = InnerVT.getScalarSizeInBits();
        if (ShAmt < InnerBits && DemandedBits.getActiveBits() <= InnerBits &&
            isTypeDesirableForOp(ISD::SHL, InnerVT)) {
          EVT ShTy = getShiftAmountTy(InnerVT, DL);
          if (!APInt(BitWidth, ShAmt).isIntN(ShTy.getSizeInBits()))
            ShTy = InnerVT;
          SDValue NarrowShl =
              TLO.DAG.getNode(ISD::SHL, dl, InnerVT, InnerOp,
                              TLO.DAG.getConstant(ShAmt, dl, ShTy));
          return TLO.CombineTo(
              Op, TLO.DAG.getNode(ISD::ANY_EXTEND, dl, VT, NarrowShl));
        }

        // Repeat the SHL optimization above in cases where an extension
        // intervenes: (shl (anyext (shr x, c1)), c2) to
        // (shl (anyext x), c2-c1).
        // This requires that the bottom c1 bits aren't demanded (as above)
        // and that the shifted upper c1 bits of x aren't demanded.
        // TODO - support non-uniform vector amounts.
        if (Op0.hasOneUse() && InnerOp.getOpcode() == ISD::SRL &&
            InnerOp.hasOneUse()) {
          if (const APInt *SA2 =
                  TLO.DAG.getValidShiftAmountConstant(InnerOp, DemandedElts)) {
            unsigned InnerShAmt = SA2->getZExtValue();
            if (InnerShAmt < ShAmt && InnerShAmt < InnerBits &&
                DemandedBits.getActiveBits() <=
                    (InnerBits - InnerShAmt + ShAmt) &&
                DemandedBits.countTrailingZeros() >= ShAmt) {
              SDValue NewSA =
                  TLO.DAG.getConstant(ShAmt - InnerShAmt, dl, ShiftVT);
              SDValue NewExt = TLO.DAG.getNode(ISD::ANY_EXTEND, dl, VT,
                                               InnerOp.getOperand(0));
              return TLO.CombineTo(
                  Op, TLO.DAG.getNode(ISD::SHL, dl, VT, NewExt, NewSA));
            }
          }
        }
      }

      APInt InDemandedMask = DemandedBits.lshr(ShAmt);
      if (SimplifyDemandedBits(Op0, InDemandedMask, DemandedElts, Known, TLO,
                               Depth + 1))
        return true;
      assert(!Known.hasConflict() && "Bits known to be one AND zero?");
      Known.Zero <<= ShAmt;
      Known.One <<= ShAmt;
      // low bits known zero.
      Known.Zero.setLowBits(ShAmt);

      // Try shrinking the operation as long as the shift amount will still be
      // in range.
      if ((ShAmt < DemandedBits.getActiveBits()) &&
          ShrinkDemandedOp(Op, BitWidth, DemandedBits, TLO))
        return true;
    }

    // If we are only demanding sign bits then we can use the shift source
    // directly.
    if (const APInt *MaxSA =
            TLO.DAG.getValidMaximumShiftAmountConstant(Op, DemandedElts)) {
      unsigned ShAmt = MaxSA->getZExtValue();
      unsigned NumSignBits =
          TLO.DAG.ComputeNumSignBits(Op0, DemandedElts, Depth + 1);
      unsigned UpperDemandedBits = BitWidth - DemandedBits.countTrailingZeros();
      if (NumSignBits > ShAmt && (NumSignBits - ShAmt) >= (UpperDemandedBits))
        return TLO.CombineTo(Op, Op0);
    }
    break;
  }
  case ISD::SRL: {
    SDValue Op0 = Op.getOperand(0);
    SDValue Op1 = Op.getOperand(1);
    EVT ShiftVT = Op1.getValueType();

    if (const APInt *SA =
            TLO.DAG.getValidShiftAmountConstant(Op, DemandedElts)) {
      unsigned ShAmt = SA->getZExtValue();
      if (ShAmt == 0)
        return TLO.CombineTo(Op, Op0);

      // If this is ((X << C1) >>u ShAmt), see if we can simplify this into a
      // single shift. We can do this if the top bits (which are shifted out)
      // are never demanded.
      // TODO - support non-uniform vector amounts.
      if (Op0.getOpcode() == ISD::SHL) {
        if (!DemandedBits.intersects(APInt::getHighBitsSet(BitWidth, ShAmt))) {
          if (const APInt *SA2 =
                  TLO.DAG.getValidShiftAmountConstant(Op0, DemandedElts)) {
            unsigned C1 = SA2->getZExtValue();
            unsigned Opc = ISD::SRL;
            int Diff = ShAmt - C1;
            if (Diff < 0) {
              Diff = -Diff;
              Opc = ISD::SHL;
            }
            SDValue NewSA = TLO.DAG.getConstant(Diff, dl, ShiftVT);
            return TLO.CombineTo(
                Op, TLO.DAG.getNode(Opc, dl, VT, Op0.getOperand(0), NewSA));
          }
        }
      }

      APInt InDemandedMask = (DemandedBits << ShAmt);

      // If the shift is exact, then it does demand the low bits (and knows
      // that they are zero).
      if (Op->getFlags().hasExact())
        InDemandedMask.setLowBits(ShAmt);

      // Compute the new bits that are at the top now.
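      // e.g. for (srl X, 4) with DemandedBits = 0x0F, InDemandedMask asks for
      // bits 0xF0 of X; the four freshly shifted-in high bits are then marked
      // known zero below.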
1585 if (SimplifyDemandedBits(Op0, InDemandedMask, DemandedElts, Known, TLO, 1586 Depth + 1)) 1587 return true; 1588 assert(!Known.hasConflict() && "Bits known to be one AND zero?"); 1589 Known.Zero.lshrInPlace(ShAmt); 1590 Known.One.lshrInPlace(ShAmt); 1591 // High bits known zero. 1592 Known.Zero.setHighBits(ShAmt); 1593 } 1594 break; 1595 } 1596 case ISD::SRA: { 1597 SDValue Op0 = Op.getOperand(0); 1598 SDValue Op1 = Op.getOperand(1); 1599 EVT ShiftVT = Op1.getValueType(); 1600 1601 // If we only want bits that already match the signbit then we don't need 1602 // to shift. 1603 unsigned NumHiDemandedBits = BitWidth - DemandedBits.countTrailingZeros(); 1604 if (TLO.DAG.ComputeNumSignBits(Op0, DemandedElts, Depth + 1) >= 1605 NumHiDemandedBits) 1606 return TLO.CombineTo(Op, Op0); 1607 1608 // If this is an arithmetic shift right and only the low-bit is set, we can 1609 // always convert this into a logical shr, even if the shift amount is 1610 // variable. The low bit of the shift cannot be an input sign bit unless 1611 // the shift amount is >= the size of the datatype, which is undefined. 1612 if (DemandedBits.isOneValue()) 1613 return TLO.CombineTo(Op, TLO.DAG.getNode(ISD::SRL, dl, VT, Op0, Op1)); 1614 1615 if (const APInt *SA = 1616 TLO.DAG.getValidShiftAmountConstant(Op, DemandedElts)) { 1617 unsigned ShAmt = SA->getZExtValue(); 1618 if (ShAmt == 0) 1619 return TLO.CombineTo(Op, Op0); 1620 1621 APInt InDemandedMask = (DemandedBits << ShAmt); 1622 1623 // If the shift is exact, then it does demand the low bits (and knows that 1624 // they are zero). 1625 if (Op->getFlags().hasExact()) 1626 InDemandedMask.setLowBits(ShAmt); 1627 1628 // If any of the demanded bits are produced by the sign extension, we also 1629 // demand the input sign bit. 1630 if (DemandedBits.countLeadingZeros() < ShAmt) 1631 InDemandedMask.setSignBit(); 1632 1633 if (SimplifyDemandedBits(Op0, InDemandedMask, DemandedElts, Known, TLO, 1634 Depth + 1)) 1635 return true; 1636 assert(!Known.hasConflict() && "Bits known to be one AND zero?"); 1637 Known.Zero.lshrInPlace(ShAmt); 1638 Known.One.lshrInPlace(ShAmt); 1639 1640 // If the input sign bit is known to be zero, or if none of the top bits 1641 // are demanded, turn this into an unsigned shift right. 1642 if (Known.Zero[BitWidth - ShAmt - 1] || 1643 DemandedBits.countLeadingZeros() >= ShAmt) { 1644 SDNodeFlags Flags; 1645 Flags.setExact(Op->getFlags().hasExact()); 1646 return TLO.CombineTo( 1647 Op, TLO.DAG.getNode(ISD::SRL, dl, VT, Op0, Op1, Flags)); 1648 } 1649 1650 int Log2 = DemandedBits.exactLogBase2(); 1651 if (Log2 >= 0) { 1652 // The bit must come from the sign. 1653 SDValue NewSA = TLO.DAG.getConstant(BitWidth - 1 - Log2, dl, ShiftVT); 1654 return TLO.CombineTo(Op, TLO.DAG.getNode(ISD::SRL, dl, VT, Op0, NewSA)); 1655 } 1656 1657 if (Known.One[BitWidth - ShAmt - 1]) 1658 // New bits are known one. 1659 Known.One.setHighBits(ShAmt); 1660 1661 // Attempt to avoid multi-use ops if we don't need anything from them. 
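      // (SimplifyMultipleUseDemandedBits may return a simpler expression that
      // computes the same demanded bits without Op0's other uses, letting us
      // rebuild the SRA on that simpler operand.)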
      if (!InDemandedMask.isAllOnesValue() || !DemandedElts.isAllOnesValue()) {
        SDValue DemandedOp0 = SimplifyMultipleUseDemandedBits(
            Op0, InDemandedMask, DemandedElts, TLO.DAG, Depth + 1);
        if (DemandedOp0) {
          SDValue NewOp = TLO.DAG.getNode(ISD::SRA, dl, VT, DemandedOp0, Op1);
          return TLO.CombineTo(Op, NewOp);
        }
      }
    }
    break;
  }
  case ISD::FSHL:
  case ISD::FSHR: {
    SDValue Op0 = Op.getOperand(0);
    SDValue Op1 = Op.getOperand(1);
    SDValue Op2 = Op.getOperand(2);
    bool IsFSHL = (Op.getOpcode() == ISD::FSHL);

    if (ConstantSDNode *SA = isConstOrConstSplat(Op2, DemandedElts)) {
      unsigned Amt = SA->getAPIntValue().urem(BitWidth);

      // For fshl, 0-shift returns the 1st arg.
      // For fshr, 0-shift returns the 2nd arg.
      if (Amt == 0) {
        if (SimplifyDemandedBits(IsFSHL ? Op0 : Op1, DemandedBits,
                                 DemandedElts, Known, TLO, Depth + 1))
          return true;
        break;
      }

      // fshl: (Op0 << Amt) | (Op1 >> (BW - Amt))
      // fshr: (Op0 << (BW - Amt)) | (Op1 >> Amt)
      APInt Demanded0 = DemandedBits.lshr(IsFSHL ? Amt : (BitWidth - Amt));
      APInt Demanded1 = DemandedBits << (IsFSHL ? (BitWidth - Amt) : Amt);
      if (SimplifyDemandedBits(Op0, Demanded0, DemandedElts, Known2, TLO,
                               Depth + 1))
        return true;
      if (SimplifyDemandedBits(Op1, Demanded1, DemandedElts, Known, TLO,
                               Depth + 1))
        return true;

      Known2.One <<= (IsFSHL ? Amt : (BitWidth - Amt));
      Known2.Zero <<= (IsFSHL ? Amt : (BitWidth - Amt));
      Known.One.lshrInPlace(IsFSHL ? (BitWidth - Amt) : Amt);
      Known.Zero.lshrInPlace(IsFSHL ? (BitWidth - Amt) : Amt);
      Known.One |= Known2.One;
      Known.Zero |= Known2.Zero;
    }

    // For pow-2 bitwidths the shift amount is taken modulo the bitwidth, so
    // we only demand the low bits of the amount.
    if (isPowerOf2_32(BitWidth)) {
      APInt DemandedAmtBits(Op2.getScalarValueSizeInBits(), BitWidth - 1);
      if (SimplifyDemandedBits(Op2, DemandedAmtBits, DemandedElts,
                               Known2, TLO, Depth + 1))
        return true;
    }
    break;
  }
  case ISD::ROTL:
  case ISD::ROTR: {
    SDValue Op0 = Op.getOperand(0);
    SDValue Op1 = Op.getOperand(1);

    // If we're rotating a 0/-1 value, then it stays a 0/-1 value.
    if (BitWidth == TLO.DAG.ComputeNumSignBits(Op0, DemandedElts, Depth + 1))
      return TLO.CombineTo(Op, Op0);

    // For pow-2 bitwidths the rotation amount is taken modulo the bitwidth,
    // so we only demand the low bits of the amount.
    if (isPowerOf2_32(BitWidth)) {
      APInt DemandedAmtBits(Op1.getScalarValueSizeInBits(), BitWidth - 1);
      if (SimplifyDemandedBits(Op1, DemandedAmtBits, DemandedElts, Known2, TLO,
                               Depth + 1))
        return true;
    }
    break;
  }
  case ISD::BITREVERSE: {
    SDValue Src = Op.getOperand(0);
    APInt DemandedSrcBits = DemandedBits.reverseBits();
    if (SimplifyDemandedBits(Src, DemandedSrcBits, DemandedElts, Known2, TLO,
                             Depth + 1))
      return true;
    Known.One = Known2.One.reverseBits();
    Known.Zero = Known2.Zero.reverseBits();
    break;
  }
  case ISD::BSWAP: {
    SDValue Src = Op.getOperand(0);
    APInt DemandedSrcBits = DemandedBits.byteSwap();
    if (SimplifyDemandedBits(Src, DemandedSrcBits, DemandedElts, Known2, TLO,
                             Depth + 1))
      return true;
    Known.One = Known2.One.byteSwap();
    Known.Zero = Known2.Zero.byteSwap();
    break;
  }
  case ISD::CTPOP: {
    // If only 1 bit is demanded, replace with PARITY as long as we're before
    // op legalization.
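    // For example, if only bit 0 of (ctpop X) is demanded, that bit is just
    // the parity of X.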
    // FIXME: Limit to scalars for now.
    if (DemandedBits.isOneValue() && !TLO.LegalOps && !VT.isVector())
      return TLO.CombineTo(Op, TLO.DAG.getNode(ISD::PARITY, dl, VT,
                                               Op.getOperand(0)));

    Known = TLO.DAG.computeKnownBits(Op, DemandedElts, Depth);
    break;
  }
  case ISD::SIGN_EXTEND_INREG: {
    SDValue Op0 = Op.getOperand(0);
    EVT ExVT = cast<VTSDNode>(Op.getOperand(1))->getVT();
    unsigned ExVTBits = ExVT.getScalarSizeInBits();

    // If we only care about the highest bit, don't bother shifting right.
    if (DemandedBits.isSignMask()) {
      unsigned NumSignBits =
          TLO.DAG.ComputeNumSignBits(Op0, DemandedElts, Depth + 1);
      bool AlreadySignExtended = NumSignBits >= BitWidth - ExVTBits + 1;
      // However, if the input is already sign-extended, we expect the sign
      // extension to be dropped altogether later, so do not simplify.
      if (!AlreadySignExtended) {
        // Compute the correct shift amount type, which must be
        // getShiftAmountTy for scalar types after legalization.
        EVT ShiftAmtTy = VT;
        if (TLO.LegalTypes() && !ShiftAmtTy.isVector())
          ShiftAmtTy = getShiftAmountTy(ShiftAmtTy, DL);

        SDValue ShiftAmt =
            TLO.DAG.getConstant(BitWidth - ExVTBits, dl, ShiftAmtTy);
        return TLO.CombineTo(Op,
                             TLO.DAG.getNode(ISD::SHL, dl, VT, Op0, ShiftAmt));
      }
    }

    // If none of the extended bits are demanded, eliminate the sextinreg.
    if (DemandedBits.getActiveBits() <= ExVTBits)
      return TLO.CombineTo(Op, Op0);

    APInt InputDemandedBits = DemandedBits.getLoBits(ExVTBits);

    // Since the sign extended bits are demanded, we know that the sign
    // bit is demanded.
    InputDemandedBits.setBit(ExVTBits - 1);

    if (SimplifyDemandedBits(Op0, InputDemandedBits, Known, TLO, Depth + 1))
      return true;
    assert(!Known.hasConflict() && "Bits known to be one AND zero?");

    // If the sign bit of the input is known set or clear, then we know the
    // top bits of the result.

    // If the input sign bit is known zero, convert this into a zero extension.
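    // For example, sign_extend_inreg(X, i8) with bit 7 of X known zero
    // computes the same value as zero-extending the low 8 bits of X in place.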
1813 if (Known.Zero[ExVTBits - 1]) 1814 return TLO.CombineTo(Op, TLO.DAG.getZeroExtendInReg(Op0, dl, ExVT)); 1815 1816 APInt Mask = APInt::getLowBitsSet(BitWidth, ExVTBits); 1817 if (Known.One[ExVTBits - 1]) { // Input sign bit known set 1818 Known.One.setBitsFrom(ExVTBits); 1819 Known.Zero &= Mask; 1820 } else { // Input sign bit unknown 1821 Known.Zero &= Mask; 1822 Known.One &= Mask; 1823 } 1824 break; 1825 } 1826 case ISD::BUILD_PAIR: { 1827 EVT HalfVT = Op.getOperand(0).getValueType(); 1828 unsigned HalfBitWidth = HalfVT.getScalarSizeInBits(); 1829 1830 APInt MaskLo = DemandedBits.getLoBits(HalfBitWidth).trunc(HalfBitWidth); 1831 APInt MaskHi = DemandedBits.getHiBits(HalfBitWidth).trunc(HalfBitWidth); 1832 1833 KnownBits KnownLo, KnownHi; 1834 1835 if (SimplifyDemandedBits(Op.getOperand(0), MaskLo, KnownLo, TLO, Depth + 1)) 1836 return true; 1837 1838 if (SimplifyDemandedBits(Op.getOperand(1), MaskHi, KnownHi, TLO, Depth + 1)) 1839 return true; 1840 1841 Known.Zero = KnownLo.Zero.zext(BitWidth) | 1842 KnownHi.Zero.zext(BitWidth).shl(HalfBitWidth); 1843 1844 Known.One = KnownLo.One.zext(BitWidth) | 1845 KnownHi.One.zext(BitWidth).shl(HalfBitWidth); 1846 break; 1847 } 1848 case ISD::ZERO_EXTEND: 1849 case ISD::ZERO_EXTEND_VECTOR_INREG: { 1850 SDValue Src = Op.getOperand(0); 1851 EVT SrcVT = Src.getValueType(); 1852 unsigned InBits = SrcVT.getScalarSizeInBits(); 1853 unsigned InElts = SrcVT.isVector() ? SrcVT.getVectorNumElements() : 1; 1854 bool IsVecInReg = Op.getOpcode() == ISD::ZERO_EXTEND_VECTOR_INREG; 1855 1856 // If none of the top bits are demanded, convert this into an any_extend. 1857 if (DemandedBits.getActiveBits() <= InBits) { 1858 // If we only need the non-extended bits of the bottom element 1859 // then we can just bitcast to the result. 1860 if (IsVecInReg && DemandedElts == 1 && 1861 VT.getSizeInBits() == SrcVT.getSizeInBits() && 1862 TLO.DAG.getDataLayout().isLittleEndian()) 1863 return TLO.CombineTo(Op, TLO.DAG.getBitcast(VT, Src)); 1864 1865 unsigned Opc = 1866 IsVecInReg ? ISD::ANY_EXTEND_VECTOR_INREG : ISD::ANY_EXTEND; 1867 if (!TLO.LegalOperations() || isOperationLegal(Opc, VT)) 1868 return TLO.CombineTo(Op, TLO.DAG.getNode(Opc, dl, VT, Src)); 1869 } 1870 1871 APInt InDemandedBits = DemandedBits.trunc(InBits); 1872 APInt InDemandedElts = DemandedElts.zextOrSelf(InElts); 1873 if (SimplifyDemandedBits(Src, InDemandedBits, InDemandedElts, Known, TLO, 1874 Depth + 1)) 1875 return true; 1876 assert(!Known.hasConflict() && "Bits known to be one AND zero?"); 1877 assert(Known.getBitWidth() == InBits && "Src width has changed?"); 1878 Known = Known.zext(BitWidth); 1879 1880 // Attempt to avoid multi-use ops if we don't need anything from them. 1881 if (SDValue NewSrc = SimplifyMultipleUseDemandedBits( 1882 Src, InDemandedBits, InDemandedElts, TLO.DAG, Depth + 1)) 1883 return TLO.CombineTo(Op, TLO.DAG.getNode(Op.getOpcode(), dl, VT, NewSrc)); 1884 break; 1885 } 1886 case ISD::SIGN_EXTEND: 1887 case ISD::SIGN_EXTEND_VECTOR_INREG: { 1888 SDValue Src = Op.getOperand(0); 1889 EVT SrcVT = Src.getValueType(); 1890 unsigned InBits = SrcVT.getScalarSizeInBits(); 1891 unsigned InElts = SrcVT.isVector() ? SrcVT.getVectorNumElements() : 1; 1892 bool IsVecInReg = Op.getOpcode() == ISD::SIGN_EXTEND_VECTOR_INREG; 1893 1894 // If none of the top bits are demanded, convert this into an any_extend. 1895 if (DemandedBits.getActiveBits() <= InBits) { 1896 // If we only need the non-extended bits of the bottom element 1897 // then we can just bitcast to the result. 
1898 if (IsVecInReg && DemandedElts == 1 && 1899 VT.getSizeInBits() == SrcVT.getSizeInBits() && 1900 TLO.DAG.getDataLayout().isLittleEndian()) 1901 return TLO.CombineTo(Op, TLO.DAG.getBitcast(VT, Src)); 1902 1903 unsigned Opc = 1904 IsVecInReg ? ISD::ANY_EXTEND_VECTOR_INREG : ISD::ANY_EXTEND; 1905 if (!TLO.LegalOperations() || isOperationLegal(Opc, VT)) 1906 return TLO.CombineTo(Op, TLO.DAG.getNode(Opc, dl, VT, Src)); 1907 } 1908 1909 APInt InDemandedBits = DemandedBits.trunc(InBits); 1910 APInt InDemandedElts = DemandedElts.zextOrSelf(InElts); 1911 1912 // Since some of the sign extended bits are demanded, we know that the sign 1913 // bit is demanded. 1914 InDemandedBits.setBit(InBits - 1); 1915 1916 if (SimplifyDemandedBits(Src, InDemandedBits, InDemandedElts, Known, TLO, 1917 Depth + 1)) 1918 return true; 1919 assert(!Known.hasConflict() && "Bits known to be one AND zero?"); 1920 assert(Known.getBitWidth() == InBits && "Src width has changed?"); 1921 1922 // If the sign bit is known one, the top bits match. 1923 Known = Known.sext(BitWidth); 1924 1925 // If the sign bit is known zero, convert this to a zero extend. 1926 if (Known.isNonNegative()) { 1927 unsigned Opc = 1928 IsVecInReg ? ISD::ZERO_EXTEND_VECTOR_INREG : ISD::ZERO_EXTEND; 1929 if (!TLO.LegalOperations() || isOperationLegal(Opc, VT)) 1930 return TLO.CombineTo(Op, TLO.DAG.getNode(Opc, dl, VT, Src)); 1931 } 1932 1933 // Attempt to avoid multi-use ops if we don't need anything from them. 1934 if (SDValue NewSrc = SimplifyMultipleUseDemandedBits( 1935 Src, InDemandedBits, InDemandedElts, TLO.DAG, Depth + 1)) 1936 return TLO.CombineTo(Op, TLO.DAG.getNode(Op.getOpcode(), dl, VT, NewSrc)); 1937 break; 1938 } 1939 case ISD::ANY_EXTEND: 1940 case ISD::ANY_EXTEND_VECTOR_INREG: { 1941 SDValue Src = Op.getOperand(0); 1942 EVT SrcVT = Src.getValueType(); 1943 unsigned InBits = SrcVT.getScalarSizeInBits(); 1944 unsigned InElts = SrcVT.isVector() ? SrcVT.getVectorNumElements() : 1; 1945 bool IsVecInReg = Op.getOpcode() == ISD::ANY_EXTEND_VECTOR_INREG; 1946 1947 // If we only need the bottom element then we can just bitcast. 1948 // TODO: Handle ANY_EXTEND? 1949 if (IsVecInReg && DemandedElts == 1 && 1950 VT.getSizeInBits() == SrcVT.getSizeInBits() && 1951 TLO.DAG.getDataLayout().isLittleEndian()) 1952 return TLO.CombineTo(Op, TLO.DAG.getBitcast(VT, Src)); 1953 1954 APInt InDemandedBits = DemandedBits.trunc(InBits); 1955 APInt InDemandedElts = DemandedElts.zextOrSelf(InElts); 1956 if (SimplifyDemandedBits(Src, InDemandedBits, InDemandedElts, Known, TLO, 1957 Depth + 1)) 1958 return true; 1959 assert(!Known.hasConflict() && "Bits known to be one AND zero?"); 1960 assert(Known.getBitWidth() == InBits && "Src width has changed?"); 1961 Known = Known.anyext(BitWidth); 1962 1963 // Attempt to avoid multi-use ops if we don't need anything from them. 1964 if (SDValue NewSrc = SimplifyMultipleUseDemandedBits( 1965 Src, InDemandedBits, InDemandedElts, TLO.DAG, Depth + 1)) 1966 return TLO.CombineTo(Op, TLO.DAG.getNode(Op.getOpcode(), dl, VT, NewSrc)); 1967 break; 1968 } 1969 case ISD::TRUNCATE: { 1970 SDValue Src = Op.getOperand(0); 1971 1972 // Simplify the input, using demanded bit information, and compute the known 1973 // zero/one bits live out. 
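    // The truncate only demands the low BitWidth bits of the source, so the
    // demanded mask is simply zero-extended to the source width.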
    unsigned OperandBitWidth = Src.getScalarValueSizeInBits();
    APInt TruncMask = DemandedBits.zext(OperandBitWidth);
    if (SimplifyDemandedBits(Src, TruncMask, Known, TLO, Depth + 1))
      return true;
    Known = Known.trunc(BitWidth);

    // Attempt to avoid multi-use ops if we don't need anything from them.
    if (SDValue NewSrc = SimplifyMultipleUseDemandedBits(
            Src, TruncMask, DemandedElts, TLO.DAG, Depth + 1))
      return TLO.CombineTo(Op, TLO.DAG.getNode(ISD::TRUNCATE, dl, VT, NewSrc));

    // If the input is only used by this truncate, see if we can shrink it
    // based on the known demanded bits.
    if (Src.getNode()->hasOneUse()) {
      switch (Src.getOpcode()) {
      default:
        break;
      case ISD::SRL:
        // Shrink SRL by a constant if none of the high bits shifted in are
        // demanded.
        if (TLO.LegalTypes() && !isTypeDesirableForOp(ISD::SRL, VT))
          // Do not turn (vt1 truncate (vt2 srl)) into (vt1 srl) if vt1 is
          // undesirable.
          break;

        SDValue ShAmt = Src.getOperand(1);
        auto *ShAmtC = dyn_cast<ConstantSDNode>(ShAmt);
        if (!ShAmtC || ShAmtC->getAPIntValue().uge(BitWidth))
          break;
        uint64_t ShVal = ShAmtC->getZExtValue();

        APInt HighBits =
            APInt::getHighBitsSet(OperandBitWidth, OperandBitWidth - BitWidth);
        HighBits.lshrInPlace(ShVal);
        HighBits = HighBits.trunc(BitWidth);

        if (!(HighBits & DemandedBits)) {
          // None of the shifted-in bits are needed. Add a truncate of the
          // shift input, then shift it.
          if (TLO.LegalTypes())
            ShAmt = TLO.DAG.getConstant(ShVal, dl, getShiftAmountTy(VT, DL));
          SDValue NewTrunc =
              TLO.DAG.getNode(ISD::TRUNCATE, dl, VT, Src.getOperand(0));
          return TLO.CombineTo(
              Op, TLO.DAG.getNode(ISD::SRL, dl, VT, NewTrunc, ShAmt));
        }
        break;
      }
    }

    assert(!Known.hasConflict() && "Bits known to be one AND zero?");
    break;
  }
  case ISD::AssertZext: {
    // AssertZext demands all of the high bits, plus any of the low bits
    // demanded by its users.
    EVT ZVT = cast<VTSDNode>(Op.getOperand(1))->getVT();
    APInt InMask = APInt::getLowBitsSet(BitWidth, ZVT.getSizeInBits());
    if (SimplifyDemandedBits(Op.getOperand(0), ~InMask | DemandedBits, Known,
                             TLO, Depth + 1))
      return true;
    assert(!Known.hasConflict() && "Bits known to be one AND zero?");

    Known.Zero |= ~InMask;
    break;
  }
  case ISD::EXTRACT_VECTOR_ELT: {
    SDValue Src = Op.getOperand(0);
    SDValue Idx = Op.getOperand(1);
    ElementCount SrcEltCnt = Src.getValueType().getVectorElementCount();
    unsigned EltBitWidth = Src.getScalarValueSizeInBits();

    if (SrcEltCnt.isScalable())
      return false;

    // Demand the bits from every vector element without a constant index.
    unsigned NumSrcElts = SrcEltCnt.getFixedValue();
    APInt DemandedSrcElts = APInt::getAllOnesValue(NumSrcElts);
    if (auto *CIdx = dyn_cast<ConstantSDNode>(Idx))
      if (CIdx->getAPIntValue().ult(NumSrcElts))
        DemandedSrcElts = APInt::getOneBitSet(NumSrcElts, CIdx->getZExtValue());

    // If BitWidth > EltBitWidth the value is any-extended, so we do not know
    // anything about the extended bits.
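    // For example, extracting an i8 element as an i32 leaves bits [31:8] of
    // the result unspecified, so only the low 8 demanded bits are passed on
    // to the source vector.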
2058 APInt DemandedSrcBits = DemandedBits; 2059 if (BitWidth > EltBitWidth) 2060 DemandedSrcBits = DemandedSrcBits.trunc(EltBitWidth); 2061 2062 if (SimplifyDemandedBits(Src, DemandedSrcBits, DemandedSrcElts, Known2, TLO, 2063 Depth + 1)) 2064 return true; 2065 2066 // Attempt to avoid multi-use ops if we don't need anything from them. 2067 if (!DemandedSrcBits.isAllOnesValue() || 2068 !DemandedSrcElts.isAllOnesValue()) { 2069 if (SDValue DemandedSrc = SimplifyMultipleUseDemandedBits( 2070 Src, DemandedSrcBits, DemandedSrcElts, TLO.DAG, Depth + 1)) { 2071 SDValue NewOp = 2072 TLO.DAG.getNode(Op.getOpcode(), dl, VT, DemandedSrc, Idx); 2073 return TLO.CombineTo(Op, NewOp); 2074 } 2075 } 2076 2077 Known = Known2; 2078 if (BitWidth > EltBitWidth) 2079 Known = Known.anyext(BitWidth); 2080 break; 2081 } 2082 case ISD::BITCAST: { 2083 SDValue Src = Op.getOperand(0); 2084 EVT SrcVT = Src.getValueType(); 2085 unsigned NumSrcEltBits = SrcVT.getScalarSizeInBits(); 2086 2087 // If this is an FP->Int bitcast and if the sign bit is the only 2088 // thing demanded, turn this into a FGETSIGN. 2089 if (!TLO.LegalOperations() && !VT.isVector() && !SrcVT.isVector() && 2090 DemandedBits == APInt::getSignMask(Op.getValueSizeInBits()) && 2091 SrcVT.isFloatingPoint()) { 2092 bool OpVTLegal = isOperationLegalOrCustom(ISD::FGETSIGN, VT); 2093 bool i32Legal = isOperationLegalOrCustom(ISD::FGETSIGN, MVT::i32); 2094 if ((OpVTLegal || i32Legal) && VT.isSimple() && SrcVT != MVT::f16 && 2095 SrcVT != MVT::f128) { 2096 // Cannot eliminate/lower SHL for f128 yet. 2097 EVT Ty = OpVTLegal ? VT : MVT::i32; 2098 // Make a FGETSIGN + SHL to move the sign bit into the appropriate 2099 // place. We expect the SHL to be eliminated by other optimizations. 2100 SDValue Sign = TLO.DAG.getNode(ISD::FGETSIGN, dl, Ty, Src); 2101 unsigned OpVTSizeInBits = Op.getValueSizeInBits(); 2102 if (!OpVTLegal && OpVTSizeInBits > 32) 2103 Sign = TLO.DAG.getNode(ISD::ZERO_EXTEND, dl, VT, Sign); 2104 unsigned ShVal = Op.getValueSizeInBits() - 1; 2105 SDValue ShAmt = TLO.DAG.getConstant(ShVal, dl, VT); 2106 return TLO.CombineTo(Op, 2107 TLO.DAG.getNode(ISD::SHL, dl, VT, Sign, ShAmt)); 2108 } 2109 } 2110 2111 // Bitcast from a vector using SimplifyDemanded Bits/VectorElts. 2112 // Demand the elt/bit if any of the original elts/bits are demanded. 2113 // TODO - bigendian once we have test coverage. 
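    // For example (little-endian), a demanded bit in the range [15:8] of an
    // i32 bitcast from v4i8 maps to demanded bits [7:0] of source element 1.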
2114 if (SrcVT.isVector() && (BitWidth % NumSrcEltBits) == 0 && 2115 TLO.DAG.getDataLayout().isLittleEndian()) { 2116 unsigned Scale = BitWidth / NumSrcEltBits; 2117 unsigned NumSrcElts = SrcVT.getVectorNumElements(); 2118 APInt DemandedSrcBits = APInt::getNullValue(NumSrcEltBits); 2119 APInt DemandedSrcElts = APInt::getNullValue(NumSrcElts); 2120 for (unsigned i = 0; i != Scale; ++i) { 2121 unsigned Offset = i * NumSrcEltBits; 2122 APInt Sub = DemandedBits.extractBits(NumSrcEltBits, Offset); 2123 if (!Sub.isNullValue()) { 2124 DemandedSrcBits |= Sub; 2125 for (unsigned j = 0; j != NumElts; ++j) 2126 if (DemandedElts[j]) 2127 DemandedSrcElts.setBit((j * Scale) + i); 2128 } 2129 } 2130 2131 APInt KnownSrcUndef, KnownSrcZero; 2132 if (SimplifyDemandedVectorElts(Src, DemandedSrcElts, KnownSrcUndef, 2133 KnownSrcZero, TLO, Depth + 1)) 2134 return true; 2135 2136 KnownBits KnownSrcBits; 2137 if (SimplifyDemandedBits(Src, DemandedSrcBits, DemandedSrcElts, 2138 KnownSrcBits, TLO, Depth + 1)) 2139 return true; 2140 } else if ((NumSrcEltBits % BitWidth) == 0 && 2141 TLO.DAG.getDataLayout().isLittleEndian()) { 2142 unsigned Scale = NumSrcEltBits / BitWidth; 2143 unsigned NumSrcElts = SrcVT.isVector() ? SrcVT.getVectorNumElements() : 1; 2144 APInt DemandedSrcBits = APInt::getNullValue(NumSrcEltBits); 2145 APInt DemandedSrcElts = APInt::getNullValue(NumSrcElts); 2146 for (unsigned i = 0; i != NumElts; ++i) 2147 if (DemandedElts[i]) { 2148 unsigned Offset = (i % Scale) * BitWidth; 2149 DemandedSrcBits.insertBits(DemandedBits, Offset); 2150 DemandedSrcElts.setBit(i / Scale); 2151 } 2152 2153 if (SrcVT.isVector()) { 2154 APInt KnownSrcUndef, KnownSrcZero; 2155 if (SimplifyDemandedVectorElts(Src, DemandedSrcElts, KnownSrcUndef, 2156 KnownSrcZero, TLO, Depth + 1)) 2157 return true; 2158 } 2159 2160 KnownBits KnownSrcBits; 2161 if (SimplifyDemandedBits(Src, DemandedSrcBits, DemandedSrcElts, 2162 KnownSrcBits, TLO, Depth + 1)) 2163 return true; 2164 } 2165 2166 // If this is a bitcast, let computeKnownBits handle it. Only do this on a 2167 // recursive call where Known may be useful to the caller. 2168 if (Depth > 0) { 2169 Known = TLO.DAG.computeKnownBits(Op, DemandedElts, Depth); 2170 return false; 2171 } 2172 break; 2173 } 2174 case ISD::ADD: 2175 case ISD::MUL: 2176 case ISD::SUB: { 2177 // Add, Sub, and Mul don't demand any bits in positions beyond that 2178 // of the highest bit demanded of them. 2179 SDValue Op0 = Op.getOperand(0), Op1 = Op.getOperand(1); 2180 SDNodeFlags Flags = Op.getNode()->getFlags(); 2181 unsigned DemandedBitsLZ = DemandedBits.countLeadingZeros(); 2182 APInt LoMask = APInt::getLowBitsSet(BitWidth, BitWidth - DemandedBitsLZ); 2183 if (SimplifyDemandedBits(Op0, LoMask, DemandedElts, Known2, TLO, 2184 Depth + 1) || 2185 SimplifyDemandedBits(Op1, LoMask, DemandedElts, Known2, TLO, 2186 Depth + 1) || 2187 // See if the operation should be performed at a smaller bit width. 2188 ShrinkDemandedOp(Op, BitWidth, DemandedBits, TLO)) { 2189 if (Flags.hasNoSignedWrap() || Flags.hasNoUnsignedWrap()) { 2190 // Disable the nsw and nuw flags. We can no longer guarantee that we 2191 // won't wrap after simplification. 2192 Flags.setNoSignedWrap(false); 2193 Flags.setNoUnsignedWrap(false); 2194 SDValue NewOp = 2195 TLO.DAG.getNode(Op.getOpcode(), dl, VT, Op0, Op1, Flags); 2196 return TLO.CombineTo(Op, NewOp); 2197 } 2198 return true; 2199 } 2200 2201 // Attempt to avoid multi-use ops if we don't need anything from them. 
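    // Carries only propagate upwards, so if a simpler multi-use operand
    // yields the same demanded low bits we can rebuild the node on it
    // (dropping nsw/nuw, which may no longer hold).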
2202 if (!LoMask.isAllOnesValue() || !DemandedElts.isAllOnesValue()) { 2203 SDValue DemandedOp0 = SimplifyMultipleUseDemandedBits( 2204 Op0, LoMask, DemandedElts, TLO.DAG, Depth + 1); 2205 SDValue DemandedOp1 = SimplifyMultipleUseDemandedBits( 2206 Op1, LoMask, DemandedElts, TLO.DAG, Depth + 1); 2207 if (DemandedOp0 || DemandedOp1) { 2208 Flags.setNoSignedWrap(false); 2209 Flags.setNoUnsignedWrap(false); 2210 Op0 = DemandedOp0 ? DemandedOp0 : Op0; 2211 Op1 = DemandedOp1 ? DemandedOp1 : Op1; 2212 SDValue NewOp = 2213 TLO.DAG.getNode(Op.getOpcode(), dl, VT, Op0, Op1, Flags); 2214 return TLO.CombineTo(Op, NewOp); 2215 } 2216 } 2217 2218 // If we have a constant operand, we may be able to turn it into -1 if we 2219 // do not demand the high bits. This can make the constant smaller to 2220 // encode, allow more general folding, or match specialized instruction 2221 // patterns (eg, 'blsr' on x86). Don't bother changing 1 to -1 because that 2222 // is probably not useful (and could be detrimental). 2223 ConstantSDNode *C = isConstOrConstSplat(Op1); 2224 APInt HighMask = APInt::getHighBitsSet(BitWidth, DemandedBitsLZ); 2225 if (C && !C->isAllOnesValue() && !C->isOne() && 2226 (C->getAPIntValue() | HighMask).isAllOnesValue()) { 2227 SDValue Neg1 = TLO.DAG.getAllOnesConstant(dl, VT); 2228 // Disable the nsw and nuw flags. We can no longer guarantee that we 2229 // won't wrap after simplification. 2230 Flags.setNoSignedWrap(false); 2231 Flags.setNoUnsignedWrap(false); 2232 SDValue NewOp = TLO.DAG.getNode(Op.getOpcode(), dl, VT, Op0, Neg1, Flags); 2233 return TLO.CombineTo(Op, NewOp); 2234 } 2235 2236 LLVM_FALLTHROUGH; 2237 } 2238 default: 2239 if (Op.getOpcode() >= ISD::BUILTIN_OP_END) { 2240 if (SimplifyDemandedBitsForTargetNode(Op, DemandedBits, DemandedElts, 2241 Known, TLO, Depth)) 2242 return true; 2243 break; 2244 } 2245 2246 // Just use computeKnownBits to compute output bits. 2247 Known = TLO.DAG.computeKnownBits(Op, DemandedElts, Depth); 2248 break; 2249 } 2250 2251 // If we know the value of all of the demanded bits, return this as a 2252 // constant. 2253 if (DemandedBits.isSubsetOf(Known.Zero | Known.One)) { 2254 // Avoid folding to a constant if any OpaqueConstant is involved. 2255 const SDNode *N = Op.getNode(); 2256 for (SDNodeIterator I = SDNodeIterator::begin(N), 2257 E = SDNodeIterator::end(N); 2258 I != E; ++I) { 2259 SDNode *Op = *I; 2260 if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op)) 2261 if (C->isOpaque()) 2262 return false; 2263 } 2264 if (VT.isInteger()) 2265 return TLO.CombineTo(Op, TLO.DAG.getConstant(Known.One, dl, VT)); 2266 if (VT.isFloatingPoint()) 2267 return TLO.CombineTo( 2268 Op, 2269 TLO.DAG.getConstantFP( 2270 APFloat(TLO.DAG.EVTToAPFloatSemantics(VT), Known.One), dl, VT)); 2271 } 2272 2273 return false; 2274 } 2275 2276 bool TargetLowering::SimplifyDemandedVectorElts(SDValue Op, 2277 const APInt &DemandedElts, 2278 APInt &KnownUndef, 2279 APInt &KnownZero, 2280 DAGCombinerInfo &DCI) const { 2281 SelectionDAG &DAG = DCI.DAG; 2282 TargetLoweringOpt TLO(DAG, !DCI.isBeforeLegalize(), 2283 !DCI.isBeforeLegalizeOps()); 2284 2285 bool Simplified = 2286 SimplifyDemandedVectorElts(Op, DemandedElts, KnownUndef, KnownZero, TLO); 2287 if (Simplified) { 2288 DCI.AddToWorklist(Op.getNode()); 2289 DCI.CommitTargetLoweringOpt(TLO); 2290 } 2291 2292 return Simplified; 2293 } 2294 2295 /// Given a vector binary operation and known undefined elements for each input 2296 /// operand, compute whether each element of the output is undefined. 
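/// For example, an integer add lane with an undef input will typically
/// constant-fold to undef, which the per-lane getNode() probe below detects.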
2297 static APInt getKnownUndefForVectorBinop(SDValue BO, SelectionDAG &DAG, 2298 const APInt &UndefOp0, 2299 const APInt &UndefOp1) { 2300 EVT VT = BO.getValueType(); 2301 assert(DAG.getTargetLoweringInfo().isBinOp(BO.getOpcode()) && VT.isVector() && 2302 "Vector binop only"); 2303 2304 EVT EltVT = VT.getVectorElementType(); 2305 unsigned NumElts = VT.getVectorNumElements(); 2306 assert(UndefOp0.getBitWidth() == NumElts && 2307 UndefOp1.getBitWidth() == NumElts && "Bad type for undef analysis"); 2308 2309 auto getUndefOrConstantElt = [&](SDValue V, unsigned Index, 2310 const APInt &UndefVals) { 2311 if (UndefVals[Index]) 2312 return DAG.getUNDEF(EltVT); 2313 2314 if (auto *BV = dyn_cast<BuildVectorSDNode>(V)) { 2315 // Try hard to make sure that the getNode() call is not creating temporary 2316 // nodes. Ignore opaque integers because they do not constant fold. 2317 SDValue Elt = BV->getOperand(Index); 2318 auto *C = dyn_cast<ConstantSDNode>(Elt); 2319 if (isa<ConstantFPSDNode>(Elt) || Elt.isUndef() || (C && !C->isOpaque())) 2320 return Elt; 2321 } 2322 2323 return SDValue(); 2324 }; 2325 2326 APInt KnownUndef = APInt::getNullValue(NumElts); 2327 for (unsigned i = 0; i != NumElts; ++i) { 2328 // If both inputs for this element are either constant or undef and match 2329 // the element type, compute the constant/undef result for this element of 2330 // the vector. 2331 // TODO: Ideally we would use FoldConstantArithmetic() here, but that does 2332 // not handle FP constants. The code within getNode() should be refactored 2333 // to avoid the danger of creating a bogus temporary node here. 2334 SDValue C0 = getUndefOrConstantElt(BO.getOperand(0), i, UndefOp0); 2335 SDValue C1 = getUndefOrConstantElt(BO.getOperand(1), i, UndefOp1); 2336 if (C0 && C1 && C0.getValueType() == EltVT && C1.getValueType() == EltVT) 2337 if (DAG.getNode(BO.getOpcode(), SDLoc(BO), EltVT, C0, C1).isUndef()) 2338 KnownUndef.setBit(i); 2339 } 2340 return KnownUndef; 2341 } 2342 2343 bool TargetLowering::SimplifyDemandedVectorElts( 2344 SDValue Op, const APInt &OriginalDemandedElts, APInt &KnownUndef, 2345 APInt &KnownZero, TargetLoweringOpt &TLO, unsigned Depth, 2346 bool AssumeSingleUse) const { 2347 EVT VT = Op.getValueType(); 2348 unsigned Opcode = Op.getOpcode(); 2349 APInt DemandedElts = OriginalDemandedElts; 2350 unsigned NumElts = DemandedElts.getBitWidth(); 2351 assert(VT.isVector() && "Expected vector op"); 2352 2353 KnownUndef = KnownZero = APInt::getNullValue(NumElts); 2354 2355 // TODO: For now we assume we know nothing about scalable vectors. 2356 if (VT.isScalableVector()) 2357 return false; 2358 2359 assert(VT.getVectorNumElements() == NumElts && 2360 "Mask size mismatches value type element count!"); 2361 2362 // Undef operand. 2363 if (Op.isUndef()) { 2364 KnownUndef.setAllBits(); 2365 return false; 2366 } 2367 2368 // If Op has other users, assume that all elements are needed. 2369 if (!Op.getNode()->hasOneUse() && !AssumeSingleUse) 2370 DemandedElts.setAllBits(); 2371 2372 // Not demanding any elements from Op. 2373 if (DemandedElts == 0) { 2374 KnownUndef.setAllBits(); 2375 return TLO.CombineTo(Op, TLO.DAG.getUNDEF(VT)); 2376 } 2377 2378 // Limit search depth. 2379 if (Depth >= SelectionDAG::MaxRecursionDepth) 2380 return false; 2381 2382 SDLoc DL(Op); 2383 unsigned EltSizeInBits = VT.getScalarSizeInBits(); 2384 2385 // Helper for demanding the specified elements and all the bits of both binary 2386 // operands. 
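  // Returns true (and combines away Op) if either operand can be replaced by
  // an existing simpler value on the demanded elements.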
  auto SimplifyDemandedVectorEltsBinOp = [&](SDValue Op0, SDValue Op1) {
    SDValue NewOp0 = SimplifyMultipleUseDemandedVectorElts(Op0, DemandedElts,
                                                           TLO.DAG, Depth + 1);
    SDValue NewOp1 = SimplifyMultipleUseDemandedVectorElts(Op1, DemandedElts,
                                                           TLO.DAG, Depth + 1);
    if (NewOp0 || NewOp1) {
      SDValue NewOp = TLO.DAG.getNode(
          Opcode, SDLoc(Op), VT, NewOp0 ? NewOp0 : Op0, NewOp1 ? NewOp1 : Op1);
      return TLO.CombineTo(Op, NewOp);
    }
    return false;
  };

  switch (Opcode) {
  case ISD::SCALAR_TO_VECTOR: {
    if (!DemandedElts[0]) {
      KnownUndef.setAllBits();
      return TLO.CombineTo(Op, TLO.DAG.getUNDEF(VT));
    }
    KnownUndef.setHighBits(NumElts - 1);
    break;
  }
  case ISD::BITCAST: {
    SDValue Src = Op.getOperand(0);
    EVT SrcVT = Src.getValueType();

    // We only handle vectors here.
    // TODO - investigate calling SimplifyDemandedBits/ComputeKnownBits?
    if (!SrcVT.isVector())
      break;

    // Fast handling of 'identity' bitcasts.
    unsigned NumSrcElts = SrcVT.getVectorNumElements();
    if (NumSrcElts == NumElts)
      return SimplifyDemandedVectorElts(Src, DemandedElts, KnownUndef,
                                        KnownZero, TLO, Depth + 1);

    APInt SrcZero, SrcUndef;
    APInt SrcDemandedElts = APInt::getNullValue(NumSrcElts);

    // Bitcast from a 'large element' src vector to a 'small element' vector:
    // we must demand a source element if any DemandedElt maps to it.
    if ((NumElts % NumSrcElts) == 0) {
      unsigned Scale = NumElts / NumSrcElts;
      for (unsigned i = 0; i != NumElts; ++i)
        if (DemandedElts[i])
          SrcDemandedElts.setBit(i / Scale);

      if (SimplifyDemandedVectorElts(Src, SrcDemandedElts, SrcUndef, SrcZero,
                                     TLO, Depth + 1))
        return true;

      // Try calling SimplifyDemandedBits, converting demanded elts to the bits
      // of the large element.
      // TODO - bigendian once we have test coverage.
      if (TLO.DAG.getDataLayout().isLittleEndian()) {
        unsigned SrcEltSizeInBits = SrcVT.getScalarSizeInBits();
        APInt SrcDemandedBits = APInt::getNullValue(SrcEltSizeInBits);
        for (unsigned i = 0; i != NumElts; ++i)
          if (DemandedElts[i]) {
            unsigned Ofs = (i % Scale) * EltSizeInBits;
            SrcDemandedBits.setBits(Ofs, Ofs + EltSizeInBits);
          }

        KnownBits Known;
        if (SimplifyDemandedBits(Src, SrcDemandedBits, SrcDemandedElts, Known,
                                 TLO, Depth + 1))
          return true;
      }

      // If the src element is zero/undef then all the output elements will
      // be - only demanded elements are guaranteed to be correct.
      for (unsigned i = 0; i != NumSrcElts; ++i) {
        if (SrcDemandedElts[i]) {
          if (SrcZero[i])
            KnownZero.setBits(i * Scale, (i + 1) * Scale);
          if (SrcUndef[i])
            KnownUndef.setBits(i * Scale, (i + 1) * Scale);
        }
      }
    }

    // Bitcast from a 'small element' src vector to a 'large element' vector:
    // we demand all smaller source elements covered by the larger demanded
    // element of this vector.
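    // For example, demanding element 0 of a v2i64 bitcast from v4i32 demands
    // source elements 0 and 1.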
    if ((NumSrcElts % NumElts) == 0) {
      unsigned Scale = NumSrcElts / NumElts;
      for (unsigned i = 0; i != NumElts; ++i)
        if (DemandedElts[i])
          SrcDemandedElts.setBits(i * Scale, (i + 1) * Scale);

      if (SimplifyDemandedVectorElts(Src, SrcDemandedElts, SrcUndef, SrcZero,
                                     TLO, Depth + 1))
        return true;

      // If all the src elements covering an output element are zero/undef,
      // then the output element will be as well, assuming it was demanded.
      for (unsigned i = 0; i != NumElts; ++i) {
        if (DemandedElts[i]) {
          if (SrcZero.extractBits(Scale, i * Scale).isAllOnesValue())
            KnownZero.setBit(i);
          if (SrcUndef.extractBits(Scale, i * Scale).isAllOnesValue())
            KnownUndef.setBit(i);
        }
      }
    }
    break;
  }
  case ISD::BUILD_VECTOR: {
    // Check all elements and simplify any unused elements with UNDEF.
    if (!DemandedElts.isAllOnesValue()) {
      // Don't simplify BROADCASTS.
      if (llvm::any_of(Op->op_values(),
                       [&](SDValue Elt) { return Op.getOperand(0) != Elt; })) {
        SmallVector<SDValue, 32> Ops(Op->op_begin(), Op->op_end());
        bool Updated = false;
        for (unsigned i = 0; i != NumElts; ++i) {
          if (!DemandedElts[i] && !Ops[i].isUndef()) {
            Ops[i] = TLO.DAG.getUNDEF(Ops[0].getValueType());
            KnownUndef.setBit(i);
            Updated = true;
          }
        }
        if (Updated)
          return TLO.CombineTo(Op, TLO.DAG.getBuildVector(VT, DL, Ops));
      }
    }
    for (unsigned i = 0; i != NumElts; ++i) {
      SDValue SrcOp = Op.getOperand(i);
      if (SrcOp.isUndef()) {
        KnownUndef.setBit(i);
      } else if (EltSizeInBits == SrcOp.getScalarValueSizeInBits() &&
                 (isNullConstant(SrcOp) || isNullFPConstant(SrcOp))) {
        KnownZero.setBit(i);
      }
    }
    break;
  }
  case ISD::CONCAT_VECTORS: {
    EVT SubVT = Op.getOperand(0).getValueType();
    unsigned NumSubVecs = Op.getNumOperands();
    unsigned NumSubElts = SubVT.getVectorNumElements();
    for (unsigned i = 0; i != NumSubVecs; ++i) {
      SDValue SubOp = Op.getOperand(i);
      APInt SubElts = DemandedElts.extractBits(NumSubElts, i * NumSubElts);
      APInt SubUndef, SubZero;
      if (SimplifyDemandedVectorElts(SubOp, SubElts, SubUndef, SubZero, TLO,
                                     Depth + 1))
        return true;
      KnownUndef.insertBits(SubUndef, i * NumSubElts);
      KnownZero.insertBits(SubZero, i * NumSubElts);
    }
    break;
  }
  case ISD::INSERT_SUBVECTOR: {
    // Demand any elements from the subvector and the remainder from the src
    // it is inserted into.
    SDValue Src = Op.getOperand(0);
    SDValue Sub = Op.getOperand(1);
    uint64_t Idx = Op.getConstantOperandVal(2);
    unsigned NumSubElts = Sub.getValueType().getVectorNumElements();
    APInt DemandedSubElts = DemandedElts.extractBits(NumSubElts, Idx);
    APInt DemandedSrcElts = DemandedElts;
    DemandedSrcElts.insertBits(APInt::getNullValue(NumSubElts), Idx);

    APInt SubUndef, SubZero;
    if (SimplifyDemandedVectorElts(Sub, DemandedSubElts, SubUndef, SubZero,
                                   TLO, Depth + 1))
      return true;

    // If none of the src operand elements are demanded, replace it with undef.
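    // (This happens, for example, when the inserted subvector covers every
    // demanded lane of the destination.)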
2558 if (!DemandedSrcElts && !Src.isUndef()) 2559 return TLO.CombineTo(Op, TLO.DAG.getNode(ISD::INSERT_SUBVECTOR, DL, VT, 2560 TLO.DAG.getUNDEF(VT), Sub, 2561 Op.getOperand(2))); 2562 2563 if (SimplifyDemandedVectorElts(Src, DemandedSrcElts, KnownUndef, KnownZero, 2564 TLO, Depth + 1)) 2565 return true; 2566 KnownUndef.insertBits(SubUndef, Idx); 2567 KnownZero.insertBits(SubZero, Idx); 2568 2569 // Attempt to avoid multi-use ops if we don't need anything from them. 2570 if (!DemandedSrcElts.isAllOnesValue() || 2571 !DemandedSubElts.isAllOnesValue()) { 2572 SDValue NewSrc = SimplifyMultipleUseDemandedVectorElts( 2573 Src, DemandedSrcElts, TLO.DAG, Depth + 1); 2574 SDValue NewSub = SimplifyMultipleUseDemandedVectorElts( 2575 Sub, DemandedSubElts, TLO.DAG, Depth + 1); 2576 if (NewSrc || NewSub) { 2577 NewSrc = NewSrc ? NewSrc : Src; 2578 NewSub = NewSub ? NewSub : Sub; 2579 SDValue NewOp = TLO.DAG.getNode(Op.getOpcode(), SDLoc(Op), VT, NewSrc, 2580 NewSub, Op.getOperand(2)); 2581 return TLO.CombineTo(Op, NewOp); 2582 } 2583 } 2584 break; 2585 } 2586 case ISD::EXTRACT_SUBVECTOR: { 2587 // Offset the demanded elts by the subvector index. 2588 SDValue Src = Op.getOperand(0); 2589 if (Src.getValueType().isScalableVector()) 2590 break; 2591 uint64_t Idx = Op.getConstantOperandVal(1); 2592 unsigned NumSrcElts = Src.getValueType().getVectorNumElements(); 2593 APInt DemandedSrcElts = DemandedElts.zextOrSelf(NumSrcElts).shl(Idx); 2594 2595 APInt SrcUndef, SrcZero; 2596 if (SimplifyDemandedVectorElts(Src, DemandedSrcElts, SrcUndef, SrcZero, TLO, 2597 Depth + 1)) 2598 return true; 2599 KnownUndef = SrcUndef.extractBits(NumElts, Idx); 2600 KnownZero = SrcZero.extractBits(NumElts, Idx); 2601 2602 // Attempt to avoid multi-use ops if we don't need anything from them. 2603 if (!DemandedElts.isAllOnesValue()) { 2604 SDValue NewSrc = SimplifyMultipleUseDemandedVectorElts( 2605 Src, DemandedSrcElts, TLO.DAG, Depth + 1); 2606 if (NewSrc) { 2607 SDValue NewOp = TLO.DAG.getNode(Op.getOpcode(), SDLoc(Op), VT, NewSrc, 2608 Op.getOperand(1)); 2609 return TLO.CombineTo(Op, NewOp); 2610 } 2611 } 2612 break; 2613 } 2614 case ISD::INSERT_VECTOR_ELT: { 2615 SDValue Vec = Op.getOperand(0); 2616 SDValue Scl = Op.getOperand(1); 2617 auto *CIdx = dyn_cast<ConstantSDNode>(Op.getOperand(2)); 2618 2619 // For a legal, constant insertion index, if we don't need this insertion 2620 // then strip it, else remove it from the demanded elts. 2621 if (CIdx && CIdx->getAPIntValue().ult(NumElts)) { 2622 unsigned Idx = CIdx->getZExtValue(); 2623 if (!DemandedElts[Idx]) 2624 return TLO.CombineTo(Op, Vec); 2625 2626 APInt DemandedVecElts(DemandedElts); 2627 DemandedVecElts.clearBit(Idx); 2628 if (SimplifyDemandedVectorElts(Vec, DemandedVecElts, KnownUndef, 2629 KnownZero, TLO, Depth + 1)) 2630 return true; 2631 2632 KnownUndef.setBitVal(Idx, Scl.isUndef()); 2633 2634 KnownZero.setBitVal(Idx, isNullConstant(Scl) || isNullFPConstant(Scl)); 2635 break; 2636 } 2637 2638 APInt VecUndef, VecZero; 2639 if (SimplifyDemandedVectorElts(Vec, DemandedElts, VecUndef, VecZero, TLO, 2640 Depth + 1)) 2641 return true; 2642 // Without knowing the insertion index we can't set KnownUndef/KnownZero. 2643 break; 2644 } 2645 case ISD::VSELECT: { 2646 // Try to transform the select condition based on the current demanded 2647 // elements. 2648 // TODO: If a condition element is undef, we can choose from one arm of the 2649 // select (and if one arm is undef, then we can propagate that to the 2650 // result). 
    // TODO - add support for constant vselect masks (see IR version of this).
    APInt UnusedUndef, UnusedZero;
    if (SimplifyDemandedVectorElts(Op.getOperand(0), DemandedElts, UnusedUndef,
                                   UnusedZero, TLO, Depth + 1))
      return true;

    // See if we can simplify either vselect operand.
    APInt DemandedLHS(DemandedElts);
    APInt DemandedRHS(DemandedElts);
    APInt UndefLHS, ZeroLHS;
    APInt UndefRHS, ZeroRHS;
    if (SimplifyDemandedVectorElts(Op.getOperand(1), DemandedLHS, UndefLHS,
                                   ZeroLHS, TLO, Depth + 1))
      return true;
    if (SimplifyDemandedVectorElts(Op.getOperand(2), DemandedRHS, UndefRHS,
                                   ZeroRHS, TLO, Depth + 1))
      return true;

    KnownUndef = UndefLHS & UndefRHS;
    KnownZero = ZeroLHS & ZeroRHS;
    break;
  }
  case ISD::VECTOR_SHUFFLE: {
    ArrayRef<int> ShuffleMask = cast<ShuffleVectorSDNode>(Op)->getMask();

    // Collect demanded elements from shuffle operands.
    APInt DemandedLHS(NumElts, 0);
    APInt DemandedRHS(NumElts, 0);
    for (unsigned i = 0; i != NumElts; ++i) {
      int M = ShuffleMask[i];
      if (M < 0 || !DemandedElts[i])
        continue;
      assert(0 <= M && M < (int)(2 * NumElts) && "Shuffle index out of range");
      if (M < (int)NumElts)
        DemandedLHS.setBit(M);
      else
        DemandedRHS.setBit(M - NumElts);
    }

    // See if we can simplify either shuffle operand.
    APInt UndefLHS, ZeroLHS;
    APInt UndefRHS, ZeroRHS;
    if (SimplifyDemandedVectorElts(Op.getOperand(0), DemandedLHS, UndefLHS,
                                   ZeroLHS, TLO, Depth + 1))
      return true;
    if (SimplifyDemandedVectorElts(Op.getOperand(1), DemandedRHS, UndefRHS,
                                   ZeroRHS, TLO, Depth + 1))
      return true;

    // Simplify mask using undef elements from LHS/RHS.
    bool Updated = false;
    bool IdentityLHS = true, IdentityRHS = true;
    SmallVector<int, 32> NewMask(ShuffleMask.begin(), ShuffleMask.end());
    for (unsigned i = 0; i != NumElts; ++i) {
      int &M = NewMask[i];
      if (M < 0)
        continue;
      if (!DemandedElts[i] || (M < (int)NumElts && UndefLHS[M]) ||
          (M >= (int)NumElts && UndefRHS[M - NumElts])) {
        Updated = true;
        M = -1;
      }
      IdentityLHS &= (M < 0) || (M == (int)i);
      IdentityRHS &= (M < 0) || ((M - NumElts) == i);
    }

    // Update legal shuffle masks based on demanded elements, but only if the
    // new mask won't reduce to an identity shuffle, which could cause
    // premature removal of the shuffle mask.
    if (Updated && !IdentityLHS && !IdentityRHS && !TLO.LegalOps) {
      SDValue LegalShuffle =
          buildLegalVectorShuffle(VT, DL, Op.getOperand(0), Op.getOperand(1),
                                  NewMask, TLO.DAG);
      if (LegalShuffle)
        return TLO.CombineTo(Op, LegalShuffle);
    }

    // Propagate undef/zero elements from LHS/RHS.
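    // A result lane is undef if its mask entry is -1 or selects an undef
    // input lane, and zero if it selects a known-zero input lane.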
2728 for (unsigned i = 0; i != NumElts; ++i) { 2729 int M = ShuffleMask[i]; 2730 if (M < 0) { 2731 KnownUndef.setBit(i); 2732 } else if (M < (int)NumElts) { 2733 if (UndefLHS[M]) 2734 KnownUndef.setBit(i); 2735 if (ZeroLHS[M]) 2736 KnownZero.setBit(i); 2737 } else { 2738 if (UndefRHS[M - NumElts]) 2739 KnownUndef.setBit(i); 2740 if (ZeroRHS[M - NumElts]) 2741 KnownZero.setBit(i); 2742 } 2743 } 2744 break; 2745 } 2746 case ISD::ANY_EXTEND_VECTOR_INREG: 2747 case ISD::SIGN_EXTEND_VECTOR_INREG: 2748 case ISD::ZERO_EXTEND_VECTOR_INREG: { 2749 APInt SrcUndef, SrcZero; 2750 SDValue Src = Op.getOperand(0); 2751 unsigned NumSrcElts = Src.getValueType().getVectorNumElements(); 2752 APInt DemandedSrcElts = DemandedElts.zextOrSelf(NumSrcElts); 2753 if (SimplifyDemandedVectorElts(Src, DemandedSrcElts, SrcUndef, SrcZero, TLO, 2754 Depth + 1)) 2755 return true; 2756 KnownZero = SrcZero.zextOrTrunc(NumElts); 2757 KnownUndef = SrcUndef.zextOrTrunc(NumElts); 2758 2759 if (Op.getOpcode() == ISD::ANY_EXTEND_VECTOR_INREG && 2760 Op.getValueSizeInBits() == Src.getValueSizeInBits() && 2761 DemandedSrcElts == 1 && TLO.DAG.getDataLayout().isLittleEndian()) { 2762 // aext - if we just need the bottom element then we can bitcast. 2763 return TLO.CombineTo(Op, TLO.DAG.getBitcast(VT, Src)); 2764 } 2765 2766 if (Op.getOpcode() == ISD::ZERO_EXTEND_VECTOR_INREG) { 2767 // zext(undef) upper bits are guaranteed to be zero. 2768 if (DemandedElts.isSubsetOf(KnownUndef)) 2769 return TLO.CombineTo(Op, TLO.DAG.getConstant(0, SDLoc(Op), VT)); 2770 KnownUndef.clearAllBits(); 2771 } 2772 break; 2773 } 2774 2775 // TODO: There are more binop opcodes that could be handled here - MIN, 2776 // MAX, saturated math, etc. 2777 case ISD::OR: 2778 case ISD::XOR: 2779 case ISD::ADD: 2780 case ISD::SUB: 2781 case ISD::FADD: 2782 case ISD::FSUB: 2783 case ISD::FMUL: 2784 case ISD::FDIV: 2785 case ISD::FREM: { 2786 SDValue Op0 = Op.getOperand(0); 2787 SDValue Op1 = Op.getOperand(1); 2788 2789 APInt UndefRHS, ZeroRHS; 2790 if (SimplifyDemandedVectorElts(Op1, DemandedElts, UndefRHS, ZeroRHS, TLO, 2791 Depth + 1)) 2792 return true; 2793 APInt UndefLHS, ZeroLHS; 2794 if (SimplifyDemandedVectorElts(Op0, DemandedElts, UndefLHS, ZeroLHS, TLO, 2795 Depth + 1)) 2796 return true; 2797 2798 KnownZero = ZeroLHS & ZeroRHS; 2799 KnownUndef = getKnownUndefForVectorBinop(Op, TLO.DAG, UndefLHS, UndefRHS); 2800 2801 // Attempt to avoid multi-use ops if we don't need anything from them. 2802 // TODO - use KnownUndef to relax the demandedelts? 2803 if (!DemandedElts.isAllOnesValue()) 2804 if (SimplifyDemandedVectorEltsBinOp(Op0, Op1)) 2805 return true; 2806 break; 2807 } 2808 case ISD::SHL: 2809 case ISD::SRL: 2810 case ISD::SRA: 2811 case ISD::ROTL: 2812 case ISD::ROTR: { 2813 SDValue Op0 = Op.getOperand(0); 2814 SDValue Op1 = Op.getOperand(1); 2815 2816 APInt UndefRHS, ZeroRHS; 2817 if (SimplifyDemandedVectorElts(Op1, DemandedElts, UndefRHS, ZeroRHS, TLO, 2818 Depth + 1)) 2819 return true; 2820 APInt UndefLHS, ZeroLHS; 2821 if (SimplifyDemandedVectorElts(Op0, DemandedElts, UndefLHS, ZeroLHS, TLO, 2822 Depth + 1)) 2823 return true; 2824 2825 KnownZero = ZeroLHS; 2826 KnownUndef = UndefLHS & UndefRHS; // TODO: use getKnownUndefForVectorBinop? 2827 2828 // Attempt to avoid multi-use ops if we don't need anything from them. 2829 // TODO - use KnownUndef to relax the demandedelts? 
2830 if (!DemandedElts.isAllOnesValue()) 2831 if (SimplifyDemandedVectorEltsBinOp(Op0, Op1)) 2832 return true; 2833 break; 2834 } 2835 case ISD::MUL: 2836 case ISD::AND: { 2837 SDValue Op0 = Op.getOperand(0); 2838 SDValue Op1 = Op.getOperand(1); 2839 2840 APInt SrcUndef, SrcZero; 2841 if (SimplifyDemandedVectorElts(Op1, DemandedElts, SrcUndef, SrcZero, TLO, 2842 Depth + 1)) 2843 return true; 2844 if (SimplifyDemandedVectorElts(Op0, DemandedElts, KnownUndef, KnownZero, 2845 TLO, Depth + 1)) 2846 return true; 2847 2848 // If either side has a zero element, then the result element is zero, even 2849 // if the other is an UNDEF. 2850 // TODO: Extend getKnownUndefForVectorBinop to also deal with known zeros 2851 // and then handle 'and' nodes with the rest of the binop opcodes. 2852 KnownZero |= SrcZero; 2853 KnownUndef &= SrcUndef; 2854 KnownUndef &= ~KnownZero; 2855 2856 // Attempt to avoid multi-use ops if we don't need anything from them. 2857 // TODO - use KnownUndef to relax the demandedelts? 2858 if (!DemandedElts.isAllOnesValue()) 2859 if (SimplifyDemandedVectorEltsBinOp(Op0, Op1)) 2860 return true; 2861 break; 2862 } 2863 case ISD::TRUNCATE: 2864 case ISD::SIGN_EXTEND: 2865 case ISD::ZERO_EXTEND: 2866 if (SimplifyDemandedVectorElts(Op.getOperand(0), DemandedElts, KnownUndef, 2867 KnownZero, TLO, Depth + 1)) 2868 return true; 2869 2870 if (Op.getOpcode() == ISD::ZERO_EXTEND) { 2871 // zext(undef) upper bits are guaranteed to be zero. 2872 if (DemandedElts.isSubsetOf(KnownUndef)) 2873 return TLO.CombineTo(Op, TLO.DAG.getConstant(0, SDLoc(Op), VT)); 2874 KnownUndef.clearAllBits(); 2875 } 2876 break; 2877 default: { 2878 if (Op.getOpcode() >= ISD::BUILTIN_OP_END) { 2879 if (SimplifyDemandedVectorEltsForTargetNode(Op, DemandedElts, KnownUndef, 2880 KnownZero, TLO, Depth)) 2881 return true; 2882 } else { 2883 KnownBits Known; 2884 APInt DemandedBits = APInt::getAllOnesValue(EltSizeInBits); 2885 if (SimplifyDemandedBits(Op, DemandedBits, OriginalDemandedElts, Known, 2886 TLO, Depth, AssumeSingleUse)) 2887 return true; 2888 } 2889 break; 2890 } 2891 } 2892 assert((KnownUndef & KnownZero) == 0 && "Elements flagged as undef AND zero"); 2893 2894 // Constant fold all undef cases. 2895 // TODO: Handle zero cases as well. 2896 if (DemandedElts.isSubsetOf(KnownUndef)) 2897 return TLO.CombineTo(Op, TLO.DAG.getUNDEF(VT)); 2898 2899 return false; 2900 } 2901 2902 /// Determine which of the bits specified in Mask are known to be either zero or 2903 /// one and return them in the Known. 2904 void TargetLowering::computeKnownBitsForTargetNode(const SDValue Op, 2905 KnownBits &Known, 2906 const APInt &DemandedElts, 2907 const SelectionDAG &DAG, 2908 unsigned Depth) const { 2909 assert((Op.getOpcode() >= ISD::BUILTIN_OP_END || 2910 Op.getOpcode() == ISD::INTRINSIC_WO_CHAIN || 2911 Op.getOpcode() == ISD::INTRINSIC_W_CHAIN || 2912 Op.getOpcode() == ISD::INTRINSIC_VOID) && 2913 "Should use MaskedValueIsZero if you don't know whether Op" 2914 " is a target node!"); 2915 Known.resetAll(); 2916 } 2917 2918 void TargetLowering::computeKnownBitsForTargetInstr( 2919 GISelKnownBits &Analysis, Register R, KnownBits &Known, 2920 const APInt &DemandedElts, const MachineRegisterInfo &MRI, 2921 unsigned Depth) const { 2922 Known.resetAll(); 2923 } 2924 2925 void TargetLowering::computeKnownBitsForFrameIndex( 2926 const int FrameIdx, KnownBits &Known, const MachineFunction &MF) const { 2927 // The low bits are known zero if the pointer is aligned. 
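  // For example, a 16-byte-aligned frame object has its low 4 address bits
  // known to be zero.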
2928 Known.Zero.setLowBits(Log2(MF.getFrameInfo().getObjectAlign(FrameIdx))); 2929 } 2930 2931 Align TargetLowering::computeKnownAlignForTargetInstr( 2932 GISelKnownBits &Analysis, Register R, const MachineRegisterInfo &MRI, 2933 unsigned Depth) const { 2934 return Align(1); 2935 } 2936 2937 /// This method can be implemented by targets that want to expose additional 2938 /// information about sign bits to the DAG Combiner. 2939 unsigned TargetLowering::ComputeNumSignBitsForTargetNode(SDValue Op, 2940 const APInt &, 2941 const SelectionDAG &, 2942 unsigned Depth) const { 2943 assert((Op.getOpcode() >= ISD::BUILTIN_OP_END || 2944 Op.getOpcode() == ISD::INTRINSIC_WO_CHAIN || 2945 Op.getOpcode() == ISD::INTRINSIC_W_CHAIN || 2946 Op.getOpcode() == ISD::INTRINSIC_VOID) && 2947 "Should use ComputeNumSignBits if you don't know whether Op" 2948 " is a target node!"); 2949 return 1; 2950 } 2951 2952 unsigned TargetLowering::computeNumSignBitsForTargetInstr( 2953 GISelKnownBits &Analysis, Register R, const APInt &DemandedElts, 2954 const MachineRegisterInfo &MRI, unsigned Depth) const { 2955 return 1; 2956 } 2957 2958 bool TargetLowering::SimplifyDemandedVectorEltsForTargetNode( 2959 SDValue Op, const APInt &DemandedElts, APInt &KnownUndef, APInt &KnownZero, 2960 TargetLoweringOpt &TLO, unsigned Depth) const { 2961 assert((Op.getOpcode() >= ISD::BUILTIN_OP_END || 2962 Op.getOpcode() == ISD::INTRINSIC_WO_CHAIN || 2963 Op.getOpcode() == ISD::INTRINSIC_W_CHAIN || 2964 Op.getOpcode() == ISD::INTRINSIC_VOID) && 2965 "Should use SimplifyDemandedVectorElts if you don't know whether Op" 2966 " is a target node!"); 2967 return false; 2968 } 2969 2970 bool TargetLowering::SimplifyDemandedBitsForTargetNode( 2971 SDValue Op, const APInt &DemandedBits, const APInt &DemandedElts, 2972 KnownBits &Known, TargetLoweringOpt &TLO, unsigned Depth) const { 2973 assert((Op.getOpcode() >= ISD::BUILTIN_OP_END || 2974 Op.getOpcode() == ISD::INTRINSIC_WO_CHAIN || 2975 Op.getOpcode() == ISD::INTRINSIC_W_CHAIN || 2976 Op.getOpcode() == ISD::INTRINSIC_VOID) && 2977 "Should use SimplifyDemandedBits if you don't know whether Op" 2978 " is a target node!"); 2979 computeKnownBitsForTargetNode(Op, Known, DemandedElts, TLO.DAG, Depth); 2980 return false; 2981 } 2982 2983 SDValue TargetLowering::SimplifyMultipleUseDemandedBitsForTargetNode( 2984 SDValue Op, const APInt &DemandedBits, const APInt &DemandedElts, 2985 SelectionDAG &DAG, unsigned Depth) const { 2986 assert( 2987 (Op.getOpcode() >= ISD::BUILTIN_OP_END || 2988 Op.getOpcode() == ISD::INTRINSIC_WO_CHAIN || 2989 Op.getOpcode() == ISD::INTRINSIC_W_CHAIN || 2990 Op.getOpcode() == ISD::INTRINSIC_VOID) && 2991 "Should use SimplifyMultipleUseDemandedBits if you don't know whether Op" 2992 " is a target node!"); 2993 return SDValue(); 2994 } 2995 2996 SDValue 2997 TargetLowering::buildLegalVectorShuffle(EVT VT, const SDLoc &DL, SDValue N0, 2998 SDValue N1, MutableArrayRef<int> Mask, 2999 SelectionDAG &DAG) const { 3000 bool LegalMask = isShuffleMaskLegal(Mask, VT); 3001 if (!LegalMask) { 3002 std::swap(N0, N1); 3003 ShuffleVectorSDNode::commuteMask(Mask); 3004 LegalMask = isShuffleMaskLegal(Mask, VT); 3005 } 3006 3007 if (!LegalMask) 3008 return SDValue(); 3009 3010 return DAG.getVectorShuffle(VT, DL, N0, N1, Mask); 3011 } 3012 3013 const Constant *TargetLowering::getTargetConstantFromLoad(LoadSDNode*) const { 3014 return nullptr; 3015 } 3016 3017 bool TargetLowering::isKnownNeverNaNForTargetNode(SDValue Op, 3018 const SelectionDAG &DAG, 3019 bool SNaN, 3020 unsigned Depth) const { 
  assert((Op.getOpcode() >= ISD::BUILTIN_OP_END ||
          Op.getOpcode() == ISD::INTRINSIC_WO_CHAIN ||
          Op.getOpcode() == ISD::INTRINSIC_W_CHAIN ||
          Op.getOpcode() == ISD::INTRINSIC_VOID) &&
         "Should use isKnownNeverNaN if you don't know whether Op"
         " is a target node!");
  return false;
}

// FIXME: Ideally, this would use ISD::isConstantSplatVector(), but that must
// work with truncating build vectors and vectors with elements of less than
// 8 bits.
bool TargetLowering::isConstTrueVal(const SDNode *N) const {
  if (!N)
    return false;

  APInt CVal;
  if (auto *CN = dyn_cast<ConstantSDNode>(N)) {
    CVal = CN->getAPIntValue();
  } else if (auto *BV = dyn_cast<BuildVectorSDNode>(N)) {
    auto *CN = BV->getConstantSplatNode();
    if (!CN)
      return false;

    // If this is a truncating build vector, truncate the splat value.
    // Otherwise, we may fail to match the expected values below.
    unsigned BVEltWidth = BV->getValueType(0).getScalarSizeInBits();
    CVal = CN->getAPIntValue();
    if (BVEltWidth < CVal.getBitWidth())
      CVal = CVal.trunc(BVEltWidth);
  } else {
    return false;
  }

  switch (getBooleanContents(N->getValueType(0))) {
  case UndefinedBooleanContent:
    return CVal[0];
  case ZeroOrOneBooleanContent:
    return CVal.isOneValue();
  case ZeroOrNegativeOneBooleanContent:
    return CVal.isAllOnesValue();
  }

  llvm_unreachable("Invalid boolean contents");
}

bool TargetLowering::isConstFalseVal(const SDNode *N) const {
  if (!N)
    return false;

  const ConstantSDNode *CN = dyn_cast<ConstantSDNode>(N);
  if (!CN) {
    const BuildVectorSDNode *BV = dyn_cast<BuildVectorSDNode>(N);
    if (!BV)
      return false;

    // We are only interested in constant splats (undef elements don't matter
    // when identifying boolean constants), and getConstantSplatNode returns
    // null if all ops are undef.
    CN = BV->getConstantSplatNode();
    if (!CN)
      return false;
  }

  if (getBooleanContents(N->getValueType(0)) == UndefinedBooleanContent)
    return !CN->getAPIntValue()[0];

  return CN->isNullValue();
}

bool TargetLowering::isExtendedTrueVal(const ConstantSDNode *N, EVT VT,
                                       bool SExt) const {
  if (VT == MVT::i1)
    return N->isOne();

  TargetLowering::BooleanContent Cnt = getBooleanContents(VT);
  switch (Cnt) {
  case TargetLowering::ZeroOrOneBooleanContent:
    // An extended value of 1 is always true, unless its original type is i1,
    // in which case it will be sign-extended to -1.
    return (N->isOne() && !SExt) || (SExt && (N->getValueType(0) != MVT::i1));
  case TargetLowering::UndefinedBooleanContent:
  case TargetLowering::ZeroOrNegativeOneBooleanContent:
    return N->isAllOnesValue() && SExt;
  }
  llvm_unreachable("Unexpected enumeration.");
}

/// This helper function of SimplifySetCC tries to optimize the comparison when
/// either operand of the SetCC node is a bitwise-and instruction.
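/// For example, when Y is known to be a power of two, (X & Y) == Y can be
/// rewritten as (X & Y) != 0; the two are equivalent because the only nonzero
/// value (X & Y) can take is Y.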
3111 SDValue TargetLowering::foldSetCCWithAnd(EVT VT, SDValue N0, SDValue N1, 3112 ISD::CondCode Cond, const SDLoc &DL, 3113 DAGCombinerInfo &DCI) const { 3114 // Match these patterns in any of their permutations: 3115 // (X & Y) == Y 3116 // (X & Y) != Y 3117 if (N1.getOpcode() == ISD::AND && N0.getOpcode() != ISD::AND) 3118 std::swap(N0, N1); 3119 3120 EVT OpVT = N0.getValueType(); 3121 if (N0.getOpcode() != ISD::AND || !OpVT.isInteger() || 3122 (Cond != ISD::SETEQ && Cond != ISD::SETNE)) 3123 return SDValue(); 3124 3125 SDValue X, Y; 3126 if (N0.getOperand(0) == N1) { 3127 X = N0.getOperand(1); 3128 Y = N0.getOperand(0); 3129 } else if (N0.getOperand(1) == N1) { 3130 X = N0.getOperand(0); 3131 Y = N0.getOperand(1); 3132 } else { 3133 return SDValue(); 3134 } 3135 3136 SelectionDAG &DAG = DCI.DAG; 3137 SDValue Zero = DAG.getConstant(0, DL, OpVT); 3138 if (DAG.isKnownToBeAPowerOfTwo(Y)) { 3139 // Simplify X & Y == Y to X & Y != 0 if Y has exactly one bit set. 3140 // Note that where Y is variable and is known to have at most one bit set 3141 // (for example, if it is Z & 1) we cannot do this; the expressions are not 3142 // equivalent when Y == 0. 3143 assert(OpVT.isInteger()); 3144 Cond = ISD::getSetCCInverse(Cond, OpVT); 3145 if (DCI.isBeforeLegalizeOps() || 3146 isCondCodeLegal(Cond, N0.getSimpleValueType())) 3147 return DAG.getSetCC(DL, VT, N0, Zero, Cond); 3148 } else if (N0.hasOneUse() && hasAndNotCompare(Y)) { 3149 // If the target supports an 'and-not' or 'and-complement' logic operation, 3150 // try to use that to make a comparison operation more efficient. 3151 // But don't do this transform if the mask is a single bit because there are 3152 // more efficient ways to deal with that case (for example, 'bt' on x86 or 3153 // 'rlwinm' on PPC). 3154 3155 // Bail out if the compare operand that we want to turn into a zero is 3156 // already a zero (otherwise, infinite loop). 3157 auto *YConst = dyn_cast<ConstantSDNode>(Y); 3158 if (YConst && YConst->isNullValue()) 3159 return SDValue(); 3160 3161 // Transform this into: ~X & Y == 0. 3162 SDValue NotX = DAG.getNOT(SDLoc(X), X, OpVT); 3163 SDValue NewAnd = DAG.getNode(ISD::AND, SDLoc(N0), OpVT, NotX, Y); 3164 return DAG.getSetCC(DL, VT, NewAnd, Zero, Cond); 3165 } 3166 3167 return SDValue(); 3168 } 3169 3170 /// There are multiple IR patterns that could be checking whether certain 3171 /// truncation of a signed number would be lossy or not. The pattern which is 3172 /// best at IR level, may not lower optimally. Thus, we want to unfold it. 3173 /// We are looking for the following pattern: (KeptBits is a constant) 3174 /// (add %x, (1 << (KeptBits-1))) srccond (1 << KeptBits) 3175 /// KeptBits won't be bitwidth(x), that will be constant-folded to true/false. 3176 /// KeptBits also can't be 1, that would have been folded to %x dstcond 0 3177 /// We will unfold it into the natural trunc+sext pattern: 3178 /// ((%x << C) a>> C) dstcond %x 3179 /// Where C = bitwidth(x) - KeptBits and C u< bitwidth(x) 3180 SDValue TargetLowering::optimizeSetCCOfSignedTruncationCheck( 3181 EVT SCCVT, SDValue N0, SDValue N1, ISD::CondCode Cond, DAGCombinerInfo &DCI, 3182 const SDLoc &DL) const { 3183 // We must be comparing with a constant. 3184 ConstantSDNode *C1; 3185 if (!(C1 = dyn_cast<ConstantSDNode>(N1))) 3186 return SDValue(); 3187 3188 // N0 should be: add %x, (1 << (KeptBits-1)) 3189 if (N0->getOpcode() != ISD::ADD) 3190 return SDValue(); 3191 3192 // And we must be 'add'ing a constant. 
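// As a concrete sketch of the whole transform (i16 %x, KeptBits = 8):
//   icmp ult i16 (add i16 %x, 128), 256
// holds iff %x is in [-128, 128), i.e. iff %x fits in i8. With
// C = 16 - 8 = 8, the unfolded form ((%x << 8) a>> 8) == %x performs the same
// "sign-extension of the low 8 bits round-trips" test.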
3193 ConstantSDNode *C01; 3194 if (!(C01 = dyn_cast<ConstantSDNode>(N0->getOperand(1)))) 3195 return SDValue(); 3196 3197 SDValue X = N0->getOperand(0); 3198 EVT XVT = X.getValueType(); 3199 3200 // Validate constants ... 3201 3202 APInt I1 = C1->getAPIntValue(); 3203 3204 ISD::CondCode NewCond; 3205 if (Cond == ISD::CondCode::SETULT) { 3206 NewCond = ISD::CondCode::SETEQ; 3207 } else if (Cond == ISD::CondCode::SETULE) { 3208 NewCond = ISD::CondCode::SETEQ; 3209 // But need to 'canonicalize' the constant. 3210 I1 += 1; 3211 } else if (Cond == ISD::CondCode::SETUGT) { 3212 NewCond = ISD::CondCode::SETNE; 3213 // But need to 'canonicalize' the constant. 3214 I1 += 1; 3215 } else if (Cond == ISD::CondCode::SETUGE) { 3216 NewCond = ISD::CondCode::SETNE; 3217 } else 3218 return SDValue(); 3219 3220 APInt I01 = C01->getAPIntValue(); 3221 3222 auto checkConstants = [&I1, &I01]() -> bool { 3223 // Both of them must be power-of-two, and the constant from setcc is bigger. 3224 return I1.ugt(I01) && I1.isPowerOf2() && I01.isPowerOf2(); 3225 }; 3226 3227 if (checkConstants()) { 3228 // Great, e.g. got icmp ult i16 (add i16 %x, 128), 256 3229 } else { 3230 // What if we invert constants? (and the target predicate) 3231 I1.negate(); 3232 I01.negate(); 3233 assert(XVT.isInteger()); 3234 NewCond = getSetCCInverse(NewCond, XVT); 3235 if (!checkConstants()) 3236 return SDValue(); 3237 // Great, e.g. got icmp uge i16 (add i16 %x, -128), -256 3238 } 3239 3240 // They are power-of-two, so which bit is set? 3241 const unsigned KeptBits = I1.logBase2(); 3242 const unsigned KeptBitsMinusOne = I01.logBase2(); 3243 3244 // Magic! 3245 if (KeptBits != (KeptBitsMinusOne + 1)) 3246 return SDValue(); 3247 assert(KeptBits > 0 && KeptBits < XVT.getSizeInBits() && "unreachable"); 3248 3249 // We don't want to do this in every single case. 3250 SelectionDAG &DAG = DCI.DAG; 3251 if (!DAG.getTargetLoweringInfo().shouldTransformSignedTruncationCheck( 3252 XVT, KeptBits)) 3253 return SDValue(); 3254 3255 const unsigned MaskedBits = XVT.getSizeInBits() - KeptBits; 3256 assert(MaskedBits > 0 && MaskedBits < XVT.getSizeInBits() && "unreachable"); 3257 3258 // Unfold into: ((%x << C) a>> C) cond %x 3259 // Where 'cond' will be either 'eq' or 'ne'. 3260 SDValue ShiftAmt = DAG.getConstant(MaskedBits, DL, XVT); 3261 SDValue T0 = DAG.getNode(ISD::SHL, DL, XVT, X, ShiftAmt); 3262 SDValue T1 = DAG.getNode(ISD::SRA, DL, XVT, T0, ShiftAmt); 3263 SDValue T2 = DAG.getSetCC(DL, SCCVT, T1, X, NewCond); 3264 3265 return T2; 3266 } 3267 3268 // (X & (C l>>/<< Y)) ==/!= 0 --> ((X <</l>> Y) & C) ==/!= 0 3269 SDValue TargetLowering::optimizeSetCCByHoistingAndByConstFromLogicalShift( 3270 EVT SCCVT, SDValue N0, SDValue N1C, ISD::CondCode Cond, 3271 DAGCombinerInfo &DCI, const SDLoc &DL) const { 3272 assert(isConstOrConstSplat(N1C) && 3273 isConstOrConstSplat(N1C)->getAPIntValue().isNullValue() && 3274 "Should be a comparison with 0."); 3275 assert((Cond == ISD::SETEQ || Cond == ISD::SETNE) && 3276 "Valid only for [in]equality comparisons."); 3277 3278 unsigned NewShiftOpcode; 3279 SDValue X, C, Y; 3280 3281 SelectionDAG &DAG = DCI.DAG; 3282 const TargetLowering &TLI = DAG.getTargetLoweringInfo(); 3283 3284 // Look for '(C l>>/<< Y)'. 3285 auto Match = [&NewShiftOpcode, &X, &C, &Y, &TLI, &DAG](SDValue V) { 3286 // The shift should be one-use. 
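// (For intuition, a hypothetical scalar example: matching
// (X & (0xFF << Y)) != 0 lets us emit ((X l>> Y) & 0xFF) != 0 instead,
// trading a variable-shifted mask for a fixed immediate mask.)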
3287 if (!V.hasOneUse()) 3288 return false; 3289 unsigned OldShiftOpcode = V.getOpcode(); 3290 switch (OldShiftOpcode) { 3291 case ISD::SHL: 3292 NewShiftOpcode = ISD::SRL; 3293 break; 3294 case ISD::SRL: 3295 NewShiftOpcode = ISD::SHL; 3296 break; 3297 default: 3298 return false; // must be a logical shift. 3299 } 3300 // We should be shifting a constant. 3301 // FIXME: best to use isConstantOrConstantVector(). 3302 C = V.getOperand(0); 3303 ConstantSDNode *CC = 3304 isConstOrConstSplat(C, /*AllowUndefs=*/true, /*AllowTruncation=*/true); 3305 if (!CC) 3306 return false; 3307 Y = V.getOperand(1); 3308 3309 ConstantSDNode *XC = 3310 isConstOrConstSplat(X, /*AllowUndefs=*/true, /*AllowTruncation=*/true); 3311 return TLI.shouldProduceAndByConstByHoistingConstFromShiftsLHSOfAnd( 3312 X, XC, CC, Y, OldShiftOpcode, NewShiftOpcode, DAG); 3313 }; 3314 3315 // LHS of comparison should be an one-use 'and'. 3316 if (N0.getOpcode() != ISD::AND || !N0.hasOneUse()) 3317 return SDValue(); 3318 3319 X = N0.getOperand(0); 3320 SDValue Mask = N0.getOperand(1); 3321 3322 // 'and' is commutative! 3323 if (!Match(Mask)) { 3324 std::swap(X, Mask); 3325 if (!Match(Mask)) 3326 return SDValue(); 3327 } 3328 3329 EVT VT = X.getValueType(); 3330 3331 // Produce: 3332 // ((X 'OppositeShiftOpcode' Y) & C) Cond 0 3333 SDValue T0 = DAG.getNode(NewShiftOpcode, DL, VT, X, Y); 3334 SDValue T1 = DAG.getNode(ISD::AND, DL, VT, T0, C); 3335 SDValue T2 = DAG.getSetCC(DL, SCCVT, T1, N1C, Cond); 3336 return T2; 3337 } 3338 3339 /// Try to fold an equality comparison with a {add/sub/xor} binary operation as 3340 /// the 1st operand (N0). Callers are expected to swap the N0/N1 parameters to 3341 /// handle the commuted versions of these patterns. 3342 SDValue TargetLowering::foldSetCCWithBinOp(EVT VT, SDValue N0, SDValue N1, 3343 ISD::CondCode Cond, const SDLoc &DL, 3344 DAGCombinerInfo &DCI) const { 3345 unsigned BOpcode = N0.getOpcode(); 3346 assert((BOpcode == ISD::ADD || BOpcode == ISD::SUB || BOpcode == ISD::XOR) && 3347 "Unexpected binop"); 3348 assert((Cond == ISD::SETEQ || Cond == ISD::SETNE) && "Unexpected condcode"); 3349 3350 // (X + Y) == X --> Y == 0 3351 // (X - Y) == X --> Y == 0 3352 // (X ^ Y) == X --> Y == 0 3353 SelectionDAG &DAG = DCI.DAG; 3354 EVT OpVT = N0.getValueType(); 3355 SDValue X = N0.getOperand(0); 3356 SDValue Y = N0.getOperand(1); 3357 if (X == N1) 3358 return DAG.getSetCC(DL, VT, Y, DAG.getConstant(0, DL, OpVT), Cond); 3359 3360 if (Y != N1) 3361 return SDValue(); 3362 3363 // (X + Y) == Y --> X == 0 3364 // (X ^ Y) == Y --> X == 0 3365 if (BOpcode == ISD::ADD || BOpcode == ISD::XOR) 3366 return DAG.getSetCC(DL, VT, X, DAG.getConstant(0, DL, OpVT), Cond); 3367 3368 // The shift would not be valid if the operands are boolean (i1). 3369 if (!N0.hasOneUse() || OpVT.getScalarSizeInBits() == 1) 3370 return SDValue(); 3371 3372 // (X - Y) == Y --> X == Y << 1 3373 EVT ShiftVT = getShiftAmountTy(OpVT, DAG.getDataLayout(), 3374 !DCI.isBeforeLegalize()); 3375 SDValue One = DAG.getConstant(1, DL, ShiftVT); 3376 SDValue YShl1 = DAG.getNode(ISD::SHL, DL, N1.getValueType(), Y, One); 3377 if (!DCI.isCalledByLegalizer()) 3378 DCI.AddToWorklist(YShl1.getNode()); 3379 return DAG.getSetCC(DL, VT, X, YShl1, Cond); 3380 } 3381 3382 static SDValue simplifySetCCWithCTPOP(const TargetLowering &TLI, EVT VT, 3383 SDValue N0, const APInt &C1, 3384 ISD::CondCode Cond, const SDLoc &dl, 3385 SelectionDAG &DAG) { 3386 // Look through truncs that don't change the value of a ctpop. 3387 // FIXME: Add vector support? 
Need to be careful with setcc result type below. 3388 SDValue CTPOP = N0; 3389 if (N0.getOpcode() == ISD::TRUNCATE && N0.hasOneUse() && !VT.isVector() && 3390 N0.getScalarValueSizeInBits() > Log2_32(N0.getOperand(0).getScalarValueSizeInBits())) 3391 CTPOP = N0.getOperand(0); 3392 3393 if (CTPOP.getOpcode() != ISD::CTPOP || !CTPOP.hasOneUse()) 3394 return SDValue(); 3395 3396 EVT CTVT = CTPOP.getValueType(); 3397 SDValue CTOp = CTPOP.getOperand(0); 3398 3399 // (ctpop x) u< 2 -> (x & x-1) == 0 3400 // (ctpop x) u> 1 -> (x & x-1) != 0 3401 if ((Cond == ISD::SETULT && C1 == 2) || (Cond == ISD::SETUGT && C1 == 1)) { 3402 // If this is a vector CTPOP, keep the CTPOP if it is legal. 3403 // This is based on X86's custom lowering for vector CTPOP which produces more 3404 // instructions than the expansion here. 3405 // TODO: Should we check if CTPOP is legal (or custom) for scalars? 3406 if (VT.isVector() && TLI.isOperationLegal(ISD::CTPOP, CTVT)) 3407 return SDValue(); 3408 3409 SDValue NegOne = DAG.getAllOnesConstant(dl, CTVT); 3410 SDValue Add = DAG.getNode(ISD::ADD, dl, CTVT, CTOp, NegOne); 3411 SDValue And = DAG.getNode(ISD::AND, dl, CTVT, CTOp, Add); 3412 ISD::CondCode CC = Cond == ISD::SETULT ? ISD::SETEQ : ISD::SETNE; 3413 return DAG.getSetCC(dl, VT, And, DAG.getConstant(0, dl, CTVT), CC); 3414 } 3415 3416 // If ctpop is not supported, expand a power-of-2 comparison based on it. 3417 if ((Cond == ISD::SETEQ || Cond == ISD::SETNE) && C1 == 1) { 3418 // For scalars, keep CTPOP if it is legal or custom. 3419 if (!VT.isVector() && TLI.isOperationLegalOrCustom(ISD::CTPOP, CTVT)) 3420 return SDValue(); 3421 // For vectors, keep CTPOP only if it is legal. 3422 // This is based on X86's custom lowering for CTPOP which produces more 3423 // instructions than the expansion here. 3424 if (VT.isVector() && TLI.isOperationLegal(ISD::CTPOP, CTVT)) 3425 return SDValue(); 3426 3427 // (ctpop x) == 1 --> (x != 0) && ((x & x-1) == 0) 3428 // (ctpop x) != 1 --> (x == 0) || ((x & x-1) != 0) 3429 SDValue Zero = DAG.getConstant(0, dl, CTVT); 3430 SDValue NegOne = DAG.getAllOnesConstant(dl, CTVT); 3431 assert(CTVT.isInteger()); 3432 ISD::CondCode InvCond = ISD::getSetCCInverse(Cond, CTVT); 3433 SDValue Add = DAG.getNode(ISD::ADD, dl, CTVT, CTOp, NegOne); 3434 SDValue And = DAG.getNode(ISD::AND, dl, CTVT, CTOp, Add); 3435 SDValue LHS = DAG.getSetCC(dl, VT, CTOp, Zero, InvCond); 3436 SDValue RHS = DAG.getSetCC(dl, VT, And, Zero, Cond); 3437 unsigned LogicOpcode = Cond == ISD::SETEQ ? ISD::AND : ISD::OR; 3438 return DAG.getNode(LogicOpcode, dl, VT, LHS, RHS); 3439 } 3440 3441 return SDValue(); 3442 } 3443 3444 /// Try to simplify a setcc built with the specified operands and cc. If it is 3445 /// unable to simplify it, return a null SDValue. 3446 SDValue TargetLowering::SimplifySetCC(EVT VT, SDValue N0, SDValue N1, 3447 ISD::CondCode Cond, bool foldBooleans, 3448 DAGCombinerInfo &DCI, 3449 const SDLoc &dl) const { 3450 SelectionDAG &DAG = DCI.DAG; 3451 const DataLayout &Layout = DAG.getDataLayout(); 3452 EVT OpVT = N0.getValueType(); 3453 3454 // Constant fold or commute setcc. 3455 if (SDValue Fold = DAG.FoldSetCC(VT, N0, N1, Cond, dl)) 3456 return Fold; 3457 3458 // Ensure that the constant occurs on the RHS and fold constant comparisons. 3459 // TODO: Handle non-splat vector constants. All undef causes trouble.
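// For example (scalar case): setcc 7, %x, setlt becomes setcc %x, 7, setgt,
// so later combines only have to look for constants on the RHS.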
3460 ISD::CondCode SwappedCC = ISD::getSetCCSwappedOperands(Cond); 3461 if (isConstOrConstSplat(N0) && 3462 (DCI.isBeforeLegalizeOps() || 3463 isCondCodeLegal(SwappedCC, N0.getSimpleValueType()))) 3464 return DAG.getSetCC(dl, VT, N1, N0, SwappedCC); 3465 3466 // If we have a subtract with the same 2 non-constant operands as this setcc 3467 // -- but in reverse order -- then try to commute the operands of this setcc 3468 // to match. A matching pair of setcc (cmp) and sub may be combined into 1 3469 // instruction on some targets. 3470 if (!isConstOrConstSplat(N0) && !isConstOrConstSplat(N1) && 3471 (DCI.isBeforeLegalizeOps() || 3472 isCondCodeLegal(SwappedCC, N0.getSimpleValueType())) && 3473 DAG.getNodeIfExists(ISD::SUB, DAG.getVTList(OpVT), { N1, N0 } ) && 3474 !DAG.getNodeIfExists(ISD::SUB, DAG.getVTList(OpVT), { N0, N1 } )) 3475 return DAG.getSetCC(dl, VT, N1, N0, SwappedCC); 3476 3477 if (auto *N1C = isConstOrConstSplat(N1)) { 3478 const APInt &C1 = N1C->getAPIntValue(); 3479 3480 // Optimize some CTPOP cases. 3481 if (SDValue V = simplifySetCCWithCTPOP(*this, VT, N0, C1, Cond, dl, DAG)) 3482 return V; 3483 } 3484 3485 // FIXME: Support vectors. 3486 if (auto *N1C = dyn_cast<ConstantSDNode>(N1.getNode())) { 3487 const APInt &C1 = N1C->getAPIntValue(); 3488 3489 // If the LHS is '(srl (ctlz x), 5)', the RHS is 0/1, and this is an 3490 // equality comparison, then we're just comparing whether X itself is 3491 // zero. 3492 if (N0.getOpcode() == ISD::SRL && (C1.isNullValue() || C1.isOneValue()) && 3493 N0.getOperand(0).getOpcode() == ISD::CTLZ && 3494 N0.getOperand(1).getOpcode() == ISD::Constant) { 3495 const APInt &ShAmt = N0.getConstantOperandAPInt(1); 3496 if ((Cond == ISD::SETEQ || Cond == ISD::SETNE) && 3497 ShAmt == Log2_32(N0.getValueSizeInBits())) { 3498 if ((C1 == 0) == (Cond == ISD::SETEQ)) { 3499 // (srl (ctlz x), 5) == 0 -> X != 0 3500 // (srl (ctlz x), 5) != 1 -> X != 0 3501 Cond = ISD::SETNE; 3502 } else { 3503 // (srl (ctlz x), 5) != 0 -> X == 0 3504 // (srl (ctlz x), 5) == 1 -> X == 0 3505 Cond = ISD::SETEQ; 3506 } 3507 SDValue Zero = DAG.getConstant(0, dl, N0.getValueType()); 3508 return DAG.getSetCC(dl, VT, N0.getOperand(0).getOperand(0), 3509 Zero, Cond); 3510 } 3511 } 3512 3513 // (zext x) == C --> x == (trunc C) 3514 // (sext x) == C --> x == (trunc C) 3515 if ((Cond == ISD::SETEQ || Cond == ISD::SETNE) && 3516 DCI.isBeforeLegalize() && N0->hasOneUse()) { 3517 unsigned MinBits = N0.getValueSizeInBits(); 3518 SDValue PreExt; 3519 bool Signed = false; 3520 if (N0->getOpcode() == ISD::ZERO_EXTEND) { 3521 // ZExt 3522 MinBits = N0->getOperand(0).getValueSizeInBits(); 3523 PreExt = N0->getOperand(0); 3524 } else if (N0->getOpcode() == ISD::AND) { 3525 // DAGCombine turns costly ZExts into ANDs 3526 if (auto *C = dyn_cast<ConstantSDNode>(N0->getOperand(1))) 3527 if ((C->getAPIntValue()+1).isPowerOf2()) { 3528 MinBits = C->getAPIntValue().countTrailingOnes(); 3529 PreExt = N0->getOperand(0); 3530 } 3531 } else if (N0->getOpcode() == ISD::SIGN_EXTEND) { 3532 // SExt 3533 MinBits = N0->getOperand(0).getValueSizeInBits(); 3534 PreExt = N0->getOperand(0); 3535 Signed = true; 3536 } else if (auto *LN0 = dyn_cast<LoadSDNode>(N0)) { 3537 // ZEXTLOAD / SEXTLOAD 3538 if (LN0->getExtensionType() == ISD::ZEXTLOAD) { 3539 MinBits = LN0->getMemoryVT().getSizeInBits(); 3540 PreExt = N0; 3541 } else if (LN0->getExtensionType() == ISD::SEXTLOAD) { 3542 Signed = true; 3543 MinBits = LN0->getMemoryVT().getSizeInBits(); 3544 PreExt = N0; 3545 } 3546 } 3547 3548 // Figure out how many bits we 
need to preserve this constant. 3549 unsigned ReqdBits = Signed ? 3550 C1.getBitWidth() - C1.getNumSignBits() + 1 : 3551 C1.getActiveBits(); 3552 3553 // Make sure we're not losing bits from the constant. 3554 if (MinBits > 0 && 3555 MinBits < C1.getBitWidth() && 3556 MinBits >= ReqdBits) { 3557 EVT MinVT = EVT::getIntegerVT(*DAG.getContext(), MinBits); 3558 if (isTypeDesirableForOp(ISD::SETCC, MinVT)) { 3559 // Will get folded away. 3560 SDValue Trunc = DAG.getNode(ISD::TRUNCATE, dl, MinVT, PreExt); 3561 if (MinBits == 1 && C1 == 1) 3562 // Invert the condition. 3563 return DAG.getSetCC(dl, VT, Trunc, DAG.getConstant(0, dl, MVT::i1), 3564 Cond == ISD::SETEQ ? ISD::SETNE : ISD::SETEQ); 3565 SDValue C = DAG.getConstant(C1.trunc(MinBits), dl, MinVT); 3566 return DAG.getSetCC(dl, VT, Trunc, C, Cond); 3567 } 3568 3569 // If truncating the setcc operands is not desirable, we can still 3570 // simplify the expression in some cases: 3571 // setcc ([sz]ext (setcc x, y, cc)), 0, setne) -> setcc (x, y, cc) 3572 // setcc ([sz]ext (setcc x, y, cc)), 0, seteq) -> setcc (x, y, inv(cc)) 3573 // setcc (zext (setcc x, y, cc)), 1, setne) -> setcc (x, y, inv(cc)) 3574 // setcc (zext (setcc x, y, cc)), 1, seteq) -> setcc (x, y, cc) 3575 // setcc (sext (setcc x, y, cc)), -1, setne) -> setcc (x, y, inv(cc)) 3576 // setcc (sext (setcc x, y, cc)), -1, seteq) -> setcc (x, y, cc) 3577 SDValue TopSetCC = N0->getOperand(0); 3578 unsigned N0Opc = N0->getOpcode(); 3579 bool SExt = (N0Opc == ISD::SIGN_EXTEND); 3580 if (TopSetCC.getValueType() == MVT::i1 && VT == MVT::i1 && 3581 TopSetCC.getOpcode() == ISD::SETCC && 3582 (N0Opc == ISD::ZERO_EXTEND || N0Opc == ISD::SIGN_EXTEND) && 3583 (isConstFalseVal(N1C) || 3584 isExtendedTrueVal(N1C, N0->getValueType(0), SExt))) { 3585 3586 bool Inverse = (N1C->isNullValue() && Cond == ISD::SETEQ) || 3587 (!N1C->isNullValue() && Cond == ISD::SETNE); 3588 3589 if (!Inverse) 3590 return TopSetCC; 3591 3592 ISD::CondCode InvCond = ISD::getSetCCInverse( 3593 cast<CondCodeSDNode>(TopSetCC.getOperand(2))->get(), 3594 TopSetCC.getOperand(0).getValueType()); 3595 return DAG.getSetCC(dl, VT, TopSetCC.getOperand(0), 3596 TopSetCC.getOperand(1), 3597 InvCond); 3598 } 3599 } 3600 } 3601 3602 // If the LHS is '(and load, const)', the RHS is 0, the test is for 3603 // equality or unsigned, and all 1 bits of the const are in the same 3604 // partial word, see if we can shorten the load. 3605 if (DCI.isBeforeLegalize() && 3606 !ISD::isSignedIntSetCC(Cond) && 3607 N0.getOpcode() == ISD::AND && C1 == 0 && 3608 N0.getNode()->hasOneUse() && 3609 isa<LoadSDNode>(N0.getOperand(0)) && 3610 N0.getOperand(0).getNode()->hasOneUse() && 3611 isa<ConstantSDNode>(N0.getOperand(1))) { 3612 LoadSDNode *Lod = cast<LoadSDNode>(N0.getOperand(0)); 3613 APInt bestMask; 3614 unsigned bestWidth = 0, bestOffset = 0; 3615 if (Lod->isSimple() && Lod->isUnindexed()) { 3616 unsigned origWidth = N0.getValueSizeInBits(); 3617 unsigned maskWidth = origWidth; 3618 // We can narrow (e.g.) 16-bit extending loads on 32-bit target to 3619 // 8 bits, but have to be careful... 
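// As a little-endian sketch: (and (load i32 %p), 0xFF0000) == 0 can become
// (and (load i16 %p+2), 0xFF) == 0 -- width 16, byte offset 2, with the mask
// shifted down to the narrowed load's bits.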
3620 if (Lod->getExtensionType() != ISD::NON_EXTLOAD) 3621 origWidth = Lod->getMemoryVT().getSizeInBits(); 3622 const APInt &Mask = N0.getConstantOperandAPInt(1); 3623 for (unsigned width = origWidth / 2; width>=8; width /= 2) { 3624 APInt newMask = APInt::getLowBitsSet(maskWidth, width); 3625 for (unsigned offset=0; offset<origWidth/width; offset++) { 3626 if (Mask.isSubsetOf(newMask)) { 3627 if (Layout.isLittleEndian()) 3628 bestOffset = (uint64_t)offset * (width/8); 3629 else 3630 bestOffset = (origWidth/width - offset - 1) * (width/8); 3631 bestMask = Mask.lshr(offset * (width/8) * 8); 3632 bestWidth = width; 3633 break; 3634 } 3635 newMask <<= width; 3636 } 3637 } 3638 } 3639 if (bestWidth) { 3640 EVT newVT = EVT::getIntegerVT(*DAG.getContext(), bestWidth); 3641 if (newVT.isRound() && 3642 shouldReduceLoadWidth(Lod, ISD::NON_EXTLOAD, newVT)) { 3643 SDValue Ptr = Lod->getBasePtr(); 3644 if (bestOffset != 0) 3645 Ptr = 3646 DAG.getMemBasePlusOffset(Ptr, TypeSize::Fixed(bestOffset), dl); 3647 SDValue NewLoad = 3648 DAG.getLoad(newVT, dl, Lod->getChain(), Ptr, 3649 Lod->getPointerInfo().getWithOffset(bestOffset), 3650 Lod->getOriginalAlign()); 3651 return DAG.getSetCC(dl, VT, 3652 DAG.getNode(ISD::AND, dl, newVT, NewLoad, 3653 DAG.getConstant(bestMask.trunc(bestWidth), 3654 dl, newVT)), 3655 DAG.getConstant(0LL, dl, newVT), Cond); 3656 } 3657 } 3658 } 3659 3660 // If the LHS is a ZERO_EXTEND, perform the comparison on the input. 3661 if (N0.getOpcode() == ISD::ZERO_EXTEND) { 3662 unsigned InSize = N0.getOperand(0).getValueSizeInBits(); 3663 3664 // If the comparison constant has bits in the upper part, the 3665 // zero-extended value could never match. 3666 if (C1.intersects(APInt::getHighBitsSet(C1.getBitWidth(), 3667 C1.getBitWidth() - InSize))) { 3668 switch (Cond) { 3669 case ISD::SETUGT: 3670 case ISD::SETUGE: 3671 case ISD::SETEQ: 3672 return DAG.getConstant(0, dl, VT); 3673 case ISD::SETULT: 3674 case ISD::SETULE: 3675 case ISD::SETNE: 3676 return DAG.getConstant(1, dl, VT); 3677 case ISD::SETGT: 3678 case ISD::SETGE: 3679 // True if the sign bit of C1 is set. 3680 return DAG.getConstant(C1.isNegative(), dl, VT); 3681 case ISD::SETLT: 3682 case ISD::SETLE: 3683 // True if the sign bit of C1 isn't set. 3684 return DAG.getConstant(C1.isNonNegative(), dl, VT); 3685 default: 3686 break; 3687 } 3688 } 3689 3690 // Otherwise, we can perform the comparison with the low bits. 
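// e.g. (setcc (zext i8 %x to i32), 42, setult) can simply become an i8
// (setcc %x, 42, setult), with the boolean result extended or truncated back
// to VT.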
3691 switch (Cond) { 3692 case ISD::SETEQ: 3693 case ISD::SETNE: 3694 case ISD::SETUGT: 3695 case ISD::SETUGE: 3696 case ISD::SETULT: 3697 case ISD::SETULE: { 3698 EVT newVT = N0.getOperand(0).getValueType(); 3699 if (DCI.isBeforeLegalizeOps() || 3700 (isOperationLegal(ISD::SETCC, newVT) && 3701 isCondCodeLegal(Cond, newVT.getSimpleVT()))) { 3702 EVT NewSetCCVT = getSetCCResultType(Layout, *DAG.getContext(), newVT); 3703 SDValue NewConst = DAG.getConstant(C1.trunc(InSize), dl, newVT); 3704 3705 SDValue NewSetCC = DAG.getSetCC(dl, NewSetCCVT, N0.getOperand(0), 3706 NewConst, Cond); 3707 return DAG.getBoolExtOrTrunc(NewSetCC, dl, VT, N0.getValueType()); 3708 } 3709 break; 3710 } 3711 default: 3712 break; // todo, be more careful with signed comparisons 3713 } 3714 } else if (N0.getOpcode() == ISD::SIGN_EXTEND_INREG && 3715 (Cond == ISD::SETEQ || Cond == ISD::SETNE)) { 3716 EVT ExtSrcTy = cast<VTSDNode>(N0.getOperand(1))->getVT(); 3717 unsigned ExtSrcTyBits = ExtSrcTy.getSizeInBits(); 3718 EVT ExtDstTy = N0.getValueType(); 3719 unsigned ExtDstTyBits = ExtDstTy.getSizeInBits(); 3720 3721 // If the constant doesn't fit into the number of bits for the source of 3722 // the sign extension, it is impossible for both sides to be equal. 3723 if (C1.getMinSignedBits() > ExtSrcTyBits) 3724 return DAG.getConstant(Cond == ISD::SETNE, dl, VT); 3725 3726 SDValue ZextOp; 3727 EVT Op0Ty = N0.getOperand(0).getValueType(); 3728 if (Op0Ty == ExtSrcTy) { 3729 ZextOp = N0.getOperand(0); 3730 } else { 3731 APInt Imm = APInt::getLowBitsSet(ExtDstTyBits, ExtSrcTyBits); 3732 ZextOp = DAG.getNode(ISD::AND, dl, Op0Ty, N0.getOperand(0), 3733 DAG.getConstant(Imm, dl, Op0Ty)); 3734 } 3735 if (!DCI.isCalledByLegalizer()) 3736 DCI.AddToWorklist(ZextOp.getNode()); 3737 // Otherwise, make this a use of a zext. 3738 return DAG.getSetCC(dl, VT, ZextOp, 3739 DAG.getConstant(C1 & APInt::getLowBitsSet( 3740 ExtDstTyBits, 3741 ExtSrcTyBits), 3742 dl, ExtDstTy), 3743 Cond); 3744 } else if ((N1C->isNullValue() || N1C->isOne()) && 3745 (Cond == ISD::SETEQ || Cond == ISD::SETNE)) { 3746 // SETCC (SETCC), [0|1], [EQ|NE] -> SETCC 3747 if (N0.getOpcode() == ISD::SETCC && 3748 isTypeLegal(VT) && VT.bitsLE(N0.getValueType()) && 3749 (N0.getValueType() == MVT::i1 || 3750 getBooleanContents(N0.getOperand(0).getValueType()) == 3751 ZeroOrOneBooleanContent)) { 3752 bool TrueWhenTrue = (Cond == ISD::SETEQ) ^ (!N1C->isOne()); 3753 if (TrueWhenTrue) 3754 return DAG.getNode(ISD::TRUNCATE, dl, VT, N0); 3755 // Invert the condition. 3756 ISD::CondCode CC = cast<CondCodeSDNode>(N0.getOperand(2))->get(); 3757 CC = ISD::getSetCCInverse(CC, N0.getOperand(0).getValueType()); 3758 if (DCI.isBeforeLegalizeOps() || 3759 isCondCodeLegal(CC, N0.getOperand(0).getSimpleValueType())) 3760 return DAG.getSetCC(dl, VT, N0.getOperand(0), N0.getOperand(1), CC); 3761 } 3762 3763 if ((N0.getOpcode() == ISD::XOR || 3764 (N0.getOpcode() == ISD::AND && 3765 N0.getOperand(0).getOpcode() == ISD::XOR && 3766 N0.getOperand(1) == N0.getOperand(0).getOperand(1))) && 3767 isa<ConstantSDNode>(N0.getOperand(1)) && 3768 cast<ConstantSDNode>(N0.getOperand(1))->isOne()) { 3769 // If this is (X^1) == 0/1, swap the RHS and eliminate the xor. We 3770 // can only do this if the top bits are known zero. 3771 unsigned BitWidth = N0.getValueSizeInBits(); 3772 if (DAG.MaskedValueIsZero(N0, 3773 APInt::getHighBitsSet(BitWidth, 3774 BitWidth-1))) { 3775 // Okay, get the un-inverted input value. 
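// e.g. with all upper bits known zero, (xor %x, 1) == 0 is rewritten below
// as %x != 0, which for a 0/1 value is the same as %x == 1.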
3776 SDValue Val; 3777 if (N0.getOpcode() == ISD::XOR) { 3778 Val = N0.getOperand(0); 3779 } else { 3780 assert(N0.getOpcode() == ISD::AND && 3781 N0.getOperand(0).getOpcode() == ISD::XOR); 3782 // ((X^1)&1)^1 -> X & 1 3783 Val = DAG.getNode(ISD::AND, dl, N0.getValueType(), 3784 N0.getOperand(0).getOperand(0), 3785 N0.getOperand(1)); 3786 } 3787 3788 return DAG.getSetCC(dl, VT, Val, N1, 3789 Cond == ISD::SETEQ ? ISD::SETNE : ISD::SETEQ); 3790 } 3791 } else if (N1C->isOne()) { 3792 SDValue Op0 = N0; 3793 if (Op0.getOpcode() == ISD::TRUNCATE) 3794 Op0 = Op0.getOperand(0); 3795 3796 if ((Op0.getOpcode() == ISD::XOR) && 3797 Op0.getOperand(0).getOpcode() == ISD::SETCC && 3798 Op0.getOperand(1).getOpcode() == ISD::SETCC) { 3799 SDValue XorLHS = Op0.getOperand(0); 3800 SDValue XorRHS = Op0.getOperand(1); 3801 // Ensure that the input setccs return an i1 type or 0/1 value. 3802 if (Op0.getValueType() == MVT::i1 || 3803 (getBooleanContents(XorLHS.getOperand(0).getValueType()) == 3804 ZeroOrOneBooleanContent && 3805 getBooleanContents(XorRHS.getOperand(0).getValueType()) == 3806 ZeroOrOneBooleanContent)) { 3807 // (xor (setcc), (setcc)) == / != 1 -> (setcc) != / == (setcc) 3808 Cond = (Cond == ISD::SETEQ) ? ISD::SETNE : ISD::SETEQ; 3809 return DAG.getSetCC(dl, VT, XorLHS, XorRHS, Cond); 3810 } 3811 } 3812 if (Op0.getOpcode() == ISD::AND && 3813 isa<ConstantSDNode>(Op0.getOperand(1)) && 3814 cast<ConstantSDNode>(Op0.getOperand(1))->isOne()) { 3815 // If this is (X&1) == / != 1, normalize it to (X&1) != / == 0. 3816 if (Op0.getValueType().bitsGT(VT)) 3817 Op0 = DAG.getNode(ISD::AND, dl, VT, 3818 DAG.getNode(ISD::TRUNCATE, dl, VT, Op0.getOperand(0)), 3819 DAG.getConstant(1, dl, VT)); 3820 else if (Op0.getValueType().bitsLT(VT)) 3821 Op0 = DAG.getNode(ISD::AND, dl, VT, 3822 DAG.getNode(ISD::ANY_EXTEND, dl, VT, Op0.getOperand(0)), 3823 DAG.getConstant(1, dl, VT)); 3824 3825 return DAG.getSetCC(dl, VT, Op0, 3826 DAG.getConstant(0, dl, Op0.getValueType()), 3827 Cond == ISD::SETEQ ? ISD::SETNE : ISD::SETEQ); 3828 } 3829 if (Op0.getOpcode() == ISD::AssertZext && 3830 cast<VTSDNode>(Op0.getOperand(1))->getVT() == MVT::i1) 3831 return DAG.getSetCC(dl, VT, Op0, 3832 DAG.getConstant(0, dl, Op0.getValueType()), 3833 Cond == ISD::SETEQ ? ISD::SETNE : ISD::SETEQ); 3834 } 3835 } 3836 3837 // Given: 3838 // icmp eq/ne (urem %x, %y), 0 3839 // Iff %x has 0 or 1 bits set, and %y has at least 2 bits set, omit 'urem': 3840 // icmp eq/ne %x, 0 3841 if (N0.getOpcode() == ISD::UREM && N1C->isNullValue() && 3842 (Cond == ISD::SETEQ || Cond == ISD::SETNE)) { 3843 KnownBits XKnown = DAG.computeKnownBits(N0.getOperand(0)); 3844 KnownBits YKnown = DAG.computeKnownBits(N0.getOperand(1)); 3845 if (XKnown.countMaxPopulation() == 1 && YKnown.countMinPopulation() >= 2) 3846 return DAG.getSetCC(dl, VT, N0.getOperand(0), N1, Cond); 3847 } 3848 3849 if (SDValue V = 3850 optimizeSetCCOfSignedTruncationCheck(VT, N0, N1, Cond, DCI, dl)) 3851 return V; 3852 } 3853 3854 // These simplifications apply to splat vectors as well. 3855 // TODO: Handle more splat vector cases. 
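// For instance (scalar or splat): (setcc %x, 0, setuge) is always true, and
// (setcc i8 %x, 255, setult) canonicalizes to (setcc %x, 255, setne) below.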
3856 if (auto *N1C = isConstOrConstSplat(N1)) { 3857 const APInt &C1 = N1C->getAPIntValue(); 3858 3859 APInt MinVal, MaxVal; 3860 unsigned OperandBitSize = N1C->getValueType(0).getScalarSizeInBits(); 3861 if (ISD::isSignedIntSetCC(Cond)) { 3862 MinVal = APInt::getSignedMinValue(OperandBitSize); 3863 MaxVal = APInt::getSignedMaxValue(OperandBitSize); 3864 } else { 3865 MinVal = APInt::getMinValue(OperandBitSize); 3866 MaxVal = APInt::getMaxValue(OperandBitSize); 3867 } 3868 3869 // Canonicalize GE/LE comparisons to use GT/LT comparisons. 3870 if (Cond == ISD::SETGE || Cond == ISD::SETUGE) { 3871 // X >= MIN --> true 3872 if (C1 == MinVal) 3873 return DAG.getBoolConstant(true, dl, VT, OpVT); 3874 3875 if (!VT.isVector()) { // TODO: Support this for vectors. 3876 // X >= C0 --> X > (C0 - 1) 3877 APInt C = C1 - 1; 3878 ISD::CondCode NewCC = (Cond == ISD::SETGE) ? ISD::SETGT : ISD::SETUGT; 3879 if ((DCI.isBeforeLegalizeOps() || 3880 isCondCodeLegal(NewCC, VT.getSimpleVT())) && 3881 (!N1C->isOpaque() || (C.getBitWidth() <= 64 && 3882 isLegalICmpImmediate(C.getSExtValue())))) { 3883 return DAG.getSetCC(dl, VT, N0, 3884 DAG.getConstant(C, dl, N1.getValueType()), 3885 NewCC); 3886 } 3887 } 3888 } 3889 3890 if (Cond == ISD::SETLE || Cond == ISD::SETULE) { 3891 // X <= MAX --> true 3892 if (C1 == MaxVal) 3893 return DAG.getBoolConstant(true, dl, VT, OpVT); 3894 3895 // X <= C0 --> X < (C0 + 1) 3896 if (!VT.isVector()) { // TODO: Support this for vectors. 3897 APInt C = C1 + 1; 3898 ISD::CondCode NewCC = (Cond == ISD::SETLE) ? ISD::SETLT : ISD::SETULT; 3899 if ((DCI.isBeforeLegalizeOps() || 3900 isCondCodeLegal(NewCC, VT.getSimpleVT())) && 3901 (!N1C->isOpaque() || (C.getBitWidth() <= 64 && 3902 isLegalICmpImmediate(C.getSExtValue())))) { 3903 return DAG.getSetCC(dl, VT, N0, 3904 DAG.getConstant(C, dl, N1.getValueType()), 3905 NewCC); 3906 } 3907 } 3908 } 3909 3910 if (Cond == ISD::SETLT || Cond == ISD::SETULT) { 3911 if (C1 == MinVal) 3912 return DAG.getBoolConstant(false, dl, VT, OpVT); // X < MIN --> false 3913 3914 // TODO: Support this for vectors after legalize ops. 3915 if (!VT.isVector() || DCI.isBeforeLegalizeOps()) { 3916 // Canonicalize setlt X, Max --> setne X, Max 3917 if (C1 == MaxVal) 3918 return DAG.getSetCC(dl, VT, N0, N1, ISD::SETNE); 3919 3920 // If we have setult X, 1, turn it into seteq X, 0 3921 if (C1 == MinVal+1) 3922 return DAG.getSetCC(dl, VT, N0, 3923 DAG.getConstant(MinVal, dl, N0.getValueType()), 3924 ISD::SETEQ); 3925 } 3926 } 3927 3928 if (Cond == ISD::SETGT || Cond == ISD::SETUGT) { 3929 if (C1 == MaxVal) 3930 return DAG.getBoolConstant(false, dl, VT, OpVT); // X > MAX --> false 3931 3932 // TODO: Support this for vectors after legalize ops. 3933 if (!VT.isVector() || DCI.isBeforeLegalizeOps()) { 3934 // Canonicalize setgt X, Min --> setne X, Min 3935 if (C1 == MinVal) 3936 return DAG.getSetCC(dl, VT, N0, N1, ISD::SETNE); 3937 3938 // If we have setugt X, Max-1, turn it into seteq X, Max 3939 if (C1 == MaxVal-1) 3940 return DAG.getSetCC(dl, VT, N0, 3941 DAG.getConstant(MaxVal, dl, N0.getValueType()), 3942 ISD::SETEQ); 3943 } 3944 } 3945 3946 if (Cond == ISD::SETEQ || Cond == ISD::SETNE) { 3947 // (X & (C l>>/<< Y)) ==/!= 0 --> ((X <</l>> Y) & C) ==/!= 0 3948 if (C1.isNullValue()) 3949 if (SDValue CC = optimizeSetCCByHoistingAndByConstFromLogicalShift( 3950 VT, N0, N1, Cond, DCI, dl)) 3951 return CC; 3952 } 3953 3954 // If we have "setcc X, C0", check to see if we can shrink the immediate 3955 // by changing cc. 3956 // TODO: Support this for vectors after legalize ops. 
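// e.g. on i32, (setcc %x, 0x7fffffff, setugt) becomes (setcc %x, 0, setlt):
// the unsigned values above SINTMAX are exactly those with the sign bit set,
// and 0 is usually the cheaper immediate.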
3957 if (!VT.isVector() || DCI.isBeforeLegalizeOps()) { 3958 // SETUGT X, SINTMAX -> SETLT X, 0 3959 // SETUGE X, SINTMIN -> SETLT X, 0 3960 if ((Cond == ISD::SETUGT && C1.isMaxSignedValue()) || 3961 (Cond == ISD::SETUGE && C1.isMinSignedValue())) 3962 return DAG.getSetCC(dl, VT, N0, 3963 DAG.getConstant(0, dl, N1.getValueType()), 3964 ISD::SETLT); 3965 3966 // SETULT X, SINTMIN -> SETGT X, -1 3967 // SETULE X, SINTMAX -> SETGT X, -1 3968 if ((Cond == ISD::SETULT && C1.isMinSignedValue()) || 3969 (Cond == ISD::SETULE && C1.isMaxSignedValue())) 3970 return DAG.getSetCC(dl, VT, N0, 3971 DAG.getAllOnesConstant(dl, N1.getValueType()), 3972 ISD::SETGT); 3973 } 3974 } 3975 3976 // Back to non-vector simplifications. 3977 // TODO: Can we do these for vector splats? 3978 if (auto *N1C = dyn_cast<ConstantSDNode>(N1.getNode())) { 3979 const TargetLowering &TLI = DAG.getTargetLoweringInfo(); 3980 const APInt &C1 = N1C->getAPIntValue(); 3981 EVT ShValTy = N0.getValueType(); 3982 3983 // Fold bit comparisons when we can. 3984 if (getBooleanContents(N0.getValueType()) == ZeroOrOneBooleanContent && 3985 (Cond == ISD::SETEQ || Cond == ISD::SETNE) && 3986 (VT == ShValTy || (isTypeLegal(VT) && VT.bitsLE(ShValTy))) && 3987 N0.getOpcode() == ISD::AND) { 3988 if (auto *AndRHS = dyn_cast<ConstantSDNode>(N0.getOperand(1))) { 3989 EVT ShiftTy = 3990 getShiftAmountTy(ShValTy, Layout, !DCI.isBeforeLegalize()); 3991 if (Cond == ISD::SETNE && C1 == 0) {// (X & 8) != 0 --> (X & 8) >> 3 3992 // Perform the xform if the AND RHS is a single bit. 3993 unsigned ShCt = AndRHS->getAPIntValue().logBase2(); 3994 if (AndRHS->getAPIntValue().isPowerOf2() && 3995 !TLI.shouldAvoidTransformToShift(ShValTy, ShCt)) { 3996 return DAG.getNode(ISD::TRUNCATE, dl, VT, 3997 DAG.getNode(ISD::SRL, dl, ShValTy, N0, 3998 DAG.getConstant(ShCt, dl, ShiftTy))); 3999 } 4000 } else if (Cond == ISD::SETEQ && C1 == AndRHS->getAPIntValue()) { 4001 // (X & 8) == 8 --> (X & 8) >> 3 4002 // Perform the xform if C1 is a single bit. 
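// e.g. (X & 8) == 8 --> trunc ((X & 8) >> 3): the masked value is either 0
// or 8, so shifting right by log2(8) = 3 yields the setcc's 0/1 result
// directly.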
4003 unsigned ShCt = C1.logBase2(); 4004 if (C1.isPowerOf2() && 4005 !TLI.shouldAvoidTransformToShift(ShValTy, ShCt)) { 4006 return DAG.getNode(ISD::TRUNCATE, dl, VT, 4007 DAG.getNode(ISD::SRL, dl, ShValTy, N0, 4008 DAG.getConstant(ShCt, dl, ShiftTy))); 4009 } 4010 } 4011 } 4012 } 4013 4014 if (C1.getMinSignedBits() <= 64 && 4015 !isLegalICmpImmediate(C1.getSExtValue())) { 4016 EVT ShiftTy = getShiftAmountTy(ShValTy, Layout, !DCI.isBeforeLegalize()); 4017 // (X & -256) == 256 -> (X >> 8) == 1 4018 if ((Cond == ISD::SETEQ || Cond == ISD::SETNE) && 4019 N0.getOpcode() == ISD::AND && N0.hasOneUse()) { 4020 if (auto *AndRHS = dyn_cast<ConstantSDNode>(N0.getOperand(1))) { 4021 const APInt &AndRHSC = AndRHS->getAPIntValue(); 4022 if ((-AndRHSC).isPowerOf2() && (AndRHSC & C1) == C1) { 4023 unsigned ShiftBits = AndRHSC.countTrailingZeros(); 4024 if (!TLI.shouldAvoidTransformToShift(ShValTy, ShiftBits)) { 4025 SDValue Shift = 4026 DAG.getNode(ISD::SRL, dl, ShValTy, N0.getOperand(0), 4027 DAG.getConstant(ShiftBits, dl, ShiftTy)); 4028 SDValue CmpRHS = DAG.getConstant(C1.lshr(ShiftBits), dl, ShValTy); 4029 return DAG.getSetCC(dl, VT, Shift, CmpRHS, Cond); 4030 } 4031 } 4032 } 4033 } else if (Cond == ISD::SETULT || Cond == ISD::SETUGE || 4034 Cond == ISD::SETULE || Cond == ISD::SETUGT) { 4035 bool AdjOne = (Cond == ISD::SETULE || Cond == ISD::SETUGT); 4036 // X < 0x100000000 -> (X >> 32) < 1 4037 // X >= 0x100000000 -> (X >> 32) >= 1 4038 // X <= 0x0ffffffff -> (X >> 32) < 1 4039 // X > 0x0ffffffff -> (X >> 32) >= 1 4040 unsigned ShiftBits; 4041 APInt NewC = C1; 4042 ISD::CondCode NewCond = Cond; 4043 if (AdjOne) { 4044 ShiftBits = C1.countTrailingOnes(); 4045 NewC = NewC + 1; 4046 NewCond = (Cond == ISD::SETULE) ? ISD::SETULT : ISD::SETUGE; 4047 } else { 4048 ShiftBits = C1.countTrailingZeros(); 4049 } 4050 NewC.lshrInPlace(ShiftBits); 4051 if (ShiftBits && NewC.getMinSignedBits() <= 64 && 4052 isLegalICmpImmediate(NewC.getSExtValue()) && 4053 !TLI.shouldAvoidTransformToShift(ShValTy, ShiftBits)) { 4054 SDValue Shift = DAG.getNode(ISD::SRL, dl, ShValTy, N0, 4055 DAG.getConstant(ShiftBits, dl, ShiftTy)); 4056 SDValue CmpRHS = DAG.getConstant(NewC, dl, ShValTy); 4057 return DAG.getSetCC(dl, VT, Shift, CmpRHS, NewCond); 4058 } 4059 } 4060 } 4061 } 4062 4063 if (!isa<ConstantFPSDNode>(N0) && isa<ConstantFPSDNode>(N1)) { 4064 auto *CFP = cast<ConstantFPSDNode>(N1); 4065 assert(!CFP->getValueAPF().isNaN() && "Unexpected NaN value"); 4066 4067 // Otherwise, we know the RHS is not a NaN. Simplify the node to drop the 4068 // constant if knowing that the operand is non-nan is enough. We prefer to 4069 // have SETO(x,x) instead of SETO(x, 0.0) because this avoids having to 4070 // materialize 0.0. 4071 if (Cond == ISD::SETO || Cond == ISD::SETUO) 4072 return DAG.getSetCC(dl, VT, N0, N0, Cond); 4073 4074 // setcc (fneg x), C -> setcc swap(pred) x, -C 4075 if (N0.getOpcode() == ISD::FNEG) { 4076 ISD::CondCode SwapCond = ISD::getSetCCSwappedOperands(Cond); 4077 if (DCI.isBeforeLegalizeOps() || 4078 isCondCodeLegal(SwapCond, N0.getSimpleValueType())) { 4079 SDValue NegN1 = DAG.getNode(ISD::FNEG, dl, N0.getValueType(), N1); 4080 return DAG.getSetCC(dl, VT, N0.getOperand(0), NegN1, SwapCond); 4081 } 4082 } 4083 4084 // If the condition is not legal, see if we can find an equivalent one 4085 // which is legal. 
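// e.g. (setcc %x, +inf, setoeq) can become (setcc %x, +inf, setoge) via the
// mapping below, since no value compares ordered-greater-than +inf.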
4086 if (!isCondCodeLegal(Cond, N0.getSimpleValueType())) { 4087 // If the comparison was an awkward floating-point == or != and one of 4088 // the comparison operands is infinity or negative infinity, convert the 4089 // condition to a less-awkward <= or >=. 4090 if (CFP->getValueAPF().isInfinity()) { 4091 bool IsNegInf = CFP->getValueAPF().isNegative(); 4092 ISD::CondCode NewCond = ISD::SETCC_INVALID; 4093 switch (Cond) { 4094 case ISD::SETOEQ: NewCond = IsNegInf ? ISD::SETOLE : ISD::SETOGE; break; 4095 case ISD::SETUEQ: NewCond = IsNegInf ? ISD::SETULE : ISD::SETUGE; break; 4096 case ISD::SETUNE: NewCond = IsNegInf ? ISD::SETUGT : ISD::SETULT; break; 4097 case ISD::SETONE: NewCond = IsNegInf ? ISD::SETOGT : ISD::SETOLT; break; 4098 default: break; 4099 } 4100 if (NewCond != ISD::SETCC_INVALID && 4101 isCondCodeLegal(NewCond, N0.getSimpleValueType())) 4102 return DAG.getSetCC(dl, VT, N0, N1, NewCond); 4103 } 4104 } 4105 } 4106 4107 if (N0 == N1) { 4108 // The sext(setcc()) => setcc() optimization relies on the appropriate 4109 // constant being emitted. 4110 assert(!N0.getValueType().isInteger() && 4111 "Integer types should be handled by FoldSetCC"); 4112 4113 bool EqTrue = ISD::isTrueWhenEqual(Cond); 4114 unsigned UOF = ISD::getUnorderedFlavor(Cond); 4115 if (UOF == 2) // FP operators that are undefined on NaNs. 4116 return DAG.getBoolConstant(EqTrue, dl, VT, OpVT); 4117 if (UOF == unsigned(EqTrue)) 4118 return DAG.getBoolConstant(EqTrue, dl, VT, OpVT); 4119 // Otherwise, we can't fold it. However, we can simplify it to SETUO/SETO 4120 // if it is not already. 4121 ISD::CondCode NewCond = UOF == 0 ? ISD::SETO : ISD::SETUO; 4122 if (NewCond != Cond && 4123 (DCI.isBeforeLegalizeOps() || 4124 isCondCodeLegal(NewCond, N0.getSimpleValueType()))) 4125 return DAG.getSetCC(dl, VT, N0, N1, NewCond); 4126 } 4127 4128 if ((Cond == ISD::SETEQ || Cond == ISD::SETNE) && 4129 N0.getValueType().isInteger()) { 4130 if (N0.getOpcode() == ISD::ADD || N0.getOpcode() == ISD::SUB || 4131 N0.getOpcode() == ISD::XOR) { 4132 // Simplify (X+Y) == (X+Z) --> Y == Z 4133 if (N0.getOpcode() == N1.getOpcode()) { 4134 if (N0.getOperand(0) == N1.getOperand(0)) 4135 return DAG.getSetCC(dl, VT, N0.getOperand(1), N1.getOperand(1), Cond); 4136 if (N0.getOperand(1) == N1.getOperand(1)) 4137 return DAG.getSetCC(dl, VT, N0.getOperand(0), N1.getOperand(0), Cond); 4138 if (isCommutativeBinOp(N0.getOpcode())) { 4139 // If X op Y == Y op X, try other combinations. 4140 if (N0.getOperand(0) == N1.getOperand(1)) 4141 return DAG.getSetCC(dl, VT, N0.getOperand(1), N1.getOperand(0), 4142 Cond); 4143 if (N0.getOperand(1) == N1.getOperand(0)) 4144 return DAG.getSetCC(dl, VT, N0.getOperand(0), N1.getOperand(1), 4145 Cond); 4146 } 4147 } 4148 4149 // If RHS is a legal immediate value for a compare instruction, we need 4150 // to be careful about increasing register pressure needlessly. 4151 bool LegalRHSImm = false; 4152 4153 if (auto *RHSC = dyn_cast<ConstantSDNode>(N1)) { 4154 if (auto *LHSR = dyn_cast<ConstantSDNode>(N0.getOperand(1))) { 4155 // Turn (X+C1) == C2 --> X == C2-C1 4156 if (N0.getOpcode() == ISD::ADD && N0.getNode()->hasOneUse()) { 4157 return DAG.getSetCC(dl, VT, N0.getOperand(0), 4158 DAG.getConstant(RHSC->getAPIntValue()- 4159 LHSR->getAPIntValue(), 4160 dl, N0.getValueType()), Cond); 4161 } 4162 4163 // Turn (X^C1) == C2 into X == C1^C2 iff X&~C1 = 0. 4164 if (N0.getOpcode() == ISD::XOR) 4165 // If we know that all of the inverted bits are zero, don't bother 4166 // performing the inversion. 
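// e.g. if %x is known to fit in 8 bits, (xor %x, 0xFF) == 0x0F becomes
// %x == 0xF0 (that is, 0xFF ^ 0x0F), removing the xor entirely.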
4167 if (DAG.MaskedValueIsZero(N0.getOperand(0), ~LHSR->getAPIntValue())) 4168 return 4169 DAG.getSetCC(dl, VT, N0.getOperand(0), 4170 DAG.getConstant(LHSR->getAPIntValue() ^ 4171 RHSC->getAPIntValue(), 4172 dl, N0.getValueType()), 4173 Cond); 4174 } 4175 4176 // Turn (C1-X) == C2 --> X == C1-C2 4177 if (auto *SUBC = dyn_cast<ConstantSDNode>(N0.getOperand(0))) { 4178 if (N0.getOpcode() == ISD::SUB && N0.getNode()->hasOneUse()) { 4179 return 4180 DAG.getSetCC(dl, VT, N0.getOperand(1), 4181 DAG.getConstant(SUBC->getAPIntValue() - 4182 RHSC->getAPIntValue(), 4183 dl, N0.getValueType()), 4184 Cond); 4185 } 4186 } 4187 4188 // Could RHSC fold directly into a compare? 4189 if (RHSC->getValueType(0).getSizeInBits() <= 64) 4190 LegalRHSImm = isLegalICmpImmediate(RHSC->getSExtValue()); 4191 } 4192 4193 // (X+Y) == X --> Y == 0 and similar folds. 4194 // Don't do this if X is an immediate that can fold into a cmp 4195 // instruction and X+Y has other uses. It could be an induction variable 4196 // chain, and the transform would increase register pressure. 4197 if (!LegalRHSImm || N0.hasOneUse()) 4198 if (SDValue V = foldSetCCWithBinOp(VT, N0, N1, Cond, dl, DCI)) 4199 return V; 4200 } 4201 4202 if (N1.getOpcode() == ISD::ADD || N1.getOpcode() == ISD::SUB || 4203 N1.getOpcode() == ISD::XOR) 4204 if (SDValue V = foldSetCCWithBinOp(VT, N1, N0, Cond, dl, DCI)) 4205 return V; 4206 4207 if (SDValue V = foldSetCCWithAnd(VT, N0, N1, Cond, dl, DCI)) 4208 return V; 4209 } 4210 4211 // Fold remainder of division by a constant. 4212 if ((N0.getOpcode() == ISD::UREM || N0.getOpcode() == ISD::SREM) && 4213 N0.hasOneUse() && (Cond == ISD::SETEQ || Cond == ISD::SETNE)) { 4214 AttributeList Attr = DAG.getMachineFunction().getFunction().getAttributes(); 4215 4216 // When division is cheap or optimizing for minimum size, 4217 // fall through to DIVREM creation by skipping this fold. 4218 if (!isIntDivCheap(VT, Attr) && !Attr.hasFnAttribute(Attribute::MinSize)) { 4219 if (N0.getOpcode() == ISD::UREM) { 4220 if (SDValue Folded = buildUREMEqFold(VT, N0, N1, Cond, DCI, dl)) 4221 return Folded; 4222 } else if (N0.getOpcode() == ISD::SREM) { 4223 if (SDValue Folded = buildSREMEqFold(VT, N0, N1, Cond, DCI, dl)) 4224 return Folded; 4225 } 4226 } 4227 } 4228 4229 // Fold away ALL boolean setcc's. 
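// e.g. for i1 operands, (setcc %a, %b, setne) is just (xor %a, %b), and
// (setcc %a, %b, setugt) becomes (and %a, (not %b)) in the table below.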
4230 if (N0.getValueType().getScalarType() == MVT::i1 && foldBooleans) { 4231 SDValue Temp; 4232 switch (Cond) { 4233 default: llvm_unreachable("Unknown integer setcc!"); 4234 case ISD::SETEQ: // X == Y -> ~(X^Y) 4235 Temp = DAG.getNode(ISD::XOR, dl, OpVT, N0, N1); 4236 N0 = DAG.getNOT(dl, Temp, OpVT); 4237 if (!DCI.isCalledByLegalizer()) 4238 DCI.AddToWorklist(Temp.getNode()); 4239 break; 4240 case ISD::SETNE: // X != Y --> (X^Y) 4241 N0 = DAG.getNode(ISD::XOR, dl, OpVT, N0, N1); 4242 break; 4243 case ISD::SETGT: // X >s Y --> X == 0 & Y == 1 --> ~X & Y 4244 case ISD::SETULT: // X <u Y --> X == 0 & Y == 1 --> ~X & Y 4245 Temp = DAG.getNOT(dl, N0, OpVT); 4246 N0 = DAG.getNode(ISD::AND, dl, OpVT, N1, Temp); 4247 if (!DCI.isCalledByLegalizer()) 4248 DCI.AddToWorklist(Temp.getNode()); 4249 break; 4250 case ISD::SETLT: // X <s Y --> X == 1 & Y == 0 --> ~Y & X 4251 case ISD::SETUGT: // X >u Y --> X == 1 & Y == 0 --> ~Y & X 4252 Temp = DAG.getNOT(dl, N1, OpVT); 4253 N0 = DAG.getNode(ISD::AND, dl, OpVT, N0, Temp); 4254 if (!DCI.isCalledByLegalizer()) 4255 DCI.AddToWorklist(Temp.getNode()); 4256 break; 4257 case ISD::SETULE: // X <=u Y --> X == 0 | Y == 1 --> ~X | Y 4258 case ISD::SETGE: // X >=s Y --> X == 0 | Y == 1 --> ~X | Y 4259 Temp = DAG.getNOT(dl, N0, OpVT); 4260 N0 = DAG.getNode(ISD::OR, dl, OpVT, N1, Temp); 4261 if (!DCI.isCalledByLegalizer()) 4262 DCI.AddToWorklist(Temp.getNode()); 4263 break; 4264 case ISD::SETUGE: // X >=u Y --> X == 1 | Y == 0 --> ~Y | X 4265 case ISD::SETLE: // X <=s Y --> X == 1 | Y == 0 --> ~Y | X 4266 Temp = DAG.getNOT(dl, N1, OpVT); 4267 N0 = DAG.getNode(ISD::OR, dl, OpVT, N0, Temp); 4268 break; 4269 } 4270 if (VT.getScalarType() != MVT::i1) { 4271 if (!DCI.isCalledByLegalizer()) 4272 DCI.AddToWorklist(N0.getNode()); 4273 // FIXME: If running after legalize, we probably can't do this. 4274 ISD::NodeType ExtendCode = getExtendForContent(getBooleanContents(OpVT)); 4275 N0 = DAG.getNode(ExtendCode, dl, VT, N0); 4276 } 4277 return N0; 4278 } 4279 4280 // Could not fold it. 4281 return SDValue(); 4282 } 4283 4284 /// Returns true (and the GlobalValue and the offset) if the node is a 4285 /// GlobalAddress + offset. 4286 bool TargetLowering::isGAPlusOffset(SDNode *WN, const GlobalValue *&GA, 4287 int64_t &Offset) const { 4288 4289 SDNode *N = unwrapAddress(SDValue(WN, 0)).getNode(); 4290 4291 if (auto *GASD = dyn_cast<GlobalAddressSDNode>(N)) { 4292 GA = GASD->getGlobal(); 4293 Offset += GASD->getOffset(); 4294 return true; 4295 } 4296 4297 if (N->getOpcode() == ISD::ADD) { 4298 SDValue N1 = N->getOperand(0); 4299 SDValue N2 = N->getOperand(1); 4300 if (isGAPlusOffset(N1.getNode(), GA, Offset)) { 4301 if (auto *V = dyn_cast<ConstantSDNode>(N2)) { 4302 Offset += V->getSExtValue(); 4303 return true; 4304 } 4305 } else if (isGAPlusOffset(N2.getNode(), GA, Offset)) { 4306 if (auto *V = dyn_cast<ConstantSDNode>(N1)) { 4307 Offset += V->getSExtValue(); 4308 return true; 4309 } 4310 } 4311 } 4312 4313 return false; 4314 } 4315 4316 SDValue TargetLowering::PerformDAGCombine(SDNode *N, 4317 DAGCombinerInfo &DCI) const { 4318 // Default implementation: no optimization. 
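// (Targets typically override this hook, switch on N->getOpcode() for their
// own target nodes, and return a replacement SDValue when a combine applies.)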
4319 return SDValue(); 4320 } 4321 4322 //===----------------------------------------------------------------------===// 4323 // Inline Assembler Implementation Methods 4324 //===----------------------------------------------------------------------===// 4325 4326 TargetLowering::ConstraintType 4327 TargetLowering::getConstraintType(StringRef Constraint) const { 4328 unsigned S = Constraint.size(); 4329 4330 if (S == 1) { 4331 switch (Constraint[0]) { 4332 default: break; 4333 case 'r': 4334 return C_RegisterClass; 4335 case 'm': // memory 4336 case 'o': // offsetable 4337 case 'V': // not offsetable 4338 return C_Memory; 4339 case 'n': // Simple Integer 4340 case 'E': // Floating Point Constant 4341 case 'F': // Floating Point Constant 4342 return C_Immediate; 4343 case 'i': // Simple Integer or Relocatable Constant 4344 case 's': // Relocatable Constant 4345 case 'p': // Address. 4346 case 'X': // Allow ANY value. 4347 case 'I': // Target registers. 4348 case 'J': 4349 case 'K': 4350 case 'L': 4351 case 'M': 4352 case 'N': 4353 case 'O': 4354 case 'P': 4355 case '<': 4356 case '>': 4357 return C_Other; 4358 } 4359 } 4360 4361 if (S > 1 && Constraint[0] == '{' && Constraint[S - 1] == '}') { 4362 if (S == 8 && Constraint.substr(1, 6) == "memory") // "{memory}" 4363 return C_Memory; 4364 return C_Register; 4365 } 4366 return C_Unknown; 4367 } 4368 4369 /// Try to replace an X constraint, which matches anything, with another that 4370 /// has more specific requirements based on the type of the corresponding 4371 /// operand. 4372 const char *TargetLowering::LowerXConstraint(EVT ConstraintVT) const { 4373 if (ConstraintVT.isInteger()) 4374 return "r"; 4375 if (ConstraintVT.isFloatingPoint()) 4376 return "f"; // works for many targets 4377 return nullptr; 4378 } 4379 4380 SDValue TargetLowering::LowerAsmOutputForConstraint( 4381 SDValue &Chain, SDValue &Flag, const SDLoc &DL, 4382 const AsmOperandInfo &OpInfo, SelectionDAG &DAG) const { 4383 return SDValue(); 4384 } 4385 4386 /// Lower the specified operand into the Ops vector. 4387 /// If it is invalid, don't add anything to Ops. 4388 void TargetLowering::LowerAsmOperandForConstraint(SDValue Op, 4389 std::string &Constraint, 4390 std::vector<SDValue> &Ops, 4391 SelectionDAG &DAG) const { 4392 4393 if (Constraint.length() > 1) return; 4394 4395 char ConstraintLetter = Constraint[0]; 4396 switch (ConstraintLetter) { 4397 default: break; 4398 case 'X': // Allows any operand; labels (basic block) use this. 4399 if (Op.getOpcode() == ISD::BasicBlock || 4400 Op.getOpcode() == ISD::TargetBlockAddress) { 4401 Ops.push_back(Op); 4402 return; 4403 } 4404 LLVM_FALLTHROUGH; 4405 case 'i': // Simple Integer or Relocatable Constant 4406 case 'n': // Simple Integer 4407 case 's': { // Relocatable Constant 4408 4409 GlobalAddressSDNode *GA; 4410 ConstantSDNode *C; 4411 BlockAddressSDNode *BA; 4412 uint64_t Offset = 0; 4413 4414 // Match (GA) or (C) or (GA+C) or (GA-C) or ((GA+C)+C) or (((GA+C)+C)+C), 4415 // etc., since getelementpointer is variadic. We can't use 4416 // SelectionDAG::FoldSymbolOffset because it expects the GA to be accessible 4417 // while in this case the GA may be furthest from the root node which is 4418 // likely an ISD::ADD. 
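// e.g. ((GA + 4) + 8) is walked from the root: each ISD::ADD contributes its
// constant operand to Offset (12 in total here), and the loop below continues
// on the remaining operand until it reaches the GlobalAddress itself.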
4419 while (1) { 4420 if ((GA = dyn_cast<GlobalAddressSDNode>(Op)) && ConstraintLetter != 'n') { 4421 Ops.push_back(DAG.getTargetGlobalAddress(GA->getGlobal(), SDLoc(Op), 4422 GA->getValueType(0), 4423 Offset + GA->getOffset())); 4424 return; 4425 } else if ((C = dyn_cast<ConstantSDNode>(Op)) && 4426 ConstraintLetter != 's') { 4427 // gcc prints these as sign extended. Sign extend value to 64 bits 4428 // now; without this it would get ZExt'd later in 4429 // ScheduleDAGSDNodes::EmitNode, which is very generic. 4430 bool IsBool = C->getConstantIntValue()->getBitWidth() == 1; 4431 BooleanContent BCont = getBooleanContents(MVT::i64); 4432 ISD::NodeType ExtOpc = IsBool ? getExtendForContent(BCont) 4433 : ISD::SIGN_EXTEND; 4434 int64_t ExtVal = ExtOpc == ISD::ZERO_EXTEND ? C->getZExtValue() 4435 : C->getSExtValue(); 4436 Ops.push_back(DAG.getTargetConstant(Offset + ExtVal, 4437 SDLoc(C), MVT::i64)); 4438 return; 4439 } else if ((BA = dyn_cast<BlockAddressSDNode>(Op)) && 4440 ConstraintLetter != 'n') { 4441 Ops.push_back(DAG.getTargetBlockAddress( 4442 BA->getBlockAddress(), BA->getValueType(0), 4443 Offset + BA->getOffset(), BA->getTargetFlags())); 4444 return; 4445 } else { 4446 const unsigned OpCode = Op.getOpcode(); 4447 if (OpCode == ISD::ADD || OpCode == ISD::SUB) { 4448 if ((C = dyn_cast<ConstantSDNode>(Op.getOperand(0)))) 4449 Op = Op.getOperand(1); 4450 // Subtraction is not commutative. 4451 else if (OpCode == ISD::ADD && 4452 (C = dyn_cast<ConstantSDNode>(Op.getOperand(1)))) 4453 Op = Op.getOperand(0); 4454 else 4455 return; 4456 Offset += (OpCode == ISD::ADD ? 1 : -1) * C->getSExtValue(); 4457 continue; 4458 } 4459 } 4460 return; 4461 } 4462 break; 4463 } 4464 } 4465 } 4466 4467 std::pair<unsigned, const TargetRegisterClass *> 4468 TargetLowering::getRegForInlineAsmConstraint(const TargetRegisterInfo *RI, 4469 StringRef Constraint, 4470 MVT VT) const { 4471 if (Constraint.empty() || Constraint[0] != '{') 4472 return std::make_pair(0u, static_cast<TargetRegisterClass *>(nullptr)); 4473 assert(*(Constraint.end() - 1) == '}' && "Not a brace enclosed constraint?"); 4474 4475 // Remove the braces from around the name. 4476 StringRef RegName(Constraint.data() + 1, Constraint.size() - 2); 4477 4478 std::pair<unsigned, const TargetRegisterClass *> R = 4479 std::make_pair(0u, static_cast<const TargetRegisterClass *>(nullptr)); 4480 4481 // Figure out which register class contains this reg. 4482 for (const TargetRegisterClass *RC : RI->regclasses()) { 4483 // If none of the value types for this register class are valid, we 4484 // can't use it. For example, 64-bit reg classes on 32-bit targets. 4485 if (!isLegalRC(*RI, *RC)) 4486 continue; 4487 4488 for (TargetRegisterClass::iterator I = RC->begin(), E = RC->end(); 4489 I != E; ++I) { 4490 if (RegName.equals_lower(RI->getRegAsmName(*I))) { 4491 std::pair<unsigned, const TargetRegisterClass *> S = 4492 std::make_pair(*I, RC); 4493 4494 // If this register class has the requested value type, return it, 4495 // otherwise keep searching and return the first class found 4496 // if no other is found which explicitly has the requested type. 4497 if (RI->isTypeLegalForClass(*RC, VT)) 4498 return S; 4499 if (!R.second) 4500 R = S; 4501 } 4502 } 4503 } 4504 4505 return R; 4506 } 4507 4508 //===----------------------------------------------------------------------===// 4509 // Constraint Selection. 4510 4511 /// Return true if this is an input operand that is a matching constraint like 4512 /// "4".
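/// For example, with the (hypothetical) constraint string "=r,0", the input's
/// constraint code "0" names output operand 0, so this returns true for it.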
4513 bool TargetLowering::AsmOperandInfo::isMatchingInputConstraint() const { 4514 assert(!ConstraintCode.empty() && "No known constraint!"); 4515 return isdigit(static_cast<unsigned char>(ConstraintCode[0])); 4516 } 4517 4518 /// If this is an input matching constraint, this method returns the output 4519 /// operand it matches. 4520 unsigned TargetLowering::AsmOperandInfo::getMatchedOperand() const { 4521 assert(!ConstraintCode.empty() && "No known constraint!"); 4522 return atoi(ConstraintCode.c_str()); 4523 } 4524 4525 /// Split up the constraint string from the inline assembly value into the 4526 /// specific constraints and their prefixes, and also tie in the associated 4527 /// operand values. 4528 /// If this returns an empty vector, and if the constraint string itself 4529 /// isn't empty, there was an error parsing. 4530 TargetLowering::AsmOperandInfoVector 4531 TargetLowering::ParseConstraints(const DataLayout &DL, 4532 const TargetRegisterInfo *TRI, 4533 const CallBase &Call) const { 4534 /// Information about all of the constraints. 4535 AsmOperandInfoVector ConstraintOperands; 4536 const InlineAsm *IA = cast<InlineAsm>(Call.getCalledOperand()); 4537 unsigned maCount = 0; // Largest number of multiple alternative constraints. 4538 4539 // Do a prepass over the constraints, canonicalizing them, and building up the 4540 // ConstraintOperands list. 4541 unsigned ArgNo = 0; // ArgNo - The argument of the CallInst. 4542 unsigned ResNo = 0; // ResNo - The result number of the next output. 4543 4544 for (InlineAsm::ConstraintInfo &CI : IA->ParseConstraints()) { 4545 ConstraintOperands.emplace_back(std::move(CI)); 4546 AsmOperandInfo &OpInfo = ConstraintOperands.back(); 4547 4548 // Update multiple alternative constraint count. 4549 if (OpInfo.multipleAlternatives.size() > maCount) 4550 maCount = OpInfo.multipleAlternatives.size(); 4551 4552 OpInfo.ConstraintVT = MVT::Other; 4553 4554 // Compute the value type for each operand. 4555 switch (OpInfo.Type) { 4556 case InlineAsm::isOutput: 4557 // Indirect outputs just consume an argument. 4558 if (OpInfo.isIndirect) { 4559 OpInfo.CallOperandVal = Call.getArgOperand(ArgNo++); 4560 break; 4561 } 4562 4563 // The return value of the call is this value. As such, there is no 4564 // corresponding argument. 4565 assert(!Call.getType()->isVoidTy() && "Bad inline asm!"); 4566 if (StructType *STy = dyn_cast<StructType>(Call.getType())) { 4567 OpInfo.ConstraintVT = 4568 getSimpleValueType(DL, STy->getElementType(ResNo)); 4569 } else { 4570 assert(ResNo == 0 && "Asm only has one result!"); 4571 OpInfo.ConstraintVT = getSimpleValueType(DL, Call.getType()); 4572 } 4573 ++ResNo; 4574 break; 4575 case InlineAsm::isInput: 4576 OpInfo.CallOperandVal = Call.getArgOperand(ArgNo++); 4577 break; 4578 case InlineAsm::isClobber: 4579 // Nothing to do. 4580 break; 4581 } 4582 4583 if (OpInfo.CallOperandVal) { 4584 llvm::Type *OpTy = OpInfo.CallOperandVal->getType(); 4585 if (OpInfo.isIndirect) { 4586 llvm::PointerType *PtrTy = dyn_cast<PointerType>(OpTy); 4587 if (!PtrTy) 4588 report_fatal_error("Indirect operand for inline asm not a pointer!"); 4589 OpTy = PtrTy->getElementType(); 4590 } 4591 4592 // Look for vector wrapped in a struct. e.g. { <16 x i8> }. 4593 if (StructType *STy = dyn_cast<StructType>(OpTy)) 4594 if (STy->getNumElements() == 1) 4595 OpTy = STy->getElementType(0); 4596 4597 // If OpTy is not a single value, it may be a struct/union that we 4598 // can tile with integers. 
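// e.g. a 64-bit struct { i32, i32 } operand gets ConstraintVT = MVT::i64 from
// the switch below.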
      if (!OpTy->isSingleValueType() && OpTy->isSized()) {
        unsigned BitSize = DL.getTypeSizeInBits(OpTy);
        switch (BitSize) {
        default: break;
        case 1:
        case 8:
        case 16:
        case 32:
        case 64:
        case 128:
          OpInfo.ConstraintVT =
              MVT::getVT(IntegerType::get(OpTy->getContext(), BitSize), true);
          break;
        }
      } else if (PointerType *PT = dyn_cast<PointerType>(OpTy)) {
        unsigned PtrSize = DL.getPointerSizeInBits(PT->getAddressSpace());
        OpInfo.ConstraintVT = MVT::getIntegerVT(PtrSize);
      } else {
        OpInfo.ConstraintVT = MVT::getVT(OpTy, true);
      }
    }
  }

  // If we have multiple alternative constraints, select the best alternative.
  if (!ConstraintOperands.empty()) {
    if (maCount) {
      unsigned bestMAIndex = 0;
      int bestWeight = -1;
      // weight: -1 = invalid match, and 0 = so-so match to 5 = good match.
      int weight = -1;
      unsigned maIndex;
      // Compute the sums of the weights for each alternative, keeping track
      // of the best (highest weight) one so far.
      for (maIndex = 0; maIndex < maCount; ++maIndex) {
        int weightSum = 0;
        for (unsigned cIndex = 0, eIndex = ConstraintOperands.size();
             cIndex != eIndex; ++cIndex) {
          AsmOperandInfo &OpInfo = ConstraintOperands[cIndex];
          if (OpInfo.Type == InlineAsm::isClobber)
            continue;

          // If this is an output operand with a matching input operand,
          // look up the matching input. If their types mismatch, e.g. one
          // is an integer, the other is floating point, or their sizes are
          // different, flag the whole alternative as unmatchable (weight
          // sum of -1).
          if (OpInfo.hasMatchingInput()) {
            AsmOperandInfo &Input = ConstraintOperands[OpInfo.MatchingInput];
            if (OpInfo.ConstraintVT != Input.ConstraintVT) {
              if ((OpInfo.ConstraintVT.isInteger() !=
                   Input.ConstraintVT.isInteger()) ||
                  (OpInfo.ConstraintVT.getSizeInBits() !=
                   Input.ConstraintVT.getSizeInBits())) {
                weightSum = -1; // Can't match.
                break;
              }
            }
          }
          weight = getMultipleConstraintMatchWeight(OpInfo, maIndex);
          if (weight == -1) {
            weightSum = -1;
            break;
          }
          weightSum += weight;
        }
        // Update best.
        if (weightSum > bestWeight) {
          bestWeight = weightSum;
          bestMAIndex = maIndex;
        }
      }

      // Now select chosen alternative in each constraint.
      for (unsigned cIndex = 0, eIndex = ConstraintOperands.size();
           cIndex != eIndex; ++cIndex) {
        AsmOperandInfo &cInfo = ConstraintOperands[cIndex];
        if (cInfo.Type == InlineAsm::isClobber)
          continue;
        cInfo.selectAlternative(bestMAIndex);
      }
    }
  }

  // Check and hook up tied operands, choose constraint code to use.
  for (unsigned cIndex = 0, eIndex = ConstraintOperands.size();
       cIndex != eIndex; ++cIndex) {
    AsmOperandInfo &OpInfo = ConstraintOperands[cIndex];

    // If this is an output operand with a matching input operand, look up the
    // matching input. If their types mismatch, e.g. one is an integer, the
    // other is floating point, or their sizes are different, flag it as an
    // error.
    if (OpInfo.hasMatchingInput()) {
      AsmOperandInfo &Input = ConstraintOperands[OpInfo.MatchingInput];

      if (OpInfo.ConstraintVT != Input.ConstraintVT) {
        std::pair<unsigned, const TargetRegisterClass *> MatchRC =
            getRegForInlineAsmConstraint(TRI, OpInfo.ConstraintCode,
                                         OpInfo.ConstraintVT);
        std::pair<unsigned, const TargetRegisterClass *> InputRC =
            getRegForInlineAsmConstraint(TRI, Input.ConstraintCode,
                                         Input.ConstraintVT);
        if ((OpInfo.ConstraintVT.isInteger() !=
             Input.ConstraintVT.isInteger()) ||
            (MatchRC.second != InputRC.second)) {
          report_fatal_error("Unsupported asm: input constraint"
                             " with a matching output constraint of"
                             " incompatible type!");
        }
      }
    }
  }

  return ConstraintOperands;
}

/// Return an integer indicating how general CT is.
static unsigned getConstraintGenerality(TargetLowering::ConstraintType CT) {
  switch (CT) {
  case TargetLowering::C_Immediate:
  case TargetLowering::C_Other:
  case TargetLowering::C_Unknown:
    return 0;
  case TargetLowering::C_Register:
    return 1;
  case TargetLowering::C_RegisterClass:
    return 2;
  case TargetLowering::C_Memory:
    return 3;
  }
  llvm_unreachable("Invalid constraint type");
}

/// Examine constraint type and operand type and determine a weight value.
/// This object must already have been set up with the operand type
/// and the current alternative constraint selected.
TargetLowering::ConstraintWeight
TargetLowering::getMultipleConstraintMatchWeight(
    AsmOperandInfo &info, int maIndex) const {
  InlineAsm::ConstraintCodeVector *rCodes;
  if (maIndex >= (int)info.multipleAlternatives.size())
    rCodes = &info.Codes;
  else
    rCodes = &info.multipleAlternatives[maIndex].Codes;
  ConstraintWeight BestWeight = CW_Invalid;

  // Loop over the options, keeping track of the best (highest weight) one.
  for (unsigned i = 0, e = rCodes->size(); i != e; ++i) {
    ConstraintWeight weight =
        getSingleConstraintMatchWeight(info, (*rCodes)[i].c_str());
    if (weight > BestWeight)
      BestWeight = weight;
  }

  return BestWeight;
}

/// Examine constraint type and operand type and determine a weight value.
/// This object must already have been set up with the operand type
/// and the current alternative constraint selected.
TargetLowering::ConstraintWeight
TargetLowering::getSingleConstraintMatchWeight(
    AsmOperandInfo &info, const char *constraint) const {
  ConstraintWeight weight = CW_Invalid;
  Value *CallOperandVal = info.CallOperandVal;
  // If we don't have a value, we can't do a match,
  // but allow it at the lowest weight.
  if (!CallOperandVal)
    return CW_Default;
  // Look at the constraint type.
  switch (*constraint) {
  case 'i': // immediate integer.
  case 'n': // immediate integer with a known value.
    if (isa<ConstantInt>(CallOperandVal))
      weight = CW_Constant;
    break;
  case 's': // non-explicit integral immediate.
    if (isa<GlobalValue>(CallOperandVal))
      weight = CW_Constant;
    break;
  case 'E': // immediate float if host format.
  case 'F': // immediate float.
    if (isa<ConstantFP>(CallOperandVal))
      weight = CW_Constant;
    break;
  case '<': // memory operand with autodecrement.
  case '>': // memory operand with autoincrement.
  case 'm': // memory operand.
  case 'o': // offsettable memory operand
  case 'V': // non-offsettable memory operand
    weight = CW_Memory;
    break;
  case 'r': // general register.
  case 'g': // general register, memory operand or immediate integer.
            // note: Clang converts "g" to "imr".
    if (CallOperandVal->getType()->isIntegerTy())
      weight = CW_Register;
    break;
  case 'X': // any operand.
  default:
    weight = CW_Default;
    break;
  }
  return weight;
}

/// If there are multiple different constraints that we could pick for this
/// operand (e.g. "imr") try to pick the 'best' one.
/// This is somewhat tricky: constraints fall into four classes:
///    Other         -> immediates and magic values
///    Register      -> one specific register
///    RegisterClass -> a group of regs
///    Memory        -> memory
/// Ideally, we would pick the most specific constraint possible: if we have
/// something that fits into a register, we would pick it.  The problem here
/// is that if we have something that could either be in a register or in
/// memory, then picking the register could cause selection of *other*
/// operands to fail: they might only succeed if we pick memory.  Because of
/// this the heuristic we use is:
///
///  1) If there is an 'other' constraint, and if the operand is valid for
///     that constraint, use it.  This makes us take advantage of 'i'
///     constraints when available.
///  2) Otherwise, pick the most general constraint present.  This prefers
///     'm' over 'r', for example.
///
static void ChooseConstraint(TargetLowering::AsmOperandInfo &OpInfo,
                             const TargetLowering &TLI,
                             SDValue Op, SelectionDAG *DAG) {
  assert(OpInfo.Codes.size() > 1 && "Doesn't have multiple constraint options");
  unsigned BestIdx = 0;
  TargetLowering::ConstraintType BestType = TargetLowering::C_Unknown;
  int BestGenerality = -1;

  // Loop over the options, keeping track of the most general one.
  for (unsigned i = 0, e = OpInfo.Codes.size(); i != e; ++i) {
    TargetLowering::ConstraintType CType =
        TLI.getConstraintType(OpInfo.Codes[i]);

    // Indirect 'other' or 'immediate' constraints are not allowed.
    if (OpInfo.isIndirect && !(CType == TargetLowering::C_Memory ||
                               CType == TargetLowering::C_Register ||
                               CType == TargetLowering::C_RegisterClass))
      continue;

    // If this is an 'other' or 'immediate' constraint, see if the operand is
    // valid for it. For example, on X86 we might have an 'rI' constraint. If
    // the operand is an integer in the range [0..31] we want to use I (saving
    // a load of a register), otherwise we must use 'r'.
    if ((CType == TargetLowering::C_Other ||
         CType == TargetLowering::C_Immediate) && Op.getNode()) {
      assert(OpInfo.Codes[i].size() == 1 &&
             "Unhandled multi-letter 'other' constraint");
      std::vector<SDValue> ResultOps;
      TLI.LowerAsmOperandForConstraint(Op, OpInfo.Codes[i],
                                       ResultOps, *DAG);
      if (!ResultOps.empty()) {
        BestType = CType;
        BestIdx = i;
        break;
      }
    }

    // Things with matching constraints can only be registers, per gcc
    // documentation.  This mainly affects "g" constraints.
    if (CType == TargetLowering::C_Memory && OpInfo.hasMatchingInput())
      continue;

    // This constraint letter is more general than the previous one, use it.
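    // E.g. (illustrative): for "imr" with a non-constant value, 'i' produces
    // no operand above, so generality decides: 'm' (C_Memory, generality 3)
    // wins over 'r' (C_RegisterClass, generality 2), and memory is chosen.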
4867 int Generality = getConstraintGenerality(CType); 4868 if (Generality > BestGenerality) { 4869 BestType = CType; 4870 BestIdx = i; 4871 BestGenerality = Generality; 4872 } 4873 } 4874 4875 OpInfo.ConstraintCode = OpInfo.Codes[BestIdx]; 4876 OpInfo.ConstraintType = BestType; 4877 } 4878 4879 /// Determines the constraint code and constraint type to use for the specific 4880 /// AsmOperandInfo, setting OpInfo.ConstraintCode and OpInfo.ConstraintType. 4881 void TargetLowering::ComputeConstraintToUse(AsmOperandInfo &OpInfo, 4882 SDValue Op, 4883 SelectionDAG *DAG) const { 4884 assert(!OpInfo.Codes.empty() && "Must have at least one constraint"); 4885 4886 // Single-letter constraints ('r') are very common. 4887 if (OpInfo.Codes.size() == 1) { 4888 OpInfo.ConstraintCode = OpInfo.Codes[0]; 4889 OpInfo.ConstraintType = getConstraintType(OpInfo.ConstraintCode); 4890 } else { 4891 ChooseConstraint(OpInfo, *this, Op, DAG); 4892 } 4893 4894 // 'X' matches anything. 4895 if (OpInfo.ConstraintCode == "X" && OpInfo.CallOperandVal) { 4896 // Labels and constants are handled elsewhere ('X' is the only thing 4897 // that matches labels). For Functions, the type here is the type of 4898 // the result, which is not what we want to look at; leave them alone. 4899 Value *v = OpInfo.CallOperandVal; 4900 if (isa<BasicBlock>(v) || isa<ConstantInt>(v) || isa<Function>(v)) { 4901 OpInfo.CallOperandVal = v; 4902 return; 4903 } 4904 4905 if (Op.getNode() && Op.getOpcode() == ISD::TargetBlockAddress) 4906 return; 4907 4908 // Otherwise, try to resolve it to something we know about by looking at 4909 // the actual operand type. 4910 if (const char *Repl = LowerXConstraint(OpInfo.ConstraintVT)) { 4911 OpInfo.ConstraintCode = Repl; 4912 OpInfo.ConstraintType = getConstraintType(OpInfo.ConstraintCode); 4913 } 4914 } 4915 } 4916 4917 /// Given an exact SDIV by a constant, create a multiplication 4918 /// with the multiplicative inverse of the constant. 4919 static SDValue BuildExactSDIV(const TargetLowering &TLI, SDNode *N, 4920 const SDLoc &dl, SelectionDAG &DAG, 4921 SmallVectorImpl<SDNode *> &Created) { 4922 SDValue Op0 = N->getOperand(0); 4923 SDValue Op1 = N->getOperand(1); 4924 EVT VT = N->getValueType(0); 4925 EVT SVT = VT.getScalarType(); 4926 EVT ShVT = TLI.getShiftAmountTy(VT, DAG.getDataLayout()); 4927 EVT ShSVT = ShVT.getScalarType(); 4928 4929 bool UseSRA = false; 4930 SmallVector<SDValue, 16> Shifts, Factors; 4931 4932 auto BuildSDIVPattern = [&](ConstantSDNode *C) { 4933 if (C->isNullValue()) 4934 return false; 4935 APInt Divisor = C->getAPIntValue(); 4936 unsigned Shift = Divisor.countTrailingZeros(); 4937 if (Shift) { 4938 Divisor.ashrInPlace(Shift); 4939 UseSRA = true; 4940 } 4941 // Calculate the multiplicative inverse, using Newton's method. 4942 APInt t; 4943 APInt Factor = Divisor; 4944 while ((t = Divisor * Factor) != 1) 4945 Factor *= APInt(Divisor.getBitWidth(), 2) - t; 4946 Shifts.push_back(DAG.getConstant(Shift, dl, ShSVT)); 4947 Factors.push_back(DAG.getConstant(Factor, dl, SVT)); 4948 return true; 4949 }; 4950 4951 // Collect all magic values from the build vector. 4952 if (!ISD::matchUnaryPredicate(Op1, BuildSDIVPattern)) 4953 return SDValue(); 4954 4955 SDValue Shift, Factor; 4956 if (VT.isVector()) { 4957 Shift = DAG.getBuildVector(ShVT, dl, Shifts); 4958 Factor = DAG.getBuildVector(VT, dl, Factors); 4959 } else { 4960 Shift = Shifts[0]; 4961 Factor = Factors[0]; 4962 } 4963 4964 SDValue Res = Op0; 4965 4966 // Shift the value upfront if it is even, so the LSB is one. 
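  // Worked example (illustrative, W == 32): for an exact sdiv by 6 == 3 * 2^1
  // we emit (mul (sra x, 1), 0xAAAAAAAB), where 0xAAAAAAAB is the
  // multiplicative inverse of 3 mod 2^32 (3 * 0xAAAAAAAB == 2^33 + 1).
  // E.g. x == 18: (18 >> 1) * 0xAAAAAAAB == 9 * 0xAAAAAAAB == 3 (mod 2^32).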
4967 if (UseSRA) { 4968 // TODO: For UDIV use SRL instead of SRA. 4969 SDNodeFlags Flags; 4970 Flags.setExact(true); 4971 Res = DAG.getNode(ISD::SRA, dl, VT, Res, Shift, Flags); 4972 Created.push_back(Res.getNode()); 4973 } 4974 4975 return DAG.getNode(ISD::MUL, dl, VT, Res, Factor); 4976 } 4977 4978 SDValue TargetLowering::BuildSDIVPow2(SDNode *N, const APInt &Divisor, 4979 SelectionDAG &DAG, 4980 SmallVectorImpl<SDNode *> &Created) const { 4981 AttributeList Attr = DAG.getMachineFunction().getFunction().getAttributes(); 4982 const TargetLowering &TLI = DAG.getTargetLoweringInfo(); 4983 if (TLI.isIntDivCheap(N->getValueType(0), Attr)) 4984 return SDValue(N, 0); // Lower SDIV as SDIV 4985 return SDValue(); 4986 } 4987 4988 /// Given an ISD::SDIV node expressing a divide by constant, 4989 /// return a DAG expression to select that will generate the same value by 4990 /// multiplying by a magic number. 4991 /// Ref: "Hacker's Delight" or "The PowerPC Compiler Writer's Guide". 4992 SDValue TargetLowering::BuildSDIV(SDNode *N, SelectionDAG &DAG, 4993 bool IsAfterLegalization, 4994 SmallVectorImpl<SDNode *> &Created) const { 4995 SDLoc dl(N); 4996 EVT VT = N->getValueType(0); 4997 EVT SVT = VT.getScalarType(); 4998 EVT ShVT = getShiftAmountTy(VT, DAG.getDataLayout()); 4999 EVT ShSVT = ShVT.getScalarType(); 5000 unsigned EltBits = VT.getScalarSizeInBits(); 5001 5002 // Check to see if we can do this. 5003 // FIXME: We should be more aggressive here. 5004 if (!isTypeLegal(VT)) 5005 return SDValue(); 5006 5007 // If the sdiv has an 'exact' bit we can use a simpler lowering. 5008 if (N->getFlags().hasExact()) 5009 return BuildExactSDIV(*this, N, dl, DAG, Created); 5010 5011 SmallVector<SDValue, 16> MagicFactors, Factors, Shifts, ShiftMasks; 5012 5013 auto BuildSDIVPattern = [&](ConstantSDNode *C) { 5014 if (C->isNullValue()) 5015 return false; 5016 5017 const APInt &Divisor = C->getAPIntValue(); 5018 APInt::ms magics = Divisor.magic(); 5019 int NumeratorFactor = 0; 5020 int ShiftMask = -1; 5021 5022 if (Divisor.isOneValue() || Divisor.isAllOnesValue()) { 5023 // If d is +1/-1, we just multiply the numerator by +1/-1. 5024 NumeratorFactor = Divisor.getSExtValue(); 5025 magics.m = 0; 5026 magics.s = 0; 5027 ShiftMask = 0; 5028 } else if (Divisor.isStrictlyPositive() && magics.m.isNegative()) { 5029 // If d > 0 and m < 0, add the numerator. 5030 NumeratorFactor = 1; 5031 } else if (Divisor.isNegative() && magics.m.isStrictlyPositive()) { 5032 // If d < 0 and m > 0, subtract the numerator. 5033 NumeratorFactor = -1; 5034 } 5035 5036 MagicFactors.push_back(DAG.getConstant(magics.m, dl, SVT)); 5037 Factors.push_back(DAG.getConstant(NumeratorFactor, dl, SVT)); 5038 Shifts.push_back(DAG.getConstant(magics.s, dl, ShSVT)); 5039 ShiftMasks.push_back(DAG.getConstant(ShiftMask, dl, SVT)); 5040 return true; 5041 }; 5042 5043 SDValue N0 = N->getOperand(0); 5044 SDValue N1 = N->getOperand(1); 5045 5046 // Collect the shifts / magic values from each element. 
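  // Worked example (illustrative, W == 32, per the Hacker's Delight tables):
  // for D == 7, magic() yields m == 0x92492493 (negative) and s == 2, so the
  // lambda records NumeratorFactor == 1 and the expansion built below is
  //   Q = MULHS(N0, 0x92492493); Q += N0; Q >>= 2; Q += (Q u>> 31);
  // e.g. N0 == -7: MULHS == 2, +(-7) == -5, >>2 == -2, +1 == -1 == -7 / 7.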
5047 if (!ISD::matchUnaryPredicate(N1, BuildSDIVPattern)) 5048 return SDValue(); 5049 5050 SDValue MagicFactor, Factor, Shift, ShiftMask; 5051 if (VT.isVector()) { 5052 MagicFactor = DAG.getBuildVector(VT, dl, MagicFactors); 5053 Factor = DAG.getBuildVector(VT, dl, Factors); 5054 Shift = DAG.getBuildVector(ShVT, dl, Shifts); 5055 ShiftMask = DAG.getBuildVector(VT, dl, ShiftMasks); 5056 } else { 5057 MagicFactor = MagicFactors[0]; 5058 Factor = Factors[0]; 5059 Shift = Shifts[0]; 5060 ShiftMask = ShiftMasks[0]; 5061 } 5062 5063 // Multiply the numerator (operand 0) by the magic value. 5064 // FIXME: We should support doing a MUL in a wider type. 5065 SDValue Q; 5066 if (IsAfterLegalization ? isOperationLegal(ISD::MULHS, VT) 5067 : isOperationLegalOrCustom(ISD::MULHS, VT)) 5068 Q = DAG.getNode(ISD::MULHS, dl, VT, N0, MagicFactor); 5069 else if (IsAfterLegalization ? isOperationLegal(ISD::SMUL_LOHI, VT) 5070 : isOperationLegalOrCustom(ISD::SMUL_LOHI, VT)) { 5071 SDValue LoHi = 5072 DAG.getNode(ISD::SMUL_LOHI, dl, DAG.getVTList(VT, VT), N0, MagicFactor); 5073 Q = SDValue(LoHi.getNode(), 1); 5074 } else 5075 return SDValue(); // No mulhs or equivalent. 5076 Created.push_back(Q.getNode()); 5077 5078 // (Optionally) Add/subtract the numerator using Factor. 5079 Factor = DAG.getNode(ISD::MUL, dl, VT, N0, Factor); 5080 Created.push_back(Factor.getNode()); 5081 Q = DAG.getNode(ISD::ADD, dl, VT, Q, Factor); 5082 Created.push_back(Q.getNode()); 5083 5084 // Shift right algebraic by shift value. 5085 Q = DAG.getNode(ISD::SRA, dl, VT, Q, Shift); 5086 Created.push_back(Q.getNode()); 5087 5088 // Extract the sign bit, mask it and add it to the quotient. 5089 SDValue SignShift = DAG.getConstant(EltBits - 1, dl, ShVT); 5090 SDValue T = DAG.getNode(ISD::SRL, dl, VT, Q, SignShift); 5091 Created.push_back(T.getNode()); 5092 T = DAG.getNode(ISD::AND, dl, VT, T, ShiftMask); 5093 Created.push_back(T.getNode()); 5094 return DAG.getNode(ISD::ADD, dl, VT, Q, T); 5095 } 5096 5097 /// Given an ISD::UDIV node expressing a divide by constant, 5098 /// return a DAG expression to select that will generate the same value by 5099 /// multiplying by a magic number. 5100 /// Ref: "Hacker's Delight" or "The PowerPC Compiler Writer's Guide". 5101 SDValue TargetLowering::BuildUDIV(SDNode *N, SelectionDAG &DAG, 5102 bool IsAfterLegalization, 5103 SmallVectorImpl<SDNode *> &Created) const { 5104 SDLoc dl(N); 5105 EVT VT = N->getValueType(0); 5106 EVT SVT = VT.getScalarType(); 5107 EVT ShVT = getShiftAmountTy(VT, DAG.getDataLayout()); 5108 EVT ShSVT = ShVT.getScalarType(); 5109 unsigned EltBits = VT.getScalarSizeInBits(); 5110 5111 // Check to see if we can do this. 5112 // FIXME: We should be more aggressive here. 5113 if (!isTypeLegal(VT)) 5114 return SDValue(); 5115 5116 bool UseNPQ = false; 5117 SmallVector<SDValue, 16> PreShifts, PostShifts, MagicFactors, NPQFactors; 5118 5119 auto BuildUDIVPattern = [&](ConstantSDNode *C) { 5120 if (C->isNullValue()) 5121 return false; 5122 // FIXME: We should use a narrower constant when the upper 5123 // bits are known to be zero. 5124 APInt Divisor = C->getAPIntValue(); 5125 APInt::mu magics = Divisor.magicu(); 5126 unsigned PreShift = 0, PostShift = 0; 5127 5128 // If the divisor is even, we can avoid using the expensive fixup by 5129 // shifting the divided value upfront. 5130 if (magics.a != 0 && !Divisor[0]) { 5131 PreShift = Divisor.countTrailingZeros(); 5132 // Get magic number for the shifted divisor. 
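      // E.g. (illustrative, W == 32): for D == 14 == 7 * 2^1, PreShift == 1
      // and the magic for 7 is recomputed knowing the pre-shifted dividend
      // has one leading zero bit, which is what lets the assert below expect
      // magics.a == 0.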
      magics = Divisor.lshr(PreShift).magicu(PreShift);
      assert(magics.a == 0 && "Should use cheap fixup now");
    }

    APInt Magic = magics.m;

    bool SelNPQ;
    if (magics.a == 0 || Divisor.isOneValue()) {
      assert(magics.s < Divisor.getBitWidth() &&
             "We shouldn't generate an undefined shift!");
      PostShift = magics.s;
      SelNPQ = false;
    } else {
      PostShift = magics.s - 1;
      SelNPQ = true;
    }

    PreShifts.push_back(DAG.getConstant(PreShift, dl, ShSVT));
    MagicFactors.push_back(DAG.getConstant(Magic, dl, SVT));
    NPQFactors.push_back(
        DAG.getConstant(SelNPQ ? APInt::getOneBitSet(EltBits, EltBits - 1)
                               : APInt::getNullValue(EltBits),
                        dl, SVT));
    PostShifts.push_back(DAG.getConstant(PostShift, dl, ShSVT));
    UseNPQ |= SelNPQ;
    return true;
  };

  SDValue N0 = N->getOperand(0);
  SDValue N1 = N->getOperand(1);

  // Collect the shifts/magic values from each element.
  if (!ISD::matchUnaryPredicate(N1, BuildUDIVPattern))
    return SDValue();

  SDValue PreShift, PostShift, MagicFactor, NPQFactor;
  if (VT.isVector()) {
    PreShift = DAG.getBuildVector(ShVT, dl, PreShifts);
    MagicFactor = DAG.getBuildVector(VT, dl, MagicFactors);
    NPQFactor = DAG.getBuildVector(VT, dl, NPQFactors);
    PostShift = DAG.getBuildVector(ShVT, dl, PostShifts);
  } else {
    PreShift = PreShifts[0];
    MagicFactor = MagicFactors[0];
    PostShift = PostShifts[0];
  }

  SDValue Q = N0;
  Q = DAG.getNode(ISD::SRL, dl, VT, Q, PreShift);
  Created.push_back(Q.getNode());

  // FIXME: We should support doing a MUL in a wider type.
  auto GetMULHU = [&](SDValue X, SDValue Y) {
    if (IsAfterLegalization ? isOperationLegal(ISD::MULHU, VT)
                            : isOperationLegalOrCustom(ISD::MULHU, VT))
      return DAG.getNode(ISD::MULHU, dl, VT, X, Y);
    if (IsAfterLegalization ? isOperationLegal(ISD::UMUL_LOHI, VT)
                            : isOperationLegalOrCustom(ISD::UMUL_LOHI, VT)) {
      SDValue LoHi =
          DAG.getNode(ISD::UMUL_LOHI, dl, DAG.getVTList(VT, VT), X, Y);
      return SDValue(LoHi.getNode(), 1);
    }
    return SDValue(); // No mulhu or equivalent
  };

  // Multiply the numerator (operand 0) by the magic value.
  Q = GetMULHU(Q, MagicFactor);
  if (!Q)
    return SDValue();

  Created.push_back(Q.getNode());

  if (UseNPQ) {
    SDValue NPQ = DAG.getNode(ISD::SUB, dl, VT, N0, Q);
    Created.push_back(NPQ.getNode());

    // For vectors we might have a mix of non-NPQ/NPQ paths, so use
    // MULHU to act as a SRL-by-1 for NPQ, else multiply by zero.
    if (VT.isVector())
      NPQ = GetMULHU(NPQ, NPQFactor);
    else
      NPQ = DAG.getNode(ISD::SRL, dl, VT, NPQ, DAG.getConstant(1, dl, ShVT));

    Created.push_back(NPQ.getNode());

    Q = DAG.getNode(ISD::ADD, dl, VT, NPQ, Q);
    Created.push_back(Q.getNode());
  }

  Q = DAG.getNode(ISD::SRL, dl, VT, Q, PostShift);
  Created.push_back(Q.getNode());

  SDValue One = DAG.getConstant(1, dl, VT);
  SDValue IsOne = DAG.getSetCC(dl, VT, N1, One, ISD::SETEQ);
  return DAG.getSelect(dl, VT, IsOne, N0, Q);
}

/// If all values in Values that *don't* match the predicate are the same
/// 'splat' value, then replace all values with that splat value.
/// Else, if AlternativeReplacement was provided, then replace all values that
/// do match the predicate with the AlternativeReplacement value.
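/// E.g. {0, 2, 0, 2} with an is-zero predicate becomes {2, 2, 2, 2}, while
/// for {0, 2, 0, 3} no single splat value exists, so the zeros are replaced
/// with AlternativeReplacement if one was provided, and left alone otherwise.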
5234 static void 5235 turnVectorIntoSplatVector(MutableArrayRef<SDValue> Values, 5236 std::function<bool(SDValue)> Predicate, 5237 SDValue AlternativeReplacement = SDValue()) { 5238 SDValue Replacement; 5239 // Is there a value for which the Predicate does *NOT* match? What is it? 5240 auto SplatValue = llvm::find_if_not(Values, Predicate); 5241 if (SplatValue != Values.end()) { 5242 // Does Values consist only of SplatValue's and values matching Predicate? 5243 if (llvm::all_of(Values, [Predicate, SplatValue](SDValue Value) { 5244 return Value == *SplatValue || Predicate(Value); 5245 })) // Then we shall replace values matching predicate with SplatValue. 5246 Replacement = *SplatValue; 5247 } 5248 if (!Replacement) { 5249 // Oops, we did not find the "baseline" splat value. 5250 if (!AlternativeReplacement) 5251 return; // Nothing to do. 5252 // Let's replace with provided value then. 5253 Replacement = AlternativeReplacement; 5254 } 5255 std::replace_if(Values.begin(), Values.end(), Predicate, Replacement); 5256 } 5257 5258 /// Given an ISD::UREM used only by an ISD::SETEQ or ISD::SETNE 5259 /// where the divisor is constant and the comparison target is zero, 5260 /// return a DAG expression that will generate the same comparison result 5261 /// using only multiplications, additions and shifts/rotations. 5262 /// Ref: "Hacker's Delight" 10-17. 5263 SDValue TargetLowering::buildUREMEqFold(EVT SETCCVT, SDValue REMNode, 5264 SDValue CompTargetNode, 5265 ISD::CondCode Cond, 5266 DAGCombinerInfo &DCI, 5267 const SDLoc &DL) const { 5268 SmallVector<SDNode *, 5> Built; 5269 if (SDValue Folded = prepareUREMEqFold(SETCCVT, REMNode, CompTargetNode, Cond, 5270 DCI, DL, Built)) { 5271 for (SDNode *N : Built) 5272 DCI.AddToWorklist(N); 5273 return Folded; 5274 } 5275 5276 return SDValue(); 5277 } 5278 5279 SDValue 5280 TargetLowering::prepareUREMEqFold(EVT SETCCVT, SDValue REMNode, 5281 SDValue CompTargetNode, ISD::CondCode Cond, 5282 DAGCombinerInfo &DCI, const SDLoc &DL, 5283 SmallVectorImpl<SDNode *> &Created) const { 5284 // fold (seteq/ne (urem N, D), 0) -> (setule/ugt (rotr (mul N, P), K), Q) 5285 // - D must be constant, with D = D0 * 2^K where D0 is odd 5286 // - P is the multiplicative inverse of D0 modulo 2^W 5287 // - Q = floor(((2^W) - 1) / D) 5288 // where W is the width of the common type of N and D. 5289 assert((Cond == ISD::SETEQ || Cond == ISD::SETNE) && 5290 "Only applicable for (in)equality comparisons."); 5291 5292 SelectionDAG &DAG = DCI.DAG; 5293 5294 EVT VT = REMNode.getValueType(); 5295 EVT SVT = VT.getScalarType(); 5296 EVT ShVT = getShiftAmountTy(VT, DAG.getDataLayout()); 5297 EVT ShSVT = ShVT.getScalarType(); 5298 5299 // If MUL is unavailable, we cannot proceed in any case. 5300 if (!isOperationLegalOrCustom(ISD::MUL, VT)) 5301 return SDValue(); 5302 5303 bool ComparingWithAllZeros = true; 5304 bool AllComparisonsWithNonZerosAreTautological = true; 5305 bool HadTautologicalLanes = false; 5306 bool AllLanesAreTautological = true; 5307 bool HadEvenDivisor = false; 5308 bool AllDivisorsArePowerOfTwo = true; 5309 bool HadTautologicalInvertedLanes = false; 5310 SmallVector<SDValue, 16> PAmts, KAmts, QAmts, IAmts; 5311 5312 auto BuildUREMPattern = [&](ConstantSDNode *CDiv, ConstantSDNode *CCmp) { 5313 // Division by 0 is UB. Leave it to be constant-folded elsewhere. 
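    // Worked example for the rest of this lambda (illustrative, W == 32):
    // for 'x u% 6 == 0', D0 == 3, K == 1, P == 0xAAAAAAAB (the inverse of 3
    // mod 2^32) and Q == floor((2^32 - 1) / 6) == 0x2AAAAAAA, so the fold
    // emits (setule (rotr (mul x, 0xAAAAAAAB), 1), 0x2AAAAAAA).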
    if (CDiv->isNullValue())
      return false;

    const APInt &D = CDiv->getAPIntValue();
    const APInt &Cmp = CCmp->getAPIntValue();

    ComparingWithAllZeros &= Cmp.isNullValue();

    // `x u% C1` is *always* less than C1. So given `x u% C1 == C2`,
    // if C2 is not less than C1, the comparison is always false.
    // But we will only be able to produce the comparison that will give the
    // opposite tautological answer. So this lane would need to be fixed up.
    bool TautologicalInvertedLane = D.ule(Cmp);
    HadTautologicalInvertedLanes |= TautologicalInvertedLane;

    // If all lanes are tautological (either all divisors are ones, or divisor
    // is not greater than the constant we are comparing with),
    // we will prefer to avoid the fold.
    bool TautologicalLane = D.isOneValue() || TautologicalInvertedLane;
    HadTautologicalLanes |= TautologicalLane;
    AllLanesAreTautological &= TautologicalLane;

    // If we are comparing with non-zero, we'll need to subtract said
    // comparison value from the LHS. But there is no point in doing that if
    // every lane where we are comparing with non-zero is tautological.
    if (!Cmp.isNullValue())
      AllComparisonsWithNonZerosAreTautological &= TautologicalLane;

    // Decompose D into D0 * 2^K
    unsigned K = D.countTrailingZeros();
    assert((!D.isOneValue() || (K == 0)) && "For divisor '1' we won't rotate.");
    APInt D0 = D.lshr(K);

    // D is even if it has trailing zeros.
    HadEvenDivisor |= (K != 0);
    // D is a power-of-two if D0 is one.
    // If all divisors are power-of-two, we will prefer to avoid the fold.
    AllDivisorsArePowerOfTwo &= D0.isOneValue();

    // P = inv(D0, 2^W)
    // 2^W requires W + 1 bits, so we have to extend and then truncate.
    unsigned W = D.getBitWidth();
    APInt P = D0.zext(W + 1)
                  .multiplicativeInverse(APInt::getSignedMinValue(W + 1))
                  .trunc(W);
    assert(!P.isNullValue() && "No multiplicative inverse!"); // unreachable
    assert((D0 * P).isOneValue() && "Multiplicative inverse sanity check.");

    // Q = floor((2^W - 1) u/ D)
    // R = ((2^W - 1) u% D)
    APInt Q, R;
    APInt::udivrem(APInt::getAllOnesValue(W), D, Q, R);

    // If we are comparing with zero, then that comparison constant is okay,
    // else it may need to be one less than that.
    if (Cmp.ugt(R))
      Q -= 1;

    assert(APInt::getAllOnesValue(ShSVT.getSizeInBits()).ugt(K) &&
           "We are expecting that K is always less than all-ones for ShSVT");

    // If the lane is tautological the result can be constant-folded.
    if (TautologicalLane) {
      // Set P and K to bogus values so we can try to splat them.
      P = 0;
      K = -1;
      // And ensure that the comparison constant is tautological,
      // i.e. it will always compare true/false.
      Q = -1;
    }

    PAmts.push_back(DAG.getConstant(P, DL, SVT));
    KAmts.push_back(
        DAG.getConstant(APInt(ShSVT.getSizeInBits(), K), DL, ShSVT));
    QAmts.push_back(DAG.getConstant(Q, DL, SVT));
    return true;
  };

  SDValue N = REMNode.getOperand(0);
  SDValue D = REMNode.getOperand(1);

  // Collect the values from each element.
  if (!ISD::matchBinaryPredicate(D, CompTargetNode, BuildUREMPattern))
    return SDValue();

  // If all lanes are tautological, the result can be constant-folded.
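  // (E.g. 'x u% 1 == 0' is true for every x, and 'x u% 5 == 7' is false for
  // every x; such cases are left to generic constant folding instead.)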
  if (AllLanesAreTautological)
    return SDValue();

  // If this is a urem by a power-of-two, avoid the fold since it can be
  // best implemented as a bit test.
  if (AllDivisorsArePowerOfTwo)
    return SDValue();

  SDValue PVal, KVal, QVal;
  if (VT.isVector()) {
    if (HadTautologicalLanes) {
      // Try to turn PAmts into a splat, since we don't care about the values
      // that are currently '0'. If we can't, just keep '0's.
      turnVectorIntoSplatVector(PAmts, isNullConstant);
      // Try to turn KAmts into a splat, since we don't care about the values
      // that are currently '-1'. If we can't, change them to '0's.
      turnVectorIntoSplatVector(KAmts, isAllOnesConstant,
                                DAG.getConstant(0, DL, ShSVT));
    }

    PVal = DAG.getBuildVector(VT, DL, PAmts);
    KVal = DAG.getBuildVector(ShVT, DL, KAmts);
    QVal = DAG.getBuildVector(VT, DL, QAmts);
  } else {
    PVal = PAmts[0];
    KVal = KAmts[0];
    QVal = QAmts[0];
  }

  if (!ComparingWithAllZeros && !AllComparisonsWithNonZerosAreTautological) {
    if (!isOperationLegalOrCustom(ISD::SUB, VT))
      return SDValue(); // FIXME: Could/should use `ISD::ADD`?
    assert(CompTargetNode.getValueType() == N.getValueType() &&
           "Expecting that the types on LHS and RHS of comparisons match.");
    N = DAG.getNode(ISD::SUB, DL, VT, N, CompTargetNode);
  }

  // (mul N, P)
  SDValue Op0 = DAG.getNode(ISD::MUL, DL, VT, N, PVal);
  Created.push_back(Op0.getNode());

  // Rotate right only if any divisor was even. We avoid rotates for all-odd
  // divisors as a performance improvement, since rotating by 0 is a no-op.
  if (HadEvenDivisor) {
    // We need ROTR to do this.
    if (!isOperationLegalOrCustom(ISD::ROTR, VT))
      return SDValue();
    SDNodeFlags Flags;
    Flags.setExact(true);
    // UREM: (rotr (mul N, P), K)
    Op0 = DAG.getNode(ISD::ROTR, DL, VT, Op0, KVal, Flags);
    Created.push_back(Op0.getNode());
  }

  // UREM: (setule/setugt (rotr (mul N, P), K), Q)
  SDValue NewCC =
      DAG.getSetCC(DL, SETCCVT, Op0, QVal,
                   ((Cond == ISD::SETEQ) ? ISD::SETULE : ISD::SETUGT));
  if (!HadTautologicalInvertedLanes)
    return NewCC;

  // If any lanes previously compared always-false, the NewCC will give
  // always-true result for them, so we need to fixup those lanes.
  // Or the other way around for the inequality predicate.
  assert(VT.isVector() && "Can/should only get here for vectors.");
  Created.push_back(NewCC.getNode());

  // `x u% C1` is *always* less than C1. So given `x u% C1 == C2`,
  // if C2 is not less than C1, the comparison is always false.
  // But we have produced the comparison that will give the
  // opposite tautological answer. So these lanes would need to be fixed up.
  SDValue TautologicalInvertedChannels =
      DAG.getSetCC(DL, SETCCVT, D, CompTargetNode, ISD::SETULE);
  Created.push_back(TautologicalInvertedChannels.getNode());

  if (isOperationLegalOrCustom(ISD::VSELECT, SETCCVT)) {
    // If we have a vector select, let's replace the comparison results in the
    // affected lanes with the correct tautological result.
    SDValue Replacement = DAG.getBoolConstant(Cond != ISD::SETEQ,
                                              DL, SETCCVT, SETCCVT);
    return DAG.getNode(ISD::VSELECT, DL, SETCCVT, TautologicalInvertedChannels,
                       Replacement, NewCC);
  }

  // Else, we can just invert the comparison result in the appropriate lanes.
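  // (XOR works here because TautologicalInvertedChannels is boolean-true
  // exactly in the lanes that got the wrong tautological answer; XOR with
  // true flips those lanes and leaves every other lane unchanged.)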
5485 if (isOperationLegalOrCustom(ISD::XOR, SETCCVT)) 5486 return DAG.getNode(ISD::XOR, DL, SETCCVT, NewCC, 5487 TautologicalInvertedChannels); 5488 5489 return SDValue(); // Don't know how to lower. 5490 } 5491 5492 /// Given an ISD::SREM used only by an ISD::SETEQ or ISD::SETNE 5493 /// where the divisor is constant and the comparison target is zero, 5494 /// return a DAG expression that will generate the same comparison result 5495 /// using only multiplications, additions and shifts/rotations. 5496 /// Ref: "Hacker's Delight" 10-17. 5497 SDValue TargetLowering::buildSREMEqFold(EVT SETCCVT, SDValue REMNode, 5498 SDValue CompTargetNode, 5499 ISD::CondCode Cond, 5500 DAGCombinerInfo &DCI, 5501 const SDLoc &DL) const { 5502 SmallVector<SDNode *, 7> Built; 5503 if (SDValue Folded = prepareSREMEqFold(SETCCVT, REMNode, CompTargetNode, Cond, 5504 DCI, DL, Built)) { 5505 assert(Built.size() <= 7 && "Max size prediction failed."); 5506 for (SDNode *N : Built) 5507 DCI.AddToWorklist(N); 5508 return Folded; 5509 } 5510 5511 return SDValue(); 5512 } 5513 5514 SDValue 5515 TargetLowering::prepareSREMEqFold(EVT SETCCVT, SDValue REMNode, 5516 SDValue CompTargetNode, ISD::CondCode Cond, 5517 DAGCombinerInfo &DCI, const SDLoc &DL, 5518 SmallVectorImpl<SDNode *> &Created) const { 5519 // Fold: 5520 // (seteq/ne (srem N, D), 0) 5521 // To: 5522 // (setule/ugt (rotr (add (mul N, P), A), K), Q) 5523 // 5524 // - D must be constant, with D = D0 * 2^K where D0 is odd 5525 // - P is the multiplicative inverse of D0 modulo 2^W 5526 // - A = bitwiseand(floor((2^(W - 1) - 1) / D0), (-(2^k))) 5527 // - Q = floor((2 * A) / (2^K)) 5528 // where W is the width of the common type of N and D. 5529 assert((Cond == ISD::SETEQ || Cond == ISD::SETNE) && 5530 "Only applicable for (in)equality comparisons."); 5531 5532 SelectionDAG &DAG = DCI.DAG; 5533 5534 EVT VT = REMNode.getValueType(); 5535 EVT SVT = VT.getScalarType(); 5536 EVT ShVT = getShiftAmountTy(VT, DAG.getDataLayout()); 5537 EVT ShSVT = ShVT.getScalarType(); 5538 5539 // If MUL is unavailable, we cannot proceed in any case. 5540 if (!isOperationLegalOrCustom(ISD::MUL, VT)) 5541 return SDValue(); 5542 5543 // TODO: Could support comparing with non-zero too. 5544 ConstantSDNode *CompTarget = isConstOrConstSplat(CompTargetNode); 5545 if (!CompTarget || !CompTarget->isNullValue()) 5546 return SDValue(); 5547 5548 bool HadIntMinDivisor = false; 5549 bool HadOneDivisor = false; 5550 bool AllDivisorsAreOnes = true; 5551 bool HadEvenDivisor = false; 5552 bool NeedToApplyOffset = false; 5553 bool AllDivisorsArePowerOfTwo = true; 5554 SmallVector<SDValue, 16> PAmts, AAmts, KAmts, QAmts; 5555 5556 auto BuildSREMPattern = [&](ConstantSDNode *C) { 5557 // Division by 0 is UB. Leave it to be constant-folded elsewhere. 5558 if (C->isNullValue()) 5559 return false; 5560 5561 // FIXME: we don't fold `rem %X, -C` to `rem %X, C` in DAGCombine. 5562 5563 // WARNING: this fold is only valid for positive divisors! 5564 APInt D = C->getAPIntValue(); 5565 if (D.isNegative()) 5566 D.negate(); // `rem %X, -C` is equivalent to `rem %X, C` 5567 5568 HadIntMinDivisor |= D.isMinSignedValue(); 5569 5570 // If all divisors are ones, we will prefer to avoid the fold. 
    HadOneDivisor |= D.isOneValue();
    AllDivisorsAreOnes &= D.isOneValue();

    // Decompose D into D0 * 2^K
    unsigned K = D.countTrailingZeros();
    assert((!D.isOneValue() || (K == 0)) && "For divisor '1' we won't rotate.");
    APInt D0 = D.lshr(K);

    if (!D.isMinSignedValue()) {
      // D is even if it has trailing zeros; unless it's INT_MIN, in which case
      // we don't care about this lane in this fold, we'll special-handle it.
      HadEvenDivisor |= (K != 0);
    }

    // D is a power-of-two if D0 is one. This includes INT_MIN.
    // If all divisors are power-of-two, we will prefer to avoid the fold.
    AllDivisorsArePowerOfTwo &= D0.isOneValue();

    // P = inv(D0, 2^W)
    // 2^W requires W + 1 bits, so we have to extend and then truncate.
    unsigned W = D.getBitWidth();
    APInt P = D0.zext(W + 1)
                  .multiplicativeInverse(APInt::getSignedMinValue(W + 1))
                  .trunc(W);
    assert(!P.isNullValue() && "No multiplicative inverse!"); // unreachable
    assert((D0 * P).isOneValue() && "Multiplicative inverse sanity check.");

    // A = floor((2^(W - 1) - 1) / D0) & -2^K
    APInt A = APInt::getSignedMaxValue(W).udiv(D0);
    A.clearLowBits(K);

    if (!D.isMinSignedValue()) {
      // An offset is needed unless the divisor is INT_MIN, in which case
      // we don't care about this lane in this fold, we'll special-handle it.
      NeedToApplyOffset |= A != 0;
    }

    // Q = floor((2 * A) / (2^K))
    APInt Q = (2 * A).udiv(APInt::getOneBitSet(W, K));

    assert(APInt::getAllOnesValue(SVT.getSizeInBits()).ugt(A) &&
           "We are expecting that A is always less than all-ones for SVT");
    assert(APInt::getAllOnesValue(ShSVT.getSizeInBits()).ugt(K) &&
           "We are expecting that K is always less than all-ones for ShSVT");

    // If the divisor is 1 the result can be constant-folded. Likewise, we
    // don't care about INT_MIN lanes, those can be set to undef if appropriate.
    if (D.isOneValue()) {
      // Set P, A and K to bogus values so we can try to splat them.
      P = 0;
      A = -1;
      K = -1;

      // x ?% 1 == 0  <-->  true  <-->  x u<= -1
      Q = -1;
    }

    PAmts.push_back(DAG.getConstant(P, DL, SVT));
    AAmts.push_back(DAG.getConstant(A, DL, SVT));
    KAmts.push_back(
        DAG.getConstant(APInt(ShSVT.getSizeInBits(), K), DL, ShSVT));
    QAmts.push_back(DAG.getConstant(Q, DL, SVT));
    return true;
  };

  SDValue N = REMNode.getOperand(0);
  SDValue D = REMNode.getOperand(1);

  // Collect the values from each element.
  if (!ISD::matchUnaryPredicate(D, BuildSREMPattern))
    return SDValue();

  // If this is a srem by a one, avoid the fold since it can be constant-folded.
  if (AllDivisorsAreOnes)
    return SDValue();

  // If this is a srem by a power-of-two (including INT_MIN), avoid the fold
  // since it can be best implemented as a bit test.
  if (AllDivisorsArePowerOfTwo)
    return SDValue();

  SDValue PVal, AVal, KVal, QVal;
  if (VT.isVector()) {
    if (HadOneDivisor) {
      // Try to turn PAmts into a splat, since we don't care about the values
      // that are currently '0'. If we can't, just keep '0's.
      turnVectorIntoSplatVector(PAmts, isNullConstant);
      // Try to turn AAmts into a splat, since we don't care about the
      // values that are currently '-1'. If we can't, change them to '0's.
      turnVectorIntoSplatVector(AAmts, isAllOnesConstant,
                                DAG.getConstant(0, DL, SVT));
      // Try to turn KAmts into a splat, since we don't care about the values
      // that are currently '-1'. If we can't, change them to '0's.
      turnVectorIntoSplatVector(KAmts, isAllOnesConstant,
                                DAG.getConstant(0, DL, ShSVT));
    }

    PVal = DAG.getBuildVector(VT, DL, PAmts);
    AVal = DAG.getBuildVector(VT, DL, AAmts);
    KVal = DAG.getBuildVector(ShVT, DL, KAmts);
    QVal = DAG.getBuildVector(VT, DL, QAmts);
  } else {
    PVal = PAmts[0];
    AVal = AAmts[0];
    KVal = KAmts[0];
    QVal = QAmts[0];
  }

  // (mul N, P)
  SDValue Op0 = DAG.getNode(ISD::MUL, DL, VT, N, PVal);
  Created.push_back(Op0.getNode());

  if (NeedToApplyOffset) {
    // We need ADD to do this.
    if (!isOperationLegalOrCustom(ISD::ADD, VT))
      return SDValue();

    // (add (mul N, P), A)
    Op0 = DAG.getNode(ISD::ADD, DL, VT, Op0, AVal);
    Created.push_back(Op0.getNode());
  }

  // Rotate right only if any divisor was even. We avoid rotates for all-odd
  // divisors as a performance improvement, since rotating by 0 is a no-op.
  if (HadEvenDivisor) {
    // We need ROTR to do this.
    if (!isOperationLegalOrCustom(ISD::ROTR, VT))
      return SDValue();
    SDNodeFlags Flags;
    Flags.setExact(true);
    // SREM: (rotr (add (mul N, P), A), K)
    Op0 = DAG.getNode(ISD::ROTR, DL, VT, Op0, KVal, Flags);
    Created.push_back(Op0.getNode());
  }

  // SREM: (setule/setugt (rotr (add (mul N, P), A), K), Q)
  SDValue Fold =
      DAG.getSetCC(DL, SETCCVT, Op0, QVal,
                   ((Cond == ISD::SETEQ) ? ISD::SETULE : ISD::SETUGT));

  // If we didn't have lanes with INT_MIN divisor, then we're done.
  if (!HadIntMinDivisor)
    return Fold;

  // That fold is only valid for positive divisors. Which effectively means,
  // it is invalid for INT_MIN divisors. So if we have such a lane,
  // we must fix-up results for said lanes.
  assert(VT.isVector() && "Can/should only get here for vectors.");

  if (!isOperationLegalOrCustom(ISD::SETEQ, VT) ||
      !isOperationLegalOrCustom(ISD::AND, VT) ||
      !isOperationLegalOrCustom(Cond, VT) ||
      !isOperationLegalOrCustom(ISD::VSELECT, VT))
    return SDValue();

  Created.push_back(Fold.getNode());

  SDValue IntMin = DAG.getConstant(
      APInt::getSignedMinValue(SVT.getScalarSizeInBits()), DL, VT);
  SDValue IntMax = DAG.getConstant(
      APInt::getSignedMaxValue(SVT.getScalarSizeInBits()), DL, VT);
  SDValue Zero =
      DAG.getConstant(APInt::getNullValue(SVT.getScalarSizeInBits()), DL, VT);

  // Which lanes had INT_MIN divisors? Divisor is constant, so const-folded.
  SDValue DivisorIsIntMin = DAG.getSetCC(DL, SETCCVT, D, IntMin, ISD::SETEQ);
  Created.push_back(DivisorIsIntMin.getNode());

  // (N s% INT_MIN) ==/!= 0  <-->  (N & INT_MAX) ==/!= 0
  SDValue Masked = DAG.getNode(ISD::AND, DL, VT, N, IntMax);
  Created.push_back(Masked.getNode());
  SDValue MaskedIsZero = DAG.getSetCC(DL, SETCCVT, Masked, Zero, Cond);
  Created.push_back(MaskedIsZero.getNode());

  // To produce the final result we need to blend 2 vectors: 'Fold' and
  // 'MaskedIsZero'. If the divisor for a channel was *NOT* INT_MIN, we pick
  // from 'Fold', else we pick from 'MaskedIsZero'. Since 'DivisorIsIntMin' is
  // constant-folded, the select can get lowered to a shuffle with constant
  // mask.
  SDValue Blended =
      DAG.getNode(ISD::VSELECT, DL, VT, DivisorIsIntMin, MaskedIsZero, Fold);

  return Blended;
}

bool TargetLowering::
verifyReturnAddressArgumentIsConstant(SDValue Op, SelectionDAG &DAG) const {
  if (!isa<ConstantSDNode>(Op.getOperand(0))) {
    DAG.getContext()->emitError("argument to '__builtin_return_address' must "
                                "be a constant integer");
    return true;
  }

  return false;
}

SDValue TargetLowering::getNegatedExpression(SDValue Op, SelectionDAG &DAG,
                                             bool LegalOps, bool OptForSize,
                                             NegatibleCost &Cost,
                                             unsigned Depth) const {
  // fneg is removable even if it has multiple uses.
  if (Op.getOpcode() == ISD::FNEG) {
    Cost = NegatibleCost::Cheaper;
    return Op.getOperand(0);
  }

  // Don't recurse exponentially.
  if (Depth > SelectionDAG::MaxRecursionDepth)
    return SDValue();

  // Pre-increment recursion depth for use in recursive calls.
  ++Depth;
  const SDNodeFlags Flags = Op->getFlags();
  const TargetOptions &Options = DAG.getTarget().Options;
  EVT VT = Op.getValueType();
  unsigned Opcode = Op.getOpcode();

  // Don't allow anything with multiple uses unless we know it is free.
  if (!Op.hasOneUse() && Opcode != ISD::ConstantFP) {
    bool IsFreeExtend = Opcode == ISD::FP_EXTEND &&
                        isFPExtFree(VT, Op.getOperand(0).getValueType());
    if (!IsFreeExtend)
      return SDValue();
  }

  auto RemoveDeadNode = [&](SDValue N) {
    if (N && N.getNode()->use_empty())
      DAG.RemoveDeadNode(N.getNode());
  };

  SDLoc DL(Op);

  switch (Opcode) {
  case ISD::ConstantFP: {
    // Don't invert constant FP values after legalization unless the target
    // says the negated constant is legal.
    bool IsOpLegal =
        isOperationLegal(ISD::ConstantFP, VT) ||
        isFPImmLegal(neg(cast<ConstantFPSDNode>(Op)->getValueAPF()), VT,
                     OptForSize);

    if (LegalOps && !IsOpLegal)
      break;

    APFloat V = cast<ConstantFPSDNode>(Op)->getValueAPF();
    V.changeSign();
    SDValue CFP = DAG.getConstantFP(V, DL, VT);

    // If we already have the use of the negated floating constant, it is free
    // to negate it even if it has multiple uses.
    if (!Op.hasOneUse() && CFP.use_empty())
      break;
    Cost = NegatibleCost::Neutral;
    return CFP;
  }
  case ISD::BUILD_VECTOR: {
    // Only permit BUILD_VECTOR of constants.
    if (llvm::any_of(Op->op_values(), [&](SDValue N) {
          return !N.isUndef() && !isa<ConstantFPSDNode>(N);
        }))
      break;

    bool IsOpLegal =
        (isOperationLegal(ISD::ConstantFP, VT) &&
         isOperationLegal(ISD::BUILD_VECTOR, VT)) ||
        llvm::all_of(Op->op_values(), [&](SDValue N) {
          return N.isUndef() ||
                 isFPImmLegal(neg(cast<ConstantFPSDNode>(N)->getValueAPF()), VT,
                              OptForSize);
        });

    if (LegalOps && !IsOpLegal)
      break;

    SmallVector<SDValue, 4> Ops;
    for (SDValue C : Op->op_values()) {
      if (C.isUndef()) {
        Ops.push_back(C);
        continue;
      }
      APFloat V = cast<ConstantFPSDNode>(C)->getValueAPF();
      V.changeSign();
      Ops.push_back(DAG.getConstantFP(V, DL, C.getValueType()));
    }
    Cost = NegatibleCost::Neutral;
    return DAG.getBuildVector(VT, DL, Ops);
  }
  case ISD::FADD: {
    if (!Options.NoSignedZerosFPMath && !Flags.hasNoSignedZeros())
      break;

    // After operation legalization, it might not be legal to create new FSUBs.
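    // (The no-signed-zeros check above matters: e.g. with X == +0.0 and
    // Y == -0.0, -(X + Y) == -0.0 but (-X) - Y == +0.0, so the rewrites
    // below are only sound when signed zeros can be ignored.)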
    if (LegalOps && !isOperationLegalOrCustom(ISD::FSUB, VT))
      break;
    SDValue X = Op.getOperand(0), Y = Op.getOperand(1);

    // fold (fneg (fadd X, Y)) -> (fsub (fneg X), Y)
    NegatibleCost CostX = NegatibleCost::Expensive;
    SDValue NegX =
        getNegatedExpression(X, DAG, LegalOps, OptForSize, CostX, Depth);
    // fold (fneg (fadd X, Y)) -> (fsub (fneg Y), X)
    NegatibleCost CostY = NegatibleCost::Expensive;
    SDValue NegY =
        getNegatedExpression(Y, DAG, LegalOps, OptForSize, CostY, Depth);

    // Negate X if its cost is less than or equal to the cost of negating Y.
    if (NegX && (CostX <= CostY)) {
      Cost = CostX;
      SDValue N = DAG.getNode(ISD::FSUB, DL, VT, NegX, Y, Flags);
      if (NegY != N)
        RemoveDeadNode(NegY);
      return N;
    }

    // Negate Y if it is not expensive.
    if (NegY) {
      Cost = CostY;
      SDValue N = DAG.getNode(ISD::FSUB, DL, VT, NegY, X, Flags);
      if (NegX != N)
        RemoveDeadNode(NegX);
      return N;
    }
    break;
  }
  case ISD::FSUB: {
    // We can't turn -(A-B) into B-A when we honor signed zeros.
    if (!Options.NoSignedZerosFPMath && !Flags.hasNoSignedZeros())
      break;

    SDValue X = Op.getOperand(0), Y = Op.getOperand(1);
    // fold (fneg (fsub 0, Y)) -> Y
    if (ConstantFPSDNode *C = isConstOrConstSplatFP(X, /*AllowUndefs*/ true))
      if (C->isZero()) {
        Cost = NegatibleCost::Cheaper;
        return Y;
      }

    // fold (fneg (fsub X, Y)) -> (fsub Y, X)
    Cost = NegatibleCost::Neutral;
    return DAG.getNode(ISD::FSUB, DL, VT, Y, X, Flags);
  }
  case ISD::FMUL:
  case ISD::FDIV: {
    SDValue X = Op.getOperand(0), Y = Op.getOperand(1);

    // fold (fneg (fmul X, Y)) -> (fmul (fneg X), Y)
    NegatibleCost CostX = NegatibleCost::Expensive;
    SDValue NegX =
        getNegatedExpression(X, DAG, LegalOps, OptForSize, CostX, Depth);
    // fold (fneg (fmul X, Y)) -> (fmul X, (fneg Y))
    NegatibleCost CostY = NegatibleCost::Expensive;
    SDValue NegY =
        getNegatedExpression(Y, DAG, LegalOps, OptForSize, CostY, Depth);

    // Negate X if its cost is less than or equal to the cost of negating Y.
    if (NegX && (CostX <= CostY)) {
      Cost = CostX;
      SDValue N = DAG.getNode(Opcode, DL, VT, NegX, Y, Flags);
      if (NegY != N)
        RemoveDeadNode(NegY);
      return N;
    }

    // Ignore X * 2.0 because that is expected to be canonicalized to X + X.
    if (auto *C = isConstOrConstSplatFP(Op.getOperand(1)))
      if (C->isExactlyValue(2.0) && Op.getOpcode() == ISD::FMUL)
        break;

    // Negate Y if it is not expensive.
    if (NegY) {
      Cost = CostY;
      SDValue N = DAG.getNode(Opcode, DL, VT, X, NegY, Flags);
      if (NegX != N)
        RemoveDeadNode(NegX);
      return N;
    }
    break;
  }
  case ISD::FMA:
  case ISD::FMAD: {
    if (!Options.NoSignedZerosFPMath && !Flags.hasNoSignedZeros())
      break;

    SDValue X = Op.getOperand(0), Y = Op.getOperand(1), Z = Op.getOperand(2);
    NegatibleCost CostZ = NegatibleCost::Expensive;
    SDValue NegZ =
        getNegatedExpression(Z, DAG, LegalOps, OptForSize, CostZ, Depth);
    // Give up if we fail to negate Z.
    if (!NegZ)
      break;

    // fold (fneg (fma X, Y, Z)) -> (fma (fneg X), Y, (fneg Z))
    NegatibleCost CostX = NegatibleCost::Expensive;
    SDValue NegX =
        getNegatedExpression(X, DAG, LegalOps, OptForSize, CostX, Depth);
    // fold (fneg (fma X, Y, Z)) -> (fma X, (fneg Y), (fneg Z))
    NegatibleCost CostY = NegatibleCost::Expensive;
    SDValue NegY =
        getNegatedExpression(Y, DAG, LegalOps, OptForSize, CostY, Depth);

    // Negate X if its cost is less than or equal to the cost of negating Y.
    if (NegX && (CostX <= CostY)) {
      Cost = std::min(CostX, CostZ);
      SDValue N = DAG.getNode(Opcode, DL, VT, NegX, Y, NegZ, Flags);
      if (NegY != N)
        RemoveDeadNode(NegY);
      return N;
    }

    // Negate Y if it is not expensive.
    if (NegY) {
      Cost = std::min(CostY, CostZ);
      SDValue N = DAG.getNode(Opcode, DL, VT, X, NegY, NegZ, Flags);
      if (NegX != N)
        RemoveDeadNode(NegX);
      return N;
    }
    break;
  }

  case ISD::FP_EXTEND:
  case ISD::FSIN:
    if (SDValue NegV = getNegatedExpression(Op.getOperand(0), DAG, LegalOps,
                                            OptForSize, Cost, Depth))
      return DAG.getNode(Opcode, DL, VT, NegV);
    break;
  case ISD::FP_ROUND:
    if (SDValue NegV = getNegatedExpression(Op.getOperand(0), DAG, LegalOps,
                                            OptForSize, Cost, Depth))
      return DAG.getNode(ISD::FP_ROUND, DL, VT, NegV, Op.getOperand(1));
    break;
  }

  return SDValue();
}

//===----------------------------------------------------------------------===//
// Legalization Utilities
//===----------------------------------------------------------------------===//

bool TargetLowering::expandMUL_LOHI(unsigned Opcode, EVT VT, const SDLoc &dl,
                                    SDValue LHS, SDValue RHS,
                                    SmallVectorImpl<SDValue> &Result,
                                    EVT HiLoVT, SelectionDAG &DAG,
                                    MulExpansionKind Kind, SDValue LL,
                                    SDValue LH, SDValue RL, SDValue RH) const {
  assert(Opcode == ISD::MUL || Opcode == ISD::UMUL_LOHI ||
         Opcode == ISD::SMUL_LOHI);

  bool HasMULHS = (Kind == MulExpansionKind::Always) ||
                  isOperationLegalOrCustom(ISD::MULHS, HiLoVT);
  bool HasMULHU = (Kind == MulExpansionKind::Always) ||
                  isOperationLegalOrCustom(ISD::MULHU, HiLoVT);
  bool HasSMUL_LOHI = (Kind == MulExpansionKind::Always) ||
                      isOperationLegalOrCustom(ISD::SMUL_LOHI, HiLoVT);
  bool HasUMUL_LOHI = (Kind == MulExpansionKind::Always) ||
                      isOperationLegalOrCustom(ISD::UMUL_LOHI, HiLoVT);

  if (!HasMULHU && !HasMULHS && !HasUMUL_LOHI && !HasSMUL_LOHI)
    return false;

  unsigned OuterBitSize = VT.getScalarSizeInBits();
  unsigned InnerBitSize = HiLoVT.getScalarSizeInBits();
  unsigned LHSSB = DAG.ComputeNumSignBits(LHS);
  unsigned RHSSB = DAG.ComputeNumSignBits(RHS);

  // LL, LH, RL, and RH must be either all NULL or all set to a value.
  assert((LL.getNode() && LH.getNode() && RL.getNode() && RH.getNode()) ||
         (!LL.getNode() && !LH.getNode() && !RL.getNode() && !RH.getNode()));

  SDVTList VTs = DAG.getVTList(HiLoVT, HiLoVT);
  auto MakeMUL_LOHI = [&](SDValue L, SDValue R, SDValue &Lo, SDValue &Hi,
                          bool Signed) -> bool {
    if ((Signed && HasSMUL_LOHI) || (!Signed && HasUMUL_LOHI)) {
      Lo = DAG.getNode(Signed ?
ISD::SMUL_LOHI : ISD::UMUL_LOHI, dl, VTs, L, R); 6045 Hi = SDValue(Lo.getNode(), 1); 6046 return true; 6047 } 6048 if ((Signed && HasMULHS) || (!Signed && HasMULHU)) { 6049 Lo = DAG.getNode(ISD::MUL, dl, HiLoVT, L, R); 6050 Hi = DAG.getNode(Signed ? ISD::MULHS : ISD::MULHU, dl, HiLoVT, L, R); 6051 return true; 6052 } 6053 return false; 6054 }; 6055 6056 SDValue Lo, Hi; 6057 6058 if (!LL.getNode() && !RL.getNode() && 6059 isOperationLegalOrCustom(ISD::TRUNCATE, HiLoVT)) { 6060 LL = DAG.getNode(ISD::TRUNCATE, dl, HiLoVT, LHS); 6061 RL = DAG.getNode(ISD::TRUNCATE, dl, HiLoVT, RHS); 6062 } 6063 6064 if (!LL.getNode()) 6065 return false; 6066 6067 APInt HighMask = APInt::getHighBitsSet(OuterBitSize, InnerBitSize); 6068 if (DAG.MaskedValueIsZero(LHS, HighMask) && 6069 DAG.MaskedValueIsZero(RHS, HighMask)) { 6070 // The inputs are both zero-extended. 6071 if (MakeMUL_LOHI(LL, RL, Lo, Hi, false)) { 6072 Result.push_back(Lo); 6073 Result.push_back(Hi); 6074 if (Opcode != ISD::MUL) { 6075 SDValue Zero = DAG.getConstant(0, dl, HiLoVT); 6076 Result.push_back(Zero); 6077 Result.push_back(Zero); 6078 } 6079 return true; 6080 } 6081 } 6082 6083 if (!VT.isVector() && Opcode == ISD::MUL && LHSSB > InnerBitSize && 6084 RHSSB > InnerBitSize) { 6085 // The input values are both sign-extended. 6086 // TODO non-MUL case? 6087 if (MakeMUL_LOHI(LL, RL, Lo, Hi, true)) { 6088 Result.push_back(Lo); 6089 Result.push_back(Hi); 6090 return true; 6091 } 6092 } 6093 6094 unsigned ShiftAmount = OuterBitSize - InnerBitSize; 6095 EVT ShiftAmountTy = getShiftAmountTy(VT, DAG.getDataLayout()); 6096 if (APInt::getMaxValue(ShiftAmountTy.getSizeInBits()).ult(ShiftAmount)) { 6097 // FIXME getShiftAmountTy does not always return a sensible result when VT 6098 // is an illegal type, and so the type may be too small to fit the shift 6099 // amount. Override it with i32. The shift will have to be legalized. 6100 ShiftAmountTy = MVT::i32; 6101 } 6102 SDValue Shift = DAG.getConstant(ShiftAmount, dl, ShiftAmountTy); 6103 6104 if (!LH.getNode() && !RH.getNode() && 6105 isOperationLegalOrCustom(ISD::SRL, VT) && 6106 isOperationLegalOrCustom(ISD::TRUNCATE, HiLoVT)) { 6107 LH = DAG.getNode(ISD::SRL, dl, VT, LHS, Shift); 6108 LH = DAG.getNode(ISD::TRUNCATE, dl, HiLoVT, LH); 6109 RH = DAG.getNode(ISD::SRL, dl, VT, RHS, Shift); 6110 RH = DAG.getNode(ISD::TRUNCATE, dl, HiLoVT, RH); 6111 } 6112 6113 if (!LH.getNode()) 6114 return false; 6115 6116 if (!MakeMUL_LOHI(LL, RL, Lo, Hi, false)) 6117 return false; 6118 6119 Result.push_back(Lo); 6120 6121 if (Opcode == ISD::MUL) { 6122 RH = DAG.getNode(ISD::MUL, dl, HiLoVT, LL, RH); 6123 LH = DAG.getNode(ISD::MUL, dl, HiLoVT, LH, RL); 6124 Hi = DAG.getNode(ISD::ADD, dl, HiLoVT, Hi, RH); 6125 Hi = DAG.getNode(ISD::ADD, dl, HiLoVT, Hi, LH); 6126 Result.push_back(Hi); 6127 return true; 6128 } 6129 6130 // Compute the full width result. 6131 auto Merge = [&](SDValue Lo, SDValue Hi) -> SDValue { 6132 Lo = DAG.getNode(ISD::ZERO_EXTEND, dl, VT, Lo); 6133 Hi = DAG.getNode(ISD::ZERO_EXTEND, dl, VT, Hi); 6134 Hi = DAG.getNode(ISD::SHL, dl, VT, Hi, Shift); 6135 return DAG.getNode(ISD::OR, dl, VT, Lo, Hi); 6136 }; 6137 6138 SDValue Next = DAG.getNode(ISD::ZERO_EXTEND, dl, VT, Hi); 6139 if (!MakeMUL_LOHI(LL, RH, Lo, Hi, false)) 6140 return false; 6141 6142 // This is effectively the add part of a multiply-add of half-sized operands, 6143 // so it cannot overflow. 
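  // (Bound, illustrative, assuming OuterBitSize == 2 * InnerBitSize == 2n:
  // hi(LL * RL) <= 2^n - 2 and LL * RH <= (2^n - 1)^2, so the sum is at most
  // 2^(2n) - 2^n - 1 and always fits the 2n-bit value.)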
6144 Next = DAG.getNode(ISD::ADD, dl, VT, Next, Merge(Lo, Hi)); 6145 6146 if (!MakeMUL_LOHI(LH, RL, Lo, Hi, false)) 6147 return false; 6148 6149 SDValue Zero = DAG.getConstant(0, dl, HiLoVT); 6150 EVT BoolType = getSetCCResultType(DAG.getDataLayout(), *DAG.getContext(), VT); 6151 6152 bool UseGlue = (isOperationLegalOrCustom(ISD::ADDC, VT) && 6153 isOperationLegalOrCustom(ISD::ADDE, VT)); 6154 if (UseGlue) 6155 Next = DAG.getNode(ISD::ADDC, dl, DAG.getVTList(VT, MVT::Glue), Next, 6156 Merge(Lo, Hi)); 6157 else 6158 Next = DAG.getNode(ISD::ADDCARRY, dl, DAG.getVTList(VT, BoolType), Next, 6159 Merge(Lo, Hi), DAG.getConstant(0, dl, BoolType)); 6160 6161 SDValue Carry = Next.getValue(1); 6162 Result.push_back(DAG.getNode(ISD::TRUNCATE, dl, HiLoVT, Next)); 6163 Next = DAG.getNode(ISD::SRL, dl, VT, Next, Shift); 6164 6165 if (!MakeMUL_LOHI(LH, RH, Lo, Hi, Opcode == ISD::SMUL_LOHI)) 6166 return false; 6167 6168 if (UseGlue) 6169 Hi = DAG.getNode(ISD::ADDE, dl, DAG.getVTList(HiLoVT, MVT::Glue), Hi, Zero, 6170 Carry); 6171 else 6172 Hi = DAG.getNode(ISD::ADDCARRY, dl, DAG.getVTList(HiLoVT, BoolType), Hi, 6173 Zero, Carry); 6174 6175 Next = DAG.getNode(ISD::ADD, dl, VT, Next, Merge(Lo, Hi)); 6176 6177 if (Opcode == ISD::SMUL_LOHI) { 6178 SDValue NextSub = DAG.getNode(ISD::SUB, dl, VT, Next, 6179 DAG.getNode(ISD::ZERO_EXTEND, dl, VT, RL)); 6180 Next = DAG.getSelectCC(dl, LH, Zero, NextSub, Next, ISD::SETLT); 6181 6182 NextSub = DAG.getNode(ISD::SUB, dl, VT, Next, 6183 DAG.getNode(ISD::ZERO_EXTEND, dl, VT, LL)); 6184 Next = DAG.getSelectCC(dl, RH, Zero, NextSub, Next, ISD::SETLT); 6185 } 6186 6187 Result.push_back(DAG.getNode(ISD::TRUNCATE, dl, HiLoVT, Next)); 6188 Next = DAG.getNode(ISD::SRL, dl, VT, Next, Shift); 6189 Result.push_back(DAG.getNode(ISD::TRUNCATE, dl, HiLoVT, Next)); 6190 return true; 6191 } 6192 6193 bool TargetLowering::expandMUL(SDNode *N, SDValue &Lo, SDValue &Hi, EVT HiLoVT, 6194 SelectionDAG &DAG, MulExpansionKind Kind, 6195 SDValue LL, SDValue LH, SDValue RL, 6196 SDValue RH) const { 6197 SmallVector<SDValue, 2> Result; 6198 bool Ok = expandMUL_LOHI(N->getOpcode(), N->getValueType(0), SDLoc(N), 6199 N->getOperand(0), N->getOperand(1), Result, HiLoVT, 6200 DAG, Kind, LL, LH, RL, RH); 6201 if (Ok) { 6202 assert(Result.size() == 2); 6203 Lo = Result[0]; 6204 Hi = Result[1]; 6205 } 6206 return Ok; 6207 } 6208 6209 // Check that (every element of) Z is undef or not an exact multiple of BW. 6210 static bool isNonZeroModBitWidthOrUndef(SDValue Z, unsigned BW) { 6211 return ISD::matchUnaryPredicate( 6212 Z, 6213 [=](ConstantSDNode *C) { return !C || C->getAPIntValue().urem(BW) != 0; }, 6214 true); 6215 } 6216 6217 bool TargetLowering::expandFunnelShift(SDNode *Node, SDValue &Result, 6218 SelectionDAG &DAG) const { 6219 EVT VT = Node->getValueType(0); 6220 6221 if (VT.isVector() && (!isOperationLegalOrCustom(ISD::SHL, VT) || 6222 !isOperationLegalOrCustom(ISD::SRL, VT) || 6223 !isOperationLegalOrCustom(ISD::SUB, VT) || 6224 !isOperationLegalOrCustomOrPromote(ISD::OR, VT))) 6225 return false; 6226 6227 SDValue X = Node->getOperand(0); 6228 SDValue Y = Node->getOperand(1); 6229 SDValue Z = Node->getOperand(2); 6230 6231 unsigned BW = VT.getScalarSizeInBits(); 6232 bool IsFSHL = Node->getOpcode() == ISD::FSHL; 6233 SDLoc DL(SDValue(Node, 0)); 6234 6235 EVT ShVT = Z.getValueType(); 6236 6237 // If a funnel shift in the other direction is more supported, use it. 6238 unsigned RevOpcode = IsFSHL ? 
ISD::FSHR : ISD::FSHL; 6239 if (!isOperationLegalOrCustom(Node->getOpcode(), VT) && 6240 isOperationLegalOrCustom(RevOpcode, VT) && isPowerOf2_32(BW)) { 6241 if (isNonZeroModBitWidthOrUndef(Z, BW)) { 6242 // fshl X, Y, Z -> fshr X, Y, -Z 6243 // fshr X, Y, Z -> fshl X, Y, -Z 6244 SDValue Zero = DAG.getConstant(0, DL, ShVT); 6245 Z = DAG.getNode(ISD::SUB, DL, VT, Zero, Z); 6246 } else { 6247 // fshl X, Y, Z -> fshr (srl X, 1), (fshr X, Y, 1), ~Z 6248 // fshr X, Y, Z -> fshl (fshl X, Y, 1), (shl Y, 1), ~Z 6249 SDValue One = DAG.getConstant(1, DL, ShVT); 6250 if (IsFSHL) { 6251 Y = DAG.getNode(RevOpcode, DL, VT, X, Y, One); 6252 X = DAG.getNode(ISD::SRL, DL, VT, X, One); 6253 } else { 6254 X = DAG.getNode(RevOpcode, DL, VT, X, Y, One); 6255 Y = DAG.getNode(ISD::SHL, DL, VT, Y, One); 6256 } 6257 Z = DAG.getNOT(DL, Z, ShVT); 6258 } 6259 Result = DAG.getNode(RevOpcode, DL, VT, X, Y, Z); 6260 return true; 6261 } 6262 6263 SDValue ShX, ShY; 6264 SDValue ShAmt, InvShAmt; 6265 if (isNonZeroModBitWidthOrUndef(Z, BW)) { 6266 // fshl: X << C | Y >> (BW - C) 6267 // fshr: X << (BW - C) | Y >> C 6268 // where C = Z % BW is not zero 6269 SDValue BitWidthC = DAG.getConstant(BW, DL, ShVT); 6270 ShAmt = DAG.getNode(ISD::UREM, DL, ShVT, Z, BitWidthC); 6271 InvShAmt = DAG.getNode(ISD::SUB, DL, ShVT, BitWidthC, ShAmt); 6272 ShX = DAG.getNode(ISD::SHL, DL, VT, X, IsFSHL ? ShAmt : InvShAmt); 6273 ShY = DAG.getNode(ISD::SRL, DL, VT, Y, IsFSHL ? InvShAmt : ShAmt); 6274 } else { 6275 // fshl: X << (Z % BW) | Y >> 1 >> (BW - 1 - (Z % BW)) 6276 // fshr: X << 1 << (BW - 1 - (Z % BW)) | Y >> (Z % BW) 6277 SDValue Mask = DAG.getConstant(BW - 1, DL, ShVT); 6278 if (isPowerOf2_32(BW)) { 6279 // Z % BW -> Z & (BW - 1) 6280 ShAmt = DAG.getNode(ISD::AND, DL, ShVT, Z, Mask); 6281 // (BW - 1) - (Z % BW) -> ~Z & (BW - 1) 6282 InvShAmt = DAG.getNode(ISD::AND, DL, ShVT, DAG.getNOT(DL, Z, ShVT), Mask); 6283 } else { 6284 SDValue BitWidthC = DAG.getConstant(BW, DL, ShVT); 6285 ShAmt = DAG.getNode(ISD::UREM, DL, ShVT, Z, BitWidthC); 6286 InvShAmt = DAG.getNode(ISD::SUB, DL, ShVT, Mask, ShAmt); 6287 } 6288 6289 SDValue One = DAG.getConstant(1, DL, ShVT); 6290 if (IsFSHL) { 6291 ShX = DAG.getNode(ISD::SHL, DL, VT, X, ShAmt); 6292 SDValue ShY1 = DAG.getNode(ISD::SRL, DL, VT, Y, One); 6293 ShY = DAG.getNode(ISD::SRL, DL, VT, ShY1, InvShAmt); 6294 } else { 6295 SDValue ShX1 = DAG.getNode(ISD::SHL, DL, VT, X, One); 6296 ShX = DAG.getNode(ISD::SHL, DL, VT, ShX1, InvShAmt); 6297 ShY = DAG.getNode(ISD::SRL, DL, VT, Y, ShAmt); 6298 } 6299 } 6300 Result = DAG.getNode(ISD::OR, DL, VT, ShX, ShY); 6301 return true; 6302 } 6303 6304 // TODO: Merge with expandFunnelShift. 6305 bool TargetLowering::expandROT(SDNode *Node, bool AllowVectorOps, 6306 SDValue &Result, SelectionDAG &DAG) const { 6307 EVT VT = Node->getValueType(0); 6308 unsigned EltSizeInBits = VT.getScalarSizeInBits(); 6309 bool IsLeft = Node->getOpcode() == ISD::ROTL; 6310 SDValue Op0 = Node->getOperand(0); 6311 SDValue Op1 = Node->getOperand(1); 6312 SDLoc DL(SDValue(Node, 0)); 6313 6314 EVT ShVT = Op1.getValueType(); 6315 SDValue Zero = DAG.getConstant(0, DL, ShVT); 6316 6317 // If a rotate in the other direction is supported, use it. 6318 unsigned RevRot = IsLeft ? 
ISD::ROTR : ISD::ROTL; 6319 if (isOperationLegalOrCustom(RevRot, VT) && isPowerOf2_32(EltSizeInBits)) { 6320 SDValue Sub = DAG.getNode(ISD::SUB, DL, ShVT, Zero, Op1); 6321 Result = DAG.getNode(RevRot, DL, VT, Op0, Sub); 6322 return true; 6323 } 6324 6325 if (!AllowVectorOps && VT.isVector() && 6326 (!isOperationLegalOrCustom(ISD::SHL, VT) || 6327 !isOperationLegalOrCustom(ISD::SRL, VT) || 6328 !isOperationLegalOrCustom(ISD::SUB, VT) || 6329 !isOperationLegalOrCustomOrPromote(ISD::OR, VT) || 6330 !isOperationLegalOrCustomOrPromote(ISD::AND, VT))) 6331 return false; 6332 6333 unsigned ShOpc = IsLeft ? ISD::SHL : ISD::SRL; 6334 unsigned HsOpc = IsLeft ? ISD::SRL : ISD::SHL; 6335 SDValue BitWidthMinusOneC = DAG.getConstant(EltSizeInBits - 1, DL, ShVT); 6336 SDValue ShVal; 6337 SDValue HsVal; 6338 if (isPowerOf2_32(EltSizeInBits)) { 6339 // (rotl x, c) -> x << (c & (w - 1)) | x >> (-c & (w - 1)) 6340 // (rotr x, c) -> x >> (c & (w - 1)) | x << (-c & (w - 1)) 6341 SDValue NegOp1 = DAG.getNode(ISD::SUB, DL, ShVT, Zero, Op1); 6342 SDValue ShAmt = DAG.getNode(ISD::AND, DL, ShVT, Op1, BitWidthMinusOneC); 6343 ShVal = DAG.getNode(ShOpc, DL, VT, Op0, ShAmt); 6344 SDValue HsAmt = DAG.getNode(ISD::AND, DL, ShVT, NegOp1, BitWidthMinusOneC); 6345 HsVal = DAG.getNode(HsOpc, DL, VT, Op0, HsAmt); 6346 } else { 6347 // (rotl x, c) -> x << (c % w) | x >> 1 >> (w - 1 - (c % w)) 6348 // (rotr x, c) -> x >> (c % w) | x << 1 << (w - 1 - (c % w)) 6349 SDValue BitWidthC = DAG.getConstant(EltSizeInBits, DL, ShVT); 6350 SDValue ShAmt = DAG.getNode(ISD::UREM, DL, ShVT, Op1, BitWidthC); 6351 ShVal = DAG.getNode(ShOpc, DL, VT, Op0, ShAmt); 6352 SDValue HsAmt = DAG.getNode(ISD::SUB, DL, ShVT, BitWidthMinusOneC, ShAmt); 6353 SDValue One = DAG.getConstant(1, DL, ShVT); 6354 HsVal = 6355 DAG.getNode(HsOpc, DL, VT, DAG.getNode(HsOpc, DL, VT, Op0, One), HsAmt); 6356 } 6357 Result = DAG.getNode(ISD::OR, DL, VT, ShVal, HsVal); 6358 return true; 6359 } 6360 6361 bool TargetLowering::expandFP_TO_SINT(SDNode *Node, SDValue &Result, 6362 SelectionDAG &DAG) const { 6363 unsigned OpNo = Node->isStrictFPOpcode() ? 1 : 0; 6364 SDValue Src = Node->getOperand(OpNo); 6365 EVT SrcVT = Src.getValueType(); 6366 EVT DstVT = Node->getValueType(0); 6367 SDLoc dl(SDValue(Node, 0)); 6368 6369 // FIXME: Only f32 to i64 conversions are supported. 6370 if (SrcVT != MVT::f32 || DstVT != MVT::i64) 6371 return false; 6372 6373 if (Node->isStrictFPOpcode()) 6374 // When a NaN is converted to an integer a trap is allowed. We can't 6375 // use this expansion here because it would eliminate that trap. Other 6376 // traps are also allowed and cannot be eliminated. See 6377 // IEEE 754-2008 sec 5.8. 
6378 return false;
6379
6380 // Expand f32 -> i64 conversion
6381 // This algorithm comes from compiler-rt's implementation of fixsfdi:
6382 // https://github.com/llvm/llvm-project/blob/master/compiler-rt/lib/builtins/fixsfdi.c
6383 unsigned SrcEltBits = SrcVT.getScalarSizeInBits();
6384 EVT IntVT = SrcVT.changeTypeToInteger();
6385 EVT IntShVT = getShiftAmountTy(IntVT, DAG.getDataLayout());
6386
6387 SDValue ExponentMask = DAG.getConstant(0x7F800000, dl, IntVT);
6388 SDValue ExponentLoBit = DAG.getConstant(23, dl, IntVT);
6389 SDValue Bias = DAG.getConstant(127, dl, IntVT);
6390 SDValue SignMask = DAG.getConstant(APInt::getSignMask(SrcEltBits), dl, IntVT);
6391 SDValue SignLowBit = DAG.getConstant(SrcEltBits - 1, dl, IntVT);
6392 SDValue MantissaMask = DAG.getConstant(0x007FFFFF, dl, IntVT);
6393
6394 SDValue Bits = DAG.getNode(ISD::BITCAST, dl, IntVT, Src);
6395
6396 SDValue ExponentBits = DAG.getNode(
6397 ISD::SRL, dl, IntVT, DAG.getNode(ISD::AND, dl, IntVT, Bits, ExponentMask),
6398 DAG.getZExtOrTrunc(ExponentLoBit, dl, IntShVT));
6399 SDValue Exponent = DAG.getNode(ISD::SUB, dl, IntVT, ExponentBits, Bias);
6400
6401 SDValue Sign = DAG.getNode(ISD::SRA, dl, IntVT,
6402 DAG.getNode(ISD::AND, dl, IntVT, Bits, SignMask),
6403 DAG.getZExtOrTrunc(SignLowBit, dl, IntShVT));
6404 Sign = DAG.getSExtOrTrunc(Sign, dl, DstVT);
6405
6406 SDValue R = DAG.getNode(ISD::OR, dl, IntVT,
6407 DAG.getNode(ISD::AND, dl, IntVT, Bits, MantissaMask),
6408 DAG.getConstant(0x00800000, dl, IntVT));
6409
6410 R = DAG.getZExtOrTrunc(R, dl, DstVT);
6411
6412 R = DAG.getSelectCC(
6413 dl, Exponent, ExponentLoBit,
6414 DAG.getNode(ISD::SHL, dl, DstVT, R,
6415 DAG.getZExtOrTrunc(
6416 DAG.getNode(ISD::SUB, dl, IntVT, Exponent, ExponentLoBit),
6417 dl, IntShVT)),
6418 DAG.getNode(ISD::SRL, dl, DstVT, R,
6419 DAG.getZExtOrTrunc(
6420 DAG.getNode(ISD::SUB, dl, IntVT, ExponentLoBit, Exponent),
6421 dl, IntShVT)),
6422 ISD::SETGT);
6423
6424 SDValue Ret = DAG.getNode(ISD::SUB, dl, DstVT,
6425 DAG.getNode(ISD::XOR, dl, DstVT, R, Sign), Sign);
6426
6427 Result = DAG.getSelectCC(dl, Exponent, DAG.getConstant(0, dl, IntVT),
6428 DAG.getConstant(0, dl, DstVT), Ret, ISD::SETLT);
6429 return true;
6430 }
6431
6432 bool TargetLowering::expandFP_TO_UINT(SDNode *Node, SDValue &Result,
6433 SDValue &Chain,
6434 SelectionDAG &DAG) const {
6435 SDLoc dl(SDValue(Node, 0));
6436 unsigned OpNo = Node->isStrictFPOpcode() ? 1 : 0;
6437 SDValue Src = Node->getOperand(OpNo);
6438
6439 EVT SrcVT = Src.getValueType();
6440 EVT DstVT = Node->getValueType(0);
6441 EVT SetCCVT =
6442 getSetCCResultType(DAG.getDataLayout(), *DAG.getContext(), SrcVT);
6443 EVT DstSetCCVT =
6444 getSetCCResultType(DAG.getDataLayout(), *DAG.getContext(), DstVT);
6445
6446 // Only expand vector types if we have the appropriate vector bit operations.
6447 unsigned SIntOpcode = Node->isStrictFPOpcode() ? ISD::STRICT_FP_TO_SINT :
6448 ISD::FP_TO_SINT;
6449 if (DstVT.isVector() && (!isOperationLegalOrCustom(SIntOpcode, DstVT) ||
6450 !isOperationLegalOrCustomOrPromote(ISD::XOR, SrcVT)))
6451 return false;
6452
6453 // If the maximum float value is smaller than the signed integer range,
6454 // the destination signmask can't be represented by the float, so we can
6455 // just use FP_TO_SINT directly.
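// (E.g., for f16 -> i64 the largest finite f16 is 65504.0, so converting the
// i64 sign mask 2^63 into f16 overflows; every finite f16 input is therefore
// already inside FP_TO_SINT's range and no unsigned fixup is needed.)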
6456 const fltSemantics &APFSem = DAG.EVTToAPFloatSemantics(SrcVT);
6457 APFloat APF(APFSem, APInt::getNullValue(SrcVT.getScalarSizeInBits()));
6458 APInt SignMask = APInt::getSignMask(DstVT.getScalarSizeInBits());
6459 if (APFloat::opOverflow &
6460 APF.convertFromAPInt(SignMask, false, APFloat::rmNearestTiesToEven)) {
6461 if (Node->isStrictFPOpcode()) {
6462 Result = DAG.getNode(ISD::STRICT_FP_TO_SINT, dl, { DstVT, MVT::Other },
6463 { Node->getOperand(0), Src });
6464 Chain = Result.getValue(1);
6465 } else
6466 Result = DAG.getNode(ISD::FP_TO_SINT, dl, DstVT, Src);
6467 return true;
6468 }
6469
6470 SDValue Cst = DAG.getConstantFP(APF, dl, SrcVT);
6471 SDValue Sel;
6472
6473 if (Node->isStrictFPOpcode()) {
6474 Sel = DAG.getSetCC(dl, SetCCVT, Src, Cst, ISD::SETLT,
6475 Node->getOperand(0), /*IsSignaling*/ true);
6476 Chain = Sel.getValue(1);
6477 } else {
6478 Sel = DAG.getSetCC(dl, SetCCVT, Src, Cst, ISD::SETLT);
6479 }
6480
6481 bool Strict = Node->isStrictFPOpcode() ||
6482 shouldUseStrictFP_TO_INT(SrcVT, DstVT, /*IsSigned*/ false);
6483
6484 if (Strict) {
6485 // Expand based on the maximum range of FP_TO_SINT: if the value exceeds
6486 // the signmask, offset it first (the result should then be fully
6487 // representable).
6488 // Sel = Src < 0x8000000000000000
6489 // FltOfs = select Sel, 0, 0x8000000000000000
6490 // IntOfs = select Sel, 0, 0x8000000000000000
6491 // Result = fp_to_sint(Src - FltOfs) ^ IntOfs
6492
6493 // TODO: Should any fast-math-flags be set for the FSUB?
6494 SDValue FltOfs = DAG.getSelect(dl, SrcVT, Sel,
6495 DAG.getConstantFP(0.0, dl, SrcVT), Cst);
6496 Sel = DAG.getBoolExtOrTrunc(Sel, dl, DstSetCCVT, DstVT);
6497 SDValue IntOfs = DAG.getSelect(dl, DstVT, Sel,
6498 DAG.getConstant(0, dl, DstVT),
6499 DAG.getConstant(SignMask, dl, DstVT));
6500 SDValue SInt;
6501 if (Node->isStrictFPOpcode()) {
6502 SDValue Val = DAG.getNode(ISD::STRICT_FSUB, dl, { SrcVT, MVT::Other },
6503 { Chain, Src, FltOfs });
6504 SInt = DAG.getNode(ISD::STRICT_FP_TO_SINT, dl, { DstVT, MVT::Other },
6505 { Val.getValue(1), Val });
6506 Chain = SInt.getValue(1);
6507 } else {
6508 SDValue Val = DAG.getNode(ISD::FSUB, dl, SrcVT, Src, FltOfs);
6509 SInt = DAG.getNode(ISD::FP_TO_SINT, dl, DstVT, Val);
6510 }
6511 Result = DAG.getNode(ISD::XOR, dl, DstVT, SInt, IntOfs);
6512 } else {
6513 // Expand based on the maximum range of FP_TO_SINT:
6514 // True = fp_to_sint(Src)
6515 // False = 0x8000000000000000 + fp_to_sint(Src - 0x8000000000000000)
6516 // Result = select (Src < 0x8000000000000000), True, False
6517
6518 SDValue True = DAG.getNode(ISD::FP_TO_SINT, dl, DstVT, Src);
6519 // TODO: Should any fast-math-flags be set for the FSUB?
6520 SDValue False = DAG.getNode(ISD::FP_TO_SINT, dl, DstVT,
6521 DAG.getNode(ISD::FSUB, dl, SrcVT, Src, Cst));
6522 False = DAG.getNode(ISD::XOR, dl, DstVT, False,
6523 DAG.getConstant(SignMask, dl, DstVT));
6524 Sel = DAG.getBoolExtOrTrunc(Sel, dl, DstSetCCVT, DstVT);
6525 Result = DAG.getSelect(dl, DstVT, Sel, True, False);
6526 }
6527 return true;
6528 }
6529
6530 bool TargetLowering::expandUINT_TO_FP(SDNode *Node, SDValue &Result,
6531 SDValue &Chain,
6532 SelectionDAG &DAG) const {
6533 // This transform is not correct for converting 0 when the rounding mode is
6534 // set to round toward negative infinity, which will produce -0.0. So
6535 // disable it under strictfp.
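// (E.g., for Src == 0 the expansion below computes LoFlt == 2^52 and
// HiSub == -2^52 exactly; their mathematically exact zero sum rounds to
// -0.0 in round-toward-negative mode, hence the strictfp bail-out.)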
6535 if (Node->isStrictFPOpcode()) 6536 return false; 6537 6538 SDValue Src = Node->getOperand(0); 6539 EVT SrcVT = Src.getValueType(); 6540 EVT DstVT = Node->getValueType(0); 6541 6542 if (SrcVT.getScalarType() != MVT::i64 || DstVT.getScalarType() != MVT::f64) 6543 return false; 6544 6545 // Only expand vector types if we have the appropriate vector bit operations. 6546 if (SrcVT.isVector() && (!isOperationLegalOrCustom(ISD::SRL, SrcVT) || 6547 !isOperationLegalOrCustom(ISD::FADD, DstVT) || 6548 !isOperationLegalOrCustom(ISD::FSUB, DstVT) || 6549 !isOperationLegalOrCustomOrPromote(ISD::OR, SrcVT) || 6550 !isOperationLegalOrCustomOrPromote(ISD::AND, SrcVT))) 6551 return false; 6552 6553 SDLoc dl(SDValue(Node, 0)); 6554 EVT ShiftVT = getShiftAmountTy(SrcVT, DAG.getDataLayout()); 6555 6556 // Implementation of unsigned i64 to f64 following the algorithm in 6557 // __floatundidf in compiler_rt. This implementation performs rounding 6558 // correctly in all rounding modes with the exception of converting 0 6559 // when rounding toward negative infinity. In that case the fsub will produce 6560 // -0.0. This will be added to +0.0 and produce -0.0 which is incorrect. 6561 SDValue TwoP52 = DAG.getConstant(UINT64_C(0x4330000000000000), dl, SrcVT); 6562 SDValue TwoP84PlusTwoP52 = DAG.getConstantFP( 6563 BitsToDouble(UINT64_C(0x4530000000100000)), dl, DstVT); 6564 SDValue TwoP84 = DAG.getConstant(UINT64_C(0x4530000000000000), dl, SrcVT); 6565 SDValue LoMask = DAG.getConstant(UINT64_C(0x00000000FFFFFFFF), dl, SrcVT); 6566 SDValue HiShift = DAG.getConstant(32, dl, ShiftVT); 6567 6568 SDValue Lo = DAG.getNode(ISD::AND, dl, SrcVT, Src, LoMask); 6569 SDValue Hi = DAG.getNode(ISD::SRL, dl, SrcVT, Src, HiShift); 6570 SDValue LoOr = DAG.getNode(ISD::OR, dl, SrcVT, Lo, TwoP52); 6571 SDValue HiOr = DAG.getNode(ISD::OR, dl, SrcVT, Hi, TwoP84); 6572 SDValue LoFlt = DAG.getBitcast(DstVT, LoOr); 6573 SDValue HiFlt = DAG.getBitcast(DstVT, HiOr); 6574 SDValue HiSub = 6575 DAG.getNode(ISD::FSUB, dl, DstVT, HiFlt, TwoP84PlusTwoP52); 6576 Result = DAG.getNode(ISD::FADD, dl, DstVT, LoFlt, HiSub); 6577 return true; 6578 } 6579 6580 SDValue TargetLowering::expandFMINNUM_FMAXNUM(SDNode *Node, 6581 SelectionDAG &DAG) const { 6582 SDLoc dl(Node); 6583 unsigned NewOp = Node->getOpcode() == ISD::FMINNUM ? 6584 ISD::FMINNUM_IEEE : ISD::FMAXNUM_IEEE; 6585 EVT VT = Node->getValueType(0); 6586 if (isOperationLegalOrCustom(NewOp, VT)) { 6587 SDValue Quiet0 = Node->getOperand(0); 6588 SDValue Quiet1 = Node->getOperand(1); 6589 6590 if (!Node->getFlags().hasNoNaNs()) { 6591 // Insert canonicalizes if it's possible we need to quiet to get correct 6592 // sNaN behavior. 6593 if (!DAG.isKnownNeverSNaN(Quiet0)) { 6594 Quiet0 = DAG.getNode(ISD::FCANONICALIZE, dl, VT, Quiet0, 6595 Node->getFlags()); 6596 } 6597 if (!DAG.isKnownNeverSNaN(Quiet1)) { 6598 Quiet1 = DAG.getNode(ISD::FCANONICALIZE, dl, VT, Quiet1, 6599 Node->getFlags()); 6600 } 6601 } 6602 6603 return DAG.getNode(NewOp, dl, VT, Quiet0, Quiet1, Node->getFlags()); 6604 } 6605 6606 // If the target has FMINIMUM/FMAXIMUM but not FMINNUM/FMAXNUM use that 6607 // instead if there are no NaNs. 6608 if (Node->getFlags().hasNoNaNs()) { 6609 unsigned IEEE2018Op = 6610 Node->getOpcode() == ISD::FMINNUM ? 
ISD::FMINIMUM : ISD::FMAXIMUM; 6611 if (isOperationLegalOrCustom(IEEE2018Op, VT)) { 6612 return DAG.getNode(IEEE2018Op, dl, VT, Node->getOperand(0), 6613 Node->getOperand(1), Node->getFlags()); 6614 } 6615 } 6616 6617 // If none of the above worked, but there are no NaNs, then expand to 6618 // a compare/select sequence. This is required for correctness since 6619 // InstCombine might have canonicalized a fcmp+select sequence to a 6620 // FMINNUM/FMAXNUM node. If we were to fall through to the default 6621 // expansion to libcall, we might introduce a link-time dependency 6622 // on libm into a file that originally did not have one. 6623 if (Node->getFlags().hasNoNaNs()) { 6624 ISD::CondCode Pred = 6625 Node->getOpcode() == ISD::FMINNUM ? ISD::SETLT : ISD::SETGT; 6626 SDValue Op1 = Node->getOperand(0); 6627 SDValue Op2 = Node->getOperand(1); 6628 SDValue SelCC = DAG.getSelectCC(dl, Op1, Op2, Op1, Op2, Pred); 6629 // Copy FMF flags, but always set the no-signed-zeros flag 6630 // as this is implied by the FMINNUM/FMAXNUM semantics. 6631 SDNodeFlags Flags = Node->getFlags(); 6632 Flags.setNoSignedZeros(true); 6633 SelCC->setFlags(Flags); 6634 return SelCC; 6635 } 6636 6637 return SDValue(); 6638 } 6639 6640 bool TargetLowering::expandCTPOP(SDNode *Node, SDValue &Result, 6641 SelectionDAG &DAG) const { 6642 SDLoc dl(Node); 6643 EVT VT = Node->getValueType(0); 6644 EVT ShVT = getShiftAmountTy(VT, DAG.getDataLayout()); 6645 SDValue Op = Node->getOperand(0); 6646 unsigned Len = VT.getScalarSizeInBits(); 6647 assert(VT.isInteger() && "CTPOP not implemented for this type."); 6648 6649 // TODO: Add support for irregular type lengths. 6650 if (!(Len <= 128 && Len % 8 == 0)) 6651 return false; 6652 6653 // Only expand vector types if we have the appropriate vector bit operations. 6654 if (VT.isVector() && (!isOperationLegalOrCustom(ISD::ADD, VT) || 6655 !isOperationLegalOrCustom(ISD::SUB, VT) || 6656 !isOperationLegalOrCustom(ISD::SRL, VT) || 6657 (Len != 8 && !isOperationLegalOrCustom(ISD::MUL, VT)) || 6658 !isOperationLegalOrCustomOrPromote(ISD::AND, VT))) 6659 return false; 6660 6661 // This is the "best" algorithm from 6662 // http://graphics.stanford.edu/~seander/bithacks.html#CountBitsSetParallel 6663 SDValue Mask55 = 6664 DAG.getConstant(APInt::getSplat(Len, APInt(8, 0x55)), dl, VT); 6665 SDValue Mask33 = 6666 DAG.getConstant(APInt::getSplat(Len, APInt(8, 0x33)), dl, VT); 6667 SDValue Mask0F = 6668 DAG.getConstant(APInt::getSplat(Len, APInt(8, 0x0F)), dl, VT); 6669 SDValue Mask01 = 6670 DAG.getConstant(APInt::getSplat(Len, APInt(8, 0x01)), dl, VT); 6671 6672 // v = v - ((v >> 1) & 0x55555555...) 6673 Op = DAG.getNode(ISD::SUB, dl, VT, Op, 6674 DAG.getNode(ISD::AND, dl, VT, 6675 DAG.getNode(ISD::SRL, dl, VT, Op, 6676 DAG.getConstant(1, dl, ShVT)), 6677 Mask55)); 6678 // v = (v & 0x33333333...) + ((v >> 2) & 0x33333333...) 6679 Op = DAG.getNode(ISD::ADD, dl, VT, DAG.getNode(ISD::AND, dl, VT, Op, Mask33), 6680 DAG.getNode(ISD::AND, dl, VT, 6681 DAG.getNode(ISD::SRL, dl, VT, Op, 6682 DAG.getConstant(2, dl, ShVT)), 6683 Mask33)); 6684 // v = (v + (v >> 4)) & 0x0F0F0F0F... 6685 Op = DAG.getNode(ISD::AND, dl, VT, 6686 DAG.getNode(ISD::ADD, dl, VT, Op, 6687 DAG.getNode(ISD::SRL, dl, VT, Op, 6688 DAG.getConstant(4, dl, ShVT))), 6689 Mask0F); 6690 // v = (v * 0x01010101...) 
>> (Len - 8) 6691 if (Len > 8) 6692 Op = 6693 DAG.getNode(ISD::SRL, dl, VT, DAG.getNode(ISD::MUL, dl, VT, Op, Mask01), 6694 DAG.getConstant(Len - 8, dl, ShVT)); 6695 6696 Result = Op; 6697 return true; 6698 } 6699 6700 bool TargetLowering::expandCTLZ(SDNode *Node, SDValue &Result, 6701 SelectionDAG &DAG) const { 6702 SDLoc dl(Node); 6703 EVT VT = Node->getValueType(0); 6704 EVT ShVT = getShiftAmountTy(VT, DAG.getDataLayout()); 6705 SDValue Op = Node->getOperand(0); 6706 unsigned NumBitsPerElt = VT.getScalarSizeInBits(); 6707 6708 // If the non-ZERO_UNDEF version is supported we can use that instead. 6709 if (Node->getOpcode() == ISD::CTLZ_ZERO_UNDEF && 6710 isOperationLegalOrCustom(ISD::CTLZ, VT)) { 6711 Result = DAG.getNode(ISD::CTLZ, dl, VT, Op); 6712 return true; 6713 } 6714 6715 // If the ZERO_UNDEF version is supported use that and handle the zero case. 6716 if (isOperationLegalOrCustom(ISD::CTLZ_ZERO_UNDEF, VT)) { 6717 EVT SetCCVT = 6718 getSetCCResultType(DAG.getDataLayout(), *DAG.getContext(), VT); 6719 SDValue CTLZ = DAG.getNode(ISD::CTLZ_ZERO_UNDEF, dl, VT, Op); 6720 SDValue Zero = DAG.getConstant(0, dl, VT); 6721 SDValue SrcIsZero = DAG.getSetCC(dl, SetCCVT, Op, Zero, ISD::SETEQ); 6722 Result = DAG.getNode(ISD::SELECT, dl, VT, SrcIsZero, 6723 DAG.getConstant(NumBitsPerElt, dl, VT), CTLZ); 6724 return true; 6725 } 6726 6727 // Only expand vector types if we have the appropriate vector bit operations. 6728 if (VT.isVector() && (!isPowerOf2_32(NumBitsPerElt) || 6729 !isOperationLegalOrCustom(ISD::CTPOP, VT) || 6730 !isOperationLegalOrCustom(ISD::SRL, VT) || 6731 !isOperationLegalOrCustomOrPromote(ISD::OR, VT))) 6732 return false; 6733 6734 // for now, we do this: 6735 // x = x | (x >> 1); 6736 // x = x | (x >> 2); 6737 // ... 6738 // x = x | (x >>16); 6739 // x = x | (x >>32); // for 64-bit input 6740 // return popcount(~x); 6741 // 6742 // Ref: "Hacker's Delight" by Henry Warren 6743 for (unsigned i = 0; (1U << i) <= (NumBitsPerElt / 2); ++i) { 6744 SDValue Tmp = DAG.getConstant(1ULL << i, dl, ShVT); 6745 Op = DAG.getNode(ISD::OR, dl, VT, Op, 6746 DAG.getNode(ISD::SRL, dl, VT, Op, Tmp)); 6747 } 6748 Op = DAG.getNOT(dl, Op, VT); 6749 Result = DAG.getNode(ISD::CTPOP, dl, VT, Op); 6750 return true; 6751 } 6752 6753 bool TargetLowering::expandCTTZ(SDNode *Node, SDValue &Result, 6754 SelectionDAG &DAG) const { 6755 SDLoc dl(Node); 6756 EVT VT = Node->getValueType(0); 6757 SDValue Op = Node->getOperand(0); 6758 unsigned NumBitsPerElt = VT.getScalarSizeInBits(); 6759 6760 // If the non-ZERO_UNDEF version is supported we can use that instead. 6761 if (Node->getOpcode() == ISD::CTTZ_ZERO_UNDEF && 6762 isOperationLegalOrCustom(ISD::CTTZ, VT)) { 6763 Result = DAG.getNode(ISD::CTTZ, dl, VT, Op); 6764 return true; 6765 } 6766 6767 // If the ZERO_UNDEF version is supported use that and handle the zero case. 6768 if (isOperationLegalOrCustom(ISD::CTTZ_ZERO_UNDEF, VT)) { 6769 EVT SetCCVT = 6770 getSetCCResultType(DAG.getDataLayout(), *DAG.getContext(), VT); 6771 SDValue CTTZ = DAG.getNode(ISD::CTTZ_ZERO_UNDEF, dl, VT, Op); 6772 SDValue Zero = DAG.getConstant(0, dl, VT); 6773 SDValue SrcIsZero = DAG.getSetCC(dl, SetCCVT, Op, Zero, ISD::SETEQ); 6774 Result = DAG.getNode(ISD::SELECT, dl, VT, SrcIsZero, 6775 DAG.getConstant(NumBitsPerElt, dl, VT), CTTZ); 6776 return true; 6777 } 6778 6779 // Only expand vector types if we have the appropriate vector bit operations. 
6780 if (VT.isVector() && (!isPowerOf2_32(NumBitsPerElt) ||
6781 (!isOperationLegalOrCustom(ISD::CTPOP, VT) &&
6782 !isOperationLegalOrCustom(ISD::CTLZ, VT)) ||
6783 !isOperationLegalOrCustom(ISD::SUB, VT) ||
6784 !isOperationLegalOrCustomOrPromote(ISD::AND, VT) ||
6785 !isOperationLegalOrCustomOrPromote(ISD::XOR, VT)))
6786 return false;
6787
6788 // for now, we use: { return popcount(~x & (x - 1)); }
6789 // unless the target has ctlz but not ctpop, in which case we use:
6790 // { return 32 - nlz(~x & (x-1)); }
6791 // Ref: "Hacker's Delight" by Henry Warren
6792 SDValue Tmp = DAG.getNode(
6793 ISD::AND, dl, VT, DAG.getNOT(dl, Op, VT),
6794 DAG.getNode(ISD::SUB, dl, VT, Op, DAG.getConstant(1, dl, VT)));
6795
6796 // If ISD::CTLZ is legal and CTPOP isn't, then do that instead.
6797 if (isOperationLegal(ISD::CTLZ, VT) && !isOperationLegal(ISD::CTPOP, VT)) {
6798 Result =
6799 DAG.getNode(ISD::SUB, dl, VT, DAG.getConstant(NumBitsPerElt, dl, VT),
6800 DAG.getNode(ISD::CTLZ, dl, VT, Tmp));
6801 return true;
6802 }
6803
6804 Result = DAG.getNode(ISD::CTPOP, dl, VT, Tmp);
6805 return true;
6806 }
6807
6808 bool TargetLowering::expandABS(SDNode *N, SDValue &Result,
6809 SelectionDAG &DAG) const {
6810 SDLoc dl(N);
6811 EVT VT = N->getValueType(0);
6812 EVT ShVT = getShiftAmountTy(VT, DAG.getDataLayout());
6813 SDValue Op = N->getOperand(0);
6814
6815 // Only expand vector types if we have the appropriate vector operations.
6816 if (VT.isVector() && (!isOperationLegalOrCustom(ISD::SRA, VT) ||
6817 !isOperationLegalOrCustom(ISD::ADD, VT) ||
6818 !isOperationLegalOrCustomOrPromote(ISD::XOR, VT)))
6819 return false;
6820
6821 // Expand abs(x) as (x + sign) ^ sign, where sign = x >> (BW - 1) is zero
6822 // for non-negative x and all-ones for negative x.
6823 SDValue Shift =
6824 DAG.getNode(ISD::SRA, dl, VT, Op,
6825 DAG.getConstant(VT.getScalarSizeInBits() - 1, dl, ShVT));
6826 SDValue Add = DAG.getNode(ISD::ADD, dl, VT, Op, Shift);
6827 Result = DAG.getNode(ISD::XOR, dl, VT, Add, Shift);
6828 return true;
6829 }
6830
6831 std::pair<SDValue, SDValue>
6832 TargetLowering::scalarizeVectorLoad(LoadSDNode *LD,
6833 SelectionDAG &DAG) const {
6834 SDLoc SL(LD);
6835 SDValue Chain = LD->getChain();
6836 SDValue BasePTR = LD->getBasePtr();
6837 EVT SrcVT = LD->getMemoryVT();
6838 EVT DstVT = LD->getValueType(0);
6839 ISD::LoadExtType ExtType = LD->getExtensionType();
6840
6841 if (SrcVT.isScalableVector())
6842 report_fatal_error("Cannot scalarize scalable vector loads");
6843
6844 unsigned NumElem = SrcVT.getVectorNumElements();
6845
6846 EVT SrcEltVT = SrcVT.getScalarType();
6847 EVT DstEltVT = DstVT.getScalarType();
6848
6849 // A vector must always be stored in memory as-is, i.e. without any padding
6850 // between the elements, since various pieces of code depend on it, e.g. in
6851 // the handling of a bitcast of a vector type to int, which may be done with
6852 // a vector store followed by an integer load. A vector that does not have
6853 // elements that are byte-sized must therefore be stored as an integer
6854 // built out of the extracted vector elements.
6855 if (!SrcEltVT.isByteSized()) {
6856 unsigned NumLoadBits = SrcVT.getStoreSizeInBits();
6857 EVT LoadVT = EVT::getIntegerVT(*DAG.getContext(), NumLoadBits);
6858
6859 unsigned NumSrcBits = SrcVT.getSizeInBits();
6860 EVT SrcIntVT = EVT::getIntegerVT(*DAG.getContext(), NumSrcBits);
6861
6862 unsigned SrcEltBits = SrcEltVT.getSizeInBits();
6863 SDValue SrcEltBitMask = DAG.getConstant(
6864 APInt::getLowBitsSet(NumLoadBits, SrcEltBits), SL, LoadVT);
6865
6866 // Load the whole vector and avoid masking off the top bits as it makes
6867 // the codegen worse.
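// (Illustrative: a <4 x i1> load reads the whole vector as a single i8; on
// a little-endian target element Idx is then recovered as (Load >> Idx) & 1
// before being truncated to the element type.)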
6866 SDValue Load =
6867 DAG.getExtLoad(ISD::EXTLOAD, SL, LoadVT, Chain, BasePTR,
6868 LD->getPointerInfo(), SrcIntVT, LD->getOriginalAlign(),
6869 LD->getMemOperand()->getFlags(), LD->getAAInfo());
6870
6871 SmallVector<SDValue, 8> Vals;
6872 for (unsigned Idx = 0; Idx < NumElem; ++Idx) {
6873 unsigned ShiftIntoIdx =
6874 (DAG.getDataLayout().isBigEndian() ? (NumElem - 1) - Idx : Idx);
6875 SDValue ShiftAmount =
6876 DAG.getShiftAmountConstant(ShiftIntoIdx * SrcEltVT.getSizeInBits(),
6877 LoadVT, SL, /*LegalTypes=*/false);
6878 SDValue ShiftedElt = DAG.getNode(ISD::SRL, SL, LoadVT, Load, ShiftAmount);
6879 SDValue Elt =
6880 DAG.getNode(ISD::AND, SL, LoadVT, ShiftedElt, SrcEltBitMask);
6881 SDValue Scalar = DAG.getNode(ISD::TRUNCATE, SL, SrcEltVT, Elt);
6882
6883 if (ExtType != ISD::NON_EXTLOAD) {
6884 unsigned ExtendOp = ISD::getExtForLoadExtType(false, ExtType);
6885 Scalar = DAG.getNode(ExtendOp, SL, DstEltVT, Scalar);
6886 }
6887
6888 Vals.push_back(Scalar);
6889 }
6890
6891 SDValue Value = DAG.getBuildVector(DstVT, SL, Vals);
6892 return std::make_pair(Value, Load.getValue(1));
6893 }
6894
6895 unsigned Stride = SrcEltVT.getSizeInBits() / 8;
6896 assert(SrcEltVT.isByteSized());
6897
6898 SmallVector<SDValue, 8> Vals;
6899 SmallVector<SDValue, 8> LoadChains;
6900
6901 for (unsigned Idx = 0; Idx < NumElem; ++Idx) {
6902 SDValue ScalarLoad =
6903 DAG.getExtLoad(ExtType, SL, DstEltVT, Chain, BasePTR,
6904 LD->getPointerInfo().getWithOffset(Idx * Stride),
6905 SrcEltVT, LD->getOriginalAlign(),
6906 LD->getMemOperand()->getFlags(), LD->getAAInfo());
6907
6908 BasePTR = DAG.getObjectPtrOffset(SL, BasePTR, TypeSize::Fixed(Stride));
6909
6910 Vals.push_back(ScalarLoad.getValue(0));
6911 LoadChains.push_back(ScalarLoad.getValue(1));
6912 }
6913
6914 SDValue NewChain = DAG.getNode(ISD::TokenFactor, SL, MVT::Other, LoadChains);
6915 SDValue Value = DAG.getBuildVector(DstVT, SL, Vals);
6916
6917 return std::make_pair(Value, NewChain);
6918 }
6919
6920 SDValue TargetLowering::scalarizeVectorStore(StoreSDNode *ST,
6921 SelectionDAG &DAG) const {
6922 SDLoc SL(ST);
6923
6924 SDValue Chain = ST->getChain();
6925 SDValue BasePtr = ST->getBasePtr();
6926 SDValue Value = ST->getValue();
6927 EVT StVT = ST->getMemoryVT();
6928
6929 if (StVT.isScalableVector())
6930 report_fatal_error("Cannot scalarize scalable vector stores");
6931
6932 // The type of the data we want to save.
6933 EVT RegVT = Value.getValueType();
6934 EVT RegSclVT = RegVT.getScalarType();
6935
6936 // The type of data as saved in memory.
6937 EVT MemSclVT = StVT.getScalarType();
6938
6939 unsigned NumElem = StVT.getVectorNumElements();
6940
6941 // A vector must always be stored in memory as-is, i.e. without any padding
6942 // between the elements, since various pieces of code depend on it, e.g. in
6943 // the handling of a bitcast of a vector type to int, which may be done with
6944 // a vector store followed by an integer load. A vector that does not have
6945 // elements that are byte-sized must therefore be stored as an integer
6946 // built out of the extracted vector elements.
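// (Illustrative: storing a <4 x i1> zero-extends each bit into the packed
// integer type and ORs it in at bit Idx on little-endian targets, mirrored
// to (NumElem - 1) - Idx on big-endian, then emits a single integer store.)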
6947 if (!MemSclVT.isByteSized()) {
6948 unsigned NumBits = StVT.getSizeInBits();
6949 EVT IntVT = EVT::getIntegerVT(*DAG.getContext(), NumBits);
6950
6951 SDValue CurrVal = DAG.getConstant(0, SL, IntVT);
6952
6953 for (unsigned Idx = 0; Idx < NumElem; ++Idx) {
6954 SDValue Elt = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, SL, RegSclVT, Value,
6955 DAG.getVectorIdxConstant(Idx, SL));
6956 SDValue Trunc = DAG.getNode(ISD::TRUNCATE, SL, MemSclVT, Elt);
6957 SDValue ExtElt = DAG.getNode(ISD::ZERO_EXTEND, SL, IntVT, Trunc);
6958 unsigned ShiftIntoIdx =
6959 (DAG.getDataLayout().isBigEndian() ? (NumElem - 1) - Idx : Idx);
6960 SDValue ShiftAmount =
6961 DAG.getConstant(ShiftIntoIdx * MemSclVT.getSizeInBits(), SL, IntVT);
6962 SDValue ShiftedElt =
6963 DAG.getNode(ISD::SHL, SL, IntVT, ExtElt, ShiftAmount);
6964 CurrVal = DAG.getNode(ISD::OR, SL, IntVT, CurrVal, ShiftedElt);
6965 }
6966
6967 return DAG.getStore(Chain, SL, CurrVal, BasePtr, ST->getPointerInfo(),
6968 ST->getOriginalAlign(), ST->getMemOperand()->getFlags(),
6969 ST->getAAInfo());
6970 }
6971
6972 // The store stride, in bytes.
6973 unsigned Stride = MemSclVT.getSizeInBits() / 8;
6974 assert(Stride && "Zero stride!");
6975 // Extract each of the elements from the original vector and save them into
6976 // memory individually.
6977 SmallVector<SDValue, 8> Stores;
6978 for (unsigned Idx = 0; Idx < NumElem; ++Idx) {
6979 SDValue Elt = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, SL, RegSclVT, Value,
6980 DAG.getVectorIdxConstant(Idx, SL));
6981
6982 SDValue Ptr =
6983 DAG.getObjectPtrOffset(SL, BasePtr, TypeSize::Fixed(Idx * Stride));
6984
6985 // This scalar TruncStore may be illegal, but we legalize it later.
6986 SDValue Store = DAG.getTruncStore(
6987 Chain, SL, Elt, Ptr, ST->getPointerInfo().getWithOffset(Idx * Stride),
6988 MemSclVT, ST->getOriginalAlign(), ST->getMemOperand()->getFlags(),
6989 ST->getAAInfo());
6990
6991 Stores.push_back(Store);
6992 }
6993
6994 return DAG.getNode(ISD::TokenFactor, SL, MVT::Other, Stores);
6995 }
6996
6997 std::pair<SDValue, SDValue>
6998 TargetLowering::expandUnalignedLoad(LoadSDNode *LD, SelectionDAG &DAG) const {
6999 assert(LD->getAddressingMode() == ISD::UNINDEXED &&
7000 "unaligned indexed loads not implemented!");
7001 SDValue Chain = LD->getChain();
7002 SDValue Ptr = LD->getBasePtr();
7003 EVT VT = LD->getValueType(0);
7004 EVT LoadedVT = LD->getMemoryVT();
7005 SDLoc dl(LD);
7006 auto &MF = DAG.getMachineFunction();
7007
7008 if (VT.isFloatingPoint() || VT.isVector()) {
7009 EVT intVT = EVT::getIntegerVT(*DAG.getContext(), LoadedVT.getSizeInBits());
7010 if (isTypeLegal(intVT) && isTypeLegal(LoadedVT)) {
7011 if (!isOperationLegalOrCustom(ISD::LOAD, intVT) &&
7012 LoadedVT.isVector()) {
7013 // Scalarize the load and let the individual components be handled.
7014 return scalarizeVectorLoad(LD, DAG);
7015 }
7016
7017 // Expand to a (misaligned) integer load of the same size,
7018 // then bitconvert to floating point or vector.
7019 SDValue newLoad = DAG.getLoad(intVT, dl, Chain, Ptr,
7020 LD->getMemOperand());
7021 SDValue Result = DAG.getNode(ISD::BITCAST, dl, LoadedVT, newLoad);
7022 if (LoadedVT != VT)
7023 Result = DAG.getNode(VT.isFloatingPoint() ? ISD::FP_EXTEND :
7024 ISD::ANY_EXTEND, dl, VT, Result);
7025
7026 return std::make_pair(Result, newLoad.getValue(1));
7027 }
7028
7029 // Copy the value to an (aligned) stack slot using (unaligned) integer
7030 // loads and stores, then do an (aligned) load from the stack slot.
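// (Illustrative: a misaligned f64 load on a target without a legal i64
// copies one full i32 chunk plus one i32 tail chunk into the aligned slot,
// then re-loads the f64 from that slot with natural alignment.)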
7031 MVT RegVT = getRegisterType(*DAG.getContext(), intVT);
7032 unsigned LoadedBytes = LoadedVT.getStoreSize();
7033 unsigned RegBytes = RegVT.getSizeInBits() / 8;
7034 unsigned NumRegs = (LoadedBytes + RegBytes - 1) / RegBytes;
7035
7036 // Make sure the stack slot is also aligned for the register type.
7037 SDValue StackBase = DAG.CreateStackTemporary(LoadedVT, RegVT);
7038 auto FrameIndex = cast<FrameIndexSDNode>(StackBase.getNode())->getIndex();
7039 SmallVector<SDValue, 8> Stores;
7040 SDValue StackPtr = StackBase;
7041 unsigned Offset = 0;
7042
7043 EVT PtrVT = Ptr.getValueType();
7044 EVT StackPtrVT = StackPtr.getValueType();
7045
7046 SDValue PtrIncrement = DAG.getConstant(RegBytes, dl, PtrVT);
7047 SDValue StackPtrIncrement = DAG.getConstant(RegBytes, dl, StackPtrVT);
7048
7049 // Do all but the last copy using the full register width.
7050 for (unsigned i = 1; i < NumRegs; i++) {
7051 // Load one integer register's worth from the original location.
7052 SDValue Load = DAG.getLoad(
7053 RegVT, dl, Chain, Ptr, LD->getPointerInfo().getWithOffset(Offset),
7054 LD->getOriginalAlign(), LD->getMemOperand()->getFlags(),
7055 LD->getAAInfo());
7056 // Follow the load with a store to the stack slot. Remember the store.
7057 Stores.push_back(DAG.getStore(
7058 Load.getValue(1), dl, Load, StackPtr,
7059 MachinePointerInfo::getFixedStack(MF, FrameIndex, Offset)));
7060 // Increment the pointers.
7061 Offset += RegBytes;
7062
7063 Ptr = DAG.getObjectPtrOffset(dl, Ptr, PtrIncrement);
7064 StackPtr = DAG.getObjectPtrOffset(dl, StackPtr, StackPtrIncrement);
7065 }
7066
7067 // The last copy may be partial. Do an extending load.
7068 EVT MemVT = EVT::getIntegerVT(*DAG.getContext(),
7069 8 * (LoadedBytes - Offset));
7070 SDValue Load =
7071 DAG.getExtLoad(ISD::EXTLOAD, dl, RegVT, Chain, Ptr,
7072 LD->getPointerInfo().getWithOffset(Offset), MemVT,
7073 LD->getOriginalAlign(), LD->getMemOperand()->getFlags(),
7074 LD->getAAInfo());
7075 // Follow the load with a store to the stack slot. Remember the store.
7076 // On big-endian machines this requires a truncating store to ensure
7077 // that the bits end up in the right place.
7078 Stores.push_back(DAG.getTruncStore(
7079 Load.getValue(1), dl, Load, StackPtr,
7080 MachinePointerInfo::getFixedStack(MF, FrameIndex, Offset), MemVT));
7081
7082 // The order of the stores doesn't matter - say it with a TokenFactor.
7083 SDValue TF = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, Stores);
7084
7085 // Finally, perform the original load only redirected to the stack slot.
7086 Load = DAG.getExtLoad(LD->getExtensionType(), dl, VT, TF, StackBase,
7087 MachinePointerInfo::getFixedStack(MF, FrameIndex, 0),
7088 LoadedVT);
7089
7090 // Callers expect a MERGE_VALUES node.
7091 return std::make_pair(Load, TF);
7092 }
7093
7094 assert(LoadedVT.isInteger() && !LoadedVT.isVector() &&
7095 "Unaligned load of unsupported type.");
7096
7097 // Compute the new VT that is half the size of the old one. This is an
7098 // integer MVT.
7099 unsigned NumBits = LoadedVT.getSizeInBits();
7100 EVT NewLoadedVT;
7101 NewLoadedVT = EVT::getIntegerVT(*DAG.getContext(), NumBits/2);
7102 NumBits >>= 1;
7103
7104 Align Alignment = LD->getOriginalAlign();
7105 unsigned IncrementSize = NumBits / 8;
7106 ISD::LoadExtType HiExtType = LD->getExtensionType();
7107
7108 // If the original load is NON_EXTLOAD, the hi part load must be ZEXTLOAD.
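// (Illustrative: an unaligned i32 load becomes two i16 halves recombined as
// (Hi << 16) | Lo, so the low half must be zero-extended to keep the OR from
// picking up garbage in its upper bits.)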
7109 if (HiExtType == ISD::NON_EXTLOAD)
7110 HiExtType = ISD::ZEXTLOAD;
7111
7112 // Load the value in two parts.
7113 SDValue Lo, Hi;
7114 if (DAG.getDataLayout().isLittleEndian()) {
7115 Lo = DAG.getExtLoad(ISD::ZEXTLOAD, dl, VT, Chain, Ptr, LD->getPointerInfo(),
7116 NewLoadedVT, Alignment, LD->getMemOperand()->getFlags(),
7117 LD->getAAInfo());
7118
7119 Ptr = DAG.getObjectPtrOffset(dl, Ptr, TypeSize::Fixed(IncrementSize));
7120 Hi = DAG.getExtLoad(HiExtType, dl, VT, Chain, Ptr,
7121 LD->getPointerInfo().getWithOffset(IncrementSize),
7122 NewLoadedVT, Alignment, LD->getMemOperand()->getFlags(),
7123 LD->getAAInfo());
7124 } else {
7125 Hi = DAG.getExtLoad(HiExtType, dl, VT, Chain, Ptr, LD->getPointerInfo(),
7126 NewLoadedVT, Alignment, LD->getMemOperand()->getFlags(),
7127 LD->getAAInfo());
7128
7129 Ptr = DAG.getObjectPtrOffset(dl, Ptr, TypeSize::Fixed(IncrementSize));
7130 Lo = DAG.getExtLoad(ISD::ZEXTLOAD, dl, VT, Chain, Ptr,
7131 LD->getPointerInfo().getWithOffset(IncrementSize),
7132 NewLoadedVT, Alignment, LD->getMemOperand()->getFlags(),
7133 LD->getAAInfo());
7134 }
7135
7136 // Aggregate the two parts.
7137 SDValue ShiftAmount =
7138 DAG.getConstant(NumBits, dl, getShiftAmountTy(Hi.getValueType(),
7139 DAG.getDataLayout()));
7140 SDValue Result = DAG.getNode(ISD::SHL, dl, VT, Hi, ShiftAmount);
7141 Result = DAG.getNode(ISD::OR, dl, VT, Result, Lo);
7142
7143 SDValue TF = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, Lo.getValue(1),
7144 Hi.getValue(1));
7145
7146 return std::make_pair(Result, TF);
7147 }
7148
7149 SDValue TargetLowering::expandUnalignedStore(StoreSDNode *ST,
7150 SelectionDAG &DAG) const {
7151 assert(ST->getAddressingMode() == ISD::UNINDEXED &&
7152 "unaligned indexed stores not implemented!");
7153 SDValue Chain = ST->getChain();
7154 SDValue Ptr = ST->getBasePtr();
7155 SDValue Val = ST->getValue();
7156 EVT VT = Val.getValueType();
7157 Align Alignment = ST->getOriginalAlign();
7158 auto &MF = DAG.getMachineFunction();
7159 EVT StoreMemVT = ST->getMemoryVT();
7160
7161 SDLoc dl(ST);
7162 if (StoreMemVT.isFloatingPoint() || StoreMemVT.isVector()) {
7163 EVT intVT = EVT::getIntegerVT(*DAG.getContext(), VT.getSizeInBits());
7164 if (isTypeLegal(intVT)) {
7165 if (!isOperationLegalOrCustom(ISD::STORE, intVT) &&
7166 StoreMemVT.isVector()) {
7167 // Scalarize the store and let the individual components be handled.
7168 SDValue Result = scalarizeVectorStore(ST, DAG);
7169 return Result;
7170 }
7171 // Expand to a bitconvert of the value to the integer type of the
7172 // same size, then a (misaligned) int store.
7173 // FIXME: Does not handle truncating floating point stores!
7174 SDValue Result = DAG.getNode(ISD::BITCAST, dl, intVT, Val);
7175 Result = DAG.getStore(Chain, dl, Result, Ptr, ST->getPointerInfo(),
7176 Alignment, ST->getMemOperand()->getFlags());
7177 return Result;
7178 }
7179 // Do an (aligned) store to a stack slot, then copy from the stack slot
7180 // to the final destination using (unaligned) integer loads and stores.
7181 MVT RegVT = getRegisterType(
7182 *DAG.getContext(),
7183 EVT::getIntegerVT(*DAG.getContext(), StoreMemVT.getSizeInBits()));
7184 EVT PtrVT = Ptr.getValueType();
7185 unsigned StoredBytes = StoreMemVT.getStoreSize();
7186 unsigned RegBytes = RegVT.getSizeInBits() / 8;
7187 unsigned NumRegs = (StoredBytes + RegBytes - 1) / RegBytes;
7188
7189 // Make sure the stack slot is also aligned for the register type.
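// (Illustrative: storing a misaligned 10-byte f80 with 4-byte registers
// gives NumRegs == 3: two full i32 copies followed by one truncating i16
// store for the remaining two bytes.)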
7190 SDValue StackPtr = DAG.CreateStackTemporary(StoreMemVT, RegVT);
7191 auto FrameIndex = cast<FrameIndexSDNode>(StackPtr.getNode())->getIndex();
7192
7193 // Perform the original store, only redirected to the stack slot.
7194 SDValue Store = DAG.getTruncStore(
7195 Chain, dl, Val, StackPtr,
7196 MachinePointerInfo::getFixedStack(MF, FrameIndex, 0), StoreMemVT);
7197
7198 EVT StackPtrVT = StackPtr.getValueType();
7199
7200 SDValue PtrIncrement = DAG.getConstant(RegBytes, dl, PtrVT);
7201 SDValue StackPtrIncrement = DAG.getConstant(RegBytes, dl, StackPtrVT);
7202 SmallVector<SDValue, 8> Stores;
7203 unsigned Offset = 0;
7204
7205 // Do all but the last copy using the full register width.
7206 for (unsigned i = 1; i < NumRegs; i++) {
7207 // Load one integer register's worth from the stack slot.
7208 SDValue Load = DAG.getLoad(
7209 RegVT, dl, Store, StackPtr,
7210 MachinePointerInfo::getFixedStack(MF, FrameIndex, Offset));
7211 // Store it to the final location. Remember the store.
7212 Stores.push_back(DAG.getStore(Load.getValue(1), dl, Load, Ptr,
7213 ST->getPointerInfo().getWithOffset(Offset),
7214 ST->getOriginalAlign(),
7215 ST->getMemOperand()->getFlags()));
7216 // Increment the pointers.
7217 Offset += RegBytes;
7218 StackPtr = DAG.getObjectPtrOffset(dl, StackPtr, StackPtrIncrement);
7219 Ptr = DAG.getObjectPtrOffset(dl, Ptr, PtrIncrement);
7220 }
7221
7222 // The last store may be partial. Do a truncating store. On big-endian
7223 // machines this requires an extending load from the stack slot to ensure
7224 // that the bits are in the right place.
7225 EVT LoadMemVT =
7226 EVT::getIntegerVT(*DAG.getContext(), 8 * (StoredBytes - Offset));
7227
7228 // Load from the stack slot.
7229 SDValue Load = DAG.getExtLoad(
7230 ISD::EXTLOAD, dl, RegVT, Store, StackPtr,
7231 MachinePointerInfo::getFixedStack(MF, FrameIndex, Offset), LoadMemVT);
7232
7233 Stores.push_back(
7234 DAG.getTruncStore(Load.getValue(1), dl, Load, Ptr,
7235 ST->getPointerInfo().getWithOffset(Offset), LoadMemVT,
7236 ST->getOriginalAlign(),
7237 ST->getMemOperand()->getFlags(), ST->getAAInfo()));
7238 // The order of the stores doesn't matter - say it with a TokenFactor.
7239 SDValue Result = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, Stores);
7240 return Result;
7241 }
7242
7243 assert(StoreMemVT.isInteger() && !StoreMemVT.isVector() &&
7244 "Unaligned store of unknown type.");
7245 // Get the half-size VT.
7246 EVT NewStoredVT = StoreMemVT.getHalfSizedIntegerVT(*DAG.getContext());
7247 unsigned NumBits = NewStoredVT.getFixedSizeInBits();
7248 unsigned IncrementSize = NumBits / 8;
7249
7250 // Divide the stored value into two parts.
7251 SDValue ShiftAmount = DAG.getConstant(
7252 NumBits, dl, getShiftAmountTy(Val.getValueType(), DAG.getDataLayout()));
7253 SDValue Lo = Val;
7254 SDValue Hi = DAG.getNode(ISD::SRL, dl, VT, Val, ShiftAmount);
7255
7256 // Store the two parts.
7257 SDValue Store1, Store2;
7258 Store1 = DAG.getTruncStore(Chain, dl,
7259 DAG.getDataLayout().isLittleEndian() ? Lo : Hi,
7260 Ptr, ST->getPointerInfo(), NewStoredVT, Alignment,
7261 ST->getMemOperand()->getFlags());
7262
7263 Ptr = DAG.getObjectPtrOffset(dl, Ptr, TypeSize::Fixed(IncrementSize));
7264 Store2 = DAG.getTruncStore(
7265 Chain, dl, DAG.getDataLayout().isLittleEndian() ?
Hi : Lo, Ptr,
7266 ST->getPointerInfo().getWithOffset(IncrementSize), NewStoredVT, Alignment,
7267 ST->getMemOperand()->getFlags(), ST->getAAInfo());
7268
7269 SDValue Result =
7270 DAG.getNode(ISD::TokenFactor, dl, MVT::Other, Store1, Store2);
7271 return Result;
7272 }
7273
7274 SDValue
7275 TargetLowering::IncrementMemoryAddress(SDValue Addr, SDValue Mask,
7276 const SDLoc &DL, EVT DataVT,
7277 SelectionDAG &DAG,
7278 bool IsCompressedMemory) const {
7279 SDValue Increment;
7280 EVT AddrVT = Addr.getValueType();
7281 EVT MaskVT = Mask.getValueType();
7282 assert(DataVT.getVectorElementCount() == MaskVT.getVectorElementCount() &&
7283 "Incompatible types of Data and Mask");
7284 if (IsCompressedMemory) {
7285 if (DataVT.isScalableVector())
7286 report_fatal_error(
7287 "Cannot currently handle compressed memory with scalable vectors");
7288 // Increment the pointer according to the number of '1's in the mask.
7289 EVT MaskIntVT = EVT::getIntegerVT(*DAG.getContext(), MaskVT.getSizeInBits());
7290 SDValue MaskInIntReg = DAG.getBitcast(MaskIntVT, Mask);
7291 if (MaskIntVT.getSizeInBits() < 32) {
7292 MaskInIntReg = DAG.getNode(ISD::ZERO_EXTEND, DL, MVT::i32, MaskInIntReg);
7293 MaskIntVT = MVT::i32;
7294 }
7295
7296 // Count '1's with POPCNT.
7297 Increment = DAG.getNode(ISD::CTPOP, DL, MaskIntVT, MaskInIntReg);
7298 Increment = DAG.getZExtOrTrunc(Increment, DL, AddrVT);
7299 // Scale is the element size in bytes.
7300 SDValue Scale = DAG.getConstant(DataVT.getScalarSizeInBits() / 8, DL,
7301 AddrVT);
7302 Increment = DAG.getNode(ISD::MUL, DL, AddrVT, Increment, Scale);
7303 } else if (DataVT.isScalableVector()) {
7304 Increment = DAG.getVScale(DL, AddrVT,
7305 APInt(AddrVT.getFixedSizeInBits(),
7306 DataVT.getStoreSize().getKnownMinSize()));
7307 } else
7308 Increment = DAG.getConstant(DataVT.getStoreSize(), DL, AddrVT);
7309
7310 return DAG.getNode(ISD::ADD, DL, AddrVT, Addr, Increment);
7311 }
7312
7313 static SDValue clampDynamicVectorIndex(SelectionDAG &DAG,
7314 SDValue Idx,
7315 EVT VecVT,
7316 const SDLoc &dl) {
7317 if (!VecVT.isScalableVector() && isa<ConstantSDNode>(Idx))
7318 return Idx;
7319
7320 EVT IdxVT = Idx.getValueType();
7321 unsigned NElts = VecVT.getVectorMinNumElements();
7322 if (VecVT.isScalableVector()) {
7323 SDValue VS = DAG.getVScale(dl, IdxVT,
7324 APInt(IdxVT.getFixedSizeInBits(),
7325 NElts));
7326 SDValue Sub = DAG.getNode(ISD::SUB, dl, IdxVT, VS,
7327 DAG.getConstant(1, dl, IdxVT));
7328
7329 return DAG.getNode(ISD::UMIN, dl, IdxVT, Idx, Sub);
7330 } else {
7331 if (isPowerOf2_32(NElts)) {
7332 APInt Imm = APInt::getLowBitsSet(IdxVT.getSizeInBits(),
7333 Log2_32(NElts));
7334 return DAG.getNode(ISD::AND, dl, IdxVT, Idx,
7335 DAG.getConstant(Imm, dl, IdxVT));
7336 }
7337 }
7338
7339 return DAG.getNode(ISD::UMIN, dl, IdxVT, Idx,
7340 DAG.getConstant(NElts - 1, dl, IdxVT));
7341 }
7342
7343 SDValue TargetLowering::getVectorElementPointer(SelectionDAG &DAG,
7344 SDValue VecPtr, EVT VecVT,
7345 SDValue Index) const {
7346 SDLoc dl(Index);
7347 // Make sure the index type is big enough to compute in.
7348 Index = DAG.getZExtOrTrunc(Index, dl, VecPtr.getValueType());
7349
7350 EVT EltVT = VecVT.getVectorElementType();
7351
7352 // Calculate the element offset and add it to the pointer.
7353 unsigned EltSize = EltVT.getFixedSizeInBits() / 8; // FIXME: should be ABI size.
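// (Illustrative: for a <4 x i32> vector this computes
// VecPtr + (Index & 3) * 4; the clamp keeps even an out-of-range dynamic
// index inside the vector's storage.)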
7354 assert(EltSize * 8 == EltVT.getFixedSizeInBits() &&
7355 "Converting bits to bytes lost precision");
7356
7357 Index = clampDynamicVectorIndex(DAG, Index, VecVT, dl);
7358
7359 EVT IdxVT = Index.getValueType();
7360
7361 Index = DAG.getNode(ISD::MUL, dl, IdxVT, Index,
7362 DAG.getConstant(EltSize, dl, IdxVT));
7363 return DAG.getMemBasePlusOffset(VecPtr, Index, dl);
7364 }
7365
7366 //===----------------------------------------------------------------------===//
7367 // Implementation of Emulated TLS Model
7368 //===----------------------------------------------------------------------===//
7369
7370 SDValue TargetLowering::LowerToTLSEmulatedModel(const GlobalAddressSDNode *GA,
7371 SelectionDAG &DAG) const {
7372 // Access to the address of TLS variable xyz is lowered to a function call:
7373 // __emutls_get_address( address of global variable named "__emutls_v.xyz" )
7374 EVT PtrVT = getPointerTy(DAG.getDataLayout());
7375 PointerType *VoidPtrType = Type::getInt8PtrTy(*DAG.getContext());
7376 SDLoc dl(GA);
7377
7378 ArgListTy Args;
7379 ArgListEntry Entry;
7380 std::string NameString = ("__emutls_v." + GA->getGlobal()->getName()).str();
7381 Module *VariableModule = const_cast<Module*>(GA->getGlobal()->getParent());
7382 StringRef EmuTlsVarName(NameString);
7383 GlobalVariable *EmuTlsVar = VariableModule->getNamedGlobal(EmuTlsVarName);
7384 assert(EmuTlsVar && "Cannot find EmuTlsVar ");
7385 Entry.Node = DAG.getGlobalAddress(EmuTlsVar, dl, PtrVT);
7386 Entry.Ty = VoidPtrType;
7387 Args.push_back(Entry);
7388
7389 SDValue EmuTlsGetAddr = DAG.getExternalSymbol("__emutls_get_address", PtrVT);
7390
7391 TargetLowering::CallLoweringInfo CLI(DAG);
7392 CLI.setDebugLoc(dl).setChain(DAG.getEntryNode());
7393 CLI.setLibCallee(CallingConv::C, VoidPtrType, EmuTlsGetAddr, std::move(Args));
7394 std::pair<SDValue, SDValue> CallResult = LowerCallTo(CLI);
7395
7396 // TLSADDR will be codegen'ed as a call. Inform MFI that the function has
7397 // calls. At least for X86 targets, maybe good for other targets too?
7398 MachineFrameInfo &MFI = DAG.getMachineFunction().getFrameInfo();
7399 MFI.setAdjustsStack(true); // Is this only for X86 target?
7400 MFI.setHasCalls(true); 7401 7402 assert((GA->getOffset() == 0) && 7403 "Emulated TLS must have zero offset in GlobalAddressSDNode"); 7404 return CallResult.first; 7405 } 7406 7407 SDValue TargetLowering::lowerCmpEqZeroToCtlzSrl(SDValue Op, 7408 SelectionDAG &DAG) const { 7409 assert((Op->getOpcode() == ISD::SETCC) && "Input has to be a SETCC node."); 7410 if (!isCtlzFast()) 7411 return SDValue(); 7412 ISD::CondCode CC = cast<CondCodeSDNode>(Op.getOperand(2))->get(); 7413 SDLoc dl(Op); 7414 if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op.getOperand(1))) { 7415 if (C->isNullValue() && CC == ISD::SETEQ) { 7416 EVT VT = Op.getOperand(0).getValueType(); 7417 SDValue Zext = Op.getOperand(0); 7418 if (VT.bitsLT(MVT::i32)) { 7419 VT = MVT::i32; 7420 Zext = DAG.getNode(ISD::ZERO_EXTEND, dl, VT, Op.getOperand(0)); 7421 } 7422 unsigned Log2b = Log2_32(VT.getSizeInBits()); 7423 SDValue Clz = DAG.getNode(ISD::CTLZ, dl, VT, Zext); 7424 SDValue Scc = DAG.getNode(ISD::SRL, dl, VT, Clz, 7425 DAG.getConstant(Log2b, dl, MVT::i32)); 7426 return DAG.getNode(ISD::TRUNCATE, dl, MVT::i32, Scc); 7427 } 7428 } 7429 return SDValue(); 7430 } 7431 7432 SDValue TargetLowering::expandAddSubSat(SDNode *Node, SelectionDAG &DAG) const { 7433 unsigned Opcode = Node->getOpcode(); 7434 SDValue LHS = Node->getOperand(0); 7435 SDValue RHS = Node->getOperand(1); 7436 EVT VT = LHS.getValueType(); 7437 SDLoc dl(Node); 7438 7439 assert(VT == RHS.getValueType() && "Expected operands to be the same type"); 7440 assert(VT.isInteger() && "Expected operands to be integers"); 7441 7442 // usub.sat(a, b) -> umax(a, b) - b 7443 if (Opcode == ISD::USUBSAT && isOperationLegalOrCustom(ISD::UMAX, VT)) { 7444 SDValue Max = DAG.getNode(ISD::UMAX, dl, VT, LHS, RHS); 7445 return DAG.getNode(ISD::SUB, dl, VT, Max, RHS); 7446 } 7447 7448 // uadd.sat(a, b) -> umin(a, ~b) + b 7449 if (Opcode == ISD::UADDSAT && isOperationLegalOrCustom(ISD::UMIN, VT)) { 7450 SDValue InvRHS = DAG.getNOT(dl, RHS, VT); 7451 SDValue Min = DAG.getNode(ISD::UMIN, dl, VT, LHS, InvRHS); 7452 return DAG.getNode(ISD::ADD, dl, VT, Min, RHS); 7453 } 7454 7455 unsigned OverflowOp; 7456 switch (Opcode) { 7457 case ISD::SADDSAT: 7458 OverflowOp = ISD::SADDO; 7459 break; 7460 case ISD::UADDSAT: 7461 OverflowOp = ISD::UADDO; 7462 break; 7463 case ISD::SSUBSAT: 7464 OverflowOp = ISD::SSUBO; 7465 break; 7466 case ISD::USUBSAT: 7467 OverflowOp = ISD::USUBO; 7468 break; 7469 default: 7470 llvm_unreachable("Expected method to receive signed or unsigned saturation " 7471 "addition or subtraction node."); 7472 } 7473 7474 // FIXME: Should really try to split the vector in case it's legal on a 7475 // subvector. 7476 if (VT.isVector() && !isOperationLegalOrCustom(ISD::VSELECT, VT)) 7477 return DAG.UnrollVectorOp(Node); 7478 7479 unsigned BitWidth = LHS.getScalarValueSizeInBits(); 7480 EVT BoolVT = getSetCCResultType(DAG.getDataLayout(), *DAG.getContext(), VT); 7481 SDValue Result = DAG.getNode(OverflowOp, dl, DAG.getVTList(VT, BoolVT), 7482 LHS, RHS); 7483 SDValue SumDiff = Result.getValue(0); 7484 SDValue Overflow = Result.getValue(1); 7485 SDValue Zero = DAG.getConstant(0, dl, VT); 7486 SDValue AllOnes = DAG.getAllOnesConstant(dl, VT); 7487 7488 if (Opcode == ISD::UADDSAT) { 7489 if (getBooleanContents(VT) == ZeroOrNegativeOneBooleanContent) { 7490 // (LHS + RHS) | OverflowMask 7491 SDValue OverflowMask = DAG.getSExtOrTrunc(Overflow, dl, VT); 7492 return DAG.getNode(ISD::OR, dl, VT, SumDiff, OverflowMask); 7493 } 7494 // Overflow ? 0xffff.... 
: (LHS + RHS) 7495 return DAG.getSelect(dl, VT, Overflow, AllOnes, SumDiff); 7496 } else if (Opcode == ISD::USUBSAT) { 7497 if (getBooleanContents(VT) == ZeroOrNegativeOneBooleanContent) { 7498 // (LHS - RHS) & ~OverflowMask 7499 SDValue OverflowMask = DAG.getSExtOrTrunc(Overflow, dl, VT); 7500 SDValue Not = DAG.getNOT(dl, OverflowMask, VT); 7501 return DAG.getNode(ISD::AND, dl, VT, SumDiff, Not); 7502 } 7503 // Overflow ? 0 : (LHS - RHS) 7504 return DAG.getSelect(dl, VT, Overflow, Zero, SumDiff); 7505 } else { 7506 // SatMax -> Overflow && SumDiff < 0 7507 // SatMin -> Overflow && SumDiff >= 0 7508 APInt MinVal = APInt::getSignedMinValue(BitWidth); 7509 APInt MaxVal = APInt::getSignedMaxValue(BitWidth); 7510 SDValue SatMin = DAG.getConstant(MinVal, dl, VT); 7511 SDValue SatMax = DAG.getConstant(MaxVal, dl, VT); 7512 SDValue SumNeg = DAG.getSetCC(dl, BoolVT, SumDiff, Zero, ISD::SETLT); 7513 Result = DAG.getSelect(dl, VT, SumNeg, SatMax, SatMin); 7514 return DAG.getSelect(dl, VT, Overflow, Result, SumDiff); 7515 } 7516 } 7517 7518 SDValue TargetLowering::expandShlSat(SDNode *Node, SelectionDAG &DAG) const { 7519 unsigned Opcode = Node->getOpcode(); 7520 bool IsSigned = Opcode == ISD::SSHLSAT; 7521 SDValue LHS = Node->getOperand(0); 7522 SDValue RHS = Node->getOperand(1); 7523 EVT VT = LHS.getValueType(); 7524 SDLoc dl(Node); 7525 7526 assert((Node->getOpcode() == ISD::SSHLSAT || 7527 Node->getOpcode() == ISD::USHLSAT) && 7528 "Expected a SHLSAT opcode"); 7529 assert(VT == RHS.getValueType() && "Expected operands to be the same type"); 7530 assert(VT.isInteger() && "Expected operands to be integers"); 7531 7532 // If LHS != (LHS << RHS) >> RHS, we have overflow and must saturate. 7533 7534 unsigned BW = VT.getScalarSizeInBits(); 7535 SDValue Result = DAG.getNode(ISD::SHL, dl, VT, LHS, RHS); 7536 SDValue Orig = 7537 DAG.getNode(IsSigned ? 
SDValue TargetLowering::expandShlSat(SDNode *Node, SelectionDAG &DAG) const {
  unsigned Opcode = Node->getOpcode();
  bool IsSigned = Opcode == ISD::SSHLSAT;
  SDValue LHS = Node->getOperand(0);
  SDValue RHS = Node->getOperand(1);
  EVT VT = LHS.getValueType();
  SDLoc dl(Node);

  assert((Node->getOpcode() == ISD::SSHLSAT ||
          Node->getOpcode() == ISD::USHLSAT) &&
         "Expected a SHLSAT opcode");
  assert(VT == RHS.getValueType() && "Expected operands to be the same type");
  assert(VT.isInteger() && "Expected operands to be integers");

  // If LHS != (LHS << RHS) >> RHS, we have overflow and must saturate.

  unsigned BW = VT.getScalarSizeInBits();
  SDValue Result = DAG.getNode(ISD::SHL, dl, VT, LHS, RHS);
  SDValue Orig =
      DAG.getNode(IsSigned ? ISD::SRA : ISD::SRL, dl, VT, Result, RHS);

  SDValue SatVal;
  if (IsSigned) {
    SDValue SatMin = DAG.getConstant(APInt::getSignedMinValue(BW), dl, VT);
    SDValue SatMax = DAG.getConstant(APInt::getSignedMaxValue(BW), dl, VT);
    SatVal = DAG.getSelectCC(dl, LHS, DAG.getConstant(0, dl, VT),
                             SatMin, SatMax, ISD::SETLT);
  } else {
    SatVal = DAG.getConstant(APInt::getMaxValue(BW), dl, VT);
  }
  Result = DAG.getSelectCC(dl, LHS, Orig, SatVal, Result, ISD::SETNE);

  return Result;
}
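// Illustrative worked example for the expansion below (hypothetical values):
// smul.fix on i16 with scale 4 encodes 1.5 as 24 and 2.5 as 40; the widened
// product is 960, and FSHR(Hi = 0, Lo = 960, 4) yields 60, which encodes
// 60 / 2^4 = 3.75, the exact result.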
SDValue
TargetLowering::expandFixedPointMul(SDNode *Node, SelectionDAG &DAG) const {
  assert((Node->getOpcode() == ISD::SMULFIX ||
          Node->getOpcode() == ISD::UMULFIX ||
          Node->getOpcode() == ISD::SMULFIXSAT ||
          Node->getOpcode() == ISD::UMULFIXSAT) &&
         "Expected a fixed point multiplication opcode");

  SDLoc dl(Node);
  SDValue LHS = Node->getOperand(0);
  SDValue RHS = Node->getOperand(1);
  EVT VT = LHS.getValueType();
  unsigned Scale = Node->getConstantOperandVal(2);
  bool Saturating = (Node->getOpcode() == ISD::SMULFIXSAT ||
                     Node->getOpcode() == ISD::UMULFIXSAT);
  bool Signed = (Node->getOpcode() == ISD::SMULFIX ||
                 Node->getOpcode() == ISD::SMULFIXSAT);
  EVT BoolVT = getSetCCResultType(DAG.getDataLayout(), *DAG.getContext(), VT);
  unsigned VTSize = VT.getScalarSizeInBits();

  if (!Scale) {
    // [us]mul.fix(a, b, 0) -> mul(a, b)
    if (!Saturating) {
      if (isOperationLegalOrCustom(ISD::MUL, VT))
        return DAG.getNode(ISD::MUL, dl, VT, LHS, RHS);
    } else if (Signed && isOperationLegalOrCustom(ISD::SMULO, VT)) {
      SDValue Result =
          DAG.getNode(ISD::SMULO, dl, DAG.getVTList(VT, BoolVT), LHS, RHS);
      SDValue Product = Result.getValue(0);
      SDValue Overflow = Result.getValue(1);
      SDValue Zero = DAG.getConstant(0, dl, VT);

      APInt MinVal = APInt::getSignedMinValue(VTSize);
      APInt MaxVal = APInt::getSignedMaxValue(VTSize);
      SDValue SatMin = DAG.getConstant(MinVal, dl, VT);
      SDValue SatMax = DAG.getConstant(MaxVal, dl, VT);
      SDValue ProdNeg = DAG.getSetCC(dl, BoolVT, Product, Zero, ISD::SETLT);
      Result = DAG.getSelect(dl, VT, ProdNeg, SatMax, SatMin);
      return DAG.getSelect(dl, VT, Overflow, Result, Product);
    } else if (!Signed && isOperationLegalOrCustom(ISD::UMULO, VT)) {
      SDValue Result =
          DAG.getNode(ISD::UMULO, dl, DAG.getVTList(VT, BoolVT), LHS, RHS);
      SDValue Product = Result.getValue(0);
      SDValue Overflow = Result.getValue(1);

      APInt MaxVal = APInt::getMaxValue(VTSize);
      SDValue SatMax = DAG.getConstant(MaxVal, dl, VT);
      return DAG.getSelect(dl, VT, Overflow, SatMax, Product);
    }
  }

  assert(((Signed && Scale < VTSize) || (!Signed && Scale <= VTSize)) &&
         "Expected scale to be less than the number of bits if signed or at "
         "most the number of bits if unsigned.");
  assert(LHS.getValueType() == RHS.getValueType() &&
         "Expected both operands to be the same type");

  // Get the upper and lower bits of the result.
  SDValue Lo, Hi;
  unsigned LoHiOp = Signed ? ISD::SMUL_LOHI : ISD::UMUL_LOHI;
  unsigned HiOp = Signed ? ISD::MULHS : ISD::MULHU;
  if (isOperationLegalOrCustom(LoHiOp, VT)) {
    SDValue Result = DAG.getNode(LoHiOp, dl, DAG.getVTList(VT, VT), LHS, RHS);
    Lo = Result.getValue(0);
    Hi = Result.getValue(1);
  } else if (isOperationLegalOrCustom(HiOp, VT)) {
    Lo = DAG.getNode(ISD::MUL, dl, VT, LHS, RHS);
    Hi = DAG.getNode(HiOp, dl, VT, LHS, RHS);
  } else if (VT.isVector()) {
    return SDValue();
  } else {
    report_fatal_error("Unable to expand fixed point multiplication.");
  }

  if (Scale == VTSize)
    // Result is just the top half since we'd be shifting by the width of the
    // operand. Overflow is impossible, so this works for both UMULFIX and
    // UMULFIXSAT.
    return Hi;

  // The result will need to be shifted right by the scale since both operands
  // are scaled. The result is given to us in 2 halves, so we only want part of
  // both in the result.
  EVT ShiftTy = getShiftAmountTy(VT, DAG.getDataLayout());
  SDValue Result = DAG.getNode(ISD::FSHR, dl, VT, Hi, Lo,
                               DAG.getConstant(Scale, dl, ShiftTy));
  if (!Saturating)
    return Result;

  if (!Signed) {
    // Unsigned overflow happened if the upper (VTSize - Scale) bits (of the
    // widened multiplication) aren't all zeroes.

    // Saturate to max if ((Hi >> Scale) != 0),
    // which is the same as if (Hi > ((1 << Scale) - 1)).
    APInt MaxVal = APInt::getMaxValue(VTSize);
    SDValue LowMask = DAG.getConstant(APInt::getLowBitsSet(VTSize, Scale),
                                      dl, VT);
    Result = DAG.getSelectCC(dl, Hi, LowMask,
                             DAG.getConstant(MaxVal, dl, VT), Result,
                             ISD::SETUGT);

    return Result;
  }

  // Signed overflow happened if the upper (VTSize - Scale + 1) bits (of the
  // widened multiplication) aren't all ones or all zeroes.

  SDValue SatMin = DAG.getConstant(APInt::getSignedMinValue(VTSize), dl, VT);
  SDValue SatMax = DAG.getConstant(APInt::getSignedMaxValue(VTSize), dl, VT);

  if (Scale == 0) {
    SDValue Sign = DAG.getNode(ISD::SRA, dl, VT, Lo,
                               DAG.getConstant(VTSize - 1, dl, ShiftTy));
    SDValue Overflow = DAG.getSetCC(dl, BoolVT, Hi, Sign, ISD::SETNE);
    // Saturate to SatMin if the wide product is negative, and to SatMax if
    // the wide product is positive ...
    SDValue Zero = DAG.getConstant(0, dl, VT);
    SDValue ResultIfOverflow = DAG.getSelectCC(dl, Hi, Zero, SatMin, SatMax,
                                               ISD::SETLT);
    // ... but only if we overflowed.
    return DAG.getSelect(dl, VT, Overflow, ResultIfOverflow, Result);
  }

  // We handled Scale == 0 above, so all the bits to examine are in Hi.

  // Saturate to max if ((Hi >> (Scale - 1)) > 0),
  // which is the same as if (Hi > ((1 << (Scale - 1)) - 1)).
  SDValue LowMask = DAG.getConstant(APInt::getLowBitsSet(VTSize, Scale - 1),
                                    dl, VT);
  Result = DAG.getSelectCC(dl, Hi, LowMask, SatMax, Result, ISD::SETGT);
  // Saturate to min if ((Hi >> (Scale - 1)) < -1),
  // which is the same as if (Hi < (-1 << (Scale - 1))).
  SDValue HighMask =
      DAG.getConstant(APInt::getHighBitsSet(VTSize, VTSize - Scale + 1),
                      dl, VT);
  Result = DAG.getSelectCC(dl, Hi, HighMask, SatMin, Result, ISD::SETLT);
  return Result;
}
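// Illustrative worked example for the expansion below (hypothetical values):
// udiv.fix on i16 with scale 4 conceptually computes (LHS << Scale) / RHS.
// For LHS = 24 (1.5) and RHS = 8 (0.5), the LHS has 11 leading zeroes of
// headroom, so it is shifted up by 4 and divided: 384 / 8 = 48, i.e. 3.0.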
SDValue
TargetLowering::expandFixedPointDiv(unsigned Opcode, const SDLoc &dl,
                                    SDValue LHS, SDValue RHS,
                                    unsigned Scale, SelectionDAG &DAG) const {
  assert((Opcode == ISD::SDIVFIX || Opcode == ISD::SDIVFIXSAT ||
          Opcode == ISD::UDIVFIX || Opcode == ISD::UDIVFIXSAT) &&
         "Expected a fixed point division opcode");

  EVT VT = LHS.getValueType();
  bool Signed = Opcode == ISD::SDIVFIX || Opcode == ISD::SDIVFIXSAT;
  bool Saturating = Opcode == ISD::SDIVFIXSAT || Opcode == ISD::UDIVFIXSAT;
  EVT BoolVT = getSetCCResultType(DAG.getDataLayout(), *DAG.getContext(), VT);

  // If there is enough room in the type to upscale the LHS or downscale the
  // RHS before the division, we can perform it in this type without having to
  // resize. For signed operations, the LHS headroom is the number of
  // redundant sign bits, and for unsigned ones it is the number of zeroes.
  // The headroom for the RHS is the number of trailing zeroes.
  unsigned LHSLead = Signed ? DAG.ComputeNumSignBits(LHS) - 1
                            : DAG.computeKnownBits(LHS).countMinLeadingZeros();
  unsigned RHSTrail = DAG.computeKnownBits(RHS).countMinTrailingZeros();

  // For signed saturating operations, we need to be able to detect true
  // integer division overflow; that is, when you have MIN / -EPS. However,
  // this is undefined behavior and if we emit divisions that could take such
  // values it may cause undesired behavior (arithmetic exceptions on x86, for
  // example).
  // Avoid this by requiring an extra bit so that we never get this case.
  // FIXME: This is a bit unfortunate as it means that for an 8-bit 7-scale
  // signed saturating division, we need to emit a whopping 32-bit division.
  if (LHSLead + RHSTrail < Scale + (unsigned)(Saturating && Signed))
    return SDValue();

  unsigned LHSShift = std::min(LHSLead, Scale);
  unsigned RHSShift = Scale - LHSShift;

  // At this point, we know that if we shift the LHS up by LHSShift and the
  // RHS down by RHSShift, we can emit a regular division with a final scaling
  // factor of Scale.

  EVT ShiftTy = getShiftAmountTy(VT, DAG.getDataLayout());
  if (LHSShift)
    LHS = DAG.getNode(ISD::SHL, dl, VT, LHS,
                      DAG.getConstant(LHSShift, dl, ShiftTy));
  if (RHSShift)
    RHS = DAG.getNode(Signed ? ISD::SRA : ISD::SRL, dl, VT, RHS,
                      DAG.getConstant(RHSShift, dl, ShiftTy));

  SDValue Quot;
  if (Signed) {
    // For signed operations, if the resulting quotient is negative and the
    // remainder is nonzero, subtract 1 from the quotient to round towards
    // negative infinity.
    SDValue Rem;
    // FIXME: Ideally we would always produce an SDIVREM here, but if the
    // type isn't legal, SDIVREM cannot be expanded. There is no reason why
    // we couldn't just form a libcall, but the type legalizer doesn't do it.
    if (isTypeLegal(VT) &&
        isOperationLegalOrCustom(ISD::SDIVREM, VT)) {
      Quot = DAG.getNode(ISD::SDIVREM, dl,
                         DAG.getVTList(VT, VT),
                         LHS, RHS);
      Rem = Quot.getValue(1);
      Quot = Quot.getValue(0);
    } else {
      Quot = DAG.getNode(ISD::SDIV, dl, VT,
                         LHS, RHS);
      Rem = DAG.getNode(ISD::SREM, dl, VT,
                        LHS, RHS);
    }
    SDValue Zero = DAG.getConstant(0, dl, VT);
    SDValue RemNonZero = DAG.getSetCC(dl, BoolVT, Rem, Zero, ISD::SETNE);
    SDValue LHSNeg = DAG.getSetCC(dl, BoolVT, LHS, Zero, ISD::SETLT);
    SDValue RHSNeg = DAG.getSetCC(dl, BoolVT, RHS, Zero, ISD::SETLT);
    SDValue QuotNeg = DAG.getNode(ISD::XOR, dl, BoolVT, LHSNeg, RHSNeg);
    SDValue Sub1 = DAG.getNode(ISD::SUB, dl, VT, Quot,
                               DAG.getConstant(1, dl, VT));
    Quot = DAG.getSelect(dl, VT,
                         DAG.getNode(ISD::AND, dl, BoolVT, RemNonZero, QuotNeg),
                         Sub1, Quot);
  } else
    Quot = DAG.getNode(ISD::UDIV, dl, VT,
                       LHS, RHS);

  return Quot;
}
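// Illustrative worked example for the expansion below (hypothetical values):
// for i8 uaddo(250, 10), the wrapped sum is 4, and 4 u< 250 detects the
// carry; for i8 usubo(5, 10), the wrapped difference is 251, and 251 u> 5
// detects the borrow.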
void TargetLowering::expandUADDSUBO(
    SDNode *Node, SDValue &Result, SDValue &Overflow, SelectionDAG &DAG) const {
  SDLoc dl(Node);
  SDValue LHS = Node->getOperand(0);
  SDValue RHS = Node->getOperand(1);
  bool IsAdd = Node->getOpcode() == ISD::UADDO;

  // If ADD/SUBCARRY is legal, use that instead.
  unsigned OpcCarry = IsAdd ? ISD::ADDCARRY : ISD::SUBCARRY;
  if (isOperationLegalOrCustom(OpcCarry, Node->getValueType(0))) {
    SDValue CarryIn = DAG.getConstant(0, dl, Node->getValueType(1));
    SDValue NodeCarry = DAG.getNode(OpcCarry, dl, Node->getVTList(),
                                    { LHS, RHS, CarryIn });
    Result = SDValue(NodeCarry.getNode(), 0);
    Overflow = SDValue(NodeCarry.getNode(), 1);
    return;
  }

  Result = DAG.getNode(IsAdd ? ISD::ADD : ISD::SUB, dl,
                       LHS.getValueType(), LHS, RHS);

  EVT ResultType = Node->getValueType(1);
  EVT SetCCType = getSetCCResultType(
      DAG.getDataLayout(), *DAG.getContext(), Node->getValueType(0));
  ISD::CondCode CC = IsAdd ? ISD::SETULT : ISD::SETUGT;
  SDValue SetCC = DAG.getSetCC(dl, SetCCType, Result, LHS, CC);
  Overflow = DAG.getBoolExtOrTrunc(SetCC, dl, ResultType, ResultType);
}
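// Illustrative worked example for the XOR trick below (hypothetical values):
// for i8 saddo(100, 50), the sum wraps to -106, so Result < LHS is true
// while RHS < 0 is false, and the XOR signals overflow; for saddo(100, -50)
// both conditions are true, so the XOR correctly reports no overflow.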
void TargetLowering::expandSADDSUBO(
    SDNode *Node, SDValue &Result, SDValue &Overflow, SelectionDAG &DAG) const {
  SDLoc dl(Node);
  SDValue LHS = Node->getOperand(0);
  SDValue RHS = Node->getOperand(1);
  bool IsAdd = Node->getOpcode() == ISD::SADDO;

  Result = DAG.getNode(IsAdd ? ISD::ADD : ISD::SUB, dl,
                       LHS.getValueType(), LHS, RHS);

  EVT ResultType = Node->getValueType(1);
  EVT OType = getSetCCResultType(
      DAG.getDataLayout(), *DAG.getContext(), Node->getValueType(0));

  // If SADDSAT/SSUBSAT is legal, compare results to detect overflow.
  unsigned OpcSat = IsAdd ? ISD::SADDSAT : ISD::SSUBSAT;
  if (isOperationLegalOrCustom(OpcSat, LHS.getValueType())) {
    SDValue Sat = DAG.getNode(OpcSat, dl, LHS.getValueType(), LHS, RHS);
    SDValue SetCC = DAG.getSetCC(dl, OType, Result, Sat, ISD::SETNE);
    Overflow = DAG.getBoolExtOrTrunc(SetCC, dl, ResultType, ResultType);
    return;
  }

  SDValue Zero = DAG.getConstant(0, dl, LHS.getValueType());

  // For an addition, the result should be less than one of the operands (LHS)
  // if and only if the other operand (RHS) is negative, otherwise there will
  // be overflow.
  // For a subtraction, the result should be less than one of the operands
  // (LHS) if and only if the other operand (RHS) is (non-zero) positive,
  // otherwise there will be overflow.
  SDValue ResultLowerThanLHS = DAG.getSetCC(dl, OType, Result, LHS, ISD::SETLT);
  SDValue ConditionRHS =
      DAG.getSetCC(dl, OType, RHS, Zero, IsAdd ? ISD::SETLT : ISD::SETGT);

  Overflow = DAG.getBoolExtOrTrunc(
      DAG.getNode(ISD::XOR, dl, OType, ConditionRHS, ResultLowerThanLHS), dl,
      ResultType, ResultType);
}
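// Illustrative worked example for the power-of-two path below (hypothetical
// values): for i8 smulo(x, 4) with x = 50, Result = 50 << 2 wraps to -56,
// and SRA(-56, 2) = -14 != 50 flags the overflow (50 * 4 = 200 exceeds 127).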
bool TargetLowering::expandMULO(SDNode *Node, SDValue &Result,
                                SDValue &Overflow, SelectionDAG &DAG) const {
  SDLoc dl(Node);
  EVT VT = Node->getValueType(0);
  EVT SetCCVT = getSetCCResultType(DAG.getDataLayout(), *DAG.getContext(), VT);
  SDValue LHS = Node->getOperand(0);
  SDValue RHS = Node->getOperand(1);
  bool isSigned = Node->getOpcode() == ISD::SMULO;

  // For power-of-two multiplications we can use a simpler shift expansion.
  if (ConstantSDNode *RHSC = isConstOrConstSplat(RHS)) {
    const APInt &C = RHSC->getAPIntValue();
    // mulo(X, 1 << S) -> { X << S, (X << S) >> S != X }
    if (C.isPowerOf2()) {
      // smulo(x, signed_min) is the same as umulo(x, signed_min).
      bool UseArithShift = isSigned && !C.isMinSignedValue();
      EVT ShiftAmtTy = getShiftAmountTy(VT, DAG.getDataLayout());
      SDValue ShiftAmt = DAG.getConstant(C.logBase2(), dl, ShiftAmtTy);
      Result = DAG.getNode(ISD::SHL, dl, VT, LHS, ShiftAmt);
      Overflow = DAG.getSetCC(dl, SetCCVT,
                              DAG.getNode(UseArithShift ? ISD::SRA : ISD::SRL,
                                          dl, VT, Result, ShiftAmt),
                              LHS, ISD::SETNE);
      return true;
    }
  }

  EVT WideVT =
      EVT::getIntegerVT(*DAG.getContext(), VT.getScalarSizeInBits() * 2);
  if (VT.isVector())
    WideVT = EVT::getVectorVT(*DAG.getContext(), WideVT,
                              VT.getVectorNumElements());

  SDValue BottomHalf;
  SDValue TopHalf;
  static const unsigned Ops[2][3] =
      { { ISD::MULHU, ISD::UMUL_LOHI, ISD::ZERO_EXTEND },
        { ISD::MULHS, ISD::SMUL_LOHI, ISD::SIGN_EXTEND }};
  if (isOperationLegalOrCustom(Ops[isSigned][0], VT)) {
    BottomHalf = DAG.getNode(ISD::MUL, dl, VT, LHS, RHS);
    TopHalf = DAG.getNode(Ops[isSigned][0], dl, VT, LHS, RHS);
  } else if (isOperationLegalOrCustom(Ops[isSigned][1], VT)) {
    BottomHalf = DAG.getNode(Ops[isSigned][1], dl, DAG.getVTList(VT, VT), LHS,
                             RHS);
    TopHalf = BottomHalf.getValue(1);
  } else if (isTypeLegal(WideVT)) {
    LHS = DAG.getNode(Ops[isSigned][2], dl, WideVT, LHS);
    RHS = DAG.getNode(Ops[isSigned][2], dl, WideVT, RHS);
    SDValue Mul = DAG.getNode(ISD::MUL, dl, WideVT, LHS, RHS);
    BottomHalf = DAG.getNode(ISD::TRUNCATE, dl, VT, Mul);
    SDValue ShiftAmt =
        DAG.getConstant(VT.getScalarSizeInBits(), dl,
                        getShiftAmountTy(WideVT, DAG.getDataLayout()));
    TopHalf = DAG.getNode(ISD::TRUNCATE, dl, VT,
                          DAG.getNode(ISD::SRL, dl, WideVT, Mul, ShiftAmt));
  } else {
    if (VT.isVector())
      return false;

    // We can fall back to a libcall with an illegal type for the MUL if we
    // have a libcall big enough.
    // Also, we can fall back to a division in some cases, but that's a big
    // performance hit in the general case.
    RTLIB::Libcall LC = RTLIB::UNKNOWN_LIBCALL;
    if (WideVT == MVT::i16)
      LC = RTLIB::MUL_I16;
    else if (WideVT == MVT::i32)
      LC = RTLIB::MUL_I32;
    else if (WideVT == MVT::i64)
      LC = RTLIB::MUL_I64;
    else if (WideVT == MVT::i128)
      LC = RTLIB::MUL_I128;
    assert(LC != RTLIB::UNKNOWN_LIBCALL && "Cannot expand this operation!");

    SDValue HiLHS;
    SDValue HiRHS;
    if (isSigned) {
      // The high part is obtained by SRA'ing all but one of the bits of low
      // part.
      unsigned LoSize = VT.getFixedSizeInBits();
      HiLHS =
          DAG.getNode(ISD::SRA, dl, VT, LHS,
                      DAG.getConstant(LoSize - 1, dl,
                                      getPointerTy(DAG.getDataLayout())));
      HiRHS =
          DAG.getNode(ISD::SRA, dl, VT, RHS,
                      DAG.getConstant(LoSize - 1, dl,
                                      getPointerTy(DAG.getDataLayout())));
    } else {
      HiLHS = DAG.getConstant(0, dl, VT);
      HiRHS = DAG.getConstant(0, dl, VT);
    }

    // Here we're passing the 2 arguments explicitly as 4 arguments that are
    // pre-lowered to the correct types. This all depends upon WideVT not
    // being a legal type for the architecture and thus having to be split
    // into two arguments.
    SDValue Ret;
    TargetLowering::MakeLibCallOptions CallOptions;
    CallOptions.setSExt(isSigned);
    CallOptions.setIsPostTypeLegalization(true);
    if (shouldSplitFunctionArgumentsAsLittleEndian(DAG.getDataLayout())) {
      // Halves of WideVT are packed into registers in different order
      // depending on platform endianness. This is usually handled by
      // the C calling convention, but we can't defer to it in
      // the legalizer.
      SDValue Args[] = { LHS, HiLHS, RHS, HiRHS };
      Ret = makeLibCall(DAG, LC, WideVT, Args, CallOptions, dl).first;
    } else {
      SDValue Args[] = { HiLHS, LHS, HiRHS, RHS };
      Ret = makeLibCall(DAG, LC, WideVT, Args, CallOptions, dl).first;
    }
    assert(Ret.getOpcode() == ISD::MERGE_VALUES &&
           "Ret value is a collection of constituent nodes holding result.");
    if (DAG.getDataLayout().isLittleEndian()) {
      // Same as above.
      BottomHalf = Ret.getOperand(0);
      TopHalf = Ret.getOperand(1);
    } else {
      BottomHalf = Ret.getOperand(1);
      TopHalf = Ret.getOperand(0);
    }
  }

  Result = BottomHalf;
  if (isSigned) {
    SDValue ShiftAmt = DAG.getConstant(
        VT.getScalarSizeInBits() - 1, dl,
        getShiftAmountTy(BottomHalf.getValueType(), DAG.getDataLayout()));
    SDValue Sign = DAG.getNode(ISD::SRA, dl, VT, BottomHalf, ShiftAmt);
    Overflow = DAG.getSetCC(dl, SetCCVT, TopHalf, Sign, ISD::SETNE);
  } else {
    Overflow = DAG.getSetCC(dl, SetCCVT, TopHalf,
                            DAG.getConstant(0, dl, VT), ISD::SETNE);
  }

  // Truncate the result if SetCC returns a larger type than needed.
  EVT RType = Node->getValueType(1);
  if (RType.bitsLT(Overflow.getValueType()))
    Overflow = DAG.getNode(ISD::TRUNCATE, dl, RType, Overflow);

  assert(RType.getSizeInBits() == Overflow.getValueSizeInBits() &&
         "Unexpected result type for S/UMULO legalization");
  return true;
}
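// Illustrative worked example for the expansion below (hypothetical values):
// vecreduce.add on <4 x i32> <1, 2, 3, 4> first folds the halves to
// <2 x i32> <1+3, 2+4> = <4, 6> while ADD is legal on the half type, then
// finishes with scalar adds: 4 + 6 = 10.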
SDValue TargetLowering::expandVecReduce(SDNode *Node, SelectionDAG &DAG) const {
  SDLoc dl(Node);
  unsigned BaseOpcode = 0;
  switch (Node->getOpcode()) {
  default: llvm_unreachable("Expected VECREDUCE opcode");
  case ISD::VECREDUCE_FADD: BaseOpcode = ISD::FADD; break;
  case ISD::VECREDUCE_FMUL: BaseOpcode = ISD::FMUL; break;
  case ISD::VECREDUCE_ADD:  BaseOpcode = ISD::ADD; break;
  case ISD::VECREDUCE_MUL:  BaseOpcode = ISD::MUL; break;
  case ISD::VECREDUCE_AND:  BaseOpcode = ISD::AND; break;
  case ISD::VECREDUCE_OR:   BaseOpcode = ISD::OR; break;
  case ISD::VECREDUCE_XOR:  BaseOpcode = ISD::XOR; break;
  case ISD::VECREDUCE_SMAX: BaseOpcode = ISD::SMAX; break;
  case ISD::VECREDUCE_SMIN: BaseOpcode = ISD::SMIN; break;
  case ISD::VECREDUCE_UMAX: BaseOpcode = ISD::UMAX; break;
  case ISD::VECREDUCE_UMIN: BaseOpcode = ISD::UMIN; break;
  case ISD::VECREDUCE_FMAX: BaseOpcode = ISD::FMAXNUM; break;
  case ISD::VECREDUCE_FMIN: BaseOpcode = ISD::FMINNUM; break;
  }

  SDValue Op = Node->getOperand(0);
  EVT VT = Op.getValueType();

  // Try to use a shuffle reduction for power of two vectors.
  if (VT.isPow2VectorType()) {
    while (VT.getVectorNumElements() > 1) {
      EVT HalfVT = VT.getHalfNumVectorElementsVT(*DAG.getContext());
      if (!isOperationLegalOrCustom(BaseOpcode, HalfVT))
        break;

      SDValue Lo, Hi;
      std::tie(Lo, Hi) = DAG.SplitVector(Op, dl);
      Op = DAG.getNode(BaseOpcode, dl, HalfVT, Lo, Hi);
      VT = HalfVT;
    }
  }

  EVT EltVT = VT.getVectorElementType();
  unsigned NumElts = VT.getVectorNumElements();

  SmallVector<SDValue, 8> Ops;
  DAG.ExtractVectorElements(Op, Ops, 0, NumElts);

  SDValue Res = Ops[0];
  for (unsigned i = 1; i < NumElts; i++)
    Res = DAG.getNode(BaseOpcode, dl, EltVT, Res, Ops[i], Node->getFlags());

  // Result type may be wider than element type.
  if (EltVT != Node->getValueType(0))
    Res = DAG.getNode(ISD::ANY_EXTEND, dl, Node->getValueType(0), Res);
  return Res;
}
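// Illustrative worked example for the fallback below (hypothetical values):
// urem(29, 8) with only UDIV legal becomes 29 - (29 / 8) * 8 = 29 - 24 = 5.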
bool TargetLowering::expandREM(SDNode *Node, SDValue &Result,
                               SelectionDAG &DAG) const {
  EVT VT = Node->getValueType(0);
  SDLoc dl(Node);
  bool isSigned = Node->getOpcode() == ISD::SREM;
  unsigned DivOpc = isSigned ? ISD::SDIV : ISD::UDIV;
  unsigned DivRemOpc = isSigned ? ISD::SDIVREM : ISD::UDIVREM;
  SDValue Dividend = Node->getOperand(0);
  SDValue Divisor = Node->getOperand(1);
  if (isOperationLegalOrCustom(DivRemOpc, VT)) {
    SDVTList VTs = DAG.getVTList(VT, VT);
    Result = DAG.getNode(DivRemOpc, dl, VTs, Dividend, Divisor).getValue(1);
    return true;
  } else if (isOperationLegalOrCustom(DivOpc, VT)) {
    // X % Y -> X - (X / Y) * Y
    SDValue Divide = DAG.getNode(DivOpc, dl, VT, Dividend, Divisor);
    SDValue Mul = DAG.getNode(ISD::MUL, dl, VT, Divide, Divisor);
    Result = DAG.getNode(ISD::SUB, dl, VT, Dividend, Mul);
    return true;
  }
  return false;
}