//===-- TargetLowering.cpp - Implement the TargetLowering class -----------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This implements the TargetLowering class.
//
//===----------------------------------------------------------------------===//

#include "llvm/CodeGen/TargetLowering.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/CodeGen/CallingConvLower.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineJumpTableInfo.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/SelectionDAG.h"
#include "llvm/CodeGen/TargetRegisterInfo.h"
#include "llvm/CodeGen/TargetSubtargetInfo.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/GlobalVariable.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/MC/MCAsmInfo.h"
#include "llvm/MC/MCExpr.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/KnownBits.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/Target/TargetLoweringObjectFile.h"
#include "llvm/Target/TargetMachine.h"
#include <cctype>
using namespace llvm;

/// NOTE: The TargetMachine owns TLOF.
TargetLowering::TargetLowering(const TargetMachine &tm)
    : TargetLoweringBase(tm) {}

const char *TargetLowering::getTargetNodeName(unsigned Opcode) const {
  return nullptr;
}

bool TargetLowering::isPositionIndependent() const {
  return getTargetMachine().isPositionIndependent();
}

/// Check whether a given call node is in tail position within its function. If
/// so, it sets Chain to the input chain of the tail call.
bool TargetLowering::isInTailCallPosition(SelectionDAG &DAG, SDNode *Node,
                                          SDValue &Chain) const {
  const Function &F = DAG.getMachineFunction().getFunction();

  // First, check if tail calls have been disabled in this function.
  if (F.getFnAttribute("disable-tail-calls").getValueAsString() == "true")
    return false;

  // Conservatively require the attributes of the call to match those of
  // the return. Ignore NoAlias and NonNull because they don't affect the
  // call sequence.
  AttributeList CallerAttrs = F.getAttributes();
  if (AttrBuilder(CallerAttrs, AttributeList::ReturnIndex)
          .removeAttribute(Attribute::NoAlias)
          .removeAttribute(Attribute::NonNull)
          .hasAttributes())
    return false;

  // It's not safe to eliminate the sign / zero extension of the return value.
  if (CallerAttrs.hasAttribute(AttributeList::ReturnIndex, Attribute::ZExt) ||
      CallerAttrs.hasAttribute(AttributeList::ReturnIndex, Attribute::SExt))
    return false;

  // Check if the only use is a function return node.
  return isUsedByReturnOnly(Node, Chain);
}

bool TargetLowering::parametersInCSRMatch(const MachineRegisterInfo &MRI,
    const uint32_t *CallerPreservedMask,
    const SmallVectorImpl<CCValAssign> &ArgLocs,
    const SmallVectorImpl<SDValue> &OutVals) const {
  for (unsigned I = 0, E = ArgLocs.size(); I != E; ++I) {
    const CCValAssign &ArgLoc = ArgLocs[I];
    if (!ArgLoc.isRegLoc())
      continue;
    MCRegister Reg = ArgLoc.getLocReg();
    // Only look at callee saved registers.
    if (MachineOperand::clobbersPhysReg(CallerPreservedMask, Reg))
      continue;
    // Check that we pass the value used for the caller.
    // (We look for a CopyFromReg reading a virtual register that is used
    //  for the function live-in value of register Reg)
    SDValue Value = OutVals[I];
    if (Value->getOpcode() != ISD::CopyFromReg)
      return false;
    Register ArgReg = cast<RegisterSDNode>(Value->getOperand(1))->getReg();
    if (MRI.getLiveInPhysReg(ArgReg) != Reg)
      return false;
  }
  return true;
}
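
// Illustrative example (register name is hypothetical): if a callee-saved
// register, say R4, is also used to pass an argument, the check above only
// succeeds when the outgoing value is a CopyFromReg of the very vreg holding
// the function's live-in copy of R4. Only then does the callee observably
// receive the caller's own incoming R4, so the value is preserved across a
// tail call despite R4 being callee-saved.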

/// Set CallLoweringInfo attribute flags based on a call instruction
/// and called function attributes.
void TargetLoweringBase::ArgListEntry::setAttributes(const CallBase *Call,
                                                     unsigned ArgIdx) {
  IsSExt = Call->paramHasAttr(ArgIdx, Attribute::SExt);
  IsZExt = Call->paramHasAttr(ArgIdx, Attribute::ZExt);
  IsInReg = Call->paramHasAttr(ArgIdx, Attribute::InReg);
  IsSRet = Call->paramHasAttr(ArgIdx, Attribute::StructRet);
  IsNest = Call->paramHasAttr(ArgIdx, Attribute::Nest);
  IsByVal = Call->paramHasAttr(ArgIdx, Attribute::ByVal);
  IsPreallocated = Call->paramHasAttr(ArgIdx, Attribute::Preallocated);
  IsInAlloca = Call->paramHasAttr(ArgIdx, Attribute::InAlloca);
  IsReturned = Call->paramHasAttr(ArgIdx, Attribute::Returned);
  IsSwiftSelf = Call->paramHasAttr(ArgIdx, Attribute::SwiftSelf);
  IsSwiftError = Call->paramHasAttr(ArgIdx, Attribute::SwiftError);
  Alignment = Call->getParamAlign(ArgIdx);
  ByValType = nullptr;
  if (IsByVal)
    ByValType = Call->getParamByValType(ArgIdx);
  PreallocatedType = nullptr;
  if (IsPreallocated)
    PreallocatedType = Call->getParamPreallocatedType(ArgIdx);
}

/// Generate a libcall taking the given operands as arguments and returning a
/// result of type RetVT.
std::pair<SDValue, SDValue>
TargetLowering::makeLibCall(SelectionDAG &DAG, RTLIB::Libcall LC, EVT RetVT,
                            ArrayRef<SDValue> Ops,
                            MakeLibCallOptions CallOptions,
                            const SDLoc &dl,
                            SDValue InChain) const {
  if (!InChain)
    InChain = DAG.getEntryNode();

  TargetLowering::ArgListTy Args;
  Args.reserve(Ops.size());

  TargetLowering::ArgListEntry Entry;
  for (unsigned i = 0; i < Ops.size(); ++i) {
    SDValue NewOp = Ops[i];
    Entry.Node = NewOp;
    Entry.Ty = Entry.Node.getValueType().getTypeForEVT(*DAG.getContext());
    Entry.IsSExt = shouldSignExtendTypeInLibCall(NewOp.getValueType(),
                                                 CallOptions.IsSExt);
    Entry.IsZExt = !Entry.IsSExt;

    if (CallOptions.IsSoften &&
        !shouldExtendTypeInLibCall(CallOptions.OpsVTBeforeSoften[i])) {
      Entry.IsSExt = Entry.IsZExt = false;
    }
    Args.push_back(Entry);
  }

  if (LC == RTLIB::UNKNOWN_LIBCALL)
    report_fatal_error("Unsupported library call operation!");
  SDValue Callee = DAG.getExternalSymbol(getLibcallName(LC),
                                         getPointerTy(DAG.getDataLayout()));

  Type *RetTy = RetVT.getTypeForEVT(*DAG.getContext());
  TargetLowering::CallLoweringInfo CLI(DAG);
  bool signExtend = shouldSignExtendTypeInLibCall(RetVT, CallOptions.IsSExt);
  bool zeroExtend = !signExtend;

  if (CallOptions.IsSoften &&
      !shouldExtendTypeInLibCall(CallOptions.RetVTBeforeSoften)) {
    signExtend = zeroExtend = false;
  }

  CLI.setDebugLoc(dl)
      .setChain(InChain)
      .setLibCallee(getLibcallCallingConv(LC), RetTy, Callee, std::move(Args))
      .setNoReturn(CallOptions.DoesNotReturn)
      .setDiscardResult(!CallOptions.IsReturnValueUsed)
      .setIsPostTypeLegalization(CallOptions.IsPostTypeLegalization)
      .setSExtResult(signExtend)
      .setZExtResult(zeroExtend);
  return LowerCallTo(CLI);
}
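
// Illustrative usage sketch (compiled out; the node shape and the choice of
// RTLIB::ADD_F128 are assumptions for the example, not code this file uses):
#if 0
static SDValue expandF128AddViaLibcall(const TargetLowering &TLI,
                                       SelectionDAG &DAG, SDNode *N) {
  // Pass both f128 operands through; makeLibCall builds the ArgListEntries,
  // resolves the libcall symbol, and lowers the call.
  SDValue Ops[2] = {N->getOperand(0), N->getOperand(1)};
  TargetLowering::MakeLibCallOptions CallOptions;
  // Returns {result, chain}; plain FP arithmetic only needs the value.
  std::pair<SDValue, SDValue> Res = TLI.makeLibCall(
      DAG, RTLIB::ADD_F128, MVT::f128, Ops, CallOptions, SDLoc(N));
  return Res.first;
}
#endif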

bool TargetLowering::findOptimalMemOpLowering(
    std::vector<EVT> &MemOps, unsigned Limit, const MemOp &Op, unsigned DstAS,
    unsigned SrcAS, const AttributeList &FuncAttributes) const {
  if (Op.isMemcpyWithFixedDstAlign() && Op.getSrcAlign() < Op.getDstAlign())
    return false;

  EVT VT = getOptimalMemOpType(Op, FuncAttributes);

  if (VT == MVT::Other) {
    // Use the largest integer type whose alignment constraints are satisfied.
    // We only need to check DstAlign here as SrcAlign is always greater than
    // or equal to DstAlign (or zero).
    VT = MVT::i64;
    if (Op.isFixedDstAlign())
      while (
          Op.getDstAlign() < (VT.getSizeInBits() / 8) &&
          !allowsMisalignedMemoryAccesses(VT, DstAS, Op.getDstAlign().value()))
        VT = (MVT::SimpleValueType)(VT.getSimpleVT().SimpleTy - 1);
    assert(VT.isInteger());

    // Find the largest legal integer type.
    MVT LVT = MVT::i64;
    while (!isTypeLegal(LVT))
      LVT = (MVT::SimpleValueType)(LVT.SimpleTy - 1);
    assert(LVT.isInteger());

    // If the type we've chosen is larger than the largest legal integer type
    // then use that instead.
    if (VT.bitsGT(LVT))
      VT = LVT;
  }

  unsigned NumMemOps = 0;
  uint64_t Size = Op.size();
  while (Size) {
    unsigned VTSize = VT.getSizeInBits() / 8;
    while (VTSize > Size) {
      // For now, only use non-vector load / stores for the left-over pieces.
      EVT NewVT = VT;
      unsigned NewVTSize;

      bool Found = false;
      if (VT.isVector() || VT.isFloatingPoint()) {
        NewVT = (VT.getSizeInBits() > 64) ? MVT::i64 : MVT::i32;
        if (isOperationLegalOrCustom(ISD::STORE, NewVT) &&
            isSafeMemOpType(NewVT.getSimpleVT()))
          Found = true;
        else if (NewVT == MVT::i64 &&
                 isOperationLegalOrCustom(ISD::STORE, MVT::f64) &&
                 isSafeMemOpType(MVT::f64)) {
          // i64 is usually not legal on 32-bit targets, but f64 may be.
          NewVT = MVT::f64;
          Found = true;
        }
      }

      if (!Found) {
        do {
          NewVT = (MVT::SimpleValueType)(NewVT.getSimpleVT().SimpleTy - 1);
          if (NewVT == MVT::i8)
            break;
        } while (!isSafeMemOpType(NewVT.getSimpleVT()));
      }
      NewVTSize = NewVT.getSizeInBits() / 8;

      // If the new VT cannot cover all of the remaining bits, then consider
      // issuing a (or a pair of) unaligned and overlapping load / store.
      bool Fast;
      if (NumMemOps && Op.allowOverlap() && NewVTSize < Size &&
          allowsMisalignedMemoryAccesses(
              VT, DstAS, Op.isFixedDstAlign() ? Op.getDstAlign().value() : 1,
              MachineMemOperand::MONone, &Fast) &&
          Fast)
        VTSize = Size;
      else {
        VT = NewVT;
        VTSize = NewVTSize;
      }
    }

    if (++NumMemOps > Limit)
      return false;

    MemOps.push_back(VT);
    Size -= VTSize;
  }

  return true;
}
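
// Worked example (illustrative): lowering a 13-byte memcpy when
// getOptimalMemOpType returns MVT::Other and i64 is the widest legal integer
// type. The loop above first emits an i64, leaving 5 bytes. Without overlap
// it then steps down through the integer types, giving
//   MemOps = { i64, i32, i8 }
// whereas with Op.allowOverlap() and fast misaligned accesses the 5-byte
// remainder is covered by a second, overlapping i64 store:
//   MemOps = { i64, i64 }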

/// Soften the operands of a comparison. This code is shared among BR_CC,
/// SELECT_CC, and SETCC handlers.
void TargetLowering::softenSetCCOperands(SelectionDAG &DAG, EVT VT,
                                         SDValue &NewLHS, SDValue &NewRHS,
                                         ISD::CondCode &CCCode,
                                         const SDLoc &dl, const SDValue OldLHS,
                                         const SDValue OldRHS) const {
  SDValue Chain;
  return softenSetCCOperands(DAG, VT, NewLHS, NewRHS, CCCode, dl, OldLHS,
                             OldRHS, Chain);
}

void TargetLowering::softenSetCCOperands(SelectionDAG &DAG, EVT VT,
                                         SDValue &NewLHS, SDValue &NewRHS,
                                         ISD::CondCode &CCCode,
                                         const SDLoc &dl, const SDValue OldLHS,
                                         const SDValue OldRHS,
                                         SDValue &Chain,
                                         bool IsSignaling) const {
  // FIXME: Currently we cannot really respect all IEEE predicates due to
  // libgcc not supporting them. We can update this code when libgcc provides
  // such functions.

  assert((VT == MVT::f32 || VT == MVT::f64 || VT == MVT::f128 ||
          VT == MVT::ppcf128) &&
         "Unsupported setcc type!");

  // Expand into one or more soft-fp libcall(s).
  RTLIB::Libcall LC1 = RTLIB::UNKNOWN_LIBCALL, LC2 = RTLIB::UNKNOWN_LIBCALL;
  bool ShouldInvertCC = false;
  switch (CCCode) {
  case ISD::SETEQ:
  case ISD::SETOEQ:
    LC1 = (VT == MVT::f32) ? RTLIB::OEQ_F32 :
          (VT == MVT::f64) ? RTLIB::OEQ_F64 :
          (VT == MVT::f128) ? RTLIB::OEQ_F128 : RTLIB::OEQ_PPCF128;
    break;
  case ISD::SETNE:
  case ISD::SETUNE:
    LC1 = (VT == MVT::f32) ? RTLIB::UNE_F32 :
          (VT == MVT::f64) ? RTLIB::UNE_F64 :
          (VT == MVT::f128) ? RTLIB::UNE_F128 : RTLIB::UNE_PPCF128;
    break;
  case ISD::SETGE:
  case ISD::SETOGE:
    LC1 = (VT == MVT::f32) ? RTLIB::OGE_F32 :
          (VT == MVT::f64) ? RTLIB::OGE_F64 :
          (VT == MVT::f128) ? RTLIB::OGE_F128 : RTLIB::OGE_PPCF128;
    break;
  case ISD::SETLT:
  case ISD::SETOLT:
    LC1 = (VT == MVT::f32) ? RTLIB::OLT_F32 :
          (VT == MVT::f64) ? RTLIB::OLT_F64 :
          (VT == MVT::f128) ? RTLIB::OLT_F128 : RTLIB::OLT_PPCF128;
    break;
  case ISD::SETLE:
  case ISD::SETOLE:
    LC1 = (VT == MVT::f32) ? RTLIB::OLE_F32 :
          (VT == MVT::f64) ? RTLIB::OLE_F64 :
          (VT == MVT::f128) ? RTLIB::OLE_F128 : RTLIB::OLE_PPCF128;
    break;
  case ISD::SETGT:
  case ISD::SETOGT:
    LC1 = (VT == MVT::f32) ? RTLIB::OGT_F32 :
          (VT == MVT::f64) ? RTLIB::OGT_F64 :
          (VT == MVT::f128) ? RTLIB::OGT_F128 : RTLIB::OGT_PPCF128;
    break;
  case ISD::SETO:
    ShouldInvertCC = true;
    LLVM_FALLTHROUGH;
  case ISD::SETUO:
    LC1 = (VT == MVT::f32) ? RTLIB::UO_F32 :
          (VT == MVT::f64) ? RTLIB::UO_F64 :
          (VT == MVT::f128) ? RTLIB::UO_F128 : RTLIB::UO_PPCF128;
    break;
  case ISD::SETONE:
    // SETONE = O && UNE
    ShouldInvertCC = true;
    LLVM_FALLTHROUGH;
  case ISD::SETUEQ:
    LC1 = (VT == MVT::f32) ? RTLIB::UO_F32 :
          (VT == MVT::f64) ? RTLIB::UO_F64 :
          (VT == MVT::f128) ? RTLIB::UO_F128 : RTLIB::UO_PPCF128;
    LC2 = (VT == MVT::f32) ? RTLIB::OEQ_F32 :
          (VT == MVT::f64) ? RTLIB::OEQ_F64 :
          (VT == MVT::f128) ? RTLIB::OEQ_F128 : RTLIB::OEQ_PPCF128;
    break;
  default:
    // Invert CC for unordered comparisons
    ShouldInvertCC = true;
    switch (CCCode) {
    case ISD::SETULT:
      LC1 = (VT == MVT::f32) ? RTLIB::OGE_F32 :
            (VT == MVT::f64) ? RTLIB::OGE_F64 :
            (VT == MVT::f128) ? RTLIB::OGE_F128 : RTLIB::OGE_PPCF128;
      break;
    case ISD::SETULE:
      LC1 = (VT == MVT::f32) ? RTLIB::OGT_F32 :
            (VT == MVT::f64) ? RTLIB::OGT_F64 :
            (VT == MVT::f128) ? RTLIB::OGT_F128 : RTLIB::OGT_PPCF128;
      break;
    case ISD::SETUGT:
      LC1 = (VT == MVT::f32) ? RTLIB::OLE_F32 :
            (VT == MVT::f64) ? RTLIB::OLE_F64 :
            (VT == MVT::f128) ? RTLIB::OLE_F128 : RTLIB::OLE_PPCF128;
      break;
    case ISD::SETUGE:
      LC1 = (VT == MVT::f32) ? RTLIB::OLT_F32 :
            (VT == MVT::f64) ? RTLIB::OLT_F64 :
            (VT == MVT::f128) ? RTLIB::OLT_F128 : RTLIB::OLT_PPCF128;
      break;
    default: llvm_unreachable("Do not know how to soften this setcc!");
    }
  }

  // Use the target specific return value for comparison lib calls.
  EVT RetVT = getCmpLibcallReturnType();
  SDValue Ops[2] = {NewLHS, NewRHS};
  TargetLowering::MakeLibCallOptions CallOptions;
  EVT OpsVT[2] = { OldLHS.getValueType(),
                   OldRHS.getValueType() };
  CallOptions.setTypeListBeforeSoften(OpsVT, RetVT, true);
  auto Call = makeLibCall(DAG, LC1, RetVT, Ops, CallOptions, dl, Chain);
  NewLHS = Call.first;
  NewRHS = DAG.getConstant(0, dl, RetVT);

  CCCode = getCmpLibcallCC(LC1);
  if (ShouldInvertCC) {
    assert(RetVT.isInteger());
    CCCode = getSetCCInverse(CCCode, RetVT);
  }

  if (LC2 == RTLIB::UNKNOWN_LIBCALL) {
    // Update Chain.
    Chain = Call.second;
  } else {
    EVT SetCCVT =
        getSetCCResultType(DAG.getDataLayout(), *DAG.getContext(), RetVT);
    SDValue Tmp = DAG.getSetCC(dl, SetCCVT, NewLHS, NewRHS, CCCode);
    auto Call2 = makeLibCall(DAG, LC2, RetVT, Ops, CallOptions, dl, Chain);
    CCCode = getCmpLibcallCC(LC2);
    if (ShouldInvertCC)
      CCCode = getSetCCInverse(CCCode, RetVT);
    NewLHS = DAG.getSetCC(dl, SetCCVT, Call2.first, NewRHS, CCCode);
    if (Chain)
      Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, Call.second,
                          Call2.second);
    NewLHS = DAG.getNode(ShouldInvertCC ? ISD::AND : ISD::OR, dl,
                         Tmp.getValueType(), Tmp, NewLHS);
    NewRHS = SDValue();
  }
}
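
// Example (illustrative): softening (setcc f32 %a, %b, setueq) takes the
// two-libcall path above with LC1 = UO_F32 (__unordsf2) and LC2 = OEQ_F32
// (__eqsf2). The predicate is rebuilt as
//   (__unordsf2(%a, %b) != 0) | (__eqsf2(%a, %b) == 0)
// i.e. true if the operands are unordered, or ordered and equal.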

/// Return the entry encoding for a jump table in the current function. The
/// returned value is a member of the MachineJumpTableInfo::JTEntryKind enum.
unsigned TargetLowering::getJumpTableEncoding() const {
  // In non-PIC modes, just use the address of a block.
  if (!isPositionIndependent())
    return MachineJumpTableInfo::EK_BlockAddress;

  // In PIC mode, if the target supports a GPRel32 directive, use it.
  if (getTargetMachine().getMCAsmInfo()->getGPRel32Directive() != nullptr)
    return MachineJumpTableInfo::EK_GPRel32BlockAddress;

  // Otherwise, use a label difference.
  return MachineJumpTableInfo::EK_LabelDifference32;
}

SDValue TargetLowering::getPICJumpTableRelocBase(SDValue Table,
                                                 SelectionDAG &DAG) const {
  // If our PIC model is GP relative, use the global offset table as the base.
  unsigned JTEncoding = getJumpTableEncoding();

  if ((JTEncoding == MachineJumpTableInfo::EK_GPRel64BlockAddress) ||
      (JTEncoding == MachineJumpTableInfo::EK_GPRel32BlockAddress))
    return DAG.getGLOBAL_OFFSET_TABLE(getPointerTy(DAG.getDataLayout()));

  return Table;
}

/// This returns the relocation base for the given PIC jumptable, the same as
/// getPICJumpTableRelocBase, but as an MCExpr.
const MCExpr *
TargetLowering::getPICJumpTableRelocBaseExpr(const MachineFunction *MF,
                                             unsigned JTI,
                                             MCContext &Ctx) const {
  // The normal PIC reloc base is the label at the start of the jump table.
  return MCSymbolRefExpr::create(MF->getJTISymbol(JTI, Ctx), Ctx);
}

bool
TargetLowering::isOffsetFoldingLegal(const GlobalAddressSDNode *GA) const {
  const TargetMachine &TM = getTargetMachine();
  const GlobalValue *GV = GA->getGlobal();

  // If the address is not even local to this DSO, we will have to load it from
  // the GOT and then add the offset.
  if (!TM.shouldAssumeDSOLocal(*GV->getParent(), GV))
    return false;

  // If the code is position independent we will have to add a base register.
  if (isPositionIndependent())
    return false;

  // Otherwise we can do it.
  return true;
}
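
// Example (illustrative): a typical ELF target built with -fPIC has no
// GPRel32 directive, so it gets EK_LabelDifference32: each jump-table entry
// is emitted as a 32-bit difference between the target block's label and the
// jump table's own label, which the dispatch sequence adds back to the table
// address at run time.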

//===----------------------------------------------------------------------===//
// Optimization Methods
//===----------------------------------------------------------------------===//

/// If the specified instruction has a constant integer operand and there are
/// bits set in that constant that are not demanded, then clear those bits and
/// return true.
bool TargetLowering::ShrinkDemandedConstant(SDValue Op,
                                            const APInt &DemandedBits,
                                            const APInt &DemandedElts,
                                            TargetLoweringOpt &TLO) const {
  SDLoc DL(Op);
  unsigned Opcode = Op.getOpcode();

  // Do target-specific constant optimization.
  if (targetShrinkDemandedConstant(Op, DemandedBits, DemandedElts, TLO))
    return TLO.New.getNode();

  // FIXME: ISD::SELECT, ISD::SELECT_CC
  switch (Opcode) {
  default:
    break;
  case ISD::XOR:
  case ISD::AND:
  case ISD::OR: {
    auto *Op1C = dyn_cast<ConstantSDNode>(Op.getOperand(1));
    if (!Op1C)
      return false;

    // If this is a 'not' op, don't touch it because that's a canonical form.
    const APInt &C = Op1C->getAPIntValue();
    if (Opcode == ISD::XOR && DemandedBits.isSubsetOf(C))
      return false;

    if (!C.isSubsetOf(DemandedBits)) {
      EVT VT = Op.getValueType();
      SDValue NewC = TLO.DAG.getConstant(DemandedBits & C, DL, VT);
      SDValue NewOp = TLO.DAG.getNode(Opcode, DL, VT, Op.getOperand(0), NewC);
      return TLO.CombineTo(Op, NewOp);
    }

    break;
  }
  }

  return false;
}

bool TargetLowering::ShrinkDemandedConstant(SDValue Op,
                                            const APInt &DemandedBits,
                                            TargetLoweringOpt &TLO) const {
  EVT VT = Op.getValueType();
  APInt DemandedElts = VT.isVector()
                           ? APInt::getAllOnesValue(VT.getVectorNumElements())
                           : APInt(1, 1);
  return ShrinkDemandedConstant(Op, DemandedBits, DemandedElts, TLO);
}
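
// Example (illustrative): with DemandedBits = 0x00FF, ShrinkDemandedConstant
// rewrites (or X, 0x1F0F) to (or X, 0x000F). Bits of the constant outside the
// demanded mask can never be observed, so clearing them exposes further
// simplification opportunities downstream.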

/// Convert x+y to (VT)((SmallVT)x+(SmallVT)y) if the casts are free.
/// This uses isZExtFree and ZERO_EXTEND for the widening cast, but it could be
/// generalized for targets with other types of implicit widening casts.
bool TargetLowering::ShrinkDemandedOp(SDValue Op, unsigned BitWidth,
                                      const APInt &Demanded,
                                      TargetLoweringOpt &TLO) const {
  assert(Op.getNumOperands() == 2 &&
         "ShrinkDemandedOp only supports binary operators!");
  assert(Op.getNode()->getNumValues() == 1 &&
         "ShrinkDemandedOp only supports nodes with one result!");

  SelectionDAG &DAG = TLO.DAG;
  SDLoc dl(Op);

  // Early return, as this function cannot handle vector types.
  if (Op.getValueType().isVector())
    return false;

  // Don't do this if the node has another user, which may require the
  // full value.
  if (!Op.getNode()->hasOneUse())
    return false;

  // Search for the smallest integer type with free casts to and from
  // Op's type. For expedience, just check power-of-2 integer types.
  const TargetLowering &TLI = DAG.getTargetLoweringInfo();
  unsigned DemandedSize = Demanded.getActiveBits();
  unsigned SmallVTBits = DemandedSize;
  if (!isPowerOf2_32(SmallVTBits))
    SmallVTBits = NextPowerOf2(SmallVTBits);
  for (; SmallVTBits < BitWidth; SmallVTBits = NextPowerOf2(SmallVTBits)) {
    EVT SmallVT = EVT::getIntegerVT(*DAG.getContext(), SmallVTBits);
    if (TLI.isTruncateFree(Op.getValueType(), SmallVT) &&
        TLI.isZExtFree(SmallVT, Op.getValueType())) {
      // We found a type with free casts.
      SDValue X = DAG.getNode(
          Op.getOpcode(), dl, SmallVT,
          DAG.getNode(ISD::TRUNCATE, dl, SmallVT, Op.getOperand(0)),
          DAG.getNode(ISD::TRUNCATE, dl, SmallVT, Op.getOperand(1)));
      assert(DemandedSize <= SmallVTBits && "Narrowed below demanded bits?");
      SDValue Z = DAG.getNode(ISD::ANY_EXTEND, dl, Op.getValueType(), X);
      return TLO.CombineTo(Op, Z);
    }
  }
  return false;
}
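
// Example (illustrative): if only the low 16 bits of a single-use
// (add i64 %x, %y) are demanded, and the target reports i64->i16 truncation
// and i16->i64 zero-extension as free, the loop above rewrites the node to
//   (any_extend i64 (add i16 (trunc %x), (trunc %y)))
// so the arithmetic happens in the narrower type.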

bool TargetLowering::SimplifyDemandedBits(SDValue Op, const APInt &DemandedBits,
                                          DAGCombinerInfo &DCI) const {
  SelectionDAG &DAG = DCI.DAG;
  TargetLoweringOpt TLO(DAG, !DCI.isBeforeLegalize(),
                        !DCI.isBeforeLegalizeOps());
  KnownBits Known;

  bool Simplified = SimplifyDemandedBits(Op, DemandedBits, Known, TLO);
  if (Simplified) {
    DCI.AddToWorklist(Op.getNode());
    DCI.CommitTargetLoweringOpt(TLO);
  }
  return Simplified;
}

bool TargetLowering::SimplifyDemandedBits(SDValue Op, const APInt &DemandedBits,
                                          KnownBits &Known,
                                          TargetLoweringOpt &TLO,
                                          unsigned Depth,
                                          bool AssumeSingleUse) const {
  EVT VT = Op.getValueType();

  // TODO: We can probably do more work on calculating the known bits and
  // simplifying the operations for scalable vectors, but for now we just
  // bail out.
  if (VT.isScalableVector()) {
    // Pretend we don't know anything for now.
    Known = KnownBits(DemandedBits.getBitWidth());
    return false;
  }

  APInt DemandedElts = VT.isVector()
                           ? APInt::getAllOnesValue(VT.getVectorNumElements())
                           : APInt(1, 1);
  return SimplifyDemandedBits(Op, DemandedBits, DemandedElts, Known, TLO, Depth,
                              AssumeSingleUse);
}

// TODO: Can we merge SelectionDAG::GetDemandedBits into this?
// TODO: Under what circumstances can we create nodes? Constant folding?
SDValue TargetLowering::SimplifyMultipleUseDemandedBits(
    SDValue Op, const APInt &DemandedBits, const APInt &DemandedElts,
    SelectionDAG &DAG, unsigned Depth) const {
  // Limit search depth.
  if (Depth >= SelectionDAG::MaxRecursionDepth)
    return SDValue();

  // Ignore UNDEFs.
  if (Op.isUndef())
    return SDValue();

  // Not demanding any bits/elts from Op.
  if (DemandedBits == 0 || DemandedElts == 0)
    return DAG.getUNDEF(Op.getValueType());

  unsigned NumElts = DemandedElts.getBitWidth();
  unsigned BitWidth = DemandedBits.getBitWidth();
  KnownBits LHSKnown, RHSKnown;
  switch (Op.getOpcode()) {
  case ISD::BITCAST: {
    SDValue Src = peekThroughBitcasts(Op.getOperand(0));
    EVT SrcVT = Src.getValueType();
    EVT DstVT = Op.getValueType();
    if (SrcVT == DstVT)
      return Src;

    unsigned NumSrcEltBits = SrcVT.getScalarSizeInBits();
    unsigned NumDstEltBits = DstVT.getScalarSizeInBits();
    if (NumSrcEltBits == NumDstEltBits)
      if (SDValue V = SimplifyMultipleUseDemandedBits(
              Src, DemandedBits, DemandedElts, DAG, Depth + 1))
        return DAG.getBitcast(DstVT, V);

    // TODO - bigendian once we have test coverage.
    if (SrcVT.isVector() && (NumDstEltBits % NumSrcEltBits) == 0 &&
        DAG.getDataLayout().isLittleEndian()) {
      unsigned Scale = NumDstEltBits / NumSrcEltBits;
      unsigned NumSrcElts = SrcVT.getVectorNumElements();
      APInt DemandedSrcBits = APInt::getNullValue(NumSrcEltBits);
      APInt DemandedSrcElts = APInt::getNullValue(NumSrcElts);
      for (unsigned i = 0; i != Scale; ++i) {
        unsigned Offset = i * NumSrcEltBits;
        APInt Sub = DemandedBits.extractBits(NumSrcEltBits, Offset);
        if (!Sub.isNullValue()) {
          DemandedSrcBits |= Sub;
          for (unsigned j = 0; j != NumElts; ++j)
            if (DemandedElts[j])
              DemandedSrcElts.setBit((j * Scale) + i);
        }
      }

      if (SDValue V = SimplifyMultipleUseDemandedBits(
              Src, DemandedSrcBits, DemandedSrcElts, DAG, Depth + 1))
        return DAG.getBitcast(DstVT, V);
    }

    // TODO - bigendian once we have test coverage.
    if ((NumSrcEltBits % NumDstEltBits) == 0 &&
        DAG.getDataLayout().isLittleEndian()) {
      unsigned Scale = NumSrcEltBits / NumDstEltBits;
      unsigned NumSrcElts = SrcVT.isVector() ? SrcVT.getVectorNumElements() : 1;
      APInt DemandedSrcBits = APInt::getNullValue(NumSrcEltBits);
      APInt DemandedSrcElts = APInt::getNullValue(NumSrcElts);
      for (unsigned i = 0; i != NumElts; ++i)
        if (DemandedElts[i]) {
          unsigned Offset = (i % Scale) * NumDstEltBits;
          DemandedSrcBits.insertBits(DemandedBits, Offset);
          DemandedSrcElts.setBit(i / Scale);
        }

      if (SDValue V = SimplifyMultipleUseDemandedBits(
              Src, DemandedSrcBits, DemandedSrcElts, DAG, Depth + 1))
        return DAG.getBitcast(DstVT, V);
    }

    break;
  }
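
  // Example (illustrative): on a little-endian target, demanding only bits
  // 0-7 of (i32 bitcast (v4i8 X)) maps to demanding just element 0 of X in
  // the narrow-source-element path of the BITCAST case above, so the other
  // three elements of X are ignored when looking through the bitcast.
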
  case ISD::AND: {
    LHSKnown = DAG.computeKnownBits(Op.getOperand(0), DemandedElts, Depth + 1);
    RHSKnown = DAG.computeKnownBits(Op.getOperand(1), DemandedElts, Depth + 1);

    // If all of the demanded bits are known 1 on one side, return the other.
    // These bits cannot contribute to the result of the 'and' in this
    // context.
    if (DemandedBits.isSubsetOf(LHSKnown.Zero | RHSKnown.One))
      return Op.getOperand(0);
    if (DemandedBits.isSubsetOf(RHSKnown.Zero | LHSKnown.One))
      return Op.getOperand(1);
    break;
  }
  case ISD::OR: {
    LHSKnown = DAG.computeKnownBits(Op.getOperand(0), DemandedElts, Depth + 1);
    RHSKnown = DAG.computeKnownBits(Op.getOperand(1), DemandedElts, Depth + 1);

    // If all of the demanded bits are known zero on one side, return the
    // other. These bits cannot contribute to the result of the 'or' in this
    // context.
    if (DemandedBits.isSubsetOf(LHSKnown.One | RHSKnown.Zero))
      return Op.getOperand(0);
    if (DemandedBits.isSubsetOf(RHSKnown.One | LHSKnown.Zero))
      return Op.getOperand(1);
    break;
  }
  case ISD::XOR: {
    LHSKnown = DAG.computeKnownBits(Op.getOperand(0), DemandedElts, Depth + 1);
    RHSKnown = DAG.computeKnownBits(Op.getOperand(1), DemandedElts, Depth + 1);

    // If all of the demanded bits are known zero on one side, return the
    // other.
    if (DemandedBits.isSubsetOf(RHSKnown.Zero))
      return Op.getOperand(0);
    if (DemandedBits.isSubsetOf(LHSKnown.Zero))
      return Op.getOperand(1);
    break;
  }
  case ISD::SHL: {
    // If we are only demanding sign bits then we can use the shift source
    // directly.
    if (const APInt *MaxSA =
            DAG.getValidMaximumShiftAmountConstant(Op, DemandedElts)) {
      SDValue Op0 = Op.getOperand(0);
      unsigned ShAmt = MaxSA->getZExtValue();
      unsigned NumSignBits =
          DAG.ComputeNumSignBits(Op0, DemandedElts, Depth + 1);
      unsigned UpperDemandedBits = BitWidth - DemandedBits.countTrailingZeros();
      if (NumSignBits > ShAmt && (NumSignBits - ShAmt) >= (UpperDemandedBits))
        return Op0;
    }
    break;
  }
  case ISD::SETCC: {
    SDValue Op0 = Op.getOperand(0);
    SDValue Op1 = Op.getOperand(1);
    ISD::CondCode CC = cast<CondCodeSDNode>(Op.getOperand(2))->get();
    // If (1) we only need the sign-bit, (2) the setcc operands are the same
    // width as the setcc result, and (3) the result of a setcc conforms to 0
    // or -1, we may be able to bypass the setcc.
    if (DemandedBits.isSignMask() &&
        Op0.getScalarValueSizeInBits() == BitWidth &&
        getBooleanContents(Op0.getValueType()) ==
            BooleanContent::ZeroOrNegativeOneBooleanContent) {
      // If we're testing X < 0, then this compare isn't needed - just use X!
      // FIXME: We're limiting to integer types here, but this should also work
      // if we don't care about FP signed-zero. The use of SETLT with FP means
      // that we don't care about NaNs.
      if (CC == ISD::SETLT && Op1.getValueType().isInteger() &&
          (isNullConstant(Op1) || ISD::isBuildVectorAllZeros(Op1.getNode())))
        return Op0;
    }
    break;
  }
  case ISD::SIGN_EXTEND_INREG: {
    // If none of the extended bits are demanded, eliminate the sextinreg.
    SDValue Op0 = Op.getOperand(0);
    EVT ExVT = cast<VTSDNode>(Op.getOperand(1))->getVT();
    unsigned ExBits = ExVT.getScalarSizeInBits();
    if (DemandedBits.getActiveBits() <= ExBits)
      return Op0;
    // If the input is already sign extended, just drop the extension.
    unsigned NumSignBits = DAG.ComputeNumSignBits(Op0, DemandedElts, Depth + 1);
    if (NumSignBits >= (BitWidth - ExBits + 1))
      return Op0;
    break;
  }
  case ISD::ANY_EXTEND_VECTOR_INREG:
  case ISD::SIGN_EXTEND_VECTOR_INREG:
  case ISD::ZERO_EXTEND_VECTOR_INREG: {
    // If we only want the lowest element and none of the extended bits, then
    // we can return the bitcasted source vector.
    SDValue Src = Op.getOperand(0);
    EVT SrcVT = Src.getValueType();
    EVT DstVT = Op.getValueType();
    if (DemandedElts == 1 && DstVT.getSizeInBits() == SrcVT.getSizeInBits() &&
        DAG.getDataLayout().isLittleEndian() &&
        DemandedBits.getActiveBits() <= SrcVT.getScalarSizeInBits()) {
      return DAG.getBitcast(DstVT, Src);
    }
    break;
  }
  case ISD::INSERT_VECTOR_ELT: {
    // If we don't demand the inserted element, return the base vector.
    SDValue Vec = Op.getOperand(0);
    auto *CIdx = dyn_cast<ConstantSDNode>(Op.getOperand(2));
    EVT VecVT = Vec.getValueType();
    if (CIdx && CIdx->getAPIntValue().ult(VecVT.getVectorNumElements()) &&
        !DemandedElts[CIdx->getZExtValue()])
      return Vec;
    break;
  }
  case ISD::INSERT_SUBVECTOR: {
    // If we don't demand the inserted subvector, return the base vector.
    SDValue Vec = Op.getOperand(0);
    SDValue Sub = Op.getOperand(1);
    uint64_t Idx = Op.getConstantOperandVal(2);
    unsigned NumSubElts = Sub.getValueType().getVectorNumElements();
    if (DemandedElts.extractBits(NumSubElts, Idx) == 0)
      return Vec;
    break;
  }
  case ISD::VECTOR_SHUFFLE: {
    ArrayRef<int> ShuffleMask = cast<ShuffleVectorSDNode>(Op)->getMask();

    // If all the demanded elts are from one operand and are inline,
    // then we can use the operand directly.
    bool AllUndef = true, IdentityLHS = true, IdentityRHS = true;
    for (unsigned i = 0; i != NumElts; ++i) {
      int M = ShuffleMask[i];
      if (M < 0 || !DemandedElts[i])
        continue;
      AllUndef = false;
      IdentityLHS &= (M == (int)i);
      IdentityRHS &= ((M - NumElts) == i);
    }

    if (AllUndef)
      return DAG.getUNDEF(Op.getValueType());
    if (IdentityLHS)
      return Op.getOperand(0);
    if (IdentityRHS)
      return Op.getOperand(1);
    break;
  }
  default:
    if (Op.getOpcode() >= ISD::BUILTIN_OP_END)
      if (SDValue V = SimplifyMultipleUseDemandedBitsForTargetNode(
              Op, DemandedBits, DemandedElts, DAG, Depth))
        return V;
    break;
  }
  return SDValue();
}

SDValue TargetLowering::SimplifyMultipleUseDemandedBits(
    SDValue Op, const APInt &DemandedBits, SelectionDAG &DAG,
    unsigned Depth) const {
  EVT VT = Op.getValueType();
  APInt DemandedElts = VT.isVector()
                           ? APInt::getAllOnesValue(VT.getVectorNumElements())
                           : APInt(1, 1);
  return SimplifyMultipleUseDemandedBits(Op, DemandedBits, DemandedElts, DAG,
                                         Depth);
}

SDValue TargetLowering::SimplifyMultipleUseDemandedVectorElts(
    SDValue Op, const APInt &DemandedElts, SelectionDAG &DAG,
    unsigned Depth) const {
  APInt DemandedBits = APInt::getAllOnesValue(Op.getScalarValueSizeInBits());
  return SimplifyMultipleUseDemandedBits(Op, DemandedBits, DemandedElts, DAG,
                                         Depth);
}
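
// Example (illustrative): with DemandedBits equal to just the sign mask, a
// multi-use (setcc X, 0, setlt) node on a target whose booleans are
// ZeroOrNegativeOneBooleanContent is folded to X by the SETCC case above,
// since X < 0 and X agree in the sign bit.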

/// Look at Op. At this point, we know that only the OriginalDemandedBits of the
/// result of Op are ever used downstream. If we can use this information to
/// simplify Op, create a new simplified DAG node and return true, returning the
/// original and new nodes in Old and New. Otherwise, analyze the expression and
/// return a mask of Known bits for the expression (used to simplify the
/// caller). The Known bits may only be accurate for those bits in the
/// OriginalDemandedBits and OriginalDemandedElts.
bool TargetLowering::SimplifyDemandedBits(
    SDValue Op, const APInt &OriginalDemandedBits,
    const APInt &OriginalDemandedElts, KnownBits &Known, TargetLoweringOpt &TLO,
    unsigned Depth, bool AssumeSingleUse) const {
  unsigned BitWidth = OriginalDemandedBits.getBitWidth();
  assert(Op.getScalarValueSizeInBits() == BitWidth &&
         "Mask size mismatches value type size!");

  // Don't know anything.
  Known = KnownBits(BitWidth);

  // TODO: We can probably do more work on calculating the known bits and
  // simplifying the operations for scalable vectors, but for now we just
  // bail out.
  if (Op.getValueType().isScalableVector())
    return false;

  unsigned NumElts = OriginalDemandedElts.getBitWidth();
  assert((!Op.getValueType().isVector() ||
          NumElts == Op.getValueType().getVectorNumElements()) &&
         "Unexpected vector size");

  APInt DemandedBits = OriginalDemandedBits;
  APInt DemandedElts = OriginalDemandedElts;
  SDLoc dl(Op);
  auto &DL = TLO.DAG.getDataLayout();

  // Undef operand.
  if (Op.isUndef())
    return false;

  if (Op.getOpcode() == ISD::Constant) {
    // We know all of the bits for a constant!
    Known = KnownBits::makeConstant(cast<ConstantSDNode>(Op)->getAPIntValue());
    return false;
  }

  if (Op.getOpcode() == ISD::ConstantFP) {
    // We know all of the bits for a floating point constant!
    Known = KnownBits::makeConstant(
        cast<ConstantFPSDNode>(Op)->getValueAPF().bitcastToAPInt());
    return false;
  }

  // Other users may use these bits.
  EVT VT = Op.getValueType();
  if (!Op.getNode()->hasOneUse() && !AssumeSingleUse) {
    if (Depth != 0) {
      // If not at the root, just compute the Known bits to
      // simplify things downstream.
      Known = TLO.DAG.computeKnownBits(Op, DemandedElts, Depth);
      return false;
    }
    // If this is the root being simplified, allow it to have multiple uses,
    // just set the DemandedBits/Elts to all bits.
    DemandedBits = APInt::getAllOnesValue(BitWidth);
    DemandedElts = APInt::getAllOnesValue(NumElts);
  } else if (OriginalDemandedBits == 0 || OriginalDemandedElts == 0) {
    // Not demanding any bits/elts from Op.
    return TLO.CombineTo(Op, TLO.DAG.getUNDEF(VT));
  } else if (Depth >= SelectionDAG::MaxRecursionDepth) {
    // Limit search depth.
    return false;
  }

  KnownBits Known2;
  switch (Op.getOpcode()) {
  case ISD::TargetConstant:
    llvm_unreachable("Can't simplify this node");
  case ISD::SCALAR_TO_VECTOR: {
    if (!DemandedElts[0])
      return TLO.CombineTo(Op, TLO.DAG.getUNDEF(VT));

    KnownBits SrcKnown;
    SDValue Src = Op.getOperand(0);
    unsigned SrcBitWidth = Src.getScalarValueSizeInBits();
    APInt SrcDemandedBits = DemandedBits.zextOrSelf(SrcBitWidth);
    if (SimplifyDemandedBits(Src, SrcDemandedBits, SrcKnown, TLO, Depth + 1))
      return true;

    // Upper elements are undef, so only get the known bits if we just demand
    // the bottom element.
    if (DemandedElts == 1)
      Known = SrcKnown.anyextOrTrunc(BitWidth);
    break;
  }
  case ISD::BUILD_VECTOR:
    // Collect the known bits that are shared by every demanded element.
    // TODO: Call SimplifyDemandedBits for non-constant demanded elements.
    Known = TLO.DAG.computeKnownBits(Op, DemandedElts, Depth);
    return false; // Don't fall through, will infinitely loop.
  case ISD::LOAD: {
    LoadSDNode *LD = cast<LoadSDNode>(Op);
    if (getTargetConstantFromLoad(LD)) {
      Known = TLO.DAG.computeKnownBits(Op, DemandedElts, Depth);
      return false; // Don't fall through, will infinitely loop.
    } else if (ISD::isZEXTLoad(Op.getNode()) && Op.getResNo() == 0) {
      // If this is a ZEXTLoad and we are looking at the loaded value.
      EVT MemVT = LD->getMemoryVT();
      unsigned MemBits = MemVT.getScalarSizeInBits();
      Known.Zero.setBitsFrom(MemBits);
      return false; // Don't fall through, will infinitely loop.
    }
    break;
  }
  case ISD::INSERT_VECTOR_ELT: {
    SDValue Vec = Op.getOperand(0);
    SDValue Scl = Op.getOperand(1);
    auto *CIdx = dyn_cast<ConstantSDNode>(Op.getOperand(2));
    EVT VecVT = Vec.getValueType();

    // If index isn't constant, assume we need all vector elements AND the
    // inserted element.
    APInt DemandedVecElts(DemandedElts);
    if (CIdx && CIdx->getAPIntValue().ult(VecVT.getVectorNumElements())) {
      unsigned Idx = CIdx->getZExtValue();
      DemandedVecElts.clearBit(Idx);

      // Inserted element is not required.
      if (!DemandedElts[Idx])
        return TLO.CombineTo(Op, Vec);
    }

    KnownBits KnownScl;
    unsigned NumSclBits = Scl.getScalarValueSizeInBits();
    APInt DemandedSclBits = DemandedBits.zextOrTrunc(NumSclBits);
    if (SimplifyDemandedBits(Scl, DemandedSclBits, KnownScl, TLO, Depth + 1))
      return true;

    Known = KnownScl.anyextOrTrunc(BitWidth);

    KnownBits KnownVec;
    if (SimplifyDemandedBits(Vec, DemandedBits, DemandedVecElts, KnownVec, TLO,
                             Depth + 1))
      return true;

    if (!!DemandedVecElts)
      Known = KnownBits::commonBits(Known, KnownVec);

    return false;
  }
  case ISD::INSERT_SUBVECTOR: {
    // Demand any elements from the subvector and the remainder from the src
    // it's inserted into.
    SDValue Src = Op.getOperand(0);
    SDValue Sub = Op.getOperand(1);
    uint64_t Idx = Op.getConstantOperandVal(2);
    unsigned NumSubElts = Sub.getValueType().getVectorNumElements();
    APInt DemandedSubElts = DemandedElts.extractBits(NumSubElts, Idx);
    APInt DemandedSrcElts = DemandedElts;
    DemandedSrcElts.insertBits(APInt::getNullValue(NumSubElts), Idx);

    KnownBits KnownSub, KnownSrc;
    if (SimplifyDemandedBits(Sub, DemandedBits, DemandedSubElts, KnownSub, TLO,
                             Depth + 1))
      return true;
    if (SimplifyDemandedBits(Src, DemandedBits, DemandedSrcElts, KnownSrc, TLO,
                             Depth + 1))
      return true;

    Known.Zero.setAllBits();
    Known.One.setAllBits();
    if (!!DemandedSubElts)
      Known = KnownBits::commonBits(Known, KnownSub);
    if (!!DemandedSrcElts)
      Known = KnownBits::commonBits(Known, KnownSrc);

    // Attempt to avoid multi-use src if we don't need anything from it.
    if (!DemandedBits.isAllOnesValue() || !DemandedSubElts.isAllOnesValue() ||
        !DemandedSrcElts.isAllOnesValue()) {
      SDValue NewSub = SimplifyMultipleUseDemandedBits(
          Sub, DemandedBits, DemandedSubElts, TLO.DAG, Depth + 1);
      SDValue NewSrc = SimplifyMultipleUseDemandedBits(
          Src, DemandedBits, DemandedSrcElts, TLO.DAG, Depth + 1);
      if (NewSub || NewSrc) {
        NewSub = NewSub ? NewSub : Sub;
        NewSrc = NewSrc ? NewSrc : Src;
        SDValue NewOp = TLO.DAG.getNode(Op.getOpcode(), dl, VT, NewSrc, NewSub,
                                        Op.getOperand(2));
        return TLO.CombineTo(Op, NewOp);
      }
    }
    break;
  }
  case ISD::EXTRACT_SUBVECTOR: {
    // Offset the demanded elts by the subvector index.
    SDValue Src = Op.getOperand(0);
    if (Src.getValueType().isScalableVector())
      break;
    uint64_t Idx = Op.getConstantOperandVal(1);
    unsigned NumSrcElts = Src.getValueType().getVectorNumElements();
    APInt DemandedSrcElts = DemandedElts.zextOrSelf(NumSrcElts).shl(Idx);

    if (SimplifyDemandedBits(Src, DemandedBits, DemandedSrcElts, Known, TLO,
                             Depth + 1))
      return true;

    // Attempt to avoid multi-use src if we don't need anything from it.
    if (!DemandedBits.isAllOnesValue() || !DemandedSrcElts.isAllOnesValue()) {
      SDValue DemandedSrc = SimplifyMultipleUseDemandedBits(
          Src, DemandedBits, DemandedSrcElts, TLO.DAG, Depth + 1);
      if (DemandedSrc) {
        SDValue NewOp = TLO.DAG.getNode(Op.getOpcode(), dl, VT, DemandedSrc,
                                        Op.getOperand(1));
        return TLO.CombineTo(Op, NewOp);
      }
    }
    break;
  }
  case ISD::CONCAT_VECTORS: {
    Known.Zero.setAllBits();
    Known.One.setAllBits();
    EVT SubVT = Op.getOperand(0).getValueType();
    unsigned NumSubVecs = Op.getNumOperands();
    unsigned NumSubElts = SubVT.getVectorNumElements();
    for (unsigned i = 0; i != NumSubVecs; ++i) {
      APInt DemandedSubElts =
          DemandedElts.extractBits(NumSubElts, i * NumSubElts);
      if (SimplifyDemandedBits(Op.getOperand(i), DemandedBits, DemandedSubElts,
                               Known2, TLO, Depth + 1))
        return true;
      // Known bits are shared by every demanded subvector element.
      if (!!DemandedSubElts)
        Known = KnownBits::commonBits(Known, Known2);
    }
    break;
  }
  case ISD::VECTOR_SHUFFLE: {
    ArrayRef<int> ShuffleMask = cast<ShuffleVectorSDNode>(Op)->getMask();

    // Collect demanded elements from shuffle operands.
    APInt DemandedLHS(NumElts, 0);
    APInt DemandedRHS(NumElts, 0);
    for (unsigned i = 0; i != NumElts; ++i) {
      if (!DemandedElts[i])
        continue;
      int M = ShuffleMask[i];
      if (M < 0) {
        // For UNDEF elements, we don't know anything about the common state of
        // the shuffle result.
        DemandedLHS.clearAllBits();
        DemandedRHS.clearAllBits();
        break;
      }
      assert(0 <= M && M < (int)(2 * NumElts) && "Shuffle index out of range");
      if (M < (int)NumElts)
        DemandedLHS.setBit(M);
      else
        DemandedRHS.setBit(M - NumElts);
    }

    if (!!DemandedLHS || !!DemandedRHS) {
      SDValue Op0 = Op.getOperand(0);
      SDValue Op1 = Op.getOperand(1);

      Known.Zero.setAllBits();
      Known.One.setAllBits();
      if (!!DemandedLHS) {
        if (SimplifyDemandedBits(Op0, DemandedBits, DemandedLHS, Known2, TLO,
                                 Depth + 1))
          return true;
        Known = KnownBits::commonBits(Known, Known2);
      }
      if (!!DemandedRHS) {
        if (SimplifyDemandedBits(Op1, DemandedBits, DemandedRHS, Known2, TLO,
                                 Depth + 1))
          return true;
        Known = KnownBits::commonBits(Known, Known2);
      }

      // Attempt to avoid multi-use ops if we don't need anything from them.
      SDValue DemandedOp0 = SimplifyMultipleUseDemandedBits(
          Op0, DemandedBits, DemandedLHS, TLO.DAG, Depth + 1);
      SDValue DemandedOp1 = SimplifyMultipleUseDemandedBits(
          Op1, DemandedBits, DemandedRHS, TLO.DAG, Depth + 1);
      if (DemandedOp0 || DemandedOp1) {
        Op0 = DemandedOp0 ? DemandedOp0 : Op0;
        Op1 = DemandedOp1 ? DemandedOp1 : Op1;
        SDValue NewOp = TLO.DAG.getVectorShuffle(VT, dl, Op0, Op1, ShuffleMask);
        return TLO.CombineTo(Op, NewOp);
      }
    }
    break;
  }
  case ISD::AND: {
    SDValue Op0 = Op.getOperand(0);
    SDValue Op1 = Op.getOperand(1);

    // If the RHS is a constant, check to see if the LHS would be zero without
    // using the bits from the RHS. Below, we use knowledge about the RHS to
    // simplify the LHS, here we're using information from the LHS to simplify
    // the RHS.
    if (ConstantSDNode *RHSC = isConstOrConstSplat(Op1)) {
      // Do not increment Depth here; that can cause an infinite loop.
      KnownBits LHSKnown = TLO.DAG.computeKnownBits(Op0, DemandedElts, Depth);
      // If the LHS already has zeros where RHSC does, this 'and' is dead.
      if ((LHSKnown.Zero & DemandedBits) ==
          (~RHSC->getAPIntValue() & DemandedBits))
        return TLO.CombineTo(Op, Op0);

      // If any of the set bits in the RHS are known zero on the LHS, shrink
      // the constant.
      if (ShrinkDemandedConstant(Op, ~LHSKnown.Zero & DemandedBits,
                                 DemandedElts, TLO))
        return true;

      // Bitwise-not (xor X, -1) is a special case: we don't usually shrink its
      // constant, but if this 'and' is only clearing bits that were just set by
      // the xor, then this 'and' can be eliminated by shrinking the mask of
      // the xor. For example, for a 32-bit X:
      // and (xor (srl X, 31), -1), 1 --> xor (srl X, 31), 1
      if (isBitwiseNot(Op0) && Op0.hasOneUse() &&
          LHSKnown.One == ~RHSC->getAPIntValue()) {
        SDValue Xor = TLO.DAG.getNode(ISD::XOR, dl, VT, Op0.getOperand(0), Op1);
        return TLO.CombineTo(Op, Xor);
      }
    }

    if (SimplifyDemandedBits(Op1, DemandedBits, DemandedElts, Known, TLO,
                             Depth + 1))
      return true;
    assert(!Known.hasConflict() && "Bits known to be one AND zero?");
    if (SimplifyDemandedBits(Op0, ~Known.Zero & DemandedBits, DemandedElts,
                             Known2, TLO, Depth + 1))
      return true;
    assert(!Known2.hasConflict() && "Bits known to be one AND zero?");

    // Attempt to avoid multi-use ops if we don't need anything from them.
    if (!DemandedBits.isAllOnesValue() || !DemandedElts.isAllOnesValue()) {
      SDValue DemandedOp0 = SimplifyMultipleUseDemandedBits(
          Op0, DemandedBits, DemandedElts, TLO.DAG, Depth + 1);
      SDValue DemandedOp1 = SimplifyMultipleUseDemandedBits(
          Op1, DemandedBits, DemandedElts, TLO.DAG, Depth + 1);
      if (DemandedOp0 || DemandedOp1) {
        Op0 = DemandedOp0 ? DemandedOp0 : Op0;
        Op1 = DemandedOp1 ? DemandedOp1 : Op1;
        SDValue NewOp = TLO.DAG.getNode(Op.getOpcode(), dl, VT, Op0, Op1);
        return TLO.CombineTo(Op, NewOp);
      }
    }

    // If all of the demanded bits are known one on one side, return the other.
    // These bits cannot contribute to the result of the 'and'.
    if (DemandedBits.isSubsetOf(Known2.Zero | Known.One))
      return TLO.CombineTo(Op, Op0);
    if (DemandedBits.isSubsetOf(Known.Zero | Known2.One))
      return TLO.CombineTo(Op, Op1);
    // If all of the demanded bits in the inputs are known zeros, return zero.
    if (DemandedBits.isSubsetOf(Known.Zero | Known2.Zero))
      return TLO.CombineTo(Op, TLO.DAG.getConstant(0, dl, VT));
    // If the RHS is a constant, see if we can simplify it.
    if (ShrinkDemandedConstant(Op, ~Known2.Zero & DemandedBits, DemandedElts,
                               TLO))
      return true;
    // If the operation can be done in a smaller type, do so.
    if (ShrinkDemandedOp(Op, BitWidth, DemandedBits, TLO))
      return true;

    Known &= Known2;
    break;
  }
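
  // Example (illustrative): with DemandedBits = 0x0F, the node
  //   (and X, (or Y, 0x0F))
  // is replaced by X in the subset checks of the AND case above: the RHS is
  // known-one in every demanded bit, so it cannot clear any bit the user
  // cares about.
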
  case ISD::OR: {
    SDValue Op0 = Op.getOperand(0);
    SDValue Op1 = Op.getOperand(1);

    if (SimplifyDemandedBits(Op1, DemandedBits, DemandedElts, Known, TLO,
                             Depth + 1))
      return true;
    assert(!Known.hasConflict() && "Bits known to be one AND zero?");
    if (SimplifyDemandedBits(Op0, ~Known.One & DemandedBits, DemandedElts,
                             Known2, TLO, Depth + 1))
      return true;
    assert(!Known2.hasConflict() && "Bits known to be one AND zero?");

    // Attempt to avoid multi-use ops if we don't need anything from them.
    if (!DemandedBits.isAllOnesValue() || !DemandedElts.isAllOnesValue()) {
      SDValue DemandedOp0 = SimplifyMultipleUseDemandedBits(
          Op0, DemandedBits, DemandedElts, TLO.DAG, Depth + 1);
      SDValue DemandedOp1 = SimplifyMultipleUseDemandedBits(
          Op1, DemandedBits, DemandedElts, TLO.DAG, Depth + 1);
      if (DemandedOp0 || DemandedOp1) {
        Op0 = DemandedOp0 ? DemandedOp0 : Op0;
        Op1 = DemandedOp1 ? DemandedOp1 : Op1;
        SDValue NewOp = TLO.DAG.getNode(Op.getOpcode(), dl, VT, Op0, Op1);
        return TLO.CombineTo(Op, NewOp);
      }
    }

    // If all of the demanded bits are known zero on one side, return the
    // other. These bits cannot contribute to the result of the 'or'.
    if (DemandedBits.isSubsetOf(Known2.One | Known.Zero))
      return TLO.CombineTo(Op, Op0);
    if (DemandedBits.isSubsetOf(Known.One | Known2.Zero))
      return TLO.CombineTo(Op, Op1);
    // If the RHS is a constant, see if we can simplify it.
    if (ShrinkDemandedConstant(Op, DemandedBits, DemandedElts, TLO))
      return true;
    // If the operation can be done in a smaller type, do so.
    if (ShrinkDemandedOp(Op, BitWidth, DemandedBits, TLO))
      return true;

    Known |= Known2;
    break;
  }
  case ISD::XOR: {
    SDValue Op0 = Op.getOperand(0);
    SDValue Op1 = Op.getOperand(1);

    if (SimplifyDemandedBits(Op1, DemandedBits, DemandedElts, Known, TLO,
                             Depth + 1))
      return true;
    assert(!Known.hasConflict() && "Bits known to be one AND zero?");
    if (SimplifyDemandedBits(Op0, DemandedBits, DemandedElts, Known2, TLO,
                             Depth + 1))
      return true;
    assert(!Known2.hasConflict() && "Bits known to be one AND zero?");

    // Attempt to avoid multi-use ops if we don't need anything from them.
    if (!DemandedBits.isAllOnesValue() || !DemandedElts.isAllOnesValue()) {
      SDValue DemandedOp0 = SimplifyMultipleUseDemandedBits(
          Op0, DemandedBits, DemandedElts, TLO.DAG, Depth + 1);
      SDValue DemandedOp1 = SimplifyMultipleUseDemandedBits(
          Op1, DemandedBits, DemandedElts, TLO.DAG, Depth + 1);
      if (DemandedOp0 || DemandedOp1) {
        Op0 = DemandedOp0 ? DemandedOp0 : Op0;
        Op1 = DemandedOp1 ? DemandedOp1 : Op1;
        SDValue NewOp = TLO.DAG.getNode(Op.getOpcode(), dl, VT, Op0, Op1);
        return TLO.CombineTo(Op, NewOp);
      }
    }

    // If all of the demanded bits are known zero on one side, return the
    // other. These bits cannot contribute to the result of the 'xor'.
    if (DemandedBits.isSubsetOf(Known.Zero))
      return TLO.CombineTo(Op, Op0);
    if (DemandedBits.isSubsetOf(Known2.Zero))
      return TLO.CombineTo(Op, Op1);
    // If the operation can be done in a smaller type, do so.
    if (ShrinkDemandedOp(Op, BitWidth, DemandedBits, TLO))
      return true;

    // If all of the unknown bits are known to be zero on one side or the
    // other, turn this into an *inclusive* or.
    // e.g. (A & C1)^(B & C2) -> (A & C1)|(B & C2) iff C1&C2 == 0
    if (DemandedBits.isSubsetOf(Known.Zero | Known2.Zero))
      return TLO.CombineTo(Op, TLO.DAG.getNode(ISD::OR, dl, VT, Op0, Op1));

    ConstantSDNode *C = isConstOrConstSplat(Op1, DemandedElts);
    if (C) {
      // If one side is a constant, and all of the set bits in the constant are
      // also known set on the other side, turn this into an AND, as we know
      // the bits will be cleared.
      // e.g. (X | C1) ^ C2 --> (X | C1) & ~C2 iff (C1&C2) == C2
      // NB: it is okay if more bits are known than are requested
      if (C->getAPIntValue() == Known2.One) {
        SDValue ANDC =
            TLO.DAG.getConstant(~C->getAPIntValue() & DemandedBits, dl, VT);
        return TLO.CombineTo(Op, TLO.DAG.getNode(ISD::AND, dl, VT, Op0, ANDC));
      }

      // If the RHS is a constant, see if we can change it. Don't alter a -1
      // constant because that's a 'not' op, and that is better for combining
      // and codegen.
      if (!C->isAllOnesValue() &&
          DemandedBits.isSubsetOf(C->getAPIntValue())) {
        // We're flipping all demanded bits. Flip the undemanded bits too.
        SDValue New = TLO.DAG.getNOT(dl, Op0, VT);
        return TLO.CombineTo(Op, New);
      }
    }

    // If we can't turn this into a 'not', try to shrink the constant.
    if (!C || !C->isAllOnesValue())
      if (ShrinkDemandedConstant(Op, DemandedBits, DemandedElts, TLO))
        return true;

    Known ^= Known2;
    break;
  }
  case ISD::SELECT:
    if (SimplifyDemandedBits(Op.getOperand(2), DemandedBits, Known, TLO,
                             Depth + 1))
      return true;
    if (SimplifyDemandedBits(Op.getOperand(1), DemandedBits, Known2, TLO,
                             Depth + 1))
      return true;
    assert(!Known.hasConflict() && "Bits known to be one AND zero?");
    assert(!Known2.hasConflict() && "Bits known to be one AND zero?");

    // If the operands are constants, see if we can simplify them.
    if (ShrinkDemandedConstant(Op, DemandedBits, DemandedElts, TLO))
      return true;

    // Only known if known in both the LHS and RHS.
    Known = KnownBits::commonBits(Known, Known2);
    break;
  case ISD::SELECT_CC:
    if (SimplifyDemandedBits(Op.getOperand(3), DemandedBits, Known, TLO,
                             Depth + 1))
      return true;
    if (SimplifyDemandedBits(Op.getOperand(2), DemandedBits, Known2, TLO,
                             Depth + 1))
      return true;
    assert(!Known.hasConflict() && "Bits known to be one AND zero?");
    assert(!Known2.hasConflict() && "Bits known to be one AND zero?");

    // If the operands are constants, see if we can simplify them.
    if (ShrinkDemandedConstant(Op, DemandedBits, DemandedElts, TLO))
      return true;

    // Only known if known in both the LHS and RHS.
    Known = KnownBits::commonBits(Known, Known2);
    break;
  case ISD::SETCC: {
    SDValue Op0 = Op.getOperand(0);
    SDValue Op1 = Op.getOperand(1);
    ISD::CondCode CC = cast<CondCodeSDNode>(Op.getOperand(2))->get();
    // If (1) we only need the sign-bit, (2) the setcc operands are the same
    // width as the setcc result, and (3) the result of a setcc conforms to 0
    // or -1, we may be able to bypass the setcc.
    if (DemandedBits.isSignMask() &&
        Op0.getScalarValueSizeInBits() == BitWidth &&
        getBooleanContents(Op0.getValueType()) ==
            BooleanContent::ZeroOrNegativeOneBooleanContent) {
      // If we're testing X < 0, then this compare isn't needed - just use X!
      // FIXME: We're limiting to integer types here, but this should also work
      // if we don't care about FP signed-zero. The use of SETLT with FP means
      // that we don't care about NaNs.
      if (CC == ISD::SETLT && Op1.getValueType().isInteger() &&
          (isNullConstant(Op1) || ISD::isBuildVectorAllZeros(Op1.getNode())))
        return TLO.CombineTo(Op, Op0);

      // TODO: Should we check for other forms of sign-bit comparisons?
      // Examples: X <= -1, X >= 0
    }
    if (getBooleanContents(Op0.getValueType()) ==
            TargetLowering::ZeroOrOneBooleanContent &&
        BitWidth > 1)
      Known.Zero.setBitsFrom(1);
    break;
  }
  case ISD::SHL: {
    SDValue Op0 = Op.getOperand(0);
    SDValue Op1 = Op.getOperand(1);
    EVT ShiftVT = Op1.getValueType();

    if (const APInt *SA =
            TLO.DAG.getValidShiftAmountConstant(Op, DemandedElts)) {
      unsigned ShAmt = SA->getZExtValue();
      if (ShAmt == 0)
        return TLO.CombineTo(Op, Op0);

      // If this is ((X >>u C1) << ShAmt), see if we can simplify this into a
      // single shift. We can do this if the bottom bits (which are shifted
      // out) are never demanded.
      // TODO - support non-uniform vector amounts.
      if (Op0.getOpcode() == ISD::SRL) {
        if (!DemandedBits.intersects(APInt::getLowBitsSet(BitWidth, ShAmt))) {
          if (const APInt *SA2 =
                  TLO.DAG.getValidShiftAmountConstant(Op0, DemandedElts)) {
            unsigned C1 = SA2->getZExtValue();
            unsigned Opc = ISD::SHL;
            int Diff = ShAmt - C1;
            if (Diff < 0) {
              Diff = -Diff;
              Opc = ISD::SRL;
            }
            SDValue NewSA = TLO.DAG.getConstant(Diff, dl, ShiftVT);
            return TLO.CombineTo(
                Op, TLO.DAG.getNode(Opc, dl, VT, Op0.getOperand(0), NewSA));
          }
        }
      }

      // Convert (shl (anyext x, c)) to (anyext (shl x, c)) if the high bits
      // are not demanded. This will likely allow the anyext to be folded away.
      // TODO - support non-uniform vector amounts.
      if (Op0.getOpcode() == ISD::ANY_EXTEND) {
        SDValue InnerOp = Op0.getOperand(0);
        EVT InnerVT = InnerOp.getValueType();
        unsigned InnerBits = InnerVT.getScalarSizeInBits();
        if (ShAmt < InnerBits && DemandedBits.getActiveBits() <= InnerBits &&
            isTypeDesirableForOp(ISD::SHL, InnerVT)) {
          EVT ShTy = getShiftAmountTy(InnerVT, DL);
          if (!APInt(BitWidth, ShAmt).isIntN(ShTy.getSizeInBits()))
            ShTy = InnerVT;
          SDValue NarrowShl =
              TLO.DAG.getNode(ISD::SHL, dl, InnerVT, InnerOp,
                              TLO.DAG.getConstant(ShAmt, dl, ShTy));
          return TLO.CombineTo(
              Op, TLO.DAG.getNode(ISD::ANY_EXTEND, dl, VT, NarrowShl));
        }

        // Repeat the SHL optimization above in cases where an extension
        // intervenes: (shl (anyext (shr x, c1)), c2) to
        // (shl (anyext x), c2-c1). This requires that the bottom c1 bits
        // aren't demanded (as above) and that the shifted upper c1 bits of
        // x aren't demanded.
        // TODO - support non-uniform vector amounts.
        if (Op0.hasOneUse() && InnerOp.getOpcode() == ISD::SRL &&
            InnerOp.hasOneUse()) {
          if (const APInt *SA2 =
                  TLO.DAG.getValidShiftAmountConstant(InnerOp, DemandedElts)) {
            unsigned InnerShAmt = SA2->getZExtValue();
            if (InnerShAmt < ShAmt && InnerShAmt < InnerBits &&
                DemandedBits.getActiveBits() <=
                    (InnerBits - InnerShAmt + ShAmt) &&
                DemandedBits.countTrailingZeros() >= ShAmt) {
              SDValue NewSA =
                  TLO.DAG.getConstant(ShAmt - InnerShAmt, dl, ShiftVT);
              SDValue NewExt = TLO.DAG.getNode(ISD::ANY_EXTEND, dl, VT,
                                               InnerOp.getOperand(0));
              return TLO.CombineTo(
                  Op, TLO.DAG.getNode(ISD::SHL, dl, VT, NewExt, NewSA));
            }
          }
        }
      }

      APInt InDemandedMask = DemandedBits.lshr(ShAmt);
      if (SimplifyDemandedBits(Op0, InDemandedMask, DemandedElts, Known, TLO,
                               Depth + 1))
        return true;
      assert(!Known.hasConflict() && "Bits known to be one AND zero?");
      Known.Zero <<= ShAmt;
      Known.One <<= ShAmt;
      // low bits known zero.
      Known.Zero.setLowBits(ShAmt);

      // Try shrinking the operation as long as the shift amount will still be
      // in range.
      if ((ShAmt < DemandedBits.getActiveBits()) &&
          ShrinkDemandedOp(Op, BitWidth, DemandedBits, TLO))
        return true;
    }

    // If we are only demanding sign bits then we can use the shift source
    // directly.
    if (const APInt *MaxSA =
            TLO.DAG.getValidMaximumShiftAmountConstant(Op, DemandedElts)) {
      unsigned ShAmt = MaxSA->getZExtValue();
      unsigned NumSignBits =
          TLO.DAG.ComputeNumSignBits(Op0, DemandedElts, Depth + 1);
      unsigned UpperDemandedBits = BitWidth - DemandedBits.countTrailingZeros();
      if (NumSignBits > ShAmt && (NumSignBits - ShAmt) >= (UpperDemandedBits))
        return TLO.CombineTo(Op, Op0);
    }
    break;
  }
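
  // Example (illustrative): for (shl (srl X, 1), 3) where the low three bits
  // are not demanded, the SRL fold in the SHL case above produces
  // (shl X, 2): every demanded bit i (i >= 3) equals bit i-2 of X either way,
  // and the low bits that differ were never demanded.
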
1572 if (SimplifyDemandedBits(Op0, InDemandedMask, DemandedElts, Known, TLO, 1573 Depth + 1)) 1574 return true; 1575 assert(!Known.hasConflict() && "Bits known to be one AND zero?"); 1576 Known.Zero.lshrInPlace(ShAmt); 1577 Known.One.lshrInPlace(ShAmt); 1578 // High bits known zero. 1579 Known.Zero.setHighBits(ShAmt); 1580 } 1581 break; 1582 } 1583 case ISD::SRA: { 1584 SDValue Op0 = Op.getOperand(0); 1585 SDValue Op1 = Op.getOperand(1); 1586 EVT ShiftVT = Op1.getValueType(); 1587 1588 // If we only want bits that already match the signbit then we don't need 1589 // to shift. 1590 unsigned NumHiDemandedBits = BitWidth - DemandedBits.countTrailingZeros(); 1591 if (TLO.DAG.ComputeNumSignBits(Op0, DemandedElts, Depth + 1) >= 1592 NumHiDemandedBits) 1593 return TLO.CombineTo(Op, Op0); 1594 1595 // If this is an arithmetic shift right and only the low-bit is set, we can 1596 // always convert this into a logical shr, even if the shift amount is 1597 // variable. The low bit of the shift cannot be an input sign bit unless 1598 // the shift amount is >= the size of the datatype, which is undefined. 1599 if (DemandedBits.isOneValue()) 1600 return TLO.CombineTo(Op, TLO.DAG.getNode(ISD::SRL, dl, VT, Op0, Op1)); 1601 1602 if (const APInt *SA = 1603 TLO.DAG.getValidShiftAmountConstant(Op, DemandedElts)) { 1604 unsigned ShAmt = SA->getZExtValue(); 1605 if (ShAmt == 0) 1606 return TLO.CombineTo(Op, Op0); 1607 1608 APInt InDemandedMask = (DemandedBits << ShAmt); 1609 1610 // If the shift is exact, then it does demand the low bits (and knows that 1611 // they are zero). 1612 if (Op->getFlags().hasExact()) 1613 InDemandedMask.setLowBits(ShAmt); 1614 1615 // If any of the demanded bits are produced by the sign extension, we also 1616 // demand the input sign bit. 1617 if (DemandedBits.countLeadingZeros() < ShAmt) 1618 InDemandedMask.setSignBit(); 1619 1620 if (SimplifyDemandedBits(Op0, InDemandedMask, DemandedElts, Known, TLO, 1621 Depth + 1)) 1622 return true; 1623 assert(!Known.hasConflict() && "Bits known to be one AND zero?"); 1624 Known.Zero.lshrInPlace(ShAmt); 1625 Known.One.lshrInPlace(ShAmt); 1626 1627 // If the input sign bit is known to be zero, or if none of the top bits 1628 // are demanded, turn this into an unsigned shift right. 1629 if (Known.Zero[BitWidth - ShAmt - 1] || 1630 DemandedBits.countLeadingZeros() >= ShAmt) { 1631 SDNodeFlags Flags; 1632 Flags.setExact(Op->getFlags().hasExact()); 1633 return TLO.CombineTo( 1634 Op, TLO.DAG.getNode(ISD::SRL, dl, VT, Op0, Op1, Flags)); 1635 } 1636 1637 int Log2 = DemandedBits.exactLogBase2(); 1638 if (Log2 >= 0) { 1639 // The bit must come from the sign. 1640 SDValue NewSA = TLO.DAG.getConstant(BitWidth - 1 - Log2, dl, ShiftVT); 1641 return TLO.CombineTo(Op, TLO.DAG.getNode(ISD::SRL, dl, VT, Op0, NewSA)); 1642 } 1643 1644 if (Known.One[BitWidth - ShAmt - 1]) 1645 // New bits are known one. 1646 Known.One.setHighBits(ShAmt); 1647 1648 // Attempt to avoid multi-use ops if we don't need anything from them. 
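      // (If Op0 has other uses, the recursion above could not change it;
      // SimplifyMultipleUseDemandedBits instead looks for an existing,
      // simpler node that computes the same demanded bits for this one use.)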
      if (!InDemandedMask.isAllOnesValue() || !DemandedElts.isAllOnesValue()) {
        SDValue DemandedOp0 = SimplifyMultipleUseDemandedBits(
            Op0, InDemandedMask, DemandedElts, TLO.DAG, Depth + 1);
        if (DemandedOp0) {
          SDValue NewOp = TLO.DAG.getNode(ISD::SRA, dl, VT, DemandedOp0, Op1);
          return TLO.CombineTo(Op, NewOp);
        }
      }
    }
    break;
  }
  case ISD::FSHL:
  case ISD::FSHR: {
    SDValue Op0 = Op.getOperand(0);
    SDValue Op1 = Op.getOperand(1);
    SDValue Op2 = Op.getOperand(2);
    bool IsFSHL = (Op.getOpcode() == ISD::FSHL);

    if (ConstantSDNode *SA = isConstOrConstSplat(Op2, DemandedElts)) {
      unsigned Amt = SA->getAPIntValue().urem(BitWidth);

      // For fshl, 0-shift returns the 1st arg.
      // For fshr, 0-shift returns the 2nd arg.
      if (Amt == 0) {
        if (SimplifyDemandedBits(IsFSHL ? Op0 : Op1, DemandedBits, DemandedElts,
                                 Known, TLO, Depth + 1))
          return true;
        break;
      }

      // fshl: (Op0 << Amt) | (Op1 >> (BW - Amt))
      // fshr: (Op0 << (BW - Amt)) | (Op1 >> Amt)
      APInt Demanded0 = DemandedBits.lshr(IsFSHL ? Amt : (BitWidth - Amt));
      APInt Demanded1 = DemandedBits << (IsFSHL ? (BitWidth - Amt) : Amt);
      if (SimplifyDemandedBits(Op0, Demanded0, DemandedElts, Known2, TLO,
                               Depth + 1))
        return true;
      if (SimplifyDemandedBits(Op1, Demanded1, DemandedElts, Known, TLO,
                               Depth + 1))
        return true;

      Known2.One <<= (IsFSHL ? Amt : (BitWidth - Amt));
      Known2.Zero <<= (IsFSHL ? Amt : (BitWidth - Amt));
      Known.One.lshrInPlace(IsFSHL ? (BitWidth - Amt) : Amt);
      Known.Zero.lshrInPlace(IsFSHL ? (BitWidth - Amt) : Amt);
      Known.One |= Known2.One;
      Known.Zero |= Known2.Zero;
    }

    // For pow-2 bitwidths we only demand the bottom modulo amt bits.
    if (isPowerOf2_32(BitWidth)) {
      APInt DemandedAmtBits(Op2.getScalarValueSizeInBits(), BitWidth - 1);
      if (SimplifyDemandedBits(Op2, DemandedAmtBits, DemandedElts,
                               Known2, TLO, Depth + 1))
        return true;
    }
    break;
  }
  case ISD::ROTL:
  case ISD::ROTR: {
    SDValue Op0 = Op.getOperand(0);
    SDValue Op1 = Op.getOperand(1);

    // If we're rotating a 0/-1 value, then it stays a 0/-1 value.
    if (BitWidth == TLO.DAG.ComputeNumSignBits(Op0, DemandedElts, Depth + 1))
      return TLO.CombineTo(Op, Op0);

    // For pow-2 bitwidths we only demand the bottom modulo amt bits.
    if (isPowerOf2_32(BitWidth)) {
      APInt DemandedAmtBits(Op1.getScalarValueSizeInBits(), BitWidth - 1);
      if (SimplifyDemandedBits(Op1, DemandedAmtBits, DemandedElts, Known2, TLO,
                               Depth + 1))
        return true;
    }
    break;
  }
  case ISD::UMIN: {
    // Check if one arg is always less than (or equal to) the other arg.
    SDValue Op0 = Op.getOperand(0);
    SDValue Op1 = Op.getOperand(1);
    KnownBits Known0 = TLO.DAG.computeKnownBits(Op0, DemandedElts, Depth + 1);
    KnownBits Known1 = TLO.DAG.computeKnownBits(Op1, DemandedElts, Depth + 1);
    Known = KnownBits::umin(Known0, Known1);
    if (Optional<bool> IsULE = KnownBits::ule(Known0, Known1))
      return TLO.CombineTo(Op, IsULE.getValue() ? Op0 : Op1);
    if (Optional<bool> IsULT = KnownBits::ult(Known0, Known1))
      return TLO.CombineTo(Op, IsULT.getValue() ? Op0 : Op1);
    break;
  }
  case ISD::UMAX: {
    // Check if one arg is always greater than (or equal to) the other arg.
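    // E.g. if Known0 proves the top bit of Op0 is set (Op0 >= 128 for i8)
    // and Known1 proves the top bit of Op1 is clear (Op1 <= 127), then
    // KnownBits::uge(Known0, Known1) is true and the umax folds to Op0.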
1740 SDValue Op0 = Op.getOperand(0); 1741 SDValue Op1 = Op.getOperand(1); 1742 KnownBits Known0 = TLO.DAG.computeKnownBits(Op0, DemandedElts, Depth + 1); 1743 KnownBits Known1 = TLO.DAG.computeKnownBits(Op1, DemandedElts, Depth + 1); 1744 Known = KnownBits::umax(Known0, Known1); 1745 if (Optional<bool> IsUGE = KnownBits::uge(Known0, Known1)) 1746 return TLO.CombineTo(Op, IsUGE.getValue() ? Op0 : Op1); 1747 if (Optional<bool> IsUGT = KnownBits::ugt(Known0, Known1)) 1748 return TLO.CombineTo(Op, IsUGT.getValue() ? Op0 : Op1); 1749 break; 1750 } 1751 case ISD::BITREVERSE: { 1752 SDValue Src = Op.getOperand(0); 1753 APInt DemandedSrcBits = DemandedBits.reverseBits(); 1754 if (SimplifyDemandedBits(Src, DemandedSrcBits, DemandedElts, Known2, TLO, 1755 Depth + 1)) 1756 return true; 1757 Known.One = Known2.One.reverseBits(); 1758 Known.Zero = Known2.Zero.reverseBits(); 1759 break; 1760 } 1761 case ISD::BSWAP: { 1762 SDValue Src = Op.getOperand(0); 1763 APInt DemandedSrcBits = DemandedBits.byteSwap(); 1764 if (SimplifyDemandedBits(Src, DemandedSrcBits, DemandedElts, Known2, TLO, 1765 Depth + 1)) 1766 return true; 1767 Known.One = Known2.One.byteSwap(); 1768 Known.Zero = Known2.Zero.byteSwap(); 1769 break; 1770 } 1771 case ISD::CTPOP: { 1772 // If only 1 bit is demanded, replace with PARITY as long as we're before 1773 // op legalization. 1774 // FIXME: Limit to scalars for now. 1775 if (DemandedBits.isOneValue() && !TLO.LegalOps && !VT.isVector()) 1776 return TLO.CombineTo(Op, TLO.DAG.getNode(ISD::PARITY, dl, VT, 1777 Op.getOperand(0))); 1778 1779 Known = TLO.DAG.computeKnownBits(Op, DemandedElts, Depth); 1780 break; 1781 } 1782 case ISD::SIGN_EXTEND_INREG: { 1783 SDValue Op0 = Op.getOperand(0); 1784 EVT ExVT = cast<VTSDNode>(Op.getOperand(1))->getVT(); 1785 unsigned ExVTBits = ExVT.getScalarSizeInBits(); 1786 1787 // If we only care about the highest bit, don't bother shifting right. 1788 if (DemandedBits.isSignMask()) { 1789 unsigned NumSignBits = 1790 TLO.DAG.ComputeNumSignBits(Op0, DemandedElts, Depth + 1); 1791 bool AlreadySignExtended = NumSignBits >= BitWidth - ExVTBits + 1; 1792 // However if the input is already sign extended we expect the sign 1793 // extension to be dropped altogether later and do not simplify. 1794 if (!AlreadySignExtended) { 1795 // Compute the correct shift amount type, which must be getShiftAmountTy 1796 // for scalar types after legalization. 1797 EVT ShiftAmtTy = VT; 1798 if (TLO.LegalTypes() && !ShiftAmtTy.isVector()) 1799 ShiftAmtTy = getShiftAmountTy(ShiftAmtTy, DL); 1800 1801 SDValue ShiftAmt = 1802 TLO.DAG.getConstant(BitWidth - ExVTBits, dl, ShiftAmtTy); 1803 return TLO.CombineTo(Op, 1804 TLO.DAG.getNode(ISD::SHL, dl, VT, Op0, ShiftAmt)); 1805 } 1806 } 1807 1808 // If none of the extended bits are demanded, eliminate the sextinreg. 1809 if (DemandedBits.getActiveBits() <= ExVTBits) 1810 return TLO.CombineTo(Op, Op0); 1811 1812 APInt InputDemandedBits = DemandedBits.getLoBits(ExVTBits); 1813 1814 // Since the sign extended bits are demanded, we know that the sign 1815 // bit is demanded. 1816 InputDemandedBits.setBit(ExVTBits - 1); 1817 1818 if (SimplifyDemandedBits(Op0, InputDemandedBits, Known, TLO, Depth + 1)) 1819 return true; 1820 assert(!Known.hasConflict() && "Bits known to be one AND zero?"); 1821 1822 // If the sign bit of the input is known set or clear, then we know the 1823 // top bits of the result. 1824 1825 // If the input sign bit is known zero, convert this into a zero extension. 
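    // E.g. (sext_inreg X, i8) where bit 7 of X is known zero: the copies of
    // the sign bit are zero anyway, so the zero-extending form is equivalent
    // and is usually easier to fold further.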
1826 if (Known.Zero[ExVTBits - 1]) 1827 return TLO.CombineTo(Op, TLO.DAG.getZeroExtendInReg(Op0, dl, ExVT)); 1828 1829 APInt Mask = APInt::getLowBitsSet(BitWidth, ExVTBits); 1830 if (Known.One[ExVTBits - 1]) { // Input sign bit known set 1831 Known.One.setBitsFrom(ExVTBits); 1832 Known.Zero &= Mask; 1833 } else { // Input sign bit unknown 1834 Known.Zero &= Mask; 1835 Known.One &= Mask; 1836 } 1837 break; 1838 } 1839 case ISD::BUILD_PAIR: { 1840 EVT HalfVT = Op.getOperand(0).getValueType(); 1841 unsigned HalfBitWidth = HalfVT.getScalarSizeInBits(); 1842 1843 APInt MaskLo = DemandedBits.getLoBits(HalfBitWidth).trunc(HalfBitWidth); 1844 APInt MaskHi = DemandedBits.getHiBits(HalfBitWidth).trunc(HalfBitWidth); 1845 1846 KnownBits KnownLo, KnownHi; 1847 1848 if (SimplifyDemandedBits(Op.getOperand(0), MaskLo, KnownLo, TLO, Depth + 1)) 1849 return true; 1850 1851 if (SimplifyDemandedBits(Op.getOperand(1), MaskHi, KnownHi, TLO, Depth + 1)) 1852 return true; 1853 1854 Known.Zero = KnownLo.Zero.zext(BitWidth) | 1855 KnownHi.Zero.zext(BitWidth).shl(HalfBitWidth); 1856 1857 Known.One = KnownLo.One.zext(BitWidth) | 1858 KnownHi.One.zext(BitWidth).shl(HalfBitWidth); 1859 break; 1860 } 1861 case ISD::ZERO_EXTEND: 1862 case ISD::ZERO_EXTEND_VECTOR_INREG: { 1863 SDValue Src = Op.getOperand(0); 1864 EVT SrcVT = Src.getValueType(); 1865 unsigned InBits = SrcVT.getScalarSizeInBits(); 1866 unsigned InElts = SrcVT.isVector() ? SrcVT.getVectorNumElements() : 1; 1867 bool IsVecInReg = Op.getOpcode() == ISD::ZERO_EXTEND_VECTOR_INREG; 1868 1869 // If none of the top bits are demanded, convert this into an any_extend. 1870 if (DemandedBits.getActiveBits() <= InBits) { 1871 // If we only need the non-extended bits of the bottom element 1872 // then we can just bitcast to the result. 1873 if (IsVecInReg && DemandedElts == 1 && 1874 VT.getSizeInBits() == SrcVT.getSizeInBits() && 1875 TLO.DAG.getDataLayout().isLittleEndian()) 1876 return TLO.CombineTo(Op, TLO.DAG.getBitcast(VT, Src)); 1877 1878 unsigned Opc = 1879 IsVecInReg ? ISD::ANY_EXTEND_VECTOR_INREG : ISD::ANY_EXTEND; 1880 if (!TLO.LegalOperations() || isOperationLegal(Opc, VT)) 1881 return TLO.CombineTo(Op, TLO.DAG.getNode(Opc, dl, VT, Src)); 1882 } 1883 1884 APInt InDemandedBits = DemandedBits.trunc(InBits); 1885 APInt InDemandedElts = DemandedElts.zextOrSelf(InElts); 1886 if (SimplifyDemandedBits(Src, InDemandedBits, InDemandedElts, Known, TLO, 1887 Depth + 1)) 1888 return true; 1889 assert(!Known.hasConflict() && "Bits known to be one AND zero?"); 1890 assert(Known.getBitWidth() == InBits && "Src width has changed?"); 1891 Known = Known.zext(BitWidth); 1892 1893 // Attempt to avoid multi-use ops if we don't need anything from them. 1894 if (SDValue NewSrc = SimplifyMultipleUseDemandedBits( 1895 Src, InDemandedBits, InDemandedElts, TLO.DAG, Depth + 1)) 1896 return TLO.CombineTo(Op, TLO.DAG.getNode(Op.getOpcode(), dl, VT, NewSrc)); 1897 break; 1898 } 1899 case ISD::SIGN_EXTEND: 1900 case ISD::SIGN_EXTEND_VECTOR_INREG: { 1901 SDValue Src = Op.getOperand(0); 1902 EVT SrcVT = Src.getValueType(); 1903 unsigned InBits = SrcVT.getScalarSizeInBits(); 1904 unsigned InElts = SrcVT.isVector() ? SrcVT.getVectorNumElements() : 1; 1905 bool IsVecInReg = Op.getOpcode() == ISD::SIGN_EXTEND_VECTOR_INREG; 1906 1907 // If none of the top bits are demanded, convert this into an any_extend. 1908 if (DemandedBits.getActiveBits() <= InBits) { 1909 // If we only need the non-extended bits of the bottom element 1910 // then we can just bitcast to the result. 
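    // (Illustrative: a v4i32 -> v2i64 sign_extend_vector_inreg where only the
    // low, non-extended bits of element 0 are demanded reads exactly the bits
    // a plain bitcast provides on a little-endian target.)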
1911 if (IsVecInReg && DemandedElts == 1 && 1912 VT.getSizeInBits() == SrcVT.getSizeInBits() && 1913 TLO.DAG.getDataLayout().isLittleEndian()) 1914 return TLO.CombineTo(Op, TLO.DAG.getBitcast(VT, Src)); 1915 1916 unsigned Opc = 1917 IsVecInReg ? ISD::ANY_EXTEND_VECTOR_INREG : ISD::ANY_EXTEND; 1918 if (!TLO.LegalOperations() || isOperationLegal(Opc, VT)) 1919 return TLO.CombineTo(Op, TLO.DAG.getNode(Opc, dl, VT, Src)); 1920 } 1921 1922 APInt InDemandedBits = DemandedBits.trunc(InBits); 1923 APInt InDemandedElts = DemandedElts.zextOrSelf(InElts); 1924 1925 // Since some of the sign extended bits are demanded, we know that the sign 1926 // bit is demanded. 1927 InDemandedBits.setBit(InBits - 1); 1928 1929 if (SimplifyDemandedBits(Src, InDemandedBits, InDemandedElts, Known, TLO, 1930 Depth + 1)) 1931 return true; 1932 assert(!Known.hasConflict() && "Bits known to be one AND zero?"); 1933 assert(Known.getBitWidth() == InBits && "Src width has changed?"); 1934 1935 // If the sign bit is known one, the top bits match. 1936 Known = Known.sext(BitWidth); 1937 1938 // If the sign bit is known zero, convert this to a zero extend. 1939 if (Known.isNonNegative()) { 1940 unsigned Opc = 1941 IsVecInReg ? ISD::ZERO_EXTEND_VECTOR_INREG : ISD::ZERO_EXTEND; 1942 if (!TLO.LegalOperations() || isOperationLegal(Opc, VT)) 1943 return TLO.CombineTo(Op, TLO.DAG.getNode(Opc, dl, VT, Src)); 1944 } 1945 1946 // Attempt to avoid multi-use ops if we don't need anything from them. 1947 if (SDValue NewSrc = SimplifyMultipleUseDemandedBits( 1948 Src, InDemandedBits, InDemandedElts, TLO.DAG, Depth + 1)) 1949 return TLO.CombineTo(Op, TLO.DAG.getNode(Op.getOpcode(), dl, VT, NewSrc)); 1950 break; 1951 } 1952 case ISD::ANY_EXTEND: 1953 case ISD::ANY_EXTEND_VECTOR_INREG: { 1954 SDValue Src = Op.getOperand(0); 1955 EVT SrcVT = Src.getValueType(); 1956 unsigned InBits = SrcVT.getScalarSizeInBits(); 1957 unsigned InElts = SrcVT.isVector() ? SrcVT.getVectorNumElements() : 1; 1958 bool IsVecInReg = Op.getOpcode() == ISD::ANY_EXTEND_VECTOR_INREG; 1959 1960 // If we only need the bottom element then we can just bitcast. 1961 // TODO: Handle ANY_EXTEND? 1962 if (IsVecInReg && DemandedElts == 1 && 1963 VT.getSizeInBits() == SrcVT.getSizeInBits() && 1964 TLO.DAG.getDataLayout().isLittleEndian()) 1965 return TLO.CombineTo(Op, TLO.DAG.getBitcast(VT, Src)); 1966 1967 APInt InDemandedBits = DemandedBits.trunc(InBits); 1968 APInt InDemandedElts = DemandedElts.zextOrSelf(InElts); 1969 if (SimplifyDemandedBits(Src, InDemandedBits, InDemandedElts, Known, TLO, 1970 Depth + 1)) 1971 return true; 1972 assert(!Known.hasConflict() && "Bits known to be one AND zero?"); 1973 assert(Known.getBitWidth() == InBits && "Src width has changed?"); 1974 Known = Known.anyext(BitWidth); 1975 1976 // Attempt to avoid multi-use ops if we don't need anything from them. 1977 if (SDValue NewSrc = SimplifyMultipleUseDemandedBits( 1978 Src, InDemandedBits, InDemandedElts, TLO.DAG, Depth + 1)) 1979 return TLO.CombineTo(Op, TLO.DAG.getNode(Op.getOpcode(), dl, VT, NewSrc)); 1980 break; 1981 } 1982 case ISD::TRUNCATE: { 1983 SDValue Src = Op.getOperand(0); 1984 1985 // Simplify the input, using demanded bit information, and compute the known 1986 // zero/one bits live out. 
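    // A sketch of the mask bookkeeping: truncating i32 -> i16 with
    // DemandedBits 0x00FF asks the i32 source only for bits 0-7 (TruncMask is
    // the demanded mask zero-extended back to the source width), and the
    // source's known bits are truncated again on the way out.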
1987 unsigned OperandBitWidth = Src.getScalarValueSizeInBits(); 1988 APInt TruncMask = DemandedBits.zext(OperandBitWidth); 1989 if (SimplifyDemandedBits(Src, TruncMask, DemandedElts, Known, TLO, 1990 Depth + 1)) 1991 return true; 1992 Known = Known.trunc(BitWidth); 1993 1994 // Attempt to avoid multi-use ops if we don't need anything from them. 1995 if (SDValue NewSrc = SimplifyMultipleUseDemandedBits( 1996 Src, TruncMask, DemandedElts, TLO.DAG, Depth + 1)) 1997 return TLO.CombineTo(Op, TLO.DAG.getNode(ISD::TRUNCATE, dl, VT, NewSrc)); 1998 1999 // If the input is only used by this truncate, see if we can shrink it based 2000 // on the known demanded bits. 2001 if (Src.getNode()->hasOneUse()) { 2002 switch (Src.getOpcode()) { 2003 default: 2004 break; 2005 case ISD::SRL: 2006 // Shrink SRL by a constant if none of the high bits shifted in are 2007 // demanded. 2008 if (TLO.LegalTypes() && !isTypeDesirableForOp(ISD::SRL, VT)) 2009 // Do not turn (vt1 truncate (vt2 srl)) into (vt1 srl) if vt1 is 2010 // undesirable. 2011 break; 2012 2013 const APInt *ShAmtC = 2014 TLO.DAG.getValidShiftAmountConstant(Src, DemandedElts); 2015 if (!ShAmtC) 2016 break; 2017 uint64_t ShVal = ShAmtC->getZExtValue(); 2018 2019 APInt HighBits = 2020 APInt::getHighBitsSet(OperandBitWidth, OperandBitWidth - BitWidth); 2021 HighBits.lshrInPlace(ShVal); 2022 HighBits = HighBits.trunc(BitWidth); 2023 2024 if (!(HighBits & DemandedBits)) { 2025 // None of the shifted in bits are needed. Add a truncate of the 2026 // shift input, then shift it. 2027 SDValue NewShAmt = TLO.DAG.getConstant( 2028 ShVal, dl, getShiftAmountTy(VT, DL, TLO.LegalTypes())); 2029 SDValue NewTrunc = 2030 TLO.DAG.getNode(ISD::TRUNCATE, dl, VT, Src.getOperand(0)); 2031 return TLO.CombineTo( 2032 Op, TLO.DAG.getNode(ISD::SRL, dl, VT, NewTrunc, NewShAmt)); 2033 } 2034 break; 2035 } 2036 } 2037 2038 assert(!Known.hasConflict() && "Bits known to be one AND zero?"); 2039 break; 2040 } 2041 case ISD::AssertZext: { 2042 // AssertZext demands all of the high bits, plus any of the low bits 2043 // demanded by its users. 2044 EVT ZVT = cast<VTSDNode>(Op.getOperand(1))->getVT(); 2045 APInt InMask = APInt::getLowBitsSet(BitWidth, ZVT.getSizeInBits()); 2046 if (SimplifyDemandedBits(Op.getOperand(0), ~InMask | DemandedBits, Known, 2047 TLO, Depth + 1)) 2048 return true; 2049 assert(!Known.hasConflict() && "Bits known to be one AND zero?"); 2050 2051 Known.Zero |= ~InMask; 2052 break; 2053 } 2054 case ISD::EXTRACT_VECTOR_ELT: { 2055 SDValue Src = Op.getOperand(0); 2056 SDValue Idx = Op.getOperand(1); 2057 ElementCount SrcEltCnt = Src.getValueType().getVectorElementCount(); 2058 unsigned EltBitWidth = Src.getScalarValueSizeInBits(); 2059 2060 if (SrcEltCnt.isScalable()) 2061 return false; 2062 2063 // Demand the bits from every vector element without a constant index. 2064 unsigned NumSrcElts = SrcEltCnt.getFixedValue(); 2065 APInt DemandedSrcElts = APInt::getAllOnesValue(NumSrcElts); 2066 if (auto *CIdx = dyn_cast<ConstantSDNode>(Idx)) 2067 if (CIdx->getAPIntValue().ult(NumSrcElts)) 2068 DemandedSrcElts = APInt::getOneBitSet(NumSrcElts, CIdx->getZExtValue()); 2069 2070 // If BitWidth > EltBitWidth the value is anyext:ed. So we do not know 2071 // anything about the extended bits. 
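    // E.g. extracting an i8 element into an i32 result: only the low 8 of the
    // demanded bits can come from the vector, so the mask is truncated to the
    // element width before recursing.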
2072 APInt DemandedSrcBits = DemandedBits; 2073 if (BitWidth > EltBitWidth) 2074 DemandedSrcBits = DemandedSrcBits.trunc(EltBitWidth); 2075 2076 if (SimplifyDemandedBits(Src, DemandedSrcBits, DemandedSrcElts, Known2, TLO, 2077 Depth + 1)) 2078 return true; 2079 2080 // Attempt to avoid multi-use ops if we don't need anything from them. 2081 if (!DemandedSrcBits.isAllOnesValue() || 2082 !DemandedSrcElts.isAllOnesValue()) { 2083 if (SDValue DemandedSrc = SimplifyMultipleUseDemandedBits( 2084 Src, DemandedSrcBits, DemandedSrcElts, TLO.DAG, Depth + 1)) { 2085 SDValue NewOp = 2086 TLO.DAG.getNode(Op.getOpcode(), dl, VT, DemandedSrc, Idx); 2087 return TLO.CombineTo(Op, NewOp); 2088 } 2089 } 2090 2091 Known = Known2; 2092 if (BitWidth > EltBitWidth) 2093 Known = Known.anyext(BitWidth); 2094 break; 2095 } 2096 case ISD::BITCAST: { 2097 SDValue Src = Op.getOperand(0); 2098 EVT SrcVT = Src.getValueType(); 2099 unsigned NumSrcEltBits = SrcVT.getScalarSizeInBits(); 2100 2101 // If this is an FP->Int bitcast and if the sign bit is the only 2102 // thing demanded, turn this into a FGETSIGN. 2103 if (!TLO.LegalOperations() && !VT.isVector() && !SrcVT.isVector() && 2104 DemandedBits == APInt::getSignMask(Op.getValueSizeInBits()) && 2105 SrcVT.isFloatingPoint()) { 2106 bool OpVTLegal = isOperationLegalOrCustom(ISD::FGETSIGN, VT); 2107 bool i32Legal = isOperationLegalOrCustom(ISD::FGETSIGN, MVT::i32); 2108 if ((OpVTLegal || i32Legal) && VT.isSimple() && SrcVT != MVT::f16 && 2109 SrcVT != MVT::f128) { 2110 // Cannot eliminate/lower SHL for f128 yet. 2111 EVT Ty = OpVTLegal ? VT : MVT::i32; 2112 // Make a FGETSIGN + SHL to move the sign bit into the appropriate 2113 // place. We expect the SHL to be eliminated by other optimizations. 2114 SDValue Sign = TLO.DAG.getNode(ISD::FGETSIGN, dl, Ty, Src); 2115 unsigned OpVTSizeInBits = Op.getValueSizeInBits(); 2116 if (!OpVTLegal && OpVTSizeInBits > 32) 2117 Sign = TLO.DAG.getNode(ISD::ZERO_EXTEND, dl, VT, Sign); 2118 unsigned ShVal = Op.getValueSizeInBits() - 1; 2119 SDValue ShAmt = TLO.DAG.getConstant(ShVal, dl, VT); 2120 return TLO.CombineTo(Op, 2121 TLO.DAG.getNode(ISD::SHL, dl, VT, Sign, ShAmt)); 2122 } 2123 } 2124 2125 // Bitcast from a vector using SimplifyDemanded Bits/VectorElts. 2126 // Demand the elt/bit if any of the original elts/bits are demanded. 2127 // TODO - bigendian once we have test coverage. 
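    // (Illustrative mapping for the loop below: bitcasting v4i8 -> v2i16 on a
    // little-endian target, demanded bit 9 of a result element lives in bit 1
    // of the second i8 source element covering it, so that source element and
    // bit become demanded.)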
2128 if (SrcVT.isVector() && (BitWidth % NumSrcEltBits) == 0 && 2129 TLO.DAG.getDataLayout().isLittleEndian()) { 2130 unsigned Scale = BitWidth / NumSrcEltBits; 2131 unsigned NumSrcElts = SrcVT.getVectorNumElements(); 2132 APInt DemandedSrcBits = APInt::getNullValue(NumSrcEltBits); 2133 APInt DemandedSrcElts = APInt::getNullValue(NumSrcElts); 2134 for (unsigned i = 0; i != Scale; ++i) { 2135 unsigned Offset = i * NumSrcEltBits; 2136 APInt Sub = DemandedBits.extractBits(NumSrcEltBits, Offset); 2137 if (!Sub.isNullValue()) { 2138 DemandedSrcBits |= Sub; 2139 for (unsigned j = 0; j != NumElts; ++j) 2140 if (DemandedElts[j]) 2141 DemandedSrcElts.setBit((j * Scale) + i); 2142 } 2143 } 2144 2145 APInt KnownSrcUndef, KnownSrcZero; 2146 if (SimplifyDemandedVectorElts(Src, DemandedSrcElts, KnownSrcUndef, 2147 KnownSrcZero, TLO, Depth + 1)) 2148 return true; 2149 2150 KnownBits KnownSrcBits; 2151 if (SimplifyDemandedBits(Src, DemandedSrcBits, DemandedSrcElts, 2152 KnownSrcBits, TLO, Depth + 1)) 2153 return true; 2154 } else if ((NumSrcEltBits % BitWidth) == 0 && 2155 TLO.DAG.getDataLayout().isLittleEndian()) { 2156 unsigned Scale = NumSrcEltBits / BitWidth; 2157 unsigned NumSrcElts = SrcVT.isVector() ? SrcVT.getVectorNumElements() : 1; 2158 APInt DemandedSrcBits = APInt::getNullValue(NumSrcEltBits); 2159 APInt DemandedSrcElts = APInt::getNullValue(NumSrcElts); 2160 for (unsigned i = 0; i != NumElts; ++i) 2161 if (DemandedElts[i]) { 2162 unsigned Offset = (i % Scale) * BitWidth; 2163 DemandedSrcBits.insertBits(DemandedBits, Offset); 2164 DemandedSrcElts.setBit(i / Scale); 2165 } 2166 2167 if (SrcVT.isVector()) { 2168 APInt KnownSrcUndef, KnownSrcZero; 2169 if (SimplifyDemandedVectorElts(Src, DemandedSrcElts, KnownSrcUndef, 2170 KnownSrcZero, TLO, Depth + 1)) 2171 return true; 2172 } 2173 2174 KnownBits KnownSrcBits; 2175 if (SimplifyDemandedBits(Src, DemandedSrcBits, DemandedSrcElts, 2176 KnownSrcBits, TLO, Depth + 1)) 2177 return true; 2178 } 2179 2180 // If this is a bitcast, let computeKnownBits handle it. Only do this on a 2181 // recursive call where Known may be useful to the caller. 2182 if (Depth > 0) { 2183 Known = TLO.DAG.computeKnownBits(Op, DemandedElts, Depth); 2184 return false; 2185 } 2186 break; 2187 } 2188 case ISD::ADD: 2189 case ISD::MUL: 2190 case ISD::SUB: { 2191 // Add, Sub, and Mul don't demand any bits in positions beyond that 2192 // of the highest bit demanded of them. 2193 SDValue Op0 = Op.getOperand(0), Op1 = Op.getOperand(1); 2194 SDNodeFlags Flags = Op.getNode()->getFlags(); 2195 unsigned DemandedBitsLZ = DemandedBits.countLeadingZeros(); 2196 APInt LoMask = APInt::getLowBitsSet(BitWidth, BitWidth - DemandedBitsLZ); 2197 if (SimplifyDemandedBits(Op0, LoMask, DemandedElts, Known2, TLO, 2198 Depth + 1) || 2199 SimplifyDemandedBits(Op1, LoMask, DemandedElts, Known2, TLO, 2200 Depth + 1) || 2201 // See if the operation should be performed at a smaller bit width. 2202 ShrinkDemandedOp(Op, BitWidth, DemandedBits, TLO)) { 2203 if (Flags.hasNoSignedWrap() || Flags.hasNoUnsignedWrap()) { 2204 // Disable the nsw and nuw flags. We can no longer guarantee that we 2205 // won't wrap after simplification. 2206 Flags.setNoSignedWrap(false); 2207 Flags.setNoUnsignedWrap(false); 2208 SDValue NewOp = 2209 TLO.DAG.getNode(Op.getOpcode(), dl, VT, Op0, Op1, Flags); 2210 return TLO.CombineTo(Op, NewOp); 2211 } 2212 return true; 2213 } 2214 2215 // Attempt to avoid multi-use ops if we don't need anything from them. 
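    // (Note that the rebuilt node below drops nsw/nuw: an operand that only
    // agrees on the demanded bits may change whether the arithmetic wraps.)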
2216 if (!LoMask.isAllOnesValue() || !DemandedElts.isAllOnesValue()) { 2217 SDValue DemandedOp0 = SimplifyMultipleUseDemandedBits( 2218 Op0, LoMask, DemandedElts, TLO.DAG, Depth + 1); 2219 SDValue DemandedOp1 = SimplifyMultipleUseDemandedBits( 2220 Op1, LoMask, DemandedElts, TLO.DAG, Depth + 1); 2221 if (DemandedOp0 || DemandedOp1) { 2222 Flags.setNoSignedWrap(false); 2223 Flags.setNoUnsignedWrap(false); 2224 Op0 = DemandedOp0 ? DemandedOp0 : Op0; 2225 Op1 = DemandedOp1 ? DemandedOp1 : Op1; 2226 SDValue NewOp = 2227 TLO.DAG.getNode(Op.getOpcode(), dl, VT, Op0, Op1, Flags); 2228 return TLO.CombineTo(Op, NewOp); 2229 } 2230 } 2231 2232 // If we have a constant operand, we may be able to turn it into -1 if we 2233 // do not demand the high bits. This can make the constant smaller to 2234 // encode, allow more general folding, or match specialized instruction 2235 // patterns (eg, 'blsr' on x86). Don't bother changing 1 to -1 because that 2236 // is probably not useful (and could be detrimental). 2237 ConstantSDNode *C = isConstOrConstSplat(Op1); 2238 APInt HighMask = APInt::getHighBitsSet(BitWidth, DemandedBitsLZ); 2239 if (C && !C->isAllOnesValue() && !C->isOne() && 2240 (C->getAPIntValue() | HighMask).isAllOnesValue()) { 2241 SDValue Neg1 = TLO.DAG.getAllOnesConstant(dl, VT); 2242 // Disable the nsw and nuw flags. We can no longer guarantee that we 2243 // won't wrap after simplification. 2244 Flags.setNoSignedWrap(false); 2245 Flags.setNoUnsignedWrap(false); 2246 SDValue NewOp = TLO.DAG.getNode(Op.getOpcode(), dl, VT, Op0, Neg1, Flags); 2247 return TLO.CombineTo(Op, NewOp); 2248 } 2249 2250 LLVM_FALLTHROUGH; 2251 } 2252 default: 2253 if (Op.getOpcode() >= ISD::BUILTIN_OP_END) { 2254 if (SimplifyDemandedBitsForTargetNode(Op, DemandedBits, DemandedElts, 2255 Known, TLO, Depth)) 2256 return true; 2257 break; 2258 } 2259 2260 // Just use computeKnownBits to compute output bits. 2261 Known = TLO.DAG.computeKnownBits(Op, DemandedElts, Depth); 2262 break; 2263 } 2264 2265 // If we know the value of all of the demanded bits, return this as a 2266 // constant. 2267 if (DemandedBits.isSubsetOf(Known.Zero | Known.One)) { 2268 // Avoid folding to a constant if any OpaqueConstant is involved. 2269 const SDNode *N = Op.getNode(); 2270 for (SDNodeIterator I = SDNodeIterator::begin(N), 2271 E = SDNodeIterator::end(N); 2272 I != E; ++I) { 2273 SDNode *Op = *I; 2274 if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op)) 2275 if (C->isOpaque()) 2276 return false; 2277 } 2278 if (VT.isInteger()) 2279 return TLO.CombineTo(Op, TLO.DAG.getConstant(Known.One, dl, VT)); 2280 if (VT.isFloatingPoint()) 2281 return TLO.CombineTo( 2282 Op, 2283 TLO.DAG.getConstantFP( 2284 APFloat(TLO.DAG.EVTToAPFloatSemantics(VT), Known.One), dl, VT)); 2285 } 2286 2287 return false; 2288 } 2289 2290 bool TargetLowering::SimplifyDemandedVectorElts(SDValue Op, 2291 const APInt &DemandedElts, 2292 APInt &KnownUndef, 2293 APInt &KnownZero, 2294 DAGCombinerInfo &DCI) const { 2295 SelectionDAG &DAG = DCI.DAG; 2296 TargetLoweringOpt TLO(DAG, !DCI.isBeforeLegalize(), 2297 !DCI.isBeforeLegalizeOps()); 2298 2299 bool Simplified = 2300 SimplifyDemandedVectorElts(Op, DemandedElts, KnownUndef, KnownZero, TLO); 2301 if (Simplified) { 2302 DCI.AddToWorklist(Op.getNode()); 2303 DCI.CommitTargetLoweringOpt(TLO); 2304 } 2305 2306 return Simplified; 2307 } 2308 2309 /// Given a vector binary operation and known undefined elements for each input 2310 /// operand, compute whether each element of the output is undefined. 
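/// (Illustrative: a lane whose two inputs are both undef, or undef combined
/// with a foldable constant, may constant-fold to undef below and is then
/// reported as known-undef.)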
2311 static APInt getKnownUndefForVectorBinop(SDValue BO, SelectionDAG &DAG, 2312 const APInt &UndefOp0, 2313 const APInt &UndefOp1) { 2314 EVT VT = BO.getValueType(); 2315 assert(DAG.getTargetLoweringInfo().isBinOp(BO.getOpcode()) && VT.isVector() && 2316 "Vector binop only"); 2317 2318 EVT EltVT = VT.getVectorElementType(); 2319 unsigned NumElts = VT.getVectorNumElements(); 2320 assert(UndefOp0.getBitWidth() == NumElts && 2321 UndefOp1.getBitWidth() == NumElts && "Bad type for undef analysis"); 2322 2323 auto getUndefOrConstantElt = [&](SDValue V, unsigned Index, 2324 const APInt &UndefVals) { 2325 if (UndefVals[Index]) 2326 return DAG.getUNDEF(EltVT); 2327 2328 if (auto *BV = dyn_cast<BuildVectorSDNode>(V)) { 2329 // Try hard to make sure that the getNode() call is not creating temporary 2330 // nodes. Ignore opaque integers because they do not constant fold. 2331 SDValue Elt = BV->getOperand(Index); 2332 auto *C = dyn_cast<ConstantSDNode>(Elt); 2333 if (isa<ConstantFPSDNode>(Elt) || Elt.isUndef() || (C && !C->isOpaque())) 2334 return Elt; 2335 } 2336 2337 return SDValue(); 2338 }; 2339 2340 APInt KnownUndef = APInt::getNullValue(NumElts); 2341 for (unsigned i = 0; i != NumElts; ++i) { 2342 // If both inputs for this element are either constant or undef and match 2343 // the element type, compute the constant/undef result for this element of 2344 // the vector. 2345 // TODO: Ideally we would use FoldConstantArithmetic() here, but that does 2346 // not handle FP constants. The code within getNode() should be refactored 2347 // to avoid the danger of creating a bogus temporary node here. 2348 SDValue C0 = getUndefOrConstantElt(BO.getOperand(0), i, UndefOp0); 2349 SDValue C1 = getUndefOrConstantElt(BO.getOperand(1), i, UndefOp1); 2350 if (C0 && C1 && C0.getValueType() == EltVT && C1.getValueType() == EltVT) 2351 if (DAG.getNode(BO.getOpcode(), SDLoc(BO), EltVT, C0, C1).isUndef()) 2352 KnownUndef.setBit(i); 2353 } 2354 return KnownUndef; 2355 } 2356 2357 bool TargetLowering::SimplifyDemandedVectorElts( 2358 SDValue Op, const APInt &OriginalDemandedElts, APInt &KnownUndef, 2359 APInt &KnownZero, TargetLoweringOpt &TLO, unsigned Depth, 2360 bool AssumeSingleUse) const { 2361 EVT VT = Op.getValueType(); 2362 unsigned Opcode = Op.getOpcode(); 2363 APInt DemandedElts = OriginalDemandedElts; 2364 unsigned NumElts = DemandedElts.getBitWidth(); 2365 assert(VT.isVector() && "Expected vector op"); 2366 2367 KnownUndef = KnownZero = APInt::getNullValue(NumElts); 2368 2369 // TODO: For now we assume we know nothing about scalable vectors. 2370 if (VT.isScalableVector()) 2371 return false; 2372 2373 assert(VT.getVectorNumElements() == NumElts && 2374 "Mask size mismatches value type element count!"); 2375 2376 // Undef operand. 2377 if (Op.isUndef()) { 2378 KnownUndef.setAllBits(); 2379 return false; 2380 } 2381 2382 // If Op has other users, assume that all elements are needed. 2383 if (!Op.getNode()->hasOneUse() && !AssumeSingleUse) 2384 DemandedElts.setAllBits(); 2385 2386 // Not demanding any elements from Op. 2387 if (DemandedElts == 0) { 2388 KnownUndef.setAllBits(); 2389 return TLO.CombineTo(Op, TLO.DAG.getUNDEF(VT)); 2390 } 2391 2392 // Limit search depth. 2393 if (Depth >= SelectionDAG::MaxRecursionDepth) 2394 return false; 2395 2396 SDLoc DL(Op); 2397 unsigned EltSizeInBits = VT.getScalarSizeInBits(); 2398 2399 // Helper for demanding the specified elements and all the bits of both binary 2400 // operands. 
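  // (Shared by the binop cases below: when only some lanes are demanded,
  // either operand may be replaceable by an existing simpler node for this
  // single use, without disturbing its other users.)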
2401 auto SimplifyDemandedVectorEltsBinOp = [&](SDValue Op0, SDValue Op1) { 2402 SDValue NewOp0 = SimplifyMultipleUseDemandedVectorElts(Op0, DemandedElts, 2403 TLO.DAG, Depth + 1); 2404 SDValue NewOp1 = SimplifyMultipleUseDemandedVectorElts(Op1, DemandedElts, 2405 TLO.DAG, Depth + 1); 2406 if (NewOp0 || NewOp1) { 2407 SDValue NewOp = TLO.DAG.getNode( 2408 Opcode, SDLoc(Op), VT, NewOp0 ? NewOp0 : Op0, NewOp1 ? NewOp1 : Op1); 2409 return TLO.CombineTo(Op, NewOp); 2410 } 2411 return false; 2412 }; 2413 2414 switch (Opcode) { 2415 case ISD::SCALAR_TO_VECTOR: { 2416 if (!DemandedElts[0]) { 2417 KnownUndef.setAllBits(); 2418 return TLO.CombineTo(Op, TLO.DAG.getUNDEF(VT)); 2419 } 2420 KnownUndef.setHighBits(NumElts - 1); 2421 break; 2422 } 2423 case ISD::BITCAST: { 2424 SDValue Src = Op.getOperand(0); 2425 EVT SrcVT = Src.getValueType(); 2426 2427 // We only handle vectors here. 2428 // TODO - investigate calling SimplifyDemandedBits/ComputeKnownBits? 2429 if (!SrcVT.isVector()) 2430 break; 2431 2432 // Fast handling of 'identity' bitcasts. 2433 unsigned NumSrcElts = SrcVT.getVectorNumElements(); 2434 if (NumSrcElts == NumElts) 2435 return SimplifyDemandedVectorElts(Src, DemandedElts, KnownUndef, 2436 KnownZero, TLO, Depth + 1); 2437 2438 APInt SrcZero, SrcUndef; 2439 APInt SrcDemandedElts = APInt::getNullValue(NumSrcElts); 2440 2441 // Bitcast from 'large element' src vector to 'small element' vector, we 2442 // must demand a source element if any DemandedElt maps to it. 2443 if ((NumElts % NumSrcElts) == 0) { 2444 unsigned Scale = NumElts / NumSrcElts; 2445 for (unsigned i = 0; i != NumElts; ++i) 2446 if (DemandedElts[i]) 2447 SrcDemandedElts.setBit(i / Scale); 2448 2449 if (SimplifyDemandedVectorElts(Src, SrcDemandedElts, SrcUndef, SrcZero, 2450 TLO, Depth + 1)) 2451 return true; 2452 2453 // Try calling SimplifyDemandedBits, converting demanded elts to the bits 2454 // of the large element. 2455 // TODO - bigendian once we have test coverage. 2456 if (TLO.DAG.getDataLayout().isLittleEndian()) { 2457 unsigned SrcEltSizeInBits = SrcVT.getScalarSizeInBits(); 2458 APInt SrcDemandedBits = APInt::getNullValue(SrcEltSizeInBits); 2459 for (unsigned i = 0; i != NumElts; ++i) 2460 if (DemandedElts[i]) { 2461 unsigned Ofs = (i % Scale) * EltSizeInBits; 2462 SrcDemandedBits.setBits(Ofs, Ofs + EltSizeInBits); 2463 } 2464 2465 KnownBits Known; 2466 if (SimplifyDemandedBits(Src, SrcDemandedBits, SrcDemandedElts, Known, 2467 TLO, Depth + 1)) 2468 return true; 2469 } 2470 2471 // If the src element is zero/undef then all the output elements will be - 2472 // only demanded elements are guaranteed to be correct. 2473 for (unsigned i = 0; i != NumSrcElts; ++i) { 2474 if (SrcDemandedElts[i]) { 2475 if (SrcZero[i]) 2476 KnownZero.setBits(i * Scale, (i + 1) * Scale); 2477 if (SrcUndef[i]) 2478 KnownUndef.setBits(i * Scale, (i + 1) * Scale); 2479 } 2480 } 2481 } 2482 2483 // Bitcast from 'small element' src vector to 'large element' vector, we 2484 // demand all smaller source elements covered by the larger demanded element 2485 // of this vector. 
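    // (E.g. bitcasting v4i8 -> v2i16: demanding result element 1 demands the
    // two i8 source elements 2 and 3 that it is built from.)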
    if ((NumSrcElts % NumElts) == 0) {
      unsigned Scale = NumSrcElts / NumElts;
      for (unsigned i = 0; i != NumElts; ++i)
        if (DemandedElts[i])
          SrcDemandedElts.setBits(i * Scale, (i + 1) * Scale);

      if (SimplifyDemandedVectorElts(Src, SrcDemandedElts, SrcUndef, SrcZero,
                                     TLO, Depth + 1))
        return true;

      // If all the src elements covering an output element are zero/undef, then
      // the output element will be as well, assuming it was demanded.
      for (unsigned i = 0; i != NumElts; ++i) {
        if (DemandedElts[i]) {
          if (SrcZero.extractBits(Scale, i * Scale).isAllOnesValue())
            KnownZero.setBit(i);
          if (SrcUndef.extractBits(Scale, i * Scale).isAllOnesValue())
            KnownUndef.setBit(i);
        }
      }
    }
    break;
  }
  case ISD::BUILD_VECTOR: {
    // Check all elements and simplify any unused elements with UNDEF.
    if (!DemandedElts.isAllOnesValue()) {
      // Don't simplify BROADCASTS.
      if (llvm::any_of(Op->op_values(),
                       [&](SDValue Elt) { return Op.getOperand(0) != Elt; })) {
        SmallVector<SDValue, 32> Ops(Op->op_begin(), Op->op_end());
        bool Updated = false;
        for (unsigned i = 0; i != NumElts; ++i) {
          if (!DemandedElts[i] && !Ops[i].isUndef()) {
            Ops[i] = TLO.DAG.getUNDEF(Ops[0].getValueType());
            KnownUndef.setBit(i);
            Updated = true;
          }
        }
        if (Updated)
          return TLO.CombineTo(Op, TLO.DAG.getBuildVector(VT, DL, Ops));
      }
    }
    for (unsigned i = 0; i != NumElts; ++i) {
      SDValue SrcOp = Op.getOperand(i);
      if (SrcOp.isUndef()) {
        KnownUndef.setBit(i);
      } else if (EltSizeInBits == SrcOp.getScalarValueSizeInBits() &&
                 (isNullConstant(SrcOp) || isNullFPConstant(SrcOp))) {
        KnownZero.setBit(i);
      }
    }
    break;
  }
  case ISD::CONCAT_VECTORS: {
    EVT SubVT = Op.getOperand(0).getValueType();
    unsigned NumSubVecs = Op.getNumOperands();
    unsigned NumSubElts = SubVT.getVectorNumElements();
    for (unsigned i = 0; i != NumSubVecs; ++i) {
      SDValue SubOp = Op.getOperand(i);
      APInt SubElts = DemandedElts.extractBits(NumSubElts, i * NumSubElts);
      APInt SubUndef, SubZero;
      if (SimplifyDemandedVectorElts(SubOp, SubElts, SubUndef, SubZero, TLO,
                                     Depth + 1))
        return true;
      KnownUndef.insertBits(SubUndef, i * NumSubElts);
      KnownZero.insertBits(SubZero, i * NumSubElts);
    }
    break;
  }
  case ISD::INSERT_SUBVECTOR: {
    // Demand any elements from the subvector and the remainder from the src
    // it's inserted into.
    SDValue Src = Op.getOperand(0);
    SDValue Sub = Op.getOperand(1);
    uint64_t Idx = Op.getConstantOperandVal(2);
    unsigned NumSubElts = Sub.getValueType().getVectorNumElements();
    APInt DemandedSubElts = DemandedElts.extractBits(NumSubElts, Idx);
    APInt DemandedSrcElts = DemandedElts;
    DemandedSrcElts.insertBits(APInt::getNullValue(NumSubElts), Idx);

    APInt SubUndef, SubZero;
    if (SimplifyDemandedVectorElts(Sub, DemandedSubElts, SubUndef, SubZero, TLO,
                                   Depth + 1))
      return true;

    // If none of the src operand elements are demanded, replace it with undef.
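    // (E.g. inserting a v2i32 subvector at index 0 of a v4i32 when only lanes
    // 0 and 1 are demanded: the original vector contributes nothing, so it is
    // rebuilt below on an undef base.)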
2572 if (!DemandedSrcElts && !Src.isUndef()) 2573 return TLO.CombineTo(Op, TLO.DAG.getNode(ISD::INSERT_SUBVECTOR, DL, VT, 2574 TLO.DAG.getUNDEF(VT), Sub, 2575 Op.getOperand(2))); 2576 2577 if (SimplifyDemandedVectorElts(Src, DemandedSrcElts, KnownUndef, KnownZero, 2578 TLO, Depth + 1)) 2579 return true; 2580 KnownUndef.insertBits(SubUndef, Idx); 2581 KnownZero.insertBits(SubZero, Idx); 2582 2583 // Attempt to avoid multi-use ops if we don't need anything from them. 2584 if (!DemandedSrcElts.isAllOnesValue() || 2585 !DemandedSubElts.isAllOnesValue()) { 2586 SDValue NewSrc = SimplifyMultipleUseDemandedVectorElts( 2587 Src, DemandedSrcElts, TLO.DAG, Depth + 1); 2588 SDValue NewSub = SimplifyMultipleUseDemandedVectorElts( 2589 Sub, DemandedSubElts, TLO.DAG, Depth + 1); 2590 if (NewSrc || NewSub) { 2591 NewSrc = NewSrc ? NewSrc : Src; 2592 NewSub = NewSub ? NewSub : Sub; 2593 SDValue NewOp = TLO.DAG.getNode(Op.getOpcode(), SDLoc(Op), VT, NewSrc, 2594 NewSub, Op.getOperand(2)); 2595 return TLO.CombineTo(Op, NewOp); 2596 } 2597 } 2598 break; 2599 } 2600 case ISD::EXTRACT_SUBVECTOR: { 2601 // Offset the demanded elts by the subvector index. 2602 SDValue Src = Op.getOperand(0); 2603 if (Src.getValueType().isScalableVector()) 2604 break; 2605 uint64_t Idx = Op.getConstantOperandVal(1); 2606 unsigned NumSrcElts = Src.getValueType().getVectorNumElements(); 2607 APInt DemandedSrcElts = DemandedElts.zextOrSelf(NumSrcElts).shl(Idx); 2608 2609 APInt SrcUndef, SrcZero; 2610 if (SimplifyDemandedVectorElts(Src, DemandedSrcElts, SrcUndef, SrcZero, TLO, 2611 Depth + 1)) 2612 return true; 2613 KnownUndef = SrcUndef.extractBits(NumElts, Idx); 2614 KnownZero = SrcZero.extractBits(NumElts, Idx); 2615 2616 // Attempt to avoid multi-use ops if we don't need anything from them. 2617 if (!DemandedElts.isAllOnesValue()) { 2618 SDValue NewSrc = SimplifyMultipleUseDemandedVectorElts( 2619 Src, DemandedSrcElts, TLO.DAG, Depth + 1); 2620 if (NewSrc) { 2621 SDValue NewOp = TLO.DAG.getNode(Op.getOpcode(), SDLoc(Op), VT, NewSrc, 2622 Op.getOperand(1)); 2623 return TLO.CombineTo(Op, NewOp); 2624 } 2625 } 2626 break; 2627 } 2628 case ISD::INSERT_VECTOR_ELT: { 2629 SDValue Vec = Op.getOperand(0); 2630 SDValue Scl = Op.getOperand(1); 2631 auto *CIdx = dyn_cast<ConstantSDNode>(Op.getOperand(2)); 2632 2633 // For a legal, constant insertion index, if we don't need this insertion 2634 // then strip it, else remove it from the demanded elts. 2635 if (CIdx && CIdx->getAPIntValue().ult(NumElts)) { 2636 unsigned Idx = CIdx->getZExtValue(); 2637 if (!DemandedElts[Idx]) 2638 return TLO.CombineTo(Op, Vec); 2639 2640 APInt DemandedVecElts(DemandedElts); 2641 DemandedVecElts.clearBit(Idx); 2642 if (SimplifyDemandedVectorElts(Vec, DemandedVecElts, KnownUndef, 2643 KnownZero, TLO, Depth + 1)) 2644 return true; 2645 2646 KnownUndef.setBitVal(Idx, Scl.isUndef()); 2647 2648 KnownZero.setBitVal(Idx, isNullConstant(Scl) || isNullFPConstant(Scl)); 2649 break; 2650 } 2651 2652 APInt VecUndef, VecZero; 2653 if (SimplifyDemandedVectorElts(Vec, DemandedElts, VecUndef, VecZero, TLO, 2654 Depth + 1)) 2655 return true; 2656 // Without knowing the insertion index we can't set KnownUndef/KnownZero. 2657 break; 2658 } 2659 case ISD::VSELECT: { 2660 // Try to transform the select condition based on the current demanded 2661 // elements. 2662 // TODO: If a condition element is undef, we can choose from one arm of the 2663 // select (and if one arm is undef, then we can propagate that to the 2664 // result). 
    // TODO - add support for constant vselect masks (see IR version of this).
    APInt UnusedUndef, UnusedZero;
    if (SimplifyDemandedVectorElts(Op.getOperand(0), DemandedElts, UnusedUndef,
                                   UnusedZero, TLO, Depth + 1))
      return true;

    // See if we can simplify either vselect operand.
    APInt DemandedLHS(DemandedElts);
    APInt DemandedRHS(DemandedElts);
    APInt UndefLHS, ZeroLHS;
    APInt UndefRHS, ZeroRHS;
    if (SimplifyDemandedVectorElts(Op.getOperand(1), DemandedLHS, UndefLHS,
                                   ZeroLHS, TLO, Depth + 1))
      return true;
    if (SimplifyDemandedVectorElts(Op.getOperand(2), DemandedRHS, UndefRHS,
                                   ZeroRHS, TLO, Depth + 1))
      return true;

    KnownUndef = UndefLHS & UndefRHS;
    KnownZero = ZeroLHS & ZeroRHS;
    break;
  }
  case ISD::VECTOR_SHUFFLE: {
    ArrayRef<int> ShuffleMask = cast<ShuffleVectorSDNode>(Op)->getMask();

    // Collect demanded elements from shuffle operands.
    APInt DemandedLHS(NumElts, 0);
    APInt DemandedRHS(NumElts, 0);
    for (unsigned i = 0; i != NumElts; ++i) {
      int M = ShuffleMask[i];
      if (M < 0 || !DemandedElts[i])
        continue;
      assert(0 <= M && M < (int)(2 * NumElts) && "Shuffle index out of range");
      if (M < (int)NumElts)
        DemandedLHS.setBit(M);
      else
        DemandedRHS.setBit(M - NumElts);
    }

    // See if we can simplify either shuffle operand.
    APInt UndefLHS, ZeroLHS;
    APInt UndefRHS, ZeroRHS;
    if (SimplifyDemandedVectorElts(Op.getOperand(0), DemandedLHS, UndefLHS,
                                   ZeroLHS, TLO, Depth + 1))
      return true;
    if (SimplifyDemandedVectorElts(Op.getOperand(1), DemandedRHS, UndefRHS,
                                   ZeroRHS, TLO, Depth + 1))
      return true;

    // Simplify mask using undef elements from LHS/RHS.
    bool Updated = false;
    bool IdentityLHS = true, IdentityRHS = true;
    SmallVector<int, 32> NewMask(ShuffleMask.begin(), ShuffleMask.end());
    for (unsigned i = 0; i != NumElts; ++i) {
      int &M = NewMask[i];
      if (M < 0)
        continue;
      if (!DemandedElts[i] || (M < (int)NumElts && UndefLHS[M]) ||
          (M >= (int)NumElts && UndefRHS[M - NumElts])) {
        Updated = true;
        M = -1;
      }
      IdentityLHS &= (M < 0) || (M == (int)i);
      IdentityRHS &= (M < 0) || ((M - NumElts) == i);
    }

    // Update legal shuffle masks based on demanded elements if it won't reduce
    // to Identity, which can cause premature removal of the shuffle mask.
    if (Updated && !IdentityLHS && !IdentityRHS && !TLO.LegalOps) {
      SDValue LegalShuffle =
          buildLegalVectorShuffle(VT, DL, Op.getOperand(0), Op.getOperand(1),
                                  NewMask, TLO.DAG);
      if (LegalShuffle)
        return TLO.CombineTo(Op, LegalShuffle);
    }

    // Propagate undef/zero elements from LHS/RHS.
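    // (E.g. for a <4 x i32> mask <0, -1, 5, 6>: lane 1 is undef from the mask
    // itself, lane 0 inherits the state of LHS lane 0, and lanes 2 and 3
    // inherit RHS lanes 1 and 2.)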
2742 for (unsigned i = 0; i != NumElts; ++i) { 2743 int M = ShuffleMask[i]; 2744 if (M < 0) { 2745 KnownUndef.setBit(i); 2746 } else if (M < (int)NumElts) { 2747 if (UndefLHS[M]) 2748 KnownUndef.setBit(i); 2749 if (ZeroLHS[M]) 2750 KnownZero.setBit(i); 2751 } else { 2752 if (UndefRHS[M - NumElts]) 2753 KnownUndef.setBit(i); 2754 if (ZeroRHS[M - NumElts]) 2755 KnownZero.setBit(i); 2756 } 2757 } 2758 break; 2759 } 2760 case ISD::ANY_EXTEND_VECTOR_INREG: 2761 case ISD::SIGN_EXTEND_VECTOR_INREG: 2762 case ISD::ZERO_EXTEND_VECTOR_INREG: { 2763 APInt SrcUndef, SrcZero; 2764 SDValue Src = Op.getOperand(0); 2765 unsigned NumSrcElts = Src.getValueType().getVectorNumElements(); 2766 APInt DemandedSrcElts = DemandedElts.zextOrSelf(NumSrcElts); 2767 if (SimplifyDemandedVectorElts(Src, DemandedSrcElts, SrcUndef, SrcZero, TLO, 2768 Depth + 1)) 2769 return true; 2770 KnownZero = SrcZero.zextOrTrunc(NumElts); 2771 KnownUndef = SrcUndef.zextOrTrunc(NumElts); 2772 2773 if (Op.getOpcode() == ISD::ANY_EXTEND_VECTOR_INREG && 2774 Op.getValueSizeInBits() == Src.getValueSizeInBits() && 2775 DemandedSrcElts == 1 && TLO.DAG.getDataLayout().isLittleEndian()) { 2776 // aext - if we just need the bottom element then we can bitcast. 2777 return TLO.CombineTo(Op, TLO.DAG.getBitcast(VT, Src)); 2778 } 2779 2780 if (Op.getOpcode() == ISD::ZERO_EXTEND_VECTOR_INREG) { 2781 // zext(undef) upper bits are guaranteed to be zero. 2782 if (DemandedElts.isSubsetOf(KnownUndef)) 2783 return TLO.CombineTo(Op, TLO.DAG.getConstant(0, SDLoc(Op), VT)); 2784 KnownUndef.clearAllBits(); 2785 } 2786 break; 2787 } 2788 2789 // TODO: There are more binop opcodes that could be handled here - MIN, 2790 // MAX, saturated math, etc. 2791 case ISD::OR: 2792 case ISD::XOR: 2793 case ISD::ADD: 2794 case ISD::SUB: 2795 case ISD::FADD: 2796 case ISD::FSUB: 2797 case ISD::FMUL: 2798 case ISD::FDIV: 2799 case ISD::FREM: { 2800 SDValue Op0 = Op.getOperand(0); 2801 SDValue Op1 = Op.getOperand(1); 2802 2803 APInt UndefRHS, ZeroRHS; 2804 if (SimplifyDemandedVectorElts(Op1, DemandedElts, UndefRHS, ZeroRHS, TLO, 2805 Depth + 1)) 2806 return true; 2807 APInt UndefLHS, ZeroLHS; 2808 if (SimplifyDemandedVectorElts(Op0, DemandedElts, UndefLHS, ZeroLHS, TLO, 2809 Depth + 1)) 2810 return true; 2811 2812 KnownZero = ZeroLHS & ZeroRHS; 2813 KnownUndef = getKnownUndefForVectorBinop(Op, TLO.DAG, UndefLHS, UndefRHS); 2814 2815 // Attempt to avoid multi-use ops if we don't need anything from them. 2816 // TODO - use KnownUndef to relax the demandedelts? 2817 if (!DemandedElts.isAllOnesValue()) 2818 if (SimplifyDemandedVectorEltsBinOp(Op0, Op1)) 2819 return true; 2820 break; 2821 } 2822 case ISD::SHL: 2823 case ISD::SRL: 2824 case ISD::SRA: 2825 case ISD::ROTL: 2826 case ISD::ROTR: { 2827 SDValue Op0 = Op.getOperand(0); 2828 SDValue Op1 = Op.getOperand(1); 2829 2830 APInt UndefRHS, ZeroRHS; 2831 if (SimplifyDemandedVectorElts(Op1, DemandedElts, UndefRHS, ZeroRHS, TLO, 2832 Depth + 1)) 2833 return true; 2834 APInt UndefLHS, ZeroLHS; 2835 if (SimplifyDemandedVectorElts(Op0, DemandedElts, UndefLHS, ZeroLHS, TLO, 2836 Depth + 1)) 2837 return true; 2838 2839 KnownZero = ZeroLHS; 2840 KnownUndef = UndefLHS & UndefRHS; // TODO: use getKnownUndefForVectorBinop? 2841 2842 // Attempt to avoid multi-use ops if we don't need anything from them. 2843 // TODO - use KnownUndef to relax the demandedelts? 
2844 if (!DemandedElts.isAllOnesValue()) 2845 if (SimplifyDemandedVectorEltsBinOp(Op0, Op1)) 2846 return true; 2847 break; 2848 } 2849 case ISD::MUL: 2850 case ISD::AND: { 2851 SDValue Op0 = Op.getOperand(0); 2852 SDValue Op1 = Op.getOperand(1); 2853 2854 APInt SrcUndef, SrcZero; 2855 if (SimplifyDemandedVectorElts(Op1, DemandedElts, SrcUndef, SrcZero, TLO, 2856 Depth + 1)) 2857 return true; 2858 if (SimplifyDemandedVectorElts(Op0, DemandedElts, KnownUndef, KnownZero, 2859 TLO, Depth + 1)) 2860 return true; 2861 2862 // If either side has a zero element, then the result element is zero, even 2863 // if the other is an UNDEF. 2864 // TODO: Extend getKnownUndefForVectorBinop to also deal with known zeros 2865 // and then handle 'and' nodes with the rest of the binop opcodes. 2866 KnownZero |= SrcZero; 2867 KnownUndef &= SrcUndef; 2868 KnownUndef &= ~KnownZero; 2869 2870 // Attempt to avoid multi-use ops if we don't need anything from them. 2871 // TODO - use KnownUndef to relax the demandedelts? 2872 if (!DemandedElts.isAllOnesValue()) 2873 if (SimplifyDemandedVectorEltsBinOp(Op0, Op1)) 2874 return true; 2875 break; 2876 } 2877 case ISD::TRUNCATE: 2878 case ISD::SIGN_EXTEND: 2879 case ISD::ZERO_EXTEND: 2880 if (SimplifyDemandedVectorElts(Op.getOperand(0), DemandedElts, KnownUndef, 2881 KnownZero, TLO, Depth + 1)) 2882 return true; 2883 2884 if (Op.getOpcode() == ISD::ZERO_EXTEND) { 2885 // zext(undef) upper bits are guaranteed to be zero. 2886 if (DemandedElts.isSubsetOf(KnownUndef)) 2887 return TLO.CombineTo(Op, TLO.DAG.getConstant(0, SDLoc(Op), VT)); 2888 KnownUndef.clearAllBits(); 2889 } 2890 break; 2891 default: { 2892 if (Op.getOpcode() >= ISD::BUILTIN_OP_END) { 2893 if (SimplifyDemandedVectorEltsForTargetNode(Op, DemandedElts, KnownUndef, 2894 KnownZero, TLO, Depth)) 2895 return true; 2896 } else { 2897 KnownBits Known; 2898 APInt DemandedBits = APInt::getAllOnesValue(EltSizeInBits); 2899 if (SimplifyDemandedBits(Op, DemandedBits, OriginalDemandedElts, Known, 2900 TLO, Depth, AssumeSingleUse)) 2901 return true; 2902 } 2903 break; 2904 } 2905 } 2906 assert((KnownUndef & KnownZero) == 0 && "Elements flagged as undef AND zero"); 2907 2908 // Constant fold all undef cases. 2909 // TODO: Handle zero cases as well. 2910 if (DemandedElts.isSubsetOf(KnownUndef)) 2911 return TLO.CombineTo(Op, TLO.DAG.getUNDEF(VT)); 2912 2913 return false; 2914 } 2915 2916 /// Determine which of the bits specified in Mask are known to be either zero or 2917 /// one and return them in the Known. 2918 void TargetLowering::computeKnownBitsForTargetNode(const SDValue Op, 2919 KnownBits &Known, 2920 const APInt &DemandedElts, 2921 const SelectionDAG &DAG, 2922 unsigned Depth) const { 2923 assert((Op.getOpcode() >= ISD::BUILTIN_OP_END || 2924 Op.getOpcode() == ISD::INTRINSIC_WO_CHAIN || 2925 Op.getOpcode() == ISD::INTRINSIC_W_CHAIN || 2926 Op.getOpcode() == ISD::INTRINSIC_VOID) && 2927 "Should use MaskedValueIsZero if you don't know whether Op" 2928 " is a target node!"); 2929 Known.resetAll(); 2930 } 2931 2932 void TargetLowering::computeKnownBitsForTargetInstr( 2933 GISelKnownBits &Analysis, Register R, KnownBits &Known, 2934 const APInt &DemandedElts, const MachineRegisterInfo &MRI, 2935 unsigned Depth) const { 2936 Known.resetAll(); 2937 } 2938 2939 void TargetLowering::computeKnownBitsForFrameIndex( 2940 const int FrameIdx, KnownBits &Known, const MachineFunction &MF) const { 2941 // The low bits are known zero if the pointer is aligned. 
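  // (E.g. a frame object with 16-byte alignment has Log2(16) == 4 known-zero
  // low bits in its address.)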
2942 Known.Zero.setLowBits(Log2(MF.getFrameInfo().getObjectAlign(FrameIdx))); 2943 } 2944 2945 Align TargetLowering::computeKnownAlignForTargetInstr( 2946 GISelKnownBits &Analysis, Register R, const MachineRegisterInfo &MRI, 2947 unsigned Depth) const { 2948 return Align(1); 2949 } 2950 2951 /// This method can be implemented by targets that want to expose additional 2952 /// information about sign bits to the DAG Combiner. 2953 unsigned TargetLowering::ComputeNumSignBitsForTargetNode(SDValue Op, 2954 const APInt &, 2955 const SelectionDAG &, 2956 unsigned Depth) const { 2957 assert((Op.getOpcode() >= ISD::BUILTIN_OP_END || 2958 Op.getOpcode() == ISD::INTRINSIC_WO_CHAIN || 2959 Op.getOpcode() == ISD::INTRINSIC_W_CHAIN || 2960 Op.getOpcode() == ISD::INTRINSIC_VOID) && 2961 "Should use ComputeNumSignBits if you don't know whether Op" 2962 " is a target node!"); 2963 return 1; 2964 } 2965 2966 unsigned TargetLowering::computeNumSignBitsForTargetInstr( 2967 GISelKnownBits &Analysis, Register R, const APInt &DemandedElts, 2968 const MachineRegisterInfo &MRI, unsigned Depth) const { 2969 return 1; 2970 } 2971 2972 bool TargetLowering::SimplifyDemandedVectorEltsForTargetNode( 2973 SDValue Op, const APInt &DemandedElts, APInt &KnownUndef, APInt &KnownZero, 2974 TargetLoweringOpt &TLO, unsigned Depth) const { 2975 assert((Op.getOpcode() >= ISD::BUILTIN_OP_END || 2976 Op.getOpcode() == ISD::INTRINSIC_WO_CHAIN || 2977 Op.getOpcode() == ISD::INTRINSIC_W_CHAIN || 2978 Op.getOpcode() == ISD::INTRINSIC_VOID) && 2979 "Should use SimplifyDemandedVectorElts if you don't know whether Op" 2980 " is a target node!"); 2981 return false; 2982 } 2983 2984 bool TargetLowering::SimplifyDemandedBitsForTargetNode( 2985 SDValue Op, const APInt &DemandedBits, const APInt &DemandedElts, 2986 KnownBits &Known, TargetLoweringOpt &TLO, unsigned Depth) const { 2987 assert((Op.getOpcode() >= ISD::BUILTIN_OP_END || 2988 Op.getOpcode() == ISD::INTRINSIC_WO_CHAIN || 2989 Op.getOpcode() == ISD::INTRINSIC_W_CHAIN || 2990 Op.getOpcode() == ISD::INTRINSIC_VOID) && 2991 "Should use SimplifyDemandedBits if you don't know whether Op" 2992 " is a target node!"); 2993 computeKnownBitsForTargetNode(Op, Known, DemandedElts, TLO.DAG, Depth); 2994 return false; 2995 } 2996 2997 SDValue TargetLowering::SimplifyMultipleUseDemandedBitsForTargetNode( 2998 SDValue Op, const APInt &DemandedBits, const APInt &DemandedElts, 2999 SelectionDAG &DAG, unsigned Depth) const { 3000 assert( 3001 (Op.getOpcode() >= ISD::BUILTIN_OP_END || 3002 Op.getOpcode() == ISD::INTRINSIC_WO_CHAIN || 3003 Op.getOpcode() == ISD::INTRINSIC_W_CHAIN || 3004 Op.getOpcode() == ISD::INTRINSIC_VOID) && 3005 "Should use SimplifyMultipleUseDemandedBits if you don't know whether Op" 3006 " is a target node!"); 3007 return SDValue(); 3008 } 3009 3010 SDValue 3011 TargetLowering::buildLegalVectorShuffle(EVT VT, const SDLoc &DL, SDValue N0, 3012 SDValue N1, MutableArrayRef<int> Mask, 3013 SelectionDAG &DAG) const { 3014 bool LegalMask = isShuffleMaskLegal(Mask, VT); 3015 if (!LegalMask) { 3016 std::swap(N0, N1); 3017 ShuffleVectorSDNode::commuteMask(Mask); 3018 LegalMask = isShuffleMaskLegal(Mask, VT); 3019 } 3020 3021 if (!LegalMask) 3022 return SDValue(); 3023 3024 return DAG.getVectorShuffle(VT, DL, N0, N1, Mask); 3025 } 3026 3027 const Constant *TargetLowering::getTargetConstantFromLoad(LoadSDNode*) const { 3028 return nullptr; 3029 } 3030 3031 bool TargetLowering::isKnownNeverNaNForTargetNode(SDValue Op, 3032 const SelectionDAG &DAG, 3033 bool SNaN, 3034 unsigned Depth) const { 
  assert((Op.getOpcode() >= ISD::BUILTIN_OP_END ||
          Op.getOpcode() == ISD::INTRINSIC_WO_CHAIN ||
          Op.getOpcode() == ISD::INTRINSIC_W_CHAIN ||
          Op.getOpcode() == ISD::INTRINSIC_VOID) &&
         "Should use isKnownNeverNaN if you don't know whether Op"
         " is a target node!");
  return false;
}

// FIXME: Ideally, this would use ISD::isConstantSplatVector(), but that must
// work with truncating build vectors and vectors with elements of less than
// 8 bits.
bool TargetLowering::isConstTrueVal(const SDNode *N) const {
  if (!N)
    return false;

  APInt CVal;
  if (auto *CN = dyn_cast<ConstantSDNode>(N)) {
    CVal = CN->getAPIntValue();
  } else if (auto *BV = dyn_cast<BuildVectorSDNode>(N)) {
    auto *CN = BV->getConstantSplatNode();
    if (!CN)
      return false;

    // If this is a truncating build vector, truncate the splat value.
    // Otherwise, we may fail to match the expected values below.
    unsigned BVEltWidth = BV->getValueType(0).getScalarSizeInBits();
    CVal = CN->getAPIntValue();
    if (BVEltWidth < CVal.getBitWidth())
      CVal = CVal.trunc(BVEltWidth);
  } else {
    return false;
  }

  switch (getBooleanContents(N->getValueType(0))) {
  case UndefinedBooleanContent:
    return CVal[0];
  case ZeroOrOneBooleanContent:
    return CVal.isOneValue();
  case ZeroOrNegativeOneBooleanContent:
    return CVal.isAllOnesValue();
  }

  llvm_unreachable("Invalid boolean contents");
}

bool TargetLowering::isConstFalseVal(const SDNode *N) const {
  if (!N)
    return false;

  const ConstantSDNode *CN = dyn_cast<ConstantSDNode>(N);
  if (!CN) {
    const BuildVectorSDNode *BV = dyn_cast<BuildVectorSDNode>(N);
    if (!BV)
      return false;

    // We are only interested in constant splats; undef elements don't matter
    // when identifying boolean constants, and getConstantSplatNode returns
    // NULL if all ops are undef.
    CN = BV->getConstantSplatNode();
    if (!CN)
      return false;
  }

  if (getBooleanContents(N->getValueType(0)) == UndefinedBooleanContent)
    return !CN->getAPIntValue()[0];

  return CN->isNullValue();
}

bool TargetLowering::isExtendedTrueVal(const ConstantSDNode *N, EVT VT,
                                       bool SExt) const {
  if (VT == MVT::i1)
    return N->isOne();

  TargetLowering::BooleanContent Cnt = getBooleanContents(VT);
  switch (Cnt) {
  case TargetLowering::ZeroOrOneBooleanContent:
    // An extended value of 1 is always true, unless its original type is i1,
    // in which case it will be sign extended to -1.
    return (N->isOne() && !SExt) || (SExt && (N->getValueType(0) != MVT::i1));
  case TargetLowering::UndefinedBooleanContent:
  case TargetLowering::ZeroOrNegativeOneBooleanContent:
    return N->isAllOnesValue() && SExt;
  }
  llvm_unreachable("Unexpected enumeration.");
}

/// This helper function of SimplifySetCC tries to optimize the comparison when
/// either operand of the SetCC node is a bitwise-and instruction.
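/// For example, (X & 8) == 8 can become (X & 8) != 0 when 8 is known to be a
/// power of two, and (X & Y) == Y can become (~X & Y) == 0 on targets with an
/// and-not instruction.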
3125 SDValue TargetLowering::foldSetCCWithAnd(EVT VT, SDValue N0, SDValue N1, 3126 ISD::CondCode Cond, const SDLoc &DL, 3127 DAGCombinerInfo &DCI) const { 3128 // Match these patterns in any of their permutations: 3129 // (X & Y) == Y 3130 // (X & Y) != Y 3131 if (N1.getOpcode() == ISD::AND && N0.getOpcode() != ISD::AND) 3132 std::swap(N0, N1); 3133 3134 EVT OpVT = N0.getValueType(); 3135 if (N0.getOpcode() != ISD::AND || !OpVT.isInteger() || 3136 (Cond != ISD::SETEQ && Cond != ISD::SETNE)) 3137 return SDValue(); 3138 3139 SDValue X, Y; 3140 if (N0.getOperand(0) == N1) { 3141 X = N0.getOperand(1); 3142 Y = N0.getOperand(0); 3143 } else if (N0.getOperand(1) == N1) { 3144 X = N0.getOperand(0); 3145 Y = N0.getOperand(1); 3146 } else { 3147 return SDValue(); 3148 } 3149 3150 SelectionDAG &DAG = DCI.DAG; 3151 SDValue Zero = DAG.getConstant(0, DL, OpVT); 3152 if (DAG.isKnownToBeAPowerOfTwo(Y)) { 3153 // Simplify X & Y == Y to X & Y != 0 if Y has exactly one bit set. 3154 // Note that where Y is variable and is known to have at most one bit set 3155 // (for example, if it is Z & 1) we cannot do this; the expressions are not 3156 // equivalent when Y == 0. 3157 assert(OpVT.isInteger()); 3158 Cond = ISD::getSetCCInverse(Cond, OpVT); 3159 if (DCI.isBeforeLegalizeOps() || 3160 isCondCodeLegal(Cond, N0.getSimpleValueType())) 3161 return DAG.getSetCC(DL, VT, N0, Zero, Cond); 3162 } else if (N0.hasOneUse() && hasAndNotCompare(Y)) { 3163 // If the target supports an 'and-not' or 'and-complement' logic operation, 3164 // try to use that to make a comparison operation more efficient. 3165 // But don't do this transform if the mask is a single bit because there are 3166 // more efficient ways to deal with that case (for example, 'bt' on x86 or 3167 // 'rlwinm' on PPC). 3168 3169 // Bail out if the compare operand that we want to turn into a zero is 3170 // already a zero (otherwise, infinite loop). 3171 auto *YConst = dyn_cast<ConstantSDNode>(Y); 3172 if (YConst && YConst->isNullValue()) 3173 return SDValue(); 3174 3175 // Transform this into: ~X & Y == 0. 3176 SDValue NotX = DAG.getNOT(SDLoc(X), X, OpVT); 3177 SDValue NewAnd = DAG.getNode(ISD::AND, SDLoc(N0), OpVT, NotX, Y); 3178 return DAG.getSetCC(DL, VT, NewAnd, Zero, Cond); 3179 } 3180 3181 return SDValue(); 3182 } 3183 3184 /// There are multiple IR patterns that could be checking whether certain 3185 /// truncation of a signed number would be lossy or not. The pattern which is 3186 /// best at IR level, may not lower optimally. Thus, we want to unfold it. 3187 /// We are looking for the following pattern: (KeptBits is a constant) 3188 /// (add %x, (1 << (KeptBits-1))) srccond (1 << KeptBits) 3189 /// KeptBits won't be bitwidth(x), that will be constant-folded to true/false. 3190 /// KeptBits also can't be 1, that would have been folded to %x dstcond 0 3191 /// We will unfold it into the natural trunc+sext pattern: 3192 /// ((%x << C) a>> C) dstcond %x 3193 /// Where C = bitwidth(x) - KeptBits and C u< bitwidth(x) 3194 SDValue TargetLowering::optimizeSetCCOfSignedTruncationCheck( 3195 EVT SCCVT, SDValue N0, SDValue N1, ISD::CondCode Cond, DAGCombinerInfo &DCI, 3196 const SDLoc &DL) const { 3197 // We must be comparing with a constant. 3198 ConstantSDNode *C1; 3199 if (!(C1 = dyn_cast<ConstantSDNode>(N1))) 3200 return SDValue(); 3201 3202 // N0 should be: add %x, (1 << (KeptBits-1)) 3203 if (N0->getOpcode() != ISD::ADD) 3204 return SDValue(); 3205 3206 // And we must be 'add'ing a constant. 
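// As a concrete instance of the pattern (illustrative): with i16 %x and
// KeptBits == 8, we are matching
//   (add %x, 128) ult 256
// which unfolds to ((%x << 8) a>> 8) == %x, i.e. a check that %x is a
// sign-extended i8.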
3207 ConstantSDNode *C01; 3208 if (!(C01 = dyn_cast<ConstantSDNode>(N0->getOperand(1)))) 3209 return SDValue(); 3210 3211 SDValue X = N0->getOperand(0); 3212 EVT XVT = X.getValueType(); 3213 3214 // Validate constants ... 3215 3216 APInt I1 = C1->getAPIntValue(); 3217 3218 ISD::CondCode NewCond; 3219 if (Cond == ISD::CondCode::SETULT) { 3220 NewCond = ISD::CondCode::SETEQ; 3221 } else if (Cond == ISD::CondCode::SETULE) { 3222 NewCond = ISD::CondCode::SETEQ; 3223 // But need to 'canonicalize' the constant. 3224 I1 += 1; 3225 } else if (Cond == ISD::CondCode::SETUGT) { 3226 NewCond = ISD::CondCode::SETNE; 3227 // But need to 'canonicalize' the constant. 3228 I1 += 1; 3229 } else if (Cond == ISD::CondCode::SETUGE) { 3230 NewCond = ISD::CondCode::SETNE; 3231 } else 3232 return SDValue(); 3233 3234 APInt I01 = C01->getAPIntValue(); 3235 3236 auto checkConstants = [&I1, &I01]() -> bool { 3237 // Both of them must be power-of-two, and the constant from setcc is bigger. 3238 return I1.ugt(I01) && I1.isPowerOf2() && I01.isPowerOf2(); 3239 }; 3240 3241 if (checkConstants()) { 3242 // Great, e.g. got icmp ult i16 (add i16 %x, 128), 256 3243 } else { 3244 // What if we invert constants? (and the target predicate) 3245 I1.negate(); 3246 I01.negate(); 3247 assert(XVT.isInteger()); 3248 NewCond = getSetCCInverse(NewCond, XVT); 3249 if (!checkConstants()) 3250 return SDValue(); 3251 // Great, e.g. got icmp uge i16 (add i16 %x, -128), -256 3252 } 3253 3254 // They are power-of-two, so which bit is set? 3255 const unsigned KeptBits = I1.logBase2(); 3256 const unsigned KeptBitsMinusOne = I01.logBase2(); 3257 3258 // Magic! 3259 if (KeptBits != (KeptBitsMinusOne + 1)) 3260 return SDValue(); 3261 assert(KeptBits > 0 && KeptBits < XVT.getSizeInBits() && "unreachable"); 3262 3263 // We don't want to do this in every single case. 3264 SelectionDAG &DAG = DCI.DAG; 3265 if (!DAG.getTargetLoweringInfo().shouldTransformSignedTruncationCheck( 3266 XVT, KeptBits)) 3267 return SDValue(); 3268 3269 const unsigned MaskedBits = XVT.getSizeInBits() - KeptBits; 3270 assert(MaskedBits > 0 && MaskedBits < XVT.getSizeInBits() && "unreachable"); 3271 3272 // Unfold into: ((%x << C) a>> C) cond %x 3273 // Where 'cond' will be either 'eq' or 'ne'. 3274 SDValue ShiftAmt = DAG.getConstant(MaskedBits, DL, XVT); 3275 SDValue T0 = DAG.getNode(ISD::SHL, DL, XVT, X, ShiftAmt); 3276 SDValue T1 = DAG.getNode(ISD::SRA, DL, XVT, T0, ShiftAmt); 3277 SDValue T2 = DAG.getSetCC(DL, SCCVT, T1, X, NewCond); 3278 3279 return T2; 3280 } 3281 3282 // (X & (C l>>/<< Y)) ==/!= 0 --> ((X <</l>> Y) & C) ==/!= 0 3283 SDValue TargetLowering::optimizeSetCCByHoistingAndByConstFromLogicalShift( 3284 EVT SCCVT, SDValue N0, SDValue N1C, ISD::CondCode Cond, 3285 DAGCombinerInfo &DCI, const SDLoc &DL) const { 3286 assert(isConstOrConstSplat(N1C) && 3287 isConstOrConstSplat(N1C)->getAPIntValue().isNullValue() && 3288 "Should be a comparison with 0."); 3289 assert((Cond == ISD::SETEQ || Cond == ISD::SETNE) && 3290 "Valid only for [in]equality comparisons."); 3291 3292 unsigned NewShiftOpcode; 3293 SDValue X, C, Y; 3294 3295 SelectionDAG &DAG = DCI.DAG; 3296 const TargetLowering &TLI = DAG.getTargetLoweringInfo(); 3297 3298 // Look for '(C l>>/<< Y)'. 3299 auto Match = [&NewShiftOpcode, &X, &C, &Y, &TLI, &DAG](SDValue V) { 3300 // The shift should be one-use. 
3301 if (!V.hasOneUse())
3302 return false;
3303 unsigned OldShiftOpcode = V.getOpcode();
3304 switch (OldShiftOpcode) {
3305 case ISD::SHL:
3306 NewShiftOpcode = ISD::SRL;
3307 break;
3308 case ISD::SRL:
3309 NewShiftOpcode = ISD::SHL;
3310 break;
3311 default:
3312 return false; // must be a logical shift.
3313 }
3314 // We should be shifting a constant.
3315 // FIXME: best to use isConstantOrConstantVector().
3316 C = V.getOperand(0);
3317 ConstantSDNode *CC =
3318 isConstOrConstSplat(C, /*AllowUndefs=*/true, /*AllowTruncation=*/true);
3319 if (!CC)
3320 return false;
3321 Y = V.getOperand(1);
3322
3323 ConstantSDNode *XC =
3324 isConstOrConstSplat(X, /*AllowUndefs=*/true, /*AllowTruncation=*/true);
3325 return TLI.shouldProduceAndByConstByHoistingConstFromShiftsLHSOfAnd(
3326 X, XC, CC, Y, OldShiftOpcode, NewShiftOpcode, DAG);
3327 };
3328
3329 // LHS of comparison should be a one-use 'and'.
3330 if (N0.getOpcode() != ISD::AND || !N0.hasOneUse())
3331 return SDValue();
3332
3333 X = N0.getOperand(0);
3334 SDValue Mask = N0.getOperand(1);
3335
3336 // 'and' is commutative!
3337 if (!Match(Mask)) {
3338 std::swap(X, Mask);
3339 if (!Match(Mask))
3340 return SDValue();
3341 }
3342
3343 EVT VT = X.getValueType();
3344
3345 // Produce:
3346 // ((X 'OppositeShiftOpcode' Y) & C) Cond 0
3347 SDValue T0 = DAG.getNode(NewShiftOpcode, DL, VT, X, Y);
3348 SDValue T1 = DAG.getNode(ISD::AND, DL, VT, T0, C);
3349 SDValue T2 = DAG.getSetCC(DL, SCCVT, T1, N1C, Cond);
3350 return T2;
3351 }
3352
3353 /// Try to fold an equality comparison with a {add/sub/xor} binary operation as
3354 /// the 1st operand (N0). Callers are expected to swap the N0/N1 parameters to
3355 /// handle the commuted versions of these patterns.
3356 SDValue TargetLowering::foldSetCCWithBinOp(EVT VT, SDValue N0, SDValue N1,
3357 ISD::CondCode Cond, const SDLoc &DL,
3358 DAGCombinerInfo &DCI) const {
3359 unsigned BOpcode = N0.getOpcode();
3360 assert((BOpcode == ISD::ADD || BOpcode == ISD::SUB || BOpcode == ISD::XOR) &&
3361 "Unexpected binop");
3362 assert((Cond == ISD::SETEQ || Cond == ISD::SETNE) && "Unexpected condcode");
3363
3364 // (X + Y) == X --> Y == 0
3365 // (X - Y) == X --> Y == 0
3366 // (X ^ Y) == X --> Y == 0
3367 SelectionDAG &DAG = DCI.DAG;
3368 EVT OpVT = N0.getValueType();
3369 SDValue X = N0.getOperand(0);
3370 SDValue Y = N0.getOperand(1);
3371 if (X == N1)
3372 return DAG.getSetCC(DL, VT, Y, DAG.getConstant(0, DL, OpVT), Cond);
3373
3374 if (Y != N1)
3375 return SDValue();
3376
3377 // (X + Y) == Y --> X == 0
3378 // (X ^ Y) == Y --> X == 0
3379 if (BOpcode == ISD::ADD || BOpcode == ISD::XOR)
3380 return DAG.getSetCC(DL, VT, X, DAG.getConstant(0, DL, OpVT), Cond);
3381
3382 // The shift would not be valid if the operands are boolean (i1).
3383 if (!N0.hasOneUse() || OpVT.getScalarSizeInBits() == 1)
3384 return SDValue();
3385
3386 // (X - Y) == Y --> X == Y << 1
3387 EVT ShiftVT = getShiftAmountTy(OpVT, DAG.getDataLayout(),
3388 !DCI.isBeforeLegalize());
3389 SDValue One = DAG.getConstant(1, DL, ShiftVT);
3390 SDValue YShl1 = DAG.getNode(ISD::SHL, DL, N1.getValueType(), Y, One);
3391 if (!DCI.isCalledByLegalizer())
3392 DCI.AddToWorklist(YShl1.getNode());
3393 return DAG.getSetCC(DL, VT, X, YShl1, Cond);
3394 }
3395
3396 static SDValue simplifySetCCWithCTPOP(const TargetLowering &TLI, EVT VT,
3397 SDValue N0, const APInt &C1,
3398 ISD::CondCode Cond, const SDLoc &dl,
3399 SelectionDAG &DAG) {
3400 // Look through truncs that don't change the value of a ctpop.
3401 // FIXME: Add vector support?
Need to be careful with setcc result type below. 3402 SDValue CTPOP = N0; 3403 if (N0.getOpcode() == ISD::TRUNCATE && N0.hasOneUse() && !VT.isVector() && 3404 N0.getScalarValueSizeInBits() > Log2_32(N0.getOperand(0).getScalarValueSizeInBits())) 3405 CTPOP = N0.getOperand(0); 3406 3407 if (CTPOP.getOpcode() != ISD::CTPOP || !CTPOP.hasOneUse()) 3408 return SDValue(); 3409 3410 EVT CTVT = CTPOP.getValueType(); 3411 SDValue CTOp = CTPOP.getOperand(0); 3412 3413 // If this is a vector CTPOP, keep the CTPOP if it is legal. 3414 // TODO: Should we check if CTPOP is legal(or custom) for scalars? 3415 if (VT.isVector() && TLI.isOperationLegal(ISD::CTPOP, CTVT)) 3416 return SDValue(); 3417 3418 // (ctpop x) u< 2 -> (x & x-1) == 0 3419 // (ctpop x) u> 1 -> (x & x-1) != 0 3420 if (Cond == ISD::SETULT || Cond == ISD::SETUGT) { 3421 unsigned CostLimit = TLI.getCustomCtpopCost(CTVT, Cond); 3422 if (C1.ugt(CostLimit + (Cond == ISD::SETULT))) 3423 return SDValue(); 3424 if (C1 == 0 && (Cond == ISD::SETULT)) 3425 return SDValue(); // This is handled elsewhere. 3426 3427 unsigned Passes = C1.getLimitedValue() - (Cond == ISD::SETULT); 3428 3429 SDValue NegOne = DAG.getAllOnesConstant(dl, CTVT); 3430 SDValue Result = CTOp; 3431 for (unsigned i = 0; i < Passes; i++) { 3432 SDValue Add = DAG.getNode(ISD::ADD, dl, CTVT, Result, NegOne); 3433 Result = DAG.getNode(ISD::AND, dl, CTVT, Result, Add); 3434 } 3435 ISD::CondCode CC = Cond == ISD::SETULT ? ISD::SETEQ : ISD::SETNE; 3436 return DAG.getSetCC(dl, VT, Result, DAG.getConstant(0, dl, CTVT), CC); 3437 } 3438 3439 // If ctpop is not supported, expand a power-of-2 comparison based on it. 3440 if ((Cond == ISD::SETEQ || Cond == ISD::SETNE) && C1 == 1) { 3441 // For scalars, keep CTPOP if it is legal or custom. 3442 if (!VT.isVector() && TLI.isOperationLegalOrCustom(ISD::CTPOP, CTVT)) 3443 return SDValue(); 3444 // This is based on X86's custom lowering for CTPOP which produces more 3445 // instructions than the expansion here. 3446 3447 // (ctpop x) == 1 --> (x != 0) && ((x & x-1) == 0) 3448 // (ctpop x) != 1 --> (x == 0) || ((x & x-1) != 0) 3449 SDValue Zero = DAG.getConstant(0, dl, CTVT); 3450 SDValue NegOne = DAG.getAllOnesConstant(dl, CTVT); 3451 assert(CTVT.isInteger()); 3452 ISD::CondCode InvCond = ISD::getSetCCInverse(Cond, CTVT); 3453 SDValue Add = DAG.getNode(ISD::ADD, dl, CTVT, CTOp, NegOne); 3454 SDValue And = DAG.getNode(ISD::AND, dl, CTVT, CTOp, Add); 3455 SDValue LHS = DAG.getSetCC(dl, VT, CTOp, Zero, InvCond); 3456 SDValue RHS = DAG.getSetCC(dl, VT, And, Zero, Cond); 3457 unsigned LogicOpcode = Cond == ISD::SETEQ ? ISD::AND : ISD::OR; 3458 return DAG.getNode(LogicOpcode, dl, VT, LHS, RHS); 3459 } 3460 3461 return SDValue(); 3462 } 3463 3464 /// Try to simplify a setcc built with the specified operands and cc. If it is 3465 /// unable to simplify it, return a null SDValue. 3466 SDValue TargetLowering::SimplifySetCC(EVT VT, SDValue N0, SDValue N1, 3467 ISD::CondCode Cond, bool foldBooleans, 3468 DAGCombinerInfo &DCI, 3469 const SDLoc &dl) const { 3470 SelectionDAG &DAG = DCI.DAG; 3471 const DataLayout &Layout = DAG.getDataLayout(); 3472 EVT OpVT = N0.getValueType(); 3473 3474 // Constant fold or commute setcc. 3475 if (SDValue Fold = DAG.FoldSetCC(VT, N0, N1, Cond, dl)) 3476 return Fold; 3477 3478 // Ensure that the constant occurs on the RHS and fold constant comparisons. 3479 // TODO: Handle non-splat vector constants. All undef causes trouble. 
3480 // FIXME: We can't yet fold constant scalable vector splats, so avoid an 3481 // infinite loop here when we encounter one. 3482 ISD::CondCode SwappedCC = ISD::getSetCCSwappedOperands(Cond); 3483 if (isConstOrConstSplat(N0) && 3484 (!OpVT.isScalableVector() || !isConstOrConstSplat(N1)) && 3485 (DCI.isBeforeLegalizeOps() || 3486 isCondCodeLegal(SwappedCC, N0.getSimpleValueType()))) 3487 return DAG.getSetCC(dl, VT, N1, N0, SwappedCC); 3488 3489 // If we have a subtract with the same 2 non-constant operands as this setcc 3490 // -- but in reverse order -- then try to commute the operands of this setcc 3491 // to match. A matching pair of setcc (cmp) and sub may be combined into 1 3492 // instruction on some targets. 3493 if (!isConstOrConstSplat(N0) && !isConstOrConstSplat(N1) && 3494 (DCI.isBeforeLegalizeOps() || 3495 isCondCodeLegal(SwappedCC, N0.getSimpleValueType())) && 3496 DAG.doesNodeExist(ISD::SUB, DAG.getVTList(OpVT), {N1, N0}) && 3497 !DAG.doesNodeExist(ISD::SUB, DAG.getVTList(OpVT), {N0, N1})) 3498 return DAG.getSetCC(dl, VT, N1, N0, SwappedCC); 3499 3500 if (auto *N1C = isConstOrConstSplat(N1)) { 3501 const APInt &C1 = N1C->getAPIntValue(); 3502 3503 // Optimize some CTPOP cases. 3504 if (SDValue V = simplifySetCCWithCTPOP(*this, VT, N0, C1, Cond, dl, DAG)) 3505 return V; 3506 3507 // If the LHS is '(srl (ctlz x), 5)', the RHS is 0/1, and this is an 3508 // equality comparison, then we're just comparing whether X itself is 3509 // zero. 3510 if (N0.getOpcode() == ISD::SRL && (C1.isNullValue() || C1.isOneValue()) && 3511 N0.getOperand(0).getOpcode() == ISD::CTLZ && 3512 isPowerOf2_32(N0.getScalarValueSizeInBits())) { 3513 if (ConstantSDNode *ShAmt = isConstOrConstSplat(N0.getOperand(1))) { 3514 if ((Cond == ISD::SETEQ || Cond == ISD::SETNE) && 3515 ShAmt->getAPIntValue() == Log2_32(N0.getScalarValueSizeInBits())) { 3516 if ((C1 == 0) == (Cond == ISD::SETEQ)) { 3517 // (srl (ctlz x), 5) == 0 -> X != 0 3518 // (srl (ctlz x), 5) != 1 -> X != 0 3519 Cond = ISD::SETNE; 3520 } else { 3521 // (srl (ctlz x), 5) != 0 -> X == 0 3522 // (srl (ctlz x), 5) == 1 -> X == 0 3523 Cond = ISD::SETEQ; 3524 } 3525 SDValue Zero = DAG.getConstant(0, dl, N0.getValueType()); 3526 return DAG.getSetCC(dl, VT, N0.getOperand(0).getOperand(0), Zero, 3527 Cond); 3528 } 3529 } 3530 } 3531 } 3532 3533 // FIXME: Support vectors. 
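// A scalar example of the narrowing performed below (illustrative):
//   (seteq (zext i8 %x to i32), 42) --> (seteq i8 %x, 42)
// since 42 survives truncation to the 8 preserved bits.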
3534 if (auto *N1C = dyn_cast<ConstantSDNode>(N1.getNode())) { 3535 const APInt &C1 = N1C->getAPIntValue(); 3536 3537 // (zext x) == C --> x == (trunc C) 3538 // (sext x) == C --> x == (trunc C) 3539 if ((Cond == ISD::SETEQ || Cond == ISD::SETNE) && 3540 DCI.isBeforeLegalize() && N0->hasOneUse()) { 3541 unsigned MinBits = N0.getValueSizeInBits(); 3542 SDValue PreExt; 3543 bool Signed = false; 3544 if (N0->getOpcode() == ISD::ZERO_EXTEND) { 3545 // ZExt 3546 MinBits = N0->getOperand(0).getValueSizeInBits(); 3547 PreExt = N0->getOperand(0); 3548 } else if (N0->getOpcode() == ISD::AND) { 3549 // DAGCombine turns costly ZExts into ANDs 3550 if (auto *C = dyn_cast<ConstantSDNode>(N0->getOperand(1))) 3551 if ((C->getAPIntValue()+1).isPowerOf2()) { 3552 MinBits = C->getAPIntValue().countTrailingOnes(); 3553 PreExt = N0->getOperand(0); 3554 } 3555 } else if (N0->getOpcode() == ISD::SIGN_EXTEND) { 3556 // SExt 3557 MinBits = N0->getOperand(0).getValueSizeInBits(); 3558 PreExt = N0->getOperand(0); 3559 Signed = true; 3560 } else if (auto *LN0 = dyn_cast<LoadSDNode>(N0)) { 3561 // ZEXTLOAD / SEXTLOAD 3562 if (LN0->getExtensionType() == ISD::ZEXTLOAD) { 3563 MinBits = LN0->getMemoryVT().getSizeInBits(); 3564 PreExt = N0; 3565 } else if (LN0->getExtensionType() == ISD::SEXTLOAD) { 3566 Signed = true; 3567 MinBits = LN0->getMemoryVT().getSizeInBits(); 3568 PreExt = N0; 3569 } 3570 } 3571 3572 // Figure out how many bits we need to preserve this constant. 3573 unsigned ReqdBits = Signed ? 3574 C1.getBitWidth() - C1.getNumSignBits() + 1 : 3575 C1.getActiveBits(); 3576 3577 // Make sure we're not losing bits from the constant. 3578 if (MinBits > 0 && 3579 MinBits < C1.getBitWidth() && 3580 MinBits >= ReqdBits) { 3581 EVT MinVT = EVT::getIntegerVT(*DAG.getContext(), MinBits); 3582 if (isTypeDesirableForOp(ISD::SETCC, MinVT)) { 3583 // Will get folded away. 3584 SDValue Trunc = DAG.getNode(ISD::TRUNCATE, dl, MinVT, PreExt); 3585 if (MinBits == 1 && C1 == 1) 3586 // Invert the condition. 3587 return DAG.getSetCC(dl, VT, Trunc, DAG.getConstant(0, dl, MVT::i1), 3588 Cond == ISD::SETEQ ? 
ISD::SETNE : ISD::SETEQ); 3589 SDValue C = DAG.getConstant(C1.trunc(MinBits), dl, MinVT); 3590 return DAG.getSetCC(dl, VT, Trunc, C, Cond); 3591 } 3592 3593 // If truncating the setcc operands is not desirable, we can still 3594 // simplify the expression in some cases: 3595 // setcc ([sz]ext (setcc x, y, cc)), 0, setne) -> setcc (x, y, cc) 3596 // setcc ([sz]ext (setcc x, y, cc)), 0, seteq) -> setcc (x, y, inv(cc)) 3597 // setcc (zext (setcc x, y, cc)), 1, setne) -> setcc (x, y, inv(cc)) 3598 // setcc (zext (setcc x, y, cc)), 1, seteq) -> setcc (x, y, cc) 3599 // setcc (sext (setcc x, y, cc)), -1, setne) -> setcc (x, y, inv(cc)) 3600 // setcc (sext (setcc x, y, cc)), -1, seteq) -> setcc (x, y, cc) 3601 SDValue TopSetCC = N0->getOperand(0); 3602 unsigned N0Opc = N0->getOpcode(); 3603 bool SExt = (N0Opc == ISD::SIGN_EXTEND); 3604 if (TopSetCC.getValueType() == MVT::i1 && VT == MVT::i1 && 3605 TopSetCC.getOpcode() == ISD::SETCC && 3606 (N0Opc == ISD::ZERO_EXTEND || N0Opc == ISD::SIGN_EXTEND) && 3607 (isConstFalseVal(N1C) || 3608 isExtendedTrueVal(N1C, N0->getValueType(0), SExt))) { 3609 3610 bool Inverse = (N1C->isNullValue() && Cond == ISD::SETEQ) || 3611 (!N1C->isNullValue() && Cond == ISD::SETNE); 3612 3613 if (!Inverse) 3614 return TopSetCC; 3615 3616 ISD::CondCode InvCond = ISD::getSetCCInverse( 3617 cast<CondCodeSDNode>(TopSetCC.getOperand(2))->get(), 3618 TopSetCC.getOperand(0).getValueType()); 3619 return DAG.getSetCC(dl, VT, TopSetCC.getOperand(0), 3620 TopSetCC.getOperand(1), 3621 InvCond); 3622 } 3623 } 3624 } 3625 3626 // If the LHS is '(and load, const)', the RHS is 0, the test is for 3627 // equality or unsigned, and all 1 bits of the const are in the same 3628 // partial word, see if we can shorten the load. 3629 if (DCI.isBeforeLegalize() && 3630 !ISD::isSignedIntSetCC(Cond) && 3631 N0.getOpcode() == ISD::AND && C1 == 0 && 3632 N0.getNode()->hasOneUse() && 3633 isa<LoadSDNode>(N0.getOperand(0)) && 3634 N0.getOperand(0).getNode()->hasOneUse() && 3635 isa<ConstantSDNode>(N0.getOperand(1))) { 3636 LoadSDNode *Lod = cast<LoadSDNode>(N0.getOperand(0)); 3637 APInt bestMask; 3638 unsigned bestWidth = 0, bestOffset = 0; 3639 if (Lod->isSimple() && Lod->isUnindexed()) { 3640 unsigned origWidth = N0.getValueSizeInBits(); 3641 unsigned maskWidth = origWidth; 3642 // We can narrow (e.g.) 16-bit extending loads on 32-bit target to 3643 // 8 bits, but have to be careful... 
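// An illustrative little-endian case:
//   (and (load i32 %p), 0xFF0000) == 0
// can be narrowed by the search below to an i8 load from offset 2:
//   (and (load i8 %p+2), 0xFF) == 0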
3644 if (Lod->getExtensionType() != ISD::NON_EXTLOAD) 3645 origWidth = Lod->getMemoryVT().getSizeInBits(); 3646 const APInt &Mask = N0.getConstantOperandAPInt(1); 3647 for (unsigned width = origWidth / 2; width>=8; width /= 2) { 3648 APInt newMask = APInt::getLowBitsSet(maskWidth, width); 3649 for (unsigned offset=0; offset<origWidth/width; offset++) { 3650 if (Mask.isSubsetOf(newMask)) { 3651 if (Layout.isLittleEndian()) 3652 bestOffset = (uint64_t)offset * (width/8); 3653 else 3654 bestOffset = (origWidth/width - offset - 1) * (width/8); 3655 bestMask = Mask.lshr(offset * (width/8) * 8); 3656 bestWidth = width; 3657 break; 3658 } 3659 newMask <<= width; 3660 } 3661 } 3662 } 3663 if (bestWidth) { 3664 EVT newVT = EVT::getIntegerVT(*DAG.getContext(), bestWidth); 3665 if (newVT.isRound() && 3666 shouldReduceLoadWidth(Lod, ISD::NON_EXTLOAD, newVT)) { 3667 SDValue Ptr = Lod->getBasePtr(); 3668 if (bestOffset != 0) 3669 Ptr = 3670 DAG.getMemBasePlusOffset(Ptr, TypeSize::Fixed(bestOffset), dl); 3671 SDValue NewLoad = 3672 DAG.getLoad(newVT, dl, Lod->getChain(), Ptr, 3673 Lod->getPointerInfo().getWithOffset(bestOffset), 3674 Lod->getOriginalAlign()); 3675 return DAG.getSetCC(dl, VT, 3676 DAG.getNode(ISD::AND, dl, newVT, NewLoad, 3677 DAG.getConstant(bestMask.trunc(bestWidth), 3678 dl, newVT)), 3679 DAG.getConstant(0LL, dl, newVT), Cond); 3680 } 3681 } 3682 } 3683 3684 // If the LHS is a ZERO_EXTEND, perform the comparison on the input. 3685 if (N0.getOpcode() == ISD::ZERO_EXTEND) { 3686 unsigned InSize = N0.getOperand(0).getValueSizeInBits(); 3687 3688 // If the comparison constant has bits in the upper part, the 3689 // zero-extended value could never match. 3690 if (C1.intersects(APInt::getHighBitsSet(C1.getBitWidth(), 3691 C1.getBitWidth() - InSize))) { 3692 switch (Cond) { 3693 case ISD::SETUGT: 3694 case ISD::SETUGE: 3695 case ISD::SETEQ: 3696 return DAG.getConstant(0, dl, VT); 3697 case ISD::SETULT: 3698 case ISD::SETULE: 3699 case ISD::SETNE: 3700 return DAG.getConstant(1, dl, VT); 3701 case ISD::SETGT: 3702 case ISD::SETGE: 3703 // True if the sign bit of C1 is set. 3704 return DAG.getConstant(C1.isNegative(), dl, VT); 3705 case ISD::SETLT: 3706 case ISD::SETLE: 3707 // True if the sign bit of C1 isn't set. 3708 return DAG.getConstant(C1.isNonNegative(), dl, VT); 3709 default: 3710 break; 3711 } 3712 } 3713 3714 // Otherwise, we can perform the comparison with the low bits. 
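// For example (illustrative): with %e = (zext i8 %x to i32),
//   (setult %e, 100) --> (setult i8 %x, 100)
// because the high bits of %e are known to be zero.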
3715 switch (Cond) {
3716 case ISD::SETEQ:
3717 case ISD::SETNE:
3718 case ISD::SETUGT:
3719 case ISD::SETUGE:
3720 case ISD::SETULT:
3721 case ISD::SETULE: {
3722 EVT newVT = N0.getOperand(0).getValueType();
3723 if (DCI.isBeforeLegalizeOps() ||
3724 (isOperationLegal(ISD::SETCC, newVT) &&
3725 isCondCodeLegal(Cond, newVT.getSimpleVT()))) {
3726 EVT NewSetCCVT = getSetCCResultType(Layout, *DAG.getContext(), newVT);
3727 SDValue NewConst = DAG.getConstant(C1.trunc(InSize), dl, newVT);
3728
3729 SDValue NewSetCC = DAG.getSetCC(dl, NewSetCCVT, N0.getOperand(0),
3730 NewConst, Cond);
3731 return DAG.getBoolExtOrTrunc(NewSetCC, dl, VT, N0.getValueType());
3732 }
3733 break;
3734 }
3735 default:
3736 break; // TODO: Be more careful with signed comparisons.
3737 }
3738 } else if (N0.getOpcode() == ISD::SIGN_EXTEND_INREG &&
3739 (Cond == ISD::SETEQ || Cond == ISD::SETNE)) {
3740 EVT ExtSrcTy = cast<VTSDNode>(N0.getOperand(1))->getVT();
3741 unsigned ExtSrcTyBits = ExtSrcTy.getSizeInBits();
3742 EVT ExtDstTy = N0.getValueType();
3743 unsigned ExtDstTyBits = ExtDstTy.getSizeInBits();
3744
3745 // If the constant doesn't fit into the number of bits for the source of
3746 // the sign extension, it is impossible for both sides to be equal.
3747 if (C1.getMinSignedBits() > ExtSrcTyBits)
3748 return DAG.getConstant(Cond == ISD::SETNE, dl, VT);
3749
3750 SDValue ZextOp;
3751 EVT Op0Ty = N0.getOperand(0).getValueType();
3752 if (Op0Ty == ExtSrcTy) {
3753 ZextOp = N0.getOperand(0);
3754 } else {
3755 APInt Imm = APInt::getLowBitsSet(ExtDstTyBits, ExtSrcTyBits);
3756 ZextOp = DAG.getNode(ISD::AND, dl, Op0Ty, N0.getOperand(0),
3757 DAG.getConstant(Imm, dl, Op0Ty));
3758 }
3759 if (!DCI.isCalledByLegalizer())
3760 DCI.AddToWorklist(ZextOp.getNode());
3761 // Otherwise, make this a use of a zext.
3762 return DAG.getSetCC(dl, VT, ZextOp,
3763 DAG.getConstant(C1 & APInt::getLowBitsSet(
3764 ExtDstTyBits,
3765 ExtSrcTyBits),
3766 dl, ExtDstTy),
3767 Cond);
3768 } else if ((N1C->isNullValue() || N1C->isOne()) &&
3769 (Cond == ISD::SETEQ || Cond == ISD::SETNE)) {
3770 // SETCC (SETCC), [0|1], [EQ|NE] -> SETCC
3771 if (N0.getOpcode() == ISD::SETCC &&
3772 isTypeLegal(VT) && VT.bitsLE(N0.getValueType()) &&
3773 (N0.getValueType() == MVT::i1 ||
3774 getBooleanContents(N0.getOperand(0).getValueType()) ==
3775 ZeroOrOneBooleanContent)) {
3776 bool TrueWhenTrue = (Cond == ISD::SETEQ) ^ (!N1C->isOne());
3777 if (TrueWhenTrue)
3778 return DAG.getNode(ISD::TRUNCATE, dl, VT, N0);
3779 // Invert the condition.
3780 ISD::CondCode CC = cast<CondCodeSDNode>(N0.getOperand(2))->get();
3781 CC = ISD::getSetCCInverse(CC, N0.getOperand(0).getValueType());
3782 if (DCI.isBeforeLegalizeOps() ||
3783 isCondCodeLegal(CC, N0.getOperand(0).getSimpleValueType()))
3784 return DAG.getSetCC(dl, VT, N0.getOperand(0), N0.getOperand(1), CC);
3785 }
3786
3787 if ((N0.getOpcode() == ISD::XOR ||
3788 (N0.getOpcode() == ISD::AND &&
3789 N0.getOperand(0).getOpcode() == ISD::XOR &&
3790 N0.getOperand(1) == N0.getOperand(0).getOperand(1))) &&
3791 isa<ConstantSDNode>(N0.getOperand(1)) &&
3792 cast<ConstantSDNode>(N0.getOperand(1))->isOne()) {
3793 // If this is (X^1) == 0/1, swap the RHS and eliminate the xor. We
3794 // can only do this if the top bits are known zero.
3795 unsigned BitWidth = N0.getValueSizeInBits();
3796 if (DAG.MaskedValueIsZero(N0,
3797 APInt::getHighBitsSet(BitWidth,
3798 BitWidth-1))) {
3799 // Okay, get the un-inverted input value.
3800 SDValue Val; 3801 if (N0.getOpcode() == ISD::XOR) { 3802 Val = N0.getOperand(0); 3803 } else { 3804 assert(N0.getOpcode() == ISD::AND && 3805 N0.getOperand(0).getOpcode() == ISD::XOR); 3806 // ((X^1)&1)^1 -> X & 1 3807 Val = DAG.getNode(ISD::AND, dl, N0.getValueType(), 3808 N0.getOperand(0).getOperand(0), 3809 N0.getOperand(1)); 3810 } 3811 3812 return DAG.getSetCC(dl, VT, Val, N1, 3813 Cond == ISD::SETEQ ? ISD::SETNE : ISD::SETEQ); 3814 } 3815 } else if (N1C->isOne()) { 3816 SDValue Op0 = N0; 3817 if (Op0.getOpcode() == ISD::TRUNCATE) 3818 Op0 = Op0.getOperand(0); 3819 3820 if ((Op0.getOpcode() == ISD::XOR) && 3821 Op0.getOperand(0).getOpcode() == ISD::SETCC && 3822 Op0.getOperand(1).getOpcode() == ISD::SETCC) { 3823 SDValue XorLHS = Op0.getOperand(0); 3824 SDValue XorRHS = Op0.getOperand(1); 3825 // Ensure that the input setccs return an i1 type or 0/1 value. 3826 if (Op0.getValueType() == MVT::i1 || 3827 (getBooleanContents(XorLHS.getOperand(0).getValueType()) == 3828 ZeroOrOneBooleanContent && 3829 getBooleanContents(XorRHS.getOperand(0).getValueType()) == 3830 ZeroOrOneBooleanContent)) { 3831 // (xor (setcc), (setcc)) == / != 1 -> (setcc) != / == (setcc) 3832 Cond = (Cond == ISD::SETEQ) ? ISD::SETNE : ISD::SETEQ; 3833 return DAG.getSetCC(dl, VT, XorLHS, XorRHS, Cond); 3834 } 3835 } 3836 if (Op0.getOpcode() == ISD::AND && 3837 isa<ConstantSDNode>(Op0.getOperand(1)) && 3838 cast<ConstantSDNode>(Op0.getOperand(1))->isOne()) { 3839 // If this is (X&1) == / != 1, normalize it to (X&1) != / == 0. 3840 if (Op0.getValueType().bitsGT(VT)) 3841 Op0 = DAG.getNode(ISD::AND, dl, VT, 3842 DAG.getNode(ISD::TRUNCATE, dl, VT, Op0.getOperand(0)), 3843 DAG.getConstant(1, dl, VT)); 3844 else if (Op0.getValueType().bitsLT(VT)) 3845 Op0 = DAG.getNode(ISD::AND, dl, VT, 3846 DAG.getNode(ISD::ANY_EXTEND, dl, VT, Op0.getOperand(0)), 3847 DAG.getConstant(1, dl, VT)); 3848 3849 return DAG.getSetCC(dl, VT, Op0, 3850 DAG.getConstant(0, dl, Op0.getValueType()), 3851 Cond == ISD::SETEQ ? ISD::SETNE : ISD::SETEQ); 3852 } 3853 if (Op0.getOpcode() == ISD::AssertZext && 3854 cast<VTSDNode>(Op0.getOperand(1))->getVT() == MVT::i1) 3855 return DAG.getSetCC(dl, VT, Op0, 3856 DAG.getConstant(0, dl, Op0.getValueType()), 3857 Cond == ISD::SETEQ ? ISD::SETNE : ISD::SETEQ); 3858 } 3859 } 3860 3861 // Given: 3862 // icmp eq/ne (urem %x, %y), 0 3863 // Iff %x has 0 or 1 bits set, and %y has at least 2 bits set, omit 'urem': 3864 // icmp eq/ne %x, 0 3865 if (N0.getOpcode() == ISD::UREM && N1C->isNullValue() && 3866 (Cond == ISD::SETEQ || Cond == ISD::SETNE)) { 3867 KnownBits XKnown = DAG.computeKnownBits(N0.getOperand(0)); 3868 KnownBits YKnown = DAG.computeKnownBits(N0.getOperand(1)); 3869 if (XKnown.countMaxPopulation() == 1 && YKnown.countMinPopulation() >= 2) 3870 return DAG.getSetCC(dl, VT, N0.getOperand(0), N1, Cond); 3871 } 3872 3873 if (SDValue V = 3874 optimizeSetCCOfSignedTruncationCheck(VT, N0, N1, Cond, DCI, dl)) 3875 return V; 3876 } 3877 3878 // These simplifications apply to splat vectors as well. 3879 // TODO: Handle more splat vector cases. 
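// For example (illustrative), for an i8 operand:
//   (setge %x, -128) --> true
//   (setge %x, 5)    --> (setgt %x, 4)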
3880 if (auto *N1C = isConstOrConstSplat(N1)) { 3881 const APInt &C1 = N1C->getAPIntValue(); 3882 3883 APInt MinVal, MaxVal; 3884 unsigned OperandBitSize = N1C->getValueType(0).getScalarSizeInBits(); 3885 if (ISD::isSignedIntSetCC(Cond)) { 3886 MinVal = APInt::getSignedMinValue(OperandBitSize); 3887 MaxVal = APInt::getSignedMaxValue(OperandBitSize); 3888 } else { 3889 MinVal = APInt::getMinValue(OperandBitSize); 3890 MaxVal = APInt::getMaxValue(OperandBitSize); 3891 } 3892 3893 // Canonicalize GE/LE comparisons to use GT/LT comparisons. 3894 if (Cond == ISD::SETGE || Cond == ISD::SETUGE) { 3895 // X >= MIN --> true 3896 if (C1 == MinVal) 3897 return DAG.getBoolConstant(true, dl, VT, OpVT); 3898 3899 if (!VT.isVector()) { // TODO: Support this for vectors. 3900 // X >= C0 --> X > (C0 - 1) 3901 APInt C = C1 - 1; 3902 ISD::CondCode NewCC = (Cond == ISD::SETGE) ? ISD::SETGT : ISD::SETUGT; 3903 if ((DCI.isBeforeLegalizeOps() || 3904 isCondCodeLegal(NewCC, VT.getSimpleVT())) && 3905 (!N1C->isOpaque() || (C.getBitWidth() <= 64 && 3906 isLegalICmpImmediate(C.getSExtValue())))) { 3907 return DAG.getSetCC(dl, VT, N0, 3908 DAG.getConstant(C, dl, N1.getValueType()), 3909 NewCC); 3910 } 3911 } 3912 } 3913 3914 if (Cond == ISD::SETLE || Cond == ISD::SETULE) { 3915 // X <= MAX --> true 3916 if (C1 == MaxVal) 3917 return DAG.getBoolConstant(true, dl, VT, OpVT); 3918 3919 // X <= C0 --> X < (C0 + 1) 3920 if (!VT.isVector()) { // TODO: Support this for vectors. 3921 APInt C = C1 + 1; 3922 ISD::CondCode NewCC = (Cond == ISD::SETLE) ? ISD::SETLT : ISD::SETULT; 3923 if ((DCI.isBeforeLegalizeOps() || 3924 isCondCodeLegal(NewCC, VT.getSimpleVT())) && 3925 (!N1C->isOpaque() || (C.getBitWidth() <= 64 && 3926 isLegalICmpImmediate(C.getSExtValue())))) { 3927 return DAG.getSetCC(dl, VT, N0, 3928 DAG.getConstant(C, dl, N1.getValueType()), 3929 NewCC); 3930 } 3931 } 3932 } 3933 3934 if (Cond == ISD::SETLT || Cond == ISD::SETULT) { 3935 if (C1 == MinVal) 3936 return DAG.getBoolConstant(false, dl, VT, OpVT); // X < MIN --> false 3937 3938 // TODO: Support this for vectors after legalize ops. 3939 if (!VT.isVector() || DCI.isBeforeLegalizeOps()) { 3940 // Canonicalize setlt X, Max --> setne X, Max 3941 if (C1 == MaxVal) 3942 return DAG.getSetCC(dl, VT, N0, N1, ISD::SETNE); 3943 3944 // If we have setult X, 1, turn it into seteq X, 0 3945 if (C1 == MinVal+1) 3946 return DAG.getSetCC(dl, VT, N0, 3947 DAG.getConstant(MinVal, dl, N0.getValueType()), 3948 ISD::SETEQ); 3949 } 3950 } 3951 3952 if (Cond == ISD::SETGT || Cond == ISD::SETUGT) { 3953 if (C1 == MaxVal) 3954 return DAG.getBoolConstant(false, dl, VT, OpVT); // X > MAX --> false 3955 3956 // TODO: Support this for vectors after legalize ops. 3957 if (!VT.isVector() || DCI.isBeforeLegalizeOps()) { 3958 // Canonicalize setgt X, Min --> setne X, Min 3959 if (C1 == MinVal) 3960 return DAG.getSetCC(dl, VT, N0, N1, ISD::SETNE); 3961 3962 // If we have setugt X, Max-1, turn it into seteq X, Max 3963 if (C1 == MaxVal-1) 3964 return DAG.getSetCC(dl, VT, N0, 3965 DAG.getConstant(MaxVal, dl, N0.getValueType()), 3966 ISD::SETEQ); 3967 } 3968 } 3969 3970 if (Cond == ISD::SETEQ || Cond == ISD::SETNE) { 3971 // (X & (C l>>/<< Y)) ==/!= 0 --> ((X <</l>> Y) & C) ==/!= 0 3972 if (C1.isNullValue()) 3973 if (SDValue CC = optimizeSetCCByHoistingAndByConstFromLogicalShift( 3974 VT, N0, N1, Cond, DCI, dl)) 3975 return CC; 3976 3977 // For all/any comparisons, replace or(x,shl(y,bw/2)) with and/or(x,y). 
3978 // For example, when the high 32 bits of i64 X are known clear:
3979 // all bits clear: (X | (Y<<32)) == 0 --> (X | Y) == 0
3980 // all bits set: (X | (Y<<32)) == -1 --> (X & Y) == -1
3981 bool CmpZero = N1C->getAPIntValue().isNullValue();
3982 bool CmpNegOne = N1C->getAPIntValue().isAllOnesValue();
3983 if ((CmpZero || CmpNegOne) && N0.hasOneUse()) {
3984 // Match or(lo,shl(hi,bw/2)) pattern.
3985 auto IsConcat = [&](SDValue V, SDValue &Lo, SDValue &Hi) {
3986 unsigned EltBits = V.getScalarValueSizeInBits();
3987 if (V.getOpcode() != ISD::OR || (EltBits % 2) != 0)
3988 return false;
3989 SDValue LHS = V.getOperand(0);
3990 SDValue RHS = V.getOperand(1);
3991 APInt HiBits = APInt::getHighBitsSet(EltBits, EltBits / 2);
3992 // Unshifted element must have zero upper bits.
3993 if (RHS.getOpcode() == ISD::SHL &&
3994 isa<ConstantSDNode>(RHS.getOperand(1)) &&
3995 RHS.getConstantOperandAPInt(1) == (EltBits / 2) &&
3996 DAG.MaskedValueIsZero(LHS, HiBits)) {
3997 Lo = LHS;
3998 Hi = RHS.getOperand(0);
3999 return true;
4000 }
4001 if (LHS.getOpcode() == ISD::SHL &&
4002 isa<ConstantSDNode>(LHS.getOperand(1)) &&
4003 LHS.getConstantOperandAPInt(1) == (EltBits / 2) &&
4004 DAG.MaskedValueIsZero(RHS, HiBits)) {
4005 Lo = RHS;
4006 Hi = LHS.getOperand(0);
4007 return true;
4008 }
4009 return false;
4010 };
4011
4012 auto MergeConcat = [&](SDValue Lo, SDValue Hi) {
4013 unsigned EltBits = N0.getScalarValueSizeInBits();
4014 unsigned HalfBits = EltBits / 2;
4015 APInt HiBits = APInt::getHighBitsSet(EltBits, HalfBits);
4016 SDValue LoBits = DAG.getConstant(~HiBits, dl, OpVT);
4017 SDValue HiMask = DAG.getNode(ISD::AND, dl, OpVT, Hi, LoBits);
4018 SDValue NewN0 =
4019 DAG.getNode(CmpZero ? ISD::OR : ISD::AND, dl, OpVT, Lo, HiMask);
4020 SDValue NewN1 = CmpZero ? DAG.getConstant(0, dl, OpVT) : LoBits;
4021 return DAG.getSetCC(dl, VT, NewN0, NewN1, Cond);
4022 };
4023
4024 SDValue Lo, Hi;
4025 if (IsConcat(N0, Lo, Hi))
4026 return MergeConcat(Lo, Hi);
4027
4028 if (N0.getOpcode() == ISD::AND || N0.getOpcode() == ISD::OR) {
4029 SDValue Lo0, Lo1, Hi0, Hi1;
4030 if (IsConcat(N0.getOperand(0), Lo0, Hi0) &&
4031 IsConcat(N0.getOperand(1), Lo1, Hi1)) {
4032 return MergeConcat(DAG.getNode(N0.getOpcode(), dl, OpVT, Lo0, Lo1),
4033 DAG.getNode(N0.getOpcode(), dl, OpVT, Hi0, Hi1));
4034 }
4035 }
4036 }
4037 }
4038
4039 // If we have "setcc X, C0", check to see if we can shrink the immediate
4040 // by changing cc.
4041 // TODO: Support this for vectors after legalize ops.
4042 if (!VT.isVector() || DCI.isBeforeLegalizeOps()) {
4043 // SETUGT X, SINTMAX -> SETLT X, 0
4044 // SETUGE X, SINTMIN -> SETLT X, 0
4045 if ((Cond == ISD::SETUGT && C1.isMaxSignedValue()) ||
4046 (Cond == ISD::SETUGE && C1.isMinSignedValue()))
4047 return DAG.getSetCC(dl, VT, N0,
4048 DAG.getConstant(0, dl, N1.getValueType()),
4049 ISD::SETLT);
4050
4051 // SETULT X, SINTMIN -> SETGT X, -1
4052 // SETULE X, SINTMAX -> SETGT X, -1
4053 if ((Cond == ISD::SETULT && C1.isMinSignedValue()) ||
4054 (Cond == ISD::SETULE && C1.isMaxSignedValue()))
4055 return DAG.getSetCC(dl, VT, N0,
4056 DAG.getAllOnesConstant(dl, N1.getValueType()),
4057 ISD::SETGT);
4058 }
4059 }
4060
4061 // Back to non-vector simplifications.
4062 // TODO: Can we do these for vector splats?
4063 if (auto *N1C = dyn_cast<ConstantSDNode>(N1.getNode())) {
4064 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
4065 const APInt &C1 = N1C->getAPIntValue();
4066 EVT ShValTy = N0.getValueType();
4067
4068 // Fold bit comparisons when we can.
This will result in an 4069 // incorrect value when boolean false is negative one, unless 4070 // the bitsize is 1 in which case the false value is the same 4071 // in practice regardless of the representation. 4072 if ((VT.getSizeInBits() == 1 || 4073 getBooleanContents(N0.getValueType()) == ZeroOrOneBooleanContent) && 4074 (Cond == ISD::SETEQ || Cond == ISD::SETNE) && 4075 (VT == ShValTy || (isTypeLegal(VT) && VT.bitsLE(ShValTy))) && 4076 N0.getOpcode() == ISD::AND) { 4077 if (auto *AndRHS = dyn_cast<ConstantSDNode>(N0.getOperand(1))) { 4078 EVT ShiftTy = 4079 getShiftAmountTy(ShValTy, Layout, !DCI.isBeforeLegalize()); 4080 if (Cond == ISD::SETNE && C1 == 0) {// (X & 8) != 0 --> (X & 8) >> 3 4081 // Perform the xform if the AND RHS is a single bit. 4082 unsigned ShCt = AndRHS->getAPIntValue().logBase2(); 4083 if (AndRHS->getAPIntValue().isPowerOf2() && 4084 !TLI.shouldAvoidTransformToShift(ShValTy, ShCt)) { 4085 return DAG.getNode(ISD::TRUNCATE, dl, VT, 4086 DAG.getNode(ISD::SRL, dl, ShValTy, N0, 4087 DAG.getConstant(ShCt, dl, ShiftTy))); 4088 } 4089 } else if (Cond == ISD::SETEQ && C1 == AndRHS->getAPIntValue()) { 4090 // (X & 8) == 8 --> (X & 8) >> 3 4091 // Perform the xform if C1 is a single bit. 4092 unsigned ShCt = C1.logBase2(); 4093 if (C1.isPowerOf2() && 4094 !TLI.shouldAvoidTransformToShift(ShValTy, ShCt)) { 4095 return DAG.getNode(ISD::TRUNCATE, dl, VT, 4096 DAG.getNode(ISD::SRL, dl, ShValTy, N0, 4097 DAG.getConstant(ShCt, dl, ShiftTy))); 4098 } 4099 } 4100 } 4101 } 4102 4103 if (C1.getMinSignedBits() <= 64 && 4104 !isLegalICmpImmediate(C1.getSExtValue())) { 4105 EVT ShiftTy = getShiftAmountTy(ShValTy, Layout, !DCI.isBeforeLegalize()); 4106 // (X & -256) == 256 -> (X >> 8) == 1 4107 if ((Cond == ISD::SETEQ || Cond == ISD::SETNE) && 4108 N0.getOpcode() == ISD::AND && N0.hasOneUse()) { 4109 if (auto *AndRHS = dyn_cast<ConstantSDNode>(N0.getOperand(1))) { 4110 const APInt &AndRHSC = AndRHS->getAPIntValue(); 4111 if ((-AndRHSC).isPowerOf2() && (AndRHSC & C1) == C1) { 4112 unsigned ShiftBits = AndRHSC.countTrailingZeros(); 4113 if (!TLI.shouldAvoidTransformToShift(ShValTy, ShiftBits)) { 4114 SDValue Shift = 4115 DAG.getNode(ISD::SRL, dl, ShValTy, N0.getOperand(0), 4116 DAG.getConstant(ShiftBits, dl, ShiftTy)); 4117 SDValue CmpRHS = DAG.getConstant(C1.lshr(ShiftBits), dl, ShValTy); 4118 return DAG.getSetCC(dl, VT, Shift, CmpRHS, Cond); 4119 } 4120 } 4121 } 4122 } else if (Cond == ISD::SETULT || Cond == ISD::SETUGE || 4123 Cond == ISD::SETULE || Cond == ISD::SETUGT) { 4124 bool AdjOne = (Cond == ISD::SETULE || Cond == ISD::SETUGT); 4125 // X < 0x100000000 -> (X >> 32) < 1 4126 // X >= 0x100000000 -> (X >> 32) >= 1 4127 // X <= 0x0ffffffff -> (X >> 32) < 1 4128 // X > 0x0ffffffff -> (X >> 32) >= 1 4129 unsigned ShiftBits; 4130 APInt NewC = C1; 4131 ISD::CondCode NewCond = Cond; 4132 if (AdjOne) { 4133 ShiftBits = C1.countTrailingOnes(); 4134 NewC = NewC + 1; 4135 NewCond = (Cond == ISD::SETULE) ? 
ISD::SETULT : ISD::SETUGE; 4136 } else { 4137 ShiftBits = C1.countTrailingZeros(); 4138 } 4139 NewC.lshrInPlace(ShiftBits); 4140 if (ShiftBits && NewC.getMinSignedBits() <= 64 && 4141 isLegalICmpImmediate(NewC.getSExtValue()) && 4142 !TLI.shouldAvoidTransformToShift(ShValTy, ShiftBits)) { 4143 SDValue Shift = DAG.getNode(ISD::SRL, dl, ShValTy, N0, 4144 DAG.getConstant(ShiftBits, dl, ShiftTy)); 4145 SDValue CmpRHS = DAG.getConstant(NewC, dl, ShValTy); 4146 return DAG.getSetCC(dl, VT, Shift, CmpRHS, NewCond); 4147 } 4148 } 4149 } 4150 } 4151 4152 if (!isa<ConstantFPSDNode>(N0) && isa<ConstantFPSDNode>(N1)) { 4153 auto *CFP = cast<ConstantFPSDNode>(N1); 4154 assert(!CFP->getValueAPF().isNaN() && "Unexpected NaN value"); 4155 4156 // Otherwise, we know the RHS is not a NaN. Simplify the node to drop the 4157 // constant if knowing that the operand is non-nan is enough. We prefer to 4158 // have SETO(x,x) instead of SETO(x, 0.0) because this avoids having to 4159 // materialize 0.0. 4160 if (Cond == ISD::SETO || Cond == ISD::SETUO) 4161 return DAG.getSetCC(dl, VT, N0, N0, Cond); 4162 4163 // setcc (fneg x), C -> setcc swap(pred) x, -C 4164 if (N0.getOpcode() == ISD::FNEG) { 4165 ISD::CondCode SwapCond = ISD::getSetCCSwappedOperands(Cond); 4166 if (DCI.isBeforeLegalizeOps() || 4167 isCondCodeLegal(SwapCond, N0.getSimpleValueType())) { 4168 SDValue NegN1 = DAG.getNode(ISD::FNEG, dl, N0.getValueType(), N1); 4169 return DAG.getSetCC(dl, VT, N0.getOperand(0), NegN1, SwapCond); 4170 } 4171 } 4172 4173 // If the condition is not legal, see if we can find an equivalent one 4174 // which is legal. 4175 if (!isCondCodeLegal(Cond, N0.getSimpleValueType())) { 4176 // If the comparison was an awkward floating-point == or != and one of 4177 // the comparison operands is infinity or negative infinity, convert the 4178 // condition to a less-awkward <= or >=. 4179 if (CFP->getValueAPF().isInfinity()) { 4180 bool IsNegInf = CFP->getValueAPF().isNegative(); 4181 ISD::CondCode NewCond = ISD::SETCC_INVALID; 4182 switch (Cond) { 4183 case ISD::SETOEQ: NewCond = IsNegInf ? ISD::SETOLE : ISD::SETOGE; break; 4184 case ISD::SETUEQ: NewCond = IsNegInf ? ISD::SETULE : ISD::SETUGE; break; 4185 case ISD::SETUNE: NewCond = IsNegInf ? ISD::SETUGT : ISD::SETULT; break; 4186 case ISD::SETONE: NewCond = IsNegInf ? ISD::SETOGT : ISD::SETOLT; break; 4187 default: break; 4188 } 4189 if (NewCond != ISD::SETCC_INVALID && 4190 isCondCodeLegal(NewCond, N0.getSimpleValueType())) 4191 return DAG.getSetCC(dl, VT, N0, N1, NewCond); 4192 } 4193 } 4194 } 4195 4196 if (N0 == N1) { 4197 // The sext(setcc()) => setcc() optimization relies on the appropriate 4198 // constant being emitted. 4199 assert(!N0.getValueType().isInteger() && 4200 "Integer types should be handled by FoldSetCC"); 4201 4202 bool EqTrue = ISD::isTrueWhenEqual(Cond); 4203 unsigned UOF = ISD::getUnorderedFlavor(Cond); 4204 if (UOF == 2) // FP operators that are undefined on NaNs. 4205 return DAG.getBoolConstant(EqTrue, dl, VT, OpVT); 4206 if (UOF == unsigned(EqTrue)) 4207 return DAG.getBoolConstant(EqTrue, dl, VT, OpVT); 4208 // Otherwise, we can't fold it. However, we can simplify it to SETUO/SETO 4209 // if it is not already. 4210 ISD::CondCode NewCond = UOF == 0 ? 
ISD::SETO : ISD::SETUO; 4211 if (NewCond != Cond && 4212 (DCI.isBeforeLegalizeOps() || 4213 isCondCodeLegal(NewCond, N0.getSimpleValueType()))) 4214 return DAG.getSetCC(dl, VT, N0, N1, NewCond); 4215 } 4216 4217 if ((Cond == ISD::SETEQ || Cond == ISD::SETNE) && 4218 N0.getValueType().isInteger()) { 4219 if (N0.getOpcode() == ISD::ADD || N0.getOpcode() == ISD::SUB || 4220 N0.getOpcode() == ISD::XOR) { 4221 // Simplify (X+Y) == (X+Z) --> Y == Z 4222 if (N0.getOpcode() == N1.getOpcode()) { 4223 if (N0.getOperand(0) == N1.getOperand(0)) 4224 return DAG.getSetCC(dl, VT, N0.getOperand(1), N1.getOperand(1), Cond); 4225 if (N0.getOperand(1) == N1.getOperand(1)) 4226 return DAG.getSetCC(dl, VT, N0.getOperand(0), N1.getOperand(0), Cond); 4227 if (isCommutativeBinOp(N0.getOpcode())) { 4228 // If X op Y == Y op X, try other combinations. 4229 if (N0.getOperand(0) == N1.getOperand(1)) 4230 return DAG.getSetCC(dl, VT, N0.getOperand(1), N1.getOperand(0), 4231 Cond); 4232 if (N0.getOperand(1) == N1.getOperand(0)) 4233 return DAG.getSetCC(dl, VT, N0.getOperand(0), N1.getOperand(1), 4234 Cond); 4235 } 4236 } 4237 4238 // If RHS is a legal immediate value for a compare instruction, we need 4239 // to be careful about increasing register pressure needlessly. 4240 bool LegalRHSImm = false; 4241 4242 if (auto *RHSC = dyn_cast<ConstantSDNode>(N1)) { 4243 if (auto *LHSR = dyn_cast<ConstantSDNode>(N0.getOperand(1))) { 4244 // Turn (X+C1) == C2 --> X == C2-C1 4245 if (N0.getOpcode() == ISD::ADD && N0.getNode()->hasOneUse()) { 4246 return DAG.getSetCC(dl, VT, N0.getOperand(0), 4247 DAG.getConstant(RHSC->getAPIntValue()- 4248 LHSR->getAPIntValue(), 4249 dl, N0.getValueType()), Cond); 4250 } 4251 4252 // Turn (X^C1) == C2 into X == C1^C2 iff X&~C1 = 0. 4253 if (N0.getOpcode() == ISD::XOR) 4254 // If we know that all of the inverted bits are zero, don't bother 4255 // performing the inversion. 4256 if (DAG.MaskedValueIsZero(N0.getOperand(0), ~LHSR->getAPIntValue())) 4257 return 4258 DAG.getSetCC(dl, VT, N0.getOperand(0), 4259 DAG.getConstant(LHSR->getAPIntValue() ^ 4260 RHSC->getAPIntValue(), 4261 dl, N0.getValueType()), 4262 Cond); 4263 } 4264 4265 // Turn (C1-X) == C2 --> X == C1-C2 4266 if (auto *SUBC = dyn_cast<ConstantSDNode>(N0.getOperand(0))) { 4267 if (N0.getOpcode() == ISD::SUB && N0.getNode()->hasOneUse()) { 4268 return 4269 DAG.getSetCC(dl, VT, N0.getOperand(1), 4270 DAG.getConstant(SUBC->getAPIntValue() - 4271 RHSC->getAPIntValue(), 4272 dl, N0.getValueType()), 4273 Cond); 4274 } 4275 } 4276 4277 // Could RHSC fold directly into a compare? 4278 if (RHSC->getValueType(0).getSizeInBits() <= 64) 4279 LegalRHSImm = isLegalICmpImmediate(RHSC->getSExtValue()); 4280 } 4281 4282 // (X+Y) == X --> Y == 0 and similar folds. 4283 // Don't do this if X is an immediate that can fold into a cmp 4284 // instruction and X+Y has other uses. It could be an induction variable 4285 // chain, and the transform would increase register pressure. 4286 if (!LegalRHSImm || N0.hasOneUse()) 4287 if (SDValue V = foldSetCCWithBinOp(VT, N0, N1, Cond, dl, DCI)) 4288 return V; 4289 } 4290 4291 if (N1.getOpcode() == ISD::ADD || N1.getOpcode() == ISD::SUB || 4292 N1.getOpcode() == ISD::XOR) 4293 if (SDValue V = foldSetCCWithBinOp(VT, N1, N0, Cond, dl, DCI)) 4294 return V; 4295 4296 if (SDValue V = foldSetCCWithAnd(VT, N0, N1, Cond, dl, DCI)) 4297 return V; 4298 } 4299 4300 // Fold remainder of division by a constant. 
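// For example (illustrative), for i32 this can turn
//   (seteq (urem %x, 5), 0)
// into a multiply by the modular inverse and an unsigned compare:
//   (setule (mul %x, 0xCCCCCCCD), 0x33333333)
// See buildUREMEqFold/buildSREMEqFold for the exact construction.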
4301 if ((N0.getOpcode() == ISD::UREM || N0.getOpcode() == ISD::SREM) && 4302 N0.hasOneUse() && (Cond == ISD::SETEQ || Cond == ISD::SETNE)) { 4303 AttributeList Attr = DAG.getMachineFunction().getFunction().getAttributes(); 4304 4305 // When division is cheap or optimizing for minimum size, 4306 // fall through to DIVREM creation by skipping this fold. 4307 if (!isIntDivCheap(VT, Attr) && !Attr.hasFnAttribute(Attribute::MinSize)) { 4308 if (N0.getOpcode() == ISD::UREM) { 4309 if (SDValue Folded = buildUREMEqFold(VT, N0, N1, Cond, DCI, dl)) 4310 return Folded; 4311 } else if (N0.getOpcode() == ISD::SREM) { 4312 if (SDValue Folded = buildSREMEqFold(VT, N0, N1, Cond, DCI, dl)) 4313 return Folded; 4314 } 4315 } 4316 } 4317 4318 // Fold away ALL boolean setcc's. 4319 if (N0.getValueType().getScalarType() == MVT::i1 && foldBooleans) { 4320 SDValue Temp; 4321 switch (Cond) { 4322 default: llvm_unreachable("Unknown integer setcc!"); 4323 case ISD::SETEQ: // X == Y -> ~(X^Y) 4324 Temp = DAG.getNode(ISD::XOR, dl, OpVT, N0, N1); 4325 N0 = DAG.getNOT(dl, Temp, OpVT); 4326 if (!DCI.isCalledByLegalizer()) 4327 DCI.AddToWorklist(Temp.getNode()); 4328 break; 4329 case ISD::SETNE: // X != Y --> (X^Y) 4330 N0 = DAG.getNode(ISD::XOR, dl, OpVT, N0, N1); 4331 break; 4332 case ISD::SETGT: // X >s Y --> X == 0 & Y == 1 --> ~X & Y 4333 case ISD::SETULT: // X <u Y --> X == 0 & Y == 1 --> ~X & Y 4334 Temp = DAG.getNOT(dl, N0, OpVT); 4335 N0 = DAG.getNode(ISD::AND, dl, OpVT, N1, Temp); 4336 if (!DCI.isCalledByLegalizer()) 4337 DCI.AddToWorklist(Temp.getNode()); 4338 break; 4339 case ISD::SETLT: // X <s Y --> X == 1 & Y == 0 --> ~Y & X 4340 case ISD::SETUGT: // X >u Y --> X == 1 & Y == 0 --> ~Y & X 4341 Temp = DAG.getNOT(dl, N1, OpVT); 4342 N0 = DAG.getNode(ISD::AND, dl, OpVT, N0, Temp); 4343 if (!DCI.isCalledByLegalizer()) 4344 DCI.AddToWorklist(Temp.getNode()); 4345 break; 4346 case ISD::SETULE: // X <=u Y --> X == 0 | Y == 1 --> ~X | Y 4347 case ISD::SETGE: // X >=s Y --> X == 0 | Y == 1 --> ~X | Y 4348 Temp = DAG.getNOT(dl, N0, OpVT); 4349 N0 = DAG.getNode(ISD::OR, dl, OpVT, N1, Temp); 4350 if (!DCI.isCalledByLegalizer()) 4351 DCI.AddToWorklist(Temp.getNode()); 4352 break; 4353 case ISD::SETUGE: // X >=u Y --> X == 1 | Y == 0 --> ~Y | X 4354 case ISD::SETLE: // X <=s Y --> X == 1 | Y == 0 --> ~Y | X 4355 Temp = DAG.getNOT(dl, N1, OpVT); 4356 N0 = DAG.getNode(ISD::OR, dl, OpVT, N0, Temp); 4357 break; 4358 } 4359 if (VT.getScalarType() != MVT::i1) { 4360 if (!DCI.isCalledByLegalizer()) 4361 DCI.AddToWorklist(N0.getNode()); 4362 // FIXME: If running after legalize, we probably can't do this. 4363 ISD::NodeType ExtendCode = getExtendForContent(getBooleanContents(OpVT)); 4364 N0 = DAG.getNode(ExtendCode, dl, VT, N0); 4365 } 4366 return N0; 4367 } 4368 4369 // Could not fold it. 4370 return SDValue(); 4371 } 4372 4373 /// Returns true (and the GlobalValue and the offset) if the node is a 4374 /// GlobalAddress + offset. 
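/// For example (illustrative), a DAG of the form
///   (add (add (GlobalAddress @g, 4), (Constant 8)), (Constant 16))
/// reports GA = @g and advances Offset by 4 + 8 + 16 = 28.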
4375 bool TargetLowering::isGAPlusOffset(SDNode *WN, const GlobalValue *&GA, 4376 int64_t &Offset) const { 4377 4378 SDNode *N = unwrapAddress(SDValue(WN, 0)).getNode(); 4379 4380 if (auto *GASD = dyn_cast<GlobalAddressSDNode>(N)) { 4381 GA = GASD->getGlobal(); 4382 Offset += GASD->getOffset(); 4383 return true; 4384 } 4385 4386 if (N->getOpcode() == ISD::ADD) { 4387 SDValue N1 = N->getOperand(0); 4388 SDValue N2 = N->getOperand(1); 4389 if (isGAPlusOffset(N1.getNode(), GA, Offset)) { 4390 if (auto *V = dyn_cast<ConstantSDNode>(N2)) { 4391 Offset += V->getSExtValue(); 4392 return true; 4393 } 4394 } else if (isGAPlusOffset(N2.getNode(), GA, Offset)) { 4395 if (auto *V = dyn_cast<ConstantSDNode>(N1)) { 4396 Offset += V->getSExtValue(); 4397 return true; 4398 } 4399 } 4400 } 4401 4402 return false; 4403 } 4404 4405 SDValue TargetLowering::PerformDAGCombine(SDNode *N, 4406 DAGCombinerInfo &DCI) const { 4407 // Default implementation: no optimization. 4408 return SDValue(); 4409 } 4410 4411 //===----------------------------------------------------------------------===// 4412 // Inline Assembler Implementation Methods 4413 //===----------------------------------------------------------------------===// 4414 4415 TargetLowering::ConstraintType 4416 TargetLowering::getConstraintType(StringRef Constraint) const { 4417 unsigned S = Constraint.size(); 4418 4419 if (S == 1) { 4420 switch (Constraint[0]) { 4421 default: break; 4422 case 'r': 4423 return C_RegisterClass; 4424 case 'm': // memory 4425 case 'o': // offsetable 4426 case 'V': // not offsetable 4427 return C_Memory; 4428 case 'n': // Simple Integer 4429 case 'E': // Floating Point Constant 4430 case 'F': // Floating Point Constant 4431 return C_Immediate; 4432 case 'i': // Simple Integer or Relocatable Constant 4433 case 's': // Relocatable Constant 4434 case 'p': // Address. 4435 case 'X': // Allow ANY value. 4436 case 'I': // Target registers. 4437 case 'J': 4438 case 'K': 4439 case 'L': 4440 case 'M': 4441 case 'N': 4442 case 'O': 4443 case 'P': 4444 case '<': 4445 case '>': 4446 return C_Other; 4447 } 4448 } 4449 4450 if (S > 1 && Constraint[0] == '{' && Constraint[S - 1] == '}') { 4451 if (S == 8 && Constraint.substr(1, 6) == "memory") // "{memory}" 4452 return C_Memory; 4453 return C_Register; 4454 } 4455 return C_Unknown; 4456 } 4457 4458 /// Try to replace an X constraint, which matches anything, with another that 4459 /// has more specific requirements based on the type of the corresponding 4460 /// operand. 4461 const char *TargetLowering::LowerXConstraint(EVT ConstraintVT) const { 4462 if (ConstraintVT.isInteger()) 4463 return "r"; 4464 if (ConstraintVT.isFloatingPoint()) 4465 return "f"; // works for many targets 4466 return nullptr; 4467 } 4468 4469 SDValue TargetLowering::LowerAsmOutputForConstraint( 4470 SDValue &Chain, SDValue &Flag, const SDLoc &DL, 4471 const AsmOperandInfo &OpInfo, SelectionDAG &DAG) const { 4472 return SDValue(); 4473 } 4474 4475 /// Lower the specified operand into the Ops vector. 4476 /// If it is invalid, don't add anything to Ops. 4477 void TargetLowering::LowerAsmOperandForConstraint(SDValue Op, 4478 std::string &Constraint, 4479 std::vector<SDValue> &Ops, 4480 SelectionDAG &DAG) const { 4481 4482 if (Constraint.length() > 1) return; 4483 4484 char ConstraintLetter = Constraint[0]; 4485 switch (ConstraintLetter) { 4486 default: break; 4487 case 'X': // Allows any operand; labels (basic block) use this. 
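// (Illustrative) e.g. IR along the lines of
//   call void asm "...", "X"(i8* blockaddress(@f, %bb))
// reaches this case and the operand is passed through unmodified.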
4488 if (Op.getOpcode() == ISD::BasicBlock ||
4489 Op.getOpcode() == ISD::TargetBlockAddress) {
4490 Ops.push_back(Op);
4491 return;
4492 }
4493 LLVM_FALLTHROUGH;
4494 case 'i': // Simple Integer or Relocatable Constant
4495 case 'n': // Simple Integer
4496 case 's': { // Relocatable Constant
4497
4498 GlobalAddressSDNode *GA;
4499 ConstantSDNode *C;
4500 BlockAddressSDNode *BA;
4501 uint64_t Offset = 0;
4502
4503 // Match (GA) or (C) or (GA+C) or (GA-C) or ((GA+C)+C) or (((GA+C)+C)+C),
4504 // etc., since getelementptr is variadic. We can't use
4505 // SelectionDAG::FoldSymbolOffset because it expects the GA to be
4506 // accessible, while in this case the GA may be furthest from the root
4507 // node, which is likely an ISD::ADD.
4508 while (1) {
4509 if ((GA = dyn_cast<GlobalAddressSDNode>(Op)) && ConstraintLetter != 'n') {
4510 Ops.push_back(DAG.getTargetGlobalAddress(GA->getGlobal(), SDLoc(Op),
4511 GA->getValueType(0),
4512 Offset + GA->getOffset()));
4513 return;
4514 } else if ((C = dyn_cast<ConstantSDNode>(Op)) &&
4515 ConstraintLetter != 's') {
4516 // gcc prints these as sign extended. Sign extend value to 64 bits
4517 // now; without this it would get ZExt'd later in
4518 // ScheduleDAGSDNodes::EmitNode, which is very generic.
4519 bool IsBool = C->getConstantIntValue()->getBitWidth() == 1;
4520 BooleanContent BCont = getBooleanContents(MVT::i64);
4521 ISD::NodeType ExtOpc = IsBool ? getExtendForContent(BCont)
4522 : ISD::SIGN_EXTEND;
4523 int64_t ExtVal = ExtOpc == ISD::ZERO_EXTEND ? C->getZExtValue()
4524 : C->getSExtValue();
4525 Ops.push_back(DAG.getTargetConstant(Offset + ExtVal,
4526 SDLoc(C), MVT::i64));
4527 return;
4528 } else if ((BA = dyn_cast<BlockAddressSDNode>(Op)) &&
4529 ConstraintLetter != 'n') {
4530 Ops.push_back(DAG.getTargetBlockAddress(
4531 BA->getBlockAddress(), BA->getValueType(0),
4532 Offset + BA->getOffset(), BA->getTargetFlags()));
4533 return;
4534 } else {
4535 const unsigned OpCode = Op.getOpcode();
4536 if (OpCode == ISD::ADD || OpCode == ISD::SUB) {
4537 if ((C = dyn_cast<ConstantSDNode>(Op.getOperand(0))))
4538 Op = Op.getOperand(1);
4539 // Subtraction is not commutative.
4540 else if (OpCode == ISD::ADD &&
4541 (C = dyn_cast<ConstantSDNode>(Op.getOperand(1))))
4542 Op = Op.getOperand(0);
4543 else
4544 return;
4545 Offset += (OpCode == ISD::ADD ? 1 : -1) * C->getSExtValue();
4546 continue;
4547 }
4548 }
4549 return;
4550 }
4551 break;
4552 }
4553 }
4554 }
4555
4556 std::pair<unsigned, const TargetRegisterClass *>
4557 TargetLowering::getRegForInlineAsmConstraint(const TargetRegisterInfo *RI,
4558 StringRef Constraint,
4559 MVT VT) const {
4560 if (Constraint.empty() || Constraint[0] != '{')
4561 return std::make_pair(0u, static_cast<TargetRegisterClass *>(nullptr));
4562 assert(*(Constraint.end() - 1) == '}' && "Not a brace enclosed constraint?");
4563
4564 // Remove the braces from around the name.
4565 StringRef RegName(Constraint.data() + 1, Constraint.size() - 2);
4566
4567 std::pair<unsigned, const TargetRegisterClass *> R =
4568 std::make_pair(0u, static_cast<const TargetRegisterClass *>(nullptr));
4569
4570 // Figure out which register class contains this reg.
4571 for (const TargetRegisterClass *RC : RI->regclasses()) {
4572 // If none of the value types for this register class are valid, we
4573 // can't use it. For example, 64-bit reg classes on 32-bit targets.
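// (Illustrative) e.g. on x86, a "{eax}" constraint is matched by name in
// the loop below and would typically resolve to the GR32 register class.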
4574 if (!isLegalRC(*RI, *RC)) 4575 continue; 4576 4577 for (TargetRegisterClass::iterator I = RC->begin(), E = RC->end(); 4578 I != E; ++I) { 4579 if (RegName.equals_lower(RI->getRegAsmName(*I))) { 4580 std::pair<unsigned, const TargetRegisterClass *> S = 4581 std::make_pair(*I, RC); 4582 4583 // If this register class has the requested value type, return it, 4584 // otherwise keep searching and return the first class found 4585 // if no other is found which explicitly has the requested type. 4586 if (RI->isTypeLegalForClass(*RC, VT)) 4587 return S; 4588 if (!R.second) 4589 R = S; 4590 } 4591 } 4592 } 4593 4594 return R; 4595 } 4596 4597 //===----------------------------------------------------------------------===// 4598 // Constraint Selection. 4599 4600 /// Return true of this is an input operand that is a matching constraint like 4601 /// "4". 4602 bool TargetLowering::AsmOperandInfo::isMatchingInputConstraint() const { 4603 assert(!ConstraintCode.empty() && "No known constraint!"); 4604 return isdigit(static_cast<unsigned char>(ConstraintCode[0])); 4605 } 4606 4607 /// If this is an input matching constraint, this method returns the output 4608 /// operand it matches. 4609 unsigned TargetLowering::AsmOperandInfo::getMatchedOperand() const { 4610 assert(!ConstraintCode.empty() && "No known constraint!"); 4611 return atoi(ConstraintCode.c_str()); 4612 } 4613 4614 /// Split up the constraint string from the inline assembly value into the 4615 /// specific constraints and their prefixes, and also tie in the associated 4616 /// operand values. 4617 /// If this returns an empty vector, and if the constraint string itself 4618 /// isn't empty, there was an error parsing. 4619 TargetLowering::AsmOperandInfoVector 4620 TargetLowering::ParseConstraints(const DataLayout &DL, 4621 const TargetRegisterInfo *TRI, 4622 const CallBase &Call) const { 4623 /// Information about all of the constraints. 4624 AsmOperandInfoVector ConstraintOperands; 4625 const InlineAsm *IA = cast<InlineAsm>(Call.getCalledOperand()); 4626 unsigned maCount = 0; // Largest number of multiple alternative constraints. 4627 4628 // Do a prepass over the constraints, canonicalizing them, and building up the 4629 // ConstraintOperands list. 4630 unsigned ArgNo = 0; // ArgNo - The argument of the CallInst. 4631 unsigned ResNo = 0; // ResNo - The result number of the next output. 4632 4633 for (InlineAsm::ConstraintInfo &CI : IA->ParseConstraints()) { 4634 ConstraintOperands.emplace_back(std::move(CI)); 4635 AsmOperandInfo &OpInfo = ConstraintOperands.back(); 4636 4637 // Update multiple alternative constraint count. 4638 if (OpInfo.multipleAlternatives.size() > maCount) 4639 maCount = OpInfo.multipleAlternatives.size(); 4640 4641 OpInfo.ConstraintVT = MVT::Other; 4642 4643 // Compute the value type for each operand. 4644 switch (OpInfo.Type) { 4645 case InlineAsm::isOutput: 4646 // Indirect outputs just consume an argument. 4647 if (OpInfo.isIndirect) { 4648 OpInfo.CallOperandVal = Call.getArgOperand(ArgNo++); 4649 break; 4650 } 4651 4652 // The return value of the call is this value. As such, there is no 4653 // corresponding argument. 
4654 assert(!Call.getType()->isVoidTy() && "Bad inline asm!"); 4655 if (StructType *STy = dyn_cast<StructType>(Call.getType())) { 4656 OpInfo.ConstraintVT = 4657 getSimpleValueType(DL, STy->getElementType(ResNo)); 4658 } else { 4659 assert(ResNo == 0 && "Asm only has one result!"); 4660 OpInfo.ConstraintVT = getSimpleValueType(DL, Call.getType()); 4661 } 4662 ++ResNo; 4663 break; 4664 case InlineAsm::isInput: 4665 OpInfo.CallOperandVal = Call.getArgOperand(ArgNo++); 4666 break; 4667 case InlineAsm::isClobber: 4668 // Nothing to do. 4669 break; 4670 } 4671 4672 if (OpInfo.CallOperandVal) { 4673 llvm::Type *OpTy = OpInfo.CallOperandVal->getType(); 4674 if (OpInfo.isIndirect) { 4675 llvm::PointerType *PtrTy = dyn_cast<PointerType>(OpTy); 4676 if (!PtrTy) 4677 report_fatal_error("Indirect operand for inline asm not a pointer!"); 4678 OpTy = PtrTy->getElementType(); 4679 } 4680 4681 // Look for vector wrapped in a struct. e.g. { <16 x i8> }. 4682 if (StructType *STy = dyn_cast<StructType>(OpTy)) 4683 if (STy->getNumElements() == 1) 4684 OpTy = STy->getElementType(0); 4685 4686 // If OpTy is not a single value, it may be a struct/union that we 4687 // can tile with integers. 4688 if (!OpTy->isSingleValueType() && OpTy->isSized()) { 4689 unsigned BitSize = DL.getTypeSizeInBits(OpTy); 4690 switch (BitSize) { 4691 default: break; 4692 case 1: 4693 case 8: 4694 case 16: 4695 case 32: 4696 case 64: 4697 case 128: 4698 OpInfo.ConstraintVT = 4699 MVT::getVT(IntegerType::get(OpTy->getContext(), BitSize), true); 4700 break; 4701 } 4702 } else if (PointerType *PT = dyn_cast<PointerType>(OpTy)) { 4703 unsigned PtrSize = DL.getPointerSizeInBits(PT->getAddressSpace()); 4704 OpInfo.ConstraintVT = MVT::getIntegerVT(PtrSize); 4705 } else { 4706 OpInfo.ConstraintVT = MVT::getVT(OpTy, true); 4707 } 4708 } 4709 } 4710 4711 // If we have multiple alternative constraints, select the best alternative. 4712 if (!ConstraintOperands.empty()) { 4713 if (maCount) { 4714 unsigned bestMAIndex = 0; 4715 int bestWeight = -1; 4716 // weight: -1 = invalid match, and 0 = so-so match to 5 = good match. 4717 int weight = -1; 4718 unsigned maIndex; 4719 // Compute the sums of the weights for each alternative, keeping track 4720 // of the best (highest weight) one so far. 4721 for (maIndex = 0; maIndex < maCount; ++maIndex) { 4722 int weightSum = 0; 4723 for (unsigned cIndex = 0, eIndex = ConstraintOperands.size(); 4724 cIndex != eIndex; ++cIndex) { 4725 AsmOperandInfo &OpInfo = ConstraintOperands[cIndex]; 4726 if (OpInfo.Type == InlineAsm::isClobber) 4727 continue; 4728 4729 // If this is an output operand with a matching input operand, 4730 // look up the matching input. If their types mismatch, e.g. one 4731 // is an integer, the other is floating point, or their sizes are 4732 // different, flag it as an maCantMatch. 4733 if (OpInfo.hasMatchingInput()) { 4734 AsmOperandInfo &Input = ConstraintOperands[OpInfo.MatchingInput]; 4735 if (OpInfo.ConstraintVT != Input.ConstraintVT) { 4736 if ((OpInfo.ConstraintVT.isInteger() != 4737 Input.ConstraintVT.isInteger()) || 4738 (OpInfo.ConstraintVT.getSizeInBits() != 4739 Input.ConstraintVT.getSizeInBits())) { 4740 weightSum = -1; // Can't match. 4741 break; 4742 } 4743 } 4744 } 4745 weight = getMultipleConstraintMatchWeight(OpInfo, maIndex); 4746 if (weight == -1) { 4747 weightSum = -1; 4748 break; 4749 } 4750 weightSum += weight; 4751 } 4752 // Update best. 
4753 if (weightSum > bestWeight) { 4754 bestWeight = weightSum; 4755 bestMAIndex = maIndex; 4756 } 4757 } 4758 4759 // Now select chosen alternative in each constraint. 4760 for (unsigned cIndex = 0, eIndex = ConstraintOperands.size(); 4761 cIndex != eIndex; ++cIndex) { 4762 AsmOperandInfo &cInfo = ConstraintOperands[cIndex]; 4763 if (cInfo.Type == InlineAsm::isClobber) 4764 continue; 4765 cInfo.selectAlternative(bestMAIndex); 4766 } 4767 } 4768 } 4769 4770 // Check and hook up tied operands, choose constraint code to use. 4771 for (unsigned cIndex = 0, eIndex = ConstraintOperands.size(); 4772 cIndex != eIndex; ++cIndex) { 4773 AsmOperandInfo &OpInfo = ConstraintOperands[cIndex]; 4774 4775 // If this is an output operand with a matching input operand, look up the 4776 // matching input. If their types mismatch, e.g. one is an integer, the 4777 // other is floating point, or their sizes are different, flag it as an 4778 // error. 4779 if (OpInfo.hasMatchingInput()) { 4780 AsmOperandInfo &Input = ConstraintOperands[OpInfo.MatchingInput]; 4781 4782 if (OpInfo.ConstraintVT != Input.ConstraintVT) { 4783 std::pair<unsigned, const TargetRegisterClass *> MatchRC = 4784 getRegForInlineAsmConstraint(TRI, OpInfo.ConstraintCode, 4785 OpInfo.ConstraintVT); 4786 std::pair<unsigned, const TargetRegisterClass *> InputRC = 4787 getRegForInlineAsmConstraint(TRI, Input.ConstraintCode, 4788 Input.ConstraintVT); 4789 if ((OpInfo.ConstraintVT.isInteger() != 4790 Input.ConstraintVT.isInteger()) || 4791 (MatchRC.second != InputRC.second)) { 4792 report_fatal_error("Unsupported asm: input constraint" 4793 " with a matching output constraint of" 4794 " incompatible type!"); 4795 } 4796 } 4797 } 4798 } 4799 4800 return ConstraintOperands; 4801 } 4802 4803 /// Return an integer indicating how general CT is. 4804 static unsigned getConstraintGenerality(TargetLowering::ConstraintType CT) { 4805 switch (CT) { 4806 case TargetLowering::C_Immediate: 4807 case TargetLowering::C_Other: 4808 case TargetLowering::C_Unknown: 4809 return 0; 4810 case TargetLowering::C_Register: 4811 return 1; 4812 case TargetLowering::C_RegisterClass: 4813 return 2; 4814 case TargetLowering::C_Memory: 4815 return 3; 4816 } 4817 llvm_unreachable("Invalid constraint type"); 4818 } 4819 4820 /// Examine constraint type and operand type and determine a weight value. 4821 /// This object must already have been set up with the operand type 4822 /// and the current alternative constraint selected. 4823 TargetLowering::ConstraintWeight 4824 TargetLowering::getMultipleConstraintMatchWeight( 4825 AsmOperandInfo &info, int maIndex) const { 4826 InlineAsm::ConstraintCodeVector *rCodes; 4827 if (maIndex >= (int)info.multipleAlternatives.size()) 4828 rCodes = &info.Codes; 4829 else 4830 rCodes = &info.multipleAlternatives[maIndex].Codes; 4831 ConstraintWeight BestWeight = CW_Invalid; 4832 4833 // Loop over the options, keeping track of the most general one. 4834 for (unsigned i = 0, e = rCodes->size(); i != e; ++i) { 4835 ConstraintWeight weight = 4836 getSingleConstraintMatchWeight(info, (*rCodes)[i].c_str()); 4837 if (weight > BestWeight) 4838 BestWeight = weight; 4839 } 4840 4841 return BestWeight; 4842 } 4843 4844 /// Examine constraint type and operand type and determine a weight value. 4845 /// This object must already have been set up with the operand type 4846 /// and the current alternative constraint selected. 
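/// An illustrative sketch of the scoring (not a normative example): for an
/// i32 operand with the multi-letter alternative "imr", 'i' would score
/// CW_Constant only when the value is a ConstantInt, 'm' scores CW_Memory,
/// and 'r' scores CW_Register; the caller above sums such per-operand
/// weights to pick the best alternative.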
4847 TargetLowering::ConstraintWeight
4848 TargetLowering::getSingleConstraintMatchWeight(
4849 AsmOperandInfo &info, const char *constraint) const {
4850 ConstraintWeight weight = CW_Invalid;
4851 Value *CallOperandVal = info.CallOperandVal;
4852 // If we don't have a value, we can't do a match,
4853 // but allow it at the lowest weight.
4854 if (!CallOperandVal)
4855 return CW_Default;
4856 // Look at the constraint type.
4857 switch (*constraint) {
4858 case 'i': // immediate integer.
4859 case 'n': // immediate integer with a known value.
4860 if (isa<ConstantInt>(CallOperandVal))
4861 weight = CW_Constant;
4862 break;
4863 case 's': // non-explicit integral immediate.
4864 if (isa<GlobalValue>(CallOperandVal))
4865 weight = CW_Constant;
4866 break;
4867 case 'E': // immediate float if host format.
4868 case 'F': // immediate float.
4869 if (isa<ConstantFP>(CallOperandVal))
4870 weight = CW_Constant;
4871 break;
4872 case '<': // memory operand with autodecrement.
4873 case '>': // memory operand with autoincrement.
4874 case 'm': // memory operand.
4875 case 'o': // offsettable memory operand
4876 case 'V': // non-offsettable memory operand
4877 weight = CW_Memory;
4878 break;
4879 case 'r': // general register.
4880 case 'g': // general register, memory operand or immediate integer.
4881 // note: Clang converts "g" to "imr".
4882 if (CallOperandVal->getType()->isIntegerTy())
4883 weight = CW_Register;
4884 break;
4885 case 'X': // any operand.
4886 default:
4887 weight = CW_Default;
4888 break;
4889 }
4890 return weight;
4891 }
4892 
4893 /// If there are multiple different constraints that we could pick for this
4894 /// operand (e.g. "imr") try to pick the 'best' one.
4895 /// This is somewhat tricky: constraints fall into four classes:
4896 /// Other -> immediates and magic values
4897 /// Register -> one specific register
4898 /// RegisterClass -> a group of regs
4899 /// Memory -> memory
4900 /// Ideally, we would pick the most specific constraint possible: if we have
4901 /// something that fits into a register, we would pick it. The problem here
4902 /// is that if we have something that could either be in a register or in
4903 /// memory, then use of the register could cause selection of *other*
4904 /// operands to fail: they might only succeed if we pick memory. Because of
4905 /// this the heuristic we use is:
4906 ///
4907 /// 1) If there is an 'other' constraint, and if the operand is valid for
4908 /// that constraint, use it. This makes us take advantage of 'i'
4909 /// constraints when available.
4910 /// 2) Otherwise, pick the most general constraint present. This prefers
4911 /// 'm' over 'r', for example.
4912 ///
4913 static void ChooseConstraint(TargetLowering::AsmOperandInfo &OpInfo,
4914 const TargetLowering &TLI,
4915 SDValue Op, SelectionDAG *DAG) {
4916 assert(OpInfo.Codes.size() > 1 && "Doesn't have multiple constraint options");
4917 unsigned BestIdx = 0;
4918 TargetLowering::ConstraintType BestType = TargetLowering::C_Unknown;
4919 int BestGenerality = -1;
4920 
4921 // Loop over the options, keeping track of the most general one.
4922 for (unsigned i = 0, e = OpInfo.Codes.size(); i != e; ++i) {
4923 TargetLowering::ConstraintType CType =
4924 TLI.getConstraintType(OpInfo.Codes[i]);
4925 
4926 // Indirect 'other' or 'immediate' constraints are not allowed.
4927 if (OpInfo.isIndirect && !(CType == TargetLowering::C_Memory || 4928 CType == TargetLowering::C_Register || 4929 CType == TargetLowering::C_RegisterClass)) 4930 continue; 4931 4932 // If this is an 'other' or 'immediate' constraint, see if the operand is 4933 // valid for it. For example, on X86 we might have an 'rI' constraint. If 4934 // the operand is an integer in the range [0..31] we want to use I (saving a 4935 // load of a register), otherwise we must use 'r'. 4936 if ((CType == TargetLowering::C_Other || 4937 CType == TargetLowering::C_Immediate) && Op.getNode()) { 4938 assert(OpInfo.Codes[i].size() == 1 && 4939 "Unhandled multi-letter 'other' constraint"); 4940 std::vector<SDValue> ResultOps; 4941 TLI.LowerAsmOperandForConstraint(Op, OpInfo.Codes[i], 4942 ResultOps, *DAG); 4943 if (!ResultOps.empty()) { 4944 BestType = CType; 4945 BestIdx = i; 4946 break; 4947 } 4948 } 4949 4950 // Things with matching constraints can only be registers, per gcc 4951 // documentation. This mainly affects "g" constraints. 4952 if (CType == TargetLowering::C_Memory && OpInfo.hasMatchingInput()) 4953 continue; 4954 4955 // This constraint letter is more general than the previous one, use it. 4956 int Generality = getConstraintGenerality(CType); 4957 if (Generality > BestGenerality) { 4958 BestType = CType; 4959 BestIdx = i; 4960 BestGenerality = Generality; 4961 } 4962 } 4963 4964 OpInfo.ConstraintCode = OpInfo.Codes[BestIdx]; 4965 OpInfo.ConstraintType = BestType; 4966 } 4967 4968 /// Determines the constraint code and constraint type to use for the specific 4969 /// AsmOperandInfo, setting OpInfo.ConstraintCode and OpInfo.ConstraintType. 4970 void TargetLowering::ComputeConstraintToUse(AsmOperandInfo &OpInfo, 4971 SDValue Op, 4972 SelectionDAG *DAG) const { 4973 assert(!OpInfo.Codes.empty() && "Must have at least one constraint"); 4974 4975 // Single-letter constraints ('r') are very common. 4976 if (OpInfo.Codes.size() == 1) { 4977 OpInfo.ConstraintCode = OpInfo.Codes[0]; 4978 OpInfo.ConstraintType = getConstraintType(OpInfo.ConstraintCode); 4979 } else { 4980 ChooseConstraint(OpInfo, *this, Op, DAG); 4981 } 4982 4983 // 'X' matches anything. 4984 if (OpInfo.ConstraintCode == "X" && OpInfo.CallOperandVal) { 4985 // Labels and constants are handled elsewhere ('X' is the only thing 4986 // that matches labels). For Functions, the type here is the type of 4987 // the result, which is not what we want to look at; leave them alone. 4988 Value *v = OpInfo.CallOperandVal; 4989 if (isa<BasicBlock>(v) || isa<ConstantInt>(v) || isa<Function>(v)) { 4990 OpInfo.CallOperandVal = v; 4991 return; 4992 } 4993 4994 if (Op.getNode() && Op.getOpcode() == ISD::TargetBlockAddress) 4995 return; 4996 4997 // Otherwise, try to resolve it to something we know about by looking at 4998 // the actual operand type. 4999 if (const char *Repl = LowerXConstraint(OpInfo.ConstraintVT)) { 5000 OpInfo.ConstraintCode = Repl; 5001 OpInfo.ConstraintType = getConstraintType(OpInfo.ConstraintCode); 5002 } 5003 } 5004 } 5005 5006 /// Given an exact SDIV by a constant, create a multiplication 5007 /// with the multiplicative inverse of the constant. 
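/// A minimal illustration (assuming i32, not taken from this file): an exact
/// "sdiv X, 6" becomes "sra exact X, 1" followed by a multiply with
/// 0xAAAAAAAB, the multiplicative inverse of the odd factor 3 modulo 2^32,
/// since 3 * 0xAAAAAAAB == 2^33 + 1 == 1 (mod 2^32).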
5008 static SDValue BuildExactSDIV(const TargetLowering &TLI, SDNode *N, 5009 const SDLoc &dl, SelectionDAG &DAG, 5010 SmallVectorImpl<SDNode *> &Created) { 5011 SDValue Op0 = N->getOperand(0); 5012 SDValue Op1 = N->getOperand(1); 5013 EVT VT = N->getValueType(0); 5014 EVT SVT = VT.getScalarType(); 5015 EVT ShVT = TLI.getShiftAmountTy(VT, DAG.getDataLayout()); 5016 EVT ShSVT = ShVT.getScalarType(); 5017 5018 bool UseSRA = false; 5019 SmallVector<SDValue, 16> Shifts, Factors; 5020 5021 auto BuildSDIVPattern = [&](ConstantSDNode *C) { 5022 if (C->isNullValue()) 5023 return false; 5024 APInt Divisor = C->getAPIntValue(); 5025 unsigned Shift = Divisor.countTrailingZeros(); 5026 if (Shift) { 5027 Divisor.ashrInPlace(Shift); 5028 UseSRA = true; 5029 } 5030 // Calculate the multiplicative inverse, using Newton's method. 5031 APInt t; 5032 APInt Factor = Divisor; 5033 while ((t = Divisor * Factor) != 1) 5034 Factor *= APInt(Divisor.getBitWidth(), 2) - t; 5035 Shifts.push_back(DAG.getConstant(Shift, dl, ShSVT)); 5036 Factors.push_back(DAG.getConstant(Factor, dl, SVT)); 5037 return true; 5038 }; 5039 5040 // Collect all magic values from the build vector. 5041 if (!ISD::matchUnaryPredicate(Op1, BuildSDIVPattern)) 5042 return SDValue(); 5043 5044 SDValue Shift, Factor; 5045 if (VT.isVector()) { 5046 Shift = DAG.getBuildVector(ShVT, dl, Shifts); 5047 Factor = DAG.getBuildVector(VT, dl, Factors); 5048 } else { 5049 Shift = Shifts[0]; 5050 Factor = Factors[0]; 5051 } 5052 5053 SDValue Res = Op0; 5054 5055 // Shift the value upfront if it is even, so the LSB is one. 5056 if (UseSRA) { 5057 // TODO: For UDIV use SRL instead of SRA. 5058 SDNodeFlags Flags; 5059 Flags.setExact(true); 5060 Res = DAG.getNode(ISD::SRA, dl, VT, Res, Shift, Flags); 5061 Created.push_back(Res.getNode()); 5062 } 5063 5064 return DAG.getNode(ISD::MUL, dl, VT, Res, Factor); 5065 } 5066 5067 SDValue TargetLowering::BuildSDIVPow2(SDNode *N, const APInt &Divisor, 5068 SelectionDAG &DAG, 5069 SmallVectorImpl<SDNode *> &Created) const { 5070 AttributeList Attr = DAG.getMachineFunction().getFunction().getAttributes(); 5071 const TargetLowering &TLI = DAG.getTargetLoweringInfo(); 5072 if (TLI.isIntDivCheap(N->getValueType(0), Attr)) 5073 return SDValue(N, 0); // Lower SDIV as SDIV 5074 return SDValue(); 5075 } 5076 5077 /// Given an ISD::SDIV node expressing a divide by constant, 5078 /// return a DAG expression to select that will generate the same value by 5079 /// multiplying by a magic number. 5080 /// Ref: "Hacker's Delight" or "The PowerPC Compiler Writer's Guide". 5081 SDValue TargetLowering::BuildSDIV(SDNode *N, SelectionDAG &DAG, 5082 bool IsAfterLegalization, 5083 SmallVectorImpl<SDNode *> &Created) const { 5084 SDLoc dl(N); 5085 EVT VT = N->getValueType(0); 5086 EVT SVT = VT.getScalarType(); 5087 EVT ShVT = getShiftAmountTy(VT, DAG.getDataLayout()); 5088 EVT ShSVT = ShVT.getScalarType(); 5089 unsigned EltBits = VT.getScalarSizeInBits(); 5090 5091 // Check to see if we can do this. 5092 // FIXME: We should be more aggressive here. 5093 if (!isTypeLegal(VT)) 5094 return SDValue(); 5095 5096 // If the sdiv has an 'exact' bit we can use a simpler lowering. 
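// Otherwise fall through to the general magic-number algorithm below. A
// worked example (illustrative, assuming W == 32 and d == 7): magic() yields
// m == 0x92492493 (negative) and s == 2, so the "d > 0 && m < 0" case adds
// the numerator: q = mulhs(n, m) + n; q = sra(q, 2); q += srl(q, 31).
// (Values as in the "Hacker's Delight" tables.)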
5097 if (N->getFlags().hasExact()) 5098 return BuildExactSDIV(*this, N, dl, DAG, Created); 5099 5100 SmallVector<SDValue, 16> MagicFactors, Factors, Shifts, ShiftMasks; 5101 5102 auto BuildSDIVPattern = [&](ConstantSDNode *C) { 5103 if (C->isNullValue()) 5104 return false; 5105 5106 const APInt &Divisor = C->getAPIntValue(); 5107 APInt::ms magics = Divisor.magic(); 5108 int NumeratorFactor = 0; 5109 int ShiftMask = -1; 5110 5111 if (Divisor.isOneValue() || Divisor.isAllOnesValue()) { 5112 // If d is +1/-1, we just multiply the numerator by +1/-1. 5113 NumeratorFactor = Divisor.getSExtValue(); 5114 magics.m = 0; 5115 magics.s = 0; 5116 ShiftMask = 0; 5117 } else if (Divisor.isStrictlyPositive() && magics.m.isNegative()) { 5118 // If d > 0 and m < 0, add the numerator. 5119 NumeratorFactor = 1; 5120 } else if (Divisor.isNegative() && magics.m.isStrictlyPositive()) { 5121 // If d < 0 and m > 0, subtract the numerator. 5122 NumeratorFactor = -1; 5123 } 5124 5125 MagicFactors.push_back(DAG.getConstant(magics.m, dl, SVT)); 5126 Factors.push_back(DAG.getConstant(NumeratorFactor, dl, SVT)); 5127 Shifts.push_back(DAG.getConstant(magics.s, dl, ShSVT)); 5128 ShiftMasks.push_back(DAG.getConstant(ShiftMask, dl, SVT)); 5129 return true; 5130 }; 5131 5132 SDValue N0 = N->getOperand(0); 5133 SDValue N1 = N->getOperand(1); 5134 5135 // Collect the shifts / magic values from each element. 5136 if (!ISD::matchUnaryPredicate(N1, BuildSDIVPattern)) 5137 return SDValue(); 5138 5139 SDValue MagicFactor, Factor, Shift, ShiftMask; 5140 if (VT.isVector()) { 5141 MagicFactor = DAG.getBuildVector(VT, dl, MagicFactors); 5142 Factor = DAG.getBuildVector(VT, dl, Factors); 5143 Shift = DAG.getBuildVector(ShVT, dl, Shifts); 5144 ShiftMask = DAG.getBuildVector(VT, dl, ShiftMasks); 5145 } else { 5146 MagicFactor = MagicFactors[0]; 5147 Factor = Factors[0]; 5148 Shift = Shifts[0]; 5149 ShiftMask = ShiftMasks[0]; 5150 } 5151 5152 // Multiply the numerator (operand 0) by the magic value. 5153 // FIXME: We should support doing a MUL in a wider type. 5154 SDValue Q; 5155 if (IsAfterLegalization ? isOperationLegal(ISD::MULHS, VT) 5156 : isOperationLegalOrCustom(ISD::MULHS, VT)) 5157 Q = DAG.getNode(ISD::MULHS, dl, VT, N0, MagicFactor); 5158 else if (IsAfterLegalization ? isOperationLegal(ISD::SMUL_LOHI, VT) 5159 : isOperationLegalOrCustom(ISD::SMUL_LOHI, VT)) { 5160 SDValue LoHi = 5161 DAG.getNode(ISD::SMUL_LOHI, dl, DAG.getVTList(VT, VT), N0, MagicFactor); 5162 Q = SDValue(LoHi.getNode(), 1); 5163 } else 5164 return SDValue(); // No mulhs or equivalent. 5165 Created.push_back(Q.getNode()); 5166 5167 // (Optionally) Add/subtract the numerator using Factor. 5168 Factor = DAG.getNode(ISD::MUL, dl, VT, N0, Factor); 5169 Created.push_back(Factor.getNode()); 5170 Q = DAG.getNode(ISD::ADD, dl, VT, Q, Factor); 5171 Created.push_back(Q.getNode()); 5172 5173 // Shift right algebraic by shift value. 5174 Q = DAG.getNode(ISD::SRA, dl, VT, Q, Shift); 5175 Created.push_back(Q.getNode()); 5176 5177 // Extract the sign bit, mask it and add it to the quotient. 
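// (The SRA above rounds toward negative infinity, while sdiv must round
//  toward zero; for negative quotients we therefore add back the shifted-out
//  sign bit. ShiftMask is all-ones for genuine divisors and zero for the
//  +/-1 lanes, which need no correction.)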
5178 SDValue SignShift = DAG.getConstant(EltBits - 1, dl, ShVT); 5179 SDValue T = DAG.getNode(ISD::SRL, dl, VT, Q, SignShift); 5180 Created.push_back(T.getNode()); 5181 T = DAG.getNode(ISD::AND, dl, VT, T, ShiftMask); 5182 Created.push_back(T.getNode()); 5183 return DAG.getNode(ISD::ADD, dl, VT, Q, T); 5184 } 5185 5186 /// Given an ISD::UDIV node expressing a divide by constant, 5187 /// return a DAG expression to select that will generate the same value by 5188 /// multiplying by a magic number. 5189 /// Ref: "Hacker's Delight" or "The PowerPC Compiler Writer's Guide". 5190 SDValue TargetLowering::BuildUDIV(SDNode *N, SelectionDAG &DAG, 5191 bool IsAfterLegalization, 5192 SmallVectorImpl<SDNode *> &Created) const { 5193 SDLoc dl(N); 5194 EVT VT = N->getValueType(0); 5195 EVT SVT = VT.getScalarType(); 5196 EVT ShVT = getShiftAmountTy(VT, DAG.getDataLayout()); 5197 EVT ShSVT = ShVT.getScalarType(); 5198 unsigned EltBits = VT.getScalarSizeInBits(); 5199 5200 // Check to see if we can do this. 5201 // FIXME: We should be more aggressive here. 5202 if (!isTypeLegal(VT)) 5203 return SDValue(); 5204 5205 bool UseNPQ = false; 5206 SmallVector<SDValue, 16> PreShifts, PostShifts, MagicFactors, NPQFactors; 5207 5208 auto BuildUDIVPattern = [&](ConstantSDNode *C) { 5209 if (C->isNullValue()) 5210 return false; 5211 // FIXME: We should use a narrower constant when the upper 5212 // bits are known to be zero. 5213 APInt Divisor = C->getAPIntValue(); 5214 APInt::mu magics = Divisor.magicu(); 5215 unsigned PreShift = 0, PostShift = 0; 5216 5217 // If the divisor is even, we can avoid using the expensive fixup by 5218 // shifting the divided value upfront. 5219 if (magics.a != 0 && !Divisor[0]) { 5220 PreShift = Divisor.countTrailingZeros(); 5221 // Get magic number for the shifted divisor. 5222 magics = Divisor.lshr(PreShift).magicu(PreShift); 5223 assert(magics.a == 0 && "Should use cheap fixup now"); 5224 } 5225 5226 APInt Magic = magics.m; 5227 5228 unsigned SelNPQ; 5229 if (magics.a == 0 || Divisor.isOneValue()) { 5230 assert(magics.s < Divisor.getBitWidth() && 5231 "We shouldn't generate an undefined shift!"); 5232 PostShift = magics.s; 5233 SelNPQ = false; 5234 } else { 5235 PostShift = magics.s - 1; 5236 SelNPQ = true; 5237 } 5238 5239 PreShifts.push_back(DAG.getConstant(PreShift, dl, ShSVT)); 5240 MagicFactors.push_back(DAG.getConstant(Magic, dl, SVT)); 5241 NPQFactors.push_back( 5242 DAG.getConstant(SelNPQ ? APInt::getOneBitSet(EltBits, EltBits - 1) 5243 : APInt::getNullValue(EltBits), 5244 dl, SVT)); 5245 PostShifts.push_back(DAG.getConstant(PostShift, dl, ShSVT)); 5246 UseNPQ |= SelNPQ; 5247 return true; 5248 }; 5249 5250 SDValue N0 = N->getOperand(0); 5251 SDValue N1 = N->getOperand(1); 5252 5253 // Collect the shifts/magic values from each element. 5254 if (!ISD::matchUnaryPredicate(N1, BuildUDIVPattern)) 5255 return SDValue(); 5256 5257 SDValue PreShift, PostShift, MagicFactor, NPQFactor; 5258 if (VT.isVector()) { 5259 PreShift = DAG.getBuildVector(ShVT, dl, PreShifts); 5260 MagicFactor = DAG.getBuildVector(VT, dl, MagicFactors); 5261 NPQFactor = DAG.getBuildVector(VT, dl, NPQFactors); 5262 PostShift = DAG.getBuildVector(ShVT, dl, PostShifts); 5263 } else { 5264 PreShift = PreShifts[0]; 5265 MagicFactor = MagicFactors[0]; 5266 PostShift = PostShifts[0]; 5267 } 5268 5269 SDValue Q = N0; 5270 Q = DAG.getNode(ISD::SRL, dl, VT, Q, PreShift); 5271 Created.push_back(Q.getNode()); 5272 5273 // FIXME: We should support doing a MUL in a wider type. 
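// Illustrative example of the NPQ fixup (assuming W == 32, d == 7, not taken
// from this file): magicu() yields m == 0x24924925 with a != 0, so we emit
// q = mulhu(n, m); npq = srl(n - q, 1); q = srl(npq + q, 2), with
// PostShift == s - 1 == 2.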
5274 auto GetMULHU = [&](SDValue X, SDValue Y) { 5275 if (IsAfterLegalization ? isOperationLegal(ISD::MULHU, VT) 5276 : isOperationLegalOrCustom(ISD::MULHU, VT)) 5277 return DAG.getNode(ISD::MULHU, dl, VT, X, Y); 5278 if (IsAfterLegalization ? isOperationLegal(ISD::UMUL_LOHI, VT) 5279 : isOperationLegalOrCustom(ISD::UMUL_LOHI, VT)) { 5280 SDValue LoHi = 5281 DAG.getNode(ISD::UMUL_LOHI, dl, DAG.getVTList(VT, VT), X, Y); 5282 return SDValue(LoHi.getNode(), 1); 5283 } 5284 return SDValue(); // No mulhu or equivalent 5285 }; 5286 5287 // Multiply the numerator (operand 0) by the magic value. 5288 Q = GetMULHU(Q, MagicFactor); 5289 if (!Q) 5290 return SDValue(); 5291 5292 Created.push_back(Q.getNode()); 5293 5294 if (UseNPQ) { 5295 SDValue NPQ = DAG.getNode(ISD::SUB, dl, VT, N0, Q); 5296 Created.push_back(NPQ.getNode()); 5297 5298 // For vectors we might have a mix of non-NPQ/NPQ paths, so use 5299 // MULHU to act as a SRL-by-1 for NPQ, else multiply by zero. 5300 if (VT.isVector()) 5301 NPQ = GetMULHU(NPQ, NPQFactor); 5302 else 5303 NPQ = DAG.getNode(ISD::SRL, dl, VT, NPQ, DAG.getConstant(1, dl, ShVT)); 5304 5305 Created.push_back(NPQ.getNode()); 5306 5307 Q = DAG.getNode(ISD::ADD, dl, VT, NPQ, Q); 5308 Created.push_back(Q.getNode()); 5309 } 5310 5311 Q = DAG.getNode(ISD::SRL, dl, VT, Q, PostShift); 5312 Created.push_back(Q.getNode()); 5313 5314 SDValue One = DAG.getConstant(1, dl, VT); 5315 SDValue IsOne = DAG.getSetCC(dl, VT, N1, One, ISD::SETEQ); 5316 return DAG.getSelect(dl, VT, IsOne, N0, Q); 5317 } 5318 5319 /// If all values in Values that *don't* match the predicate are same 'splat' 5320 /// value, then replace all values with that splat value. 5321 /// Else, if AlternativeReplacement was provided, then replace all values that 5322 /// do match predicate with AlternativeReplacement value. 5323 static void 5324 turnVectorIntoSplatVector(MutableArrayRef<SDValue> Values, 5325 std::function<bool(SDValue)> Predicate, 5326 SDValue AlternativeReplacement = SDValue()) { 5327 SDValue Replacement; 5328 // Is there a value for which the Predicate does *NOT* match? What is it? 5329 auto SplatValue = llvm::find_if_not(Values, Predicate); 5330 if (SplatValue != Values.end()) { 5331 // Does Values consist only of SplatValue's and values matching Predicate? 5332 if (llvm::all_of(Values, [Predicate, SplatValue](SDValue Value) { 5333 return Value == *SplatValue || Predicate(Value); 5334 })) // Then we shall replace values matching predicate with SplatValue. 5335 Replacement = *SplatValue; 5336 } 5337 if (!Replacement) { 5338 // Oops, we did not find the "baseline" splat value. 5339 if (!AlternativeReplacement) 5340 return; // Nothing to do. 5341 // Let's replace with provided value then. 5342 Replacement = AlternativeReplacement; 5343 } 5344 std::replace_if(Values.begin(), Values.end(), Predicate, Replacement); 5345 } 5346 5347 /// Given an ISD::UREM used only by an ISD::SETEQ or ISD::SETNE 5348 /// where the divisor is constant and the comparison target is zero, 5349 /// return a DAG expression that will generate the same comparison result 5350 /// using only multiplications, additions and shifts/rotations. 5351 /// Ref: "Hacker's Delight" 10-17. 
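/// A minimal illustration (assuming i32): (seteq (urem x, 5), 0) becomes
/// (setule (mul x, 0xCCCCCCCD), 0x33333333), since 5 * 0xCCCCCCCD == 1
/// (mod 2^32) and 0x33333333 == floor((2^32 - 1) / 5); multiples of 5 map
/// into [0, Q] and everything else maps above it. Even divisors additionally
/// rotate right by the number of trailing zero bits.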
5352 SDValue TargetLowering::buildUREMEqFold(EVT SETCCVT, SDValue REMNode,
5353 SDValue CompTargetNode,
5354 ISD::CondCode Cond,
5355 DAGCombinerInfo &DCI,
5356 const SDLoc &DL) const {
5357 SmallVector<SDNode *, 5> Built;
5358 if (SDValue Folded = prepareUREMEqFold(SETCCVT, REMNode, CompTargetNode, Cond,
5359 DCI, DL, Built)) {
5360 for (SDNode *N : Built)
5361 DCI.AddToWorklist(N);
5362 return Folded;
5363 }
5364 
5365 return SDValue();
5366 }
5367 
5368 SDValue
5369 TargetLowering::prepareUREMEqFold(EVT SETCCVT, SDValue REMNode,
5370 SDValue CompTargetNode, ISD::CondCode Cond,
5371 DAGCombinerInfo &DCI, const SDLoc &DL,
5372 SmallVectorImpl<SDNode *> &Created) const {
5373 // fold (seteq/ne (urem N, D), 0) -> (setule/ugt (rotr (mul N, P), K), Q)
5374 // - D must be constant, with D = D0 * 2^K where D0 is odd
5375 // - P is the multiplicative inverse of D0 modulo 2^W
5376 // - Q = floor(((2^W) - 1) / D)
5377 // where W is the width of the common type of N and D.
5378 assert((Cond == ISD::SETEQ || Cond == ISD::SETNE) &&
5379 "Only applicable for (in)equality comparisons.");
5380 
5381 SelectionDAG &DAG = DCI.DAG;
5382 
5383 EVT VT = REMNode.getValueType();
5384 EVT SVT = VT.getScalarType();
5385 EVT ShVT = getShiftAmountTy(VT, DAG.getDataLayout());
5386 EVT ShSVT = ShVT.getScalarType();
5387 
5388 // If MUL is unavailable, we cannot proceed in any case.
5389 if (!isOperationLegalOrCustom(ISD::MUL, VT))
5390 return SDValue();
5391 
5392 bool ComparingWithAllZeros = true;
5393 bool AllComparisonsWithNonZerosAreTautological = true;
5394 bool HadTautologicalLanes = false;
5395 bool AllLanesAreTautological = true;
5396 bool HadEvenDivisor = false;
5397 bool AllDivisorsArePowerOfTwo = true;
5398 bool HadTautologicalInvertedLanes = false;
5399 SmallVector<SDValue, 16> PAmts, KAmts, QAmts, IAmts;
5400 
5401 auto BuildUREMPattern = [&](ConstantSDNode *CDiv, ConstantSDNode *CCmp) {
5402 // Division by 0 is UB. Leave it to be constant-folded elsewhere.
5403 if (CDiv->isNullValue())
5404 return false;
5405 
5406 const APInt &D = CDiv->getAPIntValue();
5407 const APInt &Cmp = CCmp->getAPIntValue();
5408 
5409 ComparingWithAllZeros &= Cmp.isNullValue();
5410 
5411 // `x u% C1` is *always* less than C1. So given `x u% C1 == C2`,
5412 // if C2 is not less than C1, the comparison is always false.
5413 // But we will only be able to produce the comparison that will give the
5414 // opposite tautological answer. So this lane would need to be fixed up.
5415 bool TautologicalInvertedLane = D.ule(Cmp);
5416 HadTautologicalInvertedLanes |= TautologicalInvertedLane;
5417 
5418 // If all lanes are tautological (either all divisors are ones, or divisor
5419 // is not greater than the constant we are comparing with),
5420 // we will prefer to avoid the fold.
5421 bool TautologicalLane = D.isOneValue() || TautologicalInvertedLane;
5422 HadTautologicalLanes |= TautologicalLane;
5423 AllLanesAreTautological &= TautologicalLane;
5424 
5425 // If we are comparing with non-zero, we'll need to subtract said
5426 // comparison value from the LHS. But there is no point in doing that if
5427 // every lane where we are comparing with non-zero is tautological.
5428 if (!Cmp.isNullValue())
5429 AllComparisonsWithNonZerosAreTautological &= TautologicalLane;
5430 
5431 // Decompose D into D0 * 2^K
5432 unsigned K = D.countTrailingZeros();
5433 assert((!D.isOneValue() || (K == 0)) && "For divisor '1' we won't rotate.");
5434 APInt D0 = D.lshr(K);
5435 
5436 // D is even if it has trailing zeros.
5437 HadEvenDivisor |= (K != 0);
5438 // D is a power-of-two if D0 is one.
5439 // If all divisors are power-of-two, we will prefer to avoid the fold.
5440 AllDivisorsArePowerOfTwo &= D0.isOneValue();
5441 
5442 // P = inv(D0, 2^W)
5443 // 2^W requires W + 1 bits, so we have to extend and then truncate.
5444 unsigned W = D.getBitWidth();
5445 APInt P = D0.zext(W + 1)
5446 .multiplicativeInverse(APInt::getSignedMinValue(W + 1))
5447 .trunc(W);
5448 assert(!P.isNullValue() && "No multiplicative inverse!"); // unreachable
5449 assert((D0 * P).isOneValue() && "Multiplicative inverse sanity check.");
5450 
5451 // Q = floor((2^W - 1) u/ D)
5452 // R = ((2^W - 1) u% D)
5453 APInt Q, R;
5454 APInt::udivrem(APInt::getAllOnesValue(W), D, Q, R);
5455 
5456 // If we are comparing with zero, then that comparison constant is okay,
5457 // else it may need to be one less than that.
5458 if (Cmp.ugt(R))
5459 Q -= 1;
5460 
5461 assert(APInt::getAllOnesValue(ShSVT.getSizeInBits()).ugt(K) &&
5462 "We are expecting that K is always less than all-ones for ShSVT");
5463 
5464 // If the lane is tautological the result can be constant-folded.
5465 if (TautologicalLane) {
5466 // Set P and K to bogus values so we can try to splat them.
5467 P = 0;
5468 K = -1;
5469 // And ensure that comparison constant is tautological,
5470 // it will always compare true/false.
5471 Q = -1;
5472 }
5473 
5474 PAmts.push_back(DAG.getConstant(P, DL, SVT));
5475 KAmts.push_back(
5476 DAG.getConstant(APInt(ShSVT.getSizeInBits(), K), DL, ShSVT));
5477 QAmts.push_back(DAG.getConstant(Q, DL, SVT));
5478 return true;
5479 };
5480 
5481 SDValue N = REMNode.getOperand(0);
5482 SDValue D = REMNode.getOperand(1);
5483 
5484 // Collect the values from each element.
5485 if (!ISD::matchBinaryPredicate(D, CompTargetNode, BuildUREMPattern))
5486 return SDValue();
5487 
5488 // If all lanes are tautological, the result can be constant-folded.
5489 if (AllLanesAreTautological)
5490 return SDValue();
5491 
5492 // If this is a urem by a power-of-two, avoid the fold since it can be
5493 // best implemented as a bit test.
5494 if (AllDivisorsArePowerOfTwo)
5495 return SDValue();
5496 
5497 SDValue PVal, KVal, QVal;
5498 if (VT.isVector()) {
5499 if (HadTautologicalLanes) {
5500 // Try to turn PAmts into a splat, since we don't care about the values
5501 // that are currently '0'. If we can't, just keep the '0's.
5502 turnVectorIntoSplatVector(PAmts, isNullConstant);
5503 // Try to turn KAmts into a splat, since we don't care about the values
5504 // that are currently '-1'. If we can't, change them to '0's.
5505 turnVectorIntoSplatVector(KAmts, isAllOnesConstant,
5506 DAG.getConstant(0, DL, ShSVT));
5507 }
5508 
5509 PVal = DAG.getBuildVector(VT, DL, PAmts);
5510 KVal = DAG.getBuildVector(ShVT, DL, KAmts);
5511 QVal = DAG.getBuildVector(VT, DL, QAmts);
5512 } else {
5513 PVal = PAmts[0];
5514 KVal = KAmts[0];
5515 QVal = QAmts[0];
5516 }
5517 
5518 if (!ComparingWithAllZeros && !AllComparisonsWithNonZerosAreTautological) {
5519 if (!isOperationLegalOrCustom(ISD::SUB, VT))
5520 return SDValue(); // FIXME: Could/should use `ISD::ADD`?
5521 assert(CompTargetNode.getValueType() == N.getValueType() &&
5522 "Expecting that the types on LHS and RHS of comparisons match.");
5523 N = DAG.getNode(ISD::SUB, DL, VT, N, CompTargetNode);
5524 }
5525 
5526 // (mul N, P)
5527 SDValue Op0 = DAG.getNode(ISD::MUL, DL, VT, N, PVal);
5528 Created.push_back(Op0.getNode());
5529 
5530 // Rotate right only if any divisor was even. We avoid rotates for all-odd
5531 // divisors as a performance improvement, since rotating by 0 is a no-op.
5532 if (HadEvenDivisor) {
5533 // We need ROTR to do this.
5534 if (!isOperationLegalOrCustom(ISD::ROTR, VT))
5535 return SDValue();
5536 SDNodeFlags Flags;
5537 Flags.setExact(true);
5538 // UREM: (rotr (mul N, P), K)
5539 Op0 = DAG.getNode(ISD::ROTR, DL, VT, Op0, KVal, Flags);
5540 Created.push_back(Op0.getNode());
5541 }
5542 
5543 // UREM: (setule/setugt (rotr (mul N, P), K), Q)
5544 SDValue NewCC =
5545 DAG.getSetCC(DL, SETCCVT, Op0, QVal,
5546 ((Cond == ISD::SETEQ) ? ISD::SETULE : ISD::SETUGT));
5547 if (!HadTautologicalInvertedLanes)
5548 return NewCC;
5549 
5550 // If any lanes previously compared always-false, the NewCC will give
5551 // always-true result for them, so we need to fixup those lanes.
5552 // Or the other way around for inequality predicate.
5553 assert(VT.isVector() && "Can/should only get here for vectors.");
5554 Created.push_back(NewCC.getNode());
5555 
5556 // `x u% C1` is *always* less than C1. So given `x u% C1 == C2`,
5557 // if C2 is not less than C1, the comparison is always false.
5558 // But we have produced the comparison that will give the
5559 // opposite tautological answer. So these lanes would need to be fixed up.
5560 SDValue TautologicalInvertedChannels =
5561 DAG.getSetCC(DL, SETCCVT, D, CompTargetNode, ISD::SETULE);
5562 Created.push_back(TautologicalInvertedChannels.getNode());
5563 
5564 if (isOperationLegalOrCustom(ISD::VSELECT, SETCCVT)) {
5565 // If we have a vector select, let's replace the comparison results in the
5566 // affected lanes with the correct tautological result.
5567 SDValue Replacement = DAG.getBoolConstant(Cond == ISD::SETEQ ? false : true,
5568 DL, SETCCVT, SETCCVT);
5569 return DAG.getNode(ISD::VSELECT, DL, SETCCVT, TautologicalInvertedChannels,
5570 Replacement, NewCC);
5571 }
5572 
5573 // Else, we can just invert the comparison result in the appropriate lanes.
5574 if (isOperationLegalOrCustom(ISD::XOR, SETCCVT))
5575 return DAG.getNode(ISD::XOR, DL, SETCCVT, NewCC,
5576 TautologicalInvertedChannels);
5577 
5578 return SDValue(); // Don't know how to lower.
5579 }
5580 
5581 /// Given an ISD::SREM used only by an ISD::SETEQ or ISD::SETNE
5582 /// where the divisor is constant and the comparison target is zero,
5583 /// return a DAG expression that will generate the same comparison result
5584 /// using only multiplications, additions and shifts/rotations.
5585 /// Ref: "Hacker's Delight" 10-17.
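/// A minimal illustration (assuming i32): (seteq (srem x, 3), 0) becomes
/// (setule (add (mul x, 0xAAAAAAAB), 0x2AAAAAAA), 0x55555554), where
/// 0xAAAAAAAB == inv(3) mod 2^32, A == floor((2^31 - 1) / 3) == 0x2AAAAAAA
/// re-centers the signed range, and Q == 2 * A == 0x55555554 (here K == 0,
/// so no rotate is needed).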
5586 SDValue TargetLowering::buildSREMEqFold(EVT SETCCVT, SDValue REMNode, 5587 SDValue CompTargetNode, 5588 ISD::CondCode Cond, 5589 DAGCombinerInfo &DCI, 5590 const SDLoc &DL) const { 5591 SmallVector<SDNode *, 7> Built; 5592 if (SDValue Folded = prepareSREMEqFold(SETCCVT, REMNode, CompTargetNode, Cond, 5593 DCI, DL, Built)) { 5594 assert(Built.size() <= 7 && "Max size prediction failed."); 5595 for (SDNode *N : Built) 5596 DCI.AddToWorklist(N); 5597 return Folded; 5598 } 5599 5600 return SDValue(); 5601 } 5602 5603 SDValue 5604 TargetLowering::prepareSREMEqFold(EVT SETCCVT, SDValue REMNode, 5605 SDValue CompTargetNode, ISD::CondCode Cond, 5606 DAGCombinerInfo &DCI, const SDLoc &DL, 5607 SmallVectorImpl<SDNode *> &Created) const { 5608 // Fold: 5609 // (seteq/ne (srem N, D), 0) 5610 // To: 5611 // (setule/ugt (rotr (add (mul N, P), A), K), Q) 5612 // 5613 // - D must be constant, with D = D0 * 2^K where D0 is odd 5614 // - P is the multiplicative inverse of D0 modulo 2^W 5615 // - A = bitwiseand(floor((2^(W - 1) - 1) / D0), (-(2^k))) 5616 // - Q = floor((2 * A) / (2^K)) 5617 // where W is the width of the common type of N and D. 5618 assert((Cond == ISD::SETEQ || Cond == ISD::SETNE) && 5619 "Only applicable for (in)equality comparisons."); 5620 5621 SelectionDAG &DAG = DCI.DAG; 5622 5623 EVT VT = REMNode.getValueType(); 5624 EVT SVT = VT.getScalarType(); 5625 EVT ShVT = getShiftAmountTy(VT, DAG.getDataLayout()); 5626 EVT ShSVT = ShVT.getScalarType(); 5627 5628 // If MUL is unavailable, we cannot proceed in any case. 5629 if (!isOperationLegalOrCustom(ISD::MUL, VT)) 5630 return SDValue(); 5631 5632 // TODO: Could support comparing with non-zero too. 5633 ConstantSDNode *CompTarget = isConstOrConstSplat(CompTargetNode); 5634 if (!CompTarget || !CompTarget->isNullValue()) 5635 return SDValue(); 5636 5637 bool HadIntMinDivisor = false; 5638 bool HadOneDivisor = false; 5639 bool AllDivisorsAreOnes = true; 5640 bool HadEvenDivisor = false; 5641 bool NeedToApplyOffset = false; 5642 bool AllDivisorsArePowerOfTwo = true; 5643 SmallVector<SDValue, 16> PAmts, AAmts, KAmts, QAmts; 5644 5645 auto BuildSREMPattern = [&](ConstantSDNode *C) { 5646 // Division by 0 is UB. Leave it to be constant-folded elsewhere. 5647 if (C->isNullValue()) 5648 return false; 5649 5650 // FIXME: we don't fold `rem %X, -C` to `rem %X, C` in DAGCombine. 5651 5652 // WARNING: this fold is only valid for positive divisors! 5653 APInt D = C->getAPIntValue(); 5654 if (D.isNegative()) 5655 D.negate(); // `rem %X, -C` is equivalent to `rem %X, C` 5656 5657 HadIntMinDivisor |= D.isMinSignedValue(); 5658 5659 // If all divisors are ones, we will prefer to avoid the fold. 5660 HadOneDivisor |= D.isOneValue(); 5661 AllDivisorsAreOnes &= D.isOneValue(); 5662 5663 // Decompose D into D0 * 2^K 5664 unsigned K = D.countTrailingZeros(); 5665 assert((!D.isOneValue() || (K == 0)) && "For divisor '1' we won't rotate."); 5666 APInt D0 = D.lshr(K); 5667 5668 if (!D.isMinSignedValue()) { 5669 // D is even if it has trailing zeros; unless it's INT_MIN, in which case 5670 // we don't care about this lane in this fold, we'll special-handle it. 5671 HadEvenDivisor |= (K != 0); 5672 } 5673 5674 // D is a power-of-two if D0 is one. This includes INT_MIN. 5675 // If all divisors are power-of-two, we will prefer to avoid the fold. 5676 AllDivisorsArePowerOfTwo &= D0.isOneValue(); 5677 5678 // P = inv(D0, 2^W) 5679 // 2^W requires W + 1 bits, so we have to extend and then truncate. 
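// (For example, with D0 == 3 and W == 32: P == 0xAAAAAAAB, since
//  3 * 0xAAAAAAAB == 2^33 + 1 == 1 (mod 2^32).)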
5680 unsigned W = D.getBitWidth();
5681 APInt P = D0.zext(W + 1)
5682 .multiplicativeInverse(APInt::getSignedMinValue(W + 1))
5683 .trunc(W);
5684 assert(!P.isNullValue() && "No multiplicative inverse!"); // unreachable
5685 assert((D0 * P).isOneValue() && "Multiplicative inverse sanity check.");
5686 
5687 // A = floor((2^(W - 1) - 1) / D0) & -2^K
5688 APInt A = APInt::getSignedMaxValue(W).udiv(D0);
5689 A.clearLowBits(K);
5690 
5691 if (!D.isMinSignedValue()) {
5692 // If the divisor is INT_MIN, we don't care about this lane in this fold;
5693 // we'll special-handle it.
5694 NeedToApplyOffset |= A != 0;
5695 }
5696 
5697 // Q = floor((2 * A) / (2^K))
5698 APInt Q = (2 * A).udiv(APInt::getOneBitSet(W, K));
5699 
5700 assert(APInt::getAllOnesValue(SVT.getSizeInBits()).ugt(A) &&
5701 "We are expecting that A is always less than all-ones for SVT");
5702 assert(APInt::getAllOnesValue(ShSVT.getSizeInBits()).ugt(K) &&
5703 "We are expecting that K is always less than all-ones for ShSVT");
5704 
5705 // If the divisor is 1 the result can be constant-folded. Likewise, we
5706 // don't care about INT_MIN lanes, those can be set to undef if appropriate.
5707 if (D.isOneValue()) {
5708 // Set P, A and K to bogus values so we can try to splat them.
5709 P = 0;
5710 A = -1;
5711 K = -1;
5712 
5713 // x ?% 1 == 0 <--> true <--> x u<= -1
5714 Q = -1;
5715 }
5716 
5717 PAmts.push_back(DAG.getConstant(P, DL, SVT));
5718 AAmts.push_back(DAG.getConstant(A, DL, SVT));
5719 KAmts.push_back(
5720 DAG.getConstant(APInt(ShSVT.getSizeInBits(), K), DL, ShSVT));
5721 QAmts.push_back(DAG.getConstant(Q, DL, SVT));
5722 return true;
5723 };
5724 
5725 SDValue N = REMNode.getOperand(0);
5726 SDValue D = REMNode.getOperand(1);
5727 
5728 // Collect the values from each element.
5729 if (!ISD::matchUnaryPredicate(D, BuildSREMPattern))
5730 return SDValue();
5731 
5732 // If this is a srem by a one, avoid the fold since it can be constant-folded.
5733 if (AllDivisorsAreOnes)
5734 return SDValue();
5735 
5736 // If this is a srem by a power-of-two (including INT_MIN), avoid the fold
5737 // since it can be best implemented as a bit test.
5738 if (AllDivisorsArePowerOfTwo)
5739 return SDValue();
5740 
5741 SDValue PVal, AVal, KVal, QVal;
5742 if (VT.isVector()) {
5743 if (HadOneDivisor) {
5744 // Try to turn PAmts into a splat, since we don't care about the values
5745 // that are currently '0'. If we can't, just keep the '0's.
5746 turnVectorIntoSplatVector(PAmts, isNullConstant);
5747 // Try to turn AAmts into a splat, since we don't care about the
5748 // values that are currently '-1'. If we can't, change them to '0's.
5749 turnVectorIntoSplatVector(AAmts, isAllOnesConstant,
5750 DAG.getConstant(0, DL, SVT));
5751 // Try to turn KAmts into a splat, since we don't care about the values
5752 // that are currently '-1'. If we can't, change them to '0's.
5753 turnVectorIntoSplatVector(KAmts, isAllOnesConstant,
5754 DAG.getConstant(0, DL, ShSVT));
5755 }
5756 
5757 PVal = DAG.getBuildVector(VT, DL, PAmts);
5758 AVal = DAG.getBuildVector(VT, DL, AAmts);
5759 KVal = DAG.getBuildVector(ShVT, DL, KAmts);
5760 QVal = DAG.getBuildVector(VT, DL, QAmts);
5761 } else {
5762 PVal = PAmts[0];
5763 AVal = AAmts[0];
5764 KVal = KAmts[0];
5765 QVal = QAmts[0];
5766 }
5767 
5768 // (mul N, P)
5769 SDValue Op0 = DAG.getNode(ISD::MUL, DL, VT, N, PVal);
5770 Created.push_back(Op0.getNode());
5771 
5772 if (NeedToApplyOffset) {
5773 // We need ADD to do this.
5774 if (!isOperationLegalOrCustom(ISD::ADD, VT)) 5775 return SDValue(); 5776 5777 // (add (mul N, P), A) 5778 Op0 = DAG.getNode(ISD::ADD, DL, VT, Op0, AVal); 5779 Created.push_back(Op0.getNode()); 5780 } 5781 5782 // Rotate right only if any divisor was even. We avoid rotates for all-odd 5783 // divisors as a performance improvement, since rotating by 0 is a no-op. 5784 if (HadEvenDivisor) { 5785 // We need ROTR to do this. 5786 if (!isOperationLegalOrCustom(ISD::ROTR, VT)) 5787 return SDValue(); 5788 SDNodeFlags Flags; 5789 Flags.setExact(true); 5790 // SREM: (rotr (add (mul N, P), A), K) 5791 Op0 = DAG.getNode(ISD::ROTR, DL, VT, Op0, KVal, Flags); 5792 Created.push_back(Op0.getNode()); 5793 } 5794 5795 // SREM: (setule/setugt (rotr (add (mul N, P), A), K), Q) 5796 SDValue Fold = 5797 DAG.getSetCC(DL, SETCCVT, Op0, QVal, 5798 ((Cond == ISD::SETEQ) ? ISD::SETULE : ISD::SETUGT)); 5799 5800 // If we didn't have lanes with INT_MIN divisor, then we're done. 5801 if (!HadIntMinDivisor) 5802 return Fold; 5803 5804 // That fold is only valid for positive divisors. Which effectively means, 5805 // it is invalid for INT_MIN divisors. So if we have such a lane, 5806 // we must fix-up results for said lanes. 5807 assert(VT.isVector() && "Can/should only get here for vectors."); 5808 5809 if (!isOperationLegalOrCustom(ISD::SETEQ, VT) || 5810 !isOperationLegalOrCustom(ISD::AND, VT) || 5811 !isOperationLegalOrCustom(Cond, VT) || 5812 !isOperationLegalOrCustom(ISD::VSELECT, VT)) 5813 return SDValue(); 5814 5815 Created.push_back(Fold.getNode()); 5816 5817 SDValue IntMin = DAG.getConstant( 5818 APInt::getSignedMinValue(SVT.getScalarSizeInBits()), DL, VT); 5819 SDValue IntMax = DAG.getConstant( 5820 APInt::getSignedMaxValue(SVT.getScalarSizeInBits()), DL, VT); 5821 SDValue Zero = 5822 DAG.getConstant(APInt::getNullValue(SVT.getScalarSizeInBits()), DL, VT); 5823 5824 // Which lanes had INT_MIN divisors? Divisor is constant, so const-folded. 5825 SDValue DivisorIsIntMin = DAG.getSetCC(DL, SETCCVT, D, IntMin, ISD::SETEQ); 5826 Created.push_back(DivisorIsIntMin.getNode()); 5827 5828 // (N s% INT_MIN) ==/!= 0 <--> (N & INT_MAX) ==/!= 0 5829 SDValue Masked = DAG.getNode(ISD::AND, DL, VT, N, IntMax); 5830 Created.push_back(Masked.getNode()); 5831 SDValue MaskedIsZero = DAG.getSetCC(DL, SETCCVT, Masked, Zero, Cond); 5832 Created.push_back(MaskedIsZero.getNode()); 5833 5834 // To produce final result we need to blend 2 vectors: 'SetCC' and 5835 // 'MaskedIsZero'. If the divisor for channel was *NOT* INT_MIN, we pick 5836 // from 'Fold', else pick from 'MaskedIsZero'. Since 'DivisorIsIntMin' is 5837 // constant-folded, select can get lowered to a shuffle with constant mask. 5838 SDValue Blended = 5839 DAG.getNode(ISD::VSELECT, DL, VT, DivisorIsIntMin, MaskedIsZero, Fold); 5840 5841 return Blended; 5842 } 5843 5844 bool TargetLowering:: 5845 verifyReturnAddressArgumentIsConstant(SDValue Op, SelectionDAG &DAG) const { 5846 if (!isa<ConstantSDNode>(Op.getOperand(0))) { 5847 DAG.getContext()->emitError("argument to '__builtin_return_address' must " 5848 "be a constant integer"); 5849 return true; 5850 } 5851 5852 return false; 5853 } 5854 5855 SDValue TargetLowering::getNegatedExpression(SDValue Op, SelectionDAG &DAG, 5856 bool LegalOps, bool OptForSize, 5857 NegatibleCost &Cost, 5858 unsigned Depth) const { 5859 // fneg is removable even if it has multiple uses. 5860 if (Op.getOpcode() == ISD::FNEG) { 5861 Cost = NegatibleCost::Cheaper; 5862 return Op.getOperand(0); 5863 } 5864 5865 // Don't recurse exponentially. 
5866 if (Depth > SelectionDAG::MaxRecursionDepth) 5867 return SDValue(); 5868 5869 // Pre-increment recursion depth for use in recursive calls. 5870 ++Depth; 5871 const SDNodeFlags Flags = Op->getFlags(); 5872 const TargetOptions &Options = DAG.getTarget().Options; 5873 EVT VT = Op.getValueType(); 5874 unsigned Opcode = Op.getOpcode(); 5875 5876 // Don't allow anything with multiple uses unless we know it is free. 5877 if (!Op.hasOneUse() && Opcode != ISD::ConstantFP) { 5878 bool IsFreeExtend = Opcode == ISD::FP_EXTEND && 5879 isFPExtFree(VT, Op.getOperand(0).getValueType()); 5880 if (!IsFreeExtend) 5881 return SDValue(); 5882 } 5883 5884 auto RemoveDeadNode = [&](SDValue N) { 5885 if (N && N.getNode()->use_empty()) 5886 DAG.RemoveDeadNode(N.getNode()); 5887 }; 5888 5889 SDLoc DL(Op); 5890 5891 switch (Opcode) { 5892 case ISD::ConstantFP: { 5893 // Don't invert constant FP values after legalization unless the target says 5894 // the negated constant is legal. 5895 bool IsOpLegal = 5896 isOperationLegal(ISD::ConstantFP, VT) || 5897 isFPImmLegal(neg(cast<ConstantFPSDNode>(Op)->getValueAPF()), VT, 5898 OptForSize); 5899 5900 if (LegalOps && !IsOpLegal) 5901 break; 5902 5903 APFloat V = cast<ConstantFPSDNode>(Op)->getValueAPF(); 5904 V.changeSign(); 5905 SDValue CFP = DAG.getConstantFP(V, DL, VT); 5906 5907 // If we already have the use of the negated floating constant, it is free 5908 // to negate it even it has multiple uses. 5909 if (!Op.hasOneUse() && CFP.use_empty()) 5910 break; 5911 Cost = NegatibleCost::Neutral; 5912 return CFP; 5913 } 5914 case ISD::BUILD_VECTOR: { 5915 // Only permit BUILD_VECTOR of constants. 5916 if (llvm::any_of(Op->op_values(), [&](SDValue N) { 5917 return !N.isUndef() && !isa<ConstantFPSDNode>(N); 5918 })) 5919 break; 5920 5921 bool IsOpLegal = 5922 (isOperationLegal(ISD::ConstantFP, VT) && 5923 isOperationLegal(ISD::BUILD_VECTOR, VT)) || 5924 llvm::all_of(Op->op_values(), [&](SDValue N) { 5925 return N.isUndef() || 5926 isFPImmLegal(neg(cast<ConstantFPSDNode>(N)->getValueAPF()), VT, 5927 OptForSize); 5928 }); 5929 5930 if (LegalOps && !IsOpLegal) 5931 break; 5932 5933 SmallVector<SDValue, 4> Ops; 5934 for (SDValue C : Op->op_values()) { 5935 if (C.isUndef()) { 5936 Ops.push_back(C); 5937 continue; 5938 } 5939 APFloat V = cast<ConstantFPSDNode>(C)->getValueAPF(); 5940 V.changeSign(); 5941 Ops.push_back(DAG.getConstantFP(V, DL, C.getValueType())); 5942 } 5943 Cost = NegatibleCost::Neutral; 5944 return DAG.getBuildVector(VT, DL, Ops); 5945 } 5946 case ISD::FADD: { 5947 if (!Options.NoSignedZerosFPMath && !Flags.hasNoSignedZeros()) 5948 break; 5949 5950 // After operation legalization, it might not be legal to create new FSUBs. 5951 if (LegalOps && !isOperationLegalOrCustom(ISD::FSUB, VT)) 5952 break; 5953 SDValue X = Op.getOperand(0), Y = Op.getOperand(1); 5954 5955 // fold (fneg (fadd X, Y)) -> (fsub (fneg X), Y) 5956 NegatibleCost CostX = NegatibleCost::Expensive; 5957 SDValue NegX = 5958 getNegatedExpression(X, DAG, LegalOps, OptForSize, CostX, Depth); 5959 // fold (fneg (fadd X, Y)) -> (fsub (fneg Y), X) 5960 NegatibleCost CostY = NegatibleCost::Expensive; 5961 SDValue NegY = 5962 getNegatedExpression(Y, DAG, LegalOps, OptForSize, CostY, Depth); 5963 5964 // Negate the X if its cost is less or equal than Y. 5965 if (NegX && (CostX <= CostY)) { 5966 Cost = CostX; 5967 SDValue N = DAG.getNode(ISD::FSUB, DL, VT, NegX, Y, Flags); 5968 if (NegY != N) 5969 RemoveDeadNode(NegY); 5970 return N; 5971 } 5972 5973 // Negate the Y if it is not expensive. 
5974 if (NegY) { 5975 Cost = CostY; 5976 SDValue N = DAG.getNode(ISD::FSUB, DL, VT, NegY, X, Flags); 5977 if (NegX != N) 5978 RemoveDeadNode(NegX); 5979 return N; 5980 } 5981 break; 5982 } 5983 case ISD::FSUB: { 5984 // We can't turn -(A-B) into B-A when we honor signed zeros. 5985 if (!Options.NoSignedZerosFPMath && !Flags.hasNoSignedZeros()) 5986 break; 5987 5988 SDValue X = Op.getOperand(0), Y = Op.getOperand(1); 5989 // fold (fneg (fsub 0, Y)) -> Y 5990 if (ConstantFPSDNode *C = isConstOrConstSplatFP(X, /*AllowUndefs*/ true)) 5991 if (C->isZero()) { 5992 Cost = NegatibleCost::Cheaper; 5993 return Y; 5994 } 5995 5996 // fold (fneg (fsub X, Y)) -> (fsub Y, X) 5997 Cost = NegatibleCost::Neutral; 5998 return DAG.getNode(ISD::FSUB, DL, VT, Y, X, Flags); 5999 } 6000 case ISD::FMUL: 6001 case ISD::FDIV: { 6002 SDValue X = Op.getOperand(0), Y = Op.getOperand(1); 6003 6004 // fold (fneg (fmul X, Y)) -> (fmul (fneg X), Y) 6005 NegatibleCost CostX = NegatibleCost::Expensive; 6006 SDValue NegX = 6007 getNegatedExpression(X, DAG, LegalOps, OptForSize, CostX, Depth); 6008 // fold (fneg (fmul X, Y)) -> (fmul X, (fneg Y)) 6009 NegatibleCost CostY = NegatibleCost::Expensive; 6010 SDValue NegY = 6011 getNegatedExpression(Y, DAG, LegalOps, OptForSize, CostY, Depth); 6012 6013 // Negate the X if its cost is less or equal than Y. 6014 if (NegX && (CostX <= CostY)) { 6015 Cost = CostX; 6016 SDValue N = DAG.getNode(Opcode, DL, VT, NegX, Y, Flags); 6017 if (NegY != N) 6018 RemoveDeadNode(NegY); 6019 return N; 6020 } 6021 6022 // Ignore X * 2.0 because that is expected to be canonicalized to X + X. 6023 if (auto *C = isConstOrConstSplatFP(Op.getOperand(1))) 6024 if (C->isExactlyValue(2.0) && Op.getOpcode() == ISD::FMUL) 6025 break; 6026 6027 // Negate the Y if it is not expensive. 6028 if (NegY) { 6029 Cost = CostY; 6030 SDValue N = DAG.getNode(Opcode, DL, VT, X, NegY, Flags); 6031 if (NegX != N) 6032 RemoveDeadNode(NegX); 6033 return N; 6034 } 6035 break; 6036 } 6037 case ISD::FMA: 6038 case ISD::FMAD: { 6039 if (!Options.NoSignedZerosFPMath && !Flags.hasNoSignedZeros()) 6040 break; 6041 6042 SDValue X = Op.getOperand(0), Y = Op.getOperand(1), Z = Op.getOperand(2); 6043 NegatibleCost CostZ = NegatibleCost::Expensive; 6044 SDValue NegZ = 6045 getNegatedExpression(Z, DAG, LegalOps, OptForSize, CostZ, Depth); 6046 // Give up if fail to negate the Z. 6047 if (!NegZ) 6048 break; 6049 6050 // fold (fneg (fma X, Y, Z)) -> (fma (fneg X), Y, (fneg Z)) 6051 NegatibleCost CostX = NegatibleCost::Expensive; 6052 SDValue NegX = 6053 getNegatedExpression(X, DAG, LegalOps, OptForSize, CostX, Depth); 6054 // fold (fneg (fma X, Y, Z)) -> (fma X, (fneg Y), (fneg Z)) 6055 NegatibleCost CostY = NegatibleCost::Expensive; 6056 SDValue NegY = 6057 getNegatedExpression(Y, DAG, LegalOps, OptForSize, CostY, Depth); 6058 6059 // Negate the X if its cost is less or equal than Y. 6060 if (NegX && (CostX <= CostY)) { 6061 Cost = std::min(CostX, CostZ); 6062 SDValue N = DAG.getNode(Opcode, DL, VT, NegX, Y, NegZ, Flags); 6063 if (NegY != N) 6064 RemoveDeadNode(NegY); 6065 return N; 6066 } 6067 6068 // Negate the Y if it is not expensive. 
6069 if (NegY) { 6070 Cost = std::min(CostY, CostZ); 6071 SDValue N = DAG.getNode(Opcode, DL, VT, X, NegY, NegZ, Flags); 6072 if (NegX != N) 6073 RemoveDeadNode(NegX); 6074 return N; 6075 } 6076 break; 6077 } 6078 6079 case ISD::FP_EXTEND: 6080 case ISD::FSIN: 6081 if (SDValue NegV = getNegatedExpression(Op.getOperand(0), DAG, LegalOps, 6082 OptForSize, Cost, Depth)) 6083 return DAG.getNode(Opcode, DL, VT, NegV); 6084 break; 6085 case ISD::FP_ROUND: 6086 if (SDValue NegV = getNegatedExpression(Op.getOperand(0), DAG, LegalOps, 6087 OptForSize, Cost, Depth)) 6088 return DAG.getNode(ISD::FP_ROUND, DL, VT, NegV, Op.getOperand(1)); 6089 break; 6090 } 6091 6092 return SDValue(); 6093 } 6094 6095 //===----------------------------------------------------------------------===// 6096 // Legalization Utilities 6097 //===----------------------------------------------------------------------===// 6098 6099 bool TargetLowering::expandMUL_LOHI(unsigned Opcode, EVT VT, const SDLoc &dl, 6100 SDValue LHS, SDValue RHS, 6101 SmallVectorImpl<SDValue> &Result, 6102 EVT HiLoVT, SelectionDAG &DAG, 6103 MulExpansionKind Kind, SDValue LL, 6104 SDValue LH, SDValue RL, SDValue RH) const { 6105 assert(Opcode == ISD::MUL || Opcode == ISD::UMUL_LOHI || 6106 Opcode == ISD::SMUL_LOHI); 6107 6108 bool HasMULHS = (Kind == MulExpansionKind::Always) || 6109 isOperationLegalOrCustom(ISD::MULHS, HiLoVT); 6110 bool HasMULHU = (Kind == MulExpansionKind::Always) || 6111 isOperationLegalOrCustom(ISD::MULHU, HiLoVT); 6112 bool HasSMUL_LOHI = (Kind == MulExpansionKind::Always) || 6113 isOperationLegalOrCustom(ISD::SMUL_LOHI, HiLoVT); 6114 bool HasUMUL_LOHI = (Kind == MulExpansionKind::Always) || 6115 isOperationLegalOrCustom(ISD::UMUL_LOHI, HiLoVT); 6116 6117 if (!HasMULHU && !HasMULHS && !HasUMUL_LOHI && !HasSMUL_LOHI) 6118 return false; 6119 6120 unsigned OuterBitSize = VT.getScalarSizeInBits(); 6121 unsigned InnerBitSize = HiLoVT.getScalarSizeInBits(); 6122 6123 // LL, LH, RL, and RH must be either all NULL or all set to a value. 6124 assert((LL.getNode() && LH.getNode() && RL.getNode() && RH.getNode()) || 6125 (!LL.getNode() && !LH.getNode() && !RL.getNode() && !RH.getNode())); 6126 6127 SDVTList VTs = DAG.getVTList(HiLoVT, HiLoVT); 6128 auto MakeMUL_LOHI = [&](SDValue L, SDValue R, SDValue &Lo, SDValue &Hi, 6129 bool Signed) -> bool { 6130 if ((Signed && HasSMUL_LOHI) || (!Signed && HasUMUL_LOHI)) { 6131 Lo = DAG.getNode(Signed ? ISD::SMUL_LOHI : ISD::UMUL_LOHI, dl, VTs, L, R); 6132 Hi = SDValue(Lo.getNode(), 1); 6133 return true; 6134 } 6135 if ((Signed && HasMULHS) || (!Signed && HasMULHU)) { 6136 Lo = DAG.getNode(ISD::MUL, dl, HiLoVT, L, R); 6137 Hi = DAG.getNode(Signed ? ISD::MULHS : ISD::MULHU, dl, HiLoVT, L, R); 6138 return true; 6139 } 6140 return false; 6141 }; 6142 6143 SDValue Lo, Hi; 6144 6145 if (!LL.getNode() && !RL.getNode() && 6146 isOperationLegalOrCustom(ISD::TRUNCATE, HiLoVT)) { 6147 LL = DAG.getNode(ISD::TRUNCATE, dl, HiLoVT, LHS); 6148 RL = DAG.getNode(ISD::TRUNCATE, dl, HiLoVT, RHS); 6149 } 6150 6151 if (!LL.getNode()) 6152 return false; 6153 6154 APInt HighMask = APInt::getHighBitsSet(OuterBitSize, InnerBitSize); 6155 if (DAG.MaskedValueIsZero(LHS, HighMask) && 6156 DAG.MaskedValueIsZero(RHS, HighMask)) { 6157 // The inputs are both zero-extended. 
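// (MaskedValueIsZero has proved the high halves of both operands are zero,
//  so a single narrow unsigned multiply already yields the full product; any
//  upper result words are zero.)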
6158 if (MakeMUL_LOHI(LL, RL, Lo, Hi, false)) { 6159 Result.push_back(Lo); 6160 Result.push_back(Hi); 6161 if (Opcode != ISD::MUL) { 6162 SDValue Zero = DAG.getConstant(0, dl, HiLoVT); 6163 Result.push_back(Zero); 6164 Result.push_back(Zero); 6165 } 6166 return true; 6167 } 6168 } 6169 6170 if (!VT.isVector() && Opcode == ISD::MUL && 6171 DAG.ComputeNumSignBits(LHS) > InnerBitSize && 6172 DAG.ComputeNumSignBits(RHS) > InnerBitSize) { 6173 // The input values are both sign-extended. 6174 // TODO non-MUL case? 6175 if (MakeMUL_LOHI(LL, RL, Lo, Hi, true)) { 6176 Result.push_back(Lo); 6177 Result.push_back(Hi); 6178 return true; 6179 } 6180 } 6181 6182 unsigned ShiftAmount = OuterBitSize - InnerBitSize; 6183 EVT ShiftAmountTy = getShiftAmountTy(VT, DAG.getDataLayout()); 6184 if (APInt::getMaxValue(ShiftAmountTy.getSizeInBits()).ult(ShiftAmount)) { 6185 // FIXME getShiftAmountTy does not always return a sensible result when VT 6186 // is an illegal type, and so the type may be too small to fit the shift 6187 // amount. Override it with i32. The shift will have to be legalized. 6188 ShiftAmountTy = MVT::i32; 6189 } 6190 SDValue Shift = DAG.getConstant(ShiftAmount, dl, ShiftAmountTy); 6191 6192 if (!LH.getNode() && !RH.getNode() && 6193 isOperationLegalOrCustom(ISD::SRL, VT) && 6194 isOperationLegalOrCustom(ISD::TRUNCATE, HiLoVT)) { 6195 LH = DAG.getNode(ISD::SRL, dl, VT, LHS, Shift); 6196 LH = DAG.getNode(ISD::TRUNCATE, dl, HiLoVT, LH); 6197 RH = DAG.getNode(ISD::SRL, dl, VT, RHS, Shift); 6198 RH = DAG.getNode(ISD::TRUNCATE, dl, HiLoVT, RH); 6199 } 6200 6201 if (!LH.getNode()) 6202 return false; 6203 6204 if (!MakeMUL_LOHI(LL, RL, Lo, Hi, false)) 6205 return false; 6206 6207 Result.push_back(Lo); 6208 6209 if (Opcode == ISD::MUL) { 6210 RH = DAG.getNode(ISD::MUL, dl, HiLoVT, LL, RH); 6211 LH = DAG.getNode(ISD::MUL, dl, HiLoVT, LH, RL); 6212 Hi = DAG.getNode(ISD::ADD, dl, HiLoVT, Hi, RH); 6213 Hi = DAG.getNode(ISD::ADD, dl, HiLoVT, Hi, LH); 6214 Result.push_back(Hi); 6215 return true; 6216 } 6217 6218 // Compute the full width result. 6219 auto Merge = [&](SDValue Lo, SDValue Hi) -> SDValue { 6220 Lo = DAG.getNode(ISD::ZERO_EXTEND, dl, VT, Lo); 6221 Hi = DAG.getNode(ISD::ZERO_EXTEND, dl, VT, Hi); 6222 Hi = DAG.getNode(ISD::SHL, dl, VT, Hi, Shift); 6223 return DAG.getNode(ISD::OR, dl, VT, Lo, Hi); 6224 }; 6225 6226 SDValue Next = DAG.getNode(ISD::ZERO_EXTEND, dl, VT, Hi); 6227 if (!MakeMUL_LOHI(LL, RH, Lo, Hi, false)) 6228 return false; 6229 6230 // This is effectively the add part of a multiply-add of half-sized operands, 6231 // so it cannot overflow. 
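// (In terms of I == InnerBitSize: Next < 2^I and Lo/Hi hold LL * RH <=
//  (2^I - 1)^2, and (2^I - 1)^2 + (2^I - 1) == (2^I - 1) * 2^I < 2^(2*I),
//  so the sum fits in the double-width type.)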
6232 Next = DAG.getNode(ISD::ADD, dl, VT, Next, Merge(Lo, Hi)); 6233 6234 if (!MakeMUL_LOHI(LH, RL, Lo, Hi, false)) 6235 return false; 6236 6237 SDValue Zero = DAG.getConstant(0, dl, HiLoVT); 6238 EVT BoolType = getSetCCResultType(DAG.getDataLayout(), *DAG.getContext(), VT); 6239 6240 bool UseGlue = (isOperationLegalOrCustom(ISD::ADDC, VT) && 6241 isOperationLegalOrCustom(ISD::ADDE, VT)); 6242 if (UseGlue) 6243 Next = DAG.getNode(ISD::ADDC, dl, DAG.getVTList(VT, MVT::Glue), Next, 6244 Merge(Lo, Hi)); 6245 else 6246 Next = DAG.getNode(ISD::ADDCARRY, dl, DAG.getVTList(VT, BoolType), Next, 6247 Merge(Lo, Hi), DAG.getConstant(0, dl, BoolType)); 6248 6249 SDValue Carry = Next.getValue(1); 6250 Result.push_back(DAG.getNode(ISD::TRUNCATE, dl, HiLoVT, Next)); 6251 Next = DAG.getNode(ISD::SRL, dl, VT, Next, Shift); 6252 6253 if (!MakeMUL_LOHI(LH, RH, Lo, Hi, Opcode == ISD::SMUL_LOHI)) 6254 return false; 6255 6256 if (UseGlue) 6257 Hi = DAG.getNode(ISD::ADDE, dl, DAG.getVTList(HiLoVT, MVT::Glue), Hi, Zero, 6258 Carry); 6259 else 6260 Hi = DAG.getNode(ISD::ADDCARRY, dl, DAG.getVTList(HiLoVT, BoolType), Hi, 6261 Zero, Carry); 6262 6263 Next = DAG.getNode(ISD::ADD, dl, VT, Next, Merge(Lo, Hi)); 6264 6265 if (Opcode == ISD::SMUL_LOHI) { 6266 SDValue NextSub = DAG.getNode(ISD::SUB, dl, VT, Next, 6267 DAG.getNode(ISD::ZERO_EXTEND, dl, VT, RL)); 6268 Next = DAG.getSelectCC(dl, LH, Zero, NextSub, Next, ISD::SETLT); 6269 6270 NextSub = DAG.getNode(ISD::SUB, dl, VT, Next, 6271 DAG.getNode(ISD::ZERO_EXTEND, dl, VT, LL)); 6272 Next = DAG.getSelectCC(dl, RH, Zero, NextSub, Next, ISD::SETLT); 6273 } 6274 6275 Result.push_back(DAG.getNode(ISD::TRUNCATE, dl, HiLoVT, Next)); 6276 Next = DAG.getNode(ISD::SRL, dl, VT, Next, Shift); 6277 Result.push_back(DAG.getNode(ISD::TRUNCATE, dl, HiLoVT, Next)); 6278 return true; 6279 } 6280 6281 bool TargetLowering::expandMUL(SDNode *N, SDValue &Lo, SDValue &Hi, EVT HiLoVT, 6282 SelectionDAG &DAG, MulExpansionKind Kind, 6283 SDValue LL, SDValue LH, SDValue RL, 6284 SDValue RH) const { 6285 SmallVector<SDValue, 2> Result; 6286 bool Ok = expandMUL_LOHI(N->getOpcode(), N->getValueType(0), SDLoc(N), 6287 N->getOperand(0), N->getOperand(1), Result, HiLoVT, 6288 DAG, Kind, LL, LH, RL, RH); 6289 if (Ok) { 6290 assert(Result.size() == 2); 6291 Lo = Result[0]; 6292 Hi = Result[1]; 6293 } 6294 return Ok; 6295 } 6296 6297 // Check that (every element of) Z is undef or not an exact multiple of BW. 6298 static bool isNonZeroModBitWidthOrUndef(SDValue Z, unsigned BW) { 6299 return ISD::matchUnaryPredicate( 6300 Z, 6301 [=](ConstantSDNode *C) { return !C || C->getAPIntValue().urem(BW) != 0; }, 6302 true); 6303 } 6304 6305 bool TargetLowering::expandFunnelShift(SDNode *Node, SDValue &Result, 6306 SelectionDAG &DAG) const { 6307 EVT VT = Node->getValueType(0); 6308 6309 if (VT.isVector() && (!isOperationLegalOrCustom(ISD::SHL, VT) || 6310 !isOperationLegalOrCustom(ISD::SRL, VT) || 6311 !isOperationLegalOrCustom(ISD::SUB, VT) || 6312 !isOperationLegalOrCustomOrPromote(ISD::OR, VT))) 6313 return false; 6314 6315 SDValue X = Node->getOperand(0); 6316 SDValue Y = Node->getOperand(1); 6317 SDValue Z = Node->getOperand(2); 6318 6319 unsigned BW = VT.getScalarSizeInBits(); 6320 bool IsFSHL = Node->getOpcode() == ISD::FSHL; 6321 SDLoc DL(SDValue(Node, 0)); 6322 6323 EVT ShVT = Z.getValueType(); 6324 6325 // If a funnel shift in the other direction is more supported, use it. 6326 unsigned RevOpcode = IsFSHL ? 
ISD::FSHR : ISD::FSHL;
  if (!isOperationLegalOrCustom(Node->getOpcode(), VT) &&
      isOperationLegalOrCustom(RevOpcode, VT) && isPowerOf2_32(BW)) {
    if (isNonZeroModBitWidthOrUndef(Z, BW)) {
      // fshl X, Y, Z -> fshr X, Y, -Z
      // fshr X, Y, Z -> fshl X, Y, -Z
      SDValue Zero = DAG.getConstant(0, DL, ShVT);
      Z = DAG.getNode(ISD::SUB, DL, ShVT, Zero, Z);
    } else {
      // fshl X, Y, Z -> fshr (srl X, 1), (fshr X, Y, 1), ~Z
      // fshr X, Y, Z -> fshl (fshl X, Y, 1), (shl Y, 1), ~Z
      SDValue One = DAG.getConstant(1, DL, ShVT);
      if (IsFSHL) {
        Y = DAG.getNode(RevOpcode, DL, VT, X, Y, One);
        X = DAG.getNode(ISD::SRL, DL, VT, X, One);
      } else {
        X = DAG.getNode(RevOpcode, DL, VT, X, Y, One);
        Y = DAG.getNode(ISD::SHL, DL, VT, Y, One);
      }
      Z = DAG.getNOT(DL, Z, ShVT);
    }
    Result = DAG.getNode(RevOpcode, DL, VT, X, Y, Z);
    return true;
  }

  SDValue ShX, ShY;
  SDValue ShAmt, InvShAmt;
  if (isNonZeroModBitWidthOrUndef(Z, BW)) {
    // fshl: X << C | Y >> (BW - C)
    // fshr: X << (BW - C) | Y >> C
    // where C = Z % BW is not zero
    SDValue BitWidthC = DAG.getConstant(BW, DL, ShVT);
    ShAmt = DAG.getNode(ISD::UREM, DL, ShVT, Z, BitWidthC);
    InvShAmt = DAG.getNode(ISD::SUB, DL, ShVT, BitWidthC, ShAmt);
    ShX = DAG.getNode(ISD::SHL, DL, VT, X, IsFSHL ? ShAmt : InvShAmt);
    ShY = DAG.getNode(ISD::SRL, DL, VT, Y, IsFSHL ? InvShAmt : ShAmt);
  } else {
    // fshl: X << (Z % BW) | Y >> 1 >> (BW - 1 - (Z % BW))
    // fshr: X << 1 << (BW - 1 - (Z % BW)) | Y >> (Z % BW)
    SDValue Mask = DAG.getConstant(BW - 1, DL, ShVT);
    if (isPowerOf2_32(BW)) {
      // Z % BW -> Z & (BW - 1)
      ShAmt = DAG.getNode(ISD::AND, DL, ShVT, Z, Mask);
      // (BW - 1) - (Z % BW) -> ~Z & (BW - 1)
      InvShAmt = DAG.getNode(ISD::AND, DL, ShVT, DAG.getNOT(DL, Z, ShVT), Mask);
    } else {
      SDValue BitWidthC = DAG.getConstant(BW, DL, ShVT);
      ShAmt = DAG.getNode(ISD::UREM, DL, ShVT, Z, BitWidthC);
      InvShAmt = DAG.getNode(ISD::SUB, DL, ShVT, Mask, ShAmt);
    }

    SDValue One = DAG.getConstant(1, DL, ShVT);
    if (IsFSHL) {
      ShX = DAG.getNode(ISD::SHL, DL, VT, X, ShAmt);
      SDValue ShY1 = DAG.getNode(ISD::SRL, DL, VT, Y, One);
      ShY = DAG.getNode(ISD::SRL, DL, VT, ShY1, InvShAmt);
    } else {
      SDValue ShX1 = DAG.getNode(ISD::SHL, DL, VT, X, One);
      ShX = DAG.getNode(ISD::SHL, DL, VT, ShX1, InvShAmt);
      ShY = DAG.getNode(ISD::SRL, DL, VT, Y, ShAmt);
    }
  }
  Result = DAG.getNode(ISD::OR, DL, VT, ShX, ShY);
  return true;
}

// TODO: Merge with expandFunnelShift.
bool TargetLowering::expandROT(SDNode *Node, bool AllowVectorOps,
                               SDValue &Result, SelectionDAG &DAG) const {
  EVT VT = Node->getValueType(0);
  unsigned EltSizeInBits = VT.getScalarSizeInBits();
  bool IsLeft = Node->getOpcode() == ISD::ROTL;
  SDValue Op0 = Node->getOperand(0);
  SDValue Op1 = Node->getOperand(1);
  SDLoc DL(SDValue(Node, 0));

  EVT ShVT = Op1.getValueType();
  SDValue Zero = DAG.getConstant(0, DL, ShVT);

  // If a rotate in the other direction is supported, use it.
  unsigned RevRot = IsLeft ?
ISD::ROTR : ISD::ROTL; 6407 if (isOperationLegalOrCustom(RevRot, VT) && isPowerOf2_32(EltSizeInBits)) { 6408 SDValue Sub = DAG.getNode(ISD::SUB, DL, ShVT, Zero, Op1); 6409 Result = DAG.getNode(RevRot, DL, VT, Op0, Sub); 6410 return true; 6411 } 6412 6413 if (!AllowVectorOps && VT.isVector() && 6414 (!isOperationLegalOrCustom(ISD::SHL, VT) || 6415 !isOperationLegalOrCustom(ISD::SRL, VT) || 6416 !isOperationLegalOrCustom(ISD::SUB, VT) || 6417 !isOperationLegalOrCustomOrPromote(ISD::OR, VT) || 6418 !isOperationLegalOrCustomOrPromote(ISD::AND, VT))) 6419 return false; 6420 6421 unsigned ShOpc = IsLeft ? ISD::SHL : ISD::SRL; 6422 unsigned HsOpc = IsLeft ? ISD::SRL : ISD::SHL; 6423 SDValue BitWidthMinusOneC = DAG.getConstant(EltSizeInBits - 1, DL, ShVT); 6424 SDValue ShVal; 6425 SDValue HsVal; 6426 if (isPowerOf2_32(EltSizeInBits)) { 6427 // (rotl x, c) -> x << (c & (w - 1)) | x >> (-c & (w - 1)) 6428 // (rotr x, c) -> x >> (c & (w - 1)) | x << (-c & (w - 1)) 6429 SDValue NegOp1 = DAG.getNode(ISD::SUB, DL, ShVT, Zero, Op1); 6430 SDValue ShAmt = DAG.getNode(ISD::AND, DL, ShVT, Op1, BitWidthMinusOneC); 6431 ShVal = DAG.getNode(ShOpc, DL, VT, Op0, ShAmt); 6432 SDValue HsAmt = DAG.getNode(ISD::AND, DL, ShVT, NegOp1, BitWidthMinusOneC); 6433 HsVal = DAG.getNode(HsOpc, DL, VT, Op0, HsAmt); 6434 } else { 6435 // (rotl x, c) -> x << (c % w) | x >> 1 >> (w - 1 - (c % w)) 6436 // (rotr x, c) -> x >> (c % w) | x << 1 << (w - 1 - (c % w)) 6437 SDValue BitWidthC = DAG.getConstant(EltSizeInBits, DL, ShVT); 6438 SDValue ShAmt = DAG.getNode(ISD::UREM, DL, ShVT, Op1, BitWidthC); 6439 ShVal = DAG.getNode(ShOpc, DL, VT, Op0, ShAmt); 6440 SDValue HsAmt = DAG.getNode(ISD::SUB, DL, ShVT, BitWidthMinusOneC, ShAmt); 6441 SDValue One = DAG.getConstant(1, DL, ShVT); 6442 HsVal = 6443 DAG.getNode(HsOpc, DL, VT, DAG.getNode(HsOpc, DL, VT, Op0, One), HsAmt); 6444 } 6445 Result = DAG.getNode(ISD::OR, DL, VT, ShVal, HsVal); 6446 return true; 6447 } 6448 6449 bool TargetLowering::expandFP_TO_SINT(SDNode *Node, SDValue &Result, 6450 SelectionDAG &DAG) const { 6451 unsigned OpNo = Node->isStrictFPOpcode() ? 1 : 0; 6452 SDValue Src = Node->getOperand(OpNo); 6453 EVT SrcVT = Src.getValueType(); 6454 EVT DstVT = Node->getValueType(0); 6455 SDLoc dl(SDValue(Node, 0)); 6456 6457 // FIXME: Only f32 to i64 conversions are supported. 6458 if (SrcVT != MVT::f32 || DstVT != MVT::i64) 6459 return false; 6460 6461 if (Node->isStrictFPOpcode()) 6462 // When a NaN is converted to an integer a trap is allowed. We can't 6463 // use this expansion here because it would eliminate that trap. Other 6464 // traps are also allowed and cannot be eliminated. See 6465 // IEEE 754-2008 sec 5.8. 
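    // (Concretely: a strict fp-to-int of a NaN input may be required to
    // trap, while the bit-twiddling expansion below would quietly produce
    // an ordinary integer value and lose that trap.)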
    return false;

  // Expand f32 -> i64 conversion.
  // This algorithm comes from compiler-rt's implementation of fixsfdi:
  // https://github.com/llvm/llvm-project/blob/master/compiler-rt/lib/builtins/fixsfdi.c
  unsigned SrcEltBits = SrcVT.getScalarSizeInBits();
  EVT IntVT = SrcVT.changeTypeToInteger();
  EVT IntShVT = getShiftAmountTy(IntVT, DAG.getDataLayout());

  SDValue ExponentMask = DAG.getConstant(0x7F800000, dl, IntVT);
  SDValue ExponentLoBit = DAG.getConstant(23, dl, IntVT);
  SDValue Bias = DAG.getConstant(127, dl, IntVT);
  SDValue SignMask = DAG.getConstant(APInt::getSignMask(SrcEltBits), dl, IntVT);
  SDValue SignLowBit = DAG.getConstant(SrcEltBits - 1, dl, IntVT);
  SDValue MantissaMask = DAG.getConstant(0x007FFFFF, dl, IntVT);

  SDValue Bits = DAG.getNode(ISD::BITCAST, dl, IntVT, Src);

  SDValue ExponentBits = DAG.getNode(
      ISD::SRL, dl, IntVT, DAG.getNode(ISD::AND, dl, IntVT, Bits, ExponentMask),
      DAG.getZExtOrTrunc(ExponentLoBit, dl, IntShVT));
  SDValue Exponent = DAG.getNode(ISD::SUB, dl, IntVT, ExponentBits, Bias);

  SDValue Sign = DAG.getNode(ISD::SRA, dl, IntVT,
                             DAG.getNode(ISD::AND, dl, IntVT, Bits, SignMask),
                             DAG.getZExtOrTrunc(SignLowBit, dl, IntShVT));
  Sign = DAG.getSExtOrTrunc(Sign, dl, DstVT);

  SDValue R = DAG.getNode(ISD::OR, dl, IntVT,
                          DAG.getNode(ISD::AND, dl, IntVT, Bits, MantissaMask),
                          DAG.getConstant(0x00800000, dl, IntVT));

  R = DAG.getZExtOrTrunc(R, dl, DstVT);

  R = DAG.getSelectCC(
      dl, Exponent, ExponentLoBit,
      DAG.getNode(ISD::SHL, dl, DstVT, R,
                  DAG.getZExtOrTrunc(
                      DAG.getNode(ISD::SUB, dl, IntVT, Exponent, ExponentLoBit),
                      dl, IntShVT)),
      DAG.getNode(ISD::SRL, dl, DstVT, R,
                  DAG.getZExtOrTrunc(
                      DAG.getNode(ISD::SUB, dl, IntVT, ExponentLoBit, Exponent),
                      dl, IntShVT)),
      ISD::SETGT);

  SDValue Ret = DAG.getNode(ISD::SUB, dl, DstVT,
                            DAG.getNode(ISD::XOR, dl, DstVT, R, Sign), Sign);

  Result = DAG.getSelectCC(dl, Exponent, DAG.getConstant(0, dl, IntVT),
                           DAG.getConstant(0, dl, DstVT), Ret, ISD::SETLT);
  return true;
}

bool TargetLowering::expandFP_TO_UINT(SDNode *Node, SDValue &Result,
                                      SDValue &Chain,
                                      SelectionDAG &DAG) const {
  SDLoc dl(SDValue(Node, 0));
  unsigned OpNo = Node->isStrictFPOpcode() ? 1 : 0;
  SDValue Src = Node->getOperand(OpNo);

  EVT SrcVT = Src.getValueType();
  EVT DstVT = Node->getValueType(0);
  EVT SetCCVT =
      getSetCCResultType(DAG.getDataLayout(), *DAG.getContext(), SrcVT);
  EVT DstSetCCVT =
      getSetCCResultType(DAG.getDataLayout(), *DAG.getContext(), DstVT);

  // Only expand vector types if we have the appropriate vector bit operations.
  unsigned SIntOpcode = Node->isStrictFPOpcode() ? ISD::STRICT_FP_TO_SINT :
                                                   ISD::FP_TO_SINT;
  if (DstVT.isVector() && (!isOperationLegalOrCustom(SIntOpcode, DstVT) ||
                           !isOperationLegalOrCustomOrPromote(ISD::XOR, SrcVT)))
    return false;

  // If the maximum float value is smaller than the signed integer range,
  // the destination signmask can't be represented by the float, so we can
  // just use FP_TO_SINT directly.
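  // (For instance, the largest finite f16 value is 65504, far below an i64's
  // signmask of 2^63, so for an f16 source every in-range input already fits
  // in the signed domain and no adjustment is needed.)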
  const fltSemantics &APFSem = DAG.EVTToAPFloatSemantics(SrcVT);
  APFloat APF(APFSem, APInt::getNullValue(SrcVT.getScalarSizeInBits()));
  APInt SignMask = APInt::getSignMask(DstVT.getScalarSizeInBits());
  if (APFloat::opOverflow &
      APF.convertFromAPInt(SignMask, false, APFloat::rmNearestTiesToEven)) {
    if (Node->isStrictFPOpcode()) {
      Result = DAG.getNode(ISD::STRICT_FP_TO_SINT, dl, { DstVT, MVT::Other },
                           { Node->getOperand(0), Src });
      Chain = Result.getValue(1);
    } else
      Result = DAG.getNode(ISD::FP_TO_SINT, dl, DstVT, Src);
    return true;
  }

  // Don't expand it if there isn't a cheap fsub instruction.
  if (!isOperationLegalOrCustom(
          Node->isStrictFPOpcode() ? ISD::STRICT_FSUB : ISD::FSUB, SrcVT))
    return false;

  SDValue Cst = DAG.getConstantFP(APF, dl, SrcVT);
  SDValue Sel;

  if (Node->isStrictFPOpcode()) {
    Sel = DAG.getSetCC(dl, SetCCVT, Src, Cst, ISD::SETLT,
                       Node->getOperand(0), /*IsSignaling*/ true);
    Chain = Sel.getValue(1);
  } else {
    Sel = DAG.getSetCC(dl, SetCCVT, Src, Cst, ISD::SETLT);
  }

  bool Strict = Node->isStrictFPOpcode() ||
                shouldUseStrictFP_TO_INT(SrcVT, DstVT, /*IsSigned*/ false);

  if (Strict) {
    // Expand based on the maximum range of FP_TO_SINT; if the value exceeds
    // the signmask, offset it first (the offset result should be fully
    // representable):
    // Sel = Src < 0x8000000000000000
    // FltOfs = select Sel, 0, 0x8000000000000000
    // IntOfs = select Sel, 0, 0x8000000000000000
    // Result = fp_to_sint(Src - FltOfs) ^ IntOfs

    // TODO: Should any fast-math-flags be set for the FSUB?
    SDValue FltOfs = DAG.getSelect(dl, SrcVT, Sel,
                                   DAG.getConstantFP(0.0, dl, SrcVT), Cst);
    Sel = DAG.getBoolExtOrTrunc(Sel, dl, DstSetCCVT, DstVT);
    SDValue IntOfs = DAG.getSelect(dl, DstVT, Sel,
                                   DAG.getConstant(0, dl, DstVT),
                                   DAG.getConstant(SignMask, dl, DstVT));
    SDValue SInt;
    if (Node->isStrictFPOpcode()) {
      SDValue Val = DAG.getNode(ISD::STRICT_FSUB, dl, { SrcVT, MVT::Other },
                                { Chain, Src, FltOfs });
      SInt = DAG.getNode(ISD::STRICT_FP_TO_SINT, dl, { DstVT, MVT::Other },
                         { Val.getValue(1), Val });
      Chain = SInt.getValue(1);
    } else {
      SDValue Val = DAG.getNode(ISD::FSUB, dl, SrcVT, Src, FltOfs);
      SInt = DAG.getNode(ISD::FP_TO_SINT, dl, DstVT, Val);
    }
    Result = DAG.getNode(ISD::XOR, dl, DstVT, SInt, IntOfs);
  } else {
    // Expand based on maximum range of FP_TO_SINT:
    // True = fp_to_sint(Src)
    // False = 0x8000000000000000 + fp_to_sint(Src - 0x8000000000000000)
    // Result = select (Src < 0x8000000000000000), True, False

    SDValue True = DAG.getNode(ISD::FP_TO_SINT, dl, DstVT, Src);
    // TODO: Should any fast-math-flags be set for the FSUB?
    SDValue False = DAG.getNode(ISD::FP_TO_SINT, dl, DstVT,
                                DAG.getNode(ISD::FSUB, dl, SrcVT, Src, Cst));
    False = DAG.getNode(ISD::XOR, dl, DstVT, False,
                        DAG.getConstant(SignMask, dl, DstVT));
    Sel = DAG.getBoolExtOrTrunc(Sel, dl, DstSetCCVT, DstVT);
    Result = DAG.getSelect(dl, DstVT, Sel, True, False);
  }
  return true;
}

bool TargetLowering::expandUINT_TO_FP(SDNode *Node, SDValue &Result,
                                      SDValue &Chain,
                                      SelectionDAG &DAG) const {
  // This transform is not correct for converting 0 when the rounding mode is
  // set to round toward negative infinity, where it produces -0.0, so disable
  // it under strictfp.
  if (Node->isStrictFPOpcode())
    return false;

  SDValue Src = Node->getOperand(0);
  EVT SrcVT = Src.getValueType();
  EVT DstVT = Node->getValueType(0);

  if (SrcVT.getScalarType() != MVT::i64 || DstVT.getScalarType() != MVT::f64)
    return false;

  // Only expand vector types if we have the appropriate vector bit operations.
  if (SrcVT.isVector() && (!isOperationLegalOrCustom(ISD::SRL, SrcVT) ||
                           !isOperationLegalOrCustom(ISD::FADD, DstVT) ||
                           !isOperationLegalOrCustom(ISD::FSUB, DstVT) ||
                           !isOperationLegalOrCustomOrPromote(ISD::OR, SrcVT) ||
                           !isOperationLegalOrCustomOrPromote(ISD::AND, SrcVT)))
    return false;

  SDLoc dl(SDValue(Node, 0));
  EVT ShiftVT = getShiftAmountTy(SrcVT, DAG.getDataLayout());

  // Implementation of unsigned i64 to f64 following the algorithm in
  // __floatundidf in compiler-rt. This implementation performs rounding
  // correctly in all rounding modes except when converting 0 while rounding
  // toward negative infinity: for a zero input the final fadd computes
  // 2^52 + (-2^52), and IEEE 754 rounds that exact-zero sum to -0.0 under
  // that mode, which is incorrect.
  SDValue TwoP52 = DAG.getConstant(UINT64_C(0x4330000000000000), dl, SrcVT);
  SDValue TwoP84PlusTwoP52 = DAG.getConstantFP(
      BitsToDouble(UINT64_C(0x4530000000100000)), dl, DstVT);
  SDValue TwoP84 = DAG.getConstant(UINT64_C(0x4530000000000000), dl, SrcVT);
  SDValue LoMask = DAG.getConstant(UINT64_C(0x00000000FFFFFFFF), dl, SrcVT);
  SDValue HiShift = DAG.getConstant(32, dl, ShiftVT);

  SDValue Lo = DAG.getNode(ISD::AND, dl, SrcVT, Src, LoMask);
  SDValue Hi = DAG.getNode(ISD::SRL, dl, SrcVT, Src, HiShift);
  SDValue LoOr = DAG.getNode(ISD::OR, dl, SrcVT, Lo, TwoP52);
  SDValue HiOr = DAG.getNode(ISD::OR, dl, SrcVT, Hi, TwoP84);
  SDValue LoFlt = DAG.getBitcast(DstVT, LoOr);
  SDValue HiFlt = DAG.getBitcast(DstVT, HiOr);
  SDValue HiSub =
      DAG.getNode(ISD::FSUB, dl, DstVT, HiFlt, TwoP84PlusTwoP52);
  Result = DAG.getNode(ISD::FADD, dl, DstVT, LoFlt, HiSub);
  return true;
}

SDValue TargetLowering::expandFMINNUM_FMAXNUM(SDNode *Node,
                                              SelectionDAG &DAG) const {
  SDLoc dl(Node);
  unsigned NewOp = Node->getOpcode() == ISD::FMINNUM ?
                   ISD::FMINNUM_IEEE : ISD::FMAXNUM_IEEE;
  EVT VT = Node->getValueType(0);

  if (VT.isScalableVector())
    report_fatal_error(
        "Expanding fminnum/fmaxnum for scalable vectors is undefined.");

  if (isOperationLegalOrCustom(NewOp, VT)) {
    SDValue Quiet0 = Node->getOperand(0);
    SDValue Quiet1 = Node->getOperand(1);

    if (!Node->getFlags().hasNoNaNs()) {
      // Insert canonicalizes if it's possible we need to quiet to get correct
      // sNaN behavior.
      if (!DAG.isKnownNeverSNaN(Quiet0)) {
        Quiet0 = DAG.getNode(ISD::FCANONICALIZE, dl, VT, Quiet0,
                             Node->getFlags());
      }
      if (!DAG.isKnownNeverSNaN(Quiet1)) {
        Quiet1 = DAG.getNode(ISD::FCANONICALIZE, dl, VT, Quiet1,
                             Node->getFlags());
      }
    }

    return DAG.getNode(NewOp, dl, VT, Quiet0, Quiet1, Node->getFlags());
  }

  // If the target has FMINIMUM/FMAXIMUM but not FMINNUM/FMAXNUM, use that
  // instead if there are no NaNs.
  if (Node->getFlags().hasNoNaNs()) {
    unsigned IEEE2018Op =
        Node->getOpcode() == ISD::FMINNUM ?
ISD::FMINIMUM : ISD::FMAXIMUM; 6709 if (isOperationLegalOrCustom(IEEE2018Op, VT)) { 6710 return DAG.getNode(IEEE2018Op, dl, VT, Node->getOperand(0), 6711 Node->getOperand(1), Node->getFlags()); 6712 } 6713 } 6714 6715 // If none of the above worked, but there are no NaNs, then expand to 6716 // a compare/select sequence. This is required for correctness since 6717 // InstCombine might have canonicalized a fcmp+select sequence to a 6718 // FMINNUM/FMAXNUM node. If we were to fall through to the default 6719 // expansion to libcall, we might introduce a link-time dependency 6720 // on libm into a file that originally did not have one. 6721 if (Node->getFlags().hasNoNaNs()) { 6722 ISD::CondCode Pred = 6723 Node->getOpcode() == ISD::FMINNUM ? ISD::SETLT : ISD::SETGT; 6724 SDValue Op1 = Node->getOperand(0); 6725 SDValue Op2 = Node->getOperand(1); 6726 SDValue SelCC = DAG.getSelectCC(dl, Op1, Op2, Op1, Op2, Pred); 6727 // Copy FMF flags, but always set the no-signed-zeros flag 6728 // as this is implied by the FMINNUM/FMAXNUM semantics. 6729 SDNodeFlags Flags = Node->getFlags(); 6730 Flags.setNoSignedZeros(true); 6731 SelCC->setFlags(Flags); 6732 return SelCC; 6733 } 6734 6735 return SDValue(); 6736 } 6737 6738 bool TargetLowering::expandCTPOP(SDNode *Node, SDValue &Result, 6739 SelectionDAG &DAG) const { 6740 SDLoc dl(Node); 6741 EVT VT = Node->getValueType(0); 6742 EVT ShVT = getShiftAmountTy(VT, DAG.getDataLayout()); 6743 SDValue Op = Node->getOperand(0); 6744 unsigned Len = VT.getScalarSizeInBits(); 6745 assert(VT.isInteger() && "CTPOP not implemented for this type."); 6746 6747 // TODO: Add support for irregular type lengths. 6748 if (!(Len <= 128 && Len % 8 == 0)) 6749 return false; 6750 6751 // Only expand vector types if we have the appropriate vector bit operations. 6752 if (VT.isVector() && (!isOperationLegalOrCustom(ISD::ADD, VT) || 6753 !isOperationLegalOrCustom(ISD::SUB, VT) || 6754 !isOperationLegalOrCustom(ISD::SRL, VT) || 6755 (Len != 8 && !isOperationLegalOrCustom(ISD::MUL, VT)) || 6756 !isOperationLegalOrCustomOrPromote(ISD::AND, VT))) 6757 return false; 6758 6759 // This is the "best" algorithm from 6760 // http://graphics.stanford.edu/~seander/bithacks.html#CountBitsSetParallel 6761 SDValue Mask55 = 6762 DAG.getConstant(APInt::getSplat(Len, APInt(8, 0x55)), dl, VT); 6763 SDValue Mask33 = 6764 DAG.getConstant(APInt::getSplat(Len, APInt(8, 0x33)), dl, VT); 6765 SDValue Mask0F = 6766 DAG.getConstant(APInt::getSplat(Len, APInt(8, 0x0F)), dl, VT); 6767 SDValue Mask01 = 6768 DAG.getConstant(APInt::getSplat(Len, APInt(8, 0x01)), dl, VT); 6769 6770 // v = v - ((v >> 1) & 0x55555555...) 6771 Op = DAG.getNode(ISD::SUB, dl, VT, Op, 6772 DAG.getNode(ISD::AND, dl, VT, 6773 DAG.getNode(ISD::SRL, dl, VT, Op, 6774 DAG.getConstant(1, dl, ShVT)), 6775 Mask55)); 6776 // v = (v & 0x33333333...) + ((v >> 2) & 0x33333333...) 6777 Op = DAG.getNode(ISD::ADD, dl, VT, DAG.getNode(ISD::AND, dl, VT, Op, Mask33), 6778 DAG.getNode(ISD::AND, dl, VT, 6779 DAG.getNode(ISD::SRL, dl, VT, Op, 6780 DAG.getConstant(2, dl, ShVT)), 6781 Mask33)); 6782 // v = (v + (v >> 4)) & 0x0F0F0F0F... 6783 Op = DAG.getNode(ISD::AND, dl, VT, 6784 DAG.getNode(ISD::ADD, dl, VT, Op, 6785 DAG.getNode(ISD::SRL, dl, VT, Op, 6786 DAG.getConstant(4, dl, ShVT))), 6787 Mask0F); 6788 // v = (v * 0x01010101...) 
>> (Len - 8) 6789 if (Len > 8) 6790 Op = 6791 DAG.getNode(ISD::SRL, dl, VT, DAG.getNode(ISD::MUL, dl, VT, Op, Mask01), 6792 DAG.getConstant(Len - 8, dl, ShVT)); 6793 6794 Result = Op; 6795 return true; 6796 } 6797 6798 bool TargetLowering::expandCTLZ(SDNode *Node, SDValue &Result, 6799 SelectionDAG &DAG) const { 6800 SDLoc dl(Node); 6801 EVT VT = Node->getValueType(0); 6802 EVT ShVT = getShiftAmountTy(VT, DAG.getDataLayout()); 6803 SDValue Op = Node->getOperand(0); 6804 unsigned NumBitsPerElt = VT.getScalarSizeInBits(); 6805 6806 // If the non-ZERO_UNDEF version is supported we can use that instead. 6807 if (Node->getOpcode() == ISD::CTLZ_ZERO_UNDEF && 6808 isOperationLegalOrCustom(ISD::CTLZ, VT)) { 6809 Result = DAG.getNode(ISD::CTLZ, dl, VT, Op); 6810 return true; 6811 } 6812 6813 // If the ZERO_UNDEF version is supported use that and handle the zero case. 6814 if (isOperationLegalOrCustom(ISD::CTLZ_ZERO_UNDEF, VT)) { 6815 EVT SetCCVT = 6816 getSetCCResultType(DAG.getDataLayout(), *DAG.getContext(), VT); 6817 SDValue CTLZ = DAG.getNode(ISD::CTLZ_ZERO_UNDEF, dl, VT, Op); 6818 SDValue Zero = DAG.getConstant(0, dl, VT); 6819 SDValue SrcIsZero = DAG.getSetCC(dl, SetCCVT, Op, Zero, ISD::SETEQ); 6820 Result = DAG.getNode(ISD::SELECT, dl, VT, SrcIsZero, 6821 DAG.getConstant(NumBitsPerElt, dl, VT), CTLZ); 6822 return true; 6823 } 6824 6825 // Only expand vector types if we have the appropriate vector bit operations. 6826 if (VT.isVector() && (!isPowerOf2_32(NumBitsPerElt) || 6827 !isOperationLegalOrCustom(ISD::CTPOP, VT) || 6828 !isOperationLegalOrCustom(ISD::SRL, VT) || 6829 !isOperationLegalOrCustomOrPromote(ISD::OR, VT))) 6830 return false; 6831 6832 // for now, we do this: 6833 // x = x | (x >> 1); 6834 // x = x | (x >> 2); 6835 // ... 6836 // x = x | (x >>16); 6837 // x = x | (x >>32); // for 64-bit input 6838 // return popcount(~x); 6839 // 6840 // Ref: "Hacker's Delight" by Henry Warren 6841 for (unsigned i = 0; (1U << i) <= (NumBitsPerElt / 2); ++i) { 6842 SDValue Tmp = DAG.getConstant(1ULL << i, dl, ShVT); 6843 Op = DAG.getNode(ISD::OR, dl, VT, Op, 6844 DAG.getNode(ISD::SRL, dl, VT, Op, Tmp)); 6845 } 6846 Op = DAG.getNOT(dl, Op, VT); 6847 Result = DAG.getNode(ISD::CTPOP, dl, VT, Op); 6848 return true; 6849 } 6850 6851 bool TargetLowering::expandCTTZ(SDNode *Node, SDValue &Result, 6852 SelectionDAG &DAG) const { 6853 SDLoc dl(Node); 6854 EVT VT = Node->getValueType(0); 6855 SDValue Op = Node->getOperand(0); 6856 unsigned NumBitsPerElt = VT.getScalarSizeInBits(); 6857 6858 // If the non-ZERO_UNDEF version is supported we can use that instead. 6859 if (Node->getOpcode() == ISD::CTTZ_ZERO_UNDEF && 6860 isOperationLegalOrCustom(ISD::CTTZ, VT)) { 6861 Result = DAG.getNode(ISD::CTTZ, dl, VT, Op); 6862 return true; 6863 } 6864 6865 // If the ZERO_UNDEF version is supported use that and handle the zero case. 6866 if (isOperationLegalOrCustom(ISD::CTTZ_ZERO_UNDEF, VT)) { 6867 EVT SetCCVT = 6868 getSetCCResultType(DAG.getDataLayout(), *DAG.getContext(), VT); 6869 SDValue CTTZ = DAG.getNode(ISD::CTTZ_ZERO_UNDEF, dl, VT, Op); 6870 SDValue Zero = DAG.getConstant(0, dl, VT); 6871 SDValue SrcIsZero = DAG.getSetCC(dl, SetCCVT, Op, Zero, ISD::SETEQ); 6872 Result = DAG.getNode(ISD::SELECT, dl, VT, SrcIsZero, 6873 DAG.getConstant(NumBitsPerElt, dl, VT), CTTZ); 6874 return true; 6875 } 6876 6877 // Only expand vector types if we have the appropriate vector bit operations. 
6878 if (VT.isVector() && (!isPowerOf2_32(NumBitsPerElt) || 6879 (!isOperationLegalOrCustom(ISD::CTPOP, VT) && 6880 !isOperationLegalOrCustom(ISD::CTLZ, VT)) || 6881 !isOperationLegalOrCustom(ISD::SUB, VT) || 6882 !isOperationLegalOrCustomOrPromote(ISD::AND, VT) || 6883 !isOperationLegalOrCustomOrPromote(ISD::XOR, VT))) 6884 return false; 6885 6886 // for now, we use: { return popcount(~x & (x - 1)); } 6887 // unless the target has ctlz but not ctpop, in which case we use: 6888 // { return 32 - nlz(~x & (x-1)); } 6889 // Ref: "Hacker's Delight" by Henry Warren 6890 SDValue Tmp = DAG.getNode( 6891 ISD::AND, dl, VT, DAG.getNOT(dl, Op, VT), 6892 DAG.getNode(ISD::SUB, dl, VT, Op, DAG.getConstant(1, dl, VT))); 6893 6894 // If ISD::CTLZ is legal and CTPOP isn't, then do that instead. 6895 if (isOperationLegal(ISD::CTLZ, VT) && !isOperationLegal(ISD::CTPOP, VT)) { 6896 Result = 6897 DAG.getNode(ISD::SUB, dl, VT, DAG.getConstant(NumBitsPerElt, dl, VT), 6898 DAG.getNode(ISD::CTLZ, dl, VT, Tmp)); 6899 return true; 6900 } 6901 6902 Result = DAG.getNode(ISD::CTPOP, dl, VT, Tmp); 6903 return true; 6904 } 6905 6906 bool TargetLowering::expandABS(SDNode *N, SDValue &Result, 6907 SelectionDAG &DAG, bool IsNegative) const { 6908 SDLoc dl(N); 6909 EVT VT = N->getValueType(0); 6910 EVT ShVT = getShiftAmountTy(VT, DAG.getDataLayout()); 6911 SDValue Op = N->getOperand(0); 6912 6913 // abs(x) -> smax(x,sub(0,x)) 6914 if (!IsNegative && isOperationLegal(ISD::SUB, VT) && 6915 isOperationLegal(ISD::SMAX, VT)) { 6916 SDValue Zero = DAG.getConstant(0, dl, VT); 6917 Result = DAG.getNode(ISD::SMAX, dl, VT, Op, 6918 DAG.getNode(ISD::SUB, dl, VT, Zero, Op)); 6919 return true; 6920 } 6921 6922 // abs(x) -> umin(x,sub(0,x)) 6923 if (!IsNegative && isOperationLegal(ISD::SUB, VT) && 6924 isOperationLegal(ISD::UMIN, VT)) { 6925 SDValue Zero = DAG.getConstant(0, dl, VT); 6926 Result = DAG.getNode(ISD::UMIN, dl, VT, Op, 6927 DAG.getNode(ISD::SUB, dl, VT, Zero, Op)); 6928 return true; 6929 } 6930 6931 // 0 - abs(x) -> smin(x, sub(0,x)) 6932 if (IsNegative && isOperationLegal(ISD::SUB, VT) && 6933 isOperationLegal(ISD::SMIN, VT)) { 6934 SDValue Zero = DAG.getConstant(0, dl, VT); 6935 Result = DAG.getNode(ISD::SMIN, dl, VT, Op, 6936 DAG.getNode(ISD::SUB, dl, VT, Zero, Op)); 6937 return true; 6938 } 6939 6940 // Only expand vector types if we have the appropriate vector operations. 
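  // (The fallback below is the classic sign-mask bithack: with
  // M = x >>s (BW - 1), abs(x) == (x + M) ^ M, and the negated form is
  // 0 - abs(x) == M - (x ^ M).)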
6941 if (VT.isVector() && 6942 (!isOperationLegalOrCustom(ISD::SRA, VT) || 6943 (!IsNegative && !isOperationLegalOrCustom(ISD::ADD, VT)) || 6944 (IsNegative && !isOperationLegalOrCustom(ISD::SUB, VT)) || 6945 !isOperationLegalOrCustomOrPromote(ISD::XOR, VT))) 6946 return false; 6947 6948 SDValue Shift = 6949 DAG.getNode(ISD::SRA, dl, VT, Op, 6950 DAG.getConstant(VT.getScalarSizeInBits() - 1, dl, ShVT)); 6951 if (!IsNegative) { 6952 SDValue Add = DAG.getNode(ISD::ADD, dl, VT, Op, Shift); 6953 Result = DAG.getNode(ISD::XOR, dl, VT, Add, Shift); 6954 } else { 6955 // 0 - abs(x) -> Y = sra (X, size(X)-1); sub (Y, xor (X, Y)) 6956 SDValue Xor = DAG.getNode(ISD::XOR, dl, VT, Op, Shift); 6957 Result = DAG.getNode(ISD::SUB, dl, VT, Shift, Xor); 6958 } 6959 return true; 6960 } 6961 6962 std::pair<SDValue, SDValue> 6963 TargetLowering::scalarizeVectorLoad(LoadSDNode *LD, 6964 SelectionDAG &DAG) const { 6965 SDLoc SL(LD); 6966 SDValue Chain = LD->getChain(); 6967 SDValue BasePTR = LD->getBasePtr(); 6968 EVT SrcVT = LD->getMemoryVT(); 6969 EVT DstVT = LD->getValueType(0); 6970 ISD::LoadExtType ExtType = LD->getExtensionType(); 6971 6972 if (SrcVT.isScalableVector()) 6973 report_fatal_error("Cannot scalarize scalable vector loads"); 6974 6975 unsigned NumElem = SrcVT.getVectorNumElements(); 6976 6977 EVT SrcEltVT = SrcVT.getScalarType(); 6978 EVT DstEltVT = DstVT.getScalarType(); 6979 6980 // A vector must always be stored in memory as-is, i.e. without any padding 6981 // between the elements, since various code depend on it, e.g. in the 6982 // handling of a bitcast of a vector type to int, which may be done with a 6983 // vector store followed by an integer load. A vector that does not have 6984 // elements that are byte-sized must therefore be stored as an integer 6985 // built out of the extracted vector elements. 6986 if (!SrcEltVT.isByteSized()) { 6987 unsigned NumLoadBits = SrcVT.getStoreSizeInBits(); 6988 EVT LoadVT = EVT::getIntegerVT(*DAG.getContext(), NumLoadBits); 6989 6990 unsigned NumSrcBits = SrcVT.getSizeInBits(); 6991 EVT SrcIntVT = EVT::getIntegerVT(*DAG.getContext(), NumSrcBits); 6992 6993 unsigned SrcEltBits = SrcEltVT.getSizeInBits(); 6994 SDValue SrcEltBitMask = DAG.getConstant( 6995 APInt::getLowBitsSet(NumLoadBits, SrcEltBits), SL, LoadVT); 6996 6997 // Load the whole vector and avoid masking off the top bits as it makes 6998 // the codegen worse. 6999 SDValue Load = 7000 DAG.getExtLoad(ISD::EXTLOAD, SL, LoadVT, Chain, BasePTR, 7001 LD->getPointerInfo(), SrcIntVT, LD->getOriginalAlign(), 7002 LD->getMemOperand()->getFlags(), LD->getAAInfo()); 7003 7004 SmallVector<SDValue, 8> Vals; 7005 for (unsigned Idx = 0; Idx < NumElem; ++Idx) { 7006 unsigned ShiftIntoIdx = 7007 (DAG.getDataLayout().isBigEndian() ? 
(NumElem - 1) - Idx : Idx); 7008 SDValue ShiftAmount = 7009 DAG.getShiftAmountConstant(ShiftIntoIdx * SrcEltVT.getSizeInBits(), 7010 LoadVT, SL, /*LegalTypes=*/false); 7011 SDValue ShiftedElt = DAG.getNode(ISD::SRL, SL, LoadVT, Load, ShiftAmount); 7012 SDValue Elt = 7013 DAG.getNode(ISD::AND, SL, LoadVT, ShiftedElt, SrcEltBitMask); 7014 SDValue Scalar = DAG.getNode(ISD::TRUNCATE, SL, SrcEltVT, Elt); 7015 7016 if (ExtType != ISD::NON_EXTLOAD) { 7017 unsigned ExtendOp = ISD::getExtForLoadExtType(false, ExtType); 7018 Scalar = DAG.getNode(ExtendOp, SL, DstEltVT, Scalar); 7019 } 7020 7021 Vals.push_back(Scalar); 7022 } 7023 7024 SDValue Value = DAG.getBuildVector(DstVT, SL, Vals); 7025 return std::make_pair(Value, Load.getValue(1)); 7026 } 7027 7028 unsigned Stride = SrcEltVT.getSizeInBits() / 8; 7029 assert(SrcEltVT.isByteSized()); 7030 7031 SmallVector<SDValue, 8> Vals; 7032 SmallVector<SDValue, 8> LoadChains; 7033 7034 for (unsigned Idx = 0; Idx < NumElem; ++Idx) { 7035 SDValue ScalarLoad = 7036 DAG.getExtLoad(ExtType, SL, DstEltVT, Chain, BasePTR, 7037 LD->getPointerInfo().getWithOffset(Idx * Stride), 7038 SrcEltVT, LD->getOriginalAlign(), 7039 LD->getMemOperand()->getFlags(), LD->getAAInfo()); 7040 7041 BasePTR = DAG.getObjectPtrOffset(SL, BasePTR, TypeSize::Fixed(Stride)); 7042 7043 Vals.push_back(ScalarLoad.getValue(0)); 7044 LoadChains.push_back(ScalarLoad.getValue(1)); 7045 } 7046 7047 SDValue NewChain = DAG.getNode(ISD::TokenFactor, SL, MVT::Other, LoadChains); 7048 SDValue Value = DAG.getBuildVector(DstVT, SL, Vals); 7049 7050 return std::make_pair(Value, NewChain); 7051 } 7052 7053 SDValue TargetLowering::scalarizeVectorStore(StoreSDNode *ST, 7054 SelectionDAG &DAG) const { 7055 SDLoc SL(ST); 7056 7057 SDValue Chain = ST->getChain(); 7058 SDValue BasePtr = ST->getBasePtr(); 7059 SDValue Value = ST->getValue(); 7060 EVT StVT = ST->getMemoryVT(); 7061 7062 if (StVT.isScalableVector()) 7063 report_fatal_error("Cannot scalarize scalable vector stores"); 7064 7065 // The type of the data we want to save 7066 EVT RegVT = Value.getValueType(); 7067 EVT RegSclVT = RegVT.getScalarType(); 7068 7069 // The type of data as saved in memory. 7070 EVT MemSclVT = StVT.getScalarType(); 7071 7072 unsigned NumElem = StVT.getVectorNumElements(); 7073 7074 // A vector must always be stored in memory as-is, i.e. without any padding 7075 // between the elements, since various code depend on it, e.g. in the 7076 // handling of a bitcast of a vector type to int, which may be done with a 7077 // vector store followed by an integer load. A vector that does not have 7078 // elements that are byte-sized must therefore be stored as an integer 7079 // built out of the extracted vector elements. 7080 if (!MemSclVT.isByteSized()) { 7081 unsigned NumBits = StVT.getSizeInBits(); 7082 EVT IntVT = EVT::getIntegerVT(*DAG.getContext(), NumBits); 7083 7084 SDValue CurrVal = DAG.getConstant(0, SL, IntVT); 7085 7086 for (unsigned Idx = 0; Idx < NumElem; ++Idx) { 7087 SDValue Elt = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, SL, RegSclVT, Value, 7088 DAG.getVectorIdxConstant(Idx, SL)); 7089 SDValue Trunc = DAG.getNode(ISD::TRUNCATE, SL, MemSclVT, Elt); 7090 SDValue ExtElt = DAG.getNode(ISD::ZERO_EXTEND, SL, IntVT, Trunc); 7091 unsigned ShiftIntoIdx = 7092 (DAG.getDataLayout().isBigEndian() ? 
(NumElem - 1) - Idx : Idx);
      SDValue ShiftAmount =
          DAG.getConstant(ShiftIntoIdx * MemSclVT.getSizeInBits(), SL, IntVT);
      SDValue ShiftedElt =
          DAG.getNode(ISD::SHL, SL, IntVT, ExtElt, ShiftAmount);
      CurrVal = DAG.getNode(ISD::OR, SL, IntVT, CurrVal, ShiftedElt);
    }

    return DAG.getStore(Chain, SL, CurrVal, BasePtr, ST->getPointerInfo(),
                        ST->getOriginalAlign(), ST->getMemOperand()->getFlags(),
                        ST->getAAInfo());
  }

  // The store stride, in bytes.
  unsigned Stride = MemSclVT.getSizeInBits() / 8;
  assert(Stride && "Zero stride!");
  // Extract each of the elements from the original vector and save them into
  // memory individually.
  SmallVector<SDValue, 8> Stores;
  for (unsigned Idx = 0; Idx < NumElem; ++Idx) {
    SDValue Elt = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, SL, RegSclVT, Value,
                              DAG.getVectorIdxConstant(Idx, SL));

    SDValue Ptr =
        DAG.getObjectPtrOffset(SL, BasePtr, TypeSize::Fixed(Idx * Stride));

    // This scalar TruncStore may be illegal, but we legalize it later.
    SDValue Store = DAG.getTruncStore(
        Chain, SL, Elt, Ptr, ST->getPointerInfo().getWithOffset(Idx * Stride),
        MemSclVT, ST->getOriginalAlign(), ST->getMemOperand()->getFlags(),
        ST->getAAInfo());

    Stores.push_back(Store);
  }

  return DAG.getNode(ISD::TokenFactor, SL, MVT::Other, Stores);
}

std::pair<SDValue, SDValue>
TargetLowering::expandUnalignedLoad(LoadSDNode *LD, SelectionDAG &DAG) const {
  assert(LD->getAddressingMode() == ISD::UNINDEXED &&
         "unaligned indexed loads not implemented!");
  SDValue Chain = LD->getChain();
  SDValue Ptr = LD->getBasePtr();
  EVT VT = LD->getValueType(0);
  EVT LoadedVT = LD->getMemoryVT();
  SDLoc dl(LD);
  auto &MF = DAG.getMachineFunction();

  if (VT.isFloatingPoint() || VT.isVector()) {
    EVT intVT = EVT::getIntegerVT(*DAG.getContext(), LoadedVT.getSizeInBits());
    if (isTypeLegal(intVT) && isTypeLegal(LoadedVT)) {
      if (!isOperationLegalOrCustom(ISD::LOAD, intVT) &&
          LoadedVT.isVector()) {
        // Scalarize the load and let the individual components be handled.
        return scalarizeVectorLoad(LD, DAG);
      }

      // Expand to a (misaligned) integer load of the same size,
      // then bitconvert to floating point or vector.
      SDValue newLoad = DAG.getLoad(intVT, dl, Chain, Ptr,
                                    LD->getMemOperand());
      SDValue Result = DAG.getNode(ISD::BITCAST, dl, LoadedVT, newLoad);
      if (LoadedVT != VT)
        Result = DAG.getNode(VT.isFloatingPoint() ? ISD::FP_EXTEND :
                             ISD::ANY_EXTEND, dl, VT, Result);

      return std::make_pair(Result, newLoad.getValue(1));
    }

    // Copy the value to an (aligned) stack slot using (unaligned) integer
    // loads and stores, then do an (aligned) load from the stack slot.
    MVT RegVT = getRegisterType(*DAG.getContext(), intVT);
    unsigned LoadedBytes = LoadedVT.getStoreSize();
    unsigned RegBytes = RegVT.getSizeInBits() / 8;
    unsigned NumRegs = (LoadedBytes + RegBytes - 1) / RegBytes;

    // Make sure the stack slot is also aligned for the register type.
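    // (Illustrative sizing, assuming an unaligned x86_fp80 load copied via
    // i32 registers: LoadedBytes = 10 and RegBytes = 4 give NumRegs = 3,
    // i.e. two full-width copies plus one final 16-bit extending copy.)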
    SDValue StackBase = DAG.CreateStackTemporary(LoadedVT, RegVT);
    auto FrameIndex = cast<FrameIndexSDNode>(StackBase.getNode())->getIndex();
    SmallVector<SDValue, 8> Stores;
    SDValue StackPtr = StackBase;
    unsigned Offset = 0;

    EVT PtrVT = Ptr.getValueType();
    EVT StackPtrVT = StackPtr.getValueType();

    SDValue PtrIncrement = DAG.getConstant(RegBytes, dl, PtrVT);
    SDValue StackPtrIncrement = DAG.getConstant(RegBytes, dl, StackPtrVT);

    // Do all but one of the copies using the full register width.
    for (unsigned i = 1; i < NumRegs; i++) {
      // Load one integer register's worth from the original location.
      SDValue Load = DAG.getLoad(
          RegVT, dl, Chain, Ptr, LD->getPointerInfo().getWithOffset(Offset),
          LD->getOriginalAlign(), LD->getMemOperand()->getFlags(),
          LD->getAAInfo());
      // Follow the load with a store to the stack slot. Remember the store.
      Stores.push_back(DAG.getStore(
          Load.getValue(1), dl, Load, StackPtr,
          MachinePointerInfo::getFixedStack(MF, FrameIndex, Offset)));
      // Increment the pointers.
      Offset += RegBytes;

      Ptr = DAG.getObjectPtrOffset(dl, Ptr, PtrIncrement);
      StackPtr = DAG.getObjectPtrOffset(dl, StackPtr, StackPtrIncrement);
    }

    // The last copy may be partial. Do an extending load.
    EVT MemVT = EVT::getIntegerVT(*DAG.getContext(),
                                  8 * (LoadedBytes - Offset));
    SDValue Load =
        DAG.getExtLoad(ISD::EXTLOAD, dl, RegVT, Chain, Ptr,
                       LD->getPointerInfo().getWithOffset(Offset), MemVT,
                       LD->getOriginalAlign(), LD->getMemOperand()->getFlags(),
                       LD->getAAInfo());
    // Follow the load with a store to the stack slot. Remember the store.
    // On big-endian machines this requires a truncating store to ensure
    // that the bits end up in the right place.
    Stores.push_back(DAG.getTruncStore(
        Load.getValue(1), dl, Load, StackPtr,
        MachinePointerInfo::getFixedStack(MF, FrameIndex, Offset), MemVT));

    // The order of the stores doesn't matter - say it with a TokenFactor.
    SDValue TF = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, Stores);

    // Finally, perform the original load, only redirected to the stack slot.
    Load = DAG.getExtLoad(LD->getExtensionType(), dl, VT, TF, StackBase,
                          MachinePointerInfo::getFixedStack(MF, FrameIndex, 0),
                          LoadedVT);

    // Callers expect a MERGE_VALUES node.
    return std::make_pair(Load, TF);
  }

  assert(LoadedVT.isInteger() && !LoadedVT.isVector() &&
         "Unaligned load of unsupported type.");

  // Compute the new VT that is half the size of the old one. This is an
  // integer MVT.
  unsigned NumBits = LoadedVT.getSizeInBits();
  EVT NewLoadedVT = EVT::getIntegerVT(*DAG.getContext(), NumBits / 2);
  NumBits >>= 1;

  Align Alignment = LD->getOriginalAlign();
  unsigned IncrementSize = NumBits / 8;
  ISD::LoadExtType HiExtType = LD->getExtensionType();

  // If the original load is NON_EXTLOAD, the hi part load must be ZEXTLOAD.
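  // The lo part is always loaded with ZEXTLOAD, so that the OR reassembling
  // the value below cannot pick up stray high bits.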
  if (HiExtType == ISD::NON_EXTLOAD)
    HiExtType = ISD::ZEXTLOAD;

  // Load the value in two parts.
  SDValue Lo, Hi;
  if (DAG.getDataLayout().isLittleEndian()) {
    Lo = DAG.getExtLoad(ISD::ZEXTLOAD, dl, VT, Chain, Ptr, LD->getPointerInfo(),
                        NewLoadedVT, Alignment, LD->getMemOperand()->getFlags(),
                        LD->getAAInfo());

    Ptr = DAG.getObjectPtrOffset(dl, Ptr, TypeSize::Fixed(IncrementSize));
    Hi = DAG.getExtLoad(HiExtType, dl, VT, Chain, Ptr,
                        LD->getPointerInfo().getWithOffset(IncrementSize),
                        NewLoadedVT, Alignment, LD->getMemOperand()->getFlags(),
                        LD->getAAInfo());
  } else {
    Hi = DAG.getExtLoad(HiExtType, dl, VT, Chain, Ptr, LD->getPointerInfo(),
                        NewLoadedVT, Alignment, LD->getMemOperand()->getFlags(),
                        LD->getAAInfo());

    Ptr = DAG.getObjectPtrOffset(dl, Ptr, TypeSize::Fixed(IncrementSize));
    Lo = DAG.getExtLoad(ISD::ZEXTLOAD, dl, VT, Chain, Ptr,
                        LD->getPointerInfo().getWithOffset(IncrementSize),
                        NewLoadedVT, Alignment, LD->getMemOperand()->getFlags(),
                        LD->getAAInfo());
  }

  // Aggregate the two parts.
  SDValue ShiftAmount =
      DAG.getConstant(NumBits, dl, getShiftAmountTy(Hi.getValueType(),
                                                    DAG.getDataLayout()));
  SDValue Result = DAG.getNode(ISD::SHL, dl, VT, Hi, ShiftAmount);
  Result = DAG.getNode(ISD::OR, dl, VT, Result, Lo);

  SDValue TF = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, Lo.getValue(1),
                           Hi.getValue(1));

  return std::make_pair(Result, TF);
}

SDValue TargetLowering::expandUnalignedStore(StoreSDNode *ST,
                                             SelectionDAG &DAG) const {
  assert(ST->getAddressingMode() == ISD::UNINDEXED &&
         "unaligned indexed stores not implemented!");
  SDValue Chain = ST->getChain();
  SDValue Ptr = ST->getBasePtr();
  SDValue Val = ST->getValue();
  EVT VT = Val.getValueType();
  Align Alignment = ST->getOriginalAlign();
  auto &MF = DAG.getMachineFunction();
  EVT StoreMemVT = ST->getMemoryVT();

  SDLoc dl(ST);
  if (StoreMemVT.isFloatingPoint() || StoreMemVT.isVector()) {
    EVT intVT = EVT::getIntegerVT(*DAG.getContext(), VT.getSizeInBits());
    if (isTypeLegal(intVT)) {
      if (!isOperationLegalOrCustom(ISD::STORE, intVT) &&
          StoreMemVT.isVector()) {
        // Scalarize the store and let the individual components be handled.
        SDValue Result = scalarizeVectorStore(ST, DAG);
        return Result;
      }
      // Expand to a bitconvert of the value to the integer type of the
      // same size, then a (misaligned) int store.
      // FIXME: Does not handle truncating floating point stores!
      SDValue Result = DAG.getNode(ISD::BITCAST, dl, intVT, Val);
      Result = DAG.getStore(Chain, dl, Result, Ptr, ST->getPointerInfo(),
                            Alignment, ST->getMemOperand()->getFlags());
      return Result;
    }
    // Do an (aligned) store to a stack slot, then copy from the stack slot
    // to the final destination using (unaligned) integer loads and stores.
    MVT RegVT = getRegisterType(
        *DAG.getContext(),
        EVT::getIntegerVT(*DAG.getContext(), StoreMemVT.getSizeInBits()));
    EVT PtrVT = Ptr.getValueType();
    unsigned StoredBytes = StoreMemVT.getStoreSize();
    unsigned RegBytes = RegVT.getSizeInBits() / 8;
    unsigned NumRegs = (StoredBytes + RegBytes - 1) / RegBytes;

    // Make sure the stack slot is also aligned for the register type.
    SDValue StackPtr = DAG.CreateStackTemporary(StoreMemVT, RegVT);
    auto FrameIndex = cast<FrameIndexSDNode>(StackPtr.getNode())->getIndex();

    // Perform the original store, only redirected to the stack slot.
    SDValue Store = DAG.getTruncStore(
        Chain, dl, Val, StackPtr,
        MachinePointerInfo::getFixedStack(MF, FrameIndex, 0), StoreMemVT);

    EVT StackPtrVT = StackPtr.getValueType();

    SDValue PtrIncrement = DAG.getConstant(RegBytes, dl, PtrVT);
    SDValue StackPtrIncrement = DAG.getConstant(RegBytes, dl, StackPtrVT);
    SmallVector<SDValue, 8> Stores;
    unsigned Offset = 0;

    // Do all but one of the copies using the full register width.
    for (unsigned i = 1; i < NumRegs; i++) {
      // Load one integer register's worth from the stack slot.
      SDValue Load = DAG.getLoad(
          RegVT, dl, Store, StackPtr,
          MachinePointerInfo::getFixedStack(MF, FrameIndex, Offset));
      // Store it to the final location. Remember the store.
      Stores.push_back(DAG.getStore(Load.getValue(1), dl, Load, Ptr,
                                    ST->getPointerInfo().getWithOffset(Offset),
                                    ST->getOriginalAlign(),
                                    ST->getMemOperand()->getFlags()));
      // Increment the pointers.
      Offset += RegBytes;
      StackPtr = DAG.getObjectPtrOffset(dl, StackPtr, StackPtrIncrement);
      Ptr = DAG.getObjectPtrOffset(dl, Ptr, PtrIncrement);
    }

    // The last store may be partial. Do a truncating store. On big-endian
    // machines this requires an extending load from the stack slot to ensure
    // that the bits are in the right place.
    EVT LoadMemVT =
        EVT::getIntegerVT(*DAG.getContext(), 8 * (StoredBytes - Offset));

    // Load from the stack slot.
    SDValue Load = DAG.getExtLoad(
        ISD::EXTLOAD, dl, RegVT, Store, StackPtr,
        MachinePointerInfo::getFixedStack(MF, FrameIndex, Offset), LoadMemVT);

    Stores.push_back(
        DAG.getTruncStore(Load.getValue(1), dl, Load, Ptr,
                          ST->getPointerInfo().getWithOffset(Offset), LoadMemVT,
                          ST->getOriginalAlign(),
                          ST->getMemOperand()->getFlags(), ST->getAAInfo()));
    // The order of the stores doesn't matter - say it with a TokenFactor.
    SDValue Result = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, Stores);
    return Result;
  }

  assert(StoreMemVT.isInteger() && !StoreMemVT.isVector() &&
         "Unaligned store of unknown type.");
  // Get the half-sized VT.
  EVT NewStoredVT = StoreMemVT.getHalfSizedIntegerVT(*DAG.getContext());
  unsigned NumBits = NewStoredVT.getFixedSizeInBits();
  unsigned IncrementSize = NumBits / 8;

  // Divide the stored value into two parts.
  SDValue ShiftAmount = DAG.getConstant(
      NumBits, dl, getShiftAmountTy(Val.getValueType(), DAG.getDataLayout()));
  SDValue Lo = Val;
  SDValue Hi = DAG.getNode(ISD::SRL, dl, VT, Val, ShiftAmount);

  // Store the two parts.
  SDValue Store1, Store2;
  Store1 = DAG.getTruncStore(Chain, dl,
                             DAG.getDataLayout().isLittleEndian() ? Lo : Hi,
                             Ptr, ST->getPointerInfo(), NewStoredVT, Alignment,
                             ST->getMemOperand()->getFlags());

  Ptr = DAG.getObjectPtrOffset(dl, Ptr, TypeSize::Fixed(IncrementSize));
  Store2 = DAG.getTruncStore(
      Chain, dl, DAG.getDataLayout().isLittleEndian() ?
Hi : Lo, Ptr, 7399 ST->getPointerInfo().getWithOffset(IncrementSize), NewStoredVT, Alignment, 7400 ST->getMemOperand()->getFlags(), ST->getAAInfo()); 7401 7402 SDValue Result = 7403 DAG.getNode(ISD::TokenFactor, dl, MVT::Other, Store1, Store2); 7404 return Result; 7405 } 7406 7407 SDValue 7408 TargetLowering::IncrementMemoryAddress(SDValue Addr, SDValue Mask, 7409 const SDLoc &DL, EVT DataVT, 7410 SelectionDAG &DAG, 7411 bool IsCompressedMemory) const { 7412 SDValue Increment; 7413 EVT AddrVT = Addr.getValueType(); 7414 EVT MaskVT = Mask.getValueType(); 7415 assert(DataVT.getVectorElementCount() == MaskVT.getVectorElementCount() && 7416 "Incompatible types of Data and Mask"); 7417 if (IsCompressedMemory) { 7418 if (DataVT.isScalableVector()) 7419 report_fatal_error( 7420 "Cannot currently handle compressed memory with scalable vectors"); 7421 // Incrementing the pointer according to number of '1's in the mask. 7422 EVT MaskIntVT = EVT::getIntegerVT(*DAG.getContext(), MaskVT.getSizeInBits()); 7423 SDValue MaskInIntReg = DAG.getBitcast(MaskIntVT, Mask); 7424 if (MaskIntVT.getSizeInBits() < 32) { 7425 MaskInIntReg = DAG.getNode(ISD::ZERO_EXTEND, DL, MVT::i32, MaskInIntReg); 7426 MaskIntVT = MVT::i32; 7427 } 7428 7429 // Count '1's with POPCNT. 7430 Increment = DAG.getNode(ISD::CTPOP, DL, MaskIntVT, MaskInIntReg); 7431 Increment = DAG.getZExtOrTrunc(Increment, DL, AddrVT); 7432 // Scale is an element size in bytes. 7433 SDValue Scale = DAG.getConstant(DataVT.getScalarSizeInBits() / 8, DL, 7434 AddrVT); 7435 Increment = DAG.getNode(ISD::MUL, DL, AddrVT, Increment, Scale); 7436 } else if (DataVT.isScalableVector()) { 7437 Increment = DAG.getVScale(DL, AddrVT, 7438 APInt(AddrVT.getFixedSizeInBits(), 7439 DataVT.getStoreSize().getKnownMinSize())); 7440 } else 7441 Increment = DAG.getConstant(DataVT.getStoreSize(), DL, AddrVT); 7442 7443 return DAG.getNode(ISD::ADD, DL, AddrVT, Addr, Increment); 7444 } 7445 7446 static SDValue clampDynamicVectorIndex(SelectionDAG &DAG, 7447 SDValue Idx, 7448 EVT VecVT, 7449 const SDLoc &dl) { 7450 if (!VecVT.isScalableVector() && isa<ConstantSDNode>(Idx)) 7451 return Idx; 7452 7453 EVT IdxVT = Idx.getValueType(); 7454 unsigned NElts = VecVT.getVectorMinNumElements(); 7455 if (VecVT.isScalableVector()) { 7456 SDValue VS = DAG.getVScale(dl, IdxVT, 7457 APInt(IdxVT.getFixedSizeInBits(), 7458 NElts)); 7459 SDValue Sub = DAG.getNode(ISD::SUB, dl, IdxVT, VS, 7460 DAG.getConstant(1, dl, IdxVT)); 7461 7462 return DAG.getNode(ISD::UMIN, dl, IdxVT, Idx, Sub); 7463 } else { 7464 if (isPowerOf2_32(NElts)) { 7465 APInt Imm = APInt::getLowBitsSet(IdxVT.getSizeInBits(), 7466 Log2_32(NElts)); 7467 return DAG.getNode(ISD::AND, dl, IdxVT, Idx, 7468 DAG.getConstant(Imm, dl, IdxVT)); 7469 } 7470 } 7471 7472 return DAG.getNode(ISD::UMIN, dl, IdxVT, Idx, 7473 DAG.getConstant(NElts - 1, dl, IdxVT)); 7474 } 7475 7476 SDValue TargetLowering::getVectorElementPointer(SelectionDAG &DAG, 7477 SDValue VecPtr, EVT VecVT, 7478 SDValue Index) const { 7479 SDLoc dl(Index); 7480 // Make sure the index type is big enough to compute in. 7481 Index = DAG.getZExtOrTrunc(Index, dl, VecPtr.getValueType()); 7482 7483 EVT EltVT = VecVT.getVectorElementType(); 7484 7485 // Calculate the element offset and add it to the pointer. 7486 unsigned EltSize = EltVT.getFixedSizeInBits() / 8; // FIXME: should be ABI size. 
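  // (The address returned below is VecPtr + Index' * EltSize, where Index' is
  // Index clamped -- or masked, for power-of-two element counts -- into
  // [0, NumElts - 1], so a fully dynamic index cannot address past the
  // vector's in-memory storage.)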
  assert(EltSize * 8 == EltVT.getFixedSizeInBits() &&
         "Converting bits to bytes lost precision");

  Index = clampDynamicVectorIndex(DAG, Index, VecVT, dl);

  EVT IdxVT = Index.getValueType();

  Index = DAG.getNode(ISD::MUL, dl, IdxVT, Index,
                      DAG.getConstant(EltSize, dl, IdxVT));
  return DAG.getMemBasePlusOffset(VecPtr, Index, dl);
}

//===----------------------------------------------------------------------===//
// Implementation of Emulated TLS Model
//===----------------------------------------------------------------------===//

SDValue TargetLowering::LowerToTLSEmulatedModel(const GlobalAddressSDNode *GA,
                                                SelectionDAG &DAG) const {
  // Access to the address of TLS variable xyz is lowered to a function call:
  // __emutls_get_address( address of global variable named "__emutls_v.xyz" )
  EVT PtrVT = getPointerTy(DAG.getDataLayout());
  PointerType *VoidPtrType = Type::getInt8PtrTy(*DAG.getContext());
  SDLoc dl(GA);

  ArgListTy Args;
  ArgListEntry Entry;
  std::string NameString = ("__emutls_v." + GA->getGlobal()->getName()).str();
  Module *VariableModule = const_cast<Module*>(GA->getGlobal()->getParent());
  StringRef EmuTlsVarName(NameString);
  GlobalVariable *EmuTlsVar = VariableModule->getNamedGlobal(EmuTlsVarName);
  assert(EmuTlsVar && "Cannot find EmuTlsVar ");
  Entry.Node = DAG.getGlobalAddress(EmuTlsVar, dl, PtrVT);
  Entry.Ty = VoidPtrType;
  Args.push_back(Entry);

  SDValue EmuTlsGetAddr = DAG.getExternalSymbol("__emutls_get_address", PtrVT);

  TargetLowering::CallLoweringInfo CLI(DAG);
  CLI.setDebugLoc(dl).setChain(DAG.getEntryNode());
  CLI.setLibCallee(CallingConv::C, VoidPtrType, EmuTlsGetAddr, std::move(Args));
  std::pair<SDValue, SDValue> CallResult = LowerCallTo(CLI);

  // TLSADDR will be codegen'ed as a call. Inform MFI that the function has
  // calls. At least for X86 targets; maybe good for other targets too?
  MachineFrameInfo &MFI = DAG.getMachineFunction().getFrameInfo();
  MFI.setAdjustsStack(true); // Is this only for the X86 target?
  MFI.setHasCalls(true);

  assert((GA->getOffset() == 0) &&
         "Emulated TLS must have zero offset in GlobalAddressSDNode");
  return CallResult.first;
}

SDValue TargetLowering::lowerCmpEqZeroToCtlzSrl(SDValue Op,
                                                SelectionDAG &DAG) const {
  assert((Op->getOpcode() == ISD::SETCC) && "Input has to be a SETCC node.");
  if (!isCtlzFast())
    return SDValue();
  ISD::CondCode CC = cast<CondCodeSDNode>(Op.getOperand(2))->get();
  SDLoc dl(Op);
  if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op.getOperand(1))) {
    if (C->isNullValue() && CC == ISD::SETEQ) {
      EVT VT = Op.getOperand(0).getValueType();
      SDValue Zext = Op.getOperand(0);
      if (VT.bitsLT(MVT::i32)) {
        VT = MVT::i32;
        Zext = DAG.getNode(ISD::ZERO_EXTEND, dl, VT, Op.getOperand(0));
      }
      unsigned Log2b = Log2_32(VT.getSizeInBits());
      SDValue Clz = DAG.getNode(ISD::CTLZ, dl, VT, Zext);
      SDValue Scc = DAG.getNode(ISD::SRL, dl, VT, Clz,
                                DAG.getConstant(Log2b, dl, MVT::i32));
      return DAG.getNode(ISD::TRUNCATE, dl, MVT::i32, Scc);
    }
  }
  return SDValue();
}

// Convert redundant addressing modes (e.g. scaling is redundant
// when accessing bytes).
7567 ISD::MemIndexType 7568 TargetLowering::getCanonicalIndexType(ISD::MemIndexType IndexType, EVT MemVT, 7569 SDValue Offsets) const { 7570 bool IsScaledIndex = 7571 (IndexType == ISD::SIGNED_SCALED) || (IndexType == ISD::UNSIGNED_SCALED); 7572 bool IsSignedIndex = 7573 (IndexType == ISD::SIGNED_SCALED) || (IndexType == ISD::SIGNED_UNSCALED); 7574 7575 // Scaling is unimportant for bytes, canonicalize to unscaled. 7576 if (IsScaledIndex && MemVT.getScalarType() == MVT::i8) { 7577 IsScaledIndex = false; 7578 IndexType = IsSignedIndex ? ISD::SIGNED_UNSCALED : ISD::UNSIGNED_UNSCALED; 7579 } 7580 7581 return IndexType; 7582 } 7583 7584 SDValue TargetLowering::expandIntMINMAX(SDNode *Node, SelectionDAG &DAG) const { 7585 SDValue Op0 = Node->getOperand(0); 7586 SDValue Op1 = Node->getOperand(1); 7587 EVT VT = Op0.getValueType(); 7588 unsigned Opcode = Node->getOpcode(); 7589 SDLoc DL(Node); 7590 7591 // umin(x,y) -> sub(x,usubsat(x,y)) 7592 if (Opcode == ISD::UMIN && isOperationLegal(ISD::SUB, VT) && 7593 isOperationLegal(ISD::USUBSAT, VT)) { 7594 return DAG.getNode(ISD::SUB, DL, VT, Op0, 7595 DAG.getNode(ISD::USUBSAT, DL, VT, Op0, Op1)); 7596 } 7597 7598 // umax(x,y) -> add(x,usubsat(y,x)) 7599 if (Opcode == ISD::UMAX && isOperationLegal(ISD::ADD, VT) && 7600 isOperationLegal(ISD::USUBSAT, VT)) { 7601 return DAG.getNode(ISD::ADD, DL, VT, Op0, 7602 DAG.getNode(ISD::USUBSAT, DL, VT, Op1, Op0)); 7603 } 7604 7605 // Expand Y = MAX(A, B) -> Y = (A > B) ? A : B 7606 ISD::CondCode CC; 7607 switch (Opcode) { 7608 default: llvm_unreachable("How did we get here?"); 7609 case ISD::SMAX: CC = ISD::SETGT; break; 7610 case ISD::SMIN: CC = ISD::SETLT; break; 7611 case ISD::UMAX: CC = ISD::SETUGT; break; 7612 case ISD::UMIN: CC = ISD::SETULT; break; 7613 } 7614 7615 // FIXME: Should really try to split the vector in case it's legal on a 7616 // subvector. 
7617 if (VT.isVector() && !isOperationLegalOrCustom(ISD::VSELECT, VT)) 7618 return DAG.UnrollVectorOp(Node); 7619 7620 SDValue Cond = DAG.getSetCC(DL, VT, Op0, Op1, CC); 7621 return DAG.getSelect(DL, VT, Cond, Op0, Op1); 7622 } 7623 7624 SDValue TargetLowering::expandAddSubSat(SDNode *Node, SelectionDAG &DAG) const { 7625 unsigned Opcode = Node->getOpcode(); 7626 SDValue LHS = Node->getOperand(0); 7627 SDValue RHS = Node->getOperand(1); 7628 EVT VT = LHS.getValueType(); 7629 SDLoc dl(Node); 7630 7631 assert(VT == RHS.getValueType() && "Expected operands to be the same type"); 7632 assert(VT.isInteger() && "Expected operands to be integers"); 7633 7634 // usub.sat(a, b) -> umax(a, b) - b 7635 if (Opcode == ISD::USUBSAT && isOperationLegal(ISD::UMAX, VT)) { 7636 SDValue Max = DAG.getNode(ISD::UMAX, dl, VT, LHS, RHS); 7637 return DAG.getNode(ISD::SUB, dl, VT, Max, RHS); 7638 } 7639 7640 // uadd.sat(a, b) -> umin(a, ~b) + b 7641 if (Opcode == ISD::UADDSAT && isOperationLegal(ISD::UMIN, VT)) { 7642 SDValue InvRHS = DAG.getNOT(dl, RHS, VT); 7643 SDValue Min = DAG.getNode(ISD::UMIN, dl, VT, LHS, InvRHS); 7644 return DAG.getNode(ISD::ADD, dl, VT, Min, RHS); 7645 } 7646 7647 unsigned OverflowOp; 7648 switch (Opcode) { 7649 case ISD::SADDSAT: 7650 OverflowOp = ISD::SADDO; 7651 break; 7652 case ISD::UADDSAT: 7653 OverflowOp = ISD::UADDO; 7654 break; 7655 case ISD::SSUBSAT: 7656 OverflowOp = ISD::SSUBO; 7657 break; 7658 case ISD::USUBSAT: 7659 OverflowOp = ISD::USUBO; 7660 break; 7661 default: 7662 llvm_unreachable("Expected method to receive signed or unsigned saturation " 7663 "addition or subtraction node."); 7664 } 7665 7666 // FIXME: Should really try to split the vector in case it's legal on a 7667 // subvector. 7668 if (VT.isVector() && !isOperationLegalOrCustom(ISD::VSELECT, VT)) 7669 return DAG.UnrollVectorOp(Node); 7670 7671 unsigned BitWidth = LHS.getScalarValueSizeInBits(); 7672 EVT BoolVT = getSetCCResultType(DAG.getDataLayout(), *DAG.getContext(), VT); 7673 SDValue Result = DAG.getNode(OverflowOp, dl, DAG.getVTList(VT, BoolVT), 7674 LHS, RHS); 7675 SDValue SumDiff = Result.getValue(0); 7676 SDValue Overflow = Result.getValue(1); 7677 SDValue Zero = DAG.getConstant(0, dl, VT); 7678 SDValue AllOnes = DAG.getAllOnesConstant(dl, VT); 7679 7680 if (Opcode == ISD::UADDSAT) { 7681 if (getBooleanContents(VT) == ZeroOrNegativeOneBooleanContent) { 7682 // (LHS + RHS) | OverflowMask 7683 SDValue OverflowMask = DAG.getSExtOrTrunc(Overflow, dl, VT); 7684 return DAG.getNode(ISD::OR, dl, VT, SumDiff, OverflowMask); 7685 } 7686 // Overflow ? 0xffff.... : (LHS + RHS) 7687 return DAG.getSelect(dl, VT, Overflow, AllOnes, SumDiff); 7688 } else if (Opcode == ISD::USUBSAT) { 7689 if (getBooleanContents(VT) == ZeroOrNegativeOneBooleanContent) { 7690 // (LHS - RHS) & ~OverflowMask 7691 SDValue OverflowMask = DAG.getSExtOrTrunc(Overflow, dl, VT); 7692 SDValue Not = DAG.getNOT(dl, OverflowMask, VT); 7693 return DAG.getNode(ISD::AND, dl, VT, SumDiff, Not); 7694 } 7695 // Overflow ? 
    // Overflow ? 0 : (LHS - RHS)
    return DAG.getSelect(dl, VT, Overflow, Zero, SumDiff);
  } else {
    // SatMax -> Overflow && SumDiff < 0
    // SatMin -> Overflow && SumDiff >= 0
    APInt MinVal = APInt::getSignedMinValue(BitWidth);
    APInt MaxVal = APInt::getSignedMaxValue(BitWidth);
    SDValue SatMin = DAG.getConstant(MinVal, dl, VT);
    SDValue SatMax = DAG.getConstant(MaxVal, dl, VT);
    SDValue SumNeg = DAG.getSetCC(dl, BoolVT, SumDiff, Zero, ISD::SETLT);
    Result = DAG.getSelect(dl, VT, SumNeg, SatMax, SatMin);
    return DAG.getSelect(dl, VT, Overflow, Result, SumDiff);
  }
}

SDValue TargetLowering::expandShlSat(SDNode *Node, SelectionDAG &DAG) const {
  unsigned Opcode = Node->getOpcode();
  bool IsSigned = Opcode == ISD::SSHLSAT;
  SDValue LHS = Node->getOperand(0);
  SDValue RHS = Node->getOperand(1);
  EVT VT = LHS.getValueType();
  SDLoc dl(Node);

  assert((Node->getOpcode() == ISD::SSHLSAT ||
          Node->getOpcode() == ISD::USHLSAT) &&
         "Expected a SHLSAT opcode");
  assert(VT == RHS.getValueType() && "Expected operands to be the same type");
  assert(VT.isInteger() && "Expected operands to be integers");

  // If LHS != (LHS << RHS) >> RHS, we have overflow and must saturate.

  unsigned BW = VT.getScalarSizeInBits();
  SDValue Result = DAG.getNode(ISD::SHL, dl, VT, LHS, RHS);
  SDValue Orig =
      DAG.getNode(IsSigned ? ISD::SRA : ISD::SRL, dl, VT, Result, RHS);

  SDValue SatVal;
  if (IsSigned) {
    SDValue SatMin = DAG.getConstant(APInt::getSignedMinValue(BW), dl, VT);
    SDValue SatMax = DAG.getConstant(APInt::getSignedMaxValue(BW), dl, VT);
    SatVal = DAG.getSelectCC(dl, LHS, DAG.getConstant(0, dl, VT),
                             SatMin, SatMax, ISD::SETLT);
  } else {
    SatVal = DAG.getConstant(APInt::getMaxValue(BW), dl, VT);
  }
  Result = DAG.getSelectCC(dl, LHS, Orig, SatVal, Result, ISD::SETNE);

  return Result;
}

SDValue
TargetLowering::expandFixedPointMul(SDNode *Node, SelectionDAG &DAG) const {
  assert((Node->getOpcode() == ISD::SMULFIX ||
          Node->getOpcode() == ISD::UMULFIX ||
          Node->getOpcode() == ISD::SMULFIXSAT ||
          Node->getOpcode() == ISD::UMULFIXSAT) &&
         "Expected a fixed point multiplication opcode");

  SDLoc dl(Node);
  SDValue LHS = Node->getOperand(0);
  SDValue RHS = Node->getOperand(1);
  EVT VT = LHS.getValueType();
  unsigned Scale = Node->getConstantOperandVal(2);
  bool Saturating = (Node->getOpcode() == ISD::SMULFIXSAT ||
                     Node->getOpcode() == ISD::UMULFIXSAT);
  bool Signed = (Node->getOpcode() == ISD::SMULFIX ||
                 Node->getOpcode() == ISD::SMULFIXSAT);
  EVT BoolVT = getSetCCResultType(DAG.getDataLayout(), *DAG.getContext(), VT);
  unsigned VTSize = VT.getScalarSizeInBits();

  if (!Scale) {
    // [us]mul.fix(a, b, 0) -> mul(a, b)
    if (!Saturating) {
      if (isOperationLegalOrCustom(ISD::MUL, VT))
        return DAG.getNode(ISD::MUL, dl, VT, LHS, RHS);
    } else if (Signed && isOperationLegalOrCustom(ISD::SMULO, VT)) {
      SDValue Result =
          DAG.getNode(ISD::SMULO, dl, DAG.getVTList(VT, BoolVT), LHS, RHS);
      SDValue Product = Result.getValue(0);
      SDValue Overflow = Result.getValue(1);
      SDValue Zero = DAG.getConstant(0, dl, VT);

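      // SMULO yields both the truncated product and an overflow flag; when
      // the flag is set, the product is replaced by a saturation constant
      // chosen from the wrapped product's sign, mirroring the SumNeg logic
      // in expandAddSubSat above.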
      APInt MinVal = APInt::getSignedMinValue(VTSize);
      APInt MaxVal = APInt::getSignedMaxValue(VTSize);
      SDValue SatMin = DAG.getConstant(MinVal, dl, VT);
      SDValue SatMax = DAG.getConstant(MaxVal, dl, VT);
      SDValue ProdNeg = DAG.getSetCC(dl, BoolVT, Product, Zero, ISD::SETLT);
      Result = DAG.getSelect(dl, VT, ProdNeg, SatMax, SatMin);
      return DAG.getSelect(dl, VT, Overflow, Result, Product);
    } else if (!Signed && isOperationLegalOrCustom(ISD::UMULO, VT)) {
      SDValue Result =
          DAG.getNode(ISD::UMULO, dl, DAG.getVTList(VT, BoolVT), LHS, RHS);
      SDValue Product = Result.getValue(0);
      SDValue Overflow = Result.getValue(1);

      APInt MaxVal = APInt::getMaxValue(VTSize);
      SDValue SatMax = DAG.getConstant(MaxVal, dl, VT);
      return DAG.getSelect(dl, VT, Overflow, SatMax, Product);
    }
  }

  assert(((Signed && Scale < VTSize) || (!Signed && Scale <= VTSize)) &&
         "Expected scale to be less than the number of bits if signed or at "
         "most the number of bits if unsigned.");
  assert(LHS.getValueType() == RHS.getValueType() &&
         "Expected both operands to be the same type");

  // Get the upper and lower bits of the result.
  SDValue Lo, Hi;
  unsigned LoHiOp = Signed ? ISD::SMUL_LOHI : ISD::UMUL_LOHI;
  unsigned HiOp = Signed ? ISD::MULHS : ISD::MULHU;
  if (isOperationLegalOrCustom(LoHiOp, VT)) {
    SDValue Result = DAG.getNode(LoHiOp, dl, DAG.getVTList(VT, VT), LHS, RHS);
    Lo = Result.getValue(0);
    Hi = Result.getValue(1);
  } else if (isOperationLegalOrCustom(HiOp, VT)) {
    Lo = DAG.getNode(ISD::MUL, dl, VT, LHS, RHS);
    Hi = DAG.getNode(HiOp, dl, VT, LHS, RHS);
  } else if (VT.isVector()) {
    return SDValue();
  } else {
    report_fatal_error("Unable to expand fixed point multiplication.");
  }

  if (Scale == VTSize)
    // Result is just the top half since we'd be shifting by the width of the
    // operand. Overflow is impossible, so this works for both UMULFIX and
    // UMULFIXSAT.
    return Hi;

  // The result will need to be shifted right by the scale since both operands
  // are scaled. The result is given to us in 2 halves, so we only want part of
  // both in the result.
  EVT ShiftTy = getShiftAmountTy(VT, DAG.getDataLayout());
  SDValue Result = DAG.getNode(ISD::FSHR, dl, VT, Hi, Lo,
                               DAG.getConstant(Scale, dl, ShiftTy));
  if (!Saturating)
    return Result;

  if (!Signed) {
    // Unsigned overflow happened if the upper (VTSize - Scale) bits (of the
    // widened multiplication) aren't all zeroes.

    // Saturate to max if ((Hi >> Scale) != 0),
    // which is the same as if (Hi > ((1 << Scale) - 1))
    APInt MaxVal = APInt::getMaxValue(VTSize);
    SDValue LowMask = DAG.getConstant(APInt::getLowBitsSet(VTSize, Scale),
                                      dl, VT);
    Result = DAG.getSelectCC(dl, Hi, LowMask,
                             DAG.getConstant(MaxVal, dl, VT), Result,
                             ISD::SETUGT);

    return Result;
  }

  // Signed overflow happened if the upper (VTSize - Scale + 1) bits (of the
  // widened multiplication) aren't all ones or all zeroes.

  SDValue SatMin = DAG.getConstant(APInt::getSignedMinValue(VTSize), dl, VT);
  SDValue SatMax = DAG.getConstant(APInt::getSignedMaxValue(VTSize), dl, VT);

  if (Scale == 0) {
    SDValue Sign = DAG.getNode(ISD::SRA, dl, VT, Lo,
                               DAG.getConstant(VTSize - 1, dl, ShiftTy));
    SDValue Overflow = DAG.getSetCC(dl, BoolVT, Hi, Sign, ISD::SETNE);
    // Saturate to SatMin if the wide product is negative, and SatMax if the
    // wide product is positive ...
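    // (Worked example, i8 with Scale == 0: 100 * 100 = 10000 gives Lo = 16,
    // Hi = 39. Hi does not match the sign-replication of Lo, so we
    // overflowed; Hi >= 0 then selects SatMax = 127 below.)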
    SDValue Zero = DAG.getConstant(0, dl, VT);
    SDValue ResultIfOverflow = DAG.getSelectCC(dl, Hi, Zero, SatMin, SatMax,
                                               ISD::SETLT);
    // ... but only if we overflowed.
    return DAG.getSelect(dl, VT, Overflow, ResultIfOverflow, Result);
  }

  // We handled Scale == 0 above, so all the bits to examine are in Hi.

  // Saturate to max if ((Hi >> (Scale - 1)) > 0),
  // which is the same as if (Hi > ((1 << (Scale - 1)) - 1)).
  SDValue LowMask = DAG.getConstant(APInt::getLowBitsSet(VTSize, Scale - 1),
                                    dl, VT);
  Result = DAG.getSelectCC(dl, Hi, LowMask, SatMax, Result, ISD::SETGT);
  // Saturate to min if ((Hi >> (Scale - 1)) < -1),
  // which is the same as if (Hi < (-1 << (Scale - 1))).
  SDValue HighMask =
      DAG.getConstant(APInt::getHighBitsSet(VTSize, VTSize - Scale + 1),
                      dl, VT);
  Result = DAG.getSelectCC(dl, Hi, HighMask, SatMin, Result, ISD::SETLT);
  return Result;
}

SDValue
TargetLowering::expandFixedPointDiv(unsigned Opcode, const SDLoc &dl,
                                    SDValue LHS, SDValue RHS,
                                    unsigned Scale, SelectionDAG &DAG) const {
  assert((Opcode == ISD::SDIVFIX || Opcode == ISD::SDIVFIXSAT ||
          Opcode == ISD::UDIVFIX || Opcode == ISD::UDIVFIXSAT) &&
         "Expected a fixed point division opcode");

  EVT VT = LHS.getValueType();
  bool Signed = Opcode == ISD::SDIVFIX || Opcode == ISD::SDIVFIXSAT;
  bool Saturating = Opcode == ISD::SDIVFIXSAT || Opcode == ISD::UDIVFIXSAT;
  EVT BoolVT = getSetCCResultType(DAG.getDataLayout(), *DAG.getContext(), VT);

  // If there is enough room in the type to upscale the LHS or downscale the
  // RHS before the division, we can perform it in this type without having to
  // resize. For signed operations, the LHS headroom is the number of
  // redundant sign bits, and for unsigned ones it is the number of leading
  // zeroes. The headroom for the RHS is the number of trailing zeroes.
  unsigned LHSLead = Signed ? DAG.ComputeNumSignBits(LHS) - 1
                            : DAG.computeKnownBits(LHS).countMinLeadingZeros();
  unsigned RHSTrail = DAG.computeKnownBits(RHS).countMinTrailingZeros();

  // For signed saturating operations, we need to be able to detect true integer
  // division overflow; that is, when you have MIN / -EPS. However, this
  // is undefined behavior and if we emit divisions that could take such
  // values it may cause undesired behavior (arithmetic exceptions on x86, for
  // example).
  // Avoid this by requiring an extra bit so that we never get this case.
  // FIXME: This is a bit unfortunate as it means that for an 8-bit 7-scale
  // signed saturating division, we need to emit a whopping 32-bit division.
  if (LHSLead + RHSTrail < Scale + (unsigned)(Saturating && Signed))
    return SDValue();

  unsigned LHSShift = std::min(LHSLead, Scale);
  unsigned RHSShift = Scale - LHSShift;

  // At this point, we know that if we shift the LHS up by LHSShift and the
  // RHS down by RHSShift, we can emit a regular division with a final scaling
  // factor of Scale.
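  // Worked example (illustrative): an 8-bit udiv.fix with Scale = 4,
  // LHS = 0x0F (0.9375) and RHS = 0x30 (3.0). LHSLead = 4, so LHSShift = 4
  // and RHSShift = 0, giving 0xF0 / 0x30 = 5, i.e. 0.3125 in Q4 format,
  // as expected for 0.9375 / 3.0.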
  EVT ShiftTy = getShiftAmountTy(VT, DAG.getDataLayout());
  if (LHSShift)
    LHS = DAG.getNode(ISD::SHL, dl, VT, LHS,
                      DAG.getConstant(LHSShift, dl, ShiftTy));
  if (RHSShift)
    RHS = DAG.getNode(Signed ? ISD::SRA : ISD::SRL, dl, VT, RHS,
                      DAG.getConstant(RHSShift, dl, ShiftTy));

  SDValue Quot;
  if (Signed) {
    // For signed operations, if the resulting quotient is negative and the
    // remainder is nonzero, subtract 1 from the quotient to round towards
    // negative infinity.
    SDValue Rem;
    // FIXME: Ideally we would always produce an SDIVREM here, but if the
    // type isn't legal, SDIVREM cannot be expanded. There is no reason why
    // we couldn't just form a libcall, but the type legalizer doesn't do it.
    if (isTypeLegal(VT) &&
        isOperationLegalOrCustom(ISD::SDIVREM, VT)) {
      Quot = DAG.getNode(ISD::SDIVREM, dl,
                         DAG.getVTList(VT, VT),
                         LHS, RHS);
      Rem = Quot.getValue(1);
      Quot = Quot.getValue(0);
    } else {
      Quot = DAG.getNode(ISD::SDIV, dl, VT,
                         LHS, RHS);
      Rem = DAG.getNode(ISD::SREM, dl, VT,
                        LHS, RHS);
    }
    SDValue Zero = DAG.getConstant(0, dl, VT);
    SDValue RemNonZero = DAG.getSetCC(dl, BoolVT, Rem, Zero, ISD::SETNE);
    SDValue LHSNeg = DAG.getSetCC(dl, BoolVT, LHS, Zero, ISD::SETLT);
    SDValue RHSNeg = DAG.getSetCC(dl, BoolVT, RHS, Zero, ISD::SETLT);
    SDValue QuotNeg = DAG.getNode(ISD::XOR, dl, BoolVT, LHSNeg, RHSNeg);
    SDValue Sub1 = DAG.getNode(ISD::SUB, dl, VT, Quot,
                               DAG.getConstant(1, dl, VT));
    Quot = DAG.getSelect(dl, VT,
                         DAG.getNode(ISD::AND, dl, BoolVT, RemNonZero, QuotNeg),
                         Sub1, Quot);
  } else
    Quot = DAG.getNode(ISD::UDIV, dl, VT,
                       LHS, RHS);

  return Quot;
}

void TargetLowering::expandUADDSUBO(
    SDNode *Node, SDValue &Result, SDValue &Overflow, SelectionDAG &DAG) const {
  SDLoc dl(Node);
  SDValue LHS = Node->getOperand(0);
  SDValue RHS = Node->getOperand(1);
  bool IsAdd = Node->getOpcode() == ISD::UADDO;

  // If ADD/SUBCARRY is legal, use that instead.
  unsigned OpcCarry = IsAdd ? ISD::ADDCARRY : ISD::SUBCARRY;
  if (isOperationLegalOrCustom(OpcCarry, Node->getValueType(0))) {
    SDValue CarryIn = DAG.getConstant(0, dl, Node->getValueType(1));
    SDValue NodeCarry = DAG.getNode(OpcCarry, dl, Node->getVTList(),
                                    { LHS, RHS, CarryIn });
    Result = SDValue(NodeCarry.getNode(), 0);
    Overflow = SDValue(NodeCarry.getNode(), 1);
    return;
  }

  Result = DAG.getNode(IsAdd ? ISD::ADD : ISD::SUB, dl,
                       LHS.getValueType(), LHS, RHS);

  EVT ResultType = Node->getValueType(1);
  EVT SetCCType = getSetCCResultType(
      DAG.getDataLayout(), *DAG.getContext(), Node->getValueType(0));
  ISD::CondCode CC = IsAdd ? ISD::SETULT : ISD::SETUGT;
  SDValue SetCC = DAG.getSetCC(dl, SetCCType, Result, LHS, CC);
  Overflow = DAG.getBoolExtOrTrunc(SetCC, dl, ResultType, ResultType);
}

void TargetLowering::expandSADDSUBO(
    SDNode *Node, SDValue &Result, SDValue &Overflow, SelectionDAG &DAG) const {
  SDLoc dl(Node);
  SDValue LHS = Node->getOperand(0);
  SDValue RHS = Node->getOperand(1);
  bool IsAdd = Node->getOpcode() == ISD::SADDO;

  Result = DAG.getNode(IsAdd ? ISD::ADD : ISD::SUB, dl,
                       LHS.getValueType(), LHS, RHS);

  EVT ResultType = Node->getValueType(1);
  EVT OType = getSetCCResultType(
      DAG.getDataLayout(), *DAG.getContext(), Node->getValueType(0));

  // If SADDSAT/SSUBSAT is legal, compare results to detect overflow.
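  // (If the plain add/sub and the saturating form disagree, the plain form
  // must have wrapped: e.g. for i8, 100 + 100 gives -56 while saddsat
  // clamps to 127.)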
  unsigned OpcSat = IsAdd ? ISD::SADDSAT : ISD::SSUBSAT;
  if (isOperationLegalOrCustom(OpcSat, LHS.getValueType())) {
    SDValue Sat = DAG.getNode(OpcSat, dl, LHS.getValueType(), LHS, RHS);
    SDValue SetCC = DAG.getSetCC(dl, OType, Result, Sat, ISD::SETNE);
    Overflow = DAG.getBoolExtOrTrunc(SetCC, dl, ResultType, ResultType);
    return;
  }

  SDValue Zero = DAG.getConstant(0, dl, LHS.getValueType());

  // For an addition, the result should be less than one of the operands (LHS)
  // if and only if the other operand (RHS) is negative, otherwise there will
  // be overflow.
  // For a subtraction, the result should be less than one of the operands
  // (LHS) if and only if the other operand (RHS) is (non-zero) positive,
  // otherwise there will be overflow.
  SDValue ResultLowerThanLHS = DAG.getSetCC(dl, OType, Result, LHS, ISD::SETLT);
  SDValue ConditionRHS =
      DAG.getSetCC(dl, OType, RHS, Zero, IsAdd ? ISD::SETLT : ISD::SETGT);

  Overflow = DAG.getBoolExtOrTrunc(
      DAG.getNode(ISD::XOR, dl, OType, ConditionRHS, ResultLowerThanLHS), dl,
      ResultType, ResultType);
}

bool TargetLowering::expandMULO(SDNode *Node, SDValue &Result,
                                SDValue &Overflow, SelectionDAG &DAG) const {
  SDLoc dl(Node);
  EVT VT = Node->getValueType(0);
  EVT SetCCVT = getSetCCResultType(DAG.getDataLayout(), *DAG.getContext(), VT);
  SDValue LHS = Node->getOperand(0);
  SDValue RHS = Node->getOperand(1);
  bool isSigned = Node->getOpcode() == ISD::SMULO;

  // For power-of-two multiplications we can use a simpler shift expansion.
  if (ConstantSDNode *RHSC = isConstOrConstSplat(RHS)) {
    const APInt &C = RHSC->getAPIntValue();
    // mulo(X, 1 << S) -> { X << S, (X << S) >> S != X }
    if (C.isPowerOf2()) {
      // smulo(x, signed_min) is the same as umulo(x, signed_min).
      bool UseArithShift = isSigned && !C.isMinSignedValue();
      EVT ShiftAmtTy = getShiftAmountTy(VT, DAG.getDataLayout());
      SDValue ShiftAmt = DAG.getConstant(C.logBase2(), dl, ShiftAmtTy);
      Result = DAG.getNode(ISD::SHL, dl, VT, LHS, ShiftAmt);
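      // Shift back down and compare with the original LHS: any lost bits
      // mean (X << S) >> S != X, i.e. the multiply overflowed. For example,
      // i8 smulo(70, 4): 70 << 2 wraps to 24, and 24 >> 2 == 6 != 70.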
      Overflow = DAG.getSetCC(dl, SetCCVT,
                              DAG.getNode(UseArithShift ? ISD::SRA : ISD::SRL,
                                          dl, VT, Result, ShiftAmt),
                              LHS, ISD::SETNE);
      return true;
    }
  }

  EVT WideVT = EVT::getIntegerVT(*DAG.getContext(), VT.getScalarSizeInBits() * 2);
  if (VT.isVector())
    WideVT = EVT::getVectorVT(*DAG.getContext(), WideVT,
                              VT.getVectorNumElements());

  SDValue BottomHalf;
  SDValue TopHalf;
  static const unsigned Ops[2][3] =
      { { ISD::MULHU, ISD::UMUL_LOHI, ISD::ZERO_EXTEND },
        { ISD::MULHS, ISD::SMUL_LOHI, ISD::SIGN_EXTEND }};
  if (isOperationLegalOrCustom(Ops[isSigned][0], VT)) {
    BottomHalf = DAG.getNode(ISD::MUL, dl, VT, LHS, RHS);
    TopHalf = DAG.getNode(Ops[isSigned][0], dl, VT, LHS, RHS);
  } else if (isOperationLegalOrCustom(Ops[isSigned][1], VT)) {
    BottomHalf = DAG.getNode(Ops[isSigned][1], dl, DAG.getVTList(VT, VT), LHS,
                             RHS);
    TopHalf = BottomHalf.getValue(1);
  } else if (isTypeLegal(WideVT)) {
    LHS = DAG.getNode(Ops[isSigned][2], dl, WideVT, LHS);
    RHS = DAG.getNode(Ops[isSigned][2], dl, WideVT, RHS);
    SDValue Mul = DAG.getNode(ISD::MUL, dl, WideVT, LHS, RHS);
    BottomHalf = DAG.getNode(ISD::TRUNCATE, dl, VT, Mul);
    SDValue ShiftAmt = DAG.getConstant(VT.getScalarSizeInBits(), dl,
                                       getShiftAmountTy(WideVT, DAG.getDataLayout()));
    TopHalf = DAG.getNode(ISD::TRUNCATE, dl, VT,
                          DAG.getNode(ISD::SRL, dl, WideVT, Mul, ShiftAmt));
  } else {
    if (VT.isVector())
      return false;

    // We can fall back to a libcall with an illegal type for the MUL if we
    // have a libcall big enough.
    // Also, we can fall back to a division in some cases, but that's a big
    // performance hit in the general case.
    RTLIB::Libcall LC = RTLIB::UNKNOWN_LIBCALL;
    if (WideVT == MVT::i16)
      LC = RTLIB::MUL_I16;
    else if (WideVT == MVT::i32)
      LC = RTLIB::MUL_I32;
    else if (WideVT == MVT::i64)
      LC = RTLIB::MUL_I64;
    else if (WideVT == MVT::i128)
      LC = RTLIB::MUL_I128;
    assert(LC != RTLIB::UNKNOWN_LIBCALL && "Cannot expand this operation!");

    SDValue HiLHS;
    SDValue HiRHS;
    if (isSigned) {
      // The high part is obtained by SRA'ing all but one of the bits of the
      // low part.
      unsigned LoSize = VT.getFixedSizeInBits();
      HiLHS =
          DAG.getNode(ISD::SRA, dl, VT, LHS,
                      DAG.getConstant(LoSize - 1, dl,
                                      getPointerTy(DAG.getDataLayout())));
      HiRHS =
          DAG.getNode(ISD::SRA, dl, VT, RHS,
                      DAG.getConstant(LoSize - 1, dl,
                                      getPointerTy(DAG.getDataLayout())));
    } else {
      HiLHS = DAG.getConstant(0, dl, VT);
      HiRHS = DAG.getConstant(0, dl, VT);
    }

    // Here we're passing the 2 arguments explicitly as 4 arguments that are
    // pre-lowered to the correct types. This all depends upon WideVT not
    // being a legal type for the architecture, and thus having to be split
    // into two arguments.
    SDValue Ret;
    TargetLowering::MakeLibCallOptions CallOptions;
    CallOptions.setSExt(isSigned);
    CallOptions.setIsPostTypeLegalization(true);
    if (shouldSplitFunctionArgumentsAsLittleEndian(DAG.getDataLayout())) {
      // Halves of WideVT are packed into registers in different order
      // depending on platform endianness. This is usually handled by
      // the C calling convention, but we can't defer to it in
      // the legalizer.
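      // (e.g. when WideVT is i128, each operand is passed to the libcall as
      // a {Lo, Hi} pair of i64 halves, low half first, on little-endian
      // targets.)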
      SDValue Args[] = { LHS, HiLHS, RHS, HiRHS };
      Ret = makeLibCall(DAG, LC, WideVT, Args, CallOptions, dl).first;
    } else {
      SDValue Args[] = { HiLHS, LHS, HiRHS, RHS };
      Ret = makeLibCall(DAG, LC, WideVT, Args, CallOptions, dl).first;
    }
    assert(Ret.getOpcode() == ISD::MERGE_VALUES &&
           "Ret value is a collection of constituent nodes holding the result.");
    if (DAG.getDataLayout().isLittleEndian()) {
      // Same as above.
      BottomHalf = Ret.getOperand(0);
      TopHalf = Ret.getOperand(1);
    } else {
      BottomHalf = Ret.getOperand(1);
      TopHalf = Ret.getOperand(0);
    }
  }

  Result = BottomHalf;
  if (isSigned) {
    SDValue ShiftAmt = DAG.getConstant(
        VT.getScalarSizeInBits() - 1, dl,
        getShiftAmountTy(BottomHalf.getValueType(), DAG.getDataLayout()));
    SDValue Sign = DAG.getNode(ISD::SRA, dl, VT, BottomHalf, ShiftAmt);
    Overflow = DAG.getSetCC(dl, SetCCVT, TopHalf, Sign, ISD::SETNE);
  } else {
    Overflow = DAG.getSetCC(dl, SetCCVT, TopHalf,
                            DAG.getConstant(0, dl, VT), ISD::SETNE);
  }

  // Truncate the result if SetCC returns a larger type than needed.
  EVT RType = Node->getValueType(1);
  if (RType.bitsLT(Overflow.getValueType()))
    Overflow = DAG.getNode(ISD::TRUNCATE, dl, RType, Overflow);

  assert(RType.getSizeInBits() == Overflow.getValueSizeInBits() &&
         "Unexpected result type for S/UMULO legalization");
  return true;
}

SDValue TargetLowering::expandVecReduce(SDNode *Node, SelectionDAG &DAG) const {
  SDLoc dl(Node);
  unsigned BaseOpcode = ISD::getVecReduceBaseOpcode(Node->getOpcode());
  SDValue Op = Node->getOperand(0);
  EVT VT = Op.getValueType();

  if (VT.isScalableVector())
    report_fatal_error(
        "Expanding reductions for scalable vectors is undefined.");

  // Try to use a shuffle reduction for power-of-two vectors.
  if (VT.isPow2VectorType()) {
    while (VT.getVectorNumElements() > 1) {
      EVT HalfVT = VT.getHalfNumVectorElementsVT(*DAG.getContext());
      if (!isOperationLegalOrCustom(BaseOpcode, HalfVT))
        break;

      SDValue Lo, Hi;
      std::tie(Lo, Hi) = DAG.SplitVector(Op, dl);
      Op = DAG.getNode(BaseOpcode, dl, HalfVT, Lo, Hi);
      VT = HalfVT;
    }
  }

  EVT EltVT = VT.getVectorElementType();
  unsigned NumElts = VT.getVectorNumElements();

  SmallVector<SDValue, 8> Ops;
  DAG.ExtractVectorElements(Op, Ops, 0, NumElts);

  SDValue Res = Ops[0];
  for (unsigned i = 1; i < NumElts; i++)
    Res = DAG.getNode(BaseOpcode, dl, EltVT, Res, Ops[i], Node->getFlags());

  // Result type may be wider than element type.
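  // (ANY_EXTEND is sufficient here because this expansion only defines the
  // low EltVT bits of the result.)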
  if (EltVT != Node->getValueType(0))
    Res = DAG.getNode(ISD::ANY_EXTEND, dl, Node->getValueType(0), Res);
  return Res;
}

SDValue TargetLowering::expandVecReduceSeq(SDNode *Node, SelectionDAG &DAG) const {
  SDLoc dl(Node);
  SDValue AccOp = Node->getOperand(0);
  SDValue VecOp = Node->getOperand(1);
  SDNodeFlags Flags = Node->getFlags();

  EVT VT = VecOp.getValueType();
  EVT EltVT = VT.getVectorElementType();

  if (VT.isScalableVector())
    report_fatal_error(
        "Expanding reductions for scalable vectors is undefined.");

  unsigned NumElts = VT.getVectorNumElements();

  SmallVector<SDValue, 8> Ops;
  DAG.ExtractVectorElements(VecOp, Ops, 0, NumElts);

  unsigned BaseOpcode = ISD::getVecReduceBaseOpcode(Node->getOpcode());

  SDValue Res = AccOp;
  for (unsigned i = 0; i < NumElts; i++)
    Res = DAG.getNode(BaseOpcode, dl, EltVT, Res, Ops[i], Flags);

  return Res;
}

bool TargetLowering::expandREM(SDNode *Node, SDValue &Result,
                               SelectionDAG &DAG) const {
  EVT VT = Node->getValueType(0);
  SDLoc dl(Node);
  bool isSigned = Node->getOpcode() == ISD::SREM;
  unsigned DivOpc = isSigned ? ISD::SDIV : ISD::UDIV;
  unsigned DivRemOpc = isSigned ? ISD::SDIVREM : ISD::UDIVREM;
  SDValue Dividend = Node->getOperand(0);
  SDValue Divisor = Node->getOperand(1);
  if (isOperationLegalOrCustom(DivRemOpc, VT)) {
    SDVTList VTs = DAG.getVTList(VT, VT);
    Result = DAG.getNode(DivRemOpc, dl, VTs, Dividend, Divisor).getValue(1);
    return true;
  } else if (isOperationLegalOrCustom(DivOpc, VT)) {
    // X % Y -> X - (X / Y) * Y
    SDValue Divide = DAG.getNode(DivOpc, dl, VT, Dividend, Divisor);
    SDValue Mul = DAG.getNode(ISD::MUL, dl, VT, Divide, Divisor);
    Result = DAG.getNode(ISD::SUB, dl, VT, Dividend, Mul);
    return true;
  }
  return false;
}

SDValue TargetLowering::expandFP_TO_INT_SAT(SDNode *Node,
                                            SelectionDAG &DAG) const {
  bool IsSigned = Node->getOpcode() == ISD::FP_TO_SINT_SAT;
  SDLoc dl(SDValue(Node, 0));
  SDValue Src = Node->getOperand(0);

  // DstVT is the result type, while SatWidth is the bit width to which we
  // saturate (it may be narrower than the result).
  EVT SrcVT = Src.getValueType();
  EVT DstVT = Node->getValueType(0);

  unsigned SatWidth = Node->getConstantOperandVal(1);
  unsigned DstWidth = DstVT.getScalarSizeInBits();
  assert(SatWidth <= DstWidth &&
         "Expected saturation width no wider than result width");

  // Determine minimum and maximum integer values and their corresponding
  // floating-point values.
  APInt MinInt, MaxInt;
  if (IsSigned) {
    MinInt = APInt::getSignedMinValue(SatWidth).sextOrSelf(DstWidth);
    MaxInt = APInt::getSignedMaxValue(SatWidth).sextOrSelf(DstWidth);
  } else {
    MinInt = APInt::getMinValue(SatWidth).zextOrSelf(DstWidth);
    MaxInt = APInt::getMaxValue(SatWidth).zextOrSelf(DstWidth);
  }

  // We cannot risk emitting FP_TO_XINT nodes with a source VT of f16, as
  // libcall emission cannot handle this. Large result types will fail.
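  // (Promoting f16 to f32 below is lossless, so the saturation bounds
  // computed afterwards are unaffected by the extension.)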
  if (SrcVT == MVT::f16) {
    Src = DAG.getNode(ISD::FP_EXTEND, dl, MVT::f32, Src);
    SrcVT = Src.getValueType();
  }

  APFloat MinFloat(DAG.EVTToAPFloatSemantics(SrcVT));
  APFloat MaxFloat(DAG.EVTToAPFloatSemantics(SrcVT));

  APFloat::opStatus MinStatus =
      MinFloat.convertFromAPInt(MinInt, IsSigned, APFloat::rmTowardZero);
  APFloat::opStatus MaxStatus =
      MaxFloat.convertFromAPInt(MaxInt, IsSigned, APFloat::rmTowardZero);
  bool AreExactFloatBounds = !(MinStatus & APFloat::opStatus::opInexact) &&
                             !(MaxStatus & APFloat::opStatus::opInexact);

  SDValue MinFloatNode = DAG.getConstantFP(MinFloat, dl, SrcVT);
  SDValue MaxFloatNode = DAG.getConstantFP(MaxFloat, dl, SrcVT);

  // If the integer bounds are exactly representable as floats and min/max are
  // legal, emit a min+max+fptoi sequence. Otherwise we have to use a sequence
  // of comparisons and selects.
  bool MinMaxLegal = isOperationLegal(ISD::FMINNUM, SrcVT) &&
                     isOperationLegal(ISD::FMAXNUM, SrcVT);
  if (AreExactFloatBounds && MinMaxLegal) {
    SDValue Clamped = Src;

    // Clamp Src by MinFloat from below. If Src is NaN the result is MinFloat.
    Clamped = DAG.getNode(ISD::FMAXNUM, dl, SrcVT, Clamped, MinFloatNode);
    // Clamp by MaxFloat from above. NaN cannot occur.
    Clamped = DAG.getNode(ISD::FMINNUM, dl, SrcVT, Clamped, MaxFloatNode);
    // Convert clamped value to integer.
    SDValue FpToInt = DAG.getNode(IsSigned ? ISD::FP_TO_SINT : ISD::FP_TO_UINT,
                                  dl, DstVT, Clamped);

    // In the unsigned case we're done, because we mapped NaN to MinFloat,
    // which will cast to zero.
    if (!IsSigned)
      return FpToInt;

    // Otherwise, select 0 if Src is NaN.
    SDValue ZeroInt = DAG.getConstant(0, dl, DstVT);
    return DAG.getSelectCC(dl, Src, Src, ZeroInt, FpToInt,
                           ISD::CondCode::SETUO);
  }

  SDValue MinIntNode = DAG.getConstant(MinInt, dl, DstVT);
  SDValue MaxIntNode = DAG.getConstant(MaxInt, dl, DstVT);

  // Result of direct conversion. The assumption here is that the operation is
  // non-trapping and it's fine to apply it to an out-of-range value if we
  // select it away later.
  SDValue FpToInt =
      DAG.getNode(IsSigned ? ISD::FP_TO_SINT : ISD::FP_TO_UINT, dl, DstVT, Src);

  SDValue Select = FpToInt;

  // If Src ULT MinFloat, select MinInt. In particular, this also selects
  // MinInt if Src is NaN.
  Select = DAG.getSelectCC(dl, Src, MinFloatNode, MinIntNode, Select,
                           ISD::CondCode::SETULT);
  // If Src OGT MaxFloat, select MaxInt.
  Select = DAG.getSelectCC(dl, Src, MaxFloatNode, MaxIntNode, Select,
                           ISD::CondCode::SETOGT);

  // In the unsigned case we are done, because we mapped NaN to MinInt, which
  // is already zero.
  if (!IsSigned)
    return Select;

  // Otherwise, select 0 if Src is NaN.
  SDValue ZeroInt = DAG.getConstant(0, dl, DstVT);
  return DAG.getSelectCC(dl, Src, Src, ZeroInt, Select, ISD::CondCode::SETUO);
}