//===-- TargetLowering.cpp - Implement the TargetLowering class -----------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This implements the TargetLowering class.
//
//===----------------------------------------------------------------------===//

#include "llvm/CodeGen/TargetLowering.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/CodeGen/CallingConvLower.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineJumpTableInfo.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/SelectionDAG.h"
#include "llvm/CodeGen/TargetRegisterInfo.h"
#include "llvm/CodeGen/TargetSubtargetInfo.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/GlobalVariable.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/MC/MCAsmInfo.h"
#include "llvm/MC/MCExpr.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/KnownBits.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/Target/TargetLoweringObjectFile.h"
#include "llvm/Target/TargetMachine.h"
#include <cctype>
using namespace llvm;

/// NOTE: The TargetMachine owns TLOF.
TargetLowering::TargetLowering(const TargetMachine &tm)
    : TargetLoweringBase(tm) {}

const char *TargetLowering::getTargetNodeName(unsigned Opcode) const {
  return nullptr;
}

bool TargetLowering::isPositionIndependent() const {
  return getTargetMachine().isPositionIndependent();
}

/// Check whether a given call node is in tail position within its function. If
/// so, it sets Chain to the input chain of the tail call.
bool TargetLowering::isInTailCallPosition(SelectionDAG &DAG, SDNode *Node,
                                          SDValue &Chain) const {
  const Function &F = DAG.getMachineFunction().getFunction();

  // First, check if tail calls have been disabled in this function.
  if (F.getFnAttribute("disable-tail-calls").getValueAsString() == "true")
    return false;

  // Conservatively require the attributes of the call to match those of
  // the return. Ignore NoAlias and NonNull because they don't affect the
  // call sequence.
  AttributeList CallerAttrs = F.getAttributes();
  if (AttrBuilder(CallerAttrs, AttributeList::ReturnIndex)
          .removeAttribute(Attribute::NoAlias)
          .removeAttribute(Attribute::NonNull)
          .hasAttributes())
    return false;

  // It's not safe to eliminate the sign / zero extension of the return value.
  if (CallerAttrs.hasAttribute(AttributeList::ReturnIndex, Attribute::ZExt) ||
      CallerAttrs.hasAttribute(AttributeList::ReturnIndex, Attribute::SExt))
    return false;

  // Check if the only use is a function return node.
  return isUsedByReturnOnly(Node, Chain);
}

bool TargetLowering::parametersInCSRMatch(
    const MachineRegisterInfo &MRI, const uint32_t *CallerPreservedMask,
    const SmallVectorImpl<CCValAssign> &ArgLocs,
    const SmallVectorImpl<SDValue> &OutVals) const {
  for (unsigned I = 0, E = ArgLocs.size(); I != E; ++I) {
    const CCValAssign &ArgLoc = ArgLocs[I];
    if (!ArgLoc.isRegLoc())
      continue;
    MCRegister Reg = ArgLoc.getLocReg();
    // Only look at callee saved registers.
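    // (clobbersPhysReg is true when the caller's register mask does NOT
    // preserve Reg, so everything except registers that survive the call is
    // skipped here.)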
    if (MachineOperand::clobbersPhysReg(CallerPreservedMask, Reg))
      continue;
    // Check that we pass the value used for the caller.
    // (We look for a CopyFromReg reading a virtual register that is used
    // for the function live-in value of register Reg)
    SDValue Value = OutVals[I];
    if (Value->getOpcode() != ISD::CopyFromReg)
      return false;
    MCRegister ArgReg = cast<RegisterSDNode>(Value->getOperand(1))->getReg();
    if (MRI.getLiveInPhysReg(ArgReg) != Reg)
      return false;
  }
  return true;
}

/// Set CallLoweringInfo attribute flags based on a call instruction
/// and called function attributes.
void TargetLoweringBase::ArgListEntry::setAttributes(const CallBase *Call,
                                                     unsigned ArgIdx) {
  IsSExt = Call->paramHasAttr(ArgIdx, Attribute::SExt);
  IsZExt = Call->paramHasAttr(ArgIdx, Attribute::ZExt);
  IsInReg = Call->paramHasAttr(ArgIdx, Attribute::InReg);
  IsSRet = Call->paramHasAttr(ArgIdx, Attribute::StructRet);
  IsNest = Call->paramHasAttr(ArgIdx, Attribute::Nest);
  IsByVal = Call->paramHasAttr(ArgIdx, Attribute::ByVal);
  IsPreallocated = Call->paramHasAttr(ArgIdx, Attribute::Preallocated);
  IsInAlloca = Call->paramHasAttr(ArgIdx, Attribute::InAlloca);
  IsReturned = Call->paramHasAttr(ArgIdx, Attribute::Returned);
  IsSwiftSelf = Call->paramHasAttr(ArgIdx, Attribute::SwiftSelf);
  IsSwiftError = Call->paramHasAttr(ArgIdx, Attribute::SwiftError);
  Alignment = Call->getParamAlign(ArgIdx);
  ByValType = nullptr;
  if (IsByVal)
    ByValType = Call->getParamByValType(ArgIdx);
  PreallocatedType = nullptr;
  if (IsPreallocated)
    PreallocatedType = Call->getParamPreallocatedType(ArgIdx);
}

/// Generate a libcall taking the given operands as arguments and returning a
/// result of type RetVT.
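/// If no input chain is given, the DAG entry node is used. The returned pair
/// is {call result, output chain}. An illustrative use from a lowering hook
/// (names assumed, not taken from this file) looks like:
///   std::pair<SDValue, SDValue> R =
///       makeLibCall(DAG, RTLIB::SIN_F32, MVT::f32, Ops, CallOptions, dl);
///   SDValue Result = R.first, OutChain = R.second;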
std::pair<SDValue, SDValue>
TargetLowering::makeLibCall(SelectionDAG &DAG, RTLIB::Libcall LC, EVT RetVT,
                            ArrayRef<SDValue> Ops,
                            MakeLibCallOptions CallOptions,
                            const SDLoc &dl,
                            SDValue InChain) const {
  if (!InChain)
    InChain = DAG.getEntryNode();

  TargetLowering::ArgListTy Args;
  Args.reserve(Ops.size());

  TargetLowering::ArgListEntry Entry;
  for (unsigned i = 0; i < Ops.size(); ++i) {
    SDValue NewOp = Ops[i];
    Entry.Node = NewOp;
    Entry.Ty = Entry.Node.getValueType().getTypeForEVT(*DAG.getContext());
    Entry.IsSExt = shouldSignExtendTypeInLibCall(NewOp.getValueType(),
                                                 CallOptions.IsSExt);
    Entry.IsZExt = !Entry.IsSExt;

    if (CallOptions.IsSoften &&
        !shouldExtendTypeInLibCall(CallOptions.OpsVTBeforeSoften[i])) {
      Entry.IsSExt = Entry.IsZExt = false;
    }
    Args.push_back(Entry);
  }

  if (LC == RTLIB::UNKNOWN_LIBCALL)
    report_fatal_error("Unsupported library call operation!");
  SDValue Callee = DAG.getExternalSymbol(getLibcallName(LC),
                                         getPointerTy(DAG.getDataLayout()));

  Type *RetTy = RetVT.getTypeForEVT(*DAG.getContext());
  TargetLowering::CallLoweringInfo CLI(DAG);
  bool signExtend = shouldSignExtendTypeInLibCall(RetVT, CallOptions.IsSExt);
  bool zeroExtend = !signExtend;

  if (CallOptions.IsSoften &&
      !shouldExtendTypeInLibCall(CallOptions.RetVTBeforeSoften)) {
    signExtend = zeroExtend = false;
  }

  CLI.setDebugLoc(dl)
      .setChain(InChain)
      .setLibCallee(getLibcallCallingConv(LC), RetTy, Callee, std::move(Args))
      .setNoReturn(CallOptions.DoesNotReturn)
      .setDiscardResult(!CallOptions.IsReturnValueUsed)
      .setIsPostTypeLegalization(CallOptions.IsPostTypeLegalization)
      .setSExtResult(signExtend)
      .setZExtResult(zeroExtend);
  return LowerCallTo(CLI);
}

bool TargetLowering::findOptimalMemOpLowering(
    std::vector<EVT> &MemOps, unsigned Limit, const MemOp &Op, unsigned DstAS,
    unsigned SrcAS, const AttributeList &FuncAttributes) const {
  if (Op.isMemcpyWithFixedDstAlign() && Op.getSrcAlign() < Op.getDstAlign())
    return false;

  EVT VT = getOptimalMemOpType(Op, FuncAttributes);

  if (VT == MVT::Other) {
    // Use the largest integer type whose alignment constraints are satisfied.
    // We only need to check DstAlign here as SrcAlign is always greater or
    // equal to DstAlign (or zero).
    VT = MVT::i64;
    if (Op.isFixedDstAlign())
      while (
          Op.getDstAlign() < (VT.getSizeInBits() / 8) &&
          !allowsMisalignedMemoryAccesses(VT, DstAS, Op.getDstAlign().value()))
        VT = (MVT::SimpleValueType)(VT.getSimpleVT().SimpleTy - 1);
    assert(VT.isInteger());

    // Find the largest legal integer type.
    MVT LVT = MVT::i64;
    while (!isTypeLegal(LVT))
      LVT = (MVT::SimpleValueType)(LVT.SimpleTy - 1);
    assert(LVT.isInteger());

    // If the type we've chosen is larger than the largest legal integer type
    // then use that instead.
    if (VT.bitsGT(LVT))
      VT = LVT;
  }

  unsigned NumMemOps = 0;
  uint64_t Size = Op.size();
  while (Size) {
    unsigned VTSize = VT.getSizeInBits() / 8;
    while (VTSize > Size) {
      // For now, only use non-vector loads / stores for the left-over pieces.
      EVT NewVT = VT;
      unsigned NewVTSize;

      bool Found = false;
      if (VT.isVector() || VT.isFloatingPoint()) {
        NewVT = (VT.getSizeInBits() > 64) ?
                    MVT::i64 : MVT::i32;
        if (isOperationLegalOrCustom(ISD::STORE, NewVT) &&
            isSafeMemOpType(NewVT.getSimpleVT()))
          Found = true;
        else if (NewVT == MVT::i64 &&
                 isOperationLegalOrCustom(ISD::STORE, MVT::f64) &&
                 isSafeMemOpType(MVT::f64)) {
          // i64 is usually not legal on 32-bit targets, but f64 may be.
          NewVT = MVT::f64;
          Found = true;
        }
      }

      if (!Found) {
        do {
          NewVT = (MVT::SimpleValueType)(NewVT.getSimpleVT().SimpleTy - 1);
          if (NewVT == MVT::i8)
            break;
        } while (!isSafeMemOpType(NewVT.getSimpleVT()));
      }
      NewVTSize = NewVT.getSizeInBits() / 8;

      // If the new VT cannot cover all of the remaining bits, then consider
      // issuing a (or a pair of) unaligned and overlapping load / store.
      bool Fast;
      if (NumMemOps && Op.allowOverlap() && NewVTSize < Size &&
          allowsMisalignedMemoryAccesses(
              VT, DstAS, Op.isFixedDstAlign() ? Op.getDstAlign().value() : 0,
              MachineMemOperand::MONone, &Fast) &&
          Fast)
        VTSize = Size;
      else {
        VT = NewVT;
        VTSize = NewVTSize;
      }
    }

    if (++NumMemOps > Limit)
      return false;

    MemOps.push_back(VT);
    Size -= VTSize;
  }

  return true;
}

/// Soften the operands of a comparison. This code is shared among BR_CC,
/// SELECT_CC, and SETCC handlers.
void TargetLowering::softenSetCCOperands(SelectionDAG &DAG, EVT VT,
                                         SDValue &NewLHS, SDValue &NewRHS,
                                         ISD::CondCode &CCCode,
                                         const SDLoc &dl, const SDValue OldLHS,
                                         const SDValue OldRHS) const {
  SDValue Chain;
  return softenSetCCOperands(DAG, VT, NewLHS, NewRHS, CCCode, dl, OldLHS,
                             OldRHS, Chain);
}

void TargetLowering::softenSetCCOperands(SelectionDAG &DAG, EVT VT,
                                         SDValue &NewLHS, SDValue &NewRHS,
                                         ISD::CondCode &CCCode,
                                         const SDLoc &dl, const SDValue OldLHS,
                                         const SDValue OldRHS,
                                         SDValue &Chain,
                                         bool IsSignaling) const {
  // FIXME: Currently we cannot really respect all IEEE predicates due to
  // libgcc not supporting them. We can update this code when libgcc provides
  // such functions.

  assert((VT == MVT::f32 || VT == MVT::f64 || VT == MVT::f128 ||
          VT == MVT::ppcf128) &&
         "Unsupported setcc type!");

  // Expand into one or more soft-fp libcall(s).
  RTLIB::Libcall LC1 = RTLIB::UNKNOWN_LIBCALL, LC2 = RTLIB::UNKNOWN_LIBCALL;
  bool ShouldInvertCC = false;
  switch (CCCode) {
  case ISD::SETEQ:
  case ISD::SETOEQ:
    LC1 = (VT == MVT::f32) ? RTLIB::OEQ_F32 :
          (VT == MVT::f64) ? RTLIB::OEQ_F64 :
          (VT == MVT::f128) ? RTLIB::OEQ_F128 : RTLIB::OEQ_PPCF128;
    break;
  case ISD::SETNE:
  case ISD::SETUNE:
    LC1 = (VT == MVT::f32) ? RTLIB::UNE_F32 :
          (VT == MVT::f64) ? RTLIB::UNE_F64 :
          (VT == MVT::f128) ? RTLIB::UNE_F128 : RTLIB::UNE_PPCF128;
    break;
  case ISD::SETGE:
  case ISD::SETOGE:
    LC1 = (VT == MVT::f32) ? RTLIB::OGE_F32 :
          (VT == MVT::f64) ? RTLIB::OGE_F64 :
          (VT == MVT::f128) ? RTLIB::OGE_F128 : RTLIB::OGE_PPCF128;
    break;
  case ISD::SETLT:
  case ISD::SETOLT:
    LC1 = (VT == MVT::f32) ? RTLIB::OLT_F32 :
          (VT == MVT::f64) ? RTLIB::OLT_F64 :
          (VT == MVT::f128) ? RTLIB::OLT_F128 : RTLIB::OLT_PPCF128;
    break;
  case ISD::SETLE:
  case ISD::SETOLE:
    LC1 = (VT == MVT::f32) ? RTLIB::OLE_F32 :
          (VT == MVT::f64) ? RTLIB::OLE_F64 :
          (VT == MVT::f128) ? RTLIB::OLE_F128 : RTLIB::OLE_PPCF128;
    break;
  case ISD::SETGT:
  case ISD::SETOGT:
    LC1 = (VT == MVT::f32) ? RTLIB::OGT_F32 :
          (VT == MVT::f64) ?
                             RTLIB::OGT_F64 :
          (VT == MVT::f128) ? RTLIB::OGT_F128 : RTLIB::OGT_PPCF128;
    break;
  case ISD::SETO:
    ShouldInvertCC = true;
    LLVM_FALLTHROUGH;
  case ISD::SETUO:
    LC1 = (VT == MVT::f32) ? RTLIB::UO_F32 :
          (VT == MVT::f64) ? RTLIB::UO_F64 :
          (VT == MVT::f128) ? RTLIB::UO_F128 : RTLIB::UO_PPCF128;
    break;
  case ISD::SETONE:
    // SETONE = O && UNE
    ShouldInvertCC = true;
    LLVM_FALLTHROUGH;
  case ISD::SETUEQ:
    LC1 = (VT == MVT::f32) ? RTLIB::UO_F32 :
          (VT == MVT::f64) ? RTLIB::UO_F64 :
          (VT == MVT::f128) ? RTLIB::UO_F128 : RTLIB::UO_PPCF128;
    LC2 = (VT == MVT::f32) ? RTLIB::OEQ_F32 :
          (VT == MVT::f64) ? RTLIB::OEQ_F64 :
          (VT == MVT::f128) ? RTLIB::OEQ_F128 : RTLIB::OEQ_PPCF128;
    break;
  default:
    // Invert CC for unordered comparisons
    ShouldInvertCC = true;
    switch (CCCode) {
    case ISD::SETULT:
      LC1 = (VT == MVT::f32) ? RTLIB::OGE_F32 :
            (VT == MVT::f64) ? RTLIB::OGE_F64 :
            (VT == MVT::f128) ? RTLIB::OGE_F128 : RTLIB::OGE_PPCF128;
      break;
    case ISD::SETULE:
      LC1 = (VT == MVT::f32) ? RTLIB::OGT_F32 :
            (VT == MVT::f64) ? RTLIB::OGT_F64 :
            (VT == MVT::f128) ? RTLIB::OGT_F128 : RTLIB::OGT_PPCF128;
      break;
    case ISD::SETUGT:
      LC1 = (VT == MVT::f32) ? RTLIB::OLE_F32 :
            (VT == MVT::f64) ? RTLIB::OLE_F64 :
            (VT == MVT::f128) ? RTLIB::OLE_F128 : RTLIB::OLE_PPCF128;
      break;
    case ISD::SETUGE:
      LC1 = (VT == MVT::f32) ? RTLIB::OLT_F32 :
            (VT == MVT::f64) ? RTLIB::OLT_F64 :
            (VT == MVT::f128) ? RTLIB::OLT_F128 : RTLIB::OLT_PPCF128;
      break;
    default: llvm_unreachable("Do not know how to soften this setcc!");
    }
  }

  // Use the target-specific return value for comparison lib calls.
  EVT RetVT = getCmpLibcallReturnType();
  SDValue Ops[2] = {NewLHS, NewRHS};
  TargetLowering::MakeLibCallOptions CallOptions;
  EVT OpsVT[2] = { OldLHS.getValueType(),
                   OldRHS.getValueType() };
  CallOptions.setTypeListBeforeSoften(OpsVT, RetVT, true);
  auto Call = makeLibCall(DAG, LC1, RetVT, Ops, CallOptions, dl, Chain);
  NewLHS = Call.first;
  NewRHS = DAG.getConstant(0, dl, RetVT);

  CCCode = getCmpLibcallCC(LC1);
  if (ShouldInvertCC) {
    assert(RetVT.isInteger());
    CCCode = getSetCCInverse(CCCode, RetVT);
  }

  if (LC2 == RTLIB::UNKNOWN_LIBCALL) {
    // Update Chain.
    Chain = Call.second;
  } else {
    EVT SetCCVT =
        getSetCCResultType(DAG.getDataLayout(), *DAG.getContext(), RetVT);
    SDValue Tmp = DAG.getSetCC(dl, SetCCVT, NewLHS, NewRHS, CCCode);
    auto Call2 = makeLibCall(DAG, LC2, RetVT, Ops, CallOptions, dl, Chain);
    CCCode = getCmpLibcallCC(LC2);
    if (ShouldInvertCC)
      CCCode = getSetCCInverse(CCCode, RetVT);
    NewLHS = DAG.getSetCC(dl, SetCCVT, Call2.first, NewRHS, CCCode);
    if (Chain)
      Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, Call.second,
                          Call2.second);
    NewLHS = DAG.getNode(ShouldInvertCC ? ISD::AND : ISD::OR, dl,
                         Tmp.getValueType(), Tmp, NewLHS);
    NewRHS = SDValue();
  }
}

/// Return the entry encoding for a jump table in the current function. The
/// returned value is a member of the MachineJumpTableInfo::JTEntryKind enum.
unsigned TargetLowering::getJumpTableEncoding() const {
  // In non-PIC modes, just use the address of a block.
  if (!isPositionIndependent())
    return MachineJumpTableInfo::EK_BlockAddress;

  // In PIC mode, if the target supports a GPRel32 directive, use it.
  if (getTargetMachine().getMCAsmInfo()->getGPRel32Directive() != nullptr)
    return MachineJumpTableInfo::EK_GPRel32BlockAddress;

  // Otherwise, use a label difference.
  return MachineJumpTableInfo::EK_LabelDifference32;
}

SDValue TargetLowering::getPICJumpTableRelocBase(SDValue Table,
                                                 SelectionDAG &DAG) const {
  // If our PIC model is GP relative, use the global offset table as the base.
  unsigned JTEncoding = getJumpTableEncoding();

  if ((JTEncoding == MachineJumpTableInfo::EK_GPRel64BlockAddress) ||
      (JTEncoding == MachineJumpTableInfo::EK_GPRel32BlockAddress))
    return DAG.getGLOBAL_OFFSET_TABLE(getPointerTy(DAG.getDataLayout()));

  return Table;
}

/// This returns the relocation base for the given PIC jumptable, the same as
/// getPICJumpTableRelocBase, but as an MCExpr.
const MCExpr *
TargetLowering::getPICJumpTableRelocBaseExpr(const MachineFunction *MF,
                                             unsigned JTI,
                                             MCContext &Ctx) const {
  // The normal PIC reloc base is the label at the start of the jump table.
  return MCSymbolRefExpr::create(MF->getJTISymbol(JTI, Ctx), Ctx);
}

bool
TargetLowering::isOffsetFoldingLegal(const GlobalAddressSDNode *GA) const {
  const TargetMachine &TM = getTargetMachine();
  const GlobalValue *GV = GA->getGlobal();

  // If the address is not even local to this DSO we will have to load it from
  // a GOT and then add the offset.
  if (!TM.shouldAssumeDSOLocal(*GV->getParent(), GV))
    return false;

  // If the code is position independent we will have to add a base register.
  if (isPositionIndependent())
    return false;

  // Otherwise we can do it.
  return true;
}

//===----------------------------------------------------------------------===//
// Optimization Methods
//===----------------------------------------------------------------------===//

/// If the specified instruction has a constant integer operand and there are
/// bits set in that constant that are not demanded, then clear those bits and
/// return true.
bool TargetLowering::ShrinkDemandedConstant(SDValue Op, const APInt &Demanded,
                                            TargetLoweringOpt &TLO) const {
  SDLoc DL(Op);
  unsigned Opcode = Op.getOpcode();

  // Do target-specific constant optimization.
  if (targetShrinkDemandedConstant(Op, Demanded, TLO))
    return TLO.New.getNode();

  // FIXME: ISD::SELECT, ISD::SELECT_CC
  switch (Opcode) {
  default:
    break;
  case ISD::XOR:
  case ISD::AND:
  case ISD::OR: {
    auto *Op1C = dyn_cast<ConstantSDNode>(Op.getOperand(1));
    if (!Op1C)
      return false;

    // If this is a 'not' op, don't touch it because that's a canonical form.
    const APInt &C = Op1C->getAPIntValue();
    if (Opcode == ISD::XOR && Demanded.isSubsetOf(C))
      return false;

    if (!C.isSubsetOf(Demanded)) {
      EVT VT = Op.getValueType();
      SDValue NewC = TLO.DAG.getConstant(Demanded & C, DL, VT);
      SDValue NewOp = TLO.DAG.getNode(Opcode, DL, VT, Op.getOperand(0), NewC);
      return TLO.CombineTo(Op, NewOp);
    }

    break;
  }
  }

  return false;
}

/// Convert x+y to (VT)((SmallVT)x+(SmallVT)y) if the casts are free.
/// This uses isZExtFree and ZERO_EXTEND for the widening cast, but it could be
/// generalized for targets with other types of implicit widening casts.
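/// Illustrative example (not from this file): if only the low 16 bits of an
/// i32 'add' are demanded and the i16 <-> i32 casts are free on the target,
/// the add can be rewritten as
///   (i32 any_extend (i16 add (trunc x), (trunc y)))
/// where the chosen width is the smallest power of 2 with free casts that
/// still covers the demanded bits.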
bool TargetLowering::ShrinkDemandedOp(SDValue Op, unsigned BitWidth,
                                      const APInt &Demanded,
                                      TargetLoweringOpt &TLO) const {
  assert(Op.getNumOperands() == 2 &&
         "ShrinkDemandedOp only supports binary operators!");
  assert(Op.getNode()->getNumValues() == 1 &&
         "ShrinkDemandedOp only supports nodes with one result!");

  SelectionDAG &DAG = TLO.DAG;
  SDLoc dl(Op);

  // Early return, as this function cannot handle vector types.
  if (Op.getValueType().isVector())
    return false;

  // Don't do this if the node has another user, which may require the
  // full value.
  if (!Op.getNode()->hasOneUse())
    return false;

  // Search for the smallest integer type with free casts to and from
  // Op's type. For expedience, just check power-of-2 integer types.
  const TargetLowering &TLI = DAG.getTargetLoweringInfo();
  unsigned DemandedSize = Demanded.getActiveBits();
  unsigned SmallVTBits = DemandedSize;
  if (!isPowerOf2_32(SmallVTBits))
    SmallVTBits = NextPowerOf2(SmallVTBits);
  for (; SmallVTBits < BitWidth; SmallVTBits = NextPowerOf2(SmallVTBits)) {
    EVT SmallVT = EVT::getIntegerVT(*DAG.getContext(), SmallVTBits);
    if (TLI.isTruncateFree(Op.getValueType(), SmallVT) &&
        TLI.isZExtFree(SmallVT, Op.getValueType())) {
      // We found a type with free casts.
      SDValue X = DAG.getNode(
          Op.getOpcode(), dl, SmallVT,
          DAG.getNode(ISD::TRUNCATE, dl, SmallVT, Op.getOperand(0)),
          DAG.getNode(ISD::TRUNCATE, dl, SmallVT, Op.getOperand(1)));
      assert(DemandedSize <= SmallVTBits && "Narrowed below demanded bits?");
      SDValue Z = DAG.getNode(ISD::ANY_EXTEND, dl, Op.getValueType(), X);
      return TLO.CombineTo(Op, Z);
    }
  }
  return false;
}

bool TargetLowering::SimplifyDemandedBits(SDValue Op, const APInt &DemandedBits,
                                          DAGCombinerInfo &DCI) const {
  SelectionDAG &DAG = DCI.DAG;
  TargetLoweringOpt TLO(DAG, !DCI.isBeforeLegalize(),
                        !DCI.isBeforeLegalizeOps());
  KnownBits Known;

  bool Simplified = SimplifyDemandedBits(Op, DemandedBits, Known, TLO);
  if (Simplified) {
    DCI.AddToWorklist(Op.getNode());
    DCI.CommitTargetLoweringOpt(TLO);
  }
  return Simplified;
}

bool TargetLowering::SimplifyDemandedBits(SDValue Op, const APInt &DemandedBits,
                                          KnownBits &Known,
                                          TargetLoweringOpt &TLO,
                                          unsigned Depth,
                                          bool AssumeSingleUse) const {
  EVT VT = Op.getValueType();

  // TODO: We can probably do more work on calculating the known bits and
  // simplifying the operations for scalable vectors, but for now we just
  // bail out.
  if (VT.isScalableVector()) {
    // Pretend we don't know anything for now.
    Known = KnownBits(DemandedBits.getBitWidth());
    return false;
  }

  APInt DemandedElts = VT.isVector()
                           ? APInt::getAllOnesValue(VT.getVectorNumElements())
                           : APInt(1, 1);
  return SimplifyDemandedBits(Op, DemandedBits, DemandedElts, Known, TLO, Depth,
                              AssumeSingleUse);
}

// TODO: Can we merge SelectionDAG::GetDemandedBits into this?
// TODO: Under what circumstances can we create nodes? Constant folding?
SDValue TargetLowering::SimplifyMultipleUseDemandedBits(
    SDValue Op, const APInt &DemandedBits, const APInt &DemandedElts,
    SelectionDAG &DAG, unsigned Depth) const {
  // Limit search depth.
  if (Depth >= SelectionDAG::MaxRecursionDepth)
    return SDValue();

  // Ignore UNDEFs.
  if (Op.isUndef())
    return SDValue();

  // Not demanding any bits/elts from Op.
  if (DemandedBits == 0 || DemandedElts == 0)
    return DAG.getUNDEF(Op.getValueType());

  unsigned NumElts = DemandedElts.getBitWidth();
  unsigned BitWidth = DemandedBits.getBitWidth();
  KnownBits LHSKnown, RHSKnown;
  switch (Op.getOpcode()) {
  case ISD::BITCAST: {
    SDValue Src = peekThroughBitcasts(Op.getOperand(0));
    EVT SrcVT = Src.getValueType();
    EVT DstVT = Op.getValueType();
    if (SrcVT == DstVT)
      return Src;

    unsigned NumSrcEltBits = SrcVT.getScalarSizeInBits();
    unsigned NumDstEltBits = DstVT.getScalarSizeInBits();
    if (NumSrcEltBits == NumDstEltBits)
      if (SDValue V = SimplifyMultipleUseDemandedBits(
              Src, DemandedBits, DemandedElts, DAG, Depth + 1))
        return DAG.getBitcast(DstVT, V);

    // TODO - bigendian once we have test coverage.
    if (SrcVT.isVector() && (NumDstEltBits % NumSrcEltBits) == 0 &&
        DAG.getDataLayout().isLittleEndian()) {
      unsigned Scale = NumDstEltBits / NumSrcEltBits;
      unsigned NumSrcElts = SrcVT.getVectorNumElements();
      APInt DemandedSrcBits = APInt::getNullValue(NumSrcEltBits);
      APInt DemandedSrcElts = APInt::getNullValue(NumSrcElts);
      for (unsigned i = 0; i != Scale; ++i) {
        unsigned Offset = i * NumSrcEltBits;
        APInt Sub = DemandedBits.extractBits(NumSrcEltBits, Offset);
        if (!Sub.isNullValue()) {
          DemandedSrcBits |= Sub;
          for (unsigned j = 0; j != NumElts; ++j)
            if (DemandedElts[j])
              DemandedSrcElts.setBit((j * Scale) + i);
        }
      }

      if (SDValue V = SimplifyMultipleUseDemandedBits(
              Src, DemandedSrcBits, DemandedSrcElts, DAG, Depth + 1))
        return DAG.getBitcast(DstVT, V);
    }

    // TODO - bigendian once we have test coverage.
    if ((NumSrcEltBits % NumDstEltBits) == 0 &&
        DAG.getDataLayout().isLittleEndian()) {
      unsigned Scale = NumSrcEltBits / NumDstEltBits;
      unsigned NumSrcElts = SrcVT.isVector() ? SrcVT.getVectorNumElements() : 1;
      APInt DemandedSrcBits = APInt::getNullValue(NumSrcEltBits);
      APInt DemandedSrcElts = APInt::getNullValue(NumSrcElts);
      for (unsigned i = 0; i != NumElts; ++i)
        if (DemandedElts[i]) {
          unsigned Offset = (i % Scale) * NumDstEltBits;
          DemandedSrcBits.insertBits(DemandedBits, Offset);
          DemandedSrcElts.setBit(i / Scale);
        }

      if (SDValue V = SimplifyMultipleUseDemandedBits(
              Src, DemandedSrcBits, DemandedSrcElts, DAG, Depth + 1))
        return DAG.getBitcast(DstVT, V);
    }

    break;
  }
  case ISD::AND: {
    LHSKnown = DAG.computeKnownBits(Op.getOperand(0), DemandedElts, Depth + 1);
    RHSKnown = DAG.computeKnownBits(Op.getOperand(1), DemandedElts, Depth + 1);

    // If all of the demanded bits are known 1 on one side, return the other.
    // These bits cannot contribute to the result of the 'and' in this
    // context.
    if (DemandedBits.isSubsetOf(LHSKnown.Zero | RHSKnown.One))
      return Op.getOperand(0);
    if (DemandedBits.isSubsetOf(RHSKnown.Zero | LHSKnown.One))
      return Op.getOperand(1);
    break;
  }
  case ISD::OR: {
    LHSKnown = DAG.computeKnownBits(Op.getOperand(0), DemandedElts, Depth + 1);
    RHSKnown = DAG.computeKnownBits(Op.getOperand(1), DemandedElts, Depth + 1);

    // If all of the demanded bits are known zero on one side, return the
    // other. These bits cannot contribute to the result of the 'or' in this
    // context.
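    // e.g. if only the low byte is demanded and the RHS's low byte is known
    // zero, (or X, RHS) can be replaced by X for those bits.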
    if (DemandedBits.isSubsetOf(LHSKnown.One | RHSKnown.Zero))
      return Op.getOperand(0);
    if (DemandedBits.isSubsetOf(RHSKnown.One | LHSKnown.Zero))
      return Op.getOperand(1);
    break;
  }
  case ISD::XOR: {
    LHSKnown = DAG.computeKnownBits(Op.getOperand(0), DemandedElts, Depth + 1);
    RHSKnown = DAG.computeKnownBits(Op.getOperand(1), DemandedElts, Depth + 1);

    // If all of the demanded bits are known zero on one side, return the
    // other.
    if (DemandedBits.isSubsetOf(RHSKnown.Zero))
      return Op.getOperand(0);
    if (DemandedBits.isSubsetOf(LHSKnown.Zero))
      return Op.getOperand(1);
    break;
  }
  case ISD::SHL: {
    // If we are only demanding sign bits then we can use the shift source
    // directly.
    if (const APInt *MaxSA =
            DAG.getValidMaximumShiftAmountConstant(Op, DemandedElts)) {
      SDValue Op0 = Op.getOperand(0);
      unsigned ShAmt = MaxSA->getZExtValue();
      unsigned NumSignBits =
          DAG.ComputeNumSignBits(Op0, DemandedElts, Depth + 1);
      unsigned UpperDemandedBits = BitWidth - DemandedBits.countTrailingZeros();
      if (NumSignBits > ShAmt && (NumSignBits - ShAmt) >= (UpperDemandedBits))
        return Op0;
    }
    break;
  }
  case ISD::SETCC: {
    SDValue Op0 = Op.getOperand(0);
    SDValue Op1 = Op.getOperand(1);
    ISD::CondCode CC = cast<CondCodeSDNode>(Op.getOperand(2))->get();
    // If (1) we only need the sign-bit, (2) the setcc operands are the same
    // width as the setcc result, and (3) the result of a setcc conforms to 0 or
    // -1, we may be able to bypass the setcc.
    if (DemandedBits.isSignMask() &&
        Op0.getScalarValueSizeInBits() == BitWidth &&
        getBooleanContents(Op0.getValueType()) ==
            BooleanContent::ZeroOrNegativeOneBooleanContent) {
      // If we're testing X < 0, then this compare isn't needed - just use X!
      // FIXME: We're limiting to integer types here, but this should also work
      // if we don't care about FP signed-zero. The use of SETLT with FP means
      // that we don't care about NaNs.
      if (CC == ISD::SETLT && Op1.getValueType().isInteger() &&
          (isNullConstant(Op1) || ISD::isBuildVectorAllZeros(Op1.getNode())))
        return Op0;
    }
    break;
  }
  case ISD::SIGN_EXTEND_INREG: {
    // If none of the extended bits are demanded, eliminate the sextinreg.
    SDValue Op0 = Op.getOperand(0);
    EVT ExVT = cast<VTSDNode>(Op.getOperand(1))->getVT();
    unsigned ExBits = ExVT.getScalarSizeInBits();
    if (DemandedBits.getActiveBits() <= ExBits)
      return Op0;
    // If the input is already sign extended, just drop the extension.
    unsigned NumSignBits = DAG.ComputeNumSignBits(Op0, DemandedElts, Depth + 1);
    if (NumSignBits >= (BitWidth - ExBits + 1))
      return Op0;
    break;
  }
  case ISD::ANY_EXTEND_VECTOR_INREG:
  case ISD::SIGN_EXTEND_VECTOR_INREG:
  case ISD::ZERO_EXTEND_VECTOR_INREG: {
    // If we only want the lowest element and none of the extended bits, then
    // we can return the bitcasted source vector.
    SDValue Src = Op.getOperand(0);
    EVT SrcVT = Src.getValueType();
    EVT DstVT = Op.getValueType();
    if (DemandedElts == 1 && DstVT.getSizeInBits() == SrcVT.getSizeInBits() &&
        DAG.getDataLayout().isLittleEndian() &&
        DemandedBits.getActiveBits() <= SrcVT.getScalarSizeInBits()) {
      return DAG.getBitcast(DstVT, Src);
    }
    break;
  }
  case ISD::INSERT_VECTOR_ELT: {
    // If we don't demand the inserted element, return the base vector.
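    // e.g. (insert_vector_elt V, S, 2) can be replaced by V when element 2 is
    // not in DemandedElts.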
    SDValue Vec = Op.getOperand(0);
    auto *CIdx = dyn_cast<ConstantSDNode>(Op.getOperand(2));
    EVT VecVT = Vec.getValueType();
    if (CIdx && CIdx->getAPIntValue().ult(VecVT.getVectorNumElements()) &&
        !DemandedElts[CIdx->getZExtValue()])
      return Vec;
    break;
  }
  case ISD::INSERT_SUBVECTOR: {
    // If we don't demand the inserted subvector, return the base vector.
    SDValue Vec = Op.getOperand(0);
    SDValue Sub = Op.getOperand(1);
    uint64_t Idx = Op.getConstantOperandVal(2);
    unsigned NumSubElts = Sub.getValueType().getVectorNumElements();
    if (DemandedElts.extractBits(NumSubElts, Idx) == 0)
      return Vec;
    break;
  }
  case ISD::VECTOR_SHUFFLE: {
    ArrayRef<int> ShuffleMask = cast<ShuffleVectorSDNode>(Op)->getMask();

    // If all the demanded elts are from one operand and are inline,
    // then we can use the operand directly.
    bool AllUndef = true, IdentityLHS = true, IdentityRHS = true;
    for (unsigned i = 0; i != NumElts; ++i) {
      int M = ShuffleMask[i];
      if (M < 0 || !DemandedElts[i])
        continue;
      AllUndef = false;
      IdentityLHS &= (M == (int)i);
      IdentityRHS &= ((M - NumElts) == i);
    }

    if (AllUndef)
      return DAG.getUNDEF(Op.getValueType());
    if (IdentityLHS)
      return Op.getOperand(0);
    if (IdentityRHS)
      return Op.getOperand(1);
    break;
  }
  default:
    if (Op.getOpcode() >= ISD::BUILTIN_OP_END)
      if (SDValue V = SimplifyMultipleUseDemandedBitsForTargetNode(
              Op, DemandedBits, DemandedElts, DAG, Depth))
        return V;
    break;
  }
  return SDValue();
}

SDValue TargetLowering::SimplifyMultipleUseDemandedBits(
    SDValue Op, const APInt &DemandedBits, SelectionDAG &DAG,
    unsigned Depth) const {
  EVT VT = Op.getValueType();
  APInt DemandedElts = VT.isVector()
                           ? APInt::getAllOnesValue(VT.getVectorNumElements())
                           : APInt(1, 1);
  return SimplifyMultipleUseDemandedBits(Op, DemandedBits, DemandedElts, DAG,
                                         Depth);
}

/// Look at Op. At this point, we know that only the OriginalDemandedBits of the
/// result of Op are ever used downstream. If we can use this information to
/// simplify Op, create a new simplified DAG node and return true, returning the
/// original and new nodes in Old and New. Otherwise, analyze the expression and
/// return a mask of Known bits for the expression (used to simplify the
/// caller). The Known bits may only be accurate for those bits in the
/// OriginalDemandedBits and OriginalDemandedElts.
bool TargetLowering::SimplifyDemandedBits(
    SDValue Op, const APInt &OriginalDemandedBits,
    const APInt &OriginalDemandedElts, KnownBits &Known, TargetLoweringOpt &TLO,
    unsigned Depth, bool AssumeSingleUse) const {
  unsigned BitWidth = OriginalDemandedBits.getBitWidth();
  assert(Op.getScalarValueSizeInBits() == BitWidth &&
         "Mask size mismatches value type size!");

  // Don't know anything.
  Known = KnownBits(BitWidth);

  // TODO: We can probably do more work on calculating the known bits and
  // simplifying the operations for scalable vectors, but for now we just
  // bail out.
  if (Op.getValueType().isScalableVector())
    return false;

  unsigned NumElts = OriginalDemandedElts.getBitWidth();
  assert((!Op.getValueType().isVector() ||
          NumElts == Op.getValueType().getVectorNumElements()) &&
         "Unexpected vector size");

  APInt DemandedBits = OriginalDemandedBits;
  APInt DemandedElts = OriginalDemandedElts;
  SDLoc dl(Op);
  auto &DL = TLO.DAG.getDataLayout();

  // Undef operand.
  if (Op.isUndef())
    return false;

  if (Op.getOpcode() == ISD::Constant) {
    // We know all of the bits for a constant!
    Known.One = cast<ConstantSDNode>(Op)->getAPIntValue();
    Known.Zero = ~Known.One;
    return false;
  }

  // Other users may use these bits.
  EVT VT = Op.getValueType();
  if (!Op.getNode()->hasOneUse() && !AssumeSingleUse) {
    if (Depth != 0) {
      // If not at the root, just compute the Known bits to
      // simplify things downstream.
      Known = TLO.DAG.computeKnownBits(Op, DemandedElts, Depth);
      return false;
    }
    // If this is the root being simplified, allow it to have multiple uses,
    // just set the DemandedBits/Elts to all bits.
    DemandedBits = APInt::getAllOnesValue(BitWidth);
    DemandedElts = APInt::getAllOnesValue(NumElts);
  } else if (OriginalDemandedBits == 0 || OriginalDemandedElts == 0) {
    // Not demanding any bits/elts from Op.
    return TLO.CombineTo(Op, TLO.DAG.getUNDEF(VT));
  } else if (Depth >= SelectionDAG::MaxRecursionDepth) {
    // Limit search depth.
    return false;
  }

  KnownBits Known2;
  switch (Op.getOpcode()) {
  case ISD::TargetConstant:
    llvm_unreachable("Can't simplify this node");
  case ISD::SCALAR_TO_VECTOR: {
    if (!DemandedElts[0])
      return TLO.CombineTo(Op, TLO.DAG.getUNDEF(VT));

    KnownBits SrcKnown;
    SDValue Src = Op.getOperand(0);
    unsigned SrcBitWidth = Src.getScalarValueSizeInBits();
    APInt SrcDemandedBits = DemandedBits.zextOrSelf(SrcBitWidth);
    if (SimplifyDemandedBits(Src, SrcDemandedBits, SrcKnown, TLO, Depth + 1))
      return true;

    // Upper elements are undef, so only get the knownbits if we just demand
    // the bottom element.
    if (DemandedElts == 1)
      Known = SrcKnown.anyextOrTrunc(BitWidth);
    break;
  }
  case ISD::BUILD_VECTOR:
    // Collect the known bits that are shared by every demanded element.
    // TODO: Call SimplifyDemandedBits for non-constant demanded elements.
    Known = TLO.DAG.computeKnownBits(Op, DemandedElts, Depth);
    return false; // Don't fall through, will infinitely loop.
  case ISD::LOAD: {
    LoadSDNode *LD = cast<LoadSDNode>(Op);
    if (getTargetConstantFromLoad(LD)) {
      Known = TLO.DAG.computeKnownBits(Op, DemandedElts, Depth);
      return false; // Don't fall through, will infinitely loop.
    } else if (ISD::isZEXTLoad(Op.getNode()) && Op.getResNo() == 0) {
      // If this is a ZEXTLoad and we are looking at the loaded value.
      EVT MemVT = LD->getMemoryVT();
      unsigned MemBits = MemVT.getScalarSizeInBits();
      Known.Zero.setBitsFrom(MemBits);
      return false; // Don't fall through, will infinitely loop.
    }
    break;
  }
  case ISD::INSERT_VECTOR_ELT: {
    SDValue Vec = Op.getOperand(0);
    SDValue Scl = Op.getOperand(1);
    auto *CIdx = dyn_cast<ConstantSDNode>(Op.getOperand(2));
    EVT VecVT = Vec.getValueType();

    // If index isn't constant, assume we need all vector elements AND the
    // inserted element.
    APInt DemandedVecElts(DemandedElts);
    if (CIdx && CIdx->getAPIntValue().ult(VecVT.getVectorNumElements())) {
      unsigned Idx = CIdx->getZExtValue();
      DemandedVecElts.clearBit(Idx);

      // Inserted element is not required.
      if (!DemandedElts[Idx])
        return TLO.CombineTo(Op, Vec);
    }

    KnownBits KnownScl;
    unsigned NumSclBits = Scl.getScalarValueSizeInBits();
    APInt DemandedSclBits = DemandedBits.zextOrTrunc(NumSclBits);
    if (SimplifyDemandedBits(Scl, DemandedSclBits, KnownScl, TLO, Depth + 1))
      return true;

    Known = KnownScl.anyextOrTrunc(BitWidth);

    KnownBits KnownVec;
    if (SimplifyDemandedBits(Vec, DemandedBits, DemandedVecElts, KnownVec, TLO,
                             Depth + 1))
      return true;

    if (!!DemandedVecElts) {
      Known.One &= KnownVec.One;
      Known.Zero &= KnownVec.Zero;
    }

    return false;
  }
  case ISD::INSERT_SUBVECTOR: {
    // Demand any elements from the subvector and the remainder from the src
    // it's inserted into.
    SDValue Src = Op.getOperand(0);
    SDValue Sub = Op.getOperand(1);
    uint64_t Idx = Op.getConstantOperandVal(2);
    unsigned NumSubElts = Sub.getValueType().getVectorNumElements();
    APInt DemandedSubElts = DemandedElts.extractBits(NumSubElts, Idx);
    APInt DemandedSrcElts = DemandedElts;
    DemandedSrcElts.insertBits(APInt::getNullValue(NumSubElts), Idx);

    KnownBits KnownSub, KnownSrc;
    if (SimplifyDemandedBits(Sub, DemandedBits, DemandedSubElts, KnownSub, TLO,
                             Depth + 1))
      return true;
    if (SimplifyDemandedBits(Src, DemandedBits, DemandedSrcElts, KnownSrc, TLO,
                             Depth + 1))
      return true;

    Known.Zero.setAllBits();
    Known.One.setAllBits();
    if (!!DemandedSubElts) {
      Known.One &= KnownSub.One;
      Known.Zero &= KnownSub.Zero;
    }
    if (!!DemandedSrcElts) {
      Known.One &= KnownSrc.One;
      Known.Zero &= KnownSrc.Zero;
    }

    // Attempt to avoid multi-use src if we don't need anything from it.
    if (!DemandedBits.isAllOnesValue() || !DemandedSubElts.isAllOnesValue() ||
        !DemandedSrcElts.isAllOnesValue()) {
      SDValue NewSub = SimplifyMultipleUseDemandedBits(
          Sub, DemandedBits, DemandedSubElts, TLO.DAG, Depth + 1);
      SDValue NewSrc = SimplifyMultipleUseDemandedBits(
          Src, DemandedBits, DemandedSrcElts, TLO.DAG, Depth + 1);
      if (NewSub || NewSrc) {
        NewSub = NewSub ? NewSub : Sub;
        NewSrc = NewSrc ? NewSrc : Src;
        SDValue NewOp = TLO.DAG.getNode(Op.getOpcode(), dl, VT, NewSrc, NewSub,
                                        Op.getOperand(2));
        return TLO.CombineTo(Op, NewOp);
      }
    }
    break;
  }
  case ISD::EXTRACT_SUBVECTOR: {
    // Offset the demanded elts by the subvector index.
    SDValue Src = Op.getOperand(0);
    uint64_t Idx = Op.getConstantOperandVal(1);
    unsigned NumSrcElts = Src.getValueType().getVectorNumElements();
    APInt DemandedSrcElts = DemandedElts.zextOrSelf(NumSrcElts).shl(Idx);

    if (SimplifyDemandedBits(Src, DemandedBits, DemandedSrcElts, Known, TLO,
                             Depth + 1))
      return true;

    // Attempt to avoid multi-use src if we don't need anything from it.
    if (!DemandedBits.isAllOnesValue() || !DemandedSrcElts.isAllOnesValue()) {
      SDValue DemandedSrc = SimplifyMultipleUseDemandedBits(
          Src, DemandedBits, DemandedSrcElts, TLO.DAG, Depth + 1);
      if (DemandedSrc) {
        SDValue NewOp = TLO.DAG.getNode(Op.getOpcode(), dl, VT, DemandedSrc,
                                        Op.getOperand(1));
        return TLO.CombineTo(Op, NewOp);
      }
    }
    break;
  }
  case ISD::CONCAT_VECTORS: {
    Known.Zero.setAllBits();
    Known.One.setAllBits();
    EVT SubVT = Op.getOperand(0).getValueType();
    unsigned NumSubVecs = Op.getNumOperands();
    unsigned NumSubElts = SubVT.getVectorNumElements();
    for (unsigned i = 0; i != NumSubVecs; ++i) {
      APInt DemandedSubElts =
          DemandedElts.extractBits(NumSubElts, i * NumSubElts);
      if (SimplifyDemandedBits(Op.getOperand(i), DemandedBits, DemandedSubElts,
                               Known2, TLO, Depth + 1))
        return true;
      // Known bits are shared by every demanded subvector element.
      if (!!DemandedSubElts) {
        Known.One &= Known2.One;
        Known.Zero &= Known2.Zero;
      }
    }
    break;
  }
  case ISD::VECTOR_SHUFFLE: {
    ArrayRef<int> ShuffleMask = cast<ShuffleVectorSDNode>(Op)->getMask();

    // Collect demanded elements from shuffle operands.
    APInt DemandedLHS(NumElts, 0);
    APInt DemandedRHS(NumElts, 0);
    for (unsigned i = 0; i != NumElts; ++i) {
      if (!DemandedElts[i])
        continue;
      int M = ShuffleMask[i];
      if (M < 0) {
        // For UNDEF elements, we don't know anything about the common state of
        // the shuffle result.
        DemandedLHS.clearAllBits();
        DemandedRHS.clearAllBits();
        break;
      }
      assert(0 <= M && M < (int)(2 * NumElts) && "Shuffle index out of range");
      if (M < (int)NumElts)
        DemandedLHS.setBit(M);
      else
        DemandedRHS.setBit(M - NumElts);
    }

    if (!!DemandedLHS || !!DemandedRHS) {
      SDValue Op0 = Op.getOperand(0);
      SDValue Op1 = Op.getOperand(1);

      Known.Zero.setAllBits();
      Known.One.setAllBits();
      if (!!DemandedLHS) {
        if (SimplifyDemandedBits(Op0, DemandedBits, DemandedLHS, Known2, TLO,
                                 Depth + 1))
          return true;
        Known.One &= Known2.One;
        Known.Zero &= Known2.Zero;
      }
      if (!!DemandedRHS) {
        if (SimplifyDemandedBits(Op1, DemandedBits, DemandedRHS, Known2, TLO,
                                 Depth + 1))
          return true;
        Known.One &= Known2.One;
        Known.Zero &= Known2.Zero;
      }

      // Attempt to avoid multi-use ops if we don't need anything from them.
      SDValue DemandedOp0 = SimplifyMultipleUseDemandedBits(
          Op0, DemandedBits, DemandedLHS, TLO.DAG, Depth + 1);
      SDValue DemandedOp1 = SimplifyMultipleUseDemandedBits(
          Op1, DemandedBits, DemandedRHS, TLO.DAG, Depth + 1);
      if (DemandedOp0 || DemandedOp1) {
        Op0 = DemandedOp0 ? DemandedOp0 : Op0;
        Op1 = DemandedOp1 ? DemandedOp1 : Op1;
        SDValue NewOp = TLO.DAG.getVectorShuffle(VT, dl, Op0, Op1, ShuffleMask);
        return TLO.CombineTo(Op, NewOp);
      }
    }
    break;
  }
  case ISD::AND: {
    SDValue Op0 = Op.getOperand(0);
    SDValue Op1 = Op.getOperand(1);

    // If the RHS is a constant, check to see if the LHS would be zero without
    // using the bits from the RHS. Below, we use knowledge about the RHS to
    // simplify the LHS, here we're using information from the LHS to simplify
    // the RHS.
    if (ConstantSDNode *RHSC = isConstOrConstSplat(Op1)) {
      // Do not increment Depth here; that can cause an infinite loop.
      KnownBits LHSKnown = TLO.DAG.computeKnownBits(Op0, DemandedElts, Depth);
      // If the LHS already has zeros where RHSC does, this 'and' is dead.
      if ((LHSKnown.Zero & DemandedBits) ==
          (~RHSC->getAPIntValue() & DemandedBits))
        return TLO.CombineTo(Op, Op0);

      // If any of the set bits in the RHS are known zero on the LHS, shrink
      // the constant.
      if (ShrinkDemandedConstant(Op, ~LHSKnown.Zero & DemandedBits, TLO))
        return true;

      // Bitwise-not (xor X, -1) is a special case: we don't usually shrink its
      // constant, but if this 'and' is only clearing bits that were just set by
      // the xor, then this 'and' can be eliminated by shrinking the mask of
      // the xor. For example, for a 32-bit X:
      // and (xor (srl X, 31), -1), 1 --> xor (srl X, 31), 1
      if (isBitwiseNot(Op0) && Op0.hasOneUse() &&
          LHSKnown.One == ~RHSC->getAPIntValue()) {
        SDValue Xor = TLO.DAG.getNode(ISD::XOR, dl, VT, Op0.getOperand(0), Op1);
        return TLO.CombineTo(Op, Xor);
      }
    }

    if (SimplifyDemandedBits(Op1, DemandedBits, DemandedElts, Known, TLO,
                             Depth + 1))
      return true;
    assert(!Known.hasConflict() && "Bits known to be one AND zero?");
    if (SimplifyDemandedBits(Op0, ~Known.Zero & DemandedBits, DemandedElts,
                             Known2, TLO, Depth + 1))
      return true;
    assert(!Known2.hasConflict() && "Bits known to be one AND zero?");

    // Attempt to avoid multi-use ops if we don't need anything from them.
    if (!DemandedBits.isAllOnesValue() || !DemandedElts.isAllOnesValue()) {
      SDValue DemandedOp0 = SimplifyMultipleUseDemandedBits(
          Op0, DemandedBits, DemandedElts, TLO.DAG, Depth + 1);
      SDValue DemandedOp1 = SimplifyMultipleUseDemandedBits(
          Op1, DemandedBits, DemandedElts, TLO.DAG, Depth + 1);
      if (DemandedOp0 || DemandedOp1) {
        Op0 = DemandedOp0 ? DemandedOp0 : Op0;
        Op1 = DemandedOp1 ? DemandedOp1 : Op1;
        SDValue NewOp = TLO.DAG.getNode(Op.getOpcode(), dl, VT, Op0, Op1);
        return TLO.CombineTo(Op, NewOp);
      }
    }

    // If all of the demanded bits are known one on one side, return the other.
    // These bits cannot contribute to the result of the 'and'.
    if (DemandedBits.isSubsetOf(Known2.Zero | Known.One))
      return TLO.CombineTo(Op, Op0);
    if (DemandedBits.isSubsetOf(Known.Zero | Known2.One))
      return TLO.CombineTo(Op, Op1);
    // If all of the demanded bits in the inputs are known zeros, return zero.
    if (DemandedBits.isSubsetOf(Known.Zero | Known2.Zero))
      return TLO.CombineTo(Op, TLO.DAG.getConstant(0, dl, VT));
    // If the RHS is a constant, see if we can simplify it.
    if (ShrinkDemandedConstant(Op, ~Known2.Zero & DemandedBits, TLO))
      return true;
    // If the operation can be done in a smaller type, do so.
    if (ShrinkDemandedOp(Op, BitWidth, DemandedBits, TLO))
      return true;

    Known &= Known2;
    break;
  }
  case ISD::OR: {
    SDValue Op0 = Op.getOperand(0);
    SDValue Op1 = Op.getOperand(1);

    if (SimplifyDemandedBits(Op1, DemandedBits, DemandedElts, Known, TLO,
                             Depth + 1))
      return true;
    assert(!Known.hasConflict() && "Bits known to be one AND zero?");
    if (SimplifyDemandedBits(Op0, ~Known.One & DemandedBits, DemandedElts,
                             Known2, TLO, Depth + 1))
      return true;
    assert(!Known2.hasConflict() && "Bits known to be one AND zero?");

    // Attempt to avoid multi-use ops if we don't need anything from them.
    if (!DemandedBits.isAllOnesValue() || !DemandedElts.isAllOnesValue()) {
      SDValue DemandedOp0 = SimplifyMultipleUseDemandedBits(
          Op0, DemandedBits, DemandedElts, TLO.DAG, Depth + 1);
      SDValue DemandedOp1 = SimplifyMultipleUseDemandedBits(
          Op1, DemandedBits, DemandedElts, TLO.DAG, Depth + 1);
      if (DemandedOp0 || DemandedOp1) {
        Op0 = DemandedOp0 ? DemandedOp0 : Op0;
        Op1 = DemandedOp1 ? DemandedOp1 : Op1;
        SDValue NewOp = TLO.DAG.getNode(Op.getOpcode(), dl, VT, Op0, Op1);
        return TLO.CombineTo(Op, NewOp);
      }
    }

    // If all of the demanded bits are known zero on one side, return the
    // other. These bits cannot contribute to the result of the 'or'.
    if (DemandedBits.isSubsetOf(Known2.One | Known.Zero))
      return TLO.CombineTo(Op, Op0);
    if (DemandedBits.isSubsetOf(Known.One | Known2.Zero))
      return TLO.CombineTo(Op, Op1);
    // If the RHS is a constant, see if we can simplify it.
    if (ShrinkDemandedConstant(Op, DemandedBits, TLO))
      return true;
    // If the operation can be done in a smaller type, do so.
    if (ShrinkDemandedOp(Op, BitWidth, DemandedBits, TLO))
      return true;

    Known |= Known2;
    break;
  }
  case ISD::XOR: {
    SDValue Op0 = Op.getOperand(0);
    SDValue Op1 = Op.getOperand(1);

    if (SimplifyDemandedBits(Op1, DemandedBits, DemandedElts, Known, TLO,
                             Depth + 1))
      return true;
    assert(!Known.hasConflict() && "Bits known to be one AND zero?");
    if (SimplifyDemandedBits(Op0, DemandedBits, DemandedElts, Known2, TLO,
                             Depth + 1))
      return true;
    assert(!Known2.hasConflict() && "Bits known to be one AND zero?");

    // Attempt to avoid multi-use ops if we don't need anything from them.
    if (!DemandedBits.isAllOnesValue() || !DemandedElts.isAllOnesValue()) {
      SDValue DemandedOp0 = SimplifyMultipleUseDemandedBits(
          Op0, DemandedBits, DemandedElts, TLO.DAG, Depth + 1);
      SDValue DemandedOp1 = SimplifyMultipleUseDemandedBits(
          Op1, DemandedBits, DemandedElts, TLO.DAG, Depth + 1);
      if (DemandedOp0 || DemandedOp1) {
        Op0 = DemandedOp0 ? DemandedOp0 : Op0;
        Op1 = DemandedOp1 ? DemandedOp1 : Op1;
        SDValue NewOp = TLO.DAG.getNode(Op.getOpcode(), dl, VT, Op0, Op1);
        return TLO.CombineTo(Op, NewOp);
      }
    }

    // If all of the demanded bits are known zero on one side, return the other.
    // These bits cannot contribute to the result of the 'xor'.
    if (DemandedBits.isSubsetOf(Known.Zero))
      return TLO.CombineTo(Op, Op0);
    if (DemandedBits.isSubsetOf(Known2.Zero))
      return TLO.CombineTo(Op, Op1);
    // If the operation can be done in a smaller type, do so.
    if (ShrinkDemandedOp(Op, BitWidth, DemandedBits, TLO))
      return true;

    // If all of the unknown bits are known to be zero on one side or the other
    // (but not both) turn this into an *inclusive* or.
    // e.g. (A & C1)^(B & C2) -> (A & C1)|(B & C2) iff C1&C2 == 0
    if (DemandedBits.isSubsetOf(Known.Zero | Known2.Zero))
      return TLO.CombineTo(Op, TLO.DAG.getNode(ISD::OR, dl, VT, Op0, Op1));

    if (ConstantSDNode *C = isConstOrConstSplat(Op1)) {
      // If one side is a constant, and all of the known set bits on the other
      // side are also set in the constant, turn this into an AND, as we know
      // the bits will be cleared.
      // e.g.
      // (X | C1) ^ C2 --> (X | C1) & ~C2 iff (C1&C2) == C2
      // NB: it is okay if more bits are known than are requested
      if (C->getAPIntValue() == Known2.One) {
        SDValue ANDC =
            TLO.DAG.getConstant(~C->getAPIntValue() & DemandedBits, dl, VT);
        return TLO.CombineTo(Op, TLO.DAG.getNode(ISD::AND, dl, VT, Op0, ANDC));
      }

      // If the RHS is a constant, see if we can change it. Don't alter a -1
      // constant because that's a 'not' op, and that is better for combining
      // and codegen.
      if (!C->isAllOnesValue()) {
        if (DemandedBits.isSubsetOf(C->getAPIntValue())) {
          // We're flipping all demanded bits. Flip the undemanded bits too.
          SDValue New = TLO.DAG.getNOT(dl, Op0, VT);
          return TLO.CombineTo(Op, New);
        }
        // If we can't turn this into a 'not', try to shrink the constant.
        if (ShrinkDemandedConstant(Op, DemandedBits, TLO))
          return true;
      }
    }

    Known ^= Known2;
    break;
  }
  case ISD::SELECT:
    if (SimplifyDemandedBits(Op.getOperand(2), DemandedBits, Known, TLO,
                             Depth + 1))
      return true;
    if (SimplifyDemandedBits(Op.getOperand(1), DemandedBits, Known2, TLO,
                             Depth + 1))
      return true;
    assert(!Known.hasConflict() && "Bits known to be one AND zero?");
    assert(!Known2.hasConflict() && "Bits known to be one AND zero?");

    // If the operands are constants, see if we can simplify them.
    if (ShrinkDemandedConstant(Op, DemandedBits, TLO))
      return true;

    // Only known if known in both the LHS and RHS.
    Known.One &= Known2.One;
    Known.Zero &= Known2.Zero;
    break;
  case ISD::SELECT_CC:
    if (SimplifyDemandedBits(Op.getOperand(3), DemandedBits, Known, TLO,
                             Depth + 1))
      return true;
    if (SimplifyDemandedBits(Op.getOperand(2), DemandedBits, Known2, TLO,
                             Depth + 1))
      return true;
    assert(!Known.hasConflict() && "Bits known to be one AND zero?");
    assert(!Known2.hasConflict() && "Bits known to be one AND zero?");

    // If the operands are constants, see if we can simplify them.
    if (ShrinkDemandedConstant(Op, DemandedBits, TLO))
      return true;

    // Only known if known in both the LHS and RHS.
    Known.One &= Known2.One;
    Known.Zero &= Known2.Zero;
    break;
  case ISD::SETCC: {
    SDValue Op0 = Op.getOperand(0);
    SDValue Op1 = Op.getOperand(1);
    ISD::CondCode CC = cast<CondCodeSDNode>(Op.getOperand(2))->get();
    // If (1) we only need the sign-bit, (2) the setcc operands are the same
    // width as the setcc result, and (3) the result of a setcc conforms to 0 or
    // -1, we may be able to bypass the setcc.
    if (DemandedBits.isSignMask() &&
        Op0.getScalarValueSizeInBits() == BitWidth &&
        getBooleanContents(Op0.getValueType()) ==
            BooleanContent::ZeroOrNegativeOneBooleanContent) {
      // If we're testing X < 0, then this compare isn't needed - just use X!
      // FIXME: We're limiting to integer types here, but this should also work
      // if we don't care about FP signed-zero. The use of SETLT with FP means
      // that we don't care about NaNs.
      if (CC == ISD::SETLT && Op1.getValueType().isInteger() &&
          (isNullConstant(Op1) || ISD::isBuildVectorAllZeros(Op1.getNode())))
        return TLO.CombineTo(Op, Op0);

      // TODO: Should we check for other forms of sign-bit comparisons?
      // Examples: X <= -1, X >= 0
    }
    if (getBooleanContents(Op0.getValueType()) ==
            TargetLowering::ZeroOrOneBooleanContent &&
        BitWidth > 1)
      Known.Zero.setBitsFrom(1);
    break;
  }
  case ISD::SHL: {
    SDValue Op0 = Op.getOperand(0);
    SDValue Op1 = Op.getOperand(1);
    EVT ShiftVT = Op1.getValueType();

    if (const APInt *SA =
            TLO.DAG.getValidShiftAmountConstant(Op, DemandedElts)) {
      unsigned ShAmt = SA->getZExtValue();
      if (ShAmt == 0)
        return TLO.CombineTo(Op, Op0);

      // If this is ((X >>u C1) << ShAmt), see if we can simplify this into a
      // single shift. We can do this if the bottom bits (which are shifted
      // out) are never demanded.
      // TODO - support non-uniform vector amounts.
      if (Op0.getOpcode() == ISD::SRL) {
        if (!DemandedBits.intersects(APInt::getLowBitsSet(BitWidth, ShAmt))) {
          if (const APInt *SA2 =
                  TLO.DAG.getValidShiftAmountConstant(Op0, DemandedElts)) {
            unsigned C1 = SA2->getZExtValue();
            unsigned Opc = ISD::SHL;
            int Diff = ShAmt - C1;
            if (Diff < 0) {
              Diff = -Diff;
              Opc = ISD::SRL;
            }
            SDValue NewSA = TLO.DAG.getConstant(Diff, dl, ShiftVT);
            return TLO.CombineTo(
                Op, TLO.DAG.getNode(Opc, dl, VT, Op0.getOperand(0), NewSA));
          }
        }
      }

      // Convert (shl (anyext x), c) to (anyext (shl x, c)) if the high bits
      // are not demanded. This will likely allow the anyext to be folded away.
      // TODO - support non-uniform vector amounts.
      if (Op0.getOpcode() == ISD::ANY_EXTEND) {
        SDValue InnerOp = Op0.getOperand(0);
        EVT InnerVT = InnerOp.getValueType();
        unsigned InnerBits = InnerVT.getScalarSizeInBits();
        if (ShAmt < InnerBits && DemandedBits.getActiveBits() <= InnerBits &&
            isTypeDesirableForOp(ISD::SHL, InnerVT)) {
          EVT ShTy = getShiftAmountTy(InnerVT, DL);
          if (!APInt(BitWidth, ShAmt).isIntN(ShTy.getSizeInBits()))
            ShTy = InnerVT;
          SDValue NarrowShl =
              TLO.DAG.getNode(ISD::SHL, dl, InnerVT, InnerOp,
                              TLO.DAG.getConstant(ShAmt, dl, ShTy));
          return TLO.CombineTo(
              Op, TLO.DAG.getNode(ISD::ANY_EXTEND, dl, VT, NarrowShl));
        }

        // Repeat the SHL optimization above in cases where an extension
        // intervenes: (shl (anyext (shr x, c1)), c2) to
        // (shl (anyext x), c2-c1). This requires that the bottom c1 bits
        // aren't demanded (as above) and that the shifted upper c1 bits of
        // x aren't demanded.
        // TODO - support non-uniform vector amounts.
        if (Op0.hasOneUse() && InnerOp.getOpcode() == ISD::SRL &&
            InnerOp.hasOneUse()) {
          if (const APInt *SA2 =
                  TLO.DAG.getValidShiftAmountConstant(InnerOp, DemandedElts)) {
            unsigned InnerShAmt = SA2->getZExtValue();
            if (InnerShAmt < ShAmt && InnerShAmt < InnerBits &&
                DemandedBits.getActiveBits() <=
                    (InnerBits - InnerShAmt + ShAmt) &&
                DemandedBits.countTrailingZeros() >= ShAmt) {
              SDValue NewSA =
                  TLO.DAG.getConstant(ShAmt - InnerShAmt, dl, ShiftVT);
              SDValue NewExt = TLO.DAG.getNode(ISD::ANY_EXTEND, dl, VT,
                                               InnerOp.getOperand(0));
              return TLO.CombineTo(
                  Op, TLO.DAG.getNode(ISD::SHL, dl, VT, NewExt, NewSA));
            }
          }
        }
      }

      APInt InDemandedMask = DemandedBits.lshr(ShAmt);
      if (SimplifyDemandedBits(Op0, InDemandedMask, DemandedElts, Known, TLO,
                               Depth + 1))
        return true;
      assert(!Known.hasConflict() && "Bits known to be one AND zero?");
      Known.Zero <<= ShAmt;
      Known.One <<= ShAmt;
      // low bits known zero.
      Known.Zero.setLowBits(ShAmt);

      // Try shrinking the operation as long as the shift amount will still be
      // in range.
      if ((ShAmt < DemandedBits.getActiveBits()) &&
          ShrinkDemandedOp(Op, BitWidth, DemandedBits, TLO))
        return true;
    }

    // If we are only demanding sign bits then we can use the shift source
    // directly.
    if (const APInt *MaxSA =
            TLO.DAG.getValidMaximumShiftAmountConstant(Op, DemandedElts)) {
      unsigned ShAmt = MaxSA->getZExtValue();
      unsigned NumSignBits =
          TLO.DAG.ComputeNumSignBits(Op0, DemandedElts, Depth + 1);
      unsigned UpperDemandedBits = BitWidth - DemandedBits.countTrailingZeros();
      if (NumSignBits > ShAmt && (NumSignBits - ShAmt) >= (UpperDemandedBits))
        return TLO.CombineTo(Op, Op0);
    }
    break;
  }
  case ISD::SRL: {
    SDValue Op0 = Op.getOperand(0);
    SDValue Op1 = Op.getOperand(1);
    EVT ShiftVT = Op1.getValueType();

    if (const APInt *SA =
            TLO.DAG.getValidShiftAmountConstant(Op, DemandedElts)) {
      unsigned ShAmt = SA->getZExtValue();
      if (ShAmt == 0)
        return TLO.CombineTo(Op, Op0);

      // If this is ((X << C1) >>u ShAmt), see if we can simplify this into a
      // single shift. We can do this if the top bits (which are shifted out)
      // are never demanded.
      // TODO - support non-uniform vector amounts.
      if (Op0.getOpcode() == ISD::SHL) {
        if (!DemandedBits.intersects(APInt::getHighBitsSet(BitWidth, ShAmt))) {
          if (const APInt *SA2 =
                  TLO.DAG.getValidShiftAmountConstant(Op0, DemandedElts)) {
            unsigned C1 = SA2->getZExtValue();
            unsigned Opc = ISD::SRL;
            int Diff = ShAmt - C1;
            if (Diff < 0) {
              Diff = -Diff;
              Opc = ISD::SHL;
            }
            SDValue NewSA = TLO.DAG.getConstant(Diff, dl, ShiftVT);
            return TLO.CombineTo(
                Op, TLO.DAG.getNode(Opc, dl, VT, Op0.getOperand(0), NewSA));
          }
        }
      }

      APInt InDemandedMask = (DemandedBits << ShAmt);

      // If the shift is exact, then it does demand the low bits (and knows that
      // they are zero).
      if (Op->getFlags().hasExact())
        InDemandedMask.setLowBits(ShAmt);

      // Compute the new bits that are at the top now.
      if (SimplifyDemandedBits(Op0, InDemandedMask, DemandedElts, Known, TLO,
                               Depth + 1))
        return true;
      assert(!Known.hasConflict() && "Bits known to be one AND zero?");
      Known.Zero.lshrInPlace(ShAmt);
      Known.One.lshrInPlace(ShAmt);
      // High bits known zero.
1559 Known.Zero.setHighBits(ShAmt); 1560 } 1561 break; 1562 } 1563 case ISD::SRA: { 1564 SDValue Op0 = Op.getOperand(0); 1565 SDValue Op1 = Op.getOperand(1); 1566 EVT ShiftVT = Op1.getValueType(); 1567 1568 // If we only want bits that already match the signbit then we don't need 1569 // to shift. 1570 unsigned NumHiDemandedBits = BitWidth - DemandedBits.countTrailingZeros(); 1571 if (TLO.DAG.ComputeNumSignBits(Op0, DemandedElts, Depth + 1) >= 1572 NumHiDemandedBits) 1573 return TLO.CombineTo(Op, Op0); 1574 1575 // If this is an arithmetic shift right and only the low-bit is set, we can 1576 // always convert this into a logical shr, even if the shift amount is 1577 // variable. The low bit of the shift cannot be an input sign bit unless 1578 // the shift amount is >= the size of the datatype, which is undefined. 1579 if (DemandedBits.isOneValue()) 1580 return TLO.CombineTo(Op, TLO.DAG.getNode(ISD::SRL, dl, VT, Op0, Op1)); 1581 1582 if (const APInt *SA = 1583 TLO.DAG.getValidShiftAmountConstant(Op, DemandedElts)) { 1584 unsigned ShAmt = SA->getZExtValue(); 1585 if (ShAmt == 0) 1586 return TLO.CombineTo(Op, Op0); 1587 1588 APInt InDemandedMask = (DemandedBits << ShAmt); 1589 1590 // If the shift is exact, then it does demand the low bits (and knows that 1591 // they are zero). 1592 if (Op->getFlags().hasExact()) 1593 InDemandedMask.setLowBits(ShAmt); 1594 1595 // If any of the demanded bits are produced by the sign extension, we also 1596 // demand the input sign bit. 1597 if (DemandedBits.countLeadingZeros() < ShAmt) 1598 InDemandedMask.setSignBit(); 1599 1600 if (SimplifyDemandedBits(Op0, InDemandedMask, DemandedElts, Known, TLO, 1601 Depth + 1)) 1602 return true; 1603 assert(!Known.hasConflict() && "Bits known to be one AND zero?"); 1604 Known.Zero.lshrInPlace(ShAmt); 1605 Known.One.lshrInPlace(ShAmt); 1606 1607 // If the input sign bit is known to be zero, or if none of the top bits 1608 // are demanded, turn this into an unsigned shift right. 1609 if (Known.Zero[BitWidth - ShAmt - 1] || 1610 DemandedBits.countLeadingZeros() >= ShAmt) { 1611 SDNodeFlags Flags; 1612 Flags.setExact(Op->getFlags().hasExact()); 1613 return TLO.CombineTo( 1614 Op, TLO.DAG.getNode(ISD::SRL, dl, VT, Op0, Op1, Flags)); 1615 } 1616 1617 int Log2 = DemandedBits.exactLogBase2(); 1618 if (Log2 >= 0) { 1619 // The bit must come from the sign. 1620 SDValue NewSA = TLO.DAG.getConstant(BitWidth - 1 - Log2, dl, ShiftVT); 1621 return TLO.CombineTo(Op, TLO.DAG.getNode(ISD::SRL, dl, VT, Op0, NewSA)); 1622 } 1623 1624 if (Known.One[BitWidth - ShAmt - 1]) 1625 // New bits are known one. 1626 Known.One.setHighBits(ShAmt); 1627 1628 // Attempt to avoid multi-use ops if we don't need anything from them. 1629 if (!InDemandedMask.isAllOnesValue() || !DemandedElts.isAllOnesValue()) { 1630 SDValue DemandedOp0 = SimplifyMultipleUseDemandedBits( 1631 Op0, InDemandedMask, DemandedElts, TLO.DAG, Depth + 1); 1632 if (DemandedOp0) { 1633 SDValue NewOp = TLO.DAG.getNode(ISD::SRA, dl, VT, DemandedOp0, Op1); 1634 return TLO.CombineTo(Op, NewOp); 1635 } 1636 } 1637 } 1638 break; 1639 } 1640 case ISD::FSHL: 1641 case ISD::FSHR: { 1642 SDValue Op0 = Op.getOperand(0); 1643 SDValue Op1 = Op.getOperand(1); 1644 SDValue Op2 = Op.getOperand(2); 1645 bool IsFSHL = (Op.getOpcode() == ISD::FSHL); 1646 1647 if (ConstantSDNode *SA = isConstOrConstSplat(Op2, DemandedElts)) { 1648 unsigned Amt = SA->getAPIntValue().urem(BitWidth); 1649 1650 // For fshl, 0-shift returns the 1st arg. 1651 // For fshr, 0-shift returns the 2nd arg. 
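      // For non-zero amounts, each operand supplies a disjoint slice of the
      // result; e.g. fshl i8 X, Y, 3 == (X << 3) | (Y >> 5), so X provides
      // bits [7:3] and Y provides bits [2:0].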
1652 if (Amt == 0) { 1653 if (SimplifyDemandedBits(IsFSHL ? Op0 : Op1, DemandedBits, DemandedElts, 1654 Known, TLO, Depth + 1)) 1655 return true; 1656 break; 1657 } 1658 1659 // fshl: (Op0 << Amt) | (Op1 >> (BW - Amt)) 1660 // fshr: (Op0 << (BW - Amt)) | (Op1 >> Amt) 1661 APInt Demanded0 = DemandedBits.lshr(IsFSHL ? Amt : (BitWidth - Amt)); 1662 APInt Demanded1 = DemandedBits << (IsFSHL ? (BitWidth - Amt) : Amt); 1663 if (SimplifyDemandedBits(Op0, Demanded0, DemandedElts, Known2, TLO, 1664 Depth + 1)) 1665 return true; 1666 if (SimplifyDemandedBits(Op1, Demanded1, DemandedElts, Known, TLO, 1667 Depth + 1)) 1668 return true; 1669 1670 Known2.One <<= (IsFSHL ? Amt : (BitWidth - Amt)); 1671 Known2.Zero <<= (IsFSHL ? Amt : (BitWidth - Amt)); 1672 Known.One.lshrInPlace(IsFSHL ? (BitWidth - Amt) : Amt); 1673 Known.Zero.lshrInPlace(IsFSHL ? (BitWidth - Amt) : Amt); 1674 Known.One |= Known2.One; 1675 Known.Zero |= Known2.Zero; 1676 } 1677 1678 // For pow-2 bitwidths we only demand the bottom modulo amt bits. 1679 if (isPowerOf2_32(BitWidth)) { 1680 APInt DemandedAmtBits(Op2.getScalarValueSizeInBits(), BitWidth - 1); 1681 if (SimplifyDemandedBits(Op2, DemandedAmtBits, DemandedElts, 1682 Known2, TLO, Depth + 1)) 1683 return true; 1684 } 1685 break; 1686 } 1687 case ISD::ROTL: 1688 case ISD::ROTR: { 1689 SDValue Op0 = Op.getOperand(0); 1690 SDValue Op1 = Op.getOperand(1); 1691 1692 // If we're rotating an 0/-1 value, then it stays an 0/-1 value. 1693 if (BitWidth == TLO.DAG.ComputeNumSignBits(Op0, DemandedElts, Depth + 1)) 1694 return TLO.CombineTo(Op, Op0); 1695 1696 // For pow-2 bitwidths we only demand the bottom modulo amt bits. 1697 if (isPowerOf2_32(BitWidth)) { 1698 APInt DemandedAmtBits(Op1.getScalarValueSizeInBits(), BitWidth - 1); 1699 if (SimplifyDemandedBits(Op1, DemandedAmtBits, DemandedElts, Known2, TLO, 1700 Depth + 1)) 1701 return true; 1702 } 1703 break; 1704 } 1705 case ISD::BITREVERSE: { 1706 SDValue Src = Op.getOperand(0); 1707 APInt DemandedSrcBits = DemandedBits.reverseBits(); 1708 if (SimplifyDemandedBits(Src, DemandedSrcBits, DemandedElts, Known2, TLO, 1709 Depth + 1)) 1710 return true; 1711 Known.One = Known2.One.reverseBits(); 1712 Known.Zero = Known2.Zero.reverseBits(); 1713 break; 1714 } 1715 case ISD::BSWAP: { 1716 SDValue Src = Op.getOperand(0); 1717 APInt DemandedSrcBits = DemandedBits.byteSwap(); 1718 if (SimplifyDemandedBits(Src, DemandedSrcBits, DemandedElts, Known2, TLO, 1719 Depth + 1)) 1720 return true; 1721 Known.One = Known2.One.byteSwap(); 1722 Known.Zero = Known2.Zero.byteSwap(); 1723 break; 1724 } 1725 case ISD::SIGN_EXTEND_INREG: { 1726 SDValue Op0 = Op.getOperand(0); 1727 EVT ExVT = cast<VTSDNode>(Op.getOperand(1))->getVT(); 1728 unsigned ExVTBits = ExVT.getScalarSizeInBits(); 1729 1730 // If we only care about the highest bit, don't bother shifting right. 1731 if (DemandedBits.isSignMask()) { 1732 unsigned NumSignBits = 1733 TLO.DAG.ComputeNumSignBits(Op0, DemandedElts, Depth + 1); 1734 bool AlreadySignExtended = NumSignBits >= BitWidth - ExVTBits + 1; 1735 // However if the input is already sign extended we expect the sign 1736 // extension to be dropped altogether later and do not simplify. 1737 if (!AlreadySignExtended) { 1738 // Compute the correct shift amount type, which must be getShiftAmountTy 1739 // for scalar types after legalization. 
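        // E.g. if only bit 31 of (sext_inreg %x, i8) on i32 is demanded, we
        // can emit (shl %x, 24) instead: bit 7 of %x lands directly in bit 31.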
1740 EVT ShiftAmtTy = VT; 1741 if (TLO.LegalTypes() && !ShiftAmtTy.isVector()) 1742 ShiftAmtTy = getShiftAmountTy(ShiftAmtTy, DL); 1743 1744 SDValue ShiftAmt = 1745 TLO.DAG.getConstant(BitWidth - ExVTBits, dl, ShiftAmtTy); 1746 return TLO.CombineTo(Op, 1747 TLO.DAG.getNode(ISD::SHL, dl, VT, Op0, ShiftAmt)); 1748 } 1749 } 1750 1751 // If none of the extended bits are demanded, eliminate the sextinreg. 1752 if (DemandedBits.getActiveBits() <= ExVTBits) 1753 return TLO.CombineTo(Op, Op0); 1754 1755 APInt InputDemandedBits = DemandedBits.getLoBits(ExVTBits); 1756 1757 // Since the sign extended bits are demanded, we know that the sign 1758 // bit is demanded. 1759 InputDemandedBits.setBit(ExVTBits - 1); 1760 1761 if (SimplifyDemandedBits(Op0, InputDemandedBits, Known, TLO, Depth + 1)) 1762 return true; 1763 assert(!Known.hasConflict() && "Bits known to be one AND zero?"); 1764 1765 // If the sign bit of the input is known set or clear, then we know the 1766 // top bits of the result. 1767 1768 // If the input sign bit is known zero, convert this into a zero extension. 1769 if (Known.Zero[ExVTBits - 1]) 1770 return TLO.CombineTo(Op, TLO.DAG.getZeroExtendInReg(Op0, dl, ExVT)); 1771 1772 APInt Mask = APInt::getLowBitsSet(BitWidth, ExVTBits); 1773 if (Known.One[ExVTBits - 1]) { // Input sign bit known set 1774 Known.One.setBitsFrom(ExVTBits); 1775 Known.Zero &= Mask; 1776 } else { // Input sign bit unknown 1777 Known.Zero &= Mask; 1778 Known.One &= Mask; 1779 } 1780 break; 1781 } 1782 case ISD::BUILD_PAIR: { 1783 EVT HalfVT = Op.getOperand(0).getValueType(); 1784 unsigned HalfBitWidth = HalfVT.getScalarSizeInBits(); 1785 1786 APInt MaskLo = DemandedBits.getLoBits(HalfBitWidth).trunc(HalfBitWidth); 1787 APInt MaskHi = DemandedBits.getHiBits(HalfBitWidth).trunc(HalfBitWidth); 1788 1789 KnownBits KnownLo, KnownHi; 1790 1791 if (SimplifyDemandedBits(Op.getOperand(0), MaskLo, KnownLo, TLO, Depth + 1)) 1792 return true; 1793 1794 if (SimplifyDemandedBits(Op.getOperand(1), MaskHi, KnownHi, TLO, Depth + 1)) 1795 return true; 1796 1797 Known.Zero = KnownLo.Zero.zext(BitWidth) | 1798 KnownHi.Zero.zext(BitWidth).shl(HalfBitWidth); 1799 1800 Known.One = KnownLo.One.zext(BitWidth) | 1801 KnownHi.One.zext(BitWidth).shl(HalfBitWidth); 1802 break; 1803 } 1804 case ISD::ZERO_EXTEND: 1805 case ISD::ZERO_EXTEND_VECTOR_INREG: { 1806 SDValue Src = Op.getOperand(0); 1807 EVT SrcVT = Src.getValueType(); 1808 unsigned InBits = SrcVT.getScalarSizeInBits(); 1809 unsigned InElts = SrcVT.isVector() ? SrcVT.getVectorNumElements() : 1; 1810 bool IsVecInReg = Op.getOpcode() == ISD::ZERO_EXTEND_VECTOR_INREG; 1811 1812 // If none of the top bits are demanded, convert this into an any_extend. 1813 if (DemandedBits.getActiveBits() <= InBits) { 1814 // If we only need the non-extended bits of the bottom element 1815 // then we can just bitcast to the result. 1816 if (IsVecInReg && DemandedElts == 1 && 1817 VT.getSizeInBits() == SrcVT.getSizeInBits() && 1818 TLO.DAG.getDataLayout().isLittleEndian()) 1819 return TLO.CombineTo(Op, TLO.DAG.getBitcast(VT, Src)); 1820 1821 unsigned Opc = 1822 IsVecInReg ? 
ISD::ANY_EXTEND_VECTOR_INREG : ISD::ANY_EXTEND; 1823 if (!TLO.LegalOperations() || isOperationLegal(Opc, VT)) 1824 return TLO.CombineTo(Op, TLO.DAG.getNode(Opc, dl, VT, Src)); 1825 } 1826 1827 APInt InDemandedBits = DemandedBits.trunc(InBits); 1828 APInt InDemandedElts = DemandedElts.zextOrSelf(InElts); 1829 if (SimplifyDemandedBits(Src, InDemandedBits, InDemandedElts, Known, TLO, 1830 Depth + 1)) 1831 return true; 1832 assert(!Known.hasConflict() && "Bits known to be one AND zero?"); 1833 assert(Known.getBitWidth() == InBits && "Src width has changed?"); 1834 Known = Known.zext(BitWidth); 1835 break; 1836 } 1837 case ISD::SIGN_EXTEND: 1838 case ISD::SIGN_EXTEND_VECTOR_INREG: { 1839 SDValue Src = Op.getOperand(0); 1840 EVT SrcVT = Src.getValueType(); 1841 unsigned InBits = SrcVT.getScalarSizeInBits(); 1842 unsigned InElts = SrcVT.isVector() ? SrcVT.getVectorNumElements() : 1; 1843 bool IsVecInReg = Op.getOpcode() == ISD::SIGN_EXTEND_VECTOR_INREG; 1844 1845 // If none of the top bits are demanded, convert this into an any_extend. 1846 if (DemandedBits.getActiveBits() <= InBits) { 1847 // If we only need the non-extended bits of the bottom element 1848 // then we can just bitcast to the result. 1849 if (IsVecInReg && DemandedElts == 1 && 1850 VT.getSizeInBits() == SrcVT.getSizeInBits() && 1851 TLO.DAG.getDataLayout().isLittleEndian()) 1852 return TLO.CombineTo(Op, TLO.DAG.getBitcast(VT, Src)); 1853 1854 unsigned Opc = 1855 IsVecInReg ? ISD::ANY_EXTEND_VECTOR_INREG : ISD::ANY_EXTEND; 1856 if (!TLO.LegalOperations() || isOperationLegal(Opc, VT)) 1857 return TLO.CombineTo(Op, TLO.DAG.getNode(Opc, dl, VT, Src)); 1858 } 1859 1860 APInt InDemandedBits = DemandedBits.trunc(InBits); 1861 APInt InDemandedElts = DemandedElts.zextOrSelf(InElts); 1862 1863 // Since some of the sign extended bits are demanded, we know that the sign 1864 // bit is demanded. 1865 InDemandedBits.setBit(InBits - 1); 1866 1867 if (SimplifyDemandedBits(Src, InDemandedBits, InDemandedElts, Known, TLO, 1868 Depth + 1)) 1869 return true; 1870 assert(!Known.hasConflict() && "Bits known to be one AND zero?"); 1871 assert(Known.getBitWidth() == InBits && "Src width has changed?"); 1872 1873 // If the sign bit is known one, the top bits match. 1874 Known = Known.sext(BitWidth); 1875 1876 // If the sign bit is known zero, convert this to a zero extend. 1877 if (Known.isNonNegative()) { 1878 unsigned Opc = 1879 IsVecInReg ? ISD::ZERO_EXTEND_VECTOR_INREG : ISD::ZERO_EXTEND; 1880 if (!TLO.LegalOperations() || isOperationLegal(Opc, VT)) 1881 return TLO.CombineTo(Op, TLO.DAG.getNode(Opc, dl, VT, Src)); 1882 } 1883 break; 1884 } 1885 case ISD::ANY_EXTEND: 1886 case ISD::ANY_EXTEND_VECTOR_INREG: { 1887 SDValue Src = Op.getOperand(0); 1888 EVT SrcVT = Src.getValueType(); 1889 unsigned InBits = SrcVT.getScalarSizeInBits(); 1890 unsigned InElts = SrcVT.isVector() ? SrcVT.getVectorNumElements() : 1; 1891 bool IsVecInReg = Op.getOpcode() == ISD::ANY_EXTEND_VECTOR_INREG; 1892 1893 // If we only need the bottom element then we can just bitcast. 1894 // TODO: Handle ANY_EXTEND? 
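    // E.g. (v4i32 any_extend_vector_inreg (v8i16 %x)) with only element 0
    // demanded can be rewritten as (v4i32 bitcast %x) on little-endian: the
    // low 16 bits still hold %x[0] and the extended bits were undefined anyway.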
1895 if (IsVecInReg && DemandedElts == 1 && 1896 VT.getSizeInBits() == SrcVT.getSizeInBits() && 1897 TLO.DAG.getDataLayout().isLittleEndian()) 1898 return TLO.CombineTo(Op, TLO.DAG.getBitcast(VT, Src)); 1899 1900 APInt InDemandedBits = DemandedBits.trunc(InBits); 1901 APInt InDemandedElts = DemandedElts.zextOrSelf(InElts); 1902 if (SimplifyDemandedBits(Src, InDemandedBits, InDemandedElts, Known, TLO, 1903 Depth + 1)) 1904 return true; 1905 assert(!Known.hasConflict() && "Bits known to be one AND zero?"); 1906 assert(Known.getBitWidth() == InBits && "Src width has changed?"); 1907 Known = Known.anyext(BitWidth); 1908 1909 // Attempt to avoid multi-use ops if we don't need anything from them. 1910 if (SDValue NewSrc = SimplifyMultipleUseDemandedBits( 1911 Src, InDemandedBits, InDemandedElts, TLO.DAG, Depth + 1)) 1912 return TLO.CombineTo(Op, TLO.DAG.getNode(Op.getOpcode(), dl, VT, NewSrc)); 1913 break; 1914 } 1915 case ISD::TRUNCATE: { 1916 SDValue Src = Op.getOperand(0); 1917 1918 // Simplify the input, using demanded bit information, and compute the known 1919 // zero/one bits live out. 1920 unsigned OperandBitWidth = Src.getScalarValueSizeInBits(); 1921 APInt TruncMask = DemandedBits.zext(OperandBitWidth); 1922 if (SimplifyDemandedBits(Src, TruncMask, Known, TLO, Depth + 1)) 1923 return true; 1924 Known = Known.trunc(BitWidth); 1925 1926 // Attempt to avoid multi-use ops if we don't need anything from them. 1927 if (SDValue NewSrc = SimplifyMultipleUseDemandedBits( 1928 Src, TruncMask, DemandedElts, TLO.DAG, Depth + 1)) 1929 return TLO.CombineTo(Op, TLO.DAG.getNode(ISD::TRUNCATE, dl, VT, NewSrc)); 1930 1931 // If the input is only used by this truncate, see if we can shrink it based 1932 // on the known demanded bits. 1933 if (Src.getNode()->hasOneUse()) { 1934 switch (Src.getOpcode()) { 1935 default: 1936 break; 1937 case ISD::SRL: 1938 // Shrink SRL by a constant if none of the high bits shifted in are 1939 // demanded. 1940 if (TLO.LegalTypes() && !isTypeDesirableForOp(ISD::SRL, VT)) 1941 // Do not turn (vt1 truncate (vt2 srl)) into (vt1 srl) if vt1 is 1942 // undesirable. 1943 break; 1944 1945 SDValue ShAmt = Src.getOperand(1); 1946 auto *ShAmtC = dyn_cast<ConstantSDNode>(ShAmt); 1947 if (!ShAmtC || ShAmtC->getAPIntValue().uge(BitWidth)) 1948 break; 1949 uint64_t ShVal = ShAmtC->getZExtValue(); 1950 1951 APInt HighBits = 1952 APInt::getHighBitsSet(OperandBitWidth, OperandBitWidth - BitWidth); 1953 HighBits.lshrInPlace(ShVal); 1954 HighBits = HighBits.trunc(BitWidth); 1955 1956 if (!(HighBits & DemandedBits)) { 1957 // None of the shifted in bits are needed. Add a truncate of the 1958 // shift input, then shift it. 1959 if (TLO.LegalTypes()) 1960 ShAmt = TLO.DAG.getConstant(ShVal, dl, getShiftAmountTy(VT, DL)); 1961 SDValue NewTrunc = 1962 TLO.DAG.getNode(ISD::TRUNCATE, dl, VT, Src.getOperand(0)); 1963 return TLO.CombineTo( 1964 Op, TLO.DAG.getNode(ISD::SRL, dl, VT, NewTrunc, ShAmt)); 1965 } 1966 break; 1967 } 1968 } 1969 1970 assert(!Known.hasConflict() && "Bits known to be one AND zero?"); 1971 break; 1972 } 1973 case ISD::AssertZext: { 1974 // AssertZext demands all of the high bits, plus any of the low bits 1975 // demanded by its users. 
1976 EVT ZVT = cast<VTSDNode>(Op.getOperand(1))->getVT(); 1977 APInt InMask = APInt::getLowBitsSet(BitWidth, ZVT.getSizeInBits()); 1978 if (SimplifyDemandedBits(Op.getOperand(0), ~InMask | DemandedBits, Known, 1979 TLO, Depth + 1)) 1980 return true; 1981 assert(!Known.hasConflict() && "Bits known to be one AND zero?"); 1982 1983 Known.Zero |= ~InMask; 1984 break; 1985 } 1986 case ISD::EXTRACT_VECTOR_ELT: { 1987 SDValue Src = Op.getOperand(0); 1988 SDValue Idx = Op.getOperand(1); 1989 unsigned NumSrcElts = Src.getValueType().getVectorNumElements(); 1990 unsigned EltBitWidth = Src.getScalarValueSizeInBits(); 1991 1992 // Demand the bits from every vector element without a constant index. 1993 APInt DemandedSrcElts = APInt::getAllOnesValue(NumSrcElts); 1994 if (auto *CIdx = dyn_cast<ConstantSDNode>(Idx)) 1995 if (CIdx->getAPIntValue().ult(NumSrcElts)) 1996 DemandedSrcElts = APInt::getOneBitSet(NumSrcElts, CIdx->getZExtValue()); 1997 1998 // If BitWidth > EltBitWidth the value is anyext:ed. So we do not know 1999 // anything about the extended bits. 2000 APInt DemandedSrcBits = DemandedBits; 2001 if (BitWidth > EltBitWidth) 2002 DemandedSrcBits = DemandedSrcBits.trunc(EltBitWidth); 2003 2004 if (SimplifyDemandedBits(Src, DemandedSrcBits, DemandedSrcElts, Known2, TLO, 2005 Depth + 1)) 2006 return true; 2007 2008 // Attempt to avoid multi-use ops if we don't need anything from them. 2009 if (!DemandedSrcBits.isAllOnesValue() || 2010 !DemandedSrcElts.isAllOnesValue()) { 2011 if (SDValue DemandedSrc = SimplifyMultipleUseDemandedBits( 2012 Src, DemandedSrcBits, DemandedSrcElts, TLO.DAG, Depth + 1)) { 2013 SDValue NewOp = 2014 TLO.DAG.getNode(Op.getOpcode(), dl, VT, DemandedSrc, Idx); 2015 return TLO.CombineTo(Op, NewOp); 2016 } 2017 } 2018 2019 Known = Known2; 2020 if (BitWidth > EltBitWidth) 2021 Known = Known.anyext(BitWidth); 2022 break; 2023 } 2024 case ISD::BITCAST: { 2025 SDValue Src = Op.getOperand(0); 2026 EVT SrcVT = Src.getValueType(); 2027 unsigned NumSrcEltBits = SrcVT.getScalarSizeInBits(); 2028 2029 // If this is an FP->Int bitcast and if the sign bit is the only 2030 // thing demanded, turn this into a FGETSIGN. 2031 if (!TLO.LegalOperations() && !VT.isVector() && !SrcVT.isVector() && 2032 DemandedBits == APInt::getSignMask(Op.getValueSizeInBits()) && 2033 SrcVT.isFloatingPoint()) { 2034 bool OpVTLegal = isOperationLegalOrCustom(ISD::FGETSIGN, VT); 2035 bool i32Legal = isOperationLegalOrCustom(ISD::FGETSIGN, MVT::i32); 2036 if ((OpVTLegal || i32Legal) && VT.isSimple() && SrcVT != MVT::f16 && 2037 SrcVT != MVT::f128) { 2038 // Cannot eliminate/lower SHL for f128 yet. 2039 EVT Ty = OpVTLegal ? VT : MVT::i32; 2040 // Make a FGETSIGN + SHL to move the sign bit into the appropriate 2041 // place. We expect the SHL to be eliminated by other optimizations. 2042 SDValue Sign = TLO.DAG.getNode(ISD::FGETSIGN, dl, Ty, Src); 2043 unsigned OpVTSizeInBits = Op.getValueSizeInBits(); 2044 if (!OpVTLegal && OpVTSizeInBits > 32) 2045 Sign = TLO.DAG.getNode(ISD::ZERO_EXTEND, dl, VT, Sign); 2046 unsigned ShVal = Op.getValueSizeInBits() - 1; 2047 SDValue ShAmt = TLO.DAG.getConstant(ShVal, dl, VT); 2048 return TLO.CombineTo(Op, 2049 TLO.DAG.getNode(ISD::SHL, dl, VT, Sign, ShAmt)); 2050 } 2051 } 2052 2053 // Bitcast from a vector using SimplifyDemanded Bits/VectorElts. 2054 // Demand the elt/bit if any of the original elts/bits are demanded. 2055 // TODO - bigendian once we have test coverage. 
2056 if (SrcVT.isVector() && (BitWidth % NumSrcEltBits) == 0 && 2057 TLO.DAG.getDataLayout().isLittleEndian()) { 2058 unsigned Scale = BitWidth / NumSrcEltBits; 2059 unsigned NumSrcElts = SrcVT.getVectorNumElements(); 2060 APInt DemandedSrcBits = APInt::getNullValue(NumSrcEltBits); 2061 APInt DemandedSrcElts = APInt::getNullValue(NumSrcElts); 2062 for (unsigned i = 0; i != Scale; ++i) { 2063 unsigned Offset = i * NumSrcEltBits; 2064 APInt Sub = DemandedBits.extractBits(NumSrcEltBits, Offset); 2065 if (!Sub.isNullValue()) { 2066 DemandedSrcBits |= Sub; 2067 for (unsigned j = 0; j != NumElts; ++j) 2068 if (DemandedElts[j]) 2069 DemandedSrcElts.setBit((j * Scale) + i); 2070 } 2071 } 2072 2073 APInt KnownSrcUndef, KnownSrcZero; 2074 if (SimplifyDemandedVectorElts(Src, DemandedSrcElts, KnownSrcUndef, 2075 KnownSrcZero, TLO, Depth + 1)) 2076 return true; 2077 2078 KnownBits KnownSrcBits; 2079 if (SimplifyDemandedBits(Src, DemandedSrcBits, DemandedSrcElts, 2080 KnownSrcBits, TLO, Depth + 1)) 2081 return true; 2082 } else if ((NumSrcEltBits % BitWidth) == 0 && 2083 TLO.DAG.getDataLayout().isLittleEndian()) { 2084 unsigned Scale = NumSrcEltBits / BitWidth; 2085 unsigned NumSrcElts = SrcVT.isVector() ? SrcVT.getVectorNumElements() : 1; 2086 APInt DemandedSrcBits = APInt::getNullValue(NumSrcEltBits); 2087 APInt DemandedSrcElts = APInt::getNullValue(NumSrcElts); 2088 for (unsigned i = 0; i != NumElts; ++i) 2089 if (DemandedElts[i]) { 2090 unsigned Offset = (i % Scale) * BitWidth; 2091 DemandedSrcBits.insertBits(DemandedBits, Offset); 2092 DemandedSrcElts.setBit(i / Scale); 2093 } 2094 2095 if (SrcVT.isVector()) { 2096 APInt KnownSrcUndef, KnownSrcZero; 2097 if (SimplifyDemandedVectorElts(Src, DemandedSrcElts, KnownSrcUndef, 2098 KnownSrcZero, TLO, Depth + 1)) 2099 return true; 2100 } 2101 2102 KnownBits KnownSrcBits; 2103 if (SimplifyDemandedBits(Src, DemandedSrcBits, DemandedSrcElts, 2104 KnownSrcBits, TLO, Depth + 1)) 2105 return true; 2106 } 2107 2108 // If this is a bitcast, let computeKnownBits handle it. Only do this on a 2109 // recursive call where Known may be useful to the caller. 2110 if (Depth > 0) { 2111 Known = TLO.DAG.computeKnownBits(Op, DemandedElts, Depth); 2112 return false; 2113 } 2114 break; 2115 } 2116 case ISD::ADD: 2117 case ISD::MUL: 2118 case ISD::SUB: { 2119 // Add, Sub, and Mul don't demand any bits in positions beyond that 2120 // of the highest bit demanded of them. 2121 SDValue Op0 = Op.getOperand(0), Op1 = Op.getOperand(1); 2122 SDNodeFlags Flags = Op.getNode()->getFlags(); 2123 unsigned DemandedBitsLZ = DemandedBits.countLeadingZeros(); 2124 APInt LoMask = APInt::getLowBitsSet(BitWidth, BitWidth - DemandedBitsLZ); 2125 if (SimplifyDemandedBits(Op0, LoMask, DemandedElts, Known2, TLO, 2126 Depth + 1) || 2127 SimplifyDemandedBits(Op1, LoMask, DemandedElts, Known2, TLO, 2128 Depth + 1) || 2129 // See if the operation should be performed at a smaller bit width. 2130 ShrinkDemandedOp(Op, BitWidth, DemandedBits, TLO)) { 2131 if (Flags.hasNoSignedWrap() || Flags.hasNoUnsignedWrap()) { 2132 // Disable the nsw and nuw flags. We can no longer guarantee that we 2133 // won't wrap after simplification. 2134 Flags.setNoSignedWrap(false); 2135 Flags.setNoUnsignedWrap(false); 2136 SDValue NewOp = 2137 TLO.DAG.getNode(Op.getOpcode(), dl, VT, Op0, Op1, Flags); 2138 return TLO.CombineTo(Op, NewOp); 2139 } 2140 return true; 2141 } 2142 2143 // Attempt to avoid multi-use ops if we don't need anything from them. 
2144 if (!LoMask.isAllOnesValue() || !DemandedElts.isAllOnesValue()) { 2145 SDValue DemandedOp0 = SimplifyMultipleUseDemandedBits( 2146 Op0, LoMask, DemandedElts, TLO.DAG, Depth + 1); 2147 SDValue DemandedOp1 = SimplifyMultipleUseDemandedBits( 2148 Op1, LoMask, DemandedElts, TLO.DAG, Depth + 1); 2149 if (DemandedOp0 || DemandedOp1) { 2150 Flags.setNoSignedWrap(false); 2151 Flags.setNoUnsignedWrap(false); 2152 Op0 = DemandedOp0 ? DemandedOp0 : Op0; 2153 Op1 = DemandedOp1 ? DemandedOp1 : Op1; 2154 SDValue NewOp = 2155 TLO.DAG.getNode(Op.getOpcode(), dl, VT, Op0, Op1, Flags); 2156 return TLO.CombineTo(Op, NewOp); 2157 } 2158 } 2159 2160 // If we have a constant operand, we may be able to turn it into -1 if we 2161 // do not demand the high bits. This can make the constant smaller to 2162 // encode, allow more general folding, or match specialized instruction 2163 // patterns (eg, 'blsr' on x86). Don't bother changing 1 to -1 because that 2164 // is probably not useful (and could be detrimental). 2165 ConstantSDNode *C = isConstOrConstSplat(Op1); 2166 APInt HighMask = APInt::getHighBitsSet(BitWidth, DemandedBitsLZ); 2167 if (C && !C->isAllOnesValue() && !C->isOne() && 2168 (C->getAPIntValue() | HighMask).isAllOnesValue()) { 2169 SDValue Neg1 = TLO.DAG.getAllOnesConstant(dl, VT); 2170 // Disable the nsw and nuw flags. We can no longer guarantee that we 2171 // won't wrap after simplification. 2172 Flags.setNoSignedWrap(false); 2173 Flags.setNoUnsignedWrap(false); 2174 SDValue NewOp = TLO.DAG.getNode(Op.getOpcode(), dl, VT, Op0, Neg1, Flags); 2175 return TLO.CombineTo(Op, NewOp); 2176 } 2177 2178 LLVM_FALLTHROUGH; 2179 } 2180 default: 2181 if (Op.getOpcode() >= ISD::BUILTIN_OP_END) { 2182 if (SimplifyDemandedBitsForTargetNode(Op, DemandedBits, DemandedElts, 2183 Known, TLO, Depth)) 2184 return true; 2185 break; 2186 } 2187 2188 // Just use computeKnownBits to compute output bits. 2189 Known = TLO.DAG.computeKnownBits(Op, DemandedElts, Depth); 2190 break; 2191 } 2192 2193 // If we know the value of all of the demanded bits, return this as a 2194 // constant. 2195 if (DemandedBits.isSubsetOf(Known.Zero | Known.One)) { 2196 // Avoid folding to a constant if any OpaqueConstant is involved. 2197 const SDNode *N = Op.getNode(); 2198 for (SDNodeIterator I = SDNodeIterator::begin(N), 2199 E = SDNodeIterator::end(N); 2200 I != E; ++I) { 2201 SDNode *Op = *I; 2202 if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op)) 2203 if (C->isOpaque()) 2204 return false; 2205 } 2206 // TODO: Handle float bits as well. 2207 if (VT.isInteger()) 2208 return TLO.CombineTo(Op, TLO.DAG.getConstant(Known.One, dl, VT)); 2209 } 2210 2211 return false; 2212 } 2213 2214 bool TargetLowering::SimplifyDemandedVectorElts(SDValue Op, 2215 const APInt &DemandedElts, 2216 APInt &KnownUndef, 2217 APInt &KnownZero, 2218 DAGCombinerInfo &DCI) const { 2219 SelectionDAG &DAG = DCI.DAG; 2220 TargetLoweringOpt TLO(DAG, !DCI.isBeforeLegalize(), 2221 !DCI.isBeforeLegalizeOps()); 2222 2223 bool Simplified = 2224 SimplifyDemandedVectorElts(Op, DemandedElts, KnownUndef, KnownZero, TLO); 2225 if (Simplified) { 2226 DCI.AddToWorklist(Op.getNode()); 2227 DCI.CommitTargetLoweringOpt(TLO); 2228 } 2229 2230 return Simplified; 2231 } 2232 2233 /// Given a vector binary operation and known undefined elements for each input 2234 /// operand, compute whether each element of the output is undefined. 
2235 static APInt getKnownUndefForVectorBinop(SDValue BO, SelectionDAG &DAG, 2236 const APInt &UndefOp0, 2237 const APInt &UndefOp1) { 2238 EVT VT = BO.getValueType(); 2239 assert(DAG.getTargetLoweringInfo().isBinOp(BO.getOpcode()) && VT.isVector() && 2240 "Vector binop only"); 2241 2242 EVT EltVT = VT.getVectorElementType(); 2243 unsigned NumElts = VT.getVectorNumElements(); 2244 assert(UndefOp0.getBitWidth() == NumElts && 2245 UndefOp1.getBitWidth() == NumElts && "Bad type for undef analysis"); 2246 2247 auto getUndefOrConstantElt = [&](SDValue V, unsigned Index, 2248 const APInt &UndefVals) { 2249 if (UndefVals[Index]) 2250 return DAG.getUNDEF(EltVT); 2251 2252 if (auto *BV = dyn_cast<BuildVectorSDNode>(V)) { 2253 // Try hard to make sure that the getNode() call is not creating temporary 2254 // nodes. Ignore opaque integers because they do not constant fold. 2255 SDValue Elt = BV->getOperand(Index); 2256 auto *C = dyn_cast<ConstantSDNode>(Elt); 2257 if (isa<ConstantFPSDNode>(Elt) || Elt.isUndef() || (C && !C->isOpaque())) 2258 return Elt; 2259 } 2260 2261 return SDValue(); 2262 }; 2263 2264 APInt KnownUndef = APInt::getNullValue(NumElts); 2265 for (unsigned i = 0; i != NumElts; ++i) { 2266 // If both inputs for this element are either constant or undef and match 2267 // the element type, compute the constant/undef result for this element of 2268 // the vector. 2269 // TODO: Ideally we would use FoldConstantArithmetic() here, but that does 2270 // not handle FP constants. The code within getNode() should be refactored 2271 // to avoid the danger of creating a bogus temporary node here. 2272 SDValue C0 = getUndefOrConstantElt(BO.getOperand(0), i, UndefOp0); 2273 SDValue C1 = getUndefOrConstantElt(BO.getOperand(1), i, UndefOp1); 2274 if (C0 && C1 && C0.getValueType() == EltVT && C1.getValueType() == EltVT) 2275 if (DAG.getNode(BO.getOpcode(), SDLoc(BO), EltVT, C0, C1).isUndef()) 2276 KnownUndef.setBit(i); 2277 } 2278 return KnownUndef; 2279 } 2280 2281 bool TargetLowering::SimplifyDemandedVectorElts( 2282 SDValue Op, const APInt &OriginalDemandedElts, APInt &KnownUndef, 2283 APInt &KnownZero, TargetLoweringOpt &TLO, unsigned Depth, 2284 bool AssumeSingleUse) const { 2285 EVT VT = Op.getValueType(); 2286 unsigned Opcode = Op.getOpcode(); 2287 APInt DemandedElts = OriginalDemandedElts; 2288 unsigned NumElts = DemandedElts.getBitWidth(); 2289 assert(VT.isVector() && "Expected vector op"); 2290 2291 KnownUndef = KnownZero = APInt::getNullValue(NumElts); 2292 2293 // TODO: For now we assume we know nothing about scalable vectors. 2294 if (VT.isScalableVector()) 2295 return false; 2296 2297 assert(VT.getVectorNumElements() == NumElts && 2298 "Mask size mismatches value type element count!"); 2299 2300 // Undef operand. 2301 if (Op.isUndef()) { 2302 KnownUndef.setAllBits(); 2303 return false; 2304 } 2305 2306 // If Op has other users, assume that all elements are needed. 2307 if (!Op.getNode()->hasOneUse() && !AssumeSingleUse) 2308 DemandedElts.setAllBits(); 2309 2310 // Not demanding any elements from Op. 2311 if (DemandedElts == 0) { 2312 KnownUndef.setAllBits(); 2313 return TLO.CombineTo(Op, TLO.DAG.getUNDEF(VT)); 2314 } 2315 2316 // Limit search depth. 2317 if (Depth >= SelectionDAG::MaxRecursionDepth) 2318 return false; 2319 2320 SDLoc DL(Op); 2321 unsigned EltSizeInBits = VT.getScalarSizeInBits(); 2322 2323 // Helper for demanding the specified elements and all the bits of both binary 2324 // operands. 
2325 auto SimplifyDemandedVectorEltsBinOp = [&](SDValue Op0, SDValue Op1) { 2326 unsigned NumBits0 = Op0.getScalarValueSizeInBits(); 2327 unsigned NumBits1 = Op1.getScalarValueSizeInBits(); 2328 APInt DemandedBits0 = APInt::getAllOnesValue(NumBits0); 2329 APInt DemandedBits1 = APInt::getAllOnesValue(NumBits1); 2330 SDValue NewOp0 = SimplifyMultipleUseDemandedBits( 2331 Op0, DemandedBits0, DemandedElts, TLO.DAG, Depth + 1); 2332 SDValue NewOp1 = SimplifyMultipleUseDemandedBits( 2333 Op1, DemandedBits1, DemandedElts, TLO.DAG, Depth + 1); 2334 if (NewOp0 || NewOp1) { 2335 SDValue NewOp = TLO.DAG.getNode( 2336 Opcode, SDLoc(Op), VT, NewOp0 ? NewOp0 : Op0, NewOp1 ? NewOp1 : Op1); 2337 return TLO.CombineTo(Op, NewOp); 2338 } 2339 return false; 2340 }; 2341 2342 switch (Opcode) { 2343 case ISD::SCALAR_TO_VECTOR: { 2344 if (!DemandedElts[0]) { 2345 KnownUndef.setAllBits(); 2346 return TLO.CombineTo(Op, TLO.DAG.getUNDEF(VT)); 2347 } 2348 KnownUndef.setHighBits(NumElts - 1); 2349 break; 2350 } 2351 case ISD::BITCAST: { 2352 SDValue Src = Op.getOperand(0); 2353 EVT SrcVT = Src.getValueType(); 2354 2355 // We only handle vectors here. 2356 // TODO - investigate calling SimplifyDemandedBits/ComputeKnownBits? 2357 if (!SrcVT.isVector()) 2358 break; 2359 2360 // Fast handling of 'identity' bitcasts. 2361 unsigned NumSrcElts = SrcVT.getVectorNumElements(); 2362 if (NumSrcElts == NumElts) 2363 return SimplifyDemandedVectorElts(Src, DemandedElts, KnownUndef, 2364 KnownZero, TLO, Depth + 1); 2365 2366 APInt SrcZero, SrcUndef; 2367 APInt SrcDemandedElts = APInt::getNullValue(NumSrcElts); 2368 2369 // Bitcast from 'large element' src vector to 'small element' vector, we 2370 // must demand a source element if any DemandedElt maps to it. 2371 if ((NumElts % NumSrcElts) == 0) { 2372 unsigned Scale = NumElts / NumSrcElts; 2373 for (unsigned i = 0; i != NumElts; ++i) 2374 if (DemandedElts[i]) 2375 SrcDemandedElts.setBit(i / Scale); 2376 2377 if (SimplifyDemandedVectorElts(Src, SrcDemandedElts, SrcUndef, SrcZero, 2378 TLO, Depth + 1)) 2379 return true; 2380 2381 // Try calling SimplifyDemandedBits, converting demanded elts to the bits 2382 // of the large element. 2383 // TODO - bigendian once we have test coverage. 2384 if (TLO.DAG.getDataLayout().isLittleEndian()) { 2385 unsigned SrcEltSizeInBits = SrcVT.getScalarSizeInBits(); 2386 APInt SrcDemandedBits = APInt::getNullValue(SrcEltSizeInBits); 2387 for (unsigned i = 0; i != NumElts; ++i) 2388 if (DemandedElts[i]) { 2389 unsigned Ofs = (i % Scale) * EltSizeInBits; 2390 SrcDemandedBits.setBits(Ofs, Ofs + EltSizeInBits); 2391 } 2392 2393 KnownBits Known; 2394 if (SimplifyDemandedBits(Src, SrcDemandedBits, SrcDemandedElts, Known, 2395 TLO, Depth + 1)) 2396 return true; 2397 } 2398 2399 // If the src element is zero/undef then all the output elements will be - 2400 // only demanded elements are guaranteed to be correct. 2401 for (unsigned i = 0; i != NumSrcElts; ++i) { 2402 if (SrcDemandedElts[i]) { 2403 if (SrcZero[i]) 2404 KnownZero.setBits(i * Scale, (i + 1) * Scale); 2405 if (SrcUndef[i]) 2406 KnownUndef.setBits(i * Scale, (i + 1) * Scale); 2407 } 2408 } 2409 } 2410 2411 // Bitcast from 'small element' src vector to 'large element' vector, we 2412 // demand all smaller source elements covered by the larger demanded element 2413 // of this vector. 
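    // E.g. for (v2i64 bitcast (v4i32 %x)), demanding element 1 demands the
    // two i32 source elements 2 and 3 that it is built from.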
2414 if ((NumSrcElts % NumElts) == 0) { 2415 unsigned Scale = NumSrcElts / NumElts; 2416 for (unsigned i = 0; i != NumElts; ++i) 2417 if (DemandedElts[i]) 2418 SrcDemandedElts.setBits(i * Scale, (i + 1) * Scale); 2419 2420 if (SimplifyDemandedVectorElts(Src, SrcDemandedElts, SrcUndef, SrcZero, 2421 TLO, Depth + 1)) 2422 return true; 2423 2424 // If all the src elements covering an output element are zero/undef, then 2425 // the output element will be as well, assuming it was demanded. 2426 for (unsigned i = 0; i != NumElts; ++i) { 2427 if (DemandedElts[i]) { 2428 if (SrcZero.extractBits(Scale, i * Scale).isAllOnesValue()) 2429 KnownZero.setBit(i); 2430 if (SrcUndef.extractBits(Scale, i * Scale).isAllOnesValue()) 2431 KnownUndef.setBit(i); 2432 } 2433 } 2434 } 2435 break; 2436 } 2437 case ISD::BUILD_VECTOR: { 2438 // Check all elements and simplify any unused elements with UNDEF. 2439 if (!DemandedElts.isAllOnesValue()) { 2440 // Don't simplify BROADCASTS. 2441 if (llvm::any_of(Op->op_values(), 2442 [&](SDValue Elt) { return Op.getOperand(0) != Elt; })) { 2443 SmallVector<SDValue, 32> Ops(Op->op_begin(), Op->op_end()); 2444 bool Updated = false; 2445 for (unsigned i = 0; i != NumElts; ++i) { 2446 if (!DemandedElts[i] && !Ops[i].isUndef()) { 2447 Ops[i] = TLO.DAG.getUNDEF(Ops[0].getValueType()); 2448 KnownUndef.setBit(i); 2449 Updated = true; 2450 } 2451 } 2452 if (Updated) 2453 return TLO.CombineTo(Op, TLO.DAG.getBuildVector(VT, DL, Ops)); 2454 } 2455 } 2456 for (unsigned i = 0; i != NumElts; ++i) { 2457 SDValue SrcOp = Op.getOperand(i); 2458 if (SrcOp.isUndef()) { 2459 KnownUndef.setBit(i); 2460 } else if (EltSizeInBits == SrcOp.getScalarValueSizeInBits() && 2461 (isNullConstant(SrcOp) || isNullFPConstant(SrcOp))) { 2462 KnownZero.setBit(i); 2463 } 2464 } 2465 break; 2466 } 2467 case ISD::CONCAT_VECTORS: { 2468 EVT SubVT = Op.getOperand(0).getValueType(); 2469 unsigned NumSubVecs = Op.getNumOperands(); 2470 unsigned NumSubElts = SubVT.getVectorNumElements(); 2471 for (unsigned i = 0; i != NumSubVecs; ++i) { 2472 SDValue SubOp = Op.getOperand(i); 2473 APInt SubElts = DemandedElts.extractBits(NumSubElts, i * NumSubElts); 2474 APInt SubUndef, SubZero; 2475 if (SimplifyDemandedVectorElts(SubOp, SubElts, SubUndef, SubZero, TLO, 2476 Depth + 1)) 2477 return true; 2478 KnownUndef.insertBits(SubUndef, i * NumSubElts); 2479 KnownZero.insertBits(SubZero, i * NumSubElts); 2480 } 2481 break; 2482 } 2483 case ISD::INSERT_SUBVECTOR: { 2484 // Demand any elements from the subvector and the remainder from the src its 2485 // inserted into. 2486 SDValue Src = Op.getOperand(0); 2487 SDValue Sub = Op.getOperand(1); 2488 uint64_t Idx = Op.getConstantOperandVal(2); 2489 unsigned NumSubElts = Sub.getValueType().getVectorNumElements(); 2490 APInt DemandedSubElts = DemandedElts.extractBits(NumSubElts, Idx); 2491 APInt DemandedSrcElts = DemandedElts; 2492 DemandedSrcElts.insertBits(APInt::getNullValue(NumSubElts), Idx); 2493 2494 APInt SubUndef, SubZero; 2495 if (SimplifyDemandedVectorElts(Sub, DemandedSubElts, SubUndef, SubZero, TLO, 2496 Depth + 1)) 2497 return true; 2498 2499 // If none of the src operand elements are demanded, replace it with undef. 
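    // E.g. if only the lanes covered by the inserted subvector are demanded,
    // the base vector is rebuilt as undef while the insert itself is kept.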
2500 if (!DemandedSrcElts && !Src.isUndef()) 2501 return TLO.CombineTo(Op, TLO.DAG.getNode(ISD::INSERT_SUBVECTOR, DL, VT, 2502 TLO.DAG.getUNDEF(VT), Sub, 2503 Op.getOperand(2))); 2504 2505 if (SimplifyDemandedVectorElts(Src, DemandedSrcElts, KnownUndef, KnownZero, 2506 TLO, Depth + 1)) 2507 return true; 2508 KnownUndef.insertBits(SubUndef, Idx); 2509 KnownZero.insertBits(SubZero, Idx); 2510 2511 // Attempt to avoid multi-use ops if we don't need anything from them. 2512 if (!DemandedSrcElts.isAllOnesValue() || 2513 !DemandedSubElts.isAllOnesValue()) { 2514 APInt DemandedBits = APInt::getAllOnesValue(VT.getScalarSizeInBits()); 2515 SDValue NewSrc = SimplifyMultipleUseDemandedBits( 2516 Src, DemandedBits, DemandedSrcElts, TLO.DAG, Depth + 1); 2517 SDValue NewSub = SimplifyMultipleUseDemandedBits( 2518 Sub, DemandedBits, DemandedSubElts, TLO.DAG, Depth + 1); 2519 if (NewSrc || NewSub) { 2520 NewSrc = NewSrc ? NewSrc : Src; 2521 NewSub = NewSub ? NewSub : Sub; 2522 SDValue NewOp = TLO.DAG.getNode(Op.getOpcode(), SDLoc(Op), VT, NewSrc, 2523 NewSub, Op.getOperand(2)); 2524 return TLO.CombineTo(Op, NewOp); 2525 } 2526 } 2527 break; 2528 } 2529 case ISD::EXTRACT_SUBVECTOR: { 2530 // Offset the demanded elts by the subvector index. 2531 SDValue Src = Op.getOperand(0); 2532 uint64_t Idx = Op.getConstantOperandVal(1); 2533 unsigned NumSrcElts = Src.getValueType().getVectorNumElements(); 2534 APInt DemandedSrcElts = DemandedElts.zextOrSelf(NumSrcElts).shl(Idx); 2535 2536 APInt SrcUndef, SrcZero; 2537 if (SimplifyDemandedVectorElts(Src, DemandedSrcElts, SrcUndef, SrcZero, TLO, 2538 Depth + 1)) 2539 return true; 2540 KnownUndef = SrcUndef.extractBits(NumElts, Idx); 2541 KnownZero = SrcZero.extractBits(NumElts, Idx); 2542 2543 // Attempt to avoid multi-use ops if we don't need anything from them. 2544 if (!DemandedElts.isAllOnesValue()) { 2545 APInt DemandedBits = APInt::getAllOnesValue(VT.getScalarSizeInBits()); 2546 SDValue NewSrc = SimplifyMultipleUseDemandedBits( 2547 Src, DemandedBits, DemandedSrcElts, TLO.DAG, Depth + 1); 2548 if (NewSrc) { 2549 SDValue NewOp = TLO.DAG.getNode(Op.getOpcode(), SDLoc(Op), VT, NewSrc, 2550 Op.getOperand(1)); 2551 return TLO.CombineTo(Op, NewOp); 2552 } 2553 } 2554 break; 2555 } 2556 case ISD::INSERT_VECTOR_ELT: { 2557 SDValue Vec = Op.getOperand(0); 2558 SDValue Scl = Op.getOperand(1); 2559 auto *CIdx = dyn_cast<ConstantSDNode>(Op.getOperand(2)); 2560 2561 // For a legal, constant insertion index, if we don't need this insertion 2562 // then strip it, else remove it from the demanded elts. 2563 if (CIdx && CIdx->getAPIntValue().ult(NumElts)) { 2564 unsigned Idx = CIdx->getZExtValue(); 2565 if (!DemandedElts[Idx]) 2566 return TLO.CombineTo(Op, Vec); 2567 2568 APInt DemandedVecElts(DemandedElts); 2569 DemandedVecElts.clearBit(Idx); 2570 if (SimplifyDemandedVectorElts(Vec, DemandedVecElts, KnownUndef, 2571 KnownZero, TLO, Depth + 1)) 2572 return true; 2573 2574 KnownUndef.clearBit(Idx); 2575 if (Scl.isUndef()) 2576 KnownUndef.setBit(Idx); 2577 2578 KnownZero.clearBit(Idx); 2579 if (isNullConstant(Scl) || isNullFPConstant(Scl)) 2580 KnownZero.setBit(Idx); 2581 break; 2582 } 2583 2584 APInt VecUndef, VecZero; 2585 if (SimplifyDemandedVectorElts(Vec, DemandedElts, VecUndef, VecZero, TLO, 2586 Depth + 1)) 2587 return true; 2588 // Without knowing the insertion index we can't set KnownUndef/KnownZero. 2589 break; 2590 } 2591 case ISD::VSELECT: { 2592 // Try to transform the select condition based on the current demanded 2593 // elements. 
2594 // TODO: If a condition element is undef, we can choose from one arm of the 2595 // select (and if one arm is undef, then we can propagate that to the 2596 // result). 2597 // TODO - add support for constant vselect masks (see IR version of this). 2598 APInt UnusedUndef, UnusedZero; 2599 if (SimplifyDemandedVectorElts(Op.getOperand(0), DemandedElts, UnusedUndef, 2600 UnusedZero, TLO, Depth + 1)) 2601 return true; 2602 2603 // See if we can simplify either vselect operand. 2604 APInt DemandedLHS(DemandedElts); 2605 APInt DemandedRHS(DemandedElts); 2606 APInt UndefLHS, ZeroLHS; 2607 APInt UndefRHS, ZeroRHS; 2608 if (SimplifyDemandedVectorElts(Op.getOperand(1), DemandedLHS, UndefLHS, 2609 ZeroLHS, TLO, Depth + 1)) 2610 return true; 2611 if (SimplifyDemandedVectorElts(Op.getOperand(2), DemandedRHS, UndefRHS, 2612 ZeroRHS, TLO, Depth + 1)) 2613 return true; 2614 2615 KnownUndef = UndefLHS & UndefRHS; 2616 KnownZero = ZeroLHS & ZeroRHS; 2617 break; 2618 } 2619 case ISD::VECTOR_SHUFFLE: { 2620 ArrayRef<int> ShuffleMask = cast<ShuffleVectorSDNode>(Op)->getMask(); 2621 2622 // Collect demanded elements from shuffle operands.. 2623 APInt DemandedLHS(NumElts, 0); 2624 APInt DemandedRHS(NumElts, 0); 2625 for (unsigned i = 0; i != NumElts; ++i) { 2626 int M = ShuffleMask[i]; 2627 if (M < 0 || !DemandedElts[i]) 2628 continue; 2629 assert(0 <= M && M < (int)(2 * NumElts) && "Shuffle index out of range"); 2630 if (M < (int)NumElts) 2631 DemandedLHS.setBit(M); 2632 else 2633 DemandedRHS.setBit(M - NumElts); 2634 } 2635 2636 // See if we can simplify either shuffle operand. 2637 APInt UndefLHS, ZeroLHS; 2638 APInt UndefRHS, ZeroRHS; 2639 if (SimplifyDemandedVectorElts(Op.getOperand(0), DemandedLHS, UndefLHS, 2640 ZeroLHS, TLO, Depth + 1)) 2641 return true; 2642 if (SimplifyDemandedVectorElts(Op.getOperand(1), DemandedRHS, UndefRHS, 2643 ZeroRHS, TLO, Depth + 1)) 2644 return true; 2645 2646 // Simplify mask using undef elements from LHS/RHS. 2647 bool Updated = false; 2648 bool IdentityLHS = true, IdentityRHS = true; 2649 SmallVector<int, 32> NewMask(ShuffleMask.begin(), ShuffleMask.end()); 2650 for (unsigned i = 0; i != NumElts; ++i) { 2651 int &M = NewMask[i]; 2652 if (M < 0) 2653 continue; 2654 if (!DemandedElts[i] || (M < (int)NumElts && UndefLHS[M]) || 2655 (M >= (int)NumElts && UndefRHS[M - NumElts])) { 2656 Updated = true; 2657 M = -1; 2658 } 2659 IdentityLHS &= (M < 0) || (M == (int)i); 2660 IdentityRHS &= (M < 0) || ((M - NumElts) == i); 2661 } 2662 2663 // Update legal shuffle masks based on demanded elements if it won't reduce 2664 // to Identity which can cause premature removal of the shuffle mask. 2665 if (Updated && !IdentityLHS && !IdentityRHS && !TLO.LegalOps) { 2666 SDValue LegalShuffle = 2667 buildLegalVectorShuffle(VT, DL, Op.getOperand(0), Op.getOperand(1), 2668 NewMask, TLO.DAG); 2669 if (LegalShuffle) 2670 return TLO.CombineTo(Op, LegalShuffle); 2671 } 2672 2673 // Propagate undef/zero elements from LHS/RHS. 
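    // An undef mask entry (-1) makes the lane undef; otherwise the lane
    // inherits undef/zero from the operand element the mask selects.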
2674 for (unsigned i = 0; i != NumElts; ++i) { 2675 int M = ShuffleMask[i]; 2676 if (M < 0) { 2677 KnownUndef.setBit(i); 2678 } else if (M < (int)NumElts) { 2679 if (UndefLHS[M]) 2680 KnownUndef.setBit(i); 2681 if (ZeroLHS[M]) 2682 KnownZero.setBit(i); 2683 } else { 2684 if (UndefRHS[M - NumElts]) 2685 KnownUndef.setBit(i); 2686 if (ZeroRHS[M - NumElts]) 2687 KnownZero.setBit(i); 2688 } 2689 } 2690 break; 2691 } 2692 case ISD::ANY_EXTEND_VECTOR_INREG: 2693 case ISD::SIGN_EXTEND_VECTOR_INREG: 2694 case ISD::ZERO_EXTEND_VECTOR_INREG: { 2695 APInt SrcUndef, SrcZero; 2696 SDValue Src = Op.getOperand(0); 2697 unsigned NumSrcElts = Src.getValueType().getVectorNumElements(); 2698 APInt DemandedSrcElts = DemandedElts.zextOrSelf(NumSrcElts); 2699 if (SimplifyDemandedVectorElts(Src, DemandedSrcElts, SrcUndef, SrcZero, TLO, 2700 Depth + 1)) 2701 return true; 2702 KnownZero = SrcZero.zextOrTrunc(NumElts); 2703 KnownUndef = SrcUndef.zextOrTrunc(NumElts); 2704 2705 if (Op.getOpcode() == ISD::ANY_EXTEND_VECTOR_INREG && 2706 Op.getValueSizeInBits() == Src.getValueSizeInBits() && 2707 DemandedSrcElts == 1 && TLO.DAG.getDataLayout().isLittleEndian()) { 2708 // aext - if we just need the bottom element then we can bitcast. 2709 return TLO.CombineTo(Op, TLO.DAG.getBitcast(VT, Src)); 2710 } 2711 2712 if (Op.getOpcode() == ISD::ZERO_EXTEND_VECTOR_INREG) { 2713 // zext(undef) upper bits are guaranteed to be zero. 2714 if (DemandedElts.isSubsetOf(KnownUndef)) 2715 return TLO.CombineTo(Op, TLO.DAG.getConstant(0, SDLoc(Op), VT)); 2716 KnownUndef.clearAllBits(); 2717 } 2718 break; 2719 } 2720 2721 // TODO: There are more binop opcodes that could be handled here - MIN, 2722 // MAX, saturated math, etc. 2723 case ISD::OR: 2724 case ISD::XOR: 2725 case ISD::ADD: 2726 case ISD::SUB: 2727 case ISD::FADD: 2728 case ISD::FSUB: 2729 case ISD::FMUL: 2730 case ISD::FDIV: 2731 case ISD::FREM: { 2732 SDValue Op0 = Op.getOperand(0); 2733 SDValue Op1 = Op.getOperand(1); 2734 2735 APInt UndefRHS, ZeroRHS; 2736 if (SimplifyDemandedVectorElts(Op1, DemandedElts, UndefRHS, ZeroRHS, TLO, 2737 Depth + 1)) 2738 return true; 2739 APInt UndefLHS, ZeroLHS; 2740 if (SimplifyDemandedVectorElts(Op0, DemandedElts, UndefLHS, ZeroLHS, TLO, 2741 Depth + 1)) 2742 return true; 2743 2744 KnownZero = ZeroLHS & ZeroRHS; 2745 KnownUndef = getKnownUndefForVectorBinop(Op, TLO.DAG, UndefLHS, UndefRHS); 2746 2747 // Attempt to avoid multi-use ops if we don't need anything from them. 2748 // TODO - use KnownUndef to relax the demandedelts? 2749 if (!DemandedElts.isAllOnesValue()) 2750 if (SimplifyDemandedVectorEltsBinOp(Op0, Op1)) 2751 return true; 2752 break; 2753 } 2754 case ISD::SHL: 2755 case ISD::SRL: 2756 case ISD::SRA: 2757 case ISD::ROTL: 2758 case ISD::ROTR: { 2759 SDValue Op0 = Op.getOperand(0); 2760 SDValue Op1 = Op.getOperand(1); 2761 2762 APInt UndefRHS, ZeroRHS; 2763 if (SimplifyDemandedVectorElts(Op1, DemandedElts, UndefRHS, ZeroRHS, TLO, 2764 Depth + 1)) 2765 return true; 2766 APInt UndefLHS, ZeroLHS; 2767 if (SimplifyDemandedVectorElts(Op0, DemandedElts, UndefLHS, ZeroLHS, TLO, 2768 Depth + 1)) 2769 return true; 2770 2771 KnownZero = ZeroLHS; 2772 KnownUndef = UndefLHS & UndefRHS; // TODO: use getKnownUndefForVectorBinop? 2773 2774 // Attempt to avoid multi-use ops if we don't need anything from them. 2775 // TODO - use KnownUndef to relax the demandedelts? 
2776 if (!DemandedElts.isAllOnesValue()) 2777 if (SimplifyDemandedVectorEltsBinOp(Op0, Op1)) 2778 return true; 2779 break; 2780 } 2781 case ISD::MUL: 2782 case ISD::AND: { 2783 SDValue Op0 = Op.getOperand(0); 2784 SDValue Op1 = Op.getOperand(1); 2785 2786 APInt SrcUndef, SrcZero; 2787 if (SimplifyDemandedVectorElts(Op1, DemandedElts, SrcUndef, SrcZero, TLO, 2788 Depth + 1)) 2789 return true; 2790 if (SimplifyDemandedVectorElts(Op0, DemandedElts, KnownUndef, KnownZero, 2791 TLO, Depth + 1)) 2792 return true; 2793 2794 // If either side has a zero element, then the result element is zero, even 2795 // if the other is an UNDEF. 2796 // TODO: Extend getKnownUndefForVectorBinop to also deal with known zeros 2797 // and then handle 'and' nodes with the rest of the binop opcodes. 2798 KnownZero |= SrcZero; 2799 KnownUndef &= SrcUndef; 2800 KnownUndef &= ~KnownZero; 2801 2802 // Attempt to avoid multi-use ops if we don't need anything from them. 2803 // TODO - use KnownUndef to relax the demandedelts? 2804 if (!DemandedElts.isAllOnesValue()) 2805 if (SimplifyDemandedVectorEltsBinOp(Op0, Op1)) 2806 return true; 2807 break; 2808 } 2809 case ISD::TRUNCATE: 2810 case ISD::SIGN_EXTEND: 2811 case ISD::ZERO_EXTEND: 2812 if (SimplifyDemandedVectorElts(Op.getOperand(0), DemandedElts, KnownUndef, 2813 KnownZero, TLO, Depth + 1)) 2814 return true; 2815 2816 if (Op.getOpcode() == ISD::ZERO_EXTEND) { 2817 // zext(undef) upper bits are guaranteed to be zero. 2818 if (DemandedElts.isSubsetOf(KnownUndef)) 2819 return TLO.CombineTo(Op, TLO.DAG.getConstant(0, SDLoc(Op), VT)); 2820 KnownUndef.clearAllBits(); 2821 } 2822 break; 2823 default: { 2824 if (Op.getOpcode() >= ISD::BUILTIN_OP_END) { 2825 if (SimplifyDemandedVectorEltsForTargetNode(Op, DemandedElts, KnownUndef, 2826 KnownZero, TLO, Depth)) 2827 return true; 2828 } else { 2829 KnownBits Known; 2830 APInt DemandedBits = APInt::getAllOnesValue(EltSizeInBits); 2831 if (SimplifyDemandedBits(Op, DemandedBits, OriginalDemandedElts, Known, 2832 TLO, Depth, AssumeSingleUse)) 2833 return true; 2834 } 2835 break; 2836 } 2837 } 2838 assert((KnownUndef & KnownZero) == 0 && "Elements flagged as undef AND zero"); 2839 2840 // Constant fold all undef cases. 2841 // TODO: Handle zero cases as well. 2842 if (DemandedElts.isSubsetOf(KnownUndef)) 2843 return TLO.CombineTo(Op, TLO.DAG.getUNDEF(VT)); 2844 2845 return false; 2846 } 2847 2848 /// Determine which of the bits specified in Mask are known to be either zero or 2849 /// one and return them in the Known. 2850 void TargetLowering::computeKnownBitsForTargetNode(const SDValue Op, 2851 KnownBits &Known, 2852 const APInt &DemandedElts, 2853 const SelectionDAG &DAG, 2854 unsigned Depth) const { 2855 assert((Op.getOpcode() >= ISD::BUILTIN_OP_END || 2856 Op.getOpcode() == ISD::INTRINSIC_WO_CHAIN || 2857 Op.getOpcode() == ISD::INTRINSIC_W_CHAIN || 2858 Op.getOpcode() == ISD::INTRINSIC_VOID) && 2859 "Should use MaskedValueIsZero if you don't know whether Op" 2860 " is a target node!"); 2861 Known.resetAll(); 2862 } 2863 2864 void TargetLowering::computeKnownBitsForTargetInstr( 2865 GISelKnownBits &Analysis, Register R, KnownBits &Known, 2866 const APInt &DemandedElts, const MachineRegisterInfo &MRI, 2867 unsigned Depth) const { 2868 Known.resetAll(); 2869 } 2870 2871 void TargetLowering::computeKnownBitsForFrameIndex( 2872 const int FrameIdx, KnownBits &Known, const MachineFunction &MF) const { 2873 // The low bits are known zero if the pointer is aligned. 
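  // E.g. a frame object with 16-byte alignment has its low 4 address bits
  // known to be zero.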
2874 Known.Zero.setLowBits(Log2(MF.getFrameInfo().getObjectAlign(FrameIdx))); 2875 } 2876 2877 Align TargetLowering::computeKnownAlignForTargetInstr( 2878 GISelKnownBits &Analysis, Register R, const MachineRegisterInfo &MRI, 2879 unsigned Depth) const { 2880 return Align(1); 2881 } 2882 2883 /// This method can be implemented by targets that want to expose additional 2884 /// information about sign bits to the DAG Combiner. 2885 unsigned TargetLowering::ComputeNumSignBitsForTargetNode(SDValue Op, 2886 const APInt &, 2887 const SelectionDAG &, 2888 unsigned Depth) const { 2889 assert((Op.getOpcode() >= ISD::BUILTIN_OP_END || 2890 Op.getOpcode() == ISD::INTRINSIC_WO_CHAIN || 2891 Op.getOpcode() == ISD::INTRINSIC_W_CHAIN || 2892 Op.getOpcode() == ISD::INTRINSIC_VOID) && 2893 "Should use ComputeNumSignBits if you don't know whether Op" 2894 " is a target node!"); 2895 return 1; 2896 } 2897 2898 unsigned TargetLowering::computeNumSignBitsForTargetInstr( 2899 GISelKnownBits &Analysis, Register R, const APInt &DemandedElts, 2900 const MachineRegisterInfo &MRI, unsigned Depth) const { 2901 return 1; 2902 } 2903 2904 bool TargetLowering::SimplifyDemandedVectorEltsForTargetNode( 2905 SDValue Op, const APInt &DemandedElts, APInt &KnownUndef, APInt &KnownZero, 2906 TargetLoweringOpt &TLO, unsigned Depth) const { 2907 assert((Op.getOpcode() >= ISD::BUILTIN_OP_END || 2908 Op.getOpcode() == ISD::INTRINSIC_WO_CHAIN || 2909 Op.getOpcode() == ISD::INTRINSIC_W_CHAIN || 2910 Op.getOpcode() == ISD::INTRINSIC_VOID) && 2911 "Should use SimplifyDemandedVectorElts if you don't know whether Op" 2912 " is a target node!"); 2913 return false; 2914 } 2915 2916 bool TargetLowering::SimplifyDemandedBitsForTargetNode( 2917 SDValue Op, const APInt &DemandedBits, const APInt &DemandedElts, 2918 KnownBits &Known, TargetLoweringOpt &TLO, unsigned Depth) const { 2919 assert((Op.getOpcode() >= ISD::BUILTIN_OP_END || 2920 Op.getOpcode() == ISD::INTRINSIC_WO_CHAIN || 2921 Op.getOpcode() == ISD::INTRINSIC_W_CHAIN || 2922 Op.getOpcode() == ISD::INTRINSIC_VOID) && 2923 "Should use SimplifyDemandedBits if you don't know whether Op" 2924 " is a target node!"); 2925 computeKnownBitsForTargetNode(Op, Known, DemandedElts, TLO.DAG, Depth); 2926 return false; 2927 } 2928 2929 SDValue TargetLowering::SimplifyMultipleUseDemandedBitsForTargetNode( 2930 SDValue Op, const APInt &DemandedBits, const APInt &DemandedElts, 2931 SelectionDAG &DAG, unsigned Depth) const { 2932 assert( 2933 (Op.getOpcode() >= ISD::BUILTIN_OP_END || 2934 Op.getOpcode() == ISD::INTRINSIC_WO_CHAIN || 2935 Op.getOpcode() == ISD::INTRINSIC_W_CHAIN || 2936 Op.getOpcode() == ISD::INTRINSIC_VOID) && 2937 "Should use SimplifyMultipleUseDemandedBits if you don't know whether Op" 2938 " is a target node!"); 2939 return SDValue(); 2940 } 2941 2942 SDValue 2943 TargetLowering::buildLegalVectorShuffle(EVT VT, const SDLoc &DL, SDValue N0, 2944 SDValue N1, MutableArrayRef<int> Mask, 2945 SelectionDAG &DAG) const { 2946 bool LegalMask = isShuffleMaskLegal(Mask, VT); 2947 if (!LegalMask) { 2948 std::swap(N0, N1); 2949 ShuffleVectorSDNode::commuteMask(Mask); 2950 LegalMask = isShuffleMaskLegal(Mask, VT); 2951 } 2952 2953 if (!LegalMask) 2954 return SDValue(); 2955 2956 return DAG.getVectorShuffle(VT, DL, N0, N1, Mask); 2957 } 2958 2959 const Constant *TargetLowering::getTargetConstantFromLoad(LoadSDNode*) const { 2960 return nullptr; 2961 } 2962 2963 bool TargetLowering::isKnownNeverNaNForTargetNode(SDValue Op, 2964 const SelectionDAG &DAG, 2965 bool SNaN, 2966 unsigned Depth) const { 
2967 assert((Op.getOpcode() >= ISD::BUILTIN_OP_END || 2968 Op.getOpcode() == ISD::INTRINSIC_WO_CHAIN || 2969 Op.getOpcode() == ISD::INTRINSIC_W_CHAIN || 2970 Op.getOpcode() == ISD::INTRINSIC_VOID) && 2971 "Should use isKnownNeverNaN if you don't know whether Op" 2972 " is a target node!"); 2973 return false; 2974 } 2975 2976 // FIXME: Ideally, this would use ISD::isConstantSplatVector(), but that must 2977 // work with truncating build vectors and vectors with elements of less than 2978 // 8 bits. 2979 bool TargetLowering::isConstTrueVal(const SDNode *N) const { 2980 if (!N) 2981 return false; 2982 2983 APInt CVal; 2984 if (auto *CN = dyn_cast<ConstantSDNode>(N)) { 2985 CVal = CN->getAPIntValue(); 2986 } else if (auto *BV = dyn_cast<BuildVectorSDNode>(N)) { 2987 auto *CN = BV->getConstantSplatNode(); 2988 if (!CN) 2989 return false; 2990 2991 // If this is a truncating build vector, truncate the splat value. 2992 // Otherwise, we may fail to match the expected values below. 2993 unsigned BVEltWidth = BV->getValueType(0).getScalarSizeInBits(); 2994 CVal = CN->getAPIntValue(); 2995 if (BVEltWidth < CVal.getBitWidth()) 2996 CVal = CVal.trunc(BVEltWidth); 2997 } else { 2998 return false; 2999 } 3000 3001 switch (getBooleanContents(N->getValueType(0))) { 3002 case UndefinedBooleanContent: 3003 return CVal[0]; 3004 case ZeroOrOneBooleanContent: 3005 return CVal.isOneValue(); 3006 case ZeroOrNegativeOneBooleanContent: 3007 return CVal.isAllOnesValue(); 3008 } 3009 3010 llvm_unreachable("Invalid boolean contents"); 3011 } 3012 3013 bool TargetLowering::isConstFalseVal(const SDNode *N) const { 3014 if (!N) 3015 return false; 3016 3017 const ConstantSDNode *CN = dyn_cast<ConstantSDNode>(N); 3018 if (!CN) { 3019 const BuildVectorSDNode *BV = dyn_cast<BuildVectorSDNode>(N); 3020 if (!BV) 3021 return false; 3022 3023 // Only interested in constant splats, we don't care about undef 3024 // elements in identifying boolean constants and getConstantSplatNode 3025 // returns NULL if all ops are undef; 3026 CN = BV->getConstantSplatNode(); 3027 if (!CN) 3028 return false; 3029 } 3030 3031 if (getBooleanContents(N->getValueType(0)) == UndefinedBooleanContent) 3032 return !CN->getAPIntValue()[0]; 3033 3034 return CN->isNullValue(); 3035 } 3036 3037 bool TargetLowering::isExtendedTrueVal(const ConstantSDNode *N, EVT VT, 3038 bool SExt) const { 3039 if (VT == MVT::i1) 3040 return N->isOne(); 3041 3042 TargetLowering::BooleanContent Cnt = getBooleanContents(VT); 3043 switch (Cnt) { 3044 case TargetLowering::ZeroOrOneBooleanContent: 3045 // An extended value of 1 is always true, unless its original type is i1, 3046 // in which case it will be sign extended to -1. 3047 return (N->isOne() && !SExt) || (SExt && (N->getValueType(0) != MVT::i1)); 3048 case TargetLowering::UndefinedBooleanContent: 3049 case TargetLowering::ZeroOrNegativeOneBooleanContent: 3050 return N->isAllOnesValue() && SExt; 3051 } 3052 llvm_unreachable("Unexpected enumeration."); 3053 } 3054 3055 /// This helper function of SimplifySetCC tries to optimize the comparison when 3056 /// either operand of the SetCC node is a bitwise-and instruction. 
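/// E.g. when Y is known to be a power of two, (X & Y) == Y becomes
/// (X & Y) != 0; and on targets with an and-not operation, (X & Y) == Y
/// becomes (~X & Y) == 0.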
3057 SDValue TargetLowering::foldSetCCWithAnd(EVT VT, SDValue N0, SDValue N1, 3058 ISD::CondCode Cond, const SDLoc &DL, 3059 DAGCombinerInfo &DCI) const { 3060 // Match these patterns in any of their permutations: 3061 // (X & Y) == Y 3062 // (X & Y) != Y 3063 if (N1.getOpcode() == ISD::AND && N0.getOpcode() != ISD::AND) 3064 std::swap(N0, N1); 3065 3066 EVT OpVT = N0.getValueType(); 3067 if (N0.getOpcode() != ISD::AND || !OpVT.isInteger() || 3068 (Cond != ISD::SETEQ && Cond != ISD::SETNE)) 3069 return SDValue(); 3070 3071 SDValue X, Y; 3072 if (N0.getOperand(0) == N1) { 3073 X = N0.getOperand(1); 3074 Y = N0.getOperand(0); 3075 } else if (N0.getOperand(1) == N1) { 3076 X = N0.getOperand(0); 3077 Y = N0.getOperand(1); 3078 } else { 3079 return SDValue(); 3080 } 3081 3082 SelectionDAG &DAG = DCI.DAG; 3083 SDValue Zero = DAG.getConstant(0, DL, OpVT); 3084 if (DAG.isKnownToBeAPowerOfTwo(Y)) { 3085 // Simplify X & Y == Y to X & Y != 0 if Y has exactly one bit set. 3086 // Note that where Y is variable and is known to have at most one bit set 3087 // (for example, if it is Z & 1) we cannot do this; the expressions are not 3088 // equivalent when Y == 0. 3089 assert(OpVT.isInteger()); 3090 Cond = ISD::getSetCCInverse(Cond, OpVT); 3091 if (DCI.isBeforeLegalizeOps() || 3092 isCondCodeLegal(Cond, N0.getSimpleValueType())) 3093 return DAG.getSetCC(DL, VT, N0, Zero, Cond); 3094 } else if (N0.hasOneUse() && hasAndNotCompare(Y)) { 3095 // If the target supports an 'and-not' or 'and-complement' logic operation, 3096 // try to use that to make a comparison operation more efficient. 3097 // But don't do this transform if the mask is a single bit because there are 3098 // more efficient ways to deal with that case (for example, 'bt' on x86 or 3099 // 'rlwinm' on PPC). 3100 3101 // Bail out if the compare operand that we want to turn into a zero is 3102 // already a zero (otherwise, infinite loop). 3103 auto *YConst = dyn_cast<ConstantSDNode>(Y); 3104 if (YConst && YConst->isNullValue()) 3105 return SDValue(); 3106 3107 // Transform this into: ~X & Y == 0. 3108 SDValue NotX = DAG.getNOT(SDLoc(X), X, OpVT); 3109 SDValue NewAnd = DAG.getNode(ISD::AND, SDLoc(N0), OpVT, NotX, Y); 3110 return DAG.getSetCC(DL, VT, NewAnd, Zero, Cond); 3111 } 3112 3113 return SDValue(); 3114 } 3115 3116 /// There are multiple IR patterns that could be checking whether certain 3117 /// truncation of a signed number would be lossy or not. The pattern which is 3118 /// best at IR level, may not lower optimally. Thus, we want to unfold it. 3119 /// We are looking for the following pattern: (KeptBits is a constant) 3120 /// (add %x, (1 << (KeptBits-1))) srccond (1 << KeptBits) 3121 /// KeptBits won't be bitwidth(x), that will be constant-folded to true/false. 3122 /// KeptBits also can't be 1, that would have been folded to %x dstcond 0 3123 /// We will unfold it into the natural trunc+sext pattern: 3124 /// ((%x << C) a>> C) dstcond %x 3125 /// Where C = bitwidth(x) - KeptBits and C u< bitwidth(x) 3126 SDValue TargetLowering::optimizeSetCCOfSignedTruncationCheck( 3127 EVT SCCVT, SDValue N0, SDValue N1, ISD::CondCode Cond, DAGCombinerInfo &DCI, 3128 const SDLoc &DL) const { 3129 // We must be comparing with a constant. 3130 ConstantSDNode *C1; 3131 if (!(C1 = dyn_cast<ConstantSDNode>(N1))) 3132 return SDValue(); 3133 3134 // N0 should be: add %x, (1 << (KeptBits-1)) 3135 if (N0->getOpcode() != ISD::ADD) 3136 return SDValue(); 3137 3138 // And we must be 'add'ing a constant. 
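  // E.g. for i32 %x with KeptBits == 16, the pattern (add %x, 32768) u< 65536
  // will be unfolded below into ((%x << 16) a>> 16) == %x.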
3139 ConstantSDNode *C01; 3140 if (!(C01 = dyn_cast<ConstantSDNode>(N0->getOperand(1)))) 3141 return SDValue(); 3142 3143 SDValue X = N0->getOperand(0); 3144 EVT XVT = X.getValueType(); 3145 3146 // Validate constants ... 3147 3148 APInt I1 = C1->getAPIntValue(); 3149 3150 ISD::CondCode NewCond; 3151 if (Cond == ISD::CondCode::SETULT) { 3152 NewCond = ISD::CondCode::SETEQ; 3153 } else if (Cond == ISD::CondCode::SETULE) { 3154 NewCond = ISD::CondCode::SETEQ; 3155 // But need to 'canonicalize' the constant. 3156 I1 += 1; 3157 } else if (Cond == ISD::CondCode::SETUGT) { 3158 NewCond = ISD::CondCode::SETNE; 3159 // But need to 'canonicalize' the constant. 3160 I1 += 1; 3161 } else if (Cond == ISD::CondCode::SETUGE) { 3162 NewCond = ISD::CondCode::SETNE; 3163 } else 3164 return SDValue(); 3165 3166 APInt I01 = C01->getAPIntValue(); 3167 3168 auto checkConstants = [&I1, &I01]() -> bool { 3169 // Both of them must be power-of-two, and the constant from setcc is bigger. 3170 return I1.ugt(I01) && I1.isPowerOf2() && I01.isPowerOf2(); 3171 }; 3172 3173 if (checkConstants()) { 3174 // Great, e.g. got icmp ult i16 (add i16 %x, 128), 256 3175 } else { 3176 // What if we invert constants? (and the target predicate) 3177 I1.negate(); 3178 I01.negate(); 3179 assert(XVT.isInteger()); 3180 NewCond = getSetCCInverse(NewCond, XVT); 3181 if (!checkConstants()) 3182 return SDValue(); 3183 // Great, e.g. got icmp uge i16 (add i16 %x, -128), -256 3184 } 3185 3186 // They are power-of-two, so which bit is set? 3187 const unsigned KeptBits = I1.logBase2(); 3188 const unsigned KeptBitsMinusOne = I01.logBase2(); 3189 3190 // Magic! 3191 if (KeptBits != (KeptBitsMinusOne + 1)) 3192 return SDValue(); 3193 assert(KeptBits > 0 && KeptBits < XVT.getSizeInBits() && "unreachable"); 3194 3195 // We don't want to do this in every single case. 3196 SelectionDAG &DAG = DCI.DAG; 3197 if (!DAG.getTargetLoweringInfo().shouldTransformSignedTruncationCheck( 3198 XVT, KeptBits)) 3199 return SDValue(); 3200 3201 const unsigned MaskedBits = XVT.getSizeInBits() - KeptBits; 3202 assert(MaskedBits > 0 && MaskedBits < XVT.getSizeInBits() && "unreachable"); 3203 3204 // Unfold into: ((%x << C) a>> C) cond %x 3205 // Where 'cond' will be either 'eq' or 'ne'. 3206 SDValue ShiftAmt = DAG.getConstant(MaskedBits, DL, XVT); 3207 SDValue T0 = DAG.getNode(ISD::SHL, DL, XVT, X, ShiftAmt); 3208 SDValue T1 = DAG.getNode(ISD::SRA, DL, XVT, T0, ShiftAmt); 3209 SDValue T2 = DAG.getSetCC(DL, SCCVT, T1, X, NewCond); 3210 3211 return T2; 3212 } 3213 3214 // (X & (C l>>/<< Y)) ==/!= 0 --> ((X <</l>> Y) & C) ==/!= 0 3215 SDValue TargetLowering::optimizeSetCCByHoistingAndByConstFromLogicalShift( 3216 EVT SCCVT, SDValue N0, SDValue N1C, ISD::CondCode Cond, 3217 DAGCombinerInfo &DCI, const SDLoc &DL) const { 3218 assert(isConstOrConstSplat(N1C) && 3219 isConstOrConstSplat(N1C)->getAPIntValue().isNullValue() && 3220 "Should be a comparison with 0."); 3221 assert((Cond == ISD::SETEQ || Cond == ISD::SETNE) && 3222 "Valid only for [in]equality comparisons."); 3223 3224 unsigned NewShiftOpcode; 3225 SDValue X, C, Y; 3226 3227 SelectionDAG &DAG = DCI.DAG; 3228 const TargetLowering &TLI = DAG.getTargetLoweringInfo(); 3229 3230 // Look for '(C l>>/<< Y)'. 3231 auto Match = [&NewShiftOpcode, &X, &C, &Y, &TLI, &DAG](SDValue V) { 3232 // The shift should be one-use. 
3233 if (!V.hasOneUse())
3234 return false;
3235 unsigned OldShiftOpcode = V.getOpcode();
3236 switch (OldShiftOpcode) {
3237 case ISD::SHL:
3238 NewShiftOpcode = ISD::SRL;
3239 break;
3240 case ISD::SRL:
3241 NewShiftOpcode = ISD::SHL;
3242 break;
3243 default:
3244 return false; // must be a logical shift.
3245 }
3246 // We should be shifting a constant.
3247 // FIXME: best to use isConstantOrConstantVector().
3248 C = V.getOperand(0);
3249 ConstantSDNode *CC =
3250 isConstOrConstSplat(C, /*AllowUndefs=*/true, /*AllowTruncation=*/true);
3251 if (!CC)
3252 return false;
3253 Y = V.getOperand(1);
3254
3255 ConstantSDNode *XC =
3256 isConstOrConstSplat(X, /*AllowUndefs=*/true, /*AllowTruncation=*/true);
3257 return TLI.shouldProduceAndByConstByHoistingConstFromShiftsLHSOfAnd(
3258 X, XC, CC, Y, OldShiftOpcode, NewShiftOpcode, DAG);
3259 };
3260
3261 // LHS of comparison should be a one-use 'and'.
3262 if (N0.getOpcode() != ISD::AND || !N0.hasOneUse())
3263 return SDValue();
3264
3265 X = N0.getOperand(0);
3266 SDValue Mask = N0.getOperand(1);
3267
3268 // 'and' is commutative!
3269 if (!Match(Mask)) {
3270 std::swap(X, Mask);
3271 if (!Match(Mask))
3272 return SDValue();
3273 }
3274
3275 EVT VT = X.getValueType();
3276
3277 // Produce:
3278 // ((X 'OppositeShiftOpcode' Y) & C) Cond 0
3279 SDValue T0 = DAG.getNode(NewShiftOpcode, DL, VT, X, Y);
3280 SDValue T1 = DAG.getNode(ISD::AND, DL, VT, T0, C);
3281 SDValue T2 = DAG.getSetCC(DL, SCCVT, T1, N1C, Cond);
3282 return T2;
3283 }
3284
3285 /// Try to fold an equality comparison with a {add/sub/xor} binary operation as
3286 /// the 1st operand (N0). Callers are expected to swap the N0/N1 parameters to
3287 /// handle the commuted versions of these patterns.
3288 SDValue TargetLowering::foldSetCCWithBinOp(EVT VT, SDValue N0, SDValue N1,
3289 ISD::CondCode Cond, const SDLoc &DL,
3290 DAGCombinerInfo &DCI) const {
3291 unsigned BOpcode = N0.getOpcode();
3292 assert((BOpcode == ISD::ADD || BOpcode == ISD::SUB || BOpcode == ISD::XOR) &&
3293 "Unexpected binop");
3294 assert((Cond == ISD::SETEQ || Cond == ISD::SETNE) && "Unexpected condcode");
3295
3296 // (X + Y) == X --> Y == 0
3297 // (X - Y) == X --> Y == 0
3298 // (X ^ Y) == X --> Y == 0
3299 SelectionDAG &DAG = DCI.DAG;
3300 EVT OpVT = N0.getValueType();
3301 SDValue X = N0.getOperand(0);
3302 SDValue Y = N0.getOperand(1);
3303 if (X == N1)
3304 return DAG.getSetCC(DL, VT, Y, DAG.getConstant(0, DL, OpVT), Cond);
3305
3306 if (Y != N1)
3307 return SDValue();
3308
3309 // (X + Y) == Y --> X == 0
3310 // (X ^ Y) == Y --> X == 0
3311 if (BOpcode == ISD::ADD || BOpcode == ISD::XOR)
3312 return DAG.getSetCC(DL, VT, X, DAG.getConstant(0, DL, OpVT), Cond);
3313
3314 // The shift would not be valid if the operands are boolean (i1).
3315 if (!N0.hasOneUse() || OpVT.getScalarSizeInBits() == 1)
3316 return SDValue();
3317
3318 // (X - Y) == Y --> X == Y << 1
3319 EVT ShiftVT = getShiftAmountTy(OpVT, DAG.getDataLayout(),
3320 !DCI.isBeforeLegalize());
3321 SDValue One = DAG.getConstant(1, DL, ShiftVT);
3322 SDValue YShl1 = DAG.getNode(ISD::SHL, DL, N1.getValueType(), Y, One);
3323 if (!DCI.isCalledByLegalizer())
3324 DCI.AddToWorklist(YShl1.getNode());
3325 return DAG.getSetCC(DL, VT, X, YShl1, Cond);
3326 }
3327
3328 /// Try to simplify a setcc built with the specified operands and cc. If it is
3329 /// unable to simplify it, return a null SDValue.
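/// For example, this routine folds 'setugt (zext i8 %x to i32), 256' to false,
/// since a zero-extended i8 value can never exceed 255.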
3330 SDValue TargetLowering::SimplifySetCC(EVT VT, SDValue N0, SDValue N1, 3331 ISD::CondCode Cond, bool foldBooleans, 3332 DAGCombinerInfo &DCI, 3333 const SDLoc &dl) const { 3334 SelectionDAG &DAG = DCI.DAG; 3335 const DataLayout &Layout = DAG.getDataLayout(); 3336 EVT OpVT = N0.getValueType(); 3337 3338 // Constant fold or commute setcc. 3339 if (SDValue Fold = DAG.FoldSetCC(VT, N0, N1, Cond, dl)) 3340 return Fold; 3341 3342 // Ensure that the constant occurs on the RHS and fold constant comparisons. 3343 // TODO: Handle non-splat vector constants. All undef causes trouble. 3344 ISD::CondCode SwappedCC = ISD::getSetCCSwappedOperands(Cond); 3345 if (isConstOrConstSplat(N0) && 3346 (DCI.isBeforeLegalizeOps() || 3347 isCondCodeLegal(SwappedCC, N0.getSimpleValueType()))) 3348 return DAG.getSetCC(dl, VT, N1, N0, SwappedCC); 3349 3350 // If we have a subtract with the same 2 non-constant operands as this setcc 3351 // -- but in reverse order -- then try to commute the operands of this setcc 3352 // to match. A matching pair of setcc (cmp) and sub may be combined into 1 3353 // instruction on some targets. 3354 if (!isConstOrConstSplat(N0) && !isConstOrConstSplat(N1) && 3355 (DCI.isBeforeLegalizeOps() || 3356 isCondCodeLegal(SwappedCC, N0.getSimpleValueType())) && 3357 DAG.getNodeIfExists(ISD::SUB, DAG.getVTList(OpVT), { N1, N0 } ) && 3358 !DAG.getNodeIfExists(ISD::SUB, DAG.getVTList(OpVT), { N0, N1 } )) 3359 return DAG.getSetCC(dl, VT, N1, N0, SwappedCC); 3360 3361 if (auto *N1C = dyn_cast<ConstantSDNode>(N1.getNode())) { 3362 const APInt &C1 = N1C->getAPIntValue(); 3363 3364 // If the LHS is '(srl (ctlz x), 5)', the RHS is 0/1, and this is an 3365 // equality comparison, then we're just comparing whether X itself is 3366 // zero. 3367 if (N0.getOpcode() == ISD::SRL && (C1.isNullValue() || C1.isOneValue()) && 3368 N0.getOperand(0).getOpcode() == ISD::CTLZ && 3369 N0.getOperand(1).getOpcode() == ISD::Constant) { 3370 const APInt &ShAmt = N0.getConstantOperandAPInt(1); 3371 if ((Cond == ISD::SETEQ || Cond == ISD::SETNE) && 3372 ShAmt == Log2_32(N0.getValueSizeInBits())) { 3373 if ((C1 == 0) == (Cond == ISD::SETEQ)) { 3374 // (srl (ctlz x), 5) == 0 -> X != 0 3375 // (srl (ctlz x), 5) != 1 -> X != 0 3376 Cond = ISD::SETNE; 3377 } else { 3378 // (srl (ctlz x), 5) != 0 -> X == 0 3379 // (srl (ctlz x), 5) == 1 -> X == 0 3380 Cond = ISD::SETEQ; 3381 } 3382 SDValue Zero = DAG.getConstant(0, dl, N0.getValueType()); 3383 return DAG.getSetCC(dl, VT, N0.getOperand(0).getOperand(0), 3384 Zero, Cond); 3385 } 3386 } 3387 3388 SDValue CTPOP = N0; 3389 // Look through truncs that don't change the value of a ctpop. 3390 if (N0.hasOneUse() && N0.getOpcode() == ISD::TRUNCATE) 3391 CTPOP = N0.getOperand(0); 3392 3393 if (CTPOP.hasOneUse() && CTPOP.getOpcode() == ISD::CTPOP && 3394 (N0 == CTPOP || 3395 N0.getValueSizeInBits() > Log2_32_Ceil(CTPOP.getValueSizeInBits()))) { 3396 EVT CTVT = CTPOP.getValueType(); 3397 SDValue CTOp = CTPOP.getOperand(0); 3398 3399 // (ctpop x) u< 2 -> (x & x-1) == 0 3400 // (ctpop x) u> 1 -> (x & x-1) != 0 3401 if ((Cond == ISD::SETULT && C1 == 2) || (Cond == ISD::SETUGT && C1 == 1)){ 3402 SDValue NegOne = DAG.getAllOnesConstant(dl, CTVT); 3403 SDValue Add = DAG.getNode(ISD::ADD, dl, CTVT, CTOp, NegOne); 3404 SDValue And = DAG.getNode(ISD::AND, dl, CTVT, CTOp, Add); 3405 ISD::CondCode CC = Cond == ISD::SETULT ? 
ISD::SETEQ : ISD::SETNE; 3406 return DAG.getSetCC(dl, VT, And, DAG.getConstant(0, dl, CTVT), CC); 3407 } 3408 3409 // If ctpop is not supported, expand a power-of-2 comparison based on it. 3410 if (C1 == 1 && !isOperationLegalOrCustom(ISD::CTPOP, CTVT) && 3411 (Cond == ISD::SETEQ || Cond == ISD::SETNE)) { 3412 // (ctpop x) == 1 --> (x != 0) && ((x & x-1) == 0) 3413 // (ctpop x) != 1 --> (x == 0) || ((x & x-1) != 0) 3414 SDValue Zero = DAG.getConstant(0, dl, CTVT); 3415 SDValue NegOne = DAG.getAllOnesConstant(dl, CTVT); 3416 assert(CTVT.isInteger()); 3417 ISD::CondCode InvCond = ISD::getSetCCInverse(Cond, CTVT); 3418 SDValue Add = DAG.getNode(ISD::ADD, dl, CTVT, CTOp, NegOne); 3419 SDValue And = DAG.getNode(ISD::AND, dl, CTVT, CTOp, Add); 3420 SDValue LHS = DAG.getSetCC(dl, VT, CTOp, Zero, InvCond); 3421 SDValue RHS = DAG.getSetCC(dl, VT, And, Zero, Cond); 3422 unsigned LogicOpcode = Cond == ISD::SETEQ ? ISD::AND : ISD::OR; 3423 return DAG.getNode(LogicOpcode, dl, VT, LHS, RHS); 3424 } 3425 } 3426 3427 // (zext x) == C --> x == (trunc C) 3428 // (sext x) == C --> x == (trunc C) 3429 if ((Cond == ISD::SETEQ || Cond == ISD::SETNE) && 3430 DCI.isBeforeLegalize() && N0->hasOneUse()) { 3431 unsigned MinBits = N0.getValueSizeInBits(); 3432 SDValue PreExt; 3433 bool Signed = false; 3434 if (N0->getOpcode() == ISD::ZERO_EXTEND) { 3435 // ZExt 3436 MinBits = N0->getOperand(0).getValueSizeInBits(); 3437 PreExt = N0->getOperand(0); 3438 } else if (N0->getOpcode() == ISD::AND) { 3439 // DAGCombine turns costly ZExts into ANDs 3440 if (auto *C = dyn_cast<ConstantSDNode>(N0->getOperand(1))) 3441 if ((C->getAPIntValue()+1).isPowerOf2()) { 3442 MinBits = C->getAPIntValue().countTrailingOnes(); 3443 PreExt = N0->getOperand(0); 3444 } 3445 } else if (N0->getOpcode() == ISD::SIGN_EXTEND) { 3446 // SExt 3447 MinBits = N0->getOperand(0).getValueSizeInBits(); 3448 PreExt = N0->getOperand(0); 3449 Signed = true; 3450 } else if (auto *LN0 = dyn_cast<LoadSDNode>(N0)) { 3451 // ZEXTLOAD / SEXTLOAD 3452 if (LN0->getExtensionType() == ISD::ZEXTLOAD) { 3453 MinBits = LN0->getMemoryVT().getSizeInBits(); 3454 PreExt = N0; 3455 } else if (LN0->getExtensionType() == ISD::SEXTLOAD) { 3456 Signed = true; 3457 MinBits = LN0->getMemoryVT().getSizeInBits(); 3458 PreExt = N0; 3459 } 3460 } 3461 3462 // Figure out how many bits we need to preserve this constant. 3463 unsigned ReqdBits = Signed ? 3464 C1.getBitWidth() - C1.getNumSignBits() + 1 : 3465 C1.getActiveBits(); 3466 3467 // Make sure we're not losing bits from the constant. 3468 if (MinBits > 0 && 3469 MinBits < C1.getBitWidth() && 3470 MinBits >= ReqdBits) { 3471 EVT MinVT = EVT::getIntegerVT(*DAG.getContext(), MinBits); 3472 if (isTypeDesirableForOp(ISD::SETCC, MinVT)) { 3473 // Will get folded away. 3474 SDValue Trunc = DAG.getNode(ISD::TRUNCATE, dl, MinVT, PreExt); 3475 if (MinBits == 1 && C1 == 1) 3476 // Invert the condition. 3477 return DAG.getSetCC(dl, VT, Trunc, DAG.getConstant(0, dl, MVT::i1), 3478 Cond == ISD::SETEQ ? 
ISD::SETNE : ISD::SETEQ); 3479 SDValue C = DAG.getConstant(C1.trunc(MinBits), dl, MinVT); 3480 return DAG.getSetCC(dl, VT, Trunc, C, Cond); 3481 } 3482 3483 // If truncating the setcc operands is not desirable, we can still 3484 // simplify the expression in some cases: 3485 // setcc ([sz]ext (setcc x, y, cc)), 0, setne) -> setcc (x, y, cc) 3486 // setcc ([sz]ext (setcc x, y, cc)), 0, seteq) -> setcc (x, y, inv(cc)) 3487 // setcc (zext (setcc x, y, cc)), 1, setne) -> setcc (x, y, inv(cc)) 3488 // setcc (zext (setcc x, y, cc)), 1, seteq) -> setcc (x, y, cc) 3489 // setcc (sext (setcc x, y, cc)), -1, setne) -> setcc (x, y, inv(cc)) 3490 // setcc (sext (setcc x, y, cc)), -1, seteq) -> setcc (x, y, cc) 3491 SDValue TopSetCC = N0->getOperand(0); 3492 unsigned N0Opc = N0->getOpcode(); 3493 bool SExt = (N0Opc == ISD::SIGN_EXTEND); 3494 if (TopSetCC.getValueType() == MVT::i1 && VT == MVT::i1 && 3495 TopSetCC.getOpcode() == ISD::SETCC && 3496 (N0Opc == ISD::ZERO_EXTEND || N0Opc == ISD::SIGN_EXTEND) && 3497 (isConstFalseVal(N1C) || 3498 isExtendedTrueVal(N1C, N0->getValueType(0), SExt))) { 3499 3500 bool Inverse = (N1C->isNullValue() && Cond == ISD::SETEQ) || 3501 (!N1C->isNullValue() && Cond == ISD::SETNE); 3502 3503 if (!Inverse) 3504 return TopSetCC; 3505 3506 ISD::CondCode InvCond = ISD::getSetCCInverse( 3507 cast<CondCodeSDNode>(TopSetCC.getOperand(2))->get(), 3508 TopSetCC.getOperand(0).getValueType()); 3509 return DAG.getSetCC(dl, VT, TopSetCC.getOperand(0), 3510 TopSetCC.getOperand(1), 3511 InvCond); 3512 } 3513 } 3514 } 3515 3516 // If the LHS is '(and load, const)', the RHS is 0, the test is for 3517 // equality or unsigned, and all 1 bits of the const are in the same 3518 // partial word, see if we can shorten the load. 3519 if (DCI.isBeforeLegalize() && 3520 !ISD::isSignedIntSetCC(Cond) && 3521 N0.getOpcode() == ISD::AND && C1 == 0 && 3522 N0.getNode()->hasOneUse() && 3523 isa<LoadSDNode>(N0.getOperand(0)) && 3524 N0.getOperand(0).getNode()->hasOneUse() && 3525 isa<ConstantSDNode>(N0.getOperand(1))) { 3526 LoadSDNode *Lod = cast<LoadSDNode>(N0.getOperand(0)); 3527 APInt bestMask; 3528 unsigned bestWidth = 0, bestOffset = 0; 3529 if (Lod->isSimple() && Lod->isUnindexed()) { 3530 unsigned origWidth = N0.getValueSizeInBits(); 3531 unsigned maskWidth = origWidth; 3532 // We can narrow (e.g.) 16-bit extending loads on 32-bit target to 3533 // 8 bits, but have to be careful... 
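// For instance, on a little-endian target '(and (load i32 %p), 0xFF0000) == 0'
// can be rewritten as a 16-bit load from offset 2, masked with 0xFF and
// compared against 0, provided the target allows reducing the load width.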
3534 if (Lod->getExtensionType() != ISD::NON_EXTLOAD) 3535 origWidth = Lod->getMemoryVT().getSizeInBits(); 3536 const APInt &Mask = N0.getConstantOperandAPInt(1); 3537 for (unsigned width = origWidth / 2; width>=8; width /= 2) { 3538 APInt newMask = APInt::getLowBitsSet(maskWidth, width); 3539 for (unsigned offset=0; offset<origWidth/width; offset++) { 3540 if (Mask.isSubsetOf(newMask)) { 3541 if (Layout.isLittleEndian()) 3542 bestOffset = (uint64_t)offset * (width/8); 3543 else 3544 bestOffset = (origWidth/width - offset - 1) * (width/8); 3545 bestMask = Mask.lshr(offset * (width/8) * 8); 3546 bestWidth = width; 3547 break; 3548 } 3549 newMask <<= width; 3550 } 3551 } 3552 } 3553 if (bestWidth) { 3554 EVT newVT = EVT::getIntegerVT(*DAG.getContext(), bestWidth); 3555 if (newVT.isRound() && 3556 shouldReduceLoadWidth(Lod, ISD::NON_EXTLOAD, newVT)) { 3557 SDValue Ptr = Lod->getBasePtr(); 3558 if (bestOffset != 0) 3559 Ptr = DAG.getMemBasePlusOffset(Ptr, bestOffset, dl); 3560 unsigned NewAlign = MinAlign(Lod->getAlignment(), bestOffset); 3561 SDValue NewLoad = DAG.getLoad( 3562 newVT, dl, Lod->getChain(), Ptr, 3563 Lod->getPointerInfo().getWithOffset(bestOffset), NewAlign); 3564 return DAG.getSetCC(dl, VT, 3565 DAG.getNode(ISD::AND, dl, newVT, NewLoad, 3566 DAG.getConstant(bestMask.trunc(bestWidth), 3567 dl, newVT)), 3568 DAG.getConstant(0LL, dl, newVT), Cond); 3569 } 3570 } 3571 } 3572 3573 // If the LHS is a ZERO_EXTEND, perform the comparison on the input. 3574 if (N0.getOpcode() == ISD::ZERO_EXTEND) { 3575 unsigned InSize = N0.getOperand(0).getValueSizeInBits(); 3576 3577 // If the comparison constant has bits in the upper part, the 3578 // zero-extended value could never match. 3579 if (C1.intersects(APInt::getHighBitsSet(C1.getBitWidth(), 3580 C1.getBitWidth() - InSize))) { 3581 switch (Cond) { 3582 case ISD::SETUGT: 3583 case ISD::SETUGE: 3584 case ISD::SETEQ: 3585 return DAG.getConstant(0, dl, VT); 3586 case ISD::SETULT: 3587 case ISD::SETULE: 3588 case ISD::SETNE: 3589 return DAG.getConstant(1, dl, VT); 3590 case ISD::SETGT: 3591 case ISD::SETGE: 3592 // True if the sign bit of C1 is set. 3593 return DAG.getConstant(C1.isNegative(), dl, VT); 3594 case ISD::SETLT: 3595 case ISD::SETLE: 3596 // True if the sign bit of C1 isn't set. 3597 return DAG.getConstant(C1.isNonNegative(), dl, VT); 3598 default: 3599 break; 3600 } 3601 } 3602 3603 // Otherwise, we can perform the comparison with the low bits. 
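// For example, 'setult (zext i8 %x to i32), 40' can become 'setult i8 %x, 40'
// when the narrower setcc is legal (or we are still before legalize-ops).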
3604 switch (Cond) { 3605 case ISD::SETEQ: 3606 case ISD::SETNE: 3607 case ISD::SETUGT: 3608 case ISD::SETUGE: 3609 case ISD::SETULT: 3610 case ISD::SETULE: { 3611 EVT newVT = N0.getOperand(0).getValueType(); 3612 if (DCI.isBeforeLegalizeOps() || 3613 (isOperationLegal(ISD::SETCC, newVT) && 3614 isCondCodeLegal(Cond, newVT.getSimpleVT()))) { 3615 EVT NewSetCCVT = getSetCCResultType(Layout, *DAG.getContext(), newVT); 3616 SDValue NewConst = DAG.getConstant(C1.trunc(InSize), dl, newVT); 3617 3618 SDValue NewSetCC = DAG.getSetCC(dl, NewSetCCVT, N0.getOperand(0), 3619 NewConst, Cond); 3620 return DAG.getBoolExtOrTrunc(NewSetCC, dl, VT, N0.getValueType()); 3621 } 3622 break; 3623 } 3624 default: 3625 break; // todo, be more careful with signed comparisons 3626 } 3627 } else if (N0.getOpcode() == ISD::SIGN_EXTEND_INREG && 3628 (Cond == ISD::SETEQ || Cond == ISD::SETNE)) { 3629 EVT ExtSrcTy = cast<VTSDNode>(N0.getOperand(1))->getVT(); 3630 unsigned ExtSrcTyBits = ExtSrcTy.getSizeInBits(); 3631 EVT ExtDstTy = N0.getValueType(); 3632 unsigned ExtDstTyBits = ExtDstTy.getSizeInBits(); 3633 3634 // If the constant doesn't fit into the number of bits for the source of 3635 // the sign extension, it is impossible for both sides to be equal. 3636 if (C1.getMinSignedBits() > ExtSrcTyBits) 3637 return DAG.getConstant(Cond == ISD::SETNE, dl, VT); 3638 3639 SDValue ZextOp; 3640 EVT Op0Ty = N0.getOperand(0).getValueType(); 3641 if (Op0Ty == ExtSrcTy) { 3642 ZextOp = N0.getOperand(0); 3643 } else { 3644 APInt Imm = APInt::getLowBitsSet(ExtDstTyBits, ExtSrcTyBits); 3645 ZextOp = DAG.getNode(ISD::AND, dl, Op0Ty, N0.getOperand(0), 3646 DAG.getConstant(Imm, dl, Op0Ty)); 3647 } 3648 if (!DCI.isCalledByLegalizer()) 3649 DCI.AddToWorklist(ZextOp.getNode()); 3650 // Otherwise, make this a use of a zext. 3651 return DAG.getSetCC(dl, VT, ZextOp, 3652 DAG.getConstant(C1 & APInt::getLowBitsSet( 3653 ExtDstTyBits, 3654 ExtSrcTyBits), 3655 dl, ExtDstTy), 3656 Cond); 3657 } else if ((N1C->isNullValue() || N1C->isOne()) && 3658 (Cond == ISD::SETEQ || Cond == ISD::SETNE)) { 3659 // SETCC (SETCC), [0|1], [EQ|NE] -> SETCC 3660 if (N0.getOpcode() == ISD::SETCC && 3661 isTypeLegal(VT) && VT.bitsLE(N0.getValueType()) && 3662 (N0.getValueType() == MVT::i1 || 3663 getBooleanContents(N0.getOperand(0).getValueType()) == 3664 ZeroOrOneBooleanContent)) { 3665 bool TrueWhenTrue = (Cond == ISD::SETEQ) ^ (!N1C->isOne()); 3666 if (TrueWhenTrue) 3667 return DAG.getNode(ISD::TRUNCATE, dl, VT, N0); 3668 // Invert the condition. 3669 ISD::CondCode CC = cast<CondCodeSDNode>(N0.getOperand(2))->get(); 3670 CC = ISD::getSetCCInverse(CC, N0.getOperand(0).getValueType()); 3671 if (DCI.isBeforeLegalizeOps() || 3672 isCondCodeLegal(CC, N0.getOperand(0).getSimpleValueType())) 3673 return DAG.getSetCC(dl, VT, N0.getOperand(0), N0.getOperand(1), CC); 3674 } 3675 3676 if ((N0.getOpcode() == ISD::XOR || 3677 (N0.getOpcode() == ISD::AND && 3678 N0.getOperand(0).getOpcode() == ISD::XOR && 3679 N0.getOperand(1) == N0.getOperand(0).getOperand(1))) && 3680 isa<ConstantSDNode>(N0.getOperand(1)) && 3681 cast<ConstantSDNode>(N0.getOperand(1))->isOne()) { 3682 // If this is (X^1) == 0/1, swap the RHS and eliminate the xor. We 3683 // can only do this if the top bits are known zero. 3684 unsigned BitWidth = N0.getValueSizeInBits(); 3685 if (DAG.MaskedValueIsZero(N0, 3686 APInt::getHighBitsSet(BitWidth, 3687 BitWidth-1))) { 3688 // Okay, get the un-inverted input value. 
3689 SDValue Val; 3690 if (N0.getOpcode() == ISD::XOR) { 3691 Val = N0.getOperand(0); 3692 } else { 3693 assert(N0.getOpcode() == ISD::AND && 3694 N0.getOperand(0).getOpcode() == ISD::XOR); 3695 // ((X^1)&1)^1 -> X & 1 3696 Val = DAG.getNode(ISD::AND, dl, N0.getValueType(), 3697 N0.getOperand(0).getOperand(0), 3698 N0.getOperand(1)); 3699 } 3700 3701 return DAG.getSetCC(dl, VT, Val, N1, 3702 Cond == ISD::SETEQ ? ISD::SETNE : ISD::SETEQ); 3703 } 3704 } else if (N1C->isOne()) { 3705 SDValue Op0 = N0; 3706 if (Op0.getOpcode() == ISD::TRUNCATE) 3707 Op0 = Op0.getOperand(0); 3708 3709 if ((Op0.getOpcode() == ISD::XOR) && 3710 Op0.getOperand(0).getOpcode() == ISD::SETCC && 3711 Op0.getOperand(1).getOpcode() == ISD::SETCC) { 3712 SDValue XorLHS = Op0.getOperand(0); 3713 SDValue XorRHS = Op0.getOperand(1); 3714 // Ensure that the input setccs return an i1 type or 0/1 value. 3715 if (Op0.getValueType() == MVT::i1 || 3716 (getBooleanContents(XorLHS.getOperand(0).getValueType()) == 3717 ZeroOrOneBooleanContent && 3718 getBooleanContents(XorRHS.getOperand(0).getValueType()) == 3719 ZeroOrOneBooleanContent)) { 3720 // (xor (setcc), (setcc)) == / != 1 -> (setcc) != / == (setcc) 3721 Cond = (Cond == ISD::SETEQ) ? ISD::SETNE : ISD::SETEQ; 3722 return DAG.getSetCC(dl, VT, XorLHS, XorRHS, Cond); 3723 } 3724 } 3725 if (Op0.getOpcode() == ISD::AND && 3726 isa<ConstantSDNode>(Op0.getOperand(1)) && 3727 cast<ConstantSDNode>(Op0.getOperand(1))->isOne()) { 3728 // If this is (X&1) == / != 1, normalize it to (X&1) != / == 0. 3729 if (Op0.getValueType().bitsGT(VT)) 3730 Op0 = DAG.getNode(ISD::AND, dl, VT, 3731 DAG.getNode(ISD::TRUNCATE, dl, VT, Op0.getOperand(0)), 3732 DAG.getConstant(1, dl, VT)); 3733 else if (Op0.getValueType().bitsLT(VT)) 3734 Op0 = DAG.getNode(ISD::AND, dl, VT, 3735 DAG.getNode(ISD::ANY_EXTEND, dl, VT, Op0.getOperand(0)), 3736 DAG.getConstant(1, dl, VT)); 3737 3738 return DAG.getSetCC(dl, VT, Op0, 3739 DAG.getConstant(0, dl, Op0.getValueType()), 3740 Cond == ISD::SETEQ ? ISD::SETNE : ISD::SETEQ); 3741 } 3742 if (Op0.getOpcode() == ISD::AssertZext && 3743 cast<VTSDNode>(Op0.getOperand(1))->getVT() == MVT::i1) 3744 return DAG.getSetCC(dl, VT, Op0, 3745 DAG.getConstant(0, dl, Op0.getValueType()), 3746 Cond == ISD::SETEQ ? ISD::SETNE : ISD::SETEQ); 3747 } 3748 } 3749 3750 // Given: 3751 // icmp eq/ne (urem %x, %y), 0 3752 // Iff %x has 0 or 1 bits set, and %y has at least 2 bits set, omit 'urem': 3753 // icmp eq/ne %x, 0 3754 if (N0.getOpcode() == ISD::UREM && N1C->isNullValue() && 3755 (Cond == ISD::SETEQ || Cond == ISD::SETNE)) { 3756 KnownBits XKnown = DAG.computeKnownBits(N0.getOperand(0)); 3757 KnownBits YKnown = DAG.computeKnownBits(N0.getOperand(1)); 3758 if (XKnown.countMaxPopulation() == 1 && YKnown.countMinPopulation() >= 2) 3759 return DAG.getSetCC(dl, VT, N0.getOperand(0), N1, Cond); 3760 } 3761 3762 if (SDValue V = 3763 optimizeSetCCOfSignedTruncationCheck(VT, N0, N1, Cond, DCI, dl)) 3764 return V; 3765 } 3766 3767 // These simplifications apply to splat vectors as well. 3768 // TODO: Handle more splat vector cases. 
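// For example, with i8 operands, 'setge %x, -128' is always true, and
// 'setge %x, 5' can be canonicalized to 'setgt %x, 4' below.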
3769 if (auto *N1C = isConstOrConstSplat(N1)) { 3770 const APInt &C1 = N1C->getAPIntValue(); 3771 3772 APInt MinVal, MaxVal; 3773 unsigned OperandBitSize = N1C->getValueType(0).getScalarSizeInBits(); 3774 if (ISD::isSignedIntSetCC(Cond)) { 3775 MinVal = APInt::getSignedMinValue(OperandBitSize); 3776 MaxVal = APInt::getSignedMaxValue(OperandBitSize); 3777 } else { 3778 MinVal = APInt::getMinValue(OperandBitSize); 3779 MaxVal = APInt::getMaxValue(OperandBitSize); 3780 } 3781 3782 // Canonicalize GE/LE comparisons to use GT/LT comparisons. 3783 if (Cond == ISD::SETGE || Cond == ISD::SETUGE) { 3784 // X >= MIN --> true 3785 if (C1 == MinVal) 3786 return DAG.getBoolConstant(true, dl, VT, OpVT); 3787 3788 if (!VT.isVector()) { // TODO: Support this for vectors. 3789 // X >= C0 --> X > (C0 - 1) 3790 APInt C = C1 - 1; 3791 ISD::CondCode NewCC = (Cond == ISD::SETGE) ? ISD::SETGT : ISD::SETUGT; 3792 if ((DCI.isBeforeLegalizeOps() || 3793 isCondCodeLegal(NewCC, VT.getSimpleVT())) && 3794 (!N1C->isOpaque() || (C.getBitWidth() <= 64 && 3795 isLegalICmpImmediate(C.getSExtValue())))) { 3796 return DAG.getSetCC(dl, VT, N0, 3797 DAG.getConstant(C, dl, N1.getValueType()), 3798 NewCC); 3799 } 3800 } 3801 } 3802 3803 if (Cond == ISD::SETLE || Cond == ISD::SETULE) { 3804 // X <= MAX --> true 3805 if (C1 == MaxVal) 3806 return DAG.getBoolConstant(true, dl, VT, OpVT); 3807 3808 // X <= C0 --> X < (C0 + 1) 3809 if (!VT.isVector()) { // TODO: Support this for vectors. 3810 APInt C = C1 + 1; 3811 ISD::CondCode NewCC = (Cond == ISD::SETLE) ? ISD::SETLT : ISD::SETULT; 3812 if ((DCI.isBeforeLegalizeOps() || 3813 isCondCodeLegal(NewCC, VT.getSimpleVT())) && 3814 (!N1C->isOpaque() || (C.getBitWidth() <= 64 && 3815 isLegalICmpImmediate(C.getSExtValue())))) { 3816 return DAG.getSetCC(dl, VT, N0, 3817 DAG.getConstant(C, dl, N1.getValueType()), 3818 NewCC); 3819 } 3820 } 3821 } 3822 3823 if (Cond == ISD::SETLT || Cond == ISD::SETULT) { 3824 if (C1 == MinVal) 3825 return DAG.getBoolConstant(false, dl, VT, OpVT); // X < MIN --> false 3826 3827 // TODO: Support this for vectors after legalize ops. 3828 if (!VT.isVector() || DCI.isBeforeLegalizeOps()) { 3829 // Canonicalize setlt X, Max --> setne X, Max 3830 if (C1 == MaxVal) 3831 return DAG.getSetCC(dl, VT, N0, N1, ISD::SETNE); 3832 3833 // If we have setult X, 1, turn it into seteq X, 0 3834 if (C1 == MinVal+1) 3835 return DAG.getSetCC(dl, VT, N0, 3836 DAG.getConstant(MinVal, dl, N0.getValueType()), 3837 ISD::SETEQ); 3838 } 3839 } 3840 3841 if (Cond == ISD::SETGT || Cond == ISD::SETUGT) { 3842 if (C1 == MaxVal) 3843 return DAG.getBoolConstant(false, dl, VT, OpVT); // X > MAX --> false 3844 3845 // TODO: Support this for vectors after legalize ops. 3846 if (!VT.isVector() || DCI.isBeforeLegalizeOps()) { 3847 // Canonicalize setgt X, Min --> setne X, Min 3848 if (C1 == MinVal) 3849 return DAG.getSetCC(dl, VT, N0, N1, ISD::SETNE); 3850 3851 // If we have setugt X, Max-1, turn it into seteq X, Max 3852 if (C1 == MaxVal-1) 3853 return DAG.getSetCC(dl, VT, N0, 3854 DAG.getConstant(MaxVal, dl, N0.getValueType()), 3855 ISD::SETEQ); 3856 } 3857 } 3858 3859 if (Cond == ISD::SETEQ || Cond == ISD::SETNE) { 3860 // (X & (C l>>/<< Y)) ==/!= 0 --> ((X <</l>> Y) & C) ==/!= 0 3861 if (C1.isNullValue()) 3862 if (SDValue CC = optimizeSetCCByHoistingAndByConstFromLogicalShift( 3863 VT, N0, N1, Cond, DCI, dl)) 3864 return CC; 3865 } 3866 3867 // If we have "setcc X, C0", check to see if we can shrink the immediate 3868 // by changing cc. 3869 // TODO: Support this for vectors after legalize ops. 
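// For example, with a 32-bit operand, 'setugt %x, 0x7fffffff' becomes
// 'setlt %x, 0' and 'setult %x, 0x80000000' becomes 'setgt %x, -1', turning
// a large immediate into a sign-bit test.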
3870 if (!VT.isVector() || DCI.isBeforeLegalizeOps()) { 3871 // SETUGT X, SINTMAX -> SETLT X, 0 3872 if (Cond == ISD::SETUGT && 3873 C1 == APInt::getSignedMaxValue(OperandBitSize)) 3874 return DAG.getSetCC(dl, VT, N0, 3875 DAG.getConstant(0, dl, N1.getValueType()), 3876 ISD::SETLT); 3877 3878 // SETULT X, SINTMIN -> SETGT X, -1 3879 if (Cond == ISD::SETULT && 3880 C1 == APInt::getSignedMinValue(OperandBitSize)) { 3881 SDValue ConstMinusOne = 3882 DAG.getConstant(APInt::getAllOnesValue(OperandBitSize), dl, 3883 N1.getValueType()); 3884 return DAG.getSetCC(dl, VT, N0, ConstMinusOne, ISD::SETGT); 3885 } 3886 } 3887 } 3888 3889 // Back to non-vector simplifications. 3890 // TODO: Can we do these for vector splats? 3891 if (auto *N1C = dyn_cast<ConstantSDNode>(N1.getNode())) { 3892 const TargetLowering &TLI = DAG.getTargetLoweringInfo(); 3893 const APInt &C1 = N1C->getAPIntValue(); 3894 EVT ShValTy = N0.getValueType(); 3895 3896 // Fold bit comparisons when we can. 3897 if ((Cond == ISD::SETEQ || Cond == ISD::SETNE) && 3898 (VT == ShValTy || (isTypeLegal(VT) && VT.bitsLE(ShValTy))) && 3899 N0.getOpcode() == ISD::AND) { 3900 if (auto *AndRHS = dyn_cast<ConstantSDNode>(N0.getOperand(1))) { 3901 EVT ShiftTy = 3902 getShiftAmountTy(ShValTy, Layout, !DCI.isBeforeLegalize()); 3903 if (Cond == ISD::SETNE && C1 == 0) {// (X & 8) != 0 --> (X & 8) >> 3 3904 // Perform the xform if the AND RHS is a single bit. 3905 unsigned ShCt = AndRHS->getAPIntValue().logBase2(); 3906 if (AndRHS->getAPIntValue().isPowerOf2() && 3907 !TLI.shouldAvoidTransformToShift(ShValTy, ShCt)) { 3908 return DAG.getNode(ISD::TRUNCATE, dl, VT, 3909 DAG.getNode(ISD::SRL, dl, ShValTy, N0, 3910 DAG.getConstant(ShCt, dl, ShiftTy))); 3911 } 3912 } else if (Cond == ISD::SETEQ && C1 == AndRHS->getAPIntValue()) { 3913 // (X & 8) == 8 --> (X & 8) >> 3 3914 // Perform the xform if C1 is a single bit. 
3915 unsigned ShCt = C1.logBase2(); 3916 if (C1.isPowerOf2() && 3917 !TLI.shouldAvoidTransformToShift(ShValTy, ShCt)) { 3918 return DAG.getNode(ISD::TRUNCATE, dl, VT, 3919 DAG.getNode(ISD::SRL, dl, ShValTy, N0, 3920 DAG.getConstant(ShCt, dl, ShiftTy))); 3921 } 3922 } 3923 } 3924 } 3925 3926 if (C1.getMinSignedBits() <= 64 && 3927 !isLegalICmpImmediate(C1.getSExtValue())) { 3928 EVT ShiftTy = getShiftAmountTy(ShValTy, Layout, !DCI.isBeforeLegalize()); 3929 // (X & -256) == 256 -> (X >> 8) == 1 3930 if ((Cond == ISD::SETEQ || Cond == ISD::SETNE) && 3931 N0.getOpcode() == ISD::AND && N0.hasOneUse()) { 3932 if (auto *AndRHS = dyn_cast<ConstantSDNode>(N0.getOperand(1))) { 3933 const APInt &AndRHSC = AndRHS->getAPIntValue(); 3934 if ((-AndRHSC).isPowerOf2() && (AndRHSC & C1) == C1) { 3935 unsigned ShiftBits = AndRHSC.countTrailingZeros(); 3936 if (!TLI.shouldAvoidTransformToShift(ShValTy, ShiftBits)) { 3937 SDValue Shift = 3938 DAG.getNode(ISD::SRL, dl, ShValTy, N0.getOperand(0), 3939 DAG.getConstant(ShiftBits, dl, ShiftTy)); 3940 SDValue CmpRHS = DAG.getConstant(C1.lshr(ShiftBits), dl, ShValTy); 3941 return DAG.getSetCC(dl, VT, Shift, CmpRHS, Cond); 3942 } 3943 } 3944 } 3945 } else if (Cond == ISD::SETULT || Cond == ISD::SETUGE || 3946 Cond == ISD::SETULE || Cond == ISD::SETUGT) { 3947 bool AdjOne = (Cond == ISD::SETULE || Cond == ISD::SETUGT); 3948 // X < 0x100000000 -> (X >> 32) < 1 3949 // X >= 0x100000000 -> (X >> 32) >= 1 3950 // X <= 0x0ffffffff -> (X >> 32) < 1 3951 // X > 0x0ffffffff -> (X >> 32) >= 1 3952 unsigned ShiftBits; 3953 APInt NewC = C1; 3954 ISD::CondCode NewCond = Cond; 3955 if (AdjOne) { 3956 ShiftBits = C1.countTrailingOnes(); 3957 NewC = NewC + 1; 3958 NewCond = (Cond == ISD::SETULE) ? ISD::SETULT : ISD::SETUGE; 3959 } else { 3960 ShiftBits = C1.countTrailingZeros(); 3961 } 3962 NewC.lshrInPlace(ShiftBits); 3963 if (ShiftBits && NewC.getMinSignedBits() <= 64 && 3964 isLegalICmpImmediate(NewC.getSExtValue()) && 3965 !TLI.shouldAvoidTransformToShift(ShValTy, ShiftBits)) { 3966 SDValue Shift = DAG.getNode(ISD::SRL, dl, ShValTy, N0, 3967 DAG.getConstant(ShiftBits, dl, ShiftTy)); 3968 SDValue CmpRHS = DAG.getConstant(NewC, dl, ShValTy); 3969 return DAG.getSetCC(dl, VT, Shift, CmpRHS, NewCond); 3970 } 3971 } 3972 } 3973 } 3974 3975 if (!isa<ConstantFPSDNode>(N0) && isa<ConstantFPSDNode>(N1)) { 3976 auto *CFP = cast<ConstantFPSDNode>(N1); 3977 assert(!CFP->getValueAPF().isNaN() && "Unexpected NaN value"); 3978 3979 // Otherwise, we know the RHS is not a NaN. Simplify the node to drop the 3980 // constant if knowing that the operand is non-nan is enough. We prefer to 3981 // have SETO(x,x) instead of SETO(x, 0.0) because this avoids having to 3982 // materialize 0.0. 3983 if (Cond == ISD::SETO || Cond == ISD::SETUO) 3984 return DAG.getSetCC(dl, VT, N0, N0, Cond); 3985 3986 // setcc (fneg x), C -> setcc swap(pred) x, -C 3987 if (N0.getOpcode() == ISD::FNEG) { 3988 ISD::CondCode SwapCond = ISD::getSetCCSwappedOperands(Cond); 3989 if (DCI.isBeforeLegalizeOps() || 3990 isCondCodeLegal(SwapCond, N0.getSimpleValueType())) { 3991 SDValue NegN1 = DAG.getNode(ISD::FNEG, dl, N0.getValueType(), N1); 3992 return DAG.getSetCC(dl, VT, N0.getOperand(0), NegN1, SwapCond); 3993 } 3994 } 3995 3996 // If the condition is not legal, see if we can find an equivalent one 3997 // which is legal. 
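// For example, 'setoeq %x, +Inf' can be rewritten as 'setoge %x, +Inf', and
// 'setoeq %x, -Inf' as 'setole %x, -Inf', if the ordered comparison is legal.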
3998 if (!isCondCodeLegal(Cond, N0.getSimpleValueType())) { 3999 // If the comparison was an awkward floating-point == or != and one of 4000 // the comparison operands is infinity or negative infinity, convert the 4001 // condition to a less-awkward <= or >=. 4002 if (CFP->getValueAPF().isInfinity()) { 4003 bool IsNegInf = CFP->getValueAPF().isNegative(); 4004 ISD::CondCode NewCond = ISD::SETCC_INVALID; 4005 switch (Cond) { 4006 case ISD::SETOEQ: NewCond = IsNegInf ? ISD::SETOLE : ISD::SETOGE; break; 4007 case ISD::SETUEQ: NewCond = IsNegInf ? ISD::SETULE : ISD::SETUGE; break; 4008 case ISD::SETUNE: NewCond = IsNegInf ? ISD::SETUGT : ISD::SETULT; break; 4009 case ISD::SETONE: NewCond = IsNegInf ? ISD::SETOGT : ISD::SETOLT; break; 4010 default: break; 4011 } 4012 if (NewCond != ISD::SETCC_INVALID && 4013 isCondCodeLegal(NewCond, N0.getSimpleValueType())) 4014 return DAG.getSetCC(dl, VT, N0, N1, NewCond); 4015 } 4016 } 4017 } 4018 4019 if (N0 == N1) { 4020 // The sext(setcc()) => setcc() optimization relies on the appropriate 4021 // constant being emitted. 4022 assert(!N0.getValueType().isInteger() && 4023 "Integer types should be handled by FoldSetCC"); 4024 4025 bool EqTrue = ISD::isTrueWhenEqual(Cond); 4026 unsigned UOF = ISD::getUnorderedFlavor(Cond); 4027 if (UOF == 2) // FP operators that are undefined on NaNs. 4028 return DAG.getBoolConstant(EqTrue, dl, VT, OpVT); 4029 if (UOF == unsigned(EqTrue)) 4030 return DAG.getBoolConstant(EqTrue, dl, VT, OpVT); 4031 // Otherwise, we can't fold it. However, we can simplify it to SETUO/SETO 4032 // if it is not already. 4033 ISD::CondCode NewCond = UOF == 0 ? ISD::SETO : ISD::SETUO; 4034 if (NewCond != Cond && 4035 (DCI.isBeforeLegalizeOps() || 4036 isCondCodeLegal(NewCond, N0.getSimpleValueType()))) 4037 return DAG.getSetCC(dl, VT, N0, N1, NewCond); 4038 } 4039 4040 if ((Cond == ISD::SETEQ || Cond == ISD::SETNE) && 4041 N0.getValueType().isInteger()) { 4042 if (N0.getOpcode() == ISD::ADD || N0.getOpcode() == ISD::SUB || 4043 N0.getOpcode() == ISD::XOR) { 4044 // Simplify (X+Y) == (X+Z) --> Y == Z 4045 if (N0.getOpcode() == N1.getOpcode()) { 4046 if (N0.getOperand(0) == N1.getOperand(0)) 4047 return DAG.getSetCC(dl, VT, N0.getOperand(1), N1.getOperand(1), Cond); 4048 if (N0.getOperand(1) == N1.getOperand(1)) 4049 return DAG.getSetCC(dl, VT, N0.getOperand(0), N1.getOperand(0), Cond); 4050 if (isCommutativeBinOp(N0.getOpcode())) { 4051 // If X op Y == Y op X, try other combinations. 4052 if (N0.getOperand(0) == N1.getOperand(1)) 4053 return DAG.getSetCC(dl, VT, N0.getOperand(1), N1.getOperand(0), 4054 Cond); 4055 if (N0.getOperand(1) == N1.getOperand(0)) 4056 return DAG.getSetCC(dl, VT, N0.getOperand(0), N1.getOperand(1), 4057 Cond); 4058 } 4059 } 4060 4061 // If RHS is a legal immediate value for a compare instruction, we need 4062 // to be careful about increasing register pressure needlessly. 4063 bool LegalRHSImm = false; 4064 4065 if (auto *RHSC = dyn_cast<ConstantSDNode>(N1)) { 4066 if (auto *LHSR = dyn_cast<ConstantSDNode>(N0.getOperand(1))) { 4067 // Turn (X+C1) == C2 --> X == C2-C1 4068 if (N0.getOpcode() == ISD::ADD && N0.getNode()->hasOneUse()) { 4069 return DAG.getSetCC(dl, VT, N0.getOperand(0), 4070 DAG.getConstant(RHSC->getAPIntValue()- 4071 LHSR->getAPIntValue(), 4072 dl, N0.getValueType()), Cond); 4073 } 4074 4075 // Turn (X^C1) == C2 into X == C1^C2 iff X&~C1 = 0. 4076 if (N0.getOpcode() == ISD::XOR) 4077 // If we know that all of the inverted bits are zero, don't bother 4078 // performing the inversion. 
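// For example, if %x is known to fit in 8 bits, '(%x ^ 0xFF) == 0xF0'
// becomes '%x == 0x0F'.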
4079 if (DAG.MaskedValueIsZero(N0.getOperand(0), ~LHSR->getAPIntValue())) 4080 return 4081 DAG.getSetCC(dl, VT, N0.getOperand(0), 4082 DAG.getConstant(LHSR->getAPIntValue() ^ 4083 RHSC->getAPIntValue(), 4084 dl, N0.getValueType()), 4085 Cond); 4086 } 4087 4088 // Turn (C1-X) == C2 --> X == C1-C2 4089 if (auto *SUBC = dyn_cast<ConstantSDNode>(N0.getOperand(0))) { 4090 if (N0.getOpcode() == ISD::SUB && N0.getNode()->hasOneUse()) { 4091 return 4092 DAG.getSetCC(dl, VT, N0.getOperand(1), 4093 DAG.getConstant(SUBC->getAPIntValue() - 4094 RHSC->getAPIntValue(), 4095 dl, N0.getValueType()), 4096 Cond); 4097 } 4098 } 4099 4100 // Could RHSC fold directly into a compare? 4101 if (RHSC->getValueType(0).getSizeInBits() <= 64) 4102 LegalRHSImm = isLegalICmpImmediate(RHSC->getSExtValue()); 4103 } 4104 4105 // (X+Y) == X --> Y == 0 and similar folds. 4106 // Don't do this if X is an immediate that can fold into a cmp 4107 // instruction and X+Y has other uses. It could be an induction variable 4108 // chain, and the transform would increase register pressure. 4109 if (!LegalRHSImm || N0.hasOneUse()) 4110 if (SDValue V = foldSetCCWithBinOp(VT, N0, N1, Cond, dl, DCI)) 4111 return V; 4112 } 4113 4114 if (N1.getOpcode() == ISD::ADD || N1.getOpcode() == ISD::SUB || 4115 N1.getOpcode() == ISD::XOR) 4116 if (SDValue V = foldSetCCWithBinOp(VT, N1, N0, Cond, dl, DCI)) 4117 return V; 4118 4119 if (SDValue V = foldSetCCWithAnd(VT, N0, N1, Cond, dl, DCI)) 4120 return V; 4121 } 4122 4123 // Fold remainder of division by a constant. 4124 if ((N0.getOpcode() == ISD::UREM || N0.getOpcode() == ISD::SREM) && 4125 N0.hasOneUse() && (Cond == ISD::SETEQ || Cond == ISD::SETNE)) { 4126 AttributeList Attr = DAG.getMachineFunction().getFunction().getAttributes(); 4127 4128 // When division is cheap or optimizing for minimum size, 4129 // fall through to DIVREM creation by skipping this fold. 4130 if (!isIntDivCheap(VT, Attr) && !Attr.hasFnAttribute(Attribute::MinSize)) { 4131 if (N0.getOpcode() == ISD::UREM) { 4132 if (SDValue Folded = buildUREMEqFold(VT, N0, N1, Cond, DCI, dl)) 4133 return Folded; 4134 } else if (N0.getOpcode() == ISD::SREM) { 4135 if (SDValue Folded = buildSREMEqFold(VT, N0, N1, Cond, DCI, dl)) 4136 return Folded; 4137 } 4138 } 4139 } 4140 4141 // Fold away ALL boolean setcc's. 
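// For example, with i1 operands, 'seteq %x, %y' becomes '~(%x ^ %y)' and
// 'setult %x, %y' becomes '~%x & %y'.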
4142 if (N0.getValueType().getScalarType() == MVT::i1 && foldBooleans) { 4143 SDValue Temp; 4144 switch (Cond) { 4145 default: llvm_unreachable("Unknown integer setcc!"); 4146 case ISD::SETEQ: // X == Y -> ~(X^Y) 4147 Temp = DAG.getNode(ISD::XOR, dl, OpVT, N0, N1); 4148 N0 = DAG.getNOT(dl, Temp, OpVT); 4149 if (!DCI.isCalledByLegalizer()) 4150 DCI.AddToWorklist(Temp.getNode()); 4151 break; 4152 case ISD::SETNE: // X != Y --> (X^Y) 4153 N0 = DAG.getNode(ISD::XOR, dl, OpVT, N0, N1); 4154 break; 4155 case ISD::SETGT: // X >s Y --> X == 0 & Y == 1 --> ~X & Y 4156 case ISD::SETULT: // X <u Y --> X == 0 & Y == 1 --> ~X & Y 4157 Temp = DAG.getNOT(dl, N0, OpVT); 4158 N0 = DAG.getNode(ISD::AND, dl, OpVT, N1, Temp); 4159 if (!DCI.isCalledByLegalizer()) 4160 DCI.AddToWorklist(Temp.getNode()); 4161 break; 4162 case ISD::SETLT: // X <s Y --> X == 1 & Y == 0 --> ~Y & X 4163 case ISD::SETUGT: // X >u Y --> X == 1 & Y == 0 --> ~Y & X 4164 Temp = DAG.getNOT(dl, N1, OpVT); 4165 N0 = DAG.getNode(ISD::AND, dl, OpVT, N0, Temp); 4166 if (!DCI.isCalledByLegalizer()) 4167 DCI.AddToWorklist(Temp.getNode()); 4168 break; 4169 case ISD::SETULE: // X <=u Y --> X == 0 | Y == 1 --> ~X | Y 4170 case ISD::SETGE: // X >=s Y --> X == 0 | Y == 1 --> ~X | Y 4171 Temp = DAG.getNOT(dl, N0, OpVT); 4172 N0 = DAG.getNode(ISD::OR, dl, OpVT, N1, Temp); 4173 if (!DCI.isCalledByLegalizer()) 4174 DCI.AddToWorklist(Temp.getNode()); 4175 break; 4176 case ISD::SETUGE: // X >=u Y --> X == 1 | Y == 0 --> ~Y | X 4177 case ISD::SETLE: // X <=s Y --> X == 1 | Y == 0 --> ~Y | X 4178 Temp = DAG.getNOT(dl, N1, OpVT); 4179 N0 = DAG.getNode(ISD::OR, dl, OpVT, N0, Temp); 4180 break; 4181 } 4182 if (VT.getScalarType() != MVT::i1) { 4183 if (!DCI.isCalledByLegalizer()) 4184 DCI.AddToWorklist(N0.getNode()); 4185 // FIXME: If running after legalize, we probably can't do this. 4186 ISD::NodeType ExtendCode = getExtendForContent(getBooleanContents(OpVT)); 4187 N0 = DAG.getNode(ExtendCode, dl, VT, N0); 4188 } 4189 return N0; 4190 } 4191 4192 // Could not fold it. 4193 return SDValue(); 4194 } 4195 4196 /// Returns true (and the GlobalValue and the offset) if the node is a 4197 /// GlobalAddress + offset. 4198 bool TargetLowering::isGAPlusOffset(SDNode *WN, const GlobalValue *&GA, 4199 int64_t &Offset) const { 4200 4201 SDNode *N = unwrapAddress(SDValue(WN, 0)).getNode(); 4202 4203 if (auto *GASD = dyn_cast<GlobalAddressSDNode>(N)) { 4204 GA = GASD->getGlobal(); 4205 Offset += GASD->getOffset(); 4206 return true; 4207 } 4208 4209 if (N->getOpcode() == ISD::ADD) { 4210 SDValue N1 = N->getOperand(0); 4211 SDValue N2 = N->getOperand(1); 4212 if (isGAPlusOffset(N1.getNode(), GA, Offset)) { 4213 if (auto *V = dyn_cast<ConstantSDNode>(N2)) { 4214 Offset += V->getSExtValue(); 4215 return true; 4216 } 4217 } else if (isGAPlusOffset(N2.getNode(), GA, Offset)) { 4218 if (auto *V = dyn_cast<ConstantSDNode>(N1)) { 4219 Offset += V->getSExtValue(); 4220 return true; 4221 } 4222 } 4223 } 4224 4225 return false; 4226 } 4227 4228 SDValue TargetLowering::PerformDAGCombine(SDNode *N, 4229 DAGCombinerInfo &DCI) const { 4230 // Default implementation: no optimization. 
4231 return SDValue(); 4232 } 4233 4234 //===----------------------------------------------------------------------===// 4235 // Inline Assembler Implementation Methods 4236 //===----------------------------------------------------------------------===// 4237 4238 TargetLowering::ConstraintType 4239 TargetLowering::getConstraintType(StringRef Constraint) const { 4240 unsigned S = Constraint.size(); 4241 4242 if (S == 1) { 4243 switch (Constraint[0]) { 4244 default: break; 4245 case 'r': 4246 return C_RegisterClass; 4247 case 'm': // memory 4248 case 'o': // offsetable 4249 case 'V': // not offsetable 4250 return C_Memory; 4251 case 'n': // Simple Integer 4252 case 'E': // Floating Point Constant 4253 case 'F': // Floating Point Constant 4254 return C_Immediate; 4255 case 'i': // Simple Integer or Relocatable Constant 4256 case 's': // Relocatable Constant 4257 case 'p': // Address. 4258 case 'X': // Allow ANY value. 4259 case 'I': // Target registers. 4260 case 'J': 4261 case 'K': 4262 case 'L': 4263 case 'M': 4264 case 'N': 4265 case 'O': 4266 case 'P': 4267 case '<': 4268 case '>': 4269 return C_Other; 4270 } 4271 } 4272 4273 if (S > 1 && Constraint[0] == '{' && Constraint[S - 1] == '}') { 4274 if (S == 8 && Constraint.substr(1, 6) == "memory") // "{memory}" 4275 return C_Memory; 4276 return C_Register; 4277 } 4278 return C_Unknown; 4279 } 4280 4281 /// Try to replace an X constraint, which matches anything, with another that 4282 /// has more specific requirements based on the type of the corresponding 4283 /// operand. 4284 const char *TargetLowering::LowerXConstraint(EVT ConstraintVT) const { 4285 if (ConstraintVT.isInteger()) 4286 return "r"; 4287 if (ConstraintVT.isFloatingPoint()) 4288 return "f"; // works for many targets 4289 return nullptr; 4290 } 4291 4292 SDValue TargetLowering::LowerAsmOutputForConstraint( 4293 SDValue &Chain, SDValue &Flag, SDLoc DL, const AsmOperandInfo &OpInfo, 4294 SelectionDAG &DAG) const { 4295 return SDValue(); 4296 } 4297 4298 /// Lower the specified operand into the Ops vector. 4299 /// If it is invalid, don't add anything to Ops. 4300 void TargetLowering::LowerAsmOperandForConstraint(SDValue Op, 4301 std::string &Constraint, 4302 std::vector<SDValue> &Ops, 4303 SelectionDAG &DAG) const { 4304 4305 if (Constraint.length() > 1) return; 4306 4307 char ConstraintLetter = Constraint[0]; 4308 switch (ConstraintLetter) { 4309 default: break; 4310 case 'X': // Allows any operand; labels (basic block) use this. 4311 if (Op.getOpcode() == ISD::BasicBlock || 4312 Op.getOpcode() == ISD::TargetBlockAddress) { 4313 Ops.push_back(Op); 4314 return; 4315 } 4316 LLVM_FALLTHROUGH; 4317 case 'i': // Simple Integer or Relocatable Constant 4318 case 'n': // Simple Integer 4319 case 's': { // Relocatable Constant 4320 4321 GlobalAddressSDNode *GA; 4322 ConstantSDNode *C; 4323 BlockAddressSDNode *BA; 4324 uint64_t Offset = 0; 4325 4326 // Match (GA) or (C) or (GA+C) or (GA-C) or ((GA+C)+C) or (((GA+C)+C)+C), 4327 // etc., since getelementpointer is variadic. We can't use 4328 // SelectionDAG::FoldSymbolOffset because it expects the GA to be accessible 4329 // while in this case the GA may be furthest from the root node which is 4330 // likely an ISD::ADD. 
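// For example, for '(add (add GA, 4), 8)' the loop below accumulates
// Offset = 12 and emits a single target global address with that offset.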
4331 while (1) {
4332 if ((GA = dyn_cast<GlobalAddressSDNode>(Op)) && ConstraintLetter != 'n') {
4333 Ops.push_back(DAG.getTargetGlobalAddress(GA->getGlobal(), SDLoc(Op),
4334 GA->getValueType(0),
4335 Offset + GA->getOffset()));
4336 return;
4337 } else if ((C = dyn_cast<ConstantSDNode>(Op)) &&
4338 ConstraintLetter != 's') {
4339 // gcc prints these as sign extended. Sign extend value to 64 bits
4340 // now; without this it would get ZExt'd later in
4341 // ScheduleDAGSDNodes::EmitNode, which is very generic.
4342 bool IsBool = C->getConstantIntValue()->getBitWidth() == 1;
4343 BooleanContent BCont = getBooleanContents(MVT::i64);
4344 ISD::NodeType ExtOpc = IsBool ? getExtendForContent(BCont)
4345 : ISD::SIGN_EXTEND;
4346 int64_t ExtVal = ExtOpc == ISD::ZERO_EXTEND ? C->getZExtValue()
4347 : C->getSExtValue();
4348 Ops.push_back(DAG.getTargetConstant(Offset + ExtVal,
4349 SDLoc(C), MVT::i64));
4350 return;
4351 } else if ((BA = dyn_cast<BlockAddressSDNode>(Op)) &&
4352 ConstraintLetter != 'n') {
4353 Ops.push_back(DAG.getTargetBlockAddress(
4354 BA->getBlockAddress(), BA->getValueType(0),
4355 Offset + BA->getOffset(), BA->getTargetFlags()));
4356 return;
4357 } else {
4358 const unsigned OpCode = Op.getOpcode();
4359 if (OpCode == ISD::ADD || OpCode == ISD::SUB) {
4360 if ((C = dyn_cast<ConstantSDNode>(Op.getOperand(0))))
4361 Op = Op.getOperand(1);
4362 // Subtraction is not commutative.
4363 else if (OpCode == ISD::ADD &&
4364 (C = dyn_cast<ConstantSDNode>(Op.getOperand(1))))
4365 Op = Op.getOperand(0);
4366 else
4367 return;
4368 Offset += (OpCode == ISD::ADD ? 1 : -1) * C->getSExtValue();
4369 continue;
4370 }
4371 }
4372 return;
4373 }
4374 break;
4375 }
4376 }
4377 }
4378
4379 std::pair<unsigned, const TargetRegisterClass *>
4380 TargetLowering::getRegForInlineAsmConstraint(const TargetRegisterInfo *RI,
4381 StringRef Constraint,
4382 MVT VT) const {
4383 if (Constraint.empty() || Constraint[0] != '{')
4384 return std::make_pair(0u, static_cast<TargetRegisterClass *>(nullptr));
4385 assert(*(Constraint.end() - 1) == '}' && "Not a brace enclosed constraint?");
4386
4387 // Remove the braces from around the name.
4388 StringRef RegName(Constraint.data() + 1, Constraint.size() - 2);
4389
4390 std::pair<unsigned, const TargetRegisterClass *> R =
4391 std::make_pair(0u, static_cast<const TargetRegisterClass *>(nullptr));
4392
4393 // Figure out which register class contains this reg.
4394 for (const TargetRegisterClass *RC : RI->regclasses()) {
4395 // If none of the value types for this register class are valid, we
4396 // can't use it. For example, 64-bit reg classes on 32-bit targets.
4397 if (!isLegalRC(*RI, *RC))
4398 continue;
4399
4400 for (TargetRegisterClass::iterator I = RC->begin(), E = RC->end();
4401 I != E; ++I) {
4402 if (RegName.equals_lower(RI->getRegAsmName(*I))) {
4403 std::pair<unsigned, const TargetRegisterClass *> S =
4404 std::make_pair(*I, RC);
4405
4406 // If this register class has the requested value type, return it,
4407 // otherwise keep searching and return the first class found
4408 // if no other is found which explicitly has the requested type.
4409 if (RI->isTypeLegalForClass(*RC, VT))
4410 return S;
4411 if (!R.second)
4412 R = S;
4413 }
4414 }
4415 }
4416
4417 return R;
4418 }
4419
4420 //===----------------------------------------------------------------------===//
4421 // Constraint Selection.
4422
4423 /// Return true if this is an input operand that is a matching constraint like
4424 /// "4".
4425 bool TargetLowering::AsmOperandInfo::isMatchingInputConstraint() const { 4426 assert(!ConstraintCode.empty() && "No known constraint!"); 4427 return isdigit(static_cast<unsigned char>(ConstraintCode[0])); 4428 } 4429 4430 /// If this is an input matching constraint, this method returns the output 4431 /// operand it matches. 4432 unsigned TargetLowering::AsmOperandInfo::getMatchedOperand() const { 4433 assert(!ConstraintCode.empty() && "No known constraint!"); 4434 return atoi(ConstraintCode.c_str()); 4435 } 4436 4437 /// Split up the constraint string from the inline assembly value into the 4438 /// specific constraints and their prefixes, and also tie in the associated 4439 /// operand values. 4440 /// If this returns an empty vector, and if the constraint string itself 4441 /// isn't empty, there was an error parsing. 4442 TargetLowering::AsmOperandInfoVector 4443 TargetLowering::ParseConstraints(const DataLayout &DL, 4444 const TargetRegisterInfo *TRI, 4445 const CallBase &Call) const { 4446 /// Information about all of the constraints. 4447 AsmOperandInfoVector ConstraintOperands; 4448 const InlineAsm *IA = cast<InlineAsm>(Call.getCalledOperand()); 4449 unsigned maCount = 0; // Largest number of multiple alternative constraints. 4450 4451 // Do a prepass over the constraints, canonicalizing them, and building up the 4452 // ConstraintOperands list. 4453 unsigned ArgNo = 0; // ArgNo - The argument of the CallInst. 4454 unsigned ResNo = 0; // ResNo - The result number of the next output. 4455 4456 for (InlineAsm::ConstraintInfo &CI : IA->ParseConstraints()) { 4457 ConstraintOperands.emplace_back(std::move(CI)); 4458 AsmOperandInfo &OpInfo = ConstraintOperands.back(); 4459 4460 // Update multiple alternative constraint count. 4461 if (OpInfo.multipleAlternatives.size() > maCount) 4462 maCount = OpInfo.multipleAlternatives.size(); 4463 4464 OpInfo.ConstraintVT = MVT::Other; 4465 4466 // Compute the value type for each operand. 4467 switch (OpInfo.Type) { 4468 case InlineAsm::isOutput: 4469 // Indirect outputs just consume an argument. 4470 if (OpInfo.isIndirect) { 4471 OpInfo.CallOperandVal = Call.getArgOperand(ArgNo++); 4472 break; 4473 } 4474 4475 // The return value of the call is this value. As such, there is no 4476 // corresponding argument. 4477 assert(!Call.getType()->isVoidTy() && "Bad inline asm!"); 4478 if (StructType *STy = dyn_cast<StructType>(Call.getType())) { 4479 OpInfo.ConstraintVT = 4480 getSimpleValueType(DL, STy->getElementType(ResNo)); 4481 } else { 4482 assert(ResNo == 0 && "Asm only has one result!"); 4483 OpInfo.ConstraintVT = getSimpleValueType(DL, Call.getType()); 4484 } 4485 ++ResNo; 4486 break; 4487 case InlineAsm::isInput: 4488 OpInfo.CallOperandVal = Call.getArgOperand(ArgNo++); 4489 break; 4490 case InlineAsm::isClobber: 4491 // Nothing to do. 4492 break; 4493 } 4494 4495 if (OpInfo.CallOperandVal) { 4496 llvm::Type *OpTy = OpInfo.CallOperandVal->getType(); 4497 if (OpInfo.isIndirect) { 4498 llvm::PointerType *PtrTy = dyn_cast<PointerType>(OpTy); 4499 if (!PtrTy) 4500 report_fatal_error("Indirect operand for inline asm not a pointer!"); 4501 OpTy = PtrTy->getElementType(); 4502 } 4503 4504 // Look for vector wrapped in a struct. e.g. { <16 x i8> }. 4505 if (StructType *STy = dyn_cast<StructType>(OpTy)) 4506 if (STy->getNumElements() == 1) 4507 OpTy = STy->getElementType(0); 4508 4509 // If OpTy is not a single value, it may be a struct/union that we 4510 // can tile with integers. 
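// For example, a 64-bit struct such as '{ i32, i32 }' is treated as i64
// below, while an oddly sized aggregate keeps MVT::Other.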
4511 if (!OpTy->isSingleValueType() && OpTy->isSized()) { 4512 unsigned BitSize = DL.getTypeSizeInBits(OpTy); 4513 switch (BitSize) { 4514 default: break; 4515 case 1: 4516 case 8: 4517 case 16: 4518 case 32: 4519 case 64: 4520 case 128: 4521 OpInfo.ConstraintVT = 4522 MVT::getVT(IntegerType::get(OpTy->getContext(), BitSize), true); 4523 break; 4524 } 4525 } else if (PointerType *PT = dyn_cast<PointerType>(OpTy)) { 4526 unsigned PtrSize = DL.getPointerSizeInBits(PT->getAddressSpace()); 4527 OpInfo.ConstraintVT = MVT::getIntegerVT(PtrSize); 4528 } else { 4529 OpInfo.ConstraintVT = MVT::getVT(OpTy, true); 4530 } 4531 } 4532 } 4533 4534 // If we have multiple alternative constraints, select the best alternative. 4535 if (!ConstraintOperands.empty()) { 4536 if (maCount) { 4537 unsigned bestMAIndex = 0; 4538 int bestWeight = -1; 4539 // weight: -1 = invalid match, and 0 = so-so match to 5 = good match. 4540 int weight = -1; 4541 unsigned maIndex; 4542 // Compute the sums of the weights for each alternative, keeping track 4543 // of the best (highest weight) one so far. 4544 for (maIndex = 0; maIndex < maCount; ++maIndex) { 4545 int weightSum = 0; 4546 for (unsigned cIndex = 0, eIndex = ConstraintOperands.size(); 4547 cIndex != eIndex; ++cIndex) { 4548 AsmOperandInfo &OpInfo = ConstraintOperands[cIndex]; 4549 if (OpInfo.Type == InlineAsm::isClobber) 4550 continue; 4551 4552 // If this is an output operand with a matching input operand, 4553 // look up the matching input. If their types mismatch, e.g. one 4554 // is an integer, the other is floating point, or their sizes are 4555 // different, flag it as an maCantMatch. 4556 if (OpInfo.hasMatchingInput()) { 4557 AsmOperandInfo &Input = ConstraintOperands[OpInfo.MatchingInput]; 4558 if (OpInfo.ConstraintVT != Input.ConstraintVT) { 4559 if ((OpInfo.ConstraintVT.isInteger() != 4560 Input.ConstraintVT.isInteger()) || 4561 (OpInfo.ConstraintVT.getSizeInBits() != 4562 Input.ConstraintVT.getSizeInBits())) { 4563 weightSum = -1; // Can't match. 4564 break; 4565 } 4566 } 4567 } 4568 weight = getMultipleConstraintMatchWeight(OpInfo, maIndex); 4569 if (weight == -1) { 4570 weightSum = -1; 4571 break; 4572 } 4573 weightSum += weight; 4574 } 4575 // Update best. 4576 if (weightSum > bestWeight) { 4577 bestWeight = weightSum; 4578 bestMAIndex = maIndex; 4579 } 4580 } 4581 4582 // Now select chosen alternative in each constraint. 4583 for (unsigned cIndex = 0, eIndex = ConstraintOperands.size(); 4584 cIndex != eIndex; ++cIndex) { 4585 AsmOperandInfo &cInfo = ConstraintOperands[cIndex]; 4586 if (cInfo.Type == InlineAsm::isClobber) 4587 continue; 4588 cInfo.selectAlternative(bestMAIndex); 4589 } 4590 } 4591 } 4592 4593 // Check and hook up tied operands, choose constraint code to use. 4594 for (unsigned cIndex = 0, eIndex = ConstraintOperands.size(); 4595 cIndex != eIndex; ++cIndex) { 4596 AsmOperandInfo &OpInfo = ConstraintOperands[cIndex]; 4597 4598 // If this is an output operand with a matching input operand, look up the 4599 // matching input. If their types mismatch, e.g. one is an integer, the 4600 // other is floating point, or their sizes are different, flag it as an 4601 // error. 
4602 if (OpInfo.hasMatchingInput()) {
4603 AsmOperandInfo &Input = ConstraintOperands[OpInfo.MatchingInput];
4604
4605 if (OpInfo.ConstraintVT != Input.ConstraintVT) {
4606 std::pair<unsigned, const TargetRegisterClass *> MatchRC =
4607 getRegForInlineAsmConstraint(TRI, OpInfo.ConstraintCode,
4608 OpInfo.ConstraintVT);
4609 std::pair<unsigned, const TargetRegisterClass *> InputRC =
4610 getRegForInlineAsmConstraint(TRI, Input.ConstraintCode,
4611 Input.ConstraintVT);
4612 if ((OpInfo.ConstraintVT.isInteger() !=
4613 Input.ConstraintVT.isInteger()) ||
4614 (MatchRC.second != InputRC.second)) {
4615 report_fatal_error("Unsupported asm: input constraint"
4616 " with a matching output constraint of"
4617 " incompatible type!");
4618 }
4619 }
4620 }
4621 }
4622
4623 return ConstraintOperands;
4624 }
4625
4626 /// Return an integer indicating how general CT is.
4627 static unsigned getConstraintGenerality(TargetLowering::ConstraintType CT) {
4628 switch (CT) {
4629 case TargetLowering::C_Immediate:
4630 case TargetLowering::C_Other:
4631 case TargetLowering::C_Unknown:
4632 return 0;
4633 case TargetLowering::C_Register:
4634 return 1;
4635 case TargetLowering::C_RegisterClass:
4636 return 2;
4637 case TargetLowering::C_Memory:
4638 return 3;
4639 }
4640 llvm_unreachable("Invalid constraint type");
4641 }
4642
4643 /// Examine constraint type and operand type and determine a weight value.
4644 /// This object must already have been set up with the operand type
4645 /// and the current alternative constraint selected.
4646 TargetLowering::ConstraintWeight
4647 TargetLowering::getMultipleConstraintMatchWeight(
4648 AsmOperandInfo &info, int maIndex) const {
4649 InlineAsm::ConstraintCodeVector *rCodes;
4650 if (maIndex >= (int)info.multipleAlternatives.size())
4651 rCodes = &info.Codes;
4652 else
4653 rCodes = &info.multipleAlternatives[maIndex].Codes;
4654 ConstraintWeight BestWeight = CW_Invalid;
4655
4656 // Loop over the options, keeping track of the best (highest weight) one.
4657 for (unsigned i = 0, e = rCodes->size(); i != e; ++i) {
4658 ConstraintWeight weight =
4659 getSingleConstraintMatchWeight(info, (*rCodes)[i].c_str());
4660 if (weight > BestWeight)
4661 BestWeight = weight;
4662 }
4663
4664 return BestWeight;
4665 }
4666
4667 /// Examine constraint type and operand type and determine a weight value.
4668 /// This object must already have been set up with the operand type
4669 /// and the current alternative constraint selected.
4670 TargetLowering::ConstraintWeight
4671 TargetLowering::getSingleConstraintMatchWeight(
4672 AsmOperandInfo &info, const char *constraint) const {
4673 ConstraintWeight weight = CW_Invalid;
4674 Value *CallOperandVal = info.CallOperandVal;
4675 // If we don't have a value, we can't do a match,
4676 // but allow it at the lowest weight.
4677 if (!CallOperandVal)
4678 return CW_Default;
4679 // Look at the constraint type.
4680 switch (*constraint) {
4681 case 'i': // immediate integer.
4682 case 'n': // immediate integer with a known value.
4683 if (isa<ConstantInt>(CallOperandVal))
4684 weight = CW_Constant;
4685 break;
4686 case 's': // non-explicit integral immediate.
4687 if (isa<GlobalValue>(CallOperandVal))
4688 weight = CW_Constant;
4689 break;
4690 case 'E': // immediate float if host format.
4691 case 'F': // immediate float.
4692 if (isa<ConstantFP>(CallOperandVal))
4693 weight = CW_Constant;
4694 break;
4695 case '<': // memory operand with autodecrement.
4696 case '>': // memory operand with autoincrement.
4697 case 'm': // memory operand.
4698 case 'o': // offsettable memory operand 4699 case 'V': // non-offsettable memory operand 4700 weight = CW_Memory; 4701 break; 4702 case 'r': // general register. 4703 case 'g': // general register, memory operand or immediate integer. 4704 // note: Clang converts "g" to "imr". 4705 if (CallOperandVal->getType()->isIntegerTy()) 4706 weight = CW_Register; 4707 break; 4708 case 'X': // any operand. 4709 default: 4710 weight = CW_Default; 4711 break; 4712 } 4713 return weight; 4714 } 4715 4716 /// If there are multiple different constraints that we could pick for this 4717 /// operand (e.g. "imr") try to pick the 'best' one. 4718 /// This is somewhat tricky: constraints fall into four classes: 4719 /// Other -> immediates and magic values 4720 /// Register -> one specific register 4721 /// RegisterClass -> a group of regs 4722 /// Memory -> memory 4723 /// Ideally, we would pick the most specific constraint possible: if we have 4724 /// something that fits into a register, we would pick it. The problem here 4725 /// is that if we have something that could either be in a register or in 4726 /// memory that use of the register could cause selection of *other* 4727 /// operands to fail: they might only succeed if we pick memory. Because of 4728 /// this the heuristic we use is: 4729 /// 4730 /// 1) If there is an 'other' constraint, and if the operand is valid for 4731 /// that constraint, use it. This makes us take advantage of 'i' 4732 /// constraints when available. 4733 /// 2) Otherwise, pick the most general constraint present. This prefers 4734 /// 'm' over 'r', for example. 4735 /// 4736 static void ChooseConstraint(TargetLowering::AsmOperandInfo &OpInfo, 4737 const TargetLowering &TLI, 4738 SDValue Op, SelectionDAG *DAG) { 4739 assert(OpInfo.Codes.size() > 1 && "Doesn't have multiple constraint options"); 4740 unsigned BestIdx = 0; 4741 TargetLowering::ConstraintType BestType = TargetLowering::C_Unknown; 4742 int BestGenerality = -1; 4743 4744 // Loop over the options, keeping track of the most general one. 4745 for (unsigned i = 0, e = OpInfo.Codes.size(); i != e; ++i) { 4746 TargetLowering::ConstraintType CType = 4747 TLI.getConstraintType(OpInfo.Codes[i]); 4748 4749 // Indirect 'other' or 'immediate' constraints are not allowed. 4750 if (OpInfo.isIndirect && !(CType == TargetLowering::C_Memory || 4751 CType == TargetLowering::C_Register || 4752 CType == TargetLowering::C_RegisterClass)) 4753 continue; 4754 4755 // If this is an 'other' or 'immediate' constraint, see if the operand is 4756 // valid for it. For example, on X86 we might have an 'rI' constraint. If 4757 // the operand is an integer in the range [0..31] we want to use I (saving a 4758 // load of a register), otherwise we must use 'r'. 4759 if ((CType == TargetLowering::C_Other || 4760 CType == TargetLowering::C_Immediate) && Op.getNode()) { 4761 assert(OpInfo.Codes[i].size() == 1 && 4762 "Unhandled multi-letter 'other' constraint"); 4763 std::vector<SDValue> ResultOps; 4764 TLI.LowerAsmOperandForConstraint(Op, OpInfo.Codes[i], 4765 ResultOps, *DAG); 4766 if (!ResultOps.empty()) { 4767 BestType = CType; 4768 BestIdx = i; 4769 break; 4770 } 4771 } 4772 4773 // Things with matching constraints can only be registers, per gcc 4774 // documentation. This mainly affects "g" constraints. 4775 if (CType == TargetLowering::C_Memory && OpInfo.hasMatchingInput()) 4776 continue; 4777 4778 // This constraint letter is more general than the previous one, use it. 
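// Illustrative sketch (not from the original comments): for the multi-letter
// constraint "imr", 'i' resolves to an immediate-class constraint
// (generality 0), 'r' to C_RegisterClass (generality 2) and 'm' to C_Memory
// (generality 3), so step 1 above picks 'i' when the operand is a usable
// constant and step 2 otherwise prefers 'm' over 'r'.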
4779 int Generality = getConstraintGenerality(CType); 4780 if (Generality > BestGenerality) { 4781 BestType = CType; 4782 BestIdx = i; 4783 BestGenerality = Generality; 4784 } 4785 } 4786 4787 OpInfo.ConstraintCode = OpInfo.Codes[BestIdx]; 4788 OpInfo.ConstraintType = BestType; 4789 } 4790 4791 /// Determines the constraint code and constraint type to use for the specific 4792 /// AsmOperandInfo, setting OpInfo.ConstraintCode and OpInfo.ConstraintType. 4793 void TargetLowering::ComputeConstraintToUse(AsmOperandInfo &OpInfo, 4794 SDValue Op, 4795 SelectionDAG *DAG) const { 4796 assert(!OpInfo.Codes.empty() && "Must have at least one constraint"); 4797 4798 // Single-letter constraints ('r') are very common. 4799 if (OpInfo.Codes.size() == 1) { 4800 OpInfo.ConstraintCode = OpInfo.Codes[0]; 4801 OpInfo.ConstraintType = getConstraintType(OpInfo.ConstraintCode); 4802 } else { 4803 ChooseConstraint(OpInfo, *this, Op, DAG); 4804 } 4805 4806 // 'X' matches anything. 4807 if (OpInfo.ConstraintCode == "X" && OpInfo.CallOperandVal) { 4808 // Labels and constants are handled elsewhere ('X' is the only thing 4809 // that matches labels). For Functions, the type here is the type of 4810 // the result, which is not what we want to look at; leave them alone. 4811 Value *v = OpInfo.CallOperandVal; 4812 if (isa<BasicBlock>(v) || isa<ConstantInt>(v) || isa<Function>(v)) { 4813 OpInfo.CallOperandVal = v; 4814 return; 4815 } 4816 4817 if (Op.getNode() && Op.getOpcode() == ISD::TargetBlockAddress) 4818 return; 4819 4820 // Otherwise, try to resolve it to something we know about by looking at 4821 // the actual operand type. 4822 if (const char *Repl = LowerXConstraint(OpInfo.ConstraintVT)) { 4823 OpInfo.ConstraintCode = Repl; 4824 OpInfo.ConstraintType = getConstraintType(OpInfo.ConstraintCode); 4825 } 4826 } 4827 } 4828 4829 /// Given an exact SDIV by a constant, create a multiplication 4830 /// with the multiplicative inverse of the constant. 4831 static SDValue BuildExactSDIV(const TargetLowering &TLI, SDNode *N, 4832 const SDLoc &dl, SelectionDAG &DAG, 4833 SmallVectorImpl<SDNode *> &Created) { 4834 SDValue Op0 = N->getOperand(0); 4835 SDValue Op1 = N->getOperand(1); 4836 EVT VT = N->getValueType(0); 4837 EVT SVT = VT.getScalarType(); 4838 EVT ShVT = TLI.getShiftAmountTy(VT, DAG.getDataLayout()); 4839 EVT ShSVT = ShVT.getScalarType(); 4840 4841 bool UseSRA = false; 4842 SmallVector<SDValue, 16> Shifts, Factors; 4843 4844 auto BuildSDIVPattern = [&](ConstantSDNode *C) { 4845 if (C->isNullValue()) 4846 return false; 4847 APInt Divisor = C->getAPIntValue(); 4848 unsigned Shift = Divisor.countTrailingZeros(); 4849 if (Shift) { 4850 Divisor.ashrInPlace(Shift); 4851 UseSRA = true; 4852 } 4853 // Calculate the multiplicative inverse, using Newton's method. 4854 APInt t; 4855 APInt Factor = Divisor; 4856 while ((t = Divisor * Factor) != 1) 4857 Factor *= APInt(Divisor.getBitWidth(), 2) - t; 4858 Shifts.push_back(DAG.getConstant(Shift, dl, ShSVT)); 4859 Factors.push_back(DAG.getConstant(Factor, dl, SVT)); 4860 return true; 4861 }; 4862 4863 // Collect all magic values from the build vector. 4864 if (!ISD::matchUnaryPredicate(Op1, BuildSDIVPattern)) 4865 return SDValue(); 4866 4867 SDValue Shift, Factor; 4868 if (VT.isVector()) { 4869 Shift = DAG.getBuildVector(ShVT, dl, Shifts); 4870 Factor = DAG.getBuildVector(VT, dl, Factors); 4871 } else { 4872 Shift = Shifts[0]; 4873 Factor = Factors[0]; 4874 } 4875 4876 SDValue Res = Op0; 4877 4878 // Shift the value upfront if it is even, so the LSB is one. 
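// Illustrative example (not part of the original comments): an exact
// "sdiv i32 %x, 6" becomes an exact "sra %x, 1" followed by a multiply by
// 0xAAAAAAAB, the multiplicative inverse of 3 modulo 2^32, since
// 3 * 0xAAAAAAAB == 1 (mod 2^32).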
4879 if (UseSRA) { 4880 // TODO: For UDIV use SRL instead of SRA. 4881 SDNodeFlags Flags; 4882 Flags.setExact(true); 4883 Res = DAG.getNode(ISD::SRA, dl, VT, Res, Shift, Flags); 4884 Created.push_back(Res.getNode()); 4885 } 4886 4887 return DAG.getNode(ISD::MUL, dl, VT, Res, Factor); 4888 } 4889 4890 SDValue TargetLowering::BuildSDIVPow2(SDNode *N, const APInt &Divisor, 4891 SelectionDAG &DAG, 4892 SmallVectorImpl<SDNode *> &Created) const { 4893 AttributeList Attr = DAG.getMachineFunction().getFunction().getAttributes(); 4894 const TargetLowering &TLI = DAG.getTargetLoweringInfo(); 4895 if (TLI.isIntDivCheap(N->getValueType(0), Attr)) 4896 return SDValue(N, 0); // Lower SDIV as SDIV 4897 return SDValue(); 4898 } 4899 4900 /// Given an ISD::SDIV node expressing a divide by constant, 4901 /// return a DAG expression to select that will generate the same value by 4902 /// multiplying by a magic number. 4903 /// Ref: "Hacker's Delight" or "The PowerPC Compiler Writer's Guide". 4904 SDValue TargetLowering::BuildSDIV(SDNode *N, SelectionDAG &DAG, 4905 bool IsAfterLegalization, 4906 SmallVectorImpl<SDNode *> &Created) const { 4907 SDLoc dl(N); 4908 EVT VT = N->getValueType(0); 4909 EVT SVT = VT.getScalarType(); 4910 EVT ShVT = getShiftAmountTy(VT, DAG.getDataLayout()); 4911 EVT ShSVT = ShVT.getScalarType(); 4912 unsigned EltBits = VT.getScalarSizeInBits(); 4913 4914 // Check to see if we can do this. 4915 // FIXME: We should be more aggressive here. 4916 if (!isTypeLegal(VT)) 4917 return SDValue(); 4918 4919 // If the sdiv has an 'exact' bit we can use a simpler lowering. 4920 if (N->getFlags().hasExact()) 4921 return BuildExactSDIV(*this, N, dl, DAG, Created); 4922 4923 SmallVector<SDValue, 16> MagicFactors, Factors, Shifts, ShiftMasks; 4924 4925 auto BuildSDIVPattern = [&](ConstantSDNode *C) { 4926 if (C->isNullValue()) 4927 return false; 4928 4929 const APInt &Divisor = C->getAPIntValue(); 4930 APInt::ms magics = Divisor.magic(); 4931 int NumeratorFactor = 0; 4932 int ShiftMask = -1; 4933 4934 if (Divisor.isOneValue() || Divisor.isAllOnesValue()) { 4935 // If d is +1/-1, we just multiply the numerator by +1/-1. 4936 NumeratorFactor = Divisor.getSExtValue(); 4937 magics.m = 0; 4938 magics.s = 0; 4939 ShiftMask = 0; 4940 } else if (Divisor.isStrictlyPositive() && magics.m.isNegative()) { 4941 // If d > 0 and m < 0, add the numerator. 4942 NumeratorFactor = 1; 4943 } else if (Divisor.isNegative() && magics.m.isStrictlyPositive()) { 4944 // If d < 0 and m > 0, subtract the numerator. 4945 NumeratorFactor = -1; 4946 } 4947 4948 MagicFactors.push_back(DAG.getConstant(magics.m, dl, SVT)); 4949 Factors.push_back(DAG.getConstant(NumeratorFactor, dl, SVT)); 4950 Shifts.push_back(DAG.getConstant(magics.s, dl, ShSVT)); 4951 ShiftMasks.push_back(DAG.getConstant(ShiftMask, dl, SVT)); 4952 return true; 4953 }; 4954 4955 SDValue N0 = N->getOperand(0); 4956 SDValue N1 = N->getOperand(1); 4957 4958 // Collect the shifts / magic values from each element. 
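// (Illustration, assuming the usual Hacker's Delight constants: for a signed
// i32 divide by 7 this produces MagicFactor = 0x92492493, Shift = 2 and
// Factor = 1, i.e. "add the numerator", because the divisor is positive but
// the magic multiplier is negative.)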
4959 if (!ISD::matchUnaryPredicate(N1, BuildSDIVPattern)) 4960 return SDValue(); 4961 4962 SDValue MagicFactor, Factor, Shift, ShiftMask; 4963 if (VT.isVector()) { 4964 MagicFactor = DAG.getBuildVector(VT, dl, MagicFactors); 4965 Factor = DAG.getBuildVector(VT, dl, Factors); 4966 Shift = DAG.getBuildVector(ShVT, dl, Shifts); 4967 ShiftMask = DAG.getBuildVector(VT, dl, ShiftMasks); 4968 } else { 4969 MagicFactor = MagicFactors[0]; 4970 Factor = Factors[0]; 4971 Shift = Shifts[0]; 4972 ShiftMask = ShiftMasks[0]; 4973 } 4974 4975 // Multiply the numerator (operand 0) by the magic value. 4976 // FIXME: We should support doing a MUL in a wider type. 4977 SDValue Q; 4978 if (IsAfterLegalization ? isOperationLegal(ISD::MULHS, VT) 4979 : isOperationLegalOrCustom(ISD::MULHS, VT)) 4980 Q = DAG.getNode(ISD::MULHS, dl, VT, N0, MagicFactor); 4981 else if (IsAfterLegalization ? isOperationLegal(ISD::SMUL_LOHI, VT) 4982 : isOperationLegalOrCustom(ISD::SMUL_LOHI, VT)) { 4983 SDValue LoHi = 4984 DAG.getNode(ISD::SMUL_LOHI, dl, DAG.getVTList(VT, VT), N0, MagicFactor); 4985 Q = SDValue(LoHi.getNode(), 1); 4986 } else 4987 return SDValue(); // No mulhs or equivalent. 4988 Created.push_back(Q.getNode()); 4989 4990 // (Optionally) Add/subtract the numerator using Factor. 4991 Factor = DAG.getNode(ISD::MUL, dl, VT, N0, Factor); 4992 Created.push_back(Factor.getNode()); 4993 Q = DAG.getNode(ISD::ADD, dl, VT, Q, Factor); 4994 Created.push_back(Q.getNode()); 4995 4996 // Shift right algebraic by shift value. 4997 Q = DAG.getNode(ISD::SRA, dl, VT, Q, Shift); 4998 Created.push_back(Q.getNode()); 4999 5000 // Extract the sign bit, mask it and add it to the quotient. 5001 SDValue SignShift = DAG.getConstant(EltBits - 1, dl, ShVT); 5002 SDValue T = DAG.getNode(ISD::SRL, dl, VT, Q, SignShift); 5003 Created.push_back(T.getNode()); 5004 T = DAG.getNode(ISD::AND, dl, VT, T, ShiftMask); 5005 Created.push_back(T.getNode()); 5006 return DAG.getNode(ISD::ADD, dl, VT, Q, T); 5007 } 5008 5009 /// Given an ISD::UDIV node expressing a divide by constant, 5010 /// return a DAG expression to select that will generate the same value by 5011 /// multiplying by a magic number. 5012 /// Ref: "Hacker's Delight" or "The PowerPC Compiler Writer's Guide". 5013 SDValue TargetLowering::BuildUDIV(SDNode *N, SelectionDAG &DAG, 5014 bool IsAfterLegalization, 5015 SmallVectorImpl<SDNode *> &Created) const { 5016 SDLoc dl(N); 5017 EVT VT = N->getValueType(0); 5018 EVT SVT = VT.getScalarType(); 5019 EVT ShVT = getShiftAmountTy(VT, DAG.getDataLayout()); 5020 EVT ShSVT = ShVT.getScalarType(); 5021 unsigned EltBits = VT.getScalarSizeInBits(); 5022 5023 // Check to see if we can do this. 5024 // FIXME: We should be more aggressive here. 5025 if (!isTypeLegal(VT)) 5026 return SDValue(); 5027 5028 bool UseNPQ = false; 5029 SmallVector<SDValue, 16> PreShifts, PostShifts, MagicFactors, NPQFactors; 5030 5031 auto BuildUDIVPattern = [&](ConstantSDNode *C) { 5032 if (C->isNullValue()) 5033 return false; 5034 // FIXME: We should use a narrower constant when the upper 5035 // bits are known to be zero. 5036 APInt Divisor = C->getAPIntValue(); 5037 APInt::mu magics = Divisor.magicu(); 5038 unsigned PreShift = 0, PostShift = 0; 5039 5040 // If the divisor is even, we can avoid using the expensive fixup by 5041 // shifting the divided value upfront. 5042 if (magics.a != 0 && !Divisor[0]) { 5043 PreShift = Divisor.countTrailingZeros(); 5044 // Get magic number for the shifted divisor. 
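// (For example: a udiv by 28 = 7 * 2^2 first shifts the dividend right by 2
// and then uses a magic constant computed for 7 with those two known-zero
// high bits, after which the expensive NPQ fixup is no longer needed.)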
5045 magics = Divisor.lshr(PreShift).magicu(PreShift); 5046 assert(magics.a == 0 && "Should use cheap fixup now"); 5047 } 5048 5049 APInt Magic = magics.m; 5050 5051 unsigned SelNPQ; 5052 if (magics.a == 0 || Divisor.isOneValue()) { 5053 assert(magics.s < Divisor.getBitWidth() && 5054 "We shouldn't generate an undefined shift!"); 5055 PostShift = magics.s; 5056 SelNPQ = false; 5057 } else { 5058 PostShift = magics.s - 1; 5059 SelNPQ = true; 5060 } 5061 5062 PreShifts.push_back(DAG.getConstant(PreShift, dl, ShSVT)); 5063 MagicFactors.push_back(DAG.getConstant(Magic, dl, SVT)); 5064 NPQFactors.push_back( 5065 DAG.getConstant(SelNPQ ? APInt::getOneBitSet(EltBits, EltBits - 1) 5066 : APInt::getNullValue(EltBits), 5067 dl, SVT)); 5068 PostShifts.push_back(DAG.getConstant(PostShift, dl, ShSVT)); 5069 UseNPQ |= SelNPQ; 5070 return true; 5071 }; 5072 5073 SDValue N0 = N->getOperand(0); 5074 SDValue N1 = N->getOperand(1); 5075 5076 // Collect the shifts/magic values from each element. 5077 if (!ISD::matchUnaryPredicate(N1, BuildUDIVPattern)) 5078 return SDValue(); 5079 5080 SDValue PreShift, PostShift, MagicFactor, NPQFactor; 5081 if (VT.isVector()) { 5082 PreShift = DAG.getBuildVector(ShVT, dl, PreShifts); 5083 MagicFactor = DAG.getBuildVector(VT, dl, MagicFactors); 5084 NPQFactor = DAG.getBuildVector(VT, dl, NPQFactors); 5085 PostShift = DAG.getBuildVector(ShVT, dl, PostShifts); 5086 } else { 5087 PreShift = PreShifts[0]; 5088 MagicFactor = MagicFactors[0]; 5089 PostShift = PostShifts[0]; 5090 } 5091 5092 SDValue Q = N0; 5093 Q = DAG.getNode(ISD::SRL, dl, VT, Q, PreShift); 5094 Created.push_back(Q.getNode()); 5095 5096 // FIXME: We should support doing a MUL in a wider type. 5097 auto GetMULHU = [&](SDValue X, SDValue Y) { 5098 if (IsAfterLegalization ? isOperationLegal(ISD::MULHU, VT) 5099 : isOperationLegalOrCustom(ISD::MULHU, VT)) 5100 return DAG.getNode(ISD::MULHU, dl, VT, X, Y); 5101 if (IsAfterLegalization ? isOperationLegal(ISD::UMUL_LOHI, VT) 5102 : isOperationLegalOrCustom(ISD::UMUL_LOHI, VT)) { 5103 SDValue LoHi = 5104 DAG.getNode(ISD::UMUL_LOHI, dl, DAG.getVTList(VT, VT), X, Y); 5105 return SDValue(LoHi.getNode(), 1); 5106 } 5107 return SDValue(); // No mulhu or equivalent 5108 }; 5109 5110 // Multiply the numerator (operand 0) by the magic value. 5111 Q = GetMULHU(Q, MagicFactor); 5112 if (!Q) 5113 return SDValue(); 5114 5115 Created.push_back(Q.getNode()); 5116 5117 if (UseNPQ) { 5118 SDValue NPQ = DAG.getNode(ISD::SUB, dl, VT, N0, Q); 5119 Created.push_back(NPQ.getNode()); 5120 5121 // For vectors we might have a mix of non-NPQ/NPQ paths, so use 5122 // MULHU to act as a SRL-by-1 for NPQ, else multiply by zero. 5123 if (VT.isVector()) 5124 NPQ = GetMULHU(NPQ, NPQFactor); 5125 else 5126 NPQ = DAG.getNode(ISD::SRL, dl, VT, NPQ, DAG.getConstant(1, dl, ShVT)); 5127 5128 Created.push_back(NPQ.getNode()); 5129 5130 Q = DAG.getNode(ISD::ADD, dl, VT, NPQ, Q); 5131 Created.push_back(Q.getNode()); 5132 } 5133 5134 Q = DAG.getNode(ISD::SRL, dl, VT, Q, PostShift); 5135 Created.push_back(Q.getNode()); 5136 5137 SDValue One = DAG.getConstant(1, dl, VT); 5138 SDValue IsOne = DAG.getSetCC(dl, VT, N1, One, ISD::SETEQ); 5139 return DAG.getSelect(dl, VT, IsOne, N0, Q); 5140 } 5141 5142 /// If all values in Values that *don't* match the predicate are same 'splat' 5143 /// value, then replace all values with that splat value. 5144 /// Else, if AlternativeReplacement was provided, then replace all values that 5145 /// do match predicate with AlternativeReplacement value. 
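/// For example (illustrative only): with Predicate = isNullConstant,
/// {0, 5, 0, 5} becomes {5, 5, 5, 5}; {0, 5, 0, 7} has no such splat value,
/// so it is left untouched unless an AlternativeReplacement is given, in
/// which case the zeros are replaced by it.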
5146 static void 5147 turnVectorIntoSplatVector(MutableArrayRef<SDValue> Values, 5148 std::function<bool(SDValue)> Predicate, 5149 SDValue AlternativeReplacement = SDValue()) { 5150 SDValue Replacement; 5151 // Is there a value for which the Predicate does *NOT* match? What is it? 5152 auto SplatValue = llvm::find_if_not(Values, Predicate); 5153 if (SplatValue != Values.end()) { 5154 // Does Values consist only of SplatValue's and values matching Predicate? 5155 if (llvm::all_of(Values, [Predicate, SplatValue](SDValue Value) { 5156 return Value == *SplatValue || Predicate(Value); 5157 })) // Then we shall replace values matching predicate with SplatValue. 5158 Replacement = *SplatValue; 5159 } 5160 if (!Replacement) { 5161 // Oops, we did not find the "baseline" splat value. 5162 if (!AlternativeReplacement) 5163 return; // Nothing to do. 5164 // Let's replace with provided value then. 5165 Replacement = AlternativeReplacement; 5166 } 5167 std::replace_if(Values.begin(), Values.end(), Predicate, Replacement); 5168 } 5169 5170 /// Given an ISD::UREM used only by an ISD::SETEQ or ISD::SETNE 5171 /// where the divisor is constant and the comparison target is zero, 5172 /// return a DAG expression that will generate the same comparison result 5173 /// using only multiplications, additions and shifts/rotations. 5174 /// Ref: "Hacker's Delight" 10-17. 5175 SDValue TargetLowering::buildUREMEqFold(EVT SETCCVT, SDValue REMNode, 5176 SDValue CompTargetNode, 5177 ISD::CondCode Cond, 5178 DAGCombinerInfo &DCI, 5179 const SDLoc &DL) const { 5180 SmallVector<SDNode *, 5> Built; 5181 if (SDValue Folded = prepareUREMEqFold(SETCCVT, REMNode, CompTargetNode, Cond, 5182 DCI, DL, Built)) { 5183 for (SDNode *N : Built) 5184 DCI.AddToWorklist(N); 5185 return Folded; 5186 } 5187 5188 return SDValue(); 5189 } 5190 5191 SDValue 5192 TargetLowering::prepareUREMEqFold(EVT SETCCVT, SDValue REMNode, 5193 SDValue CompTargetNode, ISD::CondCode Cond, 5194 DAGCombinerInfo &DCI, const SDLoc &DL, 5195 SmallVectorImpl<SDNode *> &Created) const { 5196 // fold (seteq/ne (urem N, D), 0) -> (setule/ugt (rotr (mul N, P), K), Q) 5197 // - D must be constant, with D = D0 * 2^K where D0 is odd 5198 // - P is the multiplicative inverse of D0 modulo 2^W 5199 // - Q = floor(((2^W) - 1) / D) 5200 // where W is the width of the common type of N and D. 5201 assert((Cond == ISD::SETEQ || Cond == ISD::SETNE) && 5202 "Only applicable for (in)equality comparisons."); 5203 5204 SelectionDAG &DAG = DCI.DAG; 5205 5206 EVT VT = REMNode.getValueType(); 5207 EVT SVT = VT.getScalarType(); 5208 EVT ShVT = getShiftAmountTy(VT, DAG.getDataLayout()); 5209 EVT ShSVT = ShVT.getScalarType(); 5210 5211 // If MUL is unavailable, we cannot proceed in any case. 5212 if (!isOperationLegalOrCustom(ISD::MUL, VT)) 5213 return SDValue(); 5214 5215 bool ComparingWithAllZeros = true; 5216 bool AllComparisonsWithNonZerosAreTautological = true; 5217 bool HadTautologicalLanes = false; 5218 bool AllLanesAreTautological = true; 5219 bool HadEvenDivisor = false; 5220 bool AllDivisorsArePowerOfTwo = true; 5221 bool HadTautologicalInvertedLanes = false; 5222 SmallVector<SDValue, 16> PAmts, KAmts, QAmts, IAmts; 5223 5224 auto BuildUREMPattern = [&](ConstantSDNode *CDiv, ConstantSDNode *CCmp) { 5225 // Division by 0 is UB. Leave it to be constant-folded elsewhere. 
5226 if (CDiv->isNullValue())
5227 return false;
5228
5229 const APInt &D = CDiv->getAPIntValue();
5230 const APInt &Cmp = CCmp->getAPIntValue();
5231
5232 ComparingWithAllZeros &= Cmp.isNullValue();
5233
5234 // `x u% C1` is *always* less than C1. So given `x u% C1 == C2`,
5235 // if C2 is not less than C1, the comparison is always false.
5236 // But we will only be able to produce the comparison that will give the
5237 // opposite tautological answer. So this lane would need to be fixed up.
5238 bool TautologicalInvertedLane = D.ule(Cmp);
5239 HadTautologicalInvertedLanes |= TautologicalInvertedLane;
5240
5241 // If all lanes are tautological (either all divisors are ones, or divisor
5242 // is not greater than the constant we are comparing with),
5243 // we will prefer to avoid the fold.
5244 bool TautologicalLane = D.isOneValue() || TautologicalInvertedLane;
5245 HadTautologicalLanes |= TautologicalLane;
5246 AllLanesAreTautological &= TautologicalLane;
5247
5248 // If we are comparing with non-zero, we'll need to subtract said
5249 // comparison value from the LHS. But there is no point in doing that if
5250 // every lane where we are comparing with non-zero is tautological.
5251 if (!Cmp.isNullValue())
5252 AllComparisonsWithNonZerosAreTautological &= TautologicalLane;
5253
5254 // Decompose D into D0 * 2^K
5255 unsigned K = D.countTrailingZeros();
5256 assert((!D.isOneValue() || (K == 0)) && "For divisor '1' we won't rotate.");
5257 APInt D0 = D.lshr(K);
5258
5259 // D is even if it has trailing zeros.
5260 HadEvenDivisor |= (K != 0);
5261 // D is a power-of-two if D0 is one.
5262 // If all divisors are power-of-two, we will prefer to avoid the fold.
5263 AllDivisorsArePowerOfTwo &= D0.isOneValue();
5264
5265 // P = inv(D0, 2^W)
5266 // 2^W requires W + 1 bits, so we have to extend and then truncate.
5267 unsigned W = D.getBitWidth();
5268 APInt P = D0.zext(W + 1)
5269 .multiplicativeInverse(APInt::getSignedMinValue(W + 1))
5270 .trunc(W);
5271 assert(!P.isNullValue() && "No multiplicative inverse!"); // unreachable
5272 assert((D0 * P).isOneValue() && "Multiplicative inverse sanity check.");
5273
5274 // Q = floor((2^W - 1) u/ D)
5275 // R = ((2^W - 1) u% D)
5276 APInt Q, R;
5277 APInt::udivrem(APInt::getAllOnesValue(W), D, Q, R);
5278
5279 // If we are comparing with zero, then that comparison constant is okay,
5280 // else it may need to be one less than that.
5281 if (Cmp.ugt(R))
5282 Q -= 1;
5283
5284 assert(APInt::getAllOnesValue(ShSVT.getSizeInBits()).ugt(K) &&
5285 "We are expecting that K is always less than all-ones for ShSVT");
5286
5287 // If the lane is tautological the result can be constant-folded.
5288 if (TautologicalLane) {
5289 // Set P and K to bogus values so we can try to splat them.
5290 P = 0;
5291 K = -1;
5292 // And ensure that the comparison constant is tautological,
5293 // i.e. it will always compare true/false.
5294 Q = -1;
5295 }
5296
5297 PAmts.push_back(DAG.getConstant(P, DL, SVT));
5298 KAmts.push_back(
5299 DAG.getConstant(APInt(ShSVT.getSizeInBits(), K), DL, ShSVT));
5300 QAmts.push_back(DAG.getConstant(Q, DL, SVT));
5301 return true;
5302 };
5303
5304 SDValue N = REMNode.getOperand(0);
5305 SDValue D = REMNode.getOperand(1);
5306
5307 // Collect the values from each element.
5308 if (!ISD::matchBinaryPredicate(D, CompTargetNode, BuildUREMPattern))
5309 return SDValue();
5310
5311 // If all lanes are tautological, the result can be constant-folded.
5312 if (AllLanesAreTautological)
5313 return SDValue();
5314
5315 // If this is a urem by a power-of-two, avoid the fold since it can be
5316 // best implemented as a bit test.
5317 if (AllDivisorsArePowerOfTwo)
5318 return SDValue();
5319
5320 SDValue PVal, KVal, QVal;
5321 if (VT.isVector()) {
5322 if (HadTautologicalLanes) {
5323 // Try to turn PAmts into a splat, since we don't care about the values
5324 // that are currently '0'. If we can't, just keep '0's.
5325 turnVectorIntoSplatVector(PAmts, isNullConstant);
5326 // Try to turn KAmts into a splat, since we don't care about the values
5327 // that are currently '-1'. If we can't, change them to '0's.
5328 turnVectorIntoSplatVector(KAmts, isAllOnesConstant,
5329 DAG.getConstant(0, DL, ShSVT));
5330 }
5331
5332 PVal = DAG.getBuildVector(VT, DL, PAmts);
5333 KVal = DAG.getBuildVector(ShVT, DL, KAmts);
5334 QVal = DAG.getBuildVector(VT, DL, QAmts);
5335 } else {
5336 PVal = PAmts[0];
5337 KVal = KAmts[0];
5338 QVal = QAmts[0];
5339 }
5340
5341 if (!ComparingWithAllZeros && !AllComparisonsWithNonZerosAreTautological) {
5342 if (!isOperationLegalOrCustom(ISD::SUB, VT))
5343 return SDValue(); // FIXME: Could/should use `ISD::ADD`?
5344 assert(CompTargetNode.getValueType() == N.getValueType() &&
5345 "Expecting that the types on LHS and RHS of comparisons match.");
5346 N = DAG.getNode(ISD::SUB, DL, VT, N, CompTargetNode);
5347 }
5348
5349 // (mul N, P)
5350 SDValue Op0 = DAG.getNode(ISD::MUL, DL, VT, N, PVal);
5351 Created.push_back(Op0.getNode());
5352
5353 // Rotate right only if any divisor was even. We avoid rotates for all-odd
5354 // divisors as a performance improvement, since rotating by 0 is a no-op.
5355 if (HadEvenDivisor) {
5356 // We need ROTR to do this.
5357 if (!isOperationLegalOrCustom(ISD::ROTR, VT))
5358 return SDValue();
5359 SDNodeFlags Flags;
5360 Flags.setExact(true);
5361 // UREM: (rotr (mul N, P), K)
5362 Op0 = DAG.getNode(ISD::ROTR, DL, VT, Op0, KVal, Flags);
5363 Created.push_back(Op0.getNode());
5364 }
5365
5366 // UREM: (setule/setugt (rotr (mul N, P), K), Q)
5367 SDValue NewCC =
5368 DAG.getSetCC(DL, SETCCVT, Op0, QVal,
5369 ((Cond == ISD::SETEQ) ? ISD::SETULE : ISD::SETUGT));
5370 if (!HadTautologicalInvertedLanes)
5371 return NewCC;
5372
5373 // If any lanes previously compared always-false, the NewCC will give
5374 // an always-true result for them, so we need to fix up those lanes.
5375 // Or the other way around for inequality predicate.
5376 assert(VT.isVector() && "Can/should only get here for vectors.");
5377 Created.push_back(NewCC.getNode());
5378
5379 // `x u% C1` is *always* less than C1. So given `x u% C1 == C2`,
5380 // if C2 is not less than C1, the comparison is always false.
5381 // But we have produced the comparison that will give the
5382 // opposite tautological answer. So these lanes would need to be fixed up.
5383 SDValue TautologicalInvertedChannels =
5384 DAG.getSetCC(DL, SETCCVT, D, CompTargetNode, ISD::SETULE);
5385 Created.push_back(TautologicalInvertedChannels.getNode());
5386
5387 if (isOperationLegalOrCustom(ISD::VSELECT, SETCCVT)) {
5388 // If we have a vector select, let's replace the comparison results in the
5389 // affected lanes with the correct tautological result.
5390 SDValue Replacement = DAG.getBoolConstant(Cond == ISD::SETEQ ? false : true,
5391 DL, SETCCVT, SETCCVT);
5392 return DAG.getNode(ISD::VSELECT, DL, SETCCVT, TautologicalInvertedChannels,
5393 Replacement, NewCC);
5394 }
5395
5396 // Else, we can just invert the comparison result in the appropriate lanes.
5397 if (isOperationLegalOrCustom(ISD::XOR, SETCCVT)) 5398 return DAG.getNode(ISD::XOR, DL, SETCCVT, NewCC, 5399 TautologicalInvertedChannels); 5400 5401 return SDValue(); // Don't know how to lower. 5402 } 5403 5404 /// Given an ISD::SREM used only by an ISD::SETEQ or ISD::SETNE 5405 /// where the divisor is constant and the comparison target is zero, 5406 /// return a DAG expression that will generate the same comparison result 5407 /// using only multiplications, additions and shifts/rotations. 5408 /// Ref: "Hacker's Delight" 10-17. 5409 SDValue TargetLowering::buildSREMEqFold(EVT SETCCVT, SDValue REMNode, 5410 SDValue CompTargetNode, 5411 ISD::CondCode Cond, 5412 DAGCombinerInfo &DCI, 5413 const SDLoc &DL) const { 5414 SmallVector<SDNode *, 7> Built; 5415 if (SDValue Folded = prepareSREMEqFold(SETCCVT, REMNode, CompTargetNode, Cond, 5416 DCI, DL, Built)) { 5417 assert(Built.size() <= 7 && "Max size prediction failed."); 5418 for (SDNode *N : Built) 5419 DCI.AddToWorklist(N); 5420 return Folded; 5421 } 5422 5423 return SDValue(); 5424 } 5425 5426 SDValue 5427 TargetLowering::prepareSREMEqFold(EVT SETCCVT, SDValue REMNode, 5428 SDValue CompTargetNode, ISD::CondCode Cond, 5429 DAGCombinerInfo &DCI, const SDLoc &DL, 5430 SmallVectorImpl<SDNode *> &Created) const { 5431 // Fold: 5432 // (seteq/ne (srem N, D), 0) 5433 // To: 5434 // (setule/ugt (rotr (add (mul N, P), A), K), Q) 5435 // 5436 // - D must be constant, with D = D0 * 2^K where D0 is odd 5437 // - P is the multiplicative inverse of D0 modulo 2^W 5438 // - A = bitwiseand(floor((2^(W - 1) - 1) / D0), (-(2^k))) 5439 // - Q = floor((2 * A) / (2^K)) 5440 // where W is the width of the common type of N and D. 5441 assert((Cond == ISD::SETEQ || Cond == ISD::SETNE) && 5442 "Only applicable for (in)equality comparisons."); 5443 5444 SelectionDAG &DAG = DCI.DAG; 5445 5446 EVT VT = REMNode.getValueType(); 5447 EVT SVT = VT.getScalarType(); 5448 EVT ShVT = getShiftAmountTy(VT, DAG.getDataLayout()); 5449 EVT ShSVT = ShVT.getScalarType(); 5450 5451 // If MUL is unavailable, we cannot proceed in any case. 5452 if (!isOperationLegalOrCustom(ISD::MUL, VT)) 5453 return SDValue(); 5454 5455 // TODO: Could support comparing with non-zero too. 5456 ConstantSDNode *CompTarget = isConstOrConstSplat(CompTargetNode); 5457 if (!CompTarget || !CompTarget->isNullValue()) 5458 return SDValue(); 5459 5460 bool HadIntMinDivisor = false; 5461 bool HadOneDivisor = false; 5462 bool AllDivisorsAreOnes = true; 5463 bool HadEvenDivisor = false; 5464 bool NeedToApplyOffset = false; 5465 bool AllDivisorsArePowerOfTwo = true; 5466 SmallVector<SDValue, 16> PAmts, AAmts, KAmts, QAmts; 5467 5468 auto BuildSREMPattern = [&](ConstantSDNode *C) { 5469 // Division by 0 is UB. Leave it to be constant-folded elsewhere. 5470 if (C->isNullValue()) 5471 return false; 5472 5473 // FIXME: we don't fold `rem %X, -C` to `rem %X, C` in DAGCombine. 5474 5475 // WARNING: this fold is only valid for positive divisors! 5476 APInt D = C->getAPIntValue(); 5477 if (D.isNegative()) 5478 D.negate(); // `rem %X, -C` is equivalent to `rem %X, C` 5479 5480 HadIntMinDivisor |= D.isMinSignedValue(); 5481 5482 // If all divisors are ones, we will prefer to avoid the fold. 
5483 HadOneDivisor |= D.isOneValue(); 5484 AllDivisorsAreOnes &= D.isOneValue(); 5485 5486 // Decompose D into D0 * 2^K 5487 unsigned K = D.countTrailingZeros(); 5488 assert((!D.isOneValue() || (K == 0)) && "For divisor '1' we won't rotate."); 5489 APInt D0 = D.lshr(K); 5490 5491 if (!D.isMinSignedValue()) { 5492 // D is even if it has trailing zeros; unless it's INT_MIN, in which case 5493 // we don't care about this lane in this fold, we'll special-handle it. 5494 HadEvenDivisor |= (K != 0); 5495 } 5496 5497 // D is a power-of-two if D0 is one. This includes INT_MIN. 5498 // If all divisors are power-of-two, we will prefer to avoid the fold. 5499 AllDivisorsArePowerOfTwo &= D0.isOneValue(); 5500 5501 // P = inv(D0, 2^W) 5502 // 2^W requires W + 1 bits, so we have to extend and then truncate. 5503 unsigned W = D.getBitWidth(); 5504 APInt P = D0.zext(W + 1) 5505 .multiplicativeInverse(APInt::getSignedMinValue(W + 1)) 5506 .trunc(W); 5507 assert(!P.isNullValue() && "No multiplicative inverse!"); // unreachable 5508 assert((D0 * P).isOneValue() && "Multiplicative inverse sanity check."); 5509 5510 // A = floor((2^(W - 1) - 1) / D0) & -2^K 5511 APInt A = APInt::getSignedMaxValue(W).udiv(D0); 5512 A.clearLowBits(K); 5513 5514 if (!D.isMinSignedValue()) { 5515 // If divisor INT_MIN, then we don't care about this lane in this fold, 5516 // we'll special-handle it. 5517 NeedToApplyOffset |= A != 0; 5518 } 5519 5520 // Q = floor((2 * A) / (2^K)) 5521 APInt Q = (2 * A).udiv(APInt::getOneBitSet(W, K)); 5522 5523 assert(APInt::getAllOnesValue(SVT.getSizeInBits()).ugt(A) && 5524 "We are expecting that A is always less than all-ones for SVT"); 5525 assert(APInt::getAllOnesValue(ShSVT.getSizeInBits()).ugt(K) && 5526 "We are expecting that K is always less than all-ones for ShSVT"); 5527 5528 // If the divisor is 1 the result can be constant-folded. Likewise, we 5529 // don't care about INT_MIN lanes, those can be set to undef if appropriate. 5530 if (D.isOneValue()) { 5531 // Set P, A and K to a bogus values so we can try to splat them. 5532 P = 0; 5533 A = -1; 5534 K = -1; 5535 5536 // x ?% 1 == 0 <--> true <--> x u<= -1 5537 Q = -1; 5538 } 5539 5540 PAmts.push_back(DAG.getConstant(P, DL, SVT)); 5541 AAmts.push_back(DAG.getConstant(A, DL, SVT)); 5542 KAmts.push_back( 5543 DAG.getConstant(APInt(ShSVT.getSizeInBits(), K), DL, ShSVT)); 5544 QAmts.push_back(DAG.getConstant(Q, DL, SVT)); 5545 return true; 5546 }; 5547 5548 SDValue N = REMNode.getOperand(0); 5549 SDValue D = REMNode.getOperand(1); 5550 5551 // Collect the values from each element. 5552 if (!ISD::matchUnaryPredicate(D, BuildSREMPattern)) 5553 return SDValue(); 5554 5555 // If this is a srem by a one, avoid the fold since it can be constant-folded. 5556 if (AllDivisorsAreOnes) 5557 return SDValue(); 5558 5559 // If this is a srem by a powers-of-two (including INT_MIN), avoid the fold 5560 // since it can be best implemented as a bit test. 5561 if (AllDivisorsArePowerOfTwo) 5562 return SDValue(); 5563 5564 SDValue PVal, AVal, KVal, QVal; 5565 if (VT.isVector()) { 5566 if (HadOneDivisor) { 5567 // Try to turn PAmts into a splat, since we don't care about the values 5568 // that are currently '0'. If we can't, just keep '0'`s. 5569 turnVectorIntoSplatVector(PAmts, isNullConstant); 5570 // Try to turn AAmts into a splat, since we don't care about the 5571 // values that are currently '-1'. If we can't, change them to '0'`s. 
5572 turnVectorIntoSplatVector(AAmts, isAllOnesConstant, 5573 DAG.getConstant(0, DL, SVT)); 5574 // Try to turn KAmts into a splat, since we don't care about the values 5575 // that are currently '-1'. If we can't, change them to '0'`s. 5576 turnVectorIntoSplatVector(KAmts, isAllOnesConstant, 5577 DAG.getConstant(0, DL, ShSVT)); 5578 } 5579 5580 PVal = DAG.getBuildVector(VT, DL, PAmts); 5581 AVal = DAG.getBuildVector(VT, DL, AAmts); 5582 KVal = DAG.getBuildVector(ShVT, DL, KAmts); 5583 QVal = DAG.getBuildVector(VT, DL, QAmts); 5584 } else { 5585 PVal = PAmts[0]; 5586 AVal = AAmts[0]; 5587 KVal = KAmts[0]; 5588 QVal = QAmts[0]; 5589 } 5590 5591 // (mul N, P) 5592 SDValue Op0 = DAG.getNode(ISD::MUL, DL, VT, N, PVal); 5593 Created.push_back(Op0.getNode()); 5594 5595 if (NeedToApplyOffset) { 5596 // We need ADD to do this. 5597 if (!isOperationLegalOrCustom(ISD::ADD, VT)) 5598 return SDValue(); 5599 5600 // (add (mul N, P), A) 5601 Op0 = DAG.getNode(ISD::ADD, DL, VT, Op0, AVal); 5602 Created.push_back(Op0.getNode()); 5603 } 5604 5605 // Rotate right only if any divisor was even. We avoid rotates for all-odd 5606 // divisors as a performance improvement, since rotating by 0 is a no-op. 5607 if (HadEvenDivisor) { 5608 // We need ROTR to do this. 5609 if (!isOperationLegalOrCustom(ISD::ROTR, VT)) 5610 return SDValue(); 5611 SDNodeFlags Flags; 5612 Flags.setExact(true); 5613 // SREM: (rotr (add (mul N, P), A), K) 5614 Op0 = DAG.getNode(ISD::ROTR, DL, VT, Op0, KVal, Flags); 5615 Created.push_back(Op0.getNode()); 5616 } 5617 5618 // SREM: (setule/setugt (rotr (add (mul N, P), A), K), Q) 5619 SDValue Fold = 5620 DAG.getSetCC(DL, SETCCVT, Op0, QVal, 5621 ((Cond == ISD::SETEQ) ? ISD::SETULE : ISD::SETUGT)); 5622 5623 // If we didn't have lanes with INT_MIN divisor, then we're done. 5624 if (!HadIntMinDivisor) 5625 return Fold; 5626 5627 // That fold is only valid for positive divisors. Which effectively means, 5628 // it is invalid for INT_MIN divisors. So if we have such a lane, 5629 // we must fix-up results for said lanes. 5630 assert(VT.isVector() && "Can/should only get here for vectors."); 5631 5632 if (!isOperationLegalOrCustom(ISD::SETEQ, VT) || 5633 !isOperationLegalOrCustom(ISD::AND, VT) || 5634 !isOperationLegalOrCustom(Cond, VT) || 5635 !isOperationLegalOrCustom(ISD::VSELECT, VT)) 5636 return SDValue(); 5637 5638 Created.push_back(Fold.getNode()); 5639 5640 SDValue IntMin = DAG.getConstant( 5641 APInt::getSignedMinValue(SVT.getScalarSizeInBits()), DL, VT); 5642 SDValue IntMax = DAG.getConstant( 5643 APInt::getSignedMaxValue(SVT.getScalarSizeInBits()), DL, VT); 5644 SDValue Zero = 5645 DAG.getConstant(APInt::getNullValue(SVT.getScalarSizeInBits()), DL, VT); 5646 5647 // Which lanes had INT_MIN divisors? Divisor is constant, so const-folded. 5648 SDValue DivisorIsIntMin = DAG.getSetCC(DL, SETCCVT, D, IntMin, ISD::SETEQ); 5649 Created.push_back(DivisorIsIntMin.getNode()); 5650 5651 // (N s% INT_MIN) ==/!= 0 <--> (N & INT_MAX) ==/!= 0 5652 SDValue Masked = DAG.getNode(ISD::AND, DL, VT, N, IntMax); 5653 Created.push_back(Masked.getNode()); 5654 SDValue MaskedIsZero = DAG.getSetCC(DL, SETCCVT, Masked, Zero, Cond); 5655 Created.push_back(MaskedIsZero.getNode()); 5656 5657 // To produce final result we need to blend 2 vectors: 'SetCC' and 5658 // 'MaskedIsZero'. If the divisor for channel was *NOT* INT_MIN, we pick 5659 // from 'Fold', else pick from 'MaskedIsZero'. Since 'DivisorIsIntMin' is 5660 // constant-folded, select can get lowered to a shuffle with constant mask. 
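// (Illustration, not from the original comments: for a <4 x i32> srem by
// <7, INT_MIN, 7, 7>, only the second lane takes the MaskedIsZero path; the
// other lanes keep the rotate-and-compare result from 'Fold'.)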
5661 SDValue Blended = 5662 DAG.getNode(ISD::VSELECT, DL, VT, DivisorIsIntMin, MaskedIsZero, Fold); 5663 5664 return Blended; 5665 } 5666 5667 bool TargetLowering:: 5668 verifyReturnAddressArgumentIsConstant(SDValue Op, SelectionDAG &DAG) const { 5669 if (!isa<ConstantSDNode>(Op.getOperand(0))) { 5670 DAG.getContext()->emitError("argument to '__builtin_return_address' must " 5671 "be a constant integer"); 5672 return true; 5673 } 5674 5675 return false; 5676 } 5677 5678 SDValue TargetLowering::getNegatedExpression(SDValue Op, SelectionDAG &DAG, 5679 bool LegalOps, bool OptForSize, 5680 NegatibleCost &Cost, 5681 unsigned Depth) const { 5682 // fneg is removable even if it has multiple uses. 5683 if (Op.getOpcode() == ISD::FNEG) { 5684 Cost = NegatibleCost::Cheaper; 5685 return Op.getOperand(0); 5686 } 5687 5688 // Don't recurse exponentially. 5689 if (Depth > SelectionDAG::MaxRecursionDepth) 5690 return SDValue(); 5691 5692 // Pre-increment recursion depth for use in recursive calls. 5693 ++Depth; 5694 const SDNodeFlags Flags = Op->getFlags(); 5695 const TargetOptions &Options = DAG.getTarget().Options; 5696 EVT VT = Op.getValueType(); 5697 unsigned Opcode = Op.getOpcode(); 5698 5699 // Don't allow anything with multiple uses unless we know it is free. 5700 if (!Op.hasOneUse() && Opcode != ISD::ConstantFP) { 5701 bool IsFreeExtend = Opcode == ISD::FP_EXTEND && 5702 isFPExtFree(VT, Op.getOperand(0).getValueType()); 5703 if (!IsFreeExtend) 5704 return SDValue(); 5705 } 5706 5707 SDLoc DL(Op); 5708 5709 switch (Opcode) { 5710 case ISD::ConstantFP: { 5711 // Don't invert constant FP values after legalization unless the target says 5712 // the negated constant is legal. 5713 bool IsOpLegal = 5714 isOperationLegal(ISD::ConstantFP, VT) || 5715 isFPImmLegal(neg(cast<ConstantFPSDNode>(Op)->getValueAPF()), VT, 5716 OptForSize); 5717 5718 if (LegalOps && !IsOpLegal) 5719 break; 5720 5721 APFloat V = cast<ConstantFPSDNode>(Op)->getValueAPF(); 5722 V.changeSign(); 5723 SDValue CFP = DAG.getConstantFP(V, DL, VT); 5724 5725 // If we already have the use of the negated floating constant, it is free 5726 // to negate it even it has multiple uses. 5727 if (!Op.hasOneUse() && CFP.use_empty()) 5728 break; 5729 Cost = NegatibleCost::Neutral; 5730 return CFP; 5731 } 5732 case ISD::BUILD_VECTOR: { 5733 // Only permit BUILD_VECTOR of constants. 5734 if (llvm::any_of(Op->op_values(), [&](SDValue N) { 5735 return !N.isUndef() && !isa<ConstantFPSDNode>(N); 5736 })) 5737 break; 5738 5739 bool IsOpLegal = 5740 (isOperationLegal(ISD::ConstantFP, VT) && 5741 isOperationLegal(ISD::BUILD_VECTOR, VT)) || 5742 llvm::all_of(Op->op_values(), [&](SDValue N) { 5743 return N.isUndef() || 5744 isFPImmLegal(neg(cast<ConstantFPSDNode>(N)->getValueAPF()), VT, 5745 OptForSize); 5746 }); 5747 5748 if (LegalOps && !IsOpLegal) 5749 break; 5750 5751 SmallVector<SDValue, 4> Ops; 5752 for (SDValue C : Op->op_values()) { 5753 if (C.isUndef()) { 5754 Ops.push_back(C); 5755 continue; 5756 } 5757 APFloat V = cast<ConstantFPSDNode>(C)->getValueAPF(); 5758 V.changeSign(); 5759 Ops.push_back(DAG.getConstantFP(V, DL, C.getValueType())); 5760 } 5761 Cost = NegatibleCost::Neutral; 5762 return DAG.getBuildVector(VT, DL, Ops); 5763 } 5764 case ISD::FADD: { 5765 if (!Options.NoSignedZerosFPMath && !Flags.hasNoSignedZeros()) 5766 break; 5767 5768 // After operation legalization, it might not be legal to create new FSUBs. 
5769 if (LegalOps && !isOperationLegalOrCustom(ISD::FSUB, VT)) 5770 break; 5771 SDValue X = Op.getOperand(0), Y = Op.getOperand(1); 5772 5773 // fold (fneg (fadd X, Y)) -> (fsub (fneg X), Y) 5774 NegatibleCost CostX = NegatibleCost::Expensive; 5775 SDValue NegX = 5776 getNegatedExpression(X, DAG, LegalOps, OptForSize, CostX, Depth); 5777 // fold (fneg (fadd X, Y)) -> (fsub (fneg Y), X) 5778 NegatibleCost CostY = NegatibleCost::Expensive; 5779 SDValue NegY = 5780 getNegatedExpression(Y, DAG, LegalOps, OptForSize, CostY, Depth); 5781 5782 // Negate the X if its cost is less or equal than Y. 5783 if (NegX && (CostX <= CostY)) { 5784 Cost = CostX; 5785 return DAG.getNode(ISD::FSUB, DL, VT, NegX, Y, Flags); 5786 } 5787 5788 // Negate the Y if it is not expensive. 5789 if (NegY) { 5790 Cost = CostY; 5791 return DAG.getNode(ISD::FSUB, DL, VT, NegY, X, Flags); 5792 } 5793 break; 5794 } 5795 case ISD::FSUB: { 5796 // We can't turn -(A-B) into B-A when we honor signed zeros. 5797 if (!Options.NoSignedZerosFPMath && !Flags.hasNoSignedZeros()) 5798 break; 5799 5800 SDValue X = Op.getOperand(0), Y = Op.getOperand(1); 5801 // fold (fneg (fsub 0, Y)) -> Y 5802 if (ConstantFPSDNode *C = isConstOrConstSplatFP(X, /*AllowUndefs*/ true)) 5803 if (C->isZero()) { 5804 Cost = NegatibleCost::Cheaper; 5805 return Y; 5806 } 5807 5808 // fold (fneg (fsub X, Y)) -> (fsub Y, X) 5809 Cost = NegatibleCost::Neutral; 5810 return DAG.getNode(ISD::FSUB, DL, VT, Y, X, Flags); 5811 } 5812 case ISD::FMUL: 5813 case ISD::FDIV: { 5814 SDValue X = Op.getOperand(0), Y = Op.getOperand(1); 5815 5816 // fold (fneg (fmul X, Y)) -> (fmul (fneg X), Y) 5817 NegatibleCost CostX = NegatibleCost::Expensive; 5818 SDValue NegX = 5819 getNegatedExpression(X, DAG, LegalOps, OptForSize, CostX, Depth); 5820 // fold (fneg (fmul X, Y)) -> (fmul X, (fneg Y)) 5821 NegatibleCost CostY = NegatibleCost::Expensive; 5822 SDValue NegY = 5823 getNegatedExpression(Y, DAG, LegalOps, OptForSize, CostY, Depth); 5824 5825 // Negate the X if its cost is less or equal than Y. 5826 if (NegX && (CostX <= CostY)) { 5827 Cost = CostX; 5828 return DAG.getNode(Opcode, DL, VT, NegX, Y, Flags); 5829 } 5830 5831 // Ignore X * 2.0 because that is expected to be canonicalized to X + X. 5832 if (auto *C = isConstOrConstSplatFP(Op.getOperand(1))) 5833 if (C->isExactlyValue(2.0) && Op.getOpcode() == ISD::FMUL) 5834 break; 5835 5836 // Negate the Y if it is not expensive. 5837 if (NegY) { 5838 Cost = CostY; 5839 return DAG.getNode(Opcode, DL, VT, X, NegY, Flags); 5840 } 5841 break; 5842 } 5843 case ISD::FMA: 5844 case ISD::FMAD: { 5845 if (!Options.NoSignedZerosFPMath && !Flags.hasNoSignedZeros()) 5846 break; 5847 5848 SDValue X = Op.getOperand(0), Y = Op.getOperand(1), Z = Op.getOperand(2); 5849 NegatibleCost CostZ = NegatibleCost::Expensive; 5850 SDValue NegZ = 5851 getNegatedExpression(Z, DAG, LegalOps, OptForSize, CostZ, Depth); 5852 // Give up if fail to negate the Z. 5853 if (!NegZ) 5854 break; 5855 5856 // fold (fneg (fma X, Y, Z)) -> (fma (fneg X), Y, (fneg Z)) 5857 NegatibleCost CostX = NegatibleCost::Expensive; 5858 SDValue NegX = 5859 getNegatedExpression(X, DAG, LegalOps, OptForSize, CostX, Depth); 5860 // fold (fneg (fma X, Y, Z)) -> (fma X, (fneg Y), (fneg Z)) 5861 NegatibleCost CostY = NegatibleCost::Expensive; 5862 SDValue NegY = 5863 getNegatedExpression(Y, DAG, LegalOps, OptForSize, CostY, Depth); 5864 5865 // Negate the X if its cost is less or equal than Y. 
5866 if (NegX && (CostX <= CostY)) { 5867 Cost = std::min(CostX, CostZ); 5868 return DAG.getNode(Opcode, DL, VT, NegX, Y, NegZ, Flags); 5869 } 5870 5871 // Negate the Y if it is not expensive. 5872 if (NegY) { 5873 Cost = std::min(CostY, CostZ); 5874 return DAG.getNode(Opcode, DL, VT, X, NegY, NegZ, Flags); 5875 } 5876 break; 5877 } 5878 5879 case ISD::FP_EXTEND: 5880 case ISD::FSIN: 5881 if (SDValue NegV = getNegatedExpression(Op.getOperand(0), DAG, LegalOps, 5882 OptForSize, Cost, Depth)) 5883 return DAG.getNode(Opcode, DL, VT, NegV); 5884 break; 5885 case ISD::FP_ROUND: 5886 if (SDValue NegV = getNegatedExpression(Op.getOperand(0), DAG, LegalOps, 5887 OptForSize, Cost, Depth)) 5888 return DAG.getNode(ISD::FP_ROUND, DL, VT, NegV, Op.getOperand(1)); 5889 break; 5890 } 5891 5892 return SDValue(); 5893 } 5894 5895 //===----------------------------------------------------------------------===// 5896 // Legalization Utilities 5897 //===----------------------------------------------------------------------===// 5898 5899 bool TargetLowering::expandMUL_LOHI(unsigned Opcode, EVT VT, SDLoc dl, 5900 SDValue LHS, SDValue RHS, 5901 SmallVectorImpl<SDValue> &Result, 5902 EVT HiLoVT, SelectionDAG &DAG, 5903 MulExpansionKind Kind, SDValue LL, 5904 SDValue LH, SDValue RL, SDValue RH) const { 5905 assert(Opcode == ISD::MUL || Opcode == ISD::UMUL_LOHI || 5906 Opcode == ISD::SMUL_LOHI); 5907 5908 bool HasMULHS = (Kind == MulExpansionKind::Always) || 5909 isOperationLegalOrCustom(ISD::MULHS, HiLoVT); 5910 bool HasMULHU = (Kind == MulExpansionKind::Always) || 5911 isOperationLegalOrCustom(ISD::MULHU, HiLoVT); 5912 bool HasSMUL_LOHI = (Kind == MulExpansionKind::Always) || 5913 isOperationLegalOrCustom(ISD::SMUL_LOHI, HiLoVT); 5914 bool HasUMUL_LOHI = (Kind == MulExpansionKind::Always) || 5915 isOperationLegalOrCustom(ISD::UMUL_LOHI, HiLoVT); 5916 5917 if (!HasMULHU && !HasMULHS && !HasUMUL_LOHI && !HasSMUL_LOHI) 5918 return false; 5919 5920 unsigned OuterBitSize = VT.getScalarSizeInBits(); 5921 unsigned InnerBitSize = HiLoVT.getScalarSizeInBits(); 5922 unsigned LHSSB = DAG.ComputeNumSignBits(LHS); 5923 unsigned RHSSB = DAG.ComputeNumSignBits(RHS); 5924 5925 // LL, LH, RL, and RH must be either all NULL or all set to a value. 5926 assert((LL.getNode() && LH.getNode() && RL.getNode() && RH.getNode()) || 5927 (!LL.getNode() && !LH.getNode() && !RL.getNode() && !RH.getNode())); 5928 5929 SDVTList VTs = DAG.getVTList(HiLoVT, HiLoVT); 5930 auto MakeMUL_LOHI = [&](SDValue L, SDValue R, SDValue &Lo, SDValue &Hi, 5931 bool Signed) -> bool { 5932 if ((Signed && HasSMUL_LOHI) || (!Signed && HasUMUL_LOHI)) { 5933 Lo = DAG.getNode(Signed ? ISD::SMUL_LOHI : ISD::UMUL_LOHI, dl, VTs, L, R); 5934 Hi = SDValue(Lo.getNode(), 1); 5935 return true; 5936 } 5937 if ((Signed && HasMULHS) || (!Signed && HasMULHU)) { 5938 Lo = DAG.getNode(ISD::MUL, dl, HiLoVT, L, R); 5939 Hi = DAG.getNode(Signed ? ISD::MULHS : ISD::MULHU, dl, HiLoVT, L, R); 5940 return true; 5941 } 5942 return false; 5943 }; 5944 5945 SDValue Lo, Hi; 5946 5947 if (!LL.getNode() && !RL.getNode() && 5948 isOperationLegalOrCustom(ISD::TRUNCATE, HiLoVT)) { 5949 LL = DAG.getNode(ISD::TRUNCATE, dl, HiLoVT, LHS); 5950 RL = DAG.getNode(ISD::TRUNCATE, dl, HiLoVT, RHS); 5951 } 5952 5953 if (!LL.getNode()) 5954 return false; 5955 5956 APInt HighMask = APInt::getHighBitsSet(OuterBitSize, InnerBitSize); 5957 if (DAG.MaskedValueIsZero(LHS, HighMask) && 5958 DAG.MaskedValueIsZero(RHS, HighMask)) { 5959 // The inputs are both zero-extended. 
5960 if (MakeMUL_LOHI(LL, RL, Lo, Hi, false)) { 5961 Result.push_back(Lo); 5962 Result.push_back(Hi); 5963 if (Opcode != ISD::MUL) { 5964 SDValue Zero = DAG.getConstant(0, dl, HiLoVT); 5965 Result.push_back(Zero); 5966 Result.push_back(Zero); 5967 } 5968 return true; 5969 } 5970 } 5971 5972 if (!VT.isVector() && Opcode == ISD::MUL && LHSSB > InnerBitSize && 5973 RHSSB > InnerBitSize) { 5974 // The input values are both sign-extended. 5975 // TODO non-MUL case? 5976 if (MakeMUL_LOHI(LL, RL, Lo, Hi, true)) { 5977 Result.push_back(Lo); 5978 Result.push_back(Hi); 5979 return true; 5980 } 5981 } 5982 5983 unsigned ShiftAmount = OuterBitSize - InnerBitSize; 5984 EVT ShiftAmountTy = getShiftAmountTy(VT, DAG.getDataLayout()); 5985 if (APInt::getMaxValue(ShiftAmountTy.getSizeInBits()).ult(ShiftAmount)) { 5986 // FIXME getShiftAmountTy does not always return a sensible result when VT 5987 // is an illegal type, and so the type may be too small to fit the shift 5988 // amount. Override it with i32. The shift will have to be legalized. 5989 ShiftAmountTy = MVT::i32; 5990 } 5991 SDValue Shift = DAG.getConstant(ShiftAmount, dl, ShiftAmountTy); 5992 5993 if (!LH.getNode() && !RH.getNode() && 5994 isOperationLegalOrCustom(ISD::SRL, VT) && 5995 isOperationLegalOrCustom(ISD::TRUNCATE, HiLoVT)) { 5996 LH = DAG.getNode(ISD::SRL, dl, VT, LHS, Shift); 5997 LH = DAG.getNode(ISD::TRUNCATE, dl, HiLoVT, LH); 5998 RH = DAG.getNode(ISD::SRL, dl, VT, RHS, Shift); 5999 RH = DAG.getNode(ISD::TRUNCATE, dl, HiLoVT, RH); 6000 } 6001 6002 if (!LH.getNode()) 6003 return false; 6004 6005 if (!MakeMUL_LOHI(LL, RL, Lo, Hi, false)) 6006 return false; 6007 6008 Result.push_back(Lo); 6009 6010 if (Opcode == ISD::MUL) { 6011 RH = DAG.getNode(ISD::MUL, dl, HiLoVT, LL, RH); 6012 LH = DAG.getNode(ISD::MUL, dl, HiLoVT, LH, RL); 6013 Hi = DAG.getNode(ISD::ADD, dl, HiLoVT, Hi, RH); 6014 Hi = DAG.getNode(ISD::ADD, dl, HiLoVT, Hi, LH); 6015 Result.push_back(Hi); 6016 return true; 6017 } 6018 6019 // Compute the full width result. 6020 auto Merge = [&](SDValue Lo, SDValue Hi) -> SDValue { 6021 Lo = DAG.getNode(ISD::ZERO_EXTEND, dl, VT, Lo); 6022 Hi = DAG.getNode(ISD::ZERO_EXTEND, dl, VT, Hi); 6023 Hi = DAG.getNode(ISD::SHL, dl, VT, Hi, Shift); 6024 return DAG.getNode(ISD::OR, dl, VT, Lo, Hi); 6025 }; 6026 6027 SDValue Next = DAG.getNode(ISD::ZERO_EXTEND, dl, VT, Hi); 6028 if (!MakeMUL_LOHI(LL, RH, Lo, Hi, false)) 6029 return false; 6030 6031 // This is effectively the add part of a multiply-add of half-sized operands, 6032 // so it cannot overflow. 
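// (Sketch of the bound, with Inner = HiLoVT's scalar bit width: the high
// half of LL*RL is at most 2^Inner - 2 and LL*RH is at most (2^Inner - 1)^2,
// so their sum is still below 2^(2*Inner).)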
6033 Next = DAG.getNode(ISD::ADD, dl, VT, Next, Merge(Lo, Hi)); 6034 6035 if (!MakeMUL_LOHI(LH, RL, Lo, Hi, false)) 6036 return false; 6037 6038 SDValue Zero = DAG.getConstant(0, dl, HiLoVT); 6039 EVT BoolType = getSetCCResultType(DAG.getDataLayout(), *DAG.getContext(), VT); 6040 6041 bool UseGlue = (isOperationLegalOrCustom(ISD::ADDC, VT) && 6042 isOperationLegalOrCustom(ISD::ADDE, VT)); 6043 if (UseGlue) 6044 Next = DAG.getNode(ISD::ADDC, dl, DAG.getVTList(VT, MVT::Glue), Next, 6045 Merge(Lo, Hi)); 6046 else 6047 Next = DAG.getNode(ISD::ADDCARRY, dl, DAG.getVTList(VT, BoolType), Next, 6048 Merge(Lo, Hi), DAG.getConstant(0, dl, BoolType)); 6049 6050 SDValue Carry = Next.getValue(1); 6051 Result.push_back(DAG.getNode(ISD::TRUNCATE, dl, HiLoVT, Next)); 6052 Next = DAG.getNode(ISD::SRL, dl, VT, Next, Shift); 6053 6054 if (!MakeMUL_LOHI(LH, RH, Lo, Hi, Opcode == ISD::SMUL_LOHI)) 6055 return false; 6056 6057 if (UseGlue) 6058 Hi = DAG.getNode(ISD::ADDE, dl, DAG.getVTList(HiLoVT, MVT::Glue), Hi, Zero, 6059 Carry); 6060 else 6061 Hi = DAG.getNode(ISD::ADDCARRY, dl, DAG.getVTList(HiLoVT, BoolType), Hi, 6062 Zero, Carry); 6063 6064 Next = DAG.getNode(ISD::ADD, dl, VT, Next, Merge(Lo, Hi)); 6065 6066 if (Opcode == ISD::SMUL_LOHI) { 6067 SDValue NextSub = DAG.getNode(ISD::SUB, dl, VT, Next, 6068 DAG.getNode(ISD::ZERO_EXTEND, dl, VT, RL)); 6069 Next = DAG.getSelectCC(dl, LH, Zero, NextSub, Next, ISD::SETLT); 6070 6071 NextSub = DAG.getNode(ISD::SUB, dl, VT, Next, 6072 DAG.getNode(ISD::ZERO_EXTEND, dl, VT, LL)); 6073 Next = DAG.getSelectCC(dl, RH, Zero, NextSub, Next, ISD::SETLT); 6074 } 6075 6076 Result.push_back(DAG.getNode(ISD::TRUNCATE, dl, HiLoVT, Next)); 6077 Next = DAG.getNode(ISD::SRL, dl, VT, Next, Shift); 6078 Result.push_back(DAG.getNode(ISD::TRUNCATE, dl, HiLoVT, Next)); 6079 return true; 6080 } 6081 6082 bool TargetLowering::expandMUL(SDNode *N, SDValue &Lo, SDValue &Hi, EVT HiLoVT, 6083 SelectionDAG &DAG, MulExpansionKind Kind, 6084 SDValue LL, SDValue LH, SDValue RL, 6085 SDValue RH) const { 6086 SmallVector<SDValue, 2> Result; 6087 bool Ok = expandMUL_LOHI(N->getOpcode(), N->getValueType(0), N, 6088 N->getOperand(0), N->getOperand(1), Result, HiLoVT, 6089 DAG, Kind, LL, LH, RL, RH); 6090 if (Ok) { 6091 assert(Result.size() == 2); 6092 Lo = Result[0]; 6093 Hi = Result[1]; 6094 } 6095 return Ok; 6096 } 6097 6098 bool TargetLowering::expandFunnelShift(SDNode *Node, SDValue &Result, 6099 SelectionDAG &DAG) const { 6100 EVT VT = Node->getValueType(0); 6101 6102 if (VT.isVector() && (!isOperationLegalOrCustom(ISD::SHL, VT) || 6103 !isOperationLegalOrCustom(ISD::SRL, VT) || 6104 !isOperationLegalOrCustom(ISD::SUB, VT) || 6105 !isOperationLegalOrCustomOrPromote(ISD::OR, VT))) 6106 return false; 6107 6108 // fshl: X << (Z % BW) | Y >> 1 >> (BW - 1 - (Z % BW)) 6109 // fshr: X << 1 << (BW - 1 - (Z % BW)) | Y >> (Z % BW) 6110 SDValue X = Node->getOperand(0); 6111 SDValue Y = Node->getOperand(1); 6112 SDValue Z = Node->getOperand(2); 6113 6114 unsigned EltSizeInBits = VT.getScalarSizeInBits(); 6115 bool IsFSHL = Node->getOpcode() == ISD::FSHL; 6116 SDLoc DL(SDValue(Node, 0)); 6117 6118 EVT ShVT = Z.getValueType(); 6119 SDValue Mask = DAG.getConstant(EltSizeInBits - 1, DL, ShVT); 6120 SDValue ShAmt, InvShAmt; 6121 if (isPowerOf2_32(EltSizeInBits)) { 6122 // Z % BW -> Z & (BW - 1) 6123 ShAmt = DAG.getNode(ISD::AND, DL, ShVT, Z, Mask); 6124 // (BW - 1) - (Z % BW) -> ~Z & (BW - 1) 6125 InvShAmt = DAG.getNode(ISD::AND, DL, ShVT, DAG.getNOT(DL, Z, ShVT), Mask); 6126 } else { 6127 SDValue BitWidthC = 
DAG.getConstant(EltSizeInBits, DL, ShVT); 6128 ShAmt = DAG.getNode(ISD::UREM, DL, ShVT, Z, BitWidthC); 6129 InvShAmt = DAG.getNode(ISD::SUB, DL, ShVT, Mask, ShAmt); 6130 } 6131 6132 SDValue One = DAG.getConstant(1, DL, ShVT); 6133 SDValue ShX, ShY; 6134 if (IsFSHL) { 6135 ShX = DAG.getNode(ISD::SHL, DL, VT, X, ShAmt); 6136 SDValue ShY1 = DAG.getNode(ISD::SRL, DL, VT, Y, One); 6137 ShY = DAG.getNode(ISD::SRL, DL, VT, ShY1, InvShAmt); 6138 } else { 6139 SDValue ShX1 = DAG.getNode(ISD::SHL, DL, VT, X, One); 6140 ShX = DAG.getNode(ISD::SHL, DL, VT, ShX1, InvShAmt); 6141 ShY = DAG.getNode(ISD::SRL, DL, VT, Y, ShAmt); 6142 } 6143 Result = DAG.getNode(ISD::OR, DL, VT, ShX, ShY); 6144 return true; 6145 } 6146 6147 // TODO: Merge with expandFunnelShift. 6148 bool TargetLowering::expandROT(SDNode *Node, SDValue &Result, 6149 SelectionDAG &DAG) const { 6150 EVT VT = Node->getValueType(0); 6151 unsigned EltSizeInBits = VT.getScalarSizeInBits(); 6152 bool IsLeft = Node->getOpcode() == ISD::ROTL; 6153 SDValue Op0 = Node->getOperand(0); 6154 SDValue Op1 = Node->getOperand(1); 6155 SDLoc DL(SDValue(Node, 0)); 6156 6157 EVT ShVT = Op1.getValueType(); 6158 SDValue BitWidthC = DAG.getConstant(EltSizeInBits, DL, ShVT); 6159 6160 // If a rotate in the other direction is legal, use it. 6161 unsigned RevRot = IsLeft ? ISD::ROTR : ISD::ROTL; 6162 if (isOperationLegal(RevRot, VT)) { 6163 SDValue Sub = DAG.getNode(ISD::SUB, DL, ShVT, BitWidthC, Op1); 6164 Result = DAG.getNode(RevRot, DL, VT, Op0, Sub); 6165 return true; 6166 } 6167 6168 if (VT.isVector() && (!isOperationLegalOrCustom(ISD::SHL, VT) || 6169 !isOperationLegalOrCustom(ISD::SRL, VT) || 6170 !isOperationLegalOrCustom(ISD::SUB, VT) || 6171 !isOperationLegalOrCustomOrPromote(ISD::OR, VT) || 6172 !isOperationLegalOrCustomOrPromote(ISD::AND, VT))) 6173 return false; 6174 6175 // Otherwise, 6176 // (rotl x, c) -> (or (shl x, (and c, w-1)), (srl x, (and w-c, w-1))) 6177 // (rotr x, c) -> (or (srl x, (and c, w-1)), (shl x, (and w-c, w-1))) 6178 // 6179 assert(isPowerOf2_32(EltSizeInBits) && EltSizeInBits > 1 && 6180 "Expecting the type bitwidth to be a power of 2"); 6181 unsigned ShOpc = IsLeft ? ISD::SHL : ISD::SRL; 6182 unsigned HsOpc = IsLeft ? ISD::SRL : ISD::SHL; 6183 SDValue BitWidthMinusOneC = DAG.getConstant(EltSizeInBits - 1, DL, ShVT); 6184 SDValue NegOp1 = DAG.getNode(ISD::SUB, DL, ShVT, BitWidthC, Op1); 6185 SDValue And0 = DAG.getNode(ISD::AND, DL, ShVT, Op1, BitWidthMinusOneC); 6186 SDValue And1 = DAG.getNode(ISD::AND, DL, ShVT, NegOp1, BitWidthMinusOneC); 6187 Result = DAG.getNode(ISD::OR, DL, VT, DAG.getNode(ShOpc, DL, VT, Op0, And0), 6188 DAG.getNode(HsOpc, DL, VT, Op0, And1)); 6189 return true; 6190 } 6191 6192 bool TargetLowering::expandFP_TO_SINT(SDNode *Node, SDValue &Result, 6193 SelectionDAG &DAG) const { 6194 unsigned OpNo = Node->isStrictFPOpcode() ? 1 : 0; 6195 SDValue Src = Node->getOperand(OpNo); 6196 EVT SrcVT = Src.getValueType(); 6197 EVT DstVT = Node->getValueType(0); 6198 SDLoc dl(SDValue(Node, 0)); 6199 6200 // FIXME: Only f32 to i64 conversions are supported. 6201 if (SrcVT != MVT::f32 || DstVT != MVT::i64) 6202 return false; 6203 6204 if (Node->isStrictFPOpcode()) 6205 // When a NaN is converted to an integer a trap is allowed. We can't 6206 // use this expansion here because it would eliminate that trap. Other 6207 // traps are also allowed and cannot be eliminated. See 6208 // IEEE 754-2008 sec 5.8. 
6209 return false; 6210 6211 // Expand f32 -> i64 conversion 6212 // This algorithm comes from compiler-rt's implementation of fixsfdi: 6213 // https://github.com/llvm/llvm-project/blob/master/compiler-rt/lib/builtins/fixsfdi.c 6214 unsigned SrcEltBits = SrcVT.getScalarSizeInBits(); 6215 EVT IntVT = SrcVT.changeTypeToInteger(); 6216 EVT IntShVT = getShiftAmountTy(IntVT, DAG.getDataLayout()); 6217 6218 SDValue ExponentMask = DAG.getConstant(0x7F800000, dl, IntVT); 6219 SDValue ExponentLoBit = DAG.getConstant(23, dl, IntVT); 6220 SDValue Bias = DAG.getConstant(127, dl, IntVT); 6221 SDValue SignMask = DAG.getConstant(APInt::getSignMask(SrcEltBits), dl, IntVT); 6222 SDValue SignLowBit = DAG.getConstant(SrcEltBits - 1, dl, IntVT); 6223 SDValue MantissaMask = DAG.getConstant(0x007FFFFF, dl, IntVT); 6224 6225 SDValue Bits = DAG.getNode(ISD::BITCAST, dl, IntVT, Src); 6226 6227 SDValue ExponentBits = DAG.getNode( 6228 ISD::SRL, dl, IntVT, DAG.getNode(ISD::AND, dl, IntVT, Bits, ExponentMask), 6229 DAG.getZExtOrTrunc(ExponentLoBit, dl, IntShVT)); 6230 SDValue Exponent = DAG.getNode(ISD::SUB, dl, IntVT, ExponentBits, Bias); 6231 6232 SDValue Sign = DAG.getNode(ISD::SRA, dl, IntVT, 6233 DAG.getNode(ISD::AND, dl, IntVT, Bits, SignMask), 6234 DAG.getZExtOrTrunc(SignLowBit, dl, IntShVT)); 6235 Sign = DAG.getSExtOrTrunc(Sign, dl, DstVT); 6236 6237 SDValue R = DAG.getNode(ISD::OR, dl, IntVT, 6238 DAG.getNode(ISD::AND, dl, IntVT, Bits, MantissaMask), 6239 DAG.getConstant(0x00800000, dl, IntVT)); 6240 6241 R = DAG.getZExtOrTrunc(R, dl, DstVT); 6242 6243 R = DAG.getSelectCC( 6244 dl, Exponent, ExponentLoBit, 6245 DAG.getNode(ISD::SHL, dl, DstVT, R, 6246 DAG.getZExtOrTrunc( 6247 DAG.getNode(ISD::SUB, dl, IntVT, Exponent, ExponentLoBit), 6248 dl, IntShVT)), 6249 DAG.getNode(ISD::SRL, dl, DstVT, R, 6250 DAG.getZExtOrTrunc( 6251 DAG.getNode(ISD::SUB, dl, IntVT, ExponentLoBit, Exponent), 6252 dl, IntShVT)), 6253 ISD::SETGT); 6254 6255 SDValue Ret = DAG.getNode(ISD::SUB, dl, DstVT, 6256 DAG.getNode(ISD::XOR, dl, DstVT, R, Sign), Sign); 6257 6258 Result = DAG.getSelectCC(dl, Exponent, DAG.getConstant(0, dl, IntVT), 6259 DAG.getConstant(0, dl, DstVT), Ret, ISD::SETLT); 6260 return true; 6261 } 6262 6263 bool TargetLowering::expandFP_TO_UINT(SDNode *Node, SDValue &Result, 6264 SDValue &Chain, 6265 SelectionDAG &DAG) const { 6266 SDLoc dl(SDValue(Node, 0)); 6267 unsigned OpNo = Node->isStrictFPOpcode() ? 1 : 0; 6268 SDValue Src = Node->getOperand(OpNo); 6269 6270 EVT SrcVT = Src.getValueType(); 6271 EVT DstVT = Node->getValueType(0); 6272 EVT SetCCVT = 6273 getSetCCResultType(DAG.getDataLayout(), *DAG.getContext(), SrcVT); 6274 EVT DstSetCCVT = 6275 getSetCCResultType(DAG.getDataLayout(), *DAG.getContext(), DstVT); 6276 6277 // Only expand vector types if we have the appropriate vector bit operations. 6278 unsigned SIntOpcode = Node->isStrictFPOpcode() ? ISD::STRICT_FP_TO_SINT : 6279 ISD::FP_TO_SINT; 6280 if (DstVT.isVector() && (!isOperationLegalOrCustom(SIntOpcode, DstVT) || 6281 !isOperationLegalOrCustomOrPromote(ISD::XOR, SrcVT))) 6282 return false; 6283 6284 // If the maximum float value is smaller then the signed integer range, 6285 // the destination signmask can't be represented by the float, so we can 6286 // just use FP_TO_SINT directly. 
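// For example, for an f16 -> i64 conversion the signmask 2^63 is far above the
// largest finite half value (65504), so converting it to f16 overflows, every
// finite input is already in signed range, and FP_TO_SINT alone suffices. For
// f32 -> i64, 2^63 is exactly representable, so the full expansion is needed.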
6287 const fltSemantics &APFSem = DAG.EVTToAPFloatSemantics(SrcVT); 6288 APFloat APF(APFSem, APInt::getNullValue(SrcVT.getScalarSizeInBits())); 6289 APInt SignMask = APInt::getSignMask(DstVT.getScalarSizeInBits()); 6290 if (APFloat::opOverflow & 6291 APF.convertFromAPInt(SignMask, false, APFloat::rmNearestTiesToEven)) { 6292 if (Node->isStrictFPOpcode()) { 6293 Result = DAG.getNode(ISD::STRICT_FP_TO_SINT, dl, { DstVT, MVT::Other }, 6294 { Node->getOperand(0), Src }); 6295 Chain = Result.getValue(1); 6296 } else 6297 Result = DAG.getNode(ISD::FP_TO_SINT, dl, DstVT, Src); 6298 return true; 6299 } 6300 6301 SDValue Cst = DAG.getConstantFP(APF, dl, SrcVT); 6302 SDValue Sel; 6303 6304 if (Node->isStrictFPOpcode()) { 6305 Sel = DAG.getSetCC(dl, SetCCVT, Src, Cst, ISD::SETLT, 6306 Node->getOperand(0), /*IsSignaling*/ true); 6307 Chain = Sel.getValue(1); 6308 } else { 6309 Sel = DAG.getSetCC(dl, SetCCVT, Src, Cst, ISD::SETLT); 6310 } 6311 6312 bool Strict = Node->isStrictFPOpcode() || 6313 shouldUseStrictFP_TO_INT(SrcVT, DstVT, /*IsSigned*/ false); 6314 6315 if (Strict) { 6316 // Expand based on maximum range of FP_TO_SINT, if the value exceeds the 6317 // signmask then offset (the result of which should be fully representable). 6318 // Sel = Src < 0x8000000000000000 6319 // FltOfs = select Sel, 0, 0x8000000000000000 6320 // IntOfs = select Sel, 0, 0x8000000000000000 6321 // Result = fp_to_sint(Src - FltOfs) ^ IntOfs 6322 6323 // TODO: Should any fast-math-flags be set for the FSUB? 6324 SDValue FltOfs = DAG.getSelect(dl, SrcVT, Sel, 6325 DAG.getConstantFP(0.0, dl, SrcVT), Cst); 6326 Sel = DAG.getBoolExtOrTrunc(Sel, dl, DstSetCCVT, DstVT); 6327 SDValue IntOfs = DAG.getSelect(dl, DstVT, Sel, 6328 DAG.getConstant(0, dl, DstVT), 6329 DAG.getConstant(SignMask, dl, DstVT)); 6330 SDValue SInt; 6331 if (Node->isStrictFPOpcode()) { 6332 SDValue Val = DAG.getNode(ISD::STRICT_FSUB, dl, { SrcVT, MVT::Other }, 6333 { Chain, Src, FltOfs }); 6334 SInt = DAG.getNode(ISD::STRICT_FP_TO_SINT, dl, { DstVT, MVT::Other }, 6335 { Val.getValue(1), Val }); 6336 Chain = SInt.getValue(1); 6337 } else { 6338 SDValue Val = DAG.getNode(ISD::FSUB, dl, SrcVT, Src, FltOfs); 6339 SInt = DAG.getNode(ISD::FP_TO_SINT, dl, DstVT, Val); 6340 } 6341 Result = DAG.getNode(ISD::XOR, dl, DstVT, SInt, IntOfs); 6342 } else { 6343 // Expand based on maximum range of FP_TO_SINT: 6344 // True = fp_to_sint(Src) 6345 // False = 0x8000000000000000 + fp_to_sint(Src - 0x8000000000000000) 6346 // Result = select (Src < 0x8000000000000000), True, False 6347 6348 SDValue True = DAG.getNode(ISD::FP_TO_SINT, dl, DstVT, Src); 6349 // TODO: Should any fast-math-flags be set for the FSUB? 6350 SDValue False = DAG.getNode(ISD::FP_TO_SINT, dl, DstVT, 6351 DAG.getNode(ISD::FSUB, dl, SrcVT, Src, Cst)); 6352 False = DAG.getNode(ISD::XOR, dl, DstVT, False, 6353 DAG.getConstant(SignMask, dl, DstVT)); 6354 Sel = DAG.getBoolExtOrTrunc(Sel, dl, DstSetCCVT, DstVT); 6355 Result = DAG.getSelect(dl, DstVT, Sel, True, False); 6356 } 6357 return true; 6358 } 6359 6360 bool TargetLowering::expandUINT_TO_FP(SDNode *Node, SDValue &Result, 6361 SDValue &Chain, 6362 SelectionDAG &DAG) const { 6363 unsigned OpNo = Node->isStrictFPOpcode() ? 1 : 0; 6364 SDValue Src = Node->getOperand(OpNo); 6365 EVT SrcVT = Src.getValueType(); 6366 EVT DstVT = Node->getValueType(0); 6367 6368 if (SrcVT.getScalarType() != MVT::i64 || DstVT.getScalarType() != MVT::f64) 6369 return false; 6370 6371 // Only expand vector types if we have the appropriate vector bit operations. 
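// In the __floatundidf-style expansion below, the low and high 32-bit halves of
// the source are OR'ed into the mantissas of the doubles 2^52 (0x4330...) and
// 2^84 (0x4530...), giving 2^52 + Lo and 2^84 + Hi * 2^32; subtracting the
// constant 2^84 + 2^52 from the high part and adding the low part reconstructs
// Hi * 2^32 + Lo with a single, correctly rounded FADD.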
6372 if (SrcVT.isVector() && (!isOperationLegalOrCustom(ISD::SRL, SrcVT) || 6373 !isOperationLegalOrCustom(ISD::FADD, DstVT) || 6374 !isOperationLegalOrCustom(ISD::FSUB, DstVT) || 6375 !isOperationLegalOrCustomOrPromote(ISD::OR, SrcVT) || 6376 !isOperationLegalOrCustomOrPromote(ISD::AND, SrcVT))) 6377 return false; 6378 6379 SDLoc dl(SDValue(Node, 0)); 6380 EVT ShiftVT = getShiftAmountTy(SrcVT, DAG.getDataLayout()); 6381 6382 // Implementation of unsigned i64 to f64 following the algorithm in 6383 // __floatundidf in compiler_rt. This implementation has the advantage 6384 // of performing rounding correctly, both in the default rounding mode 6385 // and in all alternate rounding modes. 6386 SDValue TwoP52 = DAG.getConstant(UINT64_C(0x4330000000000000), dl, SrcVT); 6387 SDValue TwoP84PlusTwoP52 = DAG.getConstantFP( 6388 BitsToDouble(UINT64_C(0x4530000000100000)), dl, DstVT); 6389 SDValue TwoP84 = DAG.getConstant(UINT64_C(0x4530000000000000), dl, SrcVT); 6390 SDValue LoMask = DAG.getConstant(UINT64_C(0x00000000FFFFFFFF), dl, SrcVT); 6391 SDValue HiShift = DAG.getConstant(32, dl, ShiftVT); 6392 6393 SDValue Lo = DAG.getNode(ISD::AND, dl, SrcVT, Src, LoMask); 6394 SDValue Hi = DAG.getNode(ISD::SRL, dl, SrcVT, Src, HiShift); 6395 SDValue LoOr = DAG.getNode(ISD::OR, dl, SrcVT, Lo, TwoP52); 6396 SDValue HiOr = DAG.getNode(ISD::OR, dl, SrcVT, Hi, TwoP84); 6397 SDValue LoFlt = DAG.getBitcast(DstVT, LoOr); 6398 SDValue HiFlt = DAG.getBitcast(DstVT, HiOr); 6399 if (Node->isStrictFPOpcode()) { 6400 SDValue HiSub = 6401 DAG.getNode(ISD::STRICT_FSUB, dl, {DstVT, MVT::Other}, 6402 {Node->getOperand(0), HiFlt, TwoP84PlusTwoP52}); 6403 Result = DAG.getNode(ISD::STRICT_FADD, dl, {DstVT, MVT::Other}, 6404 {HiSub.getValue(1), LoFlt, HiSub}); 6405 Chain = Result.getValue(1); 6406 } else { 6407 SDValue HiSub = 6408 DAG.getNode(ISD::FSUB, dl, DstVT, HiFlt, TwoP84PlusTwoP52); 6409 Result = DAG.getNode(ISD::FADD, dl, DstVT, LoFlt, HiSub); 6410 } 6411 return true; 6412 } 6413 6414 SDValue TargetLowering::expandFMINNUM_FMAXNUM(SDNode *Node, 6415 SelectionDAG &DAG) const { 6416 SDLoc dl(Node); 6417 unsigned NewOp = Node->getOpcode() == ISD::FMINNUM ? 6418 ISD::FMINNUM_IEEE : ISD::FMAXNUM_IEEE; 6419 EVT VT = Node->getValueType(0); 6420 if (isOperationLegalOrCustom(NewOp, VT)) { 6421 SDValue Quiet0 = Node->getOperand(0); 6422 SDValue Quiet1 = Node->getOperand(1); 6423 6424 if (!Node->getFlags().hasNoNaNs()) { 6425 // Insert canonicalizes if it's possible we need to quiet to get correct 6426 // sNaN behavior. 6427 if (!DAG.isKnownNeverSNaN(Quiet0)) { 6428 Quiet0 = DAG.getNode(ISD::FCANONICALIZE, dl, VT, Quiet0, 6429 Node->getFlags()); 6430 } 6431 if (!DAG.isKnownNeverSNaN(Quiet1)) { 6432 Quiet1 = DAG.getNode(ISD::FCANONICALIZE, dl, VT, Quiet1, 6433 Node->getFlags()); 6434 } 6435 } 6436 6437 return DAG.getNode(NewOp, dl, VT, Quiet0, Quiet1, Node->getFlags()); 6438 } 6439 6440 // If the target has FMINIMUM/FMAXIMUM but not FMINNUM/FMAXNUM use that 6441 // instead if there are no NaNs. 6442 if (Node->getFlags().hasNoNaNs()) { 6443 unsigned IEEE2018Op = 6444 Node->getOpcode() == ISD::FMINNUM ? ISD::FMINIMUM : ISD::FMAXIMUM; 6445 if (isOperationLegalOrCustom(IEEE2018Op, VT)) { 6446 return DAG.getNode(IEEE2018Op, dl, VT, Node->getOperand(0), 6447 Node->getOperand(1), Node->getFlags()); 6448 } 6449 } 6450 6451 // If none of the above worked, but there are no NaNs, then expand to 6452 // a compare/select sequence. 
This is required for correctness since 6453 // InstCombine might have canonicalized a fcmp+select sequence to a 6454 // FMINNUM/FMAXNUM node. If we were to fall through to the default 6455 // expansion to libcall, we might introduce a link-time dependency 6456 // on libm into a file that originally did not have one. 6457 if (Node->getFlags().hasNoNaNs()) { 6458 ISD::CondCode Pred = 6459 Node->getOpcode() == ISD::FMINNUM ? ISD::SETLT : ISD::SETGT; 6460 SDValue Op1 = Node->getOperand(0); 6461 SDValue Op2 = Node->getOperand(1); 6462 SDValue SelCC = DAG.getSelectCC(dl, Op1, Op2, Op1, Op2, Pred); 6463 // Copy FMF flags, but always set the no-signed-zeros flag 6464 // as this is implied by the FMINNUM/FMAXNUM semantics. 6465 SDNodeFlags Flags = Node->getFlags(); 6466 Flags.setNoSignedZeros(true); 6467 SelCC->setFlags(Flags); 6468 return SelCC; 6469 } 6470 6471 return SDValue(); 6472 } 6473 6474 bool TargetLowering::expandCTPOP(SDNode *Node, SDValue &Result, 6475 SelectionDAG &DAG) const { 6476 SDLoc dl(Node); 6477 EVT VT = Node->getValueType(0); 6478 EVT ShVT = getShiftAmountTy(VT, DAG.getDataLayout()); 6479 SDValue Op = Node->getOperand(0); 6480 unsigned Len = VT.getScalarSizeInBits(); 6481 assert(VT.isInteger() && "CTPOP not implemented for this type."); 6482 6483 // TODO: Add support for irregular type lengths. 6484 if (!(Len <= 128 && Len % 8 == 0)) 6485 return false; 6486 6487 // Only expand vector types if we have the appropriate vector bit operations. 6488 if (VT.isVector() && (!isOperationLegalOrCustom(ISD::ADD, VT) || 6489 !isOperationLegalOrCustom(ISD::SUB, VT) || 6490 !isOperationLegalOrCustom(ISD::SRL, VT) || 6491 (Len != 8 && !isOperationLegalOrCustom(ISD::MUL, VT)) || 6492 !isOperationLegalOrCustomOrPromote(ISD::AND, VT))) 6493 return false; 6494 6495 // This is the "best" algorithm from 6496 // http://graphics.stanford.edu/~seander/bithacks.html#CountBitsSetParallel 6497 SDValue Mask55 = 6498 DAG.getConstant(APInt::getSplat(Len, APInt(8, 0x55)), dl, VT); 6499 SDValue Mask33 = 6500 DAG.getConstant(APInt::getSplat(Len, APInt(8, 0x33)), dl, VT); 6501 SDValue Mask0F = 6502 DAG.getConstant(APInt::getSplat(Len, APInt(8, 0x0F)), dl, VT); 6503 SDValue Mask01 = 6504 DAG.getConstant(APInt::getSplat(Len, APInt(8, 0x01)), dl, VT); 6505 6506 // v = v - ((v >> 1) & 0x55555555...) 6507 Op = DAG.getNode(ISD::SUB, dl, VT, Op, 6508 DAG.getNode(ISD::AND, dl, VT, 6509 DAG.getNode(ISD::SRL, dl, VT, Op, 6510 DAG.getConstant(1, dl, ShVT)), 6511 Mask55)); 6512 // v = (v & 0x33333333...) + ((v >> 2) & 0x33333333...) 6513 Op = DAG.getNode(ISD::ADD, dl, VT, DAG.getNode(ISD::AND, dl, VT, Op, Mask33), 6514 DAG.getNode(ISD::AND, dl, VT, 6515 DAG.getNode(ISD::SRL, dl, VT, Op, 6516 DAG.getConstant(2, dl, ShVT)), 6517 Mask33)); 6518 // v = (v + (v >> 4)) & 0x0F0F0F0F... 6519 Op = DAG.getNode(ISD::AND, dl, VT, 6520 DAG.getNode(ISD::ADD, dl, VT, Op, 6521 DAG.getNode(ISD::SRL, dl, VT, Op, 6522 DAG.getConstant(4, dl, ShVT))), 6523 Mask0F); 6524 // v = (v * 0x01010101...) 
>> (Len - 8) 6525 if (Len > 8) 6526 Op = 6527 DAG.getNode(ISD::SRL, dl, VT, DAG.getNode(ISD::MUL, dl, VT, Op, Mask01), 6528 DAG.getConstant(Len - 8, dl, ShVT)); 6529 6530 Result = Op; 6531 return true; 6532 } 6533 6534 bool TargetLowering::expandCTLZ(SDNode *Node, SDValue &Result, 6535 SelectionDAG &DAG) const { 6536 SDLoc dl(Node); 6537 EVT VT = Node->getValueType(0); 6538 EVT ShVT = getShiftAmountTy(VT, DAG.getDataLayout()); 6539 SDValue Op = Node->getOperand(0); 6540 unsigned NumBitsPerElt = VT.getScalarSizeInBits(); 6541 6542 // If the non-ZERO_UNDEF version is supported we can use that instead. 6543 if (Node->getOpcode() == ISD::CTLZ_ZERO_UNDEF && 6544 isOperationLegalOrCustom(ISD::CTLZ, VT)) { 6545 Result = DAG.getNode(ISD::CTLZ, dl, VT, Op); 6546 return true; 6547 } 6548 6549 // If the ZERO_UNDEF version is supported use that and handle the zero case. 6550 if (isOperationLegalOrCustom(ISD::CTLZ_ZERO_UNDEF, VT)) { 6551 EVT SetCCVT = 6552 getSetCCResultType(DAG.getDataLayout(), *DAG.getContext(), VT); 6553 SDValue CTLZ = DAG.getNode(ISD::CTLZ_ZERO_UNDEF, dl, VT, Op); 6554 SDValue Zero = DAG.getConstant(0, dl, VT); 6555 SDValue SrcIsZero = DAG.getSetCC(dl, SetCCVT, Op, Zero, ISD::SETEQ); 6556 Result = DAG.getNode(ISD::SELECT, dl, VT, SrcIsZero, 6557 DAG.getConstant(NumBitsPerElt, dl, VT), CTLZ); 6558 return true; 6559 } 6560 6561 // Only expand vector types if we have the appropriate vector bit operations. 6562 if (VT.isVector() && (!isPowerOf2_32(NumBitsPerElt) || 6563 !isOperationLegalOrCustom(ISD::CTPOP, VT) || 6564 !isOperationLegalOrCustom(ISD::SRL, VT) || 6565 !isOperationLegalOrCustomOrPromote(ISD::OR, VT))) 6566 return false; 6567 6568 // for now, we do this: 6569 // x = x | (x >> 1); 6570 // x = x | (x >> 2); 6571 // ... 6572 // x = x | (x >>16); 6573 // x = x | (x >>32); // for 64-bit input 6574 // return popcount(~x); 6575 // 6576 // Ref: "Hacker's Delight" by Henry Warren 6577 for (unsigned i = 0; (1U << i) <= (NumBitsPerElt / 2); ++i) { 6578 SDValue Tmp = DAG.getConstant(1ULL << i, dl, ShVT); 6579 Op = DAG.getNode(ISD::OR, dl, VT, Op, 6580 DAG.getNode(ISD::SRL, dl, VT, Op, Tmp)); 6581 } 6582 Op = DAG.getNOT(dl, Op, VT); 6583 Result = DAG.getNode(ISD::CTPOP, dl, VT, Op); 6584 return true; 6585 } 6586 6587 bool TargetLowering::expandCTTZ(SDNode *Node, SDValue &Result, 6588 SelectionDAG &DAG) const { 6589 SDLoc dl(Node); 6590 EVT VT = Node->getValueType(0); 6591 SDValue Op = Node->getOperand(0); 6592 unsigned NumBitsPerElt = VT.getScalarSizeInBits(); 6593 6594 // If the non-ZERO_UNDEF version is supported we can use that instead. 6595 if (Node->getOpcode() == ISD::CTTZ_ZERO_UNDEF && 6596 isOperationLegalOrCustom(ISD::CTTZ, VT)) { 6597 Result = DAG.getNode(ISD::CTTZ, dl, VT, Op); 6598 return true; 6599 } 6600 6601 // If the ZERO_UNDEF version is supported use that and handle the zero case. 6602 if (isOperationLegalOrCustom(ISD::CTTZ_ZERO_UNDEF, VT)) { 6603 EVT SetCCVT = 6604 getSetCCResultType(DAG.getDataLayout(), *DAG.getContext(), VT); 6605 SDValue CTTZ = DAG.getNode(ISD::CTTZ_ZERO_UNDEF, dl, VT, Op); 6606 SDValue Zero = DAG.getConstant(0, dl, VT); 6607 SDValue SrcIsZero = DAG.getSetCC(dl, SetCCVT, Op, Zero, ISD::SETEQ); 6608 Result = DAG.getNode(ISD::SELECT, dl, VT, SrcIsZero, 6609 DAG.getConstant(NumBitsPerElt, dl, VT), CTTZ); 6610 return true; 6611 } 6612 6613 // Only expand vector types if we have the appropriate vector bit operations. 
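// The expansion below isolates the trailing zero bits with ~x & (x - 1) and then
// counts them; e.g. for x = 0b01011000: x - 1 = 0b01010111, ~x = 0b10100111,
// ~x & (x - 1) = 0b00000111, and popcount(0b00000111) == 3 == cttz(x).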
6614 if (VT.isVector() && (!isPowerOf2_32(NumBitsPerElt) || 6615 (!isOperationLegalOrCustom(ISD::CTPOP, VT) && 6616 !isOperationLegalOrCustom(ISD::CTLZ, VT)) || 6617 !isOperationLegalOrCustom(ISD::SUB, VT) || 6618 !isOperationLegalOrCustomOrPromote(ISD::AND, VT) || 6619 !isOperationLegalOrCustomOrPromote(ISD::XOR, VT))) 6620 return false; 6621 6622 // for now, we use: { return popcount(~x & (x - 1)); } 6623 // unless the target has ctlz but not ctpop, in which case we use: 6624 // { return 32 - nlz(~x & (x-1)); } 6625 // Ref: "Hacker's Delight" by Henry Warren 6626 SDValue Tmp = DAG.getNode( 6627 ISD::AND, dl, VT, DAG.getNOT(dl, Op, VT), 6628 DAG.getNode(ISD::SUB, dl, VT, Op, DAG.getConstant(1, dl, VT))); 6629 6630 // If ISD::CTLZ is legal and CTPOP isn't, then do that instead. 6631 if (isOperationLegal(ISD::CTLZ, VT) && !isOperationLegal(ISD::CTPOP, VT)) { 6632 Result = 6633 DAG.getNode(ISD::SUB, dl, VT, DAG.getConstant(NumBitsPerElt, dl, VT), 6634 DAG.getNode(ISD::CTLZ, dl, VT, Tmp)); 6635 return true; 6636 } 6637 6638 Result = DAG.getNode(ISD::CTPOP, dl, VT, Tmp); 6639 return true; 6640 } 6641 6642 bool TargetLowering::expandABS(SDNode *N, SDValue &Result, 6643 SelectionDAG &DAG) const { 6644 SDLoc dl(N); 6645 EVT VT = N->getValueType(0); 6646 EVT ShVT = getShiftAmountTy(VT, DAG.getDataLayout()); 6647 SDValue Op = N->getOperand(0); 6648 6649 // Only expand vector types if we have the appropriate vector operations. 6650 if (VT.isVector() && (!isOperationLegalOrCustom(ISD::SRA, VT) || 6651 !isOperationLegalOrCustom(ISD::ADD, VT) || 6652 !isOperationLegalOrCustomOrPromote(ISD::XOR, VT))) 6653 return false; 6654 6655 SDValue Shift = 6656 DAG.getNode(ISD::SRA, dl, VT, Op, 6657 DAG.getConstant(VT.getScalarSizeInBits() - 1, dl, ShVT)); 6658 SDValue Add = DAG.getNode(ISD::ADD, dl, VT, Op, Shift); 6659 Result = DAG.getNode(ISD::XOR, dl, VT, Add, Shift); 6660 return true; 6661 } 6662 6663 std::pair<SDValue, SDValue> 6664 TargetLowering::scalarizeVectorLoad(LoadSDNode *LD, 6665 SelectionDAG &DAG) const { 6666 SDLoc SL(LD); 6667 SDValue Chain = LD->getChain(); 6668 SDValue BasePTR = LD->getBasePtr(); 6669 EVT SrcVT = LD->getMemoryVT(); 6670 EVT DstVT = LD->getValueType(0); 6671 ISD::LoadExtType ExtType = LD->getExtensionType(); 6672 6673 unsigned NumElem = SrcVT.getVectorNumElements(); 6674 6675 EVT SrcEltVT = SrcVT.getScalarType(); 6676 EVT DstEltVT = DstVT.getScalarType(); 6677 6678 // A vector must always be stored in memory as-is, i.e. without any padding 6679 // between the elements, since various code depend on it, e.g. in the 6680 // handling of a bitcast of a vector type to int, which may be done with a 6681 // vector store followed by an integer load. A vector that does not have 6682 // elements that are byte-sized must therefore be stored as an integer 6683 // built out of the extracted vector elements. 6684 if (!SrcEltVT.isByteSized()) { 6685 unsigned NumLoadBits = SrcVT.getStoreSizeInBits(); 6686 EVT LoadVT = EVT::getIntegerVT(*DAG.getContext(), NumLoadBits); 6687 6688 unsigned NumSrcBits = SrcVT.getSizeInBits(); 6689 EVT SrcIntVT = EVT::getIntegerVT(*DAG.getContext(), NumSrcBits); 6690 6691 unsigned SrcEltBits = SrcEltVT.getSizeInBits(); 6692 SDValue SrcEltBitMask = DAG.getConstant( 6693 APInt::getLowBitsSet(NumLoadBits, SrcEltBits), SL, LoadVT); 6694 6695 // Load the whole vector and avoid masking off the top bits as it makes 6696 // the codegen worse. 
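// For example, a <4 x i1> load becomes a single extending load of the packed i4
// into an i8 register; element Idx is then recovered by shifting right by Idx
// bits (with the index reversed on big-endian targets), masking with the one-bit
// element mask, and truncating back to i1.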
6697 SDValue Load = 6698 DAG.getExtLoad(ISD::EXTLOAD, SL, LoadVT, Chain, BasePTR, 6699 LD->getPointerInfo(), SrcIntVT, LD->getAlignment(), 6700 LD->getMemOperand()->getFlags(), LD->getAAInfo()); 6701 6702 SmallVector<SDValue, 8> Vals; 6703 for (unsigned Idx = 0; Idx < NumElem; ++Idx) { 6704 unsigned ShiftIntoIdx = 6705 (DAG.getDataLayout().isBigEndian() ? (NumElem - 1) - Idx : Idx); 6706 SDValue ShiftAmount = 6707 DAG.getShiftAmountConstant(ShiftIntoIdx * SrcEltVT.getSizeInBits(), 6708 LoadVT, SL, /*LegalTypes=*/false); 6709 SDValue ShiftedElt = DAG.getNode(ISD::SRL, SL, LoadVT, Load, ShiftAmount); 6710 SDValue Elt = 6711 DAG.getNode(ISD::AND, SL, LoadVT, ShiftedElt, SrcEltBitMask); 6712 SDValue Scalar = DAG.getNode(ISD::TRUNCATE, SL, SrcEltVT, Elt); 6713 6714 if (ExtType != ISD::NON_EXTLOAD) { 6715 unsigned ExtendOp = ISD::getExtForLoadExtType(false, ExtType); 6716 Scalar = DAG.getNode(ExtendOp, SL, DstEltVT, Scalar); 6717 } 6718 6719 Vals.push_back(Scalar); 6720 } 6721 6722 SDValue Value = DAG.getBuildVector(DstVT, SL, Vals); 6723 return std::make_pair(Value, Load.getValue(1)); 6724 } 6725 6726 unsigned Stride = SrcEltVT.getSizeInBits() / 8; 6727 assert(SrcEltVT.isByteSized()); 6728 6729 SmallVector<SDValue, 8> Vals; 6730 SmallVector<SDValue, 8> LoadChains; 6731 6732 for (unsigned Idx = 0; Idx < NumElem; ++Idx) { 6733 SDValue ScalarLoad = 6734 DAG.getExtLoad(ExtType, SL, DstEltVT, Chain, BasePTR, 6735 LD->getPointerInfo().getWithOffset(Idx * Stride), 6736 SrcEltVT, MinAlign(LD->getAlignment(), Idx * Stride), 6737 LD->getMemOperand()->getFlags(), LD->getAAInfo()); 6738 6739 BasePTR = DAG.getObjectPtrOffset(SL, BasePTR, Stride); 6740 6741 Vals.push_back(ScalarLoad.getValue(0)); 6742 LoadChains.push_back(ScalarLoad.getValue(1)); 6743 } 6744 6745 SDValue NewChain = DAG.getNode(ISD::TokenFactor, SL, MVT::Other, LoadChains); 6746 SDValue Value = DAG.getBuildVector(DstVT, SL, Vals); 6747 6748 return std::make_pair(Value, NewChain); 6749 } 6750 6751 SDValue TargetLowering::scalarizeVectorStore(StoreSDNode *ST, 6752 SelectionDAG &DAG) const { 6753 SDLoc SL(ST); 6754 6755 SDValue Chain = ST->getChain(); 6756 SDValue BasePtr = ST->getBasePtr(); 6757 SDValue Value = ST->getValue(); 6758 EVT StVT = ST->getMemoryVT(); 6759 6760 // The type of the data we want to save 6761 EVT RegVT = Value.getValueType(); 6762 EVT RegSclVT = RegVT.getScalarType(); 6763 6764 // The type of data as saved in memory. 6765 EVT MemSclVT = StVT.getScalarType(); 6766 6767 unsigned NumElem = StVT.getVectorNumElements(); 6768 6769 // A vector must always be stored in memory as-is, i.e. without any padding 6770 // between the elements, since various code depend on it, e.g. in the 6771 // handling of a bitcast of a vector type to int, which may be done with a 6772 // vector store followed by an integer load. A vector that does not have 6773 // elements that are byte-sized must therefore be stored as an integer 6774 // built out of the extracted vector elements. 
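// For example, storing a <4 x i1> value packs the four truncated elements into
// an i4 by shifting each bit to its (endian-adjusted) position and OR'ing them
// together, then emits one store of that packed integer.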
6775 if (!MemSclVT.isByteSized()) { 6776 unsigned NumBits = StVT.getSizeInBits(); 6777 EVT IntVT = EVT::getIntegerVT(*DAG.getContext(), NumBits); 6778 6779 SDValue CurrVal = DAG.getConstant(0, SL, IntVT); 6780 6781 for (unsigned Idx = 0; Idx < NumElem; ++Idx) { 6782 SDValue Elt = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, SL, RegSclVT, Value, 6783 DAG.getVectorIdxConstant(Idx, SL)); 6784 SDValue Trunc = DAG.getNode(ISD::TRUNCATE, SL, MemSclVT, Elt); 6785 SDValue ExtElt = DAG.getNode(ISD::ZERO_EXTEND, SL, IntVT, Trunc); 6786 unsigned ShiftIntoIdx = 6787 (DAG.getDataLayout().isBigEndian() ? (NumElem - 1) - Idx : Idx); 6788 SDValue ShiftAmount = 6789 DAG.getConstant(ShiftIntoIdx * MemSclVT.getSizeInBits(), SL, IntVT); 6790 SDValue ShiftedElt = 6791 DAG.getNode(ISD::SHL, SL, IntVT, ExtElt, ShiftAmount); 6792 CurrVal = DAG.getNode(ISD::OR, SL, IntVT, CurrVal, ShiftedElt); 6793 } 6794 6795 return DAG.getStore(Chain, SL, CurrVal, BasePtr, ST->getPointerInfo(), 6796 ST->getAlignment(), ST->getMemOperand()->getFlags(), 6797 ST->getAAInfo()); 6798 } 6799 6800 // Store Stride in bytes 6801 unsigned Stride = MemSclVT.getSizeInBits() / 8; 6802 assert(Stride && "Zero stride!"); 6803 // Extract each of the elements from the original vector and save them into 6804 // memory individually. 6805 SmallVector<SDValue, 8> Stores; 6806 for (unsigned Idx = 0; Idx < NumElem; ++Idx) { 6807 SDValue Elt = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, SL, RegSclVT, Value, 6808 DAG.getVectorIdxConstant(Idx, SL)); 6809 6810 SDValue Ptr = DAG.getObjectPtrOffset(SL, BasePtr, Idx * Stride); 6811 6812 // This scalar TruncStore may be illegal, but we legalize it later. 6813 SDValue Store = DAG.getTruncStore( 6814 Chain, SL, Elt, Ptr, ST->getPointerInfo().getWithOffset(Idx * Stride), 6815 MemSclVT, MinAlign(ST->getAlignment(), Idx * Stride), 6816 ST->getMemOperand()->getFlags(), ST->getAAInfo()); 6817 6818 Stores.push_back(Store); 6819 } 6820 6821 return DAG.getNode(ISD::TokenFactor, SL, MVT::Other, Stores); 6822 } 6823 6824 std::pair<SDValue, SDValue> 6825 TargetLowering::expandUnalignedLoad(LoadSDNode *LD, SelectionDAG &DAG) const { 6826 assert(LD->getAddressingMode() == ISD::UNINDEXED && 6827 "unaligned indexed loads not implemented!"); 6828 SDValue Chain = LD->getChain(); 6829 SDValue Ptr = LD->getBasePtr(); 6830 EVT VT = LD->getValueType(0); 6831 EVT LoadedVT = LD->getMemoryVT(); 6832 SDLoc dl(LD); 6833 auto &MF = DAG.getMachineFunction(); 6834 6835 if (VT.isFloatingPoint() || VT.isVector()) { 6836 EVT intVT = EVT::getIntegerVT(*DAG.getContext(), LoadedVT.getSizeInBits()); 6837 if (isTypeLegal(intVT) && isTypeLegal(LoadedVT)) { 6838 if (!isOperationLegalOrCustom(ISD::LOAD, intVT) && 6839 LoadedVT.isVector()) { 6840 // Scalarize the load and let the individual components be handled. 6841 return scalarizeVectorLoad(LD, DAG); 6842 } 6843 6844 // Expand to a (misaligned) integer load of the same size, 6845 // then bitconvert to floating point or vector. 6846 SDValue newLoad = DAG.getLoad(intVT, dl, Chain, Ptr, 6847 LD->getMemOperand()); 6848 SDValue Result = DAG.getNode(ISD::BITCAST, dl, LoadedVT, newLoad); 6849 if (LoadedVT != VT) 6850 Result = DAG.getNode(VT.isFloatingPoint() ? ISD::FP_EXTEND : 6851 ISD::ANY_EXTEND, dl, VT, Result); 6852 6853 return std::make_pair(Result, newLoad.getValue(1)); 6854 } 6855 6856 // Copy the value to a (aligned) stack slot using (unaligned) integer 6857 // loads and stores, then do a (aligned) load from the stack slot. 
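// For example, a misaligned 16-byte vector load with only 4-byte registers is
// copied with three full-width loads plus a final (possibly partial) extending
// load, each stored into an aligned stack temporary, and the result is then
// produced by one aligned load of the original type from that temporary.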
6858 MVT RegVT = getRegisterType(*DAG.getContext(), intVT); 6859 unsigned LoadedBytes = LoadedVT.getStoreSize(); 6860 unsigned RegBytes = RegVT.getSizeInBits() / 8; 6861 unsigned NumRegs = (LoadedBytes + RegBytes - 1) / RegBytes; 6862 6863 // Make sure the stack slot is also aligned for the register type. 6864 SDValue StackBase = DAG.CreateStackTemporary(LoadedVT, RegVT); 6865 auto FrameIndex = cast<FrameIndexSDNode>(StackBase.getNode())->getIndex(); 6866 SmallVector<SDValue, 8> Stores; 6867 SDValue StackPtr = StackBase; 6868 unsigned Offset = 0; 6869 6870 EVT PtrVT = Ptr.getValueType(); 6871 EVT StackPtrVT = StackPtr.getValueType(); 6872 6873 SDValue PtrIncrement = DAG.getConstant(RegBytes, dl, PtrVT); 6874 SDValue StackPtrIncrement = DAG.getConstant(RegBytes, dl, StackPtrVT); 6875 6876 // Do all but one copies using the full register width. 6877 for (unsigned i = 1; i < NumRegs; i++) { 6878 // Load one integer register's worth from the original location. 6879 SDValue Load = DAG.getLoad( 6880 RegVT, dl, Chain, Ptr, LD->getPointerInfo().getWithOffset(Offset), 6881 MinAlign(LD->getAlignment(), Offset), LD->getMemOperand()->getFlags(), 6882 LD->getAAInfo()); 6883 // Follow the load with a store to the stack slot. Remember the store. 6884 Stores.push_back(DAG.getStore( 6885 Load.getValue(1), dl, Load, StackPtr, 6886 MachinePointerInfo::getFixedStack(MF, FrameIndex, Offset))); 6887 // Increment the pointers. 6888 Offset += RegBytes; 6889 6890 Ptr = DAG.getObjectPtrOffset(dl, Ptr, PtrIncrement); 6891 StackPtr = DAG.getObjectPtrOffset(dl, StackPtr, StackPtrIncrement); 6892 } 6893 6894 // The last copy may be partial. Do an extending load. 6895 EVT MemVT = EVT::getIntegerVT(*DAG.getContext(), 6896 8 * (LoadedBytes - Offset)); 6897 SDValue Load = 6898 DAG.getExtLoad(ISD::EXTLOAD, dl, RegVT, Chain, Ptr, 6899 LD->getPointerInfo().getWithOffset(Offset), MemVT, 6900 MinAlign(LD->getAlignment(), Offset), 6901 LD->getMemOperand()->getFlags(), LD->getAAInfo()); 6902 // Follow the load with a store to the stack slot. Remember the store. 6903 // On big-endian machines this requires a truncating store to ensure 6904 // that the bits end up in the right place. 6905 Stores.push_back(DAG.getTruncStore( 6906 Load.getValue(1), dl, Load, StackPtr, 6907 MachinePointerInfo::getFixedStack(MF, FrameIndex, Offset), MemVT)); 6908 6909 // The order of the stores doesn't matter - say it with a TokenFactor. 6910 SDValue TF = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, Stores); 6911 6912 // Finally, perform the original load only redirected to the stack slot. 6913 Load = DAG.getExtLoad(LD->getExtensionType(), dl, VT, TF, StackBase, 6914 MachinePointerInfo::getFixedStack(MF, FrameIndex, 0), 6915 LoadedVT); 6916 6917 // Callers expect a MERGE_VALUES node. 6918 return std::make_pair(Load, TF); 6919 } 6920 6921 assert(LoadedVT.isInteger() && !LoadedVT.isVector() && 6922 "Unaligned load of unsupported type."); 6923 6924 // Compute the new VT that is half the size of the old one. This is an 6925 // integer MVT. 6926 unsigned NumBits = LoadedVT.getSizeInBits(); 6927 EVT NewLoadedVT; 6928 NewLoadedVT = EVT::getIntegerVT(*DAG.getContext(), NumBits/2); 6929 NumBits >>= 1; 6930 6931 unsigned Alignment = LD->getAlignment(); 6932 unsigned IncrementSize = NumBits / 8; 6933 ISD::LoadExtType HiExtType = LD->getExtensionType(); 6934 6935 // If the original load is NON_EXTLOAD, the hi part load must be ZEXTLOAD. 
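// The two halves are recombined below; for example, an unaligned i32 load is
// split into Lo = zextload i16 [Ptr] and Hi = extload i16 [Ptr + 2], giving
// (Hi << 16) | Lo, with the two halves trading addresses on big-endian targets.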
6936 if (HiExtType == ISD::NON_EXTLOAD) 6937 HiExtType = ISD::ZEXTLOAD; 6938 6939 // Load the value in two parts 6940 SDValue Lo, Hi; 6941 if (DAG.getDataLayout().isLittleEndian()) { 6942 Lo = DAG.getExtLoad(ISD::ZEXTLOAD, dl, VT, Chain, Ptr, LD->getPointerInfo(), 6943 NewLoadedVT, Alignment, LD->getMemOperand()->getFlags(), 6944 LD->getAAInfo()); 6945 6946 Ptr = DAG.getObjectPtrOffset(dl, Ptr, IncrementSize); 6947 Hi = DAG.getExtLoad(HiExtType, dl, VT, Chain, Ptr, 6948 LD->getPointerInfo().getWithOffset(IncrementSize), 6949 NewLoadedVT, MinAlign(Alignment, IncrementSize), 6950 LD->getMemOperand()->getFlags(), LD->getAAInfo()); 6951 } else { 6952 Hi = DAG.getExtLoad(HiExtType, dl, VT, Chain, Ptr, LD->getPointerInfo(), 6953 NewLoadedVT, Alignment, LD->getMemOperand()->getFlags(), 6954 LD->getAAInfo()); 6955 6956 Ptr = DAG.getObjectPtrOffset(dl, Ptr, IncrementSize); 6957 Lo = DAG.getExtLoad(ISD::ZEXTLOAD, dl, VT, Chain, Ptr, 6958 LD->getPointerInfo().getWithOffset(IncrementSize), 6959 NewLoadedVT, MinAlign(Alignment, IncrementSize), 6960 LD->getMemOperand()->getFlags(), LD->getAAInfo()); 6961 } 6962 6963 // aggregate the two parts 6964 SDValue ShiftAmount = 6965 DAG.getConstant(NumBits, dl, getShiftAmountTy(Hi.getValueType(), 6966 DAG.getDataLayout())); 6967 SDValue Result = DAG.getNode(ISD::SHL, dl, VT, Hi, ShiftAmount); 6968 Result = DAG.getNode(ISD::OR, dl, VT, Result, Lo); 6969 6970 SDValue TF = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, Lo.getValue(1), 6971 Hi.getValue(1)); 6972 6973 return std::make_pair(Result, TF); 6974 } 6975 6976 SDValue TargetLowering::expandUnalignedStore(StoreSDNode *ST, 6977 SelectionDAG &DAG) const { 6978 assert(ST->getAddressingMode() == ISD::UNINDEXED && 6979 "unaligned indexed stores not implemented!"); 6980 SDValue Chain = ST->getChain(); 6981 SDValue Ptr = ST->getBasePtr(); 6982 SDValue Val = ST->getValue(); 6983 EVT VT = Val.getValueType(); 6984 int Alignment = ST->getAlignment(); 6985 auto &MF = DAG.getMachineFunction(); 6986 EVT StoreMemVT = ST->getMemoryVT(); 6987 6988 SDLoc dl(ST); 6989 if (StoreMemVT.isFloatingPoint() || StoreMemVT.isVector()) { 6990 EVT intVT = EVT::getIntegerVT(*DAG.getContext(), VT.getSizeInBits()); 6991 if (isTypeLegal(intVT)) { 6992 if (!isOperationLegalOrCustom(ISD::STORE, intVT) && 6993 StoreMemVT.isVector()) { 6994 // Scalarize the store and let the individual components be handled. 6995 SDValue Result = scalarizeVectorStore(ST, DAG); 6996 return Result; 6997 } 6998 // Expand to a bitconvert of the value to the integer type of the 6999 // same size, then a (misaligned) int store. 7000 // FIXME: Does not handle truncating floating point stores! 7001 SDValue Result = DAG.getNode(ISD::BITCAST, dl, intVT, Val); 7002 Result = DAG.getStore(Chain, dl, Result, Ptr, ST->getPointerInfo(), 7003 Alignment, ST->getMemOperand()->getFlags()); 7004 return Result; 7005 } 7006 // Do a (aligned) store to a stack slot, then copy from the stack slot 7007 // to the final destination using (unaligned) integer loads and stores. 7008 MVT RegVT = getRegisterType( 7009 *DAG.getContext(), 7010 EVT::getIntegerVT(*DAG.getContext(), StoreMemVT.getSizeInBits())); 7011 EVT PtrVT = Ptr.getValueType(); 7012 unsigned StoredBytes = StoreMemVT.getStoreSize(); 7013 unsigned RegBytes = RegVT.getSizeInBits() / 8; 7014 unsigned NumRegs = (StoredBytes + RegBytes - 1) / RegBytes; 7015 7016 // Make sure the stack slot is also aligned for the register type. 
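// As with the unaligned-load case above, a misaligned 16-byte vector store with
// 4-byte registers first stores the value into an aligned stack temporary and
// then copies it out with three full-width load/store pairs plus one final
// (possibly partial) extending load and truncating store to the destination.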
7017 SDValue StackPtr = DAG.CreateStackTemporary(StoreMemVT, RegVT); 7018 auto FrameIndex = cast<FrameIndexSDNode>(StackPtr.getNode())->getIndex(); 7019 7020 // Perform the original store, only redirected to the stack slot. 7021 SDValue Store = DAG.getTruncStore( 7022 Chain, dl, Val, StackPtr, 7023 MachinePointerInfo::getFixedStack(MF, FrameIndex, 0), StoreMemVT); 7024 7025 EVT StackPtrVT = StackPtr.getValueType(); 7026 7027 SDValue PtrIncrement = DAG.getConstant(RegBytes, dl, PtrVT); 7028 SDValue StackPtrIncrement = DAG.getConstant(RegBytes, dl, StackPtrVT); 7029 SmallVector<SDValue, 8> Stores; 7030 unsigned Offset = 0; 7031 7032 // Do all but one copies using the full register width. 7033 for (unsigned i = 1; i < NumRegs; i++) { 7034 // Load one integer register's worth from the stack slot. 7035 SDValue Load = DAG.getLoad( 7036 RegVT, dl, Store, StackPtr, 7037 MachinePointerInfo::getFixedStack(MF, FrameIndex, Offset)); 7038 // Store it to the final location. Remember the store. 7039 Stores.push_back(DAG.getStore(Load.getValue(1), dl, Load, Ptr, 7040 ST->getPointerInfo().getWithOffset(Offset), 7041 MinAlign(ST->getAlignment(), Offset), 7042 ST->getMemOperand()->getFlags())); 7043 // Increment the pointers. 7044 Offset += RegBytes; 7045 StackPtr = DAG.getObjectPtrOffset(dl, StackPtr, StackPtrIncrement); 7046 Ptr = DAG.getObjectPtrOffset(dl, Ptr, PtrIncrement); 7047 } 7048 7049 // The last store may be partial. Do a truncating store. On big-endian 7050 // machines this requires an extending load from the stack slot to ensure 7051 // that the bits are in the right place. 7052 EVT LoadMemVT = 7053 EVT::getIntegerVT(*DAG.getContext(), 8 * (StoredBytes - Offset)); 7054 7055 // Load from the stack slot. 7056 SDValue Load = DAG.getExtLoad( 7057 ISD::EXTLOAD, dl, RegVT, Store, StackPtr, 7058 MachinePointerInfo::getFixedStack(MF, FrameIndex, Offset), LoadMemVT); 7059 7060 Stores.push_back( 7061 DAG.getTruncStore(Load.getValue(1), dl, Load, Ptr, 7062 ST->getPointerInfo().getWithOffset(Offset), LoadMemVT, 7063 MinAlign(ST->getAlignment(), Offset), 7064 ST->getMemOperand()->getFlags(), ST->getAAInfo())); 7065 // The order of the stores doesn't matter - say it with a TokenFactor. 7066 SDValue Result = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, Stores); 7067 return Result; 7068 } 7069 7070 assert(StoreMemVT.isInteger() && !StoreMemVT.isVector() && 7071 "Unaligned store of unknown type."); 7072 // Get the half-size VT 7073 EVT NewStoredVT = StoreMemVT.getHalfSizedIntegerVT(*DAG.getContext()); 7074 int NumBits = NewStoredVT.getSizeInBits(); 7075 int IncrementSize = NumBits / 8; 7076 7077 // Divide the stored value in two parts. 7078 SDValue ShiftAmount = DAG.getConstant( 7079 NumBits, dl, getShiftAmountTy(Val.getValueType(), DAG.getDataLayout())); 7080 SDValue Lo = Val; 7081 SDValue Hi = DAG.getNode(ISD::SRL, dl, VT, Val, ShiftAmount); 7082 7083 // Store the two parts 7084 SDValue Store1, Store2; 7085 Store1 = DAG.getTruncStore(Chain, dl, 7086 DAG.getDataLayout().isLittleEndian() ? Lo : Hi, 7087 Ptr, ST->getPointerInfo(), NewStoredVT, Alignment, 7088 ST->getMemOperand()->getFlags()); 7089 7090 Ptr = DAG.getObjectPtrOffset(dl, Ptr, IncrementSize); 7091 Alignment = MinAlign(Alignment, IncrementSize); 7092 Store2 = DAG.getTruncStore( 7093 Chain, dl, DAG.getDataLayout().isLittleEndian() ? 
Hi : Lo, Ptr, 7094 ST->getPointerInfo().getWithOffset(IncrementSize), NewStoredVT, Alignment, 7095 ST->getMemOperand()->getFlags(), ST->getAAInfo()); 7096 7097 SDValue Result = 7098 DAG.getNode(ISD::TokenFactor, dl, MVT::Other, Store1, Store2); 7099 return Result; 7100 } 7101 7102 SDValue 7103 TargetLowering::IncrementMemoryAddress(SDValue Addr, SDValue Mask, 7104 const SDLoc &DL, EVT DataVT, 7105 SelectionDAG &DAG, 7106 bool IsCompressedMemory) const { 7107 SDValue Increment; 7108 EVT AddrVT = Addr.getValueType(); 7109 EVT MaskVT = Mask.getValueType(); 7110 assert(DataVT.getVectorNumElements() == MaskVT.getVectorNumElements() && 7111 "Incompatible types of Data and Mask"); 7112 if (IsCompressedMemory) { 7113 // Incrementing the pointer according to number of '1's in the mask. 7114 EVT MaskIntVT = EVT::getIntegerVT(*DAG.getContext(), MaskVT.getSizeInBits()); 7115 SDValue MaskInIntReg = DAG.getBitcast(MaskIntVT, Mask); 7116 if (MaskIntVT.getSizeInBits() < 32) { 7117 MaskInIntReg = DAG.getNode(ISD::ZERO_EXTEND, DL, MVT::i32, MaskInIntReg); 7118 MaskIntVT = MVT::i32; 7119 } 7120 7121 // Count '1's with POPCNT. 7122 Increment = DAG.getNode(ISD::CTPOP, DL, MaskIntVT, MaskInIntReg); 7123 Increment = DAG.getZExtOrTrunc(Increment, DL, AddrVT); 7124 // Scale is an element size in bytes. 7125 SDValue Scale = DAG.getConstant(DataVT.getScalarSizeInBits() / 8, DL, 7126 AddrVT); 7127 Increment = DAG.getNode(ISD::MUL, DL, AddrVT, Increment, Scale); 7128 } else 7129 Increment = DAG.getConstant(DataVT.getStoreSize(), DL, AddrVT); 7130 7131 return DAG.getNode(ISD::ADD, DL, AddrVT, Addr, Increment); 7132 } 7133 7134 static SDValue clampDynamicVectorIndex(SelectionDAG &DAG, 7135 SDValue Idx, 7136 EVT VecVT, 7137 const SDLoc &dl) { 7138 if (isa<ConstantSDNode>(Idx)) 7139 return Idx; 7140 7141 EVT IdxVT = Idx.getValueType(); 7142 unsigned NElts = VecVT.getVectorNumElements(); 7143 if (isPowerOf2_32(NElts)) { 7144 APInt Imm = APInt::getLowBitsSet(IdxVT.getSizeInBits(), 7145 Log2_32(NElts)); 7146 return DAG.getNode(ISD::AND, dl, IdxVT, Idx, 7147 DAG.getConstant(Imm, dl, IdxVT)); 7148 } 7149 7150 return DAG.getNode(ISD::UMIN, dl, IdxVT, Idx, 7151 DAG.getConstant(NElts - 1, dl, IdxVT)); 7152 } 7153 7154 SDValue TargetLowering::getVectorElementPointer(SelectionDAG &DAG, 7155 SDValue VecPtr, EVT VecVT, 7156 SDValue Index) const { 7157 SDLoc dl(Index); 7158 // Make sure the index type is big enough to compute in. 7159 Index = DAG.getZExtOrTrunc(Index, dl, VecPtr.getValueType()); 7160 7161 EVT EltVT = VecVT.getVectorElementType(); 7162 7163 // Calculate the element offset and add it to the pointer. 7164 unsigned EltSize = EltVT.getSizeInBits() / 8; // FIXME: should be ABI size. 
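// The final address is VecPtr + Index * EltSize; a variable Index is first
// clamped (masked when the element count is a power of two, UMIN otherwise) so
// a dynamic out-of-range index cannot address memory past the vector.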
7165 assert(EltSize * 8 == EltVT.getSizeInBits() && 7166 "Converting bits to bytes lost precision"); 7167 7168 Index = clampDynamicVectorIndex(DAG, Index, VecVT, dl); 7169 7170 EVT IdxVT = Index.getValueType(); 7171 7172 Index = DAG.getNode(ISD::MUL, dl, IdxVT, Index, 7173 DAG.getConstant(EltSize, dl, IdxVT)); 7174 return DAG.getMemBasePlusOffset(VecPtr, Index, dl); 7175 } 7176 7177 //===----------------------------------------------------------------------===// 7178 // Implementation of Emulated TLS Model 7179 //===----------------------------------------------------------------------===// 7180 7181 SDValue TargetLowering::LowerToTLSEmulatedModel(const GlobalAddressSDNode *GA, 7182 SelectionDAG &DAG) const { 7183 // Access to the address of TLS variable xyz is lowered to a function call: 7184 // __emutls_get_address( address of global variable named "__emutls_v.xyz" ) 7185 EVT PtrVT = getPointerTy(DAG.getDataLayout()); 7186 PointerType *VoidPtrType = Type::getInt8PtrTy(*DAG.getContext()); 7187 SDLoc dl(GA); 7188 7189 ArgListTy Args; 7190 ArgListEntry Entry; 7191 std::string NameString = ("__emutls_v." + GA->getGlobal()->getName()).str(); 7192 Module *VariableModule = const_cast<Module*>(GA->getGlobal()->getParent()); 7193 StringRef EmuTlsVarName(NameString); 7194 GlobalVariable *EmuTlsVar = VariableModule->getNamedGlobal(EmuTlsVarName); 7195 assert(EmuTlsVar && "Cannot find EmuTlsVar "); 7196 Entry.Node = DAG.getGlobalAddress(EmuTlsVar, dl, PtrVT); 7197 Entry.Ty = VoidPtrType; 7198 Args.push_back(Entry); 7199 7200 SDValue EmuTlsGetAddr = DAG.getExternalSymbol("__emutls_get_address", PtrVT); 7201 7202 TargetLowering::CallLoweringInfo CLI(DAG); 7203 CLI.setDebugLoc(dl).setChain(DAG.getEntryNode()); 7204 CLI.setLibCallee(CallingConv::C, VoidPtrType, EmuTlsGetAddr, std::move(Args)); 7205 std::pair<SDValue, SDValue> CallResult = LowerCallTo(CLI); 7206 7207 // TLSADDR will be codegen'ed as a call. Inform MFI that the function has calls. 7208 // At least for X86 targets; maybe good for other targets too? 7209 MachineFrameInfo &MFI = DAG.getMachineFunction().getFrameInfo(); 7210 MFI.setAdjustsStack(true); // Is this only for the X86 target?
7211 MFI.setHasCalls(true); 7212 7213 assert((GA->getOffset() == 0) && 7214 "Emulated TLS must have zero offset in GlobalAddressSDNode"); 7215 return CallResult.first; 7216 } 7217 7218 SDValue TargetLowering::lowerCmpEqZeroToCtlzSrl(SDValue Op, 7219 SelectionDAG &DAG) const { 7220 assert((Op->getOpcode() == ISD::SETCC) && "Input has to be a SETCC node."); 7221 if (!isCtlzFast()) 7222 return SDValue(); 7223 ISD::CondCode CC = cast<CondCodeSDNode>(Op.getOperand(2))->get(); 7224 SDLoc dl(Op); 7225 if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op.getOperand(1))) { 7226 if (C->isNullValue() && CC == ISD::SETEQ) { 7227 EVT VT = Op.getOperand(0).getValueType(); 7228 SDValue Zext = Op.getOperand(0); 7229 if (VT.bitsLT(MVT::i32)) { 7230 VT = MVT::i32; 7231 Zext = DAG.getNode(ISD::ZERO_EXTEND, dl, VT, Op.getOperand(0)); 7232 } 7233 unsigned Log2b = Log2_32(VT.getSizeInBits()); 7234 SDValue Clz = DAG.getNode(ISD::CTLZ, dl, VT, Zext); 7235 SDValue Scc = DAG.getNode(ISD::SRL, dl, VT, Clz, 7236 DAG.getConstant(Log2b, dl, MVT::i32)); 7237 return DAG.getNode(ISD::TRUNCATE, dl, MVT::i32, Scc); 7238 } 7239 } 7240 return SDValue(); 7241 } 7242 7243 SDValue TargetLowering::expandAddSubSat(SDNode *Node, SelectionDAG &DAG) const { 7244 unsigned Opcode = Node->getOpcode(); 7245 SDValue LHS = Node->getOperand(0); 7246 SDValue RHS = Node->getOperand(1); 7247 EVT VT = LHS.getValueType(); 7248 SDLoc dl(Node); 7249 7250 assert(VT == RHS.getValueType() && "Expected operands to be the same type"); 7251 assert(VT.isInteger() && "Expected operands to be integers"); 7252 7253 // usub.sat(a, b) -> umax(a, b) - b 7254 if (Opcode == ISD::USUBSAT && isOperationLegalOrCustom(ISD::UMAX, VT)) { 7255 SDValue Max = DAG.getNode(ISD::UMAX, dl, VT, LHS, RHS); 7256 return DAG.getNode(ISD::SUB, dl, VT, Max, RHS); 7257 } 7258 7259 if (Opcode == ISD::UADDSAT && isOperationLegalOrCustom(ISD::UMIN, VT)) { 7260 SDValue InvRHS = DAG.getNOT(dl, RHS, VT); 7261 SDValue Min = DAG.getNode(ISD::UMIN, dl, VT, LHS, InvRHS); 7262 return DAG.getNode(ISD::ADD, dl, VT, Min, RHS); 7263 } 7264 7265 unsigned OverflowOp; 7266 switch (Opcode) { 7267 case ISD::SADDSAT: 7268 OverflowOp = ISD::SADDO; 7269 break; 7270 case ISD::UADDSAT: 7271 OverflowOp = ISD::UADDO; 7272 break; 7273 case ISD::SSUBSAT: 7274 OverflowOp = ISD::SSUBO; 7275 break; 7276 case ISD::USUBSAT: 7277 OverflowOp = ISD::USUBO; 7278 break; 7279 default: 7280 llvm_unreachable("Expected method to receive signed or unsigned saturation " 7281 "addition or subtraction node."); 7282 } 7283 7284 unsigned BitWidth = LHS.getScalarValueSizeInBits(); 7285 EVT BoolVT = getSetCCResultType(DAG.getDataLayout(), *DAG.getContext(), VT); 7286 SDValue Result = DAG.getNode(OverflowOp, dl, DAG.getVTList(VT, BoolVT), 7287 LHS, RHS); 7288 SDValue SumDiff = Result.getValue(0); 7289 SDValue Overflow = Result.getValue(1); 7290 SDValue Zero = DAG.getConstant(0, dl, VT); 7291 SDValue AllOnes = DAG.getAllOnesConstant(dl, VT); 7292 7293 if (Opcode == ISD::UADDSAT) { 7294 if (getBooleanContents(VT) == ZeroOrNegativeOneBooleanContent) { 7295 // (LHS + RHS) | OverflowMask 7296 SDValue OverflowMask = DAG.getSExtOrTrunc(Overflow, dl, VT); 7297 return DAG.getNode(ISD::OR, dl, VT, SumDiff, OverflowMask); 7298 } 7299 // Overflow ? 0xffff.... 
: (LHS + RHS) 7300 return DAG.getSelect(dl, VT, Overflow, AllOnes, SumDiff); 7301 } else if (Opcode == ISD::USUBSAT) { 7302 if (getBooleanContents(VT) == ZeroOrNegativeOneBooleanContent) { 7303 // (LHS - RHS) & ~OverflowMask 7304 SDValue OverflowMask = DAG.getSExtOrTrunc(Overflow, dl, VT); 7305 SDValue Not = DAG.getNOT(dl, OverflowMask, VT); 7306 return DAG.getNode(ISD::AND, dl, VT, SumDiff, Not); 7307 } 7308 // Overflow ? 0 : (LHS - RHS) 7309 return DAG.getSelect(dl, VT, Overflow, Zero, SumDiff); 7310 } else { 7311 // SatMax -> Overflow && SumDiff < 0 7312 // SatMin -> Overflow && SumDiff >= 0 7313 APInt MinVal = APInt::getSignedMinValue(BitWidth); 7314 APInt MaxVal = APInt::getSignedMaxValue(BitWidth); 7315 SDValue SatMin = DAG.getConstant(MinVal, dl, VT); 7316 SDValue SatMax = DAG.getConstant(MaxVal, dl, VT); 7317 SDValue SumNeg = DAG.getSetCC(dl, BoolVT, SumDiff, Zero, ISD::SETLT); 7318 Result = DAG.getSelect(dl, VT, SumNeg, SatMax, SatMin); 7319 return DAG.getSelect(dl, VT, Overflow, Result, SumDiff); 7320 } 7321 } 7322 7323 SDValue 7324 TargetLowering::expandFixedPointMul(SDNode *Node, SelectionDAG &DAG) const { 7325 assert((Node->getOpcode() == ISD::SMULFIX || 7326 Node->getOpcode() == ISD::UMULFIX || 7327 Node->getOpcode() == ISD::SMULFIXSAT || 7328 Node->getOpcode() == ISD::UMULFIXSAT) && 7329 "Expected a fixed point multiplication opcode"); 7330 7331 SDLoc dl(Node); 7332 SDValue LHS = Node->getOperand(0); 7333 SDValue RHS = Node->getOperand(1); 7334 EVT VT = LHS.getValueType(); 7335 unsigned Scale = Node->getConstantOperandVal(2); 7336 bool Saturating = (Node->getOpcode() == ISD::SMULFIXSAT || 7337 Node->getOpcode() == ISD::UMULFIXSAT); 7338 bool Signed = (Node->getOpcode() == ISD::SMULFIX || 7339 Node->getOpcode() == ISD::SMULFIXSAT); 7340 EVT BoolVT = getSetCCResultType(DAG.getDataLayout(), *DAG.getContext(), VT); 7341 unsigned VTSize = VT.getScalarSizeInBits(); 7342 7343 if (!Scale) { 7344 // [us]mul.fix(a, b, 0) -> mul(a, b) 7345 if (!Saturating) { 7346 if (isOperationLegalOrCustom(ISD::MUL, VT)) 7347 return DAG.getNode(ISD::MUL, dl, VT, LHS, RHS); 7348 } else if (Signed && isOperationLegalOrCustom(ISD::SMULO, VT)) { 7349 SDValue Result = 7350 DAG.getNode(ISD::SMULO, dl, DAG.getVTList(VT, BoolVT), LHS, RHS); 7351 SDValue Product = Result.getValue(0); 7352 SDValue Overflow = Result.getValue(1); 7353 SDValue Zero = DAG.getConstant(0, dl, VT); 7354 7355 APInt MinVal = APInt::getSignedMinValue(VTSize); 7356 APInt MaxVal = APInt::getSignedMaxValue(VTSize); 7357 SDValue SatMin = DAG.getConstant(MinVal, dl, VT); 7358 SDValue SatMax = DAG.getConstant(MaxVal, dl, VT); 7359 SDValue ProdNeg = DAG.getSetCC(dl, BoolVT, Product, Zero, ISD::SETLT); 7360 Result = DAG.getSelect(dl, VT, ProdNeg, SatMax, SatMin); 7361 return DAG.getSelect(dl, VT, Overflow, Result, Product); 7362 } else if (!Signed && isOperationLegalOrCustom(ISD::UMULO, VT)) { 7363 SDValue Result = 7364 DAG.getNode(ISD::UMULO, dl, DAG.getVTList(VT, BoolVT), LHS, RHS); 7365 SDValue Product = Result.getValue(0); 7366 SDValue Overflow = Result.getValue(1); 7367 7368 APInt MaxVal = APInt::getMaxValue(VTSize); 7369 SDValue SatMax = DAG.getConstant(MaxVal, dl, VT); 7370 return DAG.getSelect(dl, VT, Overflow, SatMax, Product); 7371 } 7372 } 7373 7374 assert(((Signed && Scale < VTSize) || (!Signed && Scale <= VTSize)) && 7375 "Expected scale to be less than the number of bits if signed or at " 7376 "most the number of bits if unsigned."); 7377 assert(LHS.getValueType() == RHS.getValueType() && 7378 "Expected both operands to be the 
same type"); 7379 7380 // Get the upper and lower bits of the result. 7381 SDValue Lo, Hi; 7382 unsigned LoHiOp = Signed ? ISD::SMUL_LOHI : ISD::UMUL_LOHI; 7383 unsigned HiOp = Signed ? ISD::MULHS : ISD::MULHU; 7384 if (isOperationLegalOrCustom(LoHiOp, VT)) { 7385 SDValue Result = DAG.getNode(LoHiOp, dl, DAG.getVTList(VT, VT), LHS, RHS); 7386 Lo = Result.getValue(0); 7387 Hi = Result.getValue(1); 7388 } else if (isOperationLegalOrCustom(HiOp, VT)) { 7389 Lo = DAG.getNode(ISD::MUL, dl, VT, LHS, RHS); 7390 Hi = DAG.getNode(HiOp, dl, VT, LHS, RHS); 7391 } else if (VT.isVector()) { 7392 return SDValue(); 7393 } else { 7394 report_fatal_error("Unable to expand fixed point multiplication."); 7395 } 7396 7397 if (Scale == VTSize) 7398 // Result is just the top half since we'd be shifting by the width of the 7399 // operand. Overflow impossible so this works for both UMULFIX and 7400 // UMULFIXSAT. 7401 return Hi; 7402 7403 // The result will need to be shifted right by the scale since both operands 7404 // are scaled. The result is given to us in 2 halves, so we only want part of 7405 // both in the result. 7406 EVT ShiftTy = getShiftAmountTy(VT, DAG.getDataLayout()); 7407 SDValue Result = DAG.getNode(ISD::FSHR, dl, VT, Hi, Lo, 7408 DAG.getConstant(Scale, dl, ShiftTy)); 7409 if (!Saturating) 7410 return Result; 7411 7412 if (!Signed) { 7413 // Unsigned overflow happened if the upper (VTSize - Scale) bits (of the 7414 // widened multiplication) aren't all zeroes. 7415 7416 // Saturate to max if ((Hi >> Scale) != 0), 7417 // which is the same as if (Hi > ((1 << Scale) - 1)) 7418 APInt MaxVal = APInt::getMaxValue(VTSize); 7419 SDValue LowMask = DAG.getConstant(APInt::getLowBitsSet(VTSize, Scale), 7420 dl, VT); 7421 Result = DAG.getSelectCC(dl, Hi, LowMask, 7422 DAG.getConstant(MaxVal, dl, VT), Result, 7423 ISD::SETUGT); 7424 7425 return Result; 7426 } 7427 7428 // Signed overflow happened if the upper (VTSize - Scale + 1) bits (of the 7429 // widened multiplication) aren't all ones or all zeroes. 7430 7431 SDValue SatMin = DAG.getConstant(APInt::getSignedMinValue(VTSize), dl, VT); 7432 SDValue SatMax = DAG.getConstant(APInt::getSignedMaxValue(VTSize), dl, VT); 7433 7434 if (Scale == 0) { 7435 SDValue Sign = DAG.getNode(ISD::SRA, dl, VT, Lo, 7436 DAG.getConstant(VTSize - 1, dl, ShiftTy)); 7437 SDValue Overflow = DAG.getSetCC(dl, BoolVT, Hi, Sign, ISD::SETNE); 7438 // Saturated to SatMin if wide product is negative, and SatMax if wide 7439 // product is positive ... 7440 SDValue Zero = DAG.getConstant(0, dl, VT); 7441 SDValue ResultIfOverflow = DAG.getSelectCC(dl, Hi, Zero, SatMin, SatMax, 7442 ISD::SETLT); 7443 // ... but only if we overflowed. 7444 return DAG.getSelect(dl, VT, Overflow, ResultIfOverflow, Result); 7445 } 7446 7447 // We handled Scale==0 above so all the bits to examine is in Hi. 
7448 7449 // Saturate to max if ((Hi >> (Scale - 1)) > 0), 7450 // which is the same as if (Hi > (1 << (Scale - 1)) - 1) 7451 SDValue LowMask = DAG.getConstant(APInt::getLowBitsSet(VTSize, Scale - 1), 7452 dl, VT); 7453 Result = DAG.getSelectCC(dl, Hi, LowMask, SatMax, Result, ISD::SETGT); 7454 // Saturate to min if (Hi >> (Scale - 1)) < -1), 7455 // which is the same as if (HI < (-1 << (Scale - 1)) 7456 SDValue HighMask = 7457 DAG.getConstant(APInt::getHighBitsSet(VTSize, VTSize - Scale + 1), 7458 dl, VT); 7459 Result = DAG.getSelectCC(dl, Hi, HighMask, SatMin, Result, ISD::SETLT); 7460 return Result; 7461 } 7462 7463 SDValue 7464 TargetLowering::expandFixedPointDiv(unsigned Opcode, const SDLoc &dl, 7465 SDValue LHS, SDValue RHS, 7466 unsigned Scale, SelectionDAG &DAG) const { 7467 assert((Opcode == ISD::SDIVFIX || Opcode == ISD::SDIVFIXSAT || 7468 Opcode == ISD::UDIVFIX || Opcode == ISD::UDIVFIXSAT) && 7469 "Expected a fixed point division opcode"); 7470 7471 EVT VT = LHS.getValueType(); 7472 bool Signed = Opcode == ISD::SDIVFIX || Opcode == ISD::SDIVFIXSAT; 7473 bool Saturating = Opcode == ISD::SDIVFIXSAT || Opcode == ISD::UDIVFIXSAT; 7474 EVT BoolVT = getSetCCResultType(DAG.getDataLayout(), *DAG.getContext(), VT); 7475 7476 // If there is enough room in the type to upscale the LHS or downscale the 7477 // RHS before the division, we can perform it in this type without having to 7478 // resize. For signed operations, the LHS headroom is the number of 7479 // redundant sign bits, and for unsigned ones it is the number of zeroes. 7480 // The headroom for the RHS is the number of trailing zeroes. 7481 unsigned LHSLead = Signed ? DAG.ComputeNumSignBits(LHS) - 1 7482 : DAG.computeKnownBits(LHS).countMinLeadingZeros(); 7483 unsigned RHSTrail = DAG.computeKnownBits(RHS).countMinTrailingZeros(); 7484 7485 // For signed saturating operations, we need to be able to detect true integer 7486 // division overflow; that is, when you have MIN / -EPS. However, this 7487 // is undefined behavior and if we emit divisions that could take such 7488 // values it may cause undesired behavior (arithmetic exceptions on x86, for 7489 // example). 7490 // Avoid this by requiring an extra bit so that we never get this case. 7491 // FIXME: This is a bit unfortunate as it means that for an 8-bit 7-scale 7492 // signed saturating division, we need to emit a whopping 32-bit division. 7493 if (LHSLead + RHSTrail < Scale + (unsigned)(Saturating && Signed)) 7494 return SDValue(); 7495 7496 unsigned LHSShift = std::min(LHSLead, Scale); 7497 unsigned RHSShift = Scale - LHSShift; 7498 7499 // At this point, we know that if we shift the LHS up by LHSShift and the 7500 // RHS down by RHSShift, we can emit a regular division with a final scaling 7501 // factor of Scale. 7502 7503 EVT ShiftTy = getShiftAmountTy(VT, DAG.getDataLayout()); 7504 if (LHSShift) 7505 LHS = DAG.getNode(ISD::SHL, dl, VT, LHS, 7506 DAG.getConstant(LHSShift, dl, ShiftTy)); 7507 if (RHSShift) 7508 RHS = DAG.getNode(Signed ? ISD::SRA : ISD::SRL, dl, VT, RHS, 7509 DAG.getConstant(RHSShift, dl, ShiftTy)); 7510 7511 SDValue Quot; 7512 if (Signed) { 7513 // For signed operations, if the resulting quotient is negative and the 7514 // remainder is nonzero, subtract 1 from the quotient to round towards 7515 // negative infinity. 7516 SDValue Rem; 7517 // FIXME: Ideally we would always produce an SDIVREM here, but if the 7518 // type isn't legal, SDIVREM cannot be expanded. 
There is no reason why 7519 // we couldn't just form a libcall, but the type legalizer doesn't do it. 7520 if (isTypeLegal(VT) && 7521 isOperationLegalOrCustom(ISD::SDIVREM, VT)) { 7522 Quot = DAG.getNode(ISD::SDIVREM, dl, 7523 DAG.getVTList(VT, VT), 7524 LHS, RHS); 7525 Rem = Quot.getValue(1); 7526 Quot = Quot.getValue(0); 7527 } else { 7528 Quot = DAG.getNode(ISD::SDIV, dl, VT, 7529 LHS, RHS); 7530 Rem = DAG.getNode(ISD::SREM, dl, VT, 7531 LHS, RHS); 7532 } 7533 SDValue Zero = DAG.getConstant(0, dl, VT); 7534 SDValue RemNonZero = DAG.getSetCC(dl, BoolVT, Rem, Zero, ISD::SETNE); 7535 SDValue LHSNeg = DAG.getSetCC(dl, BoolVT, LHS, Zero, ISD::SETLT); 7536 SDValue RHSNeg = DAG.getSetCC(dl, BoolVT, RHS, Zero, ISD::SETLT); 7537 SDValue QuotNeg = DAG.getNode(ISD::XOR, dl, BoolVT, LHSNeg, RHSNeg); 7538 SDValue Sub1 = DAG.getNode(ISD::SUB, dl, VT, Quot, 7539 DAG.getConstant(1, dl, VT)); 7540 Quot = DAG.getSelect(dl, VT, 7541 DAG.getNode(ISD::AND, dl, BoolVT, RemNonZero, QuotNeg), 7542 Sub1, Quot); 7543 } else 7544 Quot = DAG.getNode(ISD::UDIV, dl, VT, 7545 LHS, RHS); 7546 7547 return Quot; 7548 } 7549 7550 void TargetLowering::expandUADDSUBO( 7551 SDNode *Node, SDValue &Result, SDValue &Overflow, SelectionDAG &DAG) const { 7552 SDLoc dl(Node); 7553 SDValue LHS = Node->getOperand(0); 7554 SDValue RHS = Node->getOperand(1); 7555 bool IsAdd = Node->getOpcode() == ISD::UADDO; 7556 7557 // If ADD/SUBCARRY is legal, use that instead. 7558 unsigned OpcCarry = IsAdd ? ISD::ADDCARRY : ISD::SUBCARRY; 7559 if (isOperationLegalOrCustom(OpcCarry, Node->getValueType(0))) { 7560 SDValue CarryIn = DAG.getConstant(0, dl, Node->getValueType(1)); 7561 SDValue NodeCarry = DAG.getNode(OpcCarry, dl, Node->getVTList(), 7562 { LHS, RHS, CarryIn }); 7563 Result = SDValue(NodeCarry.getNode(), 0); 7564 Overflow = SDValue(NodeCarry.getNode(), 1); 7565 return; 7566 } 7567 7568 Result = DAG.getNode(IsAdd ? ISD::ADD : ISD::SUB, dl, 7569 LHS.getValueType(), LHS, RHS); 7570 7571 EVT ResultType = Node->getValueType(1); 7572 EVT SetCCType = getSetCCResultType( 7573 DAG.getDataLayout(), *DAG.getContext(), Node->getValueType(0)); 7574 ISD::CondCode CC = IsAdd ? ISD::SETULT : ISD::SETUGT; 7575 SDValue SetCC = DAG.getSetCC(dl, SetCCType, Result, LHS, CC); 7576 Overflow = DAG.getBoolExtOrTrunc(SetCC, dl, ResultType, ResultType); 7577 } 7578 7579 void TargetLowering::expandSADDSUBO( 7580 SDNode *Node, SDValue &Result, SDValue &Overflow, SelectionDAG &DAG) const { 7581 SDLoc dl(Node); 7582 SDValue LHS = Node->getOperand(0); 7583 SDValue RHS = Node->getOperand(1); 7584 bool IsAdd = Node->getOpcode() == ISD::SADDO; 7585 7586 Result = DAG.getNode(IsAdd ? ISD::ADD : ISD::SUB, dl, 7587 LHS.getValueType(), LHS, RHS); 7588 7589 EVT ResultType = Node->getValueType(1); 7590 EVT OType = getSetCCResultType( 7591 DAG.getDataLayout(), *DAG.getContext(), Node->getValueType(0)); 7592 7593 // If SADDSAT/SSUBSAT is legal, compare results to detect overflow. 7594 unsigned OpcSat = IsAdd ? 
  EVT ResultType = Node->getValueType(1);
  EVT SetCCType = getSetCCResultType(
      DAG.getDataLayout(), *DAG.getContext(), Node->getValueType(0));
  ISD::CondCode CC = IsAdd ? ISD::SETULT : ISD::SETUGT;
  SDValue SetCC = DAG.getSetCC(dl, SetCCType, Result, LHS, CC);
  Overflow = DAG.getBoolExtOrTrunc(SetCC, dl, ResultType, ResultType);
}

void TargetLowering::expandSADDSUBO(
    SDNode *Node, SDValue &Result, SDValue &Overflow, SelectionDAG &DAG) const {
  SDLoc dl(Node);
  SDValue LHS = Node->getOperand(0);
  SDValue RHS = Node->getOperand(1);
  bool IsAdd = Node->getOpcode() == ISD::SADDO;

  Result = DAG.getNode(IsAdd ? ISD::ADD : ISD::SUB, dl,
                       LHS.getValueType(), LHS, RHS);

  EVT ResultType = Node->getValueType(1);
  EVT OType = getSetCCResultType(
      DAG.getDataLayout(), *DAG.getContext(), Node->getValueType(0));

  // If SADDSAT/SSUBSAT is legal, compare results to detect overflow.
  unsigned OpcSat = IsAdd ? ISD::SADDSAT : ISD::SSUBSAT;
  if (isOperationLegalOrCustom(OpcSat, LHS.getValueType())) {
    SDValue Sat = DAG.getNode(OpcSat, dl, LHS.getValueType(), LHS, RHS);
    SDValue SetCC = DAG.getSetCC(dl, OType, Result, Sat, ISD::SETNE);
    Overflow = DAG.getBoolExtOrTrunc(SetCC, dl, ResultType, ResultType);
    return;
  }

  SDValue Zero = DAG.getConstant(0, dl, LHS.getValueType());

  // For an addition, the result should be less than one of the operands (LHS)
  // if and only if the other operand (RHS) is negative, otherwise there will
  // be overflow.
  // For a subtraction, the result should be less than one of the operands
  // (LHS) if and only if the other operand (RHS) is (non-zero) positive,
  // otherwise there will be overflow.
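  // Worked example (illustrative only): in i8, 100 + 100 truncates to -56, so
  // Result < LHS even though RHS is non-negative; the XOR of the two tests
  // below is therefore true and overflow is reported. Conversely, -100 + -50
  // truncates to 106, so Result >= LHS while RHS is negative, and overflow is
  // again reported.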
  SDValue ResultLowerThanLHS = DAG.getSetCC(dl, OType, Result, LHS, ISD::SETLT);
  SDValue ConditionRHS =
      DAG.getSetCC(dl, OType, RHS, Zero, IsAdd ? ISD::SETLT : ISD::SETGT);

  Overflow = DAG.getBoolExtOrTrunc(
      DAG.getNode(ISD::XOR, dl, OType, ConditionRHS, ResultLowerThanLHS), dl,
      ResultType, ResultType);
}

bool TargetLowering::expandMULO(SDNode *Node, SDValue &Result,
                                SDValue &Overflow, SelectionDAG &DAG) const {
  SDLoc dl(Node);
  EVT VT = Node->getValueType(0);
  EVT SetCCVT = getSetCCResultType(DAG.getDataLayout(), *DAG.getContext(), VT);
  SDValue LHS = Node->getOperand(0);
  SDValue RHS = Node->getOperand(1);
  bool isSigned = Node->getOpcode() == ISD::SMULO;

  // For power-of-two multiplications we can use a simpler shift expansion.
  if (ConstantSDNode *RHSC = isConstOrConstSplat(RHS)) {
    const APInt &C = RHSC->getAPIntValue();
    // mulo(X, 1 << S) -> { X << S, (X << S) >> S != X }
    if (C.isPowerOf2()) {
      // smulo(x, signed_min) is the same as umulo(x, signed_min).
      bool UseArithShift = isSigned && !C.isMinSignedValue();
      EVT ShiftAmtTy = getShiftAmountTy(VT, DAG.getDataLayout());
      SDValue ShiftAmt = DAG.getConstant(C.logBase2(), dl, ShiftAmtTy);
      Result = DAG.getNode(ISD::SHL, dl, VT, LHS, ShiftAmt);
      Overflow = DAG.getSetCC(dl, SetCCVT,
                              DAG.getNode(UseArithShift ? ISD::SRA : ISD::SRL,
                                          dl, VT, Result, ShiftAmt),
                              LHS, ISD::SETNE);
      return true;
    }
  }

  EVT WideVT = EVT::getIntegerVT(*DAG.getContext(), VT.getScalarSizeInBits() * 2);
  if (VT.isVector())
    WideVT = EVT::getVectorVT(*DAG.getContext(), WideVT,
                              VT.getVectorNumElements());

  SDValue BottomHalf;
  SDValue TopHalf;
  static const unsigned Ops[2][3] =
      { { ISD::MULHU, ISD::UMUL_LOHI, ISD::ZERO_EXTEND },
        { ISD::MULHS, ISD::SMUL_LOHI, ISD::SIGN_EXTEND }};
  if (isOperationLegalOrCustom(Ops[isSigned][0], VT)) {
    BottomHalf = DAG.getNode(ISD::MUL, dl, VT, LHS, RHS);
    TopHalf = DAG.getNode(Ops[isSigned][0], dl, VT, LHS, RHS);
  } else if (isOperationLegalOrCustom(Ops[isSigned][1], VT)) {
    BottomHalf = DAG.getNode(Ops[isSigned][1], dl, DAG.getVTList(VT, VT), LHS,
                             RHS);
    TopHalf = BottomHalf.getValue(1);
  } else if (isTypeLegal(WideVT)) {
    LHS = DAG.getNode(Ops[isSigned][2], dl, WideVT, LHS);
    RHS = DAG.getNode(Ops[isSigned][2], dl, WideVT, RHS);
    SDValue Mul = DAG.getNode(ISD::MUL, dl, WideVT, LHS, RHS);
    BottomHalf = DAG.getNode(ISD::TRUNCATE, dl, VT, Mul);
    SDValue ShiftAmt = DAG.getConstant(VT.getScalarSizeInBits(), dl,
        getShiftAmountTy(WideVT, DAG.getDataLayout()));
    TopHalf = DAG.getNode(ISD::TRUNCATE, dl, VT,
                          DAG.getNode(ISD::SRL, dl, WideVT, Mul, ShiftAmt));
  } else {
    if (VT.isVector())
      return false;

    // We can fall back to a libcall with an illegal type for the MUL if we
    // have a libcall big enough.
    // Also, we can fall back to a division in some cases, but that's a big
    // performance hit in the general case.
    RTLIB::Libcall LC = RTLIB::UNKNOWN_LIBCALL;
    if (WideVT == MVT::i16)
      LC = RTLIB::MUL_I16;
    else if (WideVT == MVT::i32)
      LC = RTLIB::MUL_I32;
    else if (WideVT == MVT::i64)
      LC = RTLIB::MUL_I64;
    else if (WideVT == MVT::i128)
      LC = RTLIB::MUL_I128;
    assert(LC != RTLIB::UNKNOWN_LIBCALL && "Cannot expand this operation!");

    SDValue HiLHS;
    SDValue HiRHS;
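    // Sketch of the split (illustrative, not target-specific): the wide
    // multiply libcall takes each operand as a {low, high} pair. In the
    // signed case below the high half is the sign word, LHS >> (LoSize - 1),
    // i.e. 0 for non-negative values and -1 (all ones) for negative ones; in
    // the unsigned case it is simply zero.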
    if (isSigned) {
      // The high part is obtained by SRA'ing all but one of the bits of the
      // low part.
      unsigned LoSize = VT.getSizeInBits();
      HiLHS =
          DAG.getNode(ISD::SRA, dl, VT, LHS,
                      DAG.getConstant(LoSize - 1, dl,
                                      getPointerTy(DAG.getDataLayout())));
      HiRHS =
          DAG.getNode(ISD::SRA, dl, VT, RHS,
                      DAG.getConstant(LoSize - 1, dl,
                                      getPointerTy(DAG.getDataLayout())));
    } else {
      HiLHS = DAG.getConstant(0, dl, VT);
      HiRHS = DAG.getConstant(0, dl, VT);
    }

    // Here we're passing the 2 arguments explicitly as 4 arguments that are
    // pre-lowered to the correct types. This all depends upon WideVT not
    // being a legal type for the architecture and thus having to be split
    // into two arguments.
    SDValue Ret;
    TargetLowering::MakeLibCallOptions CallOptions;
    CallOptions.setSExt(isSigned);
    CallOptions.setIsPostTypeLegalization(true);
    if (shouldSplitFunctionArgumentsAsLittleEndian(DAG.getDataLayout())) {
      // Halves of WideVT are packed into registers in different order
      // depending on platform endianness. This is usually handled by
      // the C calling convention, but we can't defer to it in
      // the legalizer.
      SDValue Args[] = { LHS, HiLHS, RHS, HiRHS };
      Ret = makeLibCall(DAG, LC, WideVT, Args, CallOptions, dl).first;
    } else {
      SDValue Args[] = { HiLHS, LHS, HiRHS, RHS };
      Ret = makeLibCall(DAG, LC, WideVT, Args, CallOptions, dl).first;
    }
    assert(Ret.getOpcode() == ISD::MERGE_VALUES &&
           "Ret value is a collection of constituent nodes holding result.");
    if (DAG.getDataLayout().isLittleEndian()) {
      // Same as above.
      BottomHalf = Ret.getOperand(0);
      TopHalf = Ret.getOperand(1);
    } else {
      BottomHalf = Ret.getOperand(1);
      TopHalf = Ret.getOperand(0);
    }
  }

  Result = BottomHalf;
  if (isSigned) {
    SDValue ShiftAmt = DAG.getConstant(
        VT.getScalarSizeInBits() - 1, dl,
        getShiftAmountTy(BottomHalf.getValueType(), DAG.getDataLayout()));
    SDValue Sign = DAG.getNode(ISD::SRA, dl, VT, BottomHalf, ShiftAmt);
    Overflow = DAG.getSetCC(dl, SetCCVT, TopHalf, Sign, ISD::SETNE);
  } else {
    Overflow = DAG.getSetCC(dl, SetCCVT, TopHalf,
                            DAG.getConstant(0, dl, VT), ISD::SETNE);
  }

  // Truncate the result if SetCC returns a larger type than needed.
  EVT RType = Node->getValueType(1);
  if (RType.getSizeInBits() < Overflow.getValueSizeInBits())
    Overflow = DAG.getNode(ISD::TRUNCATE, dl, RType, Overflow);

  assert(RType.getSizeInBits() == Overflow.getValueSizeInBits() &&
         "Unexpected result type for S/UMULO legalization");
  return true;
}

SDValue TargetLowering::expandVecReduce(SDNode *Node, SelectionDAG &DAG) const {
  SDLoc dl(Node);
  bool NoNaN = Node->getFlags().hasNoNaNs();
  unsigned BaseOpcode = 0;
  switch (Node->getOpcode()) {
  default: llvm_unreachable("Expected VECREDUCE opcode");
  case ISD::VECREDUCE_FADD: BaseOpcode = ISD::FADD; break;
  case ISD::VECREDUCE_FMUL: BaseOpcode = ISD::FMUL; break;
  case ISD::VECREDUCE_ADD: BaseOpcode = ISD::ADD; break;
  case ISD::VECREDUCE_MUL: BaseOpcode = ISD::MUL; break;
  case ISD::VECREDUCE_AND: BaseOpcode = ISD::AND; break;
  case ISD::VECREDUCE_OR: BaseOpcode = ISD::OR; break;
  case ISD::VECREDUCE_XOR: BaseOpcode = ISD::XOR; break;
  case ISD::VECREDUCE_SMAX: BaseOpcode = ISD::SMAX; break;
  case ISD::VECREDUCE_SMIN: BaseOpcode = ISD::SMIN; break;
  case ISD::VECREDUCE_UMAX: BaseOpcode = ISD::UMAX; break;
  case ISD::VECREDUCE_UMIN: BaseOpcode = ISD::UMIN; break;
  case ISD::VECREDUCE_FMAX:
    BaseOpcode = NoNaN ? ISD::FMAXNUM : ISD::FMAXIMUM;
    break;
  case ISD::VECREDUCE_FMIN:
    BaseOpcode = NoNaN ? ISD::FMINNUM : ISD::FMINIMUM;
    break;
  }

  SDValue Op = Node->getOperand(0);
  EVT VT = Op.getValueType();

  // Try to use a shuffle reduction for power-of-two vectors.
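  // Sketch of the idea (illustrative shapes only): a v8i32 VECREDUCE_ADD on a
  // target with legal v4i32 and v2i32 ADDs is first split into two v4i32
  // halves that are added together, then into two v2i32 halves, leaving a
  // short vector that the scalar loop below finishes element by element.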
  if (VT.isPow2VectorType()) {
    while (VT.getVectorNumElements() > 1) {
      EVT HalfVT = VT.getHalfNumVectorElementsVT(*DAG.getContext());
      if (!isOperationLegalOrCustom(BaseOpcode, HalfVT))
        break;

      SDValue Lo, Hi;
      std::tie(Lo, Hi) = DAG.SplitVector(Op, dl);
      Op = DAG.getNode(BaseOpcode, dl, HalfVT, Lo, Hi);
      VT = HalfVT;
    }
  }

  EVT EltVT = VT.getVectorElementType();
  unsigned NumElts = VT.getVectorNumElements();

  SmallVector<SDValue, 8> Ops;
  DAG.ExtractVectorElements(Op, Ops, 0, NumElts);

  SDValue Res = Ops[0];
  for (unsigned i = 1; i < NumElts; i++)
    Res = DAG.getNode(BaseOpcode, dl, EltVT, Res, Ops[i], Node->getFlags());

  // Result type may be wider than element type.
  if (EltVT != Node->getValueType(0))
    Res = DAG.getNode(ISD::ANY_EXTEND, dl, Node->getValueType(0), Res);
  return Res;
}