//===-- TargetLowering.cpp - Implement the TargetLowering class -----------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This implements the TargetLowering class.
//
//===----------------------------------------------------------------------===//

#include "llvm/CodeGen/TargetLowering.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/CodeGen/CallingConvLower.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineJumpTableInfo.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/SelectionDAG.h"
#include "llvm/CodeGen/TargetRegisterInfo.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/GlobalVariable.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/MC/MCAsmInfo.h"
#include "llvm/MC/MCExpr.h"
#include "llvm/Support/DivisionByConstantInfo.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/KnownBits.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/Target/TargetMachine.h"
#include <cctype>
using namespace llvm;

/// NOTE: The TargetMachine owns TLOF.
TargetLowering::TargetLowering(const TargetMachine &tm)
    : TargetLoweringBase(tm) {}

const char *TargetLowering::getTargetNodeName(unsigned Opcode) const {
  return nullptr;
}

bool TargetLowering::isPositionIndependent() const {
  return getTargetMachine().isPositionIndependent();
}

/// Check whether a given call node is in tail position within its function. If
/// so, it sets Chain to the input chain of the tail call.
bool TargetLowering::isInTailCallPosition(SelectionDAG &DAG, SDNode *Node,
                                          SDValue &Chain) const {
  const Function &F = DAG.getMachineFunction().getFunction();

  // First, check if tail calls have been disabled in this function.
  if (F.getFnAttribute("disable-tail-calls").getValueAsBool())
    return false;

  // Conservatively require the attributes of the call to match those of
  // the return. Ignore the following attributes because they don't affect the
  // call sequence.
  AttrBuilder CallerAttrs(F.getContext(), F.getAttributes().getRetAttrs());
  for (const auto &Attr : {Attribute::Alignment, Attribute::Dereferenceable,
                           Attribute::DereferenceableOrNull, Attribute::NoAlias,
                           Attribute::NonNull, Attribute::NoUndef})
    CallerAttrs.removeAttribute(Attr);

  if (CallerAttrs.hasAttributes())
    return false;

  // It's not safe to eliminate the sign / zero extension of the return value.
  if (CallerAttrs.contains(Attribute::ZExt) ||
      CallerAttrs.contains(Attribute::SExt))
    return false;

  // Check if the only use is a function return node.
  return isUsedByReturnOnly(Node, Chain);
}

bool TargetLowering::parametersInCSRMatch(const MachineRegisterInfo &MRI,
    const uint32_t *CallerPreservedMask,
    const SmallVectorImpl<CCValAssign> &ArgLocs,
    const SmallVectorImpl<SDValue> &OutVals) const {
  for (unsigned I = 0, E = ArgLocs.size(); I != E; ++I) {
    const CCValAssign &ArgLoc = ArgLocs[I];
    if (!ArgLoc.isRegLoc())
      continue;
    MCRegister Reg = ArgLoc.getLocReg();
    // Only look at callee saved registers.
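    // (A register the call clobbers imposes no matching requirement on a
    //  tail call, so it is skipped; only preserved registers must still hold
    //  the caller's incoming value.)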
    if (MachineOperand::clobbersPhysReg(CallerPreservedMask, Reg))
      continue;
    // Check that we pass the value used for the caller.
    // (We look for a CopyFromReg reading a virtual register that is used
    //  for the function live-in value of register Reg)
    SDValue Value = OutVals[I];
    if (Value->getOpcode() == ISD::AssertZext)
      Value = Value.getOperand(0);
    if (Value->getOpcode() != ISD::CopyFromReg)
      return false;
    Register ArgReg = cast<RegisterSDNode>(Value->getOperand(1))->getReg();
    if (MRI.getLiveInPhysReg(ArgReg) != Reg)
      return false;
  }
  return true;
}

/// Set CallLoweringInfo attribute flags based on a call instruction
/// and called function attributes.
void TargetLoweringBase::ArgListEntry::setAttributes(const CallBase *Call,
                                                     unsigned ArgIdx) {
  IsSExt = Call->paramHasAttr(ArgIdx, Attribute::SExt);
  IsZExt = Call->paramHasAttr(ArgIdx, Attribute::ZExt);
  IsInReg = Call->paramHasAttr(ArgIdx, Attribute::InReg);
  IsSRet = Call->paramHasAttr(ArgIdx, Attribute::StructRet);
  IsNest = Call->paramHasAttr(ArgIdx, Attribute::Nest);
  IsByVal = Call->paramHasAttr(ArgIdx, Attribute::ByVal);
  IsPreallocated = Call->paramHasAttr(ArgIdx, Attribute::Preallocated);
  IsInAlloca = Call->paramHasAttr(ArgIdx, Attribute::InAlloca);
  IsReturned = Call->paramHasAttr(ArgIdx, Attribute::Returned);
  IsSwiftSelf = Call->paramHasAttr(ArgIdx, Attribute::SwiftSelf);
  IsSwiftAsync = Call->paramHasAttr(ArgIdx, Attribute::SwiftAsync);
  IsSwiftError = Call->paramHasAttr(ArgIdx, Attribute::SwiftError);
  Alignment = Call->getParamStackAlign(ArgIdx);
  IndirectType = nullptr;
  assert(IsByVal + IsPreallocated + IsInAlloca + IsSRet <= 1 &&
         "multiple ABI attributes?");
  if (IsByVal) {
    IndirectType = Call->getParamByValType(ArgIdx);
    if (!Alignment)
      Alignment = Call->getParamAlign(ArgIdx);
  }
  if (IsPreallocated)
    IndirectType = Call->getParamPreallocatedType(ArgIdx);
  if (IsInAlloca)
    IndirectType = Call->getParamInAllocaType(ArgIdx);
  if (IsSRet)
    IndirectType = Call->getParamStructRetType(ArgIdx);
}

/// Generate a libcall taking the given operands as arguments and returning a
/// result of type RetVT.
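///
/// A minimal usage sketch (illustrative only: RTLIB::ADD_F64 stands in for
/// whatever libcall the caller has selected, and TLI/DAG/dl/LHS/RHS are
/// assumed to be in scope):
/// \code
///   TargetLowering::MakeLibCallOptions CallOptions;
///   SDValue Ops[2] = {LHS, RHS};
///   std::pair<SDValue, SDValue> Res =
///       TLI.makeLibCall(DAG, RTLIB::ADD_F64, MVT::f64, Ops, CallOptions, dl);
///   // Res.first is the call result, Res.second the output chain.
/// \endcode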
std::pair<SDValue, SDValue>
TargetLowering::makeLibCall(SelectionDAG &DAG, RTLIB::Libcall LC, EVT RetVT,
                            ArrayRef<SDValue> Ops,
                            MakeLibCallOptions CallOptions,
                            const SDLoc &dl,
                            SDValue InChain) const {
  if (!InChain)
    InChain = DAG.getEntryNode();

  TargetLowering::ArgListTy Args;
  Args.reserve(Ops.size());

  TargetLowering::ArgListEntry Entry;
  for (unsigned i = 0; i < Ops.size(); ++i) {
    SDValue NewOp = Ops[i];
    Entry.Node = NewOp;
    Entry.Ty = Entry.Node.getValueType().getTypeForEVT(*DAG.getContext());
    Entry.IsSExt = shouldSignExtendTypeInLibCall(NewOp.getValueType(),
                                                 CallOptions.IsSExt);
    Entry.IsZExt = !Entry.IsSExt;

    if (CallOptions.IsSoften &&
        !shouldExtendTypeInLibCall(CallOptions.OpsVTBeforeSoften[i])) {
      Entry.IsSExt = Entry.IsZExt = false;
    }
    Args.push_back(Entry);
  }

  if (LC == RTLIB::UNKNOWN_LIBCALL)
    report_fatal_error("Unsupported library call operation!");
  SDValue Callee = DAG.getExternalSymbol(getLibcallName(LC),
                                         getPointerTy(DAG.getDataLayout()));

  Type *RetTy = RetVT.getTypeForEVT(*DAG.getContext());
  TargetLowering::CallLoweringInfo CLI(DAG);
  bool signExtend = shouldSignExtendTypeInLibCall(RetVT, CallOptions.IsSExt);
  bool zeroExtend = !signExtend;

  if (CallOptions.IsSoften &&
      !shouldExtendTypeInLibCall(CallOptions.RetVTBeforeSoften)) {
    signExtend = zeroExtend = false;
  }

  CLI.setDebugLoc(dl)
      .setChain(InChain)
      .setLibCallee(getLibcallCallingConv(LC), RetTy, Callee, std::move(Args))
      .setNoReturn(CallOptions.DoesNotReturn)
      .setDiscardResult(!CallOptions.IsReturnValueUsed)
      .setIsPostTypeLegalization(CallOptions.IsPostTypeLegalization)
      .setSExtResult(signExtend)
      .setZExtResult(zeroExtend);
  return LowerCallTo(CLI);
}

bool TargetLowering::findOptimalMemOpLowering(
    std::vector<EVT> &MemOps, unsigned Limit, const MemOp &Op, unsigned DstAS,
    unsigned SrcAS, const AttributeList &FuncAttributes) const {
  if (Op.isMemcpyWithFixedDstAlign() && Op.getSrcAlign() < Op.getDstAlign())
    return false;

  EVT VT = getOptimalMemOpType(Op, FuncAttributes);

  if (VT == MVT::Other) {
    // Use the largest integer type whose alignment constraints are satisfied.
    // We only need to check DstAlign here as SrcAlign is always greater than
    // or equal to DstAlign (or zero).
    VT = MVT::i64;
    if (Op.isFixedDstAlign())
      while (Op.getDstAlign() < (VT.getSizeInBits() / 8) &&
             !allowsMisalignedMemoryAccesses(VT, DstAS, Op.getDstAlign()))
        VT = (MVT::SimpleValueType)(VT.getSimpleVT().SimpleTy - 1);
    assert(VT.isInteger());

    // Find the largest legal integer type.
    MVT LVT = MVT::i64;
    while (!isTypeLegal(LVT))
      LVT = (MVT::SimpleValueType)(LVT.SimpleTy - 1);
    assert(LVT.isInteger());

    // If the type we've chosen is larger than the largest legal integer type
    // then use that instead.
    if (VT.bitsGT(LVT))
      VT = LVT;
  }

  unsigned NumMemOps = 0;
  uint64_t Size = Op.size();
  while (Size) {
    unsigned VTSize = VT.getSizeInBits() / 8;
    while (VTSize > Size) {
      // For now, only use non-vector load / stores for the left-over pieces.
      EVT NewVT = VT;
      unsigned NewVTSize;

      bool Found = false;
      if (VT.isVector() || VT.isFloatingPoint()) {
        NewVT = (VT.getSizeInBits() > 64) ? MVT::i64 : MVT::i32;
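        // e.g. with VT = v4i32 and 6 bytes remaining this tries i64 first;
        // a later pass of the loop narrows to i32, so the tail typically
        // becomes an i32 followed by an i16 (assuming the overlapping-store
        // shortcut below does not apply).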
        if (isOperationLegalOrCustom(ISD::STORE, NewVT) &&
            isSafeMemOpType(NewVT.getSimpleVT()))
          Found = true;
        else if (NewVT == MVT::i64 &&
                 isOperationLegalOrCustom(ISD::STORE, MVT::f64) &&
                 isSafeMemOpType(MVT::f64)) {
          // i64 is usually not legal on 32-bit targets, but f64 may be.
          NewVT = MVT::f64;
          Found = true;
        }
      }

      if (!Found) {
        do {
          NewVT = (MVT::SimpleValueType)(NewVT.getSimpleVT().SimpleTy - 1);
          if (NewVT == MVT::i8)
            break;
        } while (!isSafeMemOpType(NewVT.getSimpleVT()));
      }
      NewVTSize = NewVT.getSizeInBits() / 8;

      // If the new VT cannot cover all of the remaining bits, then consider
      // issuing a (or a pair of) unaligned and overlapping load / store.
      bool Fast;
      if (NumMemOps && Op.allowOverlap() && NewVTSize < Size &&
          allowsMisalignedMemoryAccesses(
              VT, DstAS, Op.isFixedDstAlign() ? Op.getDstAlign() : Align(1),
              MachineMemOperand::MONone, &Fast) &&
          Fast)
        VTSize = Size;
      else {
        VT = NewVT;
        VTSize = NewVTSize;
      }
    }

    if (++NumMemOps > Limit)
      return false;

    MemOps.push_back(VT);
    Size -= VTSize;
  }

  return true;
}

/// Soften the operands of a comparison. This code is shared among BR_CC,
/// SELECT_CC, and SETCC handlers.
void TargetLowering::softenSetCCOperands(SelectionDAG &DAG, EVT VT,
                                         SDValue &NewLHS, SDValue &NewRHS,
                                         ISD::CondCode &CCCode,
                                         const SDLoc &dl, const SDValue OldLHS,
                                         const SDValue OldRHS) const {
  SDValue Chain;
  return softenSetCCOperands(DAG, VT, NewLHS, NewRHS, CCCode, dl, OldLHS,
                             OldRHS, Chain);
}

void TargetLowering::softenSetCCOperands(SelectionDAG &DAG, EVT VT,
                                         SDValue &NewLHS, SDValue &NewRHS,
                                         ISD::CondCode &CCCode,
                                         const SDLoc &dl, const SDValue OldLHS,
                                         const SDValue OldRHS,
                                         SDValue &Chain,
                                         bool IsSignaling) const {
  // FIXME: Currently we cannot really respect all IEEE predicates due to
  // libgcc not supporting it. We can update this code when libgcc provides
  // such functions.

  assert((VT == MVT::f32 || VT == MVT::f64 || VT == MVT::f128 ||
          VT == MVT::ppcf128) &&
         "Unsupported setcc type!");

  // Expand into one or more soft-fp libcall(s).
  RTLIB::Libcall LC1 = RTLIB::UNKNOWN_LIBCALL, LC2 = RTLIB::UNKNOWN_LIBCALL;
  bool ShouldInvertCC = false;
  switch (CCCode) {
  case ISD::SETEQ:
  case ISD::SETOEQ:
    LC1 = (VT == MVT::f32) ? RTLIB::OEQ_F32 :
          (VT == MVT::f64) ? RTLIB::OEQ_F64 :
          (VT == MVT::f128) ? RTLIB::OEQ_F128 : RTLIB::OEQ_PPCF128;
    break;
  case ISD::SETNE:
  case ISD::SETUNE:
    LC1 = (VT == MVT::f32) ? RTLIB::UNE_F32 :
          (VT == MVT::f64) ? RTLIB::UNE_F64 :
          (VT == MVT::f128) ? RTLIB::UNE_F128 : RTLIB::UNE_PPCF128;
    break;
  case ISD::SETGE:
  case ISD::SETOGE:
    LC1 = (VT == MVT::f32) ? RTLIB::OGE_F32 :
          (VT == MVT::f64) ? RTLIB::OGE_F64 :
          (VT == MVT::f128) ? RTLIB::OGE_F128 : RTLIB::OGE_PPCF128;
    break;
  case ISD::SETLT:
  case ISD::SETOLT:
    LC1 = (VT == MVT::f32) ? RTLIB::OLT_F32 :
          (VT == MVT::f64) ? RTLIB::OLT_F64 :
          (VT == MVT::f128) ? RTLIB::OLT_F128 : RTLIB::OLT_PPCF128;
    break;
  case ISD::SETLE:
  case ISD::SETOLE:
    LC1 = (VT == MVT::f32) ? RTLIB::OLE_F32 :
          (VT == MVT::f64) ? RTLIB::OLE_F64 :
          (VT == MVT::f128) ? RTLIB::OLE_F128 : RTLIB::OLE_PPCF128;
    break;
  case ISD::SETGT:
  case ISD::SETOGT:
    LC1 = (VT == MVT::f32) ? RTLIB::OGT_F32 :
          (VT == MVT::f64) ? RTLIB::OGT_F64 :
          (VT == MVT::f128) ? RTLIB::OGT_F128 : RTLIB::OGT_PPCF128;
    break;
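  // SETO has no libcall of its own: it is the inverse of SETUO, so the
  // unordered (UO_*) libcall is emitted and the condition inverted below.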
  case ISD::SETO:
    ShouldInvertCC = true;
    LLVM_FALLTHROUGH;
  case ISD::SETUO:
    LC1 = (VT == MVT::f32) ? RTLIB::UO_F32 :
          (VT == MVT::f64) ? RTLIB::UO_F64 :
          (VT == MVT::f128) ? RTLIB::UO_F128 : RTLIB::UO_PPCF128;
    break;
  case ISD::SETONE:
    // SETONE = O && UNE
    ShouldInvertCC = true;
    LLVM_FALLTHROUGH;
  case ISD::SETUEQ:
    LC1 = (VT == MVT::f32) ? RTLIB::UO_F32 :
          (VT == MVT::f64) ? RTLIB::UO_F64 :
          (VT == MVT::f128) ? RTLIB::UO_F128 : RTLIB::UO_PPCF128;
    LC2 = (VT == MVT::f32) ? RTLIB::OEQ_F32 :
          (VT == MVT::f64) ? RTLIB::OEQ_F64 :
          (VT == MVT::f128) ? RTLIB::OEQ_F128 : RTLIB::OEQ_PPCF128;
    break;
  default:
    // Invert CC for unordered comparisons
    ShouldInvertCC = true;
    switch (CCCode) {
    case ISD::SETULT:
      LC1 = (VT == MVT::f32) ? RTLIB::OGE_F32 :
            (VT == MVT::f64) ? RTLIB::OGE_F64 :
            (VT == MVT::f128) ? RTLIB::OGE_F128 : RTLIB::OGE_PPCF128;
      break;
    case ISD::SETULE:
      LC1 = (VT == MVT::f32) ? RTLIB::OGT_F32 :
            (VT == MVT::f64) ? RTLIB::OGT_F64 :
            (VT == MVT::f128) ? RTLIB::OGT_F128 : RTLIB::OGT_PPCF128;
      break;
    case ISD::SETUGT:
      LC1 = (VT == MVT::f32) ? RTLIB::OLE_F32 :
            (VT == MVT::f64) ? RTLIB::OLE_F64 :
            (VT == MVT::f128) ? RTLIB::OLE_F128 : RTLIB::OLE_PPCF128;
      break;
    case ISD::SETUGE:
      LC1 = (VT == MVT::f32) ? RTLIB::OLT_F32 :
            (VT == MVT::f64) ? RTLIB::OLT_F64 :
            (VT == MVT::f128) ? RTLIB::OLT_F128 : RTLIB::OLT_PPCF128;
      break;
    default: llvm_unreachable("Do not know how to soften this setcc!");
    }
  }

  // Use the target specific return value for comparison lib calls.
  EVT RetVT = getCmpLibcallReturnType();
  SDValue Ops[2] = {NewLHS, NewRHS};
  TargetLowering::MakeLibCallOptions CallOptions;
  EVT OpsVT[2] = { OldLHS.getValueType(),
                   OldRHS.getValueType() };
  CallOptions.setTypeListBeforeSoften(OpsVT, RetVT, true);
  auto Call = makeLibCall(DAG, LC1, RetVT, Ops, CallOptions, dl, Chain);
  NewLHS = Call.first;
  NewRHS = DAG.getConstant(0, dl, RetVT);

  CCCode = getCmpLibcallCC(LC1);
  if (ShouldInvertCC) {
    assert(RetVT.isInteger());
    CCCode = getSetCCInverse(CCCode, RetVT);
  }

  if (LC2 == RTLIB::UNKNOWN_LIBCALL) {
    // Update Chain.
    Chain = Call.second;
  } else {
    EVT SetCCVT =
        getSetCCResultType(DAG.getDataLayout(), *DAG.getContext(), RetVT);
    SDValue Tmp = DAG.getSetCC(dl, SetCCVT, NewLHS, NewRHS, CCCode);
    auto Call2 = makeLibCall(DAG, LC2, RetVT, Ops, CallOptions, dl, Chain);
    CCCode = getCmpLibcallCC(LC2);
    if (ShouldInvertCC)
      CCCode = getSetCCInverse(CCCode, RetVT);
    NewLHS = DAG.getSetCC(dl, SetCCVT, Call2.first, NewRHS, CCCode);
    if (Chain)
      Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, Call.second,
                          Call2.second);
    NewLHS = DAG.getNode(ShouldInvertCC ? ISD::AND : ISD::OR, dl,
                         Tmp.getValueType(), Tmp, NewLHS);
    NewRHS = SDValue();
  }
}

/// Return the entry encoding for a jump table in the current function. The
/// returned value is a member of the MachineJumpTableInfo::JTEntryKind enum.
unsigned TargetLowering::getJumpTableEncoding() const {
  // In non-pic modes, just use the address of a block.
  if (!isPositionIndependent())
    return MachineJumpTableInfo::EK_BlockAddress;

  // In PIC mode, if the target supports a GPRel32 directive, use it.
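  // (GPRel32 entries are emitted gp-relative, e.g. as .gpword, so the table
  //  itself stays position-independent.)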
  if (getTargetMachine().getMCAsmInfo()->getGPRel32Directive() != nullptr)
    return MachineJumpTableInfo::EK_GPRel32BlockAddress;

  // Otherwise, use a label difference.
  return MachineJumpTableInfo::EK_LabelDifference32;
}

SDValue TargetLowering::getPICJumpTableRelocBase(SDValue Table,
                                                 SelectionDAG &DAG) const {
  // If our PIC model is GP relative, use the global offset table as the base.
  unsigned JTEncoding = getJumpTableEncoding();

  if ((JTEncoding == MachineJumpTableInfo::EK_GPRel64BlockAddress) ||
      (JTEncoding == MachineJumpTableInfo::EK_GPRel32BlockAddress))
    return DAG.getGLOBAL_OFFSET_TABLE(getPointerTy(DAG.getDataLayout()));

  return Table;
}

/// This returns the relocation base for the given PIC jumptable, the same as
/// getPICJumpTableRelocBase, but as an MCExpr.
const MCExpr *
TargetLowering::getPICJumpTableRelocBaseExpr(const MachineFunction *MF,
                                             unsigned JTI, MCContext &Ctx) const {
  // The normal PIC reloc base is the label at the start of the jump table.
  return MCSymbolRefExpr::create(MF->getJTISymbol(JTI, Ctx), Ctx);
}

bool
TargetLowering::isOffsetFoldingLegal(const GlobalAddressSDNode *GA) const {
  const TargetMachine &TM = getTargetMachine();
  const GlobalValue *GV = GA->getGlobal();

  // If the address is not even local to this DSO we will have to load it from
  // a GOT and then add the offset.
  if (!TM.shouldAssumeDSOLocal(*GV->getParent(), GV))
    return false;

  // If the code is position independent we will have to add a base register.
  if (isPositionIndependent())
    return false;

  // Otherwise we can do it.
  return true;
}

//===----------------------------------------------------------------------===//
//  Optimization Methods
//===----------------------------------------------------------------------===//

/// If the specified instruction has a constant integer operand and there are
/// bits set in that constant that are not demanded, then clear those bits and
/// return true.
bool TargetLowering::ShrinkDemandedConstant(SDValue Op,
                                            const APInt &DemandedBits,
                                            const APInt &DemandedElts,
                                            TargetLoweringOpt &TLO) const {
  SDLoc DL(Op);
  unsigned Opcode = Op.getOpcode();

  // Do target-specific constant optimization.
  if (targetShrinkDemandedConstant(Op, DemandedBits, DemandedElts, TLO))
    return TLO.New.getNode();

  // FIXME: ISD::SELECT, ISD::SELECT_CC
  switch (Opcode) {
  default:
    break;
  case ISD::XOR:
  case ISD::AND:
  case ISD::OR: {
    auto *Op1C = dyn_cast<ConstantSDNode>(Op.getOperand(1));
    if (!Op1C || Op1C->isOpaque())
      return false;

    // If this is a 'not' op, don't touch it because that's a canonical form.
    const APInt &C = Op1C->getAPIntValue();
    if (Opcode == ISD::XOR && DemandedBits.isSubsetOf(C))
      return false;

    if (!C.isSubsetOf(DemandedBits)) {
      EVT VT = Op.getValueType();
      SDValue NewC = TLO.DAG.getConstant(DemandedBits & C, DL, VT);
      SDValue NewOp = TLO.DAG.getNode(Opcode, DL, VT, Op.getOperand(0), NewC);
      return TLO.CombineTo(Op, NewOp);
    }

    break;
  }
  }

  return false;
}

bool TargetLowering::ShrinkDemandedConstant(SDValue Op,
                                            const APInt &DemandedBits,
                                            TargetLoweringOpt &TLO) const {
  EVT VT = Op.getValueType();
  APInt DemandedElts = VT.isVector()
                           ? APInt::getAllOnes(VT.getVectorNumElements())
                           : APInt(1, 1);
  return ShrinkDemandedConstant(Op, DemandedBits, DemandedElts, TLO);
}
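// Worked example for ShrinkDemandedConstant, using i8 for brevity: given
// (or x, 0xF0) with DemandedBits = 0x0F, the constant is not a subset of the
// demanded bits, so it shrinks to (0x0F & 0xF0) = 0x00 and later combines can
// fold the 'or' away entirely.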
/// Convert x+y to (VT)((SmallVT)x+(SmallVT)y) if the casts are free.
/// This uses isZExtFree and ZERO_EXTEND for the widening cast, but it could be
/// generalized for targets with other types of implicit widening casts.
bool TargetLowering::ShrinkDemandedOp(SDValue Op, unsigned BitWidth,
                                      const APInt &Demanded,
                                      TargetLoweringOpt &TLO) const {
  assert(Op.getNumOperands() == 2 &&
         "ShrinkDemandedOp only supports binary operators!");
  assert(Op.getNode()->getNumValues() == 1 &&
         "ShrinkDemandedOp only supports nodes with one result!");

  SelectionDAG &DAG = TLO.DAG;
  SDLoc dl(Op);

  // Early return, as this function cannot handle vector types.
  if (Op.getValueType().isVector())
    return false;

  // Don't do this if the node has another user, which may require the
  // full value.
  if (!Op.getNode()->hasOneUse())
    return false;

  // Search for the smallest integer type with free casts to and from
  // Op's type. For expedience, just check power-of-2 integer types.
  const TargetLowering &TLI = DAG.getTargetLoweringInfo();
  unsigned DemandedSize = Demanded.getActiveBits();
  unsigned SmallVTBits = DemandedSize;
  if (!isPowerOf2_32(SmallVTBits))
    SmallVTBits = NextPowerOf2(SmallVTBits);
  for (; SmallVTBits < BitWidth; SmallVTBits = NextPowerOf2(SmallVTBits)) {
    EVT SmallVT = EVT::getIntegerVT(*DAG.getContext(), SmallVTBits);
    if (TLI.isTruncateFree(Op.getValueType(), SmallVT) &&
        TLI.isZExtFree(SmallVT, Op.getValueType())) {
      // We found a type with free casts.
      SDValue X = DAG.getNode(
          Op.getOpcode(), dl, SmallVT,
          DAG.getNode(ISD::TRUNCATE, dl, SmallVT, Op.getOperand(0)),
          DAG.getNode(ISD::TRUNCATE, dl, SmallVT, Op.getOperand(1)));
      assert(DemandedSize <= SmallVTBits && "Narrowed below demanded bits?");
      SDValue Z = DAG.getNode(ISD::ANY_EXTEND, dl, Op.getValueType(), X);
      return TLO.CombineTo(Op, Z);
    }
  }
  return false;
}

bool TargetLowering::SimplifyDemandedBits(SDValue Op, const APInt &DemandedBits,
                                          DAGCombinerInfo &DCI) const {
  SelectionDAG &DAG = DCI.DAG;
  TargetLoweringOpt TLO(DAG, !DCI.isBeforeLegalize(),
                        !DCI.isBeforeLegalizeOps());
  KnownBits Known;

  bool Simplified = SimplifyDemandedBits(Op, DemandedBits, Known, TLO);
  if (Simplified) {
    DCI.AddToWorklist(Op.getNode());
    DCI.CommitTargetLoweringOpt(TLO);
  }
  return Simplified;
}

bool TargetLowering::SimplifyDemandedBits(SDValue Op, const APInt &DemandedBits,
                                          const APInt &DemandedElts,
                                          DAGCombinerInfo &DCI) const {
  SelectionDAG &DAG = DCI.DAG;
  TargetLoweringOpt TLO(DAG, !DCI.isBeforeLegalize(),
                        !DCI.isBeforeLegalizeOps());
  KnownBits Known;

  bool Simplified =
      SimplifyDemandedBits(Op, DemandedBits, DemandedElts, Known, TLO);
  if (Simplified) {
    DCI.AddToWorklist(Op.getNode());
    DCI.CommitTargetLoweringOpt(TLO);
  }
  return Simplified;
}

bool TargetLowering::SimplifyDemandedBits(SDValue Op, const APInt &DemandedBits,
                                          KnownBits &Known,
                                          TargetLoweringOpt &TLO,
                                          unsigned Depth,
                                          bool AssumeSingleUse) const {
  EVT VT = Op.getValueType();

  // TODO: We can probably do more work on calculating the known bits and
  // simplifying the operations for scalable vectors, but for now we just
  // bail out.
  if (VT.isScalableVector()) {
    // Pretend we don't know anything for now.
    Known = KnownBits(DemandedBits.getBitWidth());
    return false;
  }

  APInt DemandedElts = VT.isVector()
                           ? APInt::getAllOnes(VT.getVectorNumElements())
                           : APInt(1, 1);
  return SimplifyDemandedBits(Op, DemandedBits, DemandedElts, Known, TLO, Depth,
                              AssumeSingleUse);
}

// TODO: Can we merge SelectionDAG::GetDemandedBits into this?
// TODO: Under what circumstances can we create nodes? Constant folding?
SDValue TargetLowering::SimplifyMultipleUseDemandedBits(
    SDValue Op, const APInt &DemandedBits, const APInt &DemandedElts,
    SelectionDAG &DAG, unsigned Depth) const {
  // Limit search depth.
  if (Depth >= SelectionDAG::MaxRecursionDepth)
    return SDValue();

  // Ignore UNDEFs.
  if (Op.isUndef())
    return SDValue();

  // Not demanding any bits/elts from Op.
  if (DemandedBits == 0 || DemandedElts == 0)
    return DAG.getUNDEF(Op.getValueType());

  bool IsLE = DAG.getDataLayout().isLittleEndian();
  unsigned NumElts = DemandedElts.getBitWidth();
  unsigned BitWidth = DemandedBits.getBitWidth();
  KnownBits LHSKnown, RHSKnown;
  switch (Op.getOpcode()) {
  case ISD::BITCAST: {
    SDValue Src = peekThroughBitcasts(Op.getOperand(0));
    EVT SrcVT = Src.getValueType();
    EVT DstVT = Op.getValueType();
    if (SrcVT == DstVT)
      return Src;

    unsigned NumSrcEltBits = SrcVT.getScalarSizeInBits();
    unsigned NumDstEltBits = DstVT.getScalarSizeInBits();
    if (NumSrcEltBits == NumDstEltBits)
      if (SDValue V = SimplifyMultipleUseDemandedBits(
              Src, DemandedBits, DemandedElts, DAG, Depth + 1))
        return DAG.getBitcast(DstVT, V);

    if (SrcVT.isVector() && (NumDstEltBits % NumSrcEltBits) == 0) {
      unsigned Scale = NumDstEltBits / NumSrcEltBits;
      unsigned NumSrcElts = SrcVT.getVectorNumElements();
      APInt DemandedSrcBits = APInt::getZero(NumSrcEltBits);
      APInt DemandedSrcElts = APInt::getZero(NumSrcElts);
      for (unsigned i = 0; i != Scale; ++i) {
        unsigned EltOffset = IsLE ? i : (Scale - 1 - i);
        unsigned BitOffset = EltOffset * NumSrcEltBits;
        APInt Sub = DemandedBits.extractBits(NumSrcEltBits, BitOffset);
        if (!Sub.isZero()) {
          DemandedSrcBits |= Sub;
          for (unsigned j = 0; j != NumElts; ++j)
            if (DemandedElts[j])
              DemandedSrcElts.setBit((j * Scale) + i);
        }
      }

      if (SDValue V = SimplifyMultipleUseDemandedBits(
              Src, DemandedSrcBits, DemandedSrcElts, DAG, Depth + 1))
        return DAG.getBitcast(DstVT, V);
    }

    // TODO - bigendian once we have test coverage.
    if (IsLE && (NumSrcEltBits % NumDstEltBits) == 0) {
      unsigned Scale = NumSrcEltBits / NumDstEltBits;
      unsigned NumSrcElts = SrcVT.isVector() ? SrcVT.getVectorNumElements() : 1;
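      // e.g. for a v2i64 -> v4i32 bitcast on a little-endian target,
      // demanding i32 element 1 maps to bits [32,64) of i64 element 0.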
      APInt DemandedSrcBits = APInt::getZero(NumSrcEltBits);
      APInt DemandedSrcElts = APInt::getZero(NumSrcElts);
      for (unsigned i = 0; i != NumElts; ++i)
        if (DemandedElts[i]) {
          unsigned Offset = (i % Scale) * NumDstEltBits;
          DemandedSrcBits.insertBits(DemandedBits, Offset);
          DemandedSrcElts.setBit(i / Scale);
        }

      if (SDValue V = SimplifyMultipleUseDemandedBits(
              Src, DemandedSrcBits, DemandedSrcElts, DAG, Depth + 1))
        return DAG.getBitcast(DstVT, V);
    }

    break;
  }
  case ISD::AND: {
    LHSKnown = DAG.computeKnownBits(Op.getOperand(0), DemandedElts, Depth + 1);
    RHSKnown = DAG.computeKnownBits(Op.getOperand(1), DemandedElts, Depth + 1);

    // If all of the demanded bits are known 1 on one side, return the other.
    // These bits cannot contribute to the result of the 'and' in this
    // context.
    if (DemandedBits.isSubsetOf(LHSKnown.Zero | RHSKnown.One))
      return Op.getOperand(0);
    if (DemandedBits.isSubsetOf(RHSKnown.Zero | LHSKnown.One))
      return Op.getOperand(1);
    break;
  }
  case ISD::OR: {
    LHSKnown = DAG.computeKnownBits(Op.getOperand(0), DemandedElts, Depth + 1);
    RHSKnown = DAG.computeKnownBits(Op.getOperand(1), DemandedElts, Depth + 1);

    // If all of the demanded bits are known zero on one side, return the
    // other. These bits cannot contribute to the result of the 'or' in this
    // context.
    if (DemandedBits.isSubsetOf(LHSKnown.One | RHSKnown.Zero))
      return Op.getOperand(0);
    if (DemandedBits.isSubsetOf(RHSKnown.One | LHSKnown.Zero))
      return Op.getOperand(1);
    break;
  }
  case ISD::XOR: {
    LHSKnown = DAG.computeKnownBits(Op.getOperand(0), DemandedElts, Depth + 1);
    RHSKnown = DAG.computeKnownBits(Op.getOperand(1), DemandedElts, Depth + 1);

    // If all of the demanded bits are known zero on one side, return the
    // other.
    if (DemandedBits.isSubsetOf(RHSKnown.Zero))
      return Op.getOperand(0);
    if (DemandedBits.isSubsetOf(LHSKnown.Zero))
      return Op.getOperand(1);
    break;
  }
  case ISD::SHL: {
    // If we are only demanding sign bits then we can use the shift source
    // directly.
    if (const APInt *MaxSA =
            DAG.getValidMaximumShiftAmountConstant(Op, DemandedElts)) {
      SDValue Op0 = Op.getOperand(0);
      unsigned ShAmt = MaxSA->getZExtValue();
      unsigned NumSignBits =
          DAG.ComputeNumSignBits(Op0, DemandedElts, Depth + 1);
      unsigned UpperDemandedBits = BitWidth - DemandedBits.countTrailingZeros();
      if (NumSignBits > ShAmt && (NumSignBits - ShAmt) >= (UpperDemandedBits))
        return Op0;
    }
    break;
  }
  case ISD::SETCC: {
    SDValue Op0 = Op.getOperand(0);
    SDValue Op1 = Op.getOperand(1);
    ISD::CondCode CC = cast<CondCodeSDNode>(Op.getOperand(2))->get();
    // If (1) we only need the sign-bit, (2) the setcc operands are the same
    // width as the setcc result, and (3) the result of a setcc conforms to 0 or
    // -1, we may be able to bypass the setcc.
    if (DemandedBits.isSignMask() &&
        Op0.getScalarValueSizeInBits() == BitWidth &&
        getBooleanContents(Op0.getValueType()) ==
            BooleanContent::ZeroOrNegativeOneBooleanContent) {
      // If we're testing X < 0, then this compare isn't needed - just use X!
      // FIXME: We're limiting to integer types here, but this should also work
      // if we don't care about FP signed-zero. The use of SETLT with FP means
      // that we don't care about NaNs.
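      // e.g. (i32 setcc X, 0, setlt) yields all-ones exactly when X's sign
      // bit is set, so a consumer that only wants the sign bit can use X
      // directly.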
      if (CC == ISD::SETLT && Op1.getValueType().isInteger() &&
          (isNullConstant(Op1) || ISD::isBuildVectorAllZeros(Op1.getNode())))
        return Op0;
    }
    break;
  }
  case ISD::SIGN_EXTEND_INREG: {
    // If none of the extended bits are demanded, eliminate the sextinreg.
    SDValue Op0 = Op.getOperand(0);
    EVT ExVT = cast<VTSDNode>(Op.getOperand(1))->getVT();
    unsigned ExBits = ExVT.getScalarSizeInBits();
    if (DemandedBits.getActiveBits() <= ExBits)
      return Op0;
    // If the input is already sign extended, just drop the extension.
    unsigned NumSignBits = DAG.ComputeNumSignBits(Op0, DemandedElts, Depth + 1);
    if (NumSignBits >= (BitWidth - ExBits + 1))
      return Op0;
    break;
  }
  case ISD::ANY_EXTEND_VECTOR_INREG:
  case ISD::SIGN_EXTEND_VECTOR_INREG:
  case ISD::ZERO_EXTEND_VECTOR_INREG: {
    // If we only want the lowest element and none of the extended bits, then
    // we can return the bitcasted source vector.
    SDValue Src = Op.getOperand(0);
    EVT SrcVT = Src.getValueType();
    EVT DstVT = Op.getValueType();
    if (IsLE && DemandedElts == 1 &&
        DstVT.getSizeInBits() == SrcVT.getSizeInBits() &&
        DemandedBits.getActiveBits() <= SrcVT.getScalarSizeInBits()) {
      return DAG.getBitcast(DstVT, Src);
    }
    break;
  }
  case ISD::INSERT_VECTOR_ELT: {
    // If we don't demand the inserted element, return the base vector.
    SDValue Vec = Op.getOperand(0);
    auto *CIdx = dyn_cast<ConstantSDNode>(Op.getOperand(2));
    EVT VecVT = Vec.getValueType();
    if (CIdx && CIdx->getAPIntValue().ult(VecVT.getVectorNumElements()) &&
        !DemandedElts[CIdx->getZExtValue()])
      return Vec;
    break;
  }
  case ISD::INSERT_SUBVECTOR: {
    SDValue Vec = Op.getOperand(0);
    SDValue Sub = Op.getOperand(1);
    uint64_t Idx = Op.getConstantOperandVal(2);
    unsigned NumSubElts = Sub.getValueType().getVectorNumElements();
    APInt DemandedSubElts = DemandedElts.extractBits(NumSubElts, Idx);
    // If we don't demand the inserted subvector, return the base vector.
    if (DemandedSubElts == 0)
      return Vec;
    // If this simply widens the lowest subvector, see if we can do it earlier.
    if (Idx == 0 && Vec.isUndef()) {
      if (SDValue NewSub = SimplifyMultipleUseDemandedBits(
              Sub, DemandedBits, DemandedSubElts, DAG, Depth + 1))
        return DAG.getNode(Op.getOpcode(), SDLoc(Op), Op.getValueType(),
                           Op.getOperand(0), NewSub, Op.getOperand(2));
    }
    break;
  }
  case ISD::VECTOR_SHUFFLE: {
    ArrayRef<int> ShuffleMask = cast<ShuffleVectorSDNode>(Op)->getMask();

    // If all the demanded elts are from one operand and are inline,
    // then we can use the operand directly.
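    // e.g. shuffle (v4i32 A), (v4i32 B), <0,1,2,3> only references identity
    // lanes of A, so A can be returned directly.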
    bool AllUndef = true, IdentityLHS = true, IdentityRHS = true;
    for (unsigned i = 0; i != NumElts; ++i) {
      int M = ShuffleMask[i];
      if (M < 0 || !DemandedElts[i])
        continue;
      AllUndef = false;
      IdentityLHS &= (M == (int)i);
      IdentityRHS &= ((M - NumElts) == i);
    }

    if (AllUndef)
      return DAG.getUNDEF(Op.getValueType());
    if (IdentityLHS)
      return Op.getOperand(0);
    if (IdentityRHS)
      return Op.getOperand(1);
    break;
  }
  default:
    if (Op.getOpcode() >= ISD::BUILTIN_OP_END)
      if (SDValue V = SimplifyMultipleUseDemandedBitsForTargetNode(
              Op, DemandedBits, DemandedElts, DAG, Depth))
        return V;
    break;
  }
  return SDValue();
}

SDValue TargetLowering::SimplifyMultipleUseDemandedBits(
    SDValue Op, const APInt &DemandedBits, SelectionDAG &DAG,
    unsigned Depth) const {
  EVT VT = Op.getValueType();
  APInt DemandedElts = VT.isVector()
                           ? APInt::getAllOnes(VT.getVectorNumElements())
                           : APInt(1, 1);
  return SimplifyMultipleUseDemandedBits(Op, DemandedBits, DemandedElts, DAG,
                                         Depth);
}

SDValue TargetLowering::SimplifyMultipleUseDemandedVectorElts(
    SDValue Op, const APInt &DemandedElts, SelectionDAG &DAG,
    unsigned Depth) const {
  APInt DemandedBits = APInt::getAllOnes(Op.getScalarValueSizeInBits());
  return SimplifyMultipleUseDemandedBits(Op, DemandedBits, DemandedElts, DAG,
                                         Depth);
}

// Attempt to form ext(avgfloor(A, B)) from shr(add(ext(A), ext(B)), 1),
// or to form ext(avgceil(A, B)) from shr(add(ext(A), ext(B), 1), 1).
static SDValue combineShiftToAVG(SDValue Op, SelectionDAG &DAG,
                                 const TargetLowering &TLI,
                                 const APInt &DemandedBits,
                                 const APInt &DemandedElts,
                                 unsigned Depth) {
  assert((Op.getOpcode() == ISD::SRL || Op.getOpcode() == ISD::SRA) &&
         "SRL or SRA node is required here!");
  // Is the right shift using an immediate value of 1?
  ConstantSDNode *N1C = isConstOrConstSplat(Op.getOperand(1), DemandedElts);
  if (!N1C || !N1C->isOne())
    return SDValue();

  // We are looking for an avgfloor
  //   add(ext, ext)
  // or one of these as an avgceil
  //   add(add(ext, ext), 1)
  //   add(add(ext, 1), ext)
  //   add(ext, add(ext, 1))
  SDValue Add = Op.getOperand(0);
  if (Add.getOpcode() != ISD::ADD)
    return SDValue();

  SDValue ExtOpA = Add.getOperand(0);
  SDValue ExtOpB = Add.getOperand(1);
  auto MatchOperands = [&](SDValue Op1, SDValue Op2, SDValue Op3) {
    ConstantSDNode *ConstOp;
    if ((ConstOp = isConstOrConstSplat(Op1, DemandedElts)) &&
        ConstOp->isOne()) {
      ExtOpA = Op2;
      ExtOpB = Op3;
      return true;
    }
    if ((ConstOp = isConstOrConstSplat(Op2, DemandedElts)) &&
        ConstOp->isOne()) {
      ExtOpA = Op1;
      ExtOpB = Op3;
      return true;
    }
    if ((ConstOp = isConstOrConstSplat(Op3, DemandedElts)) &&
        ConstOp->isOne()) {
      ExtOpA = Op1;
      ExtOpB = Op2;
      return true;
    }
    return false;
  };
  bool IsCeil =
      (ExtOpA.getOpcode() == ISD::ADD &&
       MatchOperands(ExtOpA.getOperand(0), ExtOpA.getOperand(1), ExtOpB)) ||
      (ExtOpB.getOpcode() == ISD::ADD &&
       MatchOperands(ExtOpB.getOperand(0), ExtOpB.getOperand(1), ExtOpA));

  // If the shift is signed (sra):
  //  - Needs >= 2 sign bits for both operands.
  //  - Needs >= 2 zero bits.
  // If the shift is unsigned (srl):
  //  - Needs >= 1 zero bit for both operands.
  //  - Needs 1 demanded bit zero and >= 2 sign bits.
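  // e.g. for (srl (add (zext i8 A to i16), (zext i8 B to i16)), 1), both
  // operands have at least 8 known leading zeros, so the expression can
  // become (zext (avgflooru A, B)) if AVGFLOORU is legal for i8.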
  unsigned ShiftOpc = Op.getOpcode();
  bool IsSigned = false;
  unsigned KnownBits;
  unsigned NumSignedA = DAG.ComputeNumSignBits(ExtOpA, DemandedElts, Depth);
  unsigned NumSignedB = DAG.ComputeNumSignBits(ExtOpB, DemandedElts, Depth);
  unsigned NumSigned = std::min(NumSignedA, NumSignedB) - 1;
  unsigned NumZeroA =
      DAG.computeKnownBits(ExtOpA, DemandedElts, Depth).countMinLeadingZeros();
  unsigned NumZeroB =
      DAG.computeKnownBits(ExtOpB, DemandedElts, Depth).countMinLeadingZeros();
  unsigned NumZero = std::min(NumZeroA, NumZeroB);

  switch (ShiftOpc) {
  default:
    llvm_unreachable("Unexpected ShiftOpc in combineShiftToAVG");
  case ISD::SRA: {
    if (NumZero >= 2 && NumSigned < NumZero) {
      IsSigned = false;
      KnownBits = NumZero;
      break;
    }
    if (NumSigned >= 1) {
      IsSigned = true;
      KnownBits = NumSigned;
      break;
    }
    return SDValue();
  }
  case ISD::SRL: {
    if (NumZero >= 1 && NumSigned < NumZero) {
      IsSigned = false;
      KnownBits = NumZero;
      break;
    }
    if (NumSigned >= 1 && DemandedBits.isSignBitClear()) {
      IsSigned = true;
      KnownBits = NumSigned;
      break;
    }
    return SDValue();
  }
  }

  unsigned AVGOpc = IsCeil ? (IsSigned ? ISD::AVGCEILS : ISD::AVGCEILU)
                           : (IsSigned ? ISD::AVGFLOORS : ISD::AVGFLOORU);

  // Find the smallest power-2 type that is legal for this vector size and
  // operation, given the original type size and the number of known sign/zero
  // bits.
  EVT VT = Op.getValueType();
  unsigned MinWidth =
      std::max<unsigned>(VT.getScalarSizeInBits() - KnownBits, 8);
  EVT NVT = EVT::getIntegerVT(*DAG.getContext(), PowerOf2Ceil(MinWidth));
  if (VT.isVector())
    NVT = EVT::getVectorVT(*DAG.getContext(), NVT, VT.getVectorElementCount());
  if (!TLI.isOperationLegalOrCustom(AVGOpc, NVT))
    return SDValue();

  SDLoc DL(Op);
  SDValue ResultAVG =
      DAG.getNode(AVGOpc, DL, NVT, DAG.getNode(ISD::TRUNCATE, DL, NVT, ExtOpA),
                  DAG.getNode(ISD::TRUNCATE, DL, NVT, ExtOpB));
  return DAG.getNode(IsSigned ? ISD::SIGN_EXTEND : ISD::ZERO_EXTEND, DL, VT,
                     ResultAVG);
}

/// Look at Op. At this point, we know that only the OriginalDemandedBits of the
/// result of Op are ever used downstream. If we can use this information to
/// simplify Op, create a new simplified DAG node and return true, returning the
/// original and new nodes in Old and New. Otherwise, analyze the expression and
/// return a mask of Known bits for the expression (used to simplify the
/// caller). The Known bits may only be accurate for those bits in the
/// OriginalDemandedBits and OriginalDemandedElts.
bool TargetLowering::SimplifyDemandedBits(
    SDValue Op, const APInt &OriginalDemandedBits,
    const APInt &OriginalDemandedElts, KnownBits &Known, TargetLoweringOpt &TLO,
    unsigned Depth, bool AssumeSingleUse) const {
  unsigned BitWidth = OriginalDemandedBits.getBitWidth();
  assert(Op.getScalarValueSizeInBits() == BitWidth &&
         "Mask size mismatches value type size!");

  // Don't know anything.
  Known = KnownBits(BitWidth);

  // TODO: We can probably do more work on calculating the known bits and
  // simplifying the operations for scalable vectors, but for now we just
  // bail out.
  if (Op.getValueType().isScalableVector())
    return false;

  bool IsLE = TLO.DAG.getDataLayout().isLittleEndian();
  unsigned NumElts = OriginalDemandedElts.getBitWidth();
  assert((!Op.getValueType().isVector() ||
          NumElts == Op.getValueType().getVectorNumElements()) &&
         "Unexpected vector size");

  APInt DemandedBits = OriginalDemandedBits;
  APInt DemandedElts = OriginalDemandedElts;
  SDLoc dl(Op);
  auto &DL = TLO.DAG.getDataLayout();

  // Undef operand.
  if (Op.isUndef())
    return false;

  if (Op.getOpcode() == ISD::Constant) {
    // We know all of the bits for a constant!
    Known = KnownBits::makeConstant(cast<ConstantSDNode>(Op)->getAPIntValue());
    return false;
  }

  if (Op.getOpcode() == ISD::ConstantFP) {
    // We know all of the bits for a floating point constant!
    Known = KnownBits::makeConstant(
        cast<ConstantFPSDNode>(Op)->getValueAPF().bitcastToAPInt());
    return false;
  }

  // Other users may use these bits.
  EVT VT = Op.getValueType();
  if (!Op.getNode()->hasOneUse() && !AssumeSingleUse) {
    if (Depth != 0) {
      // If not at the root, just compute the Known bits to
      // simplify things downstream.
      Known = TLO.DAG.computeKnownBits(Op, DemandedElts, Depth);
      return false;
    }
    // If this is the root being simplified, allow it to have multiple uses,
    // just set the DemandedBits/Elts to all bits.
    DemandedBits = APInt::getAllOnes(BitWidth);
    DemandedElts = APInt::getAllOnes(NumElts);
  } else if (OriginalDemandedBits == 0 || OriginalDemandedElts == 0) {
    // Not demanding any bits/elts from Op.
    return TLO.CombineTo(Op, TLO.DAG.getUNDEF(VT));
  } else if (Depth >= SelectionDAG::MaxRecursionDepth) {
    // Limit search depth.
    return false;
  }

  KnownBits Known2;
  switch (Op.getOpcode()) {
  case ISD::TargetConstant:
    llvm_unreachable("Can't simplify this node");
  case ISD::SCALAR_TO_VECTOR: {
    if (!DemandedElts[0])
      return TLO.CombineTo(Op, TLO.DAG.getUNDEF(VT));

    KnownBits SrcKnown;
    SDValue Src = Op.getOperand(0);
    unsigned SrcBitWidth = Src.getScalarValueSizeInBits();
    APInt SrcDemandedBits = DemandedBits.zextOrSelf(SrcBitWidth);
    if (SimplifyDemandedBits(Src, SrcDemandedBits, SrcKnown, TLO, Depth + 1))
      return true;

    // Upper elements are undef, so only get the knownbits if we just demand
    // the bottom element.
    if (DemandedElts == 1)
      Known = SrcKnown.anyextOrTrunc(BitWidth);
    break;
  }
  case ISD::BUILD_VECTOR:
    // Collect the known bits that are shared by every demanded element.
    // TODO: Call SimplifyDemandedBits for non-constant demanded elements.
    Known = TLO.DAG.computeKnownBits(Op, DemandedElts, Depth);
    return false; // Don't fall through, will infinitely loop.
  case ISD::LOAD: {
    auto *LD = cast<LoadSDNode>(Op);
    if (getTargetConstantFromLoad(LD)) {
      Known = TLO.DAG.computeKnownBits(Op, DemandedElts, Depth);
      return false; // Don't fall through, will infinitely loop.
    }
    if (ISD::isZEXTLoad(Op.getNode()) && Op.getResNo() == 0) {
      // If this is a ZEXTLoad and we are looking at the loaded value.
      EVT MemVT = LD->getMemoryVT();
      unsigned MemBits = MemVT.getScalarSizeInBits();
      Known.Zero.setBitsFrom(MemBits);
      return false; // Don't fall through, will infinitely loop.
    }
    break;
  }
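  // For INSERT_VECTOR_ELT below, the known bits of the result are the
  // intersection of what is known about the inserted scalar and, when other
  // lanes are demanded, about the base vector.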
  case ISD::INSERT_VECTOR_ELT: {
    SDValue Vec = Op.getOperand(0);
    SDValue Scl = Op.getOperand(1);
    auto *CIdx = dyn_cast<ConstantSDNode>(Op.getOperand(2));
    EVT VecVT = Vec.getValueType();

    // If index isn't constant, assume we need all vector elements AND the
    // inserted element.
    APInt DemandedVecElts(DemandedElts);
    if (CIdx && CIdx->getAPIntValue().ult(VecVT.getVectorNumElements())) {
      unsigned Idx = CIdx->getZExtValue();
      DemandedVecElts.clearBit(Idx);

      // Inserted element is not required.
      if (!DemandedElts[Idx])
        return TLO.CombineTo(Op, Vec);
    }

    KnownBits KnownScl;
    unsigned NumSclBits = Scl.getScalarValueSizeInBits();
    APInt DemandedSclBits = DemandedBits.zextOrTrunc(NumSclBits);
    if (SimplifyDemandedBits(Scl, DemandedSclBits, KnownScl, TLO, Depth + 1))
      return true;

    Known = KnownScl.anyextOrTrunc(BitWidth);

    KnownBits KnownVec;
    if (SimplifyDemandedBits(Vec, DemandedBits, DemandedVecElts, KnownVec, TLO,
                             Depth + 1))
      return true;

    if (!!DemandedVecElts)
      Known = KnownBits::commonBits(Known, KnownVec);

    return false;
  }
  case ISD::INSERT_SUBVECTOR: {
    // Demand any elements from the subvector and the remainder from the src
    // it's inserted into.
    SDValue Src = Op.getOperand(0);
    SDValue Sub = Op.getOperand(1);
    uint64_t Idx = Op.getConstantOperandVal(2);
    unsigned NumSubElts = Sub.getValueType().getVectorNumElements();
    APInt DemandedSubElts = DemandedElts.extractBits(NumSubElts, Idx);
    APInt DemandedSrcElts = DemandedElts;
    DemandedSrcElts.insertBits(APInt::getZero(NumSubElts), Idx);

    KnownBits KnownSub, KnownSrc;
    if (SimplifyDemandedBits(Sub, DemandedBits, DemandedSubElts, KnownSub, TLO,
                             Depth + 1))
      return true;
    if (SimplifyDemandedBits(Src, DemandedBits, DemandedSrcElts, KnownSrc, TLO,
                             Depth + 1))
      return true;

    Known.Zero.setAllBits();
    Known.One.setAllBits();
    if (!!DemandedSubElts)
      Known = KnownBits::commonBits(Known, KnownSub);
    if (!!DemandedSrcElts)
      Known = KnownBits::commonBits(Known, KnownSrc);

    // Attempt to avoid multi-use src if we don't need anything from it.
    if (!DemandedBits.isAllOnes() || !DemandedSubElts.isAllOnes() ||
        !DemandedSrcElts.isAllOnes()) {
      SDValue NewSub = SimplifyMultipleUseDemandedBits(
          Sub, DemandedBits, DemandedSubElts, TLO.DAG, Depth + 1);
      SDValue NewSrc = SimplifyMultipleUseDemandedBits(
          Src, DemandedBits, DemandedSrcElts, TLO.DAG, Depth + 1);
      if (NewSub || NewSrc) {
        NewSub = NewSub ? NewSub : Sub;
        NewSrc = NewSrc ? NewSrc : Src;
        SDValue NewOp = TLO.DAG.getNode(Op.getOpcode(), dl, VT, NewSrc, NewSub,
                                        Op.getOperand(2));
        return TLO.CombineTo(Op, NewOp);
      }
    }
    break;
  }
  case ISD::EXTRACT_SUBVECTOR: {
    // Offset the demanded elts by the subvector index.
    SDValue Src = Op.getOperand(0);
    if (Src.getValueType().isScalableVector())
      break;
    uint64_t Idx = Op.getConstantOperandVal(1);
    unsigned NumSrcElts = Src.getValueType().getVectorNumElements();
    APInt DemandedSrcElts = DemandedElts.zextOrSelf(NumSrcElts).shl(Idx);

    if (SimplifyDemandedBits(Src, DemandedBits, DemandedSrcElts, Known, TLO,
                             Depth + 1))
      return true;

    // Attempt to avoid multi-use src if we don't need anything from it.
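    // (SimplifyMultipleUseDemandedBits returns an already-existing value that
    //  produces the same demanded bits, letting us bypass a Src that has
    //  other uses.)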
    if (!DemandedBits.isAllOnes() || !DemandedSrcElts.isAllOnes()) {
      SDValue DemandedSrc = SimplifyMultipleUseDemandedBits(
          Src, DemandedBits, DemandedSrcElts, TLO.DAG, Depth + 1);
      if (DemandedSrc) {
        SDValue NewOp = TLO.DAG.getNode(Op.getOpcode(), dl, VT, DemandedSrc,
                                        Op.getOperand(1));
        return TLO.CombineTo(Op, NewOp);
      }
    }
    break;
  }
  case ISD::CONCAT_VECTORS: {
    Known.Zero.setAllBits();
    Known.One.setAllBits();
    EVT SubVT = Op.getOperand(0).getValueType();
    unsigned NumSubVecs = Op.getNumOperands();
    unsigned NumSubElts = SubVT.getVectorNumElements();
    for (unsigned i = 0; i != NumSubVecs; ++i) {
      APInt DemandedSubElts =
          DemandedElts.extractBits(NumSubElts, i * NumSubElts);
      if (SimplifyDemandedBits(Op.getOperand(i), DemandedBits, DemandedSubElts,
                               Known2, TLO, Depth + 1))
        return true;
      // Known bits are shared by every demanded subvector element.
      if (!!DemandedSubElts)
        Known = KnownBits::commonBits(Known, Known2);
    }
    break;
  }
  case ISD::VECTOR_SHUFFLE: {
    ArrayRef<int> ShuffleMask = cast<ShuffleVectorSDNode>(Op)->getMask();

    // Collect demanded elements from shuffle operands.
    APInt DemandedLHS(NumElts, 0);
    APInt DemandedRHS(NumElts, 0);
    for (unsigned i = 0; i != NumElts; ++i) {
      if (!DemandedElts[i])
        continue;
      int M = ShuffleMask[i];
      if (M < 0) {
        // For UNDEF elements, we don't know anything about the common state of
        // the shuffle result.
        DemandedLHS.clearAllBits();
        DemandedRHS.clearAllBits();
        break;
      }
      assert(0 <= M && M < (int)(2 * NumElts) && "Shuffle index out of range");
      if (M < (int)NumElts)
        DemandedLHS.setBit(M);
      else
        DemandedRHS.setBit(M - NumElts);
    }

    if (!!DemandedLHS || !!DemandedRHS) {
      SDValue Op0 = Op.getOperand(0);
      SDValue Op1 = Op.getOperand(1);

      Known.Zero.setAllBits();
      Known.One.setAllBits();
      if (!!DemandedLHS) {
        if (SimplifyDemandedBits(Op0, DemandedBits, DemandedLHS, Known2, TLO,
                                 Depth + 1))
          return true;
        Known = KnownBits::commonBits(Known, Known2);
      }
      if (!!DemandedRHS) {
        if (SimplifyDemandedBits(Op1, DemandedBits, DemandedRHS, Known2, TLO,
                                 Depth + 1))
          return true;
        Known = KnownBits::commonBits(Known, Known2);
      }

      // Attempt to avoid multi-use ops if we don't need anything from them.
      SDValue DemandedOp0 = SimplifyMultipleUseDemandedBits(
          Op0, DemandedBits, DemandedLHS, TLO.DAG, Depth + 1);
      SDValue DemandedOp1 = SimplifyMultipleUseDemandedBits(
          Op1, DemandedBits, DemandedRHS, TLO.DAG, Depth + 1);
      if (DemandedOp0 || DemandedOp1) {
        Op0 = DemandedOp0 ? DemandedOp0 : Op0;
        Op1 = DemandedOp1 ? DemandedOp1 : Op1;
        SDValue NewOp = TLO.DAG.getVectorShuffle(VT, dl, Op0, Op1, ShuffleMask);
        return TLO.CombineTo(Op, NewOp);
      }
    }
    break;
  }
  case ISD::AND: {
    SDValue Op0 = Op.getOperand(0);
    SDValue Op1 = Op.getOperand(1);

    // If the RHS is a constant, check to see if the LHS would be zero without
    // using the bits from the RHS. Below, we use knowledge about the RHS to
    // simplify the LHS; here we're using information from the LHS to simplify
    // the RHS.
    if (ConstantSDNode *RHSC = isConstOrConstSplat(Op1)) {
      // Do not increment Depth here; that can cause an infinite loop.
      KnownBits LHSKnown = TLO.DAG.computeKnownBits(Op0, DemandedElts, Depth);
      // If the LHS already has zeros where RHSC does, this 'and' is dead.
      if ((LHSKnown.Zero & DemandedBits) ==
          (~RHSC->getAPIntValue() & DemandedBits))
        return TLO.CombineTo(Op, Op0);

      // If any of the set bits in the RHS are known zero on the LHS, shrink
      // the constant.
      if (ShrinkDemandedConstant(Op, ~LHSKnown.Zero & DemandedBits,
                                 DemandedElts, TLO))
        return true;

      // Bitwise-not (xor X, -1) is a special case: we don't usually shrink its
      // constant, but if this 'and' is only clearing bits that were just set by
      // the xor, then this 'and' can be eliminated by shrinking the mask of
      // the xor. For example, for a 32-bit X:
      // and (xor (srl X, 31), -1), 1 --> xor (srl X, 31), 1
      if (isBitwiseNot(Op0) && Op0.hasOneUse() &&
          LHSKnown.One == ~RHSC->getAPIntValue()) {
        SDValue Xor = TLO.DAG.getNode(ISD::XOR, dl, VT, Op0.getOperand(0), Op1);
        return TLO.CombineTo(Op, Xor);
      }
    }

    if (SimplifyDemandedBits(Op1, DemandedBits, DemandedElts, Known, TLO,
                             Depth + 1))
      return true;
    assert(!Known.hasConflict() && "Bits known to be one AND zero?");
    if (SimplifyDemandedBits(Op0, ~Known.Zero & DemandedBits, DemandedElts,
                             Known2, TLO, Depth + 1))
      return true;
    assert(!Known2.hasConflict() && "Bits known to be one AND zero?");

    // Attempt to avoid multi-use ops if we don't need anything from them.
    if (!DemandedBits.isAllOnes() || !DemandedElts.isAllOnes()) {
      SDValue DemandedOp0 = SimplifyMultipleUseDemandedBits(
          Op0, DemandedBits, DemandedElts, TLO.DAG, Depth + 1);
      SDValue DemandedOp1 = SimplifyMultipleUseDemandedBits(
          Op1, DemandedBits, DemandedElts, TLO.DAG, Depth + 1);
      if (DemandedOp0 || DemandedOp1) {
        Op0 = DemandedOp0 ? DemandedOp0 : Op0;
        Op1 = DemandedOp1 ? DemandedOp1 : Op1;
        SDValue NewOp = TLO.DAG.getNode(Op.getOpcode(), dl, VT, Op0, Op1);
        return TLO.CombineTo(Op, NewOp);
      }
    }

    // If all of the demanded bits are known one on one side, return the other.
    // These bits cannot contribute to the result of the 'and'.
    if (DemandedBits.isSubsetOf(Known2.Zero | Known.One))
      return TLO.CombineTo(Op, Op0);
    if (DemandedBits.isSubsetOf(Known.Zero | Known2.One))
      return TLO.CombineTo(Op, Op1);
    // If all of the demanded bits in the inputs are known zeros, return zero.
    if (DemandedBits.isSubsetOf(Known.Zero | Known2.Zero))
      return TLO.CombineTo(Op, TLO.DAG.getConstant(0, dl, VT));
    // If the RHS is a constant, see if we can simplify it.
    if (ShrinkDemandedConstant(Op, ~Known2.Zero & DemandedBits, DemandedElts,
                               TLO))
      return true;
    // If the operation can be done in a smaller type, do so.
    if (ShrinkDemandedOp(Op, BitWidth, DemandedBits, TLO))
      return true;

    Known &= Known2;
    break;
  }
  case ISD::OR: {
    SDValue Op0 = Op.getOperand(0);
    SDValue Op1 = Op.getOperand(1);

    if (SimplifyDemandedBits(Op1, DemandedBits, DemandedElts, Known, TLO,
                             Depth + 1))
      return true;
    assert(!Known.hasConflict() && "Bits known to be one AND zero?");
    if (SimplifyDemandedBits(Op0, ~Known.One & DemandedBits, DemandedElts,
                             Known2, TLO, Depth + 1))
      return true;
    assert(!Known2.hasConflict() && "Bits known to be one AND zero?");

    // Attempt to avoid multi-use ops if we don't need anything from them.
    if (!DemandedBits.isAllOnes() || !DemandedElts.isAllOnes()) {
      SDValue DemandedOp0 = SimplifyMultipleUseDemandedBits(
          Op0, DemandedBits, DemandedElts, TLO.DAG, Depth + 1);
      SDValue DemandedOp1 = SimplifyMultipleUseDemandedBits(
          Op1, DemandedBits, DemandedElts, TLO.DAG, Depth + 1);
      if (DemandedOp0 || DemandedOp1) {
        Op0 = DemandedOp0 ? DemandedOp0 : Op0;
        Op1 = DemandedOp1 ? DemandedOp1 : Op1;
        SDValue NewOp = TLO.DAG.getNode(Op.getOpcode(), dl, VT, Op0, Op1);
        return TLO.CombineTo(Op, NewOp);
      }
    }

    // If all of the demanded bits are known zero on one side, return the
    // other. These bits cannot contribute to the result of the 'or'.
    if (DemandedBits.isSubsetOf(Known2.One | Known.Zero))
      return TLO.CombineTo(Op, Op0);
    if (DemandedBits.isSubsetOf(Known.One | Known2.Zero))
      return TLO.CombineTo(Op, Op1);
    // If the RHS is a constant, see if we can simplify it.
    if (ShrinkDemandedConstant(Op, DemandedBits, DemandedElts, TLO))
      return true;
    // If the operation can be done in a smaller type, do so.
    if (ShrinkDemandedOp(Op, BitWidth, DemandedBits, TLO))
      return true;

    Known |= Known2;
    break;
  }
  case ISD::XOR: {
    SDValue Op0 = Op.getOperand(0);
    SDValue Op1 = Op.getOperand(1);

    if (SimplifyDemandedBits(Op1, DemandedBits, DemandedElts, Known, TLO,
                             Depth + 1))
      return true;
    assert(!Known.hasConflict() && "Bits known to be one AND zero?");
    if (SimplifyDemandedBits(Op0, DemandedBits, DemandedElts, Known2, TLO,
                             Depth + 1))
      return true;
    assert(!Known2.hasConflict() && "Bits known to be one AND zero?");

    // Attempt to avoid multi-use ops if we don't need anything from them.
    if (!DemandedBits.isAllOnes() || !DemandedElts.isAllOnes()) {
      SDValue DemandedOp0 = SimplifyMultipleUseDemandedBits(
          Op0, DemandedBits, DemandedElts, TLO.DAG, Depth + 1);
      SDValue DemandedOp1 = SimplifyMultipleUseDemandedBits(
          Op1, DemandedBits, DemandedElts, TLO.DAG, Depth + 1);
      if (DemandedOp0 || DemandedOp1) {
        Op0 = DemandedOp0 ? DemandedOp0 : Op0;
        Op1 = DemandedOp1 ? DemandedOp1 : Op1;
        SDValue NewOp = TLO.DAG.getNode(Op.getOpcode(), dl, VT, Op0, Op1);
        return TLO.CombineTo(Op, NewOp);
      }
    }

    // If all of the demanded bits are known zero on one side, return the other.
    // These bits cannot contribute to the result of the 'xor'.
    if (DemandedBits.isSubsetOf(Known.Zero))
      return TLO.CombineTo(Op, Op0);
    if (DemandedBits.isSubsetOf(Known2.Zero))
      return TLO.CombineTo(Op, Op1);
    // If the operation can be done in a smaller type, do so.
    if (ShrinkDemandedOp(Op, BitWidth, DemandedBits, TLO))
      return true;

    // If all of the unknown bits are known to be zero on one side or the
    // other, turn this into an *inclusive* or.
    // e.g. (A & C1)^(B & C2) -> (A & C1)|(B & C2) iff C1&C2 == 0
    if (DemandedBits.isSubsetOf(Known.Zero | Known2.Zero))
      return TLO.CombineTo(Op, TLO.DAG.getNode(ISD::OR, dl, VT, Op0, Op1));

    ConstantSDNode *C = isConstOrConstSplat(Op1, DemandedElts);
    if (C) {
      // If one side is a constant, and all of the set bits in the constant are
      // also known set on the other side, turn this into an AND, as we know
      // the bits will be cleared.
      // e.g. (X | C1) ^ C2 --> (X | C1) & ~C2 iff (C1&C2) == C2
      // NB: it is okay if more bits are known than are requested
      if (C->getAPIntValue() == Known2.One) {
        SDValue ANDC =
            TLO.DAG.getConstant(~C->getAPIntValue() & DemandedBits, dl, VT);
        return TLO.CombineTo(Op, TLO.DAG.getNode(ISD::AND, dl, VT, Op0, ANDC));
      }

      // If the RHS is a constant, see if we can change it. Don't alter a -1
      // constant because that's a 'not' op, and that is better for combining
      // and codegen.
      if (!C->isAllOnes() && DemandedBits.isSubsetOf(C->getAPIntValue())) {
        // We're flipping all demanded bits. Flip the undemanded bits too.
        SDValue New = TLO.DAG.getNOT(dl, Op0, VT);
        return TLO.CombineTo(Op, New);
      }
    }

    // If we can't turn this into a 'not', try to shrink the constant.
    if (!C || !C->isAllOnes())
      if (ShrinkDemandedConstant(Op, DemandedBits, DemandedElts, TLO))
        return true;

    Known ^= Known2;
    break;
  }
  case ISD::SELECT:
    if (SimplifyDemandedBits(Op.getOperand(2), DemandedBits, Known, TLO,
                             Depth + 1))
      return true;
    if (SimplifyDemandedBits(Op.getOperand(1), DemandedBits, Known2, TLO,
                             Depth + 1))
      return true;
    assert(!Known.hasConflict() && "Bits known to be one AND zero?");
    assert(!Known2.hasConflict() && "Bits known to be one AND zero?");

    // If the operands are constants, see if we can simplify them.
    if (ShrinkDemandedConstant(Op, DemandedBits, DemandedElts, TLO))
      return true;

    // Only known if known in both the LHS and RHS.
    Known = KnownBits::commonBits(Known, Known2);
    break;
  case ISD::SELECT_CC:
    if (SimplifyDemandedBits(Op.getOperand(3), DemandedBits, Known, TLO,
                             Depth + 1))
      return true;
    if (SimplifyDemandedBits(Op.getOperand(2), DemandedBits, Known2, TLO,
                             Depth + 1))
      return true;
    assert(!Known.hasConflict() && "Bits known to be one AND zero?");
    assert(!Known2.hasConflict() && "Bits known to be one AND zero?");

    // If the operands are constants, see if we can simplify them.
    if (ShrinkDemandedConstant(Op, DemandedBits, DemandedElts, TLO))
      return true;

    // Only known if known in both the LHS and RHS.
    Known = KnownBits::commonBits(Known, Known2);
    break;
  case ISD::SETCC: {
    SDValue Op0 = Op.getOperand(0);
    SDValue Op1 = Op.getOperand(1);
    ISD::CondCode CC = cast<CondCodeSDNode>(Op.getOperand(2))->get();
    // If (1) we only need the sign-bit, (2) the setcc operands are the same
    // width as the setcc result, and (3) the result of a setcc conforms to 0 or
    // -1, we may be able to bypass the setcc.
    if (DemandedBits.isSignMask() &&
        Op0.getScalarValueSizeInBits() == BitWidth &&
        getBooleanContents(Op0.getValueType()) ==
            BooleanContent::ZeroOrNegativeOneBooleanContent) {
      // If we're testing X < 0, then this compare isn't needed - just use X!
      // FIXME: We're limiting to integer types here, but this should also work
      // if we don't care about FP signed-zero. The use of SETLT with FP means
      // that we don't care about NaNs.
      if (CC == ISD::SETLT && Op1.getValueType().isInteger() &&
          (isNullConstant(Op1) || ISD::isBuildVectorAllZeros(Op1.getNode())))
        return TLO.CombineTo(Op, Op0);

      // TODO: Should we check for other forms of sign-bit comparisons?
1577 // Examples: X <= -1, X >= 0 1578 } 1579 if (getBooleanContents(Op0.getValueType()) == 1580 TargetLowering::ZeroOrOneBooleanContent && 1581 BitWidth > 1) 1582 Known.Zero.setBitsFrom(1); 1583 break; 1584 } 1585 case ISD::SHL: { 1586 SDValue Op0 = Op.getOperand(0); 1587 SDValue Op1 = Op.getOperand(1); 1588 EVT ShiftVT = Op1.getValueType(); 1589 1590 if (const APInt *SA = 1591 TLO.DAG.getValidShiftAmountConstant(Op, DemandedElts)) { 1592 unsigned ShAmt = SA->getZExtValue(); 1593 if (ShAmt == 0) 1594 return TLO.CombineTo(Op, Op0); 1595 1596 // If this is ((X >>u C1) << ShAmt), see if we can simplify this into a 1597 // single shift. We can do this if the bottom bits (which are shifted 1598 // out) are never demanded. 1599 // TODO - support non-uniform vector amounts. 1600 if (Op0.getOpcode() == ISD::SRL) { 1601 if (!DemandedBits.intersects(APInt::getLowBitsSet(BitWidth, ShAmt))) { 1602 if (const APInt *SA2 = 1603 TLO.DAG.getValidShiftAmountConstant(Op0, DemandedElts)) { 1604 unsigned C1 = SA2->getZExtValue(); 1605 unsigned Opc = ISD::SHL; 1606 int Diff = ShAmt - C1; 1607 if (Diff < 0) { 1608 Diff = -Diff; 1609 Opc = ISD::SRL; 1610 } 1611 SDValue NewSA = TLO.DAG.getConstant(Diff, dl, ShiftVT); 1612 return TLO.CombineTo( 1613 Op, TLO.DAG.getNode(Opc, dl, VT, Op0.getOperand(0), NewSA)); 1614 } 1615 } 1616 } 1617 1618 // Convert (shl (anyext x, c)) to (anyext (shl x, c)) if the high bits 1619 // are not demanded. This will likely allow the anyext to be folded away. 1620 // TODO - support non-uniform vector amounts. 1621 if (Op0.getOpcode() == ISD::ANY_EXTEND) { 1622 SDValue InnerOp = Op0.getOperand(0); 1623 EVT InnerVT = InnerOp.getValueType(); 1624 unsigned InnerBits = InnerVT.getScalarSizeInBits(); 1625 if (ShAmt < InnerBits && DemandedBits.getActiveBits() <= InnerBits && 1626 isTypeDesirableForOp(ISD::SHL, InnerVT)) { 1627 EVT ShTy = getShiftAmountTy(InnerVT, DL); 1628 if (!APInt(BitWidth, ShAmt).isIntN(ShTy.getSizeInBits())) 1629 ShTy = InnerVT; 1630 SDValue NarrowShl = 1631 TLO.DAG.getNode(ISD::SHL, dl, InnerVT, InnerOp, 1632 TLO.DAG.getConstant(ShAmt, dl, ShTy)); 1633 return TLO.CombineTo( 1634 Op, TLO.DAG.getNode(ISD::ANY_EXTEND, dl, VT, NarrowShl)); 1635 } 1636 1637 // Repeat the SHL optimization above in cases where an extension 1638 // intervenes: (shl (anyext (shr x, c1)), c2) to 1639 // (shl (anyext x), c2-c1). This requires that the bottom c1 bits 1640 // aren't demanded (as above) and that the shifted upper c1 bits of 1641 // x aren't demanded. 1642 // TODO - support non-uniform vector amounts. 
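// Illustrative example: with x : i16 any-extended to i32, c1 = 4 and c2 = 8,
// (shl (anyext (srl x, 4)), 8) becomes (shl (anyext x), 4) provided the low
// 8 result bits and everything above bit 19 are not demanded.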
1643 if (Op0.hasOneUse() && InnerOp.getOpcode() == ISD::SRL && 1644 InnerOp.hasOneUse()) { 1645 if (const APInt *SA2 = 1646 TLO.DAG.getValidShiftAmountConstant(InnerOp, DemandedElts)) { 1647 unsigned InnerShAmt = SA2->getZExtValue(); 1648 if (InnerShAmt < ShAmt && InnerShAmt < InnerBits && 1649 DemandedBits.getActiveBits() <= 1650 (InnerBits - InnerShAmt + ShAmt) && 1651 DemandedBits.countTrailingZeros() >= ShAmt) { 1652 SDValue NewSA = 1653 TLO.DAG.getConstant(ShAmt - InnerShAmt, dl, ShiftVT); 1654 SDValue NewExt = TLO.DAG.getNode(ISD::ANY_EXTEND, dl, VT, 1655 InnerOp.getOperand(0)); 1656 return TLO.CombineTo( 1657 Op, TLO.DAG.getNode(ISD::SHL, dl, VT, NewExt, NewSA)); 1658 } 1659 } 1660 } 1661 } 1662 1663 APInt InDemandedMask = DemandedBits.lshr(ShAmt); 1664 if (SimplifyDemandedBits(Op0, InDemandedMask, DemandedElts, Known, TLO, 1665 Depth + 1)) 1666 return true; 1667 assert(!Known.hasConflict() && "Bits known to be one AND zero?"); 1668 Known.Zero <<= ShAmt; 1669 Known.One <<= ShAmt; 1670 // low bits known zero. 1671 Known.Zero.setLowBits(ShAmt); 1672 1673 // Try shrinking the operation as long as the shift amount will still be 1674 // in range. 1675 if ((ShAmt < DemandedBits.getActiveBits()) && 1676 ShrinkDemandedOp(Op, BitWidth, DemandedBits, TLO)) 1677 return true; 1678 } 1679 1680 // If we are only demanding sign bits then we can use the shift source 1681 // directly. 1682 if (const APInt *MaxSA = 1683 TLO.DAG.getValidMaximumShiftAmountConstant(Op, DemandedElts)) { 1684 unsigned ShAmt = MaxSA->getZExtValue(); 1685 unsigned NumSignBits = 1686 TLO.DAG.ComputeNumSignBits(Op0, DemandedElts, Depth + 1); 1687 unsigned UpperDemandedBits = BitWidth - DemandedBits.countTrailingZeros(); 1688 if (NumSignBits > ShAmt && (NumSignBits - ShAmt) >= (UpperDemandedBits)) 1689 return TLO.CombineTo(Op, Op0); 1690 } 1691 break; 1692 } 1693 case ISD::SRL: { 1694 SDValue Op0 = Op.getOperand(0); 1695 SDValue Op1 = Op.getOperand(1); 1696 EVT ShiftVT = Op1.getValueType(); 1697 1698 // Try to match AVG patterns. 1699 if (SDValue AVG = combineShiftToAVG(Op, TLO.DAG, *this, DemandedBits, 1700 DemandedElts, Depth + 1)) 1701 return TLO.CombineTo(Op, AVG); 1702 1703 if (const APInt *SA = 1704 TLO.DAG.getValidShiftAmountConstant(Op, DemandedElts)) { 1705 unsigned ShAmt = SA->getZExtValue(); 1706 if (ShAmt == 0) 1707 return TLO.CombineTo(Op, Op0); 1708 1709 // If this is ((X << C1) >>u ShAmt), see if we can simplify this into a 1710 // single shift. We can do this if the top bits (which are shifted out) 1711 // are never demanded. 1712 // TODO - support non-uniform vector amounts. 1713 if (Op0.getOpcode() == ISD::SHL) { 1714 if (!DemandedBits.intersects(APInt::getHighBitsSet(BitWidth, ShAmt))) { 1715 if (const APInt *SA2 = 1716 TLO.DAG.getValidShiftAmountConstant(Op0, DemandedElts)) { 1717 unsigned C1 = SA2->getZExtValue(); 1718 unsigned Opc = ISD::SRL; 1719 int Diff = ShAmt - C1; 1720 if (Diff < 0) { 1721 Diff = -Diff; 1722 Opc = ISD::SHL; 1723 } 1724 SDValue NewSA = TLO.DAG.getConstant(Diff, dl, ShiftVT); 1725 return TLO.CombineTo( 1726 Op, TLO.DAG.getNode(Opc, dl, VT, Op0.getOperand(0), NewSA)); 1727 } 1728 } 1729 } 1730 1731 APInt InDemandedMask = (DemandedBits << ShAmt); 1732 1733 // If the shift is exact, then it does demand the low bits (and knows that 1734 // they are zero). 1735 if (Op->getFlags().hasExact()) 1736 InDemandedMask.setLowBits(ShAmt); 1737 1738 // Compute the new bits that are at the top now. 
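// e.g. for an i32 (srl x, 8) with DemandedBits == 0x0000FFFF, InDemandedMask
// asks for bits 0x00FFFF00 of x (plus the low 8 bits when the shift is
// 'exact').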
1739 if (SimplifyDemandedBits(Op0, InDemandedMask, DemandedElts, Known, TLO, 1740 Depth + 1)) 1741 return true; 1742 assert(!Known.hasConflict() && "Bits known to be one AND zero?"); 1743 Known.Zero.lshrInPlace(ShAmt); 1744 Known.One.lshrInPlace(ShAmt); 1745 // High bits known zero. 1746 Known.Zero.setHighBits(ShAmt); 1747 } 1748 break; 1749 } 1750 case ISD::SRA: { 1751 SDValue Op0 = Op.getOperand(0); 1752 SDValue Op1 = Op.getOperand(1); 1753 EVT ShiftVT = Op1.getValueType(); 1754 1755 // If we only want bits that already match the signbit then we don't need 1756 // to shift. 1757 unsigned NumHiDemandedBits = BitWidth - DemandedBits.countTrailingZeros(); 1758 if (TLO.DAG.ComputeNumSignBits(Op0, DemandedElts, Depth + 1) >= 1759 NumHiDemandedBits) 1760 return TLO.CombineTo(Op, Op0); 1761 1762 // If this is an arithmetic shift right and only the low-bit is set, we can 1763 // always convert this into a logical shr, even if the shift amount is 1764 // variable. The low bit of the shift cannot be an input sign bit unless 1765 // the shift amount is >= the size of the datatype, which is undefined. 1766 if (DemandedBits.isOne()) 1767 return TLO.CombineTo(Op, TLO.DAG.getNode(ISD::SRL, dl, VT, Op0, Op1)); 1768 1769 // Try to match AVG patterns. 1770 if (SDValue AVG = combineShiftToAVG(Op, TLO.DAG, *this, DemandedBits, 1771 DemandedElts, Depth + 1)) 1772 return TLO.CombineTo(Op, AVG); 1773 1774 if (const APInt *SA = 1775 TLO.DAG.getValidShiftAmountConstant(Op, DemandedElts)) { 1776 unsigned ShAmt = SA->getZExtValue(); 1777 if (ShAmt == 0) 1778 return TLO.CombineTo(Op, Op0); 1779 1780 APInt InDemandedMask = (DemandedBits << ShAmt); 1781 1782 // If the shift is exact, then it does demand the low bits (and knows that 1783 // they are zero). 1784 if (Op->getFlags().hasExact()) 1785 InDemandedMask.setLowBits(ShAmt); 1786 1787 // If any of the demanded bits are produced by the sign extension, we also 1788 // demand the input sign bit. 1789 if (DemandedBits.countLeadingZeros() < ShAmt) 1790 InDemandedMask.setSignBit(); 1791 1792 if (SimplifyDemandedBits(Op0, InDemandedMask, DemandedElts, Known, TLO, 1793 Depth + 1)) 1794 return true; 1795 assert(!Known.hasConflict() && "Bits known to be one AND zero?"); 1796 Known.Zero.lshrInPlace(ShAmt); 1797 Known.One.lshrInPlace(ShAmt); 1798 1799 // If the input sign bit is known to be zero, or if none of the top bits 1800 // are demanded, turn this into an unsigned shift right. 1801 if (Known.Zero[BitWidth - ShAmt - 1] || 1802 DemandedBits.countLeadingZeros() >= ShAmt) { 1803 SDNodeFlags Flags; 1804 Flags.setExact(Op->getFlags().hasExact()); 1805 return TLO.CombineTo( 1806 Op, TLO.DAG.getNode(ISD::SRL, dl, VT, Op0, Op1, Flags)); 1807 } 1808 1809 int Log2 = DemandedBits.exactLogBase2(); 1810 if (Log2 >= 0) { 1811 // The bit must come from the sign. 1812 SDValue NewSA = TLO.DAG.getConstant(BitWidth - 1 - Log2, dl, ShiftVT); 1813 return TLO.CombineTo(Op, TLO.DAG.getNode(ISD::SRL, dl, VT, Op0, NewSA)); 1814 } 1815 1816 if (Known.One[BitWidth - ShAmt - 1]) 1817 // New bits are known one. 1818 Known.One.setHighBits(ShAmt); 1819 1820 // Attempt to avoid multi-use ops if we don't need anything from them. 
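// (A multi-use Op0 can't be simplified in place using only our demanded
// bits, but SimplifyMultipleUseDemandedBits may find an existing value that
// already computes the demanded bits, leaving Op0's other users untouched.)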
1821 if (!InDemandedMask.isAllOnes() || !DemandedElts.isAllOnes()) {
1822 SDValue DemandedOp0 = SimplifyMultipleUseDemandedBits(
1823 Op0, InDemandedMask, DemandedElts, TLO.DAG, Depth + 1);
1824 if (DemandedOp0) {
1825 SDValue NewOp = TLO.DAG.getNode(ISD::SRA, dl, VT, DemandedOp0, Op1);
1826 return TLO.CombineTo(Op, NewOp);
1827 }
1828 }
1829 }
1830 break;
1831 }
1832 case ISD::FSHL:
1833 case ISD::FSHR: {
1834 SDValue Op0 = Op.getOperand(0);
1835 SDValue Op1 = Op.getOperand(1);
1836 SDValue Op2 = Op.getOperand(2);
1837 bool IsFSHL = (Op.getOpcode() == ISD::FSHL);
1838
1839 if (ConstantSDNode *SA = isConstOrConstSplat(Op2, DemandedElts)) {
1840 unsigned Amt = SA->getAPIntValue().urem(BitWidth);
1841
1842 // For fshl, 0-shift returns the 1st arg.
1843 // For fshr, 0-shift returns the 2nd arg.
1844 if (Amt == 0) {
1845 if (SimplifyDemandedBits(IsFSHL ? Op0 : Op1, DemandedBits, DemandedElts,
1846 Known, TLO, Depth + 1))
1847 return true;
1848 break;
1849 }
1850
1851 // fshl: (Op0 << Amt) | (Op1 >> (BW - Amt))
1852 // fshr: (Op0 << (BW - Amt)) | (Op1 >> Amt)
1853 APInt Demanded0 = DemandedBits.lshr(IsFSHL ? Amt : (BitWidth - Amt));
1854 APInt Demanded1 = DemandedBits << (IsFSHL ? (BitWidth - Amt) : Amt);
1855 if (SimplifyDemandedBits(Op0, Demanded0, DemandedElts, Known2, TLO,
1856 Depth + 1))
1857 return true;
1858 if (SimplifyDemandedBits(Op1, Demanded1, DemandedElts, Known, TLO,
1859 Depth + 1))
1860 return true;
1861
1862 Known2.One <<= (IsFSHL ? Amt : (BitWidth - Amt));
1863 Known2.Zero <<= (IsFSHL ? Amt : (BitWidth - Amt));
1864 Known.One.lshrInPlace(IsFSHL ? (BitWidth - Amt) : Amt);
1865 Known.Zero.lshrInPlace(IsFSHL ? (BitWidth - Amt) : Amt);
1866 Known.One |= Known2.One;
1867 Known.Zero |= Known2.Zero;
1868 }
1869
1870 // For pow-2 bitwidths we only demand the bottom modulo amt bits.
1871 if (isPowerOf2_32(BitWidth)) {
1872 APInt DemandedAmtBits(Op2.getScalarValueSizeInBits(), BitWidth - 1);
1873 if (SimplifyDemandedBits(Op2, DemandedAmtBits, DemandedElts,
1874 Known2, TLO, Depth + 1))
1875 return true;
1876 }
1877 break;
1878 }
1879 case ISD::ROTL:
1880 case ISD::ROTR: {
1881 SDValue Op0 = Op.getOperand(0);
1882 SDValue Op1 = Op.getOperand(1);
1883 bool IsROTL = (Op.getOpcode() == ISD::ROTL);
1884
1885 // If we're rotating a 0/-1 value, then it stays a 0/-1 value.
1886 if (BitWidth == TLO.DAG.ComputeNumSignBits(Op0, DemandedElts, Depth + 1))
1887 return TLO.CombineTo(Op, Op0);
1888
1889 if (ConstantSDNode *SA = isConstOrConstSplat(Op1, DemandedElts)) {
1890 unsigned Amt = SA->getAPIntValue().urem(BitWidth);
1891 unsigned RevAmt = BitWidth - Amt;
1892
1893 // rotl: (Op0 << Amt) | (Op0 >> (BW - Amt))
1894 // rotr: (Op0 << (BW - Amt)) | (Op0 >> Amt)
1895 APInt Demanded0 = DemandedBits.rotr(IsROTL ? Amt : RevAmt);
1896 if (SimplifyDemandedBits(Op0, Demanded0, DemandedElts, Known2, TLO,
1897 Depth + 1))
1898 return true;
1899
1900 // rot*(x, 0) --> x
1901 if (Amt == 0)
1902 return TLO.CombineTo(Op, Op0);
1903
1904 // If we only demand one half of the rotated bits, a plain shift suffices.
1905 if ((!TLO.LegalOperations() || isOperationLegal(ISD::SHL, VT)) &&
1906 DemandedBits.countTrailingZeros() >= (IsROTL ? Amt : RevAmt)) {
1907 Op1 = TLO.DAG.getConstant(IsROTL ? Amt : RevAmt, dl, Op1.getValueType());
1908 return TLO.CombineTo(Op, TLO.DAG.getNode(ISD::SHL, dl, VT, Op0, Op1));
1909 }
1910 if ((!TLO.LegalOperations() || isOperationLegal(ISD::SRL, VT)) &&
1911 DemandedBits.countLeadingZeros() >= (IsROTL ? RevAmt : Amt)) {
1912 Op1 = TLO.DAG.getConstant(IsROTL ?
RevAmt : Amt, dl, Op1.getValueType()); 1913 return TLO.CombineTo(Op, TLO.DAG.getNode(ISD::SRL, dl, VT, Op0, Op1)); 1914 } 1915 } 1916 1917 // For pow-2 bitwidths we only demand the bottom modulo amt bits. 1918 if (isPowerOf2_32(BitWidth)) { 1919 APInt DemandedAmtBits(Op1.getScalarValueSizeInBits(), BitWidth - 1); 1920 if (SimplifyDemandedBits(Op1, DemandedAmtBits, DemandedElts, Known2, TLO, 1921 Depth + 1)) 1922 return true; 1923 } 1924 break; 1925 } 1926 case ISD::UMIN: { 1927 // Check if one arg is always less than (or equal) to the other arg. 1928 SDValue Op0 = Op.getOperand(0); 1929 SDValue Op1 = Op.getOperand(1); 1930 KnownBits Known0 = TLO.DAG.computeKnownBits(Op0, DemandedElts, Depth + 1); 1931 KnownBits Known1 = TLO.DAG.computeKnownBits(Op1, DemandedElts, Depth + 1); 1932 Known = KnownBits::umin(Known0, Known1); 1933 if (Optional<bool> IsULE = KnownBits::ule(Known0, Known1)) 1934 return TLO.CombineTo(Op, IsULE.getValue() ? Op0 : Op1); 1935 if (Optional<bool> IsULT = KnownBits::ult(Known0, Known1)) 1936 return TLO.CombineTo(Op, IsULT.getValue() ? Op0 : Op1); 1937 break; 1938 } 1939 case ISD::UMAX: { 1940 // Check if one arg is always greater than (or equal) to the other arg. 1941 SDValue Op0 = Op.getOperand(0); 1942 SDValue Op1 = Op.getOperand(1); 1943 KnownBits Known0 = TLO.DAG.computeKnownBits(Op0, DemandedElts, Depth + 1); 1944 KnownBits Known1 = TLO.DAG.computeKnownBits(Op1, DemandedElts, Depth + 1); 1945 Known = KnownBits::umax(Known0, Known1); 1946 if (Optional<bool> IsUGE = KnownBits::uge(Known0, Known1)) 1947 return TLO.CombineTo(Op, IsUGE.getValue() ? Op0 : Op1); 1948 if (Optional<bool> IsUGT = KnownBits::ugt(Known0, Known1)) 1949 return TLO.CombineTo(Op, IsUGT.getValue() ? Op0 : Op1); 1950 break; 1951 } 1952 case ISD::BITREVERSE: { 1953 SDValue Src = Op.getOperand(0); 1954 APInt DemandedSrcBits = DemandedBits.reverseBits(); 1955 if (SimplifyDemandedBits(Src, DemandedSrcBits, DemandedElts, Known2, TLO, 1956 Depth + 1)) 1957 return true; 1958 Known.One = Known2.One.reverseBits(); 1959 Known.Zero = Known2.Zero.reverseBits(); 1960 break; 1961 } 1962 case ISD::BSWAP: { 1963 SDValue Src = Op.getOperand(0); 1964 1965 // If the only bits demanded come from one byte of the bswap result, 1966 // just shift the input byte into position to eliminate the bswap. 1967 unsigned NLZ = DemandedBits.countLeadingZeros(); 1968 unsigned NTZ = DemandedBits.countTrailingZeros(); 1969 1970 // Round NTZ down to the next byte. If we have 11 trailing zeros, then 1971 // we need all the bits down to bit 8. Likewise, round NLZ. If we 1972 // have 14 leading zeros, round to 8. 1973 NLZ = alignDown(NLZ, 8); 1974 NTZ = alignDown(NTZ, 8); 1975 // If we need exactly one byte, we can do this transformation. 1976 if (BitWidth - NLZ - NTZ == 8) { 1977 // Replace this with either a left or right shift to get the byte into 1978 // the right place. 1979 unsigned ShiftOpcode = NLZ > NTZ ? ISD::SRL : ISD::SHL; 1980 if (!TLO.LegalOperations() || isOperationLegal(ShiftOpcode, VT)) { 1981 EVT ShiftAmtTy = getShiftAmountTy(VT, DL); 1982 unsigned ShiftAmount = NLZ > NTZ ? 
NLZ - NTZ : NTZ - NLZ; 1983 SDValue ShAmt = TLO.DAG.getConstant(ShiftAmount, dl, ShiftAmtTy); 1984 SDValue NewOp = TLO.DAG.getNode(ShiftOpcode, dl, VT, Src, ShAmt); 1985 return TLO.CombineTo(Op, NewOp); 1986 } 1987 } 1988 1989 APInt DemandedSrcBits = DemandedBits.byteSwap(); 1990 if (SimplifyDemandedBits(Src, DemandedSrcBits, DemandedElts, Known2, TLO, 1991 Depth + 1)) 1992 return true; 1993 Known.One = Known2.One.byteSwap(); 1994 Known.Zero = Known2.Zero.byteSwap(); 1995 break; 1996 } 1997 case ISD::CTPOP: { 1998 // If only 1 bit is demanded, replace with PARITY as long as we're before 1999 // op legalization. 2000 // FIXME: Limit to scalars for now. 2001 if (DemandedBits.isOne() && !TLO.LegalOps && !VT.isVector()) 2002 return TLO.CombineTo(Op, TLO.DAG.getNode(ISD::PARITY, dl, VT, 2003 Op.getOperand(0))); 2004 2005 Known = TLO.DAG.computeKnownBits(Op, DemandedElts, Depth); 2006 break; 2007 } 2008 case ISD::SIGN_EXTEND_INREG: { 2009 SDValue Op0 = Op.getOperand(0); 2010 EVT ExVT = cast<VTSDNode>(Op.getOperand(1))->getVT(); 2011 unsigned ExVTBits = ExVT.getScalarSizeInBits(); 2012 2013 // If we only care about the highest bit, don't bother shifting right. 2014 if (DemandedBits.isSignMask()) { 2015 unsigned MinSignedBits = 2016 TLO.DAG.ComputeMaxSignificantBits(Op0, DemandedElts, Depth + 1); 2017 bool AlreadySignExtended = ExVTBits >= MinSignedBits; 2018 // However if the input is already sign extended we expect the sign 2019 // extension to be dropped altogether later and do not simplify. 2020 if (!AlreadySignExtended) { 2021 // Compute the correct shift amount type, which must be getShiftAmountTy 2022 // for scalar types after legalization. 2023 SDValue ShiftAmt = TLO.DAG.getConstant(BitWidth - ExVTBits, dl, 2024 getShiftAmountTy(VT, DL)); 2025 return TLO.CombineTo(Op, 2026 TLO.DAG.getNode(ISD::SHL, dl, VT, Op0, ShiftAmt)); 2027 } 2028 } 2029 2030 // If none of the extended bits are demanded, eliminate the sextinreg. 2031 if (DemandedBits.getActiveBits() <= ExVTBits) 2032 return TLO.CombineTo(Op, Op0); 2033 2034 APInt InputDemandedBits = DemandedBits.getLoBits(ExVTBits); 2035 2036 // Since the sign extended bits are demanded, we know that the sign 2037 // bit is demanded. 2038 InputDemandedBits.setBit(ExVTBits - 1); 2039 2040 if (SimplifyDemandedBits(Op0, InputDemandedBits, Known, TLO, Depth + 1)) 2041 return true; 2042 assert(!Known.hasConflict() && "Bits known to be one AND zero?"); 2043 2044 // If the sign bit of the input is known set or clear, then we know the 2045 // top bits of the result. 2046 2047 // If the input sign bit is known zero, convert this into a zero extension. 
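// e.g. for an i32 sext_inreg(x, i8) where bit 7 of x is known zero, the
// node behaves exactly like zext_inreg(x, i8), i.e. (and x, 0xFF).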
2048 if (Known.Zero[ExVTBits - 1]) 2049 return TLO.CombineTo(Op, TLO.DAG.getZeroExtendInReg(Op0, dl, ExVT)); 2050 2051 APInt Mask = APInt::getLowBitsSet(BitWidth, ExVTBits); 2052 if (Known.One[ExVTBits - 1]) { // Input sign bit known set 2053 Known.One.setBitsFrom(ExVTBits); 2054 Known.Zero &= Mask; 2055 } else { // Input sign bit unknown 2056 Known.Zero &= Mask; 2057 Known.One &= Mask; 2058 } 2059 break; 2060 } 2061 case ISD::BUILD_PAIR: { 2062 EVT HalfVT = Op.getOperand(0).getValueType(); 2063 unsigned HalfBitWidth = HalfVT.getScalarSizeInBits(); 2064 2065 APInt MaskLo = DemandedBits.getLoBits(HalfBitWidth).trunc(HalfBitWidth); 2066 APInt MaskHi = DemandedBits.getHiBits(HalfBitWidth).trunc(HalfBitWidth); 2067 2068 KnownBits KnownLo, KnownHi; 2069 2070 if (SimplifyDemandedBits(Op.getOperand(0), MaskLo, KnownLo, TLO, Depth + 1)) 2071 return true; 2072 2073 if (SimplifyDemandedBits(Op.getOperand(1), MaskHi, KnownHi, TLO, Depth + 1)) 2074 return true; 2075 2076 Known.Zero = KnownLo.Zero.zext(BitWidth) | 2077 KnownHi.Zero.zext(BitWidth).shl(HalfBitWidth); 2078 2079 Known.One = KnownLo.One.zext(BitWidth) | 2080 KnownHi.One.zext(BitWidth).shl(HalfBitWidth); 2081 break; 2082 } 2083 case ISD::ZERO_EXTEND: 2084 case ISD::ZERO_EXTEND_VECTOR_INREG: { 2085 SDValue Src = Op.getOperand(0); 2086 EVT SrcVT = Src.getValueType(); 2087 unsigned InBits = SrcVT.getScalarSizeInBits(); 2088 unsigned InElts = SrcVT.isVector() ? SrcVT.getVectorNumElements() : 1; 2089 bool IsVecInReg = Op.getOpcode() == ISD::ZERO_EXTEND_VECTOR_INREG; 2090 2091 // If none of the top bits are demanded, convert this into an any_extend. 2092 if (DemandedBits.getActiveBits() <= InBits) { 2093 // If we only need the non-extended bits of the bottom element 2094 // then we can just bitcast to the result. 2095 if (IsLE && IsVecInReg && DemandedElts == 1 && 2096 VT.getSizeInBits() == SrcVT.getSizeInBits()) 2097 return TLO.CombineTo(Op, TLO.DAG.getBitcast(VT, Src)); 2098 2099 unsigned Opc = 2100 IsVecInReg ? ISD::ANY_EXTEND_VECTOR_INREG : ISD::ANY_EXTEND; 2101 if (!TLO.LegalOperations() || isOperationLegal(Opc, VT)) 2102 return TLO.CombineTo(Op, TLO.DAG.getNode(Opc, dl, VT, Src)); 2103 } 2104 2105 APInt InDemandedBits = DemandedBits.trunc(InBits); 2106 APInt InDemandedElts = DemandedElts.zextOrSelf(InElts); 2107 if (SimplifyDemandedBits(Src, InDemandedBits, InDemandedElts, Known, TLO, 2108 Depth + 1)) 2109 return true; 2110 assert(!Known.hasConflict() && "Bits known to be one AND zero?"); 2111 assert(Known.getBitWidth() == InBits && "Src width has changed?"); 2112 Known = Known.zext(BitWidth); 2113 2114 // Attempt to avoid multi-use ops if we don't need anything from them. 2115 if (SDValue NewSrc = SimplifyMultipleUseDemandedBits( 2116 Src, InDemandedBits, InDemandedElts, TLO.DAG, Depth + 1)) 2117 return TLO.CombineTo(Op, TLO.DAG.getNode(Op.getOpcode(), dl, VT, NewSrc)); 2118 break; 2119 } 2120 case ISD::SIGN_EXTEND: 2121 case ISD::SIGN_EXTEND_VECTOR_INREG: { 2122 SDValue Src = Op.getOperand(0); 2123 EVT SrcVT = Src.getValueType(); 2124 unsigned InBits = SrcVT.getScalarSizeInBits(); 2125 unsigned InElts = SrcVT.isVector() ? SrcVT.getVectorNumElements() : 1; 2126 bool IsVecInReg = Op.getOpcode() == ISD::SIGN_EXTEND_VECTOR_INREG; 2127 2128 // If none of the top bits are demanded, convert this into an any_extend. 2129 if (DemandedBits.getActiveBits() <= InBits) { 2130 // If we only need the non-extended bits of the bottom element 2131 // then we can just bitcast to the result. 
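// e.g. on little-endian, (v2i64 sign_extend_vector_inreg (v4i32 x)) where
// only the low 32 bits of element 0 are demanded is just a bitcast of x.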
2132 if (IsLE && IsVecInReg && DemandedElts == 1 && 2133 VT.getSizeInBits() == SrcVT.getSizeInBits()) 2134 return TLO.CombineTo(Op, TLO.DAG.getBitcast(VT, Src)); 2135 2136 unsigned Opc = 2137 IsVecInReg ? ISD::ANY_EXTEND_VECTOR_INREG : ISD::ANY_EXTEND; 2138 if (!TLO.LegalOperations() || isOperationLegal(Opc, VT)) 2139 return TLO.CombineTo(Op, TLO.DAG.getNode(Opc, dl, VT, Src)); 2140 } 2141 2142 APInt InDemandedBits = DemandedBits.trunc(InBits); 2143 APInt InDemandedElts = DemandedElts.zextOrSelf(InElts); 2144 2145 // Since some of the sign extended bits are demanded, we know that the sign 2146 // bit is demanded. 2147 InDemandedBits.setBit(InBits - 1); 2148 2149 if (SimplifyDemandedBits(Src, InDemandedBits, InDemandedElts, Known, TLO, 2150 Depth + 1)) 2151 return true; 2152 assert(!Known.hasConflict() && "Bits known to be one AND zero?"); 2153 assert(Known.getBitWidth() == InBits && "Src width has changed?"); 2154 2155 // If the sign bit is known one, the top bits match. 2156 Known = Known.sext(BitWidth); 2157 2158 // If the sign bit is known zero, convert this to a zero extend. 2159 if (Known.isNonNegative()) { 2160 unsigned Opc = 2161 IsVecInReg ? ISD::ZERO_EXTEND_VECTOR_INREG : ISD::ZERO_EXTEND; 2162 if (!TLO.LegalOperations() || isOperationLegal(Opc, VT)) 2163 return TLO.CombineTo(Op, TLO.DAG.getNode(Opc, dl, VT, Src)); 2164 } 2165 2166 // Attempt to avoid multi-use ops if we don't need anything from them. 2167 if (SDValue NewSrc = SimplifyMultipleUseDemandedBits( 2168 Src, InDemandedBits, InDemandedElts, TLO.DAG, Depth + 1)) 2169 return TLO.CombineTo(Op, TLO.DAG.getNode(Op.getOpcode(), dl, VT, NewSrc)); 2170 break; 2171 } 2172 case ISD::ANY_EXTEND: 2173 case ISD::ANY_EXTEND_VECTOR_INREG: { 2174 SDValue Src = Op.getOperand(0); 2175 EVT SrcVT = Src.getValueType(); 2176 unsigned InBits = SrcVT.getScalarSizeInBits(); 2177 unsigned InElts = SrcVT.isVector() ? SrcVT.getVectorNumElements() : 1; 2178 bool IsVecInReg = Op.getOpcode() == ISD::ANY_EXTEND_VECTOR_INREG; 2179 2180 // If we only need the bottom element then we can just bitcast. 2181 // TODO: Handle ANY_EXTEND? 2182 if (IsLE && IsVecInReg && DemandedElts == 1 && 2183 VT.getSizeInBits() == SrcVT.getSizeInBits()) 2184 return TLO.CombineTo(Op, TLO.DAG.getBitcast(VT, Src)); 2185 2186 APInt InDemandedBits = DemandedBits.trunc(InBits); 2187 APInt InDemandedElts = DemandedElts.zextOrSelf(InElts); 2188 if (SimplifyDemandedBits(Src, InDemandedBits, InDemandedElts, Known, TLO, 2189 Depth + 1)) 2190 return true; 2191 assert(!Known.hasConflict() && "Bits known to be one AND zero?"); 2192 assert(Known.getBitWidth() == InBits && "Src width has changed?"); 2193 Known = Known.anyext(BitWidth); 2194 2195 // Attempt to avoid multi-use ops if we don't need anything from them. 2196 if (SDValue NewSrc = SimplifyMultipleUseDemandedBits( 2197 Src, InDemandedBits, InDemandedElts, TLO.DAG, Depth + 1)) 2198 return TLO.CombineTo(Op, TLO.DAG.getNode(Op.getOpcode(), dl, VT, NewSrc)); 2199 break; 2200 } 2201 case ISD::TRUNCATE: { 2202 SDValue Src = Op.getOperand(0); 2203 2204 // Simplify the input, using demanded bit information, and compute the known 2205 // zero/one bits live out. 2206 unsigned OperandBitWidth = Src.getScalarValueSizeInBits(); 2207 APInt TruncMask = DemandedBits.zext(OperandBitWidth); 2208 if (SimplifyDemandedBits(Src, TruncMask, DemandedElts, Known, TLO, 2209 Depth + 1)) 2210 return true; 2211 Known = Known.trunc(BitWidth); 2212 2213 // Attempt to avoid multi-use ops if we don't need anything from them. 
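// e.g. (i16 truncate (i32 or x, (shl y, 16))) demands only the low 16 bits,
// so the OR can be bypassed in favor of x even when the OR has other users.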
2214 if (SDValue NewSrc = SimplifyMultipleUseDemandedBits(
2215 Src, TruncMask, DemandedElts, TLO.DAG, Depth + 1))
2216 return TLO.CombineTo(Op, TLO.DAG.getNode(ISD::TRUNCATE, dl, VT, NewSrc));
2217
2218 // If the input is only used by this truncate, see if we can shrink it based
2219 // on the known demanded bits.
2220 if (Src.getNode()->hasOneUse()) {
2221 switch (Src.getOpcode()) {
2222 default:
2223 break;
2224 case ISD::SRL:
2225 // Shrink SRL by a constant if none of the high bits shifted-in are
2226 // demanded.
2227 if (TLO.LegalTypes() && !isTypeDesirableForOp(ISD::SRL, VT))
2228 // Do not turn (vt1 truncate (vt2 srl)) into (vt1 srl) if vt1 is
2229 // undesirable.
2230 break;
2231
2232 const APInt *ShAmtC =
2233 TLO.DAG.getValidShiftAmountConstant(Src, DemandedElts);
2234 if (!ShAmtC || ShAmtC->uge(BitWidth))
2235 break;
2236 uint64_t ShVal = ShAmtC->getZExtValue();
2237
2238 APInt HighBits =
2239 APInt::getHighBitsSet(OperandBitWidth, OperandBitWidth - BitWidth);
2240 HighBits.lshrInPlace(ShVal);
2241 HighBits = HighBits.trunc(BitWidth);
2242
2243 if (!(HighBits & DemandedBits)) {
2244 // None of the shifted-in bits are needed. Add a truncate of the
2245 // shift input, then shift it.
2246 SDValue NewShAmt = TLO.DAG.getConstant(
2247 ShVal, dl, getShiftAmountTy(VT, DL, TLO.LegalTypes()));
2248 SDValue NewTrunc =
2249 TLO.DAG.getNode(ISD::TRUNCATE, dl, VT, Src.getOperand(0));
2250 return TLO.CombineTo(
2251 Op, TLO.DAG.getNode(ISD::SRL, dl, VT, NewTrunc, NewShAmt));
2252 }
2253 break;
2254 }
2255 }
2256
2257 assert(!Known.hasConflict() && "Bits known to be one AND zero?");
2258 break;
2259 }
2260 case ISD::AssertZext: {
2261 // AssertZext demands all of the high bits, plus any of the low bits
2262 // demanded by its users.
2263 EVT ZVT = cast<VTSDNode>(Op.getOperand(1))->getVT();
2264 APInt InMask = APInt::getLowBitsSet(BitWidth, ZVT.getSizeInBits());
2265 if (SimplifyDemandedBits(Op.getOperand(0), ~InMask | DemandedBits, Known,
2266 TLO, Depth + 1))
2267 return true;
2268 assert(!Known.hasConflict() && "Bits known to be one AND zero?");
2269
2270 Known.Zero |= ~InMask;
2271 break;
2272 }
2273 case ISD::EXTRACT_VECTOR_ELT: {
2274 SDValue Src = Op.getOperand(0);
2275 SDValue Idx = Op.getOperand(1);
2276 ElementCount SrcEltCnt = Src.getValueType().getVectorElementCount();
2277 unsigned EltBitWidth = Src.getScalarValueSizeInBits();
2278
2279 if (SrcEltCnt.isScalable())
2280 return false;
2281
2282 // Demand the bits from every vector element without a constant index.
2283 unsigned NumSrcElts = SrcEltCnt.getFixedValue();
2284 APInt DemandedSrcElts = APInt::getAllOnes(NumSrcElts);
2285 if (auto *CIdx = dyn_cast<ConstantSDNode>(Idx))
2286 if (CIdx->getAPIntValue().ult(NumSrcElts))
2287 DemandedSrcElts = APInt::getOneBitSet(NumSrcElts, CIdx->getZExtValue());
2288
2289 // If BitWidth > EltBitWidth the value is any-extended, so we do not know
2290 // anything about the extended bits.
2291 APInt DemandedSrcBits = DemandedBits;
2292 if (BitWidth > EltBitWidth)
2293 DemandedSrcBits = DemandedSrcBits.trunc(EltBitWidth);
2294
2295 if (SimplifyDemandedBits(Src, DemandedSrcBits, DemandedSrcElts, Known2, TLO,
2296 Depth + 1))
2297 return true;
2298
2299 // Attempt to avoid multi-use ops if we don't need anything from them.
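// (Only the DemandedSrcElts lanes of Src matter here, so e.g. a shuffle
// whose demanded lane maps inline to one of its inputs may be bypassed in
// favor of that input.)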
2300 if (!DemandedSrcBits.isAllOnes() || !DemandedSrcElts.isAllOnes()) { 2301 if (SDValue DemandedSrc = SimplifyMultipleUseDemandedBits( 2302 Src, DemandedSrcBits, DemandedSrcElts, TLO.DAG, Depth + 1)) { 2303 SDValue NewOp = 2304 TLO.DAG.getNode(Op.getOpcode(), dl, VT, DemandedSrc, Idx); 2305 return TLO.CombineTo(Op, NewOp); 2306 } 2307 } 2308 2309 Known = Known2; 2310 if (BitWidth > EltBitWidth) 2311 Known = Known.anyext(BitWidth); 2312 break; 2313 } 2314 case ISD::BITCAST: { 2315 SDValue Src = Op.getOperand(0); 2316 EVT SrcVT = Src.getValueType(); 2317 unsigned NumSrcEltBits = SrcVT.getScalarSizeInBits(); 2318 2319 // If this is an FP->Int bitcast and if the sign bit is the only 2320 // thing demanded, turn this into a FGETSIGN. 2321 if (!TLO.LegalOperations() && !VT.isVector() && !SrcVT.isVector() && 2322 DemandedBits == APInt::getSignMask(Op.getValueSizeInBits()) && 2323 SrcVT.isFloatingPoint()) { 2324 bool OpVTLegal = isOperationLegalOrCustom(ISD::FGETSIGN, VT); 2325 bool i32Legal = isOperationLegalOrCustom(ISD::FGETSIGN, MVT::i32); 2326 if ((OpVTLegal || i32Legal) && VT.isSimple() && SrcVT != MVT::f16 && 2327 SrcVT != MVT::f128) { 2328 // Cannot eliminate/lower SHL for f128 yet. 2329 EVT Ty = OpVTLegal ? VT : MVT::i32; 2330 // Make a FGETSIGN + SHL to move the sign bit into the appropriate 2331 // place. We expect the SHL to be eliminated by other optimizations. 2332 SDValue Sign = TLO.DAG.getNode(ISD::FGETSIGN, dl, Ty, Src); 2333 unsigned OpVTSizeInBits = Op.getValueSizeInBits(); 2334 if (!OpVTLegal && OpVTSizeInBits > 32) 2335 Sign = TLO.DAG.getNode(ISD::ZERO_EXTEND, dl, VT, Sign); 2336 unsigned ShVal = Op.getValueSizeInBits() - 1; 2337 SDValue ShAmt = TLO.DAG.getConstant(ShVal, dl, VT); 2338 return TLO.CombineTo(Op, 2339 TLO.DAG.getNode(ISD::SHL, dl, VT, Sign, ShAmt)); 2340 } 2341 } 2342 2343 // Bitcast from a vector using SimplifyDemanded Bits/VectorElts. 2344 // Demand the elt/bit if any of the original elts/bits are demanded. 2345 if (SrcVT.isVector() && (BitWidth % NumSrcEltBits) == 0) { 2346 unsigned Scale = BitWidth / NumSrcEltBits; 2347 unsigned NumSrcElts = SrcVT.getVectorNumElements(); 2348 APInt DemandedSrcBits = APInt::getZero(NumSrcEltBits); 2349 APInt DemandedSrcElts = APInt::getZero(NumSrcElts); 2350 for (unsigned i = 0; i != Scale; ++i) { 2351 unsigned EltOffset = IsLE ? i : (Scale - 1 - i); 2352 unsigned BitOffset = EltOffset * NumSrcEltBits; 2353 APInt Sub = DemandedBits.extractBits(NumSrcEltBits, BitOffset); 2354 if (!Sub.isZero()) { 2355 DemandedSrcBits |= Sub; 2356 for (unsigned j = 0; j != NumElts; ++j) 2357 if (DemandedElts[j]) 2358 DemandedSrcElts.setBit((j * Scale) + i); 2359 } 2360 } 2361 2362 APInt KnownSrcUndef, KnownSrcZero; 2363 if (SimplifyDemandedVectorElts(Src, DemandedSrcElts, KnownSrcUndef, 2364 KnownSrcZero, TLO, Depth + 1)) 2365 return true; 2366 2367 KnownBits KnownSrcBits; 2368 if (SimplifyDemandedBits(Src, DemandedSrcBits, DemandedSrcElts, 2369 KnownSrcBits, TLO, Depth + 1)) 2370 return true; 2371 } else if (IsLE && (NumSrcEltBits % BitWidth) == 0) { 2372 // TODO - bigendian once we have test coverage. 2373 unsigned Scale = NumSrcEltBits / BitWidth; 2374 unsigned NumSrcElts = SrcVT.isVector() ? 
SrcVT.getVectorNumElements() : 1; 2375 APInt DemandedSrcBits = APInt::getZero(NumSrcEltBits); 2376 APInt DemandedSrcElts = APInt::getZero(NumSrcElts); 2377 for (unsigned i = 0; i != NumElts; ++i) 2378 if (DemandedElts[i]) { 2379 unsigned Offset = (i % Scale) * BitWidth; 2380 DemandedSrcBits.insertBits(DemandedBits, Offset); 2381 DemandedSrcElts.setBit(i / Scale); 2382 } 2383 2384 if (SrcVT.isVector()) { 2385 APInt KnownSrcUndef, KnownSrcZero; 2386 if (SimplifyDemandedVectorElts(Src, DemandedSrcElts, KnownSrcUndef, 2387 KnownSrcZero, TLO, Depth + 1)) 2388 return true; 2389 } 2390 2391 KnownBits KnownSrcBits; 2392 if (SimplifyDemandedBits(Src, DemandedSrcBits, DemandedSrcElts, 2393 KnownSrcBits, TLO, Depth + 1)) 2394 return true; 2395 } 2396 2397 // If this is a bitcast, let computeKnownBits handle it. Only do this on a 2398 // recursive call where Known may be useful to the caller. 2399 if (Depth > 0) { 2400 Known = TLO.DAG.computeKnownBits(Op, DemandedElts, Depth); 2401 return false; 2402 } 2403 break; 2404 } 2405 case ISD::MUL: 2406 if (DemandedBits.isPowerOf2()) { 2407 // The LSB of X*Y is set only if (X & 1) == 1 and (Y & 1) == 1. 2408 // If we demand exactly one bit N and we have "X * (C' << N)" where C' is 2409 // odd (has LSB set), then the left-shifted low bit of X is the answer. 2410 unsigned CTZ = DemandedBits.countTrailingZeros(); 2411 ConstantSDNode *C = isConstOrConstSplat(Op.getOperand(1), DemandedElts); 2412 if (C && C->getAPIntValue().countTrailingZeros() == CTZ) { 2413 EVT ShiftAmtTy = getShiftAmountTy(VT, TLO.DAG.getDataLayout()); 2414 SDValue AmtC = TLO.DAG.getConstant(CTZ, dl, ShiftAmtTy); 2415 SDValue Shl = TLO.DAG.getNode(ISD::SHL, dl, VT, Op.getOperand(0), AmtC); 2416 return TLO.CombineTo(Op, Shl); 2417 } 2418 } 2419 // For a squared value "X * X", the bottom 2 bits are 0 and X[0] because: 2420 // X * X is odd iff X is odd. 2421 // 'Quadratic Reciprocity': X * X -> 0 for bit[1] 2422 if (Op.getOperand(0) == Op.getOperand(1) && DemandedBits.ult(4)) { 2423 SDValue One = TLO.DAG.getConstant(1, dl, VT); 2424 SDValue And1 = TLO.DAG.getNode(ISD::AND, dl, VT, Op.getOperand(0), One); 2425 return TLO.CombineTo(Op, And1); 2426 } 2427 LLVM_FALLTHROUGH; 2428 case ISD::ADD: 2429 case ISD::SUB: { 2430 // Add, Sub, and Mul don't demand any bits in positions beyond that 2431 // of the highest bit demanded of them. 2432 SDValue Op0 = Op.getOperand(0), Op1 = Op.getOperand(1); 2433 SDNodeFlags Flags = Op.getNode()->getFlags(); 2434 unsigned DemandedBitsLZ = DemandedBits.countLeadingZeros(); 2435 APInt LoMask = APInt::getLowBitsSet(BitWidth, BitWidth - DemandedBitsLZ); 2436 if (SimplifyDemandedBits(Op0, LoMask, DemandedElts, Known2, TLO, 2437 Depth + 1) || 2438 SimplifyDemandedBits(Op1, LoMask, DemandedElts, Known2, TLO, 2439 Depth + 1) || 2440 // See if the operation should be performed at a smaller bit width. 2441 ShrinkDemandedOp(Op, BitWidth, DemandedBits, TLO)) { 2442 if (Flags.hasNoSignedWrap() || Flags.hasNoUnsignedWrap()) { 2443 // Disable the nsw and nuw flags. We can no longer guarantee that we 2444 // won't wrap after simplification. 2445 Flags.setNoSignedWrap(false); 2446 Flags.setNoUnsignedWrap(false); 2447 SDValue NewOp = 2448 TLO.DAG.getNode(Op.getOpcode(), dl, VT, Op0, Op1, Flags); 2449 return TLO.CombineTo(Op, NewOp); 2450 } 2451 return true; 2452 } 2453 2454 // Attempt to avoid multi-use ops if we don't need anything from them. 
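// (If a simplified operand is substituted below, the nsw/nuw flags are
// cleared first, since the replacement may change wrapping behavior.)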
2455 if (!LoMask.isAllOnes() || !DemandedElts.isAllOnes()) {
2456 SDValue DemandedOp0 = SimplifyMultipleUseDemandedBits(
2457 Op0, LoMask, DemandedElts, TLO.DAG, Depth + 1);
2458 SDValue DemandedOp1 = SimplifyMultipleUseDemandedBits(
2459 Op1, LoMask, DemandedElts, TLO.DAG, Depth + 1);
2460 if (DemandedOp0 || DemandedOp1) {
2461 Flags.setNoSignedWrap(false);
2462 Flags.setNoUnsignedWrap(false);
2463 Op0 = DemandedOp0 ? DemandedOp0 : Op0;
2464 Op1 = DemandedOp1 ? DemandedOp1 : Op1;
2465 SDValue NewOp =
2466 TLO.DAG.getNode(Op.getOpcode(), dl, VT, Op0, Op1, Flags);
2467 return TLO.CombineTo(Op, NewOp);
2468 }
2469 }
2470
2471 // If we have a constant operand, we may be able to turn it into -1 if we
2472 // do not demand the high bits. This can make the constant smaller to
2473 // encode, allow more general folding, or match specialized instruction
2474 // patterns (e.g., 'blsr' on x86). Don't bother changing 1 to -1 because that
2475 // is probably not useful (and could be detrimental).
2476 ConstantSDNode *C = isConstOrConstSplat(Op1);
2477 APInt HighMask = APInt::getHighBitsSet(BitWidth, DemandedBitsLZ);
2478 if (C && !C->isAllOnes() && !C->isOne() &&
2479 (C->getAPIntValue() | HighMask).isAllOnes()) {
2480 SDValue Neg1 = TLO.DAG.getAllOnesConstant(dl, VT);
2481 // Disable the nsw and nuw flags. We can no longer guarantee that we
2482 // won't wrap after simplification.
2483 Flags.setNoSignedWrap(false);
2484 Flags.setNoUnsignedWrap(false);
2485 SDValue NewOp = TLO.DAG.getNode(Op.getOpcode(), dl, VT, Op0, Neg1, Flags);
2486 return TLO.CombineTo(Op, NewOp);
2487 }
2488
2489 // Match a multiply with a disguised negated-power-of-2 and convert to
2490 // an equivalent shift-left amount.
2491 // Example: (X * MulC) + Op1 --> Op1 - (X << log2(-MulC))
2492 auto getShiftLeftAmt = [&HighMask](SDValue Mul) -> unsigned {
2493 if (Mul.getOpcode() != ISD::MUL || !Mul.hasOneUse())
2494 return 0;
2495
2496 // Don't touch opaque constants. Also, ignore zero and power-of-2
2497 // multiplies. Those will get folded later.
2498 ConstantSDNode *MulC = isConstOrConstSplat(Mul.getOperand(1));
2499 if (MulC && !MulC->isOpaque() && !MulC->isZero() &&
2500 !MulC->getAPIntValue().isPowerOf2()) {
2501 APInt UnmaskedC = MulC->getAPIntValue() | HighMask;
2502 if (UnmaskedC.isNegatedPowerOf2())
2503 return (-UnmaskedC).logBase2();
2504 }
2505 return 0;
2506 };
2507
2508 auto foldMul = [&](SDValue X, SDValue Y, unsigned ShlAmt) {
2509 EVT ShiftAmtTy = getShiftAmountTy(VT, TLO.DAG.getDataLayout());
2510 SDValue ShlAmtC = TLO.DAG.getConstant(ShlAmt, dl, ShiftAmtTy);
2511 SDValue Shl = TLO.DAG.getNode(ISD::SHL, dl, VT, X, ShlAmtC);
2512 SDValue Sub = TLO.DAG.getNode(ISD::SUB, dl, VT, Y, Shl);
2513 return TLO.CombineTo(Op, Sub);
2514 };
2515
2516 if (isOperationLegalOrCustom(ISD::SHL, VT)) {
2517 if (Op.getOpcode() == ISD::ADD) {
2518 // (X * MulC) + Op1 --> Op1 - (X << log2(-MulC))
2519 if (unsigned ShAmt = getShiftLeftAmt(Op0))
2520 return foldMul(Op0.getOperand(0), Op1, ShAmt);
2521 // Op0 + (X * MulC) --> Op0 - (X << log2(-MulC))
2522 if (unsigned ShAmt = getShiftLeftAmt(Op1))
2523 return foldMul(Op1.getOperand(0), Op0, ShAmt);
2524 // TODO:
2525 // Op0 - (X * MulC) --> Op0 + (X << log2(-MulC))
2526 }
2527 }
2528
2529 LLVM_FALLTHROUGH;
2530 }
2531 default:
2532 if (Op.getOpcode() >= ISD::BUILTIN_OP_END) {
2533 if (SimplifyDemandedBitsForTargetNode(Op, DemandedBits, DemandedElts,
2534 Known, TLO, Depth))
2535 return true;
2536 break;
2537 }
2538
2539 // Just use computeKnownBits to compute output bits.
2540 Known = TLO.DAG.computeKnownBits(Op, DemandedElts, Depth); 2541 break; 2542 } 2543 2544 // If we know the value of all of the demanded bits, return this as a 2545 // constant. 2546 if (DemandedBits.isSubsetOf(Known.Zero | Known.One)) { 2547 // Avoid folding to a constant if any OpaqueConstant is involved. 2548 const SDNode *N = Op.getNode(); 2549 for (SDNode *Op : 2550 llvm::make_range(SDNodeIterator::begin(N), SDNodeIterator::end(N))) { 2551 if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op)) 2552 if (C->isOpaque()) 2553 return false; 2554 } 2555 if (VT.isInteger()) 2556 return TLO.CombineTo(Op, TLO.DAG.getConstant(Known.One, dl, VT)); 2557 if (VT.isFloatingPoint()) 2558 return TLO.CombineTo( 2559 Op, 2560 TLO.DAG.getConstantFP( 2561 APFloat(TLO.DAG.EVTToAPFloatSemantics(VT), Known.One), dl, VT)); 2562 } 2563 2564 return false; 2565 } 2566 2567 bool TargetLowering::SimplifyDemandedVectorElts(SDValue Op, 2568 const APInt &DemandedElts, 2569 DAGCombinerInfo &DCI) const { 2570 SelectionDAG &DAG = DCI.DAG; 2571 TargetLoweringOpt TLO(DAG, !DCI.isBeforeLegalize(), 2572 !DCI.isBeforeLegalizeOps()); 2573 2574 APInt KnownUndef, KnownZero; 2575 bool Simplified = 2576 SimplifyDemandedVectorElts(Op, DemandedElts, KnownUndef, KnownZero, TLO); 2577 if (Simplified) { 2578 DCI.AddToWorklist(Op.getNode()); 2579 DCI.CommitTargetLoweringOpt(TLO); 2580 } 2581 2582 return Simplified; 2583 } 2584 2585 /// Given a vector binary operation and known undefined elements for each input 2586 /// operand, compute whether each element of the output is undefined. 2587 static APInt getKnownUndefForVectorBinop(SDValue BO, SelectionDAG &DAG, 2588 const APInt &UndefOp0, 2589 const APInt &UndefOp1) { 2590 EVT VT = BO.getValueType(); 2591 assert(DAG.getTargetLoweringInfo().isBinOp(BO.getOpcode()) && VT.isVector() && 2592 "Vector binop only"); 2593 2594 EVT EltVT = VT.getVectorElementType(); 2595 unsigned NumElts = VT.getVectorNumElements(); 2596 assert(UndefOp0.getBitWidth() == NumElts && 2597 UndefOp1.getBitWidth() == NumElts && "Bad type for undef analysis"); 2598 2599 auto getUndefOrConstantElt = [&](SDValue V, unsigned Index, 2600 const APInt &UndefVals) { 2601 if (UndefVals[Index]) 2602 return DAG.getUNDEF(EltVT); 2603 2604 if (auto *BV = dyn_cast<BuildVectorSDNode>(V)) { 2605 // Try hard to make sure that the getNode() call is not creating temporary 2606 // nodes. Ignore opaque integers because they do not constant fold. 2607 SDValue Elt = BV->getOperand(Index); 2608 auto *C = dyn_cast<ConstantSDNode>(Elt); 2609 if (isa<ConstantFPSDNode>(Elt) || Elt.isUndef() || (C && !C->isOpaque())) 2610 return Elt; 2611 } 2612 2613 return SDValue(); 2614 }; 2615 2616 APInt KnownUndef = APInt::getZero(NumElts); 2617 for (unsigned i = 0; i != NumElts; ++i) { 2618 // If both inputs for this element are either constant or undef and match 2619 // the element type, compute the constant/undef result for this element of 2620 // the vector. 2621 // TODO: Ideally we would use FoldConstantArithmetic() here, but that does 2622 // not handle FP constants. The code within getNode() should be refactored 2623 // to avoid the danger of creating a bogus temporary node here. 
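// (getUndefOrConstantElt returns only undef values or non-opaque constant
// elements, so the trial getNode() call below operates purely on foldable
// operands.)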
2624 SDValue C0 = getUndefOrConstantElt(BO.getOperand(0), i, UndefOp0); 2625 SDValue C1 = getUndefOrConstantElt(BO.getOperand(1), i, UndefOp1); 2626 if (C0 && C1 && C0.getValueType() == EltVT && C1.getValueType() == EltVT) 2627 if (DAG.getNode(BO.getOpcode(), SDLoc(BO), EltVT, C0, C1).isUndef()) 2628 KnownUndef.setBit(i); 2629 } 2630 return KnownUndef; 2631 } 2632 2633 bool TargetLowering::SimplifyDemandedVectorElts( 2634 SDValue Op, const APInt &OriginalDemandedElts, APInt &KnownUndef, 2635 APInt &KnownZero, TargetLoweringOpt &TLO, unsigned Depth, 2636 bool AssumeSingleUse) const { 2637 EVT VT = Op.getValueType(); 2638 unsigned Opcode = Op.getOpcode(); 2639 APInt DemandedElts = OriginalDemandedElts; 2640 unsigned NumElts = DemandedElts.getBitWidth(); 2641 assert(VT.isVector() && "Expected vector op"); 2642 2643 KnownUndef = KnownZero = APInt::getZero(NumElts); 2644 2645 const TargetLowering &TLI = TLO.DAG.getTargetLoweringInfo(); 2646 if (!TLI.shouldSimplifyDemandedVectorElts(Op, TLO)) 2647 return false; 2648 2649 // TODO: For now we assume we know nothing about scalable vectors. 2650 if (VT.isScalableVector()) 2651 return false; 2652 2653 assert(VT.getVectorNumElements() == NumElts && 2654 "Mask size mismatches value type element count!"); 2655 2656 // Undef operand. 2657 if (Op.isUndef()) { 2658 KnownUndef.setAllBits(); 2659 return false; 2660 } 2661 2662 // If Op has other users, assume that all elements are needed. 2663 if (!Op.getNode()->hasOneUse() && !AssumeSingleUse) 2664 DemandedElts.setAllBits(); 2665 2666 // Not demanding any elements from Op. 2667 if (DemandedElts == 0) { 2668 KnownUndef.setAllBits(); 2669 return TLO.CombineTo(Op, TLO.DAG.getUNDEF(VT)); 2670 } 2671 2672 // Limit search depth. 2673 if (Depth >= SelectionDAG::MaxRecursionDepth) 2674 return false; 2675 2676 SDLoc DL(Op); 2677 unsigned EltSizeInBits = VT.getScalarSizeInBits(); 2678 bool IsLE = TLO.DAG.getDataLayout().isLittleEndian(); 2679 2680 // Helper for demanding the specified elements and all the bits of both binary 2681 // operands. 2682 auto SimplifyDemandedVectorEltsBinOp = [&](SDValue Op0, SDValue Op1) { 2683 SDValue NewOp0 = SimplifyMultipleUseDemandedVectorElts(Op0, DemandedElts, 2684 TLO.DAG, Depth + 1); 2685 SDValue NewOp1 = SimplifyMultipleUseDemandedVectorElts(Op1, DemandedElts, 2686 TLO.DAG, Depth + 1); 2687 if (NewOp0 || NewOp1) { 2688 SDValue NewOp = TLO.DAG.getNode( 2689 Opcode, SDLoc(Op), VT, NewOp0 ? NewOp0 : Op0, NewOp1 ? 
NewOp1 : Op1);
2690 return TLO.CombineTo(Op, NewOp);
2691 }
2692 return false;
2693 };
2694
2695 switch (Opcode) {
2696 case ISD::SCALAR_TO_VECTOR: {
2697 if (!DemandedElts[0]) {
2698 KnownUndef.setAllBits();
2699 return TLO.CombineTo(Op, TLO.DAG.getUNDEF(VT));
2700 }
2701 SDValue ScalarSrc = Op.getOperand(0);
2702 if (ScalarSrc.getOpcode() == ISD::EXTRACT_VECTOR_ELT) {
2703 SDValue Src = ScalarSrc.getOperand(0);
2704 SDValue Idx = ScalarSrc.getOperand(1);
2705 EVT SrcVT = Src.getValueType();
2706
2707 ElementCount SrcEltCnt = SrcVT.getVectorElementCount();
2708
2709 if (SrcEltCnt.isScalable())
2710 return false;
2711
2712 unsigned NumSrcElts = SrcEltCnt.getFixedValue();
2713 if (isNullConstant(Idx)) {
2714 APInt SrcDemandedElts = APInt::getOneBitSet(NumSrcElts, 0);
2715 APInt SrcUndef = KnownUndef.zextOrTrunc(NumSrcElts);
2716 APInt SrcZero = KnownZero.zextOrTrunc(NumSrcElts);
2717 if (SimplifyDemandedVectorElts(Src, SrcDemandedElts, SrcUndef, SrcZero,
2718 TLO, Depth + 1))
2719 return true;
2720 }
2721 }
2722 KnownUndef.setHighBits(NumElts - 1);
2723 break;
2724 }
2725 case ISD::BITCAST: {
2726 SDValue Src = Op.getOperand(0);
2727 EVT SrcVT = Src.getValueType();
2728
2729 // We only handle vectors here.
2730 // TODO - investigate calling SimplifyDemandedBits/ComputeKnownBits?
2731 if (!SrcVT.isVector())
2732 break;
2733
2734 // Fast handling of 'identity' bitcasts.
2735 unsigned NumSrcElts = SrcVT.getVectorNumElements();
2736 if (NumSrcElts == NumElts)
2737 return SimplifyDemandedVectorElts(Src, DemandedElts, KnownUndef,
2738 KnownZero, TLO, Depth + 1);
2739
2740 APInt SrcDemandedElts, SrcZero, SrcUndef;
2741
2742 // Bitcast from 'large element' src vector to 'small element' vector: we
2743 // must demand a source element if any DemandedElt maps to it.
2744 if ((NumElts % NumSrcElts) == 0) {
2745 unsigned Scale = NumElts / NumSrcElts;
2746 SrcDemandedElts = APIntOps::ScaleBitMask(DemandedElts, NumSrcElts);
2747 if (SimplifyDemandedVectorElts(Src, SrcDemandedElts, SrcUndef, SrcZero,
2748 TLO, Depth + 1))
2749 return true;
2750
2751 // Try calling SimplifyDemandedBits, converting demanded elts to the bits
2752 // of the large element.
2753 // TODO - bigendian once we have test coverage.
2754 if (IsLE) {
2755 unsigned SrcEltSizeInBits = SrcVT.getScalarSizeInBits();
2756 APInt SrcDemandedBits = APInt::getZero(SrcEltSizeInBits);
2757 for (unsigned i = 0; i != NumElts; ++i)
2758 if (DemandedElts[i]) {
2759 unsigned Ofs = (i % Scale) * EltSizeInBits;
2760 SrcDemandedBits.setBits(Ofs, Ofs + EltSizeInBits);
2761 }
2762
2763 KnownBits Known;
2764 if (SimplifyDemandedBits(Src, SrcDemandedBits, SrcDemandedElts, Known,
2765 TLO, Depth + 1))
2766 return true;
2767
2768 // The bitcast has split each wide element into a number of
2769 // narrow subelements. We have just computed the Known bits
2770 // for wide elements. See if element splitting results in
2771 // some subelements being zero. Only for demanded elements!
2772 for (unsigned SubElt = 0; SubElt != Scale; ++SubElt) {
2773 if (!Known.Zero.extractBits(EltSizeInBits, SubElt * EltSizeInBits)
2774 .isAllOnes())
2775 continue;
2776 for (unsigned SrcElt = 0; SrcElt != NumSrcElts; ++SrcElt) {
2777 unsigned Elt = Scale * SrcElt + SubElt;
2778 if (DemandedElts[Elt])
2779 KnownZero.setBit(Elt);
2780 }
2781 }
2782 }
2783
2784 // If a src element is zero/undef then all the output elements covering it
2785 // will be as well; only demanded elements are guaranteed to be correct.
2786 for (unsigned i = 0; i != NumSrcElts; ++i) {
2787 if (SrcDemandedElts[i]) {
2788 if (SrcZero[i])
2789 KnownZero.setBits(i * Scale, (i + 1) * Scale);
2790 if (SrcUndef[i])
2791 KnownUndef.setBits(i * Scale, (i + 1) * Scale);
2792 }
2793 }
2794 }
2795
2796 // Bitcast from 'small element' src vector to 'large element' vector: we
2797 // demand all smaller source elements covered by the larger demanded element
2798 // of this vector.
2799 if ((NumSrcElts % NumElts) == 0) {
2800 unsigned Scale = NumSrcElts / NumElts;
2801 SrcDemandedElts = APIntOps::ScaleBitMask(DemandedElts, NumSrcElts);
2802 if (SimplifyDemandedVectorElts(Src, SrcDemandedElts, SrcUndef, SrcZero,
2803 TLO, Depth + 1))
2804 return true;
2805
2806 // If all the src elements covering an output element are zero/undef, then
2807 // the output element will be as well, assuming it was demanded.
2808 for (unsigned i = 0; i != NumElts; ++i) {
2809 if (DemandedElts[i]) {
2810 if (SrcZero.extractBits(Scale, i * Scale).isAllOnes())
2811 KnownZero.setBit(i);
2812 if (SrcUndef.extractBits(Scale, i * Scale).isAllOnes())
2813 KnownUndef.setBit(i);
2814 }
2815 }
2816 }
2817 break;
2818 }
2819 case ISD::BUILD_VECTOR: {
2820 // Check all elements and simplify any unused elements with UNDEF.
2821 if (!DemandedElts.isAllOnes()) {
2822 // Don't simplify BROADCASTS.
2823 if (llvm::any_of(Op->op_values(),
2824 [&](SDValue Elt) { return Op.getOperand(0) != Elt; })) {
2825 SmallVector<SDValue, 32> Ops(Op->op_begin(), Op->op_end());
2826 bool Updated = false;
2827 for (unsigned i = 0; i != NumElts; ++i) {
2828 if (!DemandedElts[i] && !Ops[i].isUndef()) {
2829 Ops[i] = TLO.DAG.getUNDEF(Ops[0].getValueType());
2830 KnownUndef.setBit(i);
2831 Updated = true;
2832 }
2833 }
2834 if (Updated)
2835 return TLO.CombineTo(Op, TLO.DAG.getBuildVector(VT, DL, Ops));
2836 }
2837 }
2838 for (unsigned i = 0; i != NumElts; ++i) {
2839 SDValue SrcOp = Op.getOperand(i);
2840 if (SrcOp.isUndef()) {
2841 KnownUndef.setBit(i);
2842 } else if (EltSizeInBits == SrcOp.getScalarValueSizeInBits() &&
2843 (isNullConstant(SrcOp) || isNullFPConstant(SrcOp))) {
2844 KnownZero.setBit(i);
2845 }
2846 }
2847 break;
2848 }
2849 case ISD::CONCAT_VECTORS: {
2850 EVT SubVT = Op.getOperand(0).getValueType();
2851 unsigned NumSubVecs = Op.getNumOperands();
2852 unsigned NumSubElts = SubVT.getVectorNumElements();
2853 for (unsigned i = 0; i != NumSubVecs; ++i) {
2854 SDValue SubOp = Op.getOperand(i);
2855 APInt SubElts = DemandedElts.extractBits(NumSubElts, i * NumSubElts);
2856 APInt SubUndef, SubZero;
2857 if (SimplifyDemandedVectorElts(SubOp, SubElts, SubUndef, SubZero, TLO,
2858 Depth + 1))
2859 return true;
2860 KnownUndef.insertBits(SubUndef, i * NumSubElts);
2861 KnownZero.insertBits(SubZero, i * NumSubElts);
2862 }
2863 break;
2864 }
2865 case ISD::INSERT_SUBVECTOR: {
2866 // Demand any elements from the subvector and the remainder from the src
2867 // it's inserted into.
2868 SDValue Src = Op.getOperand(0);
2869 SDValue Sub = Op.getOperand(1);
2870 uint64_t Idx = Op.getConstantOperandVal(2);
2871 unsigned NumSubElts = Sub.getValueType().getVectorNumElements();
2872 APInt DemandedSubElts = DemandedElts.extractBits(NumSubElts, Idx);
2873 APInt DemandedSrcElts = DemandedElts;
2874 DemandedSrcElts.insertBits(APInt::getZero(NumSubElts), Idx);
2875
2876 APInt SubUndef, SubZero;
2877 if (SimplifyDemandedVectorElts(Sub, DemandedSubElts, SubUndef, SubZero, TLO,
2878 Depth + 1))
2879 return true;
2880
2881 // If none of the src operand elements are demanded, replace it with undef.
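// e.g. when inserting a v2f32 subvector at index 0 of a v4f32 and only
// lanes 0 and 1 are demanded, every demanded lane comes from Sub, so Src
// can be replaced with undef.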
2882 if (!DemandedSrcElts && !Src.isUndef()) 2883 return TLO.CombineTo(Op, TLO.DAG.getNode(ISD::INSERT_SUBVECTOR, DL, VT, 2884 TLO.DAG.getUNDEF(VT), Sub, 2885 Op.getOperand(2))); 2886 2887 if (SimplifyDemandedVectorElts(Src, DemandedSrcElts, KnownUndef, KnownZero, 2888 TLO, Depth + 1)) 2889 return true; 2890 KnownUndef.insertBits(SubUndef, Idx); 2891 KnownZero.insertBits(SubZero, Idx); 2892 2893 // Attempt to avoid multi-use ops if we don't need anything from them. 2894 if (!DemandedSrcElts.isAllOnes() || !DemandedSubElts.isAllOnes()) { 2895 SDValue NewSrc = SimplifyMultipleUseDemandedVectorElts( 2896 Src, DemandedSrcElts, TLO.DAG, Depth + 1); 2897 SDValue NewSub = SimplifyMultipleUseDemandedVectorElts( 2898 Sub, DemandedSubElts, TLO.DAG, Depth + 1); 2899 if (NewSrc || NewSub) { 2900 NewSrc = NewSrc ? NewSrc : Src; 2901 NewSub = NewSub ? NewSub : Sub; 2902 SDValue NewOp = TLO.DAG.getNode(Op.getOpcode(), SDLoc(Op), VT, NewSrc, 2903 NewSub, Op.getOperand(2)); 2904 return TLO.CombineTo(Op, NewOp); 2905 } 2906 } 2907 break; 2908 } 2909 case ISD::EXTRACT_SUBVECTOR: { 2910 // Offset the demanded elts by the subvector index. 2911 SDValue Src = Op.getOperand(0); 2912 if (Src.getValueType().isScalableVector()) 2913 break; 2914 uint64_t Idx = Op.getConstantOperandVal(1); 2915 unsigned NumSrcElts = Src.getValueType().getVectorNumElements(); 2916 APInt DemandedSrcElts = DemandedElts.zextOrSelf(NumSrcElts).shl(Idx); 2917 2918 APInt SrcUndef, SrcZero; 2919 if (SimplifyDemandedVectorElts(Src, DemandedSrcElts, SrcUndef, SrcZero, TLO, 2920 Depth + 1)) 2921 return true; 2922 KnownUndef = SrcUndef.extractBits(NumElts, Idx); 2923 KnownZero = SrcZero.extractBits(NumElts, Idx); 2924 2925 // Attempt to avoid multi-use ops if we don't need anything from them. 2926 if (!DemandedElts.isAllOnes()) { 2927 SDValue NewSrc = SimplifyMultipleUseDemandedVectorElts( 2928 Src, DemandedSrcElts, TLO.DAG, Depth + 1); 2929 if (NewSrc) { 2930 SDValue NewOp = TLO.DAG.getNode(Op.getOpcode(), SDLoc(Op), VT, NewSrc, 2931 Op.getOperand(1)); 2932 return TLO.CombineTo(Op, NewOp); 2933 } 2934 } 2935 break; 2936 } 2937 case ISD::INSERT_VECTOR_ELT: { 2938 SDValue Vec = Op.getOperand(0); 2939 SDValue Scl = Op.getOperand(1); 2940 auto *CIdx = dyn_cast<ConstantSDNode>(Op.getOperand(2)); 2941 2942 // For a legal, constant insertion index, if we don't need this insertion 2943 // then strip it, else remove it from the demanded elts. 2944 if (CIdx && CIdx->getAPIntValue().ult(NumElts)) { 2945 unsigned Idx = CIdx->getZExtValue(); 2946 if (!DemandedElts[Idx]) 2947 return TLO.CombineTo(Op, Vec); 2948 2949 APInt DemandedVecElts(DemandedElts); 2950 DemandedVecElts.clearBit(Idx); 2951 if (SimplifyDemandedVectorElts(Vec, DemandedVecElts, KnownUndef, 2952 KnownZero, TLO, Depth + 1)) 2953 return true; 2954 2955 KnownUndef.setBitVal(Idx, Scl.isUndef()); 2956 2957 KnownZero.setBitVal(Idx, isNullConstant(Scl) || isNullFPConstant(Scl)); 2958 break; 2959 } 2960 2961 APInt VecUndef, VecZero; 2962 if (SimplifyDemandedVectorElts(Vec, DemandedElts, VecUndef, VecZero, TLO, 2963 Depth + 1)) 2964 return true; 2965 // Without knowing the insertion index we can't set KnownUndef/KnownZero. 2966 break; 2967 } 2968 case ISD::VSELECT: { 2969 // Try to transform the select condition based on the current demanded 2970 // elements. 2971 // TODO: If a condition element is undef, we can choose from one arm of the 2972 // select (and if one arm is undef, then we can propagate that to the 2973 // result). 
2974 // TODO - add support for constant vselect masks (see IR version of this).
2975 APInt UnusedUndef, UnusedZero;
2976 if (SimplifyDemandedVectorElts(Op.getOperand(0), DemandedElts, UnusedUndef,
2977 UnusedZero, TLO, Depth + 1))
2978 return true;
2979
2980 // See if we can simplify either vselect operand.
2981 APInt DemandedLHS(DemandedElts);
2982 APInt DemandedRHS(DemandedElts);
2983 APInt UndefLHS, ZeroLHS;
2984 APInt UndefRHS, ZeroRHS;
2985 if (SimplifyDemandedVectorElts(Op.getOperand(1), DemandedLHS, UndefLHS,
2986 ZeroLHS, TLO, Depth + 1))
2987 return true;
2988 if (SimplifyDemandedVectorElts(Op.getOperand(2), DemandedRHS, UndefRHS,
2989 ZeroRHS, TLO, Depth + 1))
2990 return true;
2991
2992 KnownUndef = UndefLHS & UndefRHS;
2993 KnownZero = ZeroLHS & ZeroRHS;
2994 break;
2995 }
2996 case ISD::VECTOR_SHUFFLE: {
2997 ArrayRef<int> ShuffleMask = cast<ShuffleVectorSDNode>(Op)->getMask();
2998
2999 // Collect demanded elements from shuffle operands.
3000 APInt DemandedLHS(NumElts, 0);
3001 APInt DemandedRHS(NumElts, 0);
3002 for (unsigned i = 0; i != NumElts; ++i) {
3003 int M = ShuffleMask[i];
3004 if (M < 0 || !DemandedElts[i])
3005 continue;
3006 assert(0 <= M && M < (int)(2 * NumElts) && "Shuffle index out of range");
3007 if (M < (int)NumElts)
3008 DemandedLHS.setBit(M);
3009 else
3010 DemandedRHS.setBit(M - NumElts);
3011 }
3012
3013 // See if we can simplify either shuffle operand.
3014 APInt UndefLHS, ZeroLHS;
3015 APInt UndefRHS, ZeroRHS;
3016 if (SimplifyDemandedVectorElts(Op.getOperand(0), DemandedLHS, UndefLHS,
3017 ZeroLHS, TLO, Depth + 1))
3018 return true;
3019 if (SimplifyDemandedVectorElts(Op.getOperand(1), DemandedRHS, UndefRHS,
3020 ZeroRHS, TLO, Depth + 1))
3021 return true;
3022
3023 // Simplify mask using undef elements from LHS/RHS.
3024 bool Updated = false;
3025 bool IdentityLHS = true, IdentityRHS = true;
3026 SmallVector<int, 32> NewMask(ShuffleMask.begin(), ShuffleMask.end());
3027 for (unsigned i = 0; i != NumElts; ++i) {
3028 int &M = NewMask[i];
3029 if (M < 0)
3030 continue;
3031 if (!DemandedElts[i] || (M < (int)NumElts && UndefLHS[M]) ||
3032 (M >= (int)NumElts && UndefRHS[M - NumElts])) {
3033 Updated = true;
3034 M = -1;
3035 }
3036 IdentityLHS &= (M < 0) || (M == (int)i);
3037 IdentityRHS &= (M < 0) || ((M - NumElts) == i);
3038 }
3039
3040 // Update legal shuffle masks based on demanded elements if it won't reduce
3041 // to an identity mask, which could cause premature removal of the shuffle.
3042 if (Updated && !IdentityLHS && !IdentityRHS && !TLO.LegalOps) {
3043 SDValue LegalShuffle =
3044 buildLegalVectorShuffle(VT, DL, Op.getOperand(0), Op.getOperand(1),
3045 NewMask, TLO.DAG);
3046 if (LegalShuffle)
3047 return TLO.CombineTo(Op, LegalShuffle);
3048 }
3049
3050 // Propagate undef/zero elements from LHS/RHS.
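// (A negative mask entry makes the lane undef; otherwise the lane inherits
// the undef/zero state of whichever operand lane the mask selects.)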
3051 for (unsigned i = 0; i != NumElts; ++i) { 3052 int M = ShuffleMask[i]; 3053 if (M < 0) { 3054 KnownUndef.setBit(i); 3055 } else if (M < (int)NumElts) { 3056 if (UndefLHS[M]) 3057 KnownUndef.setBit(i); 3058 if (ZeroLHS[M]) 3059 KnownZero.setBit(i); 3060 } else { 3061 if (UndefRHS[M - NumElts]) 3062 KnownUndef.setBit(i); 3063 if (ZeroRHS[M - NumElts]) 3064 KnownZero.setBit(i); 3065 } 3066 } 3067 break; 3068 } 3069 case ISD::ANY_EXTEND_VECTOR_INREG: 3070 case ISD::SIGN_EXTEND_VECTOR_INREG: 3071 case ISD::ZERO_EXTEND_VECTOR_INREG: { 3072 APInt SrcUndef, SrcZero; 3073 SDValue Src = Op.getOperand(0); 3074 unsigned NumSrcElts = Src.getValueType().getVectorNumElements(); 3075 APInt DemandedSrcElts = DemandedElts.zextOrSelf(NumSrcElts); 3076 if (SimplifyDemandedVectorElts(Src, DemandedSrcElts, SrcUndef, SrcZero, TLO, 3077 Depth + 1)) 3078 return true; 3079 KnownZero = SrcZero.zextOrTrunc(NumElts); 3080 KnownUndef = SrcUndef.zextOrTrunc(NumElts); 3081 3082 if (IsLE && Op.getOpcode() == ISD::ANY_EXTEND_VECTOR_INREG && 3083 Op.getValueSizeInBits() == Src.getValueSizeInBits() && 3084 DemandedSrcElts == 1) { 3085 // aext - if we just need the bottom element then we can bitcast. 3086 return TLO.CombineTo(Op, TLO.DAG.getBitcast(VT, Src)); 3087 } 3088 3089 if (Op.getOpcode() == ISD::ZERO_EXTEND_VECTOR_INREG) { 3090 // zext(undef) upper bits are guaranteed to be zero. 3091 if (DemandedElts.isSubsetOf(KnownUndef)) 3092 return TLO.CombineTo(Op, TLO.DAG.getConstant(0, SDLoc(Op), VT)); 3093 KnownUndef.clearAllBits(); 3094 3095 // zext - if we just need the bottom element then we can mask: 3096 // zext(and(x,c)) -> and(x,c') iff the zext is the only user of the and. 3097 if (IsLE && DemandedSrcElts == 1 && Src.getOpcode() == ISD::AND && 3098 Op->isOnlyUserOf(Src.getNode()) && 3099 Op.getValueSizeInBits() == Src.getValueSizeInBits()) { 3100 SDLoc DL(Op); 3101 EVT SrcVT = Src.getValueType(); 3102 EVT SrcSVT = SrcVT.getScalarType(); 3103 SmallVector<SDValue> MaskElts; 3104 MaskElts.push_back(TLO.DAG.getAllOnesConstant(DL, SrcSVT)); 3105 MaskElts.append(NumSrcElts - 1, TLO.DAG.getConstant(0, DL, SrcSVT)); 3106 SDValue Mask = TLO.DAG.getBuildVector(SrcVT, DL, MaskElts); 3107 if (SDValue Fold = TLO.DAG.FoldConstantArithmetic( 3108 ISD::AND, DL, SrcVT, {Src.getOperand(1), Mask})) { 3109 Fold = TLO.DAG.getNode(ISD::AND, DL, SrcVT, Src.getOperand(0), Fold); 3110 return TLO.CombineTo(Op, TLO.DAG.getBitcast(VT, Fold)); 3111 } 3112 } 3113 } 3114 break; 3115 } 3116 3117 // TODO: There are more binop opcodes that could be handled here - MIN, 3118 // MAX, saturated math, etc. 
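// Note: ISD::ADD is special-cased first because when both operands are the
// same node and Op is its only user, that operand can be simplified more
// aggressively under AssumeSingleUse before falling through to the shared
// binop handling below.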
3119 case ISD::ADD: { 3120 SDValue Op0 = Op.getOperand(0); 3121 SDValue Op1 = Op.getOperand(1); 3122 if (Op0 == Op1 && Op->isOnlyUserOf(Op0.getNode())) { 3123 APInt UndefLHS, ZeroLHS; 3124 if (SimplifyDemandedVectorElts(Op0, DemandedElts, UndefLHS, ZeroLHS, TLO, 3125 Depth + 1, /*AssumeSingleUse*/ true)) 3126 return true; 3127 } 3128 LLVM_FALLTHROUGH; 3129 } 3130 case ISD::OR: 3131 case ISD::XOR: 3132 case ISD::SUB: 3133 case ISD::FADD: 3134 case ISD::FSUB: 3135 case ISD::FMUL: 3136 case ISD::FDIV: 3137 case ISD::FREM: { 3138 SDValue Op0 = Op.getOperand(0); 3139 SDValue Op1 = Op.getOperand(1); 3140 3141 APInt UndefRHS, ZeroRHS; 3142 if (SimplifyDemandedVectorElts(Op1, DemandedElts, UndefRHS, ZeroRHS, TLO, 3143 Depth + 1)) 3144 return true; 3145 APInt UndefLHS, ZeroLHS; 3146 if (SimplifyDemandedVectorElts(Op0, DemandedElts, UndefLHS, ZeroLHS, TLO, 3147 Depth + 1)) 3148 return true; 3149 3150 KnownZero = ZeroLHS & ZeroRHS; 3151 KnownUndef = getKnownUndefForVectorBinop(Op, TLO.DAG, UndefLHS, UndefRHS); 3152 3153 // Attempt to avoid multi-use ops if we don't need anything from them. 3154 // TODO - use KnownUndef to relax the demandedelts? 3155 if (!DemandedElts.isAllOnes()) 3156 if (SimplifyDemandedVectorEltsBinOp(Op0, Op1)) 3157 return true; 3158 break; 3159 } 3160 case ISD::SHL: 3161 case ISD::SRL: 3162 case ISD::SRA: 3163 case ISD::ROTL: 3164 case ISD::ROTR: { 3165 SDValue Op0 = Op.getOperand(0); 3166 SDValue Op1 = Op.getOperand(1); 3167 3168 APInt UndefRHS, ZeroRHS; 3169 if (SimplifyDemandedVectorElts(Op1, DemandedElts, UndefRHS, ZeroRHS, TLO, 3170 Depth + 1)) 3171 return true; 3172 APInt UndefLHS, ZeroLHS; 3173 if (SimplifyDemandedVectorElts(Op0, DemandedElts, UndefLHS, ZeroLHS, TLO, 3174 Depth + 1)) 3175 return true; 3176 3177 KnownZero = ZeroLHS; 3178 KnownUndef = UndefLHS & UndefRHS; // TODO: use getKnownUndefForVectorBinop? 3179 3180 // Attempt to avoid multi-use ops if we don't need anything from them. 3181 // TODO - use KnownUndef to relax the demandedelts? 3182 if (!DemandedElts.isAllOnes()) 3183 if (SimplifyDemandedVectorEltsBinOp(Op0, Op1)) 3184 return true; 3185 break; 3186 } 3187 case ISD::MUL: 3188 case ISD::AND: { 3189 SDValue Op0 = Op.getOperand(0); 3190 SDValue Op1 = Op.getOperand(1); 3191 3192 APInt SrcUndef, SrcZero; 3193 if (SimplifyDemandedVectorElts(Op1, DemandedElts, SrcUndef, SrcZero, TLO, 3194 Depth + 1)) 3195 return true; 3196 if (SimplifyDemandedVectorElts(Op0, DemandedElts, KnownUndef, KnownZero, 3197 TLO, Depth + 1)) 3198 return true; 3199 3200 // If either side has a zero element, then the result element is zero, even 3201 // if the other is an UNDEF. 3202 // TODO: Extend getKnownUndefForVectorBinop to also deal with known zeros 3203 // and then handle 'and' nodes with the rest of the binop opcodes. 3204 KnownZero |= SrcZero; 3205 KnownUndef &= SrcUndef; 3206 KnownUndef &= ~KnownZero; 3207 3208 // Attempt to avoid multi-use ops if we don't need anything from them. 3209 // TODO - use KnownUndef to relax the demandedelts? 3210 if (!DemandedElts.isAllOnes()) 3211 if (SimplifyDemandedVectorEltsBinOp(Op0, Op1)) 3212 return true; 3213 break; 3214 } 3215 case ISD::TRUNCATE: 3216 case ISD::SIGN_EXTEND: 3217 case ISD::ZERO_EXTEND: 3218 if (SimplifyDemandedVectorElts(Op.getOperand(0), DemandedElts, KnownUndef, 3219 KnownZero, TLO, Depth + 1)) 3220 return true; 3221 3222 if (Op.getOpcode() == ISD::ZERO_EXTEND) { 3223 // zext(undef) upper bits are guaranteed to be zero. 
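// (An undef input lane may be chosen to be zero, and zero-extending zero
// gives zero, so when every demanded lane is known undef the whole result
// can be folded to an all-zeros constant below.)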
3224 if (DemandedElts.isSubsetOf(KnownUndef)) 3225 return TLO.CombineTo(Op, TLO.DAG.getConstant(0, SDLoc(Op), VT)); 3226 KnownUndef.clearAllBits(); 3227 } 3228 break; 3229 default: { 3230 if (Op.getOpcode() >= ISD::BUILTIN_OP_END) { 3231 if (SimplifyDemandedVectorEltsForTargetNode(Op, DemandedElts, KnownUndef, 3232 KnownZero, TLO, Depth)) 3233 return true; 3234 } else { 3235 KnownBits Known; 3236 APInt DemandedBits = APInt::getAllOnes(EltSizeInBits); 3237 if (SimplifyDemandedBits(Op, DemandedBits, OriginalDemandedElts, Known, 3238 TLO, Depth, AssumeSingleUse)) 3239 return true; 3240 } 3241 break; 3242 } 3243 } 3244 assert((KnownUndef & KnownZero) == 0 && "Elements flagged as undef AND zero"); 3245 3246 // Constant fold all undef cases. 3247 // TODO: Handle zero cases as well. 3248 if (DemandedElts.isSubsetOf(KnownUndef)) 3249 return TLO.CombineTo(Op, TLO.DAG.getUNDEF(VT)); 3250 3251 return false; 3252 } 3253 3254 /// Determine which of the bits specified in Mask are known to be either zero or 3255 /// one and return them in the Known. 3256 void TargetLowering::computeKnownBitsForTargetNode(const SDValue Op, 3257 KnownBits &Known, 3258 const APInt &DemandedElts, 3259 const SelectionDAG &DAG, 3260 unsigned Depth) const { 3261 assert((Op.getOpcode() >= ISD::BUILTIN_OP_END || 3262 Op.getOpcode() == ISD::INTRINSIC_WO_CHAIN || 3263 Op.getOpcode() == ISD::INTRINSIC_W_CHAIN || 3264 Op.getOpcode() == ISD::INTRINSIC_VOID) && 3265 "Should use MaskedValueIsZero if you don't know whether Op" 3266 " is a target node!"); 3267 Known.resetAll(); 3268 } 3269 3270 void TargetLowering::computeKnownBitsForTargetInstr( 3271 GISelKnownBits &Analysis, Register R, KnownBits &Known, 3272 const APInt &DemandedElts, const MachineRegisterInfo &MRI, 3273 unsigned Depth) const { 3274 Known.resetAll(); 3275 } 3276 3277 void TargetLowering::computeKnownBitsForFrameIndex( 3278 const int FrameIdx, KnownBits &Known, const MachineFunction &MF) const { 3279 // The low bits are known zero if the pointer is aligned. 3280 Known.Zero.setLowBits(Log2(MF.getFrameInfo().getObjectAlign(FrameIdx))); 3281 } 3282 3283 Align TargetLowering::computeKnownAlignForTargetInstr( 3284 GISelKnownBits &Analysis, Register R, const MachineRegisterInfo &MRI, 3285 unsigned Depth) const { 3286 return Align(1); 3287 } 3288 3289 /// This method can be implemented by targets that want to expose additional 3290 /// information about sign bits to the DAG Combiner. 
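/// For example, a target whose custom node sign-extends from bit 15 could
/// override this hook roughly as follows (an illustrative sketch only;
/// FooTargetLowering and FooISD::SEXT16 are made-up names):
/// \code
///   unsigned FooTargetLowering::ComputeNumSignBitsForTargetNode(
///       SDValue Op, const APInt &DemandedElts, const SelectionDAG &DAG,
///       unsigned Depth) const {
///     if (Op.getOpcode() == FooISD::SEXT16)
///       return Op.getValueSizeInBits() - 15; // Bits 15..N-1 all match bit 15.
///     return 1; // Conservative default for unrecognized nodes.
///   }
/// \endcode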
3291 unsigned TargetLowering::ComputeNumSignBitsForTargetNode(SDValue Op, 3292 const APInt &, 3293 const SelectionDAG &, 3294 unsigned Depth) const { 3295 assert((Op.getOpcode() >= ISD::BUILTIN_OP_END || 3296 Op.getOpcode() == ISD::INTRINSIC_WO_CHAIN || 3297 Op.getOpcode() == ISD::INTRINSIC_W_CHAIN || 3298 Op.getOpcode() == ISD::INTRINSIC_VOID) && 3299 "Should use ComputeNumSignBits if you don't know whether Op" 3300 " is a target node!"); 3301 return 1; 3302 } 3303 3304 unsigned TargetLowering::computeNumSignBitsForTargetInstr( 3305 GISelKnownBits &Analysis, Register R, const APInt &DemandedElts, 3306 const MachineRegisterInfo &MRI, unsigned Depth) const { 3307 return 1; 3308 } 3309 3310 bool TargetLowering::SimplifyDemandedVectorEltsForTargetNode( 3311 SDValue Op, const APInt &DemandedElts, APInt &KnownUndef, APInt &KnownZero, 3312 TargetLoweringOpt &TLO, unsigned Depth) const { 3313 assert((Op.getOpcode() >= ISD::BUILTIN_OP_END || 3314 Op.getOpcode() == ISD::INTRINSIC_WO_CHAIN || 3315 Op.getOpcode() == ISD::INTRINSIC_W_CHAIN || 3316 Op.getOpcode() == ISD::INTRINSIC_VOID) && 3317 "Should use SimplifyDemandedVectorElts if you don't know whether Op" 3318 " is a target node!"); 3319 return false; 3320 } 3321 3322 bool TargetLowering::SimplifyDemandedBitsForTargetNode( 3323 SDValue Op, const APInt &DemandedBits, const APInt &DemandedElts, 3324 KnownBits &Known, TargetLoweringOpt &TLO, unsigned Depth) const { 3325 assert((Op.getOpcode() >= ISD::BUILTIN_OP_END || 3326 Op.getOpcode() == ISD::INTRINSIC_WO_CHAIN || 3327 Op.getOpcode() == ISD::INTRINSIC_W_CHAIN || 3328 Op.getOpcode() == ISD::INTRINSIC_VOID) && 3329 "Should use SimplifyDemandedBits if you don't know whether Op" 3330 " is a target node!"); 3331 computeKnownBitsForTargetNode(Op, Known, DemandedElts, TLO.DAG, Depth); 3332 return false; 3333 } 3334 3335 SDValue TargetLowering::SimplifyMultipleUseDemandedBitsForTargetNode( 3336 SDValue Op, const APInt &DemandedBits, const APInt &DemandedElts, 3337 SelectionDAG &DAG, unsigned Depth) const { 3338 assert( 3339 (Op.getOpcode() >= ISD::BUILTIN_OP_END || 3340 Op.getOpcode() == ISD::INTRINSIC_WO_CHAIN || 3341 Op.getOpcode() == ISD::INTRINSIC_W_CHAIN || 3342 Op.getOpcode() == ISD::INTRINSIC_VOID) && 3343 "Should use SimplifyMultipleUseDemandedBits if you don't know whether Op" 3344 " is a target node!"); 3345 return SDValue(); 3346 } 3347 3348 SDValue 3349 TargetLowering::buildLegalVectorShuffle(EVT VT, const SDLoc &DL, SDValue N0, 3350 SDValue N1, MutableArrayRef<int> Mask, 3351 SelectionDAG &DAG) const { 3352 bool LegalMask = isShuffleMaskLegal(Mask, VT); 3353 if (!LegalMask) { 3354 std::swap(N0, N1); 3355 ShuffleVectorSDNode::commuteMask(Mask); 3356 LegalMask = isShuffleMaskLegal(Mask, VT); 3357 } 3358 3359 if (!LegalMask) 3360 return SDValue(); 3361 3362 return DAG.getVectorShuffle(VT, DL, N0, N1, Mask); 3363 } 3364 3365 const Constant *TargetLowering::getTargetConstantFromLoad(LoadSDNode*) const { 3366 return nullptr; 3367 } 3368 3369 bool TargetLowering::isGuaranteedNotToBeUndefOrPoisonForTargetNode( 3370 SDValue Op, const APInt &DemandedElts, const SelectionDAG &DAG, 3371 bool PoisonOnly, unsigned Depth) const { 3372 assert( 3373 (Op.getOpcode() >= ISD::BUILTIN_OP_END || 3374 Op.getOpcode() == ISD::INTRINSIC_WO_CHAIN || 3375 Op.getOpcode() == ISD::INTRINSIC_W_CHAIN || 3376 Op.getOpcode() == ISD::INTRINSIC_VOID) && 3377 "Should use isGuaranteedNotToBeUndefOrPoison if you don't know whether Op" 3378 " is a target node!"); 3379 return false; 3380 } 3381 3382 bool 
TargetLowering::isKnownNeverNaNForTargetNode(SDValue Op,
3383 const SelectionDAG &DAG,
3384 bool SNaN,
3385 unsigned Depth) const {
3386 assert((Op.getOpcode() >= ISD::BUILTIN_OP_END ||
3387 Op.getOpcode() == ISD::INTRINSIC_WO_CHAIN ||
3388 Op.getOpcode() == ISD::INTRINSIC_W_CHAIN ||
3389 Op.getOpcode() == ISD::INTRINSIC_VOID) &&
3390 "Should use isKnownNeverNaN if you don't know whether Op"
3391 " is a target node!");
3392 return false;
3393 }
3394
3395 bool TargetLowering::isSplatValueForTargetNode(SDValue Op,
3396 const APInt &DemandedElts,
3397 APInt &UndefElts,
3398 unsigned Depth) const {
3399 assert((Op.getOpcode() >= ISD::BUILTIN_OP_END ||
3400 Op.getOpcode() == ISD::INTRINSIC_WO_CHAIN ||
3401 Op.getOpcode() == ISD::INTRINSIC_W_CHAIN ||
3402 Op.getOpcode() == ISD::INTRINSIC_VOID) &&
3403 "Should use isSplatValue if you don't know whether Op"
3404 " is a target node!");
3405 return false;
3406 }
3407
3408 // FIXME: Ideally, this would use ISD::isConstantSplatVector(), but that would
3409 // need to handle truncating build vectors and vectors with elements narrower
3410 // than 8 bits.
3411 bool TargetLowering::isConstTrueVal(SDValue N) const {
3412 if (!N)
3413 return false;
3414
3415 unsigned EltWidth;
3416 APInt CVal;
3417 if (ConstantSDNode *CN = isConstOrConstSplat(N, /*AllowUndefs=*/false,
3418 /*AllowTruncation=*/true)) {
3419 CVal = CN->getAPIntValue();
3420 EltWidth = N.getValueType().getScalarSizeInBits();
3421 } else
3422 return false;
3423
3424 // If this is a truncating splat, truncate the splat value.
3425 // Otherwise, we may fail to match the expected values below.
3426 if (EltWidth < CVal.getBitWidth())
3427 CVal = CVal.trunc(EltWidth);
3428
3429 switch (getBooleanContents(N.getValueType())) {
3430 case UndefinedBooleanContent:
3431 return CVal[0];
3432 case ZeroOrOneBooleanContent:
3433 return CVal.isOne();
3434 case ZeroOrNegativeOneBooleanContent:
3435 return CVal.isAllOnes();
3436 }
3437
3438 llvm_unreachable("Invalid boolean contents");
3439 }
3440
3441 bool TargetLowering::isConstFalseVal(SDValue N) const {
3442 if (!N)
3443 return false;
3444
3445 const ConstantSDNode *CN = dyn_cast<ConstantSDNode>(N);
3446 if (!CN) {
3447 const BuildVectorSDNode *BV = dyn_cast<BuildVectorSDNode>(N);
3448 if (!BV)
3449 return false;
3450
3451 // We're only interested in constant splats; undef elements don't matter
3452 // when identifying boolean constants, and getConstantSplatNode returns
3453 // null if all ops are undef.
3454 CN = BV->getConstantSplatNode();
3455 if (!CN)
3456 return false;
3457 }
3458
3459 if (getBooleanContents(N->getValueType(0)) == UndefinedBooleanContent)
3460 return !CN->getAPIntValue()[0];
3461
3462 return CN->isZero();
3463 }
3464
3465 bool TargetLowering::isExtendedTrueVal(const ConstantSDNode *N, EVT VT,
3466 bool SExt) const {
3467 if (VT == MVT::i1)
3468 return N->isOne();
3469
3470 TargetLowering::BooleanContent Cnt = getBooleanContents(VT);
3471 switch (Cnt) {
3472 case TargetLowering::ZeroOrOneBooleanContent:
3473 // An extended value of 1 is always true, unless its original type is i1,
3474 // in which case it will be sign-extended to -1.
3475 return (N->isOne() && !SExt) || (SExt && (N->getValueType(0) != MVT::i1));
3476 case TargetLowering::UndefinedBooleanContent:
3477 case TargetLowering::ZeroOrNegativeOneBooleanContent:
3478 return N->isAllOnes() && SExt;
3479 }
3480 llvm_unreachable("Unexpected enumeration.");
3481 }
3482
3483 /// This helper function of SimplifySetCC tries to optimize the comparison when
3484 /// either operand of the SetCC node is a bitwise-and instruction.
3485 SDValue TargetLowering::foldSetCCWithAnd(EVT VT, SDValue N0, SDValue N1,
3486 ISD::CondCode Cond, const SDLoc &DL,
3487 DAGCombinerInfo &DCI) const {
3488 if (N1.getOpcode() == ISD::AND && N0.getOpcode() != ISD::AND)
3489 std::swap(N0, N1);
3490
3491 SelectionDAG &DAG = DCI.DAG;
3492 EVT OpVT = N0.getValueType();
3493 if (N0.getOpcode() != ISD::AND || !OpVT.isInteger() ||
3494 (Cond != ISD::SETEQ && Cond != ISD::SETNE))
3495 return SDValue();
3496
3497 // (X & Y) != 0 --> zextOrTrunc(X & Y)
3498 // iff everything but LSB is known zero:
3499 if (Cond == ISD::SETNE && isNullConstant(N1) &&
3500 (getBooleanContents(OpVT) == TargetLowering::UndefinedBooleanContent ||
3501 getBooleanContents(OpVT) == TargetLowering::ZeroOrOneBooleanContent)) {
3502 unsigned NumEltBits = OpVT.getScalarSizeInBits();
3503 APInt UpperBits = APInt::getHighBitsSet(NumEltBits, NumEltBits - 1);
3504 if (DAG.MaskedValueIsZero(N0, UpperBits))
3505 return DAG.getBoolExtOrTrunc(N0, DL, VT, OpVT);
3506 }
3507
3508 // Match these patterns in any of their permutations:
3509 // (X & Y) == Y
3510 // (X & Y) != Y
3511 SDValue X, Y;
3512 if (N0.getOperand(0) == N1) {
3513 X = N0.getOperand(1);
3514 Y = N0.getOperand(0);
3515 } else if (N0.getOperand(1) == N1) {
3516 X = N0.getOperand(0);
3517 Y = N0.getOperand(1);
3518 } else {
3519 return SDValue();
3520 }
3521
3522 SDValue Zero = DAG.getConstant(0, DL, OpVT);
3523 if (DAG.isKnownToBeAPowerOfTwo(Y)) {
3524 // Simplify (X & Y) == Y to (X & Y) != 0 if Y has exactly one bit set.
3525 // Note that where Y is variable and is known to have at most one bit set
3526 // (for example, if it is Z & 1) we cannot do this; the expressions are not
3527 // equivalent when Y == 0.
3528 assert(OpVT.isInteger());
3529 Cond = ISD::getSetCCInverse(Cond, OpVT);
3530 if (DCI.isBeforeLegalizeOps() ||
3531 isCondCodeLegal(Cond, N0.getSimpleValueType()))
3532 return DAG.getSetCC(DL, VT, N0, Zero, Cond);
3533 } else if (N0.hasOneUse() && hasAndNotCompare(Y)) {
3534 // If the target supports an 'and-not' or 'and-complement' logic operation,
3535 // try to use that to make a comparison operation more efficient.
3536 // But don't do this transform if the mask is a single bit because there are
3537 // more efficient ways to deal with that case (for example, 'bt' on x86 or
3538 // 'rlwinm' on PPC).
3539
3540 // Bail out if the compare operand that we want to turn into a zero is
3541 // already a zero (otherwise, infinite loop).
3542 auto *YConst = dyn_cast<ConstantSDNode>(Y);
3543 if (YConst && YConst->isZero())
3544 return SDValue();
3545
3546 // Transform this into: (~X & Y) == 0.
3547 SDValue NotX = DAG.getNOT(SDLoc(X), X, OpVT);
3548 SDValue NewAnd = DAG.getNode(ISD::AND, SDLoc(N0), OpVT, NotX, Y);
3549 return DAG.getSetCC(DL, VT, NewAnd, Zero, Cond);
3550 }
3551
3552 return SDValue();
3553 }
3554
3555 /// There are multiple IR patterns that could be checking whether a certain
3556 /// truncation of a signed number would be lossy or not. The pattern that is
3557 /// best at the IR level may not lower optimally. Thus, we want to unfold it.
3558 /// We are looking for the following pattern: (KeptBits is a constant)
3559 /// (add %x, (1 << (KeptBits-1))) srccond (1 << KeptBits)
3560 /// KeptBits won't be bitwidth(x); that case is constant-folded to true/false.
3561 /// KeptBits also can't be 1; that would have been folded to %x dstcond 0.
3562 /// We will unfold it into the natural trunc+sext pattern:
3563 /// ((%x << C) a>> C) dstcond %x
3564 /// Where C = bitwidth(x) - KeptBits and C u< bitwidth(x)
3565 SDValue TargetLowering::optimizeSetCCOfSignedTruncationCheck(
3566 EVT SCCVT, SDValue N0, SDValue N1, ISD::CondCode Cond, DAGCombinerInfo &DCI,
3567 const SDLoc &DL) const {
3568 // We must be comparing with a constant.
3569 ConstantSDNode *C1;
3570 if (!(C1 = dyn_cast<ConstantSDNode>(N1)))
3571 return SDValue();
3572
3573 // N0 should be: add %x, (1 << (KeptBits-1))
3574 if (N0->getOpcode() != ISD::ADD)
3575 return SDValue();
3576
3577 // And we must be 'add'ing a constant.
3578 ConstantSDNode *C01;
3579 if (!(C01 = dyn_cast<ConstantSDNode>(N0->getOperand(1))))
3580 return SDValue();
3581
3582 SDValue X = N0->getOperand(0);
3583 EVT XVT = X.getValueType();
3584
3585 // Validate constants ...
3586
3587 APInt I1 = C1->getAPIntValue();
3588
3589 ISD::CondCode NewCond;
3590 if (Cond == ISD::CondCode::SETULT) {
3591 NewCond = ISD::CondCode::SETEQ;
3592 } else if (Cond == ISD::CondCode::SETULE) {
3593 NewCond = ISD::CondCode::SETEQ;
3594 // But need to 'canonicalize' the constant.
3595 I1 += 1;
3596 } else if (Cond == ISD::CondCode::SETUGT) {
3597 NewCond = ISD::CondCode::SETNE;
3598 // But need to 'canonicalize' the constant.
3599 I1 += 1;
3600 } else if (Cond == ISD::CondCode::SETUGE) {
3601 NewCond = ISD::CondCode::SETNE;
3602 } else
3603 return SDValue();
3604
3605 APInt I01 = C01->getAPIntValue();
3606
3607 auto checkConstants = [&I1, &I01]() -> bool {
3608 // Both must be powers of two, and the setcc constant must be the bigger one.
3609 return I1.ugt(I01) && I1.isPowerOf2() && I01.isPowerOf2();
3610 };
3611
3612 if (checkConstants()) {
3613 // Great, e.g. got icmp ult i16 (add i16 %x, 128), 256
3614 } else {
3615 // What if we invert constants? (and the target predicate)
3616 I1.negate();
3617 I01.negate();
3618 assert(XVT.isInteger());
3619 NewCond = getSetCCInverse(NewCond, XVT);
3620 if (!checkConstants())
3621 return SDValue();
3622 // Great, e.g. got icmp uge i16 (add i16 %x, -128), -256
3623 }
3624
3625 // They are powers of two, so which bit is set?
3626 const unsigned KeptBits = I1.logBase2();
3627 const unsigned KeptBitsMinusOne = I01.logBase2();
3628
3629 // Magic!
3630 if (KeptBits != (KeptBitsMinusOne + 1))
3631 return SDValue();
3632 assert(KeptBits > 0 && KeptBits < XVT.getSizeInBits() && "unreachable");
3633
3634 // We don't want to do this in every single case.
3635 SelectionDAG &DAG = DCI.DAG;
3636 if (!DAG.getTargetLoweringInfo().shouldTransformSignedTruncationCheck(
3637 XVT, KeptBits))
3638 return SDValue();
3639
3640 const unsigned MaskedBits = XVT.getSizeInBits() - KeptBits;
3641 assert(MaskedBits > 0 && MaskedBits < XVT.getSizeInBits() && "unreachable");
3642
3643 // Unfold into: ((%x << C) a>> C) cond %x
3644 // Where 'cond' will be either 'eq' or 'ne'.
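// Worked instance (illustrative): for i16 %x and KeptBits == 8, MaskedBits is
// 8, so icmp ult (add %x, 128), 256 becomes icmp eq (ashr (shl %x, 8), 8), %x,
// i.e. it checks that %x survives a trunc-to-i8 + sext round trip.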
3645 SDValue ShiftAmt = DAG.getConstant(MaskedBits, DL, XVT);
3646 SDValue T0 = DAG.getNode(ISD::SHL, DL, XVT, X, ShiftAmt);
3647 SDValue T1 = DAG.getNode(ISD::SRA, DL, XVT, T0, ShiftAmt);
3648 SDValue T2 = DAG.getSetCC(DL, SCCVT, T1, X, NewCond);
3649
3650 return T2;
3651 }
3652
3653 // (X & (C l>>/<< Y)) ==/!= 0 --> ((X <</l>> Y) & C) ==/!= 0
3654 SDValue TargetLowering::optimizeSetCCByHoistingAndByConstFromLogicalShift(
3655 EVT SCCVT, SDValue N0, SDValue N1C, ISD::CondCode Cond,
3656 DAGCombinerInfo &DCI, const SDLoc &DL) const {
3657 assert(isConstOrConstSplat(N1C) &&
3658 isConstOrConstSplat(N1C)->getAPIntValue().isZero() &&
3659 "Should be a comparison with 0.");
3660 assert((Cond == ISD::SETEQ || Cond == ISD::SETNE) &&
3661 "Valid only for [in]equality comparisons.");
3662
3663 unsigned NewShiftOpcode;
3664 SDValue X, C, Y;
3665
3666 SelectionDAG &DAG = DCI.DAG;
3667 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
3668
3669 // Look for '(C l>>/<< Y)'.
3670 auto Match = [&NewShiftOpcode, &X, &C, &Y, &TLI, &DAG](SDValue V) {
3671 // The shift should be one-use.
3672 if (!V.hasOneUse())
3673 return false;
3674 unsigned OldShiftOpcode = V.getOpcode();
3675 switch (OldShiftOpcode) {
3676 case ISD::SHL:
3677 NewShiftOpcode = ISD::SRL;
3678 break;
3679 case ISD::SRL:
3680 NewShiftOpcode = ISD::SHL;
3681 break;
3682 default:
3683 return false; // must be a logical shift.
3684 }
3685 // We should be shifting a constant.
3686 // FIXME: best to use isConstantOrConstantVector().
3687 C = V.getOperand(0);
3688 ConstantSDNode *CC =
3689 isConstOrConstSplat(C, /*AllowUndefs=*/true, /*AllowTruncation=*/true);
3690 if (!CC)
3691 return false;
3692 Y = V.getOperand(1);
3693
3694 ConstantSDNode *XC =
3695 isConstOrConstSplat(X, /*AllowUndefs=*/true, /*AllowTruncation=*/true);
3696 return TLI.shouldProduceAndByConstByHoistingConstFromShiftsLHSOfAnd(
3697 X, XC, CC, Y, OldShiftOpcode, NewShiftOpcode, DAG);
3698 };
3699
3700 // The LHS of the comparison should be a one-use 'and'.
3701 if (N0.getOpcode() != ISD::AND || !N0.hasOneUse())
3702 return SDValue();
3703
3704 X = N0.getOperand(0);
3705 SDValue Mask = N0.getOperand(1);
3706
3707 // 'and' is commutative!
3708 if (!Match(Mask)) {
3709 std::swap(X, Mask);
3710 if (!Match(Mask))
3711 return SDValue();
3712 }
3713
3714 EVT VT = X.getValueType();
3715
3716 // Produce:
3717 // ((X 'OppositeShiftOpcode' Y) & C) Cond 0
3718 SDValue T0 = DAG.getNode(NewShiftOpcode, DL, VT, X, Y);
3719 SDValue T1 = DAG.getNode(ISD::AND, DL, VT, T0, C);
3720 SDValue T2 = DAG.getSetCC(DL, SCCVT, T1, N1C, Cond);
3721 return T2;
3722 }
3723
3724 /// Try to fold an equality comparison with an {add/sub/xor} binary operation
3725 /// as the first operand (N0). Callers are expected to swap the N0/N1
3726 /// parameters to handle the commuted versions of these patterns.
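/// For example: (X + Y) == X becomes Y == 0, and (X - Y) == Y becomes
/// X == (Y << 1) when the shift is valid for the type.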
3727 SDValue TargetLowering::foldSetCCWithBinOp(EVT VT, SDValue N0, SDValue N1, 3728 ISD::CondCode Cond, const SDLoc &DL, 3729 DAGCombinerInfo &DCI) const { 3730 unsigned BOpcode = N0.getOpcode(); 3731 assert((BOpcode == ISD::ADD || BOpcode == ISD::SUB || BOpcode == ISD::XOR) && 3732 "Unexpected binop"); 3733 assert((Cond == ISD::SETEQ || Cond == ISD::SETNE) && "Unexpected condcode"); 3734 3735 // (X + Y) == X --> Y == 0 3736 // (X - Y) == X --> Y == 0 3737 // (X ^ Y) == X --> Y == 0 3738 SelectionDAG &DAG = DCI.DAG; 3739 EVT OpVT = N0.getValueType(); 3740 SDValue X = N0.getOperand(0); 3741 SDValue Y = N0.getOperand(1); 3742 if (X == N1) 3743 return DAG.getSetCC(DL, VT, Y, DAG.getConstant(0, DL, OpVT), Cond); 3744 3745 if (Y != N1) 3746 return SDValue(); 3747 3748 // (X + Y) == Y --> X == 0 3749 // (X ^ Y) == Y --> X == 0 3750 if (BOpcode == ISD::ADD || BOpcode == ISD::XOR) 3751 return DAG.getSetCC(DL, VT, X, DAG.getConstant(0, DL, OpVT), Cond); 3752 3753 // The shift would not be valid if the operands are boolean (i1). 3754 if (!N0.hasOneUse() || OpVT.getScalarSizeInBits() == 1) 3755 return SDValue(); 3756 3757 // (X - Y) == Y --> X == Y << 1 3758 EVT ShiftVT = getShiftAmountTy(OpVT, DAG.getDataLayout(), 3759 !DCI.isBeforeLegalize()); 3760 SDValue One = DAG.getConstant(1, DL, ShiftVT); 3761 SDValue YShl1 = DAG.getNode(ISD::SHL, DL, N1.getValueType(), Y, One); 3762 if (!DCI.isCalledByLegalizer()) 3763 DCI.AddToWorklist(YShl1.getNode()); 3764 return DAG.getSetCC(DL, VT, X, YShl1, Cond); 3765 } 3766 3767 static SDValue simplifySetCCWithCTPOP(const TargetLowering &TLI, EVT VT, 3768 SDValue N0, const APInt &C1, 3769 ISD::CondCode Cond, const SDLoc &dl, 3770 SelectionDAG &DAG) { 3771 // Look through truncs that don't change the value of a ctpop. 3772 // FIXME: Add vector support? Need to be careful with setcc result type below. 3773 SDValue CTPOP = N0; 3774 if (N0.getOpcode() == ISD::TRUNCATE && N0.hasOneUse() && !VT.isVector() && 3775 N0.getScalarValueSizeInBits() > Log2_32(N0.getOperand(0).getScalarValueSizeInBits())) 3776 CTPOP = N0.getOperand(0); 3777 3778 if (CTPOP.getOpcode() != ISD::CTPOP || !CTPOP.hasOneUse()) 3779 return SDValue(); 3780 3781 EVT CTVT = CTPOP.getValueType(); 3782 SDValue CTOp = CTPOP.getOperand(0); 3783 3784 // If this is a vector CTPOP, keep the CTPOP if it is legal. 3785 // TODO: Should we check if CTPOP is legal(or custom) for scalars? 3786 if (VT.isVector() && TLI.isOperationLegal(ISD::CTPOP, CTVT)) 3787 return SDValue(); 3788 3789 // (ctpop x) u< 2 -> (x & x-1) == 0 3790 // (ctpop x) u> 1 -> (x & x-1) != 0 3791 if (Cond == ISD::SETULT || Cond == ISD::SETUGT) { 3792 unsigned CostLimit = TLI.getCustomCtpopCost(CTVT, Cond); 3793 if (C1.ugt(CostLimit + (Cond == ISD::SETULT))) 3794 return SDValue(); 3795 if (C1 == 0 && (Cond == ISD::SETULT)) 3796 return SDValue(); // This is handled elsewhere. 3797 3798 unsigned Passes = C1.getLimitedValue() - (Cond == ISD::SETULT); 3799 3800 SDValue NegOne = DAG.getAllOnesConstant(dl, CTVT); 3801 SDValue Result = CTOp; 3802 for (unsigned i = 0; i < Passes; i++) { 3803 SDValue Add = DAG.getNode(ISD::ADD, dl, CTVT, Result, NegOne); 3804 Result = DAG.getNode(ISD::AND, dl, CTVT, Result, Add); 3805 } 3806 ISD::CondCode CC = Cond == ISD::SETULT ? ISD::SETEQ : ISD::SETNE; 3807 return DAG.getSetCC(dl, VT, Result, DAG.getConstant(0, dl, CTVT), CC); 3808 } 3809 3810 // If ctpop is not supported, expand a power-of-2 comparison based on it. 
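// Note that (x & (x - 1)) clears the lowest set bit of x, so it is zero iff
// x has at most one bit set; pairing it with an x != 0 test therefore checks
// for exactly one set bit, i.e. that x is a power of two.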
3811 if ((Cond == ISD::SETEQ || Cond == ISD::SETNE) && C1 == 1) { 3812 // For scalars, keep CTPOP if it is legal or custom. 3813 if (!VT.isVector() && TLI.isOperationLegalOrCustom(ISD::CTPOP, CTVT)) 3814 return SDValue(); 3815 // This is based on X86's custom lowering for CTPOP which produces more 3816 // instructions than the expansion here. 3817 3818 // (ctpop x) == 1 --> (x != 0) && ((x & x-1) == 0) 3819 // (ctpop x) != 1 --> (x == 0) || ((x & x-1) != 0) 3820 SDValue Zero = DAG.getConstant(0, dl, CTVT); 3821 SDValue NegOne = DAG.getAllOnesConstant(dl, CTVT); 3822 assert(CTVT.isInteger()); 3823 ISD::CondCode InvCond = ISD::getSetCCInverse(Cond, CTVT); 3824 SDValue Add = DAG.getNode(ISD::ADD, dl, CTVT, CTOp, NegOne); 3825 SDValue And = DAG.getNode(ISD::AND, dl, CTVT, CTOp, Add); 3826 SDValue LHS = DAG.getSetCC(dl, VT, CTOp, Zero, InvCond); 3827 SDValue RHS = DAG.getSetCC(dl, VT, And, Zero, Cond); 3828 unsigned LogicOpcode = Cond == ISD::SETEQ ? ISD::AND : ISD::OR; 3829 return DAG.getNode(LogicOpcode, dl, VT, LHS, RHS); 3830 } 3831 3832 return SDValue(); 3833 } 3834 3835 static SDValue foldSetCCWithRotate(EVT VT, SDValue N0, SDValue N1, 3836 ISD::CondCode Cond, const SDLoc &dl, 3837 SelectionDAG &DAG) { 3838 if (Cond != ISD::SETEQ && Cond != ISD::SETNE) 3839 return SDValue(); 3840 3841 auto *C1 = isConstOrConstSplat(N1, /* AllowUndefs */ true); 3842 if (!C1 || !(C1->isZero() || C1->isAllOnes())) 3843 return SDValue(); 3844 3845 auto getRotateSource = [](SDValue X) { 3846 if (X.getOpcode() == ISD::ROTL || X.getOpcode() == ISD::ROTR) 3847 return X.getOperand(0); 3848 return SDValue(); 3849 }; 3850 3851 // Peek through a rotated value compared against 0 or -1: 3852 // (rot X, Y) == 0/-1 --> X == 0/-1 3853 // (rot X, Y) != 0/-1 --> X != 0/-1 3854 if (SDValue R = getRotateSource(N0)) 3855 return DAG.getSetCC(dl, VT, R, N1, Cond); 3856 3857 // Peek through an 'or' of a rotated value compared against 0: 3858 // or (rot X, Y), Z ==/!= 0 --> (or X, Z) ==/!= 0 3859 // or Z, (rot X, Y) ==/!= 0 --> (or X, Z) ==/!= 0 3860 // 3861 // TODO: Add the 'and' with -1 sibling. 3862 // TODO: Recurse through a series of 'or' ops to find the rotate. 3863 EVT OpVT = N0.getValueType(); 3864 if (N0.hasOneUse() && N0.getOpcode() == ISD::OR && C1->isZero()) { 3865 if (SDValue R = getRotateSource(N0.getOperand(0))) { 3866 SDValue NewOr = DAG.getNode(ISD::OR, dl, OpVT, R, N0.getOperand(1)); 3867 return DAG.getSetCC(dl, VT, NewOr, N1, Cond); 3868 } 3869 if (SDValue R = getRotateSource(N0.getOperand(1))) { 3870 SDValue NewOr = DAG.getNode(ISD::OR, dl, OpVT, R, N0.getOperand(0)); 3871 return DAG.getSetCC(dl, VT, NewOr, N1, Cond); 3872 } 3873 } 3874 3875 return SDValue(); 3876 } 3877 3878 static SDValue foldSetCCWithFunnelShift(EVT VT, SDValue N0, SDValue N1, 3879 ISD::CondCode Cond, const SDLoc &dl, 3880 SelectionDAG &DAG) { 3881 // If we are testing for all-bits-clear, we might be able to do that with 3882 // less shifting since bit-order does not matter. 
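// (For an equality-with-zero test, it does not matter where the funnel shift
// places the bits, only whether any of them are set; the folds below exploit
// this to test the unshifted operands with a single plain shift instead.)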
3883 if (Cond != ISD::SETEQ && Cond != ISD::SETNE) 3884 return SDValue(); 3885 3886 auto *C1 = isConstOrConstSplat(N1, /* AllowUndefs */ true); 3887 if (!C1 || !C1->isZero()) 3888 return SDValue(); 3889 3890 if (!N0.hasOneUse() || 3891 (N0.getOpcode() != ISD::FSHL && N0.getOpcode() != ISD::FSHR)) 3892 return SDValue(); 3893 3894 unsigned BitWidth = N0.getScalarValueSizeInBits(); 3895 auto *ShAmtC = isConstOrConstSplat(N0.getOperand(2)); 3896 if (!ShAmtC || ShAmtC->getAPIntValue().uge(BitWidth)) 3897 return SDValue(); 3898 3899 // Canonicalize fshr as fshl to reduce pattern-matching. 3900 unsigned ShAmt = ShAmtC->getZExtValue(); 3901 if (N0.getOpcode() == ISD::FSHR) 3902 ShAmt = BitWidth - ShAmt; 3903 3904 // Match an 'or' with a specific operand 'Other' in either commuted variant. 3905 SDValue X, Y; 3906 auto matchOr = [&X, &Y](SDValue Or, SDValue Other) { 3907 if (Or.getOpcode() != ISD::OR || !Or.hasOneUse()) 3908 return false; 3909 if (Or.getOperand(0) == Other) { 3910 X = Or.getOperand(0); 3911 Y = Or.getOperand(1); 3912 return true; 3913 } 3914 if (Or.getOperand(1) == Other) { 3915 X = Or.getOperand(1); 3916 Y = Or.getOperand(0); 3917 return true; 3918 } 3919 return false; 3920 }; 3921 3922 EVT OpVT = N0.getValueType(); 3923 EVT ShAmtVT = N0.getOperand(2).getValueType(); 3924 SDValue F0 = N0.getOperand(0); 3925 SDValue F1 = N0.getOperand(1); 3926 if (matchOr(F0, F1)) { 3927 // fshl (or X, Y), X, C ==/!= 0 --> or (shl Y, C), X ==/!= 0 3928 SDValue NewShAmt = DAG.getConstant(ShAmt, dl, ShAmtVT); 3929 SDValue Shift = DAG.getNode(ISD::SHL, dl, OpVT, Y, NewShAmt); 3930 SDValue NewOr = DAG.getNode(ISD::OR, dl, OpVT, Shift, X); 3931 return DAG.getSetCC(dl, VT, NewOr, N1, Cond); 3932 } 3933 if (matchOr(F1, F0)) { 3934 // fshl X, (or X, Y), C ==/!= 0 --> or (srl Y, BW-C), X ==/!= 0 3935 SDValue NewShAmt = DAG.getConstant(BitWidth - ShAmt, dl, ShAmtVT); 3936 SDValue Shift = DAG.getNode(ISD::SRL, dl, OpVT, Y, NewShAmt); 3937 SDValue NewOr = DAG.getNode(ISD::OR, dl, OpVT, Shift, X); 3938 return DAG.getSetCC(dl, VT, NewOr, N1, Cond); 3939 } 3940 3941 return SDValue(); 3942 } 3943 3944 /// Try to simplify a setcc built with the specified operands and cc. If it is 3945 /// unable to simplify it, return a null SDValue. 3946 SDValue TargetLowering::SimplifySetCC(EVT VT, SDValue N0, SDValue N1, 3947 ISD::CondCode Cond, bool foldBooleans, 3948 DAGCombinerInfo &DCI, 3949 const SDLoc &dl) const { 3950 SelectionDAG &DAG = DCI.DAG; 3951 const DataLayout &Layout = DAG.getDataLayout(); 3952 EVT OpVT = N0.getValueType(); 3953 3954 // Constant fold or commute setcc. 3955 if (SDValue Fold = DAG.FoldSetCC(VT, N0, N1, Cond, dl)) 3956 return Fold; 3957 3958 bool N0ConstOrSplat = 3959 isConstOrConstSplat(N0, /*AllowUndefs*/ false, /*AllowTruncate*/ true); 3960 bool N1ConstOrSplat = 3961 isConstOrConstSplat(N1, /*AllowUndefs*/ false, /*AllowTruncate*/ true); 3962 3963 // Ensure that the constant occurs on the RHS and fold constant comparisons. 3964 // TODO: Handle non-splat vector constants. All undef causes trouble. 3965 // FIXME: We can't yet fold constant scalable vector splats, so avoid an 3966 // infinite loop here when we encounter one. 
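// For example (illustrative): (setlt 7, %x) is canonicalized here to
// (setgt %x, 7), so later folds only have to look for a constant on the RHS.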
3967 ISD::CondCode SwappedCC = ISD::getSetCCSwappedOperands(Cond); 3968 if (N0ConstOrSplat && (!OpVT.isScalableVector() || !N1ConstOrSplat) && 3969 (DCI.isBeforeLegalizeOps() || 3970 isCondCodeLegal(SwappedCC, N0.getSimpleValueType()))) 3971 return DAG.getSetCC(dl, VT, N1, N0, SwappedCC); 3972 3973 // If we have a subtract with the same 2 non-constant operands as this setcc 3974 // -- but in reverse order -- then try to commute the operands of this setcc 3975 // to match. A matching pair of setcc (cmp) and sub may be combined into 1 3976 // instruction on some targets. 3977 if (!N0ConstOrSplat && !N1ConstOrSplat && 3978 (DCI.isBeforeLegalizeOps() || 3979 isCondCodeLegal(SwappedCC, N0.getSimpleValueType())) && 3980 DAG.doesNodeExist(ISD::SUB, DAG.getVTList(OpVT), {N1, N0}) && 3981 !DAG.doesNodeExist(ISD::SUB, DAG.getVTList(OpVT), {N0, N1})) 3982 return DAG.getSetCC(dl, VT, N1, N0, SwappedCC); 3983 3984 if (SDValue V = foldSetCCWithRotate(VT, N0, N1, Cond, dl, DAG)) 3985 return V; 3986 3987 if (SDValue V = foldSetCCWithFunnelShift(VT, N0, N1, Cond, dl, DAG)) 3988 return V; 3989 3990 if (auto *N1C = isConstOrConstSplat(N1)) { 3991 const APInt &C1 = N1C->getAPIntValue(); 3992 3993 // Optimize some CTPOP cases. 3994 if (SDValue V = simplifySetCCWithCTPOP(*this, VT, N0, C1, Cond, dl, DAG)) 3995 return V; 3996 3997 // If the LHS is '(srl (ctlz x), 5)', the RHS is 0/1, and this is an 3998 // equality comparison, then we're just comparing whether X itself is 3999 // zero. 4000 if (N0.getOpcode() == ISD::SRL && (C1.isZero() || C1.isOne()) && 4001 N0.getOperand(0).getOpcode() == ISD::CTLZ && 4002 isPowerOf2_32(N0.getScalarValueSizeInBits())) { 4003 if (ConstantSDNode *ShAmt = isConstOrConstSplat(N0.getOperand(1))) { 4004 if ((Cond == ISD::SETEQ || Cond == ISD::SETNE) && 4005 ShAmt->getAPIntValue() == Log2_32(N0.getScalarValueSizeInBits())) { 4006 if ((C1 == 0) == (Cond == ISD::SETEQ)) { 4007 // (srl (ctlz x), 5) == 0 -> X != 0 4008 // (srl (ctlz x), 5) != 1 -> X != 0 4009 Cond = ISD::SETNE; 4010 } else { 4011 // (srl (ctlz x), 5) != 0 -> X == 0 4012 // (srl (ctlz x), 5) == 1 -> X == 0 4013 Cond = ISD::SETEQ; 4014 } 4015 SDValue Zero = DAG.getConstant(0, dl, N0.getValueType()); 4016 return DAG.getSetCC(dl, VT, N0.getOperand(0).getOperand(0), Zero, 4017 Cond); 4018 } 4019 } 4020 } 4021 } 4022 4023 // FIXME: Support vectors. 
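// The block below deliberately uses dyn_cast<ConstantSDNode>, so it only
// fires for scalar constants; splat-vector constants are instead handled by
// the isConstOrConstSplat-guarded blocks in this function.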
4024 if (auto *N1C = dyn_cast<ConstantSDNode>(N1.getNode())) { 4025 const APInt &C1 = N1C->getAPIntValue(); 4026 4027 // (zext x) == C --> x == (trunc C) 4028 // (sext x) == C --> x == (trunc C) 4029 if ((Cond == ISD::SETEQ || Cond == ISD::SETNE) && 4030 DCI.isBeforeLegalize() && N0->hasOneUse()) { 4031 unsigned MinBits = N0.getValueSizeInBits(); 4032 SDValue PreExt; 4033 bool Signed = false; 4034 if (N0->getOpcode() == ISD::ZERO_EXTEND) { 4035 // ZExt 4036 MinBits = N0->getOperand(0).getValueSizeInBits(); 4037 PreExt = N0->getOperand(0); 4038 } else if (N0->getOpcode() == ISD::AND) { 4039 // DAGCombine turns costly ZExts into ANDs 4040 if (auto *C = dyn_cast<ConstantSDNode>(N0->getOperand(1))) 4041 if ((C->getAPIntValue()+1).isPowerOf2()) { 4042 MinBits = C->getAPIntValue().countTrailingOnes(); 4043 PreExt = N0->getOperand(0); 4044 } 4045 } else if (N0->getOpcode() == ISD::SIGN_EXTEND) { 4046 // SExt 4047 MinBits = N0->getOperand(0).getValueSizeInBits(); 4048 PreExt = N0->getOperand(0); 4049 Signed = true; 4050 } else if (auto *LN0 = dyn_cast<LoadSDNode>(N0)) { 4051 // ZEXTLOAD / SEXTLOAD 4052 if (LN0->getExtensionType() == ISD::ZEXTLOAD) { 4053 MinBits = LN0->getMemoryVT().getSizeInBits(); 4054 PreExt = N0; 4055 } else if (LN0->getExtensionType() == ISD::SEXTLOAD) { 4056 Signed = true; 4057 MinBits = LN0->getMemoryVT().getSizeInBits(); 4058 PreExt = N0; 4059 } 4060 } 4061 4062 // Figure out how many bits we need to preserve this constant. 4063 unsigned ReqdBits = Signed ? C1.getMinSignedBits() : C1.getActiveBits(); 4064 4065 // Make sure we're not losing bits from the constant. 4066 if (MinBits > 0 && 4067 MinBits < C1.getBitWidth() && 4068 MinBits >= ReqdBits) { 4069 EVT MinVT = EVT::getIntegerVT(*DAG.getContext(), MinBits); 4070 if (isTypeDesirableForOp(ISD::SETCC, MinVT)) { 4071 // Will get folded away. 4072 SDValue Trunc = DAG.getNode(ISD::TRUNCATE, dl, MinVT, PreExt); 4073 if (MinBits == 1 && C1 == 1) 4074 // Invert the condition. 4075 return DAG.getSetCC(dl, VT, Trunc, DAG.getConstant(0, dl, MVT::i1), 4076 Cond == ISD::SETEQ ? 
ISD::SETNE : ISD::SETEQ);
4077 SDValue C = DAG.getConstant(C1.trunc(MinBits), dl, MinVT);
4078 return DAG.getSetCC(dl, VT, Trunc, C, Cond);
4079 }
4080
4081 // If truncating the setcc operands is not desirable, we can still
4082 // simplify the expression in some cases:
4083 // setcc ([sz]ext (setcc x, y, cc)), 0, setne -> setcc (x, y, cc)
4084 // setcc ([sz]ext (setcc x, y, cc)), 0, seteq -> setcc (x, y, inv(cc))
4085 // setcc (zext (setcc x, y, cc)), 1, setne -> setcc (x, y, inv(cc))
4086 // setcc (zext (setcc x, y, cc)), 1, seteq -> setcc (x, y, cc)
4087 // setcc (sext (setcc x, y, cc)), -1, setne -> setcc (x, y, inv(cc))
4088 // setcc (sext (setcc x, y, cc)), -1, seteq -> setcc (x, y, cc)
4089 SDValue TopSetCC = N0->getOperand(0);
4090 unsigned N0Opc = N0->getOpcode();
4091 bool SExt = (N0Opc == ISD::SIGN_EXTEND);
4092 if (TopSetCC.getValueType() == MVT::i1 && VT == MVT::i1 &&
4093 TopSetCC.getOpcode() == ISD::SETCC &&
4094 (N0Opc == ISD::ZERO_EXTEND || N0Opc == ISD::SIGN_EXTEND) &&
4095 (isConstFalseVal(N1) ||
4096 isExtendedTrueVal(N1C, N0->getValueType(0), SExt))) {
4097
4098 bool Inverse = (N1C->isZero() && Cond == ISD::SETEQ) ||
4099 (!N1C->isZero() && Cond == ISD::SETNE);
4100
4101 if (!Inverse)
4102 return TopSetCC;
4103
4104 ISD::CondCode InvCond = ISD::getSetCCInverse(
4105 cast<CondCodeSDNode>(TopSetCC.getOperand(2))->get(),
4106 TopSetCC.getOperand(0).getValueType());
4107 return DAG.getSetCC(dl, VT, TopSetCC.getOperand(0),
4108 TopSetCC.getOperand(1),
4109 InvCond);
4110 }
4111 }
4112 }
4113
4114 // If the LHS is '(and load, const)', the RHS is 0, the test is for
4115 // equality or is unsigned, and all 1 bits of the const are in the same
4116 // partial word, see if we can shorten the load.
4117 if (DCI.isBeforeLegalize() &&
4118 !ISD::isSignedIntSetCC(Cond) &&
4119 N0.getOpcode() == ISD::AND && C1 == 0 &&
4120 N0.getNode()->hasOneUse() &&
4121 isa<LoadSDNode>(N0.getOperand(0)) &&
4122 N0.getOperand(0).getNode()->hasOneUse() &&
4123 isa<ConstantSDNode>(N0.getOperand(1))) {
4124 LoadSDNode *Lod = cast<LoadSDNode>(N0.getOperand(0));
4125 APInt bestMask;
4126 unsigned bestWidth = 0, bestOffset = 0;
4127 if (Lod->isSimple() && Lod->isUnindexed()) {
4128 unsigned origWidth = N0.getValueSizeInBits();
4129 unsigned maskWidth = origWidth;
4130 // We can narrow (e.g.) 16-bit extending loads on a 32-bit target to
4131 // 8 bits, but have to be careful...
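// Illustrative little-endian example: ((load i32 %p) & 0xFF00) == 0 can be
// narrowed to ((load i8 %p+1) & 0xFF) == 0, because every set bit of the
// mask lives in a single byte of the wider load.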
4132 if (Lod->getExtensionType() != ISD::NON_EXTLOAD) 4133 origWidth = Lod->getMemoryVT().getSizeInBits(); 4134 const APInt &Mask = N0.getConstantOperandAPInt(1); 4135 for (unsigned width = origWidth / 2; width>=8; width /= 2) { 4136 APInt newMask = APInt::getLowBitsSet(maskWidth, width); 4137 for (unsigned offset=0; offset<origWidth/width; offset++) { 4138 if (Mask.isSubsetOf(newMask)) { 4139 if (Layout.isLittleEndian()) 4140 bestOffset = (uint64_t)offset * (width/8); 4141 else 4142 bestOffset = (origWidth/width - offset - 1) * (width/8); 4143 bestMask = Mask.lshr(offset * (width/8) * 8); 4144 bestWidth = width; 4145 break; 4146 } 4147 newMask <<= width; 4148 } 4149 } 4150 } 4151 if (bestWidth) { 4152 EVT newVT = EVT::getIntegerVT(*DAG.getContext(), bestWidth); 4153 if (newVT.isRound() && 4154 shouldReduceLoadWidth(Lod, ISD::NON_EXTLOAD, newVT)) { 4155 SDValue Ptr = Lod->getBasePtr(); 4156 if (bestOffset != 0) 4157 Ptr = 4158 DAG.getMemBasePlusOffset(Ptr, TypeSize::Fixed(bestOffset), dl); 4159 SDValue NewLoad = 4160 DAG.getLoad(newVT, dl, Lod->getChain(), Ptr, 4161 Lod->getPointerInfo().getWithOffset(bestOffset), 4162 Lod->getOriginalAlign()); 4163 return DAG.getSetCC(dl, VT, 4164 DAG.getNode(ISD::AND, dl, newVT, NewLoad, 4165 DAG.getConstant(bestMask.trunc(bestWidth), 4166 dl, newVT)), 4167 DAG.getConstant(0LL, dl, newVT), Cond); 4168 } 4169 } 4170 } 4171 4172 // If the LHS is a ZERO_EXTEND, perform the comparison on the input. 4173 if (N0.getOpcode() == ISD::ZERO_EXTEND) { 4174 unsigned InSize = N0.getOperand(0).getValueSizeInBits(); 4175 4176 // If the comparison constant has bits in the upper part, the 4177 // zero-extended value could never match. 4178 if (C1.intersects(APInt::getHighBitsSet(C1.getBitWidth(), 4179 C1.getBitWidth() - InSize))) { 4180 switch (Cond) { 4181 case ISD::SETUGT: 4182 case ISD::SETUGE: 4183 case ISD::SETEQ: 4184 return DAG.getConstant(0, dl, VT); 4185 case ISD::SETULT: 4186 case ISD::SETULE: 4187 case ISD::SETNE: 4188 return DAG.getConstant(1, dl, VT); 4189 case ISD::SETGT: 4190 case ISD::SETGE: 4191 // True if the sign bit of C1 is set. 4192 return DAG.getConstant(C1.isNegative(), dl, VT); 4193 case ISD::SETLT: 4194 case ISD::SETLE: 4195 // True if the sign bit of C1 isn't set. 4196 return DAG.getConstant(C1.isNonNegative(), dl, VT); 4197 default: 4198 break; 4199 } 4200 } 4201 4202 // Otherwise, we can perform the comparison with the low bits. 
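// For example (illustrative): (setult (zext i8 %x to i32), 100) becomes
// (setult i8 %x, 100); the constant is truncated to the narrow type below.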
4203 switch (Cond) { 4204 case ISD::SETEQ: 4205 case ISD::SETNE: 4206 case ISD::SETUGT: 4207 case ISD::SETUGE: 4208 case ISD::SETULT: 4209 case ISD::SETULE: { 4210 EVT newVT = N0.getOperand(0).getValueType(); 4211 if (DCI.isBeforeLegalizeOps() || 4212 (isOperationLegal(ISD::SETCC, newVT) && 4213 isCondCodeLegal(Cond, newVT.getSimpleVT()))) { 4214 EVT NewSetCCVT = getSetCCResultType(Layout, *DAG.getContext(), newVT); 4215 SDValue NewConst = DAG.getConstant(C1.trunc(InSize), dl, newVT); 4216 4217 SDValue NewSetCC = DAG.getSetCC(dl, NewSetCCVT, N0.getOperand(0), 4218 NewConst, Cond); 4219 return DAG.getBoolExtOrTrunc(NewSetCC, dl, VT, N0.getValueType()); 4220 } 4221 break; 4222 } 4223 default: 4224 break; // todo, be more careful with signed comparisons 4225 } 4226 } else if (N0.getOpcode() == ISD::SIGN_EXTEND_INREG && 4227 (Cond == ISD::SETEQ || Cond == ISD::SETNE) && 4228 !isSExtCheaperThanZExt(cast<VTSDNode>(N0.getOperand(1))->getVT(), 4229 OpVT)) { 4230 EVT ExtSrcTy = cast<VTSDNode>(N0.getOperand(1))->getVT(); 4231 unsigned ExtSrcTyBits = ExtSrcTy.getSizeInBits(); 4232 EVT ExtDstTy = N0.getValueType(); 4233 unsigned ExtDstTyBits = ExtDstTy.getSizeInBits(); 4234 4235 // If the constant doesn't fit into the number of bits for the source of 4236 // the sign extension, it is impossible for both sides to be equal. 4237 if (C1.getMinSignedBits() > ExtSrcTyBits) 4238 return DAG.getBoolConstant(Cond == ISD::SETNE, dl, VT, OpVT); 4239 4240 assert(ExtDstTy == N0.getOperand(0).getValueType() && 4241 ExtDstTy != ExtSrcTy && "Unexpected types!"); 4242 APInt Imm = APInt::getLowBitsSet(ExtDstTyBits, ExtSrcTyBits); 4243 SDValue ZextOp = DAG.getNode(ISD::AND, dl, ExtDstTy, N0.getOperand(0), 4244 DAG.getConstant(Imm, dl, ExtDstTy)); 4245 if (!DCI.isCalledByLegalizer()) 4246 DCI.AddToWorklist(ZextOp.getNode()); 4247 // Otherwise, make this a use of a zext. 4248 return DAG.getSetCC(dl, VT, ZextOp, 4249 DAG.getConstant(C1 & Imm, dl, ExtDstTy), Cond); 4250 } else if ((N1C->isZero() || N1C->isOne()) && 4251 (Cond == ISD::SETEQ || Cond == ISD::SETNE)) { 4252 // SETCC (SETCC), [0|1], [EQ|NE] -> SETCC 4253 if (N0.getOpcode() == ISD::SETCC && 4254 isTypeLegal(VT) && VT.bitsLE(N0.getValueType()) && 4255 (N0.getValueType() == MVT::i1 || 4256 getBooleanContents(N0.getOperand(0).getValueType()) == 4257 ZeroOrOneBooleanContent)) { 4258 bool TrueWhenTrue = (Cond == ISD::SETEQ) ^ (!N1C->isOne()); 4259 if (TrueWhenTrue) 4260 return DAG.getNode(ISD::TRUNCATE, dl, VT, N0); 4261 // Invert the condition. 4262 ISD::CondCode CC = cast<CondCodeSDNode>(N0.getOperand(2))->get(); 4263 CC = ISD::getSetCCInverse(CC, N0.getOperand(0).getValueType()); 4264 if (DCI.isBeforeLegalizeOps() || 4265 isCondCodeLegal(CC, N0.getOperand(0).getSimpleValueType())) 4266 return DAG.getSetCC(dl, VT, N0.getOperand(0), N0.getOperand(1), CC); 4267 } 4268 4269 if ((N0.getOpcode() == ISD::XOR || 4270 (N0.getOpcode() == ISD::AND && 4271 N0.getOperand(0).getOpcode() == ISD::XOR && 4272 N0.getOperand(1) == N0.getOperand(0).getOperand(1))) && 4273 isOneConstant(N0.getOperand(1))) { 4274 // If this is (X^1) == 0/1, swap the RHS and eliminate the xor. We 4275 // can only do this if the top bits are known zero. 4276 unsigned BitWidth = N0.getValueSizeInBits(); 4277 if (DAG.MaskedValueIsZero(N0, 4278 APInt::getHighBitsSet(BitWidth, 4279 BitWidth-1))) { 4280 // Okay, get the un-inverted input value. 
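// (With the upper bits known zero, X is 0 or 1, so e.g. (X ^ 1) == 0 holds
// exactly when X != 0; the code below therefore drops the xor and inverts
// the condition.)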
4281 SDValue Val; 4282 if (N0.getOpcode() == ISD::XOR) { 4283 Val = N0.getOperand(0); 4284 } else { 4285 assert(N0.getOpcode() == ISD::AND && 4286 N0.getOperand(0).getOpcode() == ISD::XOR); 4287 // ((X^1)&1)^1 -> X & 1 4288 Val = DAG.getNode(ISD::AND, dl, N0.getValueType(), 4289 N0.getOperand(0).getOperand(0), 4290 N0.getOperand(1)); 4291 } 4292 4293 return DAG.getSetCC(dl, VT, Val, N1, 4294 Cond == ISD::SETEQ ? ISD::SETNE : ISD::SETEQ); 4295 } 4296 } else if (N1C->isOne()) { 4297 SDValue Op0 = N0; 4298 if (Op0.getOpcode() == ISD::TRUNCATE) 4299 Op0 = Op0.getOperand(0); 4300 4301 if ((Op0.getOpcode() == ISD::XOR) && 4302 Op0.getOperand(0).getOpcode() == ISD::SETCC && 4303 Op0.getOperand(1).getOpcode() == ISD::SETCC) { 4304 SDValue XorLHS = Op0.getOperand(0); 4305 SDValue XorRHS = Op0.getOperand(1); 4306 // Ensure that the input setccs return an i1 type or 0/1 value. 4307 if (Op0.getValueType() == MVT::i1 || 4308 (getBooleanContents(XorLHS.getOperand(0).getValueType()) == 4309 ZeroOrOneBooleanContent && 4310 getBooleanContents(XorRHS.getOperand(0).getValueType()) == 4311 ZeroOrOneBooleanContent)) { 4312 // (xor (setcc), (setcc)) == / != 1 -> (setcc) != / == (setcc) 4313 Cond = (Cond == ISD::SETEQ) ? ISD::SETNE : ISD::SETEQ; 4314 return DAG.getSetCC(dl, VT, XorLHS, XorRHS, Cond); 4315 } 4316 } 4317 if (Op0.getOpcode() == ISD::AND && isOneConstant(Op0.getOperand(1))) { 4318 // If this is (X&1) == / != 1, normalize it to (X&1) != / == 0. 4319 if (Op0.getValueType().bitsGT(VT)) 4320 Op0 = DAG.getNode(ISD::AND, dl, VT, 4321 DAG.getNode(ISD::TRUNCATE, dl, VT, Op0.getOperand(0)), 4322 DAG.getConstant(1, dl, VT)); 4323 else if (Op0.getValueType().bitsLT(VT)) 4324 Op0 = DAG.getNode(ISD::AND, dl, VT, 4325 DAG.getNode(ISD::ANY_EXTEND, dl, VT, Op0.getOperand(0)), 4326 DAG.getConstant(1, dl, VT)); 4327 4328 return DAG.getSetCC(dl, VT, Op0, 4329 DAG.getConstant(0, dl, Op0.getValueType()), 4330 Cond == ISD::SETEQ ? ISD::SETNE : ISD::SETEQ); 4331 } 4332 if (Op0.getOpcode() == ISD::AssertZext && 4333 cast<VTSDNode>(Op0.getOperand(1))->getVT() == MVT::i1) 4334 return DAG.getSetCC(dl, VT, Op0, 4335 DAG.getConstant(0, dl, Op0.getValueType()), 4336 Cond == ISD::SETEQ ? ISD::SETNE : ISD::SETEQ); 4337 } 4338 } 4339 4340 // Given: 4341 // icmp eq/ne (urem %x, %y), 0 4342 // Iff %x has 0 or 1 bits set, and %y has at least 2 bits set, omit 'urem': 4343 // icmp eq/ne %x, 0 4344 if (N0.getOpcode() == ISD::UREM && N1C->isZero() && 4345 (Cond == ISD::SETEQ || Cond == ISD::SETNE)) { 4346 KnownBits XKnown = DAG.computeKnownBits(N0.getOperand(0)); 4347 KnownBits YKnown = DAG.computeKnownBits(N0.getOperand(1)); 4348 if (XKnown.countMaxPopulation() == 1 && YKnown.countMinPopulation() >= 2) 4349 return DAG.getSetCC(dl, VT, N0.getOperand(0), N1, Cond); 4350 } 4351 4352 // Fold set_cc seteq (ashr X, BW-1), -1 -> set_cc setlt X, 0 4353 // and set_cc setne (ashr X, BW-1), -1 -> set_cc setge X, 0 4354 if ((Cond == ISD::SETEQ || Cond == ISD::SETNE) && 4355 N0.getOpcode() == ISD::SRA && isa<ConstantSDNode>(N0.getOperand(1)) && 4356 N0.getConstantOperandAPInt(1) == OpVT.getScalarSizeInBits() - 1 && 4357 N1C && N1C->isAllOnes()) { 4358 return DAG.getSetCC(dl, VT, N0.getOperand(0), 4359 DAG.getConstant(0, dl, OpVT), 4360 Cond == ISD::SETEQ ? ISD::SETLT : ISD::SETGE); 4361 } 4362 4363 if (SDValue V = 4364 optimizeSetCCOfSignedTruncationCheck(VT, N0, N1, Cond, DCI, dl)) 4365 return V; 4366 } 4367 4368 // These simplifications apply to splat vectors as well. 4369 // TODO: Handle more splat vector cases. 
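// For example (illustrative, unsigned i8): (setuge %x, 1) becomes
// (setugt %x, 0) and (setult %x, 1) becomes (seteq %x, 0), driven by the
// MinVal/MaxVal bounds computed below.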
4370 if (auto *N1C = isConstOrConstSplat(N1)) { 4371 const APInt &C1 = N1C->getAPIntValue(); 4372 4373 APInt MinVal, MaxVal; 4374 unsigned OperandBitSize = N1C->getValueType(0).getScalarSizeInBits(); 4375 if (ISD::isSignedIntSetCC(Cond)) { 4376 MinVal = APInt::getSignedMinValue(OperandBitSize); 4377 MaxVal = APInt::getSignedMaxValue(OperandBitSize); 4378 } else { 4379 MinVal = APInt::getMinValue(OperandBitSize); 4380 MaxVal = APInt::getMaxValue(OperandBitSize); 4381 } 4382 4383 // Canonicalize GE/LE comparisons to use GT/LT comparisons. 4384 if (Cond == ISD::SETGE || Cond == ISD::SETUGE) { 4385 // X >= MIN --> true 4386 if (C1 == MinVal) 4387 return DAG.getBoolConstant(true, dl, VT, OpVT); 4388 4389 if (!VT.isVector()) { // TODO: Support this for vectors. 4390 // X >= C0 --> X > (C0 - 1) 4391 APInt C = C1 - 1; 4392 ISD::CondCode NewCC = (Cond == ISD::SETGE) ? ISD::SETGT : ISD::SETUGT; 4393 if ((DCI.isBeforeLegalizeOps() || 4394 isCondCodeLegal(NewCC, VT.getSimpleVT())) && 4395 (!N1C->isOpaque() || (C.getBitWidth() <= 64 && 4396 isLegalICmpImmediate(C.getSExtValue())))) { 4397 return DAG.getSetCC(dl, VT, N0, 4398 DAG.getConstant(C, dl, N1.getValueType()), 4399 NewCC); 4400 } 4401 } 4402 } 4403 4404 if (Cond == ISD::SETLE || Cond == ISD::SETULE) { 4405 // X <= MAX --> true 4406 if (C1 == MaxVal) 4407 return DAG.getBoolConstant(true, dl, VT, OpVT); 4408 4409 // X <= C0 --> X < (C0 + 1) 4410 if (!VT.isVector()) { // TODO: Support this for vectors. 4411 APInt C = C1 + 1; 4412 ISD::CondCode NewCC = (Cond == ISD::SETLE) ? ISD::SETLT : ISD::SETULT; 4413 if ((DCI.isBeforeLegalizeOps() || 4414 isCondCodeLegal(NewCC, VT.getSimpleVT())) && 4415 (!N1C->isOpaque() || (C.getBitWidth() <= 64 && 4416 isLegalICmpImmediate(C.getSExtValue())))) { 4417 return DAG.getSetCC(dl, VT, N0, 4418 DAG.getConstant(C, dl, N1.getValueType()), 4419 NewCC); 4420 } 4421 } 4422 } 4423 4424 if (Cond == ISD::SETLT || Cond == ISD::SETULT) { 4425 if (C1 == MinVal) 4426 return DAG.getBoolConstant(false, dl, VT, OpVT); // X < MIN --> false 4427 4428 // TODO: Support this for vectors after legalize ops. 4429 if (!VT.isVector() || DCI.isBeforeLegalizeOps()) { 4430 // Canonicalize setlt X, Max --> setne X, Max 4431 if (C1 == MaxVal) 4432 return DAG.getSetCC(dl, VT, N0, N1, ISD::SETNE); 4433 4434 // If we have setult X, 1, turn it into seteq X, 0 4435 if (C1 == MinVal+1) 4436 return DAG.getSetCC(dl, VT, N0, 4437 DAG.getConstant(MinVal, dl, N0.getValueType()), 4438 ISD::SETEQ); 4439 } 4440 } 4441 4442 if (Cond == ISD::SETGT || Cond == ISD::SETUGT) { 4443 if (C1 == MaxVal) 4444 return DAG.getBoolConstant(false, dl, VT, OpVT); // X > MAX --> false 4445 4446 // TODO: Support this for vectors after legalize ops. 4447 if (!VT.isVector() || DCI.isBeforeLegalizeOps()) { 4448 // Canonicalize setgt X, Min --> setne X, Min 4449 if (C1 == MinVal) 4450 return DAG.getSetCC(dl, VT, N0, N1, ISD::SETNE); 4451 4452 // If we have setugt X, Max-1, turn it into seteq X, Max 4453 if (C1 == MaxVal-1) 4454 return DAG.getSetCC(dl, VT, N0, 4455 DAG.getConstant(MaxVal, dl, N0.getValueType()), 4456 ISD::SETEQ); 4457 } 4458 } 4459 4460 if (Cond == ISD::SETEQ || Cond == ISD::SETNE) { 4461 // (X & (C l>>/<< Y)) ==/!= 0 --> ((X <</l>> Y) & C) ==/!= 0 4462 if (C1.isZero()) 4463 if (SDValue CC = optimizeSetCCByHoistingAndByConstFromLogicalShift( 4464 VT, N0, N1, Cond, DCI, dl)) 4465 return CC; 4466 4467 // For all/any comparisons, replace or(x,shl(y,bw/2)) with and/or(x,y). 
4468 // For example, when the high 32 bits of i64 X are known clear:
4469 // all bits clear: (X | (Y<<32)) == 0 --> (X | Y) == 0
4470 // all bits set: (X | (Y<<32)) == -1 --> (X & Y) == -1
4471 bool CmpZero = N1C->getAPIntValue().isZero();
4472 bool CmpNegOne = N1C->getAPIntValue().isAllOnes();
4473 if ((CmpZero || CmpNegOne) && N0.hasOneUse()) {
4474 // Match the or(lo,shl(hi,bw/2)) pattern.
4475 auto IsConcat = [&](SDValue V, SDValue &Lo, SDValue &Hi) {
4476 unsigned EltBits = V.getScalarValueSizeInBits();
4477 if (V.getOpcode() != ISD::OR || (EltBits % 2) != 0)
4478 return false;
4479 SDValue LHS = V.getOperand(0);
4480 SDValue RHS = V.getOperand(1);
4481 APInt HiBits = APInt::getHighBitsSet(EltBits, EltBits / 2);
4482 // The unshifted element must have zero upper bits.
4483 if (RHS.getOpcode() == ISD::SHL &&
4484 isa<ConstantSDNode>(RHS.getOperand(1)) &&
4485 RHS.getConstantOperandAPInt(1) == (EltBits / 2) &&
4486 DAG.MaskedValueIsZero(LHS, HiBits)) {
4487 Lo = LHS;
4488 Hi = RHS.getOperand(0);
4489 return true;
4490 }
4491 if (LHS.getOpcode() == ISD::SHL &&
4492 isa<ConstantSDNode>(LHS.getOperand(1)) &&
4493 LHS.getConstantOperandAPInt(1) == (EltBits / 2) &&
4494 DAG.MaskedValueIsZero(RHS, HiBits)) {
4495 Lo = RHS;
4496 Hi = LHS.getOperand(0);
4497 return true;
4498 }
4499 return false;
4500 };
4501
4502 auto MergeConcat = [&](SDValue Lo, SDValue Hi) {
4503 unsigned EltBits = N0.getScalarValueSizeInBits();
4504 unsigned HalfBits = EltBits / 2;
4505 APInt HiBits = APInt::getHighBitsSet(EltBits, HalfBits);
4506 SDValue LoBits = DAG.getConstant(~HiBits, dl, OpVT);
4507 SDValue HiMask = DAG.getNode(ISD::AND, dl, OpVT, Hi, LoBits);
4508 SDValue NewN0 =
4509 DAG.getNode(CmpZero ? ISD::OR : ISD::AND, dl, OpVT, Lo, HiMask);
4510 SDValue NewN1 = CmpZero ? DAG.getConstant(0, dl, OpVT) : LoBits;
4511 return DAG.getSetCC(dl, VT, NewN0, NewN1, Cond);
4512 };
4513
4514 SDValue Lo, Hi;
4515 if (IsConcat(N0, Lo, Hi))
4516 return MergeConcat(Lo, Hi);
4517
4518 if (N0.getOpcode() == ISD::AND || N0.getOpcode() == ISD::OR) {
4519 SDValue Lo0, Lo1, Hi0, Hi1;
4520 if (IsConcat(N0.getOperand(0), Lo0, Hi0) &&
4521 IsConcat(N0.getOperand(1), Lo1, Hi1)) {
4522 return MergeConcat(DAG.getNode(N0.getOpcode(), dl, OpVT, Lo0, Lo1),
4523 DAG.getNode(N0.getOpcode(), dl, OpVT, Hi0, Hi1));
4524 }
4525 }
4526 }
4527 }
4528
4529 // If we have "setcc X, C0", check to see if we can shrink the immediate
4530 // by changing cc.
4531 // TODO: Support this for vectors after legalize ops.
4532 if (!VT.isVector() || DCI.isBeforeLegalizeOps()) {
4533 // SETUGT X, SINTMAX -> SETLT X, 0
4534 // SETUGE X, SINTMIN -> SETLT X, 0
4535 if ((Cond == ISD::SETUGT && C1.isMaxSignedValue()) ||
4536 (Cond == ISD::SETUGE && C1.isMinSignedValue()))
4537 return DAG.getSetCC(dl, VT, N0,
4538 DAG.getConstant(0, dl, N1.getValueType()),
4539 ISD::SETLT);
4540
4541 // SETULT X, SINTMIN -> SETGT X, -1
4542 // SETULE X, SINTMAX -> SETGT X, -1
4543 if ((Cond == ISD::SETULT && C1.isMinSignedValue()) ||
4544 (Cond == ISD::SETULE && C1.isMaxSignedValue()))
4545 return DAG.getSetCC(dl, VT, N0,
4546 DAG.getAllOnesConstant(dl, N1.getValueType()),
4547 ISD::SETGT);
4548 }
4549
4550 // Back to non-vector simplifications.
4551 // TODO: Can we do these for vector splats?
4552 if (auto *N1C = dyn_cast<ConstantSDNode>(N1.getNode())) {
4553 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
4554 const APInt &C1 = N1C->getAPIntValue();
4555 EVT ShValTy = N0.getValueType();
4556
4557 // Fold bit comparisons when we can.
This will result in an 4559 // incorrect value when boolean false is negative one, unless 4560 // the bitsize is 1 in which case the false value is the same 4561 // in practice regardless of the representation. 4562 if ((VT.getSizeInBits() == 1 || 4563 getBooleanContents(N0.getValueType()) == ZeroOrOneBooleanContent) && 4564 (Cond == ISD::SETEQ || Cond == ISD::SETNE) && 4565 (VT == ShValTy || (isTypeLegal(VT) && VT.bitsLE(ShValTy))) && 4566 N0.getOpcode() == ISD::AND) { 4567 if (auto *AndRHS = dyn_cast<ConstantSDNode>(N0.getOperand(1))) { 4568 EVT ShiftTy = 4569 getShiftAmountTy(ShValTy, Layout, !DCI.isBeforeLegalize()); 4570 if (Cond == ISD::SETNE && C1 == 0) {// (X & 8) != 0 --> (X & 8) >> 3 4571 // Perform the xform if the AND RHS is a single bit. 4572 unsigned ShCt = AndRHS->getAPIntValue().logBase2(); 4573 if (AndRHS->getAPIntValue().isPowerOf2() && 4574 !TLI.shouldAvoidTransformToShift(ShValTy, ShCt)) { 4575 return DAG.getNode(ISD::TRUNCATE, dl, VT, 4576 DAG.getNode(ISD::SRL, dl, ShValTy, N0, 4577 DAG.getConstant(ShCt, dl, ShiftTy))); 4578 } 4579 } else if (Cond == ISD::SETEQ && C1 == AndRHS->getAPIntValue()) { 4580 // (X & 8) == 8 --> (X & 8) >> 3 4581 // Perform the xform if C1 is a single bit. 4582 unsigned ShCt = C1.logBase2(); 4583 if (C1.isPowerOf2() && 4584 !TLI.shouldAvoidTransformToShift(ShValTy, ShCt)) { 4585 return DAG.getNode(ISD::TRUNCATE, dl, VT, 4586 DAG.getNode(ISD::SRL, dl, ShValTy, N0, 4587 DAG.getConstant(ShCt, dl, ShiftTy))); 4588 } 4589 } 4590 } 4591 } 4592 4593 if (C1.getMinSignedBits() <= 64 && 4594 !isLegalICmpImmediate(C1.getSExtValue())) { 4595 EVT ShiftTy = getShiftAmountTy(ShValTy, Layout, !DCI.isBeforeLegalize()); 4596 // (X & -256) == 256 -> (X >> 8) == 1 4597 if ((Cond == ISD::SETEQ || Cond == ISD::SETNE) && 4598 N0.getOpcode() == ISD::AND && N0.hasOneUse()) { 4599 if (auto *AndRHS = dyn_cast<ConstantSDNode>(N0.getOperand(1))) { 4600 const APInt &AndRHSC = AndRHS->getAPIntValue(); 4601 if (AndRHSC.isNegatedPowerOf2() && (AndRHSC & C1) == C1) { 4602 unsigned ShiftBits = AndRHSC.countTrailingZeros(); 4603 if (!TLI.shouldAvoidTransformToShift(ShValTy, ShiftBits)) { 4604 SDValue Shift = 4605 DAG.getNode(ISD::SRL, dl, ShValTy, N0.getOperand(0), 4606 DAG.getConstant(ShiftBits, dl, ShiftTy)); 4607 SDValue CmpRHS = DAG.getConstant(C1.lshr(ShiftBits), dl, ShValTy); 4608 return DAG.getSetCC(dl, VT, Shift, CmpRHS, Cond); 4609 } 4610 } 4611 } 4612 } else if (Cond == ISD::SETULT || Cond == ISD::SETUGE || 4613 Cond == ISD::SETULE || Cond == ISD::SETUGT) { 4614 bool AdjOne = (Cond == ISD::SETULE || Cond == ISD::SETUGT); 4615 // X < 0x100000000 -> (X >> 32) < 1 4616 // X >= 0x100000000 -> (X >> 32) >= 1 4617 // X <= 0x0ffffffff -> (X >> 32) < 1 4618 // X > 0x0ffffffff -> (X >> 32) >= 1 4619 unsigned ShiftBits; 4620 APInt NewC = C1; 4621 ISD::CondCode NewCond = Cond; 4622 if (AdjOne) { 4623 ShiftBits = C1.countTrailingOnes(); 4624 NewC = NewC + 1; 4625 NewCond = (Cond == ISD::SETULE) ? 
ISD::SETULT : ISD::SETUGE; 4626 } else { 4627 ShiftBits = C1.countTrailingZeros(); 4628 } 4629 NewC.lshrInPlace(ShiftBits); 4630 if (ShiftBits && NewC.getMinSignedBits() <= 64 && 4631 isLegalICmpImmediate(NewC.getSExtValue()) && 4632 !TLI.shouldAvoidTransformToShift(ShValTy, ShiftBits)) { 4633 SDValue Shift = DAG.getNode(ISD::SRL, dl, ShValTy, N0, 4634 DAG.getConstant(ShiftBits, dl, ShiftTy)); 4635 SDValue CmpRHS = DAG.getConstant(NewC, dl, ShValTy); 4636 return DAG.getSetCC(dl, VT, Shift, CmpRHS, NewCond); 4637 } 4638 } 4639 } 4640 } 4641 4642 if (!isa<ConstantFPSDNode>(N0) && isa<ConstantFPSDNode>(N1)) { 4643 auto *CFP = cast<ConstantFPSDNode>(N1); 4644 assert(!CFP->getValueAPF().isNaN() && "Unexpected NaN value"); 4645 4646 // Otherwise, we know the RHS is not a NaN. Simplify the node to drop the 4647 // constant if knowing that the operand is non-nan is enough. We prefer to 4648 // have SETO(x,x) instead of SETO(x, 0.0) because this avoids having to 4649 // materialize 0.0. 4650 if (Cond == ISD::SETO || Cond == ISD::SETUO) 4651 return DAG.getSetCC(dl, VT, N0, N0, Cond); 4652 4653 // setcc (fneg x), C -> setcc swap(pred) x, -C 4654 if (N0.getOpcode() == ISD::FNEG) { 4655 ISD::CondCode SwapCond = ISD::getSetCCSwappedOperands(Cond); 4656 if (DCI.isBeforeLegalizeOps() || 4657 isCondCodeLegal(SwapCond, N0.getSimpleValueType())) { 4658 SDValue NegN1 = DAG.getNode(ISD::FNEG, dl, N0.getValueType(), N1); 4659 return DAG.getSetCC(dl, VT, N0.getOperand(0), NegN1, SwapCond); 4660 } 4661 } 4662 4663 // If the condition is not legal, see if we can find an equivalent one 4664 // which is legal. 4665 if (!isCondCodeLegal(Cond, N0.getSimpleValueType())) { 4666 // If the comparison was an awkward floating-point == or != and one of 4667 // the comparison operands is infinity or negative infinity, convert the 4668 // condition to a less-awkward <= or >=. 4669 if (CFP->getValueAPF().isInfinity()) { 4670 bool IsNegInf = CFP->getValueAPF().isNegative(); 4671 ISD::CondCode NewCond = ISD::SETCC_INVALID; 4672 switch (Cond) { 4673 case ISD::SETOEQ: NewCond = IsNegInf ? ISD::SETOLE : ISD::SETOGE; break; 4674 case ISD::SETUEQ: NewCond = IsNegInf ? ISD::SETULE : ISD::SETUGE; break; 4675 case ISD::SETUNE: NewCond = IsNegInf ? ISD::SETUGT : ISD::SETULT; break; 4676 case ISD::SETONE: NewCond = IsNegInf ? ISD::SETOGT : ISD::SETOLT; break; 4677 default: break; 4678 } 4679 if (NewCond != ISD::SETCC_INVALID && 4680 isCondCodeLegal(NewCond, N0.getSimpleValueType())) 4681 return DAG.getSetCC(dl, VT, N0, N1, NewCond); 4682 } 4683 } 4684 } 4685 4686 if (N0 == N1) { 4687 // The sext(setcc()) => setcc() optimization relies on the appropriate 4688 // constant being emitted. 4689 assert(!N0.getValueType().isInteger() && 4690 "Integer types should be handled by FoldSetCC"); 4691 4692 bool EqTrue = ISD::isTrueWhenEqual(Cond); 4693 unsigned UOF = ISD::getUnorderedFlavor(Cond); 4694 if (UOF == 2) // FP operators that are undefined on NaNs. 4695 return DAG.getBoolConstant(EqTrue, dl, VT, OpVT); 4696 if (UOF == unsigned(EqTrue)) 4697 return DAG.getBoolConstant(EqTrue, dl, VT, OpVT); 4698 // Otherwise, we can't fold it. However, we can simplify it to SETUO/SETO 4699 // if it is not already. 4700 ISD::CondCode NewCond = UOF == 0 ? 
ISD::SETO : ISD::SETUO; 4701 if (NewCond != Cond && 4702 (DCI.isBeforeLegalizeOps() || 4703 isCondCodeLegal(NewCond, N0.getSimpleValueType()))) 4704 return DAG.getSetCC(dl, VT, N0, N1, NewCond); 4705 } 4706 4707 if ((Cond == ISD::SETEQ || Cond == ISD::SETNE) && 4708 N0.getValueType().isInteger()) { 4709 if (N0.getOpcode() == ISD::ADD || N0.getOpcode() == ISD::SUB || 4710 N0.getOpcode() == ISD::XOR) { 4711 // Simplify (X+Y) == (X+Z) --> Y == Z 4712 if (N0.getOpcode() == N1.getOpcode()) { 4713 if (N0.getOperand(0) == N1.getOperand(0)) 4714 return DAG.getSetCC(dl, VT, N0.getOperand(1), N1.getOperand(1), Cond); 4715 if (N0.getOperand(1) == N1.getOperand(1)) 4716 return DAG.getSetCC(dl, VT, N0.getOperand(0), N1.getOperand(0), Cond); 4717 if (isCommutativeBinOp(N0.getOpcode())) { 4718 // If X op Y == Y op X, try other combinations. 4719 if (N0.getOperand(0) == N1.getOperand(1)) 4720 return DAG.getSetCC(dl, VT, N0.getOperand(1), N1.getOperand(0), 4721 Cond); 4722 if (N0.getOperand(1) == N1.getOperand(0)) 4723 return DAG.getSetCC(dl, VT, N0.getOperand(0), N1.getOperand(1), 4724 Cond); 4725 } 4726 } 4727 4728 // If RHS is a legal immediate value for a compare instruction, we need 4729 // to be careful about increasing register pressure needlessly. 4730 bool LegalRHSImm = false; 4731 4732 if (auto *RHSC = dyn_cast<ConstantSDNode>(N1)) { 4733 if (auto *LHSR = dyn_cast<ConstantSDNode>(N0.getOperand(1))) { 4734 // Turn (X+C1) == C2 --> X == C2-C1 4735 if (N0.getOpcode() == ISD::ADD && N0.getNode()->hasOneUse()) 4736 return DAG.getSetCC( 4737 dl, VT, N0.getOperand(0), 4738 DAG.getConstant(RHSC->getAPIntValue() - LHSR->getAPIntValue(), 4739 dl, N0.getValueType()), 4740 Cond); 4741 4742 // Turn (X^C1) == C2 --> X == C1^C2 4743 if (N0.getOpcode() == ISD::XOR && N0.getNode()->hasOneUse()) 4744 return DAG.getSetCC( 4745 dl, VT, N0.getOperand(0), 4746 DAG.getConstant(LHSR->getAPIntValue() ^ RHSC->getAPIntValue(), 4747 dl, N0.getValueType()), 4748 Cond); 4749 } 4750 4751 // Turn (C1-X) == C2 --> X == C1-C2 4752 if (auto *SUBC = dyn_cast<ConstantSDNode>(N0.getOperand(0))) 4753 if (N0.getOpcode() == ISD::SUB && N0.getNode()->hasOneUse()) 4754 return DAG.getSetCC( 4755 dl, VT, N0.getOperand(1), 4756 DAG.getConstant(SUBC->getAPIntValue() - RHSC->getAPIntValue(), 4757 dl, N0.getValueType()), 4758 Cond); 4759 4760 // Could RHSC fold directly into a compare? 4761 if (RHSC->getValueType(0).getSizeInBits() <= 64) 4762 LegalRHSImm = isLegalICmpImmediate(RHSC->getSExtValue()); 4763 } 4764 4765 // (X+Y) == X --> Y == 0 and similar folds. 4766 // Don't do this if X is an immediate that can fold into a cmp 4767 // instruction and X+Y has other uses. It could be an induction variable 4768 // chain, and the transform would increase register pressure. 4769 if (!LegalRHSImm || N0.hasOneUse()) 4770 if (SDValue V = foldSetCCWithBinOp(VT, N0, N1, Cond, dl, DCI)) 4771 return V; 4772 } 4773 4774 if (N1.getOpcode() == ISD::ADD || N1.getOpcode() == ISD::SUB || 4775 N1.getOpcode() == ISD::XOR) 4776 if (SDValue V = foldSetCCWithBinOp(VT, N1, N0, Cond, dl, DCI)) 4777 return V; 4778 4779 if (SDValue V = foldSetCCWithAnd(VT, N0, N1, Cond, dl, DCI)) 4780 return V; 4781 } 4782 4783 // Fold remainder of division by a constant. 
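  // For example, on i8 a (X u% 6) == 0 test can be rewritten as
  // (rotr (mul X, 0xAB), 1) u<= 42, trading the division for a multiply,
  // a rotate and a compare; the buildUREMEqFold/buildSREMEqFold helpers
  // below construct exactly this kind of pattern.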
4784 if ((N0.getOpcode() == ISD::UREM || N0.getOpcode() == ISD::SREM) && 4785 N0.hasOneUse() && (Cond == ISD::SETEQ || Cond == ISD::SETNE)) { 4786 AttributeList Attr = DAG.getMachineFunction().getFunction().getAttributes(); 4787 4788 // When division is cheap or optimizing for minimum size, 4789 // fall through to DIVREM creation by skipping this fold. 4790 if (!isIntDivCheap(VT, Attr) && !Attr.hasFnAttr(Attribute::MinSize)) { 4791 if (N0.getOpcode() == ISD::UREM) { 4792 if (SDValue Folded = buildUREMEqFold(VT, N0, N1, Cond, DCI, dl)) 4793 return Folded; 4794 } else if (N0.getOpcode() == ISD::SREM) { 4795 if (SDValue Folded = buildSREMEqFold(VT, N0, N1, Cond, DCI, dl)) 4796 return Folded; 4797 } 4798 } 4799 } 4800 4801 // Fold away ALL boolean setcc's. 4802 if (N0.getValueType().getScalarType() == MVT::i1 && foldBooleans) { 4803 SDValue Temp; 4804 switch (Cond) { 4805 default: llvm_unreachable("Unknown integer setcc!"); 4806 case ISD::SETEQ: // X == Y -> ~(X^Y) 4807 Temp = DAG.getNode(ISD::XOR, dl, OpVT, N0, N1); 4808 N0 = DAG.getNOT(dl, Temp, OpVT); 4809 if (!DCI.isCalledByLegalizer()) 4810 DCI.AddToWorklist(Temp.getNode()); 4811 break; 4812 case ISD::SETNE: // X != Y --> (X^Y) 4813 N0 = DAG.getNode(ISD::XOR, dl, OpVT, N0, N1); 4814 break; 4815 case ISD::SETGT: // X >s Y --> X == 0 & Y == 1 --> ~X & Y 4816 case ISD::SETULT: // X <u Y --> X == 0 & Y == 1 --> ~X & Y 4817 Temp = DAG.getNOT(dl, N0, OpVT); 4818 N0 = DAG.getNode(ISD::AND, dl, OpVT, N1, Temp); 4819 if (!DCI.isCalledByLegalizer()) 4820 DCI.AddToWorklist(Temp.getNode()); 4821 break; 4822 case ISD::SETLT: // X <s Y --> X == 1 & Y == 0 --> ~Y & X 4823 case ISD::SETUGT: // X >u Y --> X == 1 & Y == 0 --> ~Y & X 4824 Temp = DAG.getNOT(dl, N1, OpVT); 4825 N0 = DAG.getNode(ISD::AND, dl, OpVT, N0, Temp); 4826 if (!DCI.isCalledByLegalizer()) 4827 DCI.AddToWorklist(Temp.getNode()); 4828 break; 4829 case ISD::SETULE: // X <=u Y --> X == 0 | Y == 1 --> ~X | Y 4830 case ISD::SETGE: // X >=s Y --> X == 0 | Y == 1 --> ~X | Y 4831 Temp = DAG.getNOT(dl, N0, OpVT); 4832 N0 = DAG.getNode(ISD::OR, dl, OpVT, N1, Temp); 4833 if (!DCI.isCalledByLegalizer()) 4834 DCI.AddToWorklist(Temp.getNode()); 4835 break; 4836 case ISD::SETUGE: // X >=u Y --> X == 1 | Y == 0 --> ~Y | X 4837 case ISD::SETLE: // X <=s Y --> X == 1 | Y == 0 --> ~Y | X 4838 Temp = DAG.getNOT(dl, N1, OpVT); 4839 N0 = DAG.getNode(ISD::OR, dl, OpVT, N0, Temp); 4840 break; 4841 } 4842 if (VT.getScalarType() != MVT::i1) { 4843 if (!DCI.isCalledByLegalizer()) 4844 DCI.AddToWorklist(N0.getNode()); 4845 // FIXME: If running after legalize, we probably can't do this. 4846 ISD::NodeType ExtendCode = getExtendForContent(getBooleanContents(OpVT)); 4847 N0 = DAG.getNode(ExtendCode, dl, VT, N0); 4848 } 4849 return N0; 4850 } 4851 4852 // Could not fold it. 4853 return SDValue(); 4854 } 4855 4856 /// Returns true (and the GlobalValue and the offset) if the node is a 4857 /// GlobalAddress + offset. 
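/// For example, a node of the form (add (GlobalAddress @g, 8), 4) yields
/// GA = @g and adds 12 to Offset.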
4858 bool TargetLowering::isGAPlusOffset(SDNode *WN, const GlobalValue *&GA, 4859 int64_t &Offset) const { 4860 4861 SDNode *N = unwrapAddress(SDValue(WN, 0)).getNode(); 4862 4863 if (auto *GASD = dyn_cast<GlobalAddressSDNode>(N)) { 4864 GA = GASD->getGlobal(); 4865 Offset += GASD->getOffset(); 4866 return true; 4867 } 4868 4869 if (N->getOpcode() == ISD::ADD) { 4870 SDValue N1 = N->getOperand(0); 4871 SDValue N2 = N->getOperand(1); 4872 if (isGAPlusOffset(N1.getNode(), GA, Offset)) { 4873 if (auto *V = dyn_cast<ConstantSDNode>(N2)) { 4874 Offset += V->getSExtValue(); 4875 return true; 4876 } 4877 } else if (isGAPlusOffset(N2.getNode(), GA, Offset)) { 4878 if (auto *V = dyn_cast<ConstantSDNode>(N1)) { 4879 Offset += V->getSExtValue(); 4880 return true; 4881 } 4882 } 4883 } 4884 4885 return false; 4886 } 4887 4888 SDValue TargetLowering::PerformDAGCombine(SDNode *N, 4889 DAGCombinerInfo &DCI) const { 4890 // Default implementation: no optimization. 4891 return SDValue(); 4892 } 4893 4894 //===----------------------------------------------------------------------===// 4895 // Inline Assembler Implementation Methods 4896 //===----------------------------------------------------------------------===// 4897 4898 TargetLowering::ConstraintType 4899 TargetLowering::getConstraintType(StringRef Constraint) const { 4900 unsigned S = Constraint.size(); 4901 4902 if (S == 1) { 4903 switch (Constraint[0]) { 4904 default: break; 4905 case 'r': 4906 return C_RegisterClass; 4907 case 'm': // memory 4908 case 'o': // offsetable 4909 case 'V': // not offsetable 4910 return C_Memory; 4911 case 'p': // Address. 4912 return C_Address; 4913 case 'n': // Simple Integer 4914 case 'E': // Floating Point Constant 4915 case 'F': // Floating Point Constant 4916 return C_Immediate; 4917 case 'i': // Simple Integer or Relocatable Constant 4918 case 's': // Relocatable Constant 4919 case 'X': // Allow ANY value. 4920 case 'I': // Target registers. 4921 case 'J': 4922 case 'K': 4923 case 'L': 4924 case 'M': 4925 case 'N': 4926 case 'O': 4927 case 'P': 4928 case '<': 4929 case '>': 4930 return C_Other; 4931 } 4932 } 4933 4934 if (S > 1 && Constraint[0] == '{' && Constraint[S - 1] == '}') { 4935 if (S == 8 && Constraint.substr(1, 6) == "memory") // "{memory}" 4936 return C_Memory; 4937 return C_Register; 4938 } 4939 return C_Unknown; 4940 } 4941 4942 /// Try to replace an X constraint, which matches anything, with another that 4943 /// has more specific requirements based on the type of the corresponding 4944 /// operand. 4945 const char *TargetLowering::LowerXConstraint(EVT ConstraintVT) const { 4946 if (ConstraintVT.isInteger()) 4947 return "r"; 4948 if (ConstraintVT.isFloatingPoint()) 4949 return "f"; // works for many targets 4950 return nullptr; 4951 } 4952 4953 SDValue TargetLowering::LowerAsmOutputForConstraint( 4954 SDValue &Chain, SDValue &Flag, const SDLoc &DL, 4955 const AsmOperandInfo &OpInfo, SelectionDAG &DAG) const { 4956 return SDValue(); 4957 } 4958 4959 /// Lower the specified operand into the Ops vector. 4960 /// If it is invalid, don't add anything to Ops. 
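/// For example (an illustrative sketch): for constraint 'i' an operand
/// (add GA, 5) is peeled down to the global and a TargetGlobalAddress with
/// offset 5 is pushed, while for constraint 'n' the same operand adds
/// nothing, since 'n' only accepts plain integer constants.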
4961 void TargetLowering::LowerAsmOperandForConstraint(SDValue Op, 4962 std::string &Constraint, 4963 std::vector<SDValue> &Ops, 4964 SelectionDAG &DAG) const { 4965 4966 if (Constraint.length() > 1) return; 4967 4968 char ConstraintLetter = Constraint[0]; 4969 switch (ConstraintLetter) { 4970 default: break; 4971 case 'X': // Allows any operand 4972 case 'i': // Simple Integer or Relocatable Constant 4973 case 'n': // Simple Integer 4974 case 's': { // Relocatable Constant 4975 4976 ConstantSDNode *C; 4977 uint64_t Offset = 0; 4978 4979 // Match (GA) or (C) or (GA+C) or (GA-C) or ((GA+C)+C) or (((GA+C)+C)+C), 4980 // etc., since getelementpointer is variadic. We can't use 4981 // SelectionDAG::FoldSymbolOffset because it expects the GA to be accessible 4982 // while in this case the GA may be furthest from the root node which is 4983 // likely an ISD::ADD. 4984 while (true) { 4985 if ((C = dyn_cast<ConstantSDNode>(Op)) && ConstraintLetter != 's') { 4986 // gcc prints these as sign extended. Sign extend value to 64 bits 4987 // now; without this it would get ZExt'd later in 4988 // ScheduleDAGSDNodes::EmitNode, which is very generic. 4989 bool IsBool = C->getConstantIntValue()->getBitWidth() == 1; 4990 BooleanContent BCont = getBooleanContents(MVT::i64); 4991 ISD::NodeType ExtOpc = 4992 IsBool ? getExtendForContent(BCont) : ISD::SIGN_EXTEND; 4993 int64_t ExtVal = 4994 ExtOpc == ISD::ZERO_EXTEND ? C->getZExtValue() : C->getSExtValue(); 4995 Ops.push_back( 4996 DAG.getTargetConstant(Offset + ExtVal, SDLoc(C), MVT::i64)); 4997 return; 4998 } 4999 if (ConstraintLetter != 'n') { 5000 if (const auto *GA = dyn_cast<GlobalAddressSDNode>(Op)) { 5001 Ops.push_back(DAG.getTargetGlobalAddress(GA->getGlobal(), SDLoc(Op), 5002 GA->getValueType(0), 5003 Offset + GA->getOffset())); 5004 return; 5005 } 5006 if (const auto *BA = dyn_cast<BlockAddressSDNode>(Op)) { 5007 Ops.push_back(DAG.getTargetBlockAddress( 5008 BA->getBlockAddress(), BA->getValueType(0), 5009 Offset + BA->getOffset(), BA->getTargetFlags())); 5010 return; 5011 } 5012 if (isa<BasicBlockSDNode>(Op)) { 5013 Ops.push_back(Op); 5014 return; 5015 } 5016 } 5017 const unsigned OpCode = Op.getOpcode(); 5018 if (OpCode == ISD::ADD || OpCode == ISD::SUB) { 5019 if ((C = dyn_cast<ConstantSDNode>(Op.getOperand(0)))) 5020 Op = Op.getOperand(1); 5021 // Subtraction is not commutative. 5022 else if (OpCode == ISD::ADD && 5023 (C = dyn_cast<ConstantSDNode>(Op.getOperand(1)))) 5024 Op = Op.getOperand(0); 5025 else 5026 return; 5027 Offset += (OpCode == ISD::ADD ? 1 : -1) * C->getSExtValue(); 5028 continue; 5029 } 5030 return; 5031 } 5032 break; 5033 } 5034 } 5035 } 5036 5037 std::pair<unsigned, const TargetRegisterClass *> 5038 TargetLowering::getRegForInlineAsmConstraint(const TargetRegisterInfo *RI, 5039 StringRef Constraint, 5040 MVT VT) const { 5041 if (Constraint.empty() || Constraint[0] != '{') 5042 return std::make_pair(0u, static_cast<TargetRegisterClass *>(nullptr)); 5043 assert(*(Constraint.end() - 1) == '}' && "Not a brace enclosed constraint?"); 5044 5045 // Remove the braces from around the name. 5046 StringRef RegName(Constraint.data() + 1, Constraint.size() - 2); 5047 5048 std::pair<unsigned, const TargetRegisterClass *> R = 5049 std::make_pair(0u, static_cast<const TargetRegisterClass *>(nullptr)); 5050 5051 // Figure out which register class contains this reg. 5052 for (const TargetRegisterClass *RC : RI->regclasses()) { 5053 // If none of the value types for this register class are valid, we 5054 // can't use it. 
For example, 64-bit reg classes on 32-bit targets.
    if (!isLegalRC(*RI, *RC))
      continue;

    for (const MCPhysReg &PR : *RC) {
      if (RegName.equals_insensitive(RI->getRegAsmName(PR))) {
        std::pair<unsigned, const TargetRegisterClass *> S =
            std::make_pair(PR, RC);

        // If this register class has the requested value type, return it,
        // otherwise keep searching and return the first class found
        // if no other is found which explicitly has the requested type.
        if (RI->isTypeLegalForClass(*RC, VT))
          return S;
        if (!R.second)
          R = S;
      }
    }
  }

  return R;
}

//===----------------------------------------------------------------------===//
// Constraint Selection.

/// Return true if this is an input operand that is a matching constraint
/// like "4".
bool TargetLowering::AsmOperandInfo::isMatchingInputConstraint() const {
  assert(!ConstraintCode.empty() && "No known constraint!");
  return isdigit(static_cast<unsigned char>(ConstraintCode[0]));
}

/// If this is an input matching constraint, this method returns the output
/// operand it matches.
unsigned TargetLowering::AsmOperandInfo::getMatchedOperand() const {
  assert(!ConstraintCode.empty() && "No known constraint!");
  return atoi(ConstraintCode.c_str());
}

/// Split up the constraint string from the inline assembly value into the
/// specific constraints and their prefixes, and also tie in the associated
/// operand values.
/// If this returns an empty vector, and if the constraint string itself
/// isn't empty, there was an error parsing.
TargetLowering::AsmOperandInfoVector
TargetLowering::ParseConstraints(const DataLayout &DL,
                                 const TargetRegisterInfo *TRI,
                                 const CallBase &Call) const {
  /// Information about all of the constraints.
  AsmOperandInfoVector ConstraintOperands;
  const InlineAsm *IA = cast<InlineAsm>(Call.getCalledOperand());
  unsigned maCount = 0; // Largest number of multiple alternative constraints.

  // Do a prepass over the constraints, canonicalizing them, and building up
  // the ConstraintOperands list.
  unsigned ArgNo = 0; // ArgNo - The argument of the CallInst.
  unsigned ResNo = 0; // ResNo - The result number of the next output.

  for (InlineAsm::ConstraintInfo &CI : IA->ParseConstraints()) {
    ConstraintOperands.emplace_back(std::move(CI));
    AsmOperandInfo &OpInfo = ConstraintOperands.back();

    // Update multiple alternative constraint count.
    if (OpInfo.multipleAlternatives.size() > maCount)
      maCount = OpInfo.multipleAlternatives.size();

    OpInfo.ConstraintVT = MVT::Other;

    // Compute the value type for each operand.
    switch (OpInfo.Type) {
    case InlineAsm::isOutput:
      // Indirect outputs just consume an argument.
      if (OpInfo.isIndirect) {
        OpInfo.CallOperandVal = Call.getArgOperand(ArgNo);
        break;
      }

      // The return value of the call is this value. As such, there is no
      // corresponding argument.
5134 assert(!Call.getType()->isVoidTy() && "Bad inline asm!"); 5135 if (StructType *STy = dyn_cast<StructType>(Call.getType())) { 5136 OpInfo.ConstraintVT = 5137 getSimpleValueType(DL, STy->getElementType(ResNo)); 5138 } else { 5139 assert(ResNo == 0 && "Asm only has one result!"); 5140 OpInfo.ConstraintVT = 5141 getAsmOperandValueType(DL, Call.getType()).getSimpleVT(); 5142 } 5143 ++ResNo; 5144 break; 5145 case InlineAsm::isInput: 5146 OpInfo.CallOperandVal = Call.getArgOperand(ArgNo); 5147 break; 5148 case InlineAsm::isClobber: 5149 // Nothing to do. 5150 break; 5151 } 5152 5153 if (OpInfo.CallOperandVal) { 5154 llvm::Type *OpTy = OpInfo.CallOperandVal->getType(); 5155 if (OpInfo.isIndirect) { 5156 OpTy = Call.getParamElementType(ArgNo); 5157 assert(OpTy && "Indirect operand must have elementtype attribute"); 5158 } 5159 5160 // Look for vector wrapped in a struct. e.g. { <16 x i8> }. 5161 if (StructType *STy = dyn_cast<StructType>(OpTy)) 5162 if (STy->getNumElements() == 1) 5163 OpTy = STy->getElementType(0); 5164 5165 // If OpTy is not a single value, it may be a struct/union that we 5166 // can tile with integers. 5167 if (!OpTy->isSingleValueType() && OpTy->isSized()) { 5168 unsigned BitSize = DL.getTypeSizeInBits(OpTy); 5169 switch (BitSize) { 5170 default: break; 5171 case 1: 5172 case 8: 5173 case 16: 5174 case 32: 5175 case 64: 5176 case 128: 5177 OpInfo.ConstraintVT = 5178 MVT::getVT(IntegerType::get(OpTy->getContext(), BitSize), true); 5179 break; 5180 } 5181 } else if (PointerType *PT = dyn_cast<PointerType>(OpTy)) { 5182 unsigned PtrSize = DL.getPointerSizeInBits(PT->getAddressSpace()); 5183 OpInfo.ConstraintVT = MVT::getIntegerVT(PtrSize); 5184 } else { 5185 OpInfo.ConstraintVT = MVT::getVT(OpTy, true); 5186 } 5187 5188 ArgNo++; 5189 } 5190 } 5191 5192 // If we have multiple alternative constraints, select the best alternative. 5193 if (!ConstraintOperands.empty()) { 5194 if (maCount) { 5195 unsigned bestMAIndex = 0; 5196 int bestWeight = -1; 5197 // weight: -1 = invalid match, and 0 = so-so match to 5 = good match. 5198 int weight = -1; 5199 unsigned maIndex; 5200 // Compute the sums of the weights for each alternative, keeping track 5201 // of the best (highest weight) one so far. 5202 for (maIndex = 0; maIndex < maCount; ++maIndex) { 5203 int weightSum = 0; 5204 for (unsigned cIndex = 0, eIndex = ConstraintOperands.size(); 5205 cIndex != eIndex; ++cIndex) { 5206 AsmOperandInfo &OpInfo = ConstraintOperands[cIndex]; 5207 if (OpInfo.Type == InlineAsm::isClobber) 5208 continue; 5209 5210 // If this is an output operand with a matching input operand, 5211 // look up the matching input. If their types mismatch, e.g. one 5212 // is an integer, the other is floating point, or their sizes are 5213 // different, flag it as an maCantMatch. 5214 if (OpInfo.hasMatchingInput()) { 5215 AsmOperandInfo &Input = ConstraintOperands[OpInfo.MatchingInput]; 5216 if (OpInfo.ConstraintVT != Input.ConstraintVT) { 5217 if ((OpInfo.ConstraintVT.isInteger() != 5218 Input.ConstraintVT.isInteger()) || 5219 (OpInfo.ConstraintVT.getSizeInBits() != 5220 Input.ConstraintVT.getSizeInBits())) { 5221 weightSum = -1; // Can't match. 5222 break; 5223 } 5224 } 5225 } 5226 weight = getMultipleConstraintMatchWeight(OpInfo, maIndex); 5227 if (weight == -1) { 5228 weightSum = -1; 5229 break; 5230 } 5231 weightSum += weight; 5232 } 5233 // Update best. 
5234 if (weightSum > bestWeight) { 5235 bestWeight = weightSum; 5236 bestMAIndex = maIndex; 5237 } 5238 } 5239 5240 // Now select chosen alternative in each constraint. 5241 for (AsmOperandInfo &cInfo : ConstraintOperands) 5242 if (cInfo.Type != InlineAsm::isClobber) 5243 cInfo.selectAlternative(bestMAIndex); 5244 } 5245 } 5246 5247 // Check and hook up tied operands, choose constraint code to use. 5248 for (unsigned cIndex = 0, eIndex = ConstraintOperands.size(); 5249 cIndex != eIndex; ++cIndex) { 5250 AsmOperandInfo &OpInfo = ConstraintOperands[cIndex]; 5251 5252 // If this is an output operand with a matching input operand, look up the 5253 // matching input. If their types mismatch, e.g. one is an integer, the 5254 // other is floating point, or their sizes are different, flag it as an 5255 // error. 5256 if (OpInfo.hasMatchingInput()) { 5257 AsmOperandInfo &Input = ConstraintOperands[OpInfo.MatchingInput]; 5258 5259 if (OpInfo.ConstraintVT != Input.ConstraintVT) { 5260 std::pair<unsigned, const TargetRegisterClass *> MatchRC = 5261 getRegForInlineAsmConstraint(TRI, OpInfo.ConstraintCode, 5262 OpInfo.ConstraintVT); 5263 std::pair<unsigned, const TargetRegisterClass *> InputRC = 5264 getRegForInlineAsmConstraint(TRI, Input.ConstraintCode, 5265 Input.ConstraintVT); 5266 if ((OpInfo.ConstraintVT.isInteger() != 5267 Input.ConstraintVT.isInteger()) || 5268 (MatchRC.second != InputRC.second)) { 5269 report_fatal_error("Unsupported asm: input constraint" 5270 " with a matching output constraint of" 5271 " incompatible type!"); 5272 } 5273 } 5274 } 5275 } 5276 5277 return ConstraintOperands; 5278 } 5279 5280 /// Return an integer indicating how general CT is. 5281 static unsigned getConstraintGenerality(TargetLowering::ConstraintType CT) { 5282 switch (CT) { 5283 case TargetLowering::C_Immediate: 5284 case TargetLowering::C_Other: 5285 case TargetLowering::C_Unknown: 5286 return 0; 5287 case TargetLowering::C_Register: 5288 return 1; 5289 case TargetLowering::C_RegisterClass: 5290 return 2; 5291 case TargetLowering::C_Memory: 5292 case TargetLowering::C_Address: 5293 return 3; 5294 } 5295 llvm_unreachable("Invalid constraint type"); 5296 } 5297 5298 /// Examine constraint type and operand type and determine a weight value. 5299 /// This object must already have been set up with the operand type 5300 /// and the current alternative constraint selected. 5301 TargetLowering::ConstraintWeight 5302 TargetLowering::getMultipleConstraintMatchWeight( 5303 AsmOperandInfo &info, int maIndex) const { 5304 InlineAsm::ConstraintCodeVector *rCodes; 5305 if (maIndex >= (int)info.multipleAlternatives.size()) 5306 rCodes = &info.Codes; 5307 else 5308 rCodes = &info.multipleAlternatives[maIndex].Codes; 5309 ConstraintWeight BestWeight = CW_Invalid; 5310 5311 // Loop over the options, keeping track of the most general one. 5312 for (const std::string &rCode : *rCodes) { 5313 ConstraintWeight weight = 5314 getSingleConstraintMatchWeight(info, rCode.c_str()); 5315 if (weight > BestWeight) 5316 BestWeight = weight; 5317 } 5318 5319 return BestWeight; 5320 } 5321 5322 /// Examine constraint type and operand type and determine a weight value. 5323 /// This object must already have been set up with the operand type 5324 /// and the current alternative constraint selected. 
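/// For example (an illustrative sketch), a ConstantInt operand weighed
/// against the codes of "imr" scores CW_Constant for 'i', CW_Memory for 'm'
/// and CW_Register for 'r'; getMultipleConstraintMatchWeight above then
/// keeps the highest of those weights.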
TargetLowering::ConstraintWeight
TargetLowering::getSingleConstraintMatchWeight(
    AsmOperandInfo &info, const char *constraint) const {
  ConstraintWeight weight = CW_Invalid;
  Value *CallOperandVal = info.CallOperandVal;
  // If we don't have a value, we can't do a match,
  // but allow it at the lowest weight.
  if (!CallOperandVal)
    return CW_Default;
  // Look at the constraint type.
  switch (*constraint) {
  case 'i': // immediate integer.
  case 'n': // immediate integer with a known value.
    if (isa<ConstantInt>(CallOperandVal))
      weight = CW_Constant;
    break;
  case 's': // non-explicit integral immediate.
    if (isa<GlobalValue>(CallOperandVal))
      weight = CW_Constant;
    break;
  case 'E': // immediate float if host format.
  case 'F': // immediate float.
    if (isa<ConstantFP>(CallOperandVal))
      weight = CW_Constant;
    break;
  case '<': // memory operand with autodecrement.
  case '>': // memory operand with autoincrement.
  case 'm': // memory operand.
  case 'o': // offsettable memory operand
  case 'V': // non-offsettable memory operand
    weight = CW_Memory;
    break;
  case 'r': // general register.
  case 'g': // general register, memory operand or immediate integer.
            // note: Clang converts "g" to "imr".
    if (CallOperandVal->getType()->isIntegerTy())
      weight = CW_Register;
    break;
  case 'X': // any operand.
  default:
    weight = CW_Default;
    break;
  }
  return weight;
}

/// If there are multiple different constraints that we could pick for this
/// operand (e.g. "imr") try to pick the 'best' one.
/// This is somewhat tricky: constraints fall into four classes:
///    Other         -> immediates and magic values
///    Register      -> one specific register
///    RegisterClass -> a group of regs
///    Memory        -> memory
/// Ideally, we would pick the most specific constraint possible: if we have
/// something that fits into a register, we would pick it. The problem here
/// is that if we have something that could either be in a register or in
/// memory, then using the register could cause selection of *other*
/// operands to fail: they might only succeed if we pick memory. Because of
/// this, the heuristic we use is:
///
///  1) If there is an 'other' constraint, and if the operand is valid for
///     that constraint, use it. This makes us take advantage of 'i'
///     constraints when available.
///  2) Otherwise, pick the most general constraint present. This prefers
///     'm' over 'r', for example.
///
static void ChooseConstraint(TargetLowering::AsmOperandInfo &OpInfo,
                             const TargetLowering &TLI,
                             SDValue Op, SelectionDAG *DAG) {
  assert(OpInfo.Codes.size() > 1 && "Doesn't have multiple constraint options");
  unsigned BestIdx = 0;
  TargetLowering::ConstraintType BestType = TargetLowering::C_Unknown;
  int BestGenerality = -1;

  // Loop over the options, keeping track of the most general one.
  for (unsigned i = 0, e = OpInfo.Codes.size(); i != e; ++i) {
    TargetLowering::ConstraintType CType =
        TLI.getConstraintType(OpInfo.Codes[i]);

    // Indirect 'other' or 'immediate' constraints are not allowed.
5405 if (OpInfo.isIndirect && !(CType == TargetLowering::C_Memory || 5406 CType == TargetLowering::C_Register || 5407 CType == TargetLowering::C_RegisterClass)) 5408 continue; 5409 5410 // If this is an 'other' or 'immediate' constraint, see if the operand is 5411 // valid for it. For example, on X86 we might have an 'rI' constraint. If 5412 // the operand is an integer in the range [0..31] we want to use I (saving a 5413 // load of a register), otherwise we must use 'r'. 5414 if ((CType == TargetLowering::C_Other || 5415 CType == TargetLowering::C_Immediate) && Op.getNode()) { 5416 assert(OpInfo.Codes[i].size() == 1 && 5417 "Unhandled multi-letter 'other' constraint"); 5418 std::vector<SDValue> ResultOps; 5419 TLI.LowerAsmOperandForConstraint(Op, OpInfo.Codes[i], 5420 ResultOps, *DAG); 5421 if (!ResultOps.empty()) { 5422 BestType = CType; 5423 BestIdx = i; 5424 break; 5425 } 5426 } 5427 5428 // Things with matching constraints can only be registers, per gcc 5429 // documentation. This mainly affects "g" constraints. 5430 if (CType == TargetLowering::C_Memory && OpInfo.hasMatchingInput()) 5431 continue; 5432 5433 // This constraint letter is more general than the previous one, use it. 5434 int Generality = getConstraintGenerality(CType); 5435 if (Generality > BestGenerality) { 5436 BestType = CType; 5437 BestIdx = i; 5438 BestGenerality = Generality; 5439 } 5440 } 5441 5442 OpInfo.ConstraintCode = OpInfo.Codes[BestIdx]; 5443 OpInfo.ConstraintType = BestType; 5444 } 5445 5446 /// Determines the constraint code and constraint type to use for the specific 5447 /// AsmOperandInfo, setting OpInfo.ConstraintCode and OpInfo.ConstraintType. 5448 void TargetLowering::ComputeConstraintToUse(AsmOperandInfo &OpInfo, 5449 SDValue Op, 5450 SelectionDAG *DAG) const { 5451 assert(!OpInfo.Codes.empty() && "Must have at least one constraint"); 5452 5453 // Single-letter constraints ('r') are very common. 5454 if (OpInfo.Codes.size() == 1) { 5455 OpInfo.ConstraintCode = OpInfo.Codes[0]; 5456 OpInfo.ConstraintType = getConstraintType(OpInfo.ConstraintCode); 5457 } else { 5458 ChooseConstraint(OpInfo, *this, Op, DAG); 5459 } 5460 5461 // 'X' matches anything. 5462 if (OpInfo.ConstraintCode == "X" && OpInfo.CallOperandVal) { 5463 // Constants are handled elsewhere. For Functions, the type here is the 5464 // type of the result, which is not what we want to look at; leave them 5465 // alone. 5466 Value *v = OpInfo.CallOperandVal; 5467 if (isa<ConstantInt>(v) || isa<Function>(v)) { 5468 return; 5469 } 5470 5471 if (isa<BasicBlock>(v) || isa<BlockAddress>(v)) { 5472 OpInfo.ConstraintCode = "i"; 5473 return; 5474 } 5475 5476 // Otherwise, try to resolve it to something we know about by looking at 5477 // the actual operand type. 5478 if (const char *Repl = LowerXConstraint(OpInfo.ConstraintVT)) { 5479 OpInfo.ConstraintCode = Repl; 5480 OpInfo.ConstraintType = getConstraintType(OpInfo.ConstraintCode); 5481 } 5482 } 5483 } 5484 5485 /// Given an exact SDIV by a constant, create a multiplication 5486 /// with the multiplicative inverse of the constant. 
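/// For example, an exact i32 divide by 6 (= 3 * 2^1) becomes an exact
/// arithmetic shift right by 1 followed by a multiply with the inverse of 3
/// modulo 2^32, which is 0xAAAAAAAB since 3 * 0xAAAAAAAB == 1 (mod 2^32);
/// e.g. 12 sdiv 6 = (12 >> 1) * 0xAAAAAAAB == 2 (mod 2^32).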
5487 static SDValue BuildExactSDIV(const TargetLowering &TLI, SDNode *N, 5488 const SDLoc &dl, SelectionDAG &DAG, 5489 SmallVectorImpl<SDNode *> &Created) { 5490 SDValue Op0 = N->getOperand(0); 5491 SDValue Op1 = N->getOperand(1); 5492 EVT VT = N->getValueType(0); 5493 EVT SVT = VT.getScalarType(); 5494 EVT ShVT = TLI.getShiftAmountTy(VT, DAG.getDataLayout()); 5495 EVT ShSVT = ShVT.getScalarType(); 5496 5497 bool UseSRA = false; 5498 SmallVector<SDValue, 16> Shifts, Factors; 5499 5500 auto BuildSDIVPattern = [&](ConstantSDNode *C) { 5501 if (C->isZero()) 5502 return false; 5503 APInt Divisor = C->getAPIntValue(); 5504 unsigned Shift = Divisor.countTrailingZeros(); 5505 if (Shift) { 5506 Divisor.ashrInPlace(Shift); 5507 UseSRA = true; 5508 } 5509 // Calculate the multiplicative inverse, using Newton's method. 5510 APInt t; 5511 APInt Factor = Divisor; 5512 while ((t = Divisor * Factor) != 1) 5513 Factor *= APInt(Divisor.getBitWidth(), 2) - t; 5514 Shifts.push_back(DAG.getConstant(Shift, dl, ShSVT)); 5515 Factors.push_back(DAG.getConstant(Factor, dl, SVT)); 5516 return true; 5517 }; 5518 5519 // Collect all magic values from the build vector. 5520 if (!ISD::matchUnaryPredicate(Op1, BuildSDIVPattern)) 5521 return SDValue(); 5522 5523 SDValue Shift, Factor; 5524 if (Op1.getOpcode() == ISD::BUILD_VECTOR) { 5525 Shift = DAG.getBuildVector(ShVT, dl, Shifts); 5526 Factor = DAG.getBuildVector(VT, dl, Factors); 5527 } else if (Op1.getOpcode() == ISD::SPLAT_VECTOR) { 5528 assert(Shifts.size() == 1 && Factors.size() == 1 && 5529 "Expected matchUnaryPredicate to return one element for scalable " 5530 "vectors"); 5531 Shift = DAG.getSplatVector(ShVT, dl, Shifts[0]); 5532 Factor = DAG.getSplatVector(VT, dl, Factors[0]); 5533 } else { 5534 assert(isa<ConstantSDNode>(Op1) && "Expected a constant"); 5535 Shift = Shifts[0]; 5536 Factor = Factors[0]; 5537 } 5538 5539 SDValue Res = Op0; 5540 5541 // Shift the value upfront if it is even, so the LSB is one. 5542 if (UseSRA) { 5543 // TODO: For UDIV use SRL instead of SRA. 5544 SDNodeFlags Flags; 5545 Flags.setExact(true); 5546 Res = DAG.getNode(ISD::SRA, dl, VT, Res, Shift, Flags); 5547 Created.push_back(Res.getNode()); 5548 } 5549 5550 return DAG.getNode(ISD::MUL, dl, VT, Res, Factor); 5551 } 5552 5553 SDValue TargetLowering::BuildSDIVPow2(SDNode *N, const APInt &Divisor, 5554 SelectionDAG &DAG, 5555 SmallVectorImpl<SDNode *> &Created) const { 5556 AttributeList Attr = DAG.getMachineFunction().getFunction().getAttributes(); 5557 const TargetLowering &TLI = DAG.getTargetLoweringInfo(); 5558 if (TLI.isIntDivCheap(N->getValueType(0), Attr)) 5559 return SDValue(N, 0); // Lower SDIV as SDIV 5560 return SDValue(); 5561 } 5562 5563 /// Given an ISD::SDIV node expressing a divide by constant, 5564 /// return a DAG expression to select that will generate the same value by 5565 /// multiplying by a magic number. 5566 /// Ref: "Hacker's Delight" or "The PowerPC Compiler Writer's Guide". 5567 SDValue TargetLowering::BuildSDIV(SDNode *N, SelectionDAG &DAG, 5568 bool IsAfterLegalization, 5569 SmallVectorImpl<SDNode *> &Created) const { 5570 SDLoc dl(N); 5571 EVT VT = N->getValueType(0); 5572 EVT SVT = VT.getScalarType(); 5573 EVT ShVT = getShiftAmountTy(VT, DAG.getDataLayout()); 5574 EVT ShSVT = ShVT.getScalarType(); 5575 unsigned EltBits = VT.getScalarSizeInBits(); 5576 EVT MulVT; 5577 5578 // Check to see if we can do this. 5579 // FIXME: We should be more aggressive here. 5580 if (!isTypeLegal(VT)) { 5581 // Limit this to simple scalars for now. 
if (VT.isVector() || !VT.isSimple())
      return SDValue();

    // If this type will be promoted to a large enough type with a legal
    // multiply operation, we can go ahead and do this transform.
    if (getTypeAction(VT.getSimpleVT()) != TypePromoteInteger)
      return SDValue();

    MulVT = getTypeToTransformTo(*DAG.getContext(), VT);
    if (MulVT.getSizeInBits() < (2 * EltBits) ||
        !isOperationLegal(ISD::MUL, MulVT))
      return SDValue();
  }

  // If the sdiv has an 'exact' bit we can use a simpler lowering.
  if (N->getFlags().hasExact())
    return BuildExactSDIV(*this, N, dl, DAG, Created);

  SmallVector<SDValue, 16> MagicFactors, Factors, Shifts, ShiftMasks;

  auto BuildSDIVPattern = [&](ConstantSDNode *C) {
    if (C->isZero())
      return false;

    const APInt &Divisor = C->getAPIntValue();
    SignedDivisionByConstantInfo magics =
        SignedDivisionByConstantInfo::get(Divisor);
    int NumeratorFactor = 0;
    int ShiftMask = -1;

    if (Divisor.isOne() || Divisor.isAllOnes()) {
      // If d is +1/-1, we just multiply the numerator by +1/-1.
      NumeratorFactor = Divisor.getSExtValue();
      magics.Magic = 0;
      magics.ShiftAmount = 0;
      ShiftMask = 0;
    } else if (Divisor.isStrictlyPositive() && magics.Magic.isNegative()) {
      // If d > 0 and m < 0, add the numerator.
      NumeratorFactor = 1;
    } else if (Divisor.isNegative() && magics.Magic.isStrictlyPositive()) {
      // If d < 0 and m > 0, subtract the numerator.
      NumeratorFactor = -1;
    }

    MagicFactors.push_back(DAG.getConstant(magics.Magic, dl, SVT));
    Factors.push_back(DAG.getConstant(NumeratorFactor, dl, SVT));
    Shifts.push_back(DAG.getConstant(magics.ShiftAmount, dl, ShSVT));
    ShiftMasks.push_back(DAG.getConstant(ShiftMask, dl, SVT));
    return true;
  };

  SDValue N0 = N->getOperand(0);
  SDValue N1 = N->getOperand(1);

  // Collect the shifts / magic values from each element.
  if (!ISD::matchUnaryPredicate(N1, BuildSDIVPattern))
    return SDValue();

  SDValue MagicFactor, Factor, Shift, ShiftMask;
  if (N1.getOpcode() == ISD::BUILD_VECTOR) {
    MagicFactor = DAG.getBuildVector(VT, dl, MagicFactors);
    Factor = DAG.getBuildVector(VT, dl, Factors);
    Shift = DAG.getBuildVector(ShVT, dl, Shifts);
    ShiftMask = DAG.getBuildVector(VT, dl, ShiftMasks);
  } else if (N1.getOpcode() == ISD::SPLAT_VECTOR) {
    assert(MagicFactors.size() == 1 && Factors.size() == 1 &&
           Shifts.size() == 1 && ShiftMasks.size() == 1 &&
           "Expected matchUnaryPredicate to return one element for scalable "
           "vectors");
    MagicFactor = DAG.getSplatVector(VT, dl, MagicFactors[0]);
    Factor = DAG.getSplatVector(VT, dl, Factors[0]);
    Shift = DAG.getSplatVector(ShVT, dl, Shifts[0]);
    ShiftMask = DAG.getSplatVector(VT, dl, ShiftMasks[0]);
  } else {
    assert(isa<ConstantSDNode>(N1) && "Expected a constant");
    MagicFactor = MagicFactors[0];
    Factor = Factors[0];
    Shift = Shifts[0];
    ShiftMask = ShiftMasks[0];
  }

  // Multiply the numerator (operand 0) by the magic value.
  // FIXME: We should support doing a MUL in a wider type.
  auto GetMULHS = [&](SDValue X, SDValue Y) {
    // If the type isn't legal, use a wider mul of the type calculated
    // earlier.
5667 if (!isTypeLegal(VT)) { 5668 X = DAG.getNode(ISD::SIGN_EXTEND, dl, MulVT, X); 5669 Y = DAG.getNode(ISD::SIGN_EXTEND, dl, MulVT, Y); 5670 Y = DAG.getNode(ISD::MUL, dl, MulVT, X, Y); 5671 Y = DAG.getNode(ISD::SRL, dl, MulVT, Y, 5672 DAG.getShiftAmountConstant(EltBits, MulVT, dl)); 5673 return DAG.getNode(ISD::TRUNCATE, dl, VT, Y); 5674 } 5675 5676 if (isOperationLegalOrCustom(ISD::MULHS, VT, IsAfterLegalization)) 5677 return DAG.getNode(ISD::MULHS, dl, VT, X, Y); 5678 if (isOperationLegalOrCustom(ISD::SMUL_LOHI, VT, IsAfterLegalization)) { 5679 SDValue LoHi = 5680 DAG.getNode(ISD::SMUL_LOHI, dl, DAG.getVTList(VT, VT), X, Y); 5681 return SDValue(LoHi.getNode(), 1); 5682 } 5683 return SDValue(); 5684 }; 5685 5686 SDValue Q = GetMULHS(N0, MagicFactor); 5687 if (!Q) 5688 return SDValue(); 5689 5690 Created.push_back(Q.getNode()); 5691 5692 // (Optionally) Add/subtract the numerator using Factor. 5693 Factor = DAG.getNode(ISD::MUL, dl, VT, N0, Factor); 5694 Created.push_back(Factor.getNode()); 5695 Q = DAG.getNode(ISD::ADD, dl, VT, Q, Factor); 5696 Created.push_back(Q.getNode()); 5697 5698 // Shift right algebraic by shift value. 5699 Q = DAG.getNode(ISD::SRA, dl, VT, Q, Shift); 5700 Created.push_back(Q.getNode()); 5701 5702 // Extract the sign bit, mask it and add it to the quotient. 5703 SDValue SignShift = DAG.getConstant(EltBits - 1, dl, ShVT); 5704 SDValue T = DAG.getNode(ISD::SRL, dl, VT, Q, SignShift); 5705 Created.push_back(T.getNode()); 5706 T = DAG.getNode(ISD::AND, dl, VT, T, ShiftMask); 5707 Created.push_back(T.getNode()); 5708 return DAG.getNode(ISD::ADD, dl, VT, Q, T); 5709 } 5710 5711 /// Given an ISD::UDIV node expressing a divide by constant, 5712 /// return a DAG expression to select that will generate the same value by 5713 /// multiplying by a magic number. 5714 /// Ref: "Hacker's Delight" or "The PowerPC Compiler Writer's Guide". 5715 SDValue TargetLowering::BuildUDIV(SDNode *N, SelectionDAG &DAG, 5716 bool IsAfterLegalization, 5717 SmallVectorImpl<SDNode *> &Created) const { 5718 SDLoc dl(N); 5719 EVT VT = N->getValueType(0); 5720 EVT SVT = VT.getScalarType(); 5721 EVT ShVT = getShiftAmountTy(VT, DAG.getDataLayout()); 5722 EVT ShSVT = ShVT.getScalarType(); 5723 unsigned EltBits = VT.getScalarSizeInBits(); 5724 EVT MulVT; 5725 5726 // Check to see if we can do this. 5727 // FIXME: We should be more aggressive here. 5728 if (!isTypeLegal(VT)) { 5729 // Limit this to simple scalars for now. 5730 if (VT.isVector() || !VT.isSimple()) 5731 return SDValue(); 5732 5733 // If this type will be promoted to a large enough type with a legal 5734 // multiply operation, we can go ahead and do this transform. 5735 if (getTypeAction(VT.getSimpleVT()) != TypePromoteInteger) 5736 return SDValue(); 5737 5738 MulVT = getTypeToTransformTo(*DAG.getContext(), VT); 5739 if (MulVT.getSizeInBits() < (2 * EltBits) || 5740 !isOperationLegal(ISD::MUL, MulVT)) 5741 return SDValue(); 5742 } 5743 5744 bool UseNPQ = false; 5745 SmallVector<SDValue, 16> PreShifts, PostShifts, MagicFactors, NPQFactors; 5746 5747 auto BuildUDIVPattern = [&](ConstantSDNode *C) { 5748 if (C->isZero()) 5749 return false; 5750 // FIXME: We should use a narrower constant when the upper 5751 // bits are known to be zero. 5752 const APInt& Divisor = C->getAPIntValue(); 5753 UnsignedDivisonByConstantInfo magics = UnsignedDivisonByConstantInfo::get(Divisor); 5754 unsigned PreShift = 0, PostShift = 0; 5755 5756 // If the divisor is even, we can avoid using the expensive fixup by 5757 // shifting the divided value upfront. 
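    // For example (a sketch; the exact magic constant may differ), an i32
    // udiv by 6 first shifts the value right by 1 and then uses a magic for
    // the shifted divisor 3, which needs no fixup: one valid lowering is
    // q = mulhu(n >> 1, 0xAAAAAAAB) >> 1.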
if (magics.IsAdd != 0 && !Divisor[0]) {
      PreShift = Divisor.countTrailingZeros();
      // Get magic number for the shifted divisor.
      magics =
          UnsignedDivisonByConstantInfo::get(Divisor.lshr(PreShift), PreShift);
      assert(magics.IsAdd == 0 && "Should use cheap fixup now");
    }

    APInt Magic = magics.Magic;

    bool SelNPQ;
    if (magics.IsAdd == 0 || Divisor.isOne()) {
      assert(magics.ShiftAmount < Divisor.getBitWidth() &&
             "We shouldn't generate an undefined shift!");
      PostShift = magics.ShiftAmount;
      SelNPQ = false;
    } else {
      PostShift = magics.ShiftAmount - 1;
      SelNPQ = true;
    }

    PreShifts.push_back(DAG.getConstant(PreShift, dl, ShSVT));
    MagicFactors.push_back(DAG.getConstant(Magic, dl, SVT));
    NPQFactors.push_back(
        DAG.getConstant(SelNPQ ? APInt::getOneBitSet(EltBits, EltBits - 1)
                                : APInt::getZero(EltBits),
                        dl, SVT));
    PostShifts.push_back(DAG.getConstant(PostShift, dl, ShSVT));
    UseNPQ |= SelNPQ;
    return true;
  };

  SDValue N0 = N->getOperand(0);
  SDValue N1 = N->getOperand(1);

  // Collect the shifts/magic values from each element.
  if (!ISD::matchUnaryPredicate(N1, BuildUDIVPattern))
    return SDValue();

  SDValue PreShift, PostShift, MagicFactor, NPQFactor;
  if (N1.getOpcode() == ISD::BUILD_VECTOR) {
    PreShift = DAG.getBuildVector(ShVT, dl, PreShifts);
    MagicFactor = DAG.getBuildVector(VT, dl, MagicFactors);
    NPQFactor = DAG.getBuildVector(VT, dl, NPQFactors);
    PostShift = DAG.getBuildVector(ShVT, dl, PostShifts);
  } else if (N1.getOpcode() == ISD::SPLAT_VECTOR) {
    assert(PreShifts.size() == 1 && MagicFactors.size() == 1 &&
           NPQFactors.size() == 1 && PostShifts.size() == 1 &&
           "Expected matchUnaryPredicate to return one for scalable vectors");
    PreShift = DAG.getSplatVector(ShVT, dl, PreShifts[0]);
    MagicFactor = DAG.getSplatVector(VT, dl, MagicFactors[0]);
    NPQFactor = DAG.getSplatVector(VT, dl, NPQFactors[0]);
    PostShift = DAG.getSplatVector(ShVT, dl, PostShifts[0]);
  } else {
    assert(isa<ConstantSDNode>(N1) && "Expected a constant");
    PreShift = PreShifts[0];
    MagicFactor = MagicFactors[0];
    PostShift = PostShifts[0];
  }

  SDValue Q = N0;
  Q = DAG.getNode(ISD::SRL, dl, VT, Q, PreShift);
  Created.push_back(Q.getNode());

  // FIXME: We should support doing a MUL in a wider type.
  auto GetMULHU = [&](SDValue X, SDValue Y) {
    // If the type isn't legal, use a wider mul of the type calculated
    // earlier.
    if (!isTypeLegal(VT)) {
      X = DAG.getNode(ISD::ZERO_EXTEND, dl, MulVT, X);
      Y = DAG.getNode(ISD::ZERO_EXTEND, dl, MulVT, Y);
      Y = DAG.getNode(ISD::MUL, dl, MulVT, X, Y);
      Y = DAG.getNode(ISD::SRL, dl, MulVT, Y,
                      DAG.getShiftAmountConstant(EltBits, MulVT, dl));
      return DAG.getNode(ISD::TRUNCATE, dl, VT, Y);
    }

    if (isOperationLegalOrCustom(ISD::MULHU, VT, IsAfterLegalization))
      return DAG.getNode(ISD::MULHU, dl, VT, X, Y);
    if (isOperationLegalOrCustom(ISD::UMUL_LOHI, VT, IsAfterLegalization)) {
      SDValue LoHi =
          DAG.getNode(ISD::UMUL_LOHI, dl, DAG.getVTList(VT, VT), X, Y);
      return SDValue(LoHi.getNode(), 1);
    }
    return SDValue(); // No mulhu or equivalent
  };

  // Multiply the numerator (operand 0) by the magic value.
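  // For example, for an i32 udiv by 7 the magic is 0x24924925 with the add
  // fixup (UseNPQ) and PostShift = 2, so the scalar path below computes:
  //   q = mulhu(n, 0x24924925); npq = (n - q) >> 1; result = (npq + q) >> 2.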
Q = GetMULHU(Q, MagicFactor);
  if (!Q)
    return SDValue();

  Created.push_back(Q.getNode());

  if (UseNPQ) {
    SDValue NPQ = DAG.getNode(ISD::SUB, dl, VT, N0, Q);
    Created.push_back(NPQ.getNode());

    // For vectors we might have a mix of non-NPQ/NPQ paths, so use
    // MULHU to act as a SRL-by-1 for NPQ, else multiply by zero.
    if (VT.isVector())
      NPQ = GetMULHU(NPQ, NPQFactor);
    else
      NPQ = DAG.getNode(ISD::SRL, dl, VT, NPQ, DAG.getConstant(1, dl, ShVT));

    Created.push_back(NPQ.getNode());

    Q = DAG.getNode(ISD::ADD, dl, VT, NPQ, Q);
    Created.push_back(Q.getNode());
  }

  Q = DAG.getNode(ISD::SRL, dl, VT, Q, PostShift);
  Created.push_back(Q.getNode());

  EVT SetCCVT = getSetCCResultType(DAG.getDataLayout(), *DAG.getContext(), VT);

  SDValue One = DAG.getConstant(1, dl, VT);
  SDValue IsOne = DAG.getSetCC(dl, SetCCVT, N1, One, ISD::SETEQ);
  return DAG.getSelect(dl, VT, IsOne, N0, Q);
}

/// If all values in Values that *don't* match the predicate are the same
/// 'splat' value, then replace all values with that splat value.
/// Else, if AlternativeReplacement was provided, then replace all values that
/// do match predicate with AlternativeReplacement value.
static void
turnVectorIntoSplatVector(MutableArrayRef<SDValue> Values,
                          std::function<bool(SDValue)> Predicate,
                          SDValue AlternativeReplacement = SDValue()) {
  SDValue Replacement;
  // Is there a value for which the Predicate does *NOT* match? What is it?
  auto SplatValue = llvm::find_if_not(Values, Predicate);
  if (SplatValue != Values.end()) {
    // Does Values consist only of SplatValue's and values matching Predicate?
    if (llvm::all_of(Values, [Predicate, SplatValue](SDValue Value) {
          return Value == *SplatValue || Predicate(Value);
        })) // Then we shall replace values matching predicate with SplatValue.
      Replacement = *SplatValue;
  }
  if (!Replacement) {
    // Oops, we did not find the "baseline" splat value.
    if (!AlternativeReplacement)
      return; // Nothing to do.
    // Let's replace with provided value then.
    Replacement = AlternativeReplacement;
  }
  std::replace_if(Values.begin(), Values.end(), Predicate, Replacement);
}

/// Given an ISD::UREM used only by an ISD::SETEQ or ISD::SETNE
/// where the divisor is constant and the comparison target is zero,
/// return a DAG expression that will generate the same comparison result
/// using only multiplications, additions and shifts/rotations.
/// Ref: "Hacker's Delight" 10-17.
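/// For example, on i8 with D = 6 = 3 * 2^1: P = inv(3, 2^8) = 0xAB and
/// Q = floor(255 / 6) = 42, so (N u% 6) == 0 becomes
/// (rotr (mul N, 0xAB), 1) u<= 42; e.g. N = 6 gives rotr(2, 1) = 1 (true)
/// while N = 7 gives rotr(173, 1) = 214 (false).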
SDValue TargetLowering::buildUREMEqFold(EVT SETCCVT, SDValue REMNode,
                                        SDValue CompTargetNode,
                                        ISD::CondCode Cond,
                                        DAGCombinerInfo &DCI,
                                        const SDLoc &DL) const {
  SmallVector<SDNode *, 5> Built;
  if (SDValue Folded = prepareUREMEqFold(SETCCVT, REMNode, CompTargetNode, Cond,
                                         DCI, DL, Built)) {
    for (SDNode *N : Built)
      DCI.AddToWorklist(N);
    return Folded;
  }

  return SDValue();
}

SDValue
TargetLowering::prepareUREMEqFold(EVT SETCCVT, SDValue REMNode,
                                  SDValue CompTargetNode, ISD::CondCode Cond,
                                  DAGCombinerInfo &DCI, const SDLoc &DL,
                                  SmallVectorImpl<SDNode *> &Created) const {
  // fold (seteq/ne (urem N, D), 0) -> (setule/ugt (rotr (mul N, P), K), Q)
  // - D must be constant, with D = D0 * 2^K where D0 is odd
  // - P is the multiplicative inverse of D0 modulo 2^W
  // - Q = floor(((2^W) - 1) / D)
  // where W is the width of the common type of N and D.
  assert((Cond == ISD::SETEQ || Cond == ISD::SETNE) &&
         "Only applicable for (in)equality comparisons.");

  SelectionDAG &DAG = DCI.DAG;

  EVT VT = REMNode.getValueType();
  EVT SVT = VT.getScalarType();
  EVT ShVT = getShiftAmountTy(VT, DAG.getDataLayout(), !DCI.isBeforeLegalize());
  EVT ShSVT = ShVT.getScalarType();

  // If MUL is unavailable, we cannot proceed in any case.
  if (!DCI.isBeforeLegalizeOps() && !isOperationLegalOrCustom(ISD::MUL, VT))
    return SDValue();

  bool ComparingWithAllZeros = true;
  bool AllComparisonsWithNonZerosAreTautological = true;
  bool HadTautologicalLanes = false;
  bool AllLanesAreTautological = true;
  bool HadEvenDivisor = false;
  bool AllDivisorsArePowerOfTwo = true;
  bool HadTautologicalInvertedLanes = false;
  SmallVector<SDValue, 16> PAmts, KAmts, QAmts, IAmts;

  auto BuildUREMPattern = [&](ConstantSDNode *CDiv, ConstantSDNode *CCmp) {
    // Division by 0 is UB. Leave it to be constant-folded elsewhere.
    if (CDiv->isZero())
      return false;

    const APInt &D = CDiv->getAPIntValue();
    const APInt &Cmp = CCmp->getAPIntValue();

    ComparingWithAllZeros &= Cmp.isZero();

    // `x u% C1` is *always* less than C1. So given `x u% C1 == C2`,
    // if C2 is not less than C1, the comparison is always false.
    // But we will only be able to produce the comparison that will give the
    // opposite tautological answer. So this lane would need to be fixed up.
    bool TautologicalInvertedLane = D.ule(Cmp);
    HadTautologicalInvertedLanes |= TautologicalInvertedLane;

    // If all lanes are tautological (either all divisors are ones, or divisor
    // is not greater than the constant we are comparing with),
    // we will prefer to avoid the fold.
    bool TautologicalLane = D.isOne() || TautologicalInvertedLane;
    HadTautologicalLanes |= TautologicalLane;
    AllLanesAreTautological &= TautologicalLane;

    // If we are comparing with non-zero, we'll need to subtract said
    // comparison value from the LHS. But there is no point in doing that if
    // every lane where we are comparing with non-zero is tautological.
    if (!Cmp.isZero())
      AllComparisonsWithNonZerosAreTautological &= TautologicalLane;

    // Decompose D into D0 * 2^K
    unsigned K = D.countTrailingZeros();
    assert((!D.isOne() || (K == 0)) && "For divisor '1' we won't rotate.");
    APInt D0 = D.lshr(K);

    // D is even if it has trailing zeros.
HadEvenDivisor |= (K != 0);
    // D is a power-of-two if D0 is one.
    // If all divisors are power-of-two, we will prefer to avoid the fold.
    AllDivisorsArePowerOfTwo &= D0.isOne();

    // P = inv(D0, 2^W)
    // 2^W requires W + 1 bits, so we have to extend and then truncate.
    unsigned W = D.getBitWidth();
    APInt P = D0.zext(W + 1)
                  .multiplicativeInverse(APInt::getSignedMinValue(W + 1))
                  .trunc(W);
    assert(!P.isZero() && "No multiplicative inverse!"); // unreachable
    assert((D0 * P).isOne() && "Multiplicative inverse basic check failed.");

    // Q = floor((2^W - 1) u/ D)
    // R = ((2^W - 1) u% D)
    APInt Q, R;
    APInt::udivrem(APInt::getAllOnes(W), D, Q, R);

    // If we are comparing with zero, then that comparison constant is okay,
    // else it may need to be one less than that.
    if (Cmp.ugt(R))
      Q -= 1;

    assert(APInt::getAllOnes(ShSVT.getSizeInBits()).ugt(K) &&
           "We are expecting that K is always less than all-ones for ShSVT");

    // If the lane is tautological the result can be constant-folded.
    if (TautologicalLane) {
      // Set P and K to bogus values so we can try to splat them.
      P = 0;
      K = -1;
      // And ensure that the comparison constant is tautological; it will
      // always compare true/false.
      Q = -1;
    }

    PAmts.push_back(DAG.getConstant(P, DL, SVT));
    KAmts.push_back(
        DAG.getConstant(APInt(ShSVT.getSizeInBits(), K), DL, ShSVT));
    QAmts.push_back(DAG.getConstant(Q, DL, SVT));
    return true;
  };

  SDValue N = REMNode.getOperand(0);
  SDValue D = REMNode.getOperand(1);

  // Collect the values from each element.
  if (!ISD::matchBinaryPredicate(D, CompTargetNode, BuildUREMPattern))
    return SDValue();

  // If all lanes are tautological, the result can be constant-folded.
  if (AllLanesAreTautological)
    return SDValue();

  // If this is a urem by a power-of-two, avoid the fold since it can be
  // best implemented as a bit test.
  if (AllDivisorsArePowerOfTwo)
    return SDValue();

  SDValue PVal, KVal, QVal;
  if (D.getOpcode() == ISD::BUILD_VECTOR) {
    if (HadTautologicalLanes) {
      // Try to turn PAmts into a splat, since we don't care about the values
      // that are currently '0'. If we can't, just keep the '0's.
      turnVectorIntoSplatVector(PAmts, isNullConstant);
      // Try to turn KAmts into a splat, since we don't care about the values
      // that are currently '-1'. If we can't, change them to '0's.
      turnVectorIntoSplatVector(KAmts, isAllOnesConstant,
                                DAG.getConstant(0, DL, ShSVT));
    }

    PVal = DAG.getBuildVector(VT, DL, PAmts);
    KVal = DAG.getBuildVector(ShVT, DL, KAmts);
    QVal = DAG.getBuildVector(VT, DL, QAmts);
  } else if (D.getOpcode() == ISD::SPLAT_VECTOR) {
    assert(PAmts.size() == 1 && KAmts.size() == 1 && QAmts.size() == 1 &&
           "Expected matchBinaryPredicate to return one element for "
           "SPLAT_VECTORs");
    PVal = DAG.getSplatVector(VT, DL, PAmts[0]);
    KVal = DAG.getSplatVector(ShVT, DL, KAmts[0]);
    QVal = DAG.getSplatVector(VT, DL, QAmts[0]);
  } else {
    PVal = PAmts[0];
    KVal = KAmts[0];
    QVal = QAmts[0];
  }

  if (!ComparingWithAllZeros && !AllComparisonsWithNonZerosAreTautological) {
    if (!DCI.isBeforeLegalizeOps() && !isOperationLegalOrCustom(ISD::SUB, VT))
      return SDValue(); // FIXME: Could/should use `ISD::ADD`?
6087     assert(CompTargetNode.getValueType() == N.getValueType() &&
6088            "Expecting that the types on LHS and RHS of comparisons match.");
6089     N = DAG.getNode(ISD::SUB, DL, VT, N, CompTargetNode);
6090   }
6091
6092   // (mul N, P)
6093   SDValue Op0 = DAG.getNode(ISD::MUL, DL, VT, N, PVal);
6094   Created.push_back(Op0.getNode());
6095
6096   // Rotate right only if any divisor was even. We avoid rotates for all-odd
6097   // divisors as a performance improvement, since rotating by 0 is a no-op.
6098   if (HadEvenDivisor) {
6099     // We need ROTR to do this.
6100     if (!DCI.isBeforeLegalizeOps() && !isOperationLegalOrCustom(ISD::ROTR, VT))
6101       return SDValue();
6102     // UREM: (rotr (mul N, P), K)
6103     Op0 = DAG.getNode(ISD::ROTR, DL, VT, Op0, KVal);
6104     Created.push_back(Op0.getNode());
6105   }
6106
6107   // UREM: (setule/setugt (rotr (mul N, P), K), Q)
6108   SDValue NewCC =
6109       DAG.getSetCC(DL, SETCCVT, Op0, QVal,
6110                    ((Cond == ISD::SETEQ) ? ISD::SETULE : ISD::SETUGT));
6111   if (!HadTautologicalInvertedLanes)
6112     return NewCC;
6113
6114   // If any lanes previously compared always-false, the NewCC will give an
6115   // always-true result for them, so we need to fix up those lanes.
6116   // Or the other way around for the inequality predicate.
6117   assert(VT.isVector() && "Can/should only get here for vectors.");
6118   Created.push_back(NewCC.getNode());
6119
6120   // `x u% C1` is *always* less than C1. So given `x u% C1 == C2`,
6121   // if C2 is not less than C1, the comparison is always false.
6122   // But we have produced the comparison that will give the
6123   // opposite tautological answer. So these lanes would need to be fixed up.
6124   SDValue TautologicalInvertedChannels =
6125       DAG.getSetCC(DL, SETCCVT, D, CompTargetNode, ISD::SETULE);
6126   Created.push_back(TautologicalInvertedChannels.getNode());
6127
6128   // NOTE: we avoid letting illegal types through even if we're before legalize
6129   // ops; legalization has a hard time producing good code for this.
6130   if (isOperationLegalOrCustom(ISD::VSELECT, SETCCVT)) {
6131     // If we have a vector select, let's replace the comparison results in the
6132     // affected lanes with the correct tautological result.
6133     SDValue Replacement = DAG.getBoolConstant(Cond == ISD::SETEQ ? false : true,
6134                                               DL, SETCCVT, SETCCVT);
6135     return DAG.getNode(ISD::VSELECT, DL, SETCCVT, TautologicalInvertedChannels,
6136                        Replacement, NewCC);
6137   }
6138
6139   // Else, we can just invert the comparison result in the appropriate lanes.
6140   //
6141   // NOTE: see the VSELECT note above.
6142   if (isOperationLegalOrCustom(ISD::XOR, SETCCVT))
6143     return DAG.getNode(ISD::XOR, DL, SETCCVT, NewCC,
6144                        TautologicalInvertedChannels);
6145
6146   return SDValue(); // Don't know how to lower.
6147 }
6148
6149 /// Given an ISD::SREM used only by an ISD::SETEQ or ISD::SETNE
6150 /// where the divisor is constant and the comparison target is zero,
6151 /// return a DAG expression that will generate the same comparison result
6152 /// using only multiplications, additions and shifts/rotations.
6153 /// Ref: "Hacker's Delight" 10-17.
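/// As a worked illustration (example values, not part of the algorithm
/// statement): for W = 32 and D = 3, P = 0xAAAAAAAB, A = 0x2AAAAAAA, K = 0
/// and Q = 0x55555554, so (x s% 3 == 0) becomes
/// (((x * 0xAAAAAAAB) + 0x2AAAAAAA) u<= 0x55555554).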
6154 SDValue TargetLowering::buildSREMEqFold(EVT SETCCVT, SDValue REMNode, 6155 SDValue CompTargetNode, 6156 ISD::CondCode Cond, 6157 DAGCombinerInfo &DCI, 6158 const SDLoc &DL) const { 6159 SmallVector<SDNode *, 7> Built; 6160 if (SDValue Folded = prepareSREMEqFold(SETCCVT, REMNode, CompTargetNode, Cond, 6161 DCI, DL, Built)) { 6162 assert(Built.size() <= 7 && "Max size prediction failed."); 6163 for (SDNode *N : Built) 6164 DCI.AddToWorklist(N); 6165 return Folded; 6166 } 6167 6168 return SDValue(); 6169 } 6170 6171 SDValue 6172 TargetLowering::prepareSREMEqFold(EVT SETCCVT, SDValue REMNode, 6173 SDValue CompTargetNode, ISD::CondCode Cond, 6174 DAGCombinerInfo &DCI, const SDLoc &DL, 6175 SmallVectorImpl<SDNode *> &Created) const { 6176 // Fold: 6177 // (seteq/ne (srem N, D), 0) 6178 // To: 6179 // (setule/ugt (rotr (add (mul N, P), A), K), Q) 6180 // 6181 // - D must be constant, with D = D0 * 2^K where D0 is odd 6182 // - P is the multiplicative inverse of D0 modulo 2^W 6183 // - A = bitwiseand(floor((2^(W - 1) - 1) / D0), (-(2^k))) 6184 // - Q = floor((2 * A) / (2^K)) 6185 // where W is the width of the common type of N and D. 6186 assert((Cond == ISD::SETEQ || Cond == ISD::SETNE) && 6187 "Only applicable for (in)equality comparisons."); 6188 6189 SelectionDAG &DAG = DCI.DAG; 6190 6191 EVT VT = REMNode.getValueType(); 6192 EVT SVT = VT.getScalarType(); 6193 EVT ShVT = getShiftAmountTy(VT, DAG.getDataLayout(), !DCI.isBeforeLegalize()); 6194 EVT ShSVT = ShVT.getScalarType(); 6195 6196 // If we are after ops legalization, and MUL is unavailable, we can not 6197 // proceed. 6198 if (!DCI.isBeforeLegalizeOps() && !isOperationLegalOrCustom(ISD::MUL, VT)) 6199 return SDValue(); 6200 6201 // TODO: Could support comparing with non-zero too. 6202 ConstantSDNode *CompTarget = isConstOrConstSplat(CompTargetNode); 6203 if (!CompTarget || !CompTarget->isZero()) 6204 return SDValue(); 6205 6206 bool HadIntMinDivisor = false; 6207 bool HadOneDivisor = false; 6208 bool AllDivisorsAreOnes = true; 6209 bool HadEvenDivisor = false; 6210 bool NeedToApplyOffset = false; 6211 bool AllDivisorsArePowerOfTwo = true; 6212 SmallVector<SDValue, 16> PAmts, AAmts, KAmts, QAmts; 6213 6214 auto BuildSREMPattern = [&](ConstantSDNode *C) { 6215 // Division by 0 is UB. Leave it to be constant-folded elsewhere. 6216 if (C->isZero()) 6217 return false; 6218 6219 // FIXME: we don't fold `rem %X, -C` to `rem %X, C` in DAGCombine. 6220 6221 // WARNING: this fold is only valid for positive divisors! 6222 APInt D = C->getAPIntValue(); 6223 if (D.isNegative()) 6224 D.negate(); // `rem %X, -C` is equivalent to `rem %X, C` 6225 6226 HadIntMinDivisor |= D.isMinSignedValue(); 6227 6228 // If all divisors are ones, we will prefer to avoid the fold. 6229 HadOneDivisor |= D.isOne(); 6230 AllDivisorsAreOnes &= D.isOne(); 6231 6232 // Decompose D into D0 * 2^K 6233 unsigned K = D.countTrailingZeros(); 6234 assert((!D.isOne() || (K == 0)) && "For divisor '1' we won't rotate."); 6235 APInt D0 = D.lshr(K); 6236 6237 if (!D.isMinSignedValue()) { 6238 // D is even if it has trailing zeros; unless it's INT_MIN, in which case 6239 // we don't care about this lane in this fold, we'll special-handle it. 6240 HadEvenDivisor |= (K != 0); 6241 } 6242 6243 // D is a power-of-two if D0 is one. This includes INT_MIN. 6244 // If all divisors are power-of-two, we will prefer to avoid the fold. 6245 AllDivisorsArePowerOfTwo &= D0.isOne(); 6246 6247 // P = inv(D0, 2^W) 6248 // 2^W requires W + 1 bits, so we have to extend and then truncate. 
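    // (Example: for W = 32 and D0 = 3, inv(3, 2^33) = 0x0AAAAAAAB, which
    // truncates to 0xAAAAAAAB, and indeed 3 * 0xAAAAAAAB == 1 (mod 2^32).)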
6249     unsigned W = D.getBitWidth();
6250     APInt P = D0.zext(W + 1)
6251                   .multiplicativeInverse(APInt::getSignedMinValue(W + 1))
6252                   .trunc(W);
6253     assert(!P.isZero() && "No multiplicative inverse!"); // unreachable
6254     assert((D0 * P).isOne() && "Multiplicative inverse basic check failed.");
6255
6256     // A = floor((2^(W - 1) - 1) / D0) & -2^K
6257     APInt A = APInt::getSignedMaxValue(W).udiv(D0);
6258     A.clearLowBits(K);
6259
6260     if (!D.isMinSignedValue()) {
6261       // If the divisor is INT_MIN, then we don't care about this lane in
6262       // this fold; we'll special-handle it.
6263       NeedToApplyOffset |= A != 0;
6264     }
6265
6266     // Q = floor((2 * A) / (2^K))
6267     APInt Q = (2 * A).udiv(APInt::getOneBitSet(W, K));
6268
6269     assert(APInt::getAllOnes(SVT.getSizeInBits()).ugt(A) &&
6270            "We are expecting that A is always less than all-ones for SVT");
6271     assert(APInt::getAllOnes(ShSVT.getSizeInBits()).ugt(K) &&
6272            "We are expecting that K is always less than all-ones for ShSVT");
6273
6274     // If the divisor is 1 the result can be constant-folded. Likewise, we
6275     // don't care about INT_MIN lanes; those can be set to undef if appropriate.
6276     if (D.isOne()) {
6277       // Set P, A and K to bogus values so we can try to splat them.
6278       P = 0;
6279       A = -1;
6280       K = -1;
6281
6282       // x ?% 1 == 0 <--> true <--> x u<= -1
6283       Q = -1;
6284     }
6285
6286     PAmts.push_back(DAG.getConstant(P, DL, SVT));
6287     AAmts.push_back(DAG.getConstant(A, DL, SVT));
6288     KAmts.push_back(
6289         DAG.getConstant(APInt(ShSVT.getSizeInBits(), K), DL, ShSVT));
6290     QAmts.push_back(DAG.getConstant(Q, DL, SVT));
6291     return true;
6292   };
6293
6294   SDValue N = REMNode.getOperand(0);
6295   SDValue D = REMNode.getOperand(1);
6296
6297   // Collect the values from each element.
6298   if (!ISD::matchUnaryPredicate(D, BuildSREMPattern))
6299     return SDValue();
6300
6301   // If this is a srem by one, avoid the fold since it can be constant-folded.
6302   if (AllDivisorsAreOnes)
6303     return SDValue();
6304
6305   // If this is a srem by a power-of-two (including INT_MIN), avoid the fold
6306   // since it can be best implemented as a bit test.
6307   if (AllDivisorsArePowerOfTwo)
6308     return SDValue();
6309
6310   SDValue PVal, AVal, KVal, QVal;
6311   if (D.getOpcode() == ISD::BUILD_VECTOR) {
6312     if (HadOneDivisor) {
6313       // Try to turn PAmts into a splat, since we don't care about the values
6314       // that are currently '0'. If we can't, just keep the '0's.
6315       turnVectorIntoSplatVector(PAmts, isNullConstant);
6316       // Try to turn AAmts into a splat, since we don't care about the
6317       // values that are currently '-1'. If we can't, change them to '0's.
6318       turnVectorIntoSplatVector(AAmts, isAllOnesConstant,
6319                                 DAG.getConstant(0, DL, SVT));
6320       // Try to turn KAmts into a splat, since we don't care about the values
6321       // that are currently '-1'. If we can't, change them to '0's.
6322 turnVectorIntoSplatVector(KAmts, isAllOnesConstant, 6323 DAG.getConstant(0, DL, ShSVT)); 6324 } 6325 6326 PVal = DAG.getBuildVector(VT, DL, PAmts); 6327 AVal = DAG.getBuildVector(VT, DL, AAmts); 6328 KVal = DAG.getBuildVector(ShVT, DL, KAmts); 6329 QVal = DAG.getBuildVector(VT, DL, QAmts); 6330 } else if (D.getOpcode() == ISD::SPLAT_VECTOR) { 6331 assert(PAmts.size() == 1 && AAmts.size() == 1 && KAmts.size() == 1 && 6332 QAmts.size() == 1 && 6333 "Expected matchUnaryPredicate to return one element for scalable " 6334 "vectors"); 6335 PVal = DAG.getSplatVector(VT, DL, PAmts[0]); 6336 AVal = DAG.getSplatVector(VT, DL, AAmts[0]); 6337 KVal = DAG.getSplatVector(ShVT, DL, KAmts[0]); 6338 QVal = DAG.getSplatVector(VT, DL, QAmts[0]); 6339 } else { 6340 assert(isa<ConstantSDNode>(D) && "Expected a constant"); 6341 PVal = PAmts[0]; 6342 AVal = AAmts[0]; 6343 KVal = KAmts[0]; 6344 QVal = QAmts[0]; 6345 } 6346 6347 // (mul N, P) 6348 SDValue Op0 = DAG.getNode(ISD::MUL, DL, VT, N, PVal); 6349 Created.push_back(Op0.getNode()); 6350 6351 if (NeedToApplyOffset) { 6352 // We need ADD to do this. 6353 if (!DCI.isBeforeLegalizeOps() && !isOperationLegalOrCustom(ISD::ADD, VT)) 6354 return SDValue(); 6355 6356 // (add (mul N, P), A) 6357 Op0 = DAG.getNode(ISD::ADD, DL, VT, Op0, AVal); 6358 Created.push_back(Op0.getNode()); 6359 } 6360 6361 // Rotate right only if any divisor was even. We avoid rotates for all-odd 6362 // divisors as a performance improvement, since rotating by 0 is a no-op. 6363 if (HadEvenDivisor) { 6364 // We need ROTR to do this. 6365 if (!DCI.isBeforeLegalizeOps() && !isOperationLegalOrCustom(ISD::ROTR, VT)) 6366 return SDValue(); 6367 // SREM: (rotr (add (mul N, P), A), K) 6368 Op0 = DAG.getNode(ISD::ROTR, DL, VT, Op0, KVal); 6369 Created.push_back(Op0.getNode()); 6370 } 6371 6372 // SREM: (setule/setugt (rotr (add (mul N, P), A), K), Q) 6373 SDValue Fold = 6374 DAG.getSetCC(DL, SETCCVT, Op0, QVal, 6375 ((Cond == ISD::SETEQ) ? ISD::SETULE : ISD::SETUGT)); 6376 6377 // If we didn't have lanes with INT_MIN divisor, then we're done. 6378 if (!HadIntMinDivisor) 6379 return Fold; 6380 6381 // That fold is only valid for positive divisors. Which effectively means, 6382 // it is invalid for INT_MIN divisors. So if we have such a lane, 6383 // we must fix-up results for said lanes. 6384 assert(VT.isVector() && "Can/should only get here for vectors."); 6385 6386 // NOTE: we avoid letting illegal types through even if we're before legalize 6387 // ops – legalization has a hard time producing good code for the code that 6388 // follows. 6389 if (!isOperationLegalOrCustom(ISD::SETEQ, VT) || 6390 !isOperationLegalOrCustom(ISD::AND, VT) || 6391 !isOperationLegalOrCustom(Cond, VT) || 6392 !isOperationLegalOrCustom(ISD::VSELECT, SETCCVT)) 6393 return SDValue(); 6394 6395 Created.push_back(Fold.getNode()); 6396 6397 SDValue IntMin = DAG.getConstant( 6398 APInt::getSignedMinValue(SVT.getScalarSizeInBits()), DL, VT); 6399 SDValue IntMax = DAG.getConstant( 6400 APInt::getSignedMaxValue(SVT.getScalarSizeInBits()), DL, VT); 6401 SDValue Zero = 6402 DAG.getConstant(APInt::getZero(SVT.getScalarSizeInBits()), DL, VT); 6403 6404 // Which lanes had INT_MIN divisors? Divisor is constant, so const-folded. 
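  // (The fixup relies on the fact that the only W-bit multiples of INT_MIN
  // are 0 and INT_MIN itself, so the remainder is zero exactly when the low
  // W - 1 bits of N are all zero.)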
6405 SDValue DivisorIsIntMin = DAG.getSetCC(DL, SETCCVT, D, IntMin, ISD::SETEQ); 6406 Created.push_back(DivisorIsIntMin.getNode()); 6407 6408 // (N s% INT_MIN) ==/!= 0 <--> (N & INT_MAX) ==/!= 0 6409 SDValue Masked = DAG.getNode(ISD::AND, DL, VT, N, IntMax); 6410 Created.push_back(Masked.getNode()); 6411 SDValue MaskedIsZero = DAG.getSetCC(DL, SETCCVT, Masked, Zero, Cond); 6412 Created.push_back(MaskedIsZero.getNode()); 6413 6414 // To produce final result we need to blend 2 vectors: 'SetCC' and 6415 // 'MaskedIsZero'. If the divisor for channel was *NOT* INT_MIN, we pick 6416 // from 'Fold', else pick from 'MaskedIsZero'. Since 'DivisorIsIntMin' is 6417 // constant-folded, select can get lowered to a shuffle with constant mask. 6418 SDValue Blended = DAG.getNode(ISD::VSELECT, DL, SETCCVT, DivisorIsIntMin, 6419 MaskedIsZero, Fold); 6420 6421 return Blended; 6422 } 6423 6424 bool TargetLowering:: 6425 verifyReturnAddressArgumentIsConstant(SDValue Op, SelectionDAG &DAG) const { 6426 if (!isa<ConstantSDNode>(Op.getOperand(0))) { 6427 DAG.getContext()->emitError("argument to '__builtin_return_address' must " 6428 "be a constant integer"); 6429 return true; 6430 } 6431 6432 return false; 6433 } 6434 6435 SDValue TargetLowering::getSqrtInputTest(SDValue Op, SelectionDAG &DAG, 6436 const DenormalMode &Mode) const { 6437 SDLoc DL(Op); 6438 EVT VT = Op.getValueType(); 6439 EVT CCVT = getSetCCResultType(DAG.getDataLayout(), *DAG.getContext(), VT); 6440 SDValue FPZero = DAG.getConstantFP(0.0, DL, VT); 6441 // Testing it with denormal inputs to avoid wrong estimate. 6442 if (Mode.Input == DenormalMode::IEEE) { 6443 // This is specifically a check for the handling of denormal inputs, 6444 // not the result. 6445 6446 // Test = fabs(X) < SmallestNormal 6447 const fltSemantics &FltSem = DAG.EVTToAPFloatSemantics(VT); 6448 APFloat SmallestNorm = APFloat::getSmallestNormalized(FltSem); 6449 SDValue NormC = DAG.getConstantFP(SmallestNorm, DL, VT); 6450 SDValue Fabs = DAG.getNode(ISD::FABS, DL, VT, Op); 6451 return DAG.getSetCC(DL, CCVT, Fabs, NormC, ISD::SETLT); 6452 } 6453 // Test = X == 0.0 6454 return DAG.getSetCC(DL, CCVT, Op, FPZero, ISD::SETEQ); 6455 } 6456 6457 SDValue TargetLowering::getNegatedExpression(SDValue Op, SelectionDAG &DAG, 6458 bool LegalOps, bool OptForSize, 6459 NegatibleCost &Cost, 6460 unsigned Depth) const { 6461 // fneg is removable even if it has multiple uses. 6462 if (Op.getOpcode() == ISD::FNEG) { 6463 Cost = NegatibleCost::Cheaper; 6464 return Op.getOperand(0); 6465 } 6466 6467 // Don't recurse exponentially. 6468 if (Depth > SelectionDAG::MaxRecursionDepth) 6469 return SDValue(); 6470 6471 // Pre-increment recursion depth for use in recursive calls. 6472 ++Depth; 6473 const SDNodeFlags Flags = Op->getFlags(); 6474 const TargetOptions &Options = DAG.getTarget().Options; 6475 EVT VT = Op.getValueType(); 6476 unsigned Opcode = Op.getOpcode(); 6477 6478 // Don't allow anything with multiple uses unless we know it is free. 6479 if (!Op.hasOneUse() && Opcode != ISD::ConstantFP) { 6480 bool IsFreeExtend = Opcode == ISD::FP_EXTEND && 6481 isFPExtFree(VT, Op.getOperand(0).getValueType()); 6482 if (!IsFreeExtend) 6483 return SDValue(); 6484 } 6485 6486 auto RemoveDeadNode = [&](SDValue N) { 6487 if (N && N.getNode()->use_empty()) 6488 DAG.RemoveDeadNode(N.getNode()); 6489 }; 6490 6491 SDLoc DL(Op); 6492 6493 // Because getNegatedExpression can delete nodes we need a handle to keep 6494 // temporary nodes alive in case the recursion manages to create an identical 6495 // node. 
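  // (A HandleSDNode acts as a stable extra use of an SDValue, preventing the
  // DAG from deleting or CSE-replacing the node while we negate a sibling
  // operand.)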
6496 std::list<HandleSDNode> Handles; 6497 6498 switch (Opcode) { 6499 case ISD::ConstantFP: { 6500 // Don't invert constant FP values after legalization unless the target says 6501 // the negated constant is legal. 6502 bool IsOpLegal = 6503 isOperationLegal(ISD::ConstantFP, VT) || 6504 isFPImmLegal(neg(cast<ConstantFPSDNode>(Op)->getValueAPF()), VT, 6505 OptForSize); 6506 6507 if (LegalOps && !IsOpLegal) 6508 break; 6509 6510 APFloat V = cast<ConstantFPSDNode>(Op)->getValueAPF(); 6511 V.changeSign(); 6512 SDValue CFP = DAG.getConstantFP(V, DL, VT); 6513 6514 // If we already have the use of the negated floating constant, it is free 6515 // to negate it even it has multiple uses. 6516 if (!Op.hasOneUse() && CFP.use_empty()) 6517 break; 6518 Cost = NegatibleCost::Neutral; 6519 return CFP; 6520 } 6521 case ISD::BUILD_VECTOR: { 6522 // Only permit BUILD_VECTOR of constants. 6523 if (llvm::any_of(Op->op_values(), [&](SDValue N) { 6524 return !N.isUndef() && !isa<ConstantFPSDNode>(N); 6525 })) 6526 break; 6527 6528 bool IsOpLegal = 6529 (isOperationLegal(ISD::ConstantFP, VT) && 6530 isOperationLegal(ISD::BUILD_VECTOR, VT)) || 6531 llvm::all_of(Op->op_values(), [&](SDValue N) { 6532 return N.isUndef() || 6533 isFPImmLegal(neg(cast<ConstantFPSDNode>(N)->getValueAPF()), VT, 6534 OptForSize); 6535 }); 6536 6537 if (LegalOps && !IsOpLegal) 6538 break; 6539 6540 SmallVector<SDValue, 4> Ops; 6541 for (SDValue C : Op->op_values()) { 6542 if (C.isUndef()) { 6543 Ops.push_back(C); 6544 continue; 6545 } 6546 APFloat V = cast<ConstantFPSDNode>(C)->getValueAPF(); 6547 V.changeSign(); 6548 Ops.push_back(DAG.getConstantFP(V, DL, C.getValueType())); 6549 } 6550 Cost = NegatibleCost::Neutral; 6551 return DAG.getBuildVector(VT, DL, Ops); 6552 } 6553 case ISD::FADD: { 6554 if (!Options.NoSignedZerosFPMath && !Flags.hasNoSignedZeros()) 6555 break; 6556 6557 // After operation legalization, it might not be legal to create new FSUBs. 6558 if (LegalOps && !isOperationLegalOrCustom(ISD::FSUB, VT)) 6559 break; 6560 SDValue X = Op.getOperand(0), Y = Op.getOperand(1); 6561 6562 // fold (fneg (fadd X, Y)) -> (fsub (fneg X), Y) 6563 NegatibleCost CostX = NegatibleCost::Expensive; 6564 SDValue NegX = 6565 getNegatedExpression(X, DAG, LegalOps, OptForSize, CostX, Depth); 6566 // Prevent this node from being deleted by the next call. 6567 if (NegX) 6568 Handles.emplace_back(NegX); 6569 6570 // fold (fneg (fadd X, Y)) -> (fsub (fneg Y), X) 6571 NegatibleCost CostY = NegatibleCost::Expensive; 6572 SDValue NegY = 6573 getNegatedExpression(Y, DAG, LegalOps, OptForSize, CostY, Depth); 6574 6575 // We're done with the handles. 6576 Handles.clear(); 6577 6578 // Negate the X if its cost is less or equal than Y. 6579 if (NegX && (CostX <= CostY)) { 6580 Cost = CostX; 6581 SDValue N = DAG.getNode(ISD::FSUB, DL, VT, NegX, Y, Flags); 6582 if (NegY != N) 6583 RemoveDeadNode(NegY); 6584 return N; 6585 } 6586 6587 // Negate the Y if it is not expensive. 6588 if (NegY) { 6589 Cost = CostY; 6590 SDValue N = DAG.getNode(ISD::FSUB, DL, VT, NegY, X, Flags); 6591 if (NegX != N) 6592 RemoveDeadNode(NegX); 6593 return N; 6594 } 6595 break; 6596 } 6597 case ISD::FSUB: { 6598 // We can't turn -(A-B) into B-A when we honor signed zeros. 
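    // (For example, with X == Y == +0.0: fneg(fsub(X, Y)) is -0.0, while
    // fsub(Y, X) is +0.0.)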
6599 if (!Options.NoSignedZerosFPMath && !Flags.hasNoSignedZeros()) 6600 break; 6601 6602 SDValue X = Op.getOperand(0), Y = Op.getOperand(1); 6603 // fold (fneg (fsub 0, Y)) -> Y 6604 if (ConstantFPSDNode *C = isConstOrConstSplatFP(X, /*AllowUndefs*/ true)) 6605 if (C->isZero()) { 6606 Cost = NegatibleCost::Cheaper; 6607 return Y; 6608 } 6609 6610 // fold (fneg (fsub X, Y)) -> (fsub Y, X) 6611 Cost = NegatibleCost::Neutral; 6612 return DAG.getNode(ISD::FSUB, DL, VT, Y, X, Flags); 6613 } 6614 case ISD::FMUL: 6615 case ISD::FDIV: { 6616 SDValue X = Op.getOperand(0), Y = Op.getOperand(1); 6617 6618 // fold (fneg (fmul X, Y)) -> (fmul (fneg X), Y) 6619 NegatibleCost CostX = NegatibleCost::Expensive; 6620 SDValue NegX = 6621 getNegatedExpression(X, DAG, LegalOps, OptForSize, CostX, Depth); 6622 // Prevent this node from being deleted by the next call. 6623 if (NegX) 6624 Handles.emplace_back(NegX); 6625 6626 // fold (fneg (fmul X, Y)) -> (fmul X, (fneg Y)) 6627 NegatibleCost CostY = NegatibleCost::Expensive; 6628 SDValue NegY = 6629 getNegatedExpression(Y, DAG, LegalOps, OptForSize, CostY, Depth); 6630 6631 // We're done with the handles. 6632 Handles.clear(); 6633 6634 // Negate the X if its cost is less or equal than Y. 6635 if (NegX && (CostX <= CostY)) { 6636 Cost = CostX; 6637 SDValue N = DAG.getNode(Opcode, DL, VT, NegX, Y, Flags); 6638 if (NegY != N) 6639 RemoveDeadNode(NegY); 6640 return N; 6641 } 6642 6643 // Ignore X * 2.0 because that is expected to be canonicalized to X + X. 6644 if (auto *C = isConstOrConstSplatFP(Op.getOperand(1))) 6645 if (C->isExactlyValue(2.0) && Op.getOpcode() == ISD::FMUL) 6646 break; 6647 6648 // Negate the Y if it is not expensive. 6649 if (NegY) { 6650 Cost = CostY; 6651 SDValue N = DAG.getNode(Opcode, DL, VT, X, NegY, Flags); 6652 if (NegX != N) 6653 RemoveDeadNode(NegX); 6654 return N; 6655 } 6656 break; 6657 } 6658 case ISD::FMA: 6659 case ISD::FMAD: { 6660 if (!Options.NoSignedZerosFPMath && !Flags.hasNoSignedZeros()) 6661 break; 6662 6663 SDValue X = Op.getOperand(0), Y = Op.getOperand(1), Z = Op.getOperand(2); 6664 NegatibleCost CostZ = NegatibleCost::Expensive; 6665 SDValue NegZ = 6666 getNegatedExpression(Z, DAG, LegalOps, OptForSize, CostZ, Depth); 6667 // Give up if fail to negate the Z. 6668 if (!NegZ) 6669 break; 6670 6671 // Prevent this node from being deleted by the next two calls. 6672 Handles.emplace_back(NegZ); 6673 6674 // fold (fneg (fma X, Y, Z)) -> (fma (fneg X), Y, (fneg Z)) 6675 NegatibleCost CostX = NegatibleCost::Expensive; 6676 SDValue NegX = 6677 getNegatedExpression(X, DAG, LegalOps, OptForSize, CostX, Depth); 6678 // Prevent this node from being deleted by the next call. 6679 if (NegX) 6680 Handles.emplace_back(NegX); 6681 6682 // fold (fneg (fma X, Y, Z)) -> (fma X, (fneg Y), (fneg Z)) 6683 NegatibleCost CostY = NegatibleCost::Expensive; 6684 SDValue NegY = 6685 getNegatedExpression(Y, DAG, LegalOps, OptForSize, CostY, Depth); 6686 6687 // We're done with the handles. 6688 Handles.clear(); 6689 6690 // Negate the X if its cost is less or equal than Y. 6691 if (NegX && (CostX <= CostY)) { 6692 Cost = std::min(CostX, CostZ); 6693 SDValue N = DAG.getNode(Opcode, DL, VT, NegX, Y, NegZ, Flags); 6694 if (NegY != N) 6695 RemoveDeadNode(NegY); 6696 return N; 6697 } 6698 6699 // Negate the Y if it is not expensive. 
6700 if (NegY) { 6701 Cost = std::min(CostY, CostZ); 6702 SDValue N = DAG.getNode(Opcode, DL, VT, X, NegY, NegZ, Flags); 6703 if (NegX != N) 6704 RemoveDeadNode(NegX); 6705 return N; 6706 } 6707 break; 6708 } 6709 6710 case ISD::FP_EXTEND: 6711 case ISD::FSIN: 6712 if (SDValue NegV = getNegatedExpression(Op.getOperand(0), DAG, LegalOps, 6713 OptForSize, Cost, Depth)) 6714 return DAG.getNode(Opcode, DL, VT, NegV); 6715 break; 6716 case ISD::FP_ROUND: 6717 if (SDValue NegV = getNegatedExpression(Op.getOperand(0), DAG, LegalOps, 6718 OptForSize, Cost, Depth)) 6719 return DAG.getNode(ISD::FP_ROUND, DL, VT, NegV, Op.getOperand(1)); 6720 break; 6721 } 6722 6723 return SDValue(); 6724 } 6725 6726 //===----------------------------------------------------------------------===// 6727 // Legalization Utilities 6728 //===----------------------------------------------------------------------===// 6729 6730 bool TargetLowering::expandMUL_LOHI(unsigned Opcode, EVT VT, const SDLoc &dl, 6731 SDValue LHS, SDValue RHS, 6732 SmallVectorImpl<SDValue> &Result, 6733 EVT HiLoVT, SelectionDAG &DAG, 6734 MulExpansionKind Kind, SDValue LL, 6735 SDValue LH, SDValue RL, SDValue RH) const { 6736 assert(Opcode == ISD::MUL || Opcode == ISD::UMUL_LOHI || 6737 Opcode == ISD::SMUL_LOHI); 6738 6739 bool HasMULHS = (Kind == MulExpansionKind::Always) || 6740 isOperationLegalOrCustom(ISD::MULHS, HiLoVT); 6741 bool HasMULHU = (Kind == MulExpansionKind::Always) || 6742 isOperationLegalOrCustom(ISD::MULHU, HiLoVT); 6743 bool HasSMUL_LOHI = (Kind == MulExpansionKind::Always) || 6744 isOperationLegalOrCustom(ISD::SMUL_LOHI, HiLoVT); 6745 bool HasUMUL_LOHI = (Kind == MulExpansionKind::Always) || 6746 isOperationLegalOrCustom(ISD::UMUL_LOHI, HiLoVT); 6747 6748 if (!HasMULHU && !HasMULHS && !HasUMUL_LOHI && !HasSMUL_LOHI) 6749 return false; 6750 6751 unsigned OuterBitSize = VT.getScalarSizeInBits(); 6752 unsigned InnerBitSize = HiLoVT.getScalarSizeInBits(); 6753 6754 // LL, LH, RL, and RH must be either all NULL or all set to a value. 6755 assert((LL.getNode() && LH.getNode() && RL.getNode() && RH.getNode()) || 6756 (!LL.getNode() && !LH.getNode() && !RL.getNode() && !RH.getNode())); 6757 6758 SDVTList VTs = DAG.getVTList(HiLoVT, HiLoVT); 6759 auto MakeMUL_LOHI = [&](SDValue L, SDValue R, SDValue &Lo, SDValue &Hi, 6760 bool Signed) -> bool { 6761 if ((Signed && HasSMUL_LOHI) || (!Signed && HasUMUL_LOHI)) { 6762 Lo = DAG.getNode(Signed ? ISD::SMUL_LOHI : ISD::UMUL_LOHI, dl, VTs, L, R); 6763 Hi = SDValue(Lo.getNode(), 1); 6764 return true; 6765 } 6766 if ((Signed && HasMULHS) || (!Signed && HasMULHU)) { 6767 Lo = DAG.getNode(ISD::MUL, dl, HiLoVT, L, R); 6768 Hi = DAG.getNode(Signed ? ISD::MULHS : ISD::MULHU, dl, HiLoVT, L, R); 6769 return true; 6770 } 6771 return false; 6772 }; 6773 6774 SDValue Lo, Hi; 6775 6776 if (!LL.getNode() && !RL.getNode() && 6777 isOperationLegalOrCustom(ISD::TRUNCATE, HiLoVT)) { 6778 LL = DAG.getNode(ISD::TRUNCATE, dl, HiLoVT, LHS); 6779 RL = DAG.getNode(ISD::TRUNCATE, dl, HiLoVT, RHS); 6780 } 6781 6782 if (!LL.getNode()) 6783 return false; 6784 6785 APInt HighMask = APInt::getHighBitsSet(OuterBitSize, InnerBitSize); 6786 if (DAG.MaskedValueIsZero(LHS, HighMask) && 6787 DAG.MaskedValueIsZero(RHS, HighMask)) { 6788 // The inputs are both zero-extended. 
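    // A single multiplication of the low halves therefore already yields the
    // full-width product, since LHS * RHS == LL * RL when the high halves
    // are zero.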
6789 if (MakeMUL_LOHI(LL, RL, Lo, Hi, false)) { 6790 Result.push_back(Lo); 6791 Result.push_back(Hi); 6792 if (Opcode != ISD::MUL) { 6793 SDValue Zero = DAG.getConstant(0, dl, HiLoVT); 6794 Result.push_back(Zero); 6795 Result.push_back(Zero); 6796 } 6797 return true; 6798 } 6799 } 6800 6801 if (!VT.isVector() && Opcode == ISD::MUL && 6802 DAG.ComputeNumSignBits(LHS) > InnerBitSize && 6803 DAG.ComputeNumSignBits(RHS) > InnerBitSize) { 6804 // The input values are both sign-extended. 6805 // TODO non-MUL case? 6806 if (MakeMUL_LOHI(LL, RL, Lo, Hi, true)) { 6807 Result.push_back(Lo); 6808 Result.push_back(Hi); 6809 return true; 6810 } 6811 } 6812 6813 unsigned ShiftAmount = OuterBitSize - InnerBitSize; 6814 EVT ShiftAmountTy = getShiftAmountTy(VT, DAG.getDataLayout()); 6815 SDValue Shift = DAG.getConstant(ShiftAmount, dl, ShiftAmountTy); 6816 6817 if (!LH.getNode() && !RH.getNode() && 6818 isOperationLegalOrCustom(ISD::SRL, VT) && 6819 isOperationLegalOrCustom(ISD::TRUNCATE, HiLoVT)) { 6820 LH = DAG.getNode(ISD::SRL, dl, VT, LHS, Shift); 6821 LH = DAG.getNode(ISD::TRUNCATE, dl, HiLoVT, LH); 6822 RH = DAG.getNode(ISD::SRL, dl, VT, RHS, Shift); 6823 RH = DAG.getNode(ISD::TRUNCATE, dl, HiLoVT, RH); 6824 } 6825 6826 if (!LH.getNode()) 6827 return false; 6828 6829 if (!MakeMUL_LOHI(LL, RL, Lo, Hi, false)) 6830 return false; 6831 6832 Result.push_back(Lo); 6833 6834 if (Opcode == ISD::MUL) { 6835 RH = DAG.getNode(ISD::MUL, dl, HiLoVT, LL, RH); 6836 LH = DAG.getNode(ISD::MUL, dl, HiLoVT, LH, RL); 6837 Hi = DAG.getNode(ISD::ADD, dl, HiLoVT, Hi, RH); 6838 Hi = DAG.getNode(ISD::ADD, dl, HiLoVT, Hi, LH); 6839 Result.push_back(Hi); 6840 return true; 6841 } 6842 6843 // Compute the full width result. 6844 auto Merge = [&](SDValue Lo, SDValue Hi) -> SDValue { 6845 Lo = DAG.getNode(ISD::ZERO_EXTEND, dl, VT, Lo); 6846 Hi = DAG.getNode(ISD::ZERO_EXTEND, dl, VT, Hi); 6847 Hi = DAG.getNode(ISD::SHL, dl, VT, Hi, Shift); 6848 return DAG.getNode(ISD::OR, dl, VT, Lo, Hi); 6849 }; 6850 6851 SDValue Next = DAG.getNode(ISD::ZERO_EXTEND, dl, VT, Hi); 6852 if (!MakeMUL_LOHI(LL, RH, Lo, Hi, false)) 6853 return false; 6854 6855 // This is effectively the add part of a multiply-add of half-sized operands, 6856 // so it cannot overflow. 
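  // (With I = InnerBitSize, for the usual OuterBitSize == 2 * I split:
  // hi(LL * RL) <= 2^I - 2 and LL * RH <= (2^I - 1)^2, so their sum is at
  // most 2^(2 * I) - 2^I - 1 and fits in the wide type.)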
6857 Next = DAG.getNode(ISD::ADD, dl, VT, Next, Merge(Lo, Hi)); 6858 6859 if (!MakeMUL_LOHI(LH, RL, Lo, Hi, false)) 6860 return false; 6861 6862 SDValue Zero = DAG.getConstant(0, dl, HiLoVT); 6863 EVT BoolType = getSetCCResultType(DAG.getDataLayout(), *DAG.getContext(), VT); 6864 6865 bool UseGlue = (isOperationLegalOrCustom(ISD::ADDC, VT) && 6866 isOperationLegalOrCustom(ISD::ADDE, VT)); 6867 if (UseGlue) 6868 Next = DAG.getNode(ISD::ADDC, dl, DAG.getVTList(VT, MVT::Glue), Next, 6869 Merge(Lo, Hi)); 6870 else 6871 Next = DAG.getNode(ISD::ADDCARRY, dl, DAG.getVTList(VT, BoolType), Next, 6872 Merge(Lo, Hi), DAG.getConstant(0, dl, BoolType)); 6873 6874 SDValue Carry = Next.getValue(1); 6875 Result.push_back(DAG.getNode(ISD::TRUNCATE, dl, HiLoVT, Next)); 6876 Next = DAG.getNode(ISD::SRL, dl, VT, Next, Shift); 6877 6878 if (!MakeMUL_LOHI(LH, RH, Lo, Hi, Opcode == ISD::SMUL_LOHI)) 6879 return false; 6880 6881 if (UseGlue) 6882 Hi = DAG.getNode(ISD::ADDE, dl, DAG.getVTList(HiLoVT, MVT::Glue), Hi, Zero, 6883 Carry); 6884 else 6885 Hi = DAG.getNode(ISD::ADDCARRY, dl, DAG.getVTList(HiLoVT, BoolType), Hi, 6886 Zero, Carry); 6887 6888 Next = DAG.getNode(ISD::ADD, dl, VT, Next, Merge(Lo, Hi)); 6889 6890 if (Opcode == ISD::SMUL_LOHI) { 6891 SDValue NextSub = DAG.getNode(ISD::SUB, dl, VT, Next, 6892 DAG.getNode(ISD::ZERO_EXTEND, dl, VT, RL)); 6893 Next = DAG.getSelectCC(dl, LH, Zero, NextSub, Next, ISD::SETLT); 6894 6895 NextSub = DAG.getNode(ISD::SUB, dl, VT, Next, 6896 DAG.getNode(ISD::ZERO_EXTEND, dl, VT, LL)); 6897 Next = DAG.getSelectCC(dl, RH, Zero, NextSub, Next, ISD::SETLT); 6898 } 6899 6900 Result.push_back(DAG.getNode(ISD::TRUNCATE, dl, HiLoVT, Next)); 6901 Next = DAG.getNode(ISD::SRL, dl, VT, Next, Shift); 6902 Result.push_back(DAG.getNode(ISD::TRUNCATE, dl, HiLoVT, Next)); 6903 return true; 6904 } 6905 6906 bool TargetLowering::expandMUL(SDNode *N, SDValue &Lo, SDValue &Hi, EVT HiLoVT, 6907 SelectionDAG &DAG, MulExpansionKind Kind, 6908 SDValue LL, SDValue LH, SDValue RL, 6909 SDValue RH) const { 6910 SmallVector<SDValue, 2> Result; 6911 bool Ok = expandMUL_LOHI(N->getOpcode(), N->getValueType(0), SDLoc(N), 6912 N->getOperand(0), N->getOperand(1), Result, HiLoVT, 6913 DAG, Kind, LL, LH, RL, RH); 6914 if (Ok) { 6915 assert(Result.size() == 2); 6916 Lo = Result[0]; 6917 Hi = Result[1]; 6918 } 6919 return Ok; 6920 } 6921 6922 // Check that (every element of) Z is undef or not an exact multiple of BW. 6923 static bool isNonZeroModBitWidthOrUndef(SDValue Z, unsigned BW) { 6924 return ISD::matchUnaryPredicate( 6925 Z, 6926 [=](ConstantSDNode *C) { return !C || C->getAPIntValue().urem(BW) != 0; }, 6927 true); 6928 } 6929 6930 SDValue TargetLowering::expandFunnelShift(SDNode *Node, 6931 SelectionDAG &DAG) const { 6932 EVT VT = Node->getValueType(0); 6933 6934 if (VT.isVector() && (!isOperationLegalOrCustom(ISD::SHL, VT) || 6935 !isOperationLegalOrCustom(ISD::SRL, VT) || 6936 !isOperationLegalOrCustom(ISD::SUB, VT) || 6937 !isOperationLegalOrCustomOrPromote(ISD::OR, VT))) 6938 return SDValue(); 6939 6940 SDValue X = Node->getOperand(0); 6941 SDValue Y = Node->getOperand(1); 6942 SDValue Z = Node->getOperand(2); 6943 6944 unsigned BW = VT.getScalarSizeInBits(); 6945 bool IsFSHL = Node->getOpcode() == ISD::FSHL; 6946 SDLoc DL(SDValue(Node, 0)); 6947 6948 EVT ShVT = Z.getValueType(); 6949 6950 // If a funnel shift in the other direction is more supported, use it. 6951 unsigned RevOpcode = IsFSHL ? 
ISD::FSHR : ISD::FSHL; 6952 if (!isOperationLegalOrCustom(Node->getOpcode(), VT) && 6953 isOperationLegalOrCustom(RevOpcode, VT) && isPowerOf2_32(BW)) { 6954 if (isNonZeroModBitWidthOrUndef(Z, BW)) { 6955 // fshl X, Y, Z -> fshr X, Y, -Z 6956 // fshr X, Y, Z -> fshl X, Y, -Z 6957 SDValue Zero = DAG.getConstant(0, DL, ShVT); 6958 Z = DAG.getNode(ISD::SUB, DL, VT, Zero, Z); 6959 } else { 6960 // fshl X, Y, Z -> fshr (srl X, 1), (fshr X, Y, 1), ~Z 6961 // fshr X, Y, Z -> fshl (fshl X, Y, 1), (shl Y, 1), ~Z 6962 SDValue One = DAG.getConstant(1, DL, ShVT); 6963 if (IsFSHL) { 6964 Y = DAG.getNode(RevOpcode, DL, VT, X, Y, One); 6965 X = DAG.getNode(ISD::SRL, DL, VT, X, One); 6966 } else { 6967 X = DAG.getNode(RevOpcode, DL, VT, X, Y, One); 6968 Y = DAG.getNode(ISD::SHL, DL, VT, Y, One); 6969 } 6970 Z = DAG.getNOT(DL, Z, ShVT); 6971 } 6972 return DAG.getNode(RevOpcode, DL, VT, X, Y, Z); 6973 } 6974 6975 SDValue ShX, ShY; 6976 SDValue ShAmt, InvShAmt; 6977 if (isNonZeroModBitWidthOrUndef(Z, BW)) { 6978 // fshl: X << C | Y >> (BW - C) 6979 // fshr: X << (BW - C) | Y >> C 6980 // where C = Z % BW is not zero 6981 SDValue BitWidthC = DAG.getConstant(BW, DL, ShVT); 6982 ShAmt = DAG.getNode(ISD::UREM, DL, ShVT, Z, BitWidthC); 6983 InvShAmt = DAG.getNode(ISD::SUB, DL, ShVT, BitWidthC, ShAmt); 6984 ShX = DAG.getNode(ISD::SHL, DL, VT, X, IsFSHL ? ShAmt : InvShAmt); 6985 ShY = DAG.getNode(ISD::SRL, DL, VT, Y, IsFSHL ? InvShAmt : ShAmt); 6986 } else { 6987 // fshl: X << (Z % BW) | Y >> 1 >> (BW - 1 - (Z % BW)) 6988 // fshr: X << 1 << (BW - 1 - (Z % BW)) | Y >> (Z % BW) 6989 SDValue Mask = DAG.getConstant(BW - 1, DL, ShVT); 6990 if (isPowerOf2_32(BW)) { 6991 // Z % BW -> Z & (BW - 1) 6992 ShAmt = DAG.getNode(ISD::AND, DL, ShVT, Z, Mask); 6993 // (BW - 1) - (Z % BW) -> ~Z & (BW - 1) 6994 InvShAmt = DAG.getNode(ISD::AND, DL, ShVT, DAG.getNOT(DL, Z, ShVT), Mask); 6995 } else { 6996 SDValue BitWidthC = DAG.getConstant(BW, DL, ShVT); 6997 ShAmt = DAG.getNode(ISD::UREM, DL, ShVT, Z, BitWidthC); 6998 InvShAmt = DAG.getNode(ISD::SUB, DL, ShVT, Mask, ShAmt); 6999 } 7000 7001 SDValue One = DAG.getConstant(1, DL, ShVT); 7002 if (IsFSHL) { 7003 ShX = DAG.getNode(ISD::SHL, DL, VT, X, ShAmt); 7004 SDValue ShY1 = DAG.getNode(ISD::SRL, DL, VT, Y, One); 7005 ShY = DAG.getNode(ISD::SRL, DL, VT, ShY1, InvShAmt); 7006 } else { 7007 SDValue ShX1 = DAG.getNode(ISD::SHL, DL, VT, X, One); 7008 ShX = DAG.getNode(ISD::SHL, DL, VT, ShX1, InvShAmt); 7009 ShY = DAG.getNode(ISD::SRL, DL, VT, Y, ShAmt); 7010 } 7011 } 7012 return DAG.getNode(ISD::OR, DL, VT, ShX, ShY); 7013 } 7014 7015 // TODO: Merge with expandFunnelShift. 7016 SDValue TargetLowering::expandROT(SDNode *Node, bool AllowVectorOps, 7017 SelectionDAG &DAG) const { 7018 EVT VT = Node->getValueType(0); 7019 unsigned EltSizeInBits = VT.getScalarSizeInBits(); 7020 bool IsLeft = Node->getOpcode() == ISD::ROTL; 7021 SDValue Op0 = Node->getOperand(0); 7022 SDValue Op1 = Node->getOperand(1); 7023 SDLoc DL(SDValue(Node, 0)); 7024 7025 EVT ShVT = Op1.getValueType(); 7026 SDValue Zero = DAG.getConstant(0, DL, ShVT); 7027 7028 // If a rotate in the other direction is more supported, use it. 7029 unsigned RevRot = IsLeft ? 
ISD::ROTR : ISD::ROTL; 7030 if (!isOperationLegalOrCustom(Node->getOpcode(), VT) && 7031 isOperationLegalOrCustom(RevRot, VT) && isPowerOf2_32(EltSizeInBits)) { 7032 SDValue Sub = DAG.getNode(ISD::SUB, DL, ShVT, Zero, Op1); 7033 return DAG.getNode(RevRot, DL, VT, Op0, Sub); 7034 } 7035 7036 if (!AllowVectorOps && VT.isVector() && 7037 (!isOperationLegalOrCustom(ISD::SHL, VT) || 7038 !isOperationLegalOrCustom(ISD::SRL, VT) || 7039 !isOperationLegalOrCustom(ISD::SUB, VT) || 7040 !isOperationLegalOrCustomOrPromote(ISD::OR, VT) || 7041 !isOperationLegalOrCustomOrPromote(ISD::AND, VT))) 7042 return SDValue(); 7043 7044 unsigned ShOpc = IsLeft ? ISD::SHL : ISD::SRL; 7045 unsigned HsOpc = IsLeft ? ISD::SRL : ISD::SHL; 7046 SDValue BitWidthMinusOneC = DAG.getConstant(EltSizeInBits - 1, DL, ShVT); 7047 SDValue ShVal; 7048 SDValue HsVal; 7049 if (isPowerOf2_32(EltSizeInBits)) { 7050 // (rotl x, c) -> x << (c & (w - 1)) | x >> (-c & (w - 1)) 7051 // (rotr x, c) -> x >> (c & (w - 1)) | x << (-c & (w - 1)) 7052 SDValue NegOp1 = DAG.getNode(ISD::SUB, DL, ShVT, Zero, Op1); 7053 SDValue ShAmt = DAG.getNode(ISD::AND, DL, ShVT, Op1, BitWidthMinusOneC); 7054 ShVal = DAG.getNode(ShOpc, DL, VT, Op0, ShAmt); 7055 SDValue HsAmt = DAG.getNode(ISD::AND, DL, ShVT, NegOp1, BitWidthMinusOneC); 7056 HsVal = DAG.getNode(HsOpc, DL, VT, Op0, HsAmt); 7057 } else { 7058 // (rotl x, c) -> x << (c % w) | x >> 1 >> (w - 1 - (c % w)) 7059 // (rotr x, c) -> x >> (c % w) | x << 1 << (w - 1 - (c % w)) 7060 SDValue BitWidthC = DAG.getConstant(EltSizeInBits, DL, ShVT); 7061 SDValue ShAmt = DAG.getNode(ISD::UREM, DL, ShVT, Op1, BitWidthC); 7062 ShVal = DAG.getNode(ShOpc, DL, VT, Op0, ShAmt); 7063 SDValue HsAmt = DAG.getNode(ISD::SUB, DL, ShVT, BitWidthMinusOneC, ShAmt); 7064 SDValue One = DAG.getConstant(1, DL, ShVT); 7065 HsVal = 7066 DAG.getNode(HsOpc, DL, VT, DAG.getNode(HsOpc, DL, VT, Op0, One), HsAmt); 7067 } 7068 return DAG.getNode(ISD::OR, DL, VT, ShVal, HsVal); 7069 } 7070 7071 void TargetLowering::expandShiftParts(SDNode *Node, SDValue &Lo, SDValue &Hi, 7072 SelectionDAG &DAG) const { 7073 assert(Node->getNumOperands() == 3 && "Not a double-shift!"); 7074 EVT VT = Node->getValueType(0); 7075 unsigned VTBits = VT.getScalarSizeInBits(); 7076 assert(isPowerOf2_32(VTBits) && "Power-of-two integer type expected"); 7077 7078 bool IsSHL = Node->getOpcode() == ISD::SHL_PARTS; 7079 bool IsSRA = Node->getOpcode() == ISD::SRA_PARTS; 7080 SDValue ShOpLo = Node->getOperand(0); 7081 SDValue ShOpHi = Node->getOperand(1); 7082 SDValue ShAmt = Node->getOperand(2); 7083 EVT ShAmtVT = ShAmt.getValueType(); 7084 EVT ShAmtCCVT = 7085 getSetCCResultType(DAG.getDataLayout(), *DAG.getContext(), ShAmtVT); 7086 SDLoc dl(Node); 7087 7088 // ISD::FSHL and ISD::FSHR have defined overflow behavior but ISD::SHL and 7089 // ISD::SRA/L nodes haven't. Insert an AND to be safe, it's usually optimized 7090 // away during isel. 7091 SDValue SafeShAmt = DAG.getNode(ISD::AND, dl, ShAmtVT, ShAmt, 7092 DAG.getConstant(VTBits - 1, dl, ShAmtVT)); 7093 SDValue Tmp1 = IsSRA ? DAG.getNode(ISD::SRA, dl, VT, ShOpHi, 7094 DAG.getConstant(VTBits - 1, dl, ShAmtVT)) 7095 : DAG.getConstant(0, dl, VT); 7096 7097 SDValue Tmp2, Tmp3; 7098 if (IsSHL) { 7099 Tmp2 = DAG.getNode(ISD::FSHL, dl, VT, ShOpHi, ShOpLo, ShAmt); 7100 Tmp3 = DAG.getNode(ISD::SHL, dl, VT, ShOpLo, SafeShAmt); 7101 } else { 7102 Tmp2 = DAG.getNode(ISD::FSHR, dl, VT, ShOpHi, ShOpLo, ShAmt); 7103 Tmp3 = DAG.getNode(IsSRA ? 
ISD::SRA : ISD::SRL, dl, VT, ShOpHi, SafeShAmt); 7104 } 7105 7106 // If the shift amount is larger or equal than the width of a part we don't 7107 // use the result from the FSHL/FSHR. Insert a test and select the appropriate 7108 // values for large shift amounts. 7109 SDValue AndNode = DAG.getNode(ISD::AND, dl, ShAmtVT, ShAmt, 7110 DAG.getConstant(VTBits, dl, ShAmtVT)); 7111 SDValue Cond = DAG.getSetCC(dl, ShAmtCCVT, AndNode, 7112 DAG.getConstant(0, dl, ShAmtVT), ISD::SETNE); 7113 7114 if (IsSHL) { 7115 Hi = DAG.getNode(ISD::SELECT, dl, VT, Cond, Tmp3, Tmp2); 7116 Lo = DAG.getNode(ISD::SELECT, dl, VT, Cond, Tmp1, Tmp3); 7117 } else { 7118 Lo = DAG.getNode(ISD::SELECT, dl, VT, Cond, Tmp3, Tmp2); 7119 Hi = DAG.getNode(ISD::SELECT, dl, VT, Cond, Tmp1, Tmp3); 7120 } 7121 } 7122 7123 bool TargetLowering::expandFP_TO_SINT(SDNode *Node, SDValue &Result, 7124 SelectionDAG &DAG) const { 7125 unsigned OpNo = Node->isStrictFPOpcode() ? 1 : 0; 7126 SDValue Src = Node->getOperand(OpNo); 7127 EVT SrcVT = Src.getValueType(); 7128 EVT DstVT = Node->getValueType(0); 7129 SDLoc dl(SDValue(Node, 0)); 7130 7131 // FIXME: Only f32 to i64 conversions are supported. 7132 if (SrcVT != MVT::f32 || DstVT != MVT::i64) 7133 return false; 7134 7135 if (Node->isStrictFPOpcode()) 7136 // When a NaN is converted to an integer a trap is allowed. We can't 7137 // use this expansion here because it would eliminate that trap. Other 7138 // traps are also allowed and cannot be eliminated. See 7139 // IEEE 754-2008 sec 5.8. 7140 return false; 7141 7142 // Expand f32 -> i64 conversion 7143 // This algorithm comes from compiler-rt's implementation of fixsfdi: 7144 // https://github.com/llvm/llvm-project/blob/main/compiler-rt/lib/builtins/fixsfdi.c 7145 unsigned SrcEltBits = SrcVT.getScalarSizeInBits(); 7146 EVT IntVT = SrcVT.changeTypeToInteger(); 7147 EVT IntShVT = getShiftAmountTy(IntVT, DAG.getDataLayout()); 7148 7149 SDValue ExponentMask = DAG.getConstant(0x7F800000, dl, IntVT); 7150 SDValue ExponentLoBit = DAG.getConstant(23, dl, IntVT); 7151 SDValue Bias = DAG.getConstant(127, dl, IntVT); 7152 SDValue SignMask = DAG.getConstant(APInt::getSignMask(SrcEltBits), dl, IntVT); 7153 SDValue SignLowBit = DAG.getConstant(SrcEltBits - 1, dl, IntVT); 7154 SDValue MantissaMask = DAG.getConstant(0x007FFFFF, dl, IntVT); 7155 7156 SDValue Bits = DAG.getNode(ISD::BITCAST, dl, IntVT, Src); 7157 7158 SDValue ExponentBits = DAG.getNode( 7159 ISD::SRL, dl, IntVT, DAG.getNode(ISD::AND, dl, IntVT, Bits, ExponentMask), 7160 DAG.getZExtOrTrunc(ExponentLoBit, dl, IntShVT)); 7161 SDValue Exponent = DAG.getNode(ISD::SUB, dl, IntVT, ExponentBits, Bias); 7162 7163 SDValue Sign = DAG.getNode(ISD::SRA, dl, IntVT, 7164 DAG.getNode(ISD::AND, dl, IntVT, Bits, SignMask), 7165 DAG.getZExtOrTrunc(SignLowBit, dl, IntShVT)); 7166 Sign = DAG.getSExtOrTrunc(Sign, dl, DstVT); 7167 7168 SDValue R = DAG.getNode(ISD::OR, dl, IntVT, 7169 DAG.getNode(ISD::AND, dl, IntVT, Bits, MantissaMask), 7170 DAG.getConstant(0x00800000, dl, IntVT)); 7171 7172 R = DAG.getZExtOrTrunc(R, dl, DstVT); 7173 7174 R = DAG.getSelectCC( 7175 dl, Exponent, ExponentLoBit, 7176 DAG.getNode(ISD::SHL, dl, DstVT, R, 7177 DAG.getZExtOrTrunc( 7178 DAG.getNode(ISD::SUB, dl, IntVT, Exponent, ExponentLoBit), 7179 dl, IntShVT)), 7180 DAG.getNode(ISD::SRL, dl, DstVT, R, 7181 DAG.getZExtOrTrunc( 7182 DAG.getNode(ISD::SUB, dl, IntVT, ExponentLoBit, Exponent), 7183 dl, IntShVT)), 7184 ISD::SETGT); 7185 7186 SDValue Ret = DAG.getNode(ISD::SUB, dl, DstVT, 7187 DAG.getNode(ISD::XOR, dl, DstVT, R, Sign), 
                            Sign);
7188
7189   Result = DAG.getSelectCC(dl, Exponent, DAG.getConstant(0, dl, IntVT),
7190                            DAG.getConstant(0, dl, DstVT), Ret, ISD::SETLT);
7191   return true;
7192 }
7193
7194 bool TargetLowering::expandFP_TO_UINT(SDNode *Node, SDValue &Result,
7195                                       SDValue &Chain,
7196                                       SelectionDAG &DAG) const {
7197   SDLoc dl(SDValue(Node, 0));
7198   unsigned OpNo = Node->isStrictFPOpcode() ? 1 : 0;
7199   SDValue Src = Node->getOperand(OpNo);
7200
7201   EVT SrcVT = Src.getValueType();
7202   EVT DstVT = Node->getValueType(0);
7203   EVT SetCCVT =
7204       getSetCCResultType(DAG.getDataLayout(), *DAG.getContext(), SrcVT);
7205   EVT DstSetCCVT =
7206       getSetCCResultType(DAG.getDataLayout(), *DAG.getContext(), DstVT);
7207
7208   // Only expand vector types if we have the appropriate vector bit operations.
7209   unsigned SIntOpcode = Node->isStrictFPOpcode() ? ISD::STRICT_FP_TO_SINT :
7210                                                    ISD::FP_TO_SINT;
7211   if (DstVT.isVector() && (!isOperationLegalOrCustom(SIntOpcode, DstVT) ||
7212                            !isOperationLegalOrCustomOrPromote(ISD::XOR, SrcVT)))
7213     return false;
7214
7215   // If the maximum float value is smaller than the signed integer range,
7216   // the destination signmask can't be represented by the float, so we can
7217   // just use FP_TO_SINT directly.
7218   const fltSemantics &APFSem = DAG.EVTToAPFloatSemantics(SrcVT);
7219   APFloat APF(APFSem, APInt::getZero(SrcVT.getScalarSizeInBits()));
7220   APInt SignMask = APInt::getSignMask(DstVT.getScalarSizeInBits());
7221   if (APFloat::opOverflow &
7222       APF.convertFromAPInt(SignMask, false, APFloat::rmNearestTiesToEven)) {
7223     if (Node->isStrictFPOpcode()) {
7224       Result = DAG.getNode(ISD::STRICT_FP_TO_SINT, dl, { DstVT, MVT::Other },
7225                            { Node->getOperand(0), Src });
7226       Chain = Result.getValue(1);
7227     } else
7228       Result = DAG.getNode(ISD::FP_TO_SINT, dl, DstVT, Src);
7229     return true;
7230   }
7231
7232   // Don't expand if there isn't a cheap FSUB instruction.
7233   if (!isOperationLegalOrCustom(
7234           Node->isStrictFPOpcode() ? ISD::STRICT_FSUB : ISD::FSUB, SrcVT))
7235     return false;
7236
7237   SDValue Cst = DAG.getConstantFP(APF, dl, SrcVT);
7238   SDValue Sel;
7239
7240   if (Node->isStrictFPOpcode()) {
7241     Sel = DAG.getSetCC(dl, SetCCVT, Src, Cst, ISD::SETLT,
7242                        Node->getOperand(0), /*IsSignaling*/ true);
7243     Chain = Sel.getValue(1);
7244   } else {
7245     Sel = DAG.getSetCC(dl, SetCCVT, Src, Cst, ISD::SETLT);
7246   }
7247
7248   bool Strict = Node->isStrictFPOpcode() ||
7249                 shouldUseStrictFP_TO_INT(SrcVT, DstVT, /*IsSigned*/ false);
7250
7251   if (Strict) {
7252     // Expand based on the maximum range of FP_TO_SINT; if the value exceeds
7253     // the signmask, offset it first (the result should be fully representable).
7254     // Sel = Src < 0x8000000000000000
7255     // FltOfs = select Sel, 0, 0x8000000000000000
7256     // IntOfs = select Sel, 0, 0x8000000000000000
7257     // Result = fp_to_sint(Src - FltOfs) ^ IntOfs
7258
7259     // TODO: Should any fast-math-flags be set for the FSUB?
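    // (Note: when Src >= the signmask value, fp_to_sint(Src - FltOfs) lands
    // in the non-negative half of the destination range, so its top bit is
    // clear and the XOR with IntOfs is equivalent to adding the signmask
    // back.)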
7260 SDValue FltOfs = DAG.getSelect(dl, SrcVT, Sel, 7261 DAG.getConstantFP(0.0, dl, SrcVT), Cst); 7262 Sel = DAG.getBoolExtOrTrunc(Sel, dl, DstSetCCVT, DstVT); 7263 SDValue IntOfs = DAG.getSelect(dl, DstVT, Sel, 7264 DAG.getConstant(0, dl, DstVT), 7265 DAG.getConstant(SignMask, dl, DstVT)); 7266 SDValue SInt; 7267 if (Node->isStrictFPOpcode()) { 7268 SDValue Val = DAG.getNode(ISD::STRICT_FSUB, dl, { SrcVT, MVT::Other }, 7269 { Chain, Src, FltOfs }); 7270 SInt = DAG.getNode(ISD::STRICT_FP_TO_SINT, dl, { DstVT, MVT::Other }, 7271 { Val.getValue(1), Val }); 7272 Chain = SInt.getValue(1); 7273 } else { 7274 SDValue Val = DAG.getNode(ISD::FSUB, dl, SrcVT, Src, FltOfs); 7275 SInt = DAG.getNode(ISD::FP_TO_SINT, dl, DstVT, Val); 7276 } 7277 Result = DAG.getNode(ISD::XOR, dl, DstVT, SInt, IntOfs); 7278 } else { 7279 // Expand based on maximum range of FP_TO_SINT: 7280 // True = fp_to_sint(Src) 7281 // False = 0x8000000000000000 + fp_to_sint(Src - 0x8000000000000000) 7282 // Result = select (Src < 0x8000000000000000), True, False 7283 7284 SDValue True = DAG.getNode(ISD::FP_TO_SINT, dl, DstVT, Src); 7285 // TODO: Should any fast-math-flags be set for the FSUB? 7286 SDValue False = DAG.getNode(ISD::FP_TO_SINT, dl, DstVT, 7287 DAG.getNode(ISD::FSUB, dl, SrcVT, Src, Cst)); 7288 False = DAG.getNode(ISD::XOR, dl, DstVT, False, 7289 DAG.getConstant(SignMask, dl, DstVT)); 7290 Sel = DAG.getBoolExtOrTrunc(Sel, dl, DstSetCCVT, DstVT); 7291 Result = DAG.getSelect(dl, DstVT, Sel, True, False); 7292 } 7293 return true; 7294 } 7295 7296 bool TargetLowering::expandUINT_TO_FP(SDNode *Node, SDValue &Result, 7297 SDValue &Chain, 7298 SelectionDAG &DAG) const { 7299 // This transform is not correct for converting 0 when rounding mode is set 7300 // to round toward negative infinity which will produce -0.0. So disable under 7301 // strictfp. 7302 if (Node->isStrictFPOpcode()) 7303 return false; 7304 7305 SDValue Src = Node->getOperand(0); 7306 EVT SrcVT = Src.getValueType(); 7307 EVT DstVT = Node->getValueType(0); 7308 7309 if (SrcVT.getScalarType() != MVT::i64 || DstVT.getScalarType() != MVT::f64) 7310 return false; 7311 7312 // Only expand vector types if we have the appropriate vector bit operations. 7313 if (SrcVT.isVector() && (!isOperationLegalOrCustom(ISD::SRL, SrcVT) || 7314 !isOperationLegalOrCustom(ISD::FADD, DstVT) || 7315 !isOperationLegalOrCustom(ISD::FSUB, DstVT) || 7316 !isOperationLegalOrCustomOrPromote(ISD::OR, SrcVT) || 7317 !isOperationLegalOrCustomOrPromote(ISD::AND, SrcVT))) 7318 return false; 7319 7320 SDLoc dl(SDValue(Node, 0)); 7321 EVT ShiftVT = getShiftAmountTy(SrcVT, DAG.getDataLayout()); 7322 7323 // Implementation of unsigned i64 to f64 following the algorithm in 7324 // __floatundidf in compiler_rt. This implementation performs rounding 7325 // correctly in all rounding modes with the exception of converting 0 7326 // when rounding toward negative infinity. In that case the fsub will produce 7327 // -0.0. This will be added to +0.0 and produce -0.0 which is incorrect. 
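  // The magic constants below are IEEE-754 double bit patterns:
  // 0x4330000000000000 is 2^52, 0x4530000000000000 is 2^84, and
  // 0x4530000000100000 is 2^84 + 2^52. OR-ing the 32-bit halves of Src into
  // them forms the exact doubles 2^52 + Lo and 2^84 + Hi * 2^32, and
  // (2^52 + Lo) + ((2^84 + Hi * 2^32) - (2^84 + 2^52)) == Hi * 2^32 + Lo.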
7328 SDValue TwoP52 = DAG.getConstant(UINT64_C(0x4330000000000000), dl, SrcVT); 7329 SDValue TwoP84PlusTwoP52 = DAG.getConstantFP( 7330 BitsToDouble(UINT64_C(0x4530000000100000)), dl, DstVT); 7331 SDValue TwoP84 = DAG.getConstant(UINT64_C(0x4530000000000000), dl, SrcVT); 7332 SDValue LoMask = DAG.getConstant(UINT64_C(0x00000000FFFFFFFF), dl, SrcVT); 7333 SDValue HiShift = DAG.getConstant(32, dl, ShiftVT); 7334 7335 SDValue Lo = DAG.getNode(ISD::AND, dl, SrcVT, Src, LoMask); 7336 SDValue Hi = DAG.getNode(ISD::SRL, dl, SrcVT, Src, HiShift); 7337 SDValue LoOr = DAG.getNode(ISD::OR, dl, SrcVT, Lo, TwoP52); 7338 SDValue HiOr = DAG.getNode(ISD::OR, dl, SrcVT, Hi, TwoP84); 7339 SDValue LoFlt = DAG.getBitcast(DstVT, LoOr); 7340 SDValue HiFlt = DAG.getBitcast(DstVT, HiOr); 7341 SDValue HiSub = 7342 DAG.getNode(ISD::FSUB, dl, DstVT, HiFlt, TwoP84PlusTwoP52); 7343 Result = DAG.getNode(ISD::FADD, dl, DstVT, LoFlt, HiSub); 7344 return true; 7345 } 7346 7347 SDValue 7348 TargetLowering::createSelectForFMINNUM_FMAXNUM(SDNode *Node, 7349 SelectionDAG &DAG) const { 7350 unsigned Opcode = Node->getOpcode(); 7351 assert((Opcode == ISD::FMINNUM || Opcode == ISD::FMAXNUM || 7352 Opcode == ISD::STRICT_FMINNUM || Opcode == ISD::STRICT_FMAXNUM) && 7353 "Wrong opcode"); 7354 7355 if (Node->getFlags().hasNoNaNs()) { 7356 ISD::CondCode Pred = Opcode == ISD::FMINNUM ? ISD::SETLT : ISD::SETGT; 7357 SDValue Op1 = Node->getOperand(0); 7358 SDValue Op2 = Node->getOperand(1); 7359 SDValue SelCC = DAG.getSelectCC(SDLoc(Node), Op1, Op2, Op1, Op2, Pred); 7360 // Copy FMF flags, but always set the no-signed-zeros flag 7361 // as this is implied by the FMINNUM/FMAXNUM semantics. 7362 SDNodeFlags Flags = Node->getFlags(); 7363 Flags.setNoSignedZeros(true); 7364 SelCC->setFlags(Flags); 7365 return SelCC; 7366 } 7367 7368 return SDValue(); 7369 } 7370 7371 SDValue TargetLowering::expandFMINNUM_FMAXNUM(SDNode *Node, 7372 SelectionDAG &DAG) const { 7373 SDLoc dl(Node); 7374 unsigned NewOp = Node->getOpcode() == ISD::FMINNUM ? 7375 ISD::FMINNUM_IEEE : ISD::FMAXNUM_IEEE; 7376 EVT VT = Node->getValueType(0); 7377 7378 if (VT.isScalableVector()) 7379 report_fatal_error( 7380 "Expanding fminnum/fmaxnum for scalable vectors is undefined."); 7381 7382 if (isOperationLegalOrCustom(NewOp, VT)) { 7383 SDValue Quiet0 = Node->getOperand(0); 7384 SDValue Quiet1 = Node->getOperand(1); 7385 7386 if (!Node->getFlags().hasNoNaNs()) { 7387 // Insert canonicalizes if it's possible we need to quiet to get correct 7388 // sNaN behavior. 7389 if (!DAG.isKnownNeverSNaN(Quiet0)) { 7390 Quiet0 = DAG.getNode(ISD::FCANONICALIZE, dl, VT, Quiet0, 7391 Node->getFlags()); 7392 } 7393 if (!DAG.isKnownNeverSNaN(Quiet1)) { 7394 Quiet1 = DAG.getNode(ISD::FCANONICALIZE, dl, VT, Quiet1, 7395 Node->getFlags()); 7396 } 7397 } 7398 7399 return DAG.getNode(NewOp, dl, VT, Quiet0, Quiet1, Node->getFlags()); 7400 } 7401 7402 // If the target has FMINIMUM/FMAXIMUM but not FMINNUM/FMAXNUM use that 7403 // instead if there are no NaNs. 7404 if (Node->getFlags().hasNoNaNs()) { 7405 unsigned IEEE2018Op = 7406 Node->getOpcode() == ISD::FMINNUM ? ISD::FMINIMUM : ISD::FMAXIMUM; 7407 if (isOperationLegalOrCustom(IEEE2018Op, VT)) { 7408 return DAG.getNode(IEEE2018Op, dl, VT, Node->getOperand(0), 7409 Node->getOperand(1), Node->getFlags()); 7410 } 7411 } 7412 7413 if (SDValue SelCC = createSelectForFMINNUM_FMAXNUM(Node, DAG)) 7414 return SelCC; 7415 7416 return SDValue(); 7417 } 7418 7419 // Only expand vector types if we have the appropriate vector bit operations. 
7420 static bool canExpandVectorCTPOP(const TargetLowering &TLI, EVT VT) { 7421 assert(VT.isVector() && "Expected vector type"); 7422 unsigned Len = VT.getScalarSizeInBits(); 7423 return TLI.isOperationLegalOrCustom(ISD::ADD, VT) && 7424 TLI.isOperationLegalOrCustom(ISD::SUB, VT) && 7425 TLI.isOperationLegalOrCustom(ISD::SRL, VT) && 7426 (Len == 8 || TLI.isOperationLegalOrCustom(ISD::MUL, VT)) && 7427 TLI.isOperationLegalOrCustomOrPromote(ISD::AND, VT); 7428 } 7429 7430 SDValue TargetLowering::expandCTPOP(SDNode *Node, SelectionDAG &DAG) const { 7431 SDLoc dl(Node); 7432 EVT VT = Node->getValueType(0); 7433 EVT ShVT = getShiftAmountTy(VT, DAG.getDataLayout()); 7434 SDValue Op = Node->getOperand(0); 7435 unsigned Len = VT.getScalarSizeInBits(); 7436 assert(VT.isInteger() && "CTPOP not implemented for this type."); 7437 7438 // TODO: Add support for irregular type lengths. 7439 if (!(Len <= 128 && Len % 8 == 0)) 7440 return SDValue(); 7441 7442 // Only expand vector types if we have the appropriate vector bit operations. 7443 if (VT.isVector() && !canExpandVectorCTPOP(*this, VT)) 7444 return SDValue(); 7445 7446 // This is the "best" algorithm from 7447 // http://graphics.stanford.edu/~seander/bithacks.html#CountBitsSetParallel 7448 SDValue Mask55 = 7449 DAG.getConstant(APInt::getSplat(Len, APInt(8, 0x55)), dl, VT); 7450 SDValue Mask33 = 7451 DAG.getConstant(APInt::getSplat(Len, APInt(8, 0x33)), dl, VT); 7452 SDValue Mask0F = 7453 DAG.getConstant(APInt::getSplat(Len, APInt(8, 0x0F)), dl, VT); 7454 SDValue Mask01 = 7455 DAG.getConstant(APInt::getSplat(Len, APInt(8, 0x01)), dl, VT); 7456 7457 // v = v - ((v >> 1) & 0x55555555...) 7458 Op = DAG.getNode(ISD::SUB, dl, VT, Op, 7459 DAG.getNode(ISD::AND, dl, VT, 7460 DAG.getNode(ISD::SRL, dl, VT, Op, 7461 DAG.getConstant(1, dl, ShVT)), 7462 Mask55)); 7463 // v = (v & 0x33333333...) + ((v >> 2) & 0x33333333...) 7464 Op = DAG.getNode(ISD::ADD, dl, VT, DAG.getNode(ISD::AND, dl, VT, Op, Mask33), 7465 DAG.getNode(ISD::AND, dl, VT, 7466 DAG.getNode(ISD::SRL, dl, VT, Op, 7467 DAG.getConstant(2, dl, ShVT)), 7468 Mask33)); 7469 // v = (v + (v >> 4)) & 0x0F0F0F0F... 7470 Op = DAG.getNode(ISD::AND, dl, VT, 7471 DAG.getNode(ISD::ADD, dl, VT, Op, 7472 DAG.getNode(ISD::SRL, dl, VT, Op, 7473 DAG.getConstant(4, dl, ShVT))), 7474 Mask0F); 7475 // v = (v * 0x01010101...) >> (Len - 8) 7476 if (Len > 8) 7477 Op = 7478 DAG.getNode(ISD::SRL, dl, VT, DAG.getNode(ISD::MUL, dl, VT, Op, Mask01), 7479 DAG.getConstant(Len - 8, dl, ShVT)); 7480 7481 return Op; 7482 } 7483 7484 SDValue TargetLowering::expandCTLZ(SDNode *Node, SelectionDAG &DAG) const { 7485 SDLoc dl(Node); 7486 EVT VT = Node->getValueType(0); 7487 EVT ShVT = getShiftAmountTy(VT, DAG.getDataLayout()); 7488 SDValue Op = Node->getOperand(0); 7489 unsigned NumBitsPerElt = VT.getScalarSizeInBits(); 7490 7491 // If the non-ZERO_UNDEF version is supported we can use that instead. 7492 if (Node->getOpcode() == ISD::CTLZ_ZERO_UNDEF && 7493 isOperationLegalOrCustom(ISD::CTLZ, VT)) 7494 return DAG.getNode(ISD::CTLZ, dl, VT, Op); 7495 7496 // If the ZERO_UNDEF version is supported use that and handle the zero case. 
7497 if (isOperationLegalOrCustom(ISD::CTLZ_ZERO_UNDEF, VT)) { 7498 EVT SetCCVT = 7499 getSetCCResultType(DAG.getDataLayout(), *DAG.getContext(), VT); 7500 SDValue CTLZ = DAG.getNode(ISD::CTLZ_ZERO_UNDEF, dl, VT, Op); 7501 SDValue Zero = DAG.getConstant(0, dl, VT); 7502 SDValue SrcIsZero = DAG.getSetCC(dl, SetCCVT, Op, Zero, ISD::SETEQ); 7503 return DAG.getSelect(dl, VT, SrcIsZero, 7504 DAG.getConstant(NumBitsPerElt, dl, VT), CTLZ); 7505 } 7506 7507 // Only expand vector types if we have the appropriate vector bit operations. 7508 // This includes the operations needed to expand CTPOP if it isn't supported. 7509 if (VT.isVector() && (!isPowerOf2_32(NumBitsPerElt) || 7510 (!isOperationLegalOrCustom(ISD::CTPOP, VT) && 7511 !canExpandVectorCTPOP(*this, VT)) || 7512 !isOperationLegalOrCustom(ISD::SRL, VT) || 7513 !isOperationLegalOrCustomOrPromote(ISD::OR, VT))) 7514 return SDValue(); 7515 7516 // for now, we do this: 7517 // x = x | (x >> 1); 7518 // x = x | (x >> 2); 7519 // ... 7520 // x = x | (x >>16); 7521 // x = x | (x >>32); // for 64-bit input 7522 // return popcount(~x); 7523 // 7524 // Ref: "Hacker's Delight" by Henry Warren 7525 for (unsigned i = 0; (1U << i) <= (NumBitsPerElt / 2); ++i) { 7526 SDValue Tmp = DAG.getConstant(1ULL << i, dl, ShVT); 7527 Op = DAG.getNode(ISD::OR, dl, VT, Op, 7528 DAG.getNode(ISD::SRL, dl, VT, Op, Tmp)); 7529 } 7530 Op = DAG.getNOT(dl, Op, VT); 7531 return DAG.getNode(ISD::CTPOP, dl, VT, Op); 7532 } 7533 7534 SDValue TargetLowering::expandCTTZ(SDNode *Node, SelectionDAG &DAG) const { 7535 SDLoc dl(Node); 7536 EVT VT = Node->getValueType(0); 7537 SDValue Op = Node->getOperand(0); 7538 unsigned NumBitsPerElt = VT.getScalarSizeInBits(); 7539 7540 // If the non-ZERO_UNDEF version is supported we can use that instead. 7541 if (Node->getOpcode() == ISD::CTTZ_ZERO_UNDEF && 7542 isOperationLegalOrCustom(ISD::CTTZ, VT)) 7543 return DAG.getNode(ISD::CTTZ, dl, VT, Op); 7544 7545 // If the ZERO_UNDEF version is supported use that and handle the zero case. 7546 if (isOperationLegalOrCustom(ISD::CTTZ_ZERO_UNDEF, VT)) { 7547 EVT SetCCVT = 7548 getSetCCResultType(DAG.getDataLayout(), *DAG.getContext(), VT); 7549 SDValue CTTZ = DAG.getNode(ISD::CTTZ_ZERO_UNDEF, dl, VT, Op); 7550 SDValue Zero = DAG.getConstant(0, dl, VT); 7551 SDValue SrcIsZero = DAG.getSetCC(dl, SetCCVT, Op, Zero, ISD::SETEQ); 7552 return DAG.getSelect(dl, VT, SrcIsZero, 7553 DAG.getConstant(NumBitsPerElt, dl, VT), CTTZ); 7554 } 7555 7556 // Only expand vector types if we have the appropriate vector bit operations. 7557 // This includes the operations needed to expand CTPOP if it isn't supported. 7558 if (VT.isVector() && (!isPowerOf2_32(NumBitsPerElt) || 7559 (!isOperationLegalOrCustom(ISD::CTPOP, VT) && 7560 !isOperationLegalOrCustom(ISD::CTLZ, VT) && 7561 !canExpandVectorCTPOP(*this, VT)) || 7562 !isOperationLegalOrCustom(ISD::SUB, VT) || 7563 !isOperationLegalOrCustomOrPromote(ISD::AND, VT) || 7564 !isOperationLegalOrCustomOrPromote(ISD::XOR, VT))) 7565 return SDValue(); 7566 7567 // for now, we use: { return popcount(~x & (x - 1)); } 7568 // unless the target has ctlz but not ctpop, in which case we use: 7569 // { return 32 - nlz(~x & (x-1)); } 7570 // Ref: "Hacker's Delight" by Henry Warren 7571 SDValue Tmp = DAG.getNode( 7572 ISD::AND, dl, VT, DAG.getNOT(dl, Op, VT), 7573 DAG.getNode(ISD::SUB, dl, VT, Op, DAG.getConstant(1, dl, VT))); 7574 7575 // If ISD::CTLZ is legal and CTPOP isn't, then do that instead. 
7576 if (isOperationLegal(ISD::CTLZ, VT) && !isOperationLegal(ISD::CTPOP, VT)) { 7577 return DAG.getNode(ISD::SUB, dl, VT, DAG.getConstant(NumBitsPerElt, dl, VT), 7578 DAG.getNode(ISD::CTLZ, dl, VT, Tmp)); 7579 } 7580 7581 return DAG.getNode(ISD::CTPOP, dl, VT, Tmp); 7582 } 7583 7584 SDValue TargetLowering::expandABS(SDNode *N, SelectionDAG &DAG, 7585 bool IsNegative) const { 7586 SDLoc dl(N); 7587 EVT VT = N->getValueType(0); 7588 EVT ShVT = getShiftAmountTy(VT, DAG.getDataLayout()); 7589 SDValue Op = N->getOperand(0); 7590 7591 // abs(x) -> smax(x,sub(0,x)) 7592 if (!IsNegative && isOperationLegal(ISD::SUB, VT) && 7593 isOperationLegal(ISD::SMAX, VT)) { 7594 SDValue Zero = DAG.getConstant(0, dl, VT); 7595 return DAG.getNode(ISD::SMAX, dl, VT, Op, 7596 DAG.getNode(ISD::SUB, dl, VT, Zero, Op)); 7597 } 7598 7599 // abs(x) -> umin(x,sub(0,x)) 7600 if (!IsNegative && isOperationLegal(ISD::SUB, VT) && 7601 isOperationLegal(ISD::UMIN, VT)) { 7602 SDValue Zero = DAG.getConstant(0, dl, VT); 7603 return DAG.getNode(ISD::UMIN, dl, VT, Op, 7604 DAG.getNode(ISD::SUB, dl, VT, Zero, Op)); 7605 } 7606 7607 // 0 - abs(x) -> smin(x, sub(0,x)) 7608 if (IsNegative && isOperationLegal(ISD::SUB, VT) && 7609 isOperationLegal(ISD::SMIN, VT)) { 7610 SDValue Zero = DAG.getConstant(0, dl, VT); 7611 return DAG.getNode(ISD::SMIN, dl, VT, Op, 7612 DAG.getNode(ISD::SUB, dl, VT, Zero, Op)); 7613 } 7614 7615 // Only expand vector types if we have the appropriate vector operations. 7616 if (VT.isVector() && 7617 (!isOperationLegalOrCustom(ISD::SRA, VT) || 7618 (!IsNegative && !isOperationLegalOrCustom(ISD::ADD, VT)) || 7619 (IsNegative && !isOperationLegalOrCustom(ISD::SUB, VT)) || 7620 !isOperationLegalOrCustomOrPromote(ISD::XOR, VT))) 7621 return SDValue(); 7622 7623 SDValue Shift = 7624 DAG.getNode(ISD::SRA, dl, VT, Op, 7625 DAG.getConstant(VT.getScalarSizeInBits() - 1, dl, ShVT)); 7626 SDValue Xor = DAG.getNode(ISD::XOR, dl, VT, Op, Shift); 7627 7628 // abs(x) -> Y = sra (X, size(X)-1); sub (xor (X, Y), Y) 7629 if (!IsNegative) 7630 return DAG.getNode(ISD::SUB, dl, VT, Xor, Shift); 7631 7632 // 0 - abs(x) -> Y = sra (X, size(X)-1); sub (Y, xor (X, Y)) 7633 return DAG.getNode(ISD::SUB, dl, VT, Shift, Xor); 7634 } 7635 7636 SDValue TargetLowering::expandBSWAP(SDNode *N, SelectionDAG &DAG) const { 7637 SDLoc dl(N); 7638 EVT VT = N->getValueType(0); 7639 SDValue Op = N->getOperand(0); 7640 7641 if (!VT.isSimple()) 7642 return SDValue(); 7643 7644 EVT SHVT = getShiftAmountTy(VT, DAG.getDataLayout()); 7645 SDValue Tmp1, Tmp2, Tmp3, Tmp4, Tmp5, Tmp6, Tmp7, Tmp8; 7646 switch (VT.getSimpleVT().getScalarType().SimpleTy) { 7647 default: 7648 return SDValue(); 7649 case MVT::i16: 7650 // Use a rotate by 8. This can be further expanded if necessary. 
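// E.g. bswap(i16 0xAABB) == rotl(0xAABB, 8) == 0xBBAA.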
7651 return DAG.getNode(ISD::ROTL, dl, VT, Op, DAG.getConstant(8, dl, SHVT));
7652 case MVT::i32:
7653 Tmp4 = DAG.getNode(ISD::SHL, dl, VT, Op, DAG.getConstant(24, dl, SHVT));
7654 Tmp3 = DAG.getNode(ISD::SHL, dl, VT, Op, DAG.getConstant(8, dl, SHVT));
7655 Tmp2 = DAG.getNode(ISD::SRL, dl, VT, Op, DAG.getConstant(8, dl, SHVT));
7656 Tmp1 = DAG.getNode(ISD::SRL, dl, VT, Op, DAG.getConstant(24, dl, SHVT));
7657 Tmp3 = DAG.getNode(ISD::AND, dl, VT, Tmp3,
7658 DAG.getConstant(0xFF0000, dl, VT));
7659 Tmp2 = DAG.getNode(ISD::AND, dl, VT, Tmp2, DAG.getConstant(0xFF00, dl, VT));
7660 Tmp4 = DAG.getNode(ISD::OR, dl, VT, Tmp4, Tmp3);
7661 Tmp2 = DAG.getNode(ISD::OR, dl, VT, Tmp2, Tmp1);
7662 return DAG.getNode(ISD::OR, dl, VT, Tmp4, Tmp2);
7663 case MVT::i64:
7664 Tmp8 = DAG.getNode(ISD::SHL, dl, VT, Op, DAG.getConstant(56, dl, SHVT));
7665 Tmp7 = DAG.getNode(ISD::SHL, dl, VT, Op, DAG.getConstant(40, dl, SHVT));
7666 Tmp6 = DAG.getNode(ISD::SHL, dl, VT, Op, DAG.getConstant(24, dl, SHVT));
7667 Tmp5 = DAG.getNode(ISD::SHL, dl, VT, Op, DAG.getConstant(8, dl, SHVT));
7668 Tmp4 = DAG.getNode(ISD::SRL, dl, VT, Op, DAG.getConstant(8, dl, SHVT));
7669 Tmp3 = DAG.getNode(ISD::SRL, dl, VT, Op, DAG.getConstant(24, dl, SHVT));
7670 Tmp2 = DAG.getNode(ISD::SRL, dl, VT, Op, DAG.getConstant(40, dl, SHVT));
7671 Tmp1 = DAG.getNode(ISD::SRL, dl, VT, Op, DAG.getConstant(56, dl, SHVT));
7672 Tmp7 = DAG.getNode(ISD::AND, dl, VT, Tmp7,
7673 DAG.getConstant(255ULL<<48, dl, VT));
7674 Tmp6 = DAG.getNode(ISD::AND, dl, VT, Tmp6,
7675 DAG.getConstant(255ULL<<40, dl, VT));
7676 Tmp5 = DAG.getNode(ISD::AND, dl, VT, Tmp5,
7677 DAG.getConstant(255ULL<<32, dl, VT));
7678 Tmp4 = DAG.getNode(ISD::AND, dl, VT, Tmp4,
7679 DAG.getConstant(255ULL<<24, dl, VT));
7680 Tmp3 = DAG.getNode(ISD::AND, dl, VT, Tmp3,
7681 DAG.getConstant(255ULL<<16, dl, VT));
7682 Tmp2 = DAG.getNode(ISD::AND, dl, VT, Tmp2,
7683 DAG.getConstant(255ULL<<8, dl, VT));
7684 Tmp8 = DAG.getNode(ISD::OR, dl, VT, Tmp8, Tmp7);
7685 Tmp6 = DAG.getNode(ISD::OR, dl, VT, Tmp6, Tmp5);
7686 Tmp4 = DAG.getNode(ISD::OR, dl, VT, Tmp4, Tmp3);
7687 Tmp2 = DAG.getNode(ISD::OR, dl, VT, Tmp2, Tmp1);
7688 Tmp8 = DAG.getNode(ISD::OR, dl, VT, Tmp8, Tmp6);
7689 Tmp4 = DAG.getNode(ISD::OR, dl, VT, Tmp4, Tmp2);
7690 return DAG.getNode(ISD::OR, dl, VT, Tmp8, Tmp4);
7691 }
7692 }
7693 
7694 SDValue TargetLowering::expandBITREVERSE(SDNode *N, SelectionDAG &DAG) const {
7695 SDLoc dl(N);
7696 EVT VT = N->getValueType(0);
7697 SDValue Op = N->getOperand(0);
7698 EVT SHVT = getShiftAmountTy(VT, DAG.getDataLayout());
7699 unsigned Sz = VT.getScalarSizeInBits();
7700 
7701 SDValue Tmp, Tmp2, Tmp3;
7702 
7703 // If we can, perform BSWAP first and then mask+swap the i4 pairs, then the
7704 // i2 pairs, and finally the i1 pairs.
7705 // TODO: We can easily support i4/i2 legal types if any target ever does.
7706 if (Sz >= 8 && isPowerOf2_32(Sz)) {
7707 // Create the masks - repeating the pattern every byte.
7708 APInt Mask4 = APInt::getSplat(Sz, APInt(8, 0x0F));
7709 APInt Mask2 = APInt::getSplat(Sz, APInt(8, 0x33));
7710 APInt Mask1 = APInt::getSplat(Sz, APInt(8, 0x55));
7711 
7712 // BSWAP if the type is wider than a single byte.
7713 Tmp = (Sz > 8 ?
DAG.getNode(ISD::BSWAP, dl, VT, Op) : Op);
7714 
7715 // swap i4: ((V >> 4) & 0x0F) | ((V & 0x0F) << 4)
7716 Tmp2 = DAG.getNode(ISD::SRL, dl, VT, Tmp, DAG.getConstant(4, dl, SHVT));
7717 Tmp2 = DAG.getNode(ISD::AND, dl, VT, Tmp2, DAG.getConstant(Mask4, dl, VT));
7718 Tmp3 = DAG.getNode(ISD::AND, dl, VT, Tmp, DAG.getConstant(Mask4, dl, VT));
7719 Tmp3 = DAG.getNode(ISD::SHL, dl, VT, Tmp3, DAG.getConstant(4, dl, SHVT));
7720 Tmp = DAG.getNode(ISD::OR, dl, VT, Tmp2, Tmp3);
7721 
7722 // swap i2: ((V >> 2) & 0x33) | ((V & 0x33) << 2)
7723 Tmp2 = DAG.getNode(ISD::SRL, dl, VT, Tmp, DAG.getConstant(2, dl, SHVT));
7724 Tmp2 = DAG.getNode(ISD::AND, dl, VT, Tmp2, DAG.getConstant(Mask2, dl, VT));
7725 Tmp3 = DAG.getNode(ISD::AND, dl, VT, Tmp, DAG.getConstant(Mask2, dl, VT));
7726 Tmp3 = DAG.getNode(ISD::SHL, dl, VT, Tmp3, DAG.getConstant(2, dl, SHVT));
7727 Tmp = DAG.getNode(ISD::OR, dl, VT, Tmp2, Tmp3);
7728 
7729 // swap i1: ((V >> 1) & 0x55) | ((V & 0x55) << 1)
7730 Tmp2 = DAG.getNode(ISD::SRL, dl, VT, Tmp, DAG.getConstant(1, dl, SHVT));
7731 Tmp2 = DAG.getNode(ISD::AND, dl, VT, Tmp2, DAG.getConstant(Mask1, dl, VT));
7732 Tmp3 = DAG.getNode(ISD::AND, dl, VT, Tmp, DAG.getConstant(Mask1, dl, VT));
7733 Tmp3 = DAG.getNode(ISD::SHL, dl, VT, Tmp3, DAG.getConstant(1, dl, SHVT));
7734 Tmp = DAG.getNode(ISD::OR, dl, VT, Tmp2, Tmp3);
7735 return Tmp;
7736 }
7737 
7738 Tmp = DAG.getConstant(0, dl, VT);
7739 for (unsigned I = 0, J = Sz-1; I < Sz; ++I, --J) {
7740 if (I < J)
7741 Tmp2 =
7742 DAG.getNode(ISD::SHL, dl, VT, Op, DAG.getConstant(J - I, dl, SHVT));
7743 else
7744 Tmp2 =
7745 DAG.getNode(ISD::SRL, dl, VT, Op, DAG.getConstant(I - J, dl, SHVT));
7746 
7747 APInt Shift(Sz, 1);
7748 Shift <<= J;
7749 Tmp2 = DAG.getNode(ISD::AND, dl, VT, Tmp2, DAG.getConstant(Shift, dl, VT));
7750 Tmp = DAG.getNode(ISD::OR, dl, VT, Tmp, Tmp2);
7751 }
7752 
7753 return Tmp;
7754 }
7755 
7756 std::pair<SDValue, SDValue>
7757 TargetLowering::scalarizeVectorLoad(LoadSDNode *LD,
7758 SelectionDAG &DAG) const {
7759 SDLoc SL(LD);
7760 SDValue Chain = LD->getChain();
7761 SDValue BasePTR = LD->getBasePtr();
7762 EVT SrcVT = LD->getMemoryVT();
7763 EVT DstVT = LD->getValueType(0);
7764 ISD::LoadExtType ExtType = LD->getExtensionType();
7765 
7766 if (SrcVT.isScalableVector())
7767 report_fatal_error("Cannot scalarize scalable vector loads");
7768 
7769 unsigned NumElem = SrcVT.getVectorNumElements();
7770 
7771 EVT SrcEltVT = SrcVT.getScalarType();
7772 EVT DstEltVT = DstVT.getScalarType();
7773 
7774 // A vector must always be stored in memory as-is, i.e. without any padding
7775 // between the elements, since various pieces of code depend on it, e.g. in
7776 // the handling of a bitcast of a vector type to int, which may be done with
7777 // a vector store followed by an integer load. A vector that does not have
7778 // elements that are byte-sized must therefore be stored as an integer
7779 // built out of the extracted vector elements.
7780 if (!SrcEltVT.isByteSized()) {
7781 unsigned NumLoadBits = SrcVT.getStoreSizeInBits();
7782 EVT LoadVT = EVT::getIntegerVT(*DAG.getContext(), NumLoadBits);
7783 
7784 unsigned NumSrcBits = SrcVT.getSizeInBits();
7785 EVT SrcIntVT = EVT::getIntegerVT(*DAG.getContext(), NumSrcBits);
7786 
7787 unsigned SrcEltBits = SrcEltVT.getSizeInBits();
7788 SDValue SrcEltBitMask = DAG.getConstant(
7789 APInt::getLowBitsSet(NumLoadBits, SrcEltBits), SL, LoadVT);
7790 
7791 // Load the whole vector and avoid masking off the top bits as it makes
7792 // the codegen worse.
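// E.g. for a v4i1 load, SrcIntVT is i4 but LoadVT is i8 (the store size);
// on little-endian targets element Idx is then recovered below as
// trunc((Load >> Idx) & 1), while big-endian indexes from the top down.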
7793 SDValue Load =
7794 DAG.getExtLoad(ISD::EXTLOAD, SL, LoadVT, Chain, BasePTR,
7795 LD->getPointerInfo(), SrcIntVT, LD->getOriginalAlign(),
7796 LD->getMemOperand()->getFlags(), LD->getAAInfo());
7797 
7798 SmallVector<SDValue, 8> Vals;
7799 for (unsigned Idx = 0; Idx < NumElem; ++Idx) {
7800 unsigned ShiftIntoIdx =
7801 (DAG.getDataLayout().isBigEndian() ? (NumElem - 1) - Idx : Idx);
7802 SDValue ShiftAmount =
7803 DAG.getShiftAmountConstant(ShiftIntoIdx * SrcEltVT.getSizeInBits(),
7804 LoadVT, SL, /*LegalTypes=*/false);
7805 SDValue ShiftedElt = DAG.getNode(ISD::SRL, SL, LoadVT, Load, ShiftAmount);
7806 SDValue Elt =
7807 DAG.getNode(ISD::AND, SL, LoadVT, ShiftedElt, SrcEltBitMask);
7808 SDValue Scalar = DAG.getNode(ISD::TRUNCATE, SL, SrcEltVT, Elt);
7809 
7810 if (ExtType != ISD::NON_EXTLOAD) {
7811 unsigned ExtendOp = ISD::getExtForLoadExtType(false, ExtType);
7812 Scalar = DAG.getNode(ExtendOp, SL, DstEltVT, Scalar);
7813 }
7814 
7815 Vals.push_back(Scalar);
7816 }
7817 
7818 SDValue Value = DAG.getBuildVector(DstVT, SL, Vals);
7819 return std::make_pair(Value, Load.getValue(1));
7820 }
7821 
7822 unsigned Stride = SrcEltVT.getSizeInBits() / 8;
7823 assert(SrcEltVT.isByteSized());
7824 
7825 SmallVector<SDValue, 8> Vals;
7826 SmallVector<SDValue, 8> LoadChains;
7827 
7828 for (unsigned Idx = 0; Idx < NumElem; ++Idx) {
7829 SDValue ScalarLoad =
7830 DAG.getExtLoad(ExtType, SL, DstEltVT, Chain, BasePTR,
7831 LD->getPointerInfo().getWithOffset(Idx * Stride),
7832 SrcEltVT, LD->getOriginalAlign(),
7833 LD->getMemOperand()->getFlags(), LD->getAAInfo());
7834 
7835 BasePTR = DAG.getObjectPtrOffset(SL, BasePTR, TypeSize::Fixed(Stride));
7836 
7837 Vals.push_back(ScalarLoad.getValue(0));
7838 LoadChains.push_back(ScalarLoad.getValue(1));
7839 }
7840 
7841 SDValue NewChain = DAG.getNode(ISD::TokenFactor, SL, MVT::Other, LoadChains);
7842 SDValue Value = DAG.getBuildVector(DstVT, SL, Vals);
7843 
7844 return std::make_pair(Value, NewChain);
7845 }
7846 
7847 SDValue TargetLowering::scalarizeVectorStore(StoreSDNode *ST,
7848 SelectionDAG &DAG) const {
7849 SDLoc SL(ST);
7850 
7851 SDValue Chain = ST->getChain();
7852 SDValue BasePtr = ST->getBasePtr();
7853 SDValue Value = ST->getValue();
7854 EVT StVT = ST->getMemoryVT();
7855 
7856 if (StVT.isScalableVector())
7857 report_fatal_error("Cannot scalarize scalable vector stores");
7858 
7859 // The type of the data we want to save.
7860 EVT RegVT = Value.getValueType();
7861 EVT RegSclVT = RegVT.getScalarType();
7862 
7863 // The type of data as saved in memory.
7864 EVT MemSclVT = StVT.getScalarType();
7865 
7866 unsigned NumElem = StVT.getVectorNumElements();
7867 
7868 // A vector must always be stored in memory as-is, i.e. without any padding
7869 // between the elements, since various pieces of code depend on it, e.g. in
7870 // the handling of a bitcast of a vector type to int, which may be done with
7871 // a vector store followed by an integer load. A vector that does not have
7872 // elements that are byte-sized must therefore be stored as an integer
7873 // built out of the extracted vector elements.
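// E.g. a v4i1 store becomes a single integer store: each element is
// truncated to i1, zero-extended to i4, shifted into its bit position
// and OR'd into one accumulated i4 value.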
7874 if (!MemSclVT.isByteSized()) {
7875 unsigned NumBits = StVT.getSizeInBits();
7876 EVT IntVT = EVT::getIntegerVT(*DAG.getContext(), NumBits);
7877 
7878 SDValue CurrVal = DAG.getConstant(0, SL, IntVT);
7879 
7880 for (unsigned Idx = 0; Idx < NumElem; ++Idx) {
7881 SDValue Elt = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, SL, RegSclVT, Value,
7882 DAG.getVectorIdxConstant(Idx, SL));
7883 SDValue Trunc = DAG.getNode(ISD::TRUNCATE, SL, MemSclVT, Elt);
7884 SDValue ExtElt = DAG.getNode(ISD::ZERO_EXTEND, SL, IntVT, Trunc);
7885 unsigned ShiftIntoIdx =
7886 (DAG.getDataLayout().isBigEndian() ? (NumElem - 1) - Idx : Idx);
7887 SDValue ShiftAmount =
7888 DAG.getConstant(ShiftIntoIdx * MemSclVT.getSizeInBits(), SL, IntVT);
7889 SDValue ShiftedElt =
7890 DAG.getNode(ISD::SHL, SL, IntVT, ExtElt, ShiftAmount);
7891 CurrVal = DAG.getNode(ISD::OR, SL, IntVT, CurrVal, ShiftedElt);
7892 }
7893 
7894 return DAG.getStore(Chain, SL, CurrVal, BasePtr, ST->getPointerInfo(),
7895 ST->getOriginalAlign(), ST->getMemOperand()->getFlags(),
7896 ST->getAAInfo());
7897 }
7898 
7899 // Store Stride in bytes.
7900 unsigned Stride = MemSclVT.getSizeInBits() / 8;
7901 assert(Stride && "Zero stride!");
7902 // Extract each of the elements from the original vector and save them into
7903 // memory individually.
7904 SmallVector<SDValue, 8> Stores;
7905 for (unsigned Idx = 0; Idx < NumElem; ++Idx) {
7906 SDValue Elt = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, SL, RegSclVT, Value,
7907 DAG.getVectorIdxConstant(Idx, SL));
7908 
7909 SDValue Ptr =
7910 DAG.getObjectPtrOffset(SL, BasePtr, TypeSize::Fixed(Idx * Stride));
7911 
7912 // This scalar TruncStore may be illegal, but we legalize it later.
7913 SDValue Store = DAG.getTruncStore(
7914 Chain, SL, Elt, Ptr, ST->getPointerInfo().getWithOffset(Idx * Stride),
7915 MemSclVT, ST->getOriginalAlign(), ST->getMemOperand()->getFlags(),
7916 ST->getAAInfo());
7917 
7918 Stores.push_back(Store);
7919 }
7920 
7921 return DAG.getNode(ISD::TokenFactor, SL, MVT::Other, Stores);
7922 }
7923 
7924 std::pair<SDValue, SDValue>
7925 TargetLowering::expandUnalignedLoad(LoadSDNode *LD, SelectionDAG &DAG) const {
7926 assert(LD->getAddressingMode() == ISD::UNINDEXED &&
7927 "unaligned indexed loads not implemented!");
7928 SDValue Chain = LD->getChain();
7929 SDValue Ptr = LD->getBasePtr();
7930 EVT VT = LD->getValueType(0);
7931 EVT LoadedVT = LD->getMemoryVT();
7932 SDLoc dl(LD);
7933 auto &MF = DAG.getMachineFunction();
7934 
7935 if (VT.isFloatingPoint() || VT.isVector()) {
7936 EVT intVT = EVT::getIntegerVT(*DAG.getContext(), LoadedVT.getSizeInBits());
7937 if (isTypeLegal(intVT) && isTypeLegal(LoadedVT)) {
7938 if (!isOperationLegalOrCustom(ISD::LOAD, intVT) &&
7939 LoadedVT.isVector()) {
7940 // Scalarize the load and let the individual components be handled.
7941 return scalarizeVectorLoad(LD, DAG);
7942 }
7943 
7944 // Expand to a (misaligned) integer load of the same size,
7945 // then bitconvert to floating point or vector.
7946 SDValue newLoad = DAG.getLoad(intVT, dl, Chain, Ptr,
7947 LD->getMemOperand());
7948 SDValue Result = DAG.getNode(ISD::BITCAST, dl, LoadedVT, newLoad);
7949 if (LoadedVT != VT)
7950 Result = DAG.getNode(VT.isFloatingPoint() ? ISD::FP_EXTEND :
7951 ISD::ANY_EXTEND, dl, VT, Result);
7952 
7953 return std::make_pair(Result, newLoad.getValue(1));
7954 }
7955 
7956 // Copy the value to an (aligned) stack slot using (unaligned) integer
7957 // loads and stores, then do an (aligned) load from the stack slot.
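// E.g. a misaligned f80 load (10 stored bytes) on a target whose widest
// legal integer register is i32 makes two full 4-byte copies and one
// 2-byte extending copy into the slot, then reloads the f80 from it.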
7958 MVT RegVT = getRegisterType(*DAG.getContext(), intVT);
7959 unsigned LoadedBytes = LoadedVT.getStoreSize();
7960 unsigned RegBytes = RegVT.getSizeInBits() / 8;
7961 unsigned NumRegs = (LoadedBytes + RegBytes - 1) / RegBytes;
7962 
7963 // Make sure the stack slot is also aligned for the register type.
7964 SDValue StackBase = DAG.CreateStackTemporary(LoadedVT, RegVT);
7965 auto FrameIndex = cast<FrameIndexSDNode>(StackBase.getNode())->getIndex();
7966 SmallVector<SDValue, 8> Stores;
7967 SDValue StackPtr = StackBase;
7968 unsigned Offset = 0;
7969 
7970 EVT PtrVT = Ptr.getValueType();
7971 EVT StackPtrVT = StackPtr.getValueType();
7972 
7973 SDValue PtrIncrement = DAG.getConstant(RegBytes, dl, PtrVT);
7974 SDValue StackPtrIncrement = DAG.getConstant(RegBytes, dl, StackPtrVT);
7975 
7976 // Do all but one of the copies using the full register width.
7977 for (unsigned i = 1; i < NumRegs; i++) {
7978 // Load one integer register's worth from the original location.
7979 SDValue Load = DAG.getLoad(
7980 RegVT, dl, Chain, Ptr, LD->getPointerInfo().getWithOffset(Offset),
7981 LD->getOriginalAlign(), LD->getMemOperand()->getFlags(),
7982 LD->getAAInfo());
7983 // Follow the load with a store to the stack slot. Remember the store.
7984 Stores.push_back(DAG.getStore(
7985 Load.getValue(1), dl, Load, StackPtr,
7986 MachinePointerInfo::getFixedStack(MF, FrameIndex, Offset)));
7987 // Increment the pointers.
7988 Offset += RegBytes;
7989 
7990 Ptr = DAG.getObjectPtrOffset(dl, Ptr, PtrIncrement);
7991 StackPtr = DAG.getObjectPtrOffset(dl, StackPtr, StackPtrIncrement);
7992 }
7993 
7994 // The last copy may be partial. Do an extending load.
7995 EVT MemVT = EVT::getIntegerVT(*DAG.getContext(),
7996 8 * (LoadedBytes - Offset));
7997 SDValue Load =
7998 DAG.getExtLoad(ISD::EXTLOAD, dl, RegVT, Chain, Ptr,
7999 LD->getPointerInfo().getWithOffset(Offset), MemVT,
8000 LD->getOriginalAlign(), LD->getMemOperand()->getFlags(),
8001 LD->getAAInfo());
8002 // Follow the load with a store to the stack slot. Remember the store.
8003 // On big-endian machines this requires a truncating store to ensure
8004 // that the bits end up in the right place.
8005 Stores.push_back(DAG.getTruncStore(
8006 Load.getValue(1), dl, Load, StackPtr,
8007 MachinePointerInfo::getFixedStack(MF, FrameIndex, Offset), MemVT));
8008 
8009 // The order of the stores doesn't matter - say it with a TokenFactor.
8010 SDValue TF = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, Stores);
8011 
8012 // Finally, perform the original load only redirected to the stack slot.
8013 Load = DAG.getExtLoad(LD->getExtensionType(), dl, VT, TF, StackBase,
8014 MachinePointerInfo::getFixedStack(MF, FrameIndex, 0),
8015 LoadedVT);
8016 
8017 // Callers expect a MERGE_VALUES node.
8018 return std::make_pair(Load, TF);
8019 }
8020 
8021 assert(LoadedVT.isInteger() && !LoadedVT.isVector() &&
8022 "Unaligned load of unsupported type.");
8023 
8024 // Compute the new VT that is half the size of the old one. This is an
8025 // integer MVT.
8026 unsigned NumBits = LoadedVT.getSizeInBits();
8027 EVT NewLoadedVT;
8028 NewLoadedVT = EVT::getIntegerVT(*DAG.getContext(), NumBits/2);
8029 NumBits >>= 1;
8030 
8031 Align Alignment = LD->getOriginalAlign();
8032 unsigned IncrementSize = NumBits / 8;
8033 ISD::LoadExtType HiExtType = LD->getExtensionType();
8034 
8035 // If the original load is NON_EXTLOAD, the hi part load must be ZEXTLOAD.
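// The low half below is always zero-extended for the same reason: the two
// halves are recombined as (Hi << NumBits) | Lo, so stray high bits in Lo
// would corrupt the OR. E.g. an unaligned i32 load becomes two i16 loads
// glued together as (Hi << 16) | Lo.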
8036 if (HiExtType == ISD::NON_EXTLOAD)
8037 HiExtType = ISD::ZEXTLOAD;
8038 
8039 // Load the value in two parts.
8040 SDValue Lo, Hi;
8041 if (DAG.getDataLayout().isLittleEndian()) {
8042 Lo = DAG.getExtLoad(ISD::ZEXTLOAD, dl, VT, Chain, Ptr, LD->getPointerInfo(),
8043 NewLoadedVT, Alignment, LD->getMemOperand()->getFlags(),
8044 LD->getAAInfo());
8045 
8046 Ptr = DAG.getObjectPtrOffset(dl, Ptr, TypeSize::Fixed(IncrementSize));
8047 Hi = DAG.getExtLoad(HiExtType, dl, VT, Chain, Ptr,
8048 LD->getPointerInfo().getWithOffset(IncrementSize),
8049 NewLoadedVT, Alignment, LD->getMemOperand()->getFlags(),
8050 LD->getAAInfo());
8051 } else {
8052 Hi = DAG.getExtLoad(HiExtType, dl, VT, Chain, Ptr, LD->getPointerInfo(),
8053 NewLoadedVT, Alignment, LD->getMemOperand()->getFlags(),
8054 LD->getAAInfo());
8055 
8056 Ptr = DAG.getObjectPtrOffset(dl, Ptr, TypeSize::Fixed(IncrementSize));
8057 Lo = DAG.getExtLoad(ISD::ZEXTLOAD, dl, VT, Chain, Ptr,
8058 LD->getPointerInfo().getWithOffset(IncrementSize),
8059 NewLoadedVT, Alignment, LD->getMemOperand()->getFlags(),
8060 LD->getAAInfo());
8061 }
8062 
8063 // Aggregate the two parts.
8064 SDValue ShiftAmount =
8065 DAG.getConstant(NumBits, dl, getShiftAmountTy(Hi.getValueType(),
8066 DAG.getDataLayout()));
8067 SDValue Result = DAG.getNode(ISD::SHL, dl, VT, Hi, ShiftAmount);
8068 Result = DAG.getNode(ISD::OR, dl, VT, Result, Lo);
8069 
8070 SDValue TF = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, Lo.getValue(1),
8071 Hi.getValue(1));
8072 
8073 return std::make_pair(Result, TF);
8074 }
8075 
8076 SDValue TargetLowering::expandUnalignedStore(StoreSDNode *ST,
8077 SelectionDAG &DAG) const {
8078 assert(ST->getAddressingMode() == ISD::UNINDEXED &&
8079 "unaligned indexed stores not implemented!");
8080 SDValue Chain = ST->getChain();
8081 SDValue Ptr = ST->getBasePtr();
8082 SDValue Val = ST->getValue();
8083 EVT VT = Val.getValueType();
8084 Align Alignment = ST->getOriginalAlign();
8085 auto &MF = DAG.getMachineFunction();
8086 EVT StoreMemVT = ST->getMemoryVT();
8087 
8088 SDLoc dl(ST);
8089 if (StoreMemVT.isFloatingPoint() || StoreMemVT.isVector()) {
8090 EVT intVT = EVT::getIntegerVT(*DAG.getContext(), VT.getSizeInBits());
8091 if (isTypeLegal(intVT)) {
8092 if (!isOperationLegalOrCustom(ISD::STORE, intVT) &&
8093 StoreMemVT.isVector()) {
8094 // Scalarize the store and let the individual components be handled.
8095 SDValue Result = scalarizeVectorStore(ST, DAG);
8096 return Result;
8097 }
8098 // Expand to a bitconvert of the value to the integer type of the
8099 // same size, then a (misaligned) int store.
8100 // FIXME: Does not handle truncating floating point stores!
8101 SDValue Result = DAG.getNode(ISD::BITCAST, dl, intVT, Val);
8102 Result = DAG.getStore(Chain, dl, Result, Ptr, ST->getPointerInfo(),
8103 Alignment, ST->getMemOperand()->getFlags());
8104 return Result;
8105 }
8106 // Do an (aligned) store to a stack slot, then copy from the stack slot
8107 // to the final destination using (unaligned) integer loads and stores.
8108 MVT RegVT = getRegisterType(
8109 *DAG.getContext(),
8110 EVT::getIntegerVT(*DAG.getContext(), StoreMemVT.getSizeInBits()));
8111 EVT PtrVT = Ptr.getValueType();
8112 unsigned StoredBytes = StoreMemVT.getStoreSize();
8113 unsigned RegBytes = RegVT.getSizeInBits() / 8;
8114 unsigned NumRegs = (StoredBytes + RegBytes - 1) / RegBytes;
8115 
8116 // Make sure the stack slot is also aligned for the register type.
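// (CreateStackTemporary with two types picks the larger size and the
// stricter alignment of the pair, so the register-width copies below stay
// aligned. E.g. a misaligned 10-byte store through i32 registers copies
// two full registers and finishes with a 2-byte truncating store.)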
8117 SDValue StackPtr = DAG.CreateStackTemporary(StoreMemVT, RegVT);
8118 auto FrameIndex = cast<FrameIndexSDNode>(StackPtr.getNode())->getIndex();
8119 
8120 // Perform the original store, only redirected to the stack slot.
8121 SDValue Store = DAG.getTruncStore(
8122 Chain, dl, Val, StackPtr,
8123 MachinePointerInfo::getFixedStack(MF, FrameIndex, 0), StoreMemVT);
8124 
8125 EVT StackPtrVT = StackPtr.getValueType();
8126 
8127 SDValue PtrIncrement = DAG.getConstant(RegBytes, dl, PtrVT);
8128 SDValue StackPtrIncrement = DAG.getConstant(RegBytes, dl, StackPtrVT);
8129 SmallVector<SDValue, 8> Stores;
8130 unsigned Offset = 0;
8131 
8132 // Do all but one of the copies using the full register width.
8133 for (unsigned i = 1; i < NumRegs; i++) {
8134 // Load one integer register's worth from the stack slot.
8135 SDValue Load = DAG.getLoad(
8136 RegVT, dl, Store, StackPtr,
8137 MachinePointerInfo::getFixedStack(MF, FrameIndex, Offset));
8138 // Store it to the final location. Remember the store.
8139 Stores.push_back(DAG.getStore(Load.getValue(1), dl, Load, Ptr,
8140 ST->getPointerInfo().getWithOffset(Offset),
8141 ST->getOriginalAlign(),
8142 ST->getMemOperand()->getFlags()));
8143 // Increment the pointers.
8144 Offset += RegBytes;
8145 StackPtr = DAG.getObjectPtrOffset(dl, StackPtr, StackPtrIncrement);
8146 Ptr = DAG.getObjectPtrOffset(dl, Ptr, PtrIncrement);
8147 }
8148 
8149 // The last store may be partial. Do a truncating store. On big-endian
8150 // machines this requires an extending load from the stack slot to ensure
8151 // that the bits are in the right place.
8152 EVT LoadMemVT =
8153 EVT::getIntegerVT(*DAG.getContext(), 8 * (StoredBytes - Offset));
8154 
8155 // Load from the stack slot.
8156 SDValue Load = DAG.getExtLoad(
8157 ISD::EXTLOAD, dl, RegVT, Store, StackPtr,
8158 MachinePointerInfo::getFixedStack(MF, FrameIndex, Offset), LoadMemVT);
8159 
8160 Stores.push_back(
8161 DAG.getTruncStore(Load.getValue(1), dl, Load, Ptr,
8162 ST->getPointerInfo().getWithOffset(Offset), LoadMemVT,
8163 ST->getOriginalAlign(),
8164 ST->getMemOperand()->getFlags(), ST->getAAInfo()));
8165 // The order of the stores doesn't matter - say it with a TokenFactor.
8166 SDValue Result = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, Stores);
8167 return Result;
8168 }
8169 
8170 assert(StoreMemVT.isInteger() && !StoreMemVT.isVector() &&
8171 "Unaligned store of unknown type.");
8172 // Get the half-size VT.
8173 EVT NewStoredVT = StoreMemVT.getHalfSizedIntegerVT(*DAG.getContext());
8174 unsigned NumBits = NewStoredVT.getFixedSizeInBits();
8175 unsigned IncrementSize = NumBits / 8;
8176 
8177 // Divide the stored value into two parts.
8178 SDValue ShiftAmount = DAG.getConstant(
8179 NumBits, dl, getShiftAmountTy(Val.getValueType(), DAG.getDataLayout()));
8180 SDValue Lo = Val;
8181 SDValue Hi = DAG.getNode(ISD::SRL, dl, VT, Val, ShiftAmount);
8182 
8183 // Store the two parts.
8184 SDValue Store1, Store2;
8185 Store1 = DAG.getTruncStore(Chain, dl,
8186 DAG.getDataLayout().isLittleEndian() ? Lo : Hi,
8187 Ptr, ST->getPointerInfo(), NewStoredVT, Alignment,
8188 ST->getMemOperand()->getFlags());
8189 
8190 Ptr = DAG.getObjectPtrOffset(dl, Ptr, TypeSize::Fixed(IncrementSize));
8191 Store2 = DAG.getTruncStore(
8192 Chain, dl, DAG.getDataLayout().isLittleEndian() ?
Hi : Lo, Ptr,
8193 ST->getPointerInfo().getWithOffset(IncrementSize), NewStoredVT, Alignment,
8194 ST->getMemOperand()->getFlags(), ST->getAAInfo());
8195 
8196 SDValue Result =
8197 DAG.getNode(ISD::TokenFactor, dl, MVT::Other, Store1, Store2);
8198 return Result;
8199 }
8200 
8201 SDValue
8202 TargetLowering::IncrementMemoryAddress(SDValue Addr, SDValue Mask,
8203 const SDLoc &DL, EVT DataVT,
8204 SelectionDAG &DAG,
8205 bool IsCompressedMemory) const {
8206 SDValue Increment;
8207 EVT AddrVT = Addr.getValueType();
8208 EVT MaskVT = Mask.getValueType();
8209 assert(DataVT.getVectorElementCount() == MaskVT.getVectorElementCount() &&
8210 "Incompatible types of Data and Mask");
8211 if (IsCompressedMemory) {
8212 if (DataVT.isScalableVector())
8213 report_fatal_error(
8214 "Cannot currently handle compressed memory with scalable vectors");
8215 // Increment the pointer according to the number of '1's in the mask.
8216 EVT MaskIntVT = EVT::getIntegerVT(*DAG.getContext(), MaskVT.getSizeInBits());
8217 SDValue MaskInIntReg = DAG.getBitcast(MaskIntVT, Mask);
8218 if (MaskIntVT.getSizeInBits() < 32) {
8219 MaskInIntReg = DAG.getNode(ISD::ZERO_EXTEND, DL, MVT::i32, MaskInIntReg);
8220 MaskIntVT = MVT::i32;
8221 }
8222 
8223 // Count '1's with POPCNT.
8224 Increment = DAG.getNode(ISD::CTPOP, DL, MaskIntVT, MaskInIntReg);
8225 Increment = DAG.getZExtOrTrunc(Increment, DL, AddrVT);
8226 // Scale is the element size in bytes.
8227 SDValue Scale = DAG.getConstant(DataVT.getScalarSizeInBits() / 8, DL,
8228 AddrVT);
8229 Increment = DAG.getNode(ISD::MUL, DL, AddrVT, Increment, Scale);
8230 } else if (DataVT.isScalableVector()) {
8231 Increment = DAG.getVScale(DL, AddrVT,
8232 APInt(AddrVT.getFixedSizeInBits(),
8233 DataVT.getStoreSize().getKnownMinSize()));
8234 } else
8235 Increment = DAG.getConstant(DataVT.getStoreSize(), DL, AddrVT);
8236 
8237 return DAG.getNode(ISD::ADD, DL, AddrVT, Addr, Increment);
8238 }
8239 
8240 static SDValue clampDynamicVectorIndex(SelectionDAG &DAG, SDValue Idx,
8241 EVT VecVT, const SDLoc &dl,
8242 ElementCount SubEC) {
8243 assert(!(SubEC.isScalable() && VecVT.isFixedLengthVector()) &&
8244 "Cannot index a scalable vector within a fixed-width vector");
8245 
8246 unsigned NElts = VecVT.getVectorMinNumElements();
8247 unsigned NumSubElts = SubEC.getKnownMinValue();
8248 EVT IdxVT = Idx.getValueType();
8249 
8250 if (VecVT.isScalableVector() && !SubEC.isScalable()) {
8251 // If this is a constant index and we know the value plus the number of
8252 // elements in the subvector minus one is less than the minimum number of
8253 // elements, then it's safe to return Idx.
8254 if (auto *IdxCst = dyn_cast<ConstantSDNode>(Idx))
8255 if (IdxCst->getZExtValue() + (NumSubElts - 1) < NElts)
8256 return Idx;
8257 SDValue VS =
8258 DAG.getVScale(dl, IdxVT, APInt(IdxVT.getFixedSizeInBits(), NElts));
8259 unsigned SubOpcode = NumSubElts <= NElts ? ISD::SUB : ISD::USUBSAT;
8260 SDValue Sub = DAG.getNode(SubOpcode, dl, IdxVT, VS,
8261 DAG.getConstant(NumSubElts, dl, IdxVT));
8262 return DAG.getNode(ISD::UMIN, dl, IdxVT, Idx, Sub);
8263 }
8264 if (isPowerOf2_32(NElts) && NumSubElts == 1) {
8265 APInt Imm = APInt::getLowBitsSet(IdxVT.getSizeInBits(), Log2_32(NElts));
8266 return DAG.getNode(ISD::AND, dl, IdxVT, Idx,
8267 DAG.getConstant(Imm, dl, IdxVT));
8268 }
8269 unsigned MaxIndex = NumSubElts < NElts ?
NElts - NumSubElts : 0;
8270 return DAG.getNode(ISD::UMIN, dl, IdxVT, Idx,
8271 DAG.getConstant(MaxIndex, dl, IdxVT));
8272 }
8273 
8274 SDValue TargetLowering::getVectorElementPointer(SelectionDAG &DAG,
8275 SDValue VecPtr, EVT VecVT,
8276 SDValue Index) const {
8277 return getVectorSubVecPointer(
8278 DAG, VecPtr, VecVT,
8279 EVT::getVectorVT(*DAG.getContext(), VecVT.getVectorElementType(), 1),
8280 Index);
8281 }
8282 
8283 SDValue TargetLowering::getVectorSubVecPointer(SelectionDAG &DAG,
8284 SDValue VecPtr, EVT VecVT,
8285 EVT SubVecVT,
8286 SDValue Index) const {
8287 SDLoc dl(Index);
8288 // Make sure the index type is big enough to compute in.
8289 Index = DAG.getZExtOrTrunc(Index, dl, VecPtr.getValueType());
8290 
8291 EVT EltVT = VecVT.getVectorElementType();
8292 
8293 // Calculate the element offset and add it to the pointer.
8294 unsigned EltSize = EltVT.getFixedSizeInBits() / 8; // FIXME: should be ABI size.
8295 assert(EltSize * 8 == EltVT.getFixedSizeInBits() &&
8296 "Converting bits to bytes lost precision");
8297 assert(SubVecVT.getVectorElementType() == EltVT &&
8298 "Sub-vector must be a vector with matching element type");
8299 Index = clampDynamicVectorIndex(DAG, Index, VecVT, dl,
8300 SubVecVT.getVectorElementCount());
8301 
8302 EVT IdxVT = Index.getValueType();
8303 if (SubVecVT.isScalableVector())
8304 Index =
8305 DAG.getNode(ISD::MUL, dl, IdxVT, Index,
8306 DAG.getVScale(dl, IdxVT, APInt(IdxVT.getSizeInBits(), 1)));
8307 
8308 Index = DAG.getNode(ISD::MUL, dl, IdxVT, Index,
8309 DAG.getConstant(EltSize, dl, IdxVT));
8310 return DAG.getMemBasePlusOffset(VecPtr, Index, dl);
8311 }
8312 
8313 //===----------------------------------------------------------------------===//
8314 // Implementation of Emulated TLS Model
8315 //===----------------------------------------------------------------------===//
8316 
8317 SDValue TargetLowering::LowerToTLSEmulatedModel(const GlobalAddressSDNode *GA,
8318 SelectionDAG &DAG) const {
8319 // Access to address of TLS variable xyz is lowered to a function call:
8320 // __emutls_get_address( address of global variable named "__emutls_v.xyz" )
8321 EVT PtrVT = getPointerTy(DAG.getDataLayout());
8322 PointerType *VoidPtrType = Type::getInt8PtrTy(*DAG.getContext());
8323 SDLoc dl(GA);
8324 
8325 ArgListTy Args;
8326 ArgListEntry Entry;
8327 std::string NameString = ("__emutls_v." + GA->getGlobal()->getName()).str();
8328 Module *VariableModule = const_cast<Module*>(GA->getGlobal()->getParent());
8329 StringRef EmuTlsVarName(NameString);
8330 GlobalVariable *EmuTlsVar = VariableModule->getNamedGlobal(EmuTlsVarName);
8331 assert(EmuTlsVar && "Cannot find EmuTlsVar ");
8332 Entry.Node = DAG.getGlobalAddress(EmuTlsVar, dl, PtrVT);
8333 Entry.Ty = VoidPtrType;
8334 Args.push_back(Entry);
8335 
8336 SDValue EmuTlsGetAddr = DAG.getExternalSymbol("__emutls_get_address", PtrVT);
8337 
8338 TargetLowering::CallLoweringInfo CLI(DAG);
8339 CLI.setDebugLoc(dl).setChain(DAG.getEntryNode());
8340 CLI.setLibCallee(CallingConv::C, VoidPtrType, EmuTlsGetAddr, std::move(Args));
8341 std::pair<SDValue, SDValue> CallResult = LowerCallTo(CLI);
8342 
8343 // TLSADDR will be codegen'ed as call. Inform MFI that function has calls.
8344 // At least for X86 targets, maybe good for other targets too?
8345 MachineFrameInfo &MFI = DAG.getMachineFunction().getFrameInfo();
8346 MFI.setAdjustsStack(true); // Is this only for X86 target?
8347 MFI.setHasCalls(true); 8348 8349 assert((GA->getOffset() == 0) && 8350 "Emulated TLS must have zero offset in GlobalAddressSDNode"); 8351 return CallResult.first; 8352 } 8353 8354 SDValue TargetLowering::lowerCmpEqZeroToCtlzSrl(SDValue Op, 8355 SelectionDAG &DAG) const { 8356 assert((Op->getOpcode() == ISD::SETCC) && "Input has to be a SETCC node."); 8357 if (!isCtlzFast()) 8358 return SDValue(); 8359 ISD::CondCode CC = cast<CondCodeSDNode>(Op.getOperand(2))->get(); 8360 SDLoc dl(Op); 8361 if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op.getOperand(1))) { 8362 if (C->isZero() && CC == ISD::SETEQ) { 8363 EVT VT = Op.getOperand(0).getValueType(); 8364 SDValue Zext = Op.getOperand(0); 8365 if (VT.bitsLT(MVT::i32)) { 8366 VT = MVT::i32; 8367 Zext = DAG.getNode(ISD::ZERO_EXTEND, dl, VT, Op.getOperand(0)); 8368 } 8369 unsigned Log2b = Log2_32(VT.getSizeInBits()); 8370 SDValue Clz = DAG.getNode(ISD::CTLZ, dl, VT, Zext); 8371 SDValue Scc = DAG.getNode(ISD::SRL, dl, VT, Clz, 8372 DAG.getConstant(Log2b, dl, MVT::i32)); 8373 return DAG.getNode(ISD::TRUNCATE, dl, MVT::i32, Scc); 8374 } 8375 } 8376 return SDValue(); 8377 } 8378 8379 // Convert redundant addressing modes (e.g. scaling is redundant 8380 // when accessing bytes). 8381 ISD::MemIndexType 8382 TargetLowering::getCanonicalIndexType(ISD::MemIndexType IndexType, EVT MemVT, 8383 SDValue Offsets) const { 8384 bool IsScaledIndex = 8385 (IndexType == ISD::SIGNED_SCALED) || (IndexType == ISD::UNSIGNED_SCALED); 8386 bool IsSignedIndex = 8387 (IndexType == ISD::SIGNED_SCALED) || (IndexType == ISD::SIGNED_UNSCALED); 8388 8389 // Scaling is unimportant for bytes, canonicalize to unscaled. 8390 if (IsScaledIndex && MemVT.getScalarType() == MVT::i8) 8391 return IsSignedIndex ? ISD::SIGNED_UNSCALED : ISD::UNSIGNED_UNSCALED; 8392 8393 return IndexType; 8394 } 8395 8396 SDValue TargetLowering::expandIntMINMAX(SDNode *Node, SelectionDAG &DAG) const { 8397 SDValue Op0 = Node->getOperand(0); 8398 SDValue Op1 = Node->getOperand(1); 8399 EVT VT = Op0.getValueType(); 8400 unsigned Opcode = Node->getOpcode(); 8401 SDLoc DL(Node); 8402 8403 // umin(x,y) -> sub(x,usubsat(x,y)) 8404 if (Opcode == ISD::UMIN && isOperationLegal(ISD::SUB, VT) && 8405 isOperationLegal(ISD::USUBSAT, VT)) { 8406 return DAG.getNode(ISD::SUB, DL, VT, Op0, 8407 DAG.getNode(ISD::USUBSAT, DL, VT, Op0, Op1)); 8408 } 8409 8410 // umax(x,y) -> add(x,usubsat(y,x)) 8411 if (Opcode == ISD::UMAX && isOperationLegal(ISD::ADD, VT) && 8412 isOperationLegal(ISD::USUBSAT, VT)) { 8413 return DAG.getNode(ISD::ADD, DL, VT, Op0, 8414 DAG.getNode(ISD::USUBSAT, DL, VT, Op1, Op0)); 8415 } 8416 8417 // Expand Y = MAX(A, B) -> Y = (A > B) ? A : B 8418 ISD::CondCode CC; 8419 switch (Opcode) { 8420 default: llvm_unreachable("How did we get here?"); 8421 case ISD::SMAX: CC = ISD::SETGT; break; 8422 case ISD::SMIN: CC = ISD::SETLT; break; 8423 case ISD::UMAX: CC = ISD::SETUGT; break; 8424 case ISD::UMIN: CC = ISD::SETULT; break; 8425 } 8426 8427 // FIXME: Should really try to split the vector in case it's legal on a 8428 // subvector. 
8429 if (VT.isVector() && !isOperationLegalOrCustom(ISD::VSELECT, VT)) 8430 return DAG.UnrollVectorOp(Node); 8431 8432 EVT BoolVT = getSetCCResultType(DAG.getDataLayout(), *DAG.getContext(), VT); 8433 SDValue Cond = DAG.getSetCC(DL, BoolVT, Op0, Op1, CC); 8434 return DAG.getSelect(DL, VT, Cond, Op0, Op1); 8435 } 8436 8437 SDValue TargetLowering::expandAddSubSat(SDNode *Node, SelectionDAG &DAG) const { 8438 unsigned Opcode = Node->getOpcode(); 8439 SDValue LHS = Node->getOperand(0); 8440 SDValue RHS = Node->getOperand(1); 8441 EVT VT = LHS.getValueType(); 8442 SDLoc dl(Node); 8443 8444 assert(VT == RHS.getValueType() && "Expected operands to be the same type"); 8445 assert(VT.isInteger() && "Expected operands to be integers"); 8446 8447 // usub.sat(a, b) -> umax(a, b) - b 8448 if (Opcode == ISD::USUBSAT && isOperationLegal(ISD::UMAX, VT)) { 8449 SDValue Max = DAG.getNode(ISD::UMAX, dl, VT, LHS, RHS); 8450 return DAG.getNode(ISD::SUB, dl, VT, Max, RHS); 8451 } 8452 8453 // uadd.sat(a, b) -> umin(a, ~b) + b 8454 if (Opcode == ISD::UADDSAT && isOperationLegal(ISD::UMIN, VT)) { 8455 SDValue InvRHS = DAG.getNOT(dl, RHS, VT); 8456 SDValue Min = DAG.getNode(ISD::UMIN, dl, VT, LHS, InvRHS); 8457 return DAG.getNode(ISD::ADD, dl, VT, Min, RHS); 8458 } 8459 8460 unsigned OverflowOp; 8461 switch (Opcode) { 8462 case ISD::SADDSAT: 8463 OverflowOp = ISD::SADDO; 8464 break; 8465 case ISD::UADDSAT: 8466 OverflowOp = ISD::UADDO; 8467 break; 8468 case ISD::SSUBSAT: 8469 OverflowOp = ISD::SSUBO; 8470 break; 8471 case ISD::USUBSAT: 8472 OverflowOp = ISD::USUBO; 8473 break; 8474 default: 8475 llvm_unreachable("Expected method to receive signed or unsigned saturation " 8476 "addition or subtraction node."); 8477 } 8478 8479 // FIXME: Should really try to split the vector in case it's legal on a 8480 // subvector. 8481 if (VT.isVector() && !isOperationLegalOrCustom(ISD::VSELECT, VT)) 8482 return DAG.UnrollVectorOp(Node); 8483 8484 unsigned BitWidth = LHS.getScalarValueSizeInBits(); 8485 EVT BoolVT = getSetCCResultType(DAG.getDataLayout(), *DAG.getContext(), VT); 8486 SDValue Result = DAG.getNode(OverflowOp, dl, DAG.getVTList(VT, BoolVT), LHS, RHS); 8487 SDValue SumDiff = Result.getValue(0); 8488 SDValue Overflow = Result.getValue(1); 8489 SDValue Zero = DAG.getConstant(0, dl, VT); 8490 SDValue AllOnes = DAG.getAllOnesConstant(dl, VT); 8491 8492 if (Opcode == ISD::UADDSAT) { 8493 if (getBooleanContents(VT) == ZeroOrNegativeOneBooleanContent) { 8494 // (LHS + RHS) | OverflowMask 8495 SDValue OverflowMask = DAG.getSExtOrTrunc(Overflow, dl, VT); 8496 return DAG.getNode(ISD::OR, dl, VT, SumDiff, OverflowMask); 8497 } 8498 // Overflow ? 0xffff.... : (LHS + RHS) 8499 return DAG.getSelect(dl, VT, Overflow, AllOnes, SumDiff); 8500 } 8501 8502 if (Opcode == ISD::USUBSAT) { 8503 if (getBooleanContents(VT) == ZeroOrNegativeOneBooleanContent) { 8504 // (LHS - RHS) & ~OverflowMask 8505 SDValue OverflowMask = DAG.getSExtOrTrunc(Overflow, dl, VT); 8506 SDValue Not = DAG.getNOT(dl, OverflowMask, VT); 8507 return DAG.getNode(ISD::AND, dl, VT, SumDiff, Not); 8508 } 8509 // Overflow ? 0 : (LHS - RHS) 8510 return DAG.getSelect(dl, VT, Overflow, Zero, SumDiff); 8511 } 8512 8513 // Overflow ? 
(SumDiff >> BW) ^ MinVal : SumDiff
8514 APInt MinVal = APInt::getSignedMinValue(BitWidth);
8515 SDValue SatMin = DAG.getConstant(MinVal, dl, VT);
8516 SDValue Shift = DAG.getNode(ISD::SRA, dl, VT, SumDiff,
8517 DAG.getConstant(BitWidth - 1, dl, VT));
8518 Result = DAG.getNode(ISD::XOR, dl, VT, Shift, SatMin);
8519 return DAG.getSelect(dl, VT, Overflow, Result, SumDiff);
8520 }
8521 
8522 SDValue TargetLowering::expandShlSat(SDNode *Node, SelectionDAG &DAG) const {
8523 unsigned Opcode = Node->getOpcode();
8524 bool IsSigned = Opcode == ISD::SSHLSAT;
8525 SDValue LHS = Node->getOperand(0);
8526 SDValue RHS = Node->getOperand(1);
8527 EVT VT = LHS.getValueType();
8528 SDLoc dl(Node);
8529 
8530 assert((Node->getOpcode() == ISD::SSHLSAT ||
8531 Node->getOpcode() == ISD::USHLSAT) &&
8532 "Expected a SHLSAT opcode");
8533 assert(VT == RHS.getValueType() && "Expected operands to be the same type");
8534 assert(VT.isInteger() && "Expected operands to be integers");
8535 
8536 // If LHS != (LHS << RHS) >> RHS, we have overflow and must saturate.
8537 
8538 unsigned BW = VT.getScalarSizeInBits();
8539 SDValue Result = DAG.getNode(ISD::SHL, dl, VT, LHS, RHS);
8540 SDValue Orig =
8541 DAG.getNode(IsSigned ? ISD::SRA : ISD::SRL, dl, VT, Result, RHS);
8542 
8543 SDValue SatVal;
8544 if (IsSigned) {
8545 SDValue SatMin = DAG.getConstant(APInt::getSignedMinValue(BW), dl, VT);
8546 SDValue SatMax = DAG.getConstant(APInt::getSignedMaxValue(BW), dl, VT);
8547 SatVal = DAG.getSelectCC(dl, LHS, DAG.getConstant(0, dl, VT),
8548 SatMin, SatMax, ISD::SETLT);
8549 } else {
8550 SatVal = DAG.getConstant(APInt::getMaxValue(BW), dl, VT);
8551 }
8552 Result = DAG.getSelectCC(dl, LHS, Orig, SatVal, Result, ISD::SETNE);
8553 
8554 return Result;
8555 }
8556 
8557 SDValue
8558 TargetLowering::expandFixedPointMul(SDNode *Node, SelectionDAG &DAG) const {
8559 assert((Node->getOpcode() == ISD::SMULFIX ||
8560 Node->getOpcode() == ISD::UMULFIX ||
8561 Node->getOpcode() == ISD::SMULFIXSAT ||
8562 Node->getOpcode() == ISD::UMULFIXSAT) &&
8563 "Expected a fixed point multiplication opcode");
8564 
8565 SDLoc dl(Node);
8566 SDValue LHS = Node->getOperand(0);
8567 SDValue RHS = Node->getOperand(1);
8568 EVT VT = LHS.getValueType();
8569 unsigned Scale = Node->getConstantOperandVal(2);
8570 bool Saturating = (Node->getOpcode() == ISD::SMULFIXSAT ||
8571 Node->getOpcode() == ISD::UMULFIXSAT);
8572 bool Signed = (Node->getOpcode() == ISD::SMULFIX ||
8573 Node->getOpcode() == ISD::SMULFIXSAT);
8574 EVT BoolVT = getSetCCResultType(DAG.getDataLayout(), *DAG.getContext(), VT);
8575 unsigned VTSize = VT.getScalarSizeInBits();
8576 
8577 if (!Scale) {
8578 // [us]mul.fix(a, b, 0) -> mul(a, b)
8579 if (!Saturating) {
8580 if (isOperationLegalOrCustom(ISD::MUL, VT))
8581 return DAG.getNode(ISD::MUL, dl, VT, LHS, RHS);
8582 } else if (Signed && isOperationLegalOrCustom(ISD::SMULO, VT)) {
8583 SDValue Result =
8584 DAG.getNode(ISD::SMULO, dl, DAG.getVTList(VT, BoolVT), LHS, RHS);
8585 SDValue Product = Result.getValue(0);
8586 SDValue Overflow = Result.getValue(1);
8587 SDValue Zero = DAG.getConstant(0, dl, VT);
8588 
8589 APInt MinVal = APInt::getSignedMinValue(VTSize);
8590 APInt MaxVal = APInt::getSignedMaxValue(VTSize);
8591 SDValue SatMin = DAG.getConstant(MinVal, dl, VT);
8592 SDValue SatMax = DAG.getConstant(MaxVal, dl, VT);
8593 // Xor the inputs; if the resulting sign bit is 0 the product will be
8594 // positive, else negative.
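// E.g. for i8 operands, (-2) ^ 3 = 0xFD has the sign bit set, so an
// overflowing product saturates to SatMin; (-2) ^ (-3) = 0x03 has it
// clear, so it saturates to SatMax.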
8595 SDValue Xor = DAG.getNode(ISD::XOR, dl, VT, LHS, RHS); 8596 SDValue ProdNeg = DAG.getSetCC(dl, BoolVT, Xor, Zero, ISD::SETLT); 8597 Result = DAG.getSelect(dl, VT, ProdNeg, SatMin, SatMax); 8598 return DAG.getSelect(dl, VT, Overflow, Result, Product); 8599 } else if (!Signed && isOperationLegalOrCustom(ISD::UMULO, VT)) { 8600 SDValue Result = 8601 DAG.getNode(ISD::UMULO, dl, DAG.getVTList(VT, BoolVT), LHS, RHS); 8602 SDValue Product = Result.getValue(0); 8603 SDValue Overflow = Result.getValue(1); 8604 8605 APInt MaxVal = APInt::getMaxValue(VTSize); 8606 SDValue SatMax = DAG.getConstant(MaxVal, dl, VT); 8607 return DAG.getSelect(dl, VT, Overflow, SatMax, Product); 8608 } 8609 } 8610 8611 assert(((Signed && Scale < VTSize) || (!Signed && Scale <= VTSize)) && 8612 "Expected scale to be less than the number of bits if signed or at " 8613 "most the number of bits if unsigned."); 8614 assert(LHS.getValueType() == RHS.getValueType() && 8615 "Expected both operands to be the same type"); 8616 8617 // Get the upper and lower bits of the result. 8618 SDValue Lo, Hi; 8619 unsigned LoHiOp = Signed ? ISD::SMUL_LOHI : ISD::UMUL_LOHI; 8620 unsigned HiOp = Signed ? ISD::MULHS : ISD::MULHU; 8621 if (isOperationLegalOrCustom(LoHiOp, VT)) { 8622 SDValue Result = DAG.getNode(LoHiOp, dl, DAG.getVTList(VT, VT), LHS, RHS); 8623 Lo = Result.getValue(0); 8624 Hi = Result.getValue(1); 8625 } else if (isOperationLegalOrCustom(HiOp, VT)) { 8626 Lo = DAG.getNode(ISD::MUL, dl, VT, LHS, RHS); 8627 Hi = DAG.getNode(HiOp, dl, VT, LHS, RHS); 8628 } else if (VT.isVector()) { 8629 return SDValue(); 8630 } else { 8631 report_fatal_error("Unable to expand fixed point multiplication."); 8632 } 8633 8634 if (Scale == VTSize) 8635 // Result is just the top half since we'd be shifting by the width of the 8636 // operand. Overflow impossible so this works for both UMULFIX and 8637 // UMULFIXSAT. 8638 return Hi; 8639 8640 // The result will need to be shifted right by the scale since both operands 8641 // are scaled. The result is given to us in 2 halves, so we only want part of 8642 // both in the result. 8643 EVT ShiftTy = getShiftAmountTy(VT, DAG.getDataLayout()); 8644 SDValue Result = DAG.getNode(ISD::FSHR, dl, VT, Hi, Lo, 8645 DAG.getConstant(Scale, dl, ShiftTy)); 8646 if (!Saturating) 8647 return Result; 8648 8649 if (!Signed) { 8650 // Unsigned overflow happened if the upper (VTSize - Scale) bits (of the 8651 // widened multiplication) aren't all zeroes. 8652 8653 // Saturate to max if ((Hi >> Scale) != 0), 8654 // which is the same as if (Hi > ((1 << Scale) - 1)) 8655 APInt MaxVal = APInt::getMaxValue(VTSize); 8656 SDValue LowMask = DAG.getConstant(APInt::getLowBitsSet(VTSize, Scale), 8657 dl, VT); 8658 Result = DAG.getSelectCC(dl, Hi, LowMask, 8659 DAG.getConstant(MaxVal, dl, VT), Result, 8660 ISD::SETUGT); 8661 8662 return Result; 8663 } 8664 8665 // Signed overflow happened if the upper (VTSize - Scale + 1) bits (of the 8666 // widened multiplication) aren't all ones or all zeroes. 8667 8668 SDValue SatMin = DAG.getConstant(APInt::getSignedMinValue(VTSize), dl, VT); 8669 SDValue SatMax = DAG.getConstant(APInt::getSignedMaxValue(VTSize), dl, VT); 8670 8671 if (Scale == 0) { 8672 SDValue Sign = DAG.getNode(ISD::SRA, dl, VT, Lo, 8673 DAG.getConstant(VTSize - 1, dl, ShiftTy)); 8674 SDValue Overflow = DAG.getSetCC(dl, BoolVT, Hi, Sign, ISD::SETNE); 8675 // Saturated to SatMin if wide product is negative, and SatMax if wide 8676 // product is positive ... 
8677 SDValue Zero = DAG.getConstant(0, dl, VT);
8678 SDValue ResultIfOverflow = DAG.getSelectCC(dl, Hi, Zero, SatMin, SatMax,
8679 ISD::SETLT);
8680 // ... but only if we overflowed.
8681 return DAG.getSelect(dl, VT, Overflow, ResultIfOverflow, Result);
8682 }
8683 
8684 // We handled Scale==0 above, so all the bits to examine are in Hi.
8685 
8686 // Saturate to max if ((Hi >> (Scale - 1)) > 0),
8687 // which is the same as if (Hi > (1 << (Scale - 1)) - 1)
8688 SDValue LowMask = DAG.getConstant(APInt::getLowBitsSet(VTSize, Scale - 1),
8689 dl, VT);
8690 Result = DAG.getSelectCC(dl, Hi, LowMask, SatMax, Result, ISD::SETGT);
8691 // Saturate to min if ((Hi >> (Scale - 1)) < -1),
8692 // which is the same as if (Hi < (-1 << (Scale - 1)))
8693 SDValue HighMask =
8694 DAG.getConstant(APInt::getHighBitsSet(VTSize, VTSize - Scale + 1),
8695 dl, VT);
8696 Result = DAG.getSelectCC(dl, Hi, HighMask, SatMin, Result, ISD::SETLT);
8697 return Result;
8698 }
8699 
8700 SDValue
8701 TargetLowering::expandFixedPointDiv(unsigned Opcode, const SDLoc &dl,
8702 SDValue LHS, SDValue RHS,
8703 unsigned Scale, SelectionDAG &DAG) const {
8704 assert((Opcode == ISD::SDIVFIX || Opcode == ISD::SDIVFIXSAT ||
8705 Opcode == ISD::UDIVFIX || Opcode == ISD::UDIVFIXSAT) &&
8706 "Expected a fixed point division opcode");
8707 
8708 EVT VT = LHS.getValueType();
8709 bool Signed = Opcode == ISD::SDIVFIX || Opcode == ISD::SDIVFIXSAT;
8710 bool Saturating = Opcode == ISD::SDIVFIXSAT || Opcode == ISD::UDIVFIXSAT;
8711 EVT BoolVT = getSetCCResultType(DAG.getDataLayout(), *DAG.getContext(), VT);
8712 
8713 // If there is enough room in the type to upscale the LHS or downscale the
8714 // RHS before the division, we can perform it in this type without having to
8715 // resize. For signed operations, the LHS headroom is the number of
8716 // redundant sign bits, and for unsigned ones it is the number of leading
8717 // zeroes. The headroom for the RHS is the number of trailing zeroes.
8718 unsigned LHSLead = Signed ? DAG.ComputeNumSignBits(LHS) - 1
8719 : DAG.computeKnownBits(LHS).countMinLeadingZeros();
8720 unsigned RHSTrail = DAG.computeKnownBits(RHS).countMinTrailingZeros();
8721 
8722 // For signed saturating operations, we need to be able to detect true integer
8723 // division overflow; that is, when you have MIN / -EPS. However, this
8724 // is undefined behavior and if we emit divisions that could take such
8725 // values it may cause undesired behavior (arithmetic exceptions on x86, for
8726 // example).
8727 // Avoid this by requiring an extra bit so that we never get this case.
8728 // FIXME: This is a bit unfortunate as it means that for an 8-bit 7-scale
8729 // signed saturating division, we need to emit a whopping 32-bit division.
8730 if (LHSLead + RHSTrail < Scale + (unsigned)(Saturating && Signed))
8731 return SDValue();
8732 
8733 unsigned LHSShift = std::min(LHSLead, Scale);
8734 unsigned RHSShift = Scale - LHSShift;
8735 
8736 // At this point, we know that if we shift the LHS up by LHSShift and the
8737 // RHS down by RHSShift, we can emit a regular division with a final scaling
8738 // factor of Scale.
8739 
8740 EVT ShiftTy = getShiftAmountTy(VT, DAG.getDataLayout());
8741 if (LHSShift)
8742 LHS = DAG.getNode(ISD::SHL, dl, VT, LHS,
8743 DAG.getConstant(LHSShift, dl, ShiftTy));
8744 if (RHSShift)
8745 RHS = DAG.getNode(Signed ?
ISD::SRA : ISD::SRL, dl, VT, RHS,
8746 DAG.getConstant(RHSShift, dl, ShiftTy));
8747 
8748 SDValue Quot;
8749 if (Signed) {
8750 // For signed operations, if the resulting quotient is negative and the
8751 // remainder is nonzero, subtract 1 from the quotient to round towards
8752 // negative infinity.
8753 SDValue Rem;
8754 // FIXME: Ideally we would always produce an SDIVREM here, but if the
8755 // type isn't legal, SDIVREM cannot be expanded. There is no reason why
8756 // we couldn't just form a libcall, but the type legalizer doesn't do it.
8757 if (isTypeLegal(VT) &&
8758 isOperationLegalOrCustom(ISD::SDIVREM, VT)) {
8759 Quot = DAG.getNode(ISD::SDIVREM, dl,
8760 DAG.getVTList(VT, VT),
8761 LHS, RHS);
8762 Rem = Quot.getValue(1);
8763 Quot = Quot.getValue(0);
8764 } else {
8765 Quot = DAG.getNode(ISD::SDIV, dl, VT,
8766 LHS, RHS);
8767 Rem = DAG.getNode(ISD::SREM, dl, VT,
8768 LHS, RHS);
8769 }
8770 SDValue Zero = DAG.getConstant(0, dl, VT);
8771 SDValue RemNonZero = DAG.getSetCC(dl, BoolVT, Rem, Zero, ISD::SETNE);
8772 SDValue LHSNeg = DAG.getSetCC(dl, BoolVT, LHS, Zero, ISD::SETLT);
8773 SDValue RHSNeg = DAG.getSetCC(dl, BoolVT, RHS, Zero, ISD::SETLT);
8774 SDValue QuotNeg = DAG.getNode(ISD::XOR, dl, BoolVT, LHSNeg, RHSNeg);
8775 SDValue Sub1 = DAG.getNode(ISD::SUB, dl, VT, Quot,
8776 DAG.getConstant(1, dl, VT));
8777 Quot = DAG.getSelect(dl, VT,
8778 DAG.getNode(ISD::AND, dl, BoolVT, RemNonZero, QuotNeg),
8779 Sub1, Quot);
8780 } else
8781 Quot = DAG.getNode(ISD::UDIV, dl, VT,
8782 LHS, RHS);
8783 
8784 return Quot;
8785 }
8786 
8787 void TargetLowering::expandUADDSUBO(
8788 SDNode *Node, SDValue &Result, SDValue &Overflow, SelectionDAG &DAG) const {
8789 SDLoc dl(Node);
8790 SDValue LHS = Node->getOperand(0);
8791 SDValue RHS = Node->getOperand(1);
8792 bool IsAdd = Node->getOpcode() == ISD::UADDO;
8793 
8794 // If ADD/SUBCARRY is legal, use that instead.
8795 unsigned OpcCarry = IsAdd ? ISD::ADDCARRY : ISD::SUBCARRY;
8796 if (isOperationLegalOrCustom(OpcCarry, Node->getValueType(0))) {
8797 SDValue CarryIn = DAG.getConstant(0, dl, Node->getValueType(1));
8798 SDValue NodeCarry = DAG.getNode(OpcCarry, dl, Node->getVTList(),
8799 { LHS, RHS, CarryIn });
8800 Result = SDValue(NodeCarry.getNode(), 0);
8801 Overflow = SDValue(NodeCarry.getNode(), 1);
8802 return;
8803 }
8804 
8805 Result = DAG.getNode(IsAdd ? ISD::ADD : ISD::SUB, dl,
8806 LHS.getValueType(), LHS, RHS);
8807 
8808 EVT ResultType = Node->getValueType(1);
8809 EVT SetCCType = getSetCCResultType(
8810 DAG.getDataLayout(), *DAG.getContext(), Node->getValueType(0));
8811 SDValue SetCC;
8812 if (IsAdd && isOneConstant(RHS)) {
8813 // Special case: uaddo X, 1 overflowed if X+1 is 0. This potentially
8814 // reduces the live range of X. We assume comparing with 0 is cheap.
8815 // TODO: This generalizes to (X + C) < C.
8816 SetCC =
8817 DAG.getSetCC(dl, SetCCType, Result,
8818 DAG.getConstant(0, dl, Node->getValueType(0)), ISD::SETEQ);
8819 } else {
8820 ISD::CondCode CC = IsAdd ? ISD::SETULT : ISD::SETUGT;
8821 SetCC = DAG.getSetCC(dl, SetCCType, Result, LHS, CC);
8822 }
8823 Overflow = DAG.getBoolExtOrTrunc(SetCC, dl, ResultType, ResultType);
8824 }
8825 
8826 void TargetLowering::expandSADDSUBO(
8827 SDNode *Node, SDValue &Result, SDValue &Overflow, SelectionDAG &DAG) const {
8828 SDLoc dl(Node);
8829 SDValue LHS = Node->getOperand(0);
8830 SDValue RHS = Node->getOperand(1);
8831 bool IsAdd = Node->getOpcode() == ISD::SADDO;
8832 
8833 Result = DAG.getNode(IsAdd ?
ISD::ADD : ISD::SUB, dl,
8834 LHS.getValueType(), LHS, RHS);
8835 
8836 EVT ResultType = Node->getValueType(1);
8837 EVT OType = getSetCCResultType(
8838 DAG.getDataLayout(), *DAG.getContext(), Node->getValueType(0));
8839 
8840 // If SADDSAT/SSUBSAT is legal, compare results to detect overflow.
8841 unsigned OpcSat = IsAdd ? ISD::SADDSAT : ISD::SSUBSAT;
8842 if (isOperationLegal(OpcSat, LHS.getValueType())) {
8843 SDValue Sat = DAG.getNode(OpcSat, dl, LHS.getValueType(), LHS, RHS);
8844 SDValue SetCC = DAG.getSetCC(dl, OType, Result, Sat, ISD::SETNE);
8845 Overflow = DAG.getBoolExtOrTrunc(SetCC, dl, ResultType, ResultType);
8846 return;
8847 }
8848 
8849 SDValue Zero = DAG.getConstant(0, dl, LHS.getValueType());
8850 
8851 // For an addition, the result should be less than one of the operands (LHS)
8852 // if and only if the other operand (RHS) is negative, otherwise there will
8853 // be overflow.
8854 // For a subtraction, the result should be less than one of the operands
8855 // (LHS) if and only if the other operand (RHS) is (non-zero) positive,
8856 // otherwise there will be overflow.
8857 SDValue ResultLowerThanLHS = DAG.getSetCC(dl, OType, Result, LHS, ISD::SETLT);
8858 SDValue ConditionRHS =
8859 DAG.getSetCC(dl, OType, RHS, Zero, IsAdd ? ISD::SETLT : ISD::SETGT);
8860 
8861 Overflow = DAG.getBoolExtOrTrunc(
8862 DAG.getNode(ISD::XOR, dl, OType, ConditionRHS, ResultLowerThanLHS), dl,
8863 ResultType, ResultType);
8864 }
8865 
8866 bool TargetLowering::expandMULO(SDNode *Node, SDValue &Result,
8867 SDValue &Overflow, SelectionDAG &DAG) const {
8868 SDLoc dl(Node);
8869 EVT VT = Node->getValueType(0);
8870 EVT SetCCVT = getSetCCResultType(DAG.getDataLayout(), *DAG.getContext(), VT);
8871 SDValue LHS = Node->getOperand(0);
8872 SDValue RHS = Node->getOperand(1);
8873 bool isSigned = Node->getOpcode() == ISD::SMULO;
8874 
8875 // For power-of-two multiplications we can use a simpler shift expansion.
8876 if (ConstantSDNode *RHSC = isConstOrConstSplat(RHS)) {
8877 const APInt &C = RHSC->getAPIntValue();
8878 // mulo(X, 1 << S) -> { X << S, (X << S) >> S != X }
8879 if (C.isPowerOf2()) {
8880 // smulo(x, signed_min) is the same as umulo(x, signed_min).
8881 bool UseArithShift = isSigned && !C.isMinSignedValue();
8882 EVT ShiftAmtTy = getShiftAmountTy(VT, DAG.getDataLayout());
8883 SDValue ShiftAmt = DAG.getConstant(C.logBase2(), dl, ShiftAmtTy);
8884 Result = DAG.getNode(ISD::SHL, dl, VT, LHS, ShiftAmt);
8885 Overflow = DAG.getSetCC(dl, SetCCVT,
8886 DAG.getNode(UseArithShift ?
ISD::SRA : ISD::SRL,
8887 dl, VT, Result, ShiftAmt),
8888 LHS, ISD::SETNE);
8889 return true;
8890 }
8891 }
8892 
8893 EVT WideVT = EVT::getIntegerVT(*DAG.getContext(), VT.getScalarSizeInBits() * 2);
8894 if (VT.isVector())
8895 WideVT =
8896 EVT::getVectorVT(*DAG.getContext(), WideVT, VT.getVectorElementCount());
8897 
8898 SDValue BottomHalf;
8899 SDValue TopHalf;
8900 static const unsigned Ops[2][3] =
8901 { { ISD::MULHU, ISD::UMUL_LOHI, ISD::ZERO_EXTEND },
8902 { ISD::MULHS, ISD::SMUL_LOHI, ISD::SIGN_EXTEND }};
8903 if (isOperationLegalOrCustom(Ops[isSigned][0], VT)) {
8904 BottomHalf = DAG.getNode(ISD::MUL, dl, VT, LHS, RHS);
8905 TopHalf = DAG.getNode(Ops[isSigned][0], dl, VT, LHS, RHS);
8906 } else if (isOperationLegalOrCustom(Ops[isSigned][1], VT)) {
8907 BottomHalf = DAG.getNode(Ops[isSigned][1], dl, DAG.getVTList(VT, VT), LHS,
8908 RHS);
8909 TopHalf = BottomHalf.getValue(1);
8910 } else if (isTypeLegal(WideVT)) {
8911 LHS = DAG.getNode(Ops[isSigned][2], dl, WideVT, LHS);
8912 RHS = DAG.getNode(Ops[isSigned][2], dl, WideVT, RHS);
8913 SDValue Mul = DAG.getNode(ISD::MUL, dl, WideVT, LHS, RHS);
8914 BottomHalf = DAG.getNode(ISD::TRUNCATE, dl, VT, Mul);
8915 SDValue ShiftAmt = DAG.getConstant(VT.getScalarSizeInBits(), dl,
8916 getShiftAmountTy(WideVT, DAG.getDataLayout()));
8917 TopHalf = DAG.getNode(ISD::TRUNCATE, dl, VT,
8918 DAG.getNode(ISD::SRL, dl, WideVT, Mul, ShiftAmt));
8919 } else {
8920 if (VT.isVector())
8921 return false;
8922 
8923 // We can fall back to a libcall with an illegal type for the MUL if we
8924 // have a libcall big enough.
8925 // Also, we can fall back to a division in some cases, but that's a big
8926 // performance hit in the general case.
8927 RTLIB::Libcall LC = RTLIB::UNKNOWN_LIBCALL;
8928 if (WideVT == MVT::i16)
8929 LC = RTLIB::MUL_I16;
8930 else if (WideVT == MVT::i32)
8931 LC = RTLIB::MUL_I32;
8932 else if (WideVT == MVT::i64)
8933 LC = RTLIB::MUL_I64;
8934 else if (WideVT == MVT::i128)
8935 LC = RTLIB::MUL_I128;
8936 assert(LC != RTLIB::UNKNOWN_LIBCALL && "Cannot expand this operation!");
8937 
8938 SDValue HiLHS;
8939 SDValue HiRHS;
8940 if (isSigned) {
8941 // The high part is obtained by SRA'ing all but one of the bits of the
8942 // low part.
8943 unsigned LoSize = VT.getFixedSizeInBits();
8944 HiLHS =
8945 DAG.getNode(ISD::SRA, dl, VT, LHS,
8946 DAG.getConstant(LoSize - 1, dl,
8947 getPointerTy(DAG.getDataLayout())));
8948 HiRHS =
8949 DAG.getNode(ISD::SRA, dl, VT, RHS,
8950 DAG.getConstant(LoSize - 1, dl,
8951 getPointerTy(DAG.getDataLayout())));
8952 } else {
8953 HiLHS = DAG.getConstant(0, dl, VT);
8954 HiRHS = DAG.getConstant(0, dl, VT);
8955 }
8956 
8957 // Here we're passing the 2 arguments explicitly as 4 arguments that are
8958 // pre-lowered to the correct types. This all depends upon WideVT not
8959 // being a legal type for the architecture and thus having to be split to
8960 // two arguments.
8961 SDValue Ret;
8962 TargetLowering::MakeLibCallOptions CallOptions;
8963 CallOptions.setSExt(isSigned);
8964 CallOptions.setIsPostTypeLegalization(true);
8965 if (shouldSplitFunctionArgumentsAsLittleEndian(DAG.getDataLayout())) {
8966 // Halves of WideVT are packed into registers in different order
8967 // depending on platform endianness. This is usually handled by
8968 // the C calling convention, but we can't defer to it in
8969 // the legalizer.
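// E.g. when expanding an i32 multiply via the MUL_I64 libcall, each i64
// operand is split into two i32 halves: little-endian targets pass the
// low half first (LHS, HiLHS), big-endian targets the high half first.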
    // Here we're passing the 2 arguments explicitly as 4 arguments that are
    // pre-lowered to the correct types. This all depends upon WideVT not
    // being a legal type for the architecture and thus having to be split
    // into two arguments.
    SDValue Ret;
    TargetLowering::MakeLibCallOptions CallOptions;
    CallOptions.setSExt(isSigned);
    CallOptions.setIsPostTypeLegalization(true);
    if (shouldSplitFunctionArgumentsAsLittleEndian(DAG.getDataLayout())) {
      // Halves of WideVT are packed into registers in different order
      // depending on platform endianness. This is usually handled by
      // the C calling convention, but we can't defer to it in
      // the legalizer.
      SDValue Args[] = { LHS, HiLHS, RHS, HiRHS };
      Ret = makeLibCall(DAG, LC, WideVT, Args, CallOptions, dl).first;
    } else {
      SDValue Args[] = { HiLHS, LHS, HiRHS, RHS };
      Ret = makeLibCall(DAG, LC, WideVT, Args, CallOptions, dl).first;
    }
    assert(Ret.getOpcode() == ISD::MERGE_VALUES &&
           "Ret value is a collection of constituent nodes holding result.");
    if (DAG.getDataLayout().isLittleEndian()) {
      // Same as above.
      BottomHalf = Ret.getOperand(0);
      TopHalf = Ret.getOperand(1);
    } else {
      BottomHalf = Ret.getOperand(1);
      TopHalf = Ret.getOperand(0);
    }
  }

  Result = BottomHalf;
  if (isSigned) {
    // Signed overflow occurred iff the top half is not the sign extension
    // of the bottom half.
    SDValue ShiftAmt = DAG.getConstant(
        VT.getScalarSizeInBits() - 1, dl,
        getShiftAmountTy(BottomHalf.getValueType(), DAG.getDataLayout()));
    SDValue Sign = DAG.getNode(ISD::SRA, dl, VT, BottomHalf, ShiftAmt);
    Overflow = DAG.getSetCC(dl, SetCCVT, TopHalf, Sign, ISD::SETNE);
  } else {
    Overflow = DAG.getSetCC(dl, SetCCVT, TopHalf,
                            DAG.getConstant(0, dl, VT), ISD::SETNE);
  }

  // Truncate the result if SetCC returns a larger type than needed.
  EVT RType = Node->getValueType(1);
  if (RType.bitsLT(Overflow.getValueType()))
    Overflow = DAG.getNode(ISD::TRUNCATE, dl, RType, Overflow);

  assert(RType.getSizeInBits() == Overflow.getValueSizeInBits() &&
         "Unexpected result type for S/UMULO legalization");
  return true;
}

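// Worked example for the generic expansion below (illustration only): a
// vecreduce_add of <4 x i32> {e0,e1,e2,e3} is first narrowed, while the
// half-width add is legal or custom, by adding the two vector halves
// elementwise ({e0+e2, e1+e3}); whatever is left is then scalarized into a
// chain of extracts and adds, e.g. ((e0 + e1) + e2) + e3.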
SDValue TargetLowering::expandVecReduce(SDNode *Node, SelectionDAG &DAG) const {
  SDLoc dl(Node);
  unsigned BaseOpcode = ISD::getVecReduceBaseOpcode(Node->getOpcode());
  SDValue Op = Node->getOperand(0);
  EVT VT = Op.getValueType();

  if (VT.isScalableVector())
    report_fatal_error(
        "Expanding reductions for scalable vectors is undefined.");

  // Try to use a shuffle reduction for power of two vectors.
  if (VT.isPow2VectorType()) {
    while (VT.getVectorNumElements() > 1) {
      EVT HalfVT = VT.getHalfNumVectorElementsVT(*DAG.getContext());
      if (!isOperationLegalOrCustom(BaseOpcode, HalfVT))
        break;

      SDValue Lo, Hi;
      std::tie(Lo, Hi) = DAG.SplitVector(Op, dl);
      Op = DAG.getNode(BaseOpcode, dl, HalfVT, Lo, Hi);
      VT = HalfVT;
    }
  }

  EVT EltVT = VT.getVectorElementType();
  unsigned NumElts = VT.getVectorNumElements();

  SmallVector<SDValue, 8> Ops;
  DAG.ExtractVectorElements(Op, Ops, 0, NumElts);

  SDValue Res = Ops[0];
  for (unsigned i = 1; i < NumElts; i++)
    Res = DAG.getNode(BaseOpcode, dl, EltVT, Res, Ops[i], Node->getFlags());

  // Result type may be wider than element type.
  if (EltVT != Node->getValueType(0))
    Res = DAG.getNode(ISD::ANY_EXTEND, dl, Node->getValueType(0), Res);
  return Res;
}

SDValue TargetLowering::expandVecReduceSeq(SDNode *Node,
                                           SelectionDAG &DAG) const {
  SDLoc dl(Node);
  SDValue AccOp = Node->getOperand(0);
  SDValue VecOp = Node->getOperand(1);
  SDNodeFlags Flags = Node->getFlags();

  EVT VT = VecOp.getValueType();
  EVT EltVT = VT.getVectorElementType();

  if (VT.isScalableVector())
    report_fatal_error(
        "Expanding reductions for scalable vectors is undefined.");

  unsigned NumElts = VT.getVectorNumElements();

  SmallVector<SDValue, 8> Ops;
  DAG.ExtractVectorElements(VecOp, Ops, 0, NumElts);

  unsigned BaseOpcode = ISD::getVecReduceBaseOpcode(Node->getOpcode());

  SDValue Res = AccOp;
  for (unsigned i = 0; i < NumElts; i++)
    Res = DAG.getNode(BaseOpcode, dl, EltVT, Res, Ops[i], Flags);

  return Res;
}

bool TargetLowering::expandREM(SDNode *Node, SDValue &Result,
                               SelectionDAG &DAG) const {
  EVT VT = Node->getValueType(0);
  SDLoc dl(Node);
  bool isSigned = Node->getOpcode() == ISD::SREM;
  unsigned DivOpc = isSigned ? ISD::SDIV : ISD::UDIV;
  unsigned DivRemOpc = isSigned ? ISD::SDIVREM : ISD::UDIVREM;
  SDValue Dividend = Node->getOperand(0);
  SDValue Divisor = Node->getOperand(1);
  if (isOperationLegalOrCustom(DivRemOpc, VT)) {
    SDVTList VTs = DAG.getVTList(VT, VT);
    Result = DAG.getNode(DivRemOpc, dl, VTs, Dividend, Divisor).getValue(1);
    return true;
  }
  if (isOperationLegalOrCustom(DivOpc, VT)) {
    // X % Y -> X - X/Y*Y
    SDValue Divide = DAG.getNode(DivOpc, dl, VT, Dividend, Divisor);
    SDValue Mul = DAG.getNode(ISD::MUL, dl, VT, Divide, Divisor);
    Result = DAG.getNode(ISD::SUB, dl, VT, Dividend, Mul);
    return true;
  }
  return false;
}

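// Worked example of the saturating semantics implemented below (an
// illustration, not from the original source): for fptosi.sat.i8.f32,
// SatWidth is 8, so MinInt/MaxInt are -128/127; an input of 300.0 clamps to
// 127, -300.0 clamps to -128, and NaN produces 0.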
SDValue TargetLowering::expandFP_TO_INT_SAT(SDNode *Node,
                                            SelectionDAG &DAG) const {
  bool IsSigned = Node->getOpcode() == ISD::FP_TO_SINT_SAT;
  SDLoc dl(SDValue(Node, 0));
  SDValue Src = Node->getOperand(0);

  // DstVT is the result type, while SatVT is the size to which we saturate.
  EVT SrcVT = Src.getValueType();
  EVT DstVT = Node->getValueType(0);

  EVT SatVT = cast<VTSDNode>(Node->getOperand(1))->getVT();
  unsigned SatWidth = SatVT.getScalarSizeInBits();
  unsigned DstWidth = DstVT.getScalarSizeInBits();
  assert(SatWidth <= DstWidth &&
         "Expected saturation width no wider than result width");

  // Determine minimum and maximum integer values and their corresponding
  // floating-point values.
  APInt MinInt, MaxInt;
  if (IsSigned) {
    MinInt = APInt::getSignedMinValue(SatWidth).sextOrSelf(DstWidth);
    MaxInt = APInt::getSignedMaxValue(SatWidth).sextOrSelf(DstWidth);
  } else {
    MinInt = APInt::getMinValue(SatWidth).zextOrSelf(DstWidth);
    MaxInt = APInt::getMaxValue(SatWidth).zextOrSelf(DstWidth);
  }

  // We cannot risk emitting FP_TO_XINT nodes with a source VT of f16, as
  // libcall emission cannot handle this; large result types would fail.
  if (SrcVT == MVT::f16) {
    Src = DAG.getNode(ISD::FP_EXTEND, dl, MVT::f32, Src);
    SrcVT = Src.getValueType();
  }

  APFloat MinFloat(DAG.EVTToAPFloatSemantics(SrcVT));
  APFloat MaxFloat(DAG.EVTToAPFloatSemantics(SrcVT));

  APFloat::opStatus MinStatus =
      MinFloat.convertFromAPInt(MinInt, IsSigned, APFloat::rmTowardZero);
  APFloat::opStatus MaxStatus =
      MaxFloat.convertFromAPInt(MaxInt, IsSigned, APFloat::rmTowardZero);
  bool AreExactFloatBounds = !(MinStatus & APFloat::opStatus::opInexact) &&
                             !(MaxStatus & APFloat::opStatus::opInexact);

  SDValue MinFloatNode = DAG.getConstantFP(MinFloat, dl, SrcVT);
  SDValue MaxFloatNode = DAG.getConstantFP(MaxFloat, dl, SrcVT);

  // If the integer bounds are exactly representable as floats and min/max are
  // legal, emit a min+max+fptoi sequence. Otherwise we have to use a sequence
  // of comparisons and selects.
  bool MinMaxLegal = isOperationLegal(ISD::FMINNUM, SrcVT) &&
                     isOperationLegal(ISD::FMAXNUM, SrcVT);
  if (AreExactFloatBounds && MinMaxLegal) {
    SDValue Clamped = Src;

    // Clamp Src by MinFloat from below. If Src is NaN, the result is MinFloat.
    Clamped = DAG.getNode(ISD::FMAXNUM, dl, SrcVT, Clamped, MinFloatNode);
    // Clamp by MaxFloat from above. NaN can no longer occur.
    Clamped = DAG.getNode(ISD::FMINNUM, dl, SrcVT, Clamped, MaxFloatNode);
    // Convert the clamped value to an integer.
    SDValue FpToInt = DAG.getNode(IsSigned ? ISD::FP_TO_SINT : ISD::FP_TO_UINT,
                                  dl, DstVT, Clamped);

    // In the unsigned case we're done, because we mapped NaN to MinFloat,
    // which will cast to zero.
    if (!IsSigned)
      return FpToInt;

    // Otherwise, select 0 if Src is NaN.
    SDValue ZeroInt = DAG.getConstant(0, dl, DstVT);
    return DAG.getSelectCC(dl, Src, Src, ZeroInt, FpToInt,
                           ISD::CondCode::SETUO);
  }

  SDValue MinIntNode = DAG.getConstant(MinInt, dl, DstVT);
  SDValue MaxIntNode = DAG.getConstant(MaxInt, dl, DstVT);

  // Result of direct conversion. The assumption here is that the operation is
  // non-trapping and it's fine to apply it to an out-of-range value if we
  // select it away later.
  SDValue FpToInt =
      DAG.getNode(IsSigned ? ISD::FP_TO_SINT : ISD::FP_TO_UINT, dl, DstVT, Src);

  SDValue Select = FpToInt;

  // If Src ULT MinFloat, select MinInt. In particular, this also selects
  // MinInt if Src is NaN.
  Select = DAG.getSelectCC(dl, Src, MinFloatNode, MinIntNode, Select,
                           ISD::CondCode::SETULT);
  // If Src OGT MaxFloat, select MaxInt.
  Select = DAG.getSelectCC(dl, Src, MaxFloatNode, MaxIntNode, Select,
                           ISD::CondCode::SETOGT);

  // In the unsigned case we are done, because we mapped NaN to MinInt, which
  // is already zero.
  if (!IsSigned)
    return Select;

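  // Note (illustrative): SETULT is an unordered comparison, so for a NaN
  // source the first select above already chose MinInt. That is the required
  // zero result in the unsigned case, but in the signed case MinInt is e.g.
  // -128 rather than the required 0, hence the extra NaN select below.
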
  // Otherwise, select 0 if Src is NaN.
  SDValue ZeroInt = DAG.getConstant(0, dl, DstVT);
  return DAG.getSelectCC(dl, Src, Src, ZeroInt, Select, ISD::CondCode::SETUO);
}

SDValue TargetLowering::expandVectorSplice(SDNode *Node,
                                           SelectionDAG &DAG) const {
  assert(Node->getOpcode() == ISD::VECTOR_SPLICE && "Unexpected opcode!");
  assert(Node->getValueType(0).isScalableVector() &&
         "Fixed length vector types expected to use SHUFFLE_VECTOR!");

  EVT VT = Node->getValueType(0);
  SDValue V1 = Node->getOperand(0);
  SDValue V2 = Node->getOperand(1);
  int64_t Imm = cast<ConstantSDNode>(Node->getOperand(2))->getSExtValue();
  SDLoc DL(Node);

  // Expand through memory as follows:
  // Alloca CONCAT_VECTORS_TYPES(V1, V2) Ptr
  // Store V1, Ptr
  // Store V2, Ptr + sizeof(V1)
  // If (Imm < 0)
  //   TrailingElts = -Imm
  //   Ptr = Ptr + sizeof(V1) - (TrailingElts * sizeof(VT.Elt))
  // else
  //   Ptr = Ptr + (Imm * sizeof(VT.Elt))
  // Res = Load Ptr

  Align Alignment = DAG.getReducedAlign(VT, /*UseABI=*/false);

  EVT MemVT = EVT::getVectorVT(*DAG.getContext(), VT.getVectorElementType(),
                               VT.getVectorElementCount() * 2);
  SDValue StackPtr = DAG.CreateStackTemporary(MemVT.getStoreSize(), Alignment);
  EVT PtrVT = StackPtr.getValueType();
  auto &MF = DAG.getMachineFunction();
  auto FrameIndex = cast<FrameIndexSDNode>(StackPtr.getNode())->getIndex();
  auto PtrInfo = MachinePointerInfo::getFixedStack(MF, FrameIndex);

  // Store the lo part of CONCAT_VECTORS(V1, V2).
  SDValue StoreV1 = DAG.getStore(DAG.getEntryNode(), DL, V1, StackPtr, PtrInfo);
  // Store the hi part of CONCAT_VECTORS(V1, V2).
  SDValue OffsetToV2 = DAG.getVScale(
      DL, PtrVT,
      APInt(PtrVT.getFixedSizeInBits(), VT.getStoreSize().getKnownMinSize()));
  SDValue StackPtr2 = DAG.getNode(ISD::ADD, DL, PtrVT, StackPtr, OffsetToV2);
  SDValue StoreV2 = DAG.getStore(StoreV1, DL, V2, StackPtr2, PtrInfo);

  if (Imm >= 0) {
    // Load back the required element. getVectorElementPointer takes care of
    // clamping the index if it's out-of-bounds.
    StackPtr = getVectorElementPointer(DAG, StackPtr, VT, Node->getOperand(2));
    // Load the spliced result.
    return DAG.getLoad(VT, DL, StoreV2, StackPtr,
                       MachinePointerInfo::getUnknownStack(MF));
  }

  uint64_t TrailingElts = -Imm;

  // NOTE: TrailingElts must be clamped so as not to read outside of V1:V2.
  TypeSize EltByteSize = VT.getVectorElementType().getStoreSize();
  SDValue TrailingBytes =
      DAG.getConstant(TrailingElts * EltByteSize, DL, PtrVT);

  if (TrailingElts > VT.getVectorMinNumElements()) {
    SDValue VLBytes = DAG.getVScale(
        DL, PtrVT,
        APInt(PtrVT.getFixedSizeInBits(), VT.getStoreSize().getKnownMinSize()));
    TrailingBytes = DAG.getNode(ISD::UMIN, DL, PtrVT, TrailingBytes, VLBytes);
  }

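  // For example (illustration only): for <vscale x 4 x i32> with Imm = -2,
  // TrailingBytes is 2 * 4 = 8, so the load below starts 8 bytes before the
  // end of V1's storage and yields V1's last two elements followed by the
  // leading elements of V2.
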
  // Calculate the start address of the spliced result.
  StackPtr2 = DAG.getNode(ISD::SUB, DL, PtrVT, StackPtr2, TrailingBytes);

  // Load the spliced result.
  return DAG.getLoad(VT, DL, StoreV2, StackPtr2,
                     MachinePointerInfo::getUnknownStack(MF));
}

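// Illustrative note on the expansion strategy below (example only): a SETONE
// that is not legal for the target can be rewritten as (SETOGT || SETOLT),
// and SETUEQ as the inversion of that same pair; only one of SETOGT/SETOLT
// needs to be legal, since the other can be formed by swapping the operands.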
bool TargetLowering::LegalizeSetCCCondCode(SelectionDAG &DAG, EVT VT,
                                           SDValue &LHS, SDValue &RHS,
                                           SDValue &CC, SDValue Mask,
                                           SDValue EVL, bool &NeedInvert,
                                           const SDLoc &dl, SDValue &Chain,
                                           bool IsSignaling) const {
  const TargetLowering &TLI = DAG.getTargetLoweringInfo();
  MVT OpVT = LHS.getSimpleValueType();
  ISD::CondCode CCCode = cast<CondCodeSDNode>(CC)->get();
  NeedInvert = false;
  assert(!EVL == !Mask && "VP Mask and EVL must either both be set or unset");
  bool IsNonVP = !EVL;
  switch (TLI.getCondCodeAction(CCCode, OpVT)) {
  default:
    llvm_unreachable("Unknown condition code action!");
  case TargetLowering::Legal:
    // Nothing to do.
    break;
  case TargetLowering::Expand: {
    ISD::CondCode InvCC = ISD::getSetCCSwappedOperands(CCCode);
    if (TLI.isCondCodeLegalOrCustom(InvCC, OpVT)) {
      std::swap(LHS, RHS);
      CC = DAG.getCondCode(InvCC);
      return true;
    }
    // Swapping operands didn't work. Try inverting the condition.
    bool NeedSwap = false;
    InvCC = getSetCCInverse(CCCode, OpVT);
    if (!TLI.isCondCodeLegalOrCustom(InvCC, OpVT)) {
      // If inverting the condition is not enough, try swapping operands
      // on top of it.
      InvCC = ISD::getSetCCSwappedOperands(InvCC);
      NeedSwap = true;
    }
    if (TLI.isCondCodeLegalOrCustom(InvCC, OpVT)) {
      CC = DAG.getCondCode(InvCC);
      NeedInvert = true;
      if (NeedSwap)
        std::swap(LHS, RHS);
      return true;
    }

    ISD::CondCode CC1 = ISD::SETCC_INVALID, CC2 = ISD::SETCC_INVALID;
    unsigned Opc = 0;
    switch (CCCode) {
    default:
      llvm_unreachable("Don't know how to expand this condition!");
    case ISD::SETUO:
      if (TLI.isCondCodeLegal(ISD::SETUNE, OpVT)) {
        CC1 = ISD::SETUNE;
        CC2 = ISD::SETUNE;
        Opc = ISD::OR;
        break;
      }
      assert(TLI.isCondCodeLegal(ISD::SETOEQ, OpVT) &&
             "If SETUO is expanded, SETOEQ or SETUNE must be legal!");
      NeedInvert = true;
      LLVM_FALLTHROUGH;
    case ISD::SETO:
      assert(TLI.isCondCodeLegal(ISD::SETOEQ, OpVT) &&
             "If SETO is expanded, SETOEQ must be legal!");
      CC1 = ISD::SETOEQ;
      CC2 = ISD::SETOEQ;
      Opc = ISD::AND;
      break;
    case ISD::SETONE:
    case ISD::SETUEQ:
      // If the SETUO or SETO CC isn't legal, we might be able to use
      // SETOGT || SETOLT, inverting the result for SETUEQ. We only need one
      // of SETOGT/SETOLT to be legal; the other can be emulated by swapping
      // the operands.
      CC2 = ((unsigned)CCCode & 0x8U) ? ISD::SETUO : ISD::SETO;
      if (!TLI.isCondCodeLegal(CC2, OpVT) &&
          (TLI.isCondCodeLegal(ISD::SETOGT, OpVT) ||
           TLI.isCondCodeLegal(ISD::SETOLT, OpVT))) {
        CC1 = ISD::SETOGT;
        CC2 = ISD::SETOLT;
        Opc = ISD::OR;
        NeedInvert = ((unsigned)CCCode & 0x8U);
        break;
      }
      LLVM_FALLTHROUGH;
    case ISD::SETOEQ:
    case ISD::SETOGT:
    case ISD::SETOGE:
    case ISD::SETOLT:
    case ISD::SETOLE:
    case ISD::SETUNE:
    case ISD::SETUGT:
    case ISD::SETUGE:
    case ISD::SETULT:
    case ISD::SETULE:
      // If we are floating point, assign and break; otherwise fall through.
      if (!OpVT.isInteger()) {
        // We can use the 4th bit to tell whether we have the unordered or the
        // ordered version of the opcode.
        CC2 = ((unsigned)CCCode & 0x8U) ? ISD::SETUO : ISD::SETO;
        Opc = ((unsigned)CCCode & 0x8U) ? ISD::OR : ISD::AND;
        CC1 = (ISD::CondCode)(((int)CCCode & 0x7) | 0x10);
        break;
      }
      // Fall through if this is an integer comparison.
      LLVM_FALLTHROUGH;
    case ISD::SETLE:
    case ISD::SETGT:
    case ISD::SETGE:
    case ISD::SETLT:
    case ISD::SETNE:
    case ISD::SETEQ:
      // If all combinations of inverting the condition and swapping operands
      // didn't work then we have no means to expand the condition.
      llvm_unreachable("Don't know how to expand this condition!");
    }

    SDValue SetCC1, SetCC2;
    if (CCCode != ISD::SETO && CCCode != ISD::SETUO) {
      // If we aren't the ordered or unordered operation,
      // then the pattern is (LHS CC1 RHS) Opc (LHS CC2 RHS).
      if (IsNonVP) {
        SetCC1 = DAG.getSetCC(dl, VT, LHS, RHS, CC1, Chain, IsSignaling);
        SetCC2 = DAG.getSetCC(dl, VT, LHS, RHS, CC2, Chain, IsSignaling);
      } else {
        SetCC1 = DAG.getSetCCVP(dl, VT, LHS, RHS, CC1, Mask, EVL);
        SetCC2 = DAG.getSetCCVP(dl, VT, LHS, RHS, CC2, Mask, EVL);
      }
    } else {
      // Otherwise, the pattern is (LHS CC1 LHS) Opc (RHS CC2 RHS).
      if (IsNonVP) {
        SetCC1 = DAG.getSetCC(dl, VT, LHS, LHS, CC1, Chain, IsSignaling);
        SetCC2 = DAG.getSetCC(dl, VT, RHS, RHS, CC2, Chain, IsSignaling);
      } else {
        SetCC1 = DAG.getSetCCVP(dl, VT, LHS, LHS, CC1, Mask, EVL);
        SetCC2 = DAG.getSetCCVP(dl, VT, RHS, RHS, CC2, Mask, EVL);
      }
    }
    if (Chain)
      Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, SetCC1.getValue(1),
                          SetCC2.getValue(1));
    if (IsNonVP)
      LHS = DAG.getNode(Opc, dl, VT, SetCC1, SetCC2);
    else {
      // Transform the binary opcode to the VP equivalent.
      assert((Opc == ISD::OR || Opc == ISD::AND) && "Unexpected opcode");
      Opc = Opc == ISD::OR ? ISD::VP_OR : ISD::VP_AND;
      LHS = DAG.getNode(Opc, dl, VT, SetCC1, SetCC2, Mask, EVL);
    }
    RHS = SDValue();
    CC = SDValue();
    return true;
  }
  }
  return false;
}