//===-- TargetLowering.cpp - Implement the TargetLowering class -----------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This implements the TargetLowering class.
//
//===----------------------------------------------------------------------===//

#include "llvm/CodeGen/TargetLowering.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/CodeGen/CallingConvLower.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineJumpTableInfo.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/SelectionDAG.h"
#include "llvm/CodeGen/TargetRegisterInfo.h"
#include "llvm/CodeGen/TargetSubtargetInfo.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/GlobalVariable.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/MC/MCAsmInfo.h"
#include "llvm/MC/MCExpr.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/KnownBits.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/Target/TargetLoweringObjectFile.h"
#include "llvm/Target/TargetMachine.h"
#include <cctype>
using namespace llvm;

/// NOTE: The TargetMachine owns TLOF.
TargetLowering::TargetLowering(const TargetMachine &tm)
    : TargetLoweringBase(tm) {}

const char *TargetLowering::getTargetNodeName(unsigned Opcode) const {
  return nullptr;
}

bool TargetLowering::isPositionIndependent() const {
  return getTargetMachine().isPositionIndependent();
}

/// Check whether a given call node is in tail position within its function. If
/// so, it sets Chain to the input chain of the tail call.
bool TargetLowering::isInTailCallPosition(SelectionDAG &DAG, SDNode *Node,
                                          SDValue &Chain) const {
  const Function &F = DAG.getMachineFunction().getFunction();

  // First, check if tail calls have been disabled in this function.
  if (F.getFnAttribute("disable-tail-calls").getValueAsString() == "true")
    return false;

  // Conservatively require the attributes of the call to match those of
  // the return. Ignore NoAlias and NonNull because they don't affect the
  // call sequence.
  AttributeList CallerAttrs = F.getAttributes();
  if (AttrBuilder(CallerAttrs, AttributeList::ReturnIndex)
          .removeAttribute(Attribute::NoAlias)
          .removeAttribute(Attribute::NonNull)
          .hasAttributes())
    return false;

  // It's not safe to eliminate the sign / zero extension of the return value.
  if (CallerAttrs.hasAttribute(AttributeList::ReturnIndex, Attribute::ZExt) ||
      CallerAttrs.hasAttribute(AttributeList::ReturnIndex, Attribute::SExt))
    return false;

  // Check if the only use is a function return node.
  return isUsedByReturnOnly(Node, Chain);
}

bool TargetLowering::parametersInCSRMatch(
    const MachineRegisterInfo &MRI, const uint32_t *CallerPreservedMask,
    const SmallVectorImpl<CCValAssign> &ArgLocs,
    const SmallVectorImpl<SDValue> &OutVals) const {
  for (unsigned I = 0, E = ArgLocs.size(); I != E; ++I) {
    const CCValAssign &ArgLoc = ArgLocs[I];
    if (!ArgLoc.isRegLoc())
      continue;
    Register Reg = ArgLoc.getLocReg();
    // Only look at callee saved registers.
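    // (A register that the caller-preserved mask reports as clobbered is not
    // callee-saved, so a mismatch in it cannot invalidate the tail call and
    // is skipped below.)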
    if (MachineOperand::clobbersPhysReg(CallerPreservedMask, Reg))
      continue;
    // Check that we pass the value used for the caller.
    // (We look for a CopyFromReg reading a virtual register that is used
    // for the function live-in value of register Reg)
    SDValue Value = OutVals[I];
    if (Value->getOpcode() != ISD::CopyFromReg)
      return false;
    unsigned ArgReg = cast<RegisterSDNode>(Value->getOperand(1))->getReg();
    if (MRI.getLiveInPhysReg(ArgReg) != Reg)
      return false;
  }
  return true;
}

/// Set CallLoweringInfo attribute flags based on a call instruction
/// and called function attributes.
void TargetLoweringBase::ArgListEntry::setAttributes(const CallBase *Call,
                                                     unsigned ArgIdx) {
  IsSExt = Call->paramHasAttr(ArgIdx, Attribute::SExt);
  IsZExt = Call->paramHasAttr(ArgIdx, Attribute::ZExt);
  IsInReg = Call->paramHasAttr(ArgIdx, Attribute::InReg);
  IsSRet = Call->paramHasAttr(ArgIdx, Attribute::StructRet);
  IsNest = Call->paramHasAttr(ArgIdx, Attribute::Nest);
  IsByVal = Call->paramHasAttr(ArgIdx, Attribute::ByVal);
  IsInAlloca = Call->paramHasAttr(ArgIdx, Attribute::InAlloca);
  IsReturned = Call->paramHasAttr(ArgIdx, Attribute::Returned);
  IsSwiftSelf = Call->paramHasAttr(ArgIdx, Attribute::SwiftSelf);
  IsSwiftError = Call->paramHasAttr(ArgIdx, Attribute::SwiftError);
  Alignment = Call->getParamAlignment(ArgIdx);
  ByValType = nullptr;
  if (Call->paramHasAttr(ArgIdx, Attribute::ByVal))
    ByValType = Call->getParamByValType(ArgIdx);
}

/// Generate a libcall taking the given operands as arguments and returning a
/// result of type RetVT.
std::pair<SDValue, SDValue>
TargetLowering::makeLibCall(SelectionDAG &DAG, RTLIB::Libcall LC, EVT RetVT,
                            ArrayRef<SDValue> Ops,
                            MakeLibCallOptions CallOptions,
                            const SDLoc &dl,
                            SDValue InChain) const {
  if (!InChain)
    InChain = DAG.getEntryNode();

  TargetLowering::ArgListTy Args;
  Args.reserve(Ops.size());

  TargetLowering::ArgListEntry Entry;
  for (unsigned i = 0; i < Ops.size(); ++i) {
    SDValue NewOp = Ops[i];
    Entry.Node = NewOp;
    Entry.Ty = Entry.Node.getValueType().getTypeForEVT(*DAG.getContext());
    Entry.IsSExt = shouldSignExtendTypeInLibCall(NewOp.getValueType(),
                                                 CallOptions.IsSExt);
    Entry.IsZExt = !Entry.IsSExt;

    if (CallOptions.IsSoften &&
        !shouldExtendTypeInLibCall(CallOptions.OpsVTBeforeSoften[i])) {
      Entry.IsSExt = Entry.IsZExt = false;
    }
    Args.push_back(Entry);
  }

  if (LC == RTLIB::UNKNOWN_LIBCALL)
    report_fatal_error("Unsupported library call operation!");
  SDValue Callee = DAG.getExternalSymbol(getLibcallName(LC),
                                         getPointerTy(DAG.getDataLayout()));

  Type *RetTy = RetVT.getTypeForEVT(*DAG.getContext());
  TargetLowering::CallLoweringInfo CLI(DAG);
  bool signExtend = shouldSignExtendTypeInLibCall(RetVT, CallOptions.IsSExt);
  bool zeroExtend = !signExtend;

  if (CallOptions.IsSoften &&
      !shouldExtendTypeInLibCall(CallOptions.RetVTBeforeSoften)) {
    signExtend = zeroExtend = false;
  }

  CLI.setDebugLoc(dl)
      .setChain(InChain)
      .setLibCallee(getLibcallCallingConv(LC), RetTy, Callee, std::move(Args))
      .setNoReturn(CallOptions.DoesNotReturn)
      .setDiscardResult(!CallOptions.IsReturnValueUsed)
      .setIsPostTypeLegalization(CallOptions.IsPostTypeLegalization)
      .setSExtResult(signExtend)
      .setZExtResult(zeroExtend);
  return LowerCallTo(CLI);
}
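
// Illustrative sketch of how a target might use makeLibCall (not called from
// this file): expanding an f128 addition into a call to the corresponding
// RTLIB routine. LHS, RHS, TLI, DAG and dl are assumed to be in scope.
//
//   TargetLowering::MakeLibCallOptions CallOptions;
//   SDValue Ops[2] = {LHS, RHS};
//   std::pair<SDValue, SDValue> Res =
//       TLI.makeLibCall(DAG, RTLIB::ADD_F128, MVT::f128, Ops, CallOptions, dl);
//   // Res.first is the call's result value, Res.second the updated chain.
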
bool TargetLowering::findOptimalMemOpLowering(
    std::vector<EVT> &MemOps, unsigned Limit, uint64_t Size, unsigned DstAlign,
    unsigned SrcAlign, bool IsMemset, bool ZeroMemset, bool MemcpyStrSrc,
    bool AllowOverlap, unsigned DstAS, unsigned SrcAS,
    const AttributeList &FuncAttributes) const {
  // If 'SrcAlign' is zero, that means the memory operation does not need to
  // load the value, i.e. memset or memcpy from constant string. Otherwise,
  // it's the inferred alignment of the source. 'DstAlign', on the other hand,
  // is the specified alignment of the memory operation. If it is zero, that
  // means it's possible to change the alignment of the destination.
  // 'MemcpyStrSrc' indicates whether the memcpy source is constant so it does
  // not need to be loaded.
  if (!(SrcAlign == 0 || SrcAlign >= DstAlign))
    return false;

  EVT VT = getOptimalMemOpType(Size, DstAlign, SrcAlign,
                               IsMemset, ZeroMemset, MemcpyStrSrc,
                               FuncAttributes);

  if (VT == MVT::Other) {
    // Use the largest integer type whose alignment constraints are satisfied.
    // We only need to check DstAlign here as SrcAlign is always greater or
    // equal to DstAlign (or zero).
    VT = MVT::i64;
    while (DstAlign && DstAlign < VT.getSizeInBits() / 8 &&
           !allowsMisalignedMemoryAccesses(VT, DstAS, DstAlign))
      VT = (MVT::SimpleValueType)(VT.getSimpleVT().SimpleTy - 1);
    assert(VT.isInteger());

    // Find the largest legal integer type.
    MVT LVT = MVT::i64;
    while (!isTypeLegal(LVT))
      LVT = (MVT::SimpleValueType)(LVT.SimpleTy - 1);
    assert(LVT.isInteger());

    // If the type we've chosen is larger than the largest legal integer type
    // then use that instead.
    if (VT.bitsGT(LVT))
      VT = LVT;
  }

  unsigned NumMemOps = 0;
  while (Size != 0) {
    unsigned VTSize = VT.getSizeInBits() / 8;
    while (VTSize > Size) {
      // For now, only use non-vector loads / stores for the left-over pieces.
      EVT NewVT = VT;
      unsigned NewVTSize;

      bool Found = false;
      if (VT.isVector() || VT.isFloatingPoint()) {
        NewVT = (VT.getSizeInBits() > 64) ? MVT::i64 : MVT::i32;
        if (isOperationLegalOrCustom(ISD::STORE, NewVT) &&
            isSafeMemOpType(NewVT.getSimpleVT()))
          Found = true;
        else if (NewVT == MVT::i64 &&
                 isOperationLegalOrCustom(ISD::STORE, MVT::f64) &&
                 isSafeMemOpType(MVT::f64)) {
          // i64 is usually not legal on 32-bit targets, but f64 may be.
          NewVT = MVT::f64;
          Found = true;
        }
      }

      if (!Found) {
        do {
          NewVT = (MVT::SimpleValueType)(NewVT.getSimpleVT().SimpleTy - 1);
          if (NewVT == MVT::i8)
            break;
        } while (!isSafeMemOpType(NewVT.getSimpleVT()));
      }
      NewVTSize = NewVT.getSizeInBits() / 8;

      // If the new VT cannot cover all of the remaining bits, then consider
      // issuing a (or a pair of) unaligned and overlapping load / store.
      bool Fast;
      if (NumMemOps && AllowOverlap && NewVTSize < Size &&
          allowsMisalignedMemoryAccesses(VT, DstAS, DstAlign,
                                         MachineMemOperand::MONone, &Fast) &&
          Fast)
        VTSize = Size;
      else {
        VT = NewVT;
        VTSize = NewVTSize;
      }
    }

    if (++NumMemOps > Limit)
      return false;

    MemOps.push_back(VT);
    Size -= VTSize;
  }

  return true;
}
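
// Worked example: for a 13-byte memcpy with Limit >= 3, on a target where
// getOptimalMemOpType returns MVT::Other and an aligned i64 is usable, the
// loop above produces MemOps = {i64, i32, i8} (8 + 4 + 1 bytes), stepping the
// type down for each tail piece. If AllowOverlap holds and unaligned accesses
// are fast, it instead emits a second i64 that overlaps the first by three
// bytes, covering the 13 bytes with just two operations.
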
/// Soften the operands of a comparison. This code is shared among BR_CC,
/// SELECT_CC, and SETCC handlers.
void TargetLowering::softenSetCCOperands(SelectionDAG &DAG, EVT VT,
                                         SDValue &NewLHS, SDValue &NewRHS,
                                         ISD::CondCode &CCCode,
                                         const SDLoc &dl, const SDValue OldLHS,
                                         const SDValue OldRHS) const {
  SDValue Chain;
  return softenSetCCOperands(DAG, VT, NewLHS, NewRHS, CCCode, dl, OldLHS,
                             OldRHS, Chain);
}

void TargetLowering::softenSetCCOperands(SelectionDAG &DAG, EVT VT,
                                         SDValue &NewLHS, SDValue &NewRHS,
                                         ISD::CondCode &CCCode,
                                         const SDLoc &dl, const SDValue OldLHS,
                                         const SDValue OldRHS,
                                         SDValue &Chain,
                                         bool IsSignaling) const {
  // FIXME: Currently we cannot really respect all IEEE predicates due to
  // libgcc not supporting them. We can update this code when libgcc provides
  // such functions.

  assert((VT == MVT::f32 || VT == MVT::f64 || VT == MVT::f128 ||
          VT == MVT::ppcf128) &&
         "Unsupported setcc type!");

  // Expand into one or more soft-fp libcall(s).
  RTLIB::Libcall LC1 = RTLIB::UNKNOWN_LIBCALL, LC2 = RTLIB::UNKNOWN_LIBCALL;
  bool ShouldInvertCC = false;
  switch (CCCode) {
  case ISD::SETEQ:
  case ISD::SETOEQ:
    LC1 = (VT == MVT::f32) ? RTLIB::OEQ_F32 :
          (VT == MVT::f64) ? RTLIB::OEQ_F64 :
          (VT == MVT::f128) ? RTLIB::OEQ_F128 : RTLIB::OEQ_PPCF128;
    break;
  case ISD::SETNE:
  case ISD::SETUNE:
    LC1 = (VT == MVT::f32) ? RTLIB::UNE_F32 :
          (VT == MVT::f64) ? RTLIB::UNE_F64 :
          (VT == MVT::f128) ? RTLIB::UNE_F128 : RTLIB::UNE_PPCF128;
    break;
  case ISD::SETGE:
  case ISD::SETOGE:
    LC1 = (VT == MVT::f32) ? RTLIB::OGE_F32 :
          (VT == MVT::f64) ? RTLIB::OGE_F64 :
          (VT == MVT::f128) ? RTLIB::OGE_F128 : RTLIB::OGE_PPCF128;
    break;
  case ISD::SETLT:
  case ISD::SETOLT:
    LC1 = (VT == MVT::f32) ? RTLIB::OLT_F32 :
          (VT == MVT::f64) ? RTLIB::OLT_F64 :
          (VT == MVT::f128) ? RTLIB::OLT_F128 : RTLIB::OLT_PPCF128;
    break;
  case ISD::SETLE:
  case ISD::SETOLE:
    LC1 = (VT == MVT::f32) ? RTLIB::OLE_F32 :
          (VT == MVT::f64) ? RTLIB::OLE_F64 :
          (VT == MVT::f128) ? RTLIB::OLE_F128 : RTLIB::OLE_PPCF128;
    break;
  case ISD::SETGT:
  case ISD::SETOGT:
    LC1 = (VT == MVT::f32) ? RTLIB::OGT_F32 :
          (VT == MVT::f64) ? RTLIB::OGT_F64 :
          (VT == MVT::f128) ? RTLIB::OGT_F128 : RTLIB::OGT_PPCF128;
    break;
  case ISD::SETO:
    ShouldInvertCC = true;
    LLVM_FALLTHROUGH;
  case ISD::SETUO:
    LC1 = (VT == MVT::f32) ? RTLIB::UO_F32 :
          (VT == MVT::f64) ? RTLIB::UO_F64 :
          (VT == MVT::f128) ? RTLIB::UO_F128 : RTLIB::UO_PPCF128;
    break;
  case ISD::SETONE:
    // SETONE = O && UNE
    ShouldInvertCC = true;
    LLVM_FALLTHROUGH;
  case ISD::SETUEQ:
    LC1 = (VT == MVT::f32) ? RTLIB::UO_F32 :
          (VT == MVT::f64) ? RTLIB::UO_F64 :
          (VT == MVT::f128) ? RTLIB::UO_F128 : RTLIB::UO_PPCF128;
    LC2 = (VT == MVT::f32) ? RTLIB::OEQ_F32 :
          (VT == MVT::f64) ? RTLIB::OEQ_F64 :
          (VT == MVT::f128) ? RTLIB::OEQ_F128 : RTLIB::OEQ_PPCF128;
    break;
  default:
    // Invert CC for unordered comparisons
    ShouldInvertCC = true;
    switch (CCCode) {
    case ISD::SETULT:
      LC1 = (VT == MVT::f32) ? RTLIB::OGE_F32 :
            (VT == MVT::f64) ? RTLIB::OGE_F64 :
            (VT == MVT::f128) ? RTLIB::OGE_F128 : RTLIB::OGE_PPCF128;
      break;
    case ISD::SETULE:
      LC1 = (VT == MVT::f32) ? RTLIB::OGT_F32 :
            (VT == MVT::f64) ? RTLIB::OGT_F64 :
            (VT == MVT::f128) ? RTLIB::OGT_F128 : RTLIB::OGT_PPCF128;
      break;
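    // For the two cases above: on targets using the libgcc comparison
    // routines, e.g. (setcc x, y, setult) on f32 becomes
    // (setcc (call __gesf2, x, y), 0, setlt), since the unordered 'ult'
    // holds exactly when the ordered 'oge' comparison fails.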
    case ISD::SETUGT:
      LC1 = (VT == MVT::f32) ? RTLIB::OLE_F32 :
            (VT == MVT::f64) ? RTLIB::OLE_F64 :
            (VT == MVT::f128) ? RTLIB::OLE_F128 : RTLIB::OLE_PPCF128;
      break;
    case ISD::SETUGE:
      LC1 = (VT == MVT::f32) ? RTLIB::OLT_F32 :
            (VT == MVT::f64) ? RTLIB::OLT_F64 :
            (VT == MVT::f128) ? RTLIB::OLT_F128 : RTLIB::OLT_PPCF128;
      break;
    default: llvm_unreachable("Do not know how to soften this setcc!");
    }
  }

  // Use the target specific return value for comparison lib calls.
  EVT RetVT = getCmpLibcallReturnType();
  SDValue Ops[2] = {NewLHS, NewRHS};
  TargetLowering::MakeLibCallOptions CallOptions;
  EVT OpsVT[2] = { OldLHS.getValueType(),
                   OldRHS.getValueType() };
  CallOptions.setTypeListBeforeSoften(OpsVT, RetVT, true);
  auto Call = makeLibCall(DAG, LC1, RetVT, Ops, CallOptions, dl, Chain);
  NewLHS = Call.first;
  NewRHS = DAG.getConstant(0, dl, RetVT);

  CCCode = getCmpLibcallCC(LC1);
  if (ShouldInvertCC) {
    assert(RetVT.isInteger());
    CCCode = getSetCCInverse(CCCode, RetVT);
  }

  if (LC2 == RTLIB::UNKNOWN_LIBCALL) {
    // Update Chain.
    Chain = Call.second;
  } else {
    EVT SetCCVT =
        getSetCCResultType(DAG.getDataLayout(), *DAG.getContext(), RetVT);
    SDValue Tmp = DAG.getSetCC(dl, SetCCVT, NewLHS, NewRHS, CCCode);
    auto Call2 = makeLibCall(DAG, LC2, RetVT, Ops, CallOptions, dl, Chain);
    CCCode = getCmpLibcallCC(LC2);
    if (ShouldInvertCC)
      CCCode = getSetCCInverse(CCCode, RetVT);
    NewLHS = DAG.getSetCC(dl, SetCCVT, Call2.first, NewRHS, CCCode);
    if (Chain)
      Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, Call.second,
                          Call2.second);
    NewLHS = DAG.getNode(ShouldInvertCC ? ISD::AND : ISD::OR, dl,
                         Tmp.getValueType(), Tmp, NewLHS);
    NewRHS = SDValue();
  }
}

/// Return the entry encoding for a jump table in the current function. The
/// returned value is a member of the MachineJumpTableInfo::JTEntryKind enum.
unsigned TargetLowering::getJumpTableEncoding() const {
  // In non-pic modes, just use the address of a block.
  if (!isPositionIndependent())
    return MachineJumpTableInfo::EK_BlockAddress;

  // In PIC mode, if the target supports a GPRel32 directive, use it.
  if (getTargetMachine().getMCAsmInfo()->getGPRel32Directive() != nullptr)
    return MachineJumpTableInfo::EK_GPRel32BlockAddress;

  // Otherwise, use a label difference.
  return MachineJumpTableInfo::EK_LabelDifference32;
}

SDValue TargetLowering::getPICJumpTableRelocBase(SDValue Table,
                                                 SelectionDAG &DAG) const {
  // If our PIC model is GP relative, use the global offset table as the base.
  unsigned JTEncoding = getJumpTableEncoding();

  if ((JTEncoding == MachineJumpTableInfo::EK_GPRel64BlockAddress) ||
      (JTEncoding == MachineJumpTableInfo::EK_GPRel32BlockAddress))
    return DAG.getGLOBAL_OFFSET_TABLE(getPointerTy(DAG.getDataLayout()));

  return Table;
}

/// This returns the relocation base for the given PIC jumptable, the same as
/// getPICJumpTableRelocBase, but as an MCExpr.
const MCExpr *
TargetLowering::getPICJumpTableRelocBaseExpr(const MachineFunction *MF,
                                             unsigned JTI,
                                             MCContext &Ctx) const {
  // The normal PIC reloc base is the label at the start of the jump table.
  return MCSymbolRefExpr::create(MF->getJTISymbol(JTI, Ctx), Ctx);
}

bool
TargetLowering::isOffsetFoldingLegal(const GlobalAddressSDNode *GA) const {
  const TargetMachine &TM = getTargetMachine();
  const GlobalValue *GV = GA->getGlobal();

  // If the address is not even local to this DSO we will have to load it from
  // a GOT and then add the offset.
  if (!TM.shouldAssumeDSOLocal(*GV->getParent(), GV))
    return false;

  // If the code is position independent we will have to add a base register.
  if (isPositionIndependent())
    return false;

  // Otherwise we can do it.
  return true;
}

//===----------------------------------------------------------------------===//
//  Optimization Methods
//===----------------------------------------------------------------------===//

/// If the specified instruction has a constant integer operand and there are
/// bits set in that constant that are not demanded, then clear those bits and
/// return true.
bool TargetLowering::ShrinkDemandedConstant(SDValue Op, const APInt &Demanded,
                                            TargetLoweringOpt &TLO) const {
  SDLoc DL(Op);
  unsigned Opcode = Op.getOpcode();

  // Do target-specific constant optimization.
  if (targetShrinkDemandedConstant(Op, Demanded, TLO))
    return TLO.New.getNode();

  // FIXME: ISD::SELECT, ISD::SELECT_CC
  switch (Opcode) {
  default:
    break;
  case ISD::XOR:
  case ISD::AND:
  case ISD::OR: {
    auto *Op1C = dyn_cast<ConstantSDNode>(Op.getOperand(1));
    if (!Op1C)
      return false;

    // If this is a 'not' op, don't touch it because that's a canonical form.
    const APInt &C = Op1C->getAPIntValue();
    if (Opcode == ISD::XOR && Demanded.isSubsetOf(C))
      return false;

    if (!C.isSubsetOf(Demanded)) {
      EVT VT = Op.getValueType();
      SDValue NewC = TLO.DAG.getConstant(Demanded & C, DL, VT);
      SDValue NewOp = TLO.DAG.getNode(Opcode, DL, VT, Op.getOperand(0), NewC);
      return TLO.CombineTo(Op, NewOp);
    }

    break;
  }
  }

  return false;
}
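
// For example, if Op is (or X, 0xFF00) and only bits 0-7 are demanded, the
// constant does not overlap the demanded bits, so it is shrunk to
// 0xFF00 & 0xFF = 0 and the 'or' becomes trivially removable by later
// combines.
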
/// Convert x+y to (VT)((SmallVT)x+(SmallVT)y) if the casts are free.
/// This uses isZExtFree and ZERO_EXTEND for the widening cast, but it could be
/// generalized for targets with other types of implicit widening casts.
bool TargetLowering::ShrinkDemandedOp(SDValue Op, unsigned BitWidth,
                                      const APInt &Demanded,
                                      TargetLoweringOpt &TLO) const {
  assert(Op.getNumOperands() == 2 &&
         "ShrinkDemandedOp only supports binary operators!");
  assert(Op.getNode()->getNumValues() == 1 &&
         "ShrinkDemandedOp only supports nodes with one result!");

  SelectionDAG &DAG = TLO.DAG;
  SDLoc dl(Op);

  // Early return, as this function cannot handle vector types.
  if (Op.getValueType().isVector())
    return false;

  // Don't do this if the node has another user, which may require the
  // full value.
  if (!Op.getNode()->hasOneUse())
    return false;

  // Search for the smallest integer type with free casts to and from
  // Op's type. For expedience, just check power-of-2 integer types.
  const TargetLowering &TLI = DAG.getTargetLoweringInfo();
  unsigned DemandedSize = Demanded.getActiveBits();
  unsigned SmallVTBits = DemandedSize;
  if (!isPowerOf2_32(SmallVTBits))
    SmallVTBits = NextPowerOf2(SmallVTBits);
  for (; SmallVTBits < BitWidth; SmallVTBits = NextPowerOf2(SmallVTBits)) {
    EVT SmallVT = EVT::getIntegerVT(*DAG.getContext(), SmallVTBits);
    if (TLI.isTruncateFree(Op.getValueType(), SmallVT) &&
        TLI.isZExtFree(SmallVT, Op.getValueType())) {
      // We found a type with free casts.
      SDValue X = DAG.getNode(
          Op.getOpcode(), dl, SmallVT,
          DAG.getNode(ISD::TRUNCATE, dl, SmallVT, Op.getOperand(0)),
          DAG.getNode(ISD::TRUNCATE, dl, SmallVT, Op.getOperand(1)));
      assert(DemandedSize <= SmallVTBits && "Narrowed below demanded bits?");
      SDValue Z = DAG.getNode(ISD::ANY_EXTEND, dl, Op.getValueType(), X);
      return TLO.CombineTo(Op, Z);
    }
  }
  return false;
}

bool TargetLowering::SimplifyDemandedBits(SDValue Op, const APInt &DemandedBits,
                                          DAGCombinerInfo &DCI) const {
  SelectionDAG &DAG = DCI.DAG;
  TargetLoweringOpt TLO(DAG, !DCI.isBeforeLegalize(),
                        !DCI.isBeforeLegalizeOps());
  KnownBits Known;

  bool Simplified = SimplifyDemandedBits(Op, DemandedBits, Known, TLO);
  if (Simplified) {
    DCI.AddToWorklist(Op.getNode());
    DCI.CommitTargetLoweringOpt(TLO);
  }
  return Simplified;
}

bool TargetLowering::SimplifyDemandedBits(SDValue Op, const APInt &DemandedBits,
                                          KnownBits &Known,
                                          TargetLoweringOpt &TLO,
                                          unsigned Depth,
                                          bool AssumeSingleUse) const {
  EVT VT = Op.getValueType();
  APInt DemandedElts = VT.isVector()
                           ? APInt::getAllOnesValue(VT.getVectorNumElements())
                           : APInt(1, 1);
  return SimplifyDemandedBits(Op, DemandedBits, DemandedElts, Known, TLO, Depth,
                              AssumeSingleUse);
}

// TODO: Can we merge SelectionDAG::GetDemandedBits into this?
// TODO: Under what circumstances can we create nodes? Constant folding?
SDValue TargetLowering::SimplifyMultipleUseDemandedBits(
    SDValue Op, const APInt &DemandedBits, const APInt &DemandedElts,
    SelectionDAG &DAG, unsigned Depth) const {
  // Limit search depth.
  if (Depth >= SelectionDAG::MaxRecursionDepth)
    return SDValue();

  // Ignore UNDEFs.
  if (Op.isUndef())
    return SDValue();

  // Not demanding any bits/elts from Op.
  if (DemandedBits == 0 || DemandedElts == 0)
    return DAG.getUNDEF(Op.getValueType());

  unsigned NumElts = DemandedElts.getBitWidth();
  KnownBits LHSKnown, RHSKnown;
  switch (Op.getOpcode()) {
  case ISD::BITCAST: {
    SDValue Src = peekThroughBitcasts(Op.getOperand(0));
    EVT SrcVT = Src.getValueType();
    EVT DstVT = Op.getValueType();
    unsigned NumSrcEltBits = SrcVT.getScalarSizeInBits();
    unsigned NumDstEltBits = DstVT.getScalarSizeInBits();

    if (NumSrcEltBits == NumDstEltBits)
      if (SDValue V = SimplifyMultipleUseDemandedBits(
              Src, DemandedBits, DemandedElts, DAG, Depth + 1))
        return DAG.getBitcast(DstVT, V);

    // TODO - bigendian once we have test coverage.
    if (SrcVT.isVector() && (NumDstEltBits % NumSrcEltBits) == 0 &&
        DAG.getDataLayout().isLittleEndian()) {
      unsigned Scale = NumDstEltBits / NumSrcEltBits;
      unsigned NumSrcElts = SrcVT.getVectorNumElements();
      APInt DemandedSrcBits = APInt::getNullValue(NumSrcEltBits);
      APInt DemandedSrcElts = APInt::getNullValue(NumSrcElts);
      for (unsigned i = 0; i != Scale; ++i) {
        unsigned Offset = i * NumSrcEltBits;
        APInt Sub = DemandedBits.extractBits(NumSrcEltBits, Offset);
        if (!Sub.isNullValue()) {
          DemandedSrcBits |= Sub;
          for (unsigned j = 0; j != NumElts; ++j)
            if (DemandedElts[j])
              DemandedSrcElts.setBit((j * Scale) + i);
        }
      }

      if (SDValue V = SimplifyMultipleUseDemandedBits(
              Src, DemandedSrcBits, DemandedSrcElts, DAG, Depth + 1))
        return DAG.getBitcast(DstVT, V);
    }

    // TODO - bigendian once we have test coverage.
    if ((NumSrcEltBits % NumDstEltBits) == 0 &&
        DAG.getDataLayout().isLittleEndian()) {
      unsigned Scale = NumSrcEltBits / NumDstEltBits;
      unsigned NumSrcElts = SrcVT.isVector() ? SrcVT.getVectorNumElements() : 1;
      APInt DemandedSrcBits = APInt::getNullValue(NumSrcEltBits);
      APInt DemandedSrcElts = APInt::getNullValue(NumSrcElts);
      for (unsigned i = 0; i != NumElts; ++i)
        if (DemandedElts[i]) {
          unsigned Offset = (i % Scale) * NumDstEltBits;
          DemandedSrcBits.insertBits(DemandedBits, Offset);
          DemandedSrcElts.setBit(i / Scale);
        }

      if (SDValue V = SimplifyMultipleUseDemandedBits(
              Src, DemandedSrcBits, DemandedSrcElts, DAG, Depth + 1))
        return DAG.getBitcast(DstVT, V);
    }

    break;
  }
  case ISD::AND: {
    LHSKnown = DAG.computeKnownBits(Op.getOperand(0), DemandedElts, Depth + 1);
    RHSKnown = DAG.computeKnownBits(Op.getOperand(1), DemandedElts, Depth + 1);

    // If all of the demanded bits are known 1 on one side, return the other.
    // These bits cannot contribute to the result of the 'and' in this
    // context.
    if (DemandedBits.isSubsetOf(LHSKnown.Zero | RHSKnown.One))
      return Op.getOperand(0);
    if (DemandedBits.isSubsetOf(RHSKnown.Zero | LHSKnown.One))
      return Op.getOperand(1);
    break;
  }
  case ISD::OR: {
    LHSKnown = DAG.computeKnownBits(Op.getOperand(0), DemandedElts, Depth + 1);
    RHSKnown = DAG.computeKnownBits(Op.getOperand(1), DemandedElts, Depth + 1);

    // If all of the demanded bits are known zero on one side, return the
    // other. These bits cannot contribute to the result of the 'or' in this
    // context.
    if (DemandedBits.isSubsetOf(LHSKnown.One | RHSKnown.Zero))
      return Op.getOperand(0);
    if (DemandedBits.isSubsetOf(RHSKnown.One | LHSKnown.Zero))
      return Op.getOperand(1);
    break;
  }
  case ISD::XOR: {
    LHSKnown = DAG.computeKnownBits(Op.getOperand(0), DemandedElts, Depth + 1);
    RHSKnown = DAG.computeKnownBits(Op.getOperand(1), DemandedElts, Depth + 1);

    // If all of the demanded bits are known zero on one side, return the
    // other.
    if (DemandedBits.isSubsetOf(RHSKnown.Zero))
      return Op.getOperand(0);
    if (DemandedBits.isSubsetOf(LHSKnown.Zero))
      return Op.getOperand(1);
    break;
  }
  case ISD::SETCC: {
    SDValue Op0 = Op.getOperand(0);
    SDValue Op1 = Op.getOperand(1);
    ISD::CondCode CC = cast<CondCodeSDNode>(Op.getOperand(2))->get();
    // If (1) we only need the sign-bit, (2) the setcc operands are the same
    // width as the setcc result, and (3) the result of a setcc conforms to 0 or
    // -1, we may be able to bypass the setcc.
    if (DemandedBits.isSignMask() &&
        Op0.getScalarValueSizeInBits() == DemandedBits.getBitWidth() &&
        getBooleanContents(Op0.getValueType()) ==
            BooleanContent::ZeroOrNegativeOneBooleanContent) {
      // If we're testing X < 0, then this compare isn't needed - just use X!
      // FIXME: We're limiting to integer types here, but this should also work
      // if we don't care about FP signed-zero. The use of SETLT with FP means
      // that we don't care about NaNs.
      if (CC == ISD::SETLT && Op1.getValueType().isInteger() &&
          (isNullConstant(Op1) || ISD::isBuildVectorAllZeros(Op1.getNode())))
        return Op0;
    }
    break;
  }
  case ISD::SIGN_EXTEND_INREG: {
    // If none of the extended bits are demanded, eliminate the sextinreg.
    EVT ExVT = cast<VTSDNode>(Op.getOperand(1))->getVT();
    if (DemandedBits.getActiveBits() <= ExVT.getScalarSizeInBits())
      return Op.getOperand(0);
    break;
  }
  case ISD::INSERT_VECTOR_ELT: {
    // If we don't demand the inserted element, return the base vector.
    SDValue Vec = Op.getOperand(0);
    auto *CIdx = dyn_cast<ConstantSDNode>(Op.getOperand(2));
    EVT VecVT = Vec.getValueType();
    if (CIdx && CIdx->getAPIntValue().ult(VecVT.getVectorNumElements()) &&
        !DemandedElts[CIdx->getZExtValue()])
      return Vec;
    break;
  }
  case ISD::VECTOR_SHUFFLE: {
    ArrayRef<int> ShuffleMask = cast<ShuffleVectorSDNode>(Op)->getMask();

    // If all the demanded elts are from one operand and are inline,
    // then we can use the operand directly.
    bool AllUndef = true, IdentityLHS = true, IdentityRHS = true;
    for (unsigned i = 0; i != NumElts; ++i) {
      int M = ShuffleMask[i];
      if (M < 0 || !DemandedElts[i])
        continue;
      AllUndef = false;
      IdentityLHS &= (M == (int)i);
      IdentityRHS &= ((M - NumElts) == i);
    }

    if (AllUndef)
      return DAG.getUNDEF(Op.getValueType());
    if (IdentityLHS)
      return Op.getOperand(0);
    if (IdentityRHS)
      return Op.getOperand(1);
    break;
  }
  default:
    if (Op.getOpcode() >= ISD::BUILTIN_OP_END)
      if (SDValue V = SimplifyMultipleUseDemandedBitsForTargetNode(
              Op, DemandedBits, DemandedElts, DAG, Depth))
        return V;
    break;
  }
  return SDValue();
}
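
// For instance, if only the sign bit of (or Y, 1) is demanded, the constant
// cannot affect that bit, so the OR case above returns Y: the caller may use
// Y in place of the OR without modifying the original node, which can still
// have other users that demand the low bit.
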
/// Look at Op. At this point, we know that only the OriginalDemandedBits of the
/// result of Op are ever used downstream. If we can use this information to
/// simplify Op, create a new simplified DAG node and return true, returning the
/// original and new nodes in Old and New. Otherwise, analyze the expression and
/// return a mask of Known bits for the expression (used to simplify the
/// caller). The Known bits may only be accurate for those bits in the
/// OriginalDemandedBits and OriginalDemandedElts.
bool TargetLowering::SimplifyDemandedBits(
    SDValue Op, const APInt &OriginalDemandedBits,
    const APInt &OriginalDemandedElts, KnownBits &Known, TargetLoweringOpt &TLO,
    unsigned Depth, bool AssumeSingleUse) const {
  unsigned BitWidth = OriginalDemandedBits.getBitWidth();
  assert(Op.getScalarValueSizeInBits() == BitWidth &&
         "Mask size mismatches value type size!");

  unsigned NumElts = OriginalDemandedElts.getBitWidth();
  assert((!Op.getValueType().isVector() ||
          NumElts == Op.getValueType().getVectorNumElements()) &&
         "Unexpected vector size");

  APInt DemandedBits = OriginalDemandedBits;
  APInt DemandedElts = OriginalDemandedElts;
  SDLoc dl(Op);
  auto &DL = TLO.DAG.getDataLayout();

  // Don't know anything.
  Known = KnownBits(BitWidth);

  // Undef operand.
  if (Op.isUndef())
    return false;

  if (Op.getOpcode() == ISD::Constant) {
    // We know all of the bits for a constant!
    Known.One = cast<ConstantSDNode>(Op)->getAPIntValue();
    Known.Zero = ~Known.One;
    return false;
  }

  // Other users may use these bits.
  EVT VT = Op.getValueType();
  if (!Op.getNode()->hasOneUse() && !AssumeSingleUse) {
    if (Depth != 0) {
      // If not at the root, just compute the Known bits to
      // simplify things downstream.
      Known = TLO.DAG.computeKnownBits(Op, DemandedElts, Depth);
      return false;
    }
    // If this is the root being simplified, allow it to have multiple uses,
    // just set the DemandedBits/Elts to all bits.
    DemandedBits = APInt::getAllOnesValue(BitWidth);
    DemandedElts = APInt::getAllOnesValue(NumElts);
  } else if (OriginalDemandedBits == 0 || OriginalDemandedElts == 0) {
    // Not demanding any bits/elts from Op.
    return TLO.CombineTo(Op, TLO.DAG.getUNDEF(VT));
  } else if (Depth >= SelectionDAG::MaxRecursionDepth) {
    // Limit search depth.
    return false;
  }

  KnownBits Known2, KnownOut;
  switch (Op.getOpcode()) {
  case ISD::TargetConstant:
    llvm_unreachable("Can't simplify this node");
  case ISD::SCALAR_TO_VECTOR: {
    if (!DemandedElts[0])
      return TLO.CombineTo(Op, TLO.DAG.getUNDEF(VT));

    KnownBits SrcKnown;
    SDValue Src = Op.getOperand(0);
    unsigned SrcBitWidth = Src.getScalarValueSizeInBits();
    APInt SrcDemandedBits = DemandedBits.zextOrSelf(SrcBitWidth);
    if (SimplifyDemandedBits(Src, SrcDemandedBits, SrcKnown, TLO, Depth + 1))
      return true;
    Known = SrcKnown.zextOrTrunc(BitWidth, false);
    break;
  }
  case ISD::BUILD_VECTOR:
    // Collect the known bits that are shared by every demanded element.
    // TODO: Call SimplifyDemandedBits for non-constant demanded elements.
    Known = TLO.DAG.computeKnownBits(Op, DemandedElts, Depth);
    return false; // Don't fall through, will infinitely loop.
  case ISD::LOAD: {
    LoadSDNode *LD = cast<LoadSDNode>(Op);
    if (getTargetConstantFromLoad(LD)) {
      Known = TLO.DAG.computeKnownBits(Op, DemandedElts, Depth);
      return false; // Don't fall through, will infinitely loop.
    }
    break;
  }
  case ISD::INSERT_VECTOR_ELT: {
    SDValue Vec = Op.getOperand(0);
    SDValue Scl = Op.getOperand(1);
    auto *CIdx = dyn_cast<ConstantSDNode>(Op.getOperand(2));
    EVT VecVT = Vec.getValueType();

    // If index isn't constant, assume we need all vector elements AND the
    // inserted element.
    APInt DemandedVecElts(DemandedElts);
    if (CIdx && CIdx->getAPIntValue().ult(VecVT.getVectorNumElements())) {
      unsigned Idx = CIdx->getZExtValue();
      DemandedVecElts.clearBit(Idx);

      // Inserted element is not required.
      if (!DemandedElts[Idx])
        return TLO.CombineTo(Op, Vec);
    }

    KnownBits KnownScl;
    unsigned NumSclBits = Scl.getScalarValueSizeInBits();
    APInt DemandedSclBits = DemandedBits.zextOrTrunc(NumSclBits);
    if (SimplifyDemandedBits(Scl, DemandedSclBits, KnownScl, TLO, Depth + 1))
      return true;

    Known = KnownScl.zextOrTrunc(BitWidth, false);

    KnownBits KnownVec;
    if (SimplifyDemandedBits(Vec, DemandedBits, DemandedVecElts, KnownVec, TLO,
                             Depth + 1))
      return true;

    if (!!DemandedVecElts) {
      Known.One &= KnownVec.One;
      Known.Zero &= KnownVec.Zero;
    }

    return false;
  }
  case ISD::INSERT_SUBVECTOR: {
    SDValue Base = Op.getOperand(0);
    SDValue Sub = Op.getOperand(1);
    EVT SubVT = Sub.getValueType();
    unsigned NumSubElts = SubVT.getVectorNumElements();

    // If index isn't constant, assume we need the original demanded base
    // elements and ALL the inserted subvector elements.
    APInt BaseElts = DemandedElts;
    APInt SubElts = APInt::getAllOnesValue(NumSubElts);
    if (isa<ConstantSDNode>(Op.getOperand(2))) {
      const APInt &Idx = Op.getConstantOperandAPInt(2);
      if (Idx.ule(NumElts - NumSubElts)) {
        unsigned SubIdx = Idx.getZExtValue();
        SubElts = DemandedElts.extractBits(NumSubElts, SubIdx);
        BaseElts.insertBits(APInt::getNullValue(NumSubElts), SubIdx);
      }
    }

    KnownBits KnownSub, KnownBase;
    if (SimplifyDemandedBits(Sub, DemandedBits, SubElts, KnownSub, TLO,
                             Depth + 1))
      return true;
    if (SimplifyDemandedBits(Base, DemandedBits, BaseElts, KnownBase, TLO,
                             Depth + 1))
      return true;

    Known.Zero.setAllBits();
    Known.One.setAllBits();
    if (!!SubElts) {
      Known.One &= KnownSub.One;
      Known.Zero &= KnownSub.Zero;
    }
    if (!!BaseElts) {
      Known.One &= KnownBase.One;
      Known.Zero &= KnownBase.Zero;
    }
    break;
  }
  case ISD::EXTRACT_SUBVECTOR: {
    // If index isn't constant, assume we need all the source vector elements.
    SDValue Src = Op.getOperand(0);
    ConstantSDNode *SubIdx = dyn_cast<ConstantSDNode>(Op.getOperand(1));
    unsigned NumSrcElts = Src.getValueType().getVectorNumElements();
    APInt SrcElts = APInt::getAllOnesValue(NumSrcElts);
    if (SubIdx && SubIdx->getAPIntValue().ule(NumSrcElts - NumElts)) {
      // Offset the demanded elts by the subvector index.
      uint64_t Idx = SubIdx->getZExtValue();
      SrcElts = DemandedElts.zextOrSelf(NumSrcElts).shl(Idx);
    }
    if (SimplifyDemandedBits(Src, DemandedBits, SrcElts, Known, TLO, Depth + 1))
      return true;
    break;
  }
  case ISD::CONCAT_VECTORS: {
    Known.Zero.setAllBits();
    Known.One.setAllBits();
    EVT SubVT = Op.getOperand(0).getValueType();
    unsigned NumSubVecs = Op.getNumOperands();
    unsigned NumSubElts = SubVT.getVectorNumElements();
    for (unsigned i = 0; i != NumSubVecs; ++i) {
      APInt DemandedSubElts =
          DemandedElts.extractBits(NumSubElts, i * NumSubElts);
      if (SimplifyDemandedBits(Op.getOperand(i), DemandedBits, DemandedSubElts,
                               Known2, TLO, Depth + 1))
        return true;
      // Known bits are shared by every demanded subvector element.
      if (!!DemandedSubElts) {
        Known.One &= Known2.One;
        Known.Zero &= Known2.Zero;
      }
    }
    break;
  }
  case ISD::VECTOR_SHUFFLE: {
    ArrayRef<int> ShuffleMask = cast<ShuffleVectorSDNode>(Op)->getMask();

    // Collect demanded elements from shuffle operands.
    APInt DemandedLHS(NumElts, 0);
    APInt DemandedRHS(NumElts, 0);
    for (unsigned i = 0; i != NumElts; ++i) {
      if (!DemandedElts[i])
        continue;
      int M = ShuffleMask[i];
      if (M < 0) {
        // For UNDEF elements, we don't know anything about the common state of
        // the shuffle result.
        DemandedLHS.clearAllBits();
        DemandedRHS.clearAllBits();
        break;
      }
      assert(0 <= M && M < (int)(2 * NumElts) && "Shuffle index out of range");
      if (M < (int)NumElts)
        DemandedLHS.setBit(M);
      else
        DemandedRHS.setBit(M - NumElts);
    }

    if (!!DemandedLHS || !!DemandedRHS) {
      SDValue Op0 = Op.getOperand(0);
      SDValue Op1 = Op.getOperand(1);

      Known.Zero.setAllBits();
      Known.One.setAllBits();
      if (!!DemandedLHS) {
        if (SimplifyDemandedBits(Op0, DemandedBits, DemandedLHS, Known2, TLO,
                                 Depth + 1))
          return true;
        Known.One &= Known2.One;
        Known.Zero &= Known2.Zero;
      }
      if (!!DemandedRHS) {
        if (SimplifyDemandedBits(Op1, DemandedBits, DemandedRHS, Known2, TLO,
                                 Depth + 1))
          return true;
        Known.One &= Known2.One;
        Known.Zero &= Known2.Zero;
      }

      // Attempt to avoid multi-use ops if we don't need anything from them.
      SDValue DemandedOp0 = SimplifyMultipleUseDemandedBits(
          Op0, DemandedBits, DemandedLHS, TLO.DAG, Depth + 1);
      SDValue DemandedOp1 = SimplifyMultipleUseDemandedBits(
          Op1, DemandedBits, DemandedRHS, TLO.DAG, Depth + 1);
      if (DemandedOp0 || DemandedOp1) {
        Op0 = DemandedOp0 ? DemandedOp0 : Op0;
        Op1 = DemandedOp1 ? DemandedOp1 : Op1;
        SDValue NewOp = TLO.DAG.getVectorShuffle(VT, dl, Op0, Op1, ShuffleMask);
        return TLO.CombineTo(Op, NewOp);
      }
    }
    break;
  }
  case ISD::AND: {
    SDValue Op0 = Op.getOperand(0);
    SDValue Op1 = Op.getOperand(1);

    // If the RHS is a constant, check to see if the LHS would be zero without
    // using the bits from the RHS. Below, we use knowledge about the RHS to
    // simplify the LHS, here we're using information from the LHS to simplify
    // the RHS.
    if (ConstantSDNode *RHSC = isConstOrConstSplat(Op1)) {
      // Do not increment Depth here; that can cause an infinite loop.
      KnownBits LHSKnown = TLO.DAG.computeKnownBits(Op0, DemandedElts, Depth);
      // If the LHS already has zeros where RHSC does, this 'and' is dead.
      if ((LHSKnown.Zero & DemandedBits) ==
          (~RHSC->getAPIntValue() & DemandedBits))
        return TLO.CombineTo(Op, Op0);

      // If any of the set bits in the RHS are known zero on the LHS, shrink
      // the constant.
      if (ShrinkDemandedConstant(Op, ~LHSKnown.Zero & DemandedBits, TLO))
        return true;

      // Bitwise-not (xor X, -1) is a special case: we don't usually shrink its
      // constant, but if this 'and' is only clearing bits that were just set by
      // the xor, then this 'and' can be eliminated by shrinking the mask of
      // the xor.
      // For example, for a 32-bit X:
      // and (xor (srl X, 31), -1), 1 --> xor (srl X, 31), 1
      if (isBitwiseNot(Op0) && Op0.hasOneUse() &&
          LHSKnown.One == ~RHSC->getAPIntValue()) {
        SDValue Xor = TLO.DAG.getNode(ISD::XOR, dl, VT, Op0.getOperand(0), Op1);
        return TLO.CombineTo(Op, Xor);
      }
    }

    if (SimplifyDemandedBits(Op1, DemandedBits, DemandedElts, Known, TLO,
                             Depth + 1))
      return true;
    assert(!Known.hasConflict() && "Bits known to be one AND zero?");
    if (SimplifyDemandedBits(Op0, ~Known.Zero & DemandedBits, DemandedElts,
                             Known2, TLO, Depth + 1))
      return true;
    assert(!Known2.hasConflict() && "Bits known to be one AND zero?");

    // Attempt to avoid multi-use ops if we don't need anything from them.
    if (!DemandedBits.isAllOnesValue() || !DemandedElts.isAllOnesValue()) {
      SDValue DemandedOp0 = SimplifyMultipleUseDemandedBits(
          Op0, DemandedBits, DemandedElts, TLO.DAG, Depth + 1);
      SDValue DemandedOp1 = SimplifyMultipleUseDemandedBits(
          Op1, DemandedBits, DemandedElts, TLO.DAG, Depth + 1);
      if (DemandedOp0 || DemandedOp1) {
        Op0 = DemandedOp0 ? DemandedOp0 : Op0;
        Op1 = DemandedOp1 ? DemandedOp1 : Op1;
        SDValue NewOp = TLO.DAG.getNode(Op.getOpcode(), dl, VT, Op0, Op1);
        return TLO.CombineTo(Op, NewOp);
      }
    }

    // If all of the demanded bits are known one on one side, return the other.
    // These bits cannot contribute to the result of the 'and'.
    if (DemandedBits.isSubsetOf(Known2.Zero | Known.One))
      return TLO.CombineTo(Op, Op0);
    if (DemandedBits.isSubsetOf(Known.Zero | Known2.One))
      return TLO.CombineTo(Op, Op1);
    // If all of the demanded bits in the inputs are known zeros, return zero.
    if (DemandedBits.isSubsetOf(Known.Zero | Known2.Zero))
      return TLO.CombineTo(Op, TLO.DAG.getConstant(0, dl, VT));
    // If the RHS is a constant, see if we can simplify it.
    if (ShrinkDemandedConstant(Op, ~Known2.Zero & DemandedBits, TLO))
      return true;
    // If the operation can be done in a smaller type, do so.
    if (ShrinkDemandedOp(Op, BitWidth, DemandedBits, TLO))
      return true;

    // Output known-1 bits are only known if set in both the LHS & RHS.
    Known.One &= Known2.One;
    // Output known-0 are known to be clear if zero in either the LHS | RHS.
    Known.Zero |= Known2.Zero;
    break;
  }
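  // Example for the AND case above: if Op is (and X, 0xFF) and only bits 0-7
  // are demanded, every demanded bit is known-one in the mask, so the 'and'
  // contributes nothing and Op is replaced by X directly.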
  case ISD::OR: {
    SDValue Op0 = Op.getOperand(0);
    SDValue Op1 = Op.getOperand(1);

    if (SimplifyDemandedBits(Op1, DemandedBits, DemandedElts, Known, TLO,
                             Depth + 1))
      return true;
    assert(!Known.hasConflict() && "Bits known to be one AND zero?");
    if (SimplifyDemandedBits(Op0, ~Known.One & DemandedBits, DemandedElts,
                             Known2, TLO, Depth + 1))
      return true;
    assert(!Known2.hasConflict() && "Bits known to be one AND zero?");

    // Attempt to avoid multi-use ops if we don't need anything from them.
    if (!DemandedBits.isAllOnesValue() || !DemandedElts.isAllOnesValue()) {
      SDValue DemandedOp0 = SimplifyMultipleUseDemandedBits(
          Op0, DemandedBits, DemandedElts, TLO.DAG, Depth + 1);
      SDValue DemandedOp1 = SimplifyMultipleUseDemandedBits(
          Op1, DemandedBits, DemandedElts, TLO.DAG, Depth + 1);
      if (DemandedOp0 || DemandedOp1) {
        Op0 = DemandedOp0 ? DemandedOp0 : Op0;
        Op1 = DemandedOp1 ? DemandedOp1 : Op1;
        SDValue NewOp = TLO.DAG.getNode(Op.getOpcode(), dl, VT, Op0, Op1);
        return TLO.CombineTo(Op, NewOp);
      }
    }

    // If all of the demanded bits are known zero on one side, return the
    // other. These bits cannot contribute to the result of the 'or'.
    if (DemandedBits.isSubsetOf(Known2.One | Known.Zero))
      return TLO.CombineTo(Op, Op0);
    if (DemandedBits.isSubsetOf(Known.One | Known2.Zero))
      return TLO.CombineTo(Op, Op1);
    // If the RHS is a constant, see if we can simplify it.
    if (ShrinkDemandedConstant(Op, DemandedBits, TLO))
      return true;
    // If the operation can be done in a smaller type, do so.
    if (ShrinkDemandedOp(Op, BitWidth, DemandedBits, TLO))
      return true;

    // Output known-0 bits are only known if clear in both the LHS & RHS.
    Known.Zero &= Known2.Zero;
    // Output known-1 are known to be set if set in either the LHS | RHS.
    Known.One |= Known2.One;
    break;
  }
  case ISD::XOR: {
    SDValue Op0 = Op.getOperand(0);
    SDValue Op1 = Op.getOperand(1);

    if (SimplifyDemandedBits(Op1, DemandedBits, DemandedElts, Known, TLO,
                             Depth + 1))
      return true;
    assert(!Known.hasConflict() && "Bits known to be one AND zero?");
    if (SimplifyDemandedBits(Op0, DemandedBits, DemandedElts, Known2, TLO,
                             Depth + 1))
      return true;
    assert(!Known2.hasConflict() && "Bits known to be one AND zero?");

    // Attempt to avoid multi-use ops if we don't need anything from them.
    if (!DemandedBits.isAllOnesValue() || !DemandedElts.isAllOnesValue()) {
      SDValue DemandedOp0 = SimplifyMultipleUseDemandedBits(
          Op0, DemandedBits, DemandedElts, TLO.DAG, Depth + 1);
      SDValue DemandedOp1 = SimplifyMultipleUseDemandedBits(
          Op1, DemandedBits, DemandedElts, TLO.DAG, Depth + 1);
      if (DemandedOp0 || DemandedOp1) {
        Op0 = DemandedOp0 ? DemandedOp0 : Op0;
        Op1 = DemandedOp1 ? DemandedOp1 : Op1;
        SDValue NewOp = TLO.DAG.getNode(Op.getOpcode(), dl, VT, Op0, Op1);
        return TLO.CombineTo(Op, NewOp);
      }
    }

    // If all of the demanded bits are known zero on one side, return the
    // other. These bits cannot contribute to the result of the 'xor'.
    if (DemandedBits.isSubsetOf(Known.Zero))
      return TLO.CombineTo(Op, Op0);
    if (DemandedBits.isSubsetOf(Known2.Zero))
      return TLO.CombineTo(Op, Op1);
    // If the operation can be done in a smaller type, do so.
    if (ShrinkDemandedOp(Op, BitWidth, DemandedBits, TLO))
      return true;

    // If all of the unknown bits are known to be zero on one side or the other
    // (but not both) turn this into an *inclusive* or.
    // e.g. (A & C1)^(B & C2) -> (A & C1)|(B & C2) iff C1&C2 == 0
    if (DemandedBits.isSubsetOf(Known.Zero | Known2.Zero))
      return TLO.CombineTo(Op, TLO.DAG.getNode(ISD::OR, dl, VT, Op0, Op1));

    // Output known-0 bits are known if clear or set in both the LHS & RHS.
    KnownOut.Zero = (Known.Zero & Known2.Zero) | (Known.One & Known2.One);
    // Output known-1 are known to be set if set in only one of the LHS, RHS.
    KnownOut.One = (Known.Zero & Known2.One) | (Known.One & Known2.Zero);

    if (ConstantSDNode *C = isConstOrConstSplat(Op1)) {
      // If one side is a constant, and all of the known set bits on the other
      // side are also set in the constant, turn this into an AND, as we know
      // the bits will be cleared.
      // e.g. (X | C1) ^ C2 --> (X | C1) & ~C2 iff (C1&C2) == C2
      // NB: it is okay if more bits are known than are requested
      if (C->getAPIntValue() == Known2.One) {
        SDValue ANDC =
            TLO.DAG.getConstant(~C->getAPIntValue() & DemandedBits, dl, VT);
        return TLO.CombineTo(Op, TLO.DAG.getNode(ISD::AND, dl, VT, Op0, ANDC));
      }

      // If the RHS is a constant, see if we can change it. Don't alter a -1
      // constant because that's a 'not' op, and that is better for combining
      // and codegen.
      if (!C->isAllOnesValue()) {
        if (DemandedBits.isSubsetOf(C->getAPIntValue())) {
          // We're flipping all demanded bits. Flip the undemanded bits too.
          SDValue New = TLO.DAG.getNOT(dl, Op0, VT);
          return TLO.CombineTo(Op, New);
        }
        // If we can't turn this into a 'not', try to shrink the constant.
        if (ShrinkDemandedConstant(Op, DemandedBits, TLO))
          return true;
      }
    }

    Known = std::move(KnownOut);
    break;
  }
  case ISD::SELECT:
    if (SimplifyDemandedBits(Op.getOperand(2), DemandedBits, Known, TLO,
                             Depth + 1))
      return true;
    if (SimplifyDemandedBits(Op.getOperand(1), DemandedBits, Known2, TLO,
                             Depth + 1))
      return true;
    assert(!Known.hasConflict() && "Bits known to be one AND zero?");
    assert(!Known2.hasConflict() && "Bits known to be one AND zero?");

    // If the operands are constants, see if we can simplify them.
    if (ShrinkDemandedConstant(Op, DemandedBits, TLO))
      return true;

    // Only known if known in both the LHS and RHS.
    Known.One &= Known2.One;
    Known.Zero &= Known2.Zero;
    break;
  case ISD::SELECT_CC:
    if (SimplifyDemandedBits(Op.getOperand(3), DemandedBits, Known, TLO,
                             Depth + 1))
      return true;
    if (SimplifyDemandedBits(Op.getOperand(2), DemandedBits, Known2, TLO,
                             Depth + 1))
      return true;
    assert(!Known.hasConflict() && "Bits known to be one AND zero?");
    assert(!Known2.hasConflict() && "Bits known to be one AND zero?");

    // If the operands are constants, see if we can simplify them.
    if (ShrinkDemandedConstant(Op, DemandedBits, TLO))
      return true;

    // Only known if known in both the LHS and RHS.
    Known.One &= Known2.One;
    Known.Zero &= Known2.Zero;
    break;
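  // Example for the SETCC case below: when only the sign bit of the result of
  // (setlt X, 0) is demanded, the target's booleans are 0/-1, and X has the
  // same width as the setcc result, the compare folds away and X is used
  // directly; its sign bit already is the answer.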
  case ISD::SETCC: {
    SDValue Op0 = Op.getOperand(0);
    SDValue Op1 = Op.getOperand(1);
    ISD::CondCode CC = cast<CondCodeSDNode>(Op.getOperand(2))->get();
    // If (1) we only need the sign-bit, (2) the setcc operands are the same
    // width as the setcc result, and (3) the result of a setcc conforms to 0 or
    // -1, we may be able to bypass the setcc.
    if (DemandedBits.isSignMask() &&
        Op0.getScalarValueSizeInBits() == BitWidth &&
        getBooleanContents(Op0.getValueType()) ==
            BooleanContent::ZeroOrNegativeOneBooleanContent) {
      // If we're testing X < 0, then this compare isn't needed - just use X!
      // FIXME: We're limiting to integer types here, but this should also work
      // if we don't care about FP signed-zero. The use of SETLT with FP means
      // that we don't care about NaNs.
      if (CC == ISD::SETLT && Op1.getValueType().isInteger() &&
          (isNullConstant(Op1) || ISD::isBuildVectorAllZeros(Op1.getNode())))
        return TLO.CombineTo(Op, Op0);

      // TODO: Should we check for other forms of sign-bit comparisons?
      // Examples: X <= -1, X >= 0
    }
    if (getBooleanContents(Op0.getValueType()) ==
            TargetLowering::ZeroOrOneBooleanContent &&
        BitWidth > 1)
      Known.Zero.setBitsFrom(1);
    break;
  }
  case ISD::SHL: {
    SDValue Op0 = Op.getOperand(0);
    SDValue Op1 = Op.getOperand(1);

    if (ConstantSDNode *SA = isConstOrConstSplat(Op1, DemandedElts)) {
      // If the shift count is an invalid immediate, don't do anything.
      if (SA->getAPIntValue().uge(BitWidth))
        break;

      unsigned ShAmt = SA->getZExtValue();
      if (ShAmt == 0)
        return TLO.CombineTo(Op, Op0);

      // If this is ((X >>u C1) << ShAmt), see if we can simplify this into a
      // single shift. We can do this if the bottom bits (which are shifted
      // out) are never demanded.
      // TODO - support non-uniform vector amounts.
      if (Op0.getOpcode() == ISD::SRL) {
        if (!DemandedBits.intersects(APInt::getLowBitsSet(BitWidth, ShAmt))) {
          if (ConstantSDNode *SA2 =
                  isConstOrConstSplat(Op0.getOperand(1), DemandedElts)) {
            if (SA2->getAPIntValue().ult(BitWidth)) {
              unsigned C1 = SA2->getZExtValue();
              unsigned Opc = ISD::SHL;
              int Diff = ShAmt - C1;
              if (Diff < 0) {
                Diff = -Diff;
                Opc = ISD::SRL;
              }

              SDValue NewSA = TLO.DAG.getConstant(Diff, dl, Op1.getValueType());
              return TLO.CombineTo(
                  Op, TLO.DAG.getNode(Opc, dl, VT, Op0.getOperand(0), NewSA));
            }
          }
        }
      }

      APInt InDemandedMask = DemandedBits.lshr(ShAmt);
      if (SimplifyDemandedBits(Op0, InDemandedMask, DemandedElts, Known, TLO,
                               Depth + 1))
        return true;

      // Try shrinking the operation as long as the shift amount will still be
      // in range.
      if ((ShAmt < DemandedBits.getActiveBits()) &&
          ShrinkDemandedOp(Op, BitWidth, DemandedBits, TLO))
        return true;

      // Convert (shl (anyext x, c)) to (anyext (shl x, c)) if the high bits
      // are not demanded. This will likely allow the anyext to be folded away.
      if (Op0.getOpcode() == ISD::ANY_EXTEND) {
        SDValue InnerOp = Op0.getOperand(0);
        EVT InnerVT = InnerOp.getValueType();
        unsigned InnerBits = InnerVT.getScalarSizeInBits();
        if (ShAmt < InnerBits && DemandedBits.getActiveBits() <= InnerBits &&
            isTypeDesirableForOp(ISD::SHL, InnerVT)) {
          EVT ShTy = getShiftAmountTy(InnerVT, DL);
          if (!APInt(BitWidth, ShAmt).isIntN(ShTy.getSizeInBits()))
            ShTy = InnerVT;
          SDValue NarrowShl =
              TLO.DAG.getNode(ISD::SHL, dl, InnerVT, InnerOp,
                              TLO.DAG.getConstant(ShAmt, dl, ShTy));
          return TLO.CombineTo(
              Op, TLO.DAG.getNode(ISD::ANY_EXTEND, dl, VT, NarrowShl));
        }
        // Repeat the SHL optimization above in cases where an extension
        // intervenes: (shl (anyext (shr x, c1)), c2) to
        // (shl (anyext x), c2-c1). This requires that the bottom c1 bits
        // aren't demanded (as above) and that the shifted upper c1 bits of
        // x aren't demanded.
        if (Op0.hasOneUse() && InnerOp.getOpcode() == ISD::SRL &&
            InnerOp.hasOneUse()) {
          if (ConstantSDNode *SA2 =
                  isConstOrConstSplat(InnerOp.getOperand(1))) {
            unsigned InnerShAmt = SA2->getLimitedValue(InnerBits);
            if (InnerShAmt < ShAmt && InnerShAmt < InnerBits &&
                DemandedBits.getActiveBits() <=
                    (InnerBits - InnerShAmt + ShAmt) &&
                DemandedBits.countTrailingZeros() >= ShAmt) {
              SDValue NewSA = TLO.DAG.getConstant(ShAmt - InnerShAmt, dl,
                                                  Op1.getValueType());
              SDValue NewExt = TLO.DAG.getNode(ISD::ANY_EXTEND, dl, VT,
                                               InnerOp.getOperand(0));
              return TLO.CombineTo(
                  Op, TLO.DAG.getNode(ISD::SHL, dl, VT, NewExt, NewSA));
            }
          }
        }
      }

      Known.Zero <<= ShAmt;
      Known.One <<= ShAmt;
      // low bits known zero.
      Known.Zero.setLowBits(ShAmt);
    }
    break;
  }
  case ISD::SRL: {
    SDValue Op0 = Op.getOperand(0);
    SDValue Op1 = Op.getOperand(1);

    if (ConstantSDNode *SA = isConstOrConstSplat(Op1, DemandedElts)) {
      // If the shift count is an invalid immediate, don't do anything.
      if (SA->getAPIntValue().uge(BitWidth))
        break;

      unsigned ShAmt = SA->getZExtValue();
      if (ShAmt == 0)
        return TLO.CombineTo(Op, Op0);

      EVT ShiftVT = Op1.getValueType();
      APInt InDemandedMask = (DemandedBits << ShAmt);

      // If the shift is exact, then it does demand the low bits (and knows
      // that they are zero).
      if (Op->getFlags().hasExact())
        InDemandedMask.setLowBits(ShAmt);

      // If this is ((X << C1) >>u ShAmt), see if we can simplify this into a
      // single shift. We can do this if the top bits (which are shifted out)
      // are never demanded.
      // TODO - support non-uniform vector amounts.
      if (Op0.getOpcode() == ISD::SHL) {
        if (ConstantSDNode *SA2 =
                isConstOrConstSplat(Op0.getOperand(1), DemandedElts)) {
          if (!DemandedBits.intersects(
                  APInt::getHighBitsSet(BitWidth, ShAmt))) {
            if (SA2->getAPIntValue().ult(BitWidth)) {
              unsigned C1 = SA2->getZExtValue();
              unsigned Opc = ISD::SRL;
              int Diff = ShAmt - C1;
              if (Diff < 0) {
                Diff = -Diff;
                Opc = ISD::SHL;
              }

              SDValue NewSA = TLO.DAG.getConstant(Diff, dl, ShiftVT);
              return TLO.CombineTo(
                  Op, TLO.DAG.getNode(Opc, dl, VT, Op0.getOperand(0), NewSA));
            }
          }
        }
      }

      // Compute the new bits that are at the top now.
      if (SimplifyDemandedBits(Op0, InDemandedMask, DemandedElts, Known, TLO,
                               Depth + 1))
        return true;
      assert(!Known.hasConflict() && "Bits known to be one AND zero?");
      Known.Zero.lshrInPlace(ShAmt);
      Known.One.lshrInPlace(ShAmt);

      Known.Zero.setHighBits(ShAmt); // High bits known zero.
    }
    break;
  }
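  // Example of the shift merge above: with the top 4 bits undemanded,
  // ((X << 8) >>u 4) is rewritten to the single shift (X << 4); when the
  // outer shift amount is the larger one, the pair merges to a single SRL
  // instead.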
  case ISD::SRA: {
    SDValue Op0 = Op.getOperand(0);
    SDValue Op1 = Op.getOperand(1);

    // If this is an arithmetic shift right and only the low-bit is set, we can
    // always convert this into a logical shr, even if the shift amount is
    // variable. The low bit of the shift cannot be an input sign bit unless
    // the shift amount is >= the size of the datatype, which is undefined.
    if (DemandedBits.isOneValue())
      return TLO.CombineTo(Op, TLO.DAG.getNode(ISD::SRL, dl, VT, Op0, Op1));

    if (ConstantSDNode *SA = isConstOrConstSplat(Op1, DemandedElts)) {
      // If the shift count is an invalid immediate, don't do anything.
      if (SA->getAPIntValue().uge(BitWidth))
        break;

      unsigned ShAmt = SA->getZExtValue();
      if (ShAmt == 0)
        return TLO.CombineTo(Op, Op0);

      APInt InDemandedMask = (DemandedBits << ShAmt);

      // If the shift is exact, then it does demand the low bits (and knows
      // that they are zero).
      if (Op->getFlags().hasExact())
        InDemandedMask.setLowBits(ShAmt);

      // If any of the demanded bits are produced by the sign extension, we
      // also demand the input sign bit.
      if (DemandedBits.countLeadingZeros() < ShAmt)
        InDemandedMask.setSignBit();

      if (SimplifyDemandedBits(Op0, InDemandedMask, DemandedElts, Known, TLO,
                               Depth + 1))
        return true;
      assert(!Known.hasConflict() && "Bits known to be one AND zero?");
      Known.Zero.lshrInPlace(ShAmt);
      Known.One.lshrInPlace(ShAmt);

      // If the input sign bit is known to be zero, or if none of the top bits
      // are demanded, turn this into an unsigned shift right.
      if (Known.Zero[BitWidth - ShAmt - 1] ||
          DemandedBits.countLeadingZeros() >= ShAmt) {
        SDNodeFlags Flags;
        Flags.setExact(Op->getFlags().hasExact());
        return TLO.CombineTo(
            Op, TLO.DAG.getNode(ISD::SRL, dl, VT, Op0, Op1, Flags));
      }

      int Log2 = DemandedBits.exactLogBase2();
      if (Log2 >= 0) {
        // The bit must come from the sign.
        SDValue NewSA =
            TLO.DAG.getConstant(BitWidth - 1 - Log2, dl, Op1.getValueType());
        return TLO.CombineTo(Op, TLO.DAG.getNode(ISD::SRL, dl, VT, Op0, NewSA));
      }

      if (Known.One[BitWidth - ShAmt - 1])
        // New bits are known one.
        Known.One.setHighBits(ShAmt);
    }
    break;
  }
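  // Funnel-shift example for the cases below: with BitWidth = 8,
  // fshl(A, B, 3) = (A << 3) | (B >> 5), so result bits 3-7 come from A's
  // bits 0-4 and result bits 0-2 from B's bits 5-7; Demanded0 and Demanded1
  // are the demanded masks translated back through those shifts.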
                                 (BitWidth - Amt) : Amt);
      Known.One |= Known2.One;
      Known.Zero |= Known2.Zero;
    }
    break;
  }
  case ISD::BITREVERSE: {
    SDValue Src = Op.getOperand(0);
    APInt DemandedSrcBits = DemandedBits.reverseBits();
    if (SimplifyDemandedBits(Src, DemandedSrcBits, DemandedElts, Known2, TLO,
                             Depth + 1))
      return true;
    Known.One = Known2.One.reverseBits();
    Known.Zero = Known2.Zero.reverseBits();
    break;
  }
  case ISD::BSWAP: {
    SDValue Src = Op.getOperand(0);
    APInt DemandedSrcBits = DemandedBits.byteSwap();
    if (SimplifyDemandedBits(Src, DemandedSrcBits, DemandedElts, Known2, TLO,
                             Depth + 1))
      return true;
    Known.One = Known2.One.byteSwap();
    Known.Zero = Known2.Zero.byteSwap();
    break;
  }
  case ISD::SIGN_EXTEND_INREG: {
    SDValue Op0 = Op.getOperand(0);
    EVT ExVT = cast<VTSDNode>(Op.getOperand(1))->getVT();
    unsigned ExVTBits = ExVT.getScalarSizeInBits();

    // If we only care about the highest bit, don't bother shifting right.
    if (DemandedBits.isSignMask()) {
      unsigned NumSignBits = TLO.DAG.ComputeNumSignBits(Op0);
      bool AlreadySignExtended = NumSignBits >= BitWidth - ExVTBits + 1;
      // However if the input is already sign extended we expect the sign
      // extension to be dropped altogether later and do not simplify.
      if (!AlreadySignExtended) {
        // Compute the correct shift amount type, which must be
        // getShiftAmountTy for scalar types after legalization.
        EVT ShiftAmtTy = VT;
        if (TLO.LegalTypes() && !ShiftAmtTy.isVector())
          ShiftAmtTy = getShiftAmountTy(ShiftAmtTy, DL);

        SDValue ShiftAmt =
            TLO.DAG.getConstant(BitWidth - ExVTBits, dl, ShiftAmtTy);
        return TLO.CombineTo(Op,
                             TLO.DAG.getNode(ISD::SHL, dl, VT, Op0, ShiftAmt));
      }
    }

    // If none of the extended bits are demanded, eliminate the sextinreg.
    if (DemandedBits.getActiveBits() <= ExVTBits)
      return TLO.CombineTo(Op, Op0);

    APInt InputDemandedBits = DemandedBits.getLoBits(ExVTBits);

    // Since the sign extended bits are demanded, we know that the sign
    // bit is demanded.
    InputDemandedBits.setBit(ExVTBits - 1);

    if (SimplifyDemandedBits(Op0, InputDemandedBits, Known, TLO, Depth + 1))
      return true;
    assert(!Known.hasConflict() && "Bits known to be one AND zero?");

    // If the sign bit of the input is known set or clear, then we know the
    // top bits of the result.

    // If the input sign bit is known zero, convert this into a zero extension.
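    // E.g. for a sext_inreg of i8 within i32: if bit 7 of the operand is
    // known zero, every copied sign bit is zero, so the node behaves exactly
    // like a zero extension in-register.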
    if (Known.Zero[ExVTBits - 1])
      return TLO.CombineTo(
          Op, TLO.DAG.getZeroExtendInReg(Op0, dl, ExVT.getScalarType()));

    APInt Mask = APInt::getLowBitsSet(BitWidth, ExVTBits);
    if (Known.One[ExVTBits - 1]) { // Input sign bit known set
      Known.One.setBitsFrom(ExVTBits);
      Known.Zero &= Mask;
    } else { // Input sign bit unknown
      Known.Zero &= Mask;
      Known.One &= Mask;
    }
    break;
  }
  case ISD::BUILD_PAIR: {
    EVT HalfVT = Op.getOperand(0).getValueType();
    unsigned HalfBitWidth = HalfVT.getScalarSizeInBits();

    APInt MaskLo = DemandedBits.getLoBits(HalfBitWidth).trunc(HalfBitWidth);
    APInt MaskHi = DemandedBits.getHiBits(HalfBitWidth).trunc(HalfBitWidth);

    KnownBits KnownLo, KnownHi;

    if (SimplifyDemandedBits(Op.getOperand(0), MaskLo, KnownLo, TLO,
                             Depth + 1))
      return true;

    if (SimplifyDemandedBits(Op.getOperand(1), MaskHi, KnownHi, TLO,
                             Depth + 1))
      return true;

    Known.Zero = KnownLo.Zero.zext(BitWidth) |
                 KnownHi.Zero.zext(BitWidth).shl(HalfBitWidth);

    Known.One = KnownLo.One.zext(BitWidth) |
                KnownHi.One.zext(BitWidth).shl(HalfBitWidth);
    break;
  }
  case ISD::ZERO_EXTEND:
  case ISD::ZERO_EXTEND_VECTOR_INREG: {
    SDValue Src = Op.getOperand(0);
    EVT SrcVT = Src.getValueType();
    unsigned InBits = SrcVT.getScalarSizeInBits();
    unsigned InElts = SrcVT.isVector() ? SrcVT.getVectorNumElements() : 1;
    bool IsVecInReg = Op.getOpcode() == ISD::ZERO_EXTEND_VECTOR_INREG;

    // If none of the top bits are demanded, convert this into an any_extend.
    if (DemandedBits.getActiveBits() <= InBits) {
      // If we only need the non-extended bits of the bottom element
      // then we can just bitcast to the result.
      if (IsVecInReg && DemandedElts == 1 &&
          VT.getSizeInBits() == SrcVT.getSizeInBits() &&
          TLO.DAG.getDataLayout().isLittleEndian())
        return TLO.CombineTo(Op, TLO.DAG.getBitcast(VT, Src));

      unsigned Opc =
          IsVecInReg ? ISD::ANY_EXTEND_VECTOR_INREG : ISD::ANY_EXTEND;
      if (!TLO.LegalOperations() || isOperationLegal(Opc, VT))
        return TLO.CombineTo(Op, TLO.DAG.getNode(Opc, dl, VT, Src));
    }

    APInt InDemandedBits = DemandedBits.trunc(InBits);
    APInt InDemandedElts = DemandedElts.zextOrSelf(InElts);
    if (SimplifyDemandedBits(Src, InDemandedBits, InDemandedElts, Known, TLO,
                             Depth + 1))
      return true;
    assert(!Known.hasConflict() && "Bits known to be one AND zero?");
    assert(Known.getBitWidth() == InBits && "Src width has changed?");
    Known = Known.zext(BitWidth, true /* ExtendedBitsAreKnownZero */);
    break;
  }
  case ISD::SIGN_EXTEND:
  case ISD::SIGN_EXTEND_VECTOR_INREG: {
    SDValue Src = Op.getOperand(0);
    EVT SrcVT = Src.getValueType();
    unsigned InBits = SrcVT.getScalarSizeInBits();
    unsigned InElts = SrcVT.isVector() ? SrcVT.getVectorNumElements() : 1;
    bool IsVecInReg = Op.getOpcode() == ISD::SIGN_EXTEND_VECTOR_INREG;

    // If none of the top bits are demanded, convert this into an any_extend.
    if (DemandedBits.getActiveBits() <= InBits) {
      // If we only need the non-extended bits of the bottom element
      // then we can just bitcast to the result.
      if (IsVecInReg && DemandedElts == 1 &&
          VT.getSizeInBits() == SrcVT.getSizeInBits() &&
          TLO.DAG.getDataLayout().isLittleEndian())
        return TLO.CombineTo(Op, TLO.DAG.getBitcast(VT, Src));

      unsigned Opc =
          IsVecInReg ?
              ISD::ANY_EXTEND_VECTOR_INREG : ISD::ANY_EXTEND;
      if (!TLO.LegalOperations() || isOperationLegal(Opc, VT))
        return TLO.CombineTo(Op, TLO.DAG.getNode(Opc, dl, VT, Src));
    }

    APInt InDemandedBits = DemandedBits.trunc(InBits);
    APInt InDemandedElts = DemandedElts.zextOrSelf(InElts);

    // Since some of the sign extended bits are demanded, we know that the
    // sign bit is demanded.
    InDemandedBits.setBit(InBits - 1);

    if (SimplifyDemandedBits(Src, InDemandedBits, InDemandedElts, Known, TLO,
                             Depth + 1))
      return true;
    assert(!Known.hasConflict() && "Bits known to be one AND zero?");
    assert(Known.getBitWidth() == InBits && "Src width has changed?");

    // If the sign bit is known one, the top bits match.
    Known = Known.sext(BitWidth);

    // If the sign bit is known zero, convert this to a zero extend.
    if (Known.isNonNegative()) {
      unsigned Opc =
          IsVecInReg ? ISD::ZERO_EXTEND_VECTOR_INREG : ISD::ZERO_EXTEND;
      if (!TLO.LegalOperations() || isOperationLegal(Opc, VT))
        return TLO.CombineTo(Op, TLO.DAG.getNode(Opc, dl, VT, Src));
    }
    break;
  }
  case ISD::ANY_EXTEND:
  case ISD::ANY_EXTEND_VECTOR_INREG: {
    SDValue Src = Op.getOperand(0);
    EVT SrcVT = Src.getValueType();
    unsigned InBits = SrcVT.getScalarSizeInBits();
    unsigned InElts = SrcVT.isVector() ? SrcVT.getVectorNumElements() : 1;
    bool IsVecInReg = Op.getOpcode() == ISD::ANY_EXTEND_VECTOR_INREG;

    // If we only need the bottom element then we can just bitcast.
    // TODO: Handle ANY_EXTEND?
    if (IsVecInReg && DemandedElts == 1 &&
        VT.getSizeInBits() == SrcVT.getSizeInBits() &&
        TLO.DAG.getDataLayout().isLittleEndian())
      return TLO.CombineTo(Op, TLO.DAG.getBitcast(VT, Src));

    APInt InDemandedBits = DemandedBits.trunc(InBits);
    APInt InDemandedElts = DemandedElts.zextOrSelf(InElts);
    if (SimplifyDemandedBits(Src, InDemandedBits, InDemandedElts, Known, TLO,
                             Depth + 1))
      return true;
    assert(!Known.hasConflict() && "Bits known to be one AND zero?");
    assert(Known.getBitWidth() == InBits && "Src width has changed?");
    Known = Known.zext(BitWidth, false /* => any extend */);
    break;
  }
  case ISD::TRUNCATE: {
    SDValue Src = Op.getOperand(0);

    // Simplify the input, using demanded bit information, and compute the
    // known zero/one bits live out.
    unsigned OperandBitWidth = Src.getScalarValueSizeInBits();
    APInt TruncMask = DemandedBits.zext(OperandBitWidth);
    if (SimplifyDemandedBits(Src, TruncMask, Known, TLO, Depth + 1))
      return true;
    Known = Known.trunc(BitWidth);

    // Attempt to avoid multi-use ops if we don't need anything from them.
    if (SDValue NewSrc = SimplifyMultipleUseDemandedBits(
            Src, TruncMask, DemandedElts, TLO.DAG, Depth + 1))
      return TLO.CombineTo(Op, TLO.DAG.getNode(ISD::TRUNCATE, dl, VT, NewSrc));

    // If the input is only used by this truncate, see if we can shrink it
    // based on the known demanded bits.
    if (Src.getNode()->hasOneUse()) {
      switch (Src.getOpcode()) {
      default:
        break;
      case ISD::SRL:
        // Shrink SRL by a constant if none of the high bits shifted in are
        // demanded.
        if (TLO.LegalTypes() && !isTypeDesirableForOp(ISD::SRL, VT))
          // Do not turn (vt1 truncate (vt2 srl)) into (vt1 srl) if vt1 is
          // undesirable.
          break;

        SDValue ShAmt = Src.getOperand(1);
        auto *ShAmtC = dyn_cast<ConstantSDNode>(ShAmt);
        if (!ShAmtC || ShAmtC->getAPIntValue().uge(BitWidth))
          break;
        uint64_t ShVal = ShAmtC->getZExtValue();

        APInt HighBits =
            APInt::getHighBitsSet(OperandBitWidth, OperandBitWidth - BitWidth);
        HighBits.lshrInPlace(ShVal);
        HighBits = HighBits.trunc(BitWidth);

        if (!(HighBits & DemandedBits)) {
          // None of the shifted in bits are needed. Add a truncate of the
          // shift input, then shift it.
          if (TLO.LegalTypes())
            ShAmt = TLO.DAG.getConstant(ShVal, dl, getShiftAmountTy(VT, DL));
          SDValue NewTrunc =
              TLO.DAG.getNode(ISD::TRUNCATE, dl, VT, Src.getOperand(0));
          return TLO.CombineTo(
              Op, TLO.DAG.getNode(ISD::SRL, dl, VT, NewTrunc, ShAmt));
        }
        break;
      }
    }

    assert(!Known.hasConflict() && "Bits known to be one AND zero?");
    break;
  }
  case ISD::AssertZext: {
    // AssertZext demands all of the high bits, plus any of the low bits
    // demanded by its users.
    EVT ZVT = cast<VTSDNode>(Op.getOperand(1))->getVT();
    APInt InMask = APInt::getLowBitsSet(BitWidth, ZVT.getSizeInBits());
    if (SimplifyDemandedBits(Op.getOperand(0), ~InMask | DemandedBits, Known,
                             TLO, Depth + 1))
      return true;
    assert(!Known.hasConflict() && "Bits known to be one AND zero?");

    Known.Zero |= ~InMask;
    break;
  }
  case ISD::EXTRACT_VECTOR_ELT: {
    SDValue Src = Op.getOperand(0);
    SDValue Idx = Op.getOperand(1);
    unsigned NumSrcElts = Src.getValueType().getVectorNumElements();
    unsigned EltBitWidth = Src.getScalarValueSizeInBits();

    // Demand the bits from every vector element without a constant index.
    APInt DemandedSrcElts = APInt::getAllOnesValue(NumSrcElts);
    if (auto *CIdx = dyn_cast<ConstantSDNode>(Idx))
      if (CIdx->getAPIntValue().ult(NumSrcElts))
        DemandedSrcElts = APInt::getOneBitSet(NumSrcElts, CIdx->getZExtValue());

    // If BitWidth > EltBitWidth the value is any-extended, so we do not know
    // anything about the extended bits.
    APInt DemandedSrcBits = DemandedBits;
    if (BitWidth > EltBitWidth)
      DemandedSrcBits = DemandedSrcBits.trunc(EltBitWidth);

    if (SimplifyDemandedBits(Src, DemandedSrcBits, DemandedSrcElts, Known2,
                             TLO, Depth + 1))
      return true;

    // Attempt to avoid multi-use ops if we don't need anything from them.
    if (!DemandedSrcBits.isAllOnesValue() ||
        !DemandedSrcElts.isAllOnesValue()) {
      if (SDValue DemandedSrc = SimplifyMultipleUseDemandedBits(
              Src, DemandedSrcBits, DemandedSrcElts, TLO.DAG, Depth + 1)) {
        SDValue NewOp =
            TLO.DAG.getNode(Op.getOpcode(), dl, VT, DemandedSrc, Idx);
        return TLO.CombineTo(Op, NewOp);
      }
    }

    Known = Known2;
    if (BitWidth > EltBitWidth)
      Known = Known.zext(BitWidth, false /* => any extend */);
    break;
  }
  case ISD::BITCAST: {
    SDValue Src = Op.getOperand(0);
    EVT SrcVT = Src.getValueType();
    unsigned NumSrcEltBits = SrcVT.getScalarSizeInBits();

    // If this is an FP->Int bitcast and if the sign bit is the only
    // thing demanded, turn this into a FGETSIGN.
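    // By way of illustration: a "bitcast f64 -> i64" whose only demanded bit
    // is bit 63 is really just asking for the sign of the double, which
    // FGETSIGN (plus a SHL to move the bit back into position 63) can
    // provide directly.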
    if (!TLO.LegalOperations() && !VT.isVector() && !SrcVT.isVector() &&
        DemandedBits == APInt::getSignMask(Op.getValueSizeInBits()) &&
        SrcVT.isFloatingPoint()) {
      bool OpVTLegal = isOperationLegalOrCustom(ISD::FGETSIGN, VT);
      bool i32Legal = isOperationLegalOrCustom(ISD::FGETSIGN, MVT::i32);
      if ((OpVTLegal || i32Legal) && VT.isSimple() && SrcVT != MVT::f16 &&
          SrcVT != MVT::f128) {
        // Cannot eliminate/lower SHL for f128 yet.
        EVT Ty = OpVTLegal ? VT : MVT::i32;
        // Make a FGETSIGN + SHL to move the sign bit into the appropriate
        // place. We expect the SHL to be eliminated by other optimizations.
        SDValue Sign = TLO.DAG.getNode(ISD::FGETSIGN, dl, Ty, Src);
        unsigned OpVTSizeInBits = Op.getValueSizeInBits();
        if (!OpVTLegal && OpVTSizeInBits > 32)
          Sign = TLO.DAG.getNode(ISD::ZERO_EXTEND, dl, VT, Sign);
        unsigned ShVal = Op.getValueSizeInBits() - 1;
        SDValue ShAmt = TLO.DAG.getConstant(ShVal, dl, VT);
        return TLO.CombineTo(Op,
                             TLO.DAG.getNode(ISD::SHL, dl, VT, Sign, ShAmt));
      }
    }

    // Bitcast from a vector using SimplifyDemandedBits/VectorElts.
    // Demand the elt/bit if any of the original elts/bits are demanded.
    // TODO - bigendian once we have test coverage.
    if (SrcVT.isVector() && (BitWidth % NumSrcEltBits) == 0 &&
        TLO.DAG.getDataLayout().isLittleEndian()) {
      unsigned Scale = BitWidth / NumSrcEltBits;
      unsigned NumSrcElts = SrcVT.getVectorNumElements();
      APInt DemandedSrcBits = APInt::getNullValue(NumSrcEltBits);
      APInt DemandedSrcElts = APInt::getNullValue(NumSrcElts);
      for (unsigned i = 0; i != Scale; ++i) {
        unsigned Offset = i * NumSrcEltBits;
        APInt Sub = DemandedBits.extractBits(NumSrcEltBits, Offset);
        if (!Sub.isNullValue()) {
          DemandedSrcBits |= Sub;
          for (unsigned j = 0; j != NumElts; ++j)
            if (DemandedElts[j])
              DemandedSrcElts.setBit((j * Scale) + i);
        }
      }

      APInt KnownSrcUndef, KnownSrcZero;
      if (SimplifyDemandedVectorElts(Src, DemandedSrcElts, KnownSrcUndef,
                                     KnownSrcZero, TLO, Depth + 1))
        return true;

      KnownBits KnownSrcBits;
      if (SimplifyDemandedBits(Src, DemandedSrcBits, DemandedSrcElts,
                               KnownSrcBits, TLO, Depth + 1))
        return true;
    } else if ((NumSrcEltBits % BitWidth) == 0 &&
               TLO.DAG.getDataLayout().isLittleEndian()) {
      unsigned Scale = NumSrcEltBits / BitWidth;
      unsigned NumSrcElts = SrcVT.isVector() ? SrcVT.getVectorNumElements() : 1;
      APInt DemandedSrcBits = APInt::getNullValue(NumSrcEltBits);
      APInt DemandedSrcElts = APInt::getNullValue(NumSrcElts);
      for (unsigned i = 0; i != NumElts; ++i)
        if (DemandedElts[i]) {
          unsigned Offset = (i % Scale) * BitWidth;
          DemandedSrcBits.insertBits(DemandedBits, Offset);
          DemandedSrcElts.setBit(i / Scale);
        }

      if (SrcVT.isVector()) {
        APInt KnownSrcUndef, KnownSrcZero;
        if (SimplifyDemandedVectorElts(Src, DemandedSrcElts, KnownSrcUndef,
                                       KnownSrcZero, TLO, Depth + 1))
          return true;
      }

      KnownBits KnownSrcBits;
      if (SimplifyDemandedBits(Src, DemandedSrcBits, DemandedSrcElts,
                               KnownSrcBits, TLO, Depth + 1))
        return true;
    }

    // If this is a bitcast, let computeKnownBits handle it. Only do this on a
    // recursive call where Known may be useful to the caller.
    if (Depth > 0) {
      Known = TLO.DAG.computeKnownBits(Op, DemandedElts, Depth);
      return false;
    }
    break;
  }
  case ISD::ADD:
  case ISD::MUL:
  case ISD::SUB: {
    // Add, Sub, and Mul don't demand any bits in positions beyond that
    // of the highest bit demanded of them.
    SDValue Op0 = Op.getOperand(0), Op1 = Op.getOperand(1);
    SDNodeFlags Flags = Op.getNode()->getFlags();
    unsigned DemandedBitsLZ = DemandedBits.countLeadingZeros();
    APInt LoMask = APInt::getLowBitsSet(BitWidth, BitWidth - DemandedBitsLZ);
    if (SimplifyDemandedBits(Op0, LoMask, DemandedElts, Known2, TLO,
                             Depth + 1) ||
        SimplifyDemandedBits(Op1, LoMask, DemandedElts, Known2, TLO,
                             Depth + 1) ||
        // See if the operation should be performed at a smaller bit width.
        ShrinkDemandedOp(Op, BitWidth, DemandedBits, TLO)) {
      if (Flags.hasNoSignedWrap() || Flags.hasNoUnsignedWrap()) {
        // Disable the nsw and nuw flags. We can no longer guarantee that we
        // won't wrap after simplification.
        Flags.setNoSignedWrap(false);
        Flags.setNoUnsignedWrap(false);
        SDValue NewOp =
            TLO.DAG.getNode(Op.getOpcode(), dl, VT, Op0, Op1, Flags);
        return TLO.CombineTo(Op, NewOp);
      }
      return true;
    }

    // Attempt to avoid multi-use ops if we don't need anything from them.
    if (!LoMask.isAllOnesValue() || !DemandedElts.isAllOnesValue()) {
      SDValue DemandedOp0 = SimplifyMultipleUseDemandedBits(
          Op0, LoMask, DemandedElts, TLO.DAG, Depth + 1);
      SDValue DemandedOp1 = SimplifyMultipleUseDemandedBits(
          Op1, LoMask, DemandedElts, TLO.DAG, Depth + 1);
      if (DemandedOp0 || DemandedOp1) {
        Flags.setNoSignedWrap(false);
        Flags.setNoUnsignedWrap(false);
        Op0 = DemandedOp0 ? DemandedOp0 : Op0;
        Op1 = DemandedOp1 ? DemandedOp1 : Op1;
        SDValue NewOp =
            TLO.DAG.getNode(Op.getOpcode(), dl, VT, Op0, Op1, Flags);
        return TLO.CombineTo(Op, NewOp);
      }
    }

    // If we have a constant operand, we may be able to turn it into -1 if we
    // do not demand the high bits. This can make the constant smaller to
    // encode, allow more general folding, or match specialized instruction
    // patterns (eg, 'blsr' on x86). Don't bother changing 1 to -1 because that
    // is probably not useful (and could be detrimental).
    ConstantSDNode *C = isConstOrConstSplat(Op1);
    APInt HighMask = APInt::getHighBitsSet(BitWidth, DemandedBitsLZ);
    if (C && !C->isAllOnesValue() && !C->isOne() &&
        (C->getAPIntValue() | HighMask).isAllOnesValue()) {
      SDValue Neg1 = TLO.DAG.getAllOnesConstant(dl, VT);
      // Disable the nsw and nuw flags. We can no longer guarantee that we
      // won't wrap after simplification.
      Flags.setNoSignedWrap(false);
      Flags.setNoUnsignedWrap(false);
      SDValue NewOp = TLO.DAG.getNode(Op.getOpcode(), dl, VT, Op0, Neg1, Flags);
      return TLO.CombineTo(Op, NewOp);
    }

    LLVM_FALLTHROUGH;
  }
  default:
    if (Op.getOpcode() >= ISD::BUILTIN_OP_END) {
      if (SimplifyDemandedBitsForTargetNode(Op, DemandedBits, DemandedElts,
                                            Known, TLO, Depth))
        return true;
      break;
    }

    // Just use computeKnownBits to compute output bits.
    Known = TLO.DAG.computeKnownBits(Op, DemandedElts, Depth);
    break;
  }

  // If we know the value of all of the demanded bits, return this as a
  // constant.
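  // E.g. (a hypothetical i8 case) with Known.Zero = 0xF2 and Known.One = 0x0D,
  // every possible demanded bit is covered by 0xF2 | 0x0D = 0xFF, so the node
  // can be replaced by the constant 0x0D.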
  if (DemandedBits.isSubsetOf(Known.Zero | Known.One)) {
    // Avoid folding to a constant if any OpaqueConstant is involved.
    const SDNode *N = Op.getNode();
    for (SDNodeIterator I = SDNodeIterator::begin(N),
                        E = SDNodeIterator::end(N);
         I != E; ++I) {
      SDNode *Op = *I;
      if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op))
        if (C->isOpaque())
          return false;
    }
    // TODO: Handle float bits as well.
    if (VT.isInteger())
      return TLO.CombineTo(Op, TLO.DAG.getConstant(Known.One, dl, VT));
  }

  return false;
}

bool TargetLowering::SimplifyDemandedVectorElts(SDValue Op,
                                                const APInt &DemandedElts,
                                                APInt &KnownUndef,
                                                APInt &KnownZero,
                                                DAGCombinerInfo &DCI) const {
  SelectionDAG &DAG = DCI.DAG;
  TargetLoweringOpt TLO(DAG, !DCI.isBeforeLegalize(),
                        !DCI.isBeforeLegalizeOps());

  bool Simplified =
      SimplifyDemandedVectorElts(Op, DemandedElts, KnownUndef, KnownZero, TLO);
  if (Simplified) {
    DCI.AddToWorklist(Op.getNode());
    DCI.CommitTargetLoweringOpt(TLO);
  }

  return Simplified;
}

/// Given a vector binary operation and known undefined elements for each input
/// operand, compute whether each element of the output is undefined.
static APInt getKnownUndefForVectorBinop(SDValue BO, SelectionDAG &DAG,
                                         const APInt &UndefOp0,
                                         const APInt &UndefOp1) {
  EVT VT = BO.getValueType();
  assert(DAG.getTargetLoweringInfo().isBinOp(BO.getOpcode()) && VT.isVector() &&
         "Vector binop only");

  EVT EltVT = VT.getVectorElementType();
  unsigned NumElts = VT.getVectorNumElements();
  assert(UndefOp0.getBitWidth() == NumElts &&
         UndefOp1.getBitWidth() == NumElts && "Bad type for undef analysis");

  auto getUndefOrConstantElt = [&](SDValue V, unsigned Index,
                                   const APInt &UndefVals) {
    if (UndefVals[Index])
      return DAG.getUNDEF(EltVT);

    if (auto *BV = dyn_cast<BuildVectorSDNode>(V)) {
      // Try hard to make sure that the getNode() call is not creating
      // temporary nodes. Ignore opaque integers because they do not
      // constant fold.
      SDValue Elt = BV->getOperand(Index);
      auto *C = dyn_cast<ConstantSDNode>(Elt);
      if (isa<ConstantFPSDNode>(Elt) || Elt.isUndef() || (C && !C->isOpaque()))
        return Elt;
    }

    return SDValue();
  };

  APInt KnownUndef = APInt::getNullValue(NumElts);
  for (unsigned i = 0; i != NumElts; ++i) {
    // If both inputs for this element are either constant or undef and match
    // the element type, compute the constant/undef result for this element of
    // the vector.
    // TODO: Ideally we would use FoldConstantArithmetic() here, but that does
    // not handle FP constants. The code within getNode() should be refactored
    // to avoid the danger of creating a bogus temporary node here.
    SDValue C0 = getUndefOrConstantElt(BO.getOperand(0), i, UndefOp0);
    SDValue C1 = getUndefOrConstantElt(BO.getOperand(1), i, UndefOp1);
    if (C0 && C1 && C0.getValueType() == EltVT && C1.getValueType() == EltVT)
      if (DAG.getNode(BO.getOpcode(), SDLoc(BO), EltVT, C0, C1).isUndef())
        KnownUndef.setBit(i);
  }
  return KnownUndef;
}

bool TargetLowering::SimplifyDemandedVectorElts(
    SDValue Op, const APInt &OriginalDemandedElts, APInt &KnownUndef,
    APInt &KnownZero, TargetLoweringOpt &TLO, unsigned Depth,
    bool AssumeSingleUse) const {
  EVT VT = Op.getValueType();
  APInt DemandedElts = OriginalDemandedElts;
  unsigned NumElts = DemandedElts.getBitWidth();
  assert(VT.isVector() && "Expected vector op");
  assert(VT.getVectorNumElements() == NumElts &&
         "Mask size mismatches value type element count!");

  KnownUndef = KnownZero = APInt::getNullValue(NumElts);

  // Undef operand.
  if (Op.isUndef()) {
    KnownUndef.setAllBits();
    return false;
  }

  // If Op has other users, assume that all elements are needed.
  if (!Op.getNode()->hasOneUse() && !AssumeSingleUse)
    DemandedElts.setAllBits();

  // Not demanding any elements from Op.
  if (DemandedElts == 0) {
    KnownUndef.setAllBits();
    return TLO.CombineTo(Op, TLO.DAG.getUNDEF(VT));
  }

  // Limit search depth.
  if (Depth >= SelectionDAG::MaxRecursionDepth)
    return false;

  SDLoc DL(Op);
  unsigned EltSizeInBits = VT.getScalarSizeInBits();

  switch (Op.getOpcode()) {
  case ISD::SCALAR_TO_VECTOR: {
    if (!DemandedElts[0]) {
      KnownUndef.setAllBits();
      return TLO.CombineTo(Op, TLO.DAG.getUNDEF(VT));
    }
    KnownUndef.setHighBits(NumElts - 1);
    break;
  }
  case ISD::BITCAST: {
    SDValue Src = Op.getOperand(0);
    EVT SrcVT = Src.getValueType();

    // We only handle vectors here.
    // TODO - investigate calling SimplifyDemandedBits/ComputeKnownBits?
    if (!SrcVT.isVector())
      break;

    // Fast handling of 'identity' bitcasts.
    unsigned NumSrcElts = SrcVT.getVectorNumElements();
    if (NumSrcElts == NumElts)
      return SimplifyDemandedVectorElts(Src, DemandedElts, KnownUndef,
                                        KnownZero, TLO, Depth + 1);

    APInt SrcZero, SrcUndef;
    APInt SrcDemandedElts = APInt::getNullValue(NumSrcElts);

    // Bitcast from 'large element' src vector to 'small element' vector: we
    // must demand a source element if any DemandedElt maps to it.
    if ((NumElts % NumSrcElts) == 0) {
      unsigned Scale = NumElts / NumSrcElts;
      for (unsigned i = 0; i != NumElts; ++i)
        if (DemandedElts[i])
          SrcDemandedElts.setBit(i / Scale);

      if (SimplifyDemandedVectorElts(Src, SrcDemandedElts, SrcUndef, SrcZero,
                                     TLO, Depth + 1))
        return true;

      // Try calling SimplifyDemandedBits, converting demanded elts to the bits
      // of the large element.
      // TODO - bigendian once we have test coverage.
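      // E.g. for a little-endian v2i32 -> v8i8 bitcast, demanding output
      // element 5 translates to demanding bits [8,16) of source element 1
      // (Scale == 4, 5 / 4 == 1, and (5 % 4) * 8 == 8).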
      if (TLO.DAG.getDataLayout().isLittleEndian()) {
        unsigned SrcEltSizeInBits = SrcVT.getScalarSizeInBits();
        APInt SrcDemandedBits = APInt::getNullValue(SrcEltSizeInBits);
        for (unsigned i = 0; i != NumElts; ++i)
          if (DemandedElts[i]) {
            unsigned Ofs = (i % Scale) * EltSizeInBits;
            SrcDemandedBits.setBits(Ofs, Ofs + EltSizeInBits);
          }

        KnownBits Known;
        if (SimplifyDemandedBits(Src, SrcDemandedBits, Known, TLO, Depth + 1))
          return true;
      }

      // If the src element is zero/undef then so are all the output elements
      // it covers - only demanded elements are guaranteed to be correct.
      for (unsigned i = 0; i != NumSrcElts; ++i) {
        if (SrcDemandedElts[i]) {
          if (SrcZero[i])
            KnownZero.setBits(i * Scale, (i + 1) * Scale);
          if (SrcUndef[i])
            KnownUndef.setBits(i * Scale, (i + 1) * Scale);
        }
      }
    }

    // Bitcast from 'small element' src vector to 'large element' vector: we
    // demand all smaller source elements covered by the larger demanded
    // element of this vector.
    if ((NumSrcElts % NumElts) == 0) {
      unsigned Scale = NumSrcElts / NumElts;
      for (unsigned i = 0; i != NumElts; ++i)
        if (DemandedElts[i])
          SrcDemandedElts.setBits(i * Scale, (i + 1) * Scale);

      if (SimplifyDemandedVectorElts(Src, SrcDemandedElts, SrcUndef, SrcZero,
                                     TLO, Depth + 1))
        return true;

      // If all the src elements covering an output element are zero/undef,
      // then the output element will be as well, assuming it was demanded.
      for (unsigned i = 0; i != NumElts; ++i) {
        if (DemandedElts[i]) {
          if (SrcZero.extractBits(Scale, i * Scale).isAllOnesValue())
            KnownZero.setBit(i);
          if (SrcUndef.extractBits(Scale, i * Scale).isAllOnesValue())
            KnownUndef.setBit(i);
        }
      }
    }
    break;
  }
  case ISD::BUILD_VECTOR: {
    // Check all elements and simplify any unused elements with UNDEF.
    if (!DemandedElts.isAllOnesValue()) {
      // Don't simplify BROADCASTS.
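      // (A build_vector splat such as (build_vector x, x, x, x) is left
      //  intact here; punching undef into unused lanes would presumably
      //  stop later combines from recognising it as a broadcast.)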
      if (llvm::any_of(Op->op_values(),
                       [&](SDValue Elt) { return Op.getOperand(0) != Elt; })) {
        SmallVector<SDValue, 32> Ops(Op->op_begin(), Op->op_end());
        bool Updated = false;
        for (unsigned i = 0; i != NumElts; ++i) {
          if (!DemandedElts[i] && !Ops[i].isUndef()) {
            Ops[i] = TLO.DAG.getUNDEF(Ops[0].getValueType());
            KnownUndef.setBit(i);
            Updated = true;
          }
        }
        if (Updated)
          return TLO.CombineTo(Op, TLO.DAG.getBuildVector(VT, DL, Ops));
      }
    }
    for (unsigned i = 0; i != NumElts; ++i) {
      SDValue SrcOp = Op.getOperand(i);
      if (SrcOp.isUndef()) {
        KnownUndef.setBit(i);
      } else if (EltSizeInBits == SrcOp.getScalarValueSizeInBits() &&
                 (isNullConstant(SrcOp) || isNullFPConstant(SrcOp))) {
        KnownZero.setBit(i);
      }
    }
    break;
  }
  case ISD::CONCAT_VECTORS: {
    EVT SubVT = Op.getOperand(0).getValueType();
    unsigned NumSubVecs = Op.getNumOperands();
    unsigned NumSubElts = SubVT.getVectorNumElements();
    for (unsigned i = 0; i != NumSubVecs; ++i) {
      SDValue SubOp = Op.getOperand(i);
      APInt SubElts = DemandedElts.extractBits(NumSubElts, i * NumSubElts);
      APInt SubUndef, SubZero;
      if (SimplifyDemandedVectorElts(SubOp, SubElts, SubUndef, SubZero, TLO,
                                     Depth + 1))
        return true;
      KnownUndef.insertBits(SubUndef, i * NumSubElts);
      KnownZero.insertBits(SubZero, i * NumSubElts);
    }
    break;
  }
  case ISD::INSERT_SUBVECTOR: {
    if (!isa<ConstantSDNode>(Op.getOperand(2)))
      break;
    SDValue Base = Op.getOperand(0);
    SDValue Sub = Op.getOperand(1);
    EVT SubVT = Sub.getValueType();
    unsigned NumSubElts = SubVT.getVectorNumElements();
    const APInt &Idx = Op.getConstantOperandAPInt(2);
    if (Idx.ugt(NumElts - NumSubElts))
      break;
    unsigned SubIdx = Idx.getZExtValue();
    APInt SubElts = DemandedElts.extractBits(NumSubElts, SubIdx);
    APInt SubUndef, SubZero;
    if (SimplifyDemandedVectorElts(Sub, SubElts, SubUndef, SubZero, TLO,
                                   Depth + 1))
      return true;
    APInt BaseElts = DemandedElts;
    BaseElts.insertBits(APInt::getNullValue(NumSubElts), SubIdx);

    // If none of the base operand elements are demanded, replace it with
    // undef.
    if (!BaseElts && !Base.isUndef())
      return TLO.CombineTo(Op, TLO.DAG.getNode(ISD::INSERT_SUBVECTOR, DL, VT,
                                               TLO.DAG.getUNDEF(VT),
                                               Op.getOperand(1),
                                               Op.getOperand(2)));

    if (SimplifyDemandedVectorElts(Base, BaseElts, KnownUndef, KnownZero, TLO,
                                   Depth + 1))
      return true;
    KnownUndef.insertBits(SubUndef, SubIdx);
    KnownZero.insertBits(SubZero, SubIdx);
    break;
  }
  case ISD::EXTRACT_SUBVECTOR: {
    SDValue Src = Op.getOperand(0);
    ConstantSDNode *SubIdx = dyn_cast<ConstantSDNode>(Op.getOperand(1));
    unsigned NumSrcElts = Src.getValueType().getVectorNumElements();
    if (SubIdx && SubIdx->getAPIntValue().ule(NumSrcElts - NumElts)) {
      // Offset the demanded elts by the subvector index.
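      // E.g. extracting the upper v2i64 half of a v4i64 source (Idx == 2)
      // with DemandedElts = 0b01 demands only element 2 of the source
      // (0b01 zero-extended to four bits and shifted left by 2 is 0b0100).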
      uint64_t Idx = SubIdx->getZExtValue();
      APInt SrcElts = DemandedElts.zextOrSelf(NumSrcElts).shl(Idx);
      APInt SrcUndef, SrcZero;
      if (SimplifyDemandedVectorElts(Src, SrcElts, SrcUndef, SrcZero, TLO,
                                     Depth + 1))
        return true;
      KnownUndef = SrcUndef.extractBits(NumElts, Idx);
      KnownZero = SrcZero.extractBits(NumElts, Idx);
    }
    break;
  }
  case ISD::INSERT_VECTOR_ELT: {
    SDValue Vec = Op.getOperand(0);
    SDValue Scl = Op.getOperand(1);
    auto *CIdx = dyn_cast<ConstantSDNode>(Op.getOperand(2));

    // For a legal, constant insertion index, if we don't need this insertion
    // then strip it, else remove it from the demanded elts.
    if (CIdx && CIdx->getAPIntValue().ult(NumElts)) {
      unsigned Idx = CIdx->getZExtValue();
      if (!DemandedElts[Idx])
        return TLO.CombineTo(Op, Vec);

      APInt DemandedVecElts(DemandedElts);
      DemandedVecElts.clearBit(Idx);
      if (SimplifyDemandedVectorElts(Vec, DemandedVecElts, KnownUndef,
                                     KnownZero, TLO, Depth + 1))
        return true;

      KnownUndef.clearBit(Idx);
      if (Scl.isUndef())
        KnownUndef.setBit(Idx);

      KnownZero.clearBit(Idx);
      if (isNullConstant(Scl) || isNullFPConstant(Scl))
        KnownZero.setBit(Idx);
      break;
    }

    APInt VecUndef, VecZero;
    if (SimplifyDemandedVectorElts(Vec, DemandedElts, VecUndef, VecZero, TLO,
                                   Depth + 1))
      return true;
    // Without knowing the insertion index we can't set KnownUndef/KnownZero.
    break;
  }
  case ISD::VSELECT: {
    // Try to transform the select condition based on the current demanded
    // elements.
    // TODO: If a condition element is undef, we can choose from one arm of the
    // select (and if one arm is undef, then we can propagate that to the
    // result).
    // TODO - add support for constant vselect masks (see IR version of this).
    APInt UnusedUndef, UnusedZero;
    if (SimplifyDemandedVectorElts(Op.getOperand(0), DemandedElts, UnusedUndef,
                                   UnusedZero, TLO, Depth + 1))
      return true;

    // See if we can simplify either vselect operand.
    APInt DemandedLHS(DemandedElts);
    APInt DemandedRHS(DemandedElts);
    APInt UndefLHS, ZeroLHS;
    APInt UndefRHS, ZeroRHS;
    if (SimplifyDemandedVectorElts(Op.getOperand(1), DemandedLHS, UndefLHS,
                                   ZeroLHS, TLO, Depth + 1))
      return true;
    if (SimplifyDemandedVectorElts(Op.getOperand(2), DemandedRHS, UndefRHS,
                                   ZeroRHS, TLO, Depth + 1))
      return true;

    KnownUndef = UndefLHS & UndefRHS;
    KnownZero = ZeroLHS & ZeroRHS;
    break;
  }
  case ISD::VECTOR_SHUFFLE: {
    ArrayRef<int> ShuffleMask = cast<ShuffleVectorSDNode>(Op)->getMask();

    // Collect demanded elements from shuffle operands.
    APInt DemandedLHS(NumElts, 0);
    APInt DemandedRHS(NumElts, 0);
    for (unsigned i = 0; i != NumElts; ++i) {
      int M = ShuffleMask[i];
      if (M < 0 || !DemandedElts[i])
        continue;
      assert(0 <= M && M < (int)(2 * NumElts) && "Shuffle index out of range");
      if (M < (int)NumElts)
        DemandedLHS.setBit(M);
      else
        DemandedRHS.setBit(M - NumElts);
    }

    // See if we can simplify either shuffle operand.
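    // E.g. for a shuffle mask <0, 5, 2, 7> with DemandedElts = 0b1010, only
    // lanes 1 and 3 are live, so DemandedLHS = 0 and DemandedRHS = 0b1010
    // (source elements 5 and 7 map to RHS elements 1 and 3).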
    APInt UndefLHS, ZeroLHS;
    APInt UndefRHS, ZeroRHS;
    if (SimplifyDemandedVectorElts(Op.getOperand(0), DemandedLHS, UndefLHS,
                                   ZeroLHS, TLO, Depth + 1))
      return true;
    if (SimplifyDemandedVectorElts(Op.getOperand(1), DemandedRHS, UndefRHS,
                                   ZeroRHS, TLO, Depth + 1))
      return true;

    // Simplify mask using undef elements from LHS/RHS.
    bool Updated = false;
    bool IdentityLHS = true, IdentityRHS = true;
    SmallVector<int, 32> NewMask(ShuffleMask.begin(), ShuffleMask.end());
    for (unsigned i = 0; i != NumElts; ++i) {
      int &M = NewMask[i];
      if (M < 0)
        continue;
      if (!DemandedElts[i] || (M < (int)NumElts && UndefLHS[M]) ||
          (M >= (int)NumElts && UndefRHS[M - NumElts])) {
        Updated = true;
        M = -1;
      }
      IdentityLHS &= (M < 0) || (M == (int)i);
      IdentityRHS &= (M < 0) || ((M - NumElts) == i);
    }

    // Update legal shuffle masks based on demanded elements, but only if it
    // won't reduce to an Identity mask, which can cause premature removal of
    // the shuffle mask.
    if (Updated && !IdentityLHS && !IdentityRHS && !TLO.LegalOps) {
      SDValue LegalShuffle =
          buildLegalVectorShuffle(VT, DL, Op.getOperand(0), Op.getOperand(1),
                                  NewMask, TLO.DAG);
      if (LegalShuffle)
        return TLO.CombineTo(Op, LegalShuffle);
    }

    // Propagate undef/zero elements from LHS/RHS.
    for (unsigned i = 0; i != NumElts; ++i) {
      int M = ShuffleMask[i];
      if (M < 0) {
        KnownUndef.setBit(i);
      } else if (M < (int)NumElts) {
        if (UndefLHS[M])
          KnownUndef.setBit(i);
        if (ZeroLHS[M])
          KnownZero.setBit(i);
      } else {
        if (UndefRHS[M - NumElts])
          KnownUndef.setBit(i);
        if (ZeroRHS[M - NumElts])
          KnownZero.setBit(i);
      }
    }
    break;
  }
  case ISD::ANY_EXTEND_VECTOR_INREG:
  case ISD::SIGN_EXTEND_VECTOR_INREG:
  case ISD::ZERO_EXTEND_VECTOR_INREG: {
    APInt SrcUndef, SrcZero;
    SDValue Src = Op.getOperand(0);
    unsigned NumSrcElts = Src.getValueType().getVectorNumElements();
    APInt DemandedSrcElts = DemandedElts.zextOrSelf(NumSrcElts);
    if (SimplifyDemandedVectorElts(Src, DemandedSrcElts, SrcUndef, SrcZero,
                                   TLO, Depth + 1))
      return true;
    KnownZero = SrcZero.zextOrTrunc(NumElts);
    KnownUndef = SrcUndef.zextOrTrunc(NumElts);

    if (Op.getOpcode() == ISD::ANY_EXTEND_VECTOR_INREG &&
        Op.getValueSizeInBits() == Src.getValueSizeInBits() &&
        DemandedSrcElts == 1 && TLO.DAG.getDataLayout().isLittleEndian()) {
      // aext - if we just need the bottom element then we can bitcast.
      return TLO.CombineTo(Op, TLO.DAG.getBitcast(VT, Src));
    }

    if (Op.getOpcode() == ISD::ZERO_EXTEND_VECTOR_INREG) {
      // zext(undef) upper bits are guaranteed to be zero.
      if (DemandedElts.isSubsetOf(KnownUndef))
        return TLO.CombineTo(Op, TLO.DAG.getConstant(0, SDLoc(Op), VT));
      KnownUndef.clearAllBits();
    }
    break;
  }

  // TODO: There are more binop opcodes that could be handled here - MUL, MIN,
  // MAX, saturated math, etc.
  case ISD::OR:
  case ISD::XOR:
  case ISD::ADD:
  case ISD::SUB:
  case ISD::FADD:
  case ISD::FSUB:
  case ISD::FMUL:
  case ISD::FDIV:
  case ISD::FREM: {
    APInt UndefRHS, ZeroRHS;
    if (SimplifyDemandedVectorElts(Op.getOperand(1), DemandedElts, UndefRHS,
                                   ZeroRHS, TLO, Depth + 1))
      return true;
    APInt UndefLHS, ZeroLHS;
    if (SimplifyDemandedVectorElts(Op.getOperand(0), DemandedElts, UndefLHS,
                                   ZeroLHS, TLO, Depth + 1))
      return true;

    KnownZero = ZeroLHS & ZeroRHS;
    KnownUndef = getKnownUndefForVectorBinop(Op, TLO.DAG, UndefLHS, UndefRHS);
    break;
  }
  case ISD::SHL:
  case ISD::SRL:
  case ISD::SRA:
  case ISD::ROTL:
  case ISD::ROTR: {
    APInt UndefRHS, ZeroRHS;
    if (SimplifyDemandedVectorElts(Op.getOperand(1), DemandedElts, UndefRHS,
                                   ZeroRHS, TLO, Depth + 1))
      return true;
    APInt UndefLHS, ZeroLHS;
    if (SimplifyDemandedVectorElts(Op.getOperand(0), DemandedElts, UndefLHS,
                                   ZeroLHS, TLO, Depth + 1))
      return true;

    KnownZero = ZeroLHS;
    KnownUndef = UndefLHS & UndefRHS; // TODO: use getKnownUndefForVectorBinop?
    break;
  }
  case ISD::MUL:
  case ISD::AND: {
    APInt SrcUndef, SrcZero;
    if (SimplifyDemandedVectorElts(Op.getOperand(1), DemandedElts, SrcUndef,
                                   SrcZero, TLO, Depth + 1))
      return true;
    if (SimplifyDemandedVectorElts(Op.getOperand(0), DemandedElts, KnownUndef,
                                   KnownZero, TLO, Depth + 1))
      return true;

    // If either side has a zero element, then the result element is zero,
    // even if the other is an UNDEF.
    // TODO: Extend getKnownUndefForVectorBinop to also deal with known zeros
    // and then handle 'and' nodes with the rest of the binop opcodes.
    KnownZero |= SrcZero;
    KnownUndef &= SrcUndef;
    KnownUndef &= ~KnownZero;
    break;
  }
  case ISD::TRUNCATE:
  case ISD::SIGN_EXTEND:
  case ISD::ZERO_EXTEND:
    if (SimplifyDemandedVectorElts(Op.getOperand(0), DemandedElts, KnownUndef,
                                   KnownZero, TLO, Depth + 1))
      return true;

    if (Op.getOpcode() == ISD::ZERO_EXTEND) {
      // zext(undef) upper bits are guaranteed to be zero.
      if (DemandedElts.isSubsetOf(KnownUndef))
        return TLO.CombineTo(Op, TLO.DAG.getConstant(0, SDLoc(Op), VT));
      KnownUndef.clearAllBits();
    }
    break;
  default: {
    if (Op.getOpcode() >= ISD::BUILTIN_OP_END) {
      if (SimplifyDemandedVectorEltsForTargetNode(Op, DemandedElts, KnownUndef,
                                                  KnownZero, TLO, Depth))
        return true;
    } else {
      KnownBits Known;
      APInt DemandedBits = APInt::getAllOnesValue(EltSizeInBits);
      if (SimplifyDemandedBits(Op, DemandedBits, OriginalDemandedElts, Known,
                               TLO, Depth, AssumeSingleUse))
        return true;
    }
    break;
  }
  }
  assert((KnownUndef & KnownZero) == 0 && "Elements flagged as undef AND zero");

  // Constant fold all undef cases.
  // TODO: Handle zero cases as well.
  if (DemandedElts.isSubsetOf(KnownUndef))
    return TLO.CombineTo(Op, TLO.DAG.getUNDEF(VT));

  return false;
}

/// Determine which of the bits specified in Mask are known to be either zero
/// or one and return them in the Known.
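/// The default implementation claims nothing: Known is simply reset, leaving
/// every bit unknown, and targets override this to report better information.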
void TargetLowering::computeKnownBitsForTargetNode(const SDValue Op,
                                                   KnownBits &Known,
                                                   const APInt &DemandedElts,
                                                   const SelectionDAG &DAG,
                                                   unsigned Depth) const {
  assert((Op.getOpcode() >= ISD::BUILTIN_OP_END ||
          Op.getOpcode() == ISD::INTRINSIC_WO_CHAIN ||
          Op.getOpcode() == ISD::INTRINSIC_W_CHAIN ||
          Op.getOpcode() == ISD::INTRINSIC_VOID) &&
         "Should use MaskedValueIsZero if you don't know whether Op"
         " is a target node!");
  Known.resetAll();
}

void TargetLowering::computeKnownBitsForTargetInstr(
    GISelKnownBits &Analysis, Register R, KnownBits &Known,
    const APInt &DemandedElts, const MachineRegisterInfo &MRI,
    unsigned Depth) const {
  Known.resetAll();
}

void TargetLowering::computeKnownBitsForFrameIndex(const SDValue Op,
                                                   KnownBits &Known,
                                                   const APInt &DemandedElts,
                                                   const SelectionDAG &DAG,
                                                   unsigned Depth) const {
  assert(isa<FrameIndexSDNode>(Op) && "expected FrameIndex");

  if (unsigned Align = DAG.InferPtrAlignment(Op)) {
    // The low bits are known zero if the pointer is aligned.
    Known.Zero.setLowBits(Log2_32(Align));
  }
}

/// This method can be implemented by targets that want to expose additional
/// information about sign bits to the DAG Combiner.
unsigned TargetLowering::ComputeNumSignBitsForTargetNode(SDValue Op,
                                                         const APInt &,
                                                         const SelectionDAG &,
                                                         unsigned Depth) const {
  assert((Op.getOpcode() >= ISD::BUILTIN_OP_END ||
          Op.getOpcode() == ISD::INTRINSIC_WO_CHAIN ||
          Op.getOpcode() == ISD::INTRINSIC_W_CHAIN ||
          Op.getOpcode() == ISD::INTRINSIC_VOID) &&
         "Should use ComputeNumSignBits if you don't know whether Op"
         " is a target node!");
  return 1;
}

bool TargetLowering::SimplifyDemandedVectorEltsForTargetNode(
    SDValue Op, const APInt &DemandedElts, APInt &KnownUndef, APInt &KnownZero,
    TargetLoweringOpt &TLO, unsigned Depth) const {
  assert((Op.getOpcode() >= ISD::BUILTIN_OP_END ||
          Op.getOpcode() == ISD::INTRINSIC_WO_CHAIN ||
          Op.getOpcode() == ISD::INTRINSIC_W_CHAIN ||
          Op.getOpcode() == ISD::INTRINSIC_VOID) &&
         "Should use SimplifyDemandedVectorElts if you don't know whether Op"
         " is a target node!");
  return false;
}

bool TargetLowering::SimplifyDemandedBitsForTargetNode(
    SDValue Op, const APInt &DemandedBits, const APInt &DemandedElts,
    KnownBits &Known, TargetLoweringOpt &TLO, unsigned Depth) const {
  assert((Op.getOpcode() >= ISD::BUILTIN_OP_END ||
          Op.getOpcode() == ISD::INTRINSIC_WO_CHAIN ||
          Op.getOpcode() == ISD::INTRINSIC_W_CHAIN ||
          Op.getOpcode() == ISD::INTRINSIC_VOID) &&
         "Should use SimplifyDemandedBits if you don't know whether Op"
         " is a target node!");
  computeKnownBitsForTargetNode(Op, Known, DemandedElts, TLO.DAG, Depth);
  return false;
}

SDValue TargetLowering::SimplifyMultipleUseDemandedBitsForTargetNode(
    SDValue Op, const APInt &DemandedBits, const APInt &DemandedElts,
    SelectionDAG &DAG, unsigned Depth) const {
  assert(
      (Op.getOpcode() >= ISD::BUILTIN_OP_END ||
       Op.getOpcode() == ISD::INTRINSIC_WO_CHAIN ||
       Op.getOpcode() == ISD::INTRINSIC_W_CHAIN ||
       Op.getOpcode() == ISD::INTRINSIC_VOID) &&
      "Should use SimplifyMultipleUseDemandedBits if you don't know whether Op"
      " is a target node!");
  return SDValue();
}

SDValue
TargetLowering::buildLegalVectorShuffle(EVT VT, const SDLoc &DL, SDValue N0,
                                        SDValue N1, MutableArrayRef<int> Mask,
                                        SelectionDAG &DAG) const {
  bool LegalMask = isShuffleMaskLegal(Mask, VT);
  if (!LegalMask) {
    std::swap(N0, N1);
    ShuffleVectorSDNode::commuteMask(Mask);
    LegalMask = isShuffleMaskLegal(Mask, VT);
  }

  if (!LegalMask)
    return SDValue();

  return DAG.getVectorShuffle(VT, DL, N0, N1, Mask);
}

const Constant *TargetLowering::getTargetConstantFromLoad(LoadSDNode *) const {
  return nullptr;
}

bool TargetLowering::isKnownNeverNaNForTargetNode(SDValue Op,
                                                  const SelectionDAG &DAG,
                                                  bool SNaN,
                                                  unsigned Depth) const {
  assert((Op.getOpcode() >= ISD::BUILTIN_OP_END ||
          Op.getOpcode() == ISD::INTRINSIC_WO_CHAIN ||
          Op.getOpcode() == ISD::INTRINSIC_W_CHAIN ||
          Op.getOpcode() == ISD::INTRINSIC_VOID) &&
         "Should use isKnownNeverNaN if you don't know whether Op"
         " is a target node!");
  return false;
}

// FIXME: Ideally, this would use ISD::isConstantSplatVector(), but that must
// work with truncating build vectors and vectors with elements of less than
// 8 bits.
bool TargetLowering::isConstTrueVal(const SDNode *N) const {
  if (!N)
    return false;

  APInt CVal;
  if (auto *CN = dyn_cast<ConstantSDNode>(N)) {
    CVal = CN->getAPIntValue();
  } else if (auto *BV = dyn_cast<BuildVectorSDNode>(N)) {
    auto *CN = BV->getConstantSplatNode();
    if (!CN)
      return false;

    // If this is a truncating build vector, truncate the splat value.
    // Otherwise, we may fail to match the expected values below.
    unsigned BVEltWidth = BV->getValueType(0).getScalarSizeInBits();
    CVal = CN->getAPIntValue();
    if (BVEltWidth < CVal.getBitWidth())
      CVal = CVal.trunc(BVEltWidth);
  } else {
    return false;
  }

  switch (getBooleanContents(N->getValueType(0))) {
  case UndefinedBooleanContent:
    return CVal[0];
  case ZeroOrOneBooleanContent:
    return CVal.isOneValue();
  case ZeroOrNegativeOneBooleanContent:
    return CVal.isAllOnesValue();
  }

  llvm_unreachable("Invalid boolean contents");
}

bool TargetLowering::isConstFalseVal(const SDNode *N) const {
  if (!N)
    return false;

  const ConstantSDNode *CN = dyn_cast<ConstantSDNode>(N);
  if (!CN) {
    const BuildVectorSDNode *BV = dyn_cast<BuildVectorSDNode>(N);
    if (!BV)
      return false;

    // We're only interested in constant splats; we don't care about undef
    // elements when identifying boolean constants, and getConstantSplatNode
    // returns null if all ops are undef.
    CN = BV->getConstantSplatNode();
    if (!CN)
      return false;
  }

  if (getBooleanContents(N->getValueType(0)) == UndefinedBooleanContent)
    return !CN->getAPIntValue()[0];

  return CN->isNullValue();
}

bool TargetLowering::isExtendedTrueVal(const ConstantSDNode *N, EVT VT,
                                       bool SExt) const {
  if (VT == MVT::i1)
    return N->isOne();

  TargetLowering::BooleanContent Cnt = getBooleanContents(VT);
  switch (Cnt) {
  case TargetLowering::ZeroOrOneBooleanContent:
    // An extended value of 1 is always true, unless its original type is i1,
    // in which case it will be sign extended to -1.
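    // E.g. a zero-extended i8 true arrives here as 1, while a sign-extended
    // i1 true arrives as -1 (all ones), which is why the i1 source type is
    // special-cased below.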
    return (N->isOne() && !SExt) || (SExt && (N->getValueType(0) != MVT::i1));
  case TargetLowering::UndefinedBooleanContent:
  case TargetLowering::ZeroOrNegativeOneBooleanContent:
    return N->isAllOnesValue() && SExt;
  }
  llvm_unreachable("Unexpected enumeration.");
}

/// This helper function of SimplifySetCC tries to optimize the comparison when
/// either operand of the SetCC node is a bitwise-and instruction.
SDValue TargetLowering::foldSetCCWithAnd(EVT VT, SDValue N0, SDValue N1,
                                         ISD::CondCode Cond, const SDLoc &DL,
                                         DAGCombinerInfo &DCI) const {
  // Match these patterns in any of their permutations:
  // (X & Y) == Y
  // (X & Y) != Y
  if (N1.getOpcode() == ISD::AND && N0.getOpcode() != ISD::AND)
    std::swap(N0, N1);

  EVT OpVT = N0.getValueType();
  if (N0.getOpcode() != ISD::AND || !OpVT.isInteger() ||
      (Cond != ISD::SETEQ && Cond != ISD::SETNE))
    return SDValue();

  SDValue X, Y;
  if (N0.getOperand(0) == N1) {
    X = N0.getOperand(1);
    Y = N0.getOperand(0);
  } else if (N0.getOperand(1) == N1) {
    X = N0.getOperand(0);
    Y = N0.getOperand(1);
  } else {
    return SDValue();
  }

  SelectionDAG &DAG = DCI.DAG;
  SDValue Zero = DAG.getConstant(0, DL, OpVT);
  if (DAG.isKnownToBeAPowerOfTwo(Y)) {
    // Simplify X & Y == Y to X & Y != 0 if Y has exactly one bit set.
    // Note that where Y is variable and is known to have at most one bit set
    // (for example, if it is Z & 1) we cannot do this; the expressions are not
    // equivalent when Y == 0.
    assert(OpVT.isInteger());
    Cond = ISD::getSetCCInverse(Cond, OpVT);
    if (DCI.isBeforeLegalizeOps() ||
        isCondCodeLegal(Cond, N0.getSimpleValueType()))
      return DAG.getSetCC(DL, VT, N0, Zero, Cond);
  } else if (N0.hasOneUse() && hasAndNotCompare(Y)) {
    // If the target supports an 'and-not' or 'and-complement' logic operation,
    // try to use that to make a comparison operation more efficient.
    // But don't do this transform if the mask is a single bit because there
    // are more efficient ways to deal with that case (for example, 'bt' on
    // x86 or 'rlwinm' on PPC).

    // Bail out if the compare operand that we want to turn into a zero is
    // already a zero (otherwise, infinite loop).
    auto *YConst = dyn_cast<ConstantSDNode>(Y);
    if (YConst && YConst->isNullValue())
      return SDValue();

    // Transform this into: ~X & Y == 0.
    SDValue NotX = DAG.getNOT(SDLoc(X), X, OpVT);
    SDValue NewAnd = DAG.getNode(ISD::AND, SDLoc(N0), OpVT, NotX, Y);
    return DAG.getSetCC(DL, VT, NewAnd, Zero, Cond);
  }

  return SDValue();
}

/// There are multiple IR patterns that could be checking whether certain
/// truncation of a signed number would be lossy or not. The pattern which is
/// best at IR level may not lower optimally. Thus, we want to unfold it.
/// We are looking for the following pattern: (KeptBits is a constant)
///   (add %x, (1 << (KeptBits-1))) srccond (1 << KeptBits)
/// KeptBits won't be bitwidth(x); that would be constant-folded to true/false.
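/// For example, with %x : i16 and KeptBits == 8, the check
///   (add %x, 128) u< 256
/// is true iff %x fits in i8, and unfolds to ((%x << 8) a>> 8) == %x.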
/// KeptBits also can't be 1; that would have been folded to %x dstcond 0.
/// We will unfold it into the natural trunc+sext pattern:
///   ((%x << C) a>> C) dstcond %x
/// Where C = bitwidth(x) - KeptBits and C u< bitwidth(x)
SDValue TargetLowering::optimizeSetCCOfSignedTruncationCheck(
    EVT SCCVT, SDValue N0, SDValue N1, ISD::CondCode Cond, DAGCombinerInfo &DCI,
    const SDLoc &DL) const {
  // We must be comparing with a constant.
  ConstantSDNode *C1;
  if (!(C1 = dyn_cast<ConstantSDNode>(N1)))
    return SDValue();

  // N0 should be:  add %x, (1 << (KeptBits-1))
  if (N0->getOpcode() != ISD::ADD)
    return SDValue();

  // And we must be 'add'ing a constant.
  ConstantSDNode *C01;
  if (!(C01 = dyn_cast<ConstantSDNode>(N0->getOperand(1))))
    return SDValue();

  SDValue X = N0->getOperand(0);
  EVT XVT = X.getValueType();

  // Validate constants ...

  APInt I1 = C1->getAPIntValue();

  ISD::CondCode NewCond;
  if (Cond == ISD::CondCode::SETULT) {
    NewCond = ISD::CondCode::SETEQ;
  } else if (Cond == ISD::CondCode::SETULE) {
    NewCond = ISD::CondCode::SETEQ;
    // But need to 'canonicalize' the constant.
    I1 += 1;
  } else if (Cond == ISD::CondCode::SETUGT) {
    NewCond = ISD::CondCode::SETNE;
    // But need to 'canonicalize' the constant.
    I1 += 1;
  } else if (Cond == ISD::CondCode::SETUGE) {
    NewCond = ISD::CondCode::SETNE;
  } else
    return SDValue();

  APInt I01 = C01->getAPIntValue();

  auto checkConstants = [&I1, &I01]() -> bool {
    // Both of them must be power-of-two, and the constant from setcc is
    // bigger.
    return I1.ugt(I01) && I1.isPowerOf2() && I01.isPowerOf2();
  };

  if (checkConstants()) {
    // Great, e.g. got  icmp ult i16 (add i16 %x, 128), 256
  } else {
    // What if we invert constants? (and the target predicate)
    I1.negate();
    I01.negate();
    assert(XVT.isInteger());
    NewCond = getSetCCInverse(NewCond, XVT);
    if (!checkConstants())
      return SDValue();
    // Great, e.g. got  icmp uge i16 (add i16 %x, -128), -256
  }

  // They are power-of-two, so which bit is set?
  const unsigned KeptBits = I1.logBase2();
  const unsigned KeptBitsMinusOne = I01.logBase2();

  // Magic!
  if (KeptBits != (KeptBitsMinusOne + 1))
    return SDValue();
  assert(KeptBits > 0 && KeptBits < XVT.getSizeInBits() && "unreachable");

  // We don't want to do this in every single case.
  SelectionDAG &DAG = DCI.DAG;
  if (!DAG.getTargetLoweringInfo().shouldTransformSignedTruncationCheck(
          XVT, KeptBits))
    return SDValue();

  const unsigned MaskedBits = XVT.getSizeInBits() - KeptBits;
  assert(MaskedBits > 0 && MaskedBits < XVT.getSizeInBits() && "unreachable");

  // Unfold into:  ((%x << C) a>> C) cond %x
  // Where 'cond' will be either 'eq' or 'ne'.
  SDValue ShiftAmt = DAG.getConstant(MaskedBits, DL, XVT);
  SDValue T0 = DAG.getNode(ISD::SHL, DL, XVT, X, ShiftAmt);
  SDValue T1 = DAG.getNode(ISD::SRA, DL, XVT, T0, ShiftAmt);
  SDValue T2 = DAG.getSetCC(DL, SCCVT, T1, X, NewCond);

  return T2;
}

// (X & (C l>>/<< Y)) ==/!= 0  -->  ((X <</l>> Y) & C) ==/!= 0
SDValue TargetLowering::optimizeSetCCByHoistingAndByConstFromLogicalShift(
    EVT SCCVT, SDValue N0, SDValue N1C, ISD::CondCode Cond,
    DAGCombinerInfo &DCI, const SDLoc &DL) const {
  assert(isConstOrConstSplat(N1C) &&
         isConstOrConstSplat(N1C)->getAPIntValue().isNullValue() &&
         "Should be a comparison with 0.");
  assert((Cond == ISD::SETEQ || Cond == ISD::SETNE) &&
         "Valid only for [in]equality comparisons.");

  unsigned NewShiftOpcode;
  SDValue X, C, Y;

  SelectionDAG &DAG = DCI.DAG;
  const TargetLowering &TLI = DAG.getTargetLoweringInfo();

  // Look for '(C l>>/<< Y)'.
  auto Match = [&NewShiftOpcode, &X, &C, &Y, &TLI, &DAG](SDValue V) {
    // The shift should be one-use.
    if (!V.hasOneUse())
      return false;
    unsigned OldShiftOpcode = V.getOpcode();
    switch (OldShiftOpcode) {
    case ISD::SHL:
      NewShiftOpcode = ISD::SRL;
      break;
    case ISD::SRL:
      NewShiftOpcode = ISD::SHL;
      break;
    default:
      return false; // must be a logical shift.
    }
    // We should be shifting a constant.
    // FIXME: best to use isConstantOrConstantVector().
    C = V.getOperand(0);
    ConstantSDNode *CC =
        isConstOrConstSplat(C, /*AllowUndefs=*/true, /*AllowTruncation=*/true);
    if (!CC)
      return false;
    Y = V.getOperand(1);

    ConstantSDNode *XC =
        isConstOrConstSplat(X, /*AllowUndefs=*/true, /*AllowTruncation=*/true);
    return TLI.shouldProduceAndByConstByHoistingConstFromShiftsLHSOfAnd(
        X, XC, CC, Y, OldShiftOpcode, NewShiftOpcode, DAG);
  };

  // LHS of comparison should be a one-use 'and'.
  if (N0.getOpcode() != ISD::AND || !N0.hasOneUse())
    return SDValue();

  X = N0.getOperand(0);
  SDValue Mask = N0.getOperand(1);

  // 'and' is commutative!
  if (!Match(Mask)) {
    std::swap(X, Mask);
    if (!Match(Mask))
      return SDValue();
  }

  EVT VT = X.getValueType();

  // Produce:
  // ((X 'OppositeShiftOpcode' Y) & C) Cond 0
  SDValue T0 = DAG.getNode(NewShiftOpcode, DL, VT, X, Y);
  SDValue T1 = DAG.getNode(ISD::AND, DL, VT, T0, C);
  SDValue T2 = DAG.getSetCC(DL, SCCVT, T1, N1C, Cond);
  return T2;
}

/// Try to fold an equality comparison with a {add/sub/xor} binary operation as
/// the 1st operand (N0). Callers are expected to swap the N0/N1 parameters to
/// handle the commuted versions of these patterns.
SDValue TargetLowering::foldSetCCWithBinOp(EVT VT, SDValue N0, SDValue N1,
                                           ISD::CondCode Cond, const SDLoc &DL,
                                           DAGCombinerInfo &DCI) const {
  unsigned BOpcode = N0.getOpcode();
  assert((BOpcode == ISD::ADD || BOpcode == ISD::SUB || BOpcode == ISD::XOR) &&
         "Unexpected binop");
  assert((Cond == ISD::SETEQ || Cond == ISD::SETNE) && "Unexpected condcode");

  // (X + Y) == X --> Y == 0
  // (X - Y) == X --> Y == 0
  // (X ^ Y) == X --> Y == 0
  SelectionDAG &DAG = DCI.DAG;
  EVT OpVT = N0.getValueType();
  SDValue X = N0.getOperand(0);
  SDValue Y = N0.getOperand(1);
  if (X == N1)
    return DAG.getSetCC(DL, VT, Y, DAG.getConstant(0, DL, OpVT), Cond);

  if (Y != N1)
    return SDValue();

  // (X + Y) == Y --> X == 0
  // (X ^ Y) == Y --> X == 0
  if (BOpcode == ISD::ADD || BOpcode == ISD::XOR)
    return DAG.getSetCC(DL, VT, X, DAG.getConstant(0, DL, OpVT), Cond);

  // The shift would not be valid if the operands are boolean (i1).
  if (!N0.hasOneUse() || OpVT.getScalarSizeInBits() == 1)
    return SDValue();

  // (X - Y) == Y --> X == Y << 1
  EVT ShiftVT = getShiftAmountTy(OpVT, DAG.getDataLayout(),
                                 !DCI.isBeforeLegalize());
  SDValue One = DAG.getConstant(1, DL, ShiftVT);
  SDValue YShl1 = DAG.getNode(ISD::SHL, DL, N1.getValueType(), Y, One);
  if (!DCI.isCalledByLegalizer())
    DCI.AddToWorklist(YShl1.getNode());
  return DAG.getSetCC(DL, VT, X, YShl1, Cond);
}

/// Try to simplify a setcc built with the specified operands and cc. If it is
/// unable to simplify it, return a null SDValue.
SDValue TargetLowering::SimplifySetCC(EVT VT, SDValue N0, SDValue N1,
                                      ISD::CondCode Cond, bool foldBooleans,
                                      DAGCombinerInfo &DCI,
                                      const SDLoc &dl) const {
  SelectionDAG &DAG = DCI.DAG;
  const DataLayout &Layout = DAG.getDataLayout();
  EVT OpVT = N0.getValueType();

  // Constant fold or commute setcc.
  if (SDValue Fold = DAG.FoldSetCC(VT, N0, N1, Cond, dl))
    return Fold;

  // Ensure that the constant occurs on the RHS and fold constant comparisons.
  // TODO: Handle non-splat vector constants. All undef causes trouble.
  ISD::CondCode SwappedCC = ISD::getSetCCSwappedOperands(Cond);
  if (isConstOrConstSplat(N0) &&
      (DCI.isBeforeLegalizeOps() ||
       isCondCodeLegal(SwappedCC, N0.getSimpleValueType())))
    return DAG.getSetCC(dl, VT, N1, N0, SwappedCC);

  // If we have a subtract with the same 2 non-constant operands as this setcc
  // -- but in reverse order -- then try to commute the operands of this setcc
  // to match. A matching pair of setcc (cmp) and sub may be combined into 1
  // instruction on some targets.
  if (!isConstOrConstSplat(N0) && !isConstOrConstSplat(N1) &&
      (DCI.isBeforeLegalizeOps() ||
       isCondCodeLegal(SwappedCC, N0.getSimpleValueType())) &&
      DAG.getNodeIfExists(ISD::SUB, DAG.getVTList(OpVT), {N1, N0}) &&
      !DAG.getNodeIfExists(ISD::SUB, DAG.getVTList(OpVT), {N0, N1}))
    return DAG.getSetCC(dl, VT, N1, N0, SwappedCC);

  if (auto *N1C = dyn_cast<ConstantSDNode>(N1.getNode())) {
    const APInt &C1 = N1C->getAPIntValue();

    // If the LHS is '(srl (ctlz x), 5)', the RHS is 0/1, and this is an
    // equality comparison, then we're just comparing whether X itself is
    // zero.
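    // (For i32, ctlz returns 32 only when x == 0, and bit 5 is the only bit
    //  that distinguishes 32 from all smaller results, so "srl by 5" isolates
    //  exactly the x == 0 case.)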
3156 if (N0.getOpcode() == ISD::SRL && (C1.isNullValue() || C1.isOneValue()) && 3157 N0.getOperand(0).getOpcode() == ISD::CTLZ && 3158 N0.getOperand(1).getOpcode() == ISD::Constant) { 3159 const APInt &ShAmt = N0.getConstantOperandAPInt(1); 3160 if ((Cond == ISD::SETEQ || Cond == ISD::SETNE) && 3161 ShAmt == Log2_32(N0.getValueSizeInBits())) { 3162 if ((C1 == 0) == (Cond == ISD::SETEQ)) { 3163 // (srl (ctlz x), 5) == 0 -> X != 0 3164 // (srl (ctlz x), 5) != 1 -> X != 0 3165 Cond = ISD::SETNE; 3166 } else { 3167 // (srl (ctlz x), 5) != 0 -> X == 0 3168 // (srl (ctlz x), 5) == 1 -> X == 0 3169 Cond = ISD::SETEQ; 3170 } 3171 SDValue Zero = DAG.getConstant(0, dl, N0.getValueType()); 3172 return DAG.getSetCC(dl, VT, N0.getOperand(0).getOperand(0), 3173 Zero, Cond); 3174 } 3175 } 3176 3177 SDValue CTPOP = N0; 3178 // Look through truncs that don't change the value of a ctpop. 3179 if (N0.hasOneUse() && N0.getOpcode() == ISD::TRUNCATE) 3180 CTPOP = N0.getOperand(0); 3181 3182 if (CTPOP.hasOneUse() && CTPOP.getOpcode() == ISD::CTPOP && 3183 (N0 == CTPOP || 3184 N0.getValueSizeInBits() > Log2_32_Ceil(CTPOP.getValueSizeInBits()))) { 3185 EVT CTVT = CTPOP.getValueType(); 3186 SDValue CTOp = CTPOP.getOperand(0); 3187 3188 // (ctpop x) u< 2 -> (x & x-1) == 0 3189 // (ctpop x) u> 1 -> (x & x-1) != 0 3190 if ((Cond == ISD::SETULT && C1 == 2) || (Cond == ISD::SETUGT && C1 == 1)){ 3191 SDValue NegOne = DAG.getAllOnesConstant(dl, CTVT); 3192 SDValue Add = DAG.getNode(ISD::ADD, dl, CTVT, CTOp, NegOne); 3193 SDValue And = DAG.getNode(ISD::AND, dl, CTVT, CTOp, Add); 3194 ISD::CondCode CC = Cond == ISD::SETULT ? ISD::SETEQ : ISD::SETNE; 3195 return DAG.getSetCC(dl, VT, And, DAG.getConstant(0, dl, CTVT), CC); 3196 } 3197 3198 // If ctpop is not supported, expand a power-of-2 comparison based on it. 3199 if (C1 == 1 && !isOperationLegalOrCustom(ISD::CTPOP, CTVT) && 3200 (Cond == ISD::SETEQ || Cond == ISD::SETNE)) { 3201 // (ctpop x) == 1 --> (x != 0) && ((x & x-1) == 0) 3202 // (ctpop x) != 1 --> (x == 0) || ((x & x-1) != 0) 3203 SDValue Zero = DAG.getConstant(0, dl, CTVT); 3204 SDValue NegOne = DAG.getAllOnesConstant(dl, CTVT); 3205 assert(CTVT.isInteger()); 3206 ISD::CondCode InvCond = ISD::getSetCCInverse(Cond, CTVT); 3207 SDValue Add = DAG.getNode(ISD::ADD, dl, CTVT, CTOp, NegOne); 3208 SDValue And = DAG.getNode(ISD::AND, dl, CTVT, CTOp, Add); 3209 SDValue LHS = DAG.getSetCC(dl, VT, CTOp, Zero, InvCond); 3210 SDValue RHS = DAG.getSetCC(dl, VT, And, Zero, Cond); 3211 unsigned LogicOpcode = Cond == ISD::SETEQ ? 
ISD::AND : ISD::OR; 3212 return DAG.getNode(LogicOpcode, dl, VT, LHS, RHS); 3213 } 3214 } 3215 3216 // (zext x) == C --> x == (trunc C) 3217 // (sext x) == C --> x == (trunc C) 3218 if ((Cond == ISD::SETEQ || Cond == ISD::SETNE) && 3219 DCI.isBeforeLegalize() && N0->hasOneUse()) { 3220 unsigned MinBits = N0.getValueSizeInBits(); 3221 SDValue PreExt; 3222 bool Signed = false; 3223 if (N0->getOpcode() == ISD::ZERO_EXTEND) { 3224 // ZExt 3225 MinBits = N0->getOperand(0).getValueSizeInBits(); 3226 PreExt = N0->getOperand(0); 3227 } else if (N0->getOpcode() == ISD::AND) { 3228 // DAGCombine turns costly ZExts into ANDs 3229 if (auto *C = dyn_cast<ConstantSDNode>(N0->getOperand(1))) 3230 if ((C->getAPIntValue()+1).isPowerOf2()) { 3231 MinBits = C->getAPIntValue().countTrailingOnes(); 3232 PreExt = N0->getOperand(0); 3233 } 3234 } else if (N0->getOpcode() == ISD::SIGN_EXTEND) { 3235 // SExt 3236 MinBits = N0->getOperand(0).getValueSizeInBits(); 3237 PreExt = N0->getOperand(0); 3238 Signed = true; 3239 } else if (auto *LN0 = dyn_cast<LoadSDNode>(N0)) { 3240 // ZEXTLOAD / SEXTLOAD 3241 if (LN0->getExtensionType() == ISD::ZEXTLOAD) { 3242 MinBits = LN0->getMemoryVT().getSizeInBits(); 3243 PreExt = N0; 3244 } else if (LN0->getExtensionType() == ISD::SEXTLOAD) { 3245 Signed = true; 3246 MinBits = LN0->getMemoryVT().getSizeInBits(); 3247 PreExt = N0; 3248 } 3249 } 3250 3251 // Figure out how many bits we need to preserve this constant. 3252 unsigned ReqdBits = Signed ? 3253 C1.getBitWidth() - C1.getNumSignBits() + 1 : 3254 C1.getActiveBits(); 3255 3256 // Make sure we're not losing bits from the constant. 3257 if (MinBits > 0 && 3258 MinBits < C1.getBitWidth() && 3259 MinBits >= ReqdBits) { 3260 EVT MinVT = EVT::getIntegerVT(*DAG.getContext(), MinBits); 3261 if (isTypeDesirableForOp(ISD::SETCC, MinVT)) { 3262 // Will get folded away. 3263 SDValue Trunc = DAG.getNode(ISD::TRUNCATE, dl, MinVT, PreExt); 3264 if (MinBits == 1 && C1 == 1) 3265 // Invert the condition. 3266 return DAG.getSetCC(dl, VT, Trunc, DAG.getConstant(0, dl, MVT::i1), 3267 Cond == ISD::SETEQ ? 
ISD::SETNE : ISD::SETEQ);
3268           SDValue C = DAG.getConstant(C1.trunc(MinBits), dl, MinVT);
3269           return DAG.getSetCC(dl, VT, Trunc, C, Cond);
3270         }
3271
3272         // If truncating the setcc operands is not desirable, we can still
3273         // simplify the expression in some cases:
3274         // setcc ([sz]ext (setcc x, y, cc)), 0, setne  -> setcc (x, y, cc)
3275         // setcc ([sz]ext (setcc x, y, cc)), 0, seteq  -> setcc (x, y, inv(cc))
3276         // setcc (zext (setcc x, y, cc)), 1, setne     -> setcc (x, y, inv(cc))
3277         // setcc (zext (setcc x, y, cc)), 1, seteq     -> setcc (x, y, cc)
3278         // setcc (sext (setcc x, y, cc)), -1, setne    -> setcc (x, y, inv(cc))
3279         // setcc (sext (setcc x, y, cc)), -1, seteq    -> setcc (x, y, cc)
3280         SDValue TopSetCC = N0->getOperand(0);
3281         unsigned N0Opc = N0->getOpcode();
3282         bool SExt = (N0Opc == ISD::SIGN_EXTEND);
3283         if (TopSetCC.getValueType() == MVT::i1 && VT == MVT::i1 &&
3284             TopSetCC.getOpcode() == ISD::SETCC &&
3285             (N0Opc == ISD::ZERO_EXTEND || N0Opc == ISD::SIGN_EXTEND) &&
3286             (isConstFalseVal(N1C) ||
3287              isExtendedTrueVal(N1C, N0->getValueType(0), SExt))) {
3288
3289           bool Inverse = (N1C->isNullValue() && Cond == ISD::SETEQ) ||
3290                          (!N1C->isNullValue() && Cond == ISD::SETNE);
3291
3292           if (!Inverse)
3293             return TopSetCC;
3294
3295           ISD::CondCode InvCond = ISD::getSetCCInverse(
3296               cast<CondCodeSDNode>(TopSetCC.getOperand(2))->get(),
3297               TopSetCC.getOperand(0).getValueType());
3298           return DAG.getSetCC(dl, VT, TopSetCC.getOperand(0),
3299                               TopSetCC.getOperand(1),
3300                               InvCond);
3301         }
3302       }
3303     }
3304
3305     // If the LHS is '(and load, const)', the RHS is 0, the test is for
3306     // equality or unsigned, and all 1 bits of the const are in the same
3307     // partial word, see if we can shorten the load.
3308     if (DCI.isBeforeLegalize() &&
3309         !ISD::isSignedIntSetCC(Cond) &&
3310         N0.getOpcode() == ISD::AND && C1 == 0 &&
3311         N0.getNode()->hasOneUse() &&
3312         isa<LoadSDNode>(N0.getOperand(0)) &&
3313         N0.getOperand(0).getNode()->hasOneUse() &&
3314         isa<ConstantSDNode>(N0.getOperand(1))) {
3315       LoadSDNode *Lod = cast<LoadSDNode>(N0.getOperand(0));
3316       APInt bestMask;
3317       unsigned bestWidth = 0, bestOffset = 0;
3318       if (Lod->isSimple() && Lod->isUnindexed()) {
3319         unsigned origWidth = N0.getValueSizeInBits();
3320         unsigned maskWidth = origWidth;
3321         // We can narrow (e.g.) 16-bit extending loads on a 32-bit target to
3322         // 8 bits, but have to be careful...
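        // For illustration: on a little-endian target,
        //   (seteq (and (load i32 p), 0xFF00), 0)
        // can be narrowed below to an 8-bit load of the byte at p+1
        // compared against 0, with the mask shrunk to 0xFF.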
3323 if (Lod->getExtensionType() != ISD::NON_EXTLOAD) 3324 origWidth = Lod->getMemoryVT().getSizeInBits(); 3325 const APInt &Mask = N0.getConstantOperandAPInt(1); 3326 for (unsigned width = origWidth / 2; width>=8; width /= 2) { 3327 APInt newMask = APInt::getLowBitsSet(maskWidth, width); 3328 for (unsigned offset=0; offset<origWidth/width; offset++) { 3329 if (Mask.isSubsetOf(newMask)) { 3330 if (Layout.isLittleEndian()) 3331 bestOffset = (uint64_t)offset * (width/8); 3332 else 3333 bestOffset = (origWidth/width - offset - 1) * (width/8); 3334 bestMask = Mask.lshr(offset * (width/8) * 8); 3335 bestWidth = width; 3336 break; 3337 } 3338 newMask <<= width; 3339 } 3340 } 3341 } 3342 if (bestWidth) { 3343 EVT newVT = EVT::getIntegerVT(*DAG.getContext(), bestWidth); 3344 if (newVT.isRound() && 3345 shouldReduceLoadWidth(Lod, ISD::NON_EXTLOAD, newVT)) { 3346 SDValue Ptr = Lod->getBasePtr(); 3347 if (bestOffset != 0) 3348 Ptr = DAG.getMemBasePlusOffset(Ptr, bestOffset, dl); 3349 unsigned NewAlign = MinAlign(Lod->getAlignment(), bestOffset); 3350 SDValue NewLoad = DAG.getLoad( 3351 newVT, dl, Lod->getChain(), Ptr, 3352 Lod->getPointerInfo().getWithOffset(bestOffset), NewAlign); 3353 return DAG.getSetCC(dl, VT, 3354 DAG.getNode(ISD::AND, dl, newVT, NewLoad, 3355 DAG.getConstant(bestMask.trunc(bestWidth), 3356 dl, newVT)), 3357 DAG.getConstant(0LL, dl, newVT), Cond); 3358 } 3359 } 3360 } 3361 3362 // If the LHS is a ZERO_EXTEND, perform the comparison on the input. 3363 if (N0.getOpcode() == ISD::ZERO_EXTEND) { 3364 unsigned InSize = N0.getOperand(0).getValueSizeInBits(); 3365 3366 // If the comparison constant has bits in the upper part, the 3367 // zero-extended value could never match. 3368 if (C1.intersects(APInt::getHighBitsSet(C1.getBitWidth(), 3369 C1.getBitWidth() - InSize))) { 3370 switch (Cond) { 3371 case ISD::SETUGT: 3372 case ISD::SETUGE: 3373 case ISD::SETEQ: 3374 return DAG.getConstant(0, dl, VT); 3375 case ISD::SETULT: 3376 case ISD::SETULE: 3377 case ISD::SETNE: 3378 return DAG.getConstant(1, dl, VT); 3379 case ISD::SETGT: 3380 case ISD::SETGE: 3381 // True if the sign bit of C1 is set. 3382 return DAG.getConstant(C1.isNegative(), dl, VT); 3383 case ISD::SETLT: 3384 case ISD::SETLE: 3385 // True if the sign bit of C1 isn't set. 3386 return DAG.getConstant(C1.isNonNegative(), dl, VT); 3387 default: 3388 break; 3389 } 3390 } 3391 3392 // Otherwise, we can perform the comparison with the low bits. 
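    // e.g. (seteq (zext i8 X to i32), 42) becomes (seteq X, 42) on i8 once
    // the constant is truncated; the case where C1 has bits above the input
    // width was already folded to a constant above.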
3393 switch (Cond) { 3394 case ISD::SETEQ: 3395 case ISD::SETNE: 3396 case ISD::SETUGT: 3397 case ISD::SETUGE: 3398 case ISD::SETULT: 3399 case ISD::SETULE: { 3400 EVT newVT = N0.getOperand(0).getValueType(); 3401 if (DCI.isBeforeLegalizeOps() || 3402 (isOperationLegal(ISD::SETCC, newVT) && 3403 isCondCodeLegal(Cond, newVT.getSimpleVT()))) { 3404 EVT NewSetCCVT = getSetCCResultType(Layout, *DAG.getContext(), newVT); 3405 SDValue NewConst = DAG.getConstant(C1.trunc(InSize), dl, newVT); 3406 3407 SDValue NewSetCC = DAG.getSetCC(dl, NewSetCCVT, N0.getOperand(0), 3408 NewConst, Cond); 3409 return DAG.getBoolExtOrTrunc(NewSetCC, dl, VT, N0.getValueType()); 3410 } 3411 break; 3412 } 3413 default: 3414 break; // todo, be more careful with signed comparisons 3415 } 3416 } else if (N0.getOpcode() == ISD::SIGN_EXTEND_INREG && 3417 (Cond == ISD::SETEQ || Cond == ISD::SETNE)) { 3418 EVT ExtSrcTy = cast<VTSDNode>(N0.getOperand(1))->getVT(); 3419 unsigned ExtSrcTyBits = ExtSrcTy.getSizeInBits(); 3420 EVT ExtDstTy = N0.getValueType(); 3421 unsigned ExtDstTyBits = ExtDstTy.getSizeInBits(); 3422 3423 // If the constant doesn't fit into the number of bits for the source of 3424 // the sign extension, it is impossible for both sides to be equal. 3425 if (C1.getMinSignedBits() > ExtSrcTyBits) 3426 return DAG.getConstant(Cond == ISD::SETNE, dl, VT); 3427 3428 SDValue ZextOp; 3429 EVT Op0Ty = N0.getOperand(0).getValueType(); 3430 if (Op0Ty == ExtSrcTy) { 3431 ZextOp = N0.getOperand(0); 3432 } else { 3433 APInt Imm = APInt::getLowBitsSet(ExtDstTyBits, ExtSrcTyBits); 3434 ZextOp = DAG.getNode(ISD::AND, dl, Op0Ty, N0.getOperand(0), 3435 DAG.getConstant(Imm, dl, Op0Ty)); 3436 } 3437 if (!DCI.isCalledByLegalizer()) 3438 DCI.AddToWorklist(ZextOp.getNode()); 3439 // Otherwise, make this a use of a zext. 3440 return DAG.getSetCC(dl, VT, ZextOp, 3441 DAG.getConstant(C1 & APInt::getLowBitsSet( 3442 ExtDstTyBits, 3443 ExtSrcTyBits), 3444 dl, ExtDstTy), 3445 Cond); 3446 } else if ((N1C->isNullValue() || N1C->isOne()) && 3447 (Cond == ISD::SETEQ || Cond == ISD::SETNE)) { 3448 // SETCC (SETCC), [0|1], [EQ|NE] -> SETCC 3449 if (N0.getOpcode() == ISD::SETCC && 3450 isTypeLegal(VT) && VT.bitsLE(N0.getValueType()) && 3451 (N0.getValueType() == MVT::i1 || 3452 getBooleanContents(N0.getOperand(0).getValueType()) == 3453 ZeroOrOneBooleanContent)) { 3454 bool TrueWhenTrue = (Cond == ISD::SETEQ) ^ (!N1C->isOne()); 3455 if (TrueWhenTrue) 3456 return DAG.getNode(ISD::TRUNCATE, dl, VT, N0); 3457 // Invert the condition. 3458 ISD::CondCode CC = cast<CondCodeSDNode>(N0.getOperand(2))->get(); 3459 CC = ISD::getSetCCInverse(CC, N0.getOperand(0).getValueType()); 3460 if (DCI.isBeforeLegalizeOps() || 3461 isCondCodeLegal(CC, N0.getOperand(0).getSimpleValueType())) 3462 return DAG.getSetCC(dl, VT, N0.getOperand(0), N0.getOperand(1), CC); 3463 } 3464 3465 if ((N0.getOpcode() == ISD::XOR || 3466 (N0.getOpcode() == ISD::AND && 3467 N0.getOperand(0).getOpcode() == ISD::XOR && 3468 N0.getOperand(1) == N0.getOperand(0).getOperand(1))) && 3469 isa<ConstantSDNode>(N0.getOperand(1)) && 3470 cast<ConstantSDNode>(N0.getOperand(1))->isOne()) { 3471 // If this is (X^1) == 0/1, swap the RHS and eliminate the xor. We 3472 // can only do this if the top bits are known zero. 3473 unsigned BitWidth = N0.getValueSizeInBits(); 3474 if (DAG.MaskedValueIsZero(N0, 3475 APInt::getHighBitsSet(BitWidth, 3476 BitWidth-1))) { 3477 // Okay, get the un-inverted input value. 
3478 SDValue Val; 3479 if (N0.getOpcode() == ISD::XOR) { 3480 Val = N0.getOperand(0); 3481 } else { 3482 assert(N0.getOpcode() == ISD::AND && 3483 N0.getOperand(0).getOpcode() == ISD::XOR); 3484 // ((X^1)&1)^1 -> X & 1 3485 Val = DAG.getNode(ISD::AND, dl, N0.getValueType(), 3486 N0.getOperand(0).getOperand(0), 3487 N0.getOperand(1)); 3488 } 3489 3490 return DAG.getSetCC(dl, VT, Val, N1, 3491 Cond == ISD::SETEQ ? ISD::SETNE : ISD::SETEQ); 3492 } 3493 } else if (N1C->isOne()) { 3494 SDValue Op0 = N0; 3495 if (Op0.getOpcode() == ISD::TRUNCATE) 3496 Op0 = Op0.getOperand(0); 3497 3498 if ((Op0.getOpcode() == ISD::XOR) && 3499 Op0.getOperand(0).getOpcode() == ISD::SETCC && 3500 Op0.getOperand(1).getOpcode() == ISD::SETCC) { 3501 SDValue XorLHS = Op0.getOperand(0); 3502 SDValue XorRHS = Op0.getOperand(1); 3503 // Ensure that the input setccs return an i1 type or 0/1 value. 3504 if (Op0.getValueType() == MVT::i1 || 3505 (getBooleanContents(XorLHS.getOperand(0).getValueType()) == 3506 ZeroOrOneBooleanContent && 3507 getBooleanContents(XorRHS.getOperand(0).getValueType()) == 3508 ZeroOrOneBooleanContent)) { 3509 // (xor (setcc), (setcc)) == / != 1 -> (setcc) != / == (setcc) 3510 Cond = (Cond == ISD::SETEQ) ? ISD::SETNE : ISD::SETEQ; 3511 return DAG.getSetCC(dl, VT, XorLHS, XorRHS, Cond); 3512 } 3513 } 3514 if (Op0.getOpcode() == ISD::AND && 3515 isa<ConstantSDNode>(Op0.getOperand(1)) && 3516 cast<ConstantSDNode>(Op0.getOperand(1))->isOne()) { 3517 // If this is (X&1) == / != 1, normalize it to (X&1) != / == 0. 3518 if (Op0.getValueType().bitsGT(VT)) 3519 Op0 = DAG.getNode(ISD::AND, dl, VT, 3520 DAG.getNode(ISD::TRUNCATE, dl, VT, Op0.getOperand(0)), 3521 DAG.getConstant(1, dl, VT)); 3522 else if (Op0.getValueType().bitsLT(VT)) 3523 Op0 = DAG.getNode(ISD::AND, dl, VT, 3524 DAG.getNode(ISD::ANY_EXTEND, dl, VT, Op0.getOperand(0)), 3525 DAG.getConstant(1, dl, VT)); 3526 3527 return DAG.getSetCC(dl, VT, Op0, 3528 DAG.getConstant(0, dl, Op0.getValueType()), 3529 Cond == ISD::SETEQ ? ISD::SETNE : ISD::SETEQ); 3530 } 3531 if (Op0.getOpcode() == ISD::AssertZext && 3532 cast<VTSDNode>(Op0.getOperand(1))->getVT() == MVT::i1) 3533 return DAG.getSetCC(dl, VT, Op0, 3534 DAG.getConstant(0, dl, Op0.getValueType()), 3535 Cond == ISD::SETEQ ? ISD::SETNE : ISD::SETEQ); 3536 } 3537 } 3538 3539 // Given: 3540 // icmp eq/ne (urem %x, %y), 0 3541 // Iff %x has 0 or 1 bits set, and %y has at least 2 bits set, omit 'urem': 3542 // icmp eq/ne %x, 0 3543 if (N0.getOpcode() == ISD::UREM && N1C->isNullValue() && 3544 (Cond == ISD::SETEQ || Cond == ISD::SETNE)) { 3545 KnownBits XKnown = DAG.computeKnownBits(N0.getOperand(0)); 3546 KnownBits YKnown = DAG.computeKnownBits(N0.getOperand(1)); 3547 if (XKnown.countMaxPopulation() == 1 && YKnown.countMinPopulation() >= 2) 3548 return DAG.getSetCC(dl, VT, N0.getOperand(0), N1, Cond); 3549 } 3550 3551 if (SDValue V = 3552 optimizeSetCCOfSignedTruncationCheck(VT, N0, N1, Cond, DCI, dl)) 3553 return V; 3554 } 3555 3556 // These simplifications apply to splat vectors as well. 3557 // TODO: Handle more splat vector cases. 
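  // e.g. with <4 x i32> operands (before legalize-ops),
  // (setult X, splat(1)) is folded below to (seteq X, splat(0)) via the
  // MinVal+1 rule.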
3558 if (auto *N1C = isConstOrConstSplat(N1)) { 3559 const APInt &C1 = N1C->getAPIntValue(); 3560 3561 APInt MinVal, MaxVal; 3562 unsigned OperandBitSize = N1C->getValueType(0).getScalarSizeInBits(); 3563 if (ISD::isSignedIntSetCC(Cond)) { 3564 MinVal = APInt::getSignedMinValue(OperandBitSize); 3565 MaxVal = APInt::getSignedMaxValue(OperandBitSize); 3566 } else { 3567 MinVal = APInt::getMinValue(OperandBitSize); 3568 MaxVal = APInt::getMaxValue(OperandBitSize); 3569 } 3570 3571 // Canonicalize GE/LE comparisons to use GT/LT comparisons. 3572 if (Cond == ISD::SETGE || Cond == ISD::SETUGE) { 3573 // X >= MIN --> true 3574 if (C1 == MinVal) 3575 return DAG.getBoolConstant(true, dl, VT, OpVT); 3576 3577 if (!VT.isVector()) { // TODO: Support this for vectors. 3578 // X >= C0 --> X > (C0 - 1) 3579 APInt C = C1 - 1; 3580 ISD::CondCode NewCC = (Cond == ISD::SETGE) ? ISD::SETGT : ISD::SETUGT; 3581 if ((DCI.isBeforeLegalizeOps() || 3582 isCondCodeLegal(NewCC, VT.getSimpleVT())) && 3583 (!N1C->isOpaque() || (C.getBitWidth() <= 64 && 3584 isLegalICmpImmediate(C.getSExtValue())))) { 3585 return DAG.getSetCC(dl, VT, N0, 3586 DAG.getConstant(C, dl, N1.getValueType()), 3587 NewCC); 3588 } 3589 } 3590 } 3591 3592 if (Cond == ISD::SETLE || Cond == ISD::SETULE) { 3593 // X <= MAX --> true 3594 if (C1 == MaxVal) 3595 return DAG.getBoolConstant(true, dl, VT, OpVT); 3596 3597 // X <= C0 --> X < (C0 + 1) 3598 if (!VT.isVector()) { // TODO: Support this for vectors. 3599 APInt C = C1 + 1; 3600 ISD::CondCode NewCC = (Cond == ISD::SETLE) ? ISD::SETLT : ISD::SETULT; 3601 if ((DCI.isBeforeLegalizeOps() || 3602 isCondCodeLegal(NewCC, VT.getSimpleVT())) && 3603 (!N1C->isOpaque() || (C.getBitWidth() <= 64 && 3604 isLegalICmpImmediate(C.getSExtValue())))) { 3605 return DAG.getSetCC(dl, VT, N0, 3606 DAG.getConstant(C, dl, N1.getValueType()), 3607 NewCC); 3608 } 3609 } 3610 } 3611 3612 if (Cond == ISD::SETLT || Cond == ISD::SETULT) { 3613 if (C1 == MinVal) 3614 return DAG.getBoolConstant(false, dl, VT, OpVT); // X < MIN --> false 3615 3616 // TODO: Support this for vectors after legalize ops. 3617 if (!VT.isVector() || DCI.isBeforeLegalizeOps()) { 3618 // Canonicalize setlt X, Max --> setne X, Max 3619 if (C1 == MaxVal) 3620 return DAG.getSetCC(dl, VT, N0, N1, ISD::SETNE); 3621 3622 // If we have setult X, 1, turn it into seteq X, 0 3623 if (C1 == MinVal+1) 3624 return DAG.getSetCC(dl, VT, N0, 3625 DAG.getConstant(MinVal, dl, N0.getValueType()), 3626 ISD::SETEQ); 3627 } 3628 } 3629 3630 if (Cond == ISD::SETGT || Cond == ISD::SETUGT) { 3631 if (C1 == MaxVal) 3632 return DAG.getBoolConstant(false, dl, VT, OpVT); // X > MAX --> false 3633 3634 // TODO: Support this for vectors after legalize ops. 3635 if (!VT.isVector() || DCI.isBeforeLegalizeOps()) { 3636 // Canonicalize setgt X, Min --> setne X, Min 3637 if (C1 == MinVal) 3638 return DAG.getSetCC(dl, VT, N0, N1, ISD::SETNE); 3639 3640 // If we have setugt X, Max-1, turn it into seteq X, Max 3641 if (C1 == MaxVal-1) 3642 return DAG.getSetCC(dl, VT, N0, 3643 DAG.getConstant(MaxVal, dl, N0.getValueType()), 3644 ISD::SETEQ); 3645 } 3646 } 3647 3648 if (Cond == ISD::SETEQ || Cond == ISD::SETNE) { 3649 // (X & (C l>>/<< Y)) ==/!= 0 --> ((X <</l>> Y) & C) ==/!= 0 3650 if (C1.isNullValue()) 3651 if (SDValue CC = optimizeSetCCByHoistingAndByConstFromLogicalShift( 3652 VT, N0, N1, Cond, DCI, dl)) 3653 return CC; 3654 } 3655 3656 // If we have "setcc X, C0", check to see if we can shrink the immediate 3657 // by changing cc. 3658 // TODO: Support this for vectors after legalize ops. 
3659     if (!VT.isVector() || DCI.isBeforeLegalizeOps()) {
3660       // SETUGT X, SINTMAX  -> SETLT X, 0
3661       if (Cond == ISD::SETUGT &&
3662           C1 == APInt::getSignedMaxValue(OperandBitSize))
3663         return DAG.getSetCC(dl, VT, N0,
3664                             DAG.getConstant(0, dl, N1.getValueType()),
3665                             ISD::SETLT);
3666
3667       // SETULT X, SINTMIN  -> SETGT X, -1
3668       if (Cond == ISD::SETULT &&
3669           C1 == APInt::getSignedMinValue(OperandBitSize)) {
3670         SDValue ConstMinusOne =
3671             DAG.getConstant(APInt::getAllOnesValue(OperandBitSize), dl,
3672                             N1.getValueType());
3673         return DAG.getSetCC(dl, VT, N0, ConstMinusOne, ISD::SETGT);
3674       }
3675     }
3676   }
3677
3678   // Back to non-vector simplifications.
3679   // TODO: Can we do these for vector splats?
3680   if (auto *N1C = dyn_cast<ConstantSDNode>(N1.getNode())) {
3681     const TargetLowering &TLI = DAG.getTargetLoweringInfo();
3682     const APInt &C1 = N1C->getAPIntValue();
3683     EVT ShValTy = N0.getValueType();
3684
3685     // Fold bit comparisons when we can.
3686     if ((Cond == ISD::SETEQ || Cond == ISD::SETNE) &&
3687         (VT == ShValTy || (isTypeLegal(VT) && VT.bitsLE(ShValTy))) &&
3688         N0.getOpcode() == ISD::AND) {
3689       if (auto *AndRHS = dyn_cast<ConstantSDNode>(N0.getOperand(1))) {
3690         EVT ShiftTy =
3691             getShiftAmountTy(ShValTy, Layout, !DCI.isBeforeLegalize());
3692         if (Cond == ISD::SETNE && C1 == 0) { // (X & 8) != 0  -->  (X & 8) >> 3
3693           // Perform the xform if the AND RHS is a single bit.
3694           unsigned ShCt = AndRHS->getAPIntValue().logBase2();
3695           if (AndRHS->getAPIntValue().isPowerOf2() &&
3696               !TLI.shouldAvoidTransformToShift(ShValTy, ShCt)) {
3697             return DAG.getNode(ISD::TRUNCATE, dl, VT,
3698                                DAG.getNode(ISD::SRL, dl, ShValTy, N0,
3699                                            DAG.getConstant(ShCt, dl, ShiftTy)));
3700           }
3701         } else if (Cond == ISD::SETEQ && C1 == AndRHS->getAPIntValue()) {
3702           // (X & 8) == 8  -->  (X & 8) >> 3
3703           // Perform the xform if C1 is a single bit.
3704 unsigned ShCt = C1.logBase2(); 3705 if (C1.isPowerOf2() && 3706 !TLI.shouldAvoidTransformToShift(ShValTy, ShCt)) { 3707 return DAG.getNode(ISD::TRUNCATE, dl, VT, 3708 DAG.getNode(ISD::SRL, dl, ShValTy, N0, 3709 DAG.getConstant(ShCt, dl, ShiftTy))); 3710 } 3711 } 3712 } 3713 } 3714 3715 if (C1.getMinSignedBits() <= 64 && 3716 !isLegalICmpImmediate(C1.getSExtValue())) { 3717 EVT ShiftTy = getShiftAmountTy(ShValTy, Layout, !DCI.isBeforeLegalize()); 3718 // (X & -256) == 256 -> (X >> 8) == 1 3719 if ((Cond == ISD::SETEQ || Cond == ISD::SETNE) && 3720 N0.getOpcode() == ISD::AND && N0.hasOneUse()) { 3721 if (auto *AndRHS = dyn_cast<ConstantSDNode>(N0.getOperand(1))) { 3722 const APInt &AndRHSC = AndRHS->getAPIntValue(); 3723 if ((-AndRHSC).isPowerOf2() && (AndRHSC & C1) == C1) { 3724 unsigned ShiftBits = AndRHSC.countTrailingZeros(); 3725 if (!TLI.shouldAvoidTransformToShift(ShValTy, ShiftBits)) { 3726 SDValue Shift = 3727 DAG.getNode(ISD::SRL, dl, ShValTy, N0.getOperand(0), 3728 DAG.getConstant(ShiftBits, dl, ShiftTy)); 3729 SDValue CmpRHS = DAG.getConstant(C1.lshr(ShiftBits), dl, ShValTy); 3730 return DAG.getSetCC(dl, VT, Shift, CmpRHS, Cond); 3731 } 3732 } 3733 } 3734 } else if (Cond == ISD::SETULT || Cond == ISD::SETUGE || 3735 Cond == ISD::SETULE || Cond == ISD::SETUGT) { 3736 bool AdjOne = (Cond == ISD::SETULE || Cond == ISD::SETUGT); 3737 // X < 0x100000000 -> (X >> 32) < 1 3738 // X >= 0x100000000 -> (X >> 32) >= 1 3739 // X <= 0x0ffffffff -> (X >> 32) < 1 3740 // X > 0x0ffffffff -> (X >> 32) >= 1 3741 unsigned ShiftBits; 3742 APInt NewC = C1; 3743 ISD::CondCode NewCond = Cond; 3744 if (AdjOne) { 3745 ShiftBits = C1.countTrailingOnes(); 3746 NewC = NewC + 1; 3747 NewCond = (Cond == ISD::SETULE) ? ISD::SETULT : ISD::SETUGE; 3748 } else { 3749 ShiftBits = C1.countTrailingZeros(); 3750 } 3751 NewC.lshrInPlace(ShiftBits); 3752 if (ShiftBits && NewC.getMinSignedBits() <= 64 && 3753 isLegalICmpImmediate(NewC.getSExtValue()) && 3754 !TLI.shouldAvoidTransformToShift(ShValTy, ShiftBits)) { 3755 SDValue Shift = DAG.getNode(ISD::SRL, dl, ShValTy, N0, 3756 DAG.getConstant(ShiftBits, dl, ShiftTy)); 3757 SDValue CmpRHS = DAG.getConstant(NewC, dl, ShValTy); 3758 return DAG.getSetCC(dl, VT, Shift, CmpRHS, NewCond); 3759 } 3760 } 3761 } 3762 } 3763 3764 if (!isa<ConstantFPSDNode>(N0) && isa<ConstantFPSDNode>(N1)) { 3765 auto *CFP = cast<ConstantFPSDNode>(N1); 3766 assert(!CFP->getValueAPF().isNaN() && "Unexpected NaN value"); 3767 3768 // Otherwise, we know the RHS is not a NaN. Simplify the node to drop the 3769 // constant if knowing that the operand is non-nan is enough. We prefer to 3770 // have SETO(x,x) instead of SETO(x, 0.0) because this avoids having to 3771 // materialize 0.0. 3772 if (Cond == ISD::SETO || Cond == ISD::SETUO) 3773 return DAG.getSetCC(dl, VT, N0, N0, Cond); 3774 3775 // setcc (fneg x), C -> setcc swap(pred) x, -C 3776 if (N0.getOpcode() == ISD::FNEG) { 3777 ISD::CondCode SwapCond = ISD::getSetCCSwappedOperands(Cond); 3778 if (DCI.isBeforeLegalizeOps() || 3779 isCondCodeLegal(SwapCond, N0.getSimpleValueType())) { 3780 SDValue NegN1 = DAG.getNode(ISD::FNEG, dl, N0.getValueType(), N1); 3781 return DAG.getSetCC(dl, VT, N0.getOperand(0), NegN1, SwapCond); 3782 } 3783 } 3784 3785 // If the condition is not legal, see if we can find an equivalent one 3786 // which is legal. 
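    // e.g. (setoeq X, -inf) can become (setole X, -inf): no value compares
    // ordered-less-than -inf, so <= holds exactly when == does, and NaNs
    // still fail the ordered predicate.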
3787     if (!isCondCodeLegal(Cond, N0.getSimpleValueType())) {
3788       // If the comparison was an awkward floating-point == or != and one of
3789       // the comparison operands is infinity or negative infinity, convert the
3790       // condition to a less-awkward <= or >=. Note that we must check the
3791       // legality of the condition code we are about to produce.
3791       if (CFP->getValueAPF().isInfinity()) {
3792         if (CFP->getValueAPF().isNegative()) {
3793           if (Cond == ISD::SETOEQ &&
3794               isCondCodeLegal(ISD::SETOLE, N0.getSimpleValueType()))
3795             return DAG.getSetCC(dl, VT, N0, N1, ISD::SETOLE);
3796           if (Cond == ISD::SETUEQ &&
3797               isCondCodeLegal(ISD::SETULE, N0.getSimpleValueType()))
3798             return DAG.getSetCC(dl, VT, N0, N1, ISD::SETULE);
3799           if (Cond == ISD::SETUNE &&
3800               isCondCodeLegal(ISD::SETUGT, N0.getSimpleValueType()))
3801             return DAG.getSetCC(dl, VT, N0, N1, ISD::SETUGT);
3802           if (Cond == ISD::SETONE &&
3803               isCondCodeLegal(ISD::SETOGT, N0.getSimpleValueType()))
3804             return DAG.getSetCC(dl, VT, N0, N1, ISD::SETOGT);
3805         } else {
3806           if (Cond == ISD::SETOEQ &&
3807               isCondCodeLegal(ISD::SETOGE, N0.getSimpleValueType()))
3808             return DAG.getSetCC(dl, VT, N0, N1, ISD::SETOGE);
3809           if (Cond == ISD::SETUEQ &&
3810               isCondCodeLegal(ISD::SETUGE, N0.getSimpleValueType()))
3811             return DAG.getSetCC(dl, VT, N0, N1, ISD::SETUGE);
3812           if (Cond == ISD::SETUNE &&
3813               isCondCodeLegal(ISD::SETULT, N0.getSimpleValueType()))
3814             return DAG.getSetCC(dl, VT, N0, N1, ISD::SETULT);
3815           if (Cond == ISD::SETONE &&
3816               isCondCodeLegal(ISD::SETOLT, N0.getSimpleValueType()))
3817             return DAG.getSetCC(dl, VT, N0, N1, ISD::SETOLT);
3818         }
3819       }
3820     }
3821   }
3822
3823   if (N0 == N1) {
3824     // The sext(setcc()) => setcc() optimization relies on the appropriate
3825     // constant being emitted.
3826     assert(!N0.getValueType().isInteger() &&
3827            "Integer types should be handled by FoldSetCC");
3828
3829     bool EqTrue = ISD::isTrueWhenEqual(Cond);
3830     unsigned UOF = ISD::getUnorderedFlavor(Cond);
3831     if (UOF == 2) // FP operators that are undefined on NaNs.
3832       return DAG.getBoolConstant(EqTrue, dl, VT, OpVT);
3833     if (UOF == unsigned(EqTrue))
3834       return DAG.getBoolConstant(EqTrue, dl, VT, OpVT);
3835     // Otherwise, we can't fold it. However, we can simplify it to SETUO/SETO
3836     // if it is not already.
3837     ISD::CondCode NewCond = UOF == 0 ? ISD::SETO : ISD::SETUO;
3838     if (NewCond != Cond &&
3839         (DCI.isBeforeLegalizeOps() ||
3840          isCondCodeLegal(NewCond, N0.getSimpleValueType())))
3841       return DAG.getSetCC(dl, VT, N0, N1, NewCond);
3842   }
3843
3844   if ((Cond == ISD::SETEQ || Cond == ISD::SETNE) &&
3845       N0.getValueType().isInteger()) {
3846     if (N0.getOpcode() == ISD::ADD || N0.getOpcode() == ISD::SUB ||
3847         N0.getOpcode() == ISD::XOR) {
3848       // Simplify (X+Y) == (X+Z) -->  Y == Z
3849       if (N0.getOpcode() == N1.getOpcode()) {
3850         if (N0.getOperand(0) == N1.getOperand(0))
3851           return DAG.getSetCC(dl, VT, N0.getOperand(1), N1.getOperand(1), Cond);
3852         if (N0.getOperand(1) == N1.getOperand(1))
3853           return DAG.getSetCC(dl, VT, N0.getOperand(0), N1.getOperand(0), Cond);
3854         if (isCommutativeBinOp(N0.getOpcode())) {
3855           // If X op Y == Y op X, try other combinations.
3856           if (N0.getOperand(0) == N1.getOperand(1))
3857             return DAG.getSetCC(dl, VT, N0.getOperand(1), N1.getOperand(0),
3858                                 Cond);
3859           if (N0.getOperand(1) == N1.getOperand(0))
3860             return DAG.getSetCC(dl, VT, N0.getOperand(0), N1.getOperand(1),
3861                                 Cond);
3862         }
3863       }
3864
3865       // If RHS is a legal immediate value for a compare instruction, we need
3866       // to be careful about increasing register pressure needlessly.
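      // e.g. if (add X, 4) also feeds an address computation, rewriting
      // (seteq (add X, 4), 10) into (seteq X, 6) would keep both X and X+4
      // live; comparing X+4 directly against the foldable immediate 10 is
      // cheaper.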
3867 bool LegalRHSImm = false; 3868 3869 if (auto *RHSC = dyn_cast<ConstantSDNode>(N1)) { 3870 if (auto *LHSR = dyn_cast<ConstantSDNode>(N0.getOperand(1))) { 3871 // Turn (X+C1) == C2 --> X == C2-C1 3872 if (N0.getOpcode() == ISD::ADD && N0.getNode()->hasOneUse()) { 3873 return DAG.getSetCC(dl, VT, N0.getOperand(0), 3874 DAG.getConstant(RHSC->getAPIntValue()- 3875 LHSR->getAPIntValue(), 3876 dl, N0.getValueType()), Cond); 3877 } 3878 3879 // Turn (X^C1) == C2 into X == C1^C2 iff X&~C1 = 0. 3880 if (N0.getOpcode() == ISD::XOR) 3881 // If we know that all of the inverted bits are zero, don't bother 3882 // performing the inversion. 3883 if (DAG.MaskedValueIsZero(N0.getOperand(0), ~LHSR->getAPIntValue())) 3884 return 3885 DAG.getSetCC(dl, VT, N0.getOperand(0), 3886 DAG.getConstant(LHSR->getAPIntValue() ^ 3887 RHSC->getAPIntValue(), 3888 dl, N0.getValueType()), 3889 Cond); 3890 } 3891 3892 // Turn (C1-X) == C2 --> X == C1-C2 3893 if (auto *SUBC = dyn_cast<ConstantSDNode>(N0.getOperand(0))) { 3894 if (N0.getOpcode() == ISD::SUB && N0.getNode()->hasOneUse()) { 3895 return 3896 DAG.getSetCC(dl, VT, N0.getOperand(1), 3897 DAG.getConstant(SUBC->getAPIntValue() - 3898 RHSC->getAPIntValue(), 3899 dl, N0.getValueType()), 3900 Cond); 3901 } 3902 } 3903 3904 // Could RHSC fold directly into a compare? 3905 if (RHSC->getValueType(0).getSizeInBits() <= 64) 3906 LegalRHSImm = isLegalICmpImmediate(RHSC->getSExtValue()); 3907 } 3908 3909 // (X+Y) == X --> Y == 0 and similar folds. 3910 // Don't do this if X is an immediate that can fold into a cmp 3911 // instruction and X+Y has other uses. It could be an induction variable 3912 // chain, and the transform would increase register pressure. 3913 if (!LegalRHSImm || N0.hasOneUse()) 3914 if (SDValue V = foldSetCCWithBinOp(VT, N0, N1, Cond, dl, DCI)) 3915 return V; 3916 } 3917 3918 if (N1.getOpcode() == ISD::ADD || N1.getOpcode() == ISD::SUB || 3919 N1.getOpcode() == ISD::XOR) 3920 if (SDValue V = foldSetCCWithBinOp(VT, N1, N0, Cond, dl, DCI)) 3921 return V; 3922 3923 if (SDValue V = foldSetCCWithAnd(VT, N0, N1, Cond, dl, DCI)) 3924 return V; 3925 } 3926 3927 // Fold remainder of division by a constant. 3928 if ((N0.getOpcode() == ISD::UREM || N0.getOpcode() == ISD::SREM) && 3929 N0.hasOneUse() && (Cond == ISD::SETEQ || Cond == ISD::SETNE)) { 3930 AttributeList Attr = DAG.getMachineFunction().getFunction().getAttributes(); 3931 3932 // When division is cheap or optimizing for minimum size, 3933 // fall through to DIVREM creation by skipping this fold. 3934 if (!isIntDivCheap(VT, Attr) && !Attr.hasFnAttribute(Attribute::MinSize)) { 3935 if (N0.getOpcode() == ISD::UREM) { 3936 if (SDValue Folded = buildUREMEqFold(VT, N0, N1, Cond, DCI, dl)) 3937 return Folded; 3938 } else if (N0.getOpcode() == ISD::SREM) { 3939 if (SDValue Folded = buildSREMEqFold(VT, N0, N1, Cond, DCI, dl)) 3940 return Folded; 3941 } 3942 } 3943 } 3944 3945 // Fold away ALL boolean setcc's. 
3946 if (N0.getValueType().getScalarType() == MVT::i1 && foldBooleans) { 3947 SDValue Temp; 3948 switch (Cond) { 3949 default: llvm_unreachable("Unknown integer setcc!"); 3950 case ISD::SETEQ: // X == Y -> ~(X^Y) 3951 Temp = DAG.getNode(ISD::XOR, dl, OpVT, N0, N1); 3952 N0 = DAG.getNOT(dl, Temp, OpVT); 3953 if (!DCI.isCalledByLegalizer()) 3954 DCI.AddToWorklist(Temp.getNode()); 3955 break; 3956 case ISD::SETNE: // X != Y --> (X^Y) 3957 N0 = DAG.getNode(ISD::XOR, dl, OpVT, N0, N1); 3958 break; 3959 case ISD::SETGT: // X >s Y --> X == 0 & Y == 1 --> ~X & Y 3960 case ISD::SETULT: // X <u Y --> X == 0 & Y == 1 --> ~X & Y 3961 Temp = DAG.getNOT(dl, N0, OpVT); 3962 N0 = DAG.getNode(ISD::AND, dl, OpVT, N1, Temp); 3963 if (!DCI.isCalledByLegalizer()) 3964 DCI.AddToWorklist(Temp.getNode()); 3965 break; 3966 case ISD::SETLT: // X <s Y --> X == 1 & Y == 0 --> ~Y & X 3967 case ISD::SETUGT: // X >u Y --> X == 1 & Y == 0 --> ~Y & X 3968 Temp = DAG.getNOT(dl, N1, OpVT); 3969 N0 = DAG.getNode(ISD::AND, dl, OpVT, N0, Temp); 3970 if (!DCI.isCalledByLegalizer()) 3971 DCI.AddToWorklist(Temp.getNode()); 3972 break; 3973 case ISD::SETULE: // X <=u Y --> X == 0 | Y == 1 --> ~X | Y 3974 case ISD::SETGE: // X >=s Y --> X == 0 | Y == 1 --> ~X | Y 3975 Temp = DAG.getNOT(dl, N0, OpVT); 3976 N0 = DAG.getNode(ISD::OR, dl, OpVT, N1, Temp); 3977 if (!DCI.isCalledByLegalizer()) 3978 DCI.AddToWorklist(Temp.getNode()); 3979 break; 3980 case ISD::SETUGE: // X >=u Y --> X == 1 | Y == 0 --> ~Y | X 3981 case ISD::SETLE: // X <=s Y --> X == 1 | Y == 0 --> ~Y | X 3982 Temp = DAG.getNOT(dl, N1, OpVT); 3983 N0 = DAG.getNode(ISD::OR, dl, OpVT, N0, Temp); 3984 break; 3985 } 3986 if (VT.getScalarType() != MVT::i1) { 3987 if (!DCI.isCalledByLegalizer()) 3988 DCI.AddToWorklist(N0.getNode()); 3989 // FIXME: If running after legalize, we probably can't do this. 3990 ISD::NodeType ExtendCode = getExtendForContent(getBooleanContents(OpVT)); 3991 N0 = DAG.getNode(ExtendCode, dl, VT, N0); 3992 } 3993 return N0; 3994 } 3995 3996 // Could not fold it. 3997 return SDValue(); 3998 } 3999 4000 /// Returns true (and the GlobalValue and the offset) if the node is a 4001 /// GlobalAddress + offset. 4002 bool TargetLowering::isGAPlusOffset(SDNode *WN, const GlobalValue *&GA, 4003 int64_t &Offset) const { 4004 4005 SDNode *N = unwrapAddress(SDValue(WN, 0)).getNode(); 4006 4007 if (auto *GASD = dyn_cast<GlobalAddressSDNode>(N)) { 4008 GA = GASD->getGlobal(); 4009 Offset += GASD->getOffset(); 4010 return true; 4011 } 4012 4013 if (N->getOpcode() == ISD::ADD) { 4014 SDValue N1 = N->getOperand(0); 4015 SDValue N2 = N->getOperand(1); 4016 if (isGAPlusOffset(N1.getNode(), GA, Offset)) { 4017 if (auto *V = dyn_cast<ConstantSDNode>(N2)) { 4018 Offset += V->getSExtValue(); 4019 return true; 4020 } 4021 } else if (isGAPlusOffset(N2.getNode(), GA, Offset)) { 4022 if (auto *V = dyn_cast<ConstantSDNode>(N1)) { 4023 Offset += V->getSExtValue(); 4024 return true; 4025 } 4026 } 4027 } 4028 4029 return false; 4030 } 4031 4032 SDValue TargetLowering::PerformDAGCombine(SDNode *N, 4033 DAGCombinerInfo &DCI) const { 4034 // Default implementation: no optimization. 
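  // Targets override this hook to combine their target-specific DAG nodes;
  // returning an empty SDValue leaves the node untouched.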
4035   return SDValue();
4036 }
4037
4038 //===----------------------------------------------------------------------===//
4039 //  Inline Assembler Implementation Methods
4040 //===----------------------------------------------------------------------===//
4041
4042 TargetLowering::ConstraintType
4043 TargetLowering::getConstraintType(StringRef Constraint) const {
4044   unsigned S = Constraint.size();
4045
4046   if (S == 1) {
4047     switch (Constraint[0]) {
4048     default: break;
4049     case 'r':
4050       return C_RegisterClass;
4051     case 'm': // memory
4052     case 'o': // offsettable
4053     case 'V': // not offsettable
4054       return C_Memory;
4055     case 'n': // Simple Integer
4056     case 'E': // Floating Point Constant
4057     case 'F': // Floating Point Constant
4058       return C_Immediate;
4059     case 'i': // Simple Integer or Relocatable Constant
4060     case 's': // Relocatable Constant
4061     case 'p': // Address.
4062     case 'X': // Allow ANY value.
4063     case 'I': // Target registers.
4064     case 'J':
4065     case 'K':
4066     case 'L':
4067     case 'M':
4068     case 'N':
4069     case 'O':
4070     case 'P':
4071     case '<':
4072     case '>':
4073       return C_Other;
4074     }
4075   }
4076
4077   if (S > 1 && Constraint[0] == '{' && Constraint[S - 1] == '}') {
4078     if (S == 8 && Constraint.substr(1, 6) == "memory") // "{memory}"
4079       return C_Memory;
4080     return C_Register;
4081   }
4082   return C_Unknown;
4083 }
4084
4085 /// Try to replace an X constraint, which matches anything, with another that
4086 /// has more specific requirements based on the type of the corresponding
4087 /// operand.
4088 const char *TargetLowering::LowerXConstraint(EVT ConstraintVT) const {
4089   if (ConstraintVT.isInteger())
4090     return "r";
4091   if (ConstraintVT.isFloatingPoint())
4092     return "f"; // works for many targets
4093   return nullptr;
4094 }
4095
4096 SDValue TargetLowering::LowerAsmOutputForConstraint(
4097     SDValue &Chain, SDValue &Flag, SDLoc DL, const AsmOperandInfo &OpInfo,
4098     SelectionDAG &DAG) const {
4099   return SDValue();
4100 }
4101
4102 /// Lower the specified operand into the Ops vector.
4103 /// If it is invalid, don't add anything to Ops.
4104 void TargetLowering::LowerAsmOperandForConstraint(SDValue Op,
4105                                                   std::string &Constraint,
4106                                                   std::vector<SDValue> &Ops,
4107                                                   SelectionDAG &DAG) const {
4108
4109   if (Constraint.length() > 1) return;
4110
4111   char ConstraintLetter = Constraint[0];
4112   switch (ConstraintLetter) {
4113   default: break;
4114   case 'X': // Allows any operand; labels (basic block) use this.
4115     if (Op.getOpcode() == ISD::BasicBlock ||
4116         Op.getOpcode() == ISD::TargetBlockAddress) {
4117       Ops.push_back(Op);
4118       return;
4119     }
4120     LLVM_FALLTHROUGH;
4121   case 'i': // Simple Integer or Relocatable Constant
4122   case 'n': // Simple Integer
4123   case 's': { // Relocatable Constant
4124
4125     GlobalAddressSDNode *GA;
4126     ConstantSDNode *C;
4127     BlockAddressSDNode *BA;
4128     uint64_t Offset = 0;
4129
4130     // Match (GA) or (C) or (GA+C) or (GA-C) or ((GA+C)+C) or (((GA+C)+C)+C),
4131     // etc., since getelementptr is variadic. We can't use
4132     // SelectionDAG::FoldSymbolOffset because it expects the GA to be
4133     // accessible, while in this case the GA may be furthest from the root
4134     // node, which is likely an ISD::ADD.
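    // e.g. for (add (add GA, 8), 4) the loop below peels one constant per
    // iteration, accumulating Offset = 12 before the GlobalAddressSDNode is
    // finally matched.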
4135     while (1) {
4136       if ((GA = dyn_cast<GlobalAddressSDNode>(Op)) && ConstraintLetter != 'n') {
4137         Ops.push_back(DAG.getTargetGlobalAddress(GA->getGlobal(), SDLoc(Op),
4138                                                  GA->getValueType(0),
4139                                                  Offset + GA->getOffset()));
4140         return;
4141       } else if ((C = dyn_cast<ConstantSDNode>(Op)) &&
4142                  ConstraintLetter != 's') {
4143         // gcc prints these as sign extended. Sign extend value to 64 bits
4144         // now; without this it would get ZExt'd later in
4145         // ScheduleDAGSDNodes::EmitNode, which is very generic.
4146         bool IsBool = C->getConstantIntValue()->getBitWidth() == 1;
4147         BooleanContent BCont = getBooleanContents(MVT::i64);
4148         ISD::NodeType ExtOpc = IsBool ? getExtendForContent(BCont)
4149                                       : ISD::SIGN_EXTEND;
4150         int64_t ExtVal = ExtOpc == ISD::ZERO_EXTEND ? C->getZExtValue()
4151                                                     : C->getSExtValue();
4152         Ops.push_back(DAG.getTargetConstant(Offset + ExtVal,
4153                                             SDLoc(C), MVT::i64));
4154         return;
4155       } else if ((BA = dyn_cast<BlockAddressSDNode>(Op)) &&
4156                  ConstraintLetter != 'n') {
4157         Ops.push_back(DAG.getTargetBlockAddress(
4158             BA->getBlockAddress(), BA->getValueType(0),
4159             Offset + BA->getOffset(), BA->getTargetFlags()));
4160         return;
4161       } else {
4162         const unsigned OpCode = Op.getOpcode();
4163         if (OpCode == ISD::ADD || OpCode == ISD::SUB) {
4164           if ((C = dyn_cast<ConstantSDNode>(Op.getOperand(0))))
4165             Op = Op.getOperand(1);
4166           // Subtraction is not commutative.
4167           else if (OpCode == ISD::ADD &&
4168                    (C = dyn_cast<ConstantSDNode>(Op.getOperand(1))))
4169             Op = Op.getOperand(0);
4170           else
4171             return;
4172           Offset += (OpCode == ISD::ADD ? 1 : -1) * C->getSExtValue();
4173           continue;
4174         }
4175       }
4176       return;
4177     }
4178     break;
4179   }
4180   }
4181 }
4182
4183 std::pair<unsigned, const TargetRegisterClass *>
4184 TargetLowering::getRegForInlineAsmConstraint(const TargetRegisterInfo *RI,
4185                                              StringRef Constraint,
4186                                              MVT VT) const {
4187   if (Constraint.empty() || Constraint[0] != '{')
4188     return std::make_pair(0u, static_cast<const TargetRegisterClass *>(nullptr));
4189   assert(*(Constraint.end() - 1) == '}' && "Not a brace enclosed constraint?");
4190
4191   // Remove the braces from around the name.
4192   StringRef RegName(Constraint.data() + 1, Constraint.size() - 2);
4193
4194   std::pair<unsigned, const TargetRegisterClass *> R =
4195       std::make_pair(0u, static_cast<const TargetRegisterClass *>(nullptr));
4196
4197   // Figure out which register class contains this reg.
4198   for (const TargetRegisterClass *RC : RI->regclasses()) {
4199     // If none of the value types for this register class are valid, we
4200     // can't use it. For example, 64-bit reg classes on 32-bit targets.
4201     if (!isLegalRC(*RI, *RC))
4202       continue;
4203
4204     for (TargetRegisterClass::iterator I = RC->begin(), E = RC->end();
4205          I != E; ++I) {
4206       if (RegName.equals_lower(RI->getRegAsmName(*I))) {
4207         std::pair<unsigned, const TargetRegisterClass *> S =
4208             std::make_pair(*I, RC);
4209
4210         // If this register class has the requested value type, return it,
4211         // otherwise keep searching and return the first class found
4212         // if no other is found which explicitly has the requested type.
4213         if (RI->isTypeLegalForClass(*RC, VT))
4214           return S;
4215         if (!R.second)
4216           R = S;
4217       }
4218     }
4219   }
4220
4221   return R;
4222 }
4223
4224 //===----------------------------------------------------------------------===//
4225 // Constraint Selection.
4226
4227 /// Return true if this is an input operand that is a matching constraint
4228 /// like "4".
4229 bool TargetLowering::AsmOperandInfo::isMatchingInputConstraint() const { 4230 assert(!ConstraintCode.empty() && "No known constraint!"); 4231 return isdigit(static_cast<unsigned char>(ConstraintCode[0])); 4232 } 4233 4234 /// If this is an input matching constraint, this method returns the output 4235 /// operand it matches. 4236 unsigned TargetLowering::AsmOperandInfo::getMatchedOperand() const { 4237 assert(!ConstraintCode.empty() && "No known constraint!"); 4238 return atoi(ConstraintCode.c_str()); 4239 } 4240 4241 /// Split up the constraint string from the inline assembly value into the 4242 /// specific constraints and their prefixes, and also tie in the associated 4243 /// operand values. 4244 /// If this returns an empty vector, and if the constraint string itself 4245 /// isn't empty, there was an error parsing. 4246 TargetLowering::AsmOperandInfoVector 4247 TargetLowering::ParseConstraints(const DataLayout &DL, 4248 const TargetRegisterInfo *TRI, 4249 ImmutableCallSite CS) const { 4250 /// Information about all of the constraints. 4251 AsmOperandInfoVector ConstraintOperands; 4252 const InlineAsm *IA = cast<InlineAsm>(CS.getCalledValue()); 4253 unsigned maCount = 0; // Largest number of multiple alternative constraints. 4254 4255 // Do a prepass over the constraints, canonicalizing them, and building up the 4256 // ConstraintOperands list. 4257 unsigned ArgNo = 0; // ArgNo - The argument of the CallInst. 4258 unsigned ResNo = 0; // ResNo - The result number of the next output. 4259 4260 for (InlineAsm::ConstraintInfo &CI : IA->ParseConstraints()) { 4261 ConstraintOperands.emplace_back(std::move(CI)); 4262 AsmOperandInfo &OpInfo = ConstraintOperands.back(); 4263 4264 // Update multiple alternative constraint count. 4265 if (OpInfo.multipleAlternatives.size() > maCount) 4266 maCount = OpInfo.multipleAlternatives.size(); 4267 4268 OpInfo.ConstraintVT = MVT::Other; 4269 4270 // Compute the value type for each operand. 4271 switch (OpInfo.Type) { 4272 case InlineAsm::isOutput: 4273 // Indirect outputs just consume an argument. 4274 if (OpInfo.isIndirect) { 4275 OpInfo.CallOperandVal = const_cast<Value *>(CS.getArgument(ArgNo++)); 4276 break; 4277 } 4278 4279 // The return value of the call is this value. As such, there is no 4280 // corresponding argument. 4281 assert(!CS.getType()->isVoidTy() && 4282 "Bad inline asm!"); 4283 if (StructType *STy = dyn_cast<StructType>(CS.getType())) { 4284 OpInfo.ConstraintVT = 4285 getSimpleValueType(DL, STy->getElementType(ResNo)); 4286 } else { 4287 assert(ResNo == 0 && "Asm only has one result!"); 4288 OpInfo.ConstraintVT = getSimpleValueType(DL, CS.getType()); 4289 } 4290 ++ResNo; 4291 break; 4292 case InlineAsm::isInput: 4293 OpInfo.CallOperandVal = const_cast<Value *>(CS.getArgument(ArgNo++)); 4294 break; 4295 case InlineAsm::isClobber: 4296 // Nothing to do. 4297 break; 4298 } 4299 4300 if (OpInfo.CallOperandVal) { 4301 llvm::Type *OpTy = OpInfo.CallOperandVal->getType(); 4302 if (OpInfo.isIndirect) { 4303 llvm::PointerType *PtrTy = dyn_cast<PointerType>(OpTy); 4304 if (!PtrTy) 4305 report_fatal_error("Indirect operand for inline asm not a pointer!"); 4306 OpTy = PtrTy->getElementType(); 4307 } 4308 4309 // Look for vector wrapped in a struct. e.g. { <16 x i8> }. 4310 if (StructType *STy = dyn_cast<StructType>(OpTy)) 4311 if (STy->getNumElements() == 1) 4312 OpTy = STy->getElementType(0); 4313 4314 // If OpTy is not a single value, it may be a struct/union that we 4315 // can tile with integers. 
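      // e.g. an operand of type { i16, i16 } is 32 bits in size, so it is
      // tiled as an i32 below.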
4316 if (!OpTy->isSingleValueType() && OpTy->isSized()) { 4317 unsigned BitSize = DL.getTypeSizeInBits(OpTy); 4318 switch (BitSize) { 4319 default: break; 4320 case 1: 4321 case 8: 4322 case 16: 4323 case 32: 4324 case 64: 4325 case 128: 4326 OpInfo.ConstraintVT = 4327 MVT::getVT(IntegerType::get(OpTy->getContext(), BitSize), true); 4328 break; 4329 } 4330 } else if (PointerType *PT = dyn_cast<PointerType>(OpTy)) { 4331 unsigned PtrSize = DL.getPointerSizeInBits(PT->getAddressSpace()); 4332 OpInfo.ConstraintVT = MVT::getIntegerVT(PtrSize); 4333 } else { 4334 OpInfo.ConstraintVT = MVT::getVT(OpTy, true); 4335 } 4336 } 4337 } 4338 4339 // If we have multiple alternative constraints, select the best alternative. 4340 if (!ConstraintOperands.empty()) { 4341 if (maCount) { 4342 unsigned bestMAIndex = 0; 4343 int bestWeight = -1; 4344 // weight: -1 = invalid match, and 0 = so-so match to 5 = good match. 4345 int weight = -1; 4346 unsigned maIndex; 4347 // Compute the sums of the weights for each alternative, keeping track 4348 // of the best (highest weight) one so far. 4349 for (maIndex = 0; maIndex < maCount; ++maIndex) { 4350 int weightSum = 0; 4351 for (unsigned cIndex = 0, eIndex = ConstraintOperands.size(); 4352 cIndex != eIndex; ++cIndex) { 4353 AsmOperandInfo &OpInfo = ConstraintOperands[cIndex]; 4354 if (OpInfo.Type == InlineAsm::isClobber) 4355 continue; 4356 4357 // If this is an output operand with a matching input operand, 4358 // look up the matching input. If their types mismatch, e.g. one 4359 // is an integer, the other is floating point, or their sizes are 4360 // different, flag it as an maCantMatch. 4361 if (OpInfo.hasMatchingInput()) { 4362 AsmOperandInfo &Input = ConstraintOperands[OpInfo.MatchingInput]; 4363 if (OpInfo.ConstraintVT != Input.ConstraintVT) { 4364 if ((OpInfo.ConstraintVT.isInteger() != 4365 Input.ConstraintVT.isInteger()) || 4366 (OpInfo.ConstraintVT.getSizeInBits() != 4367 Input.ConstraintVT.getSizeInBits())) { 4368 weightSum = -1; // Can't match. 4369 break; 4370 } 4371 } 4372 } 4373 weight = getMultipleConstraintMatchWeight(OpInfo, maIndex); 4374 if (weight == -1) { 4375 weightSum = -1; 4376 break; 4377 } 4378 weightSum += weight; 4379 } 4380 // Update best. 4381 if (weightSum > bestWeight) { 4382 bestWeight = weightSum; 4383 bestMAIndex = maIndex; 4384 } 4385 } 4386 4387 // Now select chosen alternative in each constraint. 4388 for (unsigned cIndex = 0, eIndex = ConstraintOperands.size(); 4389 cIndex != eIndex; ++cIndex) { 4390 AsmOperandInfo &cInfo = ConstraintOperands[cIndex]; 4391 if (cInfo.Type == InlineAsm::isClobber) 4392 continue; 4393 cInfo.selectAlternative(bestMAIndex); 4394 } 4395 } 4396 } 4397 4398 // Check and hook up tied operands, choose constraint code to use. 4399 for (unsigned cIndex = 0, eIndex = ConstraintOperands.size(); 4400 cIndex != eIndex; ++cIndex) { 4401 AsmOperandInfo &OpInfo = ConstraintOperands[cIndex]; 4402 4403 // If this is an output operand with a matching input operand, look up the 4404 // matching input. If their types mismatch, e.g. one is an integer, the 4405 // other is floating point, or their sizes are different, flag it as an 4406 // error. 
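    // e.g. in asm("..." : "=r"(out) : "0"(in)), the "0" input is tied to
    // output operand 0 and must be representable in the same register class
    // with a compatible type.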
4407     if (OpInfo.hasMatchingInput()) {
4408       AsmOperandInfo &Input = ConstraintOperands[OpInfo.MatchingInput];
4409
4410       if (OpInfo.ConstraintVT != Input.ConstraintVT) {
4411         std::pair<unsigned, const TargetRegisterClass *> MatchRC =
4412             getRegForInlineAsmConstraint(TRI, OpInfo.ConstraintCode,
4413                                          OpInfo.ConstraintVT);
4414         std::pair<unsigned, const TargetRegisterClass *> InputRC =
4415             getRegForInlineAsmConstraint(TRI, Input.ConstraintCode,
4416                                          Input.ConstraintVT);
4417         if ((OpInfo.ConstraintVT.isInteger() !=
4418              Input.ConstraintVT.isInteger()) ||
4419             (MatchRC.second != InputRC.second)) {
4420           report_fatal_error("Unsupported asm: input constraint"
4421                              " with a matching output constraint of"
4422                              " incompatible type!");
4423         }
4424       }
4425     }
4426   }
4427
4428   return ConstraintOperands;
4429 }
4430
4431 /// Return an integer indicating how general CT is.
4432 static unsigned getConstraintGenerality(TargetLowering::ConstraintType CT) {
4433   switch (CT) {
4434   case TargetLowering::C_Immediate:
4435   case TargetLowering::C_Other:
4436   case TargetLowering::C_Unknown:
4437     return 0;
4438   case TargetLowering::C_Register:
4439     return 1;
4440   case TargetLowering::C_RegisterClass:
4441     return 2;
4442   case TargetLowering::C_Memory:
4443     return 3;
4444   }
4445   llvm_unreachable("Invalid constraint type");
4446 }
4447
4448 /// Examine constraint type and operand type and determine a weight value.
4449 /// This object must already have been set up with the operand type
4450 /// and the current alternative constraint selected.
4451 TargetLowering::ConstraintWeight
4452 TargetLowering::getMultipleConstraintMatchWeight(
4453     AsmOperandInfo &info, int maIndex) const {
4454   InlineAsm::ConstraintCodeVector *rCodes;
4455   if (maIndex >= (int)info.multipleAlternatives.size())
4456     rCodes = &info.Codes;
4457   else
4458     rCodes = &info.multipleAlternatives[maIndex].Codes;
4459   ConstraintWeight BestWeight = CW_Invalid;
4460
4461   // Loop over the options, keeping track of the most general one.
4462   for (unsigned i = 0, e = rCodes->size(); i != e; ++i) {
4463     ConstraintWeight weight =
4464         getSingleConstraintMatchWeight(info, (*rCodes)[i].c_str());
4465     if (weight > BestWeight)
4466       BestWeight = weight;
4467   }
4468
4469   return BestWeight;
4470 }
4471
4472 /// Examine constraint type and operand type and determine a weight value.
4473 /// This object must already have been set up with the operand type
4474 /// and the current alternative constraint selected.
4475 TargetLowering::ConstraintWeight
4476 TargetLowering::getSingleConstraintMatchWeight(
4477     AsmOperandInfo &info, const char *constraint) const {
4478   ConstraintWeight weight = CW_Invalid;
4479   Value *CallOperandVal = info.CallOperandVal;
4480   // If we don't have a value, we can't do a match,
4481   // but allow it at the lowest weight.
4482   if (!CallOperandVal)
4483     return CW_Default;
4484   // Look at the constraint type.
4485   switch (*constraint) {
4486   case 'i': // immediate integer.
4487   case 'n': // immediate integer with a known value.
4488     if (isa<ConstantInt>(CallOperandVal))
4489       weight = CW_Constant;
4490     break;
4491   case 's': // non-explicit integral immediate.
4492     if (isa<GlobalValue>(CallOperandVal))
4493       weight = CW_Constant;
4494     break;
4495   case 'E': // immediate float in host format.
4496   case 'F': // immediate float.
4497     if (isa<ConstantFP>(CallOperandVal))
4498       weight = CW_Constant;
4499     break;
4500   case '<': // memory operand with autodecrement.
4501   case '>': // memory operand with autoincrement.
4502   case 'm': // memory operand.
4503   case 'o': // offsettable memory operand
4504   case 'V': // non-offsettable memory operand
4505     weight = CW_Memory;
4506     break;
4507   case 'r': // general register.
4508   case 'g': // general register, memory operand or immediate integer.
4509             // note: Clang converts "g" to "imr".
4510     if (CallOperandVal->getType()->isIntegerTy())
4511       weight = CW_Register;
4512     break;
4513   case 'X': // any operand.
4514   default:
4515     weight = CW_Default;
4516     break;
4517   }
4518   return weight;
4519 }
4520
4521 /// If there are multiple different constraints that we could pick for this
4522 /// operand (e.g. "imr") try to pick the 'best' one.
4523 /// This is somewhat tricky: constraints fall into four classes:
4524 ///    Other         -> immediates and magic values
4525 ///    Register      -> one specific register
4526 ///    RegisterClass -> a group of regs
4527 ///    Memory        -> memory
4528 /// Ideally, we would pick the most specific constraint possible: if we have
4529 /// something that fits into a register, we would pick it. The problem is
4530 /// that if we have something that could either be in a register or in
4531 /// memory, then using the register could cause selection of *other*
4532 /// operands to fail: they might only succeed if we pick memory. Because of
4533 /// this, the heuristic we use is:
4534 ///
4535 ///  1) If there is an 'other' constraint, and if the operand is valid for
4536 ///     that constraint, use it. This makes us take advantage of 'i'
4537 ///     constraints when available.
4538 ///  2) Otherwise, pick the most general constraint present. This prefers
4539 ///     'm' over 'r', for example.
4540 ///
4541 static void ChooseConstraint(TargetLowering::AsmOperandInfo &OpInfo,
4542                              const TargetLowering &TLI,
4543                              SDValue Op, SelectionDAG *DAG) {
4544   assert(OpInfo.Codes.size() > 1 && "Doesn't have multiple constraint options");
4545   unsigned BestIdx = 0;
4546   TargetLowering::ConstraintType BestType = TargetLowering::C_Unknown;
4547   int BestGenerality = -1;
4548
4549   // Loop over the options, keeping track of the most general one.
4550   for (unsigned i = 0, e = OpInfo.Codes.size(); i != e; ++i) {
4551     TargetLowering::ConstraintType CType =
4552         TLI.getConstraintType(OpInfo.Codes[i]);
4553
4554     // Indirect 'other' or 'immediate' constraints are not allowed.
4555     if (OpInfo.isIndirect && !(CType == TargetLowering::C_Memory ||
4556                                CType == TargetLowering::C_Register ||
4557                                CType == TargetLowering::C_RegisterClass))
4558       continue;
4559
4560     // If this is an 'other' or 'immediate' constraint, see if the operand is
4561     // valid for it. For example, on X86 we might have an 'rI' constraint. If
4562     // the operand is an integer in the range [0..31] we want to use I (saving a
4563     // load of a register), otherwise we must use 'r'.
4564     if ((CType == TargetLowering::C_Other ||
4565          CType == TargetLowering::C_Immediate) && Op.getNode()) {
4566       assert(OpInfo.Codes[i].size() == 1 &&
4567              "Unhandled multi-letter 'other' constraint");
4568       std::vector<SDValue> ResultOps;
4569       TLI.LowerAsmOperandForConstraint(Op, OpInfo.Codes[i],
4570                                        ResultOps, *DAG);
4571       if (!ResultOps.empty()) {
4572         BestType = CType;
4573         BestIdx = i;
4574         break;
4575       }
4576     }
4577
4578     // Things with matching constraints can only be registers, per gcc
4579     // documentation. This mainly affects "g" constraints.
4580     if (CType == TargetLowering::C_Memory && OpInfo.hasMatchingInput())
4581       continue;
4582
4583     // This constraint letter is more general than the previous one, use it.
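    // e.g. for "imr" with a non-constant operand: 'i' is rejected by
    // LowerAsmOperandForConstraint above, and memory (generality 3) then
    // wins over the register class 'r' (generality 2).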
4584 int Generality = getConstraintGenerality(CType); 4585 if (Generality > BestGenerality) { 4586 BestType = CType; 4587 BestIdx = i; 4588 BestGenerality = Generality; 4589 } 4590 } 4591 4592 OpInfo.ConstraintCode = OpInfo.Codes[BestIdx]; 4593 OpInfo.ConstraintType = BestType; 4594 } 4595 4596 /// Determines the constraint code and constraint type to use for the specific 4597 /// AsmOperandInfo, setting OpInfo.ConstraintCode and OpInfo.ConstraintType. 4598 void TargetLowering::ComputeConstraintToUse(AsmOperandInfo &OpInfo, 4599 SDValue Op, 4600 SelectionDAG *DAG) const { 4601 assert(!OpInfo.Codes.empty() && "Must have at least one constraint"); 4602 4603 // Single-letter constraints ('r') are very common. 4604 if (OpInfo.Codes.size() == 1) { 4605 OpInfo.ConstraintCode = OpInfo.Codes[0]; 4606 OpInfo.ConstraintType = getConstraintType(OpInfo.ConstraintCode); 4607 } else { 4608 ChooseConstraint(OpInfo, *this, Op, DAG); 4609 } 4610 4611 // 'X' matches anything. 4612 if (OpInfo.ConstraintCode == "X" && OpInfo.CallOperandVal) { 4613 // Labels and constants are handled elsewhere ('X' is the only thing 4614 // that matches labels). For Functions, the type here is the type of 4615 // the result, which is not what we want to look at; leave them alone. 4616 Value *v = OpInfo.CallOperandVal; 4617 if (isa<BasicBlock>(v) || isa<ConstantInt>(v) || isa<Function>(v)) { 4618 OpInfo.CallOperandVal = v; 4619 return; 4620 } 4621 4622 if (Op.getNode() && Op.getOpcode() == ISD::TargetBlockAddress) 4623 return; 4624 4625 // Otherwise, try to resolve it to something we know about by looking at 4626 // the actual operand type. 4627 if (const char *Repl = LowerXConstraint(OpInfo.ConstraintVT)) { 4628 OpInfo.ConstraintCode = Repl; 4629 OpInfo.ConstraintType = getConstraintType(OpInfo.ConstraintCode); 4630 } 4631 } 4632 } 4633 4634 /// Given an exact SDIV by a constant, create a multiplication 4635 /// with the multiplicative inverse of the constant. 4636 static SDValue BuildExactSDIV(const TargetLowering &TLI, SDNode *N, 4637 const SDLoc &dl, SelectionDAG &DAG, 4638 SmallVectorImpl<SDNode *> &Created) { 4639 SDValue Op0 = N->getOperand(0); 4640 SDValue Op1 = N->getOperand(1); 4641 EVT VT = N->getValueType(0); 4642 EVT SVT = VT.getScalarType(); 4643 EVT ShVT = TLI.getShiftAmountTy(VT, DAG.getDataLayout()); 4644 EVT ShSVT = ShVT.getScalarType(); 4645 4646 bool UseSRA = false; 4647 SmallVector<SDValue, 16> Shifts, Factors; 4648 4649 auto BuildSDIVPattern = [&](ConstantSDNode *C) { 4650 if (C->isNullValue()) 4651 return false; 4652 APInt Divisor = C->getAPIntValue(); 4653 unsigned Shift = Divisor.countTrailingZeros(); 4654 if (Shift) { 4655 Divisor.ashrInPlace(Shift); 4656 UseSRA = true; 4657 } 4658 // Calculate the multiplicative inverse, using Newton's method. 4659 APInt t; 4660 APInt Factor = Divisor; 4661 while ((t = Divisor * Factor) != 1) 4662 Factor *= APInt(Divisor.getBitWidth(), 2) - t; 4663 Shifts.push_back(DAG.getConstant(Shift, dl, ShSVT)); 4664 Factors.push_back(DAG.getConstant(Factor, dl, SVT)); 4665 return true; 4666 }; 4667 4668 // Collect all magic values from the build vector. 4669 if (!ISD::matchUnaryPredicate(Op1, BuildSDIVPattern)) 4670 return SDValue(); 4671 4672 SDValue Shift, Factor; 4673 if (VT.isVector()) { 4674 Shift = DAG.getBuildVector(ShVT, dl, Shifts); 4675 Factor = DAG.getBuildVector(VT, dl, Factors); 4676 } else { 4677 Shift = Shifts[0]; 4678 Factor = Factors[0]; 4679 } 4680 4681 SDValue Res = Op0; 4682 4683 // Shift the value upfront if it is even, so the LSB is one. 
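  // Worked example (i32, exact sdiv by 6): shift right by 1 to divide by 2,
  // leaving an exact divide by 3; the multiplicative inverse of 3 mod 2^32
  // is 0xAAAAAAAB (3 * 0xAAAAAAAB == 1 mod 2^32), so the result is
  // (X >>exact 1) * 0xAAAAAAAB.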
4684 if (UseSRA) { 4685 // TODO: For UDIV use SRL instead of SRA. 4686 SDNodeFlags Flags; 4687 Flags.setExact(true); 4688 Res = DAG.getNode(ISD::SRA, dl, VT, Res, Shift, Flags); 4689 Created.push_back(Res.getNode()); 4690 } 4691 4692 return DAG.getNode(ISD::MUL, dl, VT, Res, Factor); 4693 } 4694 4695 SDValue TargetLowering::BuildSDIVPow2(SDNode *N, const APInt &Divisor, 4696 SelectionDAG &DAG, 4697 SmallVectorImpl<SDNode *> &Created) const { 4698 AttributeList Attr = DAG.getMachineFunction().getFunction().getAttributes(); 4699 const TargetLowering &TLI = DAG.getTargetLoweringInfo(); 4700 if (TLI.isIntDivCheap(N->getValueType(0), Attr)) 4701 return SDValue(N, 0); // Lower SDIV as SDIV 4702 return SDValue(); 4703 } 4704 4705 /// Given an ISD::SDIV node expressing a divide by constant, 4706 /// return a DAG expression to select that will generate the same value by 4707 /// multiplying by a magic number. 4708 /// Ref: "Hacker's Delight" or "The PowerPC Compiler Writer's Guide". 4709 SDValue TargetLowering::BuildSDIV(SDNode *N, SelectionDAG &DAG, 4710 bool IsAfterLegalization, 4711 SmallVectorImpl<SDNode *> &Created) const { 4712 SDLoc dl(N); 4713 EVT VT = N->getValueType(0); 4714 EVT SVT = VT.getScalarType(); 4715 EVT ShVT = getShiftAmountTy(VT, DAG.getDataLayout()); 4716 EVT ShSVT = ShVT.getScalarType(); 4717 unsigned EltBits = VT.getScalarSizeInBits(); 4718 4719 // Check to see if we can do this. 4720 // FIXME: We should be more aggressive here. 4721 if (!isTypeLegal(VT)) 4722 return SDValue(); 4723 4724 // If the sdiv has an 'exact' bit we can use a simpler lowering. 4725 if (N->getFlags().hasExact()) 4726 return BuildExactSDIV(*this, N, dl, DAG, Created); 4727 4728 SmallVector<SDValue, 16> MagicFactors, Factors, Shifts, ShiftMasks; 4729 4730 auto BuildSDIVPattern = [&](ConstantSDNode *C) { 4731 if (C->isNullValue()) 4732 return false; 4733 4734 const APInt &Divisor = C->getAPIntValue(); 4735 APInt::ms magics = Divisor.magic(); 4736 int NumeratorFactor = 0; 4737 int ShiftMask = -1; 4738 4739 if (Divisor.isOneValue() || Divisor.isAllOnesValue()) { 4740 // If d is +1/-1, we just multiply the numerator by +1/-1. 4741 NumeratorFactor = Divisor.getSExtValue(); 4742 magics.m = 0; 4743 magics.s = 0; 4744 ShiftMask = 0; 4745 } else if (Divisor.isStrictlyPositive() && magics.m.isNegative()) { 4746 // If d > 0 and m < 0, add the numerator. 4747 NumeratorFactor = 1; 4748 } else if (Divisor.isNegative() && magics.m.isStrictlyPositive()) { 4749 // If d < 0 and m > 0, subtract the numerator. 4750 NumeratorFactor = -1; 4751 } 4752 4753 MagicFactors.push_back(DAG.getConstant(magics.m, dl, SVT)); 4754 Factors.push_back(DAG.getConstant(NumeratorFactor, dl, SVT)); 4755 Shifts.push_back(DAG.getConstant(magics.s, dl, ShSVT)); 4756 ShiftMasks.push_back(DAG.getConstant(ShiftMask, dl, SVT)); 4757 return true; 4758 }; 4759 4760 SDValue N0 = N->getOperand(0); 4761 SDValue N1 = N->getOperand(1); 4762 4763 // Collect the shifts / magic values from each element. 
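  // Worked illustration: for an i32 sdiv by 7, APInt::magic() yields
  // m = 0x92492493 (negative) and s = 2; since d > 0 and m < 0 the lambda
  // records NumeratorFactor = 1, so the final sequence is mulhs, an add of
  // the numerator, sra by 2, plus the sign-bit fixup below.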
4764 if (!ISD::matchUnaryPredicate(N1, BuildSDIVPattern)) 4765 return SDValue(); 4766 4767 SDValue MagicFactor, Factor, Shift, ShiftMask; 4768 if (VT.isVector()) { 4769 MagicFactor = DAG.getBuildVector(VT, dl, MagicFactors); 4770 Factor = DAG.getBuildVector(VT, dl, Factors); 4771 Shift = DAG.getBuildVector(ShVT, dl, Shifts); 4772 ShiftMask = DAG.getBuildVector(VT, dl, ShiftMasks); 4773 } else { 4774 MagicFactor = MagicFactors[0]; 4775 Factor = Factors[0]; 4776 Shift = Shifts[0]; 4777 ShiftMask = ShiftMasks[0]; 4778 } 4779 4780 // Multiply the numerator (operand 0) by the magic value. 4781 // FIXME: We should support doing a MUL in a wider type. 4782 SDValue Q; 4783 if (IsAfterLegalization ? isOperationLegal(ISD::MULHS, VT) 4784 : isOperationLegalOrCustom(ISD::MULHS, VT)) 4785 Q = DAG.getNode(ISD::MULHS, dl, VT, N0, MagicFactor); 4786 else if (IsAfterLegalization ? isOperationLegal(ISD::SMUL_LOHI, VT) 4787 : isOperationLegalOrCustom(ISD::SMUL_LOHI, VT)) { 4788 SDValue LoHi = 4789 DAG.getNode(ISD::SMUL_LOHI, dl, DAG.getVTList(VT, VT), N0, MagicFactor); 4790 Q = SDValue(LoHi.getNode(), 1); 4791 } else 4792 return SDValue(); // No mulhs or equivalent. 4793 Created.push_back(Q.getNode()); 4794 4795 // (Optionally) Add/subtract the numerator using Factor. 4796 Factor = DAG.getNode(ISD::MUL, dl, VT, N0, Factor); 4797 Created.push_back(Factor.getNode()); 4798 Q = DAG.getNode(ISD::ADD, dl, VT, Q, Factor); 4799 Created.push_back(Q.getNode()); 4800 4801 // Shift right algebraic by shift value. 4802 Q = DAG.getNode(ISD::SRA, dl, VT, Q, Shift); 4803 Created.push_back(Q.getNode()); 4804 4805 // Extract the sign bit, mask it and add it to the quotient. 4806 SDValue SignShift = DAG.getConstant(EltBits - 1, dl, ShVT); 4807 SDValue T = DAG.getNode(ISD::SRL, dl, VT, Q, SignShift); 4808 Created.push_back(T.getNode()); 4809 T = DAG.getNode(ISD::AND, dl, VT, T, ShiftMask); 4810 Created.push_back(T.getNode()); 4811 return DAG.getNode(ISD::ADD, dl, VT, Q, T); 4812 } 4813 4814 /// Given an ISD::UDIV node expressing a divide by constant, 4815 /// return a DAG expression to select that will generate the same value by 4816 /// multiplying by a magic number. 4817 /// Ref: "Hacker's Delight" or "The PowerPC Compiler Writer's Guide". 4818 SDValue TargetLowering::BuildUDIV(SDNode *N, SelectionDAG &DAG, 4819 bool IsAfterLegalization, 4820 SmallVectorImpl<SDNode *> &Created) const { 4821 SDLoc dl(N); 4822 EVT VT = N->getValueType(0); 4823 EVT SVT = VT.getScalarType(); 4824 EVT ShVT = getShiftAmountTy(VT, DAG.getDataLayout()); 4825 EVT ShSVT = ShVT.getScalarType(); 4826 unsigned EltBits = VT.getScalarSizeInBits(); 4827 4828 // Check to see if we can do this. 4829 // FIXME: We should be more aggressive here. 4830 if (!isTypeLegal(VT)) 4831 return SDValue(); 4832 4833 bool UseNPQ = false; 4834 SmallVector<SDValue, 16> PreShifts, PostShifts, MagicFactors, NPQFactors; 4835 4836 auto BuildUDIVPattern = [&](ConstantSDNode *C) { 4837 if (C->isNullValue()) 4838 return false; 4839 // FIXME: We should use a narrower constant when the upper 4840 // bits are known to be zero. 4841 APInt Divisor = C->getAPIntValue(); 4842 APInt::mu magics = Divisor.magicu(); 4843 unsigned PreShift = 0, PostShift = 0; 4844 4845 // If the divisor is even, we can avoid using the expensive fixup by 4846 // shifting the divided value upfront. 4847 if (magics.a != 0 && !Divisor[0]) { 4848 PreShift = Divisor.countTrailingZeros(); 4849 // Get magic number for the shifted divisor. 
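      // E.g. (illustrative): for D = 28 = 7 * 2^2, magicu(7) would need the
      // expensive add fixup (magics.a != 0), so we instead pre-shift the
      // dividend by PreShift = 2 and recompute the magic for 7 knowing the
      // top 2 bits of the shifted value are zero, which avoids the fixup.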
      magics = Divisor.lshr(PreShift).magicu(PreShift);
      assert(magics.a == 0 && "Should use cheap fixup now");
    }

    APInt Magic = magics.m;

    bool SelNPQ;
    if (magics.a == 0 || Divisor.isOneValue()) {
      assert(magics.s < Divisor.getBitWidth() &&
             "We shouldn't generate an undefined shift!");
      PostShift = magics.s;
      SelNPQ = false;
    } else {
      PostShift = magics.s - 1;
      SelNPQ = true;
    }

    PreShifts.push_back(DAG.getConstant(PreShift, dl, ShSVT));
    MagicFactors.push_back(DAG.getConstant(Magic, dl, SVT));
    NPQFactors.push_back(
        DAG.getConstant(SelNPQ ? APInt::getOneBitSet(EltBits, EltBits - 1)
                               : APInt::getNullValue(EltBits),
                        dl, SVT));
    PostShifts.push_back(DAG.getConstant(PostShift, dl, ShSVT));
    UseNPQ |= SelNPQ;
    return true;
  };

  SDValue N0 = N->getOperand(0);
  SDValue N1 = N->getOperand(1);

  // Collect the shifts/magic values from each element.
  if (!ISD::matchUnaryPredicate(N1, BuildUDIVPattern))
    return SDValue();

  SDValue PreShift, PostShift, MagicFactor, NPQFactor;
  if (VT.isVector()) {
    PreShift = DAG.getBuildVector(ShVT, dl, PreShifts);
    MagicFactor = DAG.getBuildVector(VT, dl, MagicFactors);
    NPQFactor = DAG.getBuildVector(VT, dl, NPQFactors);
    PostShift = DAG.getBuildVector(ShVT, dl, PostShifts);
  } else {
    PreShift = PreShifts[0];
    MagicFactor = MagicFactors[0];
    PostShift = PostShifts[0];
  }

  SDValue Q = N0;
  Q = DAG.getNode(ISD::SRL, dl, VT, Q, PreShift);
  Created.push_back(Q.getNode());

  // FIXME: We should support doing a MUL in a wider type.
  auto GetMULHU = [&](SDValue X, SDValue Y) {
    if (IsAfterLegalization ? isOperationLegal(ISD::MULHU, VT)
                            : isOperationLegalOrCustom(ISD::MULHU, VT))
      return DAG.getNode(ISD::MULHU, dl, VT, X, Y);
    if (IsAfterLegalization ? isOperationLegal(ISD::UMUL_LOHI, VT)
                            : isOperationLegalOrCustom(ISD::UMUL_LOHI, VT)) {
      SDValue LoHi =
          DAG.getNode(ISD::UMUL_LOHI, dl, DAG.getVTList(VT, VT), X, Y);
      return SDValue(LoHi.getNode(), 1);
    }
    return SDValue(); // No mulhu or equivalent
  };

  // Multiply the numerator (operand 0) by the magic value.
  Q = GetMULHU(Q, MagicFactor);
  if (!Q)
    return SDValue();

  Created.push_back(Q.getNode());

  if (UseNPQ) {
    SDValue NPQ = DAG.getNode(ISD::SUB, dl, VT, N0, Q);
    Created.push_back(NPQ.getNode());

    // For vectors we might have a mix of non-NPQ/NPQ paths, so use
    // MULHU to act as a SRL-by-1 for NPQ, else multiply by zero.
    if (VT.isVector())
      NPQ = GetMULHU(NPQ, NPQFactor);
    else
      NPQ = DAG.getNode(ISD::SRL, dl, VT, NPQ, DAG.getConstant(1, dl, ShVT));

    Created.push_back(NPQ.getNode());

    Q = DAG.getNode(ISD::ADD, dl, VT, NPQ, Q);
    Created.push_back(Q.getNode());
  }

  Q = DAG.getNode(ISD::SRL, dl, VT, Q, PostShift);
  Created.push_back(Q.getNode());

  SDValue One = DAG.getConstant(1, dl, VT);
  SDValue IsOne = DAG.getSetCC(dl, VT, N1, One, ISD::SETEQ);
  return DAG.getSelect(dl, VT, IsOne, N0, Q);
}

/// If all values in Values that *don't* match the predicate are the same
/// 'splat' value, then replace all values with that splat value.
/// Else, if AlternativeReplacement was provided, then replace all values that
/// do match the predicate with the AlternativeReplacement value.
4951 static void 4952 turnVectorIntoSplatVector(MutableArrayRef<SDValue> Values, 4953 std::function<bool(SDValue)> Predicate, 4954 SDValue AlternativeReplacement = SDValue()) { 4955 SDValue Replacement; 4956 // Is there a value for which the Predicate does *NOT* match? What is it? 4957 auto SplatValue = llvm::find_if_not(Values, Predicate); 4958 if (SplatValue != Values.end()) { 4959 // Does Values consist only of SplatValue's and values matching Predicate? 4960 if (llvm::all_of(Values, [Predicate, SplatValue](SDValue Value) { 4961 return Value == *SplatValue || Predicate(Value); 4962 })) // Then we shall replace values matching predicate with SplatValue. 4963 Replacement = *SplatValue; 4964 } 4965 if (!Replacement) { 4966 // Oops, we did not find the "baseline" splat value. 4967 if (!AlternativeReplacement) 4968 return; // Nothing to do. 4969 // Let's replace with provided value then. 4970 Replacement = AlternativeReplacement; 4971 } 4972 std::replace_if(Values.begin(), Values.end(), Predicate, Replacement); 4973 } 4974 4975 /// Given an ISD::UREM used only by an ISD::SETEQ or ISD::SETNE 4976 /// where the divisor is constant and the comparison target is zero, 4977 /// return a DAG expression that will generate the same comparison result 4978 /// using only multiplications, additions and shifts/rotations. 4979 /// Ref: "Hacker's Delight" 10-17. 4980 SDValue TargetLowering::buildUREMEqFold(EVT SETCCVT, SDValue REMNode, 4981 SDValue CompTargetNode, 4982 ISD::CondCode Cond, 4983 DAGCombinerInfo &DCI, 4984 const SDLoc &DL) const { 4985 SmallVector<SDNode *, 5> Built; 4986 if (SDValue Folded = prepareUREMEqFold(SETCCVT, REMNode, CompTargetNode, Cond, 4987 DCI, DL, Built)) { 4988 for (SDNode *N : Built) 4989 DCI.AddToWorklist(N); 4990 return Folded; 4991 } 4992 4993 return SDValue(); 4994 } 4995 4996 SDValue 4997 TargetLowering::prepareUREMEqFold(EVT SETCCVT, SDValue REMNode, 4998 SDValue CompTargetNode, ISD::CondCode Cond, 4999 DAGCombinerInfo &DCI, const SDLoc &DL, 5000 SmallVectorImpl<SDNode *> &Created) const { 5001 // fold (seteq/ne (urem N, D), 0) -> (setule/ugt (rotr (mul N, P), K), Q) 5002 // - D must be constant, with D = D0 * 2^K where D0 is odd 5003 // - P is the multiplicative inverse of D0 modulo 2^W 5004 // - Q = floor(((2^W) - 1) / D) 5005 // where W is the width of the common type of N and D. 5006 assert((Cond == ISD::SETEQ || Cond == ISD::SETNE) && 5007 "Only applicable for (in)equality comparisons."); 5008 5009 SelectionDAG &DAG = DCI.DAG; 5010 5011 EVT VT = REMNode.getValueType(); 5012 EVT SVT = VT.getScalarType(); 5013 EVT ShVT = getShiftAmountTy(VT, DAG.getDataLayout()); 5014 EVT ShSVT = ShVT.getScalarType(); 5015 5016 // If MUL is unavailable, we cannot proceed in any case. 5017 if (!isOperationLegalOrCustom(ISD::MUL, VT)) 5018 return SDValue(); 5019 5020 bool ComparingWithAllZeros = true; 5021 bool AllComparisonsWithNonZerosAreTautological = true; 5022 bool HadTautologicalLanes = false; 5023 bool AllLanesAreTautological = true; 5024 bool HadEvenDivisor = false; 5025 bool AllDivisorsArePowerOfTwo = true; 5026 bool HadTautologicalInvertedLanes = false; 5027 SmallVector<SDValue, 16> PAmts, KAmts, QAmts, IAmts; 5028 5029 auto BuildUREMPattern = [&](ConstantSDNode *CDiv, ConstantSDNode *CCmp) { 5030 // Division by 0 is UB. Leave it to be constant-folded elsewhere. 
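    // Worked illustration of the values this lambda produces: for W = 32 and
    // D = 6 = 3 * 2^1, we get P = inv(3) mod 2^32 = 0xAAAAAAAB, K = 1 and
    // Q = floor((2^32 - 1) / 6) = 0x2AAAAAAA, so (x u% 6 == 0) becomes
    // (rotr(x * 0xAAAAAAAB, 1) u<= 0x2AAAAAAA).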
    if (CDiv->isNullValue())
      return false;

    const APInt &D = CDiv->getAPIntValue();
    const APInt &Cmp = CCmp->getAPIntValue();

    ComparingWithAllZeros &= Cmp.isNullValue();

    // `x u% C1` is *always* less than C1. So given `x u% C1 == C2`,
    // if C2 is not less than C1, the comparison is always false.
    // But we will only be able to produce the comparison that will give the
    // opposite tautological answer. So this lane would need to be fixed up.
    bool TautologicalInvertedLane = D.ule(Cmp);
    HadTautologicalInvertedLanes |= TautologicalInvertedLane;

    // If all lanes are tautological (either all divisors are ones, or divisor
    // is not greater than the constant we are comparing with),
    // we will prefer to avoid the fold.
    bool TautologicalLane = D.isOneValue() || TautologicalInvertedLane;
    HadTautologicalLanes |= TautologicalLane;
    AllLanesAreTautological &= TautologicalLane;

    // If we are comparing with non-zero, we'll need to subtract said
    // comparison value from the LHS. But there is no point in doing that if
    // every lane where we are comparing with non-zero is tautological.
    if (!Cmp.isNullValue())
      AllComparisonsWithNonZerosAreTautological &= TautologicalLane;

    // Decompose D into D0 * 2^K
    unsigned K = D.countTrailingZeros();
    assert((!D.isOneValue() || (K == 0)) && "For divisor '1' we won't rotate.");
    APInt D0 = D.lshr(K);

    // D is even if it has trailing zeros.
    HadEvenDivisor |= (K != 0);
    // D is a power-of-two if D0 is one.
    // If all divisors are power-of-two, we will prefer to avoid the fold.
    AllDivisorsArePowerOfTwo &= D0.isOneValue();

    // P = inv(D0, 2^W)
    // 2^W requires W + 1 bits, so we have to extend and then truncate.
    unsigned W = D.getBitWidth();
    APInt P = D0.zext(W + 1)
                  .multiplicativeInverse(APInt::getSignedMinValue(W + 1))
                  .trunc(W);
    assert(!P.isNullValue() && "No multiplicative inverse!"); // unreachable
    assert((D0 * P).isOneValue() && "Multiplicative inverse sanity check.");

    // Q = floor((2^W - 1) u/ D)
    // R = ((2^W - 1) u% D)
    APInt Q, R;
    APInt::udivrem(APInt::getAllOnesValue(W), D, Q, R);

    // If we are comparing with zero, then that comparison constant is okay,
    // else it may need to be one less than that.
    if (Cmp.ugt(R))
      Q -= 1;

    assert(APInt::getAllOnesValue(ShSVT.getSizeInBits()).ugt(K) &&
           "We are expecting that K is always less than all-ones for ShSVT");

    // If the lane is tautological the result can be constant-folded.
    if (TautologicalLane) {
      // Set P and K to bogus values so we can try to splat them.
      P = 0;
      K = -1;
      // And ensure that the comparison constant is tautological,
      // i.e. it will always compare true/false.
      Q = -1;
    }

    PAmts.push_back(DAG.getConstant(P, DL, SVT));
    KAmts.push_back(
        DAG.getConstant(APInt(ShSVT.getSizeInBits(), K), DL, ShSVT));
    QAmts.push_back(DAG.getConstant(Q, DL, SVT));
    return true;
  };

  SDValue N = REMNode.getOperand(0);
  SDValue D = REMNode.getOperand(1);

  // Collect the values from each element.
  if (!ISD::matchBinaryPredicate(D, CompTargetNode, BuildUREMPattern))
    return SDValue();

  // If all lanes are tautological, the result can be constant-folded.
  if (AllLanesAreTautological)
    return SDValue();

  // If this is a urem by a power-of-two, avoid the fold since it can be
  // best implemented as a bit test.
  if (AllDivisorsArePowerOfTwo)
    return SDValue();

  SDValue PVal, KVal, QVal;
  if (VT.isVector()) {
    if (HadTautologicalLanes) {
      // Try to turn PAmts into a splat, since we don't care about the values
      // that are currently '0'. If we can't, just keep '0's.
      turnVectorIntoSplatVector(PAmts, isNullConstant);
      // Try to turn KAmts into a splat, since we don't care about the values
      // that are currently '-1'. If we can't, change them to '0's.
      turnVectorIntoSplatVector(KAmts, isAllOnesConstant,
                                DAG.getConstant(0, DL, ShSVT));
    }

    PVal = DAG.getBuildVector(VT, DL, PAmts);
    KVal = DAG.getBuildVector(ShVT, DL, KAmts);
    QVal = DAG.getBuildVector(VT, DL, QAmts);
  } else {
    PVal = PAmts[0];
    KVal = KAmts[0];
    QVal = QAmts[0];
  }

  if (!ComparingWithAllZeros && !AllComparisonsWithNonZerosAreTautological) {
    if (!isOperationLegalOrCustom(ISD::SUB, VT))
      return SDValue(); // FIXME: Could/should use `ISD::ADD`?
    assert(CompTargetNode.getValueType() == N.getValueType() &&
           "Expecting that the types on LHS and RHS of comparisons match.");
    N = DAG.getNode(ISD::SUB, DL, VT, N, CompTargetNode);
  }

  // (mul N, P)
  SDValue Op0 = DAG.getNode(ISD::MUL, DL, VT, N, PVal);
  Created.push_back(Op0.getNode());

  // Rotate right only if any divisor was even. We avoid rotates for all-odd
  // divisors as a performance improvement, since rotating by 0 is a no-op.
  if (HadEvenDivisor) {
    // We need ROTR to do this.
    if (!isOperationLegalOrCustom(ISD::ROTR, VT))
      return SDValue();
    SDNodeFlags Flags;
    Flags.setExact(true);
    // UREM: (rotr (mul N, P), K)
    Op0 = DAG.getNode(ISD::ROTR, DL, VT, Op0, KVal, Flags);
    Created.push_back(Op0.getNode());
  }

  // UREM: (setule/setugt (rotr (mul N, P), K), Q)
  SDValue NewCC =
      DAG.getSetCC(DL, SETCCVT, Op0, QVal,
                   ((Cond == ISD::SETEQ) ? ISD::SETULE : ISD::SETUGT));
  if (!HadTautologicalInvertedLanes)
    return NewCC;

  // If any lanes previously compared always-false, the NewCC will give
  // an always-true result for them, so we need to fix up those lanes.
  // Or the other way around for the inequality predicate.
  assert(VT.isVector() && "Can/should only get here for vectors.");
  Created.push_back(NewCC.getNode());

  // `x u% C1` is *always* less than C1. So given `x u% C1 == C2`,
  // if C2 is not less than C1, the comparison is always false.
  // But we have produced the comparison that will give the
  // opposite tautological answer. So these lanes would need to be fixed up.
  SDValue TautologicalInvertedChannels =
      DAG.getSetCC(DL, SETCCVT, D, CompTargetNode, ISD::SETULE);
  Created.push_back(TautologicalInvertedChannels.getNode());

  if (isOperationLegalOrCustom(ISD::VSELECT, SETCCVT)) {
    // If we have a vector select, let's replace the comparison results in the
    // affected lanes with the correct tautological result.
    SDValue Replacement = DAG.getBoolConstant(Cond == ISD::SETEQ ? false : true,
                                              DL, SETCCVT, SETCCVT);
    return DAG.getNode(ISD::VSELECT, DL, SETCCVT, TautologicalInvertedChannels,
                       Replacement, NewCC);
  }

  // Else, we can just invert the comparison result in the appropriate lanes.
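  // E.g. (illustrative): with D = <3, 4> and Cmp = <0, 5>, the second lane is
  // always-false (4 u<= 5), NewCC computes the opposite answer there, and
  // XOR'ing NewCC with the <false, true> mask flips exactly that lane.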
5202 if (isOperationLegalOrCustom(ISD::XOR, SETCCVT)) 5203 return DAG.getNode(ISD::XOR, DL, SETCCVT, NewCC, 5204 TautologicalInvertedChannels); 5205 5206 return SDValue(); // Don't know how to lower. 5207 } 5208 5209 /// Given an ISD::SREM used only by an ISD::SETEQ or ISD::SETNE 5210 /// where the divisor is constant and the comparison target is zero, 5211 /// return a DAG expression that will generate the same comparison result 5212 /// using only multiplications, additions and shifts/rotations. 5213 /// Ref: "Hacker's Delight" 10-17. 5214 SDValue TargetLowering::buildSREMEqFold(EVT SETCCVT, SDValue REMNode, 5215 SDValue CompTargetNode, 5216 ISD::CondCode Cond, 5217 DAGCombinerInfo &DCI, 5218 const SDLoc &DL) const { 5219 SmallVector<SDNode *, 7> Built; 5220 if (SDValue Folded = prepareSREMEqFold(SETCCVT, REMNode, CompTargetNode, Cond, 5221 DCI, DL, Built)) { 5222 assert(Built.size() <= 7 && "Max size prediction failed."); 5223 for (SDNode *N : Built) 5224 DCI.AddToWorklist(N); 5225 return Folded; 5226 } 5227 5228 return SDValue(); 5229 } 5230 5231 SDValue 5232 TargetLowering::prepareSREMEqFold(EVT SETCCVT, SDValue REMNode, 5233 SDValue CompTargetNode, ISD::CondCode Cond, 5234 DAGCombinerInfo &DCI, const SDLoc &DL, 5235 SmallVectorImpl<SDNode *> &Created) const { 5236 // Fold: 5237 // (seteq/ne (srem N, D), 0) 5238 // To: 5239 // (setule/ugt (rotr (add (mul N, P), A), K), Q) 5240 // 5241 // - D must be constant, with D = D0 * 2^K where D0 is odd 5242 // - P is the multiplicative inverse of D0 modulo 2^W 5243 // - A = bitwiseand(floor((2^(W - 1) - 1) / D0), (-(2^k))) 5244 // - Q = floor((2 * A) / (2^K)) 5245 // where W is the width of the common type of N and D. 5246 assert((Cond == ISD::SETEQ || Cond == ISD::SETNE) && 5247 "Only applicable for (in)equality comparisons."); 5248 5249 SelectionDAG &DAG = DCI.DAG; 5250 5251 EVT VT = REMNode.getValueType(); 5252 EVT SVT = VT.getScalarType(); 5253 EVT ShVT = getShiftAmountTy(VT, DAG.getDataLayout()); 5254 EVT ShSVT = ShVT.getScalarType(); 5255 5256 // If MUL is unavailable, we cannot proceed in any case. 5257 if (!isOperationLegalOrCustom(ISD::MUL, VT)) 5258 return SDValue(); 5259 5260 // TODO: Could support comparing with non-zero too. 5261 ConstantSDNode *CompTarget = isConstOrConstSplat(CompTargetNode); 5262 if (!CompTarget || !CompTarget->isNullValue()) 5263 return SDValue(); 5264 5265 bool HadIntMinDivisor = false; 5266 bool HadOneDivisor = false; 5267 bool AllDivisorsAreOnes = true; 5268 bool HadEvenDivisor = false; 5269 bool NeedToApplyOffset = false; 5270 bool AllDivisorsArePowerOfTwo = true; 5271 SmallVector<SDValue, 16> PAmts, AAmts, KAmts, QAmts; 5272 5273 auto BuildSREMPattern = [&](ConstantSDNode *C) { 5274 // Division by 0 is UB. Leave it to be constant-folded elsewhere. 5275 if (C->isNullValue()) 5276 return false; 5277 5278 // FIXME: we don't fold `rem %X, -C` to `rem %X, C` in DAGCombine. 5279 5280 // WARNING: this fold is only valid for positive divisors! 5281 APInt D = C->getAPIntValue(); 5282 if (D.isNegative()) 5283 D.negate(); // `rem %X, -C` is equivalent to `rem %X, C` 5284 5285 HadIntMinDivisor |= D.isMinSignedValue(); 5286 5287 // If all divisors are ones, we will prefer to avoid the fold. 
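    // Worked illustration of the constants this lambda produces: for W = 32
    // and D = 3 (so D0 = 3, K = 0), we get P = inv(3) mod 2^32 = 0xAAAAAAAB,
    // A = floor((2^31 - 1) / 3) = 0x2AAAAAAA and Q = 2 * A = 0x55555554, so
    // (x s% 3 == 0) becomes ((x * 0xAAAAAAAB + 0x2AAAAAAA) u<= 0x55555554).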
    HadOneDivisor |= D.isOneValue();
    AllDivisorsAreOnes &= D.isOneValue();

    // Decompose D into D0 * 2^K
    unsigned K = D.countTrailingZeros();
    assert((!D.isOneValue() || (K == 0)) && "For divisor '1' we won't rotate.");
    APInt D0 = D.lshr(K);

    if (!D.isMinSignedValue()) {
      // D is even if it has trailing zeros; unless it's INT_MIN, in which case
      // we don't care about this lane in this fold, we'll special-handle it.
      HadEvenDivisor |= (K != 0);
    }

    // D is a power-of-two if D0 is one. This includes INT_MIN.
    // If all divisors are power-of-two, we will prefer to avoid the fold.
    AllDivisorsArePowerOfTwo &= D0.isOneValue();

    // P = inv(D0, 2^W)
    // 2^W requires W + 1 bits, so we have to extend and then truncate.
    unsigned W = D.getBitWidth();
    APInt P = D0.zext(W + 1)
                  .multiplicativeInverse(APInt::getSignedMinValue(W + 1))
                  .trunc(W);
    assert(!P.isNullValue() && "No multiplicative inverse!"); // unreachable
    assert((D0 * P).isOneValue() && "Multiplicative inverse sanity check.");

    // A = floor((2^(W - 1) - 1) / D0) & -2^K
    APInt A = APInt::getSignedMaxValue(W).udiv(D0);
    A.clearLowBits(K);

    if (!D.isMinSignedValue()) {
      // If the divisor is INT_MIN, we don't care about this lane in this
      // fold; we'll special-handle it.
      NeedToApplyOffset |= A != 0;
    }

    // Q = floor((2 * A) / (2^K))
    APInt Q = (2 * A).udiv(APInt::getOneBitSet(W, K));

    assert(APInt::getAllOnesValue(SVT.getSizeInBits()).ugt(A) &&
           "We are expecting that A is always less than all-ones for SVT");
    assert(APInt::getAllOnesValue(ShSVT.getSizeInBits()).ugt(K) &&
           "We are expecting that K is always less than all-ones for ShSVT");

    // If the divisor is 1 the result can be constant-folded. Likewise, we
    // don't care about INT_MIN lanes; those can be set to undef if appropriate.
    if (D.isOneValue()) {
      // Set P, A and K to bogus values so we can try to splat them.
      P = 0;
      A = -1;
      K = -1;

      // x ?% 1 == 0 <--> true <--> x u<= -1
      Q = -1;
    }

    PAmts.push_back(DAG.getConstant(P, DL, SVT));
    AAmts.push_back(DAG.getConstant(A, DL, SVT));
    KAmts.push_back(
        DAG.getConstant(APInt(ShSVT.getSizeInBits(), K), DL, ShSVT));
    QAmts.push_back(DAG.getConstant(Q, DL, SVT));
    return true;
  };

  SDValue N = REMNode.getOperand(0);
  SDValue D = REMNode.getOperand(1);

  // Collect the values from each element.
  if (!ISD::matchUnaryPredicate(D, BuildSREMPattern))
    return SDValue();

  // If this is a srem by one, avoid the fold since it can be constant-folded.
  if (AllDivisorsAreOnes)
    return SDValue();

  // If this is a srem by a power-of-two (including INT_MIN), avoid the fold
  // since it can be best implemented as a bit test.
  if (AllDivisorsArePowerOfTwo)
    return SDValue();

  SDValue PVal, AVal, KVal, QVal;
  if (VT.isVector()) {
    if (HadOneDivisor) {
      // Try to turn PAmts into a splat, since we don't care about the values
      // that are currently '0'. If we can't, just keep '0's.
      turnVectorIntoSplatVector(PAmts, isNullConstant);
      // Try to turn AAmts into a splat, since we don't care about the
      // values that are currently '-1'. If we can't, change them to '0's.
      turnVectorIntoSplatVector(AAmts, isAllOnesConstant,
                                DAG.getConstant(0, DL, SVT));
      // Try to turn KAmts into a splat, since we don't care about the values
      // that are currently '-1'. If we can't, change them to '0's.
      turnVectorIntoSplatVector(KAmts, isAllOnesConstant,
                                DAG.getConstant(0, DL, ShSVT));
    }

    PVal = DAG.getBuildVector(VT, DL, PAmts);
    AVal = DAG.getBuildVector(VT, DL, AAmts);
    KVal = DAG.getBuildVector(ShVT, DL, KAmts);
    QVal = DAG.getBuildVector(VT, DL, QAmts);
  } else {
    PVal = PAmts[0];
    AVal = AAmts[0];
    KVal = KAmts[0];
    QVal = QAmts[0];
  }

  // (mul N, P)
  SDValue Op0 = DAG.getNode(ISD::MUL, DL, VT, N, PVal);
  Created.push_back(Op0.getNode());

  if (NeedToApplyOffset) {
    // We need ADD to do this.
    if (!isOperationLegalOrCustom(ISD::ADD, VT))
      return SDValue();

    // (add (mul N, P), A)
    Op0 = DAG.getNode(ISD::ADD, DL, VT, Op0, AVal);
    Created.push_back(Op0.getNode());
  }

  // Rotate right only if any divisor was even. We avoid rotates for all-odd
  // divisors as a performance improvement, since rotating by 0 is a no-op.
  if (HadEvenDivisor) {
    // We need ROTR to do this.
    if (!isOperationLegalOrCustom(ISD::ROTR, VT))
      return SDValue();
    SDNodeFlags Flags;
    Flags.setExact(true);
    // SREM: (rotr (add (mul N, P), A), K)
    Op0 = DAG.getNode(ISD::ROTR, DL, VT, Op0, KVal, Flags);
    Created.push_back(Op0.getNode());
  }

  // SREM: (setule/setugt (rotr (add (mul N, P), A), K), Q)
  SDValue Fold =
      DAG.getSetCC(DL, SETCCVT, Op0, QVal,
                   ((Cond == ISD::SETEQ) ? ISD::SETULE : ISD::SETUGT));

  // If we didn't have lanes with INT_MIN divisor, then we're done.
  if (!HadIntMinDivisor)
    return Fold;

  // That fold is only valid for positive divisors, which effectively means
  // it is invalid for INT_MIN divisors. So if we have such a lane,
  // we must fix up the results for said lanes.
  assert(VT.isVector() && "Can/should only get here for vectors.");

  if (!isOperationLegalOrCustom(ISD::SETEQ, VT) ||
      !isOperationLegalOrCustom(ISD::AND, VT) ||
      !isOperationLegalOrCustom(Cond, VT) ||
      !isOperationLegalOrCustom(ISD::VSELECT, VT))
    return SDValue();

  Created.push_back(Fold.getNode());

  SDValue IntMin = DAG.getConstant(
      APInt::getSignedMinValue(SVT.getScalarSizeInBits()), DL, VT);
  SDValue IntMax = DAG.getConstant(
      APInt::getSignedMaxValue(SVT.getScalarSizeInBits()), DL, VT);
  SDValue Zero =
      DAG.getConstant(APInt::getNullValue(SVT.getScalarSizeInBits()), DL, VT);

  // Which lanes had INT_MIN divisors? The divisor is constant, so this gets
  // constant-folded.
  SDValue DivisorIsIntMin = DAG.getSetCC(DL, SETCCVT, D, IntMin, ISD::SETEQ);
  Created.push_back(DivisorIsIntMin.getNode());

  // (N s% INT_MIN) ==/!= 0 <--> (N & INT_MAX) ==/!= 0
  SDValue Masked = DAG.getNode(ISD::AND, DL, VT, N, IntMax);
  Created.push_back(Masked.getNode());
  SDValue MaskedIsZero = DAG.getSetCC(DL, SETCCVT, Masked, Zero, Cond);
  Created.push_back(MaskedIsZero.getNode());

  // To produce the final result we need to blend the two vectors 'Fold' and
  // 'MaskedIsZero': if the divisor for a lane was *NOT* INT_MIN, we pick
  // from 'Fold', else we pick from 'MaskedIsZero'. Since 'DivisorIsIntMin' is
  // constant-folded, the select can get lowered to a shuffle with a constant
  // mask.
5466 SDValue Blended = 5467 DAG.getNode(ISD::VSELECT, DL, VT, DivisorIsIntMin, MaskedIsZero, Fold); 5468 5469 return Blended; 5470 } 5471 5472 bool TargetLowering:: 5473 verifyReturnAddressArgumentIsConstant(SDValue Op, SelectionDAG &DAG) const { 5474 if (!isa<ConstantSDNode>(Op.getOperand(0))) { 5475 DAG.getContext()->emitError("argument to '__builtin_return_address' must " 5476 "be a constant integer"); 5477 return true; 5478 } 5479 5480 return false; 5481 } 5482 5483 char TargetLowering::isNegatibleForFree(SDValue Op, SelectionDAG &DAG, 5484 bool LegalOperations, bool ForCodeSize, 5485 unsigned Depth) const { 5486 // fneg is removable even if it has multiple uses. 5487 if (Op.getOpcode() == ISD::FNEG) 5488 return 2; 5489 5490 // Don't allow anything with multiple uses unless we know it is free. 5491 EVT VT = Op.getValueType(); 5492 const SDNodeFlags Flags = Op->getFlags(); 5493 const TargetOptions &Options = DAG.getTarget().Options; 5494 if (!Op.hasOneUse() && !(Op.getOpcode() == ISD::FP_EXTEND && 5495 isFPExtFree(VT, Op.getOperand(0).getValueType()))) 5496 return 0; 5497 5498 // Don't recurse exponentially. 5499 if (Depth > SelectionDAG::MaxRecursionDepth) 5500 return 0; 5501 5502 switch (Op.getOpcode()) { 5503 case ISD::ConstantFP: { 5504 if (!LegalOperations) 5505 return 1; 5506 5507 // Don't invert constant FP values after legalization unless the target says 5508 // the negated constant is legal. 5509 return isOperationLegal(ISD::ConstantFP, VT) || 5510 isFPImmLegal(neg(cast<ConstantFPSDNode>(Op)->getValueAPF()), VT, 5511 ForCodeSize); 5512 } 5513 case ISD::BUILD_VECTOR: { 5514 // Only permit BUILD_VECTOR of constants. 5515 if (llvm::any_of(Op->op_values(), [&](SDValue N) { 5516 return !N.isUndef() && !isa<ConstantFPSDNode>(N); 5517 })) 5518 return 0; 5519 if (!LegalOperations) 5520 return 1; 5521 if (isOperationLegal(ISD::ConstantFP, VT) && 5522 isOperationLegal(ISD::BUILD_VECTOR, VT)) 5523 return 1; 5524 return llvm::all_of(Op->op_values(), [&](SDValue N) { 5525 return N.isUndef() || 5526 isFPImmLegal(neg(cast<ConstantFPSDNode>(N)->getValueAPF()), VT, 5527 ForCodeSize); 5528 }); 5529 } 5530 case ISD::FADD: 5531 if (!Options.NoSignedZerosFPMath && !Flags.hasNoSignedZeros()) 5532 return 0; 5533 5534 // After operation legalization, it might not be legal to create new FSUBs. 5535 if (LegalOperations && !isOperationLegalOrCustom(ISD::FSUB, VT)) 5536 return 0; 5537 5538 // fold (fneg (fadd A, B)) -> (fsub (fneg A), B) 5539 if (char V = isNegatibleForFree(Op.getOperand(0), DAG, LegalOperations, 5540 ForCodeSize, Depth + 1)) 5541 return V; 5542 // fold (fneg (fadd A, B)) -> (fsub (fneg B), A) 5543 return isNegatibleForFree(Op.getOperand(1), DAG, LegalOperations, 5544 ForCodeSize, Depth + 1); 5545 case ISD::FSUB: 5546 // We can't turn -(A-B) into B-A when we honor signed zeros. 5547 if (!Options.NoSignedZerosFPMath && !Flags.hasNoSignedZeros()) 5548 return 0; 5549 5550 // fold (fneg (fsub A, B)) -> (fsub B, A) 5551 return 1; 5552 5553 case ISD::FMUL: 5554 case ISD::FDIV: 5555 // fold (fneg (fmul X, Y)) -> (fmul (fneg X), Y) or (fmul X, (fneg Y)) 5556 if (char V = isNegatibleForFree(Op.getOperand(0), DAG, LegalOperations, 5557 ForCodeSize, Depth + 1)) 5558 return V; 5559 5560 // Ignore X * 2.0 because that is expected to be canonicalized to X + X. 
5561 if (auto *C = isConstOrConstSplatFP(Op.getOperand(1))) 5562 if (C->isExactlyValue(2.0) && Op.getOpcode() == ISD::FMUL) 5563 return 0; 5564 5565 return isNegatibleForFree(Op.getOperand(1), DAG, LegalOperations, 5566 ForCodeSize, Depth + 1); 5567 5568 case ISD::FMA: 5569 case ISD::FMAD: { 5570 if (!Options.NoSignedZerosFPMath && !Flags.hasNoSignedZeros()) 5571 return 0; 5572 5573 // fold (fneg (fma X, Y, Z)) -> (fma (fneg X), Y, (fneg Z)) 5574 // fold (fneg (fma X, Y, Z)) -> (fma X, (fneg Y), (fneg Z)) 5575 char V2 = isNegatibleForFree(Op.getOperand(2), DAG, LegalOperations, 5576 ForCodeSize, Depth + 1); 5577 if (!V2) 5578 return 0; 5579 5580 // One of Op0/Op1 must be cheaply negatible, then select the cheapest. 5581 char V0 = isNegatibleForFree(Op.getOperand(0), DAG, LegalOperations, 5582 ForCodeSize, Depth + 1); 5583 char V1 = isNegatibleForFree(Op.getOperand(1), DAG, LegalOperations, 5584 ForCodeSize, Depth + 1); 5585 char V01 = std::max(V0, V1); 5586 return V01 ? std::max(V01, V2) : 0; 5587 } 5588 5589 case ISD::FP_EXTEND: 5590 case ISD::FP_ROUND: 5591 case ISD::FSIN: 5592 return isNegatibleForFree(Op.getOperand(0), DAG, LegalOperations, 5593 ForCodeSize, Depth + 1); 5594 } 5595 5596 return 0; 5597 } 5598 5599 SDValue TargetLowering::getNegatedExpression(SDValue Op, SelectionDAG &DAG, 5600 bool LegalOperations, 5601 bool ForCodeSize, 5602 unsigned Depth) const { 5603 // fneg is removable even if it has multiple uses. 5604 if (Op.getOpcode() == ISD::FNEG) 5605 return Op.getOperand(0); 5606 5607 assert(Depth <= SelectionDAG::MaxRecursionDepth && 5608 "getNegatedExpression doesn't match isNegatibleForFree"); 5609 const SDNodeFlags Flags = Op->getFlags(); 5610 5611 switch (Op.getOpcode()) { 5612 case ISD::ConstantFP: { 5613 APFloat V = cast<ConstantFPSDNode>(Op)->getValueAPF(); 5614 V.changeSign(); 5615 return DAG.getConstantFP(V, SDLoc(Op), Op.getValueType()); 5616 } 5617 case ISD::BUILD_VECTOR: { 5618 SmallVector<SDValue, 4> Ops; 5619 for (SDValue C : Op->op_values()) { 5620 if (C.isUndef()) { 5621 Ops.push_back(C); 5622 continue; 5623 } 5624 APFloat V = cast<ConstantFPSDNode>(C)->getValueAPF(); 5625 V.changeSign(); 5626 Ops.push_back(DAG.getConstantFP(V, SDLoc(Op), C.getValueType())); 5627 } 5628 return DAG.getBuildVector(Op.getValueType(), SDLoc(Op), Ops); 5629 } 5630 case ISD::FADD: 5631 assert((DAG.getTarget().Options.NoSignedZerosFPMath || 5632 Flags.hasNoSignedZeros()) && 5633 "Expected NSZ fp-flag"); 5634 5635 // fold (fneg (fadd A, B)) -> (fsub (fneg A), B) 5636 if (isNegatibleForFree(Op.getOperand(0), DAG, LegalOperations, ForCodeSize, 5637 Depth + 1)) 5638 return DAG.getNode(ISD::FSUB, SDLoc(Op), Op.getValueType(), 5639 getNegatedExpression(Op.getOperand(0), DAG, 5640 LegalOperations, ForCodeSize, 5641 Depth + 1), 5642 Op.getOperand(1), Flags); 5643 // fold (fneg (fadd A, B)) -> (fsub (fneg B), A) 5644 return DAG.getNode(ISD::FSUB, SDLoc(Op), Op.getValueType(), 5645 getNegatedExpression(Op.getOperand(1), DAG, 5646 LegalOperations, ForCodeSize, 5647 Depth + 1), 5648 Op.getOperand(0), Flags); 5649 case ISD::FSUB: 5650 // fold (fneg (fsub 0, B)) -> B 5651 if (ConstantFPSDNode *N0CFP = 5652 isConstOrConstSplatFP(Op.getOperand(0), /*AllowUndefs*/ true)) 5653 if (N0CFP->isZero()) 5654 return Op.getOperand(1); 5655 5656 // fold (fneg (fsub A, B)) -> (fsub B, A) 5657 return DAG.getNode(ISD::FSUB, SDLoc(Op), Op.getValueType(), 5658 Op.getOperand(1), Op.getOperand(0), Flags); 5659 5660 case ISD::FMUL: 5661 case ISD::FDIV: 5662 // fold (fneg (fmul X, Y)) -> (fmul (fneg X), Y) 5663 if 
(isNegatibleForFree(Op.getOperand(0), DAG, LegalOperations, ForCodeSize, 5664 Depth + 1)) 5665 return DAG.getNode(Op.getOpcode(), SDLoc(Op), Op.getValueType(), 5666 getNegatedExpression(Op.getOperand(0), DAG, 5667 LegalOperations, ForCodeSize, 5668 Depth + 1), 5669 Op.getOperand(1), Flags); 5670 5671 // fold (fneg (fmul X, Y)) -> (fmul X, (fneg Y)) 5672 return DAG.getNode( 5673 Op.getOpcode(), SDLoc(Op), Op.getValueType(), Op.getOperand(0), 5674 getNegatedExpression(Op.getOperand(1), DAG, LegalOperations, 5675 ForCodeSize, Depth + 1), 5676 Flags); 5677 5678 case ISD::FMA: 5679 case ISD::FMAD: { 5680 assert((DAG.getTarget().Options.NoSignedZerosFPMath || 5681 Flags.hasNoSignedZeros()) && 5682 "Expected NSZ fp-flag"); 5683 5684 SDValue Neg2 = getNegatedExpression(Op.getOperand(2), DAG, LegalOperations, 5685 ForCodeSize, Depth + 1); 5686 5687 char V0 = isNegatibleForFree(Op.getOperand(0), DAG, LegalOperations, 5688 ForCodeSize, Depth + 1); 5689 char V1 = isNegatibleForFree(Op.getOperand(1), DAG, LegalOperations, 5690 ForCodeSize, Depth + 1); 5691 // TODO: This is a hack. It is possible that costs have changed between now 5692 // and the initial calls to isNegatibleForFree(). That is because we 5693 // are rewriting the expression, and that may change the number of 5694 // uses (and therefore the cost) of values. If the negation costs are 5695 // equal, only negate this value if it is a constant. Otherwise, try 5696 // operand 1. A better fix would eliminate uses as a cost factor or 5697 // track the change in uses as we rewrite the expression. 5698 if (V0 > V1 || (V0 == V1 && isa<ConstantFPSDNode>(Op.getOperand(0)))) { 5699 // fold (fneg (fma X, Y, Z)) -> (fma (fneg X), Y, (fneg Z)) 5700 SDValue Neg0 = getNegatedExpression( 5701 Op.getOperand(0), DAG, LegalOperations, ForCodeSize, Depth + 1); 5702 return DAG.getNode(Op.getOpcode(), SDLoc(Op), Op.getValueType(), Neg0, 5703 Op.getOperand(1), Neg2, Flags); 5704 } 5705 5706 // fold (fneg (fma X, Y, Z)) -> (fma X, (fneg Y), (fneg Z)) 5707 SDValue Neg1 = getNegatedExpression(Op.getOperand(1), DAG, LegalOperations, 5708 ForCodeSize, Depth + 1); 5709 return DAG.getNode(Op.getOpcode(), SDLoc(Op), Op.getValueType(), 5710 Op.getOperand(0), Neg1, Neg2, Flags); 5711 } 5712 5713 case ISD::FP_EXTEND: 5714 case ISD::FSIN: 5715 return DAG.getNode(Op.getOpcode(), SDLoc(Op), Op.getValueType(), 5716 getNegatedExpression(Op.getOperand(0), DAG, 5717 LegalOperations, ForCodeSize, 5718 Depth + 1)); 5719 case ISD::FP_ROUND: 5720 return DAG.getNode(ISD::FP_ROUND, SDLoc(Op), Op.getValueType(), 5721 getNegatedExpression(Op.getOperand(0), DAG, 5722 LegalOperations, ForCodeSize, 5723 Depth + 1), 5724 Op.getOperand(1)); 5725 } 5726 5727 llvm_unreachable("Unknown code"); 5728 } 5729 5730 //===----------------------------------------------------------------------===// 5731 // Legalization Utilities 5732 //===----------------------------------------------------------------------===// 5733 5734 bool TargetLowering::expandMUL_LOHI(unsigned Opcode, EVT VT, SDLoc dl, 5735 SDValue LHS, SDValue RHS, 5736 SmallVectorImpl<SDValue> &Result, 5737 EVT HiLoVT, SelectionDAG &DAG, 5738 MulExpansionKind Kind, SDValue LL, 5739 SDValue LH, SDValue RL, SDValue RH) const { 5740 assert(Opcode == ISD::MUL || Opcode == ISD::UMUL_LOHI || 5741 Opcode == ISD::SMUL_LOHI); 5742 5743 bool HasMULHS = (Kind == MulExpansionKind::Always) || 5744 isOperationLegalOrCustom(ISD::MULHS, HiLoVT); 5745 bool HasMULHU = (Kind == MulExpansionKind::Always) || 5746 isOperationLegalOrCustom(ISD::MULHU, HiLoVT); 5747 
bool HasSMUL_LOHI = (Kind == MulExpansionKind::Always) || 5748 isOperationLegalOrCustom(ISD::SMUL_LOHI, HiLoVT); 5749 bool HasUMUL_LOHI = (Kind == MulExpansionKind::Always) || 5750 isOperationLegalOrCustom(ISD::UMUL_LOHI, HiLoVT); 5751 5752 if (!HasMULHU && !HasMULHS && !HasUMUL_LOHI && !HasSMUL_LOHI) 5753 return false; 5754 5755 unsigned OuterBitSize = VT.getScalarSizeInBits(); 5756 unsigned InnerBitSize = HiLoVT.getScalarSizeInBits(); 5757 unsigned LHSSB = DAG.ComputeNumSignBits(LHS); 5758 unsigned RHSSB = DAG.ComputeNumSignBits(RHS); 5759 5760 // LL, LH, RL, and RH must be either all NULL or all set to a value. 5761 assert((LL.getNode() && LH.getNode() && RL.getNode() && RH.getNode()) || 5762 (!LL.getNode() && !LH.getNode() && !RL.getNode() && !RH.getNode())); 5763 5764 SDVTList VTs = DAG.getVTList(HiLoVT, HiLoVT); 5765 auto MakeMUL_LOHI = [&](SDValue L, SDValue R, SDValue &Lo, SDValue &Hi, 5766 bool Signed) -> bool { 5767 if ((Signed && HasSMUL_LOHI) || (!Signed && HasUMUL_LOHI)) { 5768 Lo = DAG.getNode(Signed ? ISD::SMUL_LOHI : ISD::UMUL_LOHI, dl, VTs, L, R); 5769 Hi = SDValue(Lo.getNode(), 1); 5770 return true; 5771 } 5772 if ((Signed && HasMULHS) || (!Signed && HasMULHU)) { 5773 Lo = DAG.getNode(ISD::MUL, dl, HiLoVT, L, R); 5774 Hi = DAG.getNode(Signed ? ISD::MULHS : ISD::MULHU, dl, HiLoVT, L, R); 5775 return true; 5776 } 5777 return false; 5778 }; 5779 5780 SDValue Lo, Hi; 5781 5782 if (!LL.getNode() && !RL.getNode() && 5783 isOperationLegalOrCustom(ISD::TRUNCATE, HiLoVT)) { 5784 LL = DAG.getNode(ISD::TRUNCATE, dl, HiLoVT, LHS); 5785 RL = DAG.getNode(ISD::TRUNCATE, dl, HiLoVT, RHS); 5786 } 5787 5788 if (!LL.getNode()) 5789 return false; 5790 5791 APInt HighMask = APInt::getHighBitsSet(OuterBitSize, InnerBitSize); 5792 if (DAG.MaskedValueIsZero(LHS, HighMask) && 5793 DAG.MaskedValueIsZero(RHS, HighMask)) { 5794 // The inputs are both zero-extended. 5795 if (MakeMUL_LOHI(LL, RL, Lo, Hi, false)) { 5796 Result.push_back(Lo); 5797 Result.push_back(Hi); 5798 if (Opcode != ISD::MUL) { 5799 SDValue Zero = DAG.getConstant(0, dl, HiLoVT); 5800 Result.push_back(Zero); 5801 Result.push_back(Zero); 5802 } 5803 return true; 5804 } 5805 } 5806 5807 if (!VT.isVector() && Opcode == ISD::MUL && LHSSB > InnerBitSize && 5808 RHSSB > InnerBitSize) { 5809 // The input values are both sign-extended. 5810 // TODO non-MUL case? 5811 if (MakeMUL_LOHI(LL, RL, Lo, Hi, true)) { 5812 Result.push_back(Lo); 5813 Result.push_back(Hi); 5814 return true; 5815 } 5816 } 5817 5818 unsigned ShiftAmount = OuterBitSize - InnerBitSize; 5819 EVT ShiftAmountTy = getShiftAmountTy(VT, DAG.getDataLayout()); 5820 if (APInt::getMaxValue(ShiftAmountTy.getSizeInBits()).ult(ShiftAmount)) { 5821 // FIXME getShiftAmountTy does not always return a sensible result when VT 5822 // is an illegal type, and so the type may be too small to fit the shift 5823 // amount. Override it with i32. The shift will have to be legalized. 
5824 ShiftAmountTy = MVT::i32; 5825 } 5826 SDValue Shift = DAG.getConstant(ShiftAmount, dl, ShiftAmountTy); 5827 5828 if (!LH.getNode() && !RH.getNode() && 5829 isOperationLegalOrCustom(ISD::SRL, VT) && 5830 isOperationLegalOrCustom(ISD::TRUNCATE, HiLoVT)) { 5831 LH = DAG.getNode(ISD::SRL, dl, VT, LHS, Shift); 5832 LH = DAG.getNode(ISD::TRUNCATE, dl, HiLoVT, LH); 5833 RH = DAG.getNode(ISD::SRL, dl, VT, RHS, Shift); 5834 RH = DAG.getNode(ISD::TRUNCATE, dl, HiLoVT, RH); 5835 } 5836 5837 if (!LH.getNode()) 5838 return false; 5839 5840 if (!MakeMUL_LOHI(LL, RL, Lo, Hi, false)) 5841 return false; 5842 5843 Result.push_back(Lo); 5844 5845 if (Opcode == ISD::MUL) { 5846 RH = DAG.getNode(ISD::MUL, dl, HiLoVT, LL, RH); 5847 LH = DAG.getNode(ISD::MUL, dl, HiLoVT, LH, RL); 5848 Hi = DAG.getNode(ISD::ADD, dl, HiLoVT, Hi, RH); 5849 Hi = DAG.getNode(ISD::ADD, dl, HiLoVT, Hi, LH); 5850 Result.push_back(Hi); 5851 return true; 5852 } 5853 5854 // Compute the full width result. 5855 auto Merge = [&](SDValue Lo, SDValue Hi) -> SDValue { 5856 Lo = DAG.getNode(ISD::ZERO_EXTEND, dl, VT, Lo); 5857 Hi = DAG.getNode(ISD::ZERO_EXTEND, dl, VT, Hi); 5858 Hi = DAG.getNode(ISD::SHL, dl, VT, Hi, Shift); 5859 return DAG.getNode(ISD::OR, dl, VT, Lo, Hi); 5860 }; 5861 5862 SDValue Next = DAG.getNode(ISD::ZERO_EXTEND, dl, VT, Hi); 5863 if (!MakeMUL_LOHI(LL, RH, Lo, Hi, false)) 5864 return false; 5865 5866 // This is effectively the add part of a multiply-add of half-sized operands, 5867 // so it cannot overflow. 5868 Next = DAG.getNode(ISD::ADD, dl, VT, Next, Merge(Lo, Hi)); 5869 5870 if (!MakeMUL_LOHI(LH, RL, Lo, Hi, false)) 5871 return false; 5872 5873 SDValue Zero = DAG.getConstant(0, dl, HiLoVT); 5874 EVT BoolType = getSetCCResultType(DAG.getDataLayout(), *DAG.getContext(), VT); 5875 5876 bool UseGlue = (isOperationLegalOrCustom(ISD::ADDC, VT) && 5877 isOperationLegalOrCustom(ISD::ADDE, VT)); 5878 if (UseGlue) 5879 Next = DAG.getNode(ISD::ADDC, dl, DAG.getVTList(VT, MVT::Glue), Next, 5880 Merge(Lo, Hi)); 5881 else 5882 Next = DAG.getNode(ISD::ADDCARRY, dl, DAG.getVTList(VT, BoolType), Next, 5883 Merge(Lo, Hi), DAG.getConstant(0, dl, BoolType)); 5884 5885 SDValue Carry = Next.getValue(1); 5886 Result.push_back(DAG.getNode(ISD::TRUNCATE, dl, HiLoVT, Next)); 5887 Next = DAG.getNode(ISD::SRL, dl, VT, Next, Shift); 5888 5889 if (!MakeMUL_LOHI(LH, RH, Lo, Hi, Opcode == ISD::SMUL_LOHI)) 5890 return false; 5891 5892 if (UseGlue) 5893 Hi = DAG.getNode(ISD::ADDE, dl, DAG.getVTList(HiLoVT, MVT::Glue), Hi, Zero, 5894 Carry); 5895 else 5896 Hi = DAG.getNode(ISD::ADDCARRY, dl, DAG.getVTList(HiLoVT, BoolType), Hi, 5897 Zero, Carry); 5898 5899 Next = DAG.getNode(ISD::ADD, dl, VT, Next, Merge(Lo, Hi)); 5900 5901 if (Opcode == ISD::SMUL_LOHI) { 5902 SDValue NextSub = DAG.getNode(ISD::SUB, dl, VT, Next, 5903 DAG.getNode(ISD::ZERO_EXTEND, dl, VT, RL)); 5904 Next = DAG.getSelectCC(dl, LH, Zero, NextSub, Next, ISD::SETLT); 5905 5906 NextSub = DAG.getNode(ISD::SUB, dl, VT, Next, 5907 DAG.getNode(ISD::ZERO_EXTEND, dl, VT, LL)); 5908 Next = DAG.getSelectCC(dl, RH, Zero, NextSub, Next, ISD::SETLT); 5909 } 5910 5911 Result.push_back(DAG.getNode(ISD::TRUNCATE, dl, HiLoVT, Next)); 5912 Next = DAG.getNode(ISD::SRL, dl, VT, Next, Shift); 5913 Result.push_back(DAG.getNode(ISD::TRUNCATE, dl, HiLoVT, Next)); 5914 return true; 5915 } 5916 5917 bool TargetLowering::expandMUL(SDNode *N, SDValue &Lo, SDValue &Hi, EVT HiLoVT, 5918 SelectionDAG &DAG, MulExpansionKind Kind, 5919 SDValue LL, SDValue LH, SDValue RL, 5920 SDValue RH) const { 5921 
SmallVector<SDValue, 2> Result; 5922 bool Ok = expandMUL_LOHI(N->getOpcode(), N->getValueType(0), N, 5923 N->getOperand(0), N->getOperand(1), Result, HiLoVT, 5924 DAG, Kind, LL, LH, RL, RH); 5925 if (Ok) { 5926 assert(Result.size() == 2); 5927 Lo = Result[0]; 5928 Hi = Result[1]; 5929 } 5930 return Ok; 5931 } 5932 5933 bool TargetLowering::expandFunnelShift(SDNode *Node, SDValue &Result, 5934 SelectionDAG &DAG) const { 5935 EVT VT = Node->getValueType(0); 5936 5937 if (VT.isVector() && (!isOperationLegalOrCustom(ISD::SHL, VT) || 5938 !isOperationLegalOrCustom(ISD::SRL, VT) || 5939 !isOperationLegalOrCustom(ISD::SUB, VT) || 5940 !isOperationLegalOrCustomOrPromote(ISD::OR, VT))) 5941 return false; 5942 5943 // fshl: (X << (Z % BW)) | (Y >> (BW - (Z % BW))) 5944 // fshr: (X << (BW - (Z % BW))) | (Y >> (Z % BW)) 5945 SDValue X = Node->getOperand(0); 5946 SDValue Y = Node->getOperand(1); 5947 SDValue Z = Node->getOperand(2); 5948 5949 unsigned EltSizeInBits = VT.getScalarSizeInBits(); 5950 bool IsFSHL = Node->getOpcode() == ISD::FSHL; 5951 SDLoc DL(SDValue(Node, 0)); 5952 5953 EVT ShVT = Z.getValueType(); 5954 SDValue BitWidthC = DAG.getConstant(EltSizeInBits, DL, ShVT); 5955 SDValue Zero = DAG.getConstant(0, DL, ShVT); 5956 5957 SDValue ShAmt; 5958 if (isPowerOf2_32(EltSizeInBits)) { 5959 SDValue Mask = DAG.getConstant(EltSizeInBits - 1, DL, ShVT); 5960 ShAmt = DAG.getNode(ISD::AND, DL, ShVT, Z, Mask); 5961 } else { 5962 ShAmt = DAG.getNode(ISD::UREM, DL, ShVT, Z, BitWidthC); 5963 } 5964 5965 SDValue InvShAmt = DAG.getNode(ISD::SUB, DL, ShVT, BitWidthC, ShAmt); 5966 SDValue ShX = DAG.getNode(ISD::SHL, DL, VT, X, IsFSHL ? ShAmt : InvShAmt); 5967 SDValue ShY = DAG.getNode(ISD::SRL, DL, VT, Y, IsFSHL ? InvShAmt : ShAmt); 5968 SDValue Or = DAG.getNode(ISD::OR, DL, VT, ShX, ShY); 5969 5970 // If (Z % BW == 0), then the opposite direction shift is shift-by-bitwidth, 5971 // and that is undefined. We must compare and select to avoid UB. 5972 EVT CCVT = getSetCCResultType(DAG.getDataLayout(), *DAG.getContext(), ShVT); 5973 5974 // For fshl, 0-shift returns the 1st arg (X). 5975 // For fshr, 0-shift returns the 2nd arg (Y). 5976 SDValue IsZeroShift = DAG.getSetCC(DL, CCVT, ShAmt, Zero, ISD::SETEQ); 5977 Result = DAG.getSelect(DL, VT, IsZeroShift, IsFSHL ? X : Y, Or); 5978 return true; 5979 } 5980 5981 // TODO: Merge with expandFunnelShift. 5982 bool TargetLowering::expandROT(SDNode *Node, SDValue &Result, 5983 SelectionDAG &DAG) const { 5984 EVT VT = Node->getValueType(0); 5985 unsigned EltSizeInBits = VT.getScalarSizeInBits(); 5986 bool IsLeft = Node->getOpcode() == ISD::ROTL; 5987 SDValue Op0 = Node->getOperand(0); 5988 SDValue Op1 = Node->getOperand(1); 5989 SDLoc DL(SDValue(Node, 0)); 5990 5991 EVT ShVT = Op1.getValueType(); 5992 SDValue BitWidthC = DAG.getConstant(EltSizeInBits, DL, ShVT); 5993 5994 // If a rotate in the other direction is legal, use it. 5995 unsigned RevRot = IsLeft ? 
ISD::ROTR : ISD::ROTL; 5996 if (isOperationLegal(RevRot, VT)) { 5997 SDValue Sub = DAG.getNode(ISD::SUB, DL, ShVT, BitWidthC, Op1); 5998 Result = DAG.getNode(RevRot, DL, VT, Op0, Sub); 5999 return true; 6000 } 6001 6002 if (VT.isVector() && (!isOperationLegalOrCustom(ISD::SHL, VT) || 6003 !isOperationLegalOrCustom(ISD::SRL, VT) || 6004 !isOperationLegalOrCustom(ISD::SUB, VT) || 6005 !isOperationLegalOrCustomOrPromote(ISD::OR, VT) || 6006 !isOperationLegalOrCustomOrPromote(ISD::AND, VT))) 6007 return false; 6008 6009 // Otherwise, 6010 // (rotl x, c) -> (or (shl x, (and c, w-1)), (srl x, (and w-c, w-1))) 6011 // (rotr x, c) -> (or (srl x, (and c, w-1)), (shl x, (and w-c, w-1))) 6012 // 6013 assert(isPowerOf2_32(EltSizeInBits) && EltSizeInBits > 1 && 6014 "Expecting the type bitwidth to be a power of 2"); 6015 unsigned ShOpc = IsLeft ? ISD::SHL : ISD::SRL; 6016 unsigned HsOpc = IsLeft ? ISD::SRL : ISD::SHL; 6017 SDValue BitWidthMinusOneC = DAG.getConstant(EltSizeInBits - 1, DL, ShVT); 6018 SDValue NegOp1 = DAG.getNode(ISD::SUB, DL, ShVT, BitWidthC, Op1); 6019 SDValue And0 = DAG.getNode(ISD::AND, DL, ShVT, Op1, BitWidthMinusOneC); 6020 SDValue And1 = DAG.getNode(ISD::AND, DL, ShVT, NegOp1, BitWidthMinusOneC); 6021 Result = DAG.getNode(ISD::OR, DL, VT, DAG.getNode(ShOpc, DL, VT, Op0, And0), 6022 DAG.getNode(HsOpc, DL, VT, Op0, And1)); 6023 return true; 6024 } 6025 6026 bool TargetLowering::expandFP_TO_SINT(SDNode *Node, SDValue &Result, 6027 SelectionDAG &DAG) const { 6028 unsigned OpNo = Node->isStrictFPOpcode() ? 1 : 0; 6029 SDValue Src = Node->getOperand(OpNo); 6030 EVT SrcVT = Src.getValueType(); 6031 EVT DstVT = Node->getValueType(0); 6032 SDLoc dl(SDValue(Node, 0)); 6033 6034 // FIXME: Only f32 to i64 conversions are supported. 6035 if (SrcVT != MVT::f32 || DstVT != MVT::i64) 6036 return false; 6037 6038 if (Node->isStrictFPOpcode()) 6039 // When a NaN is converted to an integer a trap is allowed. We can't 6040 // use this expansion here because it would eliminate that trap. Other 6041 // traps are also allowed and cannot be eliminated. See 6042 // IEEE 754-2008 sec 5.8. 
    return false;

  // Expand f32 -> i64 conversion
  // This algorithm comes from compiler-rt's implementation of fixsfdi:
  // https://github.com/llvm/llvm-project/blob/master/compiler-rt/lib/builtins/fixsfdi.c
  unsigned SrcEltBits = SrcVT.getScalarSizeInBits();
  EVT IntVT = SrcVT.changeTypeToInteger();
  EVT IntShVT = getShiftAmountTy(IntVT, DAG.getDataLayout());

  SDValue ExponentMask = DAG.getConstant(0x7F800000, dl, IntVT);
  SDValue ExponentLoBit = DAG.getConstant(23, dl, IntVT);
  SDValue Bias = DAG.getConstant(127, dl, IntVT);
  SDValue SignMask = DAG.getConstant(APInt::getSignMask(SrcEltBits), dl, IntVT);
  SDValue SignLowBit = DAG.getConstant(SrcEltBits - 1, dl, IntVT);
  SDValue MantissaMask = DAG.getConstant(0x007FFFFF, dl, IntVT);

  SDValue Bits = DAG.getNode(ISD::BITCAST, dl, IntVT, Src);

  SDValue ExponentBits = DAG.getNode(
      ISD::SRL, dl, IntVT, DAG.getNode(ISD::AND, dl, IntVT, Bits, ExponentMask),
      DAG.getZExtOrTrunc(ExponentLoBit, dl, IntShVT));
  SDValue Exponent = DAG.getNode(ISD::SUB, dl, IntVT, ExponentBits, Bias);

  SDValue Sign = DAG.getNode(ISD::SRA, dl, IntVT,
                             DAG.getNode(ISD::AND, dl, IntVT, Bits, SignMask),
                             DAG.getZExtOrTrunc(SignLowBit, dl, IntShVT));
  Sign = DAG.getSExtOrTrunc(Sign, dl, DstVT);

  SDValue R = DAG.getNode(ISD::OR, dl, IntVT,
                          DAG.getNode(ISD::AND, dl, IntVT, Bits, MantissaMask),
                          DAG.getConstant(0x00800000, dl, IntVT));

  R = DAG.getZExtOrTrunc(R, dl, DstVT);

  R = DAG.getSelectCC(
      dl, Exponent, ExponentLoBit,
      DAG.getNode(ISD::SHL, dl, DstVT, R,
                  DAG.getZExtOrTrunc(
                      DAG.getNode(ISD::SUB, dl, IntVT, Exponent, ExponentLoBit),
                      dl, IntShVT)),
      DAG.getNode(ISD::SRL, dl, DstVT, R,
                  DAG.getZExtOrTrunc(
                      DAG.getNode(ISD::SUB, dl, IntVT, ExponentLoBit, Exponent),
                      dl, IntShVT)),
      ISD::SETGT);

  SDValue Ret = DAG.getNode(ISD::SUB, dl, DstVT,
                            DAG.getNode(ISD::XOR, dl, DstVT, R, Sign), Sign);

  Result = DAG.getSelectCC(dl, Exponent, DAG.getConstant(0, dl, IntVT),
                           DAG.getConstant(0, dl, DstVT), Ret, ISD::SETLT);
  return true;
}

bool TargetLowering::expandFP_TO_UINT(SDNode *Node, SDValue &Result,
                                      SDValue &Chain,
                                      SelectionDAG &DAG) const {
  SDLoc dl(SDValue(Node, 0));
  unsigned OpNo = Node->isStrictFPOpcode() ? 1 : 0;
  SDValue Src = Node->getOperand(OpNo);

  EVT SrcVT = Src.getValueType();
  EVT DstVT = Node->getValueType(0);
  EVT SetCCVT =
      getSetCCResultType(DAG.getDataLayout(), *DAG.getContext(), SrcVT);
  EVT DstSetCCVT =
      getSetCCResultType(DAG.getDataLayout(), *DAG.getContext(), DstVT);

  // Only expand vector types if we have the appropriate vector bit operations.
  unsigned SIntOpcode = Node->isStrictFPOpcode() ? ISD::STRICT_FP_TO_SINT :
                                                   ISD::FP_TO_SINT;
  if (DstVT.isVector() && (!isOperationLegalOrCustom(SIntOpcode, DstVT) ||
                           !isOperationLegalOrCustomOrPromote(ISD::XOR, SrcVT)))
    return false;

  // If the maximum float value is smaller than the signed integer range,
  // the destination signmask can't be represented by the float, so we can
  // just use FP_TO_SINT directly.
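  // E.g. (illustrative): for an f16 -> i32 conversion, 2^31 is far above the
  // largest finite half value (65504), so the conversion below overflows and
  // every in-range input already fits in the signed range.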
6121 const fltSemantics &APFSem = DAG.EVTToAPFloatSemantics(SrcVT); 6122 APFloat APF(APFSem, APInt::getNullValue(SrcVT.getScalarSizeInBits())); 6123 APInt SignMask = APInt::getSignMask(DstVT.getScalarSizeInBits()); 6124 if (APFloat::opOverflow & 6125 APF.convertFromAPInt(SignMask, false, APFloat::rmNearestTiesToEven)) { 6126 if (Node->isStrictFPOpcode()) { 6127 Result = DAG.getNode(ISD::STRICT_FP_TO_SINT, dl, { DstVT, MVT::Other }, 6128 { Node->getOperand(0), Src }); 6129 Chain = Result.getValue(1); 6130 } else 6131 Result = DAG.getNode(ISD::FP_TO_SINT, dl, DstVT, Src); 6132 return true; 6133 } 6134 6135 SDValue Cst = DAG.getConstantFP(APF, dl, SrcVT); 6136 SDValue Sel; 6137 6138 if (Node->isStrictFPOpcode()) { 6139 Sel = DAG.getSetCC(dl, SetCCVT, Src, Cst, ISD::SETLT, 6140 Node->getOperand(0), /*IsSignaling*/ true); 6141 Chain = Sel.getValue(1); 6142 } else { 6143 Sel = DAG.getSetCC(dl, SetCCVT, Src, Cst, ISD::SETLT); 6144 } 6145 6146 bool Strict = Node->isStrictFPOpcode() || 6147 shouldUseStrictFP_TO_INT(SrcVT, DstVT, /*IsSigned*/ false); 6148 6149 if (Strict) { 6150 // Expand based on maximum range of FP_TO_SINT, if the value exceeds the 6151 // signmask then offset (the result of which should be fully representable). 6152 // Sel = Src < 0x8000000000000000 6153 // FltOfs = select Sel, 0, 0x8000000000000000 6154 // IntOfs = select Sel, 0, 0x8000000000000000 6155 // Result = fp_to_sint(Src - FltOfs) ^ IntOfs 6156 6157 // TODO: Should any fast-math-flags be set for the FSUB? 6158 SDValue FltOfs = DAG.getSelect(dl, SrcVT, Sel, 6159 DAG.getConstantFP(0.0, dl, SrcVT), Cst); 6160 Sel = DAG.getBoolExtOrTrunc(Sel, dl, DstSetCCVT, DstVT); 6161 SDValue IntOfs = DAG.getSelect(dl, DstVT, Sel, 6162 DAG.getConstant(0, dl, DstVT), 6163 DAG.getConstant(SignMask, dl, DstVT)); 6164 SDValue SInt; 6165 if (Node->isStrictFPOpcode()) { 6166 SDValue Val = DAG.getNode(ISD::STRICT_FSUB, dl, { SrcVT, MVT::Other }, 6167 { Chain, Src, FltOfs }); 6168 SInt = DAG.getNode(ISD::STRICT_FP_TO_SINT, dl, { DstVT, MVT::Other }, 6169 { Val.getValue(1), Val }); 6170 Chain = SInt.getValue(1); 6171 } else { 6172 SDValue Val = DAG.getNode(ISD::FSUB, dl, SrcVT, Src, FltOfs); 6173 SInt = DAG.getNode(ISD::FP_TO_SINT, dl, DstVT, Val); 6174 } 6175 Result = DAG.getNode(ISD::XOR, dl, DstVT, SInt, IntOfs); 6176 } else { 6177 // Expand based on maximum range of FP_TO_SINT: 6178 // True = fp_to_sint(Src) 6179 // False = 0x8000000000000000 + fp_to_sint(Src - 0x8000000000000000) 6180 // Result = select (Src < 0x8000000000000000), True, False 6181 6182 SDValue True = DAG.getNode(ISD::FP_TO_SINT, dl, DstVT, Src); 6183 // TODO: Should any fast-math-flags be set for the FSUB? 6184 SDValue False = DAG.getNode(ISD::FP_TO_SINT, dl, DstVT, 6185 DAG.getNode(ISD::FSUB, dl, SrcVT, Src, Cst)); 6186 False = DAG.getNode(ISD::XOR, dl, DstVT, False, 6187 DAG.getConstant(SignMask, dl, DstVT)); 6188 Sel = DAG.getBoolExtOrTrunc(Sel, dl, DstSetCCVT, DstVT); 6189 Result = DAG.getSelect(dl, DstVT, Sel, True, False); 6190 } 6191 return true; 6192 } 6193 6194 bool TargetLowering::expandUINT_TO_FP(SDNode *Node, SDValue &Result, 6195 SDValue &Chain, 6196 SelectionDAG &DAG) const { 6197 unsigned OpNo = Node->isStrictFPOpcode() ? 1 : 0; 6198 SDValue Src = Node->getOperand(OpNo); 6199 EVT SrcVT = Src.getValueType(); 6200 EVT DstVT = Node->getValueType(0); 6201 6202 if (SrcVT.getScalarType() != MVT::i64 || DstVT.getScalarType() != MVT::f64) 6203 return false; 6204 6205 // Only expand vector types if we have the appropriate vector bit operations. 
6206 if (SrcVT.isVector() && (!isOperationLegalOrCustom(ISD::SRL, SrcVT) || 6207 !isOperationLegalOrCustom(ISD::FADD, DstVT) || 6208 !isOperationLegalOrCustom(ISD::FSUB, DstVT) || 6209 !isOperationLegalOrCustomOrPromote(ISD::OR, SrcVT) || 6210 !isOperationLegalOrCustomOrPromote(ISD::AND, SrcVT))) 6211 return false; 6212 6213 SDLoc dl(SDValue(Node, 0)); 6214 EVT ShiftVT = getShiftAmountTy(SrcVT, DAG.getDataLayout()); 6215 6216 // Implementation of unsigned i64 to f64 following the algorithm in 6217 // __floatundidf in compiler_rt. This implementation has the advantage 6218 // of performing rounding correctly, both in the default rounding mode 6219 // and in all alternate rounding modes. 6220 SDValue TwoP52 = DAG.getConstant(UINT64_C(0x4330000000000000), dl, SrcVT); 6221 SDValue TwoP84PlusTwoP52 = DAG.getConstantFP( 6222 BitsToDouble(UINT64_C(0x4530000000100000)), dl, DstVT); 6223 SDValue TwoP84 = DAG.getConstant(UINT64_C(0x4530000000000000), dl, SrcVT); 6224 SDValue LoMask = DAG.getConstant(UINT64_C(0x00000000FFFFFFFF), dl, SrcVT); 6225 SDValue HiShift = DAG.getConstant(32, dl, ShiftVT); 6226 6227 SDValue Lo = DAG.getNode(ISD::AND, dl, SrcVT, Src, LoMask); 6228 SDValue Hi = DAG.getNode(ISD::SRL, dl, SrcVT, Src, HiShift); 6229 SDValue LoOr = DAG.getNode(ISD::OR, dl, SrcVT, Lo, TwoP52); 6230 SDValue HiOr = DAG.getNode(ISD::OR, dl, SrcVT, Hi, TwoP84); 6231 SDValue LoFlt = DAG.getBitcast(DstVT, LoOr); 6232 SDValue HiFlt = DAG.getBitcast(DstVT, HiOr); 6233 if (Node->isStrictFPOpcode()) { 6234 SDValue HiSub = 6235 DAG.getNode(ISD::STRICT_FSUB, dl, {DstVT, MVT::Other}, 6236 {Node->getOperand(0), HiFlt, TwoP84PlusTwoP52}); 6237 Result = DAG.getNode(ISD::STRICT_FADD, dl, {DstVT, MVT::Other}, 6238 {HiSub.getValue(1), LoFlt, HiSub}); 6239 Chain = Result.getValue(1); 6240 } else { 6241 SDValue HiSub = 6242 DAG.getNode(ISD::FSUB, dl, DstVT, HiFlt, TwoP84PlusTwoP52); 6243 Result = DAG.getNode(ISD::FADD, dl, DstVT, LoFlt, HiSub); 6244 } 6245 return true; 6246 } 6247 6248 SDValue TargetLowering::expandFMINNUM_FMAXNUM(SDNode *Node, 6249 SelectionDAG &DAG) const { 6250 SDLoc dl(Node); 6251 unsigned NewOp = Node->getOpcode() == ISD::FMINNUM ? 6252 ISD::FMINNUM_IEEE : ISD::FMAXNUM_IEEE; 6253 EVT VT = Node->getValueType(0); 6254 if (isOperationLegalOrCustom(NewOp, VT)) { 6255 SDValue Quiet0 = Node->getOperand(0); 6256 SDValue Quiet1 = Node->getOperand(1); 6257 6258 if (!Node->getFlags().hasNoNaNs()) { 6259 // Insert canonicalizes if it's possible we need to quiet to get correct 6260 // sNaN behavior. 6261 if (!DAG.isKnownNeverSNaN(Quiet0)) { 6262 Quiet0 = DAG.getNode(ISD::FCANONICALIZE, dl, VT, Quiet0, 6263 Node->getFlags()); 6264 } 6265 if (!DAG.isKnownNeverSNaN(Quiet1)) { 6266 Quiet1 = DAG.getNode(ISD::FCANONICALIZE, dl, VT, Quiet1, 6267 Node->getFlags()); 6268 } 6269 } 6270 6271 return DAG.getNode(NewOp, dl, VT, Quiet0, Quiet1, Node->getFlags()); 6272 } 6273 6274 // If the target has FMINIMUM/FMAXIMUM but not FMINNUM/FMAXNUM use that 6275 // instead if there are no NaNs. 6276 if (Node->getFlags().hasNoNaNs()) { 6277 unsigned IEEE2018Op = 6278 Node->getOpcode() == ISD::FMINNUM ? ISD::FMINIMUM : ISD::FMAXIMUM; 6279 if (isOperationLegalOrCustom(IEEE2018Op, VT)) { 6280 return DAG.getNode(IEEE2018Op, dl, VT, Node->getOperand(0), 6281 Node->getOperand(1), Node->getFlags()); 6282 } 6283 } 6284 6285 // If none of the above worked, but there are no NaNs, then expand to 6286 // a compare/select sequence. 
This is required for correctness since 6287 // InstCombine might have canonicalized a fcmp+select sequence to a 6288 // FMINNUM/FMAXNUM node. If we were to fall through to the default 6289 // expansion to libcall, we might introduce a link-time dependency 6290 // on libm into a file that originally did not have one. 6291 if (Node->getFlags().hasNoNaNs()) { 6292 ISD::CondCode Pred = 6293 Node->getOpcode() == ISD::FMINNUM ? ISD::SETLT : ISD::SETGT; 6294 SDValue Op1 = Node->getOperand(0); 6295 SDValue Op2 = Node->getOperand(1); 6296 SDValue SelCC = DAG.getSelectCC(dl, Op1, Op2, Op1, Op2, Pred); 6297 // Copy FMF flags, but always set the no-signed-zeros flag 6298 // as this is implied by the FMINNUM/FMAXNUM semantics. 6299 SDNodeFlags Flags = Node->getFlags(); 6300 Flags.setNoSignedZeros(true); 6301 SelCC->setFlags(Flags); 6302 return SelCC; 6303 } 6304 6305 return SDValue(); 6306 } 6307 6308 bool TargetLowering::expandCTPOP(SDNode *Node, SDValue &Result, 6309 SelectionDAG &DAG) const { 6310 SDLoc dl(Node); 6311 EVT VT = Node->getValueType(0); 6312 EVT ShVT = getShiftAmountTy(VT, DAG.getDataLayout()); 6313 SDValue Op = Node->getOperand(0); 6314 unsigned Len = VT.getScalarSizeInBits(); 6315 assert(VT.isInteger() && "CTPOP not implemented for this type."); 6316 6317 // TODO: Add support for irregular type lengths. 6318 if (!(Len <= 128 && Len % 8 == 0)) 6319 return false; 6320 6321 // Only expand vector types if we have the appropriate vector bit operations. 6322 if (VT.isVector() && (!isOperationLegalOrCustom(ISD::ADD, VT) || 6323 !isOperationLegalOrCustom(ISD::SUB, VT) || 6324 !isOperationLegalOrCustom(ISD::SRL, VT) || 6325 (Len != 8 && !isOperationLegalOrCustom(ISD::MUL, VT)) || 6326 !isOperationLegalOrCustomOrPromote(ISD::AND, VT))) 6327 return false; 6328 6329 // This is the "best" algorithm from 6330 // http://graphics.stanford.edu/~seander/bithacks.html#CountBitsSetParallel 6331 SDValue Mask55 = 6332 DAG.getConstant(APInt::getSplat(Len, APInt(8, 0x55)), dl, VT); 6333 SDValue Mask33 = 6334 DAG.getConstant(APInt::getSplat(Len, APInt(8, 0x33)), dl, VT); 6335 SDValue Mask0F = 6336 DAG.getConstant(APInt::getSplat(Len, APInt(8, 0x0F)), dl, VT); 6337 SDValue Mask01 = 6338 DAG.getConstant(APInt::getSplat(Len, APInt(8, 0x01)), dl, VT); 6339 6340 // v = v - ((v >> 1) & 0x55555555...) 6341 Op = DAG.getNode(ISD::SUB, dl, VT, Op, 6342 DAG.getNode(ISD::AND, dl, VT, 6343 DAG.getNode(ISD::SRL, dl, VT, Op, 6344 DAG.getConstant(1, dl, ShVT)), 6345 Mask55)); 6346 // v = (v & 0x33333333...) + ((v >> 2) & 0x33333333...) 6347 Op = DAG.getNode(ISD::ADD, dl, VT, DAG.getNode(ISD::AND, dl, VT, Op, Mask33), 6348 DAG.getNode(ISD::AND, dl, VT, 6349 DAG.getNode(ISD::SRL, dl, VT, Op, 6350 DAG.getConstant(2, dl, ShVT)), 6351 Mask33)); 6352 // v = (v + (v >> 4)) & 0x0F0F0F0F... 6353 Op = DAG.getNode(ISD::AND, dl, VT, 6354 DAG.getNode(ISD::ADD, dl, VT, Op, 6355 DAG.getNode(ISD::SRL, dl, VT, Op, 6356 DAG.getConstant(4, dl, ShVT))), 6357 Mask0F); 6358 // v = (v * 0x01010101...) 
>> (Len - 8) 6359 if (Len > 8) 6360 Op = 6361 DAG.getNode(ISD::SRL, dl, VT, DAG.getNode(ISD::MUL, dl, VT, Op, Mask01), 6362 DAG.getConstant(Len - 8, dl, ShVT)); 6363 6364 Result = Op; 6365 return true; 6366 } 6367 6368 bool TargetLowering::expandCTLZ(SDNode *Node, SDValue &Result, 6369 SelectionDAG &DAG) const { 6370 SDLoc dl(Node); 6371 EVT VT = Node->getValueType(0); 6372 EVT ShVT = getShiftAmountTy(VT, DAG.getDataLayout()); 6373 SDValue Op = Node->getOperand(0); 6374 unsigned NumBitsPerElt = VT.getScalarSizeInBits(); 6375 6376 // If the non-ZERO_UNDEF version is supported we can use that instead. 6377 if (Node->getOpcode() == ISD::CTLZ_ZERO_UNDEF && 6378 isOperationLegalOrCustom(ISD::CTLZ, VT)) { 6379 Result = DAG.getNode(ISD::CTLZ, dl, VT, Op); 6380 return true; 6381 } 6382 6383 // If the ZERO_UNDEF version is supported use that and handle the zero case. 6384 if (isOperationLegalOrCustom(ISD::CTLZ_ZERO_UNDEF, VT)) { 6385 EVT SetCCVT = 6386 getSetCCResultType(DAG.getDataLayout(), *DAG.getContext(), VT); 6387 SDValue CTLZ = DAG.getNode(ISD::CTLZ_ZERO_UNDEF, dl, VT, Op); 6388 SDValue Zero = DAG.getConstant(0, dl, VT); 6389 SDValue SrcIsZero = DAG.getSetCC(dl, SetCCVT, Op, Zero, ISD::SETEQ); 6390 Result = DAG.getNode(ISD::SELECT, dl, VT, SrcIsZero, 6391 DAG.getConstant(NumBitsPerElt, dl, VT), CTLZ); 6392 return true; 6393 } 6394 6395 // Only expand vector types if we have the appropriate vector bit operations. 6396 if (VT.isVector() && (!isPowerOf2_32(NumBitsPerElt) || 6397 !isOperationLegalOrCustom(ISD::CTPOP, VT) || 6398 !isOperationLegalOrCustom(ISD::SRL, VT) || 6399 !isOperationLegalOrCustomOrPromote(ISD::OR, VT))) 6400 return false; 6401 6402 // for now, we do this: 6403 // x = x | (x >> 1); 6404 // x = x | (x >> 2); 6405 // ... 6406 // x = x | (x >>16); 6407 // x = x | (x >>32); // for 64-bit input 6408 // return popcount(~x); 6409 // 6410 // Ref: "Hacker's Delight" by Henry Warren 6411 for (unsigned i = 0; (1U << i) <= (NumBitsPerElt / 2); ++i) { 6412 SDValue Tmp = DAG.getConstant(1ULL << i, dl, ShVT); 6413 Op = DAG.getNode(ISD::OR, dl, VT, Op, 6414 DAG.getNode(ISD::SRL, dl, VT, Op, Tmp)); 6415 } 6416 Op = DAG.getNOT(dl, Op, VT); 6417 Result = DAG.getNode(ISD::CTPOP, dl, VT, Op); 6418 return true; 6419 } 6420 6421 bool TargetLowering::expandCTTZ(SDNode *Node, SDValue &Result, 6422 SelectionDAG &DAG) const { 6423 SDLoc dl(Node); 6424 EVT VT = Node->getValueType(0); 6425 SDValue Op = Node->getOperand(0); 6426 unsigned NumBitsPerElt = VT.getScalarSizeInBits(); 6427 6428 // If the non-ZERO_UNDEF version is supported we can use that instead. 6429 if (Node->getOpcode() == ISD::CTTZ_ZERO_UNDEF && 6430 isOperationLegalOrCustom(ISD::CTTZ, VT)) { 6431 Result = DAG.getNode(ISD::CTTZ, dl, VT, Op); 6432 return true; 6433 } 6434 6435 // If the ZERO_UNDEF version is supported use that and handle the zero case. 6436 if (isOperationLegalOrCustom(ISD::CTTZ_ZERO_UNDEF, VT)) { 6437 EVT SetCCVT = 6438 getSetCCResultType(DAG.getDataLayout(), *DAG.getContext(), VT); 6439 SDValue CTTZ = DAG.getNode(ISD::CTTZ_ZERO_UNDEF, dl, VT, Op); 6440 SDValue Zero = DAG.getConstant(0, dl, VT); 6441 SDValue SrcIsZero = DAG.getSetCC(dl, SetCCVT, Op, Zero, ISD::SETEQ); 6442 Result = DAG.getNode(ISD::SELECT, dl, VT, SrcIsZero, 6443 DAG.getConstant(NumBitsPerElt, dl, VT), CTTZ); 6444 return true; 6445 } 6446 6447 // Only expand vector types if we have the appropriate vector bit operations. 
6448 if (VT.isVector() && (!isPowerOf2_32(NumBitsPerElt) || 6449 (!isOperationLegalOrCustom(ISD::CTPOP, VT) && 6450 !isOperationLegalOrCustom(ISD::CTLZ, VT)) || 6451 !isOperationLegalOrCustom(ISD::SUB, VT) || 6452 !isOperationLegalOrCustomOrPromote(ISD::AND, VT) || 6453 !isOperationLegalOrCustomOrPromote(ISD::XOR, VT))) 6454 return false; 6455 6456 // for now, we use: { return popcount(~x & (x - 1)); } 6457 // unless the target has ctlz but not ctpop, in which case we use: 6458 // { return 32 - nlz(~x & (x-1)); } 6459 // Ref: "Hacker's Delight" by Henry Warren 6460 SDValue Tmp = DAG.getNode( 6461 ISD::AND, dl, VT, DAG.getNOT(dl, Op, VT), 6462 DAG.getNode(ISD::SUB, dl, VT, Op, DAG.getConstant(1, dl, VT))); 6463 6464 // If ISD::CTLZ is legal and CTPOP isn't, then do that instead. 6465 if (isOperationLegal(ISD::CTLZ, VT) && !isOperationLegal(ISD::CTPOP, VT)) { 6466 Result = 6467 DAG.getNode(ISD::SUB, dl, VT, DAG.getConstant(NumBitsPerElt, dl, VT), 6468 DAG.getNode(ISD::CTLZ, dl, VT, Tmp)); 6469 return true; 6470 } 6471 6472 Result = DAG.getNode(ISD::CTPOP, dl, VT, Tmp); 6473 return true; 6474 } 6475 6476 bool TargetLowering::expandABS(SDNode *N, SDValue &Result, 6477 SelectionDAG &DAG) const { 6478 SDLoc dl(N); 6479 EVT VT = N->getValueType(0); 6480 EVT ShVT = getShiftAmountTy(VT, DAG.getDataLayout()); 6481 SDValue Op = N->getOperand(0); 6482 6483 // Only expand vector types if we have the appropriate vector operations. 6484 if (VT.isVector() && (!isOperationLegalOrCustom(ISD::SRA, VT) || 6485 !isOperationLegalOrCustom(ISD::ADD, VT) || 6486 !isOperationLegalOrCustomOrPromote(ISD::XOR, VT))) 6487 return false; 6488 6489 SDValue Shift = 6490 DAG.getNode(ISD::SRA, dl, VT, Op, 6491 DAG.getConstant(VT.getScalarSizeInBits() - 1, dl, ShVT)); 6492 SDValue Add = DAG.getNode(ISD::ADD, dl, VT, Op, Shift); 6493 Result = DAG.getNode(ISD::XOR, dl, VT, Add, Shift); 6494 return true; 6495 } 6496 6497 std::pair<SDValue, SDValue> 6498 TargetLowering::scalarizeVectorLoad(LoadSDNode *LD, 6499 SelectionDAG &DAG) const { 6500 SDLoc SL(LD); 6501 SDValue Chain = LD->getChain(); 6502 SDValue BasePTR = LD->getBasePtr(); 6503 EVT SrcVT = LD->getMemoryVT(); 6504 ISD::LoadExtType ExtType = LD->getExtensionType(); 6505 6506 unsigned NumElem = SrcVT.getVectorNumElements(); 6507 6508 EVT SrcEltVT = SrcVT.getScalarType(); 6509 EVT DstEltVT = LD->getValueType(0).getScalarType(); 6510 6511 unsigned Stride = SrcEltVT.getSizeInBits() / 8; 6512 assert(SrcEltVT.isByteSized()); 6513 6514 SmallVector<SDValue, 8> Vals; 6515 SmallVector<SDValue, 8> LoadChains; 6516 6517 for (unsigned Idx = 0; Idx < NumElem; ++Idx) { 6518 SDValue ScalarLoad = 6519 DAG.getExtLoad(ExtType, SL, DstEltVT, Chain, BasePTR, 6520 LD->getPointerInfo().getWithOffset(Idx * Stride), 6521 SrcEltVT, MinAlign(LD->getAlignment(), Idx * Stride), 6522 LD->getMemOperand()->getFlags(), LD->getAAInfo()); 6523 6524 BasePTR = DAG.getObjectPtrOffset(SL, BasePTR, Stride); 6525 6526 Vals.push_back(ScalarLoad.getValue(0)); 6527 LoadChains.push_back(ScalarLoad.getValue(1)); 6528 } 6529 6530 SDValue NewChain = DAG.getNode(ISD::TokenFactor, SL, MVT::Other, LoadChains); 6531 SDValue Value = DAG.getBuildVector(LD->getValueType(0), SL, Vals); 6532 6533 return std::make_pair(Value, NewChain); 6534 } 6535 6536 SDValue TargetLowering::scalarizeVectorStore(StoreSDNode *ST, 6537 SelectionDAG &DAG) const { 6538 SDLoc SL(ST); 6539 6540 SDValue Chain = ST->getChain(); 6541 SDValue BasePtr = ST->getBasePtr(); 6542 SDValue Value = ST->getValue(); 6543 EVT StVT = ST->getMemoryVT(); 6544 6545 // 
The type of the data we want to save. 6546 EVT RegVT = Value.getValueType(); 6547 EVT RegSclVT = RegVT.getScalarType(); 6548 6549 // The type of data as saved in memory. 6550 EVT MemSclVT = StVT.getScalarType(); 6551 6552 unsigned NumElem = StVT.getVectorNumElements(); 6553 6554 // A vector must always be stored in memory as-is, i.e. without any padding 6555 // between the elements, since various pieces of code depend on it, e.g. in 6556 // the handling of a bitcast of a vector type to int, which may be done with 6557 // a vector store followed by an integer load. A vector that does not have 6558 // elements that are byte-sized must therefore be stored as an integer 6559 // built out of the extracted vector elements. 6560 if (!MemSclVT.isByteSized()) { 6561 unsigned NumBits = StVT.getSizeInBits(); 6562 EVT IntVT = EVT::getIntegerVT(*DAG.getContext(), NumBits); 6563 6564 SDValue CurrVal = DAG.getConstant(0, SL, IntVT); 6565 6566 for (unsigned Idx = 0; Idx < NumElem; ++Idx) { 6567 SDValue Elt = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, SL, RegSclVT, Value, 6568 DAG.getVectorIdxConstant(Idx, SL)); 6569 SDValue Trunc = DAG.getNode(ISD::TRUNCATE, SL, MemSclVT, Elt); 6570 SDValue ExtElt = DAG.getNode(ISD::ZERO_EXTEND, SL, IntVT, Trunc); 6571 unsigned ShiftIntoIdx = 6572 (DAG.getDataLayout().isBigEndian() ? (NumElem - 1) - Idx : Idx); 6573 SDValue ShiftAmount = 6574 DAG.getConstant(ShiftIntoIdx * MemSclVT.getSizeInBits(), SL, IntVT); 6575 SDValue ShiftedElt = 6576 DAG.getNode(ISD::SHL, SL, IntVT, ExtElt, ShiftAmount); 6577 CurrVal = DAG.getNode(ISD::OR, SL, IntVT, CurrVal, ShiftedElt); 6578 } 6579 6580 return DAG.getStore(Chain, SL, CurrVal, BasePtr, ST->getPointerInfo(), 6581 ST->getAlignment(), ST->getMemOperand()->getFlags(), 6582 ST->getAAInfo()); 6583 } 6584 6585 // Store stride in bytes. 6586 unsigned Stride = MemSclVT.getSizeInBits() / 8; 6587 assert(Stride && "Zero stride!"); 6588 // Extract each of the elements from the original vector and save them into 6589 // memory individually. 6590 SmallVector<SDValue, 8> Stores; 6591 for (unsigned Idx = 0; Idx < NumElem; ++Idx) { 6592 SDValue Elt = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, SL, RegSclVT, Value, 6593 DAG.getVectorIdxConstant(Idx, SL)); 6594 6595 SDValue Ptr = DAG.getObjectPtrOffset(SL, BasePtr, Idx * Stride); 6596 6597 // This scalar TruncStore may be illegal, but we legalize it later. 6598 SDValue Store = DAG.getTruncStore( 6599 Chain, SL, Elt, Ptr, ST->getPointerInfo().getWithOffset(Idx * Stride), 6600 MemSclVT, MinAlign(ST->getAlignment(), Idx * Stride), 6601 ST->getMemOperand()->getFlags(), ST->getAAInfo()); 6602 6603 Stores.push_back(Store); 6604 } 6605 6606 return DAG.getNode(ISD::TokenFactor, SL, MVT::Other, Stores); 6607 } 6608 6609 std::pair<SDValue, SDValue> 6610 TargetLowering::expandUnalignedLoad(LoadSDNode *LD, SelectionDAG &DAG) const { 6611 assert(LD->getAddressingMode() == ISD::UNINDEXED && 6612 "unaligned indexed loads not implemented!"); 6613 SDValue Chain = LD->getChain(); 6614 SDValue Ptr = LD->getBasePtr(); 6615 EVT VT = LD->getValueType(0); 6616 EVT LoadedVT = LD->getMemoryVT(); 6617 SDLoc dl(LD); 6618 auto &MF = DAG.getMachineFunction(); 6619 6620 if (VT.isFloatingPoint() || VT.isVector()) { 6621 EVT intVT = EVT::getIntegerVT(*DAG.getContext(), LoadedVT.getSizeInBits()); 6622 if (isTypeLegal(intVT) && isTypeLegal(LoadedVT)) { 6623 if (!isOperationLegalOrCustom(ISD::LOAD, intVT) && 6624 LoadedVT.isVector()) { 6625 // Scalarize the load and let the individual components be handled.
6626 return scalarizeVectorLoad(LD, DAG); 6627 } 6628 6629 // Expand to a (misaligned) integer load of the same size, 6630 // then bitconvert to floating point or vector. 6631 SDValue newLoad = DAG.getLoad(intVT, dl, Chain, Ptr, 6632 LD->getMemOperand()); 6633 SDValue Result = DAG.getNode(ISD::BITCAST, dl, LoadedVT, newLoad); 6634 if (LoadedVT != VT) 6635 Result = DAG.getNode(VT.isFloatingPoint() ? ISD::FP_EXTEND : 6636 ISD::ANY_EXTEND, dl, VT, Result); 6637 6638 return std::make_pair(Result, newLoad.getValue(1)); 6639 } 6640 6641 // Copy the value to an (aligned) stack slot using (unaligned) integer 6642 // loads and stores, then do an (aligned) load from the stack slot. 6643 MVT RegVT = getRegisterType(*DAG.getContext(), intVT); 6644 unsigned LoadedBytes = LoadedVT.getStoreSize(); 6645 unsigned RegBytes = RegVT.getSizeInBits() / 8; 6646 unsigned NumRegs = (LoadedBytes + RegBytes - 1) / RegBytes; 6647 6648 // Make sure the stack slot is also aligned for the register type. 6649 SDValue StackBase = DAG.CreateStackTemporary(LoadedVT, RegVT); 6650 auto FrameIndex = cast<FrameIndexSDNode>(StackBase.getNode())->getIndex(); 6651 SmallVector<SDValue, 8> Stores; 6652 SDValue StackPtr = StackBase; 6653 unsigned Offset = 0; 6654 6655 EVT PtrVT = Ptr.getValueType(); 6656 EVT StackPtrVT = StackPtr.getValueType(); 6657 6658 SDValue PtrIncrement = DAG.getConstant(RegBytes, dl, PtrVT); 6659 SDValue StackPtrIncrement = DAG.getConstant(RegBytes, dl, StackPtrVT); 6660 6661 // Do all but one of the copies using the full register width. 6662 for (unsigned i = 1; i < NumRegs; i++) { 6663 // Load one integer register's worth from the original location. 6664 SDValue Load = DAG.getLoad( 6665 RegVT, dl, Chain, Ptr, LD->getPointerInfo().getWithOffset(Offset), 6666 MinAlign(LD->getAlignment(), Offset), LD->getMemOperand()->getFlags(), 6667 LD->getAAInfo()); 6668 // Follow the load with a store to the stack slot. Remember the store. 6669 Stores.push_back(DAG.getStore( 6670 Load.getValue(1), dl, Load, StackPtr, 6671 MachinePointerInfo::getFixedStack(MF, FrameIndex, Offset))); 6672 // Increment the pointers. 6673 Offset += RegBytes; 6674 6675 Ptr = DAG.getObjectPtrOffset(dl, Ptr, PtrIncrement); 6676 StackPtr = DAG.getObjectPtrOffset(dl, StackPtr, StackPtrIncrement); 6677 } 6678 6679 // The last copy may be partial. Do an extending load. 6680 EVT MemVT = EVT::getIntegerVT(*DAG.getContext(), 6681 8 * (LoadedBytes - Offset)); 6682 SDValue Load = 6683 DAG.getExtLoad(ISD::EXTLOAD, dl, RegVT, Chain, Ptr, 6684 LD->getPointerInfo().getWithOffset(Offset), MemVT, 6685 MinAlign(LD->getAlignment(), Offset), 6686 LD->getMemOperand()->getFlags(), LD->getAAInfo()); 6687 // Follow the load with a store to the stack slot. Remember the store. 6688 // On big-endian machines this requires a truncating store to ensure 6689 // that the bits end up in the right place. 6690 Stores.push_back(DAG.getTruncStore( 6691 Load.getValue(1), dl, Load, StackPtr, 6692 MachinePointerInfo::getFixedStack(MF, FrameIndex, Offset), MemVT)); 6693 6694 // The order of the stores doesn't matter - say it with a TokenFactor. 6695 SDValue TF = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, Stores); 6696 6697 // Finally, perform the original load, only redirected to the stack slot. 6698 Load = DAG.getExtLoad(LD->getExtensionType(), dl, VT, TF, StackBase, 6699 MachinePointerInfo::getFixedStack(MF, FrameIndex, 0), 6700 LoadedVT); 6701 6702 // Callers expect a MERGE_VALUES node.
6703 return std::make_pair(Load, TF); 6704 } 6705 6706 assert(LoadedVT.isInteger() && !LoadedVT.isVector() && 6707 "Unaligned load of unsupported type."); 6708 6709 // Compute the new VT that is half the size of the old one. This is an 6710 // integer MVT. 6711 unsigned NumBits = LoadedVT.getSizeInBits(); 6712 EVT NewLoadedVT; 6713 NewLoadedVT = EVT::getIntegerVT(*DAG.getContext(), NumBits/2); 6714 NumBits >>= 1; 6715 6716 unsigned Alignment = LD->getAlignment(); 6717 unsigned IncrementSize = NumBits / 8; 6718 ISD::LoadExtType HiExtType = LD->getExtensionType(); 6719 6720 // If the original load is NON_EXTLOAD, the hi part load must be ZEXTLOAD. 6721 if (HiExtType == ISD::NON_EXTLOAD) 6722 HiExtType = ISD::ZEXTLOAD; 6723 6724 // Load the value in two parts 6725 SDValue Lo, Hi; 6726 if (DAG.getDataLayout().isLittleEndian()) { 6727 Lo = DAG.getExtLoad(ISD::ZEXTLOAD, dl, VT, Chain, Ptr, LD->getPointerInfo(), 6728 NewLoadedVT, Alignment, LD->getMemOperand()->getFlags(), 6729 LD->getAAInfo()); 6730 6731 Ptr = DAG.getObjectPtrOffset(dl, Ptr, IncrementSize); 6732 Hi = DAG.getExtLoad(HiExtType, dl, VT, Chain, Ptr, 6733 LD->getPointerInfo().getWithOffset(IncrementSize), 6734 NewLoadedVT, MinAlign(Alignment, IncrementSize), 6735 LD->getMemOperand()->getFlags(), LD->getAAInfo()); 6736 } else { 6737 Hi = DAG.getExtLoad(HiExtType, dl, VT, Chain, Ptr, LD->getPointerInfo(), 6738 NewLoadedVT, Alignment, LD->getMemOperand()->getFlags(), 6739 LD->getAAInfo()); 6740 6741 Ptr = DAG.getObjectPtrOffset(dl, Ptr, IncrementSize); 6742 Lo = DAG.getExtLoad(ISD::ZEXTLOAD, dl, VT, Chain, Ptr, 6743 LD->getPointerInfo().getWithOffset(IncrementSize), 6744 NewLoadedVT, MinAlign(Alignment, IncrementSize), 6745 LD->getMemOperand()->getFlags(), LD->getAAInfo()); 6746 } 6747 6748 // aggregate the two parts 6749 SDValue ShiftAmount = 6750 DAG.getConstant(NumBits, dl, getShiftAmountTy(Hi.getValueType(), 6751 DAG.getDataLayout())); 6752 SDValue Result = DAG.getNode(ISD::SHL, dl, VT, Hi, ShiftAmount); 6753 Result = DAG.getNode(ISD::OR, dl, VT, Result, Lo); 6754 6755 SDValue TF = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, Lo.getValue(1), 6756 Hi.getValue(1)); 6757 6758 return std::make_pair(Result, TF); 6759 } 6760 6761 SDValue TargetLowering::expandUnalignedStore(StoreSDNode *ST, 6762 SelectionDAG &DAG) const { 6763 assert(ST->getAddressingMode() == ISD::UNINDEXED && 6764 "unaligned indexed stores not implemented!"); 6765 SDValue Chain = ST->getChain(); 6766 SDValue Ptr = ST->getBasePtr(); 6767 SDValue Val = ST->getValue(); 6768 EVT VT = Val.getValueType(); 6769 int Alignment = ST->getAlignment(); 6770 auto &MF = DAG.getMachineFunction(); 6771 EVT StoreMemVT = ST->getMemoryVT(); 6772 6773 SDLoc dl(ST); 6774 if (StoreMemVT.isFloatingPoint() || StoreMemVT.isVector()) { 6775 EVT intVT = EVT::getIntegerVT(*DAG.getContext(), VT.getSizeInBits()); 6776 if (isTypeLegal(intVT)) { 6777 if (!isOperationLegalOrCustom(ISD::STORE, intVT) && 6778 StoreMemVT.isVector()) { 6779 // Scalarize the store and let the individual components be handled. 6780 SDValue Result = scalarizeVectorStore(ST, DAG); 6781 return Result; 6782 } 6783 // Expand to a bitconvert of the value to the integer type of the 6784 // same size, then a (misaligned) int store. 6785 // FIXME: Does not handle truncating floating point stores! 
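// For instance, a misaligned f64 store becomes a bitcast of the value to // i64 followed by a (still misaligned) i64 store; if that integer store is // still unsupported, it will come back through this function and be split // by the half-size integer path at the end.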
6786 SDValue Result = DAG.getNode(ISD::BITCAST, dl, intVT, Val); 6787 Result = DAG.getStore(Chain, dl, Result, Ptr, ST->getPointerInfo(), 6788 Alignment, ST->getMemOperand()->getFlags()); 6789 return Result; 6790 } 6791 // Do an (aligned) store to a stack slot, then copy from the stack slot 6792 // to the final destination using (unaligned) integer loads and stores. 6793 MVT RegVT = getRegisterType( 6794 *DAG.getContext(), 6795 EVT::getIntegerVT(*DAG.getContext(), StoreMemVT.getSizeInBits())); 6796 EVT PtrVT = Ptr.getValueType(); 6797 unsigned StoredBytes = StoreMemVT.getStoreSize(); 6798 unsigned RegBytes = RegVT.getSizeInBits() / 8; 6799 unsigned NumRegs = (StoredBytes + RegBytes - 1) / RegBytes; 6800 6801 // Make sure the stack slot is also aligned for the register type. 6802 SDValue StackPtr = DAG.CreateStackTemporary(StoreMemVT, RegVT); 6803 auto FrameIndex = cast<FrameIndexSDNode>(StackPtr.getNode())->getIndex(); 6804 6805 // Perform the original store, only redirected to the stack slot. 6806 SDValue Store = DAG.getTruncStore( 6807 Chain, dl, Val, StackPtr, 6808 MachinePointerInfo::getFixedStack(MF, FrameIndex, 0), StoreMemVT); 6809 6810 EVT StackPtrVT = StackPtr.getValueType(); 6811 6812 SDValue PtrIncrement = DAG.getConstant(RegBytes, dl, PtrVT); 6813 SDValue StackPtrIncrement = DAG.getConstant(RegBytes, dl, StackPtrVT); 6814 SmallVector<SDValue, 8> Stores; 6815 unsigned Offset = 0; 6816 6817 // Do all but one of the copies using the full register width. 6818 for (unsigned i = 1; i < NumRegs; i++) { 6819 // Load one integer register's worth from the stack slot. 6820 SDValue Load = DAG.getLoad( 6821 RegVT, dl, Store, StackPtr, 6822 MachinePointerInfo::getFixedStack(MF, FrameIndex, Offset)); 6823 // Store it to the final location. Remember the store. 6824 Stores.push_back(DAG.getStore(Load.getValue(1), dl, Load, Ptr, 6825 ST->getPointerInfo().getWithOffset(Offset), 6826 MinAlign(ST->getAlignment(), Offset), 6827 ST->getMemOperand()->getFlags())); 6828 // Increment the pointers. 6829 Offset += RegBytes; 6830 StackPtr = DAG.getObjectPtrOffset(dl, StackPtr, StackPtrIncrement); 6831 Ptr = DAG.getObjectPtrOffset(dl, Ptr, PtrIncrement); 6832 } 6833 6834 // The last store may be partial. Do a truncating store. On big-endian 6835 // machines this requires an extending load from the stack slot to ensure 6836 // that the bits are in the right place. 6837 EVT LoadMemVT = 6838 EVT::getIntegerVT(*DAG.getContext(), 8 * (StoredBytes - Offset)); 6839 6840 // Load from the stack slot. 6841 SDValue Load = DAG.getExtLoad( 6842 ISD::EXTLOAD, dl, RegVT, Store, StackPtr, 6843 MachinePointerInfo::getFixedStack(MF, FrameIndex, Offset), LoadMemVT); 6844 6845 Stores.push_back( 6846 DAG.getTruncStore(Load.getValue(1), dl, Load, Ptr, 6847 ST->getPointerInfo().getWithOffset(Offset), LoadMemVT, 6848 MinAlign(ST->getAlignment(), Offset), 6849 ST->getMemOperand()->getFlags(), ST->getAAInfo())); 6850 // The order of the stores doesn't matter - say it with a TokenFactor. 6851 SDValue Result = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, Stores); 6852 return Result; 6853 } 6854 6855 assert(StoreMemVT.isInteger() && !StoreMemVT.isVector() && 6856 "Unaligned store of unknown type."); 6857 // Get the half-size VT. 6858 EVT NewStoredVT = StoreMemVT.getHalfSizedIntegerVT(*DAG.getContext()); 6859 int NumBits = NewStoredVT.getSizeInBits(); 6860 int IncrementSize = NumBits / 8; 6861 6862 // Divide the stored value into two parts.
6863 SDValue ShiftAmount = DAG.getConstant( 6864 NumBits, dl, getShiftAmountTy(Val.getValueType(), DAG.getDataLayout())); 6865 SDValue Lo = Val; 6866 SDValue Hi = DAG.getNode(ISD::SRL, dl, VT, Val, ShiftAmount); 6867 6868 // Store the two parts 6869 SDValue Store1, Store2; 6870 Store1 = DAG.getTruncStore(Chain, dl, 6871 DAG.getDataLayout().isLittleEndian() ? Lo : Hi, 6872 Ptr, ST->getPointerInfo(), NewStoredVT, Alignment, 6873 ST->getMemOperand()->getFlags()); 6874 6875 Ptr = DAG.getObjectPtrOffset(dl, Ptr, IncrementSize); 6876 Alignment = MinAlign(Alignment, IncrementSize); 6877 Store2 = DAG.getTruncStore( 6878 Chain, dl, DAG.getDataLayout().isLittleEndian() ? Hi : Lo, Ptr, 6879 ST->getPointerInfo().getWithOffset(IncrementSize), NewStoredVT, Alignment, 6880 ST->getMemOperand()->getFlags(), ST->getAAInfo()); 6881 6882 SDValue Result = 6883 DAG.getNode(ISD::TokenFactor, dl, MVT::Other, Store1, Store2); 6884 return Result; 6885 } 6886 6887 SDValue 6888 TargetLowering::IncrementMemoryAddress(SDValue Addr, SDValue Mask, 6889 const SDLoc &DL, EVT DataVT, 6890 SelectionDAG &DAG, 6891 bool IsCompressedMemory) const { 6892 SDValue Increment; 6893 EVT AddrVT = Addr.getValueType(); 6894 EVT MaskVT = Mask.getValueType(); 6895 assert(DataVT.getVectorNumElements() == MaskVT.getVectorNumElements() && 6896 "Incompatible types of Data and Mask"); 6897 if (IsCompressedMemory) { 6898 // Incrementing the pointer according to number of '1's in the mask. 6899 EVT MaskIntVT = EVT::getIntegerVT(*DAG.getContext(), MaskVT.getSizeInBits()); 6900 SDValue MaskInIntReg = DAG.getBitcast(MaskIntVT, Mask); 6901 if (MaskIntVT.getSizeInBits() < 32) { 6902 MaskInIntReg = DAG.getNode(ISD::ZERO_EXTEND, DL, MVT::i32, MaskInIntReg); 6903 MaskIntVT = MVT::i32; 6904 } 6905 6906 // Count '1's with POPCNT. 6907 Increment = DAG.getNode(ISD::CTPOP, DL, MaskIntVT, MaskInIntReg); 6908 Increment = DAG.getZExtOrTrunc(Increment, DL, AddrVT); 6909 // Scale is an element size in bytes. 6910 SDValue Scale = DAG.getConstant(DataVT.getScalarSizeInBits() / 8, DL, 6911 AddrVT); 6912 Increment = DAG.getNode(ISD::MUL, DL, AddrVT, Increment, Scale); 6913 } else 6914 Increment = DAG.getConstant(DataVT.getStoreSize(), DL, AddrVT); 6915 6916 return DAG.getNode(ISD::ADD, DL, AddrVT, Addr, Increment); 6917 } 6918 6919 static SDValue clampDynamicVectorIndex(SelectionDAG &DAG, 6920 SDValue Idx, 6921 EVT VecVT, 6922 const SDLoc &dl) { 6923 if (isa<ConstantSDNode>(Idx)) 6924 return Idx; 6925 6926 EVT IdxVT = Idx.getValueType(); 6927 unsigned NElts = VecVT.getVectorNumElements(); 6928 if (isPowerOf2_32(NElts)) { 6929 APInt Imm = APInt::getLowBitsSet(IdxVT.getSizeInBits(), 6930 Log2_32(NElts)); 6931 return DAG.getNode(ISD::AND, dl, IdxVT, Idx, 6932 DAG.getConstant(Imm, dl, IdxVT)); 6933 } 6934 6935 return DAG.getNode(ISD::UMIN, dl, IdxVT, Idx, 6936 DAG.getConstant(NElts - 1, dl, IdxVT)); 6937 } 6938 6939 SDValue TargetLowering::getVectorElementPointer(SelectionDAG &DAG, 6940 SDValue VecPtr, EVT VecVT, 6941 SDValue Index) const { 6942 SDLoc dl(Index); 6943 // Make sure the index type is big enough to compute in. 6944 Index = DAG.getZExtOrTrunc(Index, dl, VecPtr.getValueType()); 6945 6946 EVT EltVT = VecVT.getVectorElementType(); 6947 6948 // Calculate the element offset and add it to the pointer. 6949 unsigned EltSize = EltVT.getSizeInBits() / 8; // FIXME: should be ABI size. 
6950 assert(EltSize * 8 == EltVT.getSizeInBits() && 6951 "Converting bits to bytes lost precision"); 6952 6953 Index = clampDynamicVectorIndex(DAG, Index, VecVT, dl); 6954 6955 EVT IdxVT = Index.getValueType(); 6956 6957 Index = DAG.getNode(ISD::MUL, dl, IdxVT, Index, 6958 DAG.getConstant(EltSize, dl, IdxVT)); 6959 return DAG.getMemBasePlusOffset(VecPtr, Index, dl); 6960 } 6961 6962 //===----------------------------------------------------------------------===// 6963 // Implementation of Emulated TLS Model 6964 //===----------------------------------------------------------------------===// 6965 6966 SDValue TargetLowering::LowerToTLSEmulatedModel(const GlobalAddressSDNode *GA, 6967 SelectionDAG &DAG) const { 6968 // Access to the address of TLS variable xyz is lowered to a function call: 6969 // __emutls_get_address( address of global variable named "__emutls_v.xyz" ) 6970 EVT PtrVT = getPointerTy(DAG.getDataLayout()); 6971 PointerType *VoidPtrType = Type::getInt8PtrTy(*DAG.getContext()); 6972 SDLoc dl(GA); 6973 6974 ArgListTy Args; 6975 ArgListEntry Entry; 6976 std::string NameString = ("__emutls_v." + GA->getGlobal()->getName()).str(); 6977 Module *VariableModule = const_cast<Module*>(GA->getGlobal()->getParent()); 6978 StringRef EmuTlsVarName(NameString); 6979 GlobalVariable *EmuTlsVar = VariableModule->getNamedGlobal(EmuTlsVarName); 6980 assert(EmuTlsVar && "Cannot find EmuTlsVar"); 6981 Entry.Node = DAG.getGlobalAddress(EmuTlsVar, dl, PtrVT); 6982 Entry.Ty = VoidPtrType; 6983 Args.push_back(Entry); 6984 6985 SDValue EmuTlsGetAddr = DAG.getExternalSymbol("__emutls_get_address", PtrVT); 6986 6987 TargetLowering::CallLoweringInfo CLI(DAG); 6988 CLI.setDebugLoc(dl).setChain(DAG.getEntryNode()); 6989 CLI.setLibCallee(CallingConv::C, VoidPtrType, EmuTlsGetAddr, std::move(Args)); 6990 std::pair<SDValue, SDValue> CallResult = LowerCallTo(CLI); 6991 6992 // TLSADDR will be codegen'ed as a call. Inform MFI that the function has 6993 // calls. At least for X86 targets; maybe good for other targets too? 6994 MachineFrameInfo &MFI = DAG.getMachineFunction().getFrameInfo(); 6995 MFI.setAdjustsStack(true); // Is this only for the X86 target?
6996 MFI.setHasCalls(true); 6997 6998 assert((GA->getOffset() == 0) && 6999 "Emulated TLS must have zero offset in GlobalAddressSDNode"); 7000 return CallResult.first; 7001 } 7002 7003 SDValue TargetLowering::lowerCmpEqZeroToCtlzSrl(SDValue Op, 7004 SelectionDAG &DAG) const { 7005 assert((Op->getOpcode() == ISD::SETCC) && "Input has to be a SETCC node."); 7006 if (!isCtlzFast()) 7007 return SDValue(); 7008 ISD::CondCode CC = cast<CondCodeSDNode>(Op.getOperand(2))->get(); 7009 SDLoc dl(Op); 7010 if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op.getOperand(1))) { 7011 if (C->isNullValue() && CC == ISD::SETEQ) { 7012 EVT VT = Op.getOperand(0).getValueType(); 7013 SDValue Zext = Op.getOperand(0); 7014 if (VT.bitsLT(MVT::i32)) { 7015 VT = MVT::i32; 7016 Zext = DAG.getNode(ISD::ZERO_EXTEND, dl, VT, Op.getOperand(0)); 7017 } 7018 unsigned Log2b = Log2_32(VT.getSizeInBits()); 7019 SDValue Clz = DAG.getNode(ISD::CTLZ, dl, VT, Zext); 7020 SDValue Scc = DAG.getNode(ISD::SRL, dl, VT, Clz, 7021 DAG.getConstant(Log2b, dl, MVT::i32)); 7022 return DAG.getNode(ISD::TRUNCATE, dl, MVT::i32, Scc); 7023 } 7024 } 7025 return SDValue(); 7026 } 7027 7028 SDValue TargetLowering::expandAddSubSat(SDNode *Node, SelectionDAG &DAG) const { 7029 unsigned Opcode = Node->getOpcode(); 7030 SDValue LHS = Node->getOperand(0); 7031 SDValue RHS = Node->getOperand(1); 7032 EVT VT = LHS.getValueType(); 7033 SDLoc dl(Node); 7034 7035 assert(VT == RHS.getValueType() && "Expected operands to be the same type"); 7036 assert(VT.isInteger() && "Expected operands to be integers"); 7037 7038 // usub.sat(a, b) -> umax(a, b) - b 7039 if (Opcode == ISD::USUBSAT && isOperationLegalOrCustom(ISD::UMAX, VT)) { 7040 SDValue Max = DAG.getNode(ISD::UMAX, dl, VT, LHS, RHS); 7041 return DAG.getNode(ISD::SUB, dl, VT, Max, RHS); 7042 } 7043 7044 if (Opcode == ISD::UADDSAT && isOperationLegalOrCustom(ISD::UMIN, VT)) { 7045 SDValue InvRHS = DAG.getNOT(dl, RHS, VT); 7046 SDValue Min = DAG.getNode(ISD::UMIN, dl, VT, LHS, InvRHS); 7047 return DAG.getNode(ISD::ADD, dl, VT, Min, RHS); 7048 } 7049 7050 unsigned OverflowOp; 7051 switch (Opcode) { 7052 case ISD::SADDSAT: 7053 OverflowOp = ISD::SADDO; 7054 break; 7055 case ISD::UADDSAT: 7056 OverflowOp = ISD::UADDO; 7057 break; 7058 case ISD::SSUBSAT: 7059 OverflowOp = ISD::SSUBO; 7060 break; 7061 case ISD::USUBSAT: 7062 OverflowOp = ISD::USUBO; 7063 break; 7064 default: 7065 llvm_unreachable("Expected method to receive signed or unsigned saturation " 7066 "addition or subtraction node."); 7067 } 7068 7069 unsigned BitWidth = LHS.getScalarValueSizeInBits(); 7070 EVT BoolVT = getSetCCResultType(DAG.getDataLayout(), *DAG.getContext(), VT); 7071 SDValue Result = DAG.getNode(OverflowOp, dl, DAG.getVTList(VT, BoolVT), 7072 LHS, RHS); 7073 SDValue SumDiff = Result.getValue(0); 7074 SDValue Overflow = Result.getValue(1); 7075 SDValue Zero = DAG.getConstant(0, dl, VT); 7076 SDValue AllOnes = DAG.getAllOnesConstant(dl, VT); 7077 7078 if (Opcode == ISD::UADDSAT) { 7079 if (getBooleanContents(VT) == ZeroOrNegativeOneBooleanContent) { 7080 // (LHS + RHS) | OverflowMask 7081 SDValue OverflowMask = DAG.getSExtOrTrunc(Overflow, dl, VT); 7082 return DAG.getNode(ISD::OR, dl, VT, SumDiff, OverflowMask); 7083 } 7084 // Overflow ? 0xffff.... 
: (LHS + RHS) 7085 return DAG.getSelect(dl, VT, Overflow, AllOnes, SumDiff); 7086 } else if (Opcode == ISD::USUBSAT) { 7087 if (getBooleanContents(VT) == ZeroOrNegativeOneBooleanContent) { 7088 // (LHS - RHS) & ~OverflowMask 7089 SDValue OverflowMask = DAG.getSExtOrTrunc(Overflow, dl, VT); 7090 SDValue Not = DAG.getNOT(dl, OverflowMask, VT); 7091 return DAG.getNode(ISD::AND, dl, VT, SumDiff, Not); 7092 } 7093 // Overflow ? 0 : (LHS - RHS) 7094 return DAG.getSelect(dl, VT, Overflow, Zero, SumDiff); 7095 } else { 7096 // SatMax -> Overflow && SumDiff < 0 7097 // SatMin -> Overflow && SumDiff >= 0 7098 APInt MinVal = APInt::getSignedMinValue(BitWidth); 7099 APInt MaxVal = APInt::getSignedMaxValue(BitWidth); 7100 SDValue SatMin = DAG.getConstant(MinVal, dl, VT); 7101 SDValue SatMax = DAG.getConstant(MaxVal, dl, VT); 7102 SDValue SumNeg = DAG.getSetCC(dl, BoolVT, SumDiff, Zero, ISD::SETLT); 7103 Result = DAG.getSelect(dl, VT, SumNeg, SatMax, SatMin); 7104 return DAG.getSelect(dl, VT, Overflow, Result, SumDiff); 7105 } 7106 } 7107 7108 SDValue 7109 TargetLowering::expandFixedPointMul(SDNode *Node, SelectionDAG &DAG) const { 7110 assert((Node->getOpcode() == ISD::SMULFIX || 7111 Node->getOpcode() == ISD::UMULFIX || 7112 Node->getOpcode() == ISD::SMULFIXSAT || 7113 Node->getOpcode() == ISD::UMULFIXSAT) && 7114 "Expected a fixed point multiplication opcode"); 7115 7116 SDLoc dl(Node); 7117 SDValue LHS = Node->getOperand(0); 7118 SDValue RHS = Node->getOperand(1); 7119 EVT VT = LHS.getValueType(); 7120 unsigned Scale = Node->getConstantOperandVal(2); 7121 bool Saturating = (Node->getOpcode() == ISD::SMULFIXSAT || 7122 Node->getOpcode() == ISD::UMULFIXSAT); 7123 bool Signed = (Node->getOpcode() == ISD::SMULFIX || 7124 Node->getOpcode() == ISD::SMULFIXSAT); 7125 EVT BoolVT = getSetCCResultType(DAG.getDataLayout(), *DAG.getContext(), VT); 7126 unsigned VTSize = VT.getScalarSizeInBits(); 7127 7128 if (!Scale) { 7129 // [us]mul.fix(a, b, 0) -> mul(a, b) 7130 if (!Saturating) { 7131 if (isOperationLegalOrCustom(ISD::MUL, VT)) 7132 return DAG.getNode(ISD::MUL, dl, VT, LHS, RHS); 7133 } else if (Signed && isOperationLegalOrCustom(ISD::SMULO, VT)) { 7134 SDValue Result = 7135 DAG.getNode(ISD::SMULO, dl, DAG.getVTList(VT, BoolVT), LHS, RHS); 7136 SDValue Product = Result.getValue(0); 7137 SDValue Overflow = Result.getValue(1); 7138 SDValue Zero = DAG.getConstant(0, dl, VT); 7139 7140 APInt MinVal = APInt::getSignedMinValue(VTSize); 7141 APInt MaxVal = APInt::getSignedMaxValue(VTSize); 7142 SDValue SatMin = DAG.getConstant(MinVal, dl, VT); 7143 SDValue SatMax = DAG.getConstant(MaxVal, dl, VT); 7144 SDValue ProdNeg = DAG.getSetCC(dl, BoolVT, Product, Zero, ISD::SETLT); 7145 Result = DAG.getSelect(dl, VT, ProdNeg, SatMax, SatMin); 7146 return DAG.getSelect(dl, VT, Overflow, Result, Product); 7147 } else if (!Signed && isOperationLegalOrCustom(ISD::UMULO, VT)) { 7148 SDValue Result = 7149 DAG.getNode(ISD::UMULO, dl, DAG.getVTList(VT, BoolVT), LHS, RHS); 7150 SDValue Product = Result.getValue(0); 7151 SDValue Overflow = Result.getValue(1); 7152 7153 APInt MaxVal = APInt::getMaxValue(VTSize); 7154 SDValue SatMax = DAG.getConstant(MaxVal, dl, VT); 7155 return DAG.getSelect(dl, VT, Overflow, SatMax, Product); 7156 } 7157 } 7158 7159 assert(((Signed && Scale < VTSize) || (!Signed && Scale <= VTSize)) && 7160 "Expected scale to be less than the number of bits if signed or at " 7161 "most the number of bits if unsigned."); 7162 assert(LHS.getValueType() == RHS.getValueType() && 7163 "Expected both operands to be the 
same type"); 7164 7165 // Get the upper and lower bits of the result. 7166 SDValue Lo, Hi; 7167 unsigned LoHiOp = Signed ? ISD::SMUL_LOHI : ISD::UMUL_LOHI; 7168 unsigned HiOp = Signed ? ISD::MULHS : ISD::MULHU; 7169 if (isOperationLegalOrCustom(LoHiOp, VT)) { 7170 SDValue Result = DAG.getNode(LoHiOp, dl, DAG.getVTList(VT, VT), LHS, RHS); 7171 Lo = Result.getValue(0); 7172 Hi = Result.getValue(1); 7173 } else if (isOperationLegalOrCustom(HiOp, VT)) { 7174 Lo = DAG.getNode(ISD::MUL, dl, VT, LHS, RHS); 7175 Hi = DAG.getNode(HiOp, dl, VT, LHS, RHS); 7176 } else if (VT.isVector()) { 7177 return SDValue(); 7178 } else { 7179 report_fatal_error("Unable to expand fixed point multiplication."); 7180 } 7181 7182 if (Scale == VTSize) 7183 // Result is just the top half since we'd be shifting by the width of the 7184 // operand. Overflow impossible so this works for both UMULFIX and 7185 // UMULFIXSAT. 7186 return Hi; 7187 7188 // The result will need to be shifted right by the scale since both operands 7189 // are scaled. The result is given to us in 2 halves, so we only want part of 7190 // both in the result. 7191 EVT ShiftTy = getShiftAmountTy(VT, DAG.getDataLayout()); 7192 SDValue Result = DAG.getNode(ISD::FSHR, dl, VT, Hi, Lo, 7193 DAG.getConstant(Scale, dl, ShiftTy)); 7194 if (!Saturating) 7195 return Result; 7196 7197 if (!Signed) { 7198 // Unsigned overflow happened if the upper (VTSize - Scale) bits (of the 7199 // widened multiplication) aren't all zeroes. 7200 7201 // Saturate to max if ((Hi >> Scale) != 0), 7202 // which is the same as if (Hi > ((1 << Scale) - 1)) 7203 APInt MaxVal = APInt::getMaxValue(VTSize); 7204 SDValue LowMask = DAG.getConstant(APInt::getLowBitsSet(VTSize, Scale), 7205 dl, VT); 7206 Result = DAG.getSelectCC(dl, Hi, LowMask, 7207 DAG.getConstant(MaxVal, dl, VT), Result, 7208 ISD::SETUGT); 7209 7210 return Result; 7211 } 7212 7213 // Signed overflow happened if the upper (VTSize - Scale + 1) bits (of the 7214 // widened multiplication) aren't all ones or all zeroes. 7215 7216 SDValue SatMin = DAG.getConstant(APInt::getSignedMinValue(VTSize), dl, VT); 7217 SDValue SatMax = DAG.getConstant(APInt::getSignedMaxValue(VTSize), dl, VT); 7218 7219 if (Scale == 0) { 7220 SDValue Sign = DAG.getNode(ISD::SRA, dl, VT, Lo, 7221 DAG.getConstant(VTSize - 1, dl, ShiftTy)); 7222 SDValue Overflow = DAG.getSetCC(dl, BoolVT, Hi, Sign, ISD::SETNE); 7223 // Saturated to SatMin if wide product is negative, and SatMax if wide 7224 // product is positive ... 7225 SDValue Zero = DAG.getConstant(0, dl, VT); 7226 SDValue ResultIfOverflow = DAG.getSelectCC(dl, Hi, Zero, SatMin, SatMax, 7227 ISD::SETLT); 7228 // ... but only if we overflowed. 7229 return DAG.getSelect(dl, VT, Overflow, ResultIfOverflow, Result); 7230 } 7231 7232 // We handled Scale==0 above so all the bits to examine is in Hi. 
7233 7234 // Saturate to max if ((Hi >> (Scale - 1)) > 0), 7235 // which is the same as if (Hi > (1 << (Scale - 1)) - 1) 7236 SDValue LowMask = DAG.getConstant(APInt::getLowBitsSet(VTSize, Scale - 1), 7237 dl, VT); 7238 Result = DAG.getSelectCC(dl, Hi, LowMask, SatMax, Result, ISD::SETGT); 7239 // Saturate to min if ((Hi >> (Scale - 1)) < -1), 7240 // which is the same as if (Hi < (-1 << (Scale - 1))) 7241 SDValue HighMask = 7242 DAG.getConstant(APInt::getHighBitsSet(VTSize, VTSize - Scale + 1), 7243 dl, VT); 7244 Result = DAG.getSelectCC(dl, Hi, HighMask, SatMin, Result, ISD::SETLT); 7245 return Result; 7246 } 7247 7248 SDValue 7249 TargetLowering::expandFixedPointDiv(unsigned Opcode, const SDLoc &dl, 7250 SDValue LHS, SDValue RHS, 7251 unsigned Scale, SelectionDAG &DAG) const { 7252 assert((Opcode == ISD::SDIVFIX || 7253 Opcode == ISD::UDIVFIX) && 7254 "Expected a fixed point division opcode"); 7255 7256 EVT VT = LHS.getValueType(); 7257 bool Signed = Opcode == ISD::SDIVFIX; 7258 EVT BoolVT = getSetCCResultType(DAG.getDataLayout(), *DAG.getContext(), VT); 7259 7260 // If there is enough room in the type to upscale the LHS or downscale the 7261 // RHS before the division, we can perform it in this type without having to 7262 // resize. For signed operations, the LHS headroom is the number of 7263 // redundant sign bits, and for unsigned ones it is the number of leading 7264 // zeroes. The headroom for the RHS is the number of trailing zeroes. 7265 unsigned LHSLead = Signed ? DAG.ComputeNumSignBits(LHS) - 1 7266 : DAG.computeKnownBits(LHS).countMinLeadingZeros(); 7267 unsigned RHSTrail = DAG.computeKnownBits(RHS).countMinTrailingZeros(); 7268 7269 if (LHSLead + RHSTrail < Scale) 7270 return SDValue(); 7271 7272 unsigned LHSShift = std::min(LHSLead, Scale); 7273 unsigned RHSShift = Scale - LHSShift; 7274 7275 // At this point, we know that if we shift the LHS up by LHSShift and the 7276 // RHS down by RHSShift, we can emit a regular division with a final scaling 7277 // factor of Scale. 7278 7279 EVT ShiftTy = getShiftAmountTy(VT, DAG.getDataLayout()); 7280 if (LHSShift) 7281 LHS = DAG.getNode(ISD::SHL, dl, VT, LHS, 7282 DAG.getConstant(LHSShift, dl, ShiftTy)); 7283 if (RHSShift) 7284 RHS = DAG.getNode(Signed ? ISD::SRA : ISD::SRL, dl, VT, RHS, 7285 DAG.getConstant(RHSShift, dl, ShiftTy)); 7286 7287 SDValue Quot; 7288 if (Signed) { 7289 // For signed operations, if the resulting quotient is negative and the 7290 // remainder is nonzero, subtract 1 from the quotient to round towards 7291 // negative infinity. 7292 SDValue Rem; 7293 // FIXME: Ideally we would always produce an SDIVREM here, but if the 7294 // type isn't legal, SDIVREM cannot be expanded. There is no reason why 7295 // we couldn't just form a libcall, but the type legalizer doesn't do it.
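// For example, -3 / 2 gives a truncating quotient of -1 with remainder -1; // the remainder is nonzero and the quotient is negative, so the code below // subtracts 1 to produce the correctly rounded result -2.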
7296 if (isTypeLegal(VT) && 7297 isOperationLegalOrCustom(ISD::SDIVREM, VT)) { 7298 Quot = DAG.getNode(ISD::SDIVREM, dl, 7299 DAG.getVTList(VT, VT), 7300 LHS, RHS); 7301 Rem = Quot.getValue(1); 7302 Quot = Quot.getValue(0); 7303 } else { 7304 Quot = DAG.getNode(ISD::SDIV, dl, VT, 7305 LHS, RHS); 7306 Rem = DAG.getNode(ISD::SREM, dl, VT, 7307 LHS, RHS); 7308 } 7309 SDValue Zero = DAG.getConstant(0, dl, VT); 7310 SDValue RemNonZero = DAG.getSetCC(dl, BoolVT, Rem, Zero, ISD::SETNE); 7311 SDValue LHSNeg = DAG.getSetCC(dl, BoolVT, LHS, Zero, ISD::SETLT); 7312 SDValue RHSNeg = DAG.getSetCC(dl, BoolVT, RHS, Zero, ISD::SETLT); 7313 SDValue QuotNeg = DAG.getNode(ISD::XOR, dl, BoolVT, LHSNeg, RHSNeg); 7314 SDValue Sub1 = DAG.getNode(ISD::SUB, dl, VT, Quot, 7315 DAG.getConstant(1, dl, VT)); 7316 Quot = DAG.getSelect(dl, VT, 7317 DAG.getNode(ISD::AND, dl, BoolVT, RemNonZero, QuotNeg), 7318 Sub1, Quot); 7319 } else 7320 Quot = DAG.getNode(ISD::UDIV, dl, VT, 7321 LHS, RHS); 7322 7323 // TODO: Saturation. 7324 7325 return Quot; 7326 } 7327 7328 void TargetLowering::expandUADDSUBO( 7329 SDNode *Node, SDValue &Result, SDValue &Overflow, SelectionDAG &DAG) const { 7330 SDLoc dl(Node); 7331 SDValue LHS = Node->getOperand(0); 7332 SDValue RHS = Node->getOperand(1); 7333 bool IsAdd = Node->getOpcode() == ISD::UADDO; 7334 7335 // If ADD/SUBCARRY is legal, use that instead. 7336 unsigned OpcCarry = IsAdd ? ISD::ADDCARRY : ISD::SUBCARRY; 7337 if (isOperationLegalOrCustom(OpcCarry, Node->getValueType(0))) { 7338 SDValue CarryIn = DAG.getConstant(0, dl, Node->getValueType(1)); 7339 SDValue NodeCarry = DAG.getNode(OpcCarry, dl, Node->getVTList(), 7340 { LHS, RHS, CarryIn }); 7341 Result = SDValue(NodeCarry.getNode(), 0); 7342 Overflow = SDValue(NodeCarry.getNode(), 1); 7343 return; 7344 } 7345 7346 Result = DAG.getNode(IsAdd ? ISD::ADD : ISD::SUB, dl, 7347 LHS.getValueType(), LHS, RHS); 7348 7349 EVT ResultType = Node->getValueType(1); 7350 EVT SetCCType = getSetCCResultType( 7351 DAG.getDataLayout(), *DAG.getContext(), Node->getValueType(0)); 7352 ISD::CondCode CC = IsAdd ? ISD::SETULT : ISD::SETUGT; 7353 SDValue SetCC = DAG.getSetCC(dl, SetCCType, Result, LHS, CC); 7354 Overflow = DAG.getBoolExtOrTrunc(SetCC, dl, ResultType, ResultType); 7355 } 7356 7357 void TargetLowering::expandSADDSUBO( 7358 SDNode *Node, SDValue &Result, SDValue &Overflow, SelectionDAG &DAG) const { 7359 SDLoc dl(Node); 7360 SDValue LHS = Node->getOperand(0); 7361 SDValue RHS = Node->getOperand(1); 7362 bool IsAdd = Node->getOpcode() == ISD::SADDO; 7363 7364 Result = DAG.getNode(IsAdd ? ISD::ADD : ISD::SUB, dl, 7365 LHS.getValueType(), LHS, RHS); 7366 7367 EVT ResultType = Node->getValueType(1); 7368 EVT OType = getSetCCResultType( 7369 DAG.getDataLayout(), *DAG.getContext(), Node->getValueType(0)); 7370 7371 // If SADDSAT/SSUBSAT is legal, compare results to detect overflow. 7372 unsigned OpcSat = IsAdd ? ISD::SADDSAT : ISD::SSUBSAT; 7373 if (isOperationLegalOrCustom(OpcSat, LHS.getValueType())) { 7374 SDValue Sat = DAG.getNode(OpcSat, dl, LHS.getValueType(), LHS, RHS); 7375 SDValue SetCC = DAG.getSetCC(dl, OType, Result, Sat, ISD::SETNE); 7376 Overflow = DAG.getBoolExtOrTrunc(SetCC, dl, ResultType, ResultType); 7377 return; 7378 } 7379 7380 SDValue Zero = DAG.getConstant(0, dl, LHS.getValueType()); 7381 7382 // For an addition, the result should be less than one of the operands (LHS) 7383 // if and only if the other operand (RHS) is negative, otherwise there will 7384 // be overflow. 
7385 // For a subtraction, the result should be less than one of the operands 7386 // (LHS) if and only if the other operand (RHS) is (non-zero) positive, 7387 // otherwise there will be overflow. 7388 SDValue ResultLowerThanLHS = DAG.getSetCC(dl, OType, Result, LHS, ISD::SETLT); 7389 SDValue ConditionRHS = 7390 DAG.getSetCC(dl, OType, RHS, Zero, IsAdd ? ISD::SETLT : ISD::SETGT); 7391 7392 Overflow = DAG.getBoolExtOrTrunc( 7393 DAG.getNode(ISD::XOR, dl, OType, ConditionRHS, ResultLowerThanLHS), dl, 7394 ResultType, ResultType); 7395 } 7396 7397 bool TargetLowering::expandMULO(SDNode *Node, SDValue &Result, 7398 SDValue &Overflow, SelectionDAG &DAG) const { 7399 SDLoc dl(Node); 7400 EVT VT = Node->getValueType(0); 7401 EVT SetCCVT = getSetCCResultType(DAG.getDataLayout(), *DAG.getContext(), VT); 7402 SDValue LHS = Node->getOperand(0); 7403 SDValue RHS = Node->getOperand(1); 7404 bool isSigned = Node->getOpcode() == ISD::SMULO; 7405 7406 // For power-of-two multiplications we can use a simpler shift expansion. 7407 if (ConstantSDNode *RHSC = isConstOrConstSplat(RHS)) { 7408 const APInt &C = RHSC->getAPIntValue(); 7409 // mulo(X, 1 << S) -> { X << S, (X << S) >> S != X } 7410 if (C.isPowerOf2()) { 7411 // smulo(x, signed_min) is the same as umulo(x, signed_min). 7412 bool UseArithShift = isSigned && !C.isMinSignedValue(); 7413 EVT ShiftAmtTy = getShiftAmountTy(VT, DAG.getDataLayout()); 7414 SDValue ShiftAmt = DAG.getConstant(C.logBase2(), dl, ShiftAmtTy); 7415 Result = DAG.getNode(ISD::SHL, dl, VT, LHS, ShiftAmt); 7416 Overflow = DAG.getSetCC(dl, SetCCVT, 7417 DAG.getNode(UseArithShift ? ISD::SRA : ISD::SRL, 7418 dl, VT, Result, ShiftAmt), 7419 LHS, ISD::SETNE); 7420 return true; 7421 } 7422 } 7423 7424 EVT WideVT = EVT::getIntegerVT(*DAG.getContext(), VT.getScalarSizeInBits() * 2); 7425 if (VT.isVector()) 7426 WideVT = EVT::getVectorVT(*DAG.getContext(), WideVT, 7427 VT.getVectorNumElements()); 7428 7429 SDValue BottomHalf; 7430 SDValue TopHalf; 7431 static const unsigned Ops[2][3] = 7432 { { ISD::MULHU, ISD::UMUL_LOHI, ISD::ZERO_EXTEND }, 7433 { ISD::MULHS, ISD::SMUL_LOHI, ISD::SIGN_EXTEND }}; 7434 if (isOperationLegalOrCustom(Ops[isSigned][0], VT)) { 7435 BottomHalf = DAG.getNode(ISD::MUL, dl, VT, LHS, RHS); 7436 TopHalf = DAG.getNode(Ops[isSigned][0], dl, VT, LHS, RHS); 7437 } else if (isOperationLegalOrCustom(Ops[isSigned][1], VT)) { 7438 BottomHalf = DAG.getNode(Ops[isSigned][1], dl, DAG.getVTList(VT, VT), LHS, 7439 RHS); 7440 TopHalf = BottomHalf.getValue(1); 7441 } else if (isTypeLegal(WideVT)) { 7442 LHS = DAG.getNode(Ops[isSigned][2], dl, WideVT, LHS); 7443 RHS = DAG.getNode(Ops[isSigned][2], dl, WideVT, RHS); 7444 SDValue Mul = DAG.getNode(ISD::MUL, dl, WideVT, LHS, RHS); 7445 BottomHalf = DAG.getNode(ISD::TRUNCATE, dl, VT, Mul); 7446 SDValue ShiftAmt = DAG.getConstant(VT.getScalarSizeInBits(), dl, 7447 getShiftAmountTy(WideVT, DAG.getDataLayout())); 7448 TopHalf = DAG.getNode(ISD::TRUNCATE, dl, VT, 7449 DAG.getNode(ISD::SRL, dl, WideVT, Mul, ShiftAmt)); 7450 } else { 7451 if (VT.isVector()) 7452 return false; 7453 7454 // We can fall back to a libcall with an illegal type for the MUL if we 7455 // have a big enough libcall. 7456 // Also, we can fall back to a division in some cases, but that's a big 7457 // performance hit in the general case.
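// For example, an i64 [SU]MULO on a target that provides neither MULH / // MUL_LOHI for i64 nor a legal i128 type ends up here: the full product is // computed by the MUL_I128 libcall (__multi3) and its halves are examined // below.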
7458 RTLIB::Libcall LC = RTLIB::UNKNOWN_LIBCALL; 7459 if (WideVT == MVT::i16) 7460 LC = RTLIB::MUL_I16; 7461 else if (WideVT == MVT::i32) 7462 LC = RTLIB::MUL_I32; 7463 else if (WideVT == MVT::i64) 7464 LC = RTLIB::MUL_I64; 7465 else if (WideVT == MVT::i128) 7466 LC = RTLIB::MUL_I128; 7467 assert(LC != RTLIB::UNKNOWN_LIBCALL && "Cannot expand this operation!"); 7468 7469 SDValue HiLHS; 7470 SDValue HiRHS; 7471 if (isSigned) { 7472 // The high part is obtained by SRA'ing all but one of the bits of the 7473 // low part. 7474 unsigned LoSize = VT.getSizeInBits(); 7475 HiLHS = 7476 DAG.getNode(ISD::SRA, dl, VT, LHS, 7477 DAG.getConstant(LoSize - 1, dl, 7478 getPointerTy(DAG.getDataLayout()))); 7479 HiRHS = 7480 DAG.getNode(ISD::SRA, dl, VT, RHS, 7481 DAG.getConstant(LoSize - 1, dl, 7482 getPointerTy(DAG.getDataLayout()))); 7483 } else { 7484 HiLHS = DAG.getConstant(0, dl, VT); 7485 HiRHS = DAG.getConstant(0, dl, VT); 7486 } 7487 7488 // Here we're passing the two arguments explicitly as four arguments that 7489 // are pre-lowered to the correct types. This all depends upon WideVT not 7490 // being a legal type for the architecture, and thus having to be split 7491 // into two arguments. 7492 SDValue Ret; 7493 TargetLowering::MakeLibCallOptions CallOptions; 7494 CallOptions.setSExt(isSigned); 7495 CallOptions.setIsPostTypeLegalization(true); 7496 if (shouldSplitFunctionArgumentsAsLittleEndian(DAG.getDataLayout())) { 7497 // Halves of WideVT are packed into registers in different order 7498 // depending on platform endianness. This is usually handled by 7499 // the C calling convention, but we can't defer to it in 7500 // the legalizer. 7501 SDValue Args[] = { LHS, HiLHS, RHS, HiRHS }; 7502 Ret = makeLibCall(DAG, LC, WideVT, Args, CallOptions, dl).first; 7503 } else { 7504 SDValue Args[] = { HiLHS, LHS, HiRHS, RHS }; 7505 Ret = makeLibCall(DAG, LC, WideVT, Args, CallOptions, dl).first; 7506 } 7507 assert(Ret.getOpcode() == ISD::MERGE_VALUES && 7508 "Ret value is a collection of constituent nodes holding the result."); 7509 if (DAG.getDataLayout().isLittleEndian()) { 7510 // Same as above. 7511 BottomHalf = Ret.getOperand(0); 7512 TopHalf = Ret.getOperand(1); 7513 } else { 7514 BottomHalf = Ret.getOperand(1); 7515 TopHalf = Ret.getOperand(0); 7516 } 7517 } 7518 7519 Result = BottomHalf; 7520 if (isSigned) { 7521 SDValue ShiftAmt = DAG.getConstant( 7522 VT.getScalarSizeInBits() - 1, dl, 7523 getShiftAmountTy(BottomHalf.getValueType(), DAG.getDataLayout())); 7524 SDValue Sign = DAG.getNode(ISD::SRA, dl, VT, BottomHalf, ShiftAmt); 7525 Overflow = DAG.getSetCC(dl, SetCCVT, TopHalf, Sign, ISD::SETNE); 7526 } else { 7527 Overflow = DAG.getSetCC(dl, SetCCVT, TopHalf, 7528 DAG.getConstant(0, dl, VT), ISD::SETNE); 7529 } 7530 7531 // Truncate the result if SetCC returns a larger type than needed.
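// (getSetCCResultType may be wider than the node's declared overflow type, // e.g. an i8 setcc result feeding an i1 overflow flag, hence the truncate // below.)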
7532 EVT RType = Node->getValueType(1); 7533 if (RType.getSizeInBits() < Overflow.getValueSizeInBits()) 7534 Overflow = DAG.getNode(ISD::TRUNCATE, dl, RType, Overflow); 7535 7536 assert(RType.getSizeInBits() == Overflow.getValueSizeInBits() && 7537 "Unexpected result type for S/UMULO legalization"); 7538 return true; 7539 } 7540 7541 SDValue TargetLowering::expandVecReduce(SDNode *Node, SelectionDAG &DAG) const { 7542 SDLoc dl(Node); 7543 bool NoNaN = Node->getFlags().hasNoNaNs(); 7544 unsigned BaseOpcode = 0; 7545 switch (Node->getOpcode()) { 7546 default: llvm_unreachable("Expected VECREDUCE opcode"); 7547 case ISD::VECREDUCE_FADD: BaseOpcode = ISD::FADD; break; 7548 case ISD::VECREDUCE_FMUL: BaseOpcode = ISD::FMUL; break; 7549 case ISD::VECREDUCE_ADD: BaseOpcode = ISD::ADD; break; 7550 case ISD::VECREDUCE_MUL: BaseOpcode = ISD::MUL; break; 7551 case ISD::VECREDUCE_AND: BaseOpcode = ISD::AND; break; 7552 case ISD::VECREDUCE_OR: BaseOpcode = ISD::OR; break; 7553 case ISD::VECREDUCE_XOR: BaseOpcode = ISD::XOR; break; 7554 case ISD::VECREDUCE_SMAX: BaseOpcode = ISD::SMAX; break; 7555 case ISD::VECREDUCE_SMIN: BaseOpcode = ISD::SMIN; break; 7556 case ISD::VECREDUCE_UMAX: BaseOpcode = ISD::UMAX; break; 7557 case ISD::VECREDUCE_UMIN: BaseOpcode = ISD::UMIN; break; 7558 case ISD::VECREDUCE_FMAX: 7559 BaseOpcode = NoNaN ? ISD::FMAXNUM : ISD::FMAXIMUM; 7560 break; 7561 case ISD::VECREDUCE_FMIN: 7562 BaseOpcode = NoNaN ? ISD::FMINNUM : ISD::FMINIMUM; 7563 break; 7564 } 7565 7566 SDValue Op = Node->getOperand(0); 7567 EVT VT = Op.getValueType(); 7568 7569 // Try to use a shuffle reduction for power of two vectors. 7570 if (VT.isPow2VectorType()) { 7571 while (VT.getVectorNumElements() > 1) { 7572 EVT HalfVT = VT.getHalfNumVectorElementsVT(*DAG.getContext()); 7573 if (!isOperationLegalOrCustom(BaseOpcode, HalfVT)) 7574 break; 7575 7576 SDValue Lo, Hi; 7577 std::tie(Lo, Hi) = DAG.SplitVector(Op, dl); 7578 Op = DAG.getNode(BaseOpcode, dl, HalfVT, Lo, Hi); 7579 VT = HalfVT; 7580 } 7581 } 7582 7583 EVT EltVT = VT.getVectorElementType(); 7584 unsigned NumElts = VT.getVectorNumElements(); 7585 7586 SmallVector<SDValue, 8> Ops; 7587 DAG.ExtractVectorElements(Op, Ops, 0, NumElts); 7588 7589 SDValue Res = Ops[0]; 7590 for (unsigned i = 1; i < NumElts; i++) 7591 Res = DAG.getNode(BaseOpcode, dl, EltVT, Res, Ops[i], Node->getFlags()); 7592 7593 // Result type may be wider than element type. 7594 if (EltVT != Node->getValueType(0)) 7595 Res = DAG.getNode(ISD::ANY_EXTEND, dl, Node->getValueType(0), Res); 7596 return Res; 7597 } 7598
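// As a sketch of the shuffle reduction above: a v4f32 VECREDUCE_FADD with // FADD legal on v2f32 is first split and reduced pairwise, // <a,b,c,d> -> fadd(<a,b>, <c,d>) == <a+c, b+d> // and the remaining two elements are then extracted and combined with a // single scalar FADD, giving (a+c) + (b+d).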