//===-- TargetLowering.cpp - Implement the TargetLowering class ----------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This implements the TargetLowering class.
//
//===----------------------------------------------------------------------===//

#include "llvm/CodeGen/TargetLowering.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/CodeGen/CallingConvLower.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineJumpTableInfo.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/SelectionDAG.h"
#include "llvm/CodeGen/TargetRegisterInfo.h"
#include "llvm/CodeGen/TargetSubtargetInfo.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/GlobalVariable.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/MC/MCAsmInfo.h"
#include "llvm/MC/MCExpr.h"
#include "llvm/Support/DivisionByConstantInfo.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/KnownBits.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/Target/TargetLoweringObjectFile.h"
#include "llvm/Target/TargetMachine.h"
#include <cctype>
using namespace llvm;

/// NOTE: The TargetMachine owns TLOF.
TargetLowering::TargetLowering(const TargetMachine &tm)
    : TargetLoweringBase(tm) {}

const char *TargetLowering::getTargetNodeName(unsigned Opcode) const {
  return nullptr;
}

bool TargetLowering::isPositionIndependent() const {
  return getTargetMachine().isPositionIndependent();
}

/// Check whether a given call node is in tail position within its function. If
/// so, it sets Chain to the input chain of the tail call.
bool TargetLowering::isInTailCallPosition(SelectionDAG &DAG, SDNode *Node,
                                          SDValue &Chain) const {
  const Function &F = DAG.getMachineFunction().getFunction();

  // First, check if tail calls have been disabled in this function.
  if (F.getFnAttribute("disable-tail-calls").getValueAsBool())
    return false;

  // Conservatively require the attributes of the call to match those of
  // the return. Ignore the following attributes because they don't affect the
  // call sequence.
  AttrBuilder CallerAttrs(F.getContext(), F.getAttributes().getRetAttrs());
  for (const auto &Attr : {Attribute::Alignment, Attribute::Dereferenceable,
                           Attribute::DereferenceableOrNull, Attribute::NoAlias,
                           Attribute::NonNull})
    CallerAttrs.removeAttribute(Attr);

  if (CallerAttrs.hasAttributes())
    return false;

  // It's not safe to eliminate the sign / zero extension of the return value.
  if (CallerAttrs.contains(Attribute::ZExt) ||
      CallerAttrs.contains(Attribute::SExt))
    return false;

  // Check if the only use is a function return node.
  return isUsedByReturnOnly(Node, Chain);
}

bool TargetLowering::parametersInCSRMatch(const MachineRegisterInfo &MRI,
    const uint32_t *CallerPreservedMask,
    const SmallVectorImpl<CCValAssign> &ArgLocs,
    const SmallVectorImpl<SDValue> &OutVals) const {
  for (unsigned I = 0, E = ArgLocs.size(); I != E; ++I) {
    const CCValAssign &ArgLoc = ArgLocs[I];
    if (!ArgLoc.isRegLoc())
      continue;
    MCRegister Reg = ArgLoc.getLocReg();
    // Only look at callee saved registers.
    if (MachineOperand::clobbersPhysReg(CallerPreservedMask, Reg))
      continue;
    // Check that we pass the value used for the caller.
    // (We look for a CopyFromReg reading a virtual register that is used
    //  for the function live-in value of register Reg)
    SDValue Value = OutVals[I];
    if (Value->getOpcode() != ISD::CopyFromReg)
      return false;
    Register ArgReg = cast<RegisterSDNode>(Value->getOperand(1))->getReg();
    if (MRI.getLiveInPhysReg(ArgReg) != Reg)
      return false;
  }
  return true;
}

/// Set CallLoweringInfo attribute flags based on a call instruction
/// and called function attributes.
void TargetLoweringBase::ArgListEntry::setAttributes(const CallBase *Call,
                                                     unsigned ArgIdx) {
  IsSExt = Call->paramHasAttr(ArgIdx, Attribute::SExt);
  IsZExt = Call->paramHasAttr(ArgIdx, Attribute::ZExt);
  IsInReg = Call->paramHasAttr(ArgIdx, Attribute::InReg);
  IsSRet = Call->paramHasAttr(ArgIdx, Attribute::StructRet);
  IsNest = Call->paramHasAttr(ArgIdx, Attribute::Nest);
  IsByVal = Call->paramHasAttr(ArgIdx, Attribute::ByVal);
  IsPreallocated = Call->paramHasAttr(ArgIdx, Attribute::Preallocated);
  IsInAlloca = Call->paramHasAttr(ArgIdx, Attribute::InAlloca);
  IsReturned = Call->paramHasAttr(ArgIdx, Attribute::Returned);
  IsSwiftSelf = Call->paramHasAttr(ArgIdx, Attribute::SwiftSelf);
  IsSwiftAsync = Call->paramHasAttr(ArgIdx, Attribute::SwiftAsync);
  IsSwiftError = Call->paramHasAttr(ArgIdx, Attribute::SwiftError);
  Alignment = Call->getParamStackAlign(ArgIdx);
  IndirectType = nullptr;
  assert(IsByVal + IsPreallocated + IsInAlloca <= 1 &&
         "multiple ABI attributes?");
  if (IsByVal) {
    IndirectType = Call->getParamByValType(ArgIdx);
    if (!Alignment)
      Alignment = Call->getParamAlign(ArgIdx);
  }
  if (IsPreallocated)
    IndirectType = Call->getParamPreallocatedType(ArgIdx);
  if (IsInAlloca)
    IndirectType = Call->getParamInAllocaType(ArgIdx);
}

/// Generate a libcall taking the given operands as arguments and returning a
/// result of type RetVT.
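/// If no input chain is supplied, the DAG entry node is used, and an unknown
/// libcall (RTLIB::UNKNOWN_LIBCALL) triggers a fatal error.
///
/// Illustrative sketch of a typical use from a target's lowering code; the
/// surrounding names (TLI, LHS, RHS, dl) are assumed for the example and are
/// not taken from any particular in-tree target:
/// \code
///   SDValue Ops[2] = {LHS, RHS};
///   TargetLowering::MakeLibCallOptions CallOptions;
///   std::pair<SDValue, SDValue> Res =
///       TLI.makeLibCall(DAG, RTLIB::REM_F64, MVT::f64, Ops, CallOptions, dl);
///   // Res.first is the call result, Res.second is the output chain.
/// \endcode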
std::pair<SDValue, SDValue>
TargetLowering::makeLibCall(SelectionDAG &DAG, RTLIB::Libcall LC, EVT RetVT,
                            ArrayRef<SDValue> Ops,
                            MakeLibCallOptions CallOptions,
                            const SDLoc &dl,
                            SDValue InChain) const {
  if (!InChain)
    InChain = DAG.getEntryNode();

  TargetLowering::ArgListTy Args;
  Args.reserve(Ops.size());

  TargetLowering::ArgListEntry Entry;
  for (unsigned i = 0; i < Ops.size(); ++i) {
    SDValue NewOp = Ops[i];
    Entry.Node = NewOp;
    Entry.Ty = Entry.Node.getValueType().getTypeForEVT(*DAG.getContext());
    Entry.IsSExt = shouldSignExtendTypeInLibCall(NewOp.getValueType(),
                                                 CallOptions.IsSExt);
    Entry.IsZExt = !Entry.IsSExt;

    if (CallOptions.IsSoften &&
        !shouldExtendTypeInLibCall(CallOptions.OpsVTBeforeSoften[i])) {
      Entry.IsSExt = Entry.IsZExt = false;
    }
    Args.push_back(Entry);
  }

  if (LC == RTLIB::UNKNOWN_LIBCALL)
    report_fatal_error("Unsupported library call operation!");
  SDValue Callee = DAG.getExternalSymbol(getLibcallName(LC),
                                         getPointerTy(DAG.getDataLayout()));

  Type *RetTy = RetVT.getTypeForEVT(*DAG.getContext());
  TargetLowering::CallLoweringInfo CLI(DAG);
  bool signExtend = shouldSignExtendTypeInLibCall(RetVT, CallOptions.IsSExt);
  bool zeroExtend = !signExtend;

  if (CallOptions.IsSoften &&
      !shouldExtendTypeInLibCall(CallOptions.RetVTBeforeSoften)) {
    signExtend = zeroExtend = false;
  }

  CLI.setDebugLoc(dl)
      .setChain(InChain)
      .setLibCallee(getLibcallCallingConv(LC), RetTy, Callee, std::move(Args))
      .setNoReturn(CallOptions.DoesNotReturn)
      .setDiscardResult(!CallOptions.IsReturnValueUsed)
      .setIsPostTypeLegalization(CallOptions.IsPostTypeLegalization)
      .setSExtResult(signExtend)
      .setZExtResult(zeroExtend);
  return LowerCallTo(CLI);
}

bool TargetLowering::findOptimalMemOpLowering(
    std::vector<EVT> &MemOps, unsigned Limit, const MemOp &Op, unsigned DstAS,
    unsigned SrcAS, const AttributeList &FuncAttributes) const {
  if (Op.isMemcpyWithFixedDstAlign() && Op.getSrcAlign() < Op.getDstAlign())
    return false;

  EVT VT = getOptimalMemOpType(Op, FuncAttributes);

  if (VT == MVT::Other) {
    // Use the largest integer type whose alignment constraints are satisfied.
    // We only need to check DstAlign here as SrcAlign is always greater than
    // or equal to DstAlign (or zero).
    VT = MVT::i64;
    if (Op.isFixedDstAlign())
      while (Op.getDstAlign() < (VT.getSizeInBits() / 8) &&
             !allowsMisalignedMemoryAccesses(VT, DstAS, Op.getDstAlign()))
        VT = (MVT::SimpleValueType)(VT.getSimpleVT().SimpleTy - 1);
    assert(VT.isInteger());

    // Find the largest legal integer type.
    MVT LVT = MVT::i64;
    while (!isTypeLegal(LVT))
      LVT = (MVT::SimpleValueType)(LVT.SimpleTy - 1);
    assert(LVT.isInteger());

    // If the type we've chosen is larger than the largest legal integer type
    // then use that instead.
    if (VT.bitsGT(LVT))
      VT = LVT;
  }

  unsigned NumMemOps = 0;
  uint64_t Size = Op.size();
  while (Size) {
    unsigned VTSize = VT.getSizeInBits() / 8;
    while (VTSize > Size) {
      // For now, only use non-vector loads / stores for the left-over pieces.
      EVT NewVT = VT;
      unsigned NewVTSize;

      bool Found = false;
      if (VT.isVector() || VT.isFloatingPoint()) {
        NewVT = (VT.getSizeInBits() > 64) ?
            MVT::i64 : MVT::i32;
        if (isOperationLegalOrCustom(ISD::STORE, NewVT) &&
            isSafeMemOpType(NewVT.getSimpleVT()))
          Found = true;
        else if (NewVT == MVT::i64 &&
                 isOperationLegalOrCustom(ISD::STORE, MVT::f64) &&
                 isSafeMemOpType(MVT::f64)) {
          // i64 is usually not legal on 32-bit targets, but f64 may be.
          NewVT = MVT::f64;
          Found = true;
        }
      }

      if (!Found) {
        do {
          NewVT = (MVT::SimpleValueType)(NewVT.getSimpleVT().SimpleTy - 1);
          if (NewVT == MVT::i8)
            break;
        } while (!isSafeMemOpType(NewVT.getSimpleVT()));
      }
      NewVTSize = NewVT.getSizeInBits() / 8;

      // If the new VT cannot cover all of the remaining bits, then consider
      // issuing a (or a pair of) unaligned and overlapping load / store.
      bool Fast;
      if (NumMemOps && Op.allowOverlap() && NewVTSize < Size &&
          allowsMisalignedMemoryAccesses(
              VT, DstAS, Op.isFixedDstAlign() ? Op.getDstAlign() : Align(1),
              MachineMemOperand::MONone, &Fast) &&
          Fast)
        VTSize = Size;
      else {
        VT = NewVT;
        VTSize = NewVTSize;
      }
    }

    if (++NumMemOps > Limit)
      return false;

    MemOps.push_back(VT);
    Size -= VTSize;
  }

  return true;
}

/// Soften the operands of a comparison. This code is shared among BR_CC,
/// SELECT_CC, and SETCC handlers.
void TargetLowering::softenSetCCOperands(SelectionDAG &DAG, EVT VT,
                                         SDValue &NewLHS, SDValue &NewRHS,
                                         ISD::CondCode &CCCode,
                                         const SDLoc &dl, const SDValue OldLHS,
                                         const SDValue OldRHS) const {
  SDValue Chain;
  return softenSetCCOperands(DAG, VT, NewLHS, NewRHS, CCCode, dl, OldLHS,
                             OldRHS, Chain);
}

void TargetLowering::softenSetCCOperands(SelectionDAG &DAG, EVT VT,
                                         SDValue &NewLHS, SDValue &NewRHS,
                                         ISD::CondCode &CCCode,
                                         const SDLoc &dl, const SDValue OldLHS,
                                         const SDValue OldRHS,
                                         SDValue &Chain,
                                         bool IsSignaling) const {
  // FIXME: Currently we cannot really respect all IEEE predicates due to
  // libgcc not supporting it. We can update this code when libgcc provides
  // such functions.

  assert((VT == MVT::f32 || VT == MVT::f64 || VT == MVT::f128 ||
          VT == MVT::ppcf128) && "Unsupported setcc type!");

  // Expand into one or more soft-fp libcall(s).
  RTLIB::Libcall LC1 = RTLIB::UNKNOWN_LIBCALL, LC2 = RTLIB::UNKNOWN_LIBCALL;
  bool ShouldInvertCC = false;
  switch (CCCode) {
  case ISD::SETEQ:
  case ISD::SETOEQ:
    LC1 = (VT == MVT::f32) ? RTLIB::OEQ_F32 :
          (VT == MVT::f64) ? RTLIB::OEQ_F64 :
          (VT == MVT::f128) ? RTLIB::OEQ_F128 : RTLIB::OEQ_PPCF128;
    break;
  case ISD::SETNE:
  case ISD::SETUNE:
    LC1 = (VT == MVT::f32) ? RTLIB::UNE_F32 :
          (VT == MVT::f64) ? RTLIB::UNE_F64 :
          (VT == MVT::f128) ? RTLIB::UNE_F128 : RTLIB::UNE_PPCF128;
    break;
  case ISD::SETGE:
  case ISD::SETOGE:
    LC1 = (VT == MVT::f32) ? RTLIB::OGE_F32 :
          (VT == MVT::f64) ? RTLIB::OGE_F64 :
          (VT == MVT::f128) ? RTLIB::OGE_F128 : RTLIB::OGE_PPCF128;
    break;
  case ISD::SETLT:
  case ISD::SETOLT:
    LC1 = (VT == MVT::f32) ? RTLIB::OLT_F32 :
          (VT == MVT::f64) ? RTLIB::OLT_F64 :
          (VT == MVT::f128) ? RTLIB::OLT_F128 : RTLIB::OLT_PPCF128;
    break;
  case ISD::SETLE:
  case ISD::SETOLE:
    LC1 = (VT == MVT::f32) ? RTLIB::OLE_F32 :
          (VT == MVT::f64) ? RTLIB::OLE_F64 :
          (VT == MVT::f128) ? RTLIB::OLE_F128 : RTLIB::OLE_PPCF128;
    break;
  case ISD::SETGT:
  case ISD::SETOGT:
    LC1 = (VT == MVT::f32) ? RTLIB::OGT_F32 :
          (VT == MVT::f64) ?
          RTLIB::OGT_F64 :
          (VT == MVT::f128) ? RTLIB::OGT_F128 : RTLIB::OGT_PPCF128;
    break;
  case ISD::SETO:
    ShouldInvertCC = true;
    LLVM_FALLTHROUGH;
  case ISD::SETUO:
    LC1 = (VT == MVT::f32) ? RTLIB::UO_F32 :
          (VT == MVT::f64) ? RTLIB::UO_F64 :
          (VT == MVT::f128) ? RTLIB::UO_F128 : RTLIB::UO_PPCF128;
    break;
  case ISD::SETONE:
    // SETONE = O && UNE
    ShouldInvertCC = true;
    LLVM_FALLTHROUGH;
  case ISD::SETUEQ:
    LC1 = (VT == MVT::f32) ? RTLIB::UO_F32 :
          (VT == MVT::f64) ? RTLIB::UO_F64 :
          (VT == MVT::f128) ? RTLIB::UO_F128 : RTLIB::UO_PPCF128;
    LC2 = (VT == MVT::f32) ? RTLIB::OEQ_F32 :
          (VT == MVT::f64) ? RTLIB::OEQ_F64 :
          (VT == MVT::f128) ? RTLIB::OEQ_F128 : RTLIB::OEQ_PPCF128;
    break;
  default:
    // Invert CC for unordered comparisons
    ShouldInvertCC = true;
    switch (CCCode) {
    case ISD::SETULT:
      LC1 = (VT == MVT::f32) ? RTLIB::OGE_F32 :
            (VT == MVT::f64) ? RTLIB::OGE_F64 :
            (VT == MVT::f128) ? RTLIB::OGE_F128 : RTLIB::OGE_PPCF128;
      break;
    case ISD::SETULE:
      LC1 = (VT == MVT::f32) ? RTLIB::OGT_F32 :
            (VT == MVT::f64) ? RTLIB::OGT_F64 :
            (VT == MVT::f128) ? RTLIB::OGT_F128 : RTLIB::OGT_PPCF128;
      break;
    case ISD::SETUGT:
      LC1 = (VT == MVT::f32) ? RTLIB::OLE_F32 :
            (VT == MVT::f64) ? RTLIB::OLE_F64 :
            (VT == MVT::f128) ? RTLIB::OLE_F128 : RTLIB::OLE_PPCF128;
      break;
    case ISD::SETUGE:
      LC1 = (VT == MVT::f32) ? RTLIB::OLT_F32 :
            (VT == MVT::f64) ? RTLIB::OLT_F64 :
            (VT == MVT::f128) ? RTLIB::OLT_F128 : RTLIB::OLT_PPCF128;
      break;
    default: llvm_unreachable("Do not know how to soften this setcc!");
    }
  }

  // Use the target specific return value for comparison lib calls.
  EVT RetVT = getCmpLibcallReturnType();
  SDValue Ops[2] = {NewLHS, NewRHS};
  TargetLowering::MakeLibCallOptions CallOptions;
  EVT OpsVT[2] = { OldLHS.getValueType(),
                   OldRHS.getValueType() };
  CallOptions.setTypeListBeforeSoften(OpsVT, RetVT, true);
  auto Call = makeLibCall(DAG, LC1, RetVT, Ops, CallOptions, dl, Chain);
  NewLHS = Call.first;
  NewRHS = DAG.getConstant(0, dl, RetVT);

  CCCode = getCmpLibcallCC(LC1);
  if (ShouldInvertCC) {
    assert(RetVT.isInteger());
    CCCode = getSetCCInverse(CCCode, RetVT);
  }

  if (LC2 == RTLIB::UNKNOWN_LIBCALL) {
    // Update Chain.
    Chain = Call.second;
  } else {
    EVT SetCCVT =
        getSetCCResultType(DAG.getDataLayout(), *DAG.getContext(), RetVT);
    SDValue Tmp = DAG.getSetCC(dl, SetCCVT, NewLHS, NewRHS, CCCode);
    auto Call2 = makeLibCall(DAG, LC2, RetVT, Ops, CallOptions, dl, Chain);
    CCCode = getCmpLibcallCC(LC2);
    if (ShouldInvertCC)
      CCCode = getSetCCInverse(CCCode, RetVT);
    NewLHS = DAG.getSetCC(dl, SetCCVT, Call2.first, NewRHS, CCCode);
    if (Chain)
      Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, Call.second,
                          Call2.second);
    NewLHS = DAG.getNode(ShouldInvertCC ? ISD::AND : ISD::OR, dl,
                         Tmp.getValueType(), Tmp, NewLHS);
    NewRHS = SDValue();
  }
}

/// Return the entry encoding for a jump table in the current function. The
/// returned value is a member of the MachineJumpTableInfo::JTEntryKind enum.
unsigned TargetLowering::getJumpTableEncoding() const {
  // In non-pic modes, just use the address of a block.
  if (!isPositionIndependent())
    return MachineJumpTableInfo::EK_BlockAddress;

  // In PIC mode, if the target supports a GPRel32 directive, use it.
  if (getTargetMachine().getMCAsmInfo()->getGPRel32Directive() != nullptr)
    return MachineJumpTableInfo::EK_GPRel32BlockAddress;

  // Otherwise, use a label difference.
  return MachineJumpTableInfo::EK_LabelDifference32;
}

SDValue TargetLowering::getPICJumpTableRelocBase(SDValue Table,
                                                 SelectionDAG &DAG) const {
  // If our PIC model is GP relative, use the global offset table as the base.
  unsigned JTEncoding = getJumpTableEncoding();

  if ((JTEncoding == MachineJumpTableInfo::EK_GPRel64BlockAddress) ||
      (JTEncoding == MachineJumpTableInfo::EK_GPRel32BlockAddress))
    return DAG.getGLOBAL_OFFSET_TABLE(getPointerTy(DAG.getDataLayout()));

  return Table;
}

/// This returns the relocation base for the given PIC jumptable, the same as
/// getPICJumpTableRelocBase, but as an MCExpr.
const MCExpr *
TargetLowering::getPICJumpTableRelocBaseExpr(const MachineFunction *MF,
                                             unsigned JTI,
                                             MCContext &Ctx) const {
  // The normal PIC reloc base is the label at the start of the jump table.
  return MCSymbolRefExpr::create(MF->getJTISymbol(JTI, Ctx), Ctx);
}

bool
TargetLowering::isOffsetFoldingLegal(const GlobalAddressSDNode *GA) const {
  const TargetMachine &TM = getTargetMachine();
  const GlobalValue *GV = GA->getGlobal();

  // If the address is not even local to this DSO we will have to load it from
  // a got and then add the offset.
  if (!TM.shouldAssumeDSOLocal(*GV->getParent(), GV))
    return false;

  // If the code is position independent we will have to add a base register.
  if (isPositionIndependent())
    return false;

  // Otherwise we can do it.
  return true;
}

//===----------------------------------------------------------------------===//
//  Optimization Methods
//===----------------------------------------------------------------------===//

/// If the specified instruction has a constant integer operand and there are
/// bits set in that constant that are not demanded, then clear those bits and
/// return true.
bool TargetLowering::ShrinkDemandedConstant(SDValue Op,
                                            const APInt &DemandedBits,
                                            const APInt &DemandedElts,
                                            TargetLoweringOpt &TLO) const {
  SDLoc DL(Op);
  unsigned Opcode = Op.getOpcode();

  // Do target-specific constant optimization.
  if (targetShrinkDemandedConstant(Op, DemandedBits, DemandedElts, TLO))
    return TLO.New.getNode();

  // FIXME: ISD::SELECT, ISD::SELECT_CC
  switch (Opcode) {
  default:
    break;
  case ISD::XOR:
  case ISD::AND:
  case ISD::OR: {
    auto *Op1C = dyn_cast<ConstantSDNode>(Op.getOperand(1));
    if (!Op1C || Op1C->isOpaque())
      return false;

    // If this is a 'not' op, don't touch it because that's a canonical form.
    const APInt &C = Op1C->getAPIntValue();
    if (Opcode == ISD::XOR && DemandedBits.isSubsetOf(C))
      return false;

    if (!C.isSubsetOf(DemandedBits)) {
      EVT VT = Op.getValueType();
      SDValue NewC = TLO.DAG.getConstant(DemandedBits & C, DL, VT);
      SDValue NewOp = TLO.DAG.getNode(Opcode, DL, VT, Op.getOperand(0), NewC);
      return TLO.CombineTo(Op, NewOp);
    }

    break;
  }
  }

  return false;
}

bool TargetLowering::ShrinkDemandedConstant(SDValue Op,
                                            const APInt &DemandedBits,
                                            TargetLoweringOpt &TLO) const {
  EVT VT = Op.getValueType();
  APInt DemandedElts = VT.isVector()
                           ?
                               APInt::getAllOnes(VT.getVectorNumElements())
                           : APInt(1, 1);
  return ShrinkDemandedConstant(Op, DemandedBits, DemandedElts, TLO);
}

/// Convert x+y to (VT)((SmallVT)x+(SmallVT)y) if the casts are free.
/// This uses isZExtFree and ZERO_EXTEND for the widening cast, but it could be
/// generalized for targets with other types of implicit widening casts.
bool TargetLowering::ShrinkDemandedOp(SDValue Op, unsigned BitWidth,
                                      const APInt &Demanded,
                                      TargetLoweringOpt &TLO) const {
  assert(Op.getNumOperands() == 2 &&
         "ShrinkDemandedOp only supports binary operators!");
  assert(Op.getNode()->getNumValues() == 1 &&
         "ShrinkDemandedOp only supports nodes with one result!");

  SelectionDAG &DAG = TLO.DAG;
  SDLoc dl(Op);

  // Early return, as this function cannot handle vector types.
  if (Op.getValueType().isVector())
    return false;

  // Don't do this if the node has another user, which may require the
  // full value.
  if (!Op.getNode()->hasOneUse())
    return false;

  // Search for the smallest integer type with free casts to and from
  // Op's type. For expedience, just check power-of-2 integer types.
  const TargetLowering &TLI = DAG.getTargetLoweringInfo();
  unsigned DemandedSize = Demanded.getActiveBits();
  unsigned SmallVTBits = DemandedSize;
  if (!isPowerOf2_32(SmallVTBits))
    SmallVTBits = NextPowerOf2(SmallVTBits);
  for (; SmallVTBits < BitWidth; SmallVTBits = NextPowerOf2(SmallVTBits)) {
    EVT SmallVT = EVT::getIntegerVT(*DAG.getContext(), SmallVTBits);
    if (TLI.isTruncateFree(Op.getValueType(), SmallVT) &&
        TLI.isZExtFree(SmallVT, Op.getValueType())) {
      // We found a type with free casts.
      SDValue X = DAG.getNode(
          Op.getOpcode(), dl, SmallVT,
          DAG.getNode(ISD::TRUNCATE, dl, SmallVT, Op.getOperand(0)),
          DAG.getNode(ISD::TRUNCATE, dl, SmallVT, Op.getOperand(1)));
      assert(DemandedSize <= SmallVTBits && "Narrowed below demanded bits?");
      SDValue Z = DAG.getNode(ISD::ANY_EXTEND, dl, Op.getValueType(), X);
      return TLO.CombineTo(Op, Z);
    }
  }
  return false;
}

bool TargetLowering::SimplifyDemandedBits(SDValue Op, const APInt &DemandedBits,
                                          DAGCombinerInfo &DCI) const {
  SelectionDAG &DAG = DCI.DAG;
  TargetLoweringOpt TLO(DAG, !DCI.isBeforeLegalize(),
                        !DCI.isBeforeLegalizeOps());
  KnownBits Known;

  bool Simplified = SimplifyDemandedBits(Op, DemandedBits, Known, TLO);
  if (Simplified) {
    DCI.AddToWorklist(Op.getNode());
    DCI.CommitTargetLoweringOpt(TLO);
  }
  return Simplified;
}

bool TargetLowering::SimplifyDemandedBits(SDValue Op, const APInt &DemandedBits,
                                          KnownBits &Known,
                                          TargetLoweringOpt &TLO,
                                          unsigned Depth,
                                          bool AssumeSingleUse) const {
  EVT VT = Op.getValueType();

  // TODO: We can probably do more work on calculating the known bits and
  // simplifying the operations for scalable vectors, but for now we just
  // bail out.
  if (VT.isScalableVector()) {
    // Pretend we don't know anything for now.
    Known = KnownBits(DemandedBits.getBitWidth());
    return false;
  }

  APInt DemandedElts = VT.isVector()
                           ? APInt::getAllOnes(VT.getVectorNumElements())
                           : APInt(1, 1);
  return SimplifyDemandedBits(Op, DemandedBits, DemandedElts, Known, TLO, Depth,
                              AssumeSingleUse);
}

// TODO: Can we merge SelectionDAG::GetDemandedBits into this?
// TODO: Under what circumstances can we create nodes? Constant folding?
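// Given that only DemandedBits/DemandedElts of Op are ever used, this helper
// tries to return a simpler existing value (for example one operand of a
// bitwise op, or a bitcast of the source) that produces the same demanded
// bits, so callers can peek through multi-use operands. It returns an empty
// SDValue when no such value is found.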
SDValue TargetLowering::SimplifyMultipleUseDemandedBits(
    SDValue Op, const APInt &DemandedBits, const APInt &DemandedElts,
    SelectionDAG &DAG, unsigned Depth) const {
  // Limit search depth.
  if (Depth >= SelectionDAG::MaxRecursionDepth)
    return SDValue();

  // Ignore UNDEFs.
  if (Op.isUndef())
    return SDValue();

  // Not demanding any bits/elts from Op.
  if (DemandedBits == 0 || DemandedElts == 0)
    return DAG.getUNDEF(Op.getValueType());

  bool IsLE = DAG.getDataLayout().isLittleEndian();
  unsigned NumElts = DemandedElts.getBitWidth();
  unsigned BitWidth = DemandedBits.getBitWidth();
  KnownBits LHSKnown, RHSKnown;
  switch (Op.getOpcode()) {
  case ISD::BITCAST: {
    SDValue Src = peekThroughBitcasts(Op.getOperand(0));
    EVT SrcVT = Src.getValueType();
    EVT DstVT = Op.getValueType();
    if (SrcVT == DstVT)
      return Src;

    unsigned NumSrcEltBits = SrcVT.getScalarSizeInBits();
    unsigned NumDstEltBits = DstVT.getScalarSizeInBits();
    if (NumSrcEltBits == NumDstEltBits)
      if (SDValue V = SimplifyMultipleUseDemandedBits(
              Src, DemandedBits, DemandedElts, DAG, Depth + 1))
        return DAG.getBitcast(DstVT, V);

    if (SrcVT.isVector() && (NumDstEltBits % NumSrcEltBits) == 0) {
      unsigned Scale = NumDstEltBits / NumSrcEltBits;
      unsigned NumSrcElts = SrcVT.getVectorNumElements();
      APInt DemandedSrcBits = APInt::getZero(NumSrcEltBits);
      APInt DemandedSrcElts = APInt::getZero(NumSrcElts);
      for (unsigned i = 0; i != Scale; ++i) {
        unsigned EltOffset = IsLE ? i : (Scale - 1 - i);
        unsigned BitOffset = EltOffset * NumSrcEltBits;
        APInt Sub = DemandedBits.extractBits(NumSrcEltBits, BitOffset);
        if (!Sub.isZero()) {
          DemandedSrcBits |= Sub;
          for (unsigned j = 0; j != NumElts; ++j)
            if (DemandedElts[j])
              DemandedSrcElts.setBit((j * Scale) + i);
        }
      }

      if (SDValue V = SimplifyMultipleUseDemandedBits(
              Src, DemandedSrcBits, DemandedSrcElts, DAG, Depth + 1))
        return DAG.getBitcast(DstVT, V);
    }

    // TODO - bigendian once we have test coverage.
    if (IsLE && (NumSrcEltBits % NumDstEltBits) == 0) {
      unsigned Scale = NumSrcEltBits / NumDstEltBits;
      unsigned NumSrcElts = SrcVT.isVector() ? SrcVT.getVectorNumElements() : 1;
      APInt DemandedSrcBits = APInt::getZero(NumSrcEltBits);
      APInt DemandedSrcElts = APInt::getZero(NumSrcElts);
      for (unsigned i = 0; i != NumElts; ++i)
        if (DemandedElts[i]) {
          unsigned Offset = (i % Scale) * NumDstEltBits;
          DemandedSrcBits.insertBits(DemandedBits, Offset);
          DemandedSrcElts.setBit(i / Scale);
        }

      if (SDValue V = SimplifyMultipleUseDemandedBits(
              Src, DemandedSrcBits, DemandedSrcElts, DAG, Depth + 1))
        return DAG.getBitcast(DstVT, V);
    }

    break;
  }
  case ISD::AND: {
    LHSKnown = DAG.computeKnownBits(Op.getOperand(0), DemandedElts, Depth + 1);
    RHSKnown = DAG.computeKnownBits(Op.getOperand(1), DemandedElts, Depth + 1);

    // If all of the demanded bits are known 1 on one side, return the other.
    // These bits cannot contribute to the result of the 'and' in this
    // context.
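    // For example, if only the low nibble of (and X, 0xff) is demanded, those
    // bits are known one on the RHS, so X itself can be used.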
    if (DemandedBits.isSubsetOf(LHSKnown.Zero | RHSKnown.One))
      return Op.getOperand(0);
    if (DemandedBits.isSubsetOf(RHSKnown.Zero | LHSKnown.One))
      return Op.getOperand(1);
    break;
  }
  case ISD::OR: {
    LHSKnown = DAG.computeKnownBits(Op.getOperand(0), DemandedElts, Depth + 1);
    RHSKnown = DAG.computeKnownBits(Op.getOperand(1), DemandedElts, Depth + 1);

    // If all of the demanded bits are known zero on one side, return the
    // other. These bits cannot contribute to the result of the 'or' in this
    // context.
    if (DemandedBits.isSubsetOf(LHSKnown.One | RHSKnown.Zero))
      return Op.getOperand(0);
    if (DemandedBits.isSubsetOf(RHSKnown.One | LHSKnown.Zero))
      return Op.getOperand(1);
    break;
  }
  case ISD::XOR: {
    LHSKnown = DAG.computeKnownBits(Op.getOperand(0), DemandedElts, Depth + 1);
    RHSKnown = DAG.computeKnownBits(Op.getOperand(1), DemandedElts, Depth + 1);

    // If all of the demanded bits are known zero on one side, return the
    // other.
    if (DemandedBits.isSubsetOf(RHSKnown.Zero))
      return Op.getOperand(0);
    if (DemandedBits.isSubsetOf(LHSKnown.Zero))
      return Op.getOperand(1);
    break;
  }
  case ISD::SHL: {
    // If we are only demanding sign bits then we can use the shift source
    // directly.
    if (const APInt *MaxSA =
            DAG.getValidMaximumShiftAmountConstant(Op, DemandedElts)) {
      SDValue Op0 = Op.getOperand(0);
      unsigned ShAmt = MaxSA->getZExtValue();
      unsigned NumSignBits =
          DAG.ComputeNumSignBits(Op0, DemandedElts, Depth + 1);
      unsigned UpperDemandedBits = BitWidth - DemandedBits.countTrailingZeros();
      if (NumSignBits > ShAmt && (NumSignBits - ShAmt) >= (UpperDemandedBits))
        return Op0;
    }
    break;
  }
  case ISD::SETCC: {
    SDValue Op0 = Op.getOperand(0);
    SDValue Op1 = Op.getOperand(1);
    ISD::CondCode CC = cast<CondCodeSDNode>(Op.getOperand(2))->get();
    // If (1) we only need the sign-bit, (2) the setcc operands are the same
    // width as the setcc result, and (3) the result of a setcc conforms to 0 or
    // -1, we may be able to bypass the setcc.
    if (DemandedBits.isSignMask() &&
        Op0.getScalarValueSizeInBits() == BitWidth &&
        getBooleanContents(Op0.getValueType()) ==
            BooleanContent::ZeroOrNegativeOneBooleanContent) {
      // If we're testing X < 0, then this compare isn't needed - just use X!
      // FIXME: We're limiting to integer types here, but this should also work
      // if we don't care about FP signed-zero. The use of SETLT with FP means
      // that we don't care about NaNs.
      if (CC == ISD::SETLT && Op1.getValueType().isInteger() &&
          (isNullConstant(Op1) || ISD::isBuildVectorAllZeros(Op1.getNode())))
        return Op0;
    }
    break;
  }
  case ISD::SIGN_EXTEND_INREG: {
    // If none of the extended bits are demanded, eliminate the sextinreg.
    SDValue Op0 = Op.getOperand(0);
    EVT ExVT = cast<VTSDNode>(Op.getOperand(1))->getVT();
    unsigned ExBits = ExVT.getScalarSizeInBits();
    if (DemandedBits.getActiveBits() <= ExBits)
      return Op0;
    // If the input is already sign extended, just drop the extension.
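    // (For example, extending from i8 inside an i32 value is a no-op when the
    // input already has at least 32 - 8 + 1 = 25 sign bits.)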
    unsigned NumSignBits = DAG.ComputeNumSignBits(Op0, DemandedElts, Depth + 1);
    if (NumSignBits >= (BitWidth - ExBits + 1))
      return Op0;
    break;
  }
  case ISD::ANY_EXTEND_VECTOR_INREG:
  case ISD::SIGN_EXTEND_VECTOR_INREG:
  case ISD::ZERO_EXTEND_VECTOR_INREG: {
    // If we only want the lowest element and none of the extended bits, then
    // we can return the bitcasted source vector.
    SDValue Src = Op.getOperand(0);
    EVT SrcVT = Src.getValueType();
    EVT DstVT = Op.getValueType();
    if (IsLE && DemandedElts == 1 &&
        DstVT.getSizeInBits() == SrcVT.getSizeInBits() &&
        DemandedBits.getActiveBits() <= SrcVT.getScalarSizeInBits()) {
      return DAG.getBitcast(DstVT, Src);
    }
    break;
  }
  case ISD::INSERT_VECTOR_ELT: {
    // If we don't demand the inserted element, return the base vector.
    SDValue Vec = Op.getOperand(0);
    auto *CIdx = dyn_cast<ConstantSDNode>(Op.getOperand(2));
    EVT VecVT = Vec.getValueType();
    if (CIdx && CIdx->getAPIntValue().ult(VecVT.getVectorNumElements()) &&
        !DemandedElts[CIdx->getZExtValue()])
      return Vec;
    break;
  }
  case ISD::INSERT_SUBVECTOR: {
    SDValue Vec = Op.getOperand(0);
    SDValue Sub = Op.getOperand(1);
    uint64_t Idx = Op.getConstantOperandVal(2);
    unsigned NumSubElts = Sub.getValueType().getVectorNumElements();
    APInt DemandedSubElts = DemandedElts.extractBits(NumSubElts, Idx);
    // If we don't demand the inserted subvector, return the base vector.
    if (DemandedSubElts == 0)
      return Vec;
    // If this simply widens the lowest subvector, see if we can do it earlier.
    if (Idx == 0 && Vec.isUndef()) {
      if (SDValue NewSub = SimplifyMultipleUseDemandedBits(
              Sub, DemandedBits, DemandedSubElts, DAG, Depth + 1))
        return DAG.getNode(Op.getOpcode(), SDLoc(Op), Op.getValueType(),
                           Op.getOperand(0), NewSub, Op.getOperand(2));
    }
    break;
  }
  case ISD::VECTOR_SHUFFLE: {
    ArrayRef<int> ShuffleMask = cast<ShuffleVectorSDNode>(Op)->getMask();

    // If all the demanded elts are from one operand and are inline,
    // then we can use the operand directly.
    bool AllUndef = true, IdentityLHS = true, IdentityRHS = true;
    for (unsigned i = 0; i != NumElts; ++i) {
      int M = ShuffleMask[i];
      if (M < 0 || !DemandedElts[i])
        continue;
      AllUndef = false;
      IdentityLHS &= (M == (int)i);
      IdentityRHS &= ((M - NumElts) == i);
    }

    if (AllUndef)
      return DAG.getUNDEF(Op.getValueType());
    if (IdentityLHS)
      return Op.getOperand(0);
    if (IdentityRHS)
      return Op.getOperand(1);
    break;
  }
  default:
    if (Op.getOpcode() >= ISD::BUILTIN_OP_END)
      if (SDValue V = SimplifyMultipleUseDemandedBitsForTargetNode(
              Op, DemandedBits, DemandedElts, DAG, Depth))
        return V;
    break;
  }
  return SDValue();
}

SDValue TargetLowering::SimplifyMultipleUseDemandedBits(
    SDValue Op, const APInt &DemandedBits, SelectionDAG &DAG,
    unsigned Depth) const {
  EVT VT = Op.getValueType();
  APInt DemandedElts = VT.isVector()
                           ?
                               APInt::getAllOnes(VT.getVectorNumElements())
                           : APInt(1, 1);
  return SimplifyMultipleUseDemandedBits(Op, DemandedBits, DemandedElts, DAG,
                                         Depth);
}

SDValue TargetLowering::SimplifyMultipleUseDemandedVectorElts(
    SDValue Op, const APInt &DemandedElts, SelectionDAG &DAG,
    unsigned Depth) const {
  APInt DemandedBits = APInt::getAllOnes(Op.getScalarValueSizeInBits());
  return SimplifyMultipleUseDemandedBits(Op, DemandedBits, DemandedElts, DAG,
                                         Depth);
}

/// Look at Op. At this point, we know that only the OriginalDemandedBits of the
/// result of Op are ever used downstream. If we can use this information to
/// simplify Op, create a new simplified DAG node and return true, returning the
/// original and new nodes in Old and New. Otherwise, analyze the expression and
/// return a mask of Known bits for the expression (used to simplify the
/// caller). The Known bits may only be accurate for those bits in the
/// OriginalDemandedBits and OriginalDemandedElts.
bool TargetLowering::SimplifyDemandedBits(
    SDValue Op, const APInt &OriginalDemandedBits,
    const APInt &OriginalDemandedElts, KnownBits &Known, TargetLoweringOpt &TLO,
    unsigned Depth, bool AssumeSingleUse) const {
  unsigned BitWidth = OriginalDemandedBits.getBitWidth();
  assert(Op.getScalarValueSizeInBits() == BitWidth &&
         "Mask size mismatches value type size!");

  // Don't know anything.
  Known = KnownBits(BitWidth);

  // TODO: We can probably do more work on calculating the known bits and
  // simplifying the operations for scalable vectors, but for now we just
  // bail out.
  if (Op.getValueType().isScalableVector())
    return false;

  bool IsLE = TLO.DAG.getDataLayout().isLittleEndian();
  unsigned NumElts = OriginalDemandedElts.getBitWidth();
  assert((!Op.getValueType().isVector() ||
          NumElts == Op.getValueType().getVectorNumElements()) &&
         "Unexpected vector size");

  APInt DemandedBits = OriginalDemandedBits;
  APInt DemandedElts = OriginalDemandedElts;
  SDLoc dl(Op);
  auto &DL = TLO.DAG.getDataLayout();

  // Undef operand.
  if (Op.isUndef())
    return false;

  if (Op.getOpcode() == ISD::Constant) {
    // We know all of the bits for a constant!
    Known = KnownBits::makeConstant(cast<ConstantSDNode>(Op)->getAPIntValue());
    return false;
  }

  if (Op.getOpcode() == ISD::ConstantFP) {
    // We know all of the bits for a floating point constant!
    Known = KnownBits::makeConstant(
        cast<ConstantFPSDNode>(Op)->getValueAPF().bitcastToAPInt());
    return false;
  }

  // Other users may use these bits.
  EVT VT = Op.getValueType();
  if (!Op.getNode()->hasOneUse() && !AssumeSingleUse) {
    if (Depth != 0) {
      // If not at the root, just compute the Known bits to
      // simplify things downstream.
      Known = TLO.DAG.computeKnownBits(Op, DemandedElts, Depth);
      return false;
    }
    // If this is the root being simplified, allow it to have multiple uses,
    // just set the DemandedBits/Elts to all bits.
    DemandedBits = APInt::getAllOnes(BitWidth);
    DemandedElts = APInt::getAllOnes(NumElts);
  } else if (OriginalDemandedBits == 0 || OriginalDemandedElts == 0) {
    // Not demanding any bits/elts from Op.
    return TLO.CombineTo(Op, TLO.DAG.getUNDEF(VT));
  } else if (Depth >= SelectionDAG::MaxRecursionDepth) {
    // Limit search depth.
    return false;
  }

  KnownBits Known2;
  switch (Op.getOpcode()) {
  case ISD::TargetConstant:
    llvm_unreachable("Can't simplify this node");
  case ISD::SCALAR_TO_VECTOR: {
    if (!DemandedElts[0])
      return TLO.CombineTo(Op, TLO.DAG.getUNDEF(VT));

    KnownBits SrcKnown;
    SDValue Src = Op.getOperand(0);
    unsigned SrcBitWidth = Src.getScalarValueSizeInBits();
    APInt SrcDemandedBits = DemandedBits.zextOrSelf(SrcBitWidth);
    if (SimplifyDemandedBits(Src, SrcDemandedBits, SrcKnown, TLO, Depth + 1))
      return true;

    // Upper elements are undef, so only get the knownbits if we just demand
    // the bottom element.
    if (DemandedElts == 1)
      Known = SrcKnown.anyextOrTrunc(BitWidth);
    break;
  }
  case ISD::BUILD_VECTOR:
    // Collect the known bits that are shared by every demanded element.
    // TODO: Call SimplifyDemandedBits for non-constant demanded elements.
    Known = TLO.DAG.computeKnownBits(Op, DemandedElts, Depth);
    return false; // Don't fall through, will infinitely loop.
  case ISD::LOAD: {
    auto *LD = cast<LoadSDNode>(Op);
    if (getTargetConstantFromLoad(LD)) {
      Known = TLO.DAG.computeKnownBits(Op, DemandedElts, Depth);
      return false; // Don't fall through, will infinitely loop.
    }
    if (ISD::isZEXTLoad(Op.getNode()) && Op.getResNo() == 0) {
      // If this is a ZEXTLoad and we are looking at the loaded value.
      EVT MemVT = LD->getMemoryVT();
      unsigned MemBits = MemVT.getScalarSizeInBits();
      Known.Zero.setBitsFrom(MemBits);
      return false; // Don't fall through, will infinitely loop.
    }
    break;
  }
  case ISD::INSERT_VECTOR_ELT: {
    SDValue Vec = Op.getOperand(0);
    SDValue Scl = Op.getOperand(1);
    auto *CIdx = dyn_cast<ConstantSDNode>(Op.getOperand(2));
    EVT VecVT = Vec.getValueType();

    // If index isn't constant, assume we need all vector elements AND the
    // inserted element.
    APInt DemandedVecElts(DemandedElts);
    if (CIdx && CIdx->getAPIntValue().ult(VecVT.getVectorNumElements())) {
      unsigned Idx = CIdx->getZExtValue();
      DemandedVecElts.clearBit(Idx);

      // Inserted element is not required.
      if (!DemandedElts[Idx])
        return TLO.CombineTo(Op, Vec);
    }

    KnownBits KnownScl;
    unsigned NumSclBits = Scl.getScalarValueSizeInBits();
    APInt DemandedSclBits = DemandedBits.zextOrTrunc(NumSclBits);
    if (SimplifyDemandedBits(Scl, DemandedSclBits, KnownScl, TLO, Depth + 1))
      return true;

    Known = KnownScl.anyextOrTrunc(BitWidth);

    KnownBits KnownVec;
    if (SimplifyDemandedBits(Vec, DemandedBits, DemandedVecElts, KnownVec, TLO,
                             Depth + 1))
      return true;

    if (!!DemandedVecElts)
      Known = KnownBits::commonBits(Known, KnownVec);

    return false;
  }
  case ISD::INSERT_SUBVECTOR: {
    // Demand any elements from the subvector and the remainder from the src
    // it's inserted into.
    SDValue Src = Op.getOperand(0);
    SDValue Sub = Op.getOperand(1);
    uint64_t Idx = Op.getConstantOperandVal(2);
    unsigned NumSubElts = Sub.getValueType().getVectorNumElements();
    APInt DemandedSubElts = DemandedElts.extractBits(NumSubElts, Idx);
    APInt DemandedSrcElts = DemandedElts;
    DemandedSrcElts.insertBits(APInt::getZero(NumSubElts), Idx);

    KnownBits KnownSub, KnownSrc;
    if (SimplifyDemandedBits(Sub, DemandedBits, DemandedSubElts, KnownSub, TLO,
                             Depth + 1))
      return true;
    if (SimplifyDemandedBits(Src, DemandedBits, DemandedSrcElts, KnownSrc, TLO,
                             Depth + 1))
      return true;

    Known.Zero.setAllBits();
    Known.One.setAllBits();
    if (!!DemandedSubElts)
      Known = KnownBits::commonBits(Known, KnownSub);
    if (!!DemandedSrcElts)
      Known = KnownBits::commonBits(Known, KnownSrc);

    // Attempt to avoid multi-use src if we don't need anything from it.
    if (!DemandedBits.isAllOnes() || !DemandedSubElts.isAllOnes() ||
        !DemandedSrcElts.isAllOnes()) {
      SDValue NewSub = SimplifyMultipleUseDemandedBits(
          Sub, DemandedBits, DemandedSubElts, TLO.DAG, Depth + 1);
      SDValue NewSrc = SimplifyMultipleUseDemandedBits(
          Src, DemandedBits, DemandedSrcElts, TLO.DAG, Depth + 1);
      if (NewSub || NewSrc) {
        NewSub = NewSub ? NewSub : Sub;
        NewSrc = NewSrc ? NewSrc : Src;
        SDValue NewOp = TLO.DAG.getNode(Op.getOpcode(), dl, VT, NewSrc, NewSub,
                                        Op.getOperand(2));
        return TLO.CombineTo(Op, NewOp);
      }
    }
    break;
  }
  case ISD::EXTRACT_SUBVECTOR: {
    // Offset the demanded elts by the subvector index.
    SDValue Src = Op.getOperand(0);
    if (Src.getValueType().isScalableVector())
      break;
    uint64_t Idx = Op.getConstantOperandVal(1);
    unsigned NumSrcElts = Src.getValueType().getVectorNumElements();
    APInt DemandedSrcElts = DemandedElts.zextOrSelf(NumSrcElts).shl(Idx);

    if (SimplifyDemandedBits(Src, DemandedBits, DemandedSrcElts, Known, TLO,
                             Depth + 1))
      return true;

    // Attempt to avoid multi-use src if we don't need anything from it.
    if (!DemandedBits.isAllOnes() || !DemandedSrcElts.isAllOnes()) {
      SDValue DemandedSrc = SimplifyMultipleUseDemandedBits(
          Src, DemandedBits, DemandedSrcElts, TLO.DAG, Depth + 1);
      if (DemandedSrc) {
        SDValue NewOp = TLO.DAG.getNode(Op.getOpcode(), dl, VT, DemandedSrc,
                                        Op.getOperand(1));
        return TLO.CombineTo(Op, NewOp);
      }
    }
    break;
  }
  case ISD::CONCAT_VECTORS: {
    Known.Zero.setAllBits();
    Known.One.setAllBits();
    EVT SubVT = Op.getOperand(0).getValueType();
    unsigned NumSubVecs = Op.getNumOperands();
    unsigned NumSubElts = SubVT.getVectorNumElements();
    for (unsigned i = 0; i != NumSubVecs; ++i) {
      APInt DemandedSubElts =
          DemandedElts.extractBits(NumSubElts, i * NumSubElts);
      if (SimplifyDemandedBits(Op.getOperand(i), DemandedBits, DemandedSubElts,
                               Known2, TLO, Depth + 1))
        return true;
      // Known bits are shared by every demanded subvector element.
      if (!!DemandedSubElts)
        Known = KnownBits::commonBits(Known, Known2);
    }
    break;
  }
  case ISD::VECTOR_SHUFFLE: {
    ArrayRef<int> ShuffleMask = cast<ShuffleVectorSDNode>(Op)->getMask();

    // Collect demanded elements from shuffle operands.
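    // (Mask entries below NumElts select from the first operand; entries of
    // NumElts or more select from the second.)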
    APInt DemandedLHS(NumElts, 0);
    APInt DemandedRHS(NumElts, 0);
    for (unsigned i = 0; i != NumElts; ++i) {
      if (!DemandedElts[i])
        continue;
      int M = ShuffleMask[i];
      if (M < 0) {
        // For UNDEF elements, we don't know anything about the common state of
        // the shuffle result.
        DemandedLHS.clearAllBits();
        DemandedRHS.clearAllBits();
        break;
      }
      assert(0 <= M && M < (int)(2 * NumElts) && "Shuffle index out of range");
      if (M < (int)NumElts)
        DemandedLHS.setBit(M);
      else
        DemandedRHS.setBit(M - NumElts);
    }

    if (!!DemandedLHS || !!DemandedRHS) {
      SDValue Op0 = Op.getOperand(0);
      SDValue Op1 = Op.getOperand(1);

      Known.Zero.setAllBits();
      Known.One.setAllBits();
      if (!!DemandedLHS) {
        if (SimplifyDemandedBits(Op0, DemandedBits, DemandedLHS, Known2, TLO,
                                 Depth + 1))
          return true;
        Known = KnownBits::commonBits(Known, Known2);
      }
      if (!!DemandedRHS) {
        if (SimplifyDemandedBits(Op1, DemandedBits, DemandedRHS, Known2, TLO,
                                 Depth + 1))
          return true;
        Known = KnownBits::commonBits(Known, Known2);
      }

      // Attempt to avoid multi-use ops if we don't need anything from them.
      SDValue DemandedOp0 = SimplifyMultipleUseDemandedBits(
          Op0, DemandedBits, DemandedLHS, TLO.DAG, Depth + 1);
      SDValue DemandedOp1 = SimplifyMultipleUseDemandedBits(
          Op1, DemandedBits, DemandedRHS, TLO.DAG, Depth + 1);
      if (DemandedOp0 || DemandedOp1) {
        Op0 = DemandedOp0 ? DemandedOp0 : Op0;
        Op1 = DemandedOp1 ? DemandedOp1 : Op1;
        SDValue NewOp = TLO.DAG.getVectorShuffle(VT, dl, Op0, Op1, ShuffleMask);
        return TLO.CombineTo(Op, NewOp);
      }
    }
    break;
  }
  case ISD::AND: {
    SDValue Op0 = Op.getOperand(0);
    SDValue Op1 = Op.getOperand(1);

    // If the RHS is a constant, check to see if the LHS would be zero without
    // using the bits from the RHS. Below, we use knowledge about the RHS to
    // simplify the LHS, here we're using information from the LHS to simplify
    // the RHS.
    if (ConstantSDNode *RHSC = isConstOrConstSplat(Op1)) {
      // Do not increment Depth here; that can cause an infinite loop.
      KnownBits LHSKnown = TLO.DAG.computeKnownBits(Op0, DemandedElts, Depth);
      // If the LHS already has zeros where RHSC does, this 'and' is dead.
      if ((LHSKnown.Zero & DemandedBits) ==
          (~RHSC->getAPIntValue() & DemandedBits))
        return TLO.CombineTo(Op, Op0);

      // If any of the set bits in the RHS are known zero on the LHS, shrink
      // the constant.
      if (ShrinkDemandedConstant(Op, ~LHSKnown.Zero & DemandedBits,
                                 DemandedElts, TLO))
        return true;

      // Bitwise-not (xor X, -1) is a special case: we don't usually shrink its
      // constant, but if this 'and' is only clearing bits that were just set by
      // the xor, then this 'and' can be eliminated by shrinking the mask of
      // the xor.
      // For example, for a 32-bit X:
      // and (xor (srl X, 31), -1), 1 --> xor (srl X, 31), 1
      if (isBitwiseNot(Op0) && Op0.hasOneUse() &&
          LHSKnown.One == ~RHSC->getAPIntValue()) {
        SDValue Xor = TLO.DAG.getNode(ISD::XOR, dl, VT, Op0.getOperand(0), Op1);
        return TLO.CombineTo(Op, Xor);
      }
    }

    if (SimplifyDemandedBits(Op1, DemandedBits, DemandedElts, Known, TLO,
                             Depth + 1))
      return true;
    assert(!Known.hasConflict() && "Bits known to be one AND zero?");
    if (SimplifyDemandedBits(Op0, ~Known.Zero & DemandedBits, DemandedElts,
                             Known2, TLO, Depth + 1))
      return true;
    assert(!Known2.hasConflict() && "Bits known to be one AND zero?");

    // Attempt to avoid multi-use ops if we don't need anything from them.
    if (!DemandedBits.isAllOnes() || !DemandedElts.isAllOnes()) {
      SDValue DemandedOp0 = SimplifyMultipleUseDemandedBits(
          Op0, DemandedBits, DemandedElts, TLO.DAG, Depth + 1);
      SDValue DemandedOp1 = SimplifyMultipleUseDemandedBits(
          Op1, DemandedBits, DemandedElts, TLO.DAG, Depth + 1);
      if (DemandedOp0 || DemandedOp1) {
        Op0 = DemandedOp0 ? DemandedOp0 : Op0;
        Op1 = DemandedOp1 ? DemandedOp1 : Op1;
        SDValue NewOp = TLO.DAG.getNode(Op.getOpcode(), dl, VT, Op0, Op1);
        return TLO.CombineTo(Op, NewOp);
      }
    }

    // If all of the demanded bits are known one on one side, return the other.
    // These bits cannot contribute to the result of the 'and'.
    if (DemandedBits.isSubsetOf(Known2.Zero | Known.One))
      return TLO.CombineTo(Op, Op0);
    if (DemandedBits.isSubsetOf(Known.Zero | Known2.One))
      return TLO.CombineTo(Op, Op1);
    // If all of the demanded bits in the inputs are known zeros, return zero.
    if (DemandedBits.isSubsetOf(Known.Zero | Known2.Zero))
      return TLO.CombineTo(Op, TLO.DAG.getConstant(0, dl, VT));
    // If the RHS is a constant, see if we can simplify it.
    if (ShrinkDemandedConstant(Op, ~Known2.Zero & DemandedBits, DemandedElts,
                               TLO))
      return true;
    // If the operation can be done in a smaller type, do so.
    if (ShrinkDemandedOp(Op, BitWidth, DemandedBits, TLO))
      return true;

    Known &= Known2;
    break;
  }
  case ISD::OR: {
    SDValue Op0 = Op.getOperand(0);
    SDValue Op1 = Op.getOperand(1);

    if (SimplifyDemandedBits(Op1, DemandedBits, DemandedElts, Known, TLO,
                             Depth + 1))
      return true;
    assert(!Known.hasConflict() && "Bits known to be one AND zero?");
    if (SimplifyDemandedBits(Op0, ~Known.One & DemandedBits, DemandedElts,
                             Known2, TLO, Depth + 1))
      return true;
    assert(!Known2.hasConflict() && "Bits known to be one AND zero?");

    // Attempt to avoid multi-use ops if we don't need anything from them.
    if (!DemandedBits.isAllOnes() || !DemandedElts.isAllOnes()) {
      SDValue DemandedOp0 = SimplifyMultipleUseDemandedBits(
          Op0, DemandedBits, DemandedElts, TLO.DAG, Depth + 1);
      SDValue DemandedOp1 = SimplifyMultipleUseDemandedBits(
          Op1, DemandedBits, DemandedElts, TLO.DAG, Depth + 1);
      if (DemandedOp0 || DemandedOp1) {
        Op0 = DemandedOp0 ? DemandedOp0 : Op0;
        Op1 = DemandedOp1 ? DemandedOp1 : Op1;
        SDValue NewOp = TLO.DAG.getNode(Op.getOpcode(), dl, VT, Op0, Op1);
        return TLO.CombineTo(Op, NewOp);
      }
    }

    // If all of the demanded bits are known zero on one side, return the other.
    // These bits cannot contribute to the result of the 'or'.
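    // For example, if only the low byte of (or X, 0xff00) is demanded, those
    // bits are known zero in the constant, so X can be used directly.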
    if (DemandedBits.isSubsetOf(Known2.One | Known.Zero))
      return TLO.CombineTo(Op, Op0);
    if (DemandedBits.isSubsetOf(Known.One | Known2.Zero))
      return TLO.CombineTo(Op, Op1);
    // If the RHS is a constant, see if we can simplify it.
    if (ShrinkDemandedConstant(Op, DemandedBits, DemandedElts, TLO))
      return true;
    // If the operation can be done in a smaller type, do so.
    if (ShrinkDemandedOp(Op, BitWidth, DemandedBits, TLO))
      return true;

    Known |= Known2;
    break;
  }
  case ISD::XOR: {
    SDValue Op0 = Op.getOperand(0);
    SDValue Op1 = Op.getOperand(1);

    if (SimplifyDemandedBits(Op1, DemandedBits, DemandedElts, Known, TLO,
                             Depth + 1))
      return true;
    assert(!Known.hasConflict() && "Bits known to be one AND zero?");
    if (SimplifyDemandedBits(Op0, DemandedBits, DemandedElts, Known2, TLO,
                             Depth + 1))
      return true;
    assert(!Known2.hasConflict() && "Bits known to be one AND zero?");

    // Attempt to avoid multi-use ops if we don't need anything from them.
    if (!DemandedBits.isAllOnes() || !DemandedElts.isAllOnes()) {
      SDValue DemandedOp0 = SimplifyMultipleUseDemandedBits(
          Op0, DemandedBits, DemandedElts, TLO.DAG, Depth + 1);
      SDValue DemandedOp1 = SimplifyMultipleUseDemandedBits(
          Op1, DemandedBits, DemandedElts, TLO.DAG, Depth + 1);
      if (DemandedOp0 || DemandedOp1) {
        Op0 = DemandedOp0 ? DemandedOp0 : Op0;
        Op1 = DemandedOp1 ? DemandedOp1 : Op1;
        SDValue NewOp = TLO.DAG.getNode(Op.getOpcode(), dl, VT, Op0, Op1);
        return TLO.CombineTo(Op, NewOp);
      }
    }

    // If all of the demanded bits are known zero on one side, return the other.
    // These bits cannot contribute to the result of the 'xor'.
    if (DemandedBits.isSubsetOf(Known.Zero))
      return TLO.CombineTo(Op, Op0);
    if (DemandedBits.isSubsetOf(Known2.Zero))
      return TLO.CombineTo(Op, Op1);
    // If the operation can be done in a smaller type, do so.
    if (ShrinkDemandedOp(Op, BitWidth, DemandedBits, TLO))
      return true;

    // If all of the unknown bits are known to be zero on one side or the
    // other, turn this into an *inclusive* or.
    // e.g. (A & C1)^(B & C2) -> (A & C1)|(B & C2) iff C1&C2 == 0
    if (DemandedBits.isSubsetOf(Known.Zero | Known2.Zero))
      return TLO.CombineTo(Op, TLO.DAG.getNode(ISD::OR, dl, VT, Op0, Op1));

    ConstantSDNode *C = isConstOrConstSplat(Op1, DemandedElts);
    if (C) {
      // If one side is a constant, and all of the set bits in the constant are
      // also known set on the other side, turn this into an AND, as we know
      // the bits will be cleared.
      // e.g. (X | C1) ^ C2 --> (X | C1) & ~C2 iff (C1&C2) == C2
      // NB: it is okay if more bits are known than are requested
      if (C->getAPIntValue() == Known2.One) {
        SDValue ANDC =
            TLO.DAG.getConstant(~C->getAPIntValue() & DemandedBits, dl, VT);
        return TLO.CombineTo(Op, TLO.DAG.getNode(ISD::AND, dl, VT, Op0, ANDC));
      }

      // If the RHS is a constant, see if we can change it. Don't alter a -1
      // constant because that's a 'not' op, and that is better for combining
      // and codegen.
      if (!C->isAllOnes() && DemandedBits.isSubsetOf(C->getAPIntValue())) {
        // We're flipping all demanded bits. Flip the undemanded bits too.
        SDValue New = TLO.DAG.getNOT(dl, Op0, VT);
        return TLO.CombineTo(Op, New);
      }
    }

    // If we can't turn this into a 'not', try to shrink the constant.
    if (!C || !C->isAllOnes())
      if (ShrinkDemandedConstant(Op, DemandedBits, DemandedElts, TLO))
        return true;

    Known ^= Known2;
    break;
  }
  case ISD::SELECT:
    if (SimplifyDemandedBits(Op.getOperand(2), DemandedBits, Known, TLO,
                             Depth + 1))
      return true;
    if (SimplifyDemandedBits(Op.getOperand(1), DemandedBits, Known2, TLO,
                             Depth + 1))
      return true;
    assert(!Known.hasConflict() && "Bits known to be one AND zero?");
    assert(!Known2.hasConflict() && "Bits known to be one AND zero?");

    // If the operands are constants, see if we can simplify them.
    if (ShrinkDemandedConstant(Op, DemandedBits, DemandedElts, TLO))
      return true;

    // Only known if known in both the LHS and RHS.
    Known = KnownBits::commonBits(Known, Known2);
    break;
  case ISD::SELECT_CC:
    if (SimplifyDemandedBits(Op.getOperand(3), DemandedBits, Known, TLO,
                             Depth + 1))
      return true;
    if (SimplifyDemandedBits(Op.getOperand(2), DemandedBits, Known2, TLO,
                             Depth + 1))
      return true;
    assert(!Known.hasConflict() && "Bits known to be one AND zero?");
    assert(!Known2.hasConflict() && "Bits known to be one AND zero?");

    // If the operands are constants, see if we can simplify them.
    if (ShrinkDemandedConstant(Op, DemandedBits, DemandedElts, TLO))
      return true;

    // Only known if known in both the LHS and RHS.
    Known = KnownBits::commonBits(Known, Known2);
    break;
  case ISD::SETCC: {
    SDValue Op0 = Op.getOperand(0);
    SDValue Op1 = Op.getOperand(1);
    ISD::CondCode CC = cast<CondCodeSDNode>(Op.getOperand(2))->get();
    // If (1) we only need the sign-bit, (2) the setcc operands are the same
    // width as the setcc result, and (3) the result of a setcc conforms to 0 or
    // -1, we may be able to bypass the setcc.
    if (DemandedBits.isSignMask() &&
        Op0.getScalarValueSizeInBits() == BitWidth &&
        getBooleanContents(Op0.getValueType()) ==
            BooleanContent::ZeroOrNegativeOneBooleanContent) {
      // If we're testing X < 0, then this compare isn't needed - just use X!
      // FIXME: We're limiting to integer types here, but this should also work
      // if we don't care about FP signed-zero. The use of SETLT with FP means
      // that we don't care about NaNs.
      if (CC == ISD::SETLT && Op1.getValueType().isInteger() &&
          (isNullConstant(Op1) || ISD::isBuildVectorAllZeros(Op1.getNode())))
        return TLO.CombineTo(Op, Op0);

      // TODO: Should we check for other forms of sign-bit comparisons?
      // Examples: X <= -1, X >= 0
    }
    if (getBooleanContents(Op0.getValueType()) ==
            TargetLowering::ZeroOrOneBooleanContent &&
        BitWidth > 1)
      Known.Zero.setBitsFrom(1);
    break;
  }
  case ISD::SHL: {
    SDValue Op0 = Op.getOperand(0);
    SDValue Op1 = Op.getOperand(1);
    EVT ShiftVT = Op1.getValueType();

    if (const APInt *SA =
            TLO.DAG.getValidShiftAmountConstant(Op, DemandedElts)) {
      unsigned ShAmt = SA->getZExtValue();
      if (ShAmt == 0)
        return TLO.CombineTo(Op, Op0);

      // If this is ((X >>u C1) << ShAmt), see if we can simplify this into a
      // single shift. We can do this if the bottom bits (which are shifted
      // out) are never demanded.
      // TODO - support non-uniform vector amounts.
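      // For example, (shl (srl X, 8), 4) --> (srl X, 4) when the low 4 result
      // bits are not demanded, since only those bits can differ.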
      if (Op0.getOpcode() == ISD::SRL) {
        if (!DemandedBits.intersects(APInt::getLowBitsSet(BitWidth, ShAmt))) {
          if (const APInt *SA2 =
                  TLO.DAG.getValidShiftAmountConstant(Op0, DemandedElts)) {
            unsigned C1 = SA2->getZExtValue();
            unsigned Opc = ISD::SHL;
            int Diff = ShAmt - C1;
            if (Diff < 0) {
              Diff = -Diff;
              Opc = ISD::SRL;
            }
            SDValue NewSA = TLO.DAG.getConstant(Diff, dl, ShiftVT);
            return TLO.CombineTo(
                Op, TLO.DAG.getNode(Opc, dl, VT, Op0.getOperand(0), NewSA));
          }
        }
      }

      // Convert (shl (anyext x, c)) to (anyext (shl x, c)) if the high bits
      // are not demanded. This will likely allow the anyext to be folded away.
      // TODO - support non-uniform vector amounts.
      if (Op0.getOpcode() == ISD::ANY_EXTEND) {
        SDValue InnerOp = Op0.getOperand(0);
        EVT InnerVT = InnerOp.getValueType();
        unsigned InnerBits = InnerVT.getScalarSizeInBits();
        if (ShAmt < InnerBits && DemandedBits.getActiveBits() <= InnerBits &&
            isTypeDesirableForOp(ISD::SHL, InnerVT)) {
          EVT ShTy = getShiftAmountTy(InnerVT, DL);
          if (!APInt(BitWidth, ShAmt).isIntN(ShTy.getSizeInBits()))
            ShTy = InnerVT;
          SDValue NarrowShl =
              TLO.DAG.getNode(ISD::SHL, dl, InnerVT, InnerOp,
                              TLO.DAG.getConstant(ShAmt, dl, ShTy));
          return TLO.CombineTo(
              Op, TLO.DAG.getNode(ISD::ANY_EXTEND, dl, VT, NarrowShl));
        }

        // Repeat the SHL optimization above in cases where an extension
        // intervenes: (shl (anyext (shr x, c1)), c2) to
        // (shl (anyext x), c2-c1). This requires that the bottom c1 bits
        // aren't demanded (as above) and that the shifted upper c1 bits of
        // x aren't demanded.
        // TODO - support non-uniform vector amounts.
        if (Op0.hasOneUse() && InnerOp.getOpcode() == ISD::SRL &&
            InnerOp.hasOneUse()) {
          if (const APInt *SA2 =
                  TLO.DAG.getValidShiftAmountConstant(InnerOp, DemandedElts)) {
            unsigned InnerShAmt = SA2->getZExtValue();
            if (InnerShAmt < ShAmt && InnerShAmt < InnerBits &&
                DemandedBits.getActiveBits() <=
                    (InnerBits - InnerShAmt + ShAmt) &&
                DemandedBits.countTrailingZeros() >= ShAmt) {
              SDValue NewSA =
                  TLO.DAG.getConstant(ShAmt - InnerShAmt, dl, ShiftVT);
              SDValue NewExt = TLO.DAG.getNode(ISD::ANY_EXTEND, dl, VT,
                                               InnerOp.getOperand(0));
              return TLO.CombineTo(
                  Op, TLO.DAG.getNode(ISD::SHL, dl, VT, NewExt, NewSA));
            }
          }
        }
      }

      APInt InDemandedMask = DemandedBits.lshr(ShAmt);
      if (SimplifyDemandedBits(Op0, InDemandedMask, DemandedElts, Known, TLO,
                               Depth + 1))
        return true;
      assert(!Known.hasConflict() && "Bits known to be one AND zero?");
      Known.Zero <<= ShAmt;
      Known.One <<= ShAmt;
      // low bits known zero.
      Known.Zero.setLowBits(ShAmt);

      // Try shrinking the operation as long as the shift amount will still be
      // in range.
      if ((ShAmt < DemandedBits.getActiveBits()) &&
          ShrinkDemandedOp(Op, BitWidth, DemandedBits, TLO))
        return true;
    }

    // If we are only demanding sign bits then we can use the shift source
    // directly.
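    // (For example, (shl X, 4) can be replaced by X when only the sign bit is
    // demanded and X already has at least five sign bits.)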
1537 if (const APInt *MaxSA = 1538 TLO.DAG.getValidMaximumShiftAmountConstant(Op, DemandedElts)) { 1539 unsigned ShAmt = MaxSA->getZExtValue(); 1540 unsigned NumSignBits = 1541 TLO.DAG.ComputeNumSignBits(Op0, DemandedElts, Depth + 1); 1542 unsigned UpperDemandedBits = BitWidth - DemandedBits.countTrailingZeros(); 1543 if (NumSignBits > ShAmt && (NumSignBits - ShAmt) >= (UpperDemandedBits)) 1544 return TLO.CombineTo(Op, Op0); 1545 } 1546 break; 1547 } 1548 case ISD::SRL: { 1549 SDValue Op0 = Op.getOperand(0); 1550 SDValue Op1 = Op.getOperand(1); 1551 EVT ShiftVT = Op1.getValueType(); 1552 1553 if (const APInt *SA = 1554 TLO.DAG.getValidShiftAmountConstant(Op, DemandedElts)) { 1555 unsigned ShAmt = SA->getZExtValue(); 1556 if (ShAmt == 0) 1557 return TLO.CombineTo(Op, Op0); 1558 1559 // If this is ((X << C1) >>u ShAmt), see if we can simplify this into a 1560 // single shift. We can do this if the top bits (which are shifted out) 1561 // are never demanded. 1562 // TODO - support non-uniform vector amounts. 1563 if (Op0.getOpcode() == ISD::SHL) { 1564 if (!DemandedBits.intersects(APInt::getHighBitsSet(BitWidth, ShAmt))) { 1565 if (const APInt *SA2 = 1566 TLO.DAG.getValidShiftAmountConstant(Op0, DemandedElts)) { 1567 unsigned C1 = SA2->getZExtValue(); 1568 unsigned Opc = ISD::SRL; 1569 int Diff = ShAmt - C1; 1570 if (Diff < 0) { 1571 Diff = -Diff; 1572 Opc = ISD::SHL; 1573 } 1574 SDValue NewSA = TLO.DAG.getConstant(Diff, dl, ShiftVT); 1575 return TLO.CombineTo( 1576 Op, TLO.DAG.getNode(Opc, dl, VT, Op0.getOperand(0), NewSA)); 1577 } 1578 } 1579 } 1580 1581 APInt InDemandedMask = (DemandedBits << ShAmt); 1582 1583 // If the shift is exact, then it does demand the low bits (and knows that 1584 // they are zero). 1585 if (Op->getFlags().hasExact()) 1586 InDemandedMask.setLowBits(ShAmt); 1587 1588 // Compute the new bits that are at the top now. 1589 if (SimplifyDemandedBits(Op0, InDemandedMask, DemandedElts, Known, TLO, 1590 Depth + 1)) 1591 return true; 1592 assert(!Known.hasConflict() && "Bits known to be one AND zero?"); 1593 Known.Zero.lshrInPlace(ShAmt); 1594 Known.One.lshrInPlace(ShAmt); 1595 // High bits known zero. 1596 Known.Zero.setHighBits(ShAmt); 1597 } 1598 break; 1599 } 1600 case ISD::SRA: { 1601 SDValue Op0 = Op.getOperand(0); 1602 SDValue Op1 = Op.getOperand(1); 1603 EVT ShiftVT = Op1.getValueType(); 1604 1605 // If we only want bits that already match the signbit then we don't need 1606 // to shift. 1607 unsigned NumHiDemandedBits = BitWidth - DemandedBits.countTrailingZeros(); 1608 if (TLO.DAG.ComputeNumSignBits(Op0, DemandedElts, Depth + 1) >= 1609 NumHiDemandedBits) 1610 return TLO.CombineTo(Op, Op0); 1611 1612 // If this is an arithmetic shift right and only the low-bit is set, we can 1613 // always convert this into a logical shr, even if the shift amount is 1614 // variable. The low bit of the shift cannot be an input sign bit unless 1615 // the shift amount is >= the size of the datatype, which is undefined. 1616 if (DemandedBits.isOne()) 1617 return TLO.CombineTo(Op, TLO.DAG.getNode(ISD::SRL, dl, VT, Op0, Op1)); 1618 1619 if (const APInt *SA = 1620 TLO.DAG.getValidShiftAmountConstant(Op, DemandedElts)) { 1621 unsigned ShAmt = SA->getZExtValue(); 1622 if (ShAmt == 0) 1623 return TLO.CombineTo(Op, Op0); 1624 1625 APInt InDemandedMask = (DemandedBits << ShAmt); 1626 1627 // If the shift is exact, then it does demand the low bits (and knows that 1628 // they are zero). 
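    // e.g. (sra exact X, 3) requires the three low bits of X to be zero, so
    // they must remain demanded or a later simplification of X could break
    // the exact flag's guarantee.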
1629 if (Op->getFlags().hasExact()) 1630 InDemandedMask.setLowBits(ShAmt); 1631 1632 // If any of the demanded bits are produced by the sign extension, we also 1633 // demand the input sign bit. 1634 if (DemandedBits.countLeadingZeros() < ShAmt) 1635 InDemandedMask.setSignBit(); 1636 1637 if (SimplifyDemandedBits(Op0, InDemandedMask, DemandedElts, Known, TLO, 1638 Depth + 1)) 1639 return true; 1640 assert(!Known.hasConflict() && "Bits known to be one AND zero?"); 1641 Known.Zero.lshrInPlace(ShAmt); 1642 Known.One.lshrInPlace(ShAmt); 1643 1644 // If the input sign bit is known to be zero, or if none of the top bits 1645 // are demanded, turn this into an unsigned shift right. 1646 if (Known.Zero[BitWidth - ShAmt - 1] || 1647 DemandedBits.countLeadingZeros() >= ShAmt) { 1648 SDNodeFlags Flags; 1649 Flags.setExact(Op->getFlags().hasExact()); 1650 return TLO.CombineTo( 1651 Op, TLO.DAG.getNode(ISD::SRL, dl, VT, Op0, Op1, Flags)); 1652 } 1653 1654 int Log2 = DemandedBits.exactLogBase2(); 1655 if (Log2 >= 0) { 1656 // The bit must come from the sign. 1657 SDValue NewSA = TLO.DAG.getConstant(BitWidth - 1 - Log2, dl, ShiftVT); 1658 return TLO.CombineTo(Op, TLO.DAG.getNode(ISD::SRL, dl, VT, Op0, NewSA)); 1659 } 1660 1661 if (Known.One[BitWidth - ShAmt - 1]) 1662 // New bits are known one. 1663 Known.One.setHighBits(ShAmt); 1664 1665 // Attempt to avoid multi-use ops if we don't need anything from them. 1666 if (!InDemandedMask.isAllOnes() || !DemandedElts.isAllOnes()) { 1667 SDValue DemandedOp0 = SimplifyMultipleUseDemandedBits( 1668 Op0, InDemandedMask, DemandedElts, TLO.DAG, Depth + 1); 1669 if (DemandedOp0) { 1670 SDValue NewOp = TLO.DAG.getNode(ISD::SRA, dl, VT, DemandedOp0, Op1); 1671 return TLO.CombineTo(Op, NewOp); 1672 } 1673 } 1674 } 1675 break; 1676 } 1677 case ISD::FSHL: 1678 case ISD::FSHR: { 1679 SDValue Op0 = Op.getOperand(0); 1680 SDValue Op1 = Op.getOperand(1); 1681 SDValue Op2 = Op.getOperand(2); 1682 bool IsFSHL = (Op.getOpcode() == ISD::FSHL); 1683 1684 if (ConstantSDNode *SA = isConstOrConstSplat(Op2, DemandedElts)) { 1685 unsigned Amt = SA->getAPIntValue().urem(BitWidth); 1686 1687 // For fshl, 0-shift returns the 1st arg. 1688 // For fshr, 0-shift returns the 2nd arg. 1689 if (Amt == 0) { 1690 if (SimplifyDemandedBits(IsFSHL ? Op0 : Op1, DemandedBits, DemandedElts, 1691 Known, TLO, Depth + 1)) 1692 return true; 1693 break; 1694 } 1695 1696 // fshl: (Op0 << Amt) | (Op1 >> (BW - Amt)) 1697 // fshr: (Op0 << (BW - Amt)) | (Op1 >> Amt) 1698 APInt Demanded0 = DemandedBits.lshr(IsFSHL ? Amt : (BitWidth - Amt)); 1699 APInt Demanded1 = DemandedBits << (IsFSHL ? (BitWidth - Amt) : Amt); 1700 if (SimplifyDemandedBits(Op0, Demanded0, DemandedElts, Known2, TLO, 1701 Depth + 1)) 1702 return true; 1703 if (SimplifyDemandedBits(Op1, Demanded1, DemandedElts, Known, TLO, 1704 Depth + 1)) 1705 return true; 1706 1707 Known2.One <<= (IsFSHL ? Amt : (BitWidth - Amt)); 1708 Known2.Zero <<= (IsFSHL ? Amt : (BitWidth - Amt)); 1709 Known.One.lshrInPlace(IsFSHL ? (BitWidth - Amt) : Amt); 1710 Known.Zero.lshrInPlace(IsFSHL ? (BitWidth - Amt) : Amt); 1711 Known.One |= Known2.One; 1712 Known.Zero |= Known2.Zero; 1713 } 1714 1715 // For pow-2 bitwidths we only demand the bottom modulo amt bits. 
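    // e.g. a 32-bit funnel shift only depends on Amt % 32, so only the low 5
    // bits of the shift-amount operand are demanded.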
1716 if (isPowerOf2_32(BitWidth)) { 1717 APInt DemandedAmtBits(Op2.getScalarValueSizeInBits(), BitWidth - 1); 1718 if (SimplifyDemandedBits(Op2, DemandedAmtBits, DemandedElts, 1719 Known2, TLO, Depth + 1)) 1720 return true; 1721 } 1722 break; 1723 } 1724 case ISD::ROTL: 1725 case ISD::ROTR: { 1726 SDValue Op0 = Op.getOperand(0); 1727 SDValue Op1 = Op.getOperand(1); 1728 bool IsROTL = (Op.getOpcode() == ISD::ROTL); 1729 1730 // If we're rotating an 0/-1 value, then it stays an 0/-1 value. 1731 if (BitWidth == TLO.DAG.ComputeNumSignBits(Op0, DemandedElts, Depth + 1)) 1732 return TLO.CombineTo(Op, Op0); 1733 1734 if (ConstantSDNode *SA = isConstOrConstSplat(Op1, DemandedElts)) { 1735 unsigned Amt = SA->getAPIntValue().urem(BitWidth); 1736 unsigned RevAmt = BitWidth - Amt; 1737 1738 // rotl: (Op0 << Amt) | (Op0 >> (BW - Amt)) 1739 // rotr: (Op0 << (BW - Amt)) | (Op0 >> Amt) 1740 APInt Demanded0 = DemandedBits.rotr(IsROTL ? Amt : RevAmt); 1741 if (SimplifyDemandedBits(Op0, Demanded0, DemandedElts, Known2, TLO, 1742 Depth + 1)) 1743 return true; 1744 1745 // rot*(x, 0) --> x 1746 if (Amt == 0) 1747 return TLO.CombineTo(Op, Op0); 1748 1749 // See if we don't demand either half of the rotated bits. 1750 if ((!TLO.LegalOperations() || isOperationLegal(ISD::SHL, VT)) && 1751 DemandedBits.countTrailingZeros() >= (IsROTL ? Amt : RevAmt)) { 1752 Op1 = TLO.DAG.getConstant(IsROTL ? Amt : RevAmt, dl, Op1.getValueType()); 1753 return TLO.CombineTo(Op, TLO.DAG.getNode(ISD::SHL, dl, VT, Op0, Op1)); 1754 } 1755 if ((!TLO.LegalOperations() || isOperationLegal(ISD::SRL, VT)) && 1756 DemandedBits.countLeadingZeros() >= (IsROTL ? RevAmt : Amt)) { 1757 Op1 = TLO.DAG.getConstant(IsROTL ? RevAmt : Amt, dl, Op1.getValueType()); 1758 return TLO.CombineTo(Op, TLO.DAG.getNode(ISD::SRL, dl, VT, Op0, Op1)); 1759 } 1760 } 1761 1762 // For pow-2 bitwidths we only demand the bottom modulo amt bits. 1763 if (isPowerOf2_32(BitWidth)) { 1764 APInt DemandedAmtBits(Op1.getScalarValueSizeInBits(), BitWidth - 1); 1765 if (SimplifyDemandedBits(Op1, DemandedAmtBits, DemandedElts, Known2, TLO, 1766 Depth + 1)) 1767 return true; 1768 } 1769 break; 1770 } 1771 case ISD::UMIN: { 1772 // Check if one arg is always less than (or equal) to the other arg. 1773 SDValue Op0 = Op.getOperand(0); 1774 SDValue Op1 = Op.getOperand(1); 1775 KnownBits Known0 = TLO.DAG.computeKnownBits(Op0, DemandedElts, Depth + 1); 1776 KnownBits Known1 = TLO.DAG.computeKnownBits(Op1, DemandedElts, Depth + 1); 1777 Known = KnownBits::umin(Known0, Known1); 1778 if (Optional<bool> IsULE = KnownBits::ule(Known0, Known1)) 1779 return TLO.CombineTo(Op, IsULE.getValue() ? Op0 : Op1); 1780 if (Optional<bool> IsULT = KnownBits::ult(Known0, Known1)) 1781 return TLO.CombineTo(Op, IsULT.getValue() ? Op0 : Op1); 1782 break; 1783 } 1784 case ISD::UMAX: { 1785 // Check if one arg is always greater than (or equal) to the other arg. 1786 SDValue Op0 = Op.getOperand(0); 1787 SDValue Op1 = Op.getOperand(1); 1788 KnownBits Known0 = TLO.DAG.computeKnownBits(Op0, DemandedElts, Depth + 1); 1789 KnownBits Known1 = TLO.DAG.computeKnownBits(Op1, DemandedElts, Depth + 1); 1790 Known = KnownBits::umax(Known0, Known1); 1791 if (Optional<bool> IsUGE = KnownBits::uge(Known0, Known1)) 1792 return TLO.CombineTo(Op, IsUGE.getValue() ? Op0 : Op1); 1793 if (Optional<bool> IsUGT = KnownBits::ugt(Known0, Known1)) 1794 return TLO.CombineTo(Op, IsUGT.getValue() ? 
Op0 : Op1); 1795 break; 1796 } 1797 case ISD::BITREVERSE: { 1798 SDValue Src = Op.getOperand(0); 1799 APInt DemandedSrcBits = DemandedBits.reverseBits(); 1800 if (SimplifyDemandedBits(Src, DemandedSrcBits, DemandedElts, Known2, TLO, 1801 Depth + 1)) 1802 return true; 1803 Known.One = Known2.One.reverseBits(); 1804 Known.Zero = Known2.Zero.reverseBits(); 1805 break; 1806 } 1807 case ISD::BSWAP: { 1808 SDValue Src = Op.getOperand(0); 1809 1810 // If the only bits demanded come from one byte of the bswap result, 1811 // just shift the input byte into position to eliminate the bswap. 1812 unsigned NLZ = DemandedBits.countLeadingZeros(); 1813 unsigned NTZ = DemandedBits.countTrailingZeros(); 1814 1815 // Round NTZ down to the next byte. If we have 11 trailing zeros, then 1816 // we need all the bits down to bit 8. Likewise, round NLZ. If we 1817 // have 14 leading zeros, round to 8. 1818 NLZ &= ~7; 1819 NTZ &= ~7; 1820 // If we need exactly one byte, we can do this transformation. 1821 if (BitWidth - NLZ - NTZ == 8) { 1822 unsigned ResultBit = NTZ; 1823 unsigned InputBit = BitWidth - NTZ - 8; 1824 1825 // Replace this with either a left or right shift to get the byte into 1826 // the right place. 1827 unsigned ShiftOpcode = InputBit > ResultBit ? ISD::SRL : ISD::SHL; 1828 if (!TLO.LegalOperations() || isOperationLegal(ShiftOpcode, VT)) { 1829 EVT ShiftAmtTy = getShiftAmountTy(VT, DL); 1830 unsigned ShiftAmount = 1831 InputBit > ResultBit ? InputBit - ResultBit : ResultBit - InputBit; 1832 SDValue ShAmt = TLO.DAG.getConstant(ShiftAmount, dl, ShiftAmtTy); 1833 SDValue NewOp = TLO.DAG.getNode(ShiftOpcode, dl, VT, Src, ShAmt); 1834 return TLO.CombineTo(Op, NewOp); 1835 } 1836 } 1837 1838 APInt DemandedSrcBits = DemandedBits.byteSwap(); 1839 if (SimplifyDemandedBits(Src, DemandedSrcBits, DemandedElts, Known2, TLO, 1840 Depth + 1)) 1841 return true; 1842 Known.One = Known2.One.byteSwap(); 1843 Known.Zero = Known2.Zero.byteSwap(); 1844 break; 1845 } 1846 case ISD::CTPOP: { 1847 // If only 1 bit is demanded, replace with PARITY as long as we're before 1848 // op legalization. 1849 // FIXME: Limit to scalars for now. 1850 if (DemandedBits.isOne() && !TLO.LegalOps && !VT.isVector()) 1851 return TLO.CombineTo(Op, TLO.DAG.getNode(ISD::PARITY, dl, VT, 1852 Op.getOperand(0))); 1853 1854 Known = TLO.DAG.computeKnownBits(Op, DemandedElts, Depth); 1855 break; 1856 } 1857 case ISD::SIGN_EXTEND_INREG: { 1858 SDValue Op0 = Op.getOperand(0); 1859 EVT ExVT = cast<VTSDNode>(Op.getOperand(1))->getVT(); 1860 unsigned ExVTBits = ExVT.getScalarSizeInBits(); 1861 1862 // If we only care about the highest bit, don't bother shifting right. 1863 if (DemandedBits.isSignMask()) { 1864 unsigned MinSignedBits = 1865 TLO.DAG.ComputeMaxSignificantBits(Op0, DemandedElts, Depth + 1); 1866 bool AlreadySignExtended = ExVTBits >= MinSignedBits; 1867 // However if the input is already sign extended we expect the sign 1868 // extension to be dropped altogether later and do not simplify. 1869 if (!AlreadySignExtended) { 1870 // Compute the correct shift amount type, which must be getShiftAmountTy 1871 // for scalar types after legalization. 1872 EVT ShiftAmtTy = VT; 1873 if (TLO.LegalTypes() && !ShiftAmtTy.isVector()) 1874 ShiftAmtTy = getShiftAmountTy(ShiftAmtTy, DL); 1875 1876 SDValue ShiftAmt = 1877 TLO.DAG.getConstant(BitWidth - ExVTBits, dl, ShiftAmtTy); 1878 return TLO.CombineTo(Op, 1879 TLO.DAG.getNode(ISD::SHL, dl, VT, Op0, ShiftAmt)); 1880 } 1881 } 1882 1883 // If none of the extended bits are demanded, eliminate the sextinreg. 
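    // e.g. i32 (sext_inreg X, i8) only changes bits[31:8]; if none of those
    // bits are demanded, the node can simply be replaced by X.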
1884 if (DemandedBits.getActiveBits() <= ExVTBits) 1885 return TLO.CombineTo(Op, Op0); 1886 1887 APInt InputDemandedBits = DemandedBits.getLoBits(ExVTBits); 1888 1889 // Since the sign extended bits are demanded, we know that the sign 1890 // bit is demanded. 1891 InputDemandedBits.setBit(ExVTBits - 1); 1892 1893 if (SimplifyDemandedBits(Op0, InputDemandedBits, Known, TLO, Depth + 1)) 1894 return true; 1895 assert(!Known.hasConflict() && "Bits known to be one AND zero?"); 1896 1897 // If the sign bit of the input is known set or clear, then we know the 1898 // top bits of the result. 1899 1900 // If the input sign bit is known zero, convert this into a zero extension. 1901 if (Known.Zero[ExVTBits - 1]) 1902 return TLO.CombineTo(Op, TLO.DAG.getZeroExtendInReg(Op0, dl, ExVT)); 1903 1904 APInt Mask = APInt::getLowBitsSet(BitWidth, ExVTBits); 1905 if (Known.One[ExVTBits - 1]) { // Input sign bit known set 1906 Known.One.setBitsFrom(ExVTBits); 1907 Known.Zero &= Mask; 1908 } else { // Input sign bit unknown 1909 Known.Zero &= Mask; 1910 Known.One &= Mask; 1911 } 1912 break; 1913 } 1914 case ISD::BUILD_PAIR: { 1915 EVT HalfVT = Op.getOperand(0).getValueType(); 1916 unsigned HalfBitWidth = HalfVT.getScalarSizeInBits(); 1917 1918 APInt MaskLo = DemandedBits.getLoBits(HalfBitWidth).trunc(HalfBitWidth); 1919 APInt MaskHi = DemandedBits.getHiBits(HalfBitWidth).trunc(HalfBitWidth); 1920 1921 KnownBits KnownLo, KnownHi; 1922 1923 if (SimplifyDemandedBits(Op.getOperand(0), MaskLo, KnownLo, TLO, Depth + 1)) 1924 return true; 1925 1926 if (SimplifyDemandedBits(Op.getOperand(1), MaskHi, KnownHi, TLO, Depth + 1)) 1927 return true; 1928 1929 Known.Zero = KnownLo.Zero.zext(BitWidth) | 1930 KnownHi.Zero.zext(BitWidth).shl(HalfBitWidth); 1931 1932 Known.One = KnownLo.One.zext(BitWidth) | 1933 KnownHi.One.zext(BitWidth).shl(HalfBitWidth); 1934 break; 1935 } 1936 case ISD::ZERO_EXTEND: 1937 case ISD::ZERO_EXTEND_VECTOR_INREG: { 1938 SDValue Src = Op.getOperand(0); 1939 EVT SrcVT = Src.getValueType(); 1940 unsigned InBits = SrcVT.getScalarSizeInBits(); 1941 unsigned InElts = SrcVT.isVector() ? SrcVT.getVectorNumElements() : 1; 1942 bool IsVecInReg = Op.getOpcode() == ISD::ZERO_EXTEND_VECTOR_INREG; 1943 1944 // If none of the top bits are demanded, convert this into an any_extend. 1945 if (DemandedBits.getActiveBits() <= InBits) { 1946 // If we only need the non-extended bits of the bottom element 1947 // then we can just bitcast to the result. 1948 if (IsLE && IsVecInReg && DemandedElts == 1 && 1949 VT.getSizeInBits() == SrcVT.getSizeInBits()) 1950 return TLO.CombineTo(Op, TLO.DAG.getBitcast(VT, Src)); 1951 1952 unsigned Opc = 1953 IsVecInReg ? ISD::ANY_EXTEND_VECTOR_INREG : ISD::ANY_EXTEND; 1954 if (!TLO.LegalOperations() || isOperationLegal(Opc, VT)) 1955 return TLO.CombineTo(Op, TLO.DAG.getNode(Opc, dl, VT, Src)); 1956 } 1957 1958 APInt InDemandedBits = DemandedBits.trunc(InBits); 1959 APInt InDemandedElts = DemandedElts.zextOrSelf(InElts); 1960 if (SimplifyDemandedBits(Src, InDemandedBits, InDemandedElts, Known, TLO, 1961 Depth + 1)) 1962 return true; 1963 assert(!Known.hasConflict() && "Bits known to be one AND zero?"); 1964 assert(Known.getBitWidth() == InBits && "Src width has changed?"); 1965 Known = Known.zext(BitWidth); 1966 1967 // Attempt to avoid multi-use ops if we don't need anything from them. 
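    // For instance, if Src is (and X, C) and C has ones at every demanded
    // source bit, the lookup below can return X, and the extend is rebuilt on
    // X while the original 'and' keeps serving its other users.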
1968 if (SDValue NewSrc = SimplifyMultipleUseDemandedBits( 1969 Src, InDemandedBits, InDemandedElts, TLO.DAG, Depth + 1)) 1970 return TLO.CombineTo(Op, TLO.DAG.getNode(Op.getOpcode(), dl, VT, NewSrc)); 1971 break; 1972 } 1973 case ISD::SIGN_EXTEND: 1974 case ISD::SIGN_EXTEND_VECTOR_INREG: { 1975 SDValue Src = Op.getOperand(0); 1976 EVT SrcVT = Src.getValueType(); 1977 unsigned InBits = SrcVT.getScalarSizeInBits(); 1978 unsigned InElts = SrcVT.isVector() ? SrcVT.getVectorNumElements() : 1; 1979 bool IsVecInReg = Op.getOpcode() == ISD::SIGN_EXTEND_VECTOR_INREG; 1980 1981 // If none of the top bits are demanded, convert this into an any_extend. 1982 if (DemandedBits.getActiveBits() <= InBits) { 1983 // If we only need the non-extended bits of the bottom element 1984 // then we can just bitcast to the result. 1985 if (IsLE && IsVecInReg && DemandedElts == 1 && 1986 VT.getSizeInBits() == SrcVT.getSizeInBits()) 1987 return TLO.CombineTo(Op, TLO.DAG.getBitcast(VT, Src)); 1988 1989 unsigned Opc = 1990 IsVecInReg ? ISD::ANY_EXTEND_VECTOR_INREG : ISD::ANY_EXTEND; 1991 if (!TLO.LegalOperations() || isOperationLegal(Opc, VT)) 1992 return TLO.CombineTo(Op, TLO.DAG.getNode(Opc, dl, VT, Src)); 1993 } 1994 1995 APInt InDemandedBits = DemandedBits.trunc(InBits); 1996 APInt InDemandedElts = DemandedElts.zextOrSelf(InElts); 1997 1998 // Since some of the sign extended bits are demanded, we know that the sign 1999 // bit is demanded. 2000 InDemandedBits.setBit(InBits - 1); 2001 2002 if (SimplifyDemandedBits(Src, InDemandedBits, InDemandedElts, Known, TLO, 2003 Depth + 1)) 2004 return true; 2005 assert(!Known.hasConflict() && "Bits known to be one AND zero?"); 2006 assert(Known.getBitWidth() == InBits && "Src width has changed?"); 2007 2008 // If the sign bit is known one, the top bits match. 2009 Known = Known.sext(BitWidth); 2010 2011 // If the sign bit is known zero, convert this to a zero extend. 2012 if (Known.isNonNegative()) { 2013 unsigned Opc = 2014 IsVecInReg ? ISD::ZERO_EXTEND_VECTOR_INREG : ISD::ZERO_EXTEND; 2015 if (!TLO.LegalOperations() || isOperationLegal(Opc, VT)) 2016 return TLO.CombineTo(Op, TLO.DAG.getNode(Opc, dl, VT, Src)); 2017 } 2018 2019 // Attempt to avoid multi-use ops if we don't need anything from them. 2020 if (SDValue NewSrc = SimplifyMultipleUseDemandedBits( 2021 Src, InDemandedBits, InDemandedElts, TLO.DAG, Depth + 1)) 2022 return TLO.CombineTo(Op, TLO.DAG.getNode(Op.getOpcode(), dl, VT, NewSrc)); 2023 break; 2024 } 2025 case ISD::ANY_EXTEND: 2026 case ISD::ANY_EXTEND_VECTOR_INREG: { 2027 SDValue Src = Op.getOperand(0); 2028 EVT SrcVT = Src.getValueType(); 2029 unsigned InBits = SrcVT.getScalarSizeInBits(); 2030 unsigned InElts = SrcVT.isVector() ? SrcVT.getVectorNumElements() : 1; 2031 bool IsVecInReg = Op.getOpcode() == ISD::ANY_EXTEND_VECTOR_INREG; 2032 2033 // If we only need the bottom element then we can just bitcast. 2034 // TODO: Handle ANY_EXTEND? 
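    // e.g. little-endian (v2i64 any_extend_vector_inreg (v4i32 X)): element 0
    // keeps its low 32 bits in the same positions as in X and the extended
    // bits are unconstrained, so if only element 0 is demanded a bitcast of X
    // is equivalent.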
2035 if (IsLE && IsVecInReg && DemandedElts == 1 && 2036 VT.getSizeInBits() == SrcVT.getSizeInBits()) 2037 return TLO.CombineTo(Op, TLO.DAG.getBitcast(VT, Src)); 2038 2039 APInt InDemandedBits = DemandedBits.trunc(InBits); 2040 APInt InDemandedElts = DemandedElts.zextOrSelf(InElts); 2041 if (SimplifyDemandedBits(Src, InDemandedBits, InDemandedElts, Known, TLO, 2042 Depth + 1)) 2043 return true; 2044 assert(!Known.hasConflict() && "Bits known to be one AND zero?"); 2045 assert(Known.getBitWidth() == InBits && "Src width has changed?"); 2046 Known = Known.anyext(BitWidth); 2047 2048 // Attempt to avoid multi-use ops if we don't need anything from them. 2049 if (SDValue NewSrc = SimplifyMultipleUseDemandedBits( 2050 Src, InDemandedBits, InDemandedElts, TLO.DAG, Depth + 1)) 2051 return TLO.CombineTo(Op, TLO.DAG.getNode(Op.getOpcode(), dl, VT, NewSrc)); 2052 break; 2053 } 2054 case ISD::TRUNCATE: { 2055 SDValue Src = Op.getOperand(0); 2056 2057 // Simplify the input, using demanded bit information, and compute the known 2058 // zero/one bits live out. 2059 unsigned OperandBitWidth = Src.getScalarValueSizeInBits(); 2060 APInt TruncMask = DemandedBits.zext(OperandBitWidth); 2061 if (SimplifyDemandedBits(Src, TruncMask, DemandedElts, Known, TLO, 2062 Depth + 1)) 2063 return true; 2064 Known = Known.trunc(BitWidth); 2065 2066 // Attempt to avoid multi-use ops if we don't need anything from them. 2067 if (SDValue NewSrc = SimplifyMultipleUseDemandedBits( 2068 Src, TruncMask, DemandedElts, TLO.DAG, Depth + 1)) 2069 return TLO.CombineTo(Op, TLO.DAG.getNode(ISD::TRUNCATE, dl, VT, NewSrc)); 2070 2071 // If the input is only used by this truncate, see if we can shrink it based 2072 // on the known demanded bits. 2073 if (Src.getNode()->hasOneUse()) { 2074 switch (Src.getOpcode()) { 2075 default: 2076 break; 2077 case ISD::SRL: 2078 // Shrink SRL by a constant if none of the high bits shifted in are 2079 // demanded. 2080 if (TLO.LegalTypes() && !isTypeDesirableForOp(ISD::SRL, VT)) 2081 // Do not turn (vt1 truncate (vt2 srl)) into (vt1 srl) if vt1 is 2082 // undesirable. 2083 break; 2084 2085 const APInt *ShAmtC = 2086 TLO.DAG.getValidShiftAmountConstant(Src, DemandedElts); 2087 if (!ShAmtC || ShAmtC->uge(BitWidth)) 2088 break; 2089 uint64_t ShVal = ShAmtC->getZExtValue(); 2090 2091 APInt HighBits = 2092 APInt::getHighBitsSet(OperandBitWidth, OperandBitWidth - BitWidth); 2093 HighBits.lshrInPlace(ShVal); 2094 HighBits = HighBits.trunc(BitWidth); 2095 2096 if (!(HighBits & DemandedBits)) { 2097 // None of the shifted in bits are needed. Add a truncate of the 2098 // shift input, then shift it. 2099 SDValue NewShAmt = TLO.DAG.getConstant( 2100 ShVal, dl, getShiftAmountTy(VT, DL, TLO.LegalTypes())); 2101 SDValue NewTrunc = 2102 TLO.DAG.getNode(ISD::TRUNCATE, dl, VT, Src.getOperand(0)); 2103 return TLO.CombineTo( 2104 Op, TLO.DAG.getNode(ISD::SRL, dl, VT, NewTrunc, NewShAmt)); 2105 } 2106 break; 2107 } 2108 } 2109 2110 assert(!Known.hasConflict() && "Bits known to be one AND zero?"); 2111 break; 2112 } 2113 case ISD::AssertZext: { 2114 // AssertZext demands all of the high bits, plus any of the low bits 2115 // demanded by its users. 
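    // e.g. for i32 (AssertZext X, i8) the mask passed down is
    // 0xFFFFFF00 | DemandedBits: the asserted-zero bits[31:8] must be kept
    // intact no matter what the users want from bits[7:0].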
2116 EVT ZVT = cast<VTSDNode>(Op.getOperand(1))->getVT(); 2117 APInt InMask = APInt::getLowBitsSet(BitWidth, ZVT.getSizeInBits()); 2118 if (SimplifyDemandedBits(Op.getOperand(0), ~InMask | DemandedBits, Known, 2119 TLO, Depth + 1)) 2120 return true; 2121 assert(!Known.hasConflict() && "Bits known to be one AND zero?"); 2122 2123 Known.Zero |= ~InMask; 2124 break; 2125 } 2126 case ISD::EXTRACT_VECTOR_ELT: { 2127 SDValue Src = Op.getOperand(0); 2128 SDValue Idx = Op.getOperand(1); 2129 ElementCount SrcEltCnt = Src.getValueType().getVectorElementCount(); 2130 unsigned EltBitWidth = Src.getScalarValueSizeInBits(); 2131 2132 if (SrcEltCnt.isScalable()) 2133 return false; 2134 2135 // Demand the bits from every vector element without a constant index. 2136 unsigned NumSrcElts = SrcEltCnt.getFixedValue(); 2137 APInt DemandedSrcElts = APInt::getAllOnes(NumSrcElts); 2138 if (auto *CIdx = dyn_cast<ConstantSDNode>(Idx)) 2139 if (CIdx->getAPIntValue().ult(NumSrcElts)) 2140 DemandedSrcElts = APInt::getOneBitSet(NumSrcElts, CIdx->getZExtValue()); 2141 2142 // If BitWidth > EltBitWidth the value is anyext:ed. So we do not know 2143 // anything about the extended bits. 2144 APInt DemandedSrcBits = DemandedBits; 2145 if (BitWidth > EltBitWidth) 2146 DemandedSrcBits = DemandedSrcBits.trunc(EltBitWidth); 2147 2148 if (SimplifyDemandedBits(Src, DemandedSrcBits, DemandedSrcElts, Known2, TLO, 2149 Depth + 1)) 2150 return true; 2151 2152 // Attempt to avoid multi-use ops if we don't need anything from them. 2153 if (!DemandedSrcBits.isAllOnes() || !DemandedSrcElts.isAllOnes()) { 2154 if (SDValue DemandedSrc = SimplifyMultipleUseDemandedBits( 2155 Src, DemandedSrcBits, DemandedSrcElts, TLO.DAG, Depth + 1)) { 2156 SDValue NewOp = 2157 TLO.DAG.getNode(Op.getOpcode(), dl, VT, DemandedSrc, Idx); 2158 return TLO.CombineTo(Op, NewOp); 2159 } 2160 } 2161 2162 Known = Known2; 2163 if (BitWidth > EltBitWidth) 2164 Known = Known.anyext(BitWidth); 2165 break; 2166 } 2167 case ISD::BITCAST: { 2168 SDValue Src = Op.getOperand(0); 2169 EVT SrcVT = Src.getValueType(); 2170 unsigned NumSrcEltBits = SrcVT.getScalarSizeInBits(); 2171 2172 // If this is an FP->Int bitcast and if the sign bit is the only 2173 // thing demanded, turn this into a FGETSIGN. 2174 if (!TLO.LegalOperations() && !VT.isVector() && !SrcVT.isVector() && 2175 DemandedBits == APInt::getSignMask(Op.getValueSizeInBits()) && 2176 SrcVT.isFloatingPoint()) { 2177 bool OpVTLegal = isOperationLegalOrCustom(ISD::FGETSIGN, VT); 2178 bool i32Legal = isOperationLegalOrCustom(ISD::FGETSIGN, MVT::i32); 2179 if ((OpVTLegal || i32Legal) && VT.isSimple() && SrcVT != MVT::f16 && 2180 SrcVT != MVT::f128) { 2181 // Cannot eliminate/lower SHL for f128 yet. 2182 EVT Ty = OpVTLegal ? VT : MVT::i32; 2183 // Make a FGETSIGN + SHL to move the sign bit into the appropriate 2184 // place. We expect the SHL to be eliminated by other optimizations. 2185 SDValue Sign = TLO.DAG.getNode(ISD::FGETSIGN, dl, Ty, Src); 2186 unsigned OpVTSizeInBits = Op.getValueSizeInBits(); 2187 if (!OpVTLegal && OpVTSizeInBits > 32) 2188 Sign = TLO.DAG.getNode(ISD::ZERO_EXTEND, dl, VT, Sign); 2189 unsigned ShVal = Op.getValueSizeInBits() - 1; 2190 SDValue ShAmt = TLO.DAG.getConstant(ShVal, dl, VT); 2191 return TLO.CombineTo(Op, 2192 TLO.DAG.getNode(ISD::SHL, dl, VT, Sign, ShAmt)); 2193 } 2194 } 2195 2196 // Bitcast from a vector using SimplifyDemanded Bits/VectorElts. 2197 // Demand the elt/bit if any of the original elts/bits are demanded. 
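    // e.g. for (i64 (bitcast (v2i32 X))) on a little-endian target, demanding
    // the upper half of the i64 demands source element 1 and, within it, the
    // matching 32-bit slice of the demanded bits.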
2198 if (SrcVT.isVector() && (BitWidth % NumSrcEltBits) == 0) { 2199 unsigned Scale = BitWidth / NumSrcEltBits; 2200 unsigned NumSrcElts = SrcVT.getVectorNumElements(); 2201 APInt DemandedSrcBits = APInt::getZero(NumSrcEltBits); 2202 APInt DemandedSrcElts = APInt::getZero(NumSrcElts); 2203 for (unsigned i = 0; i != Scale; ++i) { 2204 unsigned EltOffset = IsLE ? i : (Scale - 1 - i); 2205 unsigned BitOffset = EltOffset * NumSrcEltBits; 2206 APInt Sub = DemandedBits.extractBits(NumSrcEltBits, BitOffset); 2207 if (!Sub.isZero()) { 2208 DemandedSrcBits |= Sub; 2209 for (unsigned j = 0; j != NumElts; ++j) 2210 if (DemandedElts[j]) 2211 DemandedSrcElts.setBit((j * Scale) + i); 2212 } 2213 } 2214 2215 APInt KnownSrcUndef, KnownSrcZero; 2216 if (SimplifyDemandedVectorElts(Src, DemandedSrcElts, KnownSrcUndef, 2217 KnownSrcZero, TLO, Depth + 1)) 2218 return true; 2219 2220 KnownBits KnownSrcBits; 2221 if (SimplifyDemandedBits(Src, DemandedSrcBits, DemandedSrcElts, 2222 KnownSrcBits, TLO, Depth + 1)) 2223 return true; 2224 } else if (IsLE && (NumSrcEltBits % BitWidth) == 0) { 2225 // TODO - bigendian once we have test coverage. 2226 unsigned Scale = NumSrcEltBits / BitWidth; 2227 unsigned NumSrcElts = SrcVT.isVector() ? SrcVT.getVectorNumElements() : 1; 2228 APInt DemandedSrcBits = APInt::getZero(NumSrcEltBits); 2229 APInt DemandedSrcElts = APInt::getZero(NumSrcElts); 2230 for (unsigned i = 0; i != NumElts; ++i) 2231 if (DemandedElts[i]) { 2232 unsigned Offset = (i % Scale) * BitWidth; 2233 DemandedSrcBits.insertBits(DemandedBits, Offset); 2234 DemandedSrcElts.setBit(i / Scale); 2235 } 2236 2237 if (SrcVT.isVector()) { 2238 APInt KnownSrcUndef, KnownSrcZero; 2239 if (SimplifyDemandedVectorElts(Src, DemandedSrcElts, KnownSrcUndef, 2240 KnownSrcZero, TLO, Depth + 1)) 2241 return true; 2242 } 2243 2244 KnownBits KnownSrcBits; 2245 if (SimplifyDemandedBits(Src, DemandedSrcBits, DemandedSrcElts, 2246 KnownSrcBits, TLO, Depth + 1)) 2247 return true; 2248 } 2249 2250 // If this is a bitcast, let computeKnownBits handle it. Only do this on a 2251 // recursive call where Known may be useful to the caller. 2252 if (Depth > 0) { 2253 Known = TLO.DAG.computeKnownBits(Op, DemandedElts, Depth); 2254 return false; 2255 } 2256 break; 2257 } 2258 case ISD::ADD: 2259 case ISD::MUL: 2260 case ISD::SUB: { 2261 // Add, Sub, and Mul don't demand any bits in positions beyond that 2262 // of the highest bit demanded of them. 2263 SDValue Op0 = Op.getOperand(0), Op1 = Op.getOperand(1); 2264 SDNodeFlags Flags = Op.getNode()->getFlags(); 2265 unsigned DemandedBitsLZ = DemandedBits.countLeadingZeros(); 2266 APInt LoMask = APInt::getLowBitsSet(BitWidth, BitWidth - DemandedBitsLZ); 2267 if (SimplifyDemandedBits(Op0, LoMask, DemandedElts, Known2, TLO, 2268 Depth + 1) || 2269 SimplifyDemandedBits(Op1, LoMask, DemandedElts, Known2, TLO, 2270 Depth + 1) || 2271 // See if the operation should be performed at a smaller bit width. 2272 ShrinkDemandedOp(Op, BitWidth, DemandedBits, TLO)) { 2273 if (Flags.hasNoSignedWrap() || Flags.hasNoUnsignedWrap()) { 2274 // Disable the nsw and nuw flags. We can no longer guarantee that we 2275 // won't wrap after simplification. 2276 Flags.setNoSignedWrap(false); 2277 Flags.setNoUnsignedWrap(false); 2278 SDValue NewOp = 2279 TLO.DAG.getNode(Op.getOpcode(), dl, VT, Op0, Op1, Flags); 2280 return TLO.CombineTo(Op, NewOp); 2281 } 2282 return true; 2283 } 2284 2285 // Attempt to avoid multi-use ops if we don't need anything from them. 
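    // e.g. with only the low 8 bits demanded, an operand built as
    // (or X, 0xF000) can be replaced by X here: bits above the highest
    // demanded bit never carry down into an add, sub or mul result.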
2286 if (!LoMask.isAllOnes() || !DemandedElts.isAllOnes()) { 2287 SDValue DemandedOp0 = SimplifyMultipleUseDemandedBits( 2288 Op0, LoMask, DemandedElts, TLO.DAG, Depth + 1); 2289 SDValue DemandedOp1 = SimplifyMultipleUseDemandedBits( 2290 Op1, LoMask, DemandedElts, TLO.DAG, Depth + 1); 2291 if (DemandedOp0 || DemandedOp1) { 2292 Flags.setNoSignedWrap(false); 2293 Flags.setNoUnsignedWrap(false); 2294 Op0 = DemandedOp0 ? DemandedOp0 : Op0; 2295 Op1 = DemandedOp1 ? DemandedOp1 : Op1; 2296 SDValue NewOp = 2297 TLO.DAG.getNode(Op.getOpcode(), dl, VT, Op0, Op1, Flags); 2298 return TLO.CombineTo(Op, NewOp); 2299 } 2300 } 2301 2302 // If we have a constant operand, we may be able to turn it into -1 if we 2303 // do not demand the high bits. This can make the constant smaller to 2304 // encode, allow more general folding, or match specialized instruction 2305 // patterns (eg, 'blsr' on x86). Don't bother changing 1 to -1 because that 2306 // is probably not useful (and could be detrimental). 2307 ConstantSDNode *C = isConstOrConstSplat(Op1); 2308 APInt HighMask = APInt::getHighBitsSet(BitWidth, DemandedBitsLZ); 2309 if (C && !C->isAllOnes() && !C->isOne() && 2310 (C->getAPIntValue() | HighMask).isAllOnes()) { 2311 SDValue Neg1 = TLO.DAG.getAllOnesConstant(dl, VT); 2312 // Disable the nsw and nuw flags. We can no longer guarantee that we 2313 // won't wrap after simplification. 2314 Flags.setNoSignedWrap(false); 2315 Flags.setNoUnsignedWrap(false); 2316 SDValue NewOp = TLO.DAG.getNode(Op.getOpcode(), dl, VT, Op0, Neg1, Flags); 2317 return TLO.CombineTo(Op, NewOp); 2318 } 2319 2320 LLVM_FALLTHROUGH; 2321 } 2322 default: 2323 if (Op.getOpcode() >= ISD::BUILTIN_OP_END) { 2324 if (SimplifyDemandedBitsForTargetNode(Op, DemandedBits, DemandedElts, 2325 Known, TLO, Depth)) 2326 return true; 2327 break; 2328 } 2329 2330 // Just use computeKnownBits to compute output bits. 2331 Known = TLO.DAG.computeKnownBits(Op, DemandedElts, Depth); 2332 break; 2333 } 2334 2335 // If we know the value of all of the demanded bits, return this as a 2336 // constant. 2337 if (DemandedBits.isSubsetOf(Known.Zero | Known.One)) { 2338 // Avoid folding to a constant if any OpaqueConstant is involved. 2339 const SDNode *N = Op.getNode(); 2340 for (SDNode *Op : 2341 llvm::make_range(SDNodeIterator::begin(N), SDNodeIterator::end(N))) { 2342 if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op)) 2343 if (C->isOpaque()) 2344 return false; 2345 } 2346 if (VT.isInteger()) 2347 return TLO.CombineTo(Op, TLO.DAG.getConstant(Known.One, dl, VT)); 2348 if (VT.isFloatingPoint()) 2349 return TLO.CombineTo( 2350 Op, 2351 TLO.DAG.getConstantFP( 2352 APFloat(TLO.DAG.EVTToAPFloatSemantics(VT), Known.One), dl, VT)); 2353 } 2354 2355 return false; 2356 } 2357 2358 bool TargetLowering::SimplifyDemandedVectorElts(SDValue Op, 2359 const APInt &DemandedElts, 2360 APInt &KnownUndef, 2361 APInt &KnownZero, 2362 DAGCombinerInfo &DCI) const { 2363 SelectionDAG &DAG = DCI.DAG; 2364 TargetLoweringOpt TLO(DAG, !DCI.isBeforeLegalize(), 2365 !DCI.isBeforeLegalizeOps()); 2366 2367 bool Simplified = 2368 SimplifyDemandedVectorElts(Op, DemandedElts, KnownUndef, KnownZero, TLO); 2369 if (Simplified) { 2370 DCI.AddToWorklist(Op.getNode()); 2371 DCI.CommitTargetLoweringOpt(TLO); 2372 } 2373 2374 return Simplified; 2375 } 2376 2377 /// Given a vector binary operation and known undefined elements for each input 2378 /// operand, compute whether each element of the output is undefined. 
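/// An output element is reported undefined when folding the corresponding
/// (constant or undef) input elements with the same opcode yields undef.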
2379 static APInt getKnownUndefForVectorBinop(SDValue BO, SelectionDAG &DAG, 2380 const APInt &UndefOp0, 2381 const APInt &UndefOp1) { 2382 EVT VT = BO.getValueType(); 2383 assert(DAG.getTargetLoweringInfo().isBinOp(BO.getOpcode()) && VT.isVector() && 2384 "Vector binop only"); 2385 2386 EVT EltVT = VT.getVectorElementType(); 2387 unsigned NumElts = VT.getVectorNumElements(); 2388 assert(UndefOp0.getBitWidth() == NumElts && 2389 UndefOp1.getBitWidth() == NumElts && "Bad type for undef analysis"); 2390 2391 auto getUndefOrConstantElt = [&](SDValue V, unsigned Index, 2392 const APInt &UndefVals) { 2393 if (UndefVals[Index]) 2394 return DAG.getUNDEF(EltVT); 2395 2396 if (auto *BV = dyn_cast<BuildVectorSDNode>(V)) { 2397 // Try hard to make sure that the getNode() call is not creating temporary 2398 // nodes. Ignore opaque integers because they do not constant fold. 2399 SDValue Elt = BV->getOperand(Index); 2400 auto *C = dyn_cast<ConstantSDNode>(Elt); 2401 if (isa<ConstantFPSDNode>(Elt) || Elt.isUndef() || (C && !C->isOpaque())) 2402 return Elt; 2403 } 2404 2405 return SDValue(); 2406 }; 2407 2408 APInt KnownUndef = APInt::getZero(NumElts); 2409 for (unsigned i = 0; i != NumElts; ++i) { 2410 // If both inputs for this element are either constant or undef and match 2411 // the element type, compute the constant/undef result for this element of 2412 // the vector. 2413 // TODO: Ideally we would use FoldConstantArithmetic() here, but that does 2414 // not handle FP constants. The code within getNode() should be refactored 2415 // to avoid the danger of creating a bogus temporary node here. 2416 SDValue C0 = getUndefOrConstantElt(BO.getOperand(0), i, UndefOp0); 2417 SDValue C1 = getUndefOrConstantElt(BO.getOperand(1), i, UndefOp1); 2418 if (C0 && C1 && C0.getValueType() == EltVT && C1.getValueType() == EltVT) 2419 if (DAG.getNode(BO.getOpcode(), SDLoc(BO), EltVT, C0, C1).isUndef()) 2420 KnownUndef.setBit(i); 2421 } 2422 return KnownUndef; 2423 } 2424 2425 bool TargetLowering::SimplifyDemandedVectorElts( 2426 SDValue Op, const APInt &OriginalDemandedElts, APInt &KnownUndef, 2427 APInt &KnownZero, TargetLoweringOpt &TLO, unsigned Depth, 2428 bool AssumeSingleUse) const { 2429 EVT VT = Op.getValueType(); 2430 unsigned Opcode = Op.getOpcode(); 2431 APInt DemandedElts = OriginalDemandedElts; 2432 unsigned NumElts = DemandedElts.getBitWidth(); 2433 assert(VT.isVector() && "Expected vector op"); 2434 2435 KnownUndef = KnownZero = APInt::getZero(NumElts); 2436 2437 // TODO: For now we assume we know nothing about scalable vectors. 2438 if (VT.isScalableVector()) 2439 return false; 2440 2441 assert(VT.getVectorNumElements() == NumElts && 2442 "Mask size mismatches value type element count!"); 2443 2444 // Undef operand. 2445 if (Op.isUndef()) { 2446 KnownUndef.setAllBits(); 2447 return false; 2448 } 2449 2450 // If Op has other users, assume that all elements are needed. 2451 if (!Op.getNode()->hasOneUse() && !AssumeSingleUse) 2452 DemandedElts.setAllBits(); 2453 2454 // Not demanding any elements from Op. 2455 if (DemandedElts == 0) { 2456 KnownUndef.setAllBits(); 2457 return TLO.CombineTo(Op, TLO.DAG.getUNDEF(VT)); 2458 } 2459 2460 // Limit search depth. 2461 if (Depth >= SelectionDAG::MaxRecursionDepth) 2462 return false; 2463 2464 SDLoc DL(Op); 2465 unsigned EltSizeInBits = VT.getScalarSizeInBits(); 2466 bool IsLE = TLO.DAG.getDataLayout().isLittleEndian(); 2467 2468 // Helper for demanding the specified elements and all the bits of both binary 2469 // operands. 
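  // The node is only rebuilt if at least one operand has a simpler form for
  // the demanded elements; otherwise the lambda reports that nothing changed.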
2470 auto SimplifyDemandedVectorEltsBinOp = [&](SDValue Op0, SDValue Op1) { 2471 SDValue NewOp0 = SimplifyMultipleUseDemandedVectorElts(Op0, DemandedElts, 2472 TLO.DAG, Depth + 1); 2473 SDValue NewOp1 = SimplifyMultipleUseDemandedVectorElts(Op1, DemandedElts, 2474 TLO.DAG, Depth + 1); 2475 if (NewOp0 || NewOp1) { 2476 SDValue NewOp = TLO.DAG.getNode( 2477 Opcode, SDLoc(Op), VT, NewOp0 ? NewOp0 : Op0, NewOp1 ? NewOp1 : Op1); 2478 return TLO.CombineTo(Op, NewOp); 2479 } 2480 return false; 2481 }; 2482 2483 switch (Opcode) { 2484 case ISD::SCALAR_TO_VECTOR: { 2485 if (!DemandedElts[0]) { 2486 KnownUndef.setAllBits(); 2487 return TLO.CombineTo(Op, TLO.DAG.getUNDEF(VT)); 2488 } 2489 SDValue ScalarSrc = Op.getOperand(0); 2490 if (ScalarSrc.getOpcode() == ISD::EXTRACT_VECTOR_ELT) { 2491 SDValue Src = ScalarSrc.getOperand(0); 2492 SDValue Idx = ScalarSrc.getOperand(1); 2493 EVT SrcVT = Src.getValueType(); 2494 2495 ElementCount SrcEltCnt = SrcVT.getVectorElementCount(); 2496 2497 if (SrcEltCnt.isScalable()) 2498 return false; 2499 2500 unsigned NumSrcElts = SrcEltCnt.getFixedValue(); 2501 if (isNullConstant(Idx)) { 2502 APInt SrcDemandedElts = APInt::getOneBitSet(NumSrcElts, 0); 2503 APInt SrcUndef = KnownUndef.zextOrTrunc(NumSrcElts); 2504 APInt SrcZero = KnownZero.zextOrTrunc(NumSrcElts); 2505 if (SimplifyDemandedVectorElts(Src, SrcDemandedElts, SrcUndef, SrcZero, 2506 TLO, Depth + 1)) 2507 return true; 2508 } 2509 } 2510 KnownUndef.setHighBits(NumElts - 1); 2511 break; 2512 } 2513 case ISD::BITCAST: { 2514 SDValue Src = Op.getOperand(0); 2515 EVT SrcVT = Src.getValueType(); 2516 2517 // We only handle vectors here. 2518 // TODO - investigate calling SimplifyDemandedBits/ComputeKnownBits? 2519 if (!SrcVT.isVector()) 2520 break; 2521 2522 // Fast handling of 'identity' bitcasts. 2523 unsigned NumSrcElts = SrcVT.getVectorNumElements(); 2524 if (NumSrcElts == NumElts) 2525 return SimplifyDemandedVectorElts(Src, DemandedElts, KnownUndef, 2526 KnownZero, TLO, Depth + 1); 2527 2528 APInt SrcDemandedElts, SrcZero, SrcUndef; 2529 2530 // Bitcast from 'large element' src vector to 'small element' vector, we 2531 // must demand a source element if any DemandedElt maps to it. 2532 if ((NumElts % NumSrcElts) == 0) { 2533 unsigned Scale = NumElts / NumSrcElts; 2534 SrcDemandedElts = APIntOps::ScaleBitMask(DemandedElts, NumSrcElts); 2535 if (SimplifyDemandedVectorElts(Src, SrcDemandedElts, SrcUndef, SrcZero, 2536 TLO, Depth + 1)) 2537 return true; 2538 2539 // Try calling SimplifyDemandedBits, converting demanded elts to the bits 2540 // of the large element. 2541 // TODO - bigendian once we have test coverage. 2542 if (IsLE) { 2543 unsigned SrcEltSizeInBits = SrcVT.getScalarSizeInBits(); 2544 APInt SrcDemandedBits = APInt::getZero(SrcEltSizeInBits); 2545 for (unsigned i = 0; i != NumElts; ++i) 2546 if (DemandedElts[i]) { 2547 unsigned Ofs = (i % Scale) * EltSizeInBits; 2548 SrcDemandedBits.setBits(Ofs, Ofs + EltSizeInBits); 2549 } 2550 2551 KnownBits Known; 2552 if (SimplifyDemandedBits(Src, SrcDemandedBits, SrcDemandedElts, Known, 2553 TLO, Depth + 1)) 2554 return true; 2555 } 2556 2557 // If the src element is zero/undef then all the output elements will be - 2558 // only demanded elements are guaranteed to be correct. 
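      // e.g. for (v4i32 (bitcast (v2i64 X))): if source element 1 is known
      // zero then result elements 2 and 3 are known zero too. The
      // SrcDemandedElts guard ensures we only rely on source lanes that were
      // actually analysed above.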
2559 for (unsigned i = 0; i != NumSrcElts; ++i) { 2560 if (SrcDemandedElts[i]) { 2561 if (SrcZero[i]) 2562 KnownZero.setBits(i * Scale, (i + 1) * Scale); 2563 if (SrcUndef[i]) 2564 KnownUndef.setBits(i * Scale, (i + 1) * Scale); 2565 } 2566 } 2567 } 2568 2569 // Bitcast from 'small element' src vector to 'large element' vector, we 2570 // demand all smaller source elements covered by the larger demanded element 2571 // of this vector. 2572 if ((NumSrcElts % NumElts) == 0) { 2573 unsigned Scale = NumSrcElts / NumElts; 2574 SrcDemandedElts = APIntOps::ScaleBitMask(DemandedElts, NumSrcElts); 2575 if (SimplifyDemandedVectorElts(Src, SrcDemandedElts, SrcUndef, SrcZero, 2576 TLO, Depth + 1)) 2577 return true; 2578 2579 // If all the src elements covering an output element are zero/undef, then 2580 // the output element will be as well, assuming it was demanded. 2581 for (unsigned i = 0; i != NumElts; ++i) { 2582 if (DemandedElts[i]) { 2583 if (SrcZero.extractBits(Scale, i * Scale).isAllOnes()) 2584 KnownZero.setBit(i); 2585 if (SrcUndef.extractBits(Scale, i * Scale).isAllOnes()) 2586 KnownUndef.setBit(i); 2587 } 2588 } 2589 } 2590 break; 2591 } 2592 case ISD::BUILD_VECTOR: { 2593 // Check all elements and simplify any unused elements with UNDEF. 2594 if (!DemandedElts.isAllOnes()) { 2595 // Don't simplify BROADCASTS. 2596 if (llvm::any_of(Op->op_values(), 2597 [&](SDValue Elt) { return Op.getOperand(0) != Elt; })) { 2598 SmallVector<SDValue, 32> Ops(Op->op_begin(), Op->op_end()); 2599 bool Updated = false; 2600 for (unsigned i = 0; i != NumElts; ++i) { 2601 if (!DemandedElts[i] && !Ops[i].isUndef()) { 2602 Ops[i] = TLO.DAG.getUNDEF(Ops[0].getValueType()); 2603 KnownUndef.setBit(i); 2604 Updated = true; 2605 } 2606 } 2607 if (Updated) 2608 return TLO.CombineTo(Op, TLO.DAG.getBuildVector(VT, DL, Ops)); 2609 } 2610 } 2611 for (unsigned i = 0; i != NumElts; ++i) { 2612 SDValue SrcOp = Op.getOperand(i); 2613 if (SrcOp.isUndef()) { 2614 KnownUndef.setBit(i); 2615 } else if (EltSizeInBits == SrcOp.getScalarValueSizeInBits() && 2616 (isNullConstant(SrcOp) || isNullFPConstant(SrcOp))) { 2617 KnownZero.setBit(i); 2618 } 2619 } 2620 break; 2621 } 2622 case ISD::CONCAT_VECTORS: { 2623 EVT SubVT = Op.getOperand(0).getValueType(); 2624 unsigned NumSubVecs = Op.getNumOperands(); 2625 unsigned NumSubElts = SubVT.getVectorNumElements(); 2626 for (unsigned i = 0; i != NumSubVecs; ++i) { 2627 SDValue SubOp = Op.getOperand(i); 2628 APInt SubElts = DemandedElts.extractBits(NumSubElts, i * NumSubElts); 2629 APInt SubUndef, SubZero; 2630 if (SimplifyDemandedVectorElts(SubOp, SubElts, SubUndef, SubZero, TLO, 2631 Depth + 1)) 2632 return true; 2633 KnownUndef.insertBits(SubUndef, i * NumSubElts); 2634 KnownZero.insertBits(SubZero, i * NumSubElts); 2635 } 2636 break; 2637 } 2638 case ISD::INSERT_SUBVECTOR: { 2639 // Demand any elements from the subvector and the remainder from the src its 2640 // inserted into. 2641 SDValue Src = Op.getOperand(0); 2642 SDValue Sub = Op.getOperand(1); 2643 uint64_t Idx = Op.getConstantOperandVal(2); 2644 unsigned NumSubElts = Sub.getValueType().getVectorNumElements(); 2645 APInt DemandedSubElts = DemandedElts.extractBits(NumSubElts, Idx); 2646 APInt DemandedSrcElts = DemandedElts; 2647 DemandedSrcElts.insertBits(APInt::getZero(NumSubElts), Idx); 2648 2649 APInt SubUndef, SubZero; 2650 if (SimplifyDemandedVectorElts(Sub, DemandedSubElts, SubUndef, SubZero, TLO, 2651 Depth + 1)) 2652 return true; 2653 2654 // If none of the src operand elements are demanded, replace it with undef. 
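    // e.g. inserting a v2f32 at index 2 of a v4f32 when only elements 2 and 3
    // are demanded: the base vector contributes nothing, so it can become
    // undef while the insertion itself is preserved.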
2655 if (!DemandedSrcElts && !Src.isUndef()) 2656 return TLO.CombineTo(Op, TLO.DAG.getNode(ISD::INSERT_SUBVECTOR, DL, VT, 2657 TLO.DAG.getUNDEF(VT), Sub, 2658 Op.getOperand(2))); 2659 2660 if (SimplifyDemandedVectorElts(Src, DemandedSrcElts, KnownUndef, KnownZero, 2661 TLO, Depth + 1)) 2662 return true; 2663 KnownUndef.insertBits(SubUndef, Idx); 2664 KnownZero.insertBits(SubZero, Idx); 2665 2666 // Attempt to avoid multi-use ops if we don't need anything from them. 2667 if (!DemandedSrcElts.isAllOnes() || !DemandedSubElts.isAllOnes()) { 2668 SDValue NewSrc = SimplifyMultipleUseDemandedVectorElts( 2669 Src, DemandedSrcElts, TLO.DAG, Depth + 1); 2670 SDValue NewSub = SimplifyMultipleUseDemandedVectorElts( 2671 Sub, DemandedSubElts, TLO.DAG, Depth + 1); 2672 if (NewSrc || NewSub) { 2673 NewSrc = NewSrc ? NewSrc : Src; 2674 NewSub = NewSub ? NewSub : Sub; 2675 SDValue NewOp = TLO.DAG.getNode(Op.getOpcode(), SDLoc(Op), VT, NewSrc, 2676 NewSub, Op.getOperand(2)); 2677 return TLO.CombineTo(Op, NewOp); 2678 } 2679 } 2680 break; 2681 } 2682 case ISD::EXTRACT_SUBVECTOR: { 2683 // Offset the demanded elts by the subvector index. 2684 SDValue Src = Op.getOperand(0); 2685 if (Src.getValueType().isScalableVector()) 2686 break; 2687 uint64_t Idx = Op.getConstantOperandVal(1); 2688 unsigned NumSrcElts = Src.getValueType().getVectorNumElements(); 2689 APInt DemandedSrcElts = DemandedElts.zextOrSelf(NumSrcElts).shl(Idx); 2690 2691 APInt SrcUndef, SrcZero; 2692 if (SimplifyDemandedVectorElts(Src, DemandedSrcElts, SrcUndef, SrcZero, TLO, 2693 Depth + 1)) 2694 return true; 2695 KnownUndef = SrcUndef.extractBits(NumElts, Idx); 2696 KnownZero = SrcZero.extractBits(NumElts, Idx); 2697 2698 // Attempt to avoid multi-use ops if we don't need anything from them. 2699 if (!DemandedElts.isAllOnes()) { 2700 SDValue NewSrc = SimplifyMultipleUseDemandedVectorElts( 2701 Src, DemandedSrcElts, TLO.DAG, Depth + 1); 2702 if (NewSrc) { 2703 SDValue NewOp = TLO.DAG.getNode(Op.getOpcode(), SDLoc(Op), VT, NewSrc, 2704 Op.getOperand(1)); 2705 return TLO.CombineTo(Op, NewOp); 2706 } 2707 } 2708 break; 2709 } 2710 case ISD::INSERT_VECTOR_ELT: { 2711 SDValue Vec = Op.getOperand(0); 2712 SDValue Scl = Op.getOperand(1); 2713 auto *CIdx = dyn_cast<ConstantSDNode>(Op.getOperand(2)); 2714 2715 // For a legal, constant insertion index, if we don't need this insertion 2716 // then strip it, else remove it from the demanded elts. 2717 if (CIdx && CIdx->getAPIntValue().ult(NumElts)) { 2718 unsigned Idx = CIdx->getZExtValue(); 2719 if (!DemandedElts[Idx]) 2720 return TLO.CombineTo(Op, Vec); 2721 2722 APInt DemandedVecElts(DemandedElts); 2723 DemandedVecElts.clearBit(Idx); 2724 if (SimplifyDemandedVectorElts(Vec, DemandedVecElts, KnownUndef, 2725 KnownZero, TLO, Depth + 1)) 2726 return true; 2727 2728 KnownUndef.setBitVal(Idx, Scl.isUndef()); 2729 2730 KnownZero.setBitVal(Idx, isNullConstant(Scl) || isNullFPConstant(Scl)); 2731 break; 2732 } 2733 2734 APInt VecUndef, VecZero; 2735 if (SimplifyDemandedVectorElts(Vec, DemandedElts, VecUndef, VecZero, TLO, 2736 Depth + 1)) 2737 return true; 2738 // Without knowing the insertion index we can't set KnownUndef/KnownZero. 2739 break; 2740 } 2741 case ISD::VSELECT: { 2742 // Try to transform the select condition based on the current demanded 2743 // elements. 2744 // TODO: If a condition element is undef, we can choose from one arm of the 2745 // select (and if one arm is undef, then we can propagate that to the 2746 // result). 
2747 // TODO - add support for constant vselect masks (see IR version of this). 2748 APInt UnusedUndef, UnusedZero; 2749 if (SimplifyDemandedVectorElts(Op.getOperand(0), DemandedElts, UnusedUndef, 2750 UnusedZero, TLO, Depth + 1)) 2751 return true; 2752 2753 // See if we can simplify either vselect operand. 2754 APInt DemandedLHS(DemandedElts); 2755 APInt DemandedRHS(DemandedElts); 2756 APInt UndefLHS, ZeroLHS; 2757 APInt UndefRHS, ZeroRHS; 2758 if (SimplifyDemandedVectorElts(Op.getOperand(1), DemandedLHS, UndefLHS, 2759 ZeroLHS, TLO, Depth + 1)) 2760 return true; 2761 if (SimplifyDemandedVectorElts(Op.getOperand(2), DemandedRHS, UndefRHS, 2762 ZeroRHS, TLO, Depth + 1)) 2763 return true; 2764 2765 KnownUndef = UndefLHS & UndefRHS; 2766 KnownZero = ZeroLHS & ZeroRHS; 2767 break; 2768 } 2769 case ISD::VECTOR_SHUFFLE: { 2770 ArrayRef<int> ShuffleMask = cast<ShuffleVectorSDNode>(Op)->getMask(); 2771 2772 // Collect demanded elements from shuffle operands.. 2773 APInt DemandedLHS(NumElts, 0); 2774 APInt DemandedRHS(NumElts, 0); 2775 for (unsigned i = 0; i != NumElts; ++i) { 2776 int M = ShuffleMask[i]; 2777 if (M < 0 || !DemandedElts[i]) 2778 continue; 2779 assert(0 <= M && M < (int)(2 * NumElts) && "Shuffle index out of range"); 2780 if (M < (int)NumElts) 2781 DemandedLHS.setBit(M); 2782 else 2783 DemandedRHS.setBit(M - NumElts); 2784 } 2785 2786 // See if we can simplify either shuffle operand. 2787 APInt UndefLHS, ZeroLHS; 2788 APInt UndefRHS, ZeroRHS; 2789 if (SimplifyDemandedVectorElts(Op.getOperand(0), DemandedLHS, UndefLHS, 2790 ZeroLHS, TLO, Depth + 1)) 2791 return true; 2792 if (SimplifyDemandedVectorElts(Op.getOperand(1), DemandedRHS, UndefRHS, 2793 ZeroRHS, TLO, Depth + 1)) 2794 return true; 2795 2796 // Simplify mask using undef elements from LHS/RHS. 2797 bool Updated = false; 2798 bool IdentityLHS = true, IdentityRHS = true; 2799 SmallVector<int, 32> NewMask(ShuffleMask.begin(), ShuffleMask.end()); 2800 for (unsigned i = 0; i != NumElts; ++i) { 2801 int &M = NewMask[i]; 2802 if (M < 0) 2803 continue; 2804 if (!DemandedElts[i] || (M < (int)NumElts && UndefLHS[M]) || 2805 (M >= (int)NumElts && UndefRHS[M - NumElts])) { 2806 Updated = true; 2807 M = -1; 2808 } 2809 IdentityLHS &= (M < 0) || (M == (int)i); 2810 IdentityRHS &= (M < 0) || ((M - NumElts) == i); 2811 } 2812 2813 // Update legal shuffle masks based on demanded elements if it won't reduce 2814 // to Identity which can cause premature removal of the shuffle mask. 2815 if (Updated && !IdentityLHS && !IdentityRHS && !TLO.LegalOps) { 2816 SDValue LegalShuffle = 2817 buildLegalVectorShuffle(VT, DL, Op.getOperand(0), Op.getOperand(1), 2818 NewMask, TLO.DAG); 2819 if (LegalShuffle) 2820 return TLO.CombineTo(Op, LegalShuffle); 2821 } 2822 2823 // Propagate undef/zero elements from LHS/RHS. 
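    // Note that this walks the original mask: a -1 entry selects no input and
    // is undef, while any other entry inherits the known undef/zero state of
    // the operand element it reads.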
2824 for (unsigned i = 0; i != NumElts; ++i) { 2825 int M = ShuffleMask[i]; 2826 if (M < 0) { 2827 KnownUndef.setBit(i); 2828 } else if (M < (int)NumElts) { 2829 if (UndefLHS[M]) 2830 KnownUndef.setBit(i); 2831 if (ZeroLHS[M]) 2832 KnownZero.setBit(i); 2833 } else { 2834 if (UndefRHS[M - NumElts]) 2835 KnownUndef.setBit(i); 2836 if (ZeroRHS[M - NumElts]) 2837 KnownZero.setBit(i); 2838 } 2839 } 2840 break; 2841 } 2842 case ISD::ANY_EXTEND_VECTOR_INREG: 2843 case ISD::SIGN_EXTEND_VECTOR_INREG: 2844 case ISD::ZERO_EXTEND_VECTOR_INREG: { 2845 APInt SrcUndef, SrcZero; 2846 SDValue Src = Op.getOperand(0); 2847 unsigned NumSrcElts = Src.getValueType().getVectorNumElements(); 2848 APInt DemandedSrcElts = DemandedElts.zextOrSelf(NumSrcElts); 2849 if (SimplifyDemandedVectorElts(Src, DemandedSrcElts, SrcUndef, SrcZero, TLO, 2850 Depth + 1)) 2851 return true; 2852 KnownZero = SrcZero.zextOrTrunc(NumElts); 2853 KnownUndef = SrcUndef.zextOrTrunc(NumElts); 2854 2855 if (IsLE && Op.getOpcode() == ISD::ANY_EXTEND_VECTOR_INREG && 2856 Op.getValueSizeInBits() == Src.getValueSizeInBits() && 2857 DemandedSrcElts == 1) { 2858 // aext - if we just need the bottom element then we can bitcast. 2859 return TLO.CombineTo(Op, TLO.DAG.getBitcast(VT, Src)); 2860 } 2861 2862 if (Op.getOpcode() == ISD::ZERO_EXTEND_VECTOR_INREG) { 2863 // zext(undef) upper bits are guaranteed to be zero. 2864 if (DemandedElts.isSubsetOf(KnownUndef)) 2865 return TLO.CombineTo(Op, TLO.DAG.getConstant(0, SDLoc(Op), VT)); 2866 KnownUndef.clearAllBits(); 2867 2868 // zext - if we just need the bottom element then we can mask: 2869 // zext(and(x,c)) -> and(x,c') iff the zext is the only user of the and. 2870 if (IsLE && DemandedSrcElts == 1 && Src.getOpcode() == ISD::AND && 2871 Op->isOnlyUserOf(Src.getNode()) && 2872 Op.getValueSizeInBits() == Src.getValueSizeInBits()) { 2873 SDLoc DL(Op); 2874 EVT SrcVT = Src.getValueType(); 2875 EVT SrcSVT = SrcVT.getScalarType(); 2876 SmallVector<SDValue> MaskElts; 2877 MaskElts.push_back(TLO.DAG.getAllOnesConstant(DL, SrcSVT)); 2878 MaskElts.append(NumSrcElts - 1, TLO.DAG.getConstant(0, DL, SrcSVT)); 2879 SDValue Mask = TLO.DAG.getBuildVector(SrcVT, DL, MaskElts); 2880 if (SDValue Fold = TLO.DAG.FoldConstantArithmetic( 2881 ISD::AND, DL, SrcVT, {Src.getOperand(1), Mask})) { 2882 Fold = TLO.DAG.getNode(ISD::AND, DL, SrcVT, Src.getOperand(0), Fold); 2883 return TLO.CombineTo(Op, TLO.DAG.getBitcast(VT, Fold)); 2884 } 2885 } 2886 } 2887 break; 2888 } 2889 2890 // TODO: There are more binop opcodes that could be handled here - MIN, 2891 // MAX, saturated math, etc. 
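  // ISD::ADD gets its own case so that (add X, X), where the add is X's only
  // user, can first be simplified under the single-use assumption; it then
  // falls through to the generic binop handling below.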
2892 case ISD::ADD: { 2893 SDValue Op0 = Op.getOperand(0); 2894 SDValue Op1 = Op.getOperand(1); 2895 if (Op0 == Op1 && Op->isOnlyUserOf(Op0.getNode())) { 2896 APInt UndefLHS, ZeroLHS; 2897 if (SimplifyDemandedVectorElts(Op0, DemandedElts, UndefLHS, ZeroLHS, TLO, 2898 Depth + 1, /*AssumeSingleUse*/ true)) 2899 return true; 2900 } 2901 LLVM_FALLTHROUGH; 2902 } 2903 case ISD::OR: 2904 case ISD::XOR: 2905 case ISD::SUB: 2906 case ISD::FADD: 2907 case ISD::FSUB: 2908 case ISD::FMUL: 2909 case ISD::FDIV: 2910 case ISD::FREM: { 2911 SDValue Op0 = Op.getOperand(0); 2912 SDValue Op1 = Op.getOperand(1); 2913 2914 APInt UndefRHS, ZeroRHS; 2915 if (SimplifyDemandedVectorElts(Op1, DemandedElts, UndefRHS, ZeroRHS, TLO, 2916 Depth + 1)) 2917 return true; 2918 APInt UndefLHS, ZeroLHS; 2919 if (SimplifyDemandedVectorElts(Op0, DemandedElts, UndefLHS, ZeroLHS, TLO, 2920 Depth + 1)) 2921 return true; 2922 2923 KnownZero = ZeroLHS & ZeroRHS; 2924 KnownUndef = getKnownUndefForVectorBinop(Op, TLO.DAG, UndefLHS, UndefRHS); 2925 2926 // Attempt to avoid multi-use ops if we don't need anything from them. 2927 // TODO - use KnownUndef to relax the demandedelts? 2928 if (!DemandedElts.isAllOnes()) 2929 if (SimplifyDemandedVectorEltsBinOp(Op0, Op1)) 2930 return true; 2931 break; 2932 } 2933 case ISD::SHL: 2934 case ISD::SRL: 2935 case ISD::SRA: 2936 case ISD::ROTL: 2937 case ISD::ROTR: { 2938 SDValue Op0 = Op.getOperand(0); 2939 SDValue Op1 = Op.getOperand(1); 2940 2941 APInt UndefRHS, ZeroRHS; 2942 if (SimplifyDemandedVectorElts(Op1, DemandedElts, UndefRHS, ZeroRHS, TLO, 2943 Depth + 1)) 2944 return true; 2945 APInt UndefLHS, ZeroLHS; 2946 if (SimplifyDemandedVectorElts(Op0, DemandedElts, UndefLHS, ZeroLHS, TLO, 2947 Depth + 1)) 2948 return true; 2949 2950 KnownZero = ZeroLHS; 2951 KnownUndef = UndefLHS & UndefRHS; // TODO: use getKnownUndefForVectorBinop? 2952 2953 // Attempt to avoid multi-use ops if we don't need anything from them. 2954 // TODO - use KnownUndef to relax the demandedelts? 2955 if (!DemandedElts.isAllOnes()) 2956 if (SimplifyDemandedVectorEltsBinOp(Op0, Op1)) 2957 return true; 2958 break; 2959 } 2960 case ISD::MUL: 2961 case ISD::AND: { 2962 SDValue Op0 = Op.getOperand(0); 2963 SDValue Op1 = Op.getOperand(1); 2964 2965 APInt SrcUndef, SrcZero; 2966 if (SimplifyDemandedVectorElts(Op1, DemandedElts, SrcUndef, SrcZero, TLO, 2967 Depth + 1)) 2968 return true; 2969 if (SimplifyDemandedVectorElts(Op0, DemandedElts, KnownUndef, KnownZero, 2970 TLO, Depth + 1)) 2971 return true; 2972 2973 // If either side has a zero element, then the result element is zero, even 2974 // if the other is an UNDEF. 2975 // TODO: Extend getKnownUndefForVectorBinop to also deal with known zeros 2976 // and then handle 'and' nodes with the rest of the binop opcodes. 2977 KnownZero |= SrcZero; 2978 KnownUndef &= SrcUndef; 2979 KnownUndef &= ~KnownZero; 2980 2981 // Attempt to avoid multi-use ops if we don't need anything from them. 2982 // TODO - use KnownUndef to relax the demandedelts? 2983 if (!DemandedElts.isAllOnes()) 2984 if (SimplifyDemandedVectorEltsBinOp(Op0, Op1)) 2985 return true; 2986 break; 2987 } 2988 case ISD::TRUNCATE: 2989 case ISD::SIGN_EXTEND: 2990 case ISD::ZERO_EXTEND: 2991 if (SimplifyDemandedVectorElts(Op.getOperand(0), DemandedElts, KnownUndef, 2992 KnownZero, TLO, Depth + 1)) 2993 return true; 2994 2995 if (Op.getOpcode() == ISD::ZERO_EXTEND) { 2996 // zext(undef) upper bits are guaranteed to be zero. 
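      // If every demanded element is undef the whole node folds to zero;
      // otherwise the undef flags are cleared below, because a zero-extended
      // undef element is not itself fully undef (its high bits are zero).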
2997 if (DemandedElts.isSubsetOf(KnownUndef)) 2998 return TLO.CombineTo(Op, TLO.DAG.getConstant(0, SDLoc(Op), VT)); 2999 KnownUndef.clearAllBits(); 3000 } 3001 break; 3002 default: { 3003 if (Op.getOpcode() >= ISD::BUILTIN_OP_END) { 3004 if (SimplifyDemandedVectorEltsForTargetNode(Op, DemandedElts, KnownUndef, 3005 KnownZero, TLO, Depth)) 3006 return true; 3007 } else { 3008 KnownBits Known; 3009 APInt DemandedBits = APInt::getAllOnes(EltSizeInBits); 3010 if (SimplifyDemandedBits(Op, DemandedBits, OriginalDemandedElts, Known, 3011 TLO, Depth, AssumeSingleUse)) 3012 return true; 3013 } 3014 break; 3015 } 3016 } 3017 assert((KnownUndef & KnownZero) == 0 && "Elements flagged as undef AND zero"); 3018 3019 // Constant fold all undef cases. 3020 // TODO: Handle zero cases as well. 3021 if (DemandedElts.isSubsetOf(KnownUndef)) 3022 return TLO.CombineTo(Op, TLO.DAG.getUNDEF(VT)); 3023 3024 return false; 3025 } 3026 3027 /// Determine which of the bits specified in Mask are known to be either zero or 3028 /// one and return them in the Known. 3029 void TargetLowering::computeKnownBitsForTargetNode(const SDValue Op, 3030 KnownBits &Known, 3031 const APInt &DemandedElts, 3032 const SelectionDAG &DAG, 3033 unsigned Depth) const { 3034 assert((Op.getOpcode() >= ISD::BUILTIN_OP_END || 3035 Op.getOpcode() == ISD::INTRINSIC_WO_CHAIN || 3036 Op.getOpcode() == ISD::INTRINSIC_W_CHAIN || 3037 Op.getOpcode() == ISD::INTRINSIC_VOID) && 3038 "Should use MaskedValueIsZero if you don't know whether Op" 3039 " is a target node!"); 3040 Known.resetAll(); 3041 } 3042 3043 void TargetLowering::computeKnownBitsForTargetInstr( 3044 GISelKnownBits &Analysis, Register R, KnownBits &Known, 3045 const APInt &DemandedElts, const MachineRegisterInfo &MRI, 3046 unsigned Depth) const { 3047 Known.resetAll(); 3048 } 3049 3050 void TargetLowering::computeKnownBitsForFrameIndex( 3051 const int FrameIdx, KnownBits &Known, const MachineFunction &MF) const { 3052 // The low bits are known zero if the pointer is aligned. 3053 Known.Zero.setLowBits(Log2(MF.getFrameInfo().getObjectAlign(FrameIdx))); 3054 } 3055 3056 Align TargetLowering::computeKnownAlignForTargetInstr( 3057 GISelKnownBits &Analysis, Register R, const MachineRegisterInfo &MRI, 3058 unsigned Depth) const { 3059 return Align(1); 3060 } 3061 3062 /// This method can be implemented by targets that want to expose additional 3063 /// information about sign bits to the DAG Combiner. 
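/// The default implementation conservatively returns 1 (only the sign bit
/// itself), which is always safe.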
3064 unsigned TargetLowering::ComputeNumSignBitsForTargetNode(SDValue Op, 3065 const APInt &, 3066 const SelectionDAG &, 3067 unsigned Depth) const { 3068 assert((Op.getOpcode() >= ISD::BUILTIN_OP_END || 3069 Op.getOpcode() == ISD::INTRINSIC_WO_CHAIN || 3070 Op.getOpcode() == ISD::INTRINSIC_W_CHAIN || 3071 Op.getOpcode() == ISD::INTRINSIC_VOID) && 3072 "Should use ComputeNumSignBits if you don't know whether Op" 3073 " is a target node!"); 3074 return 1; 3075 } 3076 3077 unsigned TargetLowering::computeNumSignBitsForTargetInstr( 3078 GISelKnownBits &Analysis, Register R, const APInt &DemandedElts, 3079 const MachineRegisterInfo &MRI, unsigned Depth) const { 3080 return 1; 3081 } 3082 3083 bool TargetLowering::SimplifyDemandedVectorEltsForTargetNode( 3084 SDValue Op, const APInt &DemandedElts, APInt &KnownUndef, APInt &KnownZero, 3085 TargetLoweringOpt &TLO, unsigned Depth) const { 3086 assert((Op.getOpcode() >= ISD::BUILTIN_OP_END || 3087 Op.getOpcode() == ISD::INTRINSIC_WO_CHAIN || 3088 Op.getOpcode() == ISD::INTRINSIC_W_CHAIN || 3089 Op.getOpcode() == ISD::INTRINSIC_VOID) && 3090 "Should use SimplifyDemandedVectorElts if you don't know whether Op" 3091 " is a target node!"); 3092 return false; 3093 } 3094 3095 bool TargetLowering::SimplifyDemandedBitsForTargetNode( 3096 SDValue Op, const APInt &DemandedBits, const APInt &DemandedElts, 3097 KnownBits &Known, TargetLoweringOpt &TLO, unsigned Depth) const { 3098 assert((Op.getOpcode() >= ISD::BUILTIN_OP_END || 3099 Op.getOpcode() == ISD::INTRINSIC_WO_CHAIN || 3100 Op.getOpcode() == ISD::INTRINSIC_W_CHAIN || 3101 Op.getOpcode() == ISD::INTRINSIC_VOID) && 3102 "Should use SimplifyDemandedBits if you don't know whether Op" 3103 " is a target node!"); 3104 computeKnownBitsForTargetNode(Op, Known, DemandedElts, TLO.DAG, Depth); 3105 return false; 3106 } 3107 3108 SDValue TargetLowering::SimplifyMultipleUseDemandedBitsForTargetNode( 3109 SDValue Op, const APInt &DemandedBits, const APInt &DemandedElts, 3110 SelectionDAG &DAG, unsigned Depth) const { 3111 assert( 3112 (Op.getOpcode() >= ISD::BUILTIN_OP_END || 3113 Op.getOpcode() == ISD::INTRINSIC_WO_CHAIN || 3114 Op.getOpcode() == ISD::INTRINSIC_W_CHAIN || 3115 Op.getOpcode() == ISD::INTRINSIC_VOID) && 3116 "Should use SimplifyMultipleUseDemandedBits if you don't know whether Op" 3117 " is a target node!"); 3118 return SDValue(); 3119 } 3120 3121 SDValue 3122 TargetLowering::buildLegalVectorShuffle(EVT VT, const SDLoc &DL, SDValue N0, 3123 SDValue N1, MutableArrayRef<int> Mask, 3124 SelectionDAG &DAG) const { 3125 bool LegalMask = isShuffleMaskLegal(Mask, VT); 3126 if (!LegalMask) { 3127 std::swap(N0, N1); 3128 ShuffleVectorSDNode::commuteMask(Mask); 3129 LegalMask = isShuffleMaskLegal(Mask, VT); 3130 } 3131 3132 if (!LegalMask) 3133 return SDValue(); 3134 3135 return DAG.getVectorShuffle(VT, DL, N0, N1, Mask); 3136 } 3137 3138 const Constant *TargetLowering::getTargetConstantFromLoad(LoadSDNode*) const { 3139 return nullptr; 3140 } 3141 3142 bool TargetLowering::isGuaranteedNotToBeUndefOrPoisonForTargetNode( 3143 SDValue Op, const APInt &DemandedElts, const SelectionDAG &DAG, 3144 bool PoisonOnly, unsigned Depth) const { 3145 assert( 3146 (Op.getOpcode() >= ISD::BUILTIN_OP_END || 3147 Op.getOpcode() == ISD::INTRINSIC_WO_CHAIN || 3148 Op.getOpcode() == ISD::INTRINSIC_W_CHAIN || 3149 Op.getOpcode() == ISD::INTRINSIC_VOID) && 3150 "Should use isGuaranteedNotToBeUndefOrPoison if you don't know whether Op" 3151 " is a target node!"); 3152 return false; 3153 } 3154 3155 bool 
TargetLowering::isKnownNeverNaNForTargetNode(SDValue Op, 3156 const SelectionDAG &DAG, 3157 bool SNaN, 3158 unsigned Depth) const { 3159 assert((Op.getOpcode() >= ISD::BUILTIN_OP_END || 3160 Op.getOpcode() == ISD::INTRINSIC_WO_CHAIN || 3161 Op.getOpcode() == ISD::INTRINSIC_W_CHAIN || 3162 Op.getOpcode() == ISD::INTRINSIC_VOID) && 3163 "Should use isKnownNeverNaN if you don't know whether Op" 3164 " is a target node!"); 3165 return false; 3166 } 3167 3168 bool TargetLowering::isSplatValueForTargetNode(SDValue Op, 3169 const APInt &DemandedElts, 3170 APInt &UndefElts, 3171 unsigned Depth) const { 3172 assert((Op.getOpcode() >= ISD::BUILTIN_OP_END || 3173 Op.getOpcode() == ISD::INTRINSIC_WO_CHAIN || 3174 Op.getOpcode() == ISD::INTRINSIC_W_CHAIN || 3175 Op.getOpcode() == ISD::INTRINSIC_VOID) && 3176 "Should use isSplatValue if you don't know whether Op" 3177 " is a target node!"); 3178 return false; 3179 } 3180 3181 // FIXME: Ideally, this would use ISD::isConstantSplatVector(), but that must 3182 // work with truncating build vectors and vectors with elements of less than 3183 // 8 bits. 3184 bool TargetLowering::isConstTrueVal(const SDNode *N) const { 3185 if (!N) 3186 return false; 3187 3188 APInt CVal; 3189 if (auto *CN = dyn_cast<ConstantSDNode>(N)) { 3190 CVal = CN->getAPIntValue(); 3191 } else if (auto *BV = dyn_cast<BuildVectorSDNode>(N)) { 3192 auto *CN = BV->getConstantSplatNode(); 3193 if (!CN) 3194 return false; 3195 3196 // If this is a truncating build vector, truncate the splat value. 3197 // Otherwise, we may fail to match the expected values below. 3198 unsigned BVEltWidth = BV->getValueType(0).getScalarSizeInBits(); 3199 CVal = CN->getAPIntValue(); 3200 if (BVEltWidth < CVal.getBitWidth()) 3201 CVal = CVal.trunc(BVEltWidth); 3202 } else { 3203 return false; 3204 } 3205 3206 switch (getBooleanContents(N->getValueType(0))) { 3207 case UndefinedBooleanContent: 3208 return CVal[0]; 3209 case ZeroOrOneBooleanContent: 3210 return CVal.isOne(); 3211 case ZeroOrNegativeOneBooleanContent: 3212 return CVal.isAllOnes(); 3213 } 3214 3215 llvm_unreachable("Invalid boolean contents"); 3216 } 3217 3218 bool TargetLowering::isConstFalseVal(const SDNode *N) const { 3219 if (!N) 3220 return false; 3221 3222 const ConstantSDNode *CN = dyn_cast<ConstantSDNode>(N); 3223 if (!CN) { 3224 const BuildVectorSDNode *BV = dyn_cast<BuildVectorSDNode>(N); 3225 if (!BV) 3226 return false; 3227 3228 // Only interested in constant splats, we don't care about undef 3229 // elements in identifying boolean constants and getConstantSplatNode 3230 // returns NULL if all ops are undef; 3231 CN = BV->getConstantSplatNode(); 3232 if (!CN) 3233 return false; 3234 } 3235 3236 if (getBooleanContents(N->getValueType(0)) == UndefinedBooleanContent) 3237 return !CN->getAPIntValue()[0]; 3238 3239 return CN->isZero(); 3240 } 3241 3242 bool TargetLowering::isExtendedTrueVal(const ConstantSDNode *N, EVT VT, 3243 bool SExt) const { 3244 if (VT == MVT::i1) 3245 return N->isOne(); 3246 3247 TargetLowering::BooleanContent Cnt = getBooleanContents(VT); 3248 switch (Cnt) { 3249 case TargetLowering::ZeroOrOneBooleanContent: 3250 // An extended value of 1 is always true, unless its original type is i1, 3251 // in which case it will be sign extended to -1. 
3252 return (N->isOne() && !SExt) || (SExt && (N->getValueType(0) != MVT::i1)); 3253 case TargetLowering::UndefinedBooleanContent: 3254 case TargetLowering::ZeroOrNegativeOneBooleanContent: 3255 return N->isAllOnes() && SExt; 3256 } 3257 llvm_unreachable("Unexpected enumeration."); 3258 } 3259 3260 /// This helper function of SimplifySetCC tries to optimize the comparison when 3261 /// either operand of the SetCC node is a bitwise-and instruction. 3262 SDValue TargetLowering::foldSetCCWithAnd(EVT VT, SDValue N0, SDValue N1, 3263 ISD::CondCode Cond, const SDLoc &DL, 3264 DAGCombinerInfo &DCI) const { 3265 // Match these patterns in any of their permutations: 3266 // (X & Y) == Y 3267 // (X & Y) != Y 3268 if (N1.getOpcode() == ISD::AND && N0.getOpcode() != ISD::AND) 3269 std::swap(N0, N1); 3270 3271 EVT OpVT = N0.getValueType(); 3272 if (N0.getOpcode() != ISD::AND || !OpVT.isInteger() || 3273 (Cond != ISD::SETEQ && Cond != ISD::SETNE)) 3274 return SDValue(); 3275 3276 SDValue X, Y; 3277 if (N0.getOperand(0) == N1) { 3278 X = N0.getOperand(1); 3279 Y = N0.getOperand(0); 3280 } else if (N0.getOperand(1) == N1) { 3281 X = N0.getOperand(0); 3282 Y = N0.getOperand(1); 3283 } else { 3284 return SDValue(); 3285 } 3286 3287 SelectionDAG &DAG = DCI.DAG; 3288 SDValue Zero = DAG.getConstant(0, DL, OpVT); 3289 if (DAG.isKnownToBeAPowerOfTwo(Y)) { 3290 // Simplify X & Y == Y to X & Y != 0 if Y has exactly one bit set. 3291 // Note that where Y is variable and is known to have at most one bit set 3292 // (for example, if it is Z & 1) we cannot do this; the expressions are not 3293 // equivalent when Y == 0. 3294 assert(OpVT.isInteger()); 3295 Cond = ISD::getSetCCInverse(Cond, OpVT); 3296 if (DCI.isBeforeLegalizeOps() || 3297 isCondCodeLegal(Cond, N0.getSimpleValueType())) 3298 return DAG.getSetCC(DL, VT, N0, Zero, Cond); 3299 } else if (N0.hasOneUse() && hasAndNotCompare(Y)) { 3300 // If the target supports an 'and-not' or 'and-complement' logic operation, 3301 // try to use that to make a comparison operation more efficient. 3302 // But don't do this transform if the mask is a single bit because there are 3303 // more efficient ways to deal with that case (for example, 'bt' on x86 or 3304 // 'rlwinm' on PPC). 3305 3306 // Bail out if the compare operand that we want to turn into a zero is 3307 // already a zero (otherwise, infinite loop). 3308 auto *YConst = dyn_cast<ConstantSDNode>(Y); 3309 if (YConst && YConst->isZero()) 3310 return SDValue(); 3311 3312 // Transform this into: ~X & Y == 0. 3313 SDValue NotX = DAG.getNOT(SDLoc(X), X, OpVT); 3314 SDValue NewAnd = DAG.getNode(ISD::AND, SDLoc(N0), OpVT, NotX, Y); 3315 return DAG.getSetCC(DL, VT, NewAnd, Zero, Cond); 3316 } 3317 3318 return SDValue(); 3319 } 3320 3321 /// There are multiple IR patterns that could be checking whether certain 3322 /// truncation of a signed number would be lossy or not. The pattern which is 3323 /// best at IR level, may not lower optimally. Thus, we want to unfold it. 3324 /// We are looking for the following pattern: (KeptBits is a constant) 3325 /// (add %x, (1 << (KeptBits-1))) srccond (1 << KeptBits) 3326 /// KeptBits won't be bitwidth(x), that will be constant-folded to true/false. 
3327 /// KeptBits also can't be 1, that would have been folded to %x dstcond 0 3328 /// We will unfold it into the natural trunc+sext pattern: 3329 /// ((%x << C) a>> C) dstcond %x 3330 /// Where C = bitwidth(x) - KeptBits and C u< bitwidth(x) 3331 SDValue TargetLowering::optimizeSetCCOfSignedTruncationCheck( 3332 EVT SCCVT, SDValue N0, SDValue N1, ISD::CondCode Cond, DAGCombinerInfo &DCI, 3333 const SDLoc &DL) const { 3334 // We must be comparing with a constant. 3335 ConstantSDNode *C1; 3336 if (!(C1 = dyn_cast<ConstantSDNode>(N1))) 3337 return SDValue(); 3338 3339 // N0 should be: add %x, (1 << (KeptBits-1)) 3340 if (N0->getOpcode() != ISD::ADD) 3341 return SDValue(); 3342 3343 // And we must be 'add'ing a constant. 3344 ConstantSDNode *C01; 3345 if (!(C01 = dyn_cast<ConstantSDNode>(N0->getOperand(1)))) 3346 return SDValue(); 3347 3348 SDValue X = N0->getOperand(0); 3349 EVT XVT = X.getValueType(); 3350 3351 // Validate constants ... 3352 3353 APInt I1 = C1->getAPIntValue(); 3354 3355 ISD::CondCode NewCond; 3356 if (Cond == ISD::CondCode::SETULT) { 3357 NewCond = ISD::CondCode::SETEQ; 3358 } else if (Cond == ISD::CondCode::SETULE) { 3359 NewCond = ISD::CondCode::SETEQ; 3360 // But need to 'canonicalize' the constant. 3361 I1 += 1; 3362 } else if (Cond == ISD::CondCode::SETUGT) { 3363 NewCond = ISD::CondCode::SETNE; 3364 // But need to 'canonicalize' the constant. 3365 I1 += 1; 3366 } else if (Cond == ISD::CondCode::SETUGE) { 3367 NewCond = ISD::CondCode::SETNE; 3368 } else 3369 return SDValue(); 3370 3371 APInt I01 = C01->getAPIntValue(); 3372 3373 auto checkConstants = [&I1, &I01]() -> bool { 3374 // Both of them must be power-of-two, and the constant from setcc is bigger. 3375 return I1.ugt(I01) && I1.isPowerOf2() && I01.isPowerOf2(); 3376 }; 3377 3378 if (checkConstants()) { 3379 // Great, e.g. got icmp ult i16 (add i16 %x, 128), 256 3380 } else { 3381 // What if we invert constants? (and the target predicate) 3382 I1.negate(); 3383 I01.negate(); 3384 assert(XVT.isInteger()); 3385 NewCond = getSetCCInverse(NewCond, XVT); 3386 if (!checkConstants()) 3387 return SDValue(); 3388 // Great, e.g. got icmp uge i16 (add i16 %x, -128), -256 3389 } 3390 3391 // They are power-of-two, so which bit is set? 3392 const unsigned KeptBits = I1.logBase2(); 3393 const unsigned KeptBitsMinusOne = I01.logBase2(); 3394 3395 // Magic! 3396 if (KeptBits != (KeptBitsMinusOne + 1)) 3397 return SDValue(); 3398 assert(KeptBits > 0 && KeptBits < XVT.getSizeInBits() && "unreachable"); 3399 3400 // We don't want to do this in every single case. 3401 SelectionDAG &DAG = DCI.DAG; 3402 if (!DAG.getTargetLoweringInfo().shouldTransformSignedTruncationCheck( 3403 XVT, KeptBits)) 3404 return SDValue(); 3405 3406 const unsigned MaskedBits = XVT.getSizeInBits() - KeptBits; 3407 assert(MaskedBits > 0 && MaskedBits < XVT.getSizeInBits() && "unreachable"); 3408 3409 // Unfold into: ((%x << C) a>> C) cond %x 3410 // Where 'cond' will be either 'eq' or 'ne'. 
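  // Hedged worked example (editorial sketch, not upstream code): for i16 %x
  // with KeptBits == 8, the original check
  //   icmp ult i16 (add i16 %x, 128), 256
  // asks whether %x fits in 8 signed bits. MaskedBits == 16 - 8 == 8, so the
  // unfolded form is  setcc (sra (shl %x, 8), 8), %x, eq  -- sign-extend the
  // low 8 bits back to 16 bits and compare against the original value.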
3411 SDValue ShiftAmt = DAG.getConstant(MaskedBits, DL, XVT); 3412 SDValue T0 = DAG.getNode(ISD::SHL, DL, XVT, X, ShiftAmt); 3413 SDValue T1 = DAG.getNode(ISD::SRA, DL, XVT, T0, ShiftAmt); 3414 SDValue T2 = DAG.getSetCC(DL, SCCVT, T1, X, NewCond); 3415 3416 return T2; 3417 } 3418 3419 // (X & (C l>>/<< Y)) ==/!= 0 --> ((X <</l>> Y) & C) ==/!= 0 3420 SDValue TargetLowering::optimizeSetCCByHoistingAndByConstFromLogicalShift( 3421 EVT SCCVT, SDValue N0, SDValue N1C, ISD::CondCode Cond, 3422 DAGCombinerInfo &DCI, const SDLoc &DL) const { 3423 assert(isConstOrConstSplat(N1C) && 3424 isConstOrConstSplat(N1C)->getAPIntValue().isZero() && 3425 "Should be a comparison with 0."); 3426 assert((Cond == ISD::SETEQ || Cond == ISD::SETNE) && 3427 "Valid only for [in]equality comparisons."); 3428 3429 unsigned NewShiftOpcode; 3430 SDValue X, C, Y; 3431 3432 SelectionDAG &DAG = DCI.DAG; 3433 const TargetLowering &TLI = DAG.getTargetLoweringInfo(); 3434 3435 // Look for '(C l>>/<< Y)'. 3436 auto Match = [&NewShiftOpcode, &X, &C, &Y, &TLI, &DAG](SDValue V) { 3437 // The shift should be one-use. 3438 if (!V.hasOneUse()) 3439 return false; 3440 unsigned OldShiftOpcode = V.getOpcode(); 3441 switch (OldShiftOpcode) { 3442 case ISD::SHL: 3443 NewShiftOpcode = ISD::SRL; 3444 break; 3445 case ISD::SRL: 3446 NewShiftOpcode = ISD::SHL; 3447 break; 3448 default: 3449 return false; // must be a logical shift. 3450 } 3451 // We should be shifting a constant. 3452 // FIXME: best to use isConstantOrConstantVector(). 3453 C = V.getOperand(0); 3454 ConstantSDNode *CC = 3455 isConstOrConstSplat(C, /*AllowUndefs=*/true, /*AllowTruncation=*/true); 3456 if (!CC) 3457 return false; 3458 Y = V.getOperand(1); 3459 3460 ConstantSDNode *XC = 3461 isConstOrConstSplat(X, /*AllowUndefs=*/true, /*AllowTruncation=*/true); 3462 return TLI.shouldProduceAndByConstByHoistingConstFromShiftsLHSOfAnd( 3463 X, XC, CC, Y, OldShiftOpcode, NewShiftOpcode, DAG); 3464 }; 3465 3466 // LHS of comparison should be an one-use 'and'. 3467 if (N0.getOpcode() != ISD::AND || !N0.hasOneUse()) 3468 return SDValue(); 3469 3470 X = N0.getOperand(0); 3471 SDValue Mask = N0.getOperand(1); 3472 3473 // 'and' is commutative! 3474 if (!Match(Mask)) { 3475 std::swap(X, Mask); 3476 if (!Match(Mask)) 3477 return SDValue(); 3478 } 3479 3480 EVT VT = X.getValueType(); 3481 3482 // Produce: 3483 // ((X 'OppositeShiftOpcode' Y) & C) Cond 0 3484 SDValue T0 = DAG.getNode(NewShiftOpcode, DL, VT, X, Y); 3485 SDValue T1 = DAG.getNode(ISD::AND, DL, VT, T0, C); 3486 SDValue T2 = DAG.getSetCC(DL, SCCVT, T1, N1C, Cond); 3487 return T2; 3488 } 3489 3490 /// Try to fold an equality comparison with a {add/sub/xor} binary operation as 3491 /// the 1st operand (N0). Callers are expected to swap the N0/N1 parameters to 3492 /// handle the commuted versions of these patterns. 
3493 SDValue TargetLowering::foldSetCCWithBinOp(EVT VT, SDValue N0, SDValue N1, 3494 ISD::CondCode Cond, const SDLoc &DL, 3495 DAGCombinerInfo &DCI) const { 3496 unsigned BOpcode = N0.getOpcode(); 3497 assert((BOpcode == ISD::ADD || BOpcode == ISD::SUB || BOpcode == ISD::XOR) && 3498 "Unexpected binop"); 3499 assert((Cond == ISD::SETEQ || Cond == ISD::SETNE) && "Unexpected condcode"); 3500 3501 // (X + Y) == X --> Y == 0 3502 // (X - Y) == X --> Y == 0 3503 // (X ^ Y) == X --> Y == 0 3504 SelectionDAG &DAG = DCI.DAG; 3505 EVT OpVT = N0.getValueType(); 3506 SDValue X = N0.getOperand(0); 3507 SDValue Y = N0.getOperand(1); 3508 if (X == N1) 3509 return DAG.getSetCC(DL, VT, Y, DAG.getConstant(0, DL, OpVT), Cond); 3510 3511 if (Y != N1) 3512 return SDValue(); 3513 3514 // (X + Y) == Y --> X == 0 3515 // (X ^ Y) == Y --> X == 0 3516 if (BOpcode == ISD::ADD || BOpcode == ISD::XOR) 3517 return DAG.getSetCC(DL, VT, X, DAG.getConstant(0, DL, OpVT), Cond); 3518 3519 // The shift would not be valid if the operands are boolean (i1). 3520 if (!N0.hasOneUse() || OpVT.getScalarSizeInBits() == 1) 3521 return SDValue(); 3522 3523 // (X - Y) == Y --> X == Y << 1 3524 EVT ShiftVT = getShiftAmountTy(OpVT, DAG.getDataLayout(), 3525 !DCI.isBeforeLegalize()); 3526 SDValue One = DAG.getConstant(1, DL, ShiftVT); 3527 SDValue YShl1 = DAG.getNode(ISD::SHL, DL, N1.getValueType(), Y, One); 3528 if (!DCI.isCalledByLegalizer()) 3529 DCI.AddToWorklist(YShl1.getNode()); 3530 return DAG.getSetCC(DL, VT, X, YShl1, Cond); 3531 } 3532 3533 static SDValue simplifySetCCWithCTPOP(const TargetLowering &TLI, EVT VT, 3534 SDValue N0, const APInt &C1, 3535 ISD::CondCode Cond, const SDLoc &dl, 3536 SelectionDAG &DAG) { 3537 // Look through truncs that don't change the value of a ctpop. 3538 // FIXME: Add vector support? Need to be careful with setcc result type below. 3539 SDValue CTPOP = N0; 3540 if (N0.getOpcode() == ISD::TRUNCATE && N0.hasOneUse() && !VT.isVector() && 3541 N0.getScalarValueSizeInBits() > Log2_32(N0.getOperand(0).getScalarValueSizeInBits())) 3542 CTPOP = N0.getOperand(0); 3543 3544 if (CTPOP.getOpcode() != ISD::CTPOP || !CTPOP.hasOneUse()) 3545 return SDValue(); 3546 3547 EVT CTVT = CTPOP.getValueType(); 3548 SDValue CTOp = CTPOP.getOperand(0); 3549 3550 // If this is a vector CTPOP, keep the CTPOP if it is legal. 3551 // TODO: Should we check if CTPOP is legal(or custom) for scalars? 3552 if (VT.isVector() && TLI.isOperationLegal(ISD::CTPOP, CTVT)) 3553 return SDValue(); 3554 3555 // (ctpop x) u< 2 -> (x & x-1) == 0 3556 // (ctpop x) u> 1 -> (x & x-1) != 0 3557 if (Cond == ISD::SETULT || Cond == ISD::SETUGT) { 3558 unsigned CostLimit = TLI.getCustomCtpopCost(CTVT, Cond); 3559 if (C1.ugt(CostLimit + (Cond == ISD::SETULT))) 3560 return SDValue(); 3561 if (C1 == 0 && (Cond == ISD::SETULT)) 3562 return SDValue(); // This is handled elsewhere. 3563 3564 unsigned Passes = C1.getLimitedValue() - (Cond == ISD::SETULT); 3565 3566 SDValue NegOne = DAG.getAllOnesConstant(dl, CTVT); 3567 SDValue Result = CTOp; 3568 for (unsigned i = 0; i < Passes; i++) { 3569 SDValue Add = DAG.getNode(ISD::ADD, dl, CTVT, Result, NegOne); 3570 Result = DAG.getNode(ISD::AND, dl, CTVT, Result, Add); 3571 } 3572 ISD::CondCode CC = Cond == ISD::SETULT ? ISD::SETEQ : ISD::SETNE; 3573 return DAG.getSetCC(dl, VT, Result, DAG.getConstant(0, dl, CTVT), CC); 3574 } 3575 3576 // If ctpop is not supported, expand a power-of-2 comparison based on it. 
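  // Hedged illustration (editorial note, not upstream code): for x == 0b1000
  // the expansion below gives (x != 0) && ((x & (x - 1)) == 0), i.e.
  // (true) && ((0b1000 & 0b0111) == 0), which is true -- exactly one bit set.
  // For x == 0 the first clause fails; for x == 0b1100 the second one does.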
3577 if ((Cond == ISD::SETEQ || Cond == ISD::SETNE) && C1 == 1) { 3578 // For scalars, keep CTPOP if it is legal or custom. 3579 if (!VT.isVector() && TLI.isOperationLegalOrCustom(ISD::CTPOP, CTVT)) 3580 return SDValue(); 3581 // This is based on X86's custom lowering for CTPOP which produces more 3582 // instructions than the expansion here. 3583 3584 // (ctpop x) == 1 --> (x != 0) && ((x & x-1) == 0) 3585 // (ctpop x) != 1 --> (x == 0) || ((x & x-1) != 0) 3586 SDValue Zero = DAG.getConstant(0, dl, CTVT); 3587 SDValue NegOne = DAG.getAllOnesConstant(dl, CTVT); 3588 assert(CTVT.isInteger()); 3589 ISD::CondCode InvCond = ISD::getSetCCInverse(Cond, CTVT); 3590 SDValue Add = DAG.getNode(ISD::ADD, dl, CTVT, CTOp, NegOne); 3591 SDValue And = DAG.getNode(ISD::AND, dl, CTVT, CTOp, Add); 3592 SDValue LHS = DAG.getSetCC(dl, VT, CTOp, Zero, InvCond); 3593 SDValue RHS = DAG.getSetCC(dl, VT, And, Zero, Cond); 3594 unsigned LogicOpcode = Cond == ISD::SETEQ ? ISD::AND : ISD::OR; 3595 return DAG.getNode(LogicOpcode, dl, VT, LHS, RHS); 3596 } 3597 3598 return SDValue(); 3599 } 3600 3601 /// Try to simplify a setcc built with the specified operands and cc. If it is 3602 /// unable to simplify it, return a null SDValue. 3603 SDValue TargetLowering::SimplifySetCC(EVT VT, SDValue N0, SDValue N1, 3604 ISD::CondCode Cond, bool foldBooleans, 3605 DAGCombinerInfo &DCI, 3606 const SDLoc &dl) const { 3607 SelectionDAG &DAG = DCI.DAG; 3608 const DataLayout &Layout = DAG.getDataLayout(); 3609 EVT OpVT = N0.getValueType(); 3610 3611 // Constant fold or commute setcc. 3612 if (SDValue Fold = DAG.FoldSetCC(VT, N0, N1, Cond, dl)) 3613 return Fold; 3614 3615 // Ensure that the constant occurs on the RHS and fold constant comparisons. 3616 // TODO: Handle non-splat vector constants. All undef causes trouble. 3617 // FIXME: We can't yet fold constant scalable vector splats, so avoid an 3618 // infinite loop here when we encounter one. 3619 ISD::CondCode SwappedCC = ISD::getSetCCSwappedOperands(Cond); 3620 if (isConstOrConstSplat(N0) && 3621 (!OpVT.isScalableVector() || !isConstOrConstSplat(N1)) && 3622 (DCI.isBeforeLegalizeOps() || 3623 isCondCodeLegal(SwappedCC, N0.getSimpleValueType()))) 3624 return DAG.getSetCC(dl, VT, N1, N0, SwappedCC); 3625 3626 // If we have a subtract with the same 2 non-constant operands as this setcc 3627 // -- but in reverse order -- then try to commute the operands of this setcc 3628 // to match. A matching pair of setcc (cmp) and sub may be combined into 1 3629 // instruction on some targets. 3630 if (!isConstOrConstSplat(N0) && !isConstOrConstSplat(N1) && 3631 (DCI.isBeforeLegalizeOps() || 3632 isCondCodeLegal(SwappedCC, N0.getSimpleValueType())) && 3633 DAG.doesNodeExist(ISD::SUB, DAG.getVTList(OpVT), {N1, N0}) && 3634 !DAG.doesNodeExist(ISD::SUB, DAG.getVTList(OpVT), {N0, N1})) 3635 return DAG.getSetCC(dl, VT, N1, N0, SwappedCC); 3636 3637 if (auto *N1C = isConstOrConstSplat(N1)) { 3638 const APInt &C1 = N1C->getAPIntValue(); 3639 3640 // Optimize some CTPOP cases. 3641 if (SDValue V = simplifySetCCWithCTPOP(*this, VT, N0, C1, Cond, dl, DAG)) 3642 return V; 3643 3644 // If the LHS is '(srl (ctlz x), 5)', the RHS is 0/1, and this is an 3645 // equality comparison, then we're just comparing whether X itself is 3646 // zero. 
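    // Hedged illustration (editorial note, not upstream code): for i32 x,
    // ctlz(x) == 32 only when x == 0, and 32 is the only possible result with
    // bit 5 set, so (srl (ctlz x), 5) is 1 exactly when x == 0. Hence
    //   (srl (ctlz x), 5) == 1  <=>  x == 0, and
    //   (srl (ctlz x), 5) == 0  <=>  x != 0.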
3647 if (N0.getOpcode() == ISD::SRL && (C1.isZero() || C1.isOne()) && 3648 N0.getOperand(0).getOpcode() == ISD::CTLZ && 3649 isPowerOf2_32(N0.getScalarValueSizeInBits())) { 3650 if (ConstantSDNode *ShAmt = isConstOrConstSplat(N0.getOperand(1))) { 3651 if ((Cond == ISD::SETEQ || Cond == ISD::SETNE) && 3652 ShAmt->getAPIntValue() == Log2_32(N0.getScalarValueSizeInBits())) { 3653 if ((C1 == 0) == (Cond == ISD::SETEQ)) { 3654 // (srl (ctlz x), 5) == 0 -> X != 0 3655 // (srl (ctlz x), 5) != 1 -> X != 0 3656 Cond = ISD::SETNE; 3657 } else { 3658 // (srl (ctlz x), 5) != 0 -> X == 0 3659 // (srl (ctlz x), 5) == 1 -> X == 0 3660 Cond = ISD::SETEQ; 3661 } 3662 SDValue Zero = DAG.getConstant(0, dl, N0.getValueType()); 3663 return DAG.getSetCC(dl, VT, N0.getOperand(0).getOperand(0), Zero, 3664 Cond); 3665 } 3666 } 3667 } 3668 } 3669 3670 // FIXME: Support vectors. 3671 if (auto *N1C = dyn_cast<ConstantSDNode>(N1.getNode())) { 3672 const APInt &C1 = N1C->getAPIntValue(); 3673 3674 // (zext x) == C --> x == (trunc C) 3675 // (sext x) == C --> x == (trunc C) 3676 if ((Cond == ISD::SETEQ || Cond == ISD::SETNE) && 3677 DCI.isBeforeLegalize() && N0->hasOneUse()) { 3678 unsigned MinBits = N0.getValueSizeInBits(); 3679 SDValue PreExt; 3680 bool Signed = false; 3681 if (N0->getOpcode() == ISD::ZERO_EXTEND) { 3682 // ZExt 3683 MinBits = N0->getOperand(0).getValueSizeInBits(); 3684 PreExt = N0->getOperand(0); 3685 } else if (N0->getOpcode() == ISD::AND) { 3686 // DAGCombine turns costly ZExts into ANDs 3687 if (auto *C = dyn_cast<ConstantSDNode>(N0->getOperand(1))) 3688 if ((C->getAPIntValue()+1).isPowerOf2()) { 3689 MinBits = C->getAPIntValue().countTrailingOnes(); 3690 PreExt = N0->getOperand(0); 3691 } 3692 } else if (N0->getOpcode() == ISD::SIGN_EXTEND) { 3693 // SExt 3694 MinBits = N0->getOperand(0).getValueSizeInBits(); 3695 PreExt = N0->getOperand(0); 3696 Signed = true; 3697 } else if (auto *LN0 = dyn_cast<LoadSDNode>(N0)) { 3698 // ZEXTLOAD / SEXTLOAD 3699 if (LN0->getExtensionType() == ISD::ZEXTLOAD) { 3700 MinBits = LN0->getMemoryVT().getSizeInBits(); 3701 PreExt = N0; 3702 } else if (LN0->getExtensionType() == ISD::SEXTLOAD) { 3703 Signed = true; 3704 MinBits = LN0->getMemoryVT().getSizeInBits(); 3705 PreExt = N0; 3706 } 3707 } 3708 3709 // Figure out how many bits we need to preserve this constant. 3710 unsigned ReqdBits = Signed ? C1.getMinSignedBits() : C1.getActiveBits(); 3711 3712 // Make sure we're not losing bits from the constant. 3713 if (MinBits > 0 && 3714 MinBits < C1.getBitWidth() && 3715 MinBits >= ReqdBits) { 3716 EVT MinVT = EVT::getIntegerVT(*DAG.getContext(), MinBits); 3717 if (isTypeDesirableForOp(ISD::SETCC, MinVT)) { 3718 // Will get folded away. 3719 SDValue Trunc = DAG.getNode(ISD::TRUNCATE, dl, MinVT, PreExt); 3720 if (MinBits == 1 && C1 == 1) 3721 // Invert the condition. 3722 return DAG.getSetCC(dl, VT, Trunc, DAG.getConstant(0, dl, MVT::i1), 3723 Cond == ISD::SETEQ ? 
ISD::SETNE : ISD::SETEQ); 3724 SDValue C = DAG.getConstant(C1.trunc(MinBits), dl, MinVT); 3725 return DAG.getSetCC(dl, VT, Trunc, C, Cond); 3726 } 3727 3728 // If truncating the setcc operands is not desirable, we can still 3729 // simplify the expression in some cases: 3730 // setcc ([sz]ext (setcc x, y, cc)), 0, setne) -> setcc (x, y, cc) 3731 // setcc ([sz]ext (setcc x, y, cc)), 0, seteq) -> setcc (x, y, inv(cc)) 3732 // setcc (zext (setcc x, y, cc)), 1, setne) -> setcc (x, y, inv(cc)) 3733 // setcc (zext (setcc x, y, cc)), 1, seteq) -> setcc (x, y, cc) 3734 // setcc (sext (setcc x, y, cc)), -1, setne) -> setcc (x, y, inv(cc)) 3735 // setcc (sext (setcc x, y, cc)), -1, seteq) -> setcc (x, y, cc) 3736 SDValue TopSetCC = N0->getOperand(0); 3737 unsigned N0Opc = N0->getOpcode(); 3738 bool SExt = (N0Opc == ISD::SIGN_EXTEND); 3739 if (TopSetCC.getValueType() == MVT::i1 && VT == MVT::i1 && 3740 TopSetCC.getOpcode() == ISD::SETCC && 3741 (N0Opc == ISD::ZERO_EXTEND || N0Opc == ISD::SIGN_EXTEND) && 3742 (isConstFalseVal(N1C) || 3743 isExtendedTrueVal(N1C, N0->getValueType(0), SExt))) { 3744 3745 bool Inverse = (N1C->isZero() && Cond == ISD::SETEQ) || 3746 (!N1C->isZero() && Cond == ISD::SETNE); 3747 3748 if (!Inverse) 3749 return TopSetCC; 3750 3751 ISD::CondCode InvCond = ISD::getSetCCInverse( 3752 cast<CondCodeSDNode>(TopSetCC.getOperand(2))->get(), 3753 TopSetCC.getOperand(0).getValueType()); 3754 return DAG.getSetCC(dl, VT, TopSetCC.getOperand(0), 3755 TopSetCC.getOperand(1), 3756 InvCond); 3757 } 3758 } 3759 } 3760 3761 // If the LHS is '(and load, const)', the RHS is 0, the test is for 3762 // equality or unsigned, and all 1 bits of the const are in the same 3763 // partial word, see if we can shorten the load. 3764 if (DCI.isBeforeLegalize() && 3765 !ISD::isSignedIntSetCC(Cond) && 3766 N0.getOpcode() == ISD::AND && C1 == 0 && 3767 N0.getNode()->hasOneUse() && 3768 isa<LoadSDNode>(N0.getOperand(0)) && 3769 N0.getOperand(0).getNode()->hasOneUse() && 3770 isa<ConstantSDNode>(N0.getOperand(1))) { 3771 LoadSDNode *Lod = cast<LoadSDNode>(N0.getOperand(0)); 3772 APInt bestMask; 3773 unsigned bestWidth = 0, bestOffset = 0; 3774 if (Lod->isSimple() && Lod->isUnindexed()) { 3775 unsigned origWidth = N0.getValueSizeInBits(); 3776 unsigned maskWidth = origWidth; 3777 // We can narrow (e.g.) 16-bit extending loads on 32-bit target to 3778 // 8 bits, but have to be careful... 
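        // Hedged illustration (editorial note, not upstream code): with a
        // 32-bit load masked by 0xFF00 and compared against 0 on a
        // little-endian target, the search below ends with bestWidth == 8 and
        // bestOffset == 1, so the test turns into an i8 load of the second
        // byte masked with 0xFF.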
3779 if (Lod->getExtensionType() != ISD::NON_EXTLOAD) 3780 origWidth = Lod->getMemoryVT().getSizeInBits(); 3781 const APInt &Mask = N0.getConstantOperandAPInt(1); 3782 for (unsigned width = origWidth / 2; width>=8; width /= 2) { 3783 APInt newMask = APInt::getLowBitsSet(maskWidth, width); 3784 for (unsigned offset=0; offset<origWidth/width; offset++) { 3785 if (Mask.isSubsetOf(newMask)) { 3786 if (Layout.isLittleEndian()) 3787 bestOffset = (uint64_t)offset * (width/8); 3788 else 3789 bestOffset = (origWidth/width - offset - 1) * (width/8); 3790 bestMask = Mask.lshr(offset * (width/8) * 8); 3791 bestWidth = width; 3792 break; 3793 } 3794 newMask <<= width; 3795 } 3796 } 3797 } 3798 if (bestWidth) { 3799 EVT newVT = EVT::getIntegerVT(*DAG.getContext(), bestWidth); 3800 if (newVT.isRound() && 3801 shouldReduceLoadWidth(Lod, ISD::NON_EXTLOAD, newVT)) { 3802 SDValue Ptr = Lod->getBasePtr(); 3803 if (bestOffset != 0) 3804 Ptr = 3805 DAG.getMemBasePlusOffset(Ptr, TypeSize::Fixed(bestOffset), dl); 3806 SDValue NewLoad = 3807 DAG.getLoad(newVT, dl, Lod->getChain(), Ptr, 3808 Lod->getPointerInfo().getWithOffset(bestOffset), 3809 Lod->getOriginalAlign()); 3810 return DAG.getSetCC(dl, VT, 3811 DAG.getNode(ISD::AND, dl, newVT, NewLoad, 3812 DAG.getConstant(bestMask.trunc(bestWidth), 3813 dl, newVT)), 3814 DAG.getConstant(0LL, dl, newVT), Cond); 3815 } 3816 } 3817 } 3818 3819 // If the LHS is a ZERO_EXTEND, perform the comparison on the input. 3820 if (N0.getOpcode() == ISD::ZERO_EXTEND) { 3821 unsigned InSize = N0.getOperand(0).getValueSizeInBits(); 3822 3823 // If the comparison constant has bits in the upper part, the 3824 // zero-extended value could never match. 3825 if (C1.intersects(APInt::getHighBitsSet(C1.getBitWidth(), 3826 C1.getBitWidth() - InSize))) { 3827 switch (Cond) { 3828 case ISD::SETUGT: 3829 case ISD::SETUGE: 3830 case ISD::SETEQ: 3831 return DAG.getConstant(0, dl, VT); 3832 case ISD::SETULT: 3833 case ISD::SETULE: 3834 case ISD::SETNE: 3835 return DAG.getConstant(1, dl, VT); 3836 case ISD::SETGT: 3837 case ISD::SETGE: 3838 // True if the sign bit of C1 is set. 3839 return DAG.getConstant(C1.isNegative(), dl, VT); 3840 case ISD::SETLT: 3841 case ISD::SETLE: 3842 // True if the sign bit of C1 isn't set. 3843 return DAG.getConstant(C1.isNonNegative(), dl, VT); 3844 default: 3845 break; 3846 } 3847 } 3848 3849 // Otherwise, we can perform the comparison with the low bits. 
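      // Hedged illustration (editorial note, not upstream code): for
      //   setcc (zero_extend i8 %x to i32), 200, ult
      // the constant fits in the low 8 bits, so the compare is performed
      // directly as  setcc %x, 200, ult  on i8 and the boolean result is
      // extended or truncated back to the expected setcc type.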
3850 switch (Cond) { 3851 case ISD::SETEQ: 3852 case ISD::SETNE: 3853 case ISD::SETUGT: 3854 case ISD::SETUGE: 3855 case ISD::SETULT: 3856 case ISD::SETULE: { 3857 EVT newVT = N0.getOperand(0).getValueType(); 3858 if (DCI.isBeforeLegalizeOps() || 3859 (isOperationLegal(ISD::SETCC, newVT) && 3860 isCondCodeLegal(Cond, newVT.getSimpleVT()))) { 3861 EVT NewSetCCVT = getSetCCResultType(Layout, *DAG.getContext(), newVT); 3862 SDValue NewConst = DAG.getConstant(C1.trunc(InSize), dl, newVT); 3863 3864 SDValue NewSetCC = DAG.getSetCC(dl, NewSetCCVT, N0.getOperand(0), 3865 NewConst, Cond); 3866 return DAG.getBoolExtOrTrunc(NewSetCC, dl, VT, N0.getValueType()); 3867 } 3868 break; 3869 } 3870 default: 3871 break; // todo, be more careful with signed comparisons 3872 } 3873 } else if (N0.getOpcode() == ISD::SIGN_EXTEND_INREG && 3874 (Cond == ISD::SETEQ || Cond == ISD::SETNE) && 3875 !isSExtCheaperThanZExt(cast<VTSDNode>(N0.getOperand(1))->getVT(), 3876 OpVT)) { 3877 EVT ExtSrcTy = cast<VTSDNode>(N0.getOperand(1))->getVT(); 3878 unsigned ExtSrcTyBits = ExtSrcTy.getSizeInBits(); 3879 EVT ExtDstTy = N0.getValueType(); 3880 unsigned ExtDstTyBits = ExtDstTy.getSizeInBits(); 3881 3882 // If the constant doesn't fit into the number of bits for the source of 3883 // the sign extension, it is impossible for both sides to be equal. 3884 if (C1.getMinSignedBits() > ExtSrcTyBits) 3885 return DAG.getBoolConstant(Cond == ISD::SETNE, dl, VT, OpVT); 3886 3887 assert(ExtDstTy == N0.getOperand(0).getValueType() && 3888 ExtDstTy != ExtSrcTy && "Unexpected types!"); 3889 APInt Imm = APInt::getLowBitsSet(ExtDstTyBits, ExtSrcTyBits); 3890 SDValue ZextOp = DAG.getNode(ISD::AND, dl, ExtDstTy, N0.getOperand(0), 3891 DAG.getConstant(Imm, dl, ExtDstTy)); 3892 if (!DCI.isCalledByLegalizer()) 3893 DCI.AddToWorklist(ZextOp.getNode()); 3894 // Otherwise, make this a use of a zext. 3895 return DAG.getSetCC(dl, VT, ZextOp, 3896 DAG.getConstant(C1 & Imm, dl, ExtDstTy), Cond); 3897 } else if ((N1C->isZero() || N1C->isOne()) && 3898 (Cond == ISD::SETEQ || Cond == ISD::SETNE)) { 3899 // SETCC (SETCC), [0|1], [EQ|NE] -> SETCC 3900 if (N0.getOpcode() == ISD::SETCC && 3901 isTypeLegal(VT) && VT.bitsLE(N0.getValueType()) && 3902 (N0.getValueType() == MVT::i1 || 3903 getBooleanContents(N0.getOperand(0).getValueType()) == 3904 ZeroOrOneBooleanContent)) { 3905 bool TrueWhenTrue = (Cond == ISD::SETEQ) ^ (!N1C->isOne()); 3906 if (TrueWhenTrue) 3907 return DAG.getNode(ISD::TRUNCATE, dl, VT, N0); 3908 // Invert the condition. 3909 ISD::CondCode CC = cast<CondCodeSDNode>(N0.getOperand(2))->get(); 3910 CC = ISD::getSetCCInverse(CC, N0.getOperand(0).getValueType()); 3911 if (DCI.isBeforeLegalizeOps() || 3912 isCondCodeLegal(CC, N0.getOperand(0).getSimpleValueType())) 3913 return DAG.getSetCC(dl, VT, N0.getOperand(0), N0.getOperand(1), CC); 3914 } 3915 3916 if ((N0.getOpcode() == ISD::XOR || 3917 (N0.getOpcode() == ISD::AND && 3918 N0.getOperand(0).getOpcode() == ISD::XOR && 3919 N0.getOperand(1) == N0.getOperand(0).getOperand(1))) && 3920 isOneConstant(N0.getOperand(1))) { 3921 // If this is (X^1) == 0/1, swap the RHS and eliminate the xor. We 3922 // can only do this if the top bits are known zero. 3923 unsigned BitWidth = N0.getValueSizeInBits(); 3924 if (DAG.MaskedValueIsZero(N0, 3925 APInt::getHighBitsSet(BitWidth, 3926 BitWidth-1))) { 3927 // Okay, get the un-inverted input value. 
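          // Hedged illustration (editorial note, not upstream code): with the
          // upper bits of X known zero, X is either 0 or 1, so (X ^ 1) == 0
          // becomes X != 0 and (X ^ 1) == 1 becomes X != 1; the xor is
          // dropped and only the condition code below is inverted.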
3928 SDValue Val; 3929 if (N0.getOpcode() == ISD::XOR) { 3930 Val = N0.getOperand(0); 3931 } else { 3932 assert(N0.getOpcode() == ISD::AND && 3933 N0.getOperand(0).getOpcode() == ISD::XOR); 3934 // ((X^1)&1)^1 -> X & 1 3935 Val = DAG.getNode(ISD::AND, dl, N0.getValueType(), 3936 N0.getOperand(0).getOperand(0), 3937 N0.getOperand(1)); 3938 } 3939 3940 return DAG.getSetCC(dl, VT, Val, N1, 3941 Cond == ISD::SETEQ ? ISD::SETNE : ISD::SETEQ); 3942 } 3943 } else if (N1C->isOne()) { 3944 SDValue Op0 = N0; 3945 if (Op0.getOpcode() == ISD::TRUNCATE) 3946 Op0 = Op0.getOperand(0); 3947 3948 if ((Op0.getOpcode() == ISD::XOR) && 3949 Op0.getOperand(0).getOpcode() == ISD::SETCC && 3950 Op0.getOperand(1).getOpcode() == ISD::SETCC) { 3951 SDValue XorLHS = Op0.getOperand(0); 3952 SDValue XorRHS = Op0.getOperand(1); 3953 // Ensure that the input setccs return an i1 type or 0/1 value. 3954 if (Op0.getValueType() == MVT::i1 || 3955 (getBooleanContents(XorLHS.getOperand(0).getValueType()) == 3956 ZeroOrOneBooleanContent && 3957 getBooleanContents(XorRHS.getOperand(0).getValueType()) == 3958 ZeroOrOneBooleanContent)) { 3959 // (xor (setcc), (setcc)) == / != 1 -> (setcc) != / == (setcc) 3960 Cond = (Cond == ISD::SETEQ) ? ISD::SETNE : ISD::SETEQ; 3961 return DAG.getSetCC(dl, VT, XorLHS, XorRHS, Cond); 3962 } 3963 } 3964 if (Op0.getOpcode() == ISD::AND && isOneConstant(Op0.getOperand(1))) { 3965 // If this is (X&1) == / != 1, normalize it to (X&1) != / == 0. 3966 if (Op0.getValueType().bitsGT(VT)) 3967 Op0 = DAG.getNode(ISD::AND, dl, VT, 3968 DAG.getNode(ISD::TRUNCATE, dl, VT, Op0.getOperand(0)), 3969 DAG.getConstant(1, dl, VT)); 3970 else if (Op0.getValueType().bitsLT(VT)) 3971 Op0 = DAG.getNode(ISD::AND, dl, VT, 3972 DAG.getNode(ISD::ANY_EXTEND, dl, VT, Op0.getOperand(0)), 3973 DAG.getConstant(1, dl, VT)); 3974 3975 return DAG.getSetCC(dl, VT, Op0, 3976 DAG.getConstant(0, dl, Op0.getValueType()), 3977 Cond == ISD::SETEQ ? ISD::SETNE : ISD::SETEQ); 3978 } 3979 if (Op0.getOpcode() == ISD::AssertZext && 3980 cast<VTSDNode>(Op0.getOperand(1))->getVT() == MVT::i1) 3981 return DAG.getSetCC(dl, VT, Op0, 3982 DAG.getConstant(0, dl, Op0.getValueType()), 3983 Cond == ISD::SETEQ ? ISD::SETNE : ISD::SETEQ); 3984 } 3985 } 3986 3987 // Given: 3988 // icmp eq/ne (urem %x, %y), 0 3989 // Iff %x has 0 or 1 bits set, and %y has at least 2 bits set, omit 'urem': 3990 // icmp eq/ne %x, 0 3991 if (N0.getOpcode() == ISD::UREM && N1C->isZero() && 3992 (Cond == ISD::SETEQ || Cond == ISD::SETNE)) { 3993 KnownBits XKnown = DAG.computeKnownBits(N0.getOperand(0)); 3994 KnownBits YKnown = DAG.computeKnownBits(N0.getOperand(1)); 3995 if (XKnown.countMaxPopulation() == 1 && YKnown.countMinPopulation() >= 2) 3996 return DAG.getSetCC(dl, VT, N0.getOperand(0), N1, Cond); 3997 } 3998 3999 // Fold set_cc seteq (ashr X, BW-1), -1 -> set_cc setlt X, 0 4000 // and set_cc setne (ashr X, BW-1), -1 -> set_cc setge X, 0 4001 if ((Cond == ISD::SETEQ || Cond == ISD::SETNE) && 4002 N0.getOpcode() == ISD::SRA && isa<ConstantSDNode>(N0.getOperand(1)) && 4003 N0.getConstantOperandAPInt(1) == OpVT.getScalarSizeInBits() - 1 && 4004 N1C && N1C->isAllOnes()) { 4005 return DAG.getSetCC(dl, VT, N0.getOperand(0), 4006 DAG.getConstant(0, dl, OpVT), 4007 Cond == ISD::SETEQ ? ISD::SETLT : ISD::SETGE); 4008 } 4009 4010 if (SDValue V = 4011 optimizeSetCCOfSignedTruncationCheck(VT, N0, N1, Cond, DCI, dl)) 4012 return V; 4013 } 4014 4015 // These simplifications apply to splat vectors as well. 4016 // TODO: Handle more splat vector cases. 
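  // Hedged illustration (editorial note, not upstream code): the block below
  // canonicalizes relational compares against constants, e.g. on i8
  //   setge X, 5   ->  setgt X, 4
  //   setule X, 7  ->  setult X, 8
  // and folds compares against the type's extremes, e.g. setuge X, 0 -> true.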
4017 if (auto *N1C = isConstOrConstSplat(N1)) { 4018 const APInt &C1 = N1C->getAPIntValue(); 4019 4020 APInt MinVal, MaxVal; 4021 unsigned OperandBitSize = N1C->getValueType(0).getScalarSizeInBits(); 4022 if (ISD::isSignedIntSetCC(Cond)) { 4023 MinVal = APInt::getSignedMinValue(OperandBitSize); 4024 MaxVal = APInt::getSignedMaxValue(OperandBitSize); 4025 } else { 4026 MinVal = APInt::getMinValue(OperandBitSize); 4027 MaxVal = APInt::getMaxValue(OperandBitSize); 4028 } 4029 4030 // Canonicalize GE/LE comparisons to use GT/LT comparisons. 4031 if (Cond == ISD::SETGE || Cond == ISD::SETUGE) { 4032 // X >= MIN --> true 4033 if (C1 == MinVal) 4034 return DAG.getBoolConstant(true, dl, VT, OpVT); 4035 4036 if (!VT.isVector()) { // TODO: Support this for vectors. 4037 // X >= C0 --> X > (C0 - 1) 4038 APInt C = C1 - 1; 4039 ISD::CondCode NewCC = (Cond == ISD::SETGE) ? ISD::SETGT : ISD::SETUGT; 4040 if ((DCI.isBeforeLegalizeOps() || 4041 isCondCodeLegal(NewCC, VT.getSimpleVT())) && 4042 (!N1C->isOpaque() || (C.getBitWidth() <= 64 && 4043 isLegalICmpImmediate(C.getSExtValue())))) { 4044 return DAG.getSetCC(dl, VT, N0, 4045 DAG.getConstant(C, dl, N1.getValueType()), 4046 NewCC); 4047 } 4048 } 4049 } 4050 4051 if (Cond == ISD::SETLE || Cond == ISD::SETULE) { 4052 // X <= MAX --> true 4053 if (C1 == MaxVal) 4054 return DAG.getBoolConstant(true, dl, VT, OpVT); 4055 4056 // X <= C0 --> X < (C0 + 1) 4057 if (!VT.isVector()) { // TODO: Support this for vectors. 4058 APInt C = C1 + 1; 4059 ISD::CondCode NewCC = (Cond == ISD::SETLE) ? ISD::SETLT : ISD::SETULT; 4060 if ((DCI.isBeforeLegalizeOps() || 4061 isCondCodeLegal(NewCC, VT.getSimpleVT())) && 4062 (!N1C->isOpaque() || (C.getBitWidth() <= 64 && 4063 isLegalICmpImmediate(C.getSExtValue())))) { 4064 return DAG.getSetCC(dl, VT, N0, 4065 DAG.getConstant(C, dl, N1.getValueType()), 4066 NewCC); 4067 } 4068 } 4069 } 4070 4071 if (Cond == ISD::SETLT || Cond == ISD::SETULT) { 4072 if (C1 == MinVal) 4073 return DAG.getBoolConstant(false, dl, VT, OpVT); // X < MIN --> false 4074 4075 // TODO: Support this for vectors after legalize ops. 4076 if (!VT.isVector() || DCI.isBeforeLegalizeOps()) { 4077 // Canonicalize setlt X, Max --> setne X, Max 4078 if (C1 == MaxVal) 4079 return DAG.getSetCC(dl, VT, N0, N1, ISD::SETNE); 4080 4081 // If we have setult X, 1, turn it into seteq X, 0 4082 if (C1 == MinVal+1) 4083 return DAG.getSetCC(dl, VT, N0, 4084 DAG.getConstant(MinVal, dl, N0.getValueType()), 4085 ISD::SETEQ); 4086 } 4087 } 4088 4089 if (Cond == ISD::SETGT || Cond == ISD::SETUGT) { 4090 if (C1 == MaxVal) 4091 return DAG.getBoolConstant(false, dl, VT, OpVT); // X > MAX --> false 4092 4093 // TODO: Support this for vectors after legalize ops. 4094 if (!VT.isVector() || DCI.isBeforeLegalizeOps()) { 4095 // Canonicalize setgt X, Min --> setne X, Min 4096 if (C1 == MinVal) 4097 return DAG.getSetCC(dl, VT, N0, N1, ISD::SETNE); 4098 4099 // If we have setugt X, Max-1, turn it into seteq X, Max 4100 if (C1 == MaxVal-1) 4101 return DAG.getSetCC(dl, VT, N0, 4102 DAG.getConstant(MaxVal, dl, N0.getValueType()), 4103 ISD::SETEQ); 4104 } 4105 } 4106 4107 if (Cond == ISD::SETEQ || Cond == ISD::SETNE) { 4108 // (X & (C l>>/<< Y)) ==/!= 0 --> ((X <</l>> Y) & C) ==/!= 0 4109 if (C1.isZero()) 4110 if (SDValue CC = optimizeSetCCByHoistingAndByConstFromLogicalShift( 4111 VT, N0, N1, Cond, DCI, dl)) 4112 return CC; 4113 4114 // For all/any comparisons, replace or(x,shl(y,bw/2)) with and/or(x,y). 
4115 // For example, when high 32-bits of i64 X are known clear: 4116 // all bits clear: (X | (Y<<32)) == 0 --> (X | Y) == 0 4117 // all bits set: (X | (Y<<32)) == -1 --> (X & Y) == -1 4118 bool CmpZero = N1C->getAPIntValue().isZero(); 4119 bool CmpNegOne = N1C->getAPIntValue().isAllOnes(); 4120 if ((CmpZero || CmpNegOne) && N0.hasOneUse()) { 4121 // Match or(lo,shl(hi,bw/2)) pattern. 4122 auto IsConcat = [&](SDValue V, SDValue &Lo, SDValue &Hi) { 4123 unsigned EltBits = V.getScalarValueSizeInBits(); 4124 if (V.getOpcode() != ISD::OR || (EltBits % 2) != 0) 4125 return false; 4126 SDValue LHS = V.getOperand(0); 4127 SDValue RHS = V.getOperand(1); 4128 APInt HiBits = APInt::getHighBitsSet(EltBits, EltBits / 2); 4129 // Unshifted element must have zero upperbits. 4130 if (RHS.getOpcode() == ISD::SHL && 4131 isa<ConstantSDNode>(RHS.getOperand(1)) && 4132 RHS.getConstantOperandAPInt(1) == (EltBits / 2) && 4133 DAG.MaskedValueIsZero(LHS, HiBits)) { 4134 Lo = LHS; 4135 Hi = RHS.getOperand(0); 4136 return true; 4137 } 4138 if (LHS.getOpcode() == ISD::SHL && 4139 isa<ConstantSDNode>(LHS.getOperand(1)) && 4140 LHS.getConstantOperandAPInt(1) == (EltBits / 2) && 4141 DAG.MaskedValueIsZero(RHS, HiBits)) { 4142 Lo = RHS; 4143 Hi = LHS.getOperand(0); 4144 return true; 4145 } 4146 return false; 4147 }; 4148 4149 auto MergeConcat = [&](SDValue Lo, SDValue Hi) { 4150 unsigned EltBits = N0.getScalarValueSizeInBits(); 4151 unsigned HalfBits = EltBits / 2; 4152 APInt HiBits = APInt::getHighBitsSet(EltBits, HalfBits); 4153 SDValue LoBits = DAG.getConstant(~HiBits, dl, OpVT); 4154 SDValue HiMask = DAG.getNode(ISD::AND, dl, OpVT, Hi, LoBits); 4155 SDValue NewN0 = 4156 DAG.getNode(CmpZero ? ISD::OR : ISD::AND, dl, OpVT, Lo, HiMask); 4157 SDValue NewN1 = CmpZero ? DAG.getConstant(0, dl, OpVT) : LoBits; 4158 return DAG.getSetCC(dl, VT, NewN0, NewN1, Cond); 4159 }; 4160 4161 SDValue Lo, Hi; 4162 if (IsConcat(N0, Lo, Hi)) 4163 return MergeConcat(Lo, Hi); 4164 4165 if (N0.getOpcode() == ISD::AND || N0.getOpcode() == ISD::OR) { 4166 SDValue Lo0, Lo1, Hi0, Hi1; 4167 if (IsConcat(N0.getOperand(0), Lo0, Hi0) && 4168 IsConcat(N0.getOperand(1), Lo1, Hi1)) { 4169 return MergeConcat(DAG.getNode(N0.getOpcode(), dl, OpVT, Lo0, Lo1), 4170 DAG.getNode(N0.getOpcode(), dl, OpVT, Hi0, Hi1)); 4171 } 4172 } 4173 } 4174 } 4175 4176 // If we have "setcc X, C0", check to see if we can shrink the immediate 4177 // by changing cc. 4178 // TODO: Support this for vectors after legalize ops. 4179 if (!VT.isVector() || DCI.isBeforeLegalizeOps()) { 4180 // SETUGT X, SINTMAX -> SETLT X, 0 4181 // SETUGE X, SINTMIN -> SETLT X, 0 4182 if ((Cond == ISD::SETUGT && C1.isMaxSignedValue()) || 4183 (Cond == ISD::SETUGE && C1.isMinSignedValue())) 4184 return DAG.getSetCC(dl, VT, N0, 4185 DAG.getConstant(0, dl, N1.getValueType()), 4186 ISD::SETLT); 4187 4188 // SETULT X, SINTMIN -> SETGT X, -1 4189 // SETULE X, SINTMAX -> SETGT X, -1 4190 if ((Cond == ISD::SETULT && C1.isMinSignedValue()) || 4191 (Cond == ISD::SETULE && C1.isMaxSignedValue())) 4192 return DAG.getSetCC(dl, VT, N0, 4193 DAG.getAllOnesConstant(dl, N1.getValueType()), 4194 ISD::SETGT); 4195 } 4196 } 4197 4198 // Back to non-vector simplifications. 4199 // TODO: Can we do these for vector splats? 4200 if (auto *N1C = dyn_cast<ConstantSDNode>(N1.getNode())) { 4201 const TargetLowering &TLI = DAG.getTargetLoweringInfo(); 4202 const APInt &C1 = N1C->getAPIntValue(); 4203 EVT ShValTy = N0.getValueType(); 4204 4205 // Fold bit comparisons when we can. 
This will result in an 4206 // incorrect value when boolean false is negative one, unless 4207 // the bitsize is 1 in which case the false value is the same 4208 // in practice regardless of the representation. 4209 if ((VT.getSizeInBits() == 1 || 4210 getBooleanContents(N0.getValueType()) == ZeroOrOneBooleanContent) && 4211 (Cond == ISD::SETEQ || Cond == ISD::SETNE) && 4212 (VT == ShValTy || (isTypeLegal(VT) && VT.bitsLE(ShValTy))) && 4213 N0.getOpcode() == ISD::AND) { 4214 if (auto *AndRHS = dyn_cast<ConstantSDNode>(N0.getOperand(1))) { 4215 EVT ShiftTy = 4216 getShiftAmountTy(ShValTy, Layout, !DCI.isBeforeLegalize()); 4217 if (Cond == ISD::SETNE && C1 == 0) {// (X & 8) != 0 --> (X & 8) >> 3 4218 // Perform the xform if the AND RHS is a single bit. 4219 unsigned ShCt = AndRHS->getAPIntValue().logBase2(); 4220 if (AndRHS->getAPIntValue().isPowerOf2() && 4221 !TLI.shouldAvoidTransformToShift(ShValTy, ShCt)) { 4222 return DAG.getNode(ISD::TRUNCATE, dl, VT, 4223 DAG.getNode(ISD::SRL, dl, ShValTy, N0, 4224 DAG.getConstant(ShCt, dl, ShiftTy))); 4225 } 4226 } else if (Cond == ISD::SETEQ && C1 == AndRHS->getAPIntValue()) { 4227 // (X & 8) == 8 --> (X & 8) >> 3 4228 // Perform the xform if C1 is a single bit. 4229 unsigned ShCt = C1.logBase2(); 4230 if (C1.isPowerOf2() && 4231 !TLI.shouldAvoidTransformToShift(ShValTy, ShCt)) { 4232 return DAG.getNode(ISD::TRUNCATE, dl, VT, 4233 DAG.getNode(ISD::SRL, dl, ShValTy, N0, 4234 DAG.getConstant(ShCt, dl, ShiftTy))); 4235 } 4236 } 4237 } 4238 } 4239 4240 if (C1.getMinSignedBits() <= 64 && 4241 !isLegalICmpImmediate(C1.getSExtValue())) { 4242 EVT ShiftTy = getShiftAmountTy(ShValTy, Layout, !DCI.isBeforeLegalize()); 4243 // (X & -256) == 256 -> (X >> 8) == 1 4244 if ((Cond == ISD::SETEQ || Cond == ISD::SETNE) && 4245 N0.getOpcode() == ISD::AND && N0.hasOneUse()) { 4246 if (auto *AndRHS = dyn_cast<ConstantSDNode>(N0.getOperand(1))) { 4247 const APInt &AndRHSC = AndRHS->getAPIntValue(); 4248 if (AndRHSC.isNegatedPowerOf2() && (AndRHSC & C1) == C1) { 4249 unsigned ShiftBits = AndRHSC.countTrailingZeros(); 4250 if (!TLI.shouldAvoidTransformToShift(ShValTy, ShiftBits)) { 4251 SDValue Shift = 4252 DAG.getNode(ISD::SRL, dl, ShValTy, N0.getOperand(0), 4253 DAG.getConstant(ShiftBits, dl, ShiftTy)); 4254 SDValue CmpRHS = DAG.getConstant(C1.lshr(ShiftBits), dl, ShValTy); 4255 return DAG.getSetCC(dl, VT, Shift, CmpRHS, Cond); 4256 } 4257 } 4258 } 4259 } else if (Cond == ISD::SETULT || Cond == ISD::SETUGE || 4260 Cond == ISD::SETULE || Cond == ISD::SETUGT) { 4261 bool AdjOne = (Cond == ISD::SETULE || Cond == ISD::SETUGT); 4262 // X < 0x100000000 -> (X >> 32) < 1 4263 // X >= 0x100000000 -> (X >> 32) >= 1 4264 // X <= 0x0ffffffff -> (X >> 32) < 1 4265 // X > 0x0ffffffff -> (X >> 32) >= 1 4266 unsigned ShiftBits; 4267 APInt NewC = C1; 4268 ISD::CondCode NewCond = Cond; 4269 if (AdjOne) { 4270 ShiftBits = C1.countTrailingOnes(); 4271 NewC = NewC + 1; 4272 NewCond = (Cond == ISD::SETULE) ? 
ISD::SETULT : ISD::SETUGE; 4273 } else { 4274 ShiftBits = C1.countTrailingZeros(); 4275 } 4276 NewC.lshrInPlace(ShiftBits); 4277 if (ShiftBits && NewC.getMinSignedBits() <= 64 && 4278 isLegalICmpImmediate(NewC.getSExtValue()) && 4279 !TLI.shouldAvoidTransformToShift(ShValTy, ShiftBits)) { 4280 SDValue Shift = DAG.getNode(ISD::SRL, dl, ShValTy, N0, 4281 DAG.getConstant(ShiftBits, dl, ShiftTy)); 4282 SDValue CmpRHS = DAG.getConstant(NewC, dl, ShValTy); 4283 return DAG.getSetCC(dl, VT, Shift, CmpRHS, NewCond); 4284 } 4285 } 4286 } 4287 } 4288 4289 if (!isa<ConstantFPSDNode>(N0) && isa<ConstantFPSDNode>(N1)) { 4290 auto *CFP = cast<ConstantFPSDNode>(N1); 4291 assert(!CFP->getValueAPF().isNaN() && "Unexpected NaN value"); 4292 4293 // Otherwise, we know the RHS is not a NaN. Simplify the node to drop the 4294 // constant if knowing that the operand is non-nan is enough. We prefer to 4295 // have SETO(x,x) instead of SETO(x, 0.0) because this avoids having to 4296 // materialize 0.0. 4297 if (Cond == ISD::SETO || Cond == ISD::SETUO) 4298 return DAG.getSetCC(dl, VT, N0, N0, Cond); 4299 4300 // setcc (fneg x), C -> setcc swap(pred) x, -C 4301 if (N0.getOpcode() == ISD::FNEG) { 4302 ISD::CondCode SwapCond = ISD::getSetCCSwappedOperands(Cond); 4303 if (DCI.isBeforeLegalizeOps() || 4304 isCondCodeLegal(SwapCond, N0.getSimpleValueType())) { 4305 SDValue NegN1 = DAG.getNode(ISD::FNEG, dl, N0.getValueType(), N1); 4306 return DAG.getSetCC(dl, VT, N0.getOperand(0), NegN1, SwapCond); 4307 } 4308 } 4309 4310 // If the condition is not legal, see if we can find an equivalent one 4311 // which is legal. 4312 if (!isCondCodeLegal(Cond, N0.getSimpleValueType())) { 4313 // If the comparison was an awkward floating-point == or != and one of 4314 // the comparison operands is infinity or negative infinity, convert the 4315 // condition to a less-awkward <= or >=. 4316 if (CFP->getValueAPF().isInfinity()) { 4317 bool IsNegInf = CFP->getValueAPF().isNegative(); 4318 ISD::CondCode NewCond = ISD::SETCC_INVALID; 4319 switch (Cond) { 4320 case ISD::SETOEQ: NewCond = IsNegInf ? ISD::SETOLE : ISD::SETOGE; break; 4321 case ISD::SETUEQ: NewCond = IsNegInf ? ISD::SETULE : ISD::SETUGE; break; 4322 case ISD::SETUNE: NewCond = IsNegInf ? ISD::SETUGT : ISD::SETULT; break; 4323 case ISD::SETONE: NewCond = IsNegInf ? ISD::SETOGT : ISD::SETOLT; break; 4324 default: break; 4325 } 4326 if (NewCond != ISD::SETCC_INVALID && 4327 isCondCodeLegal(NewCond, N0.getSimpleValueType())) 4328 return DAG.getSetCC(dl, VT, N0, N1, NewCond); 4329 } 4330 } 4331 } 4332 4333 if (N0 == N1) { 4334 // The sext(setcc()) => setcc() optimization relies on the appropriate 4335 // constant being emitted. 4336 assert(!N0.getValueType().isInteger() && 4337 "Integer types should be handled by FoldSetCC"); 4338 4339 bool EqTrue = ISD::isTrueWhenEqual(Cond); 4340 unsigned UOF = ISD::getUnorderedFlavor(Cond); 4341 if (UOF == 2) // FP operators that are undefined on NaNs. 4342 return DAG.getBoolConstant(EqTrue, dl, VT, OpVT); 4343 if (UOF == unsigned(EqTrue)) 4344 return DAG.getBoolConstant(EqTrue, dl, VT, OpVT); 4345 // Otherwise, we can't fold it. However, we can simplify it to SETUO/SETO 4346 // if it is not already. 4347 ISD::CondCode NewCond = UOF == 0 ? 
ISD::SETO : ISD::SETUO; 4348 if (NewCond != Cond && 4349 (DCI.isBeforeLegalizeOps() || 4350 isCondCodeLegal(NewCond, N0.getSimpleValueType()))) 4351 return DAG.getSetCC(dl, VT, N0, N1, NewCond); 4352 } 4353 4354 if ((Cond == ISD::SETEQ || Cond == ISD::SETNE) && 4355 N0.getValueType().isInteger()) { 4356 if (N0.getOpcode() == ISD::ADD || N0.getOpcode() == ISD::SUB || 4357 N0.getOpcode() == ISD::XOR) { 4358 // Simplify (X+Y) == (X+Z) --> Y == Z 4359 if (N0.getOpcode() == N1.getOpcode()) { 4360 if (N0.getOperand(0) == N1.getOperand(0)) 4361 return DAG.getSetCC(dl, VT, N0.getOperand(1), N1.getOperand(1), Cond); 4362 if (N0.getOperand(1) == N1.getOperand(1)) 4363 return DAG.getSetCC(dl, VT, N0.getOperand(0), N1.getOperand(0), Cond); 4364 if (isCommutativeBinOp(N0.getOpcode())) { 4365 // If X op Y == Y op X, try other combinations. 4366 if (N0.getOperand(0) == N1.getOperand(1)) 4367 return DAG.getSetCC(dl, VT, N0.getOperand(1), N1.getOperand(0), 4368 Cond); 4369 if (N0.getOperand(1) == N1.getOperand(0)) 4370 return DAG.getSetCC(dl, VT, N0.getOperand(0), N1.getOperand(1), 4371 Cond); 4372 } 4373 } 4374 4375 // If RHS is a legal immediate value for a compare instruction, we need 4376 // to be careful about increasing register pressure needlessly. 4377 bool LegalRHSImm = false; 4378 4379 if (auto *RHSC = dyn_cast<ConstantSDNode>(N1)) { 4380 if (auto *LHSR = dyn_cast<ConstantSDNode>(N0.getOperand(1))) { 4381 // Turn (X+C1) == C2 --> X == C2-C1 4382 if (N0.getOpcode() == ISD::ADD && N0.getNode()->hasOneUse()) { 4383 return DAG.getSetCC(dl, VT, N0.getOperand(0), 4384 DAG.getConstant(RHSC->getAPIntValue()- 4385 LHSR->getAPIntValue(), 4386 dl, N0.getValueType()), Cond); 4387 } 4388 4389 // Turn (X^C1) == C2 into X == C1^C2 iff X&~C1 = 0. 4390 if (N0.getOpcode() == ISD::XOR) 4391 // If we know that all of the inverted bits are zero, don't bother 4392 // performing the inversion. 4393 if (DAG.MaskedValueIsZero(N0.getOperand(0), ~LHSR->getAPIntValue())) 4394 return 4395 DAG.getSetCC(dl, VT, N0.getOperand(0), 4396 DAG.getConstant(LHSR->getAPIntValue() ^ 4397 RHSC->getAPIntValue(), 4398 dl, N0.getValueType()), 4399 Cond); 4400 } 4401 4402 // Turn (C1-X) == C2 --> X == C1-C2 4403 if (auto *SUBC = dyn_cast<ConstantSDNode>(N0.getOperand(0))) { 4404 if (N0.getOpcode() == ISD::SUB && N0.getNode()->hasOneUse()) { 4405 return 4406 DAG.getSetCC(dl, VT, N0.getOperand(1), 4407 DAG.getConstant(SUBC->getAPIntValue() - 4408 RHSC->getAPIntValue(), 4409 dl, N0.getValueType()), 4410 Cond); 4411 } 4412 } 4413 4414 // Could RHSC fold directly into a compare? 4415 if (RHSC->getValueType(0).getSizeInBits() <= 64) 4416 LegalRHSImm = isLegalICmpImmediate(RHSC->getSExtValue()); 4417 } 4418 4419 // (X+Y) == X --> Y == 0 and similar folds. 4420 // Don't do this if X is an immediate that can fold into a cmp 4421 // instruction and X+Y has other uses. It could be an induction variable 4422 // chain, and the transform would increase register pressure. 4423 if (!LegalRHSImm || N0.hasOneUse()) 4424 if (SDValue V = foldSetCCWithBinOp(VT, N0, N1, Cond, dl, DCI)) 4425 return V; 4426 } 4427 4428 if (N1.getOpcode() == ISD::ADD || N1.getOpcode() == ISD::SUB || 4429 N1.getOpcode() == ISD::XOR) 4430 if (SDValue V = foldSetCCWithBinOp(VT, N1, N0, Cond, dl, DCI)) 4431 return V; 4432 4433 if (SDValue V = foldSetCCWithAnd(VT, N0, N1, Cond, dl, DCI)) 4434 return V; 4435 } 4436 4437 // Fold remainder of division by a constant. 
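  // Hedged illustration (editorial note, not upstream code): (X urem C) ==/!=
  // 0 can usually be rewritten without a division; e.g. for i32 and C == 3
  // the helpers below emit a multiply by the modular multiplicative inverse
  // of 3 and an unsigned compare against floor(UINT32_MAX / 3), avoiding the
  // expensive urem when the target reports integer division as not cheap.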
4438 if ((N0.getOpcode() == ISD::UREM || N0.getOpcode() == ISD::SREM) && 4439 N0.hasOneUse() && (Cond == ISD::SETEQ || Cond == ISD::SETNE)) { 4440 AttributeList Attr = DAG.getMachineFunction().getFunction().getAttributes(); 4441 4442 // When division is cheap or optimizing for minimum size, 4443 // fall through to DIVREM creation by skipping this fold. 4444 if (!isIntDivCheap(VT, Attr) && !Attr.hasFnAttr(Attribute::MinSize)) { 4445 if (N0.getOpcode() == ISD::UREM) { 4446 if (SDValue Folded = buildUREMEqFold(VT, N0, N1, Cond, DCI, dl)) 4447 return Folded; 4448 } else if (N0.getOpcode() == ISD::SREM) { 4449 if (SDValue Folded = buildSREMEqFold(VT, N0, N1, Cond, DCI, dl)) 4450 return Folded; 4451 } 4452 } 4453 } 4454 4455 // Fold away ALL boolean setcc's. 4456 if (N0.getValueType().getScalarType() == MVT::i1 && foldBooleans) { 4457 SDValue Temp; 4458 switch (Cond) { 4459 default: llvm_unreachable("Unknown integer setcc!"); 4460 case ISD::SETEQ: // X == Y -> ~(X^Y) 4461 Temp = DAG.getNode(ISD::XOR, dl, OpVT, N0, N1); 4462 N0 = DAG.getNOT(dl, Temp, OpVT); 4463 if (!DCI.isCalledByLegalizer()) 4464 DCI.AddToWorklist(Temp.getNode()); 4465 break; 4466 case ISD::SETNE: // X != Y --> (X^Y) 4467 N0 = DAG.getNode(ISD::XOR, dl, OpVT, N0, N1); 4468 break; 4469 case ISD::SETGT: // X >s Y --> X == 0 & Y == 1 --> ~X & Y 4470 case ISD::SETULT: // X <u Y --> X == 0 & Y == 1 --> ~X & Y 4471 Temp = DAG.getNOT(dl, N0, OpVT); 4472 N0 = DAG.getNode(ISD::AND, dl, OpVT, N1, Temp); 4473 if (!DCI.isCalledByLegalizer()) 4474 DCI.AddToWorklist(Temp.getNode()); 4475 break; 4476 case ISD::SETLT: // X <s Y --> X == 1 & Y == 0 --> ~Y & X 4477 case ISD::SETUGT: // X >u Y --> X == 1 & Y == 0 --> ~Y & X 4478 Temp = DAG.getNOT(dl, N1, OpVT); 4479 N0 = DAG.getNode(ISD::AND, dl, OpVT, N0, Temp); 4480 if (!DCI.isCalledByLegalizer()) 4481 DCI.AddToWorklist(Temp.getNode()); 4482 break; 4483 case ISD::SETULE: // X <=u Y --> X == 0 | Y == 1 --> ~X | Y 4484 case ISD::SETGE: // X >=s Y --> X == 0 | Y == 1 --> ~X | Y 4485 Temp = DAG.getNOT(dl, N0, OpVT); 4486 N0 = DAG.getNode(ISD::OR, dl, OpVT, N1, Temp); 4487 if (!DCI.isCalledByLegalizer()) 4488 DCI.AddToWorklist(Temp.getNode()); 4489 break; 4490 case ISD::SETUGE: // X >=u Y --> X == 1 | Y == 0 --> ~Y | X 4491 case ISD::SETLE: // X <=s Y --> X == 1 | Y == 0 --> ~Y | X 4492 Temp = DAG.getNOT(dl, N1, OpVT); 4493 N0 = DAG.getNode(ISD::OR, dl, OpVT, N0, Temp); 4494 break; 4495 } 4496 if (VT.getScalarType() != MVT::i1) { 4497 if (!DCI.isCalledByLegalizer()) 4498 DCI.AddToWorklist(N0.getNode()); 4499 // FIXME: If running after legalize, we probably can't do this. 4500 ISD::NodeType ExtendCode = getExtendForContent(getBooleanContents(OpVT)); 4501 N0 = DAG.getNode(ExtendCode, dl, VT, N0); 4502 } 4503 return N0; 4504 } 4505 4506 // Could not fold it. 4507 return SDValue(); 4508 } 4509 4510 /// Returns true (and the GlobalValue and the offset) if the node is a 4511 /// GlobalAddress + offset. 
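/// A hedged example (editorial note, not upstream documentation): for the
/// node (add (GlobalAddress @g, 8), 16) this sets GA to @g and accumulates
/// Offset += 24, assuming Offset started at zero.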
4512 bool TargetLowering::isGAPlusOffset(SDNode *WN, const GlobalValue *&GA, 4513 int64_t &Offset) const { 4514 4515 SDNode *N = unwrapAddress(SDValue(WN, 0)).getNode(); 4516 4517 if (auto *GASD = dyn_cast<GlobalAddressSDNode>(N)) { 4518 GA = GASD->getGlobal(); 4519 Offset += GASD->getOffset(); 4520 return true; 4521 } 4522 4523 if (N->getOpcode() == ISD::ADD) { 4524 SDValue N1 = N->getOperand(0); 4525 SDValue N2 = N->getOperand(1); 4526 if (isGAPlusOffset(N1.getNode(), GA, Offset)) { 4527 if (auto *V = dyn_cast<ConstantSDNode>(N2)) { 4528 Offset += V->getSExtValue(); 4529 return true; 4530 } 4531 } else if (isGAPlusOffset(N2.getNode(), GA, Offset)) { 4532 if (auto *V = dyn_cast<ConstantSDNode>(N1)) { 4533 Offset += V->getSExtValue(); 4534 return true; 4535 } 4536 } 4537 } 4538 4539 return false; 4540 } 4541 4542 SDValue TargetLowering::PerformDAGCombine(SDNode *N, 4543 DAGCombinerInfo &DCI) const { 4544 // Default implementation: no optimization. 4545 return SDValue(); 4546 } 4547 4548 //===----------------------------------------------------------------------===// 4549 // Inline Assembler Implementation Methods 4550 //===----------------------------------------------------------------------===// 4551 4552 TargetLowering::ConstraintType 4553 TargetLowering::getConstraintType(StringRef Constraint) const { 4554 unsigned S = Constraint.size(); 4555 4556 if (S == 1) { 4557 switch (Constraint[0]) { 4558 default: break; 4559 case 'r': 4560 return C_RegisterClass; 4561 case 'm': // memory 4562 case 'o': // offsetable 4563 case 'V': // not offsetable 4564 return C_Memory; 4565 case 'n': // Simple Integer 4566 case 'E': // Floating Point Constant 4567 case 'F': // Floating Point Constant 4568 return C_Immediate; 4569 case 'i': // Simple Integer or Relocatable Constant 4570 case 's': // Relocatable Constant 4571 case 'p': // Address. 4572 case 'X': // Allow ANY value. 4573 case 'I': // Target registers. 4574 case 'J': 4575 case 'K': 4576 case 'L': 4577 case 'M': 4578 case 'N': 4579 case 'O': 4580 case 'P': 4581 case '<': 4582 case '>': 4583 return C_Other; 4584 } 4585 } 4586 4587 if (S > 1 && Constraint[0] == '{' && Constraint[S - 1] == '}') { 4588 if (S == 8 && Constraint.substr(1, 6) == "memory") // "{memory}" 4589 return C_Memory; 4590 return C_Register; 4591 } 4592 return C_Unknown; 4593 } 4594 4595 /// Try to replace an X constraint, which matches anything, with another that 4596 /// has more specific requirements based on the type of the corresponding 4597 /// operand. 4598 const char *TargetLowering::LowerXConstraint(EVT ConstraintVT) const { 4599 if (ConstraintVT.isInteger()) 4600 return "r"; 4601 if (ConstraintVT.isFloatingPoint()) 4602 return "f"; // works for many targets 4603 return nullptr; 4604 } 4605 4606 SDValue TargetLowering::LowerAsmOutputForConstraint( 4607 SDValue &Chain, SDValue &Flag, const SDLoc &DL, 4608 const AsmOperandInfo &OpInfo, SelectionDAG &DAG) const { 4609 return SDValue(); 4610 } 4611 4612 /// Lower the specified operand into the Ops vector. 4613 /// If it is invalid, don't add anything to Ops. 
4614 void TargetLowering::LowerAsmOperandForConstraint(SDValue Op, 4615 std::string &Constraint, 4616 std::vector<SDValue> &Ops, 4617 SelectionDAG &DAG) const { 4618 4619 if (Constraint.length() > 1) return; 4620 4621 char ConstraintLetter = Constraint[0]; 4622 switch (ConstraintLetter) { 4623 default: break; 4624 case 'X': // Allows any operand 4625 case 'i': // Simple Integer or Relocatable Constant 4626 case 'n': // Simple Integer 4627 case 's': { // Relocatable Constant 4628 4629 ConstantSDNode *C; 4630 uint64_t Offset = 0; 4631 4632 // Match (GA) or (C) or (GA+C) or (GA-C) or ((GA+C)+C) or (((GA+C)+C)+C), 4633 // etc., since getelementpointer is variadic. We can't use 4634 // SelectionDAG::FoldSymbolOffset because it expects the GA to be accessible 4635 // while in this case the GA may be furthest from the root node which is 4636 // likely an ISD::ADD. 4637 while (true) { 4638 if ((C = dyn_cast<ConstantSDNode>(Op)) && ConstraintLetter != 's') { 4639 // gcc prints these as sign extended. Sign extend value to 64 bits 4640 // now; without this it would get ZExt'd later in 4641 // ScheduleDAGSDNodes::EmitNode, which is very generic. 4642 bool IsBool = C->getConstantIntValue()->getBitWidth() == 1; 4643 BooleanContent BCont = getBooleanContents(MVT::i64); 4644 ISD::NodeType ExtOpc = 4645 IsBool ? getExtendForContent(BCont) : ISD::SIGN_EXTEND; 4646 int64_t ExtVal = 4647 ExtOpc == ISD::ZERO_EXTEND ? C->getZExtValue() : C->getSExtValue(); 4648 Ops.push_back( 4649 DAG.getTargetConstant(Offset + ExtVal, SDLoc(C), MVT::i64)); 4650 return; 4651 } 4652 if (ConstraintLetter != 'n') { 4653 if (const auto *GA = dyn_cast<GlobalAddressSDNode>(Op)) { 4654 Ops.push_back(DAG.getTargetGlobalAddress(GA->getGlobal(), SDLoc(Op), 4655 GA->getValueType(0), 4656 Offset + GA->getOffset())); 4657 return; 4658 } 4659 if (const auto *BA = dyn_cast<BlockAddressSDNode>(Op)) { 4660 Ops.push_back(DAG.getTargetBlockAddress( 4661 BA->getBlockAddress(), BA->getValueType(0), 4662 Offset + BA->getOffset(), BA->getTargetFlags())); 4663 return; 4664 } 4665 if (isa<BasicBlockSDNode>(Op)) { 4666 Ops.push_back(Op); 4667 return; 4668 } 4669 } 4670 const unsigned OpCode = Op.getOpcode(); 4671 if (OpCode == ISD::ADD || OpCode == ISD::SUB) { 4672 if ((C = dyn_cast<ConstantSDNode>(Op.getOperand(0)))) 4673 Op = Op.getOperand(1); 4674 // Subtraction is not commutative. 4675 else if (OpCode == ISD::ADD && 4676 (C = dyn_cast<ConstantSDNode>(Op.getOperand(1)))) 4677 Op = Op.getOperand(0); 4678 else 4679 return; 4680 Offset += (OpCode == ISD::ADD ? 1 : -1) * C->getSExtValue(); 4681 continue; 4682 } 4683 return; 4684 } 4685 break; 4686 } 4687 } 4688 } 4689 4690 std::pair<unsigned, const TargetRegisterClass *> 4691 TargetLowering::getRegForInlineAsmConstraint(const TargetRegisterInfo *RI, 4692 StringRef Constraint, 4693 MVT VT) const { 4694 if (Constraint.empty() || Constraint[0] != '{') 4695 return std::make_pair(0u, static_cast<TargetRegisterClass *>(nullptr)); 4696 assert(*(Constraint.end() - 1) == '}' && "Not a brace enclosed constraint?"); 4697 4698 // Remove the braces from around the name. 4699 StringRef RegName(Constraint.data() + 1, Constraint.size() - 2); 4700 4701 std::pair<unsigned, const TargetRegisterClass *> R = 4702 std::make_pair(0u, static_cast<const TargetRegisterClass *>(nullptr)); 4703 4704 // Figure out which register class contains this reg. 4705 for (const TargetRegisterClass *RC : RI->regclasses()) { 4706 // If none of the value types for this register class are valid, we 4707 // can't use it. 
For example, 64-bit reg classes on 32-bit targets. 4708 if (!isLegalRC(*RI, *RC)) 4709 continue; 4710 4711 for (const MCPhysReg &PR : *RC) { 4712 if (RegName.equals_insensitive(RI->getRegAsmName(PR))) { 4713 std::pair<unsigned, const TargetRegisterClass *> S = 4714 std::make_pair(PR, RC); 4715 4716 // If this register class has the requested value type, return it, 4717 // otherwise keep searching and return the first class found 4718 // if no other is found which explicitly has the requested type. 4719 if (RI->isTypeLegalForClass(*RC, VT)) 4720 return S; 4721 if (!R.second) 4722 R = S; 4723 } 4724 } 4725 } 4726 4727 return R; 4728 } 4729 4730 //===----------------------------------------------------------------------===// 4731 // Constraint Selection. 4732 4733 /// Return true of this is an input operand that is a matching constraint like 4734 /// "4". 4735 bool TargetLowering::AsmOperandInfo::isMatchingInputConstraint() const { 4736 assert(!ConstraintCode.empty() && "No known constraint!"); 4737 return isdigit(static_cast<unsigned char>(ConstraintCode[0])); 4738 } 4739 4740 /// If this is an input matching constraint, this method returns the output 4741 /// operand it matches. 4742 unsigned TargetLowering::AsmOperandInfo::getMatchedOperand() const { 4743 assert(!ConstraintCode.empty() && "No known constraint!"); 4744 return atoi(ConstraintCode.c_str()); 4745 } 4746 4747 /// Split up the constraint string from the inline assembly value into the 4748 /// specific constraints and their prefixes, and also tie in the associated 4749 /// operand values. 4750 /// If this returns an empty vector, and if the constraint string itself 4751 /// isn't empty, there was an error parsing. 4752 TargetLowering::AsmOperandInfoVector 4753 TargetLowering::ParseConstraints(const DataLayout &DL, 4754 const TargetRegisterInfo *TRI, 4755 const CallBase &Call) const { 4756 /// Information about all of the constraints. 4757 AsmOperandInfoVector ConstraintOperands; 4758 const InlineAsm *IA = cast<InlineAsm>(Call.getCalledOperand()); 4759 unsigned maCount = 0; // Largest number of multiple alternative constraints. 4760 4761 // Do a prepass over the constraints, canonicalizing them, and building up the 4762 // ConstraintOperands list. 4763 unsigned ArgNo = 0; // ArgNo - The argument of the CallInst. 4764 unsigned ResNo = 0; // ResNo - The result number of the next output. 4765 4766 for (InlineAsm::ConstraintInfo &CI : IA->ParseConstraints()) { 4767 ConstraintOperands.emplace_back(std::move(CI)); 4768 AsmOperandInfo &OpInfo = ConstraintOperands.back(); 4769 4770 // Update multiple alternative constraint count. 4771 if (OpInfo.multipleAlternatives.size() > maCount) 4772 maCount = OpInfo.multipleAlternatives.size(); 4773 4774 OpInfo.ConstraintVT = MVT::Other; 4775 4776 // Compute the value type for each operand. 4777 switch (OpInfo.Type) { 4778 case InlineAsm::isOutput: 4779 // Indirect outputs just consume an argument. 4780 if (OpInfo.isIndirect) { 4781 OpInfo.CallOperandVal = Call.getArgOperand(ArgNo); 4782 break; 4783 } 4784 4785 // The return value of the call is this value. As such, there is no 4786 // corresponding argument. 
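      // For example (illustrative): an asm with "=r,=r" outputs returns a
      // two-element struct and ResNo picks the matching element type, whereas
      // an indirect "=*m" output consumed a pointer argument in the branch
      // above.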
4787 assert(!Call.getType()->isVoidTy() && "Bad inline asm!"); 4788 if (StructType *STy = dyn_cast<StructType>(Call.getType())) { 4789 OpInfo.ConstraintVT = 4790 getSimpleValueType(DL, STy->getElementType(ResNo)); 4791 } else { 4792 assert(ResNo == 0 && "Asm only has one result!"); 4793 OpInfo.ConstraintVT = 4794 getAsmOperandValueType(DL, Call.getType()).getSimpleVT(); 4795 } 4796 ++ResNo; 4797 break; 4798 case InlineAsm::isInput: 4799 OpInfo.CallOperandVal = Call.getArgOperand(ArgNo); 4800 break; 4801 case InlineAsm::isClobber: 4802 // Nothing to do. 4803 break; 4804 } 4805 4806 if (OpInfo.CallOperandVal) { 4807 llvm::Type *OpTy = OpInfo.CallOperandVal->getType(); 4808 if (OpInfo.isIndirect) { 4809 OpTy = Call.getAttributes().getParamElementType(ArgNo); 4810 assert(OpTy && "Indirect opernad must have elementtype attribute"); 4811 } 4812 4813 // Look for vector wrapped in a struct. e.g. { <16 x i8> }. 4814 if (StructType *STy = dyn_cast<StructType>(OpTy)) 4815 if (STy->getNumElements() == 1) 4816 OpTy = STy->getElementType(0); 4817 4818 // If OpTy is not a single value, it may be a struct/union that we 4819 // can tile with integers. 4820 if (!OpTy->isSingleValueType() && OpTy->isSized()) { 4821 unsigned BitSize = DL.getTypeSizeInBits(OpTy); 4822 switch (BitSize) { 4823 default: break; 4824 case 1: 4825 case 8: 4826 case 16: 4827 case 32: 4828 case 64: 4829 case 128: 4830 OpInfo.ConstraintVT = 4831 MVT::getVT(IntegerType::get(OpTy->getContext(), BitSize), true); 4832 break; 4833 } 4834 } else if (PointerType *PT = dyn_cast<PointerType>(OpTy)) { 4835 unsigned PtrSize = DL.getPointerSizeInBits(PT->getAddressSpace()); 4836 OpInfo.ConstraintVT = MVT::getIntegerVT(PtrSize); 4837 } else { 4838 OpInfo.ConstraintVT = MVT::getVT(OpTy, true); 4839 } 4840 4841 ArgNo++; 4842 } 4843 } 4844 4845 // If we have multiple alternative constraints, select the best alternative. 4846 if (!ConstraintOperands.empty()) { 4847 if (maCount) { 4848 unsigned bestMAIndex = 0; 4849 int bestWeight = -1; 4850 // weight: -1 = invalid match, and 0 = so-so match to 5 = good match. 4851 int weight = -1; 4852 unsigned maIndex; 4853 // Compute the sums of the weights for each alternative, keeping track 4854 // of the best (highest weight) one so far. 4855 for (maIndex = 0; maIndex < maCount; ++maIndex) { 4856 int weightSum = 0; 4857 for (unsigned cIndex = 0, eIndex = ConstraintOperands.size(); 4858 cIndex != eIndex; ++cIndex) { 4859 AsmOperandInfo &OpInfo = ConstraintOperands[cIndex]; 4860 if (OpInfo.Type == InlineAsm::isClobber) 4861 continue; 4862 4863 // If this is an output operand with a matching input operand, 4864 // look up the matching input. If their types mismatch, e.g. one 4865 // is an integer, the other is floating point, or their sizes are 4866 // different, flag it as an maCantMatch. 4867 if (OpInfo.hasMatchingInput()) { 4868 AsmOperandInfo &Input = ConstraintOperands[OpInfo.MatchingInput]; 4869 if (OpInfo.ConstraintVT != Input.ConstraintVT) { 4870 if ((OpInfo.ConstraintVT.isInteger() != 4871 Input.ConstraintVT.isInteger()) || 4872 (OpInfo.ConstraintVT.getSizeInBits() != 4873 Input.ConstraintVT.getSizeInBits())) { 4874 weightSum = -1; // Can't match. 4875 break; 4876 } 4877 } 4878 } 4879 weight = getMultipleConstraintMatchWeight(OpInfo, maIndex); 4880 if (weight == -1) { 4881 weightSum = -1; 4882 break; 4883 } 4884 weightSum += weight; 4885 } 4886 // Update best. 
4887 if (weightSum > bestWeight) { 4888 bestWeight = weightSum; 4889 bestMAIndex = maIndex; 4890 } 4891 } 4892 4893 // Now select chosen alternative in each constraint. 4894 for (AsmOperandInfo &cInfo : ConstraintOperands) 4895 if (cInfo.Type != InlineAsm::isClobber) 4896 cInfo.selectAlternative(bestMAIndex); 4897 } 4898 } 4899 4900 // Check and hook up tied operands, choose constraint code to use. 4901 for (unsigned cIndex = 0, eIndex = ConstraintOperands.size(); 4902 cIndex != eIndex; ++cIndex) { 4903 AsmOperandInfo &OpInfo = ConstraintOperands[cIndex]; 4904 4905 // If this is an output operand with a matching input operand, look up the 4906 // matching input. If their types mismatch, e.g. one is an integer, the 4907 // other is floating point, or their sizes are different, flag it as an 4908 // error. 4909 if (OpInfo.hasMatchingInput()) { 4910 AsmOperandInfo &Input = ConstraintOperands[OpInfo.MatchingInput]; 4911 4912 if (OpInfo.ConstraintVT != Input.ConstraintVT) { 4913 std::pair<unsigned, const TargetRegisterClass *> MatchRC = 4914 getRegForInlineAsmConstraint(TRI, OpInfo.ConstraintCode, 4915 OpInfo.ConstraintVT); 4916 std::pair<unsigned, const TargetRegisterClass *> InputRC = 4917 getRegForInlineAsmConstraint(TRI, Input.ConstraintCode, 4918 Input.ConstraintVT); 4919 if ((OpInfo.ConstraintVT.isInteger() != 4920 Input.ConstraintVT.isInteger()) || 4921 (MatchRC.second != InputRC.second)) { 4922 report_fatal_error("Unsupported asm: input constraint" 4923 " with a matching output constraint of" 4924 " incompatible type!"); 4925 } 4926 } 4927 } 4928 } 4929 4930 return ConstraintOperands; 4931 } 4932 4933 /// Return an integer indicating how general CT is. 4934 static unsigned getConstraintGenerality(TargetLowering::ConstraintType CT) { 4935 switch (CT) { 4936 case TargetLowering::C_Immediate: 4937 case TargetLowering::C_Other: 4938 case TargetLowering::C_Unknown: 4939 return 0; 4940 case TargetLowering::C_Register: 4941 return 1; 4942 case TargetLowering::C_RegisterClass: 4943 return 2; 4944 case TargetLowering::C_Memory: 4945 return 3; 4946 } 4947 llvm_unreachable("Invalid constraint type"); 4948 } 4949 4950 /// Examine constraint type and operand type and determine a weight value. 4951 /// This object must already have been set up with the operand type 4952 /// and the current alternative constraint selected. 4953 TargetLowering::ConstraintWeight 4954 TargetLowering::getMultipleConstraintMatchWeight( 4955 AsmOperandInfo &info, int maIndex) const { 4956 InlineAsm::ConstraintCodeVector *rCodes; 4957 if (maIndex >= (int)info.multipleAlternatives.size()) 4958 rCodes = &info.Codes; 4959 else 4960 rCodes = &info.multipleAlternatives[maIndex].Codes; 4961 ConstraintWeight BestWeight = CW_Invalid; 4962 4963 // Loop over the options, keeping track of the most general one. 4964 for (const std::string &rCode : *rCodes) { 4965 ConstraintWeight weight = 4966 getSingleConstraintMatchWeight(info, rCode.c_str()); 4967 if (weight > BestWeight) 4968 BestWeight = weight; 4969 } 4970 4971 return BestWeight; 4972 } 4973 4974 /// Examine constraint type and operand type and determine a weight value. 4975 /// This object must already have been set up with the operand type 4976 /// and the current alternative constraint selected. 
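/// For example (illustrative): a ConstantInt operand scores CW_Constant
/// against an 'i' or 'n' code, an integer-typed value scores CW_Register
/// against 'r', and an unrecognized code falls back to CW_Default.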
4977 TargetLowering::ConstraintWeight 4978 TargetLowering::getSingleConstraintMatchWeight( 4979 AsmOperandInfo &info, const char *constraint) const { 4980 ConstraintWeight weight = CW_Invalid; 4981 Value *CallOperandVal = info.CallOperandVal; 4982 // If we don't have a value, we can't do a match, 4983 // but allow it at the lowest weight. 4984 if (!CallOperandVal) 4985 return CW_Default; 4986 // Look at the constraint type. 4987 switch (*constraint) { 4988 case 'i': // immediate integer. 4989 case 'n': // immediate integer with a known value. 4990 if (isa<ConstantInt>(CallOperandVal)) 4991 weight = CW_Constant; 4992 break; 4993 case 's': // non-explicit intregal immediate. 4994 if (isa<GlobalValue>(CallOperandVal)) 4995 weight = CW_Constant; 4996 break; 4997 case 'E': // immediate float if host format. 4998 case 'F': // immediate float. 4999 if (isa<ConstantFP>(CallOperandVal)) 5000 weight = CW_Constant; 5001 break; 5002 case '<': // memory operand with autodecrement. 5003 case '>': // memory operand with autoincrement. 5004 case 'm': // memory operand. 5005 case 'o': // offsettable memory operand 5006 case 'V': // non-offsettable memory operand 5007 weight = CW_Memory; 5008 break; 5009 case 'r': // general register. 5010 case 'g': // general register, memory operand or immediate integer. 5011 // note: Clang converts "g" to "imr". 5012 if (CallOperandVal->getType()->isIntegerTy()) 5013 weight = CW_Register; 5014 break; 5015 case 'X': // any operand. 5016 default: 5017 weight = CW_Default; 5018 break; 5019 } 5020 return weight; 5021 } 5022 5023 /// If there are multiple different constraints that we could pick for this 5024 /// operand (e.g. "imr") try to pick the 'best' one. 5025 /// This is somewhat tricky: constraints fall into four classes: 5026 /// Other -> immediates and magic values 5027 /// Register -> one specific register 5028 /// RegisterClass -> a group of regs 5029 /// Memory -> memory 5030 /// Ideally, we would pick the most specific constraint possible: if we have 5031 /// something that fits into a register, we would pick it. The problem here 5032 /// is that if we have something that could either be in a register or in 5033 /// memory that use of the register could cause selection of *other* 5034 /// operands to fail: they might only succeed if we pick memory. Because of 5035 /// this the heuristic we use is: 5036 /// 5037 /// 1) If there is an 'other' constraint, and if the operand is valid for 5038 /// that constraint, use it. This makes us take advantage of 'i' 5039 /// constraints when available. 5040 /// 2) Otherwise, pick the most general constraint present. This prefers 5041 /// 'm' over 'r', for example. 5042 /// 5043 static void ChooseConstraint(TargetLowering::AsmOperandInfo &OpInfo, 5044 const TargetLowering &TLI, 5045 SDValue Op, SelectionDAG *DAG) { 5046 assert(OpInfo.Codes.size() > 1 && "Doesn't have multiple constraint options"); 5047 unsigned BestIdx = 0; 5048 TargetLowering::ConstraintType BestType = TargetLowering::C_Unknown; 5049 int BestGenerality = -1; 5050 5051 // Loop over the options, keeping track of the most general one. 5052 for (unsigned i = 0, e = OpInfo.Codes.size(); i != e; ++i) { 5053 TargetLowering::ConstraintType CType = 5054 TLI.getConstraintType(OpInfo.Codes[i]); 5055 5056 // Indirect 'other' or 'immediate' constraints are not allowed. 
5057 if (OpInfo.isIndirect && !(CType == TargetLowering::C_Memory || 5058 CType == TargetLowering::C_Register || 5059 CType == TargetLowering::C_RegisterClass)) 5060 continue; 5061 5062 // If this is an 'other' or 'immediate' constraint, see if the operand is 5063 // valid for it. For example, on X86 we might have an 'rI' constraint. If 5064 // the operand is an integer in the range [0..31] we want to use I (saving a 5065 // load of a register), otherwise we must use 'r'. 5066 if ((CType == TargetLowering::C_Other || 5067 CType == TargetLowering::C_Immediate) && Op.getNode()) { 5068 assert(OpInfo.Codes[i].size() == 1 && 5069 "Unhandled multi-letter 'other' constraint"); 5070 std::vector<SDValue> ResultOps; 5071 TLI.LowerAsmOperandForConstraint(Op, OpInfo.Codes[i], 5072 ResultOps, *DAG); 5073 if (!ResultOps.empty()) { 5074 BestType = CType; 5075 BestIdx = i; 5076 break; 5077 } 5078 } 5079 5080 // Things with matching constraints can only be registers, per gcc 5081 // documentation. This mainly affects "g" constraints. 5082 if (CType == TargetLowering::C_Memory && OpInfo.hasMatchingInput()) 5083 continue; 5084 5085 // This constraint letter is more general than the previous one, use it. 5086 int Generality = getConstraintGenerality(CType); 5087 if (Generality > BestGenerality) { 5088 BestType = CType; 5089 BestIdx = i; 5090 BestGenerality = Generality; 5091 } 5092 } 5093 5094 OpInfo.ConstraintCode = OpInfo.Codes[BestIdx]; 5095 OpInfo.ConstraintType = BestType; 5096 } 5097 5098 /// Determines the constraint code and constraint type to use for the specific 5099 /// AsmOperandInfo, setting OpInfo.ConstraintCode and OpInfo.ConstraintType. 5100 void TargetLowering::ComputeConstraintToUse(AsmOperandInfo &OpInfo, 5101 SDValue Op, 5102 SelectionDAG *DAG) const { 5103 assert(!OpInfo.Codes.empty() && "Must have at least one constraint"); 5104 5105 // Single-letter constraints ('r') are very common. 5106 if (OpInfo.Codes.size() == 1) { 5107 OpInfo.ConstraintCode = OpInfo.Codes[0]; 5108 OpInfo.ConstraintType = getConstraintType(OpInfo.ConstraintCode); 5109 } else { 5110 ChooseConstraint(OpInfo, *this, Op, DAG); 5111 } 5112 5113 // 'X' matches anything. 5114 if (OpInfo.ConstraintCode == "X" && OpInfo.CallOperandVal) { 5115 // Constants are handled elsewhere. For Functions, the type here is the 5116 // type of the result, which is not what we want to look at; leave them 5117 // alone. 5118 Value *v = OpInfo.CallOperandVal; 5119 if (isa<ConstantInt>(v) || isa<Function>(v)) { 5120 return; 5121 } 5122 5123 if (isa<BasicBlock>(v) || isa<BlockAddress>(v)) { 5124 OpInfo.ConstraintCode = "i"; 5125 return; 5126 } 5127 5128 // Otherwise, try to resolve it to something we know about by looking at 5129 // the actual operand type. 5130 if (const char *Repl = LowerXConstraint(OpInfo.ConstraintVT)) { 5131 OpInfo.ConstraintCode = Repl; 5132 OpInfo.ConstraintType = getConstraintType(OpInfo.ConstraintCode); 5133 } 5134 } 5135 } 5136 5137 /// Given an exact SDIV by a constant, create a multiplication 5138 /// with the multiplicative inverse of the constant. 
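/// For example (i32, illustrative): an exact "sdiv X, 6" becomes
///   (mul (sra exact X, 1), 0xAAAAAAAB)
/// where 0xAAAAAAAB is the multiplicative inverse of 3 modulo 2^32
/// (3 * 0xAAAAAAAB == 2^33 + 1), found by the Newton iteration below that
/// doubles the number of correct low bits on every step.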
5139 static SDValue BuildExactSDIV(const TargetLowering &TLI, SDNode *N, 5140 const SDLoc &dl, SelectionDAG &DAG, 5141 SmallVectorImpl<SDNode *> &Created) { 5142 SDValue Op0 = N->getOperand(0); 5143 SDValue Op1 = N->getOperand(1); 5144 EVT VT = N->getValueType(0); 5145 EVT SVT = VT.getScalarType(); 5146 EVT ShVT = TLI.getShiftAmountTy(VT, DAG.getDataLayout()); 5147 EVT ShSVT = ShVT.getScalarType(); 5148 5149 bool UseSRA = false; 5150 SmallVector<SDValue, 16> Shifts, Factors; 5151 5152 auto BuildSDIVPattern = [&](ConstantSDNode *C) { 5153 if (C->isZero()) 5154 return false; 5155 APInt Divisor = C->getAPIntValue(); 5156 unsigned Shift = Divisor.countTrailingZeros(); 5157 if (Shift) { 5158 Divisor.ashrInPlace(Shift); 5159 UseSRA = true; 5160 } 5161 // Calculate the multiplicative inverse, using Newton's method. 5162 APInt t; 5163 APInt Factor = Divisor; 5164 while ((t = Divisor * Factor) != 1) 5165 Factor *= APInt(Divisor.getBitWidth(), 2) - t; 5166 Shifts.push_back(DAG.getConstant(Shift, dl, ShSVT)); 5167 Factors.push_back(DAG.getConstant(Factor, dl, SVT)); 5168 return true; 5169 }; 5170 5171 // Collect all magic values from the build vector. 5172 if (!ISD::matchUnaryPredicate(Op1, BuildSDIVPattern)) 5173 return SDValue(); 5174 5175 SDValue Shift, Factor; 5176 if (Op1.getOpcode() == ISD::BUILD_VECTOR) { 5177 Shift = DAG.getBuildVector(ShVT, dl, Shifts); 5178 Factor = DAG.getBuildVector(VT, dl, Factors); 5179 } else if (Op1.getOpcode() == ISD::SPLAT_VECTOR) { 5180 assert(Shifts.size() == 1 && Factors.size() == 1 && 5181 "Expected matchUnaryPredicate to return one element for scalable " 5182 "vectors"); 5183 Shift = DAG.getSplatVector(ShVT, dl, Shifts[0]); 5184 Factor = DAG.getSplatVector(VT, dl, Factors[0]); 5185 } else { 5186 assert(isa<ConstantSDNode>(Op1) && "Expected a constant"); 5187 Shift = Shifts[0]; 5188 Factor = Factors[0]; 5189 } 5190 5191 SDValue Res = Op0; 5192 5193 // Shift the value upfront if it is even, so the LSB is one. 5194 if (UseSRA) { 5195 // TODO: For UDIV use SRL instead of SRA. 5196 SDNodeFlags Flags; 5197 Flags.setExact(true); 5198 Res = DAG.getNode(ISD::SRA, dl, VT, Res, Shift, Flags); 5199 Created.push_back(Res.getNode()); 5200 } 5201 5202 return DAG.getNode(ISD::MUL, dl, VT, Res, Factor); 5203 } 5204 5205 SDValue TargetLowering::BuildSDIVPow2(SDNode *N, const APInt &Divisor, 5206 SelectionDAG &DAG, 5207 SmallVectorImpl<SDNode *> &Created) const { 5208 AttributeList Attr = DAG.getMachineFunction().getFunction().getAttributes(); 5209 const TargetLowering &TLI = DAG.getTargetLoweringInfo(); 5210 if (TLI.isIntDivCheap(N->getValueType(0), Attr)) 5211 return SDValue(N, 0); // Lower SDIV as SDIV 5212 return SDValue(); 5213 } 5214 5215 /// Given an ISD::SDIV node expressing a divide by constant, 5216 /// return a DAG expression to select that will generate the same value by 5217 /// multiplying by a magic number. 5218 /// Ref: "Hacker's Delight" or "The PowerPC Compiler Writer's Guide". 5219 SDValue TargetLowering::BuildSDIV(SDNode *N, SelectionDAG &DAG, 5220 bool IsAfterLegalization, 5221 SmallVectorImpl<SDNode *> &Created) const { 5222 SDLoc dl(N); 5223 EVT VT = N->getValueType(0); 5224 EVT SVT = VT.getScalarType(); 5225 EVT ShVT = getShiftAmountTy(VT, DAG.getDataLayout()); 5226 EVT ShSVT = ShVT.getScalarType(); 5227 unsigned EltBits = VT.getScalarSizeInBits(); 5228 EVT MulVT; 5229 5230 // Check to see if we can do this. 5231 // FIXME: We should be more aggressive here. 5232 if (!isTypeLegal(VT)) { 5233 // Limit this to simple scalars for now. 
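  // (Illustrative: an illegal i8 sdiv can still be handled below when i8
  // promotes to a type such as i32 with a legal MUL, in which case the magic
  // multiply is simply performed in the wider MulVT.)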
5234 if (VT.isVector() || !VT.isSimple()) 5235 return SDValue(); 5236 5237 // If this type will be promoted to a large enough type with a legal 5238 // multiply operation, we can go ahead and do this transform. 5239 if (getTypeAction(VT.getSimpleVT()) != TypePromoteInteger) 5240 return SDValue(); 5241 5242 MulVT = getTypeToTransformTo(*DAG.getContext(), VT); 5243 if (MulVT.getSizeInBits() < (2 * EltBits) || 5244 !isOperationLegal(ISD::MUL, MulVT)) 5245 return SDValue(); 5246 } 5247 5248 // If the sdiv has an 'exact' bit we can use a simpler lowering. 5249 if (N->getFlags().hasExact()) 5250 return BuildExactSDIV(*this, N, dl, DAG, Created); 5251 5252 SmallVector<SDValue, 16> MagicFactors, Factors, Shifts, ShiftMasks; 5253 5254 auto BuildSDIVPattern = [&](ConstantSDNode *C) { 5255 if (C->isZero()) 5256 return false; 5257 5258 const APInt &Divisor = C->getAPIntValue(); 5259 SignedDivisionByConstantInfo magics = SignedDivisionByConstantInfo::get(Divisor); 5260 int NumeratorFactor = 0; 5261 int ShiftMask = -1; 5262 5263 if (Divisor.isOne() || Divisor.isAllOnes()) { 5264 // If d is +1/-1, we just multiply the numerator by +1/-1. 5265 NumeratorFactor = Divisor.getSExtValue(); 5266 magics.Magic = 0; 5267 magics.ShiftAmount = 0; 5268 ShiftMask = 0; 5269 } else if (Divisor.isStrictlyPositive() && magics.Magic.isNegative()) { 5270 // If d > 0 and m < 0, add the numerator. 5271 NumeratorFactor = 1; 5272 } else if (Divisor.isNegative() && magics.Magic.isStrictlyPositive()) { 5273 // If d < 0 and m > 0, subtract the numerator. 5274 NumeratorFactor = -1; 5275 } 5276 5277 MagicFactors.push_back(DAG.getConstant(magics.Magic, dl, SVT)); 5278 Factors.push_back(DAG.getConstant(NumeratorFactor, dl, SVT)); 5279 Shifts.push_back(DAG.getConstant(magics.ShiftAmount, dl, ShSVT)); 5280 ShiftMasks.push_back(DAG.getConstant(ShiftMask, dl, SVT)); 5281 return true; 5282 }; 5283 5284 SDValue N0 = N->getOperand(0); 5285 SDValue N1 = N->getOperand(1); 5286 5287 // Collect the shifts / magic values from each element. 5288 if (!ISD::matchUnaryPredicate(N1, BuildSDIVPattern)) 5289 return SDValue(); 5290 5291 SDValue MagicFactor, Factor, Shift, ShiftMask; 5292 if (N1.getOpcode() == ISD::BUILD_VECTOR) { 5293 MagicFactor = DAG.getBuildVector(VT, dl, MagicFactors); 5294 Factor = DAG.getBuildVector(VT, dl, Factors); 5295 Shift = DAG.getBuildVector(ShVT, dl, Shifts); 5296 ShiftMask = DAG.getBuildVector(VT, dl, ShiftMasks); 5297 } else if (N1.getOpcode() == ISD::SPLAT_VECTOR) { 5298 assert(MagicFactors.size() == 1 && Factors.size() == 1 && 5299 Shifts.size() == 1 && ShiftMasks.size() == 1 && 5300 "Expected matchUnaryPredicate to return one element for scalable " 5301 "vectors"); 5302 MagicFactor = DAG.getSplatVector(VT, dl, MagicFactors[0]); 5303 Factor = DAG.getSplatVector(VT, dl, Factors[0]); 5304 Shift = DAG.getSplatVector(ShVT, dl, Shifts[0]); 5305 ShiftMask = DAG.getSplatVector(VT, dl, ShiftMasks[0]); 5306 } else { 5307 assert(isa<ConstantSDNode>(N1) && "Expected a constant"); 5308 MagicFactor = MagicFactors[0]; 5309 Factor = Factors[0]; 5310 Shift = Shifts[0]; 5311 ShiftMask = ShiftMasks[0]; 5312 } 5313 5314 // Multiply the numerator (operand 0) by the magic value. 5315 // FIXME: We should support doing a MUL in a wider type. 5316 auto GetMULHS = [&](SDValue X, SDValue Y) { 5317 // If the type isn't legal, use a wider mul of the the type calculated 5318 // earlier. 
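    // Illustrative sketch, assuming i8 promoted to i32:
    //   mulhs8(a, b) ~= trunc8((sext32(a) * sext32(b)) >> 8)
    // i.e. a sign-extended full multiply, a logical shift by the element
    // width, and a truncate back to the original type.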
5319 if (!isTypeLegal(VT)) { 5320 X = DAG.getNode(ISD::SIGN_EXTEND, dl, MulVT, X); 5321 Y = DAG.getNode(ISD::SIGN_EXTEND, dl, MulVT, Y); 5322 Y = DAG.getNode(ISD::MUL, dl, MulVT, X, Y); 5323 Y = DAG.getNode(ISD::SRL, dl, MulVT, Y, 5324 DAG.getShiftAmountConstant(EltBits, MulVT, dl)); 5325 return DAG.getNode(ISD::TRUNCATE, dl, VT, Y); 5326 } 5327 5328 if (isOperationLegalOrCustom(ISD::MULHS, VT, IsAfterLegalization)) 5329 return DAG.getNode(ISD::MULHS, dl, VT, X, Y); 5330 if (isOperationLegalOrCustom(ISD::SMUL_LOHI, VT, IsAfterLegalization)) { 5331 SDValue LoHi = 5332 DAG.getNode(ISD::SMUL_LOHI, dl, DAG.getVTList(VT, VT), X, Y); 5333 return SDValue(LoHi.getNode(), 1); 5334 } 5335 return SDValue(); 5336 }; 5337 5338 SDValue Q = GetMULHS(N0, MagicFactor); 5339 if (!Q) 5340 return SDValue(); 5341 5342 Created.push_back(Q.getNode()); 5343 5344 // (Optionally) Add/subtract the numerator using Factor. 5345 Factor = DAG.getNode(ISD::MUL, dl, VT, N0, Factor); 5346 Created.push_back(Factor.getNode()); 5347 Q = DAG.getNode(ISD::ADD, dl, VT, Q, Factor); 5348 Created.push_back(Q.getNode()); 5349 5350 // Shift right algebraic by shift value. 5351 Q = DAG.getNode(ISD::SRA, dl, VT, Q, Shift); 5352 Created.push_back(Q.getNode()); 5353 5354 // Extract the sign bit, mask it and add it to the quotient. 5355 SDValue SignShift = DAG.getConstant(EltBits - 1, dl, ShVT); 5356 SDValue T = DAG.getNode(ISD::SRL, dl, VT, Q, SignShift); 5357 Created.push_back(T.getNode()); 5358 T = DAG.getNode(ISD::AND, dl, VT, T, ShiftMask); 5359 Created.push_back(T.getNode()); 5360 return DAG.getNode(ISD::ADD, dl, VT, Q, T); 5361 } 5362 5363 /// Given an ISD::UDIV node expressing a divide by constant, 5364 /// return a DAG expression to select that will generate the same value by 5365 /// multiplying by a magic number. 5366 /// Ref: "Hacker's Delight" or "The PowerPC Compiler Writer's Guide". 5367 SDValue TargetLowering::BuildUDIV(SDNode *N, SelectionDAG &DAG, 5368 bool IsAfterLegalization, 5369 SmallVectorImpl<SDNode *> &Created) const { 5370 SDLoc dl(N); 5371 EVT VT = N->getValueType(0); 5372 EVT SVT = VT.getScalarType(); 5373 EVT ShVT = getShiftAmountTy(VT, DAG.getDataLayout()); 5374 EVT ShSVT = ShVT.getScalarType(); 5375 unsigned EltBits = VT.getScalarSizeInBits(); 5376 EVT MulVT; 5377 5378 // Check to see if we can do this. 5379 // FIXME: We should be more aggressive here. 5380 if (!isTypeLegal(VT)) { 5381 // Limit this to simple scalars for now. 5382 if (VT.isVector() || !VT.isSimple()) 5383 return SDValue(); 5384 5385 // If this type will be promoted to a large enough type with a legal 5386 // multiply operation, we can go ahead and do this transform. 5387 if (getTypeAction(VT.getSimpleVT()) != TypePromoteInteger) 5388 return SDValue(); 5389 5390 MulVT = getTypeToTransformTo(*DAG.getContext(), VT); 5391 if (MulVT.getSizeInBits() < (2 * EltBits) || 5392 !isOperationLegal(ISD::MUL, MulVT)) 5393 return SDValue(); 5394 } 5395 5396 bool UseNPQ = false; 5397 SmallVector<SDValue, 16> PreShifts, PostShifts, MagicFactors, NPQFactors; 5398 5399 auto BuildUDIVPattern = [&](ConstantSDNode *C) { 5400 if (C->isZero()) 5401 return false; 5402 // FIXME: We should use a narrower constant when the upper 5403 // bits are known to be zero. 5404 const APInt& Divisor = C->getAPIntValue(); 5405 UnsignedDivisonByConstantInfo magics = UnsignedDivisonByConstantInfo::get(Divisor); 5406 unsigned PreShift = 0, PostShift = 0; 5407 5408 // If the divisor is even, we can avoid using the expensive fixup by 5409 // shifting the divided value upfront. 
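    // For example (illustrative): for a udiv by 12 the dividend is shifted
    // right by 2 first, and the magic constant is then computed for the odd
    // divisor 3, which no longer needs the more expensive NPQ fixup path.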
5410 if (magics.IsAdd != 0 && !Divisor[0]) { 5411 PreShift = Divisor.countTrailingZeros(); 5412 // Get magic number for the shifted divisor. 5413 magics = UnsignedDivisonByConstantInfo::get(Divisor.lshr(PreShift), PreShift); 5414 assert(magics.IsAdd == 0 && "Should use cheap fixup now"); 5415 } 5416 5417 APInt Magic = magics.Magic; 5418 5419 unsigned SelNPQ; 5420 if (magics.IsAdd == 0 || Divisor.isOne()) { 5421 assert(magics.ShiftAmount < Divisor.getBitWidth() && 5422 "We shouldn't generate an undefined shift!"); 5423 PostShift = magics.ShiftAmount; 5424 SelNPQ = false; 5425 } else { 5426 PostShift = magics.ShiftAmount - 1; 5427 SelNPQ = true; 5428 } 5429 5430 PreShifts.push_back(DAG.getConstant(PreShift, dl, ShSVT)); 5431 MagicFactors.push_back(DAG.getConstant(Magic, dl, SVT)); 5432 NPQFactors.push_back( 5433 DAG.getConstant(SelNPQ ? APInt::getOneBitSet(EltBits, EltBits - 1) 5434 : APInt::getZero(EltBits), 5435 dl, SVT)); 5436 PostShifts.push_back(DAG.getConstant(PostShift, dl, ShSVT)); 5437 UseNPQ |= SelNPQ; 5438 return true; 5439 }; 5440 5441 SDValue N0 = N->getOperand(0); 5442 SDValue N1 = N->getOperand(1); 5443 5444 // Collect the shifts/magic values from each element. 5445 if (!ISD::matchUnaryPredicate(N1, BuildUDIVPattern)) 5446 return SDValue(); 5447 5448 SDValue PreShift, PostShift, MagicFactor, NPQFactor; 5449 if (N1.getOpcode() == ISD::BUILD_VECTOR) { 5450 PreShift = DAG.getBuildVector(ShVT, dl, PreShifts); 5451 MagicFactor = DAG.getBuildVector(VT, dl, MagicFactors); 5452 NPQFactor = DAG.getBuildVector(VT, dl, NPQFactors); 5453 PostShift = DAG.getBuildVector(ShVT, dl, PostShifts); 5454 } else if (N1.getOpcode() == ISD::SPLAT_VECTOR) { 5455 assert(PreShifts.size() == 1 && MagicFactors.size() == 1 && 5456 NPQFactors.size() == 1 && PostShifts.size() == 1 && 5457 "Expected matchUnaryPredicate to return one for scalable vectors"); 5458 PreShift = DAG.getSplatVector(ShVT, dl, PreShifts[0]); 5459 MagicFactor = DAG.getSplatVector(VT, dl, MagicFactors[0]); 5460 NPQFactor = DAG.getSplatVector(VT, dl, NPQFactors[0]); 5461 PostShift = DAG.getSplatVector(ShVT, dl, PostShifts[0]); 5462 } else { 5463 assert(isa<ConstantSDNode>(N1) && "Expected a constant"); 5464 PreShift = PreShifts[0]; 5465 MagicFactor = MagicFactors[0]; 5466 PostShift = PostShifts[0]; 5467 } 5468 5469 SDValue Q = N0; 5470 Q = DAG.getNode(ISD::SRL, dl, VT, Q, PreShift); 5471 Created.push_back(Q.getNode()); 5472 5473 // FIXME: We should support doing a MUL in a wider type. 5474 auto GetMULHU = [&](SDValue X, SDValue Y) { 5475 // If the type isn't legal, use a wider mul of the the type calculated 5476 // earlier. 5477 if (!isTypeLegal(VT)) { 5478 X = DAG.getNode(ISD::ZERO_EXTEND, dl, MulVT, X); 5479 Y = DAG.getNode(ISD::ZERO_EXTEND, dl, MulVT, Y); 5480 Y = DAG.getNode(ISD::MUL, dl, MulVT, X, Y); 5481 Y = DAG.getNode(ISD::SRL, dl, MulVT, Y, 5482 DAG.getShiftAmountConstant(EltBits, MulVT, dl)); 5483 return DAG.getNode(ISD::TRUNCATE, dl, VT, Y); 5484 } 5485 5486 if (isOperationLegalOrCustom(ISD::MULHU, VT, IsAfterLegalization)) 5487 return DAG.getNode(ISD::MULHU, dl, VT, X, Y); 5488 if (isOperationLegalOrCustom(ISD::UMUL_LOHI, VT, IsAfterLegalization)) { 5489 SDValue LoHi = 5490 DAG.getNode(ISD::UMUL_LOHI, dl, DAG.getVTList(VT, VT), X, Y); 5491 return SDValue(LoHi.getNode(), 1); 5492 } 5493 return SDValue(); // No mulhu or equivalent 5494 }; 5495 5496 // Multiply the numerator (operand 0) by the magic value. 
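  // Scalar sketch of the DAG built below for an i32 udiv by 7 (illustrative;
  // 0x24924925 is the magic constant for 7, which needs the NPQ path):
  //   uint32_t udiv7(uint32_t n) {
  //     uint32_t q = (uint64_t(n) * 0x24924925u) >> 32; // MULHU
  //     uint32_t npq = (n - q) >> 1;                    // NPQ fixup
  //     return (npq + q) >> 2;                          // PostShift
  //   }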
5497 Q = GetMULHU(Q, MagicFactor); 5498 if (!Q) 5499 return SDValue(); 5500 5501 Created.push_back(Q.getNode()); 5502 5503 if (UseNPQ) { 5504 SDValue NPQ = DAG.getNode(ISD::SUB, dl, VT, N0, Q); 5505 Created.push_back(NPQ.getNode()); 5506 5507 // For vectors we might have a mix of non-NPQ/NPQ paths, so use 5508 // MULHU to act as a SRL-by-1 for NPQ, else multiply by zero. 5509 if (VT.isVector()) 5510 NPQ = GetMULHU(NPQ, NPQFactor); 5511 else 5512 NPQ = DAG.getNode(ISD::SRL, dl, VT, NPQ, DAG.getConstant(1, dl, ShVT)); 5513 5514 Created.push_back(NPQ.getNode()); 5515 5516 Q = DAG.getNode(ISD::ADD, dl, VT, NPQ, Q); 5517 Created.push_back(Q.getNode()); 5518 } 5519 5520 Q = DAG.getNode(ISD::SRL, dl, VT, Q, PostShift); 5521 Created.push_back(Q.getNode()); 5522 5523 EVT SetCCVT = getSetCCResultType(DAG.getDataLayout(), *DAG.getContext(), VT); 5524 5525 SDValue One = DAG.getConstant(1, dl, VT); 5526 SDValue IsOne = DAG.getSetCC(dl, SetCCVT, N1, One, ISD::SETEQ); 5527 return DAG.getSelect(dl, VT, IsOne, N0, Q); 5528 } 5529 5530 /// If all values in Values that *don't* match the predicate are same 'splat' 5531 /// value, then replace all values with that splat value. 5532 /// Else, if AlternativeReplacement was provided, then replace all values that 5533 /// do match predicate with AlternativeReplacement value. 5534 static void 5535 turnVectorIntoSplatVector(MutableArrayRef<SDValue> Values, 5536 std::function<bool(SDValue)> Predicate, 5537 SDValue AlternativeReplacement = SDValue()) { 5538 SDValue Replacement; 5539 // Is there a value for which the Predicate does *NOT* match? What is it? 5540 auto SplatValue = llvm::find_if_not(Values, Predicate); 5541 if (SplatValue != Values.end()) { 5542 // Does Values consist only of SplatValue's and values matching Predicate? 5543 if (llvm::all_of(Values, [Predicate, SplatValue](SDValue Value) { 5544 return Value == *SplatValue || Predicate(Value); 5545 })) // Then we shall replace values matching predicate with SplatValue. 5546 Replacement = *SplatValue; 5547 } 5548 if (!Replacement) { 5549 // Oops, we did not find the "baseline" splat value. 5550 if (!AlternativeReplacement) 5551 return; // Nothing to do. 5552 // Let's replace with provided value then. 5553 Replacement = AlternativeReplacement; 5554 } 5555 std::replace_if(Values.begin(), Values.end(), Predicate, Replacement); 5556 } 5557 5558 /// Given an ISD::UREM used only by an ISD::SETEQ or ISD::SETNE 5559 /// where the divisor is constant and the comparison target is zero, 5560 /// return a DAG expression that will generate the same comparison result 5561 /// using only multiplications, additions and shifts/rotations. 5562 /// Ref: "Hacker's Delight" 10-17. 
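/// Scalar sketch of the resulting test (i32, illustrative): for
/// "(x u% 10) == 0" the divisor decomposes as 5 * 2^1, so
///   bool isMultipleOf10(uint32_t x) {
///     uint32_t r = x * 0xCCCCCCCDu;   // P, the inverse of 5 modulo 2^32
///     r = (r >> 1) | (r << 31);       // rotate right by K == 1
///     return r <= 0x19999999u;        // Q == floor((2^32 - 1) / 10)
///   }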
5563 SDValue TargetLowering::buildUREMEqFold(EVT SETCCVT, SDValue REMNode, 5564 SDValue CompTargetNode, 5565 ISD::CondCode Cond, 5566 DAGCombinerInfo &DCI, 5567 const SDLoc &DL) const { 5568 SmallVector<SDNode *, 5> Built; 5569 if (SDValue Folded = prepareUREMEqFold(SETCCVT, REMNode, CompTargetNode, Cond, 5570 DCI, DL, Built)) { 5571 for (SDNode *N : Built) 5572 DCI.AddToWorklist(N); 5573 return Folded; 5574 } 5575 5576 return SDValue(); 5577 } 5578 5579 SDValue 5580 TargetLowering::prepareUREMEqFold(EVT SETCCVT, SDValue REMNode, 5581 SDValue CompTargetNode, ISD::CondCode Cond, 5582 DAGCombinerInfo &DCI, const SDLoc &DL, 5583 SmallVectorImpl<SDNode *> &Created) const { 5584 // fold (seteq/ne (urem N, D), 0) -> (setule/ugt (rotr (mul N, P), K), Q) 5585 // - D must be constant, with D = D0 * 2^K where D0 is odd 5586 // - P is the multiplicative inverse of D0 modulo 2^W 5587 // - Q = floor(((2^W) - 1) / D) 5588 // where W is the width of the common type of N and D. 5589 assert((Cond == ISD::SETEQ || Cond == ISD::SETNE) && 5590 "Only applicable for (in)equality comparisons."); 5591 5592 SelectionDAG &DAG = DCI.DAG; 5593 5594 EVT VT = REMNode.getValueType(); 5595 EVT SVT = VT.getScalarType(); 5596 EVT ShVT = getShiftAmountTy(VT, DAG.getDataLayout(), !DCI.isBeforeLegalize()); 5597 EVT ShSVT = ShVT.getScalarType(); 5598 5599 // If MUL is unavailable, we cannot proceed in any case. 5600 if (!DCI.isBeforeLegalizeOps() && !isOperationLegalOrCustom(ISD::MUL, VT)) 5601 return SDValue(); 5602 5603 bool ComparingWithAllZeros = true; 5604 bool AllComparisonsWithNonZerosAreTautological = true; 5605 bool HadTautologicalLanes = false; 5606 bool AllLanesAreTautological = true; 5607 bool HadEvenDivisor = false; 5608 bool AllDivisorsArePowerOfTwo = true; 5609 bool HadTautologicalInvertedLanes = false; 5610 SmallVector<SDValue, 16> PAmts, KAmts, QAmts, IAmts; 5611 5612 auto BuildUREMPattern = [&](ConstantSDNode *CDiv, ConstantSDNode *CCmp) { 5613 // Division by 0 is UB. Leave it to be constant-folded elsewhere. 5614 if (CDiv->isZero()) 5615 return false; 5616 5617 const APInt &D = CDiv->getAPIntValue(); 5618 const APInt &Cmp = CCmp->getAPIntValue(); 5619 5620 ComparingWithAllZeros &= Cmp.isZero(); 5621 5622 // x u% C1` is *always* less than C1. So given `x u% C1 == C2`, 5623 // if C2 is not less than C1, the comparison is always false. 5624 // But we will only be able to produce the comparison that will give the 5625 // opposive tautological answer. So this lane would need to be fixed up. 5626 bool TautologicalInvertedLane = D.ule(Cmp); 5627 HadTautologicalInvertedLanes |= TautologicalInvertedLane; 5628 5629 // If all lanes are tautological (either all divisors are ones, or divisor 5630 // is not greater than the constant we are comparing with), 5631 // we will prefer to avoid the fold. 5632 bool TautologicalLane = D.isOne() || TautologicalInvertedLane; 5633 HadTautologicalLanes |= TautologicalLane; 5634 AllLanesAreTautological &= TautologicalLane; 5635 5636 // If we are comparing with non-zero, we need'll need to subtract said 5637 // comparison value from the LHS. But there is no point in doing that if 5638 // every lane where we are comparing with non-zero is tautological.. 5639 if (!Cmp.isZero()) 5640 AllComparisonsWithNonZerosAreTautological &= TautologicalLane; 5641 5642 // Decompose D into D0 * 2^K 5643 unsigned K = D.countTrailingZeros(); 5644 assert((!D.isOne() || (K == 0)) && "For divisor '1' we won't rotate."); 5645 APInt D0 = D.lshr(K); 5646 5647 // D is even if it has trailing zeros. 
5648 HadEvenDivisor |= (K != 0); 5649 // D is a power-of-two if D0 is one. 5650 // If all divisors are power-of-two, we will prefer to avoid the fold. 5651 AllDivisorsArePowerOfTwo &= D0.isOne(); 5652 5653 // P = inv(D0, 2^W) 5654 // 2^W requires W + 1 bits, so we have to extend and then truncate. 5655 unsigned W = D.getBitWidth(); 5656 APInt P = D0.zext(W + 1) 5657 .multiplicativeInverse(APInt::getSignedMinValue(W + 1)) 5658 .trunc(W); 5659 assert(!P.isZero() && "No multiplicative inverse!"); // unreachable 5660 assert((D0 * P).isOne() && "Multiplicative inverse basic check failed."); 5661 5662 // Q = floor((2^W - 1) u/ D) 5663 // R = ((2^W - 1) u% D) 5664 APInt Q, R; 5665 APInt::udivrem(APInt::getAllOnes(W), D, Q, R); 5666 5667 // If we are comparing with zero, then that comparison constant is okay, 5668 // else it may need to be one less than that. 5669 if (Cmp.ugt(R)) 5670 Q -= 1; 5671 5672 assert(APInt::getAllOnes(ShSVT.getSizeInBits()).ugt(K) && 5673 "We are expecting that K is always less than all-ones for ShSVT"); 5674 5675 // If the lane is tautological the result can be constant-folded. 5676 if (TautologicalLane) { 5677 // Set P and K amount to a bogus values so we can try to splat them. 5678 P = 0; 5679 K = -1; 5680 // And ensure that comparison constant is tautological, 5681 // it will always compare true/false. 5682 Q = -1; 5683 } 5684 5685 PAmts.push_back(DAG.getConstant(P, DL, SVT)); 5686 KAmts.push_back( 5687 DAG.getConstant(APInt(ShSVT.getSizeInBits(), K), DL, ShSVT)); 5688 QAmts.push_back(DAG.getConstant(Q, DL, SVT)); 5689 return true; 5690 }; 5691 5692 SDValue N = REMNode.getOperand(0); 5693 SDValue D = REMNode.getOperand(1); 5694 5695 // Collect the values from each element. 5696 if (!ISD::matchBinaryPredicate(D, CompTargetNode, BuildUREMPattern)) 5697 return SDValue(); 5698 5699 // If all lanes are tautological, the result can be constant-folded. 5700 if (AllLanesAreTautological) 5701 return SDValue(); 5702 5703 // If this is a urem by a powers-of-two, avoid the fold since it can be 5704 // best implemented as a bit test. 5705 if (AllDivisorsArePowerOfTwo) 5706 return SDValue(); 5707 5708 SDValue PVal, KVal, QVal; 5709 if (D.getOpcode() == ISD::BUILD_VECTOR) { 5710 if (HadTautologicalLanes) { 5711 // Try to turn PAmts into a splat, since we don't care about the values 5712 // that are currently '0'. If we can't, just keep '0'`s. 5713 turnVectorIntoSplatVector(PAmts, isNullConstant); 5714 // Try to turn KAmts into a splat, since we don't care about the values 5715 // that are currently '-1'. If we can't, change them to '0'`s. 5716 turnVectorIntoSplatVector(KAmts, isAllOnesConstant, 5717 DAG.getConstant(0, DL, ShSVT)); 5718 } 5719 5720 PVal = DAG.getBuildVector(VT, DL, PAmts); 5721 KVal = DAG.getBuildVector(ShVT, DL, KAmts); 5722 QVal = DAG.getBuildVector(VT, DL, QAmts); 5723 } else if (D.getOpcode() == ISD::SPLAT_VECTOR) { 5724 assert(PAmts.size() == 1 && KAmts.size() == 1 && QAmts.size() == 1 && 5725 "Expected matchBinaryPredicate to return one element for " 5726 "SPLAT_VECTORs"); 5727 PVal = DAG.getSplatVector(VT, DL, PAmts[0]); 5728 KVal = DAG.getSplatVector(ShVT, DL, KAmts[0]); 5729 QVal = DAG.getSplatVector(VT, DL, QAmts[0]); 5730 } else { 5731 PVal = PAmts[0]; 5732 KVal = KAmts[0]; 5733 QVal = QAmts[0]; 5734 } 5735 5736 if (!ComparingWithAllZeros && !AllComparisonsWithNonZerosAreTautological) { 5737 if (!DCI.isBeforeLegalizeOps() && !isOperationLegalOrCustom(ISD::SUB, VT)) 5738 return SDValue(); // FIXME: Could/should use `ISD::ADD`? 
5739     assert(CompTargetNode.getValueType() == N.getValueType() &&
5740            "Expecting that the types on LHS and RHS of comparisons match.");
5741     N = DAG.getNode(ISD::SUB, DL, VT, N, CompTargetNode);
5742   }
5743 
5744   // (mul N, P)
5745   SDValue Op0 = DAG.getNode(ISD::MUL, DL, VT, N, PVal);
5746   Created.push_back(Op0.getNode());
5747 
5748   // Rotate right only if any divisor was even. We avoid rotates for all-odd
5749   // divisors as a performance improvement, since rotating by 0 is a no-op.
5750   if (HadEvenDivisor) {
5751     // We need ROTR to do this.
5752     if (!DCI.isBeforeLegalizeOps() && !isOperationLegalOrCustom(ISD::ROTR, VT))
5753       return SDValue();
5754     // UREM: (rotr (mul N, P), K)
5755     Op0 = DAG.getNode(ISD::ROTR, DL, VT, Op0, KVal);
5756     Created.push_back(Op0.getNode());
5757   }
5758 
5759   // UREM: (setule/setugt (rotr (mul N, P), K), Q)
5760   SDValue NewCC =
5761       DAG.getSetCC(DL, SETCCVT, Op0, QVal,
5762                    ((Cond == ISD::SETEQ) ? ISD::SETULE : ISD::SETUGT));
5763   if (!HadTautologicalInvertedLanes)
5764     return NewCC;
5765 
5766   // If any lanes previously compared always-false, NewCC will give an
5767   // always-true result for them, so we need to fix up those lanes.
5768   // Or the other way around for the inequality predicate.
5769   assert(VT.isVector() && "Can/should only get here for vectors.");
5770   Created.push_back(NewCC.getNode());
5771 
5772   // `x u% C1` is *always* less than C1. So given `x u% C1 == C2`,
5773   // if C2 is not less than C1, the comparison is always false.
5774   // But we have produced the comparison that will give the
5775   // opposite tautological answer. So these lanes would need to be fixed up.
5776   SDValue TautologicalInvertedChannels =
5777       DAG.getSetCC(DL, SETCCVT, D, CompTargetNode, ISD::SETULE);
5778   Created.push_back(TautologicalInvertedChannels.getNode());
5779 
5780   // NOTE: we avoid letting illegal types through even if we're before legalize
5781   // ops; legalization has a hard time producing good code for this.
5782   if (isOperationLegalOrCustom(ISD::VSELECT, SETCCVT)) {
5783     // If we have a vector select, let's replace the comparison results in the
5784     // affected lanes with the correct tautological result.
5785     SDValue Replacement = DAG.getBoolConstant(Cond == ISD::SETEQ ? false : true,
5786                                               DL, SETCCVT, SETCCVT);
5787     return DAG.getNode(ISD::VSELECT, DL, SETCCVT, TautologicalInvertedChannels,
5788                        Replacement, NewCC);
5789   }
5790 
5791   // Else, we can just invert the comparison result in the appropriate lanes.
5792   //
5793   // NOTE: see the NOTE above about avoiding illegal types for VSELECT.
5794   if (isOperationLegalOrCustom(ISD::XOR, SETCCVT))
5795     return DAG.getNode(ISD::XOR, DL, SETCCVT, NewCC,
5796                        TautologicalInvertedChannels);
5797 
5798   return SDValue(); // Don't know how to lower.
5799 }
5800 
5801 /// Given an ISD::SREM used only by an ISD::SETEQ or ISD::SETNE
5802 /// where the divisor is constant and the comparison target is zero,
5803 /// return a DAG expression that will generate the same comparison result
5804 /// using only multiplications, additions and shifts/rotations.
5805 /// Ref: "Hacker's Delight" 10-17.
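/// Scalar sketch of the resulting test (i32, illustrative): for
/// "(x s% 3) == 0" we get P == 0xAAAAAAAB, A == 0x2AAAAAAA, K == 0 and
/// Q == 0x55555554, so the fold is equivalent to
///   bool isMultipleOf3(int32_t x) {
///     return uint32_t(x) * 0xAAAAAAABu + 0x2AAAAAAAu <= 0x55555554u;
///   }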
5806 SDValue TargetLowering::buildSREMEqFold(EVT SETCCVT, SDValue REMNode, 5807 SDValue CompTargetNode, 5808 ISD::CondCode Cond, 5809 DAGCombinerInfo &DCI, 5810 const SDLoc &DL) const { 5811 SmallVector<SDNode *, 7> Built; 5812 if (SDValue Folded = prepareSREMEqFold(SETCCVT, REMNode, CompTargetNode, Cond, 5813 DCI, DL, Built)) { 5814 assert(Built.size() <= 7 && "Max size prediction failed."); 5815 for (SDNode *N : Built) 5816 DCI.AddToWorklist(N); 5817 return Folded; 5818 } 5819 5820 return SDValue(); 5821 } 5822 5823 SDValue 5824 TargetLowering::prepareSREMEqFold(EVT SETCCVT, SDValue REMNode, 5825 SDValue CompTargetNode, ISD::CondCode Cond, 5826 DAGCombinerInfo &DCI, const SDLoc &DL, 5827 SmallVectorImpl<SDNode *> &Created) const { 5828 // Fold: 5829 // (seteq/ne (srem N, D), 0) 5830 // To: 5831 // (setule/ugt (rotr (add (mul N, P), A), K), Q) 5832 // 5833 // - D must be constant, with D = D0 * 2^K where D0 is odd 5834 // - P is the multiplicative inverse of D0 modulo 2^W 5835 // - A = bitwiseand(floor((2^(W - 1) - 1) / D0), (-(2^k))) 5836 // - Q = floor((2 * A) / (2^K)) 5837 // where W is the width of the common type of N and D. 5838 assert((Cond == ISD::SETEQ || Cond == ISD::SETNE) && 5839 "Only applicable for (in)equality comparisons."); 5840 5841 SelectionDAG &DAG = DCI.DAG; 5842 5843 EVT VT = REMNode.getValueType(); 5844 EVT SVT = VT.getScalarType(); 5845 EVT ShVT = getShiftAmountTy(VT, DAG.getDataLayout(), !DCI.isBeforeLegalize()); 5846 EVT ShSVT = ShVT.getScalarType(); 5847 5848 // If we are after ops legalization, and MUL is unavailable, we can not 5849 // proceed. 5850 if (!DCI.isBeforeLegalizeOps() && !isOperationLegalOrCustom(ISD::MUL, VT)) 5851 return SDValue(); 5852 5853 // TODO: Could support comparing with non-zero too. 5854 ConstantSDNode *CompTarget = isConstOrConstSplat(CompTargetNode); 5855 if (!CompTarget || !CompTarget->isZero()) 5856 return SDValue(); 5857 5858 bool HadIntMinDivisor = false; 5859 bool HadOneDivisor = false; 5860 bool AllDivisorsAreOnes = true; 5861 bool HadEvenDivisor = false; 5862 bool NeedToApplyOffset = false; 5863 bool AllDivisorsArePowerOfTwo = true; 5864 SmallVector<SDValue, 16> PAmts, AAmts, KAmts, QAmts; 5865 5866 auto BuildSREMPattern = [&](ConstantSDNode *C) { 5867 // Division by 0 is UB. Leave it to be constant-folded elsewhere. 5868 if (C->isZero()) 5869 return false; 5870 5871 // FIXME: we don't fold `rem %X, -C` to `rem %X, C` in DAGCombine. 5872 5873 // WARNING: this fold is only valid for positive divisors! 5874 APInt D = C->getAPIntValue(); 5875 if (D.isNegative()) 5876 D.negate(); // `rem %X, -C` is equivalent to `rem %X, C` 5877 5878 HadIntMinDivisor |= D.isMinSignedValue(); 5879 5880 // If all divisors are ones, we will prefer to avoid the fold. 5881 HadOneDivisor |= D.isOne(); 5882 AllDivisorsAreOnes &= D.isOne(); 5883 5884 // Decompose D into D0 * 2^K 5885 unsigned K = D.countTrailingZeros(); 5886 assert((!D.isOne() || (K == 0)) && "For divisor '1' we won't rotate."); 5887 APInt D0 = D.lshr(K); 5888 5889 if (!D.isMinSignedValue()) { 5890 // D is even if it has trailing zeros; unless it's INT_MIN, in which case 5891 // we don't care about this lane in this fold, we'll special-handle it. 5892 HadEvenDivisor |= (K != 0); 5893 } 5894 5895 // D is a power-of-two if D0 is one. This includes INT_MIN. 5896 // If all divisors are power-of-two, we will prefer to avoid the fold. 5897 AllDivisorsArePowerOfTwo &= D0.isOne(); 5898 5899 // P = inv(D0, 2^W) 5900 // 2^W requires W + 1 bits, so we have to extend and then truncate. 
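    // For example (illustrative): for D0 == 3 and W == 32 the inverse is taken
    // modulo 2^32 using 33-bit arithmetic (so the modulus is representable),
    // giving P == 0xAAAAAAAB, since 3 * 0xAAAAAAAB == 1 (mod 2^32).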
5901 unsigned W = D.getBitWidth(); 5902 APInt P = D0.zext(W + 1) 5903 .multiplicativeInverse(APInt::getSignedMinValue(W + 1)) 5904 .trunc(W); 5905 assert(!P.isZero() && "No multiplicative inverse!"); // unreachable 5906 assert((D0 * P).isOne() && "Multiplicative inverse basic check failed."); 5907 5908 // A = floor((2^(W - 1) - 1) / D0) & -2^K 5909 APInt A = APInt::getSignedMaxValue(W).udiv(D0); 5910 A.clearLowBits(K); 5911 5912 if (!D.isMinSignedValue()) { 5913 // If divisor INT_MIN, then we don't care about this lane in this fold, 5914 // we'll special-handle it. 5915 NeedToApplyOffset |= A != 0; 5916 } 5917 5918 // Q = floor((2 * A) / (2^K)) 5919 APInt Q = (2 * A).udiv(APInt::getOneBitSet(W, K)); 5920 5921 assert(APInt::getAllOnes(SVT.getSizeInBits()).ugt(A) && 5922 "We are expecting that A is always less than all-ones for SVT"); 5923 assert(APInt::getAllOnes(ShSVT.getSizeInBits()).ugt(K) && 5924 "We are expecting that K is always less than all-ones for ShSVT"); 5925 5926 // If the divisor is 1 the result can be constant-folded. Likewise, we 5927 // don't care about INT_MIN lanes, those can be set to undef if appropriate. 5928 if (D.isOne()) { 5929 // Set P, A and K to a bogus values so we can try to splat them. 5930 P = 0; 5931 A = -1; 5932 K = -1; 5933 5934 // x ?% 1 == 0 <--> true <--> x u<= -1 5935 Q = -1; 5936 } 5937 5938 PAmts.push_back(DAG.getConstant(P, DL, SVT)); 5939 AAmts.push_back(DAG.getConstant(A, DL, SVT)); 5940 KAmts.push_back( 5941 DAG.getConstant(APInt(ShSVT.getSizeInBits(), K), DL, ShSVT)); 5942 QAmts.push_back(DAG.getConstant(Q, DL, SVT)); 5943 return true; 5944 }; 5945 5946 SDValue N = REMNode.getOperand(0); 5947 SDValue D = REMNode.getOperand(1); 5948 5949 // Collect the values from each element. 5950 if (!ISD::matchUnaryPredicate(D, BuildSREMPattern)) 5951 return SDValue(); 5952 5953 // If this is a srem by a one, avoid the fold since it can be constant-folded. 5954 if (AllDivisorsAreOnes) 5955 return SDValue(); 5956 5957 // If this is a srem by a powers-of-two (including INT_MIN), avoid the fold 5958 // since it can be best implemented as a bit test. 5959 if (AllDivisorsArePowerOfTwo) 5960 return SDValue(); 5961 5962 SDValue PVal, AVal, KVal, QVal; 5963 if (D.getOpcode() == ISD::BUILD_VECTOR) { 5964 if (HadOneDivisor) { 5965 // Try to turn PAmts into a splat, since we don't care about the values 5966 // that are currently '0'. If we can't, just keep '0'`s. 5967 turnVectorIntoSplatVector(PAmts, isNullConstant); 5968 // Try to turn AAmts into a splat, since we don't care about the 5969 // values that are currently '-1'. If we can't, change them to '0'`s. 5970 turnVectorIntoSplatVector(AAmts, isAllOnesConstant, 5971 DAG.getConstant(0, DL, SVT)); 5972 // Try to turn KAmts into a splat, since we don't care about the values 5973 // that are currently '-1'. If we can't, change them to '0'`s. 
5974 turnVectorIntoSplatVector(KAmts, isAllOnesConstant, 5975 DAG.getConstant(0, DL, ShSVT)); 5976 } 5977 5978 PVal = DAG.getBuildVector(VT, DL, PAmts); 5979 AVal = DAG.getBuildVector(VT, DL, AAmts); 5980 KVal = DAG.getBuildVector(ShVT, DL, KAmts); 5981 QVal = DAG.getBuildVector(VT, DL, QAmts); 5982 } else if (D.getOpcode() == ISD::SPLAT_VECTOR) { 5983 assert(PAmts.size() == 1 && AAmts.size() == 1 && KAmts.size() == 1 && 5984 QAmts.size() == 1 && 5985 "Expected matchUnaryPredicate to return one element for scalable " 5986 "vectors"); 5987 PVal = DAG.getSplatVector(VT, DL, PAmts[0]); 5988 AVal = DAG.getSplatVector(VT, DL, AAmts[0]); 5989 KVal = DAG.getSplatVector(ShVT, DL, KAmts[0]); 5990 QVal = DAG.getSplatVector(VT, DL, QAmts[0]); 5991 } else { 5992 assert(isa<ConstantSDNode>(D) && "Expected a constant"); 5993 PVal = PAmts[0]; 5994 AVal = AAmts[0]; 5995 KVal = KAmts[0]; 5996 QVal = QAmts[0]; 5997 } 5998 5999 // (mul N, P) 6000 SDValue Op0 = DAG.getNode(ISD::MUL, DL, VT, N, PVal); 6001 Created.push_back(Op0.getNode()); 6002 6003 if (NeedToApplyOffset) { 6004 // We need ADD to do this. 6005 if (!DCI.isBeforeLegalizeOps() && !isOperationLegalOrCustom(ISD::ADD, VT)) 6006 return SDValue(); 6007 6008 // (add (mul N, P), A) 6009 Op0 = DAG.getNode(ISD::ADD, DL, VT, Op0, AVal); 6010 Created.push_back(Op0.getNode()); 6011 } 6012 6013 // Rotate right only if any divisor was even. We avoid rotates for all-odd 6014 // divisors as a performance improvement, since rotating by 0 is a no-op. 6015 if (HadEvenDivisor) { 6016 // We need ROTR to do this. 6017 if (!DCI.isBeforeLegalizeOps() && !isOperationLegalOrCustom(ISD::ROTR, VT)) 6018 return SDValue(); 6019 // SREM: (rotr (add (mul N, P), A), K) 6020 Op0 = DAG.getNode(ISD::ROTR, DL, VT, Op0, KVal); 6021 Created.push_back(Op0.getNode()); 6022 } 6023 6024 // SREM: (setule/setugt (rotr (add (mul N, P), A), K), Q) 6025 SDValue Fold = 6026 DAG.getSetCC(DL, SETCCVT, Op0, QVal, 6027 ((Cond == ISD::SETEQ) ? ISD::SETULE : ISD::SETUGT)); 6028 6029 // If we didn't have lanes with INT_MIN divisor, then we're done. 6030 if (!HadIntMinDivisor) 6031 return Fold; 6032 6033 // That fold is only valid for positive divisors. Which effectively means, 6034 // it is invalid for INT_MIN divisors. So if we have such a lane, 6035 // we must fix-up results for said lanes. 6036 assert(VT.isVector() && "Can/should only get here for vectors."); 6037 6038 // NOTE: we avoid letting illegal types through even if we're before legalize 6039 // ops – legalization has a hard time producing good code for the code that 6040 // follows. 6041 if (!isOperationLegalOrCustom(ISD::SETEQ, VT) || 6042 !isOperationLegalOrCustom(ISD::AND, VT) || 6043 !isOperationLegalOrCustom(Cond, VT) || 6044 !isOperationLegalOrCustom(ISD::VSELECT, SETCCVT)) 6045 return SDValue(); 6046 6047 Created.push_back(Fold.getNode()); 6048 6049 SDValue IntMin = DAG.getConstant( 6050 APInt::getSignedMinValue(SVT.getScalarSizeInBits()), DL, VT); 6051 SDValue IntMax = DAG.getConstant( 6052 APInt::getSignedMaxValue(SVT.getScalarSizeInBits()), DL, VT); 6053 SDValue Zero = 6054 DAG.getConstant(APInt::getZero(SVT.getScalarSizeInBits()), DL, VT); 6055 6056 // Which lanes had INT_MIN divisors? Divisor is constant, so const-folded. 
6057 SDValue DivisorIsIntMin = DAG.getSetCC(DL, SETCCVT, D, IntMin, ISD::SETEQ); 6058 Created.push_back(DivisorIsIntMin.getNode()); 6059 6060 // (N s% INT_MIN) ==/!= 0 <--> (N & INT_MAX) ==/!= 0 6061 SDValue Masked = DAG.getNode(ISD::AND, DL, VT, N, IntMax); 6062 Created.push_back(Masked.getNode()); 6063 SDValue MaskedIsZero = DAG.getSetCC(DL, SETCCVT, Masked, Zero, Cond); 6064 Created.push_back(MaskedIsZero.getNode()); 6065 6066 // To produce final result we need to blend 2 vectors: 'SetCC' and 6067 // 'MaskedIsZero'. If the divisor for channel was *NOT* INT_MIN, we pick 6068 // from 'Fold', else pick from 'MaskedIsZero'. Since 'DivisorIsIntMin' is 6069 // constant-folded, select can get lowered to a shuffle with constant mask. 6070 SDValue Blended = DAG.getNode(ISD::VSELECT, DL, SETCCVT, DivisorIsIntMin, 6071 MaskedIsZero, Fold); 6072 6073 return Blended; 6074 } 6075 6076 bool TargetLowering:: 6077 verifyReturnAddressArgumentIsConstant(SDValue Op, SelectionDAG &DAG) const { 6078 if (!isa<ConstantSDNode>(Op.getOperand(0))) { 6079 DAG.getContext()->emitError("argument to '__builtin_return_address' must " 6080 "be a constant integer"); 6081 return true; 6082 } 6083 6084 return false; 6085 } 6086 6087 SDValue TargetLowering::getSqrtInputTest(SDValue Op, SelectionDAG &DAG, 6088 const DenormalMode &Mode) const { 6089 SDLoc DL(Op); 6090 EVT VT = Op.getValueType(); 6091 EVT CCVT = getSetCCResultType(DAG.getDataLayout(), *DAG.getContext(), VT); 6092 SDValue FPZero = DAG.getConstantFP(0.0, DL, VT); 6093 // Testing it with denormal inputs to avoid wrong estimate. 6094 if (Mode.Input == DenormalMode::IEEE) { 6095 // This is specifically a check for the handling of denormal inputs, 6096 // not the result. 6097 6098 // Test = fabs(X) < SmallestNormal 6099 const fltSemantics &FltSem = DAG.EVTToAPFloatSemantics(VT); 6100 APFloat SmallestNorm = APFloat::getSmallestNormalized(FltSem); 6101 SDValue NormC = DAG.getConstantFP(SmallestNorm, DL, VT); 6102 SDValue Fabs = DAG.getNode(ISD::FABS, DL, VT, Op); 6103 return DAG.getSetCC(DL, CCVT, Fabs, NormC, ISD::SETLT); 6104 } 6105 // Test = X == 0.0 6106 return DAG.getSetCC(DL, CCVT, Op, FPZero, ISD::SETEQ); 6107 } 6108 6109 SDValue TargetLowering::getNegatedExpression(SDValue Op, SelectionDAG &DAG, 6110 bool LegalOps, bool OptForSize, 6111 NegatibleCost &Cost, 6112 unsigned Depth) const { 6113 // fneg is removable even if it has multiple uses. 6114 if (Op.getOpcode() == ISD::FNEG) { 6115 Cost = NegatibleCost::Cheaper; 6116 return Op.getOperand(0); 6117 } 6118 6119 // Don't recurse exponentially. 6120 if (Depth > SelectionDAG::MaxRecursionDepth) 6121 return SDValue(); 6122 6123 // Pre-increment recursion depth for use in recursive calls. 6124 ++Depth; 6125 const SDNodeFlags Flags = Op->getFlags(); 6126 const TargetOptions &Options = DAG.getTarget().Options; 6127 EVT VT = Op.getValueType(); 6128 unsigned Opcode = Op.getOpcode(); 6129 6130 // Don't allow anything with multiple uses unless we know it is free. 6131 if (!Op.hasOneUse() && Opcode != ISD::ConstantFP) { 6132 bool IsFreeExtend = Opcode == ISD::FP_EXTEND && 6133 isFPExtFree(VT, Op.getOperand(0).getValueType()); 6134 if (!IsFreeExtend) 6135 return SDValue(); 6136 } 6137 6138 auto RemoveDeadNode = [&](SDValue N) { 6139 if (N && N.getNode()->use_empty()) 6140 DAG.RemoveDeadNode(N.getNode()); 6141 }; 6142 6143 SDLoc DL(Op); 6144 6145 // Because getNegatedExpression can delete nodes we need a handle to keep 6146 // temporary nodes alive in case the recursion manages to create an identical 6147 // node. 
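  // (Illustrative: after negating X for (fadd X, Y), the recursive call that
  // negates Y may delete the still-unused NegX node; the handles below keep
  // such temporaries alive until we decide which operand to actually negate.)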
6148 std::list<HandleSDNode> Handles; 6149 6150 switch (Opcode) { 6151 case ISD::ConstantFP: { 6152 // Don't invert constant FP values after legalization unless the target says 6153 // the negated constant is legal. 6154 bool IsOpLegal = 6155 isOperationLegal(ISD::ConstantFP, VT) || 6156 isFPImmLegal(neg(cast<ConstantFPSDNode>(Op)->getValueAPF()), VT, 6157 OptForSize); 6158 6159 if (LegalOps && !IsOpLegal) 6160 break; 6161 6162 APFloat V = cast<ConstantFPSDNode>(Op)->getValueAPF(); 6163 V.changeSign(); 6164 SDValue CFP = DAG.getConstantFP(V, DL, VT); 6165 6166 // If we already have the use of the negated floating constant, it is free 6167 // to negate it even it has multiple uses. 6168 if (!Op.hasOneUse() && CFP.use_empty()) 6169 break; 6170 Cost = NegatibleCost::Neutral; 6171 return CFP; 6172 } 6173 case ISD::BUILD_VECTOR: { 6174 // Only permit BUILD_VECTOR of constants. 6175 if (llvm::any_of(Op->op_values(), [&](SDValue N) { 6176 return !N.isUndef() && !isa<ConstantFPSDNode>(N); 6177 })) 6178 break; 6179 6180 bool IsOpLegal = 6181 (isOperationLegal(ISD::ConstantFP, VT) && 6182 isOperationLegal(ISD::BUILD_VECTOR, VT)) || 6183 llvm::all_of(Op->op_values(), [&](SDValue N) { 6184 return N.isUndef() || 6185 isFPImmLegal(neg(cast<ConstantFPSDNode>(N)->getValueAPF()), VT, 6186 OptForSize); 6187 }); 6188 6189 if (LegalOps && !IsOpLegal) 6190 break; 6191 6192 SmallVector<SDValue, 4> Ops; 6193 for (SDValue C : Op->op_values()) { 6194 if (C.isUndef()) { 6195 Ops.push_back(C); 6196 continue; 6197 } 6198 APFloat V = cast<ConstantFPSDNode>(C)->getValueAPF(); 6199 V.changeSign(); 6200 Ops.push_back(DAG.getConstantFP(V, DL, C.getValueType())); 6201 } 6202 Cost = NegatibleCost::Neutral; 6203 return DAG.getBuildVector(VT, DL, Ops); 6204 } 6205 case ISD::FADD: { 6206 if (!Options.NoSignedZerosFPMath && !Flags.hasNoSignedZeros()) 6207 break; 6208 6209 // After operation legalization, it might not be legal to create new FSUBs. 6210 if (LegalOps && !isOperationLegalOrCustom(ISD::FSUB, VT)) 6211 break; 6212 SDValue X = Op.getOperand(0), Y = Op.getOperand(1); 6213 6214 // fold (fneg (fadd X, Y)) -> (fsub (fneg X), Y) 6215 NegatibleCost CostX = NegatibleCost::Expensive; 6216 SDValue NegX = 6217 getNegatedExpression(X, DAG, LegalOps, OptForSize, CostX, Depth); 6218 // Prevent this node from being deleted by the next call. 6219 if (NegX) 6220 Handles.emplace_back(NegX); 6221 6222 // fold (fneg (fadd X, Y)) -> (fsub (fneg Y), X) 6223 NegatibleCost CostY = NegatibleCost::Expensive; 6224 SDValue NegY = 6225 getNegatedExpression(Y, DAG, LegalOps, OptForSize, CostY, Depth); 6226 6227 // We're done with the handles. 6228 Handles.clear(); 6229 6230 // Negate the X if its cost is less or equal than Y. 6231 if (NegX && (CostX <= CostY)) { 6232 Cost = CostX; 6233 SDValue N = DAG.getNode(ISD::FSUB, DL, VT, NegX, Y, Flags); 6234 if (NegY != N) 6235 RemoveDeadNode(NegY); 6236 return N; 6237 } 6238 6239 // Negate the Y if it is not expensive. 6240 if (NegY) { 6241 Cost = CostY; 6242 SDValue N = DAG.getNode(ISD::FSUB, DL, VT, NegY, X, Flags); 6243 if (NegX != N) 6244 RemoveDeadNode(NegX); 6245 return N; 6246 } 6247 break; 6248 } 6249 case ISD::FSUB: { 6250 // We can't turn -(A-B) into B-A when we honor signed zeros. 
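// E.g. with A == B == 0.0: -(A - B) is -0.0, but B - A is +0.0.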
6251 if (!Options.NoSignedZerosFPMath && !Flags.hasNoSignedZeros()) 6252 break; 6253 6254 SDValue X = Op.getOperand(0), Y = Op.getOperand(1); 6255 // fold (fneg (fsub 0, Y)) -> Y 6256 if (ConstantFPSDNode *C = isConstOrConstSplatFP(X, /*AllowUndefs*/ true)) 6257 if (C->isZero()) { 6258 Cost = NegatibleCost::Cheaper; 6259 return Y; 6260 } 6261 6262 // fold (fneg (fsub X, Y)) -> (fsub Y, X) 6263 Cost = NegatibleCost::Neutral; 6264 return DAG.getNode(ISD::FSUB, DL, VT, Y, X, Flags); 6265 } 6266 case ISD::FMUL: 6267 case ISD::FDIV: { 6268 SDValue X = Op.getOperand(0), Y = Op.getOperand(1); 6269 6270 // fold (fneg (fmul X, Y)) -> (fmul (fneg X), Y) 6271 NegatibleCost CostX = NegatibleCost::Expensive; 6272 SDValue NegX = 6273 getNegatedExpression(X, DAG, LegalOps, OptForSize, CostX, Depth); 6274 // Prevent this node from being deleted by the next call. 6275 if (NegX) 6276 Handles.emplace_back(NegX); 6277 6278 // fold (fneg (fmul X, Y)) -> (fmul X, (fneg Y)) 6279 NegatibleCost CostY = NegatibleCost::Expensive; 6280 SDValue NegY = 6281 getNegatedExpression(Y, DAG, LegalOps, OptForSize, CostY, Depth); 6282 6283 // We're done with the handles. 6284 Handles.clear(); 6285 6286 // Negate the X if its cost is less or equal than Y. 6287 if (NegX && (CostX <= CostY)) { 6288 Cost = CostX; 6289 SDValue N = DAG.getNode(Opcode, DL, VT, NegX, Y, Flags); 6290 if (NegY != N) 6291 RemoveDeadNode(NegY); 6292 return N; 6293 } 6294 6295 // Ignore X * 2.0 because that is expected to be canonicalized to X + X. 6296 if (auto *C = isConstOrConstSplatFP(Op.getOperand(1))) 6297 if (C->isExactlyValue(2.0) && Op.getOpcode() == ISD::FMUL) 6298 break; 6299 6300 // Negate the Y if it is not expensive. 6301 if (NegY) { 6302 Cost = CostY; 6303 SDValue N = DAG.getNode(Opcode, DL, VT, X, NegY, Flags); 6304 if (NegX != N) 6305 RemoveDeadNode(NegX); 6306 return N; 6307 } 6308 break; 6309 } 6310 case ISD::FMA: 6311 case ISD::FMAD: { 6312 if (!Options.NoSignedZerosFPMath && !Flags.hasNoSignedZeros()) 6313 break; 6314 6315 SDValue X = Op.getOperand(0), Y = Op.getOperand(1), Z = Op.getOperand(2); 6316 NegatibleCost CostZ = NegatibleCost::Expensive; 6317 SDValue NegZ = 6318 getNegatedExpression(Z, DAG, LegalOps, OptForSize, CostZ, Depth); 6319 // Give up if fail to negate the Z. 6320 if (!NegZ) 6321 break; 6322 6323 // Prevent this node from being deleted by the next two calls. 6324 Handles.emplace_back(NegZ); 6325 6326 // fold (fneg (fma X, Y, Z)) -> (fma (fneg X), Y, (fneg Z)) 6327 NegatibleCost CostX = NegatibleCost::Expensive; 6328 SDValue NegX = 6329 getNegatedExpression(X, DAG, LegalOps, OptForSize, CostX, Depth); 6330 // Prevent this node from being deleted by the next call. 6331 if (NegX) 6332 Handles.emplace_back(NegX); 6333 6334 // fold (fneg (fma X, Y, Z)) -> (fma X, (fneg Y), (fneg Z)) 6335 NegatibleCost CostY = NegatibleCost::Expensive; 6336 SDValue NegY = 6337 getNegatedExpression(Y, DAG, LegalOps, OptForSize, CostY, Depth); 6338 6339 // We're done with the handles. 6340 Handles.clear(); 6341 6342 // Negate the X if its cost is less or equal than Y. 6343 if (NegX && (CostX <= CostY)) { 6344 Cost = std::min(CostX, CostZ); 6345 SDValue N = DAG.getNode(Opcode, DL, VT, NegX, Y, NegZ, Flags); 6346 if (NegY != N) 6347 RemoveDeadNode(NegY); 6348 return N; 6349 } 6350 6351 // Negate the Y if it is not expensive. 
6352 if (NegY) { 6353 Cost = std::min(CostY, CostZ); 6354 SDValue N = DAG.getNode(Opcode, DL, VT, X, NegY, NegZ, Flags); 6355 if (NegX != N) 6356 RemoveDeadNode(NegX); 6357 return N; 6358 } 6359 break; 6360 } 6361 6362 case ISD::FP_EXTEND: 6363 case ISD::FSIN: 6364 if (SDValue NegV = getNegatedExpression(Op.getOperand(0), DAG, LegalOps, 6365 OptForSize, Cost, Depth)) 6366 return DAG.getNode(Opcode, DL, VT, NegV); 6367 break; 6368 case ISD::FP_ROUND: 6369 if (SDValue NegV = getNegatedExpression(Op.getOperand(0), DAG, LegalOps, 6370 OptForSize, Cost, Depth)) 6371 return DAG.getNode(ISD::FP_ROUND, DL, VT, NegV, Op.getOperand(1)); 6372 break; 6373 } 6374 6375 return SDValue(); 6376 } 6377 6378 //===----------------------------------------------------------------------===// 6379 // Legalization Utilities 6380 //===----------------------------------------------------------------------===// 6381 6382 bool TargetLowering::expandMUL_LOHI(unsigned Opcode, EVT VT, const SDLoc &dl, 6383 SDValue LHS, SDValue RHS, 6384 SmallVectorImpl<SDValue> &Result, 6385 EVT HiLoVT, SelectionDAG &DAG, 6386 MulExpansionKind Kind, SDValue LL, 6387 SDValue LH, SDValue RL, SDValue RH) const { 6388 assert(Opcode == ISD::MUL || Opcode == ISD::UMUL_LOHI || 6389 Opcode == ISD::SMUL_LOHI); 6390 6391 bool HasMULHS = (Kind == MulExpansionKind::Always) || 6392 isOperationLegalOrCustom(ISD::MULHS, HiLoVT); 6393 bool HasMULHU = (Kind == MulExpansionKind::Always) || 6394 isOperationLegalOrCustom(ISD::MULHU, HiLoVT); 6395 bool HasSMUL_LOHI = (Kind == MulExpansionKind::Always) || 6396 isOperationLegalOrCustom(ISD::SMUL_LOHI, HiLoVT); 6397 bool HasUMUL_LOHI = (Kind == MulExpansionKind::Always) || 6398 isOperationLegalOrCustom(ISD::UMUL_LOHI, HiLoVT); 6399 6400 if (!HasMULHU && !HasMULHS && !HasUMUL_LOHI && !HasSMUL_LOHI) 6401 return false; 6402 6403 unsigned OuterBitSize = VT.getScalarSizeInBits(); 6404 unsigned InnerBitSize = HiLoVT.getScalarSizeInBits(); 6405 6406 // LL, LH, RL, and RH must be either all NULL or all set to a value. 6407 assert((LL.getNode() && LH.getNode() && RL.getNode() && RH.getNode()) || 6408 (!LL.getNode() && !LH.getNode() && !RL.getNode() && !RH.getNode())); 6409 6410 SDVTList VTs = DAG.getVTList(HiLoVT, HiLoVT); 6411 auto MakeMUL_LOHI = [&](SDValue L, SDValue R, SDValue &Lo, SDValue &Hi, 6412 bool Signed) -> bool { 6413 if ((Signed && HasSMUL_LOHI) || (!Signed && HasUMUL_LOHI)) { 6414 Lo = DAG.getNode(Signed ? ISD::SMUL_LOHI : ISD::UMUL_LOHI, dl, VTs, L, R); 6415 Hi = SDValue(Lo.getNode(), 1); 6416 return true; 6417 } 6418 if ((Signed && HasMULHS) || (!Signed && HasMULHU)) { 6419 Lo = DAG.getNode(ISD::MUL, dl, HiLoVT, L, R); 6420 Hi = DAG.getNode(Signed ? ISD::MULHS : ISD::MULHU, dl, HiLoVT, L, R); 6421 return true; 6422 } 6423 return false; 6424 }; 6425 6426 SDValue Lo, Hi; 6427 6428 if (!LL.getNode() && !RL.getNode() && 6429 isOperationLegalOrCustom(ISD::TRUNCATE, HiLoVT)) { 6430 LL = DAG.getNode(ISD::TRUNCATE, dl, HiLoVT, LHS); 6431 RL = DAG.getNode(ISD::TRUNCATE, dl, HiLoVT, RHS); 6432 } 6433 6434 if (!LL.getNode()) 6435 return false; 6436 6437 APInt HighMask = APInt::getHighBitsSet(OuterBitSize, InnerBitSize); 6438 if (DAG.MaskedValueIsZero(LHS, HighMask) && 6439 DAG.MaskedValueIsZero(RHS, HighMask)) { 6440 // The inputs are both zero-extended. 
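// Both operands have their high (OuterBitSize - InnerBitSize) bits clear, so
// the product fits in 2 * InnerBitSize bits: a single narrow (lo, hi)
// multiply of the truncated operands yields the whole value, and for the
// *MUL_LOHI opcodes the upper wide half is simply zero.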
6441 if (MakeMUL_LOHI(LL, RL, Lo, Hi, false)) { 6442 Result.push_back(Lo); 6443 Result.push_back(Hi); 6444 if (Opcode != ISD::MUL) { 6445 SDValue Zero = DAG.getConstant(0, dl, HiLoVT); 6446 Result.push_back(Zero); 6447 Result.push_back(Zero); 6448 } 6449 return true; 6450 } 6451 } 6452 6453 if (!VT.isVector() && Opcode == ISD::MUL && 6454 DAG.ComputeNumSignBits(LHS) > InnerBitSize && 6455 DAG.ComputeNumSignBits(RHS) > InnerBitSize) { 6456 // The input values are both sign-extended. 6457 // TODO non-MUL case? 6458 if (MakeMUL_LOHI(LL, RL, Lo, Hi, true)) { 6459 Result.push_back(Lo); 6460 Result.push_back(Hi); 6461 return true; 6462 } 6463 } 6464 6465 unsigned ShiftAmount = OuterBitSize - InnerBitSize; 6466 EVT ShiftAmountTy = getShiftAmountTy(VT, DAG.getDataLayout()); 6467 SDValue Shift = DAG.getConstant(ShiftAmount, dl, ShiftAmountTy); 6468 6469 if (!LH.getNode() && !RH.getNode() && 6470 isOperationLegalOrCustom(ISD::SRL, VT) && 6471 isOperationLegalOrCustom(ISD::TRUNCATE, HiLoVT)) { 6472 LH = DAG.getNode(ISD::SRL, dl, VT, LHS, Shift); 6473 LH = DAG.getNode(ISD::TRUNCATE, dl, HiLoVT, LH); 6474 RH = DAG.getNode(ISD::SRL, dl, VT, RHS, Shift); 6475 RH = DAG.getNode(ISD::TRUNCATE, dl, HiLoVT, RH); 6476 } 6477 6478 if (!LH.getNode()) 6479 return false; 6480 6481 if (!MakeMUL_LOHI(LL, RL, Lo, Hi, false)) 6482 return false; 6483 6484 Result.push_back(Lo); 6485 6486 if (Opcode == ISD::MUL) { 6487 RH = DAG.getNode(ISD::MUL, dl, HiLoVT, LL, RH); 6488 LH = DAG.getNode(ISD::MUL, dl, HiLoVT, LH, RL); 6489 Hi = DAG.getNode(ISD::ADD, dl, HiLoVT, Hi, RH); 6490 Hi = DAG.getNode(ISD::ADD, dl, HiLoVT, Hi, LH); 6491 Result.push_back(Hi); 6492 return true; 6493 } 6494 6495 // Compute the full width result. 6496 auto Merge = [&](SDValue Lo, SDValue Hi) -> SDValue { 6497 Lo = DAG.getNode(ISD::ZERO_EXTEND, dl, VT, Lo); 6498 Hi = DAG.getNode(ISD::ZERO_EXTEND, dl, VT, Hi); 6499 Hi = DAG.getNode(ISD::SHL, dl, VT, Hi, Shift); 6500 return DAG.getNode(ISD::OR, dl, VT, Lo, Hi); 6501 }; 6502 6503 SDValue Next = DAG.getNode(ISD::ZERO_EXTEND, dl, VT, Hi); 6504 if (!MakeMUL_LOHI(LL, RH, Lo, Hi, false)) 6505 return false; 6506 6507 // This is effectively the add part of a multiply-add of half-sized operands, 6508 // so it cannot overflow. 
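// (With n = InnerBitSize: Next holds the high half of LL*RL, which is at most
//  2^n - 2, and Merge(Lo, Hi) holds LL*RH, which is at most (2^n - 1)^2;
//  their sum stays below 2^(2n), so the wide ADD cannot wrap.)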
6509 Next = DAG.getNode(ISD::ADD, dl, VT, Next, Merge(Lo, Hi)); 6510 6511 if (!MakeMUL_LOHI(LH, RL, Lo, Hi, false)) 6512 return false; 6513 6514 SDValue Zero = DAG.getConstant(0, dl, HiLoVT); 6515 EVT BoolType = getSetCCResultType(DAG.getDataLayout(), *DAG.getContext(), VT); 6516 6517 bool UseGlue = (isOperationLegalOrCustom(ISD::ADDC, VT) && 6518 isOperationLegalOrCustom(ISD::ADDE, VT)); 6519 if (UseGlue) 6520 Next = DAG.getNode(ISD::ADDC, dl, DAG.getVTList(VT, MVT::Glue), Next, 6521 Merge(Lo, Hi)); 6522 else 6523 Next = DAG.getNode(ISD::ADDCARRY, dl, DAG.getVTList(VT, BoolType), Next, 6524 Merge(Lo, Hi), DAG.getConstant(0, dl, BoolType)); 6525 6526 SDValue Carry = Next.getValue(1); 6527 Result.push_back(DAG.getNode(ISD::TRUNCATE, dl, HiLoVT, Next)); 6528 Next = DAG.getNode(ISD::SRL, dl, VT, Next, Shift); 6529 6530 if (!MakeMUL_LOHI(LH, RH, Lo, Hi, Opcode == ISD::SMUL_LOHI)) 6531 return false; 6532 6533 if (UseGlue) 6534 Hi = DAG.getNode(ISD::ADDE, dl, DAG.getVTList(HiLoVT, MVT::Glue), Hi, Zero, 6535 Carry); 6536 else 6537 Hi = DAG.getNode(ISD::ADDCARRY, dl, DAG.getVTList(HiLoVT, BoolType), Hi, 6538 Zero, Carry); 6539 6540 Next = DAG.getNode(ISD::ADD, dl, VT, Next, Merge(Lo, Hi)); 6541 6542 if (Opcode == ISD::SMUL_LOHI) { 6543 SDValue NextSub = DAG.getNode(ISD::SUB, dl, VT, Next, 6544 DAG.getNode(ISD::ZERO_EXTEND, dl, VT, RL)); 6545 Next = DAG.getSelectCC(dl, LH, Zero, NextSub, Next, ISD::SETLT); 6546 6547 NextSub = DAG.getNode(ISD::SUB, dl, VT, Next, 6548 DAG.getNode(ISD::ZERO_EXTEND, dl, VT, LL)); 6549 Next = DAG.getSelectCC(dl, RH, Zero, NextSub, Next, ISD::SETLT); 6550 } 6551 6552 Result.push_back(DAG.getNode(ISD::TRUNCATE, dl, HiLoVT, Next)); 6553 Next = DAG.getNode(ISD::SRL, dl, VT, Next, Shift); 6554 Result.push_back(DAG.getNode(ISD::TRUNCATE, dl, HiLoVT, Next)); 6555 return true; 6556 } 6557 6558 bool TargetLowering::expandMUL(SDNode *N, SDValue &Lo, SDValue &Hi, EVT HiLoVT, 6559 SelectionDAG &DAG, MulExpansionKind Kind, 6560 SDValue LL, SDValue LH, SDValue RL, 6561 SDValue RH) const { 6562 SmallVector<SDValue, 2> Result; 6563 bool Ok = expandMUL_LOHI(N->getOpcode(), N->getValueType(0), SDLoc(N), 6564 N->getOperand(0), N->getOperand(1), Result, HiLoVT, 6565 DAG, Kind, LL, LH, RL, RH); 6566 if (Ok) { 6567 assert(Result.size() == 2); 6568 Lo = Result[0]; 6569 Hi = Result[1]; 6570 } 6571 return Ok; 6572 } 6573 6574 // Check that (every element of) Z is undef or not an exact multiple of BW. 6575 static bool isNonZeroModBitWidthOrUndef(SDValue Z, unsigned BW) { 6576 return ISD::matchUnaryPredicate( 6577 Z, 6578 [=](ConstantSDNode *C) { return !C || C->getAPIntValue().urem(BW) != 0; }, 6579 true); 6580 } 6581 6582 SDValue TargetLowering::expandFunnelShift(SDNode *Node, 6583 SelectionDAG &DAG) const { 6584 EVT VT = Node->getValueType(0); 6585 6586 if (VT.isVector() && (!isOperationLegalOrCustom(ISD::SHL, VT) || 6587 !isOperationLegalOrCustom(ISD::SRL, VT) || 6588 !isOperationLegalOrCustom(ISD::SUB, VT) || 6589 !isOperationLegalOrCustomOrPromote(ISD::OR, VT))) 6590 return SDValue(); 6591 6592 SDValue X = Node->getOperand(0); 6593 SDValue Y = Node->getOperand(1); 6594 SDValue Z = Node->getOperand(2); 6595 6596 unsigned BW = VT.getScalarSizeInBits(); 6597 bool IsFSHL = Node->getOpcode() == ISD::FSHL; 6598 SDLoc DL(SDValue(Node, 0)); 6599 6600 EVT ShVT = Z.getValueType(); 6601 6602 // If a funnel shift in the other direction is more supported, use it. 6603 unsigned RevOpcode = IsFSHL ? 
ISD::FSHR : ISD::FSHL; 6604 if (!isOperationLegalOrCustom(Node->getOpcode(), VT) && 6605 isOperationLegalOrCustom(RevOpcode, VT) && isPowerOf2_32(BW)) { 6606 if (isNonZeroModBitWidthOrUndef(Z, BW)) { 6607 // fshl X, Y, Z -> fshr X, Y, -Z 6608 // fshr X, Y, Z -> fshl X, Y, -Z 6609 SDValue Zero = DAG.getConstant(0, DL, ShVT); 6610 Z = DAG.getNode(ISD::SUB, DL, VT, Zero, Z); 6611 } else { 6612 // fshl X, Y, Z -> fshr (srl X, 1), (fshr X, Y, 1), ~Z 6613 // fshr X, Y, Z -> fshl (fshl X, Y, 1), (shl Y, 1), ~Z 6614 SDValue One = DAG.getConstant(1, DL, ShVT); 6615 if (IsFSHL) { 6616 Y = DAG.getNode(RevOpcode, DL, VT, X, Y, One); 6617 X = DAG.getNode(ISD::SRL, DL, VT, X, One); 6618 } else { 6619 X = DAG.getNode(RevOpcode, DL, VT, X, Y, One); 6620 Y = DAG.getNode(ISD::SHL, DL, VT, Y, One); 6621 } 6622 Z = DAG.getNOT(DL, Z, ShVT); 6623 } 6624 return DAG.getNode(RevOpcode, DL, VT, X, Y, Z); 6625 } 6626 6627 SDValue ShX, ShY; 6628 SDValue ShAmt, InvShAmt; 6629 if (isNonZeroModBitWidthOrUndef(Z, BW)) { 6630 // fshl: X << C | Y >> (BW - C) 6631 // fshr: X << (BW - C) | Y >> C 6632 // where C = Z % BW is not zero 6633 SDValue BitWidthC = DAG.getConstant(BW, DL, ShVT); 6634 ShAmt = DAG.getNode(ISD::UREM, DL, ShVT, Z, BitWidthC); 6635 InvShAmt = DAG.getNode(ISD::SUB, DL, ShVT, BitWidthC, ShAmt); 6636 ShX = DAG.getNode(ISD::SHL, DL, VT, X, IsFSHL ? ShAmt : InvShAmt); 6637 ShY = DAG.getNode(ISD::SRL, DL, VT, Y, IsFSHL ? InvShAmt : ShAmt); 6638 } else { 6639 // fshl: X << (Z % BW) | Y >> 1 >> (BW - 1 - (Z % BW)) 6640 // fshr: X << 1 << (BW - 1 - (Z % BW)) | Y >> (Z % BW) 6641 SDValue Mask = DAG.getConstant(BW - 1, DL, ShVT); 6642 if (isPowerOf2_32(BW)) { 6643 // Z % BW -> Z & (BW - 1) 6644 ShAmt = DAG.getNode(ISD::AND, DL, ShVT, Z, Mask); 6645 // (BW - 1) - (Z % BW) -> ~Z & (BW - 1) 6646 InvShAmt = DAG.getNode(ISD::AND, DL, ShVT, DAG.getNOT(DL, Z, ShVT), Mask); 6647 } else { 6648 SDValue BitWidthC = DAG.getConstant(BW, DL, ShVT); 6649 ShAmt = DAG.getNode(ISD::UREM, DL, ShVT, Z, BitWidthC); 6650 InvShAmt = DAG.getNode(ISD::SUB, DL, ShVT, Mask, ShAmt); 6651 } 6652 6653 SDValue One = DAG.getConstant(1, DL, ShVT); 6654 if (IsFSHL) { 6655 ShX = DAG.getNode(ISD::SHL, DL, VT, X, ShAmt); 6656 SDValue ShY1 = DAG.getNode(ISD::SRL, DL, VT, Y, One); 6657 ShY = DAG.getNode(ISD::SRL, DL, VT, ShY1, InvShAmt); 6658 } else { 6659 SDValue ShX1 = DAG.getNode(ISD::SHL, DL, VT, X, One); 6660 ShX = DAG.getNode(ISD::SHL, DL, VT, ShX1, InvShAmt); 6661 ShY = DAG.getNode(ISD::SRL, DL, VT, Y, ShAmt); 6662 } 6663 } 6664 return DAG.getNode(ISD::OR, DL, VT, ShX, ShY); 6665 } 6666 6667 // TODO: Merge with expandFunnelShift. 6668 SDValue TargetLowering::expandROT(SDNode *Node, bool AllowVectorOps, 6669 SelectionDAG &DAG) const { 6670 EVT VT = Node->getValueType(0); 6671 unsigned EltSizeInBits = VT.getScalarSizeInBits(); 6672 bool IsLeft = Node->getOpcode() == ISD::ROTL; 6673 SDValue Op0 = Node->getOperand(0); 6674 SDValue Op1 = Node->getOperand(1); 6675 SDLoc DL(SDValue(Node, 0)); 6676 6677 EVT ShVT = Op1.getValueType(); 6678 SDValue Zero = DAG.getConstant(0, DL, ShVT); 6679 6680 // If a rotate in the other direction is more supported, use it. 6681 unsigned RevRot = IsLeft ? 
ISD::ROTR : ISD::ROTL; 6682 if (!isOperationLegalOrCustom(Node->getOpcode(), VT) && 6683 isOperationLegalOrCustom(RevRot, VT) && isPowerOf2_32(EltSizeInBits)) { 6684 SDValue Sub = DAG.getNode(ISD::SUB, DL, ShVT, Zero, Op1); 6685 return DAG.getNode(RevRot, DL, VT, Op0, Sub); 6686 } 6687 6688 if (!AllowVectorOps && VT.isVector() && 6689 (!isOperationLegalOrCustom(ISD::SHL, VT) || 6690 !isOperationLegalOrCustom(ISD::SRL, VT) || 6691 !isOperationLegalOrCustom(ISD::SUB, VT) || 6692 !isOperationLegalOrCustomOrPromote(ISD::OR, VT) || 6693 !isOperationLegalOrCustomOrPromote(ISD::AND, VT))) 6694 return SDValue(); 6695 6696 unsigned ShOpc = IsLeft ? ISD::SHL : ISD::SRL; 6697 unsigned HsOpc = IsLeft ? ISD::SRL : ISD::SHL; 6698 SDValue BitWidthMinusOneC = DAG.getConstant(EltSizeInBits - 1, DL, ShVT); 6699 SDValue ShVal; 6700 SDValue HsVal; 6701 if (isPowerOf2_32(EltSizeInBits)) { 6702 // (rotl x, c) -> x << (c & (w - 1)) | x >> (-c & (w - 1)) 6703 // (rotr x, c) -> x >> (c & (w - 1)) | x << (-c & (w - 1)) 6704 SDValue NegOp1 = DAG.getNode(ISD::SUB, DL, ShVT, Zero, Op1); 6705 SDValue ShAmt = DAG.getNode(ISD::AND, DL, ShVT, Op1, BitWidthMinusOneC); 6706 ShVal = DAG.getNode(ShOpc, DL, VT, Op0, ShAmt); 6707 SDValue HsAmt = DAG.getNode(ISD::AND, DL, ShVT, NegOp1, BitWidthMinusOneC); 6708 HsVal = DAG.getNode(HsOpc, DL, VT, Op0, HsAmt); 6709 } else { 6710 // (rotl x, c) -> x << (c % w) | x >> 1 >> (w - 1 - (c % w)) 6711 // (rotr x, c) -> x >> (c % w) | x << 1 << (w - 1 - (c % w)) 6712 SDValue BitWidthC = DAG.getConstant(EltSizeInBits, DL, ShVT); 6713 SDValue ShAmt = DAG.getNode(ISD::UREM, DL, ShVT, Op1, BitWidthC); 6714 ShVal = DAG.getNode(ShOpc, DL, VT, Op0, ShAmt); 6715 SDValue HsAmt = DAG.getNode(ISD::SUB, DL, ShVT, BitWidthMinusOneC, ShAmt); 6716 SDValue One = DAG.getConstant(1, DL, ShVT); 6717 HsVal = 6718 DAG.getNode(HsOpc, DL, VT, DAG.getNode(HsOpc, DL, VT, Op0, One), HsAmt); 6719 } 6720 return DAG.getNode(ISD::OR, DL, VT, ShVal, HsVal); 6721 } 6722 6723 void TargetLowering::expandShiftParts(SDNode *Node, SDValue &Lo, SDValue &Hi, 6724 SelectionDAG &DAG) const { 6725 assert(Node->getNumOperands() == 3 && "Not a double-shift!"); 6726 EVT VT = Node->getValueType(0); 6727 unsigned VTBits = VT.getScalarSizeInBits(); 6728 assert(isPowerOf2_32(VTBits) && "Power-of-two integer type expected"); 6729 6730 bool IsSHL = Node->getOpcode() == ISD::SHL_PARTS; 6731 bool IsSRA = Node->getOpcode() == ISD::SRA_PARTS; 6732 SDValue ShOpLo = Node->getOperand(0); 6733 SDValue ShOpHi = Node->getOperand(1); 6734 SDValue ShAmt = Node->getOperand(2); 6735 EVT ShAmtVT = ShAmt.getValueType(); 6736 EVT ShAmtCCVT = 6737 getSetCCResultType(DAG.getDataLayout(), *DAG.getContext(), ShAmtVT); 6738 SDLoc dl(Node); 6739 6740 // ISD::FSHL and ISD::FSHR have defined overflow behavior but ISD::SHL and 6741 // ISD::SRA/L nodes haven't. Insert an AND to be safe, it's usually optimized 6742 // away during isel. 6743 SDValue SafeShAmt = DAG.getNode(ISD::AND, dl, ShAmtVT, ShAmt, 6744 DAG.getConstant(VTBits - 1, dl, ShAmtVT)); 6745 SDValue Tmp1 = IsSRA ? DAG.getNode(ISD::SRA, dl, VT, ShOpHi, 6746 DAG.getConstant(VTBits - 1, dl, ShAmtVT)) 6747 : DAG.getConstant(0, dl, VT); 6748 6749 SDValue Tmp2, Tmp3; 6750 if (IsSHL) { 6751 Tmp2 = DAG.getNode(ISD::FSHL, dl, VT, ShOpHi, ShOpLo, ShAmt); 6752 Tmp3 = DAG.getNode(ISD::SHL, dl, VT, ShOpLo, SafeShAmt); 6753 } else { 6754 Tmp2 = DAG.getNode(ISD::FSHR, dl, VT, ShOpHi, ShOpLo, ShAmt); 6755 Tmp3 = DAG.getNode(IsSRA ? 
ISD::SRA : ISD::SRL, dl, VT, ShOpHi, SafeShAmt); 6756 } 6757 6758 // If the shift amount is larger or equal than the width of a part we don't 6759 // use the result from the FSHL/FSHR. Insert a test and select the appropriate 6760 // values for large shift amounts. 6761 SDValue AndNode = DAG.getNode(ISD::AND, dl, ShAmtVT, ShAmt, 6762 DAG.getConstant(VTBits, dl, ShAmtVT)); 6763 SDValue Cond = DAG.getSetCC(dl, ShAmtCCVT, AndNode, 6764 DAG.getConstant(0, dl, ShAmtVT), ISD::SETNE); 6765 6766 if (IsSHL) { 6767 Hi = DAG.getNode(ISD::SELECT, dl, VT, Cond, Tmp3, Tmp2); 6768 Lo = DAG.getNode(ISD::SELECT, dl, VT, Cond, Tmp1, Tmp3); 6769 } else { 6770 Lo = DAG.getNode(ISD::SELECT, dl, VT, Cond, Tmp3, Tmp2); 6771 Hi = DAG.getNode(ISD::SELECT, dl, VT, Cond, Tmp1, Tmp3); 6772 } 6773 } 6774 6775 bool TargetLowering::expandFP_TO_SINT(SDNode *Node, SDValue &Result, 6776 SelectionDAG &DAG) const { 6777 unsigned OpNo = Node->isStrictFPOpcode() ? 1 : 0; 6778 SDValue Src = Node->getOperand(OpNo); 6779 EVT SrcVT = Src.getValueType(); 6780 EVT DstVT = Node->getValueType(0); 6781 SDLoc dl(SDValue(Node, 0)); 6782 6783 // FIXME: Only f32 to i64 conversions are supported. 6784 if (SrcVT != MVT::f32 || DstVT != MVT::i64) 6785 return false; 6786 6787 if (Node->isStrictFPOpcode()) 6788 // When a NaN is converted to an integer a trap is allowed. We can't 6789 // use this expansion here because it would eliminate that trap. Other 6790 // traps are also allowed and cannot be eliminated. See 6791 // IEEE 754-2008 sec 5.8. 6792 return false; 6793 6794 // Expand f32 -> i64 conversion 6795 // This algorithm comes from compiler-rt's implementation of fixsfdi: 6796 // https://github.com/llvm/llvm-project/blob/main/compiler-rt/lib/builtins/fixsfdi.c 6797 unsigned SrcEltBits = SrcVT.getScalarSizeInBits(); 6798 EVT IntVT = SrcVT.changeTypeToInteger(); 6799 EVT IntShVT = getShiftAmountTy(IntVT, DAG.getDataLayout()); 6800 6801 SDValue ExponentMask = DAG.getConstant(0x7F800000, dl, IntVT); 6802 SDValue ExponentLoBit = DAG.getConstant(23, dl, IntVT); 6803 SDValue Bias = DAG.getConstant(127, dl, IntVT); 6804 SDValue SignMask = DAG.getConstant(APInt::getSignMask(SrcEltBits), dl, IntVT); 6805 SDValue SignLowBit = DAG.getConstant(SrcEltBits - 1, dl, IntVT); 6806 SDValue MantissaMask = DAG.getConstant(0x007FFFFF, dl, IntVT); 6807 6808 SDValue Bits = DAG.getNode(ISD::BITCAST, dl, IntVT, Src); 6809 6810 SDValue ExponentBits = DAG.getNode( 6811 ISD::SRL, dl, IntVT, DAG.getNode(ISD::AND, dl, IntVT, Bits, ExponentMask), 6812 DAG.getZExtOrTrunc(ExponentLoBit, dl, IntShVT)); 6813 SDValue Exponent = DAG.getNode(ISD::SUB, dl, IntVT, ExponentBits, Bias); 6814 6815 SDValue Sign = DAG.getNode(ISD::SRA, dl, IntVT, 6816 DAG.getNode(ISD::AND, dl, IntVT, Bits, SignMask), 6817 DAG.getZExtOrTrunc(SignLowBit, dl, IntShVT)); 6818 Sign = DAG.getSExtOrTrunc(Sign, dl, DstVT); 6819 6820 SDValue R = DAG.getNode(ISD::OR, dl, IntVT, 6821 DAG.getNode(ISD::AND, dl, IntVT, Bits, MantissaMask), 6822 DAG.getConstant(0x00800000, dl, IntVT)); 6823 6824 R = DAG.getZExtOrTrunc(R, dl, DstVT); 6825 6826 R = DAG.getSelectCC( 6827 dl, Exponent, ExponentLoBit, 6828 DAG.getNode(ISD::SHL, dl, DstVT, R, 6829 DAG.getZExtOrTrunc( 6830 DAG.getNode(ISD::SUB, dl, IntVT, Exponent, ExponentLoBit), 6831 dl, IntShVT)), 6832 DAG.getNode(ISD::SRL, dl, DstVT, R, 6833 DAG.getZExtOrTrunc( 6834 DAG.getNode(ISD::SUB, dl, IntVT, ExponentLoBit, Exponent), 6835 dl, IntShVT)), 6836 ISD::SETGT); 6837 6838 SDValue Ret = DAG.getNode(ISD::SUB, dl, DstVT, 6839 DAG.getNode(ISD::XOR, dl, DstVT, R, Sign), 
Sign); 6840 6841 Result = DAG.getSelectCC(dl, Exponent, DAG.getConstant(0, dl, IntVT), 6842 DAG.getConstant(0, dl, DstVT), Ret, ISD::SETLT); 6843 return true; 6844 } 6845 6846 bool TargetLowering::expandFP_TO_UINT(SDNode *Node, SDValue &Result, 6847 SDValue &Chain, 6848 SelectionDAG &DAG) const { 6849 SDLoc dl(SDValue(Node, 0)); 6850 unsigned OpNo = Node->isStrictFPOpcode() ? 1 : 0; 6851 SDValue Src = Node->getOperand(OpNo); 6852 6853 EVT SrcVT = Src.getValueType(); 6854 EVT DstVT = Node->getValueType(0); 6855 EVT SetCCVT = 6856 getSetCCResultType(DAG.getDataLayout(), *DAG.getContext(), SrcVT); 6857 EVT DstSetCCVT = 6858 getSetCCResultType(DAG.getDataLayout(), *DAG.getContext(), DstVT); 6859 6860 // Only expand vector types if we have the appropriate vector bit operations. 6861 unsigned SIntOpcode = Node->isStrictFPOpcode() ? ISD::STRICT_FP_TO_SINT : 6862 ISD::FP_TO_SINT; 6863 if (DstVT.isVector() && (!isOperationLegalOrCustom(SIntOpcode, DstVT) || 6864 !isOperationLegalOrCustomOrPromote(ISD::XOR, SrcVT))) 6865 return false; 6866 6867 // If the maximum float value is smaller then the signed integer range, 6868 // the destination signmask can't be represented by the float, so we can 6869 // just use FP_TO_SINT directly. 6870 const fltSemantics &APFSem = DAG.EVTToAPFloatSemantics(SrcVT); 6871 APFloat APF(APFSem, APInt::getZero(SrcVT.getScalarSizeInBits())); 6872 APInt SignMask = APInt::getSignMask(DstVT.getScalarSizeInBits()); 6873 if (APFloat::opOverflow & 6874 APF.convertFromAPInt(SignMask, false, APFloat::rmNearestTiesToEven)) { 6875 if (Node->isStrictFPOpcode()) { 6876 Result = DAG.getNode(ISD::STRICT_FP_TO_SINT, dl, { DstVT, MVT::Other }, 6877 { Node->getOperand(0), Src }); 6878 Chain = Result.getValue(1); 6879 } else 6880 Result = DAG.getNode(ISD::FP_TO_SINT, dl, DstVT, Src); 6881 return true; 6882 } 6883 6884 // Don't expand it if there isn't cheap fsub instruction. 6885 if (!isOperationLegalOrCustom( 6886 Node->isStrictFPOpcode() ? ISD::STRICT_FSUB : ISD::FSUB, SrcVT)) 6887 return false; 6888 6889 SDValue Cst = DAG.getConstantFP(APF, dl, SrcVT); 6890 SDValue Sel; 6891 6892 if (Node->isStrictFPOpcode()) { 6893 Sel = DAG.getSetCC(dl, SetCCVT, Src, Cst, ISD::SETLT, 6894 Node->getOperand(0), /*IsSignaling*/ true); 6895 Chain = Sel.getValue(1); 6896 } else { 6897 Sel = DAG.getSetCC(dl, SetCCVT, Src, Cst, ISD::SETLT); 6898 } 6899 6900 bool Strict = Node->isStrictFPOpcode() || 6901 shouldUseStrictFP_TO_INT(SrcVT, DstVT, /*IsSigned*/ false); 6902 6903 if (Strict) { 6904 // Expand based on maximum range of FP_TO_SINT, if the value exceeds the 6905 // signmask then offset (the result of which should be fully representable). 6906 // Sel = Src < 0x8000000000000000 6907 // FltOfs = select Sel, 0, 0x8000000000000000 6908 // IntOfs = select Sel, 0, 0x8000000000000000 6909 // Result = fp_to_sint(Src - FltOfs) ^ IntOfs 6910 6911 // TODO: Should any fast-math-flags be set for the FSUB? 
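// E.g. for f64 -> i64: inputs below 2^63 go through fp_to_sint unchanged
// (FltOfs == IntOfs == 0); inputs >= 2^63 first have 2^63 subtracted, are
// converted as signed values in [0, 2^63), and then have the sign bit XORed
// back in to reconstruct the unsigned result.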
6912 SDValue FltOfs = DAG.getSelect(dl, SrcVT, Sel, 6913 DAG.getConstantFP(0.0, dl, SrcVT), Cst); 6914 Sel = DAG.getBoolExtOrTrunc(Sel, dl, DstSetCCVT, DstVT); 6915 SDValue IntOfs = DAG.getSelect(dl, DstVT, Sel, 6916 DAG.getConstant(0, dl, DstVT), 6917 DAG.getConstant(SignMask, dl, DstVT)); 6918 SDValue SInt; 6919 if (Node->isStrictFPOpcode()) { 6920 SDValue Val = DAG.getNode(ISD::STRICT_FSUB, dl, { SrcVT, MVT::Other }, 6921 { Chain, Src, FltOfs }); 6922 SInt = DAG.getNode(ISD::STRICT_FP_TO_SINT, dl, { DstVT, MVT::Other }, 6923 { Val.getValue(1), Val }); 6924 Chain = SInt.getValue(1); 6925 } else { 6926 SDValue Val = DAG.getNode(ISD::FSUB, dl, SrcVT, Src, FltOfs); 6927 SInt = DAG.getNode(ISD::FP_TO_SINT, dl, DstVT, Val); 6928 } 6929 Result = DAG.getNode(ISD::XOR, dl, DstVT, SInt, IntOfs); 6930 } else { 6931 // Expand based on maximum range of FP_TO_SINT: 6932 // True = fp_to_sint(Src) 6933 // False = 0x8000000000000000 + fp_to_sint(Src - 0x8000000000000000) 6934 // Result = select (Src < 0x8000000000000000), True, False 6935 6936 SDValue True = DAG.getNode(ISD::FP_TO_SINT, dl, DstVT, Src); 6937 // TODO: Should any fast-math-flags be set for the FSUB? 6938 SDValue False = DAG.getNode(ISD::FP_TO_SINT, dl, DstVT, 6939 DAG.getNode(ISD::FSUB, dl, SrcVT, Src, Cst)); 6940 False = DAG.getNode(ISD::XOR, dl, DstVT, False, 6941 DAG.getConstant(SignMask, dl, DstVT)); 6942 Sel = DAG.getBoolExtOrTrunc(Sel, dl, DstSetCCVT, DstVT); 6943 Result = DAG.getSelect(dl, DstVT, Sel, True, False); 6944 } 6945 return true; 6946 } 6947 6948 bool TargetLowering::expandUINT_TO_FP(SDNode *Node, SDValue &Result, 6949 SDValue &Chain, 6950 SelectionDAG &DAG) const { 6951 // This transform is not correct for converting 0 when rounding mode is set 6952 // to round toward negative infinity which will produce -0.0. So disable under 6953 // strictfp. 6954 if (Node->isStrictFPOpcode()) 6955 return false; 6956 6957 SDValue Src = Node->getOperand(0); 6958 EVT SrcVT = Src.getValueType(); 6959 EVT DstVT = Node->getValueType(0); 6960 6961 if (SrcVT.getScalarType() != MVT::i64 || DstVT.getScalarType() != MVT::f64) 6962 return false; 6963 6964 // Only expand vector types if we have the appropriate vector bit operations. 6965 if (SrcVT.isVector() && (!isOperationLegalOrCustom(ISD::SRL, SrcVT) || 6966 !isOperationLegalOrCustom(ISD::FADD, DstVT) || 6967 !isOperationLegalOrCustom(ISD::FSUB, DstVT) || 6968 !isOperationLegalOrCustomOrPromote(ISD::OR, SrcVT) || 6969 !isOperationLegalOrCustomOrPromote(ISD::AND, SrcVT))) 6970 return false; 6971 6972 SDLoc dl(SDValue(Node, 0)); 6973 EVT ShiftVT = getShiftAmountTy(SrcVT, DAG.getDataLayout()); 6974 6975 // Implementation of unsigned i64 to f64 following the algorithm in 6976 // __floatundidf in compiler_rt. This implementation performs rounding 6977 // correctly in all rounding modes with the exception of converting 0 6978 // when rounding toward negative infinity. In that case the fsub will produce 6979 // -0.0. This will be added to +0.0 and produce -0.0 which is incorrect. 
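// The trick: the integer OR of the low 32 bits with 0x4330000000000000,
// reinterpreted as a double, is exactly 2^52 + Lo (the mantissa LSB of 2^52
// is worth 1); likewise OR-ing the high 32 bits into 0x4530000000000000
// gives exactly 2^84 + Hi*2^32 (the mantissa LSB of 2^84 is worth 2^32).
// Subtracting the constant 2^84 + 2^52 from the latter leaves Hi*2^32 - 2^52
// exactly, and the final FADD computes (2^52 + Lo) + (Hi*2^32 - 2^52)
// == Hi*2^32 + Lo with a single rounding (modulo the zero /
// round-toward-negative caveat above).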
6980 SDValue TwoP52 = DAG.getConstant(UINT64_C(0x4330000000000000), dl, SrcVT); 6981 SDValue TwoP84PlusTwoP52 = DAG.getConstantFP( 6982 BitsToDouble(UINT64_C(0x4530000000100000)), dl, DstVT); 6983 SDValue TwoP84 = DAG.getConstant(UINT64_C(0x4530000000000000), dl, SrcVT); 6984 SDValue LoMask = DAG.getConstant(UINT64_C(0x00000000FFFFFFFF), dl, SrcVT); 6985 SDValue HiShift = DAG.getConstant(32, dl, ShiftVT); 6986 6987 SDValue Lo = DAG.getNode(ISD::AND, dl, SrcVT, Src, LoMask); 6988 SDValue Hi = DAG.getNode(ISD::SRL, dl, SrcVT, Src, HiShift); 6989 SDValue LoOr = DAG.getNode(ISD::OR, dl, SrcVT, Lo, TwoP52); 6990 SDValue HiOr = DAG.getNode(ISD::OR, dl, SrcVT, Hi, TwoP84); 6991 SDValue LoFlt = DAG.getBitcast(DstVT, LoOr); 6992 SDValue HiFlt = DAG.getBitcast(DstVT, HiOr); 6993 SDValue HiSub = 6994 DAG.getNode(ISD::FSUB, dl, DstVT, HiFlt, TwoP84PlusTwoP52); 6995 Result = DAG.getNode(ISD::FADD, dl, DstVT, LoFlt, HiSub); 6996 return true; 6997 } 6998 6999 SDValue TargetLowering::expandFMINNUM_FMAXNUM(SDNode *Node, 7000 SelectionDAG &DAG) const { 7001 SDLoc dl(Node); 7002 unsigned NewOp = Node->getOpcode() == ISD::FMINNUM ? 7003 ISD::FMINNUM_IEEE : ISD::FMAXNUM_IEEE; 7004 EVT VT = Node->getValueType(0); 7005 7006 if (VT.isScalableVector()) 7007 report_fatal_error( 7008 "Expanding fminnum/fmaxnum for scalable vectors is undefined."); 7009 7010 if (isOperationLegalOrCustom(NewOp, VT)) { 7011 SDValue Quiet0 = Node->getOperand(0); 7012 SDValue Quiet1 = Node->getOperand(1); 7013 7014 if (!Node->getFlags().hasNoNaNs()) { 7015 // Insert canonicalizes if it's possible we need to quiet to get correct 7016 // sNaN behavior. 7017 if (!DAG.isKnownNeverSNaN(Quiet0)) { 7018 Quiet0 = DAG.getNode(ISD::FCANONICALIZE, dl, VT, Quiet0, 7019 Node->getFlags()); 7020 } 7021 if (!DAG.isKnownNeverSNaN(Quiet1)) { 7022 Quiet1 = DAG.getNode(ISD::FCANONICALIZE, dl, VT, Quiet1, 7023 Node->getFlags()); 7024 } 7025 } 7026 7027 return DAG.getNode(NewOp, dl, VT, Quiet0, Quiet1, Node->getFlags()); 7028 } 7029 7030 // If the target has FMINIMUM/FMAXIMUM but not FMINNUM/FMAXNUM use that 7031 // instead if there are no NaNs. 7032 if (Node->getFlags().hasNoNaNs()) { 7033 unsigned IEEE2018Op = 7034 Node->getOpcode() == ISD::FMINNUM ? ISD::FMINIMUM : ISD::FMAXIMUM; 7035 if (isOperationLegalOrCustom(IEEE2018Op, VT)) { 7036 return DAG.getNode(IEEE2018Op, dl, VT, Node->getOperand(0), 7037 Node->getOperand(1), Node->getFlags()); 7038 } 7039 } 7040 7041 // If none of the above worked, but there are no NaNs, then expand to 7042 // a compare/select sequence. This is required for correctness since 7043 // InstCombine might have canonicalized a fcmp+select sequence to a 7044 // FMINNUM/FMAXNUM node. If we were to fall through to the default 7045 // expansion to libcall, we might introduce a link-time dependency 7046 // on libm into a file that originally did not have one. 7047 if (Node->getFlags().hasNoNaNs()) { 7048 ISD::CondCode Pred = 7049 Node->getOpcode() == ISD::FMINNUM ? ISD::SETLT : ISD::SETGT; 7050 SDValue Op1 = Node->getOperand(0); 7051 SDValue Op2 = Node->getOperand(1); 7052 SDValue SelCC = DAG.getSelectCC(dl, Op1, Op2, Op1, Op2, Pred); 7053 // Copy FMF flags, but always set the no-signed-zeros flag 7054 // as this is implied by the FMINNUM/FMAXNUM semantics. 7055 SDNodeFlags Flags = Node->getFlags(); 7056 Flags.setNoSignedZeros(true); 7057 SelCC->setFlags(Flags); 7058 return SelCC; 7059 } 7060 7061 return SDValue(); 7062 } 7063 7064 // Only expand vector types if we have the appropriate vector bit operations. 
7065 static bool canExpandVectorCTPOP(const TargetLowering &TLI, EVT VT) { 7066 assert(VT.isVector() && "Expected vector type"); 7067 unsigned Len = VT.getScalarSizeInBits(); 7068 return TLI.isOperationLegalOrCustom(ISD::ADD, VT) && 7069 TLI.isOperationLegalOrCustom(ISD::SUB, VT) && 7070 TLI.isOperationLegalOrCustom(ISD::SRL, VT) && 7071 (Len == 8 || TLI.isOperationLegalOrCustom(ISD::MUL, VT)) && 7072 TLI.isOperationLegalOrCustomOrPromote(ISD::AND, VT); 7073 } 7074 7075 SDValue TargetLowering::expandCTPOP(SDNode *Node, SelectionDAG &DAG) const { 7076 SDLoc dl(Node); 7077 EVT VT = Node->getValueType(0); 7078 EVT ShVT = getShiftAmountTy(VT, DAG.getDataLayout()); 7079 SDValue Op = Node->getOperand(0); 7080 unsigned Len = VT.getScalarSizeInBits(); 7081 assert(VT.isInteger() && "CTPOP not implemented for this type."); 7082 7083 // TODO: Add support for irregular type lengths. 7084 if (!(Len <= 128 && Len % 8 == 0)) 7085 return SDValue(); 7086 7087 // Only expand vector types if we have the appropriate vector bit operations. 7088 if (VT.isVector() && !canExpandVectorCTPOP(*this, VT)) 7089 return SDValue(); 7090 7091 // This is the "best" algorithm from 7092 // http://graphics.stanford.edu/~seander/bithacks.html#CountBitsSetParallel 7093 SDValue Mask55 = 7094 DAG.getConstant(APInt::getSplat(Len, APInt(8, 0x55)), dl, VT); 7095 SDValue Mask33 = 7096 DAG.getConstant(APInt::getSplat(Len, APInt(8, 0x33)), dl, VT); 7097 SDValue Mask0F = 7098 DAG.getConstant(APInt::getSplat(Len, APInt(8, 0x0F)), dl, VT); 7099 SDValue Mask01 = 7100 DAG.getConstant(APInt::getSplat(Len, APInt(8, 0x01)), dl, VT); 7101 7102 // v = v - ((v >> 1) & 0x55555555...) 7103 Op = DAG.getNode(ISD::SUB, dl, VT, Op, 7104 DAG.getNode(ISD::AND, dl, VT, 7105 DAG.getNode(ISD::SRL, dl, VT, Op, 7106 DAG.getConstant(1, dl, ShVT)), 7107 Mask55)); 7108 // v = (v & 0x33333333...) + ((v >> 2) & 0x33333333...) 7109 Op = DAG.getNode(ISD::ADD, dl, VT, DAG.getNode(ISD::AND, dl, VT, Op, Mask33), 7110 DAG.getNode(ISD::AND, dl, VT, 7111 DAG.getNode(ISD::SRL, dl, VT, Op, 7112 DAG.getConstant(2, dl, ShVT)), 7113 Mask33)); 7114 // v = (v + (v >> 4)) & 0x0F0F0F0F... 7115 Op = DAG.getNode(ISD::AND, dl, VT, 7116 DAG.getNode(ISD::ADD, dl, VT, Op, 7117 DAG.getNode(ISD::SRL, dl, VT, Op, 7118 DAG.getConstant(4, dl, ShVT))), 7119 Mask0F); 7120 // v = (v * 0x01010101...) >> (Len - 8) 7121 if (Len > 8) 7122 Op = 7123 DAG.getNode(ISD::SRL, dl, VT, DAG.getNode(ISD::MUL, dl, VT, Op, Mask01), 7124 DAG.getConstant(Len - 8, dl, ShVT)); 7125 7126 return Op; 7127 } 7128 7129 SDValue TargetLowering::expandCTLZ(SDNode *Node, SelectionDAG &DAG) const { 7130 SDLoc dl(Node); 7131 EVT VT = Node->getValueType(0); 7132 EVT ShVT = getShiftAmountTy(VT, DAG.getDataLayout()); 7133 SDValue Op = Node->getOperand(0); 7134 unsigned NumBitsPerElt = VT.getScalarSizeInBits(); 7135 7136 // If the non-ZERO_UNDEF version is supported we can use that instead. 7137 if (Node->getOpcode() == ISD::CTLZ_ZERO_UNDEF && 7138 isOperationLegalOrCustom(ISD::CTLZ, VT)) 7139 return DAG.getNode(ISD::CTLZ, dl, VT, Op); 7140 7141 // If the ZERO_UNDEF version is supported use that and handle the zero case. 
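// (CTLZ_ZERO_UNDEF may return an arbitrary value for a zero input, while
//  plain CTLZ must return the element bit width, hence the explicit
//  compare-and-select on zero below.)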
7142 if (isOperationLegalOrCustom(ISD::CTLZ_ZERO_UNDEF, VT)) { 7143 EVT SetCCVT = 7144 getSetCCResultType(DAG.getDataLayout(), *DAG.getContext(), VT); 7145 SDValue CTLZ = DAG.getNode(ISD::CTLZ_ZERO_UNDEF, dl, VT, Op); 7146 SDValue Zero = DAG.getConstant(0, dl, VT); 7147 SDValue SrcIsZero = DAG.getSetCC(dl, SetCCVT, Op, Zero, ISD::SETEQ); 7148 return DAG.getSelect(dl, VT, SrcIsZero, 7149 DAG.getConstant(NumBitsPerElt, dl, VT), CTLZ); 7150 } 7151 7152 // Only expand vector types if we have the appropriate vector bit operations. 7153 // This includes the operations needed to expand CTPOP if it isn't supported. 7154 if (VT.isVector() && (!isPowerOf2_32(NumBitsPerElt) || 7155 (!isOperationLegalOrCustom(ISD::CTPOP, VT) && 7156 !canExpandVectorCTPOP(*this, VT)) || 7157 !isOperationLegalOrCustom(ISD::SRL, VT) || 7158 !isOperationLegalOrCustomOrPromote(ISD::OR, VT))) 7159 return SDValue(); 7160 7161 // for now, we do this: 7162 // x = x | (x >> 1); 7163 // x = x | (x >> 2); 7164 // ... 7165 // x = x | (x >>16); 7166 // x = x | (x >>32); // for 64-bit input 7167 // return popcount(~x); 7168 // 7169 // Ref: "Hacker's Delight" by Henry Warren 7170 for (unsigned i = 0; (1U << i) <= (NumBitsPerElt / 2); ++i) { 7171 SDValue Tmp = DAG.getConstant(1ULL << i, dl, ShVT); 7172 Op = DAG.getNode(ISD::OR, dl, VT, Op, 7173 DAG.getNode(ISD::SRL, dl, VT, Op, Tmp)); 7174 } 7175 Op = DAG.getNOT(dl, Op, VT); 7176 return DAG.getNode(ISD::CTPOP, dl, VT, Op); 7177 } 7178 7179 SDValue TargetLowering::expandCTTZ(SDNode *Node, SelectionDAG &DAG) const { 7180 SDLoc dl(Node); 7181 EVT VT = Node->getValueType(0); 7182 SDValue Op = Node->getOperand(0); 7183 unsigned NumBitsPerElt = VT.getScalarSizeInBits(); 7184 7185 // If the non-ZERO_UNDEF version is supported we can use that instead. 7186 if (Node->getOpcode() == ISD::CTTZ_ZERO_UNDEF && 7187 isOperationLegalOrCustom(ISD::CTTZ, VT)) 7188 return DAG.getNode(ISD::CTTZ, dl, VT, Op); 7189 7190 // If the ZERO_UNDEF version is supported use that and handle the zero case. 7191 if (isOperationLegalOrCustom(ISD::CTTZ_ZERO_UNDEF, VT)) { 7192 EVT SetCCVT = 7193 getSetCCResultType(DAG.getDataLayout(), *DAG.getContext(), VT); 7194 SDValue CTTZ = DAG.getNode(ISD::CTTZ_ZERO_UNDEF, dl, VT, Op); 7195 SDValue Zero = DAG.getConstant(0, dl, VT); 7196 SDValue SrcIsZero = DAG.getSetCC(dl, SetCCVT, Op, Zero, ISD::SETEQ); 7197 return DAG.getSelect(dl, VT, SrcIsZero, 7198 DAG.getConstant(NumBitsPerElt, dl, VT), CTTZ); 7199 } 7200 7201 // Only expand vector types if we have the appropriate vector bit operations. 7202 // This includes the operations needed to expand CTPOP if it isn't supported. 7203 if (VT.isVector() && (!isPowerOf2_32(NumBitsPerElt) || 7204 (!isOperationLegalOrCustom(ISD::CTPOP, VT) && 7205 !isOperationLegalOrCustom(ISD::CTLZ, VT) && 7206 !canExpandVectorCTPOP(*this, VT)) || 7207 !isOperationLegalOrCustom(ISD::SUB, VT) || 7208 !isOperationLegalOrCustomOrPromote(ISD::AND, VT) || 7209 !isOperationLegalOrCustomOrPromote(ISD::XOR, VT))) 7210 return SDValue(); 7211 7212 // for now, we use: { return popcount(~x & (x - 1)); } 7213 // unless the target has ctlz but not ctpop, in which case we use: 7214 // { return 32 - nlz(~x & (x-1)); } 7215 // Ref: "Hacker's Delight" by Henry Warren 7216 SDValue Tmp = DAG.getNode( 7217 ISD::AND, dl, VT, DAG.getNOT(dl, Op, VT), 7218 DAG.getNode(ISD::SUB, dl, VT, Op, DAG.getConstant(1, dl, VT))); 7219 7220 // If ISD::CTLZ is legal and CTPOP isn't, then do that instead. 
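// E.g. for x = 0b101000: x - 1 = 0b100111 and ~x = ...010111, so
// ~x & (x - 1) = 0b000111, a mask of exactly the trailing zero bits of x;
// its popcount is 3, and equivalently NumBitsPerElt - ctlz(0b000111) is 3.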
7221 if (isOperationLegal(ISD::CTLZ, VT) && !isOperationLegal(ISD::CTPOP, VT)) { 7222 return DAG.getNode(ISD::SUB, dl, VT, DAG.getConstant(NumBitsPerElt, dl, VT), 7223 DAG.getNode(ISD::CTLZ, dl, VT, Tmp)); 7224 } 7225 7226 return DAG.getNode(ISD::CTPOP, dl, VT, Tmp); 7227 } 7228 7229 SDValue TargetLowering::expandABS(SDNode *N, SelectionDAG &DAG, 7230 bool IsNegative) const { 7231 SDLoc dl(N); 7232 EVT VT = N->getValueType(0); 7233 EVT ShVT = getShiftAmountTy(VT, DAG.getDataLayout()); 7234 SDValue Op = N->getOperand(0); 7235 7236 // abs(x) -> smax(x,sub(0,x)) 7237 if (!IsNegative && isOperationLegal(ISD::SUB, VT) && 7238 isOperationLegal(ISD::SMAX, VT)) { 7239 SDValue Zero = DAG.getConstant(0, dl, VT); 7240 return DAG.getNode(ISD::SMAX, dl, VT, Op, 7241 DAG.getNode(ISD::SUB, dl, VT, Zero, Op)); 7242 } 7243 7244 // abs(x) -> umin(x,sub(0,x)) 7245 if (!IsNegative && isOperationLegal(ISD::SUB, VT) && 7246 isOperationLegal(ISD::UMIN, VT)) { 7247 SDValue Zero = DAG.getConstant(0, dl, VT); 7248 return DAG.getNode(ISD::UMIN, dl, VT, Op, 7249 DAG.getNode(ISD::SUB, dl, VT, Zero, Op)); 7250 } 7251 7252 // 0 - abs(x) -> smin(x, sub(0,x)) 7253 if (IsNegative && isOperationLegal(ISD::SUB, VT) && 7254 isOperationLegal(ISD::SMIN, VT)) { 7255 SDValue Zero = DAG.getConstant(0, dl, VT); 7256 return DAG.getNode(ISD::SMIN, dl, VT, Op, 7257 DAG.getNode(ISD::SUB, dl, VT, Zero, Op)); 7258 } 7259 7260 // Only expand vector types if we have the appropriate vector operations. 7261 if (VT.isVector() && 7262 (!isOperationLegalOrCustom(ISD::SRA, VT) || 7263 (!IsNegative && !isOperationLegalOrCustom(ISD::ADD, VT)) || 7264 (IsNegative && !isOperationLegalOrCustom(ISD::SUB, VT)) || 7265 !isOperationLegalOrCustomOrPromote(ISD::XOR, VT))) 7266 return SDValue(); 7267 7268 SDValue Shift = 7269 DAG.getNode(ISD::SRA, dl, VT, Op, 7270 DAG.getConstant(VT.getScalarSizeInBits() - 1, dl, ShVT)); 7271 if (!IsNegative) { 7272 SDValue Add = DAG.getNode(ISD::ADD, dl, VT, Op, Shift); 7273 return DAG.getNode(ISD::XOR, dl, VT, Add, Shift); 7274 } 7275 7276 // 0 - abs(x) -> Y = sra (X, size(X)-1); sub (Y, xor (X, Y)) 7277 SDValue Xor = DAG.getNode(ISD::XOR, dl, VT, Op, Shift); 7278 return DAG.getNode(ISD::SUB, dl, VT, Shift, Xor); 7279 } 7280 7281 SDValue TargetLowering::expandBSWAP(SDNode *N, SelectionDAG &DAG) const { 7282 SDLoc dl(N); 7283 EVT VT = N->getValueType(0); 7284 SDValue Op = N->getOperand(0); 7285 7286 if (!VT.isSimple()) 7287 return SDValue(); 7288 7289 EVT SHVT = getShiftAmountTy(VT, DAG.getDataLayout()); 7290 SDValue Tmp1, Tmp2, Tmp3, Tmp4, Tmp5, Tmp6, Tmp7, Tmp8; 7291 switch (VT.getSimpleVT().getScalarType().SimpleTy) { 7292 default: 7293 return SDValue(); 7294 case MVT::i16: 7295 // Use a rotate by 8. This can be further expanded if necessary. 
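// (Byte-swapping an i16 just exchanges its two bytes, which is exactly a
//  rotate by 8 in either direction.)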
7296 return DAG.getNode(ISD::ROTL, dl, VT, Op, DAG.getConstant(8, dl, SHVT)); 7297 case MVT::i32: 7298 Tmp4 = DAG.getNode(ISD::SHL, dl, VT, Op, DAG.getConstant(24, dl, SHVT)); 7299 Tmp3 = DAG.getNode(ISD::SHL, dl, VT, Op, DAG.getConstant(8, dl, SHVT)); 7300 Tmp2 = DAG.getNode(ISD::SRL, dl, VT, Op, DAG.getConstant(8, dl, SHVT)); 7301 Tmp1 = DAG.getNode(ISD::SRL, dl, VT, Op, DAG.getConstant(24, dl, SHVT)); 7302 Tmp3 = DAG.getNode(ISD::AND, dl, VT, Tmp3, 7303 DAG.getConstant(0xFF0000, dl, VT)); 7304 Tmp2 = DAG.getNode(ISD::AND, dl, VT, Tmp2, DAG.getConstant(0xFF00, dl, VT)); 7305 Tmp4 = DAG.getNode(ISD::OR, dl, VT, Tmp4, Tmp3); 7306 Tmp2 = DAG.getNode(ISD::OR, dl, VT, Tmp2, Tmp1); 7307 return DAG.getNode(ISD::OR, dl, VT, Tmp4, Tmp2); 7308 case MVT::i64: 7309 Tmp8 = DAG.getNode(ISD::SHL, dl, VT, Op, DAG.getConstant(56, dl, SHVT)); 7310 Tmp7 = DAG.getNode(ISD::SHL, dl, VT, Op, DAG.getConstant(40, dl, SHVT)); 7311 Tmp6 = DAG.getNode(ISD::SHL, dl, VT, Op, DAG.getConstant(24, dl, SHVT)); 7312 Tmp5 = DAG.getNode(ISD::SHL, dl, VT, Op, DAG.getConstant(8, dl, SHVT)); 7313 Tmp4 = DAG.getNode(ISD::SRL, dl, VT, Op, DAG.getConstant(8, dl, SHVT)); 7314 Tmp3 = DAG.getNode(ISD::SRL, dl, VT, Op, DAG.getConstant(24, dl, SHVT)); 7315 Tmp2 = DAG.getNode(ISD::SRL, dl, VT, Op, DAG.getConstant(40, dl, SHVT)); 7316 Tmp1 = DAG.getNode(ISD::SRL, dl, VT, Op, DAG.getConstant(56, dl, SHVT)); 7317 Tmp7 = DAG.getNode(ISD::AND, dl, VT, Tmp7, 7318 DAG.getConstant(255ULL<<48, dl, VT)); 7319 Tmp6 = DAG.getNode(ISD::AND, dl, VT, Tmp6, 7320 DAG.getConstant(255ULL<<40, dl, VT)); 7321 Tmp5 = DAG.getNode(ISD::AND, dl, VT, Tmp5, 7322 DAG.getConstant(255ULL<<32, dl, VT)); 7323 Tmp4 = DAG.getNode(ISD::AND, dl, VT, Tmp4, 7324 DAG.getConstant(255ULL<<24, dl, VT)); 7325 Tmp3 = DAG.getNode(ISD::AND, dl, VT, Tmp3, 7326 DAG.getConstant(255ULL<<16, dl, VT)); 7327 Tmp2 = DAG.getNode(ISD::AND, dl, VT, Tmp2, 7328 DAG.getConstant(255ULL<<8 , dl, VT)); 7329 Tmp8 = DAG.getNode(ISD::OR, dl, VT, Tmp8, Tmp7); 7330 Tmp6 = DAG.getNode(ISD::OR, dl, VT, Tmp6, Tmp5); 7331 Tmp4 = DAG.getNode(ISD::OR, dl, VT, Tmp4, Tmp3); 7332 Tmp2 = DAG.getNode(ISD::OR, dl, VT, Tmp2, Tmp1); 7333 Tmp8 = DAG.getNode(ISD::OR, dl, VT, Tmp8, Tmp6); 7334 Tmp4 = DAG.getNode(ISD::OR, dl, VT, Tmp4, Tmp2); 7335 return DAG.getNode(ISD::OR, dl, VT, Tmp8, Tmp4); 7336 } 7337 } 7338 7339 SDValue TargetLowering::expandBITREVERSE(SDNode *N, SelectionDAG &DAG) const { 7340 SDLoc dl(N); 7341 EVT VT = N->getValueType(0); 7342 SDValue Op = N->getOperand(0); 7343 EVT SHVT = getShiftAmountTy(VT, DAG.getDataLayout()); 7344 unsigned Sz = VT.getScalarSizeInBits(); 7345 7346 SDValue Tmp, Tmp2, Tmp3; 7347 7348 // If we can, perform BSWAP first and then the mask+swap the i4, then i2 7349 // and finally the i1 pairs. 7350 // TODO: We can easily support i4/i2 legal types if any target ever does. 7351 if (Sz >= 8 && isPowerOf2_32(Sz)) { 7352 // Create the masks - repeating the pattern every byte. 7353 APInt Mask4 = APInt::getSplat(Sz, APInt(8, 0x0F)); 7354 APInt Mask2 = APInt::getSplat(Sz, APInt(8, 0x33)); 7355 APInt Mask1 = APInt::getSplat(Sz, APInt(8, 0x55)); 7356 7357 // BSWAP if the type is wider than a single byte. 7358 Tmp = (Sz > 8 ? 
DAG.getNode(ISD::BSWAP, dl, VT, Op) : Op); 7359 7360 // swap i4: ((V >> 4) & 0x0F) | ((V & 0x0F) << 4) 7361 Tmp2 = DAG.getNode(ISD::SRL, dl, VT, Tmp, DAG.getConstant(4, dl, SHVT)); 7362 Tmp2 = DAG.getNode(ISD::AND, dl, VT, Tmp2, DAG.getConstant(Mask4, dl, VT)); 7363 Tmp3 = DAG.getNode(ISD::AND, dl, VT, Tmp, DAG.getConstant(Mask4, dl, VT)); 7364 Tmp3 = DAG.getNode(ISD::SHL, dl, VT, Tmp3, DAG.getConstant(4, dl, SHVT)); 7365 Tmp = DAG.getNode(ISD::OR, dl, VT, Tmp2, Tmp3); 7366 7367 // swap i2: ((V >> 2) & 0x33) | ((V & 0x33) << 2) 7368 Tmp2 = DAG.getNode(ISD::SRL, dl, VT, Tmp, DAG.getConstant(2, dl, SHVT)); 7369 Tmp2 = DAG.getNode(ISD::AND, dl, VT, Tmp2, DAG.getConstant(Mask2, dl, VT)); 7370 Tmp3 = DAG.getNode(ISD::AND, dl, VT, Tmp, DAG.getConstant(Mask2, dl, VT)); 7371 Tmp3 = DAG.getNode(ISD::SHL, dl, VT, Tmp3, DAG.getConstant(2, dl, SHVT)); 7372 Tmp = DAG.getNode(ISD::OR, dl, VT, Tmp2, Tmp3); 7373 7374 // swap i1: ((V >> 1) & 0x55) | ((V & 0x55) << 1) 7375 Tmp2 = DAG.getNode(ISD::SRL, dl, VT, Tmp, DAG.getConstant(1, dl, SHVT)); 7376 Tmp2 = DAG.getNode(ISD::AND, dl, VT, Tmp2, DAG.getConstant(Mask1, dl, VT)); 7377 Tmp3 = DAG.getNode(ISD::AND, dl, VT, Tmp, DAG.getConstant(Mask1, dl, VT)); 7378 Tmp3 = DAG.getNode(ISD::SHL, dl, VT, Tmp3, DAG.getConstant(1, dl, SHVT)); 7379 Tmp = DAG.getNode(ISD::OR, dl, VT, Tmp2, Tmp3); 7380 return Tmp; 7381 } 7382 7383 Tmp = DAG.getConstant(0, dl, VT); 7384 for (unsigned I = 0, J = Sz-1; I < Sz; ++I, --J) { 7385 if (I < J) 7386 Tmp2 = 7387 DAG.getNode(ISD::SHL, dl, VT, Op, DAG.getConstant(J - I, dl, SHVT)); 7388 else 7389 Tmp2 = 7390 DAG.getNode(ISD::SRL, dl, VT, Op, DAG.getConstant(I - J, dl, SHVT)); 7391 7392 APInt Shift(Sz, 1); 7393 Shift <<= J; 7394 Tmp2 = DAG.getNode(ISD::AND, dl, VT, Tmp2, DAG.getConstant(Shift, dl, VT)); 7395 Tmp = DAG.getNode(ISD::OR, dl, VT, Tmp, Tmp2); 7396 } 7397 7398 return Tmp; 7399 } 7400 7401 std::pair<SDValue, SDValue> 7402 TargetLowering::scalarizeVectorLoad(LoadSDNode *LD, 7403 SelectionDAG &DAG) const { 7404 SDLoc SL(LD); 7405 SDValue Chain = LD->getChain(); 7406 SDValue BasePTR = LD->getBasePtr(); 7407 EVT SrcVT = LD->getMemoryVT(); 7408 EVT DstVT = LD->getValueType(0); 7409 ISD::LoadExtType ExtType = LD->getExtensionType(); 7410 7411 if (SrcVT.isScalableVector()) 7412 report_fatal_error("Cannot scalarize scalable vector loads"); 7413 7414 unsigned NumElem = SrcVT.getVectorNumElements(); 7415 7416 EVT SrcEltVT = SrcVT.getScalarType(); 7417 EVT DstEltVT = DstVT.getScalarType(); 7418 7419 // A vector must always be stored in memory as-is, i.e. without any padding 7420 // between the elements, since various code depend on it, e.g. in the 7421 // handling of a bitcast of a vector type to int, which may be done with a 7422 // vector store followed by an integer load. A vector that does not have 7423 // elements that are byte-sized must therefore be stored as an integer 7424 // built out of the extracted vector elements. 7425 if (!SrcEltVT.isByteSized()) { 7426 unsigned NumLoadBits = SrcVT.getStoreSizeInBits(); 7427 EVT LoadVT = EVT::getIntegerVT(*DAG.getContext(), NumLoadBits); 7428 7429 unsigned NumSrcBits = SrcVT.getSizeInBits(); 7430 EVT SrcIntVT = EVT::getIntegerVT(*DAG.getContext(), NumSrcBits); 7431 7432 unsigned SrcEltBits = SrcEltVT.getSizeInBits(); 7433 SDValue SrcEltBitMask = DAG.getConstant( 7434 APInt::getLowBitsSet(NumLoadBits, SrcEltBits), SL, LoadVT); 7435 7436 // Load the whole vector and avoid masking off the top bits as it makes 7437 // the codegen worse. 
7438 SDValue Load = 7439 DAG.getExtLoad(ISD::EXTLOAD, SL, LoadVT, Chain, BasePTR, 7440 LD->getPointerInfo(), SrcIntVT, LD->getOriginalAlign(), 7441 LD->getMemOperand()->getFlags(), LD->getAAInfo()); 7442 7443 SmallVector<SDValue, 8> Vals; 7444 for (unsigned Idx = 0; Idx < NumElem; ++Idx) { 7445 unsigned ShiftIntoIdx = 7446 (DAG.getDataLayout().isBigEndian() ? (NumElem - 1) - Idx : Idx); 7447 SDValue ShiftAmount = 7448 DAG.getShiftAmountConstant(ShiftIntoIdx * SrcEltVT.getSizeInBits(), 7449 LoadVT, SL, /*LegalTypes=*/false); 7450 SDValue ShiftedElt = DAG.getNode(ISD::SRL, SL, LoadVT, Load, ShiftAmount); 7451 SDValue Elt = 7452 DAG.getNode(ISD::AND, SL, LoadVT, ShiftedElt, SrcEltBitMask); 7453 SDValue Scalar = DAG.getNode(ISD::TRUNCATE, SL, SrcEltVT, Elt); 7454 7455 if (ExtType != ISD::NON_EXTLOAD) { 7456 unsigned ExtendOp = ISD::getExtForLoadExtType(false, ExtType); 7457 Scalar = DAG.getNode(ExtendOp, SL, DstEltVT, Scalar); 7458 } 7459 7460 Vals.push_back(Scalar); 7461 } 7462 7463 SDValue Value = DAG.getBuildVector(DstVT, SL, Vals); 7464 return std::make_pair(Value, Load.getValue(1)); 7465 } 7466 7467 unsigned Stride = SrcEltVT.getSizeInBits() / 8; 7468 assert(SrcEltVT.isByteSized()); 7469 7470 SmallVector<SDValue, 8> Vals; 7471 SmallVector<SDValue, 8> LoadChains; 7472 7473 for (unsigned Idx = 0; Idx < NumElem; ++Idx) { 7474 SDValue ScalarLoad = 7475 DAG.getExtLoad(ExtType, SL, DstEltVT, Chain, BasePTR, 7476 LD->getPointerInfo().getWithOffset(Idx * Stride), 7477 SrcEltVT, LD->getOriginalAlign(), 7478 LD->getMemOperand()->getFlags(), LD->getAAInfo()); 7479 7480 BasePTR = DAG.getObjectPtrOffset(SL, BasePTR, TypeSize::Fixed(Stride)); 7481 7482 Vals.push_back(ScalarLoad.getValue(0)); 7483 LoadChains.push_back(ScalarLoad.getValue(1)); 7484 } 7485 7486 SDValue NewChain = DAG.getNode(ISD::TokenFactor, SL, MVT::Other, LoadChains); 7487 SDValue Value = DAG.getBuildVector(DstVT, SL, Vals); 7488 7489 return std::make_pair(Value, NewChain); 7490 } 7491 7492 SDValue TargetLowering::scalarizeVectorStore(StoreSDNode *ST, 7493 SelectionDAG &DAG) const { 7494 SDLoc SL(ST); 7495 7496 SDValue Chain = ST->getChain(); 7497 SDValue BasePtr = ST->getBasePtr(); 7498 SDValue Value = ST->getValue(); 7499 EVT StVT = ST->getMemoryVT(); 7500 7501 if (StVT.isScalableVector()) 7502 report_fatal_error("Cannot scalarize scalable vector stores"); 7503 7504 // The type of the data we want to save 7505 EVT RegVT = Value.getValueType(); 7506 EVT RegSclVT = RegVT.getScalarType(); 7507 7508 // The type of data as saved in memory. 7509 EVT MemSclVT = StVT.getScalarType(); 7510 7511 unsigned NumElem = StVT.getVectorNumElements(); 7512 7513 // A vector must always be stored in memory as-is, i.e. without any padding 7514 // between the elements, since various code depend on it, e.g. in the 7515 // handling of a bitcast of a vector type to int, which may be done with a 7516 // vector store followed by an integer load. A vector that does not have 7517 // elements that are byte-sized must therefore be stored as an integer 7518 // built out of the extracted vector elements. 
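// E.g. a v8i1 store is emitted below as a single i8 store in which element
// Idx occupies bit Idx (counted from the other end on big-endian targets),
// rather than as eight separate byte stores.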
7519 if (!MemSclVT.isByteSized()) { 7520 unsigned NumBits = StVT.getSizeInBits(); 7521 EVT IntVT = EVT::getIntegerVT(*DAG.getContext(), NumBits); 7522 7523 SDValue CurrVal = DAG.getConstant(0, SL, IntVT); 7524 7525 for (unsigned Idx = 0; Idx < NumElem; ++Idx) { 7526 SDValue Elt = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, SL, RegSclVT, Value, 7527 DAG.getVectorIdxConstant(Idx, SL)); 7528 SDValue Trunc = DAG.getNode(ISD::TRUNCATE, SL, MemSclVT, Elt); 7529 SDValue ExtElt = DAG.getNode(ISD::ZERO_EXTEND, SL, IntVT, Trunc); 7530 unsigned ShiftIntoIdx = 7531 (DAG.getDataLayout().isBigEndian() ? (NumElem - 1) - Idx : Idx); 7532 SDValue ShiftAmount = 7533 DAG.getConstant(ShiftIntoIdx * MemSclVT.getSizeInBits(), SL, IntVT); 7534 SDValue ShiftedElt = 7535 DAG.getNode(ISD::SHL, SL, IntVT, ExtElt, ShiftAmount); 7536 CurrVal = DAG.getNode(ISD::OR, SL, IntVT, CurrVal, ShiftedElt); 7537 } 7538 7539 return DAG.getStore(Chain, SL, CurrVal, BasePtr, ST->getPointerInfo(), 7540 ST->getOriginalAlign(), ST->getMemOperand()->getFlags(), 7541 ST->getAAInfo()); 7542 } 7543 7544 // Store Stride in bytes 7545 unsigned Stride = MemSclVT.getSizeInBits() / 8; 7546 assert(Stride && "Zero stride!"); 7547 // Extract each of the elements from the original vector and save them into 7548 // memory individually. 7549 SmallVector<SDValue, 8> Stores; 7550 for (unsigned Idx = 0; Idx < NumElem; ++Idx) { 7551 SDValue Elt = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, SL, RegSclVT, Value, 7552 DAG.getVectorIdxConstant(Idx, SL)); 7553 7554 SDValue Ptr = 7555 DAG.getObjectPtrOffset(SL, BasePtr, TypeSize::Fixed(Idx * Stride)); 7556 7557 // This scalar TruncStore may be illegal, but we legalize it later. 7558 SDValue Store = DAG.getTruncStore( 7559 Chain, SL, Elt, Ptr, ST->getPointerInfo().getWithOffset(Idx * Stride), 7560 MemSclVT, ST->getOriginalAlign(), ST->getMemOperand()->getFlags(), 7561 ST->getAAInfo()); 7562 7563 Stores.push_back(Store); 7564 } 7565 7566 return DAG.getNode(ISD::TokenFactor, SL, MVT::Other, Stores); 7567 } 7568 7569 std::pair<SDValue, SDValue> 7570 TargetLowering::expandUnalignedLoad(LoadSDNode *LD, SelectionDAG &DAG) const { 7571 assert(LD->getAddressingMode() == ISD::UNINDEXED && 7572 "unaligned indexed loads not implemented!"); 7573 SDValue Chain = LD->getChain(); 7574 SDValue Ptr = LD->getBasePtr(); 7575 EVT VT = LD->getValueType(0); 7576 EVT LoadedVT = LD->getMemoryVT(); 7577 SDLoc dl(LD); 7578 auto &MF = DAG.getMachineFunction(); 7579 7580 if (VT.isFloatingPoint() || VT.isVector()) { 7581 EVT intVT = EVT::getIntegerVT(*DAG.getContext(), LoadedVT.getSizeInBits()); 7582 if (isTypeLegal(intVT) && isTypeLegal(LoadedVT)) { 7583 if (!isOperationLegalOrCustom(ISD::LOAD, intVT) && 7584 LoadedVT.isVector()) { 7585 // Scalarize the load and let the individual components be handled. 7586 return scalarizeVectorLoad(LD, DAG); 7587 } 7588 7589 // Expand to a (misaligned) integer load of the same size, 7590 // then bitconvert to floating point or vector. 7591 SDValue newLoad = DAG.getLoad(intVT, dl, Chain, Ptr, 7592 LD->getMemOperand()); 7593 SDValue Result = DAG.getNode(ISD::BITCAST, dl, LoadedVT, newLoad); 7594 if (LoadedVT != VT) 7595 Result = DAG.getNode(VT.isFloatingPoint() ? ISD::FP_EXTEND : 7596 ISD::ANY_EXTEND, dl, VT, Result); 7597 7598 return std::make_pair(Result, newLoad.getValue(1)); 7599 } 7600 7601 // Copy the value to a (aligned) stack slot using (unaligned) integer 7602 // loads and stores, then do a (aligned) load from the stack slot. 
7603 MVT RegVT = getRegisterType(*DAG.getContext(), intVT); 7604 unsigned LoadedBytes = LoadedVT.getStoreSize(); 7605 unsigned RegBytes = RegVT.getSizeInBits() / 8; 7606 unsigned NumRegs = (LoadedBytes + RegBytes - 1) / RegBytes; 7607 7608 // Make sure the stack slot is also aligned for the register type. 7609 SDValue StackBase = DAG.CreateStackTemporary(LoadedVT, RegVT); 7610 auto FrameIndex = cast<FrameIndexSDNode>(StackBase.getNode())->getIndex(); 7611 SmallVector<SDValue, 8> Stores; 7612 SDValue StackPtr = StackBase; 7613 unsigned Offset = 0; 7614 7615 EVT PtrVT = Ptr.getValueType(); 7616 EVT StackPtrVT = StackPtr.getValueType(); 7617 7618 SDValue PtrIncrement = DAG.getConstant(RegBytes, dl, PtrVT); 7619 SDValue StackPtrIncrement = DAG.getConstant(RegBytes, dl, StackPtrVT); 7620 7621 // Do all but one copies using the full register width. 7622 for (unsigned i = 1; i < NumRegs; i++) { 7623 // Load one integer register's worth from the original location. 7624 SDValue Load = DAG.getLoad( 7625 RegVT, dl, Chain, Ptr, LD->getPointerInfo().getWithOffset(Offset), 7626 LD->getOriginalAlign(), LD->getMemOperand()->getFlags(), 7627 LD->getAAInfo()); 7628 // Follow the load with a store to the stack slot. Remember the store. 7629 Stores.push_back(DAG.getStore( 7630 Load.getValue(1), dl, Load, StackPtr, 7631 MachinePointerInfo::getFixedStack(MF, FrameIndex, Offset))); 7632 // Increment the pointers. 7633 Offset += RegBytes; 7634 7635 Ptr = DAG.getObjectPtrOffset(dl, Ptr, PtrIncrement); 7636 StackPtr = DAG.getObjectPtrOffset(dl, StackPtr, StackPtrIncrement); 7637 } 7638 7639 // The last copy may be partial. Do an extending load. 7640 EVT MemVT = EVT::getIntegerVT(*DAG.getContext(), 7641 8 * (LoadedBytes - Offset)); 7642 SDValue Load = 7643 DAG.getExtLoad(ISD::EXTLOAD, dl, RegVT, Chain, Ptr, 7644 LD->getPointerInfo().getWithOffset(Offset), MemVT, 7645 LD->getOriginalAlign(), LD->getMemOperand()->getFlags(), 7646 LD->getAAInfo()); 7647 // Follow the load with a store to the stack slot. Remember the store. 7648 // On big-endian machines this requires a truncating store to ensure 7649 // that the bits end up in the right place. 7650 Stores.push_back(DAG.getTruncStore( 7651 Load.getValue(1), dl, Load, StackPtr, 7652 MachinePointerInfo::getFixedStack(MF, FrameIndex, Offset), MemVT)); 7653 7654 // The order of the stores doesn't matter - say it with a TokenFactor. 7655 SDValue TF = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, Stores); 7656 7657 // Finally, perform the original load only redirected to the stack slot. 7658 Load = DAG.getExtLoad(LD->getExtensionType(), dl, VT, TF, StackBase, 7659 MachinePointerInfo::getFixedStack(MF, FrameIndex, 0), 7660 LoadedVT); 7661 7662 // Callers expect a MERGE_VALUES node. 7663 return std::make_pair(Load, TF); 7664 } 7665 7666 assert(LoadedVT.isInteger() && !LoadedVT.isVector() && 7667 "Unaligned load of unsupported type."); 7668 7669 // Compute the new VT that is half the size of the old one. This is an 7670 // integer MVT. 7671 unsigned NumBits = LoadedVT.getSizeInBits(); 7672 EVT NewLoadedVT; 7673 NewLoadedVT = EVT::getIntegerVT(*DAG.getContext(), NumBits/2); 7674 NumBits >>= 1; 7675 7676 Align Alignment = LD->getOriginalAlign(); 7677 unsigned IncrementSize = NumBits / 8; 7678 ISD::LoadExtType HiExtType = LD->getExtensionType(); 7679 7680 // If the original load is NON_EXTLOAD, the hi part load must be ZEXTLOAD. 
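// As a scalar sketch of the split below (assuming a little-endian target and
// a plain i32 load that is only 2-byte aligned; load16 is a placeholder for
// the narrower, sufficiently aligned load):
//   uint16_t LoHalf = load16(Ptr);       // ZEXTLOAD of the low half
//   uint16_t HiHalf = load16(Ptr + 2);   // high half, also a ZEXTLOAD here
//   uint32_t Res    = ((uint32_t)HiHalf << 16) | (uint32_t)LoHalf;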
7681 if (HiExtType == ISD::NON_EXTLOAD) 7682 HiExtType = ISD::ZEXTLOAD; 7683 7684 // Load the value in two parts 7685 SDValue Lo, Hi; 7686 if (DAG.getDataLayout().isLittleEndian()) { 7687 Lo = DAG.getExtLoad(ISD::ZEXTLOAD, dl, VT, Chain, Ptr, LD->getPointerInfo(), 7688 NewLoadedVT, Alignment, LD->getMemOperand()->getFlags(), 7689 LD->getAAInfo()); 7690 7691 Ptr = DAG.getObjectPtrOffset(dl, Ptr, TypeSize::Fixed(IncrementSize)); 7692 Hi = DAG.getExtLoad(HiExtType, dl, VT, Chain, Ptr, 7693 LD->getPointerInfo().getWithOffset(IncrementSize), 7694 NewLoadedVT, Alignment, LD->getMemOperand()->getFlags(), 7695 LD->getAAInfo()); 7696 } else { 7697 Hi = DAG.getExtLoad(HiExtType, dl, VT, Chain, Ptr, LD->getPointerInfo(), 7698 NewLoadedVT, Alignment, LD->getMemOperand()->getFlags(), 7699 LD->getAAInfo()); 7700 7701 Ptr = DAG.getObjectPtrOffset(dl, Ptr, TypeSize::Fixed(IncrementSize)); 7702 Lo = DAG.getExtLoad(ISD::ZEXTLOAD, dl, VT, Chain, Ptr, 7703 LD->getPointerInfo().getWithOffset(IncrementSize), 7704 NewLoadedVT, Alignment, LD->getMemOperand()->getFlags(), 7705 LD->getAAInfo()); 7706 } 7707 7708 // aggregate the two parts 7709 SDValue ShiftAmount = 7710 DAG.getConstant(NumBits, dl, getShiftAmountTy(Hi.getValueType(), 7711 DAG.getDataLayout())); 7712 SDValue Result = DAG.getNode(ISD::SHL, dl, VT, Hi, ShiftAmount); 7713 Result = DAG.getNode(ISD::OR, dl, VT, Result, Lo); 7714 7715 SDValue TF = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, Lo.getValue(1), 7716 Hi.getValue(1)); 7717 7718 return std::make_pair(Result, TF); 7719 } 7720 7721 SDValue TargetLowering::expandUnalignedStore(StoreSDNode *ST, 7722 SelectionDAG &DAG) const { 7723 assert(ST->getAddressingMode() == ISD::UNINDEXED && 7724 "unaligned indexed stores not implemented!"); 7725 SDValue Chain = ST->getChain(); 7726 SDValue Ptr = ST->getBasePtr(); 7727 SDValue Val = ST->getValue(); 7728 EVT VT = Val.getValueType(); 7729 Align Alignment = ST->getOriginalAlign(); 7730 auto &MF = DAG.getMachineFunction(); 7731 EVT StoreMemVT = ST->getMemoryVT(); 7732 7733 SDLoc dl(ST); 7734 if (StoreMemVT.isFloatingPoint() || StoreMemVT.isVector()) { 7735 EVT intVT = EVT::getIntegerVT(*DAG.getContext(), VT.getSizeInBits()); 7736 if (isTypeLegal(intVT)) { 7737 if (!isOperationLegalOrCustom(ISD::STORE, intVT) && 7738 StoreMemVT.isVector()) { 7739 // Scalarize the store and let the individual components be handled. 7740 SDValue Result = scalarizeVectorStore(ST, DAG); 7741 return Result; 7742 } 7743 // Expand to a bitconvert of the value to the integer type of the 7744 // same size, then a (misaligned) int store. 7745 // FIXME: Does not handle truncating floating point stores! 7746 SDValue Result = DAG.getNode(ISD::BITCAST, dl, intVT, Val); 7747 Result = DAG.getStore(Chain, dl, Result, Ptr, ST->getPointerInfo(), 7748 Alignment, ST->getMemOperand()->getFlags()); 7749 return Result; 7750 } 7751 // Do a (aligned) store to a stack slot, then copy from the stack slot 7752 // to the final destination using (unaligned) integer loads and stores. 7753 MVT RegVT = getRegisterType( 7754 *DAG.getContext(), 7755 EVT::getIntegerVT(*DAG.getContext(), StoreMemVT.getSizeInBits())); 7756 EVT PtrVT = Ptr.getValueType(); 7757 unsigned StoredBytes = StoreMemVT.getStoreSize(); 7758 unsigned RegBytes = RegVT.getSizeInBits() / 8; 7759 unsigned NumRegs = (StoredBytes + RegBytes - 1) / RegBytes; 7760 7761 // Make sure the stack slot is also aligned for the register type. 
7762 SDValue StackPtr = DAG.CreateStackTemporary(StoreMemVT, RegVT); 7763 auto FrameIndex = cast<FrameIndexSDNode>(StackPtr.getNode())->getIndex(); 7764 7765 // Perform the original store, only redirected to the stack slot. 7766 SDValue Store = DAG.getTruncStore( 7767 Chain, dl, Val, StackPtr, 7768 MachinePointerInfo::getFixedStack(MF, FrameIndex, 0), StoreMemVT); 7769 7770 EVT StackPtrVT = StackPtr.getValueType(); 7771 7772 SDValue PtrIncrement = DAG.getConstant(RegBytes, dl, PtrVT); 7773 SDValue StackPtrIncrement = DAG.getConstant(RegBytes, dl, StackPtrVT); 7774 SmallVector<SDValue, 8> Stores; 7775 unsigned Offset = 0; 7776 7777 // Do all but one copies using the full register width. 7778 for (unsigned i = 1; i < NumRegs; i++) { 7779 // Load one integer register's worth from the stack slot. 7780 SDValue Load = DAG.getLoad( 7781 RegVT, dl, Store, StackPtr, 7782 MachinePointerInfo::getFixedStack(MF, FrameIndex, Offset)); 7783 // Store it to the final location. Remember the store. 7784 Stores.push_back(DAG.getStore(Load.getValue(1), dl, Load, Ptr, 7785 ST->getPointerInfo().getWithOffset(Offset), 7786 ST->getOriginalAlign(), 7787 ST->getMemOperand()->getFlags())); 7788 // Increment the pointers. 7789 Offset += RegBytes; 7790 StackPtr = DAG.getObjectPtrOffset(dl, StackPtr, StackPtrIncrement); 7791 Ptr = DAG.getObjectPtrOffset(dl, Ptr, PtrIncrement); 7792 } 7793 7794 // The last store may be partial. Do a truncating store. On big-endian 7795 // machines this requires an extending load from the stack slot to ensure 7796 // that the bits are in the right place. 7797 EVT LoadMemVT = 7798 EVT::getIntegerVT(*DAG.getContext(), 8 * (StoredBytes - Offset)); 7799 7800 // Load from the stack slot. 7801 SDValue Load = DAG.getExtLoad( 7802 ISD::EXTLOAD, dl, RegVT, Store, StackPtr, 7803 MachinePointerInfo::getFixedStack(MF, FrameIndex, Offset), LoadMemVT); 7804 7805 Stores.push_back( 7806 DAG.getTruncStore(Load.getValue(1), dl, Load, Ptr, 7807 ST->getPointerInfo().getWithOffset(Offset), LoadMemVT, 7808 ST->getOriginalAlign(), 7809 ST->getMemOperand()->getFlags(), ST->getAAInfo())); 7810 // The order of the stores doesn't matter - say it with a TokenFactor. 7811 SDValue Result = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, Stores); 7812 return Result; 7813 } 7814 7815 assert(StoreMemVT.isInteger() && !StoreMemVT.isVector() && 7816 "Unaligned store of unknown type."); 7817 // Get the half-size VT 7818 EVT NewStoredVT = StoreMemVT.getHalfSizedIntegerVT(*DAG.getContext()); 7819 unsigned NumBits = NewStoredVT.getFixedSizeInBits(); 7820 unsigned IncrementSize = NumBits / 8; 7821 7822 // Divide the stored value in two parts. 7823 SDValue ShiftAmount = DAG.getConstant( 7824 NumBits, dl, getShiftAmountTy(Val.getValueType(), DAG.getDataLayout())); 7825 SDValue Lo = Val; 7826 SDValue Hi = DAG.getNode(ISD::SRL, dl, VT, Val, ShiftAmount); 7827 7828 // Store the two parts 7829 SDValue Store1, Store2; 7830 Store1 = DAG.getTruncStore(Chain, dl, 7831 DAG.getDataLayout().isLittleEndian() ? Lo : Hi, 7832 Ptr, ST->getPointerInfo(), NewStoredVT, Alignment, 7833 ST->getMemOperand()->getFlags()); 7834 7835 Ptr = DAG.getObjectPtrOffset(dl, Ptr, TypeSize::Fixed(IncrementSize)); 7836 Store2 = DAG.getTruncStore( 7837 Chain, dl, DAG.getDataLayout().isLittleEndian() ? 
Hi : Lo, Ptr, 7838 ST->getPointerInfo().getWithOffset(IncrementSize), NewStoredVT, Alignment, 7839 ST->getMemOperand()->getFlags(), ST->getAAInfo()); 7840 7841 SDValue Result = 7842 DAG.getNode(ISD::TokenFactor, dl, MVT::Other, Store1, Store2); 7843 return Result; 7844 } 7845 7846 SDValue 7847 TargetLowering::IncrementMemoryAddress(SDValue Addr, SDValue Mask, 7848 const SDLoc &DL, EVT DataVT, 7849 SelectionDAG &DAG, 7850 bool IsCompressedMemory) const { 7851 SDValue Increment; 7852 EVT AddrVT = Addr.getValueType(); 7853 EVT MaskVT = Mask.getValueType(); 7854 assert(DataVT.getVectorElementCount() == MaskVT.getVectorElementCount() && 7855 "Incompatible types of Data and Mask"); 7856 if (IsCompressedMemory) { 7857 if (DataVT.isScalableVector()) 7858 report_fatal_error( 7859 "Cannot currently handle compressed memory with scalable vectors"); 7860 // Incrementing the pointer according to number of '1's in the mask. 7861 EVT MaskIntVT = EVT::getIntegerVT(*DAG.getContext(), MaskVT.getSizeInBits()); 7862 SDValue MaskInIntReg = DAG.getBitcast(MaskIntVT, Mask); 7863 if (MaskIntVT.getSizeInBits() < 32) { 7864 MaskInIntReg = DAG.getNode(ISD::ZERO_EXTEND, DL, MVT::i32, MaskInIntReg); 7865 MaskIntVT = MVT::i32; 7866 } 7867 7868 // Count '1's with POPCNT. 7869 Increment = DAG.getNode(ISD::CTPOP, DL, MaskIntVT, MaskInIntReg); 7870 Increment = DAG.getZExtOrTrunc(Increment, DL, AddrVT); 7871 // Scale is an element size in bytes. 7872 SDValue Scale = DAG.getConstant(DataVT.getScalarSizeInBits() / 8, DL, 7873 AddrVT); 7874 Increment = DAG.getNode(ISD::MUL, DL, AddrVT, Increment, Scale); 7875 } else if (DataVT.isScalableVector()) { 7876 Increment = DAG.getVScale(DL, AddrVT, 7877 APInt(AddrVT.getFixedSizeInBits(), 7878 DataVT.getStoreSize().getKnownMinSize())); 7879 } else 7880 Increment = DAG.getConstant(DataVT.getStoreSize(), DL, AddrVT); 7881 7882 return DAG.getNode(ISD::ADD, DL, AddrVT, Addr, Increment); 7883 } 7884 7885 static SDValue clampDynamicVectorIndex(SelectionDAG &DAG, SDValue Idx, 7886 EVT VecVT, const SDLoc &dl, 7887 ElementCount SubEC) { 7888 assert(!(SubEC.isScalable() && VecVT.isFixedLengthVector()) && 7889 "Cannot index a scalable vector within a fixed-width vector"); 7890 7891 unsigned NElts = VecVT.getVectorMinNumElements(); 7892 unsigned NumSubElts = SubEC.getKnownMinValue(); 7893 EVT IdxVT = Idx.getValueType(); 7894 7895 if (VecVT.isScalableVector() && !SubEC.isScalable()) { 7896 // If this is a constant index and we know the value plus the number of the 7897 // elements in the subvector minus one is less than the minimum number of 7898 // elements then it's safe to return Idx. 7899 if (auto *IdxCst = dyn_cast<ConstantSDNode>(Idx)) 7900 if (IdxCst->getZExtValue() + (NumSubElts - 1) < NElts) 7901 return Idx; 7902 SDValue VS = 7903 DAG.getVScale(dl, IdxVT, APInt(IdxVT.getFixedSizeInBits(), NElts)); 7904 unsigned SubOpcode = NumSubElts <= NElts ? ISD::SUB : ISD::USUBSAT; 7905 SDValue Sub = DAG.getNode(SubOpcode, dl, IdxVT, VS, 7906 DAG.getConstant(NumSubElts, dl, IdxVT)); 7907 return DAG.getNode(ISD::UMIN, dl, IdxVT, Idx, Sub); 7908 } 7909 if (isPowerOf2_32(NElts) && NumSubElts == 1) { 7910 APInt Imm = APInt::getLowBitsSet(IdxVT.getSizeInBits(), Log2_32(NElts)); 7911 return DAG.getNode(ISD::AND, dl, IdxVT, Idx, 7912 DAG.getConstant(Imm, dl, IdxVT)); 7913 } 7914 unsigned MaxIndex = NumSubElts < NElts ? 
NElts - NumSubElts : 0;
7915 return DAG.getNode(ISD::UMIN, dl, IdxVT, Idx,
7916 DAG.getConstant(MaxIndex, dl, IdxVT));
7917 }
7918
7919 SDValue TargetLowering::getVectorElementPointer(SelectionDAG &DAG,
7920 SDValue VecPtr, EVT VecVT,
7921 SDValue Index) const {
7922 return getVectorSubVecPointer(
7923 DAG, VecPtr, VecVT,
7924 EVT::getVectorVT(*DAG.getContext(), VecVT.getVectorElementType(), 1),
7925 Index);
7926 }
7927
7928 SDValue TargetLowering::getVectorSubVecPointer(SelectionDAG &DAG,
7929 SDValue VecPtr, EVT VecVT,
7930 EVT SubVecVT,
7931 SDValue Index) const {
7932 SDLoc dl(Index);
7933 // Make sure the index type is big enough to compute in.
7934 Index = DAG.getZExtOrTrunc(Index, dl, VecPtr.getValueType());
7935
7936 EVT EltVT = VecVT.getVectorElementType();
7937
7938 // Calculate the element offset and add it to the pointer.
7939 unsigned EltSize = EltVT.getFixedSizeInBits() / 8; // FIXME: should be ABI size.
7940 assert(EltSize * 8 == EltVT.getFixedSizeInBits() &&
7941 "Converting bits to bytes lost precision");
7942 assert(SubVecVT.getVectorElementType() == EltVT &&
7943 "Sub-vector must be a vector with matching element type");
7944 Index = clampDynamicVectorIndex(DAG, Index, VecVT, dl,
7945 SubVecVT.getVectorElementCount());
7946
7947 EVT IdxVT = Index.getValueType();
7948 if (SubVecVT.isScalableVector())
7949 Index =
7950 DAG.getNode(ISD::MUL, dl, IdxVT, Index,
7951 DAG.getVScale(dl, IdxVT, APInt(IdxVT.getSizeInBits(), 1)));
7952
7953 Index = DAG.getNode(ISD::MUL, dl, IdxVT, Index,
7954 DAG.getConstant(EltSize, dl, IdxVT));
7955 return DAG.getMemBasePlusOffset(VecPtr, Index, dl);
7956 }
7957
7958 //===----------------------------------------------------------------------===//
7959 // Implementation of Emulated TLS Model
7960 //===----------------------------------------------------------------------===//
7961
7962 SDValue TargetLowering::LowerToTLSEmulatedModel(const GlobalAddressSDNode *GA,
7963 SelectionDAG &DAG) const {
7964 // Access to the address of TLS variable xyz is lowered to a function call:
7965 // __emutls_get_address( address of global variable named "__emutls_v.xyz" )
7966 EVT PtrVT = getPointerTy(DAG.getDataLayout());
7967 PointerType *VoidPtrType = Type::getInt8PtrTy(*DAG.getContext());
7968 SDLoc dl(GA);
7969
7970 ArgListTy Args;
7971 ArgListEntry Entry;
7972 std::string NameString = ("__emutls_v." + GA->getGlobal()->getName()).str();
7973 Module *VariableModule = const_cast<Module*>(GA->getGlobal()->getParent());
7974 StringRef EmuTlsVarName(NameString);
7975 GlobalVariable *EmuTlsVar = VariableModule->getNamedGlobal(EmuTlsVarName);
7976 assert(EmuTlsVar && "Cannot find EmuTlsVar ");
7977 Entry.Node = DAG.getGlobalAddress(EmuTlsVar, dl, PtrVT);
7978 Entry.Ty = VoidPtrType;
7979 Args.push_back(Entry);
7980
7981 SDValue EmuTlsGetAddr = DAG.getExternalSymbol("__emutls_get_address", PtrVT);
7982
7983 TargetLowering::CallLoweringInfo CLI(DAG);
7984 CLI.setDebugLoc(dl).setChain(DAG.getEntryNode());
7985 CLI.setLibCallee(CallingConv::C, VoidPtrType, EmuTlsGetAddr, std::move(Args));
7986 std::pair<SDValue, SDValue> CallResult = LowerCallTo(CLI);
7987
7988 // TLSADDR will be codegen'ed as a call. Inform MFI that the function has calls.
7989 // At least for X86 targets; maybe good for other targets too?
7990 MachineFrameInfo &MFI = DAG.getMachineFunction().getFrameInfo();
7991 MFI.setAdjustsStack(true); // Is this only for X86 target?
7992 MFI.setHasCalls(true); 7993 7994 assert((GA->getOffset() == 0) && 7995 "Emulated TLS must have zero offset in GlobalAddressSDNode"); 7996 return CallResult.first; 7997 } 7998 7999 SDValue TargetLowering::lowerCmpEqZeroToCtlzSrl(SDValue Op, 8000 SelectionDAG &DAG) const { 8001 assert((Op->getOpcode() == ISD::SETCC) && "Input has to be a SETCC node."); 8002 if (!isCtlzFast()) 8003 return SDValue(); 8004 ISD::CondCode CC = cast<CondCodeSDNode>(Op.getOperand(2))->get(); 8005 SDLoc dl(Op); 8006 if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op.getOperand(1))) { 8007 if (C->isZero() && CC == ISD::SETEQ) { 8008 EVT VT = Op.getOperand(0).getValueType(); 8009 SDValue Zext = Op.getOperand(0); 8010 if (VT.bitsLT(MVT::i32)) { 8011 VT = MVT::i32; 8012 Zext = DAG.getNode(ISD::ZERO_EXTEND, dl, VT, Op.getOperand(0)); 8013 } 8014 unsigned Log2b = Log2_32(VT.getSizeInBits()); 8015 SDValue Clz = DAG.getNode(ISD::CTLZ, dl, VT, Zext); 8016 SDValue Scc = DAG.getNode(ISD::SRL, dl, VT, Clz, 8017 DAG.getConstant(Log2b, dl, MVT::i32)); 8018 return DAG.getNode(ISD::TRUNCATE, dl, MVT::i32, Scc); 8019 } 8020 } 8021 return SDValue(); 8022 } 8023 8024 // Convert redundant addressing modes (e.g. scaling is redundant 8025 // when accessing bytes). 8026 ISD::MemIndexType 8027 TargetLowering::getCanonicalIndexType(ISD::MemIndexType IndexType, EVT MemVT, 8028 SDValue Offsets) const { 8029 bool IsScaledIndex = 8030 (IndexType == ISD::SIGNED_SCALED) || (IndexType == ISD::UNSIGNED_SCALED); 8031 bool IsSignedIndex = 8032 (IndexType == ISD::SIGNED_SCALED) || (IndexType == ISD::SIGNED_UNSCALED); 8033 8034 // Scaling is unimportant for bytes, canonicalize to unscaled. 8035 if (IsScaledIndex && MemVT.getScalarType() == MVT::i8) 8036 return IsSignedIndex ? ISD::SIGNED_UNSCALED : ISD::UNSIGNED_UNSCALED; 8037 8038 return IndexType; 8039 } 8040 8041 SDValue TargetLowering::expandIntMINMAX(SDNode *Node, SelectionDAG &DAG) const { 8042 SDValue Op0 = Node->getOperand(0); 8043 SDValue Op1 = Node->getOperand(1); 8044 EVT VT = Op0.getValueType(); 8045 unsigned Opcode = Node->getOpcode(); 8046 SDLoc DL(Node); 8047 8048 // umin(x,y) -> sub(x,usubsat(x,y)) 8049 if (Opcode == ISD::UMIN && isOperationLegal(ISD::SUB, VT) && 8050 isOperationLegal(ISD::USUBSAT, VT)) { 8051 return DAG.getNode(ISD::SUB, DL, VT, Op0, 8052 DAG.getNode(ISD::USUBSAT, DL, VT, Op0, Op1)); 8053 } 8054 8055 // umax(x,y) -> add(x,usubsat(y,x)) 8056 if (Opcode == ISD::UMAX && isOperationLegal(ISD::ADD, VT) && 8057 isOperationLegal(ISD::USUBSAT, VT)) { 8058 return DAG.getNode(ISD::ADD, DL, VT, Op0, 8059 DAG.getNode(ISD::USUBSAT, DL, VT, Op1, Op0)); 8060 } 8061 8062 // Expand Y = MAX(A, B) -> Y = (A > B) ? A : B 8063 ISD::CondCode CC; 8064 switch (Opcode) { 8065 default: llvm_unreachable("How did we get here?"); 8066 case ISD::SMAX: CC = ISD::SETGT; break; 8067 case ISD::SMIN: CC = ISD::SETLT; break; 8068 case ISD::UMAX: CC = ISD::SETUGT; break; 8069 case ISD::UMIN: CC = ISD::SETULT; break; 8070 } 8071 8072 // FIXME: Should really try to split the vector in case it's legal on a 8073 // subvector. 
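// Until then, vector cases without a legal or custom VSELECT are simply
// unrolled below; the remaining cases use the generic expansion, e.g.
// smax(a, b) becomes select(setcc(a, b, setgt), a, b).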
8074 if (VT.isVector() && !isOperationLegalOrCustom(ISD::VSELECT, VT)) 8075 return DAG.UnrollVectorOp(Node); 8076 8077 EVT BoolVT = getSetCCResultType(DAG.getDataLayout(), *DAG.getContext(), VT); 8078 SDValue Cond = DAG.getSetCC(DL, BoolVT, Op0, Op1, CC); 8079 return DAG.getSelect(DL, VT, Cond, Op0, Op1); 8080 } 8081 8082 SDValue TargetLowering::expandAddSubSat(SDNode *Node, SelectionDAG &DAG) const { 8083 unsigned Opcode = Node->getOpcode(); 8084 SDValue LHS = Node->getOperand(0); 8085 SDValue RHS = Node->getOperand(1); 8086 EVT VT = LHS.getValueType(); 8087 SDLoc dl(Node); 8088 8089 assert(VT == RHS.getValueType() && "Expected operands to be the same type"); 8090 assert(VT.isInteger() && "Expected operands to be integers"); 8091 8092 // usub.sat(a, b) -> umax(a, b) - b 8093 if (Opcode == ISD::USUBSAT && isOperationLegal(ISD::UMAX, VT)) { 8094 SDValue Max = DAG.getNode(ISD::UMAX, dl, VT, LHS, RHS); 8095 return DAG.getNode(ISD::SUB, dl, VT, Max, RHS); 8096 } 8097 8098 // uadd.sat(a, b) -> umin(a, ~b) + b 8099 if (Opcode == ISD::UADDSAT && isOperationLegal(ISD::UMIN, VT)) { 8100 SDValue InvRHS = DAG.getNOT(dl, RHS, VT); 8101 SDValue Min = DAG.getNode(ISD::UMIN, dl, VT, LHS, InvRHS); 8102 return DAG.getNode(ISD::ADD, dl, VT, Min, RHS); 8103 } 8104 8105 unsigned OverflowOp; 8106 switch (Opcode) { 8107 case ISD::SADDSAT: 8108 OverflowOp = ISD::SADDO; 8109 break; 8110 case ISD::UADDSAT: 8111 OverflowOp = ISD::UADDO; 8112 break; 8113 case ISD::SSUBSAT: 8114 OverflowOp = ISD::SSUBO; 8115 break; 8116 case ISD::USUBSAT: 8117 OverflowOp = ISD::USUBO; 8118 break; 8119 default: 8120 llvm_unreachable("Expected method to receive signed or unsigned saturation " 8121 "addition or subtraction node."); 8122 } 8123 8124 // FIXME: Should really try to split the vector in case it's legal on a 8125 // subvector. 8126 if (VT.isVector() && !isOperationLegalOrCustom(ISD::VSELECT, VT)) 8127 return DAG.UnrollVectorOp(Node); 8128 8129 unsigned BitWidth = LHS.getScalarValueSizeInBits(); 8130 EVT BoolVT = getSetCCResultType(DAG.getDataLayout(), *DAG.getContext(), VT); 8131 SDValue Result = DAG.getNode(OverflowOp, dl, DAG.getVTList(VT, BoolVT), LHS, RHS); 8132 SDValue SumDiff = Result.getValue(0); 8133 SDValue Overflow = Result.getValue(1); 8134 SDValue Zero = DAG.getConstant(0, dl, VT); 8135 SDValue AllOnes = DAG.getAllOnesConstant(dl, VT); 8136 8137 if (Opcode == ISD::UADDSAT) { 8138 if (getBooleanContents(VT) == ZeroOrNegativeOneBooleanContent) { 8139 // (LHS + RHS) | OverflowMask 8140 SDValue OverflowMask = DAG.getSExtOrTrunc(Overflow, dl, VT); 8141 return DAG.getNode(ISD::OR, dl, VT, SumDiff, OverflowMask); 8142 } 8143 // Overflow ? 0xffff.... : (LHS + RHS) 8144 return DAG.getSelect(dl, VT, Overflow, AllOnes, SumDiff); 8145 } 8146 8147 if (Opcode == ISD::USUBSAT) { 8148 if (getBooleanContents(VT) == ZeroOrNegativeOneBooleanContent) { 8149 // (LHS - RHS) & ~OverflowMask 8150 SDValue OverflowMask = DAG.getSExtOrTrunc(Overflow, dl, VT); 8151 SDValue Not = DAG.getNOT(dl, OverflowMask, VT); 8152 return DAG.getNode(ISD::AND, dl, VT, SumDiff, Not); 8153 } 8154 // Overflow ? 0 : (LHS - RHS) 8155 return DAG.getSelect(dl, VT, Overflow, Zero, SumDiff); 8156 } 8157 8158 // Overflow ? 
(SumDiff >> BW) ^ MinVal : SumDiff 8159 APInt MinVal = APInt::getSignedMinValue(BitWidth); 8160 SDValue SatMin = DAG.getConstant(MinVal, dl, VT); 8161 SDValue Shift = DAG.getNode(ISD::SRA, dl, VT, SumDiff, 8162 DAG.getConstant(BitWidth - 1, dl, VT)); 8163 Result = DAG.getNode(ISD::XOR, dl, VT, Shift, SatMin); 8164 return DAG.getSelect(dl, VT, Overflow, Result, SumDiff); 8165 } 8166 8167 SDValue TargetLowering::expandShlSat(SDNode *Node, SelectionDAG &DAG) const { 8168 unsigned Opcode = Node->getOpcode(); 8169 bool IsSigned = Opcode == ISD::SSHLSAT; 8170 SDValue LHS = Node->getOperand(0); 8171 SDValue RHS = Node->getOperand(1); 8172 EVT VT = LHS.getValueType(); 8173 SDLoc dl(Node); 8174 8175 assert((Node->getOpcode() == ISD::SSHLSAT || 8176 Node->getOpcode() == ISD::USHLSAT) && 8177 "Expected a SHLSAT opcode"); 8178 assert(VT == RHS.getValueType() && "Expected operands to be the same type"); 8179 assert(VT.isInteger() && "Expected operands to be integers"); 8180 8181 // If LHS != (LHS << RHS) >> RHS, we have overflow and must saturate. 8182 8183 unsigned BW = VT.getScalarSizeInBits(); 8184 SDValue Result = DAG.getNode(ISD::SHL, dl, VT, LHS, RHS); 8185 SDValue Orig = 8186 DAG.getNode(IsSigned ? ISD::SRA : ISD::SRL, dl, VT, Result, RHS); 8187 8188 SDValue SatVal; 8189 if (IsSigned) { 8190 SDValue SatMin = DAG.getConstant(APInt::getSignedMinValue(BW), dl, VT); 8191 SDValue SatMax = DAG.getConstant(APInt::getSignedMaxValue(BW), dl, VT); 8192 SatVal = DAG.getSelectCC(dl, LHS, DAG.getConstant(0, dl, VT), 8193 SatMin, SatMax, ISD::SETLT); 8194 } else { 8195 SatVal = DAG.getConstant(APInt::getMaxValue(BW), dl, VT); 8196 } 8197 Result = DAG.getSelectCC(dl, LHS, Orig, SatVal, Result, ISD::SETNE); 8198 8199 return Result; 8200 } 8201 8202 SDValue 8203 TargetLowering::expandFixedPointMul(SDNode *Node, SelectionDAG &DAG) const { 8204 assert((Node->getOpcode() == ISD::SMULFIX || 8205 Node->getOpcode() == ISD::UMULFIX || 8206 Node->getOpcode() == ISD::SMULFIXSAT || 8207 Node->getOpcode() == ISD::UMULFIXSAT) && 8208 "Expected a fixed point multiplication opcode"); 8209 8210 SDLoc dl(Node); 8211 SDValue LHS = Node->getOperand(0); 8212 SDValue RHS = Node->getOperand(1); 8213 EVT VT = LHS.getValueType(); 8214 unsigned Scale = Node->getConstantOperandVal(2); 8215 bool Saturating = (Node->getOpcode() == ISD::SMULFIXSAT || 8216 Node->getOpcode() == ISD::UMULFIXSAT); 8217 bool Signed = (Node->getOpcode() == ISD::SMULFIX || 8218 Node->getOpcode() == ISD::SMULFIXSAT); 8219 EVT BoolVT = getSetCCResultType(DAG.getDataLayout(), *DAG.getContext(), VT); 8220 unsigned VTSize = VT.getScalarSizeInBits(); 8221 8222 if (!Scale) { 8223 // [us]mul.fix(a, b, 0) -> mul(a, b) 8224 if (!Saturating) { 8225 if (isOperationLegalOrCustom(ISD::MUL, VT)) 8226 return DAG.getNode(ISD::MUL, dl, VT, LHS, RHS); 8227 } else if (Signed && isOperationLegalOrCustom(ISD::SMULO, VT)) { 8228 SDValue Result = 8229 DAG.getNode(ISD::SMULO, dl, DAG.getVTList(VT, BoolVT), LHS, RHS); 8230 SDValue Product = Result.getValue(0); 8231 SDValue Overflow = Result.getValue(1); 8232 SDValue Zero = DAG.getConstant(0, dl, VT); 8233 8234 APInt MinVal = APInt::getSignedMinValue(VTSize); 8235 APInt MaxVal = APInt::getSignedMaxValue(VTSize); 8236 SDValue SatMin = DAG.getConstant(MinVal, dl, VT); 8237 SDValue SatMax = DAG.getConstant(MaxVal, dl, VT); 8238 // Xor the inputs, if resulting sign bit is 0 the product will be 8239 // positive, else negative. 
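// (For example, with i8 operands (-100) * 3 = -300 overflows; the sign bit of
// LHS ^ RHS is set, so the expansion below saturates to SatMin (-128) rather
// than SatMax.)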
8240 SDValue Xor = DAG.getNode(ISD::XOR, dl, VT, LHS, RHS); 8241 SDValue ProdNeg = DAG.getSetCC(dl, BoolVT, Xor, Zero, ISD::SETLT); 8242 Result = DAG.getSelect(dl, VT, ProdNeg, SatMin, SatMax); 8243 return DAG.getSelect(dl, VT, Overflow, Result, Product); 8244 } else if (!Signed && isOperationLegalOrCustom(ISD::UMULO, VT)) { 8245 SDValue Result = 8246 DAG.getNode(ISD::UMULO, dl, DAG.getVTList(VT, BoolVT), LHS, RHS); 8247 SDValue Product = Result.getValue(0); 8248 SDValue Overflow = Result.getValue(1); 8249 8250 APInt MaxVal = APInt::getMaxValue(VTSize); 8251 SDValue SatMax = DAG.getConstant(MaxVal, dl, VT); 8252 return DAG.getSelect(dl, VT, Overflow, SatMax, Product); 8253 } 8254 } 8255 8256 assert(((Signed && Scale < VTSize) || (!Signed && Scale <= VTSize)) && 8257 "Expected scale to be less than the number of bits if signed or at " 8258 "most the number of bits if unsigned."); 8259 assert(LHS.getValueType() == RHS.getValueType() && 8260 "Expected both operands to be the same type"); 8261 8262 // Get the upper and lower bits of the result. 8263 SDValue Lo, Hi; 8264 unsigned LoHiOp = Signed ? ISD::SMUL_LOHI : ISD::UMUL_LOHI; 8265 unsigned HiOp = Signed ? ISD::MULHS : ISD::MULHU; 8266 if (isOperationLegalOrCustom(LoHiOp, VT)) { 8267 SDValue Result = DAG.getNode(LoHiOp, dl, DAG.getVTList(VT, VT), LHS, RHS); 8268 Lo = Result.getValue(0); 8269 Hi = Result.getValue(1); 8270 } else if (isOperationLegalOrCustom(HiOp, VT)) { 8271 Lo = DAG.getNode(ISD::MUL, dl, VT, LHS, RHS); 8272 Hi = DAG.getNode(HiOp, dl, VT, LHS, RHS); 8273 } else if (VT.isVector()) { 8274 return SDValue(); 8275 } else { 8276 report_fatal_error("Unable to expand fixed point multiplication."); 8277 } 8278 8279 if (Scale == VTSize) 8280 // Result is just the top half since we'd be shifting by the width of the 8281 // operand. Overflow impossible so this works for both UMULFIX and 8282 // UMULFIXSAT. 8283 return Hi; 8284 8285 // The result will need to be shifted right by the scale since both operands 8286 // are scaled. The result is given to us in 2 halves, so we only want part of 8287 // both in the result. 8288 EVT ShiftTy = getShiftAmountTy(VT, DAG.getDataLayout()); 8289 SDValue Result = DAG.getNode(ISD::FSHR, dl, VT, Hi, Lo, 8290 DAG.getConstant(Scale, dl, ShiftTy)); 8291 if (!Saturating) 8292 return Result; 8293 8294 if (!Signed) { 8295 // Unsigned overflow happened if the upper (VTSize - Scale) bits (of the 8296 // widened multiplication) aren't all zeroes. 8297 8298 // Saturate to max if ((Hi >> Scale) != 0), 8299 // which is the same as if (Hi > ((1 << Scale) - 1)) 8300 APInt MaxVal = APInt::getMaxValue(VTSize); 8301 SDValue LowMask = DAG.getConstant(APInt::getLowBitsSet(VTSize, Scale), 8302 dl, VT); 8303 Result = DAG.getSelectCC(dl, Hi, LowMask, 8304 DAG.getConstant(MaxVal, dl, VT), Result, 8305 ISD::SETUGT); 8306 8307 return Result; 8308 } 8309 8310 // Signed overflow happened if the upper (VTSize - Scale + 1) bits (of the 8311 // widened multiplication) aren't all ones or all zeroes. 8312 8313 SDValue SatMin = DAG.getConstant(APInt::getSignedMinValue(VTSize), dl, VT); 8314 SDValue SatMax = DAG.getConstant(APInt::getSignedMaxValue(VTSize), dl, VT); 8315 8316 if (Scale == 0) { 8317 SDValue Sign = DAG.getNode(ISD::SRA, dl, VT, Lo, 8318 DAG.getConstant(VTSize - 1, dl, ShiftTy)); 8319 SDValue Overflow = DAG.getSetCC(dl, BoolVT, Hi, Sign, ISD::SETNE); 8320 // Saturated to SatMin if wide product is negative, and SatMax if wide 8321 // product is positive ... 
8322 SDValue Zero = DAG.getConstant(0, dl, VT); 8323 SDValue ResultIfOverflow = DAG.getSelectCC(dl, Hi, Zero, SatMin, SatMax, 8324 ISD::SETLT); 8325 // ... but only if we overflowed. 8326 return DAG.getSelect(dl, VT, Overflow, ResultIfOverflow, Result); 8327 } 8328 8329 // We handled Scale==0 above so all the bits to examine is in Hi. 8330 8331 // Saturate to max if ((Hi >> (Scale - 1)) > 0), 8332 // which is the same as if (Hi > (1 << (Scale - 1)) - 1) 8333 SDValue LowMask = DAG.getConstant(APInt::getLowBitsSet(VTSize, Scale - 1), 8334 dl, VT); 8335 Result = DAG.getSelectCC(dl, Hi, LowMask, SatMax, Result, ISD::SETGT); 8336 // Saturate to min if (Hi >> (Scale - 1)) < -1), 8337 // which is the same as if (HI < (-1 << (Scale - 1)) 8338 SDValue HighMask = 8339 DAG.getConstant(APInt::getHighBitsSet(VTSize, VTSize - Scale + 1), 8340 dl, VT); 8341 Result = DAG.getSelectCC(dl, Hi, HighMask, SatMin, Result, ISD::SETLT); 8342 return Result; 8343 } 8344 8345 SDValue 8346 TargetLowering::expandFixedPointDiv(unsigned Opcode, const SDLoc &dl, 8347 SDValue LHS, SDValue RHS, 8348 unsigned Scale, SelectionDAG &DAG) const { 8349 assert((Opcode == ISD::SDIVFIX || Opcode == ISD::SDIVFIXSAT || 8350 Opcode == ISD::UDIVFIX || Opcode == ISD::UDIVFIXSAT) && 8351 "Expected a fixed point division opcode"); 8352 8353 EVT VT = LHS.getValueType(); 8354 bool Signed = Opcode == ISD::SDIVFIX || Opcode == ISD::SDIVFIXSAT; 8355 bool Saturating = Opcode == ISD::SDIVFIXSAT || Opcode == ISD::UDIVFIXSAT; 8356 EVT BoolVT = getSetCCResultType(DAG.getDataLayout(), *DAG.getContext(), VT); 8357 8358 // If there is enough room in the type to upscale the LHS or downscale the 8359 // RHS before the division, we can perform it in this type without having to 8360 // resize. For signed operations, the LHS headroom is the number of 8361 // redundant sign bits, and for unsigned ones it is the number of zeroes. 8362 // The headroom for the RHS is the number of trailing zeroes. 8363 unsigned LHSLead = Signed ? DAG.ComputeNumSignBits(LHS) - 1 8364 : DAG.computeKnownBits(LHS).countMinLeadingZeros(); 8365 unsigned RHSTrail = DAG.computeKnownBits(RHS).countMinTrailingZeros(); 8366 8367 // For signed saturating operations, we need to be able to detect true integer 8368 // division overflow; that is, when you have MIN / -EPS. However, this 8369 // is undefined behavior and if we emit divisions that could take such 8370 // values it may cause undesired behavior (arithmetic exceptions on x86, for 8371 // example). 8372 // Avoid this by requiring an extra bit so that we never get this case. 8373 // FIXME: This is a bit unfortunate as it means that for an 8-bit 7-scale 8374 // signed saturating division, we need to emit a whopping 32-bit division. 8375 if (LHSLead + RHSTrail < Scale + (unsigned)(Saturating && Signed)) 8376 return SDValue(); 8377 8378 unsigned LHSShift = std::min(LHSLead, Scale); 8379 unsigned RHSShift = Scale - LHSShift; 8380 8381 // At this point, we know that if we shift the LHS up by LHSShift and the 8382 // RHS down by RHSShift, we can emit a regular division with a final scaling 8383 // factor of Scale. 8384 8385 EVT ShiftTy = getShiftAmountTy(VT, DAG.getDataLayout()); 8386 if (LHSShift) 8387 LHS = DAG.getNode(ISD::SHL, dl, VT, LHS, 8388 DAG.getConstant(LHSShift, dl, ShiftTy)); 8389 if (RHSShift) 8390 RHS = DAG.getNode(Signed ? 
ISD::SRA : ISD::SRL, dl, VT, RHS, 8391 DAG.getConstant(RHSShift, dl, ShiftTy)); 8392 8393 SDValue Quot; 8394 if (Signed) { 8395 // For signed operations, if the resulting quotient is negative and the 8396 // remainder is nonzero, subtract 1 from the quotient to round towards 8397 // negative infinity. 8398 SDValue Rem; 8399 // FIXME: Ideally we would always produce an SDIVREM here, but if the 8400 // type isn't legal, SDIVREM cannot be expanded. There is no reason why 8401 // we couldn't just form a libcall, but the type legalizer doesn't do it. 8402 if (isTypeLegal(VT) && 8403 isOperationLegalOrCustom(ISD::SDIVREM, VT)) { 8404 Quot = DAG.getNode(ISD::SDIVREM, dl, 8405 DAG.getVTList(VT, VT), 8406 LHS, RHS); 8407 Rem = Quot.getValue(1); 8408 Quot = Quot.getValue(0); 8409 } else { 8410 Quot = DAG.getNode(ISD::SDIV, dl, VT, 8411 LHS, RHS); 8412 Rem = DAG.getNode(ISD::SREM, dl, VT, 8413 LHS, RHS); 8414 } 8415 SDValue Zero = DAG.getConstant(0, dl, VT); 8416 SDValue RemNonZero = DAG.getSetCC(dl, BoolVT, Rem, Zero, ISD::SETNE); 8417 SDValue LHSNeg = DAG.getSetCC(dl, BoolVT, LHS, Zero, ISD::SETLT); 8418 SDValue RHSNeg = DAG.getSetCC(dl, BoolVT, RHS, Zero, ISD::SETLT); 8419 SDValue QuotNeg = DAG.getNode(ISD::XOR, dl, BoolVT, LHSNeg, RHSNeg); 8420 SDValue Sub1 = DAG.getNode(ISD::SUB, dl, VT, Quot, 8421 DAG.getConstant(1, dl, VT)); 8422 Quot = DAG.getSelect(dl, VT, 8423 DAG.getNode(ISD::AND, dl, BoolVT, RemNonZero, QuotNeg), 8424 Sub1, Quot); 8425 } else 8426 Quot = DAG.getNode(ISD::UDIV, dl, VT, 8427 LHS, RHS); 8428 8429 return Quot; 8430 } 8431 8432 void TargetLowering::expandUADDSUBO( 8433 SDNode *Node, SDValue &Result, SDValue &Overflow, SelectionDAG &DAG) const { 8434 SDLoc dl(Node); 8435 SDValue LHS = Node->getOperand(0); 8436 SDValue RHS = Node->getOperand(1); 8437 bool IsAdd = Node->getOpcode() == ISD::UADDO; 8438 8439 // If ADD/SUBCARRY is legal, use that instead. 8440 unsigned OpcCarry = IsAdd ? ISD::ADDCARRY : ISD::SUBCARRY; 8441 if (isOperationLegalOrCustom(OpcCarry, Node->getValueType(0))) { 8442 SDValue CarryIn = DAG.getConstant(0, dl, Node->getValueType(1)); 8443 SDValue NodeCarry = DAG.getNode(OpcCarry, dl, Node->getVTList(), 8444 { LHS, RHS, CarryIn }); 8445 Result = SDValue(NodeCarry.getNode(), 0); 8446 Overflow = SDValue(NodeCarry.getNode(), 1); 8447 return; 8448 } 8449 8450 Result = DAG.getNode(IsAdd ? ISD::ADD : ISD::SUB, dl, 8451 LHS.getValueType(), LHS, RHS); 8452 8453 EVT ResultType = Node->getValueType(1); 8454 EVT SetCCType = getSetCCResultType( 8455 DAG.getDataLayout(), *DAG.getContext(), Node->getValueType(0)); 8456 ISD::CondCode CC = IsAdd ? ISD::SETULT : ISD::SETUGT; 8457 SDValue SetCC = DAG.getSetCC(dl, SetCCType, Result, LHS, CC); 8458 Overflow = DAG.getBoolExtOrTrunc(SetCC, dl, ResultType, ResultType); 8459 } 8460 8461 void TargetLowering::expandSADDSUBO( 8462 SDNode *Node, SDValue &Result, SDValue &Overflow, SelectionDAG &DAG) const { 8463 SDLoc dl(Node); 8464 SDValue LHS = Node->getOperand(0); 8465 SDValue RHS = Node->getOperand(1); 8466 bool IsAdd = Node->getOpcode() == ISD::SADDO; 8467 8468 Result = DAG.getNode(IsAdd ? ISD::ADD : ISD::SUB, dl, 8469 LHS.getValueType(), LHS, RHS); 8470 8471 EVT ResultType = Node->getValueType(1); 8472 EVT OType = getSetCCResultType( 8473 DAG.getDataLayout(), *DAG.getContext(), Node->getValueType(0)); 8474 8475 // If SADDSAT/SSUBSAT is legal, compare results to detect overflow. 8476 unsigned OpcSat = IsAdd ? 
ISD::SADDSAT : ISD::SSUBSAT; 8477 if (isOperationLegal(OpcSat, LHS.getValueType())) { 8478 SDValue Sat = DAG.getNode(OpcSat, dl, LHS.getValueType(), LHS, RHS); 8479 SDValue SetCC = DAG.getSetCC(dl, OType, Result, Sat, ISD::SETNE); 8480 Overflow = DAG.getBoolExtOrTrunc(SetCC, dl, ResultType, ResultType); 8481 return; 8482 } 8483 8484 SDValue Zero = DAG.getConstant(0, dl, LHS.getValueType()); 8485 8486 // For an addition, the result should be less than one of the operands (LHS) 8487 // if and only if the other operand (RHS) is negative, otherwise there will 8488 // be overflow. 8489 // For a subtraction, the result should be less than one of the operands 8490 // (LHS) if and only if the other operand (RHS) is (non-zero) positive, 8491 // otherwise there will be overflow. 8492 SDValue ResultLowerThanLHS = DAG.getSetCC(dl, OType, Result, LHS, ISD::SETLT); 8493 SDValue ConditionRHS = 8494 DAG.getSetCC(dl, OType, RHS, Zero, IsAdd ? ISD::SETLT : ISD::SETGT); 8495 8496 Overflow = DAG.getBoolExtOrTrunc( 8497 DAG.getNode(ISD::XOR, dl, OType, ConditionRHS, ResultLowerThanLHS), dl, 8498 ResultType, ResultType); 8499 } 8500 8501 bool TargetLowering::expandMULO(SDNode *Node, SDValue &Result, 8502 SDValue &Overflow, SelectionDAG &DAG) const { 8503 SDLoc dl(Node); 8504 EVT VT = Node->getValueType(0); 8505 EVT SetCCVT = getSetCCResultType(DAG.getDataLayout(), *DAG.getContext(), VT); 8506 SDValue LHS = Node->getOperand(0); 8507 SDValue RHS = Node->getOperand(1); 8508 bool isSigned = Node->getOpcode() == ISD::SMULO; 8509 8510 // For power-of-two multiplications we can use a simpler shift expansion. 8511 if (ConstantSDNode *RHSC = isConstOrConstSplat(RHS)) { 8512 const APInt &C = RHSC->getAPIntValue(); 8513 // mulo(X, 1 << S) -> { X << S, (X << S) >> S != X } 8514 if (C.isPowerOf2()) { 8515 // smulo(x, signed_min) is same as umulo(x, signed_min). 8516 bool UseArithShift = isSigned && !C.isMinSignedValue(); 8517 EVT ShiftAmtTy = getShiftAmountTy(VT, DAG.getDataLayout()); 8518 SDValue ShiftAmt = DAG.getConstant(C.logBase2(), dl, ShiftAmtTy); 8519 Result = DAG.getNode(ISD::SHL, dl, VT, LHS, ShiftAmt); 8520 Overflow = DAG.getSetCC(dl, SetCCVT, 8521 DAG.getNode(UseArithShift ? 
ISD::SRA : ISD::SRL, 8522 dl, VT, Result, ShiftAmt), 8523 LHS, ISD::SETNE); 8524 return true; 8525 } 8526 } 8527 8528 EVT WideVT = EVT::getIntegerVT(*DAG.getContext(), VT.getScalarSizeInBits() * 2); 8529 if (VT.isVector()) 8530 WideVT = 8531 EVT::getVectorVT(*DAG.getContext(), WideVT, VT.getVectorElementCount()); 8532 8533 SDValue BottomHalf; 8534 SDValue TopHalf; 8535 static const unsigned Ops[2][3] = 8536 { { ISD::MULHU, ISD::UMUL_LOHI, ISD::ZERO_EXTEND }, 8537 { ISD::MULHS, ISD::SMUL_LOHI, ISD::SIGN_EXTEND }}; 8538 if (isOperationLegalOrCustom(Ops[isSigned][0], VT)) { 8539 BottomHalf = DAG.getNode(ISD::MUL, dl, VT, LHS, RHS); 8540 TopHalf = DAG.getNode(Ops[isSigned][0], dl, VT, LHS, RHS); 8541 } else if (isOperationLegalOrCustom(Ops[isSigned][1], VT)) { 8542 BottomHalf = DAG.getNode(Ops[isSigned][1], dl, DAG.getVTList(VT, VT), LHS, 8543 RHS); 8544 TopHalf = BottomHalf.getValue(1); 8545 } else if (isTypeLegal(WideVT)) { 8546 LHS = DAG.getNode(Ops[isSigned][2], dl, WideVT, LHS); 8547 RHS = DAG.getNode(Ops[isSigned][2], dl, WideVT, RHS); 8548 SDValue Mul = DAG.getNode(ISD::MUL, dl, WideVT, LHS, RHS); 8549 BottomHalf = DAG.getNode(ISD::TRUNCATE, dl, VT, Mul); 8550 SDValue ShiftAmt = DAG.getConstant(VT.getScalarSizeInBits(), dl, 8551 getShiftAmountTy(WideVT, DAG.getDataLayout())); 8552 TopHalf = DAG.getNode(ISD::TRUNCATE, dl, VT, 8553 DAG.getNode(ISD::SRL, dl, WideVT, Mul, ShiftAmt)); 8554 } else { 8555 if (VT.isVector()) 8556 return false; 8557 8558 // We can fall back to a libcall with an illegal type for the MUL if we 8559 // have a libcall big enough. 8560 // Also, we can fall back to a division in some cases, but that's a big 8561 // performance hit in the general case. 8562 RTLIB::Libcall LC = RTLIB::UNKNOWN_LIBCALL; 8563 if (WideVT == MVT::i16) 8564 LC = RTLIB::MUL_I16; 8565 else if (WideVT == MVT::i32) 8566 LC = RTLIB::MUL_I32; 8567 else if (WideVT == MVT::i64) 8568 LC = RTLIB::MUL_I64; 8569 else if (WideVT == MVT::i128) 8570 LC = RTLIB::MUL_I128; 8571 assert(LC != RTLIB::UNKNOWN_LIBCALL && "Cannot expand this operation!"); 8572 8573 SDValue HiLHS; 8574 SDValue HiRHS; 8575 if (isSigned) { 8576 // The high part is obtained by SRA'ing all but one of the bits of low 8577 // part. 8578 unsigned LoSize = VT.getFixedSizeInBits(); 8579 HiLHS = 8580 DAG.getNode(ISD::SRA, dl, VT, LHS, 8581 DAG.getConstant(LoSize - 1, dl, 8582 getPointerTy(DAG.getDataLayout()))); 8583 HiRHS = 8584 DAG.getNode(ISD::SRA, dl, VT, RHS, 8585 DAG.getConstant(LoSize - 1, dl, 8586 getPointerTy(DAG.getDataLayout()))); 8587 } else { 8588 HiLHS = DAG.getConstant(0, dl, VT); 8589 HiRHS = DAG.getConstant(0, dl, VT); 8590 } 8591 8592 // Here we're passing the 2 arguments explicitly as 4 arguments that are 8593 // pre-lowered to the correct types. This all depends upon WideVT not 8594 // being a legal type for the architecture and thus has to be split to 8595 // two arguments. 8596 SDValue Ret; 8597 TargetLowering::MakeLibCallOptions CallOptions; 8598 CallOptions.setSExt(isSigned); 8599 CallOptions.setIsPostTypeLegalization(true); 8600 if (shouldSplitFunctionArgumentsAsLittleEndian(DAG.getDataLayout())) { 8601 // Halves of WideVT are packed into registers in different order 8602 // depending on platform endianness. This is usually handled by 8603 // the C calling convention, but we can't defer to it in 8604 // the legalizer. 
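// For instance, when an i64 WideVT has to be split for a 32-bit target, the
// little-endian ordering below passes each value as {Lo, Hi}, i.e.
// {LHS, HiLHS, RHS, HiRHS}, whereas the big-endian ordering passes {Hi, Lo},
// i.e. {HiLHS, LHS, HiRHS, RHS}.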
8605 SDValue Args[] = { LHS, HiLHS, RHS, HiRHS }; 8606 Ret = makeLibCall(DAG, LC, WideVT, Args, CallOptions, dl).first; 8607 } else { 8608 SDValue Args[] = { HiLHS, LHS, HiRHS, RHS }; 8609 Ret = makeLibCall(DAG, LC, WideVT, Args, CallOptions, dl).first; 8610 } 8611 assert(Ret.getOpcode() == ISD::MERGE_VALUES && 8612 "Ret value is a collection of constituent nodes holding result."); 8613 if (DAG.getDataLayout().isLittleEndian()) { 8614 // Same as above. 8615 BottomHalf = Ret.getOperand(0); 8616 TopHalf = Ret.getOperand(1); 8617 } else { 8618 BottomHalf = Ret.getOperand(1); 8619 TopHalf = Ret.getOperand(0); 8620 } 8621 } 8622 8623 Result = BottomHalf; 8624 if (isSigned) { 8625 SDValue ShiftAmt = DAG.getConstant( 8626 VT.getScalarSizeInBits() - 1, dl, 8627 getShiftAmountTy(BottomHalf.getValueType(), DAG.getDataLayout())); 8628 SDValue Sign = DAG.getNode(ISD::SRA, dl, VT, BottomHalf, ShiftAmt); 8629 Overflow = DAG.getSetCC(dl, SetCCVT, TopHalf, Sign, ISD::SETNE); 8630 } else { 8631 Overflow = DAG.getSetCC(dl, SetCCVT, TopHalf, 8632 DAG.getConstant(0, dl, VT), ISD::SETNE); 8633 } 8634 8635 // Truncate the result if SetCC returns a larger type than needed. 8636 EVT RType = Node->getValueType(1); 8637 if (RType.bitsLT(Overflow.getValueType())) 8638 Overflow = DAG.getNode(ISD::TRUNCATE, dl, RType, Overflow); 8639 8640 assert(RType.getSizeInBits() == Overflow.getValueSizeInBits() && 8641 "Unexpected result type for S/UMULO legalization"); 8642 return true; 8643 } 8644 8645 SDValue TargetLowering::expandVecReduce(SDNode *Node, SelectionDAG &DAG) const { 8646 SDLoc dl(Node); 8647 unsigned BaseOpcode = ISD::getVecReduceBaseOpcode(Node->getOpcode()); 8648 SDValue Op = Node->getOperand(0); 8649 EVT VT = Op.getValueType(); 8650 8651 if (VT.isScalableVector()) 8652 report_fatal_error( 8653 "Expanding reductions for scalable vectors is undefined."); 8654 8655 // Try to use a shuffle reduction for power of two vectors. 8656 if (VT.isPow2VectorType()) { 8657 while (VT.getVectorNumElements() > 1) { 8658 EVT HalfVT = VT.getHalfNumVectorElementsVT(*DAG.getContext()); 8659 if (!isOperationLegalOrCustom(BaseOpcode, HalfVT)) 8660 break; 8661 8662 SDValue Lo, Hi; 8663 std::tie(Lo, Hi) = DAG.SplitVector(Op, dl); 8664 Op = DAG.getNode(BaseOpcode, dl, HalfVT, Lo, Hi); 8665 VT = HalfVT; 8666 } 8667 } 8668 8669 EVT EltVT = VT.getVectorElementType(); 8670 unsigned NumElts = VT.getVectorNumElements(); 8671 8672 SmallVector<SDValue, 8> Ops; 8673 DAG.ExtractVectorElements(Op, Ops, 0, NumElts); 8674 8675 SDValue Res = Ops[0]; 8676 for (unsigned i = 1; i < NumElts; i++) 8677 Res = DAG.getNode(BaseOpcode, dl, EltVT, Res, Ops[i], Node->getFlags()); 8678 8679 // Result type may be wider than element type. 
8680 if (EltVT != Node->getValueType(0)) 8681 Res = DAG.getNode(ISD::ANY_EXTEND, dl, Node->getValueType(0), Res); 8682 return Res; 8683 } 8684 8685 SDValue TargetLowering::expandVecReduceSeq(SDNode *Node, SelectionDAG &DAG) const { 8686 SDLoc dl(Node); 8687 SDValue AccOp = Node->getOperand(0); 8688 SDValue VecOp = Node->getOperand(1); 8689 SDNodeFlags Flags = Node->getFlags(); 8690 8691 EVT VT = VecOp.getValueType(); 8692 EVT EltVT = VT.getVectorElementType(); 8693 8694 if (VT.isScalableVector()) 8695 report_fatal_error( 8696 "Expanding reductions for scalable vectors is undefined."); 8697 8698 unsigned NumElts = VT.getVectorNumElements(); 8699 8700 SmallVector<SDValue, 8> Ops; 8701 DAG.ExtractVectorElements(VecOp, Ops, 0, NumElts); 8702 8703 unsigned BaseOpcode = ISD::getVecReduceBaseOpcode(Node->getOpcode()); 8704 8705 SDValue Res = AccOp; 8706 for (unsigned i = 0; i < NumElts; i++) 8707 Res = DAG.getNode(BaseOpcode, dl, EltVT, Res, Ops[i], Flags); 8708 8709 return Res; 8710 } 8711 8712 bool TargetLowering::expandREM(SDNode *Node, SDValue &Result, 8713 SelectionDAG &DAG) const { 8714 EVT VT = Node->getValueType(0); 8715 SDLoc dl(Node); 8716 bool isSigned = Node->getOpcode() == ISD::SREM; 8717 unsigned DivOpc = isSigned ? ISD::SDIV : ISD::UDIV; 8718 unsigned DivRemOpc = isSigned ? ISD::SDIVREM : ISD::UDIVREM; 8719 SDValue Dividend = Node->getOperand(0); 8720 SDValue Divisor = Node->getOperand(1); 8721 if (isOperationLegalOrCustom(DivRemOpc, VT)) { 8722 SDVTList VTs = DAG.getVTList(VT, VT); 8723 Result = DAG.getNode(DivRemOpc, dl, VTs, Dividend, Divisor).getValue(1); 8724 return true; 8725 } 8726 if (isOperationLegalOrCustom(DivOpc, VT)) { 8727 // X % Y -> X-X/Y*Y 8728 SDValue Divide = DAG.getNode(DivOpc, dl, VT, Dividend, Divisor); 8729 SDValue Mul = DAG.getNode(ISD::MUL, dl, VT, Divide, Divisor); 8730 Result = DAG.getNode(ISD::SUB, dl, VT, Dividend, Mul); 8731 return true; 8732 } 8733 return false; 8734 } 8735 8736 SDValue TargetLowering::expandFP_TO_INT_SAT(SDNode *Node, 8737 SelectionDAG &DAG) const { 8738 bool IsSigned = Node->getOpcode() == ISD::FP_TO_SINT_SAT; 8739 SDLoc dl(SDValue(Node, 0)); 8740 SDValue Src = Node->getOperand(0); 8741 8742 // DstVT is the result type, while SatVT is the size to which we saturate 8743 EVT SrcVT = Src.getValueType(); 8744 EVT DstVT = Node->getValueType(0); 8745 8746 EVT SatVT = cast<VTSDNode>(Node->getOperand(1))->getVT(); 8747 unsigned SatWidth = SatVT.getScalarSizeInBits(); 8748 unsigned DstWidth = DstVT.getScalarSizeInBits(); 8749 assert(SatWidth <= DstWidth && 8750 "Expected saturation width smaller than result width"); 8751 8752 // Determine minimum and maximum integer values and their corresponding 8753 // floating-point values. 8754 APInt MinInt, MaxInt; 8755 if (IsSigned) { 8756 MinInt = APInt::getSignedMinValue(SatWidth).sextOrSelf(DstWidth); 8757 MaxInt = APInt::getSignedMaxValue(SatWidth).sextOrSelf(DstWidth); 8758 } else { 8759 MinInt = APInt::getMinValue(SatWidth).zextOrSelf(DstWidth); 8760 MaxInt = APInt::getMaxValue(SatWidth).zextOrSelf(DstWidth); 8761 } 8762 8763 // We cannot risk emitting FP_TO_XINT nodes with a source VT of f16, as 8764 // libcall emission cannot handle this. Large result types will fail. 
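// Promoting to f32 first keeps the rest of the expansion unchanged; e.g. a
// saturating f16-to-i32 conversion is simply carried out as the corresponding
// f32 expansion below (the extension is exact, so the bounds are unaffected).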
8765 if (SrcVT == MVT::f16) { 8766 Src = DAG.getNode(ISD::FP_EXTEND, dl, MVT::f32, Src); 8767 SrcVT = Src.getValueType(); 8768 } 8769 8770 APFloat MinFloat(DAG.EVTToAPFloatSemantics(SrcVT)); 8771 APFloat MaxFloat(DAG.EVTToAPFloatSemantics(SrcVT)); 8772 8773 APFloat::opStatus MinStatus = 8774 MinFloat.convertFromAPInt(MinInt, IsSigned, APFloat::rmTowardZero); 8775 APFloat::opStatus MaxStatus = 8776 MaxFloat.convertFromAPInt(MaxInt, IsSigned, APFloat::rmTowardZero); 8777 bool AreExactFloatBounds = !(MinStatus & APFloat::opStatus::opInexact) && 8778 !(MaxStatus & APFloat::opStatus::opInexact); 8779 8780 SDValue MinFloatNode = DAG.getConstantFP(MinFloat, dl, SrcVT); 8781 SDValue MaxFloatNode = DAG.getConstantFP(MaxFloat, dl, SrcVT); 8782 8783 // If the integer bounds are exactly representable as floats and min/max are 8784 // legal, emit a min+max+fptoi sequence. Otherwise we have to use a sequence 8785 // of comparisons and selects. 8786 bool MinMaxLegal = isOperationLegal(ISD::FMINNUM, SrcVT) && 8787 isOperationLegal(ISD::FMAXNUM, SrcVT); 8788 if (AreExactFloatBounds && MinMaxLegal) { 8789 SDValue Clamped = Src; 8790 8791 // Clamp Src by MinFloat from below. If Src is NaN the result is MinFloat. 8792 Clamped = DAG.getNode(ISD::FMAXNUM, dl, SrcVT, Clamped, MinFloatNode); 8793 // Clamp by MaxFloat from above. NaN cannot occur. 8794 Clamped = DAG.getNode(ISD::FMINNUM, dl, SrcVT, Clamped, MaxFloatNode); 8795 // Convert clamped value to integer. 8796 SDValue FpToInt = DAG.getNode(IsSigned ? ISD::FP_TO_SINT : ISD::FP_TO_UINT, 8797 dl, DstVT, Clamped); 8798 8799 // In the unsigned case we're done, because we mapped NaN to MinFloat, 8800 // which will cast to zero. 8801 if (!IsSigned) 8802 return FpToInt; 8803 8804 // Otherwise, select 0 if Src is NaN. 8805 SDValue ZeroInt = DAG.getConstant(0, dl, DstVT); 8806 return DAG.getSelectCC(dl, Src, Src, ZeroInt, FpToInt, 8807 ISD::CondCode::SETUO); 8808 } 8809 8810 SDValue MinIntNode = DAG.getConstant(MinInt, dl, DstVT); 8811 SDValue MaxIntNode = DAG.getConstant(MaxInt, dl, DstVT); 8812 8813 // Result of direct conversion. The assumption here is that the operation is 8814 // non-trapping and it's fine to apply it to an out-of-range value if we 8815 // select it away later. 8816 SDValue FpToInt = 8817 DAG.getNode(IsSigned ? ISD::FP_TO_SINT : ISD::FP_TO_UINT, dl, DstVT, Src); 8818 8819 SDValue Select = FpToInt; 8820 8821 // If Src ULT MinFloat, select MinInt. In particular, this also selects 8822 // MinInt if Src is NaN. 8823 Select = DAG.getSelectCC(dl, Src, MinFloatNode, MinIntNode, Select, 8824 ISD::CondCode::SETULT); 8825 // If Src OGT MaxFloat, select MaxInt. 8826 Select = DAG.getSelectCC(dl, Src, MaxFloatNode, MaxIntNode, Select, 8827 ISD::CondCode::SETOGT); 8828 8829 // In the unsigned case we are done, because we mapped NaN to MinInt, which 8830 // is already zero. 8831 if (!IsSigned) 8832 return Select; 8833 8834 // Otherwise, select 0 if Src is NaN. 
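// (Comparing Src against itself with SETUO below is the usual NaN test: the
// unordered compare is true exactly when Src is NaN.)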
8835 SDValue ZeroInt = DAG.getConstant(0, dl, DstVT); 8836 return DAG.getSelectCC(dl, Src, Src, ZeroInt, Select, ISD::CondCode::SETUO); 8837 } 8838 8839 SDValue TargetLowering::expandVectorSplice(SDNode *Node, 8840 SelectionDAG &DAG) const { 8841 assert(Node->getOpcode() == ISD::VECTOR_SPLICE && "Unexpected opcode!"); 8842 assert(Node->getValueType(0).isScalableVector() && 8843 "Fixed length vector types expected to use SHUFFLE_VECTOR!"); 8844 8845 EVT VT = Node->getValueType(0); 8846 SDValue V1 = Node->getOperand(0); 8847 SDValue V2 = Node->getOperand(1); 8848 int64_t Imm = cast<ConstantSDNode>(Node->getOperand(2))->getSExtValue(); 8849 SDLoc DL(Node); 8850 8851 // Expand through memory thusly: 8852 // Alloca CONCAT_VECTORS_TYPES(V1, V2) Ptr 8853 // Store V1, Ptr 8854 // Store V2, Ptr + sizeof(V1) 8855 // If (Imm < 0) 8856 // TrailingElts = -Imm 8857 // Ptr = Ptr + sizeof(V1) - (TrailingElts * sizeof(VT.Elt)) 8858 // else 8859 // Ptr = Ptr + (Imm * sizeof(VT.Elt)) 8860 // Res = Load Ptr 8861 8862 Align Alignment = DAG.getReducedAlign(VT, /*UseABI=*/false); 8863 8864 EVT MemVT = EVT::getVectorVT(*DAG.getContext(), VT.getVectorElementType(), 8865 VT.getVectorElementCount() * 2); 8866 SDValue StackPtr = DAG.CreateStackTemporary(MemVT.getStoreSize(), Alignment); 8867 EVT PtrVT = StackPtr.getValueType(); 8868 auto &MF = DAG.getMachineFunction(); 8869 auto FrameIndex = cast<FrameIndexSDNode>(StackPtr.getNode())->getIndex(); 8870 auto PtrInfo = MachinePointerInfo::getFixedStack(MF, FrameIndex); 8871 8872 // Store the lo part of CONCAT_VECTORS(V1, V2) 8873 SDValue StoreV1 = DAG.getStore(DAG.getEntryNode(), DL, V1, StackPtr, PtrInfo); 8874 // Store the hi part of CONCAT_VECTORS(V1, V2) 8875 SDValue OffsetToV2 = DAG.getVScale( 8876 DL, PtrVT, 8877 APInt(PtrVT.getFixedSizeInBits(), VT.getStoreSize().getKnownMinSize())); 8878 SDValue StackPtr2 = DAG.getNode(ISD::ADD, DL, PtrVT, StackPtr, OffsetToV2); 8879 SDValue StoreV2 = DAG.getStore(StoreV1, DL, V2, StackPtr2, PtrInfo); 8880 8881 if (Imm >= 0) { 8882 // Load back the required element. getVectorElementPointer takes care of 8883 // clamping the index if it's out-of-bounds. 8884 StackPtr = getVectorElementPointer(DAG, StackPtr, VT, Node->getOperand(2)); 8885 // Load the spliced result 8886 return DAG.getLoad(VT, DL, StoreV2, StackPtr, 8887 MachinePointerInfo::getUnknownStack(MF)); 8888 } 8889 8890 uint64_t TrailingElts = -Imm; 8891 8892 // NOTE: TrailingElts must be clamped so as not to read outside of V1:V2. 8893 TypeSize EltByteSize = VT.getVectorElementType().getStoreSize(); 8894 SDValue TrailingBytes = 8895 DAG.getConstant(TrailingElts * EltByteSize, DL, PtrVT); 8896 8897 if (TrailingElts > VT.getVectorMinNumElements()) { 8898 SDValue VLBytes = DAG.getVScale( 8899 DL, PtrVT, 8900 APInt(PtrVT.getFixedSizeInBits(), VT.getStoreSize().getKnownMinSize())); 8901 TrailingBytes = DAG.getNode(ISD::UMIN, DL, PtrVT, TrailingBytes, VLBytes); 8902 } 8903 8904 // Calculate the start address of the spliced result. 
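// For example, for a <vscale x 4 x i32> splice with Imm = -2, TrailingBytes
// is 8, so the load below starts eight bytes before the start of V2 in the
// stack slot and yields the last two elements of V1 followed by the leading
// elements of V2.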
8905 StackPtr2 = DAG.getNode(ISD::SUB, DL, PtrVT, StackPtr2, TrailingBytes); 8906 8907 // Load the spliced result 8908 return DAG.getLoad(VT, DL, StoreV2, StackPtr2, 8909 MachinePointerInfo::getUnknownStack(MF)); 8910 } 8911 8912 bool TargetLowering::LegalizeSetCCCondCode(SelectionDAG &DAG, EVT VT, 8913 SDValue &LHS, SDValue &RHS, 8914 SDValue &CC, bool &NeedInvert, 8915 const SDLoc &dl, SDValue &Chain, 8916 bool IsSignaling) const { 8917 const TargetLowering &TLI = DAG.getTargetLoweringInfo(); 8918 MVT OpVT = LHS.getSimpleValueType(); 8919 ISD::CondCode CCCode = cast<CondCodeSDNode>(CC)->get(); 8920 NeedInvert = false; 8921 switch (TLI.getCondCodeAction(CCCode, OpVT)) { 8922 default: 8923 llvm_unreachable("Unknown condition code action!"); 8924 case TargetLowering::Legal: 8925 // Nothing to do. 8926 break; 8927 case TargetLowering::Expand: { 8928 ISD::CondCode InvCC = ISD::getSetCCSwappedOperands(CCCode); 8929 if (TLI.isCondCodeLegalOrCustom(InvCC, OpVT)) { 8930 std::swap(LHS, RHS); 8931 CC = DAG.getCondCode(InvCC); 8932 return true; 8933 } 8934 // Swapping operands didn't work. Try inverting the condition. 8935 bool NeedSwap = false; 8936 InvCC = getSetCCInverse(CCCode, OpVT); 8937 if (!TLI.isCondCodeLegalOrCustom(InvCC, OpVT)) { 8938 // If inverting the condition is not enough, try swapping operands 8939 // on top of it. 8940 InvCC = ISD::getSetCCSwappedOperands(InvCC); 8941 NeedSwap = true; 8942 } 8943 if (TLI.isCondCodeLegalOrCustom(InvCC, OpVT)) { 8944 CC = DAG.getCondCode(InvCC); 8945 NeedInvert = true; 8946 if (NeedSwap) 8947 std::swap(LHS, RHS); 8948 return true; 8949 } 8950 8951 ISD::CondCode CC1 = ISD::SETCC_INVALID, CC2 = ISD::SETCC_INVALID; 8952 unsigned Opc = 0; 8953 switch (CCCode) { 8954 default: 8955 llvm_unreachable("Don't know how to expand this condition!"); 8956 case ISD::SETUO: 8957 if (TLI.isCondCodeLegal(ISD::SETUNE, OpVT)) { 8958 CC1 = ISD::SETUNE; 8959 CC2 = ISD::SETUNE; 8960 Opc = ISD::OR; 8961 break; 8962 } 8963 assert(TLI.isCondCodeLegal(ISD::SETOEQ, OpVT) && 8964 "If SETUE is expanded, SETOEQ or SETUNE must be legal!"); 8965 NeedInvert = true; 8966 LLVM_FALLTHROUGH; 8967 case ISD::SETO: 8968 assert(TLI.isCondCodeLegal(ISD::SETOEQ, OpVT) && 8969 "If SETO is expanded, SETOEQ must be legal!"); 8970 CC1 = ISD::SETOEQ; 8971 CC2 = ISD::SETOEQ; 8972 Opc = ISD::AND; 8973 break; 8974 case ISD::SETONE: 8975 case ISD::SETUEQ: 8976 // If the SETUO or SETO CC isn't legal, we might be able to use 8977 // SETOGT || SETOLT, inverting the result for SETUEQ. We only need one 8978 // of SETOGT/SETOLT to be legal, the other can be emulated by swapping 8979 // the operands. 8980 CC2 = ((unsigned)CCCode & 0x8U) ? ISD::SETUO : ISD::SETO; 8981 if (!TLI.isCondCodeLegal(CC2, OpVT) && 8982 (TLI.isCondCodeLegal(ISD::SETOGT, OpVT) || 8983 TLI.isCondCodeLegal(ISD::SETOLT, OpVT))) { 8984 CC1 = ISD::SETOGT; 8985 CC2 = ISD::SETOLT; 8986 Opc = ISD::OR; 8987 NeedInvert = ((unsigned)CCCode & 0x8U); 8988 break; 8989 } 8990 LLVM_FALLTHROUGH; 8991 case ISD::SETOEQ: 8992 case ISD::SETOGT: 8993 case ISD::SETOGE: 8994 case ISD::SETOLT: 8995 case ISD::SETOLE: 8996 case ISD::SETUNE: 8997 case ISD::SETUGT: 8998 case ISD::SETUGE: 8999 case ISD::SETULT: 9000 case ISD::SETULE: 9001 // If we are floating point, assign and break, otherwise fall through. 9002 if (!OpVT.isInteger()) { 9003 // We can use the 4th bit to tell if we are the unordered 9004 // or ordered version of the opcode. 9005 CC2 = ((unsigned)CCCode & 0x8U) ? ISD::SETUO : ISD::SETO; 9006 Opc = ((unsigned)CCCode & 0x8U) ? 
ISD::OR : ISD::AND;
9007 CC1 = (ISD::CondCode)(((int)CCCode & 0x7) | 0x10);
9008 break;
9009 }
9010 // Fall through if we are an unsigned integer.
9011 LLVM_FALLTHROUGH;
9012 case ISD::SETLE:
9013 case ISD::SETGT:
9014 case ISD::SETGE:
9015 case ISD::SETLT:
9016 case ISD::SETNE:
9017 case ISD::SETEQ:
9018 // If all combinations of inverting the condition and swapping operands
9019 // didn't work then we have no means to expand the condition.
9020 llvm_unreachable("Don't know how to expand this condition!");
9021 }
9022
9023 SDValue SetCC1, SetCC2;
9024 if (CCCode != ISD::SETO && CCCode != ISD::SETUO) {
9025 // If we aren't the ordered or unordered operation,
9026 // then the pattern is (LHS CC1 RHS) Opc (LHS CC2 RHS).
9027 SetCC1 = DAG.getSetCC(dl, VT, LHS, RHS, CC1, Chain, IsSignaling);
9028 SetCC2 = DAG.getSetCC(dl, VT, LHS, RHS, CC2, Chain, IsSignaling);
9029 } else {
9030 // Otherwise, the pattern is (LHS CC1 LHS) Opc (RHS CC2 RHS).
9031 SetCC1 = DAG.getSetCC(dl, VT, LHS, LHS, CC1, Chain, IsSignaling);
9032 SetCC2 = DAG.getSetCC(dl, VT, RHS, RHS, CC2, Chain, IsSignaling);
9033 }
9034 if (Chain)
9035 Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, SetCC1.getValue(1),
9036 SetCC2.getValue(1));
9037 LHS = DAG.getNode(Opc, dl, VT, SetCC1, SetCC2);
9038 RHS = SDValue();
9039 CC = SDValue();
9040 return true;
9041 }
9042 }
9043 return false;
9044 }
9045
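// Illustrative example of the expansion above (assuming a floating-point
// compare on a target where only SETOEQ, SETOGT and SETOLT are legal):
// SETUEQ(a, b) cannot be fixed by swapping or inverting alone, so it is
// emitted as SETOGT(a, b) OR SETOLT(a, b) with NeedInvert = true (the caller
// negates the result), while SETO(a, b) becomes SETOEQ(a, a) AND SETOEQ(b, b),
// which is false iff either operand is NaN.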