1 //===-- TargetLowering.cpp - Implement the TargetLowering class -----------===// 2 // 3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. 4 // See https://llvm.org/LICENSE.txt for license information. 5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception 6 // 7 //===----------------------------------------------------------------------===// 8 // 9 // This implements the TargetLowering class. 10 // 11 //===----------------------------------------------------------------------===// 12 13 #include "llvm/CodeGen/TargetLowering.h" 14 #include "llvm/ADT/STLExtras.h" 15 #include "llvm/CodeGen/CallingConvLower.h" 16 #include "llvm/CodeGen/CodeGenCommonISel.h" 17 #include "llvm/CodeGen/MachineFrameInfo.h" 18 #include "llvm/CodeGen/MachineFunction.h" 19 #include "llvm/CodeGen/MachineJumpTableInfo.h" 20 #include "llvm/CodeGen/MachineRegisterInfo.h" 21 #include "llvm/CodeGen/SelectionDAG.h" 22 #include "llvm/CodeGen/TargetRegisterInfo.h" 23 #include "llvm/IR/DataLayout.h" 24 #include "llvm/IR/DerivedTypes.h" 25 #include "llvm/IR/GlobalVariable.h" 26 #include "llvm/IR/LLVMContext.h" 27 #include "llvm/MC/MCAsmInfo.h" 28 #include "llvm/MC/MCExpr.h" 29 #include "llvm/Support/DivisionByConstantInfo.h" 30 #include "llvm/Support/ErrorHandling.h" 31 #include "llvm/Support/KnownBits.h" 32 #include "llvm/Support/MathExtras.h" 33 #include "llvm/Target/TargetMachine.h" 34 #include <cctype> 35 using namespace llvm; 36 37 /// NOTE: The TargetMachine owns TLOF. 38 TargetLowering::TargetLowering(const TargetMachine &tm) 39 : TargetLoweringBase(tm) {} 40 41 const char *TargetLowering::getTargetNodeName(unsigned Opcode) const { 42 return nullptr; 43 } 44 45 bool TargetLowering::isPositionIndependent() const { 46 return getTargetMachine().isPositionIndependent(); 47 } 48 49 /// Check whether a given call node is in tail position within its function. If 50 /// so, it sets Chain to the input chain of the tail call. 51 bool TargetLowering::isInTailCallPosition(SelectionDAG &DAG, SDNode *Node, 52 SDValue &Chain) const { 53 const Function &F = DAG.getMachineFunction().getFunction(); 54 55 // First, check if tail calls have been disabled in this function. 56 if (F.getFnAttribute("disable-tail-calls").getValueAsBool()) 57 return false; 58 59 // Conservatively require the attributes of the call to match those of 60 // the return. Ignore following attributes because they don't affect the 61 // call sequence. 62 AttrBuilder CallerAttrs(F.getContext(), F.getAttributes().getRetAttrs()); 63 for (const auto &Attr : {Attribute::Alignment, Attribute::Dereferenceable, 64 Attribute::DereferenceableOrNull, Attribute::NoAlias, 65 Attribute::NonNull, Attribute::NoUndef}) 66 CallerAttrs.removeAttribute(Attr); 67 68 if (CallerAttrs.hasAttributes()) 69 return false; 70 71 // It's not safe to eliminate the sign / zero extension of the return value. 72 if (CallerAttrs.contains(Attribute::ZExt) || 73 CallerAttrs.contains(Attribute::SExt)) 74 return false; 75 76 // Check if the only use is a function return node. 77 return isUsedByReturnOnly(Node, Chain); 78 } 79 80 bool TargetLowering::parametersInCSRMatch(const MachineRegisterInfo &MRI, 81 const uint32_t *CallerPreservedMask, 82 const SmallVectorImpl<CCValAssign> &ArgLocs, 83 const SmallVectorImpl<SDValue> &OutVals) const { 84 for (unsigned I = 0, E = ArgLocs.size(); I != E; ++I) { 85 const CCValAssign &ArgLoc = ArgLocs[I]; 86 if (!ArgLoc.isRegLoc()) 87 continue; 88 MCRegister Reg = ArgLoc.getLocReg(); 89 // Only look at callee saved registers. 
90 if (MachineOperand::clobbersPhysReg(CallerPreservedMask, Reg)) 91 continue; 92 // Check that we pass the value used for the caller. 93 // (We look for a CopyFromReg reading a virtual register that is used 94 // for the function live-in value of register Reg) 95 SDValue Value = OutVals[I]; 96 if (Value->getOpcode() == ISD::AssertZext) 97 Value = Value.getOperand(0); 98 if (Value->getOpcode() != ISD::CopyFromReg) 99 return false; 100 Register ArgReg = cast<RegisterSDNode>(Value->getOperand(1))->getReg(); 101 if (MRI.getLiveInPhysReg(ArgReg) != Reg) 102 return false; 103 } 104 return true; 105 } 106 107 /// Set CallLoweringInfo attribute flags based on a call instruction 108 /// and called function attributes. 109 void TargetLoweringBase::ArgListEntry::setAttributes(const CallBase *Call, 110 unsigned ArgIdx) { 111 IsSExt = Call->paramHasAttr(ArgIdx, Attribute::SExt); 112 IsZExt = Call->paramHasAttr(ArgIdx, Attribute::ZExt); 113 IsInReg = Call->paramHasAttr(ArgIdx, Attribute::InReg); 114 IsSRet = Call->paramHasAttr(ArgIdx, Attribute::StructRet); 115 IsNest = Call->paramHasAttr(ArgIdx, Attribute::Nest); 116 IsByVal = Call->paramHasAttr(ArgIdx, Attribute::ByVal); 117 IsPreallocated = Call->paramHasAttr(ArgIdx, Attribute::Preallocated); 118 IsInAlloca = Call->paramHasAttr(ArgIdx, Attribute::InAlloca); 119 IsReturned = Call->paramHasAttr(ArgIdx, Attribute::Returned); 120 IsSwiftSelf = Call->paramHasAttr(ArgIdx, Attribute::SwiftSelf); 121 IsSwiftAsync = Call->paramHasAttr(ArgIdx, Attribute::SwiftAsync); 122 IsSwiftError = Call->paramHasAttr(ArgIdx, Attribute::SwiftError); 123 Alignment = Call->getParamStackAlign(ArgIdx); 124 IndirectType = nullptr; 125 assert(IsByVal + IsPreallocated + IsInAlloca + IsSRet <= 1 && 126 "multiple ABI attributes?"); 127 if (IsByVal) { 128 IndirectType = Call->getParamByValType(ArgIdx); 129 if (!Alignment) 130 Alignment = Call->getParamAlign(ArgIdx); 131 } 132 if (IsPreallocated) 133 IndirectType = Call->getParamPreallocatedType(ArgIdx); 134 if (IsInAlloca) 135 IndirectType = Call->getParamInAllocaType(ArgIdx); 136 if (IsSRet) 137 IndirectType = Call->getParamStructRetType(ArgIdx); 138 } 139 140 /// Generate a libcall taking the given operands as arguments and returning a 141 /// result of type RetVT. 
142 std::pair<SDValue, SDValue> 143 TargetLowering::makeLibCall(SelectionDAG &DAG, RTLIB::Libcall LC, EVT RetVT, 144 ArrayRef<SDValue> Ops, 145 MakeLibCallOptions CallOptions, 146 const SDLoc &dl, 147 SDValue InChain) const { 148 if (!InChain) 149 InChain = DAG.getEntryNode(); 150 151 TargetLowering::ArgListTy Args; 152 Args.reserve(Ops.size()); 153 154 TargetLowering::ArgListEntry Entry; 155 for (unsigned i = 0; i < Ops.size(); ++i) { 156 SDValue NewOp = Ops[i]; 157 Entry.Node = NewOp; 158 Entry.Ty = Entry.Node.getValueType().getTypeForEVT(*DAG.getContext()); 159 Entry.IsSExt = shouldSignExtendTypeInLibCall(NewOp.getValueType(), 160 CallOptions.IsSExt); 161 Entry.IsZExt = !Entry.IsSExt; 162 163 if (CallOptions.IsSoften && 164 !shouldExtendTypeInLibCall(CallOptions.OpsVTBeforeSoften[i])) { 165 Entry.IsSExt = Entry.IsZExt = false; 166 } 167 Args.push_back(Entry); 168 } 169 170 if (LC == RTLIB::UNKNOWN_LIBCALL) 171 report_fatal_error("Unsupported library call operation!"); 172 SDValue Callee = DAG.getExternalSymbol(getLibcallName(LC), 173 getPointerTy(DAG.getDataLayout())); 174 175 Type *RetTy = RetVT.getTypeForEVT(*DAG.getContext()); 176 TargetLowering::CallLoweringInfo CLI(DAG); 177 bool signExtend = shouldSignExtendTypeInLibCall(RetVT, CallOptions.IsSExt); 178 bool zeroExtend = !signExtend; 179 180 if (CallOptions.IsSoften && 181 !shouldExtendTypeInLibCall(CallOptions.RetVTBeforeSoften)) { 182 signExtend = zeroExtend = false; 183 } 184 185 CLI.setDebugLoc(dl) 186 .setChain(InChain) 187 .setLibCallee(getLibcallCallingConv(LC), RetTy, Callee, std::move(Args)) 188 .setNoReturn(CallOptions.DoesNotReturn) 189 .setDiscardResult(!CallOptions.IsReturnValueUsed) 190 .setIsPostTypeLegalization(CallOptions.IsPostTypeLegalization) 191 .setSExtResult(signExtend) 192 .setZExtResult(zeroExtend); 193 return LowerCallTo(CLI); 194 } 195 196 bool TargetLowering::findOptimalMemOpLowering( 197 std::vector<EVT> &MemOps, unsigned Limit, const MemOp &Op, unsigned DstAS, 198 unsigned SrcAS, const AttributeList &FuncAttributes) const { 199 if (Limit != ~unsigned(0) && Op.isMemcpyWithFixedDstAlign() && 200 Op.getSrcAlign() < Op.getDstAlign()) 201 return false; 202 203 EVT VT = getOptimalMemOpType(Op, FuncAttributes); 204 205 if (VT == MVT::Other) { 206 // Use the largest integer type whose alignment constraints are satisfied. 207 // We only need to check DstAlign here as SrcAlign is always greater or 208 // equal to DstAlign (or zero). 209 VT = MVT::i64; 210 if (Op.isFixedDstAlign()) 211 while (Op.getDstAlign() < (VT.getSizeInBits() / 8) && 212 !allowsMisalignedMemoryAccesses(VT, DstAS, Op.getDstAlign())) 213 VT = (MVT::SimpleValueType)(VT.getSimpleVT().SimpleTy - 1); 214 assert(VT.isInteger()); 215 216 // Find the largest legal integer type. 217 MVT LVT = MVT::i64; 218 while (!isTypeLegal(LVT)) 219 LVT = (MVT::SimpleValueType)(LVT.SimpleTy - 1); 220 assert(LVT.isInteger()); 221 222 // If the type we've chosen is larger than the largest legal integer type 223 // then use that instead. 224 if (VT.bitsGT(LVT)) 225 VT = LVT; 226 } 227 228 unsigned NumMemOps = 0; 229 uint64_t Size = Op.size(); 230 while (Size) { 231 unsigned VTSize = VT.getSizeInBits() / 8; 232 while (VTSize > Size) { 233 // For now, only use non-vector load / store's for the left-over pieces. 234 EVT NewVT = VT; 235 unsigned NewVTSize; 236 237 bool Found = false; 238 if (VT.isVector() || VT.isFloatingPoint()) { 239 NewVT = (VT.getSizeInBits() > 64) ? 
MVT::i64 : MVT::i32; 240 if (isOperationLegalOrCustom(ISD::STORE, NewVT) && 241 isSafeMemOpType(NewVT.getSimpleVT())) 242 Found = true; 243 else if (NewVT == MVT::i64 && 244 isOperationLegalOrCustom(ISD::STORE, MVT::f64) && 245 isSafeMemOpType(MVT::f64)) { 246 // i64 is usually not legal on 32-bit targets, but f64 may be. 247 NewVT = MVT::f64; 248 Found = true; 249 } 250 } 251 252 if (!Found) { 253 do { 254 NewVT = (MVT::SimpleValueType)(NewVT.getSimpleVT().SimpleTy - 1); 255 if (NewVT == MVT::i8) 256 break; 257 } while (!isSafeMemOpType(NewVT.getSimpleVT())); 258 } 259 NewVTSize = NewVT.getSizeInBits() / 8; 260 261 // If the new VT cannot cover all of the remaining bits, then consider 262 // issuing a (or a pair of) unaligned and overlapping load / store. 263 bool Fast; 264 if (NumMemOps && Op.allowOverlap() && NewVTSize < Size && 265 allowsMisalignedMemoryAccesses( 266 VT, DstAS, Op.isFixedDstAlign() ? Op.getDstAlign() : Align(1), 267 MachineMemOperand::MONone, &Fast) && 268 Fast) 269 VTSize = Size; 270 else { 271 VT = NewVT; 272 VTSize = NewVTSize; 273 } 274 } 275 276 if (++NumMemOps > Limit) 277 return false; 278 279 MemOps.push_back(VT); 280 Size -= VTSize; 281 } 282 283 return true; 284 } 285 286 /// Soften the operands of a comparison. This code is shared among BR_CC, 287 /// SELECT_CC, and SETCC handlers. 288 void TargetLowering::softenSetCCOperands(SelectionDAG &DAG, EVT VT, 289 SDValue &NewLHS, SDValue &NewRHS, 290 ISD::CondCode &CCCode, 291 const SDLoc &dl, const SDValue OldLHS, 292 const SDValue OldRHS) const { 293 SDValue Chain; 294 return softenSetCCOperands(DAG, VT, NewLHS, NewRHS, CCCode, dl, OldLHS, 295 OldRHS, Chain); 296 } 297 298 void TargetLowering::softenSetCCOperands(SelectionDAG &DAG, EVT VT, 299 SDValue &NewLHS, SDValue &NewRHS, 300 ISD::CondCode &CCCode, 301 const SDLoc &dl, const SDValue OldLHS, 302 const SDValue OldRHS, 303 SDValue &Chain, 304 bool IsSignaling) const { 305 // FIXME: Currently we cannot really respect all IEEE predicates due to libgcc 306 // not supporting it. We can update this code when libgcc provides such 307 // functions. 308 309 assert((VT == MVT::f32 || VT == MVT::f64 || VT == MVT::f128 || VT == MVT::ppcf128) 310 && "Unsupported setcc type!"); 311 312 // Expand into one or more soft-fp libcall(s). 313 RTLIB::Libcall LC1 = RTLIB::UNKNOWN_LIBCALL, LC2 = RTLIB::UNKNOWN_LIBCALL; 314 bool ShouldInvertCC = false; 315 switch (CCCode) { 316 case ISD::SETEQ: 317 case ISD::SETOEQ: 318 LC1 = (VT == MVT::f32) ? RTLIB::OEQ_F32 : 319 (VT == MVT::f64) ? RTLIB::OEQ_F64 : 320 (VT == MVT::f128) ? RTLIB::OEQ_F128 : RTLIB::OEQ_PPCF128; 321 break; 322 case ISD::SETNE: 323 case ISD::SETUNE: 324 LC1 = (VT == MVT::f32) ? RTLIB::UNE_F32 : 325 (VT == MVT::f64) ? RTLIB::UNE_F64 : 326 (VT == MVT::f128) ? RTLIB::UNE_F128 : RTLIB::UNE_PPCF128; 327 break; 328 case ISD::SETGE: 329 case ISD::SETOGE: 330 LC1 = (VT == MVT::f32) ? RTLIB::OGE_F32 : 331 (VT == MVT::f64) ? RTLIB::OGE_F64 : 332 (VT == MVT::f128) ? RTLIB::OGE_F128 : RTLIB::OGE_PPCF128; 333 break; 334 case ISD::SETLT: 335 case ISD::SETOLT: 336 LC1 = (VT == MVT::f32) ? RTLIB::OLT_F32 : 337 (VT == MVT::f64) ? RTLIB::OLT_F64 : 338 (VT == MVT::f128) ? RTLIB::OLT_F128 : RTLIB::OLT_PPCF128; 339 break; 340 case ISD::SETLE: 341 case ISD::SETOLE: 342 LC1 = (VT == MVT::f32) ? RTLIB::OLE_F32 : 343 (VT == MVT::f64) ? RTLIB::OLE_F64 : 344 (VT == MVT::f128) ? RTLIB::OLE_F128 : RTLIB::OLE_PPCF128; 345 break; 346 case ISD::SETGT: 347 case ISD::SETOGT: 348 LC1 = (VT == MVT::f32) ? RTLIB::OGT_F32 : 349 (VT == MVT::f64) ? 
RTLIB::OGT_F64 : 350 (VT == MVT::f128) ? RTLIB::OGT_F128 : RTLIB::OGT_PPCF128; 351 break; 352 case ISD::SETO: 353 ShouldInvertCC = true; 354 LLVM_FALLTHROUGH; 355 case ISD::SETUO: 356 LC1 = (VT == MVT::f32) ? RTLIB::UO_F32 : 357 (VT == MVT::f64) ? RTLIB::UO_F64 : 358 (VT == MVT::f128) ? RTLIB::UO_F128 : RTLIB::UO_PPCF128; 359 break; 360 case ISD::SETONE: 361 // SETONE = O && UNE 362 ShouldInvertCC = true; 363 LLVM_FALLTHROUGH; 364 case ISD::SETUEQ: 365 LC1 = (VT == MVT::f32) ? RTLIB::UO_F32 : 366 (VT == MVT::f64) ? RTLIB::UO_F64 : 367 (VT == MVT::f128) ? RTLIB::UO_F128 : RTLIB::UO_PPCF128; 368 LC2 = (VT == MVT::f32) ? RTLIB::OEQ_F32 : 369 (VT == MVT::f64) ? RTLIB::OEQ_F64 : 370 (VT == MVT::f128) ? RTLIB::OEQ_F128 : RTLIB::OEQ_PPCF128; 371 break; 372 default: 373 // Invert CC for unordered comparisons 374 ShouldInvertCC = true; 375 switch (CCCode) { 376 case ISD::SETULT: 377 LC1 = (VT == MVT::f32) ? RTLIB::OGE_F32 : 378 (VT == MVT::f64) ? RTLIB::OGE_F64 : 379 (VT == MVT::f128) ? RTLIB::OGE_F128 : RTLIB::OGE_PPCF128; 380 break; 381 case ISD::SETULE: 382 LC1 = (VT == MVT::f32) ? RTLIB::OGT_F32 : 383 (VT == MVT::f64) ? RTLIB::OGT_F64 : 384 (VT == MVT::f128) ? RTLIB::OGT_F128 : RTLIB::OGT_PPCF128; 385 break; 386 case ISD::SETUGT: 387 LC1 = (VT == MVT::f32) ? RTLIB::OLE_F32 : 388 (VT == MVT::f64) ? RTLIB::OLE_F64 : 389 (VT == MVT::f128) ? RTLIB::OLE_F128 : RTLIB::OLE_PPCF128; 390 break; 391 case ISD::SETUGE: 392 LC1 = (VT == MVT::f32) ? RTLIB::OLT_F32 : 393 (VT == MVT::f64) ? RTLIB::OLT_F64 : 394 (VT == MVT::f128) ? RTLIB::OLT_F128 : RTLIB::OLT_PPCF128; 395 break; 396 default: llvm_unreachable("Do not know how to soften this setcc!"); 397 } 398 } 399 400 // Use the target specific return value for comparions lib calls. 401 EVT RetVT = getCmpLibcallReturnType(); 402 SDValue Ops[2] = {NewLHS, NewRHS}; 403 TargetLowering::MakeLibCallOptions CallOptions; 404 EVT OpsVT[2] = { OldLHS.getValueType(), 405 OldRHS.getValueType() }; 406 CallOptions.setTypeListBeforeSoften(OpsVT, RetVT, true); 407 auto Call = makeLibCall(DAG, LC1, RetVT, Ops, CallOptions, dl, Chain); 408 NewLHS = Call.first; 409 NewRHS = DAG.getConstant(0, dl, RetVT); 410 411 CCCode = getCmpLibcallCC(LC1); 412 if (ShouldInvertCC) { 413 assert(RetVT.isInteger()); 414 CCCode = getSetCCInverse(CCCode, RetVT); 415 } 416 417 if (LC2 == RTLIB::UNKNOWN_LIBCALL) { 418 // Update Chain. 419 Chain = Call.second; 420 } else { 421 EVT SetCCVT = 422 getSetCCResultType(DAG.getDataLayout(), *DAG.getContext(), RetVT); 423 SDValue Tmp = DAG.getSetCC(dl, SetCCVT, NewLHS, NewRHS, CCCode); 424 auto Call2 = makeLibCall(DAG, LC2, RetVT, Ops, CallOptions, dl, Chain); 425 CCCode = getCmpLibcallCC(LC2); 426 if (ShouldInvertCC) 427 CCCode = getSetCCInverse(CCCode, RetVT); 428 NewLHS = DAG.getSetCC(dl, SetCCVT, Call2.first, NewRHS, CCCode); 429 if (Chain) 430 Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, Call.second, 431 Call2.second); 432 NewLHS = DAG.getNode(ShouldInvertCC ? ISD::AND : ISD::OR, dl, 433 Tmp.getValueType(), Tmp, NewLHS); 434 NewRHS = SDValue(); 435 } 436 } 437 438 /// Return the entry encoding for a jump table in the current function. The 439 /// returned value is a member of the MachineJumpTableInfo::JTEntryKind enum. 440 unsigned TargetLowering::getJumpTableEncoding() const { 441 // In non-pic modes, just use the address of a block. 442 if (!isPositionIndependent()) 443 return MachineJumpTableInfo::EK_BlockAddress; 444 445 // In PIC mode, if the target supports a GPRel32 directive, use it. 
446 if (getTargetMachine().getMCAsmInfo()->getGPRel32Directive() != nullptr) 447 return MachineJumpTableInfo::EK_GPRel32BlockAddress; 448 449 // Otherwise, use a label difference. 450 return MachineJumpTableInfo::EK_LabelDifference32; 451 } 452 453 SDValue TargetLowering::getPICJumpTableRelocBase(SDValue Table, 454 SelectionDAG &DAG) const { 455 // If our PIC model is GP relative, use the global offset table as the base. 456 unsigned JTEncoding = getJumpTableEncoding(); 457 458 if ((JTEncoding == MachineJumpTableInfo::EK_GPRel64BlockAddress) || 459 (JTEncoding == MachineJumpTableInfo::EK_GPRel32BlockAddress)) 460 return DAG.getGLOBAL_OFFSET_TABLE(getPointerTy(DAG.getDataLayout())); 461 462 return Table; 463 } 464 465 /// This returns the relocation base for the given PIC jumptable, the same as 466 /// getPICJumpTableRelocBase, but as an MCExpr. 467 const MCExpr * 468 TargetLowering::getPICJumpTableRelocBaseExpr(const MachineFunction *MF, 469 unsigned JTI,MCContext &Ctx) const{ 470 // The normal PIC reloc base is the label at the start of the jump table. 471 return MCSymbolRefExpr::create(MF->getJTISymbol(JTI, Ctx), Ctx); 472 } 473 474 bool 475 TargetLowering::isOffsetFoldingLegal(const GlobalAddressSDNode *GA) const { 476 const TargetMachine &TM = getTargetMachine(); 477 const GlobalValue *GV = GA->getGlobal(); 478 479 // If the address is not even local to this DSO we will have to load it from 480 // a got and then add the offset. 481 if (!TM.shouldAssumeDSOLocal(*GV->getParent(), GV)) 482 return false; 483 484 // If the code is position independent we will have to add a base register. 485 if (isPositionIndependent()) 486 return false; 487 488 // Otherwise we can do it. 489 return true; 490 } 491 492 //===----------------------------------------------------------------------===// 493 // Optimization Methods 494 //===----------------------------------------------------------------------===// 495 496 /// If the specified instruction has a constant integer operand and there are 497 /// bits set in that constant that are not demanded, then clear those bits and 498 /// return true. 499 bool TargetLowering::ShrinkDemandedConstant(SDValue Op, 500 const APInt &DemandedBits, 501 const APInt &DemandedElts, 502 TargetLoweringOpt &TLO) const { 503 SDLoc DL(Op); 504 unsigned Opcode = Op.getOpcode(); 505 506 // Do target-specific constant optimization. 507 if (targetShrinkDemandedConstant(Op, DemandedBits, DemandedElts, TLO)) 508 return TLO.New.getNode(); 509 510 // FIXME: ISD::SELECT, ISD::SELECT_CC 511 switch (Opcode) { 512 default: 513 break; 514 case ISD::XOR: 515 case ISD::AND: 516 case ISD::OR: { 517 auto *Op1C = dyn_cast<ConstantSDNode>(Op.getOperand(1)); 518 if (!Op1C || Op1C->isOpaque()) 519 return false; 520 521 // If this is a 'not' op, don't touch it because that's a canonical form. 522 const APInt &C = Op1C->getAPIntValue(); 523 if (Opcode == ISD::XOR && DemandedBits.isSubsetOf(C)) 524 return false; 525 526 if (!C.isSubsetOf(DemandedBits)) { 527 EVT VT = Op.getValueType(); 528 SDValue NewC = TLO.DAG.getConstant(DemandedBits & C, DL, VT); 529 SDValue NewOp = TLO.DAG.getNode(Opcode, DL, VT, Op.getOperand(0), NewC); 530 return TLO.CombineTo(Op, NewOp); 531 } 532 533 break; 534 } 535 } 536 537 return false; 538 } 539 540 bool TargetLowering::ShrinkDemandedConstant(SDValue Op, 541 const APInt &DemandedBits, 542 TargetLoweringOpt &TLO) const { 543 EVT VT = Op.getValueType(); 544 APInt DemandedElts = VT.isVector() 545 ? 
APInt::getAllOnes(VT.getVectorNumElements()) 546 : APInt(1, 1); 547 return ShrinkDemandedConstant(Op, DemandedBits, DemandedElts, TLO); 548 } 549 550 /// Convert x+y to (VT)((SmallVT)x+(SmallVT)y) if the casts are free. 551 /// This uses isZExtFree and ZERO_EXTEND for the widening cast, but it could be 552 /// generalized for targets with other types of implicit widening casts. 553 bool TargetLowering::ShrinkDemandedOp(SDValue Op, unsigned BitWidth, 554 const APInt &Demanded, 555 TargetLoweringOpt &TLO) const { 556 assert(Op.getNumOperands() == 2 && 557 "ShrinkDemandedOp only supports binary operators!"); 558 assert(Op.getNode()->getNumValues() == 1 && 559 "ShrinkDemandedOp only supports nodes with one result!"); 560 561 SelectionDAG &DAG = TLO.DAG; 562 SDLoc dl(Op); 563 564 // Early return, as this function cannot handle vector types. 565 if (Op.getValueType().isVector()) 566 return false; 567 568 // Don't do this if the node has another user, which may require the 569 // full value. 570 if (!Op.getNode()->hasOneUse()) 571 return false; 572 573 // Search for the smallest integer type with free casts to and from 574 // Op's type. For expedience, just check power-of-2 integer types. 575 const TargetLowering &TLI = DAG.getTargetLoweringInfo(); 576 unsigned DemandedSize = Demanded.getActiveBits(); 577 unsigned SmallVTBits = DemandedSize; 578 if (!isPowerOf2_32(SmallVTBits)) 579 SmallVTBits = NextPowerOf2(SmallVTBits); 580 for (; SmallVTBits < BitWidth; SmallVTBits = NextPowerOf2(SmallVTBits)) { 581 EVT SmallVT = EVT::getIntegerVT(*DAG.getContext(), SmallVTBits); 582 if (TLI.isTruncateFree(Op.getValueType(), SmallVT) && 583 TLI.isZExtFree(SmallVT, Op.getValueType())) { 584 // We found a type with free casts. 585 SDValue X = DAG.getNode( 586 Op.getOpcode(), dl, SmallVT, 587 DAG.getNode(ISD::TRUNCATE, dl, SmallVT, Op.getOperand(0)), 588 DAG.getNode(ISD::TRUNCATE, dl, SmallVT, Op.getOperand(1))); 589 assert(DemandedSize <= SmallVTBits && "Narrowed below demanded bits?"); 590 SDValue Z = DAG.getNode(ISD::ANY_EXTEND, dl, Op.getValueType(), X); 591 return TLO.CombineTo(Op, Z); 592 } 593 } 594 return false; 595 } 596 597 bool TargetLowering::SimplifyDemandedBits(SDValue Op, const APInt &DemandedBits, 598 DAGCombinerInfo &DCI) const { 599 SelectionDAG &DAG = DCI.DAG; 600 TargetLoweringOpt TLO(DAG, !DCI.isBeforeLegalize(), 601 !DCI.isBeforeLegalizeOps()); 602 KnownBits Known; 603 604 bool Simplified = SimplifyDemandedBits(Op, DemandedBits, Known, TLO); 605 if (Simplified) { 606 DCI.AddToWorklist(Op.getNode()); 607 DCI.CommitTargetLoweringOpt(TLO); 608 } 609 return Simplified; 610 } 611 612 bool TargetLowering::SimplifyDemandedBits(SDValue Op, const APInt &DemandedBits, 613 const APInt &DemandedElts, 614 DAGCombinerInfo &DCI) const { 615 SelectionDAG &DAG = DCI.DAG; 616 TargetLoweringOpt TLO(DAG, !DCI.isBeforeLegalize(), 617 !DCI.isBeforeLegalizeOps()); 618 KnownBits Known; 619 620 bool Simplified = 621 SimplifyDemandedBits(Op, DemandedBits, DemandedElts, Known, TLO); 622 if (Simplified) { 623 DCI.AddToWorklist(Op.getNode()); 624 DCI.CommitTargetLoweringOpt(TLO); 625 } 626 return Simplified; 627 } 628 629 bool TargetLowering::SimplifyDemandedBits(SDValue Op, const APInt &DemandedBits, 630 KnownBits &Known, 631 TargetLoweringOpt &TLO, 632 unsigned Depth, 633 bool AssumeSingleUse) const { 634 EVT VT = Op.getValueType(); 635 636 // TODO: We can probably do more work on calculating the known bits and 637 // simplifying the operations for scalable vectors, but for now we just 638 // bail out. 
639 if (VT.isScalableVector()) { 640 // Pretend we don't know anything for now. 641 Known = KnownBits(DemandedBits.getBitWidth()); 642 return false; 643 } 644 645 APInt DemandedElts = VT.isVector() 646 ? APInt::getAllOnes(VT.getVectorNumElements()) 647 : APInt(1, 1); 648 return SimplifyDemandedBits(Op, DemandedBits, DemandedElts, Known, TLO, Depth, 649 AssumeSingleUse); 650 } 651 652 // TODO: Can we merge SelectionDAG::GetDemandedBits into this? 653 // TODO: Under what circumstances can we create nodes? Constant folding? 654 SDValue TargetLowering::SimplifyMultipleUseDemandedBits( 655 SDValue Op, const APInt &DemandedBits, const APInt &DemandedElts, 656 SelectionDAG &DAG, unsigned Depth) const { 657 // Limit search depth. 658 if (Depth >= SelectionDAG::MaxRecursionDepth) 659 return SDValue(); 660 661 // Ignore UNDEFs. 662 if (Op.isUndef()) 663 return SDValue(); 664 665 // Not demanding any bits/elts from Op. 666 if (DemandedBits == 0 || DemandedElts == 0) 667 return DAG.getUNDEF(Op.getValueType()); 668 669 bool IsLE = DAG.getDataLayout().isLittleEndian(); 670 unsigned NumElts = DemandedElts.getBitWidth(); 671 unsigned BitWidth = DemandedBits.getBitWidth(); 672 KnownBits LHSKnown, RHSKnown; 673 switch (Op.getOpcode()) { 674 case ISD::BITCAST: { 675 SDValue Src = peekThroughBitcasts(Op.getOperand(0)); 676 EVT SrcVT = Src.getValueType(); 677 EVT DstVT = Op.getValueType(); 678 if (SrcVT == DstVT) 679 return Src; 680 681 unsigned NumSrcEltBits = SrcVT.getScalarSizeInBits(); 682 unsigned NumDstEltBits = DstVT.getScalarSizeInBits(); 683 if (NumSrcEltBits == NumDstEltBits) 684 if (SDValue V = SimplifyMultipleUseDemandedBits( 685 Src, DemandedBits, DemandedElts, DAG, Depth + 1)) 686 return DAG.getBitcast(DstVT, V); 687 688 if (SrcVT.isVector() && (NumDstEltBits % NumSrcEltBits) == 0) { 689 unsigned Scale = NumDstEltBits / NumSrcEltBits; 690 unsigned NumSrcElts = SrcVT.getVectorNumElements(); 691 APInt DemandedSrcBits = APInt::getZero(NumSrcEltBits); 692 APInt DemandedSrcElts = APInt::getZero(NumSrcElts); 693 for (unsigned i = 0; i != Scale; ++i) { 694 unsigned EltOffset = IsLE ? i : (Scale - 1 - i); 695 unsigned BitOffset = EltOffset * NumSrcEltBits; 696 APInt Sub = DemandedBits.extractBits(NumSrcEltBits, BitOffset); 697 if (!Sub.isZero()) { 698 DemandedSrcBits |= Sub; 699 for (unsigned j = 0; j != NumElts; ++j) 700 if (DemandedElts[j]) 701 DemandedSrcElts.setBit((j * Scale) + i); 702 } 703 } 704 705 if (SDValue V = SimplifyMultipleUseDemandedBits( 706 Src, DemandedSrcBits, DemandedSrcElts, DAG, Depth + 1)) 707 return DAG.getBitcast(DstVT, V); 708 } 709 710 // TODO - bigendian once we have test coverage. 711 if (IsLE && (NumSrcEltBits % NumDstEltBits) == 0) { 712 unsigned Scale = NumSrcEltBits / NumDstEltBits; 713 unsigned NumSrcElts = SrcVT.isVector() ? 
SrcVT.getVectorNumElements() : 1; 714 APInt DemandedSrcBits = APInt::getZero(NumSrcEltBits); 715 APInt DemandedSrcElts = APInt::getZero(NumSrcElts); 716 for (unsigned i = 0; i != NumElts; ++i) 717 if (DemandedElts[i]) { 718 unsigned Offset = (i % Scale) * NumDstEltBits; 719 DemandedSrcBits.insertBits(DemandedBits, Offset); 720 DemandedSrcElts.setBit(i / Scale); 721 } 722 723 if (SDValue V = SimplifyMultipleUseDemandedBits( 724 Src, DemandedSrcBits, DemandedSrcElts, DAG, Depth + 1)) 725 return DAG.getBitcast(DstVT, V); 726 } 727 728 break; 729 } 730 case ISD::AND: { 731 LHSKnown = DAG.computeKnownBits(Op.getOperand(0), DemandedElts, Depth + 1); 732 RHSKnown = DAG.computeKnownBits(Op.getOperand(1), DemandedElts, Depth + 1); 733 734 // If all of the demanded bits are known 1 on one side, return the other. 735 // These bits cannot contribute to the result of the 'and' in this 736 // context. 737 if (DemandedBits.isSubsetOf(LHSKnown.Zero | RHSKnown.One)) 738 return Op.getOperand(0); 739 if (DemandedBits.isSubsetOf(RHSKnown.Zero | LHSKnown.One)) 740 return Op.getOperand(1); 741 break; 742 } 743 case ISD::OR: { 744 LHSKnown = DAG.computeKnownBits(Op.getOperand(0), DemandedElts, Depth + 1); 745 RHSKnown = DAG.computeKnownBits(Op.getOperand(1), DemandedElts, Depth + 1); 746 747 // If all of the demanded bits are known zero on one side, return the 748 // other. These bits cannot contribute to the result of the 'or' in this 749 // context. 750 if (DemandedBits.isSubsetOf(LHSKnown.One | RHSKnown.Zero)) 751 return Op.getOperand(0); 752 if (DemandedBits.isSubsetOf(RHSKnown.One | LHSKnown.Zero)) 753 return Op.getOperand(1); 754 break; 755 } 756 case ISD::XOR: { 757 LHSKnown = DAG.computeKnownBits(Op.getOperand(0), DemandedElts, Depth + 1); 758 RHSKnown = DAG.computeKnownBits(Op.getOperand(1), DemandedElts, Depth + 1); 759 760 // If all of the demanded bits are known zero on one side, return the 761 // other. 762 if (DemandedBits.isSubsetOf(RHSKnown.Zero)) 763 return Op.getOperand(0); 764 if (DemandedBits.isSubsetOf(LHSKnown.Zero)) 765 return Op.getOperand(1); 766 break; 767 } 768 case ISD::SHL: { 769 // If we are only demanding sign bits then we can use the shift source 770 // directly. 771 if (const APInt *MaxSA = 772 DAG.getValidMaximumShiftAmountConstant(Op, DemandedElts)) { 773 SDValue Op0 = Op.getOperand(0); 774 unsigned ShAmt = MaxSA->getZExtValue(); 775 unsigned NumSignBits = 776 DAG.ComputeNumSignBits(Op0, DemandedElts, Depth + 1); 777 unsigned UpperDemandedBits = BitWidth - DemandedBits.countTrailingZeros(); 778 if (NumSignBits > ShAmt && (NumSignBits - ShAmt) >= (UpperDemandedBits)) 779 return Op0; 780 } 781 break; 782 } 783 case ISD::SETCC: { 784 SDValue Op0 = Op.getOperand(0); 785 SDValue Op1 = Op.getOperand(1); 786 ISD::CondCode CC = cast<CondCodeSDNode>(Op.getOperand(2))->get(); 787 // If (1) we only need the sign-bit, (2) the setcc operands are the same 788 // width as the setcc result, and (3) the result of a setcc conforms to 0 or 789 // -1, we may be able to bypass the setcc. 790 if (DemandedBits.isSignMask() && 791 Op0.getScalarValueSizeInBits() == BitWidth && 792 getBooleanContents(Op0.getValueType()) == 793 BooleanContent::ZeroOrNegativeOneBooleanContent) { 794 // If we're testing X < 0, then this compare isn't needed - just use X! 795 // FIXME: We're limiting to integer types here, but this should also work 796 // if we don't care about FP signed-zero. The use of SETLT with FP means 797 // that we don't care about NaNs. 
798 if (CC == ISD::SETLT && Op1.getValueType().isInteger() && 799 (isNullConstant(Op1) || ISD::isBuildVectorAllZeros(Op1.getNode()))) 800 return Op0; 801 } 802 break; 803 } 804 case ISD::SIGN_EXTEND_INREG: { 805 // If none of the extended bits are demanded, eliminate the sextinreg. 806 SDValue Op0 = Op.getOperand(0); 807 EVT ExVT = cast<VTSDNode>(Op.getOperand(1))->getVT(); 808 unsigned ExBits = ExVT.getScalarSizeInBits(); 809 if (DemandedBits.getActiveBits() <= ExBits) 810 return Op0; 811 // If the input is already sign extended, just drop the extension. 812 unsigned NumSignBits = DAG.ComputeNumSignBits(Op0, DemandedElts, Depth + 1); 813 if (NumSignBits >= (BitWidth - ExBits + 1)) 814 return Op0; 815 break; 816 } 817 case ISD::ANY_EXTEND_VECTOR_INREG: 818 case ISD::SIGN_EXTEND_VECTOR_INREG: 819 case ISD::ZERO_EXTEND_VECTOR_INREG: { 820 // If we only want the lowest element and none of extended bits, then we can 821 // return the bitcasted source vector. 822 SDValue Src = Op.getOperand(0); 823 EVT SrcVT = Src.getValueType(); 824 EVT DstVT = Op.getValueType(); 825 if (IsLE && DemandedElts == 1 && 826 DstVT.getSizeInBits() == SrcVT.getSizeInBits() && 827 DemandedBits.getActiveBits() <= SrcVT.getScalarSizeInBits()) { 828 return DAG.getBitcast(DstVT, Src); 829 } 830 break; 831 } 832 case ISD::INSERT_VECTOR_ELT: { 833 // If we don't demand the inserted element, return the base vector. 834 SDValue Vec = Op.getOperand(0); 835 auto *CIdx = dyn_cast<ConstantSDNode>(Op.getOperand(2)); 836 EVT VecVT = Vec.getValueType(); 837 if (CIdx && CIdx->getAPIntValue().ult(VecVT.getVectorNumElements()) && 838 !DemandedElts[CIdx->getZExtValue()]) 839 return Vec; 840 break; 841 } 842 case ISD::INSERT_SUBVECTOR: { 843 SDValue Vec = Op.getOperand(0); 844 SDValue Sub = Op.getOperand(1); 845 uint64_t Idx = Op.getConstantOperandVal(2); 846 unsigned NumSubElts = Sub.getValueType().getVectorNumElements(); 847 APInt DemandedSubElts = DemandedElts.extractBits(NumSubElts, Idx); 848 // If we don't demand the inserted subvector, return the base vector. 849 if (DemandedSubElts == 0) 850 return Vec; 851 // If this simply widens the lowest subvector, see if we can do it earlier. 852 if (Idx == 0 && Vec.isUndef()) { 853 if (SDValue NewSub = SimplifyMultipleUseDemandedBits( 854 Sub, DemandedBits, DemandedSubElts, DAG, Depth + 1)) 855 return DAG.getNode(Op.getOpcode(), SDLoc(Op), Op.getValueType(), 856 Op.getOperand(0), NewSub, Op.getOperand(2)); 857 } 858 break; 859 } 860 case ISD::VECTOR_SHUFFLE: { 861 ArrayRef<int> ShuffleMask = cast<ShuffleVectorSDNode>(Op)->getMask(); 862 863 // If all the demanded elts are from one operand and are inline, 864 // then we can use the operand directly. 
865 bool AllUndef = true, IdentityLHS = true, IdentityRHS = true; 866 for (unsigned i = 0; i != NumElts; ++i) { 867 int M = ShuffleMask[i]; 868 if (M < 0 || !DemandedElts[i]) 869 continue; 870 AllUndef = false; 871 IdentityLHS &= (M == (int)i); 872 IdentityRHS &= ((M - NumElts) == i); 873 } 874 875 if (AllUndef) 876 return DAG.getUNDEF(Op.getValueType()); 877 if (IdentityLHS) 878 return Op.getOperand(0); 879 if (IdentityRHS) 880 return Op.getOperand(1); 881 break; 882 } 883 default: 884 if (Op.getOpcode() >= ISD::BUILTIN_OP_END) 885 if (SDValue V = SimplifyMultipleUseDemandedBitsForTargetNode( 886 Op, DemandedBits, DemandedElts, DAG, Depth)) 887 return V; 888 break; 889 } 890 return SDValue(); 891 } 892 893 SDValue TargetLowering::SimplifyMultipleUseDemandedBits( 894 SDValue Op, const APInt &DemandedBits, SelectionDAG &DAG, 895 unsigned Depth) const { 896 EVT VT = Op.getValueType(); 897 APInt DemandedElts = VT.isVector() 898 ? APInt::getAllOnes(VT.getVectorNumElements()) 899 : APInt(1, 1); 900 return SimplifyMultipleUseDemandedBits(Op, DemandedBits, DemandedElts, DAG, 901 Depth); 902 } 903 904 SDValue TargetLowering::SimplifyMultipleUseDemandedVectorElts( 905 SDValue Op, const APInt &DemandedElts, SelectionDAG &DAG, 906 unsigned Depth) const { 907 APInt DemandedBits = APInt::getAllOnes(Op.getScalarValueSizeInBits()); 908 return SimplifyMultipleUseDemandedBits(Op, DemandedBits, DemandedElts, DAG, 909 Depth); 910 } 911 912 // Attempt to form ext(avgfloor(A, B)) from shr(add(ext(A), ext(B)), 1). 913 // or to form ext(avgceil(A, B)) from shr(add(ext(A), ext(B), 1), 1). 914 static SDValue combineShiftToAVG(SDValue Op, SelectionDAG &DAG, 915 const TargetLowering &TLI, 916 const APInt &DemandedBits, 917 const APInt &DemandedElts, 918 unsigned Depth) { 919 assert((Op.getOpcode() == ISD::SRL || Op.getOpcode() == ISD::SRA) && 920 "SRL or SRA node is required here!"); 921 // Is the right shift using an immediate value of 1? 922 ConstantSDNode *N1C = isConstOrConstSplat(Op.getOperand(1), DemandedElts); 923 if (!N1C || !N1C->isOne()) 924 return SDValue(); 925 926 // We are looking for an avgfloor 927 // add(ext, ext) 928 // or one of these as a avgceil 929 // add(add(ext, ext), 1) 930 // add(add(ext, 1), ext) 931 // add(ext, add(ext, 1)) 932 SDValue Add = Op.getOperand(0); 933 if (Add.getOpcode() != ISD::ADD) 934 return SDValue(); 935 936 SDValue ExtOpA = Add.getOperand(0); 937 SDValue ExtOpB = Add.getOperand(1); 938 auto MatchOperands = [&](SDValue Op1, SDValue Op2, SDValue Op3) { 939 ConstantSDNode *ConstOp; 940 if ((ConstOp = isConstOrConstSplat(Op1, DemandedElts)) && 941 ConstOp->isOne()) { 942 ExtOpA = Op2; 943 ExtOpB = Op3; 944 return true; 945 } 946 if ((ConstOp = isConstOrConstSplat(Op2, DemandedElts)) && 947 ConstOp->isOne()) { 948 ExtOpA = Op1; 949 ExtOpB = Op3; 950 return true; 951 } 952 if ((ConstOp = isConstOrConstSplat(Op3, DemandedElts)) && 953 ConstOp->isOne()) { 954 ExtOpA = Op1; 955 ExtOpB = Op2; 956 return true; 957 } 958 return false; 959 }; 960 bool IsCeil = 961 (ExtOpA.getOpcode() == ISD::ADD && 962 MatchOperands(ExtOpA.getOperand(0), ExtOpA.getOperand(1), ExtOpB)) || 963 (ExtOpB.getOpcode() == ISD::ADD && 964 MatchOperands(ExtOpB.getOperand(0), ExtOpB.getOperand(1), ExtOpA)); 965 966 // If the shift is signed (sra): 967 // - Needs >= 2 sign bit for both operands. 968 // - Needs >= 2 zero bits. 969 // If the shift is unsigned (srl): 970 // - Needs >= 1 zero bit for both operands. 971 // - Needs 1 demanded bit zero and >= 2 sign bits. 
972 unsigned ShiftOpc = Op.getOpcode(); 973 bool IsSigned = false; 974 unsigned KnownBits; 975 unsigned NumSignedA = DAG.ComputeNumSignBits(ExtOpA, DemandedElts, Depth); 976 unsigned NumSignedB = DAG.ComputeNumSignBits(ExtOpB, DemandedElts, Depth); 977 unsigned NumSigned = std::min(NumSignedA, NumSignedB) - 1; 978 unsigned NumZeroA = 979 DAG.computeKnownBits(ExtOpA, DemandedElts, Depth).countMinLeadingZeros(); 980 unsigned NumZeroB = 981 DAG.computeKnownBits(ExtOpB, DemandedElts, Depth).countMinLeadingZeros(); 982 unsigned NumZero = std::min(NumZeroA, NumZeroB); 983 984 switch (ShiftOpc) { 985 default: 986 llvm_unreachable("Unexpected ShiftOpc in combineShiftToAVG"); 987 case ISD::SRA: { 988 if (NumZero >= 2 && NumSigned < NumZero) { 989 IsSigned = false; 990 KnownBits = NumZero; 991 break; 992 } 993 if (NumSigned >= 1) { 994 IsSigned = true; 995 KnownBits = NumSigned; 996 break; 997 } 998 return SDValue(); 999 } 1000 case ISD::SRL: { 1001 if (NumZero >= 1 && NumSigned < NumZero) { 1002 IsSigned = false; 1003 KnownBits = NumZero; 1004 break; 1005 } 1006 if (NumSigned >= 1 && DemandedBits.isSignBitClear()) { 1007 IsSigned = true; 1008 KnownBits = NumSigned; 1009 break; 1010 } 1011 return SDValue(); 1012 } 1013 } 1014 1015 unsigned AVGOpc = IsCeil ? (IsSigned ? ISD::AVGCEILS : ISD::AVGCEILU) 1016 : (IsSigned ? ISD::AVGFLOORS : ISD::AVGFLOORU); 1017 1018 // Find the smallest power-2 type that is legal for this vector size and 1019 // operation, given the original type size and the number of known sign/zero 1020 // bits. 1021 EVT VT = Op.getValueType(); 1022 unsigned MinWidth = 1023 std::max<unsigned>(VT.getScalarSizeInBits() - KnownBits, 8); 1024 EVT NVT = EVT::getIntegerVT(*DAG.getContext(), PowerOf2Ceil(MinWidth)); 1025 if (VT.isVector()) 1026 NVT = EVT::getVectorVT(*DAG.getContext(), NVT, VT.getVectorElementCount()); 1027 if (!TLI.isOperationLegalOrCustom(AVGOpc, NVT)) 1028 return SDValue(); 1029 1030 SDLoc DL(Op); 1031 SDValue ResultAVG = 1032 DAG.getNode(AVGOpc, DL, NVT, DAG.getNode(ISD::TRUNCATE, DL, NVT, ExtOpA), 1033 DAG.getNode(ISD::TRUNCATE, DL, NVT, ExtOpB)); 1034 return DAG.getNode(IsSigned ? ISD::SIGN_EXTEND : ISD::ZERO_EXTEND, DL, VT, 1035 ResultAVG); 1036 } 1037 1038 /// Look at Op. At this point, we know that only the OriginalDemandedBits of the 1039 /// result of Op are ever used downstream. If we can use this information to 1040 /// simplify Op, create a new simplified DAG node and return true, returning the 1041 /// original and new nodes in Old and New. Otherwise, analyze the expression and 1042 /// return a mask of Known bits for the expression (used to simplify the 1043 /// caller). The Known bits may only be accurate for those bits in the 1044 /// OriginalDemandedBits and OriginalDemandedElts. 1045 bool TargetLowering::SimplifyDemandedBits( 1046 SDValue Op, const APInt &OriginalDemandedBits, 1047 const APInt &OriginalDemandedElts, KnownBits &Known, TargetLoweringOpt &TLO, 1048 unsigned Depth, bool AssumeSingleUse) const { 1049 unsigned BitWidth = OriginalDemandedBits.getBitWidth(); 1050 assert(Op.getScalarValueSizeInBits() == BitWidth && 1051 "Mask size mismatches value type size!"); 1052 1053 // Don't know anything. 1054 Known = KnownBits(BitWidth); 1055 1056 // TODO: We can probably do more work on calculating the known bits and 1057 // simplifying the operations for scalable vectors, but for now we just 1058 // bail out. 
1059 if (Op.getValueType().isScalableVector()) 1060 return false; 1061 1062 bool IsLE = TLO.DAG.getDataLayout().isLittleEndian(); 1063 unsigned NumElts = OriginalDemandedElts.getBitWidth(); 1064 assert((!Op.getValueType().isVector() || 1065 NumElts == Op.getValueType().getVectorNumElements()) && 1066 "Unexpected vector size"); 1067 1068 APInt DemandedBits = OriginalDemandedBits; 1069 APInt DemandedElts = OriginalDemandedElts; 1070 SDLoc dl(Op); 1071 auto &DL = TLO.DAG.getDataLayout(); 1072 1073 // Undef operand. 1074 if (Op.isUndef()) 1075 return false; 1076 1077 if (Op.getOpcode() == ISD::Constant) { 1078 // We know all of the bits for a constant! 1079 Known = KnownBits::makeConstant(cast<ConstantSDNode>(Op)->getAPIntValue()); 1080 return false; 1081 } 1082 1083 if (Op.getOpcode() == ISD::ConstantFP) { 1084 // We know all of the bits for a floating point constant! 1085 Known = KnownBits::makeConstant( 1086 cast<ConstantFPSDNode>(Op)->getValueAPF().bitcastToAPInt()); 1087 return false; 1088 } 1089 1090 // Other users may use these bits. 1091 EVT VT = Op.getValueType(); 1092 if (!Op.getNode()->hasOneUse() && !AssumeSingleUse) { 1093 if (Depth != 0) { 1094 // If not at the root, Just compute the Known bits to 1095 // simplify things downstream. 1096 Known = TLO.DAG.computeKnownBits(Op, DemandedElts, Depth); 1097 return false; 1098 } 1099 // If this is the root being simplified, allow it to have multiple uses, 1100 // just set the DemandedBits/Elts to all bits. 1101 DemandedBits = APInt::getAllOnes(BitWidth); 1102 DemandedElts = APInt::getAllOnes(NumElts); 1103 } else if (OriginalDemandedBits == 0 || OriginalDemandedElts == 0) { 1104 // Not demanding any bits/elts from Op. 1105 return TLO.CombineTo(Op, TLO.DAG.getUNDEF(VT)); 1106 } else if (Depth >= SelectionDAG::MaxRecursionDepth) { 1107 // Limit search depth. 1108 return false; 1109 } 1110 1111 KnownBits Known2; 1112 switch (Op.getOpcode()) { 1113 case ISD::TargetConstant: 1114 llvm_unreachable("Can't simplify this node"); 1115 case ISD::SCALAR_TO_VECTOR: { 1116 if (!DemandedElts[0]) 1117 return TLO.CombineTo(Op, TLO.DAG.getUNDEF(VT)); 1118 1119 KnownBits SrcKnown; 1120 SDValue Src = Op.getOperand(0); 1121 unsigned SrcBitWidth = Src.getScalarValueSizeInBits(); 1122 APInt SrcDemandedBits = DemandedBits.zext(SrcBitWidth); 1123 if (SimplifyDemandedBits(Src, SrcDemandedBits, SrcKnown, TLO, Depth + 1)) 1124 return true; 1125 1126 // Upper elements are undef, so only get the knownbits if we just demand 1127 // the bottom element. 1128 if (DemandedElts == 1) 1129 Known = SrcKnown.anyextOrTrunc(BitWidth); 1130 break; 1131 } 1132 case ISD::BUILD_VECTOR: 1133 // Collect the known bits that are shared by every demanded element. 1134 // TODO: Call SimplifyDemandedBits for non-constant demanded elements. 1135 Known = TLO.DAG.computeKnownBits(Op, DemandedElts, Depth); 1136 return false; // Don't fall through, will infinitely loop. 1137 case ISD::LOAD: { 1138 auto *LD = cast<LoadSDNode>(Op); 1139 if (getTargetConstantFromLoad(LD)) { 1140 Known = TLO.DAG.computeKnownBits(Op, DemandedElts, Depth); 1141 return false; // Don't fall through, will infinitely loop. 1142 } 1143 if (ISD::isZEXTLoad(Op.getNode()) && Op.getResNo() == 0) { 1144 // If this is a ZEXTLoad and we are looking at the loaded value. 1145 EVT MemVT = LD->getMemoryVT(); 1146 unsigned MemBits = MemVT.getScalarSizeInBits(); 1147 Known.Zero.setBitsFrom(MemBits); 1148 return false; // Don't fall through, will infinitely loop. 
1149 } 1150 break; 1151 } 1152 case ISD::INSERT_VECTOR_ELT: { 1153 SDValue Vec = Op.getOperand(0); 1154 SDValue Scl = Op.getOperand(1); 1155 auto *CIdx = dyn_cast<ConstantSDNode>(Op.getOperand(2)); 1156 EVT VecVT = Vec.getValueType(); 1157 1158 // If index isn't constant, assume we need all vector elements AND the 1159 // inserted element. 1160 APInt DemandedVecElts(DemandedElts); 1161 if (CIdx && CIdx->getAPIntValue().ult(VecVT.getVectorNumElements())) { 1162 unsigned Idx = CIdx->getZExtValue(); 1163 DemandedVecElts.clearBit(Idx); 1164 1165 // Inserted element is not required. 1166 if (!DemandedElts[Idx]) 1167 return TLO.CombineTo(Op, Vec); 1168 } 1169 1170 KnownBits KnownScl; 1171 unsigned NumSclBits = Scl.getScalarValueSizeInBits(); 1172 APInt DemandedSclBits = DemandedBits.zextOrTrunc(NumSclBits); 1173 if (SimplifyDemandedBits(Scl, DemandedSclBits, KnownScl, TLO, Depth + 1)) 1174 return true; 1175 1176 Known = KnownScl.anyextOrTrunc(BitWidth); 1177 1178 KnownBits KnownVec; 1179 if (SimplifyDemandedBits(Vec, DemandedBits, DemandedVecElts, KnownVec, TLO, 1180 Depth + 1)) 1181 return true; 1182 1183 if (!!DemandedVecElts) 1184 Known = KnownBits::commonBits(Known, KnownVec); 1185 1186 return false; 1187 } 1188 case ISD::INSERT_SUBVECTOR: { 1189 // Demand any elements from the subvector and the remainder from the src its 1190 // inserted into. 1191 SDValue Src = Op.getOperand(0); 1192 SDValue Sub = Op.getOperand(1); 1193 uint64_t Idx = Op.getConstantOperandVal(2); 1194 unsigned NumSubElts = Sub.getValueType().getVectorNumElements(); 1195 APInt DemandedSubElts = DemandedElts.extractBits(NumSubElts, Idx); 1196 APInt DemandedSrcElts = DemandedElts; 1197 DemandedSrcElts.insertBits(APInt::getZero(NumSubElts), Idx); 1198 1199 KnownBits KnownSub, KnownSrc; 1200 if (SimplifyDemandedBits(Sub, DemandedBits, DemandedSubElts, KnownSub, TLO, 1201 Depth + 1)) 1202 return true; 1203 if (SimplifyDemandedBits(Src, DemandedBits, DemandedSrcElts, KnownSrc, TLO, 1204 Depth + 1)) 1205 return true; 1206 1207 Known.Zero.setAllBits(); 1208 Known.One.setAllBits(); 1209 if (!!DemandedSubElts) 1210 Known = KnownBits::commonBits(Known, KnownSub); 1211 if (!!DemandedSrcElts) 1212 Known = KnownBits::commonBits(Known, KnownSrc); 1213 1214 // Attempt to avoid multi-use src if we don't need anything from it. 1215 if (!DemandedBits.isAllOnes() || !DemandedSubElts.isAllOnes() || 1216 !DemandedSrcElts.isAllOnes()) { 1217 SDValue NewSub = SimplifyMultipleUseDemandedBits( 1218 Sub, DemandedBits, DemandedSubElts, TLO.DAG, Depth + 1); 1219 SDValue NewSrc = SimplifyMultipleUseDemandedBits( 1220 Src, DemandedBits, DemandedSrcElts, TLO.DAG, Depth + 1); 1221 if (NewSub || NewSrc) { 1222 NewSub = NewSub ? NewSub : Sub; 1223 NewSrc = NewSrc ? NewSrc : Src; 1224 SDValue NewOp = TLO.DAG.getNode(Op.getOpcode(), dl, VT, NewSrc, NewSub, 1225 Op.getOperand(2)); 1226 return TLO.CombineTo(Op, NewOp); 1227 } 1228 } 1229 break; 1230 } 1231 case ISD::EXTRACT_SUBVECTOR: { 1232 // Offset the demanded elts by the subvector index. 1233 SDValue Src = Op.getOperand(0); 1234 if (Src.getValueType().isScalableVector()) 1235 break; 1236 uint64_t Idx = Op.getConstantOperandVal(1); 1237 unsigned NumSrcElts = Src.getValueType().getVectorNumElements(); 1238 APInt DemandedSrcElts = DemandedElts.zext(NumSrcElts).shl(Idx); 1239 1240 if (SimplifyDemandedBits(Src, DemandedBits, DemandedSrcElts, Known, TLO, 1241 Depth + 1)) 1242 return true; 1243 1244 // Attempt to avoid multi-use src if we don't need anything from it. 
1245 if (!DemandedBits.isAllOnes() || !DemandedSrcElts.isAllOnes()) { 1246 SDValue DemandedSrc = SimplifyMultipleUseDemandedBits( 1247 Src, DemandedBits, DemandedSrcElts, TLO.DAG, Depth + 1); 1248 if (DemandedSrc) { 1249 SDValue NewOp = TLO.DAG.getNode(Op.getOpcode(), dl, VT, DemandedSrc, 1250 Op.getOperand(1)); 1251 return TLO.CombineTo(Op, NewOp); 1252 } 1253 } 1254 break; 1255 } 1256 case ISD::CONCAT_VECTORS: { 1257 Known.Zero.setAllBits(); 1258 Known.One.setAllBits(); 1259 EVT SubVT = Op.getOperand(0).getValueType(); 1260 unsigned NumSubVecs = Op.getNumOperands(); 1261 unsigned NumSubElts = SubVT.getVectorNumElements(); 1262 for (unsigned i = 0; i != NumSubVecs; ++i) { 1263 APInt DemandedSubElts = 1264 DemandedElts.extractBits(NumSubElts, i * NumSubElts); 1265 if (SimplifyDemandedBits(Op.getOperand(i), DemandedBits, DemandedSubElts, 1266 Known2, TLO, Depth + 1)) 1267 return true; 1268 // Known bits are shared by every demanded subvector element. 1269 if (!!DemandedSubElts) 1270 Known = KnownBits::commonBits(Known, Known2); 1271 } 1272 break; 1273 } 1274 case ISD::VECTOR_SHUFFLE: { 1275 ArrayRef<int> ShuffleMask = cast<ShuffleVectorSDNode>(Op)->getMask(); 1276 1277 // Collect demanded elements from shuffle operands.. 1278 APInt DemandedLHS(NumElts, 0); 1279 APInt DemandedRHS(NumElts, 0); 1280 for (unsigned i = 0; i != NumElts; ++i) { 1281 if (!DemandedElts[i]) 1282 continue; 1283 int M = ShuffleMask[i]; 1284 if (M < 0) { 1285 // For UNDEF elements, we don't know anything about the common state of 1286 // the shuffle result. 1287 DemandedLHS.clearAllBits(); 1288 DemandedRHS.clearAllBits(); 1289 break; 1290 } 1291 assert(0 <= M && M < (int)(2 * NumElts) && "Shuffle index out of range"); 1292 if (M < (int)NumElts) 1293 DemandedLHS.setBit(M); 1294 else 1295 DemandedRHS.setBit(M - NumElts); 1296 } 1297 1298 if (!!DemandedLHS || !!DemandedRHS) { 1299 SDValue Op0 = Op.getOperand(0); 1300 SDValue Op1 = Op.getOperand(1); 1301 1302 Known.Zero.setAllBits(); 1303 Known.One.setAllBits(); 1304 if (!!DemandedLHS) { 1305 if (SimplifyDemandedBits(Op0, DemandedBits, DemandedLHS, Known2, TLO, 1306 Depth + 1)) 1307 return true; 1308 Known = KnownBits::commonBits(Known, Known2); 1309 } 1310 if (!!DemandedRHS) { 1311 if (SimplifyDemandedBits(Op1, DemandedBits, DemandedRHS, Known2, TLO, 1312 Depth + 1)) 1313 return true; 1314 Known = KnownBits::commonBits(Known, Known2); 1315 } 1316 1317 // Attempt to avoid multi-use ops if we don't need anything from them. 1318 SDValue DemandedOp0 = SimplifyMultipleUseDemandedBits( 1319 Op0, DemandedBits, DemandedLHS, TLO.DAG, Depth + 1); 1320 SDValue DemandedOp1 = SimplifyMultipleUseDemandedBits( 1321 Op1, DemandedBits, DemandedRHS, TLO.DAG, Depth + 1); 1322 if (DemandedOp0 || DemandedOp1) { 1323 Op0 = DemandedOp0 ? DemandedOp0 : Op0; 1324 Op1 = DemandedOp1 ? DemandedOp1 : Op1; 1325 SDValue NewOp = TLO.DAG.getVectorShuffle(VT, dl, Op0, Op1, ShuffleMask); 1326 return TLO.CombineTo(Op, NewOp); 1327 } 1328 } 1329 break; 1330 } 1331 case ISD::AND: { 1332 SDValue Op0 = Op.getOperand(0); 1333 SDValue Op1 = Op.getOperand(1); 1334 1335 // If the RHS is a constant, check to see if the LHS would be zero without 1336 // using the bits from the RHS. Below, we use knowledge about the RHS to 1337 // simplify the LHS, here we're using information from the LHS to simplify 1338 // the RHS. 1339 if (ConstantSDNode *RHSC = isConstOrConstSplat(Op1)) { 1340 // Do not increment Depth here; that can cause an infinite loop. 
1341 KnownBits LHSKnown = TLO.DAG.computeKnownBits(Op0, DemandedElts, Depth); 1342 // If the LHS already has zeros where RHSC does, this 'and' is dead. 1343 if ((LHSKnown.Zero & DemandedBits) == 1344 (~RHSC->getAPIntValue() & DemandedBits)) 1345 return TLO.CombineTo(Op, Op0); 1346 1347 // If any of the set bits in the RHS are known zero on the LHS, shrink 1348 // the constant. 1349 if (ShrinkDemandedConstant(Op, ~LHSKnown.Zero & DemandedBits, 1350 DemandedElts, TLO)) 1351 return true; 1352 1353 // Bitwise-not (xor X, -1) is a special case: we don't usually shrink its 1354 // constant, but if this 'and' is only clearing bits that were just set by 1355 // the xor, then this 'and' can be eliminated by shrinking the mask of 1356 // the xor. For example, for a 32-bit X: 1357 // and (xor (srl X, 31), -1), 1 --> xor (srl X, 31), 1 1358 if (isBitwiseNot(Op0) && Op0.hasOneUse() && 1359 LHSKnown.One == ~RHSC->getAPIntValue()) { 1360 SDValue Xor = TLO.DAG.getNode(ISD::XOR, dl, VT, Op0.getOperand(0), Op1); 1361 return TLO.CombineTo(Op, Xor); 1362 } 1363 } 1364 1365 // AND(INSERT_SUBVECTOR(C,X,I),M) -> INSERT_SUBVECTOR(AND(C,M),X,I) 1366 // iff 'C' is Undef/Constant and AND(X,M) == X (for DemandedBits). 1367 if (Op0.getOpcode() == ISD::INSERT_SUBVECTOR && 1368 (Op0.getOperand(0).isUndef() || 1369 ISD::isBuildVectorOfConstantSDNodes(Op0.getOperand(0).getNode())) && 1370 Op0->hasOneUse()) { 1371 unsigned NumSubElts = 1372 Op0.getOperand(1).getValueType().getVectorNumElements(); 1373 unsigned SubIdx = Op0.getConstantOperandVal(2); 1374 APInt DemandedSub = 1375 APInt::getBitsSet(NumElts, SubIdx, SubIdx + NumSubElts); 1376 KnownBits KnownSubMask = 1377 TLO.DAG.computeKnownBits(Op1, DemandedSub & DemandedElts, Depth + 1); 1378 if (DemandedBits.isSubsetOf(KnownSubMask.One)) { 1379 SDValue NewAnd = 1380 TLO.DAG.getNode(ISD::AND, dl, VT, Op0.getOperand(0), Op1); 1381 SDValue NewInsert = 1382 TLO.DAG.getNode(ISD::INSERT_SUBVECTOR, dl, VT, NewAnd, 1383 Op0.getOperand(1), Op0.getOperand(2)); 1384 return TLO.CombineTo(Op, NewInsert); 1385 } 1386 } 1387 1388 if (SimplifyDemandedBits(Op1, DemandedBits, DemandedElts, Known, TLO, 1389 Depth + 1)) 1390 return true; 1391 assert(!Known.hasConflict() && "Bits known to be one AND zero?"); 1392 if (SimplifyDemandedBits(Op0, ~Known.Zero & DemandedBits, DemandedElts, 1393 Known2, TLO, Depth + 1)) 1394 return true; 1395 assert(!Known2.hasConflict() && "Bits known to be one AND zero?"); 1396 1397 // Attempt to avoid multi-use ops if we don't need anything from them. 1398 if (!DemandedBits.isAllOnes() || !DemandedElts.isAllOnes()) { 1399 SDValue DemandedOp0 = SimplifyMultipleUseDemandedBits( 1400 Op0, DemandedBits, DemandedElts, TLO.DAG, Depth + 1); 1401 SDValue DemandedOp1 = SimplifyMultipleUseDemandedBits( 1402 Op1, DemandedBits, DemandedElts, TLO.DAG, Depth + 1); 1403 if (DemandedOp0 || DemandedOp1) { 1404 Op0 = DemandedOp0 ? DemandedOp0 : Op0; 1405 Op1 = DemandedOp1 ? DemandedOp1 : Op1; 1406 SDValue NewOp = TLO.DAG.getNode(Op.getOpcode(), dl, VT, Op0, Op1); 1407 return TLO.CombineTo(Op, NewOp); 1408 } 1409 } 1410 1411 // If all of the demanded bits are known one on one side, return the other. 1412 // These bits cannot contribute to the result of the 'and'. 1413 if (DemandedBits.isSubsetOf(Known2.Zero | Known.One)) 1414 return TLO.CombineTo(Op, Op0); 1415 if (DemandedBits.isSubsetOf(Known.Zero | Known2.One)) 1416 return TLO.CombineTo(Op, Op1); 1417 // If all of the demanded bits in the inputs are known zeros, return zero. 
1418 if (DemandedBits.isSubsetOf(Known.Zero | Known2.Zero)) 1419 return TLO.CombineTo(Op, TLO.DAG.getConstant(0, dl, VT)); 1420 // If the RHS is a constant, see if we can simplify it. 1421 if (ShrinkDemandedConstant(Op, ~Known2.Zero & DemandedBits, DemandedElts, 1422 TLO)) 1423 return true; 1424 // If the operation can be done in a smaller type, do so. 1425 if (ShrinkDemandedOp(Op, BitWidth, DemandedBits, TLO)) 1426 return true; 1427 1428 Known &= Known2; 1429 break; 1430 } 1431 case ISD::OR: { 1432 SDValue Op0 = Op.getOperand(0); 1433 SDValue Op1 = Op.getOperand(1); 1434 1435 if (SimplifyDemandedBits(Op1, DemandedBits, DemandedElts, Known, TLO, 1436 Depth + 1)) 1437 return true; 1438 assert(!Known.hasConflict() && "Bits known to be one AND zero?"); 1439 if (SimplifyDemandedBits(Op0, ~Known.One & DemandedBits, DemandedElts, 1440 Known2, TLO, Depth + 1)) 1441 return true; 1442 assert(!Known2.hasConflict() && "Bits known to be one AND zero?"); 1443 1444 // Attempt to avoid multi-use ops if we don't need anything from them. 1445 if (!DemandedBits.isAllOnes() || !DemandedElts.isAllOnes()) { 1446 SDValue DemandedOp0 = SimplifyMultipleUseDemandedBits( 1447 Op0, DemandedBits, DemandedElts, TLO.DAG, Depth + 1); 1448 SDValue DemandedOp1 = SimplifyMultipleUseDemandedBits( 1449 Op1, DemandedBits, DemandedElts, TLO.DAG, Depth + 1); 1450 if (DemandedOp0 || DemandedOp1) { 1451 Op0 = DemandedOp0 ? DemandedOp0 : Op0; 1452 Op1 = DemandedOp1 ? DemandedOp1 : Op1; 1453 SDValue NewOp = TLO.DAG.getNode(Op.getOpcode(), dl, VT, Op0, Op1); 1454 return TLO.CombineTo(Op, NewOp); 1455 } 1456 } 1457 1458 // If all of the demanded bits are known zero on one side, return the other. 1459 // These bits cannot contribute to the result of the 'or'. 1460 if (DemandedBits.isSubsetOf(Known2.One | Known.Zero)) 1461 return TLO.CombineTo(Op, Op0); 1462 if (DemandedBits.isSubsetOf(Known.One | Known2.Zero)) 1463 return TLO.CombineTo(Op, Op1); 1464 // If the RHS is a constant, see if we can simplify it. 1465 if (ShrinkDemandedConstant(Op, DemandedBits, DemandedElts, TLO)) 1466 return true; 1467 // If the operation can be done in a smaller type, do so. 1468 if (ShrinkDemandedOp(Op, BitWidth, DemandedBits, TLO)) 1469 return true; 1470 1471 Known |= Known2; 1472 break; 1473 } 1474 case ISD::XOR: { 1475 SDValue Op0 = Op.getOperand(0); 1476 SDValue Op1 = Op.getOperand(1); 1477 1478 if (SimplifyDemandedBits(Op1, DemandedBits, DemandedElts, Known, TLO, 1479 Depth + 1)) 1480 return true; 1481 assert(!Known.hasConflict() && "Bits known to be one AND zero?"); 1482 if (SimplifyDemandedBits(Op0, DemandedBits, DemandedElts, Known2, TLO, 1483 Depth + 1)) 1484 return true; 1485 assert(!Known2.hasConflict() && "Bits known to be one AND zero?"); 1486 1487 // Attempt to avoid multi-use ops if we don't need anything from them. 1488 if (!DemandedBits.isAllOnes() || !DemandedElts.isAllOnes()) { 1489 SDValue DemandedOp0 = SimplifyMultipleUseDemandedBits( 1490 Op0, DemandedBits, DemandedElts, TLO.DAG, Depth + 1); 1491 SDValue DemandedOp1 = SimplifyMultipleUseDemandedBits( 1492 Op1, DemandedBits, DemandedElts, TLO.DAG, Depth + 1); 1493 if (DemandedOp0 || DemandedOp1) { 1494 Op0 = DemandedOp0 ? DemandedOp0 : Op0; 1495 Op1 = DemandedOp1 ? DemandedOp1 : Op1; 1496 SDValue NewOp = TLO.DAG.getNode(Op.getOpcode(), dl, VT, Op0, Op1); 1497 return TLO.CombineTo(Op, NewOp); 1498 } 1499 } 1500 1501 // If all of the demanded bits are known zero on one side, return the other. 1502 // These bits cannot contribute to the result of the 'xor'. 
1503 if (DemandedBits.isSubsetOf(Known.Zero)) 1504 return TLO.CombineTo(Op, Op0); 1505 if (DemandedBits.isSubsetOf(Known2.Zero)) 1506 return TLO.CombineTo(Op, Op1); 1507 // If the operation can be done in a smaller type, do so. 1508 if (ShrinkDemandedOp(Op, BitWidth, DemandedBits, TLO)) 1509 return true; 1510 1511 // If all of the unknown bits are known to be zero on one side or the other 1512 // turn this into an *inclusive* or. 1513 // e.g. (A & C1)^(B & C2) -> (A & C1)|(B & C2) iff C1&C2 == 0 1514 if (DemandedBits.isSubsetOf(Known.Zero | Known2.Zero)) 1515 return TLO.CombineTo(Op, TLO.DAG.getNode(ISD::OR, dl, VT, Op0, Op1)); 1516 1517 ConstantSDNode* C = isConstOrConstSplat(Op1, DemandedElts); 1518 if (C) { 1519 // If one side is a constant, and all of the set bits in the constant are 1520 // also known set on the other side, turn this into an AND, as we know 1521 // the bits will be cleared. 1522 // e.g. (X | C1) ^ C2 --> (X | C1) & ~C2 iff (C1&C2) == C2 1523 // NB: it is okay if more bits are known than are requested 1524 if (C->getAPIntValue() == Known2.One) { 1525 SDValue ANDC = 1526 TLO.DAG.getConstant(~C->getAPIntValue() & DemandedBits, dl, VT); 1527 return TLO.CombineTo(Op, TLO.DAG.getNode(ISD::AND, dl, VT, Op0, ANDC)); 1528 } 1529 1530 // If the RHS is a constant, see if we can change it. Don't alter a -1 1531 // constant because that's a 'not' op, and that is better for combining 1532 // and codegen. 1533 if (!C->isAllOnes() && DemandedBits.isSubsetOf(C->getAPIntValue())) { 1534 // We're flipping all demanded bits. Flip the undemanded bits too. 1535 SDValue New = TLO.DAG.getNOT(dl, Op0, VT); 1536 return TLO.CombineTo(Op, New); 1537 } 1538 } 1539 1540 // If we can't turn this into a 'not', try to shrink the constant. 1541 if (!C || !C->isAllOnes()) 1542 if (ShrinkDemandedConstant(Op, DemandedBits, DemandedElts, TLO)) 1543 return true; 1544 1545 Known ^= Known2; 1546 break; 1547 } 1548 case ISD::SELECT: 1549 if (SimplifyDemandedBits(Op.getOperand(2), DemandedBits, Known, TLO, 1550 Depth + 1)) 1551 return true; 1552 if (SimplifyDemandedBits(Op.getOperand(1), DemandedBits, Known2, TLO, 1553 Depth + 1)) 1554 return true; 1555 assert(!Known.hasConflict() && "Bits known to be one AND zero?"); 1556 assert(!Known2.hasConflict() && "Bits known to be one AND zero?"); 1557 1558 // If the operands are constants, see if we can simplify them. 1559 if (ShrinkDemandedConstant(Op, DemandedBits, DemandedElts, TLO)) 1560 return true; 1561 1562 // Only known if known in both the LHS and RHS. 1563 Known = KnownBits::commonBits(Known, Known2); 1564 break; 1565 case ISD::VSELECT: 1566 if (SimplifyDemandedBits(Op.getOperand(2), DemandedBits, DemandedElts, 1567 Known, TLO, Depth + 1)) 1568 return true; 1569 if (SimplifyDemandedBits(Op.getOperand(1), DemandedBits, DemandedElts, 1570 Known2, TLO, Depth + 1)) 1571 return true; 1572 assert(!Known.hasConflict() && "Bits known to be one AND zero?"); 1573 assert(!Known2.hasConflict() && "Bits known to be one AND zero?"); 1574 1575 // Only known if known in both the LHS and RHS. 
1576 Known = KnownBits::commonBits(Known, Known2); 1577 break; 1578 case ISD::SELECT_CC: 1579 if (SimplifyDemandedBits(Op.getOperand(3), DemandedBits, Known, TLO, 1580 Depth + 1)) 1581 return true; 1582 if (SimplifyDemandedBits(Op.getOperand(2), DemandedBits, Known2, TLO, 1583 Depth + 1)) 1584 return true; 1585 assert(!Known.hasConflict() && "Bits known to be one AND zero?"); 1586 assert(!Known2.hasConflict() && "Bits known to be one AND zero?"); 1587 1588 // If the operands are constants, see if we can simplify them. 1589 if (ShrinkDemandedConstant(Op, DemandedBits, DemandedElts, TLO)) 1590 return true; 1591 1592 // Only known if known in both the LHS and RHS. 1593 Known = KnownBits::commonBits(Known, Known2); 1594 break; 1595 case ISD::SETCC: { 1596 SDValue Op0 = Op.getOperand(0); 1597 SDValue Op1 = Op.getOperand(1); 1598 ISD::CondCode CC = cast<CondCodeSDNode>(Op.getOperand(2))->get(); 1599 // If (1) we only need the sign-bit, (2) the setcc operands are the same 1600 // width as the setcc result, and (3) the result of a setcc conforms to 0 or 1601 // -1, we may be able to bypass the setcc. 1602 if (DemandedBits.isSignMask() && 1603 Op0.getScalarValueSizeInBits() == BitWidth && 1604 getBooleanContents(Op0.getValueType()) == 1605 BooleanContent::ZeroOrNegativeOneBooleanContent) { 1606 // If we're testing X < 0, then this compare isn't needed - just use X! 1607 // FIXME: We're limiting to integer types here, but this should also work 1608 // if we don't care about FP signed-zero. The use of SETLT with FP means 1609 // that we don't care about NaNs. 1610 if (CC == ISD::SETLT && Op1.getValueType().isInteger() && 1611 (isNullConstant(Op1) || ISD::isBuildVectorAllZeros(Op1.getNode()))) 1612 return TLO.CombineTo(Op, Op0); 1613 1614 // TODO: Should we check for other forms of sign-bit comparisons? 1615 // Examples: X <= -1, X >= 0 1616 } 1617 if (getBooleanContents(Op0.getValueType()) == 1618 TargetLowering::ZeroOrOneBooleanContent && 1619 BitWidth > 1) 1620 Known.Zero.setBitsFrom(1); 1621 break; 1622 } 1623 case ISD::SHL: { 1624 SDValue Op0 = Op.getOperand(0); 1625 SDValue Op1 = Op.getOperand(1); 1626 EVT ShiftVT = Op1.getValueType(); 1627 1628 if (const APInt *SA = 1629 TLO.DAG.getValidShiftAmountConstant(Op, DemandedElts)) { 1630 unsigned ShAmt = SA->getZExtValue(); 1631 if (ShAmt == 0) 1632 return TLO.CombineTo(Op, Op0); 1633 1634 // If this is ((X >>u C1) << ShAmt), see if we can simplify this into a 1635 // single shift. We can do this if the bottom bits (which are shifted 1636 // out) are never demanded. 1637 // TODO - support non-uniform vector amounts. 1638 if (Op0.getOpcode() == ISD::SRL) { 1639 if (!DemandedBits.intersects(APInt::getLowBitsSet(BitWidth, ShAmt))) { 1640 if (const APInt *SA2 = 1641 TLO.DAG.getValidShiftAmountConstant(Op0, DemandedElts)) { 1642 unsigned C1 = SA2->getZExtValue(); 1643 unsigned Opc = ISD::SHL; 1644 int Diff = ShAmt - C1; 1645 if (Diff < 0) { 1646 Diff = -Diff; 1647 Opc = ISD::SRL; 1648 } 1649 SDValue NewSA = TLO.DAG.getConstant(Diff, dl, ShiftVT); 1650 return TLO.CombineTo( 1651 Op, TLO.DAG.getNode(Opc, dl, VT, Op0.getOperand(0), NewSA)); 1652 } 1653 } 1654 } 1655 1656 // Convert (shl (anyext x, c)) to (anyext (shl x, c)) if the high bits 1657 // are not demanded. This will likely allow the anyext to be folded away. 1658 // TODO - support non-uniform vector amounts. 
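    // e.g. (shl (any_extend i8 %x to i32), 2) where only the low 8 result
    // bits are demanded can become (any_extend (shl i8 %x, 2)), assuming the
    // narrower shift type is acceptable to the target; the extension can then
    // often be folded away entirely.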
1659 if (Op0.getOpcode() == ISD::ANY_EXTEND) { 1660 SDValue InnerOp = Op0.getOperand(0); 1661 EVT InnerVT = InnerOp.getValueType(); 1662 unsigned InnerBits = InnerVT.getScalarSizeInBits(); 1663 if (ShAmt < InnerBits && DemandedBits.getActiveBits() <= InnerBits && 1664 isTypeDesirableForOp(ISD::SHL, InnerVT)) { 1665 EVT ShTy = getShiftAmountTy(InnerVT, DL); 1666 if (!APInt(BitWidth, ShAmt).isIntN(ShTy.getSizeInBits())) 1667 ShTy = InnerVT; 1668 SDValue NarrowShl = 1669 TLO.DAG.getNode(ISD::SHL, dl, InnerVT, InnerOp, 1670 TLO.DAG.getConstant(ShAmt, dl, ShTy)); 1671 return TLO.CombineTo( 1672 Op, TLO.DAG.getNode(ISD::ANY_EXTEND, dl, VT, NarrowShl)); 1673 } 1674 1675 // Repeat the SHL optimization above in cases where an extension 1676 // intervenes: (shl (anyext (shr x, c1)), c2) to 1677 // (shl (anyext x), c2-c1). This requires that the bottom c1 bits 1678 // aren't demanded (as above) and that the shifted upper c1 bits of 1679 // x aren't demanded. 1680 // TODO - support non-uniform vector amounts. 1681 if (Op0.hasOneUse() && InnerOp.getOpcode() == ISD::SRL && 1682 InnerOp.hasOneUse()) { 1683 if (const APInt *SA2 = 1684 TLO.DAG.getValidShiftAmountConstant(InnerOp, DemandedElts)) { 1685 unsigned InnerShAmt = SA2->getZExtValue(); 1686 if (InnerShAmt < ShAmt && InnerShAmt < InnerBits && 1687 DemandedBits.getActiveBits() <= 1688 (InnerBits - InnerShAmt + ShAmt) && 1689 DemandedBits.countTrailingZeros() >= ShAmt) { 1690 SDValue NewSA = 1691 TLO.DAG.getConstant(ShAmt - InnerShAmt, dl, ShiftVT); 1692 SDValue NewExt = TLO.DAG.getNode(ISD::ANY_EXTEND, dl, VT, 1693 InnerOp.getOperand(0)); 1694 return TLO.CombineTo( 1695 Op, TLO.DAG.getNode(ISD::SHL, dl, VT, NewExt, NewSA)); 1696 } 1697 } 1698 } 1699 } 1700 1701 APInt InDemandedMask = DemandedBits.lshr(ShAmt); 1702 if (SimplifyDemandedBits(Op0, InDemandedMask, DemandedElts, Known, TLO, 1703 Depth + 1)) 1704 return true; 1705 assert(!Known.hasConflict() && "Bits known to be one AND zero?"); 1706 Known.Zero <<= ShAmt; 1707 Known.One <<= ShAmt; 1708 // low bits known zero. 1709 Known.Zero.setLowBits(ShAmt); 1710 1711 // Attempt to avoid multi-use ops if we don't need anything from them. 1712 if (!InDemandedMask.isAllOnesValue() || !DemandedElts.isAllOnesValue()) { 1713 SDValue DemandedOp0 = SimplifyMultipleUseDemandedBits( 1714 Op0, InDemandedMask, DemandedElts, TLO.DAG, Depth + 1); 1715 if (DemandedOp0) { 1716 SDValue NewOp = TLO.DAG.getNode(ISD::SHL, dl, VT, DemandedOp0, Op1); 1717 return TLO.CombineTo(Op, NewOp); 1718 } 1719 } 1720 1721 // Try shrinking the operation as long as the shift amount will still be 1722 // in range. 1723 if ((ShAmt < DemandedBits.getActiveBits()) && 1724 ShrinkDemandedOp(Op, BitWidth, DemandedBits, TLO)) 1725 return true; 1726 } 1727 1728 // If we are only demanding sign bits then we can use the shift source 1729 // directly. 1730 if (const APInt *MaxSA = 1731 TLO.DAG.getValidMaximumShiftAmountConstant(Op, DemandedElts)) { 1732 unsigned ShAmt = MaxSA->getZExtValue(); 1733 unsigned NumSignBits = 1734 TLO.DAG.ComputeNumSignBits(Op0, DemandedElts, Depth + 1); 1735 unsigned UpperDemandedBits = BitWidth - DemandedBits.countTrailingZeros(); 1736 if (NumSignBits > ShAmt && (NumSignBits - ShAmt) >= (UpperDemandedBits)) 1737 return TLO.CombineTo(Op, Op0); 1738 } 1739 break; 1740 } 1741 case ISD::SRL: { 1742 SDValue Op0 = Op.getOperand(0); 1743 SDValue Op1 = Op.getOperand(1); 1744 EVT ShiftVT = Op1.getValueType(); 1745 1746 // Try to match AVG patterns. 
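    // (Roughly: patterns such as (trunc (srl (add (zext i8 %a), (zext i8 %b)), 1))
    // compute an unsigned rounding-down average, and when the target supports
    // it they can be replaced by a single averaging node such as AVGFLOORU.)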
1747 if (SDValue AVG = combineShiftToAVG(Op, TLO.DAG, *this, DemandedBits, 1748 DemandedElts, Depth + 1)) 1749 return TLO.CombineTo(Op, AVG); 1750 1751 if (const APInt *SA = 1752 TLO.DAG.getValidShiftAmountConstant(Op, DemandedElts)) { 1753 unsigned ShAmt = SA->getZExtValue(); 1754 if (ShAmt == 0) 1755 return TLO.CombineTo(Op, Op0); 1756 1757 // If this is ((X << C1) >>u ShAmt), see if we can simplify this into a 1758 // single shift. We can do this if the top bits (which are shifted out) 1759 // are never demanded. 1760 // TODO - support non-uniform vector amounts. 1761 if (Op0.getOpcode() == ISD::SHL) { 1762 if (!DemandedBits.intersects(APInt::getHighBitsSet(BitWidth, ShAmt))) { 1763 if (const APInt *SA2 = 1764 TLO.DAG.getValidShiftAmountConstant(Op0, DemandedElts)) { 1765 unsigned C1 = SA2->getZExtValue(); 1766 unsigned Opc = ISD::SRL; 1767 int Diff = ShAmt - C1; 1768 if (Diff < 0) { 1769 Diff = -Diff; 1770 Opc = ISD::SHL; 1771 } 1772 SDValue NewSA = TLO.DAG.getConstant(Diff, dl, ShiftVT); 1773 return TLO.CombineTo( 1774 Op, TLO.DAG.getNode(Opc, dl, VT, Op0.getOperand(0), NewSA)); 1775 } 1776 } 1777 } 1778 1779 APInt InDemandedMask = (DemandedBits << ShAmt); 1780 1781 // If the shift is exact, then it does demand the low bits (and knows that 1782 // they are zero). 1783 if (Op->getFlags().hasExact()) 1784 InDemandedMask.setLowBits(ShAmt); 1785 1786 // Compute the new bits that are at the top now. 1787 if (SimplifyDemandedBits(Op0, InDemandedMask, DemandedElts, Known, TLO, 1788 Depth + 1)) 1789 return true; 1790 assert(!Known.hasConflict() && "Bits known to be one AND zero?"); 1791 Known.Zero.lshrInPlace(ShAmt); 1792 Known.One.lshrInPlace(ShAmt); 1793 // High bits known zero. 1794 Known.Zero.setHighBits(ShAmt); 1795 } 1796 break; 1797 } 1798 case ISD::SRA: { 1799 SDValue Op0 = Op.getOperand(0); 1800 SDValue Op1 = Op.getOperand(1); 1801 EVT ShiftVT = Op1.getValueType(); 1802 1803 // If we only want bits that already match the signbit then we don't need 1804 // to shift. 1805 unsigned NumHiDemandedBits = BitWidth - DemandedBits.countTrailingZeros(); 1806 if (TLO.DAG.ComputeNumSignBits(Op0, DemandedElts, Depth + 1) >= 1807 NumHiDemandedBits) 1808 return TLO.CombineTo(Op, Op0); 1809 1810 // If this is an arithmetic shift right and only the low-bit is set, we can 1811 // always convert this into a logical shr, even if the shift amount is 1812 // variable. The low bit of the shift cannot be an input sign bit unless 1813 // the shift amount is >= the size of the datatype, which is undefined. 1814 if (DemandedBits.isOne()) 1815 return TLO.CombineTo(Op, TLO.DAG.getNode(ISD::SRL, dl, VT, Op0, Op1)); 1816 1817 // Try to match AVG patterns. 1818 if (SDValue AVG = combineShiftToAVG(Op, TLO.DAG, *this, DemandedBits, 1819 DemandedElts, Depth + 1)) 1820 return TLO.CombineTo(Op, AVG); 1821 1822 if (const APInt *SA = 1823 TLO.DAG.getValidShiftAmountConstant(Op, DemandedElts)) { 1824 unsigned ShAmt = SA->getZExtValue(); 1825 if (ShAmt == 0) 1826 return TLO.CombineTo(Op, Op0); 1827 1828 APInt InDemandedMask = (DemandedBits << ShAmt); 1829 1830 // If the shift is exact, then it does demand the low bits (and knows that 1831 // they are zero). 1832 if (Op->getFlags().hasExact()) 1833 InDemandedMask.setLowBits(ShAmt); 1834 1835 // If any of the demanded bits are produced by the sign extension, we also 1836 // demand the input sign bit. 
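    // For example, shifting a 32-bit value right by 8 makes result bits 31..24
    // copies of the input sign bit, so if any of those top 8 result bits are
    // demanded we must also demand bit 31 of the input.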
1837 if (DemandedBits.countLeadingZeros() < ShAmt) 1838 InDemandedMask.setSignBit(); 1839 1840 if (SimplifyDemandedBits(Op0, InDemandedMask, DemandedElts, Known, TLO, 1841 Depth + 1)) 1842 return true; 1843 assert(!Known.hasConflict() && "Bits known to be one AND zero?"); 1844 Known.Zero.lshrInPlace(ShAmt); 1845 Known.One.lshrInPlace(ShAmt); 1846 1847 // If the input sign bit is known to be zero, or if none of the top bits 1848 // are demanded, turn this into an unsigned shift right. 1849 if (Known.Zero[BitWidth - ShAmt - 1] || 1850 DemandedBits.countLeadingZeros() >= ShAmt) { 1851 SDNodeFlags Flags; 1852 Flags.setExact(Op->getFlags().hasExact()); 1853 return TLO.CombineTo( 1854 Op, TLO.DAG.getNode(ISD::SRL, dl, VT, Op0, Op1, Flags)); 1855 } 1856 1857 int Log2 = DemandedBits.exactLogBase2(); 1858 if (Log2 >= 0) { 1859 // The bit must come from the sign. 1860 SDValue NewSA = TLO.DAG.getConstant(BitWidth - 1 - Log2, dl, ShiftVT); 1861 return TLO.CombineTo(Op, TLO.DAG.getNode(ISD::SRL, dl, VT, Op0, NewSA)); 1862 } 1863 1864 if (Known.One[BitWidth - ShAmt - 1]) 1865 // New bits are known one. 1866 Known.One.setHighBits(ShAmt); 1867 1868 // Attempt to avoid multi-use ops if we don't need anything from them. 1869 if (!InDemandedMask.isAllOnes() || !DemandedElts.isAllOnes()) { 1870 SDValue DemandedOp0 = SimplifyMultipleUseDemandedBits( 1871 Op0, InDemandedMask, DemandedElts, TLO.DAG, Depth + 1); 1872 if (DemandedOp0) { 1873 SDValue NewOp = TLO.DAG.getNode(ISD::SRA, dl, VT, DemandedOp0, Op1); 1874 return TLO.CombineTo(Op, NewOp); 1875 } 1876 } 1877 } 1878 break; 1879 } 1880 case ISD::FSHL: 1881 case ISD::FSHR: { 1882 SDValue Op0 = Op.getOperand(0); 1883 SDValue Op1 = Op.getOperand(1); 1884 SDValue Op2 = Op.getOperand(2); 1885 bool IsFSHL = (Op.getOpcode() == ISD::FSHL); 1886 1887 if (ConstantSDNode *SA = isConstOrConstSplat(Op2, DemandedElts)) { 1888 unsigned Amt = SA->getAPIntValue().urem(BitWidth); 1889 1890 // For fshl, 0-shift returns the 1st arg. 1891 // For fshr, 0-shift returns the 2nd arg. 1892 if (Amt == 0) { 1893 if (SimplifyDemandedBits(IsFSHL ? Op0 : Op1, DemandedBits, DemandedElts, 1894 Known, TLO, Depth + 1)) 1895 return true; 1896 break; 1897 } 1898 1899 // fshl: (Op0 << Amt) | (Op1 >> (BW - Amt)) 1900 // fshr: (Op0 << (BW - Amt)) | (Op1 >> Amt) 1901 APInt Demanded0 = DemandedBits.lshr(IsFSHL ? Amt : (BitWidth - Amt)); 1902 APInt Demanded1 = DemandedBits << (IsFSHL ? (BitWidth - Amt) : Amt); 1903 if (SimplifyDemandedBits(Op0, Demanded0, DemandedElts, Known2, TLO, 1904 Depth + 1)) 1905 return true; 1906 if (SimplifyDemandedBits(Op1, Demanded1, DemandedElts, Known, TLO, 1907 Depth + 1)) 1908 return true; 1909 1910 Known2.One <<= (IsFSHL ? Amt : (BitWidth - Amt)); 1911 Known2.Zero <<= (IsFSHL ? Amt : (BitWidth - Amt)); 1912 Known.One.lshrInPlace(IsFSHL ? (BitWidth - Amt) : Amt); 1913 Known.Zero.lshrInPlace(IsFSHL ? (BitWidth - Amt) : Amt); 1914 Known.One |= Known2.One; 1915 Known.Zero |= Known2.Zero; 1916 1917 // Attempt to avoid multi-use ops if we don't need anything from them. 1918 if (!Demanded0.isAllOnes() || !Demanded1.isAllOnes() || 1919 !DemandedElts.isAllOnes()) { 1920 SDValue DemandedOp0 = SimplifyMultipleUseDemandedBits( 1921 Op0, Demanded0, DemandedElts, TLO.DAG, Depth + 1); 1922 SDValue DemandedOp1 = SimplifyMultipleUseDemandedBits( 1923 Op1, Demanded1, DemandedElts, TLO.DAG, Depth + 1); 1924 if (DemandedOp0 || DemandedOp1) { 1925 DemandedOp0 = DemandedOp0 ? DemandedOp0 : Op0; 1926 DemandedOp1 = DemandedOp1 ? 
DemandedOp1 : Op1; 1927 SDValue NewOp = TLO.DAG.getNode(Op.getOpcode(), dl, VT, DemandedOp0, 1928 DemandedOp1, Op2); 1929 return TLO.CombineTo(Op, NewOp); 1930 } 1931 } 1932 } 1933 1934 // For pow-2 bitwidths we only demand the bottom modulo amt bits. 1935 if (isPowerOf2_32(BitWidth)) { 1936 APInt DemandedAmtBits(Op2.getScalarValueSizeInBits(), BitWidth - 1); 1937 if (SimplifyDemandedBits(Op2, DemandedAmtBits, DemandedElts, 1938 Known2, TLO, Depth + 1)) 1939 return true; 1940 } 1941 break; 1942 } 1943 case ISD::ROTL: 1944 case ISD::ROTR: { 1945 SDValue Op0 = Op.getOperand(0); 1946 SDValue Op1 = Op.getOperand(1); 1947 bool IsROTL = (Op.getOpcode() == ISD::ROTL); 1948 1949 // If we're rotating an 0/-1 value, then it stays an 0/-1 value. 1950 if (BitWidth == TLO.DAG.ComputeNumSignBits(Op0, DemandedElts, Depth + 1)) 1951 return TLO.CombineTo(Op, Op0); 1952 1953 if (ConstantSDNode *SA = isConstOrConstSplat(Op1, DemandedElts)) { 1954 unsigned Amt = SA->getAPIntValue().urem(BitWidth); 1955 unsigned RevAmt = BitWidth - Amt; 1956 1957 // rotl: (Op0 << Amt) | (Op0 >> (BW - Amt)) 1958 // rotr: (Op0 << (BW - Amt)) | (Op0 >> Amt) 1959 APInt Demanded0 = DemandedBits.rotr(IsROTL ? Amt : RevAmt); 1960 if (SimplifyDemandedBits(Op0, Demanded0, DemandedElts, Known2, TLO, 1961 Depth + 1)) 1962 return true; 1963 1964 // rot*(x, 0) --> x 1965 if (Amt == 0) 1966 return TLO.CombineTo(Op, Op0); 1967 1968 // See if we don't demand either half of the rotated bits. 1969 if ((!TLO.LegalOperations() || isOperationLegal(ISD::SHL, VT)) && 1970 DemandedBits.countTrailingZeros() >= (IsROTL ? Amt : RevAmt)) { 1971 Op1 = TLO.DAG.getConstant(IsROTL ? Amt : RevAmt, dl, Op1.getValueType()); 1972 return TLO.CombineTo(Op, TLO.DAG.getNode(ISD::SHL, dl, VT, Op0, Op1)); 1973 } 1974 if ((!TLO.LegalOperations() || isOperationLegal(ISD::SRL, VT)) && 1975 DemandedBits.countLeadingZeros() >= (IsROTL ? RevAmt : Amt)) { 1976 Op1 = TLO.DAG.getConstant(IsROTL ? RevAmt : Amt, dl, Op1.getValueType()); 1977 return TLO.CombineTo(Op, TLO.DAG.getNode(ISD::SRL, dl, VT, Op0, Op1)); 1978 } 1979 } 1980 1981 // For pow-2 bitwidths we only demand the bottom modulo amt bits. 1982 if (isPowerOf2_32(BitWidth)) { 1983 APInt DemandedAmtBits(Op1.getScalarValueSizeInBits(), BitWidth - 1); 1984 if (SimplifyDemandedBits(Op1, DemandedAmtBits, DemandedElts, Known2, TLO, 1985 Depth + 1)) 1986 return true; 1987 } 1988 break; 1989 } 1990 case ISD::UMIN: { 1991 // Check if one arg is always less than (or equal) to the other arg. 1992 SDValue Op0 = Op.getOperand(0); 1993 SDValue Op1 = Op.getOperand(1); 1994 KnownBits Known0 = TLO.DAG.computeKnownBits(Op0, DemandedElts, Depth + 1); 1995 KnownBits Known1 = TLO.DAG.computeKnownBits(Op1, DemandedElts, Depth + 1); 1996 Known = KnownBits::umin(Known0, Known1); 1997 if (Optional<bool> IsULE = KnownBits::ule(Known0, Known1)) 1998 return TLO.CombineTo(Op, IsULE.getValue() ? Op0 : Op1); 1999 if (Optional<bool> IsULT = KnownBits::ult(Known0, Known1)) 2000 return TLO.CombineTo(Op, IsULT.getValue() ? Op0 : Op1); 2001 break; 2002 } 2003 case ISD::UMAX: { 2004 // Check if one arg is always greater than (or equal) to the other arg. 2005 SDValue Op0 = Op.getOperand(0); 2006 SDValue Op1 = Op.getOperand(1); 2007 KnownBits Known0 = TLO.DAG.computeKnownBits(Op0, DemandedElts, Depth + 1); 2008 KnownBits Known1 = TLO.DAG.computeKnownBits(Op1, DemandedElts, Depth + 1); 2009 Known = KnownBits::umax(Known0, Known1); 2010 if (Optional<bool> IsUGE = KnownBits::uge(Known0, Known1)) 2011 return TLO.CombineTo(Op, IsUGE.getValue() ? 
Op0 : Op1); 2012 if (Optional<bool> IsUGT = KnownBits::ugt(Known0, Known1)) 2013 return TLO.CombineTo(Op, IsUGT.getValue() ? Op0 : Op1); 2014 break; 2015 } 2016 case ISD::BITREVERSE: { 2017 SDValue Src = Op.getOperand(0); 2018 APInt DemandedSrcBits = DemandedBits.reverseBits(); 2019 if (SimplifyDemandedBits(Src, DemandedSrcBits, DemandedElts, Known2, TLO, 2020 Depth + 1)) 2021 return true; 2022 Known.One = Known2.One.reverseBits(); 2023 Known.Zero = Known2.Zero.reverseBits(); 2024 break; 2025 } 2026 case ISD::BSWAP: { 2027 SDValue Src = Op.getOperand(0); 2028 2029 // If the only bits demanded come from one byte of the bswap result, 2030 // just shift the input byte into position to eliminate the bswap. 2031 unsigned NLZ = DemandedBits.countLeadingZeros(); 2032 unsigned NTZ = DemandedBits.countTrailingZeros(); 2033 2034 // Round NTZ down to the next byte. If we have 11 trailing zeros, then 2035 // we need all the bits down to bit 8. Likewise, round NLZ. If we 2036 // have 14 leading zeros, round to 8. 2037 NLZ = alignDown(NLZ, 8); 2038 NTZ = alignDown(NTZ, 8); 2039 // If we need exactly one byte, we can do this transformation. 2040 if (BitWidth - NLZ - NTZ == 8) { 2041 // Replace this with either a left or right shift to get the byte into 2042 // the right place. 2043 unsigned ShiftOpcode = NLZ > NTZ ? ISD::SRL : ISD::SHL; 2044 if (!TLO.LegalOperations() || isOperationLegal(ShiftOpcode, VT)) { 2045 EVT ShiftAmtTy = getShiftAmountTy(VT, DL); 2046 unsigned ShiftAmount = NLZ > NTZ ? NLZ - NTZ : NTZ - NLZ; 2047 SDValue ShAmt = TLO.DAG.getConstant(ShiftAmount, dl, ShiftAmtTy); 2048 SDValue NewOp = TLO.DAG.getNode(ShiftOpcode, dl, VT, Src, ShAmt); 2049 return TLO.CombineTo(Op, NewOp); 2050 } 2051 } 2052 2053 APInt DemandedSrcBits = DemandedBits.byteSwap(); 2054 if (SimplifyDemandedBits(Src, DemandedSrcBits, DemandedElts, Known2, TLO, 2055 Depth + 1)) 2056 return true; 2057 Known.One = Known2.One.byteSwap(); 2058 Known.Zero = Known2.Zero.byteSwap(); 2059 break; 2060 } 2061 case ISD::CTPOP: { 2062 // If only 1 bit is demanded, replace with PARITY as long as we're before 2063 // op legalization. 2064 // FIXME: Limit to scalars for now. 2065 if (DemandedBits.isOne() && !TLO.LegalOps && !VT.isVector()) 2066 return TLO.CombineTo(Op, TLO.DAG.getNode(ISD::PARITY, dl, VT, 2067 Op.getOperand(0))); 2068 2069 Known = TLO.DAG.computeKnownBits(Op, DemandedElts, Depth); 2070 break; 2071 } 2072 case ISD::SIGN_EXTEND_INREG: { 2073 SDValue Op0 = Op.getOperand(0); 2074 EVT ExVT = cast<VTSDNode>(Op.getOperand(1))->getVT(); 2075 unsigned ExVTBits = ExVT.getScalarSizeInBits(); 2076 2077 // If we only care about the highest bit, don't bother shifting right. 2078 if (DemandedBits.isSignMask()) { 2079 unsigned MinSignedBits = 2080 TLO.DAG.ComputeMaxSignificantBits(Op0, DemandedElts, Depth + 1); 2081 bool AlreadySignExtended = ExVTBits >= MinSignedBits; 2082 // However if the input is already sign extended we expect the sign 2083 // extension to be dropped altogether later and do not simplify. 2084 if (!AlreadySignExtended) { 2085 // Compute the correct shift amount type, which must be getShiftAmountTy 2086 // for scalar types after legalization. 2087 SDValue ShiftAmt = TLO.DAG.getConstant(BitWidth - ExVTBits, dl, 2088 getShiftAmountTy(VT, DL)); 2089 return TLO.CombineTo(Op, 2090 TLO.DAG.getNode(ISD::SHL, dl, VT, Op0, ShiftAmt)); 2091 } 2092 } 2093 2094 // If none of the extended bits are demanded, eliminate the sextinreg. 
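    // e.g. (sign_extend_inreg %x, i8) with only bits 7..0 demanded is just %x
    // for those bits: the replicated sign bits are never observed.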
2095 if (DemandedBits.getActiveBits() <= ExVTBits) 2096 return TLO.CombineTo(Op, Op0); 2097 2098 APInt InputDemandedBits = DemandedBits.getLoBits(ExVTBits); 2099 2100 // Since the sign extended bits are demanded, we know that the sign 2101 // bit is demanded. 2102 InputDemandedBits.setBit(ExVTBits - 1); 2103 2104 if (SimplifyDemandedBits(Op0, InputDemandedBits, DemandedElts, Known, TLO, 2105 Depth + 1)) 2106 return true; 2107 assert(!Known.hasConflict() && "Bits known to be one AND zero?"); 2108 2109 // If the sign bit of the input is known set or clear, then we know the 2110 // top bits of the result. 2111 2112 // If the input sign bit is known zero, convert this into a zero extension. 2113 if (Known.Zero[ExVTBits - 1]) 2114 return TLO.CombineTo(Op, TLO.DAG.getZeroExtendInReg(Op0, dl, ExVT)); 2115 2116 APInt Mask = APInt::getLowBitsSet(BitWidth, ExVTBits); 2117 if (Known.One[ExVTBits - 1]) { // Input sign bit known set 2118 Known.One.setBitsFrom(ExVTBits); 2119 Known.Zero &= Mask; 2120 } else { // Input sign bit unknown 2121 Known.Zero &= Mask; 2122 Known.One &= Mask; 2123 } 2124 break; 2125 } 2126 case ISD::BUILD_PAIR: { 2127 EVT HalfVT = Op.getOperand(0).getValueType(); 2128 unsigned HalfBitWidth = HalfVT.getScalarSizeInBits(); 2129 2130 APInt MaskLo = DemandedBits.getLoBits(HalfBitWidth).trunc(HalfBitWidth); 2131 APInt MaskHi = DemandedBits.getHiBits(HalfBitWidth).trunc(HalfBitWidth); 2132 2133 KnownBits KnownLo, KnownHi; 2134 2135 if (SimplifyDemandedBits(Op.getOperand(0), MaskLo, KnownLo, TLO, Depth + 1)) 2136 return true; 2137 2138 if (SimplifyDemandedBits(Op.getOperand(1), MaskHi, KnownHi, TLO, Depth + 1)) 2139 return true; 2140 2141 Known.Zero = KnownLo.Zero.zext(BitWidth) | 2142 KnownHi.Zero.zext(BitWidth).shl(HalfBitWidth); 2143 2144 Known.One = KnownLo.One.zext(BitWidth) | 2145 KnownHi.One.zext(BitWidth).shl(HalfBitWidth); 2146 break; 2147 } 2148 case ISD::ZERO_EXTEND: 2149 case ISD::ZERO_EXTEND_VECTOR_INREG: { 2150 SDValue Src = Op.getOperand(0); 2151 EVT SrcVT = Src.getValueType(); 2152 unsigned InBits = SrcVT.getScalarSizeInBits(); 2153 unsigned InElts = SrcVT.isVector() ? SrcVT.getVectorNumElements() : 1; 2154 bool IsVecInReg = Op.getOpcode() == ISD::ZERO_EXTEND_VECTOR_INREG; 2155 2156 // If none of the top bits are demanded, convert this into an any_extend. 2157 if (DemandedBits.getActiveBits() <= InBits) { 2158 // If we only need the non-extended bits of the bottom element 2159 // then we can just bitcast to the result. 2160 if (IsLE && IsVecInReg && DemandedElts == 1 && 2161 VT.getSizeInBits() == SrcVT.getSizeInBits()) 2162 return TLO.CombineTo(Op, TLO.DAG.getBitcast(VT, Src)); 2163 2164 unsigned Opc = 2165 IsVecInReg ? ISD::ANY_EXTEND_VECTOR_INREG : ISD::ANY_EXTEND; 2166 if (!TLO.LegalOperations() || isOperationLegal(Opc, VT)) 2167 return TLO.CombineTo(Op, TLO.DAG.getNode(Opc, dl, VT, Src)); 2168 } 2169 2170 APInt InDemandedBits = DemandedBits.trunc(InBits); 2171 APInt InDemandedElts = DemandedElts.zext(InElts); 2172 if (SimplifyDemandedBits(Src, InDemandedBits, InDemandedElts, Known, TLO, 2173 Depth + 1)) 2174 return true; 2175 assert(!Known.hasConflict() && "Bits known to be one AND zero?"); 2176 assert(Known.getBitWidth() == InBits && "Src width has changed?"); 2177 Known = Known.zext(BitWidth); 2178 2179 // Attempt to avoid multi-use ops if we don't need anything from them. 
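    // (SimplifyMultipleUseDemandedBits returns an alternative value that
    // already supplies the demanded bits without rewriting Src itself, so it
    // is useful even when Src has additional users.)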
2180 if (SDValue NewSrc = SimplifyMultipleUseDemandedBits( 2181 Src, InDemandedBits, InDemandedElts, TLO.DAG, Depth + 1)) 2182 return TLO.CombineTo(Op, TLO.DAG.getNode(Op.getOpcode(), dl, VT, NewSrc)); 2183 break; 2184 } 2185 case ISD::SIGN_EXTEND: 2186 case ISD::SIGN_EXTEND_VECTOR_INREG: { 2187 SDValue Src = Op.getOperand(0); 2188 EVT SrcVT = Src.getValueType(); 2189 unsigned InBits = SrcVT.getScalarSizeInBits(); 2190 unsigned InElts = SrcVT.isVector() ? SrcVT.getVectorNumElements() : 1; 2191 bool IsVecInReg = Op.getOpcode() == ISD::SIGN_EXTEND_VECTOR_INREG; 2192 2193 // If none of the top bits are demanded, convert this into an any_extend. 2194 if (DemandedBits.getActiveBits() <= InBits) { 2195 // If we only need the non-extended bits of the bottom element 2196 // then we can just bitcast to the result. 2197 if (IsLE && IsVecInReg && DemandedElts == 1 && 2198 VT.getSizeInBits() == SrcVT.getSizeInBits()) 2199 return TLO.CombineTo(Op, TLO.DAG.getBitcast(VT, Src)); 2200 2201 unsigned Opc = 2202 IsVecInReg ? ISD::ANY_EXTEND_VECTOR_INREG : ISD::ANY_EXTEND; 2203 if (!TLO.LegalOperations() || isOperationLegal(Opc, VT)) 2204 return TLO.CombineTo(Op, TLO.DAG.getNode(Opc, dl, VT, Src)); 2205 } 2206 2207 APInt InDemandedBits = DemandedBits.trunc(InBits); 2208 APInt InDemandedElts = DemandedElts.zext(InElts); 2209 2210 // Since some of the sign extended bits are demanded, we know that the sign 2211 // bit is demanded. 2212 InDemandedBits.setBit(InBits - 1); 2213 2214 if (SimplifyDemandedBits(Src, InDemandedBits, InDemandedElts, Known, TLO, 2215 Depth + 1)) 2216 return true; 2217 assert(!Known.hasConflict() && "Bits known to be one AND zero?"); 2218 assert(Known.getBitWidth() == InBits && "Src width has changed?"); 2219 2220 // If the sign bit is known one, the top bits match. 2221 Known = Known.sext(BitWidth); 2222 2223 // If the sign bit is known zero, convert this to a zero extend. 2224 if (Known.isNonNegative()) { 2225 unsigned Opc = 2226 IsVecInReg ? ISD::ZERO_EXTEND_VECTOR_INREG : ISD::ZERO_EXTEND; 2227 if (!TLO.LegalOperations() || isOperationLegal(Opc, VT)) 2228 return TLO.CombineTo(Op, TLO.DAG.getNode(Opc, dl, VT, Src)); 2229 } 2230 2231 // Attempt to avoid multi-use ops if we don't need anything from them. 2232 if (SDValue NewSrc = SimplifyMultipleUseDemandedBits( 2233 Src, InDemandedBits, InDemandedElts, TLO.DAG, Depth + 1)) 2234 return TLO.CombineTo(Op, TLO.DAG.getNode(Op.getOpcode(), dl, VT, NewSrc)); 2235 break; 2236 } 2237 case ISD::ANY_EXTEND: 2238 case ISD::ANY_EXTEND_VECTOR_INREG: { 2239 SDValue Src = Op.getOperand(0); 2240 EVT SrcVT = Src.getValueType(); 2241 unsigned InBits = SrcVT.getScalarSizeInBits(); 2242 unsigned InElts = SrcVT.isVector() ? SrcVT.getVectorNumElements() : 1; 2243 bool IsVecInReg = Op.getOpcode() == ISD::ANY_EXTEND_VECTOR_INREG; 2244 2245 // If we only need the bottom element then we can just bitcast. 2246 // TODO: Handle ANY_EXTEND? 
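    // e.g. on a little-endian target, (any_extend_vector_inreg v4i16 %src) to
    // v2i32 with only element 0 demanded can simply be bitcast from %src: the
    // low 16 bits line up and the extended bits are undefined anyway.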
2247 if (IsLE && IsVecInReg && DemandedElts == 1 && 2248 VT.getSizeInBits() == SrcVT.getSizeInBits()) 2249 return TLO.CombineTo(Op, TLO.DAG.getBitcast(VT, Src)); 2250 2251 APInt InDemandedBits = DemandedBits.trunc(InBits); 2252 APInt InDemandedElts = DemandedElts.zext(InElts); 2253 if (SimplifyDemandedBits(Src, InDemandedBits, InDemandedElts, Known, TLO, 2254 Depth + 1)) 2255 return true; 2256 assert(!Known.hasConflict() && "Bits known to be one AND zero?"); 2257 assert(Known.getBitWidth() == InBits && "Src width has changed?"); 2258 Known = Known.anyext(BitWidth); 2259 2260 // Attempt to avoid multi-use ops if we don't need anything from them. 2261 if (SDValue NewSrc = SimplifyMultipleUseDemandedBits( 2262 Src, InDemandedBits, InDemandedElts, TLO.DAG, Depth + 1)) 2263 return TLO.CombineTo(Op, TLO.DAG.getNode(Op.getOpcode(), dl, VT, NewSrc)); 2264 break; 2265 } 2266 case ISD::TRUNCATE: { 2267 SDValue Src = Op.getOperand(0); 2268 2269 // Simplify the input, using demanded bit information, and compute the known 2270 // zero/one bits live out. 2271 unsigned OperandBitWidth = Src.getScalarValueSizeInBits(); 2272 APInt TruncMask = DemandedBits.zext(OperandBitWidth); 2273 if (SimplifyDemandedBits(Src, TruncMask, DemandedElts, Known, TLO, 2274 Depth + 1)) 2275 return true; 2276 Known = Known.trunc(BitWidth); 2277 2278 // Attempt to avoid multi-use ops if we don't need anything from them. 2279 if (SDValue NewSrc = SimplifyMultipleUseDemandedBits( 2280 Src, TruncMask, DemandedElts, TLO.DAG, Depth + 1)) 2281 return TLO.CombineTo(Op, TLO.DAG.getNode(ISD::TRUNCATE, dl, VT, NewSrc)); 2282 2283 // If the input is only used by this truncate, see if we can shrink it based 2284 // on the known demanded bits. 2285 if (Src.getNode()->hasOneUse()) { 2286 switch (Src.getOpcode()) { 2287 default: 2288 break; 2289 case ISD::SRL: 2290 // Shrink SRL by a constant if none of the high bits shifted in are 2291 // demanded. 2292 if (TLO.LegalTypes() && !isTypeDesirableForOp(ISD::SRL, VT)) 2293 // Do not turn (vt1 truncate (vt2 srl)) into (vt1 srl) if vt1 is 2294 // undesirable. 2295 break; 2296 2297 const APInt *ShAmtC = 2298 TLO.DAG.getValidShiftAmountConstant(Src, DemandedElts); 2299 if (!ShAmtC || ShAmtC->uge(BitWidth)) 2300 break; 2301 uint64_t ShVal = ShAmtC->getZExtValue(); 2302 2303 APInt HighBits = 2304 APInt::getHighBitsSet(OperandBitWidth, OperandBitWidth - BitWidth); 2305 HighBits.lshrInPlace(ShVal); 2306 HighBits = HighBits.trunc(BitWidth); 2307 2308 if (!(HighBits & DemandedBits)) { 2309 // None of the shifted in bits are needed. Add a truncate of the 2310 // shift input, then shift it. 2311 SDValue NewShAmt = TLO.DAG.getConstant( 2312 ShVal, dl, getShiftAmountTy(VT, DL, TLO.LegalTypes())); 2313 SDValue NewTrunc = 2314 TLO.DAG.getNode(ISD::TRUNCATE, dl, VT, Src.getOperand(0)); 2315 return TLO.CombineTo( 2316 Op, TLO.DAG.getNode(ISD::SRL, dl, VT, NewTrunc, NewShAmt)); 2317 } 2318 break; 2319 } 2320 } 2321 2322 assert(!Known.hasConflict() && "Bits known to be one AND zero?"); 2323 break; 2324 } 2325 case ISD::AssertZext: { 2326 // AssertZext demands all of the high bits, plus any of the low bits 2327 // demanded by its users. 
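    // e.g. (AssertZext %x, i8) on an i32 value demands bits 31..8 of %x
    // unconditionally (they are asserted to be zero) plus whichever of bits
    // 7..0 its users actually demand.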
2328 EVT ZVT = cast<VTSDNode>(Op.getOperand(1))->getVT(); 2329 APInt InMask = APInt::getLowBitsSet(BitWidth, ZVT.getSizeInBits()); 2330 if (SimplifyDemandedBits(Op.getOperand(0), ~InMask | DemandedBits, Known, 2331 TLO, Depth + 1)) 2332 return true; 2333 assert(!Known.hasConflict() && "Bits known to be one AND zero?"); 2334 2335 Known.Zero |= ~InMask; 2336 break; 2337 } 2338 case ISD::EXTRACT_VECTOR_ELT: { 2339 SDValue Src = Op.getOperand(0); 2340 SDValue Idx = Op.getOperand(1); 2341 ElementCount SrcEltCnt = Src.getValueType().getVectorElementCount(); 2342 unsigned EltBitWidth = Src.getScalarValueSizeInBits(); 2343 2344 if (SrcEltCnt.isScalable()) 2345 return false; 2346 2347 // Demand the bits from every vector element without a constant index. 2348 unsigned NumSrcElts = SrcEltCnt.getFixedValue(); 2349 APInt DemandedSrcElts = APInt::getAllOnes(NumSrcElts); 2350 if (auto *CIdx = dyn_cast<ConstantSDNode>(Idx)) 2351 if (CIdx->getAPIntValue().ult(NumSrcElts)) 2352 DemandedSrcElts = APInt::getOneBitSet(NumSrcElts, CIdx->getZExtValue()); 2353 2354 // If BitWidth > EltBitWidth the value is anyext:ed. So we do not know 2355 // anything about the extended bits. 2356 APInt DemandedSrcBits = DemandedBits; 2357 if (BitWidth > EltBitWidth) 2358 DemandedSrcBits = DemandedSrcBits.trunc(EltBitWidth); 2359 2360 if (SimplifyDemandedBits(Src, DemandedSrcBits, DemandedSrcElts, Known2, TLO, 2361 Depth + 1)) 2362 return true; 2363 2364 // Attempt to avoid multi-use ops if we don't need anything from them. 2365 if (!DemandedSrcBits.isAllOnes() || !DemandedSrcElts.isAllOnes()) { 2366 if (SDValue DemandedSrc = SimplifyMultipleUseDemandedBits( 2367 Src, DemandedSrcBits, DemandedSrcElts, TLO.DAG, Depth + 1)) { 2368 SDValue NewOp = 2369 TLO.DAG.getNode(Op.getOpcode(), dl, VT, DemandedSrc, Idx); 2370 return TLO.CombineTo(Op, NewOp); 2371 } 2372 } 2373 2374 Known = Known2; 2375 if (BitWidth > EltBitWidth) 2376 Known = Known.anyext(BitWidth); 2377 break; 2378 } 2379 case ISD::BITCAST: { 2380 SDValue Src = Op.getOperand(0); 2381 EVT SrcVT = Src.getValueType(); 2382 unsigned NumSrcEltBits = SrcVT.getScalarSizeInBits(); 2383 2384 // If this is an FP->Int bitcast and if the sign bit is the only 2385 // thing demanded, turn this into a FGETSIGN. 2386 if (!TLO.LegalOperations() && !VT.isVector() && !SrcVT.isVector() && 2387 DemandedBits == APInt::getSignMask(Op.getValueSizeInBits()) && 2388 SrcVT.isFloatingPoint()) { 2389 bool OpVTLegal = isOperationLegalOrCustom(ISD::FGETSIGN, VT); 2390 bool i32Legal = isOperationLegalOrCustom(ISD::FGETSIGN, MVT::i32); 2391 if ((OpVTLegal || i32Legal) && VT.isSimple() && SrcVT != MVT::f16 && 2392 SrcVT != MVT::f128) { 2393 // Cannot eliminate/lower SHL for f128 yet. 2394 EVT Ty = OpVTLegal ? VT : MVT::i32; 2395 // Make a FGETSIGN + SHL to move the sign bit into the appropriate 2396 // place. We expect the SHL to be eliminated by other optimizations. 2397 SDValue Sign = TLO.DAG.getNode(ISD::FGETSIGN, dl, Ty, Src); 2398 unsigned OpVTSizeInBits = Op.getValueSizeInBits(); 2399 if (!OpVTLegal && OpVTSizeInBits > 32) 2400 Sign = TLO.DAG.getNode(ISD::ZERO_EXTEND, dl, VT, Sign); 2401 unsigned ShVal = Op.getValueSizeInBits() - 1; 2402 SDValue ShAmt = TLO.DAG.getConstant(ShVal, dl, VT); 2403 return TLO.CombineTo(Op, 2404 TLO.DAG.getNode(ISD::SHL, dl, VT, Sign, ShAmt)); 2405 } 2406 } 2407 2408 // Bitcast from a vector using SimplifyDemanded Bits/VectorElts. 2409 // Demand the elt/bit if any of the original elts/bits are demanded. 
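    // For example, bitcasting v2i32 to i64 on a little-endian target: a demand
    // for the upper 32 bits of the i64 becomes a demand for source element 1,
    // restricted to the matching bits within that element.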
2410 if (SrcVT.isVector() && (BitWidth % NumSrcEltBits) == 0) { 2411 unsigned Scale = BitWidth / NumSrcEltBits; 2412 unsigned NumSrcElts = SrcVT.getVectorNumElements(); 2413 APInt DemandedSrcBits = APInt::getZero(NumSrcEltBits); 2414 APInt DemandedSrcElts = APInt::getZero(NumSrcElts); 2415 for (unsigned i = 0; i != Scale; ++i) { 2416 unsigned EltOffset = IsLE ? i : (Scale - 1 - i); 2417 unsigned BitOffset = EltOffset * NumSrcEltBits; 2418 APInt Sub = DemandedBits.extractBits(NumSrcEltBits, BitOffset); 2419 if (!Sub.isZero()) { 2420 DemandedSrcBits |= Sub; 2421 for (unsigned j = 0; j != NumElts; ++j) 2422 if (DemandedElts[j]) 2423 DemandedSrcElts.setBit((j * Scale) + i); 2424 } 2425 } 2426 2427 APInt KnownSrcUndef, KnownSrcZero; 2428 if (SimplifyDemandedVectorElts(Src, DemandedSrcElts, KnownSrcUndef, 2429 KnownSrcZero, TLO, Depth + 1)) 2430 return true; 2431 2432 KnownBits KnownSrcBits; 2433 if (SimplifyDemandedBits(Src, DemandedSrcBits, DemandedSrcElts, 2434 KnownSrcBits, TLO, Depth + 1)) 2435 return true; 2436 } else if (IsLE && (NumSrcEltBits % BitWidth) == 0) { 2437 // TODO - bigendian once we have test coverage. 2438 unsigned Scale = NumSrcEltBits / BitWidth; 2439 unsigned NumSrcElts = SrcVT.isVector() ? SrcVT.getVectorNumElements() : 1; 2440 APInt DemandedSrcBits = APInt::getZero(NumSrcEltBits); 2441 APInt DemandedSrcElts = APInt::getZero(NumSrcElts); 2442 for (unsigned i = 0; i != NumElts; ++i) 2443 if (DemandedElts[i]) { 2444 unsigned Offset = (i % Scale) * BitWidth; 2445 DemandedSrcBits.insertBits(DemandedBits, Offset); 2446 DemandedSrcElts.setBit(i / Scale); 2447 } 2448 2449 if (SrcVT.isVector()) { 2450 APInt KnownSrcUndef, KnownSrcZero; 2451 if (SimplifyDemandedVectorElts(Src, DemandedSrcElts, KnownSrcUndef, 2452 KnownSrcZero, TLO, Depth + 1)) 2453 return true; 2454 } 2455 2456 KnownBits KnownSrcBits; 2457 if (SimplifyDemandedBits(Src, DemandedSrcBits, DemandedSrcElts, 2458 KnownSrcBits, TLO, Depth + 1)) 2459 return true; 2460 } 2461 2462 // If this is a bitcast, let computeKnownBits handle it. Only do this on a 2463 // recursive call where Known may be useful to the caller. 2464 if (Depth > 0) { 2465 Known = TLO.DAG.computeKnownBits(Op, DemandedElts, Depth); 2466 return false; 2467 } 2468 break; 2469 } 2470 case ISD::MUL: 2471 if (DemandedBits.isPowerOf2()) { 2472 // The LSB of X*Y is set only if (X & 1) == 1 and (Y & 1) == 1. 2473 // If we demand exactly one bit N and we have "X * (C' << N)" where C' is 2474 // odd (has LSB set), then the left-shifted low bit of X is the answer. 2475 unsigned CTZ = DemandedBits.countTrailingZeros(); 2476 ConstantSDNode *C = isConstOrConstSplat(Op.getOperand(1), DemandedElts); 2477 if (C && C->getAPIntValue().countTrailingZeros() == CTZ) { 2478 EVT ShiftAmtTy = getShiftAmountTy(VT, TLO.DAG.getDataLayout()); 2479 SDValue AmtC = TLO.DAG.getConstant(CTZ, dl, ShiftAmtTy); 2480 SDValue Shl = TLO.DAG.getNode(ISD::SHL, dl, VT, Op.getOperand(0), AmtC); 2481 return TLO.CombineTo(Op, Shl); 2482 } 2483 } 2484 // For a squared value "X * X", the bottom 2 bits are 0 and X[0] because: 2485 // X * X is odd iff X is odd. 
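    // (writing X = 2*a + X[0]: X*X = 4*a*a + 4*a*X[0] + X[0], since X[0]*X[0] == X[0];
    // the product is a multiple of 4 plus X[0], so its low bit is exactly X[0])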
2486 // 'Quadratic Reciprocity': X * X -> 0 for bit[1] 2487 if (Op.getOperand(0) == Op.getOperand(1) && DemandedBits.ult(4)) { 2488 SDValue One = TLO.DAG.getConstant(1, dl, VT); 2489 SDValue And1 = TLO.DAG.getNode(ISD::AND, dl, VT, Op.getOperand(0), One); 2490 return TLO.CombineTo(Op, And1); 2491 } 2492 LLVM_FALLTHROUGH; 2493 case ISD::ADD: 2494 case ISD::SUB: { 2495 // Add, Sub, and Mul don't demand any bits in positions beyond that 2496 // of the highest bit demanded of them. 2497 SDValue Op0 = Op.getOperand(0), Op1 = Op.getOperand(1); 2498 SDNodeFlags Flags = Op.getNode()->getFlags(); 2499 unsigned DemandedBitsLZ = DemandedBits.countLeadingZeros(); 2500 APInt LoMask = APInt::getLowBitsSet(BitWidth, BitWidth - DemandedBitsLZ); 2501 if (SimplifyDemandedBits(Op0, LoMask, DemandedElts, Known2, TLO, 2502 Depth + 1) || 2503 SimplifyDemandedBits(Op1, LoMask, DemandedElts, Known2, TLO, 2504 Depth + 1) || 2505 // See if the operation should be performed at a smaller bit width. 2506 ShrinkDemandedOp(Op, BitWidth, DemandedBits, TLO)) { 2507 if (Flags.hasNoSignedWrap() || Flags.hasNoUnsignedWrap()) { 2508 // Disable the nsw and nuw flags. We can no longer guarantee that we 2509 // won't wrap after simplification. 2510 Flags.setNoSignedWrap(false); 2511 Flags.setNoUnsignedWrap(false); 2512 Op->setFlags(Flags); 2513 } 2514 return true; 2515 } 2516 2517 // Attempt to avoid multi-use ops if we don't need anything from them. 2518 if (!LoMask.isAllOnes() || !DemandedElts.isAllOnes()) { 2519 SDValue DemandedOp0 = SimplifyMultipleUseDemandedBits( 2520 Op0, LoMask, DemandedElts, TLO.DAG, Depth + 1); 2521 SDValue DemandedOp1 = SimplifyMultipleUseDemandedBits( 2522 Op1, LoMask, DemandedElts, TLO.DAG, Depth + 1); 2523 if (DemandedOp0 || DemandedOp1) { 2524 Flags.setNoSignedWrap(false); 2525 Flags.setNoUnsignedWrap(false); 2526 Op0 = DemandedOp0 ? DemandedOp0 : Op0; 2527 Op1 = DemandedOp1 ? DemandedOp1 : Op1; 2528 SDValue NewOp = 2529 TLO.DAG.getNode(Op.getOpcode(), dl, VT, Op0, Op1, Flags); 2530 return TLO.CombineTo(Op, NewOp); 2531 } 2532 } 2533 2534 // If we have a constant operand, we may be able to turn it into -1 if we 2535 // do not demand the high bits. This can make the constant smaller to 2536 // encode, allow more general folding, or match specialized instruction 2537 // patterns (eg, 'blsr' on x86). Don't bother changing 1 to -1 because that 2538 // is probably not useful (and could be detrimental). 2539 ConstantSDNode *C = isConstOrConstSplat(Op1); 2540 APInt HighMask = APInt::getHighBitsSet(BitWidth, DemandedBitsLZ); 2541 if (C && !C->isAllOnes() && !C->isOne() && 2542 (C->getAPIntValue() | HighMask).isAllOnes()) { 2543 SDValue Neg1 = TLO.DAG.getAllOnesConstant(dl, VT); 2544 // Disable the nsw and nuw flags. We can no longer guarantee that we 2545 // won't wrap after simplification. 2546 Flags.setNoSignedWrap(false); 2547 Flags.setNoUnsignedWrap(false); 2548 SDValue NewOp = TLO.DAG.getNode(Op.getOpcode(), dl, VT, Op0, Neg1, Flags); 2549 return TLO.CombineTo(Op, NewOp); 2550 } 2551 2552 // Match a multiply with a disguised negated-power-of-2 and convert to a 2553 // an equivalent shift-left amount. 2554 // Example: (X * MulC) + Op1 --> Op1 - (X << log2(-MulC)) 2555 auto getShiftLeftAmt = [&HighMask](SDValue Mul) -> unsigned { 2556 if (Mul.getOpcode() != ISD::MUL || !Mul.hasOneUse()) 2557 return 0; 2558 2559 // Don't touch opaque constants. Also, ignore zero and power-of-2 2560 // multiplies. Those will get folded later. 
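      // (the later "| HighMask" step also lets a constant that is only negative
      // in its demanded low bits act as a negated power of 2: e.g. with only
      // the low 8 bits demanded, MulC == 0xFC behaves like -4 and yields a
      // shift amount of 2)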
2561 ConstantSDNode *MulC = isConstOrConstSplat(Mul.getOperand(1)); 2562 if (MulC && !MulC->isOpaque() && !MulC->isZero() && 2563 !MulC->getAPIntValue().isPowerOf2()) { 2564 APInt UnmaskedC = MulC->getAPIntValue() | HighMask; 2565 if (UnmaskedC.isNegatedPowerOf2()) 2566 return (-UnmaskedC).logBase2(); 2567 } 2568 return 0; 2569 }; 2570 2571 auto foldMul = [&](ISD::NodeType NT, SDValue X, SDValue Y, unsigned ShlAmt) { 2572 EVT ShiftAmtTy = getShiftAmountTy(VT, TLO.DAG.getDataLayout()); 2573 SDValue ShlAmtC = TLO.DAG.getConstant(ShlAmt, dl, ShiftAmtTy); 2574 SDValue Shl = TLO.DAG.getNode(ISD::SHL, dl, VT, X, ShlAmtC); 2575 SDValue Res = TLO.DAG.getNode(NT, dl, VT, Y, Shl); 2576 return TLO.CombineTo(Op, Res); 2577 }; 2578 2579 if (isOperationLegalOrCustom(ISD::SHL, VT)) { 2580 if (Op.getOpcode() == ISD::ADD) { 2581 // (X * MulC) + Op1 --> Op1 - (X << log2(-MulC)) 2582 if (unsigned ShAmt = getShiftLeftAmt(Op0)) 2583 return foldMul(ISD::SUB, Op0.getOperand(0), Op1, ShAmt); 2584 // Op0 + (X * MulC) --> Op0 - (X << log2(-MulC)) 2585 if (unsigned ShAmt = getShiftLeftAmt(Op1)) 2586 return foldMul(ISD::SUB, Op1.getOperand(0), Op0, ShAmt); 2587 } 2588 if (Op.getOpcode() == ISD::SUB) { 2589 // Op0 - (X * MulC) --> Op0 + (X << log2(-MulC)) 2590 if (unsigned ShAmt = getShiftLeftAmt(Op1)) 2591 return foldMul(ISD::ADD, Op1.getOperand(0), Op0, ShAmt); 2592 } 2593 } 2594 2595 LLVM_FALLTHROUGH; 2596 } 2597 default: 2598 if (Op.getOpcode() >= ISD::BUILTIN_OP_END) { 2599 if (SimplifyDemandedBitsForTargetNode(Op, DemandedBits, DemandedElts, 2600 Known, TLO, Depth)) 2601 return true; 2602 break; 2603 } 2604 2605 // Just use computeKnownBits to compute output bits. 2606 Known = TLO.DAG.computeKnownBits(Op, DemandedElts, Depth); 2607 break; 2608 } 2609 2610 // If we know the value of all of the demanded bits, return this as a 2611 // constant. 2612 if (!isTargetCanonicalConstantNode(Op) && 2613 DemandedBits.isSubsetOf(Known.Zero | Known.One)) { 2614 // Avoid folding to a constant if any OpaqueConstant is involved. 2615 const SDNode *N = Op.getNode(); 2616 for (SDNode *Op : 2617 llvm::make_range(SDNodeIterator::begin(N), SDNodeIterator::end(N))) { 2618 if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op)) 2619 if (C->isOpaque()) 2620 return false; 2621 } 2622 if (VT.isInteger()) 2623 return TLO.CombineTo(Op, TLO.DAG.getConstant(Known.One, dl, VT)); 2624 if (VT.isFloatingPoint()) 2625 return TLO.CombineTo( 2626 Op, 2627 TLO.DAG.getConstantFP( 2628 APFloat(TLO.DAG.EVTToAPFloatSemantics(VT), Known.One), dl, VT)); 2629 } 2630 2631 return false; 2632 } 2633 2634 bool TargetLowering::SimplifyDemandedVectorElts(SDValue Op, 2635 const APInt &DemandedElts, 2636 DAGCombinerInfo &DCI) const { 2637 SelectionDAG &DAG = DCI.DAG; 2638 TargetLoweringOpt TLO(DAG, !DCI.isBeforeLegalize(), 2639 !DCI.isBeforeLegalizeOps()); 2640 2641 APInt KnownUndef, KnownZero; 2642 bool Simplified = 2643 SimplifyDemandedVectorElts(Op, DemandedElts, KnownUndef, KnownZero, TLO); 2644 if (Simplified) { 2645 DCI.AddToWorklist(Op.getNode()); 2646 DCI.CommitTargetLoweringOpt(TLO); 2647 } 2648 2649 return Simplified; 2650 } 2651 2652 /// Given a vector binary operation and known undefined elements for each input 2653 /// operand, compute whether each element of the output is undefined. 
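/// For example, an output lane is reported undef when both of its input lanes
/// are undef or foldable constants and constant-folding that lane's operation
/// actually produces undef.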
2654 static APInt getKnownUndefForVectorBinop(SDValue BO, SelectionDAG &DAG, 2655 const APInt &UndefOp0, 2656 const APInt &UndefOp1) { 2657 EVT VT = BO.getValueType(); 2658 assert(DAG.getTargetLoweringInfo().isBinOp(BO.getOpcode()) && VT.isVector() && 2659 "Vector binop only"); 2660 2661 EVT EltVT = VT.getVectorElementType(); 2662 unsigned NumElts = VT.getVectorNumElements(); 2663 assert(UndefOp0.getBitWidth() == NumElts && 2664 UndefOp1.getBitWidth() == NumElts && "Bad type for undef analysis"); 2665 2666 auto getUndefOrConstantElt = [&](SDValue V, unsigned Index, 2667 const APInt &UndefVals) { 2668 if (UndefVals[Index]) 2669 return DAG.getUNDEF(EltVT); 2670 2671 if (auto *BV = dyn_cast<BuildVectorSDNode>(V)) { 2672 // Try hard to make sure that the getNode() call is not creating temporary 2673 // nodes. Ignore opaque integers because they do not constant fold. 2674 SDValue Elt = BV->getOperand(Index); 2675 auto *C = dyn_cast<ConstantSDNode>(Elt); 2676 if (isa<ConstantFPSDNode>(Elt) || Elt.isUndef() || (C && !C->isOpaque())) 2677 return Elt; 2678 } 2679 2680 return SDValue(); 2681 }; 2682 2683 APInt KnownUndef = APInt::getZero(NumElts); 2684 for (unsigned i = 0; i != NumElts; ++i) { 2685 // If both inputs for this element are either constant or undef and match 2686 // the element type, compute the constant/undef result for this element of 2687 // the vector. 2688 // TODO: Ideally we would use FoldConstantArithmetic() here, but that does 2689 // not handle FP constants. The code within getNode() should be refactored 2690 // to avoid the danger of creating a bogus temporary node here. 2691 SDValue C0 = getUndefOrConstantElt(BO.getOperand(0), i, UndefOp0); 2692 SDValue C1 = getUndefOrConstantElt(BO.getOperand(1), i, UndefOp1); 2693 if (C0 && C1 && C0.getValueType() == EltVT && C1.getValueType() == EltVT) 2694 if (DAG.getNode(BO.getOpcode(), SDLoc(BO), EltVT, C0, C1).isUndef()) 2695 KnownUndef.setBit(i); 2696 } 2697 return KnownUndef; 2698 } 2699 2700 bool TargetLowering::SimplifyDemandedVectorElts( 2701 SDValue Op, const APInt &OriginalDemandedElts, APInt &KnownUndef, 2702 APInt &KnownZero, TargetLoweringOpt &TLO, unsigned Depth, 2703 bool AssumeSingleUse) const { 2704 EVT VT = Op.getValueType(); 2705 unsigned Opcode = Op.getOpcode(); 2706 APInt DemandedElts = OriginalDemandedElts; 2707 unsigned NumElts = DemandedElts.getBitWidth(); 2708 assert(VT.isVector() && "Expected vector op"); 2709 2710 KnownUndef = KnownZero = APInt::getZero(NumElts); 2711 2712 const TargetLowering &TLI = TLO.DAG.getTargetLoweringInfo(); 2713 if (!TLI.shouldSimplifyDemandedVectorElts(Op, TLO)) 2714 return false; 2715 2716 // TODO: For now we assume we know nothing about scalable vectors. 2717 if (VT.isScalableVector()) 2718 return false; 2719 2720 assert(VT.getVectorNumElements() == NumElts && 2721 "Mask size mismatches value type element count!"); 2722 2723 // Undef operand. 2724 if (Op.isUndef()) { 2725 KnownUndef.setAllBits(); 2726 return false; 2727 } 2728 2729 // If Op has other users, assume that all elements are needed. 2730 if (!Op.getNode()->hasOneUse() && !AssumeSingleUse) 2731 DemandedElts.setAllBits(); 2732 2733 // Not demanding any elements from Op. 2734 if (DemandedElts == 0) { 2735 KnownUndef.setAllBits(); 2736 return TLO.CombineTo(Op, TLO.DAG.getUNDEF(VT)); 2737 } 2738 2739 // Limit search depth. 
2740 if (Depth >= SelectionDAG::MaxRecursionDepth) 2741 return false; 2742 2743 SDLoc DL(Op); 2744 unsigned EltSizeInBits = VT.getScalarSizeInBits(); 2745 bool IsLE = TLO.DAG.getDataLayout().isLittleEndian(); 2746 2747 // Helper for demanding the specified elements and all the bits of both binary 2748 // operands. 2749 auto SimplifyDemandedVectorEltsBinOp = [&](SDValue Op0, SDValue Op1) { 2750 SDValue NewOp0 = SimplifyMultipleUseDemandedVectorElts(Op0, DemandedElts, 2751 TLO.DAG, Depth + 1); 2752 SDValue NewOp1 = SimplifyMultipleUseDemandedVectorElts(Op1, DemandedElts, 2753 TLO.DAG, Depth + 1); 2754 if (NewOp0 || NewOp1) { 2755 SDValue NewOp = TLO.DAG.getNode( 2756 Opcode, SDLoc(Op), VT, NewOp0 ? NewOp0 : Op0, NewOp1 ? NewOp1 : Op1); 2757 return TLO.CombineTo(Op, NewOp); 2758 } 2759 return false; 2760 }; 2761 2762 switch (Opcode) { 2763 case ISD::SCALAR_TO_VECTOR: { 2764 if (!DemandedElts[0]) { 2765 KnownUndef.setAllBits(); 2766 return TLO.CombineTo(Op, TLO.DAG.getUNDEF(VT)); 2767 } 2768 SDValue ScalarSrc = Op.getOperand(0); 2769 if (ScalarSrc.getOpcode() == ISD::EXTRACT_VECTOR_ELT) { 2770 SDValue Src = ScalarSrc.getOperand(0); 2771 SDValue Idx = ScalarSrc.getOperand(1); 2772 EVT SrcVT = Src.getValueType(); 2773 2774 ElementCount SrcEltCnt = SrcVT.getVectorElementCount(); 2775 2776 if (SrcEltCnt.isScalable()) 2777 return false; 2778 2779 unsigned NumSrcElts = SrcEltCnt.getFixedValue(); 2780 if (isNullConstant(Idx)) { 2781 APInt SrcDemandedElts = APInt::getOneBitSet(NumSrcElts, 0); 2782 APInt SrcUndef = KnownUndef.zextOrTrunc(NumSrcElts); 2783 APInt SrcZero = KnownZero.zextOrTrunc(NumSrcElts); 2784 if (SimplifyDemandedVectorElts(Src, SrcDemandedElts, SrcUndef, SrcZero, 2785 TLO, Depth + 1)) 2786 return true; 2787 } 2788 } 2789 KnownUndef.setHighBits(NumElts - 1); 2790 break; 2791 } 2792 case ISD::BITCAST: { 2793 SDValue Src = Op.getOperand(0); 2794 EVT SrcVT = Src.getValueType(); 2795 2796 // We only handle vectors here. 2797 // TODO - investigate calling SimplifyDemandedBits/ComputeKnownBits? 2798 if (!SrcVT.isVector()) 2799 break; 2800 2801 // Fast handling of 'identity' bitcasts. 2802 unsigned NumSrcElts = SrcVT.getVectorNumElements(); 2803 if (NumSrcElts == NumElts) 2804 return SimplifyDemandedVectorElts(Src, DemandedElts, KnownUndef, 2805 KnownZero, TLO, Depth + 1); 2806 2807 APInt SrcDemandedElts, SrcZero, SrcUndef; 2808 2809 // Bitcast from 'large element' src vector to 'small element' vector, we 2810 // must demand a source element if any DemandedElt maps to it. 2811 if ((NumElts % NumSrcElts) == 0) { 2812 unsigned Scale = NumElts / NumSrcElts; 2813 SrcDemandedElts = APIntOps::ScaleBitMask(DemandedElts, NumSrcElts); 2814 if (SimplifyDemandedVectorElts(Src, SrcDemandedElts, SrcUndef, SrcZero, 2815 TLO, Depth + 1)) 2816 return true; 2817 2818 // Try calling SimplifyDemandedBits, converting demanded elts to the bits 2819 // of the large element. 2820 // TODO - bigendian once we have test coverage. 2821 if (IsLE) { 2822 unsigned SrcEltSizeInBits = SrcVT.getScalarSizeInBits(); 2823 APInt SrcDemandedBits = APInt::getZero(SrcEltSizeInBits); 2824 for (unsigned i = 0; i != NumElts; ++i) 2825 if (DemandedElts[i]) { 2826 unsigned Ofs = (i % Scale) * EltSizeInBits; 2827 SrcDemandedBits.setBits(Ofs, Ofs + EltSizeInBits); 2828 } 2829 2830 KnownBits Known; 2831 if (SimplifyDemandedBits(Src, SrcDemandedBits, SrcDemandedElts, Known, 2832 TLO, Depth + 1)) 2833 return true; 2834 2835 // The bitcast has split each wide element into a number of 2836 // narrow subelements. 
We have just computed the Known bits 2837 // for wide elements. See if element splitting results in 2838 // some subelements being zero. Only for demanded elements! 2839 for (unsigned SubElt = 0; SubElt != Scale; ++SubElt) { 2840 if (!Known.Zero.extractBits(EltSizeInBits, SubElt * EltSizeInBits) 2841 .isAllOnes()) 2842 continue; 2843 for (unsigned SrcElt = 0; SrcElt != NumSrcElts; ++SrcElt) { 2844 unsigned Elt = Scale * SrcElt + SubElt; 2845 if (DemandedElts[Elt]) 2846 KnownZero.setBit(Elt); 2847 } 2848 } 2849 } 2850 2851 // If the src element is zero/undef then all the output elements will be - 2852 // only demanded elements are guaranteed to be correct. 2853 for (unsigned i = 0; i != NumSrcElts; ++i) { 2854 if (SrcDemandedElts[i]) { 2855 if (SrcZero[i]) 2856 KnownZero.setBits(i * Scale, (i + 1) * Scale); 2857 if (SrcUndef[i]) 2858 KnownUndef.setBits(i * Scale, (i + 1) * Scale); 2859 } 2860 } 2861 } 2862 2863 // Bitcast from 'small element' src vector to 'large element' vector, we 2864 // demand all smaller source elements covered by the larger demanded element 2865 // of this vector. 2866 if ((NumSrcElts % NumElts) == 0) { 2867 unsigned Scale = NumSrcElts / NumElts; 2868 SrcDemandedElts = APIntOps::ScaleBitMask(DemandedElts, NumSrcElts); 2869 if (SimplifyDemandedVectorElts(Src, SrcDemandedElts, SrcUndef, SrcZero, 2870 TLO, Depth + 1)) 2871 return true; 2872 2873 // If all the src elements covering an output element are zero/undef, then 2874 // the output element will be as well, assuming it was demanded. 2875 for (unsigned i = 0; i != NumElts; ++i) { 2876 if (DemandedElts[i]) { 2877 if (SrcZero.extractBits(Scale, i * Scale).isAllOnes()) 2878 KnownZero.setBit(i); 2879 if (SrcUndef.extractBits(Scale, i * Scale).isAllOnes()) 2880 KnownUndef.setBit(i); 2881 } 2882 } 2883 } 2884 break; 2885 } 2886 case ISD::BUILD_VECTOR: { 2887 // Check all elements and simplify any unused elements with UNDEF. 2888 if (!DemandedElts.isAllOnes()) { 2889 // Don't simplify BROADCASTS. 2890 if (llvm::any_of(Op->op_values(), 2891 [&](SDValue Elt) { return Op.getOperand(0) != Elt; })) { 2892 SmallVector<SDValue, 32> Ops(Op->op_begin(), Op->op_end()); 2893 bool Updated = false; 2894 for (unsigned i = 0; i != NumElts; ++i) { 2895 if (!DemandedElts[i] && !Ops[i].isUndef()) { 2896 Ops[i] = TLO.DAG.getUNDEF(Ops[0].getValueType()); 2897 KnownUndef.setBit(i); 2898 Updated = true; 2899 } 2900 } 2901 if (Updated) 2902 return TLO.CombineTo(Op, TLO.DAG.getBuildVector(VT, DL, Ops)); 2903 } 2904 } 2905 for (unsigned i = 0; i != NumElts; ++i) { 2906 SDValue SrcOp = Op.getOperand(i); 2907 if (SrcOp.isUndef()) { 2908 KnownUndef.setBit(i); 2909 } else if (EltSizeInBits == SrcOp.getScalarValueSizeInBits() && 2910 (isNullConstant(SrcOp) || isNullFPConstant(SrcOp))) { 2911 KnownZero.setBit(i); 2912 } 2913 } 2914 break; 2915 } 2916 case ISD::CONCAT_VECTORS: { 2917 EVT SubVT = Op.getOperand(0).getValueType(); 2918 unsigned NumSubVecs = Op.getNumOperands(); 2919 unsigned NumSubElts = SubVT.getVectorNumElements(); 2920 for (unsigned i = 0; i != NumSubVecs; ++i) { 2921 SDValue SubOp = Op.getOperand(i); 2922 APInt SubElts = DemandedElts.extractBits(NumSubElts, i * NumSubElts); 2923 APInt SubUndef, SubZero; 2924 if (SimplifyDemandedVectorElts(SubOp, SubElts, SubUndef, SubZero, TLO, 2925 Depth + 1)) 2926 return true; 2927 KnownUndef.insertBits(SubUndef, i * NumSubElts); 2928 KnownZero.insertBits(SubZero, i * NumSubElts); 2929 } 2930 2931 // Attempt to avoid multi-use ops if we don't need anything from them. 
2932 if (!DemandedElts.isAllOnes()) { 2933 bool FoundNewSub = false; 2934 SmallVector<SDValue, 2> DemandedSubOps; 2935 for (unsigned i = 0; i != NumSubVecs; ++i) { 2936 SDValue SubOp = Op.getOperand(i); 2937 APInt SubElts = DemandedElts.extractBits(NumSubElts, i * NumSubElts); 2938 SDValue NewSubOp = SimplifyMultipleUseDemandedVectorElts( 2939 SubOp, SubElts, TLO.DAG, Depth + 1); 2940 DemandedSubOps.push_back(NewSubOp ? NewSubOp : SubOp); 2941 FoundNewSub = NewSubOp ? true : FoundNewSub; 2942 } 2943 if (FoundNewSub) { 2944 SDValue NewOp = 2945 TLO.DAG.getNode(Op.getOpcode(), SDLoc(Op), VT, DemandedSubOps); 2946 return TLO.CombineTo(Op, NewOp); 2947 } 2948 } 2949 break; 2950 } 2951 case ISD::INSERT_SUBVECTOR: { 2952 // Demand any elements from the subvector and the remainder from the src its 2953 // inserted into. 2954 SDValue Src = Op.getOperand(0); 2955 SDValue Sub = Op.getOperand(1); 2956 uint64_t Idx = Op.getConstantOperandVal(2); 2957 unsigned NumSubElts = Sub.getValueType().getVectorNumElements(); 2958 APInt DemandedSubElts = DemandedElts.extractBits(NumSubElts, Idx); 2959 APInt DemandedSrcElts = DemandedElts; 2960 DemandedSrcElts.insertBits(APInt::getZero(NumSubElts), Idx); 2961 2962 APInt SubUndef, SubZero; 2963 if (SimplifyDemandedVectorElts(Sub, DemandedSubElts, SubUndef, SubZero, TLO, 2964 Depth + 1)) 2965 return true; 2966 2967 // If none of the src operand elements are demanded, replace it with undef. 2968 if (!DemandedSrcElts && !Src.isUndef()) 2969 return TLO.CombineTo(Op, TLO.DAG.getNode(ISD::INSERT_SUBVECTOR, DL, VT, 2970 TLO.DAG.getUNDEF(VT), Sub, 2971 Op.getOperand(2))); 2972 2973 if (SimplifyDemandedVectorElts(Src, DemandedSrcElts, KnownUndef, KnownZero, 2974 TLO, Depth + 1)) 2975 return true; 2976 KnownUndef.insertBits(SubUndef, Idx); 2977 KnownZero.insertBits(SubZero, Idx); 2978 2979 // Attempt to avoid multi-use ops if we don't need anything from them. 2980 if (!DemandedSrcElts.isAllOnes() || !DemandedSubElts.isAllOnes()) { 2981 SDValue NewSrc = SimplifyMultipleUseDemandedVectorElts( 2982 Src, DemandedSrcElts, TLO.DAG, Depth + 1); 2983 SDValue NewSub = SimplifyMultipleUseDemandedVectorElts( 2984 Sub, DemandedSubElts, TLO.DAG, Depth + 1); 2985 if (NewSrc || NewSub) { 2986 NewSrc = NewSrc ? NewSrc : Src; 2987 NewSub = NewSub ? NewSub : Sub; 2988 SDValue NewOp = TLO.DAG.getNode(Op.getOpcode(), SDLoc(Op), VT, NewSrc, 2989 NewSub, Op.getOperand(2)); 2990 return TLO.CombineTo(Op, NewOp); 2991 } 2992 } 2993 break; 2994 } 2995 case ISD::EXTRACT_SUBVECTOR: { 2996 // Offset the demanded elts by the subvector index. 2997 SDValue Src = Op.getOperand(0); 2998 if (Src.getValueType().isScalableVector()) 2999 break; 3000 uint64_t Idx = Op.getConstantOperandVal(1); 3001 unsigned NumSrcElts = Src.getValueType().getVectorNumElements(); 3002 APInt DemandedSrcElts = DemandedElts.zext(NumSrcElts).shl(Idx); 3003 3004 APInt SrcUndef, SrcZero; 3005 if (SimplifyDemandedVectorElts(Src, DemandedSrcElts, SrcUndef, SrcZero, TLO, 3006 Depth + 1)) 3007 return true; 3008 KnownUndef = SrcUndef.extractBits(NumElts, Idx); 3009 KnownZero = SrcZero.extractBits(NumElts, Idx); 3010 3011 // Attempt to avoid multi-use ops if we don't need anything from them. 
3012 if (!DemandedElts.isAllOnes()) { 3013 SDValue NewSrc = SimplifyMultipleUseDemandedVectorElts( 3014 Src, DemandedSrcElts, TLO.DAG, Depth + 1); 3015 if (NewSrc) { 3016 SDValue NewOp = TLO.DAG.getNode(Op.getOpcode(), SDLoc(Op), VT, NewSrc, 3017 Op.getOperand(1)); 3018 return TLO.CombineTo(Op, NewOp); 3019 } 3020 } 3021 break; 3022 } 3023 case ISD::INSERT_VECTOR_ELT: { 3024 SDValue Vec = Op.getOperand(0); 3025 SDValue Scl = Op.getOperand(1); 3026 auto *CIdx = dyn_cast<ConstantSDNode>(Op.getOperand(2)); 3027 3028 // For a legal, constant insertion index, if we don't need this insertion 3029 // then strip it, else remove it from the demanded elts. 3030 if (CIdx && CIdx->getAPIntValue().ult(NumElts)) { 3031 unsigned Idx = CIdx->getZExtValue(); 3032 if (!DemandedElts[Idx]) 3033 return TLO.CombineTo(Op, Vec); 3034 3035 APInt DemandedVecElts(DemandedElts); 3036 DemandedVecElts.clearBit(Idx); 3037 if (SimplifyDemandedVectorElts(Vec, DemandedVecElts, KnownUndef, 3038 KnownZero, TLO, Depth + 1)) 3039 return true; 3040 3041 KnownUndef.setBitVal(Idx, Scl.isUndef()); 3042 3043 KnownZero.setBitVal(Idx, isNullConstant(Scl) || isNullFPConstant(Scl)); 3044 break; 3045 } 3046 3047 APInt VecUndef, VecZero; 3048 if (SimplifyDemandedVectorElts(Vec, DemandedElts, VecUndef, VecZero, TLO, 3049 Depth + 1)) 3050 return true; 3051 // Without knowing the insertion index we can't set KnownUndef/KnownZero. 3052 break; 3053 } 3054 case ISD::VSELECT: { 3055 // Try to transform the select condition based on the current demanded 3056 // elements. 3057 // TODO: If a condition element is undef, we can choose from one arm of the 3058 // select (and if one arm is undef, then we can propagate that to the 3059 // result). 3060 // TODO - add support for constant vselect masks (see IR version of this). 3061 APInt UnusedUndef, UnusedZero; 3062 if (SimplifyDemandedVectorElts(Op.getOperand(0), DemandedElts, UnusedUndef, 3063 UnusedZero, TLO, Depth + 1)) 3064 return true; 3065 3066 // See if we can simplify either vselect operand. 3067 APInt DemandedLHS(DemandedElts); 3068 APInt DemandedRHS(DemandedElts); 3069 APInt UndefLHS, ZeroLHS; 3070 APInt UndefRHS, ZeroRHS; 3071 if (SimplifyDemandedVectorElts(Op.getOperand(1), DemandedLHS, UndefLHS, 3072 ZeroLHS, TLO, Depth + 1)) 3073 return true; 3074 if (SimplifyDemandedVectorElts(Op.getOperand(2), DemandedRHS, UndefRHS, 3075 ZeroRHS, TLO, Depth + 1)) 3076 return true; 3077 3078 KnownUndef = UndefLHS & UndefRHS; 3079 KnownZero = ZeroLHS & ZeroRHS; 3080 break; 3081 } 3082 case ISD::VECTOR_SHUFFLE: { 3083 ArrayRef<int> ShuffleMask = cast<ShuffleVectorSDNode>(Op)->getMask(); 3084 3085 // Collect demanded elements from shuffle operands.. 3086 APInt DemandedLHS(NumElts, 0); 3087 APInt DemandedRHS(NumElts, 0); 3088 for (unsigned i = 0; i != NumElts; ++i) { 3089 int M = ShuffleMask[i]; 3090 if (M < 0 || !DemandedElts[i]) 3091 continue; 3092 assert(0 <= M && M < (int)(2 * NumElts) && "Shuffle index out of range"); 3093 if (M < (int)NumElts) 3094 DemandedLHS.setBit(M); 3095 else 3096 DemandedRHS.setBit(M - NumElts); 3097 } 3098 3099 // See if we can simplify either shuffle operand. 3100 APInt UndefLHS, ZeroLHS; 3101 APInt UndefRHS, ZeroRHS; 3102 if (SimplifyDemandedVectorElts(Op.getOperand(0), DemandedLHS, UndefLHS, 3103 ZeroLHS, TLO, Depth + 1)) 3104 return true; 3105 if (SimplifyDemandedVectorElts(Op.getOperand(1), DemandedRHS, UndefRHS, 3106 ZeroRHS, TLO, Depth + 1)) 3107 return true; 3108 3109 // Simplify mask using undef elements from LHS/RHS. 
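    // Illustrative example (values chosen for exposition): for a v4i32
    // shuffle with mask <2,4,1,5> where only elements 0 and 2 are demanded,
    // the loop below relaxes the mask to <2,-1,1,-1>, which may then be
    // rebuilt as a cheaper legal shuffle.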
3110 bool Updated = false; 3111 bool IdentityLHS = true, IdentityRHS = true; 3112 SmallVector<int, 32> NewMask(ShuffleMask.begin(), ShuffleMask.end()); 3113 for (unsigned i = 0; i != NumElts; ++i) { 3114 int &M = NewMask[i]; 3115 if (M < 0) 3116 continue; 3117 if (!DemandedElts[i] || (M < (int)NumElts && UndefLHS[M]) || 3118 (M >= (int)NumElts && UndefRHS[M - NumElts])) { 3119 Updated = true; 3120 M = -1; 3121 } 3122 IdentityLHS &= (M < 0) || (M == (int)i); 3123 IdentityRHS &= (M < 0) || ((M - NumElts) == i); 3124 } 3125 3126 // Update legal shuffle masks based on demanded elements if it won't reduce 3127 // to Identity which can cause premature removal of the shuffle mask. 3128 if (Updated && !IdentityLHS && !IdentityRHS && !TLO.LegalOps) { 3129 SDValue LegalShuffle = 3130 buildLegalVectorShuffle(VT, DL, Op.getOperand(0), Op.getOperand(1), 3131 NewMask, TLO.DAG); 3132 if (LegalShuffle) 3133 return TLO.CombineTo(Op, LegalShuffle); 3134 } 3135 3136 // Propagate undef/zero elements from LHS/RHS. 3137 for (unsigned i = 0; i != NumElts; ++i) { 3138 int M = ShuffleMask[i]; 3139 if (M < 0) { 3140 KnownUndef.setBit(i); 3141 } else if (M < (int)NumElts) { 3142 if (UndefLHS[M]) 3143 KnownUndef.setBit(i); 3144 if (ZeroLHS[M]) 3145 KnownZero.setBit(i); 3146 } else { 3147 if (UndefRHS[M - NumElts]) 3148 KnownUndef.setBit(i); 3149 if (ZeroRHS[M - NumElts]) 3150 KnownZero.setBit(i); 3151 } 3152 } 3153 break; 3154 } 3155 case ISD::ANY_EXTEND_VECTOR_INREG: 3156 case ISD::SIGN_EXTEND_VECTOR_INREG: 3157 case ISD::ZERO_EXTEND_VECTOR_INREG: { 3158 APInt SrcUndef, SrcZero; 3159 SDValue Src = Op.getOperand(0); 3160 unsigned NumSrcElts = Src.getValueType().getVectorNumElements(); 3161 APInt DemandedSrcElts = DemandedElts.zext(NumSrcElts); 3162 if (SimplifyDemandedVectorElts(Src, DemandedSrcElts, SrcUndef, SrcZero, TLO, 3163 Depth + 1)) 3164 return true; 3165 KnownZero = SrcZero.zextOrTrunc(NumElts); 3166 KnownUndef = SrcUndef.zextOrTrunc(NumElts); 3167 3168 if (IsLE && Op.getOpcode() == ISD::ANY_EXTEND_VECTOR_INREG && 3169 Op.getValueSizeInBits() == Src.getValueSizeInBits() && 3170 DemandedSrcElts == 1) { 3171 // aext - if we just need the bottom element then we can bitcast. 3172 return TLO.CombineTo(Op, TLO.DAG.getBitcast(VT, Src)); 3173 } 3174 3175 if (Op.getOpcode() == ISD::ZERO_EXTEND_VECTOR_INREG) { 3176 // zext(undef) upper bits are guaranteed to be zero. 3177 if (DemandedElts.isSubsetOf(KnownUndef)) 3178 return TLO.CombineTo(Op, TLO.DAG.getConstant(0, SDLoc(Op), VT)); 3179 KnownUndef.clearAllBits(); 3180 3181 // zext - if we just need the bottom element then we can mask: 3182 // zext(and(x,c)) -> and(x,c') iff the zext is the only user of the and. 
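    // Illustrative note (exposition only): when just the bottom source
    // element is demanded on a little-endian target of the same total width,
    // c' below keeps lane 0 of c and zeroes the remaining lanes, so the
    // extend can be replaced by a bitcast of and(x,c').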
3183 if (IsLE && DemandedSrcElts == 1 && Src.getOpcode() == ISD::AND && 3184 Op->isOnlyUserOf(Src.getNode()) && 3185 Op.getValueSizeInBits() == Src.getValueSizeInBits()) { 3186 SDLoc DL(Op); 3187 EVT SrcVT = Src.getValueType(); 3188 EVT SrcSVT = SrcVT.getScalarType(); 3189 SmallVector<SDValue> MaskElts; 3190 MaskElts.push_back(TLO.DAG.getAllOnesConstant(DL, SrcSVT)); 3191 MaskElts.append(NumSrcElts - 1, TLO.DAG.getConstant(0, DL, SrcSVT)); 3192 SDValue Mask = TLO.DAG.getBuildVector(SrcVT, DL, MaskElts); 3193 if (SDValue Fold = TLO.DAG.FoldConstantArithmetic( 3194 ISD::AND, DL, SrcVT, {Src.getOperand(1), Mask})) { 3195 Fold = TLO.DAG.getNode(ISD::AND, DL, SrcVT, Src.getOperand(0), Fold); 3196 return TLO.CombineTo(Op, TLO.DAG.getBitcast(VT, Fold)); 3197 } 3198 } 3199 } 3200 break; 3201 } 3202 3203 // TODO: There are more binop opcodes that could be handled here - MIN, 3204 // MAX, saturated math, etc. 3205 case ISD::ADD: { 3206 SDValue Op0 = Op.getOperand(0); 3207 SDValue Op1 = Op.getOperand(1); 3208 if (Op0 == Op1 && Op->isOnlyUserOf(Op0.getNode())) { 3209 APInt UndefLHS, ZeroLHS; 3210 if (SimplifyDemandedVectorElts(Op0, DemandedElts, UndefLHS, ZeroLHS, TLO, 3211 Depth + 1, /*AssumeSingleUse*/ true)) 3212 return true; 3213 } 3214 LLVM_FALLTHROUGH; 3215 } 3216 case ISD::OR: 3217 case ISD::XOR: 3218 case ISD::SUB: 3219 case ISD::FADD: 3220 case ISD::FSUB: 3221 case ISD::FMUL: 3222 case ISD::FDIV: 3223 case ISD::FREM: { 3224 SDValue Op0 = Op.getOperand(0); 3225 SDValue Op1 = Op.getOperand(1); 3226 3227 APInt UndefRHS, ZeroRHS; 3228 if (SimplifyDemandedVectorElts(Op1, DemandedElts, UndefRHS, ZeroRHS, TLO, 3229 Depth + 1)) 3230 return true; 3231 APInt UndefLHS, ZeroLHS; 3232 if (SimplifyDemandedVectorElts(Op0, DemandedElts, UndefLHS, ZeroLHS, TLO, 3233 Depth + 1)) 3234 return true; 3235 3236 KnownZero = ZeroLHS & ZeroRHS; 3237 KnownUndef = getKnownUndefForVectorBinop(Op, TLO.DAG, UndefLHS, UndefRHS); 3238 3239 // Attempt to avoid multi-use ops if we don't need anything from them. 3240 // TODO - use KnownUndef to relax the demandedelts? 3241 if (!DemandedElts.isAllOnes()) 3242 if (SimplifyDemandedVectorEltsBinOp(Op0, Op1)) 3243 return true; 3244 break; 3245 } 3246 case ISD::SHL: 3247 case ISD::SRL: 3248 case ISD::SRA: 3249 case ISD::ROTL: 3250 case ISD::ROTR: { 3251 SDValue Op0 = Op.getOperand(0); 3252 SDValue Op1 = Op.getOperand(1); 3253 3254 APInt UndefRHS, ZeroRHS; 3255 if (SimplifyDemandedVectorElts(Op1, DemandedElts, UndefRHS, ZeroRHS, TLO, 3256 Depth + 1)) 3257 return true; 3258 APInt UndefLHS, ZeroLHS; 3259 if (SimplifyDemandedVectorElts(Op0, DemandedElts, UndefLHS, ZeroLHS, TLO, 3260 Depth + 1)) 3261 return true; 3262 3263 KnownZero = ZeroLHS; 3264 KnownUndef = UndefLHS & UndefRHS; // TODO: use getKnownUndefForVectorBinop? 3265 3266 // Attempt to avoid multi-use ops if we don't need anything from them. 3267 // TODO - use KnownUndef to relax the demandedelts? 3268 if (!DemandedElts.isAllOnes()) 3269 if (SimplifyDemandedVectorEltsBinOp(Op0, Op1)) 3270 return true; 3271 break; 3272 } 3273 case ISD::MUL: 3274 case ISD::AND: { 3275 SDValue Op0 = Op.getOperand(0); 3276 SDValue Op1 = Op.getOperand(1); 3277 3278 APInt SrcUndef, SrcZero; 3279 if (SimplifyDemandedVectorElts(Op1, DemandedElts, SrcUndef, SrcZero, TLO, 3280 Depth + 1)) 3281 return true; 3282 if (SimplifyDemandedVectorElts(Op0, DemandedElts, KnownUndef, KnownZero, 3283 TLO, Depth + 1)) 3284 return true; 3285 3286 // If either side has a zero element, then the result element is zero, even 3287 // if the other is an UNDEF. 
3288 // TODO: Extend getKnownUndefForVectorBinop to also deal with known zeros 3289 // and then handle 'and' nodes with the rest of the binop opcodes. 3290 KnownZero |= SrcZero; 3291 KnownUndef &= SrcUndef; 3292 KnownUndef &= ~KnownZero; 3293 3294 // Attempt to avoid multi-use ops if we don't need anything from them. 3295 // TODO - use KnownUndef to relax the demandedelts? 3296 if (!DemandedElts.isAllOnes()) 3297 if (SimplifyDemandedVectorEltsBinOp(Op0, Op1)) 3298 return true; 3299 break; 3300 } 3301 case ISD::TRUNCATE: 3302 case ISD::SIGN_EXTEND: 3303 case ISD::ZERO_EXTEND: 3304 if (SimplifyDemandedVectorElts(Op.getOperand(0), DemandedElts, KnownUndef, 3305 KnownZero, TLO, Depth + 1)) 3306 return true; 3307 3308 if (Op.getOpcode() == ISD::ZERO_EXTEND) { 3309 // zext(undef) upper bits are guaranteed to be zero. 3310 if (DemandedElts.isSubsetOf(KnownUndef)) 3311 return TLO.CombineTo(Op, TLO.DAG.getConstant(0, SDLoc(Op), VT)); 3312 KnownUndef.clearAllBits(); 3313 } 3314 break; 3315 default: { 3316 if (Op.getOpcode() >= ISD::BUILTIN_OP_END) { 3317 if (SimplifyDemandedVectorEltsForTargetNode(Op, DemandedElts, KnownUndef, 3318 KnownZero, TLO, Depth)) 3319 return true; 3320 } else { 3321 KnownBits Known; 3322 APInt DemandedBits = APInt::getAllOnes(EltSizeInBits); 3323 if (SimplifyDemandedBits(Op, DemandedBits, OriginalDemandedElts, Known, 3324 TLO, Depth, AssumeSingleUse)) 3325 return true; 3326 } 3327 break; 3328 } 3329 } 3330 assert((KnownUndef & KnownZero) == 0 && "Elements flagged as undef AND zero"); 3331 3332 // Constant fold all undef cases. 3333 // TODO: Handle zero cases as well. 3334 if (DemandedElts.isSubsetOf(KnownUndef)) 3335 return TLO.CombineTo(Op, TLO.DAG.getUNDEF(VT)); 3336 3337 return false; 3338 } 3339 3340 /// Determine which of the bits specified in Mask are known to be either zero or 3341 /// one and return them in the Known. 3342 void TargetLowering::computeKnownBitsForTargetNode(const SDValue Op, 3343 KnownBits &Known, 3344 const APInt &DemandedElts, 3345 const SelectionDAG &DAG, 3346 unsigned Depth) const { 3347 assert((Op.getOpcode() >= ISD::BUILTIN_OP_END || 3348 Op.getOpcode() == ISD::INTRINSIC_WO_CHAIN || 3349 Op.getOpcode() == ISD::INTRINSIC_W_CHAIN || 3350 Op.getOpcode() == ISD::INTRINSIC_VOID) && 3351 "Should use MaskedValueIsZero if you don't know whether Op" 3352 " is a target node!"); 3353 Known.resetAll(); 3354 } 3355 3356 void TargetLowering::computeKnownBitsForTargetInstr( 3357 GISelKnownBits &Analysis, Register R, KnownBits &Known, 3358 const APInt &DemandedElts, const MachineRegisterInfo &MRI, 3359 unsigned Depth) const { 3360 Known.resetAll(); 3361 } 3362 3363 void TargetLowering::computeKnownBitsForFrameIndex( 3364 const int FrameIdx, KnownBits &Known, const MachineFunction &MF) const { 3365 // The low bits are known zero if the pointer is aligned. 3366 Known.Zero.setLowBits(Log2(MF.getFrameInfo().getObjectAlign(FrameIdx))); 3367 } 3368 3369 Align TargetLowering::computeKnownAlignForTargetInstr( 3370 GISelKnownBits &Analysis, Register R, const MachineRegisterInfo &MRI, 3371 unsigned Depth) const { 3372 return Align(1); 3373 } 3374 3375 /// This method can be implemented by targets that want to expose additional 3376 /// information about sign bits to the DAG Combiner. 
3377 unsigned TargetLowering::ComputeNumSignBitsForTargetNode(SDValue Op, 3378 const APInt &, 3379 const SelectionDAG &, 3380 unsigned Depth) const { 3381 assert((Op.getOpcode() >= ISD::BUILTIN_OP_END || 3382 Op.getOpcode() == ISD::INTRINSIC_WO_CHAIN || 3383 Op.getOpcode() == ISD::INTRINSIC_W_CHAIN || 3384 Op.getOpcode() == ISD::INTRINSIC_VOID) && 3385 "Should use ComputeNumSignBits if you don't know whether Op" 3386 " is a target node!"); 3387 return 1; 3388 } 3389 3390 unsigned TargetLowering::computeNumSignBitsForTargetInstr( 3391 GISelKnownBits &Analysis, Register R, const APInt &DemandedElts, 3392 const MachineRegisterInfo &MRI, unsigned Depth) const { 3393 return 1; 3394 } 3395 3396 bool TargetLowering::SimplifyDemandedVectorEltsForTargetNode( 3397 SDValue Op, const APInt &DemandedElts, APInt &KnownUndef, APInt &KnownZero, 3398 TargetLoweringOpt &TLO, unsigned Depth) const { 3399 assert((Op.getOpcode() >= ISD::BUILTIN_OP_END || 3400 Op.getOpcode() == ISD::INTRINSIC_WO_CHAIN || 3401 Op.getOpcode() == ISD::INTRINSIC_W_CHAIN || 3402 Op.getOpcode() == ISD::INTRINSIC_VOID) && 3403 "Should use SimplifyDemandedVectorElts if you don't know whether Op" 3404 " is a target node!"); 3405 return false; 3406 } 3407 3408 bool TargetLowering::SimplifyDemandedBitsForTargetNode( 3409 SDValue Op, const APInt &DemandedBits, const APInt &DemandedElts, 3410 KnownBits &Known, TargetLoweringOpt &TLO, unsigned Depth) const { 3411 assert((Op.getOpcode() >= ISD::BUILTIN_OP_END || 3412 Op.getOpcode() == ISD::INTRINSIC_WO_CHAIN || 3413 Op.getOpcode() == ISD::INTRINSIC_W_CHAIN || 3414 Op.getOpcode() == ISD::INTRINSIC_VOID) && 3415 "Should use SimplifyDemandedBits if you don't know whether Op" 3416 " is a target node!"); 3417 computeKnownBitsForTargetNode(Op, Known, DemandedElts, TLO.DAG, Depth); 3418 return false; 3419 } 3420 3421 SDValue TargetLowering::SimplifyMultipleUseDemandedBitsForTargetNode( 3422 SDValue Op, const APInt &DemandedBits, const APInt &DemandedElts, 3423 SelectionDAG &DAG, unsigned Depth) const { 3424 assert( 3425 (Op.getOpcode() >= ISD::BUILTIN_OP_END || 3426 Op.getOpcode() == ISD::INTRINSIC_WO_CHAIN || 3427 Op.getOpcode() == ISD::INTRINSIC_W_CHAIN || 3428 Op.getOpcode() == ISD::INTRINSIC_VOID) && 3429 "Should use SimplifyMultipleUseDemandedBits if you don't know whether Op" 3430 " is a target node!"); 3431 return SDValue(); 3432 } 3433 3434 SDValue 3435 TargetLowering::buildLegalVectorShuffle(EVT VT, const SDLoc &DL, SDValue N0, 3436 SDValue N1, MutableArrayRef<int> Mask, 3437 SelectionDAG &DAG) const { 3438 bool LegalMask = isShuffleMaskLegal(Mask, VT); 3439 if (!LegalMask) { 3440 std::swap(N0, N1); 3441 ShuffleVectorSDNode::commuteMask(Mask); 3442 LegalMask = isShuffleMaskLegal(Mask, VT); 3443 } 3444 3445 if (!LegalMask) 3446 return SDValue(); 3447 3448 return DAG.getVectorShuffle(VT, DL, N0, N1, Mask); 3449 } 3450 3451 const Constant *TargetLowering::getTargetConstantFromLoad(LoadSDNode*) const { 3452 return nullptr; 3453 } 3454 3455 bool TargetLowering::isGuaranteedNotToBeUndefOrPoisonForTargetNode( 3456 SDValue Op, const APInt &DemandedElts, const SelectionDAG &DAG, 3457 bool PoisonOnly, unsigned Depth) const { 3458 assert( 3459 (Op.getOpcode() >= ISD::BUILTIN_OP_END || 3460 Op.getOpcode() == ISD::INTRINSIC_WO_CHAIN || 3461 Op.getOpcode() == ISD::INTRINSIC_W_CHAIN || 3462 Op.getOpcode() == ISD::INTRINSIC_VOID) && 3463 "Should use isGuaranteedNotToBeUndefOrPoison if you don't know whether Op" 3464 " is a target node!"); 3465 return false; 3466 } 3467 3468 bool 
TargetLowering::isKnownNeverNaNForTargetNode(SDValue Op, 3469 const SelectionDAG &DAG, 3470 bool SNaN, 3471 unsigned Depth) const { 3472 assert((Op.getOpcode() >= ISD::BUILTIN_OP_END || 3473 Op.getOpcode() == ISD::INTRINSIC_WO_CHAIN || 3474 Op.getOpcode() == ISD::INTRINSIC_W_CHAIN || 3475 Op.getOpcode() == ISD::INTRINSIC_VOID) && 3476 "Should use isKnownNeverNaN if you don't know whether Op" 3477 " is a target node!"); 3478 return false; 3479 } 3480 3481 bool TargetLowering::isSplatValueForTargetNode(SDValue Op, 3482 const APInt &DemandedElts, 3483 APInt &UndefElts, 3484 unsigned Depth) const { 3485 assert((Op.getOpcode() >= ISD::BUILTIN_OP_END || 3486 Op.getOpcode() == ISD::INTRINSIC_WO_CHAIN || 3487 Op.getOpcode() == ISD::INTRINSIC_W_CHAIN || 3488 Op.getOpcode() == ISD::INTRINSIC_VOID) && 3489 "Should use isSplatValue if you don't know whether Op" 3490 " is a target node!"); 3491 return false; 3492 } 3493 3494 // FIXME: Ideally, this would use ISD::isConstantSplatVector(), but that must 3495 // work with truncating build vectors and vectors with elements of less than 3496 // 8 bits. 3497 bool TargetLowering::isConstTrueVal(SDValue N) const { 3498 if (!N) 3499 return false; 3500 3501 unsigned EltWidth; 3502 APInt CVal; 3503 if (ConstantSDNode *CN = isConstOrConstSplat(N, /*AllowUndefs=*/false, 3504 /*AllowTruncation=*/true)) { 3505 CVal = CN->getAPIntValue(); 3506 EltWidth = N.getValueType().getScalarSizeInBits(); 3507 } else 3508 return false; 3509 3510 // If this is a truncating splat, truncate the splat value. 3511 // Otherwise, we may fail to match the expected values below. 3512 if (EltWidth < CVal.getBitWidth()) 3513 CVal = CVal.trunc(EltWidth); 3514 3515 switch (getBooleanContents(N.getValueType())) { 3516 case UndefinedBooleanContent: 3517 return CVal[0]; 3518 case ZeroOrOneBooleanContent: 3519 return CVal.isOne(); 3520 case ZeroOrNegativeOneBooleanContent: 3521 return CVal.isAllOnes(); 3522 } 3523 3524 llvm_unreachable("Invalid boolean contents"); 3525 } 3526 3527 bool TargetLowering::isConstFalseVal(SDValue N) const { 3528 if (!N) 3529 return false; 3530 3531 const ConstantSDNode *CN = dyn_cast<ConstantSDNode>(N); 3532 if (!CN) { 3533 const BuildVectorSDNode *BV = dyn_cast<BuildVectorSDNode>(N); 3534 if (!BV) 3535 return false; 3536 3537 // Only interested in constant splats, we don't care about undef 3538 // elements in identifying boolean constants and getConstantSplatNode 3539 // returns NULL if all ops are undef; 3540 CN = BV->getConstantSplatNode(); 3541 if (!CN) 3542 return false; 3543 } 3544 3545 if (getBooleanContents(N->getValueType(0)) == UndefinedBooleanContent) 3546 return !CN->getAPIntValue()[0]; 3547 3548 return CN->isZero(); 3549 } 3550 3551 bool TargetLowering::isExtendedTrueVal(const ConstantSDNode *N, EVT VT, 3552 bool SExt) const { 3553 if (VT == MVT::i1) 3554 return N->isOne(); 3555 3556 TargetLowering::BooleanContent Cnt = getBooleanContents(VT); 3557 switch (Cnt) { 3558 case TargetLowering::ZeroOrOneBooleanContent: 3559 // An extended value of 1 is always true, unless its original type is i1, 3560 // in which case it will be sign extended to -1. 
3561 return (N->isOne() && !SExt) || (SExt && (N->getValueType(0) != MVT::i1)); 3562 case TargetLowering::UndefinedBooleanContent: 3563 case TargetLowering::ZeroOrNegativeOneBooleanContent: 3564 return N->isAllOnes() && SExt; 3565 } 3566 llvm_unreachable("Unexpected enumeration."); 3567 } 3568 3569 /// This helper function of SimplifySetCC tries to optimize the comparison when 3570 /// either operand of the SetCC node is a bitwise-and instruction. 3571 SDValue TargetLowering::foldSetCCWithAnd(EVT VT, SDValue N0, SDValue N1, 3572 ISD::CondCode Cond, const SDLoc &DL, 3573 DAGCombinerInfo &DCI) const { 3574 if (N1.getOpcode() == ISD::AND && N0.getOpcode() != ISD::AND) 3575 std::swap(N0, N1); 3576 3577 SelectionDAG &DAG = DCI.DAG; 3578 EVT OpVT = N0.getValueType(); 3579 if (N0.getOpcode() != ISD::AND || !OpVT.isInteger() || 3580 (Cond != ISD::SETEQ && Cond != ISD::SETNE)) 3581 return SDValue(); 3582 3583 // (X & Y) != 0 --> zextOrTrunc(X & Y) 3584 // iff everything but LSB is known zero: 3585 if (Cond == ISD::SETNE && isNullConstant(N1) && 3586 (getBooleanContents(OpVT) == TargetLowering::UndefinedBooleanContent || 3587 getBooleanContents(OpVT) == TargetLowering::ZeroOrOneBooleanContent)) { 3588 unsigned NumEltBits = OpVT.getScalarSizeInBits(); 3589 APInt UpperBits = APInt::getHighBitsSet(NumEltBits, NumEltBits - 1); 3590 if (DAG.MaskedValueIsZero(N0, UpperBits)) 3591 return DAG.getBoolExtOrTrunc(N0, DL, VT, OpVT); 3592 } 3593 3594 // Match these patterns in any of their permutations: 3595 // (X & Y) == Y 3596 // (X & Y) != Y 3597 SDValue X, Y; 3598 if (N0.getOperand(0) == N1) { 3599 X = N0.getOperand(1); 3600 Y = N0.getOperand(0); 3601 } else if (N0.getOperand(1) == N1) { 3602 X = N0.getOperand(0); 3603 Y = N0.getOperand(1); 3604 } else { 3605 return SDValue(); 3606 } 3607 3608 SDValue Zero = DAG.getConstant(0, DL, OpVT); 3609 if (DAG.isKnownToBeAPowerOfTwo(Y)) { 3610 // Simplify X & Y == Y to X & Y != 0 if Y has exactly one bit set. 3611 // Note that where Y is variable and is known to have at most one bit set 3612 // (for example, if it is Z & 1) we cannot do this; the expressions are not 3613 // equivalent when Y == 0. 3614 assert(OpVT.isInteger()); 3615 Cond = ISD::getSetCCInverse(Cond, OpVT); 3616 if (DCI.isBeforeLegalizeOps() || 3617 isCondCodeLegal(Cond, N0.getSimpleValueType())) 3618 return DAG.getSetCC(DL, VT, N0, Zero, Cond); 3619 } else if (N0.hasOneUse() && hasAndNotCompare(Y)) { 3620 // If the target supports an 'and-not' or 'and-complement' logic operation, 3621 // try to use that to make a comparison operation more efficient. 3622 // But don't do this transform if the mask is a single bit because there are 3623 // more efficient ways to deal with that case (for example, 'bt' on x86 or 3624 // 'rlwinm' on PPC). 3625 3626 // Bail out if the compare operand that we want to turn into a zero is 3627 // already a zero (otherwise, infinite loop). 3628 auto *YConst = dyn_cast<ConstantSDNode>(Y); 3629 if (YConst && YConst->isZero()) 3630 return SDValue(); 3631 3632 // Transform this into: ~X & Y == 0. 3633 SDValue NotX = DAG.getNOT(SDLoc(X), X, OpVT); 3634 SDValue NewAnd = DAG.getNode(ISD::AND, SDLoc(N0), OpVT, NotX, Y); 3635 return DAG.getSetCC(DL, VT, NewAnd, Zero, Cond); 3636 } 3637 3638 return SDValue(); 3639 } 3640 3641 /// There are multiple IR patterns that could be checking whether certain 3642 /// truncation of a signed number would be lossy or not. The pattern which is 3643 /// best at IR level, may not lower optimally. Thus, we want to unfold it. 
3644 /// We are looking for the following pattern: (KeptBits is a constant) 3645 /// (add %x, (1 << (KeptBits-1))) srccond (1 << KeptBits) 3646 /// KeptBits won't be bitwidth(x), that will be constant-folded to true/false. 3647 /// KeptBits also can't be 1, that would have been folded to %x dstcond 0 3648 /// We will unfold it into the natural trunc+sext pattern: 3649 /// ((%x << C) a>> C) dstcond %x 3650 /// Where C = bitwidth(x) - KeptBits and C u< bitwidth(x) 3651 SDValue TargetLowering::optimizeSetCCOfSignedTruncationCheck( 3652 EVT SCCVT, SDValue N0, SDValue N1, ISD::CondCode Cond, DAGCombinerInfo &DCI, 3653 const SDLoc &DL) const { 3654 // We must be comparing with a constant. 3655 ConstantSDNode *C1; 3656 if (!(C1 = dyn_cast<ConstantSDNode>(N1))) 3657 return SDValue(); 3658 3659 // N0 should be: add %x, (1 << (KeptBits-1)) 3660 if (N0->getOpcode() != ISD::ADD) 3661 return SDValue(); 3662 3663 // And we must be 'add'ing a constant. 3664 ConstantSDNode *C01; 3665 if (!(C01 = dyn_cast<ConstantSDNode>(N0->getOperand(1)))) 3666 return SDValue(); 3667 3668 SDValue X = N0->getOperand(0); 3669 EVT XVT = X.getValueType(); 3670 3671 // Validate constants ... 3672 3673 APInt I1 = C1->getAPIntValue(); 3674 3675 ISD::CondCode NewCond; 3676 if (Cond == ISD::CondCode::SETULT) { 3677 NewCond = ISD::CondCode::SETEQ; 3678 } else if (Cond == ISD::CondCode::SETULE) { 3679 NewCond = ISD::CondCode::SETEQ; 3680 // But need to 'canonicalize' the constant. 3681 I1 += 1; 3682 } else if (Cond == ISD::CondCode::SETUGT) { 3683 NewCond = ISD::CondCode::SETNE; 3684 // But need to 'canonicalize' the constant. 3685 I1 += 1; 3686 } else if (Cond == ISD::CondCode::SETUGE) { 3687 NewCond = ISD::CondCode::SETNE; 3688 } else 3689 return SDValue(); 3690 3691 APInt I01 = C01->getAPIntValue(); 3692 3693 auto checkConstants = [&I1, &I01]() -> bool { 3694 // Both of them must be power-of-two, and the constant from setcc is bigger. 3695 return I1.ugt(I01) && I1.isPowerOf2() && I01.isPowerOf2(); 3696 }; 3697 3698 if (checkConstants()) { 3699 // Great, e.g. got icmp ult i16 (add i16 %x, 128), 256 3700 } else { 3701 // What if we invert constants? (and the target predicate) 3702 I1.negate(); 3703 I01.negate(); 3704 assert(XVT.isInteger()); 3705 NewCond = getSetCCInverse(NewCond, XVT); 3706 if (!checkConstants()) 3707 return SDValue(); 3708 // Great, e.g. got icmp uge i16 (add i16 %x, -128), -256 3709 } 3710 3711 // They are power-of-two, so which bit is set? 3712 const unsigned KeptBits = I1.logBase2(); 3713 const unsigned KeptBitsMinusOne = I01.logBase2(); 3714 3715 // Magic! 3716 if (KeptBits != (KeptBitsMinusOne + 1)) 3717 return SDValue(); 3718 assert(KeptBits > 0 && KeptBits < XVT.getSizeInBits() && "unreachable"); 3719 3720 // We don't want to do this in every single case. 3721 SelectionDAG &DAG = DCI.DAG; 3722 if (!DAG.getTargetLoweringInfo().shouldTransformSignedTruncationCheck( 3723 XVT, KeptBits)) 3724 return SDValue(); 3725 3726 const unsigned MaskedBits = XVT.getSizeInBits() - KeptBits; 3727 assert(MaskedBits > 0 && MaskedBits < XVT.getSizeInBits() && "unreachable"); 3728 3729 // Unfold into: ((%x << C) a>> C) cond %x 3730 // Where 'cond' will be either 'eq' or 'ne'. 
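  // Illustrative example (values chosen for exposition): for i16 %x with
  // KeptBits == 8, (add %x, 128) u< 256 becomes ((%x << 8) a>> 8) == %x,
  // i.e. a check that %x is already sign-extended from i8.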
3731 SDValue ShiftAmt = DAG.getConstant(MaskedBits, DL, XVT); 3732 SDValue T0 = DAG.getNode(ISD::SHL, DL, XVT, X, ShiftAmt); 3733 SDValue T1 = DAG.getNode(ISD::SRA, DL, XVT, T0, ShiftAmt); 3734 SDValue T2 = DAG.getSetCC(DL, SCCVT, T1, X, NewCond); 3735 3736 return T2; 3737 } 3738 3739 // (X & (C l>>/<< Y)) ==/!= 0 --> ((X <</l>> Y) & C) ==/!= 0 3740 SDValue TargetLowering::optimizeSetCCByHoistingAndByConstFromLogicalShift( 3741 EVT SCCVT, SDValue N0, SDValue N1C, ISD::CondCode Cond, 3742 DAGCombinerInfo &DCI, const SDLoc &DL) const { 3743 assert(isConstOrConstSplat(N1C) && 3744 isConstOrConstSplat(N1C)->getAPIntValue().isZero() && 3745 "Should be a comparison with 0."); 3746 assert((Cond == ISD::SETEQ || Cond == ISD::SETNE) && 3747 "Valid only for [in]equality comparisons."); 3748 3749 unsigned NewShiftOpcode; 3750 SDValue X, C, Y; 3751 3752 SelectionDAG &DAG = DCI.DAG; 3753 const TargetLowering &TLI = DAG.getTargetLoweringInfo(); 3754 3755 // Look for '(C l>>/<< Y)'. 3756 auto Match = [&NewShiftOpcode, &X, &C, &Y, &TLI, &DAG](SDValue V) { 3757 // The shift should be one-use. 3758 if (!V.hasOneUse()) 3759 return false; 3760 unsigned OldShiftOpcode = V.getOpcode(); 3761 switch (OldShiftOpcode) { 3762 case ISD::SHL: 3763 NewShiftOpcode = ISD::SRL; 3764 break; 3765 case ISD::SRL: 3766 NewShiftOpcode = ISD::SHL; 3767 break; 3768 default: 3769 return false; // must be a logical shift. 3770 } 3771 // We should be shifting a constant. 3772 // FIXME: best to use isConstantOrConstantVector(). 3773 C = V.getOperand(0); 3774 ConstantSDNode *CC = 3775 isConstOrConstSplat(C, /*AllowUndefs=*/true, /*AllowTruncation=*/true); 3776 if (!CC) 3777 return false; 3778 Y = V.getOperand(1); 3779 3780 ConstantSDNode *XC = 3781 isConstOrConstSplat(X, /*AllowUndefs=*/true, /*AllowTruncation=*/true); 3782 return TLI.shouldProduceAndByConstByHoistingConstFromShiftsLHSOfAnd( 3783 X, XC, CC, Y, OldShiftOpcode, NewShiftOpcode, DAG); 3784 }; 3785 3786 // LHS of comparison should be an one-use 'and'. 3787 if (N0.getOpcode() != ISD::AND || !N0.hasOneUse()) 3788 return SDValue(); 3789 3790 X = N0.getOperand(0); 3791 SDValue Mask = N0.getOperand(1); 3792 3793 // 'and' is commutative! 3794 if (!Match(Mask)) { 3795 std::swap(X, Mask); 3796 if (!Match(Mask)) 3797 return SDValue(); 3798 } 3799 3800 EVT VT = X.getValueType(); 3801 3802 // Produce: 3803 // ((X 'OppositeShiftOpcode' Y) & C) Cond 0 3804 SDValue T0 = DAG.getNode(NewShiftOpcode, DL, VT, X, Y); 3805 SDValue T1 = DAG.getNode(ISD::AND, DL, VT, T0, C); 3806 SDValue T2 = DAG.getSetCC(DL, SCCVT, T1, N1C, Cond); 3807 return T2; 3808 } 3809 3810 /// Try to fold an equality comparison with a {add/sub/xor} binary operation as 3811 /// the 1st operand (N0). Callers are expected to swap the N0/N1 parameters to 3812 /// handle the commuted versions of these patterns. 
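/// For example (illustrative): (X - Y) == Y is rewritten to X == (Y << 1)
/// when the operands are wider than i1 and the binop has a single use.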
3813 SDValue TargetLowering::foldSetCCWithBinOp(EVT VT, SDValue N0, SDValue N1, 3814 ISD::CondCode Cond, const SDLoc &DL, 3815 DAGCombinerInfo &DCI) const { 3816 unsigned BOpcode = N0.getOpcode(); 3817 assert((BOpcode == ISD::ADD || BOpcode == ISD::SUB || BOpcode == ISD::XOR) && 3818 "Unexpected binop"); 3819 assert((Cond == ISD::SETEQ || Cond == ISD::SETNE) && "Unexpected condcode"); 3820 3821 // (X + Y) == X --> Y == 0 3822 // (X - Y) == X --> Y == 0 3823 // (X ^ Y) == X --> Y == 0 3824 SelectionDAG &DAG = DCI.DAG; 3825 EVT OpVT = N0.getValueType(); 3826 SDValue X = N0.getOperand(0); 3827 SDValue Y = N0.getOperand(1); 3828 if (X == N1) 3829 return DAG.getSetCC(DL, VT, Y, DAG.getConstant(0, DL, OpVT), Cond); 3830 3831 if (Y != N1) 3832 return SDValue(); 3833 3834 // (X + Y) == Y --> X == 0 3835 // (X ^ Y) == Y --> X == 0 3836 if (BOpcode == ISD::ADD || BOpcode == ISD::XOR) 3837 return DAG.getSetCC(DL, VT, X, DAG.getConstant(0, DL, OpVT), Cond); 3838 3839 // The shift would not be valid if the operands are boolean (i1). 3840 if (!N0.hasOneUse() || OpVT.getScalarSizeInBits() == 1) 3841 return SDValue(); 3842 3843 // (X - Y) == Y --> X == Y << 1 3844 EVT ShiftVT = getShiftAmountTy(OpVT, DAG.getDataLayout(), 3845 !DCI.isBeforeLegalize()); 3846 SDValue One = DAG.getConstant(1, DL, ShiftVT); 3847 SDValue YShl1 = DAG.getNode(ISD::SHL, DL, N1.getValueType(), Y, One); 3848 if (!DCI.isCalledByLegalizer()) 3849 DCI.AddToWorklist(YShl1.getNode()); 3850 return DAG.getSetCC(DL, VT, X, YShl1, Cond); 3851 } 3852 3853 static SDValue simplifySetCCWithCTPOP(const TargetLowering &TLI, EVT VT, 3854 SDValue N0, const APInt &C1, 3855 ISD::CondCode Cond, const SDLoc &dl, 3856 SelectionDAG &DAG) { 3857 // Look through truncs that don't change the value of a ctpop. 3858 // FIXME: Add vector support? Need to be careful with setcc result type below. 3859 SDValue CTPOP = N0; 3860 if (N0.getOpcode() == ISD::TRUNCATE && N0.hasOneUse() && !VT.isVector() && 3861 N0.getScalarValueSizeInBits() > Log2_32(N0.getOperand(0).getScalarValueSizeInBits())) 3862 CTPOP = N0.getOperand(0); 3863 3864 if (CTPOP.getOpcode() != ISD::CTPOP || !CTPOP.hasOneUse()) 3865 return SDValue(); 3866 3867 EVT CTVT = CTPOP.getValueType(); 3868 SDValue CTOp = CTPOP.getOperand(0); 3869 3870 // If this is a vector CTPOP, keep the CTPOP if it is legal. 3871 // TODO: Should we check if CTPOP is legal(or custom) for scalars? 3872 if (VT.isVector() && TLI.isOperationLegal(ISD::CTPOP, CTVT)) 3873 return SDValue(); 3874 3875 // (ctpop x) u< 2 -> (x & x-1) == 0 3876 // (ctpop x) u> 1 -> (x & x-1) != 0 3877 if (Cond == ISD::SETULT || Cond == ISD::SETUGT) { 3878 unsigned CostLimit = TLI.getCustomCtpopCost(CTVT, Cond); 3879 if (C1.ugt(CostLimit + (Cond == ISD::SETULT))) 3880 return SDValue(); 3881 if (C1 == 0 && (Cond == ISD::SETULT)) 3882 return SDValue(); // This is handled elsewhere. 3883 3884 unsigned Passes = C1.getLimitedValue() - (Cond == ISD::SETULT); 3885 3886 SDValue NegOne = DAG.getAllOnesConstant(dl, CTVT); 3887 SDValue Result = CTOp; 3888 for (unsigned i = 0; i < Passes; i++) { 3889 SDValue Add = DAG.getNode(ISD::ADD, dl, CTVT, Result, NegOne); 3890 Result = DAG.getNode(ISD::AND, dl, CTVT, Result, Add); 3891 } 3892 ISD::CondCode CC = Cond == ISD::SETULT ? ISD::SETEQ : ISD::SETNE; 3893 return DAG.getSetCC(dl, VT, Result, DAG.getConstant(0, dl, CTVT), CC); 3894 } 3895 3896 // If ctpop is not supported, expand a power-of-2 comparison based on it. 
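  // Illustrative example (value chosen for exposition): for x == 0b1000,
  // (ctpop x) == 1 expands to (x != 0) && ((x & (x - 1)) == 0), which holds
  // since 0b1000 & 0b0111 == 0.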
3897 if ((Cond == ISD::SETEQ || Cond == ISD::SETNE) && C1 == 1) { 3898 // For scalars, keep CTPOP if it is legal or custom. 3899 if (!VT.isVector() && TLI.isOperationLegalOrCustom(ISD::CTPOP, CTVT)) 3900 return SDValue(); 3901 // This is based on X86's custom lowering for CTPOP which produces more 3902 // instructions than the expansion here. 3903 3904 // (ctpop x) == 1 --> (x != 0) && ((x & x-1) == 0) 3905 // (ctpop x) != 1 --> (x == 0) || ((x & x-1) != 0) 3906 SDValue Zero = DAG.getConstant(0, dl, CTVT); 3907 SDValue NegOne = DAG.getAllOnesConstant(dl, CTVT); 3908 assert(CTVT.isInteger()); 3909 ISD::CondCode InvCond = ISD::getSetCCInverse(Cond, CTVT); 3910 SDValue Add = DAG.getNode(ISD::ADD, dl, CTVT, CTOp, NegOne); 3911 SDValue And = DAG.getNode(ISD::AND, dl, CTVT, CTOp, Add); 3912 SDValue LHS = DAG.getSetCC(dl, VT, CTOp, Zero, InvCond); 3913 SDValue RHS = DAG.getSetCC(dl, VT, And, Zero, Cond); 3914 unsigned LogicOpcode = Cond == ISD::SETEQ ? ISD::AND : ISD::OR; 3915 return DAG.getNode(LogicOpcode, dl, VT, LHS, RHS); 3916 } 3917 3918 return SDValue(); 3919 } 3920 3921 static SDValue foldSetCCWithRotate(EVT VT, SDValue N0, SDValue N1, 3922 ISD::CondCode Cond, const SDLoc &dl, 3923 SelectionDAG &DAG) { 3924 if (Cond != ISD::SETEQ && Cond != ISD::SETNE) 3925 return SDValue(); 3926 3927 auto *C1 = isConstOrConstSplat(N1, /* AllowUndefs */ true); 3928 if (!C1 || !(C1->isZero() || C1->isAllOnes())) 3929 return SDValue(); 3930 3931 auto getRotateSource = [](SDValue X) { 3932 if (X.getOpcode() == ISD::ROTL || X.getOpcode() == ISD::ROTR) 3933 return X.getOperand(0); 3934 return SDValue(); 3935 }; 3936 3937 // Peek through a rotated value compared against 0 or -1: 3938 // (rot X, Y) == 0/-1 --> X == 0/-1 3939 // (rot X, Y) != 0/-1 --> X != 0/-1 3940 if (SDValue R = getRotateSource(N0)) 3941 return DAG.getSetCC(dl, VT, R, N1, Cond); 3942 3943 // Peek through an 'or' of a rotated value compared against 0: 3944 // or (rot X, Y), Z ==/!= 0 --> (or X, Z) ==/!= 0 3945 // or Z, (rot X, Y) ==/!= 0 --> (or X, Z) ==/!= 0 3946 // 3947 // TODO: Add the 'and' with -1 sibling. 3948 // TODO: Recurse through a series of 'or' ops to find the rotate. 3949 EVT OpVT = N0.getValueType(); 3950 if (N0.hasOneUse() && N0.getOpcode() == ISD::OR && C1->isZero()) { 3951 if (SDValue R = getRotateSource(N0.getOperand(0))) { 3952 SDValue NewOr = DAG.getNode(ISD::OR, dl, OpVT, R, N0.getOperand(1)); 3953 return DAG.getSetCC(dl, VT, NewOr, N1, Cond); 3954 } 3955 if (SDValue R = getRotateSource(N0.getOperand(1))) { 3956 SDValue NewOr = DAG.getNode(ISD::OR, dl, OpVT, R, N0.getOperand(0)); 3957 return DAG.getSetCC(dl, VT, NewOr, N1, Cond); 3958 } 3959 } 3960 3961 return SDValue(); 3962 } 3963 3964 static SDValue foldSetCCWithFunnelShift(EVT VT, SDValue N0, SDValue N1, 3965 ISD::CondCode Cond, const SDLoc &dl, 3966 SelectionDAG &DAG) { 3967 // If we are testing for all-bits-clear, we might be able to do that with 3968 // less shifting since bit-order does not matter. 
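  // Illustrative overview of the folds below (exposition only): a test such
  // as fshl(or(X, Y), X, C) == 0 only asks whether any bit is set, so it can
  // be rewritten as or(shl(Y, C), X) == 0 without the funnel shift.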
3969 if (Cond != ISD::SETEQ && Cond != ISD::SETNE) 3970 return SDValue(); 3971 3972 auto *C1 = isConstOrConstSplat(N1, /* AllowUndefs */ true); 3973 if (!C1 || !C1->isZero()) 3974 return SDValue(); 3975 3976 if (!N0.hasOneUse() || 3977 (N0.getOpcode() != ISD::FSHL && N0.getOpcode() != ISD::FSHR)) 3978 return SDValue(); 3979 3980 unsigned BitWidth = N0.getScalarValueSizeInBits(); 3981 auto *ShAmtC = isConstOrConstSplat(N0.getOperand(2)); 3982 if (!ShAmtC || ShAmtC->getAPIntValue().uge(BitWidth)) 3983 return SDValue(); 3984 3985 // Canonicalize fshr as fshl to reduce pattern-matching. 3986 unsigned ShAmt = ShAmtC->getZExtValue(); 3987 if (N0.getOpcode() == ISD::FSHR) 3988 ShAmt = BitWidth - ShAmt; 3989 3990 // Match an 'or' with a specific operand 'Other' in either commuted variant. 3991 SDValue X, Y; 3992 auto matchOr = [&X, &Y](SDValue Or, SDValue Other) { 3993 if (Or.getOpcode() != ISD::OR || !Or.hasOneUse()) 3994 return false; 3995 if (Or.getOperand(0) == Other) { 3996 X = Or.getOperand(0); 3997 Y = Or.getOperand(1); 3998 return true; 3999 } 4000 if (Or.getOperand(1) == Other) { 4001 X = Or.getOperand(1); 4002 Y = Or.getOperand(0); 4003 return true; 4004 } 4005 return false; 4006 }; 4007 4008 EVT OpVT = N0.getValueType(); 4009 EVT ShAmtVT = N0.getOperand(2).getValueType(); 4010 SDValue F0 = N0.getOperand(0); 4011 SDValue F1 = N0.getOperand(1); 4012 if (matchOr(F0, F1)) { 4013 // fshl (or X, Y), X, C ==/!= 0 --> or (shl Y, C), X ==/!= 0 4014 SDValue NewShAmt = DAG.getConstant(ShAmt, dl, ShAmtVT); 4015 SDValue Shift = DAG.getNode(ISD::SHL, dl, OpVT, Y, NewShAmt); 4016 SDValue NewOr = DAG.getNode(ISD::OR, dl, OpVT, Shift, X); 4017 return DAG.getSetCC(dl, VT, NewOr, N1, Cond); 4018 } 4019 if (matchOr(F1, F0)) { 4020 // fshl X, (or X, Y), C ==/!= 0 --> or (srl Y, BW-C), X ==/!= 0 4021 SDValue NewShAmt = DAG.getConstant(BitWidth - ShAmt, dl, ShAmtVT); 4022 SDValue Shift = DAG.getNode(ISD::SRL, dl, OpVT, Y, NewShAmt); 4023 SDValue NewOr = DAG.getNode(ISD::OR, dl, OpVT, Shift, X); 4024 return DAG.getSetCC(dl, VT, NewOr, N1, Cond); 4025 } 4026 4027 return SDValue(); 4028 } 4029 4030 /// Try to simplify a setcc built with the specified operands and cc. If it is 4031 /// unable to simplify it, return a null SDValue. 4032 SDValue TargetLowering::SimplifySetCC(EVT VT, SDValue N0, SDValue N1, 4033 ISD::CondCode Cond, bool foldBooleans, 4034 DAGCombinerInfo &DCI, 4035 const SDLoc &dl) const { 4036 SelectionDAG &DAG = DCI.DAG; 4037 const DataLayout &Layout = DAG.getDataLayout(); 4038 EVT OpVT = N0.getValueType(); 4039 4040 // Constant fold or commute setcc. 4041 if (SDValue Fold = DAG.FoldSetCC(VT, N0, N1, Cond, dl)) 4042 return Fold; 4043 4044 bool N0ConstOrSplat = 4045 isConstOrConstSplat(N0, /*AllowUndefs*/ false, /*AllowTruncate*/ true); 4046 bool N1ConstOrSplat = 4047 isConstOrConstSplat(N1, /*AllowUndefs*/ false, /*AllowTruncate*/ true); 4048 4049 // Ensure that the constant occurs on the RHS and fold constant comparisons. 4050 // TODO: Handle non-splat vector constants. All undef causes trouble. 4051 // FIXME: We can't yet fold constant scalable vector splats, so avoid an 4052 // infinite loop here when we encounter one. 
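  // Illustrative example (values chosen for exposition): setcc 42, %x, setlt
  // is canonicalized below to setcc %x, 42, setgt so that the constant-RHS
  // folds later in this function can apply.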
4053 ISD::CondCode SwappedCC = ISD::getSetCCSwappedOperands(Cond); 4054 if (N0ConstOrSplat && (!OpVT.isScalableVector() || !N1ConstOrSplat) && 4055 (DCI.isBeforeLegalizeOps() || 4056 isCondCodeLegal(SwappedCC, N0.getSimpleValueType()))) 4057 return DAG.getSetCC(dl, VT, N1, N0, SwappedCC); 4058 4059 // If we have a subtract with the same 2 non-constant operands as this setcc 4060 // -- but in reverse order -- then try to commute the operands of this setcc 4061 // to match. A matching pair of setcc (cmp) and sub may be combined into 1 4062 // instruction on some targets. 4063 if (!N0ConstOrSplat && !N1ConstOrSplat && 4064 (DCI.isBeforeLegalizeOps() || 4065 isCondCodeLegal(SwappedCC, N0.getSimpleValueType())) && 4066 DAG.doesNodeExist(ISD::SUB, DAG.getVTList(OpVT), {N1, N0}) && 4067 !DAG.doesNodeExist(ISD::SUB, DAG.getVTList(OpVT), {N0, N1})) 4068 return DAG.getSetCC(dl, VT, N1, N0, SwappedCC); 4069 4070 if (SDValue V = foldSetCCWithRotate(VT, N0, N1, Cond, dl, DAG)) 4071 return V; 4072 4073 if (SDValue V = foldSetCCWithFunnelShift(VT, N0, N1, Cond, dl, DAG)) 4074 return V; 4075 4076 if (auto *N1C = isConstOrConstSplat(N1)) { 4077 const APInt &C1 = N1C->getAPIntValue(); 4078 4079 // Optimize some CTPOP cases. 4080 if (SDValue V = simplifySetCCWithCTPOP(*this, VT, N0, C1, Cond, dl, DAG)) 4081 return V; 4082 4083 // If the LHS is '(srl (ctlz x), 5)', the RHS is 0/1, and this is an 4084 // equality comparison, then we're just comparing whether X itself is 4085 // zero. 4086 if (N0.getOpcode() == ISD::SRL && (C1.isZero() || C1.isOne()) && 4087 N0.getOperand(0).getOpcode() == ISD::CTLZ && 4088 isPowerOf2_32(N0.getScalarValueSizeInBits())) { 4089 if (ConstantSDNode *ShAmt = isConstOrConstSplat(N0.getOperand(1))) { 4090 if ((Cond == ISD::SETEQ || Cond == ISD::SETNE) && 4091 ShAmt->getAPIntValue() == Log2_32(N0.getScalarValueSizeInBits())) { 4092 if ((C1 == 0) == (Cond == ISD::SETEQ)) { 4093 // (srl (ctlz x), 5) == 0 -> X != 0 4094 // (srl (ctlz x), 5) != 1 -> X != 0 4095 Cond = ISD::SETNE; 4096 } else { 4097 // (srl (ctlz x), 5) != 0 -> X == 0 4098 // (srl (ctlz x), 5) == 1 -> X == 0 4099 Cond = ISD::SETEQ; 4100 } 4101 SDValue Zero = DAG.getConstant(0, dl, N0.getValueType()); 4102 return DAG.getSetCC(dl, VT, N0.getOperand(0).getOperand(0), Zero, 4103 Cond); 4104 } 4105 } 4106 } 4107 } 4108 4109 // FIXME: Support vectors. 
4110 if (auto *N1C = dyn_cast<ConstantSDNode>(N1.getNode())) { 4111 const APInt &C1 = N1C->getAPIntValue(); 4112 4113 // (zext x) == C --> x == (trunc C) 4114 // (sext x) == C --> x == (trunc C) 4115 if ((Cond == ISD::SETEQ || Cond == ISD::SETNE) && 4116 DCI.isBeforeLegalize() && N0->hasOneUse()) { 4117 unsigned MinBits = N0.getValueSizeInBits(); 4118 SDValue PreExt; 4119 bool Signed = false; 4120 if (N0->getOpcode() == ISD::ZERO_EXTEND) { 4121 // ZExt 4122 MinBits = N0->getOperand(0).getValueSizeInBits(); 4123 PreExt = N0->getOperand(0); 4124 } else if (N0->getOpcode() == ISD::AND) { 4125 // DAGCombine turns costly ZExts into ANDs 4126 if (auto *C = dyn_cast<ConstantSDNode>(N0->getOperand(1))) 4127 if ((C->getAPIntValue()+1).isPowerOf2()) { 4128 MinBits = C->getAPIntValue().countTrailingOnes(); 4129 PreExt = N0->getOperand(0); 4130 } 4131 } else if (N0->getOpcode() == ISD::SIGN_EXTEND) { 4132 // SExt 4133 MinBits = N0->getOperand(0).getValueSizeInBits(); 4134 PreExt = N0->getOperand(0); 4135 Signed = true; 4136 } else if (auto *LN0 = dyn_cast<LoadSDNode>(N0)) { 4137 // ZEXTLOAD / SEXTLOAD 4138 if (LN0->getExtensionType() == ISD::ZEXTLOAD) { 4139 MinBits = LN0->getMemoryVT().getSizeInBits(); 4140 PreExt = N0; 4141 } else if (LN0->getExtensionType() == ISD::SEXTLOAD) { 4142 Signed = true; 4143 MinBits = LN0->getMemoryVT().getSizeInBits(); 4144 PreExt = N0; 4145 } 4146 } 4147 4148 // Figure out how many bits we need to preserve this constant. 4149 unsigned ReqdBits = Signed ? C1.getMinSignedBits() : C1.getActiveBits(); 4150 4151 // Make sure we're not losing bits from the constant. 4152 if (MinBits > 0 && 4153 MinBits < C1.getBitWidth() && 4154 MinBits >= ReqdBits) { 4155 EVT MinVT = EVT::getIntegerVT(*DAG.getContext(), MinBits); 4156 if (isTypeDesirableForOp(ISD::SETCC, MinVT)) { 4157 // Will get folded away. 4158 SDValue Trunc = DAG.getNode(ISD::TRUNCATE, dl, MinVT, PreExt); 4159 if (MinBits == 1 && C1 == 1) 4160 // Invert the condition. 4161 return DAG.getSetCC(dl, VT, Trunc, DAG.getConstant(0, dl, MVT::i1), 4162 Cond == ISD::SETEQ ? 
ISD::SETNE : ISD::SETEQ); 4163 SDValue C = DAG.getConstant(C1.trunc(MinBits), dl, MinVT); 4164 return DAG.getSetCC(dl, VT, Trunc, C, Cond); 4165 } 4166 4167 // If truncating the setcc operands is not desirable, we can still 4168 // simplify the expression in some cases: 4169 // setcc ([sz]ext (setcc x, y, cc)), 0, setne) -> setcc (x, y, cc) 4170 // setcc ([sz]ext (setcc x, y, cc)), 0, seteq) -> setcc (x, y, inv(cc)) 4171 // setcc (zext (setcc x, y, cc)), 1, setne) -> setcc (x, y, inv(cc)) 4172 // setcc (zext (setcc x, y, cc)), 1, seteq) -> setcc (x, y, cc) 4173 // setcc (sext (setcc x, y, cc)), -1, setne) -> setcc (x, y, inv(cc)) 4174 // setcc (sext (setcc x, y, cc)), -1, seteq) -> setcc (x, y, cc) 4175 SDValue TopSetCC = N0->getOperand(0); 4176 unsigned N0Opc = N0->getOpcode(); 4177 bool SExt = (N0Opc == ISD::SIGN_EXTEND); 4178 if (TopSetCC.getValueType() == MVT::i1 && VT == MVT::i1 && 4179 TopSetCC.getOpcode() == ISD::SETCC && 4180 (N0Opc == ISD::ZERO_EXTEND || N0Opc == ISD::SIGN_EXTEND) && 4181 (isConstFalseVal(N1) || 4182 isExtendedTrueVal(N1C, N0->getValueType(0), SExt))) { 4183 4184 bool Inverse = (N1C->isZero() && Cond == ISD::SETEQ) || 4185 (!N1C->isZero() && Cond == ISD::SETNE); 4186 4187 if (!Inverse) 4188 return TopSetCC; 4189 4190 ISD::CondCode InvCond = ISD::getSetCCInverse( 4191 cast<CondCodeSDNode>(TopSetCC.getOperand(2))->get(), 4192 TopSetCC.getOperand(0).getValueType()); 4193 return DAG.getSetCC(dl, VT, TopSetCC.getOperand(0), 4194 TopSetCC.getOperand(1), 4195 InvCond); 4196 } 4197 } 4198 } 4199 4200 // If the LHS is '(and load, const)', the RHS is 0, the test is for 4201 // equality or unsigned, and all 1 bits of the const are in the same 4202 // partial word, see if we can shorten the load. 4203 if (DCI.isBeforeLegalize() && 4204 !ISD::isSignedIntSetCC(Cond) && 4205 N0.getOpcode() == ISD::AND && C1 == 0 && 4206 N0.getNode()->hasOneUse() && 4207 isa<LoadSDNode>(N0.getOperand(0)) && 4208 N0.getOperand(0).getNode()->hasOneUse() && 4209 isa<ConstantSDNode>(N0.getOperand(1))) { 4210 LoadSDNode *Lod = cast<LoadSDNode>(N0.getOperand(0)); 4211 APInt bestMask; 4212 unsigned bestWidth = 0, bestOffset = 0; 4213 if (Lod->isSimple() && Lod->isUnindexed()) { 4214 unsigned origWidth = N0.getValueSizeInBits(); 4215 unsigned maskWidth = origWidth; 4216 // We can narrow (e.g.) 16-bit extending loads on 32-bit target to 4217 // 8 bits, but have to be careful... 
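      // Illustrative example (values chosen for exposition): on a
      // little-endian target, ((i32 load %p) & 0xFF00) == 0 may be narrowed
      // to an i8 load at byte offset 1, masked with 0xFF and compared
      // against 0.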
4218 if (Lod->getExtensionType() != ISD::NON_EXTLOAD) 4219 origWidth = Lod->getMemoryVT().getSizeInBits(); 4220 const APInt &Mask = N0.getConstantOperandAPInt(1); 4221 for (unsigned width = origWidth / 2; width>=8; width /= 2) { 4222 APInt newMask = APInt::getLowBitsSet(maskWidth, width); 4223 for (unsigned offset=0; offset<origWidth/width; offset++) { 4224 if (Mask.isSubsetOf(newMask)) { 4225 if (Layout.isLittleEndian()) 4226 bestOffset = (uint64_t)offset * (width/8); 4227 else 4228 bestOffset = (origWidth/width - offset - 1) * (width/8); 4229 bestMask = Mask.lshr(offset * (width/8) * 8); 4230 bestWidth = width; 4231 break; 4232 } 4233 newMask <<= width; 4234 } 4235 } 4236 } 4237 if (bestWidth) { 4238 EVT newVT = EVT::getIntegerVT(*DAG.getContext(), bestWidth); 4239 if (newVT.isRound() && 4240 shouldReduceLoadWidth(Lod, ISD::NON_EXTLOAD, newVT)) { 4241 SDValue Ptr = Lod->getBasePtr(); 4242 if (bestOffset != 0) 4243 Ptr = 4244 DAG.getMemBasePlusOffset(Ptr, TypeSize::Fixed(bestOffset), dl); 4245 SDValue NewLoad = 4246 DAG.getLoad(newVT, dl, Lod->getChain(), Ptr, 4247 Lod->getPointerInfo().getWithOffset(bestOffset), 4248 Lod->getOriginalAlign()); 4249 return DAG.getSetCC(dl, VT, 4250 DAG.getNode(ISD::AND, dl, newVT, NewLoad, 4251 DAG.getConstant(bestMask.trunc(bestWidth), 4252 dl, newVT)), 4253 DAG.getConstant(0LL, dl, newVT), Cond); 4254 } 4255 } 4256 } 4257 4258 // If the LHS is a ZERO_EXTEND, perform the comparison on the input. 4259 if (N0.getOpcode() == ISD::ZERO_EXTEND) { 4260 unsigned InSize = N0.getOperand(0).getValueSizeInBits(); 4261 4262 // If the comparison constant has bits in the upper part, the 4263 // zero-extended value could never match. 4264 if (C1.intersects(APInt::getHighBitsSet(C1.getBitWidth(), 4265 C1.getBitWidth() - InSize))) { 4266 switch (Cond) { 4267 case ISD::SETUGT: 4268 case ISD::SETUGE: 4269 case ISD::SETEQ: 4270 return DAG.getConstant(0, dl, VT); 4271 case ISD::SETULT: 4272 case ISD::SETULE: 4273 case ISD::SETNE: 4274 return DAG.getConstant(1, dl, VT); 4275 case ISD::SETGT: 4276 case ISD::SETGE: 4277 // True if the sign bit of C1 is set. 4278 return DAG.getConstant(C1.isNegative(), dl, VT); 4279 case ISD::SETLT: 4280 case ISD::SETLE: 4281 // True if the sign bit of C1 isn't set. 4282 return DAG.getConstant(C1.isNonNegative(), dl, VT); 4283 default: 4284 break; 4285 } 4286 } 4287 4288 // Otherwise, we can perform the comparison with the low bits. 
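    // Illustrative example (values chosen for exposition): since the
    // constant fits in the narrow type, (zext i8 %x to i32) u< 100 can be
    // evaluated directly as the i8 comparison setult %x, 100.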
4289 switch (Cond) { 4290 case ISD::SETEQ: 4291 case ISD::SETNE: 4292 case ISD::SETUGT: 4293 case ISD::SETUGE: 4294 case ISD::SETULT: 4295 case ISD::SETULE: { 4296 EVT newVT = N0.getOperand(0).getValueType(); 4297 if (DCI.isBeforeLegalizeOps() || 4298 (isOperationLegal(ISD::SETCC, newVT) && 4299 isCondCodeLegal(Cond, newVT.getSimpleVT()))) { 4300 EVT NewSetCCVT = getSetCCResultType(Layout, *DAG.getContext(), newVT); 4301 SDValue NewConst = DAG.getConstant(C1.trunc(InSize), dl, newVT); 4302 4303 SDValue NewSetCC = DAG.getSetCC(dl, NewSetCCVT, N0.getOperand(0), 4304 NewConst, Cond); 4305 return DAG.getBoolExtOrTrunc(NewSetCC, dl, VT, N0.getValueType()); 4306 } 4307 break; 4308 } 4309 default: 4310 break; // todo, be more careful with signed comparisons 4311 } 4312 } else if (N0.getOpcode() == ISD::SIGN_EXTEND_INREG && 4313 (Cond == ISD::SETEQ || Cond == ISD::SETNE) && 4314 !isSExtCheaperThanZExt(cast<VTSDNode>(N0.getOperand(1))->getVT(), 4315 OpVT)) { 4316 EVT ExtSrcTy = cast<VTSDNode>(N0.getOperand(1))->getVT(); 4317 unsigned ExtSrcTyBits = ExtSrcTy.getSizeInBits(); 4318 EVT ExtDstTy = N0.getValueType(); 4319 unsigned ExtDstTyBits = ExtDstTy.getSizeInBits(); 4320 4321 // If the constant doesn't fit into the number of bits for the source of 4322 // the sign extension, it is impossible for both sides to be equal. 4323 if (C1.getMinSignedBits() > ExtSrcTyBits) 4324 return DAG.getBoolConstant(Cond == ISD::SETNE, dl, VT, OpVT); 4325 4326 assert(ExtDstTy == N0.getOperand(0).getValueType() && 4327 ExtDstTy != ExtSrcTy && "Unexpected types!"); 4328 APInt Imm = APInt::getLowBitsSet(ExtDstTyBits, ExtSrcTyBits); 4329 SDValue ZextOp = DAG.getNode(ISD::AND, dl, ExtDstTy, N0.getOperand(0), 4330 DAG.getConstant(Imm, dl, ExtDstTy)); 4331 if (!DCI.isCalledByLegalizer()) 4332 DCI.AddToWorklist(ZextOp.getNode()); 4333 // Otherwise, make this a use of a zext. 4334 return DAG.getSetCC(dl, VT, ZextOp, 4335 DAG.getConstant(C1 & Imm, dl, ExtDstTy), Cond); 4336 } else if ((N1C->isZero() || N1C->isOne()) && 4337 (Cond == ISD::SETEQ || Cond == ISD::SETNE)) { 4338 // SETCC (SETCC), [0|1], [EQ|NE] -> SETCC 4339 if (N0.getOpcode() == ISD::SETCC && 4340 isTypeLegal(VT) && VT.bitsLE(N0.getValueType()) && 4341 (N0.getValueType() == MVT::i1 || 4342 getBooleanContents(N0.getOperand(0).getValueType()) == 4343 ZeroOrOneBooleanContent)) { 4344 bool TrueWhenTrue = (Cond == ISD::SETEQ) ^ (!N1C->isOne()); 4345 if (TrueWhenTrue) 4346 return DAG.getNode(ISD::TRUNCATE, dl, VT, N0); 4347 // Invert the condition. 4348 ISD::CondCode CC = cast<CondCodeSDNode>(N0.getOperand(2))->get(); 4349 CC = ISD::getSetCCInverse(CC, N0.getOperand(0).getValueType()); 4350 if (DCI.isBeforeLegalizeOps() || 4351 isCondCodeLegal(CC, N0.getOperand(0).getSimpleValueType())) 4352 return DAG.getSetCC(dl, VT, N0.getOperand(0), N0.getOperand(1), CC); 4353 } 4354 4355 if ((N0.getOpcode() == ISD::XOR || 4356 (N0.getOpcode() == ISD::AND && 4357 N0.getOperand(0).getOpcode() == ISD::XOR && 4358 N0.getOperand(1) == N0.getOperand(0).getOperand(1))) && 4359 isOneConstant(N0.getOperand(1))) { 4360 // If this is (X^1) == 0/1, swap the RHS and eliminate the xor. We 4361 // can only do this if the top bits are known zero. 4362 unsigned BitWidth = N0.getValueSizeInBits(); 4363 if (DAG.MaskedValueIsZero(N0, 4364 APInt::getHighBitsSet(BitWidth, 4365 BitWidth-1))) { 4366 // Okay, get the un-inverted input value. 
4367 SDValue Val; 4368 if (N0.getOpcode() == ISD::XOR) { 4369 Val = N0.getOperand(0); 4370 } else { 4371 assert(N0.getOpcode() == ISD::AND && 4372 N0.getOperand(0).getOpcode() == ISD::XOR); 4373 // ((X^1)&1)^1 -> X & 1 4374 Val = DAG.getNode(ISD::AND, dl, N0.getValueType(), 4375 N0.getOperand(0).getOperand(0), 4376 N0.getOperand(1)); 4377 } 4378 4379 return DAG.getSetCC(dl, VT, Val, N1, 4380 Cond == ISD::SETEQ ? ISD::SETNE : ISD::SETEQ); 4381 } 4382 } else if (N1C->isOne()) { 4383 SDValue Op0 = N0; 4384 if (Op0.getOpcode() == ISD::TRUNCATE) 4385 Op0 = Op0.getOperand(0); 4386 4387 if ((Op0.getOpcode() == ISD::XOR) && 4388 Op0.getOperand(0).getOpcode() == ISD::SETCC && 4389 Op0.getOperand(1).getOpcode() == ISD::SETCC) { 4390 SDValue XorLHS = Op0.getOperand(0); 4391 SDValue XorRHS = Op0.getOperand(1); 4392 // Ensure that the input setccs return an i1 type or 0/1 value. 4393 if (Op0.getValueType() == MVT::i1 || 4394 (getBooleanContents(XorLHS.getOperand(0).getValueType()) == 4395 ZeroOrOneBooleanContent && 4396 getBooleanContents(XorRHS.getOperand(0).getValueType()) == 4397 ZeroOrOneBooleanContent)) { 4398 // (xor (setcc), (setcc)) == / != 1 -> (setcc) != / == (setcc) 4399 Cond = (Cond == ISD::SETEQ) ? ISD::SETNE : ISD::SETEQ; 4400 return DAG.getSetCC(dl, VT, XorLHS, XorRHS, Cond); 4401 } 4402 } 4403 if (Op0.getOpcode() == ISD::AND && isOneConstant(Op0.getOperand(1))) { 4404 // If this is (X&1) == / != 1, normalize it to (X&1) != / == 0. 4405 if (Op0.getValueType().bitsGT(VT)) 4406 Op0 = DAG.getNode(ISD::AND, dl, VT, 4407 DAG.getNode(ISD::TRUNCATE, dl, VT, Op0.getOperand(0)), 4408 DAG.getConstant(1, dl, VT)); 4409 else if (Op0.getValueType().bitsLT(VT)) 4410 Op0 = DAG.getNode(ISD::AND, dl, VT, 4411 DAG.getNode(ISD::ANY_EXTEND, dl, VT, Op0.getOperand(0)), 4412 DAG.getConstant(1, dl, VT)); 4413 4414 return DAG.getSetCC(dl, VT, Op0, 4415 DAG.getConstant(0, dl, Op0.getValueType()), 4416 Cond == ISD::SETEQ ? ISD::SETNE : ISD::SETEQ); 4417 } 4418 if (Op0.getOpcode() == ISD::AssertZext && 4419 cast<VTSDNode>(Op0.getOperand(1))->getVT() == MVT::i1) 4420 return DAG.getSetCC(dl, VT, Op0, 4421 DAG.getConstant(0, dl, Op0.getValueType()), 4422 Cond == ISD::SETEQ ? ISD::SETNE : ISD::SETEQ); 4423 } 4424 } 4425 4426 // Given: 4427 // icmp eq/ne (urem %x, %y), 0 4428 // Iff %x has 0 or 1 bits set, and %y has at least 2 bits set, omit 'urem': 4429 // icmp eq/ne %x, 0 4430 if (N0.getOpcode() == ISD::UREM && N1C->isZero() && 4431 (Cond == ISD::SETEQ || Cond == ISD::SETNE)) { 4432 KnownBits XKnown = DAG.computeKnownBits(N0.getOperand(0)); 4433 KnownBits YKnown = DAG.computeKnownBits(N0.getOperand(1)); 4434 if (XKnown.countMaxPopulation() == 1 && YKnown.countMinPopulation() >= 2) 4435 return DAG.getSetCC(dl, VT, N0.getOperand(0), N1, Cond); 4436 } 4437 4438 // Fold set_cc seteq (ashr X, BW-1), -1 -> set_cc setlt X, 0 4439 // and set_cc setne (ashr X, BW-1), -1 -> set_cc setge X, 0 4440 if ((Cond == ISD::SETEQ || Cond == ISD::SETNE) && 4441 N0.getOpcode() == ISD::SRA && isa<ConstantSDNode>(N0.getOperand(1)) && 4442 N0.getConstantOperandAPInt(1) == OpVT.getScalarSizeInBits() - 1 && 4443 N1C && N1C->isAllOnes()) { 4444 return DAG.getSetCC(dl, VT, N0.getOperand(0), 4445 DAG.getConstant(0, dl, OpVT), 4446 Cond == ISD::SETEQ ? ISD::SETLT : ISD::SETGE); 4447 } 4448 4449 if (SDValue V = 4450 optimizeSetCCOfSignedTruncationCheck(VT, N0, N1, Cond, DCI, dl)) 4451 return V; 4452 } 4453 4454 // These simplifications apply to splat vectors as well. 4455 // TODO: Handle more splat vector cases. 
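  // Illustrative example (values chosen for exposition): in the block below,
  // setuge %x, 5 is canonicalized to setugt %x, 4 and setule %x, 7 to
  // setult %x, 8, when the new condition code and immediate are acceptable.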
4456 if (auto *N1C = isConstOrConstSplat(N1)) { 4457 const APInt &C1 = N1C->getAPIntValue(); 4458 4459 APInt MinVal, MaxVal; 4460 unsigned OperandBitSize = N1C->getValueType(0).getScalarSizeInBits(); 4461 if (ISD::isSignedIntSetCC(Cond)) { 4462 MinVal = APInt::getSignedMinValue(OperandBitSize); 4463 MaxVal = APInt::getSignedMaxValue(OperandBitSize); 4464 } else { 4465 MinVal = APInt::getMinValue(OperandBitSize); 4466 MaxVal = APInt::getMaxValue(OperandBitSize); 4467 } 4468 4469 // Canonicalize GE/LE comparisons to use GT/LT comparisons. 4470 if (Cond == ISD::SETGE || Cond == ISD::SETUGE) { 4471 // X >= MIN --> true 4472 if (C1 == MinVal) 4473 return DAG.getBoolConstant(true, dl, VT, OpVT); 4474 4475 if (!VT.isVector()) { // TODO: Support this for vectors. 4476 // X >= C0 --> X > (C0 - 1) 4477 APInt C = C1 - 1; 4478 ISD::CondCode NewCC = (Cond == ISD::SETGE) ? ISD::SETGT : ISD::SETUGT; 4479 if ((DCI.isBeforeLegalizeOps() || 4480 isCondCodeLegal(NewCC, VT.getSimpleVT())) && 4481 (!N1C->isOpaque() || (C.getBitWidth() <= 64 && 4482 isLegalICmpImmediate(C.getSExtValue())))) { 4483 return DAG.getSetCC(dl, VT, N0, 4484 DAG.getConstant(C, dl, N1.getValueType()), 4485 NewCC); 4486 } 4487 } 4488 } 4489 4490 if (Cond == ISD::SETLE || Cond == ISD::SETULE) { 4491 // X <= MAX --> true 4492 if (C1 == MaxVal) 4493 return DAG.getBoolConstant(true, dl, VT, OpVT); 4494 4495 // X <= C0 --> X < (C0 + 1) 4496 if (!VT.isVector()) { // TODO: Support this for vectors. 4497 APInt C = C1 + 1; 4498 ISD::CondCode NewCC = (Cond == ISD::SETLE) ? ISD::SETLT : ISD::SETULT; 4499 if ((DCI.isBeforeLegalizeOps() || 4500 isCondCodeLegal(NewCC, VT.getSimpleVT())) && 4501 (!N1C->isOpaque() || (C.getBitWidth() <= 64 && 4502 isLegalICmpImmediate(C.getSExtValue())))) { 4503 return DAG.getSetCC(dl, VT, N0, 4504 DAG.getConstant(C, dl, N1.getValueType()), 4505 NewCC); 4506 } 4507 } 4508 } 4509 4510 if (Cond == ISD::SETLT || Cond == ISD::SETULT) { 4511 if (C1 == MinVal) 4512 return DAG.getBoolConstant(false, dl, VT, OpVT); // X < MIN --> false 4513 4514 // TODO: Support this for vectors after legalize ops. 4515 if (!VT.isVector() || DCI.isBeforeLegalizeOps()) { 4516 // Canonicalize setlt X, Max --> setne X, Max 4517 if (C1 == MaxVal) 4518 return DAG.getSetCC(dl, VT, N0, N1, ISD::SETNE); 4519 4520 // If we have setult X, 1, turn it into seteq X, 0 4521 if (C1 == MinVal+1) 4522 return DAG.getSetCC(dl, VT, N0, 4523 DAG.getConstant(MinVal, dl, N0.getValueType()), 4524 ISD::SETEQ); 4525 } 4526 } 4527 4528 if (Cond == ISD::SETGT || Cond == ISD::SETUGT) { 4529 if (C1 == MaxVal) 4530 return DAG.getBoolConstant(false, dl, VT, OpVT); // X > MAX --> false 4531 4532 // TODO: Support this for vectors after legalize ops. 4533 if (!VT.isVector() || DCI.isBeforeLegalizeOps()) { 4534 // Canonicalize setgt X, Min --> setne X, Min 4535 if (C1 == MinVal) 4536 return DAG.getSetCC(dl, VT, N0, N1, ISD::SETNE); 4537 4538 // If we have setugt X, Max-1, turn it into seteq X, Max 4539 if (C1 == MaxVal-1) 4540 return DAG.getSetCC(dl, VT, N0, 4541 DAG.getConstant(MaxVal, dl, N0.getValueType()), 4542 ISD::SETEQ); 4543 } 4544 } 4545 4546 if (Cond == ISD::SETEQ || Cond == ISD::SETNE) { 4547 // (X & (C l>>/<< Y)) ==/!= 0 --> ((X <</l>> Y) & C) ==/!= 0 4548 if (C1.isZero()) 4549 if (SDValue CC = optimizeSetCCByHoistingAndByConstFromLogicalShift( 4550 VT, N0, N1, Cond, DCI, dl)) 4551 return CC; 4552 4553 // For all/any comparisons, replace or(x,shl(y,bw/2)) with and/or(x,y). 
4554 // For example, when high 32-bits of i64 X are known clear: 4555 // all bits clear: (X | (Y<<32)) == 0 --> (X | Y) == 0 4556 // all bits set: (X | (Y<<32)) == -1 --> (X & Y) == -1 4557 bool CmpZero = N1C->getAPIntValue().isZero(); 4558 bool CmpNegOne = N1C->getAPIntValue().isAllOnes(); 4559 if ((CmpZero || CmpNegOne) && N0.hasOneUse()) { 4560 // Match or(lo,shl(hi,bw/2)) pattern. 4561 auto IsConcat = [&](SDValue V, SDValue &Lo, SDValue &Hi) { 4562 unsigned EltBits = V.getScalarValueSizeInBits(); 4563 if (V.getOpcode() != ISD::OR || (EltBits % 2) != 0) 4564 return false; 4565 SDValue LHS = V.getOperand(0); 4566 SDValue RHS = V.getOperand(1); 4567 APInt HiBits = APInt::getHighBitsSet(EltBits, EltBits / 2); 4568 // Unshifted element must have zero upperbits. 4569 if (RHS.getOpcode() == ISD::SHL && 4570 isa<ConstantSDNode>(RHS.getOperand(1)) && 4571 RHS.getConstantOperandAPInt(1) == (EltBits / 2) && 4572 DAG.MaskedValueIsZero(LHS, HiBits)) { 4573 Lo = LHS; 4574 Hi = RHS.getOperand(0); 4575 return true; 4576 } 4577 if (LHS.getOpcode() == ISD::SHL && 4578 isa<ConstantSDNode>(LHS.getOperand(1)) && 4579 LHS.getConstantOperandAPInt(1) == (EltBits / 2) && 4580 DAG.MaskedValueIsZero(RHS, HiBits)) { 4581 Lo = RHS; 4582 Hi = LHS.getOperand(0); 4583 return true; 4584 } 4585 return false; 4586 }; 4587 4588 auto MergeConcat = [&](SDValue Lo, SDValue Hi) { 4589 unsigned EltBits = N0.getScalarValueSizeInBits(); 4590 unsigned HalfBits = EltBits / 2; 4591 APInt HiBits = APInt::getHighBitsSet(EltBits, HalfBits); 4592 SDValue LoBits = DAG.getConstant(~HiBits, dl, OpVT); 4593 SDValue HiMask = DAG.getNode(ISD::AND, dl, OpVT, Hi, LoBits); 4594 SDValue NewN0 = 4595 DAG.getNode(CmpZero ? ISD::OR : ISD::AND, dl, OpVT, Lo, HiMask); 4596 SDValue NewN1 = CmpZero ? DAG.getConstant(0, dl, OpVT) : LoBits; 4597 return DAG.getSetCC(dl, VT, NewN0, NewN1, Cond); 4598 }; 4599 4600 SDValue Lo, Hi; 4601 if (IsConcat(N0, Lo, Hi)) 4602 return MergeConcat(Lo, Hi); 4603 4604 if (N0.getOpcode() == ISD::AND || N0.getOpcode() == ISD::OR) { 4605 SDValue Lo0, Lo1, Hi0, Hi1; 4606 if (IsConcat(N0.getOperand(0), Lo0, Hi0) && 4607 IsConcat(N0.getOperand(1), Lo1, Hi1)) { 4608 return MergeConcat(DAG.getNode(N0.getOpcode(), dl, OpVT, Lo0, Lo1), 4609 DAG.getNode(N0.getOpcode(), dl, OpVT, Hi0, Hi1)); 4610 } 4611 } 4612 } 4613 } 4614 4615 // If we have "setcc X, C0", check to see if we can shrink the immediate 4616 // by changing cc. 4617 // TODO: Support this for vectors after legalize ops. 4618 if (!VT.isVector() || DCI.isBeforeLegalizeOps()) { 4619 // SETUGT X, SINTMAX -> SETLT X, 0 4620 // SETUGE X, SINTMIN -> SETLT X, 0 4621 if ((Cond == ISD::SETUGT && C1.isMaxSignedValue()) || 4622 (Cond == ISD::SETUGE && C1.isMinSignedValue())) 4623 return DAG.getSetCC(dl, VT, N0, 4624 DAG.getConstant(0, dl, N1.getValueType()), 4625 ISD::SETLT); 4626 4627 // SETULT X, SINTMIN -> SETGT X, -1 4628 // SETULE X, SINTMAX -> SETGT X, -1 4629 if ((Cond == ISD::SETULT && C1.isMinSignedValue()) || 4630 (Cond == ISD::SETULE && C1.isMaxSignedValue())) 4631 return DAG.getSetCC(dl, VT, N0, 4632 DAG.getAllOnesConstant(dl, N1.getValueType()), 4633 ISD::SETGT); 4634 } 4635 } 4636 4637 // Back to non-vector simplifications. 4638 // TODO: Can we do these for vector splats? 4639 if (auto *N1C = dyn_cast<ConstantSDNode>(N1.getNode())) { 4640 const TargetLowering &TLI = DAG.getTargetLoweringInfo(); 4641 const APInt &C1 = N1C->getAPIntValue(); 4642 EVT ShValTy = N0.getValueType(); 4643 4644 // Fold bit comparisons when we can. 
This will result in an 4645 // incorrect value when boolean false is negative one, unless 4646 // the bitsize is 1 in which case the false value is the same 4647 // in practice regardless of the representation. 4648 if ((VT.getSizeInBits() == 1 || 4649 getBooleanContents(N0.getValueType()) == ZeroOrOneBooleanContent) && 4650 (Cond == ISD::SETEQ || Cond == ISD::SETNE) && 4651 (VT == ShValTy || (isTypeLegal(VT) && VT.bitsLE(ShValTy))) && 4652 N0.getOpcode() == ISD::AND) { 4653 if (auto *AndRHS = dyn_cast<ConstantSDNode>(N0.getOperand(1))) { 4654 EVT ShiftTy = 4655 getShiftAmountTy(ShValTy, Layout, !DCI.isBeforeLegalize()); 4656 if (Cond == ISD::SETNE && C1 == 0) {// (X & 8) != 0 --> (X & 8) >> 3 4657 // Perform the xform if the AND RHS is a single bit. 4658 unsigned ShCt = AndRHS->getAPIntValue().logBase2(); 4659 if (AndRHS->getAPIntValue().isPowerOf2() && 4660 !TLI.shouldAvoidTransformToShift(ShValTy, ShCt)) { 4661 return DAG.getNode(ISD::TRUNCATE, dl, VT, 4662 DAG.getNode(ISD::SRL, dl, ShValTy, N0, 4663 DAG.getConstant(ShCt, dl, ShiftTy))); 4664 } 4665 } else if (Cond == ISD::SETEQ && C1 == AndRHS->getAPIntValue()) { 4666 // (X & 8) == 8 --> (X & 8) >> 3 4667 // Perform the xform if C1 is a single bit. 4668 unsigned ShCt = C1.logBase2(); 4669 if (C1.isPowerOf2() && 4670 !TLI.shouldAvoidTransformToShift(ShValTy, ShCt)) { 4671 return DAG.getNode(ISD::TRUNCATE, dl, VT, 4672 DAG.getNode(ISD::SRL, dl, ShValTy, N0, 4673 DAG.getConstant(ShCt, dl, ShiftTy))); 4674 } 4675 } 4676 } 4677 } 4678 4679 if (C1.getMinSignedBits() <= 64 && 4680 !isLegalICmpImmediate(C1.getSExtValue())) { 4681 EVT ShiftTy = getShiftAmountTy(ShValTy, Layout, !DCI.isBeforeLegalize()); 4682 // (X & -256) == 256 -> (X >> 8) == 1 4683 if ((Cond == ISD::SETEQ || Cond == ISD::SETNE) && 4684 N0.getOpcode() == ISD::AND && N0.hasOneUse()) { 4685 if (auto *AndRHS = dyn_cast<ConstantSDNode>(N0.getOperand(1))) { 4686 const APInt &AndRHSC = AndRHS->getAPIntValue(); 4687 if (AndRHSC.isNegatedPowerOf2() && (AndRHSC & C1) == C1) { 4688 unsigned ShiftBits = AndRHSC.countTrailingZeros(); 4689 if (!TLI.shouldAvoidTransformToShift(ShValTy, ShiftBits)) { 4690 SDValue Shift = 4691 DAG.getNode(ISD::SRL, dl, ShValTy, N0.getOperand(0), 4692 DAG.getConstant(ShiftBits, dl, ShiftTy)); 4693 SDValue CmpRHS = DAG.getConstant(C1.lshr(ShiftBits), dl, ShValTy); 4694 return DAG.getSetCC(dl, VT, Shift, CmpRHS, Cond); 4695 } 4696 } 4697 } 4698 } else if (Cond == ISD::SETULT || Cond == ISD::SETUGE || 4699 Cond == ISD::SETULE || Cond == ISD::SETUGT) { 4700 bool AdjOne = (Cond == ISD::SETULE || Cond == ISD::SETUGT); 4701 // X < 0x100000000 -> (X >> 32) < 1 4702 // X >= 0x100000000 -> (X >> 32) >= 1 4703 // X <= 0x0ffffffff -> (X >> 32) < 1 4704 // X > 0x0ffffffff -> (X >> 32) >= 1 4705 unsigned ShiftBits; 4706 APInt NewC = C1; 4707 ISD::CondCode NewCond = Cond; 4708 if (AdjOne) { 4709 ShiftBits = C1.countTrailingOnes(); 4710 NewC = NewC + 1; 4711 NewCond = (Cond == ISD::SETULE) ? 
ISD::SETULT : ISD::SETUGE; 4712 } else { 4713 ShiftBits = C1.countTrailingZeros(); 4714 } 4715 NewC.lshrInPlace(ShiftBits); 4716 if (ShiftBits && NewC.getMinSignedBits() <= 64 && 4717 isLegalICmpImmediate(NewC.getSExtValue()) && 4718 !TLI.shouldAvoidTransformToShift(ShValTy, ShiftBits)) { 4719 SDValue Shift = DAG.getNode(ISD::SRL, dl, ShValTy, N0, 4720 DAG.getConstant(ShiftBits, dl, ShiftTy)); 4721 SDValue CmpRHS = DAG.getConstant(NewC, dl, ShValTy); 4722 return DAG.getSetCC(dl, VT, Shift, CmpRHS, NewCond); 4723 } 4724 } 4725 } 4726 } 4727 4728 if (!isa<ConstantFPSDNode>(N0) && isa<ConstantFPSDNode>(N1)) { 4729 auto *CFP = cast<ConstantFPSDNode>(N1); 4730 assert(!CFP->getValueAPF().isNaN() && "Unexpected NaN value"); 4731 4732 // Otherwise, we know the RHS is not a NaN. Simplify the node to drop the 4733 // constant if knowing that the operand is non-nan is enough. We prefer to 4734 // have SETO(x,x) instead of SETO(x, 0.0) because this avoids having to 4735 // materialize 0.0. 4736 if (Cond == ISD::SETO || Cond == ISD::SETUO) 4737 return DAG.getSetCC(dl, VT, N0, N0, Cond); 4738 4739 // setcc (fneg x), C -> setcc swap(pred) x, -C 4740 if (N0.getOpcode() == ISD::FNEG) { 4741 ISD::CondCode SwapCond = ISD::getSetCCSwappedOperands(Cond); 4742 if (DCI.isBeforeLegalizeOps() || 4743 isCondCodeLegal(SwapCond, N0.getSimpleValueType())) { 4744 SDValue NegN1 = DAG.getNode(ISD::FNEG, dl, N0.getValueType(), N1); 4745 return DAG.getSetCC(dl, VT, N0.getOperand(0), NegN1, SwapCond); 4746 } 4747 } 4748 4749 // If the condition is not legal, see if we can find an equivalent one 4750 // which is legal. 4751 if (!isCondCodeLegal(Cond, N0.getSimpleValueType())) { 4752 // If the comparison was an awkward floating-point == or != and one of 4753 // the comparison operands is infinity or negative infinity, convert the 4754 // condition to a less-awkward <= or >=. 4755 if (CFP->getValueAPF().isInfinity()) { 4756 bool IsNegInf = CFP->getValueAPF().isNegative(); 4757 ISD::CondCode NewCond = ISD::SETCC_INVALID; 4758 switch (Cond) { 4759 case ISD::SETOEQ: NewCond = IsNegInf ? ISD::SETOLE : ISD::SETOGE; break; 4760 case ISD::SETUEQ: NewCond = IsNegInf ? ISD::SETULE : ISD::SETUGE; break; 4761 case ISD::SETUNE: NewCond = IsNegInf ? ISD::SETUGT : ISD::SETULT; break; 4762 case ISD::SETONE: NewCond = IsNegInf ? ISD::SETOGT : ISD::SETOLT; break; 4763 default: break; 4764 } 4765 if (NewCond != ISD::SETCC_INVALID && 4766 isCondCodeLegal(NewCond, N0.getSimpleValueType())) 4767 return DAG.getSetCC(dl, VT, N0, N1, NewCond); 4768 } 4769 } 4770 } 4771 4772 if (N0 == N1) { 4773 // The sext(setcc()) => setcc() optimization relies on the appropriate 4774 // constant being emitted. 4775 assert(!N0.getValueType().isInteger() && 4776 "Integer types should be handled by FoldSetCC"); 4777 4778 bool EqTrue = ISD::isTrueWhenEqual(Cond); 4779 unsigned UOF = ISD::getUnorderedFlavor(Cond); 4780 if (UOF == 2) // FP operators that are undefined on NaNs. 4781 return DAG.getBoolConstant(EqTrue, dl, VT, OpVT); 4782 if (UOF == unsigned(EqTrue)) 4783 return DAG.getBoolConstant(EqTrue, dl, VT, OpVT); 4784 // Otherwise, we can't fold it. However, we can simplify it to SETUO/SETO 4785 // if it is not already. 4786 ISD::CondCode NewCond = UOF == 0 ? 
ISD::SETO : ISD::SETUO; 4787 if (NewCond != Cond && 4788 (DCI.isBeforeLegalizeOps() || 4789 isCondCodeLegal(NewCond, N0.getSimpleValueType()))) 4790 return DAG.getSetCC(dl, VT, N0, N1, NewCond); 4791 } 4792 4793 if ((Cond == ISD::SETEQ || Cond == ISD::SETNE) && 4794 N0.getValueType().isInteger()) { 4795 if (N0.getOpcode() == ISD::ADD || N0.getOpcode() == ISD::SUB || 4796 N0.getOpcode() == ISD::XOR) { 4797 // Simplify (X+Y) == (X+Z) --> Y == Z 4798 if (N0.getOpcode() == N1.getOpcode()) { 4799 if (N0.getOperand(0) == N1.getOperand(0)) 4800 return DAG.getSetCC(dl, VT, N0.getOperand(1), N1.getOperand(1), Cond); 4801 if (N0.getOperand(1) == N1.getOperand(1)) 4802 return DAG.getSetCC(dl, VT, N0.getOperand(0), N1.getOperand(0), Cond); 4803 if (isCommutativeBinOp(N0.getOpcode())) { 4804 // If X op Y == Y op X, try other combinations. 4805 if (N0.getOperand(0) == N1.getOperand(1)) 4806 return DAG.getSetCC(dl, VT, N0.getOperand(1), N1.getOperand(0), 4807 Cond); 4808 if (N0.getOperand(1) == N1.getOperand(0)) 4809 return DAG.getSetCC(dl, VT, N0.getOperand(0), N1.getOperand(1), 4810 Cond); 4811 } 4812 } 4813 4814 // If RHS is a legal immediate value for a compare instruction, we need 4815 // to be careful about increasing register pressure needlessly. 4816 bool LegalRHSImm = false; 4817 4818 if (auto *RHSC = dyn_cast<ConstantSDNode>(N1)) { 4819 if (auto *LHSR = dyn_cast<ConstantSDNode>(N0.getOperand(1))) { 4820 // Turn (X+C1) == C2 --> X == C2-C1 4821 if (N0.getOpcode() == ISD::ADD && N0.getNode()->hasOneUse()) 4822 return DAG.getSetCC( 4823 dl, VT, N0.getOperand(0), 4824 DAG.getConstant(RHSC->getAPIntValue() - LHSR->getAPIntValue(), 4825 dl, N0.getValueType()), 4826 Cond); 4827 4828 // Turn (X^C1) == C2 --> X == C1^C2 4829 if (N0.getOpcode() == ISD::XOR && N0.getNode()->hasOneUse()) 4830 return DAG.getSetCC( 4831 dl, VT, N0.getOperand(0), 4832 DAG.getConstant(LHSR->getAPIntValue() ^ RHSC->getAPIntValue(), 4833 dl, N0.getValueType()), 4834 Cond); 4835 } 4836 4837 // Turn (C1-X) == C2 --> X == C1-C2 4838 if (auto *SUBC = dyn_cast<ConstantSDNode>(N0.getOperand(0))) 4839 if (N0.getOpcode() == ISD::SUB && N0.getNode()->hasOneUse()) 4840 return DAG.getSetCC( 4841 dl, VT, N0.getOperand(1), 4842 DAG.getConstant(SUBC->getAPIntValue() - RHSC->getAPIntValue(), 4843 dl, N0.getValueType()), 4844 Cond); 4845 4846 // Could RHSC fold directly into a compare? 4847 if (RHSC->getValueType(0).getSizeInBits() <= 64) 4848 LegalRHSImm = isLegalICmpImmediate(RHSC->getSExtValue()); 4849 } 4850 4851 // (X+Y) == X --> Y == 0 and similar folds. 4852 // Don't do this if X is an immediate that can fold into a cmp 4853 // instruction and X+Y has other uses. It could be an induction variable 4854 // chain, and the transform would increase register pressure. 4855 if (!LegalRHSImm || N0.hasOneUse()) 4856 if (SDValue V = foldSetCCWithBinOp(VT, N0, N1, Cond, dl, DCI)) 4857 return V; 4858 } 4859 4860 if (N1.getOpcode() == ISD::ADD || N1.getOpcode() == ISD::SUB || 4861 N1.getOpcode() == ISD::XOR) 4862 if (SDValue V = foldSetCCWithBinOp(VT, N1, N0, Cond, dl, DCI)) 4863 return V; 4864 4865 if (SDValue V = foldSetCCWithAnd(VT, N0, N1, Cond, dl, DCI)) 4866 return V; 4867 } 4868 4869 // Fold remainder of division by a constant. 
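  // For an equality comparison of (urem/srem X, C) against a constant we can
  // often avoid materializing the remainder at all: buildUREMEqFold and
  // buildSREMEqFold below rewrite the test using a multiply by the modular
  // inverse of the odd part of C (e.g. '(X % C) == 0' becomes a
  // multiply-rotate-compare sequence with no division).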
4870 if ((N0.getOpcode() == ISD::UREM || N0.getOpcode() == ISD::SREM) && 4871 N0.hasOneUse() && (Cond == ISD::SETEQ || Cond == ISD::SETNE)) { 4872 AttributeList Attr = DAG.getMachineFunction().getFunction().getAttributes(); 4873 4874 // When division is cheap or optimizing for minimum size, 4875 // fall through to DIVREM creation by skipping this fold. 4876 if (!isIntDivCheap(VT, Attr) && !Attr.hasFnAttr(Attribute::MinSize)) { 4877 if (N0.getOpcode() == ISD::UREM) { 4878 if (SDValue Folded = buildUREMEqFold(VT, N0, N1, Cond, DCI, dl)) 4879 return Folded; 4880 } else if (N0.getOpcode() == ISD::SREM) { 4881 if (SDValue Folded = buildSREMEqFold(VT, N0, N1, Cond, DCI, dl)) 4882 return Folded; 4883 } 4884 } 4885 } 4886 4887 // Fold away ALL boolean setcc's. 4888 if (N0.getValueType().getScalarType() == MVT::i1 && foldBooleans) { 4889 SDValue Temp; 4890 switch (Cond) { 4891 default: llvm_unreachable("Unknown integer setcc!"); 4892 case ISD::SETEQ: // X == Y -> ~(X^Y) 4893 Temp = DAG.getNode(ISD::XOR, dl, OpVT, N0, N1); 4894 N0 = DAG.getNOT(dl, Temp, OpVT); 4895 if (!DCI.isCalledByLegalizer()) 4896 DCI.AddToWorklist(Temp.getNode()); 4897 break; 4898 case ISD::SETNE: // X != Y --> (X^Y) 4899 N0 = DAG.getNode(ISD::XOR, dl, OpVT, N0, N1); 4900 break; 4901 case ISD::SETGT: // X >s Y --> X == 0 & Y == 1 --> ~X & Y 4902 case ISD::SETULT: // X <u Y --> X == 0 & Y == 1 --> ~X & Y 4903 Temp = DAG.getNOT(dl, N0, OpVT); 4904 N0 = DAG.getNode(ISD::AND, dl, OpVT, N1, Temp); 4905 if (!DCI.isCalledByLegalizer()) 4906 DCI.AddToWorklist(Temp.getNode()); 4907 break; 4908 case ISD::SETLT: // X <s Y --> X == 1 & Y == 0 --> ~Y & X 4909 case ISD::SETUGT: // X >u Y --> X == 1 & Y == 0 --> ~Y & X 4910 Temp = DAG.getNOT(dl, N1, OpVT); 4911 N0 = DAG.getNode(ISD::AND, dl, OpVT, N0, Temp); 4912 if (!DCI.isCalledByLegalizer()) 4913 DCI.AddToWorklist(Temp.getNode()); 4914 break; 4915 case ISD::SETULE: // X <=u Y --> X == 0 | Y == 1 --> ~X | Y 4916 case ISD::SETGE: // X >=s Y --> X == 0 | Y == 1 --> ~X | Y 4917 Temp = DAG.getNOT(dl, N0, OpVT); 4918 N0 = DAG.getNode(ISD::OR, dl, OpVT, N1, Temp); 4919 if (!DCI.isCalledByLegalizer()) 4920 DCI.AddToWorklist(Temp.getNode()); 4921 break; 4922 case ISD::SETUGE: // X >=u Y --> X == 1 | Y == 0 --> ~Y | X 4923 case ISD::SETLE: // X <=s Y --> X == 1 | Y == 0 --> ~Y | X 4924 Temp = DAG.getNOT(dl, N1, OpVT); 4925 N0 = DAG.getNode(ISD::OR, dl, OpVT, N0, Temp); 4926 break; 4927 } 4928 if (VT.getScalarType() != MVT::i1) { 4929 if (!DCI.isCalledByLegalizer()) 4930 DCI.AddToWorklist(N0.getNode()); 4931 // FIXME: If running after legalize, we probably can't do this. 4932 ISD::NodeType ExtendCode = getExtendForContent(getBooleanContents(OpVT)); 4933 N0 = DAG.getNode(ExtendCode, dl, VT, N0); 4934 } 4935 return N0; 4936 } 4937 4938 // Could not fold it. 4939 return SDValue(); 4940 } 4941 4942 /// Returns true (and the GlobalValue and the offset) if the node is a 4943 /// GlobalAddress + offset. 
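/// For example, (add (GlobalAddress @g, 4), (Constant 16)) sets GA to @g and
/// adds 20 to Offset (the caller is expected to have initialized Offset).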
4944 bool TargetLowering::isGAPlusOffset(SDNode *WN, const GlobalValue *&GA, 4945 int64_t &Offset) const { 4946 4947 SDNode *N = unwrapAddress(SDValue(WN, 0)).getNode(); 4948 4949 if (auto *GASD = dyn_cast<GlobalAddressSDNode>(N)) { 4950 GA = GASD->getGlobal(); 4951 Offset += GASD->getOffset(); 4952 return true; 4953 } 4954 4955 if (N->getOpcode() == ISD::ADD) { 4956 SDValue N1 = N->getOperand(0); 4957 SDValue N2 = N->getOperand(1); 4958 if (isGAPlusOffset(N1.getNode(), GA, Offset)) { 4959 if (auto *V = dyn_cast<ConstantSDNode>(N2)) { 4960 Offset += V->getSExtValue(); 4961 return true; 4962 } 4963 } else if (isGAPlusOffset(N2.getNode(), GA, Offset)) { 4964 if (auto *V = dyn_cast<ConstantSDNode>(N1)) { 4965 Offset += V->getSExtValue(); 4966 return true; 4967 } 4968 } 4969 } 4970 4971 return false; 4972 } 4973 4974 SDValue TargetLowering::PerformDAGCombine(SDNode *N, 4975 DAGCombinerInfo &DCI) const { 4976 // Default implementation: no optimization. 4977 return SDValue(); 4978 } 4979 4980 //===----------------------------------------------------------------------===// 4981 // Inline Assembler Implementation Methods 4982 //===----------------------------------------------------------------------===// 4983 4984 TargetLowering::ConstraintType 4985 TargetLowering::getConstraintType(StringRef Constraint) const { 4986 unsigned S = Constraint.size(); 4987 4988 if (S == 1) { 4989 switch (Constraint[0]) { 4990 default: break; 4991 case 'r': 4992 return C_RegisterClass; 4993 case 'm': // memory 4994 case 'o': // offsetable 4995 case 'V': // not offsetable 4996 return C_Memory; 4997 case 'p': // Address. 4998 return C_Address; 4999 case 'n': // Simple Integer 5000 case 'E': // Floating Point Constant 5001 case 'F': // Floating Point Constant 5002 return C_Immediate; 5003 case 'i': // Simple Integer or Relocatable Constant 5004 case 's': // Relocatable Constant 5005 case 'X': // Allow ANY value. 5006 case 'I': // Target registers. 5007 case 'J': 5008 case 'K': 5009 case 'L': 5010 case 'M': 5011 case 'N': 5012 case 'O': 5013 case 'P': 5014 case '<': 5015 case '>': 5016 return C_Other; 5017 } 5018 } 5019 5020 if (S > 1 && Constraint[0] == '{' && Constraint[S - 1] == '}') { 5021 if (S == 8 && Constraint.substr(1, 6) == "memory") // "{memory}" 5022 return C_Memory; 5023 return C_Register; 5024 } 5025 return C_Unknown; 5026 } 5027 5028 /// Try to replace an X constraint, which matches anything, with another that 5029 /// has more specific requirements based on the type of the corresponding 5030 /// operand. 5031 const char *TargetLowering::LowerXConstraint(EVT ConstraintVT) const { 5032 if (ConstraintVT.isInteger()) 5033 return "r"; 5034 if (ConstraintVT.isFloatingPoint()) 5035 return "f"; // works for many targets 5036 return nullptr; 5037 } 5038 5039 SDValue TargetLowering::LowerAsmOutputForConstraint( 5040 SDValue &Chain, SDValue &Flag, const SDLoc &DL, 5041 const AsmOperandInfo &OpInfo, SelectionDAG &DAG) const { 5042 return SDValue(); 5043 } 5044 5045 /// Lower the specified operand into the Ops vector. 5046 /// If it is invalid, don't add anything to Ops. 
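/// For example, an 'i' constraint accepts (add (GlobalAddress @g), 8) and is
/// lowered to a target global address of @g with offset 8, while an 'n'
/// constraint only accepts plain integer constants.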
5047 void TargetLowering::LowerAsmOperandForConstraint(SDValue Op, 5048 std::string &Constraint, 5049 std::vector<SDValue> &Ops, 5050 SelectionDAG &DAG) const { 5051 5052 if (Constraint.length() > 1) return; 5053 5054 char ConstraintLetter = Constraint[0]; 5055 switch (ConstraintLetter) { 5056 default: break; 5057 case 'X': // Allows any operand 5058 case 'i': // Simple Integer or Relocatable Constant 5059 case 'n': // Simple Integer 5060 case 's': { // Relocatable Constant 5061 5062 ConstantSDNode *C; 5063 uint64_t Offset = 0; 5064 5065 // Match (GA) or (C) or (GA+C) or (GA-C) or ((GA+C)+C) or (((GA+C)+C)+C), 5066 // etc., since getelementpointer is variadic. We can't use 5067 // SelectionDAG::FoldSymbolOffset because it expects the GA to be accessible 5068 // while in this case the GA may be furthest from the root node which is 5069 // likely an ISD::ADD. 5070 while (true) { 5071 if ((C = dyn_cast<ConstantSDNode>(Op)) && ConstraintLetter != 's') { 5072 // gcc prints these as sign extended. Sign extend value to 64 bits 5073 // now; without this it would get ZExt'd later in 5074 // ScheduleDAGSDNodes::EmitNode, which is very generic. 5075 bool IsBool = C->getConstantIntValue()->getBitWidth() == 1; 5076 BooleanContent BCont = getBooleanContents(MVT::i64); 5077 ISD::NodeType ExtOpc = 5078 IsBool ? getExtendForContent(BCont) : ISD::SIGN_EXTEND; 5079 int64_t ExtVal = 5080 ExtOpc == ISD::ZERO_EXTEND ? C->getZExtValue() : C->getSExtValue(); 5081 Ops.push_back( 5082 DAG.getTargetConstant(Offset + ExtVal, SDLoc(C), MVT::i64)); 5083 return; 5084 } 5085 if (ConstraintLetter != 'n') { 5086 if (const auto *GA = dyn_cast<GlobalAddressSDNode>(Op)) { 5087 Ops.push_back(DAG.getTargetGlobalAddress(GA->getGlobal(), SDLoc(Op), 5088 GA->getValueType(0), 5089 Offset + GA->getOffset())); 5090 return; 5091 } 5092 if (const auto *BA = dyn_cast<BlockAddressSDNode>(Op)) { 5093 Ops.push_back(DAG.getTargetBlockAddress( 5094 BA->getBlockAddress(), BA->getValueType(0), 5095 Offset + BA->getOffset(), BA->getTargetFlags())); 5096 return; 5097 } 5098 if (isa<BasicBlockSDNode>(Op)) { 5099 Ops.push_back(Op); 5100 return; 5101 } 5102 } 5103 const unsigned OpCode = Op.getOpcode(); 5104 if (OpCode == ISD::ADD || OpCode == ISD::SUB) { 5105 if ((C = dyn_cast<ConstantSDNode>(Op.getOperand(0)))) 5106 Op = Op.getOperand(1); 5107 // Subtraction is not commutative. 5108 else if (OpCode == ISD::ADD && 5109 (C = dyn_cast<ConstantSDNode>(Op.getOperand(1)))) 5110 Op = Op.getOperand(0); 5111 else 5112 return; 5113 Offset += (OpCode == ISD::ADD ? 1 : -1) * C->getSExtValue(); 5114 continue; 5115 } 5116 return; 5117 } 5118 break; 5119 } 5120 } 5121 } 5122 5123 std::pair<unsigned, const TargetRegisterClass *> 5124 TargetLowering::getRegForInlineAsmConstraint(const TargetRegisterInfo *RI, 5125 StringRef Constraint, 5126 MVT VT) const { 5127 if (Constraint.empty() || Constraint[0] != '{') 5128 return std::make_pair(0u, static_cast<TargetRegisterClass *>(nullptr)); 5129 assert(*(Constraint.end() - 1) == '}' && "Not a brace enclosed constraint?"); 5130 5131 // Remove the braces from around the name. 5132 StringRef RegName(Constraint.data() + 1, Constraint.size() - 2); 5133 5134 std::pair<unsigned, const TargetRegisterClass *> R = 5135 std::make_pair(0u, static_cast<const TargetRegisterClass *>(nullptr)); 5136 5137 // Figure out which register class contains this reg. 5138 for (const TargetRegisterClass *RC : RI->regclasses()) { 5139 // If none of the value types for this register class are valid, we 5140 // can't use it. 
For example, 64-bit reg classes on 32-bit targets. 5141 if (!isLegalRC(*RI, *RC)) 5142 continue; 5143 5144 for (const MCPhysReg &PR : *RC) { 5145 if (RegName.equals_insensitive(RI->getRegAsmName(PR))) { 5146 std::pair<unsigned, const TargetRegisterClass *> S = 5147 std::make_pair(PR, RC); 5148 5149 // If this register class has the requested value type, return it, 5150 // otherwise keep searching and return the first class found 5151 // if no other is found which explicitly has the requested type. 5152 if (RI->isTypeLegalForClass(*RC, VT)) 5153 return S; 5154 if (!R.second) 5155 R = S; 5156 } 5157 } 5158 } 5159 5160 return R; 5161 } 5162 5163 //===----------------------------------------------------------------------===// 5164 // Constraint Selection. 5165 5166 /// Return true of this is an input operand that is a matching constraint like 5167 /// "4". 5168 bool TargetLowering::AsmOperandInfo::isMatchingInputConstraint() const { 5169 assert(!ConstraintCode.empty() && "No known constraint!"); 5170 return isdigit(static_cast<unsigned char>(ConstraintCode[0])); 5171 } 5172 5173 /// If this is an input matching constraint, this method returns the output 5174 /// operand it matches. 5175 unsigned TargetLowering::AsmOperandInfo::getMatchedOperand() const { 5176 assert(!ConstraintCode.empty() && "No known constraint!"); 5177 return atoi(ConstraintCode.c_str()); 5178 } 5179 5180 /// Split up the constraint string from the inline assembly value into the 5181 /// specific constraints and their prefixes, and also tie in the associated 5182 /// operand values. 5183 /// If this returns an empty vector, and if the constraint string itself 5184 /// isn't empty, there was an error parsing. 5185 TargetLowering::AsmOperandInfoVector 5186 TargetLowering::ParseConstraints(const DataLayout &DL, 5187 const TargetRegisterInfo *TRI, 5188 const CallBase &Call) const { 5189 /// Information about all of the constraints. 5190 AsmOperandInfoVector ConstraintOperands; 5191 const InlineAsm *IA = cast<InlineAsm>(Call.getCalledOperand()); 5192 unsigned maCount = 0; // Largest number of multiple alternative constraints. 5193 5194 // Do a prepass over the constraints, canonicalizing them, and building up the 5195 // ConstraintOperands list. 5196 unsigned ArgNo = 0; // ArgNo - The argument of the CallInst. 5197 unsigned ResNo = 0; // ResNo - The result number of the next output. 5198 5199 for (InlineAsm::ConstraintInfo &CI : IA->ParseConstraints()) { 5200 ConstraintOperands.emplace_back(std::move(CI)); 5201 AsmOperandInfo &OpInfo = ConstraintOperands.back(); 5202 5203 // Update multiple alternative constraint count. 5204 if (OpInfo.multipleAlternatives.size() > maCount) 5205 maCount = OpInfo.multipleAlternatives.size(); 5206 5207 OpInfo.ConstraintVT = MVT::Other; 5208 5209 // Compute the value type for each operand. 5210 switch (OpInfo.Type) { 5211 case InlineAsm::isOutput: 5212 // Indirect outputs just consume an argument. 5213 if (OpInfo.isIndirect) { 5214 OpInfo.CallOperandVal = Call.getArgOperand(ArgNo); 5215 break; 5216 } 5217 5218 // The return value of the call is this value. As such, there is no 5219 // corresponding argument. 
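      // (For instance, an asm with two direct outputs returning { i32, i64 }
      // assigns ConstraintVT i32 to the first output and i64 to the second,
      // via the struct element types handled below.)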
5220 assert(!Call.getType()->isVoidTy() && "Bad inline asm!"); 5221 if (StructType *STy = dyn_cast<StructType>(Call.getType())) { 5222 OpInfo.ConstraintVT = 5223 getSimpleValueType(DL, STy->getElementType(ResNo)); 5224 } else { 5225 assert(ResNo == 0 && "Asm only has one result!"); 5226 OpInfo.ConstraintVT = 5227 getAsmOperandValueType(DL, Call.getType()).getSimpleVT(); 5228 } 5229 ++ResNo; 5230 break; 5231 case InlineAsm::isInput: 5232 OpInfo.CallOperandVal = Call.getArgOperand(ArgNo); 5233 break; 5234 case InlineAsm::isClobber: 5235 // Nothing to do. 5236 break; 5237 } 5238 5239 if (OpInfo.CallOperandVal) { 5240 llvm::Type *OpTy = OpInfo.CallOperandVal->getType(); 5241 if (OpInfo.isIndirect) { 5242 OpTy = Call.getParamElementType(ArgNo); 5243 assert(OpTy && "Indirect operand must have elementtype attribute"); 5244 } 5245 5246 // Look for vector wrapped in a struct. e.g. { <16 x i8> }. 5247 if (StructType *STy = dyn_cast<StructType>(OpTy)) 5248 if (STy->getNumElements() == 1) 5249 OpTy = STy->getElementType(0); 5250 5251 // If OpTy is not a single value, it may be a struct/union that we 5252 // can tile with integers. 5253 if (!OpTy->isSingleValueType() && OpTy->isSized()) { 5254 unsigned BitSize = DL.getTypeSizeInBits(OpTy); 5255 switch (BitSize) { 5256 default: break; 5257 case 1: 5258 case 8: 5259 case 16: 5260 case 32: 5261 case 64: 5262 case 128: 5263 OpTy = IntegerType::get(OpTy->getContext(), BitSize); 5264 break; 5265 } 5266 } 5267 5268 EVT VT = getAsmOperandValueType(DL, OpTy, true); 5269 OpInfo.ConstraintVT = VT.isSimple() ? VT.getSimpleVT() : MVT::Other; 5270 ArgNo++; 5271 } 5272 } 5273 5274 // If we have multiple alternative constraints, select the best alternative. 5275 if (!ConstraintOperands.empty()) { 5276 if (maCount) { 5277 unsigned bestMAIndex = 0; 5278 int bestWeight = -1; 5279 // weight: -1 = invalid match, and 0 = so-so match to 5 = good match. 5280 int weight = -1; 5281 unsigned maIndex; 5282 // Compute the sums of the weights for each alternative, keeping track 5283 // of the best (highest weight) one so far. 5284 for (maIndex = 0; maIndex < maCount; ++maIndex) { 5285 int weightSum = 0; 5286 for (unsigned cIndex = 0, eIndex = ConstraintOperands.size(); 5287 cIndex != eIndex; ++cIndex) { 5288 AsmOperandInfo &OpInfo = ConstraintOperands[cIndex]; 5289 if (OpInfo.Type == InlineAsm::isClobber) 5290 continue; 5291 5292 // If this is an output operand with a matching input operand, 5293 // look up the matching input. If their types mismatch, e.g. one 5294 // is an integer, the other is floating point, or their sizes are 5295 // different, flag it as an maCantMatch. 5296 if (OpInfo.hasMatchingInput()) { 5297 AsmOperandInfo &Input = ConstraintOperands[OpInfo.MatchingInput]; 5298 if (OpInfo.ConstraintVT != Input.ConstraintVT) { 5299 if ((OpInfo.ConstraintVT.isInteger() != 5300 Input.ConstraintVT.isInteger()) || 5301 (OpInfo.ConstraintVT.getSizeInBits() != 5302 Input.ConstraintVT.getSizeInBits())) { 5303 weightSum = -1; // Can't match. 5304 break; 5305 } 5306 } 5307 } 5308 weight = getMultipleConstraintMatchWeight(OpInfo, maIndex); 5309 if (weight == -1) { 5310 weightSum = -1; 5311 break; 5312 } 5313 weightSum += weight; 5314 } 5315 // Update best. 5316 if (weightSum > bestWeight) { 5317 bestWeight = weightSum; 5318 bestMAIndex = maIndex; 5319 } 5320 } 5321 5322 // Now select chosen alternative in each constraint. 
5323 for (AsmOperandInfo &cInfo : ConstraintOperands) 5324 if (cInfo.Type != InlineAsm::isClobber) 5325 cInfo.selectAlternative(bestMAIndex); 5326 } 5327 } 5328 5329 // Check and hook up tied operands, choose constraint code to use. 5330 for (unsigned cIndex = 0, eIndex = ConstraintOperands.size(); 5331 cIndex != eIndex; ++cIndex) { 5332 AsmOperandInfo &OpInfo = ConstraintOperands[cIndex]; 5333 5334 // If this is an output operand with a matching input operand, look up the 5335 // matching input. If their types mismatch, e.g. one is an integer, the 5336 // other is floating point, or their sizes are different, flag it as an 5337 // error. 5338 if (OpInfo.hasMatchingInput()) { 5339 AsmOperandInfo &Input = ConstraintOperands[OpInfo.MatchingInput]; 5340 5341 if (OpInfo.ConstraintVT != Input.ConstraintVT) { 5342 std::pair<unsigned, const TargetRegisterClass *> MatchRC = 5343 getRegForInlineAsmConstraint(TRI, OpInfo.ConstraintCode, 5344 OpInfo.ConstraintVT); 5345 std::pair<unsigned, const TargetRegisterClass *> InputRC = 5346 getRegForInlineAsmConstraint(TRI, Input.ConstraintCode, 5347 Input.ConstraintVT); 5348 if ((OpInfo.ConstraintVT.isInteger() != 5349 Input.ConstraintVT.isInteger()) || 5350 (MatchRC.second != InputRC.second)) { 5351 report_fatal_error("Unsupported asm: input constraint" 5352 " with a matching output constraint of" 5353 " incompatible type!"); 5354 } 5355 } 5356 } 5357 } 5358 5359 return ConstraintOperands; 5360 } 5361 5362 /// Return an integer indicating how general CT is. 5363 static unsigned getConstraintGenerality(TargetLowering::ConstraintType CT) { 5364 switch (CT) { 5365 case TargetLowering::C_Immediate: 5366 case TargetLowering::C_Other: 5367 case TargetLowering::C_Unknown: 5368 return 0; 5369 case TargetLowering::C_Register: 5370 return 1; 5371 case TargetLowering::C_RegisterClass: 5372 return 2; 5373 case TargetLowering::C_Memory: 5374 case TargetLowering::C_Address: 5375 return 3; 5376 } 5377 llvm_unreachable("Invalid constraint type"); 5378 } 5379 5380 /// Examine constraint type and operand type and determine a weight value. 5381 /// This object must already have been set up with the operand type 5382 /// and the current alternative constraint selected. 5383 TargetLowering::ConstraintWeight 5384 TargetLowering::getMultipleConstraintMatchWeight( 5385 AsmOperandInfo &info, int maIndex) const { 5386 InlineAsm::ConstraintCodeVector *rCodes; 5387 if (maIndex >= (int)info.multipleAlternatives.size()) 5388 rCodes = &info.Codes; 5389 else 5390 rCodes = &info.multipleAlternatives[maIndex].Codes; 5391 ConstraintWeight BestWeight = CW_Invalid; 5392 5393 // Loop over the options, keeping track of the most general one. 5394 for (const std::string &rCode : *rCodes) { 5395 ConstraintWeight weight = 5396 getSingleConstraintMatchWeight(info, rCode.c_str()); 5397 if (weight > BestWeight) 5398 BestWeight = weight; 5399 } 5400 5401 return BestWeight; 5402 } 5403 5404 /// Examine constraint type and operand type and determine a weight value. 5405 /// This object must already have been set up with the operand type 5406 /// and the current alternative constraint selected. 5407 TargetLowering::ConstraintWeight 5408 TargetLowering::getSingleConstraintMatchWeight( 5409 AsmOperandInfo &info, const char *constraint) const { 5410 ConstraintWeight weight = CW_Invalid; 5411 Value *CallOperandVal = info.CallOperandVal; 5412 // If we don't have a value, we can't do a match, 5413 // but allow it at the lowest weight. 
5414 if (!CallOperandVal) 5415 return CW_Default; 5416 // Look at the constraint type. 5417 switch (*constraint) { 5418 case 'i': // immediate integer. 5419 case 'n': // immediate integer with a known value. 5420 if (isa<ConstantInt>(CallOperandVal)) 5421 weight = CW_Constant; 5422 break; 5423 case 's': // non-explicit intregal immediate. 5424 if (isa<GlobalValue>(CallOperandVal)) 5425 weight = CW_Constant; 5426 break; 5427 case 'E': // immediate float if host format. 5428 case 'F': // immediate float. 5429 if (isa<ConstantFP>(CallOperandVal)) 5430 weight = CW_Constant; 5431 break; 5432 case '<': // memory operand with autodecrement. 5433 case '>': // memory operand with autoincrement. 5434 case 'm': // memory operand. 5435 case 'o': // offsettable memory operand 5436 case 'V': // non-offsettable memory operand 5437 weight = CW_Memory; 5438 break; 5439 case 'r': // general register. 5440 case 'g': // general register, memory operand or immediate integer. 5441 // note: Clang converts "g" to "imr". 5442 if (CallOperandVal->getType()->isIntegerTy()) 5443 weight = CW_Register; 5444 break; 5445 case 'X': // any operand. 5446 default: 5447 weight = CW_Default; 5448 break; 5449 } 5450 return weight; 5451 } 5452 5453 /// If there are multiple different constraints that we could pick for this 5454 /// operand (e.g. "imr") try to pick the 'best' one. 5455 /// This is somewhat tricky: constraints fall into four classes: 5456 /// Other -> immediates and magic values 5457 /// Register -> one specific register 5458 /// RegisterClass -> a group of regs 5459 /// Memory -> memory 5460 /// Ideally, we would pick the most specific constraint possible: if we have 5461 /// something that fits into a register, we would pick it. The problem here 5462 /// is that if we have something that could either be in a register or in 5463 /// memory that use of the register could cause selection of *other* 5464 /// operands to fail: they might only succeed if we pick memory. Because of 5465 /// this the heuristic we use is: 5466 /// 5467 /// 1) If there is an 'other' constraint, and if the operand is valid for 5468 /// that constraint, use it. This makes us take advantage of 'i' 5469 /// constraints when available. 5470 /// 2) Otherwise, pick the most general constraint present. This prefers 5471 /// 'm' over 'r', for example. 5472 /// 5473 static void ChooseConstraint(TargetLowering::AsmOperandInfo &OpInfo, 5474 const TargetLowering &TLI, 5475 SDValue Op, SelectionDAG *DAG) { 5476 assert(OpInfo.Codes.size() > 1 && "Doesn't have multiple constraint options"); 5477 unsigned BestIdx = 0; 5478 TargetLowering::ConstraintType BestType = TargetLowering::C_Unknown; 5479 int BestGenerality = -1; 5480 5481 // Loop over the options, keeping track of the most general one. 5482 for (unsigned i = 0, e = OpInfo.Codes.size(); i != e; ++i) { 5483 TargetLowering::ConstraintType CType = 5484 TLI.getConstraintType(OpInfo.Codes[i]); 5485 5486 // Indirect 'other' or 'immediate' constraints are not allowed. 5487 if (OpInfo.isIndirect && !(CType == TargetLowering::C_Memory || 5488 CType == TargetLowering::C_Register || 5489 CType == TargetLowering::C_RegisterClass)) 5490 continue; 5491 5492 // If this is an 'other' or 'immediate' constraint, see if the operand is 5493 // valid for it. For example, on X86 we might have an 'rI' constraint. If 5494 // the operand is an integer in the range [0..31] we want to use I (saving a 5495 // load of a register), otherwise we must use 'r'. 
5496 if ((CType == TargetLowering::C_Other || 5497 CType == TargetLowering::C_Immediate) && Op.getNode()) { 5498 assert(OpInfo.Codes[i].size() == 1 && 5499 "Unhandled multi-letter 'other' constraint"); 5500 std::vector<SDValue> ResultOps; 5501 TLI.LowerAsmOperandForConstraint(Op, OpInfo.Codes[i], 5502 ResultOps, *DAG); 5503 if (!ResultOps.empty()) { 5504 BestType = CType; 5505 BestIdx = i; 5506 break; 5507 } 5508 } 5509 5510 // Things with matching constraints can only be registers, per gcc 5511 // documentation. This mainly affects "g" constraints. 5512 if (CType == TargetLowering::C_Memory && OpInfo.hasMatchingInput()) 5513 continue; 5514 5515 // This constraint letter is more general than the previous one, use it. 5516 int Generality = getConstraintGenerality(CType); 5517 if (Generality > BestGenerality) { 5518 BestType = CType; 5519 BestIdx = i; 5520 BestGenerality = Generality; 5521 } 5522 } 5523 5524 OpInfo.ConstraintCode = OpInfo.Codes[BestIdx]; 5525 OpInfo.ConstraintType = BestType; 5526 } 5527 5528 /// Determines the constraint code and constraint type to use for the specific 5529 /// AsmOperandInfo, setting OpInfo.ConstraintCode and OpInfo.ConstraintType. 5530 void TargetLowering::ComputeConstraintToUse(AsmOperandInfo &OpInfo, 5531 SDValue Op, 5532 SelectionDAG *DAG) const { 5533 assert(!OpInfo.Codes.empty() && "Must have at least one constraint"); 5534 5535 // Single-letter constraints ('r') are very common. 5536 if (OpInfo.Codes.size() == 1) { 5537 OpInfo.ConstraintCode = OpInfo.Codes[0]; 5538 OpInfo.ConstraintType = getConstraintType(OpInfo.ConstraintCode); 5539 } else { 5540 ChooseConstraint(OpInfo, *this, Op, DAG); 5541 } 5542 5543 // 'X' matches anything. 5544 if (OpInfo.ConstraintCode == "X" && OpInfo.CallOperandVal) { 5545 // Constants are handled elsewhere. For Functions, the type here is the 5546 // type of the result, which is not what we want to look at; leave them 5547 // alone. 5548 Value *v = OpInfo.CallOperandVal; 5549 if (isa<ConstantInt>(v) || isa<Function>(v)) { 5550 return; 5551 } 5552 5553 if (isa<BasicBlock>(v) || isa<BlockAddress>(v)) { 5554 OpInfo.ConstraintCode = "i"; 5555 return; 5556 } 5557 5558 // Otherwise, try to resolve it to something we know about by looking at 5559 // the actual operand type. 5560 if (const char *Repl = LowerXConstraint(OpInfo.ConstraintVT)) { 5561 OpInfo.ConstraintCode = Repl; 5562 OpInfo.ConstraintType = getConstraintType(OpInfo.ConstraintCode); 5563 } 5564 } 5565 } 5566 5567 /// Given an exact SDIV by a constant, create a multiplication 5568 /// with the multiplicative inverse of the constant. 5569 static SDValue BuildExactSDIV(const TargetLowering &TLI, SDNode *N, 5570 const SDLoc &dl, SelectionDAG &DAG, 5571 SmallVectorImpl<SDNode *> &Created) { 5572 SDValue Op0 = N->getOperand(0); 5573 SDValue Op1 = N->getOperand(1); 5574 EVT VT = N->getValueType(0); 5575 EVT SVT = VT.getScalarType(); 5576 EVT ShVT = TLI.getShiftAmountTy(VT, DAG.getDataLayout()); 5577 EVT ShSVT = ShVT.getScalarType(); 5578 5579 bool UseSRA = false; 5580 SmallVector<SDValue, 16> Shifts, Factors; 5581 5582 auto BuildSDIVPattern = [&](ConstantSDNode *C) { 5583 if (C->isZero()) 5584 return false; 5585 APInt Divisor = C->getAPIntValue(); 5586 unsigned Shift = Divisor.countTrailingZeros(); 5587 if (Shift) { 5588 Divisor.ashrInPlace(Shift); 5589 UseSRA = true; 5590 } 5591 // Calculate the multiplicative inverse, using Newton's method. 
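    // Each step of the loop below doubles the number of correct low bits:
    // if F is the inverse of D modulo 2^k, then F*(2 - D*F) is the inverse
    // modulo 2^(2k). An odd D is its own inverse modulo 8, so starting from
    // Factor = Divisor the iteration converges quickly. For example, the
    // inverse of 3 modulo 2^8 is 0xAB, since 3 * 0xAB = 0x201 == 1 (mod 0x100).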
5592 APInt t; 5593 APInt Factor = Divisor; 5594 while ((t = Divisor * Factor) != 1) 5595 Factor *= APInt(Divisor.getBitWidth(), 2) - t; 5596 Shifts.push_back(DAG.getConstant(Shift, dl, ShSVT)); 5597 Factors.push_back(DAG.getConstant(Factor, dl, SVT)); 5598 return true; 5599 }; 5600 5601 // Collect all magic values from the build vector. 5602 if (!ISD::matchUnaryPredicate(Op1, BuildSDIVPattern)) 5603 return SDValue(); 5604 5605 SDValue Shift, Factor; 5606 if (Op1.getOpcode() == ISD::BUILD_VECTOR) { 5607 Shift = DAG.getBuildVector(ShVT, dl, Shifts); 5608 Factor = DAG.getBuildVector(VT, dl, Factors); 5609 } else if (Op1.getOpcode() == ISD::SPLAT_VECTOR) { 5610 assert(Shifts.size() == 1 && Factors.size() == 1 && 5611 "Expected matchUnaryPredicate to return one element for scalable " 5612 "vectors"); 5613 Shift = DAG.getSplatVector(ShVT, dl, Shifts[0]); 5614 Factor = DAG.getSplatVector(VT, dl, Factors[0]); 5615 } else { 5616 assert(isa<ConstantSDNode>(Op1) && "Expected a constant"); 5617 Shift = Shifts[0]; 5618 Factor = Factors[0]; 5619 } 5620 5621 SDValue Res = Op0; 5622 5623 // Shift the value upfront if it is even, so the LSB is one. 5624 if (UseSRA) { 5625 // TODO: For UDIV use SRL instead of SRA. 5626 SDNodeFlags Flags; 5627 Flags.setExact(true); 5628 Res = DAG.getNode(ISD::SRA, dl, VT, Res, Shift, Flags); 5629 Created.push_back(Res.getNode()); 5630 } 5631 5632 return DAG.getNode(ISD::MUL, dl, VT, Res, Factor); 5633 } 5634 5635 SDValue TargetLowering::BuildSDIVPow2(SDNode *N, const APInt &Divisor, 5636 SelectionDAG &DAG, 5637 SmallVectorImpl<SDNode *> &Created) const { 5638 AttributeList Attr = DAG.getMachineFunction().getFunction().getAttributes(); 5639 const TargetLowering &TLI = DAG.getTargetLoweringInfo(); 5640 if (TLI.isIntDivCheap(N->getValueType(0), Attr)) 5641 return SDValue(N, 0); // Lower SDIV as SDIV 5642 return SDValue(); 5643 } 5644 5645 SDValue 5646 TargetLowering::BuildSREMPow2(SDNode *N, const APInt &Divisor, 5647 SelectionDAG &DAG, 5648 SmallVectorImpl<SDNode *> &Created) const { 5649 AttributeList Attr = DAG.getMachineFunction().getFunction().getAttributes(); 5650 const TargetLowering &TLI = DAG.getTargetLoweringInfo(); 5651 if (TLI.isIntDivCheap(N->getValueType(0), Attr)) 5652 return SDValue(N, 0); // Lower SREM as SREM 5653 return SDValue(); 5654 } 5655 5656 /// Given an ISD::SDIV node expressing a divide by constant, 5657 /// return a DAG expression to select that will generate the same value by 5658 /// multiplying by a magic number. 5659 /// Ref: "Hacker's Delight" or "The PowerPC Compiler Writer's Guide". 5660 SDValue TargetLowering::BuildSDIV(SDNode *N, SelectionDAG &DAG, 5661 bool IsAfterLegalization, 5662 SmallVectorImpl<SDNode *> &Created) const { 5663 SDLoc dl(N); 5664 EVT VT = N->getValueType(0); 5665 EVT SVT = VT.getScalarType(); 5666 EVT ShVT = getShiftAmountTy(VT, DAG.getDataLayout()); 5667 EVT ShSVT = ShVT.getScalarType(); 5668 unsigned EltBits = VT.getScalarSizeInBits(); 5669 EVT MulVT; 5670 5671 // Check to see if we can do this. 5672 // FIXME: We should be more aggressive here. 5673 if (!isTypeLegal(VT)) { 5674 // Limit this to simple scalars for now. 5675 if (VT.isVector() || !VT.isSimple()) 5676 return SDValue(); 5677 5678 // If this type will be promoted to a large enough type with a legal 5679 // multiply operation, we can go ahead and do this transform. 
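    // (For instance, a divide on an illegal narrow type may still be able to
    // use a multiply in the promoted type chosen by getTypeToTransformTo
    // below, provided that type is at least twice as wide as the element
    // type.)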
5680 if (getTypeAction(VT.getSimpleVT()) != TypePromoteInteger) 5681 return SDValue(); 5682 5683 MulVT = getTypeToTransformTo(*DAG.getContext(), VT); 5684 if (MulVT.getSizeInBits() < (2 * EltBits) || 5685 !isOperationLegal(ISD::MUL, MulVT)) 5686 return SDValue(); 5687 } 5688 5689 // If the sdiv has an 'exact' bit we can use a simpler lowering. 5690 if (N->getFlags().hasExact()) 5691 return BuildExactSDIV(*this, N, dl, DAG, Created); 5692 5693 SmallVector<SDValue, 16> MagicFactors, Factors, Shifts, ShiftMasks; 5694 5695 auto BuildSDIVPattern = [&](ConstantSDNode *C) { 5696 if (C->isZero()) 5697 return false; 5698 5699 const APInt &Divisor = C->getAPIntValue(); 5700 SignedDivisionByConstantInfo magics = SignedDivisionByConstantInfo::get(Divisor); 5701 int NumeratorFactor = 0; 5702 int ShiftMask = -1; 5703 5704 if (Divisor.isOne() || Divisor.isAllOnes()) { 5705 // If d is +1/-1, we just multiply the numerator by +1/-1. 5706 NumeratorFactor = Divisor.getSExtValue(); 5707 magics.Magic = 0; 5708 magics.ShiftAmount = 0; 5709 ShiftMask = 0; 5710 } else if (Divisor.isStrictlyPositive() && magics.Magic.isNegative()) { 5711 // If d > 0 and m < 0, add the numerator. 5712 NumeratorFactor = 1; 5713 } else if (Divisor.isNegative() && magics.Magic.isStrictlyPositive()) { 5714 // If d < 0 and m > 0, subtract the numerator. 5715 NumeratorFactor = -1; 5716 } 5717 5718 MagicFactors.push_back(DAG.getConstant(magics.Magic, dl, SVT)); 5719 Factors.push_back(DAG.getConstant(NumeratorFactor, dl, SVT)); 5720 Shifts.push_back(DAG.getConstant(magics.ShiftAmount, dl, ShSVT)); 5721 ShiftMasks.push_back(DAG.getConstant(ShiftMask, dl, SVT)); 5722 return true; 5723 }; 5724 5725 SDValue N0 = N->getOperand(0); 5726 SDValue N1 = N->getOperand(1); 5727 5728 // Collect the shifts / magic values from each element. 5729 if (!ISD::matchUnaryPredicate(N1, BuildSDIVPattern)) 5730 return SDValue(); 5731 5732 SDValue MagicFactor, Factor, Shift, ShiftMask; 5733 if (N1.getOpcode() == ISD::BUILD_VECTOR) { 5734 MagicFactor = DAG.getBuildVector(VT, dl, MagicFactors); 5735 Factor = DAG.getBuildVector(VT, dl, Factors); 5736 Shift = DAG.getBuildVector(ShVT, dl, Shifts); 5737 ShiftMask = DAG.getBuildVector(VT, dl, ShiftMasks); 5738 } else if (N1.getOpcode() == ISD::SPLAT_VECTOR) { 5739 assert(MagicFactors.size() == 1 && Factors.size() == 1 && 5740 Shifts.size() == 1 && ShiftMasks.size() == 1 && 5741 "Expected matchUnaryPredicate to return one element for scalable " 5742 "vectors"); 5743 MagicFactor = DAG.getSplatVector(VT, dl, MagicFactors[0]); 5744 Factor = DAG.getSplatVector(VT, dl, Factors[0]); 5745 Shift = DAG.getSplatVector(ShVT, dl, Shifts[0]); 5746 ShiftMask = DAG.getSplatVector(VT, dl, ShiftMasks[0]); 5747 } else { 5748 assert(isa<ConstantSDNode>(N1) && "Expected a constant"); 5749 MagicFactor = MagicFactors[0]; 5750 Factor = Factors[0]; 5751 Shift = Shifts[0]; 5752 ShiftMask = ShiftMasks[0]; 5753 } 5754 5755 // Multiply the numerator (operand 0) by the magic value. 5756 // FIXME: We should support doing a MUL in a wider type. 5757 auto GetMULHS = [&](SDValue X, SDValue Y) { 5758 // If the type isn't legal, use a wider mul of the the type calculated 5759 // earlier. 
5760 if (!isTypeLegal(VT)) { 5761 X = DAG.getNode(ISD::SIGN_EXTEND, dl, MulVT, X); 5762 Y = DAG.getNode(ISD::SIGN_EXTEND, dl, MulVT, Y); 5763 Y = DAG.getNode(ISD::MUL, dl, MulVT, X, Y); 5764 Y = DAG.getNode(ISD::SRL, dl, MulVT, Y, 5765 DAG.getShiftAmountConstant(EltBits, MulVT, dl)); 5766 return DAG.getNode(ISD::TRUNCATE, dl, VT, Y); 5767 } 5768 5769 if (isOperationLegalOrCustom(ISD::MULHS, VT, IsAfterLegalization)) 5770 return DAG.getNode(ISD::MULHS, dl, VT, X, Y); 5771 if (isOperationLegalOrCustom(ISD::SMUL_LOHI, VT, IsAfterLegalization)) { 5772 SDValue LoHi = 5773 DAG.getNode(ISD::SMUL_LOHI, dl, DAG.getVTList(VT, VT), X, Y); 5774 return SDValue(LoHi.getNode(), 1); 5775 } 5776 return SDValue(); 5777 }; 5778 5779 SDValue Q = GetMULHS(N0, MagicFactor); 5780 if (!Q) 5781 return SDValue(); 5782 5783 Created.push_back(Q.getNode()); 5784 5785 // (Optionally) Add/subtract the numerator using Factor. 5786 Factor = DAG.getNode(ISD::MUL, dl, VT, N0, Factor); 5787 Created.push_back(Factor.getNode()); 5788 Q = DAG.getNode(ISD::ADD, dl, VT, Q, Factor); 5789 Created.push_back(Q.getNode()); 5790 5791 // Shift right algebraic by shift value. 5792 Q = DAG.getNode(ISD::SRA, dl, VT, Q, Shift); 5793 Created.push_back(Q.getNode()); 5794 5795 // Extract the sign bit, mask it and add it to the quotient. 5796 SDValue SignShift = DAG.getConstant(EltBits - 1, dl, ShVT); 5797 SDValue T = DAG.getNode(ISD::SRL, dl, VT, Q, SignShift); 5798 Created.push_back(T.getNode()); 5799 T = DAG.getNode(ISD::AND, dl, VT, T, ShiftMask); 5800 Created.push_back(T.getNode()); 5801 return DAG.getNode(ISD::ADD, dl, VT, Q, T); 5802 } 5803 5804 /// Given an ISD::UDIV node expressing a divide by constant, 5805 /// return a DAG expression to select that will generate the same value by 5806 /// multiplying by a magic number. 5807 /// Ref: "Hacker's Delight" or "The PowerPC Compiler Writer's Guide". 5808 SDValue TargetLowering::BuildUDIV(SDNode *N, SelectionDAG &DAG, 5809 bool IsAfterLegalization, 5810 SmallVectorImpl<SDNode *> &Created) const { 5811 SDLoc dl(N); 5812 EVT VT = N->getValueType(0); 5813 EVT SVT = VT.getScalarType(); 5814 EVT ShVT = getShiftAmountTy(VT, DAG.getDataLayout()); 5815 EVT ShSVT = ShVT.getScalarType(); 5816 unsigned EltBits = VT.getScalarSizeInBits(); 5817 EVT MulVT; 5818 5819 // Check to see if we can do this. 5820 // FIXME: We should be more aggressive here. 5821 if (!isTypeLegal(VT)) { 5822 // Limit this to simple scalars for now. 5823 if (VT.isVector() || !VT.isSimple()) 5824 return SDValue(); 5825 5826 // If this type will be promoted to a large enough type with a legal 5827 // multiply operation, we can go ahead and do this transform. 5828 if (getTypeAction(VT.getSimpleVT()) != TypePromoteInteger) 5829 return SDValue(); 5830 5831 MulVT = getTypeToTransformTo(*DAG.getContext(), VT); 5832 if (MulVT.getSizeInBits() < (2 * EltBits) || 5833 !isOperationLegal(ISD::MUL, MulVT)) 5834 return SDValue(); 5835 } 5836 5837 bool UseNPQ = false; 5838 SmallVector<SDValue, 16> PreShifts, PostShifts, MagicFactors, NPQFactors; 5839 5840 auto BuildUDIVPattern = [&](ConstantSDNode *C) { 5841 if (C->isZero()) 5842 return false; 5843 // FIXME: We should use a narrower constant when the upper 5844 // bits are known to be zero. 5845 const APInt& Divisor = C->getAPIntValue(); 5846 UnsignedDivisonByConstantInfo magics = UnsignedDivisonByConstantInfo::get(Divisor); 5847 unsigned PreShift = 0, PostShift = 0; 5848 5849 // If the divisor is even, we can avoid using the expensive fixup by 5850 // shifting the divided value upfront. 
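    // That is, for an even divisor D = D0 * 2^K whose plain magic number
    // would need the add-based fixup, we shift the dividend right by K first
    // and recompute the magic for D0 knowing the top K bits of the shifted
    // dividend are zero; the assert below checks that the recomputed magic no
    // longer needs the fixup.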
5851 if (magics.IsAdd != 0 && !Divisor[0]) { 5852 PreShift = Divisor.countTrailingZeros(); 5853 // Get magic number for the shifted divisor. 5854 magics = UnsignedDivisonByConstantInfo::get(Divisor.lshr(PreShift), PreShift); 5855 assert(magics.IsAdd == 0 && "Should use cheap fixup now"); 5856 } 5857 5858 APInt Magic = magics.Magic; 5859 5860 unsigned SelNPQ; 5861 if (magics.IsAdd == 0 || Divisor.isOne()) { 5862 assert(magics.ShiftAmount < Divisor.getBitWidth() && 5863 "We shouldn't generate an undefined shift!"); 5864 PostShift = magics.ShiftAmount; 5865 SelNPQ = false; 5866 } else { 5867 PostShift = magics.ShiftAmount - 1; 5868 SelNPQ = true; 5869 } 5870 5871 PreShifts.push_back(DAG.getConstant(PreShift, dl, ShSVT)); 5872 MagicFactors.push_back(DAG.getConstant(Magic, dl, SVT)); 5873 NPQFactors.push_back( 5874 DAG.getConstant(SelNPQ ? APInt::getOneBitSet(EltBits, EltBits - 1) 5875 : APInt::getZero(EltBits), 5876 dl, SVT)); 5877 PostShifts.push_back(DAG.getConstant(PostShift, dl, ShSVT)); 5878 UseNPQ |= SelNPQ; 5879 return true; 5880 }; 5881 5882 SDValue N0 = N->getOperand(0); 5883 SDValue N1 = N->getOperand(1); 5884 5885 // Collect the shifts/magic values from each element. 5886 if (!ISD::matchUnaryPredicate(N1, BuildUDIVPattern)) 5887 return SDValue(); 5888 5889 SDValue PreShift, PostShift, MagicFactor, NPQFactor; 5890 if (N1.getOpcode() == ISD::BUILD_VECTOR) { 5891 PreShift = DAG.getBuildVector(ShVT, dl, PreShifts); 5892 MagicFactor = DAG.getBuildVector(VT, dl, MagicFactors); 5893 NPQFactor = DAG.getBuildVector(VT, dl, NPQFactors); 5894 PostShift = DAG.getBuildVector(ShVT, dl, PostShifts); 5895 } else if (N1.getOpcode() == ISD::SPLAT_VECTOR) { 5896 assert(PreShifts.size() == 1 && MagicFactors.size() == 1 && 5897 NPQFactors.size() == 1 && PostShifts.size() == 1 && 5898 "Expected matchUnaryPredicate to return one for scalable vectors"); 5899 PreShift = DAG.getSplatVector(ShVT, dl, PreShifts[0]); 5900 MagicFactor = DAG.getSplatVector(VT, dl, MagicFactors[0]); 5901 NPQFactor = DAG.getSplatVector(VT, dl, NPQFactors[0]); 5902 PostShift = DAG.getSplatVector(ShVT, dl, PostShifts[0]); 5903 } else { 5904 assert(isa<ConstantSDNode>(N1) && "Expected a constant"); 5905 PreShift = PreShifts[0]; 5906 MagicFactor = MagicFactors[0]; 5907 PostShift = PostShifts[0]; 5908 } 5909 5910 SDValue Q = N0; 5911 Q = DAG.getNode(ISD::SRL, dl, VT, Q, PreShift); 5912 Created.push_back(Q.getNode()); 5913 5914 // FIXME: We should support doing a MUL in a wider type. 5915 auto GetMULHU = [&](SDValue X, SDValue Y) { 5916 // If the type isn't legal, use a wider mul of the the type calculated 5917 // earlier. 5918 if (!isTypeLegal(VT)) { 5919 X = DAG.getNode(ISD::ZERO_EXTEND, dl, MulVT, X); 5920 Y = DAG.getNode(ISD::ZERO_EXTEND, dl, MulVT, Y); 5921 Y = DAG.getNode(ISD::MUL, dl, MulVT, X, Y); 5922 Y = DAG.getNode(ISD::SRL, dl, MulVT, Y, 5923 DAG.getShiftAmountConstant(EltBits, MulVT, dl)); 5924 return DAG.getNode(ISD::TRUNCATE, dl, VT, Y); 5925 } 5926 5927 if (isOperationLegalOrCustom(ISD::MULHU, VT, IsAfterLegalization)) 5928 return DAG.getNode(ISD::MULHU, dl, VT, X, Y); 5929 if (isOperationLegalOrCustom(ISD::UMUL_LOHI, VT, IsAfterLegalization)) { 5930 SDValue LoHi = 5931 DAG.getNode(ISD::UMUL_LOHI, dl, DAG.getVTList(VT, VT), X, Y); 5932 return SDValue(LoHi.getNode(), 1); 5933 } 5934 return SDValue(); // No mulhu or equivalent 5935 }; 5936 5937 // Multiply the numerator (operand 0) by the magic value. 
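  // Illustrative 32-bit example, dividing by 3: the standard magic values are
  // MagicFactor = 0xAAAAAAAB with PostShift = 1 and no NPQ fixup, so
  // q = mulhu(n, 0xAAAAAAAB) >> 1; e.g. n = 100 gives mulhu(...) = 66 and
  // q = 33.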
5938 Q = GetMULHU(Q, MagicFactor); 5939 if (!Q) 5940 return SDValue(); 5941 5942 Created.push_back(Q.getNode()); 5943 5944 if (UseNPQ) { 5945 SDValue NPQ = DAG.getNode(ISD::SUB, dl, VT, N0, Q); 5946 Created.push_back(NPQ.getNode()); 5947 5948 // For vectors we might have a mix of non-NPQ/NPQ paths, so use 5949 // MULHU to act as a SRL-by-1 for NPQ, else multiply by zero. 5950 if (VT.isVector()) 5951 NPQ = GetMULHU(NPQ, NPQFactor); 5952 else 5953 NPQ = DAG.getNode(ISD::SRL, dl, VT, NPQ, DAG.getConstant(1, dl, ShVT)); 5954 5955 Created.push_back(NPQ.getNode()); 5956 5957 Q = DAG.getNode(ISD::ADD, dl, VT, NPQ, Q); 5958 Created.push_back(Q.getNode()); 5959 } 5960 5961 Q = DAG.getNode(ISD::SRL, dl, VT, Q, PostShift); 5962 Created.push_back(Q.getNode()); 5963 5964 EVT SetCCVT = getSetCCResultType(DAG.getDataLayout(), *DAG.getContext(), VT); 5965 5966 SDValue One = DAG.getConstant(1, dl, VT); 5967 SDValue IsOne = DAG.getSetCC(dl, SetCCVT, N1, One, ISD::SETEQ); 5968 return DAG.getSelect(dl, VT, IsOne, N0, Q); 5969 } 5970 5971 /// If all values in Values that *don't* match the predicate are same 'splat' 5972 /// value, then replace all values with that splat value. 5973 /// Else, if AlternativeReplacement was provided, then replace all values that 5974 /// do match predicate with AlternativeReplacement value. 5975 static void 5976 turnVectorIntoSplatVector(MutableArrayRef<SDValue> Values, 5977 std::function<bool(SDValue)> Predicate, 5978 SDValue AlternativeReplacement = SDValue()) { 5979 SDValue Replacement; 5980 // Is there a value for which the Predicate does *NOT* match? What is it? 5981 auto SplatValue = llvm::find_if_not(Values, Predicate); 5982 if (SplatValue != Values.end()) { 5983 // Does Values consist only of SplatValue's and values matching Predicate? 5984 if (llvm::all_of(Values, [Predicate, SplatValue](SDValue Value) { 5985 return Value == *SplatValue || Predicate(Value); 5986 })) // Then we shall replace values matching predicate with SplatValue. 5987 Replacement = *SplatValue; 5988 } 5989 if (!Replacement) { 5990 // Oops, we did not find the "baseline" splat value. 5991 if (!AlternativeReplacement) 5992 return; // Nothing to do. 5993 // Let's replace with provided value then. 5994 Replacement = AlternativeReplacement; 5995 } 5996 std::replace_if(Values.begin(), Values.end(), Predicate, Replacement); 5997 } 5998 5999 /// Given an ISD::UREM used only by an ISD::SETEQ or ISD::SETNE 6000 /// where the divisor is constant and the comparison target is zero, 6001 /// return a DAG expression that will generate the same comparison result 6002 /// using only multiplications, additions and shifts/rotations. 6003 /// Ref: "Hacker's Delight" 10-17. 
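/// For example, with a 32-bit type and a divisor of 6 (so D0 = 3, K = 1):
/// P = 0xAAAAAAAB (the inverse of 3 modulo 2^32) and Q = 0x2AAAAAAA, so
/// 'x u% 6 == 0' becomes 'rotr(x * 0xAAAAAAAB, 1) u<= 0x2AAAAAAA'.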
SDValue TargetLowering::buildUREMEqFold(EVT SETCCVT, SDValue REMNode,
                                        SDValue CompTargetNode,
                                        ISD::CondCode Cond,
                                        DAGCombinerInfo &DCI,
                                        const SDLoc &DL) const {
  SmallVector<SDNode *, 5> Built;
  if (SDValue Folded = prepareUREMEqFold(SETCCVT, REMNode, CompTargetNode, Cond,
                                         DCI, DL, Built)) {
    for (SDNode *N : Built)
      DCI.AddToWorklist(N);
    return Folded;
  }

  return SDValue();
}

SDValue
TargetLowering::prepareUREMEqFold(EVT SETCCVT, SDValue REMNode,
                                  SDValue CompTargetNode, ISD::CondCode Cond,
                                  DAGCombinerInfo &DCI, const SDLoc &DL,
                                  SmallVectorImpl<SDNode *> &Created) const {
  // fold (seteq/ne (urem N, D), 0) -> (setule/ugt (rotr (mul N, P), K), Q)
  // - D must be constant, with D = D0 * 2^K where D0 is odd
  // - P is the multiplicative inverse of D0 modulo 2^W
  // - Q = floor(((2^W) - 1) / D)
  // where W is the width of the common type of N and D.
  assert((Cond == ISD::SETEQ || Cond == ISD::SETNE) &&
         "Only applicable for (in)equality comparisons.");

  SelectionDAG &DAG = DCI.DAG;

  EVT VT = REMNode.getValueType();
  EVT SVT = VT.getScalarType();
  EVT ShVT = getShiftAmountTy(VT, DAG.getDataLayout(), !DCI.isBeforeLegalize());
  EVT ShSVT = ShVT.getScalarType();

  // If MUL is unavailable, we cannot proceed in any case.
  if (!DCI.isBeforeLegalizeOps() && !isOperationLegalOrCustom(ISD::MUL, VT))
    return SDValue();

  bool ComparingWithAllZeros = true;
  bool AllComparisonsWithNonZerosAreTautological = true;
  bool HadTautologicalLanes = false;
  bool AllLanesAreTautological = true;
  bool HadEvenDivisor = false;
  bool AllDivisorsArePowerOfTwo = true;
  bool HadTautologicalInvertedLanes = false;
  SmallVector<SDValue, 16> PAmts, KAmts, QAmts, IAmts;

  auto BuildUREMPattern = [&](ConstantSDNode *CDiv, ConstantSDNode *CCmp) {
    // Division by 0 is UB. Leave it to be constant-folded elsewhere.
    if (CDiv->isZero())
      return false;

    const APInt &D = CDiv->getAPIntValue();
    const APInt &Cmp = CCmp->getAPIntValue();

    ComparingWithAllZeros &= Cmp.isZero();

    // `x u% C1` is *always* less than C1. So given `x u% C1 == C2`,
    // if C2 is not less than C1, the comparison is always false.
    // But we will only be able to produce the comparison that will give the
    // opposite tautological answer. So this lane would need to be fixed up.
    bool TautologicalInvertedLane = D.ule(Cmp);
    HadTautologicalInvertedLanes |= TautologicalInvertedLane;

    // If all lanes are tautological (either all divisors are ones, or divisor
    // is not greater than the constant we are comparing with),
    // we will prefer to avoid the fold.
    bool TautologicalLane = D.isOne() || TautologicalInvertedLane;
    HadTautologicalLanes |= TautologicalLane;
    AllLanesAreTautological &= TautologicalLane;

    // If we are comparing with non-zero, we'll need to subtract said
    // comparison value from the LHS. But there is no point in doing that if
    // every lane where we are comparing with non-zero is tautological.
    if (!Cmp.isZero())
      AllComparisonsWithNonZerosAreTautological &= TautologicalLane;

    // Decompose D into D0 * 2^K
    unsigned K = D.countTrailingZeros();
    assert((!D.isOne() || (K == 0)) && "For divisor '1' we won't rotate.");
    APInt D0 = D.lshr(K);

    // D is even if it has trailing zeros.
6089     HadEvenDivisor |= (K != 0);
6090     // D is a power-of-two if D0 is one.
6091     // If all divisors are power-of-two, we will prefer to avoid the fold.
6092     AllDivisorsArePowerOfTwo &= D0.isOne();
6093 
6094     // P = inv(D0, 2^W)
6095     // 2^W requires W + 1 bits, so we have to extend and then truncate.
6096     unsigned W = D.getBitWidth();
6097     APInt P = D0.zext(W + 1)
6098                   .multiplicativeInverse(APInt::getSignedMinValue(W + 1))
6099                   .trunc(W);
6100     assert(!P.isZero() && "No multiplicative inverse!"); // unreachable
6101     assert((D0 * P).isOne() && "Multiplicative inverse basic check failed.");
6102 
6103     // Q = floor((2^W - 1) u/ D)
6104     // R = ((2^W - 1) u% D)
6105     APInt Q, R;
6106     APInt::udivrem(APInt::getAllOnes(W), D, Q, R);
6107 
6108     // If we are comparing with zero, then that comparison constant is okay,
6109     // else it may need to be one less than that.
6110     if (Cmp.ugt(R))
6111       Q -= 1;
6112 
6113     assert(APInt::getAllOnes(ShSVT.getSizeInBits()).ugt(K) &&
6114            "We are expecting that K is always less than all-ones for ShSVT");
6115 
6116     // If the lane is tautological the result can be constant-folded.
6117     if (TautologicalLane) {
6118       // Set the P and K amounts to bogus values so we can try to splat them.
6119       P = 0;
6120       K = -1;
6121       // And ensure that comparison constant is tautological,
6122       // it will always compare true/false.
6123       Q = -1;
6124     }
6125 
6126     PAmts.push_back(DAG.getConstant(P, DL, SVT));
6127     KAmts.push_back(
6128         DAG.getConstant(APInt(ShSVT.getSizeInBits(), K), DL, ShSVT));
6129     QAmts.push_back(DAG.getConstant(Q, DL, SVT));
6130     return true;
6131   };
6132 
6133   SDValue N = REMNode.getOperand(0);
6134   SDValue D = REMNode.getOperand(1);
6135 
6136   // Collect the values from each element.
6137   if (!ISD::matchBinaryPredicate(D, CompTargetNode, BuildUREMPattern))
6138     return SDValue();
6139 
6140   // If all lanes are tautological, the result can be constant-folded.
6141   if (AllLanesAreTautological)
6142     return SDValue();
6143 
6144   // If this is a urem by a power-of-two, avoid the fold since it can be
6145   // best implemented as a bit test.
6146   if (AllDivisorsArePowerOfTwo)
6147     return SDValue();
6148 
6149   SDValue PVal, KVal, QVal;
6150   if (D.getOpcode() == ISD::BUILD_VECTOR) {
6151     if (HadTautologicalLanes) {
6152       // Try to turn PAmts into a splat, since we don't care about the values
6153       // that are currently '0'. If we can't, just keep '0's.
6154       turnVectorIntoSplatVector(PAmts, isNullConstant);
6155       // Try to turn KAmts into a splat, since we don't care about the values
6156       // that are currently '-1'. If we can't, change them to '0's.
6157       turnVectorIntoSplatVector(KAmts, isAllOnesConstant,
6158                                 DAG.getConstant(0, DL, ShSVT));
6159     }
6160 
6161     PVal = DAG.getBuildVector(VT, DL, PAmts);
6162     KVal = DAG.getBuildVector(ShVT, DL, KAmts);
6163     QVal = DAG.getBuildVector(VT, DL, QAmts);
6164   } else if (D.getOpcode() == ISD::SPLAT_VECTOR) {
6165     assert(PAmts.size() == 1 && KAmts.size() == 1 && QAmts.size() == 1 &&
6166            "Expected matchBinaryPredicate to return one element for "
6167            "SPLAT_VECTORs");
6168     PVal = DAG.getSplatVector(VT, DL, PAmts[0]);
6169     KVal = DAG.getSplatVector(ShVT, DL, KAmts[0]);
6170     QVal = DAG.getSplatVector(VT, DL, QAmts[0]);
6171   } else {
6172     PVal = PAmts[0];
6173     KVal = KAmts[0];
6174     QVal = QAmts[0];
6175   }
6176 
6177   if (!ComparingWithAllZeros && !AllComparisonsWithNonZerosAreTautological) {
6178     if (!DCI.isBeforeLegalizeOps() && !isOperationLegalOrCustom(ISD::SUB, VT))
6179       return SDValue(); // FIXME: Could/should use `ISD::ADD`?
6180     assert(CompTargetNode.getValueType() == N.getValueType() &&
6181            "Expecting that the types on LHS and RHS of comparisons match.");
6182     N = DAG.getNode(ISD::SUB, DL, VT, N, CompTargetNode);
6183   }
6184 
6185   // (mul N, P)
6186   SDValue Op0 = DAG.getNode(ISD::MUL, DL, VT, N, PVal);
6187   Created.push_back(Op0.getNode());
6188 
6189   // Rotate right only if any divisor was even. We avoid rotates for all-odd
6190   // divisors as a performance improvement, since rotating by 0 is a no-op.
6191   if (HadEvenDivisor) {
6192     // We need ROTR to do this.
6193     if (!DCI.isBeforeLegalizeOps() && !isOperationLegalOrCustom(ISD::ROTR, VT))
6194       return SDValue();
6195     // UREM: (rotr (mul N, P), K)
6196     Op0 = DAG.getNode(ISD::ROTR, DL, VT, Op0, KVal);
6197     Created.push_back(Op0.getNode());
6198   }
6199 
6200   // UREM: (setule/setugt (rotr (mul N, P), K), Q)
6201   SDValue NewCC =
6202       DAG.getSetCC(DL, SETCCVT, Op0, QVal,
6203                    ((Cond == ISD::SETEQ) ? ISD::SETULE : ISD::SETUGT));
6204   if (!HadTautologicalInvertedLanes)
6205     return NewCC;
6206 
6207   // If any lanes previously compared always-false, the NewCC will give
6208   // always-true result for them, so we need to fixup those lanes.
6209   // Or the other way around for inequality predicate.
6210   assert(VT.isVector() && "Can/should only get here for vectors.");
6211   Created.push_back(NewCC.getNode());
6212 
6213   // `x u% C1` is *always* less than C1. So given `x u% C1 == C2`,
6214   // if C2 is not less than C1, the comparison is always false.
6215   // But we have produced the comparison that will give the
6216   // opposite tautological answer. So these lanes would need to be fixed up.
6217   SDValue TautologicalInvertedChannels =
6218       DAG.getSetCC(DL, SETCCVT, D, CompTargetNode, ISD::SETULE);
6219   Created.push_back(TautologicalInvertedChannels.getNode());
6220 
6221   // NOTE: we avoid letting illegal types through even if we're before legalize
6222   // ops – legalization has a hard time producing good code for this.
6223   if (isOperationLegalOrCustom(ISD::VSELECT, SETCCVT)) {
6224     // If we have a vector select, let's replace the comparison results in the
6225     // affected lanes with the correct tautological result.
6226     SDValue Replacement = DAG.getBoolConstant(Cond == ISD::SETEQ ? false : true,
6227                                               DL, SETCCVT, SETCCVT);
6228     return DAG.getNode(ISD::VSELECT, DL, SETCCVT, TautologicalInvertedChannels,
6229                        Replacement, NewCC);
6230   }
6231 
6232   // Else, we can just invert the comparison result in the appropriate lanes.
6233   //
6234   // NOTE: see the note above about VSELECT.
6235   if (isOperationLegalOrCustom(ISD::XOR, SETCCVT))
6236     return DAG.getNode(ISD::XOR, DL, SETCCVT, NewCC,
6237                        TautologicalInvertedChannels);
6238 
6239   return SDValue(); // Don't know how to lower.
6240 }
6241 
6242 /// Given an ISD::SREM used only by an ISD::SETEQ or ISD::SETNE
6243 /// where the divisor is constant and the comparison target is zero,
6244 /// return a DAG expression that will generate the same comparison result
6245 /// using only multiplications, additions and shifts/rotations.
6246 /// Ref: "Hacker's Delight" 10-17.
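/// Illustrative sketch (assuming i8 lanes): for D = 6 we get D0 = 3, K = 1,
/// P = 171, A = floor(127 / 3) & ~1 = 42 and Q = floor((2 * 42) / 2) = 42, so
/// "x s% 6 == 0" becomes "rotr(x * 171 + 42, 1) u<= 42" (modulo 2^8). For
/// x = -6 this is rotr(250 * 171 + 42, 1) = rotr(40, 1) = 20 (true), and for
/// x = 7 it is rotr(215, 1) = 235 (false).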
6247 SDValue TargetLowering::buildSREMEqFold(EVT SETCCVT, SDValue REMNode, 6248 SDValue CompTargetNode, 6249 ISD::CondCode Cond, 6250 DAGCombinerInfo &DCI, 6251 const SDLoc &DL) const { 6252 SmallVector<SDNode *, 7> Built; 6253 if (SDValue Folded = prepareSREMEqFold(SETCCVT, REMNode, CompTargetNode, Cond, 6254 DCI, DL, Built)) { 6255 assert(Built.size() <= 7 && "Max size prediction failed."); 6256 for (SDNode *N : Built) 6257 DCI.AddToWorklist(N); 6258 return Folded; 6259 } 6260 6261 return SDValue(); 6262 } 6263 6264 SDValue 6265 TargetLowering::prepareSREMEqFold(EVT SETCCVT, SDValue REMNode, 6266 SDValue CompTargetNode, ISD::CondCode Cond, 6267 DAGCombinerInfo &DCI, const SDLoc &DL, 6268 SmallVectorImpl<SDNode *> &Created) const { 6269 // Fold: 6270 // (seteq/ne (srem N, D), 0) 6271 // To: 6272 // (setule/ugt (rotr (add (mul N, P), A), K), Q) 6273 // 6274 // - D must be constant, with D = D0 * 2^K where D0 is odd 6275 // - P is the multiplicative inverse of D0 modulo 2^W 6276 // - A = bitwiseand(floor((2^(W - 1) - 1) / D0), (-(2^k))) 6277 // - Q = floor((2 * A) / (2^K)) 6278 // where W is the width of the common type of N and D. 6279 assert((Cond == ISD::SETEQ || Cond == ISD::SETNE) && 6280 "Only applicable for (in)equality comparisons."); 6281 6282 SelectionDAG &DAG = DCI.DAG; 6283 6284 EVT VT = REMNode.getValueType(); 6285 EVT SVT = VT.getScalarType(); 6286 EVT ShVT = getShiftAmountTy(VT, DAG.getDataLayout(), !DCI.isBeforeLegalize()); 6287 EVT ShSVT = ShVT.getScalarType(); 6288 6289 // If we are after ops legalization, and MUL is unavailable, we can not 6290 // proceed. 6291 if (!DCI.isBeforeLegalizeOps() && !isOperationLegalOrCustom(ISD::MUL, VT)) 6292 return SDValue(); 6293 6294 // TODO: Could support comparing with non-zero too. 6295 ConstantSDNode *CompTarget = isConstOrConstSplat(CompTargetNode); 6296 if (!CompTarget || !CompTarget->isZero()) 6297 return SDValue(); 6298 6299 bool HadIntMinDivisor = false; 6300 bool HadOneDivisor = false; 6301 bool AllDivisorsAreOnes = true; 6302 bool HadEvenDivisor = false; 6303 bool NeedToApplyOffset = false; 6304 bool AllDivisorsArePowerOfTwo = true; 6305 SmallVector<SDValue, 16> PAmts, AAmts, KAmts, QAmts; 6306 6307 auto BuildSREMPattern = [&](ConstantSDNode *C) { 6308 // Division by 0 is UB. Leave it to be constant-folded elsewhere. 6309 if (C->isZero()) 6310 return false; 6311 6312 // FIXME: we don't fold `rem %X, -C` to `rem %X, C` in DAGCombine. 6313 6314 // WARNING: this fold is only valid for positive divisors! 6315 APInt D = C->getAPIntValue(); 6316 if (D.isNegative()) 6317 D.negate(); // `rem %X, -C` is equivalent to `rem %X, C` 6318 6319 HadIntMinDivisor |= D.isMinSignedValue(); 6320 6321 // If all divisors are ones, we will prefer to avoid the fold. 6322 HadOneDivisor |= D.isOne(); 6323 AllDivisorsAreOnes &= D.isOne(); 6324 6325 // Decompose D into D0 * 2^K 6326 unsigned K = D.countTrailingZeros(); 6327 assert((!D.isOne() || (K == 0)) && "For divisor '1' we won't rotate."); 6328 APInt D0 = D.lshr(K); 6329 6330 if (!D.isMinSignedValue()) { 6331 // D is even if it has trailing zeros; unless it's INT_MIN, in which case 6332 // we don't care about this lane in this fold, we'll special-handle it. 6333 HadEvenDivisor |= (K != 0); 6334 } 6335 6336 // D is a power-of-two if D0 is one. This includes INT_MIN. 6337 // If all divisors are power-of-two, we will prefer to avoid the fold. 6338 AllDivisorsArePowerOfTwo &= D0.isOne(); 6339 6340 // P = inv(D0, 2^W) 6341 // 2^W requires W + 1 bits, so we have to extend and then truncate. 
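    // (E.g. for W = 8 and D0 = 3 this yields P = 171, since 3 * 171 = 513,
    // which is 1 modulo 256.)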
6342     unsigned W = D.getBitWidth();
6343     APInt P = D0.zext(W + 1)
6344                   .multiplicativeInverse(APInt::getSignedMinValue(W + 1))
6345                   .trunc(W);
6346     assert(!P.isZero() && "No multiplicative inverse!"); // unreachable
6347     assert((D0 * P).isOne() && "Multiplicative inverse basic check failed.");
6348 
6349     // A = floor((2^(W - 1) - 1) / D0) & -2^K
6350     APInt A = APInt::getSignedMaxValue(W).udiv(D0);
6351     A.clearLowBits(K);
6352 
6353     if (!D.isMinSignedValue()) {
6354       // If the divisor is INT_MIN, then we don't care about this lane in this
6355       // fold, we'll special-handle it.
6356       NeedToApplyOffset |= A != 0;
6357     }
6358 
6359     // Q = floor((2 * A) / (2^K))
6360     APInt Q = (2 * A).udiv(APInt::getOneBitSet(W, K));
6361 
6362     assert(APInt::getAllOnes(SVT.getSizeInBits()).ugt(A) &&
6363            "We are expecting that A is always less than all-ones for SVT");
6364     assert(APInt::getAllOnes(ShSVT.getSizeInBits()).ugt(K) &&
6365            "We are expecting that K is always less than all-ones for ShSVT");
6366 
6367     // If the divisor is 1 the result can be constant-folded. Likewise, we
6368     // don't care about INT_MIN lanes, those can be set to undef if appropriate.
6369     if (D.isOne()) {
6370       // Set P, A and K to bogus values so we can try to splat them.
6371       P = 0;
6372       A = -1;
6373       K = -1;
6374 
6375       // x ?% 1 == 0 <--> true <--> x u<= -1
6376       Q = -1;
6377     }
6378 
6379     PAmts.push_back(DAG.getConstant(P, DL, SVT));
6380     AAmts.push_back(DAG.getConstant(A, DL, SVT));
6381     KAmts.push_back(
6382         DAG.getConstant(APInt(ShSVT.getSizeInBits(), K), DL, ShSVT));
6383     QAmts.push_back(DAG.getConstant(Q, DL, SVT));
6384     return true;
6385   };
6386 
6387   SDValue N = REMNode.getOperand(0);
6388   SDValue D = REMNode.getOperand(1);
6389 
6390   // Collect the values from each element.
6391   if (!ISD::matchUnaryPredicate(D, BuildSREMPattern))
6392     return SDValue();
6393 
6394   // If this is a srem by one, avoid the fold since it can be constant-folded.
6395   if (AllDivisorsAreOnes)
6396     return SDValue();
6397 
6398   // If this is a srem by a power-of-two (including INT_MIN), avoid the fold
6399   // since it can be best implemented as a bit test.
6400   if (AllDivisorsArePowerOfTwo)
6401     return SDValue();
6402 
6403   SDValue PVal, AVal, KVal, QVal;
6404   if (D.getOpcode() == ISD::BUILD_VECTOR) {
6405     if (HadOneDivisor) {
6406       // Try to turn PAmts into a splat, since we don't care about the values
6407       // that are currently '0'. If we can't, just keep '0's.
6408       turnVectorIntoSplatVector(PAmts, isNullConstant);
6409       // Try to turn AAmts into a splat, since we don't care about the
6410       // values that are currently '-1'. If we can't, change them to '0's.
6411       turnVectorIntoSplatVector(AAmts, isAllOnesConstant,
6412                                 DAG.getConstant(0, DL, SVT));
6413       // Try to turn KAmts into a splat, since we don't care about the values
6414       // that are currently '-1'. If we can't, change them to '0's.
6415 turnVectorIntoSplatVector(KAmts, isAllOnesConstant, 6416 DAG.getConstant(0, DL, ShSVT)); 6417 } 6418 6419 PVal = DAG.getBuildVector(VT, DL, PAmts); 6420 AVal = DAG.getBuildVector(VT, DL, AAmts); 6421 KVal = DAG.getBuildVector(ShVT, DL, KAmts); 6422 QVal = DAG.getBuildVector(VT, DL, QAmts); 6423 } else if (D.getOpcode() == ISD::SPLAT_VECTOR) { 6424 assert(PAmts.size() == 1 && AAmts.size() == 1 && KAmts.size() == 1 && 6425 QAmts.size() == 1 && 6426 "Expected matchUnaryPredicate to return one element for scalable " 6427 "vectors"); 6428 PVal = DAG.getSplatVector(VT, DL, PAmts[0]); 6429 AVal = DAG.getSplatVector(VT, DL, AAmts[0]); 6430 KVal = DAG.getSplatVector(ShVT, DL, KAmts[0]); 6431 QVal = DAG.getSplatVector(VT, DL, QAmts[0]); 6432 } else { 6433 assert(isa<ConstantSDNode>(D) && "Expected a constant"); 6434 PVal = PAmts[0]; 6435 AVal = AAmts[0]; 6436 KVal = KAmts[0]; 6437 QVal = QAmts[0]; 6438 } 6439 6440 // (mul N, P) 6441 SDValue Op0 = DAG.getNode(ISD::MUL, DL, VT, N, PVal); 6442 Created.push_back(Op0.getNode()); 6443 6444 if (NeedToApplyOffset) { 6445 // We need ADD to do this. 6446 if (!DCI.isBeforeLegalizeOps() && !isOperationLegalOrCustom(ISD::ADD, VT)) 6447 return SDValue(); 6448 6449 // (add (mul N, P), A) 6450 Op0 = DAG.getNode(ISD::ADD, DL, VT, Op0, AVal); 6451 Created.push_back(Op0.getNode()); 6452 } 6453 6454 // Rotate right only if any divisor was even. We avoid rotates for all-odd 6455 // divisors as a performance improvement, since rotating by 0 is a no-op. 6456 if (HadEvenDivisor) { 6457 // We need ROTR to do this. 6458 if (!DCI.isBeforeLegalizeOps() && !isOperationLegalOrCustom(ISD::ROTR, VT)) 6459 return SDValue(); 6460 // SREM: (rotr (add (mul N, P), A), K) 6461 Op0 = DAG.getNode(ISD::ROTR, DL, VT, Op0, KVal); 6462 Created.push_back(Op0.getNode()); 6463 } 6464 6465 // SREM: (setule/setugt (rotr (add (mul N, P), A), K), Q) 6466 SDValue Fold = 6467 DAG.getSetCC(DL, SETCCVT, Op0, QVal, 6468 ((Cond == ISD::SETEQ) ? ISD::SETULE : ISD::SETUGT)); 6469 6470 // If we didn't have lanes with INT_MIN divisor, then we're done. 6471 if (!HadIntMinDivisor) 6472 return Fold; 6473 6474 // That fold is only valid for positive divisors. Which effectively means, 6475 // it is invalid for INT_MIN divisors. So if we have such a lane, 6476 // we must fix-up results for said lanes. 6477 assert(VT.isVector() && "Can/should only get here for vectors."); 6478 6479 // NOTE: we avoid letting illegal types through even if we're before legalize 6480 // ops – legalization has a hard time producing good code for the code that 6481 // follows. 6482 if (!isOperationLegalOrCustom(ISD::SETEQ, VT) || 6483 !isOperationLegalOrCustom(ISD::AND, VT) || 6484 !isOperationLegalOrCustom(Cond, VT) || 6485 !isOperationLegalOrCustom(ISD::VSELECT, SETCCVT)) 6486 return SDValue(); 6487 6488 Created.push_back(Fold.getNode()); 6489 6490 SDValue IntMin = DAG.getConstant( 6491 APInt::getSignedMinValue(SVT.getScalarSizeInBits()), DL, VT); 6492 SDValue IntMax = DAG.getConstant( 6493 APInt::getSignedMaxValue(SVT.getScalarSizeInBits()), DL, VT); 6494 SDValue Zero = 6495 DAG.getConstant(APInt::getZero(SVT.getScalarSizeInBits()), DL, VT); 6496 6497 // Which lanes had INT_MIN divisors? Divisor is constant, so const-folded. 
6498 SDValue DivisorIsIntMin = DAG.getSetCC(DL, SETCCVT, D, IntMin, ISD::SETEQ); 6499 Created.push_back(DivisorIsIntMin.getNode()); 6500 6501 // (N s% INT_MIN) ==/!= 0 <--> (N & INT_MAX) ==/!= 0 6502 SDValue Masked = DAG.getNode(ISD::AND, DL, VT, N, IntMax); 6503 Created.push_back(Masked.getNode()); 6504 SDValue MaskedIsZero = DAG.getSetCC(DL, SETCCVT, Masked, Zero, Cond); 6505 Created.push_back(MaskedIsZero.getNode()); 6506 6507 // To produce final result we need to blend 2 vectors: 'SetCC' and 6508 // 'MaskedIsZero'. If the divisor for channel was *NOT* INT_MIN, we pick 6509 // from 'Fold', else pick from 'MaskedIsZero'. Since 'DivisorIsIntMin' is 6510 // constant-folded, select can get lowered to a shuffle with constant mask. 6511 SDValue Blended = DAG.getNode(ISD::VSELECT, DL, SETCCVT, DivisorIsIntMin, 6512 MaskedIsZero, Fold); 6513 6514 return Blended; 6515 } 6516 6517 bool TargetLowering:: 6518 verifyReturnAddressArgumentIsConstant(SDValue Op, SelectionDAG &DAG) const { 6519 if (!isa<ConstantSDNode>(Op.getOperand(0))) { 6520 DAG.getContext()->emitError("argument to '__builtin_return_address' must " 6521 "be a constant integer"); 6522 return true; 6523 } 6524 6525 return false; 6526 } 6527 6528 SDValue TargetLowering::getSqrtInputTest(SDValue Op, SelectionDAG &DAG, 6529 const DenormalMode &Mode) const { 6530 SDLoc DL(Op); 6531 EVT VT = Op.getValueType(); 6532 EVT CCVT = getSetCCResultType(DAG.getDataLayout(), *DAG.getContext(), VT); 6533 SDValue FPZero = DAG.getConstantFP(0.0, DL, VT); 6534 // Testing it with denormal inputs to avoid wrong estimate. 6535 if (Mode.Input == DenormalMode::IEEE) { 6536 // This is specifically a check for the handling of denormal inputs, 6537 // not the result. 6538 6539 // Test = fabs(X) < SmallestNormal 6540 const fltSemantics &FltSem = DAG.EVTToAPFloatSemantics(VT); 6541 APFloat SmallestNorm = APFloat::getSmallestNormalized(FltSem); 6542 SDValue NormC = DAG.getConstantFP(SmallestNorm, DL, VT); 6543 SDValue Fabs = DAG.getNode(ISD::FABS, DL, VT, Op); 6544 return DAG.getSetCC(DL, CCVT, Fabs, NormC, ISD::SETLT); 6545 } 6546 // Test = X == 0.0 6547 return DAG.getSetCC(DL, CCVT, Op, FPZero, ISD::SETEQ); 6548 } 6549 6550 SDValue TargetLowering::getNegatedExpression(SDValue Op, SelectionDAG &DAG, 6551 bool LegalOps, bool OptForSize, 6552 NegatibleCost &Cost, 6553 unsigned Depth) const { 6554 // fneg is removable even if it has multiple uses. 6555 if (Op.getOpcode() == ISD::FNEG) { 6556 Cost = NegatibleCost::Cheaper; 6557 return Op.getOperand(0); 6558 } 6559 6560 // Don't recurse exponentially. 6561 if (Depth > SelectionDAG::MaxRecursionDepth) 6562 return SDValue(); 6563 6564 // Pre-increment recursion depth for use in recursive calls. 6565 ++Depth; 6566 const SDNodeFlags Flags = Op->getFlags(); 6567 const TargetOptions &Options = DAG.getTarget().Options; 6568 EVT VT = Op.getValueType(); 6569 unsigned Opcode = Op.getOpcode(); 6570 6571 // Don't allow anything with multiple uses unless we know it is free. 6572 if (!Op.hasOneUse() && Opcode != ISD::ConstantFP) { 6573 bool IsFreeExtend = Opcode == ISD::FP_EXTEND && 6574 isFPExtFree(VT, Op.getOperand(0).getValueType()); 6575 if (!IsFreeExtend) 6576 return SDValue(); 6577 } 6578 6579 auto RemoveDeadNode = [&](SDValue N) { 6580 if (N && N.getNode()->use_empty()) 6581 DAG.RemoveDeadNode(N.getNode()); 6582 }; 6583 6584 SDLoc DL(Op); 6585 6586 // Because getNegatedExpression can delete nodes we need a handle to keep 6587 // temporary nodes alive in case the recursion manages to create an identical 6588 // node. 
6589 std::list<HandleSDNode> Handles; 6590 6591 switch (Opcode) { 6592 case ISD::ConstantFP: { 6593 // Don't invert constant FP values after legalization unless the target says 6594 // the negated constant is legal. 6595 bool IsOpLegal = 6596 isOperationLegal(ISD::ConstantFP, VT) || 6597 isFPImmLegal(neg(cast<ConstantFPSDNode>(Op)->getValueAPF()), VT, 6598 OptForSize); 6599 6600 if (LegalOps && !IsOpLegal) 6601 break; 6602 6603 APFloat V = cast<ConstantFPSDNode>(Op)->getValueAPF(); 6604 V.changeSign(); 6605 SDValue CFP = DAG.getConstantFP(V, DL, VT); 6606 6607 // If we already have the use of the negated floating constant, it is free 6608 // to negate it even it has multiple uses. 6609 if (!Op.hasOneUse() && CFP.use_empty()) 6610 break; 6611 Cost = NegatibleCost::Neutral; 6612 return CFP; 6613 } 6614 case ISD::BUILD_VECTOR: { 6615 // Only permit BUILD_VECTOR of constants. 6616 if (llvm::any_of(Op->op_values(), [&](SDValue N) { 6617 return !N.isUndef() && !isa<ConstantFPSDNode>(N); 6618 })) 6619 break; 6620 6621 bool IsOpLegal = 6622 (isOperationLegal(ISD::ConstantFP, VT) && 6623 isOperationLegal(ISD::BUILD_VECTOR, VT)) || 6624 llvm::all_of(Op->op_values(), [&](SDValue N) { 6625 return N.isUndef() || 6626 isFPImmLegal(neg(cast<ConstantFPSDNode>(N)->getValueAPF()), VT, 6627 OptForSize); 6628 }); 6629 6630 if (LegalOps && !IsOpLegal) 6631 break; 6632 6633 SmallVector<SDValue, 4> Ops; 6634 for (SDValue C : Op->op_values()) { 6635 if (C.isUndef()) { 6636 Ops.push_back(C); 6637 continue; 6638 } 6639 APFloat V = cast<ConstantFPSDNode>(C)->getValueAPF(); 6640 V.changeSign(); 6641 Ops.push_back(DAG.getConstantFP(V, DL, C.getValueType())); 6642 } 6643 Cost = NegatibleCost::Neutral; 6644 return DAG.getBuildVector(VT, DL, Ops); 6645 } 6646 case ISD::FADD: { 6647 if (!Options.NoSignedZerosFPMath && !Flags.hasNoSignedZeros()) 6648 break; 6649 6650 // After operation legalization, it might not be legal to create new FSUBs. 6651 if (LegalOps && !isOperationLegalOrCustom(ISD::FSUB, VT)) 6652 break; 6653 SDValue X = Op.getOperand(0), Y = Op.getOperand(1); 6654 6655 // fold (fneg (fadd X, Y)) -> (fsub (fneg X), Y) 6656 NegatibleCost CostX = NegatibleCost::Expensive; 6657 SDValue NegX = 6658 getNegatedExpression(X, DAG, LegalOps, OptForSize, CostX, Depth); 6659 // Prevent this node from being deleted by the next call. 6660 if (NegX) 6661 Handles.emplace_back(NegX); 6662 6663 // fold (fneg (fadd X, Y)) -> (fsub (fneg Y), X) 6664 NegatibleCost CostY = NegatibleCost::Expensive; 6665 SDValue NegY = 6666 getNegatedExpression(Y, DAG, LegalOps, OptForSize, CostY, Depth); 6667 6668 // We're done with the handles. 6669 Handles.clear(); 6670 6671 // Negate the X if its cost is less or equal than Y. 6672 if (NegX && (CostX <= CostY)) { 6673 Cost = CostX; 6674 SDValue N = DAG.getNode(ISD::FSUB, DL, VT, NegX, Y, Flags); 6675 if (NegY != N) 6676 RemoveDeadNode(NegY); 6677 return N; 6678 } 6679 6680 // Negate the Y if it is not expensive. 6681 if (NegY) { 6682 Cost = CostY; 6683 SDValue N = DAG.getNode(ISD::FSUB, DL, VT, NegY, X, Flags); 6684 if (NegX != N) 6685 RemoveDeadNode(NegX); 6686 return N; 6687 } 6688 break; 6689 } 6690 case ISD::FSUB: { 6691 // We can't turn -(A-B) into B-A when we honor signed zeros. 
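    // (E.g. with A == B == +0.0, A - B is +0.0 so -(A - B) is -0.0, while
    // B - A is +0.0; the sign of zero would change.)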
6692 if (!Options.NoSignedZerosFPMath && !Flags.hasNoSignedZeros()) 6693 break; 6694 6695 SDValue X = Op.getOperand(0), Y = Op.getOperand(1); 6696 // fold (fneg (fsub 0, Y)) -> Y 6697 if (ConstantFPSDNode *C = isConstOrConstSplatFP(X, /*AllowUndefs*/ true)) 6698 if (C->isZero()) { 6699 Cost = NegatibleCost::Cheaper; 6700 return Y; 6701 } 6702 6703 // fold (fneg (fsub X, Y)) -> (fsub Y, X) 6704 Cost = NegatibleCost::Neutral; 6705 return DAG.getNode(ISD::FSUB, DL, VT, Y, X, Flags); 6706 } 6707 case ISD::FMUL: 6708 case ISD::FDIV: { 6709 SDValue X = Op.getOperand(0), Y = Op.getOperand(1); 6710 6711 // fold (fneg (fmul X, Y)) -> (fmul (fneg X), Y) 6712 NegatibleCost CostX = NegatibleCost::Expensive; 6713 SDValue NegX = 6714 getNegatedExpression(X, DAG, LegalOps, OptForSize, CostX, Depth); 6715 // Prevent this node from being deleted by the next call. 6716 if (NegX) 6717 Handles.emplace_back(NegX); 6718 6719 // fold (fneg (fmul X, Y)) -> (fmul X, (fneg Y)) 6720 NegatibleCost CostY = NegatibleCost::Expensive; 6721 SDValue NegY = 6722 getNegatedExpression(Y, DAG, LegalOps, OptForSize, CostY, Depth); 6723 6724 // We're done with the handles. 6725 Handles.clear(); 6726 6727 // Negate the X if its cost is less or equal than Y. 6728 if (NegX && (CostX <= CostY)) { 6729 Cost = CostX; 6730 SDValue N = DAG.getNode(Opcode, DL, VT, NegX, Y, Flags); 6731 if (NegY != N) 6732 RemoveDeadNode(NegY); 6733 return N; 6734 } 6735 6736 // Ignore X * 2.0 because that is expected to be canonicalized to X + X. 6737 if (auto *C = isConstOrConstSplatFP(Op.getOperand(1))) 6738 if (C->isExactlyValue(2.0) && Op.getOpcode() == ISD::FMUL) 6739 break; 6740 6741 // Negate the Y if it is not expensive. 6742 if (NegY) { 6743 Cost = CostY; 6744 SDValue N = DAG.getNode(Opcode, DL, VT, X, NegY, Flags); 6745 if (NegX != N) 6746 RemoveDeadNode(NegX); 6747 return N; 6748 } 6749 break; 6750 } 6751 case ISD::FMA: 6752 case ISD::FMAD: { 6753 if (!Options.NoSignedZerosFPMath && !Flags.hasNoSignedZeros()) 6754 break; 6755 6756 SDValue X = Op.getOperand(0), Y = Op.getOperand(1), Z = Op.getOperand(2); 6757 NegatibleCost CostZ = NegatibleCost::Expensive; 6758 SDValue NegZ = 6759 getNegatedExpression(Z, DAG, LegalOps, OptForSize, CostZ, Depth); 6760 // Give up if fail to negate the Z. 6761 if (!NegZ) 6762 break; 6763 6764 // Prevent this node from being deleted by the next two calls. 6765 Handles.emplace_back(NegZ); 6766 6767 // fold (fneg (fma X, Y, Z)) -> (fma (fneg X), Y, (fneg Z)) 6768 NegatibleCost CostX = NegatibleCost::Expensive; 6769 SDValue NegX = 6770 getNegatedExpression(X, DAG, LegalOps, OptForSize, CostX, Depth); 6771 // Prevent this node from being deleted by the next call. 6772 if (NegX) 6773 Handles.emplace_back(NegX); 6774 6775 // fold (fneg (fma X, Y, Z)) -> (fma X, (fneg Y), (fneg Z)) 6776 NegatibleCost CostY = NegatibleCost::Expensive; 6777 SDValue NegY = 6778 getNegatedExpression(Y, DAG, LegalOps, OptForSize, CostY, Depth); 6779 6780 // We're done with the handles. 6781 Handles.clear(); 6782 6783 // Negate the X if its cost is less or equal than Y. 6784 if (NegX && (CostX <= CostY)) { 6785 Cost = std::min(CostX, CostZ); 6786 SDValue N = DAG.getNode(Opcode, DL, VT, NegX, Y, NegZ, Flags); 6787 if (NegY != N) 6788 RemoveDeadNode(NegY); 6789 return N; 6790 } 6791 6792 // Negate the Y if it is not expensive. 
6793 if (NegY) { 6794 Cost = std::min(CostY, CostZ); 6795 SDValue N = DAG.getNode(Opcode, DL, VT, X, NegY, NegZ, Flags); 6796 if (NegX != N) 6797 RemoveDeadNode(NegX); 6798 return N; 6799 } 6800 break; 6801 } 6802 6803 case ISD::FP_EXTEND: 6804 case ISD::FSIN: 6805 if (SDValue NegV = getNegatedExpression(Op.getOperand(0), DAG, LegalOps, 6806 OptForSize, Cost, Depth)) 6807 return DAG.getNode(Opcode, DL, VT, NegV); 6808 break; 6809 case ISD::FP_ROUND: 6810 if (SDValue NegV = getNegatedExpression(Op.getOperand(0), DAG, LegalOps, 6811 OptForSize, Cost, Depth)) 6812 return DAG.getNode(ISD::FP_ROUND, DL, VT, NegV, Op.getOperand(1)); 6813 break; 6814 } 6815 6816 return SDValue(); 6817 } 6818 6819 //===----------------------------------------------------------------------===// 6820 // Legalization Utilities 6821 //===----------------------------------------------------------------------===// 6822 6823 bool TargetLowering::expandMUL_LOHI(unsigned Opcode, EVT VT, const SDLoc &dl, 6824 SDValue LHS, SDValue RHS, 6825 SmallVectorImpl<SDValue> &Result, 6826 EVT HiLoVT, SelectionDAG &DAG, 6827 MulExpansionKind Kind, SDValue LL, 6828 SDValue LH, SDValue RL, SDValue RH) const { 6829 assert(Opcode == ISD::MUL || Opcode == ISD::UMUL_LOHI || 6830 Opcode == ISD::SMUL_LOHI); 6831 6832 bool HasMULHS = (Kind == MulExpansionKind::Always) || 6833 isOperationLegalOrCustom(ISD::MULHS, HiLoVT); 6834 bool HasMULHU = (Kind == MulExpansionKind::Always) || 6835 isOperationLegalOrCustom(ISD::MULHU, HiLoVT); 6836 bool HasSMUL_LOHI = (Kind == MulExpansionKind::Always) || 6837 isOperationLegalOrCustom(ISD::SMUL_LOHI, HiLoVT); 6838 bool HasUMUL_LOHI = (Kind == MulExpansionKind::Always) || 6839 isOperationLegalOrCustom(ISD::UMUL_LOHI, HiLoVT); 6840 6841 if (!HasMULHU && !HasMULHS && !HasUMUL_LOHI && !HasSMUL_LOHI) 6842 return false; 6843 6844 unsigned OuterBitSize = VT.getScalarSizeInBits(); 6845 unsigned InnerBitSize = HiLoVT.getScalarSizeInBits(); 6846 6847 // LL, LH, RL, and RH must be either all NULL or all set to a value. 6848 assert((LL.getNode() && LH.getNode() && RL.getNode() && RH.getNode()) || 6849 (!LL.getNode() && !LH.getNode() && !RL.getNode() && !RH.getNode())); 6850 6851 SDVTList VTs = DAG.getVTList(HiLoVT, HiLoVT); 6852 auto MakeMUL_LOHI = [&](SDValue L, SDValue R, SDValue &Lo, SDValue &Hi, 6853 bool Signed) -> bool { 6854 if ((Signed && HasSMUL_LOHI) || (!Signed && HasUMUL_LOHI)) { 6855 Lo = DAG.getNode(Signed ? ISD::SMUL_LOHI : ISD::UMUL_LOHI, dl, VTs, L, R); 6856 Hi = SDValue(Lo.getNode(), 1); 6857 return true; 6858 } 6859 if ((Signed && HasMULHS) || (!Signed && HasMULHU)) { 6860 Lo = DAG.getNode(ISD::MUL, dl, HiLoVT, L, R); 6861 Hi = DAG.getNode(Signed ? ISD::MULHS : ISD::MULHU, dl, HiLoVT, L, R); 6862 return true; 6863 } 6864 return false; 6865 }; 6866 6867 SDValue Lo, Hi; 6868 6869 if (!LL.getNode() && !RL.getNode() && 6870 isOperationLegalOrCustom(ISD::TRUNCATE, HiLoVT)) { 6871 LL = DAG.getNode(ISD::TRUNCATE, dl, HiLoVT, LHS); 6872 RL = DAG.getNode(ISD::TRUNCATE, dl, HiLoVT, RHS); 6873 } 6874 6875 if (!LL.getNode()) 6876 return false; 6877 6878 APInt HighMask = APInt::getHighBitsSet(OuterBitSize, InnerBitSize); 6879 if (DAG.MaskedValueIsZero(LHS, HighMask) && 6880 DAG.MaskedValueIsZero(RHS, HighMask)) { 6881 // The inputs are both zero-extended. 
6882 if (MakeMUL_LOHI(LL, RL, Lo, Hi, false)) { 6883 Result.push_back(Lo); 6884 Result.push_back(Hi); 6885 if (Opcode != ISD::MUL) { 6886 SDValue Zero = DAG.getConstant(0, dl, HiLoVT); 6887 Result.push_back(Zero); 6888 Result.push_back(Zero); 6889 } 6890 return true; 6891 } 6892 } 6893 6894 if (!VT.isVector() && Opcode == ISD::MUL && 6895 DAG.ComputeNumSignBits(LHS) > InnerBitSize && 6896 DAG.ComputeNumSignBits(RHS) > InnerBitSize) { 6897 // The input values are both sign-extended. 6898 // TODO non-MUL case? 6899 if (MakeMUL_LOHI(LL, RL, Lo, Hi, true)) { 6900 Result.push_back(Lo); 6901 Result.push_back(Hi); 6902 return true; 6903 } 6904 } 6905 6906 unsigned ShiftAmount = OuterBitSize - InnerBitSize; 6907 EVT ShiftAmountTy = getShiftAmountTy(VT, DAG.getDataLayout()); 6908 SDValue Shift = DAG.getConstant(ShiftAmount, dl, ShiftAmountTy); 6909 6910 if (!LH.getNode() && !RH.getNode() && 6911 isOperationLegalOrCustom(ISD::SRL, VT) && 6912 isOperationLegalOrCustom(ISD::TRUNCATE, HiLoVT)) { 6913 LH = DAG.getNode(ISD::SRL, dl, VT, LHS, Shift); 6914 LH = DAG.getNode(ISD::TRUNCATE, dl, HiLoVT, LH); 6915 RH = DAG.getNode(ISD::SRL, dl, VT, RHS, Shift); 6916 RH = DAG.getNode(ISD::TRUNCATE, dl, HiLoVT, RH); 6917 } 6918 6919 if (!LH.getNode()) 6920 return false; 6921 6922 if (!MakeMUL_LOHI(LL, RL, Lo, Hi, false)) 6923 return false; 6924 6925 Result.push_back(Lo); 6926 6927 if (Opcode == ISD::MUL) { 6928 RH = DAG.getNode(ISD::MUL, dl, HiLoVT, LL, RH); 6929 LH = DAG.getNode(ISD::MUL, dl, HiLoVT, LH, RL); 6930 Hi = DAG.getNode(ISD::ADD, dl, HiLoVT, Hi, RH); 6931 Hi = DAG.getNode(ISD::ADD, dl, HiLoVT, Hi, LH); 6932 Result.push_back(Hi); 6933 return true; 6934 } 6935 6936 // Compute the full width result. 6937 auto Merge = [&](SDValue Lo, SDValue Hi) -> SDValue { 6938 Lo = DAG.getNode(ISD::ZERO_EXTEND, dl, VT, Lo); 6939 Hi = DAG.getNode(ISD::ZERO_EXTEND, dl, VT, Hi); 6940 Hi = DAG.getNode(ISD::SHL, dl, VT, Hi, Shift); 6941 return DAG.getNode(ISD::OR, dl, VT, Lo, Hi); 6942 }; 6943 6944 SDValue Next = DAG.getNode(ISD::ZERO_EXTEND, dl, VT, Hi); 6945 if (!MakeMUL_LOHI(LL, RH, Lo, Hi, false)) 6946 return false; 6947 6948 // This is effectively the add part of a multiply-add of half-sized operands, 6949 // so it cannot overflow. 
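  // (Bound: with n = InnerBitSize, the high half of LL*RL is at most 2^n - 2
  // and LL*RH is at most (2^n - 1)^2, so their sum stays below 2^(2n) and
  // fits in the double-width value without wrapping.)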
6950 Next = DAG.getNode(ISD::ADD, dl, VT, Next, Merge(Lo, Hi)); 6951 6952 if (!MakeMUL_LOHI(LH, RL, Lo, Hi, false)) 6953 return false; 6954 6955 SDValue Zero = DAG.getConstant(0, dl, HiLoVT); 6956 EVT BoolType = getSetCCResultType(DAG.getDataLayout(), *DAG.getContext(), VT); 6957 6958 bool UseGlue = (isOperationLegalOrCustom(ISD::ADDC, VT) && 6959 isOperationLegalOrCustom(ISD::ADDE, VT)); 6960 if (UseGlue) 6961 Next = DAG.getNode(ISD::ADDC, dl, DAG.getVTList(VT, MVT::Glue), Next, 6962 Merge(Lo, Hi)); 6963 else 6964 Next = DAG.getNode(ISD::ADDCARRY, dl, DAG.getVTList(VT, BoolType), Next, 6965 Merge(Lo, Hi), DAG.getConstant(0, dl, BoolType)); 6966 6967 SDValue Carry = Next.getValue(1); 6968 Result.push_back(DAG.getNode(ISD::TRUNCATE, dl, HiLoVT, Next)); 6969 Next = DAG.getNode(ISD::SRL, dl, VT, Next, Shift); 6970 6971 if (!MakeMUL_LOHI(LH, RH, Lo, Hi, Opcode == ISD::SMUL_LOHI)) 6972 return false; 6973 6974 if (UseGlue) 6975 Hi = DAG.getNode(ISD::ADDE, dl, DAG.getVTList(HiLoVT, MVT::Glue), Hi, Zero, 6976 Carry); 6977 else 6978 Hi = DAG.getNode(ISD::ADDCARRY, dl, DAG.getVTList(HiLoVT, BoolType), Hi, 6979 Zero, Carry); 6980 6981 Next = DAG.getNode(ISD::ADD, dl, VT, Next, Merge(Lo, Hi)); 6982 6983 if (Opcode == ISD::SMUL_LOHI) { 6984 SDValue NextSub = DAG.getNode(ISD::SUB, dl, VT, Next, 6985 DAG.getNode(ISD::ZERO_EXTEND, dl, VT, RL)); 6986 Next = DAG.getSelectCC(dl, LH, Zero, NextSub, Next, ISD::SETLT); 6987 6988 NextSub = DAG.getNode(ISD::SUB, dl, VT, Next, 6989 DAG.getNode(ISD::ZERO_EXTEND, dl, VT, LL)); 6990 Next = DAG.getSelectCC(dl, RH, Zero, NextSub, Next, ISD::SETLT); 6991 } 6992 6993 Result.push_back(DAG.getNode(ISD::TRUNCATE, dl, HiLoVT, Next)); 6994 Next = DAG.getNode(ISD::SRL, dl, VT, Next, Shift); 6995 Result.push_back(DAG.getNode(ISD::TRUNCATE, dl, HiLoVT, Next)); 6996 return true; 6997 } 6998 6999 bool TargetLowering::expandMUL(SDNode *N, SDValue &Lo, SDValue &Hi, EVT HiLoVT, 7000 SelectionDAG &DAG, MulExpansionKind Kind, 7001 SDValue LL, SDValue LH, SDValue RL, 7002 SDValue RH) const { 7003 SmallVector<SDValue, 2> Result; 7004 bool Ok = expandMUL_LOHI(N->getOpcode(), N->getValueType(0), SDLoc(N), 7005 N->getOperand(0), N->getOperand(1), Result, HiLoVT, 7006 DAG, Kind, LL, LH, RL, RH); 7007 if (Ok) { 7008 assert(Result.size() == 2); 7009 Lo = Result[0]; 7010 Hi = Result[1]; 7011 } 7012 return Ok; 7013 } 7014 7015 // Check that (every element of) Z is undef or not an exact multiple of BW. 7016 static bool isNonZeroModBitWidthOrUndef(SDValue Z, unsigned BW) { 7017 return ISD::matchUnaryPredicate( 7018 Z, 7019 [=](ConstantSDNode *C) { return !C || C->getAPIntValue().urem(BW) != 0; }, 7020 true); 7021 } 7022 7023 SDValue TargetLowering::expandFunnelShift(SDNode *Node, 7024 SelectionDAG &DAG) const { 7025 EVT VT = Node->getValueType(0); 7026 7027 if (VT.isVector() && (!isOperationLegalOrCustom(ISD::SHL, VT) || 7028 !isOperationLegalOrCustom(ISD::SRL, VT) || 7029 !isOperationLegalOrCustom(ISD::SUB, VT) || 7030 !isOperationLegalOrCustomOrPromote(ISD::OR, VT))) 7031 return SDValue(); 7032 7033 SDValue X = Node->getOperand(0); 7034 SDValue Y = Node->getOperand(1); 7035 SDValue Z = Node->getOperand(2); 7036 7037 unsigned BW = VT.getScalarSizeInBits(); 7038 bool IsFSHL = Node->getOpcode() == ISD::FSHL; 7039 SDLoc DL(SDValue(Node, 0)); 7040 7041 EVT ShVT = Z.getValueType(); 7042 7043 // If a funnel shift in the other direction is more supported, use it. 7044 unsigned RevOpcode = IsFSHL ? 
ISD::FSHR : ISD::FSHL; 7045 if (!isOperationLegalOrCustom(Node->getOpcode(), VT) && 7046 isOperationLegalOrCustom(RevOpcode, VT) && isPowerOf2_32(BW)) { 7047 if (isNonZeroModBitWidthOrUndef(Z, BW)) { 7048 // fshl X, Y, Z -> fshr X, Y, -Z 7049 // fshr X, Y, Z -> fshl X, Y, -Z 7050 SDValue Zero = DAG.getConstant(0, DL, ShVT); 7051 Z = DAG.getNode(ISD::SUB, DL, VT, Zero, Z); 7052 } else { 7053 // fshl X, Y, Z -> fshr (srl X, 1), (fshr X, Y, 1), ~Z 7054 // fshr X, Y, Z -> fshl (fshl X, Y, 1), (shl Y, 1), ~Z 7055 SDValue One = DAG.getConstant(1, DL, ShVT); 7056 if (IsFSHL) { 7057 Y = DAG.getNode(RevOpcode, DL, VT, X, Y, One); 7058 X = DAG.getNode(ISD::SRL, DL, VT, X, One); 7059 } else { 7060 X = DAG.getNode(RevOpcode, DL, VT, X, Y, One); 7061 Y = DAG.getNode(ISD::SHL, DL, VT, Y, One); 7062 } 7063 Z = DAG.getNOT(DL, Z, ShVT); 7064 } 7065 return DAG.getNode(RevOpcode, DL, VT, X, Y, Z); 7066 } 7067 7068 SDValue ShX, ShY; 7069 SDValue ShAmt, InvShAmt; 7070 if (isNonZeroModBitWidthOrUndef(Z, BW)) { 7071 // fshl: X << C | Y >> (BW - C) 7072 // fshr: X << (BW - C) | Y >> C 7073 // where C = Z % BW is not zero 7074 SDValue BitWidthC = DAG.getConstant(BW, DL, ShVT); 7075 ShAmt = DAG.getNode(ISD::UREM, DL, ShVT, Z, BitWidthC); 7076 InvShAmt = DAG.getNode(ISD::SUB, DL, ShVT, BitWidthC, ShAmt); 7077 ShX = DAG.getNode(ISD::SHL, DL, VT, X, IsFSHL ? ShAmt : InvShAmt); 7078 ShY = DAG.getNode(ISD::SRL, DL, VT, Y, IsFSHL ? InvShAmt : ShAmt); 7079 } else { 7080 // fshl: X << (Z % BW) | Y >> 1 >> (BW - 1 - (Z % BW)) 7081 // fshr: X << 1 << (BW - 1 - (Z % BW)) | Y >> (Z % BW) 7082 SDValue Mask = DAG.getConstant(BW - 1, DL, ShVT); 7083 if (isPowerOf2_32(BW)) { 7084 // Z % BW -> Z & (BW - 1) 7085 ShAmt = DAG.getNode(ISD::AND, DL, ShVT, Z, Mask); 7086 // (BW - 1) - (Z % BW) -> ~Z & (BW - 1) 7087 InvShAmt = DAG.getNode(ISD::AND, DL, ShVT, DAG.getNOT(DL, Z, ShVT), Mask); 7088 } else { 7089 SDValue BitWidthC = DAG.getConstant(BW, DL, ShVT); 7090 ShAmt = DAG.getNode(ISD::UREM, DL, ShVT, Z, BitWidthC); 7091 InvShAmt = DAG.getNode(ISD::SUB, DL, ShVT, Mask, ShAmt); 7092 } 7093 7094 SDValue One = DAG.getConstant(1, DL, ShVT); 7095 if (IsFSHL) { 7096 ShX = DAG.getNode(ISD::SHL, DL, VT, X, ShAmt); 7097 SDValue ShY1 = DAG.getNode(ISD::SRL, DL, VT, Y, One); 7098 ShY = DAG.getNode(ISD::SRL, DL, VT, ShY1, InvShAmt); 7099 } else { 7100 SDValue ShX1 = DAG.getNode(ISD::SHL, DL, VT, X, One); 7101 ShX = DAG.getNode(ISD::SHL, DL, VT, ShX1, InvShAmt); 7102 ShY = DAG.getNode(ISD::SRL, DL, VT, Y, ShAmt); 7103 } 7104 } 7105 return DAG.getNode(ISD::OR, DL, VT, ShX, ShY); 7106 } 7107 7108 // TODO: Merge with expandFunnelShift. 7109 SDValue TargetLowering::expandROT(SDNode *Node, bool AllowVectorOps, 7110 SelectionDAG &DAG) const { 7111 EVT VT = Node->getValueType(0); 7112 unsigned EltSizeInBits = VT.getScalarSizeInBits(); 7113 bool IsLeft = Node->getOpcode() == ISD::ROTL; 7114 SDValue Op0 = Node->getOperand(0); 7115 SDValue Op1 = Node->getOperand(1); 7116 SDLoc DL(SDValue(Node, 0)); 7117 7118 EVT ShVT = Op1.getValueType(); 7119 SDValue Zero = DAG.getConstant(0, DL, ShVT); 7120 7121 // If a rotate in the other direction is more supported, use it. 7122 unsigned RevRot = IsLeft ? 
ISD::ROTR : ISD::ROTL; 7123 if (!isOperationLegalOrCustom(Node->getOpcode(), VT) && 7124 isOperationLegalOrCustom(RevRot, VT) && isPowerOf2_32(EltSizeInBits)) { 7125 SDValue Sub = DAG.getNode(ISD::SUB, DL, ShVT, Zero, Op1); 7126 return DAG.getNode(RevRot, DL, VT, Op0, Sub); 7127 } 7128 7129 if (!AllowVectorOps && VT.isVector() && 7130 (!isOperationLegalOrCustom(ISD::SHL, VT) || 7131 !isOperationLegalOrCustom(ISD::SRL, VT) || 7132 !isOperationLegalOrCustom(ISD::SUB, VT) || 7133 !isOperationLegalOrCustomOrPromote(ISD::OR, VT) || 7134 !isOperationLegalOrCustomOrPromote(ISD::AND, VT))) 7135 return SDValue(); 7136 7137 unsigned ShOpc = IsLeft ? ISD::SHL : ISD::SRL; 7138 unsigned HsOpc = IsLeft ? ISD::SRL : ISD::SHL; 7139 SDValue BitWidthMinusOneC = DAG.getConstant(EltSizeInBits - 1, DL, ShVT); 7140 SDValue ShVal; 7141 SDValue HsVal; 7142 if (isPowerOf2_32(EltSizeInBits)) { 7143 // (rotl x, c) -> x << (c & (w - 1)) | x >> (-c & (w - 1)) 7144 // (rotr x, c) -> x >> (c & (w - 1)) | x << (-c & (w - 1)) 7145 SDValue NegOp1 = DAG.getNode(ISD::SUB, DL, ShVT, Zero, Op1); 7146 SDValue ShAmt = DAG.getNode(ISD::AND, DL, ShVT, Op1, BitWidthMinusOneC); 7147 ShVal = DAG.getNode(ShOpc, DL, VT, Op0, ShAmt); 7148 SDValue HsAmt = DAG.getNode(ISD::AND, DL, ShVT, NegOp1, BitWidthMinusOneC); 7149 HsVal = DAG.getNode(HsOpc, DL, VT, Op0, HsAmt); 7150 } else { 7151 // (rotl x, c) -> x << (c % w) | x >> 1 >> (w - 1 - (c % w)) 7152 // (rotr x, c) -> x >> (c % w) | x << 1 << (w - 1 - (c % w)) 7153 SDValue BitWidthC = DAG.getConstant(EltSizeInBits, DL, ShVT); 7154 SDValue ShAmt = DAG.getNode(ISD::UREM, DL, ShVT, Op1, BitWidthC); 7155 ShVal = DAG.getNode(ShOpc, DL, VT, Op0, ShAmt); 7156 SDValue HsAmt = DAG.getNode(ISD::SUB, DL, ShVT, BitWidthMinusOneC, ShAmt); 7157 SDValue One = DAG.getConstant(1, DL, ShVT); 7158 HsVal = 7159 DAG.getNode(HsOpc, DL, VT, DAG.getNode(HsOpc, DL, VT, Op0, One), HsAmt); 7160 } 7161 return DAG.getNode(ISD::OR, DL, VT, ShVal, HsVal); 7162 } 7163 7164 void TargetLowering::expandShiftParts(SDNode *Node, SDValue &Lo, SDValue &Hi, 7165 SelectionDAG &DAG) const { 7166 assert(Node->getNumOperands() == 3 && "Not a double-shift!"); 7167 EVT VT = Node->getValueType(0); 7168 unsigned VTBits = VT.getScalarSizeInBits(); 7169 assert(isPowerOf2_32(VTBits) && "Power-of-two integer type expected"); 7170 7171 bool IsSHL = Node->getOpcode() == ISD::SHL_PARTS; 7172 bool IsSRA = Node->getOpcode() == ISD::SRA_PARTS; 7173 SDValue ShOpLo = Node->getOperand(0); 7174 SDValue ShOpHi = Node->getOperand(1); 7175 SDValue ShAmt = Node->getOperand(2); 7176 EVT ShAmtVT = ShAmt.getValueType(); 7177 EVT ShAmtCCVT = 7178 getSetCCResultType(DAG.getDataLayout(), *DAG.getContext(), ShAmtVT); 7179 SDLoc dl(Node); 7180 7181 // ISD::FSHL and ISD::FSHR have defined overflow behavior but ISD::SHL and 7182 // ISD::SRA/L nodes haven't. Insert an AND to be safe, it's usually optimized 7183 // away during isel. 7184 SDValue SafeShAmt = DAG.getNode(ISD::AND, dl, ShAmtVT, ShAmt, 7185 DAG.getConstant(VTBits - 1, dl, ShAmtVT)); 7186 SDValue Tmp1 = IsSRA ? DAG.getNode(ISD::SRA, dl, VT, ShOpHi, 7187 DAG.getConstant(VTBits - 1, dl, ShAmtVT)) 7188 : DAG.getConstant(0, dl, VT); 7189 7190 SDValue Tmp2, Tmp3; 7191 if (IsSHL) { 7192 Tmp2 = DAG.getNode(ISD::FSHL, dl, VT, ShOpHi, ShOpLo, ShAmt); 7193 Tmp3 = DAG.getNode(ISD::SHL, dl, VT, ShOpLo, SafeShAmt); 7194 } else { 7195 Tmp2 = DAG.getNode(ISD::FSHR, dl, VT, ShOpHi, ShOpLo, ShAmt); 7196 Tmp3 = DAG.getNode(IsSRA ? 
ISD::SRA : ISD::SRL, dl, VT, ShOpHi, SafeShAmt); 7197 } 7198 7199 // If the shift amount is larger or equal than the width of a part we don't 7200 // use the result from the FSHL/FSHR. Insert a test and select the appropriate 7201 // values for large shift amounts. 7202 SDValue AndNode = DAG.getNode(ISD::AND, dl, ShAmtVT, ShAmt, 7203 DAG.getConstant(VTBits, dl, ShAmtVT)); 7204 SDValue Cond = DAG.getSetCC(dl, ShAmtCCVT, AndNode, 7205 DAG.getConstant(0, dl, ShAmtVT), ISD::SETNE); 7206 7207 if (IsSHL) { 7208 Hi = DAG.getNode(ISD::SELECT, dl, VT, Cond, Tmp3, Tmp2); 7209 Lo = DAG.getNode(ISD::SELECT, dl, VT, Cond, Tmp1, Tmp3); 7210 } else { 7211 Lo = DAG.getNode(ISD::SELECT, dl, VT, Cond, Tmp3, Tmp2); 7212 Hi = DAG.getNode(ISD::SELECT, dl, VT, Cond, Tmp1, Tmp3); 7213 } 7214 } 7215 7216 bool TargetLowering::expandFP_TO_SINT(SDNode *Node, SDValue &Result, 7217 SelectionDAG &DAG) const { 7218 unsigned OpNo = Node->isStrictFPOpcode() ? 1 : 0; 7219 SDValue Src = Node->getOperand(OpNo); 7220 EVT SrcVT = Src.getValueType(); 7221 EVT DstVT = Node->getValueType(0); 7222 SDLoc dl(SDValue(Node, 0)); 7223 7224 // FIXME: Only f32 to i64 conversions are supported. 7225 if (SrcVT != MVT::f32 || DstVT != MVT::i64) 7226 return false; 7227 7228 if (Node->isStrictFPOpcode()) 7229 // When a NaN is converted to an integer a trap is allowed. We can't 7230 // use this expansion here because it would eliminate that trap. Other 7231 // traps are also allowed and cannot be eliminated. See 7232 // IEEE 754-2008 sec 5.8. 7233 return false; 7234 7235 // Expand f32 -> i64 conversion 7236 // This algorithm comes from compiler-rt's implementation of fixsfdi: 7237 // https://github.com/llvm/llvm-project/blob/main/compiler-rt/lib/builtins/fixsfdi.c 7238 unsigned SrcEltBits = SrcVT.getScalarSizeInBits(); 7239 EVT IntVT = SrcVT.changeTypeToInteger(); 7240 EVT IntShVT = getShiftAmountTy(IntVT, DAG.getDataLayout()); 7241 7242 SDValue ExponentMask = DAG.getConstant(0x7F800000, dl, IntVT); 7243 SDValue ExponentLoBit = DAG.getConstant(23, dl, IntVT); 7244 SDValue Bias = DAG.getConstant(127, dl, IntVT); 7245 SDValue SignMask = DAG.getConstant(APInt::getSignMask(SrcEltBits), dl, IntVT); 7246 SDValue SignLowBit = DAG.getConstant(SrcEltBits - 1, dl, IntVT); 7247 SDValue MantissaMask = DAG.getConstant(0x007FFFFF, dl, IntVT); 7248 7249 SDValue Bits = DAG.getNode(ISD::BITCAST, dl, IntVT, Src); 7250 7251 SDValue ExponentBits = DAG.getNode( 7252 ISD::SRL, dl, IntVT, DAG.getNode(ISD::AND, dl, IntVT, Bits, ExponentMask), 7253 DAG.getZExtOrTrunc(ExponentLoBit, dl, IntShVT)); 7254 SDValue Exponent = DAG.getNode(ISD::SUB, dl, IntVT, ExponentBits, Bias); 7255 7256 SDValue Sign = DAG.getNode(ISD::SRA, dl, IntVT, 7257 DAG.getNode(ISD::AND, dl, IntVT, Bits, SignMask), 7258 DAG.getZExtOrTrunc(SignLowBit, dl, IntShVT)); 7259 Sign = DAG.getSExtOrTrunc(Sign, dl, DstVT); 7260 7261 SDValue R = DAG.getNode(ISD::OR, dl, IntVT, 7262 DAG.getNode(ISD::AND, dl, IntVT, Bits, MantissaMask), 7263 DAG.getConstant(0x00800000, dl, IntVT)); 7264 7265 R = DAG.getZExtOrTrunc(R, dl, DstVT); 7266 7267 R = DAG.getSelectCC( 7268 dl, Exponent, ExponentLoBit, 7269 DAG.getNode(ISD::SHL, dl, DstVT, R, 7270 DAG.getZExtOrTrunc( 7271 DAG.getNode(ISD::SUB, dl, IntVT, Exponent, ExponentLoBit), 7272 dl, IntShVT)), 7273 DAG.getNode(ISD::SRL, dl, DstVT, R, 7274 DAG.getZExtOrTrunc( 7275 DAG.getNode(ISD::SUB, dl, IntVT, ExponentLoBit, Exponent), 7276 dl, IntShVT)), 7277 ISD::SETGT); 7278 7279 SDValue Ret = DAG.getNode(ISD::SUB, dl, DstVT, 7280 DAG.getNode(ISD::XOR, dl, DstVT, R, Sign), 
Sign); 7281 7282 Result = DAG.getSelectCC(dl, Exponent, DAG.getConstant(0, dl, IntVT), 7283 DAG.getConstant(0, dl, DstVT), Ret, ISD::SETLT); 7284 return true; 7285 } 7286 7287 bool TargetLowering::expandFP_TO_UINT(SDNode *Node, SDValue &Result, 7288 SDValue &Chain, 7289 SelectionDAG &DAG) const { 7290 SDLoc dl(SDValue(Node, 0)); 7291 unsigned OpNo = Node->isStrictFPOpcode() ? 1 : 0; 7292 SDValue Src = Node->getOperand(OpNo); 7293 7294 EVT SrcVT = Src.getValueType(); 7295 EVT DstVT = Node->getValueType(0); 7296 EVT SetCCVT = 7297 getSetCCResultType(DAG.getDataLayout(), *DAG.getContext(), SrcVT); 7298 EVT DstSetCCVT = 7299 getSetCCResultType(DAG.getDataLayout(), *DAG.getContext(), DstVT); 7300 7301 // Only expand vector types if we have the appropriate vector bit operations. 7302 unsigned SIntOpcode = Node->isStrictFPOpcode() ? ISD::STRICT_FP_TO_SINT : 7303 ISD::FP_TO_SINT; 7304 if (DstVT.isVector() && (!isOperationLegalOrCustom(SIntOpcode, DstVT) || 7305 !isOperationLegalOrCustomOrPromote(ISD::XOR, SrcVT))) 7306 return false; 7307 7308 // If the maximum float value is smaller then the signed integer range, 7309 // the destination signmask can't be represented by the float, so we can 7310 // just use FP_TO_SINT directly. 7311 const fltSemantics &APFSem = DAG.EVTToAPFloatSemantics(SrcVT); 7312 APFloat APF(APFSem, APInt::getZero(SrcVT.getScalarSizeInBits())); 7313 APInt SignMask = APInt::getSignMask(DstVT.getScalarSizeInBits()); 7314 if (APFloat::opOverflow & 7315 APF.convertFromAPInt(SignMask, false, APFloat::rmNearestTiesToEven)) { 7316 if (Node->isStrictFPOpcode()) { 7317 Result = DAG.getNode(ISD::STRICT_FP_TO_SINT, dl, { DstVT, MVT::Other }, 7318 { Node->getOperand(0), Src }); 7319 Chain = Result.getValue(1); 7320 } else 7321 Result = DAG.getNode(ISD::FP_TO_SINT, dl, DstVT, Src); 7322 return true; 7323 } 7324 7325 // Don't expand it if there isn't cheap fsub instruction. 7326 if (!isOperationLegalOrCustom( 7327 Node->isStrictFPOpcode() ? ISD::STRICT_FSUB : ISD::FSUB, SrcVT)) 7328 return false; 7329 7330 SDValue Cst = DAG.getConstantFP(APF, dl, SrcVT); 7331 SDValue Sel; 7332 7333 if (Node->isStrictFPOpcode()) { 7334 Sel = DAG.getSetCC(dl, SetCCVT, Src, Cst, ISD::SETLT, 7335 Node->getOperand(0), /*IsSignaling*/ true); 7336 Chain = Sel.getValue(1); 7337 } else { 7338 Sel = DAG.getSetCC(dl, SetCCVT, Src, Cst, ISD::SETLT); 7339 } 7340 7341 bool Strict = Node->isStrictFPOpcode() || 7342 shouldUseStrictFP_TO_INT(SrcVT, DstVT, /*IsSigned*/ false); 7343 7344 if (Strict) { 7345 // Expand based on maximum range of FP_TO_SINT, if the value exceeds the 7346 // signmask then offset (the result of which should be fully representable). 7347 // Sel = Src < 0x8000000000000000 7348 // FltOfs = select Sel, 0, 0x8000000000000000 7349 // IntOfs = select Sel, 0, 0x8000000000000000 7350 // Result = fp_to_sint(Src - FltOfs) ^ IntOfs 7351 7352 // TODO: Should any fast-math-flags be set for the FSUB? 
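    // (Worked case, assuming f64 -> i64: for Src = 2^63, Sel is false, so
    // FltOfs = 2^63 and IntOfs = 0x8000000000000000; fp_to_sint(Src - FltOfs)
    // is 0, and 0 ^ IntOfs gives the expected unsigned result 2^63.)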
7353 SDValue FltOfs = DAG.getSelect(dl, SrcVT, Sel, 7354 DAG.getConstantFP(0.0, dl, SrcVT), Cst); 7355 Sel = DAG.getBoolExtOrTrunc(Sel, dl, DstSetCCVT, DstVT); 7356 SDValue IntOfs = DAG.getSelect(dl, DstVT, Sel, 7357 DAG.getConstant(0, dl, DstVT), 7358 DAG.getConstant(SignMask, dl, DstVT)); 7359 SDValue SInt; 7360 if (Node->isStrictFPOpcode()) { 7361 SDValue Val = DAG.getNode(ISD::STRICT_FSUB, dl, { SrcVT, MVT::Other }, 7362 { Chain, Src, FltOfs }); 7363 SInt = DAG.getNode(ISD::STRICT_FP_TO_SINT, dl, { DstVT, MVT::Other }, 7364 { Val.getValue(1), Val }); 7365 Chain = SInt.getValue(1); 7366 } else { 7367 SDValue Val = DAG.getNode(ISD::FSUB, dl, SrcVT, Src, FltOfs); 7368 SInt = DAG.getNode(ISD::FP_TO_SINT, dl, DstVT, Val); 7369 } 7370 Result = DAG.getNode(ISD::XOR, dl, DstVT, SInt, IntOfs); 7371 } else { 7372 // Expand based on maximum range of FP_TO_SINT: 7373 // True = fp_to_sint(Src) 7374 // False = 0x8000000000000000 + fp_to_sint(Src - 0x8000000000000000) 7375 // Result = select (Src < 0x8000000000000000), True, False 7376 7377 SDValue True = DAG.getNode(ISD::FP_TO_SINT, dl, DstVT, Src); 7378 // TODO: Should any fast-math-flags be set for the FSUB? 7379 SDValue False = DAG.getNode(ISD::FP_TO_SINT, dl, DstVT, 7380 DAG.getNode(ISD::FSUB, dl, SrcVT, Src, Cst)); 7381 False = DAG.getNode(ISD::XOR, dl, DstVT, False, 7382 DAG.getConstant(SignMask, dl, DstVT)); 7383 Sel = DAG.getBoolExtOrTrunc(Sel, dl, DstSetCCVT, DstVT); 7384 Result = DAG.getSelect(dl, DstVT, Sel, True, False); 7385 } 7386 return true; 7387 } 7388 7389 bool TargetLowering::expandUINT_TO_FP(SDNode *Node, SDValue &Result, 7390 SDValue &Chain, 7391 SelectionDAG &DAG) const { 7392 // This transform is not correct for converting 0 when rounding mode is set 7393 // to round toward negative infinity which will produce -0.0. So disable under 7394 // strictfp. 7395 if (Node->isStrictFPOpcode()) 7396 return false; 7397 7398 SDValue Src = Node->getOperand(0); 7399 EVT SrcVT = Src.getValueType(); 7400 EVT DstVT = Node->getValueType(0); 7401 7402 if (SrcVT.getScalarType() != MVT::i64 || DstVT.getScalarType() != MVT::f64) 7403 return false; 7404 7405 // Only expand vector types if we have the appropriate vector bit operations. 7406 if (SrcVT.isVector() && (!isOperationLegalOrCustom(ISD::SRL, SrcVT) || 7407 !isOperationLegalOrCustom(ISD::FADD, DstVT) || 7408 !isOperationLegalOrCustom(ISD::FSUB, DstVT) || 7409 !isOperationLegalOrCustomOrPromote(ISD::OR, SrcVT) || 7410 !isOperationLegalOrCustomOrPromote(ISD::AND, SrcVT))) 7411 return false; 7412 7413 SDLoc dl(SDValue(Node, 0)); 7414 EVT ShiftVT = getShiftAmountTy(SrcVT, DAG.getDataLayout()); 7415 7416 // Implementation of unsigned i64 to f64 following the algorithm in 7417 // __floatundidf in compiler_rt. This implementation performs rounding 7418 // correctly in all rounding modes with the exception of converting 0 7419 // when rounding toward negative infinity. In that case the fsub will produce 7420 // -0.0. This will be added to +0.0 and produce -0.0 which is incorrect. 
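  // The constants below encode that scheme: OR'ing the low 32 bits of the
  // input into the mantissa of 2^52 (0x4330...) yields exactly 2^52 + Lo, and
  // OR'ing the high 32 bits into 2^84 (0x4530...) yields 2^84 + Hi * 2^32;
  // subtracting (2^84 + 2^52) and adding the low part leaves Hi * 2^32 + Lo
  // with a single rounding in the final FADD.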
7421 SDValue TwoP52 = DAG.getConstant(UINT64_C(0x4330000000000000), dl, SrcVT); 7422 SDValue TwoP84PlusTwoP52 = DAG.getConstantFP( 7423 BitsToDouble(UINT64_C(0x4530000000100000)), dl, DstVT); 7424 SDValue TwoP84 = DAG.getConstant(UINT64_C(0x4530000000000000), dl, SrcVT); 7425 SDValue LoMask = DAG.getConstant(UINT64_C(0x00000000FFFFFFFF), dl, SrcVT); 7426 SDValue HiShift = DAG.getConstant(32, dl, ShiftVT); 7427 7428 SDValue Lo = DAG.getNode(ISD::AND, dl, SrcVT, Src, LoMask); 7429 SDValue Hi = DAG.getNode(ISD::SRL, dl, SrcVT, Src, HiShift); 7430 SDValue LoOr = DAG.getNode(ISD::OR, dl, SrcVT, Lo, TwoP52); 7431 SDValue HiOr = DAG.getNode(ISD::OR, dl, SrcVT, Hi, TwoP84); 7432 SDValue LoFlt = DAG.getBitcast(DstVT, LoOr); 7433 SDValue HiFlt = DAG.getBitcast(DstVT, HiOr); 7434 SDValue HiSub = 7435 DAG.getNode(ISD::FSUB, dl, DstVT, HiFlt, TwoP84PlusTwoP52); 7436 Result = DAG.getNode(ISD::FADD, dl, DstVT, LoFlt, HiSub); 7437 return true; 7438 } 7439 7440 SDValue 7441 TargetLowering::createSelectForFMINNUM_FMAXNUM(SDNode *Node, 7442 SelectionDAG &DAG) const { 7443 unsigned Opcode = Node->getOpcode(); 7444 assert((Opcode == ISD::FMINNUM || Opcode == ISD::FMAXNUM || 7445 Opcode == ISD::STRICT_FMINNUM || Opcode == ISD::STRICT_FMAXNUM) && 7446 "Wrong opcode"); 7447 7448 if (Node->getFlags().hasNoNaNs()) { 7449 ISD::CondCode Pred = Opcode == ISD::FMINNUM ? ISD::SETLT : ISD::SETGT; 7450 SDValue Op1 = Node->getOperand(0); 7451 SDValue Op2 = Node->getOperand(1); 7452 SDValue SelCC = DAG.getSelectCC(SDLoc(Node), Op1, Op2, Op1, Op2, Pred); 7453 // Copy FMF flags, but always set the no-signed-zeros flag 7454 // as this is implied by the FMINNUM/FMAXNUM semantics. 7455 SDNodeFlags Flags = Node->getFlags(); 7456 Flags.setNoSignedZeros(true); 7457 SelCC->setFlags(Flags); 7458 return SelCC; 7459 } 7460 7461 return SDValue(); 7462 } 7463 7464 SDValue TargetLowering::expandFMINNUM_FMAXNUM(SDNode *Node, 7465 SelectionDAG &DAG) const { 7466 SDLoc dl(Node); 7467 unsigned NewOp = Node->getOpcode() == ISD::FMINNUM ? 7468 ISD::FMINNUM_IEEE : ISD::FMAXNUM_IEEE; 7469 EVT VT = Node->getValueType(0); 7470 7471 if (VT.isScalableVector()) 7472 report_fatal_error( 7473 "Expanding fminnum/fmaxnum for scalable vectors is undefined."); 7474 7475 if (isOperationLegalOrCustom(NewOp, VT)) { 7476 SDValue Quiet0 = Node->getOperand(0); 7477 SDValue Quiet1 = Node->getOperand(1); 7478 7479 if (!Node->getFlags().hasNoNaNs()) { 7480 // Insert canonicalizes if it's possible we need to quiet to get correct 7481 // sNaN behavior. 7482 if (!DAG.isKnownNeverSNaN(Quiet0)) { 7483 Quiet0 = DAG.getNode(ISD::FCANONICALIZE, dl, VT, Quiet0, 7484 Node->getFlags()); 7485 } 7486 if (!DAG.isKnownNeverSNaN(Quiet1)) { 7487 Quiet1 = DAG.getNode(ISD::FCANONICALIZE, dl, VT, Quiet1, 7488 Node->getFlags()); 7489 } 7490 } 7491 7492 return DAG.getNode(NewOp, dl, VT, Quiet0, Quiet1, Node->getFlags()); 7493 } 7494 7495 // If the target has FMINIMUM/FMAXIMUM but not FMINNUM/FMAXNUM use that 7496 // instead if there are no NaNs. 7497 if (Node->getFlags().hasNoNaNs()) { 7498 unsigned IEEE2018Op = 7499 Node->getOpcode() == ISD::FMINNUM ? 
ISD::FMINIMUM : ISD::FMAXIMUM; 7500 if (isOperationLegalOrCustom(IEEE2018Op, VT)) { 7501 return DAG.getNode(IEEE2018Op, dl, VT, Node->getOperand(0), 7502 Node->getOperand(1), Node->getFlags()); 7503 } 7504 } 7505 7506 if (SDValue SelCC = createSelectForFMINNUM_FMAXNUM(Node, DAG)) 7507 return SelCC; 7508 7509 return SDValue(); 7510 } 7511 7512 SDValue TargetLowering::expandIS_FPCLASS(EVT ResultVT, SDValue Op, 7513 unsigned Test, SDNodeFlags Flags, 7514 const SDLoc &DL, 7515 SelectionDAG &DAG) const { 7516 EVT OperandVT = Op.getValueType(); 7517 assert(OperandVT.isFloatingPoint()); 7518 7519 // Degenerated cases. 7520 if (Test == 0) 7521 return DAG.getBoolConstant(false, DL, ResultVT, OperandVT); 7522 if ((Test & fcAllFlags) == fcAllFlags) 7523 return DAG.getBoolConstant(true, DL, ResultVT, OperandVT); 7524 7525 // PPC double double is a pair of doubles, of which the higher part determines 7526 // the value class. 7527 if (OperandVT == MVT::ppcf128) { 7528 Op = DAG.getNode(ISD::EXTRACT_ELEMENT, DL, MVT::f64, Op, 7529 DAG.getConstant(1, DL, MVT::i32)); 7530 OperandVT = MVT::f64; 7531 } 7532 7533 // Some checks may be represented as inversion of simpler check, for example 7534 // "inf|normal|subnormal|zero" => !"nan". 7535 bool IsInverted = false; 7536 if (unsigned InvertedCheck = getInvertedFPClassTest(Test)) { 7537 IsInverted = true; 7538 Test = InvertedCheck; 7539 } 7540 7541 // Floating-point type properties. 7542 EVT ScalarFloatVT = OperandVT.getScalarType(); 7543 const Type *FloatTy = ScalarFloatVT.getTypeForEVT(*DAG.getContext()); 7544 const llvm::fltSemantics &Semantics = FloatTy->getFltSemantics(); 7545 bool IsF80 = (ScalarFloatVT == MVT::f80); 7546 7547 // Some checks can be implemented using float comparisons, if floating point 7548 // exceptions are ignored. 7549 if (Flags.hasNoFPExcept() && 7550 isOperationLegalOrCustom(ISD::SETCC, OperandVT.getScalarType())) { 7551 if (Test == fcZero) 7552 return DAG.getSetCC(DL, ResultVT, Op, 7553 DAG.getConstantFP(0.0, DL, OperandVT), 7554 IsInverted ? ISD::SETUNE : ISD::SETOEQ); 7555 if (Test == fcNan) 7556 return DAG.getSetCC(DL, ResultVT, Op, Op, 7557 IsInverted ? ISD::SETO : ISD::SETUO); 7558 } 7559 7560 // In the general case use integer operations. 7561 unsigned BitSize = OperandVT.getScalarSizeInBits(); 7562 EVT IntVT = EVT::getIntegerVT(*DAG.getContext(), BitSize); 7563 if (OperandVT.isVector()) 7564 IntVT = EVT::getVectorVT(*DAG.getContext(), IntVT, 7565 OperandVT.getVectorElementCount()); 7566 SDValue OpAsInt = DAG.getBitcast(IntVT, Op); 7567 7568 // Various masks. 7569 APInt SignBit = APInt::getSignMask(BitSize); 7570 APInt ValueMask = APInt::getSignedMaxValue(BitSize); // All bits but sign. 7571 APInt Inf = APFloat::getInf(Semantics).bitcastToAPInt(); // Exp and int bit. 
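  // (For f32 these masks are: SignBit = 0x80000000, ValueMask = 0x7FFFFFFF and
  // Inf = 0x7F800000; the all-ones mantissa computed below is 0x007FFFFF and
  // the quiet-NaN bit is 0x00400000.)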
7572 const unsigned ExplicitIntBitInF80 = 63; 7573 APInt ExpMask = Inf; 7574 if (IsF80) 7575 ExpMask.clearBit(ExplicitIntBitInF80); 7576 APInt AllOneMantissa = APFloat::getLargest(Semantics).bitcastToAPInt() & ~Inf; 7577 APInt QNaNBitMask = 7578 APInt::getOneBitSet(BitSize, AllOneMantissa.getActiveBits() - 1); 7579 APInt InvertionMask = APInt::getAllOnesValue(ResultVT.getScalarSizeInBits()); 7580 7581 SDValue ValueMaskV = DAG.getConstant(ValueMask, DL, IntVT); 7582 SDValue SignBitV = DAG.getConstant(SignBit, DL, IntVT); 7583 SDValue ExpMaskV = DAG.getConstant(ExpMask, DL, IntVT); 7584 SDValue ZeroV = DAG.getConstant(0, DL, IntVT); 7585 SDValue InfV = DAG.getConstant(Inf, DL, IntVT); 7586 SDValue ResultInvertionMask = DAG.getConstant(InvertionMask, DL, ResultVT); 7587 7588 SDValue Res; 7589 const auto appendResult = [&](SDValue PartialRes) { 7590 if (PartialRes) { 7591 if (Res) 7592 Res = DAG.getNode(ISD::OR, DL, ResultVT, Res, PartialRes); 7593 else 7594 Res = PartialRes; 7595 } 7596 }; 7597 7598 SDValue IntBitIsSetV; // Explicit integer bit in f80 mantissa is set. 7599 const auto getIntBitIsSet = [&]() -> SDValue { 7600 if (!IntBitIsSetV) { 7601 APInt IntBitMask(BitSize, 0); 7602 IntBitMask.setBit(ExplicitIntBitInF80); 7603 SDValue IntBitMaskV = DAG.getConstant(IntBitMask, DL, IntVT); 7604 SDValue IntBitV = DAG.getNode(ISD::AND, DL, IntVT, OpAsInt, IntBitMaskV); 7605 IntBitIsSetV = DAG.getSetCC(DL, ResultVT, IntBitV, ZeroV, ISD::SETNE); 7606 } 7607 return IntBitIsSetV; 7608 }; 7609 7610 // Split the value into sign bit and absolute value. 7611 SDValue AbsV = DAG.getNode(ISD::AND, DL, IntVT, OpAsInt, ValueMaskV); 7612 SDValue SignV = DAG.getSetCC(DL, ResultVT, OpAsInt, 7613 DAG.getConstant(0.0, DL, IntVT), ISD::SETLT); 7614 7615 // Tests that involve more than one class should be processed first. 7616 SDValue PartialRes; 7617 7618 if (IsF80) 7619 ; // Detect finite numbers of f80 by checking individual classes because 7620 // they have different settings of the explicit integer bit. 7621 else if ((Test & fcFinite) == fcFinite) { 7622 // finite(V) ==> abs(V) < exp_mask 7623 PartialRes = DAG.getSetCC(DL, ResultVT, AbsV, ExpMaskV, ISD::SETLT); 7624 Test &= ~fcFinite; 7625 } else if ((Test & fcFinite) == fcPosFinite) { 7626 // finite(V) && V > 0 ==> V < exp_mask 7627 PartialRes = DAG.getSetCC(DL, ResultVT, OpAsInt, ExpMaskV, ISD::SETULT); 7628 Test &= ~fcPosFinite; 7629 } else if ((Test & fcFinite) == fcNegFinite) { 7630 // finite(V) && V < 0 ==> abs(V) < exp_mask && signbit == 1 7631 PartialRes = DAG.getSetCC(DL, ResultVT, AbsV, ExpMaskV, ISD::SETLT); 7632 PartialRes = DAG.getNode(ISD::AND, DL, ResultVT, PartialRes, SignV); 7633 Test &= ~fcNegFinite; 7634 } 7635 appendResult(PartialRes); 7636 7637 // Check for individual classes. 
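  // Note (explanatory only): on the integer view used here, +0.0 is the
  // all-zero pattern, -0.0 is just the sign bit, +Inf is exactly the exponent
  // field and -Inf is sign | exponent, so each class test below reduces to an
  // integer comparison against a constant.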
7638 7639 if (unsigned PartialCheck = Test & fcZero) { 7640 if (PartialCheck == fcPosZero) 7641 PartialRes = DAG.getSetCC(DL, ResultVT, OpAsInt, ZeroV, ISD::SETEQ); 7642 else if (PartialCheck == fcZero) 7643 PartialRes = DAG.getSetCC(DL, ResultVT, AbsV, ZeroV, ISD::SETEQ); 7644 else // ISD::fcNegZero 7645 PartialRes = DAG.getSetCC(DL, ResultVT, OpAsInt, SignBitV, ISD::SETEQ); 7646 appendResult(PartialRes); 7647 } 7648 7649 if (unsigned PartialCheck = Test & fcInf) { 7650 if (PartialCheck == fcPosInf) 7651 PartialRes = DAG.getSetCC(DL, ResultVT, OpAsInt, InfV, ISD::SETEQ); 7652 else if (PartialCheck == fcInf) 7653 PartialRes = DAG.getSetCC(DL, ResultVT, AbsV, InfV, ISD::SETEQ); 7654 else { // ISD::fcNegInf 7655 APInt NegInf = APFloat::getInf(Semantics, true).bitcastToAPInt(); 7656 SDValue NegInfV = DAG.getConstant(NegInf, DL, IntVT); 7657 PartialRes = DAG.getSetCC(DL, ResultVT, OpAsInt, NegInfV, ISD::SETEQ); 7658 } 7659 appendResult(PartialRes); 7660 } 7661 7662 if (unsigned PartialCheck = Test & fcNan) { 7663 APInt InfWithQnanBit = Inf | QNaNBitMask; 7664 SDValue InfWithQnanBitV = DAG.getConstant(InfWithQnanBit, DL, IntVT); 7665 if (PartialCheck == fcNan) { 7666 // isnan(V) ==> abs(V) > int(inf) 7667 PartialRes = DAG.getSetCC(DL, ResultVT, AbsV, InfV, ISD::SETGT); 7668 if (IsF80) { 7669 // Recognize unsupported values as NaNs for compatibility with glibc. 7670 // In them (exp(V)==0) == int_bit. 7671 SDValue ExpBits = DAG.getNode(ISD::AND, DL, IntVT, AbsV, ExpMaskV); 7672 SDValue ExpIsZero = 7673 DAG.getSetCC(DL, ResultVT, ExpBits, ZeroV, ISD::SETEQ); 7674 SDValue IsPseudo = 7675 DAG.getSetCC(DL, ResultVT, getIntBitIsSet(), ExpIsZero, ISD::SETEQ); 7676 PartialRes = DAG.getNode(ISD::OR, DL, ResultVT, PartialRes, IsPseudo); 7677 } 7678 } else if (PartialCheck == fcQNan) { 7679 // isquiet(V) ==> abs(V) >= (unsigned(Inf) | quiet_bit) 7680 PartialRes = 7681 DAG.getSetCC(DL, ResultVT, AbsV, InfWithQnanBitV, ISD::SETGE); 7682 } else { // ISD::fcSNan 7683 // issignaling(V) ==> abs(V) > unsigned(Inf) && 7684 // abs(V) < (unsigned(Inf) | quiet_bit) 7685 SDValue IsNan = DAG.getSetCC(DL, ResultVT, AbsV, InfV, ISD::SETGT); 7686 SDValue IsNotQnan = 7687 DAG.getSetCC(DL, ResultVT, AbsV, InfWithQnanBitV, ISD::SETLT); 7688 PartialRes = DAG.getNode(ISD::AND, DL, ResultVT, IsNan, IsNotQnan); 7689 } 7690 appendResult(PartialRes); 7691 } 7692 7693 if (unsigned PartialCheck = Test & fcSubnormal) { 7694 // issubnormal(V) ==> unsigned(abs(V) - 1) < (all mantissa bits set) 7695 // issubnormal(V) && V>0 ==> unsigned(V - 1) < (all mantissa bits set) 7696 SDValue V = (PartialCheck == fcPosSubnormal) ? 
OpAsInt : AbsV; 7697 SDValue MantissaV = DAG.getConstant(AllOneMantissa, DL, IntVT); 7698 SDValue VMinusOneV = 7699 DAG.getNode(ISD::SUB, DL, IntVT, V, DAG.getConstant(1, DL, IntVT)); 7700 PartialRes = DAG.getSetCC(DL, ResultVT, VMinusOneV, MantissaV, ISD::SETULT); 7701 if (PartialCheck == fcNegSubnormal) 7702 PartialRes = DAG.getNode(ISD::AND, DL, ResultVT, PartialRes, SignV); 7703 appendResult(PartialRes); 7704 } 7705 7706 if (unsigned PartialCheck = Test & fcNormal) { 7707 // isnormal(V) ==> (0 < exp < max_exp) ==> (unsigned(exp-1) < (max_exp-1)) 7708 APInt ExpLSB = ExpMask & ~(ExpMask.shl(1)); 7709 SDValue ExpLSBV = DAG.getConstant(ExpLSB, DL, IntVT); 7710 SDValue ExpMinus1 = DAG.getNode(ISD::SUB, DL, IntVT, AbsV, ExpLSBV); 7711 APInt ExpLimit = ExpMask - ExpLSB; 7712 SDValue ExpLimitV = DAG.getConstant(ExpLimit, DL, IntVT); 7713 PartialRes = DAG.getSetCC(DL, ResultVT, ExpMinus1, ExpLimitV, ISD::SETULT); 7714 if (PartialCheck == fcNegNormal) 7715 PartialRes = DAG.getNode(ISD::AND, DL, ResultVT, PartialRes, SignV); 7716 else if (PartialCheck == fcPosNormal) { 7717 SDValue PosSignV = 7718 DAG.getNode(ISD::XOR, DL, ResultVT, SignV, ResultInvertionMask); 7719 PartialRes = DAG.getNode(ISD::AND, DL, ResultVT, PartialRes, PosSignV); 7720 } 7721 if (IsF80) 7722 PartialRes = 7723 DAG.getNode(ISD::AND, DL, ResultVT, PartialRes, getIntBitIsSet()); 7724 appendResult(PartialRes); 7725 } 7726 7727 if (!Res) 7728 return DAG.getConstant(IsInverted, DL, ResultVT); 7729 if (IsInverted) 7730 Res = DAG.getNode(ISD::XOR, DL, ResultVT, Res, ResultInvertionMask); 7731 return Res; 7732 } 7733 7734 // Only expand vector types if we have the appropriate vector bit operations. 7735 static bool canExpandVectorCTPOP(const TargetLowering &TLI, EVT VT) { 7736 assert(VT.isVector() && "Expected vector type"); 7737 unsigned Len = VT.getScalarSizeInBits(); 7738 return TLI.isOperationLegalOrCustom(ISD::ADD, VT) && 7739 TLI.isOperationLegalOrCustom(ISD::SUB, VT) && 7740 TLI.isOperationLegalOrCustom(ISD::SRL, VT) && 7741 (Len == 8 || TLI.isOperationLegalOrCustom(ISD::MUL, VT)) && 7742 TLI.isOperationLegalOrCustomOrPromote(ISD::AND, VT); 7743 } 7744 7745 SDValue TargetLowering::expandCTPOP(SDNode *Node, SelectionDAG &DAG) const { 7746 SDLoc dl(Node); 7747 EVT VT = Node->getValueType(0); 7748 EVT ShVT = getShiftAmountTy(VT, DAG.getDataLayout()); 7749 SDValue Op = Node->getOperand(0); 7750 unsigned Len = VT.getScalarSizeInBits(); 7751 assert(VT.isInteger() && "CTPOP not implemented for this type."); 7752 7753 // TODO: Add support for irregular type lengths. 7754 if (!(Len <= 128 && Len % 8 == 0)) 7755 return SDValue(); 7756 7757 // Only expand vector types if we have the appropriate vector bit operations. 7758 if (VT.isVector() && !canExpandVectorCTPOP(*this, VT)) 7759 return SDValue(); 7760 7761 // This is the "best" algorithm from 7762 // http://graphics.stanford.edu/~seander/bithacks.html#CountBitsSetParallel 7763 SDValue Mask55 = 7764 DAG.getConstant(APInt::getSplat(Len, APInt(8, 0x55)), dl, VT); 7765 SDValue Mask33 = 7766 DAG.getConstant(APInt::getSplat(Len, APInt(8, 0x33)), dl, VT); 7767 SDValue Mask0F = 7768 DAG.getConstant(APInt::getSplat(Len, APInt(8, 0x0F)), dl, VT); 7769 7770 // v = v - ((v >> 1) & 0x55555555...) 7771 Op = DAG.getNode(ISD::SUB, dl, VT, Op, 7772 DAG.getNode(ISD::AND, dl, VT, 7773 DAG.getNode(ISD::SRL, dl, VT, Op, 7774 DAG.getConstant(1, dl, ShVT)), 7775 Mask55)); 7776 // v = (v & 0x33333333...) + ((v >> 2) & 0x33333333...) 
7777 Op = DAG.getNode(ISD::ADD, dl, VT, DAG.getNode(ISD::AND, dl, VT, Op, Mask33), 7778 DAG.getNode(ISD::AND, dl, VT, 7779 DAG.getNode(ISD::SRL, dl, VT, Op, 7780 DAG.getConstant(2, dl, ShVT)), 7781 Mask33)); 7782 // v = (v + (v >> 4)) & 0x0F0F0F0F... 7783 Op = DAG.getNode(ISD::AND, dl, VT, 7784 DAG.getNode(ISD::ADD, dl, VT, Op, 7785 DAG.getNode(ISD::SRL, dl, VT, Op, 7786 DAG.getConstant(4, dl, ShVT))), 7787 Mask0F); 7788 7789 if (Len <= 8) 7790 return Op; 7791 7792 // Avoid the multiply if we only have 2 bytes to add. 7793 // TODO: Only doing this for scalars because vectors weren't as obviously 7794 // improved. 7795 if (Len == 16 && !VT.isVector()) { 7796 // v = (v + (v >> 8)) & 0x00FF; 7797 return DAG.getNode(ISD::AND, dl, VT, 7798 DAG.getNode(ISD::ADD, dl, VT, Op, 7799 DAG.getNode(ISD::SRL, dl, VT, Op, 7800 DAG.getConstant(8, dl, ShVT))), 7801 DAG.getConstant(0xFF, dl, VT)); 7802 } 7803 7804 // v = (v * 0x01010101...) >> (Len - 8) 7805 SDValue Mask01 = 7806 DAG.getConstant(APInt::getSplat(Len, APInt(8, 0x01)), dl, VT); 7807 return DAG.getNode(ISD::SRL, dl, VT, 7808 DAG.getNode(ISD::MUL, dl, VT, Op, Mask01), 7809 DAG.getConstant(Len - 8, dl, ShVT)); 7810 } 7811 7812 SDValue TargetLowering::expandCTLZ(SDNode *Node, SelectionDAG &DAG) const { 7813 SDLoc dl(Node); 7814 EVT VT = Node->getValueType(0); 7815 EVT ShVT = getShiftAmountTy(VT, DAG.getDataLayout()); 7816 SDValue Op = Node->getOperand(0); 7817 unsigned NumBitsPerElt = VT.getScalarSizeInBits(); 7818 7819 // If the non-ZERO_UNDEF version is supported we can use that instead. 7820 if (Node->getOpcode() == ISD::CTLZ_ZERO_UNDEF && 7821 isOperationLegalOrCustom(ISD::CTLZ, VT)) 7822 return DAG.getNode(ISD::CTLZ, dl, VT, Op); 7823 7824 // If the ZERO_UNDEF version is supported use that and handle the zero case. 7825 if (isOperationLegalOrCustom(ISD::CTLZ_ZERO_UNDEF, VT)) { 7826 EVT SetCCVT = 7827 getSetCCResultType(DAG.getDataLayout(), *DAG.getContext(), VT); 7828 SDValue CTLZ = DAG.getNode(ISD::CTLZ_ZERO_UNDEF, dl, VT, Op); 7829 SDValue Zero = DAG.getConstant(0, dl, VT); 7830 SDValue SrcIsZero = DAG.getSetCC(dl, SetCCVT, Op, Zero, ISD::SETEQ); 7831 return DAG.getSelect(dl, VT, SrcIsZero, 7832 DAG.getConstant(NumBitsPerElt, dl, VT), CTLZ); 7833 } 7834 7835 // Only expand vector types if we have the appropriate vector bit operations. 7836 // This includes the operations needed to expand CTPOP if it isn't supported. 7837 if (VT.isVector() && (!isPowerOf2_32(NumBitsPerElt) || 7838 (!isOperationLegalOrCustom(ISD::CTPOP, VT) && 7839 !canExpandVectorCTPOP(*this, VT)) || 7840 !isOperationLegalOrCustom(ISD::SRL, VT) || 7841 !isOperationLegalOrCustomOrPromote(ISD::OR, VT))) 7842 return SDValue(); 7843 7844 // for now, we do this: 7845 // x = x | (x >> 1); 7846 // x = x | (x >> 2); 7847 // ... 
7848 // x = x | (x >>16); 7849 // x = x | (x >>32); // for 64-bit input 7850 // return popcount(~x); 7851 // 7852 // Ref: "Hacker's Delight" by Henry Warren 7853 for (unsigned i = 0; (1U << i) <= (NumBitsPerElt / 2); ++i) { 7854 SDValue Tmp = DAG.getConstant(1ULL << i, dl, ShVT); 7855 Op = DAG.getNode(ISD::OR, dl, VT, Op, 7856 DAG.getNode(ISD::SRL, dl, VT, Op, Tmp)); 7857 } 7858 Op = DAG.getNOT(dl, Op, VT); 7859 return DAG.getNode(ISD::CTPOP, dl, VT, Op); 7860 } 7861 7862 SDValue TargetLowering::expandCTTZ(SDNode *Node, SelectionDAG &DAG) const { 7863 SDLoc dl(Node); 7864 EVT VT = Node->getValueType(0); 7865 SDValue Op = Node->getOperand(0); 7866 unsigned NumBitsPerElt = VT.getScalarSizeInBits(); 7867 7868 // If the non-ZERO_UNDEF version is supported we can use that instead. 7869 if (Node->getOpcode() == ISD::CTTZ_ZERO_UNDEF && 7870 isOperationLegalOrCustom(ISD::CTTZ, VT)) 7871 return DAG.getNode(ISD::CTTZ, dl, VT, Op); 7872 7873 // If the ZERO_UNDEF version is supported use that and handle the zero case. 7874 if (isOperationLegalOrCustom(ISD::CTTZ_ZERO_UNDEF, VT)) { 7875 EVT SetCCVT = 7876 getSetCCResultType(DAG.getDataLayout(), *DAG.getContext(), VT); 7877 SDValue CTTZ = DAG.getNode(ISD::CTTZ_ZERO_UNDEF, dl, VT, Op); 7878 SDValue Zero = DAG.getConstant(0, dl, VT); 7879 SDValue SrcIsZero = DAG.getSetCC(dl, SetCCVT, Op, Zero, ISD::SETEQ); 7880 return DAG.getSelect(dl, VT, SrcIsZero, 7881 DAG.getConstant(NumBitsPerElt, dl, VT), CTTZ); 7882 } 7883 7884 // Only expand vector types if we have the appropriate vector bit operations. 7885 // This includes the operations needed to expand CTPOP if it isn't supported. 7886 if (VT.isVector() && (!isPowerOf2_32(NumBitsPerElt) || 7887 (!isOperationLegalOrCustom(ISD::CTPOP, VT) && 7888 !isOperationLegalOrCustom(ISD::CTLZ, VT) && 7889 !canExpandVectorCTPOP(*this, VT)) || 7890 !isOperationLegalOrCustom(ISD::SUB, VT) || 7891 !isOperationLegalOrCustomOrPromote(ISD::AND, VT) || 7892 !isOperationLegalOrCustomOrPromote(ISD::XOR, VT))) 7893 return SDValue(); 7894 7895 // for now, we use: { return popcount(~x & (x - 1)); } 7896 // unless the target has ctlz but not ctpop, in which case we use: 7897 // { return 32 - nlz(~x & (x-1)); } 7898 // Ref: "Hacker's Delight" by Henry Warren 7899 SDValue Tmp = DAG.getNode( 7900 ISD::AND, dl, VT, DAG.getNOT(dl, Op, VT), 7901 DAG.getNode(ISD::SUB, dl, VT, Op, DAG.getConstant(1, dl, VT))); 7902 7903 // If ISD::CTLZ is legal and CTPOP isn't, then do that instead. 
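  // Worked example of the mask above (illustrative), for 8-bit x = 0b01101000:
  //   x - 1 = 0b01100111, ~x = 0b10010111, ~x & (x - 1) = 0b00000111,
  //   so popcount yields 3 == cttz(x); the CTLZ fallback below likewise
  //   computes 8 - ctlz(0b00000111) = 8 - 5 = 3.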
7904 if (isOperationLegal(ISD::CTLZ, VT) && !isOperationLegal(ISD::CTPOP, VT)) { 7905 return DAG.getNode(ISD::SUB, dl, VT, DAG.getConstant(NumBitsPerElt, dl, VT), 7906 DAG.getNode(ISD::CTLZ, dl, VT, Tmp)); 7907 } 7908 7909 return DAG.getNode(ISD::CTPOP, dl, VT, Tmp); 7910 } 7911 7912 SDValue TargetLowering::expandABS(SDNode *N, SelectionDAG &DAG, 7913 bool IsNegative) const { 7914 SDLoc dl(N); 7915 EVT VT = N->getValueType(0); 7916 EVT ShVT = getShiftAmountTy(VT, DAG.getDataLayout()); 7917 SDValue Op = N->getOperand(0); 7918 7919 // abs(x) -> smax(x,sub(0,x)) 7920 if (!IsNegative && isOperationLegal(ISD::SUB, VT) && 7921 isOperationLegal(ISD::SMAX, VT)) { 7922 SDValue Zero = DAG.getConstant(0, dl, VT); 7923 return DAG.getNode(ISD::SMAX, dl, VT, Op, 7924 DAG.getNode(ISD::SUB, dl, VT, Zero, Op)); 7925 } 7926 7927 // abs(x) -> umin(x,sub(0,x)) 7928 if (!IsNegative && isOperationLegal(ISD::SUB, VT) && 7929 isOperationLegal(ISD::UMIN, VT)) { 7930 SDValue Zero = DAG.getConstant(0, dl, VT); 7931 Op = DAG.getFreeze(Op); 7932 return DAG.getNode(ISD::UMIN, dl, VT, Op, 7933 DAG.getNode(ISD::SUB, dl, VT, Zero, Op)); 7934 } 7935 7936 // 0 - abs(x) -> smin(x, sub(0,x)) 7937 if (IsNegative && isOperationLegal(ISD::SUB, VT) && 7938 isOperationLegal(ISD::SMIN, VT)) { 7939 Op = DAG.getFreeze(Op); 7940 SDValue Zero = DAG.getConstant(0, dl, VT); 7941 return DAG.getNode(ISD::SMIN, dl, VT, Op, 7942 DAG.getNode(ISD::SUB, dl, VT, Zero, Op)); 7943 } 7944 7945 // Only expand vector types if we have the appropriate vector operations. 7946 if (VT.isVector() && 7947 (!isOperationLegalOrCustom(ISD::SRA, VT) || 7948 (!IsNegative && !isOperationLegalOrCustom(ISD::ADD, VT)) || 7949 (IsNegative && !isOperationLegalOrCustom(ISD::SUB, VT)) || 7950 !isOperationLegalOrCustomOrPromote(ISD::XOR, VT))) 7951 return SDValue(); 7952 7953 Op = DAG.getFreeze(Op); 7954 SDValue Shift = 7955 DAG.getNode(ISD::SRA, dl, VT, Op, 7956 DAG.getConstant(VT.getScalarSizeInBits() - 1, dl, ShVT)); 7957 SDValue Xor = DAG.getNode(ISD::XOR, dl, VT, Op, Shift); 7958 7959 // abs(x) -> Y = sra (X, size(X)-1); sub (xor (X, Y), Y) 7960 if (!IsNegative) 7961 return DAG.getNode(ISD::SUB, dl, VT, Xor, Shift); 7962 7963 // 0 - abs(x) -> Y = sra (X, size(X)-1); sub (Y, xor (X, Y)) 7964 return DAG.getNode(ISD::SUB, dl, VT, Shift, Xor); 7965 } 7966 7967 SDValue TargetLowering::expandBSWAP(SDNode *N, SelectionDAG &DAG) const { 7968 SDLoc dl(N); 7969 EVT VT = N->getValueType(0); 7970 SDValue Op = N->getOperand(0); 7971 7972 if (!VT.isSimple()) 7973 return SDValue(); 7974 7975 EVT SHVT = getShiftAmountTy(VT, DAG.getDataLayout()); 7976 SDValue Tmp1, Tmp2, Tmp3, Tmp4, Tmp5, Tmp6, Tmp7, Tmp8; 7977 switch (VT.getSimpleVT().getScalarType().SimpleTy) { 7978 default: 7979 return SDValue(); 7980 case MVT::i16: 7981 // Use a rotate by 8. This can be further expanded if necessary. 
7982 return DAG.getNode(ISD::ROTL, dl, VT, Op, DAG.getConstant(8, dl, SHVT)); 7983 case MVT::i32: 7984 Tmp4 = DAG.getNode(ISD::SHL, dl, VT, Op, DAG.getConstant(24, dl, SHVT)); 7985 Tmp3 = DAG.getNode(ISD::SHL, dl, VT, Op, DAG.getConstant(8, dl, SHVT)); 7986 Tmp2 = DAG.getNode(ISD::SRL, dl, VT, Op, DAG.getConstant(8, dl, SHVT)); 7987 Tmp1 = DAG.getNode(ISD::SRL, dl, VT, Op, DAG.getConstant(24, dl, SHVT)); 7988 Tmp3 = DAG.getNode(ISD::AND, dl, VT, Tmp3, 7989 DAG.getConstant(0xFF0000, dl, VT)); 7990 Tmp2 = DAG.getNode(ISD::AND, dl, VT, Tmp2, DAG.getConstant(0xFF00, dl, VT)); 7991 Tmp4 = DAG.getNode(ISD::OR, dl, VT, Tmp4, Tmp3); 7992 Tmp2 = DAG.getNode(ISD::OR, dl, VT, Tmp2, Tmp1); 7993 return DAG.getNode(ISD::OR, dl, VT, Tmp4, Tmp2); 7994 case MVT::i64: 7995 Tmp8 = DAG.getNode(ISD::SHL, dl, VT, Op, DAG.getConstant(56, dl, SHVT)); 7996 Tmp7 = DAG.getNode(ISD::SHL, dl, VT, Op, DAG.getConstant(40, dl, SHVT)); 7997 Tmp6 = DAG.getNode(ISD::SHL, dl, VT, Op, DAG.getConstant(24, dl, SHVT)); 7998 Tmp5 = DAG.getNode(ISD::SHL, dl, VT, Op, DAG.getConstant(8, dl, SHVT)); 7999 Tmp4 = DAG.getNode(ISD::SRL, dl, VT, Op, DAG.getConstant(8, dl, SHVT)); 8000 Tmp3 = DAG.getNode(ISD::SRL, dl, VT, Op, DAG.getConstant(24, dl, SHVT)); 8001 Tmp2 = DAG.getNode(ISD::SRL, dl, VT, Op, DAG.getConstant(40, dl, SHVT)); 8002 Tmp1 = DAG.getNode(ISD::SRL, dl, VT, Op, DAG.getConstant(56, dl, SHVT)); 8003 Tmp7 = DAG.getNode(ISD::AND, dl, VT, Tmp7, 8004 DAG.getConstant(255ULL<<48, dl, VT)); 8005 Tmp6 = DAG.getNode(ISD::AND, dl, VT, Tmp6, 8006 DAG.getConstant(255ULL<<40, dl, VT)); 8007 Tmp5 = DAG.getNode(ISD::AND, dl, VT, Tmp5, 8008 DAG.getConstant(255ULL<<32, dl, VT)); 8009 Tmp4 = DAG.getNode(ISD::AND, dl, VT, Tmp4, 8010 DAG.getConstant(255ULL<<24, dl, VT)); 8011 Tmp3 = DAG.getNode(ISD::AND, dl, VT, Tmp3, 8012 DAG.getConstant(255ULL<<16, dl, VT)); 8013 Tmp2 = DAG.getNode(ISD::AND, dl, VT, Tmp2, 8014 DAG.getConstant(255ULL<<8 , dl, VT)); 8015 Tmp8 = DAG.getNode(ISD::OR, dl, VT, Tmp8, Tmp7); 8016 Tmp6 = DAG.getNode(ISD::OR, dl, VT, Tmp6, Tmp5); 8017 Tmp4 = DAG.getNode(ISD::OR, dl, VT, Tmp4, Tmp3); 8018 Tmp2 = DAG.getNode(ISD::OR, dl, VT, Tmp2, Tmp1); 8019 Tmp8 = DAG.getNode(ISD::OR, dl, VT, Tmp8, Tmp6); 8020 Tmp4 = DAG.getNode(ISD::OR, dl, VT, Tmp4, Tmp2); 8021 return DAG.getNode(ISD::OR, dl, VT, Tmp8, Tmp4); 8022 } 8023 } 8024 8025 SDValue TargetLowering::expandBITREVERSE(SDNode *N, SelectionDAG &DAG) const { 8026 SDLoc dl(N); 8027 EVT VT = N->getValueType(0); 8028 SDValue Op = N->getOperand(0); 8029 EVT SHVT = getShiftAmountTy(VT, DAG.getDataLayout()); 8030 unsigned Sz = VT.getScalarSizeInBits(); 8031 8032 SDValue Tmp, Tmp2, Tmp3; 8033 8034 // If we can, perform BSWAP first and then the mask+swap the i4, then i2 8035 // and finally the i1 pairs. 8036 // TODO: We can easily support i4/i2 legal types if any target ever does. 8037 if (Sz >= 8 && isPowerOf2_32(Sz)) { 8038 // Create the masks - repeating the pattern every byte. 8039 APInt Mask4 = APInt::getSplat(Sz, APInt(8, 0x0F)); 8040 APInt Mask2 = APInt::getSplat(Sz, APInt(8, 0x33)); 8041 APInt Mask1 = APInt::getSplat(Sz, APInt(8, 0x55)); 8042 8043 // BSWAP if the type is wider than a single byte. 8044 Tmp = (Sz > 8 ? 
DAG.getNode(ISD::BSWAP, dl, VT, Op) : Op); 8045 8046 // swap i4: ((V >> 4) & 0x0F) | ((V & 0x0F) << 4) 8047 Tmp2 = DAG.getNode(ISD::SRL, dl, VT, Tmp, DAG.getConstant(4, dl, SHVT)); 8048 Tmp2 = DAG.getNode(ISD::AND, dl, VT, Tmp2, DAG.getConstant(Mask4, dl, VT)); 8049 Tmp3 = DAG.getNode(ISD::AND, dl, VT, Tmp, DAG.getConstant(Mask4, dl, VT)); 8050 Tmp3 = DAG.getNode(ISD::SHL, dl, VT, Tmp3, DAG.getConstant(4, dl, SHVT)); 8051 Tmp = DAG.getNode(ISD::OR, dl, VT, Tmp2, Tmp3); 8052 8053 // swap i2: ((V >> 2) & 0x33) | ((V & 0x33) << 2) 8054 Tmp2 = DAG.getNode(ISD::SRL, dl, VT, Tmp, DAG.getConstant(2, dl, SHVT)); 8055 Tmp2 = DAG.getNode(ISD::AND, dl, VT, Tmp2, DAG.getConstant(Mask2, dl, VT)); 8056 Tmp3 = DAG.getNode(ISD::AND, dl, VT, Tmp, DAG.getConstant(Mask2, dl, VT)); 8057 Tmp3 = DAG.getNode(ISD::SHL, dl, VT, Tmp3, DAG.getConstant(2, dl, SHVT)); 8058 Tmp = DAG.getNode(ISD::OR, dl, VT, Tmp2, Tmp3); 8059 8060 // swap i1: ((V >> 1) & 0x55) | ((V & 0x55) << 1) 8061 Tmp2 = DAG.getNode(ISD::SRL, dl, VT, Tmp, DAG.getConstant(1, dl, SHVT)); 8062 Tmp2 = DAG.getNode(ISD::AND, dl, VT, Tmp2, DAG.getConstant(Mask1, dl, VT)); 8063 Tmp3 = DAG.getNode(ISD::AND, dl, VT, Tmp, DAG.getConstant(Mask1, dl, VT)); 8064 Tmp3 = DAG.getNode(ISD::SHL, dl, VT, Tmp3, DAG.getConstant(1, dl, SHVT)); 8065 Tmp = DAG.getNode(ISD::OR, dl, VT, Tmp2, Tmp3); 8066 return Tmp; 8067 } 8068 8069 Tmp = DAG.getConstant(0, dl, VT); 8070 for (unsigned I = 0, J = Sz-1; I < Sz; ++I, --J) { 8071 if (I < J) 8072 Tmp2 = 8073 DAG.getNode(ISD::SHL, dl, VT, Op, DAG.getConstant(J - I, dl, SHVT)); 8074 else 8075 Tmp2 = 8076 DAG.getNode(ISD::SRL, dl, VT, Op, DAG.getConstant(I - J, dl, SHVT)); 8077 8078 APInt Shift(Sz, 1); 8079 Shift <<= J; 8080 Tmp2 = DAG.getNode(ISD::AND, dl, VT, Tmp2, DAG.getConstant(Shift, dl, VT)); 8081 Tmp = DAG.getNode(ISD::OR, dl, VT, Tmp, Tmp2); 8082 } 8083 8084 return Tmp; 8085 } 8086 8087 std::pair<SDValue, SDValue> 8088 TargetLowering::scalarizeVectorLoad(LoadSDNode *LD, 8089 SelectionDAG &DAG) const { 8090 SDLoc SL(LD); 8091 SDValue Chain = LD->getChain(); 8092 SDValue BasePTR = LD->getBasePtr(); 8093 EVT SrcVT = LD->getMemoryVT(); 8094 EVT DstVT = LD->getValueType(0); 8095 ISD::LoadExtType ExtType = LD->getExtensionType(); 8096 8097 if (SrcVT.isScalableVector()) 8098 report_fatal_error("Cannot scalarize scalable vector loads"); 8099 8100 unsigned NumElem = SrcVT.getVectorNumElements(); 8101 8102 EVT SrcEltVT = SrcVT.getScalarType(); 8103 EVT DstEltVT = DstVT.getScalarType(); 8104 8105 // A vector must always be stored in memory as-is, i.e. without any padding 8106 // between the elements, since various code depend on it, e.g. in the 8107 // handling of a bitcast of a vector type to int, which may be done with a 8108 // vector store followed by an integer load. A vector that does not have 8109 // elements that are byte-sized must therefore be stored as an integer 8110 // built out of the extracted vector elements. 8111 if (!SrcEltVT.isByteSized()) { 8112 unsigned NumLoadBits = SrcVT.getStoreSizeInBits(); 8113 EVT LoadVT = EVT::getIntegerVT(*DAG.getContext(), NumLoadBits); 8114 8115 unsigned NumSrcBits = SrcVT.getSizeInBits(); 8116 EVT SrcIntVT = EVT::getIntegerVT(*DAG.getContext(), NumSrcBits); 8117 8118 unsigned SrcEltBits = SrcEltVT.getSizeInBits(); 8119 SDValue SrcEltBitMask = DAG.getConstant( 8120 APInt::getLowBitsSet(NumLoadBits, SrcEltBits), SL, LoadVT); 8121 8122 // Load the whole vector and avoid masking off the top bits as it makes 8123 // the codegen worse. 
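    // Illustrative example: for a <4 x i1> load, LoadVT is i8 and a single
    // EXTLOAD is emitted; element Idx is then recovered below as
    // trunc((Load >> ShiftIntoIdx) & 1), with ShiftIntoIdx mirrored on
    // big-endian targets.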
8124 SDValue Load = 8125 DAG.getExtLoad(ISD::EXTLOAD, SL, LoadVT, Chain, BasePTR, 8126 LD->getPointerInfo(), SrcIntVT, LD->getOriginalAlign(), 8127 LD->getMemOperand()->getFlags(), LD->getAAInfo()); 8128 8129 SmallVector<SDValue, 8> Vals; 8130 for (unsigned Idx = 0; Idx < NumElem; ++Idx) { 8131 unsigned ShiftIntoIdx = 8132 (DAG.getDataLayout().isBigEndian() ? (NumElem - 1) - Idx : Idx); 8133 SDValue ShiftAmount = 8134 DAG.getShiftAmountConstant(ShiftIntoIdx * SrcEltVT.getSizeInBits(), 8135 LoadVT, SL, /*LegalTypes=*/false); 8136 SDValue ShiftedElt = DAG.getNode(ISD::SRL, SL, LoadVT, Load, ShiftAmount); 8137 SDValue Elt = 8138 DAG.getNode(ISD::AND, SL, LoadVT, ShiftedElt, SrcEltBitMask); 8139 SDValue Scalar = DAG.getNode(ISD::TRUNCATE, SL, SrcEltVT, Elt); 8140 8141 if (ExtType != ISD::NON_EXTLOAD) { 8142 unsigned ExtendOp = ISD::getExtForLoadExtType(false, ExtType); 8143 Scalar = DAG.getNode(ExtendOp, SL, DstEltVT, Scalar); 8144 } 8145 8146 Vals.push_back(Scalar); 8147 } 8148 8149 SDValue Value = DAG.getBuildVector(DstVT, SL, Vals); 8150 return std::make_pair(Value, Load.getValue(1)); 8151 } 8152 8153 unsigned Stride = SrcEltVT.getSizeInBits() / 8; 8154 assert(SrcEltVT.isByteSized()); 8155 8156 SmallVector<SDValue, 8> Vals; 8157 SmallVector<SDValue, 8> LoadChains; 8158 8159 for (unsigned Idx = 0; Idx < NumElem; ++Idx) { 8160 SDValue ScalarLoad = 8161 DAG.getExtLoad(ExtType, SL, DstEltVT, Chain, BasePTR, 8162 LD->getPointerInfo().getWithOffset(Idx * Stride), 8163 SrcEltVT, LD->getOriginalAlign(), 8164 LD->getMemOperand()->getFlags(), LD->getAAInfo()); 8165 8166 BasePTR = DAG.getObjectPtrOffset(SL, BasePTR, TypeSize::Fixed(Stride)); 8167 8168 Vals.push_back(ScalarLoad.getValue(0)); 8169 LoadChains.push_back(ScalarLoad.getValue(1)); 8170 } 8171 8172 SDValue NewChain = DAG.getNode(ISD::TokenFactor, SL, MVT::Other, LoadChains); 8173 SDValue Value = DAG.getBuildVector(DstVT, SL, Vals); 8174 8175 return std::make_pair(Value, NewChain); 8176 } 8177 8178 SDValue TargetLowering::scalarizeVectorStore(StoreSDNode *ST, 8179 SelectionDAG &DAG) const { 8180 SDLoc SL(ST); 8181 8182 SDValue Chain = ST->getChain(); 8183 SDValue BasePtr = ST->getBasePtr(); 8184 SDValue Value = ST->getValue(); 8185 EVT StVT = ST->getMemoryVT(); 8186 8187 if (StVT.isScalableVector()) 8188 report_fatal_error("Cannot scalarize scalable vector stores"); 8189 8190 // The type of the data we want to save 8191 EVT RegVT = Value.getValueType(); 8192 EVT RegSclVT = RegVT.getScalarType(); 8193 8194 // The type of data as saved in memory. 8195 EVT MemSclVT = StVT.getScalarType(); 8196 8197 unsigned NumElem = StVT.getVectorNumElements(); 8198 8199 // A vector must always be stored in memory as-is, i.e. without any padding 8200 // between the elements, since various code depend on it, e.g. in the 8201 // handling of a bitcast of a vector type to int, which may be done with a 8202 // vector store followed by an integer load. A vector that does not have 8203 // elements that are byte-sized must therefore be stored as an integer 8204 // built out of the extracted vector elements. 
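  // For example (illustrative), storing a <4 x i1> value packs the element
  // bits into one integer with the zext/shl/or sequence below and emits a
  // single scalar store instead of four element stores.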
8205 if (!MemSclVT.isByteSized()) { 8206 unsigned NumBits = StVT.getSizeInBits(); 8207 EVT IntVT = EVT::getIntegerVT(*DAG.getContext(), NumBits); 8208 8209 SDValue CurrVal = DAG.getConstant(0, SL, IntVT); 8210 8211 for (unsigned Idx = 0; Idx < NumElem; ++Idx) { 8212 SDValue Elt = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, SL, RegSclVT, Value, 8213 DAG.getVectorIdxConstant(Idx, SL)); 8214 SDValue Trunc = DAG.getNode(ISD::TRUNCATE, SL, MemSclVT, Elt); 8215 SDValue ExtElt = DAG.getNode(ISD::ZERO_EXTEND, SL, IntVT, Trunc); 8216 unsigned ShiftIntoIdx = 8217 (DAG.getDataLayout().isBigEndian() ? (NumElem - 1) - Idx : Idx); 8218 SDValue ShiftAmount = 8219 DAG.getConstant(ShiftIntoIdx * MemSclVT.getSizeInBits(), SL, IntVT); 8220 SDValue ShiftedElt = 8221 DAG.getNode(ISD::SHL, SL, IntVT, ExtElt, ShiftAmount); 8222 CurrVal = DAG.getNode(ISD::OR, SL, IntVT, CurrVal, ShiftedElt); 8223 } 8224 8225 return DAG.getStore(Chain, SL, CurrVal, BasePtr, ST->getPointerInfo(), 8226 ST->getOriginalAlign(), ST->getMemOperand()->getFlags(), 8227 ST->getAAInfo()); 8228 } 8229 8230 // Store Stride in bytes 8231 unsigned Stride = MemSclVT.getSizeInBits() / 8; 8232 assert(Stride && "Zero stride!"); 8233 // Extract each of the elements from the original vector and save them into 8234 // memory individually. 8235 SmallVector<SDValue, 8> Stores; 8236 for (unsigned Idx = 0; Idx < NumElem; ++Idx) { 8237 SDValue Elt = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, SL, RegSclVT, Value, 8238 DAG.getVectorIdxConstant(Idx, SL)); 8239 8240 SDValue Ptr = 8241 DAG.getObjectPtrOffset(SL, BasePtr, TypeSize::Fixed(Idx * Stride)); 8242 8243 // This scalar TruncStore may be illegal, but we legalize it later. 8244 SDValue Store = DAG.getTruncStore( 8245 Chain, SL, Elt, Ptr, ST->getPointerInfo().getWithOffset(Idx * Stride), 8246 MemSclVT, ST->getOriginalAlign(), ST->getMemOperand()->getFlags(), 8247 ST->getAAInfo()); 8248 8249 Stores.push_back(Store); 8250 } 8251 8252 return DAG.getNode(ISD::TokenFactor, SL, MVT::Other, Stores); 8253 } 8254 8255 std::pair<SDValue, SDValue> 8256 TargetLowering::expandUnalignedLoad(LoadSDNode *LD, SelectionDAG &DAG) const { 8257 assert(LD->getAddressingMode() == ISD::UNINDEXED && 8258 "unaligned indexed loads not implemented!"); 8259 SDValue Chain = LD->getChain(); 8260 SDValue Ptr = LD->getBasePtr(); 8261 EVT VT = LD->getValueType(0); 8262 EVT LoadedVT = LD->getMemoryVT(); 8263 SDLoc dl(LD); 8264 auto &MF = DAG.getMachineFunction(); 8265 8266 if (VT.isFloatingPoint() || VT.isVector()) { 8267 EVT intVT = EVT::getIntegerVT(*DAG.getContext(), LoadedVT.getSizeInBits()); 8268 if (isTypeLegal(intVT) && isTypeLegal(LoadedVT)) { 8269 if (!isOperationLegalOrCustom(ISD::LOAD, intVT) && 8270 LoadedVT.isVector()) { 8271 // Scalarize the load and let the individual components be handled. 8272 return scalarizeVectorLoad(LD, DAG); 8273 } 8274 8275 // Expand to a (misaligned) integer load of the same size, 8276 // then bitconvert to floating point or vector. 8277 SDValue newLoad = DAG.getLoad(intVT, dl, Chain, Ptr, 8278 LD->getMemOperand()); 8279 SDValue Result = DAG.getNode(ISD::BITCAST, dl, LoadedVT, newLoad); 8280 if (LoadedVT != VT) 8281 Result = DAG.getNode(VT.isFloatingPoint() ? ISD::FP_EXTEND : 8282 ISD::ANY_EXTEND, dl, VT, Result); 8283 8284 return std::make_pair(Result, newLoad.getValue(1)); 8285 } 8286 8287 // Copy the value to a (aligned) stack slot using (unaligned) integer 8288 // loads and stores, then do a (aligned) load from the stack slot. 
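    // Illustrative sizing note: a 10-byte StoreMemVT with 4-byte registers
    // gives NumRegs = 3; the loop below copies two full registers and the
    // remaining two bytes are handled by the final truncating store.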
8289 MVT RegVT = getRegisterType(*DAG.getContext(), intVT); 8290 unsigned LoadedBytes = LoadedVT.getStoreSize(); 8291 unsigned RegBytes = RegVT.getSizeInBits() / 8; 8292 unsigned NumRegs = (LoadedBytes + RegBytes - 1) / RegBytes; 8293 8294 // Make sure the stack slot is also aligned for the register type. 8295 SDValue StackBase = DAG.CreateStackTemporary(LoadedVT, RegVT); 8296 auto FrameIndex = cast<FrameIndexSDNode>(StackBase.getNode())->getIndex(); 8297 SmallVector<SDValue, 8> Stores; 8298 SDValue StackPtr = StackBase; 8299 unsigned Offset = 0; 8300 8301 EVT PtrVT = Ptr.getValueType(); 8302 EVT StackPtrVT = StackPtr.getValueType(); 8303 8304 SDValue PtrIncrement = DAG.getConstant(RegBytes, dl, PtrVT); 8305 SDValue StackPtrIncrement = DAG.getConstant(RegBytes, dl, StackPtrVT); 8306 8307 // Do all but one copies using the full register width. 8308 for (unsigned i = 1; i < NumRegs; i++) { 8309 // Load one integer register's worth from the original location. 8310 SDValue Load = DAG.getLoad( 8311 RegVT, dl, Chain, Ptr, LD->getPointerInfo().getWithOffset(Offset), 8312 LD->getOriginalAlign(), LD->getMemOperand()->getFlags(), 8313 LD->getAAInfo()); 8314 // Follow the load with a store to the stack slot. Remember the store. 8315 Stores.push_back(DAG.getStore( 8316 Load.getValue(1), dl, Load, StackPtr, 8317 MachinePointerInfo::getFixedStack(MF, FrameIndex, Offset))); 8318 // Increment the pointers. 8319 Offset += RegBytes; 8320 8321 Ptr = DAG.getObjectPtrOffset(dl, Ptr, PtrIncrement); 8322 StackPtr = DAG.getObjectPtrOffset(dl, StackPtr, StackPtrIncrement); 8323 } 8324 8325 // The last copy may be partial. Do an extending load. 8326 EVT MemVT = EVT::getIntegerVT(*DAG.getContext(), 8327 8 * (LoadedBytes - Offset)); 8328 SDValue Load = 8329 DAG.getExtLoad(ISD::EXTLOAD, dl, RegVT, Chain, Ptr, 8330 LD->getPointerInfo().getWithOffset(Offset), MemVT, 8331 LD->getOriginalAlign(), LD->getMemOperand()->getFlags(), 8332 LD->getAAInfo()); 8333 // Follow the load with a store to the stack slot. Remember the store. 8334 // On big-endian machines this requires a truncating store to ensure 8335 // that the bits end up in the right place. 8336 Stores.push_back(DAG.getTruncStore( 8337 Load.getValue(1), dl, Load, StackPtr, 8338 MachinePointerInfo::getFixedStack(MF, FrameIndex, Offset), MemVT)); 8339 8340 // The order of the stores doesn't matter - say it with a TokenFactor. 8341 SDValue TF = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, Stores); 8342 8343 // Finally, perform the original load only redirected to the stack slot. 8344 Load = DAG.getExtLoad(LD->getExtensionType(), dl, VT, TF, StackBase, 8345 MachinePointerInfo::getFixedStack(MF, FrameIndex, 0), 8346 LoadedVT); 8347 8348 // Callers expect a MERGE_VALUES node. 8349 return std::make_pair(Load, TF); 8350 } 8351 8352 assert(LoadedVT.isInteger() && !LoadedVT.isVector() && 8353 "Unaligned load of unsupported type."); 8354 8355 // Compute the new VT that is half the size of the old one. This is an 8356 // integer MVT. 8357 unsigned NumBits = LoadedVT.getSizeInBits(); 8358 EVT NewLoadedVT; 8359 NewLoadedVT = EVT::getIntegerVT(*DAG.getContext(), NumBits/2); 8360 NumBits >>= 1; 8361 8362 Align Alignment = LD->getOriginalAlign(); 8363 unsigned IncrementSize = NumBits / 8; 8364 ISD::LoadExtType HiExtType = LD->getExtensionType(); 8365 8366 // If the original load is NON_EXTLOAD, the hi part load must be ZEXTLOAD. 
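  // Note (explanatory only): the two halves are reassembled below as
  // (Hi << NumBits) | Lo, so the low half is always zero-extended to keep its
  // upper bits clear for the OR.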
8367 if (HiExtType == ISD::NON_EXTLOAD) 8368 HiExtType = ISD::ZEXTLOAD; 8369 8370 // Load the value in two parts 8371 SDValue Lo, Hi; 8372 if (DAG.getDataLayout().isLittleEndian()) { 8373 Lo = DAG.getExtLoad(ISD::ZEXTLOAD, dl, VT, Chain, Ptr, LD->getPointerInfo(), 8374 NewLoadedVT, Alignment, LD->getMemOperand()->getFlags(), 8375 LD->getAAInfo()); 8376 8377 Ptr = DAG.getObjectPtrOffset(dl, Ptr, TypeSize::Fixed(IncrementSize)); 8378 Hi = DAG.getExtLoad(HiExtType, dl, VT, Chain, Ptr, 8379 LD->getPointerInfo().getWithOffset(IncrementSize), 8380 NewLoadedVT, Alignment, LD->getMemOperand()->getFlags(), 8381 LD->getAAInfo()); 8382 } else { 8383 Hi = DAG.getExtLoad(HiExtType, dl, VT, Chain, Ptr, LD->getPointerInfo(), 8384 NewLoadedVT, Alignment, LD->getMemOperand()->getFlags(), 8385 LD->getAAInfo()); 8386 8387 Ptr = DAG.getObjectPtrOffset(dl, Ptr, TypeSize::Fixed(IncrementSize)); 8388 Lo = DAG.getExtLoad(ISD::ZEXTLOAD, dl, VT, Chain, Ptr, 8389 LD->getPointerInfo().getWithOffset(IncrementSize), 8390 NewLoadedVT, Alignment, LD->getMemOperand()->getFlags(), 8391 LD->getAAInfo()); 8392 } 8393 8394 // aggregate the two parts 8395 SDValue ShiftAmount = 8396 DAG.getConstant(NumBits, dl, getShiftAmountTy(Hi.getValueType(), 8397 DAG.getDataLayout())); 8398 SDValue Result = DAG.getNode(ISD::SHL, dl, VT, Hi, ShiftAmount); 8399 Result = DAG.getNode(ISD::OR, dl, VT, Result, Lo); 8400 8401 SDValue TF = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, Lo.getValue(1), 8402 Hi.getValue(1)); 8403 8404 return std::make_pair(Result, TF); 8405 } 8406 8407 SDValue TargetLowering::expandUnalignedStore(StoreSDNode *ST, 8408 SelectionDAG &DAG) const { 8409 assert(ST->getAddressingMode() == ISD::UNINDEXED && 8410 "unaligned indexed stores not implemented!"); 8411 SDValue Chain = ST->getChain(); 8412 SDValue Ptr = ST->getBasePtr(); 8413 SDValue Val = ST->getValue(); 8414 EVT VT = Val.getValueType(); 8415 Align Alignment = ST->getOriginalAlign(); 8416 auto &MF = DAG.getMachineFunction(); 8417 EVT StoreMemVT = ST->getMemoryVT(); 8418 8419 SDLoc dl(ST); 8420 if (StoreMemVT.isFloatingPoint() || StoreMemVT.isVector()) { 8421 EVT intVT = EVT::getIntegerVT(*DAG.getContext(), VT.getSizeInBits()); 8422 if (isTypeLegal(intVT)) { 8423 if (!isOperationLegalOrCustom(ISD::STORE, intVT) && 8424 StoreMemVT.isVector()) { 8425 // Scalarize the store and let the individual components be handled. 8426 SDValue Result = scalarizeVectorStore(ST, DAG); 8427 return Result; 8428 } 8429 // Expand to a bitconvert of the value to the integer type of the 8430 // same size, then a (misaligned) int store. 8431 // FIXME: Does not handle truncating floating point stores! 8432 SDValue Result = DAG.getNode(ISD::BITCAST, dl, intVT, Val); 8433 Result = DAG.getStore(Chain, dl, Result, Ptr, ST->getPointerInfo(), 8434 Alignment, ST->getMemOperand()->getFlags()); 8435 return Result; 8436 } 8437 // Do a (aligned) store to a stack slot, then copy from the stack slot 8438 // to the final destination using (unaligned) integer loads and stores. 8439 MVT RegVT = getRegisterType( 8440 *DAG.getContext(), 8441 EVT::getIntegerVT(*DAG.getContext(), StoreMemVT.getSizeInBits())); 8442 EVT PtrVT = Ptr.getValueType(); 8443 unsigned StoredBytes = StoreMemVT.getStoreSize(); 8444 unsigned RegBytes = RegVT.getSizeInBits() / 8; 8445 unsigned NumRegs = (StoredBytes + RegBytes - 1) / RegBytes; 8446 8447 // Make sure the stack slot is also aligned for the register type. 
8448 SDValue StackPtr = DAG.CreateStackTemporary(StoreMemVT, RegVT); 8449 auto FrameIndex = cast<FrameIndexSDNode>(StackPtr.getNode())->getIndex(); 8450 8451 // Perform the original store, only redirected to the stack slot. 8452 SDValue Store = DAG.getTruncStore( 8453 Chain, dl, Val, StackPtr, 8454 MachinePointerInfo::getFixedStack(MF, FrameIndex, 0), StoreMemVT); 8455 8456 EVT StackPtrVT = StackPtr.getValueType(); 8457 8458 SDValue PtrIncrement = DAG.getConstant(RegBytes, dl, PtrVT); 8459 SDValue StackPtrIncrement = DAG.getConstant(RegBytes, dl, StackPtrVT); 8460 SmallVector<SDValue, 8> Stores; 8461 unsigned Offset = 0; 8462 8463 // Do all but one copies using the full register width. 8464 for (unsigned i = 1; i < NumRegs; i++) { 8465 // Load one integer register's worth from the stack slot. 8466 SDValue Load = DAG.getLoad( 8467 RegVT, dl, Store, StackPtr, 8468 MachinePointerInfo::getFixedStack(MF, FrameIndex, Offset)); 8469 // Store it to the final location. Remember the store. 8470 Stores.push_back(DAG.getStore(Load.getValue(1), dl, Load, Ptr, 8471 ST->getPointerInfo().getWithOffset(Offset), 8472 ST->getOriginalAlign(), 8473 ST->getMemOperand()->getFlags())); 8474 // Increment the pointers. 8475 Offset += RegBytes; 8476 StackPtr = DAG.getObjectPtrOffset(dl, StackPtr, StackPtrIncrement); 8477 Ptr = DAG.getObjectPtrOffset(dl, Ptr, PtrIncrement); 8478 } 8479 8480 // The last store may be partial. Do a truncating store. On big-endian 8481 // machines this requires an extending load from the stack slot to ensure 8482 // that the bits are in the right place. 8483 EVT LoadMemVT = 8484 EVT::getIntegerVT(*DAG.getContext(), 8 * (StoredBytes - Offset)); 8485 8486 // Load from the stack slot. 8487 SDValue Load = DAG.getExtLoad( 8488 ISD::EXTLOAD, dl, RegVT, Store, StackPtr, 8489 MachinePointerInfo::getFixedStack(MF, FrameIndex, Offset), LoadMemVT); 8490 8491 Stores.push_back( 8492 DAG.getTruncStore(Load.getValue(1), dl, Load, Ptr, 8493 ST->getPointerInfo().getWithOffset(Offset), LoadMemVT, 8494 ST->getOriginalAlign(), 8495 ST->getMemOperand()->getFlags(), ST->getAAInfo())); 8496 // The order of the stores doesn't matter - say it with a TokenFactor. 8497 SDValue Result = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, Stores); 8498 return Result; 8499 } 8500 8501 assert(StoreMemVT.isInteger() && !StoreMemVT.isVector() && 8502 "Unaligned store of unknown type."); 8503 // Get the half-size VT 8504 EVT NewStoredVT = StoreMemVT.getHalfSizedIntegerVT(*DAG.getContext()); 8505 unsigned NumBits = NewStoredVT.getFixedSizeInBits(); 8506 unsigned IncrementSize = NumBits / 8; 8507 8508 // Divide the stored value in two parts. 8509 SDValue ShiftAmount = DAG.getConstant( 8510 NumBits, dl, getShiftAmountTy(Val.getValueType(), DAG.getDataLayout())); 8511 SDValue Lo = Val; 8512 SDValue Hi = DAG.getNode(ISD::SRL, dl, VT, Val, ShiftAmount); 8513 8514 // Store the two parts 8515 SDValue Store1, Store2; 8516 Store1 = DAG.getTruncStore(Chain, dl, 8517 DAG.getDataLayout().isLittleEndian() ? Lo : Hi, 8518 Ptr, ST->getPointerInfo(), NewStoredVT, Alignment, 8519 ST->getMemOperand()->getFlags()); 8520 8521 Ptr = DAG.getObjectPtrOffset(dl, Ptr, TypeSize::Fixed(IncrementSize)); 8522 Store2 = DAG.getTruncStore( 8523 Chain, dl, DAG.getDataLayout().isLittleEndian() ? 
Hi : Lo, Ptr, 8524 ST->getPointerInfo().getWithOffset(IncrementSize), NewStoredVT, Alignment, 8525 ST->getMemOperand()->getFlags(), ST->getAAInfo()); 8526 8527 SDValue Result = 8528 DAG.getNode(ISD::TokenFactor, dl, MVT::Other, Store1, Store2); 8529 return Result; 8530 } 8531 8532 SDValue 8533 TargetLowering::IncrementMemoryAddress(SDValue Addr, SDValue Mask, 8534 const SDLoc &DL, EVT DataVT, 8535 SelectionDAG &DAG, 8536 bool IsCompressedMemory) const { 8537 SDValue Increment; 8538 EVT AddrVT = Addr.getValueType(); 8539 EVT MaskVT = Mask.getValueType(); 8540 assert(DataVT.getVectorElementCount() == MaskVT.getVectorElementCount() && 8541 "Incompatible types of Data and Mask"); 8542 if (IsCompressedMemory) { 8543 if (DataVT.isScalableVector()) 8544 report_fatal_error( 8545 "Cannot currently handle compressed memory with scalable vectors"); 8546 // Incrementing the pointer according to number of '1's in the mask. 8547 EVT MaskIntVT = EVT::getIntegerVT(*DAG.getContext(), MaskVT.getSizeInBits()); 8548 SDValue MaskInIntReg = DAG.getBitcast(MaskIntVT, Mask); 8549 if (MaskIntVT.getSizeInBits() < 32) { 8550 MaskInIntReg = DAG.getNode(ISD::ZERO_EXTEND, DL, MVT::i32, MaskInIntReg); 8551 MaskIntVT = MVT::i32; 8552 } 8553 8554 // Count '1's with POPCNT. 8555 Increment = DAG.getNode(ISD::CTPOP, DL, MaskIntVT, MaskInIntReg); 8556 Increment = DAG.getZExtOrTrunc(Increment, DL, AddrVT); 8557 // Scale is an element size in bytes. 8558 SDValue Scale = DAG.getConstant(DataVT.getScalarSizeInBits() / 8, DL, 8559 AddrVT); 8560 Increment = DAG.getNode(ISD::MUL, DL, AddrVT, Increment, Scale); 8561 } else if (DataVT.isScalableVector()) { 8562 Increment = DAG.getVScale(DL, AddrVT, 8563 APInt(AddrVT.getFixedSizeInBits(), 8564 DataVT.getStoreSize().getKnownMinSize())); 8565 } else 8566 Increment = DAG.getConstant(DataVT.getStoreSize(), DL, AddrVT); 8567 8568 return DAG.getNode(ISD::ADD, DL, AddrVT, Addr, Increment); 8569 } 8570 8571 static SDValue clampDynamicVectorIndex(SelectionDAG &DAG, SDValue Idx, 8572 EVT VecVT, const SDLoc &dl, 8573 ElementCount SubEC) { 8574 assert(!(SubEC.isScalable() && VecVT.isFixedLengthVector()) && 8575 "Cannot index a scalable vector within a fixed-width vector"); 8576 8577 unsigned NElts = VecVT.getVectorMinNumElements(); 8578 unsigned NumSubElts = SubEC.getKnownMinValue(); 8579 EVT IdxVT = Idx.getValueType(); 8580 8581 if (VecVT.isScalableVector() && !SubEC.isScalable()) { 8582 // If this is a constant index and we know the value plus the number of the 8583 // elements in the subvector minus one is less than the minimum number of 8584 // elements then it's safe to return Idx. 8585 if (auto *IdxCst = dyn_cast<ConstantSDNode>(Idx)) 8586 if (IdxCst->getZExtValue() + (NumSubElts - 1) < NElts) 8587 return Idx; 8588 SDValue VS = 8589 DAG.getVScale(dl, IdxVT, APInt(IdxVT.getFixedSizeInBits(), NElts)); 8590 unsigned SubOpcode = NumSubElts <= NElts ? ISD::SUB : ISD::USUBSAT; 8591 SDValue Sub = DAG.getNode(SubOpcode, dl, IdxVT, VS, 8592 DAG.getConstant(NumSubElts, dl, IdxVT)); 8593 return DAG.getNode(ISD::UMIN, dl, IdxVT, Idx, Sub); 8594 } 8595 if (isPowerOf2_32(NElts) && NumSubElts == 1) { 8596 APInt Imm = APInt::getLowBitsSet(IdxVT.getSizeInBits(), Log2_32(NElts)); 8597 return DAG.getNode(ISD::AND, dl, IdxVT, Idx, 8598 DAG.getConstant(Imm, dl, IdxVT)); 8599 } 8600 unsigned MaxIndex = NumSubElts < NElts ? 
NElts - NumSubElts : 0;
8601 return DAG.getNode(ISD::UMIN, dl, IdxVT, Idx,
8602 DAG.getConstant(MaxIndex, dl, IdxVT));
8603 }
8604
8605 SDValue TargetLowering::getVectorElementPointer(SelectionDAG &DAG,
8606 SDValue VecPtr, EVT VecVT,
8607 SDValue Index) const {
8608 return getVectorSubVecPointer(
8609 DAG, VecPtr, VecVT,
8610 EVT::getVectorVT(*DAG.getContext(), VecVT.getVectorElementType(), 1),
8611 Index);
8612 }
8613
8614 SDValue TargetLowering::getVectorSubVecPointer(SelectionDAG &DAG,
8615 SDValue VecPtr, EVT VecVT,
8616 EVT SubVecVT,
8617 SDValue Index) const {
8618 SDLoc dl(Index);
8619 // Make sure the index type is big enough to compute in.
8620 Index = DAG.getZExtOrTrunc(Index, dl, VecPtr.getValueType());
8621
8622 EVT EltVT = VecVT.getVectorElementType();
8623
8624 // Calculate the element offset and add it to the pointer.
8625 unsigned EltSize = EltVT.getFixedSizeInBits() / 8; // FIXME: should be ABI size.
8626 assert(EltSize * 8 == EltVT.getFixedSizeInBits() &&
8627 "Converting bits to bytes lost precision");
8628 assert(SubVecVT.getVectorElementType() == EltVT &&
8629 "Sub-vector must be a vector with matching element type");
8630 Index = clampDynamicVectorIndex(DAG, Index, VecVT, dl,
8631 SubVecVT.getVectorElementCount());
8632
8633 EVT IdxVT = Index.getValueType();
8634 if (SubVecVT.isScalableVector())
8635 Index =
8636 DAG.getNode(ISD::MUL, dl, IdxVT, Index,
8637 DAG.getVScale(dl, IdxVT, APInt(IdxVT.getSizeInBits(), 1)));
8638
8639 Index = DAG.getNode(ISD::MUL, dl, IdxVT, Index,
8640 DAG.getConstant(EltSize, dl, IdxVT));
8641 return DAG.getMemBasePlusOffset(VecPtr, Index, dl);
8642 }
8643
8644 //===----------------------------------------------------------------------===//
8645 // Implementation of Emulated TLS Model
8646 //===----------------------------------------------------------------------===//
8647
8648 SDValue TargetLowering::LowerToTLSEmulatedModel(const GlobalAddressSDNode *GA,
8649 SelectionDAG &DAG) const {
8650 // Access to the address of TLS variable xyz is lowered to a function call:
8651 // __emutls_get_address( address of global variable named "__emutls_v.xyz" )
8652 EVT PtrVT = getPointerTy(DAG.getDataLayout());
8653 PointerType *VoidPtrType = Type::getInt8PtrTy(*DAG.getContext());
8654 SDLoc dl(GA);
8655
8656 ArgListTy Args;
8657 ArgListEntry Entry;
8658 std::string NameString = ("__emutls_v." + GA->getGlobal()->getName()).str();
8659 Module *VariableModule = const_cast<Module*>(GA->getGlobal()->getParent());
8660 StringRef EmuTlsVarName(NameString);
8661 GlobalVariable *EmuTlsVar = VariableModule->getNamedGlobal(EmuTlsVarName);
8662 assert(EmuTlsVar && "Cannot find EmuTlsVar ");
8663 Entry.Node = DAG.getGlobalAddress(EmuTlsVar, dl, PtrVT);
8664 Entry.Ty = VoidPtrType;
8665 Args.push_back(Entry);
8666
8667 SDValue EmuTlsGetAddr = DAG.getExternalSymbol("__emutls_get_address", PtrVT);
8668
8669 TargetLowering::CallLoweringInfo CLI(DAG);
8670 CLI.setDebugLoc(dl).setChain(DAG.getEntryNode());
8671 CLI.setLibCallee(CallingConv::C, VoidPtrType, EmuTlsGetAddr, std::move(Args));
8672 std::pair<SDValue, SDValue> CallResult = LowerCallTo(CLI);
8673
8674 // TLSADDR will be codegen'ed as a call. Inform MFI that the function has calls.
8675 // At least for X86 targets; maybe good for other targets too?
8676 MachineFrameInfo &MFI = DAG.getMachineFunction().getFrameInfo();
8677 MFI.setAdjustsStack(true); // Is this only for the X86 target?
8678 MFI.setHasCalls(true); 8679 8680 assert((GA->getOffset() == 0) && 8681 "Emulated TLS must have zero offset in GlobalAddressSDNode"); 8682 return CallResult.first; 8683 } 8684 8685 SDValue TargetLowering::lowerCmpEqZeroToCtlzSrl(SDValue Op, 8686 SelectionDAG &DAG) const { 8687 assert((Op->getOpcode() == ISD::SETCC) && "Input has to be a SETCC node."); 8688 if (!isCtlzFast()) 8689 return SDValue(); 8690 ISD::CondCode CC = cast<CondCodeSDNode>(Op.getOperand(2))->get(); 8691 SDLoc dl(Op); 8692 if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op.getOperand(1))) { 8693 if (C->isZero() && CC == ISD::SETEQ) { 8694 EVT VT = Op.getOperand(0).getValueType(); 8695 SDValue Zext = Op.getOperand(0); 8696 if (VT.bitsLT(MVT::i32)) { 8697 VT = MVT::i32; 8698 Zext = DAG.getNode(ISD::ZERO_EXTEND, dl, VT, Op.getOperand(0)); 8699 } 8700 unsigned Log2b = Log2_32(VT.getSizeInBits()); 8701 SDValue Clz = DAG.getNode(ISD::CTLZ, dl, VT, Zext); 8702 SDValue Scc = DAG.getNode(ISD::SRL, dl, VT, Clz, 8703 DAG.getConstant(Log2b, dl, MVT::i32)); 8704 return DAG.getNode(ISD::TRUNCATE, dl, MVT::i32, Scc); 8705 } 8706 } 8707 return SDValue(); 8708 } 8709 8710 SDValue TargetLowering::expandIntMINMAX(SDNode *Node, SelectionDAG &DAG) const { 8711 SDValue Op0 = Node->getOperand(0); 8712 SDValue Op1 = Node->getOperand(1); 8713 EVT VT = Op0.getValueType(); 8714 unsigned Opcode = Node->getOpcode(); 8715 SDLoc DL(Node); 8716 8717 // umin(x,y) -> sub(x,usubsat(x,y)) 8718 if (Opcode == ISD::UMIN && isOperationLegal(ISD::SUB, VT) && 8719 isOperationLegal(ISD::USUBSAT, VT)) { 8720 return DAG.getNode(ISD::SUB, DL, VT, Op0, 8721 DAG.getNode(ISD::USUBSAT, DL, VT, Op0, Op1)); 8722 } 8723 8724 // umax(x,y) -> add(x,usubsat(y,x)) 8725 if (Opcode == ISD::UMAX && isOperationLegal(ISD::ADD, VT) && 8726 isOperationLegal(ISD::USUBSAT, VT)) { 8727 return DAG.getNode(ISD::ADD, DL, VT, Op0, 8728 DAG.getNode(ISD::USUBSAT, DL, VT, Op1, Op0)); 8729 } 8730 8731 // Expand Y = MAX(A, B) -> Y = (A > B) ? A : B 8732 ISD::CondCode CC; 8733 switch (Opcode) { 8734 default: llvm_unreachable("How did we get here?"); 8735 case ISD::SMAX: CC = ISD::SETGT; break; 8736 case ISD::SMIN: CC = ISD::SETLT; break; 8737 case ISD::UMAX: CC = ISD::SETUGT; break; 8738 case ISD::UMIN: CC = ISD::SETULT; break; 8739 } 8740 8741 // FIXME: Should really try to split the vector in case it's legal on a 8742 // subvector. 
8743 if (VT.isVector() && !isOperationLegalOrCustom(ISD::VSELECT, VT)) 8744 return DAG.UnrollVectorOp(Node); 8745 8746 EVT BoolVT = getSetCCResultType(DAG.getDataLayout(), *DAG.getContext(), VT); 8747 SDValue Cond = DAG.getSetCC(DL, BoolVT, Op0, Op1, CC); 8748 return DAG.getSelect(DL, VT, Cond, Op0, Op1); 8749 } 8750 8751 SDValue TargetLowering::expandAddSubSat(SDNode *Node, SelectionDAG &DAG) const { 8752 unsigned Opcode = Node->getOpcode(); 8753 SDValue LHS = Node->getOperand(0); 8754 SDValue RHS = Node->getOperand(1); 8755 EVT VT = LHS.getValueType(); 8756 SDLoc dl(Node); 8757 8758 assert(VT == RHS.getValueType() && "Expected operands to be the same type"); 8759 assert(VT.isInteger() && "Expected operands to be integers"); 8760 8761 // usub.sat(a, b) -> umax(a, b) - b 8762 if (Opcode == ISD::USUBSAT && isOperationLegal(ISD::UMAX, VT)) { 8763 SDValue Max = DAG.getNode(ISD::UMAX, dl, VT, LHS, RHS); 8764 return DAG.getNode(ISD::SUB, dl, VT, Max, RHS); 8765 } 8766 8767 // uadd.sat(a, b) -> umin(a, ~b) + b 8768 if (Opcode == ISD::UADDSAT && isOperationLegal(ISD::UMIN, VT)) { 8769 SDValue InvRHS = DAG.getNOT(dl, RHS, VT); 8770 SDValue Min = DAG.getNode(ISD::UMIN, dl, VT, LHS, InvRHS); 8771 return DAG.getNode(ISD::ADD, dl, VT, Min, RHS); 8772 } 8773 8774 unsigned OverflowOp; 8775 switch (Opcode) { 8776 case ISD::SADDSAT: 8777 OverflowOp = ISD::SADDO; 8778 break; 8779 case ISD::UADDSAT: 8780 OverflowOp = ISD::UADDO; 8781 break; 8782 case ISD::SSUBSAT: 8783 OverflowOp = ISD::SSUBO; 8784 break; 8785 case ISD::USUBSAT: 8786 OverflowOp = ISD::USUBO; 8787 break; 8788 default: 8789 llvm_unreachable("Expected method to receive signed or unsigned saturation " 8790 "addition or subtraction node."); 8791 } 8792 8793 // FIXME: Should really try to split the vector in case it's legal on a 8794 // subvector. 8795 if (VT.isVector() && !isOperationLegalOrCustom(ISD::VSELECT, VT)) 8796 return DAG.UnrollVectorOp(Node); 8797 8798 unsigned BitWidth = LHS.getScalarValueSizeInBits(); 8799 EVT BoolVT = getSetCCResultType(DAG.getDataLayout(), *DAG.getContext(), VT); 8800 SDValue Result = DAG.getNode(OverflowOp, dl, DAG.getVTList(VT, BoolVT), LHS, RHS); 8801 SDValue SumDiff = Result.getValue(0); 8802 SDValue Overflow = Result.getValue(1); 8803 SDValue Zero = DAG.getConstant(0, dl, VT); 8804 SDValue AllOnes = DAG.getAllOnesConstant(dl, VT); 8805 8806 if (Opcode == ISD::UADDSAT) { 8807 if (getBooleanContents(VT) == ZeroOrNegativeOneBooleanContent) { 8808 // (LHS + RHS) | OverflowMask 8809 SDValue OverflowMask = DAG.getSExtOrTrunc(Overflow, dl, VT); 8810 return DAG.getNode(ISD::OR, dl, VT, SumDiff, OverflowMask); 8811 } 8812 // Overflow ? 0xffff.... : (LHS + RHS) 8813 return DAG.getSelect(dl, VT, Overflow, AllOnes, SumDiff); 8814 } 8815 8816 if (Opcode == ISD::USUBSAT) { 8817 if (getBooleanContents(VT) == ZeroOrNegativeOneBooleanContent) { 8818 // (LHS - RHS) & ~OverflowMask 8819 SDValue OverflowMask = DAG.getSExtOrTrunc(Overflow, dl, VT); 8820 SDValue Not = DAG.getNOT(dl, OverflowMask, VT); 8821 return DAG.getNode(ISD::AND, dl, VT, SumDiff, Not); 8822 } 8823 // Overflow ? 0 : (LHS - RHS) 8824 return DAG.getSelect(dl, VT, Overflow, Zero, SumDiff); 8825 } 8826 8827 // Overflow ? 
(SumDiff >> BW) ^ MinVal : SumDiff 8828 APInt MinVal = APInt::getSignedMinValue(BitWidth); 8829 SDValue SatMin = DAG.getConstant(MinVal, dl, VT); 8830 SDValue Shift = DAG.getNode(ISD::SRA, dl, VT, SumDiff, 8831 DAG.getConstant(BitWidth - 1, dl, VT)); 8832 Result = DAG.getNode(ISD::XOR, dl, VT, Shift, SatMin); 8833 return DAG.getSelect(dl, VT, Overflow, Result, SumDiff); 8834 } 8835 8836 SDValue TargetLowering::expandShlSat(SDNode *Node, SelectionDAG &DAG) const { 8837 unsigned Opcode = Node->getOpcode(); 8838 bool IsSigned = Opcode == ISD::SSHLSAT; 8839 SDValue LHS = Node->getOperand(0); 8840 SDValue RHS = Node->getOperand(1); 8841 EVT VT = LHS.getValueType(); 8842 SDLoc dl(Node); 8843 8844 assert((Node->getOpcode() == ISD::SSHLSAT || 8845 Node->getOpcode() == ISD::USHLSAT) && 8846 "Expected a SHLSAT opcode"); 8847 assert(VT == RHS.getValueType() && "Expected operands to be the same type"); 8848 assert(VT.isInteger() && "Expected operands to be integers"); 8849 8850 // If LHS != (LHS << RHS) >> RHS, we have overflow and must saturate. 8851 8852 unsigned BW = VT.getScalarSizeInBits(); 8853 SDValue Result = DAG.getNode(ISD::SHL, dl, VT, LHS, RHS); 8854 SDValue Orig = 8855 DAG.getNode(IsSigned ? ISD::SRA : ISD::SRL, dl, VT, Result, RHS); 8856 8857 SDValue SatVal; 8858 if (IsSigned) { 8859 SDValue SatMin = DAG.getConstant(APInt::getSignedMinValue(BW), dl, VT); 8860 SDValue SatMax = DAG.getConstant(APInt::getSignedMaxValue(BW), dl, VT); 8861 SatVal = DAG.getSelectCC(dl, LHS, DAG.getConstant(0, dl, VT), 8862 SatMin, SatMax, ISD::SETLT); 8863 } else { 8864 SatVal = DAG.getConstant(APInt::getMaxValue(BW), dl, VT); 8865 } 8866 Result = DAG.getSelectCC(dl, LHS, Orig, SatVal, Result, ISD::SETNE); 8867 8868 return Result; 8869 } 8870 8871 SDValue 8872 TargetLowering::expandFixedPointMul(SDNode *Node, SelectionDAG &DAG) const { 8873 assert((Node->getOpcode() == ISD::SMULFIX || 8874 Node->getOpcode() == ISD::UMULFIX || 8875 Node->getOpcode() == ISD::SMULFIXSAT || 8876 Node->getOpcode() == ISD::UMULFIXSAT) && 8877 "Expected a fixed point multiplication opcode"); 8878 8879 SDLoc dl(Node); 8880 SDValue LHS = Node->getOperand(0); 8881 SDValue RHS = Node->getOperand(1); 8882 EVT VT = LHS.getValueType(); 8883 unsigned Scale = Node->getConstantOperandVal(2); 8884 bool Saturating = (Node->getOpcode() == ISD::SMULFIXSAT || 8885 Node->getOpcode() == ISD::UMULFIXSAT); 8886 bool Signed = (Node->getOpcode() == ISD::SMULFIX || 8887 Node->getOpcode() == ISD::SMULFIXSAT); 8888 EVT BoolVT = getSetCCResultType(DAG.getDataLayout(), *DAG.getContext(), VT); 8889 unsigned VTSize = VT.getScalarSizeInBits(); 8890 8891 if (!Scale) { 8892 // [us]mul.fix(a, b, 0) -> mul(a, b) 8893 if (!Saturating) { 8894 if (isOperationLegalOrCustom(ISD::MUL, VT)) 8895 return DAG.getNode(ISD::MUL, dl, VT, LHS, RHS); 8896 } else if (Signed && isOperationLegalOrCustom(ISD::SMULO, VT)) { 8897 SDValue Result = 8898 DAG.getNode(ISD::SMULO, dl, DAG.getVTList(VT, BoolVT), LHS, RHS); 8899 SDValue Product = Result.getValue(0); 8900 SDValue Overflow = Result.getValue(1); 8901 SDValue Zero = DAG.getConstant(0, dl, VT); 8902 8903 APInt MinVal = APInt::getSignedMinValue(VTSize); 8904 APInt MaxVal = APInt::getSignedMaxValue(VTSize); 8905 SDValue SatMin = DAG.getConstant(MinVal, dl, VT); 8906 SDValue SatMax = DAG.getConstant(MaxVal, dl, VT); 8907 // Xor the inputs, if resulting sign bit is 0 the product will be 8908 // positive, else negative. 
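      // (Illustrative: for i8, 100 * 3 overflows; the signs match, so the
      // xor's sign bit is clear and the saturated result chosen below is
      // SatMax = 127.)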
8909 SDValue Xor = DAG.getNode(ISD::XOR, dl, VT, LHS, RHS); 8910 SDValue ProdNeg = DAG.getSetCC(dl, BoolVT, Xor, Zero, ISD::SETLT); 8911 Result = DAG.getSelect(dl, VT, ProdNeg, SatMin, SatMax); 8912 return DAG.getSelect(dl, VT, Overflow, Result, Product); 8913 } else if (!Signed && isOperationLegalOrCustom(ISD::UMULO, VT)) { 8914 SDValue Result = 8915 DAG.getNode(ISD::UMULO, dl, DAG.getVTList(VT, BoolVT), LHS, RHS); 8916 SDValue Product = Result.getValue(0); 8917 SDValue Overflow = Result.getValue(1); 8918 8919 APInt MaxVal = APInt::getMaxValue(VTSize); 8920 SDValue SatMax = DAG.getConstant(MaxVal, dl, VT); 8921 return DAG.getSelect(dl, VT, Overflow, SatMax, Product); 8922 } 8923 } 8924 8925 assert(((Signed && Scale < VTSize) || (!Signed && Scale <= VTSize)) && 8926 "Expected scale to be less than the number of bits if signed or at " 8927 "most the number of bits if unsigned."); 8928 assert(LHS.getValueType() == RHS.getValueType() && 8929 "Expected both operands to be the same type"); 8930 8931 // Get the upper and lower bits of the result. 8932 SDValue Lo, Hi; 8933 unsigned LoHiOp = Signed ? ISD::SMUL_LOHI : ISD::UMUL_LOHI; 8934 unsigned HiOp = Signed ? ISD::MULHS : ISD::MULHU; 8935 if (isOperationLegalOrCustom(LoHiOp, VT)) { 8936 SDValue Result = DAG.getNode(LoHiOp, dl, DAG.getVTList(VT, VT), LHS, RHS); 8937 Lo = Result.getValue(0); 8938 Hi = Result.getValue(1); 8939 } else if (isOperationLegalOrCustom(HiOp, VT)) { 8940 Lo = DAG.getNode(ISD::MUL, dl, VT, LHS, RHS); 8941 Hi = DAG.getNode(HiOp, dl, VT, LHS, RHS); 8942 } else if (VT.isVector()) { 8943 return SDValue(); 8944 } else { 8945 report_fatal_error("Unable to expand fixed point multiplication."); 8946 } 8947 8948 if (Scale == VTSize) 8949 // Result is just the top half since we'd be shifting by the width of the 8950 // operand. Overflow impossible so this works for both UMULFIX and 8951 // UMULFIXSAT. 8952 return Hi; 8953 8954 // The result will need to be shifted right by the scale since both operands 8955 // are scaled. The result is given to us in 2 halves, so we only want part of 8956 // both in the result. 8957 EVT ShiftTy = getShiftAmountTy(VT, DAG.getDataLayout()); 8958 SDValue Result = DAG.getNode(ISD::FSHR, dl, VT, Hi, Lo, 8959 DAG.getConstant(Scale, dl, ShiftTy)); 8960 if (!Saturating) 8961 return Result; 8962 8963 if (!Signed) { 8964 // Unsigned overflow happened if the upper (VTSize - Scale) bits (of the 8965 // widened multiplication) aren't all zeroes. 8966 8967 // Saturate to max if ((Hi >> Scale) != 0), 8968 // which is the same as if (Hi > ((1 << Scale) - 1)) 8969 APInt MaxVal = APInt::getMaxValue(VTSize); 8970 SDValue LowMask = DAG.getConstant(APInt::getLowBitsSet(VTSize, Scale), 8971 dl, VT); 8972 Result = DAG.getSelectCC(dl, Hi, LowMask, 8973 DAG.getConstant(MaxVal, dl, VT), Result, 8974 ISD::SETUGT); 8975 8976 return Result; 8977 } 8978 8979 // Signed overflow happened if the upper (VTSize - Scale + 1) bits (of the 8980 // widened multiplication) aren't all ones or all zeroes. 8981 8982 SDValue SatMin = DAG.getConstant(APInt::getSignedMinValue(VTSize), dl, VT); 8983 SDValue SatMax = DAG.getConstant(APInt::getSignedMaxValue(VTSize), dl, VT); 8984 8985 if (Scale == 0) { 8986 SDValue Sign = DAG.getNode(ISD::SRA, dl, VT, Lo, 8987 DAG.getConstant(VTSize - 1, dl, ShiftTy)); 8988 SDValue Overflow = DAG.getSetCC(dl, BoolVT, Hi, Sign, ISD::SETNE); 8989 // Saturated to SatMin if wide product is negative, and SatMax if wide 8990 // product is positive ... 
8991 SDValue Zero = DAG.getConstant(0, dl, VT);
8992 SDValue ResultIfOverflow = DAG.getSelectCC(dl, Hi, Zero, SatMin, SatMax,
8993 ISD::SETLT);
8994 // ... but only if we overflowed.
8995 return DAG.getSelect(dl, VT, Overflow, ResultIfOverflow, Result);
8996 }
8997
8998 // We handled Scale==0 above, so all the bits to examine are in Hi.
8999
9000 // Saturate to max if ((Hi >> (Scale - 1)) > 0),
9001 // which is the same as if (Hi > (1 << (Scale - 1)) - 1)
9002 SDValue LowMask = DAG.getConstant(APInt::getLowBitsSet(VTSize, Scale - 1),
9003 dl, VT);
9004 Result = DAG.getSelectCC(dl, Hi, LowMask, SatMax, Result, ISD::SETGT);
9005 // Saturate to min if ((Hi >> (Scale - 1)) < -1),
9006 // which is the same as if (Hi < (-1 << (Scale - 1)))
9007 SDValue HighMask =
9008 DAG.getConstant(APInt::getHighBitsSet(VTSize, VTSize - Scale + 1),
9009 dl, VT);
9010 Result = DAG.getSelectCC(dl, Hi, HighMask, SatMin, Result, ISD::SETLT);
9011 return Result;
9012 }
9013
9014 SDValue
9015 TargetLowering::expandFixedPointDiv(unsigned Opcode, const SDLoc &dl,
9016 SDValue LHS, SDValue RHS,
9017 unsigned Scale, SelectionDAG &DAG) const {
9018 assert((Opcode == ISD::SDIVFIX || Opcode == ISD::SDIVFIXSAT ||
9019 Opcode == ISD::UDIVFIX || Opcode == ISD::UDIVFIXSAT) &&
9020 "Expected a fixed point division opcode");
9021
9022 EVT VT = LHS.getValueType();
9023 bool Signed = Opcode == ISD::SDIVFIX || Opcode == ISD::SDIVFIXSAT;
9024 bool Saturating = Opcode == ISD::SDIVFIXSAT || Opcode == ISD::UDIVFIXSAT;
9025 EVT BoolVT = getSetCCResultType(DAG.getDataLayout(), *DAG.getContext(), VT);
9026
9027 // If there is enough room in the type to upscale the LHS or downscale the
9028 // RHS before the division, we can perform it in this type without having to
9029 // resize. For signed operations, the LHS headroom is the number of
9030 // redundant sign bits, and for unsigned ones it is the number of zeroes.
9031 // The headroom for the RHS is the number of trailing zeroes.
9032 unsigned LHSLead = Signed ? DAG.ComputeNumSignBits(LHS) - 1
9033 : DAG.computeKnownBits(LHS).countMinLeadingZeros();
9034 unsigned RHSTrail = DAG.computeKnownBits(RHS).countMinTrailingZeros();
9035
9036 // For signed saturating operations, we need to be able to detect true integer
9037 // division overflow; that is, when you have MIN / -EPS. However, this
9038 // is undefined behavior, and if we emit divisions that could take such
9039 // values it may cause undesired behavior (arithmetic exceptions on x86, for
9040 // example).
9041 // Avoid this by requiring an extra bit so that we never get this case.
9042 // FIXME: This is a bit unfortunate as it means that for an 8-bit 7-scale
9043 // signed saturating division, we need to emit a whopping 32-bit division.
9044 if (LHSLead + RHSTrail < Scale + (unsigned)(Saturating && Signed))
9045 return SDValue();
9046
9047 unsigned LHSShift = std::min(LHSLead, Scale);
9048 unsigned RHSShift = Scale - LHSShift;
9049
9050 // At this point, we know that if we shift the LHS up by LHSShift and the
9051 // RHS down by RHSShift, we can emit a regular division with a final scaling
9052 // factor of Scale.
9053
9054 EVT ShiftTy = getShiftAmountTy(VT, DAG.getDataLayout());
9055 if (LHSShift)
9056 LHS = DAG.getNode(ISD::SHL, dl, VT, LHS,
9057 DAG.getConstant(LHSShift, dl, ShiftTy));
9058 if (RHSShift)
9059 RHS = DAG.getNode(Signed ?
ISD::SRA : ISD::SRL, dl, VT, RHS, 9060 DAG.getConstant(RHSShift, dl, ShiftTy)); 9061 9062 SDValue Quot; 9063 if (Signed) { 9064 // For signed operations, if the resulting quotient is negative and the 9065 // remainder is nonzero, subtract 1 from the quotient to round towards 9066 // negative infinity. 9067 SDValue Rem; 9068 // FIXME: Ideally we would always produce an SDIVREM here, but if the 9069 // type isn't legal, SDIVREM cannot be expanded. There is no reason why 9070 // we couldn't just form a libcall, but the type legalizer doesn't do it. 9071 if (isTypeLegal(VT) && 9072 isOperationLegalOrCustom(ISD::SDIVREM, VT)) { 9073 Quot = DAG.getNode(ISD::SDIVREM, dl, 9074 DAG.getVTList(VT, VT), 9075 LHS, RHS); 9076 Rem = Quot.getValue(1); 9077 Quot = Quot.getValue(0); 9078 } else { 9079 Quot = DAG.getNode(ISD::SDIV, dl, VT, 9080 LHS, RHS); 9081 Rem = DAG.getNode(ISD::SREM, dl, VT, 9082 LHS, RHS); 9083 } 9084 SDValue Zero = DAG.getConstant(0, dl, VT); 9085 SDValue RemNonZero = DAG.getSetCC(dl, BoolVT, Rem, Zero, ISD::SETNE); 9086 SDValue LHSNeg = DAG.getSetCC(dl, BoolVT, LHS, Zero, ISD::SETLT); 9087 SDValue RHSNeg = DAG.getSetCC(dl, BoolVT, RHS, Zero, ISD::SETLT); 9088 SDValue QuotNeg = DAG.getNode(ISD::XOR, dl, BoolVT, LHSNeg, RHSNeg); 9089 SDValue Sub1 = DAG.getNode(ISD::SUB, dl, VT, Quot, 9090 DAG.getConstant(1, dl, VT)); 9091 Quot = DAG.getSelect(dl, VT, 9092 DAG.getNode(ISD::AND, dl, BoolVT, RemNonZero, QuotNeg), 9093 Sub1, Quot); 9094 } else 9095 Quot = DAG.getNode(ISD::UDIV, dl, VT, 9096 LHS, RHS); 9097 9098 return Quot; 9099 } 9100 9101 void TargetLowering::expandUADDSUBO( 9102 SDNode *Node, SDValue &Result, SDValue &Overflow, SelectionDAG &DAG) const { 9103 SDLoc dl(Node); 9104 SDValue LHS = Node->getOperand(0); 9105 SDValue RHS = Node->getOperand(1); 9106 bool IsAdd = Node->getOpcode() == ISD::UADDO; 9107 9108 // If ADD/SUBCARRY is legal, use that instead. 9109 unsigned OpcCarry = IsAdd ? ISD::ADDCARRY : ISD::SUBCARRY; 9110 if (isOperationLegalOrCustom(OpcCarry, Node->getValueType(0))) { 9111 SDValue CarryIn = DAG.getConstant(0, dl, Node->getValueType(1)); 9112 SDValue NodeCarry = DAG.getNode(OpcCarry, dl, Node->getVTList(), 9113 { LHS, RHS, CarryIn }); 9114 Result = SDValue(NodeCarry.getNode(), 0); 9115 Overflow = SDValue(NodeCarry.getNode(), 1); 9116 return; 9117 } 9118 9119 Result = DAG.getNode(IsAdd ? ISD::ADD : ISD::SUB, dl, 9120 LHS.getValueType(), LHS, RHS); 9121 9122 EVT ResultType = Node->getValueType(1); 9123 EVT SetCCType = getSetCCResultType( 9124 DAG.getDataLayout(), *DAG.getContext(), Node->getValueType(0)); 9125 SDValue SetCC; 9126 if (IsAdd && isOneConstant(RHS)) { 9127 // Special case: uaddo X, 1 overflowed if X+1 is 0. This potential reduces 9128 // the live range of X. We assume comparing with 0 is cheap. 9129 // The general case (X + C) < C is not necessarily beneficial. Although we 9130 // reduce the live range of X, we may introduce the materialization of 9131 // constant C. 9132 SetCC = 9133 DAG.getSetCC(dl, SetCCType, Result, 9134 DAG.getConstant(0, dl, Node->getValueType(0)), ISD::SETEQ); 9135 } else { 9136 ISD::CondCode CC = IsAdd ? 
ISD::SETULT : ISD::SETUGT; 9137 SetCC = DAG.getSetCC(dl, SetCCType, Result, LHS, CC); 9138 } 9139 Overflow = DAG.getBoolExtOrTrunc(SetCC, dl, ResultType, ResultType); 9140 } 9141 9142 void TargetLowering::expandSADDSUBO( 9143 SDNode *Node, SDValue &Result, SDValue &Overflow, SelectionDAG &DAG) const { 9144 SDLoc dl(Node); 9145 SDValue LHS = Node->getOperand(0); 9146 SDValue RHS = Node->getOperand(1); 9147 bool IsAdd = Node->getOpcode() == ISD::SADDO; 9148 9149 Result = DAG.getNode(IsAdd ? ISD::ADD : ISD::SUB, dl, 9150 LHS.getValueType(), LHS, RHS); 9151 9152 EVT ResultType = Node->getValueType(1); 9153 EVT OType = getSetCCResultType( 9154 DAG.getDataLayout(), *DAG.getContext(), Node->getValueType(0)); 9155 9156 // If SADDSAT/SSUBSAT is legal, compare results to detect overflow. 9157 unsigned OpcSat = IsAdd ? ISD::SADDSAT : ISD::SSUBSAT; 9158 if (isOperationLegal(OpcSat, LHS.getValueType())) { 9159 SDValue Sat = DAG.getNode(OpcSat, dl, LHS.getValueType(), LHS, RHS); 9160 SDValue SetCC = DAG.getSetCC(dl, OType, Result, Sat, ISD::SETNE); 9161 Overflow = DAG.getBoolExtOrTrunc(SetCC, dl, ResultType, ResultType); 9162 return; 9163 } 9164 9165 SDValue Zero = DAG.getConstant(0, dl, LHS.getValueType()); 9166 9167 // For an addition, the result should be less than one of the operands (LHS) 9168 // if and only if the other operand (RHS) is negative, otherwise there will 9169 // be overflow. 9170 // For a subtraction, the result should be less than one of the operands 9171 // (LHS) if and only if the other operand (RHS) is (non-zero) positive, 9172 // otherwise there will be overflow. 9173 SDValue ResultLowerThanLHS = DAG.getSetCC(dl, OType, Result, LHS, ISD::SETLT); 9174 SDValue ConditionRHS = 9175 DAG.getSetCC(dl, OType, RHS, Zero, IsAdd ? ISD::SETLT : ISD::SETGT); 9176 9177 Overflow = DAG.getBoolExtOrTrunc( 9178 DAG.getNode(ISD::XOR, dl, OType, ConditionRHS, ResultLowerThanLHS), dl, 9179 ResultType, ResultType); 9180 } 9181 9182 bool TargetLowering::expandMULO(SDNode *Node, SDValue &Result, 9183 SDValue &Overflow, SelectionDAG &DAG) const { 9184 SDLoc dl(Node); 9185 EVT VT = Node->getValueType(0); 9186 EVT SetCCVT = getSetCCResultType(DAG.getDataLayout(), *DAG.getContext(), VT); 9187 SDValue LHS = Node->getOperand(0); 9188 SDValue RHS = Node->getOperand(1); 9189 bool isSigned = Node->getOpcode() == ISD::SMULO; 9190 9191 // For power-of-two multiplications we can use a simpler shift expansion. 9192 if (ConstantSDNode *RHSC = isConstOrConstSplat(RHS)) { 9193 const APInt &C = RHSC->getAPIntValue(); 9194 // mulo(X, 1 << S) -> { X << S, (X << S) >> S != X } 9195 if (C.isPowerOf2()) { 9196 // smulo(x, signed_min) is same as umulo(x, signed_min). 9197 bool UseArithShift = isSigned && !C.isMinSignedValue(); 9198 EVT ShiftAmtTy = getShiftAmountTy(VT, DAG.getDataLayout()); 9199 SDValue ShiftAmt = DAG.getConstant(C.logBase2(), dl, ShiftAmtTy); 9200 Result = DAG.getNode(ISD::SHL, dl, VT, LHS, ShiftAmt); 9201 Overflow = DAG.getSetCC(dl, SetCCVT, 9202 DAG.getNode(UseArithShift ? 
ISD::SRA : ISD::SRL, 9203 dl, VT, Result, ShiftAmt), 9204 LHS, ISD::SETNE); 9205 return true; 9206 } 9207 } 9208 9209 EVT WideVT = EVT::getIntegerVT(*DAG.getContext(), VT.getScalarSizeInBits() * 2); 9210 if (VT.isVector()) 9211 WideVT = 9212 EVT::getVectorVT(*DAG.getContext(), WideVT, VT.getVectorElementCount()); 9213 9214 SDValue BottomHalf; 9215 SDValue TopHalf; 9216 static const unsigned Ops[2][3] = 9217 { { ISD::MULHU, ISD::UMUL_LOHI, ISD::ZERO_EXTEND }, 9218 { ISD::MULHS, ISD::SMUL_LOHI, ISD::SIGN_EXTEND }}; 9219 if (isOperationLegalOrCustom(Ops[isSigned][0], VT)) { 9220 BottomHalf = DAG.getNode(ISD::MUL, dl, VT, LHS, RHS); 9221 TopHalf = DAG.getNode(Ops[isSigned][0], dl, VT, LHS, RHS); 9222 } else if (isOperationLegalOrCustom(Ops[isSigned][1], VT)) { 9223 BottomHalf = DAG.getNode(Ops[isSigned][1], dl, DAG.getVTList(VT, VT), LHS, 9224 RHS); 9225 TopHalf = BottomHalf.getValue(1); 9226 } else if (isTypeLegal(WideVT)) { 9227 LHS = DAG.getNode(Ops[isSigned][2], dl, WideVT, LHS); 9228 RHS = DAG.getNode(Ops[isSigned][2], dl, WideVT, RHS); 9229 SDValue Mul = DAG.getNode(ISD::MUL, dl, WideVT, LHS, RHS); 9230 BottomHalf = DAG.getNode(ISD::TRUNCATE, dl, VT, Mul); 9231 SDValue ShiftAmt = DAG.getConstant(VT.getScalarSizeInBits(), dl, 9232 getShiftAmountTy(WideVT, DAG.getDataLayout())); 9233 TopHalf = DAG.getNode(ISD::TRUNCATE, dl, VT, 9234 DAG.getNode(ISD::SRL, dl, WideVT, Mul, ShiftAmt)); 9235 } else { 9236 if (VT.isVector()) 9237 return false; 9238 9239 // We can fall back to a libcall with an illegal type for the MUL if we 9240 // have a libcall big enough. 9241 // Also, we can fall back to a division in some cases, but that's a big 9242 // performance hit in the general case. 9243 RTLIB::Libcall LC = RTLIB::UNKNOWN_LIBCALL; 9244 if (WideVT == MVT::i16) 9245 LC = RTLIB::MUL_I16; 9246 else if (WideVT == MVT::i32) 9247 LC = RTLIB::MUL_I32; 9248 else if (WideVT == MVT::i64) 9249 LC = RTLIB::MUL_I64; 9250 else if (WideVT == MVT::i128) 9251 LC = RTLIB::MUL_I128; 9252 assert(LC != RTLIB::UNKNOWN_LIBCALL && "Cannot expand this operation!"); 9253 9254 SDValue HiLHS; 9255 SDValue HiRHS; 9256 if (isSigned) { 9257 // The high part is obtained by SRA'ing all but one of the bits of low 9258 // part. 9259 unsigned LoSize = VT.getFixedSizeInBits(); 9260 HiLHS = 9261 DAG.getNode(ISD::SRA, dl, VT, LHS, 9262 DAG.getConstant(LoSize - 1, dl, 9263 getPointerTy(DAG.getDataLayout()))); 9264 HiRHS = 9265 DAG.getNode(ISD::SRA, dl, VT, RHS, 9266 DAG.getConstant(LoSize - 1, dl, 9267 getPointerTy(DAG.getDataLayout()))); 9268 } else { 9269 HiLHS = DAG.getConstant(0, dl, VT); 9270 HiRHS = DAG.getConstant(0, dl, VT); 9271 } 9272 9273 // Here we're passing the 2 arguments explicitly as 4 arguments that are 9274 // pre-lowered to the correct types. This all depends upon WideVT not 9275 // being a legal type for the architecture and thus has to be split to 9276 // two arguments. 9277 SDValue Ret; 9278 TargetLowering::MakeLibCallOptions CallOptions; 9279 CallOptions.setSExt(isSigned); 9280 CallOptions.setIsPostTypeLegalization(true); 9281 if (shouldSplitFunctionArgumentsAsLittleEndian(DAG.getDataLayout())) { 9282 // Halves of WideVT are packed into registers in different order 9283 // depending on platform endianness. This is usually handled by 9284 // the C calling convention, but we can't defer to it in 9285 // the legalizer. 
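// For example (illustrative), expanding an i64 [su]mulo through MUL_I128 on
// a 32-bit target passes each i64 as two words: { LHS, HiLHS, RHS, HiRHS }
// when the target splits arguments low-word-first (the little-endian case
// below), and { HiLHS, LHS, HiRHS, RHS } otherwise.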
9286 SDValue Args[] = { LHS, HiLHS, RHS, HiRHS }; 9287 Ret = makeLibCall(DAG, LC, WideVT, Args, CallOptions, dl).first; 9288 } else { 9289 SDValue Args[] = { HiLHS, LHS, HiRHS, RHS }; 9290 Ret = makeLibCall(DAG, LC, WideVT, Args, CallOptions, dl).first; 9291 } 9292 assert(Ret.getOpcode() == ISD::MERGE_VALUES && 9293 "Ret value is a collection of constituent nodes holding result."); 9294 if (DAG.getDataLayout().isLittleEndian()) { 9295 // Same as above. 9296 BottomHalf = Ret.getOperand(0); 9297 TopHalf = Ret.getOperand(1); 9298 } else { 9299 BottomHalf = Ret.getOperand(1); 9300 TopHalf = Ret.getOperand(0); 9301 } 9302 } 9303 9304 Result = BottomHalf; 9305 if (isSigned) { 9306 SDValue ShiftAmt = DAG.getConstant( 9307 VT.getScalarSizeInBits() - 1, dl, 9308 getShiftAmountTy(BottomHalf.getValueType(), DAG.getDataLayout())); 9309 SDValue Sign = DAG.getNode(ISD::SRA, dl, VT, BottomHalf, ShiftAmt); 9310 Overflow = DAG.getSetCC(dl, SetCCVT, TopHalf, Sign, ISD::SETNE); 9311 } else { 9312 Overflow = DAG.getSetCC(dl, SetCCVT, TopHalf, 9313 DAG.getConstant(0, dl, VT), ISD::SETNE); 9314 } 9315 9316 // Truncate the result if SetCC returns a larger type than needed. 9317 EVT RType = Node->getValueType(1); 9318 if (RType.bitsLT(Overflow.getValueType())) 9319 Overflow = DAG.getNode(ISD::TRUNCATE, dl, RType, Overflow); 9320 9321 assert(RType.getSizeInBits() == Overflow.getValueSizeInBits() && 9322 "Unexpected result type for S/UMULO legalization"); 9323 return true; 9324 } 9325 9326 SDValue TargetLowering::expandVecReduce(SDNode *Node, SelectionDAG &DAG) const { 9327 SDLoc dl(Node); 9328 unsigned BaseOpcode = ISD::getVecReduceBaseOpcode(Node->getOpcode()); 9329 SDValue Op = Node->getOperand(0); 9330 EVT VT = Op.getValueType(); 9331 9332 if (VT.isScalableVector()) 9333 report_fatal_error( 9334 "Expanding reductions for scalable vectors is undefined."); 9335 9336 // Try to use a shuffle reduction for power of two vectors. 9337 if (VT.isPow2VectorType()) { 9338 while (VT.getVectorNumElements() > 1) { 9339 EVT HalfVT = VT.getHalfNumVectorElementsVT(*DAG.getContext()); 9340 if (!isOperationLegalOrCustom(BaseOpcode, HalfVT)) 9341 break; 9342 9343 SDValue Lo, Hi; 9344 std::tie(Lo, Hi) = DAG.SplitVector(Op, dl); 9345 Op = DAG.getNode(BaseOpcode, dl, HalfVT, Lo, Hi); 9346 VT = HalfVT; 9347 } 9348 } 9349 9350 EVT EltVT = VT.getVectorElementType(); 9351 unsigned NumElts = VT.getVectorNumElements(); 9352 9353 SmallVector<SDValue, 8> Ops; 9354 DAG.ExtractVectorElements(Op, Ops, 0, NumElts); 9355 9356 SDValue Res = Ops[0]; 9357 for (unsigned i = 1; i < NumElts; i++) 9358 Res = DAG.getNode(BaseOpcode, dl, EltVT, Res, Ops[i], Node->getFlags()); 9359 9360 // Result type may be wider than element type. 
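// For example (illustrative), a vecreduce_add of v4i8 may be declared with an
// i32 result; the i8 partial result computed above is then any-extended to
// i32 below.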
9361 if (EltVT != Node->getValueType(0)) 9362 Res = DAG.getNode(ISD::ANY_EXTEND, dl, Node->getValueType(0), Res); 9363 return Res; 9364 } 9365 9366 SDValue TargetLowering::expandVecReduceSeq(SDNode *Node, SelectionDAG &DAG) const { 9367 SDLoc dl(Node); 9368 SDValue AccOp = Node->getOperand(0); 9369 SDValue VecOp = Node->getOperand(1); 9370 SDNodeFlags Flags = Node->getFlags(); 9371 9372 EVT VT = VecOp.getValueType(); 9373 EVT EltVT = VT.getVectorElementType(); 9374 9375 if (VT.isScalableVector()) 9376 report_fatal_error( 9377 "Expanding reductions for scalable vectors is undefined."); 9378 9379 unsigned NumElts = VT.getVectorNumElements(); 9380 9381 SmallVector<SDValue, 8> Ops; 9382 DAG.ExtractVectorElements(VecOp, Ops, 0, NumElts); 9383 9384 unsigned BaseOpcode = ISD::getVecReduceBaseOpcode(Node->getOpcode()); 9385 9386 SDValue Res = AccOp; 9387 for (unsigned i = 0; i < NumElts; i++) 9388 Res = DAG.getNode(BaseOpcode, dl, EltVT, Res, Ops[i], Flags); 9389 9390 return Res; 9391 } 9392 9393 bool TargetLowering::expandREM(SDNode *Node, SDValue &Result, 9394 SelectionDAG &DAG) const { 9395 EVT VT = Node->getValueType(0); 9396 SDLoc dl(Node); 9397 bool isSigned = Node->getOpcode() == ISD::SREM; 9398 unsigned DivOpc = isSigned ? ISD::SDIV : ISD::UDIV; 9399 unsigned DivRemOpc = isSigned ? ISD::SDIVREM : ISD::UDIVREM; 9400 SDValue Dividend = Node->getOperand(0); 9401 SDValue Divisor = Node->getOperand(1); 9402 if (isOperationLegalOrCustom(DivRemOpc, VT)) { 9403 SDVTList VTs = DAG.getVTList(VT, VT); 9404 Result = DAG.getNode(DivRemOpc, dl, VTs, Dividend, Divisor).getValue(1); 9405 return true; 9406 } 9407 if (isOperationLegalOrCustom(DivOpc, VT)) { 9408 // X % Y -> X-X/Y*Y 9409 SDValue Divide = DAG.getNode(DivOpc, dl, VT, Dividend, Divisor); 9410 SDValue Mul = DAG.getNode(ISD::MUL, dl, VT, Divide, Divisor); 9411 Result = DAG.getNode(ISD::SUB, dl, VT, Dividend, Mul); 9412 return true; 9413 } 9414 return false; 9415 } 9416 9417 SDValue TargetLowering::expandFP_TO_INT_SAT(SDNode *Node, 9418 SelectionDAG &DAG) const { 9419 bool IsSigned = Node->getOpcode() == ISD::FP_TO_SINT_SAT; 9420 SDLoc dl(SDValue(Node, 0)); 9421 SDValue Src = Node->getOperand(0); 9422 9423 // DstVT is the result type, while SatVT is the size to which we saturate 9424 EVT SrcVT = Src.getValueType(); 9425 EVT DstVT = Node->getValueType(0); 9426 9427 EVT SatVT = cast<VTSDNode>(Node->getOperand(1))->getVT(); 9428 unsigned SatWidth = SatVT.getScalarSizeInBits(); 9429 unsigned DstWidth = DstVT.getScalarSizeInBits(); 9430 assert(SatWidth <= DstWidth && 9431 "Expected saturation width smaller than result width"); 9432 9433 // Determine minimum and maximum integer values and their corresponding 9434 // floating-point values. 9435 APInt MinInt, MaxInt; 9436 if (IsSigned) { 9437 MinInt = APInt::getSignedMinValue(SatWidth).sext(DstWidth); 9438 MaxInt = APInt::getSignedMaxValue(SatWidth).sext(DstWidth); 9439 } else { 9440 MinInt = APInt::getMinValue(SatWidth).zext(DstWidth); 9441 MaxInt = APInt::getMaxValue(SatWidth).zext(DstWidth); 9442 } 9443 9444 // We cannot risk emitting FP_TO_XINT nodes with a source VT of f16, as 9445 // libcall emission cannot handle this. Large result types will fail. 
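// Illustrative example: for llvm.fptosi.sat.i8.f32 the bounds computed above
// are MinInt = -128 and MaxInt = 127, so the code below clamps the source to
// [-128.0, 127.0] before converting (an f16 source is first promoted to f32)
// and maps a NaN source to zero.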
9446 if (SrcVT == MVT::f16) { 9447 Src = DAG.getNode(ISD::FP_EXTEND, dl, MVT::f32, Src); 9448 SrcVT = Src.getValueType(); 9449 } 9450 9451 APFloat MinFloat(DAG.EVTToAPFloatSemantics(SrcVT)); 9452 APFloat MaxFloat(DAG.EVTToAPFloatSemantics(SrcVT)); 9453 9454 APFloat::opStatus MinStatus = 9455 MinFloat.convertFromAPInt(MinInt, IsSigned, APFloat::rmTowardZero); 9456 APFloat::opStatus MaxStatus = 9457 MaxFloat.convertFromAPInt(MaxInt, IsSigned, APFloat::rmTowardZero); 9458 bool AreExactFloatBounds = !(MinStatus & APFloat::opStatus::opInexact) && 9459 !(MaxStatus & APFloat::opStatus::opInexact); 9460 9461 SDValue MinFloatNode = DAG.getConstantFP(MinFloat, dl, SrcVT); 9462 SDValue MaxFloatNode = DAG.getConstantFP(MaxFloat, dl, SrcVT); 9463 9464 // If the integer bounds are exactly representable as floats and min/max are 9465 // legal, emit a min+max+fptoi sequence. Otherwise we have to use a sequence 9466 // of comparisons and selects. 9467 bool MinMaxLegal = isOperationLegal(ISD::FMINNUM, SrcVT) && 9468 isOperationLegal(ISD::FMAXNUM, SrcVT); 9469 if (AreExactFloatBounds && MinMaxLegal) { 9470 SDValue Clamped = Src; 9471 9472 // Clamp Src by MinFloat from below. If Src is NaN the result is MinFloat. 9473 Clamped = DAG.getNode(ISD::FMAXNUM, dl, SrcVT, Clamped, MinFloatNode); 9474 // Clamp by MaxFloat from above. NaN cannot occur. 9475 Clamped = DAG.getNode(ISD::FMINNUM, dl, SrcVT, Clamped, MaxFloatNode); 9476 // Convert clamped value to integer. 9477 SDValue FpToInt = DAG.getNode(IsSigned ? ISD::FP_TO_SINT : ISD::FP_TO_UINT, 9478 dl, DstVT, Clamped); 9479 9480 // In the unsigned case we're done, because we mapped NaN to MinFloat, 9481 // which will cast to zero. 9482 if (!IsSigned) 9483 return FpToInt; 9484 9485 // Otherwise, select 0 if Src is NaN. 9486 SDValue ZeroInt = DAG.getConstant(0, dl, DstVT); 9487 return DAG.getSelectCC(dl, Src, Src, ZeroInt, FpToInt, 9488 ISD::CondCode::SETUO); 9489 } 9490 9491 SDValue MinIntNode = DAG.getConstant(MinInt, dl, DstVT); 9492 SDValue MaxIntNode = DAG.getConstant(MaxInt, dl, DstVT); 9493 9494 // Result of direct conversion. The assumption here is that the operation is 9495 // non-trapping and it's fine to apply it to an out-of-range value if we 9496 // select it away later. 9497 SDValue FpToInt = 9498 DAG.getNode(IsSigned ? ISD::FP_TO_SINT : ISD::FP_TO_UINT, dl, DstVT, Src); 9499 9500 SDValue Select = FpToInt; 9501 9502 // If Src ULT MinFloat, select MinInt. In particular, this also selects 9503 // MinInt if Src is NaN. 9504 Select = DAG.getSelectCC(dl, Src, MinFloatNode, MinIntNode, Select, 9505 ISD::CondCode::SETULT); 9506 // If Src OGT MaxFloat, select MaxInt. 9507 Select = DAG.getSelectCC(dl, Src, MaxFloatNode, MaxIntNode, Select, 9508 ISD::CondCode::SETOGT); 9509 9510 // In the unsigned case we are done, because we mapped NaN to MinInt, which 9511 // is already zero. 9512 if (!IsSigned) 9513 return Select; 9514 9515 // Otherwise, select 0 if Src is NaN. 
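// E.g. (illustrative) with a NaN source in the signed case: the SETULT clamp
// above already selected MinInt, since NaN compares unordered, and the
// unordered compare below replaces that with zero, matching the saturating
// intrinsic's semantics.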
9516 SDValue ZeroInt = DAG.getConstant(0, dl, DstVT); 9517 return DAG.getSelectCC(dl, Src, Src, ZeroInt, Select, ISD::CondCode::SETUO); 9518 } 9519 9520 SDValue TargetLowering::expandVectorSplice(SDNode *Node, 9521 SelectionDAG &DAG) const { 9522 assert(Node->getOpcode() == ISD::VECTOR_SPLICE && "Unexpected opcode!"); 9523 assert(Node->getValueType(0).isScalableVector() && 9524 "Fixed length vector types expected to use SHUFFLE_VECTOR!"); 9525 9526 EVT VT = Node->getValueType(0); 9527 SDValue V1 = Node->getOperand(0); 9528 SDValue V2 = Node->getOperand(1); 9529 int64_t Imm = cast<ConstantSDNode>(Node->getOperand(2))->getSExtValue(); 9530 SDLoc DL(Node); 9531 9532 // Expand through memory thusly: 9533 // Alloca CONCAT_VECTORS_TYPES(V1, V2) Ptr 9534 // Store V1, Ptr 9535 // Store V2, Ptr + sizeof(V1) 9536 // If (Imm < 0) 9537 // TrailingElts = -Imm 9538 // Ptr = Ptr + sizeof(V1) - (TrailingElts * sizeof(VT.Elt)) 9539 // else 9540 // Ptr = Ptr + (Imm * sizeof(VT.Elt)) 9541 // Res = Load Ptr 9542 9543 Align Alignment = DAG.getReducedAlign(VT, /*UseABI=*/false); 9544 9545 EVT MemVT = EVT::getVectorVT(*DAG.getContext(), VT.getVectorElementType(), 9546 VT.getVectorElementCount() * 2); 9547 SDValue StackPtr = DAG.CreateStackTemporary(MemVT.getStoreSize(), Alignment); 9548 EVT PtrVT = StackPtr.getValueType(); 9549 auto &MF = DAG.getMachineFunction(); 9550 auto FrameIndex = cast<FrameIndexSDNode>(StackPtr.getNode())->getIndex(); 9551 auto PtrInfo = MachinePointerInfo::getFixedStack(MF, FrameIndex); 9552 9553 // Store the lo part of CONCAT_VECTORS(V1, V2) 9554 SDValue StoreV1 = DAG.getStore(DAG.getEntryNode(), DL, V1, StackPtr, PtrInfo); 9555 // Store the hi part of CONCAT_VECTORS(V1, V2) 9556 SDValue OffsetToV2 = DAG.getVScale( 9557 DL, PtrVT, 9558 APInt(PtrVT.getFixedSizeInBits(), VT.getStoreSize().getKnownMinSize())); 9559 SDValue StackPtr2 = DAG.getNode(ISD::ADD, DL, PtrVT, StackPtr, OffsetToV2); 9560 SDValue StoreV2 = DAG.getStore(StoreV1, DL, V2, StackPtr2, PtrInfo); 9561 9562 if (Imm >= 0) { 9563 // Load back the required element. getVectorElementPointer takes care of 9564 // clamping the index if it's out-of-bounds. 9565 StackPtr = getVectorElementPointer(DAG, StackPtr, VT, Node->getOperand(2)); 9566 // Load the spliced result 9567 return DAG.getLoad(VT, DL, StoreV2, StackPtr, 9568 MachinePointerInfo::getUnknownStack(MF)); 9569 } 9570 9571 uint64_t TrailingElts = -Imm; 9572 9573 // NOTE: TrailingElts must be clamped so as not to read outside of V1:V2. 9574 TypeSize EltByteSize = VT.getVectorElementType().getStoreSize(); 9575 SDValue TrailingBytes = 9576 DAG.getConstant(TrailingElts * EltByteSize, DL, PtrVT); 9577 9578 if (TrailingElts > VT.getVectorMinNumElements()) { 9579 SDValue VLBytes = DAG.getVScale( 9580 DL, PtrVT, 9581 APInt(PtrVT.getFixedSizeInBits(), VT.getStoreSize().getKnownMinSize())); 9582 TrailingBytes = DAG.getNode(ISD::UMIN, DL, PtrVT, TrailingBytes, VLBytes); 9583 } 9584 9585 // Calculate the start address of the spliced result. 
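// Illustrative example: splicing two <vscale x 4 x i32> vectors with
// Imm == -2 gives TrailingBytes == 8, so the load below starts 8 bytes before
// the end of V1 in the stack slot and yields the last two elements of V1
// followed by the leading elements of V2.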
9586 StackPtr2 = DAG.getNode(ISD::SUB, DL, PtrVT, StackPtr2, TrailingBytes); 9587 9588 // Load the spliced result 9589 return DAG.getLoad(VT, DL, StoreV2, StackPtr2, 9590 MachinePointerInfo::getUnknownStack(MF)); 9591 } 9592 9593 bool TargetLowering::LegalizeSetCCCondCode(SelectionDAG &DAG, EVT VT, 9594 SDValue &LHS, SDValue &RHS, 9595 SDValue &CC, SDValue Mask, 9596 SDValue EVL, bool &NeedInvert, 9597 const SDLoc &dl, SDValue &Chain, 9598 bool IsSignaling) const { 9599 const TargetLowering &TLI = DAG.getTargetLoweringInfo(); 9600 MVT OpVT = LHS.getSimpleValueType(); 9601 ISD::CondCode CCCode = cast<CondCodeSDNode>(CC)->get(); 9602 NeedInvert = false; 9603 assert(!EVL == !Mask && "VP Mask and EVL must either both be set or unset"); 9604 bool IsNonVP = !EVL; 9605 switch (TLI.getCondCodeAction(CCCode, OpVT)) { 9606 default: 9607 llvm_unreachable("Unknown condition code action!"); 9608 case TargetLowering::Legal: 9609 // Nothing to do. 9610 break; 9611 case TargetLowering::Expand: { 9612 ISD::CondCode InvCC = ISD::getSetCCSwappedOperands(CCCode); 9613 if (TLI.isCondCodeLegalOrCustom(InvCC, OpVT)) { 9614 std::swap(LHS, RHS); 9615 CC = DAG.getCondCode(InvCC); 9616 return true; 9617 } 9618 // Swapping operands didn't work. Try inverting the condition. 9619 bool NeedSwap = false; 9620 InvCC = getSetCCInverse(CCCode, OpVT); 9621 if (!TLI.isCondCodeLegalOrCustom(InvCC, OpVT)) { 9622 // If inverting the condition is not enough, try swapping operands 9623 // on top of it. 9624 InvCC = ISD::getSetCCSwappedOperands(InvCC); 9625 NeedSwap = true; 9626 } 9627 if (TLI.isCondCodeLegalOrCustom(InvCC, OpVT)) { 9628 CC = DAG.getCondCode(InvCC); 9629 NeedInvert = true; 9630 if (NeedSwap) 9631 std::swap(LHS, RHS); 9632 return true; 9633 } 9634 9635 ISD::CondCode CC1 = ISD::SETCC_INVALID, CC2 = ISD::SETCC_INVALID; 9636 unsigned Opc = 0; 9637 switch (CCCode) { 9638 default: 9639 llvm_unreachable("Don't know how to expand this condition!"); 9640 case ISD::SETUO: 9641 if (TLI.isCondCodeLegal(ISD::SETUNE, OpVT)) { 9642 CC1 = ISD::SETUNE; 9643 CC2 = ISD::SETUNE; 9644 Opc = ISD::OR; 9645 break; 9646 } 9647 assert(TLI.isCondCodeLegal(ISD::SETOEQ, OpVT) && 9648 "If SETUE is expanded, SETOEQ or SETUNE must be legal!"); 9649 NeedInvert = true; 9650 LLVM_FALLTHROUGH; 9651 case ISD::SETO: 9652 assert(TLI.isCondCodeLegal(ISD::SETOEQ, OpVT) && 9653 "If SETO is expanded, SETOEQ must be legal!"); 9654 CC1 = ISD::SETOEQ; 9655 CC2 = ISD::SETOEQ; 9656 Opc = ISD::AND; 9657 break; 9658 case ISD::SETONE: 9659 case ISD::SETUEQ: 9660 // If the SETUO or SETO CC isn't legal, we might be able to use 9661 // SETOGT || SETOLT, inverting the result for SETUEQ. We only need one 9662 // of SETOGT/SETOLT to be legal, the other can be emulated by swapping 9663 // the operands. 9664 CC2 = ((unsigned)CCCode & 0x8U) ? ISD::SETUO : ISD::SETO; 9665 if (!TLI.isCondCodeLegal(CC2, OpVT) && 9666 (TLI.isCondCodeLegal(ISD::SETOGT, OpVT) || 9667 TLI.isCondCodeLegal(ISD::SETOLT, OpVT))) { 9668 CC1 = ISD::SETOGT; 9669 CC2 = ISD::SETOLT; 9670 Opc = ISD::OR; 9671 NeedInvert = ((unsigned)CCCode & 0x8U); 9672 break; 9673 } 9674 LLVM_FALLTHROUGH; 9675 case ISD::SETOEQ: 9676 case ISD::SETOGT: 9677 case ISD::SETOGE: 9678 case ISD::SETOLT: 9679 case ISD::SETOLE: 9680 case ISD::SETUNE: 9681 case ISD::SETUGT: 9682 case ISD::SETUGE: 9683 case ISD::SETULT: 9684 case ISD::SETULE: 9685 // If we are floating point, assign and break, otherwise fall through. 
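// E.g. (illustrative) SETULT, which has the unordered bit set, expands to
//   setcc(LHS, RHS, SETLT) OR setcc(LHS, RHS, SETUO)
// while SETOLT expands to
//   setcc(LHS, RHS, SETLT) AND setcc(LHS, RHS, SETO).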
9686 if (!OpVT.isInteger()) {
9687 // We can use the 4th bit to tell if we are the unordered
9688 // or ordered version of the opcode.
9689 CC2 = ((unsigned)CCCode & 0x8U) ? ISD::SETUO : ISD::SETO;
9690 Opc = ((unsigned)CCCode & 0x8U) ? ISD::OR : ISD::AND;
9691 CC1 = (ISD::CondCode)(((int)CCCode & 0x7) | 0x10);
9692 break;
9693 }
9694 // Fall through if the operands are unsigned integers.
9695 LLVM_FALLTHROUGH;
9696 case ISD::SETLE:
9697 case ISD::SETGT:
9698 case ISD::SETGE:
9699 case ISD::SETLT:
9700 case ISD::SETNE:
9701 case ISD::SETEQ:
9702 // If all combinations of inverting the condition and swapping operands
9703 // didn't work, then we have no means to expand the condition.
9704 llvm_unreachable("Don't know how to expand this condition!");
9705 }
9706
9707 SDValue SetCC1, SetCC2;
9708 if (CCCode != ISD::SETO && CCCode != ISD::SETUO) {
9709 // If we aren't the ordered or unordered operation,
9710 // then the pattern is (LHS CC1 RHS) Opc (LHS CC2 RHS).
9711 if (IsNonVP) {
9712 SetCC1 = DAG.getSetCC(dl, VT, LHS, RHS, CC1, Chain, IsSignaling);
9713 SetCC2 = DAG.getSetCC(dl, VT, LHS, RHS, CC2, Chain, IsSignaling);
9714 } else {
9715 SetCC1 = DAG.getSetCCVP(dl, VT, LHS, RHS, CC1, Mask, EVL);
9716 SetCC2 = DAG.getSetCCVP(dl, VT, LHS, RHS, CC2, Mask, EVL);
9717 }
9718 } else {
9719 // Otherwise, the pattern is (LHS CC1 LHS) Opc (RHS CC2 RHS).
9720 if (IsNonVP) {
9721 SetCC1 = DAG.getSetCC(dl, VT, LHS, LHS, CC1, Chain, IsSignaling);
9722 SetCC2 = DAG.getSetCC(dl, VT, RHS, RHS, CC2, Chain, IsSignaling);
9723 } else {
9724 SetCC1 = DAG.getSetCCVP(dl, VT, LHS, LHS, CC1, Mask, EVL);
9725 SetCC2 = DAG.getSetCCVP(dl, VT, RHS, RHS, CC2, Mask, EVL);
9726 }
9727 }
9728 if (Chain)
9729 Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, SetCC1.getValue(1),
9730 SetCC2.getValue(1));
9731 if (IsNonVP)
9732 LHS = DAG.getNode(Opc, dl, VT, SetCC1, SetCC2);
9733 else {
9734 // Transform the binary opcode to the VP equivalent.
9735 assert((Opc == ISD::OR || Opc == ISD::AND) && "Unexpected opcode");
9736 Opc = Opc == ISD::OR ? ISD::VP_OR : ISD::VP_AND;
9737 LHS = DAG.getNode(Opc, dl, VT, SetCC1, SetCC2, Mask, EVL);
9738 }
9739 RHS = SDValue();
9740 CC = SDValue();
9741 return true;
9742 }
9743 }
9744 return false;
9745 }
9746
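// Example (illustrative): legalizing setcc(X, Y, SETUGE) when only SETULE is
// legal swaps the operands to setcc(Y, X, SETULE); if instead only SETULT
// were legal, NeedInvert is set and the caller is expected to invert the
// produced value, e.g. (sketch) with
//   SetCC = DAG.getLogicalNOT(dl, SetCC, SetCC.getValueType());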