//===-- LegalizeDAG.cpp - Implement SelectionDAG::Legalize ----------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file implements the SelectionDAG::Legalize method.
//
//===----------------------------------------------------------------------===//

#include "llvm/CodeGen/SelectionDAG.h"
#include "llvm/ADT/SetVector.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/SmallSet.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/Triple.h"
#include "llvm/CodeGen/Analysis.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineJumpTableInfo.h"
#include "llvm/IR/CallingConv.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/DebugInfo.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Target/TargetFrameLowering.h"
#include "llvm/Target/TargetLowering.h"
#include "llvm/Target/TargetMachine.h"
#include "llvm/Target/TargetSubtargetInfo.h"
using namespace llvm;

#define DEBUG_TYPE "legalizedag"

//===----------------------------------------------------------------------===//
/// SelectionDAGLegalize - This takes an arbitrary SelectionDAG as input and
/// hacks on it until the target machine can handle it.  This involves
/// eliminating value sizes the machine cannot handle (promoting small sizes to
/// large sizes or splitting up large values into small values) as well as
/// eliminating operations the machine cannot handle.
///
/// This code also does a small amount of optimization and recognition of
/// idioms as part of its processing.  For example, if a target does not
/// support a 'setcc' instruction efficiently, but does support the 'brcc'
/// instruction, this will attempt to merge setcc and brcond instructions into
/// brcc's.
///
namespace {
class SelectionDAGLegalize {
  const TargetMachine &TM;
  const TargetLowering &TLI;
  SelectionDAG &DAG;

  /// \brief The set of nodes which have already been legalized.  We hold a
  /// reference to it in order to update as necessary on node deletion.
  SmallPtrSetImpl<SDNode *> &LegalizedNodes;

  /// \brief A set of all the nodes updated during legalization.
  SmallSetVector<SDNode *, 16> *UpdatedNodes;

  EVT getSetCCResultType(EVT VT) const {
    return TLI.getSetCCResultType(*DAG.getContext(), VT);
  }

  // Libcall insertion helpers.

public:
  SelectionDAGLegalize(SelectionDAG &DAG,
                       SmallPtrSetImpl<SDNode *> &LegalizedNodes,
                       SmallSetVector<SDNode *, 16> *UpdatedNodes = nullptr)
      : TM(DAG.getTarget()), TLI(DAG.getTargetLoweringInfo()), DAG(DAG),
        LegalizedNodes(LegalizedNodes), UpdatedNodes(UpdatedNodes) {}

  /// \brief Legalizes the given operation.
  void LegalizeOp(SDNode *Node);

private:
  SDValue OptimizeFloatStore(StoreSDNode *ST);

  void LegalizeLoadOps(SDNode *Node);
  void LegalizeStoreOps(SDNode *Node);

  /// PerformInsertVectorEltInMemory - Some targets cannot handle a variable
  /// insertion index for the INSERT_VECTOR_ELT instruction.  In this case, it
  /// is necessary to spill the vector being inserted into to memory, perform
  /// the insert there, and then read the result back.
  SDValue PerformInsertVectorEltInMemory(SDValue Vec, SDValue Val,
                                         SDValue Idx, SDLoc dl);
  SDValue ExpandINSERT_VECTOR_ELT(SDValue Vec, SDValue Val,
                                  SDValue Idx, SDLoc dl);

  /// ShuffleWithNarrowerEltType - Return a vector shuffle operation which
  /// performs the same shuffle in terms of order of result bytes, but on a
  /// type whose vector element type is narrower than the original shuffle
  /// type.  e.g. <v4i32> <0, 1, 0, 1> -> v8i16 <0, 1, 2, 3, 0, 1, 2, 3>
  SDValue ShuffleWithNarrowerEltType(EVT NVT, EVT VT, SDLoc dl,
                                     SDValue N1, SDValue N2,
                                     ArrayRef<int> Mask) const;

  bool LegalizeSetCCCondCode(EVT VT, SDValue &LHS, SDValue &RHS, SDValue &CC,
                             bool &NeedInvert, SDLoc dl);

  SDValue ExpandLibCall(RTLIB::Libcall LC, SDNode *Node, bool isSigned);
  SDValue ExpandLibCall(RTLIB::Libcall LC, EVT RetVT, const SDValue *Ops,
                        unsigned NumOps, bool isSigned, SDLoc dl);

  std::pair<SDValue, SDValue> ExpandChainLibCall(RTLIB::Libcall LC,
                                                 SDNode *Node, bool isSigned);
  SDValue ExpandFPLibCall(SDNode *Node, RTLIB::Libcall Call_F32,
                          RTLIB::Libcall Call_F64, RTLIB::Libcall Call_F80,
                          RTLIB::Libcall Call_F128,
                          RTLIB::Libcall Call_PPCF128);
  SDValue ExpandIntLibCall(SDNode *Node, bool isSigned,
                           RTLIB::Libcall Call_I8,
                           RTLIB::Libcall Call_I16,
                           RTLIB::Libcall Call_I32,
                           RTLIB::Libcall Call_I64,
                           RTLIB::Libcall Call_I128);
  void ExpandDivRemLibCall(SDNode *Node, SmallVectorImpl<SDValue> &Results);
  void ExpandSinCosLibCall(SDNode *Node, SmallVectorImpl<SDValue> &Results);

  SDValue EmitStackConvert(SDValue SrcOp, EVT SlotVT, EVT DestVT, SDLoc dl);
  SDValue ExpandBUILD_VECTOR(SDNode *Node);
  SDValue ExpandSCALAR_TO_VECTOR(SDNode *Node);
  void ExpandDYNAMIC_STACKALLOC(SDNode *Node,
                                SmallVectorImpl<SDValue> &Results);
  SDValue ExpandFCOPYSIGN(SDNode *Node);
  SDValue ExpandLegalINT_TO_FP(bool isSigned, SDValue LegalOp, EVT DestVT,
                               SDLoc dl);
  SDValue PromoteLegalINT_TO_FP(SDValue LegalOp, EVT DestVT, bool isSigned,
                                SDLoc dl);
  SDValue PromoteLegalFP_TO_INT(SDValue LegalOp, EVT DestVT, bool isSigned,
                                SDLoc dl);

  SDValue ExpandBSWAP(SDValue Op, SDLoc dl);
  SDValue ExpandBitCount(unsigned Opc, SDValue Op, SDLoc dl);

  SDValue ExpandExtractFromVectorThroughStack(SDValue Op);
  SDValue ExpandInsertToVectorThroughStack(SDValue Op);
  SDValue ExpandVectorBuildThroughStack(SDNode* Node);

  SDValue ExpandConstantFP(ConstantFPSDNode *CFP, bool UseCP);

  std::pair<SDValue, SDValue> ExpandAtomic(SDNode *Node);

  void ExpandNode(SDNode *Node);
  void PromoteNode(SDNode *Node);

public:
  // Node replacement helpers
  void ReplacedNode(SDNode *N) {
    LegalizedNodes.erase(N);
  }
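  // The ReplaceNode overloads below all follow the same pattern: splice the
  // new value(s) into every use of the old node, migrate any attached debug
  // values, record the new node(s) in UpdatedNodes when the caller asked for
  // that bookkeeping, and finally drop the old node from LegalizedNodes via
  // ReplacedNode so the set stays in sync as nodes are replaced and deleted.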
  void ReplaceNode(SDNode *Old, SDNode *New) {
    DEBUG(dbgs() << " ... replacing: "; Old->dump(&DAG);
          dbgs() << " with: "; New->dump(&DAG));

    assert(Old->getNumValues() == New->getNumValues() &&
           "Replacing one node with another that produces a different number "
           "of values!");
    DAG.ReplaceAllUsesWith(Old, New);
    for (unsigned i = 0, e = Old->getNumValues(); i != e; ++i)
      DAG.TransferDbgValues(SDValue(Old, i), SDValue(New, i));
    if (UpdatedNodes)
      UpdatedNodes->insert(New);
    ReplacedNode(Old);
  }
  void ReplaceNode(SDValue Old, SDValue New) {
    DEBUG(dbgs() << " ... replacing: "; Old->dump(&DAG);
          dbgs() << " with: "; New->dump(&DAG));

    DAG.ReplaceAllUsesWith(Old, New);
    DAG.TransferDbgValues(Old, New);
    if (UpdatedNodes)
      UpdatedNodes->insert(New.getNode());
    ReplacedNode(Old.getNode());
  }
  void ReplaceNode(SDNode *Old, const SDValue *New) {
    DEBUG(dbgs() << " ... replacing: "; Old->dump(&DAG));

    DAG.ReplaceAllUsesWith(Old, New);
    for (unsigned i = 0, e = Old->getNumValues(); i != e; ++i) {
      DEBUG(dbgs() << (i == 0 ? " with: "
                              : " and: ");
            New[i]->dump(&DAG));
      DAG.TransferDbgValues(SDValue(Old, i), New[i]);
      if (UpdatedNodes)
        UpdatedNodes->insert(New[i].getNode());
    }
    ReplacedNode(Old);
  }
};
}

/// ShuffleWithNarrowerEltType - Return a vector shuffle operation which
/// performs the same shuffle in terms of order of result bytes, but on a type
/// whose vector element type is narrower than the original shuffle type.
/// e.g. <v4i32> <0, 1, 0, 1> -> v8i16 <0, 1, 2, 3, 0, 1, 2, 3>
SDValue
SelectionDAGLegalize::ShuffleWithNarrowerEltType(EVT NVT, EVT VT, SDLoc dl,
                                                 SDValue N1, SDValue N2,
                                                 ArrayRef<int> Mask) const {
  unsigned NumMaskElts = VT.getVectorNumElements();
  unsigned NumDestElts = NVT.getVectorNumElements();
  unsigned NumEltsGrowth = NumDestElts / NumMaskElts;

  assert(NumEltsGrowth && "Cannot promote to vector type with fewer elts!");

  if (NumEltsGrowth == 1)
    return DAG.getVectorShuffle(NVT, dl, N1, N2, &Mask[0]);

  SmallVector<int, 8> NewMask;
  for (unsigned i = 0; i != NumMaskElts; ++i) {
    int Idx = Mask[i];
    for (unsigned j = 0; j != NumEltsGrowth; ++j) {
      if (Idx < 0)
        NewMask.push_back(-1);
      else
        NewMask.push_back(Idx * NumEltsGrowth + j);
    }
  }
  assert(NewMask.size() == NumDestElts && "Non-integer NumEltsGrowth?");
  assert(TLI.isShuffleMaskLegal(NewMask, NVT) && "Shuffle not legal?");
  return DAG.getVectorShuffle(NVT, dl, N1, N2, &NewMask[0]);
}
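
// For illustration: with NumEltsGrowth == 2 each original mask index i expands
// to the pair {2*i, 2*i+1}, and the sentinel -1 expands to {-1, -1}.  So the
// v4i32 mask <0, 1, -1, 3> becomes the v8i16 mask <0, 1, 2, 3, -1, -1, 6, 7>.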

/// ExpandConstantFP - Expands the ConstantFP node to an integer constant or
/// a load from the constant pool.
SDValue
SelectionDAGLegalize::ExpandConstantFP(ConstantFPSDNode *CFP, bool UseCP) {
  bool Extend = false;
  SDLoc dl(CFP);

  // If a FP immediate is precise when represented as a float and if the
  // target can do an extending load from float to double, we put it into
  // the constant pool as a float, even if it is statically typed as a
  // double.  This shrinks FP constants and canonicalizes them for targets
  // where an FP extending load is the same cost as a normal load (such as on
  // the x87 fp stack or PPC FP unit).
  EVT VT = CFP->getValueType(0);
  ConstantFP *LLVMC = const_cast<ConstantFP*>(CFP->getConstantFPValue());
  if (!UseCP) {
    assert((VT == MVT::f64 || VT == MVT::f32) && "Invalid type expansion");
    return DAG.getConstant(LLVMC->getValueAPF().bitcastToAPInt(),
                           (VT == MVT::f64) ? MVT::i64 : MVT::i32);
  }

  EVT OrigVT = VT;
  EVT SVT = VT;
  while (SVT != MVT::f32 && SVT != MVT::f16) {
    SVT = (MVT::SimpleValueType)(SVT.getSimpleVT().SimpleTy - 1);
    if (ConstantFPSDNode::isValueValidForType(SVT, CFP->getValueAPF()) &&
        // Only do this if the target has a native EXTLOAD instruction from
        // smaller type.
        TLI.isLoadExtLegal(ISD::EXTLOAD, SVT) &&
        TLI.ShouldShrinkFPConstant(OrigVT)) {
      Type *SType = SVT.getTypeForEVT(*DAG.getContext());
      LLVMC = cast<ConstantFP>(ConstantExpr::getFPTrunc(LLVMC, SType));
      VT = SVT;
      Extend = true;
    }
  }

  SDValue CPIdx = DAG.getConstantPool(LLVMC, TLI.getPointerTy());
  unsigned Alignment = cast<ConstantPoolSDNode>(CPIdx)->getAlignment();
  if (Extend) {
    SDValue Result =
        DAG.getExtLoad(ISD::EXTLOAD, dl, OrigVT,
                       DAG.getEntryNode(),
                       CPIdx, MachinePointerInfo::getConstantPool(),
                       VT, false, false, false, Alignment);
    return Result;
  }
  SDValue Result =
      DAG.getLoad(OrigVT, dl, DAG.getEntryNode(), CPIdx,
                  MachinePointerInfo::getConstantPool(), false, false, false,
                  Alignment);
  return Result;
}
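
// A concrete example of the shrinking path above: an f64 constant such as 1.0
// is exactly representable as f32, so if the target reports an f32->f64
// EXTLOAD as legal and ShouldShrinkFPConstant(f64) returns true, the constant
// pool entry is emitted as 4 bytes and the resulting DAG is roughly
//   (f64 (extload<f32> constant-pool-entry))
// instead of an 8-byte pool entry fed into a plain f64 load.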

/// ExpandUnalignedStore - Expands an unaligned store to 2 half-size stores.
static void ExpandUnalignedStore(StoreSDNode *ST, SelectionDAG &DAG,
                                 const TargetLowering &TLI,
                                 SelectionDAGLegalize *DAGLegalize) {
  assert(ST->getAddressingMode() == ISD::UNINDEXED &&
         "unaligned indexed stores not implemented!");
  SDValue Chain = ST->getChain();
  SDValue Ptr = ST->getBasePtr();
  SDValue Val = ST->getValue();
  EVT VT = Val.getValueType();
  int Alignment = ST->getAlignment();
  unsigned AS = ST->getAddressSpace();

  SDLoc dl(ST);
  if (ST->getMemoryVT().isFloatingPoint() ||
      ST->getMemoryVT().isVector()) {
    EVT intVT = EVT::getIntegerVT(*DAG.getContext(), VT.getSizeInBits());
    if (TLI.isTypeLegal(intVT)) {
      // Expand to a bitconvert of the value to the integer type of the
      // same size, then a (misaligned) int store.
      // FIXME: Does not handle truncating floating point stores!
      SDValue Result = DAG.getNode(ISD::BITCAST, dl, intVT, Val);
      Result = DAG.getStore(Chain, dl, Result, Ptr, ST->getPointerInfo(),
                            ST->isVolatile(), ST->isNonTemporal(), Alignment);
      DAGLegalize->ReplaceNode(SDValue(ST, 0), Result);
      return;
    }
    // Do an (aligned) store to a stack slot, then copy from the stack slot
    // to the final destination using (unaligned) integer loads and stores.
    EVT StoredVT = ST->getMemoryVT();
    MVT RegVT =
        TLI.getRegisterType(*DAG.getContext(),
                            EVT::getIntegerVT(*DAG.getContext(),
                                              StoredVT.getSizeInBits()));
    unsigned StoredBytes = StoredVT.getSizeInBits() / 8;
    unsigned RegBytes = RegVT.getSizeInBits() / 8;
    unsigned NumRegs = (StoredBytes + RegBytes - 1) / RegBytes;

    // Make sure the stack slot is also aligned for the register type.
    SDValue StackPtr = DAG.CreateStackTemporary(StoredVT, RegVT);

    // Perform the original store, only redirected to the stack slot.
    SDValue Store = DAG.getTruncStore(Chain, dl,
                                      Val, StackPtr, MachinePointerInfo(),
                                      StoredVT, false, false, 0);
    SDValue Increment = DAG.getConstant(RegBytes, TLI.getPointerTy(AS));
    SmallVector<SDValue, 8> Stores;
    unsigned Offset = 0;

    // Do all but one copy using the full register width.
    for (unsigned i = 1; i < NumRegs; i++) {
      // Load one integer register's worth from the stack slot.
      SDValue Load = DAG.getLoad(RegVT, dl, Store, StackPtr,
                                 MachinePointerInfo(),
                                 false, false, false, 0);
      // Store it to the final location.  Remember the store.
      Stores.push_back(DAG.getStore(Load.getValue(1), dl, Load, Ptr,
                                    ST->getPointerInfo().getWithOffset(Offset),
                                    ST->isVolatile(), ST->isNonTemporal(),
                                    MinAlign(ST->getAlignment(), Offset)));
      // Increment the pointers.
      Offset += RegBytes;
      StackPtr = DAG.getNode(ISD::ADD, dl, StackPtr.getValueType(), StackPtr,
                             Increment);
      Ptr = DAG.getNode(ISD::ADD, dl, Ptr.getValueType(), Ptr, Increment);
    }

    // The last store may be partial.  Do a truncating store.  On big-endian
    // machines this requires an extending load from the stack slot to ensure
    // that the bits are in the right place.
    EVT MemVT = EVT::getIntegerVT(*DAG.getContext(),
                                  8 * (StoredBytes - Offset));

    // Load from the stack slot.
    SDValue Load = DAG.getExtLoad(ISD::EXTLOAD, dl, RegVT, Store, StackPtr,
                                  MachinePointerInfo(),
                                  MemVT, false, false, false, 0);

    Stores.push_back(DAG.getTruncStore(Load.getValue(1), dl, Load, Ptr,
                                       ST->getPointerInfo()
                                         .getWithOffset(Offset),
                                       MemVT, ST->isVolatile(),
                                       ST->isNonTemporal(),
                                       MinAlign(ST->getAlignment(), Offset),
                                       ST->getAAInfo()));
    // The order of the stores doesn't matter - say it with a TokenFactor.
    SDValue Result = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, Stores);
    DAGLegalize->ReplaceNode(SDValue(ST, 0), Result);
    return;
  }
  assert(ST->getMemoryVT().isInteger() &&
         !ST->getMemoryVT().isVector() &&
         "Unaligned store of unknown type.");
  // Get the half-size VT
  EVT NewStoredVT = ST->getMemoryVT().getHalfSizedIntegerVT(*DAG.getContext());
  int NumBits = NewStoredVT.getSizeInBits();
  int IncrementSize = NumBits / 8;

  // Divide the stored value in two parts.
  SDValue ShiftAmount = DAG.getConstant(NumBits,
                                      TLI.getShiftAmountTy(Val.getValueType()));
  SDValue Lo = Val;
  SDValue Hi = DAG.getNode(ISD::SRL, dl, VT, Val, ShiftAmount);

  // Store the two parts
  SDValue Store1, Store2;
  Store1 = DAG.getTruncStore(Chain, dl, TLI.isLittleEndian()?Lo:Hi, Ptr,
                             ST->getPointerInfo(), NewStoredVT,
                             ST->isVolatile(), ST->isNonTemporal(), Alignment);

  Ptr = DAG.getNode(ISD::ADD, dl, Ptr.getValueType(), Ptr,
                    DAG.getConstant(IncrementSize, TLI.getPointerTy(AS)));
  Alignment = MinAlign(Alignment, IncrementSize);
  Store2 = DAG.getTruncStore(Chain, dl, TLI.isLittleEndian()?Hi:Lo, Ptr,
                             ST->getPointerInfo().getWithOffset(IncrementSize),
                             NewStoredVT, ST->isVolatile(), ST->isNonTemporal(),
                             Alignment, ST->getAAInfo());

  SDValue Result =
      DAG.getNode(ISD::TokenFactor, dl, MVT::Other, Store1, Store2);
  DAGLegalize->ReplaceNode(SDValue(ST, 0), Result);
}
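
// A minimal sketch of the integer half-and-half path above, assuming a
// little-endian target and an i32 store with alignment 1: the value X is
// split as Lo = X and Hi = srl X, 16, and the expansion emits
//   truncstore<i16> Lo, Ptr
//   truncstore<i16> Hi, Ptr+2
// tied together with a TokenFactor.  On a big-endian target the two halves
// are stored in the opposite order.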

/// ExpandUnalignedLoad - Expands an unaligned load to 2 half-size loads.
static void
ExpandUnalignedLoad(LoadSDNode *LD, SelectionDAG &DAG,
                    const TargetLowering &TLI,
                    SDValue &ValResult, SDValue &ChainResult) {
  assert(LD->getAddressingMode() == ISD::UNINDEXED &&
         "unaligned indexed loads not implemented!");
  SDValue Chain = LD->getChain();
  SDValue Ptr = LD->getBasePtr();
  EVT VT = LD->getValueType(0);
  EVT LoadedVT = LD->getMemoryVT();
  SDLoc dl(LD);
  if (VT.isFloatingPoint() || VT.isVector()) {
    EVT intVT = EVT::getIntegerVT(*DAG.getContext(), LoadedVT.getSizeInBits());
    if (TLI.isTypeLegal(intVT) && TLI.isTypeLegal(LoadedVT)) {
      // Expand to a (misaligned) integer load of the same size,
      // then bitconvert to floating point or vector.
      SDValue newLoad = DAG.getLoad(intVT, dl, Chain, Ptr,
                                    LD->getMemOperand());
      SDValue Result = DAG.getNode(ISD::BITCAST, dl, LoadedVT, newLoad);
      if (LoadedVT != VT)
        Result = DAG.getNode(VT.isFloatingPoint() ? ISD::FP_EXTEND :
                             ISD::ANY_EXTEND, dl, VT, Result);

      ValResult = Result;
      ChainResult = Chain;
      return;
    }

    // Copy the value to an (aligned) stack slot using (unaligned) integer
    // loads and stores, then do an (aligned) load from the stack slot.
    MVT RegVT = TLI.getRegisterType(*DAG.getContext(), intVT);
    unsigned LoadedBytes = LoadedVT.getSizeInBits() / 8;
    unsigned RegBytes = RegVT.getSizeInBits() / 8;
    unsigned NumRegs = (LoadedBytes + RegBytes - 1) / RegBytes;

    // Make sure the stack slot is also aligned for the register type.
    SDValue StackBase = DAG.CreateStackTemporary(LoadedVT, RegVT);

    SDValue Increment = DAG.getConstant(RegBytes, TLI.getPointerTy());
    SmallVector<SDValue, 8> Stores;
    SDValue StackPtr = StackBase;
    unsigned Offset = 0;

    // Do all but one copy using the full register width.
    for (unsigned i = 1; i < NumRegs; i++) {
      // Load one integer register's worth from the original location.
      SDValue Load = DAG.getLoad(RegVT, dl, Chain, Ptr,
                                 LD->getPointerInfo().getWithOffset(Offset),
                                 LD->isVolatile(), LD->isNonTemporal(),
                                 LD->isInvariant(),
                                 MinAlign(LD->getAlignment(), Offset),
                                 LD->getAAInfo());
      // Follow the load with a store to the stack slot.  Remember the store.
      Stores.push_back(DAG.getStore(Load.getValue(1), dl, Load, StackPtr,
                                    MachinePointerInfo(), false, false, 0));
      // Increment the pointers.
      Offset += RegBytes;
      Ptr = DAG.getNode(ISD::ADD, dl, Ptr.getValueType(), Ptr, Increment);
      StackPtr = DAG.getNode(ISD::ADD, dl, StackPtr.getValueType(), StackPtr,
                             Increment);
    }

    // The last copy may be partial.  Do an extending load.
    EVT MemVT = EVT::getIntegerVT(*DAG.getContext(),
                                  8 * (LoadedBytes - Offset));
    SDValue Load = DAG.getExtLoad(ISD::EXTLOAD, dl, RegVT, Chain, Ptr,
                                  LD->getPointerInfo().getWithOffset(Offset),
                                  MemVT, LD->isVolatile(),
                                  LD->isNonTemporal(),
                                  LD->isInvariant(),
                                  MinAlign(LD->getAlignment(), Offset),
                                  LD->getAAInfo());
    // Follow the load with a store to the stack slot.  Remember the store.
    // On big-endian machines this requires a truncating store to ensure
    // that the bits end up in the right place.
    Stores.push_back(DAG.getTruncStore(Load.getValue(1), dl, Load, StackPtr,
                                       MachinePointerInfo(), MemVT,
                                       false, false, 0));

    // The order of the stores doesn't matter - say it with a TokenFactor.
    SDValue TF = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, Stores);

    // Finally, perform the original load only redirected to the stack slot.
    Load = DAG.getExtLoad(LD->getExtensionType(), dl, VT, TF, StackBase,
                          MachinePointerInfo(), LoadedVT, false, false, false,
                          0);

    // Callers expect a MERGE_VALUES node.
    ValResult = Load;
    ChainResult = TF;
    return;
  }
  assert(LoadedVT.isInteger() && !LoadedVT.isVector() &&
         "Unaligned load of unsupported type.");

  // Compute the new VT that is half the size of the old one.  This is an
  // integer MVT.
  unsigned NumBits = LoadedVT.getSizeInBits();
  EVT NewLoadedVT;
  NewLoadedVT = EVT::getIntegerVT(*DAG.getContext(), NumBits/2);
  NumBits >>= 1;

  unsigned Alignment = LD->getAlignment();
  unsigned IncrementSize = NumBits / 8;
  ISD::LoadExtType HiExtType = LD->getExtensionType();

  // If the original load is NON_EXTLOAD, the hi part load must be ZEXTLOAD.
  if (HiExtType == ISD::NON_EXTLOAD)
    HiExtType = ISD::ZEXTLOAD;

  // Load the value in two parts
  SDValue Lo, Hi;
  if (TLI.isLittleEndian()) {
    Lo = DAG.getExtLoad(ISD::ZEXTLOAD, dl, VT, Chain, Ptr, LD->getPointerInfo(),
                        NewLoadedVT, LD->isVolatile(),
                        LD->isNonTemporal(), LD->isInvariant(), Alignment,
                        LD->getAAInfo());
    Ptr = DAG.getNode(ISD::ADD, dl, Ptr.getValueType(), Ptr,
                      DAG.getConstant(IncrementSize, Ptr.getValueType()));
    Hi = DAG.getExtLoad(HiExtType, dl, VT, Chain, Ptr,
                        LD->getPointerInfo().getWithOffset(IncrementSize),
                        NewLoadedVT, LD->isVolatile(),
                        LD->isNonTemporal(), LD->isInvariant(),
                        MinAlign(Alignment, IncrementSize), LD->getAAInfo());
  } else {
    Hi = DAG.getExtLoad(HiExtType, dl, VT, Chain, Ptr, LD->getPointerInfo(),
                        NewLoadedVT, LD->isVolatile(),
                        LD->isNonTemporal(), LD->isInvariant(), Alignment,
                        LD->getAAInfo());
    Ptr = DAG.getNode(ISD::ADD, dl, Ptr.getValueType(), Ptr,
                      DAG.getConstant(IncrementSize, Ptr.getValueType()));
    Lo = DAG.getExtLoad(ISD::ZEXTLOAD, dl, VT, Chain, Ptr,
                        LD->getPointerInfo().getWithOffset(IncrementSize),
                        NewLoadedVT, LD->isVolatile(),
                        LD->isNonTemporal(), LD->isInvariant(),
                        MinAlign(Alignment, IncrementSize), LD->getAAInfo());
  }

  // Aggregate the two parts.
  SDValue ShiftAmount = DAG.getConstant(NumBits,
                                       TLI.getShiftAmountTy(Hi.getValueType()));
  SDValue Result = DAG.getNode(ISD::SHL, dl, VT, Hi, ShiftAmount);
  Result = DAG.getNode(ISD::OR, dl, VT, Result, Lo);

  SDValue TF = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, Lo.getValue(1),
                           Hi.getValue(1));

  ValResult = Result;
  ChainResult = TF;
}
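
// Sketch of the corresponding load expansion for a misaligned i32 load on a
// little-endian target:
//   Lo = zextload<i16> Ptr
//   Hi = zextload<i16> Ptr+2   (ZEXTLOAD because the original was a plain load)
//   Result = (Hi << 16) | Lo
// The chains of the two half loads are joined with a TokenFactor.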

/// PerformInsertVectorEltInMemory - Some targets cannot handle a variable
/// insertion index for the INSERT_VECTOR_ELT instruction.  In this case, it
/// is necessary to spill the vector being inserted into to memory, perform
/// the insert there, and then read the result back.
SDValue SelectionDAGLegalize::
PerformInsertVectorEltInMemory(SDValue Vec, SDValue Val, SDValue Idx,
                               SDLoc dl) {
  SDValue Tmp1 = Vec;
  SDValue Tmp2 = Val;
  SDValue Tmp3 = Idx;

  // If the target doesn't support this, we have to spill the input vector
  // to a temporary stack slot, update the element, then reload it.  This is
  // badness.
We could also load the value into a vector register (either 576 // with a "move to register" or "extload into register" instruction, then 577 // permute it into place, if the idx is a constant and if the idx is 578 // supported by the target. 579 EVT VT = Tmp1.getValueType(); 580 EVT EltVT = VT.getVectorElementType(); 581 EVT IdxVT = Tmp3.getValueType(); 582 EVT PtrVT = TLI.getPointerTy(); 583 SDValue StackPtr = DAG.CreateStackTemporary(VT); 584 585 int SPFI = cast<FrameIndexSDNode>(StackPtr.getNode())->getIndex(); 586 587 // Store the vector. 588 SDValue Ch = DAG.getStore(DAG.getEntryNode(), dl, Tmp1, StackPtr, 589 MachinePointerInfo::getFixedStack(SPFI), 590 false, false, 0); 591 592 // Truncate or zero extend offset to target pointer type. 593 unsigned CastOpc = IdxVT.bitsGT(PtrVT) ? ISD::TRUNCATE : ISD::ZERO_EXTEND; 594 Tmp3 = DAG.getNode(CastOpc, dl, PtrVT, Tmp3); 595 // Add the offset to the index. 596 unsigned EltSize = EltVT.getSizeInBits()/8; 597 Tmp3 = DAG.getNode(ISD::MUL, dl, IdxVT, Tmp3,DAG.getConstant(EltSize, IdxVT)); 598 SDValue StackPtr2 = DAG.getNode(ISD::ADD, dl, IdxVT, Tmp3, StackPtr); 599 // Store the scalar value. 600 Ch = DAG.getTruncStore(Ch, dl, Tmp2, StackPtr2, MachinePointerInfo(), EltVT, 601 false, false, 0); 602 // Load the updated vector. 603 return DAG.getLoad(VT, dl, Ch, StackPtr, 604 MachinePointerInfo::getFixedStack(SPFI), false, false, 605 false, 0); 606 } 607 608 609 SDValue SelectionDAGLegalize:: 610 ExpandINSERT_VECTOR_ELT(SDValue Vec, SDValue Val, SDValue Idx, SDLoc dl) { 611 if (ConstantSDNode *InsertPos = dyn_cast<ConstantSDNode>(Idx)) { 612 // SCALAR_TO_VECTOR requires that the type of the value being inserted 613 // match the element type of the vector being created, except for 614 // integers in which case the inserted value can be over width. 615 EVT EltVT = Vec.getValueType().getVectorElementType(); 616 if (Val.getValueType() == EltVT || 617 (EltVT.isInteger() && Val.getValueType().bitsGE(EltVT))) { 618 SDValue ScVec = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, 619 Vec.getValueType(), Val); 620 621 unsigned NumElts = Vec.getValueType().getVectorNumElements(); 622 // We generate a shuffle of InVec and ScVec, so the shuffle mask 623 // should be 0,1,2,3,4,5... with the appropriate element replaced with 624 // elt 0 of the RHS. 625 SmallVector<int, 8> ShufOps; 626 for (unsigned i = 0; i != NumElts; ++i) 627 ShufOps.push_back(i != InsertPos->getZExtValue() ? i : NumElts); 628 629 return DAG.getVectorShuffle(Vec.getValueType(), dl, Vec, ScVec, 630 &ShufOps[0]); 631 } 632 } 633 return PerformInsertVectorEltInMemory(Vec, Val, Idx, dl); 634 } 635 636 SDValue SelectionDAGLegalize::OptimizeFloatStore(StoreSDNode* ST) { 637 // Turn 'store float 1.0, Ptr' -> 'store int 0x12345678, Ptr' 638 // FIXME: We shouldn't do this for TargetConstantFP's. 639 // FIXME: move this to the DAG Combiner! Note that we can't regress due 640 // to phase ordering between legalized code and the dag combiner. This 641 // probably means that we need to integrate dag combiner and legalizer 642 // together. 643 // We generally can't do this one for long doubles. 
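  // Concretely (for the common cases handled below): 'store float 1.0, Ptr'
  // becomes 'store i32 0x3F800000, Ptr', and 'store double 1.0, Ptr' becomes
  // either a single i64 store of 0x3FF0000000000000 or, when only i32 is
  // legal, two i32 stores of the low and high halves (swapped on big-endian
  // targets).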
644 SDValue Chain = ST->getChain(); 645 SDValue Ptr = ST->getBasePtr(); 646 unsigned Alignment = ST->getAlignment(); 647 bool isVolatile = ST->isVolatile(); 648 bool isNonTemporal = ST->isNonTemporal(); 649 AAMDNodes AAInfo = ST->getAAInfo(); 650 SDLoc dl(ST); 651 if (ConstantFPSDNode *CFP = dyn_cast<ConstantFPSDNode>(ST->getValue())) { 652 if (CFP->getValueType(0) == MVT::f32 && 653 TLI.isTypeLegal(MVT::i32)) { 654 SDValue Con = DAG.getConstant(CFP->getValueAPF(). 655 bitcastToAPInt().zextOrTrunc(32), 656 MVT::i32); 657 return DAG.getStore(Chain, dl, Con, Ptr, ST->getPointerInfo(), 658 isVolatile, isNonTemporal, Alignment, AAInfo); 659 } 660 661 if (CFP->getValueType(0) == MVT::f64) { 662 // If this target supports 64-bit registers, do a single 64-bit store. 663 if (TLI.isTypeLegal(MVT::i64)) { 664 SDValue Con = DAG.getConstant(CFP->getValueAPF().bitcastToAPInt(). 665 zextOrTrunc(64), MVT::i64); 666 return DAG.getStore(Chain, dl, Con, Ptr, ST->getPointerInfo(), 667 isVolatile, isNonTemporal, Alignment, AAInfo); 668 } 669 670 if (TLI.isTypeLegal(MVT::i32) && !ST->isVolatile()) { 671 // Otherwise, if the target supports 32-bit registers, use 2 32-bit 672 // stores. If the target supports neither 32- nor 64-bits, this 673 // xform is certainly not worth it. 674 const APInt &IntVal =CFP->getValueAPF().bitcastToAPInt(); 675 SDValue Lo = DAG.getConstant(IntVal.trunc(32), MVT::i32); 676 SDValue Hi = DAG.getConstant(IntVal.lshr(32).trunc(32), MVT::i32); 677 if (TLI.isBigEndian()) std::swap(Lo, Hi); 678 679 Lo = DAG.getStore(Chain, dl, Lo, Ptr, ST->getPointerInfo(), isVolatile, 680 isNonTemporal, Alignment, AAInfo); 681 Ptr = DAG.getNode(ISD::ADD, dl, Ptr.getValueType(), Ptr, 682 DAG.getConstant(4, Ptr.getValueType())); 683 Hi = DAG.getStore(Chain, dl, Hi, Ptr, 684 ST->getPointerInfo().getWithOffset(4), 685 isVolatile, isNonTemporal, MinAlign(Alignment, 4U), 686 AAInfo); 687 688 return DAG.getNode(ISD::TokenFactor, dl, MVT::Other, Lo, Hi); 689 } 690 } 691 } 692 return SDValue(nullptr, 0); 693 } 694 695 void SelectionDAGLegalize::LegalizeStoreOps(SDNode *Node) { 696 StoreSDNode *ST = cast<StoreSDNode>(Node); 697 SDValue Chain = ST->getChain(); 698 SDValue Ptr = ST->getBasePtr(); 699 SDLoc dl(Node); 700 701 unsigned Alignment = ST->getAlignment(); 702 bool isVolatile = ST->isVolatile(); 703 bool isNonTemporal = ST->isNonTemporal(); 704 AAMDNodes AAInfo = ST->getAAInfo(); 705 706 if (!ST->isTruncatingStore()) { 707 if (SDNode *OptStore = OptimizeFloatStore(ST).getNode()) { 708 ReplaceNode(ST, OptStore); 709 return; 710 } 711 712 { 713 SDValue Value = ST->getValue(); 714 MVT VT = Value.getSimpleValueType(); 715 switch (TLI.getOperationAction(ISD::STORE, VT)) { 716 default: llvm_unreachable("This action is not supported yet!"); 717 case TargetLowering::Legal: { 718 // If this is an unaligned store and the target doesn't support it, 719 // expand it. 
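        // The expansion is only triggered when the store's alignment is below
        // the ABI alignment of the memory type; targets whose
        // allowsMisalignedMemoryAccesses hook accepts the access skip it.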
720 unsigned AS = ST->getAddressSpace(); 721 unsigned Align = ST->getAlignment(); 722 if (!TLI.allowsMisalignedMemoryAccesses(ST->getMemoryVT(), AS, Align)) { 723 Type *Ty = ST->getMemoryVT().getTypeForEVT(*DAG.getContext()); 724 unsigned ABIAlignment= TLI.getDataLayout()->getABITypeAlignment(Ty); 725 if (Align < ABIAlignment) 726 ExpandUnalignedStore(cast<StoreSDNode>(Node), 727 DAG, TLI, this); 728 } 729 break; 730 } 731 case TargetLowering::Custom: { 732 SDValue Res = TLI.LowerOperation(SDValue(Node, 0), DAG); 733 if (Res.getNode()) 734 ReplaceNode(SDValue(Node, 0), Res); 735 return; 736 } 737 case TargetLowering::Promote: { 738 MVT NVT = TLI.getTypeToPromoteTo(ISD::STORE, VT); 739 assert(NVT.getSizeInBits() == VT.getSizeInBits() && 740 "Can only promote stores to same size type"); 741 Value = DAG.getNode(ISD::BITCAST, dl, NVT, Value); 742 SDValue Result = 743 DAG.getStore(Chain, dl, Value, Ptr, 744 ST->getPointerInfo(), isVolatile, 745 isNonTemporal, Alignment, AAInfo); 746 ReplaceNode(SDValue(Node, 0), Result); 747 break; 748 } 749 } 750 return; 751 } 752 } else { 753 SDValue Value = ST->getValue(); 754 755 EVT StVT = ST->getMemoryVT(); 756 unsigned StWidth = StVT.getSizeInBits(); 757 758 if (StWidth != StVT.getStoreSizeInBits()) { 759 // Promote to a byte-sized store with upper bits zero if not 760 // storing an integral number of bytes. For example, promote 761 // TRUNCSTORE:i1 X -> TRUNCSTORE:i8 (and X, 1) 762 EVT NVT = EVT::getIntegerVT(*DAG.getContext(), 763 StVT.getStoreSizeInBits()); 764 Value = DAG.getZeroExtendInReg(Value, dl, StVT); 765 SDValue Result = 766 DAG.getTruncStore(Chain, dl, Value, Ptr, ST->getPointerInfo(), 767 NVT, isVolatile, isNonTemporal, Alignment, 768 AAInfo); 769 ReplaceNode(SDValue(Node, 0), Result); 770 } else if (StWidth & (StWidth - 1)) { 771 // If not storing a power-of-2 number of bits, expand as two stores. 772 assert(!StVT.isVector() && "Unsupported truncstore!"); 773 unsigned RoundWidth = 1 << Log2_32(StWidth); 774 assert(RoundWidth < StWidth); 775 unsigned ExtraWidth = StWidth - RoundWidth; 776 assert(ExtraWidth < RoundWidth); 777 assert(!(RoundWidth % 8) && !(ExtraWidth % 8) && 778 "Store size not an integral number of bytes!"); 779 EVT RoundVT = EVT::getIntegerVT(*DAG.getContext(), RoundWidth); 780 EVT ExtraVT = EVT::getIntegerVT(*DAG.getContext(), ExtraWidth); 781 SDValue Lo, Hi; 782 unsigned IncrementSize; 783 784 if (TLI.isLittleEndian()) { 785 // TRUNCSTORE:i24 X -> TRUNCSTORE:i16 X, TRUNCSTORE@+2:i8 (srl X, 16) 786 // Store the bottom RoundWidth bits. 787 Lo = DAG.getTruncStore(Chain, dl, Value, Ptr, ST->getPointerInfo(), 788 RoundVT, 789 isVolatile, isNonTemporal, Alignment, 790 AAInfo); 791 792 // Store the remaining ExtraWidth bits. 793 IncrementSize = RoundWidth / 8; 794 Ptr = DAG.getNode(ISD::ADD, dl, Ptr.getValueType(), Ptr, 795 DAG.getConstant(IncrementSize, Ptr.getValueType())); 796 Hi = DAG.getNode(ISD::SRL, dl, Value.getValueType(), Value, 797 DAG.getConstant(RoundWidth, 798 TLI.getShiftAmountTy(Value.getValueType()))); 799 Hi = DAG.getTruncStore(Chain, dl, Hi, Ptr, 800 ST->getPointerInfo().getWithOffset(IncrementSize), 801 ExtraVT, isVolatile, isNonTemporal, 802 MinAlign(Alignment, IncrementSize), AAInfo); 803 } else { 804 // Big endian - avoid unaligned stores. 805 // TRUNCSTORE:i24 X -> TRUNCSTORE:i16 (srl X, 8), TRUNCSTORE@+2:i8 X 806 // Store the top RoundWidth bits. 
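          // For example, for an i24 truncating store RoundWidth is 16 and
          // ExtraWidth is 8, so this branch stores (X >> 8) as an i16 at
          // offset 0 and X as an i8 at offset 2; the little-endian branch
          // above instead stores the low 16 bits at offset 0 and (X >> 16)
          // as an i8 at offset 2.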
807 Hi = DAG.getNode(ISD::SRL, dl, Value.getValueType(), Value, 808 DAG.getConstant(ExtraWidth, 809 TLI.getShiftAmountTy(Value.getValueType()))); 810 Hi = DAG.getTruncStore(Chain, dl, Hi, Ptr, ST->getPointerInfo(), 811 RoundVT, isVolatile, isNonTemporal, Alignment, 812 AAInfo); 813 814 // Store the remaining ExtraWidth bits. 815 IncrementSize = RoundWidth / 8; 816 Ptr = DAG.getNode(ISD::ADD, dl, Ptr.getValueType(), Ptr, 817 DAG.getConstant(IncrementSize, Ptr.getValueType())); 818 Lo = DAG.getTruncStore(Chain, dl, Value, Ptr, 819 ST->getPointerInfo().getWithOffset(IncrementSize), 820 ExtraVT, isVolatile, isNonTemporal, 821 MinAlign(Alignment, IncrementSize), AAInfo); 822 } 823 824 // The order of the stores doesn't matter. 825 SDValue Result = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, Lo, Hi); 826 ReplaceNode(SDValue(Node, 0), Result); 827 } else { 828 switch (TLI.getTruncStoreAction(ST->getValue().getSimpleValueType(), 829 StVT.getSimpleVT())) { 830 default: llvm_unreachable("This action is not supported yet!"); 831 case TargetLowering::Legal: { 832 unsigned AS = ST->getAddressSpace(); 833 unsigned Align = ST->getAlignment(); 834 // If this is an unaligned store and the target doesn't support it, 835 // expand it. 836 if (!TLI.allowsMisalignedMemoryAccesses(ST->getMemoryVT(), AS, Align)) { 837 Type *Ty = ST->getMemoryVT().getTypeForEVT(*DAG.getContext()); 838 unsigned ABIAlignment= TLI.getDataLayout()->getABITypeAlignment(Ty); 839 if (Align < ABIAlignment) 840 ExpandUnalignedStore(cast<StoreSDNode>(Node), DAG, TLI, this); 841 } 842 break; 843 } 844 case TargetLowering::Custom: { 845 SDValue Res = TLI.LowerOperation(SDValue(Node, 0), DAG); 846 if (Res.getNode()) 847 ReplaceNode(SDValue(Node, 0), Res); 848 return; 849 } 850 case TargetLowering::Expand: 851 assert(!StVT.isVector() && 852 "Vector Stores are handled in LegalizeVectorOps"); 853 854 // TRUNCSTORE:i16 i32 -> STORE i16 855 assert(TLI.isTypeLegal(StVT) && 856 "Do not know how to expand this store!"); 857 Value = DAG.getNode(ISD::TRUNCATE, dl, StVT, Value); 858 SDValue Result = 859 DAG.getStore(Chain, dl, Value, Ptr, ST->getPointerInfo(), 860 isVolatile, isNonTemporal, Alignment, AAInfo); 861 ReplaceNode(SDValue(Node, 0), Result); 862 break; 863 } 864 } 865 } 866 } 867 868 void SelectionDAGLegalize::LegalizeLoadOps(SDNode *Node) { 869 LoadSDNode *LD = cast<LoadSDNode>(Node); 870 SDValue Chain = LD->getChain(); // The chain. 871 SDValue Ptr = LD->getBasePtr(); // The base pointer. 872 SDValue Value; // The value returned by the load op. 873 SDLoc dl(Node); 874 875 ISD::LoadExtType ExtType = LD->getExtensionType(); 876 if (ExtType == ISD::NON_EXTLOAD) { 877 MVT VT = Node->getSimpleValueType(0); 878 SDValue RVal = SDValue(Node, 0); 879 SDValue RChain = SDValue(Node, 1); 880 881 switch (TLI.getOperationAction(Node->getOpcode(), VT)) { 882 default: llvm_unreachable("This action is not supported yet!"); 883 case TargetLowering::Legal: { 884 unsigned AS = LD->getAddressSpace(); 885 unsigned Align = LD->getAlignment(); 886 // If this is an unaligned load and the target doesn't support it, 887 // expand it. 
888 if (!TLI.allowsMisalignedMemoryAccesses(LD->getMemoryVT(), AS, Align)) { 889 Type *Ty = LD->getMemoryVT().getTypeForEVT(*DAG.getContext()); 890 unsigned ABIAlignment = 891 TLI.getDataLayout()->getABITypeAlignment(Ty); 892 if (Align < ABIAlignment){ 893 ExpandUnalignedLoad(cast<LoadSDNode>(Node), DAG, TLI, RVal, RChain); 894 } 895 } 896 break; 897 } 898 case TargetLowering::Custom: { 899 SDValue Res = TLI.LowerOperation(RVal, DAG); 900 if (Res.getNode()) { 901 RVal = Res; 902 RChain = Res.getValue(1); 903 } 904 break; 905 } 906 case TargetLowering::Promote: { 907 MVT NVT = TLI.getTypeToPromoteTo(Node->getOpcode(), VT); 908 assert(NVT.getSizeInBits() == VT.getSizeInBits() && 909 "Can only promote loads to same size type"); 910 911 SDValue Res = DAG.getLoad(NVT, dl, Chain, Ptr, LD->getMemOperand()); 912 RVal = DAG.getNode(ISD::BITCAST, dl, VT, Res); 913 RChain = Res.getValue(1); 914 break; 915 } 916 } 917 if (RChain.getNode() != Node) { 918 assert(RVal.getNode() != Node && "Load must be completely replaced"); 919 DAG.ReplaceAllUsesOfValueWith(SDValue(Node, 0), RVal); 920 DAG.ReplaceAllUsesOfValueWith(SDValue(Node, 1), RChain); 921 if (UpdatedNodes) { 922 UpdatedNodes->insert(RVal.getNode()); 923 UpdatedNodes->insert(RChain.getNode()); 924 } 925 ReplacedNode(Node); 926 } 927 return; 928 } 929 930 EVT SrcVT = LD->getMemoryVT(); 931 unsigned SrcWidth = SrcVT.getSizeInBits(); 932 unsigned Alignment = LD->getAlignment(); 933 bool isVolatile = LD->isVolatile(); 934 bool isNonTemporal = LD->isNonTemporal(); 935 bool isInvariant = LD->isInvariant(); 936 AAMDNodes AAInfo = LD->getAAInfo(); 937 938 if (SrcWidth != SrcVT.getStoreSizeInBits() && 939 // Some targets pretend to have an i1 loading operation, and actually 940 // load an i8. This trick is correct for ZEXTLOAD because the top 7 941 // bits are guaranteed to be zero; it helps the optimizers understand 942 // that these bits are zero. It is also useful for EXTLOAD, since it 943 // tells the optimizers that those bits are undefined. It would be 944 // nice to have an effective generic way of getting these benefits... 945 // Until such a way is found, don't insist on promoting i1 here. 946 (SrcVT != MVT::i1 || 947 TLI.getLoadExtAction(ExtType, MVT::i1) == TargetLowering::Promote)) { 948 // Promote to a byte-sized load if not loading an integral number of 949 // bytes. For example, promote EXTLOAD:i20 -> EXTLOAD:i24. 950 unsigned NewWidth = SrcVT.getStoreSizeInBits(); 951 EVT NVT = EVT::getIntegerVT(*DAG.getContext(), NewWidth); 952 SDValue Ch; 953 954 // The extra bits are guaranteed to be zero, since we stored them that 955 // way. A zext load from NVT thus automatically gives zext from SrcVT. 956 957 ISD::LoadExtType NewExtType = 958 ExtType == ISD::ZEXTLOAD ? ISD::ZEXTLOAD : ISD::EXTLOAD; 959 960 SDValue Result = 961 DAG.getExtLoad(NewExtType, dl, Node->getValueType(0), 962 Chain, Ptr, LD->getPointerInfo(), 963 NVT, isVolatile, isNonTemporal, isInvariant, Alignment, 964 AAInfo); 965 966 Ch = Result.getValue(1); // The chain. 967 968 if (ExtType == ISD::SEXTLOAD) 969 // Having the top bits zero doesn't help when sign extending. 970 Result = DAG.getNode(ISD::SIGN_EXTEND_INREG, dl, 971 Result.getValueType(), 972 Result, DAG.getValueType(SrcVT)); 973 else if (ExtType == ISD::ZEXTLOAD || NVT == Result.getValueType()) 974 // All the top bits are guaranteed to be zero - inform the optimizers. 
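      // (The AssertZext below records, for later DAG combines, that the value
      // is already zero-extended from SrcVT, so the information gained from
      // the widened load is not lost.)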
975 Result = DAG.getNode(ISD::AssertZext, dl, 976 Result.getValueType(), Result, 977 DAG.getValueType(SrcVT)); 978 979 Value = Result; 980 Chain = Ch; 981 } else if (SrcWidth & (SrcWidth - 1)) { 982 // If not loading a power-of-2 number of bits, expand as two loads. 983 assert(!SrcVT.isVector() && "Unsupported extload!"); 984 unsigned RoundWidth = 1 << Log2_32(SrcWidth); 985 assert(RoundWidth < SrcWidth); 986 unsigned ExtraWidth = SrcWidth - RoundWidth; 987 assert(ExtraWidth < RoundWidth); 988 assert(!(RoundWidth % 8) && !(ExtraWidth % 8) && 989 "Load size not an integral number of bytes!"); 990 EVT RoundVT = EVT::getIntegerVT(*DAG.getContext(), RoundWidth); 991 EVT ExtraVT = EVT::getIntegerVT(*DAG.getContext(), ExtraWidth); 992 SDValue Lo, Hi, Ch; 993 unsigned IncrementSize; 994 995 if (TLI.isLittleEndian()) { 996 // EXTLOAD:i24 -> ZEXTLOAD:i16 | (shl EXTLOAD@+2:i8, 16) 997 // Load the bottom RoundWidth bits. 998 Lo = DAG.getExtLoad(ISD::ZEXTLOAD, dl, Node->getValueType(0), 999 Chain, Ptr, 1000 LD->getPointerInfo(), RoundVT, isVolatile, 1001 isNonTemporal, isInvariant, Alignment, AAInfo); 1002 1003 // Load the remaining ExtraWidth bits. 1004 IncrementSize = RoundWidth / 8; 1005 Ptr = DAG.getNode(ISD::ADD, dl, Ptr.getValueType(), Ptr, 1006 DAG.getConstant(IncrementSize, Ptr.getValueType())); 1007 Hi = DAG.getExtLoad(ExtType, dl, Node->getValueType(0), Chain, Ptr, 1008 LD->getPointerInfo().getWithOffset(IncrementSize), 1009 ExtraVT, isVolatile, isNonTemporal, isInvariant, 1010 MinAlign(Alignment, IncrementSize), AAInfo); 1011 1012 // Build a factor node to remember that this load is independent of 1013 // the other one. 1014 Ch = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, Lo.getValue(1), 1015 Hi.getValue(1)); 1016 1017 // Move the top bits to the right place. 1018 Hi = DAG.getNode(ISD::SHL, dl, Hi.getValueType(), Hi, 1019 DAG.getConstant(RoundWidth, 1020 TLI.getShiftAmountTy(Hi.getValueType()))); 1021 1022 // Join the hi and lo parts. 1023 Value = DAG.getNode(ISD::OR, dl, Node->getValueType(0), Lo, Hi); 1024 } else { 1025 // Big endian - avoid unaligned loads. 1026 // EXTLOAD:i24 -> (shl EXTLOAD:i16, 8) | ZEXTLOAD@+2:i8 1027 // Load the top RoundWidth bits. 1028 Hi = DAG.getExtLoad(ExtType, dl, Node->getValueType(0), Chain, Ptr, 1029 LD->getPointerInfo(), RoundVT, isVolatile, 1030 isNonTemporal, isInvariant, Alignment, AAInfo); 1031 1032 // Load the remaining ExtraWidth bits. 1033 IncrementSize = RoundWidth / 8; 1034 Ptr = DAG.getNode(ISD::ADD, dl, Ptr.getValueType(), Ptr, 1035 DAG.getConstant(IncrementSize, Ptr.getValueType())); 1036 Lo = DAG.getExtLoad(ISD::ZEXTLOAD, 1037 dl, Node->getValueType(0), Chain, Ptr, 1038 LD->getPointerInfo().getWithOffset(IncrementSize), 1039 ExtraVT, isVolatile, isNonTemporal, isInvariant, 1040 MinAlign(Alignment, IncrementSize), AAInfo); 1041 1042 // Build a factor node to remember that this load is independent of 1043 // the other one. 1044 Ch = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, Lo.getValue(1), 1045 Hi.getValue(1)); 1046 1047 // Move the top bits to the right place. 1048 Hi = DAG.getNode(ISD::SHL, dl, Hi.getValueType(), Hi, 1049 DAG.getConstant(ExtraWidth, 1050 TLI.getShiftAmountTy(Hi.getValueType()))); 1051 1052 // Join the hi and lo parts. 
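      // (In this big-endian branch the shift amount is ExtraWidth rather than
      // RoundWidth: the high part holds the upper RoundWidth bits, so it is
      // shifted left by the number of bits supplied by the low part.)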
1053 Value = DAG.getNode(ISD::OR, dl, Node->getValueType(0), Lo, Hi); 1054 } 1055 1056 Chain = Ch; 1057 } else { 1058 bool isCustom = false; 1059 switch (TLI.getLoadExtAction(ExtType, SrcVT.getSimpleVT())) { 1060 default: llvm_unreachable("This action is not supported yet!"); 1061 case TargetLowering::Custom: 1062 isCustom = true; 1063 // FALLTHROUGH 1064 case TargetLowering::Legal: { 1065 Value = SDValue(Node, 0); 1066 Chain = SDValue(Node, 1); 1067 1068 if (isCustom) { 1069 SDValue Res = TLI.LowerOperation(SDValue(Node, 0), DAG); 1070 if (Res.getNode()) { 1071 Value = Res; 1072 Chain = Res.getValue(1); 1073 } 1074 } else { 1075 // If this is an unaligned load and the target doesn't support 1076 // it, expand it. 1077 EVT MemVT = LD->getMemoryVT(); 1078 unsigned AS = LD->getAddressSpace(); 1079 unsigned Align = LD->getAlignment(); 1080 if (!TLI.allowsMisalignedMemoryAccesses(MemVT, AS, Align)) { 1081 Type *Ty = 1082 LD->getMemoryVT().getTypeForEVT(*DAG.getContext()); 1083 unsigned ABIAlignment = 1084 TLI.getDataLayout()->getABITypeAlignment(Ty); 1085 if (Align < ABIAlignment){ 1086 ExpandUnalignedLoad(cast<LoadSDNode>(Node), 1087 DAG, TLI, Value, Chain); 1088 } 1089 } 1090 } 1091 break; 1092 } 1093 case TargetLowering::Expand: 1094 if (!TLI.isLoadExtLegal(ISD::EXTLOAD, SrcVT) && 1095 TLI.isTypeLegal(SrcVT)) { 1096 SDValue Load = DAG.getLoad(SrcVT, dl, Chain, Ptr, 1097 LD->getMemOperand()); 1098 unsigned ExtendOp; 1099 switch (ExtType) { 1100 case ISD::EXTLOAD: 1101 ExtendOp = (SrcVT.isFloatingPoint() ? 1102 ISD::FP_EXTEND : ISD::ANY_EXTEND); 1103 break; 1104 case ISD::SEXTLOAD: ExtendOp = ISD::SIGN_EXTEND; break; 1105 case ISD::ZEXTLOAD: ExtendOp = ISD::ZERO_EXTEND; break; 1106 default: llvm_unreachable("Unexpected extend load type!"); 1107 } 1108 Value = DAG.getNode(ExtendOp, dl, Node->getValueType(0), Load); 1109 Chain = Load.getValue(1); 1110 break; 1111 } 1112 1113 assert(!SrcVT.isVector() && 1114 "Vector Loads are handled in LegalizeVectorOps"); 1115 1116 // FIXME: This does not work for vectors on most targets. Sign- 1117 // and zero-extend operations are currently folded into extending 1118 // loads, whether they are legal or not, and then we end up here 1119 // without any support for legalizing them. 1120 assert(ExtType != ISD::EXTLOAD && 1121 "EXTLOAD should always be supported!"); 1122 // Turn the unsupported load into an EXTLOAD followed by an 1123 // explicit zero/sign extend inreg. 1124 SDValue Result = DAG.getExtLoad(ISD::EXTLOAD, dl, 1125 Node->getValueType(0), 1126 Chain, Ptr, SrcVT, 1127 LD->getMemOperand()); 1128 SDValue ValRes; 1129 if (ExtType == ISD::SEXTLOAD) 1130 ValRes = DAG.getNode(ISD::SIGN_EXTEND_INREG, dl, 1131 Result.getValueType(), 1132 Result, DAG.getValueType(SrcVT)); 1133 else 1134 ValRes = DAG.getZeroExtendInReg(Result, dl, 1135 SrcVT.getScalarType()); 1136 Value = ValRes; 1137 Chain = Result.getValue(1); 1138 break; 1139 } 1140 } 1141 1142 // Since loads produce two values, make sure to remember that we legalized 1143 // both of them. 1144 if (Chain.getNode() != Node) { 1145 assert(Value.getNode() != Node && "Load must be completely replaced"); 1146 DAG.ReplaceAllUsesOfValueWith(SDValue(Node, 0), Value); 1147 DAG.ReplaceAllUsesOfValueWith(SDValue(Node, 1), Chain); 1148 if (UpdatedNodes) { 1149 UpdatedNodes->insert(Value.getNode()); 1150 UpdatedNodes->insert(Chain.getNode()); 1151 } 1152 ReplacedNode(Node); 1153 } 1154 } 1155 1156 /// LegalizeOp - Return a legal replacement for the given operation, with 1157 /// all legal operands. 
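/// The required action is looked up per opcode (and, for some opcodes, per
/// operand or result type): Legal nodes are left alone, Custom nodes are
/// handed to TargetLowering::LowerOperation, and Expand/Promote fall through
/// to ExpandNode / PromoteNode.  Loads and stores take the dedicated
/// LegalizeLoadOps / LegalizeStoreOps paths.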
1158 void SelectionDAGLegalize::LegalizeOp(SDNode *Node) { 1159 DEBUG(dbgs() << "\nLegalizing: "; Node->dump(&DAG)); 1160 1161 if (Node->getOpcode() == ISD::TargetConstant) // Allow illegal target nodes. 1162 return; 1163 1164 for (unsigned i = 0, e = Node->getNumValues(); i != e; ++i) 1165 assert(TLI.getTypeAction(*DAG.getContext(), Node->getValueType(i)) == 1166 TargetLowering::TypeLegal && 1167 "Unexpected illegal type!"); 1168 1169 for (unsigned i = 0, e = Node->getNumOperands(); i != e; ++i) 1170 assert((TLI.getTypeAction(*DAG.getContext(), 1171 Node->getOperand(i).getValueType()) == 1172 TargetLowering::TypeLegal || 1173 Node->getOperand(i).getOpcode() == ISD::TargetConstant) && 1174 "Unexpected illegal type!"); 1175 1176 // Figure out the correct action; the way to query this varies by opcode 1177 TargetLowering::LegalizeAction Action = TargetLowering::Legal; 1178 bool SimpleFinishLegalizing = true; 1179 switch (Node->getOpcode()) { 1180 case ISD::INTRINSIC_W_CHAIN: 1181 case ISD::INTRINSIC_WO_CHAIN: 1182 case ISD::INTRINSIC_VOID: 1183 case ISD::STACKSAVE: 1184 Action = TLI.getOperationAction(Node->getOpcode(), MVT::Other); 1185 break; 1186 case ISD::VAARG: 1187 Action = TLI.getOperationAction(Node->getOpcode(), 1188 Node->getValueType(0)); 1189 if (Action != TargetLowering::Promote) 1190 Action = TLI.getOperationAction(Node->getOpcode(), MVT::Other); 1191 break; 1192 case ISD::FP_TO_FP16: 1193 case ISD::SINT_TO_FP: 1194 case ISD::UINT_TO_FP: 1195 case ISD::EXTRACT_VECTOR_ELT: 1196 Action = TLI.getOperationAction(Node->getOpcode(), 1197 Node->getOperand(0).getValueType()); 1198 break; 1199 case ISD::FP_ROUND_INREG: 1200 case ISD::SIGN_EXTEND_INREG: { 1201 EVT InnerType = cast<VTSDNode>(Node->getOperand(1))->getVT(); 1202 Action = TLI.getOperationAction(Node->getOpcode(), InnerType); 1203 break; 1204 } 1205 case ISD::ATOMIC_STORE: { 1206 Action = TLI.getOperationAction(Node->getOpcode(), 1207 Node->getOperand(2).getValueType()); 1208 break; 1209 } 1210 case ISD::SELECT_CC: 1211 case ISD::SETCC: 1212 case ISD::BR_CC: { 1213 unsigned CCOperand = Node->getOpcode() == ISD::SELECT_CC ? 4 : 1214 Node->getOpcode() == ISD::SETCC ? 2 : 1; 1215 unsigned CompareOperand = Node->getOpcode() == ISD::BR_CC ? 2 : 0; 1216 MVT OpVT = Node->getOperand(CompareOperand).getSimpleValueType(); 1217 ISD::CondCode CCCode = 1218 cast<CondCodeSDNode>(Node->getOperand(CCOperand))->get(); 1219 Action = TLI.getCondCodeAction(CCCode, OpVT); 1220 if (Action == TargetLowering::Legal) { 1221 if (Node->getOpcode() == ISD::SELECT_CC) 1222 Action = TLI.getOperationAction(Node->getOpcode(), 1223 Node->getValueType(0)); 1224 else 1225 Action = TLI.getOperationAction(Node->getOpcode(), OpVT); 1226 } 1227 break; 1228 } 1229 case ISD::LOAD: 1230 case ISD::STORE: 1231 // FIXME: Model these properly. LOAD and STORE are complicated, and 1232 // STORE expects the unlegalized operand in some cases. 1233 SimpleFinishLegalizing = false; 1234 break; 1235 case ISD::CALLSEQ_START: 1236 case ISD::CALLSEQ_END: 1237 // FIXME: This shouldn't be necessary. These nodes have special properties 1238 // dealing with the recursive nature of legalization. Removing this 1239 // special case should be done as part of making LegalizeDAG non-recursive. 
    SimpleFinishLegalizing = false;
    break;
  case ISD::EXTRACT_ELEMENT:
  case ISD::FLT_ROUNDS_:
  case ISD::SADDO:
  case ISD::SSUBO:
  case ISD::UADDO:
  case ISD::USUBO:
  case ISD::SMULO:
  case ISD::UMULO:
  case ISD::FPOWI:
  case ISD::MERGE_VALUES:
  case ISD::EH_RETURN:
  case ISD::FRAME_TO_ARGS_OFFSET:
  case ISD::EH_SJLJ_SETJMP:
  case ISD::EH_SJLJ_LONGJMP:
    // These operations lie about being legal: when they claim to be legal,
    // they should actually be expanded.
    Action = TLI.getOperationAction(Node->getOpcode(), Node->getValueType(0));
    if (Action == TargetLowering::Legal)
      Action = TargetLowering::Expand;
    break;
  case ISD::INIT_TRAMPOLINE:
  case ISD::ADJUST_TRAMPOLINE:
  case ISD::FRAMEADDR:
  case ISD::RETURNADDR:
    // These operations lie about being legal: when they claim to be legal,
    // they should actually be custom-lowered.
    Action = TLI.getOperationAction(Node->getOpcode(), Node->getValueType(0));
    if (Action == TargetLowering::Legal)
      Action = TargetLowering::Custom;
    break;
  case ISD::READ_REGISTER:
  case ISD::WRITE_REGISTER:
    // Named registers are legal in the DAG, but blocked by register name
    // selection if not implemented by the target (to choose the correct
    // register).  They'll be converted to Copy(To/From)Reg.
    Action = TargetLowering::Legal;
    break;
  case ISD::DEBUGTRAP:
    Action = TLI.getOperationAction(Node->getOpcode(), Node->getValueType(0));
    if (Action == TargetLowering::Expand) {
      // replace ISD::DEBUGTRAP with ISD::TRAP
      SDValue NewVal;
      NewVal = DAG.getNode(ISD::TRAP, SDLoc(Node), Node->getVTList(),
                           Node->getOperand(0));
      ReplaceNode(Node, NewVal.getNode());
      LegalizeOp(NewVal.getNode());
      return;
    }
    break;

  default:
    if (Node->getOpcode() >= ISD::BUILTIN_OP_END) {
      Action = TargetLowering::Legal;
    } else {
      Action = TLI.getOperationAction(Node->getOpcode(), Node->getValueType(0));
    }
    break;
  }

  if (SimpleFinishLegalizing) {
    SDNode *NewNode = Node;
    switch (Node->getOpcode()) {
    default: break;
    case ISD::SHL:
    case ISD::SRL:
    case ISD::SRA:
    case ISD::ROTL:
    case ISD::ROTR:
      // Legalizing shifts/rotates requires adjusting the shift amount
      // to the appropriate width.
      if (!Node->getOperand(1).getValueType().isVector()) {
        SDValue SAO =
          DAG.getShiftAmountOperand(Node->getOperand(0).getValueType(),
                                    Node->getOperand(1));
        HandleSDNode Handle(SAO);
        LegalizeOp(SAO.getNode());
        NewNode = DAG.UpdateNodeOperands(Node, Node->getOperand(0),
                                         Handle.getValue());
      }
      break;
    case ISD::SRL_PARTS:
    case ISD::SRA_PARTS:
    case ISD::SHL_PARTS:
      // Legalizing shifts/rotates requires adjusting the shift amount
      // to the appropriate width.
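      // (getShiftAmountOperand adjusts the amount to the target's preferred
      // shift-amount type; for the *_PARTS nodes the amount is operand 2
      // rather than operand 1, which is why this case is handled separately.)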
1327 if (!Node->getOperand(2).getValueType().isVector()) { 1328 SDValue SAO = 1329 DAG.getShiftAmountOperand(Node->getOperand(0).getValueType(), 1330 Node->getOperand(2)); 1331 HandleSDNode Handle(SAO); 1332 LegalizeOp(SAO.getNode()); 1333 NewNode = DAG.UpdateNodeOperands(Node, Node->getOperand(0), 1334 Node->getOperand(1), 1335 Handle.getValue()); 1336 } 1337 break; 1338 } 1339 1340 if (NewNode != Node) { 1341 ReplaceNode(Node, NewNode); 1342 Node = NewNode; 1343 } 1344 switch (Action) { 1345 case TargetLowering::Legal: 1346 return; 1347 case TargetLowering::Custom: { 1348 // FIXME: The handling for custom lowering with multiple results is 1349 // a complete mess. 1350 SDValue Res = TLI.LowerOperation(SDValue(Node, 0), DAG); 1351 if (Res.getNode()) { 1352 if (!(Res.getNode() != Node || Res.getResNo() != 0)) 1353 return; 1354 1355 if (Node->getNumValues() == 1) { 1356 // We can just directly replace this node with the lowered value. 1357 ReplaceNode(SDValue(Node, 0), Res); 1358 return; 1359 } 1360 1361 SmallVector<SDValue, 8> ResultVals; 1362 for (unsigned i = 0, e = Node->getNumValues(); i != e; ++i) 1363 ResultVals.push_back(Res.getValue(i)); 1364 ReplaceNode(Node, ResultVals.data()); 1365 return; 1366 } 1367 } 1368 // FALL THROUGH 1369 case TargetLowering::Expand: 1370 ExpandNode(Node); 1371 return; 1372 case TargetLowering::Promote: 1373 PromoteNode(Node); 1374 return; 1375 } 1376 } 1377 1378 switch (Node->getOpcode()) { 1379 default: 1380 #ifndef NDEBUG 1381 dbgs() << "NODE: "; 1382 Node->dump( &DAG); 1383 dbgs() << "\n"; 1384 #endif 1385 llvm_unreachable("Do not know how to legalize this operator!"); 1386 1387 case ISD::CALLSEQ_START: 1388 case ISD::CALLSEQ_END: 1389 break; 1390 case ISD::LOAD: { 1391 return LegalizeLoadOps(Node); 1392 } 1393 case ISD::STORE: { 1394 return LegalizeStoreOps(Node); 1395 } 1396 } 1397 } 1398 1399 SDValue SelectionDAGLegalize::ExpandExtractFromVectorThroughStack(SDValue Op) { 1400 SDValue Vec = Op.getOperand(0); 1401 SDValue Idx = Op.getOperand(1); 1402 SDLoc dl(Op); 1403 1404 // Before we generate a new store to a temporary stack slot, see if there is 1405 // already one that we can use. There often is because when we scalarize 1406 // vector operations (using SelectionDAG::UnrollVectorOp for example) a whole 1407 // series of EXTRACT_VECTOR_ELT nodes are generated, one for each element in 1408 // the vector. If all are expanded here, we don't want one store per vector 1409 // element. 1410 SDValue StackPtr, Ch; 1411 for (SDNode::use_iterator UI = Vec.getNode()->use_begin(), 1412 UE = Vec.getNode()->use_end(); UI != UE; ++UI) { 1413 SDNode *User = *UI; 1414 if (StoreSDNode *ST = dyn_cast<StoreSDNode>(User)) { 1415 if (ST->isIndexed() || ST->isTruncatingStore() || 1416 ST->getValue() != Vec) 1417 continue; 1418 1419 // Make sure that nothing else could have stored into the destination of 1420 // this store. 1421 if (!ST->getChain().reachesChainWithoutSideEffects(DAG.getEntryNode())) 1422 continue; 1423 1424 StackPtr = ST->getBasePtr(); 1425 Ch = SDValue(ST, 0); 1426 break; 1427 } 1428 } 1429 1430 if (!Ch.getNode()) { 1431 // Store the value to a temporary stack slot, then LOAD the returned part. 1432 StackPtr = DAG.CreateStackTemporary(Vec.getValueType()); 1433 Ch = DAG.getStore(DAG.getEntryNode(), dl, Vec, StackPtr, 1434 MachinePointerInfo(), false, false, 0); 1435 } 1436 1437 // Add the offset to the index. 
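  // For example, extracting element 3 from a v4i32 spilled at StackPtr reads
  // back from StackPtr + 3 * 4: the index is scaled by the element size in
  // bytes and added to the slot address before the final (ext)load.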
1438 unsigned EltSize = 1439 Vec.getValueType().getVectorElementType().getSizeInBits()/8; 1440 Idx = DAG.getNode(ISD::MUL, dl, Idx.getValueType(), Idx, 1441 DAG.getConstant(EltSize, Idx.getValueType())); 1442 1443 Idx = DAG.getZExtOrTrunc(Idx, dl, TLI.getPointerTy()); 1444 StackPtr = DAG.getNode(ISD::ADD, dl, Idx.getValueType(), Idx, StackPtr); 1445 1446 if (Op.getValueType().isVector()) 1447 return DAG.getLoad(Op.getValueType(), dl, Ch, StackPtr,MachinePointerInfo(), 1448 false, false, false, 0); 1449 return DAG.getExtLoad(ISD::EXTLOAD, dl, Op.getValueType(), Ch, StackPtr, 1450 MachinePointerInfo(), 1451 Vec.getValueType().getVectorElementType(), 1452 false, false, false, 0); 1453 } 1454 1455 SDValue SelectionDAGLegalize::ExpandInsertToVectorThroughStack(SDValue Op) { 1456 assert(Op.getValueType().isVector() && "Non-vector insert subvector!"); 1457 1458 SDValue Vec = Op.getOperand(0); 1459 SDValue Part = Op.getOperand(1); 1460 SDValue Idx = Op.getOperand(2); 1461 SDLoc dl(Op); 1462 1463 // Store the value to a temporary stack slot, then LOAD the returned part. 1464 1465 SDValue StackPtr = DAG.CreateStackTemporary(Vec.getValueType()); 1466 int FI = cast<FrameIndexSDNode>(StackPtr.getNode())->getIndex(); 1467 MachinePointerInfo PtrInfo = MachinePointerInfo::getFixedStack(FI); 1468 1469 // First store the whole vector. 1470 SDValue Ch = DAG.getStore(DAG.getEntryNode(), dl, Vec, StackPtr, PtrInfo, 1471 false, false, 0); 1472 1473 // Then store the inserted part. 1474 1475 // Add the offset to the index. 1476 unsigned EltSize = 1477 Vec.getValueType().getVectorElementType().getSizeInBits()/8; 1478 1479 Idx = DAG.getNode(ISD::MUL, dl, Idx.getValueType(), Idx, 1480 DAG.getConstant(EltSize, Idx.getValueType())); 1481 Idx = DAG.getZExtOrTrunc(Idx, dl, TLI.getPointerTy()); 1482 1483 SDValue SubStackPtr = DAG.getNode(ISD::ADD, dl, Idx.getValueType(), Idx, 1484 StackPtr); 1485 1486 // Store the subvector. 1487 Ch = DAG.getStore(DAG.getEntryNode(), dl, Part, SubStackPtr, 1488 MachinePointerInfo(), false, false, 0); 1489 1490 // Finally, load the updated vector. 1491 return DAG.getLoad(Op.getValueType(), dl, Ch, StackPtr, PtrInfo, 1492 false, false, false, 0); 1493 } 1494 1495 SDValue SelectionDAGLegalize::ExpandVectorBuildThroughStack(SDNode* Node) { 1496 // We can't handle this case efficiently. Allocate a sufficiently 1497 // aligned object on the stack, store each element into it, then load 1498 // the result as a vector. 1499 // Create the stack frame object. 1500 EVT VT = Node->getValueType(0); 1501 EVT EltVT = VT.getVectorElementType(); 1502 SDLoc dl(Node); 1503 SDValue FIPtr = DAG.CreateStackTemporary(VT); 1504 int FI = cast<FrameIndexSDNode>(FIPtr.getNode())->getIndex(); 1505 MachinePointerInfo PtrInfo = MachinePointerInfo::getFixedStack(FI); 1506 1507 // Emit a store of each element to the stack slot. 1508 SmallVector<SDValue, 8> Stores; 1509 unsigned TypeByteSize = EltVT.getSizeInBits() / 8; 1510 // Store (in the right endianness) the elements to memory. 1511 for (unsigned i = 0, e = Node->getNumOperands(); i != e; ++i) { 1512 // Ignore undef elements. 1513 if (Node->getOperand(i).getOpcode() == ISD::UNDEF) continue; 1514 1515 unsigned Offset = TypeByteSize*i; 1516 1517 SDValue Idx = DAG.getConstant(Offset, FIPtr.getValueType()); 1518 Idx = DAG.getNode(ISD::ADD, dl, FIPtr.getValueType(), FIPtr, Idx); 1519 1520 // If the destination vector element type is narrower than the source 1521 // element type, only store the bits necessary. 
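    // (BUILD_VECTOR operands are allowed to be wider than the element type,
    // e.g. i8 elements passed as i32 values, so a full-width store here would
    // write past the element's slot; the truncating store keeps only the low
    // EltVT bits.)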
1522 if (EltVT.bitsLT(Node->getOperand(i).getValueType().getScalarType())) { 1523 Stores.push_back(DAG.getTruncStore(DAG.getEntryNode(), dl, 1524 Node->getOperand(i), Idx, 1525 PtrInfo.getWithOffset(Offset), 1526 EltVT, false, false, 0)); 1527 } else 1528 Stores.push_back(DAG.getStore(DAG.getEntryNode(), dl, 1529 Node->getOperand(i), Idx, 1530 PtrInfo.getWithOffset(Offset), 1531 false, false, 0)); 1532 } 1533 1534 SDValue StoreChain; 1535 if (!Stores.empty()) // Not all undef elements? 1536 StoreChain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, Stores); 1537 else 1538 StoreChain = DAG.getEntryNode(); 1539 1540 // Result is a load from the stack slot. 1541 return DAG.getLoad(VT, dl, StoreChain, FIPtr, PtrInfo, 1542 false, false, false, 0); 1543 } 1544 1545 SDValue SelectionDAGLegalize::ExpandFCOPYSIGN(SDNode* Node) { 1546 SDLoc dl(Node); 1547 SDValue Tmp1 = Node->getOperand(0); 1548 SDValue Tmp2 = Node->getOperand(1); 1549 1550 // Get the sign bit of the RHS. First obtain a value that has the same 1551 // sign as the sign bit, i.e. negative if and only if the sign bit is 1. 1552 SDValue SignBit; 1553 EVT FloatVT = Tmp2.getValueType(); 1554 EVT IVT = EVT::getIntegerVT(*DAG.getContext(), FloatVT.getSizeInBits()); 1555 if (TLI.isTypeLegal(IVT)) { 1556 // Convert to an integer with the same sign bit. 1557 SignBit = DAG.getNode(ISD::BITCAST, dl, IVT, Tmp2); 1558 } else { 1559 // Store the float to memory, then load the sign part out as an integer. 1560 MVT LoadTy = TLI.getPointerTy(); 1561 // First create a temporary that is aligned for both the load and store. 1562 SDValue StackPtr = DAG.CreateStackTemporary(FloatVT, LoadTy); 1563 // Then store the float to it. 1564 SDValue Ch = 1565 DAG.getStore(DAG.getEntryNode(), dl, Tmp2, StackPtr, MachinePointerInfo(), 1566 false, false, 0); 1567 if (TLI.isBigEndian()) { 1568 assert(FloatVT.isByteSized() && "Unsupported floating point type!"); 1569 // Load out a legal integer with the same sign bit as the float. 1570 SignBit = DAG.getLoad(LoadTy, dl, Ch, StackPtr, MachinePointerInfo(), 1571 false, false, false, 0); 1572 } else { // Little endian 1573 SDValue LoadPtr = StackPtr; 1574 // The float may be wider than the integer we are going to load. Advance 1575 // the pointer so that the loaded integer will contain the sign bit. 1576 unsigned Strides = (FloatVT.getSizeInBits()-1)/LoadTy.getSizeInBits(); 1577 unsigned ByteOffset = (Strides * LoadTy.getSizeInBits()) / 8; 1578 LoadPtr = DAG.getNode(ISD::ADD, dl, LoadPtr.getValueType(), LoadPtr, 1579 DAG.getConstant(ByteOffset, LoadPtr.getValueType())); 1580 // Load a legal integer containing the sign bit. 1581 SignBit = DAG.getLoad(LoadTy, dl, Ch, LoadPtr, MachinePointerInfo(), 1582 false, false, false, 0); 1583 // Move the sign bit to the top bit of the loaded integer. 1584 unsigned BitShift = LoadTy.getSizeInBits() - 1585 (FloatVT.getSizeInBits() - 8 * ByteOffset); 1586 assert(BitShift < LoadTy.getSizeInBits() && "Pointer advanced wrong?"); 1587 if (BitShift) 1588 SignBit = DAG.getNode(ISD::SHL, dl, LoadTy, SignBit, 1589 DAG.getConstant(BitShift, 1590 TLI.getShiftAmountTy(SignBit.getValueType()))); 1591 } 1592 } 1593 // Now get the sign bit proper, by seeing whether the value is negative. 1594 SignBit = DAG.getSetCC(dl, getSetCCResultType(SignBit.getValueType()), 1595 SignBit, DAG.getConstant(0, SignBit.getValueType()), 1596 ISD::SETLT); 1597 // Get the absolute value of the result. 
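  // In effect this computes copysign(Tmp1, Tmp2) as
  //   SignBit ? -fabs(Tmp1) : fabs(Tmp1)
  // using the FABS/FNEG/select sequence below.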
1598 SDValue AbsVal = DAG.getNode(ISD::FABS, dl, Tmp1.getValueType(), Tmp1); 1599 // Select between the nabs and abs value based on the sign bit of 1600 // the input. 1601 return DAG.getSelect(dl, AbsVal.getValueType(), SignBit, 1602 DAG.getNode(ISD::FNEG, dl, AbsVal.getValueType(), AbsVal), 1603 AbsVal); 1604 } 1605 1606 void SelectionDAGLegalize::ExpandDYNAMIC_STACKALLOC(SDNode* Node, 1607 SmallVectorImpl<SDValue> &Results) { 1608 unsigned SPReg = TLI.getStackPointerRegisterToSaveRestore(); 1609 assert(SPReg && "Target cannot require DYNAMIC_STACKALLOC expansion and" 1610 " not tell us which reg is the stack pointer!"); 1611 SDLoc dl(Node); 1612 EVT VT = Node->getValueType(0); 1613 SDValue Tmp1 = SDValue(Node, 0); 1614 SDValue Tmp2 = SDValue(Node, 1); 1615 SDValue Tmp3 = Node->getOperand(2); 1616 SDValue Chain = Tmp1.getOperand(0); 1617 1618 // Chain the dynamic stack allocation so that it doesn't modify the stack 1619 // pointer when other instructions are using the stack. 1620 Chain = DAG.getCALLSEQ_START(Chain, DAG.getIntPtrConstant(0, true), 1621 SDLoc(Node)); 1622 1623 SDValue Size = Tmp2.getOperand(1); 1624 SDValue SP = DAG.getCopyFromReg(Chain, dl, SPReg, VT); 1625 Chain = SP.getValue(1); 1626 unsigned Align = cast<ConstantSDNode>(Tmp3)->getZExtValue(); 1627 unsigned StackAlign = 1628 TM.getSubtargetImpl()->getFrameLowering()->getStackAlignment(); 1629 Tmp1 = DAG.getNode(ISD::SUB, dl, VT, SP, Size); // Value 1630 if (Align > StackAlign) 1631 Tmp1 = DAG.getNode(ISD::AND, dl, VT, Tmp1, 1632 DAG.getConstant(-(uint64_t)Align, VT)); 1633 Chain = DAG.getCopyToReg(Chain, dl, SPReg, Tmp1); // Output chain 1634 1635 Tmp2 = DAG.getCALLSEQ_END(Chain, DAG.getIntPtrConstant(0, true), 1636 DAG.getIntPtrConstant(0, true), SDValue(), 1637 SDLoc(Node)); 1638 1639 Results.push_back(Tmp1); 1640 Results.push_back(Tmp2); 1641 } 1642 1643 /// LegalizeSetCCCondCode - Legalize a SETCC with given LHS and RHS and 1644 /// condition code CC on the current target. 1645 /// 1646 /// If the SETCC has been legalized using AND / OR, then the legalized node 1647 /// will be stored in LHS. RHS and CC will be set to SDValue(). NeedInvert 1648 /// will be set to false. 1649 /// 1650 /// If the SETCC has been legalized by using getSetCCSwappedOperands(), 1651 /// then the values of LHS and RHS will be swapped, CC will be set to the 1652 /// new condition, and NeedInvert will be set to false. 1653 /// 1654 /// If the SETCC has been legalized using the inverse condcode, then LHS and 1655 /// RHS will be unchanged, CC will set to the inverted condcode, and NeedInvert 1656 /// will be set to true. The caller must invert the result of the SETCC with 1657 /// SelectionDAG::getLogicalNOT() or take equivalent action to swap the effect 1658 /// of a true/false result. 1659 /// 1660 /// \returns true if the SetCC has been legalized, false if it hasn't. 1661 bool SelectionDAGLegalize::LegalizeSetCCCondCode(EVT VT, 1662 SDValue &LHS, SDValue &RHS, 1663 SDValue &CC, 1664 bool &NeedInvert, 1665 SDLoc dl) { 1666 MVT OpVT = LHS.getSimpleValueType(); 1667 ISD::CondCode CCCode = cast<CondCodeSDNode>(CC)->get(); 1668 NeedInvert = false; 1669 switch (TLI.getCondCodeAction(CCCode, OpVT)) { 1670 default: llvm_unreachable("Unknown condition code action!"); 1671 case TargetLowering::Legal: 1672 // Nothing to do. 
1673 break; 1674 case TargetLowering::Expand: { 1675 ISD::CondCode InvCC = ISD::getSetCCSwappedOperands(CCCode); 1676 if (TLI.isCondCodeLegal(InvCC, OpVT)) { 1677 std::swap(LHS, RHS); 1678 CC = DAG.getCondCode(InvCC); 1679 return true; 1680 } 1681 ISD::CondCode CC1 = ISD::SETCC_INVALID, CC2 = ISD::SETCC_INVALID; 1682 unsigned Opc = 0; 1683 switch (CCCode) { 1684 default: llvm_unreachable("Don't know how to expand this condition!"); 1685 case ISD::SETO: 1686 assert(TLI.getCondCodeAction(ISD::SETOEQ, OpVT) 1687 == TargetLowering::Legal 1688 && "If SETO is expanded, SETOEQ must be legal!"); 1689 CC1 = ISD::SETOEQ; CC2 = ISD::SETOEQ; Opc = ISD::AND; break; 1690 case ISD::SETUO: 1691 assert(TLI.getCondCodeAction(ISD::SETUNE, OpVT) 1692 == TargetLowering::Legal 1693 && "If SETUO is expanded, SETUNE must be legal!"); 1694 CC1 = ISD::SETUNE; CC2 = ISD::SETUNE; Opc = ISD::OR; break; 1695 case ISD::SETOEQ: 1696 case ISD::SETOGT: 1697 case ISD::SETOGE: 1698 case ISD::SETOLT: 1699 case ISD::SETOLE: 1700 case ISD::SETONE: 1701 case ISD::SETUEQ: 1702 case ISD::SETUNE: 1703 case ISD::SETUGT: 1704 case ISD::SETUGE: 1705 case ISD::SETULT: 1706 case ISD::SETULE: 1707 // If we are floating point, assign and break, otherwise fall through. 1708 if (!OpVT.isInteger()) { 1709 // We can use the 4th bit to tell if we are the unordered 1710 // or ordered version of the opcode. 1711 CC2 = ((unsigned)CCCode & 0x8U) ? ISD::SETUO : ISD::SETO; 1712 Opc = ((unsigned)CCCode & 0x8U) ? ISD::OR : ISD::AND; 1713 CC1 = (ISD::CondCode)(((int)CCCode & 0x7) | 0x10); 1714 break; 1715 } 1716 // Fallthrough if we are unsigned integer. 1717 case ISD::SETLE: 1718 case ISD::SETGT: 1719 case ISD::SETGE: 1720 case ISD::SETLT: 1721 // We only support using the inverted operation, which is computed above 1722 // and not a different manner of supporting expanding these cases. 1723 llvm_unreachable("Don't know how to expand this condition!"); 1724 case ISD::SETNE: 1725 case ISD::SETEQ: 1726 // Try inverting the result of the inverse condition. 1727 InvCC = CCCode == ISD::SETEQ ? ISD::SETNE : ISD::SETEQ; 1728 if (TLI.isCondCodeLegal(InvCC, OpVT)) { 1729 CC = DAG.getCondCode(InvCC); 1730 NeedInvert = true; 1731 return true; 1732 } 1733 // If inverting the condition didn't work then we have no means to expand 1734 // the condition. 1735 llvm_unreachable("Don't know how to expand this condition!"); 1736 } 1737 1738 SDValue SetCC1, SetCC2; 1739 if (CCCode != ISD::SETO && CCCode != ISD::SETUO) { 1740 // If we aren't the ordered or unorder operation, 1741 // then the pattern is (LHS CC1 RHS) Opc (LHS CC2 RHS). 1742 SetCC1 = DAG.getSetCC(dl, VT, LHS, RHS, CC1); 1743 SetCC2 = DAG.getSetCC(dl, VT, LHS, RHS, CC2); 1744 } else { 1745 // Otherwise, the pattern is (LHS CC1 LHS) Opc (RHS CC2 RHS) 1746 SetCC1 = DAG.getSetCC(dl, VT, LHS, LHS, CC1); 1747 SetCC2 = DAG.getSetCC(dl, VT, RHS, RHS, CC2); 1748 } 1749 LHS = DAG.getNode(Opc, dl, VT, SetCC1, SetCC2); 1750 RHS = SDValue(); 1751 CC = SDValue(); 1752 return true; 1753 } 1754 } 1755 return false; 1756 } 1757 1758 /// EmitStackConvert - Emit a store/load combination to the stack. This stores 1759 /// SrcOp to a stack slot of type SlotVT, truncating it if needed. It then does 1760 /// a load from the stack slot to DestVT, extending it if needed. 1761 /// The resultant code need not be legal. 1762 SDValue SelectionDAGLegalize::EmitStackConvert(SDValue SrcOp, 1763 EVT SlotVT, 1764 EVT DestVT, 1765 SDLoc dl) { 1766 // Create the stack frame object. 
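  // Rough shape of what this helper emits (the slot type is no wider than
  // either the source or the destination):
  //   (trunc)store SrcOp -> stack slot of SlotVT
  //   (ext)load slot     -> DestVT
  // The slot is created with at least the source type's preferred alignment.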
1767 unsigned SrcAlign = 1768 TLI.getDataLayout()->getPrefTypeAlignment(SrcOp.getValueType(). 1769 getTypeForEVT(*DAG.getContext())); 1770 SDValue FIPtr = DAG.CreateStackTemporary(SlotVT, SrcAlign); 1771 1772 FrameIndexSDNode *StackPtrFI = cast<FrameIndexSDNode>(FIPtr); 1773 int SPFI = StackPtrFI->getIndex(); 1774 MachinePointerInfo PtrInfo = MachinePointerInfo::getFixedStack(SPFI); 1775 1776 unsigned SrcSize = SrcOp.getValueType().getSizeInBits(); 1777 unsigned SlotSize = SlotVT.getSizeInBits(); 1778 unsigned DestSize = DestVT.getSizeInBits(); 1779 Type *DestType = DestVT.getTypeForEVT(*DAG.getContext()); 1780 unsigned DestAlign = TLI.getDataLayout()->getPrefTypeAlignment(DestType); 1781 1782 // Emit a store to the stack slot. Use a truncstore if the input value is 1783 // later than DestVT. 1784 SDValue Store; 1785 1786 if (SrcSize > SlotSize) 1787 Store = DAG.getTruncStore(DAG.getEntryNode(), dl, SrcOp, FIPtr, 1788 PtrInfo, SlotVT, false, false, SrcAlign); 1789 else { 1790 assert(SrcSize == SlotSize && "Invalid store"); 1791 Store = DAG.getStore(DAG.getEntryNode(), dl, SrcOp, FIPtr, 1792 PtrInfo, false, false, SrcAlign); 1793 } 1794 1795 // Result is a load from the stack slot. 1796 if (SlotSize == DestSize) 1797 return DAG.getLoad(DestVT, dl, Store, FIPtr, PtrInfo, 1798 false, false, false, DestAlign); 1799 1800 assert(SlotSize < DestSize && "Unknown extension!"); 1801 return DAG.getExtLoad(ISD::EXTLOAD, dl, DestVT, Store, FIPtr, 1802 PtrInfo, SlotVT, false, false, false, DestAlign); 1803 } 1804 1805 SDValue SelectionDAGLegalize::ExpandSCALAR_TO_VECTOR(SDNode *Node) { 1806 SDLoc dl(Node); 1807 // Create a vector sized/aligned stack slot, store the value to element #0, 1808 // then load the whole vector back out. 1809 SDValue StackPtr = DAG.CreateStackTemporary(Node->getValueType(0)); 1810 1811 FrameIndexSDNode *StackPtrFI = cast<FrameIndexSDNode>(StackPtr); 1812 int SPFI = StackPtrFI->getIndex(); 1813 1814 SDValue Ch = DAG.getTruncStore(DAG.getEntryNode(), dl, Node->getOperand(0), 1815 StackPtr, 1816 MachinePointerInfo::getFixedStack(SPFI), 1817 Node->getValueType(0).getVectorElementType(), 1818 false, false, 0); 1819 return DAG.getLoad(Node->getValueType(0), dl, Ch, StackPtr, 1820 MachinePointerInfo::getFixedStack(SPFI), 1821 false, false, false, 0); 1822 } 1823 1824 static bool 1825 ExpandBVWithShuffles(SDNode *Node, SelectionDAG &DAG, 1826 const TargetLowering &TLI, SDValue &Res) { 1827 unsigned NumElems = Node->getNumOperands(); 1828 SDLoc dl(Node); 1829 EVT VT = Node->getValueType(0); 1830 1831 // Try to group the scalars into pairs, shuffle the pairs together, then 1832 // shuffle the pairs of pairs together, etc. until the vector has 1833 // been built. This will work only if all of the necessary shuffle masks 1834 // are legal. 1835 1836 // We do this in two phases; first to check the legality of the shuffles, 1837 // and next, assuming that all shuffles are legal, to create the new nodes. 
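  // For example (illustrative), with four defined scalars A, B, C, D:
  //   phase 0 only checks that each shuffle mask used below is legal for VT;
  //   phase 1 builds  AB  = shuffle(scalar_to_vector(A), scalar_to_vector(B))
  //                   CD  = shuffle(scalar_to_vector(C), scalar_to_vector(D))
  //                   Res = shuffle(AB, CD)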
1838 for (int Phase = 0; Phase < 2; ++Phase) { 1839 SmallVector<std::pair<SDValue, SmallVector<int, 16> >, 16> IntermedVals, 1840 NewIntermedVals; 1841 for (unsigned i = 0; i < NumElems; ++i) { 1842 SDValue V = Node->getOperand(i); 1843 if (V.getOpcode() == ISD::UNDEF) 1844 continue; 1845 1846 SDValue Vec; 1847 if (Phase) 1848 Vec = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, VT, V); 1849 IntermedVals.push_back(std::make_pair(Vec, SmallVector<int, 16>(1, i))); 1850 } 1851 1852 while (IntermedVals.size() > 2) { 1853 NewIntermedVals.clear(); 1854 for (unsigned i = 0, e = (IntermedVals.size() & ~1u); i < e; i += 2) { 1855 // This vector and the next vector are shuffled together (simply to 1856 // append the one to the other). 1857 SmallVector<int, 16> ShuffleVec(NumElems, -1); 1858 1859 SmallVector<int, 16> FinalIndices; 1860 FinalIndices.reserve(IntermedVals[i].second.size() + 1861 IntermedVals[i+1].second.size()); 1862 1863 int k = 0; 1864 for (unsigned j = 0, f = IntermedVals[i].second.size(); j != f; 1865 ++j, ++k) { 1866 ShuffleVec[k] = j; 1867 FinalIndices.push_back(IntermedVals[i].second[j]); 1868 } 1869 for (unsigned j = 0, f = IntermedVals[i+1].second.size(); j != f; 1870 ++j, ++k) { 1871 ShuffleVec[k] = NumElems + j; 1872 FinalIndices.push_back(IntermedVals[i+1].second[j]); 1873 } 1874 1875 SDValue Shuffle; 1876 if (Phase) 1877 Shuffle = DAG.getVectorShuffle(VT, dl, IntermedVals[i].first, 1878 IntermedVals[i+1].first, 1879 ShuffleVec.data()); 1880 else if (!TLI.isShuffleMaskLegal(ShuffleVec, VT)) 1881 return false; 1882 NewIntermedVals.push_back(std::make_pair(Shuffle, FinalIndices)); 1883 } 1884 1885 // If we had an odd number of defined values, then append the last 1886 // element to the array of new vectors. 1887 if ((IntermedVals.size() & 1) != 0) 1888 NewIntermedVals.push_back(IntermedVals.back()); 1889 1890 IntermedVals.swap(NewIntermedVals); 1891 } 1892 1893 assert(IntermedVals.size() <= 2 && IntermedVals.size() > 0 && 1894 "Invalid number of intermediate vectors"); 1895 SDValue Vec1 = IntermedVals[0].first; 1896 SDValue Vec2; 1897 if (IntermedVals.size() > 1) 1898 Vec2 = IntermedVals[1].first; 1899 else if (Phase) 1900 Vec2 = DAG.getUNDEF(VT); 1901 1902 SmallVector<int, 16> ShuffleVec(NumElems, -1); 1903 for (unsigned i = 0, e = IntermedVals[0].second.size(); i != e; ++i) 1904 ShuffleVec[IntermedVals[0].second[i]] = i; 1905 for (unsigned i = 0, e = IntermedVals[1].second.size(); i != e; ++i) 1906 ShuffleVec[IntermedVals[1].second[i]] = NumElems + i; 1907 1908 if (Phase) 1909 Res = DAG.getVectorShuffle(VT, dl, Vec1, Vec2, ShuffleVec.data()); 1910 else if (!TLI.isShuffleMaskLegal(ShuffleVec, VT)) 1911 return false; 1912 } 1913 1914 return true; 1915 } 1916 1917 /// ExpandBUILD_VECTOR - Expand a BUILD_VECTOR node on targets that don't 1918 /// support the operation, but do support the resultant vector type. 1919 SDValue SelectionDAGLegalize::ExpandBUILD_VECTOR(SDNode *Node) { 1920 unsigned NumElems = Node->getNumOperands(); 1921 SDValue Value1, Value2; 1922 SDLoc dl(Node); 1923 EVT VT = Node->getValueType(0); 1924 EVT OpVT = Node->getOperand(0).getValueType(); 1925 EVT EltVT = VT.getVectorElementType(); 1926 1927 // If the only non-undef value is the low element, turn this into a 1928 // SCALAR_TO_VECTOR node. If this is { X, X, X, X }, determine X. 
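  // A single scan below classifies the operands: remember up to two distinct
  // defined values (Value1/Value2), and note whether every defined element is
  // a constant and whether only element 0 is defined at all.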
1929 bool isOnlyLowElement = true; 1930 bool MoreThanTwoValues = false; 1931 bool isConstant = true; 1932 for (unsigned i = 0; i < NumElems; ++i) { 1933 SDValue V = Node->getOperand(i); 1934 if (V.getOpcode() == ISD::UNDEF) 1935 continue; 1936 if (i > 0) 1937 isOnlyLowElement = false; 1938 if (!isa<ConstantFPSDNode>(V) && !isa<ConstantSDNode>(V)) 1939 isConstant = false; 1940 1941 if (!Value1.getNode()) { 1942 Value1 = V; 1943 } else if (!Value2.getNode()) { 1944 if (V != Value1) 1945 Value2 = V; 1946 } else if (V != Value1 && V != Value2) { 1947 MoreThanTwoValues = true; 1948 } 1949 } 1950 1951 if (!Value1.getNode()) 1952 return DAG.getUNDEF(VT); 1953 1954 if (isOnlyLowElement) 1955 return DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, VT, Node->getOperand(0)); 1956 1957 // If all elements are constants, create a load from the constant pool. 1958 if (isConstant) { 1959 SmallVector<Constant*, 16> CV; 1960 for (unsigned i = 0, e = NumElems; i != e; ++i) { 1961 if (ConstantFPSDNode *V = 1962 dyn_cast<ConstantFPSDNode>(Node->getOperand(i))) { 1963 CV.push_back(const_cast<ConstantFP *>(V->getConstantFPValue())); 1964 } else if (ConstantSDNode *V = 1965 dyn_cast<ConstantSDNode>(Node->getOperand(i))) { 1966 if (OpVT==EltVT) 1967 CV.push_back(const_cast<ConstantInt *>(V->getConstantIntValue())); 1968 else { 1969 // If OpVT and EltVT don't match, EltVT is not legal and the 1970 // element values have been promoted/truncated earlier. Undo this; 1971 // we don't want a v16i8 to become a v16i32 for example. 1972 const ConstantInt *CI = V->getConstantIntValue(); 1973 CV.push_back(ConstantInt::get(EltVT.getTypeForEVT(*DAG.getContext()), 1974 CI->getZExtValue())); 1975 } 1976 } else { 1977 assert(Node->getOperand(i).getOpcode() == ISD::UNDEF); 1978 Type *OpNTy = EltVT.getTypeForEVT(*DAG.getContext()); 1979 CV.push_back(UndefValue::get(OpNTy)); 1980 } 1981 } 1982 Constant *CP = ConstantVector::get(CV); 1983 SDValue CPIdx = DAG.getConstantPool(CP, TLI.getPointerTy()); 1984 unsigned Alignment = cast<ConstantPoolSDNode>(CPIdx)->getAlignment(); 1985 return DAG.getLoad(VT, dl, DAG.getEntryNode(), CPIdx, 1986 MachinePointerInfo::getConstantPool(), 1987 false, false, false, Alignment); 1988 } 1989 1990 SmallSet<SDValue, 16> DefinedValues; 1991 for (unsigned i = 0; i < NumElems; ++i) { 1992 if (Node->getOperand(i).getOpcode() == ISD::UNDEF) 1993 continue; 1994 DefinedValues.insert(Node->getOperand(i)); 1995 } 1996 1997 if (TLI.shouldExpandBuildVectorWithShuffles(VT, DefinedValues.size())) { 1998 if (!MoreThanTwoValues) { 1999 SmallVector<int, 8> ShuffleVec(NumElems, -1); 2000 for (unsigned i = 0; i < NumElems; ++i) { 2001 SDValue V = Node->getOperand(i); 2002 if (V.getOpcode() == ISD::UNDEF) 2003 continue; 2004 ShuffleVec[i] = V == Value1 ? 0 : NumElems; 2005 } 2006 if (TLI.isShuffleMaskLegal(ShuffleVec, Node->getValueType(0))) { 2007 // Get the splatted value into the low element of a vector register. 2008 SDValue Vec1 = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, VT, Value1); 2009 SDValue Vec2; 2010 if (Value2.getNode()) 2011 Vec2 = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, VT, Value2); 2012 else 2013 Vec2 = DAG.getUNDEF(VT); 2014 2015 // Return shuffle(LowValVec, undef, <0,0,0,0>) 2016 return DAG.getVectorShuffle(VT, dl, Vec1, Vec2, ShuffleVec.data()); 2017 } 2018 } else { 2019 SDValue Res; 2020 if (ExpandBVWithShuffles(Node, DAG, TLI, Res)) 2021 return Res; 2022 } 2023 } 2024 2025 // Otherwise, we can't handle this case efficiently. 
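  // (ExpandVectorBuildThroughStack stores each defined element to a stack
  // temporary and reloads the whole vector.)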
2026 return ExpandVectorBuildThroughStack(Node); 2027 } 2028 2029 // ExpandLibCall - Expand a node into a call to a libcall. If the result value 2030 // does not fit into a register, return the lo part and set the hi part to the 2031 // by-reg argument. If it does fit into a single register, return the result 2032 // and leave the Hi part unset. 2033 SDValue SelectionDAGLegalize::ExpandLibCall(RTLIB::Libcall LC, SDNode *Node, 2034 bool isSigned) { 2035 TargetLowering::ArgListTy Args; 2036 TargetLowering::ArgListEntry Entry; 2037 for (unsigned i = 0, e = Node->getNumOperands(); i != e; ++i) { 2038 EVT ArgVT = Node->getOperand(i).getValueType(); 2039 Type *ArgTy = ArgVT.getTypeForEVT(*DAG.getContext()); 2040 Entry.Node = Node->getOperand(i); Entry.Ty = ArgTy; 2041 Entry.isSExt = isSigned; 2042 Entry.isZExt = !isSigned; 2043 Args.push_back(Entry); 2044 } 2045 SDValue Callee = DAG.getExternalSymbol(TLI.getLibcallName(LC), 2046 TLI.getPointerTy()); 2047 2048 Type *RetTy = Node->getValueType(0).getTypeForEVT(*DAG.getContext()); 2049 2050 // By default, the input chain to this libcall is the entry node of the 2051 // function. If the libcall is going to be emitted as a tail call then 2052 // TLI.isUsedByReturnOnly will change it to the right chain if the return 2053 // node which is being folded has a non-entry input chain. 2054 SDValue InChain = DAG.getEntryNode(); 2055 2056 // isTailCall may be true since the callee does not reference caller stack 2057 // frame. Check if it's in the right position. 2058 SDValue TCChain = InChain; 2059 bool isTailCall = TLI.isInTailCallPosition(DAG, Node, TCChain); 2060 if (isTailCall) 2061 InChain = TCChain; 2062 2063 TargetLowering::CallLoweringInfo CLI(DAG); 2064 CLI.setDebugLoc(SDLoc(Node)).setChain(InChain) 2065 .setCallee(TLI.getLibcallCallingConv(LC), RetTy, Callee, std::move(Args), 0) 2066 .setTailCall(isTailCall).setSExtResult(isSigned).setZExtResult(!isSigned); 2067 2068 std::pair<SDValue, SDValue> CallInfo = TLI.LowerCallTo(CLI); 2069 2070 if (!CallInfo.second.getNode()) 2071 // It's a tailcall, return the chain (which is the DAG root). 2072 return DAG.getRoot(); 2073 2074 return CallInfo.first; 2075 } 2076 2077 /// ExpandLibCall - Generate a libcall taking the given operands as arguments 2078 /// and returning a result of type RetVT. 2079 SDValue SelectionDAGLegalize::ExpandLibCall(RTLIB::Libcall LC, EVT RetVT, 2080 const SDValue *Ops, unsigned NumOps, 2081 bool isSigned, SDLoc dl) { 2082 TargetLowering::ArgListTy Args; 2083 Args.reserve(NumOps); 2084 2085 TargetLowering::ArgListEntry Entry; 2086 for (unsigned i = 0; i != NumOps; ++i) { 2087 Entry.Node = Ops[i]; 2088 Entry.Ty = Entry.Node.getValueType().getTypeForEVT(*DAG.getContext()); 2089 Entry.isSExt = isSigned; 2090 Entry.isZExt = !isSigned; 2091 Args.push_back(Entry); 2092 } 2093 SDValue Callee = DAG.getExternalSymbol(TLI.getLibcallName(LC), 2094 TLI.getPointerTy()); 2095 2096 Type *RetTy = RetVT.getTypeForEVT(*DAG.getContext()); 2097 2098 TargetLowering::CallLoweringInfo CLI(DAG); 2099 CLI.setDebugLoc(dl).setChain(DAG.getEntryNode()) 2100 .setCallee(TLI.getLibcallCallingConv(LC), RetTy, Callee, std::move(Args), 0) 2101 .setSExtResult(isSigned).setZExtResult(!isSigned); 2102 2103 std::pair<SDValue,SDValue> CallInfo = TLI.LowerCallTo(CLI); 2104 2105 return CallInfo.first; 2106 } 2107 2108 // ExpandChainLibCall - Expand a node into a call to a libcall. Similar to 2109 // ExpandLibCall except that the first operand is the in-chain. 
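// The returned pair is <call result, output chain>; the atomic expansions
// below rely on this so that the ordering implied by the chain is preserved.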
2110 std::pair<SDValue, SDValue> 2111 SelectionDAGLegalize::ExpandChainLibCall(RTLIB::Libcall LC, 2112 SDNode *Node, 2113 bool isSigned) { 2114 SDValue InChain = Node->getOperand(0); 2115 2116 TargetLowering::ArgListTy Args; 2117 TargetLowering::ArgListEntry Entry; 2118 for (unsigned i = 1, e = Node->getNumOperands(); i != e; ++i) { 2119 EVT ArgVT = Node->getOperand(i).getValueType(); 2120 Type *ArgTy = ArgVT.getTypeForEVT(*DAG.getContext()); 2121 Entry.Node = Node->getOperand(i); 2122 Entry.Ty = ArgTy; 2123 Entry.isSExt = isSigned; 2124 Entry.isZExt = !isSigned; 2125 Args.push_back(Entry); 2126 } 2127 SDValue Callee = DAG.getExternalSymbol(TLI.getLibcallName(LC), 2128 TLI.getPointerTy()); 2129 2130 Type *RetTy = Node->getValueType(0).getTypeForEVT(*DAG.getContext()); 2131 2132 TargetLowering::CallLoweringInfo CLI(DAG); 2133 CLI.setDebugLoc(SDLoc(Node)).setChain(InChain) 2134 .setCallee(TLI.getLibcallCallingConv(LC), RetTy, Callee, std::move(Args), 0) 2135 .setSExtResult(isSigned).setZExtResult(!isSigned); 2136 2137 std::pair<SDValue, SDValue> CallInfo = TLI.LowerCallTo(CLI); 2138 2139 return CallInfo; 2140 } 2141 2142 SDValue SelectionDAGLegalize::ExpandFPLibCall(SDNode* Node, 2143 RTLIB::Libcall Call_F32, 2144 RTLIB::Libcall Call_F64, 2145 RTLIB::Libcall Call_F80, 2146 RTLIB::Libcall Call_F128, 2147 RTLIB::Libcall Call_PPCF128) { 2148 RTLIB::Libcall LC; 2149 switch (Node->getSimpleValueType(0).SimpleTy) { 2150 default: llvm_unreachable("Unexpected request for libcall!"); 2151 case MVT::f32: LC = Call_F32; break; 2152 case MVT::f64: LC = Call_F64; break; 2153 case MVT::f80: LC = Call_F80; break; 2154 case MVT::f128: LC = Call_F128; break; 2155 case MVT::ppcf128: LC = Call_PPCF128; break; 2156 } 2157 return ExpandLibCall(LC, Node, false); 2158 } 2159 2160 SDValue SelectionDAGLegalize::ExpandIntLibCall(SDNode* Node, bool isSigned, 2161 RTLIB::Libcall Call_I8, 2162 RTLIB::Libcall Call_I16, 2163 RTLIB::Libcall Call_I32, 2164 RTLIB::Libcall Call_I64, 2165 RTLIB::Libcall Call_I128) { 2166 RTLIB::Libcall LC; 2167 switch (Node->getSimpleValueType(0).SimpleTy) { 2168 default: llvm_unreachable("Unexpected request for libcall!"); 2169 case MVT::i8: LC = Call_I8; break; 2170 case MVT::i16: LC = Call_I16; break; 2171 case MVT::i32: LC = Call_I32; break; 2172 case MVT::i64: LC = Call_I64; break; 2173 case MVT::i128: LC = Call_I128; break; 2174 } 2175 return ExpandLibCall(LC, Node, isSigned); 2176 } 2177 2178 /// isDivRemLibcallAvailable - Return true if divmod libcall is available. 2179 static bool isDivRemLibcallAvailable(SDNode *Node, bool isSigned, 2180 const TargetLowering &TLI) { 2181 RTLIB::Libcall LC; 2182 switch (Node->getSimpleValueType(0).SimpleTy) { 2183 default: llvm_unreachable("Unexpected request for libcall!"); 2184 case MVT::i8: LC= isSigned ? RTLIB::SDIVREM_I8 : RTLIB::UDIVREM_I8; break; 2185 case MVT::i16: LC= isSigned ? RTLIB::SDIVREM_I16 : RTLIB::UDIVREM_I16; break; 2186 case MVT::i32: LC= isSigned ? RTLIB::SDIVREM_I32 : RTLIB::UDIVREM_I32; break; 2187 case MVT::i64: LC= isSigned ? RTLIB::SDIVREM_I64 : RTLIB::UDIVREM_I64; break; 2188 case MVT::i128: LC= isSigned ? RTLIB::SDIVREM_I128:RTLIB::UDIVREM_I128; break; 2189 } 2190 2191 return TLI.getLibcallName(LC) != nullptr; 2192 } 2193 2194 /// useDivRem - Only issue divrem libcall if both quotient and remainder are 2195 /// needed. 2196 static bool useDivRem(SDNode *Node, bool isSigned, bool isDIV) { 2197 // The other use might have been replaced with a divrem already. 2198 unsigned DivRemOpc = isSigned ? 
ISD::SDIVREM : ISD::UDIVREM; 2199 unsigned OtherOpcode = 0; 2200 if (isSigned) 2201 OtherOpcode = isDIV ? ISD::SREM : ISD::SDIV; 2202 else 2203 OtherOpcode = isDIV ? ISD::UREM : ISD::UDIV; 2204 2205 SDValue Op0 = Node->getOperand(0); 2206 SDValue Op1 = Node->getOperand(1); 2207 for (SDNode::use_iterator UI = Op0.getNode()->use_begin(), 2208 UE = Op0.getNode()->use_end(); UI != UE; ++UI) { 2209 SDNode *User = *UI; 2210 if (User == Node) 2211 continue; 2212 if ((User->getOpcode() == OtherOpcode || User->getOpcode() == DivRemOpc) && 2213 User->getOperand(0) == Op0 && 2214 User->getOperand(1) == Op1) 2215 return true; 2216 } 2217 return false; 2218 } 2219 2220 /// ExpandDivRemLibCall - Issue libcalls to __{u}divmod to compute div / rem 2221 /// pairs. 2222 void 2223 SelectionDAGLegalize::ExpandDivRemLibCall(SDNode *Node, 2224 SmallVectorImpl<SDValue> &Results) { 2225 unsigned Opcode = Node->getOpcode(); 2226 bool isSigned = Opcode == ISD::SDIVREM; 2227 2228 RTLIB::Libcall LC; 2229 switch (Node->getSimpleValueType(0).SimpleTy) { 2230 default: llvm_unreachable("Unexpected request for libcall!"); 2231 case MVT::i8: LC= isSigned ? RTLIB::SDIVREM_I8 : RTLIB::UDIVREM_I8; break; 2232 case MVT::i16: LC= isSigned ? RTLIB::SDIVREM_I16 : RTLIB::UDIVREM_I16; break; 2233 case MVT::i32: LC= isSigned ? RTLIB::SDIVREM_I32 : RTLIB::UDIVREM_I32; break; 2234 case MVT::i64: LC= isSigned ? RTLIB::SDIVREM_I64 : RTLIB::UDIVREM_I64; break; 2235 case MVT::i128: LC= isSigned ? RTLIB::SDIVREM_I128:RTLIB::UDIVREM_I128; break; 2236 } 2237 2238 // The input chain to this libcall is the entry node of the function. 2239 // Legalizing the call will automatically add the previous call to the 2240 // dependence. 2241 SDValue InChain = DAG.getEntryNode(); 2242 2243 EVT RetVT = Node->getValueType(0); 2244 Type *RetTy = RetVT.getTypeForEVT(*DAG.getContext()); 2245 2246 TargetLowering::ArgListTy Args; 2247 TargetLowering::ArgListEntry Entry; 2248 for (unsigned i = 0, e = Node->getNumOperands(); i != e; ++i) { 2249 EVT ArgVT = Node->getOperand(i).getValueType(); 2250 Type *ArgTy = ArgVT.getTypeForEVT(*DAG.getContext()); 2251 Entry.Node = Node->getOperand(i); Entry.Ty = ArgTy; 2252 Entry.isSExt = isSigned; 2253 Entry.isZExt = !isSigned; 2254 Args.push_back(Entry); 2255 } 2256 2257 // Also pass the return address of the remainder. 2258 SDValue FIPtr = DAG.CreateStackTemporary(RetVT); 2259 Entry.Node = FIPtr; 2260 Entry.Ty = RetTy->getPointerTo(); 2261 Entry.isSExt = isSigned; 2262 Entry.isZExt = !isSigned; 2263 Args.push_back(Entry); 2264 2265 SDValue Callee = DAG.getExternalSymbol(TLI.getLibcallName(LC), 2266 TLI.getPointerTy()); 2267 2268 SDLoc dl(Node); 2269 TargetLowering::CallLoweringInfo CLI(DAG); 2270 CLI.setDebugLoc(dl).setChain(InChain) 2271 .setCallee(TLI.getLibcallCallingConv(LC), RetTy, Callee, std::move(Args), 0) 2272 .setSExtResult(isSigned).setZExtResult(!isSigned); 2273 2274 std::pair<SDValue, SDValue> CallInfo = TLI.LowerCallTo(CLI); 2275 2276 // Remainder is loaded back from the stack frame. 2277 SDValue Rem = DAG.getLoad(RetVT, dl, CallInfo.second, FIPtr, 2278 MachinePointerInfo(), false, false, false, 0); 2279 Results.push_back(CallInfo.first); 2280 Results.push_back(Rem); 2281 } 2282 2283 /// isSinCosLibcallAvailable - Return true if sincos libcall is available. 
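/// The check mirrors isDivRemLibcallAvailable above: map the result type to
/// the corresponding RTLIB::SINCOS_* entry and ask whether the target
/// provides a name for it.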
2284 static bool isSinCosLibcallAvailable(SDNode *Node, const TargetLowering &TLI) { 2285 RTLIB::Libcall LC; 2286 switch (Node->getSimpleValueType(0).SimpleTy) { 2287 default: llvm_unreachable("Unexpected request for libcall!"); 2288 case MVT::f32: LC = RTLIB::SINCOS_F32; break; 2289 case MVT::f64: LC = RTLIB::SINCOS_F64; break; 2290 case MVT::f80: LC = RTLIB::SINCOS_F80; break; 2291 case MVT::f128: LC = RTLIB::SINCOS_F128; break; 2292 case MVT::ppcf128: LC = RTLIB::SINCOS_PPCF128; break; 2293 } 2294 return TLI.getLibcallName(LC) != nullptr; 2295 } 2296 2297 /// canCombineSinCosLibcall - Return true if sincos libcall is available and 2298 /// can be used to combine sin and cos. 2299 static bool canCombineSinCosLibcall(SDNode *Node, const TargetLowering &TLI, 2300 const TargetMachine &TM) { 2301 if (!isSinCosLibcallAvailable(Node, TLI)) 2302 return false; 2303 // GNU sin/cos functions set errno while sincos does not. Therefore 2304 // combining sin and cos is only safe if unsafe-fpmath is enabled. 2305 bool isGNU = Triple(TM.getTargetTriple()).getEnvironment() == Triple::GNU; 2306 if (isGNU && !TM.Options.UnsafeFPMath) 2307 return false; 2308 return true; 2309 } 2310 2311 /// useSinCos - Only issue sincos libcall if both sin and cos are 2312 /// needed. 2313 static bool useSinCos(SDNode *Node) { 2314 unsigned OtherOpcode = Node->getOpcode() == ISD::FSIN 2315 ? ISD::FCOS : ISD::FSIN; 2316 2317 SDValue Op0 = Node->getOperand(0); 2318 for (SDNode::use_iterator UI = Op0.getNode()->use_begin(), 2319 UE = Op0.getNode()->use_end(); UI != UE; ++UI) { 2320 SDNode *User = *UI; 2321 if (User == Node) 2322 continue; 2323 // The other user might have been turned into sincos already. 2324 if (User->getOpcode() == OtherOpcode || User->getOpcode() == ISD::FSINCOS) 2325 return true; 2326 } 2327 return false; 2328 } 2329 2330 /// ExpandSinCosLibCall - Issue libcalls to sincos to compute sin / cos 2331 /// pairs. 2332 void 2333 SelectionDAGLegalize::ExpandSinCosLibCall(SDNode *Node, 2334 SmallVectorImpl<SDValue> &Results) { 2335 RTLIB::Libcall LC; 2336 switch (Node->getSimpleValueType(0).SimpleTy) { 2337 default: llvm_unreachable("Unexpected request for libcall!"); 2338 case MVT::f32: LC = RTLIB::SINCOS_F32; break; 2339 case MVT::f64: LC = RTLIB::SINCOS_F64; break; 2340 case MVT::f80: LC = RTLIB::SINCOS_F80; break; 2341 case MVT::f128: LC = RTLIB::SINCOS_F128; break; 2342 case MVT::ppcf128: LC = RTLIB::SINCOS_PPCF128; break; 2343 } 2344 2345 // The input chain to this libcall is the entry node of the function. 2346 // Legalizing the call will automatically add the previous call to the 2347 // dependence. 2348 SDValue InChain = DAG.getEntryNode(); 2349 2350 EVT RetVT = Node->getValueType(0); 2351 Type *RetTy = RetVT.getTypeForEVT(*DAG.getContext()); 2352 2353 TargetLowering::ArgListTy Args; 2354 TargetLowering::ArgListEntry Entry; 2355 2356 // Pass the argument. 2357 Entry.Node = Node->getOperand(0); 2358 Entry.Ty = RetTy; 2359 Entry.isSExt = false; 2360 Entry.isZExt = false; 2361 Args.push_back(Entry); 2362 2363 // Pass the return address of sin. 2364 SDValue SinPtr = DAG.CreateStackTemporary(RetVT); 2365 Entry.Node = SinPtr; 2366 Entry.Ty = RetTy->getPointerTo(); 2367 Entry.isSExt = false; 2368 Entry.isZExt = false; 2369 Args.push_back(Entry); 2370 2371 // Also pass the return address of the cos. 
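  // (The call being modeled has the usual sincos(x, &sin_out, &cos_out)
  // shape: one value argument followed by two out-pointers, which are loaded
  // back below as the two results.)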
2372 SDValue CosPtr = DAG.CreateStackTemporary(RetVT); 2373 Entry.Node = CosPtr; 2374 Entry.Ty = RetTy->getPointerTo(); 2375 Entry.isSExt = false; 2376 Entry.isZExt = false; 2377 Args.push_back(Entry); 2378 2379 SDValue Callee = DAG.getExternalSymbol(TLI.getLibcallName(LC), 2380 TLI.getPointerTy()); 2381 2382 SDLoc dl(Node); 2383 TargetLowering::CallLoweringInfo CLI(DAG); 2384 CLI.setDebugLoc(dl).setChain(InChain) 2385 .setCallee(TLI.getLibcallCallingConv(LC), 2386 Type::getVoidTy(*DAG.getContext()), Callee, std::move(Args), 0); 2387 2388 std::pair<SDValue, SDValue> CallInfo = TLI.LowerCallTo(CLI); 2389 2390 Results.push_back(DAG.getLoad(RetVT, dl, CallInfo.second, SinPtr, 2391 MachinePointerInfo(), false, false, false, 0)); 2392 Results.push_back(DAG.getLoad(RetVT, dl, CallInfo.second, CosPtr, 2393 MachinePointerInfo(), false, false, false, 0)); 2394 } 2395 2396 /// ExpandLegalINT_TO_FP - This function is responsible for legalizing a 2397 /// INT_TO_FP operation of the specified operand when the target requests that 2398 /// we expand it. At this point, we know that the result and operand types are 2399 /// legal for the target. 2400 SDValue SelectionDAGLegalize::ExpandLegalINT_TO_FP(bool isSigned, 2401 SDValue Op0, 2402 EVT DestVT, 2403 SDLoc dl) { 2404 if (Op0.getValueType() == MVT::i32 && TLI.isTypeLegal(MVT::f64)) { 2405 // simple 32-bit [signed|unsigned] integer to float/double expansion 2406 2407 // Get the stack frame index of a 8 byte buffer. 2408 SDValue StackSlot = DAG.CreateStackTemporary(MVT::f64); 2409 2410 // word offset constant for Hi/Lo address computation 2411 SDValue WordOff = DAG.getConstant(sizeof(int), StackSlot.getValueType()); 2412 // set up Hi and Lo (into buffer) address based on endian 2413 SDValue Hi = StackSlot; 2414 SDValue Lo = DAG.getNode(ISD::ADD, dl, StackSlot.getValueType(), 2415 StackSlot, WordOff); 2416 if (TLI.isLittleEndian()) 2417 std::swap(Hi, Lo); 2418 2419 // if signed map to unsigned space 2420 SDValue Op0Mapped; 2421 if (isSigned) { 2422 // constant used to invert sign bit (signed to unsigned mapping) 2423 SDValue SignBit = DAG.getConstant(0x80000000u, MVT::i32); 2424 Op0Mapped = DAG.getNode(ISD::XOR, dl, MVT::i32, Op0, SignBit); 2425 } else { 2426 Op0Mapped = Op0; 2427 } 2428 // store the lo of the constructed double - based on integer input 2429 SDValue Store1 = DAG.getStore(DAG.getEntryNode(), dl, 2430 Op0Mapped, Lo, MachinePointerInfo(), 2431 false, false, 0); 2432 // initial hi portion of constructed double 2433 SDValue InitialHi = DAG.getConstant(0x43300000u, MVT::i32); 2434 // store the hi of the constructed double - biased exponent 2435 SDValue Store2 = DAG.getStore(Store1, dl, InitialHi, Hi, 2436 MachinePointerInfo(), 2437 false, false, 0); 2438 // load the constructed double 2439 SDValue Load = DAG.getLoad(MVT::f64, dl, Store2, StackSlot, 2440 MachinePointerInfo(), false, false, false, 0); 2441 // FP constant to bias correct the final result 2442 SDValue Bias = DAG.getConstantFP(isSigned ? 
2443 BitsToDouble(0x4330000080000000ULL) : 2444 BitsToDouble(0x4330000000000000ULL), 2445 MVT::f64); 2446 // subtract the bias 2447 SDValue Sub = DAG.getNode(ISD::FSUB, dl, MVT::f64, Load, Bias); 2448 // final result 2449 SDValue Result; 2450 // handle final rounding 2451 if (DestVT == MVT::f64) { 2452 // do nothing 2453 Result = Sub; 2454 } else if (DestVT.bitsLT(MVT::f64)) { 2455 Result = DAG.getNode(ISD::FP_ROUND, dl, DestVT, Sub, 2456 DAG.getIntPtrConstant(0)); 2457 } else if (DestVT.bitsGT(MVT::f64)) { 2458 Result = DAG.getNode(ISD::FP_EXTEND, dl, DestVT, Sub); 2459 } 2460 return Result; 2461 } 2462 assert(!isSigned && "Legalize cannot Expand SINT_TO_FP for i64 yet"); 2463 // Code below here assumes !isSigned without checking again. 2464 2465 // Implementation of unsigned i64 to f64 following the algorithm in 2466 // __floatundidf in compiler_rt. This implementation has the advantage 2467 // of performing rounding correctly, both in the default rounding mode 2468 // and in all alternate rounding modes. 2469 // TODO: Generalize this for use with other types. 2470 if (Op0.getValueType() == MVT::i64 && DestVT == MVT::f64) { 2471 SDValue TwoP52 = 2472 DAG.getConstant(UINT64_C(0x4330000000000000), MVT::i64); 2473 SDValue TwoP84PlusTwoP52 = 2474 DAG.getConstantFP(BitsToDouble(UINT64_C(0x4530000000100000)), MVT::f64); 2475 SDValue TwoP84 = 2476 DAG.getConstant(UINT64_C(0x4530000000000000), MVT::i64); 2477 2478 SDValue Lo = DAG.getZeroExtendInReg(Op0, dl, MVT::i32); 2479 SDValue Hi = DAG.getNode(ISD::SRL, dl, MVT::i64, Op0, 2480 DAG.getConstant(32, MVT::i64)); 2481 SDValue LoOr = DAG.getNode(ISD::OR, dl, MVT::i64, Lo, TwoP52); 2482 SDValue HiOr = DAG.getNode(ISD::OR, dl, MVT::i64, Hi, TwoP84); 2483 SDValue LoFlt = DAG.getNode(ISD::BITCAST, dl, MVT::f64, LoOr); 2484 SDValue HiFlt = DAG.getNode(ISD::BITCAST, dl, MVT::f64, HiOr); 2485 SDValue HiSub = DAG.getNode(ISD::FSUB, dl, MVT::f64, HiFlt, 2486 TwoP84PlusTwoP52); 2487 return DAG.getNode(ISD::FADD, dl, MVT::f64, LoFlt, HiSub); 2488 } 2489 2490 // Implementation of unsigned i64 to f32. 2491 // TODO: Generalize this for use with other types. 2492 if (Op0.getValueType() == MVT::i64 && DestVT == MVT::f32) { 2493 // For unsigned conversions, convert them to signed conversions using the 2494 // algorithm from the x86_64 __floatundidf in compiler_rt. 2495 if (!isSigned) { 2496 SDValue Fast = DAG.getNode(ISD::SINT_TO_FP, dl, MVT::f32, Op0); 2497 2498 SDValue ShiftConst = 2499 DAG.getConstant(1, TLI.getShiftAmountTy(Op0.getValueType())); 2500 SDValue Shr = DAG.getNode(ISD::SRL, dl, MVT::i64, Op0, ShiftConst); 2501 SDValue AndConst = DAG.getConstant(1, MVT::i64); 2502 SDValue And = DAG.getNode(ISD::AND, dl, MVT::i64, Op0, AndConst); 2503 SDValue Or = DAG.getNode(ISD::OR, dl, MVT::i64, And, Shr); 2504 2505 SDValue SignCvt = DAG.getNode(ISD::SINT_TO_FP, dl, MVT::f32, Or); 2506 SDValue Slow = DAG.getNode(ISD::FADD, dl, MVT::f32, SignCvt, SignCvt); 2507 2508 // TODO: This really should be implemented using a branch rather than a 2509 // select. We happen to get lucky and machinesink does the right 2510 // thing most of the time. This would be a good candidate for a 2511 //pseudo-op, or, even better, for whole-function isel. 2512 SDValue SignBitTest = DAG.getSetCC(dl, getSetCCResultType(MVT::i64), 2513 Op0, DAG.getConstant(0, MVT::i64), ISD::SETLT); 2514 return DAG.getSelect(dl, MVT::f32, SignBitTest, Slow, Fast); 2515 } 2516 2517 // Otherwise, implement the fully general conversion. 
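    // Rough outline of the sequence below: for inputs >= 2^53 the low 11 bits
    // are first collapsed into bit 11 ("round to odd") so the final rounding
    // to f32 is correct; the value is then split into 32-bit halves, each
    // half is converted exactly to f64, the halves are recombined as
    // hi * 2^32 + lo, and the sum is rounded once to f32.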
2518 2519 SDValue And = DAG.getNode(ISD::AND, dl, MVT::i64, Op0, 2520 DAG.getConstant(UINT64_C(0xfffffffffffff800), MVT::i64)); 2521 SDValue Or = DAG.getNode(ISD::OR, dl, MVT::i64, And, 2522 DAG.getConstant(UINT64_C(0x800), MVT::i64)); 2523 SDValue And2 = DAG.getNode(ISD::AND, dl, MVT::i64, Op0, 2524 DAG.getConstant(UINT64_C(0x7ff), MVT::i64)); 2525 SDValue Ne = DAG.getSetCC(dl, getSetCCResultType(MVT::i64), 2526 And2, DAG.getConstant(UINT64_C(0), MVT::i64), ISD::SETNE); 2527 SDValue Sel = DAG.getSelect(dl, MVT::i64, Ne, Or, Op0); 2528 SDValue Ge = DAG.getSetCC(dl, getSetCCResultType(MVT::i64), 2529 Op0, DAG.getConstant(UINT64_C(0x0020000000000000), MVT::i64), 2530 ISD::SETUGE); 2531 SDValue Sel2 = DAG.getSelect(dl, MVT::i64, Ge, Sel, Op0); 2532 EVT SHVT = TLI.getShiftAmountTy(Sel2.getValueType()); 2533 2534 SDValue Sh = DAG.getNode(ISD::SRL, dl, MVT::i64, Sel2, 2535 DAG.getConstant(32, SHVT)); 2536 SDValue Trunc = DAG.getNode(ISD::TRUNCATE, dl, MVT::i32, Sh); 2537 SDValue Fcvt = DAG.getNode(ISD::UINT_TO_FP, dl, MVT::f64, Trunc); 2538 SDValue TwoP32 = 2539 DAG.getConstantFP(BitsToDouble(UINT64_C(0x41f0000000000000)), MVT::f64); 2540 SDValue Fmul = DAG.getNode(ISD::FMUL, dl, MVT::f64, TwoP32, Fcvt); 2541 SDValue Lo = DAG.getNode(ISD::TRUNCATE, dl, MVT::i32, Sel2); 2542 SDValue Fcvt2 = DAG.getNode(ISD::UINT_TO_FP, dl, MVT::f64, Lo); 2543 SDValue Fadd = DAG.getNode(ISD::FADD, dl, MVT::f64, Fmul, Fcvt2); 2544 return DAG.getNode(ISD::FP_ROUND, dl, MVT::f32, Fadd, 2545 DAG.getIntPtrConstant(0)); 2546 } 2547 2548 SDValue Tmp1 = DAG.getNode(ISD::SINT_TO_FP, dl, DestVT, Op0); 2549 2550 SDValue SignSet = DAG.getSetCC(dl, getSetCCResultType(Op0.getValueType()), 2551 Op0, DAG.getConstant(0, Op0.getValueType()), 2552 ISD::SETLT); 2553 SDValue Zero = DAG.getIntPtrConstant(0), Four = DAG.getIntPtrConstant(4); 2554 SDValue CstOffset = DAG.getSelect(dl, Zero.getValueType(), 2555 SignSet, Four, Zero); 2556 2557 // If the sign bit of the integer is set, the large number will be treated 2558 // as a negative number. To counteract this, the dynamic code adds an 2559 // offset depending on the data type. 
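  // In effect, the constant-pool entry built below holds { 0.0f, 2^N as
  // float } in the target's byte order, and CstOffset selects offset 0 when
  // the input was non-negative and offset 4 when the sign bit was set,
  // undoing the wrap-around introduced by the signed conversion above.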
2560 uint64_t FF; 2561 switch (Op0.getSimpleValueType().SimpleTy) { 2562 default: llvm_unreachable("Unsupported integer type!"); 2563 case MVT::i8 : FF = 0x43800000ULL; break; // 2^8 (as a float) 2564 case MVT::i16: FF = 0x47800000ULL; break; // 2^16 (as a float) 2565 case MVT::i32: FF = 0x4F800000ULL; break; // 2^32 (as a float) 2566 case MVT::i64: FF = 0x5F800000ULL; break; // 2^64 (as a float) 2567 } 2568 if (TLI.isLittleEndian()) FF <<= 32; 2569 Constant *FudgeFactor = ConstantInt::get( 2570 Type::getInt64Ty(*DAG.getContext()), FF); 2571 2572 SDValue CPIdx = DAG.getConstantPool(FudgeFactor, TLI.getPointerTy()); 2573 unsigned Alignment = cast<ConstantPoolSDNode>(CPIdx)->getAlignment(); 2574 CPIdx = DAG.getNode(ISD::ADD, dl, CPIdx.getValueType(), CPIdx, CstOffset); 2575 Alignment = std::min(Alignment, 4u); 2576 SDValue FudgeInReg; 2577 if (DestVT == MVT::f32) 2578 FudgeInReg = DAG.getLoad(MVT::f32, dl, DAG.getEntryNode(), CPIdx, 2579 MachinePointerInfo::getConstantPool(), 2580 false, false, false, Alignment); 2581 else { 2582 SDValue Load = DAG.getExtLoad(ISD::EXTLOAD, dl, DestVT, 2583 DAG.getEntryNode(), CPIdx, 2584 MachinePointerInfo::getConstantPool(), 2585 MVT::f32, false, false, false, Alignment); 2586 HandleSDNode Handle(Load); 2587 LegalizeOp(Load.getNode()); 2588 FudgeInReg = Handle.getValue(); 2589 } 2590 2591 return DAG.getNode(ISD::FADD, dl, DestVT, Tmp1, FudgeInReg); 2592 } 2593 2594 /// PromoteLegalINT_TO_FP - This function is responsible for legalizing a 2595 /// *INT_TO_FP operation of the specified operand when the target requests that 2596 /// we promote it. At this point, we know that the result and operand types are 2597 /// legal for the target, and that there is a legal UINT_TO_FP or SINT_TO_FP 2598 /// operation that takes a larger input. 2599 SDValue SelectionDAGLegalize::PromoteLegalINT_TO_FP(SDValue LegalOp, 2600 EVT DestVT, 2601 bool isSigned, 2602 SDLoc dl) { 2603 // First step, figure out the appropriate *INT_TO_FP operation to use. 2604 EVT NewInTy = LegalOp.getValueType(); 2605 2606 unsigned OpToUse = 0; 2607 2608 // Scan for the appropriate larger type to use. 2609 while (1) { 2610 NewInTy = (MVT::SimpleValueType)(NewInTy.getSimpleVT().SimpleTy+1); 2611 assert(NewInTy.isInteger() && "Ran out of possibilities!"); 2612 2613 // If the target supports SINT_TO_FP of this type, use it. 2614 if (TLI.isOperationLegalOrCustom(ISD::SINT_TO_FP, NewInTy)) { 2615 OpToUse = ISD::SINT_TO_FP; 2616 break; 2617 } 2618 if (isSigned) continue; 2619 2620 // If the target supports UINT_TO_FP of this type, use it. 2621 if (TLI.isOperationLegalOrCustom(ISD::UINT_TO_FP, NewInTy)) { 2622 OpToUse = ISD::UINT_TO_FP; 2623 break; 2624 } 2625 2626 // Otherwise, try a larger type. 2627 } 2628 2629 // Okay, we found the operation and type to use. Zero extend our input to the 2630 // desired type then run the operation on it. 2631 return DAG.getNode(OpToUse, dl, DestVT, 2632 DAG.getNode(isSigned ? ISD::SIGN_EXTEND : ISD::ZERO_EXTEND, 2633 dl, NewInTy, LegalOp)); 2634 } 2635 2636 /// PromoteLegalFP_TO_INT - This function is responsible for legalizing a 2637 /// FP_TO_*INT operation of the specified operand when the target requests that 2638 /// we promote it. At this point, we know that the result and operand types are 2639 /// legal for the target, and that there is a legal FP_TO_UINT or FP_TO_SINT 2640 /// operation that returns a larger result. 
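/// The strategy is simply to convert to the wider integer type and then
/// truncate the result back down to DestVT.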
2641 SDValue SelectionDAGLegalize::PromoteLegalFP_TO_INT(SDValue LegalOp, 2642 EVT DestVT, 2643 bool isSigned, 2644 SDLoc dl) { 2645 // First step, figure out the appropriate FP_TO*INT operation to use. 2646 EVT NewOutTy = DestVT; 2647 2648 unsigned OpToUse = 0; 2649 2650 // Scan for the appropriate larger type to use. 2651 while (1) { 2652 NewOutTy = (MVT::SimpleValueType)(NewOutTy.getSimpleVT().SimpleTy+1); 2653 assert(NewOutTy.isInteger() && "Ran out of possibilities!"); 2654 2655 // A larger signed type can hold all unsigned values of the requested type, 2656 // so using FP_TO_SINT is valid 2657 if (TLI.isOperationLegalOrCustom(ISD::FP_TO_SINT, NewOutTy)) { 2658 OpToUse = ISD::FP_TO_SINT; 2659 break; 2660 } 2661 2662 // However, if the value may be < 0.0, we *must* use some FP_TO_SINT. 2663 if (!isSigned && TLI.isOperationLegalOrCustom(ISD::FP_TO_UINT, NewOutTy)) { 2664 OpToUse = ISD::FP_TO_UINT; 2665 break; 2666 } 2667 2668 // Otherwise, try a larger type. 2669 } 2670 2671 2672 // Okay, we found the operation and type to use. 2673 SDValue Operation = DAG.getNode(OpToUse, dl, NewOutTy, LegalOp); 2674 2675 // Truncate the result of the extended FP_TO_*INT operation to the desired 2676 // size. 2677 return DAG.getNode(ISD::TRUNCATE, dl, DestVT, Operation); 2678 } 2679 2680 /// ExpandBSWAP - Open code the operations for BSWAP of the specified operation. 2681 /// 2682 SDValue SelectionDAGLegalize::ExpandBSWAP(SDValue Op, SDLoc dl) { 2683 EVT VT = Op.getValueType(); 2684 EVT SHVT = TLI.getShiftAmountTy(VT); 2685 SDValue Tmp1, Tmp2, Tmp3, Tmp4, Tmp5, Tmp6, Tmp7, Tmp8; 2686 switch (VT.getSimpleVT().SimpleTy) { 2687 default: llvm_unreachable("Unhandled Expand type in BSWAP!"); 2688 case MVT::i16: 2689 Tmp2 = DAG.getNode(ISD::SHL, dl, VT, Op, DAG.getConstant(8, SHVT)); 2690 Tmp1 = DAG.getNode(ISD::SRL, dl, VT, Op, DAG.getConstant(8, SHVT)); 2691 return DAG.getNode(ISD::OR, dl, VT, Tmp1, Tmp2); 2692 case MVT::i32: 2693 Tmp4 = DAG.getNode(ISD::SHL, dl, VT, Op, DAG.getConstant(24, SHVT)); 2694 Tmp3 = DAG.getNode(ISD::SHL, dl, VT, Op, DAG.getConstant(8, SHVT)); 2695 Tmp2 = DAG.getNode(ISD::SRL, dl, VT, Op, DAG.getConstant(8, SHVT)); 2696 Tmp1 = DAG.getNode(ISD::SRL, dl, VT, Op, DAG.getConstant(24, SHVT)); 2697 Tmp3 = DAG.getNode(ISD::AND, dl, VT, Tmp3, DAG.getConstant(0xFF0000, VT)); 2698 Tmp2 = DAG.getNode(ISD::AND, dl, VT, Tmp2, DAG.getConstant(0xFF00, VT)); 2699 Tmp4 = DAG.getNode(ISD::OR, dl, VT, Tmp4, Tmp3); 2700 Tmp2 = DAG.getNode(ISD::OR, dl, VT, Tmp2, Tmp1); 2701 return DAG.getNode(ISD::OR, dl, VT, Tmp4, Tmp2); 2702 case MVT::i64: 2703 Tmp8 = DAG.getNode(ISD::SHL, dl, VT, Op, DAG.getConstant(56, SHVT)); 2704 Tmp7 = DAG.getNode(ISD::SHL, dl, VT, Op, DAG.getConstant(40, SHVT)); 2705 Tmp6 = DAG.getNode(ISD::SHL, dl, VT, Op, DAG.getConstant(24, SHVT)); 2706 Tmp5 = DAG.getNode(ISD::SHL, dl, VT, Op, DAG.getConstant(8, SHVT)); 2707 Tmp4 = DAG.getNode(ISD::SRL, dl, VT, Op, DAG.getConstant(8, SHVT)); 2708 Tmp3 = DAG.getNode(ISD::SRL, dl, VT, Op, DAG.getConstant(24, SHVT)); 2709 Tmp2 = DAG.getNode(ISD::SRL, dl, VT, Op, DAG.getConstant(40, SHVT)); 2710 Tmp1 = DAG.getNode(ISD::SRL, dl, VT, Op, DAG.getConstant(56, SHVT)); 2711 Tmp7 = DAG.getNode(ISD::AND, dl, VT, Tmp7, DAG.getConstant(255ULL<<48, VT)); 2712 Tmp6 = DAG.getNode(ISD::AND, dl, VT, Tmp6, DAG.getConstant(255ULL<<40, VT)); 2713 Tmp5 = DAG.getNode(ISD::AND, dl, VT, Tmp5, DAG.getConstant(255ULL<<32, VT)); 2714 Tmp4 = DAG.getNode(ISD::AND, dl, VT, Tmp4, DAG.getConstant(255ULL<<24, VT)); 2715 Tmp3 = DAG.getNode(ISD::AND, dl, VT, Tmp3, 
DAG.getConstant(255ULL<<16, VT)); 2716 Tmp2 = DAG.getNode(ISD::AND, dl, VT, Tmp2, DAG.getConstant(255ULL<<8 , VT)); 2717 Tmp8 = DAG.getNode(ISD::OR, dl, VT, Tmp8, Tmp7); 2718 Tmp6 = DAG.getNode(ISD::OR, dl, VT, Tmp6, Tmp5); 2719 Tmp4 = DAG.getNode(ISD::OR, dl, VT, Tmp4, Tmp3); 2720 Tmp2 = DAG.getNode(ISD::OR, dl, VT, Tmp2, Tmp1); 2721 Tmp8 = DAG.getNode(ISD::OR, dl, VT, Tmp8, Tmp6); 2722 Tmp4 = DAG.getNode(ISD::OR, dl, VT, Tmp4, Tmp2); 2723 return DAG.getNode(ISD::OR, dl, VT, Tmp8, Tmp4); 2724 } 2725 } 2726 2727 /// ExpandBitCount - Expand the specified bitcount instruction into operations. 2728 /// 2729 SDValue SelectionDAGLegalize::ExpandBitCount(unsigned Opc, SDValue Op, 2730 SDLoc dl) { 2731 switch (Opc) { 2732 default: llvm_unreachable("Cannot expand this yet!"); 2733 case ISD::CTPOP: { 2734 EVT VT = Op.getValueType(); 2735 EVT ShVT = TLI.getShiftAmountTy(VT); 2736 unsigned Len = VT.getSizeInBits(); 2737 2738 assert(VT.isInteger() && Len <= 128 && Len % 8 == 0 && 2739 "CTPOP not implemented for this type."); 2740 2741 // This is the "best" algorithm from 2742 // http://graphics.stanford.edu/~seander/bithacks.html#CountBitsSetParallel 2743 2744 SDValue Mask55 = DAG.getConstant(APInt::getSplat(Len, APInt(8, 0x55)), VT); 2745 SDValue Mask33 = DAG.getConstant(APInt::getSplat(Len, APInt(8, 0x33)), VT); 2746 SDValue Mask0F = DAG.getConstant(APInt::getSplat(Len, APInt(8, 0x0F)), VT); 2747 SDValue Mask01 = DAG.getConstant(APInt::getSplat(Len, APInt(8, 0x01)), VT); 2748 2749 // v = v - ((v >> 1) & 0x55555555...) 2750 Op = DAG.getNode(ISD::SUB, dl, VT, Op, 2751 DAG.getNode(ISD::AND, dl, VT, 2752 DAG.getNode(ISD::SRL, dl, VT, Op, 2753 DAG.getConstant(1, ShVT)), 2754 Mask55)); 2755 // v = (v & 0x33333333...) + ((v >> 2) & 0x33333333...) 2756 Op = DAG.getNode(ISD::ADD, dl, VT, 2757 DAG.getNode(ISD::AND, dl, VT, Op, Mask33), 2758 DAG.getNode(ISD::AND, dl, VT, 2759 DAG.getNode(ISD::SRL, dl, VT, Op, 2760 DAG.getConstant(2, ShVT)), 2761 Mask33)); 2762 // v = (v + (v >> 4)) & 0x0F0F0F0F... 2763 Op = DAG.getNode(ISD::AND, dl, VT, 2764 DAG.getNode(ISD::ADD, dl, VT, Op, 2765 DAG.getNode(ISD::SRL, dl, VT, Op, 2766 DAG.getConstant(4, ShVT))), 2767 Mask0F); 2768 // v = (v * 0x01010101...) >> (Len - 8) 2769 Op = DAG.getNode(ISD::SRL, dl, VT, 2770 DAG.getNode(ISD::MUL, dl, VT, Op, Mask01), 2771 DAG.getConstant(Len - 8, ShVT)); 2772 2773 return Op; 2774 } 2775 case ISD::CTLZ_ZERO_UNDEF: 2776 // This trivially expands to CTLZ. 2777 return DAG.getNode(ISD::CTLZ, dl, Op.getValueType(), Op); 2778 case ISD::CTLZ: { 2779 // for now, we do this: 2780 // x = x | (x >> 1); 2781 // x = x | (x >> 2); 2782 // ... 2783 // x = x | (x >>16); 2784 // x = x | (x >>32); // for 64-bit input 2785 // return popcount(~x); 2786 // 2787 // but see also: http://www.hackersdelight.org/HDcode/nlz.cc 2788 EVT VT = Op.getValueType(); 2789 EVT ShVT = TLI.getShiftAmountTy(VT); 2790 unsigned len = VT.getSizeInBits(); 2791 for (unsigned i = 0; (1U << i) <= (len / 2); ++i) { 2792 SDValue Tmp3 = DAG.getConstant(1ULL << i, ShVT); 2793 Op = DAG.getNode(ISD::OR, dl, VT, Op, 2794 DAG.getNode(ISD::SRL, dl, VT, Op, Tmp3)); 2795 } 2796 Op = DAG.getNOT(dl, Op, VT); 2797 return DAG.getNode(ISD::CTPOP, dl, VT, Op); 2798 } 2799 case ISD::CTTZ_ZERO_UNDEF: 2800 // This trivially expands to CTTZ. 
2801 return DAG.getNode(ISD::CTTZ, dl, Op.getValueType(), Op); 2802 case ISD::CTTZ: { 2803 // for now, we use: { return popcount(~x & (x - 1)); } 2804 // unless the target has ctlz but not ctpop, in which case we use: 2805 // { return 32 - nlz(~x & (x-1)); } 2806 // see also http://www.hackersdelight.org/HDcode/ntz.cc 2807 EVT VT = Op.getValueType(); 2808 SDValue Tmp3 = DAG.getNode(ISD::AND, dl, VT, 2809 DAG.getNOT(dl, Op, VT), 2810 DAG.getNode(ISD::SUB, dl, VT, Op, 2811 DAG.getConstant(1, VT))); 2812 // If ISD::CTLZ is legal and CTPOP isn't, then do that instead. 2813 if (!TLI.isOperationLegalOrCustom(ISD::CTPOP, VT) && 2814 TLI.isOperationLegalOrCustom(ISD::CTLZ, VT)) 2815 return DAG.getNode(ISD::SUB, dl, VT, 2816 DAG.getConstant(VT.getSizeInBits(), VT), 2817 DAG.getNode(ISD::CTLZ, dl, VT, Tmp3)); 2818 return DAG.getNode(ISD::CTPOP, dl, VT, Tmp3); 2819 } 2820 } 2821 } 2822 2823 std::pair <SDValue, SDValue> SelectionDAGLegalize::ExpandAtomic(SDNode *Node) { 2824 unsigned Opc = Node->getOpcode(); 2825 MVT VT = cast<AtomicSDNode>(Node)->getMemoryVT().getSimpleVT(); 2826 RTLIB::Libcall LC; 2827 2828 switch (Opc) { 2829 default: 2830 llvm_unreachable("Unhandled atomic intrinsic Expand!"); 2831 case ISD::ATOMIC_SWAP: 2832 switch (VT.SimpleTy) { 2833 default: llvm_unreachable("Unexpected value type for atomic!"); 2834 case MVT::i8: LC = RTLIB::SYNC_LOCK_TEST_AND_SET_1; break; 2835 case MVT::i16: LC = RTLIB::SYNC_LOCK_TEST_AND_SET_2; break; 2836 case MVT::i32: LC = RTLIB::SYNC_LOCK_TEST_AND_SET_4; break; 2837 case MVT::i64: LC = RTLIB::SYNC_LOCK_TEST_AND_SET_8; break; 2838 case MVT::i128:LC = RTLIB::SYNC_LOCK_TEST_AND_SET_16;break; 2839 } 2840 break; 2841 case ISD::ATOMIC_CMP_SWAP: 2842 switch (VT.SimpleTy) { 2843 default: llvm_unreachable("Unexpected value type for atomic!"); 2844 case MVT::i8: LC = RTLIB::SYNC_VAL_COMPARE_AND_SWAP_1; break; 2845 case MVT::i16: LC = RTLIB::SYNC_VAL_COMPARE_AND_SWAP_2; break; 2846 case MVT::i32: LC = RTLIB::SYNC_VAL_COMPARE_AND_SWAP_4; break; 2847 case MVT::i64: LC = RTLIB::SYNC_VAL_COMPARE_AND_SWAP_8; break; 2848 case MVT::i128:LC = RTLIB::SYNC_VAL_COMPARE_AND_SWAP_16;break; 2849 } 2850 break; 2851 case ISD::ATOMIC_LOAD_ADD: 2852 switch (VT.SimpleTy) { 2853 default: llvm_unreachable("Unexpected value type for atomic!"); 2854 case MVT::i8: LC = RTLIB::SYNC_FETCH_AND_ADD_1; break; 2855 case MVT::i16: LC = RTLIB::SYNC_FETCH_AND_ADD_2; break; 2856 case MVT::i32: LC = RTLIB::SYNC_FETCH_AND_ADD_4; break; 2857 case MVT::i64: LC = RTLIB::SYNC_FETCH_AND_ADD_8; break; 2858 case MVT::i128:LC = RTLIB::SYNC_FETCH_AND_ADD_16;break; 2859 } 2860 break; 2861 case ISD::ATOMIC_LOAD_SUB: 2862 switch (VT.SimpleTy) { 2863 default: llvm_unreachable("Unexpected value type for atomic!"); 2864 case MVT::i8: LC = RTLIB::SYNC_FETCH_AND_SUB_1; break; 2865 case MVT::i16: LC = RTLIB::SYNC_FETCH_AND_SUB_2; break; 2866 case MVT::i32: LC = RTLIB::SYNC_FETCH_AND_SUB_4; break; 2867 case MVT::i64: LC = RTLIB::SYNC_FETCH_AND_SUB_8; break; 2868 case MVT::i128:LC = RTLIB::SYNC_FETCH_AND_SUB_16;break; 2869 } 2870 break; 2871 case ISD::ATOMIC_LOAD_AND: 2872 switch (VT.SimpleTy) { 2873 default: llvm_unreachable("Unexpected value type for atomic!"); 2874 case MVT::i8: LC = RTLIB::SYNC_FETCH_AND_AND_1; break; 2875 case MVT::i16: LC = RTLIB::SYNC_FETCH_AND_AND_2; break; 2876 case MVT::i32: LC = RTLIB::SYNC_FETCH_AND_AND_4; break; 2877 case MVT::i64: LC = RTLIB::SYNC_FETCH_AND_AND_8; break; 2878 case MVT::i128:LC = RTLIB::SYNC_FETCH_AND_AND_16;break; 2879 } 2880 break; 2881 case ISD::ATOMIC_LOAD_OR: 2882 
switch (VT.SimpleTy) { 2883 default: llvm_unreachable("Unexpected value type for atomic!"); 2884 case MVT::i8: LC = RTLIB::SYNC_FETCH_AND_OR_1; break; 2885 case MVT::i16: LC = RTLIB::SYNC_FETCH_AND_OR_2; break; 2886 case MVT::i32: LC = RTLIB::SYNC_FETCH_AND_OR_4; break; 2887 case MVT::i64: LC = RTLIB::SYNC_FETCH_AND_OR_8; break; 2888 case MVT::i128:LC = RTLIB::SYNC_FETCH_AND_OR_16;break; 2889 } 2890 break; 2891 case ISD::ATOMIC_LOAD_XOR: 2892 switch (VT.SimpleTy) { 2893 default: llvm_unreachable("Unexpected value type for atomic!"); 2894 case MVT::i8: LC = RTLIB::SYNC_FETCH_AND_XOR_1; break; 2895 case MVT::i16: LC = RTLIB::SYNC_FETCH_AND_XOR_2; break; 2896 case MVT::i32: LC = RTLIB::SYNC_FETCH_AND_XOR_4; break; 2897 case MVT::i64: LC = RTLIB::SYNC_FETCH_AND_XOR_8; break; 2898 case MVT::i128:LC = RTLIB::SYNC_FETCH_AND_XOR_16;break; 2899 } 2900 break; 2901 case ISD::ATOMIC_LOAD_NAND: 2902 switch (VT.SimpleTy) { 2903 default: llvm_unreachable("Unexpected value type for atomic!"); 2904 case MVT::i8: LC = RTLIB::SYNC_FETCH_AND_NAND_1; break; 2905 case MVT::i16: LC = RTLIB::SYNC_FETCH_AND_NAND_2; break; 2906 case MVT::i32: LC = RTLIB::SYNC_FETCH_AND_NAND_4; break; 2907 case MVT::i64: LC = RTLIB::SYNC_FETCH_AND_NAND_8; break; 2908 case MVT::i128:LC = RTLIB::SYNC_FETCH_AND_NAND_16;break; 2909 } 2910 break; 2911 case ISD::ATOMIC_LOAD_MAX: 2912 switch (VT.SimpleTy) { 2913 default: llvm_unreachable("Unexpected value type for atomic!"); 2914 case MVT::i8: LC = RTLIB::SYNC_FETCH_AND_MAX_1; break; 2915 case MVT::i16: LC = RTLIB::SYNC_FETCH_AND_MAX_2; break; 2916 case MVT::i32: LC = RTLIB::SYNC_FETCH_AND_MAX_4; break; 2917 case MVT::i64: LC = RTLIB::SYNC_FETCH_AND_MAX_8; break; 2918 case MVT::i128:LC = RTLIB::SYNC_FETCH_AND_MAX_16;break; 2919 } 2920 break; 2921 case ISD::ATOMIC_LOAD_UMAX: 2922 switch (VT.SimpleTy) { 2923 default: llvm_unreachable("Unexpected value type for atomic!"); 2924 case MVT::i8: LC = RTLIB::SYNC_FETCH_AND_UMAX_1; break; 2925 case MVT::i16: LC = RTLIB::SYNC_FETCH_AND_UMAX_2; break; 2926 case MVT::i32: LC = RTLIB::SYNC_FETCH_AND_UMAX_4; break; 2927 case MVT::i64: LC = RTLIB::SYNC_FETCH_AND_UMAX_8; break; 2928 case MVT::i128:LC = RTLIB::SYNC_FETCH_AND_UMAX_16;break; 2929 } 2930 break; 2931 case ISD::ATOMIC_LOAD_MIN: 2932 switch (VT.SimpleTy) { 2933 default: llvm_unreachable("Unexpected value type for atomic!"); 2934 case MVT::i8: LC = RTLIB::SYNC_FETCH_AND_MIN_1; break; 2935 case MVT::i16: LC = RTLIB::SYNC_FETCH_AND_MIN_2; break; 2936 case MVT::i32: LC = RTLIB::SYNC_FETCH_AND_MIN_4; break; 2937 case MVT::i64: LC = RTLIB::SYNC_FETCH_AND_MIN_8; break; 2938 case MVT::i128:LC = RTLIB::SYNC_FETCH_AND_MIN_16;break; 2939 } 2940 break; 2941 case ISD::ATOMIC_LOAD_UMIN: 2942 switch (VT.SimpleTy) { 2943 default: llvm_unreachable("Unexpected value type for atomic!"); 2944 case MVT::i8: LC = RTLIB::SYNC_FETCH_AND_UMIN_1; break; 2945 case MVT::i16: LC = RTLIB::SYNC_FETCH_AND_UMIN_2; break; 2946 case MVT::i32: LC = RTLIB::SYNC_FETCH_AND_UMIN_4; break; 2947 case MVT::i64: LC = RTLIB::SYNC_FETCH_AND_UMIN_8; break; 2948 case MVT::i128:LC = RTLIB::SYNC_FETCH_AND_UMIN_16;break; 2949 } 2950 break; 2951 } 2952 2953 return ExpandChainLibCall(LC, Node, false); 2954 } 2955 2956 void SelectionDAGLegalize::ExpandNode(SDNode *Node) { 2957 SmallVector<SDValue, 8> Results; 2958 SDLoc dl(Node); 2959 SDValue Tmp1, Tmp2, Tmp3, Tmp4; 2960 bool NeedInvert; 2961 switch (Node->getOpcode()) { 2962 case ISD::CTPOP: 2963 case ISD::CTLZ: 2964 case ISD::CTLZ_ZERO_UNDEF: 2965 case ISD::CTTZ: 2966 case ISD::CTTZ_ZERO_UNDEF: 
2967 Tmp1 = ExpandBitCount(Node->getOpcode(), Node->getOperand(0), dl); 2968 Results.push_back(Tmp1); 2969 break; 2970 case ISD::BSWAP: 2971 Results.push_back(ExpandBSWAP(Node->getOperand(0), dl)); 2972 break; 2973 case ISD::FRAMEADDR: 2974 case ISD::RETURNADDR: 2975 case ISD::FRAME_TO_ARGS_OFFSET: 2976 Results.push_back(DAG.getConstant(0, Node->getValueType(0))); 2977 break; 2978 case ISD::FLT_ROUNDS_: 2979 Results.push_back(DAG.getConstant(1, Node->getValueType(0))); 2980 break; 2981 case ISD::EH_RETURN: 2982 case ISD::EH_LABEL: 2983 case ISD::PREFETCH: 2984 case ISD::VAEND: 2985 case ISD::EH_SJLJ_LONGJMP: 2986 // If the target didn't expand these, there's nothing to do, so just 2987 // preserve the chain and be done. 2988 Results.push_back(Node->getOperand(0)); 2989 break; 2990 case ISD::EH_SJLJ_SETJMP: 2991 // If the target didn't expand this, just return 'zero' and preserve the 2992 // chain. 2993 Results.push_back(DAG.getConstant(0, MVT::i32)); 2994 Results.push_back(Node->getOperand(0)); 2995 break; 2996 case ISD::ATOMIC_FENCE: { 2997 // If the target didn't lower this, lower it to '__sync_synchronize()' call 2998 // FIXME: handle "fence singlethread" more efficiently. 2999 TargetLowering::ArgListTy Args; 3000 3001 TargetLowering::CallLoweringInfo CLI(DAG); 3002 CLI.setDebugLoc(dl).setChain(Node->getOperand(0)) 3003 .setCallee(CallingConv::C, Type::getVoidTy(*DAG.getContext()), 3004 DAG.getExternalSymbol("__sync_synchronize", 3005 TLI.getPointerTy()), std::move(Args), 0); 3006 3007 std::pair<SDValue, SDValue> CallResult = TLI.LowerCallTo(CLI); 3008 3009 Results.push_back(CallResult.second); 3010 break; 3011 } 3012 case ISD::ATOMIC_LOAD: { 3013 // There is no libcall for atomic load; fake it with ATOMIC_CMP_SWAP. 3014 SDValue Zero = DAG.getConstant(0, Node->getValueType(0)); 3015 SDVTList VTs = DAG.getVTList(Node->getValueType(0), MVT::Other); 3016 SDValue Swap = DAG.getAtomicCmpSwap( 3017 ISD::ATOMIC_CMP_SWAP, dl, cast<AtomicSDNode>(Node)->getMemoryVT(), VTs, 3018 Node->getOperand(0), Node->getOperand(1), Zero, Zero, 3019 cast<AtomicSDNode>(Node)->getMemOperand(), 3020 cast<AtomicSDNode>(Node)->getOrdering(), 3021 cast<AtomicSDNode>(Node)->getOrdering(), 3022 cast<AtomicSDNode>(Node)->getSynchScope()); 3023 Results.push_back(Swap.getValue(0)); 3024 Results.push_back(Swap.getValue(1)); 3025 break; 3026 } 3027 case ISD::ATOMIC_STORE: { 3028 // There is no libcall for atomic store; fake it with ATOMIC_SWAP. 3029 SDValue Swap = DAG.getAtomic(ISD::ATOMIC_SWAP, dl, 3030 cast<AtomicSDNode>(Node)->getMemoryVT(), 3031 Node->getOperand(0), 3032 Node->getOperand(1), Node->getOperand(2), 3033 cast<AtomicSDNode>(Node)->getMemOperand(), 3034 cast<AtomicSDNode>(Node)->getOrdering(), 3035 cast<AtomicSDNode>(Node)->getSynchScope()); 3036 Results.push_back(Swap.getValue(1)); 3037 break; 3038 } 3039 // By default, atomic intrinsics are marked Legal and lowered. Targets 3040 // which don't support them directly, however, may want libcalls, in which 3041 // case they mark them Expand, and we get here. 
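  // ExpandAtomic maps each of these opcodes to the matching __sync_* libcall
  // for the access width (e.g. i32 ATOMIC_LOAD_ADD becomes
  // __sync_fetch_and_add_4) and returns the call's value and chain results.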
3042 case ISD::ATOMIC_SWAP: 3043 case ISD::ATOMIC_LOAD_ADD: 3044 case ISD::ATOMIC_LOAD_SUB: 3045 case ISD::ATOMIC_LOAD_AND: 3046 case ISD::ATOMIC_LOAD_OR: 3047 case ISD::ATOMIC_LOAD_XOR: 3048 case ISD::ATOMIC_LOAD_NAND: 3049 case ISD::ATOMIC_LOAD_MIN: 3050 case ISD::ATOMIC_LOAD_MAX: 3051 case ISD::ATOMIC_LOAD_UMIN: 3052 case ISD::ATOMIC_LOAD_UMAX: 3053 case ISD::ATOMIC_CMP_SWAP: { 3054 std::pair<SDValue, SDValue> Tmp = ExpandAtomic(Node); 3055 Results.push_back(Tmp.first); 3056 Results.push_back(Tmp.second); 3057 break; 3058 } 3059 case ISD::ATOMIC_CMP_SWAP_WITH_SUCCESS: { 3060 // Expanding an ATOMIC_CMP_SWAP_WITH_SUCCESS produces an ATOMIC_CMP_SWAP and 3061 // splits out the success value as a comparison. Expanding the resulting 3062 // ATOMIC_CMP_SWAP will produce a libcall. 3063 SDVTList VTs = DAG.getVTList(Node->getValueType(0), MVT::Other); 3064 SDValue Res = DAG.getAtomicCmpSwap( 3065 ISD::ATOMIC_CMP_SWAP, dl, cast<AtomicSDNode>(Node)->getMemoryVT(), VTs, 3066 Node->getOperand(0), Node->getOperand(1), Node->getOperand(2), 3067 Node->getOperand(3), cast<MemSDNode>(Node)->getMemOperand(), 3068 cast<AtomicSDNode>(Node)->getSuccessOrdering(), 3069 cast<AtomicSDNode>(Node)->getFailureOrdering(), 3070 cast<AtomicSDNode>(Node)->getSynchScope()); 3071 3072 SDValue Success = DAG.getSetCC(SDLoc(Node), Node->getValueType(1), 3073 Res, Node->getOperand(2), ISD::SETEQ); 3074 3075 Results.push_back(Res.getValue(0)); 3076 Results.push_back(Success); 3077 Results.push_back(Res.getValue(1)); 3078 break; 3079 } 3080 case ISD::DYNAMIC_STACKALLOC: 3081 ExpandDYNAMIC_STACKALLOC(Node, Results); 3082 break; 3083 case ISD::MERGE_VALUES: 3084 for (unsigned i = 0; i < Node->getNumValues(); i++) 3085 Results.push_back(Node->getOperand(i)); 3086 break; 3087 case ISD::UNDEF: { 3088 EVT VT = Node->getValueType(0); 3089 if (VT.isInteger()) 3090 Results.push_back(DAG.getConstant(0, VT)); 3091 else { 3092 assert(VT.isFloatingPoint() && "Unknown value type!"); 3093 Results.push_back(DAG.getConstantFP(0, VT)); 3094 } 3095 break; 3096 } 3097 case ISD::TRAP: { 3098 // If this operation is not supported, lower it to 'abort()' call 3099 TargetLowering::ArgListTy Args; 3100 TargetLowering::CallLoweringInfo CLI(DAG); 3101 CLI.setDebugLoc(dl).setChain(Node->getOperand(0)) 3102 .setCallee(CallingConv::C, Type::getVoidTy(*DAG.getContext()), 3103 DAG.getExternalSymbol("abort", TLI.getPointerTy()), 3104 std::move(Args), 0); 3105 std::pair<SDValue, SDValue> CallResult = TLI.LowerCallTo(CLI); 3106 3107 Results.push_back(CallResult.second); 3108 break; 3109 } 3110 case ISD::FP_ROUND: 3111 case ISD::BITCAST: 3112 Tmp1 = EmitStackConvert(Node->getOperand(0), Node->getValueType(0), 3113 Node->getValueType(0), dl); 3114 Results.push_back(Tmp1); 3115 break; 3116 case ISD::FP_EXTEND: 3117 Tmp1 = EmitStackConvert(Node->getOperand(0), 3118 Node->getOperand(0).getValueType(), 3119 Node->getValueType(0), dl); 3120 Results.push_back(Tmp1); 3121 break; 3122 case ISD::SIGN_EXTEND_INREG: { 3123 // NOTE: we could fall back on load/store here too for targets without 3124 // SAR. However, it is doubtful that any exist. 
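    // The in-register extension is a shift pair: shift left so ExtraVT's sign
    // bit lands in the MSB, then arithmetic-shift right by the same amount.
    // e.g. sign_extend_inreg from i8 in an i32 register is (sra (shl x, 24), 24).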
3125 EVT ExtraVT = cast<VTSDNode>(Node->getOperand(1))->getVT(); 3126 EVT VT = Node->getValueType(0); 3127 EVT ShiftAmountTy = TLI.getShiftAmountTy(VT); 3128 if (VT.isVector()) 3129 ShiftAmountTy = VT; 3130 unsigned BitsDiff = VT.getScalarType().getSizeInBits() - 3131 ExtraVT.getScalarType().getSizeInBits(); 3132 SDValue ShiftCst = DAG.getConstant(BitsDiff, ShiftAmountTy); 3133 Tmp1 = DAG.getNode(ISD::SHL, dl, Node->getValueType(0), 3134 Node->getOperand(0), ShiftCst); 3135 Tmp1 = DAG.getNode(ISD::SRA, dl, Node->getValueType(0), Tmp1, ShiftCst); 3136 Results.push_back(Tmp1); 3137 break; 3138 } 3139 case ISD::FP_ROUND_INREG: { 3140 // The only way we can lower this is to turn it into a TRUNCSTORE, 3141 // EXTLOAD pair, targeting a temporary location (a stack slot). 3142 3143 // NOTE: there is a choice here between constantly creating new stack 3144 // slots and always reusing the same one. We currently always create 3145 // new ones, as reuse may inhibit scheduling. 3146 EVT ExtraVT = cast<VTSDNode>(Node->getOperand(1))->getVT(); 3147 Tmp1 = EmitStackConvert(Node->getOperand(0), ExtraVT, 3148 Node->getValueType(0), dl); 3149 Results.push_back(Tmp1); 3150 break; 3151 } 3152 case ISD::SINT_TO_FP: 3153 case ISD::UINT_TO_FP: 3154 Tmp1 = ExpandLegalINT_TO_FP(Node->getOpcode() == ISD::SINT_TO_FP, 3155 Node->getOperand(0), Node->getValueType(0), dl); 3156 Results.push_back(Tmp1); 3157 break; 3158 case ISD::FP_TO_SINT: 3159 if (TLI.expandFP_TO_SINT(Node, Tmp1, DAG)) 3160 Results.push_back(Tmp1); 3161 break; 3162 case ISD::FP_TO_UINT: { 3163 SDValue True, False; 3164 EVT VT = Node->getOperand(0).getValueType(); 3165 EVT NVT = Node->getValueType(0); 3166 APFloat apf(DAG.EVTToAPFloatSemantics(VT), 3167 APInt::getNullValue(VT.getSizeInBits())); 3168 APInt x = APInt::getSignBit(NVT.getSizeInBits()); 3169 (void)apf.convertFromAPInt(x, false, APFloat::rmNearestTiesToEven); 3170 Tmp1 = DAG.getConstantFP(apf, VT); 3171 Tmp2 = DAG.getSetCC(dl, getSetCCResultType(VT), 3172 Node->getOperand(0), 3173 Tmp1, ISD::SETLT); 3174 True = DAG.getNode(ISD::FP_TO_SINT, dl, NVT, Node->getOperand(0)); 3175 False = DAG.getNode(ISD::FP_TO_SINT, dl, NVT, 3176 DAG.getNode(ISD::FSUB, dl, VT, 3177 Node->getOperand(0), Tmp1)); 3178 False = DAG.getNode(ISD::XOR, dl, NVT, False, 3179 DAG.getConstant(x, NVT)); 3180 Tmp1 = DAG.getSelect(dl, NVT, Tmp2, True, False); 3181 Results.push_back(Tmp1); 3182 break; 3183 } 3184 case ISD::VAARG: { 3185 const Value *V = cast<SrcValueSDNode>(Node->getOperand(2))->getValue(); 3186 EVT VT = Node->getValueType(0); 3187 Tmp1 = Node->getOperand(0); 3188 Tmp2 = Node->getOperand(1); 3189 unsigned Align = Node->getConstantOperandVal(3); 3190 3191 SDValue VAListLoad = DAG.getLoad(TLI.getPointerTy(), dl, Tmp1, Tmp2, 3192 MachinePointerInfo(V), 3193 false, false, false, 0); 3194 SDValue VAList = VAListLoad; 3195 3196 if (Align > TLI.getMinStackArgumentAlignment()) { 3197 assert(((Align & (Align-1)) == 0) && "Expected Align to be a power of 2"); 3198 3199 VAList = DAG.getNode(ISD::ADD, dl, VAList.getValueType(), VAList, 3200 DAG.getConstant(Align - 1, 3201 VAList.getValueType())); 3202 3203 VAList = DAG.getNode(ISD::AND, dl, VAList.getValueType(), VAList, 3204 DAG.getConstant(-(int64_t)Align, 3205 VAList.getValueType())); 3206 } 3207 3208 // Increment the pointer, VAList, to the next vaarg 3209 Tmp3 = DAG.getNode(ISD::ADD, dl, VAList.getValueType(), VAList, 3210 DAG.getConstant(TLI.getDataLayout()-> 3211 getTypeAllocSize(VT.getTypeForEVT(*DAG.getContext())), 3212 VAList.getValueType())); 3213 // Store the 
incremented VAList to the legalized pointer 3214 Tmp3 = DAG.getStore(VAListLoad.getValue(1), dl, Tmp3, Tmp2, 3215 MachinePointerInfo(V), false, false, 0); 3216 // Load the actual argument out of the pointer VAList 3217 Results.push_back(DAG.getLoad(VT, dl, Tmp3, VAList, MachinePointerInfo(), 3218 false, false, false, 0)); 3219 Results.push_back(Results[0].getValue(1)); 3220 break; 3221 } 3222 case ISD::VACOPY: { 3223 // This defaults to loading a pointer from the input and storing it to the 3224 // output, returning the chain. 3225 const Value *VD = cast<SrcValueSDNode>(Node->getOperand(3))->getValue(); 3226 const Value *VS = cast<SrcValueSDNode>(Node->getOperand(4))->getValue(); 3227 Tmp1 = DAG.getLoad(TLI.getPointerTy(), dl, Node->getOperand(0), 3228 Node->getOperand(2), MachinePointerInfo(VS), 3229 false, false, false, 0); 3230 Tmp1 = DAG.getStore(Tmp1.getValue(1), dl, Tmp1, Node->getOperand(1), 3231 MachinePointerInfo(VD), false, false, 0); 3232 Results.push_back(Tmp1); 3233 break; 3234 } 3235 case ISD::EXTRACT_VECTOR_ELT: 3236 if (Node->getOperand(0).getValueType().getVectorNumElements() == 1) 3237 // This must be an access of the only element. Return it. 3238 Tmp1 = DAG.getNode(ISD::BITCAST, dl, Node->getValueType(0), 3239 Node->getOperand(0)); 3240 else 3241 Tmp1 = ExpandExtractFromVectorThroughStack(SDValue(Node, 0)); 3242 Results.push_back(Tmp1); 3243 break; 3244 case ISD::EXTRACT_SUBVECTOR: 3245 Results.push_back(ExpandExtractFromVectorThroughStack(SDValue(Node, 0))); 3246 break; 3247 case ISD::INSERT_SUBVECTOR: 3248 Results.push_back(ExpandInsertToVectorThroughStack(SDValue(Node, 0))); 3249 break; 3250 case ISD::CONCAT_VECTORS: { 3251 Results.push_back(ExpandVectorBuildThroughStack(Node)); 3252 break; 3253 } 3254 case ISD::SCALAR_TO_VECTOR: 3255 Results.push_back(ExpandSCALAR_TO_VECTOR(Node)); 3256 break; 3257 case ISD::INSERT_VECTOR_ELT: 3258 Results.push_back(ExpandINSERT_VECTOR_ELT(Node->getOperand(0), 3259 Node->getOperand(1), 3260 Node->getOperand(2), dl)); 3261 break; 3262 case ISD::VECTOR_SHUFFLE: { 3263 SmallVector<int, 32> NewMask; 3264 ArrayRef<int> Mask = cast<ShuffleVectorSDNode>(Node)->getMask(); 3265 3266 EVT VT = Node->getValueType(0); 3267 EVT EltVT = VT.getVectorElementType(); 3268 SDValue Op0 = Node->getOperand(0); 3269 SDValue Op1 = Node->getOperand(1); 3270 if (!TLI.isTypeLegal(EltVT)) { 3271 3272 EVT NewEltVT = TLI.getTypeToTransformTo(*DAG.getContext(), EltVT); 3273 3274 // BUILD_VECTOR operands are allowed to be wider than the element type. 3275 // But if NewEltVT is smaller that EltVT the BUILD_VECTOR does not accept 3276 // it. 3277 if (NewEltVT.bitsLT(EltVT)) { 3278 3279 // Convert shuffle node. 3280 // If original node was v4i64 and the new EltVT is i32, 3281 // cast operands to v8i32 and re-build the mask. 3282 3283 // Calculate new VT, the size of the new VT should be equal to original. 
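        // e.g. a v4i64 shuffle with NewEltVT == i32 is rebuilt as a v8i32
        // shuffle: with factor == 2 the mask <1, 0> becomes <2, 3, 0, 1>, and
        // undef (-1) entries are simply replicated.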
3284 EVT NewVT = 3285 EVT::getVectorVT(*DAG.getContext(), NewEltVT, 3286 VT.getSizeInBits() / NewEltVT.getSizeInBits()); 3287 assert(NewVT.bitsEq(VT)); 3288 3289 // cast operands to new VT 3290 Op0 = DAG.getNode(ISD::BITCAST, dl, NewVT, Op0); 3291 Op1 = DAG.getNode(ISD::BITCAST, dl, NewVT, Op1); 3292 3293 // Convert the shuffle mask 3294 unsigned int factor = 3295 NewVT.getVectorNumElements()/VT.getVectorNumElements(); 3296 3297 // EltVT gets smaller 3298 assert(factor > 0); 3299 3300 for (unsigned i = 0; i < VT.getVectorNumElements(); ++i) { 3301 if (Mask[i] < 0) { 3302 for (unsigned fi = 0; fi < factor; ++fi) 3303 NewMask.push_back(Mask[i]); 3304 } 3305 else { 3306 for (unsigned fi = 0; fi < factor; ++fi) 3307 NewMask.push_back(Mask[i]*factor+fi); 3308 } 3309 } 3310 Mask = NewMask; 3311 VT = NewVT; 3312 } 3313 EltVT = NewEltVT; 3314 } 3315 unsigned NumElems = VT.getVectorNumElements(); 3316 SmallVector<SDValue, 16> Ops; 3317 for (unsigned i = 0; i != NumElems; ++i) { 3318 if (Mask[i] < 0) { 3319 Ops.push_back(DAG.getUNDEF(EltVT)); 3320 continue; 3321 } 3322 unsigned Idx = Mask[i]; 3323 if (Idx < NumElems) 3324 Ops.push_back(DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, EltVT, 3325 Op0, 3326 DAG.getConstant(Idx, TLI.getVectorIdxTy()))); 3327 else 3328 Ops.push_back(DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, EltVT, 3329 Op1, 3330 DAG.getConstant(Idx - NumElems, 3331 TLI.getVectorIdxTy()))); 3332 } 3333 3334 Tmp1 = DAG.getNode(ISD::BUILD_VECTOR, dl, VT, Ops); 3335 // We may have changed the BUILD_VECTOR type. Cast it back to the Node type. 3336 Tmp1 = DAG.getNode(ISD::BITCAST, dl, Node->getValueType(0), Tmp1); 3337 Results.push_back(Tmp1); 3338 break; 3339 } 3340 case ISD::EXTRACT_ELEMENT: { 3341 EVT OpTy = Node->getOperand(0).getValueType(); 3342 if (cast<ConstantSDNode>(Node->getOperand(1))->getZExtValue()) { 3343 // 1 -> Hi 3344 Tmp1 = DAG.getNode(ISD::SRL, dl, OpTy, Node->getOperand(0), 3345 DAG.getConstant(OpTy.getSizeInBits()/2, 3346 TLI.getShiftAmountTy(Node->getOperand(0).getValueType()))); 3347 Tmp1 = DAG.getNode(ISD::TRUNCATE, dl, Node->getValueType(0), Tmp1); 3348 } else { 3349 // 0 -> Lo 3350 Tmp1 = DAG.getNode(ISD::TRUNCATE, dl, Node->getValueType(0), 3351 Node->getOperand(0)); 3352 } 3353 Results.push_back(Tmp1); 3354 break; 3355 } 3356 case ISD::STACKSAVE: 3357 // Expand to CopyFromReg if the target set 3358 // StackPointerRegisterToSaveRestore. 3359 if (unsigned SP = TLI.getStackPointerRegisterToSaveRestore()) { 3360 Results.push_back(DAG.getCopyFromReg(Node->getOperand(0), dl, SP, 3361 Node->getValueType(0))); 3362 Results.push_back(Results[0].getValue(1)); 3363 } else { 3364 Results.push_back(DAG.getUNDEF(Node->getValueType(0))); 3365 Results.push_back(Node->getOperand(0)); 3366 } 3367 break; 3368 case ISD::STACKRESTORE: 3369 // Expand to CopyToReg if the target set 3370 // StackPointerRegisterToSaveRestore. 3371 if (unsigned SP = TLI.getStackPointerRegisterToSaveRestore()) { 3372 Results.push_back(DAG.getCopyToReg(Node->getOperand(0), dl, SP, 3373 Node->getOperand(1))); 3374 } else { 3375 Results.push_back(Node->getOperand(0)); 3376 } 3377 break; 3378 case ISD::FCOPYSIGN: 3379 Results.push_back(ExpandFCOPYSIGN(Node)); 3380 break; 3381 case ISD::FNEG: 3382 // Expand Y = FNEG(X) -> Y = SUB -0.0, X 3383 Tmp1 = DAG.getConstantFP(-0.0, Node->getValueType(0)); 3384 Tmp1 = DAG.getNode(ISD::FSUB, dl, Node->getValueType(0), Tmp1, 3385 Node->getOperand(0)); 3386 Results.push_back(Tmp1); 3387 break; 3388 case ISD::FABS: { 3389 // Expand Y = FABS(X) -> Y = (X >u 0.0) ? X : fneg(X). 
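    // The unordered compare keeps NaN inputs on the 'X' arm, and -0.0 fails
    // the compare so the FNEG arm turns it into +0.0.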
3390 EVT VT = Node->getValueType(0); 3391 Tmp1 = Node->getOperand(0); 3392 Tmp2 = DAG.getConstantFP(0.0, VT); 3393 Tmp2 = DAG.getSetCC(dl, getSetCCResultType(Tmp1.getValueType()), 3394 Tmp1, Tmp2, ISD::SETUGT); 3395 Tmp3 = DAG.getNode(ISD::FNEG, dl, VT, Tmp1); 3396 Tmp1 = DAG.getSelect(dl, VT, Tmp2, Tmp1, Tmp3); 3397 Results.push_back(Tmp1); 3398 break; 3399 } 3400 case ISD::FSQRT: 3401 Results.push_back(ExpandFPLibCall(Node, RTLIB::SQRT_F32, RTLIB::SQRT_F64, 3402 RTLIB::SQRT_F80, RTLIB::SQRT_F128, 3403 RTLIB::SQRT_PPCF128)); 3404 break; 3405 case ISD::FSIN: 3406 case ISD::FCOS: { 3407 EVT VT = Node->getValueType(0); 3408 bool isSIN = Node->getOpcode() == ISD::FSIN; 3409 // Turn fsin / fcos into ISD::FSINCOS node if there are a pair of fsin / 3410 // fcos which share the same operand and both are used. 3411 if ((TLI.isOperationLegalOrCustom(ISD::FSINCOS, VT) || 3412 canCombineSinCosLibcall(Node, TLI, TM)) 3413 && useSinCos(Node)) { 3414 SDVTList VTs = DAG.getVTList(VT, VT); 3415 Tmp1 = DAG.getNode(ISD::FSINCOS, dl, VTs, Node->getOperand(0)); 3416 if (!isSIN) 3417 Tmp1 = Tmp1.getValue(1); 3418 Results.push_back(Tmp1); 3419 } else if (isSIN) { 3420 Results.push_back(ExpandFPLibCall(Node, RTLIB::SIN_F32, RTLIB::SIN_F64, 3421 RTLIB::SIN_F80, RTLIB::SIN_F128, 3422 RTLIB::SIN_PPCF128)); 3423 } else { 3424 Results.push_back(ExpandFPLibCall(Node, RTLIB::COS_F32, RTLIB::COS_F64, 3425 RTLIB::COS_F80, RTLIB::COS_F128, 3426 RTLIB::COS_PPCF128)); 3427 } 3428 break; 3429 } 3430 case ISD::FSINCOS: 3431 // Expand into sincos libcall. 3432 ExpandSinCosLibCall(Node, Results); 3433 break; 3434 case ISD::FLOG: 3435 Results.push_back(ExpandFPLibCall(Node, RTLIB::LOG_F32, RTLIB::LOG_F64, 3436 RTLIB::LOG_F80, RTLIB::LOG_F128, 3437 RTLIB::LOG_PPCF128)); 3438 break; 3439 case ISD::FLOG2: 3440 Results.push_back(ExpandFPLibCall(Node, RTLIB::LOG2_F32, RTLIB::LOG2_F64, 3441 RTLIB::LOG2_F80, RTLIB::LOG2_F128, 3442 RTLIB::LOG2_PPCF128)); 3443 break; 3444 case ISD::FLOG10: 3445 Results.push_back(ExpandFPLibCall(Node, RTLIB::LOG10_F32, RTLIB::LOG10_F64, 3446 RTLIB::LOG10_F80, RTLIB::LOG10_F128, 3447 RTLIB::LOG10_PPCF128)); 3448 break; 3449 case ISD::FEXP: 3450 Results.push_back(ExpandFPLibCall(Node, RTLIB::EXP_F32, RTLIB::EXP_F64, 3451 RTLIB::EXP_F80, RTLIB::EXP_F128, 3452 RTLIB::EXP_PPCF128)); 3453 break; 3454 case ISD::FEXP2: 3455 Results.push_back(ExpandFPLibCall(Node, RTLIB::EXP2_F32, RTLIB::EXP2_F64, 3456 RTLIB::EXP2_F80, RTLIB::EXP2_F128, 3457 RTLIB::EXP2_PPCF128)); 3458 break; 3459 case ISD::FTRUNC: 3460 Results.push_back(ExpandFPLibCall(Node, RTLIB::TRUNC_F32, RTLIB::TRUNC_F64, 3461 RTLIB::TRUNC_F80, RTLIB::TRUNC_F128, 3462 RTLIB::TRUNC_PPCF128)); 3463 break; 3464 case ISD::FFLOOR: 3465 Results.push_back(ExpandFPLibCall(Node, RTLIB::FLOOR_F32, RTLIB::FLOOR_F64, 3466 RTLIB::FLOOR_F80, RTLIB::FLOOR_F128, 3467 RTLIB::FLOOR_PPCF128)); 3468 break; 3469 case ISD::FCEIL: 3470 Results.push_back(ExpandFPLibCall(Node, RTLIB::CEIL_F32, RTLIB::CEIL_F64, 3471 RTLIB::CEIL_F80, RTLIB::CEIL_F128, 3472 RTLIB::CEIL_PPCF128)); 3473 break; 3474 case ISD::FRINT: 3475 Results.push_back(ExpandFPLibCall(Node, RTLIB::RINT_F32, RTLIB::RINT_F64, 3476 RTLIB::RINT_F80, RTLIB::RINT_F128, 3477 RTLIB::RINT_PPCF128)); 3478 break; 3479 case ISD::FNEARBYINT: 3480 Results.push_back(ExpandFPLibCall(Node, RTLIB::NEARBYINT_F32, 3481 RTLIB::NEARBYINT_F64, 3482 RTLIB::NEARBYINT_F80, 3483 RTLIB::NEARBYINT_F128, 3484 RTLIB::NEARBYINT_PPCF128)); 3485 break; 3486 case ISD::FROUND: 3487 Results.push_back(ExpandFPLibCall(Node, RTLIB::ROUND_F32, 3488 
RTLIB::ROUND_F64, 3489 RTLIB::ROUND_F80, 3490 RTLIB::ROUND_F128, 3491 RTLIB::ROUND_PPCF128)); 3492 break; 3493 case ISD::FPOWI: 3494 Results.push_back(ExpandFPLibCall(Node, RTLIB::POWI_F32, RTLIB::POWI_F64, 3495 RTLIB::POWI_F80, RTLIB::POWI_F128, 3496 RTLIB::POWI_PPCF128)); 3497 break; 3498 case ISD::FPOW: 3499 Results.push_back(ExpandFPLibCall(Node, RTLIB::POW_F32, RTLIB::POW_F64, 3500 RTLIB::POW_F80, RTLIB::POW_F128, 3501 RTLIB::POW_PPCF128)); 3502 break; 3503 case ISD::FDIV: 3504 Results.push_back(ExpandFPLibCall(Node, RTLIB::DIV_F32, RTLIB::DIV_F64, 3505 RTLIB::DIV_F80, RTLIB::DIV_F128, 3506 RTLIB::DIV_PPCF128)); 3507 break; 3508 case ISD::FREM: 3509 Results.push_back(ExpandFPLibCall(Node, RTLIB::REM_F32, RTLIB::REM_F64, 3510 RTLIB::REM_F80, RTLIB::REM_F128, 3511 RTLIB::REM_PPCF128)); 3512 break; 3513 case ISD::FMA: 3514 Results.push_back(ExpandFPLibCall(Node, RTLIB::FMA_F32, RTLIB::FMA_F64, 3515 RTLIB::FMA_F80, RTLIB::FMA_F128, 3516 RTLIB::FMA_PPCF128)); 3517 break; 3518 case ISD::FP16_TO_FP: { 3519 if (Node->getValueType(0) == MVT::f32) { 3520 Results.push_back(ExpandLibCall(RTLIB::FPEXT_F16_F32, Node, false)); 3521 break; 3522 } 3523 3524 // We can extend to types bigger than f32 in two steps without changing the 3525 // result. Since "f16 -> f32" is much more commonly available, give CodeGen 3526 // the option of emitting that before resorting to a libcall. 3527 SDValue Res = 3528 DAG.getNode(ISD::FP16_TO_FP, dl, MVT::f32, Node->getOperand(0)); 3529 Results.push_back( 3530 DAG.getNode(ISD::FP_EXTEND, dl, Node->getValueType(0), Res)); 3531 break; 3532 } 3533 case ISD::FP_TO_FP16: { 3534 RTLIB::Libcall LC = 3535 RTLIB::getFPROUND(Node->getOperand(0).getValueType(), MVT::f16); 3536 assert(LC != RTLIB::UNKNOWN_LIBCALL && "Unable to expand fp_to_fp16"); 3537 Results.push_back(ExpandLibCall(LC, Node, false)); 3538 break; 3539 } 3540 case ISD::ConstantFP: { 3541 ConstantFPSDNode *CFP = cast<ConstantFPSDNode>(Node); 3542 // Check to see if this FP immediate is already legal. 3543 // If this is a legal constant, turn it into a TargetConstantFP node. 3544 if (!TLI.isFPImmLegal(CFP->getValueAPF(), Node->getValueType(0))) 3545 Results.push_back(ExpandConstantFP(CFP, true)); 3546 break; 3547 } 3548 case ISD::FSUB: { 3549 EVT VT = Node->getValueType(0); 3550 assert(TLI.isOperationLegalOrCustom(ISD::FADD, VT) && 3551 TLI.isOperationLegalOrCustom(ISD::FNEG, VT) && 3552 "Don't know how to expand this FP subtraction!"); 3553 Tmp1 = DAG.getNode(ISD::FNEG, dl, VT, Node->getOperand(1)); 3554 Tmp1 = DAG.getNode(ISD::FADD, dl, VT, Node->getOperand(0), Tmp1); 3555 Results.push_back(Tmp1); 3556 break; 3557 } 3558 case ISD::SUB: { 3559 EVT VT = Node->getValueType(0); 3560 assert(TLI.isOperationLegalOrCustom(ISD::ADD, VT) && 3561 TLI.isOperationLegalOrCustom(ISD::XOR, VT) && 3562 "Don't know how to expand this subtraction!"); 3563 Tmp1 = DAG.getNode(ISD::XOR, dl, VT, Node->getOperand(1), 3564 DAG.getConstant(APInt::getAllOnesValue(VT.getSizeInBits()), VT)); 3565 Tmp1 = DAG.getNode(ISD::ADD, dl, VT, Tmp1, DAG.getConstant(1, VT)); 3566 Results.push_back(DAG.getNode(ISD::ADD, dl, VT, Node->getOperand(0), Tmp1)); 3567 break; 3568 } 3569 case ISD::UREM: 3570 case ISD::SREM: { 3571 EVT VT = Node->getValueType(0); 3572 bool isSigned = Node->getOpcode() == ISD::SREM; 3573 unsigned DivOpc = isSigned ? ISD::SDIV : ISD::UDIV; 3574 unsigned DivRemOpc = isSigned ? 
ISD::SDIVREM : ISD::UDIVREM; 3575 Tmp2 = Node->getOperand(0); 3576 Tmp3 = Node->getOperand(1); 3577 if (TLI.isOperationLegalOrCustom(DivRemOpc, VT) || 3578 (isDivRemLibcallAvailable(Node, isSigned, TLI) && 3579 // If div is legal, it's better to do the normal expansion 3580 !TLI.isOperationLegalOrCustom(DivOpc, Node->getValueType(0)) && 3581 useDivRem(Node, isSigned, false))) { 3582 SDVTList VTs = DAG.getVTList(VT, VT); 3583 Tmp1 = DAG.getNode(DivRemOpc, dl, VTs, Tmp2, Tmp3).getValue(1); 3584 } else if (TLI.isOperationLegalOrCustom(DivOpc, VT)) { 3585 // X % Y -> X-X/Y*Y 3586 Tmp1 = DAG.getNode(DivOpc, dl, VT, Tmp2, Tmp3); 3587 Tmp1 = DAG.getNode(ISD::MUL, dl, VT, Tmp1, Tmp3); 3588 Tmp1 = DAG.getNode(ISD::SUB, dl, VT, Tmp2, Tmp1); 3589 } else if (isSigned) 3590 Tmp1 = ExpandIntLibCall(Node, true, 3591 RTLIB::SREM_I8, 3592 RTLIB::SREM_I16, RTLIB::SREM_I32, 3593 RTLIB::SREM_I64, RTLIB::SREM_I128); 3594 else 3595 Tmp1 = ExpandIntLibCall(Node, false, 3596 RTLIB::UREM_I8, 3597 RTLIB::UREM_I16, RTLIB::UREM_I32, 3598 RTLIB::UREM_I64, RTLIB::UREM_I128); 3599 Results.push_back(Tmp1); 3600 break; 3601 } 3602 case ISD::UDIV: 3603 case ISD::SDIV: { 3604 bool isSigned = Node->getOpcode() == ISD::SDIV; 3605 unsigned DivRemOpc = isSigned ? ISD::SDIVREM : ISD::UDIVREM; 3606 EVT VT = Node->getValueType(0); 3607 SDVTList VTs = DAG.getVTList(VT, VT); 3608 if (TLI.isOperationLegalOrCustom(DivRemOpc, VT) || 3609 (isDivRemLibcallAvailable(Node, isSigned, TLI) && 3610 useDivRem(Node, isSigned, true))) 3611 Tmp1 = DAG.getNode(DivRemOpc, dl, VTs, Node->getOperand(0), 3612 Node->getOperand(1)); 3613 else if (isSigned) 3614 Tmp1 = ExpandIntLibCall(Node, true, 3615 RTLIB::SDIV_I8, 3616 RTLIB::SDIV_I16, RTLIB::SDIV_I32, 3617 RTLIB::SDIV_I64, RTLIB::SDIV_I128); 3618 else 3619 Tmp1 = ExpandIntLibCall(Node, false, 3620 RTLIB::UDIV_I8, 3621 RTLIB::UDIV_I16, RTLIB::UDIV_I32, 3622 RTLIB::UDIV_I64, RTLIB::UDIV_I128); 3623 Results.push_back(Tmp1); 3624 break; 3625 } 3626 case ISD::MULHU: 3627 case ISD::MULHS: { 3628 unsigned ExpandOpcode = Node->getOpcode() == ISD::MULHU ? ISD::UMUL_LOHI : 3629 ISD::SMUL_LOHI; 3630 EVT VT = Node->getValueType(0); 3631 SDVTList VTs = DAG.getVTList(VT, VT); 3632 assert(TLI.isOperationLegalOrCustom(ExpandOpcode, VT) && 3633 "If this wasn't legal, it shouldn't have been created!"); 3634 Tmp1 = DAG.getNode(ExpandOpcode, dl, VTs, Node->getOperand(0), 3635 Node->getOperand(1)); 3636 Results.push_back(Tmp1.getValue(1)); 3637 break; 3638 } 3639 case ISD::SDIVREM: 3640 case ISD::UDIVREM: 3641 // Expand into divrem libcall 3642 ExpandDivRemLibCall(Node, Results); 3643 break; 3644 case ISD::MUL: { 3645 EVT VT = Node->getValueType(0); 3646 SDVTList VTs = DAG.getVTList(VT, VT); 3647 // See if multiply or divide can be lowered using two-result operations. 3648 // We just need the low half of the multiply; try both the signed 3649 // and unsigned forms. If the target supports both SMUL_LOHI and 3650 // UMUL_LOHI, form a preference by checking which forms of plain 3651 // MULH it supports. 
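    // i.e. prefer a *MUL_LOHI whose corresponding plain MULH is not legal;
    // ties go to SMUL_LOHI.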
3652 bool HasSMUL_LOHI = TLI.isOperationLegalOrCustom(ISD::SMUL_LOHI, VT); 3653 bool HasUMUL_LOHI = TLI.isOperationLegalOrCustom(ISD::UMUL_LOHI, VT); 3654 bool HasMULHS = TLI.isOperationLegalOrCustom(ISD::MULHS, VT); 3655 bool HasMULHU = TLI.isOperationLegalOrCustom(ISD::MULHU, VT); 3656 unsigned OpToUse = 0; 3657 if (HasSMUL_LOHI && !HasMULHS) { 3658 OpToUse = ISD::SMUL_LOHI; 3659 } else if (HasUMUL_LOHI && !HasMULHU) { 3660 OpToUse = ISD::UMUL_LOHI; 3661 } else if (HasSMUL_LOHI) { 3662 OpToUse = ISD::SMUL_LOHI; 3663 } else if (HasUMUL_LOHI) { 3664 OpToUse = ISD::UMUL_LOHI; 3665 } 3666 if (OpToUse) { 3667 Results.push_back(DAG.getNode(OpToUse, dl, VTs, Node->getOperand(0), 3668 Node->getOperand(1))); 3669 break; 3670 } 3671 3672 SDValue Lo, Hi; 3673 EVT HalfType = VT.getHalfSizedIntegerVT(*DAG.getContext()); 3674 if (TLI.isOperationLegalOrCustom(ISD::ZERO_EXTEND, VT) && 3675 TLI.isOperationLegalOrCustom(ISD::ANY_EXTEND, VT) && 3676 TLI.isOperationLegalOrCustom(ISD::SHL, VT) && 3677 TLI.isOperationLegalOrCustom(ISD::OR, VT) && 3678 TLI.expandMUL(Node, Lo, Hi, HalfType, DAG)) { 3679 Lo = DAG.getNode(ISD::ZERO_EXTEND, dl, VT, Lo); 3680 Hi = DAG.getNode(ISD::ANY_EXTEND, dl, VT, Hi); 3681 SDValue Shift = DAG.getConstant(HalfType.getSizeInBits(), 3682 TLI.getShiftAmountTy(HalfType)); 3683 Hi = DAG.getNode(ISD::SHL, dl, VT, Hi, Shift); 3684 Results.push_back(DAG.getNode(ISD::OR, dl, VT, Lo, Hi)); 3685 break; 3686 } 3687 3688 Tmp1 = ExpandIntLibCall(Node, false, 3689 RTLIB::MUL_I8, 3690 RTLIB::MUL_I16, RTLIB::MUL_I32, 3691 RTLIB::MUL_I64, RTLIB::MUL_I128); 3692 Results.push_back(Tmp1); 3693 break; 3694 } 3695 case ISD::SADDO: 3696 case ISD::SSUBO: { 3697 SDValue LHS = Node->getOperand(0); 3698 SDValue RHS = Node->getOperand(1); 3699 SDValue Sum = DAG.getNode(Node->getOpcode() == ISD::SADDO ? 3700 ISD::ADD : ISD::SUB, dl, LHS.getValueType(), 3701 LHS, RHS); 3702 Results.push_back(Sum); 3703 EVT ResultType = Node->getValueType(1); 3704 EVT OType = getSetCCResultType(Node->getValueType(0)); 3705 3706 SDValue Zero = DAG.getConstant(0, LHS.getValueType()); 3707 3708 // LHSSign -> LHS >= 0 3709 // RHSSign -> RHS >= 0 3710 // SumSign -> Sum >= 0 3711 // 3712 // Add: 3713 // Overflow -> (LHSSign == RHSSign) && (LHSSign != SumSign) 3714 // Sub: 3715 // Overflow -> (LHSSign != RHSSign) && (LHSSign != SumSign) 3716 // 3717 SDValue LHSSign = DAG.getSetCC(dl, OType, LHS, Zero, ISD::SETGE); 3718 SDValue RHSSign = DAG.getSetCC(dl, OType, RHS, Zero, ISD::SETGE); 3719 SDValue SignsMatch = DAG.getSetCC(dl, OType, LHSSign, RHSSign, 3720 Node->getOpcode() == ISD::SADDO ? 3721 ISD::SETEQ : ISD::SETNE); 3722 3723 SDValue SumSign = DAG.getSetCC(dl, OType, Sum, Zero, ISD::SETGE); 3724 SDValue SumSignNE = DAG.getSetCC(dl, OType, LHSSign, SumSign, ISD::SETNE); 3725 3726 SDValue Cmp = DAG.getNode(ISD::AND, dl, OType, SignsMatch, SumSignNE); 3727 Results.push_back(DAG.getBoolExtOrTrunc(Cmp, dl, ResultType, ResultType)); 3728 break; 3729 } 3730 case ISD::UADDO: 3731 case ISD::USUBO: { 3732 SDValue LHS = Node->getOperand(0); 3733 SDValue RHS = Node->getOperand(1); 3734 SDValue Sum = DAG.getNode(Node->getOpcode() == ISD::UADDO ? 3735 ISD::ADD : ISD::SUB, dl, LHS.getValueType(), 3736 LHS, RHS); 3737 Results.push_back(Sum); 3738 3739 EVT ResultType = Node->getValueType(1); 3740 EVT SetCCType = getSetCCResultType(Node->getValueType(0)); 3741 ISD::CondCode CC 3742 = Node->getOpcode() == ISD::UADDO ? 
ISD::SETULT : ISD::SETUGT; 3743 SDValue SetCC = DAG.getSetCC(dl, SetCCType, Sum, LHS, CC); 3744 3745 Results.push_back(DAG.getBoolExtOrTrunc(SetCC, dl, ResultType, ResultType)); 3746 break; 3747 } 3748 case ISD::UMULO: 3749 case ISD::SMULO: { 3750 EVT VT = Node->getValueType(0); 3751 EVT WideVT = EVT::getIntegerVT(*DAG.getContext(), VT.getSizeInBits() * 2); 3752 SDValue LHS = Node->getOperand(0); 3753 SDValue RHS = Node->getOperand(1); 3754 SDValue BottomHalf; 3755 SDValue TopHalf; 3756 static const unsigned Ops[2][3] = 3757 { { ISD::MULHU, ISD::UMUL_LOHI, ISD::ZERO_EXTEND }, 3758 { ISD::MULHS, ISD::SMUL_LOHI, ISD::SIGN_EXTEND }}; 3759 bool isSigned = Node->getOpcode() == ISD::SMULO; 3760 if (TLI.isOperationLegalOrCustom(Ops[isSigned][0], VT)) { 3761 BottomHalf = DAG.getNode(ISD::MUL, dl, VT, LHS, RHS); 3762 TopHalf = DAG.getNode(Ops[isSigned][0], dl, VT, LHS, RHS); 3763 } else if (TLI.isOperationLegalOrCustom(Ops[isSigned][1], VT)) { 3764 BottomHalf = DAG.getNode(Ops[isSigned][1], dl, DAG.getVTList(VT, VT), LHS, 3765 RHS); 3766 TopHalf = BottomHalf.getValue(1); 3767 } else if (TLI.isTypeLegal(WideVT)) { 3768 LHS = DAG.getNode(Ops[isSigned][2], dl, WideVT, LHS); 3769 RHS = DAG.getNode(Ops[isSigned][2], dl, WideVT, RHS); 3770 Tmp1 = DAG.getNode(ISD::MUL, dl, WideVT, LHS, RHS); 3771 BottomHalf = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, VT, Tmp1, 3772 DAG.getIntPtrConstant(0)); 3773 TopHalf = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, VT, Tmp1, 3774 DAG.getIntPtrConstant(1)); 3775 } else { 3776 // We can fall back to a libcall with an illegal type for the MUL if we 3777 // have a libcall big enough. 3778 // Also, we can fall back to a division in some cases, but that's a big 3779 // performance hit in the general case. 3780 RTLIB::Libcall LC = RTLIB::UNKNOWN_LIBCALL; 3781 if (WideVT == MVT::i16) 3782 LC = RTLIB::MUL_I16; 3783 else if (WideVT == MVT::i32) 3784 LC = RTLIB::MUL_I32; 3785 else if (WideVT == MVT::i64) 3786 LC = RTLIB::MUL_I64; 3787 else if (WideVT == MVT::i128) 3788 LC = RTLIB::MUL_I128; 3789 assert(LC != RTLIB::UNKNOWN_LIBCALL && "Cannot expand this operation!"); 3790 3791 // The high part is obtained by SRA'ing all but one of the bits of low 3792 // part. 3793 unsigned LoSize = VT.getSizeInBits(); 3794 SDValue HiLHS = DAG.getNode(ISD::SRA, dl, VT, RHS, 3795 DAG.getConstant(LoSize-1, TLI.getPointerTy())); 3796 SDValue HiRHS = DAG.getNode(ISD::SRA, dl, VT, LHS, 3797 DAG.getConstant(LoSize-1, TLI.getPointerTy())); 3798 3799 // Here we're passing the 2 arguments explicitly as 4 arguments that are 3800 // pre-lowered to the correct types. This all depends upon WideVT not 3801 // being a legal type for the architecture and thus has to be split to 3802 // two arguments. 3803 SDValue Args[] = { LHS, HiLHS, RHS, HiRHS }; 3804 SDValue Ret = ExpandLibCall(LC, WideVT, Args, 4, isSigned, dl); 3805 BottomHalf = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, VT, Ret, 3806 DAG.getIntPtrConstant(0)); 3807 TopHalf = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, VT, Ret, 3808 DAG.getIntPtrConstant(1)); 3809 // Ret is a node with an illegal type. Because such things are not 3810 // generally permitted during this phase of legalization, make sure the 3811 // node has no more uses. The above EXTRACT_ELEMENT nodes should have been 3812 // folded. 
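      // The overflow flag computed below is set when the top half is not the
      // sign-extension of the bottom half (signed) or is non-zero (unsigned).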
3813 assert(Ret->use_empty() && 3814 "Unexpected uses of illegally type from expanded lib call."); 3815 } 3816 3817 if (isSigned) { 3818 Tmp1 = DAG.getConstant(VT.getSizeInBits() - 1, 3819 TLI.getShiftAmountTy(BottomHalf.getValueType())); 3820 Tmp1 = DAG.getNode(ISD::SRA, dl, VT, BottomHalf, Tmp1); 3821 TopHalf = DAG.getSetCC(dl, getSetCCResultType(VT), TopHalf, Tmp1, 3822 ISD::SETNE); 3823 } else { 3824 TopHalf = DAG.getSetCC(dl, getSetCCResultType(VT), TopHalf, 3825 DAG.getConstant(0, VT), ISD::SETNE); 3826 } 3827 Results.push_back(BottomHalf); 3828 Results.push_back(TopHalf); 3829 break; 3830 } 3831 case ISD::BUILD_PAIR: { 3832 EVT PairTy = Node->getValueType(0); 3833 Tmp1 = DAG.getNode(ISD::ZERO_EXTEND, dl, PairTy, Node->getOperand(0)); 3834 Tmp2 = DAG.getNode(ISD::ANY_EXTEND, dl, PairTy, Node->getOperand(1)); 3835 Tmp2 = DAG.getNode(ISD::SHL, dl, PairTy, Tmp2, 3836 DAG.getConstant(PairTy.getSizeInBits()/2, 3837 TLI.getShiftAmountTy(PairTy))); 3838 Results.push_back(DAG.getNode(ISD::OR, dl, PairTy, Tmp1, Tmp2)); 3839 break; 3840 } 3841 case ISD::SELECT: 3842 Tmp1 = Node->getOperand(0); 3843 Tmp2 = Node->getOperand(1); 3844 Tmp3 = Node->getOperand(2); 3845 if (Tmp1.getOpcode() == ISD::SETCC) { 3846 Tmp1 = DAG.getSelectCC(dl, Tmp1.getOperand(0), Tmp1.getOperand(1), 3847 Tmp2, Tmp3, 3848 cast<CondCodeSDNode>(Tmp1.getOperand(2))->get()); 3849 } else { 3850 Tmp1 = DAG.getSelectCC(dl, Tmp1, 3851 DAG.getConstant(0, Tmp1.getValueType()), 3852 Tmp2, Tmp3, ISD::SETNE); 3853 } 3854 Results.push_back(Tmp1); 3855 break; 3856 case ISD::BR_JT: { 3857 SDValue Chain = Node->getOperand(0); 3858 SDValue Table = Node->getOperand(1); 3859 SDValue Index = Node->getOperand(2); 3860 3861 EVT PTy = TLI.getPointerTy(); 3862 3863 const DataLayout &TD = *TLI.getDataLayout(); 3864 unsigned EntrySize = 3865 DAG.getMachineFunction().getJumpTableInfo()->getEntrySize(TD); 3866 3867 Index = DAG.getNode(ISD::MUL, dl, Index.getValueType(), 3868 Index, DAG.getConstant(EntrySize, Index.getValueType())); 3869 SDValue Addr = DAG.getNode(ISD::ADD, dl, Index.getValueType(), 3870 Index, Table); 3871 3872 EVT MemVT = EVT::getIntegerVT(*DAG.getContext(), EntrySize * 8); 3873 SDValue LD = DAG.getExtLoad(ISD::SEXTLOAD, dl, PTy, Chain, Addr, 3874 MachinePointerInfo::getJumpTable(), MemVT, 3875 false, false, false, 0); 3876 Addr = LD; 3877 if (TM.getRelocationModel() == Reloc::PIC_) { 3878 // For PIC, the sequence is: 3879 // BRIND(load(Jumptable + index) + RelocBase) 3880 // RelocBase can be JumpTable, GOT or some sort of global base. 3881 Addr = DAG.getNode(ISD::ADD, dl, PTy, Addr, 3882 TLI.getPICJumpTableRelocBase(Table, DAG)); 3883 } 3884 Tmp1 = DAG.getNode(ISD::BRIND, dl, MVT::Other, LD.getValue(1), Addr); 3885 Results.push_back(Tmp1); 3886 break; 3887 } 3888 case ISD::BRCOND: 3889 // Expand brcond's setcc into its constituent parts and create a BR_CC 3890 // Node. 3891 Tmp1 = Node->getOperand(0); 3892 Tmp2 = Node->getOperand(1); 3893 if (Tmp2.getOpcode() == ISD::SETCC) { 3894 Tmp1 = DAG.getNode(ISD::BR_CC, dl, MVT::Other, 3895 Tmp1, Tmp2.getOperand(2), 3896 Tmp2.getOperand(0), Tmp2.getOperand(1), 3897 Node->getOperand(2)); 3898 } else { 3899 // We test only the i1 bit. Skip the AND if UNDEF. 3900 Tmp3 = (Tmp2.getOpcode() == ISD::UNDEF) ? 
Tmp2 : 3901 DAG.getNode(ISD::AND, dl, Tmp2.getValueType(), Tmp2, 3902 DAG.getConstant(1, Tmp2.getValueType())); 3903 Tmp1 = DAG.getNode(ISD::BR_CC, dl, MVT::Other, Tmp1, 3904 DAG.getCondCode(ISD::SETNE), Tmp3, 3905 DAG.getConstant(0, Tmp3.getValueType()), 3906 Node->getOperand(2)); 3907 } 3908 Results.push_back(Tmp1); 3909 break; 3910 case ISD::SETCC: { 3911 Tmp1 = Node->getOperand(0); 3912 Tmp2 = Node->getOperand(1); 3913 Tmp3 = Node->getOperand(2); 3914 bool Legalized = LegalizeSetCCCondCode(Node->getValueType(0), Tmp1, Tmp2, 3915 Tmp3, NeedInvert, dl); 3916 3917 if (Legalized) { 3918 // If we expanded the SETCC by swapping LHS and RHS, or by inverting the 3919 // condition code, create a new SETCC node. 3920 if (Tmp3.getNode()) 3921 Tmp1 = DAG.getNode(ISD::SETCC, dl, Node->getValueType(0), 3922 Tmp1, Tmp2, Tmp3); 3923 3924 // If we expanded the SETCC by inverting the condition code, then wrap 3925 // the existing SETCC in a NOT to restore the intended condition. 3926 if (NeedInvert) 3927 Tmp1 = DAG.getLogicalNOT(dl, Tmp1, Tmp1->getValueType(0)); 3928 3929 Results.push_back(Tmp1); 3930 break; 3931 } 3932 3933 // Otherwise, SETCC for the given comparison type must be completely 3934 // illegal; expand it into a SELECT_CC. 3935 EVT VT = Node->getValueType(0); 3936 int TrueValue; 3937 switch (TLI.getBooleanContents(Tmp1->getValueType(0))) { 3938 case TargetLowering::ZeroOrOneBooleanContent: 3939 case TargetLowering::UndefinedBooleanContent: 3940 TrueValue = 1; 3941 break; 3942 case TargetLowering::ZeroOrNegativeOneBooleanContent: 3943 TrueValue = -1; 3944 break; 3945 } 3946 Tmp1 = DAG.getNode(ISD::SELECT_CC, dl, VT, Tmp1, Tmp2, 3947 DAG.getConstant(TrueValue, VT), DAG.getConstant(0, VT), 3948 Tmp3); 3949 Results.push_back(Tmp1); 3950 break; 3951 } 3952 case ISD::SELECT_CC: { 3953 Tmp1 = Node->getOperand(0); // LHS 3954 Tmp2 = Node->getOperand(1); // RHS 3955 Tmp3 = Node->getOperand(2); // True 3956 Tmp4 = Node->getOperand(3); // False 3957 EVT VT = Node->getValueType(0); 3958 SDValue CC = Node->getOperand(4); 3959 ISD::CondCode CCOp = cast<CondCodeSDNode>(CC)->get(); 3960 3961 if (TLI.isCondCodeLegal(CCOp, Tmp1.getSimpleValueType())) { 3962 // If the condition code is legal, then we need to expand this 3963 // node using SETCC and SELECT. 3964 EVT CmpVT = Tmp1.getValueType(); 3965 assert(!TLI.isOperationExpand(ISD::SELECT, VT) && 3966 "Cannot expand ISD::SELECT_CC when ISD::SELECT also needs to be " 3967 "expanded."); 3968 EVT CCVT = TLI.getSetCCResultType(*DAG.getContext(), CmpVT); 3969 SDValue Cond = DAG.getNode(ISD::SETCC, dl, CCVT, Tmp1, Tmp2, CC); 3970 Results.push_back(DAG.getSelect(dl, VT, Cond, Tmp3, Tmp4)); 3971 break; 3972 } 3973 3974 // SELECT_CC is legal, so the condition code must not be. 3975 bool Legalized = false; 3976 // Try to legalize by inverting the condition. This is for targets that 3977 // might support an ordered version of a condition, but not the unordered 3978 // version (or vice versa). 3979 ISD::CondCode InvCC = ISD::getSetCCInverse(CCOp, 3980 Tmp1.getValueType().isInteger()); 3981 if (TLI.isCondCodeLegal(InvCC, Tmp1.getSimpleValueType())) { 3982 // Use the new condition code and swap true and false 3983 Legalized = true; 3984 Tmp1 = DAG.getSelectCC(dl, Tmp1, Tmp2, Tmp4, Tmp3, InvCC); 3985 } else { 3986 // If The inverse is not legal, then try to swap the arguments using 3987 // the inverse condition code. 
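      // getSetCCSwappedOperands gives the operand-swapped form of the inverted
      // condition (e.g. SETLT -> inverse SETGE -> swapped SETLE), so swapping
      // LHS/RHS and the select arms below preserves the original semantics.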
3988 ISD::CondCode SwapInvCC = ISD::getSetCCSwappedOperands(InvCC); 3989 if (TLI.isCondCodeLegal(SwapInvCC, Tmp1.getSimpleValueType())) { 3990 // The swapped inverse condition is legal, so swap true and false, 3991 // lhs and rhs. 3992 Legalized = true; 3993 Tmp1 = DAG.getSelectCC(dl, Tmp2, Tmp1, Tmp4, Tmp3, SwapInvCC); 3994 } 3995 } 3996 3997 if (!Legalized) { 3998 Legalized = LegalizeSetCCCondCode( 3999 getSetCCResultType(Tmp1.getValueType()), Tmp1, Tmp2, CC, NeedInvert, 4000 dl); 4001 4002 assert(Legalized && "Can't legalize SELECT_CC with legal condition!"); 4003 4004 // If we expanded the SETCC by inverting the condition code, then swap 4005 // the True/False operands to match. 4006 if (NeedInvert) 4007 std::swap(Tmp3, Tmp4); 4008 4009 // If we expanded the SETCC by swapping LHS and RHS, or by inverting the 4010 // condition code, create a new SELECT_CC node. 4011 if (CC.getNode()) { 4012 Tmp1 = DAG.getNode(ISD::SELECT_CC, dl, Node->getValueType(0), 4013 Tmp1, Tmp2, Tmp3, Tmp4, CC); 4014 } else { 4015 Tmp2 = DAG.getConstant(0, Tmp1.getValueType()); 4016 CC = DAG.getCondCode(ISD::SETNE); 4017 Tmp1 = DAG.getNode(ISD::SELECT_CC, dl, Node->getValueType(0), Tmp1, 4018 Tmp2, Tmp3, Tmp4, CC); 4019 } 4020 } 4021 Results.push_back(Tmp1); 4022 break; 4023 } 4024 case ISD::BR_CC: { 4025 Tmp1 = Node->getOperand(0); // Chain 4026 Tmp2 = Node->getOperand(2); // LHS 4027 Tmp3 = Node->getOperand(3); // RHS 4028 Tmp4 = Node->getOperand(1); // CC 4029 4030 bool Legalized = LegalizeSetCCCondCode(getSetCCResultType( 4031 Tmp2.getValueType()), Tmp2, Tmp3, Tmp4, NeedInvert, dl); 4032 (void)Legalized; 4033 assert(Legalized && "Can't legalize BR_CC with legal condition!"); 4034 4035 // If we expanded the SETCC by inverting the condition code, then wrap 4036 // the existing SETCC in a NOT to restore the intended condition. 4037 if (NeedInvert) 4038 Tmp4 = DAG.getNOT(dl, Tmp4, Tmp4->getValueType(0)); 4039 4040 // If we expanded the SETCC by swapping LHS and RHS, create a new BR_CC 4041 // node. 4042 if (Tmp4.getNode()) { 4043 Tmp1 = DAG.getNode(ISD::BR_CC, dl, Node->getValueType(0), Tmp1, 4044 Tmp4, Tmp2, Tmp3, Node->getOperand(4)); 4045 } else { 4046 Tmp3 = DAG.getConstant(0, Tmp2.getValueType()); 4047 Tmp4 = DAG.getCondCode(ISD::SETNE); 4048 Tmp1 = DAG.getNode(ISD::BR_CC, dl, Node->getValueType(0), Tmp1, Tmp4, 4049 Tmp2, Tmp3, Node->getOperand(4)); 4050 } 4051 Results.push_back(Tmp1); 4052 break; 4053 } 4054 case ISD::BUILD_VECTOR: 4055 Results.push_back(ExpandBUILD_VECTOR(Node)); 4056 break; 4057 case ISD::SRA: 4058 case ISD::SRL: 4059 case ISD::SHL: { 4060 // Scalarize vector SRA/SRL/SHL. 
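    // Each lane of the value and of the shift-amount vector is extracted,
    // shifted as a scalar, and the lanes are reassembled with BUILD_VECTOR.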
4061 EVT VT = Node->getValueType(0); 4062 assert(VT.isVector() && "Unable to legalize non-vector shift"); 4063 assert(TLI.isTypeLegal(VT.getScalarType())&& "Element type must be legal"); 4064 unsigned NumElem = VT.getVectorNumElements(); 4065 4066 SmallVector<SDValue, 8> Scalars; 4067 for (unsigned Idx = 0; Idx < NumElem; Idx++) { 4068 SDValue Ex = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, 4069 VT.getScalarType(), 4070 Node->getOperand(0), DAG.getConstant(Idx, 4071 TLI.getVectorIdxTy())); 4072 SDValue Sh = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, 4073 VT.getScalarType(), 4074 Node->getOperand(1), DAG.getConstant(Idx, 4075 TLI.getVectorIdxTy())); 4076 Scalars.push_back(DAG.getNode(Node->getOpcode(), dl, 4077 VT.getScalarType(), Ex, Sh)); 4078 } 4079 SDValue Result = 4080 DAG.getNode(ISD::BUILD_VECTOR, dl, Node->getValueType(0), Scalars); 4081 ReplaceNode(SDValue(Node, 0), Result); 4082 break; 4083 } 4084 case ISD::GLOBAL_OFFSET_TABLE: 4085 case ISD::GlobalAddress: 4086 case ISD::GlobalTLSAddress: 4087 case ISD::ExternalSymbol: 4088 case ISD::ConstantPool: 4089 case ISD::JumpTable: 4090 case ISD::INTRINSIC_W_CHAIN: 4091 case ISD::INTRINSIC_WO_CHAIN: 4092 case ISD::INTRINSIC_VOID: 4093 // FIXME: Custom lowering for these operations shouldn't return null! 4094 break; 4095 } 4096 4097 // Replace the original node with the legalized result. 4098 if (!Results.empty()) 4099 ReplaceNode(Node, Results.data()); 4100 } 4101 4102 void SelectionDAGLegalize::PromoteNode(SDNode *Node) { 4103 SmallVector<SDValue, 8> Results; 4104 MVT OVT = Node->getSimpleValueType(0); 4105 if (Node->getOpcode() == ISD::UINT_TO_FP || 4106 Node->getOpcode() == ISD::SINT_TO_FP || 4107 Node->getOpcode() == ISD::SETCC) { 4108 OVT = Node->getOperand(0).getSimpleValueType(); 4109 } 4110 MVT NVT = TLI.getTypeToPromoteTo(Node->getOpcode(), OVT); 4111 SDLoc dl(Node); 4112 SDValue Tmp1, Tmp2, Tmp3; 4113 switch (Node->getOpcode()) { 4114 case ISD::CTTZ: 4115 case ISD::CTTZ_ZERO_UNDEF: 4116 case ISD::CTLZ: 4117 case ISD::CTLZ_ZERO_UNDEF: 4118 case ISD::CTPOP: 4119 // Zero extend the argument. 4120 Tmp1 = DAG.getNode(ISD::ZERO_EXTEND, dl, NVT, Node->getOperand(0)); 4121 // Perform the larger operation. For CTPOP and CTTZ_ZERO_UNDEF, this is 4122 // already the correct result. 4123 Tmp1 = DAG.getNode(Node->getOpcode(), dl, NVT, Tmp1); 4124 if (Node->getOpcode() == ISD::CTTZ) { 4125 // FIXME: This should set a bit in the zero extended value instead. 
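      // If the promoted CTTZ ran off the end of the zero-extended value (i.e.
      // the original OVT input was zero), clamp the result to OVT's bit width.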
4126 Tmp2 = DAG.getSetCC(dl, getSetCCResultType(NVT), 4127 Tmp1, DAG.getConstant(NVT.getSizeInBits(), NVT), 4128 ISD::SETEQ); 4129 Tmp1 = DAG.getSelect(dl, NVT, Tmp2, 4130 DAG.getConstant(OVT.getSizeInBits(), NVT), Tmp1); 4131 } else if (Node->getOpcode() == ISD::CTLZ || 4132 Node->getOpcode() == ISD::CTLZ_ZERO_UNDEF) { 4133 // Tmp1 = Tmp1 - (sizeinbits(NVT) - sizeinbits(Old VT)) 4134 Tmp1 = DAG.getNode(ISD::SUB, dl, NVT, Tmp1, 4135 DAG.getConstant(NVT.getSizeInBits() - 4136 OVT.getSizeInBits(), NVT)); 4137 } 4138 Results.push_back(DAG.getNode(ISD::TRUNCATE, dl, OVT, Tmp1)); 4139 break; 4140 case ISD::BSWAP: { 4141 unsigned DiffBits = NVT.getSizeInBits() - OVT.getSizeInBits(); 4142 Tmp1 = DAG.getNode(ISD::ZERO_EXTEND, dl, NVT, Node->getOperand(0)); 4143 Tmp1 = DAG.getNode(ISD::BSWAP, dl, NVT, Tmp1); 4144 Tmp1 = DAG.getNode(ISD::SRL, dl, NVT, Tmp1, 4145 DAG.getConstant(DiffBits, TLI.getShiftAmountTy(NVT))); 4146 Results.push_back(Tmp1); 4147 break; 4148 } 4149 case ISD::FP_TO_UINT: 4150 case ISD::FP_TO_SINT: 4151 Tmp1 = PromoteLegalFP_TO_INT(Node->getOperand(0), Node->getValueType(0), 4152 Node->getOpcode() == ISD::FP_TO_SINT, dl); 4153 Results.push_back(Tmp1); 4154 break; 4155 case ISD::UINT_TO_FP: 4156 case ISD::SINT_TO_FP: 4157 Tmp1 = PromoteLegalINT_TO_FP(Node->getOperand(0), Node->getValueType(0), 4158 Node->getOpcode() == ISD::SINT_TO_FP, dl); 4159 Results.push_back(Tmp1); 4160 break; 4161 case ISD::VAARG: { 4162 SDValue Chain = Node->getOperand(0); // Get the chain. 4163 SDValue Ptr = Node->getOperand(1); // Get the pointer. 4164 4165 unsigned TruncOp; 4166 if (OVT.isVector()) { 4167 TruncOp = ISD::BITCAST; 4168 } else { 4169 assert(OVT.isInteger() 4170 && "VAARG promotion is supported only for vectors or integer types"); 4171 TruncOp = ISD::TRUNCATE; 4172 } 4173 4174 // Perform the larger operation, then convert back 4175 Tmp1 = DAG.getVAArg(NVT, dl, Chain, Ptr, Node->getOperand(2), 4176 Node->getConstantOperandVal(3)); 4177 Chain = Tmp1.getValue(1); 4178 4179 Tmp2 = DAG.getNode(TruncOp, dl, OVT, Tmp1); 4180 4181 // Modified the chain result - switch anything that used the old chain to 4182 // use the new one. 4183 DAG.ReplaceAllUsesOfValueWith(SDValue(Node, 0), Tmp2); 4184 DAG.ReplaceAllUsesOfValueWith(SDValue(Node, 1), Chain); 4185 if (UpdatedNodes) { 4186 UpdatedNodes->insert(Tmp2.getNode()); 4187 UpdatedNodes->insert(Chain.getNode()); 4188 } 4189 ReplacedNode(Node); 4190 break; 4191 } 4192 case ISD::AND: 4193 case ISD::OR: 4194 case ISD::XOR: { 4195 unsigned ExtOp, TruncOp; 4196 if (OVT.isVector()) { 4197 ExtOp = ISD::BITCAST; 4198 TruncOp = ISD::BITCAST; 4199 } else { 4200 assert(OVT.isInteger() && "Cannot promote logic operation"); 4201 ExtOp = ISD::ANY_EXTEND; 4202 TruncOp = ISD::TRUNCATE; 4203 } 4204 // Promote each of the values to the new type. 
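    // Bitwise ops only depend on the low OVT bits, so ANY_EXTEND/TRUNCATE (or
    // a plain bitcast for the vector case) round-trips the value safely.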
4205 Tmp1 = DAG.getNode(ExtOp, dl, NVT, Node->getOperand(0)); 4206 Tmp2 = DAG.getNode(ExtOp, dl, NVT, Node->getOperand(1)); 4207 // Perform the larger operation, then convert back 4208 Tmp1 = DAG.getNode(Node->getOpcode(), dl, NVT, Tmp1, Tmp2); 4209 Results.push_back(DAG.getNode(TruncOp, dl, OVT, Tmp1)); 4210 break; 4211 } 4212 case ISD::SELECT: { 4213 unsigned ExtOp, TruncOp; 4214 if (Node->getValueType(0).isVector() || 4215 Node->getValueType(0).getSizeInBits() == NVT.getSizeInBits()) { 4216 ExtOp = ISD::BITCAST; 4217 TruncOp = ISD::BITCAST; 4218 } else if (Node->getValueType(0).isInteger()) { 4219 ExtOp = ISD::ANY_EXTEND; 4220 TruncOp = ISD::TRUNCATE; 4221 } else { 4222 ExtOp = ISD::FP_EXTEND; 4223 TruncOp = ISD::FP_ROUND; 4224 } 4225 Tmp1 = Node->getOperand(0); 4226 // Promote each of the values to the new type. 4227 Tmp2 = DAG.getNode(ExtOp, dl, NVT, Node->getOperand(1)); 4228 Tmp3 = DAG.getNode(ExtOp, dl, NVT, Node->getOperand(2)); 4229 // Perform the larger operation, then round down. 4230 Tmp1 = DAG.getSelect(dl, NVT, Tmp1, Tmp2, Tmp3); 4231 if (TruncOp != ISD::FP_ROUND) 4232 Tmp1 = DAG.getNode(TruncOp, dl, Node->getValueType(0), Tmp1); 4233 else 4234 Tmp1 = DAG.getNode(TruncOp, dl, Node->getValueType(0), Tmp1, 4235 DAG.getIntPtrConstant(0)); 4236 Results.push_back(Tmp1); 4237 break; 4238 } 4239 case ISD::VECTOR_SHUFFLE: { 4240 ArrayRef<int> Mask = cast<ShuffleVectorSDNode>(Node)->getMask(); 4241 4242 // Cast the two input vectors. 4243 Tmp1 = DAG.getNode(ISD::BITCAST, dl, NVT, Node->getOperand(0)); 4244 Tmp2 = DAG.getNode(ISD::BITCAST, dl, NVT, Node->getOperand(1)); 4245 4246 // Convert the shuffle mask to the right # elements. 4247 Tmp1 = ShuffleWithNarrowerEltType(NVT, OVT, dl, Tmp1, Tmp2, Mask); 4248 Tmp1 = DAG.getNode(ISD::BITCAST, dl, OVT, Tmp1); 4249 Results.push_back(Tmp1); 4250 break; 4251 } 4252 case ISD::SETCC: { 4253 unsigned ExtOp = ISD::FP_EXTEND; 4254 if (NVT.isInteger()) { 4255 ISD::CondCode CCCode = 4256 cast<CondCodeSDNode>(Node->getOperand(2))->get(); 4257 ExtOp = isSignedIntSetCC(CCCode) ? ISD::SIGN_EXTEND : ISD::ZERO_EXTEND; 4258 } 4259 Tmp1 = DAG.getNode(ExtOp, dl, NVT, Node->getOperand(0)); 4260 Tmp2 = DAG.getNode(ExtOp, dl, NVT, Node->getOperand(1)); 4261 Results.push_back(DAG.getNode(ISD::SETCC, dl, Node->getValueType(0), 4262 Tmp1, Tmp2, Node->getOperand(2))); 4263 break; 4264 } 4265 case ISD::FDIV: 4266 case ISD::FREM: 4267 case ISD::FPOW: { 4268 Tmp1 = DAG.getNode(ISD::FP_EXTEND, dl, NVT, Node->getOperand(0)); 4269 Tmp2 = DAG.getNode(ISD::FP_EXTEND, dl, NVT, Node->getOperand(1)); 4270 Tmp3 = DAG.getNode(Node->getOpcode(), dl, NVT, Tmp1, Tmp2); 4271 Results.push_back(DAG.getNode(ISD::FP_ROUND, dl, OVT, 4272 Tmp3, DAG.getIntPtrConstant(0))); 4273 break; 4274 } 4275 case ISD::FLOG2: 4276 case ISD::FEXP2: 4277 case ISD::FLOG: 4278 case ISD::FEXP: { 4279 Tmp1 = DAG.getNode(ISD::FP_EXTEND, dl, NVT, Node->getOperand(0)); 4280 Tmp2 = DAG.getNode(Node->getOpcode(), dl, NVT, Tmp1); 4281 Results.push_back(DAG.getNode(ISD::FP_ROUND, dl, OVT, 4282 Tmp2, DAG.getIntPtrConstant(0))); 4283 break; 4284 } 4285 } 4286 4287 // Replace the original node with the legalized result. 4288 if (!Results.empty()) 4289 ReplaceNode(Node, Results.data()); 4290 } 4291 4292 // SelectionDAG::Legalize - This is the entry point for the file. 4293 // 4294 void SelectionDAG::Legalize() { 4295 AssignTopologicalOrder(); 4296 4297 SmallPtrSet<SDNode *, 16> LegalizedNodes; 4298 SelectionDAGLegalize Legalizer(*this, LegalizedNodes); 4299 4300 // Visit all the nodes. 
We start in topological order, so that we see 4301 // nodes with their original operands intact. Legalization can produce 4302 // new nodes which may themselves need to be legalized. Iterate until all 4303 // nodes have been legalized. 4304 for (;;) { 4305 bool AnyLegalized = false; 4306 for (auto NI = allnodes_end(); NI != allnodes_begin();) { 4307 --NI; 4308 4309 SDNode *N = NI; 4310 if (N->use_empty() && N != getRoot().getNode()) { 4311 ++NI; 4312 DeleteNode(N); 4313 continue; 4314 } 4315 4316 if (LegalizedNodes.insert(N)) { 4317 AnyLegalized = true; 4318 Legalizer.LegalizeOp(N); 4319 4320 if (N->use_empty() && N != getRoot().getNode()) { 4321 ++NI; 4322 DeleteNode(N); 4323 } 4324 } 4325 } 4326 if (!AnyLegalized) 4327 break; 4328 4329 } 4330 4331 // Remove dead nodes now. 4332 RemoveDeadNodes(); 4333 } 4334 4335 bool SelectionDAG::LegalizeOp(SDNode *N, 4336 SmallSetVector<SDNode *, 16> &UpdatedNodes) { 4337 SmallPtrSet<SDNode *, 16> LegalizedNodes; 4338 SelectionDAGLegalize Legalizer(*this, LegalizedNodes, &UpdatedNodes); 4339 4340 // Directly insert the node in question, and legalize it. This will recurse 4341 // as needed through operands. 4342 LegalizedNodes.insert(N); 4343 Legalizer.LegalizeOp(N); 4344 4345 return LegalizedNodes.count(N); 4346 } 4347