//===-- SelectionDAG.cpp - Implement the SelectionDAG data structures -----===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This implements the SelectionDAG class.
//
//===----------------------------------------------------------------------===//

#include "llvm/CodeGen/SelectionDAG.h"
#include "SDNodeDbgValue.h"
#include "llvm/ADT/SetVector.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/SmallSet.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/StringExtras.h"
#include "llvm/Analysis/ValueTracking.h"
#include "llvm/CodeGen/MachineBasicBlock.h"
#include "llvm/CodeGen/MachineConstantPool.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineModuleInfo.h"
#include "llvm/IR/CallingConv.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/DebugInfo.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/GlobalAlias.h"
#include "llvm/IR/GlobalVariable.h"
#include "llvm/IR/Intrinsics.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/ManagedStatic.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/Support/Mutex.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Target/TargetInstrInfo.h"
#include "llvm/Target/TargetIntrinsicInfo.h"
#include "llvm/Target/TargetLowering.h"
#include "llvm/Target/TargetMachine.h"
#include "llvm/Target/TargetOptions.h"
#include "llvm/Target/TargetRegisterInfo.h"
#include "llvm/Target/TargetSelectionDAGInfo.h"
#include <algorithm>
#include <cmath>
using namespace llvm;

/// makeVTList - Return an instance of the SDVTList struct initialized with the
/// specified members.
static SDVTList makeVTList(const EVT *VTs, unsigned NumVTs) {
  SDVTList Res = {VTs, NumVTs};
  return Res;
}

// Default null implementations of the callbacks.
void SelectionDAG::DAGUpdateListener::NodeDeleted(SDNode*, SDNode*) {}
void SelectionDAG::DAGUpdateListener::NodeUpdated(SDNode*) {}

//===----------------------------------------------------------------------===//
//                          ConstantFPSDNode Class
//===----------------------------------------------------------------------===//

/// isExactlyValue - We don't rely on operator== working on double values, as
/// it returns true for things that are clearly not equal, like -0.0 and 0.0.
/// As such, this method can be used to do an exact bit-for-bit comparison of
/// two floating point values.
bool ConstantFPSDNode::isExactlyValue(const APFloat& V) const {
  return getValueAPF().bitwiseIsEqual(V);
}

bool ConstantFPSDNode::isValueValidForType(EVT VT,
                                           const APFloat& Val) {
  assert(VT.isFloatingPoint() && "Can only convert between FP types");

  // convert modifies in place, so make a copy.
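  // The value is exactly representable in VT if and only if converting it
  // into VT's semantics loses no information.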
  APFloat Val2 = APFloat(Val);
  bool losesInfo;
  (void) Val2.convert(SelectionDAG::EVTToAPFloatSemantics(VT),
                      APFloat::rmNearestTiesToEven,
                      &losesInfo);
  return !losesInfo;
}

//===----------------------------------------------------------------------===//
//                              ISD Namespace
//===----------------------------------------------------------------------===//

/// isBuildVectorAllOnes - Return true if the specified node is a
/// BUILD_VECTOR where all of the elements are ~0 or undef.
bool ISD::isBuildVectorAllOnes(const SDNode *N) {
  // Look through a bit convert.
  if (N->getOpcode() == ISD::BITCAST)
    N = N->getOperand(0).getNode();

  if (N->getOpcode() != ISD::BUILD_VECTOR) return false;

  unsigned i = 0, e = N->getNumOperands();

  // Skip over all of the undef values.
  while (i != e && N->getOperand(i).getOpcode() == ISD::UNDEF)
    ++i;

  // Do not accept an all-undef vector.
  if (i == e) return false;

  // Do not accept build_vectors that aren't all constants or which have non-~0
  // elements. We have to be a bit careful here, as the type of the constant
  // may not be the same as the type of the vector elements due to type
  // legalization (the elements are promoted to a legal type for the target
  // and a vector of a type may be legal when the base element type is not).
  // We only want to check enough bits to cover the vector elements, because
  // we care if the resultant vector is all ones, not whether the individual
  // constants are.
  SDValue NotZero = N->getOperand(i);
  unsigned EltSize = N->getValueType(0).getVectorElementType().getSizeInBits();
  if (ConstantSDNode *CN = dyn_cast<ConstantSDNode>(NotZero)) {
    if (CN->getAPIntValue().countTrailingOnes() < EltSize)
      return false;
  } else if (ConstantFPSDNode *CFPN = dyn_cast<ConstantFPSDNode>(NotZero)) {
    if (CFPN->getValueAPF().bitcastToAPInt().countTrailingOnes() < EltSize)
      return false;
  } else
    return false;

  // Okay, we have at least one ~0 value, check to see if the rest match or are
  // undefs. Even with the above element type twiddling, this should be OK, as
  // the same type legalization should have applied to all the elements.
  for (++i; i != e; ++i)
    if (N->getOperand(i) != NotZero &&
        N->getOperand(i).getOpcode() != ISD::UNDEF)
      return false;
  return true;
}


/// isBuildVectorAllZeros - Return true if the specified node is a
/// BUILD_VECTOR where all of the elements are 0 or undef.
bool ISD::isBuildVectorAllZeros(const SDNode *N) {
  // Look through a bit convert.
  if (N->getOpcode() == ISD::BITCAST)
    N = N->getOperand(0).getNode();

  if (N->getOpcode() != ISD::BUILD_VECTOR) return false;

  unsigned i = 0, e = N->getNumOperands();

  // Skip over all of the undef values.
  while (i != e && N->getOperand(i).getOpcode() == ISD::UNDEF)
    ++i;

  // Do not accept an all-undef vector.
  if (i == e) return false;

  // Do not accept build_vectors that aren't all constants or which have non-0
  // elements.
  SDValue Zero = N->getOperand(i);
  if (ConstantSDNode *CN = dyn_cast<ConstantSDNode>(Zero)) {
    if (!CN->isNullValue())
      return false;
  } else if (ConstantFPSDNode *CFPN = dyn_cast<ConstantFPSDNode>(Zero)) {
    if (!CFPN->getValueAPF().isPosZero())
      return false;
  } else
    return false;

  // Okay, we have at least one 0 value, check to see if the rest match or are
  // undefs.
  for (++i; i != e; ++i)
    if (N->getOperand(i) != Zero &&
        N->getOperand(i).getOpcode() != ISD::UNDEF)
      return false;
  return true;
}

/// \brief Return true if the specified node is a BUILD_VECTOR node of
/// all ConstantSDNode or undef.
bool ISD::isBuildVectorOfConstantSDNodes(const SDNode *N) {
  if (N->getOpcode() != ISD::BUILD_VECTOR)
    return false;

  for (unsigned i = 0, e = N->getNumOperands(); i != e; ++i) {
    SDValue Op = N->getOperand(i);
    if (Op.getOpcode() == ISD::UNDEF)
      continue;
    if (!isa<ConstantSDNode>(Op))
      return false;
  }
  return true;
}

/// isScalarToVector - Return true if the specified node is a
/// ISD::SCALAR_TO_VECTOR node or a BUILD_VECTOR node where only the low
/// element is not an undef.
bool ISD::isScalarToVector(const SDNode *N) {
  if (N->getOpcode() == ISD::SCALAR_TO_VECTOR)
    return true;

  if (N->getOpcode() != ISD::BUILD_VECTOR)
    return false;
  if (N->getOperand(0).getOpcode() == ISD::UNDEF)
    return false;
  unsigned NumElems = N->getNumOperands();
  if (NumElems == 1)
    return false;
  for (unsigned i = 1; i < NumElems; ++i) {
    SDValue V = N->getOperand(i);
    if (V.getOpcode() != ISD::UNDEF)
      return false;
  }
  return true;
}

/// allOperandsUndef - Return true if the node has at least one operand
/// and all operands of the specified node are ISD::UNDEF.
bool ISD::allOperandsUndef(const SDNode *N) {
  // Return false if the node has no operands.
  // This is "logically inconsistent" with the definition of "all" but
  // is probably the desired behavior.
  if (N->getNumOperands() == 0)
    return false;

  for (unsigned i = 0, e = N->getNumOperands(); i != e; ++i)
    if (N->getOperand(i).getOpcode() != ISD::UNDEF)
      return false;

  return true;
}

ISD::NodeType ISD::getExtForLoadExtType(ISD::LoadExtType ExtType) {
  switch (ExtType) {
  case ISD::EXTLOAD:
    return ISD::ANY_EXTEND;
  case ISD::SEXTLOAD:
    return ISD::SIGN_EXTEND;
  case ISD::ZEXTLOAD:
    return ISD::ZERO_EXTEND;
  default:
    break;
  }

  llvm_unreachable("Invalid LoadExtType");
}

/// getSetCCSwappedOperands - Return the operation corresponding to (Y op X)
/// when given the operation for (X op Y).
ISD::CondCode ISD::getSetCCSwappedOperands(ISD::CondCode Operation) {
  // To perform this operation, we just need to swap the L and G bits of the
  // operation.
  unsigned OldL = (Operation >> 2) & 1;
  unsigned OldG = (Operation >> 1) & 1;
  return ISD::CondCode((Operation & ~6) |  // Keep the N, U, E bits
                       (OldL << 1) |       // New G bit
                       (OldG << 2));       // New L bit.
}

/// getSetCCInverse - Return the operation corresponding to !(X op Y), where
/// 'op' is a valid SetCC operation.
ISD::CondCode ISD::getSetCCInverse(ISD::CondCode Op, bool isInteger) {
  unsigned Operation = Op;
  if (isInteger)
    Operation ^= 7;   // Flip L, G, E bits, but not U.
  else
    Operation ^= 15;  // Flip all of the condition bits.

  if (Operation > ISD::SETTRUE2)
    Operation &= ~8;  // Don't let N and U bits get set.

  return ISD::CondCode(Operation);
}


/// isSignedOp - For an integer comparison, return 1 if the comparison is a
/// signed operation and 2 if it is an unsigned comparison. Return zero if the
/// operation does not depend on the sign of the input (setne and seteq).
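///
/// Note: the bit arithmetic in the CondCode helpers above and below relies
/// on the ISD::CondCode encoding (see ISDOpcodes.h): bit 0 is the E (equal)
/// flag, bit 1 the G (greater) flag, bit 2 the L (less) flag, bit 3 the U
/// (unordered) flag, and bit 4 marks the NaN-agnostic integer predicates.
/// For example, SETULT | SETUGT == SETUNE (unordered or not equal).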
static int isSignedOp(ISD::CondCode Opcode) {
  switch (Opcode) {
  default: llvm_unreachable("Illegal integer setcc operation!");
  case ISD::SETEQ:
  case ISD::SETNE: return 0;
  case ISD::SETLT:
  case ISD::SETLE:
  case ISD::SETGT:
  case ISD::SETGE: return 1;
  case ISD::SETULT:
  case ISD::SETULE:
  case ISD::SETUGT:
  case ISD::SETUGE: return 2;
  }
}

/// getSetCCOrOperation - Return the result of a logical OR between different
/// comparisons of identical values: ((X op1 Y) | (X op2 Y)). This function
/// returns SETCC_INVALID if it is not possible to represent the resultant
/// comparison.
ISD::CondCode ISD::getSetCCOrOperation(ISD::CondCode Op1, ISD::CondCode Op2,
                                       bool isInteger) {
  if (isInteger && (isSignedOp(Op1) | isSignedOp(Op2)) == 3)
    // Cannot fold a signed integer setcc with an unsigned integer setcc.
    return ISD::SETCC_INVALID;

  unsigned Op = Op1 | Op2;  // Combine all of the condition bits.

  // If the N and U bits get set then the resultant comparison DOES suddenly
  // care about orderedness, and is true when ordered.
  if (Op > ISD::SETTRUE2)
    Op &= ~16;  // Clear the N bit; keep the U bit.

  // Canonicalize illegal integer setcc's.
  if (isInteger && Op == ISD::SETUNE)  // e.g. SETUGT | SETULT
    Op = ISD::SETNE;

  return ISD::CondCode(Op);
}

/// getSetCCAndOperation - Return the result of a logical AND between different
/// comparisons of identical values: ((X op1 Y) & (X op2 Y)). This function
/// returns SETCC_INVALID if it is not possible to represent the resultant
/// comparison.
ISD::CondCode ISD::getSetCCAndOperation(ISD::CondCode Op1, ISD::CondCode Op2,
                                        bool isInteger) {
  if (isInteger && (isSignedOp(Op1) | isSignedOp(Op2)) == 3)
    // Cannot fold a signed setcc with an unsigned setcc.
    return ISD::SETCC_INVALID;

  // Combine all of the condition bits.
  ISD::CondCode Result = ISD::CondCode(Op1 & Op2);

  // Canonicalize illegal integer setcc's.
  if (isInteger) {
    switch (Result) {
    default: break;
    case ISD::SETUO : Result = ISD::SETFALSE; break;  // SETUGT & SETULT
    case ISD::SETOEQ:                                 // SETEQ  & SETU[LG]E
    case ISD::SETUEQ: Result = ISD::SETEQ   ; break;  // SETUGE & SETULE
    case ISD::SETOLT: Result = ISD::SETULT  ; break;  // SETULT & SETNE
    case ISD::SETOGT: Result = ISD::SETUGT  ; break;  // SETUGT & SETNE
    }
  }

  return Result;
}

//===----------------------------------------------------------------------===//
//                           SDNode Profile Support
//===----------------------------------------------------------------------===//

/// AddNodeIDOpcode - Add the node opcode to the NodeID data.
///
static void AddNodeIDOpcode(FoldingSetNodeID &ID, unsigned OpC) {
  ID.AddInteger(OpC);
}

/// AddNodeIDValueTypes - Value type lists are intern'd so we can represent them
/// solely with their pointer.
static void AddNodeIDValueTypes(FoldingSetNodeID &ID, SDVTList VTList) {
  ID.AddPointer(VTList.VTs);
}

/// AddNodeIDOperands - Various routines for adding operands to the NodeID data.
///
static void AddNodeIDOperands(FoldingSetNodeID &ID,
                              const SDValue *Ops, unsigned NumOps) {
  for (; NumOps; --NumOps, ++Ops) {
    ID.AddPointer(Ops->getNode());
    ID.AddInteger(Ops->getResNo());
  }
}

/// AddNodeIDOperands - Various routines for adding operands to the NodeID data.
///
static void AddNodeIDOperands(FoldingSetNodeID &ID,
                              const SDUse *Ops, unsigned NumOps) {
  for (; NumOps; --NumOps, ++Ops) {
    ID.AddPointer(Ops->getNode());
    ID.AddInteger(Ops->getResNo());
  }
}

static void AddNodeIDNode(FoldingSetNodeID &ID,
                          unsigned short OpC, SDVTList VTList,
                          const SDValue *OpList, unsigned N) {
  AddNodeIDOpcode(ID, OpC);
  AddNodeIDValueTypes(ID, VTList);
  AddNodeIDOperands(ID, OpList, N);
}

/// AddNodeIDCustom - If this is an SDNode with special info, add this info to
/// the NodeID data.
static void AddNodeIDCustom(FoldingSetNodeID &ID, const SDNode *N) {
  switch (N->getOpcode()) {
  case ISD::TargetExternalSymbol:
  case ISD::ExternalSymbol:
    llvm_unreachable("Should only be used on nodes with operands");
  default: break;  // Normal nodes don't need extra info.
  case ISD::TargetConstant:
  case ISD::Constant: {
    const ConstantSDNode *C = cast<ConstantSDNode>(N);
    ID.AddPointer(C->getConstantIntValue());
    ID.AddBoolean(C->isOpaque());
    break;
  }
  case ISD::TargetConstantFP:
  case ISD::ConstantFP: {
    ID.AddPointer(cast<ConstantFPSDNode>(N)->getConstantFPValue());
    break;
  }
  case ISD::TargetGlobalAddress:
  case ISD::GlobalAddress:
  case ISD::TargetGlobalTLSAddress:
  case ISD::GlobalTLSAddress: {
    const GlobalAddressSDNode *GA = cast<GlobalAddressSDNode>(N);
    ID.AddPointer(GA->getGlobal());
    ID.AddInteger(GA->getOffset());
    ID.AddInteger(GA->getTargetFlags());
    ID.AddInteger(GA->getAddressSpace());
    break;
  }
  case ISD::BasicBlock:
    ID.AddPointer(cast<BasicBlockSDNode>(N)->getBasicBlock());
    break;
  case ISD::Register:
    ID.AddInteger(cast<RegisterSDNode>(N)->getReg());
    break;
  case ISD::RegisterMask:
    ID.AddPointer(cast<RegisterMaskSDNode>(N)->getRegMask());
    break;
  case ISD::SRCVALUE:
    ID.AddPointer(cast<SrcValueSDNode>(N)->getValue());
    break;
  case ISD::FrameIndex:
  case ISD::TargetFrameIndex:
    ID.AddInteger(cast<FrameIndexSDNode>(N)->getIndex());
    break;
  case ISD::JumpTable:
  case ISD::TargetJumpTable:
    ID.AddInteger(cast<JumpTableSDNode>(N)->getIndex());
    ID.AddInteger(cast<JumpTableSDNode>(N)->getTargetFlags());
    break;
  case ISD::ConstantPool:
  case ISD::TargetConstantPool: {
    const ConstantPoolSDNode *CP = cast<ConstantPoolSDNode>(N);
    ID.AddInteger(CP->getAlignment());
    ID.AddInteger(CP->getOffset());
    if (CP->isMachineConstantPoolEntry())
      CP->getMachineCPVal()->addSelectionDAGCSEId(ID);
    else
      ID.AddPointer(CP->getConstVal());
    ID.AddInteger(CP->getTargetFlags());
    break;
  }
  case ISD::TargetIndex: {
    const TargetIndexSDNode *TI = cast<TargetIndexSDNode>(N);
    ID.AddInteger(TI->getIndex());
    ID.AddInteger(TI->getOffset());
    ID.AddInteger(TI->getTargetFlags());
    break;
  }
  case ISD::LOAD: {
    const LoadSDNode *LD = cast<LoadSDNode>(N);
    ID.AddInteger(LD->getMemoryVT().getRawBits());
    ID.AddInteger(LD->getRawSubclassData());
    ID.AddInteger(LD->getPointerInfo().getAddrSpace());
    break;
  }
  case ISD::STORE: {
    const StoreSDNode *ST = cast<StoreSDNode>(N);
    ID.AddInteger(ST->getMemoryVT().getRawBits());
    ID.AddInteger(ST->getRawSubclassData());
    ID.AddInteger(ST->getPointerInfo().getAddrSpace());
    break;
  }
  case ISD::ATOMIC_CMP_SWAP:
  case ISD::ATOMIC_SWAP:
  case ISD::ATOMIC_LOAD_ADD:
  case ISD::ATOMIC_LOAD_SUB:
  case ISD::ATOMIC_LOAD_AND:
  case ISD::ATOMIC_LOAD_OR:
  case ISD::ATOMIC_LOAD_XOR:
  case ISD::ATOMIC_LOAD_NAND:
  case ISD::ATOMIC_LOAD_MIN:
  case ISD::ATOMIC_LOAD_MAX:
  case ISD::ATOMIC_LOAD_UMIN:
  case ISD::ATOMIC_LOAD_UMAX:
  case ISD::ATOMIC_LOAD:
  case ISD::ATOMIC_STORE: {
    const AtomicSDNode *AT = cast<AtomicSDNode>(N);
    ID.AddInteger(AT->getMemoryVT().getRawBits());
    ID.AddInteger(AT->getRawSubclassData());
    ID.AddInteger(AT->getPointerInfo().getAddrSpace());
    break;
  }
  case ISD::PREFETCH: {
    const MemSDNode *PF = cast<MemSDNode>(N);
    ID.AddInteger(PF->getPointerInfo().getAddrSpace());
    break;
  }
  case ISD::VECTOR_SHUFFLE: {
    const ShuffleVectorSDNode *SVN = cast<ShuffleVectorSDNode>(N);
    for (unsigned i = 0, e = N->getValueType(0).getVectorNumElements();
         i != e; ++i)
      ID.AddInteger(SVN->getMaskElt(i));
    break;
  }
  case ISD::TargetBlockAddress:
  case ISD::BlockAddress: {
    const BlockAddressSDNode *BA = cast<BlockAddressSDNode>(N);
    ID.AddPointer(BA->getBlockAddress());
    ID.AddInteger(BA->getOffset());
    ID.AddInteger(BA->getTargetFlags());
    break;
  }
  } // end switch (N->getOpcode())

  // Target specific memory nodes could also have address spaces to check.
  if (N->isTargetMemoryOpcode())
    ID.AddInteger(cast<MemSDNode>(N)->getPointerInfo().getAddrSpace());
}

/// AddNodeIDNode - Generic routine for adding a node's info to the NodeID
/// data.
static void AddNodeIDNode(FoldingSetNodeID &ID, const SDNode *N) {
  AddNodeIDOpcode(ID, N->getOpcode());
  // Add the return value info.
  AddNodeIDValueTypes(ID, N->getVTList());
  // Add the operand info.
  AddNodeIDOperands(ID, N->op_begin(), N->getNumOperands());

  // Handle SDNode leaves with special info.
  AddNodeIDCustom(ID, N);
}

/// encodeMemSDNodeFlags - Generic routine for computing a value for use in
/// the CSE map that carries volatility, temporalness, indexing mode, and
/// extension/truncation information.
///
static inline unsigned
encodeMemSDNodeFlags(int ConvType, ISD::MemIndexedMode AM, bool isVolatile,
                     bool isNonTemporal, bool isInvariant) {
  assert((ConvType & 3) == ConvType &&
         "ConvType may not require more than 2 bits!");
  assert((AM & 7) == AM &&
         "AM may not require more than 3 bits!");
  return ConvType |
         (AM << 2) |
         (isVolatile << 5) |
         (isNonTemporal << 6) |
         (isInvariant << 7);
}

//===----------------------------------------------------------------------===//
//                              SelectionDAG Class
//===----------------------------------------------------------------------===//

/// doNotCSE - Return true if CSE should not be performed for this node.
static bool doNotCSE(SDNode *N) {
  if (N->getValueType(0) == MVT::Glue)
    return true;  // Never CSE anything that produces a flag.

  switch (N->getOpcode()) {
  default: break;
  case ISD::HANDLENODE:
  case ISD::EH_LABEL:
    return true;  // Never CSE these nodes.
  }

  // Check that remaining values produced are not flags.
  for (unsigned i = 1, e = N->getNumValues(); i != e; ++i)
    if (N->getValueType(i) == MVT::Glue)
      return true;  // Never CSE anything that produces a flag.

  return false;
}

/// RemoveDeadNodes - This method deletes all unreachable nodes in the
/// SelectionDAG.
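/// Reachability is anchored at the root: a worklist is seeded with every
/// node whose use list is empty, and deleting a node may in turn make its
/// operands dead, feeding them back into the same worklist.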
void SelectionDAG::RemoveDeadNodes() {
  // Create a dummy node (which is not added to allnodes) that adds a reference
  // to the root node, preventing it from being deleted.
  HandleSDNode Dummy(getRoot());

  SmallVector<SDNode*, 128> DeadNodes;

  // Add all obviously-dead nodes to the DeadNodes worklist.
  for (allnodes_iterator I = allnodes_begin(), E = allnodes_end(); I != E; ++I)
    if (I->use_empty())
      DeadNodes.push_back(I);

  RemoveDeadNodes(DeadNodes);

  // If the root changed (e.g. it was a dead load), update the root.
  setRoot(Dummy.getValue());
}

/// RemoveDeadNodes - This method deletes the unreachable nodes in the
/// given list, and any nodes that become unreachable as a result.
void SelectionDAG::RemoveDeadNodes(SmallVectorImpl<SDNode *> &DeadNodes) {

  // Process the worklist, deleting the nodes and adding their uses to the
  // worklist.
  while (!DeadNodes.empty()) {
    SDNode *N = DeadNodes.pop_back_val();

    for (DAGUpdateListener *DUL = UpdateListeners; DUL; DUL = DUL->Next)
      DUL->NodeDeleted(N, 0);

    // Take the node out of the appropriate CSE map.
    RemoveNodeFromCSEMaps(N);

    // Next, brutally remove the operand list. This is safe to do, as there are
    // no cycles in the graph.
    for (SDNode::op_iterator I = N->op_begin(), E = N->op_end(); I != E; ) {
      SDUse &Use = *I++;
      SDNode *Operand = Use.getNode();
      Use.set(SDValue());

      // Now that we removed this operand, see if there are no uses of it left.
      if (Operand->use_empty())
        DeadNodes.push_back(Operand);
    }

    DeallocateNode(N);
  }
}

void SelectionDAG::RemoveDeadNode(SDNode *N) {
  SmallVector<SDNode*, 16> DeadNodes(1, N);

  // Create a dummy node that adds a reference to the root node, preventing
  // it from being deleted. (This matters if the root is an operand of the
  // dead node.)
  HandleSDNode Dummy(getRoot());

  RemoveDeadNodes(DeadNodes);
}

void SelectionDAG::DeleteNode(SDNode *N) {
  // First take this out of the appropriate CSE map.
  RemoveNodeFromCSEMaps(N);

  // Finally, remove uses due to operands of this node, remove from the
  // AllNodes list, and delete the node.
  DeleteNodeNotInCSEMaps(N);
}

void SelectionDAG::DeleteNodeNotInCSEMaps(SDNode *N) {
  assert(N != AllNodes.begin() && "Cannot delete the entry node!");
  assert(N->use_empty() && "Cannot delete a node that is not dead!");

  // Drop all of the operands and decrement used nodes' use counts.
  N->DropOperands();

  DeallocateNode(N);
}

void SelectionDAG::DeallocateNode(SDNode *N) {
  if (N->OperandsNeedDelete)
    delete[] N->OperandList;

  // Set the opcode to DELETED_NODE to help catch bugs when node
  // memory is reallocated.
  N->NodeType = ISD::DELETED_NODE;

  NodeAllocator.Deallocate(AllNodes.remove(N));

  // If any of the SDDbgValue nodes refer to this SDNode, invalidate them.
  ArrayRef<SDDbgValue*> DbgVals = DbgInfo->getSDDbgValues(N);
  for (unsigned i = 0, e = DbgVals.size(); i != e; ++i)
    DbgVals[i]->setIsInvalidated();
}

/// RemoveNodeFromCSEMaps - Take the specified node out of the CSE maps that
/// correspond to it. This is useful when we're about to delete or repurpose
/// the node. We don't want future requests for structurally identical nodes
/// to return N anymore.
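/// Returns true if the node was actually removed from a map; HANDLENODE,
/// which is never inserted into any map, returns false.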
bool SelectionDAG::RemoveNodeFromCSEMaps(SDNode *N) {
  bool Erased = false;
  switch (N->getOpcode()) {
  case ISD::HANDLENODE: return false;  // noop.
  case ISD::CONDCODE:
    assert(CondCodeNodes[cast<CondCodeSDNode>(N)->get()] &&
           "Cond code doesn't exist!");
    Erased = CondCodeNodes[cast<CondCodeSDNode>(N)->get()] != 0;
    CondCodeNodes[cast<CondCodeSDNode>(N)->get()] = 0;
    break;
  case ISD::ExternalSymbol:
    Erased = ExternalSymbols.erase(cast<ExternalSymbolSDNode>(N)->getSymbol());
    break;
  case ISD::TargetExternalSymbol: {
    ExternalSymbolSDNode *ESN = cast<ExternalSymbolSDNode>(N);
    Erased = TargetExternalSymbols.erase(
               std::pair<std::string,unsigned char>(ESN->getSymbol(),
                                                    ESN->getTargetFlags()));
    break;
  }
  case ISD::VALUETYPE: {
    EVT VT = cast<VTSDNode>(N)->getVT();
    if (VT.isExtended()) {
      Erased = ExtendedValueTypeNodes.erase(VT);
    } else {
      Erased = ValueTypeNodes[VT.getSimpleVT().SimpleTy] != 0;
      ValueTypeNodes[VT.getSimpleVT().SimpleTy] = 0;
    }
    break;
  }
  default:
    // Remove it from the CSE Map.
    assert(N->getOpcode() != ISD::DELETED_NODE && "DELETED_NODE in CSEMap!");
    assert(N->getOpcode() != ISD::EntryToken && "EntryToken in CSEMap!");
    Erased = CSEMap.RemoveNode(N);
    break;
  }
#ifndef NDEBUG
  // Verify that the node was actually in one of the CSE maps, unless it has a
  // flag result (which cannot be CSE'd) or is one of the special cases that are
  // not subject to CSE.
  if (!Erased && N->getValueType(N->getNumValues()-1) != MVT::Glue &&
      !N->isMachineOpcode() && !doNotCSE(N)) {
    N->dump(this);
    dbgs() << "\n";
    llvm_unreachable("Node is not in map!");
  }
#endif
  return Erased;
}

/// AddModifiedNodeToCSEMaps - The specified node has been removed from the CSE
/// maps and modified in place. Add it back to the CSE maps, unless an identical
/// node already exists, in which case transfer all its users to the existing
/// node. This transfer can potentially trigger recursive merging.
///
void
SelectionDAG::AddModifiedNodeToCSEMaps(SDNode *N) {
  // For node types that aren't CSE'd, just act as if no identical node
  // already exists.
  if (!doNotCSE(N)) {
    SDNode *Existing = CSEMap.GetOrInsertNode(N);
    if (Existing != N) {
      // If there was already an existing matching node, use ReplaceAllUsesWith
      // to replace the dead one with the existing one. This can cause
      // recursive merging of other unrelated nodes down the line.
      ReplaceAllUsesWith(N, Existing);

      // N is now dead. Inform the listeners and delete it.
      for (DAGUpdateListener *DUL = UpdateListeners; DUL; DUL = DUL->Next)
        DUL->NodeDeleted(N, Existing);
      DeleteNodeNotInCSEMaps(N);
      return;
    }
  }

  // If the node doesn't already exist, we updated it. Inform listeners.
  for (DAGUpdateListener *DUL = UpdateListeners; DUL; DUL = DUL->Next)
    DUL->NodeUpdated(N);
}

/// FindModifiedNodeSlot - Find a slot for the specified node if its operands
/// were replaced with those specified. If this node is never memoized,
/// return null, otherwise return a pointer to the slot it would take. If a
/// node already exists with these operands, the slot will be non-null.
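/// Callers that morph N in place are then expected to re-insert it into the
/// CSE map through the returned InsertPos.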
SDNode *SelectionDAG::FindModifiedNodeSlot(SDNode *N, SDValue Op,
                                           void *&InsertPos) {
  if (doNotCSE(N))
    return 0;

  SDValue Ops[] = { Op };
  FoldingSetNodeID ID;
  AddNodeIDNode(ID, N->getOpcode(), N->getVTList(), Ops, 1);
  AddNodeIDCustom(ID, N);
  SDNode *Node = CSEMap.FindNodeOrInsertPos(ID, InsertPos);
  return Node;
}

/// FindModifiedNodeSlot - Find a slot for the specified node if its operands
/// were replaced with those specified. If this node is never memoized,
/// return null, otherwise return a pointer to the slot it would take. If a
/// node already exists with these operands, the slot will be non-null.
SDNode *SelectionDAG::FindModifiedNodeSlot(SDNode *N,
                                           SDValue Op1, SDValue Op2,
                                           void *&InsertPos) {
  if (doNotCSE(N))
    return 0;

  SDValue Ops[] = { Op1, Op2 };
  FoldingSetNodeID ID;
  AddNodeIDNode(ID, N->getOpcode(), N->getVTList(), Ops, 2);
  AddNodeIDCustom(ID, N);
  SDNode *Node = CSEMap.FindNodeOrInsertPos(ID, InsertPos);
  return Node;
}


/// FindModifiedNodeSlot - Find a slot for the specified node if its operands
/// were replaced with those specified. If this node is never memoized,
/// return null, otherwise return a pointer to the slot it would take. If a
/// node already exists with these operands, the slot will be non-null.
SDNode *SelectionDAG::FindModifiedNodeSlot(SDNode *N,
                                           const SDValue *Ops, unsigned NumOps,
                                           void *&InsertPos) {
  if (doNotCSE(N))
    return 0;

  FoldingSetNodeID ID;
  AddNodeIDNode(ID, N->getOpcode(), N->getVTList(), Ops, NumOps);
  AddNodeIDCustom(ID, N);
  SDNode *Node = CSEMap.FindNodeOrInsertPos(ID, InsertPos);
  return Node;
}

#ifndef NDEBUG
/// VerifyNodeCommon - Sanity check the given node. Aborts if it is invalid.
static void VerifyNodeCommon(SDNode *N) {
  switch (N->getOpcode()) {
  default:
    break;
  case ISD::BUILD_PAIR: {
    EVT VT = N->getValueType(0);
    assert(N->getNumValues() == 1 && "Too many results!");
    assert(!VT.isVector() && (VT.isInteger() || VT.isFloatingPoint()) &&
           "Wrong return type!");
    assert(N->getNumOperands() == 2 && "Wrong number of operands!");
    assert(N->getOperand(0).getValueType() == N->getOperand(1).getValueType() &&
           "Mismatched operand types!");
    assert(N->getOperand(0).getValueType().isInteger() == VT.isInteger() &&
           "Wrong operand type!");
    assert(VT.getSizeInBits() == 2 * N->getOperand(0).getValueSizeInBits() &&
           "Wrong return type size");
    break;
  }
  case ISD::BUILD_VECTOR: {
    assert(N->getNumValues() == 1 && "Too many results!");
    assert(N->getValueType(0).isVector() && "Wrong return type!");
    assert(N->getNumOperands() == N->getValueType(0).getVectorNumElements() &&
           "Wrong number of operands!");
    EVT EltVT = N->getValueType(0).getVectorElementType();
    for (SDNode::op_iterator I = N->op_begin(), E = N->op_end(); I != E; ++I) {
      assert((I->getValueType() == EltVT ||
              (EltVT.isInteger() && I->getValueType().isInteger() &&
               EltVT.bitsLE(I->getValueType()))) &&
             "Wrong operand type!");
      assert(I->getValueType() == N->getOperand(0).getValueType() &&
             "Operands must all have the same type");
    }
    break;
  }
  }
}

/// VerifySDNode - Sanity check the given SDNode. Aborts if it is invalid.
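/// Most of these checks ensure that a plain SDNode was not allocated where a
/// subclass with extra fields (MemSDNode, ConstantSDNode, ...) is required.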
static void VerifySDNode(SDNode *N) {
  // The SDNode allocators cannot be used to allocate nodes with fields that are
  // not present in an SDNode!
  assert(!isa<MemSDNode>(N) && "Bad MemSDNode!");
  assert(!isa<ShuffleVectorSDNode>(N) && "Bad ShuffleVectorSDNode!");
  assert(!isa<ConstantSDNode>(N) && "Bad ConstantSDNode!");
  assert(!isa<ConstantFPSDNode>(N) && "Bad ConstantFPSDNode!");
  assert(!isa<GlobalAddressSDNode>(N) && "Bad GlobalAddressSDNode!");
  assert(!isa<FrameIndexSDNode>(N) && "Bad FrameIndexSDNode!");
  assert(!isa<JumpTableSDNode>(N) && "Bad JumpTableSDNode!");
  assert(!isa<ConstantPoolSDNode>(N) && "Bad ConstantPoolSDNode!");
  assert(!isa<BasicBlockSDNode>(N) && "Bad BasicBlockSDNode!");
  assert(!isa<SrcValueSDNode>(N) && "Bad SrcValueSDNode!");
  assert(!isa<MDNodeSDNode>(N) && "Bad MDNodeSDNode!");
  assert(!isa<RegisterSDNode>(N) && "Bad RegisterSDNode!");
  assert(!isa<BlockAddressSDNode>(N) && "Bad BlockAddressSDNode!");
  assert(!isa<EHLabelSDNode>(N) && "Bad EHLabelSDNode!");
  assert(!isa<ExternalSymbolSDNode>(N) && "Bad ExternalSymbolSDNode!");
  assert(!isa<CondCodeSDNode>(N) && "Bad CondCodeSDNode!");
  assert(!isa<CvtRndSatSDNode>(N) && "Bad CvtRndSatSDNode!");
  assert(!isa<VTSDNode>(N) && "Bad VTSDNode!");
  assert(!isa<MachineSDNode>(N) && "Bad MachineSDNode!");

  VerifyNodeCommon(N);
}

/// VerifyMachineNode - Sanity check the given MachineNode. Aborts if it is
/// invalid.
static void VerifyMachineNode(SDNode *N) {
  // The MachineNode allocators cannot be used to allocate nodes with fields
  // that are not present in a MachineNode!
  // Currently there are no such nodes.

  VerifyNodeCommon(N);
}
#endif // NDEBUG

/// getEVTAlignment - Compute the default alignment value for the
/// given type.
///
unsigned SelectionDAG::getEVTAlignment(EVT VT) const {
  Type *Ty = VT == MVT::iPTR ?
                   PointerType::get(Type::getInt8Ty(*getContext()), 0) :
                   VT.getTypeForEVT(*getContext());

  return TM.getTargetLowering()->getDataLayout()->getABITypeAlignment(Ty);
}

// EntryNode could meaningfully have debug info if we can find it...
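// Note that the constructor below leaves TLI null; it is filled in by init()
// once the MachineFunction is known.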
SelectionDAG::SelectionDAG(const TargetMachine &tm, CodeGenOpt::Level OL)
  : TM(tm), TSI(*tm.getSelectionDAGInfo()), TLI(0), OptLevel(OL),
    EntryNode(ISD::EntryToken, 0, DebugLoc(), getVTList(MVT::Other)),
    Root(getEntryNode()), NewNodesMustHaveLegalTypes(false),
    UpdateListeners(0) {
  AllNodes.push_back(&EntryNode);
  DbgInfo = new SDDbgInfo();
}

void SelectionDAG::init(MachineFunction &mf, const TargetLowering *tli) {
  MF = &mf;
  TLI = tli;
  Context = &mf.getFunction()->getContext();
}

SelectionDAG::~SelectionDAG() {
  assert(!UpdateListeners && "Dangling registered DAGUpdateListeners");
  allnodes_clear();
  delete DbgInfo;
}

void SelectionDAG::allnodes_clear() {
  assert(&*AllNodes.begin() == &EntryNode);
  AllNodes.remove(AllNodes.begin());
  while (!AllNodes.empty())
    DeallocateNode(AllNodes.begin());
}

void SelectionDAG::clear() {
  allnodes_clear();
  OperandAllocator.Reset();
  CSEMap.clear();

  ExtendedValueTypeNodes.clear();
  ExternalSymbols.clear();
  TargetExternalSymbols.clear();
  std::fill(CondCodeNodes.begin(), CondCodeNodes.end(),
            static_cast<CondCodeSDNode*>(0));
  std::fill(ValueTypeNodes.begin(), ValueTypeNodes.end(),
            static_cast<SDNode*>(0));

  EntryNode.UseList = 0;
  AllNodes.push_back(&EntryNode);
  Root = getEntryNode();
  DbgInfo->clear();
}

SDValue SelectionDAG::getAnyExtOrTrunc(SDValue Op, SDLoc DL, EVT VT) {
  return VT.bitsGT(Op.getValueType()) ?
    getNode(ISD::ANY_EXTEND, DL, VT, Op) :
    getNode(ISD::TRUNCATE, DL, VT, Op);
}

SDValue SelectionDAG::getSExtOrTrunc(SDValue Op, SDLoc DL, EVT VT) {
  return VT.bitsGT(Op.getValueType()) ?
    getNode(ISD::SIGN_EXTEND, DL, VT, Op) :
    getNode(ISD::TRUNCATE, DL, VT, Op);
}

SDValue SelectionDAG::getZExtOrTrunc(SDValue Op, SDLoc DL, EVT VT) {
  return VT.bitsGT(Op.getValueType()) ?
    getNode(ISD::ZERO_EXTEND, DL, VT, Op) :
    getNode(ISD::TRUNCATE, DL, VT, Op);
}

SDValue SelectionDAG::getZeroExtendInReg(SDValue Op, SDLoc DL, EVT VT) {
  assert(!VT.isVector() &&
         "getZeroExtendInReg should use the vector element type instead of "
         "the vector type!");
  if (Op.getValueType() == VT) return Op;
  unsigned BitWidth = Op.getValueType().getScalarType().getSizeInBits();
  APInt Imm = APInt::getLowBitsSet(BitWidth,
                                   VT.getSizeInBits());
  return getNode(ISD::AND, DL, Op.getValueType(), Op,
                 getConstant(Imm, Op.getValueType()));
}

/// getNOT - Create a bitwise NOT operation as (XOR Val, -1).
///
SDValue SelectionDAG::getNOT(SDLoc DL, SDValue Val, EVT VT) {
  EVT EltVT = VT.getScalarType();
  SDValue NegOne =
    getConstant(APInt::getAllOnesValue(EltVT.getSizeInBits()), VT);
  return getNode(ISD::XOR, DL, VT, Val, NegOne);
}

SDValue SelectionDAG::getConstant(uint64_t Val, EVT VT, bool isT, bool isO) {
  EVT EltVT = VT.getScalarType();
  assert((EltVT.getSizeInBits() >= 64 ||
          (uint64_t)((int64_t)Val >> EltVT.getSizeInBits()) + 1 < 2) &&
         "getConstant with a uint64_t value that doesn't fit in the type!");
  return getConstant(APInt(EltVT.getSizeInBits(), Val), VT, isT, isO);
}

SDValue SelectionDAG::getConstant(const APInt &Val, EVT VT, bool isT, bool isO)
{
  return getConstant(*ConstantInt::get(*Context, Val), VT, isT, isO);
}

SDValue SelectionDAG::getConstant(const ConstantInt &Val, EVT VT, bool isT,
                                  bool isO) {
  assert(VT.isInteger() && "Cannot create FP integer constant!");

  EVT EltVT = VT.getScalarType();
  const ConstantInt *Elt = &Val;

  const TargetLowering *TLI = TM.getTargetLowering();

  // In some cases the vector type is legal but the element type is illegal and
  // needs to be promoted, for example v8i8 on ARM. In this case, promote the
  // inserted value (the type does not need to match the vector element type).
  // Any extra bits introduced will be truncated away.
  if (VT.isVector() && TLI->getTypeAction(*getContext(), EltVT) ==
      TargetLowering::TypePromoteInteger) {
    EltVT = TLI->getTypeToTransformTo(*getContext(), EltVT);
    APInt NewVal = Elt->getValue().zext(EltVT.getSizeInBits());
    Elt = ConstantInt::get(*getContext(), NewVal);
  }
  // In other cases the element type is illegal and needs to be expanded, for
  // example v2i64 on MIPS32. In this case, find the nearest legal type, split
  // the value into n parts and use a vector type with n-times the elements.
  // Then bitcast to the type requested.
  // Legalizing constants too early makes the DAGCombiner's job harder so we
  // only legalize if the DAG tells us we must produce legal types.
  else if (NewNodesMustHaveLegalTypes && VT.isVector() &&
           TLI->getTypeAction(*getContext(), EltVT) ==
           TargetLowering::TypeExpandInteger) {
    APInt NewVal = Elt->getValue();
    EVT ViaEltVT = TLI->getTypeToTransformTo(*getContext(), EltVT);
    unsigned ViaEltSizeInBits = ViaEltVT.getSizeInBits();
    unsigned ViaVecNumElts = VT.getSizeInBits() / ViaEltSizeInBits;
    EVT ViaVecVT = EVT::getVectorVT(*getContext(), ViaEltVT, ViaVecNumElts);

    // Check the temporary vector is the correct size. If this fails then
    // getTypeToTransformTo() probably returned a type whose size (in bits)
    // isn't a power-of-2 factor of the requested type size.
    assert(ViaVecVT.getSizeInBits() == VT.getSizeInBits());

    SmallVector<SDValue, 2> EltParts;
    for (unsigned i = 0; i < ViaVecNumElts / VT.getVectorNumElements(); ++i) {
      EltParts.push_back(getConstant(NewVal.lshr(i * ViaEltSizeInBits)
                                           .trunc(ViaEltSizeInBits),
                                     ViaEltVT, isT, isO));
    }

    // EltParts is currently in little endian order. If we actually want
    // big-endian order then reverse it now.
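    // For example, an i64 element value 0x0011223344556677 split into i32
    // parts yields EltParts == { 0x44556677, 0x00112233 }.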
    if (TLI->isBigEndian())
      std::reverse(EltParts.begin(), EltParts.end());

    // The elements must be reversed when the element order is different from
    // the endianness of the elements (because the BITCAST is itself a
    // vector shuffle in this situation). However, we do not need any code to
    // perform this reversal because getConstant() is producing a vector
    // splat.
    // This situation occurs in MIPS MSA.

    SmallVector<SDValue, 8> Ops;
    for (unsigned i = 0; i < VT.getVectorNumElements(); ++i)
      Ops.insert(Ops.end(), EltParts.begin(), EltParts.end());

    SDValue Result = getNode(ISD::BITCAST, SDLoc(), VT,
                             getNode(ISD::BUILD_VECTOR, SDLoc(), ViaVecVT,
                                     &Ops[0], Ops.size()));
    return Result;
  }

  assert(Elt->getBitWidth() == EltVT.getSizeInBits() &&
         "APInt size does not match type size!");
  unsigned Opc = isT ? ISD::TargetConstant : ISD::Constant;
  FoldingSetNodeID ID;
  AddNodeIDNode(ID, Opc, getVTList(EltVT), 0, 0);
  ID.AddPointer(Elt);
  ID.AddBoolean(isO);
  void *IP = 0;
  SDNode *N = NULL;
  if ((N = CSEMap.FindNodeOrInsertPos(ID, IP)))
    if (!VT.isVector())
      return SDValue(N, 0);

  if (!N) {
    N = new (NodeAllocator) ConstantSDNode(isT, isO, Elt, EltVT);
    CSEMap.InsertNode(N, IP);
    AllNodes.push_back(N);
  }

  SDValue Result(N, 0);
  if (VT.isVector()) {
    SmallVector<SDValue, 8> Ops;
    Ops.assign(VT.getVectorNumElements(), Result);
    Result = getNode(ISD::BUILD_VECTOR, SDLoc(), VT, &Ops[0], Ops.size());
  }
  return Result;
}

SDValue SelectionDAG::getIntPtrConstant(uint64_t Val, bool isTarget) {
  return getConstant(Val, TM.getTargetLowering()->getPointerTy(), isTarget);
}


SDValue SelectionDAG::getConstantFP(const APFloat& V, EVT VT, bool isTarget) {
  return getConstantFP(*ConstantFP::get(*getContext(), V), VT, isTarget);
}

SDValue SelectionDAG::getConstantFP(const ConstantFP& V, EVT VT, bool isTarget){
  assert(VT.isFloatingPoint() && "Cannot create integer FP constant!");

  EVT EltVT = VT.getScalarType();

  // Do the map lookup using the actual bit pattern for the floating point
  // value, so that we don't have problems with 0.0 comparing equal to -0.0, and
  // we don't have issues with SNANs.
  unsigned Opc = isTarget ? ISD::TargetConstantFP : ISD::ConstantFP;
  FoldingSetNodeID ID;
  AddNodeIDNode(ID, Opc, getVTList(EltVT), 0, 0);
  ID.AddPointer(&V);
  void *IP = 0;
  SDNode *N = NULL;
  if ((N = CSEMap.FindNodeOrInsertPos(ID, IP)))
    if (!VT.isVector())
      return SDValue(N, 0);

  if (!N) {
    N = new (NodeAllocator) ConstantFPSDNode(isTarget, &V, EltVT);
    CSEMap.InsertNode(N, IP);
    AllNodes.push_back(N);
  }

  SDValue Result(N, 0);
  if (VT.isVector()) {
    SmallVector<SDValue, 8> Ops;
    Ops.assign(VT.getVectorNumElements(), Result);
    // FIXME SDLoc info might be appropriate here
    Result = getNode(ISD::BUILD_VECTOR, SDLoc(), VT, &Ops[0], Ops.size());
  }
  return Result;
}

SDValue SelectionDAG::getConstantFP(double Val, EVT VT, bool isTarget) {
  EVT EltVT = VT.getScalarType();
  if (EltVT==MVT::f32)
    return getConstantFP(APFloat((float)Val), VT, isTarget);
  else if (EltVT==MVT::f64)
    return getConstantFP(APFloat(Val), VT, isTarget);
  else if (EltVT==MVT::f80 || EltVT==MVT::f128 || EltVT==MVT::ppcf128 ||
           EltVT==MVT::f16) {
    bool ignored;
    APFloat apf = APFloat(Val);
    apf.convert(EVTToAPFloatSemantics(EltVT), APFloat::rmNearestTiesToEven,
                &ignored);
    return getConstantFP(apf, VT, isTarget);
  } else
    llvm_unreachable("Unsupported type in getConstantFP");
}

SDValue SelectionDAG::getGlobalAddress(const GlobalValue *GV, SDLoc DL,
                                       EVT VT, int64_t Offset,
                                       bool isTargetGA,
                                       unsigned char TargetFlags) {
  assert((TargetFlags == 0 || isTargetGA) &&
         "Cannot set target flags on target-independent globals");
  const TargetLowering *TLI = TM.getTargetLowering();

  // Truncate (with sign-extension) the offset value to the pointer size.
  unsigned BitWidth = TLI->getPointerTypeSizeInBits(GV->getType());
  if (BitWidth < 64)
    Offset = SignExtend64(Offset, BitWidth);

  const GlobalVariable *GVar = dyn_cast<GlobalVariable>(GV);
  if (!GVar) {
    // If GV is an alias then use the aliasee for determining thread-localness.
    if (const GlobalAlias *GA = dyn_cast<GlobalAlias>(GV))
      GVar = dyn_cast_or_null<GlobalVariable>(GA->resolveAliasedGlobal(false));
  }

  unsigned Opc;
  if (GVar && GVar->isThreadLocal())
    Opc = isTargetGA ? ISD::TargetGlobalTLSAddress : ISD::GlobalTLSAddress;
  else
    Opc = isTargetGA ? ISD::TargetGlobalAddress : ISD::GlobalAddress;

  FoldingSetNodeID ID;
  AddNodeIDNode(ID, Opc, getVTList(VT), 0, 0);
  ID.AddPointer(GV);
  ID.AddInteger(Offset);
  ID.AddInteger(TargetFlags);
  ID.AddInteger(GV->getType()->getAddressSpace());
  void *IP = 0;
  if (SDNode *E = CSEMap.FindNodeOrInsertPos(ID, IP))
    return SDValue(E, 0);

  SDNode *N = new (NodeAllocator) GlobalAddressSDNode(Opc, DL.getIROrder(),
                                                      DL.getDebugLoc(), GV, VT,
                                                      Offset, TargetFlags);
  CSEMap.InsertNode(N, IP);
  AllNodes.push_back(N);
  return SDValue(N, 0);
}

SDValue SelectionDAG::getFrameIndex(int FI, EVT VT, bool isTarget) {
  unsigned Opc = isTarget ? ISD::TargetFrameIndex : ISD::FrameIndex;
  FoldingSetNodeID ID;
  AddNodeIDNode(ID, Opc, getVTList(VT), 0, 0);
  ID.AddInteger(FI);
  void *IP = 0;
  if (SDNode *E = CSEMap.FindNodeOrInsertPos(ID, IP))
    return SDValue(E, 0);

  SDNode *N = new (NodeAllocator) FrameIndexSDNode(FI, VT, isTarget);
  CSEMap.InsertNode(N, IP);
  AllNodes.push_back(N);
  return SDValue(N, 0);
}

SDValue SelectionDAG::getJumpTable(int JTI, EVT VT, bool isTarget,
                                   unsigned char TargetFlags) {
  assert((TargetFlags == 0 || isTarget) &&
         "Cannot set target flags on target-independent jump tables");
  unsigned Opc = isTarget ? ISD::TargetJumpTable : ISD::JumpTable;
  FoldingSetNodeID ID;
  AddNodeIDNode(ID, Opc, getVTList(VT), 0, 0);
  ID.AddInteger(JTI);
  ID.AddInteger(TargetFlags);
  void *IP = 0;
  if (SDNode *E = CSEMap.FindNodeOrInsertPos(ID, IP))
    return SDValue(E, 0);

  SDNode *N = new (NodeAllocator) JumpTableSDNode(JTI, VT, isTarget,
                                                  TargetFlags);
  CSEMap.InsertNode(N, IP);
  AllNodes.push_back(N);
  return SDValue(N, 0);
}

SDValue SelectionDAG::getConstantPool(const Constant *C, EVT VT,
                                      unsigned Alignment, int Offset,
                                      bool isTarget,
                                      unsigned char TargetFlags) {
  assert((TargetFlags == 0 || isTarget) &&
         "Cannot set target flags on target-independent globals");
  if (Alignment == 0)
    Alignment =
      TM.getTargetLowering()->getDataLayout()->getPrefTypeAlignment(C->getType());
  unsigned Opc = isTarget ? ISD::TargetConstantPool : ISD::ConstantPool;
  FoldingSetNodeID ID;
  AddNodeIDNode(ID, Opc, getVTList(VT), 0, 0);
  ID.AddInteger(Alignment);
  ID.AddInteger(Offset);
  ID.AddPointer(C);
  ID.AddInteger(TargetFlags);
  void *IP = 0;
  if (SDNode *E = CSEMap.FindNodeOrInsertPos(ID, IP))
    return SDValue(E, 0);

  SDNode *N = new (NodeAllocator) ConstantPoolSDNode(isTarget, C, VT, Offset,
                                                     Alignment, TargetFlags);
  CSEMap.InsertNode(N, IP);
  AllNodes.push_back(N);
  return SDValue(N, 0);
}


SDValue SelectionDAG::getConstantPool(MachineConstantPoolValue *C, EVT VT,
                                      unsigned Alignment, int Offset,
                                      bool isTarget,
                                      unsigned char TargetFlags) {
  assert((TargetFlags == 0 || isTarget) &&
         "Cannot set target flags on target-independent globals");
  if (Alignment == 0)
    Alignment =
      TM.getTargetLowering()->getDataLayout()->getPrefTypeAlignment(C->getType());
  unsigned Opc = isTarget ? ISD::TargetConstantPool : ISD::ConstantPool;
  FoldingSetNodeID ID;
  AddNodeIDNode(ID, Opc, getVTList(VT), 0, 0);
  ID.AddInteger(Alignment);
  ID.AddInteger(Offset);
  C->addSelectionDAGCSEId(ID);
  ID.AddInteger(TargetFlags);
  void *IP = 0;
  if (SDNode *E = CSEMap.FindNodeOrInsertPos(ID, IP))
    return SDValue(E, 0);

  SDNode *N = new (NodeAllocator) ConstantPoolSDNode(isTarget, C, VT, Offset,
                                                     Alignment, TargetFlags);
  CSEMap.InsertNode(N, IP);
  AllNodes.push_back(N);
  return SDValue(N, 0);
}

SDValue SelectionDAG::getTargetIndex(int Index, EVT VT, int64_t Offset,
                                     unsigned char TargetFlags) {
  FoldingSetNodeID ID;
  AddNodeIDNode(ID, ISD::TargetIndex, getVTList(VT), 0, 0);
  ID.AddInteger(Index);
  ID.AddInteger(Offset);
  ID.AddInteger(TargetFlags);
  void *IP = 0;
  if (SDNode *E = CSEMap.FindNodeOrInsertPos(ID, IP))
    return SDValue(E, 0);

  SDNode *N = new (NodeAllocator) TargetIndexSDNode(Index, VT, Offset,
                                                    TargetFlags);
  CSEMap.InsertNode(N, IP);
  AllNodes.push_back(N);
  return SDValue(N, 0);
}

SDValue SelectionDAG::getBasicBlock(MachineBasicBlock *MBB) {
  FoldingSetNodeID ID;
  AddNodeIDNode(ID, ISD::BasicBlock, getVTList(MVT::Other), 0, 0);
  ID.AddPointer(MBB);
  void *IP = 0;
  if (SDNode *E = CSEMap.FindNodeOrInsertPos(ID, IP))
    return SDValue(E, 0);

  SDNode *N = new (NodeAllocator) BasicBlockSDNode(MBB);
  CSEMap.InsertNode(N, IP);
  AllNodes.push_back(N);
  return SDValue(N, 0);
}

SDValue SelectionDAG::getValueType(EVT VT) {
  if (VT.isSimple() && (unsigned)VT.getSimpleVT().SimpleTy >=
      ValueTypeNodes.size())
    ValueTypeNodes.resize(VT.getSimpleVT().SimpleTy+1);

  SDNode *&N = VT.isExtended() ?
    ExtendedValueTypeNodes[VT] : ValueTypeNodes[VT.getSimpleVT().SimpleTy];

  if (N) return SDValue(N, 0);
  N = new (NodeAllocator) VTSDNode(VT);
  AllNodes.push_back(N);
  return SDValue(N, 0);
}

SDValue SelectionDAG::getExternalSymbol(const char *Sym, EVT VT) {
  SDNode *&N = ExternalSymbols[Sym];
  if (N) return SDValue(N, 0);
  N = new (NodeAllocator) ExternalSymbolSDNode(false, Sym, 0, VT);
  AllNodes.push_back(N);
  return SDValue(N, 0);
}

SDValue SelectionDAG::getTargetExternalSymbol(const char *Sym, EVT VT,
                                              unsigned char TargetFlags) {
  SDNode *&N =
    TargetExternalSymbols[std::pair<std::string,unsigned char>(Sym,
                                                               TargetFlags)];
  if (N) return SDValue(N, 0);
  N = new (NodeAllocator) ExternalSymbolSDNode(true, Sym, TargetFlags, VT);
  AllNodes.push_back(N);
  return SDValue(N, 0);
}

SDValue SelectionDAG::getCondCode(ISD::CondCode Cond) {
  if ((unsigned)Cond >= CondCodeNodes.size())
    CondCodeNodes.resize(Cond+1);

  if (CondCodeNodes[Cond] == 0) {
    CondCodeSDNode *N = new (NodeAllocator) CondCodeSDNode(Cond);
    CondCodeNodes[Cond] = N;
    AllNodes.push_back(N);
  }

  return SDValue(CondCodeNodes[Cond], 0);
}

// commuteShuffle - swaps the values of N1 and N2, and swaps all indices in
// the shuffle mask M that point at N1 to point at N2, and indices that point
// at N2 to point at N1.
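// For example, with 4-element vectors, the mask <0, 5, 2, 7> becomes
// <4, 1, 6, 3>, so each lane still reads the same input element after the
// operand swap.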
static void commuteShuffle(SDValue &N1, SDValue &N2, SmallVectorImpl<int> &M) {
  std::swap(N1, N2);
  int NElts = M.size();
  for (int i = 0; i != NElts; ++i) {
    if (M[i] >= NElts)
      M[i] -= NElts;
    else if (M[i] >= 0)
      M[i] += NElts;
  }
}

SDValue SelectionDAG::getVectorShuffle(EVT VT, SDLoc dl, SDValue N1,
                                       SDValue N2, const int *Mask) {
  assert(VT == N1.getValueType() && VT == N2.getValueType() &&
         "Invalid VECTOR_SHUFFLE");

  // Canonicalize shuffle undef, undef -> undef
  if (N1.getOpcode() == ISD::UNDEF && N2.getOpcode() == ISD::UNDEF)
    return getUNDEF(VT);

  // Validate that all indices in Mask are within the range of the elements
  // input to the shuffle.
  unsigned NElts = VT.getVectorNumElements();
  SmallVector<int, 8> MaskVec;
  for (unsigned i = 0; i != NElts; ++i) {
    assert(Mask[i] < (int)(NElts * 2) && "Index out of range");
    MaskVec.push_back(Mask[i]);
  }

  // Canonicalize shuffle v, v -> v, undef
  if (N1 == N2) {
    N2 = getUNDEF(VT);
    for (unsigned i = 0; i != NElts; ++i)
      if (MaskVec[i] >= (int)NElts) MaskVec[i] -= NElts;
  }

  // Canonicalize shuffle undef, v -> v, undef. Commute the shuffle mask.
  if (N1.getOpcode() == ISD::UNDEF)
    commuteShuffle(N1, N2, MaskVec);

  // Canonicalize: if all indices select the lhs -> shuffle lhs, undef
  // Canonicalize: if all indices select the rhs -> shuffle rhs, undef
  bool AllLHS = true, AllRHS = true;
  bool N2Undef = N2.getOpcode() == ISD::UNDEF;
  for (unsigned i = 0; i != NElts; ++i) {
    if (MaskVec[i] >= (int)NElts) {
      if (N2Undef)
        MaskVec[i] = -1;
      else
        AllLHS = false;
    } else if (MaskVec[i] >= 0) {
      AllRHS = false;
    }
  }
  if (AllLHS && AllRHS)
    return getUNDEF(VT);
  if (AllLHS && !N2Undef)
    N2 = getUNDEF(VT);
  if (AllRHS) {
    N1 = getUNDEF(VT);
    commuteShuffle(N1, N2, MaskVec);
  }

  // If this is an identity shuffle, return the first input.
  bool Identity = true;
  for (unsigned i = 0; i != NElts; ++i) {
    if (MaskVec[i] >= 0 && MaskVec[i] != (int)i) Identity = false;
  }
  if (Identity && NElts)
    return N1;

  FoldingSetNodeID ID;
  SDValue Ops[2] = { N1, N2 };
  AddNodeIDNode(ID, ISD::VECTOR_SHUFFLE, getVTList(VT), Ops, 2);
  for (unsigned i = 0; i != NElts; ++i)
    ID.AddInteger(MaskVec[i]);

  void* IP = 0;
  if (SDNode *E = CSEMap.FindNodeOrInsertPos(ID, IP))
    return SDValue(E, 0);

  // Allocate the mask array for the node out of the BumpPtrAllocator, since
  // SDNode doesn't have access to it. This memory will be "leaked" when
  // the node is deallocated, but recovered when the NodeAllocator is released.
  int *MaskAlloc = OperandAllocator.Allocate<int>(NElts);
  memcpy(MaskAlloc, &MaskVec[0], NElts * sizeof(int));

  ShuffleVectorSDNode *N =
    new (NodeAllocator) ShuffleVectorSDNode(VT, dl.getIROrder(),
                                            dl.getDebugLoc(), N1, N2,
                                            MaskAlloc);
  CSEMap.InsertNode(N, IP);
  AllNodes.push_back(N);
  return SDValue(N, 0);
}

SDValue SelectionDAG::getConvertRndSat(EVT VT, SDLoc dl,
                                       SDValue Val, SDValue DTy,
                                       SDValue STy, SDValue Rnd, SDValue Sat,
                                       ISD::CvtCode Code) {
  // If the src and dest types are the same and the conversion is between
  // integer types of the same sign or two floats, no conversion is necessary.
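  // (For example, a CVT_SS whose source and destination VT operands are both
  // i32 folds to Val.)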
  if (DTy == STy &&
      (Code == ISD::CVT_UU || Code == ISD::CVT_SS || Code == ISD::CVT_FF))
    return Val;

  FoldingSetNodeID ID;
  SDValue Ops[] = { Val, DTy, STy, Rnd, Sat };
  AddNodeIDNode(ID, ISD::CONVERT_RNDSAT, getVTList(VT), &Ops[0], 5);
  void* IP = 0;
  if (SDNode *E = CSEMap.FindNodeOrInsertPos(ID, IP))
    return SDValue(E, 0);

  CvtRndSatSDNode *N = new (NodeAllocator) CvtRndSatSDNode(VT, dl.getIROrder(),
                                                           dl.getDebugLoc(),
                                                           Ops, 5, Code);
  CSEMap.InsertNode(N, IP);
  AllNodes.push_back(N);
  return SDValue(N, 0);
}

SDValue SelectionDAG::getRegister(unsigned RegNo, EVT VT) {
  FoldingSetNodeID ID;
  AddNodeIDNode(ID, ISD::Register, getVTList(VT), 0, 0);
  ID.AddInteger(RegNo);
  void *IP = 0;
  if (SDNode *E = CSEMap.FindNodeOrInsertPos(ID, IP))
    return SDValue(E, 0);

  SDNode *N = new (NodeAllocator) RegisterSDNode(RegNo, VT);
  CSEMap.InsertNode(N, IP);
  AllNodes.push_back(N);
  return SDValue(N, 0);
}

SDValue SelectionDAG::getRegisterMask(const uint32_t *RegMask) {
  FoldingSetNodeID ID;
  AddNodeIDNode(ID, ISD::RegisterMask, getVTList(MVT::Untyped), 0, 0);
  ID.AddPointer(RegMask);
  void *IP = 0;
  if (SDNode *E = CSEMap.FindNodeOrInsertPos(ID, IP))
    return SDValue(E, 0);

  SDNode *N = new (NodeAllocator) RegisterMaskSDNode(RegMask);
  CSEMap.InsertNode(N, IP);
  AllNodes.push_back(N);
  return SDValue(N, 0);
}

SDValue SelectionDAG::getEHLabel(SDLoc dl, SDValue Root, MCSymbol *Label) {
  FoldingSetNodeID ID;
  SDValue Ops[] = { Root };
  AddNodeIDNode(ID, ISD::EH_LABEL, getVTList(MVT::Other), &Ops[0], 1);
  ID.AddPointer(Label);
  void *IP = 0;
  if (SDNode *E = CSEMap.FindNodeOrInsertPos(ID, IP))
    return SDValue(E, 0);

  SDNode *N = new (NodeAllocator) EHLabelSDNode(dl.getIROrder(),
                                                dl.getDebugLoc(), Root, Label);
  CSEMap.InsertNode(N, IP);
  AllNodes.push_back(N);
  return SDValue(N, 0);
}


SDValue SelectionDAG::getBlockAddress(const BlockAddress *BA, EVT VT,
                                      int64_t Offset,
                                      bool isTarget,
                                      unsigned char TargetFlags) {
  unsigned Opc = isTarget ? ISD::TargetBlockAddress : ISD::BlockAddress;

  FoldingSetNodeID ID;
  AddNodeIDNode(ID, Opc, getVTList(VT), 0, 0);
  ID.AddPointer(BA);
  ID.AddInteger(Offset);
  ID.AddInteger(TargetFlags);
  void *IP = 0;
  if (SDNode *E = CSEMap.FindNodeOrInsertPos(ID, IP))
    return SDValue(E, 0);

  SDNode *N = new (NodeAllocator) BlockAddressSDNode(Opc, VT, BA, Offset,
                                                     TargetFlags);
  CSEMap.InsertNode(N, IP);
  AllNodes.push_back(N);
  return SDValue(N, 0);
}

SDValue SelectionDAG::getSrcValue(const Value *V) {
  assert((!V || V->getType()->isPointerTy()) &&
         "SrcValue is not a pointer?");

  FoldingSetNodeID ID;
  AddNodeIDNode(ID, ISD::SRCVALUE, getVTList(MVT::Other), 0, 0);
  ID.AddPointer(V);

  void *IP = 0;
  if (SDNode *E = CSEMap.FindNodeOrInsertPos(ID, IP))
    return SDValue(E, 0);

  SDNode *N = new (NodeAllocator) SrcValueSDNode(V);
  CSEMap.InsertNode(N, IP);
  AllNodes.push_back(N);
  return SDValue(N, 0);
}

/// getMDNode - Return an MDNodeSDNode which holds an MDNode.
SDValue SelectionDAG::getMDNode(const MDNode *MD) {
  FoldingSetNodeID ID;
  AddNodeIDNode(ID, ISD::MDNODE_SDNODE, getVTList(MVT::Other), 0, 0);
  ID.AddPointer(MD);

  void *IP = 0;
  if (SDNode *E = CSEMap.FindNodeOrInsertPos(ID, IP))
    return SDValue(E, 0);

  SDNode *N = new (NodeAllocator) MDNodeSDNode(MD);
  CSEMap.InsertNode(N, IP);
  AllNodes.push_back(N);
  return SDValue(N, 0);
}

/// getAddrSpaceCast - Return an AddrSpaceCastSDNode.
SDValue SelectionDAG::getAddrSpaceCast(SDLoc dl, EVT VT, SDValue Ptr,
                                       unsigned SrcAS, unsigned DestAS) {
  SDValue Ops[] = {Ptr};
  FoldingSetNodeID ID;
  AddNodeIDNode(ID, ISD::ADDRSPACECAST, getVTList(VT), &Ops[0], 1);
  ID.AddInteger(SrcAS);
  ID.AddInteger(DestAS);

  void *IP = 0;
  if (SDNode *E = CSEMap.FindNodeOrInsertPos(ID, IP))
    return SDValue(E, 0);

  SDNode *N = new (NodeAllocator) AddrSpaceCastSDNode(dl.getIROrder(),
                                                      dl.getDebugLoc(),
                                                      VT, Ptr, SrcAS, DestAS);
  CSEMap.InsertNode(N, IP);
  AllNodes.push_back(N);
  return SDValue(N, 0);
}

/// getShiftAmountOperand - Return the specified value cast to
/// the target's desired shift amount type.
SDValue SelectionDAG::getShiftAmountOperand(EVT LHSTy, SDValue Op) {
  EVT OpTy = Op.getValueType();
  EVT ShTy = TM.getTargetLowering()->getShiftAmountTy(LHSTy);
  if (OpTy == ShTy || OpTy.isVector()) return Op;

  ISD::NodeType Opcode = OpTy.bitsGT(ShTy) ? ISD::TRUNCATE : ISD::ZERO_EXTEND;
  return getNode(Opcode, SDLoc(Op), ShTy, Op);
}

/// CreateStackTemporary - Create a stack temporary, suitable for holding the
/// specified value type.
SDValue SelectionDAG::CreateStackTemporary(EVT VT, unsigned minAlign) {
  MachineFrameInfo *FrameInfo = getMachineFunction().getFrameInfo();
  unsigned ByteSize = VT.getStoreSize();
  Type *Ty = VT.getTypeForEVT(*getContext());
  const TargetLowering *TLI = TM.getTargetLowering();
  unsigned StackAlign =
    std::max((unsigned)TLI->getDataLayout()->getPrefTypeAlignment(Ty),
             minAlign);

  int FrameIdx = FrameInfo->CreateStackObject(ByteSize, StackAlign, false);
  return getFrameIndex(FrameIdx, TLI->getPointerTy());
}

/// CreateStackTemporary - Create a stack temporary suitable for holding
/// either of the specified value types.
SDValue SelectionDAG::CreateStackTemporary(EVT VT1, EVT VT2) {
  unsigned Bytes = std::max(VT1.getStoreSizeInBits(),
                            VT2.getStoreSizeInBits())/8;
  Type *Ty1 = VT1.getTypeForEVT(*getContext());
  Type *Ty2 = VT2.getTypeForEVT(*getContext());
  const TargetLowering *TLI = TM.getTargetLowering();
  const DataLayout *TD = TLI->getDataLayout();
  unsigned Align = std::max(TD->getPrefTypeAlignment(Ty1),
                            TD->getPrefTypeAlignment(Ty2));

  MachineFrameInfo *FrameInfo = getMachineFunction().getFrameInfo();
  int FrameIdx = FrameInfo->CreateStackObject(Bytes, Align, false);
  return getFrameIndex(FrameIdx, TLI->getPointerTy());
}

SDValue SelectionDAG::FoldSetCC(EVT VT, SDValue N1,
                                SDValue N2, ISD::CondCode Cond, SDLoc dl) {
  // These setcc operations always fold.
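  // SETFALSE/SETTRUE (and their "2" variants) ignore the operands entirely,
  // so they fold to a constant no matter what N1 and N2 are; the constant
  // used for "true" depends on the target's boolean content (1 or all-ones).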
  switch (Cond) {
  default: break;
  case ISD::SETFALSE:
  case ISD::SETFALSE2: return getConstant(0, VT);
  case ISD::SETTRUE:
  case ISD::SETTRUE2: {
    const TargetLowering *TLI = TM.getTargetLowering();
    TargetLowering::BooleanContent Cnt = TLI->getBooleanContents(VT.isVector());
    return getConstant(
        Cnt == TargetLowering::ZeroOrNegativeOneBooleanContent ? -1ULL : 1, VT);
  }

  case ISD::SETOEQ:
  case ISD::SETOGT:
  case ISD::SETOGE:
  case ISD::SETOLT:
  case ISD::SETOLE:
  case ISD::SETONE:
  case ISD::SETO:
  case ISD::SETUO:
  case ISD::SETUEQ:
  case ISD::SETUNE:
    assert(!N1.getValueType().isInteger() && "Illegal setcc for integer!");
    break;
  }

  if (ConstantSDNode *N2C = dyn_cast<ConstantSDNode>(N2.getNode())) {
    const APInt &C2 = N2C->getAPIntValue();
    if (ConstantSDNode *N1C = dyn_cast<ConstantSDNode>(N1.getNode())) {
      const APInt &C1 = N1C->getAPIntValue();

      switch (Cond) {
      default: llvm_unreachable("Unknown integer setcc!");
      case ISD::SETEQ:  return getConstant(C1 == C2, VT);
      case ISD::SETNE:  return getConstant(C1 != C2, VT);
      case ISD::SETULT: return getConstant(C1.ult(C2), VT);
      case ISD::SETUGT: return getConstant(C1.ugt(C2), VT);
      case ISD::SETULE: return getConstant(C1.ule(C2), VT);
      case ISD::SETUGE: return getConstant(C1.uge(C2), VT);
      case ISD::SETLT:  return getConstant(C1.slt(C2), VT);
      case ISD::SETGT:  return getConstant(C1.sgt(C2), VT);
      case ISD::SETLE:  return getConstant(C1.sle(C2), VT);
      case ISD::SETGE:  return getConstant(C1.sge(C2), VT);
      }
    }
  }
  if (ConstantFPSDNode *N1C = dyn_cast<ConstantFPSDNode>(N1.getNode())) {
    if (ConstantFPSDNode *N2C = dyn_cast<ConstantFPSDNode>(N2.getNode())) {
      APFloat::cmpResult R = N1C->getValueAPF().compare(N2C->getValueAPF());
      switch (Cond) {
      default: break;
      case ISD::SETEQ:  if (R==APFloat::cmpUnordered)
                          return getUNDEF(VT);
                        // fall through
      case ISD::SETOEQ: return getConstant(R==APFloat::cmpEqual, VT);
      case ISD::SETNE:  if (R==APFloat::cmpUnordered)
                          return getUNDEF(VT);
                        // fall through
      case ISD::SETONE: return getConstant(R==APFloat::cmpGreaterThan ||
                                           R==APFloat::cmpLessThan, VT);
      case ISD::SETLT:  if (R==APFloat::cmpUnordered)
                          return getUNDEF(VT);
                        // fall through
      case ISD::SETOLT: return getConstant(R==APFloat::cmpLessThan, VT);
      case ISD::SETGT:  if (R==APFloat::cmpUnordered)
                          return getUNDEF(VT);
                        // fall through
      case ISD::SETOGT: return getConstant(R==APFloat::cmpGreaterThan, VT);
      case ISD::SETLE:  if (R==APFloat::cmpUnordered)
                          return getUNDEF(VT);
                        // fall through
      case ISD::SETOLE: return getConstant(R==APFloat::cmpLessThan ||
                                           R==APFloat::cmpEqual, VT);
      case ISD::SETGE:  if (R==APFloat::cmpUnordered)
                          return getUNDEF(VT);
                        // fall through
      case ISD::SETOGE: return getConstant(R==APFloat::cmpGreaterThan ||
                                           R==APFloat::cmpEqual, VT);
      case ISD::SETO:   return getConstant(R!=APFloat::cmpUnordered, VT);
      case ISD::SETUO:  return getConstant(R==APFloat::cmpUnordered, VT);
      case ISD::SETUEQ: return getConstant(R==APFloat::cmpUnordered ||
                                           R==APFloat::cmpEqual, VT);
      case ISD::SETUNE: return getConstant(R!=APFloat::cmpEqual, VT);
      case ISD::SETULT: return getConstant(R==APFloat::cmpUnordered ||
                                           R==APFloat::cmpLessThan, VT);
      case ISD::SETUGT: return getConstant(R==APFloat::cmpGreaterThan ||
                                           R==APFloat::cmpUnordered, VT);
      case ISD::SETULE: return getConstant(R!=APFloat::cmpGreaterThan, VT);
      case ISD::SETUGE: return getConstant(R!=APFloat::cmpLessThan, VT);
      }
    } else {
      // Ensure that the constant occurs on the RHS.
      ISD::CondCode SwappedCond = ISD::getSetCCSwappedOperands(Cond);
      MVT CompVT = N1.getValueType().getSimpleVT();
      if (!TM.getTargetLowering()->isCondCodeLegal(SwappedCond, CompVT))
        return SDValue();

      return getSetCC(dl, VT, N2, N1, SwappedCond);
    }
  }

  // Could not fold it.
  return SDValue();
}

/// SignBitIsZero - Return true if the sign bit of Op is known to be zero.  We
/// use this predicate to simplify operations downstream.
bool SelectionDAG::SignBitIsZero(SDValue Op, unsigned Depth) const {
  // This predicate is not safe for vector operations.
  if (Op.getValueType().isVector())
    return false;

  unsigned BitWidth = Op.getValueType().getScalarType().getSizeInBits();
  return MaskedValueIsZero(Op, APInt::getSignBit(BitWidth), Depth);
}

/// MaskedValueIsZero - Return true if 'V & Mask' is known to be zero.  We use
/// this predicate to simplify operations downstream.  Mask is known to be zero
/// for bits that V cannot have.
bool SelectionDAG::MaskedValueIsZero(SDValue Op, const APInt &Mask,
                                     unsigned Depth) const {
  APInt KnownZero, KnownOne;
  ComputeMaskedBits(Op, KnownZero, KnownOne, Depth);
  assert((KnownZero & KnownOne) == 0 && "Bits known to be one AND zero?");
  return (KnownZero & Mask) == Mask;
}

/// ComputeMaskedBits - Determine which of the bits specified in Mask are
/// known to be either zero or one and return them in the KnownZero/KnownOne
/// bitsets.  This code only analyzes bits in Mask, in order to short-circuit
/// processing.
void SelectionDAG::ComputeMaskedBits(SDValue Op, APInt &KnownZero,
                                     APInt &KnownOne, unsigned Depth) const {
  const TargetLowering *TLI = TM.getTargetLowering();
  unsigned BitWidth = Op.getValueType().getScalarType().getSizeInBits();

  KnownZero = KnownOne = APInt(BitWidth, 0);   // Don't know anything.
  if (Depth == 6)
    return;  // Limit search depth.

  APInt KnownZero2, KnownOne2;

  switch (Op.getOpcode()) {
  case ISD::Constant:
    // We know all of the bits for a constant!
    KnownOne = cast<ConstantSDNode>(Op)->getAPIntValue();
    KnownZero = ~KnownOne;
    return;
  case ISD::AND:
    // If either the LHS or the RHS are Zero, the result is zero.
    ComputeMaskedBits(Op.getOperand(1), KnownZero, KnownOne, Depth+1);
    ComputeMaskedBits(Op.getOperand(0), KnownZero2, KnownOne2, Depth+1);
    assert((KnownZero & KnownOne) == 0 && "Bits known to be one AND zero?");
    assert((KnownZero2 & KnownOne2) == 0 && "Bits known to be one AND zero?");

    // Output known-1 bits are only known if set in both the LHS & RHS.
    KnownOne &= KnownOne2;
    // Output known-0 bits are known to be clear if they are zero in either
    // the LHS or the RHS.
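    // For example, in (and X, 0xFF00) with i32 operands the constant operand
    // has bits 0-7 and 16-31 known zero, so those bits are known zero in the
    // result regardless of X.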
    KnownZero |= KnownZero2;
    return;
  case ISD::OR:
    ComputeMaskedBits(Op.getOperand(1), KnownZero, KnownOne, Depth+1);
    ComputeMaskedBits(Op.getOperand(0), KnownZero2, KnownOne2, Depth+1);
    assert((KnownZero & KnownOne) == 0 && "Bits known to be one AND zero?");
    assert((KnownZero2 & KnownOne2) == 0 && "Bits known to be one AND zero?");

    // Output known-0 bits are only known if clear in both the LHS & RHS.
    KnownZero &= KnownZero2;
    // Output known-1 bits are known to be set if they are set in either
    // the LHS or the RHS.
    KnownOne |= KnownOne2;
    return;
  case ISD::XOR: {
    ComputeMaskedBits(Op.getOperand(1), KnownZero, KnownOne, Depth+1);
    ComputeMaskedBits(Op.getOperand(0), KnownZero2, KnownOne2, Depth+1);
    assert((KnownZero & KnownOne) == 0 && "Bits known to be one AND zero?");
    assert((KnownZero2 & KnownOne2) == 0 && "Bits known to be one AND zero?");

    // Output known-0 bits are known if they are clear or set in both the
    // LHS & RHS.
    APInt KnownZeroOut = (KnownZero & KnownZero2) | (KnownOne & KnownOne2);
    // Output known-1 bits are known to be set if set in only one of the
    // LHS or RHS.
    KnownOne = (KnownZero & KnownOne2) | (KnownOne & KnownZero2);
    KnownZero = KnownZeroOut;
    return;
  }
  case ISD::MUL: {
    ComputeMaskedBits(Op.getOperand(1), KnownZero, KnownOne, Depth+1);
    ComputeMaskedBits(Op.getOperand(0), KnownZero2, KnownOne2, Depth+1);
    assert((KnownZero & KnownOne) == 0 && "Bits known to be one AND zero?");
    assert((KnownZero2 & KnownOne2) == 0 && "Bits known to be one AND zero?");

    // If low bits are zero in either operand, output low known-0 bits.
    // Also compute a conservative estimate for high known-0 bits.
    // More trickiness is possible, but this is sufficient for the
    // interesting case of alignment computation.
    KnownOne.clearAllBits();
    unsigned TrailZ = KnownZero.countTrailingOnes() +
                      KnownZero2.countTrailingOnes();
    unsigned LeadZ = std::max(KnownZero.countLeadingOnes() +
                              KnownZero2.countLeadingOnes(),
                              BitWidth) - BitWidth;

    TrailZ = std::min(TrailZ, BitWidth);
    LeadZ = std::min(LeadZ, BitWidth);
    KnownZero = APInt::getLowBitsSet(BitWidth, TrailZ) |
                APInt::getHighBitsSet(BitWidth, LeadZ);
    return;
  }
  case ISD::UDIV: {
    // For the purposes of computing leading zeros we can conservatively
    // treat a udiv as a logical right shift by the power of 2 known to
    // be less than the denominator.
    ComputeMaskedBits(Op.getOperand(0), KnownZero2, KnownOne2, Depth+1);
    unsigned LeadZ = KnownZero2.countLeadingOnes();

    KnownOne2.clearAllBits();
    KnownZero2.clearAllBits();
    ComputeMaskedBits(Op.getOperand(1), KnownZero2, KnownOne2, Depth+1);
    unsigned RHSUnknownLeadingOnes = KnownOne2.countLeadingZeros();
    if (RHSUnknownLeadingOnes != BitWidth)
      LeadZ = std::min(BitWidth,
                       LeadZ + BitWidth - RHSUnknownLeadingOnes - 1);

    KnownZero = APInt::getHighBitsSet(BitWidth, LeadZ);
    return;
  }
  case ISD::SELECT:
    ComputeMaskedBits(Op.getOperand(2), KnownZero, KnownOne, Depth+1);
    ComputeMaskedBits(Op.getOperand(1), KnownZero2, KnownOne2, Depth+1);
    assert((KnownZero & KnownOne) == 0 && "Bits known to be one AND zero?");
    assert((KnownZero2 & KnownOne2) == 0 && "Bits known to be one AND zero?");

    // Only known if known in both the LHS and RHS.
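    // A select can produce either arm, so a bit is known only if both arms
    // agree on it; everything else becomes unknown.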
    KnownOne &= KnownOne2;
    KnownZero &= KnownZero2;
    return;
  case ISD::SELECT_CC:
    ComputeMaskedBits(Op.getOperand(3), KnownZero, KnownOne, Depth+1);
    ComputeMaskedBits(Op.getOperand(2), KnownZero2, KnownOne2, Depth+1);
    assert((KnownZero & KnownOne) == 0 && "Bits known to be one AND zero?");
    assert((KnownZero2 & KnownOne2) == 0 && "Bits known to be one AND zero?");

    // Only known if known in both the LHS and RHS.
    KnownOne &= KnownOne2;
    KnownZero &= KnownZero2;
    return;
  case ISD::SADDO:
  case ISD::UADDO:
  case ISD::SSUBO:
  case ISD::USUBO:
  case ISD::SMULO:
  case ISD::UMULO:
    if (Op.getResNo() != 1)
      return;
    // The boolean result conforms to getBooleanContents.  Fall through.
  case ISD::SETCC:
    // If we know the result of a setcc has the top bits zero, use this info.
    if (TLI->getBooleanContents(Op.getValueType().isVector()) ==
        TargetLowering::ZeroOrOneBooleanContent && BitWidth > 1)
      KnownZero |= APInt::getHighBitsSet(BitWidth, BitWidth - 1);
    return;
  case ISD::SHL:
    // (shl X, C1) & C2 == 0   iff   (X & C2 >>u C1) == 0
    if (ConstantSDNode *SA = dyn_cast<ConstantSDNode>(Op.getOperand(1))) {
      unsigned ShAmt = SA->getZExtValue();

      // If the shift count is an invalid immediate, don't do anything.
      if (ShAmt >= BitWidth)
        return;

      ComputeMaskedBits(Op.getOperand(0), KnownZero, KnownOne, Depth+1);
      assert((KnownZero & KnownOne) == 0 && "Bits known to be one AND zero?");
      KnownZero <<= ShAmt;
      KnownOne  <<= ShAmt;
      // low bits known zero.
      KnownZero |= APInt::getLowBitsSet(BitWidth, ShAmt);
    }
    return;
  case ISD::SRL:
    // (ushr X, C1) & C2 == 0   iff  (-1 >> C1) & C2 == 0
    if (ConstantSDNode *SA = dyn_cast<ConstantSDNode>(Op.getOperand(1))) {
      unsigned ShAmt = SA->getZExtValue();

      // If the shift count is an invalid immediate, don't do anything.
      if (ShAmt >= BitWidth)
        return;

      ComputeMaskedBits(Op.getOperand(0), KnownZero, KnownOne, Depth+1);
      assert((KnownZero & KnownOne) == 0 && "Bits known to be one AND zero?");
      KnownZero = KnownZero.lshr(ShAmt);
      KnownOne  = KnownOne.lshr(ShAmt);

      APInt HighBits = APInt::getHighBitsSet(BitWidth, ShAmt);
      KnownZero |= HighBits;  // High bits known zero.
    }
    return;
  case ISD::SRA:
    if (ConstantSDNode *SA = dyn_cast<ConstantSDNode>(Op.getOperand(1))) {
      unsigned ShAmt = SA->getZExtValue();

      // If the shift count is an invalid immediate, don't do anything.
      if (ShAmt >= BitWidth)
        return;

      // If any of the demanded bits are produced by the sign extension, we
      // also demand the input sign bit.
      APInt HighBits = APInt::getHighBitsSet(BitWidth, ShAmt);

      ComputeMaskedBits(Op.getOperand(0), KnownZero, KnownOne, Depth+1);
      assert((KnownZero & KnownOne) == 0 && "Bits known to be one AND zero?");
      KnownZero = KnownZero.lshr(ShAmt);
      KnownOne  = KnownOne.lshr(ShAmt);

      // Handle the sign bits.
      APInt SignBit = APInt::getSignBit(BitWidth);
      SignBit = SignBit.lshr(ShAmt);  // Adjust to where it is now in the mask.

      if (KnownZero.intersects(SignBit)) {
        KnownZero |= HighBits;  // New bits are known zero.
      } else if (KnownOne.intersects(SignBit)) {
        KnownOne  |= HighBits;  // New bits are known one.
      }
    }
    return;
  case ISD::SIGN_EXTEND_INREG: {
    EVT EVT = cast<VTSDNode>(Op.getOperand(1))->getVT();
    unsigned EBits = EVT.getScalarType().getSizeInBits();

    // Sign extension.  Compute the demanded bits in the result that are not
    // present in the input.
    APInt NewBits = APInt::getHighBitsSet(BitWidth, BitWidth - EBits);

    APInt InSignBit = APInt::getSignBit(EBits);
    APInt InputDemandedBits = APInt::getLowBitsSet(BitWidth, EBits);

    // If the sign extended bits are demanded, we know that the sign
    // bit is demanded.
    InSignBit = InSignBit.zext(BitWidth);
    if (NewBits.getBoolValue())
      InputDemandedBits |= InSignBit;

    ComputeMaskedBits(Op.getOperand(0), KnownZero, KnownOne, Depth+1);
    KnownOne &= InputDemandedBits;
    KnownZero &= InputDemandedBits;
    assert((KnownZero & KnownOne) == 0 && "Bits known to be one AND zero?");

    // If the sign bit of the input is known set or clear, then we know the
    // top bits of the result.
    if (KnownZero.intersects(InSignBit)) {        // Input sign bit known clear
      KnownZero |= NewBits;
      KnownOne  &= ~NewBits;
    } else if (KnownOne.intersects(InSignBit)) {  // Input sign bit known set
      KnownOne  |= NewBits;
      KnownZero &= ~NewBits;
    } else {                                      // Input sign bit unknown
      KnownZero &= ~NewBits;
      KnownOne  &= ~NewBits;
    }
    return;
  }
  case ISD::CTTZ:
  case ISD::CTTZ_ZERO_UNDEF:
  case ISD::CTLZ:
  case ISD::CTLZ_ZERO_UNDEF:
  case ISD::CTPOP: {
    unsigned LowBits = Log2_32(BitWidth)+1;
    KnownZero = APInt::getHighBitsSet(BitWidth, BitWidth - LowBits);
    KnownOne.clearAllBits();
    return;
  }
  case ISD::LOAD: {
    LoadSDNode *LD = cast<LoadSDNode>(Op);
    // If this is a ZEXTLoad and we are looking at the loaded value.
    if (ISD::isZEXTLoad(Op.getNode()) && Op.getResNo() == 0) {
      EVT VT = LD->getMemoryVT();
      unsigned MemBits = VT.getScalarType().getSizeInBits();
      KnownZero |= APInt::getHighBitsSet(BitWidth, BitWidth - MemBits);
    } else if (const MDNode *Ranges = LD->getRanges()) {
      computeMaskedBitsLoad(*Ranges, KnownZero);
    }
    return;
  }
  case ISD::ZERO_EXTEND: {
    EVT InVT = Op.getOperand(0).getValueType();
    unsigned InBits = InVT.getScalarType().getSizeInBits();
    APInt NewBits = APInt::getHighBitsSet(BitWidth, BitWidth - InBits);
    KnownZero = KnownZero.trunc(InBits);
    KnownOne = KnownOne.trunc(InBits);
    ComputeMaskedBits(Op.getOperand(0), KnownZero, KnownOne, Depth+1);
    KnownZero = KnownZero.zext(BitWidth);
    KnownOne = KnownOne.zext(BitWidth);
    KnownZero |= NewBits;
    return;
  }
  case ISD::SIGN_EXTEND: {
    EVT InVT = Op.getOperand(0).getValueType();
    unsigned InBits = InVT.getScalarType().getSizeInBits();
    APInt NewBits = APInt::getHighBitsSet(BitWidth, BitWidth - InBits);

    KnownZero = KnownZero.trunc(InBits);
    KnownOne = KnownOne.trunc(InBits);
    ComputeMaskedBits(Op.getOperand(0), KnownZero, KnownOne, Depth+1);

    // Note if the sign bit is known to be zero or one.
    bool SignBitKnownZero = KnownZero.isNegative();
    bool SignBitKnownOne  = KnownOne.isNegative();
    assert(!(SignBitKnownZero && SignBitKnownOne) &&
           "Sign bit can't be known to be both zero and one!");

    KnownZero = KnownZero.zext(BitWidth);
    KnownOne = KnownOne.zext(BitWidth);

    // If the sign bit is known zero or one, the top bits match.
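    // For example, sign-extending an i8 known non-negative (top bit zero) to
    // i32 makes bits 8-31 known zero; were the i8 sign bit known one, bits
    // 8-31 would instead be known one.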
    if (SignBitKnownZero)
      KnownZero |= NewBits;
    else if (SignBitKnownOne)
      KnownOne  |= NewBits;
    return;
  }
  case ISD::ANY_EXTEND: {
    EVT InVT = Op.getOperand(0).getValueType();
    unsigned InBits = InVT.getScalarType().getSizeInBits();
    KnownZero = KnownZero.trunc(InBits);
    KnownOne = KnownOne.trunc(InBits);
    ComputeMaskedBits(Op.getOperand(0), KnownZero, KnownOne, Depth+1);
    KnownZero = KnownZero.zext(BitWidth);
    KnownOne = KnownOne.zext(BitWidth);
    return;
  }
  case ISD::TRUNCATE: {
    EVT InVT = Op.getOperand(0).getValueType();
    unsigned InBits = InVT.getScalarType().getSizeInBits();
    KnownZero = KnownZero.zext(InBits);
    KnownOne = KnownOne.zext(InBits);
    ComputeMaskedBits(Op.getOperand(0), KnownZero, KnownOne, Depth+1);
    assert((KnownZero & KnownOne) == 0 && "Bits known to be one AND zero?");
    KnownZero = KnownZero.trunc(BitWidth);
    KnownOne = KnownOne.trunc(BitWidth);
    break;
  }
  case ISD::AssertZext: {
    EVT VT = cast<VTSDNode>(Op.getOperand(1))->getVT();
    APInt InMask = APInt::getLowBitsSet(BitWidth, VT.getSizeInBits());
    ComputeMaskedBits(Op.getOperand(0), KnownZero, KnownOne, Depth+1);
    KnownZero |= (~InMask);
    KnownOne  &= (~KnownZero);
    return;
  }
  case ISD::FGETSIGN:
    // All bits are zero except the low bit.
    KnownZero = APInt::getHighBitsSet(BitWidth, BitWidth - 1);
    return;

  case ISD::SUB: {
    if (ConstantSDNode *CLHS = dyn_cast<ConstantSDNode>(Op.getOperand(0))) {
      // We know that the top bits of C-X are clear if X contains fewer bits
      // than C (i.e. no wrap-around can happen).  For example, 20-X is
      // positive if we can prove that X is >= 0 and < 16.
      if (CLHS->getAPIntValue().isNonNegative()) {
        unsigned NLZ = (CLHS->getAPIntValue()+1).countLeadingZeros();
        // NLZ can't be BitWidth with no sign bit
        APInt MaskV = APInt::getHighBitsSet(BitWidth, NLZ+1);
        ComputeMaskedBits(Op.getOperand(1), KnownZero2, KnownOne2, Depth+1);

        // If all of the MaskV bits are known to be zero, then we know the
        // output top bits are zero, because we now know that the output is
        // from [0-C].
        if ((KnownZero2 & MaskV) == MaskV) {
          unsigned NLZ2 = CLHS->getAPIntValue().countLeadingZeros();
          // Top bits known zero.
          KnownZero = APInt::getHighBitsSet(BitWidth, NLZ2);
        }
      }
    }
  }
  // fall through
  case ISD::ADD:
  case ISD::ADDE: {
    // Output low known-0 bits are clear up to the minimum number of trailing
    // zero bits shared by both the LHS and the RHS.  For example, 8+(X<<3) is
    // known to have the low 3 bits clear.
    ComputeMaskedBits(Op.getOperand(0), KnownZero2, KnownOne2, Depth+1);
    assert((KnownZero2 & KnownOne2) == 0 && "Bits known to be one AND zero?");
    unsigned KnownZeroOut = KnownZero2.countTrailingOnes();

    ComputeMaskedBits(Op.getOperand(1), KnownZero2, KnownOne2, Depth+1);
    assert((KnownZero2 & KnownOne2) == 0 && "Bits known to be one AND zero?");
    KnownZeroOut = std::min(KnownZeroOut,
                            KnownZero2.countTrailingOnes());

    if (Op.getOpcode() == ISD::ADD) {
      KnownZero |= APInt::getLowBitsSet(BitWidth, KnownZeroOut);
      return;
    }

    // With ADDE, a carry bit may be added in, so we can only use this
    // information if we know (at least) that the low two bits are clear.
    // We then tell the caller that the low bit is unknown but that the other
    // low bits are known zero.
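    // For example, if both addends have their low four bits clear, the sum is
    // a multiple of 16 plus an unknown carry, so bits 1-3 are still known
    // zero while bit 0 may be set by the carry itself.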
    if (KnownZeroOut >= 2) // ADDE
      KnownZero |= APInt::getBitsSet(BitWidth, 1, KnownZeroOut);
    return;
  }
  case ISD::SREM:
    if (ConstantSDNode *Rem = dyn_cast<ConstantSDNode>(Op.getOperand(1))) {
      const APInt &RA = Rem->getAPIntValue().abs();
      if (RA.isPowerOf2()) {
        APInt LowBits = RA - 1;
        ComputeMaskedBits(Op.getOperand(0), KnownZero2, KnownOne2, Depth+1);

        // The low bits of the first operand are unchanged by the srem.
        KnownZero = KnownZero2 & LowBits;
        KnownOne = KnownOne2 & LowBits;

        // If the first operand is non-negative or has all low bits zero, then
        // the upper bits are all zero.
        if (KnownZero2[BitWidth-1] || ((KnownZero2 & LowBits) == LowBits))
          KnownZero |= ~LowBits;

        // If the first operand is negative and not all low bits are zero, then
        // the upper bits are all one.
        if (KnownOne2[BitWidth-1] && ((KnownOne2 & LowBits) != 0))
          KnownOne |= ~LowBits;
        assert((KnownZero & KnownOne) == 0 && "Bits known to be one AND zero?");
      }
    }
    return;
  case ISD::UREM: {
    if (ConstantSDNode *Rem = dyn_cast<ConstantSDNode>(Op.getOperand(1))) {
      const APInt &RA = Rem->getAPIntValue();
      if (RA.isPowerOf2()) {
        APInt LowBits = (RA - 1);
        KnownZero |= ~LowBits;
        ComputeMaskedBits(Op.getOperand(0), KnownZero, KnownOne, Depth+1);
        assert((KnownZero & KnownOne) == 0 && "Bits known to be one AND zero?");
        break;
      }
    }

    // Since the result is less than or equal to either operand, any leading
    // zero bits in either operand must also exist in the result.
    ComputeMaskedBits(Op.getOperand(0), KnownZero, KnownOne, Depth+1);
    ComputeMaskedBits(Op.getOperand(1), KnownZero2, KnownOne2, Depth+1);

    uint32_t Leaders = std::max(KnownZero.countLeadingOnes(),
                                KnownZero2.countLeadingOnes());
    KnownOne.clearAllBits();
    KnownZero = APInt::getHighBitsSet(BitWidth, Leaders);
    return;
  }
  case ISD::FrameIndex:
  case ISD::TargetFrameIndex:
    if (unsigned Align = InferPtrAlignment(Op)) {
      // The low bits are known zero if the pointer is aligned.
      KnownZero = APInt::getLowBitsSet(BitWidth, Log2_32(Align));
      return;
    }
    break;

  default:
    if (Op.getOpcode() < ISD::BUILTIN_OP_END)
      break;
    // Fallthrough
  case ISD::INTRINSIC_WO_CHAIN:
  case ISD::INTRINSIC_W_CHAIN:
  case ISD::INTRINSIC_VOID:
    // Allow the target to implement this method for its nodes.
    TLI->computeMaskedBitsForTargetNode(Op, KnownZero, KnownOne, *this, Depth);
    return;
  }
}

/// ComputeNumSignBits - Return the number of times the sign bit of the
/// register is replicated into the other bits.  We know that at least 1 bit
/// is always equal to the sign bit (itself), but other cases can give us
/// information.  For example, immediately after an "SRA X, 2", we know that
/// the top 3 bits are all equal to each other, so we return 3.
unsigned SelectionDAG::ComputeNumSignBits(SDValue Op, unsigned Depth) const {
  const TargetLowering *TLI = TM.getTargetLowering();
  EVT VT = Op.getValueType();
  assert(VT.isInteger() && "Invalid VT!");
  unsigned VTBits = VT.getScalarType().getSizeInBits();
  unsigned Tmp, Tmp2;
  unsigned FirstAnswer = 1;

  if (Depth == 6)
    return 1;  // Limit search depth.
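  // (This is the same depth cutoff of six used by ComputeMaskedBits above.)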

  switch (Op.getOpcode()) {
  default: break;
  case ISD::AssertSext:
    Tmp = cast<VTSDNode>(Op.getOperand(1))->getVT().getSizeInBits();
    return VTBits-Tmp+1;
  case ISD::AssertZext:
    Tmp = cast<VTSDNode>(Op.getOperand(1))->getVT().getSizeInBits();
    return VTBits-Tmp;

  case ISD::Constant: {
    const APInt &Val = cast<ConstantSDNode>(Op)->getAPIntValue();
    return Val.getNumSignBits();
  }

  case ISD::SIGN_EXTEND:
    Tmp =
      VTBits-Op.getOperand(0).getValueType().getScalarType().getSizeInBits();
    return ComputeNumSignBits(Op.getOperand(0), Depth+1) + Tmp;

  case ISD::SIGN_EXTEND_INREG:
    // Max of the input and what this extends.
    Tmp =
      cast<VTSDNode>(Op.getOperand(1))->getVT().getScalarType().getSizeInBits();
    Tmp = VTBits-Tmp+1;

    Tmp2 = ComputeNumSignBits(Op.getOperand(0), Depth+1);
    return std::max(Tmp, Tmp2);

  case ISD::SRA:
    Tmp = ComputeNumSignBits(Op.getOperand(0), Depth+1);
    // SRA X, C   -> adds C sign bits.
    if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op.getOperand(1))) {
      Tmp += C->getZExtValue();
      if (Tmp > VTBits) Tmp = VTBits;
    }
    return Tmp;
  case ISD::SHL:
    if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op.getOperand(1))) {
      // shl destroys sign bits.
      Tmp = ComputeNumSignBits(Op.getOperand(0), Depth+1);
      if (C->getZExtValue() >= VTBits ||    // Bad shift.
          C->getZExtValue() >= Tmp) break;  // Shifted all sign bits out.
      return Tmp - C->getZExtValue();
    }
    break;
  case ISD::AND:
  case ISD::OR:
  case ISD::XOR:  // NOT is handled here.
    // Logical binary ops preserve the number of sign bits at the worst.
    Tmp = ComputeNumSignBits(Op.getOperand(0), Depth+1);
    if (Tmp != 1) {
      Tmp2 = ComputeNumSignBits(Op.getOperand(1), Depth+1);
      FirstAnswer = std::min(Tmp, Tmp2);
      // We computed what we know about the sign bits as our first
      // answer.  Now proceed to the generic code that uses
      // ComputeMaskedBits, and pick whichever answer is better.
    }
    break;

  case ISD::SELECT:
    Tmp = ComputeNumSignBits(Op.getOperand(1), Depth+1);
    if (Tmp == 1) return 1;  // Early out.
    Tmp2 = ComputeNumSignBits(Op.getOperand(2), Depth+1);
    return std::min(Tmp, Tmp2);

  case ISD::SADDO:
  case ISD::UADDO:
  case ISD::SSUBO:
  case ISD::USUBO:
  case ISD::SMULO:
  case ISD::UMULO:
    if (Op.getResNo() != 1)
      break;
    // The boolean result conforms to getBooleanContents.  Fall through.
  case ISD::SETCC:
    // If setcc returns 0/-1, all bits are sign bits.
    if (TLI->getBooleanContents(Op.getValueType().isVector()) ==
        TargetLowering::ZeroOrNegativeOneBooleanContent)
      return VTBits;
    break;
  case ISD::ROTL:
  case ISD::ROTR:
    if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op.getOperand(1))) {
      unsigned RotAmt = C->getZExtValue() & (VTBits-1);

      // Handle rotate right by N like a rotate left by 32-N.
      if (Op.getOpcode() == ISD::ROTR)
        RotAmt = (VTBits-RotAmt) & (VTBits-1);

      // If we aren't rotating out all of the known-in sign bits, return the
      // number that are left.  This handles rotl(sext(x), 1) for example.
      Tmp = ComputeNumSignBits(Op.getOperand(0), Depth+1);
      if (Tmp > RotAmt+1) return Tmp-RotAmt;
    }
    break;
  case ISD::ADD:
    // Add can have at most one carry bit.  Thus we know that the output
    // is, at worst, one more bit than the inputs.
    Tmp = ComputeNumSignBits(Op.getOperand(0), Depth+1);
    if (Tmp == 1) return 1;  // Early out.

    // Special case decrementing a value (ADD X, -1):
    if (ConstantSDNode *CRHS = dyn_cast<ConstantSDNode>(Op.getOperand(1)))
      if (CRHS->isAllOnesValue()) {
        APInt KnownZero, KnownOne;
        ComputeMaskedBits(Op.getOperand(0), KnownZero, KnownOne, Depth+1);

        // If the input is known to be 0 or 1, the output is 0/-1, which is all
        // sign bits set.
        if ((KnownZero | APInt(VTBits, 1)).isAllOnesValue())
          return VTBits;

        // If we are subtracting one from a positive number, there is no carry
        // out of the result.
        if (KnownZero.isNegative())
          return Tmp;
      }

    Tmp2 = ComputeNumSignBits(Op.getOperand(1), Depth+1);
    if (Tmp2 == 1) return 1;
    return std::min(Tmp, Tmp2)-1;

  case ISD::SUB:
    Tmp2 = ComputeNumSignBits(Op.getOperand(1), Depth+1);
    if (Tmp2 == 1) return 1;

    // Handle NEG.
    if (ConstantSDNode *CLHS = dyn_cast<ConstantSDNode>(Op.getOperand(0)))
      if (CLHS->isNullValue()) {
        APInt KnownZero, KnownOne;
        ComputeMaskedBits(Op.getOperand(1), KnownZero, KnownOne, Depth+1);
        // If the input is known to be 0 or 1, the output is 0/-1, which is all
        // sign bits set.
        if ((KnownZero | APInt(VTBits, 1)).isAllOnesValue())
          return VTBits;

        // If the input is known to be positive (the sign bit is known clear),
        // the output of the NEG has the same number of sign bits as the input.
        if (KnownZero.isNegative())
          return Tmp2;

        // Otherwise, we treat this like a SUB.
      }

    // Sub can have at most one carry bit.  Thus we know that the output
    // is, at worst, one more bit than the inputs.
    Tmp = ComputeNumSignBits(Op.getOperand(0), Depth+1);
    if (Tmp == 1) return 1;  // Early out.
    return std::min(Tmp, Tmp2)-1;
  case ISD::TRUNCATE:
    // FIXME: it's tricky to do anything useful for this, but it is an
    // important case for targets like X86.
    break;
  }

  // If we are looking at the loaded value of the SDNode.
  if (Op.getResNo() == 0) {
    // Handle LOADX separately here.  EXTLOAD case will fallthrough.
    if (LoadSDNode *LD = dyn_cast<LoadSDNode>(Op)) {
      unsigned ExtType = LD->getExtensionType();
      switch (ExtType) {
        default: break;
        case ISD::SEXTLOAD:    // '17' bits known
          Tmp = LD->getMemoryVT().getScalarType().getSizeInBits();
          return VTBits-Tmp+1;
        case ISD::ZEXTLOAD:    // '16' bits known
          Tmp = LD->getMemoryVT().getScalarType().getSizeInBits();
          return VTBits-Tmp;
      }
    }
  }

  // Allow the target to implement this method for its nodes.
  if (Op.getOpcode() >= ISD::BUILTIN_OP_END ||
      Op.getOpcode() == ISD::INTRINSIC_WO_CHAIN ||
      Op.getOpcode() == ISD::INTRINSIC_W_CHAIN ||
      Op.getOpcode() == ISD::INTRINSIC_VOID) {
    unsigned NumBits = TLI->ComputeNumSignBitsForTargetNode(Op, Depth);
    if (NumBits > 1) FirstAnswer = std::max(FirstAnswer, NumBits);
  }

  // Finally, if we can prove that the top bits of the result are 0's or 1's,
  // use this information.
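  // For example, if the top 16 bits of an i32 value are known zero, the value
  // has at least 16 sign bits, since all of those bits equal the (zero) sign
  // bit.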
  APInt KnownZero, KnownOne;
  ComputeMaskedBits(Op, KnownZero, KnownOne, Depth);

  APInt Mask;
  if (KnownZero.isNegative()) {        // sign bit is 0
    Mask = KnownZero;
  } else if (KnownOne.isNegative()) {  // sign bit is 1
    Mask = KnownOne;
  } else {
    // Nothing known.
    return FirstAnswer;
  }

  // Okay, we know that the sign bit in Mask is set.  Use CLZ to determine
  // the number of identical bits in the top of the input value.
  Mask = ~Mask;
  Mask <<= Mask.getBitWidth()-VTBits;
  // Return # leading zeros.  We use 'min' here in case Val was zero before
  // shifting.  We don't want to return '64' as for an i32 "0".
  return std::max(FirstAnswer, std::min(VTBits, Mask.countLeadingZeros()));
}

/// isBaseWithConstantOffset - Return true if the specified operand is an
/// ISD::ADD with a ConstantSDNode on the right-hand side, or if it is an
/// ISD::OR with a ConstantSDNode that is guaranteed to have the same
/// semantics as an ADD.  This handles the equivalence:
///     X|Cst == X+Cst iff X&Cst = 0.
bool SelectionDAG::isBaseWithConstantOffset(SDValue Op) const {
  if ((Op.getOpcode() != ISD::ADD && Op.getOpcode() != ISD::OR) ||
      !isa<ConstantSDNode>(Op.getOperand(1)))
    return false;

  if (Op.getOpcode() == ISD::OR &&
      !MaskedValueIsZero(Op.getOperand(0),
                     cast<ConstantSDNode>(Op.getOperand(1))->getAPIntValue()))
    return false;

  return true;
}

bool SelectionDAG::isKnownNeverNaN(SDValue Op) const {
  // If we're told that NaNs won't happen, assume they won't.
  if (getTarget().Options.NoNaNsFPMath)
    return true;

  // If the value is a constant, we can obviously see if it is a NaN or not.
  if (const ConstantFPSDNode *C = dyn_cast<ConstantFPSDNode>(Op))
    return !C->getValueAPF().isNaN();

  // TODO: Recognize more cases here.

  return false;
}

bool SelectionDAG::isKnownNeverZero(SDValue Op) const {
  // If the value is a constant, we can obviously see if it is a zero or not.
  if (const ConstantFPSDNode *C = dyn_cast<ConstantFPSDNode>(Op))
    return !C->isZero();

  // TODO: Recognize more cases here.
  switch (Op.getOpcode()) {
  default: break;
  case ISD::OR:
    if (const ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op.getOperand(1)))
      return !C->isNullValue();
    break;
  }

  return false;
}

bool SelectionDAG::isEqualTo(SDValue A, SDValue B) const {
  // Check the obvious case.
  if (A == B) return true;

  // Check for negative and positive zero, which compare equal but are not
  // bitwise identical.
  if (const ConstantFPSDNode *CA = dyn_cast<ConstantFPSDNode>(A))
    if (const ConstantFPSDNode *CB = dyn_cast<ConstantFPSDNode>(B))
      if (CA->isZero() && CB->isZero()) return true;

  // Otherwise they may not be equal.
  return false;
}

/// getNode - Gets or creates the specified node.
///
SDValue SelectionDAG::getNode(unsigned Opcode, SDLoc DL, EVT VT) {
  FoldingSetNodeID ID;
  AddNodeIDNode(ID, Opcode, getVTList(VT), 0, 0);
  void *IP = 0;
  if (SDNode *E = CSEMap.FindNodeOrInsertPos(ID, IP))
    return SDValue(E, 0);

  SDNode *N = new (NodeAllocator) SDNode(Opcode, DL.getIROrder(),
                                         DL.getDebugLoc(), getVTList(VT));
  CSEMap.InsertNode(N, IP);

  AllNodes.push_back(N);
#ifndef NDEBUG
  VerifySDNode(N);
#endif
  return SDValue(N, 0);
}

SDValue SelectionDAG::getNode(unsigned Opcode, SDLoc DL,
                              EVT VT, SDValue Operand) {
  // Constant fold unary operations with an integer constant operand.
  if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Operand.getNode())) {
    const APInt &Val = C->getAPIntValue();
    switch (Opcode) {
    default: break;
    case ISD::SIGN_EXTEND:
      return getConstant(Val.sextOrTrunc(VT.getSizeInBits()), VT);
    case ISD::ANY_EXTEND:
    case ISD::ZERO_EXTEND:
    case ISD::TRUNCATE:
      return getConstant(Val.zextOrTrunc(VT.getSizeInBits()), VT);
    case ISD::UINT_TO_FP:
    case ISD::SINT_TO_FP: {
      APFloat apf(EVTToAPFloatSemantics(VT),
                  APInt::getNullValue(VT.getSizeInBits()));
      (void)apf.convertFromAPInt(Val,
                                 Opcode==ISD::SINT_TO_FP,
                                 APFloat::rmNearestTiesToEven);
      return getConstantFP(apf, VT);
    }
    case ISD::BITCAST:
      if (VT == MVT::f32 && C->getValueType(0) == MVT::i32)
        return getConstantFP(APFloat(APFloat::IEEEsingle, Val), VT);
      else if (VT == MVT::f64 && C->getValueType(0) == MVT::i64)
        return getConstantFP(APFloat(APFloat::IEEEdouble, Val), VT);
      break;
    case ISD::BSWAP:
      return getConstant(Val.byteSwap(), VT);
    case ISD::CTPOP:
      return getConstant(Val.countPopulation(), VT);
    case ISD::CTLZ:
    case ISD::CTLZ_ZERO_UNDEF:
      return getConstant(Val.countLeadingZeros(), VT);
    case ISD::CTTZ:
    case ISD::CTTZ_ZERO_UNDEF:
      return getConstant(Val.countTrailingZeros(), VT);
    }
  }

  // Constant fold unary operations with a floating point constant operand.
  if (ConstantFPSDNode *C = dyn_cast<ConstantFPSDNode>(Operand.getNode())) {
    APFloat V = C->getValueAPF();    // make copy
    switch (Opcode) {
    case ISD::FNEG:
      V.changeSign();
      return getConstantFP(V, VT);
    case ISD::FABS:
      V.clearSign();
      return getConstantFP(V, VT);
    case ISD::FCEIL: {
      APFloat::opStatus fs = V.roundToIntegral(APFloat::rmTowardPositive);
      if (fs == APFloat::opOK || fs == APFloat::opInexact)
        return getConstantFP(V, VT);
      break;
    }
    case ISD::FTRUNC: {
      APFloat::opStatus fs = V.roundToIntegral(APFloat::rmTowardZero);
      if (fs == APFloat::opOK || fs == APFloat::opInexact)
        return getConstantFP(V, VT);
      break;
    }
    case ISD::FFLOOR: {
      APFloat::opStatus fs = V.roundToIntegral(APFloat::rmTowardNegative);
      if (fs == APFloat::opOK || fs == APFloat::opInexact)
        return getConstantFP(V, VT);
      break;
    }
    case ISD::FP_EXTEND: {
      bool ignored;
      // This can return overflow, underflow, or inexact; we don't care.
      // FIXME need to be more flexible about rounding mode.
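      // For a widening conversion such as f32 -> f64 the result should be
      // exact, so ignoring the status is safe in the common case.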
      (void)V.convert(EVTToAPFloatSemantics(VT),
                      APFloat::rmNearestTiesToEven, &ignored);
      return getConstantFP(V, VT);
    }
    case ISD::FP_TO_SINT:
    case ISD::FP_TO_UINT: {
      integerPart x[2];
      bool ignored;
      assert(integerPartWidth >= 64);
      // FIXME need to be more flexible about rounding mode.
      APFloat::opStatus s = V.convertToInteger(x, VT.getSizeInBits(),
                                               Opcode==ISD::FP_TO_SINT,
                                               APFloat::rmTowardZero, &ignored);
      if (s==APFloat::opInvalidOp)    // inexact is OK, in fact usual
        break;
      APInt api(VT.getSizeInBits(), x);
      return getConstant(api, VT);
    }
    case ISD::BITCAST:
      if (VT == MVT::i32 && C->getValueType(0) == MVT::f32)
        return getConstant((uint32_t)V.bitcastToAPInt().getZExtValue(), VT);
      else if (VT == MVT::i64 && C->getValueType(0) == MVT::f64)
        return getConstant(V.bitcastToAPInt().getZExtValue(), VT);
      break;
    }
  }

  unsigned OpOpcode = Operand.getNode()->getOpcode();
  switch (Opcode) {
  case ISD::TokenFactor:
  case ISD::MERGE_VALUES:
  case ISD::CONCAT_VECTORS:
    return Operand;         // Factor, merge or concat of one node?  No need.
  case ISD::FP_ROUND: llvm_unreachable("Invalid method to make FP_ROUND node");
  case ISD::FP_EXTEND:
    assert(VT.isFloatingPoint() &&
           Operand.getValueType().isFloatingPoint() && "Invalid FP cast!");
    if (Operand.getValueType() == VT) return Operand;  // noop conversion.
    assert((!VT.isVector() ||
            VT.getVectorNumElements() ==
            Operand.getValueType().getVectorNumElements()) &&
           "Vector element count mismatch!");
    if (Operand.getOpcode() == ISD::UNDEF)
      return getUNDEF(VT);
    break;
  case ISD::SIGN_EXTEND:
    assert(VT.isInteger() && Operand.getValueType().isInteger() &&
           "Invalid SIGN_EXTEND!");
    if (Operand.getValueType() == VT) return Operand;   // noop extension
    assert(Operand.getValueType().getScalarType().bitsLT(VT.getScalarType()) &&
           "Invalid sext node, dst < src!");
    assert((!VT.isVector() ||
            VT.getVectorNumElements() ==
            Operand.getValueType().getVectorNumElements()) &&
           "Vector element count mismatch!");
    if (OpOpcode == ISD::SIGN_EXTEND || OpOpcode == ISD::ZERO_EXTEND)
      return getNode(OpOpcode, DL, VT, Operand.getNode()->getOperand(0));
    else if (OpOpcode == ISD::UNDEF)
      // sext(undef) = 0, because the top bits will all be the same.
      return getConstant(0, VT);
    break;
  case ISD::ZERO_EXTEND:
    assert(VT.isInteger() && Operand.getValueType().isInteger() &&
           "Invalid ZERO_EXTEND!");
    if (Operand.getValueType() == VT) return Operand;   // noop extension
    assert(Operand.getValueType().getScalarType().bitsLT(VT.getScalarType()) &&
           "Invalid zext node, dst < src!");
    assert((!VT.isVector() ||
            VT.getVectorNumElements() ==
            Operand.getValueType().getVectorNumElements()) &&
           "Vector element count mismatch!");
    if (OpOpcode == ISD::ZERO_EXTEND)   // (zext (zext x)) -> (zext x)
      return getNode(ISD::ZERO_EXTEND, DL, VT,
                     Operand.getNode()->getOperand(0));
    else if (OpOpcode == ISD::UNDEF)
      // zext(undef) = 0, because the top bits will be zero.
      return getConstant(0, VT);
    break;
  case ISD::ANY_EXTEND:
    assert(VT.isInteger() && Operand.getValueType().isInteger() &&
           "Invalid ANY_EXTEND!");
    if (Operand.getValueType() == VT) return Operand;   // noop extension
    assert(Operand.getValueType().getScalarType().bitsLT(VT.getScalarType()) &&
           "Invalid anyext node, dst < src!");
    assert((!VT.isVector() ||
            VT.getVectorNumElements() ==
            Operand.getValueType().getVectorNumElements()) &&
           "Vector element count mismatch!");

    if (OpOpcode == ISD::ZERO_EXTEND || OpOpcode == ISD::SIGN_EXTEND ||
        OpOpcode == ISD::ANY_EXTEND)
      // (ext (zext x)) -> (zext x)  and  (ext (sext x)) -> (sext x)
      return getNode(OpOpcode, DL, VT, Operand.getNode()->getOperand(0));
    else if (OpOpcode == ISD::UNDEF)
      return getUNDEF(VT);

    // (ext (trunc x)) -> x
    if (OpOpcode == ISD::TRUNCATE) {
      SDValue OpOp = Operand.getNode()->getOperand(0);
      if (OpOp.getValueType() == VT)
        return OpOp;
    }
    break;
  case ISD::TRUNCATE:
    assert(VT.isInteger() && Operand.getValueType().isInteger() &&
           "Invalid TRUNCATE!");
    if (Operand.getValueType() == VT) return Operand;   // noop truncate
    assert(Operand.getValueType().getScalarType().bitsGT(VT.getScalarType()) &&
           "Invalid truncate node, src < dst!");
    assert((!VT.isVector() ||
            VT.getVectorNumElements() ==
            Operand.getValueType().getVectorNumElements()) &&
           "Vector element count mismatch!");
    if (OpOpcode == ISD::TRUNCATE)
      return getNode(ISD::TRUNCATE, DL, VT, Operand.getNode()->getOperand(0));
    if (OpOpcode == ISD::ZERO_EXTEND || OpOpcode == ISD::SIGN_EXTEND ||
        OpOpcode == ISD::ANY_EXTEND) {
      // If the source is smaller than the dest, we still need an extend.
      if (Operand.getNode()->getOperand(0).getValueType().getScalarType()
            .bitsLT(VT.getScalarType()))
        return getNode(OpOpcode, DL, VT, Operand.getNode()->getOperand(0));
      if (Operand.getNode()->getOperand(0).getValueType().bitsGT(VT))
        return getNode(ISD::TRUNCATE, DL, VT,
                       Operand.getNode()->getOperand(0));
      return Operand.getNode()->getOperand(0);
    }
    if (OpOpcode == ISD::UNDEF)
      return getUNDEF(VT);
    break;
  case ISD::BITCAST:
    // Basic sanity checking.
    assert(VT.getSizeInBits() == Operand.getValueType().getSizeInBits() &&
           "Cannot BITCAST between types of different sizes!");
    if (VT == Operand.getValueType()) return Operand;  // noop conversion.
    if (OpOpcode == ISD::BITCAST)  // bitconv(bitconv(x)) -> bitconv(x)
      return getNode(ISD::BITCAST, DL, VT, Operand.getOperand(0));
    if (OpOpcode == ISD::UNDEF)
      return getUNDEF(VT);
    break;
  case ISD::SCALAR_TO_VECTOR:
    assert(VT.isVector() && !Operand.getValueType().isVector() &&
           (VT.getVectorElementType() == Operand.getValueType() ||
            (VT.getVectorElementType().isInteger() &&
             Operand.getValueType().isInteger() &&
             VT.getVectorElementType().bitsLE(Operand.getValueType()))) &&
           "Illegal SCALAR_TO_VECTOR node!");
    if (OpOpcode == ISD::UNDEF)
      return getUNDEF(VT);
    // scalar_to_vector(extract_vector_elt V, 0) -> V, top bits are undefined.
    if (OpOpcode == ISD::EXTRACT_VECTOR_ELT &&
        isa<ConstantSDNode>(Operand.getOperand(1)) &&
        Operand.getConstantOperandVal(1) == 0 &&
        Operand.getOperand(0).getValueType() == VT)
      return Operand.getOperand(0);
    break;
  case ISD::FNEG:
    // -(X-Y) -> (Y-X) is unsafe because when X==Y, -0.0 != +0.0
    if (getTarget().Options.UnsafeFPMath && OpOpcode == ISD::FSUB)
      return getNode(ISD::FSUB, DL, VT, Operand.getNode()->getOperand(1),
                     Operand.getNode()->getOperand(0));
    if (OpOpcode == ISD::FNEG)  // --X -> X
      return Operand.getNode()->getOperand(0);
    break;
  case ISD::FABS:
    if (OpOpcode == ISD::FNEG)  // abs(-X) -> abs(X)
      return getNode(ISD::FABS, DL, VT, Operand.getNode()->getOperand(0));
    break;
  }

  SDNode *N;
  SDVTList VTs = getVTList(VT);
  if (VT != MVT::Glue) { // Don't CSE flag producing nodes
    FoldingSetNodeID ID;
    SDValue Ops[1] = { Operand };
    AddNodeIDNode(ID, Opcode, VTs, Ops, 1);
    void *IP = 0;
    if (SDNode *E = CSEMap.FindNodeOrInsertPos(ID, IP))
      return SDValue(E, 0);

    N = new (NodeAllocator) UnarySDNode(Opcode, DL.getIROrder(),
                                        DL.getDebugLoc(), VTs, Operand);
    CSEMap.InsertNode(N, IP);
  } else {
    N = new (NodeAllocator) UnarySDNode(Opcode, DL.getIROrder(),
                                        DL.getDebugLoc(), VTs, Operand);
  }

  AllNodes.push_back(N);
#ifndef NDEBUG
  VerifySDNode(N);
#endif
  return SDValue(N, 0);
}

SDValue SelectionDAG::FoldConstantArithmetic(unsigned Opcode, EVT VT,
                                             SDNode *Cst1, SDNode *Cst2) {
  SmallVector<std::pair<ConstantSDNode *, ConstantSDNode *>, 4> Inputs;
  SmallVector<SDValue, 4> Outputs;
  EVT SVT = VT.getScalarType();

  ConstantSDNode *Scalar1 = dyn_cast<ConstantSDNode>(Cst1);
  ConstantSDNode *Scalar2 = dyn_cast<ConstantSDNode>(Cst2);
  if (Scalar1 && Scalar2 && (Scalar1->isOpaque() || Scalar2->isOpaque()))
    return SDValue();

  if (Scalar1 && Scalar2)
    // Scalar instruction.
    Inputs.push_back(std::make_pair(Scalar1, Scalar2));
  else {
    // For vectors extract each constant element into Inputs so we can constant
    // fold them individually.
    BuildVectorSDNode *BV1 = dyn_cast<BuildVectorSDNode>(Cst1);
    BuildVectorSDNode *BV2 = dyn_cast<BuildVectorSDNode>(Cst2);
    if (!BV1 || !BV2)
      return SDValue();

    assert(BV1->getNumOperands() == BV2->getNumOperands() && "Out of sync!");

    for (unsigned I = 0, E = BV1->getNumOperands(); I != E; ++I) {
      ConstantSDNode *V1 = dyn_cast<ConstantSDNode>(BV1->getOperand(I));
      ConstantSDNode *V2 = dyn_cast<ConstantSDNode>(BV2->getOperand(I));
      if (!V1 || !V2) // Not a constant, bail.
        return SDValue();

      if (V1->isOpaque() || V2->isOpaque())
        return SDValue();

      // Avoid BUILD_VECTOR nodes that perform implicit truncation.
      // FIXME: This is valid and could be handled by truncating the APInts.
      if (V1->getValueType(0) != SVT || V2->getValueType(0) != SVT)
        return SDValue();

      Inputs.push_back(std::make_pair(V1, V2));
    }
  }

  // We have a number of constant values, constant fold them element by
  // element.
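  // For example, folding an ADD of the vectors <1, 2> and <10, 20> visits the
  // pairs (1,10) and (2,20) and produces the elements <11, 22>.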
  for (unsigned I = 0, E = Inputs.size(); I != E; ++I) {
    const APInt &C1 = Inputs[I].first->getAPIntValue();
    const APInt &C2 = Inputs[I].second->getAPIntValue();

    switch (Opcode) {
    case ISD::ADD:
      Outputs.push_back(getConstant(C1 + C2, SVT));
      break;
    case ISD::SUB:
      Outputs.push_back(getConstant(C1 - C2, SVT));
      break;
    case ISD::MUL:
      Outputs.push_back(getConstant(C1 * C2, SVT));
      break;
    case ISD::UDIV:
      if (!C2.getBoolValue())
        return SDValue();
      Outputs.push_back(getConstant(C1.udiv(C2), SVT));
      break;
    case ISD::UREM:
      if (!C2.getBoolValue())
        return SDValue();
      Outputs.push_back(getConstant(C1.urem(C2), SVT));
      break;
    case ISD::SDIV:
      if (!C2.getBoolValue())
        return SDValue();
      Outputs.push_back(getConstant(C1.sdiv(C2), SVT));
      break;
    case ISD::SREM:
      if (!C2.getBoolValue())
        return SDValue();
      Outputs.push_back(getConstant(C1.srem(C2), SVT));
      break;
    case ISD::AND:
      Outputs.push_back(getConstant(C1 & C2, SVT));
      break;
    case ISD::OR:
      Outputs.push_back(getConstant(C1 | C2, SVT));
      break;
    case ISD::XOR:
      Outputs.push_back(getConstant(C1 ^ C2, SVT));
      break;
    case ISD::SHL:
      Outputs.push_back(getConstant(C1 << C2, SVT));
      break;
    case ISD::SRL:
      Outputs.push_back(getConstant(C1.lshr(C2), SVT));
      break;
    case ISD::SRA:
      Outputs.push_back(getConstant(C1.ashr(C2), SVT));
      break;
    case ISD::ROTL:
      Outputs.push_back(getConstant(C1.rotl(C2), SVT));
      break;
    case ISD::ROTR:
      Outputs.push_back(getConstant(C1.rotr(C2), SVT));
      break;
    default:
      return SDValue();
    }
  }

  // Handle the scalar case first.
  if (Scalar1 && Scalar2)
    return Outputs.back();

  // Otherwise build a big vector out of the scalar elements we generated.
  return getNode(ISD::BUILD_VECTOR, SDLoc(), VT, Outputs.data(),
                 Outputs.size());
}

SDValue SelectionDAG::getNode(unsigned Opcode, SDLoc DL, EVT VT, SDValue N1,
                              SDValue N2) {
  ConstantSDNode *N1C = dyn_cast<ConstantSDNode>(N1.getNode());
  ConstantSDNode *N2C = dyn_cast<ConstantSDNode>(N2.getNode());
  switch (Opcode) {
  default: break;
  case ISD::TokenFactor:
    assert(VT == MVT::Other && N1.getValueType() == MVT::Other &&
           N2.getValueType() == MVT::Other && "Invalid token factor!");
    // Fold trivial token factors.
    if (N1.getOpcode() == ISD::EntryToken) return N2;
    if (N2.getOpcode() == ISD::EntryToken) return N1;
    if (N1 == N2) return N1;
    break;
  case ISD::CONCAT_VECTORS:
    // Concat of UNDEFs is UNDEF.
    if (N1.getOpcode() == ISD::UNDEF &&
        N2.getOpcode() == ISD::UNDEF)
      return getUNDEF(VT);

    // A CONCAT_VECTOR with all operands BUILD_VECTOR can be simplified to
    // one big BUILD_VECTOR.
    if (N1.getOpcode() == ISD::BUILD_VECTOR &&
        N2.getOpcode() == ISD::BUILD_VECTOR) {
      SmallVector<SDValue, 16> Elts(N1.getNode()->op_begin(),
                                    N1.getNode()->op_end());
      Elts.append(N2.getNode()->op_begin(), N2.getNode()->op_end());
      return getNode(ISD::BUILD_VECTOR, DL, VT, &Elts[0], Elts.size());
    }
    break;
  case ISD::AND:
    assert(VT.isInteger() && "This operator does not apply to FP types!");
    assert(N1.getValueType() == N2.getValueType() &&
           N1.getValueType() == VT && "Binary operator types must match!");
    // (X & 0) -> 0.  This commonly occurs when legalizing i64 values, so it's
    // worth handling here.
    if (N2C && N2C->isNullValue())
      return N2;
    if (N2C && N2C->isAllOnesValue())  // X & -1 -> X
      return N1;
    break;
  case ISD::OR:
  case ISD::XOR:
  case ISD::ADD:
  case ISD::SUB:
    assert(VT.isInteger() && "This operator does not apply to FP types!");
    assert(N1.getValueType() == N2.getValueType() &&
           N1.getValueType() == VT && "Binary operator types must match!");
    // (X ^|+- 0) -> X.  This commonly occurs when legalizing i64 values, so
    // it's worth handling here.
    if (N2C && N2C->isNullValue())
      return N1;
    break;
  case ISD::UDIV:
  case ISD::UREM:
  case ISD::MULHU:
  case ISD::MULHS:
  case ISD::MUL:
  case ISD::SDIV:
  case ISD::SREM:
    assert(VT.isInteger() && "This operator does not apply to FP types!");
    assert(N1.getValueType() == N2.getValueType() &&
           N1.getValueType() == VT && "Binary operator types must match!");
    break;
  case ISD::FADD:
  case ISD::FSUB:
  case ISD::FMUL:
  case ISD::FDIV:
  case ISD::FREM:
    if (getTarget().Options.UnsafeFPMath) {
      if (Opcode == ISD::FADD) {
        // 0+x --> x
        if (ConstantFPSDNode *CFP = dyn_cast<ConstantFPSDNode>(N1))
          if (CFP->getValueAPF().isZero())
            return N2;
        // x+0 --> x
        if (ConstantFPSDNode *CFP = dyn_cast<ConstantFPSDNode>(N2))
          if (CFP->getValueAPF().isZero())
            return N1;
      } else if (Opcode == ISD::FSUB) {
        // x-0 --> x
        if (ConstantFPSDNode *CFP = dyn_cast<ConstantFPSDNode>(N2))
          if (CFP->getValueAPF().isZero())
            return N1;
      } else if (Opcode == ISD::FMUL) {
        ConstantFPSDNode *CFP = dyn_cast<ConstantFPSDNode>(N1);
        SDValue V = N2;

        // If the first operand isn't the constant, try the second
        if (!CFP) {
          CFP = dyn_cast<ConstantFPSDNode>(N2);
          V = N1;
        }

        if (CFP) {
          // 0*x --> 0
          if (CFP->isZero())
            return SDValue(CFP, 0);
          // 1*x --> x
          if (CFP->isExactlyValue(1.0))
            return V;
        }
      }
    }
    assert(VT.isFloatingPoint() && "This operator only applies to FP types!");
    assert(N1.getValueType() == N2.getValueType() &&
           N1.getValueType() == VT && "Binary operator types must match!");
    break;
  case ISD::FCOPYSIGN:   // N1 and result must match.  N1/N2 need not match.
    assert(N1.getValueType() == VT &&
           N1.getValueType().isFloatingPoint() &&
           N2.getValueType().isFloatingPoint() &&
           "Invalid FCOPYSIGN!");
    break;
  case ISD::SHL:
  case ISD::SRA:
  case ISD::SRL:
  case ISD::ROTL:
  case ISD::ROTR:
    assert(VT == N1.getValueType() &&
           "Shift operators' return type must be the same as their first arg");
    assert(VT.isInteger() && N2.getValueType().isInteger() &&
           "Shifts only work on integers");
    assert((!VT.isVector() || VT == N2.getValueType()) &&
           "Vector shift amounts must have the same type as their first arg");
    // Verify that the shift amount VT is big enough to hold valid shift
    // amounts.  This catches things like trying to shift an i1024 value by an
    // i8, which is easy to fall into in generic code that uses
    // TLI.getShiftAmountTy().
    assert(N2.getValueType().getSizeInBits() >=
           Log2_32_Ceil(N1.getValueType().getSizeInBits()) &&
           "Invalid use of small shift amount with oversized value!");

Since we know the size of the shift has to be less than the 3052 // size of the value, the shift/rotate count is guaranteed to be zero. 3053 if (VT == MVT::i1) 3054 return N1; 3055 if (N2C && N2C->isNullValue()) 3056 return N1; 3057 break; 3058 case ISD::FP_ROUND_INREG: { 3059 EVT EVT = cast<VTSDNode>(N2)->getVT(); 3060 assert(VT == N1.getValueType() && "Not an inreg round!"); 3061 assert(VT.isFloatingPoint() && EVT.isFloatingPoint() && 3062 "Cannot FP_ROUND_INREG integer types"); 3063 assert(EVT.isVector() == VT.isVector() && 3064 "FP_ROUND_INREG type should be vector iff the operand " 3065 "type is vector!"); 3066 assert((!EVT.isVector() || 3067 EVT.getVectorNumElements() == VT.getVectorNumElements()) && 3068 "Vector element counts must match in FP_ROUND_INREG"); 3069 assert(EVT.bitsLE(VT) && "Not rounding down!"); 3070 (void)EVT; 3071 if (cast<VTSDNode>(N2)->getVT() == VT) return N1; // Not actually rounding. 3072 break; 3073 } 3074 case ISD::FP_ROUND: 3075 assert(VT.isFloatingPoint() && 3076 N1.getValueType().isFloatingPoint() && 3077 VT.bitsLE(N1.getValueType()) && 3078 isa<ConstantSDNode>(N2) && "Invalid FP_ROUND!"); 3079 if (N1.getValueType() == VT) return N1; // noop conversion. 3080 break; 3081 case ISD::AssertSext: 3082 case ISD::AssertZext: { 3083 EVT EVT = cast<VTSDNode>(N2)->getVT(); 3084 assert(VT == N1.getValueType() && "Not an inreg extend!"); 3085 assert(VT.isInteger() && EVT.isInteger() && 3086 "Cannot *_EXTEND_INREG FP types"); 3087 assert(!EVT.isVector() && 3088 "AssertSExt/AssertZExt type should be the vector element type " 3089 "rather than the vector type!"); 3090 assert(EVT.bitsLE(VT) && "Not extending!"); 3091 if (VT == EVT) return N1; // noop assertion. 3092 break; 3093 } 3094 case ISD::SIGN_EXTEND_INREG: { 3095 EVT EVT = cast<VTSDNode>(N2)->getVT(); 3096 assert(VT == N1.getValueType() && "Not an inreg extend!"); 3097 assert(VT.isInteger() && EVT.isInteger() && 3098 "Cannot *_EXTEND_INREG FP types"); 3099 assert(EVT.isVector() == VT.isVector() && 3100 "SIGN_EXTEND_INREG type should be vector iff the operand " 3101 "type is vector!"); 3102 assert((!EVT.isVector() || 3103 EVT.getVectorNumElements() == VT.getVectorNumElements()) && 3104 "Vector element counts must match in SIGN_EXTEND_INREG"); 3105 assert(EVT.bitsLE(VT) && "Not extending!"); 3106 if (EVT == VT) return N1; // Not actually extending 3107 3108 if (N1C) { 3109 APInt Val = N1C->getAPIntValue(); 3110 unsigned FromBits = EVT.getScalarType().getSizeInBits(); 3111 Val <<= Val.getBitWidth()-FromBits; 3112 Val = Val.ashr(Val.getBitWidth()-FromBits); 3113 return getConstant(Val, VT); 3114 } 3115 break; 3116 } 3117 case ISD::EXTRACT_VECTOR_ELT: 3118 // EXTRACT_VECTOR_ELT of an UNDEF is an UNDEF. 3119 if (N1.getOpcode() == ISD::UNDEF) 3120 return getUNDEF(VT); 3121 3122 // EXTRACT_VECTOR_ELT of CONCAT_VECTORS is often formed while lowering is 3123 // expanding copies of large vectors from registers. 3124 if (N2C && 3125 N1.getOpcode() == ISD::CONCAT_VECTORS && 3126 N1.getNumOperands() > 0) { 3127 unsigned Factor = 3128 N1.getOperand(0).getValueType().getVectorNumElements(); 3129 return getNode(ISD::EXTRACT_VECTOR_ELT, DL, VT, 3130 N1.getOperand(N2C->getZExtValue() / Factor), 3131 getConstant(N2C->getZExtValue() % Factor, 3132 N2.getValueType())); 3133 } 3134 3135 // EXTRACT_VECTOR_ELT of BUILD_VECTOR is often formed while lowering is 3136 // expanding large vector constants. 
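    // For example, (extract_vector_elt (build_vector a, b, c, d), 2) folds
    // directly to c, modulo the any-extend / truncate fixup below when the
    // element type was promoted during legalization.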
3137     if (N2C && N1.getOpcode() == ISD::BUILD_VECTOR) {
3138       SDValue Elt = N1.getOperand(N2C->getZExtValue());
3139 
3140       if (VT != Elt.getValueType())
3141         // If the vector element type is not legal, the BUILD_VECTOR operands
3142         // are promoted and implicitly truncated, and the result implicitly
3143         // extended. Make that explicit here.
3144         Elt = getAnyExtOrTrunc(Elt, DL, VT);
3145 
3146       return Elt;
3147     }
3148 
3149     // EXTRACT_VECTOR_ELT of INSERT_VECTOR_ELT is often formed when vector
3150     // operations are lowered to scalars.
3151     if (N1.getOpcode() == ISD::INSERT_VECTOR_ELT) {
3152       // If the indices are the same, return the inserted element; else
3153       // if the indices are known different, extract the element from
3154       // the original vector.
3155       SDValue N1Op2 = N1.getOperand(2);
3156       ConstantSDNode *N1Op2C = dyn_cast<ConstantSDNode>(N1Op2.getNode());
3157 
3158       if (N1Op2C && N2C) {
3159         if (N1Op2C->getZExtValue() == N2C->getZExtValue()) {
3160           if (VT == N1.getOperand(1).getValueType())
3161             return N1.getOperand(1);
3162           else
3163             return getSExtOrTrunc(N1.getOperand(1), DL, VT);
3164         }
3165 
3166         return getNode(ISD::EXTRACT_VECTOR_ELT, DL, VT, N1.getOperand(0), N2);
3167       }
3168     }
3169     break;
3170   case ISD::EXTRACT_ELEMENT:
3171     assert(N2C && (unsigned)N2C->getZExtValue() < 2 && "Bad EXTRACT_ELEMENT!");
3172     assert(!N1.getValueType().isVector() && !VT.isVector() &&
3173            (N1.getValueType().isInteger() == VT.isInteger()) &&
3174            N1.getValueType() != VT &&
3175            "Wrong types for EXTRACT_ELEMENT!");
3176 
3177     // EXTRACT_ELEMENT of BUILD_PAIR is often formed while legalize is expanding
3178     // 64-bit integers into 32-bit parts.  Instead of building the extract of
3179     // the BUILD_PAIR, only to have legalize rip it apart, just do it now.
3180     if (N1.getOpcode() == ISD::BUILD_PAIR)
3181       return N1.getOperand(N2C->getZExtValue());
3182 
3183     // EXTRACT_ELEMENT of a constant int is also very common.
3184     if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(N1)) {
3185       unsigned ElementSize = VT.getSizeInBits();
3186       unsigned Shift = ElementSize * N2C->getZExtValue();
3187       APInt ShiftedVal = C->getAPIntValue().lshr(Shift);
3188       return getConstant(ShiftedVal.trunc(ElementSize), VT);
3189     }
3190     break;
3191   case ISD::EXTRACT_SUBVECTOR: {
3192     SDValue Index = N2;
3193     if (VT.isSimple() && N1.getValueType().isSimple()) {
3194       assert(VT.isVector() && N1.getValueType().isVector() &&
3195              "Extract subvector VTs must be vectors!");
3196       assert(VT.getVectorElementType() ==
3197              N1.getValueType().getVectorElementType() &&
3198              "Extract subvector VTs must have the same element type!");
3199       assert(VT.getSimpleVT() <= N1.getSimpleValueType() &&
3200              "Extract subvector must be from larger vector to smaller vector!");
3201 
3202       if (isa<ConstantSDNode>(Index.getNode())) {
3203         assert((VT.getVectorNumElements() +
3204                 cast<ConstantSDNode>(Index.getNode())->getZExtValue()
3205                 <= N1.getValueType().getVectorNumElements())
3206                && "Extract subvector overflow!");
3207       }
3208 
3209       // Trivial extraction.
3210       if (VT.getSimpleVT() == N1.getSimpleValueType())
3211         return N1;
3212     }
3213     break;
3214   }
3215   }
3216 
3217   // Perform trivial constant folding.
3218   SDValue SV = FoldConstantArithmetic(Opcode, VT, N1.getNode(), N2.getNode());
3219   if (SV.getNode()) return SV;
3220 
3221   // Canonicalize constant to RHS if commutative.
3222   if (N1C && !N2C && isCommutativeBinOp(Opcode)) {
3223     std::swap(N1C, N2C);
3224     std::swap(N1, N2);
3225   }
3226 
3227   // Constant fold FP operations.
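  // For example, (fadd 1.0, 2.0) becomes the constant 3.0 below, provided
  // APFloat does not report an invalid-operation status for the computation.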
3228   ConstantFPSDNode *N1CFP = dyn_cast<ConstantFPSDNode>(N1.getNode());
3229   ConstantFPSDNode *N2CFP = dyn_cast<ConstantFPSDNode>(N2.getNode());
3230   if (N1CFP) {
3231     if (!N2CFP && isCommutativeBinOp(Opcode)) {
3232       // Canonicalize constant to RHS if commutative.
3233       std::swap(N1CFP, N2CFP);
3234       std::swap(N1, N2);
3235     } else if (N2CFP) {
3236       APFloat V1 = N1CFP->getValueAPF(), V2 = N2CFP->getValueAPF();
3237       APFloat::opStatus s;
3238       switch (Opcode) {
3239       case ISD::FADD:
3240         s = V1.add(V2, APFloat::rmNearestTiesToEven);
3241         if (s != APFloat::opInvalidOp)
3242           return getConstantFP(V1, VT);
3243         break;
3244       case ISD::FSUB:
3245         s = V1.subtract(V2, APFloat::rmNearestTiesToEven);
3246         if (s != APFloat::opInvalidOp)
3247           return getConstantFP(V1, VT);
3248         break;
3249       case ISD::FMUL:
3250         s = V1.multiply(V2, APFloat::rmNearestTiesToEven);
3251         if (s != APFloat::opInvalidOp)
3252           return getConstantFP(V1, VT);
3253         break;
3254       case ISD::FDIV:
3255         s = V1.divide(V2, APFloat::rmNearestTiesToEven);
3256         if (s != APFloat::opInvalidOp && s != APFloat::opDivByZero)
3257           return getConstantFP(V1, VT);
3258         break;
3259       case ISD::FREM:
3260         s = V1.mod(V2, APFloat::rmNearestTiesToEven);
3261         if (s != APFloat::opInvalidOp && s != APFloat::opDivByZero)
3262           return getConstantFP(V1, VT);
3263         break;
3264       case ISD::FCOPYSIGN:
3265         V1.copySign(V2);
3266         return getConstantFP(V1, VT);
3267       default: break;
3268       }
3269     }
3270 
3271     if (Opcode == ISD::FP_ROUND) {
3272       APFloat V = N1CFP->getValueAPF();    // make copy
3273       bool ignored;
3274       // This can return overflow, underflow, or inexact; we don't care.
3275       // FIXME need to be more flexible about rounding mode.
3276       (void)V.convert(EVTToAPFloatSemantics(VT),
3277                       APFloat::rmNearestTiesToEven, &ignored);
3278       return getConstantFP(V, VT);
3279     }
3280   }
3281 
3282   // Canonicalize an UNDEF to the RHS, even over a constant.
3283   if (N1.getOpcode() == ISD::UNDEF) {
3284     if (isCommutativeBinOp(Opcode)) {
3285       std::swap(N1, N2);
3286     } else {
3287       switch (Opcode) {
3288       case ISD::FP_ROUND_INREG:
3289       case ISD::SIGN_EXTEND_INREG:
3290       case ISD::SUB:
3291       case ISD::FSUB:
3292       case ISD::FDIV:
3293       case ISD::FREM:
3294       case ISD::SRA:
3295         return N1;     // fold op(undef, arg2) -> undef
3296       case ISD::UDIV:
3297       case ISD::SDIV:
3298       case ISD::UREM:
3299       case ISD::SREM:
3300       case ISD::SRL:
3301       case ISD::SHL:
3302         if (!VT.isVector())
3303           return getConstant(0, VT);    // fold op(undef, arg2) -> 0
3304         // For vectors, we can't easily build an all zero vector, just return
3305         // the other (non-undef) operand.
3306         return N2;
3307       }
3308     }
3309   }
3310 
3311   // Fold a bunch of operators when the RHS is undef.
3312   if (N2.getOpcode() == ISD::UNDEF) {
3313     switch (Opcode) {
3314     case ISD::XOR:
3315       if (N1.getOpcode() == ISD::UNDEF)
3316         // Handle undef ^ undef -> 0 special case. This is a common
3317         // idiom (misuse).
3318         return getConstant(0, VT);
3319       // fallthrough
3320     case ISD::ADD:
3321     case ISD::ADDC:
3322     case ISD::ADDE:
3323     case ISD::SUB:
3324     case ISD::UDIV:
3325     case ISD::SDIV:
3326     case ISD::UREM:
3327     case ISD::SREM:
3328       return N2;       // fold op(arg1, undef) -> undef
3329     case ISD::FADD:
3330     case ISD::FSUB:
3331     case ISD::FMUL:
3332     case ISD::FDIV:
3333     case ISD::FREM:
3334       if (getTarget().Options.UnsafeFPMath)
3335         return N2;
3336       break;
3337     case ISD::MUL:
3338     case ISD::AND:
3339     case ISD::SRL:
3340     case ISD::SHL:
3341       if (!VT.isVector())
3342         return getConstant(0, VT);  // fold op(arg1, undef) -> 0
3343       // For vectors, we can't easily build an all zero vector, just return
3344       // the LHS.
3345 return N1; 3346 case ISD::OR: 3347 if (!VT.isVector()) 3348 return getConstant(APInt::getAllOnesValue(VT.getSizeInBits()), VT); 3349 // For vectors, we can't easily build an all one vector, just return 3350 // the LHS. 3351 return N1; 3352 case ISD::SRA: 3353 return N1; 3354 } 3355 } 3356 3357 // Memoize this node if possible. 3358 SDNode *N; 3359 SDVTList VTs = getVTList(VT); 3360 if (VT != MVT::Glue) { 3361 SDValue Ops[] = { N1, N2 }; 3362 FoldingSetNodeID ID; 3363 AddNodeIDNode(ID, Opcode, VTs, Ops, 2); 3364 void *IP = 0; 3365 if (SDNode *E = CSEMap.FindNodeOrInsertPos(ID, IP)) 3366 return SDValue(E, 0); 3367 3368 N = new (NodeAllocator) BinarySDNode(Opcode, DL.getIROrder(), 3369 DL.getDebugLoc(), VTs, N1, N2); 3370 CSEMap.InsertNode(N, IP); 3371 } else { 3372 N = new (NodeAllocator) BinarySDNode(Opcode, DL.getIROrder(), 3373 DL.getDebugLoc(), VTs, N1, N2); 3374 } 3375 3376 AllNodes.push_back(N); 3377 #ifndef NDEBUG 3378 VerifySDNode(N); 3379 #endif 3380 return SDValue(N, 0); 3381 } 3382 3383 SDValue SelectionDAG::getNode(unsigned Opcode, SDLoc DL, EVT VT, 3384 SDValue N1, SDValue N2, SDValue N3) { 3385 // Perform various simplifications. 3386 ConstantSDNode *N1C = dyn_cast<ConstantSDNode>(N1.getNode()); 3387 switch (Opcode) { 3388 case ISD::FMA: { 3389 ConstantFPSDNode *N1CFP = dyn_cast<ConstantFPSDNode>(N1); 3390 ConstantFPSDNode *N2CFP = dyn_cast<ConstantFPSDNode>(N2); 3391 ConstantFPSDNode *N3CFP = dyn_cast<ConstantFPSDNode>(N3); 3392 if (N1CFP && N2CFP && N3CFP) { 3393 APFloat V1 = N1CFP->getValueAPF(); 3394 const APFloat &V2 = N2CFP->getValueAPF(); 3395 const APFloat &V3 = N3CFP->getValueAPF(); 3396 APFloat::opStatus s = 3397 V1.fusedMultiplyAdd(V2, V3, APFloat::rmNearestTiesToEven); 3398 if (s != APFloat::opInvalidOp) 3399 return getConstantFP(V1, VT); 3400 } 3401 break; 3402 } 3403 case ISD::CONCAT_VECTORS: 3404 // A CONCAT_VECTOR with all operands BUILD_VECTOR can be simplified to 3405 // one big BUILD_VECTOR. 3406 if (N1.getOpcode() == ISD::BUILD_VECTOR && 3407 N2.getOpcode() == ISD::BUILD_VECTOR && 3408 N3.getOpcode() == ISD::BUILD_VECTOR) { 3409 SmallVector<SDValue, 16> Elts(N1.getNode()->op_begin(), 3410 N1.getNode()->op_end()); 3411 Elts.append(N2.getNode()->op_begin(), N2.getNode()->op_end()); 3412 Elts.append(N3.getNode()->op_begin(), N3.getNode()->op_end()); 3413 return getNode(ISD::BUILD_VECTOR, DL, VT, &Elts[0], Elts.size()); 3414 } 3415 break; 3416 case ISD::SETCC: { 3417 // Use FoldSetCC to simplify SETCC's. 
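    // For example, a SETCC whose operands are both constants folds to a
    // constant true/false result here.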
3418     SDValue Simp = FoldSetCC(VT, N1, N2, cast<CondCodeSDNode>(N3)->get(), DL);
3419     if (Simp.getNode()) return Simp;
3420     break;
3421   }
3422   case ISD::SELECT:
3423     if (N1C) {
3424       if (N1C->getZExtValue())
3425         return N2;             // select true, X, Y -> X
3426       return N3;               // select false, X, Y -> Y
3427     }
3428 
3429     if (N2 == N3) return N2;   // select C, X, X -> X
3430     break;
3431   case ISD::VECTOR_SHUFFLE:
3432     llvm_unreachable("should use getVectorShuffle constructor!");
3433   case ISD::INSERT_SUBVECTOR: {
3434     SDValue Index = N3;
3435     if (VT.isSimple() && N1.getValueType().isSimple()
3436         && N2.getValueType().isSimple()) {
3437       assert(VT.isVector() && N1.getValueType().isVector() &&
3438              N2.getValueType().isVector() &&
3439              "Insert subvector VTs must be vectors");
3440       assert(VT == N1.getValueType() &&
3441              "Dest and insert subvector source types must match!");
3442       assert(N2.getSimpleValueType() <= N1.getSimpleValueType() &&
3443              "Insert subvector must be from smaller vector to larger vector!");
3444       if (isa<ConstantSDNode>(Index.getNode())) {
3445         assert((N2.getValueType().getVectorNumElements() +
3446                 cast<ConstantSDNode>(Index.getNode())->getZExtValue()
3447                 <= VT.getVectorNumElements())
3448                && "Insert subvector overflow!");
3449       }
3450 
3451       // Trivial insertion.
3452       if (VT.getSimpleVT() == N2.getSimpleValueType())
3453         return N2;
3454     }
3455     break;
3456   }
3457   case ISD::BITCAST:
3458     // Fold bit_convert nodes from a type to themselves.
3459     if (N1.getValueType() == VT)
3460       return N1;
3461     break;
3462   }
3463 
3464   // Memoize node if it doesn't produce a flag.
3465   SDNode *N;
3466   SDVTList VTs = getVTList(VT);
3467   if (VT != MVT::Glue) {
3468     SDValue Ops[] = { N1, N2, N3 };
3469     FoldingSetNodeID ID;
3470     AddNodeIDNode(ID, Opcode, VTs, Ops, 3);
3471     void *IP = 0;
3472     if (SDNode *E = CSEMap.FindNodeOrInsertPos(ID, IP))
3473       return SDValue(E, 0);
3474 
3475     N = new (NodeAllocator) TernarySDNode(Opcode, DL.getIROrder(),
3476                                           DL.getDebugLoc(), VTs, N1, N2, N3);
3477     CSEMap.InsertNode(N, IP);
3478   } else {
3479     N = new (NodeAllocator) TernarySDNode(Opcode, DL.getIROrder(),
3480                                           DL.getDebugLoc(), VTs, N1, N2, N3);
3481   }
3482 
3483   AllNodes.push_back(N);
3484 #ifndef NDEBUG
3485   VerifySDNode(N);
3486 #endif
3487   return SDValue(N, 0);
3488 }
3489 
3490 SDValue SelectionDAG::getNode(unsigned Opcode, SDLoc DL, EVT VT,
3491                               SDValue N1, SDValue N2, SDValue N3,
3492                               SDValue N4) {
3493   SDValue Ops[] = { N1, N2, N3, N4 };
3494   return getNode(Opcode, DL, VT, Ops, 4);
3495 }
3496 
3497 SDValue SelectionDAG::getNode(unsigned Opcode, SDLoc DL, EVT VT,
3498                               SDValue N1, SDValue N2, SDValue N3,
3499                               SDValue N4, SDValue N5) {
3500   SDValue Ops[] = { N1, N2, N3, N4, N5 };
3501   return getNode(Opcode, DL, VT, Ops, 5);
3502 }
3503 
3504 /// getStackArgumentTokenFactor - Compute a TokenFactor to force all
3505 /// the incoming stack arguments to be loaded from the stack.
3506 SDValue SelectionDAG::getStackArgumentTokenFactor(SDValue Chain) {
3507   SmallVector<SDValue, 8> ArgChains;
3508 
3509   // Include the original chain at the beginning of the list. When this is
3510   // used by target LowerCall hooks, this helps legalize find the
3511   // CALLSEQ_BEGIN node.
3512   ArgChains.push_back(Chain);
3513 
3514   // Add a chain value for each stack argument.
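  // Incoming arguments live in fixed stack objects with negative frame
  // indices, so any load hanging off the entry token whose base pointer is
  // such a FrameIndex is a stack-argument load; its chain result (value #1)
  // is collected below.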
3515   for (SDNode::use_iterator U = getEntryNode().getNode()->use_begin(),
3516        UE = getEntryNode().getNode()->use_end(); U != UE; ++U)
3517     if (LoadSDNode *L = dyn_cast<LoadSDNode>(*U))
3518       if (FrameIndexSDNode *FI = dyn_cast<FrameIndexSDNode>(L->getBasePtr()))
3519         if (FI->getIndex() < 0)
3520           ArgChains.push_back(SDValue(L, 1));
3521 
3522   // Build a tokenfactor for all the chains.
3523   return getNode(ISD::TokenFactor, SDLoc(Chain), MVT::Other,
3524                  &ArgChains[0], ArgChains.size());
3525 }
3526 
3527 /// getMemsetValue - Vectorized representation of the memset value
3528 /// operand.
3529 static SDValue getMemsetValue(SDValue Value, EVT VT, SelectionDAG &DAG,
3530                               SDLoc dl) {
3531   assert(Value.getOpcode() != ISD::UNDEF);
3532 
3533   unsigned NumBits = VT.getScalarType().getSizeInBits();
3534   if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Value)) {
3535     assert(C->getAPIntValue().getBitWidth() == 8);
3536     APInt Val = APInt::getSplat(NumBits, C->getAPIntValue());
3537     if (VT.isInteger())
3538       return DAG.getConstant(Val, VT);
3539     return DAG.getConstantFP(APFloat(DAG.EVTToAPFloatSemantics(VT), Val), VT);
3540   }
3541 
3542   Value = DAG.getNode(ISD::ZERO_EXTEND, dl, VT, Value);
3543   if (NumBits > 8) {
3544     // Use a multiplication with 0x010101... to extend the input to the
3545     // required length.
3546     APInt Magic = APInt::getSplat(NumBits, APInt(8, 0x01));
3547     Value = DAG.getNode(ISD::MUL, dl, VT, Value, DAG.getConstant(Magic, VT));
3548   }
3549 
3550   return Value;
3551 }
3552 
3553 /// getMemsetStringVal - Similar to getMemsetValue, except this is only used
3554 /// when a memcpy is turned into a memset because the source is a constant
3555 /// string pointer.
3556 static SDValue getMemsetStringVal(EVT VT, SDLoc dl, SelectionDAG &DAG,
3557                                   const TargetLowering &TLI, StringRef Str) {
3558   // Handle vector with all elements zero.
3559   if (Str.empty()) {
3560     if (VT.isInteger())
3561       return DAG.getConstant(0, VT);
3562     else if (VT == MVT::f32 || VT == MVT::f64)
3563       return DAG.getConstantFP(0.0, VT);
3564     else if (VT.isVector()) {
3565       unsigned NumElts = VT.getVectorNumElements();
3566       MVT EltVT = (VT.getVectorElementType() == MVT::f32) ? MVT::i32 : MVT::i64;
3567       return DAG.getNode(ISD::BITCAST, dl, VT,
3568                          DAG.getConstant(0, EVT::getVectorVT(*DAG.getContext(),
3569                                                              EltVT, NumElts)));
3570     } else
3571       llvm_unreachable("Expected type!");
3572   }
3573 
3574   assert(!VT.isVector() && "Can't handle vector type here!");
3575   unsigned NumVTBits = VT.getSizeInBits();
3576   unsigned NumVTBytes = NumVTBits / 8;
3577   unsigned NumBytes = std::min(NumVTBytes, unsigned(Str.size()));
3578 
3579   APInt Val(NumVTBits, 0);
3580   if (TLI.isLittleEndian()) {
3581     for (unsigned i = 0; i != NumBytes; ++i)
3582       Val |= (uint64_t)(unsigned char)Str[i] << i*8;
3583   } else {
3584     for (unsigned i = 0; i != NumBytes; ++i)
3585       Val |= (uint64_t)(unsigned char)Str[i] << (NumVTBytes-i-1)*8;
3586   }
3587 
3588   // If the "cost" of materializing the integer immediate is less than the cost
3589   // of a load, then it is cost effective to turn the load into the immediate.
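  // For example, on a little-endian target a 4-byte copy from the constant
  // string "abcd" can become a single i32 store of the immediate 0x64636261.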
3590   Type *Ty = VT.getTypeForEVT(*DAG.getContext());
3591   if (TLI.shouldConvertConstantLoadToIntImm(Val, Ty))
3592     return DAG.getConstant(Val, VT);
3593   return SDValue(0, 0);
3594 }
3595 
3596 /// getMemBasePlusOffset - Returns a node representing the given base pointer
3597 /// plus a constant byte offset.
3598 static SDValue getMemBasePlusOffset(SDValue Base, unsigned Offset, SDLoc dl,
3599                                     SelectionDAG &DAG) {
3600   EVT VT = Base.getValueType();
3601   return DAG.getNode(ISD::ADD, dl,
3602                      VT, Base, DAG.getConstant(Offset, VT));
3603 }
3604 
3605 /// isMemSrcFromString - Returns true if memcpy source is a string constant.
3606 ///
3607 static bool isMemSrcFromString(SDValue Src, StringRef &Str) {
3608   unsigned SrcDelta = 0;
3609   GlobalAddressSDNode *G = NULL;
3610   if (Src.getOpcode() == ISD::GlobalAddress)
3611     G = cast<GlobalAddressSDNode>(Src);
3612   else if (Src.getOpcode() == ISD::ADD &&
3613            Src.getOperand(0).getOpcode() == ISD::GlobalAddress &&
3614            Src.getOperand(1).getOpcode() == ISD::Constant) {
3615     G = cast<GlobalAddressSDNode>(Src.getOperand(0));
3616     SrcDelta = cast<ConstantSDNode>(Src.getOperand(1))->getZExtValue();
3617   }
3618   if (!G)
3619     return false;
3620 
3621   return getConstantStringInfo(G->getGlobal(), Str, SrcDelta, false);
3622 }
3623 
3624 /// FindOptimalMemOpLowering - Determines the optimal series of memory ops
3625 /// to replace the memset / memcpy. Return true if the number of memory ops
3626 /// is below the threshold. It returns the types of the sequence of
3627 /// memory ops to perform memset / memcpy by reference.
3628 static bool FindOptimalMemOpLowering(std::vector<EVT> &MemOps,
3629                                      unsigned Limit, uint64_t Size,
3630                                      unsigned DstAlign, unsigned SrcAlign,
3631                                      bool IsMemset,
3632                                      bool ZeroMemset,
3633                                      bool MemcpyStrSrc,
3634                                      bool AllowOverlap,
3635                                      SelectionDAG &DAG,
3636                                      const TargetLowering &TLI) {
3637   assert((SrcAlign == 0 || SrcAlign >= DstAlign) &&
3638          "Expecting memcpy / memset source to meet alignment requirement!");
3639   // If 'SrcAlign' is zero, that means the memory operation does not need to
3640   // load the value, i.e. memset or memcpy from constant string. Otherwise,
3641   // it's the inferred alignment of the source. 'DstAlign', on the other hand,
3642   // is the specified alignment of the memory operation. If it is zero, that
3643   // means it's possible to change the alignment of the destination.
3644   // 'MemcpyStrSrc' indicates whether the memcpy source is constant so it does
3645   // not need to be loaded.
3646   EVT VT = TLI.getOptimalMemOpType(Size, DstAlign, SrcAlign,
3647                                    IsMemset, ZeroMemset, MemcpyStrSrc,
3648                                    DAG.getMachineFunction());
3649 
3650   if (VT == MVT::Other) {
3651     unsigned AS = 0;
3652     if (DstAlign >= TLI.getDataLayout()->getPointerPrefAlignment(AS) ||
3653         TLI.allowsUnalignedMemoryAccesses(VT, AS)) {
3654       VT = TLI.getPointerTy();
3655     } else {
3656       switch (DstAlign & 7) {
3657       case 0:  VT = MVT::i64; break;
3658       case 4:  VT = MVT::i32; break;
3659       case 2:  VT = MVT::i16; break;
3660       default: VT = MVT::i8;  break;
3661       }
3662     }
3663 
3664     MVT LVT = MVT::i64;
3665     while (!TLI.isTypeLegal(LVT))
3666       LVT = (MVT::SimpleValueType)(LVT.SimpleTy - 1);
3667     assert(LVT.isInteger());
3668 
3669     if (VT.bitsGT(LVT))
3670       VT = LVT;
3671   }
3672 
3673   unsigned NumMemOps = 0;
3674   while (Size != 0) {
3675     unsigned VTSize = VT.getSizeInBits() / 8;
3676     while (VTSize > Size) {
3677       // For now, only use non-vector loads / stores for the left-over pieces.
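      // For example, a 7-byte operation performed with i32 accesses ends
      // with an i16 followed by an i8 to cover the remaining 3 bytes.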
3678 EVT NewVT = VT; 3679 unsigned NewVTSize; 3680 3681 bool Found = false; 3682 if (VT.isVector() || VT.isFloatingPoint()) { 3683 NewVT = (VT.getSizeInBits() > 64) ? MVT::i64 : MVT::i32; 3684 if (TLI.isOperationLegalOrCustom(ISD::STORE, NewVT) && 3685 TLI.isSafeMemOpType(NewVT.getSimpleVT())) 3686 Found = true; 3687 else if (NewVT == MVT::i64 && 3688 TLI.isOperationLegalOrCustom(ISD::STORE, MVT::f64) && 3689 TLI.isSafeMemOpType(MVT::f64)) { 3690 // i64 is usually not legal on 32-bit targets, but f64 may be. 3691 NewVT = MVT::f64; 3692 Found = true; 3693 } 3694 } 3695 3696 if (!Found) { 3697 do { 3698 NewVT = (MVT::SimpleValueType)(NewVT.getSimpleVT().SimpleTy - 1); 3699 if (NewVT == MVT::i8) 3700 break; 3701 } while (!TLI.isSafeMemOpType(NewVT.getSimpleVT())); 3702 } 3703 NewVTSize = NewVT.getSizeInBits() / 8; 3704 3705 // If the new VT cannot cover all of the remaining bits, then consider 3706 // issuing a (or a pair of) unaligned and overlapping load / store. 3707 // FIXME: Only does this for 64-bit or more since we don't have proper 3708 // cost model for unaligned load / store. 3709 bool Fast; 3710 unsigned AS = 0; 3711 if (NumMemOps && AllowOverlap && 3712 VTSize >= 8 && NewVTSize < Size && 3713 TLI.allowsUnalignedMemoryAccesses(VT, AS, &Fast) && Fast) 3714 VTSize = Size; 3715 else { 3716 VT = NewVT; 3717 VTSize = NewVTSize; 3718 } 3719 } 3720 3721 if (++NumMemOps > Limit) 3722 return false; 3723 3724 MemOps.push_back(VT); 3725 Size -= VTSize; 3726 } 3727 3728 return true; 3729 } 3730 3731 static SDValue getMemcpyLoadsAndStores(SelectionDAG &DAG, SDLoc dl, 3732 SDValue Chain, SDValue Dst, 3733 SDValue Src, uint64_t Size, 3734 unsigned Align, bool isVol, 3735 bool AlwaysInline, 3736 MachinePointerInfo DstPtrInfo, 3737 MachinePointerInfo SrcPtrInfo) { 3738 // Turn a memcpy of undef to nop. 3739 if (Src.getOpcode() == ISD::UNDEF) 3740 return Chain; 3741 3742 // Expand memcpy to a series of load and store ops if the size operand falls 3743 // below a certain threshold. 3744 // TODO: In the AlwaysInline case, if the size is big then generate a loop 3745 // rather than maybe a humongous number of loads and stores. 3746 const TargetLowering &TLI = DAG.getTargetLoweringInfo(); 3747 std::vector<EVT> MemOps; 3748 bool DstAlignCanChange = false; 3749 MachineFunction &MF = DAG.getMachineFunction(); 3750 MachineFrameInfo *MFI = MF.getFrameInfo(); 3751 bool OptSize = 3752 MF.getFunction()->getAttributes(). 3753 hasAttribute(AttributeSet::FunctionIndex, Attribute::OptimizeForSize); 3754 FrameIndexSDNode *FI = dyn_cast<FrameIndexSDNode>(Dst); 3755 if (FI && !MFI->isFixedObjectIndex(FI->getIndex())) 3756 DstAlignCanChange = true; 3757 unsigned SrcAlign = DAG.InferPtrAlignment(Src); 3758 if (Align > SrcAlign) 3759 SrcAlign = Align; 3760 StringRef Str; 3761 bool CopyFromStr = isMemSrcFromString(Src, Str); 3762 bool isZeroStr = CopyFromStr && Str.empty(); 3763 unsigned Limit = AlwaysInline ? ~0U : TLI.getMaxStoresPerMemcpy(OptSize); 3764 3765 if (!FindOptimalMemOpLowering(MemOps, Limit, Size, 3766 (DstAlignCanChange ? 0 : Align), 3767 (isZeroStr ? 0 : SrcAlign), 3768 false, false, CopyFromStr, true, DAG, TLI)) 3769 return SDValue(); 3770 3771 if (DstAlignCanChange) { 3772 Type *Ty = MemOps[0].getTypeForEVT(*DAG.getContext()); 3773 unsigned NewAlign = (unsigned) TLI.getDataLayout()->getABITypeAlignment(Ty); 3774 3775 // Don't promote to an alignment that would require dynamic stack 3776 // realignment. 
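    // (For example, do not bump a frame object past the target's natural
    // stack alignment, say from 8 to 16 bytes, since that would force
    // dynamic stack realignment.)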
3777 const TargetRegisterInfo *TRI = MF.getTarget().getRegisterInfo(); 3778 if (!TRI->needsStackRealignment(MF)) 3779 while (NewAlign > Align && 3780 TLI.getDataLayout()->exceedsNaturalStackAlignment(NewAlign)) 3781 NewAlign /= 2; 3782 3783 if (NewAlign > Align) { 3784 // Give the stack frame object a larger alignment if needed. 3785 if (MFI->getObjectAlignment(FI->getIndex()) < NewAlign) 3786 MFI->setObjectAlignment(FI->getIndex(), NewAlign); 3787 Align = NewAlign; 3788 } 3789 } 3790 3791 SmallVector<SDValue, 8> OutChains; 3792 unsigned NumMemOps = MemOps.size(); 3793 uint64_t SrcOff = 0, DstOff = 0; 3794 for (unsigned i = 0; i != NumMemOps; ++i) { 3795 EVT VT = MemOps[i]; 3796 unsigned VTSize = VT.getSizeInBits() / 8; 3797 SDValue Value, Store; 3798 3799 if (VTSize > Size) { 3800 // Issuing an unaligned load / store pair that overlaps with the previous 3801 // pair. Adjust the offset accordingly. 3802 assert(i == NumMemOps-1 && i != 0); 3803 SrcOff -= VTSize - Size; 3804 DstOff -= VTSize - Size; 3805 } 3806 3807 if (CopyFromStr && 3808 (isZeroStr || (VT.isInteger() && !VT.isVector()))) { 3809 // It's unlikely a store of a vector immediate can be done in a single 3810 // instruction. It would require a load from a constantpool first. 3811 // We only handle zero vectors here. 3812 // FIXME: Handle other cases where store of vector immediate is done in 3813 // a single instruction. 3814 Value = getMemsetStringVal(VT, dl, DAG, TLI, Str.substr(SrcOff)); 3815 if (Value.getNode()) 3816 Store = DAG.getStore(Chain, dl, Value, 3817 getMemBasePlusOffset(Dst, DstOff, dl, DAG), 3818 DstPtrInfo.getWithOffset(DstOff), isVol, 3819 false, Align); 3820 } 3821 3822 if (!Store.getNode()) { 3823 // The type might not be legal for the target. This should only happen 3824 // if the type is smaller than a legal type, as on PPC, so the right 3825 // thing to do is generate a LoadExt/StoreTrunc pair. These simplify 3826 // to Load/Store if NVT==VT. 3827 // FIXME does the case above also need this? 3828 EVT NVT = TLI.getTypeToTransformTo(*DAG.getContext(), VT); 3829 assert(NVT.bitsGE(VT)); 3830 Value = DAG.getExtLoad(ISD::EXTLOAD, dl, NVT, Chain, 3831 getMemBasePlusOffset(Src, SrcOff, dl, DAG), 3832 SrcPtrInfo.getWithOffset(SrcOff), VT, isVol, false, 3833 MinAlign(SrcAlign, SrcOff)); 3834 Store = DAG.getTruncStore(Chain, dl, Value, 3835 getMemBasePlusOffset(Dst, DstOff, dl, DAG), 3836 DstPtrInfo.getWithOffset(DstOff), VT, isVol, 3837 false, Align); 3838 } 3839 OutChains.push_back(Store); 3840 SrcOff += VTSize; 3841 DstOff += VTSize; 3842 Size -= VTSize; 3843 } 3844 3845 return DAG.getNode(ISD::TokenFactor, dl, MVT::Other, 3846 &OutChains[0], OutChains.size()); 3847 } 3848 3849 static SDValue getMemmoveLoadsAndStores(SelectionDAG &DAG, SDLoc dl, 3850 SDValue Chain, SDValue Dst, 3851 SDValue Src, uint64_t Size, 3852 unsigned Align, bool isVol, 3853 bool AlwaysInline, 3854 MachinePointerInfo DstPtrInfo, 3855 MachinePointerInfo SrcPtrInfo) { 3856 // Turn a memmove of undef to nop. 3857 if (Src.getOpcode() == ISD::UNDEF) 3858 return Chain; 3859 3860 // Expand memmove to a series of load and store ops if the size operand falls 3861 // below a certain threshold. 3862 const TargetLowering &TLI = DAG.getTargetLoweringInfo(); 3863 std::vector<EVT> MemOps; 3864 bool DstAlignCanChange = false; 3865 MachineFunction &MF = DAG.getMachineFunction(); 3866 MachineFrameInfo *MFI = MF.getFrameInfo(); 3867 bool OptSize = MF.getFunction()->getAttributes(). 
3868 hasAttribute(AttributeSet::FunctionIndex, Attribute::OptimizeForSize); 3869 FrameIndexSDNode *FI = dyn_cast<FrameIndexSDNode>(Dst); 3870 if (FI && !MFI->isFixedObjectIndex(FI->getIndex())) 3871 DstAlignCanChange = true; 3872 unsigned SrcAlign = DAG.InferPtrAlignment(Src); 3873 if (Align > SrcAlign) 3874 SrcAlign = Align; 3875 unsigned Limit = AlwaysInline ? ~0U : TLI.getMaxStoresPerMemmove(OptSize); 3876 3877 if (!FindOptimalMemOpLowering(MemOps, Limit, Size, 3878 (DstAlignCanChange ? 0 : Align), SrcAlign, 3879 false, false, false, false, DAG, TLI)) 3880 return SDValue(); 3881 3882 if (DstAlignCanChange) { 3883 Type *Ty = MemOps[0].getTypeForEVT(*DAG.getContext()); 3884 unsigned NewAlign = (unsigned) TLI.getDataLayout()->getABITypeAlignment(Ty); 3885 if (NewAlign > Align) { 3886 // Give the stack frame object a larger alignment if needed. 3887 if (MFI->getObjectAlignment(FI->getIndex()) < NewAlign) 3888 MFI->setObjectAlignment(FI->getIndex(), NewAlign); 3889 Align = NewAlign; 3890 } 3891 } 3892 3893 uint64_t SrcOff = 0, DstOff = 0; 3894 SmallVector<SDValue, 8> LoadValues; 3895 SmallVector<SDValue, 8> LoadChains; 3896 SmallVector<SDValue, 8> OutChains; 3897 unsigned NumMemOps = MemOps.size(); 3898 for (unsigned i = 0; i < NumMemOps; i++) { 3899 EVT VT = MemOps[i]; 3900 unsigned VTSize = VT.getSizeInBits() / 8; 3901 SDValue Value; 3902 3903 Value = DAG.getLoad(VT, dl, Chain, 3904 getMemBasePlusOffset(Src, SrcOff, dl, DAG), 3905 SrcPtrInfo.getWithOffset(SrcOff), isVol, 3906 false, false, SrcAlign); 3907 LoadValues.push_back(Value); 3908 LoadChains.push_back(Value.getValue(1)); 3909 SrcOff += VTSize; 3910 } 3911 Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, 3912 &LoadChains[0], LoadChains.size()); 3913 OutChains.clear(); 3914 for (unsigned i = 0; i < NumMemOps; i++) { 3915 EVT VT = MemOps[i]; 3916 unsigned VTSize = VT.getSizeInBits() / 8; 3917 SDValue Store; 3918 3919 Store = DAG.getStore(Chain, dl, LoadValues[i], 3920 getMemBasePlusOffset(Dst, DstOff, dl, DAG), 3921 DstPtrInfo.getWithOffset(DstOff), isVol, false, Align); 3922 OutChains.push_back(Store); 3923 DstOff += VTSize; 3924 } 3925 3926 return DAG.getNode(ISD::TokenFactor, dl, MVT::Other, 3927 &OutChains[0], OutChains.size()); 3928 } 3929 3930 /// \brief Lower the call to 'memset' intrinsic function into a series of store 3931 /// operations. 3932 /// 3933 /// \param DAG Selection DAG where lowered code is placed. 3934 /// \param dl Link to corresponding IR location. 3935 /// \param Chain Control flow dependency. 3936 /// \param Dst Pointer to destination memory location. 3937 /// \param Src Value of byte to write into the memory. 3938 /// \param Size Number of bytes to write. 3939 /// \param Align Alignment of the destination in bytes. 3940 /// \param isVol True if destination is volatile. 3941 /// \param DstPtrInfo IR information on the memory pointer. 3942 /// \returns New head in the control flow, if lowering was successful, empty 3943 /// SDValue otherwise. 3944 /// 3945 /// The function tries to replace 'llvm.memset' intrinsic with several store 3946 /// operations and value calculation code. This is usually profitable for small 3947 /// memory size. 3948 static SDValue getMemsetStores(SelectionDAG &DAG, SDLoc dl, 3949 SDValue Chain, SDValue Dst, 3950 SDValue Src, uint64_t Size, 3951 unsigned Align, bool isVol, 3952 MachinePointerInfo DstPtrInfo) { 3953 // Turn a memset of undef to nop. 
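  // (Storing an undef value has no defined effect, so the incoming chain can
  // simply be forwarded.)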
3954 if (Src.getOpcode() == ISD::UNDEF) 3955 return Chain; 3956 3957 // Expand memset to a series of load/store ops if the size operand 3958 // falls below a certain threshold. 3959 const TargetLowering &TLI = DAG.getTargetLoweringInfo(); 3960 std::vector<EVT> MemOps; 3961 bool DstAlignCanChange = false; 3962 MachineFunction &MF = DAG.getMachineFunction(); 3963 MachineFrameInfo *MFI = MF.getFrameInfo(); 3964 bool OptSize = MF.getFunction()->getAttributes(). 3965 hasAttribute(AttributeSet::FunctionIndex, Attribute::OptimizeForSize); 3966 FrameIndexSDNode *FI = dyn_cast<FrameIndexSDNode>(Dst); 3967 if (FI && !MFI->isFixedObjectIndex(FI->getIndex())) 3968 DstAlignCanChange = true; 3969 bool IsZeroVal = 3970 isa<ConstantSDNode>(Src) && cast<ConstantSDNode>(Src)->isNullValue(); 3971 if (!FindOptimalMemOpLowering(MemOps, TLI.getMaxStoresPerMemset(OptSize), 3972 Size, (DstAlignCanChange ? 0 : Align), 0, 3973 true, IsZeroVal, false, true, DAG, TLI)) 3974 return SDValue(); 3975 3976 if (DstAlignCanChange) { 3977 Type *Ty = MemOps[0].getTypeForEVT(*DAG.getContext()); 3978 unsigned NewAlign = (unsigned) TLI.getDataLayout()->getABITypeAlignment(Ty); 3979 if (NewAlign > Align) { 3980 // Give the stack frame object a larger alignment if needed. 3981 if (MFI->getObjectAlignment(FI->getIndex()) < NewAlign) 3982 MFI->setObjectAlignment(FI->getIndex(), NewAlign); 3983 Align = NewAlign; 3984 } 3985 } 3986 3987 SmallVector<SDValue, 8> OutChains; 3988 uint64_t DstOff = 0; 3989 unsigned NumMemOps = MemOps.size(); 3990 3991 // Find the largest store and generate the bit pattern for it. 3992 EVT LargestVT = MemOps[0]; 3993 for (unsigned i = 1; i < NumMemOps; i++) 3994 if (MemOps[i].bitsGT(LargestVT)) 3995 LargestVT = MemOps[i]; 3996 SDValue MemSetValue = getMemsetValue(Src, LargestVT, DAG, dl); 3997 3998 for (unsigned i = 0; i < NumMemOps; i++) { 3999 EVT VT = MemOps[i]; 4000 unsigned VTSize = VT.getSizeInBits() / 8; 4001 if (VTSize > Size) { 4002 // Issuing an unaligned load / store pair that overlaps with the previous 4003 // pair. Adjust the offset accordingly. 4004 assert(i == NumMemOps-1 && i != 0); 4005 DstOff -= VTSize - Size; 4006 } 4007 4008 // If this store is smaller than the largest store see whether we can get 4009 // the smaller value for free with a truncate. 4010 SDValue Value = MemSetValue; 4011 if (VT.bitsLT(LargestVT)) { 4012 if (!LargestVT.isVector() && !VT.isVector() && 4013 TLI.isTruncateFree(LargestVT, VT)) 4014 Value = DAG.getNode(ISD::TRUNCATE, dl, VT, MemSetValue); 4015 else 4016 Value = getMemsetValue(Src, VT, DAG, dl); 4017 } 4018 assert(Value.getValueType() == VT && "Value with wrong type."); 4019 SDValue Store = DAG.getStore(Chain, dl, Value, 4020 getMemBasePlusOffset(Dst, DstOff, dl, DAG), 4021 DstPtrInfo.getWithOffset(DstOff), 4022 isVol, false, Align); 4023 OutChains.push_back(Store); 4024 DstOff += VT.getSizeInBits() / 8; 4025 Size -= VTSize; 4026 } 4027 4028 return DAG.getNode(ISD::TokenFactor, dl, MVT::Other, 4029 &OutChains[0], OutChains.size()); 4030 } 4031 4032 SDValue SelectionDAG::getMemcpy(SDValue Chain, SDLoc dl, SDValue Dst, 4033 SDValue Src, SDValue Size, 4034 unsigned Align, bool isVol, bool AlwaysInline, 4035 MachinePointerInfo DstPtrInfo, 4036 MachinePointerInfo SrcPtrInfo) { 4037 assert(Align && "The SDAG layer expects explicit alignment and reserves 0"); 4038 4039 // Check to see if we should lower the memcpy to loads and stores first. 4040 // For cases within the target-specified limits, this is the best choice. 
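  // The overall strategy below: inline loads / stores for small constant
  // sizes, then target-specific lowering, and finally a plain library call.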
4041 ConstantSDNode *ConstantSize = dyn_cast<ConstantSDNode>(Size); 4042 if (ConstantSize) { 4043 // Memcpy with size zero? Just return the original chain. 4044 if (ConstantSize->isNullValue()) 4045 return Chain; 4046 4047 SDValue Result = getMemcpyLoadsAndStores(*this, dl, Chain, Dst, Src, 4048 ConstantSize->getZExtValue(),Align, 4049 isVol, false, DstPtrInfo, SrcPtrInfo); 4050 if (Result.getNode()) 4051 return Result; 4052 } 4053 4054 // Then check to see if we should lower the memcpy with target-specific 4055 // code. If the target chooses to do this, this is the next best. 4056 SDValue Result = 4057 TSI.EmitTargetCodeForMemcpy(*this, dl, Chain, Dst, Src, Size, Align, 4058 isVol, AlwaysInline, 4059 DstPtrInfo, SrcPtrInfo); 4060 if (Result.getNode()) 4061 return Result; 4062 4063 // If we really need inline code and the target declined to provide it, 4064 // use a (potentially long) sequence of loads and stores. 4065 if (AlwaysInline) { 4066 assert(ConstantSize && "AlwaysInline requires a constant size!"); 4067 return getMemcpyLoadsAndStores(*this, dl, Chain, Dst, Src, 4068 ConstantSize->getZExtValue(), Align, isVol, 4069 true, DstPtrInfo, SrcPtrInfo); 4070 } 4071 4072 // FIXME: If the memcpy is volatile (isVol), lowering it to a plain libc 4073 // memcpy is not guaranteed to be safe. libc memcpys aren't required to 4074 // respect volatile, so they may do things like read or write memory 4075 // beyond the given memory regions. But fixing this isn't easy, and most 4076 // people don't care. 4077 4078 const TargetLowering *TLI = TM.getTargetLowering(); 4079 4080 // Emit a library call. 4081 TargetLowering::ArgListTy Args; 4082 TargetLowering::ArgListEntry Entry; 4083 Entry.Ty = TLI->getDataLayout()->getIntPtrType(*getContext()); 4084 Entry.Node = Dst; Args.push_back(Entry); 4085 Entry.Node = Src; Args.push_back(Entry); 4086 Entry.Node = Size; Args.push_back(Entry); 4087 // FIXME: pass in SDLoc 4088 TargetLowering:: 4089 CallLoweringInfo CLI(Chain, Type::getVoidTy(*getContext()), 4090 false, false, false, false, 0, 4091 TLI->getLibcallCallingConv(RTLIB::MEMCPY), 4092 /*isTailCall=*/false, 4093 /*doesNotReturn=*/false, /*isReturnValueUsed=*/false, 4094 getExternalSymbol(TLI->getLibcallName(RTLIB::MEMCPY), 4095 TLI->getPointerTy()), 4096 Args, *this, dl); 4097 std::pair<SDValue,SDValue> CallResult = TLI->LowerCallTo(CLI); 4098 4099 return CallResult.second; 4100 } 4101 4102 SDValue SelectionDAG::getMemmove(SDValue Chain, SDLoc dl, SDValue Dst, 4103 SDValue Src, SDValue Size, 4104 unsigned Align, bool isVol, 4105 MachinePointerInfo DstPtrInfo, 4106 MachinePointerInfo SrcPtrInfo) { 4107 assert(Align && "The SDAG layer expects explicit alignment and reserves 0"); 4108 4109 // Check to see if we should lower the memmove to loads and stores first. 4110 // For cases within the target-specified limits, this is the best choice. 4111 ConstantSDNode *ConstantSize = dyn_cast<ConstantSDNode>(Size); 4112 if (ConstantSize) { 4113 // Memmove with size zero? Just return the original chain. 4114 if (ConstantSize->isNullValue()) 4115 return Chain; 4116 4117 SDValue Result = 4118 getMemmoveLoadsAndStores(*this, dl, Chain, Dst, Src, 4119 ConstantSize->getZExtValue(), Align, isVol, 4120 false, DstPtrInfo, SrcPtrInfo); 4121 if (Result.getNode()) 4122 return Result; 4123 } 4124 4125 // Then check to see if we should lower the memmove with target-specific 4126 // code. If the target chooses to do this, this is the next best. 
4127 SDValue Result = 4128 TSI.EmitTargetCodeForMemmove(*this, dl, Chain, Dst, Src, Size, Align, isVol, 4129 DstPtrInfo, SrcPtrInfo); 4130 if (Result.getNode()) 4131 return Result; 4132 4133 // FIXME: If the memmove is volatile, lowering it to plain libc memmove may 4134 // not be safe. See memcpy above for more details. 4135 4136 const TargetLowering *TLI = TM.getTargetLowering(); 4137 4138 // Emit a library call. 4139 TargetLowering::ArgListTy Args; 4140 TargetLowering::ArgListEntry Entry; 4141 Entry.Ty = TLI->getDataLayout()->getIntPtrType(*getContext()); 4142 Entry.Node = Dst; Args.push_back(Entry); 4143 Entry.Node = Src; Args.push_back(Entry); 4144 Entry.Node = Size; Args.push_back(Entry); 4145 // FIXME: pass in SDLoc 4146 TargetLowering:: 4147 CallLoweringInfo CLI(Chain, Type::getVoidTy(*getContext()), 4148 false, false, false, false, 0, 4149 TLI->getLibcallCallingConv(RTLIB::MEMMOVE), 4150 /*isTailCall=*/false, 4151 /*doesNotReturn=*/false, /*isReturnValueUsed=*/false, 4152 getExternalSymbol(TLI->getLibcallName(RTLIB::MEMMOVE), 4153 TLI->getPointerTy()), 4154 Args, *this, dl); 4155 std::pair<SDValue,SDValue> CallResult = TLI->LowerCallTo(CLI); 4156 4157 return CallResult.second; 4158 } 4159 4160 SDValue SelectionDAG::getMemset(SDValue Chain, SDLoc dl, SDValue Dst, 4161 SDValue Src, SDValue Size, 4162 unsigned Align, bool isVol, 4163 MachinePointerInfo DstPtrInfo) { 4164 assert(Align && "The SDAG layer expects explicit alignment and reserves 0"); 4165 4166 // Check to see if we should lower the memset to stores first. 4167 // For cases within the target-specified limits, this is the best choice. 4168 ConstantSDNode *ConstantSize = dyn_cast<ConstantSDNode>(Size); 4169 if (ConstantSize) { 4170 // Memset with size zero? Just return the original chain. 4171 if (ConstantSize->isNullValue()) 4172 return Chain; 4173 4174 SDValue Result = 4175 getMemsetStores(*this, dl, Chain, Dst, Src, ConstantSize->getZExtValue(), 4176 Align, isVol, DstPtrInfo); 4177 4178 if (Result.getNode()) 4179 return Result; 4180 } 4181 4182 // Then check to see if we should lower the memset with target-specific 4183 // code. If the target chooses to do this, this is the next best. 4184 SDValue Result = 4185 TSI.EmitTargetCodeForMemset(*this, dl, Chain, Dst, Src, Size, Align, isVol, 4186 DstPtrInfo); 4187 if (Result.getNode()) 4188 return Result; 4189 4190 // Emit a library call. 4191 const TargetLowering *TLI = TM.getTargetLowering(); 4192 Type *IntPtrTy = TLI->getDataLayout()->getIntPtrType(*getContext()); 4193 TargetLowering::ArgListTy Args; 4194 TargetLowering::ArgListEntry Entry; 4195 Entry.Node = Dst; Entry.Ty = IntPtrTy; 4196 Args.push_back(Entry); 4197 // Extend or truncate the argument to be an i32 value for the call. 
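  // The C library takes the fill value as an 'int', so a wider value is
  // truncated and a narrower one is zero-extended to i32 here.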
4198   if (Src.getValueType().bitsGT(MVT::i32))
4199     Src = getNode(ISD::TRUNCATE, dl, MVT::i32, Src);
4200   else
4201     Src = getNode(ISD::ZERO_EXTEND, dl, MVT::i32, Src);
4202   Entry.Node = Src;
4203   Entry.Ty = Type::getInt32Ty(*getContext());
4204   Entry.isSExt = true;
4205   Args.push_back(Entry);
4206   Entry.Node = Size;
4207   Entry.Ty = IntPtrTy;
4208   Entry.isSExt = false;
4209   Args.push_back(Entry);
4210   // FIXME: pass in SDLoc
4211   TargetLowering::
4212   CallLoweringInfo CLI(Chain, Type::getVoidTy(*getContext()),
4213                        false, false, false, false, 0,
4214                        TLI->getLibcallCallingConv(RTLIB::MEMSET),
4215                        /*isTailCall=*/false,
4216                        /*doesNotReturn=*/false, /*isReturnValueUsed=*/false,
4217                        getExternalSymbol(TLI->getLibcallName(RTLIB::MEMSET),
4218                                          TLI->getPointerTy()),
4219                        Args, *this, dl);
4220   std::pair<SDValue,SDValue> CallResult = TLI->LowerCallTo(CLI);
4221 
4222   return CallResult.second;
4223 }
4224 
4225 SDValue SelectionDAG::getAtomic(unsigned Opcode, SDLoc dl, EVT MemVT,
4226                                 SDVTList VTList, SDValue *Ops, unsigned NumOps,
4227                                 MachineMemOperand *MMO,
4228                                 AtomicOrdering SuccessOrdering,
4229                                 AtomicOrdering FailureOrdering,
4230                                 SynchronizationScope SynchScope) {
4231   FoldingSetNodeID ID;
4232   ID.AddInteger(MemVT.getRawBits());
4233   AddNodeIDNode(ID, Opcode, VTList, Ops, NumOps);
4234   ID.AddInteger(MMO->getPointerInfo().getAddrSpace());
4235   void* IP = 0;
4236   if (SDNode *E = CSEMap.FindNodeOrInsertPos(ID, IP)) {
4237     cast<AtomicSDNode>(E)->refineAlignment(MMO);
4238     return SDValue(E, 0);
4239   }
4240 
4241   // Allocate the operands array for the node out of the BumpPtrAllocator, since
4242   // SDNode doesn't have access to it. This memory will be "leaked" when
4243   // the node is deallocated, but recovered when the allocator is released.
4244   // If the number of operands is less than 5 we use AtomicSDNode's internal
4245   // storage.
4246   SDUse *DynOps = NumOps > 4 ? OperandAllocator.Allocate<SDUse>(NumOps) : 0;
4247 
4248   SDNode *N = new (NodeAllocator) AtomicSDNode(Opcode, dl.getIROrder(),
4249                                                dl.getDebugLoc(), VTList, MemVT,
4250                                                Ops, DynOps, NumOps, MMO,
4251                                                SuccessOrdering, FailureOrdering,
4252                                                SynchScope);
4253   CSEMap.InsertNode(N, IP);
4254   AllNodes.push_back(N);
4255   return SDValue(N, 0);
4256 }
4257 
4258 SDValue SelectionDAG::getAtomic(unsigned Opcode, SDLoc dl, EVT MemVT,
4259                                 SDVTList VTList, SDValue *Ops, unsigned NumOps,
4260                                 MachineMemOperand *MMO,
4261                                 AtomicOrdering Ordering,
4262                                 SynchronizationScope SynchScope) {
4263   return getAtomic(Opcode, dl, MemVT, VTList, Ops, NumOps, MMO, Ordering,
4264                    Ordering, SynchScope);
4265 }
4266 
4267 SDValue SelectionDAG::getAtomic(unsigned Opcode, SDLoc dl, EVT MemVT,
4268                                 SDValue Chain, SDValue Ptr, SDValue Cmp,
4269                                 SDValue Swp, MachinePointerInfo PtrInfo,
4270                                 unsigned Alignment,
4271                                 AtomicOrdering SuccessOrdering,
4272                                 AtomicOrdering FailureOrdering,
4273                                 SynchronizationScope SynchScope) {
4274   if (Alignment == 0)  // Ensure that codegen never sees alignment 0
4275     Alignment = getEVTAlignment(MemVT);
4276 
4277   MachineFunction &MF = getMachineFunction();
4278 
4279   // All atomics are load and store, except for ATOMIC_LOAD and ATOMIC_STORE.
4280   // For now, atomics are considered to be volatile always.
4281   // FIXME: Volatile isn't really correct; we should keep track of atomic
4282   // orderings in the memoperand.
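  // A cmpxchg or atomicrmw both loads and stores, so it gets MOLoad and
  // MOStore below; ATOMIC_LOAD and ATOMIC_STORE each drop the flag they do
  // not need.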
4283 unsigned Flags = MachineMemOperand::MOVolatile; 4284 if (Opcode != ISD::ATOMIC_STORE) 4285 Flags |= MachineMemOperand::MOLoad; 4286 if (Opcode != ISD::ATOMIC_LOAD) 4287 Flags |= MachineMemOperand::MOStore; 4288 4289 MachineMemOperand *MMO = 4290 MF.getMachineMemOperand(PtrInfo, Flags, MemVT.getStoreSize(), Alignment); 4291 4292 return getAtomic(Opcode, dl, MemVT, Chain, Ptr, Cmp, Swp, MMO, 4293 SuccessOrdering, FailureOrdering, SynchScope); 4294 } 4295 4296 SDValue SelectionDAG::getAtomic(unsigned Opcode, SDLoc dl, EVT MemVT, 4297 SDValue Chain, 4298 SDValue Ptr, SDValue Cmp, 4299 SDValue Swp, MachineMemOperand *MMO, 4300 AtomicOrdering SuccessOrdering, 4301 AtomicOrdering FailureOrdering, 4302 SynchronizationScope SynchScope) { 4303 assert(Opcode == ISD::ATOMIC_CMP_SWAP && "Invalid Atomic Op"); 4304 assert(Cmp.getValueType() == Swp.getValueType() && "Invalid Atomic Op Types"); 4305 4306 EVT VT = Cmp.getValueType(); 4307 4308 SDVTList VTs = getVTList(VT, MVT::Other); 4309 SDValue Ops[] = {Chain, Ptr, Cmp, Swp}; 4310 return getAtomic(Opcode, dl, MemVT, VTs, Ops, 4, MMO, SuccessOrdering, 4311 FailureOrdering, SynchScope); 4312 } 4313 4314 SDValue SelectionDAG::getAtomic(unsigned Opcode, SDLoc dl, EVT MemVT, 4315 SDValue Chain, 4316 SDValue Ptr, SDValue Val, 4317 const Value* PtrVal, 4318 unsigned Alignment, 4319 AtomicOrdering Ordering, 4320 SynchronizationScope SynchScope) { 4321 if (Alignment == 0) // Ensure that codegen never sees alignment 0 4322 Alignment = getEVTAlignment(MemVT); 4323 4324 MachineFunction &MF = getMachineFunction(); 4325 // An atomic store does not load. An atomic load does not store. 4326 // (An atomicrmw obviously both loads and stores.) 4327 // For now, atomics are considered to be volatile always, and they are 4328 // chained as such. 4329 // FIXME: Volatile isn't really correct; we should keep track of atomic 4330 // orderings in the memoperand. 4331 unsigned Flags = MachineMemOperand::MOVolatile; 4332 if (Opcode != ISD::ATOMIC_STORE) 4333 Flags |= MachineMemOperand::MOLoad; 4334 if (Opcode != ISD::ATOMIC_LOAD) 4335 Flags |= MachineMemOperand::MOStore; 4336 4337 MachineMemOperand *MMO = 4338 MF.getMachineMemOperand(MachinePointerInfo(PtrVal), Flags, 4339 MemVT.getStoreSize(), Alignment); 4340 4341 return getAtomic(Opcode, dl, MemVT, Chain, Ptr, Val, MMO, 4342 Ordering, SynchScope); 4343 } 4344 4345 SDValue SelectionDAG::getAtomic(unsigned Opcode, SDLoc dl, EVT MemVT, 4346 SDValue Chain, 4347 SDValue Ptr, SDValue Val, 4348 MachineMemOperand *MMO, 4349 AtomicOrdering Ordering, 4350 SynchronizationScope SynchScope) { 4351 assert((Opcode == ISD::ATOMIC_LOAD_ADD || 4352 Opcode == ISD::ATOMIC_LOAD_SUB || 4353 Opcode == ISD::ATOMIC_LOAD_AND || 4354 Opcode == ISD::ATOMIC_LOAD_OR || 4355 Opcode == ISD::ATOMIC_LOAD_XOR || 4356 Opcode == ISD::ATOMIC_LOAD_NAND || 4357 Opcode == ISD::ATOMIC_LOAD_MIN || 4358 Opcode == ISD::ATOMIC_LOAD_MAX || 4359 Opcode == ISD::ATOMIC_LOAD_UMIN || 4360 Opcode == ISD::ATOMIC_LOAD_UMAX || 4361 Opcode == ISD::ATOMIC_SWAP || 4362 Opcode == ISD::ATOMIC_STORE) && 4363 "Invalid Atomic Op"); 4364 4365 EVT VT = Val.getValueType(); 4366 4367 SDVTList VTs = Opcode == ISD::ATOMIC_STORE ? 
getVTList(MVT::Other) : 4368 getVTList(VT, MVT::Other); 4369 SDValue Ops[] = {Chain, Ptr, Val}; 4370 return getAtomic(Opcode, dl, MemVT, VTs, Ops, 3, MMO, Ordering, SynchScope); 4371 } 4372 4373 SDValue SelectionDAG::getAtomic(unsigned Opcode, SDLoc dl, EVT MemVT, 4374 EVT VT, SDValue Chain, 4375 SDValue Ptr, 4376 const Value* PtrVal, 4377 unsigned Alignment, 4378 AtomicOrdering Ordering, 4379 SynchronizationScope SynchScope) { 4380 if (Alignment == 0) // Ensure that codegen never sees alignment 0 4381 Alignment = getEVTAlignment(MemVT); 4382 4383 MachineFunction &MF = getMachineFunction(); 4384 // An atomic store does not load. An atomic load does not store. 4385 // (An atomicrmw obviously both loads and stores.) 4386 // For now, atomics are considered to be volatile always, and they are 4387 // chained as such. 4388 // FIXME: Volatile isn't really correct; we should keep track of atomic 4389 // orderings in the memoperand. 4390 unsigned Flags = MachineMemOperand::MOVolatile; 4391 if (Opcode != ISD::ATOMIC_STORE) 4392 Flags |= MachineMemOperand::MOLoad; 4393 if (Opcode != ISD::ATOMIC_LOAD) 4394 Flags |= MachineMemOperand::MOStore; 4395 4396 MachineMemOperand *MMO = 4397 MF.getMachineMemOperand(MachinePointerInfo(PtrVal), Flags, 4398 MemVT.getStoreSize(), Alignment); 4399 4400 return getAtomic(Opcode, dl, MemVT, VT, Chain, Ptr, MMO, 4401 Ordering, SynchScope); 4402 } 4403 4404 SDValue SelectionDAG::getAtomic(unsigned Opcode, SDLoc dl, EVT MemVT, 4405 EVT VT, SDValue Chain, 4406 SDValue Ptr, 4407 MachineMemOperand *MMO, 4408 AtomicOrdering Ordering, 4409 SynchronizationScope SynchScope) { 4410 assert(Opcode == ISD::ATOMIC_LOAD && "Invalid Atomic Op"); 4411 4412 SDVTList VTs = getVTList(VT, MVT::Other); 4413 SDValue Ops[] = {Chain, Ptr}; 4414 return getAtomic(Opcode, dl, MemVT, VTs, Ops, 2, MMO, Ordering, SynchScope); 4415 } 4416 4417 /// getMergeValues - Create a MERGE_VALUES node from the given operands. 
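/// A single operand is returned as-is rather than wrapped in a node.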
4418 SDValue SelectionDAG::getMergeValues(const SDValue *Ops, unsigned NumOps, 4419 SDLoc dl) { 4420 if (NumOps == 1) 4421 return Ops[0]; 4422 4423 SmallVector<EVT, 4> VTs; 4424 VTs.reserve(NumOps); 4425 for (unsigned i = 0; i < NumOps; ++i) 4426 VTs.push_back(Ops[i].getValueType()); 4427 return getNode(ISD::MERGE_VALUES, dl, getVTList(&VTs[0], NumOps), 4428 Ops, NumOps); 4429 } 4430 4431 SDValue 4432 SelectionDAG::getMemIntrinsicNode(unsigned Opcode, SDLoc dl, 4433 const EVT *VTs, unsigned NumVTs, 4434 const SDValue *Ops, unsigned NumOps, 4435 EVT MemVT, MachinePointerInfo PtrInfo, 4436 unsigned Align, bool Vol, 4437 bool ReadMem, bool WriteMem) { 4438 return getMemIntrinsicNode(Opcode, dl, makeVTList(VTs, NumVTs), Ops, NumOps, 4439 MemVT, PtrInfo, Align, Vol, 4440 ReadMem, WriteMem); 4441 } 4442 4443 SDValue 4444 SelectionDAG::getMemIntrinsicNode(unsigned Opcode, SDLoc dl, SDVTList VTList, 4445 const SDValue *Ops, unsigned NumOps, 4446 EVT MemVT, MachinePointerInfo PtrInfo, 4447 unsigned Align, bool Vol, 4448 bool ReadMem, bool WriteMem) { 4449 if (Align == 0) // Ensure that codegen never sees alignment 0 4450 Align = getEVTAlignment(MemVT); 4451 4452 MachineFunction &MF = getMachineFunction(); 4453 unsigned Flags = 0; 4454 if (WriteMem) 4455 Flags |= MachineMemOperand::MOStore; 4456 if (ReadMem) 4457 Flags |= MachineMemOperand::MOLoad; 4458 if (Vol) 4459 Flags |= MachineMemOperand::MOVolatile; 4460 MachineMemOperand *MMO = 4461 MF.getMachineMemOperand(PtrInfo, Flags, MemVT.getStoreSize(), Align); 4462 4463 return getMemIntrinsicNode(Opcode, dl, VTList, Ops, NumOps, MemVT, MMO); 4464 } 4465 4466 SDValue 4467 SelectionDAG::getMemIntrinsicNode(unsigned Opcode, SDLoc dl, SDVTList VTList, 4468 const SDValue *Ops, unsigned NumOps, 4469 EVT MemVT, MachineMemOperand *MMO) { 4470 assert((Opcode == ISD::INTRINSIC_VOID || 4471 Opcode == ISD::INTRINSIC_W_CHAIN || 4472 Opcode == ISD::PREFETCH || 4473 Opcode == ISD::LIFETIME_START || 4474 Opcode == ISD::LIFETIME_END || 4475 (Opcode <= INT_MAX && 4476 (int)Opcode >= ISD::FIRST_TARGET_MEMORY_OPCODE)) && 4477 "Opcode is not a memory-accessing opcode!"); 4478 4479 // Memoize the node unless it returns a flag. 4480 MemIntrinsicSDNode *N; 4481 if (VTList.VTs[VTList.NumVTs-1] != MVT::Glue) { 4482 FoldingSetNodeID ID; 4483 AddNodeIDNode(ID, Opcode, VTList, Ops, NumOps); 4484 ID.AddInteger(MMO->getPointerInfo().getAddrSpace()); 4485 void *IP = 0; 4486 if (SDNode *E = CSEMap.FindNodeOrInsertPos(ID, IP)) { 4487 cast<MemIntrinsicSDNode>(E)->refineAlignment(MMO); 4488 return SDValue(E, 0); 4489 } 4490 4491 N = new (NodeAllocator) MemIntrinsicSDNode(Opcode, dl.getIROrder(), 4492 dl.getDebugLoc(), VTList, Ops, 4493 NumOps, MemVT, MMO); 4494 CSEMap.InsertNode(N, IP); 4495 } else { 4496 N = new (NodeAllocator) MemIntrinsicSDNode(Opcode, dl.getIROrder(), 4497 dl.getDebugLoc(), VTList, Ops, 4498 NumOps, MemVT, MMO); 4499 } 4500 AllNodes.push_back(N); 4501 return SDValue(N, 0); 4502 } 4503 4504 /// InferPointerInfo - If the specified ptr/offset is a frame index, infer a 4505 /// MachinePointerInfo record from it. This is particularly useful because the 4506 /// code generator has many cases where it doesn't bother passing in a 4507 /// MachinePointerInfo to getLoad or getStore when it has "FI+Cst". 4508 static MachinePointerInfo InferPointerInfo(SDValue Ptr, int64_t Offset = 0) { 4509 // If this is FI+Offset, we can model it. 
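  // For example, FrameIndex #3 with Offset 8 becomes a fixed-stack
  // MachinePointerInfo for slot 3 at byte offset 8.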
4510 if (const FrameIndexSDNode *FI = dyn_cast<FrameIndexSDNode>(Ptr)) 4511 return MachinePointerInfo::getFixedStack(FI->getIndex(), Offset); 4512 4513 // If this is (FI+Offset1)+Offset2, we can model it. 4514 if (Ptr.getOpcode() != ISD::ADD || 4515 !isa<ConstantSDNode>(Ptr.getOperand(1)) || 4516 !isa<FrameIndexSDNode>(Ptr.getOperand(0))) 4517 return MachinePointerInfo(); 4518 4519 int FI = cast<FrameIndexSDNode>(Ptr.getOperand(0))->getIndex(); 4520 return MachinePointerInfo::getFixedStack(FI, Offset+ 4521 cast<ConstantSDNode>(Ptr.getOperand(1))->getSExtValue()); 4522 } 4523 4524 /// InferPointerInfo - If the specified ptr/offset is a frame index, infer a 4525 /// MachinePointerInfo record from it. This is particularly useful because the 4526 /// code generator has many cases where it doesn't bother passing in a 4527 /// MachinePointerInfo to getLoad or getStore when it has "FI+Cst". 4528 static MachinePointerInfo InferPointerInfo(SDValue Ptr, SDValue OffsetOp) { 4529 // If the 'Offset' value isn't a constant, we can't handle this. 4530 if (ConstantSDNode *OffsetNode = dyn_cast<ConstantSDNode>(OffsetOp)) 4531 return InferPointerInfo(Ptr, OffsetNode->getSExtValue()); 4532 if (OffsetOp.getOpcode() == ISD::UNDEF) 4533 return InferPointerInfo(Ptr); 4534 return MachinePointerInfo(); 4535 } 4536 4537 4538 SDValue 4539 SelectionDAG::getLoad(ISD::MemIndexedMode AM, ISD::LoadExtType ExtType, 4540 EVT VT, SDLoc dl, SDValue Chain, 4541 SDValue Ptr, SDValue Offset, 4542 MachinePointerInfo PtrInfo, EVT MemVT, 4543 bool isVolatile, bool isNonTemporal, bool isInvariant, 4544 unsigned Alignment, const MDNode *TBAAInfo, 4545 const MDNode *Ranges) { 4546 assert(Chain.getValueType() == MVT::Other && 4547 "Invalid chain type"); 4548 if (Alignment == 0) // Ensure that codegen never sees alignment 0 4549 Alignment = getEVTAlignment(VT); 4550 4551 unsigned Flags = MachineMemOperand::MOLoad; 4552 if (isVolatile) 4553 Flags |= MachineMemOperand::MOVolatile; 4554 if (isNonTemporal) 4555 Flags |= MachineMemOperand::MONonTemporal; 4556 if (isInvariant) 4557 Flags |= MachineMemOperand::MOInvariant; 4558 4559 // If we don't have a PtrInfo, infer the trivial frame index case to simplify 4560 // clients. 4561 if (PtrInfo.V == 0) 4562 PtrInfo = InferPointerInfo(Ptr, Offset); 4563 4564 MachineFunction &MF = getMachineFunction(); 4565 MachineMemOperand *MMO = 4566 MF.getMachineMemOperand(PtrInfo, Flags, MemVT.getStoreSize(), Alignment, 4567 TBAAInfo, Ranges); 4568 return getLoad(AM, ExtType, VT, dl, Chain, Ptr, Offset, MemVT, MMO); 4569 } 4570 4571 SDValue 4572 SelectionDAG::getLoad(ISD::MemIndexedMode AM, ISD::LoadExtType ExtType, 4573 EVT VT, SDLoc dl, SDValue Chain, 4574 SDValue Ptr, SDValue Offset, EVT MemVT, 4575 MachineMemOperand *MMO) { 4576 if (VT == MemVT) { 4577 ExtType = ISD::NON_EXTLOAD; 4578 } else if (ExtType == ISD::NON_EXTLOAD) { 4579 assert(VT == MemVT && "Non-extending load from different memory type!"); 4580 } else { 4581 // Extending load. 
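    // For example, an i32 EXTLOAD of an i16 memory value; the asserts below
    // check that the memory type is strictly narrower and of the same class.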
  assert(MemVT.getScalarType().bitsLT(VT.getScalarType()) &&
         "Should only be an extending load, not truncating!");
  assert(VT.isInteger() == MemVT.isInteger() &&
         "Cannot convert from FP to Int or Int -> FP!");
  assert(VT.isVector() == MemVT.isVector() &&
         "Cannot use an extending load to convert to or from a vector!");
  assert((!VT.isVector() ||
          VT.getVectorNumElements() == MemVT.getVectorNumElements()) &&
         "Cannot use an extending load to change the number of vector "
         "elements!");
  }

  bool Indexed = AM != ISD::UNINDEXED;
  assert((Indexed || Offset.getOpcode() == ISD::UNDEF) &&
         "Unindexed load with an offset!");

  SDVTList VTs = Indexed ?
    getVTList(VT, Ptr.getValueType(), MVT::Other) : getVTList(VT, MVT::Other);
  SDValue Ops[] = { Chain, Ptr, Offset };
  FoldingSetNodeID ID;
  AddNodeIDNode(ID, ISD::LOAD, VTs, Ops, 3);
  ID.AddInteger(MemVT.getRawBits());
  ID.AddInteger(encodeMemSDNodeFlags(ExtType, AM, MMO->isVolatile(),
                                     MMO->isNonTemporal(),
                                     MMO->isInvariant()));
  ID.AddInteger(MMO->getPointerInfo().getAddrSpace());
  void *IP = 0;
  if (SDNode *E = CSEMap.FindNodeOrInsertPos(ID, IP)) {
    cast<LoadSDNode>(E)->refineAlignment(MMO);
    return SDValue(E, 0);
  }
  SDNode *N = new (NodeAllocator) LoadSDNode(Ops, dl.getIROrder(),
                                             dl.getDebugLoc(), VTs, AM, ExtType,
                                             MemVT, MMO);
  CSEMap.InsertNode(N, IP);
  AllNodes.push_back(N);
  return SDValue(N, 0);
}

SDValue SelectionDAG::getLoad(EVT VT, SDLoc dl,
                              SDValue Chain, SDValue Ptr,
                              MachinePointerInfo PtrInfo,
                              bool isVolatile, bool isNonTemporal,
                              bool isInvariant, unsigned Alignment,
                              const MDNode *TBAAInfo,
                              const MDNode *Ranges) {
  SDValue Undef = getUNDEF(Ptr.getValueType());
  return getLoad(ISD::UNINDEXED, ISD::NON_EXTLOAD, VT, dl, Chain, Ptr, Undef,
                 PtrInfo, VT, isVolatile, isNonTemporal, isInvariant, Alignment,
                 TBAAInfo, Ranges);
}

SDValue SelectionDAG::getLoad(EVT VT, SDLoc dl,
                              SDValue Chain, SDValue Ptr,
                              MachineMemOperand *MMO) {
  SDValue Undef = getUNDEF(Ptr.getValueType());
  return getLoad(ISD::UNINDEXED, ISD::NON_EXTLOAD, VT, dl, Chain, Ptr, Undef,
                 VT, MMO);
}

SDValue SelectionDAG::getExtLoad(ISD::LoadExtType ExtType, SDLoc dl, EVT VT,
                                 SDValue Chain, SDValue Ptr,
                                 MachinePointerInfo PtrInfo, EVT MemVT,
                                 bool isVolatile, bool isNonTemporal,
                                 unsigned Alignment, const MDNode *TBAAInfo) {
  SDValue Undef = getUNDEF(Ptr.getValueType());
  return getLoad(ISD::UNINDEXED, ExtType, VT, dl, Chain, Ptr, Undef,
                 PtrInfo, MemVT, isVolatile, isNonTemporal, false, Alignment,
                 TBAAInfo);
}


SDValue SelectionDAG::getExtLoad(ISD::LoadExtType ExtType, SDLoc dl, EVT VT,
                                 SDValue Chain, SDValue Ptr, EVT MemVT,
                                 MachineMemOperand *MMO) {
  SDValue Undef = getUNDEF(Ptr.getValueType());
  return getLoad(ISD::UNINDEXED, ExtType, VT, dl, Chain, Ptr, Undef,
                 MemVT, MMO);
}

SDValue
SelectionDAG::getIndexedLoad(SDValue OrigLoad, SDLoc dl, SDValue Base,
                             SDValue Offset, ISD::MemIndexedMode AM) {
  LoadSDNode *LD = cast<LoadSDNode>(OrigLoad);
  assert(LD->getOffset().getOpcode() == ISD::UNDEF &&
         "Load is already an indexed load!");
  return getLoad(AM, LD->getExtensionType(), OrigLoad.getValueType(), dl,
                 LD->getChain(), Base, Offset, LD->getPointerInfo(),
                 LD->getMemoryVT(), LD->isVolatile(), LD->isNonTemporal(),
4670 false, LD->getAlignment()); 4671 } 4672 4673 SDValue SelectionDAG::getStore(SDValue Chain, SDLoc dl, SDValue Val, 4674 SDValue Ptr, MachinePointerInfo PtrInfo, 4675 bool isVolatile, bool isNonTemporal, 4676 unsigned Alignment, const MDNode *TBAAInfo) { 4677 assert(Chain.getValueType() == MVT::Other && 4678 "Invalid chain type"); 4679 if (Alignment == 0) // Ensure that codegen never sees alignment 0 4680 Alignment = getEVTAlignment(Val.getValueType()); 4681 4682 unsigned Flags = MachineMemOperand::MOStore; 4683 if (isVolatile) 4684 Flags |= MachineMemOperand::MOVolatile; 4685 if (isNonTemporal) 4686 Flags |= MachineMemOperand::MONonTemporal; 4687 4688 if (PtrInfo.V == 0) 4689 PtrInfo = InferPointerInfo(Ptr); 4690 4691 MachineFunction &MF = getMachineFunction(); 4692 MachineMemOperand *MMO = 4693 MF.getMachineMemOperand(PtrInfo, Flags, 4694 Val.getValueType().getStoreSize(), Alignment, 4695 TBAAInfo); 4696 4697 return getStore(Chain, dl, Val, Ptr, MMO); 4698 } 4699 4700 SDValue SelectionDAG::getStore(SDValue Chain, SDLoc dl, SDValue Val, 4701 SDValue Ptr, MachineMemOperand *MMO) { 4702 assert(Chain.getValueType() == MVT::Other && 4703 "Invalid chain type"); 4704 EVT VT = Val.getValueType(); 4705 SDVTList VTs = getVTList(MVT::Other); 4706 SDValue Undef = getUNDEF(Ptr.getValueType()); 4707 SDValue Ops[] = { Chain, Val, Ptr, Undef }; 4708 FoldingSetNodeID ID; 4709 AddNodeIDNode(ID, ISD::STORE, VTs, Ops, 4); 4710 ID.AddInteger(VT.getRawBits()); 4711 ID.AddInteger(encodeMemSDNodeFlags(false, ISD::UNINDEXED, MMO->isVolatile(), 4712 MMO->isNonTemporal(), MMO->isInvariant())); 4713 ID.AddInteger(MMO->getPointerInfo().getAddrSpace()); 4714 void *IP = 0; 4715 if (SDNode *E = CSEMap.FindNodeOrInsertPos(ID, IP)) { 4716 cast<StoreSDNode>(E)->refineAlignment(MMO); 4717 return SDValue(E, 0); 4718 } 4719 SDNode *N = new (NodeAllocator) StoreSDNode(Ops, dl.getIROrder(), 4720 dl.getDebugLoc(), VTs, 4721 ISD::UNINDEXED, false, VT, MMO); 4722 CSEMap.InsertNode(N, IP); 4723 AllNodes.push_back(N); 4724 return SDValue(N, 0); 4725 } 4726 4727 SDValue SelectionDAG::getTruncStore(SDValue Chain, SDLoc dl, SDValue Val, 4728 SDValue Ptr, MachinePointerInfo PtrInfo, 4729 EVT SVT,bool isVolatile, bool isNonTemporal, 4730 unsigned Alignment, 4731 const MDNode *TBAAInfo) { 4732 assert(Chain.getValueType() == MVT::Other && 4733 "Invalid chain type"); 4734 if (Alignment == 0) // Ensure that codegen never sees alignment 0 4735 Alignment = getEVTAlignment(SVT); 4736 4737 unsigned Flags = MachineMemOperand::MOStore; 4738 if (isVolatile) 4739 Flags |= MachineMemOperand::MOVolatile; 4740 if (isNonTemporal) 4741 Flags |= MachineMemOperand::MONonTemporal; 4742 4743 if (PtrInfo.V == 0) 4744 PtrInfo = InferPointerInfo(Ptr); 4745 4746 MachineFunction &MF = getMachineFunction(); 4747 MachineMemOperand *MMO = 4748 MF.getMachineMemOperand(PtrInfo, Flags, SVT.getStoreSize(), Alignment, 4749 TBAAInfo); 4750 4751 return getTruncStore(Chain, dl, Val, Ptr, SVT, MMO); 4752 } 4753 4754 SDValue SelectionDAG::getTruncStore(SDValue Chain, SDLoc dl, SDValue Val, 4755 SDValue Ptr, EVT SVT, 4756 MachineMemOperand *MMO) { 4757 EVT VT = Val.getValueType(); 4758 4759 assert(Chain.getValueType() == MVT::Other && 4760 "Invalid chain type"); 4761 if (VT == SVT) 4762 return getStore(Chain, dl, Val, Ptr, MMO); 4763 4764 assert(SVT.getScalarType().bitsLT(VT.getScalarType()) && 4765 "Should only be a truncating store, not extending!"); 4766 assert(VT.isInteger() == SVT.isInteger() && 4767 "Can't do FP-INT conversion!"); 4768 assert(VT.isVector() == 
SVT.isVector() &&
         "Cannot use trunc store to convert to or from a vector!");
  assert((!VT.isVector() ||
          VT.getVectorNumElements() == SVT.getVectorNumElements()) &&
         "Cannot use trunc store to change the number of vector elements!");

  SDVTList VTs = getVTList(MVT::Other);
  SDValue Undef = getUNDEF(Ptr.getValueType());
  SDValue Ops[] = { Chain, Val, Ptr, Undef };
  FoldingSetNodeID ID;
  AddNodeIDNode(ID, ISD::STORE, VTs, Ops, 4);
  ID.AddInteger(SVT.getRawBits());
  ID.AddInteger(encodeMemSDNodeFlags(true, ISD::UNINDEXED, MMO->isVolatile(),
                                     MMO->isNonTemporal(), MMO->isInvariant()));
  ID.AddInteger(MMO->getPointerInfo().getAddrSpace());
  void *IP = 0;
  if (SDNode *E = CSEMap.FindNodeOrInsertPos(ID, IP)) {
    cast<StoreSDNode>(E)->refineAlignment(MMO);
    return SDValue(E, 0);
  }
  SDNode *N = new (NodeAllocator) StoreSDNode(Ops, dl.getIROrder(),
                                              dl.getDebugLoc(), VTs,
                                              ISD::UNINDEXED, true, SVT, MMO);
  CSEMap.InsertNode(N, IP);
  AllNodes.push_back(N);
  return SDValue(N, 0);
}

SDValue
SelectionDAG::getIndexedStore(SDValue OrigStore, SDLoc dl, SDValue Base,
                              SDValue Offset, ISD::MemIndexedMode AM) {
  StoreSDNode *ST = cast<StoreSDNode>(OrigStore);
  assert(ST->getOffset().getOpcode() == ISD::UNDEF &&
         "Store is already an indexed store!");
  SDVTList VTs = getVTList(Base.getValueType(), MVT::Other);
  SDValue Ops[] = { ST->getChain(), ST->getValue(), Base, Offset };
  FoldingSetNodeID ID;
  AddNodeIDNode(ID, ISD::STORE, VTs, Ops, 4);
  ID.AddInteger(ST->getMemoryVT().getRawBits());
  ID.AddInteger(ST->getRawSubclassData());
  ID.AddInteger(ST->getPointerInfo().getAddrSpace());
  void *IP = 0;
  if (SDNode *E = CSEMap.FindNodeOrInsertPos(ID, IP))
    return SDValue(E, 0);

  SDNode *N = new (NodeAllocator) StoreSDNode(Ops, dl.getIROrder(),
                                              dl.getDebugLoc(), VTs, AM,
                                              ST->isTruncatingStore(),
                                              ST->getMemoryVT(),
                                              ST->getMemOperand());
  CSEMap.InsertNode(N, IP);
  AllNodes.push_back(N);
  return SDValue(N, 0);
}

SDValue SelectionDAG::getVAArg(EVT VT, SDLoc dl,
                               SDValue Chain, SDValue Ptr,
                               SDValue SV,
                               unsigned Align) {
  SDValue Ops[] = { Chain, Ptr, SV, getTargetConstant(Align, MVT::i32) };
  return getNode(ISD::VAARG, dl, getVTList(VT, MVT::Other), Ops, 4);
}

SDValue SelectionDAG::getNode(unsigned Opcode, SDLoc DL, EVT VT,
                              const SDUse *Ops, unsigned NumOps) {
  switch (NumOps) {
  case 0: return getNode(Opcode, DL, VT);
  case 1: return getNode(Opcode, DL, VT, Ops[0]);
  case 2: return getNode(Opcode, DL, VT, Ops[0], Ops[1]);
  case 3: return getNode(Opcode, DL, VT, Ops[0], Ops[1], Ops[2]);
  default: break;
  }

  // Copy from an SDUse array into an SDValue array for use with
  // the regular getNode logic.
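  // (SDUse is not layout-compatible with SDValue -- it also carries use-list
  // links -- so the operands are copied rather than reinterpreted in place.)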
4843 SmallVector<SDValue, 8> NewOps(Ops, Ops + NumOps); 4844 return getNode(Opcode, DL, VT, &NewOps[0], NumOps); 4845 } 4846 4847 SDValue SelectionDAG::getNode(unsigned Opcode, SDLoc DL, EVT VT, 4848 const SDValue *Ops, unsigned NumOps) { 4849 switch (NumOps) { 4850 case 0: return getNode(Opcode, DL, VT); 4851 case 1: return getNode(Opcode, DL, VT, Ops[0]); 4852 case 2: return getNode(Opcode, DL, VT, Ops[0], Ops[1]); 4853 case 3: return getNode(Opcode, DL, VT, Ops[0], Ops[1], Ops[2]); 4854 default: break; 4855 } 4856 4857 switch (Opcode) { 4858 default: break; 4859 case ISD::SELECT_CC: { 4860 assert(NumOps == 5 && "SELECT_CC takes 5 operands!"); 4861 assert(Ops[0].getValueType() == Ops[1].getValueType() && 4862 "LHS and RHS of condition must have same type!"); 4863 assert(Ops[2].getValueType() == Ops[3].getValueType() && 4864 "True and False arms of SelectCC must have same type!"); 4865 assert(Ops[2].getValueType() == VT && 4866 "select_cc node must be of same type as true and false value!"); 4867 break; 4868 } 4869 case ISD::BR_CC: { 4870 assert(NumOps == 5 && "BR_CC takes 5 operands!"); 4871 assert(Ops[2].getValueType() == Ops[3].getValueType() && 4872 "LHS/RHS of comparison should match types!"); 4873 break; 4874 } 4875 } 4876 4877 // Memoize nodes. 4878 SDNode *N; 4879 SDVTList VTs = getVTList(VT); 4880 4881 if (VT != MVT::Glue) { 4882 FoldingSetNodeID ID; 4883 AddNodeIDNode(ID, Opcode, VTs, Ops, NumOps); 4884 void *IP = 0; 4885 4886 if (SDNode *E = CSEMap.FindNodeOrInsertPos(ID, IP)) 4887 return SDValue(E, 0); 4888 4889 N = new (NodeAllocator) SDNode(Opcode, DL.getIROrder(), DL.getDebugLoc(), 4890 VTs, Ops, NumOps); 4891 CSEMap.InsertNode(N, IP); 4892 } else { 4893 N = new (NodeAllocator) SDNode(Opcode, DL.getIROrder(), DL.getDebugLoc(), 4894 VTs, Ops, NumOps); 4895 } 4896 4897 AllNodes.push_back(N); 4898 #ifndef NDEBUG 4899 VerifySDNode(N); 4900 #endif 4901 return SDValue(N, 0); 4902 } 4903 4904 SDValue SelectionDAG::getNode(unsigned Opcode, SDLoc DL, 4905 ArrayRef<EVT> ResultTys, 4906 const SDValue *Ops, unsigned NumOps) { 4907 return getNode(Opcode, DL, getVTList(&ResultTys[0], ResultTys.size()), 4908 Ops, NumOps); 4909 } 4910 4911 SDValue SelectionDAG::getNode(unsigned Opcode, SDLoc DL, 4912 const EVT *VTs, unsigned NumVTs, 4913 const SDValue *Ops, unsigned NumOps) { 4914 if (NumVTs == 1) 4915 return getNode(Opcode, DL, VTs[0], Ops, NumOps); 4916 return getNode(Opcode, DL, makeVTList(VTs, NumVTs), Ops, NumOps); 4917 } 4918 4919 SDValue SelectionDAG::getNode(unsigned Opcode, SDLoc DL, SDVTList VTList, 4920 const SDValue *Ops, unsigned NumOps) { 4921 if (VTList.NumVTs == 1) 4922 return getNode(Opcode, DL, VTList.VTs[0], Ops, NumOps); 4923 4924 #if 0 4925 switch (Opcode) { 4926 // FIXME: figure out how to safely handle things like 4927 // int foo(int x) { return 1 << (x & 255); } 4928 // int bar() { return foo(256); } 4929 case ISD::SRA_PARTS: 4930 case ISD::SRL_PARTS: 4931 case ISD::SHL_PARTS: 4932 if (N3.getOpcode() == ISD::SIGN_EXTEND_INREG && 4933 cast<VTSDNode>(N3.getOperand(1))->getVT() != MVT::i1) 4934 return getNode(Opcode, DL, VT, N1, N2, N3.getOperand(0)); 4935 else if (N3.getOpcode() == ISD::AND) 4936 if (ConstantSDNode *AndRHS = dyn_cast<ConstantSDNode>(N3.getOperand(1))) { 4937 // If the and is only masking out bits that cannot effect the shift, 4938 // eliminate the and. 
4939 unsigned NumBits = VT.getScalarType().getSizeInBits()*2; 4940 if ((AndRHS->getValue() & (NumBits-1)) == NumBits-1) 4941 return getNode(Opcode, DL, VT, N1, N2, N3.getOperand(0)); 4942 } 4943 break; 4944 } 4945 #endif 4946 4947 // Memoize the node unless it returns a flag. 4948 SDNode *N; 4949 if (VTList.VTs[VTList.NumVTs-1] != MVT::Glue) { 4950 FoldingSetNodeID ID; 4951 AddNodeIDNode(ID, Opcode, VTList, Ops, NumOps); 4952 void *IP = 0; 4953 if (SDNode *E = CSEMap.FindNodeOrInsertPos(ID, IP)) 4954 return SDValue(E, 0); 4955 4956 if (NumOps == 1) { 4957 N = new (NodeAllocator) UnarySDNode(Opcode, DL.getIROrder(), 4958 DL.getDebugLoc(), VTList, Ops[0]); 4959 } else if (NumOps == 2) { 4960 N = new (NodeAllocator) BinarySDNode(Opcode, DL.getIROrder(), 4961 DL.getDebugLoc(), VTList, Ops[0], 4962 Ops[1]); 4963 } else if (NumOps == 3) { 4964 N = new (NodeAllocator) TernarySDNode(Opcode, DL.getIROrder(), 4965 DL.getDebugLoc(), VTList, Ops[0], 4966 Ops[1], Ops[2]); 4967 } else { 4968 N = new (NodeAllocator) SDNode(Opcode, DL.getIROrder(), DL.getDebugLoc(), 4969 VTList, Ops, NumOps); 4970 } 4971 CSEMap.InsertNode(N, IP); 4972 } else { 4973 if (NumOps == 1) { 4974 N = new (NodeAllocator) UnarySDNode(Opcode, DL.getIROrder(), 4975 DL.getDebugLoc(), VTList, Ops[0]); 4976 } else if (NumOps == 2) { 4977 N = new (NodeAllocator) BinarySDNode(Opcode, DL.getIROrder(), 4978 DL.getDebugLoc(), VTList, Ops[0], 4979 Ops[1]); 4980 } else if (NumOps == 3) { 4981 N = new (NodeAllocator) TernarySDNode(Opcode, DL.getIROrder(), 4982 DL.getDebugLoc(), VTList, Ops[0], 4983 Ops[1], Ops[2]); 4984 } else { 4985 N = new (NodeAllocator) SDNode(Opcode, DL.getIROrder(), DL.getDebugLoc(), 4986 VTList, Ops, NumOps); 4987 } 4988 } 4989 AllNodes.push_back(N); 4990 #ifndef NDEBUG 4991 VerifySDNode(N); 4992 #endif 4993 return SDValue(N, 0); 4994 } 4995 4996 SDValue SelectionDAG::getNode(unsigned Opcode, SDLoc DL, SDVTList VTList) { 4997 return getNode(Opcode, DL, VTList, 0, 0); 4998 } 4999 5000 SDValue SelectionDAG::getNode(unsigned Opcode, SDLoc DL, SDVTList VTList, 5001 SDValue N1) { 5002 SDValue Ops[] = { N1 }; 5003 return getNode(Opcode, DL, VTList, Ops, 1); 5004 } 5005 5006 SDValue SelectionDAG::getNode(unsigned Opcode, SDLoc DL, SDVTList VTList, 5007 SDValue N1, SDValue N2) { 5008 SDValue Ops[] = { N1, N2 }; 5009 return getNode(Opcode, DL, VTList, Ops, 2); 5010 } 5011 5012 SDValue SelectionDAG::getNode(unsigned Opcode, SDLoc DL, SDVTList VTList, 5013 SDValue N1, SDValue N2, SDValue N3) { 5014 SDValue Ops[] = { N1, N2, N3 }; 5015 return getNode(Opcode, DL, VTList, Ops, 3); 5016 } 5017 5018 SDValue SelectionDAG::getNode(unsigned Opcode, SDLoc DL, SDVTList VTList, 5019 SDValue N1, SDValue N2, SDValue N3, 5020 SDValue N4) { 5021 SDValue Ops[] = { N1, N2, N3, N4 }; 5022 return getNode(Opcode, DL, VTList, Ops, 4); 5023 } 5024 5025 SDValue SelectionDAG::getNode(unsigned Opcode, SDLoc DL, SDVTList VTList, 5026 SDValue N1, SDValue N2, SDValue N3, 5027 SDValue N4, SDValue N5) { 5028 SDValue Ops[] = { N1, N2, N3, N4, N5 }; 5029 return getNode(Opcode, DL, VTList, Ops, 5); 5030 } 5031 5032 SDVTList SelectionDAG::getVTList(EVT VT) { 5033 return makeVTList(SDNode::getValueTypeList(VT), 1); 5034 } 5035 5036 SDVTList SelectionDAG::getVTList(EVT VT1, EVT VT2) { 5037 FoldingSetNodeID ID; 5038 ID.AddInteger(2U); 5039 ID.AddInteger(VT1.getRawBits()); 5040 ID.AddInteger(VT2.getRawBits()); 5041 5042 void *IP = 0; 5043 SDVTListNode *Result = VTListMap.FindNodeOrInsertPos(ID, IP); 5044 if (Result == NULL) { 5045 EVT *Array = 
Allocator.Allocate<EVT>(2); 5046 Array[0] = VT1; 5047 Array[1] = VT2; 5048 Result = new (Allocator) SDVTListNode(ID.Intern(Allocator), Array, 2); 5049 VTListMap.InsertNode(Result, IP); 5050 } 5051 return Result->getSDVTList(); 5052 } 5053 5054 SDVTList SelectionDAG::getVTList(EVT VT1, EVT VT2, EVT VT3) { 5055 FoldingSetNodeID ID; 5056 ID.AddInteger(3U); 5057 ID.AddInteger(VT1.getRawBits()); 5058 ID.AddInteger(VT2.getRawBits()); 5059 ID.AddInteger(VT3.getRawBits()); 5060 5061 void *IP = 0; 5062 SDVTListNode *Result = VTListMap.FindNodeOrInsertPos(ID, IP); 5063 if (Result == NULL) { 5064 EVT *Array = Allocator.Allocate<EVT>(3); 5065 Array[0] = VT1; 5066 Array[1] = VT2; 5067 Array[2] = VT3; 5068 Result = new (Allocator) SDVTListNode(ID.Intern(Allocator), Array, 3); 5069 VTListMap.InsertNode(Result, IP); 5070 } 5071 return Result->getSDVTList(); 5072 } 5073 5074 SDVTList SelectionDAG::getVTList(EVT VT1, EVT VT2, EVT VT3, EVT VT4) { 5075 FoldingSetNodeID ID; 5076 ID.AddInteger(4U); 5077 ID.AddInteger(VT1.getRawBits()); 5078 ID.AddInteger(VT2.getRawBits()); 5079 ID.AddInteger(VT3.getRawBits()); 5080 ID.AddInteger(VT4.getRawBits()); 5081 5082 void *IP = 0; 5083 SDVTListNode *Result = VTListMap.FindNodeOrInsertPos(ID, IP); 5084 if (Result == NULL) { 5085 EVT *Array = Allocator.Allocate<EVT>(4); 5086 Array[0] = VT1; 5087 Array[1] = VT2; 5088 Array[2] = VT3; 5089 Array[3] = VT4; 5090 Result = new (Allocator) SDVTListNode(ID.Intern(Allocator), Array, 4); 5091 VTListMap.InsertNode(Result, IP); 5092 } 5093 return Result->getSDVTList(); 5094 } 5095 5096 SDVTList SelectionDAG::getVTList(const EVT *VTs, unsigned NumVTs) { 5097 FoldingSetNodeID ID; 5098 ID.AddInteger(NumVTs); 5099 for (unsigned index = 0; index < NumVTs; index++) { 5100 ID.AddInteger(VTs[index].getRawBits()); 5101 } 5102 5103 void *IP = 0; 5104 SDVTListNode *Result = VTListMap.FindNodeOrInsertPos(ID, IP); 5105 if (Result == NULL) { 5106 EVT *Array = Allocator.Allocate<EVT>(NumVTs); 5107 std::copy(VTs, VTs + NumVTs, Array); 5108 Result = new (Allocator) SDVTListNode(ID.Intern(Allocator), Array, NumVTs); 5109 VTListMap.InsertNode(Result, IP); 5110 } 5111 return Result->getSDVTList(); 5112 } 5113 5114 5115 /// UpdateNodeOperands - *Mutate* the specified node in-place to have the 5116 /// specified operands. If the resultant node already exists in the DAG, 5117 /// this does not modify the specified node, instead it returns the node that 5118 /// already exists. If the resultant node does not exist in the DAG, the 5119 /// input node is returned. As a degenerate case, if you specify the same 5120 /// input operands as the node already has, the input node is returned. 5121 SDNode *SelectionDAG::UpdateNodeOperands(SDNode *N, SDValue Op) { 5122 assert(N->getNumOperands() == 1 && "Update with wrong number of operands"); 5123 5124 // Check to see if there is no change. 5125 if (Op == N->getOperand(0)) return N; 5126 5127 // See if the modified node already exists. 5128 void *InsertPos = 0; 5129 if (SDNode *Existing = FindModifiedNodeSlot(N, Op, InsertPos)) 5130 return Existing; 5131 5132 // Nope it doesn't. Remove the node from its current place in the maps. 5133 if (InsertPos) 5134 if (!RemoveNodeFromCSEMaps(N)) 5135 InsertPos = 0; 5136 5137 // Now we update the operands. 5138 N->OperandList[0].set(Op); 5139 5140 // If this gets put into a CSE map, add it. 
5141 if (InsertPos) CSEMap.InsertNode(N, InsertPos); 5142 return N; 5143 } 5144 5145 SDNode *SelectionDAG::UpdateNodeOperands(SDNode *N, SDValue Op1, SDValue Op2) { 5146 assert(N->getNumOperands() == 2 && "Update with wrong number of operands"); 5147 5148 // Check to see if there is no change. 5149 if (Op1 == N->getOperand(0) && Op2 == N->getOperand(1)) 5150 return N; // No operands changed, just return the input node. 5151 5152 // See if the modified node already exists. 5153 void *InsertPos = 0; 5154 if (SDNode *Existing = FindModifiedNodeSlot(N, Op1, Op2, InsertPos)) 5155 return Existing; 5156 5157 // Nope it doesn't. Remove the node from its current place in the maps. 5158 if (InsertPos) 5159 if (!RemoveNodeFromCSEMaps(N)) 5160 InsertPos = 0; 5161 5162 // Now we update the operands. 5163 if (N->OperandList[0] != Op1) 5164 N->OperandList[0].set(Op1); 5165 if (N->OperandList[1] != Op2) 5166 N->OperandList[1].set(Op2); 5167 5168 // If this gets put into a CSE map, add it. 5169 if (InsertPos) CSEMap.InsertNode(N, InsertPos); 5170 return N; 5171 } 5172 5173 SDNode *SelectionDAG:: 5174 UpdateNodeOperands(SDNode *N, SDValue Op1, SDValue Op2, SDValue Op3) { 5175 SDValue Ops[] = { Op1, Op2, Op3 }; 5176 return UpdateNodeOperands(N, Ops, 3); 5177 } 5178 5179 SDNode *SelectionDAG:: 5180 UpdateNodeOperands(SDNode *N, SDValue Op1, SDValue Op2, 5181 SDValue Op3, SDValue Op4) { 5182 SDValue Ops[] = { Op1, Op2, Op3, Op4 }; 5183 return UpdateNodeOperands(N, Ops, 4); 5184 } 5185 5186 SDNode *SelectionDAG:: 5187 UpdateNodeOperands(SDNode *N, SDValue Op1, SDValue Op2, 5188 SDValue Op3, SDValue Op4, SDValue Op5) { 5189 SDValue Ops[] = { Op1, Op2, Op3, Op4, Op5 }; 5190 return UpdateNodeOperands(N, Ops, 5); 5191 } 5192 5193 SDNode *SelectionDAG:: 5194 UpdateNodeOperands(SDNode *N, const SDValue *Ops, unsigned NumOps) { 5195 assert(N->getNumOperands() == NumOps && 5196 "Update with wrong number of operands"); 5197 5198 // Check to see if there is no change. 5199 bool AnyChange = false; 5200 for (unsigned i = 0; i != NumOps; ++i) { 5201 if (Ops[i] != N->getOperand(i)) { 5202 AnyChange = true; 5203 break; 5204 } 5205 } 5206 5207 // No operands changed, just return the input node. 5208 if (!AnyChange) return N; 5209 5210 // See if the modified node already exists. 5211 void *InsertPos = 0; 5212 if (SDNode *Existing = FindModifiedNodeSlot(N, Ops, NumOps, InsertPos)) 5213 return Existing; 5214 5215 // Nope it doesn't. Remove the node from its current place in the maps. 5216 if (InsertPos) 5217 if (!RemoveNodeFromCSEMaps(N)) 5218 InsertPos = 0; 5219 5220 // Now we update the operands. 5221 for (unsigned i = 0; i != NumOps; ++i) 5222 if (N->OperandList[i] != Ops[i]) 5223 N->OperandList[i].set(Ops[i]); 5224 5225 // If this gets put into a CSE map, add it. 5226 if (InsertPos) CSEMap.InsertNode(N, InsertPos); 5227 return N; 5228 } 5229 5230 /// DropOperands - Release the operands and set this node to have 5231 /// zero operands. 5232 void SDNode::DropOperands() { 5233 // Unlike the code in MorphNodeTo that does this, we don't need to 5234 // watch for dead nodes here. 5235 for (op_iterator I = op_begin(), E = op_end(); I != E; ) { 5236 SDUse &Use = *I++; 5237 Use.set(SDValue()); 5238 } 5239 } 5240 5241 /// SelectNodeTo - These are wrappers around MorphNodeTo that accept a 5242 /// machine opcode. 
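/// For example, an instruction selector might morph a node into a two-operand
/// target instruction in place (an illustrative sketch; 'CurDAG' and the
/// opcode name are stand-ins for a real selector's DAG pointer and generated
/// instruction enum):
///   CurDAG->SelectNodeTo(N, TargetOpc::ADDrr, MVT::i32,
///                        N->getOperand(0), N->getOperand(1));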
5243 /// 5244 SDNode *SelectionDAG::SelectNodeTo(SDNode *N, unsigned MachineOpc, 5245 EVT VT) { 5246 SDVTList VTs = getVTList(VT); 5247 return SelectNodeTo(N, MachineOpc, VTs, 0, 0); 5248 } 5249 5250 SDNode *SelectionDAG::SelectNodeTo(SDNode *N, unsigned MachineOpc, 5251 EVT VT, SDValue Op1) { 5252 SDVTList VTs = getVTList(VT); 5253 SDValue Ops[] = { Op1 }; 5254 return SelectNodeTo(N, MachineOpc, VTs, Ops, 1); 5255 } 5256 5257 SDNode *SelectionDAG::SelectNodeTo(SDNode *N, unsigned MachineOpc, 5258 EVT VT, SDValue Op1, 5259 SDValue Op2) { 5260 SDVTList VTs = getVTList(VT); 5261 SDValue Ops[] = { Op1, Op2 }; 5262 return SelectNodeTo(N, MachineOpc, VTs, Ops, 2); 5263 } 5264 5265 SDNode *SelectionDAG::SelectNodeTo(SDNode *N, unsigned MachineOpc, 5266 EVT VT, SDValue Op1, 5267 SDValue Op2, SDValue Op3) { 5268 SDVTList VTs = getVTList(VT); 5269 SDValue Ops[] = { Op1, Op2, Op3 }; 5270 return SelectNodeTo(N, MachineOpc, VTs, Ops, 3); 5271 } 5272 5273 SDNode *SelectionDAG::SelectNodeTo(SDNode *N, unsigned MachineOpc, 5274 EVT VT, const SDValue *Ops, 5275 unsigned NumOps) { 5276 SDVTList VTs = getVTList(VT); 5277 return SelectNodeTo(N, MachineOpc, VTs, Ops, NumOps); 5278 } 5279 5280 SDNode *SelectionDAG::SelectNodeTo(SDNode *N, unsigned MachineOpc, 5281 EVT VT1, EVT VT2, const SDValue *Ops, 5282 unsigned NumOps) { 5283 SDVTList VTs = getVTList(VT1, VT2); 5284 return SelectNodeTo(N, MachineOpc, VTs, Ops, NumOps); 5285 } 5286 5287 SDNode *SelectionDAG::SelectNodeTo(SDNode *N, unsigned MachineOpc, 5288 EVT VT1, EVT VT2) { 5289 SDVTList VTs = getVTList(VT1, VT2); 5290 return SelectNodeTo(N, MachineOpc, VTs, (SDValue *)0, 0); 5291 } 5292 5293 SDNode *SelectionDAG::SelectNodeTo(SDNode *N, unsigned MachineOpc, 5294 EVT VT1, EVT VT2, EVT VT3, 5295 const SDValue *Ops, unsigned NumOps) { 5296 SDVTList VTs = getVTList(VT1, VT2, VT3); 5297 return SelectNodeTo(N, MachineOpc, VTs, Ops, NumOps); 5298 } 5299 5300 SDNode *SelectionDAG::SelectNodeTo(SDNode *N, unsigned MachineOpc, 5301 EVT VT1, EVT VT2, EVT VT3, EVT VT4, 5302 const SDValue *Ops, unsigned NumOps) { 5303 SDVTList VTs = getVTList(VT1, VT2, VT3, VT4); 5304 return SelectNodeTo(N, MachineOpc, VTs, Ops, NumOps); 5305 } 5306 5307 SDNode *SelectionDAG::SelectNodeTo(SDNode *N, unsigned MachineOpc, 5308 EVT VT1, EVT VT2, 5309 SDValue Op1) { 5310 SDVTList VTs = getVTList(VT1, VT2); 5311 SDValue Ops[] = { Op1 }; 5312 return SelectNodeTo(N, MachineOpc, VTs, Ops, 1); 5313 } 5314 5315 SDNode *SelectionDAG::SelectNodeTo(SDNode *N, unsigned MachineOpc, 5316 EVT VT1, EVT VT2, 5317 SDValue Op1, SDValue Op2) { 5318 SDVTList VTs = getVTList(VT1, VT2); 5319 SDValue Ops[] = { Op1, Op2 }; 5320 return SelectNodeTo(N, MachineOpc, VTs, Ops, 2); 5321 } 5322 5323 SDNode *SelectionDAG::SelectNodeTo(SDNode *N, unsigned MachineOpc, 5324 EVT VT1, EVT VT2, 5325 SDValue Op1, SDValue Op2, 5326 SDValue Op3) { 5327 SDVTList VTs = getVTList(VT1, VT2); 5328 SDValue Ops[] = { Op1, Op2, Op3 }; 5329 return SelectNodeTo(N, MachineOpc, VTs, Ops, 3); 5330 } 5331 5332 SDNode *SelectionDAG::SelectNodeTo(SDNode *N, unsigned MachineOpc, 5333 EVT VT1, EVT VT2, EVT VT3, 5334 SDValue Op1, SDValue Op2, 5335 SDValue Op3) { 5336 SDVTList VTs = getVTList(VT1, VT2, VT3); 5337 SDValue Ops[] = { Op1, Op2, Op3 }; 5338 return SelectNodeTo(N, MachineOpc, VTs, Ops, 3); 5339 } 5340 5341 SDNode *SelectionDAG::SelectNodeTo(SDNode *N, unsigned MachineOpc, 5342 SDVTList VTs, const SDValue *Ops, 5343 unsigned NumOps) { 5344 N = MorphNodeTo(N, ~MachineOpc, VTs, Ops, NumOps); 5345 // Reset the NodeID to -1. 
  N->setNodeId(-1);
  return N;
}

/// UpdadeSDLocOnMergedSDNode - At -O0, throw away the line number information
/// on the merged node, since there is no way to record that the operation is
/// associated with multiple source lines. This makes the debugger work better
/// at -O0, where there is a higher probability of other instructions being
/// associated with that line.
///
/// For IROrder, we keep the smaller of the two.
SDNode *SelectionDAG::UpdadeSDLocOnMergedSDNode(SDNode *N, SDLoc OLoc) {
  DebugLoc NLoc = N->getDebugLoc();
  if (!(NLoc.isUnknown()) && (OptLevel == CodeGenOpt::None) &&
      (OLoc.getDebugLoc() != NLoc)) {
    N->setDebugLoc(DebugLoc());
  }
  unsigned Order = std::min(N->getIROrder(), OLoc.getIROrder());
  N->setIROrder(Order);
  return N;
}

/// MorphNodeTo - This *mutates* the specified node to have the specified
/// return type, opcode, and operands.
///
/// Note that MorphNodeTo returns the resultant node. If there is already a
/// node of the specified opcode and operands, it returns that node instead of
/// the current one. Note that the SDLoc need not be the same.
///
/// Using MorphNodeTo is faster than creating a new node and swapping it in
/// with ReplaceAllUsesWith both because it often avoids allocating a new
/// node, and because it doesn't require CSE recalculation for any of
/// the node's users.
///
SDNode *SelectionDAG::MorphNodeTo(SDNode *N, unsigned Opc,
                                  SDVTList VTs, const SDValue *Ops,
                                  unsigned NumOps) {
  // If an identical node already exists, use it.
  void *IP = 0;
  if (VTs.VTs[VTs.NumVTs-1] != MVT::Glue) {
    FoldingSetNodeID ID;
    AddNodeIDNode(ID, Opc, VTs, Ops, NumOps);
    if (SDNode *ON = CSEMap.FindNodeOrInsertPos(ID, IP))
      return UpdadeSDLocOnMergedSDNode(ON, SDLoc(N));
  }

  if (!RemoveNodeFromCSEMaps(N))
    IP = 0;

  // Start the morphing.
  N->NodeType = Opc;
  N->ValueList = VTs.VTs;
  N->NumValues = VTs.NumVTs;

  // Clear the operands list, updating used nodes to remove this from their
  // use list. Keep track of any operands that become dead as a result.
  SmallPtrSet<SDNode*, 16> DeadNodeSet;
  for (SDNode::op_iterator I = N->op_begin(), E = N->op_end(); I != E; ) {
    SDUse &Use = *I++;
    SDNode *Used = Use.getNode();
    Use.set(SDValue());
    if (Used->use_empty())
      DeadNodeSet.insert(Used);
  }

  if (MachineSDNode *MN = dyn_cast<MachineSDNode>(N)) {
    // Initialize the memory references information.
    MN->setMemRefs(0, 0);
    // If NumOps is larger than the # of operands we can have in a
    // MachineSDNode, reallocate the operand list.
    if (NumOps > MN->NumOperands || !MN->OperandsNeedDelete) {
      if (MN->OperandsNeedDelete)
        delete[] MN->OperandList;
      if (NumOps > array_lengthof(MN->LocalOperands))
        // We're creating a final node that will live unmorphed for the
        // remainder of the current SelectionDAG iteration, so we can allocate
        // the operands directly out of a pool with no recycling metadata.
5423 MN->InitOperands(OperandAllocator.Allocate<SDUse>(NumOps), 5424 Ops, NumOps); 5425 else 5426 MN->InitOperands(MN->LocalOperands, Ops, NumOps); 5427 MN->OperandsNeedDelete = false; 5428 } else 5429 MN->InitOperands(MN->OperandList, Ops, NumOps); 5430 } else { 5431 // If NumOps is larger than the # of operands we currently have, reallocate 5432 // the operand list. 5433 if (NumOps > N->NumOperands) { 5434 if (N->OperandsNeedDelete) 5435 delete[] N->OperandList; 5436 N->InitOperands(new SDUse[NumOps], Ops, NumOps); 5437 N->OperandsNeedDelete = true; 5438 } else 5439 N->InitOperands(N->OperandList, Ops, NumOps); 5440 } 5441 5442 // Delete any nodes that are still dead after adding the uses for the 5443 // new operands. 5444 if (!DeadNodeSet.empty()) { 5445 SmallVector<SDNode *, 16> DeadNodes; 5446 for (SmallPtrSet<SDNode *, 16>::iterator I = DeadNodeSet.begin(), 5447 E = DeadNodeSet.end(); I != E; ++I) 5448 if ((*I)->use_empty()) 5449 DeadNodes.push_back(*I); 5450 RemoveDeadNodes(DeadNodes); 5451 } 5452 5453 if (IP) 5454 CSEMap.InsertNode(N, IP); // Memoize the new node. 5455 return N; 5456 } 5457 5458 5459 /// getMachineNode - These are used for target selectors to create a new node 5460 /// with specified return type(s), MachineInstr opcode, and operands. 5461 /// 5462 /// Note that getMachineNode returns the resultant node. If there is already a 5463 /// node of the specified opcode and operands, it returns that node instead of 5464 /// the current one. 5465 MachineSDNode * 5466 SelectionDAG::getMachineNode(unsigned Opcode, SDLoc dl, EVT VT) { 5467 SDVTList VTs = getVTList(VT); 5468 return getMachineNode(Opcode, dl, VTs, None); 5469 } 5470 5471 MachineSDNode * 5472 SelectionDAG::getMachineNode(unsigned Opcode, SDLoc dl, EVT VT, SDValue Op1) { 5473 SDVTList VTs = getVTList(VT); 5474 SDValue Ops[] = { Op1 }; 5475 return getMachineNode(Opcode, dl, VTs, Ops); 5476 } 5477 5478 MachineSDNode * 5479 SelectionDAG::getMachineNode(unsigned Opcode, SDLoc dl, EVT VT, 5480 SDValue Op1, SDValue Op2) { 5481 SDVTList VTs = getVTList(VT); 5482 SDValue Ops[] = { Op1, Op2 }; 5483 return getMachineNode(Opcode, dl, VTs, Ops); 5484 } 5485 5486 MachineSDNode * 5487 SelectionDAG::getMachineNode(unsigned Opcode, SDLoc dl, EVT VT, 5488 SDValue Op1, SDValue Op2, SDValue Op3) { 5489 SDVTList VTs = getVTList(VT); 5490 SDValue Ops[] = { Op1, Op2, Op3 }; 5491 return getMachineNode(Opcode, dl, VTs, Ops); 5492 } 5493 5494 MachineSDNode * 5495 SelectionDAG::getMachineNode(unsigned Opcode, SDLoc dl, EVT VT, 5496 ArrayRef<SDValue> Ops) { 5497 SDVTList VTs = getVTList(VT); 5498 return getMachineNode(Opcode, dl, VTs, Ops); 5499 } 5500 5501 MachineSDNode * 5502 SelectionDAG::getMachineNode(unsigned Opcode, SDLoc dl, EVT VT1, EVT VT2) { 5503 SDVTList VTs = getVTList(VT1, VT2); 5504 return getMachineNode(Opcode, dl, VTs, None); 5505 } 5506 5507 MachineSDNode * 5508 SelectionDAG::getMachineNode(unsigned Opcode, SDLoc dl, 5509 EVT VT1, EVT VT2, SDValue Op1) { 5510 SDVTList VTs = getVTList(VT1, VT2); 5511 SDValue Ops[] = { Op1 }; 5512 return getMachineNode(Opcode, dl, VTs, Ops); 5513 } 5514 5515 MachineSDNode * 5516 SelectionDAG::getMachineNode(unsigned Opcode, SDLoc dl, 5517 EVT VT1, EVT VT2, SDValue Op1, SDValue Op2) { 5518 SDVTList VTs = getVTList(VT1, VT2); 5519 SDValue Ops[] = { Op1, Op2 }; 5520 return getMachineNode(Opcode, dl, VTs, Ops); 5521 } 5522 5523 MachineSDNode * 5524 SelectionDAG::getMachineNode(unsigned Opcode, SDLoc dl, 5525 EVT VT1, EVT VT2, SDValue Op1, 5526 SDValue Op2, SDValue Op3) { 5527 SDVTList VTs = 
getVTList(VT1, VT2); 5528 SDValue Ops[] = { Op1, Op2, Op3 }; 5529 return getMachineNode(Opcode, dl, VTs, Ops); 5530 } 5531 5532 MachineSDNode * 5533 SelectionDAG::getMachineNode(unsigned Opcode, SDLoc dl, 5534 EVT VT1, EVT VT2, 5535 ArrayRef<SDValue> Ops) { 5536 SDVTList VTs = getVTList(VT1, VT2); 5537 return getMachineNode(Opcode, dl, VTs, Ops); 5538 } 5539 5540 MachineSDNode * 5541 SelectionDAG::getMachineNode(unsigned Opcode, SDLoc dl, 5542 EVT VT1, EVT VT2, EVT VT3, 5543 SDValue Op1, SDValue Op2) { 5544 SDVTList VTs = getVTList(VT1, VT2, VT3); 5545 SDValue Ops[] = { Op1, Op2 }; 5546 return getMachineNode(Opcode, dl, VTs, Ops); 5547 } 5548 5549 MachineSDNode * 5550 SelectionDAG::getMachineNode(unsigned Opcode, SDLoc dl, 5551 EVT VT1, EVT VT2, EVT VT3, 5552 SDValue Op1, SDValue Op2, SDValue Op3) { 5553 SDVTList VTs = getVTList(VT1, VT2, VT3); 5554 SDValue Ops[] = { Op1, Op2, Op3 }; 5555 return getMachineNode(Opcode, dl, VTs, Ops); 5556 } 5557 5558 MachineSDNode * 5559 SelectionDAG::getMachineNode(unsigned Opcode, SDLoc dl, 5560 EVT VT1, EVT VT2, EVT VT3, 5561 ArrayRef<SDValue> Ops) { 5562 SDVTList VTs = getVTList(VT1, VT2, VT3); 5563 return getMachineNode(Opcode, dl, VTs, Ops); 5564 } 5565 5566 MachineSDNode * 5567 SelectionDAG::getMachineNode(unsigned Opcode, SDLoc dl, EVT VT1, 5568 EVT VT2, EVT VT3, EVT VT4, 5569 ArrayRef<SDValue> Ops) { 5570 SDVTList VTs = getVTList(VT1, VT2, VT3, VT4); 5571 return getMachineNode(Opcode, dl, VTs, Ops); 5572 } 5573 5574 MachineSDNode * 5575 SelectionDAG::getMachineNode(unsigned Opcode, SDLoc dl, 5576 ArrayRef<EVT> ResultTys, 5577 ArrayRef<SDValue> Ops) { 5578 SDVTList VTs = getVTList(&ResultTys[0], ResultTys.size()); 5579 return getMachineNode(Opcode, dl, VTs, Ops); 5580 } 5581 5582 MachineSDNode * 5583 SelectionDAG::getMachineNode(unsigned Opcode, SDLoc DL, SDVTList VTs, 5584 ArrayRef<SDValue> OpsArray) { 5585 bool DoCSE = VTs.VTs[VTs.NumVTs-1] != MVT::Glue; 5586 MachineSDNode *N; 5587 void *IP = 0; 5588 const SDValue *Ops = OpsArray.data(); 5589 unsigned NumOps = OpsArray.size(); 5590 5591 if (DoCSE) { 5592 FoldingSetNodeID ID; 5593 AddNodeIDNode(ID, ~Opcode, VTs, Ops, NumOps); 5594 IP = 0; 5595 if (SDNode *E = CSEMap.FindNodeOrInsertPos(ID, IP)) { 5596 return cast<MachineSDNode>(UpdadeSDLocOnMergedSDNode(E, DL)); 5597 } 5598 } 5599 5600 // Allocate a new MachineSDNode. 5601 N = new (NodeAllocator) MachineSDNode(~Opcode, DL.getIROrder(), 5602 DL.getDebugLoc(), VTs); 5603 5604 // Initialize the operands list. 5605 if (NumOps > array_lengthof(N->LocalOperands)) 5606 // We're creating a final node that will live unmorphed for the 5607 // remainder of the current SelectionDAG iteration, so we can allocate 5608 // the operands directly out of a pool with no recycling metadata. 5609 N->InitOperands(OperandAllocator.Allocate<SDUse>(NumOps), 5610 Ops, NumOps); 5611 else 5612 N->InitOperands(N->LocalOperands, Ops, NumOps); 5613 N->OperandsNeedDelete = false; 5614 5615 if (DoCSE) 5616 CSEMap.InsertNode(N, IP); 5617 5618 AllNodes.push_back(N); 5619 #ifndef NDEBUG 5620 VerifyMachineNode(N); 5621 #endif 5622 return N; 5623 } 5624 5625 /// getTargetExtractSubreg - A convenience function for creating 5626 /// TargetOpcode::EXTRACT_SUBREG nodes. 
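/// For example (an illustrative sketch; 'X86::sub_32bit' stands in for a
/// target-defined subregister index), extracting the low 32 bits of a 64-bit
/// value 'Val64':
///   SDValue Lo = getTargetExtractSubreg(X86::sub_32bit, DL, MVT::i32, Val64);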
5627 SDValue 5628 SelectionDAG::getTargetExtractSubreg(int SRIdx, SDLoc DL, EVT VT, 5629 SDValue Operand) { 5630 SDValue SRIdxVal = getTargetConstant(SRIdx, MVT::i32); 5631 SDNode *Subreg = getMachineNode(TargetOpcode::EXTRACT_SUBREG, DL, 5632 VT, Operand, SRIdxVal); 5633 return SDValue(Subreg, 0); 5634 } 5635 5636 /// getTargetInsertSubreg - A convenience function for creating 5637 /// TargetOpcode::INSERT_SUBREG nodes. 5638 SDValue 5639 SelectionDAG::getTargetInsertSubreg(int SRIdx, SDLoc DL, EVT VT, 5640 SDValue Operand, SDValue Subreg) { 5641 SDValue SRIdxVal = getTargetConstant(SRIdx, MVT::i32); 5642 SDNode *Result = getMachineNode(TargetOpcode::INSERT_SUBREG, DL, 5643 VT, Operand, Subreg, SRIdxVal); 5644 return SDValue(Result, 0); 5645 } 5646 5647 /// getNodeIfExists - Get the specified node if it's already available, or 5648 /// else return NULL. 5649 SDNode *SelectionDAG::getNodeIfExists(unsigned Opcode, SDVTList VTList, 5650 const SDValue *Ops, unsigned NumOps) { 5651 if (VTList.VTs[VTList.NumVTs-1] != MVT::Glue) { 5652 FoldingSetNodeID ID; 5653 AddNodeIDNode(ID, Opcode, VTList, Ops, NumOps); 5654 void *IP = 0; 5655 if (SDNode *E = CSEMap.FindNodeOrInsertPos(ID, IP)) 5656 return E; 5657 } 5658 return NULL; 5659 } 5660 5661 /// getDbgValue - Creates a SDDbgValue node. 5662 /// 5663 SDDbgValue * 5664 SelectionDAG::getDbgValue(MDNode *MDPtr, SDNode *N, unsigned R, uint64_t Off, 5665 DebugLoc DL, unsigned O) { 5666 return new (Allocator) SDDbgValue(MDPtr, N, R, Off, DL, O); 5667 } 5668 5669 SDDbgValue * 5670 SelectionDAG::getDbgValue(MDNode *MDPtr, const Value *C, uint64_t Off, 5671 DebugLoc DL, unsigned O) { 5672 return new (Allocator) SDDbgValue(MDPtr, C, Off, DL, O); 5673 } 5674 5675 SDDbgValue * 5676 SelectionDAG::getDbgValue(MDNode *MDPtr, unsigned FI, uint64_t Off, 5677 DebugLoc DL, unsigned O) { 5678 return new (Allocator) SDDbgValue(MDPtr, FI, Off, DL, O); 5679 } 5680 5681 namespace { 5682 5683 /// RAUWUpdateListener - Helper for ReplaceAllUsesWith - When the node 5684 /// pointed to by a use iterator is deleted, increment the use iterator 5685 /// so that it doesn't dangle. 5686 /// 5687 class RAUWUpdateListener : public SelectionDAG::DAGUpdateListener { 5688 SDNode::use_iterator &UI; 5689 SDNode::use_iterator &UE; 5690 5691 void NodeDeleted(SDNode *N, SDNode *E) override { 5692 // Increment the iterator as needed. 5693 while (UI != UE && N == *UI) 5694 ++UI; 5695 } 5696 5697 public: 5698 RAUWUpdateListener(SelectionDAG &d, 5699 SDNode::use_iterator &ui, 5700 SDNode::use_iterator &ue) 5701 : SelectionDAG::DAGUpdateListener(d), UI(ui), UE(ue) {} 5702 }; 5703 5704 } 5705 5706 /// ReplaceAllUsesWith - Modify anything using 'From' to use 'To' instead. 5707 /// This can cause recursive merging of nodes in the DAG. 5708 /// 5709 /// This version assumes From has a single result value. 5710 /// 5711 void SelectionDAG::ReplaceAllUsesWith(SDValue FromN, SDValue To) { 5712 SDNode *From = FromN.getNode(); 5713 assert(From->getNumValues() == 1 && FromN.getResNo() == 0 && 5714 "Cannot replace with this method!"); 5715 assert(From != To.getNode() && "Cannot replace uses of with self"); 5716 5717 // Iterate over all the existing uses of From. New uses will be added 5718 // to the beginning of the use list, which we avoid visiting. 
  // This specifically avoids visiting uses of From that arise while the
  // replacement is happening, because any such uses would be the result
  // of CSE: If an existing node looks like From after one of its operands
  // is replaced by To, we don't want to replace all of its uses with To
  // as well. See PR3018 for more info.
  SDNode::use_iterator UI = From->use_begin(), UE = From->use_end();
  RAUWUpdateListener Listener(*this, UI, UE);
  while (UI != UE) {
    SDNode *User = *UI;

    // This node is about to morph, remove its old self from the CSE maps.
    RemoveNodeFromCSEMaps(User);

    // A user can appear in a use list multiple times, and when this
    // happens the uses are usually next to each other in the list.
    // To help reduce the number of CSE recomputations, process all
    // the uses of this user that we can find this way.
    do {
      SDUse &Use = UI.getUse();
      ++UI;
      Use.set(To);
    } while (UI != UE && *UI == User);

    // Now that we have modified User, add it back to the CSE maps. If it
    // already exists there, recursively merge the results together.
    AddModifiedNodeToCSEMaps(User);
  }

  // If we just RAUW'd the root, take note.
  if (FromN == getRoot())
    setRoot(To);
}

/// ReplaceAllUsesWith - Modify anything using 'From' to use 'To' instead.
/// This can cause recursive merging of nodes in the DAG.
///
/// This version assumes that for each value of From, there is a
/// corresponding value in To in the same position with the same type.
///
void SelectionDAG::ReplaceAllUsesWith(SDNode *From, SDNode *To) {
#ifndef NDEBUG
  for (unsigned i = 0, e = From->getNumValues(); i != e; ++i)
    assert((!From->hasAnyUseOfValue(i) ||
            From->getValueType(i) == To->getValueType(i)) &&
           "Cannot use this version of ReplaceAllUsesWith!");
#endif

  // Handle the trivial case.
  if (From == To)
    return;

  // Iterate over just the existing users of From. See the comments in
  // the ReplaceAllUsesWith above.
  SDNode::use_iterator UI = From->use_begin(), UE = From->use_end();
  RAUWUpdateListener Listener(*this, UI, UE);
  while (UI != UE) {
    SDNode *User = *UI;

    // This node is about to morph, remove its old self from the CSE maps.
    RemoveNodeFromCSEMaps(User);

    // A user can appear in a use list multiple times, and when this
    // happens the uses are usually next to each other in the list.
    // To help reduce the number of CSE recomputations, process all
    // the uses of this user that we can find this way.
    do {
      SDUse &Use = UI.getUse();
      ++UI;
      Use.setNode(To);
    } while (UI != UE && *UI == User);

    // Now that we have modified User, add it back to the CSE maps. If it
    // already exists there, recursively merge the results together.
    AddModifiedNodeToCSEMaps(User);
  }

  // If we just RAUW'd the root, take note.
  if (From == getRoot().getNode())
    setRoot(SDValue(To, getRoot().getResNo()));
}

/// ReplaceAllUsesWith - Modify anything using 'From' to use 'To' instead.
/// This can cause recursive merging of nodes in the DAG.
///
/// This version can replace From with any result values. To must match the
/// number and types of values returned by From.
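/// For example (illustrative), replacing both results of a two-result node
/// with previously built replacement values:
///   SDValue NewResults[] = { NewValue, NewChain };
///   DAG.ReplaceAllUsesWith(OldNode, NewResults);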
5805 void SelectionDAG::ReplaceAllUsesWith(SDNode *From, const SDValue *To) { 5806 if (From->getNumValues() == 1) // Handle the simple case efficiently. 5807 return ReplaceAllUsesWith(SDValue(From, 0), To[0]); 5808 5809 // Iterate over just the existing users of From. See the comments in 5810 // the ReplaceAllUsesWith above. 5811 SDNode::use_iterator UI = From->use_begin(), UE = From->use_end(); 5812 RAUWUpdateListener Listener(*this, UI, UE); 5813 while (UI != UE) { 5814 SDNode *User = *UI; 5815 5816 // This node is about to morph, remove its old self from the CSE maps. 5817 RemoveNodeFromCSEMaps(User); 5818 5819 // A user can appear in a use list multiple times, and when this 5820 // happens the uses are usually next to each other in the list. 5821 // To help reduce the number of CSE recomputations, process all 5822 // the uses of this user that we can find this way. 5823 do { 5824 SDUse &Use = UI.getUse(); 5825 const SDValue &ToOp = To[Use.getResNo()]; 5826 ++UI; 5827 Use.set(ToOp); 5828 } while (UI != UE && *UI == User); 5829 5830 // Now that we have modified User, add it back to the CSE maps. If it 5831 // already exists there, recursively merge the results together. 5832 AddModifiedNodeToCSEMaps(User); 5833 } 5834 5835 // If we just RAUW'd the root, take note. 5836 if (From == getRoot().getNode()) 5837 setRoot(SDValue(To[getRoot().getResNo()])); 5838 } 5839 5840 /// ReplaceAllUsesOfValueWith - Replace any uses of From with To, leaving 5841 /// uses of other values produced by From.getNode() alone. The Deleted 5842 /// vector is handled the same way as for ReplaceAllUsesWith. 5843 void SelectionDAG::ReplaceAllUsesOfValueWith(SDValue From, SDValue To){ 5844 // Handle the really simple, really trivial case efficiently. 5845 if (From == To) return; 5846 5847 // Handle the simple, trivial, case efficiently. 5848 if (From.getNode()->getNumValues() == 1) { 5849 ReplaceAllUsesWith(From, To); 5850 return; 5851 } 5852 5853 // Iterate over just the existing users of From. See the comments in 5854 // the ReplaceAllUsesWith above. 5855 SDNode::use_iterator UI = From.getNode()->use_begin(), 5856 UE = From.getNode()->use_end(); 5857 RAUWUpdateListener Listener(*this, UI, UE); 5858 while (UI != UE) { 5859 SDNode *User = *UI; 5860 bool UserRemovedFromCSEMaps = false; 5861 5862 // A user can appear in a use list multiple times, and when this 5863 // happens the uses are usually next to each other in the list. 5864 // To help reduce the number of CSE recomputations, process all 5865 // the uses of this user that we can find this way. 5866 do { 5867 SDUse &Use = UI.getUse(); 5868 5869 // Skip uses of different values from the same node. 5870 if (Use.getResNo() != From.getResNo()) { 5871 ++UI; 5872 continue; 5873 } 5874 5875 // If this node hasn't been modified yet, it's still in the CSE maps, 5876 // so remove its old self from the CSE maps. 5877 if (!UserRemovedFromCSEMaps) { 5878 RemoveNodeFromCSEMaps(User); 5879 UserRemovedFromCSEMaps = true; 5880 } 5881 5882 ++UI; 5883 Use.set(To); 5884 } while (UI != UE && *UI == User); 5885 5886 // We are iterating over all uses of the From node, so if a use 5887 // doesn't use the specific value, no changes are made. 5888 if (!UserRemovedFromCSEMaps) 5889 continue; 5890 5891 // Now that we have modified User, add it back to the CSE maps. If it 5892 // already exists there, recursively merge the results together. 5893 AddModifiedNodeToCSEMaps(User); 5894 } 5895 5896 // If we just RAUW'd the root, take note. 
  if (From == getRoot())
    setRoot(To);
}

namespace {
  /// UseMemo - This class is used by SelectionDAG::ReplaceAllUsesOfValuesWith
  /// to record information about a use.
  struct UseMemo {
    SDNode *User;
    unsigned Index;
    SDUse *Use;
  };

  /// operator< - Sort Memos by User.
  bool operator<(const UseMemo &L, const UseMemo &R) {
    return (intptr_t)L.User < (intptr_t)R.User;
  }
}

/// ReplaceAllUsesOfValuesWith - Replace any uses of From with To, leaving
/// uses of other values produced by From.getNode() alone. The same value
/// may appear in both the From and To list.
void SelectionDAG::ReplaceAllUsesOfValuesWith(const SDValue *From,
                                              const SDValue *To,
                                              unsigned Num) {
  // Handle the simple, trivial case efficiently.
  if (Num == 1)
    return ReplaceAllUsesOfValueWith(*From, *To);

  // Read up all the uses and make records of them. This helps
  // processing new uses that are introduced during the
  // replacement process.
  SmallVector<UseMemo, 4> Uses;
  for (unsigned i = 0; i != Num; ++i) {
    unsigned FromResNo = From[i].getResNo();
    SDNode *FromNode = From[i].getNode();
    for (SDNode::use_iterator UI = FromNode->use_begin(),
         E = FromNode->use_end(); UI != E; ++UI) {
      SDUse &Use = UI.getUse();
      if (Use.getResNo() == FromResNo) {
        UseMemo Memo = { *UI, i, &Use };
        Uses.push_back(Memo);
      }
    }
  }

  // Sort the uses, so that all the uses from a given User are together.
  std::sort(Uses.begin(), Uses.end());

  for (unsigned UseIndex = 0, UseIndexEnd = Uses.size();
       UseIndex != UseIndexEnd; ) {
    // We know that this user uses some value of From. If it is the right
    // value, update it.
    SDNode *User = Uses[UseIndex].User;

    // This node is about to morph, remove its old self from the CSE maps.
    RemoveNodeFromCSEMaps(User);

    // The Uses array is sorted, so all the uses for a given User
    // are next to each other in the list.
    // To help reduce the number of CSE recomputations, process all
    // the uses of this user that we can find this way.
    do {
      unsigned i = Uses[UseIndex].Index;
      SDUse &Use = *Uses[UseIndex].Use;
      ++UseIndex;

      Use.set(To[i]);
    } while (UseIndex != UseIndexEnd && Uses[UseIndex].User == User);

    // Now that we have modified User, add it back to the CSE maps. If it
    // already exists there, recursively merge the results together.
    AddModifiedNodeToCSEMaps(User);
  }
}

/// AssignTopologicalOrder - Assign a unique node id for each node in the DAG
/// based on their topological order. It returns the maximum id.
unsigned SelectionDAG::AssignTopologicalOrder() {
  unsigned DAGSize = 0;

  // SortedPos tracks the progress of the algorithm. Nodes before it are
  // sorted, nodes after it are unsorted. When the algorithm completes
  // it is at the end of the list.
  allnodes_iterator SortedPos = allnodes_begin();

  // Visit all the nodes. Move nodes with no operands to the front of
  // the list immediately. Annotate nodes that do have operands with their
  // operand count. Before we do this, the Node Id fields of the nodes
  // may contain arbitrary values.
  // Afterward, the Node Id fields for nodes before SortedPos will contain
  // the topological sort index, and the Node Id fields for nodes at
  // SortedPos and after will contain the count of outstanding operands.
  for (allnodes_iterator I = allnodes_begin(), E = allnodes_end(); I != E; ) {
    SDNode *N = I++;
    checkForCycles(N);
    unsigned Degree = N->getNumOperands();
    if (Degree == 0) {
      // A node with no operands, add it to the result array immediately.
      N->setNodeId(DAGSize++);
      allnodes_iterator Q = N;
      if (Q != SortedPos)
        SortedPos = AllNodes.insert(SortedPos, AllNodes.remove(Q));
      assert(SortedPos != AllNodes.end() && "Overran node list");
      ++SortedPos;
    } else {
      // Temporarily use the Node Id as scratch space for the degree count.
      N->setNodeId(Degree);
    }
  }

  // Visit all the nodes. As we iterate, move nodes into sorted order,
  // such that by the time the end is reached all nodes will be sorted.
  for (allnodes_iterator I = allnodes_begin(), E = allnodes_end(); I != E; ++I) {
    SDNode *N = I;
    checkForCycles(N);
    // N is in sorted position, so all its uses have one less operand
    // that needs to be sorted.
    for (SDNode::use_iterator UI = N->use_begin(), UE = N->use_end();
         UI != UE; ++UI) {
      SDNode *P = *UI;
      unsigned Degree = P->getNodeId();
      assert(Degree != 0 && "Invalid node degree");
      --Degree;
      if (Degree == 0) {
        // All of P's operands are sorted, so P may be sorted now.
        P->setNodeId(DAGSize++);
        if (P != SortedPos)
          SortedPos = AllNodes.insert(SortedPos, AllNodes.remove(P));
        assert(SortedPos != AllNodes.end() && "Overran node list");
        ++SortedPos;
      } else {
        // Update P's outstanding operand count.
        P->setNodeId(Degree);
      }
    }
    if (I == SortedPos) {
#ifndef NDEBUG
      SDNode *S = ++I;
      dbgs() << "Overran sorted position:\n";
      S->dumprFull();
#endif
      llvm_unreachable(0);
    }
  }

  assert(SortedPos == AllNodes.end() &&
         "Topological sort incomplete!");
  assert(AllNodes.front().getOpcode() == ISD::EntryToken &&
         "First node in topological sort is not the entry token!");
  assert(AllNodes.front().getNodeId() == 0 &&
         "First node in topological sort has non-zero id!");
  assert(AllNodes.front().getNumOperands() == 0 &&
         "First node in topological sort has operands!");
  assert(AllNodes.back().getNodeId() == (int)DAGSize-1 &&
         "Last node in topological sort has unexpected id!");
  assert(AllNodes.back().use_empty() &&
         "Last node in topological sort has users!");
  assert(DAGSize == allnodes_size() && "Node count mismatch!");
  return DAGSize;
}

/// AddDbgValue - Add a dbg_value SDNode. If SD is non-null that means the
/// value is produced by SD.
void SelectionDAG::AddDbgValue(SDDbgValue *DB, SDNode *SD, bool isParameter) {
  DbgInfo->add(DB, SD, isParameter);
  if (SD)
    SD->setHasDebugValue(true);
}

/// TransferDbgValues - Transfer SDDbgValues.
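/// Any SDNODE-kind debug values attached to From are cloned onto To, so that
/// source-variable locations survive when From is about to be replaced.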
6071 void SelectionDAG::TransferDbgValues(SDValue From, SDValue To) { 6072 if (From == To || !From.getNode()->getHasDebugValue()) 6073 return; 6074 SDNode *FromNode = From.getNode(); 6075 SDNode *ToNode = To.getNode(); 6076 ArrayRef<SDDbgValue *> DVs = GetDbgValues(FromNode); 6077 SmallVector<SDDbgValue *, 2> ClonedDVs; 6078 for (ArrayRef<SDDbgValue *>::iterator I = DVs.begin(), E = DVs.end(); 6079 I != E; ++I) { 6080 SDDbgValue *Dbg = *I; 6081 if (Dbg->getKind() == SDDbgValue::SDNODE) { 6082 SDDbgValue *Clone = getDbgValue(Dbg->getMDPtr(), ToNode, To.getResNo(), 6083 Dbg->getOffset(), Dbg->getDebugLoc(), 6084 Dbg->getOrder()); 6085 ClonedDVs.push_back(Clone); 6086 } 6087 } 6088 for (SmallVectorImpl<SDDbgValue *>::iterator I = ClonedDVs.begin(), 6089 E = ClonedDVs.end(); I != E; ++I) 6090 AddDbgValue(*I, ToNode, false); 6091 } 6092 6093 //===----------------------------------------------------------------------===// 6094 // SDNode Class 6095 //===----------------------------------------------------------------------===// 6096 6097 HandleSDNode::~HandleSDNode() { 6098 DropOperands(); 6099 } 6100 6101 GlobalAddressSDNode::GlobalAddressSDNode(unsigned Opc, unsigned Order, 6102 DebugLoc DL, const GlobalValue *GA, 6103 EVT VT, int64_t o, unsigned char TF) 6104 : SDNode(Opc, Order, DL, getSDVTList(VT)), Offset(o), TargetFlags(TF) { 6105 TheGlobal = GA; 6106 } 6107 6108 AddrSpaceCastSDNode::AddrSpaceCastSDNode(unsigned Order, DebugLoc dl, EVT VT, 6109 SDValue X, unsigned SrcAS, 6110 unsigned DestAS) 6111 : UnarySDNode(ISD::ADDRSPACECAST, Order, dl, getSDVTList(VT), X), 6112 SrcAddrSpace(SrcAS), DestAddrSpace(DestAS) {} 6113 6114 MemSDNode::MemSDNode(unsigned Opc, unsigned Order, DebugLoc dl, SDVTList VTs, 6115 EVT memvt, MachineMemOperand *mmo) 6116 : SDNode(Opc, Order, dl, VTs), MemoryVT(memvt), MMO(mmo) { 6117 SubclassData = encodeMemSDNodeFlags(0, ISD::UNINDEXED, MMO->isVolatile(), 6118 MMO->isNonTemporal(), MMO->isInvariant()); 6119 assert(isVolatile() == MMO->isVolatile() && "Volatile encoding error!"); 6120 assert(isNonTemporal() == MMO->isNonTemporal() && 6121 "Non-temporal encoding error!"); 6122 assert(memvt.getStoreSize() == MMO->getSize() && "Size mismatch!"); 6123 } 6124 6125 MemSDNode::MemSDNode(unsigned Opc, unsigned Order, DebugLoc dl, SDVTList VTs, 6126 const SDValue *Ops, unsigned NumOps, EVT memvt, 6127 MachineMemOperand *mmo) 6128 : SDNode(Opc, Order, dl, VTs, Ops, NumOps), 6129 MemoryVT(memvt), MMO(mmo) { 6130 SubclassData = encodeMemSDNodeFlags(0, ISD::UNINDEXED, MMO->isVolatile(), 6131 MMO->isNonTemporal(), MMO->isInvariant()); 6132 assert(isVolatile() == MMO->isVolatile() && "Volatile encoding error!"); 6133 assert(memvt.getStoreSize() == MMO->getSize() && "Size mismatch!"); 6134 } 6135 6136 /// Profile - Gather unique data for the node. 6137 /// 6138 void SDNode::Profile(FoldingSetNodeID &ID) const { 6139 AddNodeIDNode(ID, this); 6140 } 6141 6142 namespace { 6143 struct EVTArray { 6144 std::vector<EVT> VTs; 6145 6146 EVTArray() { 6147 VTs.reserve(MVT::LAST_VALUETYPE); 6148 for (unsigned i = 0; i < MVT::LAST_VALUETYPE; ++i) 6149 VTs.push_back(MVT((MVT::SimpleValueType)i)); 6150 } 6151 }; 6152 } 6153 6154 static ManagedStatic<std::set<EVT, EVT::compareRawBits> > EVTs; 6155 static ManagedStatic<EVTArray> SimpleVTArray; 6156 static ManagedStatic<sys::SmartMutex<true> > VTMutex; 6157 6158 /// getValueTypeList - Return a pointer to the specified value type. 
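/// Extended value types are interned in a mutex-guarded global set; simple
/// value types index into a lazily built table of all MVTs.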
/// getValueTypeList - Return a pointer to the specified value type.
///
const EVT *SDNode::getValueTypeList(EVT VT) {
  if (VT.isExtended()) {
    sys::SmartScopedLock<true> Lock(*VTMutex);
    return &(*EVTs->insert(VT).first);
  } else {
    assert(VT.getSimpleVT() < MVT::LAST_VALUETYPE &&
           "Value type out of range!");
    return &SimpleVTArray->VTs[VT.getSimpleVT().SimpleTy];
  }
}

/// hasNUsesOfValue - Return true if there are exactly NUSES uses of the
/// indicated value. This method ignores uses of other values defined by this
/// operation.
bool SDNode::hasNUsesOfValue(unsigned NUses, unsigned Value) const {
  assert(Value < getNumValues() && "Bad value!");

  // TODO: Only iterate over uses of a given value of the node
  for (SDNode::use_iterator UI = use_begin(), E = use_end(); UI != E; ++UI) {
    if (UI.getUse().getResNo() == Value) {
      if (NUses == 0)
        return false;
      --NUses;
    }
  }

  // Found exactly the right number of uses?
  return NUses == 0;
}


/// hasAnyUseOfValue - Return true if there are any uses of the indicated
/// value. This method ignores uses of other values defined by this operation.
bool SDNode::hasAnyUseOfValue(unsigned Value) const {
  assert(Value < getNumValues() && "Bad value!");

  for (SDNode::use_iterator UI = use_begin(), E = use_end(); UI != E; ++UI)
    if (UI.getUse().getResNo() == Value)
      return true;

  return false;
}


/// isOnlyUserOf - Return true if this node is the only use of N.
///
bool SDNode::isOnlyUserOf(SDNode *N) const {
  bool Seen = false;
  for (SDNode::use_iterator I = N->use_begin(), E = N->use_end(); I != E; ++I) {
    SDNode *User = *I;
    if (User == this)
      Seen = true;
    else
      return false;
  }

  return Seen;
}

/// isOperandOf - Return true if this value is an operand of N.
///
bool SDValue::isOperandOf(SDNode *N) const {
  for (unsigned i = 0, e = N->getNumOperands(); i != e; ++i)
    if (*this == N->getOperand(i))
      return true;
  return false;
}

bool SDNode::isOperandOf(SDNode *N) const {
  for (unsigned i = 0, e = N->NumOperands; i != e; ++i)
    if (this == N->OperandList[i].getNode())
      return true;
  return false;
}
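// A minimal sketch of the per-result queries above, assuming a hypothetical
// LoadSDNode *L, which defines both a loaded value (result 0) and a chain
// (result 1):
//
//   if (L->hasAnyUseOfValue(0) && !L->hasAnyUseOfValue(1)) {
//     // The loaded value is live but the chain result is unused; a plain
//     // use_empty() check cannot distinguish between the two results.
//   }
//   bool OneUse = L->hasNUsesOfValue(1, 0); // exactly one use of result 0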
/// reachesChainWithoutSideEffects - Return true if this operand (which must
/// be a chain) reaches the specified operand without crossing any
/// side-effecting instructions on any chain path. In practice, this looks
/// through token factors and non-volatile loads. In order to remain
/// efficient, this only looks a few nodes in; it does not do an exhaustive
/// search.
bool SDValue::reachesChainWithoutSideEffects(SDValue Dest,
                                             unsigned Depth) const {
  if (*this == Dest) return true;

  // Don't search too deeply, we just want to be able to see through
  // TokenFactor's etc.
  if (Depth == 0) return false;

  // If this is a token factor, all inputs to the TF happen in parallel. If any
  // of the operands of the TF does not reach dest, then we cannot do the xform.
  if (getOpcode() == ISD::TokenFactor) {
    for (unsigned i = 0, e = getNumOperands(); i != e; ++i)
      if (!getOperand(i).reachesChainWithoutSideEffects(Dest, Depth-1))
        return false;
    return true;
  }

  // Loads don't have side effects, look through them.
  if (LoadSDNode *Ld = dyn_cast<LoadSDNode>(*this)) {
    if (!Ld->isVolatile())
      return Ld->getChain().reachesChainWithoutSideEffects(Dest, Depth-1);
  }
  return false;
}

/// hasPredecessor - Return true if N is a predecessor of this node.
/// N is either an operand of this node, or can be reached by recursively
/// traversing up the operands.
/// NOTE: This is an expensive method. Use it carefully.
bool SDNode::hasPredecessor(const SDNode *N) const {
  SmallPtrSet<const SDNode *, 32> Visited;
  SmallVector<const SDNode *, 16> Worklist;
  return hasPredecessorHelper(N, Visited, Worklist);
}

bool
SDNode::hasPredecessorHelper(const SDNode *N,
                             SmallPtrSet<const SDNode *, 32> &Visited,
                             SmallVectorImpl<const SDNode *> &Worklist) const {
  if (Visited.empty()) {
    Worklist.push_back(this);
  } else {
    // Take a look in the visited set. If we've already encountered this node
    // we needn't search further.
    if (Visited.count(N))
      return true;
  }

  // Haven't visited N yet. Continue the search.
  while (!Worklist.empty()) {
    const SDNode *M = Worklist.pop_back_val();
    for (unsigned i = 0, e = M->getNumOperands(); i != e; ++i) {
      SDNode *Op = M->getOperand(i).getNode();
      if (Visited.insert(Op))
        Worklist.push_back(Op);
      if (Op == N)
        return true;
    }
  }

  return false;
}

uint64_t SDNode::getConstantOperandVal(unsigned Num) const {
  assert(Num < NumOperands && "Invalid child # of SDNode!");
  return cast<ConstantSDNode>(OperandList[Num])->getZExtValue();
}
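// A minimal sketch of the intended reuse pattern for hasPredecessorHelper,
// assuming hypothetical nodes N, A, and B: by passing the same Visited set
// and Worklist across successive queries, nodes already expanded on the
// first walk are not re-walked on the second.
//
//   SmallPtrSet<const SDNode *, 32> Visited;
//   SmallVector<const SDNode *, 16> Worklist;
//   bool AIsPred = N->hasPredecessorHelper(A, Visited, Worklist);
//   bool BIsPred = N->hasPredecessorHelper(B, Visited, Worklist);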
SDValue SelectionDAG::UnrollVectorOp(SDNode *N, unsigned ResNE) {
  assert(N->getNumValues() == 1 &&
         "Can't unroll a vector with multiple results!");

  EVT VT = N->getValueType(0);
  unsigned NE = VT.getVectorNumElements();
  EVT EltVT = VT.getVectorElementType();
  SDLoc dl(N);

  SmallVector<SDValue, 8> Scalars;
  SmallVector<SDValue, 4> Operands(N->getNumOperands());

  // If ResNE is 0, fully unroll the vector op.
  if (ResNE == 0)
    ResNE = NE;
  else if (NE > ResNE)
    NE = ResNE;

  unsigned i;
  for (i = 0; i != NE; ++i) {
    for (unsigned j = 0, e = N->getNumOperands(); j != e; ++j) {
      SDValue Operand = N->getOperand(j);
      EVT OperandVT = Operand.getValueType();
      if (OperandVT.isVector()) {
        // A vector operand; extract a single element.
        const TargetLowering *TLI = TM.getTargetLowering();
        EVT OperandEltVT = OperandVT.getVectorElementType();
        Operands[j] = getNode(ISD::EXTRACT_VECTOR_ELT, dl,
                              OperandEltVT,
                              Operand,
                              getConstant(i, TLI->getVectorIdxTy()));
      } else {
        // A scalar operand; just use it as is.
        Operands[j] = Operand;
      }
    }

    switch (N->getOpcode()) {
    default:
      Scalars.push_back(getNode(N->getOpcode(), dl, EltVT,
                                &Operands[0], Operands.size()));
      break;
    case ISD::VSELECT:
      Scalars.push_back(getNode(ISD::SELECT, dl, EltVT,
                                &Operands[0], Operands.size()));
      break;
    case ISD::SHL:
    case ISD::SRA:
    case ISD::SRL:
    case ISD::ROTL:
    case ISD::ROTR:
      Scalars.push_back(getNode(N->getOpcode(), dl, EltVT, Operands[0],
                                getShiftAmountOperand(Operands[0].getValueType(),
                                                      Operands[1])));
      break;
    case ISD::SIGN_EXTEND_INREG:
    case ISD::FP_ROUND_INREG: {
      EVT ExtVT = cast<VTSDNode>(Operands[1])->getVT().getVectorElementType();
      Scalars.push_back(getNode(N->getOpcode(), dl, EltVT,
                                Operands[0],
                                getValueType(ExtVT)));
    }
    }
  }

  for (; i < ResNE; ++i)
    Scalars.push_back(getUNDEF(EltVT));

  return getNode(ISD::BUILD_VECTOR, dl,
                 EVT::getVectorVT(*getContext(), EltVT, ResNE),
                 &Scalars[0], Scalars.size());
}


/// isConsecutiveLoad - Return true if LD is loading 'Bytes' bytes from a
/// location that is 'Dist' units away from the location that the 'Base' load
/// is loading from.
bool SelectionDAG::isConsecutiveLoad(LoadSDNode *LD, LoadSDNode *Base,
                                     unsigned Bytes, int Dist) const {
  if (LD->getChain() != Base->getChain())
    return false;
  EVT VT = LD->getValueType(0);
  if (VT.getSizeInBits() / 8 != Bytes)
    return false;

  SDValue Loc = LD->getOperand(1);
  SDValue BaseLoc = Base->getOperand(1);
  if (Loc.getOpcode() == ISD::FrameIndex) {
    if (BaseLoc.getOpcode() != ISD::FrameIndex)
      return false;
    const MachineFrameInfo *MFI = getMachineFunction().getFrameInfo();
    int FI  = cast<FrameIndexSDNode>(Loc)->getIndex();
    int BFI = cast<FrameIndexSDNode>(BaseLoc)->getIndex();
    int FS  = MFI->getObjectSize(FI);
    int BFS = MFI->getObjectSize(BFI);
    if (FS != BFS || FS != (int)Bytes) return false;
    return MFI->getObjectOffset(FI) == (MFI->getObjectOffset(BFI) + Dist*Bytes);
  }

  // Handle X+C.
  if (isBaseWithConstantOffset(Loc) && Loc.getOperand(0) == BaseLoc &&
      cast<ConstantSDNode>(Loc.getOperand(1))->getSExtValue() == Dist*Bytes)
    return true;

  const GlobalValue *GV1 = NULL;
  const GlobalValue *GV2 = NULL;
  int64_t Offset1 = 0;
  int64_t Offset2 = 0;
  const TargetLowering *TLI = TM.getTargetLowering();
  bool isGA1 = TLI->isGAPlusOffset(Loc.getNode(), GV1, Offset1);
  bool isGA2 = TLI->isGAPlusOffset(BaseLoc.getNode(), GV2, Offset2);
  if (isGA1 && isGA2 && GV1 == GV2)
    return Offset1 == (Offset2 + Dist*Bytes);
  return false;
}
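// A worked example of the isConsecutiveLoad contract, assuming two
// hypothetical i32 loads LD and Base on the same chain, where LD's address
// is Base's address plus 4:
//
//   bool Adjacent = DAG.isConsecutiveLoad(LD, Base, /*Bytes=*/4, /*Dist=*/1);
//   // true: LD reads the 4 bytes starting exactly one 4-byte unit past Base.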
/// InferPtrAlignment - Infer alignment of a load / store address. Return 0 if
/// it cannot be inferred.
unsigned SelectionDAG::InferPtrAlignment(SDValue Ptr) const {
  // If this is a GlobalAddress + cst, return the alignment.
  const GlobalValue *GV;
  int64_t GVOffset = 0;
  const TargetLowering *TLI = TM.getTargetLowering();
  if (TLI->isGAPlusOffset(Ptr.getNode(), GV, GVOffset)) {
    unsigned PtrWidth = TLI->getPointerTypeSizeInBits(GV->getType());
    APInt KnownZero(PtrWidth, 0), KnownOne(PtrWidth, 0);
    llvm::ComputeMaskedBits(const_cast<GlobalValue*>(GV), KnownZero, KnownOne,
                            TLI->getDataLayout());
    unsigned AlignBits = KnownZero.countTrailingOnes();
    unsigned Align = AlignBits ? 1 << std::min(31U, AlignBits) : 0;
    if (Align)
      return MinAlign(Align, GVOffset);
  }

  // If this is a direct reference to a stack slot, use information about the
  // stack slot's alignment.
  int FrameIdx = 1 << 31;
  int64_t FrameOffset = 0;
  if (FrameIndexSDNode *FI = dyn_cast<FrameIndexSDNode>(Ptr)) {
    FrameIdx = FI->getIndex();
  } else if (isBaseWithConstantOffset(Ptr) &&
             isa<FrameIndexSDNode>(Ptr.getOperand(0))) {
    // Handle FI+Cst.
    FrameIdx = cast<FrameIndexSDNode>(Ptr.getOperand(0))->getIndex();
    FrameOffset = Ptr.getConstantOperandVal(1);
  }

  if (FrameIdx != (1 << 31)) {
    const MachineFrameInfo &MFI = *getMachineFunction().getFrameInfo();
    unsigned FIInfoAlign = MinAlign(MFI.getObjectAlignment(FrameIdx),
                                    FrameOffset);
    return FIInfoAlign;
  }

  return 0;
}

/// GetSplitDestVTs - Compute the VTs needed for the low/hi parts of a type
/// which is split (or expanded) into two not necessarily identical pieces.
std::pair<EVT, EVT> SelectionDAG::GetSplitDestVTs(const EVT &VT) const {
  // Currently all types are split in half.
  EVT LoVT, HiVT;
  if (!VT.isVector()) {
    LoVT = HiVT = TLI->getTypeToTransformTo(*getContext(), VT);
  } else {
    unsigned NumElements = VT.getVectorNumElements();
    assert(!(NumElements & 1) && "Splitting vector, but not in half!");
    LoVT = HiVT = EVT::getVectorVT(*getContext(), VT.getVectorElementType(),
                                   NumElements/2);
  }
  return std::make_pair(LoVT, HiVT);
}

/// SplitVector - Split the vector with EXTRACT_SUBVECTOR and return the
/// low/high part.
std::pair<SDValue, SDValue>
SelectionDAG::SplitVector(const SDValue &N, const SDLoc &DL, const EVT &LoVT,
                          const EVT &HiVT) {
  assert(LoVT.getVectorNumElements() + HiVT.getVectorNumElements() <=
         N.getValueType().getVectorNumElements() &&
         "More vector elements requested than available!");
  SDValue Lo, Hi;
  Lo = getNode(ISD::EXTRACT_SUBVECTOR, DL, LoVT, N,
               getConstant(0, TLI->getVectorIdxTy()));
  Hi = getNode(ISD::EXTRACT_SUBVECTOR, DL, HiVT, N,
               getConstant(LoVT.getVectorNumElements(), TLI->getVectorIdxTy()));
  return std::make_pair(Lo, Hi);
}

// getAddressSpace - Return the address space this GlobalAddress belongs to.
unsigned GlobalAddressSDNode::getAddressSpace() const {
  return getGlobal()->getType()->getAddressSpace();
}


Type *ConstantPoolSDNode::getType() const {
  if (isMachineConstantPoolEntry())
    return Val.MachineCPVal->getType();
  return Val.ConstVal->getType();
}
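// A minimal sketch of how GetSplitDestVTs and SplitVector compose, assuming
// a hypothetical v8i32 SDValue Vec and an SDLoc dl:
//
//   std::pair<EVT, EVT> VTs = DAG.GetSplitDestVTs(Vec.getValueType());
//   // VTs.first == VTs.second == v4i32
//   std::pair<SDValue, SDValue> Halves =
//       DAG.SplitVector(Vec, dl, VTs.first, VTs.second);
//   // Halves.first holds elements 0-3; Halves.second holds elements 4-7.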
bool BuildVectorSDNode::isConstantSplat(APInt &SplatValue,
                                        APInt &SplatUndef,
                                        unsigned &SplatBitSize,
                                        bool &HasAnyUndefs,
                                        unsigned MinSplatBits,
                                        bool isBigEndian) const {
  EVT VT = getValueType(0);
  assert(VT.isVector() && "Expected a vector type");
  unsigned sz = VT.getSizeInBits();
  if (MinSplatBits > sz)
    return false;

  SplatValue = APInt(sz, 0);
  SplatUndef = APInt(sz, 0);

  // Get the bits. Bits with undefined values (when the corresponding element
  // of the vector is an ISD::UNDEF value) are set in SplatUndef and cleared
  // in SplatValue. If any of the values are not constant, give up and return
  // false.
  unsigned int nOps = getNumOperands();
  assert(nOps > 0 && "isConstantSplat has 0-size build vector");
  unsigned EltBitSize = VT.getVectorElementType().getSizeInBits();

  for (unsigned j = 0; j < nOps; ++j) {
    unsigned i = isBigEndian ? nOps-1-j : j;
    SDValue OpVal = getOperand(i);
    unsigned BitPos = j * EltBitSize;

    if (OpVal.getOpcode() == ISD::UNDEF)
      SplatUndef |= APInt::getBitsSet(sz, BitPos, BitPos + EltBitSize);
    else if (ConstantSDNode *CN = dyn_cast<ConstantSDNode>(OpVal))
      SplatValue |= CN->getAPIntValue().zextOrTrunc(EltBitSize).
                    zextOrTrunc(sz) << BitPos;
    else if (ConstantFPSDNode *CN = dyn_cast<ConstantFPSDNode>(OpVal))
      SplatValue |= CN->getValueAPF().bitcastToAPInt().zextOrTrunc(sz) << BitPos;
    else
      return false;
  }

  // The build_vector is all constants or undefs. Find the smallest element
  // size that splats the vector.

  HasAnyUndefs = (SplatUndef != 0);
  while (sz > 8) {
    unsigned HalfSize = sz / 2;
    APInt HighValue = SplatValue.lshr(HalfSize).trunc(HalfSize);
    APInt LowValue  = SplatValue.trunc(HalfSize);
    APInt HighUndef = SplatUndef.lshr(HalfSize).trunc(HalfSize);
    APInt LowUndef  = SplatUndef.trunc(HalfSize);

    // If the two halves do not match (ignoring undef bits), stop here.
    if ((HighValue & ~LowUndef) != (LowValue & ~HighUndef) ||
        MinSplatBits > HalfSize)
      break;

    SplatValue = HighValue | LowValue;
    SplatUndef = HighUndef & LowUndef;

    sz = HalfSize;
  }

  SplatBitSize = sz;
  return true;
}

ConstantSDNode *BuildVectorSDNode::isConstantSplat() const {
  SDValue Op0 = getOperand(0);
  for (unsigned i = 0, e = getNumOperands(); i != e; ++i) {
    SDValue Opi = getOperand(i);
    unsigned Opc = Opi.getOpcode();
    if ((Opc != ISD::UNDEF && Opc != ISD::Constant && Opc != ISD::ConstantFP) ||
        Opi != Op0)
      return nullptr;
  }

  // Op0 may be an UNDEF or ConstantFP node here, in which case this is not
  // an integer-constant splat; use dyn_cast rather than cast so those cases
  // return null instead of asserting.
  return dyn_cast<ConstantSDNode>(Op0);
}

bool BuildVectorSDNode::isConstant() const {
  for (unsigned i = 0, e = getNumOperands(); i != e; ++i) {
    unsigned Opc = getOperand(i).getOpcode();
    if (Opc != ISD::UNDEF && Opc != ISD::Constant && Opc != ISD::ConstantFP)
      return false;
  }
  return true;
}

bool ShuffleVectorSDNode::isSplatMask(const int *Mask, EVT VT) {
  // Find the first non-undef value in the shuffle mask.
  unsigned i, e;
  for (i = 0, e = VT.getVectorNumElements(); i != e && Mask[i] < 0; ++i)
    /* search */;

  assert(i != e && "VECTOR_SHUFFLE node with all undef indices!");

  // Make sure all remaining elements are either undef or the same as the first
  // non-undef value.
  for (int Idx = Mask[i]; i != e; ++i)
    if (Mask[i] >= 0 && Mask[i] != Idx)
      return false;
  return true;
}
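// A worked example of the splat-narrowing loop in isConstantSplat above,
// assuming a hypothetical v4i32 build_vector BV whose elements are all the
// constant 0x01010101: the 128 collected bits halve and keep matching down
// to 8 bits, so the query reports a one-byte splat.
//
//   APInt SplatValue, SplatUndef;
//   unsigned SplatBitSize;
//   bool HasAnyUndefs;
//   if (BV->isConstantSplat(SplatValue, SplatUndef, SplatBitSize,
//                           HasAnyUndefs))
//     assert(SplatBitSize == 8 && SplatValue.getZExtValue() == 0x01);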
#ifdef XDEBUG
static void checkForCyclesHelper(const SDNode *N,
                                 SmallPtrSet<const SDNode*, 32> &Visited,
                                 SmallPtrSet<const SDNode*, 32> &Checked) {
  // If this node has already been checked, don't check it again.
  if (Checked.count(N))
    return;

  // If a node has already been visited on this depth-first walk, reject it as
  // a cycle.
  if (!Visited.insert(N)) {
    dbgs() << "Offending node:\n";
    N->dumprFull();
    errs() << "Detected cycle in SelectionDAG\n";
    abort();
  }

  for (unsigned i = 0, e = N->getNumOperands(); i != e; ++i)
    checkForCyclesHelper(N->getOperand(i).getNode(), Visited, Checked);

  Checked.insert(N);
  Visited.erase(N);
}
#endif

void llvm::checkForCycles(const llvm::SDNode *N) {
#ifdef XDEBUG
  assert(N && "Checking nonexistent SDNode");
  SmallPtrSet<const SDNode*, 32> visited;
  SmallPtrSet<const SDNode*, 32> checked;
  checkForCyclesHelper(N, visited, checked);
#endif
}

void llvm::checkForCycles(const llvm::SelectionDAG *DAG) {
  checkForCycles(DAG->getRoot().getNode());
}
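// A minimal usage sketch, assuming a hypothetical SDNode *SomeNode and a
// SelectionDAG DAG: the node check compiles to a no-op unless XDEBUG is
// defined, so callers may invoke it freely after mutating the DAG.
//
//   checkForCycles(SomeNode); // walks one node's operand tree
//   checkForCycles(&DAG);     // checks everything reachable from the root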