//===-- SelectionDAG.cpp - Implement the SelectionDAG data structures -----===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This implements the SelectionDAG class.
//
//===----------------------------------------------------------------------===//

#include "llvm/CodeGen/SelectionDAG.h"
#include "SDNodeOrdering.h"
#include "SDNodeDbgValue.h"
#include "llvm/CallingConv.h"
#include "llvm/Constants.h"
#include "llvm/DebugInfo.h"
#include "llvm/DerivedTypes.h"
#include "llvm/Function.h"
#include "llvm/GlobalAlias.h"
#include "llvm/GlobalVariable.h"
#include "llvm/Intrinsics.h"
#include "llvm/Analysis/ValueTracking.h"
#include "llvm/Assembly/Writer.h"
#include "llvm/CodeGen/MachineBasicBlock.h"
#include "llvm/CodeGen/MachineConstantPool.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineModuleInfo.h"
#include "llvm/Target/TargetRegisterInfo.h"
#include "llvm/Target/TargetData.h"
#include "llvm/Target/TargetLowering.h"
#include "llvm/Target/TargetSelectionDAGInfo.h"
#include "llvm/Target/TargetOptions.h"
#include "llvm/Target/TargetInstrInfo.h"
#include "llvm/Target/TargetIntrinsicInfo.h"
#include "llvm/Target/TargetMachine.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/ManagedStatic.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Support/Mutex.h"
#include "llvm/ADT/SetVector.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/SmallSet.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/StringExtras.h"
#include <algorithm>
#include <cmath>
using namespace llvm;

/// makeVTList - Return an instance of the SDVTList struct initialized with the
/// specified members.
static SDVTList makeVTList(const EVT *VTs, unsigned NumVTs) {
  SDVTList Res = {VTs, NumVTs};
  return Res;
}

static const fltSemantics *EVTToAPFloatSemantics(EVT VT) {
  switch (VT.getSimpleVT().SimpleTy) {
  default: llvm_unreachable("Unknown FP format");
  case MVT::f16:     return &APFloat::IEEEhalf;
  case MVT::f32:     return &APFloat::IEEEsingle;
  case MVT::f64:     return &APFloat::IEEEdouble;
  case MVT::f80:     return &APFloat::x87DoubleExtended;
  case MVT::f128:    return &APFloat::IEEEquad;
  case MVT::ppcf128: return &APFloat::PPCDoubleDouble;
  }
}

// Default null implementations of the callbacks.
void SelectionDAG::DAGUpdateListener::NodeDeleted(SDNode*, SDNode*) {}
void SelectionDAG::DAGUpdateListener::NodeUpdated(SDNode*) {}

//===----------------------------------------------------------------------===//
//                           ConstantFPSDNode Class
//===----------------------------------------------------------------------===//

/// isExactlyValue - We don't rely on operator== working on double values, as
/// it returns true for things that are clearly not equal, like -0.0 and 0.0.
/// As such, this method can be used to do an exact bit-for-bit comparison of
/// two floating point values.
bool ConstantFPSDNode::isExactlyValue(const APFloat& V) const {
  return getValueAPF().bitwiseIsEqual(V);
}

bool ConstantFPSDNode::isValueValidForType(EVT VT,
                                           const APFloat& Val) {
  assert(VT.isFloatingPoint() && "Can only convert between FP types");

  // PPC long double cannot be converted to any other type.
  if (VT == MVT::ppcf128 ||
      &Val.getSemantics() == &APFloat::PPCDoubleDouble)
    return false;

  // convert modifies in place, so make a copy.
  APFloat Val2 = APFloat(Val);
  bool losesInfo;
  (void) Val2.convert(*EVTToAPFloatSemantics(VT), APFloat::rmNearestTiesToEven,
                      &losesInfo);
  return !losesInfo;
}

//===----------------------------------------------------------------------===//
//                              ISD Namespace
//===----------------------------------------------------------------------===//

/// isBuildVectorAllOnes - Return true if the specified node is a
/// BUILD_VECTOR where all of the elements are ~0 or undef.
bool ISD::isBuildVectorAllOnes(const SDNode *N) {
  // Look through a bit convert.
  if (N->getOpcode() == ISD::BITCAST)
    N = N->getOperand(0).getNode();

  if (N->getOpcode() != ISD::BUILD_VECTOR) return false;

  unsigned i = 0, e = N->getNumOperands();

  // Skip over all of the undef values.
  while (i != e && N->getOperand(i).getOpcode() == ISD::UNDEF)
    ++i;

  // Do not accept an all-undef vector.
  if (i == e) return false;

  // Do not accept build_vectors that aren't all constants or which have non-~0
  // elements. We have to be a bit careful here, as the type of the constant
  // may not be the same as the type of the vector elements due to type
  // legalization (the elements are promoted to a legal type for the target
  // and a vector of a type may be legal when the base element type is not).
  // We only want to check enough bits to cover the vector elements, because
  // we care if the resultant vector is all ones, not whether the individual
  // constants are.
  SDValue NotZero = N->getOperand(i);
  unsigned EltSize = N->getValueType(0).getVectorElementType().getSizeInBits();
  if (isa<ConstantSDNode>(NotZero)) {
    if (cast<ConstantSDNode>(NotZero)->getAPIntValue().countTrailingOnes() <
        EltSize)
      return false;
  } else if (isa<ConstantFPSDNode>(NotZero)) {
    if (cast<ConstantFPSDNode>(NotZero)->getValueAPF()
              .bitcastToAPInt().countTrailingOnes() < EltSize)
      return false;
  } else
    return false;

  // Okay, we have at least one ~0 value, check to see if the rest match or are
  // undefs. Even with the above element type twiddling, this should be OK, as
  // the same type legalization should have applied to all the elements.
  for (++i; i != e; ++i)
    if (N->getOperand(i) != NotZero &&
        N->getOperand(i).getOpcode() != ISD::UNDEF)
      return false;
  return true;
}
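
// Note on the element-size check above: after type legalization, a target
// that promotes small integer elements (e.g. v8i8 on ARM, as noted in
// getConstant below) may produce a BUILD_VECTOR of i32 constants 0xFF for an
// all-ones v8i8 vector.  countTrailingOnes() on 0xFF is 8, which covers the
// 8-bit element width, so the vector is still recognized as all-ones even
// though the promoted constants themselves are not ~0.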

/// isBuildVectorAllZeros - Return true if the specified node is a
/// BUILD_VECTOR where all of the elements are 0 or undef.
bool ISD::isBuildVectorAllZeros(const SDNode *N) {
  // Look through a bit convert.
  if (N->getOpcode() == ISD::BITCAST)
    N = N->getOperand(0).getNode();

  if (N->getOpcode() != ISD::BUILD_VECTOR) return false;

  unsigned i = 0, e = N->getNumOperands();

  // Skip over all of the undef values.
  while (i != e && N->getOperand(i).getOpcode() == ISD::UNDEF)
    ++i;

  // Do not accept an all-undef vector.
  if (i == e) return false;

  // Do not accept build_vectors that aren't all constants or which have non-0
  // elements.
  SDValue Zero = N->getOperand(i);
  if (isa<ConstantSDNode>(Zero)) {
    if (!cast<ConstantSDNode>(Zero)->isNullValue())
      return false;
  } else if (isa<ConstantFPSDNode>(Zero)) {
    if (!cast<ConstantFPSDNode>(Zero)->getValueAPF().isPosZero())
      return false;
  } else
    return false;

  // Okay, we have at least one 0 value, check to see if the rest match or are
  // undefs.
  for (++i; i != e; ++i)
    if (N->getOperand(i) != Zero &&
        N->getOperand(i).getOpcode() != ISD::UNDEF)
      return false;
  return true;
}

/// isScalarToVector - Return true if the specified node is an
/// ISD::SCALAR_TO_VECTOR node or a BUILD_VECTOR node where only the low
/// element is not an undef.
bool ISD::isScalarToVector(const SDNode *N) {
  if (N->getOpcode() == ISD::SCALAR_TO_VECTOR)
    return true;

  if (N->getOpcode() != ISD::BUILD_VECTOR)
    return false;
  if (N->getOperand(0).getOpcode() == ISD::UNDEF)
    return false;
  unsigned NumElems = N->getNumOperands();
  if (NumElems == 1)
    return false;
  for (unsigned i = 1; i < NumElems; ++i) {
    SDValue V = N->getOperand(i);
    if (V.getOpcode() != ISD::UNDEF)
      return false;
  }
  return true;
}

/// getSetCCSwappedOperands - Return the operation corresponding to (Y op X)
/// when given the operation for (X op Y).
ISD::CondCode ISD::getSetCCSwappedOperands(ISD::CondCode Operation) {
  // To perform this operation, we just need to swap the L and G bits of the
  // operation.
  unsigned OldL = (Operation >> 2) & 1;
  unsigned OldG = (Operation >> 1) & 1;
  return ISD::CondCode((Operation & ~6) |  // Keep the N, U, E bits
                       (OldL << 1) |       // New G bit
                       (OldG << 2));       // New L bit.
}

/// getSetCCInverse - Return the operation corresponding to !(X op Y), where
/// 'op' is a valid SetCC operation.
ISD::CondCode ISD::getSetCCInverse(ISD::CondCode Op, bool isInteger) {
  unsigned Operation = Op;
  if (isInteger)
    Operation ^= 7;   // Flip L, G, E bits, but not U.
  else
    Operation ^= 15;  // Flip all of the condition bits.

  if (Operation > ISD::SETTRUE2)
    Operation &= ~8;  // Don't let N and U bits get set.

  return ISD::CondCode(Operation);
}
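
// The bit manipulations above rely on the ISD::CondCode encoding from
// ISDOpcodes.h: bit 0 = E, bit 1 = G, bit 2 = L, bit 3 = U (unordered), and
// bit 4 marks the integer ("don't care about NaN") codes.  A few illustrative
// evaluations:
//   getSetCCSwappedOperands(ISD::SETLT)  == ISD::SETGT   // swap G and L
//   getSetCCInverse(ISD::SETLT, true)    == ISD::SETGE   // flip L, G, E
//   getSetCCInverse(ISD::SETOLT, false)  == ISD::SETUGE  // flip all four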

/// isSignedOp - For an integer comparison, return 1 if the comparison is a
/// signed operation, 2 if it is an unsigned comparison, and 0 if the
/// operation does not depend on the sign of the input (setne and seteq).
static int isSignedOp(ISD::CondCode Opcode) {
  switch (Opcode) {
  default: llvm_unreachable("Illegal integer setcc operation!");
  case ISD::SETEQ:
  case ISD::SETNE:  return 0;
  case ISD::SETLT:
  case ISD::SETLE:
  case ISD::SETGT:
  case ISD::SETGE:  return 1;
  case ISD::SETULT:
  case ISD::SETULE:
  case ISD::SETUGT:
  case ISD::SETUGE: return 2;
  }
}

/// getSetCCOrOperation - Return the result of a logical OR between different
/// comparisons of identical values: ((X op1 Y) | (X op2 Y)). This function
/// returns SETCC_INVALID if it is not possible to represent the resultant
/// comparison.
ISD::CondCode ISD::getSetCCOrOperation(ISD::CondCode Op1, ISD::CondCode Op2,
                                       bool isInteger) {
  if (isInteger && (isSignedOp(Op1) | isSignedOp(Op2)) == 3)
    // Cannot fold a signed integer setcc with an unsigned integer setcc.
    return ISD::SETCC_INVALID;

  unsigned Op = Op1 | Op2;  // Combine all of the condition bits.

  // If the N and U bits get set then the resultant comparison DOES suddenly
  // care about orderedness, and is true when ordered.
  if (Op > ISD::SETTRUE2)
    Op &= ~16;  // Clear the U bit if the N bit is set.

  // Canonicalize illegal integer setcc's.
  if (isInteger && Op == ISD::SETUNE)  // e.g. SETUGT | SETULT
    Op = ISD::SETNE;

  return ISD::CondCode(Op);
}

/// getSetCCAndOperation - Return the result of a logical AND between different
/// comparisons of identical values: ((X op1 Y) & (X op2 Y)). This function
/// returns SETCC_INVALID if it is not possible to represent the resultant
/// comparison.
ISD::CondCode ISD::getSetCCAndOperation(ISD::CondCode Op1, ISD::CondCode Op2,
                                        bool isInteger) {
  if (isInteger && (isSignedOp(Op1) | isSignedOp(Op2)) == 3)
    // Cannot fold a signed setcc with an unsigned setcc.
    return ISD::SETCC_INVALID;

  // Combine all of the condition bits.
  ISD::CondCode Result = ISD::CondCode(Op1 & Op2);

  // Canonicalize illegal integer setcc's.
  if (isInteger) {
    switch (Result) {
    default: break;
    case ISD::SETUO : Result = ISD::SETFALSE; break;  // SETUGT & SETULT
    case ISD::SETOEQ:                                 // SETEQ  & SETU[LG]E
    case ISD::SETUEQ: Result = ISD::SETEQ   ; break;  // SETUGE & SETULE
    case ISD::SETOLT: Result = ISD::SETULT  ; break;  // SETULT & SETNE
    case ISD::SETOGT: Result = ISD::SETUGT  ; break;  // SETUGT & SETNE
    }
  }

  return Result;
}

//===----------------------------------------------------------------------===//
//                           SDNode Profile Support
//===----------------------------------------------------------------------===//

/// AddNodeIDOpcode - Add the node opcode to the NodeID data.
///
static void AddNodeIDOpcode(FoldingSetNodeID &ID, unsigned OpC) {
  ID.AddInteger(OpC);
}

/// AddNodeIDValueTypes - Value type lists are intern'd so we can represent
/// them solely with their pointer.
static void AddNodeIDValueTypes(FoldingSetNodeID &ID, SDVTList VTList) {
  ID.AddPointer(VTList.VTs);
}

/// AddNodeIDOperands - Various routines for adding operands to the NodeID
/// data.
static void AddNodeIDOperands(FoldingSetNodeID &ID,
                              const SDValue *Ops, unsigned NumOps) {
  for (; NumOps; --NumOps, ++Ops) {
    ID.AddPointer(Ops->getNode());
    ID.AddInteger(Ops->getResNo());
  }
}

/// AddNodeIDOperands - Various routines for adding operands to the NodeID
/// data.
static void AddNodeIDOperands(FoldingSetNodeID &ID,
                              const SDUse *Ops, unsigned NumOps) {
  for (; NumOps; --NumOps, ++Ops) {
    ID.AddPointer(Ops->getNode());
    ID.AddInteger(Ops->getResNo());
  }
}

static void AddNodeIDNode(FoldingSetNodeID &ID,
                          unsigned short OpC, SDVTList VTList,
                          const SDValue *OpList, unsigned N) {
  AddNodeIDOpcode(ID, OpC);
  AddNodeIDValueTypes(ID, VTList);
  AddNodeIDOperands(ID, OpList, N);
}
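
// These helpers feed the FoldingSet-based CSE map.  The typical client
// pattern, used by the node factories later in this file, is roughly:
//
//   FoldingSetNodeID ID;
//   AddNodeIDNode(ID, Opc, getVTList(VT), Ops, NumOps);
//   void *IP = 0;
//   if (SDNode *E = CSEMap.FindNodeOrInsertPos(ID, IP))
//     return SDValue(E, 0);   // Reuse the structurally identical node.
//   // ... otherwise construct N and remember it:
//   CSEMap.InsertNode(N, IP);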

/// AddNodeIDCustom - If this is an SDNode with special info, add this info to
/// the NodeID data.
static void AddNodeIDCustom(FoldingSetNodeID &ID, const SDNode *N) {
  switch (N->getOpcode()) {
  case ISD::TargetExternalSymbol:
  case ISD::ExternalSymbol:
    llvm_unreachable("Should only be used on nodes with operands");
  default: break;  // Normal nodes don't need extra info.
  case ISD::TargetConstant:
  case ISD::Constant:
    ID.AddPointer(cast<ConstantSDNode>(N)->getConstantIntValue());
    break;
  case ISD::TargetConstantFP:
  case ISD::ConstantFP: {
    ID.AddPointer(cast<ConstantFPSDNode>(N)->getConstantFPValue());
    break;
  }
  case ISD::TargetGlobalAddress:
  case ISD::GlobalAddress:
  case ISD::TargetGlobalTLSAddress:
  case ISD::GlobalTLSAddress: {
    const GlobalAddressSDNode *GA = cast<GlobalAddressSDNode>(N);
    ID.AddPointer(GA->getGlobal());
    ID.AddInteger(GA->getOffset());
    ID.AddInteger(GA->getTargetFlags());
    break;
  }
  case ISD::BasicBlock:
    ID.AddPointer(cast<BasicBlockSDNode>(N)->getBasicBlock());
    break;
  case ISD::Register:
    ID.AddInteger(cast<RegisterSDNode>(N)->getReg());
    break;
  case ISD::RegisterMask:
    ID.AddPointer(cast<RegisterMaskSDNode>(N)->getRegMask());
    break;
  case ISD::SRCVALUE:
    ID.AddPointer(cast<SrcValueSDNode>(N)->getValue());
    break;
  case ISD::FrameIndex:
  case ISD::TargetFrameIndex:
    ID.AddInteger(cast<FrameIndexSDNode>(N)->getIndex());
    break;
  case ISD::JumpTable:
  case ISD::TargetJumpTable:
    ID.AddInteger(cast<JumpTableSDNode>(N)->getIndex());
    ID.AddInteger(cast<JumpTableSDNode>(N)->getTargetFlags());
    break;
  case ISD::ConstantPool:
  case ISD::TargetConstantPool: {
    const ConstantPoolSDNode *CP = cast<ConstantPoolSDNode>(N);
    ID.AddInteger(CP->getAlignment());
    ID.AddInteger(CP->getOffset());
    if (CP->isMachineConstantPoolEntry())
      CP->getMachineCPVal()->addSelectionDAGCSEId(ID);
    else
      ID.AddPointer(CP->getConstVal());
    ID.AddInteger(CP->getTargetFlags());
    break;
  }
  case ISD::LOAD: {
    const LoadSDNode *LD = cast<LoadSDNode>(N);
    ID.AddInteger(LD->getMemoryVT().getRawBits());
    ID.AddInteger(LD->getRawSubclassData());
    break;
  }
  case ISD::STORE: {
    const StoreSDNode *ST = cast<StoreSDNode>(N);
    ID.AddInteger(ST->getMemoryVT().getRawBits());
    ID.AddInteger(ST->getRawSubclassData());
    break;
  }
  case ISD::ATOMIC_CMP_SWAP:
  case ISD::ATOMIC_SWAP:
  case ISD::ATOMIC_LOAD_ADD:
  case ISD::ATOMIC_LOAD_SUB:
  case ISD::ATOMIC_LOAD_AND:
  case ISD::ATOMIC_LOAD_OR:
  case ISD::ATOMIC_LOAD_XOR:
  case ISD::ATOMIC_LOAD_NAND:
  case ISD::ATOMIC_LOAD_MIN:
  case ISD::ATOMIC_LOAD_MAX:
  case ISD::ATOMIC_LOAD_UMIN:
  case ISD::ATOMIC_LOAD_UMAX:
  case ISD::ATOMIC_LOAD:
  case ISD::ATOMIC_STORE: {
    const AtomicSDNode *AT = cast<AtomicSDNode>(N);
    ID.AddInteger(AT->getMemoryVT().getRawBits());
    ID.AddInteger(AT->getRawSubclassData());
    break;
  }
  case ISD::VECTOR_SHUFFLE: {
    const ShuffleVectorSDNode *SVN = cast<ShuffleVectorSDNode>(N);
    for (unsigned i = 0, e = N->getValueType(0).getVectorNumElements();
         i != e; ++i)
      ID.AddInteger(SVN->getMaskElt(i));
    break;
  }
  case ISD::TargetBlockAddress:
  case ISD::BlockAddress: {
    ID.AddPointer(cast<BlockAddressSDNode>(N)->getBlockAddress());
    ID.AddInteger(cast<BlockAddressSDNode>(N)->getTargetFlags());
    break;
  }
  } // end switch (N->getOpcode())
}

/// AddNodeIDNode - Generic routine for adding a node's info to the NodeID
/// data.
static void AddNodeIDNode(FoldingSetNodeID &ID, const SDNode *N) {
  AddNodeIDOpcode(ID, N->getOpcode());
  // Add the return value info.
  AddNodeIDValueTypes(ID, N->getVTList());
  // Add the operand info.
  AddNodeIDOperands(ID, N->op_begin(), N->getNumOperands());

  // Handle SDNode leaves with special info.
  AddNodeIDCustom(ID, N);
}

/// encodeMemSDNodeFlags - Generic routine for computing a value for use in
/// the CSE map that carries volatility, temporalness, indexing mode, and
/// extension/truncation information.
///
static inline unsigned
encodeMemSDNodeFlags(int ConvType, ISD::MemIndexedMode AM, bool isVolatile,
                     bool isNonTemporal, bool isInvariant) {
  assert((ConvType & 3) == ConvType &&
         "ConvType may not require more than 2 bits!");
  assert((AM & 7) == AM &&
         "AM may not require more than 3 bits!");
  return ConvType |
         (AM << 2) |
         (isVolatile << 5) |
         (isNonTemporal << 6) |
         (isInvariant << 7);
}

//===----------------------------------------------------------------------===//
//                              SelectionDAG Class
//===----------------------------------------------------------------------===//

/// doNotCSE - Return true if CSE should not be performed for this node.
static bool doNotCSE(SDNode *N) {
  if (N->getValueType(0) == MVT::Glue)
    return true; // Never CSE anything that produces a flag.

  switch (N->getOpcode()) {
  default: break;
  case ISD::HANDLENODE:
  case ISD::EH_LABEL:
    return true; // Never CSE these nodes.
  }

  // Check that remaining values produced are not flags.
  for (unsigned i = 1, e = N->getNumValues(); i != e; ++i)
    if (N->getValueType(i) == MVT::Glue)
      return true; // Never CSE anything that produces a flag.

  return false;
}

/// RemoveDeadNodes - This method deletes all unreachable nodes in the
/// SelectionDAG.
void SelectionDAG::RemoveDeadNodes() {
  // Create a dummy node (which is not added to allnodes), that adds a
  // reference to the root node, preventing it from being deleted.
  HandleSDNode Dummy(getRoot());

  SmallVector<SDNode*, 128> DeadNodes;

  // Add all obviously-dead nodes to the DeadNodes worklist.
  for (allnodes_iterator I = allnodes_begin(), E = allnodes_end(); I != E; ++I)
    if (I->use_empty())
      DeadNodes.push_back(I);

  RemoveDeadNodes(DeadNodes);

  // If the root changed (e.g. it was a dead load), update the root.
  setRoot(Dummy.getValue());
}

/// RemoveDeadNodes - This method deletes the unreachable nodes in the
/// given list, and any nodes that become unreachable as a result.
void SelectionDAG::RemoveDeadNodes(SmallVectorImpl<SDNode *> &DeadNodes) {

  // Process the worklist, deleting the nodes and adding their uses to the
  // worklist.
  while (!DeadNodes.empty()) {
    SDNode *N = DeadNodes.pop_back_val();

    for (DAGUpdateListener *DUL = UpdateListeners; DUL; DUL = DUL->Next)
      DUL->NodeDeleted(N, 0);

    // Take the node out of the appropriate CSE map.
    RemoveNodeFromCSEMaps(N);

    // Next, brutally remove the operand list. This is safe to do, as there are
    // no cycles in the graph.
    for (SDNode::op_iterator I = N->op_begin(), E = N->op_end(); I != E; ) {
      SDUse &Use = *I++;
      SDNode *Operand = Use.getNode();
      Use.set(SDValue());

      // Now that we removed this operand, see if there are no uses of it left.
      if (Operand->use_empty())
        DeadNodes.push_back(Operand);
    }

    DeallocateNode(N);
  }
}
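
// For example, when a dead (add x, y) node reaches the loop above, its uses
// of x and y are dropped; if that was the last use of y, y itself is pushed
// onto the worklist, so an entire dead expression tree is reclaimed in one
// pass over the worklist.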

void SelectionDAG::RemoveDeadNode(SDNode *N){
  SmallVector<SDNode*, 16> DeadNodes(1, N);

  // Create a dummy node that adds a reference to the root node, preventing
  // it from being deleted. (This matters if the root is an operand of the
  // dead node.)
  HandleSDNode Dummy(getRoot());

  RemoveDeadNodes(DeadNodes);
}

void SelectionDAG::DeleteNode(SDNode *N) {
  // First take this out of the appropriate CSE map.
  RemoveNodeFromCSEMaps(N);

  // Finally, remove uses due to operands of this node, remove from the
  // AllNodes list, and delete the node.
  DeleteNodeNotInCSEMaps(N);
}

void SelectionDAG::DeleteNodeNotInCSEMaps(SDNode *N) {
  assert(N != AllNodes.begin() && "Cannot delete the entry node!");
  assert(N->use_empty() && "Cannot delete a node that is not dead!");

  // Drop all of the operands and decrement used node's use counts.
  N->DropOperands();

  DeallocateNode(N);
}

void SelectionDAG::DeallocateNode(SDNode *N) {
  if (N->OperandsNeedDelete)
    delete[] N->OperandList;

  // Set the opcode to DELETED_NODE to help catch bugs when node
  // memory is reallocated.
  N->NodeType = ISD::DELETED_NODE;

  NodeAllocator.Deallocate(AllNodes.remove(N));

  // Remove the ordering of this node.
  Ordering->remove(N);

  // If any of the SDDbgValue nodes refer to this SDNode, invalidate them.
  ArrayRef<SDDbgValue*> DbgVals = DbgInfo->getSDDbgValues(N);
  for (unsigned i = 0, e = DbgVals.size(); i != e; ++i)
    DbgVals[i]->setIsInvalidated();
}

/// RemoveNodeFromCSEMaps - Take the specified node out of the CSE map that
/// corresponds to it. This is useful when we're about to delete or repurpose
/// the node. We don't want future requests for structurally identical nodes
/// to return N anymore.
bool SelectionDAG::RemoveNodeFromCSEMaps(SDNode *N) {
  bool Erased = false;
  switch (N->getOpcode()) {
  case ISD::HANDLENODE: return false;  // noop.
  case ISD::CONDCODE:
    assert(CondCodeNodes[cast<CondCodeSDNode>(N)->get()] &&
           "Cond code doesn't exist!");
    Erased = CondCodeNodes[cast<CondCodeSDNode>(N)->get()] != 0;
    CondCodeNodes[cast<CondCodeSDNode>(N)->get()] = 0;
    break;
  case ISD::ExternalSymbol:
    Erased = ExternalSymbols.erase(cast<ExternalSymbolSDNode>(N)->getSymbol());
    break;
  case ISD::TargetExternalSymbol: {
    ExternalSymbolSDNode *ESN = cast<ExternalSymbolSDNode>(N);
    Erased = TargetExternalSymbols.erase(
               std::pair<std::string,unsigned char>(ESN->getSymbol(),
                                                    ESN->getTargetFlags()));
    break;
  }
  case ISD::VALUETYPE: {
    EVT VT = cast<VTSDNode>(N)->getVT();
    if (VT.isExtended()) {
      Erased = ExtendedValueTypeNodes.erase(VT);
    } else {
      Erased = ValueTypeNodes[VT.getSimpleVT().SimpleTy] != 0;
      ValueTypeNodes[VT.getSimpleVT().SimpleTy] = 0;
    }
    break;
  }
  default:
    // Remove it from the CSE Map.
    assert(N->getOpcode() != ISD::DELETED_NODE && "DELETED_NODE in CSEMap!");
    assert(N->getOpcode() != ISD::EntryToken && "EntryToken in CSEMap!");
    Erased = CSEMap.RemoveNode(N);
    break;
  }
#ifndef NDEBUG
  // Verify that the node was actually in one of the CSE maps, unless it has a
  // flag result (which cannot be CSE'd) or is one of the special cases that
  // are not subject to CSE.
  if (!Erased && N->getValueType(N->getNumValues()-1) != MVT::Glue &&
      !N->isMachineOpcode() && !doNotCSE(N)) {
    N->dump(this);
    dbgs() << "\n";
    llvm_unreachable("Node is not in map!");
  }
#endif
  return Erased;
}

/// AddModifiedNodeToCSEMaps - The specified node has been removed from the
/// CSE maps and modified in place. Add it back to the CSE maps, unless an
/// identical node already exists, in which case transfer all its users to the
/// existing node. This transfer can potentially trigger recursive merging.
///
void
SelectionDAG::AddModifiedNodeToCSEMaps(SDNode *N) {
  // For node types that aren't CSE'd, just act as if no identical node
  // already exists.
  if (!doNotCSE(N)) {
    SDNode *Existing = CSEMap.GetOrInsertNode(N);
    if (Existing != N) {
      // If there was already an existing matching node, use ReplaceAllUsesWith
      // to replace the dead one with the existing one. This can cause
      // recursive merging of other unrelated nodes down the line.
      ReplaceAllUsesWith(N, Existing);

      // N is now dead. Inform the listeners and delete it.
      for (DAGUpdateListener *DUL = UpdateListeners; DUL; DUL = DUL->Next)
        DUL->NodeDeleted(N, Existing);
      DeleteNodeNotInCSEMaps(N);
      return;
    }
  }

  // If the node doesn't already exist, we updated it. Inform listeners.
  for (DAGUpdateListener *DUL = UpdateListeners; DUL; DUL = DUL->Next)
    DUL->NodeUpdated(N);
}

/// FindModifiedNodeSlot - Find a slot for the specified node if its operands
/// were replaced with those specified. If this node is never memoized,
/// return null, otherwise return a pointer to the slot it would take. If a
/// node already exists with these operands, the slot will be non-null.
SDNode *SelectionDAG::FindModifiedNodeSlot(SDNode *N, SDValue Op,
                                           void *&InsertPos) {
  if (doNotCSE(N))
    return 0;

  SDValue Ops[] = { Op };
  FoldingSetNodeID ID;
  AddNodeIDNode(ID, N->getOpcode(), N->getVTList(), Ops, 1);
  AddNodeIDCustom(ID, N);
  SDNode *Node = CSEMap.FindNodeOrInsertPos(ID, InsertPos);
  return Node;
}

/// FindModifiedNodeSlot - Find a slot for the specified node if its operands
/// were replaced with those specified. If this node is never memoized,
/// return null, otherwise return a pointer to the slot it would take. If a
/// node already exists with these operands, the slot will be non-null.
SDNode *SelectionDAG::FindModifiedNodeSlot(SDNode *N,
                                           SDValue Op1, SDValue Op2,
                                           void *&InsertPos) {
  if (doNotCSE(N))
    return 0;

  SDValue Ops[] = { Op1, Op2 };
  FoldingSetNodeID ID;
  AddNodeIDNode(ID, N->getOpcode(), N->getVTList(), Ops, 2);
  AddNodeIDCustom(ID, N);
  SDNode *Node = CSEMap.FindNodeOrInsertPos(ID, InsertPos);
  return Node;
}

/// FindModifiedNodeSlot - Find a slot for the specified node if its operands
/// were replaced with those specified. If this node is never memoized,
/// return null, otherwise return a pointer to the slot it would take. If a
/// node already exists with these operands, the slot will be non-null.
SDNode *SelectionDAG::FindModifiedNodeSlot(SDNode *N,
                                           const SDValue *Ops, unsigned NumOps,
                                           void *&InsertPos) {
  if (doNotCSE(N))
    return 0;

  FoldingSetNodeID ID;
  AddNodeIDNode(ID, N->getOpcode(), N->getVTList(), Ops, NumOps);
  AddNodeIDCustom(ID, N);
  SDNode *Node = CSEMap.FindNodeOrInsertPos(ID, InsertPos);
  return Node;
}

#ifndef NDEBUG
/// VerifyNodeCommon - Sanity check the given node. Aborts if it is invalid.
static void VerifyNodeCommon(SDNode *N) {
  switch (N->getOpcode()) {
  default:
    break;
  case ISD::BUILD_PAIR: {
    EVT VT = N->getValueType(0);
    assert(N->getNumValues() == 1 && "Too many results!");
    assert(!VT.isVector() && (VT.isInteger() || VT.isFloatingPoint()) &&
           "Wrong return type!");
    assert(N->getNumOperands() == 2 && "Wrong number of operands!");
    assert(N->getOperand(0).getValueType() == N->getOperand(1).getValueType() &&
           "Mismatched operand types!");
    assert(N->getOperand(0).getValueType().isInteger() == VT.isInteger() &&
           "Wrong operand type!");
    assert(VT.getSizeInBits() == 2 * N->getOperand(0).getValueSizeInBits() &&
           "Wrong return type size");
    break;
  }
  case ISD::BUILD_VECTOR: {
    assert(N->getNumValues() == 1 && "Too many results!");
    assert(N->getValueType(0).isVector() && "Wrong return type!");
    assert(N->getNumOperands() == N->getValueType(0).getVectorNumElements() &&
           "Wrong number of operands!");
    EVT EltVT = N->getValueType(0).getVectorElementType();
    for (SDNode::op_iterator I = N->op_begin(), E = N->op_end(); I != E; ++I) {
      assert((I->getValueType() == EltVT ||
              (EltVT.isInteger() && I->getValueType().isInteger() &&
               EltVT.bitsLE(I->getValueType()))) &&
             "Wrong operand type!");
      assert(I->getValueType() == N->getOperand(0).getValueType() &&
             "Operands must all have the same type");
    }
    break;
  }
  }
}

/// VerifySDNode - Sanity check the given SDNode. Aborts if it is invalid.
static void VerifySDNode(SDNode *N) {
  // The SDNode allocators cannot be used to allocate nodes with fields that
  // are not present in an SDNode!
  assert(!isa<MemSDNode>(N) && "Bad MemSDNode!");
  assert(!isa<ShuffleVectorSDNode>(N) && "Bad ShuffleVectorSDNode!");
  assert(!isa<ConstantSDNode>(N) && "Bad ConstantSDNode!");
  assert(!isa<ConstantFPSDNode>(N) && "Bad ConstantFPSDNode!");
  assert(!isa<GlobalAddressSDNode>(N) && "Bad GlobalAddressSDNode!");
  assert(!isa<FrameIndexSDNode>(N) && "Bad FrameIndexSDNode!");
  assert(!isa<JumpTableSDNode>(N) && "Bad JumpTableSDNode!");
  assert(!isa<ConstantPoolSDNode>(N) && "Bad ConstantPoolSDNode!");
  assert(!isa<BasicBlockSDNode>(N) && "Bad BasicBlockSDNode!");
  assert(!isa<SrcValueSDNode>(N) && "Bad SrcValueSDNode!");
  assert(!isa<MDNodeSDNode>(N) && "Bad MDNodeSDNode!");
  assert(!isa<RegisterSDNode>(N) && "Bad RegisterSDNode!");
  assert(!isa<BlockAddressSDNode>(N) && "Bad BlockAddressSDNode!");
  assert(!isa<EHLabelSDNode>(N) && "Bad EHLabelSDNode!");
  assert(!isa<ExternalSymbolSDNode>(N) && "Bad ExternalSymbolSDNode!");
  assert(!isa<CondCodeSDNode>(N) && "Bad CondCodeSDNode!");
  assert(!isa<CvtRndSatSDNode>(N) && "Bad CvtRndSatSDNode!");
  assert(!isa<VTSDNode>(N) && "Bad VTSDNode!");
  assert(!isa<MachineSDNode>(N) && "Bad MachineSDNode!");

  VerifyNodeCommon(N);
}

/// VerifyMachineNode - Sanity check the given MachineNode. Aborts if it is
/// invalid.
static void VerifyMachineNode(SDNode *N) {
  // The MachineNode allocators cannot be used to allocate nodes with fields
  // that are not present in a MachineNode!
  // Currently there are no such nodes.

  VerifyNodeCommon(N);
}
#endif // NDEBUG

/// getEVTAlignment - Compute the default alignment value for the
/// given type.
///
unsigned SelectionDAG::getEVTAlignment(EVT VT) const {
  Type *Ty = VT == MVT::iPTR ?
                   PointerType::get(Type::getInt8Ty(*getContext()), 0) :
                   VT.getTypeForEVT(*getContext());

  return TLI.getTargetData()->getABITypeAlignment(Ty);
}

// EntryNode could meaningfully have debug info if we can find it...
SelectionDAG::SelectionDAG(const TargetMachine &tm, CodeGenOpt::Level OL)
  : TM(tm), TLI(*tm.getTargetLowering()), TSI(*tm.getSelectionDAGInfo()),
    OptLevel(OL), EntryNode(ISD::EntryToken, DebugLoc(), getVTList(MVT::Other)),
    Root(getEntryNode()), Ordering(0), UpdateListeners(0) {
  AllNodes.push_back(&EntryNode);
  Ordering = new SDNodeOrdering();
  DbgInfo = new SDDbgInfo();
}

void SelectionDAG::init(MachineFunction &mf) {
  MF = &mf;
  Context = &mf.getFunction()->getContext();
}

SelectionDAG::~SelectionDAG() {
  assert(!UpdateListeners && "Dangling registered DAGUpdateListeners");
  allnodes_clear();
  delete Ordering;
  delete DbgInfo;
}

void SelectionDAG::allnodes_clear() {
  assert(&*AllNodes.begin() == &EntryNode);
  AllNodes.remove(AllNodes.begin());
  while (!AllNodes.empty())
    DeallocateNode(AllNodes.begin());
}

void SelectionDAG::clear() {
  allnodes_clear();
  OperandAllocator.Reset();
  CSEMap.clear();

  ExtendedValueTypeNodes.clear();
  ExternalSymbols.clear();
  TargetExternalSymbols.clear();
  std::fill(CondCodeNodes.begin(), CondCodeNodes.end(),
            static_cast<CondCodeSDNode*>(0));
  std::fill(ValueTypeNodes.begin(), ValueTypeNodes.end(),
            static_cast<SDNode*>(0));

  EntryNode.UseList = 0;
  AllNodes.push_back(&EntryNode);
  Root = getEntryNode();
  Ordering->clear();
  DbgInfo->clear();
}

SDValue SelectionDAG::getAnyExtOrTrunc(SDValue Op, DebugLoc DL, EVT VT) {
  return VT.bitsGT(Op.getValueType()) ?
    getNode(ISD::ANY_EXTEND, DL, VT, Op) :
    getNode(ISD::TRUNCATE, DL, VT, Op);
}

SDValue SelectionDAG::getSExtOrTrunc(SDValue Op, DebugLoc DL, EVT VT) {
  return VT.bitsGT(Op.getValueType()) ?
    getNode(ISD::SIGN_EXTEND, DL, VT, Op) :
    getNode(ISD::TRUNCATE, DL, VT, Op);
}

SDValue SelectionDAG::getZExtOrTrunc(SDValue Op, DebugLoc DL, EVT VT) {
  return VT.bitsGT(Op.getValueType()) ?
    getNode(ISD::ZERO_EXTEND, DL, VT, Op) :
    getNode(ISD::TRUNCATE, DL, VT, Op);
}

SDValue SelectionDAG::getZeroExtendInReg(SDValue Op, DebugLoc DL, EVT VT) {
  assert(!VT.isVector() &&
         "getZeroExtendInReg should use the vector element type instead of "
         "the vector type!");
  if (Op.getValueType() == VT) return Op;
  unsigned BitWidth = Op.getValueType().getScalarType().getSizeInBits();
  APInt Imm = APInt::getLowBitsSet(BitWidth,
                                   VT.getSizeInBits());
  return getNode(ISD::AND, DL, Op.getValueType(), Op,
                 getConstant(Imm, Op.getValueType()));
}
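
// For example, zero-extending in-register from i8 within an i32 value yields
// (and x, 0xFF): the mask keeps only the low 8 bits, giving zext-in-reg
// semantics without changing the value type.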

/// getNOT - Create a bitwise NOT operation as (XOR Val, -1).
///
SDValue SelectionDAG::getNOT(DebugLoc DL, SDValue Val, EVT VT) {
  EVT EltVT = VT.getScalarType();
  SDValue NegOne =
    getConstant(APInt::getAllOnesValue(EltVT.getSizeInBits()), VT);
  return getNode(ISD::XOR, DL, VT, Val, NegOne);
}

SDValue SelectionDAG::getConstant(uint64_t Val, EVT VT, bool isT) {
  EVT EltVT = VT.getScalarType();
  assert((EltVT.getSizeInBits() >= 64 ||
          (uint64_t)((int64_t)Val >> EltVT.getSizeInBits()) + 1 < 2) &&
         "getConstant with a uint64_t value that doesn't fit in the type!");
  return getConstant(APInt(EltVT.getSizeInBits(), Val), VT, isT);
}

SDValue SelectionDAG::getConstant(const APInt &Val, EVT VT, bool isT) {
  return getConstant(*ConstantInt::get(*Context, Val), VT, isT);
}

SDValue SelectionDAG::getConstant(const ConstantInt &Val, EVT VT, bool isT) {
  assert(VT.isInteger() && "Cannot create FP integer constant!");

  EVT EltVT = VT.getScalarType();
  const ConstantInt *Elt = &Val;

  // In some cases the vector type is legal but the element type is illegal and
  // needs to be promoted, for example v8i8 on ARM. In this case, promote the
  // inserted value (the type does not need to match the vector element type).
  // Any extra bits introduced will be truncated away.
  if (VT.isVector() && TLI.getTypeAction(*getContext(), EltVT) ==
      TargetLowering::TypePromoteInteger) {
    EltVT = TLI.getTypeToTransformTo(*getContext(), EltVT);
    APInt NewVal = Elt->getValue().zext(EltVT.getSizeInBits());
    Elt = ConstantInt::get(*getContext(), NewVal);
  }

  assert(Elt->getBitWidth() == EltVT.getSizeInBits() &&
         "APInt size does not match type size!");
  unsigned Opc = isT ? ISD::TargetConstant : ISD::Constant;
  FoldingSetNodeID ID;
  AddNodeIDNode(ID, Opc, getVTList(EltVT), 0, 0);
  ID.AddPointer(Elt);
  void *IP = 0;
  SDNode *N = NULL;
  if ((N = CSEMap.FindNodeOrInsertPos(ID, IP)))
    if (!VT.isVector())
      return SDValue(N, 0);

  if (!N) {
    N = new (NodeAllocator) ConstantSDNode(isT, Elt, EltVT);
    CSEMap.InsertNode(N, IP);
    AllNodes.push_back(N);
  }

  SDValue Result(N, 0);
  if (VT.isVector()) {
    SmallVector<SDValue, 8> Ops;
    Ops.assign(VT.getVectorNumElements(), Result);
    Result = getNode(ISD::BUILD_VECTOR, DebugLoc(), VT, &Ops[0], Ops.size());
  }
  return Result;
}

SDValue SelectionDAG::getIntPtrConstant(uint64_t Val, bool isTarget) {
  return getConstant(Val, TLI.getPointerTy(), isTarget);
}
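
// Note that requesting a constant of vector type splats the scalar: e.g. a
// constant 1 of type v4i32 CSEs a single i32 ConstantSDNode and then wraps it
// in a BUILD_VECTOR with four identical operands, as done in getConstant
// above.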

SDValue SelectionDAG::getConstantFP(const APFloat& V, EVT VT, bool isTarget) {
  return getConstantFP(*ConstantFP::get(*getContext(), V), VT, isTarget);
}

SDValue SelectionDAG::getConstantFP(const ConstantFP& V, EVT VT, bool isTarget){
  assert(VT.isFloatingPoint() && "Cannot create integer FP constant!");

  EVT EltVT = VT.getScalarType();

  // Do the map lookup using the actual bit pattern for the floating point
  // value, so that we don't have problems with 0.0 comparing equal to -0.0,
  // and we don't have issues with SNANs.
  unsigned Opc = isTarget ? ISD::TargetConstantFP : ISD::ConstantFP;
  FoldingSetNodeID ID;
  AddNodeIDNode(ID, Opc, getVTList(EltVT), 0, 0);
  ID.AddPointer(&V);
  void *IP = 0;
  SDNode *N = NULL;
  if ((N = CSEMap.FindNodeOrInsertPos(ID, IP)))
    if (!VT.isVector())
      return SDValue(N, 0);

  if (!N) {
    N = new (NodeAllocator) ConstantFPSDNode(isTarget, &V, EltVT);
    CSEMap.InsertNode(N, IP);
    AllNodes.push_back(N);
  }

  SDValue Result(N, 0);
  if (VT.isVector()) {
    SmallVector<SDValue, 8> Ops;
    Ops.assign(VT.getVectorNumElements(), Result);
    // FIXME DebugLoc info might be appropriate here
    Result = getNode(ISD::BUILD_VECTOR, DebugLoc(), VT, &Ops[0], Ops.size());
  }
  return Result;
}

SDValue SelectionDAG::getConstantFP(double Val, EVT VT, bool isTarget) {
  EVT EltVT = VT.getScalarType();
  if (EltVT==MVT::f32)
    return getConstantFP(APFloat((float)Val), VT, isTarget);
  else if (EltVT==MVT::f64)
    return getConstantFP(APFloat(Val), VT, isTarget);
  else if (EltVT==MVT::f80 || EltVT==MVT::f128 || EltVT==MVT::f16) {
    bool ignored;
    APFloat apf = APFloat(Val);
    apf.convert(*EVTToAPFloatSemantics(EltVT), APFloat::rmNearestTiesToEven,
                &ignored);
    return getConstantFP(apf, VT, isTarget);
  } else
    llvm_unreachable("Unsupported type in getConstantFP");
}

SDValue SelectionDAG::getGlobalAddress(const GlobalValue *GV, DebugLoc DL,
                                       EVT VT, int64_t Offset,
                                       bool isTargetGA,
                                       unsigned char TargetFlags) {
  assert((TargetFlags == 0 || isTargetGA) &&
         "Cannot set target flags on target-independent globals");

  // Truncate (with sign-extension) the offset value to the pointer size.
  EVT PTy = TLI.getPointerTy();
  unsigned BitWidth = PTy.getSizeInBits();
  if (BitWidth < 64)
    Offset = (Offset << (64 - BitWidth) >> (64 - BitWidth));

  const GlobalVariable *GVar = dyn_cast<GlobalVariable>(GV);
  if (!GVar) {
    // If GV is an alias then use the aliasee for determining thread-localness.
    if (const GlobalAlias *GA = dyn_cast<GlobalAlias>(GV))
      GVar = dyn_cast_or_null<GlobalVariable>(GA->resolveAliasedGlobal(false));
  }

  unsigned Opc;
  if (GVar && GVar->isThreadLocal())
    Opc = isTargetGA ? ISD::TargetGlobalTLSAddress : ISD::GlobalTLSAddress;
  else
    Opc = isTargetGA ? ISD::TargetGlobalAddress : ISD::GlobalAddress;

  FoldingSetNodeID ID;
  AddNodeIDNode(ID, Opc, getVTList(VT), 0, 0);
  ID.AddPointer(GV);
  ID.AddInteger(Offset);
  ID.AddInteger(TargetFlags);
  void *IP = 0;
  if (SDNode *E = CSEMap.FindNodeOrInsertPos(ID, IP))
    return SDValue(E, 0);

  SDNode *N = new (NodeAllocator) GlobalAddressSDNode(Opc, DL, GV, VT,
                                                      Offset, TargetFlags);
  CSEMap.InsertNode(N, IP);
  AllNodes.push_back(N);
  return SDValue(N, 0);
}

SDValue SelectionDAG::getFrameIndex(int FI, EVT VT, bool isTarget) {
  unsigned Opc = isTarget ? ISD::TargetFrameIndex : ISD::FrameIndex;
  FoldingSetNodeID ID;
  AddNodeIDNode(ID, Opc, getVTList(VT), 0, 0);
  ID.AddInteger(FI);
  void *IP = 0;
  if (SDNode *E = CSEMap.FindNodeOrInsertPos(ID, IP))
    return SDValue(E, 0);

  SDNode *N = new (NodeAllocator) FrameIndexSDNode(FI, VT, isTarget);
  CSEMap.InsertNode(N, IP);
  AllNodes.push_back(N);
  return SDValue(N, 0);
}

SDValue SelectionDAG::getJumpTable(int JTI, EVT VT, bool isTarget,
                                   unsigned char TargetFlags) {
  assert((TargetFlags == 0 || isTarget) &&
         "Cannot set target flags on target-independent jump tables");
  unsigned Opc = isTarget ? ISD::TargetJumpTable : ISD::JumpTable;
  FoldingSetNodeID ID;
  AddNodeIDNode(ID, Opc, getVTList(VT), 0, 0);
  ID.AddInteger(JTI);
  ID.AddInteger(TargetFlags);
  void *IP = 0;
  if (SDNode *E = CSEMap.FindNodeOrInsertPos(ID, IP))
    return SDValue(E, 0);

  SDNode *N = new (NodeAllocator) JumpTableSDNode(JTI, VT, isTarget,
                                                  TargetFlags);
  CSEMap.InsertNode(N, IP);
  AllNodes.push_back(N);
  return SDValue(N, 0);
}

SDValue SelectionDAG::getConstantPool(const Constant *C, EVT VT,
                                      unsigned Alignment, int Offset,
                                      bool isTarget,
                                      unsigned char TargetFlags) {
  assert((TargetFlags == 0 || isTarget) &&
         "Cannot set target flags on target-independent constant pools");
  if (Alignment == 0)
    Alignment = TLI.getTargetData()->getPrefTypeAlignment(C->getType());
  unsigned Opc = isTarget ? ISD::TargetConstantPool : ISD::ConstantPool;
  FoldingSetNodeID ID;
  AddNodeIDNode(ID, Opc, getVTList(VT), 0, 0);
  ID.AddInteger(Alignment);
  ID.AddInteger(Offset);
  ID.AddPointer(C);
  ID.AddInteger(TargetFlags);
  void *IP = 0;
  if (SDNode *E = CSEMap.FindNodeOrInsertPos(ID, IP))
    return SDValue(E, 0);

  SDNode *N = new (NodeAllocator) ConstantPoolSDNode(isTarget, C, VT, Offset,
                                                     Alignment, TargetFlags);
  CSEMap.InsertNode(N, IP);
  AllNodes.push_back(N);
  return SDValue(N, 0);
}
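
// When Alignment is 0, the entry inherits the preferred alignment of its type
// from TargetData, and that alignment also becomes part of the node's CSE
// identity via ID.AddInteger(Alignment); e.g. a <4 x float> constant would
// typically get 16-byte alignment on targets that prefer it.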

SDValue SelectionDAG::getConstantPool(MachineConstantPoolValue *C, EVT VT,
                                      unsigned Alignment, int Offset,
                                      bool isTarget,
                                      unsigned char TargetFlags) {
  assert((TargetFlags == 0 || isTarget) &&
         "Cannot set target flags on target-independent constant pools");
  if (Alignment == 0)
    Alignment = TLI.getTargetData()->getPrefTypeAlignment(C->getType());
  unsigned Opc = isTarget ? ISD::TargetConstantPool : ISD::ConstantPool;
  FoldingSetNodeID ID;
  AddNodeIDNode(ID, Opc, getVTList(VT), 0, 0);
  ID.AddInteger(Alignment);
  ID.AddInteger(Offset);
  C->addSelectionDAGCSEId(ID);
  ID.AddInteger(TargetFlags);
  void *IP = 0;
  if (SDNode *E = CSEMap.FindNodeOrInsertPos(ID, IP))
    return SDValue(E, 0);

  SDNode *N = new (NodeAllocator) ConstantPoolSDNode(isTarget, C, VT, Offset,
                                                     Alignment, TargetFlags);
  CSEMap.InsertNode(N, IP);
  AllNodes.push_back(N);
  return SDValue(N, 0);
}

SDValue SelectionDAG::getBasicBlock(MachineBasicBlock *MBB) {
  FoldingSetNodeID ID;
  AddNodeIDNode(ID, ISD::BasicBlock, getVTList(MVT::Other), 0, 0);
  ID.AddPointer(MBB);
  void *IP = 0;
  if (SDNode *E = CSEMap.FindNodeOrInsertPos(ID, IP))
    return SDValue(E, 0);

  SDNode *N = new (NodeAllocator) BasicBlockSDNode(MBB);
  CSEMap.InsertNode(N, IP);
  AllNodes.push_back(N);
  return SDValue(N, 0);
}

SDValue SelectionDAG::getValueType(EVT VT) {
  if (VT.isSimple() && (unsigned)VT.getSimpleVT().SimpleTy >=
      ValueTypeNodes.size())
    ValueTypeNodes.resize(VT.getSimpleVT().SimpleTy+1);

  SDNode *&N = VT.isExtended() ?
    ExtendedValueTypeNodes[VT] : ValueTypeNodes[VT.getSimpleVT().SimpleTy];

  if (N) return SDValue(N, 0);
  N = new (NodeAllocator) VTSDNode(VT);
  AllNodes.push_back(N);
  return SDValue(N, 0);
}

SDValue SelectionDAG::getExternalSymbol(const char *Sym, EVT VT) {
  SDNode *&N = ExternalSymbols[Sym];
  if (N) return SDValue(N, 0);
  N = new (NodeAllocator) ExternalSymbolSDNode(false, Sym, 0, VT);
  AllNodes.push_back(N);
  return SDValue(N, 0);
}

SDValue SelectionDAG::getTargetExternalSymbol(const char *Sym, EVT VT,
                                              unsigned char TargetFlags) {
  SDNode *&N =
    TargetExternalSymbols[std::pair<std::string,unsigned char>(Sym,
                                                               TargetFlags)];
  if (N) return SDValue(N, 0);
  N = new (NodeAllocator) ExternalSymbolSDNode(true, Sym, TargetFlags, VT);
  AllNodes.push_back(N);
  return SDValue(N, 0);
}

SDValue SelectionDAG::getCondCode(ISD::CondCode Cond) {
  if ((unsigned)Cond >= CondCodeNodes.size())
    CondCodeNodes.resize(Cond+1);

  if (CondCodeNodes[Cond] == 0) {
    CondCodeSDNode *N = new (NodeAllocator) CondCodeSDNode(Cond);
    CondCodeNodes[Cond] = N;
    AllNodes.push_back(N);
  }

  return SDValue(CondCodeNodes[Cond], 0);
}

// commuteShuffle - swaps the values of N1 and N2, and swaps all indices in
// the shuffle mask M that point at N1 to point at N2, and indices that point
// at N2 to point at N1.
static void commuteShuffle(SDValue &N1, SDValue &N2, SmallVectorImpl<int> &M) {
  std::swap(N1, N2);
  int NElts = M.size();
  for (int i = 0; i != NElts; ++i) {
    if (M[i] >= NElts)
      M[i] -= NElts;
    else if (M[i] >= 0)
      M[i] += NElts;
  }
}

SDValue SelectionDAG::getVectorShuffle(EVT VT, DebugLoc dl, SDValue N1,
                                       SDValue N2, const int *Mask) {
  assert(N1.getValueType() == N2.getValueType() && "Invalid VECTOR_SHUFFLE");
  assert(VT.isVector() && N1.getValueType().isVector() &&
         "Vector Shuffle VTs must be vectors");
  assert(VT.getVectorElementType() == N1.getValueType().getVectorElementType()
         && "Vector Shuffle VTs must have same element type");

  // Canonicalize shuffle undef, undef -> undef
  if (N1.getOpcode() == ISD::UNDEF && N2.getOpcode() == ISD::UNDEF)
    return getUNDEF(VT);

  // Validate that all indices in Mask are within the range of the elements
  // input to the shuffle.
  unsigned NElts = VT.getVectorNumElements();
  SmallVector<int, 8> MaskVec;
  for (unsigned i = 0; i != NElts; ++i) {
    assert(Mask[i] < (int)(NElts * 2) && "Index out of range");
    MaskVec.push_back(Mask[i]);
  }

  // Canonicalize shuffle v, v -> v, undef
  if (N1 == N2) {
    N2 = getUNDEF(VT);
    for (unsigned i = 0; i != NElts; ++i)
      if (MaskVec[i] >= (int)NElts) MaskVec[i] -= NElts;
  }

  // Canonicalize shuffle undef, v -> v, undef.  Commute the shuffle mask.
  if (N1.getOpcode() == ISD::UNDEF)
    commuteShuffle(N1, N2, MaskVec);

  // Canonicalize all indices into lhs, -> shuffle lhs, undef
  // Canonicalize all indices into rhs, -> shuffle rhs, undef
  bool AllLHS = true, AllRHS = true;
  bool N2Undef = N2.getOpcode() == ISD::UNDEF;
  for (unsigned i = 0; i != NElts; ++i) {
    if (MaskVec[i] >= (int)NElts) {
      if (N2Undef)
        MaskVec[i] = -1;
      else
        AllLHS = false;
    } else if (MaskVec[i] >= 0) {
      AllRHS = false;
    }
  }
  if (AllLHS && AllRHS)
    return getUNDEF(VT);
  if (AllLHS && !N2Undef)
    N2 = getUNDEF(VT);
  if (AllRHS) {
    N1 = getUNDEF(VT);
    commuteShuffle(N1, N2, MaskVec);
  }

  // If Identity shuffle, or all shuffle in to undef, return that node.
  bool AllUndef = true;
  bool Identity = true;
  for (unsigned i = 0; i != NElts; ++i) {
    if (MaskVec[i] >= 0 && MaskVec[i] != (int)i) Identity = false;
    if (MaskVec[i] >= 0) AllUndef = false;
  }
  if (Identity && NElts == N1.getValueType().getVectorNumElements())
    return N1;
  if (AllUndef)
    return getUNDEF(VT);

  FoldingSetNodeID ID;
  SDValue Ops[2] = { N1, N2 };
  AddNodeIDNode(ID, ISD::VECTOR_SHUFFLE, getVTList(VT), Ops, 2);
  for (unsigned i = 0; i != NElts; ++i)
    ID.AddInteger(MaskVec[i]);

  void* IP = 0;
  if (SDNode *E = CSEMap.FindNodeOrInsertPos(ID, IP))
    return SDValue(E, 0);

  // Allocate the mask array for the node out of the BumpPtrAllocator, since
  // SDNode doesn't have access to it.  This memory will be "leaked" when
  // the node is deallocated, but recovered when the NodeAllocator is released.
  int *MaskAlloc = OperandAllocator.Allocate<int>(NElts);
  memcpy(MaskAlloc, &MaskVec[0], NElts * sizeof(int));

  ShuffleVectorSDNode *N =
    new (NodeAllocator) ShuffleVectorSDNode(VT, dl, N1, N2, MaskAlloc);
  CSEMap.InsertNode(N, IP);
  AllNodes.push_back(N);
  return SDValue(N, 0);
}
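
// A worked canonicalization example with NElts == 4: getVectorShuffle(VT, dl,
// undef, V, <4,5,6,7>) first commutes the operands to shuffle(V, undef,
// <0,1,2,3>), which is then recognized as an identity shuffle and folded to V
// outright, so no VECTOR_SHUFFLE node is created at all.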

SDValue SelectionDAG::getConvertRndSat(EVT VT, DebugLoc dl,
                                       SDValue Val, SDValue DTy,
                                       SDValue STy, SDValue Rnd, SDValue Sat,
                                       ISD::CvtCode Code) {
  // If the src and dest types are the same and the conversion is between
  // integer types of the same sign or two floats, no conversion is necessary.
  if (DTy == STy &&
      (Code == ISD::CVT_UU || Code == ISD::CVT_SS || Code == ISD::CVT_FF))
    return Val;

  FoldingSetNodeID ID;
  SDValue Ops[] = { Val, DTy, STy, Rnd, Sat };
  AddNodeIDNode(ID, ISD::CONVERT_RNDSAT, getVTList(VT), &Ops[0], 5);
  void* IP = 0;
  if (SDNode *E = CSEMap.FindNodeOrInsertPos(ID, IP))
    return SDValue(E, 0);

  CvtRndSatSDNode *N = new (NodeAllocator) CvtRndSatSDNode(VT, dl, Ops, 5,
                                                           Code);
  CSEMap.InsertNode(N, IP);
  AllNodes.push_back(N);
  return SDValue(N, 0);
}

SDValue SelectionDAG::getRegister(unsigned RegNo, EVT VT) {
  FoldingSetNodeID ID;
  AddNodeIDNode(ID, ISD::Register, getVTList(VT), 0, 0);
  ID.AddInteger(RegNo);
  void *IP = 0;
  if (SDNode *E = CSEMap.FindNodeOrInsertPos(ID, IP))
    return SDValue(E, 0);

  SDNode *N = new (NodeAllocator) RegisterSDNode(RegNo, VT);
  CSEMap.InsertNode(N, IP);
  AllNodes.push_back(N);
  return SDValue(N, 0);
}

SDValue SelectionDAG::getRegisterMask(const uint32_t *RegMask) {
  FoldingSetNodeID ID;
  AddNodeIDNode(ID, ISD::RegisterMask, getVTList(MVT::Untyped), 0, 0);
  ID.AddPointer(RegMask);
  void *IP = 0;
  if (SDNode *E = CSEMap.FindNodeOrInsertPos(ID, IP))
    return SDValue(E, 0);

  SDNode *N = new (NodeAllocator) RegisterMaskSDNode(RegMask);
  CSEMap.InsertNode(N, IP);
  AllNodes.push_back(N);
  return SDValue(N, 0);
}

SDValue SelectionDAG::getEHLabel(DebugLoc dl, SDValue Root, MCSymbol *Label) {
  FoldingSetNodeID ID;
  SDValue Ops[] = { Root };
  AddNodeIDNode(ID, ISD::EH_LABEL, getVTList(MVT::Other), &Ops[0], 1);
  ID.AddPointer(Label);
  void *IP = 0;
  if (SDNode *E = CSEMap.FindNodeOrInsertPos(ID, IP))
    return SDValue(E, 0);

  SDNode *N = new (NodeAllocator) EHLabelSDNode(dl, Root, Label);
  CSEMap.InsertNode(N, IP);
  AllNodes.push_back(N);
  return SDValue(N, 0);
}

SDValue SelectionDAG::getBlockAddress(const BlockAddress *BA, EVT VT,
                                      bool isTarget,
                                      unsigned char TargetFlags) {
  unsigned Opc = isTarget ? ISD::TargetBlockAddress : ISD::BlockAddress;

  FoldingSetNodeID ID;
  AddNodeIDNode(ID, Opc, getVTList(VT), 0, 0);
  ID.AddPointer(BA);
  ID.AddInteger(TargetFlags);
  void *IP = 0;
  if (SDNode *E = CSEMap.FindNodeOrInsertPos(ID, IP))
    return SDValue(E, 0);

  SDNode *N = new (NodeAllocator) BlockAddressSDNode(Opc, VT, BA, TargetFlags);
  CSEMap.InsertNode(N, IP);
  AllNodes.push_back(N);
  return SDValue(N, 0);
}

SDValue SelectionDAG::getSrcValue(const Value *V) {
  assert((!V || V->getType()->isPointerTy()) &&
         "SrcValue is not a pointer?");

  FoldingSetNodeID ID;
  AddNodeIDNode(ID, ISD::SRCVALUE, getVTList(MVT::Other), 0, 0);
  ID.AddPointer(V);

  void *IP = 0;
  if (SDNode *E = CSEMap.FindNodeOrInsertPos(ID, IP))
    return SDValue(E, 0);

  SDNode *N = new (NodeAllocator) SrcValueSDNode(V);
  CSEMap.InsertNode(N, IP);
  AllNodes.push_back(N);
  return SDValue(N, 0);
}

/// getMDNode - Return an MDNodeSDNode which holds an MDNode.
SDValue SelectionDAG::getMDNode(const MDNode *MD) {
  FoldingSetNodeID ID;
  AddNodeIDNode(ID, ISD::MDNODE_SDNODE, getVTList(MVT::Other), 0, 0);
  ID.AddPointer(MD);

  void *IP = 0;
  if (SDNode *E = CSEMap.FindNodeOrInsertPos(ID, IP))
    return SDValue(E, 0);

  SDNode *N = new (NodeAllocator) MDNodeSDNode(MD);
  CSEMap.InsertNode(N, IP);
  AllNodes.push_back(N);
  return SDValue(N, 0);
}

/// getShiftAmountOperand - Return the specified value cast to
/// the target's desired shift amount type.
SDValue SelectionDAG::getShiftAmountOperand(EVT LHSTy, SDValue Op) {
  EVT OpTy = Op.getValueType();
  MVT ShTy = TLI.getShiftAmountTy(LHSTy);
  if (OpTy == ShTy || OpTy.isVector()) return Op;

  ISD::NodeType Opcode = OpTy.bitsGT(ShTy) ? ISD::TRUNCATE : ISD::ZERO_EXTEND;
  return getNode(Opcode, Op.getDebugLoc(), ShTy, Op);
}

/// CreateStackTemporary - Create a stack temporary, suitable for holding the
/// specified value type.
SDValue SelectionDAG::CreateStackTemporary(EVT VT, unsigned minAlign) {
  MachineFrameInfo *FrameInfo = getMachineFunction().getFrameInfo();
  unsigned ByteSize = VT.getStoreSize();
  Type *Ty = VT.getTypeForEVT(*getContext());
  unsigned StackAlign =
    std::max((unsigned)TLI.getTargetData()->getPrefTypeAlignment(Ty), minAlign);

  int FrameIdx = FrameInfo->CreateStackObject(ByteSize, StackAlign, false);
  return getFrameIndex(FrameIdx, TLI.getPointerTy());
}

/// CreateStackTemporary - Create a stack temporary suitable for holding
/// either of the specified value types.
SDValue SelectionDAG::CreateStackTemporary(EVT VT1, EVT VT2) {
  unsigned Bytes = std::max(VT1.getStoreSizeInBits(),
                            VT2.getStoreSizeInBits())/8;
  Type *Ty1 = VT1.getTypeForEVT(*getContext());
  Type *Ty2 = VT2.getTypeForEVT(*getContext());
  const TargetData *TD = TLI.getTargetData();
  unsigned Align = std::max(TD->getPrefTypeAlignment(Ty1),
                            TD->getPrefTypeAlignment(Ty2));

  MachineFrameInfo *FrameInfo = getMachineFunction().getFrameInfo();
  int FrameIdx = FrameInfo->CreateStackObject(Bytes, Align, false);
  return getFrameIndex(FrameIdx, TLI.getPointerTy());
}
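
// For example, CreateStackTemporary(MVT::i64, MVT::f64) sizes the slot for
// the larger of the two store sizes (8 bytes here) and aligns it to the
// stricter of the two preferred alignments, so a value of either type can be
// spilled to and reloaded from the same frame index.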

SDValue SelectionDAG::FoldSetCC(EVT VT, SDValue N1,
                                SDValue N2, ISD::CondCode Cond, DebugLoc dl) {
  // These setcc operations always fold.
  switch (Cond) {
  default: break;
  case ISD::SETFALSE:
  case ISD::SETFALSE2: return getConstant(0, VT);
  case ISD::SETTRUE:
  case ISD::SETTRUE2:  return getConstant(1, VT);

  case ISD::SETOEQ:
  case ISD::SETOGT:
  case ISD::SETOGE:
  case ISD::SETOLT:
  case ISD::SETOLE:
  case ISD::SETONE:
  case ISD::SETO:
  case ISD::SETUO:
  case ISD::SETUEQ:
  case ISD::SETUNE:
    assert(!N1.getValueType().isInteger() && "Illegal setcc for integer!");
    break;
  }

  if (ConstantSDNode *N2C = dyn_cast<ConstantSDNode>(N2.getNode())) {
    const APInt &C2 = N2C->getAPIntValue();
    if (ConstantSDNode *N1C = dyn_cast<ConstantSDNode>(N1.getNode())) {
      const APInt &C1 = N1C->getAPIntValue();

      switch (Cond) {
      default: llvm_unreachable("Unknown integer setcc!");
      case ISD::SETEQ:  return getConstant(C1 == C2, VT);
      case ISD::SETNE:  return getConstant(C1 != C2, VT);
      case ISD::SETULT: return getConstant(C1.ult(C2), VT);
      case ISD::SETUGT: return getConstant(C1.ugt(C2), VT);
      case ISD::SETULE: return getConstant(C1.ule(C2), VT);
      case ISD::SETUGE: return getConstant(C1.uge(C2), VT);
      case ISD::SETLT:  return getConstant(C1.slt(C2), VT);
      case ISD::SETGT:  return getConstant(C1.sgt(C2), VT);
      case ISD::SETLE:  return getConstant(C1.sle(C2), VT);
      case ISD::SETGE:  return getConstant(C1.sge(C2), VT);
      }
    }
  }
  if (ConstantFPSDNode *N1C = dyn_cast<ConstantFPSDNode>(N1.getNode())) {
    if (ConstantFPSDNode *N2C = dyn_cast<ConstantFPSDNode>(N2.getNode())) {
      // No compile time operations on this type yet.
      if (N1C->getValueType(0) == MVT::ppcf128)
        return SDValue();

      APFloat::cmpResult R = N1C->getValueAPF().compare(N2C->getValueAPF());
      switch (Cond) {
      default: break;
      case ISD::SETEQ:  if (R==APFloat::cmpUnordered)
                          return getUNDEF(VT);
                        // fall through
      case ISD::SETOEQ: return getConstant(R==APFloat::cmpEqual, VT);
      case ISD::SETNE:  if (R==APFloat::cmpUnordered)
                          return getUNDEF(VT);
                        // fall through
      case ISD::SETONE: return getConstant(R==APFloat::cmpGreaterThan ||
                                           R==APFloat::cmpLessThan, VT);
      case ISD::SETLT:  if (R==APFloat::cmpUnordered)
                          return getUNDEF(VT);
                        // fall through
      case ISD::SETOLT: return getConstant(R==APFloat::cmpLessThan, VT);
      case ISD::SETGT:  if (R==APFloat::cmpUnordered)
                          return getUNDEF(VT);
                        // fall through
      case ISD::SETOGT: return getConstant(R==APFloat::cmpGreaterThan, VT);
      case ISD::SETLE:  if (R==APFloat::cmpUnordered)
                          return getUNDEF(VT);
                        // fall through
      case ISD::SETOLE: return getConstant(R==APFloat::cmpLessThan ||
                                           R==APFloat::cmpEqual, VT);
      case ISD::SETGE:  if (R==APFloat::cmpUnordered)
                          return getUNDEF(VT);
                        // fall through
      case ISD::SETOGE: return getConstant(R==APFloat::cmpGreaterThan ||
                                           R==APFloat::cmpEqual, VT);
      case ISD::SETO:   return getConstant(R!=APFloat::cmpUnordered, VT);
      case ISD::SETUO:  return getConstant(R==APFloat::cmpUnordered, VT);
      case ISD::SETUEQ: return getConstant(R==APFloat::cmpUnordered ||
                                           R==APFloat::cmpEqual, VT);
      case ISD::SETUNE: return getConstant(R!=APFloat::cmpEqual, VT);
      case ISD::SETULT: return getConstant(R==APFloat::cmpUnordered ||
                                           R==APFloat::cmpLessThan, VT);
      case ISD::SETUGT: return getConstant(R==APFloat::cmpGreaterThan ||
                                           R==APFloat::cmpUnordered, VT);
      case ISD::SETULE: return getConstant(R!=APFloat::cmpGreaterThan, VT);
      case ISD::SETUGE: return getConstant(R!=APFloat::cmpLessThan, VT);
      }
    } else {
      // Ensure that the constant occurs on the RHS.
      return getSetCC(dl, VT, N2, N1, ISD::getSetCCSwappedOperands(Cond));
    }
  }

  // Could not fold it.
  return SDValue();
}
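
// Two sample foldings from above: integer constants 2 and 3 compared with
// SETULT fold to getConstant(1, VT), and a floating-point SETEQ whose
// constant operands compare unordered (a NaN is involved) folds to undef,
// since the "don't care" integer codes leave NaN behavior unspecified.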
getConstant(R!=APFloat::cmpGreaterThan, VT); 1602 case ISD::SETUGE: return getConstant(R!=APFloat::cmpLessThan, VT); 1603 } 1604 } else { 1605 // Ensure that the constant occurs on the RHS. 1606 return getSetCC(dl, VT, N2, N1, ISD::getSetCCSwappedOperands(Cond)); 1607 } 1608 } 1609 1610 // Could not fold it. 1611 return SDValue(); 1612 } 1613 1614 /// SignBitIsZero - Return true if the sign bit of Op is known to be zero. We 1615 /// use this predicate to simplify operations downstream. 1616 bool SelectionDAG::SignBitIsZero(SDValue Op, unsigned Depth) const { 1617 // This predicate is not safe for vector operations. 1618 if (Op.getValueType().isVector()) 1619 return false; 1620 1621 unsigned BitWidth = Op.getValueType().getScalarType().getSizeInBits(); 1622 return MaskedValueIsZero(Op, APInt::getSignBit(BitWidth), Depth); 1623 } 1624 1625 /// MaskedValueIsZero - Return true if 'V & Mask' is known to be zero. We use 1626 /// this predicate to simplify operations downstream. It succeeds only when 1627 /// every bit set in Mask is known to be zero in V. 1628 bool SelectionDAG::MaskedValueIsZero(SDValue Op, const APInt &Mask, 1629 unsigned Depth) const { 1630 APInt KnownZero, KnownOne; 1631 ComputeMaskedBits(Op, KnownZero, KnownOne, Depth); 1632 assert((KnownZero & KnownOne) == 0 && "Bits known to be one AND zero?"); 1633 return (KnownZero & Mask) == Mask; 1634 } 1635 1636 /// ComputeMaskedBits - Determine which bits of Op are known to be either 1637 /// zero or one and return them in the KnownZero/KnownOne bitsets. The 1638 /// analysis recurses into operands, capped at a fixed depth to short-circuit 1639 /// processing. 1640 void SelectionDAG::ComputeMaskedBits(SDValue Op, APInt &KnownZero, 1641 APInt &KnownOne, unsigned Depth) const { 1642 unsigned BitWidth = Op.getValueType().getScalarType().getSizeInBits(); 1643 1644 KnownZero = KnownOne = APInt(BitWidth, 0); // Don't know anything. 1645 if (Depth == 6) 1646 return; // Limit search depth. 1647 1648 APInt KnownZero2, KnownOne2; 1649 1650 switch (Op.getOpcode()) { 1651 case ISD::Constant: 1652 // We know all of the bits for a constant! 1653 KnownOne = cast<ConstantSDNode>(Op)->getAPIntValue(); 1654 KnownZero = ~KnownOne; 1655 return; 1656 case ISD::AND: 1657 // If either the LHS or the RHS is zero, the result is zero. 1658 ComputeMaskedBits(Op.getOperand(1), KnownZero, KnownOne, Depth+1); 1659 ComputeMaskedBits(Op.getOperand(0), KnownZero2, KnownOne2, Depth+1); 1660 assert((KnownZero & KnownOne) == 0 && "Bits known to be one AND zero?"); 1661 assert((KnownZero2 & KnownOne2) == 0 && "Bits known to be one AND zero?"); 1662 1663 // Output known-1 bits are only known if set in both the LHS & RHS. 1664 KnownOne &= KnownOne2; 1665 // Output known-0 bits are known to be clear if zero in either the LHS or RHS. 1666 KnownZero |= KnownZero2; 1667 return; 1668 case ISD::OR: 1669 ComputeMaskedBits(Op.getOperand(1), KnownZero, KnownOne, Depth+1); 1670 ComputeMaskedBits(Op.getOperand(0), KnownZero2, KnownOne2, Depth+1); 1671 assert((KnownZero & KnownOne) == 0 && "Bits known to be one AND zero?"); 1672 assert((KnownZero2 & KnownOne2) == 0 && "Bits known to be one AND zero?"); 1673 1674 // Output known-0 bits are only known if clear in both the LHS & RHS. 1675 KnownZero &= KnownZero2; 1676 // Output known-1 bits are known to be set if set in either the LHS or RHS.
1677 KnownOne |= KnownOne2; 1678 return; 1679 case ISD::XOR: { 1680 ComputeMaskedBits(Op.getOperand(1), KnownZero, KnownOne, Depth+1); 1681 ComputeMaskedBits(Op.getOperand(0), KnownZero2, KnownOne2, Depth+1); 1682 assert((KnownZero & KnownOne) == 0 && "Bits known to be one AND zero?"); 1683 assert((KnownZero2 & KnownOne2) == 0 && "Bits known to be one AND zero?"); 1684 1685 // Output known-0 bits are known if clear or set in both the LHS & RHS. 1686 APInt KnownZeroOut = (KnownZero & KnownZero2) | (KnownOne & KnownOne2); 1687 // Output known-1 bits are known to be set if set in only one of the LHS, RHS. 1688 KnownOne = (KnownZero & KnownOne2) | (KnownOne & KnownZero2); 1689 KnownZero = KnownZeroOut; 1690 return; 1691 } 1692 case ISD::MUL: { 1693 ComputeMaskedBits(Op.getOperand(1), KnownZero, KnownOne, Depth+1); 1694 ComputeMaskedBits(Op.getOperand(0), KnownZero2, KnownOne2, Depth+1); 1695 assert((KnownZero & KnownOne) == 0 && "Bits known to be one AND zero?"); 1696 assert((KnownZero2 & KnownOne2) == 0 && "Bits known to be one AND zero?"); 1697 1698 // If low bits are zero in either operand, output low known-0 bits. 1699 // Also compute a conservative estimate for high known-0 bits. 1700 // More trickiness is possible, but this is sufficient for the 1701 // interesting case of alignment computation. 1702 KnownOne.clearAllBits(); 1703 unsigned TrailZ = KnownZero.countTrailingOnes() + 1704 KnownZero2.countTrailingOnes(); 1705 unsigned LeadZ = std::max(KnownZero.countLeadingOnes() + 1706 KnownZero2.countLeadingOnes(), 1707 BitWidth) - BitWidth; 1708 1709 TrailZ = std::min(TrailZ, BitWidth); 1710 LeadZ = std::min(LeadZ, BitWidth); 1711 KnownZero = APInt::getLowBitsSet(BitWidth, TrailZ) | 1712 APInt::getHighBitsSet(BitWidth, LeadZ); 1713 return; 1714 } 1715 case ISD::UDIV: { 1716 // For the purposes of computing leading zeros we can conservatively 1717 // treat a udiv as a logical right shift by the power of 2 known to 1718 // be less than the denominator. 1719 ComputeMaskedBits(Op.getOperand(0), KnownZero2, KnownOne2, Depth+1); 1720 unsigned LeadZ = KnownZero2.countLeadingOnes(); 1721 1722 KnownOne2.clearAllBits(); 1723 KnownZero2.clearAllBits(); 1724 ComputeMaskedBits(Op.getOperand(1), KnownZero2, KnownOne2, Depth+1); 1725 unsigned RHSUnknownLeadingOnes = KnownOne2.countLeadingZeros(); 1726 if (RHSUnknownLeadingOnes != BitWidth) 1727 LeadZ = std::min(BitWidth, 1728 LeadZ + BitWidth - RHSUnknownLeadingOnes - 1); 1729 1730 KnownZero = APInt::getHighBitsSet(BitWidth, LeadZ); 1731 return; 1732 } 1733 case ISD::SELECT: 1734 ComputeMaskedBits(Op.getOperand(2), KnownZero, KnownOne, Depth+1); 1735 ComputeMaskedBits(Op.getOperand(1), KnownZero2, KnownOne2, Depth+1); 1736 assert((KnownZero & KnownOne) == 0 && "Bits known to be one AND zero?"); 1737 assert((KnownZero2 & KnownOne2) == 0 && "Bits known to be one AND zero?"); 1738 1739 // Only known if known in both the LHS and RHS. 1740 KnownOne &= KnownOne2; 1741 KnownZero &= KnownZero2; 1742 return; 1743 case ISD::SELECT_CC: 1744 ComputeMaskedBits(Op.getOperand(3), KnownZero, KnownOne, Depth+1); 1745 ComputeMaskedBits(Op.getOperand(2), KnownZero2, KnownOne2, Depth+1); 1746 assert((KnownZero & KnownOne) == 0 && "Bits known to be one AND zero?"); 1747 assert((KnownZero2 & KnownOne2) == 0 && "Bits known to be one AND zero?"); 1748 1749 // Only known if known in both the LHS and RHS.
1750 KnownOne &= KnownOne2; 1751 KnownZero &= KnownZero2; 1752 return; 1753 case ISD::SADDO: 1754 case ISD::UADDO: 1755 case ISD::SSUBO: 1756 case ISD::USUBO: 1757 case ISD::SMULO: 1758 case ISD::UMULO: 1759 if (Op.getResNo() != 1) 1760 return; 1761 // The boolean result conforms to getBooleanContents. Fall through. 1762 case ISD::SETCC: 1763 // If we know the result of a setcc has the top bits zero, use this info. 1764 if (TLI.getBooleanContents(Op.getValueType().isVector()) == 1765 TargetLowering::ZeroOrOneBooleanContent && BitWidth > 1) 1766 KnownZero |= APInt::getHighBitsSet(BitWidth, BitWidth - 1); 1767 return; 1768 case ISD::SHL: 1769 // (shl X, C1) & C2 == 0 iff (X & C2 >>u C1) == 0 1770 if (ConstantSDNode *SA = dyn_cast<ConstantSDNode>(Op.getOperand(1))) { 1771 unsigned ShAmt = SA->getZExtValue(); 1772 1773 // If the shift count is an invalid immediate, don't do anything. 1774 if (ShAmt >= BitWidth) 1775 return; 1776 1777 ComputeMaskedBits(Op.getOperand(0), KnownZero, KnownOne, Depth+1); 1778 assert((KnownZero & KnownOne) == 0 && "Bits known to be one AND zero?"); 1779 KnownZero <<= ShAmt; 1780 KnownOne <<= ShAmt; 1781 // low bits known zero. 1782 KnownZero |= APInt::getLowBitsSet(BitWidth, ShAmt); 1783 } 1784 return; 1785 case ISD::SRL: 1786 // (ushr X, C1) & C2 == 0 iff (-1 >> C1) & C2 == 0 1787 if (ConstantSDNode *SA = dyn_cast<ConstantSDNode>(Op.getOperand(1))) { 1788 unsigned ShAmt = SA->getZExtValue(); 1789 1790 // If the shift count is an invalid immediate, don't do anything. 1791 if (ShAmt >= BitWidth) 1792 return; 1793 1794 ComputeMaskedBits(Op.getOperand(0), KnownZero, KnownOne, Depth+1); 1795 assert((KnownZero & KnownOne) == 0 && "Bits known to be one AND zero?"); 1796 KnownZero = KnownZero.lshr(ShAmt); 1797 KnownOne = KnownOne.lshr(ShAmt); 1798 1799 APInt HighBits = APInt::getHighBitsSet(BitWidth, ShAmt); 1800 KnownZero |= HighBits; // High bits known zero. 1801 } 1802 return; 1803 case ISD::SRA: 1804 if (ConstantSDNode *SA = dyn_cast<ConstantSDNode>(Op.getOperand(1))) { 1805 unsigned ShAmt = SA->getZExtValue(); 1806 1807 // If the shift count is an invalid immediate, don't do anything. 1808 if (ShAmt >= BitWidth) 1809 return; 1810 1811 // If any of the demanded bits are produced by the sign extension, we also 1812 // demand the input sign bit. 1813 APInt HighBits = APInt::getHighBitsSet(BitWidth, ShAmt); 1814 1815 ComputeMaskedBits(Op.getOperand(0), KnownZero, KnownOne, Depth+1); 1816 assert((KnownZero & KnownOne) == 0 && "Bits known to be one AND zero?"); 1817 KnownZero = KnownZero.lshr(ShAmt); 1818 KnownOne = KnownOne.lshr(ShAmt); 1819 1820 // Handle the sign bits. 1821 APInt SignBit = APInt::getSignBit(BitWidth); 1822 SignBit = SignBit.lshr(ShAmt); // Adjust to where it is now in the mask. 1823 1824 if (KnownZero.intersects(SignBit)) { 1825 KnownZero |= HighBits; // New bits are known zero. 1826 } else if (KnownOne.intersects(SignBit)) { 1827 KnownOne |= HighBits; // New bits are known one. 1828 } 1829 } 1830 return; 1831 case ISD::SIGN_EXTEND_INREG: { 1832 EVT EVT = cast<VTSDNode>(Op.getOperand(1))->getVT(); 1833 unsigned EBits = EVT.getScalarType().getSizeInBits(); 1834 1835 // Sign extension. Compute the demanded bits in the result that are not 1836 // present in the input. 
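// e.g. for a SIGN_EXTEND_INREG from i8 inside an i32, EBits is 8, so NewBits covers bits 8-31 and InSignBit marks bit 7.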
1837 APInt NewBits = APInt::getHighBitsSet(BitWidth, BitWidth - EBits); 1838 1839 APInt InSignBit = APInt::getSignBit(EBits); 1840 APInt InputDemandedBits = APInt::getLowBitsSet(BitWidth, EBits); 1841 1842 // If the sign extended bits are demanded, we know that the sign 1843 // bit is demanded. 1844 InSignBit = InSignBit.zext(BitWidth); 1845 if (NewBits.getBoolValue()) 1846 InputDemandedBits |= InSignBit; 1847 1848 ComputeMaskedBits(Op.getOperand(0), KnownZero, KnownOne, Depth+1); 1849 KnownOne &= InputDemandedBits; 1850 KnownZero &= InputDemandedBits; 1851 assert((KnownZero & KnownOne) == 0 && "Bits known to be one AND zero?"); 1852 1853 // If the sign bit of the input is known set or clear, then we know the 1854 // top bits of the result. 1855 if (KnownZero.intersects(InSignBit)) { // Input sign bit known clear 1856 KnownZero |= NewBits; 1857 KnownOne &= ~NewBits; 1858 } else if (KnownOne.intersects(InSignBit)) { // Input sign bit known set 1859 KnownOne |= NewBits; 1860 KnownZero &= ~NewBits; 1861 } else { // Input sign bit unknown 1862 KnownZero &= ~NewBits; 1863 KnownOne &= ~NewBits; 1864 } 1865 return; 1866 } 1867 case ISD::CTTZ: 1868 case ISD::CTTZ_ZERO_UNDEF: 1869 case ISD::CTLZ: 1870 case ISD::CTLZ_ZERO_UNDEF: 1871 case ISD::CTPOP: { 1872 unsigned LowBits = Log2_32(BitWidth)+1; 1873 KnownZero = APInt::getHighBitsSet(BitWidth, BitWidth - LowBits); 1874 KnownOne.clearAllBits(); 1875 return; 1876 } 1877 case ISD::LOAD: { 1878 LoadSDNode *LD = cast<LoadSDNode>(Op); 1879 if (ISD::isZEXTLoad(Op.getNode())) { 1880 EVT VT = LD->getMemoryVT(); 1881 unsigned MemBits = VT.getScalarType().getSizeInBits(); 1882 KnownZero |= APInt::getHighBitsSet(BitWidth, BitWidth - MemBits); 1883 } else if (const MDNode *Ranges = LD->getRanges()) { 1884 computeMaskedBitsLoad(*Ranges, KnownZero); 1885 } 1886 return; 1887 } 1888 case ISD::ZERO_EXTEND: { 1889 EVT InVT = Op.getOperand(0).getValueType(); 1890 unsigned InBits = InVT.getScalarType().getSizeInBits(); 1891 APInt NewBits = APInt::getHighBitsSet(BitWidth, BitWidth - InBits); 1892 KnownZero = KnownZero.trunc(InBits); 1893 KnownOne = KnownOne.trunc(InBits); 1894 ComputeMaskedBits(Op.getOperand(0), KnownZero, KnownOne, Depth+1); 1895 KnownZero = KnownZero.zext(BitWidth); 1896 KnownOne = KnownOne.zext(BitWidth); 1897 KnownZero |= NewBits; 1898 return; 1899 } 1900 case ISD::SIGN_EXTEND: { 1901 EVT InVT = Op.getOperand(0).getValueType(); 1902 unsigned InBits = InVT.getScalarType().getSizeInBits(); 1903 APInt InSignBit = APInt::getSignBit(InBits); 1904 APInt NewBits = APInt::getHighBitsSet(BitWidth, BitWidth - InBits); 1905 1906 KnownZero = KnownZero.trunc(InBits); 1907 KnownOne = KnownOne.trunc(InBits); 1908 ComputeMaskedBits(Op.getOperand(0), KnownZero, KnownOne, Depth+1); 1909 1910 // Note if the sign bit is known to be zero or one. 1911 bool SignBitKnownZero = KnownZero.isNegative(); 1912 bool SignBitKnownOne = KnownOne.isNegative(); 1913 assert(!(SignBitKnownZero && SignBitKnownOne) && 1914 "Sign bit can't be known to be both zero and one!"); 1915 1916 KnownZero = KnownZero.zext(BitWidth); 1917 KnownOne = KnownOne.zext(BitWidth); 1918 1919 // If the sign bit is known zero or one, the top bits match. 
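// e.g. sign-extending an i8 whose sign bit is known clear into an i32 makes bits 8-31 of the result known zero.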
1920 if (SignBitKnownZero) 1921 KnownZero |= NewBits; 1922 else if (SignBitKnownOne) 1923 KnownOne |= NewBits; 1924 return; 1925 } 1926 case ISD::ANY_EXTEND: { 1927 EVT InVT = Op.getOperand(0).getValueType(); 1928 unsigned InBits = InVT.getScalarType().getSizeInBits(); 1929 KnownZero = KnownZero.trunc(InBits); 1930 KnownOne = KnownOne.trunc(InBits); 1931 ComputeMaskedBits(Op.getOperand(0), KnownZero, KnownOne, Depth+1); 1932 KnownZero = KnownZero.zext(BitWidth); 1933 KnownOne = KnownOne.zext(BitWidth); 1934 return; 1935 } 1936 case ISD::TRUNCATE: { 1937 EVT InVT = Op.getOperand(0).getValueType(); 1938 unsigned InBits = InVT.getScalarType().getSizeInBits(); 1939 KnownZero = KnownZero.zext(InBits); 1940 KnownOne = KnownOne.zext(InBits); 1941 ComputeMaskedBits(Op.getOperand(0), KnownZero, KnownOne, Depth+1); 1942 assert((KnownZero & KnownOne) == 0 && "Bits known to be one AND zero?"); 1943 KnownZero = KnownZero.trunc(BitWidth); 1944 KnownOne = KnownOne.trunc(BitWidth); 1945 break; 1946 } 1947 case ISD::AssertZext: { 1948 EVT VT = cast<VTSDNode>(Op.getOperand(1))->getVT(); 1949 APInt InMask = APInt::getLowBitsSet(BitWidth, VT.getSizeInBits()); 1950 ComputeMaskedBits(Op.getOperand(0), KnownZero, KnownOne, Depth+1); 1951 KnownZero |= (~InMask); 1952 return; 1953 } 1954 case ISD::FGETSIGN: 1955 // All bits are zero except the low bit. 1956 KnownZero = APInt::getHighBitsSet(BitWidth, BitWidth - 1); 1957 return; 1958 1959 case ISD::SUB: { 1960 if (ConstantSDNode *CLHS = dyn_cast<ConstantSDNode>(Op.getOperand(0))) { 1961 // We know that the top bits of C-X are clear if X contains fewer bits 1962 // than C (i.e. no wrap-around can happen). For example, 20-X is 1963 // positive if we can prove that X is >= 0 and < 16. 1964 if (CLHS->getAPIntValue().isNonNegative()) { 1965 unsigned NLZ = (CLHS->getAPIntValue()+1).countLeadingZeros(); 1966 // NLZ can't be BitWidth with no sign bit 1967 APInt MaskV = APInt::getHighBitsSet(BitWidth, NLZ+1); 1968 ComputeMaskedBits(Op.getOperand(1), KnownZero2, KnownOne2, Depth+1); 1969 1970 // If all of the MaskV bits are known to be zero, then we know the 1971 // output top bits are zero, because we now know that the output is 1972 // from [0-C]. 1973 if ((KnownZero2 & MaskV) == MaskV) { 1974 unsigned NLZ2 = CLHS->getAPIntValue().countLeadingZeros(); 1975 // Top bits known zero. 1976 KnownZero = APInt::getHighBitsSet(BitWidth, NLZ2); 1977 } 1978 } 1979 } 1980 } 1981 // fall through 1982 case ISD::ADD: 1983 case ISD::ADDE: { 1984 // Output low known-0 bits are the low clear bits common to both the LHS 1985 // & RHS. For example, 8+(X<<3) is known to have the 1986 // low 3 bits clear. 1987 ComputeMaskedBits(Op.getOperand(0), KnownZero2, KnownOne2, Depth+1); 1988 assert((KnownZero2 & KnownOne2) == 0 && "Bits known to be one AND zero?"); 1989 unsigned KnownZeroOut = KnownZero2.countTrailingOnes(); 1990 1991 ComputeMaskedBits(Op.getOperand(1), KnownZero2, KnownOne2, Depth+1); 1992 assert((KnownZero2 & KnownOne2) == 0 && "Bits known to be one AND zero?"); 1993 KnownZeroOut = std::min(KnownZeroOut, 1994 KnownZero2.countTrailingOnes()); 1995 1996 if (Op.getOpcode() == ISD::ADD) { 1997 KnownZero |= APInt::getLowBitsSet(BitWidth, KnownZeroOut); 1998 return; 1999 } 2000 2001 // With ADDE, a carry bit may be added in, so we can only use this 2002 // information if we know (at least) that the low two bits are clear. We 2003 // then report to the caller that the low bit is unknown but that other 2004 // bits are known zero.
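// e.g. if both ADDE operands have their low four bits clear, bits 1-3 of the sum are known zero, while bit 0 may hold the incoming carry.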
2005 if (KnownZeroOut >= 2) // ADDE 2006 KnownZero |= APInt::getBitsSet(BitWidth, 1, KnownZeroOut); 2007 return; 2008 } 2009 case ISD::SREM: 2010 if (ConstantSDNode *Rem = dyn_cast<ConstantSDNode>(Op.getOperand(1))) { 2011 const APInt &RA = Rem->getAPIntValue().abs(); 2012 if (RA.isPowerOf2()) { 2013 APInt LowBits = RA - 1; 2014 APInt Mask2 = LowBits | APInt::getSignBit(BitWidth); 2015 ComputeMaskedBits(Op.getOperand(0), KnownZero2,KnownOne2,Depth+1); 2016 2017 // The low bits of the first operand are unchanged by the srem. 2018 KnownZero = KnownZero2 & LowBits; 2019 KnownOne = KnownOne2 & LowBits; 2020 2021 // If the first operand is non-negative or has all low bits zero, then 2022 // the upper bits are all zero. 2023 if (KnownZero2[BitWidth-1] || ((KnownZero2 & LowBits) == LowBits)) 2024 KnownZero |= ~LowBits; 2025 2026 // If the first operand is negative and not all low bits are zero, then 2027 // the upper bits are all one. 2028 if (KnownOne2[BitWidth-1] && ((KnownOne2 & LowBits) != 0)) 2029 KnownOne |= ~LowBits; 2030 assert((KnownZero & KnownOne) == 0&&"Bits known to be one AND zero?"); 2031 } 2032 } 2033 return; 2034 case ISD::UREM: { 2035 if (ConstantSDNode *Rem = dyn_cast<ConstantSDNode>(Op.getOperand(1))) { 2036 const APInt &RA = Rem->getAPIntValue(); 2037 if (RA.isPowerOf2()) { 2038 APInt LowBits = (RA - 1); 2039 KnownZero |= ~LowBits; 2040 ComputeMaskedBits(Op.getOperand(0), KnownZero, KnownOne,Depth+1); 2041 assert((KnownZero & KnownOne) == 0&&"Bits known to be one AND zero?"); 2042 break; 2043 } 2044 } 2045 2046 // Since the result is less than or equal to either operand, any leading 2047 // zero bits in either operand must also exist in the result. 2048 ComputeMaskedBits(Op.getOperand(0), KnownZero, KnownOne, Depth+1); 2049 ComputeMaskedBits(Op.getOperand(1), KnownZero2, KnownOne2, Depth+1); 2050 2051 uint32_t Leaders = std::max(KnownZero.countLeadingOnes(), 2052 KnownZero2.countLeadingOnes()); 2053 KnownOne.clearAllBits(); 2054 KnownZero = APInt::getHighBitsSet(BitWidth, Leaders); 2055 return; 2056 } 2057 case ISD::FrameIndex: 2058 case ISD::TargetFrameIndex: 2059 if (unsigned Align = InferPtrAlignment(Op)) { 2060 // The low bits are known zero if the pointer is aligned. 2061 KnownZero = APInt::getLowBitsSet(BitWidth, Log2_32(Align)); 2062 return; 2063 } 2064 break; 2065 2066 default: 2067 if (Op.getOpcode() < ISD::BUILTIN_OP_END) 2068 break; 2069 // Fallthrough 2070 case ISD::INTRINSIC_WO_CHAIN: 2071 case ISD::INTRINSIC_W_CHAIN: 2072 case ISD::INTRINSIC_VOID: 2073 // Allow the target to implement this method for its nodes. 2074 TLI.computeMaskedBitsForTargetNode(Op, KnownZero, KnownOne, *this, Depth); 2075 return; 2076 } 2077 } 2078 2079 /// ComputeNumSignBits - Return the number of times the sign bit of the 2080 /// register is replicated into the other bits. We know that at least 1 bit 2081 /// is always equal to the sign bit (itself), but other cases can give us 2082 /// information. For example, immediately after an "SRA X, 2", we know that 2083 /// the top 3 bits are all equal to each other, so we return 3. 2084 unsigned SelectionDAG::ComputeNumSignBits(SDValue Op, unsigned Depth) const{ 2085 EVT VT = Op.getValueType(); 2086 assert(VT.isInteger() && "Invalid VT!"); 2087 unsigned VTBits = VT.getScalarType().getSizeInBits(); 2088 unsigned Tmp, Tmp2; 2089 unsigned FirstAnswer = 1; 2090 2091 if (Depth == 6) 2092 return 1; // Limit search depth. 
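// (This depth cap matches the one used by ComputeMaskedBits above.)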
2093 2094 switch (Op.getOpcode()) { 2095 default: break; 2096 case ISD::AssertSext: 2097 Tmp = cast<VTSDNode>(Op.getOperand(1))->getVT().getSizeInBits(); 2098 return VTBits-Tmp+1; 2099 case ISD::AssertZext: 2100 Tmp = cast<VTSDNode>(Op.getOperand(1))->getVT().getSizeInBits(); 2101 return VTBits-Tmp; 2102 2103 case ISD::Constant: { 2104 const APInt &Val = cast<ConstantSDNode>(Op)->getAPIntValue(); 2105 return Val.getNumSignBits(); 2106 } 2107 2108 case ISD::SIGN_EXTEND: 2109 Tmp = VTBits-Op.getOperand(0).getValueType().getScalarType().getSizeInBits(); 2110 return ComputeNumSignBits(Op.getOperand(0), Depth+1) + Tmp; 2111 2112 case ISD::SIGN_EXTEND_INREG: 2113 // Max of the input and what this extends. 2114 Tmp = 2115 cast<VTSDNode>(Op.getOperand(1))->getVT().getScalarType().getSizeInBits(); 2116 Tmp = VTBits-Tmp+1; 2117 2118 Tmp2 = ComputeNumSignBits(Op.getOperand(0), Depth+1); 2119 return std::max(Tmp, Tmp2); 2120 2121 case ISD::SRA: 2122 Tmp = ComputeNumSignBits(Op.getOperand(0), Depth+1); 2123 // SRA X, C -> adds C sign bits. 2124 if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op.getOperand(1))) { 2125 Tmp += C->getZExtValue(); 2126 if (Tmp > VTBits) Tmp = VTBits; 2127 } 2128 return Tmp; 2129 case ISD::SHL: 2130 if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op.getOperand(1))) { 2131 // shl destroys sign bits. 2132 Tmp = ComputeNumSignBits(Op.getOperand(0), Depth+1); 2133 if (C->getZExtValue() >= VTBits || // Bad shift. 2134 C->getZExtValue() >= Tmp) break; // Shifted all sign bits out. 2135 return Tmp - C->getZExtValue(); 2136 } 2137 break; 2138 case ISD::AND: 2139 case ISD::OR: 2140 case ISD::XOR: // NOT is handled here. 2141 // Logical binary ops preserve the number of sign bits at the worst. 2142 Tmp = ComputeNumSignBits(Op.getOperand(0), Depth+1); 2143 if (Tmp != 1) { 2144 Tmp2 = ComputeNumSignBits(Op.getOperand(1), Depth+1); 2145 FirstAnswer = std::min(Tmp, Tmp2); 2146 // We computed what we know about the sign bits as our first 2147 // answer. Now proceed to the generic code that uses 2148 // ComputeMaskedBits, and pick whichever answer is better. 2149 } 2150 break; 2151 2152 case ISD::SELECT: 2153 Tmp = ComputeNumSignBits(Op.getOperand(1), Depth+1); 2154 if (Tmp == 1) return 1; // Early out. 2155 Tmp2 = ComputeNumSignBits(Op.getOperand(2), Depth+1); 2156 return std::min(Tmp, Tmp2); 2157 2158 case ISD::SADDO: 2159 case ISD::UADDO: 2160 case ISD::SSUBO: 2161 case ISD::USUBO: 2162 case ISD::SMULO: 2163 case ISD::UMULO: 2164 if (Op.getResNo() != 1) 2165 break; 2166 // The boolean result conforms to getBooleanContents. Fall through. 2167 case ISD::SETCC: 2168 // If setcc returns 0/-1, all bits are sign bits. 2169 if (TLI.getBooleanContents(Op.getValueType().isVector()) == 2170 TargetLowering::ZeroOrNegativeOneBooleanContent) 2171 return VTBits; 2172 break; 2173 case ISD::ROTL: 2174 case ISD::ROTR: 2175 if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op.getOperand(1))) { 2176 unsigned RotAmt = C->getZExtValue() & (VTBits-1); 2177 2178 // Handle rotate right by N like a rotate left by 32-N. 2179 if (Op.getOpcode() == ISD::ROTR) 2180 RotAmt = (VTBits-RotAmt) & (VTBits-1); 2181 2182 // If we aren't rotating out all of the known-in sign bits, return the 2183 // number that are left. This handles rotl(sext(x), 1) for example. 2184 Tmp = ComputeNumSignBits(Op.getOperand(0), Depth+1); 2185 if (Tmp > RotAmt+1) return Tmp-RotAmt; 2186 } 2187 break; 2188 case ISD::ADD: 2189 // Add can have at most one carry bit. Thus we know that the output 2190 // is, at worst, one more bit than the inputs. 
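// e.g. if each operand of an i32 ADD has at least three sign bits, the sum is guaranteed at least two.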
2191 Tmp = ComputeNumSignBits(Op.getOperand(0), Depth+1); 2192 if (Tmp == 1) return 1; // Early out. 2193 2194 // Special case decrementing a value (ADD X, -1): 2195 if (ConstantSDNode *CRHS = dyn_cast<ConstantSDNode>(Op.getOperand(1))) 2196 if (CRHS->isAllOnesValue()) { 2197 APInt KnownZero, KnownOne; 2198 ComputeMaskedBits(Op.getOperand(0), KnownZero, KnownOne, Depth+1); 2199 2200 // If the input is known to be 0 or 1, the output is 0/-1, which is all 2201 // sign bits set. 2202 if ((KnownZero | APInt(VTBits, 1)).isAllOnesValue()) 2203 return VTBits; 2204 2205 // If we are subtracting one from a positive number, there is no carry 2206 // out of the result. 2207 if (KnownZero.isNegative()) 2208 return Tmp; 2209 } 2210 2211 Tmp2 = ComputeNumSignBits(Op.getOperand(1), Depth+1); 2212 if (Tmp2 == 1) return 1; 2213 return std::min(Tmp, Tmp2)-1; 2214 2215 case ISD::SUB: 2216 Tmp2 = ComputeNumSignBits(Op.getOperand(1), Depth+1); 2217 if (Tmp2 == 1) return 1; 2218 2219 // Handle NEG. 2220 if (ConstantSDNode *CLHS = dyn_cast<ConstantSDNode>(Op.getOperand(0))) 2221 if (CLHS->isNullValue()) { 2222 APInt KnownZero, KnownOne; 2223 ComputeMaskedBits(Op.getOperand(1), KnownZero, KnownOne, Depth+1); 2224 // If the input is known to be 0 or 1, the output is 0/-1, which is all 2225 // sign bits set. 2226 if ((KnownZero | APInt(VTBits, 1)).isAllOnesValue()) 2227 return VTBits; 2228 2229 // If the input is known to be positive (the sign bit is known clear), 2230 // the output of the NEG has the same number of sign bits as the input. 2231 if (KnownZero.isNegative()) 2232 return Tmp2; 2233 2234 // Otherwise, we treat this like a SUB. 2235 } 2236 2237 // Sub can have at most one carry bit. Thus we know that the output 2238 // is, at worst, one more bit than the inputs. 2239 Tmp = ComputeNumSignBits(Op.getOperand(0), Depth+1); 2240 if (Tmp == 1) return 1; // Early out. 2241 return std::min(Tmp, Tmp2)-1; 2242 case ISD::TRUNCATE: 2243 // FIXME: it's tricky to do anything useful for this, but it is an important 2244 // case for targets like X86. 2245 break; 2246 } 2247 2248 // Handle LOADX separately here. EXTLOAD case will fallthrough. 2249 if (LoadSDNode *LD = dyn_cast<LoadSDNode>(Op)) { 2250 unsigned ExtType = LD->getExtensionType(); 2251 switch (ExtType) { 2252 default: break; 2253 case ISD::SEXTLOAD: // '17' bits known 2254 Tmp = LD->getMemoryVT().getScalarType().getSizeInBits(); 2255 return VTBits-Tmp+1; 2256 case ISD::ZEXTLOAD: // '16' bits known 2257 Tmp = LD->getMemoryVT().getScalarType().getSizeInBits(); 2258 return VTBits-Tmp; 2259 } 2260 } 2261 2262 // Allow the target to implement this method for its nodes. 2263 if (Op.getOpcode() >= ISD::BUILTIN_OP_END || 2264 Op.getOpcode() == ISD::INTRINSIC_WO_CHAIN || 2265 Op.getOpcode() == ISD::INTRINSIC_W_CHAIN || 2266 Op.getOpcode() == ISD::INTRINSIC_VOID) { 2267 unsigned NumBits = TLI.ComputeNumSignBitsForTargetNode(Op, Depth); 2268 if (NumBits > 1) FirstAnswer = std::max(FirstAnswer, NumBits); 2269 } 2270 2271 // Finally, if we can prove that the top bits of the result are 0's or 1's, 2272 // use this information. 2273 APInt KnownZero, KnownOne; 2274 ComputeMaskedBits(Op, KnownZero, KnownOne, Depth); 2275 2276 APInt Mask; 2277 if (KnownZero.isNegative()) { // sign bit is 0 2278 Mask = KnownZero; 2279 } else if (KnownOne.isNegative()) { // sign bit is 1; 2280 Mask = KnownOne; 2281 } else { 2282 // Nothing known. 2283 return FirstAnswer; 2284 } 2285 2286 // Okay, we know that the sign bit in Mask is set. 
Use CLZ to determine 2287 // the number of identical bits in the top of the input value. 2288 Mask = ~Mask; 2289 Mask <<= Mask.getBitWidth()-VTBits; 2290 // Return # leading zeros. We use 'min' here in case Mask was zero before 2291 // shifting. We don't want to return '64' for an i32 "0". 2292 return std::max(FirstAnswer, std::min(VTBits, Mask.countLeadingZeros())); 2293 } 2294 2295 /// isBaseWithConstantOffset - Return true if the specified operand is an 2296 /// ISD::ADD with a ConstantSDNode on the right-hand side, or if it is an 2297 /// ISD::OR with a ConstantSDNode that is guaranteed to have the same 2298 /// semantics as an ADD. This handles the equivalence: 2299 /// X|Cst == X+Cst iff X&Cst = 0. 2300 bool SelectionDAG::isBaseWithConstantOffset(SDValue Op) const { 2301 if ((Op.getOpcode() != ISD::ADD && Op.getOpcode() != ISD::OR) || 2302 !isa<ConstantSDNode>(Op.getOperand(1))) 2303 return false; 2304 2305 if (Op.getOpcode() == ISD::OR && 2306 !MaskedValueIsZero(Op.getOperand(0), 2307 cast<ConstantSDNode>(Op.getOperand(1))->getAPIntValue())) 2308 return false; 2309 2310 return true; 2311 } 2312 2313 2314 bool SelectionDAG::isKnownNeverNaN(SDValue Op) const { 2315 // If we're told that NaNs won't happen, assume they won't. 2316 if (getTarget().Options.NoNaNsFPMath) 2317 return true; 2318 2319 // If the value is a constant, we can obviously see if it is a NaN or not. 2320 if (const ConstantFPSDNode *C = dyn_cast<ConstantFPSDNode>(Op)) 2321 return !C->getValueAPF().isNaN(); 2322 2323 // TODO: Recognize more cases here. 2324 2325 return false; 2326 } 2327 2328 bool SelectionDAG::isKnownNeverZero(SDValue Op) const { 2329 // If the value is a constant, we can obviously see if it is a zero or not. 2330 if (const ConstantFPSDNode *C = dyn_cast<ConstantFPSDNode>(Op)) 2331 return !C->isZero(); 2332 2333 // TODO: Recognize more cases here. 2334 switch (Op.getOpcode()) { 2335 default: break; 2336 case ISD::OR: 2337 if (const ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op.getOperand(1))) 2338 return !C->isNullValue(); 2339 break; 2340 } 2341 2342 return false; 2343 } 2344 2345 bool SelectionDAG::isEqualTo(SDValue A, SDValue B) const { 2346 // Check the obvious case. 2347 if (A == B) return true; 2348 2349 // Check for negative and positive zero. 2350 if (const ConstantFPSDNode *CA = dyn_cast<ConstantFPSDNode>(A)) 2351 if (const ConstantFPSDNode *CB = dyn_cast<ConstantFPSDNode>(B)) 2352 if (CA->isZero() && CB->isZero()) return true; 2353 2354 // Otherwise they may not be equal. 2355 return false; 2356 } 2357 2358 /// getNode - Gets or creates the specified node. 2359 /// 2360 SDValue SelectionDAG::getNode(unsigned Opcode, DebugLoc DL, EVT VT) { 2361 FoldingSetNodeID ID; 2362 AddNodeIDNode(ID, Opcode, getVTList(VT), 0, 0); 2363 void *IP = 0; 2364 if (SDNode *E = CSEMap.FindNodeOrInsertPos(ID, IP)) 2365 return SDValue(E, 0); 2366 2367 SDNode *N = new (NodeAllocator) SDNode(Opcode, DL, getVTList(VT)); 2368 CSEMap.InsertNode(N, IP); 2369 2370 AllNodes.push_back(N); 2371 #ifndef NDEBUG 2372 VerifySDNode(N); 2373 #endif 2374 return SDValue(N, 0); 2375 } 2376 2377 SDValue SelectionDAG::getNode(unsigned Opcode, DebugLoc DL, 2378 EVT VT, SDValue Operand) { 2379 // Constant fold unary operations with an integer constant operand.
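// e.g. (sign_extend (i8 -1) to i32) is folded here directly to the i32 constant -1.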
2380 if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Operand.getNode())) { 2381 const APInt &Val = C->getAPIntValue(); 2382 switch (Opcode) { 2383 default: break; 2384 case ISD::SIGN_EXTEND: 2385 return getConstant(Val.sextOrTrunc(VT.getSizeInBits()), VT); 2386 case ISD::ANY_EXTEND: 2387 case ISD::ZERO_EXTEND: 2388 case ISD::TRUNCATE: 2389 return getConstant(Val.zextOrTrunc(VT.getSizeInBits()), VT); 2390 case ISD::UINT_TO_FP: 2391 case ISD::SINT_TO_FP: { 2392 // No compile time operations on ppcf128. 2393 if (VT == MVT::ppcf128) break; 2394 APFloat apf(APInt::getNullValue(VT.getSizeInBits())); 2395 (void)apf.convertFromAPInt(Val, 2396 Opcode==ISD::SINT_TO_FP, 2397 APFloat::rmNearestTiesToEven); 2398 return getConstantFP(apf, VT); 2399 } 2400 case ISD::BITCAST: 2401 if (VT == MVT::f32 && C->getValueType(0) == MVT::i32) 2402 return getConstantFP(Val.bitsToFloat(), VT); 2403 else if (VT == MVT::f64 && C->getValueType(0) == MVT::i64) 2404 return getConstantFP(Val.bitsToDouble(), VT); 2405 break; 2406 case ISD::BSWAP: 2407 return getConstant(Val.byteSwap(), VT); 2408 case ISD::CTPOP: 2409 return getConstant(Val.countPopulation(), VT); 2410 case ISD::CTLZ: 2411 case ISD::CTLZ_ZERO_UNDEF: 2412 return getConstant(Val.countLeadingZeros(), VT); 2413 case ISD::CTTZ: 2414 case ISD::CTTZ_ZERO_UNDEF: 2415 return getConstant(Val.countTrailingZeros(), VT); 2416 } 2417 } 2418 2419 // Constant fold unary operations with a floating point constant operand. 2420 if (ConstantFPSDNode *C = dyn_cast<ConstantFPSDNode>(Operand.getNode())) { 2421 APFloat V = C->getValueAPF(); // make copy 2422 if (VT != MVT::ppcf128 && Operand.getValueType() != MVT::ppcf128) { 2423 switch (Opcode) { 2424 case ISD::FNEG: 2425 V.changeSign(); 2426 return getConstantFP(V, VT); 2427 case ISD::FABS: 2428 V.clearSign(); 2429 return getConstantFP(V, VT); 2430 case ISD::FP_EXTEND: { 2431 bool ignored; 2432 // This can return overflow, underflow, or inexact; we don't care. 2433 // FIXME need to be more flexible about rounding mode. 2434 (void)V.convert(*EVTToAPFloatSemantics(VT), 2435 APFloat::rmNearestTiesToEven, &ignored); 2436 return getConstantFP(V, VT); 2437 } 2438 case ISD::FP_TO_SINT: 2439 case ISD::FP_TO_UINT: { 2440 integerPart x[2]; 2441 bool ignored; 2442 assert(integerPartWidth >= 64); 2443 // FIXME need to be more flexible about rounding mode. 2444 APFloat::opStatus s = V.convertToInteger(x, VT.getSizeInBits(), 2445 Opcode==ISD::FP_TO_SINT, 2446 APFloat::rmTowardZero, &ignored); 2447 if (s==APFloat::opInvalidOp) // inexact is OK, in fact usual 2448 break; 2449 APInt api(VT.getSizeInBits(), x); 2450 return getConstant(api, VT); 2451 } 2452 case ISD::BITCAST: 2453 if (VT == MVT::i32 && C->getValueType(0) == MVT::f32) 2454 return getConstant((uint32_t)V.bitcastToAPInt().getZExtValue(), VT); 2455 else if (VT == MVT::i64 && C->getValueType(0) == MVT::f64) 2456 return getConstant(V.bitcastToAPInt().getZExtValue(), VT); 2457 break; 2458 } 2459 } 2460 } 2461 2462 unsigned OpOpcode = Operand.getNode()->getOpcode(); 2463 switch (Opcode) { 2464 case ISD::TokenFactor: 2465 case ISD::MERGE_VALUES: 2466 case ISD::CONCAT_VECTORS: 2467 return Operand; // Factor, merge or concat of one node? No need. 2468 case ISD::FP_ROUND: llvm_unreachable("Invalid method to make FP_ROUND node"); 2469 case ISD::FP_EXTEND: 2470 assert(VT.isFloatingPoint() && 2471 Operand.getValueType().isFloatingPoint() && "Invalid FP cast!"); 2472 if (Operand.getValueType() == VT) return Operand; // noop conversion. 
2473 assert((!VT.isVector() || 2474 VT.getVectorNumElements() == 2475 Operand.getValueType().getVectorNumElements()) && 2476 "Vector element count mismatch!"); 2477 if (Operand.getOpcode() == ISD::UNDEF) 2478 return getUNDEF(VT); 2479 break; 2480 case ISD::SIGN_EXTEND: 2481 assert(VT.isInteger() && Operand.getValueType().isInteger() && 2482 "Invalid SIGN_EXTEND!"); 2483 if (Operand.getValueType() == VT) return Operand; // noop extension 2484 assert(Operand.getValueType().getScalarType().bitsLT(VT.getScalarType()) && 2485 "Invalid sext node, dst < src!"); 2486 assert((!VT.isVector() || 2487 VT.getVectorNumElements() == 2488 Operand.getValueType().getVectorNumElements()) && 2489 "Vector element count mismatch!"); 2490 if (OpOpcode == ISD::SIGN_EXTEND || OpOpcode == ISD::ZERO_EXTEND) 2491 return getNode(OpOpcode, DL, VT, Operand.getNode()->getOperand(0)); 2492 else if (OpOpcode == ISD::UNDEF) 2493 // sext(undef) = 0, because the top bits will all be the same. 2494 return getConstant(0, VT); 2495 break; 2496 case ISD::ZERO_EXTEND: 2497 assert(VT.isInteger() && Operand.getValueType().isInteger() && 2498 "Invalid ZERO_EXTEND!"); 2499 if (Operand.getValueType() == VT) return Operand; // noop extension 2500 assert(Operand.getValueType().getScalarType().bitsLT(VT.getScalarType()) && 2501 "Invalid zext node, dst < src!"); 2502 assert((!VT.isVector() || 2503 VT.getVectorNumElements() == 2504 Operand.getValueType().getVectorNumElements()) && 2505 "Vector element count mismatch!"); 2506 if (OpOpcode == ISD::ZERO_EXTEND) // (zext (zext x)) -> (zext x) 2507 return getNode(ISD::ZERO_EXTEND, DL, VT, 2508 Operand.getNode()->getOperand(0)); 2509 else if (OpOpcode == ISD::UNDEF) 2510 // zext(undef) = 0, because the top bits will be zero. 2511 return getConstant(0, VT); 2512 break; 2513 case ISD::ANY_EXTEND: 2514 assert(VT.isInteger() && Operand.getValueType().isInteger() && 2515 "Invalid ANY_EXTEND!"); 2516 if (Operand.getValueType() == VT) return Operand; // noop extension 2517 assert(Operand.getValueType().getScalarType().bitsLT(VT.getScalarType()) && 2518 "Invalid anyext node, dst < src!"); 2519 assert((!VT.isVector() || 2520 VT.getVectorNumElements() == 2521 Operand.getValueType().getVectorNumElements()) && 2522 "Vector element count mismatch!"); 2523 2524 if (OpOpcode == ISD::ZERO_EXTEND || OpOpcode == ISD::SIGN_EXTEND || 2525 OpOpcode == ISD::ANY_EXTEND) 2526 // (ext (zext x)) -> (zext x) and (ext (sext x)) -> (sext x) 2527 return getNode(OpOpcode, DL, VT, Operand.getNode()->getOperand(0)); 2528 else if (OpOpcode == ISD::UNDEF) 2529 return getUNDEF(VT); 2530 2531 // (ext (trunc x)) -> x 2532 if (OpOpcode == ISD::TRUNCATE) { 2533 SDValue OpOp = Operand.getNode()->getOperand(0); 2534 if (OpOp.getValueType() == VT) 2535 return OpOp; 2536 } 2537 break; 2538 case ISD::TRUNCATE: 2539 assert(VT.isInteger() && Operand.getValueType().isInteger() && 2540 "Invalid TRUNCATE!"); 2541 if (Operand.getValueType() == VT) return Operand; // noop truncate 2542 assert(Operand.getValueType().getScalarType().bitsGT(VT.getScalarType()) && 2543 "Invalid truncate node, src < dst!"); 2544 assert((!VT.isVector() || 2545 VT.getVectorNumElements() == 2546 Operand.getValueType().getVectorNumElements()) && 2547 "Vector element count mismatch!"); 2548 if (OpOpcode == ISD::TRUNCATE) 2549 return getNode(ISD::TRUNCATE, DL, VT, Operand.getNode()->getOperand(0)); 2550 if (OpOpcode == ISD::ZERO_EXTEND || OpOpcode == ISD::SIGN_EXTEND || 2551 OpOpcode == ISD::ANY_EXTEND) { 2552 // If the source is smaller than the dest, we still need an
extend. 2553 if (Operand.getNode()->getOperand(0).getValueType().getScalarType() 2554 .bitsLT(VT.getScalarType())) 2555 return getNode(OpOpcode, DL, VT, Operand.getNode()->getOperand(0)); 2556 if (Operand.getNode()->getOperand(0).getValueType().bitsGT(VT)) 2557 return getNode(ISD::TRUNCATE, DL, VT, Operand.getNode()->getOperand(0)); 2558 return Operand.getNode()->getOperand(0); 2559 } 2560 if (OpOpcode == ISD::UNDEF) 2561 return getUNDEF(VT); 2562 break; 2563 case ISD::BITCAST: 2564 // Basic sanity checking. 2565 assert(VT.getSizeInBits() == Operand.getValueType().getSizeInBits() 2566 && "Cannot BITCAST between types of different sizes!"); 2567 if (VT == Operand.getValueType()) return Operand; // noop conversion. 2568 if (OpOpcode == ISD::BITCAST) // bitconv(bitconv(x)) -> bitconv(x) 2569 return getNode(ISD::BITCAST, DL, VT, Operand.getOperand(0)); 2570 if (OpOpcode == ISD::UNDEF) 2571 return getUNDEF(VT); 2572 break; 2573 case ISD::SCALAR_TO_VECTOR: 2574 assert(VT.isVector() && !Operand.getValueType().isVector() && 2575 (VT.getVectorElementType() == Operand.getValueType() || 2576 (VT.getVectorElementType().isInteger() && 2577 Operand.getValueType().isInteger() && 2578 VT.getVectorElementType().bitsLE(Operand.getValueType()))) && 2579 "Illegal SCALAR_TO_VECTOR node!"); 2580 if (OpOpcode == ISD::UNDEF) 2581 return getUNDEF(VT); 2582 // scalar_to_vector(extract_vector_elt V, 0) -> V, top bits are undefined. 2583 if (OpOpcode == ISD::EXTRACT_VECTOR_ELT && 2584 isa<ConstantSDNode>(Operand.getOperand(1)) && 2585 Operand.getConstantOperandVal(1) == 0 && 2586 Operand.getOperand(0).getValueType() == VT) 2587 return Operand.getOperand(0); 2588 break; 2589 case ISD::FNEG: 2590 // -(X-Y) -> (Y-X) is unsafe because when X==Y, -0.0 != +0.0 2591 if (getTarget().Options.UnsafeFPMath && OpOpcode == ISD::FSUB) 2592 return getNode(ISD::FSUB, DL, VT, Operand.getNode()->getOperand(1), 2593 Operand.getNode()->getOperand(0)); 2594 if (OpOpcode == ISD::FNEG) // --X -> X 2595 return Operand.getNode()->getOperand(0); 2596 break; 2597 case ISD::FABS: 2598 if (OpOpcode == ISD::FNEG) // abs(-X) -> abs(X) 2599 return getNode(ISD::FABS, DL, VT, Operand.getNode()->getOperand(0)); 2600 break; 2601 } 2602 2603 SDNode *N; 2604 SDVTList VTs = getVTList(VT); 2605 if (VT != MVT::Glue) { // Don't CSE flag producing nodes 2606 FoldingSetNodeID ID; 2607 SDValue Ops[1] = { Operand }; 2608 AddNodeIDNode(ID, Opcode, VTs, Ops, 1); 2609 void *IP = 0; 2610 if (SDNode *E = CSEMap.FindNodeOrInsertPos(ID, IP)) 2611 return SDValue(E, 0); 2612 2613 N = new (NodeAllocator) UnarySDNode(Opcode, DL, VTs, Operand); 2614 CSEMap.InsertNode(N, IP); 2615 } else { 2616 N = new (NodeAllocator) UnarySDNode(Opcode, DL, VTs, Operand); 2617 } 2618 2619 AllNodes.push_back(N); 2620 #ifndef NDEBUG 2621 VerifySDNode(N); 2622 #endif 2623 return SDValue(N, 0); 2624 } 2625 2626 SDValue SelectionDAG::FoldConstantArithmetic(unsigned Opcode, 2627 EVT VT, 2628 ConstantSDNode *Cst1, 2629 ConstantSDNode *Cst2) { 2630 const APInt &C1 = Cst1->getAPIntValue(), &C2 = Cst2->getAPIntValue(); 2631 2632 switch (Opcode) { 2633 case ISD::ADD: return getConstant(C1 + C2, VT); 2634 case ISD::SUB: return getConstant(C1 - C2, VT); 2635 case ISD::MUL: return getConstant(C1 * C2, VT); 2636 case ISD::UDIV: 2637 if (C2.getBoolValue()) return getConstant(C1.udiv(C2), VT); 2638 break; 2639 case ISD::UREM: 2640 if (C2.getBoolValue()) return getConstant(C1.urem(C2), VT); 2641 break; 2642 case ISD::SDIV: 2643 if (C2.getBoolValue()) return getConstant(C1.sdiv(C2), VT); 2644 break; 2645 case 
ISD::SREM: 2646 if (C2.getBoolValue()) return getConstant(C1.srem(C2), VT); 2647 break; 2648 case ISD::AND: return getConstant(C1 & C2, VT); 2649 case ISD::OR: return getConstant(C1 | C2, VT); 2650 case ISD::XOR: return getConstant(C1 ^ C2, VT); 2651 case ISD::SHL: return getConstant(C1 << C2, VT); 2652 case ISD::SRL: return getConstant(C1.lshr(C2), VT); 2653 case ISD::SRA: return getConstant(C1.ashr(C2), VT); 2654 case ISD::ROTL: return getConstant(C1.rotl(C2), VT); 2655 case ISD::ROTR: return getConstant(C1.rotr(C2), VT); 2656 default: break; 2657 } 2658 2659 return SDValue(); 2660 } 2661 2662 SDValue SelectionDAG::getNode(unsigned Opcode, DebugLoc DL, EVT VT, 2663 SDValue N1, SDValue N2) { 2664 ConstantSDNode *N1C = dyn_cast<ConstantSDNode>(N1.getNode()); 2665 ConstantSDNode *N2C = dyn_cast<ConstantSDNode>(N2.getNode()); 2666 switch (Opcode) { 2667 default: break; 2668 case ISD::TokenFactor: 2669 assert(VT == MVT::Other && N1.getValueType() == MVT::Other && 2670 N2.getValueType() == MVT::Other && "Invalid token factor!"); 2671 // Fold trivial token factors. 2672 if (N1.getOpcode() == ISD::EntryToken) return N2; 2673 if (N2.getOpcode() == ISD::EntryToken) return N1; 2674 if (N1 == N2) return N1; 2675 break; 2676 case ISD::CONCAT_VECTORS: 2677 // A CONCAT_VECTOR with all operands BUILD_VECTOR can be simplified to 2678 // one big BUILD_VECTOR. 2679 if (N1.getOpcode() == ISD::BUILD_VECTOR && 2680 N2.getOpcode() == ISD::BUILD_VECTOR) { 2681 SmallVector<SDValue, 16> Elts(N1.getNode()->op_begin(), 2682 N1.getNode()->op_end()); 2683 Elts.append(N2.getNode()->op_begin(), N2.getNode()->op_end()); 2684 return getNode(ISD::BUILD_VECTOR, DL, VT, &Elts[0], Elts.size()); 2685 } 2686 break; 2687 case ISD::AND: 2688 assert(VT.isInteger() && "This operator does not apply to FP types!"); 2689 assert(N1.getValueType() == N2.getValueType() && 2690 N1.getValueType() == VT && "Binary operator types must match!"); 2691 // (X & 0) -> 0. This commonly occurs when legalizing i64 values, so it's 2692 // worth handling here. 2693 if (N2C && N2C->isNullValue()) 2694 return N2; 2695 if (N2C && N2C->isAllOnesValue()) // X & -1 -> X 2696 return N1; 2697 break; 2698 case ISD::OR: 2699 case ISD::XOR: 2700 case ISD::ADD: 2701 case ISD::SUB: 2702 assert(VT.isInteger() && "This operator does not apply to FP types!"); 2703 assert(N1.getValueType() == N2.getValueType() && 2704 N1.getValueType() == VT && "Binary operator types must match!"); 2705 // (X ^|+- 0) -> X. This commonly occurs when legalizing i64 values, so 2706 // it's worth handling here. 
2707 if (N2C && N2C->isNullValue()) 2708 return N1; 2709 break; 2710 case ISD::UDIV: 2711 case ISD::UREM: 2712 case ISD::MULHU: 2713 case ISD::MULHS: 2714 case ISD::MUL: 2715 case ISD::SDIV: 2716 case ISD::SREM: 2717 assert(VT.isInteger() && "This operator does not apply to FP types!"); 2718 assert(N1.getValueType() == N2.getValueType() && 2719 N1.getValueType() == VT && "Binary operator types must match!"); 2720 break; 2721 case ISD::FADD: 2722 case ISD::FSUB: 2723 case ISD::FMUL: 2724 case ISD::FDIV: 2725 case ISD::FREM: 2726 if (getTarget().Options.UnsafeFPMath) { 2727 if (Opcode == ISD::FADD) { 2728 // 0+x --> x 2729 if (ConstantFPSDNode *CFP = dyn_cast<ConstantFPSDNode>(N1)) 2730 if (CFP->getValueAPF().isZero()) 2731 return N2; 2732 // x+0 --> x 2733 if (ConstantFPSDNode *CFP = dyn_cast<ConstantFPSDNode>(N2)) 2734 if (CFP->getValueAPF().isZero()) 2735 return N1; 2736 } else if (Opcode == ISD::FSUB) { 2737 // x-0 --> x 2738 if (ConstantFPSDNode *CFP = dyn_cast<ConstantFPSDNode>(N2)) 2739 if (CFP->getValueAPF().isZero()) 2740 return N1; 2741 } 2742 } 2743 assert(VT.isFloatingPoint() && "This operator only applies to FP types!"); 2744 assert(N1.getValueType() == N2.getValueType() && 2745 N1.getValueType() == VT && "Binary operator types must match!"); 2746 break; 2747 case ISD::FCOPYSIGN: // N1 and result must match. N1/N2 need not match. 2748 assert(N1.getValueType() == VT && 2749 N1.getValueType().isFloatingPoint() && 2750 N2.getValueType().isFloatingPoint() && 2751 "Invalid FCOPYSIGN!"); 2752 break; 2753 case ISD::SHL: 2754 case ISD::SRA: 2755 case ISD::SRL: 2756 case ISD::ROTL: 2757 case ISD::ROTR: 2758 assert(VT == N1.getValueType() && 2759 "Shift operators' return type must be the same as their first arg"); 2760 assert(VT.isInteger() && N2.getValueType().isInteger() && 2761 "Shifts only work on integers"); 2762 // Verify that the shift amount VT is big enough to hold valid shift 2763 // amounts. This catches things like trying to shift an i1024 value by an 2764 // i8, which is easy to fall into in generic code that uses 2765 // TLI.getShiftAmountTy(). 2766 assert(N2.getValueType().getSizeInBits() >= 2767 Log2_32_Ceil(N1.getValueType().getSizeInBits()) && 2768 "Invalid use of small shift amount with oversized value!"); 2769 2770 // Always fold shifts of i1 values so the code generator doesn't need to 2771 // handle them. Since we know the size of the shift has to be less than the 2772 // size of the value, the shift/rotate count is guaranteed to be zero. 2773 if (VT == MVT::i1) 2774 return N1; 2775 if (N2C && N2C->isNullValue()) 2776 return N1; 2777 break; 2778 case ISD::FP_ROUND_INREG: { 2779 EVT EVT = cast<VTSDNode>(N2)->getVT(); 2780 assert(VT == N1.getValueType() && "Not an inreg round!"); 2781 assert(VT.isFloatingPoint() && EVT.isFloatingPoint() && 2782 "Cannot FP_ROUND_INREG integer types"); 2783 assert(EVT.isVector() == VT.isVector() && 2784 "FP_ROUND_INREG type should be vector iff the operand " 2785 "type is vector!"); 2786 assert((!EVT.isVector() || 2787 EVT.getVectorNumElements() == VT.getVectorNumElements()) && 2788 "Vector element counts must match in FP_ROUND_INREG"); 2789 assert(EVT.bitsLE(VT) && "Not rounding down!"); 2790 (void)EVT; 2791 if (cast<VTSDNode>(N2)->getVT() == VT) return N1; // Not actually rounding.
2792 break; 2793 } 2794 case ISD::FP_ROUND: 2795 assert(VT.isFloatingPoint() && 2796 N1.getValueType().isFloatingPoint() && 2797 VT.bitsLE(N1.getValueType()) && 2798 isa<ConstantSDNode>(N2) && "Invalid FP_ROUND!"); 2799 if (N1.getValueType() == VT) return N1; // noop conversion. 2800 break; 2801 case ISD::AssertSext: 2802 case ISD::AssertZext: { 2803 EVT EVT = cast<VTSDNode>(N2)->getVT(); 2804 assert(VT == N1.getValueType() && "Not an inreg extend!"); 2805 assert(VT.isInteger() && EVT.isInteger() && 2806 "Cannot *_EXTEND_INREG FP types"); 2807 assert(!EVT.isVector() && 2808 "AssertSExt/AssertZExt type should be the vector element type " 2809 "rather than the vector type!"); 2810 assert(EVT.bitsLE(VT) && "Not extending!"); 2811 if (VT == EVT) return N1; // noop assertion. 2812 break; 2813 } 2814 case ISD::SIGN_EXTEND_INREG: { 2815 EVT EVT = cast<VTSDNode>(N2)->getVT(); 2816 assert(VT == N1.getValueType() && "Not an inreg extend!"); 2817 assert(VT.isInteger() && EVT.isInteger() && 2818 "Cannot *_EXTEND_INREG FP types"); 2819 assert(EVT.isVector() == VT.isVector() && 2820 "SIGN_EXTEND_INREG type should be vector iff the operand " 2821 "type is vector!"); 2822 assert((!EVT.isVector() || 2823 EVT.getVectorNumElements() == VT.getVectorNumElements()) && 2824 "Vector element counts must match in SIGN_EXTEND_INREG"); 2825 assert(EVT.bitsLE(VT) && "Not extending!"); 2826 if (EVT == VT) return N1; // Not actually extending 2827 2828 if (N1C) { 2829 APInt Val = N1C->getAPIntValue(); 2830 unsigned FromBits = EVT.getScalarType().getSizeInBits(); 2831 Val <<= Val.getBitWidth()-FromBits; 2832 Val = Val.ashr(Val.getBitWidth()-FromBits); 2833 return getConstant(Val, VT); 2834 } 2835 break; 2836 } 2837 case ISD::EXTRACT_VECTOR_ELT: 2838 // EXTRACT_VECTOR_ELT of an UNDEF is an UNDEF. 2839 if (N1.getOpcode() == ISD::UNDEF) 2840 return getUNDEF(VT); 2841 2842 // EXTRACT_VECTOR_ELT of CONCAT_VECTORS is often formed while lowering is 2843 // expanding copies of large vectors from registers. 2844 if (N2C && 2845 N1.getOpcode() == ISD::CONCAT_VECTORS && 2846 N1.getNumOperands() > 0) { 2847 unsigned Factor = 2848 N1.getOperand(0).getValueType().getVectorNumElements(); 2849 return getNode(ISD::EXTRACT_VECTOR_ELT, DL, VT, 2850 N1.getOperand(N2C->getZExtValue() / Factor), 2851 getConstant(N2C->getZExtValue() % Factor, 2852 N2.getValueType())); 2853 } 2854 2855 // EXTRACT_VECTOR_ELT of BUILD_VECTOR is often formed while lowering is 2856 // expanding large vector constants. 2857 if (N2C && N1.getOpcode() == ISD::BUILD_VECTOR) { 2858 SDValue Elt = N1.getOperand(N2C->getZExtValue()); 2859 EVT VEltTy = N1.getValueType().getVectorElementType(); 2860 if (Elt.getValueType() != VEltTy) { 2861 // If the vector element type is not legal, the BUILD_VECTOR operands 2862 // are promoted and implicitly truncated. Make that explicit here. 2863 Elt = getNode(ISD::TRUNCATE, DL, VEltTy, Elt); 2864 } 2865 if (VT != VEltTy) { 2866 // If the vector element type is not legal, the EXTRACT_VECTOR_ELT 2867 // result is implicitly extended. 2868 Elt = getNode(ISD::ANY_EXTEND, DL, VT, Elt); 2869 } 2870 return Elt; 2871 } 2872 2873 // EXTRACT_VECTOR_ELT of INSERT_VECTOR_ELT is often formed when vector 2874 // operations are lowered to scalars. 2875 if (N1.getOpcode() == ISD::INSERT_VECTOR_ELT) { 2876 // If the indices are the same, return the inserted element else 2877 // if the indices are known different, extract the element from 2878 // the original vector. 
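// e.g. (extract_elt (insert_elt V, X, 0), 0) -> X, whereas (extract_elt (insert_elt V, X, 1), 0) -> (extract_elt V, 0).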
2879 SDValue N1Op2 = N1.getOperand(2); 2880 ConstantSDNode *N1Op2C = dyn_cast<ConstantSDNode>(N1Op2.getNode()); 2881 2882 if (N1Op2C && N2C) { 2883 if (N1Op2C->getZExtValue() == N2C->getZExtValue()) { 2884 if (VT == N1.getOperand(1).getValueType()) 2885 return N1.getOperand(1); 2886 else 2887 return getSExtOrTrunc(N1.getOperand(1), DL, VT); 2888 } 2889 2890 return getNode(ISD::EXTRACT_VECTOR_ELT, DL, VT, N1.getOperand(0), N2); 2891 } 2892 } 2893 break; 2894 case ISD::EXTRACT_ELEMENT: 2895 assert(N2C && (unsigned)N2C->getZExtValue() < 2 && "Bad EXTRACT_ELEMENT!"); 2896 assert(!N1.getValueType().isVector() && !VT.isVector() && 2897 (N1.getValueType().isInteger() == VT.isInteger()) && 2898 N1.getValueType() != VT && 2899 "Wrong types for EXTRACT_ELEMENT!"); 2900 2901 // EXTRACT_ELEMENT of BUILD_PAIR is often formed while legalize is expanding 2902 // 64-bit integers into 32-bit parts. Instead of building the extract of 2903 // the BUILD_PAIR, only to have legalize rip it apart, just do it now. 2904 if (N1.getOpcode() == ISD::BUILD_PAIR) 2905 return N1.getOperand(N2C->getZExtValue()); 2906 2907 // EXTRACT_ELEMENT of a constant int is also very common. 2908 if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(N1)) { 2909 unsigned ElementSize = VT.getSizeInBits(); 2910 unsigned Shift = ElementSize * N2C->getZExtValue(); 2911 APInt ShiftedVal = C->getAPIntValue().lshr(Shift); 2912 return getConstant(ShiftedVal.trunc(ElementSize), VT); 2913 } 2914 break; 2915 case ISD::EXTRACT_SUBVECTOR: { 2916 SDValue Index = N2; 2917 if (VT.isSimple() && N1.getValueType().isSimple()) { 2918 assert(VT.isVector() && N1.getValueType().isVector() && 2919 "Extract subvector VTs must be vectors!"); 2920 assert(VT.getVectorElementType() == N1.getValueType().getVectorElementType() && 2921 "Extract subvector VTs must have the same element type!"); 2922 assert(VT.getSimpleVT() <= N1.getValueType().getSimpleVT() && 2923 "Extract subvector must be from larger vector to smaller vector!"); 2924 2925 if (isa<ConstantSDNode>(Index.getNode())) { 2926 assert((VT.getVectorNumElements() + 2927 cast<ConstantSDNode>(Index.getNode())->getZExtValue() 2928 <= N1.getValueType().getVectorNumElements()) 2929 && "Extract subvector overflow!"); 2930 } 2931 2932 // Trivial extraction. 2933 if (VT.getSimpleVT() == N1.getValueType().getSimpleVT()) 2934 return N1; 2935 } 2936 break; 2937 } 2938 } 2939 2940 if (N1C) { 2941 if (N2C) { 2942 SDValue SV = FoldConstantArithmetic(Opcode, VT, N1C, N2C); 2943 if (SV.getNode()) return SV; 2944 } else { // Canonicalize constant to RHS if commutative 2945 if (isCommutativeBinOp(Opcode)) { 2946 std::swap(N1C, N2C); 2947 std::swap(N1, N2); 2948 } 2949 } 2950 } 2951 2952 // Constant fold FP operations.
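// As in the integer path above, a lone FP constant is canonicalized to the RHS of commutative operators before any folding is attempted.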
2953 ConstantFPSDNode *N1CFP = dyn_cast<ConstantFPSDNode>(N1.getNode()); 2954 ConstantFPSDNode *N2CFP = dyn_cast<ConstantFPSDNode>(N2.getNode()); 2955 if (N1CFP) { 2956 if (!N2CFP && isCommutativeBinOp(Opcode)) { 2957 // Canonicalize constant to RHS if commutative 2958 std::swap(N1CFP, N2CFP); 2959 std::swap(N1, N2); 2960 } else if (N2CFP && VT != MVT::ppcf128) { 2961 APFloat V1 = N1CFP->getValueAPF(), V2 = N2CFP->getValueAPF(); 2962 APFloat::opStatus s; 2963 switch (Opcode) { 2964 case ISD::FADD: 2965 s = V1.add(V2, APFloat::rmNearestTiesToEven); 2966 if (s != APFloat::opInvalidOp) 2967 return getConstantFP(V1, VT); 2968 break; 2969 case ISD::FSUB: 2970 s = V1.subtract(V2, APFloat::rmNearestTiesToEven); 2971 if (s!=APFloat::opInvalidOp) 2972 return getConstantFP(V1, VT); 2973 break; 2974 case ISD::FMUL: 2975 s = V1.multiply(V2, APFloat::rmNearestTiesToEven); 2976 if (s!=APFloat::opInvalidOp) 2977 return getConstantFP(V1, VT); 2978 break; 2979 case ISD::FDIV: 2980 s = V1.divide(V2, APFloat::rmNearestTiesToEven); 2981 if (s!=APFloat::opInvalidOp && s!=APFloat::opDivByZero) 2982 return getConstantFP(V1, VT); 2983 break; 2984 case ISD::FREM : 2985 s = V1.mod(V2, APFloat::rmNearestTiesToEven); 2986 if (s!=APFloat::opInvalidOp && s!=APFloat::opDivByZero) 2987 return getConstantFP(V1, VT); 2988 break; 2989 case ISD::FCOPYSIGN: 2990 V1.copySign(V2); 2991 return getConstantFP(V1, VT); 2992 default: break; 2993 } 2994 } 2995 2996 if (Opcode == ISD::FP_ROUND) { 2997 APFloat V = N1CFP->getValueAPF(); // make copy 2998 bool ignored; 2999 // This can return overflow, underflow, or inexact; we don't care. 3000 // FIXME need to be more flexible about rounding mode. 3001 (void)V.convert(*EVTToAPFloatSemantics(VT), 3002 APFloat::rmNearestTiesToEven, &ignored); 3003 return getConstantFP(V, VT); 3004 } 3005 } 3006 3007 // Canonicalize an UNDEF to the RHS, even over a constant. 3008 if (N1.getOpcode() == ISD::UNDEF) { 3009 if (isCommutativeBinOp(Opcode)) { 3010 std::swap(N1, N2); 3011 } else { 3012 switch (Opcode) { 3013 case ISD::FP_ROUND_INREG: 3014 case ISD::SIGN_EXTEND_INREG: 3015 case ISD::SUB: 3016 case ISD::FSUB: 3017 case ISD::FDIV: 3018 case ISD::FREM: 3019 case ISD::SRA: 3020 return N1; // fold op(undef, arg2) -> undef 3021 case ISD::UDIV: 3022 case ISD::SDIV: 3023 case ISD::UREM: 3024 case ISD::SREM: 3025 case ISD::SRL: 3026 case ISD::SHL: 3027 if (!VT.isVector()) 3028 return getConstant(0, VT); // fold op(undef, arg2) -> 0 3029 // For vectors, we can't easily build an all zero vector, just return 3030 // the LHS. 3031 return N2; 3032 } 3033 } 3034 } 3035 3036 // Fold a bunch of operators when the RHS is undef. 3037 if (N2.getOpcode() == ISD::UNDEF) { 3038 switch (Opcode) { 3039 case ISD::XOR: 3040 if (N1.getOpcode() == ISD::UNDEF) 3041 // Handle undef ^ undef -> 0 special case. This is a common 3042 // idiom (misuse).
3043 return getConstant(0, VT); 3044 // fallthrough 3045 case ISD::ADD: 3046 case ISD::ADDC: 3047 case ISD::ADDE: 3048 case ISD::SUB: 3049 case ISD::UDIV: 3050 case ISD::SDIV: 3051 case ISD::UREM: 3052 case ISD::SREM: 3053 return N2; // fold op(arg1, undef) -> undef 3054 case ISD::FADD: 3055 case ISD::FSUB: 3056 case ISD::FMUL: 3057 case ISD::FDIV: 3058 case ISD::FREM: 3059 if (getTarget().Options.UnsafeFPMath) 3060 return N2; 3061 break; 3062 case ISD::MUL: 3063 case ISD::AND: 3064 case ISD::SRL: 3065 case ISD::SHL: 3066 if (!VT.isVector()) 3067 return getConstant(0, VT); // fold op(arg1, undef) -> 0 3068 // For vectors, we can't easily build an all zero vector, just return 3069 // the LHS. 3070 return N1; 3071 case ISD::OR: 3072 if (!VT.isVector()) 3073 return getConstant(APInt::getAllOnesValue(VT.getSizeInBits()), VT); 3074 // For vectors, we can't easily build an all one vector, just return 3075 // the LHS. 3076 return N1; 3077 case ISD::SRA: 3078 return N1; 3079 } 3080 } 3081 3082 // Memoize this node if possible. 3083 SDNode *N; 3084 SDVTList VTs = getVTList(VT); 3085 if (VT != MVT::Glue) { 3086 SDValue Ops[] = { N1, N2 }; 3087 FoldingSetNodeID ID; 3088 AddNodeIDNode(ID, Opcode, VTs, Ops, 2); 3089 void *IP = 0; 3090 if (SDNode *E = CSEMap.FindNodeOrInsertPos(ID, IP)) 3091 return SDValue(E, 0); 3092 3093 N = new (NodeAllocator) BinarySDNode(Opcode, DL, VTs, N1, N2); 3094 CSEMap.InsertNode(N, IP); 3095 } else { 3096 N = new (NodeAllocator) BinarySDNode(Opcode, DL, VTs, N1, N2); 3097 } 3098 3099 AllNodes.push_back(N); 3100 #ifndef NDEBUG 3101 VerifySDNode(N); 3102 #endif 3103 return SDValue(N, 0); 3104 } 3105 3106 SDValue SelectionDAG::getNode(unsigned Opcode, DebugLoc DL, EVT VT, 3107 SDValue N1, SDValue N2, SDValue N3) { 3108 // Perform various simplifications. 3109 ConstantSDNode *N1C = dyn_cast<ConstantSDNode>(N1.getNode()); 3110 switch (Opcode) { 3111 case ISD::CONCAT_VECTORS: 3112 // A CONCAT_VECTOR with all operands BUILD_VECTOR can be simplified to 3113 // one big BUILD_VECTOR. 3114 if (N1.getOpcode() == ISD::BUILD_VECTOR && 3115 N2.getOpcode() == ISD::BUILD_VECTOR && 3116 N3.getOpcode() == ISD::BUILD_VECTOR) { 3117 SmallVector<SDValue, 16> Elts(N1.getNode()->op_begin(), 3118 N1.getNode()->op_end()); 3119 Elts.append(N2.getNode()->op_begin(), N2.getNode()->op_end()); 3120 Elts.append(N3.getNode()->op_begin(), N3.getNode()->op_end()); 3121 return getNode(ISD::BUILD_VECTOR, DL, VT, &Elts[0], Elts.size()); 3122 } 3123 break; 3124 case ISD::SETCC: { 3125 // Use FoldSetCC to simplify SETCC's. 
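// e.g. a SETCC of two integer constants is folded to a constant boolean here.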
    SDValue Simp = FoldSetCC(VT, N1, N2, cast<CondCodeSDNode>(N3)->get(), DL);
    if (Simp.getNode()) return Simp;
    break;
  }
  case ISD::SELECT:
    if (N1C) {
      if (N1C->getZExtValue())
        return N2;             // select true, X, Y -> X
      return N3;               // select false, X, Y -> Y
    }

    if (N2 == N3) return N2;   // select C, X, X -> X
    break;
  case ISD::VECTOR_SHUFFLE:
    llvm_unreachable("should use getVectorShuffle constructor!");
  case ISD::INSERT_SUBVECTOR: {
    SDValue Index = N3;
    if (VT.isSimple() && N1.getValueType().isSimple()
        && N2.getValueType().isSimple()) {
      assert(VT.isVector() && N1.getValueType().isVector() &&
             N2.getValueType().isVector() &&
             "Insert subvector VTs must be vectors!");
      assert(VT == N1.getValueType() &&
             "Dest and insert subvector source types must match!");
      assert(N2.getValueType().getSimpleVT() <= N1.getValueType().getSimpleVT() &&
             "Insert subvector must be from smaller vector to larger vector!");
      if (isa<ConstantSDNode>(Index.getNode())) {
        assert((N2.getValueType().getVectorNumElements() +
                cast<ConstantSDNode>(Index.getNode())->getZExtValue()
                <= VT.getVectorNumElements())
               && "Insert subvector overflow!");
      }

      // Trivial insertion.
      if (VT.getSimpleVT() == N2.getValueType().getSimpleVT())
        return N2;
    }
    break;
  }
  case ISD::BITCAST:
    // Fold bit_convert nodes from a type to themselves.
    if (N1.getValueType() == VT)
      return N1;
    break;
  }

  // Memoize node if it doesn't produce a flag.
  SDNode *N;
  SDVTList VTs = getVTList(VT);
  if (VT != MVT::Glue) {
    SDValue Ops[] = { N1, N2, N3 };
    FoldingSetNodeID ID;
    AddNodeIDNode(ID, Opcode, VTs, Ops, 3);
    void *IP = 0;
    if (SDNode *E = CSEMap.FindNodeOrInsertPos(ID, IP))
      return SDValue(E, 0);

    N = new (NodeAllocator) TernarySDNode(Opcode, DL, VTs, N1, N2, N3);
    CSEMap.InsertNode(N, IP);
  } else {
    N = new (NodeAllocator) TernarySDNode(Opcode, DL, VTs, N1, N2, N3);
  }

  AllNodes.push_back(N);
#ifndef NDEBUG
  VerifySDNode(N);
#endif
  return SDValue(N, 0);
}

SDValue SelectionDAG::getNode(unsigned Opcode, DebugLoc DL, EVT VT,
                              SDValue N1, SDValue N2, SDValue N3,
                              SDValue N4) {
  SDValue Ops[] = { N1, N2, N3, N4 };
  return getNode(Opcode, DL, VT, Ops, 4);
}

SDValue SelectionDAG::getNode(unsigned Opcode, DebugLoc DL, EVT VT,
                              SDValue N1, SDValue N2, SDValue N3,
                              SDValue N4, SDValue N5) {
  SDValue Ops[] = { N1, N2, N3, N4, N5 };
  return getNode(Opcode, DL, VT, Ops, 5);
}

/// getStackArgumentTokenFactor - Compute a TokenFactor to force all
/// the incoming stack arguments to be loaded from the stack.
SDValue SelectionDAG::getStackArgumentTokenFactor(SDValue Chain) {
  SmallVector<SDValue, 8> ArgChains;

  // Include the original chain at the beginning of the list. When this is
  // used by target LowerCall hooks, this helps legalize find the
  // CALLSEQ_BEGIN node.
  ArgChains.push_back(Chain);

  // Add a chain value for each stack argument.
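  // (Clarifying note, an assumption about the frame-index convention: fixed
  //  stack objects for incoming arguments get negative frame indices, which
  //  is what the FI->getIndex() < 0 test below selects.)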
  for (SDNode::use_iterator U = getEntryNode().getNode()->use_begin(),
       UE = getEntryNode().getNode()->use_end(); U != UE; ++U)
    if (LoadSDNode *L = dyn_cast<LoadSDNode>(*U))
      if (FrameIndexSDNode *FI = dyn_cast<FrameIndexSDNode>(L->getBasePtr()))
        if (FI->getIndex() < 0)
          ArgChains.push_back(SDValue(L, 1));

  // Build a tokenfactor for all the chains.
  return getNode(ISD::TokenFactor, Chain.getDebugLoc(), MVT::Other,
                 &ArgChains[0], ArgChains.size());
}

/// SplatByte - Distribute ByteVal over NumBits bits.
static APInt SplatByte(unsigned NumBits, uint8_t ByteVal) {
  APInt Val = APInt(NumBits, ByteVal);
  unsigned Shift = 8;
  for (unsigned i = NumBits; i > 8; i >>= 1) {
    Val = (Val << Shift) | Val;
    Shift <<= 1;
  }
  return Val;
}

/// getMemsetValue - Vectorized representation of the memset value
/// operand.
static SDValue getMemsetValue(SDValue Value, EVT VT, SelectionDAG &DAG,
                              DebugLoc dl) {
  assert(Value.getOpcode() != ISD::UNDEF);

  unsigned NumBits = VT.getScalarType().getSizeInBits();
  if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Value)) {
    APInt Val = SplatByte(NumBits, C->getZExtValue() & 255);
    if (VT.isInteger())
      return DAG.getConstant(Val, VT);
    return DAG.getConstantFP(APFloat(Val), VT);
  }

  Value = DAG.getNode(ISD::ZERO_EXTEND, dl, VT, Value);
  if (NumBits > 8) {
    // Use a multiplication with 0x010101... to extend the input to the
    // required length.
    APInt Magic = SplatByte(NumBits, 0x01);
    Value = DAG.getNode(ISD::MUL, dl, VT, Value, DAG.getConstant(Magic, VT));
  }

  return Value;
}

/// getMemsetStringVal - Similar to getMemsetValue. Except this is only
/// used when a memcpy is turned into a memset when the source is a constant
/// string ptr.
static SDValue getMemsetStringVal(EVT VT, DebugLoc dl, SelectionDAG &DAG,
                                  const TargetLowering &TLI, StringRef Str) {
  // Handle vector with all elements zero.
  if (Str.empty()) {
    if (VT.isInteger())
      return DAG.getConstant(0, VT);
    else if (VT == MVT::f32 || VT == MVT::f64)
      return DAG.getConstantFP(0.0, VT);
    else if (VT.isVector()) {
      unsigned NumElts = VT.getVectorNumElements();
      MVT EltVT = (VT.getVectorElementType() == MVT::f32) ? MVT::i32 : MVT::i64;
      return DAG.getNode(ISD::BITCAST, dl, VT,
                         DAG.getConstant(0, EVT::getVectorVT(*DAG.getContext(),
                                                             EltVT, NumElts)));
    } else
      llvm_unreachable("Expected type!");
  }

  assert(!VT.isVector() && "Can't handle vector type here!");
  unsigned NumVTBytes = VT.getSizeInBits() / 8;
  unsigned NumBytes = std::min(NumVTBytes, unsigned(Str.size()));

  uint64_t Val = 0;
  if (TLI.isLittleEndian()) {
    for (unsigned i = 0; i != NumBytes; ++i)
      Val |= (uint64_t)(unsigned char)Str[i] << i*8;
  } else {
    for (unsigned i = 0; i != NumBytes; ++i)
      Val |= (uint64_t)(unsigned char)Str[i] << (NumVTBytes-i-1)*8;
  }

  return DAG.getConstant(Val, VT);
}

/// getMemBasePlusOffset - Returns a node computing the sum of the given
/// base pointer and byte offset.
static SDValue getMemBasePlusOffset(SDValue Base, unsigned Offset,
                                    SelectionDAG &DAG) {
  EVT VT = Base.getValueType();
  return DAG.getNode(ISD::ADD, Base.getDebugLoc(),
                     VT, Base, DAG.getConstant(Offset, VT));
}

/// isMemSrcFromString - Returns true if memcpy source is a string constant.
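/// (Illustrative example, not from the original source: a Src of the form
/// (add (GlobalAddress @.str), 3), where the hypothetical @.str holds a
/// constant C string, reports true and sets Str to the bytes from offset 3.)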
3316 /// 3317 static bool isMemSrcFromString(SDValue Src, StringRef &Str) { 3318 unsigned SrcDelta = 0; 3319 GlobalAddressSDNode *G = NULL; 3320 if (Src.getOpcode() == ISD::GlobalAddress) 3321 G = cast<GlobalAddressSDNode>(Src); 3322 else if (Src.getOpcode() == ISD::ADD && 3323 Src.getOperand(0).getOpcode() == ISD::GlobalAddress && 3324 Src.getOperand(1).getOpcode() == ISD::Constant) { 3325 G = cast<GlobalAddressSDNode>(Src.getOperand(0)); 3326 SrcDelta = cast<ConstantSDNode>(Src.getOperand(1))->getZExtValue(); 3327 } 3328 if (!G) 3329 return false; 3330 3331 return getConstantStringInfo(G->getGlobal(), Str, SrcDelta, false); 3332 } 3333 3334 /// FindOptimalMemOpLowering - Determines the optimial series memory ops 3335 /// to replace the memset / memcpy. Return true if the number of memory ops 3336 /// is below the threshold. It returns the types of the sequence of 3337 /// memory ops to perform memset / memcpy by reference. 3338 static bool FindOptimalMemOpLowering(std::vector<EVT> &MemOps, 3339 unsigned Limit, uint64_t Size, 3340 unsigned DstAlign, unsigned SrcAlign, 3341 bool IsZeroVal, 3342 bool MemcpyStrSrc, 3343 SelectionDAG &DAG, 3344 const TargetLowering &TLI) { 3345 assert((SrcAlign == 0 || SrcAlign >= DstAlign) && 3346 "Expecting memcpy / memset source to meet alignment requirement!"); 3347 // If 'SrcAlign' is zero, that means the memory operation does not need to 3348 // load the value, i.e. memset or memcpy from constant string. Otherwise, 3349 // it's the inferred alignment of the source. 'DstAlign', on the other hand, 3350 // is the specified alignment of the memory operation. If it is zero, that 3351 // means it's possible to change the alignment of the destination. 3352 // 'MemcpyStrSrc' indicates whether the memcpy source is constant so it does 3353 // not need to be loaded. 3354 EVT VT = TLI.getOptimalMemOpType(Size, DstAlign, SrcAlign, 3355 IsZeroVal, MemcpyStrSrc, 3356 DAG.getMachineFunction()); 3357 3358 if (VT == MVT::Other) { 3359 if (DstAlign >= TLI.getTargetData()->getPointerPrefAlignment() || 3360 TLI.allowsUnalignedMemoryAccesses(VT)) { 3361 VT = TLI.getPointerTy(); 3362 } else { 3363 switch (DstAlign & 7) { 3364 case 0: VT = MVT::i64; break; 3365 case 4: VT = MVT::i32; break; 3366 case 2: VT = MVT::i16; break; 3367 default: VT = MVT::i8; break; 3368 } 3369 } 3370 3371 MVT LVT = MVT::i64; 3372 while (!TLI.isTypeLegal(LVT)) 3373 LVT = (MVT::SimpleValueType)(LVT.SimpleTy - 1); 3374 assert(LVT.isInteger()); 3375 3376 if (VT.bitsGT(LVT)) 3377 VT = LVT; 3378 } 3379 3380 unsigned NumMemOps = 0; 3381 while (Size != 0) { 3382 unsigned VTSize = VT.getSizeInBits() / 8; 3383 while (VTSize > Size) { 3384 // For now, only use non-vector load / store's for the left-over pieces. 3385 if (VT.isVector() || VT.isFloatingPoint()) { 3386 VT = MVT::i64; 3387 while (!TLI.isTypeLegal(VT)) 3388 VT = (MVT::SimpleValueType)(VT.getSimpleVT().SimpleTy - 1); 3389 VTSize = VT.getSizeInBits() / 8; 3390 } else { 3391 // This can result in a type that is not legal on the target, e.g. 3392 // 1 or 2 bytes on PPC. 
        VT = (MVT::SimpleValueType)(VT.getSimpleVT().SimpleTy - 1);
        VTSize >>= 1;
      }
    }

    if (++NumMemOps > Limit)
      return false;
    MemOps.push_back(VT);
    Size -= VTSize;
  }

  return true;
}

static SDValue getMemcpyLoadsAndStores(SelectionDAG &DAG, DebugLoc dl,
                                       SDValue Chain, SDValue Dst,
                                       SDValue Src, uint64_t Size,
                                       unsigned Align, bool isVol,
                                       bool AlwaysInline,
                                       MachinePointerInfo DstPtrInfo,
                                       MachinePointerInfo SrcPtrInfo) {
  // Turn a memcpy of undef to nop.
  if (Src.getOpcode() == ISD::UNDEF)
    return Chain;

  // Expand memcpy to a series of load and store ops if the size operand falls
  // below a certain threshold.
  // TODO: In the AlwaysInline case, if the size is big then generate a loop
  // rather than maybe a humongous number of loads and stores.
  const TargetLowering &TLI = DAG.getTargetLoweringInfo();
  std::vector<EVT> MemOps;
  bool DstAlignCanChange = false;
  MachineFunction &MF = DAG.getMachineFunction();
  MachineFrameInfo *MFI = MF.getFrameInfo();
  bool OptSize = MF.getFunction()->hasFnAttr(Attribute::OptimizeForSize);
  FrameIndexSDNode *FI = dyn_cast<FrameIndexSDNode>(Dst);
  if (FI && !MFI->isFixedObjectIndex(FI->getIndex()))
    DstAlignCanChange = true;
  unsigned SrcAlign = DAG.InferPtrAlignment(Src);
  if (Align > SrcAlign)
    SrcAlign = Align;
  StringRef Str;
  bool CopyFromStr = isMemSrcFromString(Src, Str);
  bool isZeroStr = CopyFromStr && Str.empty();
  unsigned Limit = AlwaysInline ? ~0U : TLI.getMaxStoresPerMemcpy(OptSize);

  if (!FindOptimalMemOpLowering(MemOps, Limit, Size,
                                (DstAlignCanChange ? 0 : Align),
                                (isZeroStr ? 0 : SrcAlign),
                                true, CopyFromStr, DAG, TLI))
    return SDValue();

  if (DstAlignCanChange) {
    Type *Ty = MemOps[0].getTypeForEVT(*DAG.getContext());
    unsigned NewAlign = (unsigned) TLI.getTargetData()->getABITypeAlignment(Ty);
    if (NewAlign > Align) {
      // Give the stack frame object a larger alignment if needed.
      if (MFI->getObjectAlignment(FI->getIndex()) < NewAlign)
        MFI->setObjectAlignment(FI->getIndex(), NewAlign);
      Align = NewAlign;
    }
  }

  SmallVector<SDValue, 8> OutChains;
  unsigned NumMemOps = MemOps.size();
  uint64_t SrcOff = 0, DstOff = 0;
  for (unsigned i = 0; i != NumMemOps; ++i) {
    EVT VT = MemOps[i];
    unsigned VTSize = VT.getSizeInBits() / 8;
    SDValue Value, Store;

    if (CopyFromStr &&
        (isZeroStr || (VT.isInteger() && !VT.isVector()))) {
      // It's unlikely a store of a vector immediate can be done in a single
      // instruction. It would require a load from a constantpool first.
      // We only handle zero vectors here.
      // FIXME: Handle other cases where store of vector immediate is done in
      // a single instruction.
      Value = getMemsetStringVal(VT, dl, DAG, TLI, Str.substr(SrcOff));
      Store = DAG.getStore(Chain, dl, Value,
                           getMemBasePlusOffset(Dst, DstOff, DAG),
                           DstPtrInfo.getWithOffset(DstOff), isVol,
                           false, Align);
    } else {
      // The type might not be legal for the target. This should only happen
      // if the type is smaller than a legal type, as on PPC, so the right
      // thing to do is generate a LoadExt/StoreTrunc pair. These simplify
      // to Load/Store if NVT==VT.
      // FIXME does the case above also need this?
      EVT NVT = TLI.getTypeToTransformTo(*DAG.getContext(), VT);
      assert(NVT.bitsGE(VT));
      Value = DAG.getExtLoad(ISD::EXTLOAD, dl, NVT, Chain,
                             getMemBasePlusOffset(Src, SrcOff, DAG),
                             SrcPtrInfo.getWithOffset(SrcOff), VT, isVol, false,
                             MinAlign(SrcAlign, SrcOff));
      Store = DAG.getTruncStore(Chain, dl, Value,
                                getMemBasePlusOffset(Dst, DstOff, DAG),
                                DstPtrInfo.getWithOffset(DstOff), VT, isVol,
                                false, Align);
    }
    OutChains.push_back(Store);
    SrcOff += VTSize;
    DstOff += VTSize;
  }

  return DAG.getNode(ISD::TokenFactor, dl, MVT::Other,
                     &OutChains[0], OutChains.size());
}

static SDValue getMemmoveLoadsAndStores(SelectionDAG &DAG, DebugLoc dl,
                                        SDValue Chain, SDValue Dst,
                                        SDValue Src, uint64_t Size,
                                        unsigned Align, bool isVol,
                                        bool AlwaysInline,
                                        MachinePointerInfo DstPtrInfo,
                                        MachinePointerInfo SrcPtrInfo) {
  // Turn a memmove of undef to nop.
  if (Src.getOpcode() == ISD::UNDEF)
    return Chain;

  // Expand memmove to a series of load and store ops if the size operand falls
  // below a certain threshold.
  const TargetLowering &TLI = DAG.getTargetLoweringInfo();
  std::vector<EVT> MemOps;
  bool DstAlignCanChange = false;
  MachineFunction &MF = DAG.getMachineFunction();
  MachineFrameInfo *MFI = MF.getFrameInfo();
  bool OptSize = MF.getFunction()->hasFnAttr(Attribute::OptimizeForSize);
  FrameIndexSDNode *FI = dyn_cast<FrameIndexSDNode>(Dst);
  if (FI && !MFI->isFixedObjectIndex(FI->getIndex()))
    DstAlignCanChange = true;
  unsigned SrcAlign = DAG.InferPtrAlignment(Src);
  if (Align > SrcAlign)
    SrcAlign = Align;
  unsigned Limit = AlwaysInline ? ~0U : TLI.getMaxStoresPerMemmove(OptSize);

  if (!FindOptimalMemOpLowering(MemOps, Limit, Size,
                                (DstAlignCanChange ? 0 : Align),
                                SrcAlign, true, false, DAG, TLI))
    return SDValue();

  if (DstAlignCanChange) {
    Type *Ty = MemOps[0].getTypeForEVT(*DAG.getContext());
    unsigned NewAlign = (unsigned) TLI.getTargetData()->getABITypeAlignment(Ty);
    if (NewAlign > Align) {
      // Give the stack frame object a larger alignment if needed.
      if (MFI->getObjectAlignment(FI->getIndex()) < NewAlign)
        MFI->setObjectAlignment(FI->getIndex(), NewAlign);
      Align = NewAlign;
    }
  }

  uint64_t SrcOff = 0, DstOff = 0;
  SmallVector<SDValue, 8> LoadValues;
  SmallVector<SDValue, 8> LoadChains;
  SmallVector<SDValue, 8> OutChains;
  unsigned NumMemOps = MemOps.size();
  for (unsigned i = 0; i < NumMemOps; i++) {
    EVT VT = MemOps[i];
    unsigned VTSize = VT.getSizeInBits() / 8;
    SDValue Value, Store;

    Value = DAG.getLoad(VT, dl, Chain,
                        getMemBasePlusOffset(Src, SrcOff, DAG),
                        SrcPtrInfo.getWithOffset(SrcOff), isVol,
                        false, false, SrcAlign);
    LoadValues.push_back(Value);
    LoadChains.push_back(Value.getValue(1));
    SrcOff += VTSize;
  }
  Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other,
                      &LoadChains[0], LoadChains.size());
  OutChains.clear();
  for (unsigned i = 0; i < NumMemOps; i++) {
    EVT VT = MemOps[i];
    unsigned VTSize = VT.getSizeInBits() / 8;
    SDValue Value, Store;

    Store = DAG.getStore(Chain, dl, LoadValues[i],
                         getMemBasePlusOffset(Dst, DstOff, DAG),
                         DstPtrInfo.getWithOffset(DstOff), isVol, false, Align);
    OutChains.push_back(Store);
    DstOff += VTSize;
  }

  return DAG.getNode(ISD::TokenFactor, dl, MVT::Other,
                     &OutChains[0], OutChains.size());
}

static SDValue getMemsetStores(SelectionDAG &DAG, DebugLoc dl,
                               SDValue Chain, SDValue Dst,
                               SDValue Src, uint64_t Size,
                               unsigned Align, bool isVol,
                               MachinePointerInfo DstPtrInfo) {
  // Turn a memset of undef to nop.
  if (Src.getOpcode() == ISD::UNDEF)
    return Chain;

  // Expand memset to a series of load/store ops if the size operand
  // falls below a certain threshold.
  const TargetLowering &TLI = DAG.getTargetLoweringInfo();
  std::vector<EVT> MemOps;
  bool DstAlignCanChange = false;
  MachineFunction &MF = DAG.getMachineFunction();
  MachineFrameInfo *MFI = MF.getFrameInfo();
  bool OptSize = MF.getFunction()->hasFnAttr(Attribute::OptimizeForSize);
  FrameIndexSDNode *FI = dyn_cast<FrameIndexSDNode>(Dst);
  if (FI && !MFI->isFixedObjectIndex(FI->getIndex()))
    DstAlignCanChange = true;
  bool IsZeroVal =
    isa<ConstantSDNode>(Src) && cast<ConstantSDNode>(Src)->isNullValue();
  if (!FindOptimalMemOpLowering(MemOps, TLI.getMaxStoresPerMemset(OptSize),
                                Size, (DstAlignCanChange ? 0 : Align), 0,
                                IsZeroVal, false, DAG, TLI))
    return SDValue();

  if (DstAlignCanChange) {
    Type *Ty = MemOps[0].getTypeForEVT(*DAG.getContext());
    unsigned NewAlign = (unsigned) TLI.getTargetData()->getABITypeAlignment(Ty);
    if (NewAlign > Align) {
      // Give the stack frame object a larger alignment if needed.
      if (MFI->getObjectAlignment(FI->getIndex()) < NewAlign)
        MFI->setObjectAlignment(FI->getIndex(), NewAlign);
      Align = NewAlign;
    }
  }

  SmallVector<SDValue, 8> OutChains;
  uint64_t DstOff = 0;
  unsigned NumMemOps = MemOps.size();

  // Find the largest store and generate the bit pattern for it.
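  // (Illustrative example, assuming i32 is the largest legal piece: a 7-byte
  //  memset of 0xAB might use MemOps = {i32, i16, i8}, so LargestVT is i32
  //  and its splat 0xABABABAB is truncated for the smaller trailing stores.)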
  EVT LargestVT = MemOps[0];
  for (unsigned i = 1; i < NumMemOps; i++)
    if (MemOps[i].bitsGT(LargestVT))
      LargestVT = MemOps[i];
  SDValue MemSetValue = getMemsetValue(Src, LargestVT, DAG, dl);

  for (unsigned i = 0; i < NumMemOps; i++) {
    EVT VT = MemOps[i];

    // If this store is smaller than the largest store, see whether we can get
    // the smaller value for free with a truncate.
    SDValue Value = MemSetValue;
    if (VT.bitsLT(LargestVT)) {
      if (!LargestVT.isVector() && !VT.isVector() &&
          TLI.isTruncateFree(LargestVT, VT))
        Value = DAG.getNode(ISD::TRUNCATE, dl, VT, MemSetValue);
      else
        Value = getMemsetValue(Src, VT, DAG, dl);
    }
    assert(Value.getValueType() == VT && "Value with wrong type.");
    SDValue Store = DAG.getStore(Chain, dl, Value,
                                 getMemBasePlusOffset(Dst, DstOff, DAG),
                                 DstPtrInfo.getWithOffset(DstOff),
                                 isVol, false, Align);
    OutChains.push_back(Store);
    DstOff += VT.getSizeInBits() / 8;
  }

  return DAG.getNode(ISD::TokenFactor, dl, MVT::Other,
                     &OutChains[0], OutChains.size());
}

SDValue SelectionDAG::getMemcpy(SDValue Chain, DebugLoc dl, SDValue Dst,
                                SDValue Src, SDValue Size,
                                unsigned Align, bool isVol, bool AlwaysInline,
                                MachinePointerInfo DstPtrInfo,
                                MachinePointerInfo SrcPtrInfo) {

  // Check to see if we should lower the memcpy to loads and stores first.
  // For cases within the target-specified limits, this is the best choice.
  ConstantSDNode *ConstantSize = dyn_cast<ConstantSDNode>(Size);
  if (ConstantSize) {
    // Memcpy with size zero? Just return the original chain.
    if (ConstantSize->isNullValue())
      return Chain;

    SDValue Result = getMemcpyLoadsAndStores(*this, dl, Chain, Dst, Src,
                                             ConstantSize->getZExtValue(),
                                             Align, isVol, false,
                                             DstPtrInfo, SrcPtrInfo);
    if (Result.getNode())
      return Result;
  }

  // Then check to see if we should lower the memcpy with target-specific
  // code. If the target chooses to do this, this is the next best.
  SDValue Result =
    TSI.EmitTargetCodeForMemcpy(*this, dl, Chain, Dst, Src, Size, Align,
                                isVol, AlwaysInline,
                                DstPtrInfo, SrcPtrInfo);
  if (Result.getNode())
    return Result;

  // If we really need inline code and the target declined to provide it,
  // use a (potentially long) sequence of loads and stores.
  if (AlwaysInline) {
    assert(ConstantSize && "AlwaysInline requires a constant size!");
    return getMemcpyLoadsAndStores(*this, dl, Chain, Dst, Src,
                                   ConstantSize->getZExtValue(), Align, isVol,
                                   true, DstPtrInfo, SrcPtrInfo);
  }

  // FIXME: If the memcpy is volatile (isVol), lowering it to a plain libc
  // memcpy is not guaranteed to be safe. libc memcpys aren't required to
  // respect volatile, so they may do things like read or write memory
  // beyond the given memory regions. But fixing this isn't easy, and most
  // people don't care.

  // Emit a library call.
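  // (Clarifying note: the code below builds an ordinary call to the target's
  //  memcpy libcall, passing Dst, Src, and Size as pointer-sized integers
  //  and ignoring the return value.)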
  TargetLowering::ArgListTy Args;
  TargetLowering::ArgListEntry Entry;
  Entry.Ty = TLI.getTargetData()->getIntPtrType(*getContext());
  Entry.Node = Dst; Args.push_back(Entry);
  Entry.Node = Src; Args.push_back(Entry);
  Entry.Node = Size; Args.push_back(Entry);
  // FIXME: pass in DebugLoc
  TargetLowering::
  CallLoweringInfo CLI(Chain, Type::getVoidTy(*getContext()),
                       false, false, false, false, 0,
                       TLI.getLibcallCallingConv(RTLIB::MEMCPY),
                       /*isTailCall=*/false,
                       /*doesNotReturn=*/false, /*isReturnValueUsed=*/false,
                       getExternalSymbol(TLI.getLibcallName(RTLIB::MEMCPY),
                                         TLI.getPointerTy()),
                       Args, *this, dl);
  std::pair<SDValue,SDValue> CallResult = TLI.LowerCallTo(CLI);

  return CallResult.second;
}

SDValue SelectionDAG::getMemmove(SDValue Chain, DebugLoc dl, SDValue Dst,
                                 SDValue Src, SDValue Size,
                                 unsigned Align, bool isVol,
                                 MachinePointerInfo DstPtrInfo,
                                 MachinePointerInfo SrcPtrInfo) {

  // Check to see if we should lower the memmove to loads and stores first.
  // For cases within the target-specified limits, this is the best choice.
  ConstantSDNode *ConstantSize = dyn_cast<ConstantSDNode>(Size);
  if (ConstantSize) {
    // Memmove with size zero? Just return the original chain.
    if (ConstantSize->isNullValue())
      return Chain;

    SDValue Result =
      getMemmoveLoadsAndStores(*this, dl, Chain, Dst, Src,
                               ConstantSize->getZExtValue(), Align, isVol,
                               false, DstPtrInfo, SrcPtrInfo);
    if (Result.getNode())
      return Result;
  }

  // Then check to see if we should lower the memmove with target-specific
  // code. If the target chooses to do this, this is the next best.
  SDValue Result =
    TSI.EmitTargetCodeForMemmove(*this, dl, Chain, Dst, Src, Size, Align,
                                 isVol, DstPtrInfo, SrcPtrInfo);
  if (Result.getNode())
    return Result;

  // FIXME: If the memmove is volatile, lowering it to plain libc memmove may
  // not be safe. See memcpy above for more details.

  // Emit a library call.
  TargetLowering::ArgListTy Args;
  TargetLowering::ArgListEntry Entry;
  Entry.Ty = TLI.getTargetData()->getIntPtrType(*getContext());
  Entry.Node = Dst; Args.push_back(Entry);
  Entry.Node = Src; Args.push_back(Entry);
  Entry.Node = Size; Args.push_back(Entry);
  // FIXME: pass in DebugLoc
  TargetLowering::
  CallLoweringInfo CLI(Chain, Type::getVoidTy(*getContext()),
                       false, false, false, false, 0,
                       TLI.getLibcallCallingConv(RTLIB::MEMMOVE),
                       /*isTailCall=*/false,
                       /*doesNotReturn=*/false, /*isReturnValueUsed=*/false,
                       getExternalSymbol(TLI.getLibcallName(RTLIB::MEMMOVE),
                                         TLI.getPointerTy()),
                       Args, *this, dl);
  std::pair<SDValue,SDValue> CallResult = TLI.LowerCallTo(CLI);

  return CallResult.second;
}

SDValue SelectionDAG::getMemset(SDValue Chain, DebugLoc dl, SDValue Dst,
                                SDValue Src, SDValue Size,
                                unsigned Align, bool isVol,
                                MachinePointerInfo DstPtrInfo) {

  // Check to see if we should lower the memset to stores first.
  // For cases within the target-specified limits, this is the best choice.
  ConstantSDNode *ConstantSize = dyn_cast<ConstantSDNode>(Size);
  if (ConstantSize) {
    // Memset with size zero? Just return the original chain.
    if (ConstantSize->isNullValue())
      return Chain;

    SDValue Result =
      getMemsetStores(*this, dl, Chain, Dst, Src, ConstantSize->getZExtValue(),
                      Align, isVol, DstPtrInfo);

    if (Result.getNode())
      return Result;
  }

  // Then check to see if we should lower the memset with target-specific
  // code. If the target chooses to do this, this is the next best.
  SDValue Result =
    TSI.EmitTargetCodeForMemset(*this, dl, Chain, Dst, Src, Size, Align,
                                isVol, DstPtrInfo);
  if (Result.getNode())
    return Result;

  // Emit a library call.
  Type *IntPtrTy = TLI.getTargetData()->getIntPtrType(*getContext());
  TargetLowering::ArgListTy Args;
  TargetLowering::ArgListEntry Entry;
  Entry.Node = Dst; Entry.Ty = IntPtrTy;
  Args.push_back(Entry);
  // Extend or truncate the argument to be an i32 value for the call.
  if (Src.getValueType().bitsGT(MVT::i32))
    Src = getNode(ISD::TRUNCATE, dl, MVT::i32, Src);
  else
    Src = getNode(ISD::ZERO_EXTEND, dl, MVT::i32, Src);
  Entry.Node = Src;
  Entry.Ty = Type::getInt32Ty(*getContext());
  Entry.isSExt = true;
  Args.push_back(Entry);
  Entry.Node = Size;
  Entry.Ty = IntPtrTy;
  Entry.isSExt = false;
  Args.push_back(Entry);
  // FIXME: pass in DebugLoc
  TargetLowering::
  CallLoweringInfo CLI(Chain, Type::getVoidTy(*getContext()),
                       false, false, false, false, 0,
                       TLI.getLibcallCallingConv(RTLIB::MEMSET),
                       /*isTailCall=*/false,
                       /*doesNotReturn=*/false, /*isReturnValueUsed=*/false,
                       getExternalSymbol(TLI.getLibcallName(RTLIB::MEMSET),
                                         TLI.getPointerTy()),
                       Args, *this, dl);
  std::pair<SDValue,SDValue> CallResult = TLI.LowerCallTo(CLI);

  return CallResult.second;
}

SDValue SelectionDAG::getAtomic(unsigned Opcode, DebugLoc dl, EVT MemVT,
                                SDValue Chain, SDValue Ptr, SDValue Cmp,
                                SDValue Swp, MachinePointerInfo PtrInfo,
                                unsigned Alignment,
                                AtomicOrdering Ordering,
                                SynchronizationScope SynchScope) {
  if (Alignment == 0)  // Ensure that codegen never sees alignment 0
    Alignment = getEVTAlignment(MemVT);

  MachineFunction &MF = getMachineFunction();
  unsigned Flags = MachineMemOperand::MOLoad | MachineMemOperand::MOStore;

  // For now, atomics are considered to be volatile always.
  // FIXME: Volatile isn't really correct; we should keep track of atomic
  // orderings in the memoperand.
  Flags |= MachineMemOperand::MOVolatile;

  MachineMemOperand *MMO =
    MF.getMachineMemOperand(PtrInfo, Flags, MemVT.getStoreSize(), Alignment);

  return getAtomic(Opcode, dl, MemVT, Chain, Ptr, Cmp, Swp, MMO,
                   Ordering, SynchScope);
}

SDValue SelectionDAG::getAtomic(unsigned Opcode, DebugLoc dl, EVT MemVT,
                                SDValue Chain,
                                SDValue Ptr, SDValue Cmp,
                                SDValue Swp, MachineMemOperand *MMO,
                                AtomicOrdering Ordering,
                                SynchronizationScope SynchScope) {
  assert(Opcode == ISD::ATOMIC_CMP_SWAP && "Invalid Atomic Op");
  assert(Cmp.getValueType() == Swp.getValueType() && "Invalid Atomic Op Types");

  EVT VT = Cmp.getValueType();

  SDVTList VTs = getVTList(VT, MVT::Other);
  FoldingSetNodeID ID;
  ID.AddInteger(MemVT.getRawBits());
  SDValue Ops[] = {Chain, Ptr, Cmp, Swp};
  AddNodeIDNode(ID, Opcode, VTs, Ops, 4);
  void* IP = 0;
  if (SDNode *E = CSEMap.FindNodeOrInsertPos(ID, IP)) {
    cast<AtomicSDNode>(E)->refineAlignment(MMO);
    return SDValue(E, 0);
  }
  SDNode *N = new (NodeAllocator) AtomicSDNode(Opcode, dl, VTs, MemVT, Chain,
                                               Ptr, Cmp, Swp, MMO, Ordering,
                                               SynchScope);
  CSEMap.InsertNode(N, IP);
  AllNodes.push_back(N);
  return SDValue(N, 0);
}

SDValue SelectionDAG::getAtomic(unsigned Opcode, DebugLoc dl, EVT MemVT,
                                SDValue Chain,
                                SDValue Ptr, SDValue Val,
                                const Value* PtrVal,
                                unsigned Alignment,
                                AtomicOrdering Ordering,
                                SynchronizationScope SynchScope) {
  if (Alignment == 0)  // Ensure that codegen never sees alignment 0
    Alignment = getEVTAlignment(MemVT);

  MachineFunction &MF = getMachineFunction();
  // A monotonic store does not load; a release store "loads" in the sense
  // that other stores cannot be sunk past it.
  // (An atomicrmw obviously both loads and stores.)
  unsigned Flags = MachineMemOperand::MOStore;
  if (Opcode != ISD::ATOMIC_STORE || Ordering > Monotonic)
    Flags |= MachineMemOperand::MOLoad;

  // For now, atomics are considered to be volatile always.
  // FIXME: Volatile isn't really correct; we should keep track of atomic
  // orderings in the memoperand.
  Flags |= MachineMemOperand::MOVolatile;

  MachineMemOperand *MMO =
    MF.getMachineMemOperand(MachinePointerInfo(PtrVal), Flags,
                            MemVT.getStoreSize(), Alignment);

  return getAtomic(Opcode, dl, MemVT, Chain, Ptr, Val, MMO,
                   Ordering, SynchScope);
}

SDValue SelectionDAG::getAtomic(unsigned Opcode, DebugLoc dl, EVT MemVT,
                                SDValue Chain,
                                SDValue Ptr, SDValue Val,
                                MachineMemOperand *MMO,
                                AtomicOrdering Ordering,
                                SynchronizationScope SynchScope) {
  assert((Opcode == ISD::ATOMIC_LOAD_ADD ||
          Opcode == ISD::ATOMIC_LOAD_SUB ||
          Opcode == ISD::ATOMIC_LOAD_AND ||
          Opcode == ISD::ATOMIC_LOAD_OR ||
          Opcode == ISD::ATOMIC_LOAD_XOR ||
          Opcode == ISD::ATOMIC_LOAD_NAND ||
          Opcode == ISD::ATOMIC_LOAD_MIN ||
          Opcode == ISD::ATOMIC_LOAD_MAX ||
          Opcode == ISD::ATOMIC_LOAD_UMIN ||
          Opcode == ISD::ATOMIC_LOAD_UMAX ||
          Opcode == ISD::ATOMIC_SWAP ||
          Opcode == ISD::ATOMIC_STORE) &&
         "Invalid Atomic Op");

  EVT VT = Val.getValueType();

  SDVTList VTs = Opcode == ISD::ATOMIC_STORE ?
                 getVTList(MVT::Other) :
                 getVTList(VT, MVT::Other);
  FoldingSetNodeID ID;
  ID.AddInteger(MemVT.getRawBits());
  SDValue Ops[] = {Chain, Ptr, Val};
  AddNodeIDNode(ID, Opcode, VTs, Ops, 3);
  void* IP = 0;
  if (SDNode *E = CSEMap.FindNodeOrInsertPos(ID, IP)) {
    cast<AtomicSDNode>(E)->refineAlignment(MMO);
    return SDValue(E, 0);
  }
  SDNode *N = new (NodeAllocator) AtomicSDNode(Opcode, dl, VTs, MemVT, Chain,
                                               Ptr, Val, MMO,
                                               Ordering, SynchScope);
  CSEMap.InsertNode(N, IP);
  AllNodes.push_back(N);
  return SDValue(N, 0);
}

SDValue SelectionDAG::getAtomic(unsigned Opcode, DebugLoc dl, EVT MemVT,
                                EVT VT, SDValue Chain,
                                SDValue Ptr,
                                const Value* PtrVal,
                                unsigned Alignment,
                                AtomicOrdering Ordering,
                                SynchronizationScope SynchScope) {
  if (Alignment == 0)  // Ensure that codegen never sees alignment 0
    Alignment = getEVTAlignment(MemVT);

  MachineFunction &MF = getMachineFunction();
  // A monotonic load does not store; an acquire load "stores" in the sense
  // that other loads cannot be hoisted past it.
  unsigned Flags = MachineMemOperand::MOLoad;
  if (Ordering > Monotonic)
    Flags |= MachineMemOperand::MOStore;

  // For now, atomics are considered to be volatile always.
  // FIXME: Volatile isn't really correct; we should keep track of atomic
  // orderings in the memoperand.
  Flags |= MachineMemOperand::MOVolatile;

  MachineMemOperand *MMO =
    MF.getMachineMemOperand(MachinePointerInfo(PtrVal), Flags,
                            MemVT.getStoreSize(), Alignment);

  return getAtomic(Opcode, dl, MemVT, VT, Chain, Ptr, MMO,
                   Ordering, SynchScope);
}

SDValue SelectionDAG::getAtomic(unsigned Opcode, DebugLoc dl, EVT MemVT,
                                EVT VT, SDValue Chain,
                                SDValue Ptr,
                                MachineMemOperand *MMO,
                                AtomicOrdering Ordering,
                                SynchronizationScope SynchScope) {
  assert(Opcode == ISD::ATOMIC_LOAD && "Invalid Atomic Op");

  SDVTList VTs = getVTList(VT, MVT::Other);
  FoldingSetNodeID ID;
  ID.AddInteger(MemVT.getRawBits());
  SDValue Ops[] = {Chain, Ptr};
  AddNodeIDNode(ID, Opcode, VTs, Ops, 2);
  void* IP = 0;
  if (SDNode *E = CSEMap.FindNodeOrInsertPos(ID, IP)) {
    cast<AtomicSDNode>(E)->refineAlignment(MMO);
    return SDValue(E, 0);
  }
  SDNode *N = new (NodeAllocator) AtomicSDNode(Opcode, dl, VTs, MemVT, Chain,
                                               Ptr, MMO, Ordering, SynchScope);
  CSEMap.InsertNode(N, IP);
  AllNodes.push_back(N);
  return SDValue(N, 0);
}

/// getMergeValues - Create a MERGE_VALUES node from the given operands.
SDValue SelectionDAG::getMergeValues(const SDValue *Ops, unsigned NumOps,
                                     DebugLoc dl) {
  if (NumOps == 1)
    return Ops[0];

  SmallVector<EVT, 4> VTs;
  VTs.reserve(NumOps);
  for (unsigned i = 0; i < NumOps; ++i)
    VTs.push_back(Ops[i].getValueType());
  return getNode(ISD::MERGE_VALUES, dl, getVTList(&VTs[0], NumOps),
                 Ops, NumOps);
}

SDValue
SelectionDAG::getMemIntrinsicNode(unsigned Opcode, DebugLoc dl,
                                  const EVT *VTs, unsigned NumVTs,
                                  const SDValue *Ops, unsigned NumOps,
                                  EVT MemVT, MachinePointerInfo PtrInfo,
                                  unsigned Align, bool Vol,
                                  bool ReadMem, bool WriteMem) {
  return getMemIntrinsicNode(Opcode, dl, makeVTList(VTs, NumVTs), Ops, NumOps,
                             MemVT, PtrInfo, Align, Vol,
                             ReadMem, WriteMem);
}

SDValue
SelectionDAG::getMemIntrinsicNode(unsigned Opcode, DebugLoc dl, SDVTList VTList,
                                  const SDValue *Ops, unsigned NumOps,
                                  EVT MemVT, MachinePointerInfo PtrInfo,
                                  unsigned Align, bool Vol,
                                  bool ReadMem, bool WriteMem) {
  if (Align == 0)  // Ensure that codegen never sees alignment 0
    Align = getEVTAlignment(MemVT);

  MachineFunction &MF = getMachineFunction();
  unsigned Flags = 0;
  if (WriteMem)
    Flags |= MachineMemOperand::MOStore;
  if (ReadMem)
    Flags |= MachineMemOperand::MOLoad;
  if (Vol)
    Flags |= MachineMemOperand::MOVolatile;
  MachineMemOperand *MMO =
    MF.getMachineMemOperand(PtrInfo, Flags, MemVT.getStoreSize(), Align);

  return getMemIntrinsicNode(Opcode, dl, VTList, Ops, NumOps, MemVT, MMO);
}

SDValue
SelectionDAG::getMemIntrinsicNode(unsigned Opcode, DebugLoc dl, SDVTList VTList,
                                  const SDValue *Ops, unsigned NumOps,
                                  EVT MemVT, MachineMemOperand *MMO) {
  assert((Opcode == ISD::INTRINSIC_VOID ||
          Opcode == ISD::INTRINSIC_W_CHAIN ||
          Opcode == ISD::PREFETCH ||
          (Opcode <= INT_MAX &&
           (int)Opcode >= ISD::FIRST_TARGET_MEMORY_OPCODE)) &&
         "Opcode is not a memory-accessing opcode!");

  // Memoize the node unless it returns a flag.
  MemIntrinsicSDNode *N;
  if (VTList.VTs[VTList.NumVTs-1] != MVT::Glue) {
    FoldingSetNodeID ID;
    AddNodeIDNode(ID, Opcode, VTList, Ops, NumOps);
    void *IP = 0;
    if (SDNode *E = CSEMap.FindNodeOrInsertPos(ID, IP)) {
      cast<MemIntrinsicSDNode>(E)->refineAlignment(MMO);
      return SDValue(E, 0);
    }

    N = new (NodeAllocator) MemIntrinsicSDNode(Opcode, dl, VTList, Ops, NumOps,
                                               MemVT, MMO);
    CSEMap.InsertNode(N, IP);
  } else {
    N = new (NodeAllocator) MemIntrinsicSDNode(Opcode, dl, VTList, Ops, NumOps,
                                               MemVT, MMO);
  }
  AllNodes.push_back(N);
  return SDValue(N, 0);
}

/// InferPointerInfo - If the specified ptr/offset is a frame index, infer a
/// MachinePointerInfo record from it. This is particularly useful because the
/// code generator has many cases where it doesn't bother passing in a
/// MachinePointerInfo to getLoad or getStore when it has "FI+Cst".
static MachinePointerInfo InferPointerInfo(SDValue Ptr, int64_t Offset = 0) {
  // If this is FI+Offset, we can model it.
  if (const FrameIndexSDNode *FI = dyn_cast<FrameIndexSDNode>(Ptr))
    return MachinePointerInfo::getFixedStack(FI->getIndex(), Offset);

  // If this is (FI+Offset1)+Offset2, we can model it.
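  // (Illustrative example: Ptr = (add (FrameIndex 2), 16) with Offset == 4
  //  yields a fixed-stack pointer for FI#2 at byte offset 20; the frame
  //  index and constants here are hypothetical.)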
  if (Ptr.getOpcode() != ISD::ADD ||
      !isa<ConstantSDNode>(Ptr.getOperand(1)) ||
      !isa<FrameIndexSDNode>(Ptr.getOperand(0)))
    return MachinePointerInfo();

  int FI = cast<FrameIndexSDNode>(Ptr.getOperand(0))->getIndex();
  return MachinePointerInfo::getFixedStack(FI, Offset+
                       cast<ConstantSDNode>(Ptr.getOperand(1))->getSExtValue());
}

/// InferPointerInfo - If the specified ptr/offset is a frame index, infer a
/// MachinePointerInfo record from it. This is particularly useful because the
/// code generator has many cases where it doesn't bother passing in a
/// MachinePointerInfo to getLoad or getStore when it has "FI+Cst".
static MachinePointerInfo InferPointerInfo(SDValue Ptr, SDValue OffsetOp) {
  // If the 'Offset' value isn't a constant, we can't handle this.
  if (ConstantSDNode *OffsetNode = dyn_cast<ConstantSDNode>(OffsetOp))
    return InferPointerInfo(Ptr, OffsetNode->getSExtValue());
  if (OffsetOp.getOpcode() == ISD::UNDEF)
    return InferPointerInfo(Ptr);
  return MachinePointerInfo();
}


SDValue
SelectionDAG::getLoad(ISD::MemIndexedMode AM, ISD::LoadExtType ExtType,
                      EVT VT, DebugLoc dl, SDValue Chain,
                      SDValue Ptr, SDValue Offset,
                      MachinePointerInfo PtrInfo, EVT MemVT,
                      bool isVolatile, bool isNonTemporal, bool isInvariant,
                      unsigned Alignment, const MDNode *TBAAInfo,
                      const MDNode *Ranges) {
  assert(Chain.getValueType() == MVT::Other &&
         "Invalid chain type");
  if (Alignment == 0)  // Ensure that codegen never sees alignment 0
    Alignment = getEVTAlignment(VT);

  unsigned Flags = MachineMemOperand::MOLoad;
  if (isVolatile)
    Flags |= MachineMemOperand::MOVolatile;
  if (isNonTemporal)
    Flags |= MachineMemOperand::MONonTemporal;
  if (isInvariant)
    Flags |= MachineMemOperand::MOInvariant;

  // If we don't have a PtrInfo, infer the trivial frame index case to simplify
  // clients.
  if (PtrInfo.V == 0)
    PtrInfo = InferPointerInfo(Ptr, Offset);

  MachineFunction &MF = getMachineFunction();
  MachineMemOperand *MMO =
    MF.getMachineMemOperand(PtrInfo, Flags, MemVT.getStoreSize(), Alignment,
                            TBAAInfo, Ranges);
  return getLoad(AM, ExtType, VT, dl, Chain, Ptr, Offset, MemVT, MMO);
}

SDValue
SelectionDAG::getLoad(ISD::MemIndexedMode AM, ISD::LoadExtType ExtType,
                      EVT VT, DebugLoc dl, SDValue Chain,
                      SDValue Ptr, SDValue Offset, EVT MemVT,
                      MachineMemOperand *MMO) {
  if (VT == MemVT) {
    ExtType = ISD::NON_EXTLOAD;
  } else if (ExtType == ISD::NON_EXTLOAD) {
    assert(VT == MemVT && "Non-extending load from different memory type!");
  } else {
    // Extending load.
    assert(MemVT.getScalarType().bitsLT(VT.getScalarType()) &&
           "Should only be an extending load, not truncating!");
    assert(VT.isInteger() == MemVT.isInteger() &&
           "Cannot convert from FP to Int or Int -> FP!");
    assert(VT.isVector() == MemVT.isVector() &&
           "Cannot use trunc store to convert to or from a vector!");
    assert((!VT.isVector() ||
            VT.getVectorNumElements() == MemVT.getVectorNumElements()) &&
           "Cannot use trunc store to change the number of vector elements!");
  }

  bool Indexed = AM != ISD::UNINDEXED;
  assert((Indexed || Offset.getOpcode() == ISD::UNDEF) &&
         "Unindexed load with an offset!");

  SDVTList VTs = Indexed ?
    getVTList(VT, Ptr.getValueType(), MVT::Other) : getVTList(VT, MVT::Other);
  SDValue Ops[] = { Chain, Ptr, Offset };
  FoldingSetNodeID ID;
  AddNodeIDNode(ID, ISD::LOAD, VTs, Ops, 3);
  ID.AddInteger(MemVT.getRawBits());
  ID.AddInteger(encodeMemSDNodeFlags(ExtType, AM, MMO->isVolatile(),
                                     MMO->isNonTemporal(),
                                     MMO->isInvariant()));
  void *IP = 0;
  if (SDNode *E = CSEMap.FindNodeOrInsertPos(ID, IP)) {
    cast<LoadSDNode>(E)->refineAlignment(MMO);
    return SDValue(E, 0);
  }
  SDNode *N = new (NodeAllocator) LoadSDNode(Ops, dl, VTs, AM, ExtType,
                                             MemVT, MMO);
  CSEMap.InsertNode(N, IP);
  AllNodes.push_back(N);
  return SDValue(N, 0);
}

SDValue SelectionDAG::getLoad(EVT VT, DebugLoc dl,
                              SDValue Chain, SDValue Ptr,
                              MachinePointerInfo PtrInfo,
                              bool isVolatile, bool isNonTemporal,
                              bool isInvariant, unsigned Alignment,
                              const MDNode *TBAAInfo,
                              const MDNode *Ranges) {
  SDValue Undef = getUNDEF(Ptr.getValueType());
  return getLoad(ISD::UNINDEXED, ISD::NON_EXTLOAD, VT, dl, Chain, Ptr, Undef,
                 PtrInfo, VT, isVolatile, isNonTemporal, isInvariant, Alignment,
                 TBAAInfo, Ranges);
}

SDValue SelectionDAG::getExtLoad(ISD::LoadExtType ExtType, DebugLoc dl, EVT VT,
                                 SDValue Chain, SDValue Ptr,
                                 MachinePointerInfo PtrInfo, EVT MemVT,
                                 bool isVolatile, bool isNonTemporal,
                                 unsigned Alignment, const MDNode *TBAAInfo) {
  SDValue Undef = getUNDEF(Ptr.getValueType());
  return getLoad(ISD::UNINDEXED, ExtType, VT, dl, Chain, Ptr, Undef,
                 PtrInfo, MemVT, isVolatile, isNonTemporal, false, Alignment,
                 TBAAInfo);
}


SDValue
SelectionDAG::getIndexedLoad(SDValue OrigLoad, DebugLoc dl, SDValue Base,
                             SDValue Offset, ISD::MemIndexedMode AM) {
  LoadSDNode *LD = cast<LoadSDNode>(OrigLoad);
  assert(LD->getOffset().getOpcode() == ISD::UNDEF &&
         "Load is already an indexed load!");
  return getLoad(AM, LD->getExtensionType(), OrigLoad.getValueType(), dl,
                 LD->getChain(), Base, Offset, LD->getPointerInfo(),
                 LD->getMemoryVT(), LD->isVolatile(), LD->isNonTemporal(),
                 false, LD->getAlignment());
}

SDValue SelectionDAG::getStore(SDValue Chain, DebugLoc dl, SDValue Val,
                               SDValue Ptr, MachinePointerInfo PtrInfo,
                               bool isVolatile, bool isNonTemporal,
                               unsigned Alignment, const MDNode *TBAAInfo) {
  assert(Chain.getValueType() == MVT::Other &&
         "Invalid chain type");
  if (Alignment == 0)  // Ensure that codegen never sees alignment 0
    Alignment = getEVTAlignment(Val.getValueType());

  unsigned Flags = MachineMemOperand::MOStore;
  if (isVolatile)
    Flags |= MachineMemOperand::MOVolatile;
  if (isNonTemporal)
    Flags |= MachineMemOperand::MONonTemporal;

  if (PtrInfo.V == 0)
    PtrInfo = InferPointerInfo(Ptr);

  MachineFunction &MF = getMachineFunction();
  MachineMemOperand *MMO =
    MF.getMachineMemOperand(PtrInfo, Flags,
                            Val.getValueType().getStoreSize(), Alignment,
                            TBAAInfo);

  return getStore(Chain, dl, Val, Ptr, MMO);
}

SDValue SelectionDAG::getStore(SDValue Chain, DebugLoc dl, SDValue Val,
                               SDValue Ptr, MachineMemOperand *MMO) {
  assert(Chain.getValueType() == MVT::Other &&
         "Invalid chain type");
  EVT VT = Val.getValueType();
  SDVTList VTs = getVTList(MVT::Other);
  SDValue Undef = getUNDEF(Ptr.getValueType());
  SDValue Ops[] = { Chain, Val, Ptr, Undef };
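  // (Clarifying note: unindexed stores carry an undef offset operand so that
  //  indexed and unindexed stores share one operand layout.)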
  FoldingSetNodeID ID;
  AddNodeIDNode(ID, ISD::STORE, VTs, Ops, 4);
  ID.AddInteger(VT.getRawBits());
  ID.AddInteger(encodeMemSDNodeFlags(false, ISD::UNINDEXED, MMO->isVolatile(),
                                     MMO->isNonTemporal(), MMO->isInvariant()));
  void *IP = 0;
  if (SDNode *E = CSEMap.FindNodeOrInsertPos(ID, IP)) {
    cast<StoreSDNode>(E)->refineAlignment(MMO);
    return SDValue(E, 0);
  }
  SDNode *N = new (NodeAllocator) StoreSDNode(Ops, dl, VTs, ISD::UNINDEXED,
                                              false, VT, MMO);
  CSEMap.InsertNode(N, IP);
  AllNodes.push_back(N);
  return SDValue(N, 0);
}

SDValue SelectionDAG::getTruncStore(SDValue Chain, DebugLoc dl, SDValue Val,
                                    SDValue Ptr, MachinePointerInfo PtrInfo,
                                    EVT SVT, bool isVolatile,
                                    bool isNonTemporal, unsigned Alignment,
                                    const MDNode *TBAAInfo) {
  assert(Chain.getValueType() == MVT::Other &&
         "Invalid chain type");
  if (Alignment == 0)  // Ensure that codegen never sees alignment 0
    Alignment = getEVTAlignment(SVT);

  unsigned Flags = MachineMemOperand::MOStore;
  if (isVolatile)
    Flags |= MachineMemOperand::MOVolatile;
  if (isNonTemporal)
    Flags |= MachineMemOperand::MONonTemporal;

  if (PtrInfo.V == 0)
    PtrInfo = InferPointerInfo(Ptr);

  MachineFunction &MF = getMachineFunction();
  MachineMemOperand *MMO =
    MF.getMachineMemOperand(PtrInfo, Flags, SVT.getStoreSize(), Alignment,
                            TBAAInfo);

  return getTruncStore(Chain, dl, Val, Ptr, SVT, MMO);
}

SDValue SelectionDAG::getTruncStore(SDValue Chain, DebugLoc dl, SDValue Val,
                                    SDValue Ptr, EVT SVT,
                                    MachineMemOperand *MMO) {
  EVT VT = Val.getValueType();

  assert(Chain.getValueType() == MVT::Other &&
         "Invalid chain type");
  if (VT == SVT)
    return getStore(Chain, dl, Val, Ptr, MMO);

  assert(SVT.getScalarType().bitsLT(VT.getScalarType()) &&
         "Should only be a truncating store, not extending!");
  assert(VT.isInteger() == SVT.isInteger() &&
         "Can't do FP-INT conversion!");
  assert(VT.isVector() == SVT.isVector() &&
         "Cannot use trunc store to convert to or from a vector!");
  assert((!VT.isVector() ||
          VT.getVectorNumElements() == SVT.getVectorNumElements()) &&
         "Cannot use trunc store to change the number of vector elements!");

  SDVTList VTs = getVTList(MVT::Other);
  SDValue Undef = getUNDEF(Ptr.getValueType());
  SDValue Ops[] = { Chain, Val, Ptr, Undef };
  FoldingSetNodeID ID;
  AddNodeIDNode(ID, ISD::STORE, VTs, Ops, 4);
  ID.AddInteger(SVT.getRawBits());
  ID.AddInteger(encodeMemSDNodeFlags(true, ISD::UNINDEXED, MMO->isVolatile(),
                                     MMO->isNonTemporal(), MMO->isInvariant()));
  void *IP = 0;
  if (SDNode *E = CSEMap.FindNodeOrInsertPos(ID, IP)) {
    cast<StoreSDNode>(E)->refineAlignment(MMO);
    return SDValue(E, 0);
  }
  SDNode *N = new (NodeAllocator) StoreSDNode(Ops, dl, VTs, ISD::UNINDEXED,
                                              true, SVT, MMO);
  CSEMap.InsertNode(N, IP);
  AllNodes.push_back(N);
  return SDValue(N, 0);
}

SDValue
SelectionDAG::getIndexedStore(SDValue OrigStore, DebugLoc dl, SDValue Base,
                              SDValue Offset, ISD::MemIndexedMode AM) {
  StoreSDNode *ST = cast<StoreSDNode>(OrigStore);
  assert(ST->getOffset().getOpcode() == ISD::UNDEF &&
         "Store is already an indexed store!");
  SDVTList VTs = getVTList(Base.getValueType(), MVT::Other);
  SDValue Ops[] = { ST->getChain(), ST->getValue(), Base, Offset };
  FoldingSetNodeID ID;
  AddNodeIDNode(ID, ISD::STORE, VTs, Ops, 4);
  ID.AddInteger(ST->getMemoryVT().getRawBits());
  ID.AddInteger(ST->getRawSubclassData());
  void *IP = 0;
  if (SDNode *E = CSEMap.FindNodeOrInsertPos(ID, IP))
    return SDValue(E, 0);

  SDNode *N = new (NodeAllocator) StoreSDNode(Ops, dl, VTs, AM,
                                              ST->isTruncatingStore(),
                                              ST->getMemoryVT(),
                                              ST->getMemOperand());
  CSEMap.InsertNode(N, IP);
  AllNodes.push_back(N);
  return SDValue(N, 0);
}

SDValue SelectionDAG::getVAArg(EVT VT, DebugLoc dl,
                               SDValue Chain, SDValue Ptr,
                               SDValue SV,
                               unsigned Align) {
  SDValue Ops[] = { Chain, Ptr, SV, getTargetConstant(Align, MVT::i32) };
  return getNode(ISD::VAARG, dl, getVTList(VT, MVT::Other), Ops, 4);
}

SDValue SelectionDAG::getNode(unsigned Opcode, DebugLoc DL, EVT VT,
                              const SDUse *Ops, unsigned NumOps) {
  switch (NumOps) {
  case 0: return getNode(Opcode, DL, VT);
  case 1: return getNode(Opcode, DL, VT, Ops[0]);
  case 2: return getNode(Opcode, DL, VT, Ops[0], Ops[1]);
  case 3: return getNode(Opcode, DL, VT, Ops[0], Ops[1], Ops[2]);
  default: break;
  }

  // Copy from an SDUse array into an SDValue array for use with
  // the regular getNode logic.
  SmallVector<SDValue, 8> NewOps(Ops, Ops + NumOps);
  return getNode(Opcode, DL, VT, &NewOps[0], NumOps);
}

SDValue SelectionDAG::getNode(unsigned Opcode, DebugLoc DL, EVT VT,
                              const SDValue *Ops, unsigned NumOps) {
  switch (NumOps) {
  case 0: return getNode(Opcode, DL, VT);
  case 1: return getNode(Opcode, DL, VT, Ops[0]);
  case 2: return getNode(Opcode, DL, VT, Ops[0], Ops[1]);
  case 3: return getNode(Opcode, DL, VT, Ops[0], Ops[1], Ops[2]);
  default: break;
  }

  switch (Opcode) {
  default: break;
  case ISD::SELECT_CC: {
    assert(NumOps == 5 && "SELECT_CC takes 5 operands!");
    assert(Ops[0].getValueType() == Ops[1].getValueType() &&
           "LHS and RHS of condition must have same type!");
    assert(Ops[2].getValueType() == Ops[3].getValueType() &&
           "True and False arms of SelectCC must have same type!");
    assert(Ops[2].getValueType() == VT &&
           "select_cc node must be of same type as true and false value!");
    break;
  }
  case ISD::BR_CC: {
    assert(NumOps == 5 && "BR_CC takes 5 operands!");
    assert(Ops[2].getValueType() == Ops[3].getValueType() &&
           "LHS/RHS of comparison should match types!");
    break;
  }
  }

  // Memoize nodes.
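  // (Clarifying note: "memoizing" here is common-subexpression elimination.
  //  A second request for an identical opcode/VT/operand combination returns
  //  the existing node found in CSEMap instead of allocating a new one.)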
  SDNode *N;
  SDVTList VTs = getVTList(VT);

  if (VT != MVT::Glue) {
    FoldingSetNodeID ID;
    AddNodeIDNode(ID, Opcode, VTs, Ops, NumOps);
    void *IP = 0;

    if (SDNode *E = CSEMap.FindNodeOrInsertPos(ID, IP))
      return SDValue(E, 0);

    N = new (NodeAllocator) SDNode(Opcode, DL, VTs, Ops, NumOps);
    CSEMap.InsertNode(N, IP);
  } else {
    N = new (NodeAllocator) SDNode(Opcode, DL, VTs, Ops, NumOps);
  }

  AllNodes.push_back(N);
#ifndef NDEBUG
  VerifySDNode(N);
#endif
  return SDValue(N, 0);
}

SDValue SelectionDAG::getNode(unsigned Opcode, DebugLoc DL,
                              const std::vector<EVT> &ResultTys,
                              const SDValue *Ops, unsigned NumOps) {
  return getNode(Opcode, DL, getVTList(&ResultTys[0], ResultTys.size()),
                 Ops, NumOps);
}

SDValue SelectionDAG::getNode(unsigned Opcode, DebugLoc DL,
                              const EVT *VTs, unsigned NumVTs,
                              const SDValue *Ops, unsigned NumOps) {
  if (NumVTs == 1)
    return getNode(Opcode, DL, VTs[0], Ops, NumOps);
  return getNode(Opcode, DL, makeVTList(VTs, NumVTs), Ops, NumOps);
}

SDValue SelectionDAG::getNode(unsigned Opcode, DebugLoc DL, SDVTList VTList,
                              const SDValue *Ops, unsigned NumOps) {
  if (VTList.NumVTs == 1)
    return getNode(Opcode, DL, VTList.VTs[0], Ops, NumOps);

#if 0
  switch (Opcode) {
  // FIXME: figure out how to safely handle things like
  // int foo(int x) { return 1 << (x & 255); }
  // int bar() { return foo(256); }
  case ISD::SRA_PARTS:
  case ISD::SRL_PARTS:
  case ISD::SHL_PARTS:
    if (N3.getOpcode() == ISD::SIGN_EXTEND_INREG &&
        cast<VTSDNode>(N3.getOperand(1))->getVT() != MVT::i1)
      return getNode(Opcode, DL, VT, N1, N2, N3.getOperand(0));
    else if (N3.getOpcode() == ISD::AND)
      if (ConstantSDNode *AndRHS = dyn_cast<ConstantSDNode>(N3.getOperand(1))) {
        // If the and is only masking out bits that cannot affect the shift,
        // eliminate the and.
        unsigned NumBits = VT.getScalarType().getSizeInBits()*2;
        if ((AndRHS->getValue() & (NumBits-1)) == NumBits-1)
          return getNode(Opcode, DL, VT, N1, N2, N3.getOperand(0));
      }
    break;
  }
#endif

  // Memoize the node unless it returns a flag.
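  // (Clarifying note, an assumption about the design: nodes whose last result
  //  is MVT::Glue are deliberately left out of CSEMap, since glue ties a node
  //  to specific neighbors and such nodes must stay distinct.)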
  SDNode *N;
  if (VTList.VTs[VTList.NumVTs-1] != MVT::Glue) {
    FoldingSetNodeID ID;
    AddNodeIDNode(ID, Opcode, VTList, Ops, NumOps);
    void *IP = 0;
    if (SDNode *E = CSEMap.FindNodeOrInsertPos(ID, IP))
      return SDValue(E, 0);

    if (NumOps == 1) {
      N = new (NodeAllocator) UnarySDNode(Opcode, DL, VTList, Ops[0]);
    } else if (NumOps == 2) {
      N = new (NodeAllocator) BinarySDNode(Opcode, DL, VTList, Ops[0], Ops[1]);
    } else if (NumOps == 3) {
      N = new (NodeAllocator) TernarySDNode(Opcode, DL, VTList, Ops[0], Ops[1],
                                            Ops[2]);
    } else {
      N = new (NodeAllocator) SDNode(Opcode, DL, VTList, Ops, NumOps);
    }
    CSEMap.InsertNode(N, IP);
  } else {
    if (NumOps == 1) {
      N = new (NodeAllocator) UnarySDNode(Opcode, DL, VTList, Ops[0]);
    } else if (NumOps == 2) {
      N = new (NodeAllocator) BinarySDNode(Opcode, DL, VTList, Ops[0], Ops[1]);
    } else if (NumOps == 3) {
      N = new (NodeAllocator) TernarySDNode(Opcode, DL, VTList, Ops[0], Ops[1],
                                            Ops[2]);
    } else {
      N = new (NodeAllocator) SDNode(Opcode, DL, VTList, Ops, NumOps);
    }
  }
  AllNodes.push_back(N);
#ifndef NDEBUG
  VerifySDNode(N);
#endif
  return SDValue(N, 0);
}

SDValue SelectionDAG::getNode(unsigned Opcode, DebugLoc DL, SDVTList VTList) {
  return getNode(Opcode, DL, VTList, 0, 0);
}

SDValue SelectionDAG::getNode(unsigned Opcode, DebugLoc DL, SDVTList VTList,
                              SDValue N1) {
  SDValue Ops[] = { N1 };
  return getNode(Opcode, DL, VTList, Ops, 1);
}

SDValue SelectionDAG::getNode(unsigned Opcode, DebugLoc DL, SDVTList VTList,
                              SDValue N1, SDValue N2) {
  SDValue Ops[] = { N1, N2 };
  return getNode(Opcode, DL, VTList, Ops, 2);
}

SDValue SelectionDAG::getNode(unsigned Opcode, DebugLoc DL, SDVTList VTList,
                              SDValue N1, SDValue N2, SDValue N3) {
  SDValue Ops[] = { N1, N2, N3 };
  return getNode(Opcode, DL, VTList, Ops, 3);
}

SDValue SelectionDAG::getNode(unsigned Opcode, DebugLoc DL, SDVTList VTList,
                              SDValue N1, SDValue N2, SDValue N3,
                              SDValue N4) {
  SDValue Ops[] = { N1, N2, N3, N4 };
  return getNode(Opcode, DL, VTList, Ops, 4);
}

SDValue SelectionDAG::getNode(unsigned Opcode, DebugLoc DL, SDVTList VTList,
                              SDValue N1, SDValue N2, SDValue N3,
                              SDValue N4, SDValue N5) {
  SDValue Ops[] = { N1, N2, N3, N4, N5 };
  return getNode(Opcode, DL, VTList, Ops, 5);
}

SDVTList SelectionDAG::getVTList(EVT VT) {
  return makeVTList(SDNode::getValueTypeList(VT), 1);
}

SDVTList SelectionDAG::getVTList(EVT VT1, EVT VT2) {
  for (std::vector<SDVTList>::reverse_iterator I = VTList.rbegin(),
       E = VTList.rend(); I != E; ++I)
    if (I->NumVTs == 2 && I->VTs[0] == VT1 && I->VTs[1] == VT2)
      return *I;

  EVT *Array = Allocator.Allocate<EVT>(2);
  Array[0] = VT1;
  Array[1] = VT2;
  SDVTList Result = makeVTList(Array, 2);
  VTList.push_back(Result);
  return Result;
}

SDVTList SelectionDAG::getVTList(EVT VT1, EVT VT2, EVT VT3) {
  for (std::vector<SDVTList>::reverse_iterator I = VTList.rbegin(),
       E = VTList.rend(); I != E; ++I)
    if (I->NumVTs == 3 && I->VTs[0] == VT1 && I->VTs[1] == VT2 &&
        I->VTs[2] == VT3)
      return *I;

  EVT *Array = Allocator.Allocate<EVT>(3);
  Array[0] = VT1;
  Array[1] = VT2;
  Array[2] = VT3;
  SDVTList Result = makeVTList(Array, 3);
  VTList.push_back(Result);
  return Result;
}

SDVTList SelectionDAG::getVTList(EVT VT1, EVT VT2, EVT VT3, EVT VT4) {
  for (std::vector<SDVTList>::reverse_iterator I = VTList.rbegin(),
       E = VTList.rend(); I != E; ++I)
    if (I->NumVTs == 4 && I->VTs[0] == VT1 && I->VTs[1] == VT2 &&
        I->VTs[2] == VT3 && I->VTs[3] == VT4)
      return *I;

  EVT *Array = Allocator.Allocate<EVT>(4);
  Array[0] = VT1;
  Array[1] = VT2;
  Array[2] = VT3;
  Array[3] = VT4;
  SDVTList Result = makeVTList(Array, 4);
  VTList.push_back(Result);
  return Result;
}

SDVTList SelectionDAG::getVTList(const EVT *VTs, unsigned NumVTs) {
  switch (NumVTs) {
  case 0: llvm_unreachable("Cannot have nodes without results!");
  case 1: return getVTList(VTs[0]);
  case 2: return getVTList(VTs[0], VTs[1]);
  case 3: return getVTList(VTs[0], VTs[1], VTs[2]);
  case 4: return getVTList(VTs[0], VTs[1], VTs[2], VTs[3]);
  default: break;
  }

  for (std::vector<SDVTList>::reverse_iterator I = VTList.rbegin(),
       E = VTList.rend(); I != E; ++I) {
    if (I->NumVTs != NumVTs || VTs[0] != I->VTs[0] || VTs[1] != I->VTs[1])
      continue;

    bool NoMatch = false;
    for (unsigned i = 2; i != NumVTs; ++i)
      if (VTs[i] != I->VTs[i]) {
        NoMatch = true;
        break;
      }
    if (!NoMatch)
      return *I;
  }

  EVT *Array = Allocator.Allocate<EVT>(NumVTs);
  std::copy(VTs, VTs+NumVTs, Array);
  SDVTList Result = makeVTList(Array, NumVTs);
  VTList.push_back(Result);
  return Result;
}


/// UpdateNodeOperands - *Mutate* the specified node in-place to have the
/// specified operands. If the resultant node already exists in the DAG,
/// this does not modify the specified node, instead it returns the node that
/// already exists. If the resultant node does not exist in the DAG, the
/// input node is returned. As a degenerate case, if you specify the same
/// input operands as the node already has, the input node is returned.
SDNode *SelectionDAG::UpdateNodeOperands(SDNode *N, SDValue Op) {
  assert(N->getNumOperands() == 1 && "Update with wrong number of operands");

  // Check to see if there is no change.
  if (Op == N->getOperand(0)) return N;

  // See if the modified node already exists.
  void *InsertPos = 0;
  if (SDNode *Existing = FindModifiedNodeSlot(N, Op, InsertPos))
    return Existing;

  // Nope it doesn't. Remove the node from its current place in the maps.
  if (InsertPos)
    if (!RemoveNodeFromCSEMaps(N))
      InsertPos = 0;

  // Now we update the operands.
  N->OperandList[0].set(Op);

  // If this gets put into a CSE map, add it.
  if (InsertPos) CSEMap.InsertNode(N, InsertPos);
  return N;
}

SDNode *SelectionDAG::UpdateNodeOperands(SDNode *N, SDValue Op1, SDValue Op2) {
  assert(N->getNumOperands() == 2 && "Update with wrong number of operands");

  // Check to see if there is no change.
  if (Op1 == N->getOperand(0) && Op2 == N->getOperand(1))
    return N;   // No operands changed, just return the input node.

  // See if the modified node already exists.
  void *InsertPos = 0;
  if (SDNode *Existing = FindModifiedNodeSlot(N, Op1, Op2, InsertPos))
    return Existing;

  // Nope it doesn't. Remove the node from its current place in the maps.
/// UpdateNodeOperands - *Mutate* the specified node in-place to have the
/// specified operands.  If the resultant node already exists in the DAG,
/// this does not modify the specified node, instead it returns the node that
/// already exists.  If the resultant node does not exist in the DAG, the
/// input node is returned.  As a degenerate case, if you specify the same
/// input operands as the node already has, the input node is returned.
SDNode *SelectionDAG::UpdateNodeOperands(SDNode *N, SDValue Op) {
  assert(N->getNumOperands() == 1 && "Update with wrong number of operands");

  // Check to see if there is no change.
  if (Op == N->getOperand(0)) return N;

  // See if the modified node already exists.
  void *InsertPos = 0;
  if (SDNode *Existing = FindModifiedNodeSlot(N, Op, InsertPos))
    return Existing;

  // Nope it doesn't.  Remove the node from its current place in the maps.
  if (InsertPos)
    if (!RemoveNodeFromCSEMaps(N))
      InsertPos = 0;

  // Now we update the operands.
  N->OperandList[0].set(Op);

  // If this gets put into a CSE map, add it.
  if (InsertPos) CSEMap.InsertNode(N, InsertPos);
  return N;
}

SDNode *SelectionDAG::UpdateNodeOperands(SDNode *N, SDValue Op1, SDValue Op2) {
  assert(N->getNumOperands() == 2 && "Update with wrong number of operands");

  // Check to see if there is no change.
  if (Op1 == N->getOperand(0) && Op2 == N->getOperand(1))
    return N;   // No operands changed, just return the input node.

  // See if the modified node already exists.
  void *InsertPos = 0;
  if (SDNode *Existing = FindModifiedNodeSlot(N, Op1, Op2, InsertPos))
    return Existing;

  // Nope it doesn't.  Remove the node from its current place in the maps.
  if (InsertPos)
    if (!RemoveNodeFromCSEMaps(N))
      InsertPos = 0;

  // Now we update the operands.
  if (N->OperandList[0] != Op1)
    N->OperandList[0].set(Op1);
  if (N->OperandList[1] != Op2)
    N->OperandList[1].set(Op2);

  // If this gets put into a CSE map, add it.
  if (InsertPos) CSEMap.InsertNode(N, InsertPos);
  return N;
}

SDNode *SelectionDAG::
UpdateNodeOperands(SDNode *N, SDValue Op1, SDValue Op2, SDValue Op3) {
  SDValue Ops[] = { Op1, Op2, Op3 };
  return UpdateNodeOperands(N, Ops, 3);
}

SDNode *SelectionDAG::
UpdateNodeOperands(SDNode *N, SDValue Op1, SDValue Op2,
                   SDValue Op3, SDValue Op4) {
  SDValue Ops[] = { Op1, Op2, Op3, Op4 };
  return UpdateNodeOperands(N, Ops, 4);
}

SDNode *SelectionDAG::
UpdateNodeOperands(SDNode *N, SDValue Op1, SDValue Op2,
                   SDValue Op3, SDValue Op4, SDValue Op5) {
  SDValue Ops[] = { Op1, Op2, Op3, Op4, Op5 };
  return UpdateNodeOperands(N, Ops, 5);
}

SDNode *SelectionDAG::
UpdateNodeOperands(SDNode *N, const SDValue *Ops, unsigned NumOps) {
  assert(N->getNumOperands() == NumOps &&
         "Update with wrong number of operands");

  // Check to see if there is no change.
  bool AnyChange = false;
  for (unsigned i = 0; i != NumOps; ++i) {
    if (Ops[i] != N->getOperand(i)) {
      AnyChange = true;
      break;
    }
  }

  // No operands changed, just return the input node.
  if (!AnyChange) return N;

  // See if the modified node already exists.
  void *InsertPos = 0;
  if (SDNode *Existing = FindModifiedNodeSlot(N, Ops, NumOps, InsertPos))
    return Existing;

  // Nope it doesn't.  Remove the node from its current place in the maps.
  if (InsertPos)
    if (!RemoveNodeFromCSEMaps(N))
      InsertPos = 0;

  // Now we update the operands.
  for (unsigned i = 0; i != NumOps; ++i)
    if (N->OperandList[i] != Ops[i])
      N->OperandList[i].set(Ops[i]);

  // If this gets put into a CSE map, add it.
  if (InsertPos) CSEMap.InsertNode(N, InsertPos);
  return N;
}

/// DropOperands - Release the operands and set this node to have
/// zero operands.
void SDNode::DropOperands() {
  // Unlike the code in MorphNodeTo that does this, we don't need to
  // watch for dead nodes here.
  for (op_iterator I = op_begin(), E = op_end(); I != E; ) {
    SDUse &Use = *I++;
    Use.set(SDValue());
  }
}
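// Example (illustrative sketch, not part of this file): a DAG combine that
// rewrites one operand typically goes through UpdateNodeOperands and must
// use the returned node, since CSE may substitute an existing one. Assuming
// 'DAG', a two-operand node 'N', and a replacement operand 'NewLHS':
//
//   SDNode *Res = DAG.UpdateNodeOperands(N, NewLHS, N->getOperand(1));
//   // 'Res' is N itself, or a pre-existing identical node found by CSE.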
/// SelectNodeTo - These are wrappers around MorphNodeTo that accept a
/// machine opcode.
///
SDNode *SelectionDAG::SelectNodeTo(SDNode *N, unsigned MachineOpc,
                                   EVT VT) {
  SDVTList VTs = getVTList(VT);
  return SelectNodeTo(N, MachineOpc, VTs, 0, 0);
}

SDNode *SelectionDAG::SelectNodeTo(SDNode *N, unsigned MachineOpc,
                                   EVT VT, SDValue Op1) {
  SDVTList VTs = getVTList(VT);
  SDValue Ops[] = { Op1 };
  return SelectNodeTo(N, MachineOpc, VTs, Ops, 1);
}

SDNode *SelectionDAG::SelectNodeTo(SDNode *N, unsigned MachineOpc,
                                   EVT VT, SDValue Op1,
                                   SDValue Op2) {
  SDVTList VTs = getVTList(VT);
  SDValue Ops[] = { Op1, Op2 };
  return SelectNodeTo(N, MachineOpc, VTs, Ops, 2);
}

SDNode *SelectionDAG::SelectNodeTo(SDNode *N, unsigned MachineOpc,
                                   EVT VT, SDValue Op1,
                                   SDValue Op2, SDValue Op3) {
  SDVTList VTs = getVTList(VT);
  SDValue Ops[] = { Op1, Op2, Op3 };
  return SelectNodeTo(N, MachineOpc, VTs, Ops, 3);
}

SDNode *SelectionDAG::SelectNodeTo(SDNode *N, unsigned MachineOpc,
                                   EVT VT, const SDValue *Ops,
                                   unsigned NumOps) {
  SDVTList VTs = getVTList(VT);
  return SelectNodeTo(N, MachineOpc, VTs, Ops, NumOps);
}

SDNode *SelectionDAG::SelectNodeTo(SDNode *N, unsigned MachineOpc,
                                   EVT VT1, EVT VT2, const SDValue *Ops,
                                   unsigned NumOps) {
  SDVTList VTs = getVTList(VT1, VT2);
  return SelectNodeTo(N, MachineOpc, VTs, Ops, NumOps);
}

SDNode *SelectionDAG::SelectNodeTo(SDNode *N, unsigned MachineOpc,
                                   EVT VT1, EVT VT2) {
  SDVTList VTs = getVTList(VT1, VT2);
  return SelectNodeTo(N, MachineOpc, VTs, (SDValue *)0, 0);
}

SDNode *SelectionDAG::SelectNodeTo(SDNode *N, unsigned MachineOpc,
                                   EVT VT1, EVT VT2, EVT VT3,
                                   const SDValue *Ops, unsigned NumOps) {
  SDVTList VTs = getVTList(VT1, VT2, VT3);
  return SelectNodeTo(N, MachineOpc, VTs, Ops, NumOps);
}

SDNode *SelectionDAG::SelectNodeTo(SDNode *N, unsigned MachineOpc,
                                   EVT VT1, EVT VT2, EVT VT3, EVT VT4,
                                   const SDValue *Ops, unsigned NumOps) {
  SDVTList VTs = getVTList(VT1, VT2, VT3, VT4);
  return SelectNodeTo(N, MachineOpc, VTs, Ops, NumOps);
}

SDNode *SelectionDAG::SelectNodeTo(SDNode *N, unsigned MachineOpc,
                                   EVT VT1, EVT VT2,
                                   SDValue Op1) {
  SDVTList VTs = getVTList(VT1, VT2);
  SDValue Ops[] = { Op1 };
  return SelectNodeTo(N, MachineOpc, VTs, Ops, 1);
}

SDNode *SelectionDAG::SelectNodeTo(SDNode *N, unsigned MachineOpc,
                                   EVT VT1, EVT VT2,
                                   SDValue Op1, SDValue Op2) {
  SDVTList VTs = getVTList(VT1, VT2);
  SDValue Ops[] = { Op1, Op2 };
  return SelectNodeTo(N, MachineOpc, VTs, Ops, 2);
}

SDNode *SelectionDAG::SelectNodeTo(SDNode *N, unsigned MachineOpc,
                                   EVT VT1, EVT VT2,
                                   SDValue Op1, SDValue Op2,
                                   SDValue Op3) {
  SDVTList VTs = getVTList(VT1, VT2);
  SDValue Ops[] = { Op1, Op2, Op3 };
  return SelectNodeTo(N, MachineOpc, VTs, Ops, 3);
}

SDNode *SelectionDAG::SelectNodeTo(SDNode *N, unsigned MachineOpc,
                                   EVT VT1, EVT VT2, EVT VT3,
                                   SDValue Op1, SDValue Op2,
                                   SDValue Op3) {
  SDVTList VTs = getVTList(VT1, VT2, VT3);
  SDValue Ops[] = { Op1, Op2, Op3 };
  return SelectNodeTo(N, MachineOpc, VTs, Ops, 3);
}

SDNode *SelectionDAG::SelectNodeTo(SDNode *N, unsigned MachineOpc,
                                   SDVTList VTs, const SDValue *Ops,
                                   unsigned NumOps) {
  N = MorphNodeTo(N, ~MachineOpc, VTs, Ops, NumOps);
  // Reset the NodeID to -1.
  N->setNodeId(-1);
  return N;
}
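// Example (illustrative sketch, not part of this file): a target's
// instruction selector typically calls SelectNodeTo from its Select() hook
// to replace an ISD node with a machine instruction in place. 'TargetOpc'
// here stands in for a real machine opcode from the target's generated
// instruction enum:
//
//   SDNode *Res = CurDAG->SelectNodeTo(N, TargetOpc, MVT::i32,
//                                      N->getOperand(0), N->getOperand(1));
//   // 'Res' may be a pre-existing equivalent node rather than 'N'.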
/// UpdadeDebugLocOnMergedSDNode - If the opt level is -O0 then it throws away
/// the line number information on the merged node since it is not possible to
/// preserve the information that the operation is associated with multiple
/// lines. This will make the debugger work better at -O0, where there is a
/// higher probability of having other instructions associated with that line.
///
SDNode *SelectionDAG::UpdadeDebugLocOnMergedSDNode(SDNode *N, DebugLoc OLoc) {
  DebugLoc NLoc = N->getDebugLoc();
  if (!(NLoc.isUnknown()) && (OptLevel == CodeGenOpt::None) && (OLoc != NLoc)) {
    N->setDebugLoc(DebugLoc());
  }
  return N;
}

/// MorphNodeTo - This *mutates* the specified node to have the specified
/// return type, opcode, and operands.
///
/// Note that MorphNodeTo returns the resultant node.  If there is already a
/// node of the specified opcode and operands, it returns that node instead of
/// the current one.  Note that the DebugLoc need not be the same.
///
/// Using MorphNodeTo is faster than creating a new node and swapping it in
/// with ReplaceAllUsesWith both because it often avoids allocating a new
/// node, and because it doesn't require CSE recalculation for any of
/// the node's users.
///
SDNode *SelectionDAG::MorphNodeTo(SDNode *N, unsigned Opc,
                                  SDVTList VTs, const SDValue *Ops,
                                  unsigned NumOps) {
  // If an identical node already exists, use it.
  void *IP = 0;
  if (VTs.VTs[VTs.NumVTs-1] != MVT::Glue) {
    FoldingSetNodeID ID;
    AddNodeIDNode(ID, Opc, VTs, Ops, NumOps);
    if (SDNode *ON = CSEMap.FindNodeOrInsertPos(ID, IP))
      return UpdadeDebugLocOnMergedSDNode(ON, N->getDebugLoc());
  }

  if (!RemoveNodeFromCSEMaps(N))
    IP = 0;

  // Start the morphing.
  N->NodeType = Opc;
  N->ValueList = VTs.VTs;
  N->NumValues = VTs.NumVTs;

  // Clear the operands list, updating used nodes to remove this from their
  // use list.  Keep track of any operands that become dead as a result.
  SmallPtrSet<SDNode*, 16> DeadNodeSet;
  for (SDNode::op_iterator I = N->op_begin(), E = N->op_end(); I != E; ) {
    SDUse &Use = *I++;
    SDNode *Used = Use.getNode();
    Use.set(SDValue());
    if (Used->use_empty())
      DeadNodeSet.insert(Used);
  }

  if (MachineSDNode *MN = dyn_cast<MachineSDNode>(N)) {
    // Initialize the memory references information.
    MN->setMemRefs(0, 0);
    // If NumOps is larger than the # of operands we can have in a
    // MachineSDNode, reallocate the operand list.
    if (NumOps > MN->NumOperands || !MN->OperandsNeedDelete) {
      if (MN->OperandsNeedDelete)
        delete[] MN->OperandList;
      if (NumOps > array_lengthof(MN->LocalOperands))
        // We're creating a final node that will live unmorphed for the
        // remainder of the current SelectionDAG iteration, so we can allocate
        // the operands directly out of a pool with no recycling metadata.
        MN->InitOperands(OperandAllocator.Allocate<SDUse>(NumOps),
                         Ops, NumOps);
      else
        MN->InitOperands(MN->LocalOperands, Ops, NumOps);
      MN->OperandsNeedDelete = false;
    } else
      MN->InitOperands(MN->OperandList, Ops, NumOps);
  } else {
    // If NumOps is larger than the # of operands we currently have, reallocate
    // the operand list.
    if (NumOps > N->NumOperands) {
      if (N->OperandsNeedDelete)
        delete[] N->OperandList;
      N->InitOperands(new SDUse[NumOps], Ops, NumOps);
      N->OperandsNeedDelete = true;
    } else
      N->InitOperands(N->OperandList, Ops, NumOps);
  }

  // Delete any nodes that are still dead after adding the uses for the
  // new operands.
  if (!DeadNodeSet.empty()) {
    SmallVector<SDNode *, 16> DeadNodes;
    for (SmallPtrSet<SDNode *, 16>::iterator I = DeadNodeSet.begin(),
         E = DeadNodeSet.end(); I != E; ++I)
      if ((*I)->use_empty())
        DeadNodes.push_back(*I);
    RemoveDeadNodes(DeadNodes);
  }

  if (IP)
    CSEMap.InsertNode(N, IP);   // Memoize the new node.
  return N;
}
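// Example (illustrative sketch, not part of this file): morphing a node in
// place while keeping its users intact. Assuming 'DAG' is a SelectionDAG,
// 'N' a node being rewritten, and a two-entry 'Ops' array already built:
//
//   SDVTList VTs = DAG.getVTList(MVT::i32);
//   SDNode *Res = DAG.MorphNodeTo(N, N->getOpcode(), VTs, Ops, 2);
//   // If an identical node already existed, 'Res' is that node and 'N'
//   // may be left dead.
//
// Machine opcodes reach MorphNodeTo through SelectNodeTo above, in the same
// complemented (~Opc) encoding that getMachineNode uses below.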
/// getMachineNode - These are used for target selectors to create a new node
/// with specified return type(s), MachineInstr opcode, and operands.
///
/// Note that getMachineNode returns the resultant node.  If there is already a
/// node of the specified opcode and operands, it returns that node instead of
/// the current one.
MachineSDNode *
SelectionDAG::getMachineNode(unsigned Opcode, DebugLoc dl, EVT VT) {
  SDVTList VTs = getVTList(VT);
  return getMachineNode(Opcode, dl, VTs, 0, 0);
}

MachineSDNode *
SelectionDAG::getMachineNode(unsigned Opcode, DebugLoc dl, EVT VT,
                             SDValue Op1) {
  SDVTList VTs = getVTList(VT);
  SDValue Ops[] = { Op1 };
  return getMachineNode(Opcode, dl, VTs, Ops, array_lengthof(Ops));
}

MachineSDNode *
SelectionDAG::getMachineNode(unsigned Opcode, DebugLoc dl, EVT VT,
                             SDValue Op1, SDValue Op2) {
  SDVTList VTs = getVTList(VT);
  SDValue Ops[] = { Op1, Op2 };
  return getMachineNode(Opcode, dl, VTs, Ops, array_lengthof(Ops));
}

MachineSDNode *
SelectionDAG::getMachineNode(unsigned Opcode, DebugLoc dl, EVT VT,
                             SDValue Op1, SDValue Op2, SDValue Op3) {
  SDVTList VTs = getVTList(VT);
  SDValue Ops[] = { Op1, Op2, Op3 };
  return getMachineNode(Opcode, dl, VTs, Ops, array_lengthof(Ops));
}

MachineSDNode *
SelectionDAG::getMachineNode(unsigned Opcode, DebugLoc dl, EVT VT,
                             const SDValue *Ops, unsigned NumOps) {
  SDVTList VTs = getVTList(VT);
  return getMachineNode(Opcode, dl, VTs, Ops, NumOps);
}

MachineSDNode *
SelectionDAG::getMachineNode(unsigned Opcode, DebugLoc dl, EVT VT1, EVT VT2) {
  SDVTList VTs = getVTList(VT1, VT2);
  return getMachineNode(Opcode, dl, VTs, 0, 0);
}

MachineSDNode *
SelectionDAG::getMachineNode(unsigned Opcode, DebugLoc dl,
                             EVT VT1, EVT VT2, SDValue Op1) {
  SDVTList VTs = getVTList(VT1, VT2);
  SDValue Ops[] = { Op1 };
  return getMachineNode(Opcode, dl, VTs, Ops, array_lengthof(Ops));
}

MachineSDNode *
SelectionDAG::getMachineNode(unsigned Opcode, DebugLoc dl,
                             EVT VT1, EVT VT2, SDValue Op1, SDValue Op2) {
  SDVTList VTs = getVTList(VT1, VT2);
  SDValue Ops[] = { Op1, Op2 };
  return getMachineNode(Opcode, dl, VTs, Ops, array_lengthof(Ops));
}

MachineSDNode *
SelectionDAG::getMachineNode(unsigned Opcode, DebugLoc dl,
                             EVT VT1, EVT VT2, SDValue Op1,
                             SDValue Op2, SDValue Op3) {
  SDVTList VTs = getVTList(VT1, VT2);
  SDValue Ops[] = { Op1, Op2, Op3 };
  return getMachineNode(Opcode, dl, VTs, Ops, array_lengthof(Ops));
}
MachineSDNode *
SelectionDAG::getMachineNode(unsigned Opcode, DebugLoc dl,
                             EVT VT1, EVT VT2,
                             const SDValue *Ops, unsigned NumOps) {
  SDVTList VTs = getVTList(VT1, VT2);
  return getMachineNode(Opcode, dl, VTs, Ops, NumOps);
}

MachineSDNode *
SelectionDAG::getMachineNode(unsigned Opcode, DebugLoc dl,
                             EVT VT1, EVT VT2, EVT VT3,
                             SDValue Op1, SDValue Op2) {
  SDVTList VTs = getVTList(VT1, VT2, VT3);
  SDValue Ops[] = { Op1, Op2 };
  return getMachineNode(Opcode, dl, VTs, Ops, array_lengthof(Ops));
}

MachineSDNode *
SelectionDAG::getMachineNode(unsigned Opcode, DebugLoc dl,
                             EVT VT1, EVT VT2, EVT VT3,
                             SDValue Op1, SDValue Op2, SDValue Op3) {
  SDVTList VTs = getVTList(VT1, VT2, VT3);
  SDValue Ops[] = { Op1, Op2, Op3 };
  return getMachineNode(Opcode, dl, VTs, Ops, array_lengthof(Ops));
}

MachineSDNode *
SelectionDAG::getMachineNode(unsigned Opcode, DebugLoc dl,
                             EVT VT1, EVT VT2, EVT VT3,
                             const SDValue *Ops, unsigned NumOps) {
  SDVTList VTs = getVTList(VT1, VT2, VT3);
  return getMachineNode(Opcode, dl, VTs, Ops, NumOps);
}

MachineSDNode *
SelectionDAG::getMachineNode(unsigned Opcode, DebugLoc dl, EVT VT1,
                             EVT VT2, EVT VT3, EVT VT4,
                             const SDValue *Ops, unsigned NumOps) {
  SDVTList VTs = getVTList(VT1, VT2, VT3, VT4);
  return getMachineNode(Opcode, dl, VTs, Ops, NumOps);
}

MachineSDNode *
SelectionDAG::getMachineNode(unsigned Opcode, DebugLoc dl,
                             const std::vector<EVT> &ResultTys,
                             const SDValue *Ops, unsigned NumOps) {
  SDVTList VTs = getVTList(&ResultTys[0], ResultTys.size());
  return getMachineNode(Opcode, dl, VTs, Ops, NumOps);
}

MachineSDNode *
SelectionDAG::getMachineNode(unsigned Opcode, DebugLoc DL, SDVTList VTs,
                             const SDValue *Ops, unsigned NumOps) {
  bool DoCSE = VTs.VTs[VTs.NumVTs-1] != MVT::Glue;
  MachineSDNode *N;
  void *IP = 0;

  if (DoCSE) {
    FoldingSetNodeID ID;
    AddNodeIDNode(ID, ~Opcode, VTs, Ops, NumOps);
    IP = 0;
    if (SDNode *E = CSEMap.FindNodeOrInsertPos(ID, IP)) {
      return cast<MachineSDNode>(UpdadeDebugLocOnMergedSDNode(E, DL));
    }
  }

  // Allocate a new MachineSDNode.
  N = new (NodeAllocator) MachineSDNode(~Opcode, DL, VTs);

  // Initialize the operands list.
  if (NumOps > array_lengthof(N->LocalOperands))
    // We're creating a final node that will live unmorphed for the
    // remainder of the current SelectionDAG iteration, so we can allocate
    // the operands directly out of a pool with no recycling metadata.
    N->InitOperands(OperandAllocator.Allocate<SDUse>(NumOps),
                    Ops, NumOps);
  else
    N->InitOperands(N->LocalOperands, Ops, NumOps);
  N->OperandsNeedDelete = false;

  if (DoCSE)
    CSEMap.InsertNode(N, IP);

  AllNodes.push_back(N);
#ifndef NDEBUG
  VerifyMachineNode(N);
#endif
  return N;
}

/// getTargetExtractSubreg - A convenience function for creating
/// TargetOpcode::EXTRACT_SUBREG nodes.
SDValue
SelectionDAG::getTargetExtractSubreg(int SRIdx, DebugLoc DL, EVT VT,
                                     SDValue Operand) {
  SDValue SRIdxVal = getTargetConstant(SRIdx, MVT::i32);
  SDNode *Subreg = getMachineNode(TargetOpcode::EXTRACT_SUBREG, DL,
                                  VT, Operand, SRIdxVal);
  return SDValue(Subreg, 0);
}
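// Example (illustrative sketch, not part of this file): extracting the low
// 32-bit sub-register of a 64-bit value during selection. 'SubRegIdx' stands
// in for a target sub-register index from the target's generated enum, with
// 'DAG', 'dl', and a 64-bit SDValue 'Val64' assumed in scope:
//
//   SDValue Lo = DAG.getTargetExtractSubreg(SubRegIdx, dl, MVT::i32, Val64);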
/// getTargetInsertSubreg - A convenience function for creating
/// TargetOpcode::INSERT_SUBREG nodes.
SDValue
SelectionDAG::getTargetInsertSubreg(int SRIdx, DebugLoc DL, EVT VT,
                                    SDValue Operand, SDValue Subreg) {
  SDValue SRIdxVal = getTargetConstant(SRIdx, MVT::i32);
  SDNode *Result = getMachineNode(TargetOpcode::INSERT_SUBREG, DL,
                                  VT, Operand, Subreg, SRIdxVal);
  return SDValue(Result, 0);
}

/// getNodeIfExists - Get the specified node if it's already available, or
/// else return NULL.
SDNode *SelectionDAG::getNodeIfExists(unsigned Opcode, SDVTList VTList,
                                      const SDValue *Ops, unsigned NumOps) {
  if (VTList.VTs[VTList.NumVTs-1] != MVT::Glue) {
    FoldingSetNodeID ID;
    AddNodeIDNode(ID, Opcode, VTList, Ops, NumOps);
    void *IP = 0;
    if (SDNode *E = CSEMap.FindNodeOrInsertPos(ID, IP))
      return E;
  }
  return NULL;
}

/// getDbgValue - Creates a SDDbgValue node.
///
SDDbgValue *
SelectionDAG::getDbgValue(MDNode *MDPtr, SDNode *N, unsigned R, uint64_t Off,
                          DebugLoc DL, unsigned O) {
  return new (Allocator) SDDbgValue(MDPtr, N, R, Off, DL, O);
}

SDDbgValue *
SelectionDAG::getDbgValue(MDNode *MDPtr, const Value *C, uint64_t Off,
                          DebugLoc DL, unsigned O) {
  return new (Allocator) SDDbgValue(MDPtr, C, Off, DL, O);
}

SDDbgValue *
SelectionDAG::getDbgValue(MDNode *MDPtr, unsigned FI, uint64_t Off,
                          DebugLoc DL, unsigned O) {
  return new (Allocator) SDDbgValue(MDPtr, FI, Off, DL, O);
}

namespace {

/// RAUWUpdateListener - Helper for ReplaceAllUsesWith - When the node
/// pointed to by a use iterator is deleted, increment the use iterator
/// so that it doesn't dangle.
///
class RAUWUpdateListener : public SelectionDAG::DAGUpdateListener {
  SDNode::use_iterator &UI;
  SDNode::use_iterator &UE;

  virtual void NodeDeleted(SDNode *N, SDNode *E) {
    // Increment the iterator as needed.
    while (UI != UE && N == *UI)
      ++UI;
  }

public:
  RAUWUpdateListener(SelectionDAG &d,
                     SDNode::use_iterator &ui,
                     SDNode::use_iterator &ue)
    : SelectionDAG::DAGUpdateListener(d), UI(ui), UE(ue) {}
};

}
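// Example (illustrative sketch, not part of this file): getNodeIfExists
// lets a combine probe the CSE map without creating anything. Assuming
// 'DAG' and i32 SDValues 'X' and 'Y' are in scope:
//
//   SDValue Ops[] = { X, Y };
//   if (SDNode *E = DAG.getNodeIfExists(ISD::ADD, DAG.getVTList(MVT::i32),
//                                       Ops, 2)) {
//     // An i32 add of X and Y already exists; reuse it rather than
//     // building a duplicate.
//   }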
/// ReplaceAllUsesWith - Modify anything using 'From' to use 'To' instead.
/// This can cause recursive merging of nodes in the DAG.
///
/// This version assumes From has a single result value.
///
void SelectionDAG::ReplaceAllUsesWith(SDValue FromN, SDValue To) {
  SDNode *From = FromN.getNode();
  assert(From->getNumValues() == 1 && FromN.getResNo() == 0 &&
         "Cannot replace with this method!");
  assert(From != To.getNode() && "Cannot replace uses of with self");

  // Iterate over all the existing uses of From. New uses will be added
  // to the beginning of the use list, which we avoid visiting.
  // This specifically avoids visiting uses of From that arise while the
  // replacement is happening, because any such uses would be the result
  // of CSE: If an existing node looks like From after one of its operands
  // is replaced by To, we don't want to replace all of its users with To
  // too. See PR3018 for more info.
  SDNode::use_iterator UI = From->use_begin(), UE = From->use_end();
  RAUWUpdateListener Listener(*this, UI, UE);
  while (UI != UE) {
    SDNode *User = *UI;

    // This node is about to morph, remove its old self from the CSE maps.
    RemoveNodeFromCSEMaps(User);

    // A user can appear in a use list multiple times, and when this
    // happens the uses are usually next to each other in the list.
    // To help reduce the number of CSE recomputations, process all
    // the uses of this user that we can find this way.
    do {
      SDUse &Use = UI.getUse();
      ++UI;
      Use.set(To);
    } while (UI != UE && *UI == User);

    // Now that we have modified User, add it back to the CSE maps.  If it
    // already exists there, recursively merge the results together.
    AddModifiedNodeToCSEMaps(User);
  }

  // If we just RAUW'd the root, take note.
  if (FromN == getRoot())
    setRoot(To);
}

/// ReplaceAllUsesWith - Modify anything using 'From' to use 'To' instead.
/// This can cause recursive merging of nodes in the DAG.
///
/// This version assumes that for each value of From, there is a
/// corresponding value in To in the same position with the same type.
///
void SelectionDAG::ReplaceAllUsesWith(SDNode *From, SDNode *To) {
#ifndef NDEBUG
  for (unsigned i = 0, e = From->getNumValues(); i != e; ++i)
    assert((!From->hasAnyUseOfValue(i) ||
            From->getValueType(i) == To->getValueType(i)) &&
           "Cannot use this version of ReplaceAllUsesWith!");
#endif

  // Handle the trivial case.
  if (From == To)
    return;

  // Iterate over just the existing users of From. See the comments in
  // the ReplaceAllUsesWith above.
  SDNode::use_iterator UI = From->use_begin(), UE = From->use_end();
  RAUWUpdateListener Listener(*this, UI, UE);
  while (UI != UE) {
    SDNode *User = *UI;

    // This node is about to morph, remove its old self from the CSE maps.
    RemoveNodeFromCSEMaps(User);

    // A user can appear in a use list multiple times, and when this
    // happens the uses are usually next to each other in the list.
    // To help reduce the number of CSE recomputations, process all
    // the uses of this user that we can find this way.
    do {
      SDUse &Use = UI.getUse();
      ++UI;
      Use.setNode(To);
    } while (UI != UE && *UI == User);

    // Now that we have modified User, add it back to the CSE maps.  If it
    // already exists there, recursively merge the results together.
    AddModifiedNodeToCSEMaps(User);
  }

  // If we just RAUW'd the root, take note.
  if (From == getRoot().getNode())
    setRoot(SDValue(To, getRoot().getResNo()));
}
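// Example (illustrative sketch, not part of this file): folding a
// single-result node away. Assuming 'DAG' and a one-result node 'N' that
// has been proven to compute zero:
//
//   DAG.ReplaceAllUsesWith(SDValue(N, 0),
//                          DAG.getConstant(0, N->getValueType(0)));
//   // Users of N now point at the constant; N itself may become dead and
//   // can then be cleaned up by the dead-node removal machinery.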
/// ReplaceAllUsesWith - Modify anything using 'From' to use 'To' instead.
/// This can cause recursive merging of nodes in the DAG.
///
/// This version can replace From with any result values.  To must match the
/// number and types of values returned by From.
void SelectionDAG::ReplaceAllUsesWith(SDNode *From, const SDValue *To) {
  if (From->getNumValues() == 1)  // Handle the simple case efficiently.
    return ReplaceAllUsesWith(SDValue(From, 0), To[0]);

  // Iterate over just the existing users of From. See the comments in
  // the ReplaceAllUsesWith above.
  SDNode::use_iterator UI = From->use_begin(), UE = From->use_end();
  RAUWUpdateListener Listener(*this, UI, UE);
  while (UI != UE) {
    SDNode *User = *UI;

    // This node is about to morph, remove its old self from the CSE maps.
    RemoveNodeFromCSEMaps(User);

    // A user can appear in a use list multiple times, and when this
    // happens the uses are usually next to each other in the list.
    // To help reduce the number of CSE recomputations, process all
    // the uses of this user that we can find this way.
    do {
      SDUse &Use = UI.getUse();
      const SDValue &ToOp = To[Use.getResNo()];
      ++UI;
      Use.set(ToOp);
    } while (UI != UE && *UI == User);

    // Now that we have modified User, add it back to the CSE maps.  If it
    // already exists there, recursively merge the results together.
    AddModifiedNodeToCSEMaps(User);
  }

  // If we just RAUW'd the root, take note.
  if (From == getRoot().getNode())
    setRoot(SDValue(To[getRoot().getResNo()]));
}

/// ReplaceAllUsesOfValueWith - Replace any uses of From with To, leaving
/// uses of other values produced by From.getNode() alone.
void SelectionDAG::ReplaceAllUsesOfValueWith(SDValue From, SDValue To) {
  // Handle the really simple, really trivial case efficiently.
  if (From == To) return;

  // Handle the simple, trivial, case efficiently.
  if (From.getNode()->getNumValues() == 1) {
    ReplaceAllUsesWith(From, To);
    return;
  }

  // Iterate over just the existing users of From. See the comments in
  // the ReplaceAllUsesWith above.
  SDNode::use_iterator UI = From.getNode()->use_begin(),
                       UE = From.getNode()->use_end();
  RAUWUpdateListener Listener(*this, UI, UE);
  while (UI != UE) {
    SDNode *User = *UI;
    bool UserRemovedFromCSEMaps = false;

    // A user can appear in a use list multiple times, and when this
    // happens the uses are usually next to each other in the list.
    // To help reduce the number of CSE recomputations, process all
    // the uses of this user that we can find this way.
    do {
      SDUse &Use = UI.getUse();

      // Skip uses of different values from the same node.
      if (Use.getResNo() != From.getResNo()) {
        ++UI;
        continue;
      }

      // If this node hasn't been modified yet, it's still in the CSE maps,
      // so remove its old self from the CSE maps.
      if (!UserRemovedFromCSEMaps) {
        RemoveNodeFromCSEMaps(User);
        UserRemovedFromCSEMaps = true;
      }

      ++UI;
      Use.set(To);
    } while (UI != UE && *UI == User);

    // We are iterating over all uses of the From node, so if a use
    // doesn't use the specific value, no changes are made.
    if (!UserRemovedFromCSEMaps)
      continue;

    // Now that we have modified User, add it back to the CSE maps.  If it
    // already exists there, recursively merge the results together.
    AddModifiedNodeToCSEMaps(User);
  }

  // If we just RAUW'd the root, take note.
  if (From == getRoot())
    setRoot(To);
}
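// Example (illustrative sketch, not part of this file): the classic use of
// ReplaceAllUsesOfValueWith is rewiring only the chain result of a load,
// leaving its value result untouched. Assuming 'DAG' and a LoadSDNode 'LD'
// whose chain users should bypass it:
//
//   DAG.ReplaceAllUsesOfValueWith(SDValue(LD, 1), LD->getChain());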
namespace {
  /// UseMemo - This class is used by SelectionDAG::ReplaceAllUsesOfValuesWith
  /// to record information about a use.
  struct UseMemo {
    SDNode *User;
    unsigned Index;
    SDUse *Use;
  };

  /// operator< - Sort Memos by User.
  bool operator<(const UseMemo &L, const UseMemo &R) {
    return (intptr_t)L.User < (intptr_t)R.User;
  }
}

/// ReplaceAllUsesOfValuesWith - Replace any uses of From with To, leaving
/// uses of other values produced by From.getNode() alone.  The same value
/// may appear in both the From and To list.
void SelectionDAG::ReplaceAllUsesOfValuesWith(const SDValue *From,
                                              const SDValue *To,
                                              unsigned Num) {
  // Handle the simple, trivial case efficiently.
  if (Num == 1)
    return ReplaceAllUsesOfValueWith(*From, *To);

  // Read up all the uses and make records of them. This helps
  // processing new uses that are introduced during the
  // replacement process.
  SmallVector<UseMemo, 4> Uses;
  for (unsigned i = 0; i != Num; ++i) {
    unsigned FromResNo = From[i].getResNo();
    SDNode *FromNode = From[i].getNode();
    for (SDNode::use_iterator UI = FromNode->use_begin(),
         E = FromNode->use_end(); UI != E; ++UI) {
      SDUse &Use = UI.getUse();
      if (Use.getResNo() == FromResNo) {
        UseMemo Memo = { *UI, i, &Use };
        Uses.push_back(Memo);
      }
    }
  }

  // Sort the uses, so that all the uses from a given User are together.
  std::sort(Uses.begin(), Uses.end());

  for (unsigned UseIndex = 0, UseIndexEnd = Uses.size();
       UseIndex != UseIndexEnd; ) {
    // We know that this user uses some value of From.  If it is the right
    // value, update it.
    SDNode *User = Uses[UseIndex].User;

    // This node is about to morph, remove its old self from the CSE maps.
    RemoveNodeFromCSEMaps(User);

    // The Uses array is sorted, so all the uses for a given User
    // are next to each other in the list.
    // To help reduce the number of CSE recomputations, process all
    // the uses of this user that we can find this way.
    do {
      unsigned i = Uses[UseIndex].Index;
      SDUse &Use = *Uses[UseIndex].Use;
      ++UseIndex;

      Use.set(To[i]);
    } while (UseIndex != UseIndexEnd && Uses[UseIndex].User == User);

    // Now that we have modified User, add it back to the CSE maps.  If it
    // already exists there, recursively merge the results together.
    AddModifiedNodeToCSEMaps(User);
  }
}
/// AssignTopologicalOrder - Assign a unique node id for each node in the DAG
/// based on their topological order.  It returns the number of nodes; on
/// return, the AllNodes list is in topological order.
unsigned SelectionDAG::AssignTopologicalOrder() {
  unsigned DAGSize = 0;

  // SortedPos tracks the progress of the algorithm. Nodes before it are
  // sorted, nodes after it are unsorted. When the algorithm completes
  // it is at the end of the list.
  allnodes_iterator SortedPos = allnodes_begin();

  // Visit all the nodes. Move nodes with no operands to the front of
  // the list immediately. Annotate nodes that do have operands with their
  // operand count. Before we do this, the Node Id fields of the nodes
  // may contain arbitrary values. After, the Node Id fields for nodes
  // before SortedPos will contain the topological sort index, and the
  // Node Id fields for nodes at SortedPos and after will contain the
  // count of outstanding operands.
  for (allnodes_iterator I = allnodes_begin(), E = allnodes_end(); I != E; ) {
    SDNode *N = I++;
    checkForCycles(N);
    unsigned Degree = N->getNumOperands();
    if (Degree == 0) {
      // A node with no operands, add it to the result array immediately.
      N->setNodeId(DAGSize++);
      allnodes_iterator Q = N;
      if (Q != SortedPos)
        SortedPos = AllNodes.insert(SortedPos, AllNodes.remove(Q));
      assert(SortedPos != AllNodes.end() && "Overran node list");
      ++SortedPos;
    } else {
      // Temporarily use the Node Id as scratch space for the degree count.
      N->setNodeId(Degree);
    }
  }

  // Visit all the nodes. As we iterate, move nodes into sorted order,
  // such that by the time the end is reached all nodes will be sorted.
  for (allnodes_iterator I = allnodes_begin(), E = allnodes_end(); I != E; ++I) {
    SDNode *N = I;
    checkForCycles(N);
    // N is in sorted position, so all its uses have one less operand
    // that needs to be sorted.
    for (SDNode::use_iterator UI = N->use_begin(), UE = N->use_end();
         UI != UE; ++UI) {
      SDNode *P = *UI;
      unsigned Degree = P->getNodeId();
      assert(Degree != 0 && "Invalid node degree");
      --Degree;
      if (Degree == 0) {
        // All of P's operands are sorted, so P may be sorted now.
        P->setNodeId(DAGSize++);
        if (P != SortedPos)
          SortedPos = AllNodes.insert(SortedPos, AllNodes.remove(P));
        assert(SortedPos != AllNodes.end() && "Overran node list");
        ++SortedPos;
      } else {
        // Update P's outstanding operand count.
        P->setNodeId(Degree);
      }
    }
    if (I == SortedPos) {
#ifndef NDEBUG
      SDNode *S = ++I;
      dbgs() << "Overran sorted position:\n";
      S->dumprFull();
#endif
      llvm_unreachable(0);
    }
  }

  assert(SortedPos == AllNodes.end() &&
         "Topological sort incomplete!");
  assert(AllNodes.front().getOpcode() == ISD::EntryToken &&
         "First node in topological sort is not the entry token!");
  assert(AllNodes.front().getNodeId() == 0 &&
         "First node in topological sort has non-zero id!");
  assert(AllNodes.front().getNumOperands() == 0 &&
         "First node in topological sort has operands!");
  assert(AllNodes.back().getNodeId() == (int)DAGSize-1 &&
         "Last node in topological sort has unexpected id!");
  assert(AllNodes.back().use_empty() &&
         "Last node in topological sort has users!");
  assert(DAGSize == allnodes_size() && "Node count mismatch!");
  return DAGSize;
}
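// Example (illustrative sketch, not part of this file): after the call, the
// AllNodes list is in topological order and each node's id is its position,
// so a scheduler or emitter can walk defs before uses. Assuming a
// SelectionDAG 'DAG' is in scope:
//
//   unsigned NumNodes = DAG.AssignTopologicalOrder();
//   for (SelectionDAG::allnodes_iterator I = DAG.allnodes_begin(),
//        E = DAG.allnodes_end(); I != E; ++I) {
//     // Every operand of *I has a smaller NodeId than *I itself.
//   }
//   (void)NumNodes;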
/// AssignOrdering - Assign an order to the SDNode.
void SelectionDAG::AssignOrdering(const SDNode *SD, unsigned Order) {
  assert(SD && "Trying to assign an order to a null node!");
  Ordering->add(SD, Order);
}

/// GetOrdering - Get the order for the SDNode.
unsigned SelectionDAG::GetOrdering(const SDNode *SD) const {
  assert(SD && "Trying to get the order of a null node!");
  return Ordering->getOrder(SD);
}

/// AddDbgValue - Add a dbg_value SDNode. If SD is non-null that means the
/// value is produced by SD.
void SelectionDAG::AddDbgValue(SDDbgValue *DB, SDNode *SD, bool isParameter) {
  DbgInfo->add(DB, SD, isParameter);
  if (SD)
    SD->setHasDebugValue(true);
}

/// TransferDbgValues - Transfer SDDbgValues.
void SelectionDAG::TransferDbgValues(SDValue From, SDValue To) {
  if (From == To || !From.getNode()->getHasDebugValue())
    return;
  SDNode *FromNode = From.getNode();
  SDNode *ToNode = To.getNode();
  ArrayRef<SDDbgValue *> DVs = GetDbgValues(FromNode);
  SmallVector<SDDbgValue *, 2> ClonedDVs;
  for (ArrayRef<SDDbgValue *>::iterator I = DVs.begin(), E = DVs.end();
       I != E; ++I) {
    SDDbgValue *Dbg = *I;
    if (Dbg->getKind() == SDDbgValue::SDNODE) {
      SDDbgValue *Clone = getDbgValue(Dbg->getMDPtr(), ToNode, To.getResNo(),
                                      Dbg->getOffset(), Dbg->getDebugLoc(),
                                      Dbg->getOrder());
      ClonedDVs.push_back(Clone);
    }
  }
  for (SmallVector<SDDbgValue *, 2>::iterator I = ClonedDVs.begin(),
       E = ClonedDVs.end(); I != E; ++I)
    AddDbgValue(*I, ToNode, false);
}
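// Example (illustrative sketch, not part of this file): a transform that
// replaces one value with another should move the debug info first so that
// dbg_value records follow the replacement. Assuming 'DAG' and single-result
// SDValues 'OldVal' and 'NewVal' are in scope:
//
//   DAG.TransferDbgValues(OldVal, NewVal);
//   DAG.ReplaceAllUsesWith(OldVal, NewVal);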
//===----------------------------------------------------------------------===//
//                              SDNode Class
//===----------------------------------------------------------------------===//

HandleSDNode::~HandleSDNode() {
  DropOperands();
}

GlobalAddressSDNode::GlobalAddressSDNode(unsigned Opc, DebugLoc DL,
                                         const GlobalValue *GA,
                                         EVT VT, int64_t o, unsigned char TF)
  : SDNode(Opc, DL, getSDVTList(VT)), Offset(o), TargetFlags(TF) {
  TheGlobal = GA;
}

MemSDNode::MemSDNode(unsigned Opc, DebugLoc dl, SDVTList VTs, EVT memvt,
                     MachineMemOperand *mmo)
  : SDNode(Opc, dl, VTs), MemoryVT(memvt), MMO(mmo) {
  SubclassData = encodeMemSDNodeFlags(0, ISD::UNINDEXED, MMO->isVolatile(),
                                      MMO->isNonTemporal(), MMO->isInvariant());
  assert(isVolatile() == MMO->isVolatile() && "Volatile encoding error!");
  assert(isNonTemporal() == MMO->isNonTemporal() &&
         "Non-temporal encoding error!");
  assert(memvt.getStoreSize() == MMO->getSize() && "Size mismatch!");
}

MemSDNode::MemSDNode(unsigned Opc, DebugLoc dl, SDVTList VTs,
                     const SDValue *Ops, unsigned NumOps, EVT memvt,
                     MachineMemOperand *mmo)
  : SDNode(Opc, dl, VTs, Ops, NumOps),
    MemoryVT(memvt), MMO(mmo) {
  SubclassData = encodeMemSDNodeFlags(0, ISD::UNINDEXED, MMO->isVolatile(),
                                      MMO->isNonTemporal(), MMO->isInvariant());
  assert(isVolatile() == MMO->isVolatile() && "Volatile encoding error!");
  assert(memvt.getStoreSize() == MMO->getSize() && "Size mismatch!");
}

/// Profile - Gather unique data for the node.
///
void SDNode::Profile(FoldingSetNodeID &ID) const {
  AddNodeIDNode(ID, this);
}

namespace {
  struct EVTArray {
    std::vector<EVT> VTs;

    EVTArray() {
      VTs.reserve(MVT::LAST_VALUETYPE);
      for (unsigned i = 0; i < MVT::LAST_VALUETYPE; ++i)
        VTs.push_back(MVT((MVT::SimpleValueType)i));
    }
  };
}

static ManagedStatic<std::set<EVT, EVT::compareRawBits> > EVTs;
static ManagedStatic<EVTArray> SimpleVTArray;
static ManagedStatic<sys::SmartMutex<true> > VTMutex;

/// getValueTypeList - Return a pointer to the specified value type.
///
const EVT *SDNode::getValueTypeList(EVT VT) {
  if (VT.isExtended()) {
    sys::SmartScopedLock<true> Lock(*VTMutex);
    return &(*EVTs->insert(VT).first);
  } else {
    assert(VT.getSimpleVT() < MVT::LAST_VALUETYPE &&
           "Value type out of range!");
    return &SimpleVTArray->VTs[VT.getSimpleVT().SimpleTy];
  }
}

/// hasNUsesOfValue - Return true if there are exactly NUSES uses of the
/// indicated value.  This method ignores uses of other values defined by this
/// operation.
bool SDNode::hasNUsesOfValue(unsigned NUses, unsigned Value) const {
  assert(Value < getNumValues() && "Bad value!");

  // TODO: Only iterate over uses of a given value of the node
  for (SDNode::use_iterator UI = use_begin(), E = use_end(); UI != E; ++UI) {
    if (UI.getUse().getResNo() == Value) {
      if (NUses == 0)
        return false;
      --NUses;
    }
  }

  // Found exactly the right number of uses?
  return NUses == 0;
}

/// hasAnyUseOfValue - Return true if there are any uses of the indicated
/// value. This method ignores uses of other values defined by this operation.
bool SDNode::hasAnyUseOfValue(unsigned Value) const {
  assert(Value < getNumValues() && "Bad value!");

  for (SDNode::use_iterator UI = use_begin(), E = use_end(); UI != E; ++UI)
    if (UI.getUse().getResNo() == Value)
      return true;

  return false;
}

/// isOnlyUserOf - Return true if this node is the only use of N.
///
bool SDNode::isOnlyUserOf(SDNode *N) const {
  bool Seen = false;
  for (SDNode::use_iterator I = N->use_begin(), E = N->use_end(); I != E; ++I) {
    SDNode *User = *I;
    if (User == this)
      Seen = true;
    else
      return false;
  }

  return Seen;
}

/// isOperandOf - Return true if this node is an operand of N.
///
bool SDValue::isOperandOf(SDNode *N) const {
  for (unsigned i = 0, e = N->getNumOperands(); i != e; ++i)
    if (*this == N->getOperand(i))
      return true;
  return false;
}

bool SDNode::isOperandOf(SDNode *N) const {
  for (unsigned i = 0, e = N->NumOperands; i != e; ++i)
    if (this == N->OperandList[i].getNode())
      return true;
  return false;
}
/// reachesChainWithoutSideEffects - Return true if this operand (which must
/// be a chain) reaches the specified operand without crossing any
/// side-effecting instructions on any chain path.  In practice, this looks
/// through token factors and non-volatile loads.  In order to remain
/// efficient, it only looks a couple of nodes in; it does not do an
/// exhaustive search.
bool SDValue::reachesChainWithoutSideEffects(SDValue Dest,
                                             unsigned Depth) const {
  if (*this == Dest) return true;

  // Don't search too deeply, we just want to be able to see through
  // TokenFactor's etc.
  if (Depth == 0) return false;

  // If this is a token factor, all inputs to the TF happen in parallel.  If any
  // of the operands of the TF does not reach dest, then we cannot do the xform.
  if (getOpcode() == ISD::TokenFactor) {
    for (unsigned i = 0, e = getNumOperands(); i != e; ++i)
      if (!getOperand(i).reachesChainWithoutSideEffects(Dest, Depth-1))
        return false;
    return true;
  }

  // Loads don't have side effects, look through them.
  if (LoadSDNode *Ld = dyn_cast<LoadSDNode>(*this)) {
    if (!Ld->isVolatile())
      return Ld->getChain().reachesChainWithoutSideEffects(Dest, Depth-1);
  }
  return false;
}

/// hasPredecessor - Return true if N is a predecessor of this node.
/// N is either an operand of this node, or can be reached by recursively
/// traversing up the operands.
/// NOTE: This is an expensive method. Use it carefully.
bool SDNode::hasPredecessor(const SDNode *N) const {
  SmallPtrSet<const SDNode *, 32> Visited;
  SmallVector<const SDNode *, 16> Worklist;
  return hasPredecessorHelper(N, Visited, Worklist);
}

bool SDNode::hasPredecessorHelper(const SDNode *N,
                                  SmallPtrSet<const SDNode *, 32> &Visited,
                                  SmallVector<const SDNode *, 16> &Worklist) const {
  if (Visited.empty()) {
    Worklist.push_back(this);
  } else {
    // Take a look in the visited set. If we've already encountered this node
    // we needn't search further.
    if (Visited.count(N))
      return true;
  }

  // Haven't visited N yet. Continue the search.
  while (!Worklist.empty()) {
    const SDNode *M = Worklist.pop_back_val();
    for (unsigned i = 0, e = M->getNumOperands(); i != e; ++i) {
      SDNode *Op = M->getOperand(i).getNode();
      if (Visited.insert(Op))
        Worklist.push_back(Op);
      if (Op == N)
        return true;
    }
  }

  return false;
}

uint64_t SDNode::getConstantOperandVal(unsigned Num) const {
  assert(Num < NumOperands && "Invalid child # of SDNode!");
  return cast<ConstantSDNode>(OperandList[Num])->getZExtValue();
}
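// Example (illustrative sketch, not part of this file): guarding a folding
// transform against cycle creation. Assuming a candidate user 'N' and a
// load node 'Load' that we would like to make an operand of N:
//
//   if (!Load->hasPredecessor(N)) {
//     // Safe: N does not feed (directly or transitively) into Load, so
//     // making Load an operand of N cannot create a cycle.
//   }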
SDValue SelectionDAG::UnrollVectorOp(SDNode *N, unsigned ResNE) {
  assert(N->getNumValues() == 1 &&
         "Can't unroll a vector with multiple results!");

  EVT VT = N->getValueType(0);
  unsigned NE = VT.getVectorNumElements();
  EVT EltVT = VT.getVectorElementType();
  DebugLoc dl = N->getDebugLoc();

  SmallVector<SDValue, 8> Scalars;
  SmallVector<SDValue, 4> Operands(N->getNumOperands());

  // If ResNE is 0, fully unroll the vector op.
  if (ResNE == 0)
    ResNE = NE;
  else if (NE > ResNE)
    NE = ResNE;

  unsigned i;
  for (i = 0; i != NE; ++i) {
    for (unsigned j = 0, e = N->getNumOperands(); j != e; ++j) {
      SDValue Operand = N->getOperand(j);
      EVT OperandVT = Operand.getValueType();
      if (OperandVT.isVector()) {
        // A vector operand; extract a single element.
        EVT OperandEltVT = OperandVT.getVectorElementType();
        Operands[j] = getNode(ISD::EXTRACT_VECTOR_ELT, dl,
                              OperandEltVT,
                              Operand,
                              getConstant(i, TLI.getPointerTy()));
      } else {
        // A scalar operand; just use it as is.
        Operands[j] = Operand;
      }
    }

    switch (N->getOpcode()) {
    default:
      Scalars.push_back(getNode(N->getOpcode(), dl, EltVT,
                                &Operands[0], Operands.size()));
      break;
    case ISD::VSELECT:
      Scalars.push_back(getNode(ISD::SELECT, dl, EltVT,
                                &Operands[0], Operands.size()));
      break;
    case ISD::SHL:
    case ISD::SRA:
    case ISD::SRL:
    case ISD::ROTL:
    case ISD::ROTR:
      Scalars.push_back(getNode(N->getOpcode(), dl, EltVT, Operands[0],
                                getShiftAmountOperand(Operands[0].getValueType(),
                                                      Operands[1])));
      break;
    case ISD::SIGN_EXTEND_INREG:
    case ISD::FP_ROUND_INREG: {
      EVT ExtVT = cast<VTSDNode>(Operands[1])->getVT().getVectorElementType();
      Scalars.push_back(getNode(N->getOpcode(), dl, EltVT,
                                Operands[0],
                                getValueType(ExtVT)));
    }
    }
  }

  for (; i < ResNE; ++i)
    Scalars.push_back(getUNDEF(EltVT));

  return getNode(ISD::BUILD_VECTOR, dl,
                 EVT::getVectorVT(*getContext(), EltVT, ResNE),
                 &Scalars[0], Scalars.size());
}
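// Example (illustrative sketch, not part of this file): scalarizing a vector
// node during legalization-style lowering. Assuming 'DAG' and a
// single-result vector node 'N':
//
//   SDValue Full = DAG.UnrollVectorOp(N, 0);  // one scalar op per element
//   SDValue Wide = DAG.UnrollVectorOp(N, 8);  // pad with undef out to 8
//                                             // elements (or trim to 8)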
/// isConsecutiveLoad - Return true if LD is loading 'Bytes' bytes from a
/// location that is 'Dist' units away from the location that the 'Base' load
/// is loading from.
bool SelectionDAG::isConsecutiveLoad(LoadSDNode *LD, LoadSDNode *Base,
                                     unsigned Bytes, int Dist) const {
  if (LD->getChain() != Base->getChain())
    return false;
  EVT VT = LD->getValueType(0);
  if (VT.getSizeInBits() / 8 != Bytes)
    return false;

  SDValue Loc = LD->getOperand(1);
  SDValue BaseLoc = Base->getOperand(1);
  if (Loc.getOpcode() == ISD::FrameIndex) {
    if (BaseLoc.getOpcode() != ISD::FrameIndex)
      return false;
    const MachineFrameInfo *MFI = getMachineFunction().getFrameInfo();
    int FI  = cast<FrameIndexSDNode>(Loc)->getIndex();
    int BFI = cast<FrameIndexSDNode>(BaseLoc)->getIndex();
    int FS  = MFI->getObjectSize(FI);
    int BFS = MFI->getObjectSize(BFI);
    if (FS != BFS || FS != (int)Bytes) return false;
    return MFI->getObjectOffset(FI) == (MFI->getObjectOffset(BFI) + Dist*Bytes);
  }

  // Handle X+C
  if (isBaseWithConstantOffset(Loc) && Loc.getOperand(0) == BaseLoc &&
      cast<ConstantSDNode>(Loc.getOperand(1))->getSExtValue() == Dist*Bytes)
    return true;

  const GlobalValue *GV1 = NULL;
  const GlobalValue *GV2 = NULL;
  int64_t Offset1 = 0;
  int64_t Offset2 = 0;
  bool isGA1 = TLI.isGAPlusOffset(Loc.getNode(), GV1, Offset1);
  bool isGA2 = TLI.isGAPlusOffset(BaseLoc.getNode(), GV2, Offset2);
  if (isGA1 && isGA2 && GV1 == GV2)
    return Offset1 == (Offset2 + Dist*Bytes);
  return false;
}

/// InferPtrAlignment - Infer alignment of a load / store address. Return 0 if
/// it cannot be inferred.
unsigned SelectionDAG::InferPtrAlignment(SDValue Ptr) const {
  // If this is a GlobalAddress + cst, return the alignment.
  const GlobalValue *GV;
  int64_t GVOffset = 0;
  if (TLI.isGAPlusOffset(Ptr.getNode(), GV, GVOffset)) {
    unsigned PtrWidth = TLI.getPointerTy().getSizeInBits();
    APInt KnownZero(PtrWidth, 0), KnownOne(PtrWidth, 0);
    llvm::ComputeMaskedBits(const_cast<GlobalValue*>(GV), KnownZero, KnownOne,
                            TLI.getTargetData());
    unsigned AlignBits = KnownZero.countTrailingOnes();
    unsigned Align = AlignBits ? 1 << std::min(31U, AlignBits) : 0;
    if (Align)
      return MinAlign(Align, GVOffset);
  }

  // If this is a direct reference to a stack slot, use information about the
  // stack slot's alignment.
  int FrameIdx = 1 << 31;
  int64_t FrameOffset = 0;
  if (FrameIndexSDNode *FI = dyn_cast<FrameIndexSDNode>(Ptr)) {
    FrameIdx = FI->getIndex();
  } else if (isBaseWithConstantOffset(Ptr) &&
             isa<FrameIndexSDNode>(Ptr.getOperand(0))) {
    // Handle FI+Cst
    FrameIdx = cast<FrameIndexSDNode>(Ptr.getOperand(0))->getIndex();
    FrameOffset = Ptr.getConstantOperandVal(1);
  }

  if (FrameIdx != (1 << 31)) {
    const MachineFrameInfo &MFI = *getMachineFunction().getFrameInfo();
    unsigned FIInfoAlign = MinAlign(MFI.getObjectAlignment(FrameIdx),
                                    FrameOffset);
    return FIInfoAlign;
  }

  return 0;
}

// getAddressSpace - Return the address space this GlobalAddress belongs to.
unsigned GlobalAddressSDNode::getAddressSpace() const {
  return getGlobal()->getType()->getAddressSpace();
}

Type *ConstantPoolSDNode::getType() const {
  if (isMachineConstantPoolEntry())
    return Val.MachineCPVal->getType();
  return Val.ConstVal->getType();
}

bool BuildVectorSDNode::isConstantSplat(APInt &SplatValue,
                                        APInt &SplatUndef,
                                        unsigned &SplatBitSize,
                                        bool &HasAnyUndefs,
                                        unsigned MinSplatBits,
                                        bool isBigEndian) {
  EVT VT = getValueType(0);
  assert(VT.isVector() && "Expected a vector type");
  unsigned sz = VT.getSizeInBits();
  if (MinSplatBits > sz)
    return false;

  SplatValue = APInt(sz, 0);
  SplatUndef = APInt(sz, 0);

  // Get the bits.  Bits with undefined values (when the corresponding element
  // of the vector is an ISD::UNDEF value) are set in SplatUndef and cleared
  // in SplatValue.  If any of the values are not constant, give up and return
  // false.
  unsigned int nOps = getNumOperands();
  assert(nOps > 0 && "isConstantSplat has 0-size build vector");
  unsigned EltBitSize = VT.getVectorElementType().getSizeInBits();

  for (unsigned j = 0; j < nOps; ++j) {
    unsigned i = isBigEndian ? nOps-1-j : j;
    SDValue OpVal = getOperand(i);
    unsigned BitPos = j * EltBitSize;

    if (OpVal.getOpcode() == ISD::UNDEF)
      SplatUndef |= APInt::getBitsSet(sz, BitPos, BitPos + EltBitSize);
    else if (ConstantSDNode *CN = dyn_cast<ConstantSDNode>(OpVal))
      SplatValue |= CN->getAPIntValue().zextOrTrunc(EltBitSize).
                    zextOrTrunc(sz) << BitPos;
    else if (ConstantFPSDNode *CN = dyn_cast<ConstantFPSDNode>(OpVal))
      SplatValue |= CN->getValueAPF().bitcastToAPInt().zextOrTrunc(sz) << BitPos;
    else
      return false;
  }

  // The build_vector is all constants or undefs. Find the smallest element
  // size that splats the vector.

  HasAnyUndefs = (SplatUndef != 0);
  while (sz > 8) {
    unsigned HalfSize = sz / 2;
    APInt HighValue = SplatValue.lshr(HalfSize).trunc(HalfSize);
    APInt LowValue  = SplatValue.trunc(HalfSize);
    APInt HighUndef = SplatUndef.lshr(HalfSize).trunc(HalfSize);
    APInt LowUndef  = SplatUndef.trunc(HalfSize);

    // If the two halves do not match (ignoring undef bits), stop here.
    if ((HighValue & ~LowUndef) != (LowValue & ~HighUndef) ||
        MinSplatBits > HalfSize)
      break;

    SplatValue = HighValue | LowValue;
    SplatUndef = HighUndef & LowUndef;

    sz = HalfSize;
  }

  SplatBitSize = sz;
  return true;
}
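// Example (illustrative sketch, not part of this file): targets use
// isConstantSplat to match immediates that splat instructions can
// materialize. Assuming a BuildVectorSDNode 'BVN'; MinSplatBits and the
// endianness flag are chosen by the caller (the header supplies defaults):
//
//   APInt SplatValue, SplatUndef;
//   unsigned SplatBitSize;
//   bool HasAnyUndefs;
//   if (BVN->isConstantSplat(SplatValue, SplatUndef, SplatBitSize,
//                            HasAnyUndefs, 0, false) &&
//       SplatBitSize <= 32) {
//     // SplatValue's low SplatBitSize bits repeat across the vector.
//   }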
bool ShuffleVectorSDNode::isSplatMask(const int *Mask, EVT VT) {
  // Find the first non-undef value in the shuffle mask.
  unsigned i, e;
  for (i = 0, e = VT.getVectorNumElements(); i != e && Mask[i] < 0; ++i)
    /* search */;

  assert(i != e && "VECTOR_SHUFFLE node with all undef indices!");

  // Make sure all remaining elements are either undef or the same as the first
  // non-undef value.
  for (int Idx = Mask[i]; i != e; ++i)
    if (Mask[i] >= 0 && Mask[i] != Idx)
      return false;
  return true;
}

#ifdef XDEBUG
static void checkForCyclesHelper(const SDNode *N,
                                 SmallPtrSet<const SDNode*, 32> &Visited,
                                 SmallPtrSet<const SDNode*, 32> &Checked) {
  // If this node has already been checked, don't check it again.
  if (Checked.count(N))
    return;

  // If a node has already been visited on this depth-first walk, reject it as
  // a cycle.
  if (!Visited.insert(N)) {
    dbgs() << "Offending node:\n";
    N->dumprFull();
    errs() << "Detected cycle in SelectionDAG\n";
    abort();
  }

  for (unsigned i = 0, e = N->getNumOperands(); i != e; ++i)
    checkForCyclesHelper(N->getOperand(i).getNode(), Visited, Checked);

  Checked.insert(N);
  Visited.erase(N);
}
#endif

void llvm::checkForCycles(const llvm::SDNode *N) {
#ifdef XDEBUG
  assert(N && "Checking nonexistent SDNode");
  SmallPtrSet<const SDNode*, 32> visited;
  SmallPtrSet<const SDNode*, 32> checked;
  checkForCyclesHelper(N, visited, checked);
#endif
}

void llvm::checkForCycles(const llvm::SelectionDAG *DAG) {
  checkForCycles(DAG->getRoot().getNode());
}