//===-- LegalizeVectorOps.cpp - Implement SelectionDAG::LegalizeVectors --===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file implements the SelectionDAG::LegalizeVectors method.
//
// The vector legalizer looks for vector operations which might need to be
// scalarized and legalizes them. This is a separate step from Legalize because
// scalarizing can introduce illegal types. For example, suppose we have an
// ISD::SDIV of type v2i64 on x86-32. The type is legal (for example, addition
// on a v2i64 is legal), but ISD::SDIV isn't legal, so we have to unroll the
// operation, which introduces nodes with the illegal type i64 which must be
// expanded. Similarly, suppose we have an ISD::SRA of type v16i8 on PowerPC;
// the operation must be unrolled, which introduces nodes with the illegal
// type i8 which must be promoted.
//
// This does not legalize vector manipulations like ISD::BUILD_VECTOR,
// or operations that happen to take a vector which are custom-lowered;
// the legalization for such operations never produces nodes
// with illegal types, so it's okay to put off legalizing them until
// SelectionDAG::Legalize runs.
//
//===----------------------------------------------------------------------===//

#include "llvm/CodeGen/SelectionDAG.h"
#include "llvm/Target/TargetLowering.h"
using namespace llvm;

namespace {
class VectorLegalizer {
  SelectionDAG& DAG;
  const TargetLowering &TLI;
  bool Changed; // Keep track of whether anything changed

  /// For nodes that are of legal width, and that have more than one use, this
  /// map indicates what regularized operand to use. This allows us to avoid
  /// legalizing the same thing more than once.
  SmallDenseMap<SDValue, SDValue, 64> LegalizedNodes;

  /// \brief Adds a node to the translation cache.
  void AddLegalizedOperand(SDValue From, SDValue To) {
    LegalizedNodes.insert(std::make_pair(From, To));
    // If someone requests legalization of the new node, return itself.
    if (From != To)
      LegalizedNodes.insert(std::make_pair(To, To));
  }

  /// \brief Legalizes the given node.
  SDValue LegalizeOp(SDValue Op);

  /// \brief Assuming the node is legal, "legalize" the results.
  SDValue TranslateLegalizeResults(SDValue Op, SDValue Result);

  /// \brief Implements unrolling a VSETCC.
  SDValue UnrollVSETCC(SDValue Op);

  /// \brief Implement expand-based legalization of vector operations.
  ///
  /// This is just a high-level routine to dispatch to specific code paths for
  /// operations to legalize them.
  SDValue Expand(SDValue Op);

  /// \brief Implements expansion for UINT_TO_FLOAT; falls back to
  /// UnrollVectorOp if SINT_TO_FP and SRL on vectors aren't legal.
  SDValue ExpandUINT_TO_FLOAT(SDValue Op);

  /// \brief Implement expansion for SIGN_EXTEND_INREG using SHL and SRA.
  SDValue ExpandSEXTINREG(SDValue Op);

  /// \brief Implement expansion for ANY_EXTEND_VECTOR_INREG.
  ///
  /// Shuffles the low lanes of the operand into place and bitcasts to the
  /// proper type. The contents of the bits in the extended part of each
  /// element are undef.
  SDValue ExpandANY_EXTEND_VECTOR_INREG(SDValue Op);

  /// \brief Implement expansion for SIGN_EXTEND_VECTOR_INREG.
  ///
  /// Shuffles the low lanes of the operand into place, bitcasts to the proper
  /// type, then shifts left and arithmetic shifts right to introduce a sign
  /// extension.
  SDValue ExpandSIGN_EXTEND_VECTOR_INREG(SDValue Op);

  /// \brief Implement expansion for ZERO_EXTEND_VECTOR_INREG.
  ///
  /// Shuffles the low lanes of the operand into place and blends zeros into
  /// the remaining lanes, finally bitcasting to the proper type.
  SDValue ExpandZERO_EXTEND_VECTOR_INREG(SDValue Op);

  /// \brief Expand bswap of vectors into a shuffle if legal.
  SDValue ExpandBSWAP(SDValue Op);

  /// \brief Implement vselect in terms of XOR, AND, OR when blend is not
  /// supported by the target.
  SDValue ExpandVSELECT(SDValue Op);
  SDValue ExpandSELECT(SDValue Op);
  SDValue ExpandLoad(SDValue Op);
  SDValue ExpandStore(SDValue Op);

  /// \brief Implements expansion for FNEG; falls back to UnrollVectorOp if
  /// FSUB isn't legal.
  SDValue ExpandFNEG(SDValue Op);
  SDValue ExpandBITREVERSE(SDValue Op);
  SDValue ExpandCTLZ_CTTZ_ZERO_UNDEF(SDValue Op);

  /// \brief Implements vector promotion.
  ///
  /// This is essentially just bitcasting the operands to a different type and
  /// bitcasting the result back to the original type.
  SDValue Promote(SDValue Op);

  /// \brief Implements [SU]INT_TO_FP vector promotion.
  ///
  /// This is a [zs]ext of the input operand to the next size up.
  SDValue PromoteINT_TO_FP(SDValue Op);

  /// \brief Implements FP_TO_[SU]INT vector promotion of the result type.
  ///
  /// It is promoted to the next size up integer type. The result is then
  /// truncated back to the original type.
  SDValue PromoteFP_TO_INT(SDValue Op, bool isSigned);

public:
  /// \brief Begin legalizing the vector operations in the DAG.
  bool Run();
  VectorLegalizer(SelectionDAG& dag) :
      DAG(dag), TLI(dag.getTargetLoweringInfo()), Changed(false) {}
};

bool VectorLegalizer::Run() {
  // Before we start legalizing vector nodes, check if there are any vectors.
  bool HasVectors = false;
  for (SelectionDAG::allnodes_iterator I = DAG.allnodes_begin(),
       E = std::prev(DAG.allnodes_end()); I != std::next(E); ++I) {
    // Check if the values of the nodes contain vectors. We don't need to check
    // the operands because we are going to check their values at some point.
    for (SDNode::value_iterator J = I->value_begin(), E = I->value_end();
         J != E; ++J)
      HasVectors |= J->isVector();

    // If we found a vector node we can start the legalization.
    if (HasVectors)
      break;
  }

  // If this basic block has no vectors then no need to legalize vectors.
  if (!HasVectors)
    return false;

  // The legalize process is inherently a bottom-up recursive process (users
  // legalize their uses before themselves). Given infinite stack space, we
  // could just start legalizing on the root and traverse the whole graph. In
  // practice however, this causes us to run out of stack space on large basic
  // blocks. To avoid this problem, compute an ordering of the nodes where each
  // node is only legalized after all of its operands are legalized.
  DAG.AssignTopologicalOrder();
  for (SelectionDAG::allnodes_iterator I = DAG.allnodes_begin(),
       E = std::prev(DAG.allnodes_end()); I != std::next(E); ++I)
    LegalizeOp(SDValue(&*I, 0));

  // Finally, it's possible the root changed. Get the new root.
  SDValue OldRoot = DAG.getRoot();
  assert(LegalizedNodes.count(OldRoot) && "Root didn't get legalized?");
  DAG.setRoot(LegalizedNodes[OldRoot]);

  LegalizedNodes.clear();

  // Remove dead nodes now.
  DAG.RemoveDeadNodes();

  return Changed;
}

SDValue VectorLegalizer::TranslateLegalizeResults(SDValue Op, SDValue Result) {
  // Generic legalization: just pass the operand through.
  for (unsigned i = 0, e = Op.getNode()->getNumValues(); i != e; ++i)
    AddLegalizedOperand(Op.getValue(i), Result.getValue(i));
  return Result.getValue(Op.getResNo());
}

SDValue VectorLegalizer::LegalizeOp(SDValue Op) {
  // Note that LegalizeOp may be reentered even from single-use nodes, which
  // means that we always must cache transformed nodes.
  DenseMap<SDValue, SDValue>::iterator I = LegalizedNodes.find(Op);
  if (I != LegalizedNodes.end()) return I->second;

  SDNode* Node = Op.getNode();

  // Legalize the operands
  SmallVector<SDValue, 8> Ops;
  for (const SDValue &Op : Node->op_values())
    Ops.push_back(LegalizeOp(Op));

  SDValue Result = SDValue(DAG.UpdateNodeOperands(Op.getNode(), Ops), 0);

  bool HasVectorValue = false;
  if (Op.getOpcode() == ISD::LOAD) {
    LoadSDNode *LD = cast<LoadSDNode>(Op.getNode());
    ISD::LoadExtType ExtType = LD->getExtensionType();
    if (LD->getMemoryVT().isVector() && ExtType != ISD::NON_EXTLOAD)
      switch (TLI.getLoadExtAction(LD->getExtensionType(), LD->getValueType(0),
                                   LD->getMemoryVT())) {
      default: llvm_unreachable("This action is not supported yet!");
      case TargetLowering::Legal:
        return TranslateLegalizeResults(Op, Result);
      case TargetLowering::Custom:
        if (SDValue Lowered = TLI.LowerOperation(Result, DAG)) {
          if (Lowered == Result)
            return TranslateLegalizeResults(Op, Lowered);
          Changed = true;
          if (Lowered->getNumValues() != Op->getNumValues()) {
            // This expanded to something other than the load. Assume the
            // lowering code took care of any chain values, and just handle the
            // returned value.
            assert(Result.getValue(1).use_empty() &&
                   "There are still live users of the old chain!");
            return LegalizeOp(Lowered);
          }
          return TranslateLegalizeResults(Op, Lowered);
        }
        // FALL THROUGH to Expand when LowerOperation returns a null SDValue.
      case TargetLowering::Expand:
        Changed = true;
        return LegalizeOp(ExpandLoad(Op));
      }
  } else if (Op.getOpcode() == ISD::STORE) {
    StoreSDNode *ST = cast<StoreSDNode>(Op.getNode());
    EVT StVT = ST->getMemoryVT();
    MVT ValVT = ST->getValue().getSimpleValueType();
    if (StVT.isVector() && ST->isTruncatingStore())
      switch (TLI.getTruncStoreAction(ValVT, StVT)) {
      default: llvm_unreachable("This action is not supported yet!");
      case TargetLowering::Legal:
        return TranslateLegalizeResults(Op, Result);
      case TargetLowering::Custom: {
        SDValue Lowered = TLI.LowerOperation(Result, DAG);
        Changed = Lowered != Result;
        return TranslateLegalizeResults(Op, Lowered);
      }
      case TargetLowering::Expand:
        Changed = true;
        return LegalizeOp(ExpandStore(Op));
      }
  } else if (Op.getOpcode() == ISD::MSCATTER || Op.getOpcode() == ISD::MSTORE)
    HasVectorValue = true;

  for (SDNode::value_iterator J = Node->value_begin(), E = Node->value_end();
       J != E;
       ++J)
    HasVectorValue |= J->isVector();
  if (!HasVectorValue)
    return TranslateLegalizeResults(Op, Result);

  EVT QueryType;
  switch (Op.getOpcode()) {
  default:
    return TranslateLegalizeResults(Op, Result);
  case ISD::ADD:
  case ISD::SUB:
  case ISD::MUL:
  case ISD::SDIV:
  case ISD::UDIV:
  case ISD::SREM:
  case ISD::UREM:
  case ISD::SDIVREM:
  case ISD::UDIVREM:
  case ISD::FADD:
  case ISD::FSUB:
  case ISD::FMUL:
  case ISD::FDIV:
  case ISD::FREM:
  case ISD::AND:
  case ISD::OR:
  case ISD::XOR:
  case ISD::SHL:
  case ISD::SRA:
  case ISD::SRL:
  case ISD::ROTL:
  case ISD::ROTR:
  case ISD::BSWAP:
  case ISD::BITREVERSE:
  case ISD::CTLZ:
  case ISD::CTTZ:
  case ISD::CTLZ_ZERO_UNDEF:
  case ISD::CTTZ_ZERO_UNDEF:
  case ISD::CTPOP:
  case ISD::SELECT:
  case ISD::VSELECT:
  case ISD::SELECT_CC:
  case ISD::SETCC:
  case ISD::ZERO_EXTEND:
  case ISD::ANY_EXTEND:
  case ISD::TRUNCATE:
  case ISD::SIGN_EXTEND:
  case ISD::FP_TO_SINT:
  case ISD::FP_TO_UINT:
  case ISD::FNEG:
  case ISD::FABS:
  case ISD::FMINNUM:
  case ISD::FMAXNUM:
  case ISD::FMINNAN:
  case ISD::FMAXNAN:
  case ISD::FCOPYSIGN:
  case ISD::FSQRT:
  case ISD::FSIN:
  case ISD::FCOS:
  case ISD::FPOWI:
  case ISD::FPOW:
  case ISD::FLOG:
  case ISD::FLOG2:
  case ISD::FLOG10:
  case ISD::FEXP:
  case ISD::FEXP2:
  case ISD::FCEIL:
  case ISD::FTRUNC:
  case ISD::FRINT:
  case ISD::FNEARBYINT:
  case ISD::FROUND:
  case ISD::FFLOOR:
  case ISD::FP_ROUND:
  case ISD::FP_EXTEND:
  case ISD::FMA:
  case ISD::SIGN_EXTEND_INREG:
  case ISD::ANY_EXTEND_VECTOR_INREG:
  case ISD::SIGN_EXTEND_VECTOR_INREG:
  case ISD::ZERO_EXTEND_VECTOR_INREG:
  case ISD::SMIN:
  case ISD::SMAX:
  case ISD::UMIN:
  case ISD::UMAX:
    QueryType = Node->getValueType(0);
    break;
  case ISD::FP_ROUND_INREG:
    QueryType = cast<VTSDNode>(Node->getOperand(1))->getVT();
    break;
  case ISD::SINT_TO_FP:
  case ISD::UINT_TO_FP:
    QueryType = Node->getOperand(0).getValueType();
    break;
  case ISD::MSCATTER:
    QueryType = cast<MaskedScatterSDNode>(Node)->getValue().getValueType();
    break;
  case ISD::MSTORE:
    QueryType = cast<MaskedStoreSDNode>(Node)->getValue().getValueType();
    break;
  }

  switch (TLI.getOperationAction(Node->getOpcode(), QueryType)) {
  default: llvm_unreachable("This action is not supported yet!");
  case TargetLowering::Promote:
    Result = Promote(Op);
    Changed = true;
    break;
  case TargetLowering::Legal:
    break;
  case TargetLowering::Custom: {
    if (SDValue Tmp1 = TLI.LowerOperation(Op, DAG)) {
      Result = Tmp1;
      break;
    }
    // FALL THROUGH
  }
  case TargetLowering::Expand:
    Result = Expand(Op);
  }

  // Make sure that the generated code is itself legal.
  if (Result != Op) {
    Result = LegalizeOp(Result);
    Changed = true;
  }

  // Note that LegalizeOp may be reentered even from single-use nodes, which
  // means that we always must cache transformed nodes.
  AddLegalizedOperand(Op, Result);
  return Result;
}

SDValue VectorLegalizer::Promote(SDValue Op) {
  // For a few operations there is a specific concept for promotion based on
  // the operand's type.
  switch (Op.getOpcode()) {
  case ISD::SINT_TO_FP:
  case ISD::UINT_TO_FP:
    // "Promote" the operation by extending the operand.
    return PromoteINT_TO_FP(Op);
  case ISD::FP_TO_UINT:
  case ISD::FP_TO_SINT:
    // Promote the operation by extending the operand.
    return PromoteFP_TO_INT(Op, Op->getOpcode() == ISD::FP_TO_SINT);
  }

  // There are currently two cases of vector promotion:
  // 1) Bitcasting a vector of integers to a different type with the same
  //    overall bit width. For example, x86 promotes ISD::AND v2i32 to v1i64.
  // 2) Extending a vector of floats to a vector of the same number of larger
  //    floats. For example, AArch64 promotes ISD::FADD on v4f16 to v4f32.
  MVT VT = Op.getSimpleValueType();
  assert(Op.getNode()->getNumValues() == 1 &&
         "Can't promote a vector with multiple results!");
  MVT NVT = TLI.getTypeToPromoteTo(Op.getOpcode(), VT);
  SDLoc dl(Op);
  SmallVector<SDValue, 4> Operands(Op.getNumOperands());

  for (unsigned j = 0; j != Op.getNumOperands(); ++j) {
    if (Op.getOperand(j).getValueType().isVector())
      if (Op.getOperand(j)
              .getValueType()
              .getVectorElementType()
              .isFloatingPoint() &&
          NVT.isVector() && NVT.getVectorElementType().isFloatingPoint())
        Operands[j] = DAG.getNode(ISD::FP_EXTEND, dl, NVT, Op.getOperand(j));
      else
        Operands[j] = DAG.getNode(ISD::BITCAST, dl, NVT, Op.getOperand(j));
    else
      Operands[j] = Op.getOperand(j);
  }

  Op = DAG.getNode(Op.getOpcode(), dl, NVT, Operands, Op.getNode()->getFlags());
  if ((VT.isFloatingPoint() && NVT.isFloatingPoint()) ||
      (VT.isVector() && VT.getVectorElementType().isFloatingPoint() &&
       NVT.isVector() && NVT.getVectorElementType().isFloatingPoint()))
    return DAG.getNode(ISD::FP_ROUND, dl, VT, Op, DAG.getIntPtrConstant(0, dl));
  else
    return DAG.getNode(ISD::BITCAST, dl, VT, Op);
}

SDValue VectorLegalizer::PromoteINT_TO_FP(SDValue Op) {
  // INT_TO_FP operations may require the input operand be promoted even
  // when the type is otherwise legal.
  EVT VT = Op.getOperand(0).getValueType();
  assert(Op.getNode()->getNumValues() == 1 &&
         "Can't promote a vector with multiple results!");

  // Normal getTypeToPromoteTo() doesn't work here, as that will promote
  // by widening the vector w/ the same element width and twice the number
  // of elements. We want the other way around, the same number of elements,
  // each twice the width.
  //
  // Increase the bitwidth of the element to the next pow-of-two
  // (which is greater than 8 bits).
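  //
  // For example (a sketch, not tied to any particular target): promoting a
  // UINT_TO_FP from v4i16 zero-extends the input to v4i32 and redoes the
  // conversion from v4i32, keeping the element count while doubling the
  // element width.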

  EVT NVT = VT.widenIntegerVectorElementType(*DAG.getContext());
  assert(NVT.isSimple() && "Promoting to a non-simple vector type!");
  SDLoc dl(Op);
  SmallVector<SDValue, 4> Operands(Op.getNumOperands());

  unsigned Opc = Op.getOpcode() == ISD::UINT_TO_FP ? ISD::ZERO_EXTEND :
                                                     ISD::SIGN_EXTEND;
  for (unsigned j = 0; j != Op.getNumOperands(); ++j) {
    if (Op.getOperand(j).getValueType().isVector())
      Operands[j] = DAG.getNode(Opc, dl, NVT, Op.getOperand(j));
    else
      Operands[j] = Op.getOperand(j);
  }

  return DAG.getNode(Op.getOpcode(), dl, Op.getValueType(), Operands);
}

// For FP_TO_INT we promote the result type to a vector type with wider
// elements and then truncate the result. This is different from the default
// PromoteVector which uses bitcast to promote, thus assuming that the
// promoted vector type has the same overall size.
SDValue VectorLegalizer::PromoteFP_TO_INT(SDValue Op, bool isSigned) {
  assert(Op.getNode()->getNumValues() == 1 &&
         "Can't promote a vector with multiple results!");
  EVT VT = Op.getValueType();

  EVT NewVT;
  unsigned NewOpc;
  while (1) {
    NewVT = VT.widenIntegerVectorElementType(*DAG.getContext());
    assert(NewVT.isSimple() && "Promoting to a non-simple vector type!");
    if (TLI.isOperationLegalOrCustom(ISD::FP_TO_SINT, NewVT)) {
      NewOpc = ISD::FP_TO_SINT;
      break;
    }
    if (!isSigned && TLI.isOperationLegalOrCustom(ISD::FP_TO_UINT, NewVT)) {
      NewOpc = ISD::FP_TO_UINT;
      break;
    }
  }

  SDLoc loc(Op);
  SDValue promoted = DAG.getNode(NewOpc, loc, NewVT, Op.getOperand(0));
  return DAG.getNode(ISD::TRUNCATE, loc, VT, promoted);
}


SDValue VectorLegalizer::ExpandLoad(SDValue Op) {
  LoadSDNode *LD = cast<LoadSDNode>(Op.getNode());

  EVT SrcVT = LD->getMemoryVT();
  EVT SrcEltVT = SrcVT.getScalarType();
  unsigned NumElem = SrcVT.getVectorNumElements();

  SDValue NewChain;
  SDValue Value;
  if (SrcVT.getVectorNumElements() > 1 && !SrcEltVT.isByteSized()) {
    SDLoc dl(Op);

    SmallVector<SDValue, 8> Vals;
    SmallVector<SDValue, 8> LoadChains;

    EVT DstEltVT = LD->getValueType(0).getScalarType();
    SDValue Chain = LD->getChain();
    SDValue BasePTR = LD->getBasePtr();
    ISD::LoadExtType ExtType = LD->getExtensionType();

    // When elements in a vector are not byte-addressable, we cannot directly
    // load each element by advancing a pointer, which can only address bytes.
    // Instead, we load all the significant words, mask bits off, and
    // concatenate them to form each element. Finally, they are extended to
    // the destination scalar type to build the destination vector.
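    //
    // For example (purely illustrative): an extending load of <4 x i3> is
    // read back in pointer-width chunks, and each 3-bit element is then
    // recovered with an SRL by its bit offset, an AND with (1 << 3) - 1, and
    // an extension to the destination scalar type.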
    EVT WideVT = TLI.getPointerTy(DAG.getDataLayout());

    assert(WideVT.isRound() &&
           "Could not handle the sophisticated case when the widest integer is"
           " not power of 2.");
    assert(WideVT.bitsGE(SrcEltVT) &&
           "Type is not legalized?");

    unsigned WideBytes = WideVT.getStoreSize();
    unsigned Offset = 0;
    unsigned RemainingBytes = SrcVT.getStoreSize();
    SmallVector<SDValue, 8> LoadVals;

    while (RemainingBytes > 0) {
      SDValue ScalarLoad;
      unsigned LoadBytes = WideBytes;

      if (RemainingBytes >= LoadBytes) {
        ScalarLoad =
            DAG.getLoad(WideVT, dl, Chain, BasePTR,
                        LD->getPointerInfo().getWithOffset(Offset),
                        MinAlign(LD->getAlignment(), Offset),
                        LD->getMemOperand()->getFlags(), LD->getAAInfo());
      } else {
        EVT LoadVT = WideVT;
        while (RemainingBytes < LoadBytes) {
          LoadBytes >>= 1; // Reduce the load size by half.
          LoadVT = EVT::getIntegerVT(*DAG.getContext(), LoadBytes << 3);
        }
        ScalarLoad =
            DAG.getExtLoad(ISD::EXTLOAD, dl, WideVT, Chain, BasePTR,
                           LD->getPointerInfo().getWithOffset(Offset), LoadVT,
                           MinAlign(LD->getAlignment(), Offset),
                           LD->getMemOperand()->getFlags(), LD->getAAInfo());
      }

      RemainingBytes -= LoadBytes;
      Offset += LoadBytes;
      BasePTR = DAG.getNode(ISD::ADD, dl, BasePTR.getValueType(), BasePTR,
                            DAG.getConstant(LoadBytes, dl,
                                            BasePTR.getValueType()));

      LoadVals.push_back(ScalarLoad.getValue(0));
      LoadChains.push_back(ScalarLoad.getValue(1));
    }

    // Extract bits, pack and extend/trunc them into destination type.
    unsigned SrcEltBits = SrcEltVT.getSizeInBits();
    SDValue SrcEltBitMask = DAG.getConstant((1U << SrcEltBits) - 1, dl, WideVT);

    unsigned BitOffset = 0;
    unsigned WideIdx = 0;
    unsigned WideBits = WideVT.getSizeInBits();

    for (unsigned Idx = 0; Idx != NumElem; ++Idx) {
      SDValue Lo, Hi, ShAmt;

      if (BitOffset < WideBits) {
        ShAmt = DAG.getConstant(
            BitOffset, dl, TLI.getShiftAmountTy(WideVT, DAG.getDataLayout()));
        Lo = DAG.getNode(ISD::SRL, dl, WideVT, LoadVals[WideIdx], ShAmt);
        Lo = DAG.getNode(ISD::AND, dl, WideVT, Lo, SrcEltBitMask);
      }

      BitOffset += SrcEltBits;
      if (BitOffset >= WideBits) {
        WideIdx++;
        BitOffset -= WideBits;
        if (BitOffset > 0) {
          ShAmt = DAG.getConstant(
              SrcEltBits - BitOffset, dl,
              TLI.getShiftAmountTy(WideVT, DAG.getDataLayout()));
          Hi = DAG.getNode(ISD::SHL, dl, WideVT, LoadVals[WideIdx], ShAmt);
          Hi = DAG.getNode(ISD::AND, dl, WideVT, Hi, SrcEltBitMask);
        }
      }

      if (Hi.getNode())
        Lo = DAG.getNode(ISD::OR, dl, WideVT, Lo, Hi);

      switch (ExtType) {
      default: llvm_unreachable("Unknown extended-load op!");
      case ISD::EXTLOAD:
        Lo = DAG.getAnyExtOrTrunc(Lo, dl, DstEltVT);
        break;
      case ISD::ZEXTLOAD:
        Lo = DAG.getZExtOrTrunc(Lo, dl, DstEltVT);
        break;
      case ISD::SEXTLOAD:
        ShAmt =
            DAG.getConstant(WideBits - SrcEltBits, dl,
                            TLI.getShiftAmountTy(WideVT, DAG.getDataLayout()));
        Lo = DAG.getNode(ISD::SHL, dl, WideVT, Lo, ShAmt);
        Lo = DAG.getNode(ISD::SRA, dl, WideVT, Lo, ShAmt);
        Lo = DAG.getSExtOrTrunc(Lo, dl, DstEltVT);
        break;
      }
      Vals.push_back(Lo);
    }

    NewChain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, LoadChains);
    Value = DAG.getNode(ISD::BUILD_VECTOR, dl,
                        Op.getNode()->getValueType(0), Vals);
  } else {
    SDValue Scalarized = TLI.scalarizeVectorLoad(LD, DAG);

    NewChain = Scalarized.getValue(1);
    Value = Scalarized.getValue(0);
  }

  AddLegalizedOperand(Op.getValue(0), Value);
  AddLegalizedOperand(Op.getValue(1), NewChain);

  return (Op.getResNo() ? NewChain : Value);
}

SDValue VectorLegalizer::ExpandStore(SDValue Op) {
  StoreSDNode *ST = cast<StoreSDNode>(Op.getNode());

  EVT StVT = ST->getMemoryVT();
  EVT MemSclVT = StVT.getScalarType();
  unsigned ScalarSize = MemSclVT.getSizeInBits();

  // Round odd types to the next power of two.
  if (!isPowerOf2_32(ScalarSize)) {
    // FIXME: This is completely broken and inconsistent with ExpandLoad
    // handling.

    // For sub-byte element sizes, this ends up with 0 stride between elements,
    // so the same element just gets re-written to the same location. There
    // seem to be tests explicitly checking for this broken behavior, though.

    LLVMContext &Ctx = *DAG.getContext();

    EVT NewMemVT
      = EVT::getVectorVT(Ctx,
                         MemSclVT.getIntegerVT(Ctx, NextPowerOf2(ScalarSize)),
                         StVT.getVectorNumElements());

    SDValue NewVectorStore = DAG.getTruncStore(
        ST->getChain(), SDLoc(Op), ST->getValue(), ST->getBasePtr(),
        ST->getPointerInfo(), NewMemVT, ST->getAlignment(),
        ST->getMemOperand()->getFlags(), ST->getAAInfo());
    ST = cast<StoreSDNode>(NewVectorStore.getNode());
  }

  SDValue TF = TLI.scalarizeVectorStore(ST, DAG);
  AddLegalizedOperand(Op, TF);
  return TF;
}

SDValue VectorLegalizer::Expand(SDValue Op) {
  switch (Op->getOpcode()) {
  case ISD::SIGN_EXTEND_INREG:
    return ExpandSEXTINREG(Op);
  case ISD::ANY_EXTEND_VECTOR_INREG:
    return ExpandANY_EXTEND_VECTOR_INREG(Op);
  case ISD::SIGN_EXTEND_VECTOR_INREG:
    return ExpandSIGN_EXTEND_VECTOR_INREG(Op);
  case ISD::ZERO_EXTEND_VECTOR_INREG:
    return ExpandZERO_EXTEND_VECTOR_INREG(Op);
  case ISD::BSWAP:
    return ExpandBSWAP(Op);
  case ISD::VSELECT:
    return ExpandVSELECT(Op);
  case ISD::SELECT:
    return ExpandSELECT(Op);
  case ISD::UINT_TO_FP:
    return ExpandUINT_TO_FLOAT(Op);
  case ISD::FNEG:
    return ExpandFNEG(Op);
  case ISD::SETCC:
    return UnrollVSETCC(Op);
  case ISD::BITREVERSE:
    return ExpandBITREVERSE(Op);
  case ISD::CTLZ_ZERO_UNDEF:
  case ISD::CTTZ_ZERO_UNDEF:
    return ExpandCTLZ_CTTZ_ZERO_UNDEF(Op);
  default:
    return DAG.UnrollVectorOp(Op.getNode());
  }
}

SDValue VectorLegalizer::ExpandSELECT(SDValue Op) {
  // Lower a select instruction where the condition is a scalar and the
  // operands are vectors. Lower this select to VSELECT and implement it
  // using XOR, AND, OR. The selector bit is broadcast.
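  //
  // A sketch of the expansion that follows: the scalar condition is turned
  // into an all-ones or all-zeros scalar, broadcast into a mask vector M, and
  // the result is computed as (Op1 & M) | (Op2 & ~M) on integer bitcasts of
  // the operands.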
  EVT VT = Op.getValueType();
  SDLoc DL(Op);

  SDValue Mask = Op.getOperand(0);
  SDValue Op1 = Op.getOperand(1);
  SDValue Op2 = Op.getOperand(2);

  assert(VT.isVector() && !Mask.getValueType().isVector()
         && Op1.getValueType() == Op2.getValueType() && "Invalid type");

  unsigned NumElem = VT.getVectorNumElements();

  // If we can't even use the basic vector operations of
  // AND, OR, XOR, we will have to scalarize the op.
  // Notice that the operation may be 'promoted' which means that it is
  // 'bitcasted' to another type which is handled.
  // Also, we need to be able to construct a splat vector using BUILD_VECTOR.
  if (TLI.getOperationAction(ISD::AND, VT) == TargetLowering::Expand ||
      TLI.getOperationAction(ISD::XOR, VT) == TargetLowering::Expand ||
      TLI.getOperationAction(ISD::OR, VT) == TargetLowering::Expand ||
      TLI.getOperationAction(ISD::BUILD_VECTOR, VT) == TargetLowering::Expand)
    return DAG.UnrollVectorOp(Op.getNode());

  // Generate a mask operand.
  EVT MaskTy = VT.changeVectorElementTypeToInteger();

  // What is the size of each element in the vector mask.
  EVT BitTy = MaskTy.getScalarType();

  Mask = DAG.getSelect(DL, BitTy, Mask,
          DAG.getConstant(APInt::getAllOnesValue(BitTy.getSizeInBits()), DL,
                          BitTy),
          DAG.getConstant(0, DL, BitTy));

  // Broadcast the mask so that the entire vector is all-one or all zero.
  SmallVector<SDValue, 8> Ops(NumElem, Mask);
  Mask = DAG.getNode(ISD::BUILD_VECTOR, DL, MaskTy, Ops);

  // Bitcast the operands to be the same type as the mask.
  // This is needed when we select between FP types because
  // the mask is a vector of integers.
  Op1 = DAG.getNode(ISD::BITCAST, DL, MaskTy, Op1);
  Op2 = DAG.getNode(ISD::BITCAST, DL, MaskTy, Op2);

  SDValue AllOnes = DAG.getConstant(
            APInt::getAllOnesValue(BitTy.getSizeInBits()), DL, MaskTy);
  SDValue NotMask = DAG.getNode(ISD::XOR, DL, MaskTy, Mask, AllOnes);

  Op1 = DAG.getNode(ISD::AND, DL, MaskTy, Op1, Mask);
  Op2 = DAG.getNode(ISD::AND, DL, MaskTy, Op2, NotMask);
  SDValue Val = DAG.getNode(ISD::OR, DL, MaskTy, Op1, Op2);
  return DAG.getNode(ISD::BITCAST, DL, Op.getValueType(), Val);
}

SDValue VectorLegalizer::ExpandSEXTINREG(SDValue Op) {
  EVT VT = Op.getValueType();

  // Make sure that the SRA and SHL instructions are available.
  if (TLI.getOperationAction(ISD::SRA, VT) == TargetLowering::Expand ||
      TLI.getOperationAction(ISD::SHL, VT) == TargetLowering::Expand)
    return DAG.UnrollVectorOp(Op.getNode());

  SDLoc DL(Op);
  EVT OrigTy = cast<VTSDNode>(Op->getOperand(1))->getVT();

  unsigned BW = VT.getScalarType().getSizeInBits();
  unsigned OrigBW = OrigTy.getScalarType().getSizeInBits();
  SDValue ShiftSz = DAG.getConstant(BW - OrigBW, DL, VT);

  Op = Op.getOperand(0);
  Op = DAG.getNode(ISD::SHL, DL, VT, Op, ShiftSz);
  return DAG.getNode(ISD::SRA, DL, VT, Op, ShiftSz);
}

// Generically expand a vector anyext in register to a shuffle of the relevant
// lanes into the appropriate locations, with other lanes left undef.
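//
// For example (a little-endian sketch): an ANY_EXTEND_VECTOR_INREG from v8i16
// to v4i32 becomes a v8i16 shuffle with mask <0,-1,1,-1,2,-1,3,-1>, followed
// by a bitcast of the shuffled vector to v4i32.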
SDValue VectorLegalizer::ExpandANY_EXTEND_VECTOR_INREG(SDValue Op) {
  SDLoc DL(Op);
  EVT VT = Op.getValueType();
  int NumElements = VT.getVectorNumElements();
  SDValue Src = Op.getOperand(0);
  EVT SrcVT = Src.getValueType();
  int NumSrcElements = SrcVT.getVectorNumElements();

  // Build a base mask of undef shuffles.
  SmallVector<int, 16> ShuffleMask;
  ShuffleMask.resize(NumSrcElements, -1);

  // Place the extended lanes into the correct locations.
  int ExtLaneScale = NumSrcElements / NumElements;
  int EndianOffset = DAG.getDataLayout().isBigEndian() ? ExtLaneScale - 1 : 0;
  for (int i = 0; i < NumElements; ++i)
    ShuffleMask[i * ExtLaneScale + EndianOffset] = i;

  return DAG.getNode(
      ISD::BITCAST, DL, VT,
      DAG.getVectorShuffle(SrcVT, DL, Src, DAG.getUNDEF(SrcVT), ShuffleMask));
}

SDValue VectorLegalizer::ExpandSIGN_EXTEND_VECTOR_INREG(SDValue Op) {
  SDLoc DL(Op);
  EVT VT = Op.getValueType();
  SDValue Src = Op.getOperand(0);
  EVT SrcVT = Src.getValueType();

  // First build an any-extend node which can be legalized above when we
  // recurse through it.
  Op = DAG.getAnyExtendVectorInReg(Src, DL, VT);

  // Now we need to sign extend. Do this by shifting the elements. Even if
  // these aren't legal operations, they have a better chance of being
  // legalized without full scalarization than the sign extension does.
  unsigned EltWidth = VT.getVectorElementType().getSizeInBits();
  unsigned SrcEltWidth = SrcVT.getVectorElementType().getSizeInBits();
  SDValue ShiftAmount = DAG.getConstant(EltWidth - SrcEltWidth, DL, VT);
  return DAG.getNode(ISD::SRA, DL, VT,
                     DAG.getNode(ISD::SHL, DL, VT, Op, ShiftAmount),
                     ShiftAmount);
}

// Generically expand a vector zext in register to a shuffle of the relevant
// lanes into the appropriate locations, a blend of zero into the high bits,
// and a bitcast to the wider element type.
SDValue VectorLegalizer::ExpandZERO_EXTEND_VECTOR_INREG(SDValue Op) {
  SDLoc DL(Op);
  EVT VT = Op.getValueType();
  int NumElements = VT.getVectorNumElements();
  SDValue Src = Op.getOperand(0);
  EVT SrcVT = Src.getValueType();
  int NumSrcElements = SrcVT.getVectorNumElements();

  // Build up a zero vector to blend into this one.
  SDValue Zero = DAG.getConstant(0, DL, SrcVT);

  // Shuffle the incoming lanes into the correct position, and pull all other
  // lanes from the zero vector.
  SmallVector<int, 16> ShuffleMask;
  ShuffleMask.reserve(NumSrcElements);
  for (int i = 0; i < NumSrcElements; ++i)
    ShuffleMask.push_back(i);

  int ExtLaneScale = NumSrcElements / NumElements;
  int EndianOffset = DAG.getDataLayout().isBigEndian() ? ExtLaneScale - 1 : 0;
  for (int i = 0; i < NumElements; ++i)
    ShuffleMask[i * ExtLaneScale + EndianOffset] = NumSrcElements + i;

  return DAG.getNode(ISD::BITCAST, DL, VT,
                     DAG.getVectorShuffle(SrcVT, DL, Zero, Src, ShuffleMask));
}

static void createBSWAPShuffleMask(EVT VT, SmallVectorImpl<int> &ShuffleMask) {
  int ScalarSizeInBytes = VT.getScalarSizeInBits() / 8;
  for (int I = 0, E = VT.getVectorNumElements(); I != E; ++I)
    for (int J = ScalarSizeInBytes - 1; J >= 0; --J)
      ShuffleMask.push_back((I * ScalarSizeInBytes) + J);
}
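
// For example, for v2i32 the mask produced above is <3,2,1,0,7,6,5,4>, which
// reverses the bytes within each 32-bit element when applied to the
// byte-cast vector.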
SDValue VectorLegalizer::ExpandBSWAP(SDValue Op) {
  EVT VT = Op.getValueType();

  // Generate a byte-wise shuffle mask for the BSWAP.
  SmallVector<int, 16> ShuffleMask;
  createBSWAPShuffleMask(VT, ShuffleMask);
  EVT ByteVT = EVT::getVectorVT(*DAG.getContext(), MVT::i8, ShuffleMask.size());

  // Only emit a shuffle if the mask is legal.
  if (!TLI.isShuffleMaskLegal(ShuffleMask, ByteVT))
    return DAG.UnrollVectorOp(Op.getNode());

  SDLoc DL(Op);
  Op = DAG.getNode(ISD::BITCAST, DL, ByteVT, Op.getOperand(0));
  Op = DAG.getVectorShuffle(ByteVT, DL, Op, DAG.getUNDEF(ByteVT), ShuffleMask);
  return DAG.getNode(ISD::BITCAST, DL, VT, Op);
}

SDValue VectorLegalizer::ExpandBITREVERSE(SDValue Op) {
  EVT VT = Op.getValueType();

  // If we have the scalar operation, it's probably cheaper to unroll it.
  if (TLI.isOperationLegalOrCustom(ISD::BITREVERSE, VT.getScalarType()))
    return DAG.UnrollVectorOp(Op.getNode());

  // If the vector element width is a whole number of bytes, test if it's
  // legal to BSWAP shuffle the bytes and then perform the BITREVERSE on the
  // byte vector. This greatly reduces the number of bit shifts necessary.
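  //
  // For example, bit-reversing each element of a v4i32 can be done by
  // byte-swapping the elements with the shuffle mask above and then
  // bit-reversing each i8 lane, rather than emitting a long per-element
  // shift-and-mask sequence.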
  unsigned ScalarSizeInBits = VT.getScalarSizeInBits();
  if (ScalarSizeInBits > 8 && (ScalarSizeInBits % 8) == 0) {
    SmallVector<int, 16> BSWAPMask;
    createBSWAPShuffleMask(VT, BSWAPMask);

    EVT ByteVT = EVT::getVectorVT(*DAG.getContext(), MVT::i8, BSWAPMask.size());
    if (TLI.isShuffleMaskLegal(BSWAPMask, ByteVT) &&
        (TLI.isOperationLegalOrCustom(ISD::BITREVERSE, ByteVT) ||
         (TLI.isOperationLegalOrCustom(ISD::SHL, ByteVT) &&
          TLI.isOperationLegalOrCustom(ISD::SRL, ByteVT) &&
          TLI.isOperationLegalOrCustomOrPromote(ISD::AND, ByteVT) &&
          TLI.isOperationLegalOrCustomOrPromote(ISD::OR, ByteVT)))) {
      SDLoc DL(Op);
      Op = DAG.getNode(ISD::BITCAST, DL, ByteVT, Op.getOperand(0));
      Op = DAG.getVectorShuffle(ByteVT, DL, Op, DAG.getUNDEF(ByteVT),
                                BSWAPMask);
      Op = DAG.getNode(ISD::BITREVERSE, DL, ByteVT, Op);
      return DAG.getNode(ISD::BITCAST, DL, VT, Op);
    }
  }

  // If we have the appropriate vector bit operations, it is better to use them
  // than unrolling and expanding each component.
  if (!TLI.isOperationLegalOrCustom(ISD::SHL, VT) ||
      !TLI.isOperationLegalOrCustom(ISD::SRL, VT) ||
      !TLI.isOperationLegalOrCustomOrPromote(ISD::AND, VT) ||
      !TLI.isOperationLegalOrCustomOrPromote(ISD::OR, VT))
    return DAG.UnrollVectorOp(Op.getNode());

  // Let LegalizeDAG handle this later.
  return Op;
}

SDValue VectorLegalizer::ExpandVSELECT(SDValue Op) {
  // Implement VSELECT in terms of XOR, AND, OR
  // on platforms which do not support blend natively.
  SDLoc DL(Op);

  SDValue Mask = Op.getOperand(0);
  SDValue Op1 = Op.getOperand(1);
  SDValue Op2 = Op.getOperand(2);

  EVT VT = Mask.getValueType();

  // If we can't even use the basic vector operations of
  // AND, OR, XOR, we will have to scalarize the op.
  // Notice that the operation may be 'promoted' which means that it is
  // 'bitcasted' to another type which is handled.
  // This operation also isn't safe with AND, OR, XOR when the boolean
  // type is 0/1 as we need an all-ones vector constant to mask with.
  // FIXME: Sign extend 1 to all ones if that's legal on the target.
  if (TLI.getOperationAction(ISD::AND, VT) == TargetLowering::Expand ||
      TLI.getOperationAction(ISD::XOR, VT) == TargetLowering::Expand ||
      TLI.getOperationAction(ISD::OR, VT) == TargetLowering::Expand ||
      TLI.getBooleanContents(Op1.getValueType()) !=
          TargetLowering::ZeroOrNegativeOneBooleanContent)
    return DAG.UnrollVectorOp(Op.getNode());

  // If the mask and the type are different sizes, unroll the vector op. This
  // can occur when getSetCCResultType returns something that is different in
  // size from the operand types. For example, v4i8 = select v4i32, v4i8, v4i8.
  if (VT.getSizeInBits() != Op1.getValueType().getSizeInBits())
    return DAG.UnrollVectorOp(Op.getNode());

  // Bitcast the operands to be the same type as the mask.
  // This is needed when we select between FP types because
  // the mask is a vector of integers.
  Op1 = DAG.getNode(ISD::BITCAST, DL, VT, Op1);
  Op2 = DAG.getNode(ISD::BITCAST, DL, VT, Op2);

  SDValue AllOnes = DAG.getConstant(
    APInt::getAllOnesValue(VT.getScalarType().getSizeInBits()), DL, VT);
  SDValue NotMask = DAG.getNode(ISD::XOR, DL, VT, Mask, AllOnes);

  Op1 = DAG.getNode(ISD::AND, DL, VT, Op1, Mask);
  Op2 = DAG.getNode(ISD::AND, DL, VT, Op2, NotMask);
  SDValue Val = DAG.getNode(ISD::OR, DL, VT, Op1, Op2);
  return DAG.getNode(ISD::BITCAST, DL, Op.getValueType(), Val);
}

SDValue VectorLegalizer::ExpandUINT_TO_FLOAT(SDValue Op) {
  EVT VT = Op.getOperand(0).getValueType();
  SDLoc DL(Op);

  // Make sure that the SINT_TO_FP and SRL instructions are available.
  if (TLI.getOperationAction(ISD::SINT_TO_FP, VT) == TargetLowering::Expand ||
      TLI.getOperationAction(ISD::SRL, VT) == TargetLowering::Expand)
    return DAG.UnrollVectorOp(Op.getNode());

  EVT SVT = VT.getScalarType();
  assert((SVT.getSizeInBits() == 64 || SVT.getSizeInBits() == 32) &&
         "Elements in vector-UINT_TO_FP must be 32 or 64 bits wide");

  unsigned BW = SVT.getSizeInBits();
  SDValue HalfWord = DAG.getConstant(BW / 2, DL, VT);

  // Constants to clear the upper part of the word.
  // Notice that we can also use SHL+SHR, but using a constant is slightly
  // faster on x86.
  uint64_t HWMask =
      (SVT.getSizeInBits() == 64) ? 0x00000000FFFFFFFF : 0x0000FFFF;
  SDValue HalfWordMask = DAG.getConstant(HWMask, DL, VT);

  // Two to the power of half-word-size. Use an unsigned 64-bit shift so the
  // count stays in range when BW is 64.
  SDValue TWOHW =
      DAG.getConstantFP(double(1ULL << (BW / 2)), DL, Op.getValueType());

  // Clear the upper part of LO and shift down the upper part of HI.
  SDValue HI = DAG.getNode(ISD::SRL, DL, VT, Op.getOperand(0), HalfWord);
  SDValue LO = DAG.getNode(ISD::AND, DL, VT, Op.getOperand(0), HalfWordMask);

  // Convert hi and lo to floats.
  // Convert the hi part back to the upper values.
  // TODO: Can any fast-math-flags be set on these nodes?
  SDValue fHI = DAG.getNode(ISD::SINT_TO_FP, DL, Op.getValueType(), HI);
  fHI = DAG.getNode(ISD::FMUL, DL, Op.getValueType(), fHI, TWOHW);
  SDValue fLO = DAG.getNode(ISD::SINT_TO_FP, DL, Op.getValueType(), LO);

  // Add the two halves.
  return DAG.getNode(ISD::FADD, DL, Op.getValueType(), fHI, fLO);
}


SDValue VectorLegalizer::ExpandFNEG(SDValue Op) {
  if (TLI.isOperationLegalOrCustom(ISD::FSUB, Op.getValueType())) {
    SDLoc DL(Op);
    SDValue Zero = DAG.getConstantFP(-0.0, DL, Op.getValueType());
    // TODO: If FNEG had fast-math-flags, they'd get propagated to this FSUB.
    return DAG.getNode(ISD::FSUB, DL, Op.getValueType(),
                       Zero, Op.getOperand(0));
  }
  return DAG.UnrollVectorOp(Op.getNode());
}

SDValue VectorLegalizer::ExpandCTLZ_CTTZ_ZERO_UNDEF(SDValue Op) {
  // If the non-ZERO_UNDEF version is supported we can use that instead.
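  // The plain CTLZ/CTTZ nodes are well-defined for a zero input, unlike the
  // ZERO_UNDEF variants, so substituting them is always correct, if possibly
  // more expensive.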
  unsigned Opc = Op.getOpcode() == ISD::CTLZ_ZERO_UNDEF ? ISD::CTLZ : ISD::CTTZ;
  if (TLI.isOperationLegalOrCustom(Opc, Op.getValueType())) {
    SDLoc DL(Op);
    return DAG.getNode(Opc, DL, Op.getValueType(), Op.getOperand(0));
  }

  // Otherwise go ahead and unroll.
  return DAG.UnrollVectorOp(Op.getNode());
}

SDValue VectorLegalizer::UnrollVSETCC(SDValue Op) {
  EVT VT = Op.getValueType();
  unsigned NumElems = VT.getVectorNumElements();
  EVT EltVT = VT.getVectorElementType();
  SDValue LHS = Op.getOperand(0), RHS = Op.getOperand(1), CC = Op.getOperand(2);
  EVT TmpEltVT = LHS.getValueType().getVectorElementType();
  SDLoc dl(Op);
  SmallVector<SDValue, 8> Ops(NumElems);
  for (unsigned i = 0; i < NumElems; ++i) {
    SDValue LHSElem = DAG.getNode(
        ISD::EXTRACT_VECTOR_ELT, dl, TmpEltVT, LHS,
        DAG.getConstant(i, dl, TLI.getVectorIdxTy(DAG.getDataLayout())));
    SDValue RHSElem = DAG.getNode(
        ISD::EXTRACT_VECTOR_ELT, dl, TmpEltVT, RHS,
        DAG.getConstant(i, dl, TLI.getVectorIdxTy(DAG.getDataLayout())));
    Ops[i] = DAG.getNode(ISD::SETCC, dl,
                         TLI.getSetCCResultType(DAG.getDataLayout(),
                                                *DAG.getContext(), TmpEltVT),
                         LHSElem, RHSElem, CC);
    Ops[i] = DAG.getSelect(dl, EltVT, Ops[i],
                           DAG.getConstant(APInt::getAllOnesValue
                                           (EltVT.getSizeInBits()), dl, EltVT),
                           DAG.getConstant(0, dl, EltVT));
  }
  return DAG.getNode(ISD::BUILD_VECTOR, dl, VT, Ops);
}

} // end anonymous namespace

bool SelectionDAG::LegalizeVectors() {
  return VectorLegalizer(*this).Run();
}