//===-- LegalizeVectorOps.cpp - Implement SelectionDAG::LegalizeVectors ---===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file implements the SelectionDAG::LegalizeVectors method.
//
// The vector legalizer looks for vector operations which might need to be
// scalarized and legalizes them. This is a separate step from Legalize because
// scalarizing can introduce illegal types. For example, suppose we have an
// ISD::SDIV of type v2i64 on x86-32. The type is legal (for example, addition
// on a v2i64 is legal), but ISD::SDIV isn't legal, so we have to unroll the
// operation, which introduces nodes with the illegal type i64 which must be
// expanded. Similarly, suppose we have an ISD::SRA of type v16i8 on PowerPC;
// the operation must be unrolled, which introduces nodes with the illegal
// type i8 which must be promoted.
//
// This does not legalize vector manipulations like ISD::BUILD_VECTOR,
// or operations that happen to take a vector which are custom-lowered;
// the legalization for such operations never produces nodes
// with illegal types, so it's okay to put off legalizing them until
// SelectionDAG::Legalize runs.
//
//===----------------------------------------------------------------------===//

#include "llvm/CodeGen/SelectionDAG.h"
#include "llvm/Target/TargetLowering.h"
using namespace llvm;

namespace {
class VectorLegalizer {
  SelectionDAG& DAG;
  const TargetLowering &TLI;
  bool Changed; // Keep track of whether anything changed

  /// For nodes that are of legal width, and that have more than one use, this
  /// map indicates what regularized operand to use. This allows us to avoid
  /// legalizing the same thing more than once.
  SmallDenseMap<SDValue, SDValue, 64> LegalizedNodes;

  /// \brief Adds a node to the translation cache.
  void AddLegalizedOperand(SDValue From, SDValue To) {
    LegalizedNodes.insert(std::make_pair(From, To));
    // If someone requests legalization of the new node, return it directly.
    if (From != To)
      LegalizedNodes.insert(std::make_pair(To, To));
  }

  /// \brief Legalizes the given node.
  SDValue LegalizeOp(SDValue Op);

  /// \brief Assuming the node is legal, "legalize" the results.
  SDValue TranslateLegalizeResults(SDValue Op, SDValue Result);

  /// \brief Implements unrolling a VSETCC.
  SDValue UnrollVSETCC(SDValue Op);

  /// \brief Implement expand-based legalization of vector operations.
  ///
  /// This is just a high-level routine to dispatch to specific code paths for
  /// operations to legalize them.
  SDValue Expand(SDValue Op);

  /// \brief Implements expansion for UINT_TO_FP; falls back to UnrollVectorOp
  /// if SINT_TO_FP and SRL on vectors aren't legal.
  SDValue ExpandUINT_TO_FLOAT(SDValue Op);

  /// \brief Implement expansion for SIGN_EXTEND_INREG using SRL and SRA.
  SDValue ExpandSEXTINREG(SDValue Op);

  /// \brief Implement expansion for ANY_EXTEND_VECTOR_INREG.
  ///
  /// Shuffles the low lanes of the operand into place and bitcasts to the
  /// proper type. The contents of the bits in the extended part of each
  /// element are undef.
  SDValue ExpandANY_EXTEND_VECTOR_INREG(SDValue Op);

  /// \brief Implement expansion for SIGN_EXTEND_VECTOR_INREG.
  ///
  /// Shuffles the low lanes of the operand into place, bitcasts to the proper
  /// type, then shifts left and arithmetic shifts right to introduce a sign
  /// extension.
  SDValue ExpandSIGN_EXTEND_VECTOR_INREG(SDValue Op);

  /// \brief Implement expansion for ZERO_EXTEND_VECTOR_INREG.
  ///
  /// Shuffles the low lanes of the operand into place and blends zeros into
  /// the remaining lanes, finally bitcasting to the proper type.
  SDValue ExpandZERO_EXTEND_VECTOR_INREG(SDValue Op);

  /// \brief Expand bswap of vectors into a shuffle if legal.
  SDValue ExpandBSWAP(SDValue Op);

  /// \brief Implement vselect in terms of XOR, AND, OR when blend is not
  /// supported by the target.
  SDValue ExpandVSELECT(SDValue Op);
  SDValue ExpandSELECT(SDValue Op);
  SDValue ExpandLoad(SDValue Op);
  SDValue ExpandStore(SDValue Op);
  /// \brief Implements expansion for FNEG; falls back to UnrollVectorOp if
  /// FSUB isn't legal.
  SDValue ExpandFNEG(SDValue Op);
  SDValue ExpandFSUB(SDValue Op);
  SDValue ExpandBITREVERSE(SDValue Op);
  SDValue ExpandCTLZ(SDValue Op);
  SDValue ExpandCTTZ_ZERO_UNDEF(SDValue Op);

  /// \brief Implements vector promotion.
  ///
  /// This is essentially just bitcasting the operands to a different type and
  /// bitcasting the result back to the original type.
  SDValue Promote(SDValue Op);

  /// \brief Implements [SU]INT_TO_FP vector promotion.
  ///
  /// This is a [zs]ext of the input operand to the next size up.
  SDValue PromoteINT_TO_FP(SDValue Op);

  /// \brief Implements FP_TO_[SU]INT vector promotion of the result type.
  ///
  /// It is promoted to the next size up integer type. The result is then
  /// truncated back to the original type.
  SDValue PromoteFP_TO_INT(SDValue Op, bool isSigned);

public:
  /// \brief Begin legalizing the vector operations in the DAG.
  bool Run();
  VectorLegalizer(SelectionDAG& dag) :
      DAG(dag), TLI(dag.getTargetLoweringInfo()), Changed(false) {}
};

bool VectorLegalizer::Run() {
  // Before we start legalizing vector nodes, check if there are any vectors.
  bool HasVectors = false;
  for (SelectionDAG::allnodes_iterator I = DAG.allnodes_begin(),
       E = std::prev(DAG.allnodes_end()); I != std::next(E); ++I) {
    // Check if the values of the nodes contain vectors. We don't need to check
    // the operands because we are going to check their values at some point.
    for (SDNode::value_iterator J = I->value_begin(), E = I->value_end();
         J != E; ++J)
      HasVectors |= J->isVector();

    // If we found a vector node we can start the legalization.
    if (HasVectors)
      break;
  }

  // If this basic block has no vectors then there is no need to legalize them.
  if (!HasVectors)
    return false;

  // The legalize process is inherently a bottom-up recursive process (users
  // legalize their uses before themselves). Given infinite stack space, we
  // could just start legalizing on the root and traverse the whole graph. In
  // practice however, this causes us to run out of stack space on large basic
  // blocks. To avoid this problem, compute an ordering of the nodes where each
  // node is only legalized after all of its operands are legalized.
  DAG.AssignTopologicalOrder();
  for (SelectionDAG::allnodes_iterator I = DAG.allnodes_begin(),
       E = std::prev(DAG.allnodes_end()); I != std::next(E); ++I)
    LegalizeOp(SDValue(&*I, 0));

  // Finally, it's possible the root changed. Get the new root.
  SDValue OldRoot = DAG.getRoot();
  assert(LegalizedNodes.count(OldRoot) && "Root didn't get legalized?");
  DAG.setRoot(LegalizedNodes[OldRoot]);

  LegalizedNodes.clear();

  // Remove dead nodes now.
  DAG.RemoveDeadNodes();

  return Changed;
}

SDValue VectorLegalizer::TranslateLegalizeResults(SDValue Op, SDValue Result) {
  // Generic legalization: just pass the operand through.
  for (unsigned i = 0, e = Op.getNode()->getNumValues(); i != e; ++i)
    AddLegalizedOperand(Op.getValue(i), Result.getValue(i));
  return Result.getValue(Op.getResNo());
}

SDValue VectorLegalizer::LegalizeOp(SDValue Op) {
  // Note that LegalizeOp may be reentered even from single-use nodes, which
  // means that we always must cache transformed nodes.
  DenseMap<SDValue, SDValue>::iterator I = LegalizedNodes.find(Op);
  if (I != LegalizedNodes.end()) return I->second;

  SDNode* Node = Op.getNode();

  // Legalize the operands
  SmallVector<SDValue, 8> Ops;
  for (const SDValue &Op : Node->op_values())
    Ops.push_back(LegalizeOp(Op));

  SDValue Result = SDValue(DAG.UpdateNodeOperands(Op.getNode(), Ops), 0);

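  // Vector loads and stores are not covered by the generic operation-action
  // query below: extending vector loads are legalized according to the
  // target's load-extension action and truncating vector stores according to
  // its trunc-store action, so handle those two opcodes first.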
  bool HasVectorValue = false;
  if (Op.getOpcode() == ISD::LOAD) {
    LoadSDNode *LD = cast<LoadSDNode>(Op.getNode());
    ISD::LoadExtType ExtType = LD->getExtensionType();
    if (LD->getMemoryVT().isVector() && ExtType != ISD::NON_EXTLOAD)
      switch (TLI.getLoadExtAction(LD->getExtensionType(), LD->getValueType(0),
                                   LD->getMemoryVT())) {
      default: llvm_unreachable("This action is not supported yet!");
      case TargetLowering::Legal:
        return TranslateLegalizeResults(Op, Result);
      case TargetLowering::Custom:
        if (SDValue Lowered = TLI.LowerOperation(Result, DAG)) {
          if (Lowered == Result)
            return TranslateLegalizeResults(Op, Lowered);
          Changed = true;
          if (Lowered->getNumValues() != Op->getNumValues()) {
            // This expanded to something other than the load. Assume the
            // lowering code took care of any chain values, and just handle the
            // returned value.
            assert(Result.getValue(1).use_empty() &&
                   "There are still live users of the old chain!");
            return LegalizeOp(Lowered);
          }
          return TranslateLegalizeResults(Op, Lowered);
        }
        LLVM_FALLTHROUGH;
      case TargetLowering::Expand:
        Changed = true;
        return LegalizeOp(ExpandLoad(Op));
      }
  } else if (Op.getOpcode() == ISD::STORE) {
    StoreSDNode *ST = cast<StoreSDNode>(Op.getNode());
    EVT StVT = ST->getMemoryVT();
    MVT ValVT = ST->getValue().getSimpleValueType();
    if (StVT.isVector() && ST->isTruncatingStore())
      switch (TLI.getTruncStoreAction(ValVT, StVT)) {
      default: llvm_unreachable("This action is not supported yet!");
      case TargetLowering::Legal:
        return TranslateLegalizeResults(Op, Result);
      case TargetLowering::Custom: {
        SDValue Lowered = TLI.LowerOperation(Result, DAG);
        Changed = Lowered != Result;
        return TranslateLegalizeResults(Op, Lowered);
      }
      case TargetLowering::Expand:
        Changed = true;
        return LegalizeOp(ExpandStore(Op));
      }
  } else if (Op.getOpcode() == ISD::MSCATTER || Op.getOpcode() == ISD::MSTORE)
    HasVectorValue = true;

  for (SDNode::value_iterator J = Node->value_begin(), E = Node->value_end();
       J != E;
       ++J)
    HasVectorValue |= J->isVector();
  if (!HasVectorValue)
    return TranslateLegalizeResults(Op, Result);

  EVT QueryType;
  switch (Op.getOpcode()) {
  default:
    return TranslateLegalizeResults(Op, Result);
  case ISD::ADD:
  case ISD::SUB:
  case ISD::MUL:
  case ISD::SDIV:
  case ISD::UDIV:
  case ISD::SREM:
  case ISD::UREM:
  case ISD::SDIVREM:
  case ISD::UDIVREM:
  case ISD::FADD:
  case ISD::FSUB:
  case ISD::FMUL:
  case ISD::FDIV:
  case ISD::FREM:
  case ISD::AND:
  case ISD::OR:
  case ISD::XOR:
  case ISD::SHL:
  case ISD::SRA:
  case ISD::SRL:
  case ISD::ROTL:
  case ISD::ROTR:
  case ISD::BSWAP:
  case ISD::BITREVERSE:
  case ISD::CTLZ:
  case ISD::CTTZ:
  case ISD::CTLZ_ZERO_UNDEF:
  case ISD::CTTZ_ZERO_UNDEF:
  case ISD::CTPOP:
  case ISD::SELECT:
  case ISD::VSELECT:
  case ISD::SELECT_CC:
  case ISD::SETCC:
  case ISD::ZERO_EXTEND:
  case ISD::ANY_EXTEND:
  case ISD::TRUNCATE:
  case ISD::SIGN_EXTEND:
  case ISD::FP_TO_SINT:
  case ISD::FP_TO_UINT:
  case ISD::FNEG:
  case ISD::FABS:
  case ISD::FMINNUM:
  case ISD::FMAXNUM:
  case ISD::FMINNAN:
  case ISD::FMAXNAN:
  case ISD::FCOPYSIGN:
  case ISD::FSQRT:
  case ISD::FSIN:
  case ISD::FCOS:
  case ISD::FPOWI:
  case ISD::FPOW:
  case ISD::FLOG:
  case ISD::FLOG2:
  case ISD::FLOG10:
  case ISD::FEXP:
  case ISD::FEXP2:
  case ISD::FCEIL:
  case ISD::FTRUNC:
  case ISD::FRINT:
  case ISD::FNEARBYINT:
  case ISD::FROUND:
  case ISD::FFLOOR:
  case ISD::FP_ROUND:
  case ISD::FP_EXTEND:
  case ISD::FMA:
  case ISD::SIGN_EXTEND_INREG:
  case ISD::ANY_EXTEND_VECTOR_INREG:
  case ISD::SIGN_EXTEND_VECTOR_INREG:
  case ISD::ZERO_EXTEND_VECTOR_INREG:
  case ISD::SMIN:
  case ISD::SMAX:
  case ISD::UMIN:
  case ISD::UMAX:
  case ISD::SMUL_LOHI:
  case ISD::UMUL_LOHI:
    QueryType = Node->getValueType(0);
    break;
  case ISD::FP_ROUND_INREG:
    QueryType = cast<VTSDNode>(Node->getOperand(1))->getVT();
    break;
  case ISD::SINT_TO_FP:
  case ISD::UINT_TO_FP:
    QueryType = Node->getOperand(0).getValueType();
    break;
  case ISD::MSCATTER:
    QueryType = cast<MaskedScatterSDNode>(Node)->getValue().getValueType();
    break;
  case ISD::MSTORE:
    QueryType = cast<MaskedStoreSDNode>(Node)->getValue().getValueType();
    break;
  }

  switch (TLI.getOperationAction(Node->getOpcode(), QueryType)) {
  default: llvm_unreachable("This action is not supported yet!");
  case TargetLowering::Promote:
    Result = Promote(Op);
    Changed = true;
    break;
  case TargetLowering::Legal:
    break;
  case TargetLowering::Custom: {
    if (SDValue Tmp1 = TLI.LowerOperation(Op, DAG)) {
      Result = Tmp1;
      break;
    }
    LLVM_FALLTHROUGH;
  }
  case TargetLowering::Expand:
    Result = Expand(Op);
  }

  // Make sure that the generated code is itself legal.
  if (Result != Op) {
    Result = LegalizeOp(Result);
    Changed = true;
  }

  // Note that LegalizeOp may be reentered even from single-use nodes, which
  // means that we always must cache transformed nodes.
  AddLegalizedOperand(Op, Result);
  return Result;
}

SDValue VectorLegalizer::Promote(SDValue Op) {
  // For a few operations there is a specific concept for promotion based on
  // the operand's type.
  switch (Op.getOpcode()) {
  case ISD::SINT_TO_FP:
  case ISD::UINT_TO_FP:
    // "Promote" the operation by extending the operand.
    return PromoteINT_TO_FP(Op);
  case ISD::FP_TO_UINT:
  case ISD::FP_TO_SINT:
    // Promote the operation by extending the operand.
    return PromoteFP_TO_INT(Op, Op->getOpcode() == ISD::FP_TO_SINT);
  }

  // There are currently two cases of vector promotion:
  // 1) Bitcasting a vector of integers to a different type to a vector of the
  //    same overall length. For example, x86 promotes ISD::AND v2i32 to v1i64.
  // 2) Extending a vector of floats to a vector of the same number of larger
  //    floats. For example, AArch64 promotes ISD::FADD on v4f16 to v4f32.
  MVT VT = Op.getSimpleValueType();
  assert(Op.getNode()->getNumValues() == 1 &&
         "Can't promote a vector with multiple results!");
  MVT NVT = TLI.getTypeToPromoteTo(Op.getOpcode(), VT);
  SDLoc dl(Op);
  SmallVector<SDValue, 4> Operands(Op.getNumOperands());

  for (unsigned j = 0; j != Op.getNumOperands(); ++j) {
    if (Op.getOperand(j).getValueType().isVector())
      if (Op.getOperand(j)
              .getValueType()
              .getVectorElementType()
              .isFloatingPoint() &&
          NVT.isVector() && NVT.getVectorElementType().isFloatingPoint())
        Operands[j] = DAG.getNode(ISD::FP_EXTEND, dl, NVT, Op.getOperand(j));
      else
        Operands[j] = DAG.getNode(ISD::BITCAST, dl, NVT, Op.getOperand(j));
    else
      Operands[j] = Op.getOperand(j);
  }

  Op = DAG.getNode(Op.getOpcode(), dl, NVT, Operands, Op.getNode()->getFlags());
  if ((VT.isFloatingPoint() && NVT.isFloatingPoint()) ||
      (VT.isVector() && VT.getVectorElementType().isFloatingPoint() &&
       NVT.isVector() && NVT.getVectorElementType().isFloatingPoint()))
    return DAG.getNode(ISD::FP_ROUND, dl, VT, Op, DAG.getIntPtrConstant(0, dl));
  else
    return DAG.getNode(ISD::BITCAST, dl, VT, Op);
}

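// Illustrative example (not tied to any particular target): if a target marks
// [SU]INT_TO_FP on v2i8 as Promote, PromoteINT_TO_FP below rewrites
//   %f = uint_to_fp <2 x i8> %x to <2 x float>
// as
//   %w = zero_extend <2 x i8> %x to <2 x i16>
//   %f = uint_to_fp <2 x i16> %w to <2 x float>
// i.e. only the integer operand is widened; the FP result type is unchanged.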
SDValue VectorLegalizer::PromoteINT_TO_FP(SDValue Op) {
  // INT_TO_FP operations may require the input operand be promoted even
  // when the type is otherwise legal.
  EVT VT = Op.getOperand(0).getValueType();
  assert(Op.getNode()->getNumValues() == 1 &&
         "Can't promote a vector with multiple results!");

  // Normal getTypeToPromoteTo() doesn't work here, as that will promote
  // by widening the vector with the same element width and twice the number
  // of elements. We want the other way around: the same number of elements,
  // each twice the width.
  //
  // Increase the bitwidth of the element to the next pow-of-two
  // (which is greater than 8 bits).

  EVT NVT = VT.widenIntegerVectorElementType(*DAG.getContext());
  assert(NVT.isSimple() && "Promoting to a non-simple vector type!");
  SDLoc dl(Op);
  SmallVector<SDValue, 4> Operands(Op.getNumOperands());

  unsigned Opc = Op.getOpcode() == ISD::UINT_TO_FP ? ISD::ZERO_EXTEND :
                                                     ISD::SIGN_EXTEND;
  for (unsigned j = 0; j != Op.getNumOperands(); ++j) {
    if (Op.getOperand(j).getValueType().isVector())
      Operands[j] = DAG.getNode(Opc, dl, NVT, Op.getOperand(j));
    else
      Operands[j] = Op.getOperand(j);
  }

  return DAG.getNode(Op.getOpcode(), dl, Op.getValueType(), Operands);
}

// For FP_TO_INT we promote the result type to a vector type with wider
// elements and then truncate the result. This is different from the default
// PromoteVector which uses bitcast to promote, thus assuming that the
// promoted vector type has the same overall size.
SDValue VectorLegalizer::PromoteFP_TO_INT(SDValue Op, bool isSigned) {
  assert(Op.getNode()->getNumValues() == 1 &&
         "Can't promote a vector with multiple results!");
  EVT VT = Op.getValueType();

  EVT NewVT;
  unsigned NewOpc;
  while (1) {
    NewVT = VT.widenIntegerVectorElementType(*DAG.getContext());
    assert(NewVT.isSimple() && "Promoting to a non-simple vector type!");
    if (TLI.isOperationLegalOrCustom(ISD::FP_TO_SINT, NewVT)) {
      NewOpc = ISD::FP_TO_SINT;
      break;
    }
    if (!isSigned && TLI.isOperationLegalOrCustom(ISD::FP_TO_UINT, NewVT)) {
      NewOpc = ISD::FP_TO_UINT;
      break;
    }
  }

  SDLoc dl(Op);
  SDValue Promoted = DAG.getNode(NewOpc, dl, NewVT, Op.getOperand(0));
  return DAG.getNode(ISD::TRUNCATE, dl, VT, Promoted);
}


SDValue VectorLegalizer::ExpandLoad(SDValue Op) {
  LoadSDNode *LD = cast<LoadSDNode>(Op.getNode());

  EVT SrcVT = LD->getMemoryVT();
  EVT SrcEltVT = SrcVT.getScalarType();
  unsigned NumElem = SrcVT.getVectorNumElements();

  SDValue NewChain;
  SDValue Value;
  if (SrcVT.getVectorNumElements() > 1 && !SrcEltVT.isByteSized()) {
    SDLoc dl(Op);

    SmallVector<SDValue, 8> Vals;
    SmallVector<SDValue, 8> LoadChains;

    EVT DstEltVT = LD->getValueType(0).getScalarType();
    SDValue Chain = LD->getChain();
    SDValue BasePTR = LD->getBasePtr();
    ISD::LoadExtType ExtType = LD->getExtensionType();

    // When elements in a vector are not byte-addressable, we cannot directly
    // load each element by advancing a pointer, which can only address bytes.
    // Instead, we load all significant words, mask bits off, and concatenate
    // them to form each element. Finally, they are extended to the destination
    // scalar type to build the destination vector.
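    //
    // Illustrative sketch: a <4 x i5> load is expanded into one or more
    // integer word loads covering the 20 significant bits, after which
    // element i is reconstructed roughly as
    //   Elt[i] = (Words >> (5 * i)) & 0x1F
    // (combining two adjacent words when an element straddles a word
    // boundary), and then any-/zero-/sign-extended to the destination element
    // type according to the original extension kind.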
    EVT WideVT = TLI.getPointerTy(DAG.getDataLayout());

    assert(WideVT.isRound() &&
           "Could not handle the sophisticated case when the widest integer is"
           " not power of 2.");
    assert(WideVT.bitsGE(SrcEltVT) &&
           "Type is not legalized?");

    unsigned WideBytes = WideVT.getStoreSize();
    unsigned Offset = 0;
    unsigned RemainingBytes = SrcVT.getStoreSize();
    SmallVector<SDValue, 8> LoadVals;

    while (RemainingBytes > 0) {
      SDValue ScalarLoad;
      unsigned LoadBytes = WideBytes;

      if (RemainingBytes >= LoadBytes) {
        ScalarLoad =
            DAG.getLoad(WideVT, dl, Chain, BasePTR,
                        LD->getPointerInfo().getWithOffset(Offset),
                        MinAlign(LD->getAlignment(), Offset),
                        LD->getMemOperand()->getFlags(), LD->getAAInfo());
      } else {
        EVT LoadVT = WideVT;
        while (RemainingBytes < LoadBytes) {
          LoadBytes >>= 1; // Reduce the load size by half.
          LoadVT = EVT::getIntegerVT(*DAG.getContext(), LoadBytes << 3);
        }
        ScalarLoad =
            DAG.getExtLoad(ISD::EXTLOAD, dl, WideVT, Chain, BasePTR,
                           LD->getPointerInfo().getWithOffset(Offset), LoadVT,
                           MinAlign(LD->getAlignment(), Offset),
                           LD->getMemOperand()->getFlags(), LD->getAAInfo());
      }

      RemainingBytes -= LoadBytes;
      Offset += LoadBytes;
      BasePTR = DAG.getNode(ISD::ADD, dl, BasePTR.getValueType(), BasePTR,
                            DAG.getConstant(LoadBytes, dl,
                                            BasePTR.getValueType()));

      LoadVals.push_back(ScalarLoad.getValue(0));
      LoadChains.push_back(ScalarLoad.getValue(1));
    }

    // Extract bits, pack and extend/trunc them into destination type.
    unsigned SrcEltBits = SrcEltVT.getSizeInBits();
    SDValue SrcEltBitMask = DAG.getConstant((1U << SrcEltBits) - 1, dl, WideVT);

    unsigned BitOffset = 0;
    unsigned WideIdx = 0;
    unsigned WideBits = WideVT.getSizeInBits();

    for (unsigned Idx = 0; Idx != NumElem; ++Idx) {
      SDValue Lo, Hi, ShAmt;

      if (BitOffset < WideBits) {
        ShAmt = DAG.getConstant(
            BitOffset, dl, TLI.getShiftAmountTy(WideVT, DAG.getDataLayout()));
        Lo = DAG.getNode(ISD::SRL, dl, WideVT, LoadVals[WideIdx], ShAmt);
        Lo = DAG.getNode(ISD::AND, dl, WideVT, Lo, SrcEltBitMask);
      }

      BitOffset += SrcEltBits;
      if (BitOffset >= WideBits) {
        WideIdx++;
        BitOffset -= WideBits;
        if (BitOffset > 0) {
          ShAmt = DAG.getConstant(
              SrcEltBits - BitOffset, dl,
              TLI.getShiftAmountTy(WideVT, DAG.getDataLayout()));
          Hi = DAG.getNode(ISD::SHL, dl, WideVT, LoadVals[WideIdx], ShAmt);
          Hi = DAG.getNode(ISD::AND, dl, WideVT, Hi, SrcEltBitMask);
        }
      }

      if (Hi.getNode())
        Lo = DAG.getNode(ISD::OR, dl, WideVT, Lo, Hi);

      switch (ExtType) {
      default: llvm_unreachable("Unknown extended-load op!");
      case ISD::EXTLOAD:
        Lo = DAG.getAnyExtOrTrunc(Lo, dl, DstEltVT);
        break;
      case ISD::ZEXTLOAD:
        Lo = DAG.getZExtOrTrunc(Lo, dl, DstEltVT);
        break;
      case ISD::SEXTLOAD:
        ShAmt =
            DAG.getConstant(WideBits - SrcEltBits, dl,
                            TLI.getShiftAmountTy(WideVT, DAG.getDataLayout()));
        Lo = DAG.getNode(ISD::SHL, dl, WideVT, Lo, ShAmt);
        Lo = DAG.getNode(ISD::SRA, dl, WideVT, Lo, ShAmt);
        Lo = DAG.getSExtOrTrunc(Lo, dl, DstEltVT);
        break;
      }
      Vals.push_back(Lo);
    }

    NewChain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, LoadChains);
    Value = DAG.getBuildVector(Op.getNode()->getValueType(0), dl, Vals);
  } else {
    SDValue Scalarized = TLI.scalarizeVectorLoad(LD, DAG);

    NewChain = Scalarized.getValue(1);
    Value = Scalarized.getValue(0);
  }

  AddLegalizedOperand(Op.getValue(0), Value);
  AddLegalizedOperand(Op.getValue(1), NewChain);

  return (Op.getResNo() ? NewChain : Value);
}

SDValue VectorLegalizer::ExpandStore(SDValue Op) {
  StoreSDNode *ST = cast<StoreSDNode>(Op.getNode());

  EVT StVT = ST->getMemoryVT();
  EVT MemSclVT = StVT.getScalarType();
  unsigned ScalarSize = MemSclVT.getSizeInBits();

  // Round odd types to the next pow of two.
  if (!isPowerOf2_32(ScalarSize)) {
    // FIXME: This is completely broken and inconsistent with ExpandLoad
    // handling.

    // For sub-byte element sizes, this ends up with 0 stride between elements,
    // so the same element just gets re-written to the same location. There
    // seem to be tests explicitly checking for this broken behavior though.

    LLVMContext &Ctx = *DAG.getContext();

    EVT NewMemVT
      = EVT::getVectorVT(Ctx,
                         MemSclVT.getIntegerVT(Ctx, NextPowerOf2(ScalarSize)),
                         StVT.getVectorNumElements());

    SDValue NewVectorStore = DAG.getTruncStore(
        ST->getChain(), SDLoc(Op), ST->getValue(), ST->getBasePtr(),
        ST->getPointerInfo(), NewMemVT, ST->getAlignment(),
        ST->getMemOperand()->getFlags(), ST->getAAInfo());
    ST = cast<StoreSDNode>(NewVectorStore.getNode());
  }

  SDValue TF = TLI.scalarizeVectorStore(ST, DAG);
  AddLegalizedOperand(Op, TF);
  return TF;
}

SDValue VectorLegalizer::Expand(SDValue Op) {
  switch (Op->getOpcode()) {
  case ISD::SIGN_EXTEND_INREG:
    return ExpandSEXTINREG(Op);
  case ISD::ANY_EXTEND_VECTOR_INREG:
    return ExpandANY_EXTEND_VECTOR_INREG(Op);
  case ISD::SIGN_EXTEND_VECTOR_INREG:
    return ExpandSIGN_EXTEND_VECTOR_INREG(Op);
  case ISD::ZERO_EXTEND_VECTOR_INREG:
    return ExpandZERO_EXTEND_VECTOR_INREG(Op);
  case ISD::BSWAP:
    return ExpandBSWAP(Op);
  case ISD::VSELECT:
    return ExpandVSELECT(Op);
  case ISD::SELECT:
    return ExpandSELECT(Op);
  case ISD::UINT_TO_FP:
    return ExpandUINT_TO_FLOAT(Op);
  case ISD::FNEG:
    return ExpandFNEG(Op);
  case ISD::FSUB:
    return ExpandFSUB(Op);
  case ISD::SETCC:
    return UnrollVSETCC(Op);
  case ISD::BITREVERSE:
    return ExpandBITREVERSE(Op);
  case ISD::CTLZ:
  case ISD::CTLZ_ZERO_UNDEF:
    return ExpandCTLZ(Op);
  case ISD::CTTZ_ZERO_UNDEF:
    return ExpandCTTZ_ZERO_UNDEF(Op);
  default:
    return DAG.UnrollVectorOp(Op.getNode());
  }
}

SDValue VectorLegalizer::ExpandSELECT(SDValue Op) {
  // Lower a select instruction where the condition is a scalar and the
  // operands are vectors. Lower this select to VSELECT and implement it
  // using XOR, AND, OR. The selector bit is broadcast.
  EVT VT = Op.getValueType();
  SDLoc DL(Op);

  SDValue Mask = Op.getOperand(0);
  SDValue Op1 = Op.getOperand(1);
  SDValue Op2 = Op.getOperand(2);

  assert(VT.isVector() && !Mask.getValueType().isVector()
         && Op1.getValueType() == Op2.getValueType() && "Invalid type");

  // If we can't even use the basic vector operations of
  // AND, OR, XOR, we will have to scalarize the op.
  // Notice that the operation may be 'promoted' which means that it is
  // 'bitcasted' to another type which is handled.
  // Also, we need to be able to construct a splat vector using BUILD_VECTOR.
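  //
  // Illustrative sketch of the expansion done below (not a separate code
  // path): the scalar condition is turned into an all-ones/all-zeros element,
  // splatted to a vector mask M, and the result is computed as
  //   Result = (Op1 & M) | (Op2 & ~M)
  // on the integer-bitcast operands.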
  if (TLI.getOperationAction(ISD::AND, VT) == TargetLowering::Expand ||
      TLI.getOperationAction(ISD::XOR, VT) == TargetLowering::Expand ||
      TLI.getOperationAction(ISD::OR, VT) == TargetLowering::Expand ||
      TLI.getOperationAction(ISD::BUILD_VECTOR, VT) == TargetLowering::Expand)
    return DAG.UnrollVectorOp(Op.getNode());

  // Generate a mask operand.
  EVT MaskTy = VT.changeVectorElementTypeToInteger();

  // What is the size of each element in the vector mask.
  EVT BitTy = MaskTy.getScalarType();

  Mask = DAG.getSelect(DL, BitTy, Mask,
          DAG.getConstant(APInt::getAllOnesValue(BitTy.getSizeInBits()), DL,
                          BitTy),
          DAG.getConstant(0, DL, BitTy));

  // Broadcast the mask so that the entire vector is all-ones or all-zeros.
  Mask = DAG.getSplatBuildVector(MaskTy, DL, Mask);

  // Bitcast the operands to be the same type as the mask.
  // This is needed when we select between FP types because
  // the mask is a vector of integers.
  Op1 = DAG.getNode(ISD::BITCAST, DL, MaskTy, Op1);
  Op2 = DAG.getNode(ISD::BITCAST, DL, MaskTy, Op2);

  SDValue AllOnes = DAG.getConstant(
      APInt::getAllOnesValue(BitTy.getSizeInBits()), DL, MaskTy);
  SDValue NotMask = DAG.getNode(ISD::XOR, DL, MaskTy, Mask, AllOnes);

  Op1 = DAG.getNode(ISD::AND, DL, MaskTy, Op1, Mask);
  Op2 = DAG.getNode(ISD::AND, DL, MaskTy, Op2, NotMask);
  SDValue Val = DAG.getNode(ISD::OR, DL, MaskTy, Op1, Op2);
  return DAG.getNode(ISD::BITCAST, DL, Op.getValueType(), Val);
}

SDValue VectorLegalizer::ExpandSEXTINREG(SDValue Op) {
  EVT VT = Op.getValueType();

  // Make sure that the SRA and SHL instructions are available.
  if (TLI.getOperationAction(ISD::SRA, VT) == TargetLowering::Expand ||
      TLI.getOperationAction(ISD::SHL, VT) == TargetLowering::Expand)
    return DAG.UnrollVectorOp(Op.getNode());

  SDLoc DL(Op);
  EVT OrigTy = cast<VTSDNode>(Op->getOperand(1))->getVT();

  unsigned BW = VT.getScalarSizeInBits();
  unsigned OrigBW = OrigTy.getScalarSizeInBits();
  SDValue ShiftSz = DAG.getConstant(BW - OrigBW, DL, VT);

  Op = Op.getOperand(0);
  Op = DAG.getNode(ISD::SHL, DL, VT, Op, ShiftSz);
  return DAG.getNode(ISD::SRA, DL, VT, Op, ShiftSz);
}

// Generically expand a vector anyext in register to a shuffle of the relevant
// lanes into the appropriate locations, with other lanes left undef.
SDValue VectorLegalizer::ExpandANY_EXTEND_VECTOR_INREG(SDValue Op) {
  SDLoc DL(Op);
  EVT VT = Op.getValueType();
  int NumElements = VT.getVectorNumElements();
  SDValue Src = Op.getOperand(0);
  EVT SrcVT = Src.getValueType();
  int NumSrcElements = SrcVT.getVectorNumElements();

  // Build a base mask of undef shuffles.
  SmallVector<int, 16> ShuffleMask;
  ShuffleMask.resize(NumSrcElements, -1);

  // Place the extended lanes into the correct locations.
  int ExtLaneScale = NumSrcElements / NumElements;
  int EndianOffset = DAG.getDataLayout().isBigEndian() ? ExtLaneScale - 1 : 0;
  for (int i = 0; i < NumElements; ++i)
    ShuffleMask[i * ExtLaneScale + EndianOffset] = i;

  return DAG.getNode(
      ISD::BITCAST, DL, VT,
      DAG.getVectorShuffle(SrcVT, DL, Src, DAG.getUNDEF(SrcVT), ShuffleMask));
}

SDValue VectorLegalizer::ExpandSIGN_EXTEND_VECTOR_INREG(SDValue Op) {
  SDLoc DL(Op);
  EVT VT = Op.getValueType();
  SDValue Src = Op.getOperand(0);
  EVT SrcVT = Src.getValueType();

  // First build an any-extend node which can be legalized above when we
  // recurse through it.
  Op = DAG.getAnyExtendVectorInReg(Src, DL, VT);

  // Now we need sign extend. Do this by shifting the elements. Even if these
  // aren't legal operations, they have a better chance of being legalized
  // without full scalarization than the sign extension does.
  unsigned EltWidth = VT.getScalarSizeInBits();
  unsigned SrcEltWidth = SrcVT.getScalarSizeInBits();
  SDValue ShiftAmount = DAG.getConstant(EltWidth - SrcEltWidth, DL, VT);
  return DAG.getNode(ISD::SRA, DL, VT,
                     DAG.getNode(ISD::SHL, DL, VT, Op, ShiftAmount),
                     ShiftAmount);
}

// Generically expand a vector zext in register to a shuffle of the relevant
// lanes into the appropriate locations, a blend of zero into the high bits,
// and a bitcast to the wider element type.
SDValue VectorLegalizer::ExpandZERO_EXTEND_VECTOR_INREG(SDValue Op) {
  SDLoc DL(Op);
  EVT VT = Op.getValueType();
  int NumElements = VT.getVectorNumElements();
  SDValue Src = Op.getOperand(0);
  EVT SrcVT = Src.getValueType();
  int NumSrcElements = SrcVT.getVectorNumElements();

  // Build up a zero vector to blend into this one.
  SDValue Zero = DAG.getConstant(0, DL, SrcVT);

  // Shuffle the incoming lanes into the correct position, and pull all other
  // lanes from the zero vector.
  SmallVector<int, 16> ShuffleMask;
  ShuffleMask.reserve(NumSrcElements);
  for (int i = 0; i < NumSrcElements; ++i)
    ShuffleMask.push_back(i);

  int ExtLaneScale = NumSrcElements / NumElements;
  int EndianOffset = DAG.getDataLayout().isBigEndian() ? ExtLaneScale - 1 : 0;
  for (int i = 0; i < NumElements; ++i)
    ShuffleMask[i * ExtLaneScale + EndianOffset] = NumSrcElements + i;

  return DAG.getNode(ISD::BITCAST, DL, VT,
                     DAG.getVectorShuffle(SrcVT, DL, Zero, Src, ShuffleMask));
}

static void createBSWAPShuffleMask(EVT VT, SmallVectorImpl<int> &ShuffleMask) {
  int ScalarSizeInBytes = VT.getScalarSizeInBits() / 8;
  for (int I = 0, E = VT.getVectorNumElements(); I != E; ++I)
    for (int J = ScalarSizeInBytes - 1; J >= 0; --J)
      ShuffleMask.push_back((I * ScalarSizeInBytes) + J);
}

SDValue VectorLegalizer::ExpandBSWAP(SDValue Op) {
  EVT VT = Op.getValueType();

  // Generate a byte wise shuffle mask for the BSWAP.
  SmallVector<int, 16> ShuffleMask;
  createBSWAPShuffleMask(VT, ShuffleMask);
  EVT ByteVT = EVT::getVectorVT(*DAG.getContext(), MVT::i8, ShuffleMask.size());

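  // For example, for a v2i32 BSWAP the mask built above is
  // <3,2,1,0, 7,6,5,4>: the bytes of each 32-bit element are reversed while
  // the elements themselves stay in place.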
  // Only emit a shuffle if the mask is legal.
  if (!TLI.isShuffleMaskLegal(ShuffleMask, ByteVT))
    return DAG.UnrollVectorOp(Op.getNode());

  SDLoc DL(Op);
  Op = DAG.getNode(ISD::BITCAST, DL, ByteVT, Op.getOperand(0));
  Op = DAG.getVectorShuffle(ByteVT, DL, Op, DAG.getUNDEF(ByteVT), ShuffleMask);
  return DAG.getNode(ISD::BITCAST, DL, VT, Op);
}

SDValue VectorLegalizer::ExpandBITREVERSE(SDValue Op) {
  EVT VT = Op.getValueType();

  // If we have the scalar operation, it's probably cheaper to unroll it.
  if (TLI.isOperationLegalOrCustom(ISD::BITREVERSE, VT.getScalarType()))
    return DAG.UnrollVectorOp(Op.getNode());

  // If the vector element width is a whole number of bytes, test if it's legal
  // to BSWAP shuffle the bytes and then perform the BITREVERSE on the byte
  // vector. This greatly reduces the number of bit shifts necessary.
  unsigned ScalarSizeInBits = VT.getScalarSizeInBits();
  if (ScalarSizeInBits > 8 && (ScalarSizeInBits % 8) == 0) {
    SmallVector<int, 16> BSWAPMask;
    createBSWAPShuffleMask(VT, BSWAPMask);

    EVT ByteVT = EVT::getVectorVT(*DAG.getContext(), MVT::i8, BSWAPMask.size());
    if (TLI.isShuffleMaskLegal(BSWAPMask, ByteVT) &&
        (TLI.isOperationLegalOrCustom(ISD::BITREVERSE, ByteVT) ||
         (TLI.isOperationLegalOrCustom(ISD::SHL, ByteVT) &&
          TLI.isOperationLegalOrCustom(ISD::SRL, ByteVT) &&
          TLI.isOperationLegalOrCustomOrPromote(ISD::AND, ByteVT) &&
          TLI.isOperationLegalOrCustomOrPromote(ISD::OR, ByteVT)))) {
      SDLoc DL(Op);
      Op = DAG.getNode(ISD::BITCAST, DL, ByteVT, Op.getOperand(0));
      Op = DAG.getVectorShuffle(ByteVT, DL, Op, DAG.getUNDEF(ByteVT),
                                BSWAPMask);
      Op = DAG.getNode(ISD::BITREVERSE, DL, ByteVT, Op);
      return DAG.getNode(ISD::BITCAST, DL, VT, Op);
    }
  }

  // If we have the appropriate vector bit operations, it is better to use them
  // than unrolling and expanding each component.
  if (!TLI.isOperationLegalOrCustom(ISD::SHL, VT) ||
      !TLI.isOperationLegalOrCustom(ISD::SRL, VT) ||
      !TLI.isOperationLegalOrCustomOrPromote(ISD::AND, VT) ||
      !TLI.isOperationLegalOrCustomOrPromote(ISD::OR, VT))
    return DAG.UnrollVectorOp(Op.getNode());

  // Let LegalizeDAG handle this later.
  return Op;
}

SDValue VectorLegalizer::ExpandVSELECT(SDValue Op) {
  // Implement VSELECT in terms of XOR, AND, OR
  // on platforms which do not support blend natively.
  SDLoc DL(Op);

  SDValue Mask = Op.getOperand(0);
  SDValue Op1 = Op.getOperand(1);
  SDValue Op2 = Op.getOperand(2);

  EVT VT = Mask.getValueType();

  // If we can't even use the basic vector operations of
  // AND, OR, XOR, we will have to scalarize the op.
  // Notice that the operation may be 'promoted' which means that it is
  // 'bitcasted' to another type which is handled.
  // This operation also isn't safe with AND, OR, XOR when the boolean
  // type is 0/1 as we need an all-ones vector constant to mask with.
  // FIXME: Sign extend 1 to all ones if that's legal on the target.
  if (TLI.getOperationAction(ISD::AND, VT) == TargetLowering::Expand ||
      TLI.getOperationAction(ISD::XOR, VT) == TargetLowering::Expand ||
      TLI.getOperationAction(ISD::OR, VT) == TargetLowering::Expand ||
      TLI.getBooleanContents(Op1.getValueType()) !=
          TargetLowering::ZeroOrNegativeOneBooleanContent)
    return DAG.UnrollVectorOp(Op.getNode());

  // If the mask and the type are different sizes, unroll the vector op. This
  // can occur when getSetCCResultType returns something that is different in
  // size from the operand types. For example, v4i8 = select v4i32, v4i8, v4i8.
  if (VT.getSizeInBits() != Op1.getValueSizeInBits())
    return DAG.UnrollVectorOp(Op.getNode());

  // Bitcast the operands to be the same type as the mask.
  // This is needed when we select between FP types because
  // the mask is a vector of integers.
  Op1 = DAG.getNode(ISD::BITCAST, DL, VT, Op1);
  Op2 = DAG.getNode(ISD::BITCAST, DL, VT, Op2);

  SDValue AllOnes = DAG.getConstant(
      APInt::getAllOnesValue(VT.getScalarSizeInBits()), DL, VT);
  SDValue NotMask = DAG.getNode(ISD::XOR, DL, VT, Mask, AllOnes);

  Op1 = DAG.getNode(ISD::AND, DL, VT, Op1, Mask);
  Op2 = DAG.getNode(ISD::AND, DL, VT, Op2, NotMask);
  SDValue Val = DAG.getNode(ISD::OR, DL, VT, Op1, Op2);
  return DAG.getNode(ISD::BITCAST, DL, Op.getValueType(), Val);
}

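// Illustrative sketch of the expansion below for 32-bit elements: split each
// unsigned value x into 16-bit halves, x = Hi * 2^16 + Lo. Both halves are
// non-negative when viewed as signed lane-sized integers, so
//   uitofp(x) == sitofp(Hi) * 65536.0 + sitofp(Lo)
// which is what the SRL/AND followed by two SINT_TO_FP, an FMUL and an FADD
// compute. The 64-bit element case uses 32-bit halves and 2^32 the same way.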
SDValue VectorLegalizer::ExpandUINT_TO_FLOAT(SDValue Op) {
  EVT VT = Op.getOperand(0).getValueType();
  SDLoc DL(Op);

  // Make sure that the SINT_TO_FP and SRL instructions are available.
  if (TLI.getOperationAction(ISD::SINT_TO_FP, VT) == TargetLowering::Expand ||
      TLI.getOperationAction(ISD::SRL, VT) == TargetLowering::Expand)
    return DAG.UnrollVectorOp(Op.getNode());

  unsigned BW = VT.getScalarSizeInBits();
  assert((BW == 64 || BW == 32) &&
         "Elements in vector-UINT_TO_FP must be 32 or 64 bits wide");

  SDValue HalfWord = DAG.getConstant(BW / 2, DL, VT);

  // Constants to clear the upper part of the word.
  // Notice that we can also use SHL+SHR, but using a constant is slightly
  // faster on x86.
  uint64_t HWMask = (BW == 64) ? 0x00000000FFFFFFFF : 0x0000FFFF;
  SDValue HalfWordMask = DAG.getConstant(HWMask, DL, VT);

  // Two to the power of half-word-size. Use a 64-bit shift so the 64-bit
  // element case does not overflow a 32-bit int.
  SDValue TWOHW = DAG.getConstantFP(1ULL << (BW / 2), DL, Op.getValueType());

  // Clear upper part of LO, lower HI
  SDValue HI = DAG.getNode(ISD::SRL, DL, VT, Op.getOperand(0), HalfWord);
  SDValue LO = DAG.getNode(ISD::AND, DL, VT, Op.getOperand(0), HalfWordMask);

  // Convert hi and lo to floats
  // Convert the hi part back to the upper values
  // TODO: Can any fast-math-flags be set on these nodes?
  SDValue fHI = DAG.getNode(ISD::SINT_TO_FP, DL, Op.getValueType(), HI);
  fHI = DAG.getNode(ISD::FMUL, DL, Op.getValueType(), fHI, TWOHW);
  SDValue fLO = DAG.getNode(ISD::SINT_TO_FP, DL, Op.getValueType(), LO);

  // Add the two halves
  return DAG.getNode(ISD::FADD, DL, Op.getValueType(), fHI, fLO);
}

SDValue VectorLegalizer::ExpandFNEG(SDValue Op) {
  if (TLI.isOperationLegalOrCustom(ISD::FSUB, Op.getValueType())) {
    SDLoc DL(Op);
    SDValue Zero = DAG.getConstantFP(-0.0, DL, Op.getValueType());
    // TODO: If FNEG had fast-math-flags, they'd get propagated to this FSUB.
    return DAG.getNode(ISD::FSUB, DL, Op.getValueType(),
                       Zero, Op.getOperand(0));
  }
  return DAG.UnrollVectorOp(Op.getNode());
}

SDValue VectorLegalizer::ExpandFSUB(SDValue Op) {
  // For floating-point values, (a-b) is the same as a+(-b). If FNEG is legal,
  // we can defer this to operation legalization where it will be lowered as
  // a+(-b).
  EVT VT = Op.getValueType();
  if (TLI.isOperationLegalOrCustom(ISD::FNEG, VT) &&
      TLI.isOperationLegalOrCustom(ISD::FADD, VT))
    return Op; // Defer to LegalizeDAG

  return DAG.UnrollVectorOp(Op.getNode());
}

SDValue VectorLegalizer::ExpandCTLZ(SDValue Op) {
  EVT VT = Op.getValueType();
  unsigned NumBitsPerElt = VT.getScalarSizeInBits();

  // If the non-ZERO_UNDEF version is supported we can use that instead.
  if (Op.getOpcode() == ISD::CTLZ_ZERO_UNDEF &&
      TLI.isOperationLegalOrCustom(ISD::CTLZ, VT)) {
    SDLoc DL(Op);
    return DAG.getNode(ISD::CTLZ, DL, Op.getValueType(), Op.getOperand(0));
  }

  // If CTPOP is available we can lower with a CTPOP based method:
  // u16 ctlz(u16 x) {
  //   x |= (x >> 1);
  //   x |= (x >> 2);
  //   x |= (x >> 4);
  //   x |= (x >> 8);
  //   return ctpop(~x);
  // }
  // Ref: "Hacker's Delight" by Henry Warren
  if (isPowerOf2_32(NumBitsPerElt) &&
      TLI.isOperationLegalOrCustom(ISD::CTPOP, VT) &&
      TLI.isOperationLegalOrCustom(ISD::SRL, VT) &&
      TLI.isOperationLegalOrCustomOrPromote(ISD::OR, VT) &&
      TLI.isOperationLegalOrCustomOrPromote(ISD::XOR, VT)) {
    SDLoc DL(Op);
    SDValue Res = Op.getOperand(0);
    EVT ShiftTy = TLI.getShiftAmountTy(VT, DAG.getDataLayout());

    for (unsigned i = 1; i != NumBitsPerElt; i *= 2)
      Res = DAG.getNode(
          ISD::OR, DL, VT, Res,
          DAG.getNode(ISD::SRL, DL, VT, Res, DAG.getConstant(i, DL, ShiftTy)));

    Res = DAG.getNOT(DL, Res, VT);
    return DAG.getNode(ISD::CTPOP, DL, VT, Res);
  }

  // Otherwise go ahead and unroll.
  return DAG.UnrollVectorOp(Op.getNode());
}

SDValue VectorLegalizer::ExpandCTTZ_ZERO_UNDEF(SDValue Op) {
  // If the non-ZERO_UNDEF version is supported we can use that instead.
  if (TLI.isOperationLegalOrCustom(ISD::CTTZ, Op.getValueType())) {
    SDLoc DL(Op);
    return DAG.getNode(ISD::CTTZ, DL, Op.getValueType(), Op.getOperand(0));
  }

  // Otherwise go ahead and unroll.
  return DAG.UnrollVectorOp(Op.getNode());
}

SDValue VectorLegalizer::UnrollVSETCC(SDValue Op) {
  EVT VT = Op.getValueType();
  unsigned NumElems = VT.getVectorNumElements();
  EVT EltVT = VT.getVectorElementType();
  SDValue LHS = Op.getOperand(0), RHS = Op.getOperand(1), CC = Op.getOperand(2);
  EVT TmpEltVT = LHS.getValueType().getVectorElementType();
  SDLoc dl(Op);
  SmallVector<SDValue, 8> Ops(NumElems);
  for (unsigned i = 0; i < NumElems; ++i) {
    SDValue LHSElem = DAG.getNode(
        ISD::EXTRACT_VECTOR_ELT, dl, TmpEltVT, LHS,
        DAG.getConstant(i, dl, TLI.getVectorIdxTy(DAG.getDataLayout())));
    SDValue RHSElem = DAG.getNode(
        ISD::EXTRACT_VECTOR_ELT, dl, TmpEltVT, RHS,
        DAG.getConstant(i, dl, TLI.getVectorIdxTy(DAG.getDataLayout())));
    Ops[i] = DAG.getNode(ISD::SETCC, dl,
                         TLI.getSetCCResultType(DAG.getDataLayout(),
                                                *DAG.getContext(), TmpEltVT),
                         LHSElem, RHSElem, CC);
    Ops[i] = DAG.getSelect(dl, EltVT, Ops[i],
                           DAG.getConstant(APInt::getAllOnesValue
                                           (EltVT.getSizeInBits()), dl, EltVT),
                           DAG.getConstant(0, dl, EltVT));
  }
  return DAG.getBuildVector(VT, dl, Ops);
}

}

bool SelectionDAG::LegalizeVectors() {
  return VectorLegalizer(*this).Run();
}