//===- LegalizeVectorOps.cpp - Implement SelectionDAG::LegalizeVectors ----===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file implements the SelectionDAG::LegalizeVectors method.
//
// The vector legalizer looks for vector operations which might need to be
// scalarized and legalizes them. This is a separate step from Legalize because
// scalarizing can introduce illegal types. For example, suppose we have an
// ISD::SDIV of type v2i64 on x86-32. The type is legal (for example, addition
// on a v2i64 is legal), but ISD::SDIV isn't legal, so we have to unroll the
// operation, which introduces nodes with the illegal type i64 which must be
// expanded. Similarly, suppose we have an ISD::SRA of type v16i8 on PowerPC;
// the operation must be unrolled, which introduces nodes with the illegal
// type i8 which must be promoted.
//
// This does not legalize vector manipulations like ISD::BUILD_VECTOR,
// or operations that happen to take a vector which are custom-lowered;
// the legalization for such operations never produces nodes
// with illegal types, so it's okay to put off legalizing them until
// SelectionDAG::Legalize runs.
//
//===----------------------------------------------------------------------===//

#include "llvm/ADT/APInt.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/CodeGen/ISDOpcodes.h"
#include "llvm/CodeGen/MachineMemOperand.h"
#include "llvm/CodeGen/SelectionDAG.h"
#include "llvm/CodeGen/SelectionDAGNodes.h"
#include "llvm/CodeGen/TargetLowering.h"
#include "llvm/CodeGen/ValueTypes.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/Support/Casting.h"
#include "llvm/Support/Compiler.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/MachineValueType.h"
#include "llvm/Support/MathExtras.h"
#include <cassert>
#include <cstdint>
#include <iterator>
#include <utility>

using namespace llvm;

#define DEBUG_TYPE "legalizevectorops"

namespace {

class VectorLegalizer {
  SelectionDAG& DAG;
  const TargetLowering &TLI;
  bool Changed = false; // Keep track of whether anything changed

  /// For nodes that are of legal width, and that have more than one use, this
  /// map indicates what regularized operand to use. This allows us to avoid
  /// legalizing the same thing more than once.
  SmallDenseMap<SDValue, SDValue, 64> LegalizedNodes;

  /// Adds a node to the translation cache.
  void AddLegalizedOperand(SDValue From, SDValue To) {
    LegalizedNodes.insert(std::make_pair(From, To));
    // If someone requests legalization of the new node, return itself.
    if (From != To)
      LegalizedNodes.insert(std::make_pair(To, To));
  }

  /// Legalizes the given node.
  SDValue LegalizeOp(SDValue Op);

  /// Assuming the node is legal, "legalize" the results.
  SDValue TranslateLegalizeResults(SDValue Op, SDValue Result);

  /// Implements unrolling a VSETCC.
  SDValue UnrollVSETCC(SDValue Op);

  /// Implement expand-based legalization of vector operations.
  ///
  /// This is just a high-level routine to dispatch to specific code paths for
  /// operations to legalize them.
  SDValue Expand(SDValue Op);

  /// Implements expansion for FP_TO_UINT; falls back to UnrollVectorOp if
  /// FP_TO_SINT isn't legal.
  SDValue ExpandFP_TO_UINT(SDValue Op);

  /// Implements expansion for UINT_TO_FLOAT; falls back to UnrollVectorOp if
  /// SINT_TO_FLOAT and SHR on vectors aren't legal.
  SDValue ExpandUINT_TO_FLOAT(SDValue Op);

  /// Implement expansion for SIGN_EXTEND_INREG using SRL and SRA.
  SDValue ExpandSEXTINREG(SDValue Op);

  /// Implement expansion for ANY_EXTEND_VECTOR_INREG.
  ///
  /// Shuffles the low lanes of the operand into place and bitcasts to the
  /// proper type. The contents of the bits in the extended part of each
  /// element are undef.
  SDValue ExpandANY_EXTEND_VECTOR_INREG(SDValue Op);

  /// Implement expansion for SIGN_EXTEND_VECTOR_INREG.
  ///
  /// Shuffles the low lanes of the operand into place, bitcasts to the proper
  /// type, then shifts left and arithmetic shifts right to introduce a sign
  /// extension.
  SDValue ExpandSIGN_EXTEND_VECTOR_INREG(SDValue Op);

  /// Implement expansion for ZERO_EXTEND_VECTOR_INREG.
  ///
  /// Shuffles the low lanes of the operand into place and blends zeros into
  /// the remaining lanes, finally bitcasting to the proper type.
  SDValue ExpandZERO_EXTEND_VECTOR_INREG(SDValue Op);

  /// Implement expand-based legalization of ABS vector operations.
  /// If the following expansion is legal/custom then do it:
  /// (ABS x) --> (XOR (ADD x, (SRA x, sizeof(x)-1)), (SRA x, sizeof(x)-1))
  /// else unroll the operation.
  SDValue ExpandABS(SDValue Op);

  /// Expand bswap of vectors into a shuffle if legal.
  SDValue ExpandBSWAP(SDValue Op);

  /// Implement vselect in terms of XOR, AND, OR when blend is not
  /// supported by the target.
  SDValue ExpandVSELECT(SDValue Op);
  SDValue ExpandSELECT(SDValue Op);
  std::pair<SDValue, SDValue> ExpandLoad(SDValue Op);
  SDValue ExpandStore(SDValue Op);
  SDValue ExpandFNEG(SDValue Op);
  SDValue ExpandFSUB(SDValue Op);
  SDValue ExpandBITREVERSE(SDValue Op);
  SDValue ExpandCTPOP(SDValue Op);
  SDValue ExpandCTLZ(SDValue Op);
  SDValue ExpandCTTZ(SDValue Op);
  SDValue ExpandFunnelShift(SDValue Op);
  SDValue ExpandROT(SDValue Op);
  SDValue ExpandFMINNUM_FMAXNUM(SDValue Op);
  SDValue ExpandUADDSUBO(SDValue Op);
  SDValue ExpandSADDSUBO(SDValue Op);
  SDValue ExpandMULO(SDValue Op);
  SDValue ExpandAddSubSat(SDValue Op);
  SDValue ExpandFixedPointMul(SDValue Op);
  SDValue ExpandFixedPointDiv(SDValue Op);
  SDValue ExpandStrictFPOp(SDValue Op);

  SDValue UnrollStrictFPOp(SDValue Op);

  /// Implements vector promotion.
  ///
  /// This is essentially just bitcasting the operands to a different type and
  /// bitcasting the result back to the original type.
  SDValue Promote(SDValue Op);

  /// Implements [SU]INT_TO_FP vector promotion.
  ///
  /// This is a [zs]ext of the input operand to a larger integer type.
  SDValue PromoteINT_TO_FP(SDValue Op);

  /// Implements FP_TO_[SU]INT vector promotion of the result type.
  ///
  /// It is promoted to a larger integer type. The result is then
  /// truncated back to the original type.
  SDValue PromoteFP_TO_INT(SDValue Op);

public:
  VectorLegalizer(SelectionDAG& dag) :
      DAG(dag), TLI(dag.getTargetLoweringInfo()) {}

  /// Begin legalizing the vector operations in the DAG.
  bool Run();
};

} // end anonymous namespace

bool VectorLegalizer::Run() {
  // Before we start legalizing vector nodes, check if there are any vectors.
  bool HasVectors = false;
  for (SelectionDAG::allnodes_iterator I = DAG.allnodes_begin(),
       E = std::prev(DAG.allnodes_end()); I != std::next(E); ++I) {
    // Check if the values of the nodes contain vectors. We don't need to check
    // the operands because we are going to check their values at some point.
    for (SDNode::value_iterator J = I->value_begin(), E = I->value_end();
         J != E; ++J)
      HasVectors |= J->isVector();

    // If we found a vector node we can start the legalization.
    if (HasVectors)
      break;
  }

  // If this basic block has no vectors then no need to legalize vectors.
  if (!HasVectors)
    return false;

  // The legalize process is inherently a bottom-up recursive process (users
  // legalize their uses before themselves). Given infinite stack space, we
  // could just start legalizing on the root and traverse the whole graph. In
  // practice however, this causes us to run out of stack space on large basic
  // blocks. To avoid this problem, compute an ordering of the nodes where each
  // node is only legalized after all of its operands are legalized.
  DAG.AssignTopologicalOrder();
  for (SelectionDAG::allnodes_iterator I = DAG.allnodes_begin(),
       E = std::prev(DAG.allnodes_end()); I != std::next(E); ++I)
    LegalizeOp(SDValue(&*I, 0));

  // Finally, it's possible the root changed. Get the new root.
  SDValue OldRoot = DAG.getRoot();
  assert(LegalizedNodes.count(OldRoot) && "Root didn't get legalized?");
  DAG.setRoot(LegalizedNodes[OldRoot]);

  LegalizedNodes.clear();

  // Remove dead nodes now.
  DAG.RemoveDeadNodes();

  return Changed;
}

SDValue VectorLegalizer::TranslateLegalizeResults(SDValue Op, SDValue Result) {
  // Generic legalization: just pass the operand through.
  for (unsigned i = 0, e = Op.getNode()->getNumValues(); i != e; ++i)
    AddLegalizedOperand(Op.getValue(i), Result.getValue(i));
  return Result.getValue(Op.getResNo());
}

SDValue VectorLegalizer::LegalizeOp(SDValue Op) {
  // Note that LegalizeOp may be reentered even from single-use nodes, which
  // means that we always must cache transformed nodes.
  DenseMap<SDValue, SDValue>::iterator I = LegalizedNodes.find(Op);
  if (I != LegalizedNodes.end()) return I->second;

  SDNode* Node = Op.getNode();

  // Legalize the operands
  SmallVector<SDValue, 8> Ops;
  for (const SDValue &Op : Node->op_values())
    Ops.push_back(LegalizeOp(Op));

  SDValue Result = SDValue(DAG.UpdateNodeOperands(Op.getNode(), Ops),
                           Op.getResNo());

  if (Op.getOpcode() == ISD::LOAD) {
    LoadSDNode *LD = cast<LoadSDNode>(Op.getNode());
    ISD::LoadExtType ExtType = LD->getExtensionType();
    if (LD->getMemoryVT().isVector() && ExtType != ISD::NON_EXTLOAD) {
      LLVM_DEBUG(dbgs() << "\nLegalizing extending vector load: ";
                 Node->dump(&DAG));
      switch (TLI.getLoadExtAction(LD->getExtensionType(), LD->getValueType(0),
                                   LD->getMemoryVT())) {
      default: llvm_unreachable("This action is not supported yet!");
      case TargetLowering::Legal:
        return TranslateLegalizeResults(Op, Result);
      case TargetLowering::Custom:
        if (SDValue Lowered = TLI.LowerOperation(Result, DAG)) {
          assert(Lowered->getNumValues() == Op->getNumValues() &&
                 "Unexpected number of results");
          if (Lowered != Result) {
            // Make sure the new code is also legal.
            Lowered = LegalizeOp(Lowered);
            Changed = true;
          }
          return TranslateLegalizeResults(Op, Lowered);
        }
        LLVM_FALLTHROUGH;
      case TargetLowering::Expand: {
        Changed = true;
        std::pair<SDValue, SDValue> Tmp = ExpandLoad(Result);
        AddLegalizedOperand(Op.getValue(0), Tmp.first);
        AddLegalizedOperand(Op.getValue(1), Tmp.second);
        // Result 0 is the loaded value, result 1 is the chain.
        return Op.getResNo() ? Tmp.second : Tmp.first;
      }
      }
    }
  } else if (Op.getOpcode() == ISD::STORE) {
    StoreSDNode *ST = cast<StoreSDNode>(Op.getNode());
    EVT StVT = ST->getMemoryVT();
    MVT ValVT = ST->getValue().getSimpleValueType();
    if (StVT.isVector() && ST->isTruncatingStore()) {
      LLVM_DEBUG(dbgs() << "\nLegalizing truncating vector store: ";
                 Node->dump(&DAG));
      switch (TLI.getTruncStoreAction(ValVT, StVT)) {
      default: llvm_unreachable("This action is not supported yet!");
      case TargetLowering::Legal:
        return TranslateLegalizeResults(Op, Result);
      case TargetLowering::Custom: {
        SDValue Lowered = TLI.LowerOperation(Result, DAG);
        if (Lowered != Result) {
          // Make sure the new code is also legal.
          Lowered = LegalizeOp(Lowered);
          Changed = true;
        }
        return TranslateLegalizeResults(Op, Lowered);
      }
      case TargetLowering::Expand: {
        Changed = true;
        SDValue Chain = ExpandStore(Result);
        AddLegalizedOperand(Op, Chain);
        return Chain;
      }
      }
    }
  }

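  // Nodes with neither vector results nor vector operands need no vector
  // legalization; just forward the (already operand-updated) result.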
  bool HasVectorValueOrOp = false;
  for (auto J = Node->value_begin(), E = Node->value_end(); J != E; ++J)
    HasVectorValueOrOp |= J->isVector();
  for (const SDValue &Op : Node->op_values())
    HasVectorValueOrOp |= Op.getValueType().isVector();

  if (!HasVectorValueOrOp)
    return TranslateLegalizeResults(Op, Result);

  TargetLowering::LegalizeAction Action = TargetLowering::Legal;
  EVT ValVT;
  switch (Op.getOpcode()) {
  default:
    return TranslateLegalizeResults(Op, Result);
#define INSTRUCTION(NAME, NARG, ROUND_MODE, INTRINSIC, DAGN)                   \
  case ISD::STRICT_##DAGN:
#include "llvm/IR/ConstrainedOps.def"
    ValVT = Node->getValueType(0);
    if (Op.getOpcode() == ISD::STRICT_SINT_TO_FP ||
        Op.getOpcode() == ISD::STRICT_UINT_TO_FP)
      ValVT = Node->getOperand(1).getValueType();
    Action = TLI.getOperationAction(Node->getOpcode(), ValVT);
    // If we're asked to expand a strict vector floating-point operation,
    // by default we're going to simply unroll it. That is usually the
    // best approach, except in the case where the resulting strict (scalar)
    // operations would themselves use the fallback mutation to non-strict.
    // In that specific case, just do the fallback on the vector op.
    if (Action == TargetLowering::Expand && !TLI.isStrictFPEnabled() &&
        TLI.getStrictFPOperationAction(Node->getOpcode(), ValVT) ==
            TargetLowering::Legal) {
      EVT EltVT = ValVT.getVectorElementType();
      if (TLI.getOperationAction(Node->getOpcode(), EltVT)
              == TargetLowering::Expand &&
          TLI.getStrictFPOperationAction(Node->getOpcode(), EltVT)
              == TargetLowering::Legal)
        Action = TargetLowering::Legal;
    }
    break;
  case ISD::ADD:
  case ISD::SUB:
  case ISD::MUL:
  case ISD::MULHS:
  case ISD::MULHU:
  case ISD::SDIV:
  case ISD::UDIV:
  case ISD::SREM:
  case ISD::UREM:
  case ISD::SDIVREM:
  case ISD::UDIVREM:
  case ISD::FADD:
  case ISD::FSUB:
  case ISD::FMUL:
  case ISD::FDIV:
  case ISD::FREM:
  case ISD::AND:
  case ISD::OR:
  case ISD::XOR:
  case ISD::SHL:
  case ISD::SRA:
  case ISD::SRL:
  case ISD::FSHL:
  case ISD::FSHR:
  case ISD::ROTL:
  case ISD::ROTR:
  case ISD::ABS:
  case ISD::BSWAP:
  case ISD::BITREVERSE:
  case ISD::CTLZ:
  case ISD::CTTZ:
  case ISD::CTLZ_ZERO_UNDEF:
  case ISD::CTTZ_ZERO_UNDEF:
  case ISD::CTPOP:
  case ISD::SELECT:
  case ISD::VSELECT:
  case ISD::SELECT_CC:
  case ISD::SETCC:
  case ISD::ZERO_EXTEND:
  case ISD::ANY_EXTEND:
  case ISD::TRUNCATE:
  case ISD::SIGN_EXTEND:
  case ISD::FP_TO_SINT:
  case ISD::FP_TO_UINT:
  case ISD::FNEG:
  case ISD::FABS:
  case ISD::FMINNUM:
  case ISD::FMAXNUM:
  case ISD::FMINNUM_IEEE:
  case ISD::FMAXNUM_IEEE:
  case ISD::FMINIMUM:
  case ISD::FMAXIMUM:
  case ISD::FCOPYSIGN:
  case ISD::FSQRT:
  case ISD::FSIN:
  case ISD::FCOS:
  case ISD::FPOWI:
  case ISD::FPOW:
  case ISD::FLOG:
  case ISD::FLOG2:
  case ISD::FLOG10:
  case ISD::FEXP:
  case ISD::FEXP2:
  case ISD::FCEIL:
  case ISD::FTRUNC:
  case ISD::FRINT:
  case ISD::FNEARBYINT:
  case ISD::FROUND:
  case ISD::FFLOOR:
  case ISD::FP_ROUND:
  case ISD::FP_EXTEND:
  case ISD::FMA:
  case ISD::SIGN_EXTEND_INREG:
  case ISD::ANY_EXTEND_VECTOR_INREG:
  case ISD::SIGN_EXTEND_VECTOR_INREG:
  case ISD::ZERO_EXTEND_VECTOR_INREG:
  case ISD::SMIN:
  case ISD::SMAX:
  case ISD::UMIN:
  case ISD::UMAX:
  case ISD::SMUL_LOHI:
  case ISD::UMUL_LOHI:
  case ISD::SADDO:
  case ISD::UADDO:
  case ISD::SSUBO:
  case ISD::USUBO:
  case ISD::SMULO:
  case ISD::UMULO:
  case ISD::FCANONICALIZE:
  case ISD::SADDSAT:
  case ISD::UADDSAT:
  case ISD::SSUBSAT:
  case ISD::USUBSAT:
    Action = TLI.getOperationAction(Node->getOpcode(), Node->getValueType(0));
    break;
  case ISD::SMULFIX:
  case ISD::SMULFIXSAT:
  case ISD::UMULFIX:
  case ISD::UMULFIXSAT:
  case ISD::SDIVFIX:
  case ISD::UDIVFIX: {
    unsigned Scale = Node->getConstantOperandVal(2);
    Action = TLI.getFixedPointOperationAction(Node->getOpcode(),
                                              Node->getValueType(0), Scale);
    break;
  }
  case ISD::SINT_TO_FP:
  case ISD::UINT_TO_FP:
  case ISD::VECREDUCE_ADD:
  case ISD::VECREDUCE_MUL:
  case ISD::VECREDUCE_AND:
  case ISD::VECREDUCE_OR:
  case ISD::VECREDUCE_XOR:
  case ISD::VECREDUCE_SMAX:
  case ISD::VECREDUCE_SMIN:
  case ISD::VECREDUCE_UMAX:
  case ISD::VECREDUCE_UMIN:
  case ISD::VECREDUCE_FADD:
  case ISD::VECREDUCE_FMUL:
  case ISD::VECREDUCE_FMAX:
  case ISD::VECREDUCE_FMIN:
    Action = TLI.getOperationAction(Node->getOpcode(),
                                    Node->getOperand(0).getValueType());
    break;
  }

  LLVM_DEBUG(dbgs() << "\nLegalizing vector op: "; Node->dump(&DAG));

  switch (Action) {
  default: llvm_unreachable("This action is not supported yet!");
  case TargetLowering::Promote:
    Result = Promote(Op);
    Changed = true;
    break;
  case TargetLowering::Legal:
    LLVM_DEBUG(dbgs() << "Legal node: nothing to do\n");
    break;
  case TargetLowering::Custom: {
    LLVM_DEBUG(dbgs() << "Trying custom legalization\n");
    if (SDValue Tmp1 = TLI.LowerOperation(Op, DAG)) {
      LLVM_DEBUG(dbgs() << "Successfully custom legalized node\n");
      Result = Tmp1;
      break;
    }
    LLVM_DEBUG(dbgs() << "Could not custom legalize node\n");
    LLVM_FALLTHROUGH;
  }
  case TargetLowering::Expand:
    Result = Expand(Op);
  }

  // Make sure that the generated code is itself legal.
  if (Result != Op) {
    Result = LegalizeOp(Result);
    Changed = true;
  }

  // Note that LegalizeOp may be reentered even from single-use nodes, which
  // means that we always must cache transformed nodes.
  AddLegalizedOperand(Op, Result);
  return Result;
}

SDValue VectorLegalizer::Promote(SDValue Op) {
  // For a few operations there is a specific concept for promotion based on
  // the operand's type.
  switch (Op.getOpcode()) {
  case ISD::SINT_TO_FP:
  case ISD::UINT_TO_FP:
  case ISD::STRICT_SINT_TO_FP:
  case ISD::STRICT_UINT_TO_FP:
    // "Promote" the operation by extending the operand.
    return PromoteINT_TO_FP(Op);
  case ISD::FP_TO_UINT:
  case ISD::FP_TO_SINT:
  case ISD::STRICT_FP_TO_UINT:
  case ISD::STRICT_FP_TO_SINT:
    // Promote the operation by widening the result type, then truncating.
    return PromoteFP_TO_INT(Op);
  case ISD::FP_ROUND:
  case ISD::FP_EXTEND:
    // These operations are used to do promotion so they can't be promoted
    // themselves.
    llvm_unreachable("Don't know how to promote this operation!");
  }

  // There are currently two cases of vector promotion:
  // 1) Bitcasting a vector of integers to a different vector type of the same
  //    overall bit width. For example, x86 promotes ISD::AND v2i32 to v1i64.
  // 2) Extending a vector of floats to a vector of the same number of larger
  //    floats. For example, AArch64 promotes ISD::FADD on v4f16 to v4f32.
  MVT VT = Op.getSimpleValueType();
  assert(Op.getNode()->getNumValues() == 1 &&
         "Can't promote a vector with multiple results!");
  MVT NVT = TLI.getTypeToPromoteTo(Op.getOpcode(), VT);
  SDLoc dl(Op);
  SmallVector<SDValue, 4> Operands(Op.getNumOperands());

  for (unsigned j = 0; j != Op.getNumOperands(); ++j) {
    if (Op.getOperand(j).getValueType().isVector())
      if (Op.getOperand(j)
              .getValueType()
              .getVectorElementType()
              .isFloatingPoint() &&
          NVT.isVector() && NVT.getVectorElementType().isFloatingPoint())
        Operands[j] = DAG.getNode(ISD::FP_EXTEND, dl, NVT, Op.getOperand(j));
      else
        Operands[j] = DAG.getNode(ISD::BITCAST, dl, NVT, Op.getOperand(j));
    else
      Operands[j] = Op.getOperand(j);
  }

  Op = DAG.getNode(Op.getOpcode(), dl, NVT, Operands, Op.getNode()->getFlags());
  if ((VT.isFloatingPoint() && NVT.isFloatingPoint()) ||
      (VT.isVector() && VT.getVectorElementType().isFloatingPoint() &&
       NVT.isVector() && NVT.getVectorElementType().isFloatingPoint()))
    return DAG.getNode(ISD::FP_ROUND, dl, VT, Op, DAG.getIntPtrConstant(0, dl));
  else
    return DAG.getNode(ISD::BITCAST, dl, VT, Op);
}

SDValue VectorLegalizer::PromoteINT_TO_FP(SDValue Op) {
  // INT_TO_FP operations may require the input operand be promoted even
  // when the type is otherwise legal.
  bool IsStrict = Op->isStrictFPOpcode();
  MVT VT = Op.getOperand(IsStrict ? 1 : 0).getSimpleValueType();
  MVT NVT = TLI.getTypeToPromoteTo(Op.getOpcode(), VT);
  assert(NVT.getVectorNumElements() == VT.getVectorNumElements() &&
         "Vectors have different number of elements!");

  SDLoc dl(Op);
  SmallVector<SDValue, 4> Operands(Op.getNumOperands());

  unsigned Opc = (Op.getOpcode() == ISD::UINT_TO_FP ||
                  Op.getOpcode() == ISD::STRICT_UINT_TO_FP)
                     ? ISD::ZERO_EXTEND
                     : ISD::SIGN_EXTEND;
  for (unsigned j = 0; j != Op.getNumOperands(); ++j) {
    if (Op.getOperand(j).getValueType().isVector())
      Operands[j] = DAG.getNode(Opc, dl, NVT, Op.getOperand(j));
    else
      Operands[j] = Op.getOperand(j);
  }

  if (IsStrict)
    return DAG.getNode(Op.getOpcode(), dl, {Op.getValueType(), MVT::Other},
                       Operands);

  return DAG.getNode(Op.getOpcode(), dl, Op.getValueType(), Operands);
}

// For FP_TO_INT we promote the result type to a vector type with wider
// elements and then truncate the result. This is different from the default
// PromoteVector which uses bitcast to promote, thus assuming that the
// promoted vector type has the same overall size.
SDValue VectorLegalizer::PromoteFP_TO_INT(SDValue Op) {
  MVT VT = Op.getSimpleValueType();
  MVT NVT = TLI.getTypeToPromoteTo(Op.getOpcode(), VT);
  bool IsStrict = Op->isStrictFPOpcode();
  assert(NVT.getVectorNumElements() == VT.getVectorNumElements() &&
         "Vectors have different number of elements!");

  unsigned NewOpc = Op->getOpcode();
  // Change FP_TO_UINT to FP_TO_SINT if possible.
  // TODO: Should we only do this if FP_TO_UINT itself isn't legal?
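  // Note: switching to FP_TO_SINT is safe here because any value that fits in
  // the original unsigned result type is non-negative and also representable
  // in the wider signed NVT; values that do not fit made the original
  // FP_TO_UINT undefined anyway (see the AssertZext comment below).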
  if (NewOpc == ISD::FP_TO_UINT &&
      TLI.isOperationLegalOrCustom(ISD::FP_TO_SINT, NVT))
    NewOpc = ISD::FP_TO_SINT;

  if (NewOpc == ISD::STRICT_FP_TO_UINT &&
      TLI.isOperationLegalOrCustom(ISD::STRICT_FP_TO_SINT, NVT))
    NewOpc = ISD::STRICT_FP_TO_SINT;

  SDLoc dl(Op);
  SDValue Promoted, Chain;
  if (IsStrict) {
    Promoted = DAG.getNode(NewOpc, dl, {NVT, MVT::Other},
                           {Op.getOperand(0), Op.getOperand(1)});
    Chain = Promoted.getValue(1);
  } else
    Promoted = DAG.getNode(NewOpc, dl, NVT, Op.getOperand(0));

  // Assert that the converted value fits in the original type. If it doesn't
  // (eg: because the value being converted is too big), then the result of the
  // original operation was undefined anyway, so the assert is still correct.
  if (Op->getOpcode() == ISD::FP_TO_UINT ||
      Op->getOpcode() == ISD::STRICT_FP_TO_UINT)
    NewOpc = ISD::AssertZext;
  else
    NewOpc = ISD::AssertSext;

  Promoted = DAG.getNode(NewOpc, dl, NVT, Promoted,
                         DAG.getValueType(VT.getScalarType()));
  Promoted = DAG.getNode(ISD::TRUNCATE, dl, VT, Promoted);
  if (IsStrict)
    return DAG.getMergeValues({Promoted, Chain}, dl);

  return Promoted;
}

std::pair<SDValue, SDValue> VectorLegalizer::ExpandLoad(SDValue Op) {
  LoadSDNode *LD = cast<LoadSDNode>(Op.getNode());

  EVT SrcVT = LD->getMemoryVT();
  EVT SrcEltVT = SrcVT.getScalarType();
  unsigned NumElem = SrcVT.getVectorNumElements();

  SDValue NewChain;
  SDValue Value;
  if (SrcVT.getVectorNumElements() > 1 && !SrcEltVT.isByteSized()) {
    SDLoc dl(Op);

    SmallVector<SDValue, 8> Vals;
    SmallVector<SDValue, 8> LoadChains;

    EVT DstEltVT = LD->getValueType(0).getScalarType();
    SDValue Chain = LD->getChain();
    SDValue BasePTR = LD->getBasePtr();
    ISD::LoadExtType ExtType = LD->getExtensionType();

    // When the elements in a vector are not byte-addressable, we cannot
    // directly load each element by advancing a pointer, which can only
    // address bytes. Instead, we load all the significant words, mask bits
    // off, and concatenate them to form each element. Finally, they are
    // extended to the destination scalar type to build the destination vector.
    EVT WideVT = TLI.getPointerTy(DAG.getDataLayout());

    assert(WideVT.isRound() &&
           "Could not handle the sophisticated case when the widest integer "
           "is not a power of 2.");
    assert(WideVT.bitsGE(SrcEltVT) &&
           "Type is not legalized?");

    unsigned WideBytes = WideVT.getStoreSize();
    unsigned Offset = 0;
    unsigned RemainingBytes = SrcVT.getStoreSize();
    SmallVector<SDValue, 8> LoadVals;
    while (RemainingBytes > 0) {
      SDValue ScalarLoad;
      unsigned LoadBytes = WideBytes;

      if (RemainingBytes >= LoadBytes) {
        ScalarLoad =
            DAG.getLoad(WideVT, dl, Chain, BasePTR,
                        LD->getPointerInfo().getWithOffset(Offset),
                        MinAlign(LD->getAlignment(), Offset),
                        LD->getMemOperand()->getFlags(), LD->getAAInfo());
      } else {
        EVT LoadVT = WideVT;
        while (RemainingBytes < LoadBytes) {
          LoadBytes >>= 1; // Reduce the load size by half.
          LoadVT = EVT::getIntegerVT(*DAG.getContext(), LoadBytes << 3);
        }
        ScalarLoad =
            DAG.getExtLoad(ISD::EXTLOAD, dl, WideVT, Chain, BasePTR,
                           LD->getPointerInfo().getWithOffset(Offset), LoadVT,
                           MinAlign(LD->getAlignment(), Offset),
                           LD->getMemOperand()->getFlags(), LD->getAAInfo());
      }

      RemainingBytes -= LoadBytes;
      Offset += LoadBytes;

      BasePTR = DAG.getObjectPtrOffset(dl, BasePTR, LoadBytes);

      LoadVals.push_back(ScalarLoad.getValue(0));
      LoadChains.push_back(ScalarLoad.getValue(1));
    }

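    // Each entry of LoadVals now holds WideBits worth of packed source
    // elements. Walk the elements in order: shift the containing word right
    // so the element starts at bit 0, OR in any bits that spilled over into
    // the next word, mask down to SrcEltBits, and extend or truncate to the
    // destination element type. As an illustration (not from the original
    // comments): an extending load of v4i1 reads one byte, and the four
    // elements sit at bit offsets 0, 1, 2 and 3 of the first loaded word.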
    unsigned BitOffset = 0;
    unsigned WideIdx = 0;
    unsigned WideBits = WideVT.getSizeInBits();

    // Extract bits, pack and extend/trunc them into destination type.
    unsigned SrcEltBits = SrcEltVT.getSizeInBits();
    SDValue SrcEltBitMask = DAG.getConstant(
        APInt::getLowBitsSet(WideBits, SrcEltBits), dl, WideVT);

    for (unsigned Idx = 0; Idx != NumElem; ++Idx) {
      assert(BitOffset < WideBits && "Unexpected offset!");

      SDValue ShAmt = DAG.getConstant(
          BitOffset, dl, TLI.getShiftAmountTy(WideVT, DAG.getDataLayout()));
      SDValue Lo = DAG.getNode(ISD::SRL, dl, WideVT, LoadVals[WideIdx], ShAmt);

      BitOffset += SrcEltBits;
      if (BitOffset >= WideBits) {
        WideIdx++;
        BitOffset -= WideBits;
        if (BitOffset > 0) {
          ShAmt = DAG.getConstant(
              SrcEltBits - BitOffset, dl,
              TLI.getShiftAmountTy(WideVT, DAG.getDataLayout()));
          SDValue Hi =
              DAG.getNode(ISD::SHL, dl, WideVT, LoadVals[WideIdx], ShAmt);
          Lo = DAG.getNode(ISD::OR, dl, WideVT, Lo, Hi);
        }
      }

      Lo = DAG.getNode(ISD::AND, dl, WideVT, Lo, SrcEltBitMask);

      switch (ExtType) {
      default: llvm_unreachable("Unknown extended-load op!");
      case ISD::EXTLOAD:
        Lo = DAG.getAnyExtOrTrunc(Lo, dl, DstEltVT);
        break;
      case ISD::ZEXTLOAD:
        Lo = DAG.getZExtOrTrunc(Lo, dl, DstEltVT);
        break;
      case ISD::SEXTLOAD:
        ShAmt =
            DAG.getConstant(WideBits - SrcEltBits, dl,
                            TLI.getShiftAmountTy(WideVT, DAG.getDataLayout()));
        Lo = DAG.getNode(ISD::SHL, dl, WideVT, Lo, ShAmt);
        Lo = DAG.getNode(ISD::SRA, dl, WideVT, Lo, ShAmt);
        Lo = DAG.getSExtOrTrunc(Lo, dl, DstEltVT);
        break;
      }
      Vals.push_back(Lo);
    }

    NewChain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, LoadChains);
    Value = DAG.getBuildVector(Op.getNode()->getValueType(0), dl, Vals);
  } else {
    std::tie(Value, NewChain) = TLI.scalarizeVectorLoad(LD, DAG);
  }

  return std::make_pair(Value, NewChain);
}

SDValue VectorLegalizer::ExpandStore(SDValue Op) {
  StoreSDNode *ST = cast<StoreSDNode>(Op.getNode());
  SDValue TF = TLI.scalarizeVectorStore(ST, DAG);
  return TF;
}

SDValue VectorLegalizer::Expand(SDValue Op) {
  switch (Op->getOpcode()) {
  case ISD::SIGN_EXTEND_INREG:
    return ExpandSEXTINREG(Op);
  case ISD::ANY_EXTEND_VECTOR_INREG:
    return ExpandANY_EXTEND_VECTOR_INREG(Op);
  case ISD::SIGN_EXTEND_VECTOR_INREG:
    return ExpandSIGN_EXTEND_VECTOR_INREG(Op);
  case ISD::ZERO_EXTEND_VECTOR_INREG:
    return ExpandZERO_EXTEND_VECTOR_INREG(Op);
  case ISD::BSWAP:
    return ExpandBSWAP(Op);
  case ISD::VSELECT:
    return ExpandVSELECT(Op);
  case ISD::SELECT:
    return ExpandSELECT(Op);
  case ISD::FP_TO_UINT:
    return ExpandFP_TO_UINT(Op);
  case ISD::UINT_TO_FP:
    return ExpandUINT_TO_FLOAT(Op);
  case ISD::FNEG:
    return ExpandFNEG(Op);
  case ISD::FSUB:
    return ExpandFSUB(Op);
  case ISD::SETCC:
    return UnrollVSETCC(Op);
  case ISD::ABS:
    return ExpandABS(Op);
  case ISD::BITREVERSE:
    return ExpandBITREVERSE(Op);
  case ISD::CTPOP:
    return ExpandCTPOP(Op);
  case ISD::CTLZ:
  case ISD::CTLZ_ZERO_UNDEF:
    return ExpandCTLZ(Op);
  case ISD::CTTZ:
  case ISD::CTTZ_ZERO_UNDEF:
    return ExpandCTTZ(Op);
  case ISD::FSHL:
  case ISD::FSHR:
    return ExpandFunnelShift(Op);
  case ISD::ROTL:
  case ISD::ROTR:
    return ExpandROT(Op);
  case ISD::FMINNUM:
  case ISD::FMAXNUM:
    return ExpandFMINNUM_FMAXNUM(Op);
  case ISD::UADDO:
  case ISD::USUBO:
    return ExpandUADDSUBO(Op);
  case ISD::SADDO:
  case ISD::SSUBO:
    return ExpandSADDSUBO(Op);
  case ISD::UMULO:
  case ISD::SMULO:
    return ExpandMULO(Op);
  case ISD::USUBSAT:
  case ISD::SSUBSAT:
  case ISD::UADDSAT:
  case ISD::SADDSAT:
    return ExpandAddSubSat(Op);
  case ISD::SMULFIX:
  case ISD::UMULFIX:
    return ExpandFixedPointMul(Op);
  case ISD::SMULFIXSAT:
  case ISD::UMULFIXSAT:
    // FIXME: We do not expand SMULFIXSAT/UMULFIXSAT here yet, not sure exactly
    // why. Maybe it results in worse codegen compared to the unroll for some
    // targets? This should probably be investigated. And if we still prefer to
    // unroll an explanation could be helpful.
    return DAG.UnrollVectorOp(Op.getNode());
  case ISD::SDIVFIX:
  case ISD::UDIVFIX:
    return ExpandFixedPointDiv(Op);
#define INSTRUCTION(NAME, NARG, ROUND_MODE, INTRINSIC, DAGN)                   \
  case ISD::STRICT_##DAGN:
#include "llvm/IR/ConstrainedOps.def"
    return ExpandStrictFPOp(Op);
  case ISD::VECREDUCE_ADD:
  case ISD::VECREDUCE_MUL:
  case ISD::VECREDUCE_AND:
  case ISD::VECREDUCE_OR:
  case ISD::VECREDUCE_XOR:
  case ISD::VECREDUCE_SMAX:
  case ISD::VECREDUCE_SMIN:
  case ISD::VECREDUCE_UMAX:
  case ISD::VECREDUCE_UMIN:
  case ISD::VECREDUCE_FADD:
  case ISD::VECREDUCE_FMUL:
  case ISD::VECREDUCE_FMAX:
  case ISD::VECREDUCE_FMIN:
    return TLI.expandVecReduce(Op.getNode(), DAG);
  default:
    return DAG.UnrollVectorOp(Op.getNode());
  }
}

SDValue VectorLegalizer::ExpandSELECT(SDValue Op) {
  // Lower a select instruction where the condition is a scalar and the
  // operands are vectors. Lower this select to VSELECT and implement it
  // using XOR AND OR. The selector bit is broadcasted.
  EVT VT = Op.getValueType();
  SDLoc DL(Op);

  SDValue Mask = Op.getOperand(0);
  SDValue Op1 = Op.getOperand(1);
  SDValue Op2 = Op.getOperand(2);

  assert(VT.isVector() && !Mask.getValueType().isVector()
         && Op1.getValueType() == Op2.getValueType() && "Invalid type");

  // If we can't even use the basic vector operations of
  // AND,OR,XOR, we will have to scalarize the op.
  // Notice that the operation may be 'promoted' which means that it is
  // 'bitcasted' to another type which is handled.
  // Also, we need to be able to construct a splat vector using BUILD_VECTOR.
  if (TLI.getOperationAction(ISD::AND, VT) == TargetLowering::Expand ||
      TLI.getOperationAction(ISD::XOR, VT) == TargetLowering::Expand ||
      TLI.getOperationAction(ISD::OR, VT) == TargetLowering::Expand ||
      TLI.getOperationAction(ISD::BUILD_VECTOR, VT) == TargetLowering::Expand)
    return DAG.UnrollVectorOp(Op.getNode());

  // Generate a mask operand.
  EVT MaskTy = VT.changeVectorElementTypeToInteger();

  // What is the size of each element in the vector mask.
  EVT BitTy = MaskTy.getScalarType();

  Mask = DAG.getSelect(DL, BitTy, Mask,
          DAG.getConstant(APInt::getAllOnesValue(BitTy.getSizeInBits()), DL,
                          BitTy),
          DAG.getConstant(0, DL, BitTy));

  // Broadcast the mask so that the entire vector is all-one or all zero.
  Mask = DAG.getSplatBuildVector(MaskTy, DL, Mask);

  // Bitcast the operands to be the same type as the mask.
  // This is needed when we select between FP types because
  // the mask is a vector of integers.
  Op1 = DAG.getNode(ISD::BITCAST, DL, MaskTy, Op1);
  Op2 = DAG.getNode(ISD::BITCAST, DL, MaskTy, Op2);

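  // Blend the two operands with the now all-ones/all-zeros mask:
  //   Result = (Op1 & Mask) | (Op2 & ~Mask)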
  SDValue AllOnes = DAG.getConstant(
      APInt::getAllOnesValue(BitTy.getSizeInBits()), DL, MaskTy);
  SDValue NotMask = DAG.getNode(ISD::XOR, DL, MaskTy, Mask, AllOnes);

  Op1 = DAG.getNode(ISD::AND, DL, MaskTy, Op1, Mask);
  Op2 = DAG.getNode(ISD::AND, DL, MaskTy, Op2, NotMask);
  SDValue Val = DAG.getNode(ISD::OR, DL, MaskTy, Op1, Op2);
  return DAG.getNode(ISD::BITCAST, DL, Op.getValueType(), Val);
}

SDValue VectorLegalizer::ExpandSEXTINREG(SDValue Op) {
  EVT VT = Op.getValueType();

  // Make sure that the SRA and SHL instructions are available.
  if (TLI.getOperationAction(ISD::SRA, VT) == TargetLowering::Expand ||
      TLI.getOperationAction(ISD::SHL, VT) == TargetLowering::Expand)
    return DAG.UnrollVectorOp(Op.getNode());

  SDLoc DL(Op);
  EVT OrigTy = cast<VTSDNode>(Op->getOperand(1))->getVT();

  unsigned BW = VT.getScalarSizeInBits();
  unsigned OrigBW = OrigTy.getScalarSizeInBits();
  SDValue ShiftSz = DAG.getConstant(BW - OrigBW, DL, VT);

  Op = Op.getOperand(0);
  Op = DAG.getNode(ISD::SHL, DL, VT, Op, ShiftSz);
  return DAG.getNode(ISD::SRA, DL, VT, Op, ShiftSz);
}

// Generically expand a vector anyext in register to a shuffle of the relevant
// lanes into the appropriate locations, with other lanes left undef.
SDValue VectorLegalizer::ExpandANY_EXTEND_VECTOR_INREG(SDValue Op) {
  SDLoc DL(Op);
  EVT VT = Op.getValueType();
  int NumElements = VT.getVectorNumElements();
  SDValue Src = Op.getOperand(0);
  EVT SrcVT = Src.getValueType();
  int NumSrcElements = SrcVT.getVectorNumElements();

  // *_EXTEND_VECTOR_INREG SrcVT can be smaller than VT - so insert the vector
  // into a larger vector type.
  if (SrcVT.bitsLE(VT)) {
    assert((VT.getSizeInBits() % SrcVT.getScalarSizeInBits()) == 0 &&
           "ANY_EXTEND_VECTOR_INREG vector size mismatch");
    NumSrcElements = VT.getSizeInBits() / SrcVT.getScalarSizeInBits();
    SrcVT = EVT::getVectorVT(*DAG.getContext(), SrcVT.getScalarType(),
                             NumSrcElements);
    Src = DAG.getNode(
        ISD::INSERT_SUBVECTOR, DL, SrcVT, DAG.getUNDEF(SrcVT), Src,
        DAG.getConstant(0, DL, TLI.getVectorIdxTy(DAG.getDataLayout())));
  }

  // Build a base mask of undef shuffles.
  SmallVector<int, 16> ShuffleMask;
  ShuffleMask.resize(NumSrcElements, -1);

  // Place the extended lanes into the correct locations.
  int ExtLaneScale = NumSrcElements / NumElements;
  int EndianOffset = DAG.getDataLayout().isBigEndian() ? ExtLaneScale - 1 : 0;
  for (int i = 0; i < NumElements; ++i)
    ShuffleMask[i * ExtLaneScale + EndianOffset] = i;

  return DAG.getNode(
      ISD::BITCAST, DL, VT,
      DAG.getVectorShuffle(SrcVT, DL, Src, DAG.getUNDEF(SrcVT), ShuffleMask));
}

SDValue VectorLegalizer::ExpandSIGN_EXTEND_VECTOR_INREG(SDValue Op) {
  SDLoc DL(Op);
  EVT VT = Op.getValueType();
  SDValue Src = Op.getOperand(0);
  EVT SrcVT = Src.getValueType();

  // First build an any-extend node which can be legalized above when we
  // recurse through it.
  Op = DAG.getNode(ISD::ANY_EXTEND_VECTOR_INREG, DL, VT, Src);

  // Now we need sign extend. Do this by shifting the elements. Even if these
  // aren't legal operations, they have a better chance of being legalized
  // without full scalarization than the sign extension does.
  unsigned EltWidth = VT.getScalarSizeInBits();
  unsigned SrcEltWidth = SrcVT.getScalarSizeInBits();
  SDValue ShiftAmount = DAG.getConstant(EltWidth - SrcEltWidth, DL, VT);
  return DAG.getNode(ISD::SRA, DL, VT,
                     DAG.getNode(ISD::SHL, DL, VT, Op, ShiftAmount),
                     ShiftAmount);
}

// Generically expand a vector zext in register to a shuffle of the relevant
// lanes into the appropriate locations, a blend of zero into the high bits,
// and a bitcast to the wider element type.
SDValue VectorLegalizer::ExpandZERO_EXTEND_VECTOR_INREG(SDValue Op) {
  SDLoc DL(Op);
  EVT VT = Op.getValueType();
  int NumElements = VT.getVectorNumElements();
  SDValue Src = Op.getOperand(0);
  EVT SrcVT = Src.getValueType();
  int NumSrcElements = SrcVT.getVectorNumElements();

  // *_EXTEND_VECTOR_INREG SrcVT can be smaller than VT - so insert the vector
  // into a larger vector type.
  if (SrcVT.bitsLE(VT)) {
    assert((VT.getSizeInBits() % SrcVT.getScalarSizeInBits()) == 0 &&
           "ZERO_EXTEND_VECTOR_INREG vector size mismatch");
    NumSrcElements = VT.getSizeInBits() / SrcVT.getScalarSizeInBits();
    SrcVT = EVT::getVectorVT(*DAG.getContext(), SrcVT.getScalarType(),
                             NumSrcElements);
    Src = DAG.getNode(
        ISD::INSERT_SUBVECTOR, DL, SrcVT, DAG.getUNDEF(SrcVT), Src,
        DAG.getConstant(0, DL, TLI.getVectorIdxTy(DAG.getDataLayout())));
  }

  // Build up a zero vector to blend into this one.
  SDValue Zero = DAG.getConstant(0, DL, SrcVT);

  // Shuffle the incoming lanes into the correct position, and pull all other
  // lanes from the zero vector.
  SmallVector<int, 16> ShuffleMask;
  ShuffleMask.reserve(NumSrcElements);
  for (int i = 0; i < NumSrcElements; ++i)
    ShuffleMask.push_back(i);

  int ExtLaneScale = NumSrcElements / NumElements;
  int EndianOffset = DAG.getDataLayout().isBigEndian() ? ExtLaneScale - 1 : 0;
  for (int i = 0; i < NumElements; ++i)
    ShuffleMask[i * ExtLaneScale + EndianOffset] = NumSrcElements + i;

  return DAG.getNode(ISD::BITCAST, DL, VT,
                     DAG.getVectorShuffle(SrcVT, DL, Zero, Src, ShuffleMask));
}

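// Build a byte-index shuffle mask that reverses the bytes within each scalar
// element; for example, for v2i32 the mask is {3,2,1,0, 7,6,5,4}.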
static void createBSWAPShuffleMask(EVT VT, SmallVectorImpl<int> &ShuffleMask) {
  int ScalarSizeInBytes = VT.getScalarSizeInBits() / 8;
  for (int I = 0, E = VT.getVectorNumElements(); I != E; ++I)
    for (int J = ScalarSizeInBytes - 1; J >= 0; --J)
      ShuffleMask.push_back((I * ScalarSizeInBytes) + J);
}

SDValue VectorLegalizer::ExpandBSWAP(SDValue Op) {
  EVT VT = Op.getValueType();

  // Generate a byte wise shuffle mask for the BSWAP.
  SmallVector<int, 16> ShuffleMask;
  createBSWAPShuffleMask(VT, ShuffleMask);
  EVT ByteVT = EVT::getVectorVT(*DAG.getContext(), MVT::i8, ShuffleMask.size());

  // Only emit a shuffle if the mask is legal.
  if (!TLI.isShuffleMaskLegal(ShuffleMask, ByteVT))
    return DAG.UnrollVectorOp(Op.getNode());

  SDLoc DL(Op);
  Op = DAG.getNode(ISD::BITCAST, DL, ByteVT, Op.getOperand(0));
  Op = DAG.getVectorShuffle(ByteVT, DL, Op, DAG.getUNDEF(ByteVT), ShuffleMask);
  return DAG.getNode(ISD::BITCAST, DL, VT, Op);
}

SDValue VectorLegalizer::ExpandBITREVERSE(SDValue Op) {
  EVT VT = Op.getValueType();

  // If we have the scalar operation, it's probably cheaper to unroll it.
  if (TLI.isOperationLegalOrCustom(ISD::BITREVERSE, VT.getScalarType()))
    return DAG.UnrollVectorOp(Op.getNode());

  // If the vector element width is a whole number of bytes, test if it's legal
  // to BSWAP shuffle the bytes and then perform the BITREVERSE on the byte
  // vector. This greatly reduces the number of bit shifts necessary.
  unsigned ScalarSizeInBits = VT.getScalarSizeInBits();
  if (ScalarSizeInBits > 8 && (ScalarSizeInBits % 8) == 0) {
    SmallVector<int, 16> BSWAPMask;
    createBSWAPShuffleMask(VT, BSWAPMask);

    EVT ByteVT = EVT::getVectorVT(*DAG.getContext(), MVT::i8, BSWAPMask.size());
    if (TLI.isShuffleMaskLegal(BSWAPMask, ByteVT) &&
        (TLI.isOperationLegalOrCustom(ISD::BITREVERSE, ByteVT) ||
         (TLI.isOperationLegalOrCustom(ISD::SHL, ByteVT) &&
          TLI.isOperationLegalOrCustom(ISD::SRL, ByteVT) &&
          TLI.isOperationLegalOrCustomOrPromote(ISD::AND, ByteVT) &&
          TLI.isOperationLegalOrCustomOrPromote(ISD::OR, ByteVT)))) {
      SDLoc DL(Op);
      Op = DAG.getNode(ISD::BITCAST, DL, ByteVT, Op.getOperand(0));
      Op = DAG.getVectorShuffle(ByteVT, DL, Op, DAG.getUNDEF(ByteVT),
                                BSWAPMask);
      Op = DAG.getNode(ISD::BITREVERSE, DL, ByteVT, Op);
      return DAG.getNode(ISD::BITCAST, DL, VT, Op);
    }
  }

  // If we have the appropriate vector bit operations, it is better to use them
  // than unrolling and expanding each component.
  if (!TLI.isOperationLegalOrCustom(ISD::SHL, VT) ||
      !TLI.isOperationLegalOrCustom(ISD::SRL, VT) ||
      !TLI.isOperationLegalOrCustomOrPromote(ISD::AND, VT) ||
      !TLI.isOperationLegalOrCustomOrPromote(ISD::OR, VT))
    return DAG.UnrollVectorOp(Op.getNode());

  // Let LegalizeDAG handle this later.
  return Op;
}

SDValue VectorLegalizer::ExpandVSELECT(SDValue Op) {
  // Implement VSELECT in terms of XOR, AND, OR
  // on platforms which do not support blend natively.
  SDLoc DL(Op);

  SDValue Mask = Op.getOperand(0);
  SDValue Op1 = Op.getOperand(1);
  SDValue Op2 = Op.getOperand(2);

  EVT VT = Mask.getValueType();

  // If we can't even use the basic vector operations of
  // AND,OR,XOR, we will have to scalarize the op.
  // Notice that the operation may be 'promoted' which means that it is
  // 'bitcasted' to another type which is handled.
  // This operation also isn't safe with AND, OR, XOR when the boolean
  // type is 0/1 as we need an all ones vector constant to mask with.
  // FIXME: Sign extend 1 to all ones if that's legal on the target.
  if (TLI.getOperationAction(ISD::AND, VT) == TargetLowering::Expand ||
      TLI.getOperationAction(ISD::XOR, VT) == TargetLowering::Expand ||
      TLI.getOperationAction(ISD::OR, VT) == TargetLowering::Expand ||
      TLI.getBooleanContents(Op1.getValueType()) !=
          TargetLowering::ZeroOrNegativeOneBooleanContent)
    return DAG.UnrollVectorOp(Op.getNode());

  // If the mask and the type are different sizes, unroll the vector op. This
  // can occur when getSetCCResultType returns something that is different in
  // size from the operand types. For example, v4i8 = select v4i32, v4i8, v4i8.
  if (VT.getSizeInBits() != Op1.getValueSizeInBits())
    return DAG.UnrollVectorOp(Op.getNode());

  // Bitcast the operands to be the same type as the mask.
  // This is needed when we select between FP types because
  // the mask is a vector of integers.
  Op1 = DAG.getNode(ISD::BITCAST, DL, VT, Op1);
  Op2 = DAG.getNode(ISD::BITCAST, DL, VT, Op2);

  SDValue AllOnes = DAG.getConstant(
      APInt::getAllOnesValue(VT.getScalarSizeInBits()), DL, VT);
  SDValue NotMask = DAG.getNode(ISD::XOR, DL, VT, Mask, AllOnes);

  Op1 = DAG.getNode(ISD::AND, DL, VT, Op1, Mask);
  Op2 = DAG.getNode(ISD::AND, DL, VT, Op2, NotMask);
  SDValue Val = DAG.getNode(ISD::OR, DL, VT, Op1, Op2);
  return DAG.getNode(ISD::BITCAST, DL, Op.getValueType(), Val);
}

SDValue VectorLegalizer::ExpandABS(SDValue Op) {
  // Attempt to expand using TargetLowering.
  SDValue Result;
  if (TLI.expandABS(Op.getNode(), Result, DAG))
    return Result;

  // Otherwise go ahead and unroll.
  return DAG.UnrollVectorOp(Op.getNode());
}

SDValue VectorLegalizer::ExpandFP_TO_UINT(SDValue Op) {
  // Attempt to expand using TargetLowering.
  SDValue Result, Chain;
  if (TLI.expandFP_TO_UINT(Op.getNode(), Result, Chain, DAG)) {
    if (Op->isStrictFPOpcode())
      // Relink the chain
      DAG.ReplaceAllUsesOfValueWith(Op.getValue(1), Chain);
    return Result;
  }

  // Otherwise go ahead and unroll.
  if (Op->isStrictFPOpcode())
    return UnrollStrictFPOp(Op);
  return DAG.UnrollVectorOp(Op.getNode());
}

SDValue VectorLegalizer::ExpandUINT_TO_FLOAT(SDValue Op) {
  bool IsStrict = Op.getNode()->isStrictFPOpcode();
  unsigned OpNo = IsStrict ? 1 : 0;
  SDValue Src = Op.getOperand(OpNo);
  EVT VT = Src.getValueType();
  SDLoc DL(Op);

  // Attempt to expand using TargetLowering.
  SDValue Result;
  SDValue Chain;
  if (TLI.expandUINT_TO_FP(Op.getNode(), Result, Chain, DAG)) {
    if (IsStrict)
      // Relink the chain
      DAG.ReplaceAllUsesOfValueWith(Op.getValue(1), Chain);
    return Result;
  }

  // Make sure that the SINT_TO_FP and SRL instructions are available.
  if (((!IsStrict && TLI.getOperationAction(ISD::SINT_TO_FP, VT) ==
                         TargetLowering::Expand) ||
       (IsStrict && TLI.getOperationAction(ISD::STRICT_SINT_TO_FP, VT) ==
                        TargetLowering::Expand)) ||
      TLI.getOperationAction(ISD::SRL, VT) == TargetLowering::Expand) {
    if (IsStrict)
      return UnrollStrictFPOp(Op);
    return DAG.UnrollVectorOp(Op.getNode());
  }

  unsigned BW = VT.getScalarSizeInBits();
  assert((BW == 64 || BW == 32) &&
         "Elements in vector-UINT_TO_FP must be 32 or 64 bits wide");

  SDValue HalfWord = DAG.getConstant(BW / 2, DL, VT);

  // Constants to clear the upper part of the word.
  // Notice that we can also use SHL+SHR, but using a constant is slightly
  // faster on x86.
  uint64_t HWMask = (BW == 64) ? 0x00000000FFFFFFFF : 0x0000FFFF;
  SDValue HalfWordMask = DAG.getConstant(HWMask, DL, VT);

  // Two to the power of half-word-size.
  SDValue TWOHW = DAG.getConstantFP(1ULL << (BW / 2), DL, Op.getValueType());

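  // The expansion below uses the identity
  //   Src = Hi * 2^(BW/2) + Lo,  Hi = Src >> (BW/2),  Lo = Src & HWMask,
  // so uitofp(Src) can be computed as sitofp(Hi) * 2^(BW/2) + sitofp(Lo).
  // Both halves fit in BW/2 bits, so they are non-negative when interpreted
  // as signed BW-bit integers and SINT_TO_FP handles them correctly.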
  // Clear upper part of LO, lower HI
  SDValue HI = DAG.getNode(ISD::SRL, DL, VT, Src, HalfWord);
  SDValue LO = DAG.getNode(ISD::AND, DL, VT, Src, HalfWordMask);

  if (IsStrict) {
    // Convert hi and lo to floats
    // Convert the hi part back to the upper values
    // TODO: Can any fast-math-flags be set on these nodes?
    SDValue fHI =
        DAG.getNode(ISD::STRICT_SINT_TO_FP, DL, {Op.getValueType(), MVT::Other},
                    {Op.getOperand(0), HI});
    fHI = DAG.getNode(ISD::STRICT_FMUL, DL, {Op.getValueType(), MVT::Other},
                      {SDValue(fHI.getNode(), 1), fHI, TWOHW});
    SDValue fLO =
        DAG.getNode(ISD::STRICT_SINT_TO_FP, DL, {Op.getValueType(), MVT::Other},
                    {SDValue(fHI.getNode(), 1), LO});

    // Add the two halves
    SDValue Result =
        DAG.getNode(ISD::STRICT_FADD, DL, {Op.getValueType(), MVT::Other},
                    {SDValue(fLO.getNode(), 1), fHI, fLO});

    // Relink the chain
    DAG.ReplaceAllUsesOfValueWith(Op.getValue(1), SDValue(Result.getNode(), 1));
    return Result;
  }

  // Convert hi and lo to floats
  // Convert the hi part back to the upper values
  // TODO: Can any fast-math-flags be set on these nodes?
  SDValue fHI = DAG.getNode(ISD::SINT_TO_FP, DL, Op.getValueType(), HI);
  fHI = DAG.getNode(ISD::FMUL, DL, Op.getValueType(), fHI, TWOHW);
  SDValue fLO = DAG.getNode(ISD::SINT_TO_FP, DL, Op.getValueType(), LO);

  // Add the two halves
  return DAG.getNode(ISD::FADD, DL, Op.getValueType(), fHI, fLO);
}

SDValue VectorLegalizer::ExpandFNEG(SDValue Op) {
  if (TLI.isOperationLegalOrCustom(ISD::FSUB, Op.getValueType())) {
    SDLoc DL(Op);
    SDValue Zero = DAG.getConstantFP(-0.0, DL, Op.getValueType());
    // TODO: If FNEG had fast-math-flags, they'd get propagated to this FSUB.
    return DAG.getNode(ISD::FSUB, DL, Op.getValueType(),
                       Zero, Op.getOperand(0));
  }
  return DAG.UnrollVectorOp(Op.getNode());
}

SDValue VectorLegalizer::ExpandFSUB(SDValue Op) {
  // For floating-point values, (a-b) is the same as a+(-b). If FNEG is legal,
  // we can defer this to operation legalization where it will be lowered as
  // a+(-b).
  EVT VT = Op.getValueType();
  if (TLI.isOperationLegalOrCustom(ISD::FNEG, VT) &&
      TLI.isOperationLegalOrCustom(ISD::FADD, VT))
    return Op; // Defer to LegalizeDAG

  return DAG.UnrollVectorOp(Op.getNode());
}

SDValue VectorLegalizer::ExpandCTPOP(SDValue Op) {
  SDValue Result;
  if (TLI.expandCTPOP(Op.getNode(), Result, DAG))
    return Result;

  return DAG.UnrollVectorOp(Op.getNode());
}

SDValue VectorLegalizer::ExpandCTLZ(SDValue Op) {
  SDValue Result;
  if (TLI.expandCTLZ(Op.getNode(), Result, DAG))
    return Result;

  return DAG.UnrollVectorOp(Op.getNode());
}

SDValue VectorLegalizer::ExpandCTTZ(SDValue Op) {
  SDValue Result;
  if (TLI.expandCTTZ(Op.getNode(), Result, DAG))
    return Result;

  return DAG.UnrollVectorOp(Op.getNode());
}

SDValue VectorLegalizer::ExpandFunnelShift(SDValue Op) {
  SDValue Result;
  if (TLI.expandFunnelShift(Op.getNode(), Result, DAG))
    return Result;

  return DAG.UnrollVectorOp(Op.getNode());
}

SDValue VectorLegalizer::ExpandROT(SDValue Op) {
  SDValue Result;
  if (TLI.expandROT(Op.getNode(), Result, DAG))
    return Result;

  return DAG.UnrollVectorOp(Op.getNode());
}

SDValue VectorLegalizer::ExpandFMINNUM_FMAXNUM(SDValue Op) {
  if (SDValue Expanded = TLI.expandFMINNUM_FMAXNUM(Op.getNode(), DAG))
    return Expanded;
  return DAG.UnrollVectorOp(Op.getNode());
}

SDValue VectorLegalizer::ExpandUADDSUBO(SDValue Op) {
  SDValue Result, Overflow;
  TLI.expandUADDSUBO(Op.getNode(), Result, Overflow, DAG);

  if (Op.getResNo() == 0) {
    AddLegalizedOperand(Op.getValue(1), LegalizeOp(Overflow));
    return Result;
  } else {
    AddLegalizedOperand(Op.getValue(0), LegalizeOp(Result));
    return Overflow;
  }
}

SDValue VectorLegalizer::ExpandSADDSUBO(SDValue Op) {
  SDValue Result, Overflow;
  TLI.expandSADDSUBO(Op.getNode(), Result, Overflow, DAG);

  if (Op.getResNo() == 0) {
    AddLegalizedOperand(Op.getValue(1), LegalizeOp(Overflow));
    return Result;
  } else {
    AddLegalizedOperand(Op.getValue(0), LegalizeOp(Result));
    return Overflow;
  }
}

SDValue VectorLegalizer::ExpandMULO(SDValue Op) {
  SDValue Result, Overflow;
  if (!TLI.expandMULO(Op.getNode(), Result, Overflow, DAG))
    std::tie(Result, Overflow) = DAG.UnrollVectorOverflowOp(Op.getNode());

  if (Op.getResNo() == 0) {
    AddLegalizedOperand(Op.getValue(1), LegalizeOp(Overflow));
    return Result;
  } else {
    AddLegalizedOperand(Op.getValue(0), LegalizeOp(Result));
    return Overflow;
  }
}

SDValue VectorLegalizer::ExpandAddSubSat(SDValue Op) {
  if (SDValue Expanded = TLI.expandAddSubSat(Op.getNode(), DAG))
    return Expanded;
  return DAG.UnrollVectorOp(Op.getNode());
}

SDValue VectorLegalizer::ExpandFixedPointMul(SDValue Op) {
  if (SDValue Expanded = TLI.expandFixedPointMul(Op.getNode(), DAG))
    return Expanded;
  return DAG.UnrollVectorOp(Op.getNode());
}

SDValue VectorLegalizer::ExpandFixedPointDiv(SDValue Op) {
  SDNode *N = Op.getNode();
  if (SDValue Expanded = TLI.expandFixedPointDiv(N->getOpcode(), SDLoc(N),
          N->getOperand(0), N->getOperand(1), N->getConstantOperandVal(2), DAG))
    return Expanded;
  return DAG.UnrollVectorOp(N);
}

SDValue VectorLegalizer::ExpandStrictFPOp(SDValue Op) {
  if (Op.getOpcode() == ISD::STRICT_UINT_TO_FP)
    return ExpandUINT_TO_FLOAT(Op);
  if (Op.getOpcode() == ISD::STRICT_FP_TO_UINT)
    return ExpandFP_TO_UINT(Op);

  return UnrollStrictFPOp(Op);
}

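// Unroll a strict FP operation by extracting each lane, performing the scalar
// strict operation on it (each scalar node carries its own chain), then
// rebuilding the vector result and token-factoring the per-lane chains back
// together.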
SDValue VectorLegalizer::UnrollStrictFPOp(SDValue Op) {
  EVT VT = Op.getValue(0).getValueType();
  EVT EltVT = VT.getVectorElementType();
  unsigned NumElems = VT.getVectorNumElements();
  unsigned NumOpers = Op.getNumOperands();
  const TargetLowering &TLI = DAG.getTargetLoweringInfo();

  EVT TmpEltVT = EltVT;
  if (Op->getOpcode() == ISD::STRICT_FSETCC ||
      Op->getOpcode() == ISD::STRICT_FSETCCS)
    TmpEltVT = TLI.getSetCCResultType(DAG.getDataLayout(),
                                      *DAG.getContext(), TmpEltVT);

  EVT ValueVTs[] = {TmpEltVT, MVT::Other};
  SDValue Chain = Op.getOperand(0);
  SDLoc dl(Op);

  SmallVector<SDValue, 32> OpValues;
  SmallVector<SDValue, 32> OpChains;
  for (unsigned i = 0; i < NumElems; ++i) {
    SmallVector<SDValue, 4> Opers;
    SDValue Idx = DAG.getConstant(i, dl,
                                  TLI.getVectorIdxTy(DAG.getDataLayout()));

    // The Chain is the first operand.
    Opers.push_back(Chain);

    // Now process the remaining operands.
    for (unsigned j = 1; j < NumOpers; ++j) {
      SDValue Oper = Op.getOperand(j);
      EVT OperVT = Oper.getValueType();

      if (OperVT.isVector())
        Oper = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl,
                           OperVT.getVectorElementType(), Oper, Idx);

      Opers.push_back(Oper);
    }

    SDValue ScalarOp = DAG.getNode(Op->getOpcode(), dl, ValueVTs, Opers);
    SDValue ScalarResult = ScalarOp.getValue(0);
    SDValue ScalarChain = ScalarOp.getValue(1);

    if (Op->getOpcode() == ISD::STRICT_FSETCC ||
        Op->getOpcode() == ISD::STRICT_FSETCCS)
      ScalarResult = DAG.getSelect(dl, EltVT, ScalarResult,
                         DAG.getConstant(APInt::getAllOnesValue
                                         (EltVT.getSizeInBits()), dl, EltVT),
                         DAG.getConstant(0, dl, EltVT));

    OpValues.push_back(ScalarResult);
    OpChains.push_back(ScalarChain);
  }

  SDValue Result = DAG.getBuildVector(VT, dl, OpValues);
  SDValue NewChain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, OpChains);

  AddLegalizedOperand(Op.getValue(0), Result);
  AddLegalizedOperand(Op.getValue(1), NewChain);

  return Op.getResNo() ? NewChain : Result;
}

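// Unroll a vector SETCC by extracting each lane pair, performing a scalar
// SETCC on it, and selecting all-ones or zero per lane so the result uses the
// usual all-ones/all-zeros vector-boolean representation.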
SDValue VectorLegalizer::UnrollVSETCC(SDValue Op) {
  EVT VT = Op.getValueType();
  unsigned NumElems = VT.getVectorNumElements();
  EVT EltVT = VT.getVectorElementType();
  SDValue LHS = Op.getOperand(0), RHS = Op.getOperand(1), CC = Op.getOperand(2);
  EVT TmpEltVT = LHS.getValueType().getVectorElementType();
  SDLoc dl(Op);
  SmallVector<SDValue, 8> Ops(NumElems);
  for (unsigned i = 0; i < NumElems; ++i) {
    SDValue LHSElem = DAG.getNode(
        ISD::EXTRACT_VECTOR_ELT, dl, TmpEltVT, LHS,
        DAG.getConstant(i, dl, TLI.getVectorIdxTy(DAG.getDataLayout())));
    SDValue RHSElem = DAG.getNode(
        ISD::EXTRACT_VECTOR_ELT, dl, TmpEltVT, RHS,
        DAG.getConstant(i, dl, TLI.getVectorIdxTy(DAG.getDataLayout())));
    Ops[i] = DAG.getNode(ISD::SETCC, dl,
                         TLI.getSetCCResultType(DAG.getDataLayout(),
                                                *DAG.getContext(), TmpEltVT),
                         LHSElem, RHSElem, CC);
    Ops[i] = DAG.getSelect(dl, EltVT, Ops[i],
                           DAG.getConstant(APInt::getAllOnesValue
                                           (EltVT.getSizeInBits()), dl, EltVT),
                           DAG.getConstant(0, dl, EltVT));
  }
  return DAG.getBuildVector(VT, dl, Ops);
}

bool SelectionDAG::LegalizeVectors() {
  return VectorLegalizer(*this).Run();
}