//===-- AArch64TargetTransformInfo.cpp - AArch64 specific TTI -------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

#include "AArch64TargetTransformInfo.h"
#include "AArch64ExpandImm.h"
#include "MCTargetDesc/AArch64AddressingModes.h"
#include "llvm/Analysis/IVDescriptors.h"
#include "llvm/Analysis/LoopInfo.h"
#include "llvm/Analysis/TargetTransformInfo.h"
#include "llvm/CodeGen/BasicTTIImpl.h"
#include "llvm/CodeGen/CostTable.h"
#include "llvm/CodeGen/TargetLowering.h"
#include "llvm/IR/Intrinsics.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/IntrinsicsAArch64.h"
#include "llvm/IR/PatternMatch.h"
#include "llvm/Support/Debug.h"
#include "llvm/Transforms/InstCombine/InstCombiner.h"
#include <algorithm>
using namespace llvm;
using namespace llvm::PatternMatch;

#define DEBUG_TYPE "aarch64tti"

static cl::opt<bool> EnableFalkorHWPFUnrollFix("enable-falkor-hwpf-unroll-fix",
                                               cl::init(true), cl::Hidden);

bool AArch64TTIImpl::areInlineCompatible(const Function *Caller,
                                         const Function *Callee) const {
  const TargetMachine &TM = getTLI()->getTargetMachine();

  const FeatureBitset &CallerBits =
      TM.getSubtargetImpl(*Caller)->getFeatureBits();
  const FeatureBitset &CalleeBits =
      TM.getSubtargetImpl(*Callee)->getFeatureBits();

  // Inline a callee if its target-features are a subset of the caller's
  // target-features.
  return (CallerBits & CalleeBits) == CalleeBits;
}

/// Calculate the cost of materializing a 64-bit value. This helper
/// method might only calculate a fraction of a larger immediate. Therefore it
/// is valid to return a cost of ZERO.
InstructionCost AArch64TTIImpl::getIntImmCost(int64_t Val) {
  // Check if the immediate can be encoded within an instruction.
  if (Val == 0 || AArch64_AM::isLogicalImmediate(Val, 64))
    return 0;

  if (Val < 0)
    Val = ~Val;

  // Calculate how many moves we will need to materialize this constant.
  SmallVector<AArch64_IMM::ImmInsnModel, 4> Insn;
  AArch64_IMM::expandMOVImm(Val, 64, Insn);
  return Insn.size();
}

/// Calculate the cost of materializing the given constant.
InstructionCost AArch64TTIImpl::getIntImmCost(const APInt &Imm, Type *Ty,
                                              TTI::TargetCostKind CostKind) {
  assert(Ty->isIntegerTy());

  unsigned BitSize = Ty->getPrimitiveSizeInBits();
  if (BitSize == 0)
    return ~0U;

  // Sign-extend all constants to a multiple of 64 bits.
  APInt ImmVal = Imm;
  if (BitSize & 0x3f)
    ImmVal = Imm.sext((BitSize + 63) & ~0x3fU);

  // Split the constant into 64-bit chunks and calculate the cost for each
  // chunk.
  InstructionCost Cost = 0;
  for (unsigned ShiftVal = 0; ShiftVal < BitSize; ShiftVal += 64) {
    APInt Tmp = ImmVal.ashr(ShiftVal).sextOrTrunc(64);
    int64_t Val = Tmp.getSExtValue();
    Cost += getIntImmCost(Val);
  }
  // We need at least one instruction to materialize the constant.
87 return std::max<InstructionCost>(1, Cost); 88 } 89 90 InstructionCost AArch64TTIImpl::getIntImmCostInst(unsigned Opcode, unsigned Idx, 91 const APInt &Imm, Type *Ty, 92 TTI::TargetCostKind CostKind, 93 Instruction *Inst) { 94 assert(Ty->isIntegerTy()); 95 96 unsigned BitSize = Ty->getPrimitiveSizeInBits(); 97 // There is no cost model for constants with a bit size of 0. Return TCC_Free 98 // here, so that constant hoisting will ignore this constant. 99 if (BitSize == 0) 100 return TTI::TCC_Free; 101 102 unsigned ImmIdx = ~0U; 103 switch (Opcode) { 104 default: 105 return TTI::TCC_Free; 106 case Instruction::GetElementPtr: 107 // Always hoist the base address of a GetElementPtr. 108 if (Idx == 0) 109 return 2 * TTI::TCC_Basic; 110 return TTI::TCC_Free; 111 case Instruction::Store: 112 ImmIdx = 0; 113 break; 114 case Instruction::Add: 115 case Instruction::Sub: 116 case Instruction::Mul: 117 case Instruction::UDiv: 118 case Instruction::SDiv: 119 case Instruction::URem: 120 case Instruction::SRem: 121 case Instruction::And: 122 case Instruction::Or: 123 case Instruction::Xor: 124 case Instruction::ICmp: 125 ImmIdx = 1; 126 break; 127 // Always return TCC_Free for the shift value of a shift instruction. 128 case Instruction::Shl: 129 case Instruction::LShr: 130 case Instruction::AShr: 131 if (Idx == 1) 132 return TTI::TCC_Free; 133 break; 134 case Instruction::Trunc: 135 case Instruction::ZExt: 136 case Instruction::SExt: 137 case Instruction::IntToPtr: 138 case Instruction::PtrToInt: 139 case Instruction::BitCast: 140 case Instruction::PHI: 141 case Instruction::Call: 142 case Instruction::Select: 143 case Instruction::Ret: 144 case Instruction::Load: 145 break; 146 } 147 148 if (Idx == ImmIdx) { 149 int NumConstants = (BitSize + 63) / 64; 150 InstructionCost Cost = AArch64TTIImpl::getIntImmCost(Imm, Ty, CostKind); 151 return (Cost <= NumConstants * TTI::TCC_Basic) 152 ? static_cast<int>(TTI::TCC_Free) 153 : Cost; 154 } 155 return AArch64TTIImpl::getIntImmCost(Imm, Ty, CostKind); 156 } 157 158 InstructionCost 159 AArch64TTIImpl::getIntImmCostIntrin(Intrinsic::ID IID, unsigned Idx, 160 const APInt &Imm, Type *Ty, 161 TTI::TargetCostKind CostKind) { 162 assert(Ty->isIntegerTy()); 163 164 unsigned BitSize = Ty->getPrimitiveSizeInBits(); 165 // There is no cost model for constants with a bit size of 0. Return TCC_Free 166 // here, so that constant hoisting will ignore this constant. 167 if (BitSize == 0) 168 return TTI::TCC_Free; 169 170 // Most (all?) AArch64 intrinsics do not support folding immediates into the 171 // selected instruction, so we compute the materialization cost for the 172 // immediate directly. 173 if (IID >= Intrinsic::aarch64_addg && IID <= Intrinsic::aarch64_udiv) 174 return AArch64TTIImpl::getIntImmCost(Imm, Ty, CostKind); 175 176 switch (IID) { 177 default: 178 return TTI::TCC_Free; 179 case Intrinsic::sadd_with_overflow: 180 case Intrinsic::uadd_with_overflow: 181 case Intrinsic::ssub_with_overflow: 182 case Intrinsic::usub_with_overflow: 183 case Intrinsic::smul_with_overflow: 184 case Intrinsic::umul_with_overflow: 185 if (Idx == 1) { 186 int NumConstants = (BitSize + 63) / 64; 187 InstructionCost Cost = AArch64TTIImpl::getIntImmCost(Imm, Ty, CostKind); 188 return (Cost <= NumConstants * TTI::TCC_Basic) 189 ? 
static_cast<int>(TTI::TCC_Free)
                 : Cost;
    }
    break;
  case Intrinsic::experimental_stackmap:
    if ((Idx < 2) || (Imm.getBitWidth() <= 64 && isInt<64>(Imm.getSExtValue())))
      return TTI::TCC_Free;
    break;
  case Intrinsic::experimental_patchpoint_void:
  case Intrinsic::experimental_patchpoint_i64:
    if ((Idx < 4) || (Imm.getBitWidth() <= 64 && isInt<64>(Imm.getSExtValue())))
      return TTI::TCC_Free;
    break;
  case Intrinsic::experimental_gc_statepoint:
    if ((Idx < 5) || (Imm.getBitWidth() <= 64 && isInt<64>(Imm.getSExtValue())))
      return TTI::TCC_Free;
    break;
  }
  return AArch64TTIImpl::getIntImmCost(Imm, Ty, CostKind);
}

TargetTransformInfo::PopcntSupportKind
AArch64TTIImpl::getPopcntSupport(unsigned TyWidth) {
  assert(isPowerOf2_32(TyWidth) && "Ty width must be power of 2");
  if (TyWidth == 32 || TyWidth == 64)
    return TTI::PSK_FastHardware;
  // TODO: AArch64TargetLowering::LowerCTPOP() supports 128bit popcount.
  return TTI::PSK_Software;
}

InstructionCost
AArch64TTIImpl::getIntrinsicInstrCost(const IntrinsicCostAttributes &ICA,
                                      TTI::TargetCostKind CostKind) {
  auto *RetTy = ICA.getReturnType();
  switch (ICA.getID()) {
  case Intrinsic::umin:
  case Intrinsic::umax:
  case Intrinsic::smin:
  case Intrinsic::smax: {
    static const auto ValidMinMaxTys = {MVT::v8i8,  MVT::v16i8, MVT::v4i16,
                                        MVT::v8i16, MVT::v2i32, MVT::v4i32};
    auto LT = TLI->getTypeLegalizationCost(DL, RetTy);
    // v2i64 types get converted to cmp+bif, hence the cost of 2.
    if (LT.second == MVT::v2i64)
      return LT.first * 2;
    if (any_of(ValidMinMaxTys, [&LT](MVT M) { return M == LT.second; }))
      return LT.first;
    break;
  }
  case Intrinsic::sadd_sat:
  case Intrinsic::ssub_sat:
  case Intrinsic::uadd_sat:
  case Intrinsic::usub_sat: {
    static const auto ValidSatTys = {MVT::v8i8,  MVT::v16i8, MVT::v4i16,
                                     MVT::v8i16, MVT::v2i32, MVT::v4i32,
                                     MVT::v2i64};
    auto LT = TLI->getTypeLegalizationCost(DL, RetTy);
    // This is a base cost of 1 for the vadd, plus 3 extract shifts if we
    // need to extend the type, as it uses shr(qadd(shl, shl)).
    unsigned Instrs =
        LT.second.getScalarSizeInBits() == RetTy->getScalarSizeInBits() ? 1 : 4;
    if (any_of(ValidSatTys, [&LT](MVT M) { return M == LT.second; }))
      return LT.first * Instrs;
    break;
  }
  case Intrinsic::abs: {
    static const auto ValidAbsTys = {MVT::v8i8,  MVT::v16i8, MVT::v4i16,
                                     MVT::v8i16, MVT::v2i32, MVT::v4i32,
                                     MVT::v2i64};
    auto LT = TLI->getTypeLegalizationCost(DL, RetTy);
    if (any_of(ValidAbsTys, [&LT](MVT M) { return M == LT.second; }))
      return LT.first;
    break;
  }
  case Intrinsic::experimental_stepvector: {
    InstructionCost Cost = 1; // Cost of the `index' instruction
    auto LT = TLI->getTypeLegalizationCost(DL, RetTy);
    // Legalisation of illegal vectors involves an `index' instruction plus
    // (LT.first - 1) vector adds.
    if (LT.first > 1) {
      Type *LegalVTy = EVT(LT.second).getTypeForEVT(RetTy->getContext());
      InstructionCost AddCost =
          getArithmeticInstrCost(Instruction::Add, LegalVTy, CostKind);
      Cost += AddCost * (LT.first - 1);
    }
    return Cost;
  }
  case Intrinsic::bitreverse: {
    static const CostTblEntry BitreverseTbl[] = {
        {Intrinsic::bitreverse, MVT::i32, 1},
        {Intrinsic::bitreverse, MVT::i64, 1},
        {Intrinsic::bitreverse, MVT::v8i8, 1},
        {Intrinsic::bitreverse, MVT::v16i8, 1},
        {Intrinsic::bitreverse, MVT::v4i16, 2},
        {Intrinsic::bitreverse, MVT::v8i16, 2},
        {Intrinsic::bitreverse, MVT::v2i32, 2},
        {Intrinsic::bitreverse, MVT::v4i32, 2},
        {Intrinsic::bitreverse, MVT::v1i64, 2},
        {Intrinsic::bitreverse, MVT::v2i64, 2},
    };
    const auto LegalisationCost = TLI->getTypeLegalizationCost(DL, RetTy);
    const auto *Entry =
        CostTableLookup(BitreverseTbl, ICA.getID(), LegalisationCost.second);
    if (Entry) {
      // The cost model uses the legal type (i32) that i8 and i16 are promoted
      // to, plus 1 so that we match the actual lowering cost.
      if (TLI->getValueType(DL, RetTy, true) == MVT::i8 ||
          TLI->getValueType(DL, RetTy, true) == MVT::i16)
        return LegalisationCost.first * Entry->Cost + 1;

      return LegalisationCost.first * Entry->Cost;
    }
    break;
  }
  case Intrinsic::ctpop: {
    static const CostTblEntry CtpopCostTbl[] = {
        {ISD::CTPOP, MVT::v2i64, 4},
        {ISD::CTPOP, MVT::v4i32, 3},
        {ISD::CTPOP, MVT::v8i16, 2},
        {ISD::CTPOP, MVT::v16i8, 1},
        {ISD::CTPOP, MVT::i64, 4},
        {ISD::CTPOP, MVT::v2i32, 3},
        {ISD::CTPOP, MVT::v4i16, 2},
        {ISD::CTPOP, MVT::v8i8, 1},
        {ISD::CTPOP, MVT::i32, 5},
    };
    auto LT = TLI->getTypeLegalizationCost(DL, RetTy);
    MVT MTy = LT.second;
    if (const auto *Entry = CostTableLookup(CtpopCostTbl, ISD::CTPOP, MTy)) {
      // Extra cost of +1 when illegal vector types are legalized by promoting
      // the integer type.
      int ExtraCost = MTy.isVector() && MTy.getScalarSizeInBits() !=
                                            RetTy->getScalarSizeInBits()
                          ? 1
                          : 0;
      return LT.first * Entry->Cost + ExtraCost;
    }
    break;
  }
  default:
    break;
  }
  return BaseT::getIntrinsicInstrCost(ICA, CostKind);
}

/// The function removes redundant reinterpret casts in the presence of
/// control flow.
static Optional<Instruction *> processPhiNode(InstCombiner &IC,
                                              IntrinsicInst &II) {
  SmallVector<Instruction *, 32> Worklist;
  auto RequiredType = II.getType();

  auto *PN = dyn_cast<PHINode>(II.getArgOperand(0));
  assert(PN && "Expected Phi Node!");

  // Don't create a new Phi unless we can remove the old one.
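  // (This reinterpret is the PHI's only user, so the old PHI becomes dead once
  // the reinterpret is rewritten.)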
345 if (!PN->hasOneUse()) 346 return None; 347 348 for (Value *IncValPhi : PN->incoming_values()) { 349 auto *Reinterpret = dyn_cast<IntrinsicInst>(IncValPhi); 350 if (!Reinterpret || 351 Reinterpret->getIntrinsicID() != 352 Intrinsic::aarch64_sve_convert_to_svbool || 353 RequiredType != Reinterpret->getArgOperand(0)->getType()) 354 return None; 355 } 356 357 // Create the new Phi 358 LLVMContext &Ctx = PN->getContext(); 359 IRBuilder<> Builder(Ctx); 360 Builder.SetInsertPoint(PN); 361 PHINode *NPN = Builder.CreatePHI(RequiredType, PN->getNumIncomingValues()); 362 Worklist.push_back(PN); 363 364 for (unsigned I = 0; I < PN->getNumIncomingValues(); I++) { 365 auto *Reinterpret = cast<Instruction>(PN->getIncomingValue(I)); 366 NPN->addIncoming(Reinterpret->getOperand(0), PN->getIncomingBlock(I)); 367 Worklist.push_back(Reinterpret); 368 } 369 370 // Cleanup Phi Node and reinterprets 371 return IC.replaceInstUsesWith(II, NPN); 372 } 373 374 static Optional<Instruction *> instCombineConvertFromSVBool(InstCombiner &IC, 375 IntrinsicInst &II) { 376 // If the reinterpret instruction operand is a PHI Node 377 if (isa<PHINode>(II.getArgOperand(0))) 378 return processPhiNode(IC, II); 379 380 SmallVector<Instruction *, 32> CandidatesForRemoval; 381 Value *Cursor = II.getOperand(0), *EarliestReplacement = nullptr; 382 383 const auto *IVTy = cast<VectorType>(II.getType()); 384 385 // Walk the chain of conversions. 386 while (Cursor) { 387 // If the type of the cursor has fewer lanes than the final result, zeroing 388 // must take place, which breaks the equivalence chain. 389 const auto *CursorVTy = cast<VectorType>(Cursor->getType()); 390 if (CursorVTy->getElementCount().getKnownMinValue() < 391 IVTy->getElementCount().getKnownMinValue()) 392 break; 393 394 // If the cursor has the same type as I, it is a viable replacement. 395 if (Cursor->getType() == IVTy) 396 EarliestReplacement = Cursor; 397 398 auto *IntrinsicCursor = dyn_cast<IntrinsicInst>(Cursor); 399 400 // If this is not an SVE conversion intrinsic, this is the end of the chain. 401 if (!IntrinsicCursor || !(IntrinsicCursor->getIntrinsicID() == 402 Intrinsic::aarch64_sve_convert_to_svbool || 403 IntrinsicCursor->getIntrinsicID() == 404 Intrinsic::aarch64_sve_convert_from_svbool)) 405 break; 406 407 CandidatesForRemoval.insert(CandidatesForRemoval.begin(), IntrinsicCursor); 408 Cursor = IntrinsicCursor->getOperand(0); 409 } 410 411 // If no viable replacement in the conversion chain was found, there is 412 // nothing to do. 413 if (!EarliestReplacement) 414 return None; 415 416 return IC.replaceInstUsesWith(II, EarliestReplacement); 417 } 418 419 static Optional<Instruction *> instCombineSVEDup(InstCombiner &IC, 420 IntrinsicInst &II) { 421 IntrinsicInst *Pg = dyn_cast<IntrinsicInst>(II.getArgOperand(1)); 422 if (!Pg) 423 return None; 424 425 if (Pg->getIntrinsicID() != Intrinsic::aarch64_sve_ptrue) 426 return None; 427 428 const auto PTruePattern = 429 cast<ConstantInt>(Pg->getOperand(0))->getZExtValue(); 430 if (PTruePattern != AArch64SVEPredPattern::vl1) 431 return None; 432 433 // The intrinsic is inserting into lane zero so use an insert instead. 
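  // A dup with a ptrue(vl1) predicate only writes lane 0 of the passthru
  // vector, which is exactly what an insertelement at index 0 does.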
434 auto *IdxTy = Type::getInt64Ty(II.getContext()); 435 auto *Insert = InsertElementInst::Create( 436 II.getArgOperand(0), II.getArgOperand(2), ConstantInt::get(IdxTy, 0)); 437 Insert->insertBefore(&II); 438 Insert->takeName(&II); 439 440 return IC.replaceInstUsesWith(II, Insert); 441 } 442 443 static Optional<Instruction *> instCombineSVEDupX(InstCombiner &IC, 444 IntrinsicInst &II) { 445 // Replace DupX with a regular IR splat. 446 IRBuilder<> Builder(II.getContext()); 447 Builder.SetInsertPoint(&II); 448 auto *RetTy = cast<ScalableVectorType>(II.getType()); 449 Value *Splat = 450 Builder.CreateVectorSplat(RetTy->getElementCount(), II.getArgOperand(0)); 451 Splat->takeName(&II); 452 return IC.replaceInstUsesWith(II, Splat); 453 } 454 455 static Optional<Instruction *> instCombineSVECmpNE(InstCombiner &IC, 456 IntrinsicInst &II) { 457 LLVMContext &Ctx = II.getContext(); 458 IRBuilder<> Builder(Ctx); 459 Builder.SetInsertPoint(&II); 460 461 // Check that the predicate is all active 462 auto *Pg = dyn_cast<IntrinsicInst>(II.getArgOperand(0)); 463 if (!Pg || Pg->getIntrinsicID() != Intrinsic::aarch64_sve_ptrue) 464 return None; 465 466 const auto PTruePattern = 467 cast<ConstantInt>(Pg->getOperand(0))->getZExtValue(); 468 if (PTruePattern != AArch64SVEPredPattern::all) 469 return None; 470 471 // Check that we have a compare of zero.. 472 auto *SplatValue = 473 dyn_cast_or_null<ConstantInt>(getSplatValue(II.getArgOperand(2))); 474 if (!SplatValue || !SplatValue->isZero()) 475 return None; 476 477 // ..against a dupq 478 auto *DupQLane = dyn_cast<IntrinsicInst>(II.getArgOperand(1)); 479 if (!DupQLane || 480 DupQLane->getIntrinsicID() != Intrinsic::aarch64_sve_dupq_lane) 481 return None; 482 483 // Where the dupq is a lane 0 replicate of a vector insert 484 if (!cast<ConstantInt>(DupQLane->getArgOperand(1))->isZero()) 485 return None; 486 487 auto *VecIns = dyn_cast<IntrinsicInst>(DupQLane->getArgOperand(0)); 488 if (!VecIns || 489 VecIns->getIntrinsicID() != Intrinsic::experimental_vector_insert) 490 return None; 491 492 // Where the vector insert is a fixed constant vector insert into undef at 493 // index zero 494 if (!isa<UndefValue>(VecIns->getArgOperand(0))) 495 return None; 496 497 if (!cast<ConstantInt>(VecIns->getArgOperand(2))->isZero()) 498 return None; 499 500 auto *ConstVec = dyn_cast<Constant>(VecIns->getArgOperand(1)); 501 if (!ConstVec) 502 return None; 503 504 auto *VecTy = dyn_cast<FixedVectorType>(ConstVec->getType()); 505 auto *OutTy = dyn_cast<ScalableVectorType>(II.getType()); 506 if (!VecTy || !OutTy || VecTy->getNumElements() != OutTy->getMinNumElements()) 507 return None; 508 509 unsigned NumElts = VecTy->getNumElements(); 510 unsigned PredicateBits = 0; 511 512 // Expand intrinsic operands to a 16-bit byte level predicate 513 for (unsigned I = 0; I < NumElts; ++I) { 514 auto *Arg = dyn_cast<ConstantInt>(ConstVec->getAggregateElement(I)); 515 if (!Arg) 516 return None; 517 if (!Arg->isZero()) 518 PredicateBits |= 1 << (I * (16 / NumElts)); 519 } 520 521 // If all bits are zero bail early with an empty predicate 522 if (PredicateBits == 0) { 523 auto *PFalse = Constant::getNullValue(II.getType()); 524 PFalse->takeName(&II); 525 return IC.replaceInstUsesWith(II, PFalse); 526 } 527 528 // Calculate largest predicate type used (where byte predicate is largest) 529 unsigned Mask = 8; 530 for (unsigned I = 0; I < 16; ++I) 531 if ((PredicateBits & (1 << I)) != 0) 532 Mask |= (I % 8); 533 534 unsigned PredSize = Mask & -Mask; 535 auto *PredType = ScalableVectorType::get( 536 
Type::getInt1Ty(Ctx), AArch64::SVEBitsPerBlock / (PredSize * 8)); 537 538 // Ensure all relevant bits are set 539 for (unsigned I = 0; I < 16; I += PredSize) 540 if ((PredicateBits & (1 << I)) == 0) 541 return None; 542 543 auto *PTruePat = 544 ConstantInt::get(Type::getInt32Ty(Ctx), AArch64SVEPredPattern::all); 545 auto *PTrue = Builder.CreateIntrinsic(Intrinsic::aarch64_sve_ptrue, 546 {PredType}, {PTruePat}); 547 auto *ConvertToSVBool = Builder.CreateIntrinsic( 548 Intrinsic::aarch64_sve_convert_to_svbool, {PredType}, {PTrue}); 549 auto *ConvertFromSVBool = 550 Builder.CreateIntrinsic(Intrinsic::aarch64_sve_convert_from_svbool, 551 {II.getType()}, {ConvertToSVBool}); 552 553 ConvertFromSVBool->takeName(&II); 554 return IC.replaceInstUsesWith(II, ConvertFromSVBool); 555 } 556 557 static Optional<Instruction *> instCombineSVELast(InstCombiner &IC, 558 IntrinsicInst &II) { 559 IRBuilder<> Builder(II.getContext()); 560 Builder.SetInsertPoint(&II); 561 Value *Pg = II.getArgOperand(0); 562 Value *Vec = II.getArgOperand(1); 563 auto IntrinsicID = II.getIntrinsicID(); 564 bool IsAfter = IntrinsicID == Intrinsic::aarch64_sve_lasta; 565 566 // lastX(splat(X)) --> X 567 if (auto *SplatVal = getSplatValue(Vec)) 568 return IC.replaceInstUsesWith(II, SplatVal); 569 570 // If x and/or y is a splat value then: 571 // lastX (binop (x, y)) --> binop(lastX(x), lastX(y)) 572 Value *LHS, *RHS; 573 if (match(Vec, m_OneUse(m_BinOp(m_Value(LHS), m_Value(RHS))))) { 574 if (isSplatValue(LHS) || isSplatValue(RHS)) { 575 auto *OldBinOp = cast<BinaryOperator>(Vec); 576 auto OpC = OldBinOp->getOpcode(); 577 auto *NewLHS = 578 Builder.CreateIntrinsic(IntrinsicID, {Vec->getType()}, {Pg, LHS}); 579 auto *NewRHS = 580 Builder.CreateIntrinsic(IntrinsicID, {Vec->getType()}, {Pg, RHS}); 581 auto *NewBinOp = BinaryOperator::CreateWithCopiedFlags( 582 OpC, NewLHS, NewRHS, OldBinOp, OldBinOp->getName(), &II); 583 return IC.replaceInstUsesWith(II, NewBinOp); 584 } 585 } 586 587 auto *C = dyn_cast<Constant>(Pg); 588 if (IsAfter && C && C->isNullValue()) { 589 // The intrinsic is extracting lane 0 so use an extract instead. 590 auto *IdxTy = Type::getInt64Ty(II.getContext()); 591 auto *Extract = ExtractElementInst::Create(Vec, ConstantInt::get(IdxTy, 0)); 592 Extract->insertBefore(&II); 593 Extract->takeName(&II); 594 return IC.replaceInstUsesWith(II, Extract); 595 } 596 597 auto *IntrPG = dyn_cast<IntrinsicInst>(Pg); 598 if (!IntrPG) 599 return None; 600 601 if (IntrPG->getIntrinsicID() != Intrinsic::aarch64_sve_ptrue) 602 return None; 603 604 const auto PTruePattern = 605 cast<ConstantInt>(IntrPG->getOperand(0))->getZExtValue(); 606 607 // Can the intrinsic's predicate be converted to a known constant index? 608 unsigned MinNumElts = getNumElementsFromSVEPredPattern(PTruePattern); 609 if (!MinNumElts) 610 return None; 611 612 unsigned Idx = MinNumElts - 1; 613 // Increment the index if extracting the element after the last active 614 // predicate element. 615 if (IsAfter) 616 ++Idx; 617 618 // Ignore extracts whose index is larger than the known minimum vector 619 // length. NOTE: This is an artificial constraint where we prefer to 620 // maintain what the user asked for until an alternative is proven faster. 621 auto *PgVTy = cast<ScalableVectorType>(Pg->getType()); 622 if (Idx >= PgVTy->getMinNumElements()) 623 return None; 624 625 // The intrinsic is extracting a fixed lane so use an extract instead. 
626 auto *IdxTy = Type::getInt64Ty(II.getContext()); 627 auto *Extract = ExtractElementInst::Create(Vec, ConstantInt::get(IdxTy, Idx)); 628 Extract->insertBefore(&II); 629 Extract->takeName(&II); 630 return IC.replaceInstUsesWith(II, Extract); 631 } 632 633 static Optional<Instruction *> instCombineRDFFR(InstCombiner &IC, 634 IntrinsicInst &II) { 635 LLVMContext &Ctx = II.getContext(); 636 IRBuilder<> Builder(Ctx); 637 Builder.SetInsertPoint(&II); 638 // Replace rdffr with predicated rdffr.z intrinsic, so that optimizePTestInstr 639 // can work with RDFFR_PP for ptest elimination. 640 auto *AllPat = 641 ConstantInt::get(Type::getInt32Ty(Ctx), AArch64SVEPredPattern::all); 642 auto *PTrue = Builder.CreateIntrinsic(Intrinsic::aarch64_sve_ptrue, 643 {II.getType()}, {AllPat}); 644 auto *RDFFR = 645 Builder.CreateIntrinsic(Intrinsic::aarch64_sve_rdffr_z, {}, {PTrue}); 646 RDFFR->takeName(&II); 647 return IC.replaceInstUsesWith(II, RDFFR); 648 } 649 650 static Optional<Instruction *> 651 instCombineSVECntElts(InstCombiner &IC, IntrinsicInst &II, unsigned NumElts) { 652 const auto Pattern = cast<ConstantInt>(II.getArgOperand(0))->getZExtValue(); 653 654 if (Pattern == AArch64SVEPredPattern::all) { 655 LLVMContext &Ctx = II.getContext(); 656 IRBuilder<> Builder(Ctx); 657 Builder.SetInsertPoint(&II); 658 659 Constant *StepVal = ConstantInt::get(II.getType(), NumElts); 660 auto *VScale = Builder.CreateVScale(StepVal); 661 VScale->takeName(&II); 662 return IC.replaceInstUsesWith(II, VScale); 663 } 664 665 unsigned MinNumElts = getNumElementsFromSVEPredPattern(Pattern); 666 667 return MinNumElts && NumElts >= MinNumElts 668 ? Optional<Instruction *>(IC.replaceInstUsesWith( 669 II, ConstantInt::get(II.getType(), MinNumElts))) 670 : None; 671 } 672 673 static Optional<Instruction *> instCombineSVEPTest(InstCombiner &IC, 674 IntrinsicInst &II) { 675 IntrinsicInst *Op1 = dyn_cast<IntrinsicInst>(II.getArgOperand(0)); 676 IntrinsicInst *Op2 = dyn_cast<IntrinsicInst>(II.getArgOperand(1)); 677 678 if (Op1 && Op2 && 679 Op1->getIntrinsicID() == Intrinsic::aarch64_sve_convert_to_svbool && 680 Op2->getIntrinsicID() == Intrinsic::aarch64_sve_convert_to_svbool && 681 Op1->getArgOperand(0)->getType() == Op2->getArgOperand(0)->getType()) { 682 683 IRBuilder<> Builder(II.getContext()); 684 Builder.SetInsertPoint(&II); 685 686 Value *Ops[] = {Op1->getArgOperand(0), Op2->getArgOperand(0)}; 687 Type *Tys[] = {Op1->getArgOperand(0)->getType()}; 688 689 auto *PTest = Builder.CreateIntrinsic(II.getIntrinsicID(), Tys, Ops); 690 691 PTest->takeName(&II); 692 return IC.replaceInstUsesWith(II, PTest); 693 } 694 695 return None; 696 } 697 698 static Optional<Instruction *> instCombineSVEVectorFMLA(InstCombiner &IC, 699 IntrinsicInst &II) { 700 // fold (fadd p a (fmul p b c)) -> (fma p a b c) 701 Value *p, *FMul, *a, *b, *c; 702 auto m_SVEFAdd = [](auto p, auto w, auto x) { 703 return m_CombineOr(m_Intrinsic<Intrinsic::aarch64_sve_fadd>(p, w, x), 704 m_Intrinsic<Intrinsic::aarch64_sve_fadd>(p, x, w)); 705 }; 706 auto m_SVEFMul = [](auto p, auto y, auto z) { 707 return m_Intrinsic<Intrinsic::aarch64_sve_fmul>(p, y, z); 708 }; 709 if (!match(&II, m_SVEFAdd(m_Value(p), m_Value(a), 710 m_CombineAnd(m_Value(FMul), 711 m_SVEFMul(m_Deferred(p), m_Value(b), 712 m_Value(c)))))) 713 return None; 714 715 if (!FMul->hasOneUse()) 716 return None; 717 718 llvm::FastMathFlags FAddFlags = II.getFastMathFlags(); 719 llvm::FastMathFlags FMulFlags = cast<CallInst>(FMul)->getFastMathFlags(); 720 // Don't combine when FMul & Fadd flags differ to 
prevent the loss of any 721 // additional important flags 722 if (FAddFlags != FMulFlags) 723 return None; 724 bool AllowReassoc = FAddFlags.allowReassoc() && FMulFlags.allowReassoc(); 725 bool AllowContract = FAddFlags.allowContract() && FMulFlags.allowContract(); 726 if (!AllowReassoc || !AllowContract) 727 return None; 728 729 IRBuilder<> Builder(II.getContext()); 730 Builder.SetInsertPoint(&II); 731 auto FMLA = Builder.CreateIntrinsic(Intrinsic::aarch64_sve_fmla, 732 {II.getType()}, {p, a, b, c}, &II); 733 FMLA->setFastMathFlags(FAddFlags); 734 return IC.replaceInstUsesWith(II, FMLA); 735 } 736 737 static Instruction::BinaryOps intrinsicIDToBinOpCode(unsigned Intrinsic) { 738 switch (Intrinsic) { 739 case Intrinsic::aarch64_sve_fmul: 740 return Instruction::BinaryOps::FMul; 741 case Intrinsic::aarch64_sve_fadd: 742 return Instruction::BinaryOps::FAdd; 743 case Intrinsic::aarch64_sve_fsub: 744 return Instruction::BinaryOps::FSub; 745 default: 746 return Instruction::BinaryOpsEnd; 747 } 748 } 749 750 static Optional<Instruction *> instCombineSVEVectorBinOp(InstCombiner &IC, 751 IntrinsicInst &II) { 752 auto *OpPredicate = II.getOperand(0); 753 auto BinOpCode = intrinsicIDToBinOpCode(II.getIntrinsicID()); 754 if (BinOpCode == Instruction::BinaryOpsEnd || 755 !match(OpPredicate, m_Intrinsic<Intrinsic::aarch64_sve_ptrue>( 756 m_ConstantInt<AArch64SVEPredPattern::all>()))) 757 return None; 758 IRBuilder<> Builder(II.getContext()); 759 Builder.SetInsertPoint(&II); 760 Builder.setFastMathFlags(II.getFastMathFlags()); 761 auto BinOp = 762 Builder.CreateBinOp(BinOpCode, II.getOperand(1), II.getOperand(2)); 763 return IC.replaceInstUsesWith(II, BinOp); 764 } 765 766 static Optional<Instruction *> instCombineSVEVectorFAdd(InstCombiner &IC, 767 IntrinsicInst &II) { 768 auto FMLA = instCombineSVEVectorFMLA(IC, II); 769 if (FMLA) 770 return FMLA; 771 return instCombineSVEVectorBinOp(IC, II); 772 } 773 774 static Optional<Instruction *> instCombineSVEVectorMul(InstCombiner &IC, 775 IntrinsicInst &II) { 776 auto *OpPredicate = II.getOperand(0); 777 auto *OpMultiplicand = II.getOperand(1); 778 auto *OpMultiplier = II.getOperand(2); 779 780 IRBuilder<> Builder(II.getContext()); 781 Builder.SetInsertPoint(&II); 782 783 // Return true if a given instruction is a unit splat value, false otherwise. 784 auto IsUnitSplat = [](auto *I) { 785 auto *SplatValue = getSplatValue(I); 786 if (!SplatValue) 787 return false; 788 return match(SplatValue, m_FPOne()) || match(SplatValue, m_One()); 789 }; 790 791 // Return true if a given instruction is an aarch64_sve_dup intrinsic call 792 // with a unit splat value, false otherwise. 793 auto IsUnitDup = [](auto *I) { 794 auto *IntrI = dyn_cast<IntrinsicInst>(I); 795 if (!IntrI || IntrI->getIntrinsicID() != Intrinsic::aarch64_sve_dup) 796 return false; 797 798 auto *SplatValue = IntrI->getOperand(2); 799 return match(SplatValue, m_FPOne()) || match(SplatValue, m_One()); 800 }; 801 802 // The OpMultiplier variable should always point to the dup (if any), so 803 // swap if necessary. 804 if (IsUnitDup(OpMultiplicand) || IsUnitSplat(OpMultiplicand)) 805 std::swap(OpMultiplier, OpMultiplicand); 806 807 if (IsUnitSplat(OpMultiplier)) { 808 // [f]mul pg (dupx 1) %n => %n 809 OpMultiplicand->takeName(&II); 810 return IC.replaceInstUsesWith(II, OpMultiplicand); 811 } else if (IsUnitDup(OpMultiplier)) { 812 // [f]mul pg (dup pg 1) %n => %n 813 auto *DupInst = cast<IntrinsicInst>(OpMultiplier); 814 auto *DupPg = DupInst->getOperand(1); 815 // TODO: this is naive. 
// The optimization is still valid if DupPg
    // 'encompasses' OpPredicate, not only if they're the same predicate.
    if (OpPredicate == DupPg) {
      OpMultiplicand->takeName(&II);
      return IC.replaceInstUsesWith(II, OpMultiplicand);
    }
  }

  return instCombineSVEVectorBinOp(IC, II);
}

static Optional<Instruction *> instCombineSVEUnpack(InstCombiner &IC,
                                                    IntrinsicInst &II) {
  IRBuilder<> Builder(II.getContext());
  Builder.SetInsertPoint(&II);
  Value *UnpackArg = II.getArgOperand(0);
  auto *RetTy = cast<ScalableVectorType>(II.getType());
  bool IsSigned = II.getIntrinsicID() == Intrinsic::aarch64_sve_sunpkhi ||
                  II.getIntrinsicID() == Intrinsic::aarch64_sve_sunpklo;

  // Hi = uunpkhi(splat(X)) --> Hi = splat(extend(X))
  // Lo = uunpklo(splat(X)) --> Lo = splat(extend(X))
  if (auto *ScalarArg = getSplatValue(UnpackArg)) {
    ScalarArg =
        Builder.CreateIntCast(ScalarArg, RetTy->getScalarType(), IsSigned);
    Value *NewVal =
        Builder.CreateVectorSplat(RetTy->getElementCount(), ScalarArg);
    NewVal->takeName(&II);
    return IC.replaceInstUsesWith(II, NewVal);
  }

  return None;
}

static Optional<Instruction *> instCombineSVETBL(InstCombiner &IC,
                                                 IntrinsicInst &II) {
  auto *OpVal = II.getOperand(0);
  auto *OpIndices = II.getOperand(1);
  VectorType *VTy = cast<VectorType>(II.getType());

  // Check whether OpIndices is a constant splat value < minimal element count
  // of result.
  auto *SplatValue = dyn_cast_or_null<ConstantInt>(getSplatValue(OpIndices));
  if (!SplatValue ||
      SplatValue->getValue().uge(VTy->getElementCount().getKnownMinValue()))
    return None;

  // Convert sve_tbl(OpVal, sve_dup_x(SplatValue)) to
  // splat_vector(extractelement(OpVal, SplatValue)) for further optimization.
  IRBuilder<> Builder(II.getContext());
  Builder.SetInsertPoint(&II);
  auto *Extract = Builder.CreateExtractElement(OpVal, SplatValue);
  auto *VectorSplat =
      Builder.CreateVectorSplat(VTy->getElementCount(), Extract);

  VectorSplat->takeName(&II);
  return IC.replaceInstUsesWith(II, VectorSplat);
}

static Optional<Instruction *> instCombineSVETupleGet(InstCombiner &IC,
                                                      IntrinsicInst &II) {
  // Try to remove sequences of tuple get/set.
  Value *SetTuple, *SetIndex, *SetValue;
  auto *GetTuple = II.getArgOperand(0);
  auto *GetIndex = II.getArgOperand(1);
  // Check that we have tuple_get(GetTuple, GetIndex) where GetTuple is a
  // call to tuple_set i.e. tuple_set(SetTuple, SetIndex, SetValue).
  // Make sure that the types of the current intrinsic and SetValue match
  // in order to safely remove the sequence.
  if (!match(GetTuple,
             m_Intrinsic<Intrinsic::aarch64_sve_tuple_set>(
                 m_Value(SetTuple), m_Value(SetIndex), m_Value(SetValue))) ||
      SetValue->getType() != II.getType())
    return None;
  // Case where we get the same index right after setting it.
  // tuple_get(tuple_set(SetTuple, SetIndex, SetValue), GetIndex) --> SetValue
  if (GetIndex == SetIndex)
    return IC.replaceInstUsesWith(II, SetValue);
  // If we are getting a different index than what was set in the tuple_set
  // intrinsic, we can just set the input tuple to the one up in the chain.
894 // tuple_get(tuple_set(SetTuple, SetIndex, SetValue), GetIndex) 895 // --> tuple_get(SetTuple, GetIndex) 896 return IC.replaceOperand(II, 0, SetTuple); 897 } 898 899 static Optional<Instruction *> instCombineSVEZip(InstCombiner &IC, 900 IntrinsicInst &II) { 901 // zip1(uzp1(A, B), uzp2(A, B)) --> A 902 // zip2(uzp1(A, B), uzp2(A, B)) --> B 903 Value *A, *B; 904 if (match(II.getArgOperand(0), 905 m_Intrinsic<Intrinsic::aarch64_sve_uzp1>(m_Value(A), m_Value(B))) && 906 match(II.getArgOperand(1), m_Intrinsic<Intrinsic::aarch64_sve_uzp2>( 907 m_Specific(A), m_Specific(B)))) 908 return IC.replaceInstUsesWith( 909 II, (II.getIntrinsicID() == Intrinsic::aarch64_sve_zip1 ? A : B)); 910 911 return None; 912 } 913 914 Optional<Instruction *> 915 AArch64TTIImpl::instCombineIntrinsic(InstCombiner &IC, 916 IntrinsicInst &II) const { 917 Intrinsic::ID IID = II.getIntrinsicID(); 918 switch (IID) { 919 default: 920 break; 921 case Intrinsic::aarch64_sve_convert_from_svbool: 922 return instCombineConvertFromSVBool(IC, II); 923 case Intrinsic::aarch64_sve_dup: 924 return instCombineSVEDup(IC, II); 925 case Intrinsic::aarch64_sve_dup_x: 926 return instCombineSVEDupX(IC, II); 927 case Intrinsic::aarch64_sve_cmpne: 928 case Intrinsic::aarch64_sve_cmpne_wide: 929 return instCombineSVECmpNE(IC, II); 930 case Intrinsic::aarch64_sve_rdffr: 931 return instCombineRDFFR(IC, II); 932 case Intrinsic::aarch64_sve_lasta: 933 case Intrinsic::aarch64_sve_lastb: 934 return instCombineSVELast(IC, II); 935 case Intrinsic::aarch64_sve_cntd: 936 return instCombineSVECntElts(IC, II, 2); 937 case Intrinsic::aarch64_sve_cntw: 938 return instCombineSVECntElts(IC, II, 4); 939 case Intrinsic::aarch64_sve_cnth: 940 return instCombineSVECntElts(IC, II, 8); 941 case Intrinsic::aarch64_sve_cntb: 942 return instCombineSVECntElts(IC, II, 16); 943 case Intrinsic::aarch64_sve_ptest_any: 944 case Intrinsic::aarch64_sve_ptest_first: 945 case Intrinsic::aarch64_sve_ptest_last: 946 return instCombineSVEPTest(IC, II); 947 case Intrinsic::aarch64_sve_mul: 948 case Intrinsic::aarch64_sve_fmul: 949 return instCombineSVEVectorMul(IC, II); 950 case Intrinsic::aarch64_sve_fadd: 951 return instCombineSVEVectorFAdd(IC, II); 952 case Intrinsic::aarch64_sve_fsub: 953 return instCombineSVEVectorBinOp(IC, II); 954 case Intrinsic::aarch64_sve_tbl: 955 return instCombineSVETBL(IC, II); 956 case Intrinsic::aarch64_sve_uunpkhi: 957 case Intrinsic::aarch64_sve_uunpklo: 958 case Intrinsic::aarch64_sve_sunpkhi: 959 case Intrinsic::aarch64_sve_sunpklo: 960 return instCombineSVEUnpack(IC, II); 961 case Intrinsic::aarch64_sve_tuple_get: 962 return instCombineSVETupleGet(IC, II); 963 case Intrinsic::aarch64_sve_zip1: 964 case Intrinsic::aarch64_sve_zip2: 965 return instCombineSVEZip(IC, II); 966 } 967 968 return None; 969 } 970 971 bool AArch64TTIImpl::isWideningInstruction(Type *DstTy, unsigned Opcode, 972 ArrayRef<const Value *> Args) { 973 974 // A helper that returns a vector type from the given type. The number of 975 // elements in type Ty determine the vector width. 976 auto toVectorTy = [&](Type *ArgTy) { 977 return VectorType::get(ArgTy->getScalarType(), 978 cast<VectorType>(DstTy)->getElementCount()); 979 }; 980 981 // Exit early if DstTy is not a vector type whose elements are at least 982 // 16-bits wide. 983 if (!DstTy->isVectorTy() || DstTy->getScalarSizeInBits() < 16) 984 return false; 985 986 // Determine if the operation has a widening variant. 
We consider both the 987 // "long" (e.g., usubl) and "wide" (e.g., usubw) versions of the 988 // instructions. 989 // 990 // TODO: Add additional widening operations (e.g., mul, shl, etc.) once we 991 // verify that their extending operands are eliminated during code 992 // generation. 993 switch (Opcode) { 994 case Instruction::Add: // UADDL(2), SADDL(2), UADDW(2), SADDW(2). 995 case Instruction::Sub: // USUBL(2), SSUBL(2), USUBW(2), SSUBW(2). 996 break; 997 default: 998 return false; 999 } 1000 1001 // To be a widening instruction (either the "wide" or "long" versions), the 1002 // second operand must be a sign- or zero extend having a single user. We 1003 // only consider extends having a single user because they may otherwise not 1004 // be eliminated. 1005 if (Args.size() != 2 || 1006 (!isa<SExtInst>(Args[1]) && !isa<ZExtInst>(Args[1])) || 1007 !Args[1]->hasOneUse()) 1008 return false; 1009 auto *Extend = cast<CastInst>(Args[1]); 1010 1011 // Legalize the destination type and ensure it can be used in a widening 1012 // operation. 1013 auto DstTyL = TLI->getTypeLegalizationCost(DL, DstTy); 1014 unsigned DstElTySize = DstTyL.second.getScalarSizeInBits(); 1015 if (!DstTyL.second.isVector() || DstElTySize != DstTy->getScalarSizeInBits()) 1016 return false; 1017 1018 // Legalize the source type and ensure it can be used in a widening 1019 // operation. 1020 auto *SrcTy = toVectorTy(Extend->getSrcTy()); 1021 auto SrcTyL = TLI->getTypeLegalizationCost(DL, SrcTy); 1022 unsigned SrcElTySize = SrcTyL.second.getScalarSizeInBits(); 1023 if (!SrcTyL.second.isVector() || SrcElTySize != SrcTy->getScalarSizeInBits()) 1024 return false; 1025 1026 // Get the total number of vector elements in the legalized types. 1027 InstructionCost NumDstEls = 1028 DstTyL.first * DstTyL.second.getVectorMinNumElements(); 1029 InstructionCost NumSrcEls = 1030 SrcTyL.first * SrcTyL.second.getVectorMinNumElements(); 1031 1032 // Return true if the legalized types have the same number of vector elements 1033 // and the destination element type size is twice that of the source type. 1034 return NumDstEls == NumSrcEls && 2 * SrcElTySize == DstElTySize; 1035 } 1036 1037 InstructionCost AArch64TTIImpl::getCastInstrCost(unsigned Opcode, Type *Dst, 1038 Type *Src, 1039 TTI::CastContextHint CCH, 1040 TTI::TargetCostKind CostKind, 1041 const Instruction *I) { 1042 int ISD = TLI->InstructionOpcodeToISD(Opcode); 1043 assert(ISD && "Invalid opcode"); 1044 1045 // If the cast is observable, and it is used by a widening instruction (e.g., 1046 // uaddl, saddw, etc.), it may be free. 1047 if (I && I->hasOneUse()) { 1048 auto *SingleUser = cast<Instruction>(*I->user_begin()); 1049 SmallVector<const Value *, 4> Operands(SingleUser->operand_values()); 1050 if (isWideningInstruction(Dst, SingleUser->getOpcode(), Operands)) { 1051 // If the cast is the second operand, it is free. We will generate either 1052 // a "wide" or "long" version of the widening instruction. 1053 if (I == SingleUser->getOperand(1)) 1054 return 0; 1055 // If the cast is not the second operand, it will be free if it looks the 1056 // same as the second operand. In this case, we will generate a "long" 1057 // version of the widening instruction. 1058 if (auto *Cast = dyn_cast<CastInst>(SingleUser->getOperand(1))) 1059 if (I->getOpcode() == unsigned(Cast->getOpcode()) && 1060 cast<CastInst>(I)->getSrcTy() == Cast->getSrcTy()) 1061 return 0; 1062 } 1063 } 1064 1065 // TODO: Allow non-throughput costs that aren't binary. 
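  // For now, any non-throughput cost kind is clamped to 0 or 1 by the lambda
  // below.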
1066 auto AdjustCost = [&CostKind](InstructionCost Cost) -> InstructionCost { 1067 if (CostKind != TTI::TCK_RecipThroughput) 1068 return Cost == 0 ? 0 : 1; 1069 return Cost; 1070 }; 1071 1072 EVT SrcTy = TLI->getValueType(DL, Src); 1073 EVT DstTy = TLI->getValueType(DL, Dst); 1074 1075 if (!SrcTy.isSimple() || !DstTy.isSimple()) 1076 return AdjustCost( 1077 BaseT::getCastInstrCost(Opcode, Dst, Src, CCH, CostKind, I)); 1078 1079 static const TypeConversionCostTblEntry 1080 ConversionTbl[] = { 1081 { ISD::TRUNCATE, MVT::v4i16, MVT::v4i32, 1 }, 1082 { ISD::TRUNCATE, MVT::v4i32, MVT::v4i64, 0 }, 1083 { ISD::TRUNCATE, MVT::v8i8, MVT::v8i32, 3 }, 1084 { ISD::TRUNCATE, MVT::v16i8, MVT::v16i32, 6 }, 1085 1086 // Truncations on nxvmiN 1087 { ISD::TRUNCATE, MVT::nxv2i1, MVT::nxv2i16, 1 }, 1088 { ISD::TRUNCATE, MVT::nxv2i1, MVT::nxv2i32, 1 }, 1089 { ISD::TRUNCATE, MVT::nxv2i1, MVT::nxv2i64, 1 }, 1090 { ISD::TRUNCATE, MVT::nxv4i1, MVT::nxv4i16, 1 }, 1091 { ISD::TRUNCATE, MVT::nxv4i1, MVT::nxv4i32, 1 }, 1092 { ISD::TRUNCATE, MVT::nxv4i1, MVT::nxv4i64, 2 }, 1093 { ISD::TRUNCATE, MVT::nxv8i1, MVT::nxv8i16, 1 }, 1094 { ISD::TRUNCATE, MVT::nxv8i1, MVT::nxv8i32, 3 }, 1095 { ISD::TRUNCATE, MVT::nxv8i1, MVT::nxv8i64, 5 }, 1096 { ISD::TRUNCATE, MVT::nxv16i1, MVT::nxv16i8, 1 }, 1097 { ISD::TRUNCATE, MVT::nxv2i16, MVT::nxv2i32, 1 }, 1098 { ISD::TRUNCATE, MVT::nxv2i32, MVT::nxv2i64, 1 }, 1099 { ISD::TRUNCATE, MVT::nxv4i16, MVT::nxv4i32, 1 }, 1100 { ISD::TRUNCATE, MVT::nxv4i32, MVT::nxv4i64, 2 }, 1101 { ISD::TRUNCATE, MVT::nxv8i16, MVT::nxv8i32, 3 }, 1102 { ISD::TRUNCATE, MVT::nxv8i32, MVT::nxv8i64, 6 }, 1103 1104 // The number of shll instructions for the extension. 1105 { ISD::SIGN_EXTEND, MVT::v4i64, MVT::v4i16, 3 }, 1106 { ISD::ZERO_EXTEND, MVT::v4i64, MVT::v4i16, 3 }, 1107 { ISD::SIGN_EXTEND, MVT::v4i64, MVT::v4i32, 2 }, 1108 { ISD::ZERO_EXTEND, MVT::v4i64, MVT::v4i32, 2 }, 1109 { ISD::SIGN_EXTEND, MVT::v8i32, MVT::v8i8, 3 }, 1110 { ISD::ZERO_EXTEND, MVT::v8i32, MVT::v8i8, 3 }, 1111 { ISD::SIGN_EXTEND, MVT::v8i32, MVT::v8i16, 2 }, 1112 { ISD::ZERO_EXTEND, MVT::v8i32, MVT::v8i16, 2 }, 1113 { ISD::SIGN_EXTEND, MVT::v8i64, MVT::v8i8, 7 }, 1114 { ISD::ZERO_EXTEND, MVT::v8i64, MVT::v8i8, 7 }, 1115 { ISD::SIGN_EXTEND, MVT::v8i64, MVT::v8i16, 6 }, 1116 { ISD::ZERO_EXTEND, MVT::v8i64, MVT::v8i16, 6 }, 1117 { ISD::SIGN_EXTEND, MVT::v16i16, MVT::v16i8, 2 }, 1118 { ISD::ZERO_EXTEND, MVT::v16i16, MVT::v16i8, 2 }, 1119 { ISD::SIGN_EXTEND, MVT::v16i32, MVT::v16i8, 6 }, 1120 { ISD::ZERO_EXTEND, MVT::v16i32, MVT::v16i8, 6 }, 1121 1122 // LowerVectorINT_TO_FP: 1123 { ISD::SINT_TO_FP, MVT::v2f32, MVT::v2i32, 1 }, 1124 { ISD::SINT_TO_FP, MVT::v4f32, MVT::v4i32, 1 }, 1125 { ISD::SINT_TO_FP, MVT::v2f64, MVT::v2i64, 1 }, 1126 { ISD::UINT_TO_FP, MVT::v2f32, MVT::v2i32, 1 }, 1127 { ISD::UINT_TO_FP, MVT::v4f32, MVT::v4i32, 1 }, 1128 { ISD::UINT_TO_FP, MVT::v2f64, MVT::v2i64, 1 }, 1129 1130 // Complex: to v2f32 1131 { ISD::SINT_TO_FP, MVT::v2f32, MVT::v2i8, 3 }, 1132 { ISD::SINT_TO_FP, MVT::v2f32, MVT::v2i16, 3 }, 1133 { ISD::SINT_TO_FP, MVT::v2f32, MVT::v2i64, 2 }, 1134 { ISD::UINT_TO_FP, MVT::v2f32, MVT::v2i8, 3 }, 1135 { ISD::UINT_TO_FP, MVT::v2f32, MVT::v2i16, 3 }, 1136 { ISD::UINT_TO_FP, MVT::v2f32, MVT::v2i64, 2 }, 1137 1138 // Complex: to v4f32 1139 { ISD::SINT_TO_FP, MVT::v4f32, MVT::v4i8, 4 }, 1140 { ISD::SINT_TO_FP, MVT::v4f32, MVT::v4i16, 2 }, 1141 { ISD::UINT_TO_FP, MVT::v4f32, MVT::v4i8, 3 }, 1142 { ISD::UINT_TO_FP, MVT::v4f32, MVT::v4i16, 2 }, 1143 1144 // Complex: to v8f32 1145 { ISD::SINT_TO_FP, MVT::v8f32, MVT::v8i8, 
10 }, 1146 { ISD::SINT_TO_FP, MVT::v8f32, MVT::v8i16, 4 }, 1147 { ISD::UINT_TO_FP, MVT::v8f32, MVT::v8i8, 10 }, 1148 { ISD::UINT_TO_FP, MVT::v8f32, MVT::v8i16, 4 }, 1149 1150 // Complex: to v16f32 1151 { ISD::SINT_TO_FP, MVT::v16f32, MVT::v16i8, 21 }, 1152 { ISD::UINT_TO_FP, MVT::v16f32, MVT::v16i8, 21 }, 1153 1154 // Complex: to v2f64 1155 { ISD::SINT_TO_FP, MVT::v2f64, MVT::v2i8, 4 }, 1156 { ISD::SINT_TO_FP, MVT::v2f64, MVT::v2i16, 4 }, 1157 { ISD::SINT_TO_FP, MVT::v2f64, MVT::v2i32, 2 }, 1158 { ISD::UINT_TO_FP, MVT::v2f64, MVT::v2i8, 4 }, 1159 { ISD::UINT_TO_FP, MVT::v2f64, MVT::v2i16, 4 }, 1160 { ISD::UINT_TO_FP, MVT::v2f64, MVT::v2i32, 2 }, 1161 1162 1163 // LowerVectorFP_TO_INT 1164 { ISD::FP_TO_SINT, MVT::v2i32, MVT::v2f32, 1 }, 1165 { ISD::FP_TO_SINT, MVT::v4i32, MVT::v4f32, 1 }, 1166 { ISD::FP_TO_SINT, MVT::v2i64, MVT::v2f64, 1 }, 1167 { ISD::FP_TO_UINT, MVT::v2i32, MVT::v2f32, 1 }, 1168 { ISD::FP_TO_UINT, MVT::v4i32, MVT::v4f32, 1 }, 1169 { ISD::FP_TO_UINT, MVT::v2i64, MVT::v2f64, 1 }, 1170 1171 // Complex, from v2f32: legal type is v2i32 (no cost) or v2i64 (1 ext). 1172 { ISD::FP_TO_SINT, MVT::v2i64, MVT::v2f32, 2 }, 1173 { ISD::FP_TO_SINT, MVT::v2i16, MVT::v2f32, 1 }, 1174 { ISD::FP_TO_SINT, MVT::v2i8, MVT::v2f32, 1 }, 1175 { ISD::FP_TO_UINT, MVT::v2i64, MVT::v2f32, 2 }, 1176 { ISD::FP_TO_UINT, MVT::v2i16, MVT::v2f32, 1 }, 1177 { ISD::FP_TO_UINT, MVT::v2i8, MVT::v2f32, 1 }, 1178 1179 // Complex, from v4f32: legal type is v4i16, 1 narrowing => ~2 1180 { ISD::FP_TO_SINT, MVT::v4i16, MVT::v4f32, 2 }, 1181 { ISD::FP_TO_SINT, MVT::v4i8, MVT::v4f32, 2 }, 1182 { ISD::FP_TO_UINT, MVT::v4i16, MVT::v4f32, 2 }, 1183 { ISD::FP_TO_UINT, MVT::v4i8, MVT::v4f32, 2 }, 1184 1185 // Complex, from nxv2f32. 1186 { ISD::FP_TO_SINT, MVT::nxv2i64, MVT::nxv2f32, 1 }, 1187 { ISD::FP_TO_SINT, MVT::nxv2i32, MVT::nxv2f32, 1 }, 1188 { ISD::FP_TO_SINT, MVT::nxv2i16, MVT::nxv2f32, 1 }, 1189 { ISD::FP_TO_SINT, MVT::nxv2i8, MVT::nxv2f32, 1 }, 1190 { ISD::FP_TO_UINT, MVT::nxv2i64, MVT::nxv2f32, 1 }, 1191 { ISD::FP_TO_UINT, MVT::nxv2i32, MVT::nxv2f32, 1 }, 1192 { ISD::FP_TO_UINT, MVT::nxv2i16, MVT::nxv2f32, 1 }, 1193 { ISD::FP_TO_UINT, MVT::nxv2i8, MVT::nxv2f32, 1 }, 1194 1195 // Complex, from v2f64: legal type is v2i32, 1 narrowing => ~2. 1196 { ISD::FP_TO_SINT, MVT::v2i32, MVT::v2f64, 2 }, 1197 { ISD::FP_TO_SINT, MVT::v2i16, MVT::v2f64, 2 }, 1198 { ISD::FP_TO_SINT, MVT::v2i8, MVT::v2f64, 2 }, 1199 { ISD::FP_TO_UINT, MVT::v2i32, MVT::v2f64, 2 }, 1200 { ISD::FP_TO_UINT, MVT::v2i16, MVT::v2f64, 2 }, 1201 { ISD::FP_TO_UINT, MVT::v2i8, MVT::v2f64, 2 }, 1202 1203 // Complex, from nxv2f64. 1204 { ISD::FP_TO_SINT, MVT::nxv2i64, MVT::nxv2f64, 1 }, 1205 { ISD::FP_TO_SINT, MVT::nxv2i32, MVT::nxv2f64, 1 }, 1206 { ISD::FP_TO_SINT, MVT::nxv2i16, MVT::nxv2f64, 1 }, 1207 { ISD::FP_TO_SINT, MVT::nxv2i8, MVT::nxv2f64, 1 }, 1208 { ISD::FP_TO_UINT, MVT::nxv2i64, MVT::nxv2f64, 1 }, 1209 { ISD::FP_TO_UINT, MVT::nxv2i32, MVT::nxv2f64, 1 }, 1210 { ISD::FP_TO_UINT, MVT::nxv2i16, MVT::nxv2f64, 1 }, 1211 { ISD::FP_TO_UINT, MVT::nxv2i8, MVT::nxv2f64, 1 }, 1212 1213 // Complex, from nxv4f32. 
1214 { ISD::FP_TO_SINT, MVT::nxv4i64, MVT::nxv4f32, 4 }, 1215 { ISD::FP_TO_SINT, MVT::nxv4i32, MVT::nxv4f32, 1 }, 1216 { ISD::FP_TO_SINT, MVT::nxv4i16, MVT::nxv4f32, 1 }, 1217 { ISD::FP_TO_SINT, MVT::nxv4i8, MVT::nxv4f32, 1 }, 1218 { ISD::FP_TO_UINT, MVT::nxv4i64, MVT::nxv4f32, 4 }, 1219 { ISD::FP_TO_UINT, MVT::nxv4i32, MVT::nxv4f32, 1 }, 1220 { ISD::FP_TO_UINT, MVT::nxv4i16, MVT::nxv4f32, 1 }, 1221 { ISD::FP_TO_UINT, MVT::nxv4i8, MVT::nxv4f32, 1 }, 1222 1223 // Complex, from nxv8f64. Illegal -> illegal conversions not required. 1224 { ISD::FP_TO_SINT, MVT::nxv8i16, MVT::nxv8f64, 7 }, 1225 { ISD::FP_TO_SINT, MVT::nxv8i8, MVT::nxv8f64, 7 }, 1226 { ISD::FP_TO_UINT, MVT::nxv8i16, MVT::nxv8f64, 7 }, 1227 { ISD::FP_TO_UINT, MVT::nxv8i8, MVT::nxv8f64, 7 }, 1228 1229 // Complex, from nxv4f64. Illegal -> illegal conversions not required. 1230 { ISD::FP_TO_SINT, MVT::nxv4i32, MVT::nxv4f64, 3 }, 1231 { ISD::FP_TO_SINT, MVT::nxv4i16, MVT::nxv4f64, 3 }, 1232 { ISD::FP_TO_SINT, MVT::nxv4i8, MVT::nxv4f64, 3 }, 1233 { ISD::FP_TO_UINT, MVT::nxv4i32, MVT::nxv4f64, 3 }, 1234 { ISD::FP_TO_UINT, MVT::nxv4i16, MVT::nxv4f64, 3 }, 1235 { ISD::FP_TO_UINT, MVT::nxv4i8, MVT::nxv4f64, 3 }, 1236 1237 // Complex, from nxv8f32. Illegal -> illegal conversions not required. 1238 { ISD::FP_TO_SINT, MVT::nxv8i16, MVT::nxv8f32, 3 }, 1239 { ISD::FP_TO_SINT, MVT::nxv8i8, MVT::nxv8f32, 3 }, 1240 { ISD::FP_TO_UINT, MVT::nxv8i16, MVT::nxv8f32, 3 }, 1241 { ISD::FP_TO_UINT, MVT::nxv8i8, MVT::nxv8f32, 3 }, 1242 1243 // Complex, from nxv8f16. 1244 { ISD::FP_TO_SINT, MVT::nxv8i64, MVT::nxv8f16, 10 }, 1245 { ISD::FP_TO_SINT, MVT::nxv8i32, MVT::nxv8f16, 4 }, 1246 { ISD::FP_TO_SINT, MVT::nxv8i16, MVT::nxv8f16, 1 }, 1247 { ISD::FP_TO_SINT, MVT::nxv8i8, MVT::nxv8f16, 1 }, 1248 { ISD::FP_TO_UINT, MVT::nxv8i64, MVT::nxv8f16, 10 }, 1249 { ISD::FP_TO_UINT, MVT::nxv8i32, MVT::nxv8f16, 4 }, 1250 { ISD::FP_TO_UINT, MVT::nxv8i16, MVT::nxv8f16, 1 }, 1251 { ISD::FP_TO_UINT, MVT::nxv8i8, MVT::nxv8f16, 1 }, 1252 1253 // Complex, from nxv4f16. 1254 { ISD::FP_TO_SINT, MVT::nxv4i64, MVT::nxv4f16, 4 }, 1255 { ISD::FP_TO_SINT, MVT::nxv4i32, MVT::nxv4f16, 1 }, 1256 { ISD::FP_TO_SINT, MVT::nxv4i16, MVT::nxv4f16, 1 }, 1257 { ISD::FP_TO_SINT, MVT::nxv4i8, MVT::nxv4f16, 1 }, 1258 { ISD::FP_TO_UINT, MVT::nxv4i64, MVT::nxv4f16, 4 }, 1259 { ISD::FP_TO_UINT, MVT::nxv4i32, MVT::nxv4f16, 1 }, 1260 { ISD::FP_TO_UINT, MVT::nxv4i16, MVT::nxv4f16, 1 }, 1261 { ISD::FP_TO_UINT, MVT::nxv4i8, MVT::nxv4f16, 1 }, 1262 1263 // Complex, from nxv2f16. 1264 { ISD::FP_TO_SINT, MVT::nxv2i64, MVT::nxv2f16, 1 }, 1265 { ISD::FP_TO_SINT, MVT::nxv2i32, MVT::nxv2f16, 1 }, 1266 { ISD::FP_TO_SINT, MVT::nxv2i16, MVT::nxv2f16, 1 }, 1267 { ISD::FP_TO_SINT, MVT::nxv2i8, MVT::nxv2f16, 1 }, 1268 { ISD::FP_TO_UINT, MVT::nxv2i64, MVT::nxv2f16, 1 }, 1269 { ISD::FP_TO_UINT, MVT::nxv2i32, MVT::nxv2f16, 1 }, 1270 { ISD::FP_TO_UINT, MVT::nxv2i16, MVT::nxv2f16, 1 }, 1271 { ISD::FP_TO_UINT, MVT::nxv2i8, MVT::nxv2f16, 1 }, 1272 1273 // Truncate from nxvmf32 to nxvmf16. 1274 { ISD::FP_ROUND, MVT::nxv2f16, MVT::nxv2f32, 1 }, 1275 { ISD::FP_ROUND, MVT::nxv4f16, MVT::nxv4f32, 1 }, 1276 { ISD::FP_ROUND, MVT::nxv8f16, MVT::nxv8f32, 3 }, 1277 1278 // Truncate from nxvmf64 to nxvmf16. 1279 { ISD::FP_ROUND, MVT::nxv2f16, MVT::nxv2f64, 1 }, 1280 { ISD::FP_ROUND, MVT::nxv4f16, MVT::nxv4f64, 3 }, 1281 { ISD::FP_ROUND, MVT::nxv8f16, MVT::nxv8f64, 7 }, 1282 1283 // Truncate from nxvmf64 to nxvmf32. 
1284 { ISD::FP_ROUND, MVT::nxv2f32, MVT::nxv2f64, 1 }, 1285 { ISD::FP_ROUND, MVT::nxv4f32, MVT::nxv4f64, 3 }, 1286 { ISD::FP_ROUND, MVT::nxv8f32, MVT::nxv8f64, 6 }, 1287 1288 // Extend from nxvmf16 to nxvmf32. 1289 { ISD::FP_EXTEND, MVT::nxv2f32, MVT::nxv2f16, 1}, 1290 { ISD::FP_EXTEND, MVT::nxv4f32, MVT::nxv4f16, 1}, 1291 { ISD::FP_EXTEND, MVT::nxv8f32, MVT::nxv8f16, 2}, 1292 1293 // Extend from nxvmf16 to nxvmf64. 1294 { ISD::FP_EXTEND, MVT::nxv2f64, MVT::nxv2f16, 1}, 1295 { ISD::FP_EXTEND, MVT::nxv4f64, MVT::nxv4f16, 2}, 1296 { ISD::FP_EXTEND, MVT::nxv8f64, MVT::nxv8f16, 4}, 1297 1298 // Extend from nxvmf32 to nxvmf64. 1299 { ISD::FP_EXTEND, MVT::nxv2f64, MVT::nxv2f32, 1}, 1300 { ISD::FP_EXTEND, MVT::nxv4f64, MVT::nxv4f32, 2}, 1301 { ISD::FP_EXTEND, MVT::nxv8f64, MVT::nxv8f32, 6}, 1302 1303 }; 1304 1305 if (const auto *Entry = ConvertCostTableLookup(ConversionTbl, ISD, 1306 DstTy.getSimpleVT(), 1307 SrcTy.getSimpleVT())) 1308 return AdjustCost(Entry->Cost); 1309 1310 return AdjustCost( 1311 BaseT::getCastInstrCost(Opcode, Dst, Src, CCH, CostKind, I)); 1312 } 1313 1314 InstructionCost AArch64TTIImpl::getExtractWithExtendCost(unsigned Opcode, 1315 Type *Dst, 1316 VectorType *VecTy, 1317 unsigned Index) { 1318 1319 // Make sure we were given a valid extend opcode. 1320 assert((Opcode == Instruction::SExt || Opcode == Instruction::ZExt) && 1321 "Invalid opcode"); 1322 1323 // We are extending an element we extract from a vector, so the source type 1324 // of the extend is the element type of the vector. 1325 auto *Src = VecTy->getElementType(); 1326 1327 // Sign- and zero-extends are for integer types only. 1328 assert(isa<IntegerType>(Dst) && isa<IntegerType>(Src) && "Invalid type"); 1329 1330 // Get the cost for the extract. We compute the cost (if any) for the extend 1331 // below. 1332 InstructionCost Cost = 1333 getVectorInstrCost(Instruction::ExtractElement, VecTy, Index); 1334 1335 // Legalize the types. 1336 auto VecLT = TLI->getTypeLegalizationCost(DL, VecTy); 1337 auto DstVT = TLI->getValueType(DL, Dst); 1338 auto SrcVT = TLI->getValueType(DL, Src); 1339 TTI::TargetCostKind CostKind = TTI::TCK_RecipThroughput; 1340 1341 // If the resulting type is still a vector and the destination type is legal, 1342 // we may get the extension for free. If not, get the default cost for the 1343 // extend. 1344 if (!VecLT.second.isVector() || !TLI->isTypeLegal(DstVT)) 1345 return Cost + getCastInstrCost(Opcode, Dst, Src, TTI::CastContextHint::None, 1346 CostKind); 1347 1348 // The destination type should be larger than the element type. If not, get 1349 // the default cost for the extend. 1350 if (DstVT.getFixedSizeInBits() < SrcVT.getFixedSizeInBits()) 1351 return Cost + getCastInstrCost(Opcode, Dst, Src, TTI::CastContextHint::None, 1352 CostKind); 1353 1354 switch (Opcode) { 1355 default: 1356 llvm_unreachable("Opcode should be either SExt or ZExt"); 1357 1358 // For sign-extends, we only need a smov, which performs the extension 1359 // automatically. 1360 case Instruction::SExt: 1361 return Cost; 1362 1363 // For zero-extends, the extend is performed automatically by a umov unless 1364 // the destination type is i64 and the element type is i8 or i16. 1365 case Instruction::ZExt: 1366 if (DstVT.getSizeInBits() != 64u || SrcVT.getSizeInBits() == 32u) 1367 return Cost; 1368 } 1369 1370 // If we are unable to perform the extend for free, get the default cost. 
  return Cost + getCastInstrCost(Opcode, Dst, Src, TTI::CastContextHint::None,
                                 CostKind);
}

InstructionCost AArch64TTIImpl::getCFInstrCost(unsigned Opcode,
                                               TTI::TargetCostKind CostKind,
                                               const Instruction *I) {
  if (CostKind != TTI::TCK_RecipThroughput)
    return Opcode == Instruction::PHI ? 0 : 1;
  assert(CostKind == TTI::TCK_RecipThroughput && "unexpected CostKind");
  // Branches are assumed to be predicted.
  return 0;
}

InstructionCost AArch64TTIImpl::getVectorInstrCost(unsigned Opcode, Type *Val,
                                                   unsigned Index) {
  assert(Val->isVectorTy() && "This must be a vector type");

  if (Index != -1U) {
    // Legalize the type.
    std::pair<InstructionCost, MVT> LT = TLI->getTypeLegalizationCost(DL, Val);

    // This type is legalized to a scalar type.
    if (!LT.second.isVector())
      return 0;

    // The type may be split. Normalize the index to the new type.
    unsigned Width = LT.second.getVectorNumElements();
    Index = Index % Width;

    // The element at index zero is already inside the vector.
    if (Index == 0)
      return 0;
  }

  // All other insert/extracts cost this much.
  return ST->getVectorInsertExtractBaseCost();
}

InstructionCost AArch64TTIImpl::getArithmeticInstrCost(
    unsigned Opcode, Type *Ty, TTI::TargetCostKind CostKind,
    TTI::OperandValueKind Opd1Info, TTI::OperandValueKind Opd2Info,
    TTI::OperandValueProperties Opd1PropInfo,
    TTI::OperandValueProperties Opd2PropInfo, ArrayRef<const Value *> Args,
    const Instruction *CxtI) {
  // TODO: Handle more cost kinds.
  if (CostKind != TTI::TCK_RecipThroughput)
    return BaseT::getArithmeticInstrCost(Opcode, Ty, CostKind, Opd1Info,
                                         Opd2Info, Opd1PropInfo,
                                         Opd2PropInfo, Args, CxtI);

  // Legalize the type.
  std::pair<InstructionCost, MVT> LT = TLI->getTypeLegalizationCost(DL, Ty);

  // If the instruction is a widening instruction (e.g., uaddl, saddw, etc.),
  // add in the widening overhead specified by the sub-target. Since the
  // extends feeding widening instructions are performed automatically, they
  // aren't present in the generated code and have a zero cost. By adding a
  // widening overhead here, we attach the total cost of the combined operation
  // to the widening instruction.
  InstructionCost Cost = 0;
  if (isWideningInstruction(Ty, Opcode, Args))
    Cost += ST->getWideningBaseCost();

  int ISD = TLI->InstructionOpcodeToISD(Opcode);

  switch (ISD) {
  default:
    return Cost + BaseT::getArithmeticInstrCost(Opcode, Ty, CostKind, Opd1Info,
                                                Opd2Info,
                                                Opd1PropInfo, Opd2PropInfo);
  case ISD::SDIV:
    if (Opd2Info == TargetTransformInfo::OK_UniformConstantValue &&
        Opd2PropInfo == TargetTransformInfo::OP_PowerOf2) {
      // On AArch64, scalar signed division by a power-of-two constant is
      // normally expanded to the sequence ADD + CMP + SELECT + SRA.
      // The OperandValue properties may not be the same as those of the
      // previous operation; conservatively assume OP_None.
      Cost += getArithmeticInstrCost(Instruction::Add, Ty, CostKind,
                                     Opd1Info, Opd2Info,
                                     TargetTransformInfo::OP_None,
                                     TargetTransformInfo::OP_None);
      Cost += getArithmeticInstrCost(Instruction::Sub, Ty, CostKind,
                                     Opd1Info, Opd2Info,
                                     TargetTransformInfo::OP_None,
                                     TargetTransformInfo::OP_None);
      Cost += getArithmeticInstrCost(Instruction::Select, Ty, CostKind,
                                     Opd1Info, Opd2Info,
                                     TargetTransformInfo::OP_None,
                                     TargetTransformInfo::OP_None);
      Cost += getArithmeticInstrCost(Instruction::AShr, Ty, CostKind,
                                     Opd1Info, Opd2Info,
                                     TargetTransformInfo::OP_None,
                                     TargetTransformInfo::OP_None);
      return Cost;
    }
    LLVM_FALLTHROUGH;
  case ISD::UDIV:
    if (Opd2Info == TargetTransformInfo::OK_UniformConstantValue) {
      auto VT = TLI->getValueType(DL, Ty);
      if (TLI->isOperationLegalOrCustom(ISD::MULHU, VT)) {
        // Vector signed division by a constant is expanded to the
        // sequence MULHS + ADD/SUB + SRA + SRL + ADD, and unsigned division
        // to MULHS + SUB + SRL + ADD + SRL.
        InstructionCost MulCost = getArithmeticInstrCost(
            Instruction::Mul, Ty, CostKind, Opd1Info, Opd2Info,
            TargetTransformInfo::OP_None, TargetTransformInfo::OP_None);
        InstructionCost AddCost = getArithmeticInstrCost(
            Instruction::Add, Ty, CostKind, Opd1Info, Opd2Info,
            TargetTransformInfo::OP_None, TargetTransformInfo::OP_None);
        InstructionCost ShrCost = getArithmeticInstrCost(
            Instruction::AShr, Ty, CostKind, Opd1Info, Opd2Info,
            TargetTransformInfo::OP_None, TargetTransformInfo::OP_None);
        return MulCost * 2 + AddCost * 2 + ShrCost * 2 + 1;
      }
    }

    Cost += BaseT::getArithmeticInstrCost(Opcode, Ty, CostKind, Opd1Info,
                                          Opd2Info,
                                          Opd1PropInfo, Opd2PropInfo);
    if (Ty->isVectorTy()) {
      // On AArch64, vector divisions are not supported natively and are
      // expanded into scalar divisions of each pair of elements.
      Cost += getArithmeticInstrCost(Instruction::ExtractElement, Ty, CostKind,
                                     Opd1Info, Opd2Info, Opd1PropInfo,
                                     Opd2PropInfo);
      Cost += getArithmeticInstrCost(Instruction::InsertElement, Ty, CostKind,
                                     Opd1Info, Opd2Info, Opd1PropInfo,
                                     Opd2PropInfo);
      // TODO: if one of the arguments is scalar, then it's not necessary to
      // double the cost of handling the vector elements.
      Cost += Cost;
    }
    return Cost;

  case ISD::MUL:
    if (LT.second != MVT::v2i64)
      return (Cost + 1) * LT.first;
    // Since we do not have a MUL.2d instruction, a mul <2 x i64> is expensive
    // as elements are extracted from the vectors and the muls scalarized.
    // As getScalarizationOverhead is a bit too pessimistic, we estimate the
    // cost for a i64 vector directly here, which is:
    // - four i64 extracts,
    // - two i64 inserts, and
    // - two muls.
    // So, for a v2i64 with LT.First = 1 the cost is 8, and for a v4i64 with
    // LT.first = 2 the cost is 16.
    return LT.first * 8;
  case ISD::ADD:
  case ISD::XOR:
  case ISD::OR:
  case ISD::AND:
    // These nodes are marked as 'custom' for combining purposes only.
    // We know that they are legal. See LowerAdd in ISelLowering.
    return (Cost + 1) * LT.first;

  case ISD::FADD:
  case ISD::FSUB:
  case ISD::FMUL:
  case ISD::FDIV:
  case ISD::FNEG:
    // These nodes are marked as 'custom' just to lower them to SVE.
    // We know said lowering will incur no additional cost.
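    // fp128 is the exception here and falls back to the base-class cost below.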
    if (!Ty->getScalarType()->isFP128Ty())
      return (Cost + 2) * LT.first;

    return Cost + BaseT::getArithmeticInstrCost(Opcode, Ty, CostKind, Opd1Info,
                                                Opd2Info,
                                                Opd1PropInfo, Opd2PropInfo);
  }
}

InstructionCost AArch64TTIImpl::getAddressComputationCost(Type *Ty,
                                                          ScalarEvolution *SE,
                                                          const SCEV *Ptr) {
  // Address computations in vectorized code with non-consecutive addresses
  // will likely result in more instructions compared to scalar code where the
  // computation can more often be merged into the index mode. The resulting
  // extra micro-ops can significantly decrease throughput.
  unsigned NumVectorInstToHideOverhead = 10;
  int MaxMergeDistance = 64;

  if (Ty->isVectorTy() && SE &&
      !BaseT::isConstantStridedAccessLessThan(SE, Ptr, MaxMergeDistance + 1))
    return NumVectorInstToHideOverhead;

  // In many cases the address computation is not merged into the instruction
  // addressing mode.
  return 1;
}

InstructionCost AArch64TTIImpl::getCmpSelInstrCost(unsigned Opcode, Type *ValTy,
                                                   Type *CondTy,
                                                   CmpInst::Predicate VecPred,
                                                   TTI::TargetCostKind CostKind,
                                                   const Instruction *I) {
  // TODO: Handle other cost kinds.
  if (CostKind != TTI::TCK_RecipThroughput)
    return BaseT::getCmpSelInstrCost(Opcode, ValTy, CondTy, VecPred, CostKind,
                                     I);

  int ISD = TLI->InstructionOpcodeToISD(Opcode);
  // We don't lower vector selects well when they are wider than the register
  // width.
  if (isa<FixedVectorType>(ValTy) && ISD == ISD::SELECT) {
    // We would need this many instructions to hide the scalarization happening.
    const int AmortizationCost = 20;

    // If VecPred is not set, check if we can get a predicate from the context
    // instruction, if its type matches the requested ValTy.
    if (VecPred == CmpInst::BAD_ICMP_PREDICATE && I && I->getType() == ValTy) {
      CmpInst::Predicate CurrentPred;
      if (match(I, m_Select(m_Cmp(CurrentPred, m_Value(), m_Value()), m_Value(),
                            m_Value())))
        VecPred = CurrentPred;
    }
    // Check if we have a compare/select chain that can be lowered using CMxx &
    // BFI pair.
    if (CmpInst::isIntPredicate(VecPred)) {
      static const auto ValidMinMaxTys = {MVT::v8i8, MVT::v16i8, MVT::v4i16,
                                          MVT::v8i16, MVT::v2i32, MVT::v4i32,
                                          MVT::v2i64};
      auto LT = TLI->getTypeLegalizationCost(DL, ValTy);
      if (any_of(ValidMinMaxTys, [&LT](MVT M) { return M == LT.second; }))
        return LT.first;
    }

    static const TypeConversionCostTblEntry
    VectorSelectTbl[] = {
      { ISD::SELECT, MVT::v16i1, MVT::v16i16, 16 },
      { ISD::SELECT, MVT::v8i1, MVT::v8i32, 8 },
      { ISD::SELECT, MVT::v16i1, MVT::v16i32, 16 },
      { ISD::SELECT, MVT::v4i1, MVT::v4i64, 4 * AmortizationCost },
      { ISD::SELECT, MVT::v8i1, MVT::v8i64, 8 * AmortizationCost },
      { ISD::SELECT, MVT::v16i1, MVT::v16i64, 16 * AmortizationCost }
    };

    EVT SelCondTy = TLI->getValueType(DL, CondTy);
    EVT SelValTy = TLI->getValueType(DL, ValTy);
    if (SelCondTy.isSimple() && SelValTy.isSimple()) {
      if (const auto *Entry = ConvertCostTableLookup(VectorSelectTbl, ISD,
                                                     SelCondTy.getSimpleVT(),
                                                     SelValTy.getSimpleVT()))
        return Entry->Cost;
    }
  }
  // The base case handles scalable vectors fine for now, since it treats the
  // cost as 1 * legalization cost.
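  // (For example, the table above charges a select of <8 x i32> values on an
  // <8 x i1> condition a cost of 8, and a select of <4 x i64> values
  // 4 * AmortizationCost; anything not matched there, including scalable
  // vectors, falls through to the base implementation below. Illustrative
  // reading of the entries, not additional modeling.)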
1619 return BaseT::getCmpSelInstrCost(Opcode, ValTy, CondTy, VecPred, CostKind, I); 1620 } 1621 1622 AArch64TTIImpl::TTI::MemCmpExpansionOptions 1623 AArch64TTIImpl::enableMemCmpExpansion(bool OptSize, bool IsZeroCmp) const { 1624 TTI::MemCmpExpansionOptions Options; 1625 if (ST->requiresStrictAlign()) { 1626 // TODO: Add cost modeling for strict align. Misaligned loads expand to 1627 // a bunch of instructions when strict align is enabled. 1628 return Options; 1629 } 1630 Options.AllowOverlappingLoads = true; 1631 Options.MaxNumLoads = TLI->getMaxExpandSizeMemcmp(OptSize); 1632 Options.NumLoadsPerBlock = Options.MaxNumLoads; 1633 // TODO: Though vector loads usually perform well on AArch64, in some targets 1634 // they may wake up the FP unit, which raises the power consumption. Perhaps 1635 // they could be used with no holds barred (-O3). 1636 Options.LoadSizes = {8, 4, 2, 1}; 1637 return Options; 1638 } 1639 1640 InstructionCost 1641 AArch64TTIImpl::getMaskedMemoryOpCost(unsigned Opcode, Type *Src, 1642 Align Alignment, unsigned AddressSpace, 1643 TTI::TargetCostKind CostKind) { 1644 if (!isa<ScalableVectorType>(Src)) 1645 return BaseT::getMaskedMemoryOpCost(Opcode, Src, Alignment, AddressSpace, 1646 CostKind); 1647 auto LT = TLI->getTypeLegalizationCost(DL, Src); 1648 if (!LT.first.isValid()) 1649 return InstructionCost::getInvalid(); 1650 1651 // The code-generator is currently not able to handle scalable vectors 1652 // of <vscale x 1 x eltty> yet, so return an invalid cost to avoid selecting 1653 // it. This change will be removed when code-generation for these types is 1654 // sufficiently reliable. 1655 if (cast<VectorType>(Src)->getElementCount() == ElementCount::getScalable(1)) 1656 return InstructionCost::getInvalid(); 1657 1658 return LT.first * 2; 1659 } 1660 1661 InstructionCost AArch64TTIImpl::getGatherScatterOpCost( 1662 unsigned Opcode, Type *DataTy, const Value *Ptr, bool VariableMask, 1663 Align Alignment, TTI::TargetCostKind CostKind, const Instruction *I) { 1664 if (useNeonVector(DataTy)) 1665 return BaseT::getGatherScatterOpCost(Opcode, DataTy, Ptr, VariableMask, 1666 Alignment, CostKind, I); 1667 auto *VT = cast<VectorType>(DataTy); 1668 auto LT = TLI->getTypeLegalizationCost(DL, DataTy); 1669 if (!LT.first.isValid()) 1670 return InstructionCost::getInvalid(); 1671 1672 // The code-generator is currently not able to handle scalable vectors 1673 // of <vscale x 1 x eltty> yet, so return an invalid cost to avoid selecting 1674 // it. This change will be removed when code-generation for these types is 1675 // sufficiently reliable. 
1676 if (cast<VectorType>(DataTy)->getElementCount() == 1677 ElementCount::getScalable(1)) 1678 return InstructionCost::getInvalid(); 1679 1680 ElementCount LegalVF = LT.second.getVectorElementCount(); 1681 InstructionCost MemOpCost = 1682 getMemoryOpCost(Opcode, VT->getElementType(), Alignment, 0, CostKind, I); 1683 return LT.first * MemOpCost * getMaxNumElements(LegalVF); 1684 } 1685 1686 bool AArch64TTIImpl::useNeonVector(const Type *Ty) const { 1687 return isa<FixedVectorType>(Ty) && !ST->useSVEForFixedLengthVectors(); 1688 } 1689 1690 InstructionCost AArch64TTIImpl::getMemoryOpCost(unsigned Opcode, Type *Ty, 1691 MaybeAlign Alignment, 1692 unsigned AddressSpace, 1693 TTI::TargetCostKind CostKind, 1694 const Instruction *I) { 1695 EVT VT = TLI->getValueType(DL, Ty, true); 1696 // Type legalization can't handle structs 1697 if (VT == MVT::Other) 1698 return BaseT::getMemoryOpCost(Opcode, Ty, Alignment, AddressSpace, 1699 CostKind); 1700 1701 auto LT = TLI->getTypeLegalizationCost(DL, Ty); 1702 if (!LT.first.isValid()) 1703 return InstructionCost::getInvalid(); 1704 1705 // The code-generator is currently not able to handle scalable vectors 1706 // of <vscale x 1 x eltty> yet, so return an invalid cost to avoid selecting 1707 // it. This change will be removed when code-generation for these types is 1708 // sufficiently reliable. 1709 if (auto *VTy = dyn_cast<ScalableVectorType>(Ty)) 1710 if (VTy->getElementCount() == ElementCount::getScalable(1)) 1711 return InstructionCost::getInvalid(); 1712 1713 // TODO: consider latency as well for TCK_SizeAndLatency. 1714 if (CostKind == TTI::TCK_CodeSize || CostKind == TTI::TCK_SizeAndLatency) 1715 return LT.first; 1716 1717 if (CostKind != TTI::TCK_RecipThroughput) 1718 return 1; 1719 1720 if (ST->isMisaligned128StoreSlow() && Opcode == Instruction::Store && 1721 LT.second.is128BitVector() && (!Alignment || *Alignment < Align(16))) { 1722 // Unaligned stores are extremely inefficient. We don't split all 1723 // unaligned 128-bit stores because the negative impact that has shown in 1724 // practice on inlined block copy code. 1725 // We make such stores expensive so that we will only vectorize if there 1726 // are 6 other instructions getting vectorized. 1727 const int AmortizationCost = 6; 1728 1729 return LT.first * 2 * AmortizationCost; 1730 } 1731 1732 // Check truncating stores and extending loads. 1733 if (useNeonVector(Ty) && 1734 Ty->getScalarSizeInBits() != LT.second.getScalarSizeInBits()) { 1735 // v4i8 types are lowered to scalar a load/store and sshll/xtn. 1736 if (VT == MVT::v4i8) 1737 return 2; 1738 // Otherwise we need to scalarize. 1739 return cast<FixedVectorType>(Ty)->getNumElements() * 2; 1740 } 1741 1742 return LT.first; 1743 } 1744 1745 InstructionCost AArch64TTIImpl::getInterleavedMemoryOpCost( 1746 unsigned Opcode, Type *VecTy, unsigned Factor, ArrayRef<unsigned> Indices, 1747 Align Alignment, unsigned AddressSpace, TTI::TargetCostKind CostKind, 1748 bool UseMaskForCond, bool UseMaskForGaps) { 1749 assert(Factor >= 2 && "Invalid interleave factor"); 1750 auto *VecVTy = cast<FixedVectorType>(VecTy); 1751 1752 if (!UseMaskForCond && !UseMaskForGaps && 1753 Factor <= TLI->getMaxSupportedInterleaveFactor()) { 1754 unsigned NumElts = VecVTy->getNumElements(); 1755 auto *SubVecTy = 1756 FixedVectorType::get(VecTy->getScalarType(), NumElts / Factor); 1757 1758 // ldN/stN only support legal vector types of size 64 or 128 in bits. 
1759 // Accesses having vector types that are a multiple of 128 bits can be 1760 // matched to more than one ldN/stN instruction. 1761 if (NumElts % Factor == 0 && 1762 TLI->isLegalInterleavedAccessType(SubVecTy, DL)) 1763 return Factor * TLI->getNumInterleavedAccesses(SubVecTy, DL); 1764 } 1765 1766 return BaseT::getInterleavedMemoryOpCost(Opcode, VecTy, Factor, Indices, 1767 Alignment, AddressSpace, CostKind, 1768 UseMaskForCond, UseMaskForGaps); 1769 } 1770 1771 InstructionCost 1772 AArch64TTIImpl::getCostOfKeepingLiveOverCall(ArrayRef<Type *> Tys) { 1773 InstructionCost Cost = 0; 1774 TTI::TargetCostKind CostKind = TTI::TCK_RecipThroughput; 1775 for (auto *I : Tys) { 1776 if (!I->isVectorTy()) 1777 continue; 1778 if (I->getScalarSizeInBits() * cast<FixedVectorType>(I)->getNumElements() == 1779 128) 1780 Cost += getMemoryOpCost(Instruction::Store, I, Align(128), 0, CostKind) + 1781 getMemoryOpCost(Instruction::Load, I, Align(128), 0, CostKind); 1782 } 1783 return Cost; 1784 } 1785 1786 unsigned AArch64TTIImpl::getMaxInterleaveFactor(unsigned VF) { 1787 return ST->getMaxInterleaveFactor(); 1788 } 1789 1790 // For Falkor, we want to avoid having too many strided loads in a loop since 1791 // that can exhaust the HW prefetcher resources. We adjust the unroller 1792 // MaxCount preference below to attempt to ensure unrolling doesn't create too 1793 // many strided loads. 1794 static void 1795 getFalkorUnrollingPreferences(Loop *L, ScalarEvolution &SE, 1796 TargetTransformInfo::UnrollingPreferences &UP) { 1797 enum { MaxStridedLoads = 7 }; 1798 auto countStridedLoads = [](Loop *L, ScalarEvolution &SE) { 1799 int StridedLoads = 0; 1800 // FIXME? We could make this more precise by looking at the CFG and 1801 // e.g. not counting loads in each side of an if-then-else diamond. 1802 for (const auto BB : L->blocks()) { 1803 for (auto &I : *BB) { 1804 LoadInst *LMemI = dyn_cast<LoadInst>(&I); 1805 if (!LMemI) 1806 continue; 1807 1808 Value *PtrValue = LMemI->getPointerOperand(); 1809 if (L->isLoopInvariant(PtrValue)) 1810 continue; 1811 1812 const SCEV *LSCEV = SE.getSCEV(PtrValue); 1813 const SCEVAddRecExpr *LSCEVAddRec = dyn_cast<SCEVAddRecExpr>(LSCEV); 1814 if (!LSCEVAddRec || !LSCEVAddRec->isAffine()) 1815 continue; 1816 1817 // FIXME? We could take pairing of unrolled load copies into account 1818 // by looking at the AddRec, but we would probably have to limit this 1819 // to loops with no stores or other memory optimization barriers. 1820 ++StridedLoads; 1821 // We've seen enough strided loads that seeing more won't make a 1822 // difference. 1823 if (StridedLoads > MaxStridedLoads / 2) 1824 return StridedLoads; 1825 } 1826 } 1827 return StridedLoads; 1828 }; 1829 1830 int StridedLoads = countStridedLoads(L, SE); 1831 LLVM_DEBUG(dbgs() << "falkor-hwpf: detected " << StridedLoads 1832 << " strided loads\n"); 1833 // Pick the largest power of 2 unroll count that won't result in too many 1834 // strided loads. 1835 if (StridedLoads) { 1836 UP.MaxCount = 1 << Log2_32(MaxStridedLoads / StridedLoads); 1837 LLVM_DEBUG(dbgs() << "falkor-hwpf: setting unroll MaxCount to " 1838 << UP.MaxCount << '\n'); 1839 } 1840 } 1841 1842 void AArch64TTIImpl::getUnrollingPreferences(Loop *L, ScalarEvolution &SE, 1843 TTI::UnrollingPreferences &UP, 1844 OptimizationRemarkEmitter *ORE) { 1845 // Enable partial unrolling and runtime unrolling. 
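  // Start from the target-independent defaults, then adjust them for AArch64
  // below.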
1846 BaseT::getUnrollingPreferences(L, SE, UP, ORE); 1847 1848 UP.UpperBound = true; 1849 1850 // For inner loop, it is more likely to be a hot one, and the runtime check 1851 // can be promoted out from LICM pass, so the overhead is less, let's try 1852 // a larger threshold to unroll more loops. 1853 if (L->getLoopDepth() > 1) 1854 UP.PartialThreshold *= 2; 1855 1856 // Disable partial & runtime unrolling on -Os. 1857 UP.PartialOptSizeThreshold = 0; 1858 1859 if (ST->getProcFamily() == AArch64Subtarget::Falkor && 1860 EnableFalkorHWPFUnrollFix) 1861 getFalkorUnrollingPreferences(L, SE, UP); 1862 1863 // Scan the loop: don't unroll loops with calls as this could prevent 1864 // inlining. Don't unroll vector loops either, as they don't benefit much from 1865 // unrolling. 1866 for (auto *BB : L->getBlocks()) { 1867 for (auto &I : *BB) { 1868 // Don't unroll vectorised loop. 1869 if (I.getType()->isVectorTy()) 1870 return; 1871 1872 if (isa<CallInst>(I) || isa<InvokeInst>(I)) { 1873 if (const Function *F = cast<CallBase>(I).getCalledFunction()) { 1874 if (!isLoweredToCall(F)) 1875 continue; 1876 } 1877 return; 1878 } 1879 } 1880 } 1881 1882 // Enable runtime unrolling for in-order models 1883 // If mcpu is omitted, getProcFamily() returns AArch64Subtarget::Others, so by 1884 // checking for that case, we can ensure that the default behaviour is 1885 // unchanged 1886 if (ST->getProcFamily() != AArch64Subtarget::Others && 1887 !ST->getSchedModel().isOutOfOrder()) { 1888 UP.Runtime = true; 1889 UP.Partial = true; 1890 UP.UnrollRemainder = true; 1891 UP.DefaultUnrollRuntimeCount = 4; 1892 1893 UP.UnrollAndJam = true; 1894 UP.UnrollAndJamInnerLoopThreshold = 60; 1895 } 1896 } 1897 1898 void AArch64TTIImpl::getPeelingPreferences(Loop *L, ScalarEvolution &SE, 1899 TTI::PeelingPreferences &PP) { 1900 BaseT::getPeelingPreferences(L, SE, PP); 1901 } 1902 1903 Value *AArch64TTIImpl::getOrCreateResultFromMemIntrinsic(IntrinsicInst *Inst, 1904 Type *ExpectedType) { 1905 switch (Inst->getIntrinsicID()) { 1906 default: 1907 return nullptr; 1908 case Intrinsic::aarch64_neon_st2: 1909 case Intrinsic::aarch64_neon_st3: 1910 case Intrinsic::aarch64_neon_st4: { 1911 // Create a struct type 1912 StructType *ST = dyn_cast<StructType>(ExpectedType); 1913 if (!ST) 1914 return nullptr; 1915 unsigned NumElts = Inst->arg_size() - 1; 1916 if (ST->getNumElements() != NumElts) 1917 return nullptr; 1918 for (unsigned i = 0, e = NumElts; i != e; ++i) { 1919 if (Inst->getArgOperand(i)->getType() != ST->getElementType(i)) 1920 return nullptr; 1921 } 1922 Value *Res = UndefValue::get(ExpectedType); 1923 IRBuilder<> Builder(Inst); 1924 for (unsigned i = 0, e = NumElts; i != e; ++i) { 1925 Value *L = Inst->getArgOperand(i); 1926 Res = Builder.CreateInsertValue(Res, L, i); 1927 } 1928 return Res; 1929 } 1930 case Intrinsic::aarch64_neon_ld2: 1931 case Intrinsic::aarch64_neon_ld3: 1932 case Intrinsic::aarch64_neon_ld4: 1933 if (Inst->getType() == ExpectedType) 1934 return Inst; 1935 return nullptr; 1936 } 1937 } 1938 1939 bool AArch64TTIImpl::getTgtMemIntrinsic(IntrinsicInst *Inst, 1940 MemIntrinsicInfo &Info) { 1941 switch (Inst->getIntrinsicID()) { 1942 default: 1943 break; 1944 case Intrinsic::aarch64_neon_ld2: 1945 case Intrinsic::aarch64_neon_ld3: 1946 case Intrinsic::aarch64_neon_ld4: 1947 Info.ReadMem = true; 1948 Info.WriteMem = false; 1949 Info.PtrVal = Inst->getArgOperand(0); 1950 break; 1951 case Intrinsic::aarch64_neon_st2: 1952 case Intrinsic::aarch64_neon_st3: 1953 case Intrinsic::aarch64_neon_st4: 1954 
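    // The stN intrinsics write memory; the pointer they store through is the
    // last argument, which is what is recorded in Info.PtrVal below.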
    Info.ReadMem = false;
    Info.WriteMem = true;
    Info.PtrVal = Inst->getArgOperand(Inst->arg_size() - 1);
    break;
  }

  switch (Inst->getIntrinsicID()) {
  default:
    return false;
  case Intrinsic::aarch64_neon_ld2:
  case Intrinsic::aarch64_neon_st2:
    Info.MatchingId = VECTOR_LDST_TWO_ELEMENTS;
    break;
  case Intrinsic::aarch64_neon_ld3:
  case Intrinsic::aarch64_neon_st3:
    Info.MatchingId = VECTOR_LDST_THREE_ELEMENTS;
    break;
  case Intrinsic::aarch64_neon_ld4:
  case Intrinsic::aarch64_neon_st4:
    Info.MatchingId = VECTOR_LDST_FOUR_ELEMENTS;
    break;
  }
  return true;
}

/// See if \p I should be considered for address type promotion. We check if
/// \p I is a sext with the right type and is used in memory accesses. If it is
/// used in a "complex" getelementptr, we allow it to be promoted without
/// finding other sext instructions that sign extended the same initial value.
/// A getelementptr is considered "complex" if it has more than 2 operands.
bool AArch64TTIImpl::shouldConsiderAddressTypePromotion(
    const Instruction &I, bool &AllowPromotionWithoutCommonHeader) {
  bool Considerable = false;
  AllowPromotionWithoutCommonHeader = false;
  if (!isa<SExtInst>(&I))
    return false;
  Type *ConsideredSExtType =
      Type::getInt64Ty(I.getParent()->getParent()->getContext());
  if (I.getType() != ConsideredSExtType)
    return false;
  // See if the sext is the one with the right type and used in at least one
  // GetElementPtrInst.
  for (const User *U : I.users()) {
    if (const GetElementPtrInst *GEPInst = dyn_cast<GetElementPtrInst>(U)) {
      Considerable = true;
      // A getelementptr is considered "complex" if it has more than 2
      // operands. We will promote a SExt used in such a complex GEP as we
      // expect some computation to be merged if it is done on 64 bits.
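      // For example (illustrative IR): "getelementptr i32, i32* %p, i64 %s"
      // has 2 operands and is not complex, whereas
      // "getelementptr [16 x i32], [16 x i32]* %a, i64 0, i64 %s" has 3
      // operands and is.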
2002 if (GEPInst->getNumOperands() > 2) { 2003 AllowPromotionWithoutCommonHeader = true; 2004 break; 2005 } 2006 } 2007 } 2008 return Considerable; 2009 } 2010 2011 bool AArch64TTIImpl::isLegalToVectorizeReduction( 2012 const RecurrenceDescriptor &RdxDesc, ElementCount VF) const { 2013 if (!VF.isScalable()) 2014 return true; 2015 2016 Type *Ty = RdxDesc.getRecurrenceType(); 2017 if (Ty->isBFloatTy() || !isElementTypeLegalForScalableVector(Ty)) 2018 return false; 2019 2020 switch (RdxDesc.getRecurrenceKind()) { 2021 case RecurKind::Add: 2022 case RecurKind::FAdd: 2023 case RecurKind::And: 2024 case RecurKind::Or: 2025 case RecurKind::Xor: 2026 case RecurKind::SMin: 2027 case RecurKind::SMax: 2028 case RecurKind::UMin: 2029 case RecurKind::UMax: 2030 case RecurKind::FMin: 2031 case RecurKind::FMax: 2032 case RecurKind::SelectICmp: 2033 case RecurKind::SelectFCmp: 2034 return true; 2035 default: 2036 return false; 2037 } 2038 } 2039 2040 InstructionCost 2041 AArch64TTIImpl::getMinMaxReductionCost(VectorType *Ty, VectorType *CondTy, 2042 bool IsUnsigned, 2043 TTI::TargetCostKind CostKind) { 2044 std::pair<InstructionCost, MVT> LT = TLI->getTypeLegalizationCost(DL, Ty); 2045 2046 if (LT.second.getScalarType() == MVT::f16 && !ST->hasFullFP16()) 2047 return BaseT::getMinMaxReductionCost(Ty, CondTy, IsUnsigned, CostKind); 2048 2049 assert((isa<ScalableVectorType>(Ty) == isa<ScalableVectorType>(CondTy)) && 2050 "Both vector needs to be equally scalable"); 2051 2052 InstructionCost LegalizationCost = 0; 2053 if (LT.first > 1) { 2054 Type *LegalVTy = EVT(LT.second).getTypeForEVT(Ty->getContext()); 2055 unsigned MinMaxOpcode = 2056 Ty->isFPOrFPVectorTy() 2057 ? Intrinsic::maxnum 2058 : (IsUnsigned ? Intrinsic::umin : Intrinsic::smin); 2059 IntrinsicCostAttributes Attrs(MinMaxOpcode, LegalVTy, {LegalVTy, LegalVTy}); 2060 LegalizationCost = getIntrinsicInstrCost(Attrs, CostKind) * (LT.first - 1); 2061 } 2062 2063 return LegalizationCost + /*Cost of horizontal reduction*/ 2; 2064 } 2065 2066 InstructionCost AArch64TTIImpl::getArithmeticReductionCostSVE( 2067 unsigned Opcode, VectorType *ValTy, TTI::TargetCostKind CostKind) { 2068 std::pair<InstructionCost, MVT> LT = TLI->getTypeLegalizationCost(DL, ValTy); 2069 InstructionCost LegalizationCost = 0; 2070 if (LT.first > 1) { 2071 Type *LegalVTy = EVT(LT.second).getTypeForEVT(ValTy->getContext()); 2072 LegalizationCost = getArithmeticInstrCost(Opcode, LegalVTy, CostKind); 2073 LegalizationCost *= LT.first - 1; 2074 } 2075 2076 int ISD = TLI->InstructionOpcodeToISD(Opcode); 2077 assert(ISD && "Invalid opcode"); 2078 // Add the final reduction cost for the legal horizontal reduction 2079 switch (ISD) { 2080 case ISD::ADD: 2081 case ISD::AND: 2082 case ISD::OR: 2083 case ISD::XOR: 2084 case ISD::FADD: 2085 return LegalizationCost + 2; 2086 default: 2087 return InstructionCost::getInvalid(); 2088 } 2089 } 2090 2091 InstructionCost 2092 AArch64TTIImpl::getArithmeticReductionCost(unsigned Opcode, VectorType *ValTy, 2093 Optional<FastMathFlags> FMF, 2094 TTI::TargetCostKind CostKind) { 2095 if (TTI::requiresOrderedReduction(FMF)) { 2096 if (auto *FixedVTy = dyn_cast<FixedVectorType>(ValTy)) { 2097 InstructionCost BaseCost = 2098 BaseT::getArithmeticReductionCost(Opcode, ValTy, FMF, CostKind); 2099 // Add on extra cost to reflect the extra overhead on some CPUs. We still 2100 // end up vectorizing for more computationally intensive loops. 
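      // For example, an ordered FAdd reduction over <4 x float> is charged the
      // base strict-reduction cost plus 4, one extra unit per element
      // (illustrative).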
2101 return BaseCost + FixedVTy->getNumElements(); 2102 } 2103 2104 if (Opcode != Instruction::FAdd) 2105 return InstructionCost::getInvalid(); 2106 2107 auto *VTy = cast<ScalableVectorType>(ValTy); 2108 InstructionCost Cost = 2109 getArithmeticInstrCost(Opcode, VTy->getScalarType(), CostKind); 2110 Cost *= getMaxNumElements(VTy->getElementCount()); 2111 return Cost; 2112 } 2113 2114 if (isa<ScalableVectorType>(ValTy)) 2115 return getArithmeticReductionCostSVE(Opcode, ValTy, CostKind); 2116 2117 std::pair<InstructionCost, MVT> LT = TLI->getTypeLegalizationCost(DL, ValTy); 2118 MVT MTy = LT.second; 2119 int ISD = TLI->InstructionOpcodeToISD(Opcode); 2120 assert(ISD && "Invalid opcode"); 2121 2122 // Horizontal adds can use the 'addv' instruction. We model the cost of these 2123 // instructions as twice a normal vector add, plus 1 for each legalization 2124 // step (LT.first). This is the only arithmetic vector reduction operation for 2125 // which we have an instruction. 2126 // OR, XOR and AND costs should match the codegen from: 2127 // OR: llvm/test/CodeGen/AArch64/reduce-or.ll 2128 // XOR: llvm/test/CodeGen/AArch64/reduce-xor.ll 2129 // AND: llvm/test/CodeGen/AArch64/reduce-and.ll 2130 static const CostTblEntry CostTblNoPairwise[]{ 2131 {ISD::ADD, MVT::v8i8, 2}, 2132 {ISD::ADD, MVT::v16i8, 2}, 2133 {ISD::ADD, MVT::v4i16, 2}, 2134 {ISD::ADD, MVT::v8i16, 2}, 2135 {ISD::ADD, MVT::v4i32, 2}, 2136 {ISD::OR, MVT::v8i8, 15}, 2137 {ISD::OR, MVT::v16i8, 17}, 2138 {ISD::OR, MVT::v4i16, 7}, 2139 {ISD::OR, MVT::v8i16, 9}, 2140 {ISD::OR, MVT::v2i32, 3}, 2141 {ISD::OR, MVT::v4i32, 5}, 2142 {ISD::OR, MVT::v2i64, 3}, 2143 {ISD::XOR, MVT::v8i8, 15}, 2144 {ISD::XOR, MVT::v16i8, 17}, 2145 {ISD::XOR, MVT::v4i16, 7}, 2146 {ISD::XOR, MVT::v8i16, 9}, 2147 {ISD::XOR, MVT::v2i32, 3}, 2148 {ISD::XOR, MVT::v4i32, 5}, 2149 {ISD::XOR, MVT::v2i64, 3}, 2150 {ISD::AND, MVT::v8i8, 15}, 2151 {ISD::AND, MVT::v16i8, 17}, 2152 {ISD::AND, MVT::v4i16, 7}, 2153 {ISD::AND, MVT::v8i16, 9}, 2154 {ISD::AND, MVT::v2i32, 3}, 2155 {ISD::AND, MVT::v4i32, 5}, 2156 {ISD::AND, MVT::v2i64, 3}, 2157 }; 2158 switch (ISD) { 2159 default: 2160 break; 2161 case ISD::ADD: 2162 if (const auto *Entry = CostTableLookup(CostTblNoPairwise, ISD, MTy)) 2163 return (LT.first - 1) + Entry->Cost; 2164 break; 2165 case ISD::XOR: 2166 case ISD::AND: 2167 case ISD::OR: 2168 const auto *Entry = CostTableLookup(CostTblNoPairwise, ISD, MTy); 2169 if (!Entry) 2170 break; 2171 auto *ValVTy = cast<FixedVectorType>(ValTy); 2172 if (!ValVTy->getElementType()->isIntegerTy(1) && 2173 MTy.getVectorNumElements() <= ValVTy->getNumElements() && 2174 isPowerOf2_32(ValVTy->getNumElements())) { 2175 InstructionCost ExtraCost = 0; 2176 if (LT.first != 1) { 2177 // Type needs to be split, so there is an extra cost of LT.first - 1 2178 // arithmetic ops. 
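        // E.g. a v32i8 AND reduction legalizes to two v16i8 halves
        // (LT.first == 2): one extra v16i8 AND plus the v16i8 table cost of 17
        // (illustrative).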
2179 auto *Ty = FixedVectorType::get(ValTy->getElementType(), 2180 MTy.getVectorNumElements()); 2181 ExtraCost = getArithmeticInstrCost(Opcode, Ty, CostKind); 2182 ExtraCost *= LT.first - 1; 2183 } 2184 return Entry->Cost + ExtraCost; 2185 } 2186 break; 2187 } 2188 return BaseT::getArithmeticReductionCost(Opcode, ValTy, FMF, CostKind); 2189 } 2190 2191 InstructionCost AArch64TTIImpl::getSpliceCost(VectorType *Tp, int Index) { 2192 static const CostTblEntry ShuffleTbl[] = { 2193 { TTI::SK_Splice, MVT::nxv16i8, 1 }, 2194 { TTI::SK_Splice, MVT::nxv8i16, 1 }, 2195 { TTI::SK_Splice, MVT::nxv4i32, 1 }, 2196 { TTI::SK_Splice, MVT::nxv2i64, 1 }, 2197 { TTI::SK_Splice, MVT::nxv2f16, 1 }, 2198 { TTI::SK_Splice, MVT::nxv4f16, 1 }, 2199 { TTI::SK_Splice, MVT::nxv8f16, 1 }, 2200 { TTI::SK_Splice, MVT::nxv2bf16, 1 }, 2201 { TTI::SK_Splice, MVT::nxv4bf16, 1 }, 2202 { TTI::SK_Splice, MVT::nxv8bf16, 1 }, 2203 { TTI::SK_Splice, MVT::nxv2f32, 1 }, 2204 { TTI::SK_Splice, MVT::nxv4f32, 1 }, 2205 { TTI::SK_Splice, MVT::nxv2f64, 1 }, 2206 }; 2207 2208 std::pair<InstructionCost, MVT> LT = TLI->getTypeLegalizationCost(DL, Tp); 2209 Type *LegalVTy = EVT(LT.second).getTypeForEVT(Tp->getContext()); 2210 TTI::TargetCostKind CostKind = TTI::TCK_RecipThroughput; 2211 EVT PromotedVT = LT.second.getScalarType() == MVT::i1 2212 ? TLI->getPromotedVTForPredicate(EVT(LT.second)) 2213 : LT.second; 2214 Type *PromotedVTy = EVT(PromotedVT).getTypeForEVT(Tp->getContext()); 2215 InstructionCost LegalizationCost = 0; 2216 if (Index < 0) { 2217 LegalizationCost = 2218 getCmpSelInstrCost(Instruction::ICmp, PromotedVTy, PromotedVTy, 2219 CmpInst::BAD_ICMP_PREDICATE, CostKind) + 2220 getCmpSelInstrCost(Instruction::Select, PromotedVTy, LegalVTy, 2221 CmpInst::BAD_ICMP_PREDICATE, CostKind); 2222 } 2223 2224 // Predicated splice are promoted when lowering. See AArch64ISelLowering.cpp 2225 // Cost performed on a promoted type. 2226 if (LT.second.getScalarType() == MVT::i1) { 2227 LegalizationCost += 2228 getCastInstrCost(Instruction::ZExt, PromotedVTy, LegalVTy, 2229 TTI::CastContextHint::None, CostKind) + 2230 getCastInstrCost(Instruction::Trunc, LegalVTy, PromotedVTy, 2231 TTI::CastContextHint::None, CostKind); 2232 } 2233 const auto *Entry = 2234 CostTableLookup(ShuffleTbl, TTI::SK_Splice, PromotedVT.getSimpleVT()); 2235 assert(Entry && "Illegal Type for Splice"); 2236 LegalizationCost += Entry->Cost; 2237 return LegalizationCost * LT.first; 2238 } 2239 2240 InstructionCost AArch64TTIImpl::getShuffleCost(TTI::ShuffleKind Kind, 2241 VectorType *Tp, 2242 ArrayRef<int> Mask, int Index, 2243 VectorType *SubTp) { 2244 Kind = improveShuffleKindFromMask(Kind, Mask); 2245 if (Kind == TTI::SK_Broadcast || Kind == TTI::SK_Transpose || 2246 Kind == TTI::SK_Select || Kind == TTI::SK_PermuteSingleSrc || 2247 Kind == TTI::SK_Reverse) { 2248 static const CostTblEntry ShuffleTbl[] = { 2249 // Broadcast shuffle kinds can be performed with 'dup'. 2250 { TTI::SK_Broadcast, MVT::v8i8, 1 }, 2251 { TTI::SK_Broadcast, MVT::v16i8, 1 }, 2252 { TTI::SK_Broadcast, MVT::v4i16, 1 }, 2253 { TTI::SK_Broadcast, MVT::v8i16, 1 }, 2254 { TTI::SK_Broadcast, MVT::v2i32, 1 }, 2255 { TTI::SK_Broadcast, MVT::v4i32, 1 }, 2256 { TTI::SK_Broadcast, MVT::v2i64, 1 }, 2257 { TTI::SK_Broadcast, MVT::v2f32, 1 }, 2258 { TTI::SK_Broadcast, MVT::v4f32, 1 }, 2259 { TTI::SK_Broadcast, MVT::v2f64, 1 }, 2260 // Transpose shuffle kinds can be performed with 'trn1/trn2' and 2261 // 'zip1/zip2' instructions. 
2262 { TTI::SK_Transpose, MVT::v8i8, 1 }, 2263 { TTI::SK_Transpose, MVT::v16i8, 1 }, 2264 { TTI::SK_Transpose, MVT::v4i16, 1 }, 2265 { TTI::SK_Transpose, MVT::v8i16, 1 }, 2266 { TTI::SK_Transpose, MVT::v2i32, 1 }, 2267 { TTI::SK_Transpose, MVT::v4i32, 1 }, 2268 { TTI::SK_Transpose, MVT::v2i64, 1 }, 2269 { TTI::SK_Transpose, MVT::v2f32, 1 }, 2270 { TTI::SK_Transpose, MVT::v4f32, 1 }, 2271 { TTI::SK_Transpose, MVT::v2f64, 1 }, 2272 // Select shuffle kinds. 2273 // TODO: handle vXi8/vXi16. 2274 { TTI::SK_Select, MVT::v2i32, 1 }, // mov. 2275 { TTI::SK_Select, MVT::v4i32, 2 }, // rev+trn (or similar). 2276 { TTI::SK_Select, MVT::v2i64, 1 }, // mov. 2277 { TTI::SK_Select, MVT::v2f32, 1 }, // mov. 2278 { TTI::SK_Select, MVT::v4f32, 2 }, // rev+trn (or similar). 2279 { TTI::SK_Select, MVT::v2f64, 1 }, // mov. 2280 // PermuteSingleSrc shuffle kinds. 2281 { TTI::SK_PermuteSingleSrc, MVT::v2i32, 1 }, // mov. 2282 { TTI::SK_PermuteSingleSrc, MVT::v4i32, 3 }, // perfectshuffle worst case. 2283 { TTI::SK_PermuteSingleSrc, MVT::v2i64, 1 }, // mov. 2284 { TTI::SK_PermuteSingleSrc, MVT::v2f32, 1 }, // mov. 2285 { TTI::SK_PermuteSingleSrc, MVT::v4f32, 3 }, // perfectshuffle worst case. 2286 { TTI::SK_PermuteSingleSrc, MVT::v2f64, 1 }, // mov. 2287 { TTI::SK_PermuteSingleSrc, MVT::v4i16, 3 }, // perfectshuffle worst case. 2288 { TTI::SK_PermuteSingleSrc, MVT::v4f16, 3 }, // perfectshuffle worst case. 2289 { TTI::SK_PermuteSingleSrc, MVT::v4bf16, 3 }, // perfectshuffle worst case. 2290 { TTI::SK_PermuteSingleSrc, MVT::v8i16, 8 }, // constpool + load + tbl 2291 { TTI::SK_PermuteSingleSrc, MVT::v8f16, 8 }, // constpool + load + tbl 2292 { TTI::SK_PermuteSingleSrc, MVT::v8bf16, 8 }, // constpool + load + tbl 2293 { TTI::SK_PermuteSingleSrc, MVT::v8i8, 8 }, // constpool + load + tbl 2294 { TTI::SK_PermuteSingleSrc, MVT::v16i8, 8 }, // constpool + load + tbl 2295 // Reverse can be lowered with `rev`. 2296 { TTI::SK_Reverse, MVT::v2i32, 1 }, // mov. 2297 { TTI::SK_Reverse, MVT::v4i32, 2 }, // REV64; EXT 2298 { TTI::SK_Reverse, MVT::v2i64, 1 }, // mov. 2299 { TTI::SK_Reverse, MVT::v2f32, 1 }, // mov. 2300 { TTI::SK_Reverse, MVT::v4f32, 2 }, // REV64; EXT 2301 { TTI::SK_Reverse, MVT::v2f64, 1 }, // mov. 
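      // (Illustrative reading of the fixed-width entries above: reversing a
      // <4 x float> costs 2 via REV64 + EXT, while reversing a <2 x double>
      // is a single instruction.)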
2302 // Broadcast shuffle kinds for scalable vectors 2303 { TTI::SK_Broadcast, MVT::nxv16i8, 1 }, 2304 { TTI::SK_Broadcast, MVT::nxv8i16, 1 }, 2305 { TTI::SK_Broadcast, MVT::nxv4i32, 1 }, 2306 { TTI::SK_Broadcast, MVT::nxv2i64, 1 }, 2307 { TTI::SK_Broadcast, MVT::nxv2f16, 1 }, 2308 { TTI::SK_Broadcast, MVT::nxv4f16, 1 }, 2309 { TTI::SK_Broadcast, MVT::nxv8f16, 1 }, 2310 { TTI::SK_Broadcast, MVT::nxv2bf16, 1 }, 2311 { TTI::SK_Broadcast, MVT::nxv4bf16, 1 }, 2312 { TTI::SK_Broadcast, MVT::nxv8bf16, 1 }, 2313 { TTI::SK_Broadcast, MVT::nxv2f32, 1 }, 2314 { TTI::SK_Broadcast, MVT::nxv4f32, 1 }, 2315 { TTI::SK_Broadcast, MVT::nxv2f64, 1 }, 2316 { TTI::SK_Broadcast, MVT::nxv16i1, 1 }, 2317 { TTI::SK_Broadcast, MVT::nxv8i1, 1 }, 2318 { TTI::SK_Broadcast, MVT::nxv4i1, 1 }, 2319 { TTI::SK_Broadcast, MVT::nxv2i1, 1 }, 2320 // Handle the cases for vector.reverse with scalable vectors 2321 { TTI::SK_Reverse, MVT::nxv16i8, 1 }, 2322 { TTI::SK_Reverse, MVT::nxv8i16, 1 }, 2323 { TTI::SK_Reverse, MVT::nxv4i32, 1 }, 2324 { TTI::SK_Reverse, MVT::nxv2i64, 1 }, 2325 { TTI::SK_Reverse, MVT::nxv2f16, 1 }, 2326 { TTI::SK_Reverse, MVT::nxv4f16, 1 }, 2327 { TTI::SK_Reverse, MVT::nxv8f16, 1 }, 2328 { TTI::SK_Reverse, MVT::nxv2bf16, 1 }, 2329 { TTI::SK_Reverse, MVT::nxv4bf16, 1 }, 2330 { TTI::SK_Reverse, MVT::nxv8bf16, 1 }, 2331 { TTI::SK_Reverse, MVT::nxv2f32, 1 }, 2332 { TTI::SK_Reverse, MVT::nxv4f32, 1 }, 2333 { TTI::SK_Reverse, MVT::nxv2f64, 1 }, 2334 { TTI::SK_Reverse, MVT::nxv16i1, 1 }, 2335 { TTI::SK_Reverse, MVT::nxv8i1, 1 }, 2336 { TTI::SK_Reverse, MVT::nxv4i1, 1 }, 2337 { TTI::SK_Reverse, MVT::nxv2i1, 1 }, 2338 }; 2339 std::pair<InstructionCost, MVT> LT = TLI->getTypeLegalizationCost(DL, Tp); 2340 if (const auto *Entry = CostTableLookup(ShuffleTbl, Kind, LT.second)) 2341 return LT.first * Entry->Cost; 2342 } 2343 if (Kind == TTI::SK_Splice && isa<ScalableVectorType>(Tp)) 2344 return getSpliceCost(Tp, Index); 2345 return BaseT::getShuffleCost(Kind, Tp, Mask, Index, SubTp); 2346 } 2347