//===- ARMTargetTransformInfo.cpp - ARM specific TTI ----------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

#include "ARMTargetTransformInfo.h"
#include "ARMSubtarget.h"
#include "MCTargetDesc/ARMAddressingModes.h"
#include "llvm/ADT/APInt.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/Analysis/LoopInfo.h"
#include "llvm/CodeGen/CostTable.h"
#include "llvm/CodeGen/ISDOpcodes.h"
#include "llvm/CodeGen/ValueTypes.h"
#include "llvm/IR/BasicBlock.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/Instruction.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/Intrinsics.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/IntrinsicsARM.h"
#include "llvm/IR/PatternMatch.h"
#include "llvm/IR/Type.h"
#include "llvm/MC/SubtargetFeature.h"
#include "llvm/Support/Casting.h"
#include "llvm/Support/KnownBits.h"
#include "llvm/Support/MachineValueType.h"
#include "llvm/Target/TargetMachine.h"
#include "llvm/Transforms/InstCombine/InstCombiner.h"
#include "llvm/Transforms/Utils/Local.h"
#include "llvm/Transforms/Utils/LoopUtils.h"
#include <algorithm>
#include <cassert>
#include <cstdint>
#include <utility>

using namespace llvm;

#define DEBUG_TYPE "armtti"

static cl::opt<bool> EnableMaskedLoadStores(
    "enable-arm-maskedldst", cl::Hidden, cl::init(true),
    cl::desc("Enable the generation of masked loads and stores"));

static cl::opt<bool> DisableLowOverheadLoops(
    "disable-arm-loloops", cl::Hidden, cl::init(false),
    cl::desc("Disable the generation of low-overhead loops"));

static cl::opt<bool>
    AllowWLSLoops("allow-arm-wlsloops", cl::Hidden, cl::init(true),
                  cl::desc("Enable the generation of WLS loops"));

extern cl::opt<TailPredication::Mode> EnableTailPredication;

extern cl::opt<bool> EnableMaskedGatherScatters;

extern cl::opt<unsigned> MVEMaxSupportedInterleaveFactor;

/// Convert a vector load intrinsic into a simple llvm load instruction.
/// This is beneficial when the underlying object being addressed comes
/// from a constant, since we get constant-folding for free.
static Value *simplifyNeonVld1(const IntrinsicInst &II, unsigned MemAlign,
                               InstCombiner::BuilderTy &Builder) {
  auto *IntrAlign = dyn_cast<ConstantInt>(II.getArgOperand(1));

  if (!IntrAlign)
    return nullptr;

  unsigned Alignment = IntrAlign->getLimitedValue() < MemAlign
                           ? MemAlign
                           : IntrAlign->getLimitedValue();

  if (!isPowerOf2_32(Alignment))
    return nullptr;

  auto *BCastInst = Builder.CreateBitCast(II.getArgOperand(0),
                                          PointerType::get(II.getType(), 0));
  return Builder.CreateAlignedLoad(II.getType(), BCastInst, Align(Alignment));
}

bool ARMTTIImpl::areInlineCompatible(const Function *Caller,
                                     const Function *Callee) const {
  const TargetMachine &TM = getTLI()->getTargetMachine();
  const FeatureBitset &CallerBits =
      TM.getSubtargetImpl(*Caller)->getFeatureBits();
  const FeatureBitset &CalleeBits =
      TM.getSubtargetImpl(*Callee)->getFeatureBits();

  // To inline a callee, all features not in the allowed list must match
  // exactly.
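  // Sketch of the intent (purely illustrative feature names): with
  //   CallerBits = {thumb2, dsp} and CalleeBits = {thumb2},
  // and assuming dsp is in InlineFeaturesAllowed, MatchExact compares
  // {thumb2} with {thumb2} and MatchSubset compares {} with {}, so the
  // callee is considered inline-compatible.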
  bool MatchExact = (CallerBits & ~InlineFeaturesAllowed) ==
                    (CalleeBits & ~InlineFeaturesAllowed);
  // For features in the allowed list, the callee's features must be a subset
  // of the caller's.
  bool MatchSubset = ((CallerBits & CalleeBits) & InlineFeaturesAllowed) ==
                     (CalleeBits & InlineFeaturesAllowed);
  return MatchExact && MatchSubset;
}

TTI::AddressingModeKind
ARMTTIImpl::getPreferredAddressingMode(const Loop *L,
                                       ScalarEvolution *SE) const {
  if (ST->hasMVEIntegerOps())
    return TTI::AMK_PostIndexed;

  if (L->getHeader()->getParent()->hasOptSize())
    return TTI::AMK_None;

  if (ST->isMClass() && ST->isThumb2() &&
      L->getNumBlocks() == 1)
    return TTI::AMK_PreIndexed;

  return TTI::AMK_None;
}

Optional<Instruction *>
ARMTTIImpl::instCombineIntrinsic(InstCombiner &IC, IntrinsicInst &II) const {
  using namespace PatternMatch;
  Intrinsic::ID IID = II.getIntrinsicID();
  switch (IID) {
  default:
    break;
  case Intrinsic::arm_neon_vld1: {
    Align MemAlign =
        getKnownAlignment(II.getArgOperand(0), IC.getDataLayout(), &II,
                          &IC.getAssumptionCache(), &IC.getDominatorTree());
    if (Value *V = simplifyNeonVld1(II, MemAlign.value(), IC.Builder)) {
      return IC.replaceInstUsesWith(II, V);
    }
    break;
  }

  case Intrinsic::arm_neon_vld2:
  case Intrinsic::arm_neon_vld3:
  case Intrinsic::arm_neon_vld4:
  case Intrinsic::arm_neon_vld2lane:
  case Intrinsic::arm_neon_vld3lane:
  case Intrinsic::arm_neon_vld4lane:
  case Intrinsic::arm_neon_vst1:
  case Intrinsic::arm_neon_vst2:
  case Intrinsic::arm_neon_vst3:
  case Intrinsic::arm_neon_vst4:
  case Intrinsic::arm_neon_vst2lane:
  case Intrinsic::arm_neon_vst3lane:
  case Intrinsic::arm_neon_vst4lane: {
    Align MemAlign =
        getKnownAlignment(II.getArgOperand(0), IC.getDataLayout(), &II,
                          &IC.getAssumptionCache(), &IC.getDominatorTree());
    unsigned AlignArg = II.getNumArgOperands() - 1;
    Value *AlignArgOp = II.getArgOperand(AlignArg);
    MaybeAlign Align = cast<ConstantInt>(AlignArgOp)->getMaybeAlignValue();
    if (Align && *Align < MemAlign) {
      return IC.replaceOperand(
          II, AlignArg,
          ConstantInt::get(Type::getInt32Ty(II.getContext()), MemAlign.value(),
                           false));
    }
    break;
  }

  case Intrinsic::arm_mve_pred_i2v: {
    Value *Arg = II.getArgOperand(0);
    Value *ArgArg;
    if (match(Arg, PatternMatch::m_Intrinsic<Intrinsic::arm_mve_pred_v2i>(
                       PatternMatch::m_Value(ArgArg))) &&
        II.getType() == ArgArg->getType()) {
      return IC.replaceInstUsesWith(II, ArgArg);
    }
    Constant *XorMask;
    if (match(Arg, m_Xor(PatternMatch::m_Intrinsic<Intrinsic::arm_mve_pred_v2i>(
                             PatternMatch::m_Value(ArgArg)),
                         PatternMatch::m_Constant(XorMask))) &&
        II.getType() == ArgArg->getType()) {
      if (auto *CI = dyn_cast<ConstantInt>(XorMask)) {
        if (CI->getValue().trunc(16).isAllOnesValue()) {
          auto TrueVector = IC.Builder.CreateVectorSplat(
              cast<FixedVectorType>(II.getType())->getNumElements(),
              IC.Builder.getTrue());
          return BinaryOperator::Create(Instruction::Xor, ArgArg, TrueVector);
        }
      }
    }
    KnownBits ScalarKnown(32);
    if (IC.SimplifyDemandedBits(&II, 0, APInt::getLowBitsSet(32, 16),
                                ScalarKnown, 0)) {
      return &II;
    }
    break;
  }
  case Intrinsic::arm_mve_pred_v2i: {
    Value *Arg = II.getArgOperand(0);
    Value *ArgArg;
    if (match(Arg, PatternMatch::m_Intrinsic<Intrinsic::arm_mve_pred_i2v>(
                       PatternMatch::m_Value(ArgArg)))) {
      return IC.replaceInstUsesWith(II, ArgArg);
    }
    if (!II.getMetadata(LLVMContext::MD_range)) {
      Type *IntTy32 = Type::getInt32Ty(II.getContext());
      Metadata *M[] = {
          ConstantAsMetadata::get(ConstantInt::get(IntTy32, 0)),
          ConstantAsMetadata::get(ConstantInt::get(IntTy32, 0xFFFF))};
      II.setMetadata(LLVMContext::MD_range, MDNode::get(II.getContext(), M));
      return &II;
    }
    break;
  }
  case Intrinsic::arm_mve_vadc:
  case Intrinsic::arm_mve_vadc_predicated: {
    unsigned CarryOp =
        (II.getIntrinsicID() == Intrinsic::arm_mve_vadc_predicated) ? 3 : 2;
    assert(II.getArgOperand(CarryOp)->getType()->getScalarSizeInBits() == 32 &&
           "Bad type for intrinsic!");

    KnownBits CarryKnown(32);
    if (IC.SimplifyDemandedBits(&II, CarryOp, APInt::getOneBitSet(32, 29),
                                CarryKnown)) {
      return &II;
    }
    break;
  }
  case Intrinsic::arm_mve_vmldava: {
    Instruction *I = cast<Instruction>(&II);
    if (I->hasOneUse()) {
      auto *User = cast<Instruction>(*I->user_begin());
      Value *OpZ;
      if (match(User, m_c_Add(m_Specific(I), m_Value(OpZ))) &&
          match(I->getOperand(3), m_Zero())) {
        Value *OpX = I->getOperand(4);
        Value *OpY = I->getOperand(5);
        Type *OpTy = OpX->getType();

        IC.Builder.SetInsertPoint(User);
        Value *V =
            IC.Builder.CreateIntrinsic(Intrinsic::arm_mve_vmldava, {OpTy},
                                       {I->getOperand(0), I->getOperand(1),
                                        I->getOperand(2), OpZ, OpX, OpY});

        IC.replaceInstUsesWith(*User, V);
        return IC.eraseInstFromFunction(*User);
      }
    }
    return None;
  }
  }
  return None;
}

InstructionCost ARMTTIImpl::getIntImmCost(const APInt &Imm, Type *Ty,
                                          TTI::TargetCostKind CostKind) {
  assert(Ty->isIntegerTy());

  unsigned Bits = Ty->getPrimitiveSizeInBits();
  if (Bits == 0 || Imm.getActiveBits() >= 64)
    return 4;

  int64_t SImmVal = Imm.getSExtValue();
  uint64_t ZImmVal = Imm.getZExtValue();
  if (!ST->isThumb()) {
    if ((SImmVal >= 0 && SImmVal < 65536) ||
        (ARM_AM::getSOImmVal(ZImmVal) != -1) ||
        (ARM_AM::getSOImmVal(~ZImmVal) != -1))
      return 1;
    return ST->hasV6T2Ops() ? 2 : 3;
  }
  if (ST->isThumb2()) {
    if ((SImmVal >= 0 && SImmVal < 65536) ||
        (ARM_AM::getT2SOImmVal(ZImmVal) != -1) ||
        (ARM_AM::getT2SOImmVal(~ZImmVal) != -1))
      return 1;
    return ST->hasV6T2Ops() ? 2 : 3;
  }
  // Thumb1: any i8 immediate costs 1.
  if (Bits == 8 || (SImmVal >= 0 && SImmVal < 256))
    return 1;
  if ((~SImmVal < 256) || ARM_AM::isThumbImmShiftedVal(ZImmVal))
    return 2;
  // Load from constantpool.
  return 3;
}

// Constants smaller than 256 fit in the immediate field of
// Thumb1 instructions so we return a zero cost and 1 otherwise.
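// For example (illustrative encodings): an immediate of 200 fits the 8-bit
// field of a Thumb1 "movs"/"adds", so it is treated as free for code size,
// while 300 does not and is charged one extra instruction.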
int ARMTTIImpl::getIntImmCodeSizeCost(unsigned Opcode, unsigned Idx,
                                      const APInt &Imm, Type *Ty) {
  if (Imm.isNonNegative() && Imm.getLimitedValue() < 256)
    return 0;

  return 1;
}

// Checks whether Inst is part of a min(max()) or max(min()) pattern
// that will match to an SSAT instruction.
static bool isSSATMinMaxPattern(Instruction *Inst, const APInt &Imm) {
  Value *LHS, *RHS;
  ConstantInt *C;
  SelectPatternFlavor InstSPF = matchSelectPattern(Inst, LHS, RHS).Flavor;

  if (InstSPF == SPF_SMAX &&
      PatternMatch::match(RHS, PatternMatch::m_ConstantInt(C)) &&
      C->getValue() == Imm && Imm.isNegative() && (-Imm).isPowerOf2()) {

    auto isSSatMin = [&](Value *MinInst) {
      if (isa<SelectInst>(MinInst)) {
        Value *MinLHS, *MinRHS;
        ConstantInt *MinC;
        SelectPatternFlavor MinSPF =
            matchSelectPattern(MinInst, MinLHS, MinRHS).Flavor;
        if (MinSPF == SPF_SMIN &&
            PatternMatch::match(MinRHS, PatternMatch::m_ConstantInt(MinC)) &&
            MinC->getValue() == ((-Imm) - 1))
          return true;
      }
      return false;
    };

    if (isSSatMin(Inst->getOperand(1)) ||
        (Inst->hasNUses(2) && (isSSatMin(*Inst->user_begin()) ||
                               isSSatMin(*(++Inst->user_begin())))))
      return true;
  }
  return false;
}

InstructionCost ARMTTIImpl::getIntImmCostInst(unsigned Opcode, unsigned Idx,
                                              const APInt &Imm, Type *Ty,
                                              TTI::TargetCostKind CostKind,
                                              Instruction *Inst) {
  // Division by a constant can be turned into multiplication, but only if we
  // know it's constant. So it's not so much that the immediate is cheap (it's
  // not), but that the alternative is worse.
  // FIXME: this is probably unneeded with GlobalISel.
  if ((Opcode == Instruction::SDiv || Opcode == Instruction::UDiv ||
       Opcode == Instruction::SRem || Opcode == Instruction::URem) &&
      Idx == 1)
    return 0;

  if (Opcode == Instruction::And) {
    // UXTB/UXTH
    if (Imm == 255 || Imm == 65535)
      return 0;
    // Conversion to BIC is free, and means we can use ~Imm instead.
    return std::min(getIntImmCost(Imm, Ty, CostKind),
                    getIntImmCost(~Imm, Ty, CostKind));
  }

  if (Opcode == Instruction::Add)
    // Conversion to SUB is free, and means we can use -Imm instead.
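    // e.g. "add r0, r1, #-10" is not directly encodable, but the equivalent
    // "sub r0, r1, #10" is, so charge whichever of Imm / -Imm is cheaper
    // (mnemonics are illustrative).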
    return std::min(getIntImmCost(Imm, Ty, CostKind),
                    getIntImmCost(-Imm, Ty, CostKind));

  if (Opcode == Instruction::ICmp && Imm.isNegative() &&
      Ty->getIntegerBitWidth() == 32) {
    int64_t NegImm = -Imm.getSExtValue();
    if (ST->isThumb2() && NegImm < 1 << 12)
      // icmp X, #-C -> cmn X, #C
      return 0;
    if (ST->isThumb() && NegImm < 1 << 8)
      // icmp X, #-C -> adds X, #C
      return 0;
  }

  // xor a, -1 can always be folded to MVN.
  if (Opcode == Instruction::Xor && Imm.isAllOnesValue())
    return 0;

  // Ensure that negative constants in min(max()) or max(min()) patterns that
  // match SSAT instructions don't get hoisted.
  if (Inst && ((ST->hasV6Ops() && !ST->isThumb()) || ST->isThumb2()) &&
      Ty->getIntegerBitWidth() <= 32) {
    if (isSSATMinMaxPattern(Inst, Imm) ||
        (isa<ICmpInst>(Inst) && Inst->hasOneUse() &&
         isSSATMinMaxPattern(cast<Instruction>(*Inst->user_begin()), Imm)))
      return 0;
  }

  return getIntImmCost(Imm, Ty, CostKind);
}

InstructionCost ARMTTIImpl::getCFInstrCost(unsigned Opcode,
                                           TTI::TargetCostKind CostKind,
                                           const Instruction *I) {
  if (CostKind == TTI::TCK_RecipThroughput &&
      (ST->hasNEON() || ST->hasMVEIntegerOps())) {
    // FIXME: The vectorizer is highly sensitive to the cost of these
    // instructions, which suggests that it may be using the costs incorrectly.
    // But, for now, just make them free to avoid performance regressions for
    // vector targets.
    return 0;
  }
  return BaseT::getCFInstrCost(Opcode, CostKind, I);
}

InstructionCost ARMTTIImpl::getCastInstrCost(unsigned Opcode, Type *Dst,
                                             Type *Src,
                                             TTI::CastContextHint CCH,
                                             TTI::TargetCostKind CostKind,
                                             const Instruction *I) {
  int ISD = TLI->InstructionOpcodeToISD(Opcode);
  assert(ISD && "Invalid opcode");

  // TODO: Allow non-throughput costs that aren't binary.
  auto AdjustCost = [&CostKind](InstructionCost Cost) -> InstructionCost {
    if (CostKind != TTI::TCK_RecipThroughput)
      return Cost == 0 ? 0 : 1;
    return Cost;
  };
  auto IsLegalFPType = [this](EVT VT) {
    EVT EltVT = VT.getScalarType();
    return (EltVT == MVT::f32 && ST->hasVFP2Base()) ||
           (EltVT == MVT::f64 && ST->hasFP64()) ||
           (EltVT == MVT::f16 && ST->hasFullFP16());
  };

  EVT SrcTy = TLI->getValueType(DL, Src);
  EVT DstTy = TLI->getValueType(DL, Dst);

  if (!SrcTy.isSimple() || !DstTy.isSimple())
    return AdjustCost(
        BaseT::getCastInstrCost(Opcode, Dst, Src, CCH, CostKind, I));

  // Extending masked loads and truncating masked stores are expensive because
  // we currently don't split them. This means that we'll likely end up
  // loading/storing each element individually (hence the high cost).
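  // For instance (illustrative numbers): a masked zext load of <8 x i16> to
  // <8 x i32> has a 256-bit destination, so the cost below becomes
  // 2 * 8 * MVEVectorCostFactor rather than the usual single-beat cost.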
  if ((ST->hasMVEIntegerOps() &&
       (Opcode == Instruction::Trunc || Opcode == Instruction::ZExt ||
        Opcode == Instruction::SExt)) ||
      (ST->hasMVEFloatOps() &&
       (Opcode == Instruction::FPExt || Opcode == Instruction::FPTrunc) &&
       IsLegalFPType(SrcTy) && IsLegalFPType(DstTy)))
    if (CCH == TTI::CastContextHint::Masked && DstTy.getSizeInBits() > 128)
      return 2 * DstTy.getVectorNumElements() *
             ST->getMVEVectorCostFactor(CostKind);

  // The extend of other kinds of load is free.
  if (CCH == TTI::CastContextHint::Normal ||
      CCH == TTI::CastContextHint::Masked) {
    static const TypeConversionCostTblEntry LoadConversionTbl[] = {
        {ISD::SIGN_EXTEND, MVT::i32, MVT::i16, 0},
        {ISD::ZERO_EXTEND, MVT::i32, MVT::i16, 0},
        {ISD::SIGN_EXTEND, MVT::i32, MVT::i8, 0},
        {ISD::ZERO_EXTEND, MVT::i32, MVT::i8, 0},
        {ISD::SIGN_EXTEND, MVT::i16, MVT::i8, 0},
        {ISD::ZERO_EXTEND, MVT::i16, MVT::i8, 0},
        {ISD::SIGN_EXTEND, MVT::i64, MVT::i32, 1},
        {ISD::ZERO_EXTEND, MVT::i64, MVT::i32, 1},
        {ISD::SIGN_EXTEND, MVT::i64, MVT::i16, 1},
        {ISD::ZERO_EXTEND, MVT::i64, MVT::i16, 1},
        {ISD::SIGN_EXTEND, MVT::i64, MVT::i8, 1},
        {ISD::ZERO_EXTEND, MVT::i64, MVT::i8, 1},
    };
    if (const auto *Entry = ConvertCostTableLookup(
            LoadConversionTbl, ISD, DstTy.getSimpleVT(), SrcTy.getSimpleVT()))
      return AdjustCost(Entry->Cost);

    static const TypeConversionCostTblEntry MVELoadConversionTbl[] = {
        {ISD::SIGN_EXTEND, MVT::v4i32, MVT::v4i16, 0},
        {ISD::ZERO_EXTEND, MVT::v4i32, MVT::v4i16, 0},
        {ISD::SIGN_EXTEND, MVT::v4i32, MVT::v4i8, 0},
        {ISD::ZERO_EXTEND, MVT::v4i32, MVT::v4i8, 0},
        {ISD::SIGN_EXTEND, MVT::v8i16, MVT::v8i8, 0},
        {ISD::ZERO_EXTEND, MVT::v8i16, MVT::v8i8, 0},
        // The following extend from a legal type to an illegal type, so we
        // need to split the load. This introduces an extra load operation,
        // but the extend is still "free".
        {ISD::SIGN_EXTEND, MVT::v8i32, MVT::v8i16, 1},
        {ISD::ZERO_EXTEND, MVT::v8i32, MVT::v8i16, 1},
        {ISD::SIGN_EXTEND, MVT::v16i32, MVT::v16i8, 3},
        {ISD::ZERO_EXTEND, MVT::v16i32, MVT::v16i8, 3},
        {ISD::SIGN_EXTEND, MVT::v16i16, MVT::v16i8, 1},
        {ISD::ZERO_EXTEND, MVT::v16i16, MVT::v16i8, 1},
    };
    if (SrcTy.isVector() && ST->hasMVEIntegerOps()) {
      if (const auto *Entry =
              ConvertCostTableLookup(MVELoadConversionTbl, ISD,
                                     DstTy.getSimpleVT(), SrcTy.getSimpleVT()))
        return Entry->Cost * ST->getMVEVectorCostFactor(CostKind);
    }

    static const TypeConversionCostTblEntry MVEFLoadConversionTbl[] = {
        // FPExtends are similar but also require the VCVT instructions.
        {ISD::FP_EXTEND, MVT::v4f32, MVT::v4f16, 1},
        {ISD::FP_EXTEND, MVT::v8f32, MVT::v8f16, 3},
    };
    if (SrcTy.isVector() && ST->hasMVEFloatOps()) {
      if (const auto *Entry =
              ConvertCostTableLookup(MVEFLoadConversionTbl, ISD,
                                     DstTy.getSimpleVT(), SrcTy.getSimpleVT()))
        return Entry->Cost * ST->getMVEVectorCostFactor(CostKind);
    }

    // The truncate of a store is free. This is the mirror of the extends
    // above.
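    // e.g. a trunc from <8 x i16> to <8 x i8> feeding a store can be selected
    // as a single narrowing store, hence the zero-cost entries below (the
    // exact instruction used is illustrative).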
    static const TypeConversionCostTblEntry MVEStoreConversionTbl[] = {
        {ISD::TRUNCATE, MVT::v4i32, MVT::v4i16, 0},
        {ISD::TRUNCATE, MVT::v4i32, MVT::v4i8, 0},
        {ISD::TRUNCATE, MVT::v8i16, MVT::v8i8, 0},
        {ISD::TRUNCATE, MVT::v8i32, MVT::v8i16, 1},
        {ISD::TRUNCATE, MVT::v8i32, MVT::v8i8, 1},
        {ISD::TRUNCATE, MVT::v16i32, MVT::v16i8, 3},
        {ISD::TRUNCATE, MVT::v16i16, MVT::v16i8, 1},
    };
    if (SrcTy.isVector() && ST->hasMVEIntegerOps()) {
      if (const auto *Entry =
              ConvertCostTableLookup(MVEStoreConversionTbl, ISD,
                                     SrcTy.getSimpleVT(), DstTy.getSimpleVT()))
        return Entry->Cost * ST->getMVEVectorCostFactor(CostKind);
    }

    static const TypeConversionCostTblEntry MVEFStoreConversionTbl[] = {
        {ISD::FP_ROUND, MVT::v4f32, MVT::v4f16, 1},
        {ISD::FP_ROUND, MVT::v8f32, MVT::v8f16, 3},
    };
    if (SrcTy.isVector() && ST->hasMVEFloatOps()) {
      if (const auto *Entry =
              ConvertCostTableLookup(MVEFStoreConversionTbl, ISD,
                                     SrcTy.getSimpleVT(), DstTy.getSimpleVT()))
        return Entry->Cost * ST->getMVEVectorCostFactor(CostKind);
    }
  }

  // NEON vector operations that can extend their inputs.
  if ((ISD == ISD::SIGN_EXTEND || ISD == ISD::ZERO_EXTEND) &&
      I && I->hasOneUse() && ST->hasNEON() && SrcTy.isVector()) {
    static const TypeConversionCostTblEntry NEONDoubleWidthTbl[] = {
      // vaddl
      { ISD::ADD, MVT::v4i32, MVT::v4i16, 0 },
      { ISD::ADD, MVT::v8i16, MVT::v8i8, 0 },
      // vsubl
      { ISD::SUB, MVT::v4i32, MVT::v4i16, 0 },
      { ISD::SUB, MVT::v8i16, MVT::v8i8, 0 },
      // vmull
      { ISD::MUL, MVT::v4i32, MVT::v4i16, 0 },
      { ISD::MUL, MVT::v8i16, MVT::v8i8, 0 },
      // vshll
      { ISD::SHL, MVT::v4i32, MVT::v4i16, 0 },
      { ISD::SHL, MVT::v8i16, MVT::v8i8, 0 },
    };

    auto *User = cast<Instruction>(*I->user_begin());
    int UserISD = TLI->InstructionOpcodeToISD(User->getOpcode());
    if (auto *Entry = ConvertCostTableLookup(NEONDoubleWidthTbl, UserISD,
                                             DstTy.getSimpleVT(),
                                             SrcTy.getSimpleVT())) {
      return AdjustCost(Entry->Cost);
    }
  }

  // Single to/from double precision conversions.
  if (Src->isVectorTy() && ST->hasNEON() &&
      ((ISD == ISD::FP_ROUND && SrcTy.getScalarType() == MVT::f64 &&
        DstTy.getScalarType() == MVT::f32) ||
       (ISD == ISD::FP_EXTEND && SrcTy.getScalarType() == MVT::f32 &&
        DstTy.getScalarType() == MVT::f64))) {
    static const CostTblEntry NEONFltDblTbl[] = {
        // Vector fptrunc/fpext conversions.
        {ISD::FP_ROUND, MVT::v2f64, 2},
        {ISD::FP_EXTEND, MVT::v2f32, 2},
        {ISD::FP_EXTEND, MVT::v4f32, 4}};

    std::pair<int, MVT> LT = TLI->getTypeLegalizationCost(DL, Src);
    if (const auto *Entry = CostTableLookup(NEONFltDblTbl, ISD, LT.second))
      return AdjustCost(LT.first * Entry->Cost);
  }

  // Some arithmetic, load and store operations have specific instructions
  // to cast up/down their types automatically at no extra cost.
  // TODO: Get these tables to know at least what the related operations are.
  static const TypeConversionCostTblEntry NEONVectorConversionTbl[] = {
    { ISD::SIGN_EXTEND, MVT::v4i32, MVT::v4i16, 1 },
    { ISD::ZERO_EXTEND, MVT::v4i32, MVT::v4i16, 1 },
    { ISD::SIGN_EXTEND, MVT::v2i64, MVT::v2i32, 1 },
    { ISD::ZERO_EXTEND, MVT::v2i64, MVT::v2i32, 1 },
    { ISD::TRUNCATE, MVT::v4i32, MVT::v4i64, 0 },
    { ISD::TRUNCATE, MVT::v4i16, MVT::v4i32, 1 },

    // The number of vmovl instructions for the extension.
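    // e.g. a sext from v4i8 to v4i32 is typically a vmovl.s8 followed by a
    // vmovl.s16, which is why that entry below costs 2 (illustrative lowering).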
    { ISD::SIGN_EXTEND, MVT::v8i16, MVT::v8i8, 1 },
    { ISD::ZERO_EXTEND, MVT::v8i16, MVT::v8i8, 1 },
    { ISD::SIGN_EXTEND, MVT::v4i32, MVT::v4i8, 2 },
    { ISD::ZERO_EXTEND, MVT::v4i32, MVT::v4i8, 2 },
    { ISD::SIGN_EXTEND, MVT::v2i64, MVT::v2i8, 3 },
    { ISD::ZERO_EXTEND, MVT::v2i64, MVT::v2i8, 3 },
    { ISD::SIGN_EXTEND, MVT::v2i64, MVT::v2i16, 2 },
    { ISD::ZERO_EXTEND, MVT::v2i64, MVT::v2i16, 2 },
    { ISD::SIGN_EXTEND, MVT::v4i64, MVT::v4i16, 3 },
    { ISD::ZERO_EXTEND, MVT::v4i64, MVT::v4i16, 3 },
    { ISD::SIGN_EXTEND, MVT::v8i32, MVT::v8i8, 3 },
    { ISD::ZERO_EXTEND, MVT::v8i32, MVT::v8i8, 3 },
    { ISD::SIGN_EXTEND, MVT::v8i64, MVT::v8i8, 7 },
    { ISD::ZERO_EXTEND, MVT::v8i64, MVT::v8i8, 7 },
    { ISD::SIGN_EXTEND, MVT::v8i64, MVT::v8i16, 6 },
    { ISD::ZERO_EXTEND, MVT::v8i64, MVT::v8i16, 6 },
    { ISD::SIGN_EXTEND, MVT::v16i32, MVT::v16i8, 6 },
    { ISD::ZERO_EXTEND, MVT::v16i32, MVT::v16i8, 6 },

    // Operations that we legalize using splitting.
    { ISD::TRUNCATE, MVT::v16i8, MVT::v16i32, 6 },
    { ISD::TRUNCATE, MVT::v8i8, MVT::v8i32, 3 },

    // Vector float <-> i32 conversions.
    { ISD::SINT_TO_FP, MVT::v4f32, MVT::v4i32, 1 },
    { ISD::UINT_TO_FP, MVT::v4f32, MVT::v4i32, 1 },

    { ISD::SINT_TO_FP, MVT::v2f32, MVT::v2i8, 3 },
    { ISD::UINT_TO_FP, MVT::v2f32, MVT::v2i8, 3 },
    { ISD::SINT_TO_FP, MVT::v2f32, MVT::v2i16, 2 },
    { ISD::UINT_TO_FP, MVT::v2f32, MVT::v2i16, 2 },
    { ISD::SINT_TO_FP, MVT::v2f32, MVT::v2i32, 1 },
    { ISD::UINT_TO_FP, MVT::v2f32, MVT::v2i32, 1 },
    { ISD::SINT_TO_FP, MVT::v4f32, MVT::v4i1, 3 },
    { ISD::UINT_TO_FP, MVT::v4f32, MVT::v4i1, 3 },
    { ISD::SINT_TO_FP, MVT::v4f32, MVT::v4i8, 3 },
    { ISD::UINT_TO_FP, MVT::v4f32, MVT::v4i8, 3 },
    { ISD::SINT_TO_FP, MVT::v4f32, MVT::v4i16, 2 },
    { ISD::UINT_TO_FP, MVT::v4f32, MVT::v4i16, 2 },
    { ISD::SINT_TO_FP, MVT::v8f32, MVT::v8i16, 4 },
    { ISD::UINT_TO_FP, MVT::v8f32, MVT::v8i16, 4 },
    { ISD::SINT_TO_FP, MVT::v8f32, MVT::v8i32, 2 },
    { ISD::UINT_TO_FP, MVT::v8f32, MVT::v8i32, 2 },
    { ISD::SINT_TO_FP, MVT::v16f32, MVT::v16i16, 8 },
    { ISD::UINT_TO_FP, MVT::v16f32, MVT::v16i16, 8 },
    { ISD::SINT_TO_FP, MVT::v16f32, MVT::v16i32, 4 },
    { ISD::UINT_TO_FP, MVT::v16f32, MVT::v16i32, 4 },

    { ISD::FP_TO_SINT, MVT::v4i32, MVT::v4f32, 1 },
    { ISD::FP_TO_UINT, MVT::v4i32, MVT::v4f32, 1 },
    { ISD::FP_TO_SINT, MVT::v4i8, MVT::v4f32, 3 },
    { ISD::FP_TO_UINT, MVT::v4i8, MVT::v4f32, 3 },
    { ISD::FP_TO_SINT, MVT::v4i16, MVT::v4f32, 2 },
    { ISD::FP_TO_UINT, MVT::v4i16, MVT::v4f32, 2 },

    // Vector double <-> i32 conversions.
    { ISD::SINT_TO_FP, MVT::v2f64, MVT::v2i32, 2 },
    { ISD::UINT_TO_FP, MVT::v2f64, MVT::v2i32, 2 },

    { ISD::SINT_TO_FP, MVT::v2f64, MVT::v2i8, 4 },
    { ISD::UINT_TO_FP, MVT::v2f64, MVT::v2i8, 4 },
    { ISD::SINT_TO_FP, MVT::v2f64, MVT::v2i16, 3 },
    { ISD::UINT_TO_FP, MVT::v2f64, MVT::v2i16, 3 },
    { ISD::SINT_TO_FP, MVT::v2f64, MVT::v2i32, 2 },
    { ISD::UINT_TO_FP, MVT::v2f64, MVT::v2i32, 2 },

    { ISD::FP_TO_SINT, MVT::v2i32, MVT::v2f64, 2 },
    { ISD::FP_TO_UINT, MVT::v2i32, MVT::v2f64, 2 },
    { ISD::FP_TO_SINT, MVT::v8i16, MVT::v8f32, 4 },
    { ISD::FP_TO_UINT, MVT::v8i16, MVT::v8f32, 4 },
    { ISD::FP_TO_SINT, MVT::v16i16, MVT::v16f32, 8 },
    { ISD::FP_TO_UINT, MVT::v16i16, MVT::v16f32, 8 }
  };

  if (SrcTy.isVector() && ST->hasNEON()) {
    if (const auto *Entry = ConvertCostTableLookup(NEONVectorConversionTbl, ISD,
                                                   DstTy.getSimpleVT(),
                                                   SrcTy.getSimpleVT()))
      return AdjustCost(Entry->Cost);
  }

  // Scalar float to integer conversions.
  static const TypeConversionCostTblEntry NEONFloatConversionTbl[] = {
    { ISD::FP_TO_SINT, MVT::i1, MVT::f32, 2 },
    { ISD::FP_TO_UINT, MVT::i1, MVT::f32, 2 },
    { ISD::FP_TO_SINT, MVT::i1, MVT::f64, 2 },
    { ISD::FP_TO_UINT, MVT::i1, MVT::f64, 2 },
    { ISD::FP_TO_SINT, MVT::i8, MVT::f32, 2 },
    { ISD::FP_TO_UINT, MVT::i8, MVT::f32, 2 },
    { ISD::FP_TO_SINT, MVT::i8, MVT::f64, 2 },
    { ISD::FP_TO_UINT, MVT::i8, MVT::f64, 2 },
    { ISD::FP_TO_SINT, MVT::i16, MVT::f32, 2 },
    { ISD::FP_TO_UINT, MVT::i16, MVT::f32, 2 },
    { ISD::FP_TO_SINT, MVT::i16, MVT::f64, 2 },
    { ISD::FP_TO_UINT, MVT::i16, MVT::f64, 2 },
    { ISD::FP_TO_SINT, MVT::i32, MVT::f32, 2 },
    { ISD::FP_TO_UINT, MVT::i32, MVT::f32, 2 },
    { ISD::FP_TO_SINT, MVT::i32, MVT::f64, 2 },
    { ISD::FP_TO_UINT, MVT::i32, MVT::f64, 2 },
    { ISD::FP_TO_SINT, MVT::i64, MVT::f32, 10 },
    { ISD::FP_TO_UINT, MVT::i64, MVT::f32, 10 },
    { ISD::FP_TO_SINT, MVT::i64, MVT::f64, 10 },
    { ISD::FP_TO_UINT, MVT::i64, MVT::f64, 10 }
  };
  if (SrcTy.isFloatingPoint() && ST->hasNEON()) {
    if (const auto *Entry = ConvertCostTableLookup(NEONFloatConversionTbl, ISD,
                                                   DstTy.getSimpleVT(),
                                                   SrcTy.getSimpleVT()))
      return AdjustCost(Entry->Cost);
  }

  // Scalar integer to float conversions.
  static const TypeConversionCostTblEntry NEONIntegerConversionTbl[] = {
    { ISD::SINT_TO_FP, MVT::f32, MVT::i1, 2 },
    { ISD::UINT_TO_FP, MVT::f32, MVT::i1, 2 },
    { ISD::SINT_TO_FP, MVT::f64, MVT::i1, 2 },
    { ISD::UINT_TO_FP, MVT::f64, MVT::i1, 2 },
    { ISD::SINT_TO_FP, MVT::f32, MVT::i8, 2 },
    { ISD::UINT_TO_FP, MVT::f32, MVT::i8, 2 },
    { ISD::SINT_TO_FP, MVT::f64, MVT::i8, 2 },
    { ISD::UINT_TO_FP, MVT::f64, MVT::i8, 2 },
    { ISD::SINT_TO_FP, MVT::f32, MVT::i16, 2 },
    { ISD::UINT_TO_FP, MVT::f32, MVT::i16, 2 },
    { ISD::SINT_TO_FP, MVT::f64, MVT::i16, 2 },
    { ISD::UINT_TO_FP, MVT::f64, MVT::i16, 2 },
    { ISD::SINT_TO_FP, MVT::f32, MVT::i32, 2 },
    { ISD::UINT_TO_FP, MVT::f32, MVT::i32, 2 },
    { ISD::SINT_TO_FP, MVT::f64, MVT::i32, 2 },
    { ISD::UINT_TO_FP, MVT::f64, MVT::i32, 2 },
    { ISD::SINT_TO_FP, MVT::f32, MVT::i64, 10 },
    { ISD::UINT_TO_FP, MVT::f32, MVT::i64, 10 },
    { ISD::SINT_TO_FP, MVT::f64, MVT::i64, 10 },
    { ISD::UINT_TO_FP, MVT::f64, MVT::i64, 10 }
  };

  if (SrcTy.isInteger() && ST->hasNEON()) {
    if (const auto *Entry = ConvertCostTableLookup(NEONIntegerConversionTbl,
                                                   ISD, DstTy.getSimpleVT(),
                                                   SrcTy.getSimpleVT()))
      return AdjustCost(Entry->Cost);
  }

  // MVE extend costs, taken from codegen tests. i8->i16 or i16->i32 is one
  // instruction, i8->i32 is two. i64 zexts are a VAND with a constant; sexts
  // are linearised so take more.
  static const TypeConversionCostTblEntry MVEVectorConversionTbl[] = {
    { ISD::SIGN_EXTEND, MVT::v8i16, MVT::v8i8, 1 },
    { ISD::ZERO_EXTEND, MVT::v8i16, MVT::v8i8, 1 },
    { ISD::SIGN_EXTEND, MVT::v4i32, MVT::v4i8, 2 },
    { ISD::ZERO_EXTEND, MVT::v4i32, MVT::v4i8, 2 },
    { ISD::SIGN_EXTEND, MVT::v2i64, MVT::v2i8, 10 },
    { ISD::ZERO_EXTEND, MVT::v2i64, MVT::v2i8, 2 },
    { ISD::SIGN_EXTEND, MVT::v4i32, MVT::v4i16, 1 },
    { ISD::ZERO_EXTEND, MVT::v4i32, MVT::v4i16, 1 },
    { ISD::SIGN_EXTEND, MVT::v2i64, MVT::v2i16, 10 },
    { ISD::ZERO_EXTEND, MVT::v2i64, MVT::v2i16, 2 },
    { ISD::SIGN_EXTEND, MVT::v2i64, MVT::v2i32, 8 },
    { ISD::ZERO_EXTEND, MVT::v2i64, MVT::v2i32, 2 },
  };

  if (SrcTy.isVector() && ST->hasMVEIntegerOps()) {
    if (const auto *Entry = ConvertCostTableLookup(MVEVectorConversionTbl,
                                                   ISD, DstTy.getSimpleVT(),
                                                   SrcTy.getSimpleVT()))
      return Entry->Cost * ST->getMVEVectorCostFactor(CostKind);
  }

  if (ISD == ISD::FP_ROUND || ISD == ISD::FP_EXTEND) {
    // As a general rule, fp converts that were not matched above are
    // scalarized and cost 1 vcvt for each lane, so long as the instruction is
    // available. If not, they will become a series of function calls.
    const InstructionCost CallCost =
        getCallInstrCost(nullptr, Dst, {Src}, CostKind);
    int Lanes = 1;
    if (SrcTy.isFixedLengthVector())
      Lanes = SrcTy.getVectorNumElements();

    if (IsLegalFPType(SrcTy) && IsLegalFPType(DstTy))
      return Lanes;
    else
      return Lanes * CallCost;
  }

  if (ISD == ISD::TRUNCATE && ST->hasMVEIntegerOps() &&
      SrcTy.isFixedLengthVector()) {
    // Treat a truncate with a larger than legal source (128 bits for MVE) as
    // expensive: 2 instructions per lane.
    if ((SrcTy.getScalarType() == MVT::i8 ||
         SrcTy.getScalarType() == MVT::i16 ||
         SrcTy.getScalarType() == MVT::i32) &&
        SrcTy.getSizeInBits() > 128 &&
        SrcTy.getSizeInBits() > DstTy.getSizeInBits())
      return SrcTy.getVectorNumElements() * 2;
  }

  // Scalar integer conversion costs.
  static const TypeConversionCostTblEntry ARMIntegerConversionTbl[] = {
    // i16 -> i64 requires two dependent operations.
    { ISD::SIGN_EXTEND, MVT::i64, MVT::i16, 2 },

    // Truncates on i64 are assumed to be free.
    { ISD::TRUNCATE, MVT::i32, MVT::i64, 0 },
    { ISD::TRUNCATE, MVT::i16, MVT::i64, 0 },
    { ISD::TRUNCATE, MVT::i8, MVT::i64, 0 },
    { ISD::TRUNCATE, MVT::i1, MVT::i64, 0 }
  };

  if (SrcTy.isInteger()) {
    if (const auto *Entry = ConvertCostTableLookup(ARMIntegerConversionTbl, ISD,
                                                   DstTy.getSimpleVT(),
                                                   SrcTy.getSimpleVT()))
      return AdjustCost(Entry->Cost);
  }

  int BaseCost = ST->hasMVEIntegerOps() && Src->isVectorTy()
                     ? ST->getMVEVectorCostFactor(CostKind)
                     : 1;
  return AdjustCost(
      BaseCost * BaseT::getCastInstrCost(Opcode, Dst, Src, CCH, CostKind, I));
}

InstructionCost ARMTTIImpl::getVectorInstrCost(unsigned Opcode, Type *ValTy,
                                               unsigned Index) {
  // Penalize inserting into a D-subregister. We end up with a three times
  // lower estimated throughput on swift.
  if (ST->hasSlowLoadDSubregister() && Opcode == Instruction::InsertElement &&
      ValTy->isVectorTy() && ValTy->getScalarSizeInBits() <= 32)
    return 3;

  if (ST->hasNEON() && (Opcode == Instruction::InsertElement ||
                        Opcode == Instruction::ExtractElement)) {
    // Cross-class copies are expensive on many microarchitectures,
    // so assume they are expensive by default.
    if (cast<VectorType>(ValTy)->getElementType()->isIntegerTy())
      return 3;

    // Even if it's not a cross class copy, this likely leads to mixing
    // of NEON and VFP code and should therefore be penalized.
    if (ValTy->isVectorTy() &&
        ValTy->getScalarSizeInBits() <= 32)
      return std::max<InstructionCost>(
          BaseT::getVectorInstrCost(Opcode, ValTy, Index), 2U);
  }

  if (ST->hasMVEIntegerOps() && (Opcode == Instruction::InsertElement ||
                                 Opcode == Instruction::ExtractElement)) {
    // Integer cross-lane moves are more expensive than float, which can
    // sometimes just be vmovs. Integers involve being passed to GPR
    // registers, causing more of a delay.
    std::pair<unsigned, MVT> LT =
        getTLI()->getTypeLegalizationCost(DL, ValTy->getScalarType());
    return LT.first * (ValTy->getScalarType()->isIntegerTy() ? 4 : 1);
  }

  return BaseT::getVectorInstrCost(Opcode, ValTy, Index);
}

InstructionCost ARMTTIImpl::getCmpSelInstrCost(unsigned Opcode, Type *ValTy,
                                               Type *CondTy,
                                               CmpInst::Predicate VecPred,
                                               TTI::TargetCostKind CostKind,
                                               const Instruction *I) {
  int ISD = TLI->InstructionOpcodeToISD(Opcode);

  // Thumb scalar code size cost for select.
  if (CostKind == TTI::TCK_CodeSize && ISD == ISD::SELECT &&
      ST->isThumb() && !ValTy->isVectorTy()) {
    // Assume expensive structs.
    if (TLI->getValueType(DL, ValTy, true) == MVT::Other)
      return TTI::TCC_Expensive;

    // Select costs can vary because they:
    // - may require one or more conditional mov (including an IT),
    // - can't operate directly on immediates,
    // - require live flags, which we can't copy around easily.
    int Cost = TLI->getTypeLegalizationCost(DL, ValTy).first;

    // Possible IT instruction for Thumb2, or more for Thumb1.
    ++Cost;

    // i1 values may need rematerialising by using mov immediates and/or
    // flag setting instructions.
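    // Rough worked example (illustrative): a scalar i1 select would be
    // LegalizationCost(1) + 1 for the IT/conditional mov + 1 for the i1
    // rematerialisation below, i.e. a code-size cost of about 3.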
    if (ValTy->isIntegerTy(1))
      ++Cost;

    return Cost;
  }

  // If this is a vector min/max/abs, use the cost of that intrinsic directly
  // instead. Hopefully when min/max intrinsics are more prevalent this code
  // will not be needed.
  const Instruction *Sel = I;
  if ((Opcode == Instruction::ICmp || Opcode == Instruction::FCmp) && Sel &&
      Sel->hasOneUse())
    Sel = cast<Instruction>(Sel->user_back());
  if (Sel && ValTy->isVectorTy() &&
      (ValTy->isIntOrIntVectorTy() || ValTy->isFPOrFPVectorTy())) {
    const Value *LHS, *RHS;
    SelectPatternFlavor SPF = matchSelectPattern(Sel, LHS, RHS).Flavor;
    unsigned IID = 0;
    switch (SPF) {
    case SPF_ABS:
      IID = Intrinsic::abs;
      break;
    case SPF_SMIN:
      IID = Intrinsic::smin;
      break;
    case SPF_SMAX:
      IID = Intrinsic::smax;
      break;
    case SPF_UMIN:
      IID = Intrinsic::umin;
      break;
    case SPF_UMAX:
      IID = Intrinsic::umax;
      break;
    case SPF_FMINNUM:
      IID = Intrinsic::minnum;
      break;
    case SPF_FMAXNUM:
      IID = Intrinsic::maxnum;
      break;
    default:
      break;
    }
    if (IID) {
      // The ICmp is free, the select gets the cost of the min/max/etc.
      if (Sel != I)
        return 0;
      IntrinsicCostAttributes CostAttrs(IID, ValTy, {ValTy, ValTy});
      return getIntrinsicInstrCost(CostAttrs, CostKind);
    }
  }

  // On NEON a vector select gets lowered to vbsl.
  if (ST->hasNEON() && ValTy->isVectorTy() && ISD == ISD::SELECT && CondTy) {
    // Lowering of some vector selects is currently far from perfect.
    static const TypeConversionCostTblEntry NEONVectorSelectTbl[] = {
      { ISD::SELECT, MVT::v4i1, MVT::v4i64, 4*4 + 1*2 + 1 },
      { ISD::SELECT, MVT::v8i1, MVT::v8i64, 50 },
      { ISD::SELECT, MVT::v16i1, MVT::v16i64, 100 }
    };

    EVT SelCondTy = TLI->getValueType(DL, CondTy);
    EVT SelValTy = TLI->getValueType(DL, ValTy);
    if (SelCondTy.isSimple() && SelValTy.isSimple()) {
      if (const auto *Entry = ConvertCostTableLookup(NEONVectorSelectTbl, ISD,
                                                     SelCondTy.getSimpleVT(),
                                                     SelValTy.getSimpleVT()))
        return Entry->Cost;
    }

    std::pair<int, MVT> LT = TLI->getTypeLegalizationCost(DL, ValTy);
    return LT.first;
  }

  if (ST->hasMVEIntegerOps() && ValTy->isVectorTy() &&
      (Opcode == Instruction::ICmp || Opcode == Instruction::FCmp) &&
      cast<FixedVectorType>(ValTy)->getNumElements() > 1) {
    FixedVectorType *VecValTy = cast<FixedVectorType>(ValTy);
    FixedVectorType *VecCondTy = dyn_cast_or_null<FixedVectorType>(CondTy);
    if (!VecCondTy)
      VecCondTy = cast<FixedVectorType>(CmpInst::makeCmpResultType(VecValTy));

    // If we don't have mve.fp, any fp operations will need to be scalarized.
    if (Opcode == Instruction::FCmp && !ST->hasMVEFloatOps()) {
      // One scalarization insert, one scalarization extract and the cost of
      // the fcmps.
      return BaseT::getScalarizationOverhead(VecValTy, false, true) +
             BaseT::getScalarizationOverhead(VecCondTy, true, false) +
             VecValTy->getNumElements() *
                 getCmpSelInstrCost(Opcode, ValTy->getScalarType(),
                                    VecCondTy->getScalarType(), VecPred,
                                    CostKind, I);
    }

    std::pair<unsigned, MVT> LT = TLI->getTypeLegalizationCost(DL, ValTy);
    int BaseCost = ST->getMVEVectorCostFactor(CostKind);
    // There are two types - the input that specifies the type of the compare
    // and the output vXi1 type. Because we don't know how the output will be
    // split, we may need an expensive shuffle to get two in sync. This has the
    // effect of making larger than legal compares (v8i32 for example)
    // expensive.
    if (LT.second.getVectorNumElements() > 2) {
      if (LT.first > 1)
        return LT.first * BaseCost +
               BaseT::getScalarizationOverhead(VecCondTy, true, false);
      return BaseCost;
    }
  }

  // Default to cheap (throughput/size of 1 instruction) but adjust throughput
  // for "multiple beats" potentially needed by MVE instructions.
  int BaseCost = 1;
  if (ST->hasMVEIntegerOps() && ValTy->isVectorTy())
    BaseCost = ST->getMVEVectorCostFactor(CostKind);

  return BaseCost *
         BaseT::getCmpSelInstrCost(Opcode, ValTy, CondTy, VecPred, CostKind, I);
}

InstructionCost ARMTTIImpl::getAddressComputationCost(Type *Ty,
                                                      ScalarEvolution *SE,
                                                      const SCEV *Ptr) {
  // Address computations in vectorized code with non-consecutive addresses
  // will likely result in more instructions compared to scalar code where the
  // computation can more often be merged into the index mode. The resulting
  // extra micro-ops can significantly decrease throughput.
  unsigned NumVectorInstToHideOverhead = 10;
  int MaxMergeDistance = 64;

  if (ST->hasNEON()) {
    if (Ty->isVectorTy() && SE &&
        !BaseT::isConstantStridedAccessLessThan(SE, Ptr, MaxMergeDistance + 1))
      return NumVectorInstToHideOverhead;

    // In many cases the address computation is not merged into the instruction
    // addressing mode.
    return 1;
  }
  return BaseT::getAddressComputationCost(Ty, SE, Ptr);
}

bool ARMTTIImpl::isProfitableLSRChainElement(Instruction *I) {
  if (IntrinsicInst *II = dyn_cast<IntrinsicInst>(I)) {
    // If a VCTP is part of a chain, it's already profitable and shouldn't be
    // optimized, else LSR may block tail-predication.
    switch (II->getIntrinsicID()) {
    case Intrinsic::arm_mve_vctp8:
    case Intrinsic::arm_mve_vctp16:
    case Intrinsic::arm_mve_vctp32:
    case Intrinsic::arm_mve_vctp64:
      return true;
    default:
      break;
    }
  }
  return false;
}

bool ARMTTIImpl::isLegalMaskedLoad(Type *DataTy, Align Alignment) {
  if (!EnableMaskedLoadStores || !ST->hasMVEIntegerOps())
    return false;

  if (auto *VecTy = dyn_cast<FixedVectorType>(DataTy)) {
    // Don't support v2i1 yet.
    if (VecTy->getNumElements() == 2)
      return false;

    // We don't support extending fp types.
    unsigned VecWidth = DataTy->getPrimitiveSizeInBits();
    if (VecWidth != 128 && VecTy->getElementType()->isFloatingPointTy())
      return false;
  }

  unsigned EltWidth = DataTy->getScalarSizeInBits();
  return (EltWidth == 32 && Alignment >= 4) ||
         (EltWidth == 16 && Alignment >= 2) || (EltWidth == 8);
}

bool ARMTTIImpl::isLegalMaskedGather(Type *Ty, Align Alignment) {
  if (!EnableMaskedGatherScatters || !ST->hasMVEIntegerOps())
    return false;

  // This method is called in 2 places:
  //  - from the vectorizer with a scalar type, in which case we need to get
  //    this as good as we can with the limited info we have (and rely on the
  //    cost model for the rest).
  //  - from the masked intrinsic lowering pass with the actual vector type.
  // For MVE, we have a custom lowering pass that will already have custom
  // legalised any gathers that we can to MVE intrinsics, and want to expand
  // all the rest. The pass runs before the masked intrinsic lowering pass, so
  // if we are here, we know we want to expand.
  if (isa<VectorType>(Ty))
    return false;

  unsigned EltWidth = Ty->getScalarSizeInBits();
  return ((EltWidth == 32 && Alignment >= 4) ||
          (EltWidth == 16 && Alignment >= 2) || EltWidth == 8);
}

/// Given a memcpy/memset/memmove instruction, return the number of memory
/// operations performed, via querying findOptimalMemOpLowering. Returns -1 if
/// a call is used.
int ARMTTIImpl::getNumMemOps(const IntrinsicInst *I) const {
  MemOp MOp;
  unsigned DstAddrSpace = ~0u;
  unsigned SrcAddrSpace = ~0u;
  const Function *F = I->getParent()->getParent();

  if (const auto *MC = dyn_cast<MemTransferInst>(I)) {
    ConstantInt *C = dyn_cast<ConstantInt>(MC->getLength());
    // If 'size' is not a constant, a library call will be generated.
    if (!C)
      return -1;

    const unsigned Size = C->getValue().getZExtValue();
    const Align DstAlign = *MC->getDestAlign();
    const Align SrcAlign = *MC->getSourceAlign();

    MOp = MemOp::Copy(Size, /*DstAlignCanChange*/ false, DstAlign, SrcAlign,
                      /*IsVolatile*/ false);
    DstAddrSpace = MC->getDestAddressSpace();
    SrcAddrSpace = MC->getSourceAddressSpace();
  } else if (const auto *MS = dyn_cast<MemSetInst>(I)) {
    ConstantInt *C = dyn_cast<ConstantInt>(MS->getLength());
    // If 'size' is not a constant, a library call will be generated.
    if (!C)
      return -1;

    const unsigned Size = C->getValue().getZExtValue();
    const Align DstAlign = *MS->getDestAlign();

    MOp = MemOp::Set(Size, /*DstAlignCanChange*/ false, DstAlign,
                     /*IsZeroMemset*/ false, /*IsVolatile*/ false);
    DstAddrSpace = MS->getDestAddressSpace();
  } else
    llvm_unreachable("Expected a memcpy/move or memset!");

  unsigned Limit, Factor = 2;
  switch (I->getIntrinsicID()) {
  case Intrinsic::memcpy:
    Limit = TLI->getMaxStoresPerMemcpy(F->hasMinSize());
    break;
  case Intrinsic::memmove:
    Limit = TLI->getMaxStoresPerMemmove(F->hasMinSize());
    break;
  case Intrinsic::memset:
    Limit = TLI->getMaxStoresPerMemset(F->hasMinSize());
    Factor = 1;
    break;
  default:
    llvm_unreachable("Expected a memcpy/move or memset!");
  }

  // MemOps will be populated with a list of data types that need to be
  // loaded and stored. That's why we multiply the number of elements by 2 to
  // get the cost for this memcpy.
  std::vector<EVT> MemOps;
  if (getTLI()->findOptimalMemOpLowering(
          MemOps, Limit, MOp, DstAddrSpace,
          SrcAddrSpace, F->getAttributes()))
    return MemOps.size() * Factor;

  // If we can't find an optimal memop lowering, return the default cost.
  return -1;
}

InstructionCost ARMTTIImpl::getMemcpyCost(const Instruction *I) {
  int NumOps = getNumMemOps(cast<IntrinsicInst>(I));

  // To model the cost of a library call, we assume 1 for the call, and
  // 3 for the argument setup.
  if (NumOps == -1)
    return 4;
  return NumOps;
}

InstructionCost ARMTTIImpl::getShuffleCost(TTI::ShuffleKind Kind,
                                           VectorType *Tp, ArrayRef<int> Mask,
                                           int Index, VectorType *SubTp) {
  if (ST->hasNEON()) {
    if (Kind == TTI::SK_Broadcast) {
      static const CostTblEntry NEONDupTbl[] = {
          // VDUP handles these cases.
          {ISD::VECTOR_SHUFFLE, MVT::v2i32, 1},
          {ISD::VECTOR_SHUFFLE, MVT::v2f32, 1},
          {ISD::VECTOR_SHUFFLE, MVT::v2i64, 1},
          {ISD::VECTOR_SHUFFLE, MVT::v2f64, 1},
          {ISD::VECTOR_SHUFFLE, MVT::v4i16, 1},
          {ISD::VECTOR_SHUFFLE, MVT::v8i8, 1},

          {ISD::VECTOR_SHUFFLE, MVT::v4i32, 1},
          {ISD::VECTOR_SHUFFLE, MVT::v4f32, 1},
          {ISD::VECTOR_SHUFFLE, MVT::v8i16, 1},
          {ISD::VECTOR_SHUFFLE, MVT::v16i8, 1}};

      std::pair<int, MVT> LT = TLI->getTypeLegalizationCost(DL, Tp);

      if (const auto *Entry =
              CostTableLookup(NEONDupTbl, ISD::VECTOR_SHUFFLE, LT.second))
        return LT.first * Entry->Cost;
    }
    if (Kind == TTI::SK_Reverse) {
      static const CostTblEntry NEONShuffleTbl[] = {
          // Reverse shuffles cost one instruction if we are shuffling within a
          // double word (vrev) or two if we shuffle a quad word (vrev, vext).
          {ISD::VECTOR_SHUFFLE, MVT::v2i32, 1},
          {ISD::VECTOR_SHUFFLE, MVT::v2f32, 1},
          {ISD::VECTOR_SHUFFLE, MVT::v2i64, 1},
          {ISD::VECTOR_SHUFFLE, MVT::v2f64, 1},
          {ISD::VECTOR_SHUFFLE, MVT::v4i16, 1},
          {ISD::VECTOR_SHUFFLE, MVT::v8i8, 1},

          {ISD::VECTOR_SHUFFLE, MVT::v4i32, 2},
          {ISD::VECTOR_SHUFFLE, MVT::v4f32, 2},
          {ISD::VECTOR_SHUFFLE, MVT::v8i16, 2},
          {ISD::VECTOR_SHUFFLE, MVT::v16i8, 2}};

      std::pair<int, MVT> LT = TLI->getTypeLegalizationCost(DL, Tp);

      if (const auto *Entry =
              CostTableLookup(NEONShuffleTbl, ISD::VECTOR_SHUFFLE, LT.second))
        return LT.first * Entry->Cost;
    }
    if (Kind == TTI::SK_Select) {
      static const CostTblEntry NEONSelShuffleTbl[] = {
          // Select shuffle cost table for ARM. Cost is the number of
          // instructions required to create the shuffled vector.

          {ISD::VECTOR_SHUFFLE, MVT::v2f32, 1},
          {ISD::VECTOR_SHUFFLE, MVT::v2i64, 1},
          {ISD::VECTOR_SHUFFLE, MVT::v2f64, 1},
          {ISD::VECTOR_SHUFFLE, MVT::v2i32, 1},

          {ISD::VECTOR_SHUFFLE, MVT::v4i32, 2},
          {ISD::VECTOR_SHUFFLE, MVT::v4f32, 2},
          {ISD::VECTOR_SHUFFLE, MVT::v4i16, 2},

          {ISD::VECTOR_SHUFFLE, MVT::v8i16, 16},

          {ISD::VECTOR_SHUFFLE, MVT::v16i8, 32}};

      std::pair<int, MVT> LT = TLI->getTypeLegalizationCost(DL, Tp);
      if (const auto *Entry = CostTableLookup(NEONSelShuffleTbl,
                                              ISD::VECTOR_SHUFFLE, LT.second))
        return LT.first * Entry->Cost;
    }
  }
  if (ST->hasMVEIntegerOps()) {
    if (Kind == TTI::SK_Broadcast) {
      static const CostTblEntry MVEDupTbl[] = {
          // VDUP handles these cases.
          {ISD::VECTOR_SHUFFLE, MVT::v4i32, 1},
          {ISD::VECTOR_SHUFFLE, MVT::v8i16, 1},
          {ISD::VECTOR_SHUFFLE, MVT::v16i8, 1},
          {ISD::VECTOR_SHUFFLE, MVT::v4f32, 1},
          {ISD::VECTOR_SHUFFLE, MVT::v8f16, 1}};

      std::pair<int, MVT> LT = TLI->getTypeLegalizationCost(DL, Tp);

      if (const auto *Entry = CostTableLookup(MVEDupTbl, ISD::VECTOR_SHUFFLE,
                                              LT.second))
        return LT.first * Entry->Cost *
               ST->getMVEVectorCostFactor(TTI::TCK_RecipThroughput);
    }

    if (!Mask.empty()) {
      std::pair<int, MVT> LT = TLI->getTypeLegalizationCost(DL, Tp);
      if (Mask.size() <= LT.second.getVectorNumElements() &&
          (isVREVMask(Mask, LT.second, 16) || isVREVMask(Mask, LT.second, 32) ||
           isVREVMask(Mask, LT.second, 64)))
        return ST->getMVEVectorCostFactor(TTI::TCK_RecipThroughput) * LT.first;
    }
  }

  int BaseCost = ST->hasMVEIntegerOps() && Tp->isVectorTy()
                     ? ST->getMVEVectorCostFactor(TTI::TCK_RecipThroughput)
                     : 1;
  return BaseCost * BaseT::getShuffleCost(Kind, Tp, Mask, Index, SubTp);
}

InstructionCost ARMTTIImpl::getArithmeticInstrCost(
    unsigned Opcode, Type *Ty, TTI::TargetCostKind CostKind,
    TTI::OperandValueKind Op1Info, TTI::OperandValueKind Op2Info,
    TTI::OperandValueProperties Opd1PropInfo,
    TTI::OperandValueProperties Opd2PropInfo, ArrayRef<const Value *> Args,
    const Instruction *CxtI) {
  int ISDOpcode = TLI->InstructionOpcodeToISD(Opcode);
  if (ST->isThumb() && CostKind == TTI::TCK_CodeSize && Ty->isIntegerTy(1)) {
    // Make operations on i1 relatively expensive as this often involves
    // combining predicates. AND and XOR should be easier to handle with IT
    // blocks.
    switch (ISDOpcode) {
    default:
      break;
    case ISD::AND:
    case ISD::XOR:
      return 2;
    case ISD::OR:
      return 3;
    }
  }

  std::pair<int, MVT> LT = TLI->getTypeLegalizationCost(DL, Ty);

  if (ST->hasNEON()) {
    const unsigned FunctionCallDivCost = 20;
    const unsigned ReciprocalDivCost = 10;
    static const CostTblEntry CostTbl[] = {
      // Division.
      // These costs are somewhat random. Choose a cost of 20 to indicate that
      // vectorizing division (added function call) is going to be very
      // expensive.
      // Double register types.
      { ISD::SDIV, MVT::v1i64, 1 * FunctionCallDivCost},
      { ISD::UDIV, MVT::v1i64, 1 * FunctionCallDivCost},
      { ISD::SREM, MVT::v1i64, 1 * FunctionCallDivCost},
      { ISD::UREM, MVT::v1i64, 1 * FunctionCallDivCost},
      { ISD::SDIV, MVT::v2i32, 2 * FunctionCallDivCost},
      { ISD::UDIV, MVT::v2i32, 2 * FunctionCallDivCost},
      { ISD::SREM, MVT::v2i32, 2 * FunctionCallDivCost},
      { ISD::UREM, MVT::v2i32, 2 * FunctionCallDivCost},
      { ISD::SDIV, MVT::v4i16, ReciprocalDivCost},
      { ISD::UDIV, MVT::v4i16, ReciprocalDivCost},
      { ISD::SREM, MVT::v4i16, 4 * FunctionCallDivCost},
      { ISD::UREM, MVT::v4i16, 4 * FunctionCallDivCost},
      { ISD::SDIV, MVT::v8i8, ReciprocalDivCost},
      { ISD::UDIV, MVT::v8i8, ReciprocalDivCost},
      { ISD::SREM, MVT::v8i8, 8 * FunctionCallDivCost},
      { ISD::UREM, MVT::v8i8, 8 * FunctionCallDivCost},
      // Quad register types.
      { ISD::SDIV, MVT::v2i64, 2 * FunctionCallDivCost},
      { ISD::UDIV, MVT::v2i64, 2 * FunctionCallDivCost},
      { ISD::SREM, MVT::v2i64, 2 * FunctionCallDivCost},
      { ISD::UREM, MVT::v2i64, 2 * FunctionCallDivCost},
      { ISD::SDIV, MVT::v4i32, 4 * FunctionCallDivCost},
      { ISD::UDIV, MVT::v4i32, 4 * FunctionCallDivCost},
      { ISD::SREM, MVT::v4i32, 4 * FunctionCallDivCost},
      { ISD::UREM, MVT::v4i32, 4 * FunctionCallDivCost},
      { ISD::SDIV, MVT::v8i16, 8 * FunctionCallDivCost},
      { ISD::UDIV, MVT::v8i16, 8 * FunctionCallDivCost},
      { ISD::SREM, MVT::v8i16, 8 * FunctionCallDivCost},
      { ISD::UREM, MVT::v8i16, 8 * FunctionCallDivCost},
      { ISD::SDIV, MVT::v16i8, 16 * FunctionCallDivCost},
      { ISD::UDIV, MVT::v16i8, 16 * FunctionCallDivCost},
      { ISD::SREM, MVT::v16i8, 16 * FunctionCallDivCost},
      { ISD::UREM, MVT::v16i8, 16 * FunctionCallDivCost},
      // Multiplication.
    };

    if (const auto *Entry = CostTableLookup(CostTbl, ISDOpcode, LT.second))
      return LT.first * Entry->Cost;

    InstructionCost Cost = BaseT::getArithmeticInstrCost(
        Opcode, Ty, CostKind, Op1Info, Op2Info, Opd1PropInfo, Opd2PropInfo);

    // This is somewhat of a hack. The problem that we are facing is that SROA
    // creates a sequence of shift, and, or instructions to construct values.
    // These sequences are recognized by the ISel and have zero-cost. Not so
    // for the vectorized code. Because we have support for v2i64 but not i64
    // those sequences look particularly beneficial to vectorize.
    // To work around this we increase the cost of v2i64 operations to make
    // them seem less beneficial.
    if (LT.second == MVT::v2i64 &&
        Op2Info == TargetTransformInfo::OK_UniformConstantValue)
      Cost += 4;

    return Cost;
  }

  // If this operation is a shift on arm/thumb2, it might well be folded into
  // the following instruction, hence having a cost of 0.
  auto LooksLikeAFreeShift = [&]() {
    if (ST->isThumb1Only() || Ty->isVectorTy())
      return false;

    if (!CxtI || !CxtI->hasOneUse() || !CxtI->isShift())
      return false;
    if (Op2Info != TargetTransformInfo::OK_UniformConstantValue)
      return false;

    // Folded into an ADC/ADD/AND/BIC/CMP/EOR/MVN/ORR/ORN/RSB/SBC/SUB.
    switch (cast<Instruction>(CxtI->user_back())->getOpcode()) {
    case Instruction::Add:
    case Instruction::Sub:
    case Instruction::And:
    case Instruction::Xor:
    case Instruction::Or:
    case Instruction::ICmp:
      return true;
    default:
      return false;
    }
  };
  if (LooksLikeAFreeShift())
    return 0;

  // Default to cheap (throughput/size of 1 instruction) but adjust throughput
  // for "multiple beats" potentially needed by MVE instructions.
  int BaseCost = 1;
  if (ST->hasMVEIntegerOps() && Ty->isVectorTy())
    BaseCost = ST->getMVEVectorCostFactor(CostKind);

  // The rest of this mostly follows what is done in
  // BaseT::getArithmeticInstrCost, without treating floats as more expensive
  // than scalars or increasing the costs for custom operations. The result is
  // also multiplied by the MVEVectorCostFactor where appropriate.
  if (TLI->isOperationLegalOrCustomOrPromote(ISDOpcode, LT.second))
    return LT.first * BaseCost;

  // Else this is expand, assume that we need to scalarize this op.
  if (auto *VTy = dyn_cast<FixedVectorType>(Ty)) {
    unsigned Num = VTy->getNumElements();
    InstructionCost Cost =
        getArithmeticInstrCost(Opcode, Ty->getScalarType(), CostKind);
    // Return the cost of multiple scalar invocations plus the cost of
    // inserting and extracting the values.
    SmallVector<Type *> Tys(Args.size(), Ty);
    return BaseT::getScalarizationOverhead(VTy, Args, Tys) + Num * Cost;
  }

  return BaseCost;
}

InstructionCost ARMTTIImpl::getMemoryOpCost(unsigned Opcode, Type *Src,
                                            MaybeAlign Alignment,
                                            unsigned AddressSpace,
                                            TTI::TargetCostKind CostKind,
                                            const Instruction *I) {
  // TODO: Handle other cost kinds.
  if (CostKind != TTI::TCK_RecipThroughput)
    return 1;

  // Type legalization can't handle structs.
  if (TLI->getValueType(DL, Src, true) == MVT::Other)
    return BaseT::getMemoryOpCost(Opcode, Src, Alignment, AddressSpace,
                                  CostKind);

  if (ST->hasNEON() && Src->isVectorTy() &&
      (Alignment && *Alignment != Align(16)) &&
      cast<VectorType>(Src)->getElementType()->isDoubleTy()) {
    // Unaligned loads/stores are extremely inefficient.
    // We need 4 uops for vst.1/vld.1 vs 1 uop for vldr/vstr.
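    // e.g. a misaligned <2 x double> access ends up as vst1/vld1-style
    // operations, so charge roughly 4 per legalised vector below (uop counts
    // are illustrative).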
    std::pair<int, MVT> LT = TLI->getTypeLegalizationCost(DL, Src);
    return LT.first * 4;
  }

  // MVE can optimize a fpext(load(4xhalf)) using an extending integer load.
  // Same for stores.
  if (ST->hasMVEFloatOps() && isa<FixedVectorType>(Src) && I &&
      ((Opcode == Instruction::Load && I->hasOneUse() &&
        isa<FPExtInst>(*I->user_begin())) ||
       (Opcode == Instruction::Store && isa<FPTruncInst>(I->getOperand(0))))) {
    FixedVectorType *SrcVTy = cast<FixedVectorType>(Src);
    Type *DstTy =
        Opcode == Instruction::Load
            ? (*I->user_begin())->getType()
            : cast<Instruction>(I->getOperand(0))->getOperand(0)->getType();
    if (SrcVTy->getNumElements() == 4 && SrcVTy->getScalarType()->isHalfTy() &&
        DstTy->getScalarType()->isFloatTy())
      return ST->getMVEVectorCostFactor(CostKind);
  }

  int BaseCost = ST->hasMVEIntegerOps() && Src->isVectorTy()
                     ? ST->getMVEVectorCostFactor(CostKind)
                     : 1;
  return BaseCost * BaseT::getMemoryOpCost(Opcode, Src, Alignment, AddressSpace,
                                           CostKind, I);
}

InstructionCost
ARMTTIImpl::getMaskedMemoryOpCost(unsigned Opcode, Type *Src, Align Alignment,
                                  unsigned AddressSpace,
                                  TTI::TargetCostKind CostKind) {
  if (ST->hasMVEIntegerOps()) {
    if (Opcode == Instruction::Load && isLegalMaskedLoad(Src, Alignment))
      return ST->getMVEVectorCostFactor(CostKind);
    if (Opcode == Instruction::Store && isLegalMaskedStore(Src, Alignment))
      return ST->getMVEVectorCostFactor(CostKind);
  }
  if (!isa<FixedVectorType>(Src))
    return BaseT::getMaskedMemoryOpCost(Opcode, Src, Alignment, AddressSpace,
                                        CostKind);
  // Scalar cost, which is currently very high due to the inefficiency of the
  // generated code.
  return cast<FixedVectorType>(Src)->getNumElements() * 8;
}

InstructionCost ARMTTIImpl::getInterleavedMemoryOpCost(
    unsigned Opcode, Type *VecTy, unsigned Factor, ArrayRef<unsigned> Indices,
    Align Alignment, unsigned AddressSpace, TTI::TargetCostKind CostKind,
    bool UseMaskForCond, bool UseMaskForGaps) {
  assert(Factor >= 2 && "Invalid interleave factor");
  assert(isa<VectorType>(VecTy) && "Expect a vector type");

  // vldN/vstN doesn't support vector types of i64/f64 elements.
  bool EltIs64Bits = DL.getTypeSizeInBits(VecTy->getScalarType()) == 64;

  if (Factor <= TLI->getMaxSupportedInterleaveFactor() && !EltIs64Bits &&
      !UseMaskForCond && !UseMaskForGaps) {
    unsigned NumElts = cast<FixedVectorType>(VecTy)->getNumElements();
    auto *SubVecTy =
        FixedVectorType::get(VecTy->getScalarType(), NumElts / Factor);

    // vldN/vstN only support legal vector types of size 64 or 128 in bits.
    // Accesses having vector types that are a multiple of 128 bits can be
    // matched to more than one vldN/vstN instruction.
    int BaseCost =
        ST->hasMVEIntegerOps() ? ST->getMVEVectorCostFactor(CostKind) : 1;
    if (NumElts % Factor == 0 &&
        TLI->isLegalInterleavedAccessType(Factor, SubVecTy, Alignment, DL))
      return Factor * BaseCost * TLI->getNumInterleavedAccesses(SubVecTy, DL);

    // Some smaller than legal interleaved patterns are cheap as we can make
    // use of the vmovn or vrev patterns to interleave a standard load. This is
    // true for v4i8, v8i8 and v4i16 at least (but not for v4f16 as it is
    // promoted differently). The cost of 2 here is then a load and vrev or
    // vmovn.
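    // For example, a Factor == 2 deinterleave of a <8 x i8> load can be done
    // as one load plus a vrev/vmovn-style rearrangement, which is why the
    // check below returns a flat 2 * BaseCost (lowering sketch is
    // illustrative).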
1480 if (ST->hasMVEIntegerOps() && Factor == 2 && NumElts / Factor > 2 && 1481 VecTy->isIntOrIntVectorTy() && 1482 DL.getTypeSizeInBits(SubVecTy).getFixedSize() <= 64) 1483 return 2 * BaseCost; 1484 } 1485 1486 return BaseT::getInterleavedMemoryOpCost(Opcode, VecTy, Factor, Indices, 1487 Alignment, AddressSpace, CostKind, 1488 UseMaskForCond, UseMaskForGaps); 1489 } 1490 1491 InstructionCost ARMTTIImpl::getGatherScatterOpCost( 1492 unsigned Opcode, Type *DataTy, const Value *Ptr, bool VariableMask, 1493 Align Alignment, TTI::TargetCostKind CostKind, const Instruction *I) { 1494 using namespace PatternMatch; 1495 if (!ST->hasMVEIntegerOps() || !EnableMaskedGatherScatters) 1496 return BaseT::getGatherScatterOpCost(Opcode, DataTy, Ptr, VariableMask, 1497 Alignment, CostKind, I); 1498 1499 assert(DataTy->isVectorTy() && "Can't do gather/scatters on scalar!"); 1500 auto *VTy = cast<FixedVectorType>(DataTy); 1501 1502 // TODO: Splitting, once we do that. 1503 1504 unsigned NumElems = VTy->getNumElements(); 1505 unsigned EltSize = VTy->getScalarSizeInBits(); 1506 std::pair<int, MVT> LT = TLI->getTypeLegalizationCost(DL, DataTy); 1507 1508 // For now, it is assumed that for the MVE gather instructions the loads are 1509 // all effectively serialised. This means the cost is the scalar cost 1510 // multiplied by the number of elements being loaded. This is possibly very 1511 // conservative, but even so we still end up vectorising loops because the 1512 // cost per iteration for many loops is lower than for scalar loops. 1513 unsigned VectorCost = 1514 NumElems * LT.first * ST->getMVEVectorCostFactor(CostKind); 1515 // The scalarization cost should be a lot higher. We use the number of vector 1516 // elements plus the scalarization overhead. 1517 InstructionCost ScalarCost = 1518 NumElems * LT.first + BaseT::getScalarizationOverhead(VTy, true, false) + 1519 BaseT::getScalarizationOverhead(VTy, false, true); 1520 1521 if (EltSize < 8 || Alignment < EltSize / 8) 1522 return ScalarCost; 1523 1524 unsigned ExtSize = EltSize; 1525 // Check whether there's a single user that asks for an extended type 1526 if (I != nullptr) { 1527 // Dependent of the caller of this function, a gather instruction will 1528 // either have opcode Instruction::Load or be a call to the masked_gather 1529 // intrinsic 1530 if ((I->getOpcode() == Instruction::Load || 1531 match(I, m_Intrinsic<Intrinsic::masked_gather>())) && 1532 I->hasOneUse()) { 1533 const User *Us = *I->users().begin(); 1534 if (isa<ZExtInst>(Us) || isa<SExtInst>(Us)) { 1535 // only allow valid type combinations 1536 unsigned TypeSize = 1537 cast<Instruction>(Us)->getType()->getScalarSizeInBits(); 1538 if (((TypeSize == 32 && (EltSize == 8 || EltSize == 16)) || 1539 (TypeSize == 16 && EltSize == 8)) && 1540 TypeSize * NumElems == 128) { 1541 ExtSize = TypeSize; 1542 } 1543 } 1544 } 1545 // Check whether the input data needs to be truncated 1546 TruncInst *T; 1547 if ((I->getOpcode() == Instruction::Store || 1548 match(I, m_Intrinsic<Intrinsic::masked_scatter>())) && 1549 (T = dyn_cast<TruncInst>(I->getOperand(0)))) { 1550 // Only allow valid type combinations 1551 unsigned TypeSize = T->getOperand(0)->getType()->getScalarSizeInBits(); 1552 if (((EltSize == 16 && TypeSize == 32) || 1553 (EltSize == 8 && (TypeSize == 32 || TypeSize == 16))) && 1554 TypeSize * NumElems == 128) 1555 ExtSize = TypeSize; 1556 } 1557 } 1558 1559 if (ExtSize * NumElems != 128 || NumElems < 4) 1560 return ScalarCost; 1561 1562 // Any (aligned) i32 gather will not need to be scalarised. 
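  // Worked example (illustrative only): a gather such as
  //
  //   %g = call <4 x i32> @llvm.masked.gather.v4i32.v4p0i32(
  //            <4 x i32*> %ptrs, i32 4, <4 x i1> %mask, <4 x i32> undef)
  //
  // has EltSize == 32 and NumElems == 4, so ExtSize * NumElems == 128 and the
  // check below returns VectorCost, i.e. NumElems * LT.first scaled by the
  // MVE cost factor, on the assumption that a VLDRW-style gather is emitted.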
1563 if (ExtSize == 32) 1564 return VectorCost; 1565 // For smaller types, we need to ensure that the gep's inputs are correctly 1566 // extended from a small enough value. Other sizes (including i64) are 1567 // scalarized for now. 1568 if (ExtSize != 8 && ExtSize != 16) 1569 return ScalarCost; 1570 1571 if (const auto *BC = dyn_cast<BitCastInst>(Ptr)) 1572 Ptr = BC->getOperand(0); 1573 if (const auto *GEP = dyn_cast<GetElementPtrInst>(Ptr)) { 1574 if (GEP->getNumOperands() != 2) 1575 return ScalarCost; 1576 unsigned Scale = DL.getTypeAllocSize(GEP->getResultElementType()); 1577 // Scale needs to be correct (which is only relevant for i16s). 1578 if (Scale != 1 && Scale * 8 != ExtSize) 1579 return ScalarCost; 1580 // And we need to zext (not sext) the indexes from a small enough type. 1581 if (const auto *ZExt = dyn_cast<ZExtInst>(GEP->getOperand(1))) { 1582 if (ZExt->getOperand(0)->getType()->getScalarSizeInBits() <= ExtSize) 1583 return VectorCost; 1584 } 1585 return ScalarCost; 1586 } 1587 return ScalarCost; 1588 } 1589 1590 InstructionCost 1591 ARMTTIImpl::getArithmeticReductionCost(unsigned Opcode, VectorType *ValTy, 1592 bool IsPairwiseForm, 1593 TTI::TargetCostKind CostKind) { 1594 EVT ValVT = TLI->getValueType(DL, ValTy); 1595 int ISD = TLI->InstructionOpcodeToISD(Opcode); 1596 if (!ST->hasMVEIntegerOps() || !ValVT.isSimple() || ISD != ISD::ADD) 1597 return BaseT::getArithmeticReductionCost(Opcode, ValTy, IsPairwiseForm, 1598 CostKind); 1599 1600 std::pair<int, MVT> LT = TLI->getTypeLegalizationCost(DL, ValTy); 1601 1602 static const CostTblEntry CostTblAdd[]{ 1603 {ISD::ADD, MVT::v16i8, 1}, 1604 {ISD::ADD, MVT::v8i16, 1}, 1605 {ISD::ADD, MVT::v4i32, 1}, 1606 }; 1607 if (const auto *Entry = CostTableLookup(CostTblAdd, ISD, LT.second)) 1608 return Entry->Cost * ST->getMVEVectorCostFactor(CostKind) * LT.first; 1609 1610 return BaseT::getArithmeticReductionCost(Opcode, ValTy, IsPairwiseForm, 1611 CostKind); 1612 } 1613 1614 InstructionCost 1615 ARMTTIImpl::getExtendedAddReductionCost(bool IsMLA, bool IsUnsigned, 1616 Type *ResTy, VectorType *ValTy, 1617 TTI::TargetCostKind CostKind) { 1618 EVT ValVT = TLI->getValueType(DL, ValTy); 1619 EVT ResVT = TLI->getValueType(DL, ResTy); 1620 if (ST->hasMVEIntegerOps() && ValVT.isSimple() && ResVT.isSimple()) { 1621 std::pair<int, MVT> LT = TLI->getTypeLegalizationCost(DL, ValTy); 1622 if ((LT.second == MVT::v16i8 && ResVT.getSizeInBits() <= 32) || 1623 (LT.second == MVT::v8i16 && 1624 ResVT.getSizeInBits() <= (IsMLA ? 64 : 32)) || 1625 (LT.second == MVT::v4i32 && ResVT.getSizeInBits() <= 64)) 1626 return ST->getMVEVectorCostFactor(CostKind) * LT.first; 1627 } 1628 1629 return BaseT::getExtendedAddReductionCost(IsMLA, IsUnsigned, ResTy, ValTy, 1630 CostKind); 1631 } 1632 1633 InstructionCost 1634 ARMTTIImpl::getIntrinsicInstrCost(const IntrinsicCostAttributes &ICA, 1635 TTI::TargetCostKind CostKind) { 1636 switch (ICA.getID()) { 1637 case Intrinsic::get_active_lane_mask: 1638 // Currently we make a somewhat optimistic assumption that 1639 // active_lane_mask's are always free. In reality it may be freely folded 1640 // into a tail predicated loop, expanded into a VCPT or expanded into a lot 1641 // of add/icmp code. We may need to improve this in the future, but being 1642 // able to detect if it is free or not involves looking at a lot of other 1643 // code. We currently assume that the vectorizer inserted these, and knew 1644 // what it was doing in adding one. 
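    // For example (illustrative), a mask such as
    //
    //   %m = call <4 x i1> @llvm.get.active.lane.mask.v4i1.i32(i32 %i, i32 %n)
    //
    // is reported as free below when MVE is available, on the assumption that
    // it either folds into the tail-predicated loop or becomes a single VCTP.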
    if (ST->hasMVEIntegerOps())
      return 0;
    break;
  case Intrinsic::sadd_sat:
  case Intrinsic::ssub_sat:
  case Intrinsic::uadd_sat:
  case Intrinsic::usub_sat: {
    if (!ST->hasMVEIntegerOps())
      break;
    Type *VT = ICA.getReturnType();

    std::pair<int, MVT> LT = TLI->getTypeLegalizationCost(DL, VT);
    if (LT.second == MVT::v4i32 || LT.second == MVT::v8i16 ||
        LT.second == MVT::v16i8) {
      // This is a base cost of 1 for the vqadd, plus 3 extract shifts if we
      // need to extend the type, as it uses shr(qadd(shl, shl)).
      unsigned Instrs =
          LT.second.getScalarSizeInBits() == VT->getScalarSizeInBits() ? 1 : 4;
      return LT.first * ST->getMVEVectorCostFactor(CostKind) * Instrs;
    }
    break;
  }
  case Intrinsic::abs:
  case Intrinsic::smin:
  case Intrinsic::smax:
  case Intrinsic::umin:
  case Intrinsic::umax: {
    if (!ST->hasMVEIntegerOps())
      break;
    Type *VT = ICA.getReturnType();

    std::pair<int, MVT> LT = TLI->getTypeLegalizationCost(DL, VT);
    if (LT.second == MVT::v4i32 || LT.second == MVT::v8i16 ||
        LT.second == MVT::v16i8)
      return LT.first * ST->getMVEVectorCostFactor(CostKind);
    break;
  }
  case Intrinsic::minnum:
  case Intrinsic::maxnum: {
    if (!ST->hasMVEFloatOps())
      break;
    Type *VT = ICA.getReturnType();
    std::pair<int, MVT> LT = TLI->getTypeLegalizationCost(DL, VT);
    if (LT.second == MVT::v4f32 || LT.second == MVT::v8f16)
      return LT.first * ST->getMVEVectorCostFactor(CostKind);
    break;
  }
  }

  return BaseT::getIntrinsicInstrCost(ICA, CostKind);
}

bool ARMTTIImpl::isLoweredToCall(const Function *F) {
  if (!F->isIntrinsic())
    return BaseT::isLoweredToCall(F);

  // Assume all Arm-specific intrinsics map to an instruction.
  if (F->getName().startswith("llvm.arm"))
    return false;

  switch (F->getIntrinsicID()) {
  default: break;
  case Intrinsic::powi:
  case Intrinsic::sin:
  case Intrinsic::cos:
  case Intrinsic::pow:
  case Intrinsic::log:
  case Intrinsic::log10:
  case Intrinsic::log2:
  case Intrinsic::exp:
  case Intrinsic::exp2:
    return true;
  case Intrinsic::sqrt:
  case Intrinsic::fabs:
  case Intrinsic::copysign:
  case Intrinsic::floor:
  case Intrinsic::ceil:
  case Intrinsic::trunc:
  case Intrinsic::rint:
  case Intrinsic::nearbyint:
  case Intrinsic::round:
  case Intrinsic::canonicalize:
  case Intrinsic::lround:
  case Intrinsic::llround:
  case Intrinsic::lrint:
  case Intrinsic::llrint:
    if (F->getReturnType()->isDoubleTy() && !ST->hasFP64())
      return true;
    if (F->getReturnType()->isHalfTy() && !ST->hasFullFP16())
      return true;
    // Some operations can be handled by vector instructions and assume
    // unsupported vectors will be expanded into supported scalar ones.
    // TODO Handle scalar operations properly.
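    // For example (illustrative): with neither FPARMv8 nor VFP2 available, a
    // call to @llvm.sqrt.f32 is assumed to end up as a library call (e.g.
    // sqrtf), whereas on any FPU-equipped core it is expected to lower to a
    // VSQRT and is therefore not treated as a call by the check below.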
1739 return !ST->hasFPARMv8Base() && !ST->hasVFP2Base(); 1740 case Intrinsic::masked_store: 1741 case Intrinsic::masked_load: 1742 case Intrinsic::masked_gather: 1743 case Intrinsic::masked_scatter: 1744 return !ST->hasMVEIntegerOps(); 1745 case Intrinsic::sadd_with_overflow: 1746 case Intrinsic::uadd_with_overflow: 1747 case Intrinsic::ssub_with_overflow: 1748 case Intrinsic::usub_with_overflow: 1749 case Intrinsic::sadd_sat: 1750 case Intrinsic::uadd_sat: 1751 case Intrinsic::ssub_sat: 1752 case Intrinsic::usub_sat: 1753 return false; 1754 } 1755 1756 return BaseT::isLoweredToCall(F); 1757 } 1758 1759 bool ARMTTIImpl::maybeLoweredToCall(Instruction &I) { 1760 unsigned ISD = TLI->InstructionOpcodeToISD(I.getOpcode()); 1761 EVT VT = TLI->getValueType(DL, I.getType(), true); 1762 if (TLI->getOperationAction(ISD, VT) == TargetLowering::LibCall) 1763 return true; 1764 1765 // Check if an intrinsic will be lowered to a call and assume that any 1766 // other CallInst will generate a bl. 1767 if (auto *Call = dyn_cast<CallInst>(&I)) { 1768 if (auto *II = dyn_cast<IntrinsicInst>(Call)) { 1769 switch(II->getIntrinsicID()) { 1770 case Intrinsic::memcpy: 1771 case Intrinsic::memset: 1772 case Intrinsic::memmove: 1773 return getNumMemOps(II) == -1; 1774 default: 1775 if (const Function *F = Call->getCalledFunction()) 1776 return isLoweredToCall(F); 1777 } 1778 } 1779 return true; 1780 } 1781 1782 // FPv5 provides conversions between integer, double-precision, 1783 // single-precision, and half-precision formats. 1784 switch (I.getOpcode()) { 1785 default: 1786 break; 1787 case Instruction::FPToSI: 1788 case Instruction::FPToUI: 1789 case Instruction::SIToFP: 1790 case Instruction::UIToFP: 1791 case Instruction::FPTrunc: 1792 case Instruction::FPExt: 1793 return !ST->hasFPARMv8Base(); 1794 } 1795 1796 // FIXME: Unfortunately the approach of checking the Operation Action does 1797 // not catch all cases of Legalization that use library calls. Our 1798 // Legalization step categorizes some transformations into library calls as 1799 // Custom, Expand or even Legal when doing type legalization. So for now 1800 // we have to special case for instance the SDIV of 64bit integers and the 1801 // use of floating point emulation. 1802 if (VT.isInteger() && VT.getSizeInBits() >= 64) { 1803 switch (ISD) { 1804 default: 1805 break; 1806 case ISD::SDIV: 1807 case ISD::UDIV: 1808 case ISD::SREM: 1809 case ISD::UREM: 1810 case ISD::SDIVREM: 1811 case ISD::UDIVREM: 1812 return true; 1813 } 1814 } 1815 1816 // Assume all other non-float operations are supported. 1817 if (!VT.isFloatingPoint()) 1818 return false; 1819 1820 // We'll need a library call to handle most floats when using soft. 1821 if (TLI->useSoftFloat()) { 1822 switch (I.getOpcode()) { 1823 default: 1824 return true; 1825 case Instruction::Alloca: 1826 case Instruction::Load: 1827 case Instruction::Store: 1828 case Instruction::Select: 1829 case Instruction::PHI: 1830 return false; 1831 } 1832 } 1833 1834 // We'll need a libcall to perform double precision operations on a single 1835 // precision only FPU. 1836 if (I.getType()->isDoubleTy() && !ST->hasFP64()) 1837 return true; 1838 1839 // Likewise for half precision arithmetic. 
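  // For instance (a sketch of the assumed lowering, not a precise model): an
  // fadd on 'half' without the full FP16 extension is normally emulated by
  // converting to and from single precision, which may involve runtime helper
  // calls, so such arithmetic is conservatively treated as a call here.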
1840 if (I.getType()->isHalfTy() && !ST->hasFullFP16()) 1841 return true; 1842 1843 return false; 1844 } 1845 1846 bool ARMTTIImpl::isHardwareLoopProfitable(Loop *L, ScalarEvolution &SE, 1847 AssumptionCache &AC, 1848 TargetLibraryInfo *LibInfo, 1849 HardwareLoopInfo &HWLoopInfo) { 1850 // Low-overhead branches are only supported in the 'low-overhead branch' 1851 // extension of v8.1-m. 1852 if (!ST->hasLOB() || DisableLowOverheadLoops) { 1853 LLVM_DEBUG(dbgs() << "ARMHWLoops: Disabled\n"); 1854 return false; 1855 } 1856 1857 if (!SE.hasLoopInvariantBackedgeTakenCount(L)) { 1858 LLVM_DEBUG(dbgs() << "ARMHWLoops: No BETC\n"); 1859 return false; 1860 } 1861 1862 const SCEV *BackedgeTakenCount = SE.getBackedgeTakenCount(L); 1863 if (isa<SCEVCouldNotCompute>(BackedgeTakenCount)) { 1864 LLVM_DEBUG(dbgs() << "ARMHWLoops: Uncomputable BETC\n"); 1865 return false; 1866 } 1867 1868 const SCEV *TripCountSCEV = 1869 SE.getAddExpr(BackedgeTakenCount, 1870 SE.getOne(BackedgeTakenCount->getType())); 1871 1872 // We need to store the trip count in LR, a 32-bit register. 1873 if (SE.getUnsignedRangeMax(TripCountSCEV).getBitWidth() > 32) { 1874 LLVM_DEBUG(dbgs() << "ARMHWLoops: Trip count does not fit into 32bits\n"); 1875 return false; 1876 } 1877 1878 // Making a call will trash LR and clear LO_BRANCH_INFO, so there's little 1879 // point in generating a hardware loop if that's going to happen. 1880 1881 auto IsHardwareLoopIntrinsic = [](Instruction &I) { 1882 if (auto *Call = dyn_cast<IntrinsicInst>(&I)) { 1883 switch (Call->getIntrinsicID()) { 1884 default: 1885 break; 1886 case Intrinsic::start_loop_iterations: 1887 case Intrinsic::test_start_loop_iterations: 1888 case Intrinsic::loop_decrement: 1889 case Intrinsic::loop_decrement_reg: 1890 return true; 1891 } 1892 } 1893 return false; 1894 }; 1895 1896 // Scan the instructions to see if there's any that we know will turn into a 1897 // call or if this loop is already a low-overhead loop or will become a tail 1898 // predicated loop. 1899 bool IsTailPredLoop = false; 1900 auto ScanLoop = [&](Loop *L) { 1901 for (auto *BB : L->getBlocks()) { 1902 for (auto &I : *BB) { 1903 if (maybeLoweredToCall(I) || IsHardwareLoopIntrinsic(I) || 1904 isa<InlineAsm>(I)) { 1905 LLVM_DEBUG(dbgs() << "ARMHWLoops: Bad instruction: " << I << "\n"); 1906 return false; 1907 } 1908 if (auto *II = dyn_cast<IntrinsicInst>(&I)) 1909 IsTailPredLoop |= 1910 II->getIntrinsicID() == Intrinsic::get_active_lane_mask || 1911 II->getIntrinsicID() == Intrinsic::arm_mve_vctp8 || 1912 II->getIntrinsicID() == Intrinsic::arm_mve_vctp16 || 1913 II->getIntrinsicID() == Intrinsic::arm_mve_vctp32 || 1914 II->getIntrinsicID() == Intrinsic::arm_mve_vctp64; 1915 } 1916 } 1917 return true; 1918 }; 1919 1920 // Visit inner loops. 1921 for (auto Inner : *L) 1922 if (!ScanLoop(Inner)) 1923 return false; 1924 1925 if (!ScanLoop(L)) 1926 return false; 1927 1928 // TODO: Check whether the trip count calculation is expensive. If L is the 1929 // inner loop but we know it has a low trip count, calculating that trip 1930 // count (in the parent loop) may be detrimental. 
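  // For reference (illustrative; the exact codegen depends on later passes):
  // a loop accepted here is expected to become a v8.1-M low-overhead loop of
  // roughly the form
  //
  //     dls lr, rN        ; or wls lr, rN, <exit> when an entry test is used
  //   loop:
  //     ...
  //     le  lr, loop
  //
  // with the 32-bit trip count kept in LR, which is why the checks above
  // insist on a computable count that fits in 32 bits.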
1931 1932 LLVMContext &C = L->getHeader()->getContext(); 1933 HWLoopInfo.CounterInReg = true; 1934 HWLoopInfo.IsNestingLegal = false; 1935 HWLoopInfo.PerformEntryTest = AllowWLSLoops && !IsTailPredLoop; 1936 HWLoopInfo.CountType = Type::getInt32Ty(C); 1937 HWLoopInfo.LoopDecrement = ConstantInt::get(HWLoopInfo.CountType, 1); 1938 return true; 1939 } 1940 1941 static bool canTailPredicateInstruction(Instruction &I, int &ICmpCount) { 1942 // We don't allow icmp's, and because we only look at single block loops, 1943 // we simply count the icmps, i.e. there should only be 1 for the backedge. 1944 if (isa<ICmpInst>(&I) && ++ICmpCount > 1) 1945 return false; 1946 1947 if (isa<FCmpInst>(&I)) 1948 return false; 1949 1950 // We could allow extending/narrowing FP loads/stores, but codegen is 1951 // too inefficient so reject this for now. 1952 if (isa<FPExtInst>(&I) || isa<FPTruncInst>(&I)) 1953 return false; 1954 1955 // Extends have to be extending-loads 1956 if (isa<SExtInst>(&I) || isa<ZExtInst>(&I) ) 1957 if (!I.getOperand(0)->hasOneUse() || !isa<LoadInst>(I.getOperand(0))) 1958 return false; 1959 1960 // Truncs have to be narrowing-stores 1961 if (isa<TruncInst>(&I) ) 1962 if (!I.hasOneUse() || !isa<StoreInst>(*I.user_begin())) 1963 return false; 1964 1965 return true; 1966 } 1967 1968 // To set up a tail-predicated loop, we need to know the total number of 1969 // elements processed by that loop. Thus, we need to determine the element 1970 // size and: 1971 // 1) it should be uniform for all operations in the vector loop, so we 1972 // e.g. don't want any widening/narrowing operations. 1973 // 2) it should be smaller than i64s because we don't have vector operations 1974 // that work on i64s. 1975 // 3) we don't want elements to be reversed or shuffled, to make sure the 1976 // tail-predication masks/predicates the right lanes. 1977 // 1978 static bool canTailPredicateLoop(Loop *L, LoopInfo *LI, ScalarEvolution &SE, 1979 const DataLayout &DL, 1980 const LoopAccessInfo *LAI) { 1981 LLVM_DEBUG(dbgs() << "Tail-predication: checking allowed instructions\n"); 1982 1983 // If there are live-out values, it is probably a reduction. We can predicate 1984 // most reduction operations freely under MVE using a combination of 1985 // prefer-predicated-reduction-select and inloop reductions. We limit this to 1986 // floating point and integer reductions, but don't check for operators 1987 // specifically here. If the value ends up not being a reduction (and so the 1988 // vectorizer cannot tailfold the loop), we should fall back to standard 1989 // vectorization automatically. 1990 SmallVector< Instruction *, 8 > LiveOuts; 1991 LiveOuts = llvm::findDefsUsedOutsideOfLoop(L); 1992 bool ReductionsDisabled = 1993 EnableTailPredication == TailPredication::EnabledNoReductions || 1994 EnableTailPredication == TailPredication::ForceEnabledNoReductions; 1995 1996 for (auto *I : LiveOuts) { 1997 if (!I->getType()->isIntegerTy() && !I->getType()->isFloatTy() && 1998 !I->getType()->isHalfTy()) { 1999 LLVM_DEBUG(dbgs() << "Don't tail-predicate loop with non-integer/float " 2000 "live-out value\n"); 2001 return false; 2002 } 2003 if (ReductionsDisabled) { 2004 LLVM_DEBUG(dbgs() << "Reductions not enabled\n"); 2005 return false; 2006 } 2007 } 2008 2009 // Next, check that all instructions can be tail-predicated. 
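  // As a rough illustration of the rules above: a single-block loop doing
  //   a[i] = b[i] + c[i]   over i8/i16/i32 elements with unit stride
  // is acceptable, whereas a loop containing an fcmp, an i64-typed operation,
  // or a widening fpext load would be rejected by the checks below.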
2010 PredicatedScalarEvolution PSE = LAI->getPSE(); 2011 SmallVector<Instruction *, 16> LoadStores; 2012 int ICmpCount = 0; 2013 2014 for (BasicBlock *BB : L->blocks()) { 2015 for (Instruction &I : BB->instructionsWithoutDebug()) { 2016 if (isa<PHINode>(&I)) 2017 continue; 2018 if (!canTailPredicateInstruction(I, ICmpCount)) { 2019 LLVM_DEBUG(dbgs() << "Instruction not allowed: "; I.dump()); 2020 return false; 2021 } 2022 2023 Type *T = I.getType(); 2024 if (T->isPointerTy()) 2025 T = T->getPointerElementType(); 2026 2027 if (T->getScalarSizeInBits() > 32) { 2028 LLVM_DEBUG(dbgs() << "Unsupported Type: "; T->dump()); 2029 return false; 2030 } 2031 if (isa<StoreInst>(I) || isa<LoadInst>(I)) { 2032 Value *Ptr = isa<LoadInst>(I) ? I.getOperand(0) : I.getOperand(1); 2033 int64_t NextStride = getPtrStride(PSE, Ptr, L); 2034 if (NextStride == 1) { 2035 // TODO: for now only allow consecutive strides of 1. We could support 2036 // other strides as long as it is uniform, but let's keep it simple 2037 // for now. 2038 continue; 2039 } else if (NextStride == -1 || 2040 (NextStride == 2 && MVEMaxSupportedInterleaveFactor >= 2) || 2041 (NextStride == 4 && MVEMaxSupportedInterleaveFactor >= 4)) { 2042 LLVM_DEBUG(dbgs() 2043 << "Consecutive strides of 2 found, vld2/vstr2 can't " 2044 "be tail-predicated\n."); 2045 return false; 2046 // TODO: don't tail predicate if there is a reversed load? 2047 } else if (EnableMaskedGatherScatters) { 2048 // Gather/scatters do allow loading from arbitrary strides, at 2049 // least if they are loop invariant. 2050 // TODO: Loop variant strides should in theory work, too, but 2051 // this requires further testing. 2052 const SCEV *PtrScev = 2053 replaceSymbolicStrideSCEV(PSE, llvm::ValueToValueMap(), Ptr); 2054 if (auto AR = dyn_cast<SCEVAddRecExpr>(PtrScev)) { 2055 const SCEV *Step = AR->getStepRecurrence(*PSE.getSE()); 2056 if (PSE.getSE()->isLoopInvariant(Step, L)) 2057 continue; 2058 } 2059 } 2060 LLVM_DEBUG(dbgs() << "Bad stride found, can't " 2061 "tail-predicate\n."); 2062 return false; 2063 } 2064 } 2065 } 2066 2067 LLVM_DEBUG(dbgs() << "tail-predication: all instructions allowed!\n"); 2068 return true; 2069 } 2070 2071 bool ARMTTIImpl::preferPredicateOverEpilogue(Loop *L, LoopInfo *LI, 2072 ScalarEvolution &SE, 2073 AssumptionCache &AC, 2074 TargetLibraryInfo *TLI, 2075 DominatorTree *DT, 2076 const LoopAccessInfo *LAI) { 2077 if (!EnableTailPredication) { 2078 LLVM_DEBUG(dbgs() << "Tail-predication not enabled.\n"); 2079 return false; 2080 } 2081 2082 // Creating a predicated vector loop is the first step for generating a 2083 // tail-predicated hardware loop, for which we need the MVE masked 2084 // load/stores instructions: 2085 if (!ST->hasMVEIntegerOps()) 2086 return false; 2087 2088 // For now, restrict this to single block loops. 
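  // A loop that typically passes all of the checks below (illustrative only)
  // is a single-block, countable loop such as
  //
  //   for (int i = 0; i < n; i++)
  //     c[i] = a[i] + b[i];
  //
  // compiled for an MVE target, where the vectorizer can then fold the tail
  // using active-lane masks instead of emitting a scalar epilogue.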
2089 if (L->getNumBlocks() > 1) { 2090 LLVM_DEBUG(dbgs() << "preferPredicateOverEpilogue: not a single block " 2091 "loop.\n"); 2092 return false; 2093 } 2094 2095 assert(L->isInnermost() && "preferPredicateOverEpilogue: inner-loop expected"); 2096 2097 HardwareLoopInfo HWLoopInfo(L); 2098 if (!HWLoopInfo.canAnalyze(*LI)) { 2099 LLVM_DEBUG(dbgs() << "preferPredicateOverEpilogue: hardware-loop is not " 2100 "analyzable.\n"); 2101 return false; 2102 } 2103 2104 // This checks if we have the low-overhead branch architecture 2105 // extension, and if we will create a hardware-loop: 2106 if (!isHardwareLoopProfitable(L, SE, AC, TLI, HWLoopInfo)) { 2107 LLVM_DEBUG(dbgs() << "preferPredicateOverEpilogue: hardware-loop is not " 2108 "profitable.\n"); 2109 return false; 2110 } 2111 2112 if (!HWLoopInfo.isHardwareLoopCandidate(SE, *LI, *DT)) { 2113 LLVM_DEBUG(dbgs() << "preferPredicateOverEpilogue: hardware-loop is not " 2114 "a candidate.\n"); 2115 return false; 2116 } 2117 2118 return canTailPredicateLoop(L, LI, SE, DL, LAI); 2119 } 2120 2121 bool ARMTTIImpl::emitGetActiveLaneMask() const { 2122 if (!ST->hasMVEIntegerOps() || !EnableTailPredication) 2123 return false; 2124 2125 // Intrinsic @llvm.get.active.lane.mask is supported. 2126 // It is used in the MVETailPredication pass, which requires the number of 2127 // elements processed by this vector loop to setup the tail-predicated 2128 // loop. 2129 return true; 2130 } 2131 void ARMTTIImpl::getUnrollingPreferences(Loop *L, ScalarEvolution &SE, 2132 TTI::UnrollingPreferences &UP) { 2133 // Enable Upper bound unrolling universally, not dependant upon the conditions 2134 // below. 2135 UP.UpperBound = true; 2136 2137 // Only currently enable these preferences for M-Class cores. 2138 if (!ST->isMClass()) 2139 return BasicTTIImplBase::getUnrollingPreferences(L, SE, UP); 2140 2141 // Disable loop unrolling for Oz and Os. 2142 UP.OptSizeThreshold = 0; 2143 UP.PartialOptSizeThreshold = 0; 2144 if (L->getHeader()->getParent()->hasOptSize()) 2145 return; 2146 2147 SmallVector<BasicBlock*, 4> ExitingBlocks; 2148 L->getExitingBlocks(ExitingBlocks); 2149 LLVM_DEBUG(dbgs() << "Loop has:\n" 2150 << "Blocks: " << L->getNumBlocks() << "\n" 2151 << "Exit blocks: " << ExitingBlocks.size() << "\n"); 2152 2153 // Only allow another exit other than the latch. This acts as an early exit 2154 // as it mirrors the profitability calculation of the runtime unroller. 2155 if (ExitingBlocks.size() > 2) 2156 return; 2157 2158 // Limit the CFG of the loop body for targets with a branch predictor. 2159 // Allowing 4 blocks permits if-then-else diamonds in the body. 2160 if (ST->hasBranchPredictor() && L->getNumBlocks() > 4) 2161 return; 2162 2163 // Don't unroll vectorized loops, including the remainder loop 2164 if (getBooleanLoopAttribute(L, "llvm.loop.isvectorized")) 2165 return; 2166 2167 // Scan the loop: don't unroll loops with calls as this could prevent 2168 // inlining. 2169 InstructionCost Cost = 0; 2170 for (auto *BB : L->getBlocks()) { 2171 for (auto &I : *BB) { 2172 // Don't unroll vectorised loop. MVE does not benefit from it as much as 2173 // scalar code. 
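      // For example (illustrative): a loop already containing <4 x i32>
      // arithmetic produced by the vectorizer is left alone here, while a
      // small scalar loop falls through to the size-based heuristics below.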
2174 if (I.getType()->isVectorTy()) 2175 return; 2176 2177 if (isa<CallInst>(I) || isa<InvokeInst>(I)) { 2178 if (const Function *F = cast<CallBase>(I).getCalledFunction()) { 2179 if (!isLoweredToCall(F)) 2180 continue; 2181 } 2182 return; 2183 } 2184 2185 SmallVector<const Value*, 4> Operands(I.operand_values()); 2186 Cost += 2187 getUserCost(&I, Operands, TargetTransformInfo::TCK_SizeAndLatency); 2188 } 2189 } 2190 2191 LLVM_DEBUG(dbgs() << "Cost of loop: " << Cost << "\n"); 2192 2193 UP.Partial = true; 2194 UP.Runtime = true; 2195 UP.UnrollRemainder = true; 2196 UP.DefaultUnrollRuntimeCount = 4; 2197 UP.UnrollAndJam = true; 2198 UP.UnrollAndJamInnerLoopThreshold = 60; 2199 2200 // Force unrolling small loops can be very useful because of the branch 2201 // taken cost of the backedge. 2202 if (Cost < 12) 2203 UP.Force = true; 2204 } 2205 2206 void ARMTTIImpl::getPeelingPreferences(Loop *L, ScalarEvolution &SE, 2207 TTI::PeelingPreferences &PP) { 2208 BaseT::getPeelingPreferences(L, SE, PP); 2209 } 2210 2211 bool ARMTTIImpl::preferInLoopReduction(unsigned Opcode, Type *Ty, 2212 TTI::ReductionFlags Flags) const { 2213 if (!ST->hasMVEIntegerOps()) 2214 return false; 2215 2216 unsigned ScalarBits = Ty->getScalarSizeInBits(); 2217 switch (Opcode) { 2218 case Instruction::Add: 2219 return ScalarBits <= 64; 2220 default: 2221 return false; 2222 } 2223 } 2224 2225 bool ARMTTIImpl::preferPredicatedReductionSelect( 2226 unsigned Opcode, Type *Ty, TTI::ReductionFlags Flags) const { 2227 if (!ST->hasMVEIntegerOps()) 2228 return false; 2229 return true; 2230 } 2231
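// Note (illustrative, not a statement of the exact lowering): the two hooks
// above bias the vectorizer towards MVE's in-loop reduction patterns, e.g. an
// integer add reduction is expected to map onto VADDV/VADDVA, and predicated
// reduction selects can then be folded into the tail-predicated form rather
// than being kept as explicit vector selects.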