//===- ARMTargetTransformInfo.cpp - ARM specific TTI ----------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

#include "ARMTargetTransformInfo.h"
#include "ARMSubtarget.h"
#include "MCTargetDesc/ARMAddressingModes.h"
#include "llvm/ADT/APInt.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/Analysis/LoopInfo.h"
#include "llvm/CodeGen/CostTable.h"
#include "llvm/CodeGen/ISDOpcodes.h"
#include "llvm/CodeGen/ValueTypes.h"
#include "llvm/IR/BasicBlock.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/Instruction.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/IntrinsicsARM.h"
#include "llvm/IR/PatternMatch.h"
#include "llvm/IR/Type.h"
#include "llvm/MC/SubtargetFeature.h"
#include "llvm/Support/Casting.h"
#include "llvm/Support/MachineValueType.h"
#include "llvm/Target/TargetMachine.h"
#include "llvm/Transforms/InstCombine/InstCombiner.h"
#include "llvm/Transforms/Utils/Local.h"
#include "llvm/Transforms/Utils/LoopUtils.h"
#include <algorithm>
#include <cassert>
#include <cstdint>
#include <utility>

using namespace llvm;

#define DEBUG_TYPE "armtti"

// Command-line knob to suppress formation of masked loads/stores (on by
// default when the target supports them).
static cl::opt<bool> EnableMaskedLoadStores(
    "enable-arm-maskedldst", cl::Hidden, cl::init(true),
    cl::desc("Enable the generation of masked loads and stores"));

static cl::opt<bool> DisableLowOverheadLoops(
    "disable-arm-loloops", cl::Hidden, cl::init(false),
    cl::desc("Disable the generation of low-overhead loops"));

// Shared knobs defined elsewhere in the ARM backend; referenced here so the
// cost model can honour them.
extern cl::opt<TailPredication::Mode> EnableTailPredication;

extern cl::opt<bool> EnableMaskedGatherScatters;

/// Convert a
/// vector load intrinsic into a simple llvm load instruction.
/// This is beneficial when the underlying object being addressed comes
/// from a constant, since we get constant-folding for free.
static Value *simplifyNeonVld1(const IntrinsicInst &II, unsigned MemAlign,
                               InstCombiner::BuilderTy &Builder) {
  // The alignment operand of the intrinsic must be a constant for us to fold.
  auto *IntrAlign = dyn_cast<ConstantInt>(II.getArgOperand(1));

  if (!IntrAlign)
    return nullptr;

  // Use the larger of the intrinsic's declared alignment and the alignment
  // known from the pointer itself.
  unsigned Alignment = IntrAlign->getLimitedValue() < MemAlign
                           ? MemAlign
                           : IntrAlign->getLimitedValue();

  if (!isPowerOf2_32(Alignment))
    return nullptr;

  auto *BCastInst = Builder.CreateBitCast(II.getArgOperand(0),
                                          PointerType::get(II.getType(), 0));
  return Builder.CreateAlignedLoad(II.getType(), BCastInst, Align(Alignment));
}

bool ARMTTIImpl::areInlineCompatible(const Function *Caller,
                                     const Function *Callee) const {
  const TargetMachine &TM = getTLI()->getTargetMachine();
  const FeatureBitset &CallerBits =
      TM.getSubtargetImpl(*Caller)->getFeatureBits();
  const FeatureBitset &CalleeBits =
      TM.getSubtargetImpl(*Callee)->getFeatureBits();

  // To inline a callee, all features not in the allowed list must match exactly.
  bool MatchExact = (CallerBits & ~InlineFeaturesAllowed) ==
                    (CalleeBits & ~InlineFeaturesAllowed);
  // For features in the allowed list, the callee's features must be a subset of
  // the callers'.
  bool MatchSubset = ((CallerBits & CalleeBits) & InlineFeaturesAllowed) ==
                     (CalleeBits & InlineFeaturesAllowed);
  return MatchExact && MatchSubset;
}

// Favour backedge-indexed addressing only for single-block Thumb2 M-class
// loops, when not optimizing for size and MVE is not available.
bool ARMTTIImpl::shouldFavorBackedgeIndex(const Loop *L) const {
  if (L->getHeader()->getParent()->hasOptSize())
    return false;
  if (ST->hasMVEIntegerOps())
    return false;
  return ST->isMClass() && ST->isThumb2() && L->getNumBlocks() == 1;
}

// MVE benefits from post-increment addressing, so favour it there.
bool ARMTTIImpl::shouldFavorPostInc() const {
  if (ST->hasMVEIntegerOps())
    return true;
  return false;
}

// Target-specific instcombine simplifications for ARM NEON/MVE intrinsics.
// Returns the replacement instruction, &II if II was modified in place, or
// None if no fold applies.
Optional<Instruction *>
ARMTTIImpl::instCombineIntrinsic(InstCombiner &IC, IntrinsicInst &II) const {
  Intrinsic::ID IID = II.getIntrinsicID();
  switch (IID) {
  default:
    break;
  case Intrinsic::arm_neon_vld1: {
    // Fold a vld1 whose pointer is sufficiently aligned into a plain aligned
    // load (enables constant folding downstream).
    Align MemAlign =
        getKnownAlignment(II.getArgOperand(0), IC.getDataLayout(), &II,
                          &IC.getAssumptionCache(), &IC.getDominatorTree());
    if (Value *V = simplifyNeonVld1(II, MemAlign.value(), IC.Builder)) {
      return IC.replaceInstUsesWith(II, V);
    }
    break;
  }

  case Intrinsic::arm_neon_vld2:
  case Intrinsic::arm_neon_vld3:
  case Intrinsic::arm_neon_vld4:
  case Intrinsic::arm_neon_vld2lane:
  case Intrinsic::arm_neon_vld3lane:
  case Intrinsic::arm_neon_vld4lane:
  case Intrinsic::arm_neon_vst1:
  case Intrinsic::arm_neon_vst2:
  case Intrinsic::arm_neon_vst3:
  case Intrinsic::arm_neon_vst4:
  case Intrinsic::arm_neon_vst2lane:
  case Intrinsic::arm_neon_vst3lane:
  case Intrinsic::arm_neon_vst4lane: {
    // These intrinsics carry their alignment as the last operand; raise it
    // when the pointer is known to be more aligned than the operand claims.
    Align MemAlign =
        getKnownAlignment(II.getArgOperand(0), IC.getDataLayout(), &II,
                          &IC.getAssumptionCache(), &IC.getDominatorTree());
    unsigned AlignArg = II.getNumArgOperands() - 1;
    Value *AlignArgOp = II.getArgOperand(AlignArg);
    MaybeAlign Align = cast<ConstantInt>(AlignArgOp)->getMaybeAlignValue();
    if (Align && *Align < MemAlign) {
      return IC.replaceOperand(
          II, AlignArg,
          ConstantInt::get(Type::getInt32Ty(II.getContext()), MemAlign.value(),
                           false));
    }
    break;
  }

  case Intrinsic::arm_mve_pred_i2v: {
    // i2v(v2i(x)) -> x when the types round-trip.
    Value *Arg = II.getArgOperand(0);
    Value *ArgArg;
    if (match(Arg, PatternMatch::m_Intrinsic<Intrinsic::arm_mve_pred_v2i>(
                       PatternMatch::m_Value(ArgArg))) &&
        II.getType() == ArgArg->getType()) {
      return IC.replaceInstUsesWith(II, ArgArg);
    }
    // i2v(v2i(x) ^ 0xffff) -> x ^ <all-true>: move the predicate invert into
    // the vector domain.
    Constant *XorMask;
    if (match(Arg, m_Xor(PatternMatch::m_Intrinsic<Intrinsic::arm_mve_pred_v2i>(
                             PatternMatch::m_Value(ArgArg)),
                         PatternMatch::m_Constant(XorMask))) &&
        II.getType() == ArgArg->getType()) {
      if (auto *CI = dyn_cast<ConstantInt>(XorMask)) {
        if (CI->getValue().trunc(16).isAllOnesValue()) {
          auto TrueVector = IC.Builder.CreateVectorSplat(
              cast<FixedVectorType>(II.getType())->getNumElements(),
              IC.Builder.getTrue());
          return BinaryOperator::Create(Instruction::Xor, ArgArg, TrueVector);
        }
      }
    }
    // Only the low 16 bits of the scalar predicate are meaningful.
    KnownBits ScalarKnown(32);
    if (IC.SimplifyDemandedBits(&II, 0, APInt::getLowBitsSet(32, 16),
                                ScalarKnown, 0)) {
      return &II;
    }
    break;
  }
  case Intrinsic::arm_mve_pred_v2i: {
    // v2i(i2v(x)) -> x.
    Value *Arg = II.getArgOperand(0);
    Value *ArgArg;
    if (match(Arg, PatternMatch::m_Intrinsic<Intrinsic::arm_mve_pred_i2v>(
                       PatternMatch::m_Value(ArgArg)))) {
      return IC.replaceInstUsesWith(II, ArgArg);
    }
    // The result is a 16-bit mask; attach !range metadata so later passes
    // know the upper bits are zero.
    if (!II.getMetadata(LLVMContext::MD_range)) {
      Type *IntTy32 = Type::getInt32Ty(II.getContext());
      Metadata *M[] = {
          ConstantAsMetadata::get(ConstantInt::get(IntTy32, 0)),
          ConstantAsMetadata::get(ConstantInt::get(IntTy32, 0xFFFF))};
      II.setMetadata(LLVMContext::MD_range, MDNode::get(II.getContext(), M));
      return &II;
    }
    break;
  }
  case Intrinsic::arm_mve_vadc:
  case Intrinsic::arm_mve_vadc_predicated: {
    // Only bit 29 of the carry-in operand is demanded (the carry flag
    // position); the predicated form has the carry as operand 3.
    unsigned CarryOp =
        (II.getIntrinsicID() == Intrinsic::arm_mve_vadc_predicated) ? 3 : 2;
    assert(II.getArgOperand(CarryOp)->getType()->getScalarSizeInBits() == 32 &&
           "Bad type for intrinsic!");

    KnownBits CarryKnown(32);
    if (IC.SimplifyDemandedBits(&II, CarryOp, APInt::getOneBitSet(32, 29),
                                CarryKnown)) {
      return &II;
    }
    break;
  }
  }
  return None;
}

// Cost (in instructions) of materialising the integer immediate Imm of type
// Ty, by subtarget: ARM, Thumb2, or Thumb1.
int ARMTTIImpl::getIntImmCost(const APInt &Imm, Type *Ty,
                              TTI::TargetCostKind CostKind) {
  assert(Ty->isIntegerTy());

  unsigned Bits = Ty->getPrimitiveSizeInBits();
  if (Bits == 0 || Imm.getActiveBits() >= 64)
    return 4;

  int64_t SImmVal = Imm.getSExtValue();
  uint64_t ZImmVal = Imm.getZExtValue();
  if (!ST->isThumb()) {
    // ARM mode: MOVW range, shifter-operand immediate, or MVN of one.
    if ((SImmVal >= 0 && SImmVal < 65536) ||
        (ARM_AM::getSOImmVal(ZImmVal) != -1) ||
        (ARM_AM::getSOImmVal(~ZImmVal) != -1))
      return 1;
    return ST->hasV6T2Ops() ? 2 : 3;
  }
  if (ST->isThumb2()) {
    // Thumb2: MOVW range, T2 modified immediate, or MVN of one.
    if ((SImmVal >= 0 && SImmVal < 65536) ||
        (ARM_AM::getT2SOImmVal(ZImmVal) != -1) ||
        (ARM_AM::getT2SOImmVal(~ZImmVal) != -1))
      return 1;
    return ST->hasV6T2Ops() ? 2 : 3;
  }
  // Thumb1, any i8 imm cost 1.
  if (Bits == 8 || (SImmVal >= 0 && SImmVal < 256))
    return 1;
  if ((~SImmVal < 256) || ARM_AM::isThumbImmShiftedVal(ZImmVal))
    return 2;
  // Load from constantpool.
  return 3;
}

// Constants smaller than 256 fit in the immediate field of
// Thumb1 instructions so we return a zero cost and 1 otherwise.
int ARMTTIImpl::getIntImmCodeSizeCost(unsigned Opcode, unsigned Idx,
                                      const APInt &Imm, Type *Ty) {
  if (Imm.isNonNegative() && Imm.getLimitedValue() < 256)
    return 0;

  return 1;
}

// Cost of Imm when it appears as operand Idx of an instruction with the
// given Opcode; many ARM instructions absorb certain immediates for free.
int ARMTTIImpl::getIntImmCostInst(unsigned Opcode, unsigned Idx, const APInt &Imm,
                                  Type *Ty, TTI::TargetCostKind CostKind) {
  // Division by a constant can be turned into multiplication, but only if we
  // know it's constant.
  // So it's not so much that the immediate is cheap (it's
  // not), but that the alternative is worse.
  // FIXME: this is probably unneeded with GlobalISel.
  if ((Opcode == Instruction::SDiv || Opcode == Instruction::UDiv ||
       Opcode == Instruction::SRem || Opcode == Instruction::URem) &&
      Idx == 1)
    return 0;

  if (Opcode == Instruction::And) {
    // UXTB/UXTH
    if (Imm == 255 || Imm == 65535)
      return 0;
    // Conversion to BIC is free, and means we can use ~Imm instead.
    return std::min(getIntImmCost(Imm, Ty, CostKind),
                    getIntImmCost(~Imm, Ty, CostKind));
  }

  if (Opcode == Instruction::Add)
    // Conversion to SUB is free, and means we can use -Imm instead.
    return std::min(getIntImmCost(Imm, Ty, CostKind),
                    getIntImmCost(-Imm, Ty, CostKind));

  if (Opcode == Instruction::ICmp && Imm.isNegative() &&
      Ty->getIntegerBitWidth() == 32) {
    int64_t NegImm = -Imm.getSExtValue();
    if (ST->isThumb2() && NegImm < 1<<12)
      // icmp X, #-C -> cmn X, #C
      return 0;
    if (ST->isThumb() && NegImm < 1<<8)
      // icmp X, #-C -> adds X, #C
      return 0;
  }

  // xor a, -1 can always be folded to MVN
  if (Opcode == Instruction::Xor && Imm.isAllOnesValue())
    return 0;

  return getIntImmCost(Imm, Ty, CostKind);
}

// Cost of control-flow instructions (branches etc.).
int ARMTTIImpl::getCFInstrCost(unsigned Opcode, TTI::TargetCostKind CostKind) {
  if (CostKind == TTI::TCK_RecipThroughput &&
      (ST->hasNEON() || ST->hasMVEIntegerOps())) {
    // FIXME: The vectorizer is highly sensitive to the cost of these
    // instructions, which suggests that it may be using the costs incorrectly.
    // But, for now, just make them free to avoid performance regressions for
    // vector targets.
    return 0;
  }
  return BaseT::getCFInstrCost(Opcode, CostKind);
}

// Cost of cast/conversion instructions, driven by per-subtarget tables keyed
// on the ISD opcode and the (simple) source/destination value types.
int ARMTTIImpl::getCastInstrCost(unsigned Opcode, Type *Dst, Type *Src,
                                 TTI::CastContextHint CCH,
                                 TTI::TargetCostKind CostKind,
                                 const Instruction *I) {
  int ISD = TLI->InstructionOpcodeToISD(Opcode);
  assert(ISD && "Invalid opcode");

  // TODO: Allow non-throughput costs that aren't binary.
  auto AdjustCost = [&CostKind](int Cost) {
    if (CostKind != TTI::TCK_RecipThroughput)
      return Cost == 0 ? 0 : 1;
    return Cost;
  };
  // True when hardware support for the scalar FP element type is present.
  auto IsLegalFPType = [this](EVT VT) {
    EVT EltVT = VT.getScalarType();
    return (EltVT == MVT::f32 && ST->hasVFP2Base()) ||
           (EltVT == MVT::f64 && ST->hasFP64()) ||
           (EltVT == MVT::f16 && ST->hasFullFP16());
  };

  EVT SrcTy = TLI->getValueType(DL, Src);
  EVT DstTy = TLI->getValueType(DL, Dst);

  if (!SrcTy.isSimple() || !DstTy.isSimple())
    return AdjustCost(
        BaseT::getCastInstrCost(Opcode, Dst, Src, CCH, CostKind, I));

  // Extending masked load/Truncating masked stores is expensive because we
  // currently don't split them. This means that we'll likely end up
  // loading/storing each element individually (hence the high cost).
  if ((ST->hasMVEIntegerOps() &&
       (Opcode == Instruction::Trunc || Opcode == Instruction::ZExt ||
        Opcode == Instruction::SExt)) ||
      (ST->hasMVEFloatOps() &&
       (Opcode == Instruction::FPExt || Opcode == Instruction::FPTrunc) &&
       IsLegalFPType(SrcTy) && IsLegalFPType(DstTy)))
    if (CCH == TTI::CastContextHint::Masked && DstTy.getSizeInBits() > 128)
      return 2 * DstTy.getVectorNumElements() * ST->getMVEVectorCostFactor();

  // The extend of other kinds of load is free
  if (CCH == TTI::CastContextHint::Normal ||
      CCH == TTI::CastContextHint::Masked) {
    static const TypeConversionCostTblEntry LoadConversionTbl[] = {
        {ISD::SIGN_EXTEND, MVT::i32, MVT::i16, 0},
        {ISD::ZERO_EXTEND, MVT::i32, MVT::i16, 0},
        {ISD::SIGN_EXTEND, MVT::i32, MVT::i8, 0},
        {ISD::ZERO_EXTEND, MVT::i32, MVT::i8, 0},
        {ISD::SIGN_EXTEND, MVT::i16, MVT::i8, 0},
        {ISD::ZERO_EXTEND, MVT::i16, MVT::i8, 0},
        {ISD::SIGN_EXTEND, MVT::i64, MVT::i32, 1},
        {ISD::ZERO_EXTEND, MVT::i64, MVT::i32, 1},
        {ISD::SIGN_EXTEND, MVT::i64, MVT::i16, 1},
        {ISD::ZERO_EXTEND, MVT::i64, MVT::i16, 1},
        {ISD::SIGN_EXTEND, MVT::i64, MVT::i8, 1},
        {ISD::ZERO_EXTEND, MVT::i64, MVT::i8, 1},
    };
    if (const auto *Entry = ConvertCostTableLookup(
            LoadConversionTbl, ISD, DstTy.getSimpleVT(), SrcTy.getSimpleVT()))
      return AdjustCost(Entry->Cost);

    static const TypeConversionCostTblEntry MVELoadConversionTbl[] = {
        {ISD::SIGN_EXTEND, MVT::v4i32, MVT::v4i16, 0},
        {ISD::ZERO_EXTEND, MVT::v4i32, MVT::v4i16, 0},
        {ISD::SIGN_EXTEND, MVT::v4i32, MVT::v4i8, 0},
        {ISD::ZERO_EXTEND, MVT::v4i32, MVT::v4i8, 0},
        {ISD::SIGN_EXTEND, MVT::v8i16, MVT::v8i8, 0},
        {ISD::ZERO_EXTEND, MVT::v8i16, MVT::v8i8, 0},
        // The following extend from a legal type to an illegal type, so need to
        // split the load. This introduced an extra load operation, but the
        // extend is still "free".
        {ISD::SIGN_EXTEND, MVT::v8i32, MVT::v8i16, 1},
        {ISD::ZERO_EXTEND, MVT::v8i32, MVT::v8i16, 1},
        {ISD::SIGN_EXTEND, MVT::v16i32, MVT::v16i8, 3},
        {ISD::ZERO_EXTEND, MVT::v16i32, MVT::v16i8, 3},
        {ISD::SIGN_EXTEND, MVT::v16i16, MVT::v16i8, 1},
        {ISD::ZERO_EXTEND, MVT::v16i16, MVT::v16i8, 1},
    };
    if (SrcTy.isVector() && ST->hasMVEIntegerOps()) {
      if (const auto *Entry =
              ConvertCostTableLookup(MVELoadConversionTbl, ISD,
                                     DstTy.getSimpleVT(), SrcTy.getSimpleVT()))
        return AdjustCost(Entry->Cost * ST->getMVEVectorCostFactor());
    }

    static const TypeConversionCostTblEntry MVEFLoadConversionTbl[] = {
        // FPExtends are similar but also require the VCVT instructions.
        {ISD::FP_EXTEND, MVT::v4f32, MVT::v4f16, 1},
        {ISD::FP_EXTEND, MVT::v8f32, MVT::v8f16, 3},
    };
    if (SrcTy.isVector() && ST->hasMVEFloatOps()) {
      if (const auto *Entry =
              ConvertCostTableLookup(MVEFLoadConversionTbl, ISD,
                                     DstTy.getSimpleVT(), SrcTy.getSimpleVT()))
        return AdjustCost(Entry->Cost * ST->getMVEVectorCostFactor());
    }

    // The truncate of a store is free. This is the mirror of extends above.
    static const TypeConversionCostTblEntry MVEStoreConversionTbl[] = {
        {ISD::TRUNCATE, MVT::v4i32, MVT::v4i16, 0},
        {ISD::TRUNCATE, MVT::v4i32, MVT::v4i8, 0},
        {ISD::TRUNCATE, MVT::v8i16, MVT::v8i8, 0},
        {ISD::TRUNCATE, MVT::v8i32, MVT::v8i16, 1},
        {ISD::TRUNCATE, MVT::v16i32, MVT::v16i8, 3},
        {ISD::TRUNCATE, MVT::v16i16, MVT::v16i8, 1},
    };
    if (SrcTy.isVector() && ST->hasMVEIntegerOps()) {
      if (const auto *Entry =
              ConvertCostTableLookup(MVEStoreConversionTbl, ISD,
                                     SrcTy.getSimpleVT(), DstTy.getSimpleVT()))
        return AdjustCost(Entry->Cost * ST->getMVEVectorCostFactor());
    }

    static const TypeConversionCostTblEntry MVEFStoreConversionTbl[] = {
        {ISD::FP_ROUND, MVT::v4f32, MVT::v4f16, 1},
        {ISD::FP_ROUND, MVT::v8f32, MVT::v8f16, 3},
    };
    if (SrcTy.isVector() && ST->hasMVEFloatOps()) {
      if (const auto *Entry =
              ConvertCostTableLookup(MVEFStoreConversionTbl, ISD,
                                     SrcTy.getSimpleVT(), DstTy.getSimpleVT()))
        return AdjustCost(Entry->Cost * ST->getMVEVectorCostFactor());
    }
  }

  // NEON vector operations that can extend their inputs.
  if ((ISD == ISD::SIGN_EXTEND || ISD == ISD::ZERO_EXTEND) &&
      I && I->hasOneUse() && ST->hasNEON() && SrcTy.isVector()) {
    static const TypeConversionCostTblEntry NEONDoubleWidthTbl[] = {
      // vaddl
      { ISD::ADD, MVT::v4i32, MVT::v4i16, 0 },
      { ISD::ADD, MVT::v8i16, MVT::v8i8, 0 },
      // vsubl
      { ISD::SUB, MVT::v4i32, MVT::v4i16, 0 },
      { ISD::SUB, MVT::v8i16, MVT::v8i8, 0 },
      // vmull
      { ISD::MUL, MVT::v4i32, MVT::v4i16, 0 },
      { ISD::MUL, MVT::v8i16, MVT::v8i8, 0 },
      // vshll
      { ISD::SHL, MVT::v4i32, MVT::v4i16, 0 },
      { ISD::SHL, MVT::v8i16, MVT::v8i8, 0 },
    };

    // The table is keyed on the single user's opcode: the extend is folded
    // into that widening instruction.
    auto *User = cast<Instruction>(*I->user_begin());
    int UserISD = TLI->InstructionOpcodeToISD(User->getOpcode());
    if (auto *Entry = ConvertCostTableLookup(NEONDoubleWidthTbl, UserISD,
                                             DstTy.getSimpleVT(),
                                             SrcTy.getSimpleVT())) {
      return AdjustCost(Entry->Cost);
    }
  }

  // Single to/from double precision conversions.
  if (Src->isVectorTy() && ST->hasNEON() &&
      ((ISD == ISD::FP_ROUND && SrcTy.getScalarType() == MVT::f64 &&
        DstTy.getScalarType() == MVT::f32) ||
       (ISD == ISD::FP_EXTEND && SrcTy.getScalarType() == MVT::f32 &&
        DstTy.getScalarType() == MVT::f64))) {
    static const CostTblEntry NEONFltDblTbl[] = {
        // Vector fptrunc/fpext conversions.
        {ISD::FP_ROUND, MVT::v2f64, 2},
        {ISD::FP_EXTEND, MVT::v2f32, 2},
        {ISD::FP_EXTEND, MVT::v4f32, 4}};

    std::pair<int, MVT> LT = TLI->getTypeLegalizationCost(DL, Src);
    if (const auto *Entry = CostTableLookup(NEONFltDblTbl, ISD, LT.second))
      return AdjustCost(LT.first * Entry->Cost);
  }

  // Some arithmetic, load and store operations have specific instructions
  // to cast up/down their types automatically at no extra cost.
  // TODO: Get these tables to know at least what the related operations are.
  static const TypeConversionCostTblEntry NEONVectorConversionTbl[] = {
    { ISD::SIGN_EXTEND, MVT::v4i32, MVT::v4i16, 1 },
    { ISD::ZERO_EXTEND, MVT::v4i32, MVT::v4i16, 1 },
    { ISD::SIGN_EXTEND, MVT::v2i64, MVT::v2i32, 1 },
    { ISD::ZERO_EXTEND, MVT::v2i64, MVT::v2i32, 1 },
    { ISD::TRUNCATE,    MVT::v4i32, MVT::v4i64, 0 },
    { ISD::TRUNCATE,    MVT::v4i16, MVT::v4i32, 1 },

    // The number of vmovl instructions for the extension.
    { ISD::SIGN_EXTEND, MVT::v8i16, MVT::v8i8,  1 },
    { ISD::ZERO_EXTEND, MVT::v8i16, MVT::v8i8,  1 },
    { ISD::SIGN_EXTEND, MVT::v4i32, MVT::v4i8,  2 },
    { ISD::ZERO_EXTEND, MVT::v4i32, MVT::v4i8,  2 },
    { ISD::SIGN_EXTEND, MVT::v2i64, MVT::v2i8,  3 },
    { ISD::ZERO_EXTEND, MVT::v2i64, MVT::v2i8,  3 },
    { ISD::SIGN_EXTEND, MVT::v2i64, MVT::v2i16, 2 },
    { ISD::ZERO_EXTEND, MVT::v2i64, MVT::v2i16, 2 },
    { ISD::SIGN_EXTEND, MVT::v4i64, MVT::v4i16, 3 },
    { ISD::ZERO_EXTEND, MVT::v4i64, MVT::v4i16, 3 },
    { ISD::SIGN_EXTEND, MVT::v8i32, MVT::v8i8, 3 },
    { ISD::ZERO_EXTEND, MVT::v8i32, MVT::v8i8, 3 },
    { ISD::SIGN_EXTEND, MVT::v8i64, MVT::v8i8, 7 },
    { ISD::ZERO_EXTEND, MVT::v8i64, MVT::v8i8, 7 },
    { ISD::SIGN_EXTEND, MVT::v8i64, MVT::v8i16, 6 },
    { ISD::ZERO_EXTEND, MVT::v8i64, MVT::v8i16, 6 },
    { ISD::SIGN_EXTEND, MVT::v16i32, MVT::v16i8, 6 },
    { ISD::ZERO_EXTEND, MVT::v16i32, MVT::v16i8, 6 },

    // Operations that we legalize using splitting.
    { ISD::TRUNCATE,    MVT::v16i8, MVT::v16i32, 6 },
    { ISD::TRUNCATE,    MVT::v8i8, MVT::v8i32, 3 },

    // Vector float <-> i32 conversions.
    { ISD::SINT_TO_FP,  MVT::v4f32, MVT::v4i32, 1 },
    { ISD::UINT_TO_FP,  MVT::v4f32, MVT::v4i32, 1 },

    { ISD::SINT_TO_FP,  MVT::v2f32, MVT::v2i8, 3 },
    { ISD::UINT_TO_FP,  MVT::v2f32, MVT::v2i8, 3 },
    { ISD::SINT_TO_FP,  MVT::v2f32, MVT::v2i16, 2 },
    { ISD::UINT_TO_FP,  MVT::v2f32, MVT::v2i16, 2 },
    { ISD::SINT_TO_FP,  MVT::v2f32, MVT::v2i32, 1 },
    { ISD::UINT_TO_FP,  MVT::v2f32, MVT::v2i32, 1 },
    { ISD::SINT_TO_FP,  MVT::v4f32, MVT::v4i1, 3 },
    { ISD::UINT_TO_FP,  MVT::v4f32, MVT::v4i1, 3 },
    { ISD::SINT_TO_FP,  MVT::v4f32, MVT::v4i8, 3 },
    { ISD::UINT_TO_FP,  MVT::v4f32, MVT::v4i8, 3 },
    { ISD::SINT_TO_FP,  MVT::v4f32, MVT::v4i16, 2 },
    { ISD::UINT_TO_FP,  MVT::v4f32, MVT::v4i16, 2 },
    { ISD::SINT_TO_FP,  MVT::v8f32, MVT::v8i16, 4 },
    { ISD::UINT_TO_FP,  MVT::v8f32, MVT::v8i16, 4 },
    { ISD::SINT_TO_FP,  MVT::v8f32, MVT::v8i32, 2 },
    { ISD::UINT_TO_FP,  MVT::v8f32, MVT::v8i32, 2 },
    { ISD::SINT_TO_FP,  MVT::v16f32, MVT::v16i16, 8 },
    { ISD::UINT_TO_FP,  MVT::v16f32, MVT::v16i16, 8 },
    { ISD::SINT_TO_FP,  MVT::v16f32, MVT::v16i32, 4 },
    { ISD::UINT_TO_FP,  MVT::v16f32, MVT::v16i32, 4 },

    { ISD::FP_TO_SINT,  MVT::v4i32, MVT::v4f32, 1 },
    { ISD::FP_TO_UINT,  MVT::v4i32, MVT::v4f32, 1 },
    { ISD::FP_TO_SINT,  MVT::v4i8, MVT::v4f32, 3 },
    { ISD::FP_TO_UINT,  MVT::v4i8, MVT::v4f32, 3 },
    { ISD::FP_TO_SINT,  MVT::v4i16, MVT::v4f32, 2 },
    { ISD::FP_TO_UINT,  MVT::v4i16, MVT::v4f32, 2 },

    // Vector double <-> i32 conversions.
    { ISD::SINT_TO_FP,  MVT::v2f64, MVT::v2i32, 2 },
    { ISD::UINT_TO_FP,  MVT::v2f64, MVT::v2i32, 2 },

    { ISD::SINT_TO_FP,  MVT::v2f64, MVT::v2i8, 4 },
    { ISD::UINT_TO_FP,  MVT::v2f64, MVT::v2i8, 4 },
    { ISD::SINT_TO_FP,  MVT::v2f64, MVT::v2i16, 3 },
    { ISD::UINT_TO_FP,  MVT::v2f64, MVT::v2i16, 3 },
    { ISD::SINT_TO_FP,  MVT::v2f64, MVT::v2i32, 2 },
    { ISD::UINT_TO_FP,  MVT::v2f64, MVT::v2i32, 2 },

    { ISD::FP_TO_SINT,  MVT::v2i32, MVT::v2f64, 2 },
    { ISD::FP_TO_UINT,  MVT::v2i32, MVT::v2f64, 2 },
    { ISD::FP_TO_SINT,  MVT::v8i16, MVT::v8f32, 4 },
    { ISD::FP_TO_UINT,  MVT::v8i16, MVT::v8f32, 4 },
    { ISD::FP_TO_SINT,  MVT::v16i16, MVT::v16f32, 8 },
    { ISD::FP_TO_UINT,  MVT::v16i16, MVT::v16f32, 8 }
  };

  if (SrcTy.isVector() && ST->hasNEON()) {
    if (const auto *Entry = ConvertCostTableLookup(NEONVectorConversionTbl, ISD,
                                                   DstTy.getSimpleVT(),
                                                   SrcTy.getSimpleVT()))
      return AdjustCost(Entry->Cost);
  }

  // Scalar float to integer conversions.
  static const TypeConversionCostTblEntry NEONFloatConversionTbl[] = {
    { ISD::FP_TO_SINT,  MVT::i1, MVT::f32, 2 },
    { ISD::FP_TO_UINT,  MVT::i1, MVT::f32, 2 },
    { ISD::FP_TO_SINT,  MVT::i1, MVT::f64, 2 },
    { ISD::FP_TO_UINT,  MVT::i1, MVT::f64, 2 },
    { ISD::FP_TO_SINT,  MVT::i8, MVT::f32, 2 },
    { ISD::FP_TO_UINT,  MVT::i8, MVT::f32, 2 },
    { ISD::FP_TO_SINT,  MVT::i8, MVT::f64, 2 },
    { ISD::FP_TO_UINT,  MVT::i8, MVT::f64, 2 },
    { ISD::FP_TO_SINT,  MVT::i16, MVT::f32, 2 },
    { ISD::FP_TO_UINT,  MVT::i16, MVT::f32, 2 },
    { ISD::FP_TO_SINT,  MVT::i16, MVT::f64, 2 },
    { ISD::FP_TO_UINT,  MVT::i16, MVT::f64, 2 },
    { ISD::FP_TO_SINT,  MVT::i32, MVT::f32, 2 },
    { ISD::FP_TO_UINT,  MVT::i32, MVT::f32, 2 },
    { ISD::FP_TO_SINT,  MVT::i32, MVT::f64, 2 },
    { ISD::FP_TO_UINT,  MVT::i32, MVT::f64, 2 },
    { ISD::FP_TO_SINT,  MVT::i64, MVT::f32, 10 },
    { ISD::FP_TO_UINT,  MVT::i64, MVT::f32, 10 },
    { ISD::FP_TO_SINT,  MVT::i64, MVT::f64, 10 },
    { ISD::FP_TO_UINT,  MVT::i64, MVT::f64, 10 }
  };
  if (SrcTy.isFloatingPoint() && ST->hasNEON()) {
    if (const auto *Entry = ConvertCostTableLookup(NEONFloatConversionTbl, ISD,
                                                   DstTy.getSimpleVT(),
                                                   SrcTy.getSimpleVT()))
      return AdjustCost(Entry->Cost);
  }

  // Scalar integer to float conversions.
  static const TypeConversionCostTblEntry NEONIntegerConversionTbl[] = {
    { ISD::SINT_TO_FP,  MVT::f32, MVT::i1, 2 },
    { ISD::UINT_TO_FP,  MVT::f32, MVT::i1, 2 },
    { ISD::SINT_TO_FP,  MVT::f64, MVT::i1, 2 },
    { ISD::UINT_TO_FP,  MVT::f64, MVT::i1, 2 },
    { ISD::SINT_TO_FP,  MVT::f32, MVT::i8, 2 },
    { ISD::UINT_TO_FP,  MVT::f32, MVT::i8, 2 },
    { ISD::SINT_TO_FP,  MVT::f64, MVT::i8, 2 },
    { ISD::UINT_TO_FP,  MVT::f64, MVT::i8, 2 },
    { ISD::SINT_TO_FP,  MVT::f32, MVT::i16, 2 },
    { ISD::UINT_TO_FP,  MVT::f32, MVT::i16, 2 },
    { ISD::SINT_TO_FP,  MVT::f64, MVT::i16, 2 },
    { ISD::UINT_TO_FP,  MVT::f64, MVT::i16, 2 },
    { ISD::SINT_TO_FP,  MVT::f32, MVT::i32, 2 },
    { ISD::UINT_TO_FP,  MVT::f32, MVT::i32, 2 },
    { ISD::SINT_TO_FP,  MVT::f64, MVT::i32, 2 },
    { ISD::UINT_TO_FP,  MVT::f64, MVT::i32, 2 },
    { ISD::SINT_TO_FP,  MVT::f32, MVT::i64, 10 },
    { ISD::UINT_TO_FP,  MVT::f32, MVT::i64, 10 },
    { ISD::SINT_TO_FP,  MVT::f64, MVT::i64, 10 },
    { ISD::UINT_TO_FP,  MVT::f64, MVT::i64, 10 }
  };

  if (SrcTy.isInteger() && ST->hasNEON()) {
    if (const auto *Entry = ConvertCostTableLookup(NEONIntegerConversionTbl,
                                                   ISD, DstTy.getSimpleVT(),
                                                   SrcTy.getSimpleVT()))
      return AdjustCost(Entry->Cost);
  }

  // MVE extend costs, taken from codegen tests. i8->i16 or i16->i32 is one
  // instruction, i8->i32 is two. i64 zexts are an VAND with a constant, sext
  // are linearised so take more.
  static const TypeConversionCostTblEntry MVEVectorConversionTbl[] = {
    { ISD::SIGN_EXTEND, MVT::v8i16, MVT::v8i8, 1 },
    { ISD::ZERO_EXTEND, MVT::v8i16, MVT::v8i8, 1 },
    { ISD::SIGN_EXTEND, MVT::v4i32, MVT::v4i8, 2 },
    { ISD::ZERO_EXTEND, MVT::v4i32, MVT::v4i8, 2 },
    { ISD::SIGN_EXTEND, MVT::v2i64, MVT::v2i8, 10 },
    { ISD::ZERO_EXTEND, MVT::v2i64, MVT::v2i8, 2 },
    { ISD::SIGN_EXTEND, MVT::v4i32, MVT::v4i16, 1 },
    { ISD::ZERO_EXTEND, MVT::v4i32, MVT::v4i16, 1 },
    { ISD::SIGN_EXTEND, MVT::v2i64, MVT::v2i16, 10 },
    { ISD::ZERO_EXTEND, MVT::v2i64, MVT::v2i16, 2 },
    { ISD::SIGN_EXTEND, MVT::v2i64, MVT::v2i32, 8 },
    { ISD::ZERO_EXTEND, MVT::v2i64, MVT::v2i32, 2 },
  };

  if (SrcTy.isVector() && ST->hasMVEIntegerOps()) {
    if (const auto *Entry = ConvertCostTableLookup(MVEVectorConversionTbl,
                                                   ISD, DstTy.getSimpleVT(),
                                                   SrcTy.getSimpleVT()))
      return AdjustCost(Entry->Cost * ST->getMVEVectorCostFactor());
  }

  if (ISD == ISD::FP_ROUND || ISD == ISD::FP_EXTEND) {
    // As general rule, fp converts that were not matched above are scalarized
    // and cost 1 vcvt for each lane, so long as the instruction is available.
    // If not it will become a series of function calls.
    const int CallCost = getCallInstrCost(nullptr, Dst, {Src}, CostKind);
    int Lanes = 1;
    if (SrcTy.isFixedLengthVector())
      Lanes = SrcTy.getVectorNumElements();

    if (IsLegalFPType(SrcTy) && IsLegalFPType(DstTy))
      return Lanes;
    else
      return Lanes * CallCost;
  }

  // Scalar integer conversion costs.
  static const TypeConversionCostTblEntry ARMIntegerConversionTbl[] = {
    // i16 -> i64 requires two dependent operations.
    { ISD::SIGN_EXTEND, MVT::i64, MVT::i16, 2 },

    // Truncates on i64 are assumed to be free.
    { ISD::TRUNCATE,    MVT::i32, MVT::i64, 0 },
    { ISD::TRUNCATE,    MVT::i16, MVT::i64, 0 },
    { ISD::TRUNCATE,    MVT::i8,  MVT::i64, 0 },
    { ISD::TRUNCATE,    MVT::i1,  MVT::i64, 0 }
  };

  if (SrcTy.isInteger()) {
    if (const auto *Entry = ConvertCostTableLookup(ARMIntegerConversionTbl, ISD,
                                                   DstTy.getSimpleVT(),
                                                   SrcTy.getSimpleVT()))
      return AdjustCost(Entry->Cost);
  }

  // Fall back to the base implementation, scaled by the MVE cost factor for
  // vector casts on MVE targets.
  int BaseCost = ST->hasMVEIntegerOps() && Src->isVectorTy()
                     ? ST->getMVEVectorCostFactor()
                     : 1;
  return AdjustCost(
      BaseCost * BaseT::getCastInstrCost(Opcode, Dst, Src, CCH, CostKind, I));
}

// Cost of inserting or extracting a single vector element.
int ARMTTIImpl::getVectorInstrCost(unsigned Opcode, Type *ValTy,
                                   unsigned Index) {
  // Penalize inserting into an D-subregister. We end up with a three times
  // lower estimated throughput on swift.
  if (ST->hasSlowLoadDSubregister() && Opcode == Instruction::InsertElement &&
      ValTy->isVectorTy() && ValTy->getScalarSizeInBits() <= 32)
    return 3;

  if (ST->hasNEON() && (Opcode == Instruction::InsertElement ||
                        Opcode == Instruction::ExtractElement)) {
    // Cross-class copies are expensive on many microarchitectures,
    // so assume they are expensive by default.
    if (cast<VectorType>(ValTy)->getElementType()->isIntegerTy())
      return 3;

    // Even if it's not a cross class copy, this likely leads to mixing
    // of NEON and VFP code and should be therefore penalized.
    if (ValTy->isVectorTy() &&
        ValTy->getScalarSizeInBits() <= 32)
      return std::max(BaseT::getVectorInstrCost(Opcode, ValTy, Index), 2U);
  }

  if (ST->hasMVEIntegerOps() && (Opcode == Instruction::InsertElement ||
                                 Opcode == Instruction::ExtractElement)) {
    // We say MVE moves costs at least the MVEVectorCostFactor, even though
    // they are scalar instructions. This helps prevent mixing scalar and
    // vector, to prevent vectorising where we end up just scalarising the
    // result anyway.
    return std::max(BaseT::getVectorInstrCost(Opcode, ValTy, Index),
                    ST->getMVEVectorCostFactor()) *
           cast<FixedVectorType>(ValTy)->getNumElements() / 2;
  }

  return BaseT::getVectorInstrCost(Opcode, ValTy, Index);
}

// Cost of compare and select instructions.
int ARMTTIImpl::getCmpSelInstrCost(unsigned Opcode, Type *ValTy, Type *CondTy,
                                   TTI::TargetCostKind CostKind,
                                   const Instruction *I) {
  // TODO: Handle other cost kinds.
  if (CostKind != TTI::TCK_RecipThroughput)
    return BaseT::getCmpSelInstrCost(Opcode, ValTy, CondTy, CostKind, I);

  int ISD = TLI->InstructionOpcodeToISD(Opcode);
  // On NEON a vector select gets lowered to vbsl.
  if (ST->hasNEON() && ValTy->isVectorTy() && ISD == ISD::SELECT) {
    // Lowering of some vector selects is currently far from perfect.
    static const TypeConversionCostTblEntry NEONVectorSelectTbl[] = {
      { ISD::SELECT, MVT::v4i1, MVT::v4i64, 4*4 + 1*2 + 1 },
      { ISD::SELECT, MVT::v8i1, MVT::v8i64, 50 },
      { ISD::SELECT, MVT::v16i1, MVT::v16i64, 100 }
    };

    EVT SelCondTy = TLI->getValueType(DL, CondTy);
    EVT SelValTy = TLI->getValueType(DL, ValTy);
    if (SelCondTy.isSimple() && SelValTy.isSimple()) {
      if (const auto *Entry = ConvertCostTableLookup(NEONVectorSelectTbl, ISD,
                                                     SelCondTy.getSimpleVT(),
                                                     SelValTy.getSimpleVT()))
        return Entry->Cost;
    }

    // Otherwise assume one vbsl per legalized vector.
    std::pair<int, MVT> LT = TLI->getTypeLegalizationCost(DL, ValTy);
    return LT.first;
  }

  int BaseCost = ST->hasMVEIntegerOps() && ValTy->isVectorTy()
                     ? ST->getMVEVectorCostFactor()
                     : 1;
  return BaseCost * BaseT::getCmpSelInstrCost(Opcode, ValTy, CondTy, CostKind,
                                              I);
}

// Cost of the address computation feeding a memory access.
int ARMTTIImpl::getAddressComputationCost(Type *Ty, ScalarEvolution *SE,
                                          const SCEV *Ptr) {
  // Address computations in vectorized code with non-consecutive addresses will
  // likely result in more instructions compared to scalar code where the
  // computation can more often be merged into the index mode.
The resulting 781 // extra micro-ops can significantly decrease throughput. 782 unsigned NumVectorInstToHideOverhead = 10; 783 int MaxMergeDistance = 64; 784 785 if (ST->hasNEON()) { 786 if (Ty->isVectorTy() && SE && 787 !BaseT::isConstantStridedAccessLessThan(SE, Ptr, MaxMergeDistance + 1)) 788 return NumVectorInstToHideOverhead; 789 790 // In many cases the address computation is not merged into the instruction 791 // addressing mode. 792 return 1; 793 } 794 return BaseT::getAddressComputationCost(Ty, SE, Ptr); 795 } 796 797 bool ARMTTIImpl::isProfitableLSRChainElement(Instruction *I) { 798 if (IntrinsicInst *II = dyn_cast<IntrinsicInst>(I)) { 799 // If a VCTP is part of a chain, it's already profitable and shouldn't be 800 // optimized, else LSR may block tail-predication. 801 switch (II->getIntrinsicID()) { 802 case Intrinsic::arm_mve_vctp8: 803 case Intrinsic::arm_mve_vctp16: 804 case Intrinsic::arm_mve_vctp32: 805 case Intrinsic::arm_mve_vctp64: 806 return true; 807 default: 808 break; 809 } 810 } 811 return false; 812 } 813 814 bool ARMTTIImpl::isLegalMaskedLoad(Type *DataTy, Align Alignment) { 815 if (!EnableMaskedLoadStores || !ST->hasMVEIntegerOps()) 816 return false; 817 818 if (auto *VecTy = dyn_cast<FixedVectorType>(DataTy)) { 819 // Don't support v2i1 yet. 820 if (VecTy->getNumElements() == 2) 821 return false; 822 823 // We don't support extending fp types. 
    unsigned VecWidth = DataTy->getPrimitiveSizeInBits();
    if (VecWidth != 128 && VecTy->getElementType()->isFloatingPointTy())
      return false;
  }

  // Element width must match the alignment MVE masked loads can handle.
  unsigned EltWidth = DataTy->getScalarSizeInBits();
  return (EltWidth == 32 && Alignment >= 4) ||
         (EltWidth == 16 && Alignment >= 2) || (EltWidth == 8);
}

/// Whether a masked gather on \p Ty with \p Alignment should be kept as a
/// gather (true) or scalarized (false).
bool ARMTTIImpl::isLegalMaskedGather(Type *Ty, Align Alignment) {
  if (!EnableMaskedGatherScatters || !ST->hasMVEIntegerOps())
    return false;

  // This method is called in 2 places:
  // - from the vectorizer with a scalar type, in which case we need to get
  // this as good as we can with the limited info we have (and rely on the cost
  // model for the rest).
  // - from the masked intrinsic lowering pass with the actual vector type.
  // For MVE, we have a custom lowering pass that will already have custom
  // legalised any gathers that we can to MVE intrinsics, and want to expand all
  // the rest. The pass runs before the masked intrinsic lowering pass, so if we
  // are here, we know we want to expand.
  if (isa<VectorType>(Ty))
    return false;

  unsigned EltWidth = Ty->getScalarSizeInBits();
  return ((EltWidth == 32 && Alignment >= 4) ||
          (EltWidth == 16 && Alignment >= 2) || EltWidth == 8);
}

/// Cost of a memcpy intrinsic, modelled either as a library call or as the
/// number of load/store operations an inlined expansion would need.
int ARMTTIImpl::getMemcpyCost(const Instruction *I) {
  const MemCpyInst *MI = dyn_cast<MemCpyInst>(I);
  assert(MI && "MemcpyInst expected");
  ConstantInt *C = dyn_cast<ConstantInt>(MI->getLength());

  // To model the cost of a library call, we assume 1 for the call, and
  // 3 for the argument setup.
  const unsigned LibCallCost = 4;

  // If 'size' is not a constant, a library call will be generated.
865 if (!C) 866 return LibCallCost; 867 868 const unsigned Size = C->getValue().getZExtValue(); 869 const Align DstAlign = *MI->getDestAlign(); 870 const Align SrcAlign = *MI->getSourceAlign(); 871 const Function *F = I->getParent()->getParent(); 872 const unsigned Limit = TLI->getMaxStoresPerMemmove(F->hasMinSize()); 873 std::vector<EVT> MemOps; 874 875 // MemOps will be poplulated with a list of data types that needs to be 876 // loaded and stored. That's why we multiply the number of elements by 2 to 877 // get the cost for this memcpy. 878 if (getTLI()->findOptimalMemOpLowering( 879 MemOps, Limit, 880 MemOp::Copy(Size, /*DstAlignCanChange*/ false, DstAlign, SrcAlign, 881 /*IsVolatile*/ true), 882 MI->getDestAddressSpace(), MI->getSourceAddressSpace(), 883 F->getAttributes())) 884 return MemOps.size() * 2; 885 886 // If we can't find an optimal memop lowering, return the default cost 887 return LibCallCost; 888 } 889 890 int ARMTTIImpl::getShuffleCost(TTI::ShuffleKind Kind, VectorType *Tp, 891 int Index, VectorType *SubTp) { 892 if (ST->hasNEON()) { 893 if (Kind == TTI::SK_Broadcast) { 894 static const CostTblEntry NEONDupTbl[] = { 895 // VDUP handles these cases. 
          // Double-register types: a single vdup.
          {ISD::VECTOR_SHUFFLE, MVT::v2i32, 1},
          {ISD::VECTOR_SHUFFLE, MVT::v2f32, 1},
          {ISD::VECTOR_SHUFFLE, MVT::v2i64, 1},
          {ISD::VECTOR_SHUFFLE, MVT::v2f64, 1},
          {ISD::VECTOR_SHUFFLE, MVT::v4i16, 1},
          {ISD::VECTOR_SHUFFLE, MVT::v8i8, 1},

          // Quad-register types: still a single vdup.
          {ISD::VECTOR_SHUFFLE, MVT::v4i32, 1},
          {ISD::VECTOR_SHUFFLE, MVT::v4f32, 1},
          {ISD::VECTOR_SHUFFLE, MVT::v8i16, 1},
          {ISD::VECTOR_SHUFFLE, MVT::v16i8, 1}};

      std::pair<int, MVT> LT = TLI->getTypeLegalizationCost(DL, Tp);

      if (const auto *Entry =
              CostTableLookup(NEONDupTbl, ISD::VECTOR_SHUFFLE, LT.second))
        return LT.first * Entry->Cost;
    }
    if (Kind == TTI::SK_Reverse) {
      static const CostTblEntry NEONShuffleTbl[] = {
          // Reverse shuffle cost one instruction if we are shuffling within a
          // double word (vrev) or two if we shuffle a quad word (vrev, vext).
          {ISD::VECTOR_SHUFFLE, MVT::v2i32, 1},
          {ISD::VECTOR_SHUFFLE, MVT::v2f32, 1},
          {ISD::VECTOR_SHUFFLE, MVT::v2i64, 1},
          {ISD::VECTOR_SHUFFLE, MVT::v2f64, 1},
          {ISD::VECTOR_SHUFFLE, MVT::v4i16, 1},
          {ISD::VECTOR_SHUFFLE, MVT::v8i8, 1},

          {ISD::VECTOR_SHUFFLE, MVT::v4i32, 2},
          {ISD::VECTOR_SHUFFLE, MVT::v4f32, 2},
          {ISD::VECTOR_SHUFFLE, MVT::v8i16, 2},
          {ISD::VECTOR_SHUFFLE, MVT::v16i8, 2}};

      std::pair<int, MVT> LT = TLI->getTypeLegalizationCost(DL, Tp);

      if (const auto *Entry =
              CostTableLookup(NEONShuffleTbl, ISD::VECTOR_SHUFFLE, LT.second))
        return LT.first * Entry->Cost;
    }
    if (Kind == TTI::SK_Select) {
      static const CostTblEntry NEONSelShuffleTbl[] = {
          // Select shuffle cost table for ARM. Cost is the number of
          // instructions
          // required to create the shuffled vector.

          {ISD::VECTOR_SHUFFLE, MVT::v2f32, 1},
          {ISD::VECTOR_SHUFFLE, MVT::v2i64, 1},
          {ISD::VECTOR_SHUFFLE, MVT::v2f64, 1},
          {ISD::VECTOR_SHUFFLE, MVT::v2i32, 1},

          {ISD::VECTOR_SHUFFLE, MVT::v4i32, 2},
          {ISD::VECTOR_SHUFFLE, MVT::v4f32, 2},
          {ISD::VECTOR_SHUFFLE, MVT::v4i16, 2},

          // Larger integer select shuffles are expensive to synthesize.
          {ISD::VECTOR_SHUFFLE, MVT::v8i16, 16},

          {ISD::VECTOR_SHUFFLE, MVT::v16i8, 32}};

      std::pair<int, MVT> LT = TLI->getTypeLegalizationCost(DL, Tp);
      if (const auto *Entry = CostTableLookup(NEONSelShuffleTbl,
                                              ISD::VECTOR_SHUFFLE, LT.second))
        return LT.first * Entry->Cost;
    }
  }
  if (ST->hasMVEIntegerOps()) {
    if (Kind == TTI::SK_Broadcast) {
      static const CostTblEntry MVEDupTbl[] = {
          // VDUP handles these cases.
          {ISD::VECTOR_SHUFFLE, MVT::v4i32, 1},
          {ISD::VECTOR_SHUFFLE, MVT::v8i16, 1},
          {ISD::VECTOR_SHUFFLE, MVT::v16i8, 1},
          {ISD::VECTOR_SHUFFLE, MVT::v4f32, 1},
          {ISD::VECTOR_SHUFFLE, MVT::v8f16, 1}};

      std::pair<int, MVT> LT = TLI->getTypeLegalizationCost(DL, Tp);

      if (const auto *Entry = CostTableLookup(MVEDupTbl, ISD::VECTOR_SHUFFLE,
                                              LT.second))
        return LT.first * Entry->Cost * ST->getMVEVectorCostFactor();
    }
  }
  // Anything not matched above: base cost, scaled by the MVE factor for
  // MVE vector shuffles.
  int BaseCost = ST->hasMVEIntegerOps() && Tp->isVectorTy()
                     ? ST->getMVEVectorCostFactor()
                     : 1;
  return BaseCost * BaseT::getShuffleCost(Kind, Tp, Index, SubTp);
}

/// Cost of an arithmetic instruction, with ARM/NEON/MVE-specific tables and
/// adjustments layered over the generic implementation.
int ARMTTIImpl::getArithmeticInstrCost(unsigned Opcode, Type *Ty,
                                       TTI::TargetCostKind CostKind,
                                       TTI::OperandValueKind Op1Info,
                                       TTI::OperandValueKind Op2Info,
                                       TTI::OperandValueProperties Opd1PropInfo,
                                       TTI::OperandValueProperties Opd2PropInfo,
                                       ArrayRef<const Value *> Args,
                                       const Instruction *CxtI) {
  // TODO: Handle more cost kinds.
  if (CostKind != TTI::TCK_RecipThroughput)
    return BaseT::getArithmeticInstrCost(Opcode, Ty, CostKind, Op1Info,
                                         Op2Info, Opd1PropInfo,
                                         Opd2PropInfo, Args, CxtI);

  int ISDOpcode = TLI->InstructionOpcodeToISD(Opcode);
  std::pair<int, MVT> LT = TLI->getTypeLegalizationCost(DL, Ty);

  if (ST->hasNEON()) {
    const unsigned FunctionCallDivCost = 20;
    const unsigned ReciprocalDivCost = 10;
    static const CostTblEntry CostTbl[] = {
      // Division.
      // These costs are somewhat random. Choose a cost of 20 to indicate that
      // vectorizing division (added function call) is going to be very expensive.
      // Double registers types.
      { ISD::SDIV, MVT::v1i64, 1 * FunctionCallDivCost},
      { ISD::UDIV, MVT::v1i64, 1 * FunctionCallDivCost},
      { ISD::SREM, MVT::v1i64, 1 * FunctionCallDivCost},
      { ISD::UREM, MVT::v1i64, 1 * FunctionCallDivCost},
      { ISD::SDIV, MVT::v2i32, 2 * FunctionCallDivCost},
      { ISD::UDIV, MVT::v2i32, 2 * FunctionCallDivCost},
      { ISD::SREM, MVT::v2i32, 2 * FunctionCallDivCost},
      { ISD::UREM, MVT::v2i32, 2 * FunctionCallDivCost},
      // i16/i8 divides can use a reciprocal-estimate sequence instead of a
      // per-element libcall.
      { ISD::SDIV, MVT::v4i16,     ReciprocalDivCost},
      { ISD::UDIV, MVT::v4i16,     ReciprocalDivCost},
      { ISD::SREM, MVT::v4i16, 4 * FunctionCallDivCost},
      { ISD::UREM, MVT::v4i16, 4 * FunctionCallDivCost},
      { ISD::SDIV, MVT::v8i8,      ReciprocalDivCost},
      { ISD::UDIV, MVT::v8i8,      ReciprocalDivCost},
      { ISD::SREM, MVT::v8i8,  8 * FunctionCallDivCost},
      { ISD::UREM, MVT::v8i8,  8 * FunctionCallDivCost},
      // Quad register types.
      { ISD::SDIV, MVT::v2i64, 2 * FunctionCallDivCost},
      { ISD::UDIV, MVT::v2i64, 2 * FunctionCallDivCost},
      { ISD::SREM, MVT::v2i64, 2 * FunctionCallDivCost},
      { ISD::UREM, MVT::v2i64, 2 * FunctionCallDivCost},
      { ISD::SDIV, MVT::v4i32, 4 * FunctionCallDivCost},
      { ISD::UDIV, MVT::v4i32, 4 * FunctionCallDivCost},
      { ISD::SREM, MVT::v4i32, 4 * FunctionCallDivCost},
      { ISD::UREM, MVT::v4i32, 4 * FunctionCallDivCost},
      { ISD::SDIV, MVT::v8i16, 8 * FunctionCallDivCost},
      { ISD::UDIV, MVT::v8i16, 8 * FunctionCallDivCost},
      { ISD::SREM, MVT::v8i16, 8 * FunctionCallDivCost},
      { ISD::UREM, MVT::v8i16, 8 * FunctionCallDivCost},
      { ISD::SDIV, MVT::v16i8, 16 * FunctionCallDivCost},
      { ISD::UDIV, MVT::v16i8, 16 * FunctionCallDivCost},
      { ISD::SREM, MVT::v16i8, 16 * FunctionCallDivCost},
      { ISD::UREM, MVT::v16i8, 16 * FunctionCallDivCost},
      // Multiplication.
    };

    if (const auto *Entry = CostTableLookup(CostTbl, ISDOpcode, LT.second))
      return LT.first * Entry->Cost;

    int Cost = BaseT::getArithmeticInstrCost(Opcode, Ty, CostKind, Op1Info,
                                             Op2Info,
                                             Opd1PropInfo, Opd2PropInfo);

    // This is somewhat of a hack. The problem that we are facing is that SROA
    // creates a sequence of shift, and, or instructions to construct values.
    // These sequences are recognized by the ISel and have zero-cost. Not so for
    // the vectorized code. Because we have support for v2i64 but not i64 those
    // sequences look particularly beneficial to vectorize.
    // To work around this we increase the cost of v2i64 operations to make them
    // seem less beneficial.
    if (LT.second == MVT::v2i64 &&
        Op2Info == TargetTransformInfo::OK_UniformConstantValue)
      Cost += 4;

    return Cost;
  }

  // If this operation is a shift on arm/thumb2, it might well be folded into
  // the following instruction, hence having a cost of 0.
  auto LooksLikeAFreeShift = [&]() {
    // Thumb1 has no flexible second operand; vectors never fold this way.
    if (ST->isThumb1Only() || Ty->isVectorTy())
      return false;

    // Only a single-use shift-by-constant can fold into its user.
    if (!CxtI || !CxtI->hasOneUse() || !CxtI->isShift())
      return false;
    if (Op2Info != TargetTransformInfo::OK_UniformConstantValue)
      return false;

    // Folded into a ADC/ADD/AND/BIC/CMP/EOR/MVN/ORR/ORN/RSB/SBC/SUB
    switch (cast<Instruction>(CxtI->user_back())->getOpcode()) {
    case Instruction::Add:
    case Instruction::Sub:
    case Instruction::And:
    case Instruction::Xor:
    case Instruction::Or:
    case Instruction::ICmp:
      return true;
    default:
      return false;
    }
  };
  if (LooksLikeAFreeShift())
    return 0;

  int BaseCost = ST->hasMVEIntegerOps() && Ty->isVectorTy()
                     ? ST->getMVEVectorCostFactor()
                     : 1;

  // The rest of this mostly follows what is done in BaseT::getArithmeticInstrCost,
  // without treating floats as more expensive than scalars or increasing the
  // costs for custom operations. The result is also multiplied by the
  // MVEVectorCostFactor where appropriate.
  if (TLI->isOperationLegalOrCustomOrPromote(ISDOpcode, LT.second))
    return LT.first * BaseCost;

  // Else this is expand, assume that we need to scalarize this op.
  if (auto *VTy = dyn_cast<FixedVectorType>(Ty)) {
    unsigned Num = VTy->getNumElements();
    unsigned Cost = getArithmeticInstrCost(Opcode, Ty->getScalarType(),
                                           CostKind);
    // Return the cost of multiple scalar invocation plus the cost of
    // inserting and extracting the values.
    return BaseT::getScalarizationOverhead(VTy, Args) + Num * Cost;
  }

  return BaseCost;
}

/// Cost of a load or store, including ARM-specific penalties/bonuses
/// (unaligned NEON f64 vectors, MVE extending fp loads).
int ARMTTIImpl::getMemoryOpCost(unsigned Opcode, Type *Src,
                                MaybeAlign Alignment, unsigned AddressSpace,
                                TTI::TargetCostKind CostKind,
                                const Instruction *I) {
  // TODO: Handle other cost kinds.
  if (CostKind != TTI::TCK_RecipThroughput)
    return 1;

  // Type legalization can't handle structs
  if (TLI->getValueType(DL, Src, true) == MVT::Other)
    return BaseT::getMemoryOpCost(Opcode, Src, Alignment, AddressSpace,
                                  CostKind);

  if (ST->hasNEON() && Src->isVectorTy() &&
      (Alignment && *Alignment != Align(16)) &&
      cast<VectorType>(Src)->getElementType()->isDoubleTy()) {
    // Unaligned loads/stores are extremely inefficient.
    // We need 4 uops for vst.1/vld.1 vs 1uop for vldr/vstr.
    std::pair<int, MVT> LT = TLI->getTypeLegalizationCost(DL, Src);
    return LT.first * 4;
  }

  // MVE can optimize a fpext(load(4xhalf)) using an extending integer load.
  // Same for stores.
  if (ST->hasMVEFloatOps() && isa<FixedVectorType>(Src) && I &&
      ((Opcode == Instruction::Load && I->hasOneUse() &&
        isa<FPExtInst>(*I->user_begin())) ||
       (Opcode == Instruction::Store && isa<FPTruncInst>(I->getOperand(0))))) {
    FixedVectorType *SrcVTy = cast<FixedVectorType>(Src);
    Type *DstTy =
        Opcode == Instruction::Load
            ? (*I->user_begin())->getType()
            : cast<Instruction>(I->getOperand(0))->getOperand(0)->getType();
    if (SrcVTy->getNumElements() == 4 && SrcVTy->getScalarType()->isHalfTy() &&
        DstTy->getScalarType()->isFloatTy())
      return ST->getMVEVectorCostFactor();
  }

  int BaseCost = ST->hasMVEIntegerOps() && Src->isVectorTy()
                     ? ST->getMVEVectorCostFactor()
                     : 1;
  return BaseCost * BaseT::getMemoryOpCost(Opcode, Src, Alignment, AddressSpace,
                                           CostKind, I);
}

/// Cost of an interleaved load/store group (vldN/vstN style accesses).
int ARMTTIImpl::getInterleavedMemoryOpCost(
    unsigned Opcode, Type *VecTy, unsigned Factor, ArrayRef<unsigned> Indices,
    Align Alignment, unsigned AddressSpace, TTI::TargetCostKind CostKind,
    bool UseMaskForCond, bool UseMaskForGaps) {
  assert(Factor >= 2 && "Invalid interleave factor");
  assert(isa<VectorType>(VecTy) && "Expect a vector type");

  // vldN/vstN doesn't support vector types of i64/f64 element.
  bool EltIs64Bits = DL.getTypeSizeInBits(VecTy->getScalarType()) == 64;

  if (Factor <= TLI->getMaxSupportedInterleaveFactor() && !EltIs64Bits &&
      !UseMaskForCond && !UseMaskForGaps) {
    unsigned NumElts = cast<FixedVectorType>(VecTy)->getNumElements();
    auto *SubVecTy =
        FixedVectorType::get(VecTy->getScalarType(), NumElts / Factor);

    // vldN/vstN only support legal vector types of size 64 or 128 in bits.
    // Accesses having vector types that are a multiple of 128 bits can be
    // matched to more than one vldN/vstN instruction.
    int BaseCost = ST->hasMVEIntegerOps() ? ST->getMVEVectorCostFactor() : 1;
    if (NumElts % Factor == 0 &&
        TLI->isLegalInterleavedAccessType(Factor, SubVecTy, DL))
      return Factor * BaseCost * TLI->getNumInterleavedAccesses(SubVecTy, DL);

    // Some smaller than legal interleaved patterns are cheap as we can make
    // use of the vmovn or vrev patterns to interleave a standard load. This is
    // true for v4i8, v8i8 and v4i16 at least (but not for v4f16 as it is
    // promoted differently). The cost of 2 here is then a load and vrev or
    // vmovn.
    if (ST->hasMVEIntegerOps() && Factor == 2 && NumElts / Factor > 2 &&
        VecTy->isIntOrIntVectorTy() && DL.getTypeSizeInBits(SubVecTy) <= 64)
      return 2 * BaseCost;
  }

  return BaseT::getInterleavedMemoryOpCost(Opcode, VecTy, Factor, Indices,
                                           Alignment, AddressSpace, CostKind,
                                           UseMaskForCond, UseMaskForGaps);
}

/// Cost of a gather/scatter, choosing between a (serialised) MVE vector
/// gather cost and the scalarization cost.
unsigned ARMTTIImpl::getGatherScatterOpCost(unsigned Opcode, Type *DataTy,
                                            const Value *Ptr, bool VariableMask,
                                            Align Alignment,
                                            TTI::TargetCostKind CostKind,
                                            const Instruction *I) {
  using namespace PatternMatch;
  if (!ST->hasMVEIntegerOps() || !EnableMaskedGatherScatters)
    return BaseT::getGatherScatterOpCost(Opcode, DataTy, Ptr, VariableMask,
                                         Alignment, CostKind, I);

  assert(DataTy->isVectorTy() && "Can't do gather/scatters on scalar!");
  auto *VTy = cast<FixedVectorType>(DataTy);

  // TODO: Splitting, once we do that.

  unsigned NumElems = VTy->getNumElements();
  unsigned EltSize = VTy->getScalarSizeInBits();
  std::pair<int, MVT> LT = TLI->getTypeLegalizationCost(DL, DataTy);

  // For now, it is assumed that for the MVE gather instructions the loads are
  // all effectively serialised. This means the cost is the scalar cost
  // multiplied by the number of elements being loaded. This is possibly very
  // conservative, but even so we still end up vectorising loops because the
  // cost per iteration for many loops is lower than for scalar loops.
  unsigned VectorCost = NumElems * LT.first;
  // The scalarization cost should be a lot higher. We use the number of vector
  // elements plus the scalarization overhead.
  unsigned ScalarCost =
      NumElems * LT.first + BaseT::getScalarizationOverhead(VTy, {});

  // Under-aligned accesses can't use the vector gather at all.
  if (Alignment < EltSize / 8)
    return ScalarCost;

  unsigned ExtSize = EltSize;
  // Check whether there's a single user that asks for an extended type
  if (I != nullptr) {
    // Dependent of the caller of this function, a gather instruction will
    // either have opcode Instruction::Load or be a call to the masked_gather
    // intrinsic
    if ((I->getOpcode() == Instruction::Load ||
         match(I, m_Intrinsic<Intrinsic::masked_gather>())) &&
        I->hasOneUse()) {
      const User *Us = *I->users().begin();
      if (isa<ZExtInst>(Us) || isa<SExtInst>(Us)) {
        // only allow valid type combinations
        unsigned TypeSize =
            cast<Instruction>(Us)->getType()->getScalarSizeInBits();
        if (((TypeSize == 32 && (EltSize == 8 || EltSize == 16)) ||
             (TypeSize == 16 && EltSize == 8)) &&
            TypeSize * NumElems == 128) {
          ExtSize = TypeSize;
        }
      }
    }
    // Check whether the input data needs to be truncated
    TruncInst *T;
    if ((I->getOpcode() == Instruction::Store ||
         match(I, m_Intrinsic<Intrinsic::masked_scatter>())) &&
        (T = dyn_cast<TruncInst>(I->getOperand(0)))) {
      // Only allow valid type combinations
      unsigned TypeSize = T->getOperand(0)->getType()->getScalarSizeInBits();
      if (((EltSize == 16 && TypeSize == 32) ||
           (EltSize == 8 && (TypeSize == 32 || TypeSize == 16))) &&
          TypeSize * NumElems == 128)
        ExtSize = TypeSize;
    }
  }

  // The (extended) access must fill exactly one 128-bit vector register.
  if (ExtSize * NumElems != 128 || NumElems < 4)
    return ScalarCost;

  // Any (aligned) i32 gather will not need to be scalarised.
  if (ExtSize == 32)
    return VectorCost;
  // For smaller types, we need to ensure that the gep's inputs are correctly
  // extended from a small enough value. Other sizes (including i64) are
  // scalarized for now.
1278 if (ExtSize != 8 && ExtSize != 16) 1279 return ScalarCost; 1280 1281 if (const auto *BC = dyn_cast<BitCastInst>(Ptr)) 1282 Ptr = BC->getOperand(0); 1283 if (const auto *GEP = dyn_cast<GetElementPtrInst>(Ptr)) { 1284 if (GEP->getNumOperands() != 2) 1285 return ScalarCost; 1286 unsigned Scale = DL.getTypeAllocSize(GEP->getResultElementType()); 1287 // Scale needs to be correct (which is only relevant for i16s). 1288 if (Scale != 1 && Scale * 8 != ExtSize) 1289 return ScalarCost; 1290 // And we need to zext (not sext) the indexes from a small enough type. 1291 if (const auto *ZExt = dyn_cast<ZExtInst>(GEP->getOperand(1))) { 1292 if (ZExt->getOperand(0)->getType()->getScalarSizeInBits() <= ExtSize) 1293 return VectorCost; 1294 } 1295 return ScalarCost; 1296 } 1297 return ScalarCost; 1298 } 1299 1300 bool ARMTTIImpl::isLoweredToCall(const Function *F) { 1301 if (!F->isIntrinsic()) 1302 BaseT::isLoweredToCall(F); 1303 1304 // Assume all Arm-specific intrinsics map to an instruction. 1305 if (F->getName().startswith("llvm.arm")) 1306 return false; 1307 1308 switch (F->getIntrinsicID()) { 1309 default: break; 1310 case Intrinsic::powi: 1311 case Intrinsic::sin: 1312 case Intrinsic::cos: 1313 case Intrinsic::pow: 1314 case Intrinsic::log: 1315 case Intrinsic::log10: 1316 case Intrinsic::log2: 1317 case Intrinsic::exp: 1318 case Intrinsic::exp2: 1319 return true; 1320 case Intrinsic::sqrt: 1321 case Intrinsic::fabs: 1322 case Intrinsic::copysign: 1323 case Intrinsic::floor: 1324 case Intrinsic::ceil: 1325 case Intrinsic::trunc: 1326 case Intrinsic::rint: 1327 case Intrinsic::nearbyint: 1328 case Intrinsic::round: 1329 case Intrinsic::canonicalize: 1330 case Intrinsic::lround: 1331 case Intrinsic::llround: 1332 case Intrinsic::lrint: 1333 case Intrinsic::llrint: 1334 if (F->getReturnType()->isDoubleTy() && !ST->hasFP64()) 1335 return true; 1336 if (F->getReturnType()->isHalfTy() && !ST->hasFullFP16()) 1337 return true; 1338 // Some operations can be handled by vector 
instructions and assume 1339 // unsupported vectors will be expanded into supported scalar ones. 1340 // TODO Handle scalar operations properly. 1341 return !ST->hasFPARMv8Base() && !ST->hasVFP2Base(); 1342 case Intrinsic::masked_store: 1343 case Intrinsic::masked_load: 1344 case Intrinsic::masked_gather: 1345 case Intrinsic::masked_scatter: 1346 return !ST->hasMVEIntegerOps(); 1347 case Intrinsic::sadd_with_overflow: 1348 case Intrinsic::uadd_with_overflow: 1349 case Intrinsic::ssub_with_overflow: 1350 case Intrinsic::usub_with_overflow: 1351 case Intrinsic::sadd_sat: 1352 case Intrinsic::uadd_sat: 1353 case Intrinsic::ssub_sat: 1354 case Intrinsic::usub_sat: 1355 return false; 1356 } 1357 1358 return BaseT::isLoweredToCall(F); 1359 } 1360 1361 bool ARMTTIImpl::isHardwareLoopProfitable(Loop *L, ScalarEvolution &SE, 1362 AssumptionCache &AC, 1363 TargetLibraryInfo *LibInfo, 1364 HardwareLoopInfo &HWLoopInfo) { 1365 // Low-overhead branches are only supported in the 'low-overhead branch' 1366 // extension of v8.1-m. 1367 if (!ST->hasLOB() || DisableLowOverheadLoops) { 1368 LLVM_DEBUG(dbgs() << "ARMHWLoops: Disabled\n"); 1369 return false; 1370 } 1371 1372 if (!SE.hasLoopInvariantBackedgeTakenCount(L)) { 1373 LLVM_DEBUG(dbgs() << "ARMHWLoops: No BETC\n"); 1374 return false; 1375 } 1376 1377 const SCEV *BackedgeTakenCount = SE.getBackedgeTakenCount(L); 1378 if (isa<SCEVCouldNotCompute>(BackedgeTakenCount)) { 1379 LLVM_DEBUG(dbgs() << "ARMHWLoops: Uncomputable BETC\n"); 1380 return false; 1381 } 1382 1383 const SCEV *TripCountSCEV = 1384 SE.getAddExpr(BackedgeTakenCount, 1385 SE.getOne(BackedgeTakenCount->getType())); 1386 1387 // We need to store the trip count in LR, a 32-bit register. 
  if (SE.getUnsignedRangeMax(TripCountSCEV).getBitWidth() > 32) {
    LLVM_DEBUG(dbgs() << "ARMHWLoops: Trip count does not fit into 32bits\n");
    return false;
  }

  // Making a call will trash LR and clear LO_BRANCH_INFO, so there's little
  // point in generating a hardware loop if that's going to happen.
  auto MaybeCall = [this](Instruction &I) {
    const ARMTargetLowering *TLI = getTLI();
    unsigned ISD = TLI->InstructionOpcodeToISD(I.getOpcode());
    EVT VT = TLI->getValueType(DL, I.getType(), true);
    if (TLI->getOperationAction(ISD, VT) == TargetLowering::LibCall)
      return true;

    // Check if an intrinsic will be lowered to a call and assume that any
    // other CallInst will generate a bl.
    if (auto *Call = dyn_cast<CallInst>(&I)) {
      if (isa<IntrinsicInst>(Call)) {
        if (const Function *F = Call->getCalledFunction())
          return isLoweredToCall(F);
      }
      return true;
    }

    // FPv5 provides conversions between integer, double-precision,
    // single-precision, and half-precision formats.
    switch (I.getOpcode()) {
    default:
      break;
    case Instruction::FPToSI:
    case Instruction::FPToUI:
    case Instruction::SIToFP:
    case Instruction::UIToFP:
    case Instruction::FPTrunc:
    case Instruction::FPExt:
      return !ST->hasFPARMv8Base();
    }

    // FIXME: Unfortunately the approach of checking the Operation Action does
    // not catch all cases of Legalization that use library calls. Our
    // Legalization step categorizes some transformations into library calls as
    // Custom, Expand or even Legal when doing type legalization. So for now
    // we have to special case for instance the SDIV of 64bit integers and the
    // use of floating point emulation.
    if (VT.isInteger() && VT.getSizeInBits() >= 64) {
      switch (ISD) {
      default:
        break;
      case ISD::SDIV:
      case ISD::UDIV:
      case ISD::SREM:
      case ISD::UREM:
      case ISD::SDIVREM:
      case ISD::UDIVREM:
        return true;
      }
    }

    // Assume all other non-float operations are supported.
    if (!VT.isFloatingPoint())
      return false;

    // We'll need a library call to handle most floats when using soft.
    if (TLI->useSoftFloat()) {
      switch (I.getOpcode()) {
      default:
        return true;
      case Instruction::Alloca:
      case Instruction::Load:
      case Instruction::Store:
      case Instruction::Select:
      case Instruction::PHI:
        return false;
      }
    }

    // We'll need a libcall to perform double precision operations on a single
    // precision only FPU.
    if (I.getType()->isDoubleTy() && !ST->hasFP64())
      return true;

    // Likewise for half precision arithmetic.
    if (I.getType()->isHalfTy() && !ST->hasFullFP16())
      return true;

    return false;
  };

  // Loops that already contain hardware-loop intrinsics (from a previous
  // pass) must not be converted again.
  auto IsHardwareLoopIntrinsic = [](Instruction &I) {
    if (auto *Call = dyn_cast<IntrinsicInst>(&I)) {
      switch (Call->getIntrinsicID()) {
      default:
        break;
      case Intrinsic::set_loop_iterations:
      case Intrinsic::test_set_loop_iterations:
      case Intrinsic::loop_decrement:
      case Intrinsic::loop_decrement_reg:
        return true;
      }
    }
    return false;
  };

  // Scan the instructions to see if there's any that we know will turn into a
  // call or if this loop is already a low-overhead loop.
  auto ScanLoop = [&](Loop *L) {
    for (auto *BB : L->getBlocks()) {
      for (auto &I : *BB) {
        if (MaybeCall(I) || IsHardwareLoopIntrinsic(I)) {
          LLVM_DEBUG(dbgs() << "ARMHWLoops: Bad instruction: " << I << "\n");
          return false;
        }
      }
    }
    return true;
  };

  // Visit inner loops.
  for (auto Inner : *L)
    if (!ScanLoop(Inner))
      return false;

  if (!ScanLoop(L))
    return false;

  // TODO: Check whether the trip count calculation is expensive. If L is the
  // inner loop but we know it has a low trip count, calculating that trip
  // count (in the parent loop) may be detrimental.

  LLVMContext &C = L->getHeader()->getContext();
  HWLoopInfo.CounterInReg = true;
  HWLoopInfo.IsNestingLegal = false;
  HWLoopInfo.PerformEntryTest = true;
  HWLoopInfo.CountType = Type::getInt32Ty(C);
  HWLoopInfo.LoopDecrement = ConstantInt::get(HWLoopInfo.CountType, 1);
  return true;
}

/// Whether a single instruction is allowed inside a tail-predicated loop.
/// \p ICmpCount counts the icmps seen so far (only the backedge compare is
/// permitted).
static bool canTailPredicateInstruction(Instruction &I, int &ICmpCount) {
  // We don't allow icmp's, and because we only look at single block loops,
  // we simply count the icmps, i.e. there should only be 1 for the backedge.
  if (isa<ICmpInst>(&I) && ++ICmpCount > 1)
    return false;

  if (isa<FCmpInst>(&I))
    return false;

  // We could allow extending/narrowing FP loads/stores, but codegen is
  // too inefficient so reject this for now.
  if (isa<FPExtInst>(&I) || isa<FPTruncInst>(&I))
    return false;

  // Extends have to be extending-loads
  if (isa<SExtInst>(&I) || isa<ZExtInst>(&I) )
    if (!I.getOperand(0)->hasOneUse() || !isa<LoadInst>(I.getOperand(0)))
      return false;

  // Truncs have to be narrowing-stores
  if (isa<TruncInst>(&I) )
    if (!I.hasOneUse() || !isa<StoreInst>(*I.user_begin()))
      return false;

  return true;
}

// To set up a tail-predicated loop, we need to know the total number of
// elements processed by that loop. Thus, we need to determine the element
// size and:
// 1) it should be uniform for all operations in the vector loop, so we
//    e.g. don't want any widening/narrowing operations.
// 2) it should be smaller than i64s because we don't have vector operations
//    that work on i64s.
// 3) we don't want elements to be reversed or shuffled, to make sure the
//    tail-predication masks/predicates the right lanes.
//
static bool canTailPredicateLoop(Loop *L, LoopInfo *LI, ScalarEvolution &SE,
                                 const DataLayout &DL,
                                 const LoopAccessInfo *LAI) {
  LLVM_DEBUG(dbgs() << "Tail-predication: checking allowed instructions\n");

  // If there are live-out values, it is probably a reduction, which needs a
  // final reduction step after the loop. MVE has a VADDV instruction to reduce
  // integer vectors, but doesn't have an equivalent one for float vectors. A
  // live-out value that is not recognised as a reduction will result in the
  // tail-predicated loop to be reverted to a non-predicated loop and this is
  // very expensive, i.e. it has a significant performance impact. So, in this
  // case it's better not to tail-predicate the loop, which is what we check
  // here. Thus, we allow only 1 live-out value, which has to be an integer
  // reduction, which matches the loops supported by ARMLowOverheadLoops.
  // It is important to keep ARMLowOverheadLoops and canTailPredicateLoop in
  // sync with each other.
  SmallVector< Instruction *, 8 > LiveOuts;
  LiveOuts = llvm::findDefsUsedOutsideOfLoop(L);
  bool IntReductionsDisabled =
      EnableTailPredication == TailPredication::EnabledNoReductions ||
      EnableTailPredication == TailPredication::ForceEnabledNoReductions;

  // Reject any live-out that is not an (enabled) integer add reduction.
  for (auto *I : LiveOuts) {
    if (!I->getType()->isIntegerTy()) {
      LLVM_DEBUG(dbgs() << "Don't tail-predicate loop with non-integer "
                           "live-out value\n");
      return false;
    }
    if (I->getOpcode() != Instruction::Add) {
      LLVM_DEBUG(dbgs() << "Only add reductions supported\n");
      return false;
    }
    if (IntReductionsDisabled) {
      LLVM_DEBUG(dbgs() << "Integer add reductions not enabled\n");
      return false;
    }
  }

  // Next, check that all instructions can be tail-predicated.
  PredicatedScalarEvolution PSE = LAI->getPSE();
  SmallVector<Instruction *, 16> LoadStores;
  int ICmpCount = 0;
  int Stride = 0;

  for (BasicBlock *BB : L->blocks()) {
    for (Instruction &I : BB->instructionsWithoutDebug()) {
      if (isa<PHINode>(&I))
        continue;
      if (!canTailPredicateInstruction(I, ICmpCount)) {
        LLVM_DEBUG(dbgs() << "Instruction not allowed: "; I.dump());
        return false;
      }

      Type *T  = I.getType();
      if (T->isPointerTy())
        T = T->getPointerElementType();

      // Element sizes above 32 bits have no MVE vector operations.
      if (T->getScalarSizeInBits() > 32) {
        LLVM_DEBUG(dbgs() << "Unsupported Type: "; T->dump());
        return false;
      }

      if (isa<StoreInst>(I) || isa<LoadInst>(I)) {
        Value *Ptr = isa<LoadInst>(I) ? I.getOperand(0) : I.getOperand(1);
        int64_t NextStride = getPtrStride(PSE, Ptr, L);
        // TODO: for now only allow consecutive strides of 1. We could support
        // other strides as long as it is uniform, but let's keep it simple for
        // now.
        if (Stride == 0 && NextStride == 1) {
          Stride = NextStride;
          continue;
        }
        if (Stride != NextStride) {
          LLVM_DEBUG(dbgs() << "Different strides found, can't "
                               "tail-predicate\n.");
          return false;
        }
      }
    }
  }

  LLVM_DEBUG(dbgs() << "tail-predication: all instructions allowed!\n");
  return true;
}

/// Whether to prefer a tail-predicated (folded) vector loop over a vector
/// loop with a scalar epilogue.
bool ARMTTIImpl::preferPredicateOverEpilogue(Loop *L, LoopInfo *LI,
                                             ScalarEvolution &SE,
                                             AssumptionCache &AC,
                                             TargetLibraryInfo *TLI,
                                             DominatorTree *DT,
                                             const LoopAccessInfo *LAI) {
  if (!EnableTailPredication) {
    LLVM_DEBUG(dbgs() << "Tail-predication not enabled.\n");
    return false;
  }

  // Creating a predicated vector loop is the first step for generating a
  // tail-predicated hardware loop, for which we need the MVE masked
  // load/stores instructions:
  if (!ST->hasMVEIntegerOps())
    return false;

  // For now, restrict this to single block loops.
  // Tail-folding is only supported for single-block loops for now.
  if (L->getNumBlocks() > 1) {
    LLVM_DEBUG(dbgs() << "preferPredicateOverEpilogue: not a single block "
                         "loop.\n");
    return false;
  }

  assert(L->empty() && "preferPredicateOverEpilogue: inner-loop expected");

  HardwareLoopInfo HWLoopInfo(L);
  if (!HWLoopInfo.canAnalyze(*LI)) {
    LLVM_DEBUG(dbgs() << "preferPredicateOverEpilogue: hardware-loop is not "
                         "analyzable.\n");
    return false;
  }

  // This checks if we have the low-overhead branch architecture
  // extension, and if we will create a hardware-loop:
  if (!isHardwareLoopProfitable(L, SE, AC, TLI, HWLoopInfo)) {
    LLVM_DEBUG(dbgs() << "preferPredicateOverEpilogue: hardware-loop is not "
                         "profitable.\n");
    return false;
  }

  if (!HWLoopInfo.isHardwareLoopCandidate(SE, *LI, *DT)) {
    LLVM_DEBUG(dbgs() << "preferPredicateOverEpilogue: hardware-loop is not "
                         "a candidate.\n");
    return false;
  }

  // Finally, check that every instruction in the loop body is supported by
  // tail-predication (see canTailPredicateLoop above).
  return canTailPredicateLoop(L, LI, SE, DL, LAI);
}

// Report whether the vectorizer should emit the @llvm.get.active.lane.mask
// intrinsic: only when MVE integer ops are available and tail-predication is
// enabled.
bool ARMTTIImpl::emitGetActiveLaneMask() const {
  if (!ST->hasMVEIntegerOps() || !EnableTailPredication)
    return false;

  // Intrinsic @llvm.get.active.lane.mask is supported.
  // It is used in the MVETailPredication pass, which requires the number of
  // elements processed by this vector loop to setup the tail-predicated
  // loop.
  return true;
}

// Set ARM-specific unrolling preferences; non-M-class cores fall back to the
// generic base implementation.
void ARMTTIImpl::getUnrollingPreferences(Loop *L, ScalarEvolution &SE,
                                         TTI::UnrollingPreferences &UP) {
  // Only currently enable these preferences for M-Class cores.
  if (!ST->isMClass())
    return BasicTTIImplBase::getUnrollingPreferences(L, SE, UP);

  // Disable loop unrolling for Oz and Os.
  UP.OptSizeThreshold = 0;
  UP.PartialOptSizeThreshold = 0;
  if (L->getHeader()->getParent()->hasOptSize())
    return;

  // Only enable on Thumb-2 targets.
  if (!ST->isThumb2())
    return;

  SmallVector<BasicBlock*, 4> ExitingBlocks;
  L->getExitingBlocks(ExitingBlocks);
  LLVM_DEBUG(dbgs() << "Loop has:\n"
                    << "Blocks: " << L->getNumBlocks() << "\n"
                    << "Exit blocks: " << ExitingBlocks.size() << "\n");

  // Only allow another exit other than the latch. This acts as an early exit
  // as it mirrors the profitability calculation of the runtime unroller.
  if (ExitingBlocks.size() > 2)
    return;

  // Limit the CFG of the loop body for targets with a branch predictor.
  // Allowing 4 blocks permits if-then-else diamonds in the body.
  if (ST->hasBranchPredictor() && L->getNumBlocks() > 4)
    return;

  // Scan the loop: don't unroll loops with calls as this could prevent
  // inlining.
  unsigned Cost = 0;
  for (auto *BB : L->getBlocks()) {
    for (auto &I : *BB) {
      // Don't unroll vectorised loop. MVE does not benefit from it as much as
      // scalar code.
      if (I.getType()->isVectorTy())
        return;

      if (isa<CallInst>(I) || isa<InvokeInst>(I)) {
        // Calls that are not lowered to an actual call (e.g. some
        // intrinsics) are still fine to unroll around.
        if (const Function *F = cast<CallBase>(I).getCalledFunction()) {
          if (!isLoweredToCall(F))
            continue;
        }
        return;
      }

      // Accumulate a code-size cost for the whole loop body.
      SmallVector<const Value*, 4> Operands(I.value_op_begin(),
                                            I.value_op_end());
      Cost += getUserCost(&I, Operands, TargetTransformInfo::TCK_CodeSize);
    }
  }

  LLVM_DEBUG(dbgs() << "Cost of loop: " << Cost << "\n");

  UP.Partial = true;
  UP.Runtime = true;
  UP.UpperBound = true;
  UP.UnrollRemainder = true;
  UP.DefaultUnrollRuntimeCount = 4;
  UP.UnrollAndJam = true;
  UP.UnrollAndJamInnerLoopThreshold = 60;

  // Force unrolling of small loops can be very useful because of the branch
  // taken cost of the backedge.
  if (Cost < 12)
    UP.Force = true;
}

// Peeling: no ARM-specific tuning, defer to the base implementation.
void ARMTTIImpl::getPeelingPreferences(Loop *L, ScalarEvolution &SE,
                                       TTI::PeelingPreferences &PP) {
  BaseT::getPeelingPreferences(L, SE, PP);
}

// Prefer the vector reduction intrinsics over shuffle-based reductions
// whenever MVE integer operations are available.
bool ARMTTIImpl::useReductionIntrinsic(unsigned Opcode, Type *Ty,
                                       TTI::ReductionFlags Flags) const {
  return ST->hasMVEIntegerOps();
}