1 //===- ARMTargetTransformInfo.cpp - ARM specific TTI ----------------------===// 2 // 3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. 4 // See https://llvm.org/LICENSE.txt for license information. 5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception 6 // 7 //===----------------------------------------------------------------------===// 8 9 #include "ARMTargetTransformInfo.h" 10 #include "ARMSubtarget.h" 11 #include "MCTargetDesc/ARMAddressingModes.h" 12 #include "llvm/ADT/APInt.h" 13 #include "llvm/ADT/SmallVector.h" 14 #include "llvm/Analysis/LoopInfo.h" 15 #include "llvm/CodeGen/CostTable.h" 16 #include "llvm/CodeGen/ISDOpcodes.h" 17 #include "llvm/CodeGen/ValueTypes.h" 18 #include "llvm/IR/BasicBlock.h" 19 #include "llvm/IR/DataLayout.h" 20 #include "llvm/IR/DerivedTypes.h" 21 #include "llvm/IR/Instruction.h" 22 #include "llvm/IR/Instructions.h" 23 #include "llvm/IR/Intrinsics.h" 24 #include "llvm/IR/IntrinsicInst.h" 25 #include "llvm/IR/IntrinsicsARM.h" 26 #include "llvm/IR/PatternMatch.h" 27 #include "llvm/IR/Type.h" 28 #include "llvm/MC/SubtargetFeature.h" 29 #include "llvm/Support/Casting.h" 30 #include "llvm/Support/KnownBits.h" 31 #include "llvm/Support/MachineValueType.h" 32 #include "llvm/Target/TargetMachine.h" 33 #include "llvm/Transforms/InstCombine/InstCombiner.h" 34 #include "llvm/Transforms/Utils/Local.h" 35 #include "llvm/Transforms/Utils/LoopUtils.h" 36 #include <algorithm> 37 #include <cassert> 38 #include <cstdint> 39 #include <utility> 40 41 using namespace llvm; 42 43 #define DEBUG_TYPE "armtti" 44 45 static cl::opt<bool> EnableMaskedLoadStores( 46 "enable-arm-maskedldst", cl::Hidden, cl::init(true), 47 cl::desc("Enable the generation of masked loads and stores")); 48 49 static cl::opt<bool> DisableLowOverheadLoops( 50 "disable-arm-loloops", cl::Hidden, cl::init(false), 51 cl::desc("Disable the generation of low-overhead loops")); 52 53 static cl::opt<bool> 54 AllowWLSLoops("allow-arm-wlsloops", cl::Hidden, cl::init(true), 55 cl::desc("Enable the generation of WLS loops")); 56 57 extern cl::opt<TailPredication::Mode> EnableTailPredication; 58 59 extern cl::opt<bool> EnableMaskedGatherScatters; 60 61 extern cl::opt<unsigned> MVEMaxSupportedInterleaveFactor; 62 63 /// Convert a vector load intrinsic into a simple llvm load instruction. 64 /// This is beneficial when the underlying object being addressed comes 65 /// from a constant, since we get constant-folding for free. 66 static Value *simplifyNeonVld1(const IntrinsicInst &II, unsigned MemAlign, 67 InstCombiner::BuilderTy &Builder) { 68 auto *IntrAlign = dyn_cast<ConstantInt>(II.getArgOperand(1)); 69 70 if (!IntrAlign) 71 return nullptr; 72 73 unsigned Alignment = IntrAlign->getLimitedValue() < MemAlign 74 ? MemAlign 75 : IntrAlign->getLimitedValue(); 76 77 if (!isPowerOf2_32(Alignment)) 78 return nullptr; 79 80 auto *BCastInst = Builder.CreateBitCast(II.getArgOperand(0), 81 PointerType::get(II.getType(), 0)); 82 return Builder.CreateAlignedLoad(II.getType(), BCastInst, Align(Alignment)); 83 } 84 85 bool ARMTTIImpl::areInlineCompatible(const Function *Caller, 86 const Function *Callee) const { 87 const TargetMachine &TM = getTLI()->getTargetMachine(); 88 const FeatureBitset &CallerBits = 89 TM.getSubtargetImpl(*Caller)->getFeatureBits(); 90 const FeatureBitset &CalleeBits = 91 TM.getSubtargetImpl(*Callee)->getFeatureBits(); 92 93 // To inline a callee, all features not in the allowed list must match exactly. 
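  // For example, if the only difference is a feature on the allowed list that
  // the caller has and the callee lacks, both the exact-match check (the
  // difference is masked out by ~InlineFeaturesAllowed) and the subset check
  // below pass, so inlining is permitted; if instead the callee requires an
  // allowed feature the caller lacks, the subset check fails.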
94 bool MatchExact = (CallerBits & ~InlineFeaturesAllowed) == 95 (CalleeBits & ~InlineFeaturesAllowed); 96 // For features in the allowed list, the callee's features must be a subset of 97 // the callers'. 98 bool MatchSubset = ((CallerBits & CalleeBits) & InlineFeaturesAllowed) == 99 (CalleeBits & InlineFeaturesAllowed); 100 return MatchExact && MatchSubset; 101 } 102 103 TTI::AddressingModeKind 104 ARMTTIImpl::getPreferredAddressingMode(const Loop *L, 105 ScalarEvolution *SE) const { 106 if (ST->hasMVEIntegerOps()) 107 return TTI::AMK_PostIndexed; 108 109 if (L->getHeader()->getParent()->hasOptSize()) 110 return TTI::AMK_None; 111 112 if (ST->isMClass() && ST->isThumb2() && 113 L->getNumBlocks() == 1) 114 return TTI::AMK_PreIndexed; 115 116 return TTI::AMK_None; 117 } 118 119 Optional<Instruction *> 120 ARMTTIImpl::instCombineIntrinsic(InstCombiner &IC, IntrinsicInst &II) const { 121 using namespace PatternMatch; 122 Intrinsic::ID IID = II.getIntrinsicID(); 123 switch (IID) { 124 default: 125 break; 126 case Intrinsic::arm_neon_vld1: { 127 Align MemAlign = 128 getKnownAlignment(II.getArgOperand(0), IC.getDataLayout(), &II, 129 &IC.getAssumptionCache(), &IC.getDominatorTree()); 130 if (Value *V = simplifyNeonVld1(II, MemAlign.value(), IC.Builder)) { 131 return IC.replaceInstUsesWith(II, V); 132 } 133 break; 134 } 135 136 case Intrinsic::arm_neon_vld2: 137 case Intrinsic::arm_neon_vld3: 138 case Intrinsic::arm_neon_vld4: 139 case Intrinsic::arm_neon_vld2lane: 140 case Intrinsic::arm_neon_vld3lane: 141 case Intrinsic::arm_neon_vld4lane: 142 case Intrinsic::arm_neon_vst1: 143 case Intrinsic::arm_neon_vst2: 144 case Intrinsic::arm_neon_vst3: 145 case Intrinsic::arm_neon_vst4: 146 case Intrinsic::arm_neon_vst2lane: 147 case Intrinsic::arm_neon_vst3lane: 148 case Intrinsic::arm_neon_vst4lane: { 149 Align MemAlign = 150 getKnownAlignment(II.getArgOperand(0), IC.getDataLayout(), &II, 151 &IC.getAssumptionCache(), &IC.getDominatorTree()); 152 unsigned AlignArg = II.getNumArgOperands() - 1; 153 Value *AlignArgOp = II.getArgOperand(AlignArg); 154 MaybeAlign Align = cast<ConstantInt>(AlignArgOp)->getMaybeAlignValue(); 155 if (Align && *Align < MemAlign) { 156 return IC.replaceOperand( 157 II, AlignArg, 158 ConstantInt::get(Type::getInt32Ty(II.getContext()), MemAlign.value(), 159 false)); 160 } 161 break; 162 } 163 164 case Intrinsic::arm_mve_pred_i2v: { 165 Value *Arg = II.getArgOperand(0); 166 Value *ArgArg; 167 if (match(Arg, PatternMatch::m_Intrinsic<Intrinsic::arm_mve_pred_v2i>( 168 PatternMatch::m_Value(ArgArg))) && 169 II.getType() == ArgArg->getType()) { 170 return IC.replaceInstUsesWith(II, ArgArg); 171 } 172 Constant *XorMask; 173 if (match(Arg, m_Xor(PatternMatch::m_Intrinsic<Intrinsic::arm_mve_pred_v2i>( 174 PatternMatch::m_Value(ArgArg)), 175 PatternMatch::m_Constant(XorMask))) && 176 II.getType() == ArgArg->getType()) { 177 if (auto *CI = dyn_cast<ConstantInt>(XorMask)) { 178 if (CI->getValue().trunc(16).isAllOnesValue()) { 179 auto TrueVector = IC.Builder.CreateVectorSplat( 180 cast<FixedVectorType>(II.getType())->getNumElements(), 181 IC.Builder.getTrue()); 182 return BinaryOperator::Create(Instruction::Xor, ArgArg, TrueVector); 183 } 184 } 185 } 186 KnownBits ScalarKnown(32); 187 if (IC.SimplifyDemandedBits(&II, 0, APInt::getLowBitsSet(32, 16), 188 ScalarKnown, 0)) { 189 return &II; 190 } 191 break; 192 } 193 case Intrinsic::arm_mve_pred_v2i: { 194 Value *Arg = II.getArgOperand(0); 195 Value *ArgArg; 196 if (match(Arg, PatternMatch::m_Intrinsic<Intrinsic::arm_mve_pred_i2v>( 197 
PatternMatch::m_Value(ArgArg)))) { 198 return IC.replaceInstUsesWith(II, ArgArg); 199 } 200 if (!II.getMetadata(LLVMContext::MD_range)) { 201 Type *IntTy32 = Type::getInt32Ty(II.getContext()); 202 Metadata *M[] = { 203 ConstantAsMetadata::get(ConstantInt::get(IntTy32, 0)), 204 ConstantAsMetadata::get(ConstantInt::get(IntTy32, 0xFFFF))}; 205 II.setMetadata(LLVMContext::MD_range, MDNode::get(II.getContext(), M)); 206 return &II; 207 } 208 break; 209 } 210 case Intrinsic::arm_mve_vadc: 211 case Intrinsic::arm_mve_vadc_predicated: { 212 unsigned CarryOp = 213 (II.getIntrinsicID() == Intrinsic::arm_mve_vadc_predicated) ? 3 : 2; 214 assert(II.getArgOperand(CarryOp)->getType()->getScalarSizeInBits() == 32 && 215 "Bad type for intrinsic!"); 216 217 KnownBits CarryKnown(32); 218 if (IC.SimplifyDemandedBits(&II, CarryOp, APInt::getOneBitSet(32, 29), 219 CarryKnown)) { 220 return &II; 221 } 222 break; 223 } 224 case Intrinsic::arm_mve_vmldava: { 225 Instruction *I = cast<Instruction>(&II); 226 if (I->hasOneUse()) { 227 auto *User = cast<Instruction>(*I->user_begin()); 228 Value *OpZ; 229 if (match(User, m_c_Add(m_Specific(I), m_Value(OpZ))) && 230 match(I->getOperand(3), m_Zero())) { 231 Value *OpX = I->getOperand(4); 232 Value *OpY = I->getOperand(5); 233 Type *OpTy = OpX->getType(); 234 235 IC.Builder.SetInsertPoint(User); 236 Value *V = 237 IC.Builder.CreateIntrinsic(Intrinsic::arm_mve_vmldava, {OpTy}, 238 {I->getOperand(0), I->getOperand(1), 239 I->getOperand(2), OpZ, OpX, OpY}); 240 241 IC.replaceInstUsesWith(*User, V); 242 return IC.eraseInstFromFunction(*User); 243 } 244 } 245 return None; 246 } 247 } 248 return None; 249 } 250 251 int ARMTTIImpl::getIntImmCost(const APInt &Imm, Type *Ty, 252 TTI::TargetCostKind CostKind) { 253 assert(Ty->isIntegerTy()); 254 255 unsigned Bits = Ty->getPrimitiveSizeInBits(); 256 if (Bits == 0 || Imm.getActiveBits() >= 64) 257 return 4; 258 259 int64_t SImmVal = Imm.getSExtValue(); 260 uint64_t ZImmVal = Imm.getZExtValue(); 261 if (!ST->isThumb()) { 262 if ((SImmVal >= 0 && SImmVal < 65536) || 263 (ARM_AM::getSOImmVal(ZImmVal) != -1) || 264 (ARM_AM::getSOImmVal(~ZImmVal) != -1)) 265 return 1; 266 return ST->hasV6T2Ops() ? 2 : 3; 267 } 268 if (ST->isThumb2()) { 269 if ((SImmVal >= 0 && SImmVal < 65536) || 270 (ARM_AM::getT2SOImmVal(ZImmVal) != -1) || 271 (ARM_AM::getT2SOImmVal(~ZImmVal) != -1)) 272 return 1; 273 return ST->hasV6T2Ops() ? 2 : 3; 274 } 275 // Thumb1, any i8 imm cost 1. 276 if (Bits == 8 || (SImmVal >= 0 && SImmVal < 256)) 277 return 1; 278 if ((~SImmVal < 256) || ARM_AM::isThumbImmShiftedVal(ZImmVal)) 279 return 2; 280 // Load from constantpool. 281 return 3; 282 } 283 284 // Constants smaller than 256 fit in the immediate field of 285 // Thumb1 instructions so we return a zero cost and 1 otherwise. 
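// For example, a constant such as 42 fits the 8-bit immediate field of Thumb1
// MOVS/ADDS and is treated as free for code size, whereas 300 does not fit
// and is modelled as one extra instruction.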
286 int ARMTTIImpl::getIntImmCodeSizeCost(unsigned Opcode, unsigned Idx, 287 const APInt &Imm, Type *Ty) { 288 if (Imm.isNonNegative() && Imm.getLimitedValue() < 256) 289 return 0; 290 291 return 1; 292 } 293 294 // Checks whether Inst is part of a min(max()) or max(min()) pattern 295 // that will match to an SSAT instruction 296 static bool isSSATMinMaxPattern(Instruction *Inst, const APInt &Imm) { 297 Value *LHS, *RHS; 298 ConstantInt *C; 299 SelectPatternFlavor InstSPF = matchSelectPattern(Inst, LHS, RHS).Flavor; 300 301 if (InstSPF == SPF_SMAX && 302 PatternMatch::match(RHS, PatternMatch::m_ConstantInt(C)) && 303 C->getValue() == Imm && Imm.isNegative() && (-Imm).isPowerOf2()) { 304 305 auto isSSatMin = [&](Value *MinInst) { 306 if (isa<SelectInst>(MinInst)) { 307 Value *MinLHS, *MinRHS; 308 ConstantInt *MinC; 309 SelectPatternFlavor MinSPF = 310 matchSelectPattern(MinInst, MinLHS, MinRHS).Flavor; 311 if (MinSPF == SPF_SMIN && 312 PatternMatch::match(MinRHS, PatternMatch::m_ConstantInt(MinC)) && 313 MinC->getValue() == ((-Imm) - 1)) 314 return true; 315 } 316 return false; 317 }; 318 319 if (isSSatMin(Inst->getOperand(1)) || 320 (Inst->hasNUses(2) && (isSSatMin(*Inst->user_begin()) || 321 isSSatMin(*(++Inst->user_begin()))))) 322 return true; 323 } 324 return false; 325 } 326 327 int ARMTTIImpl::getIntImmCostInst(unsigned Opcode, unsigned Idx, 328 const APInt &Imm, Type *Ty, 329 TTI::TargetCostKind CostKind, 330 Instruction *Inst) { 331 // Division by a constant can be turned into multiplication, but only if we 332 // know it's constant. So it's not so much that the immediate is cheap (it's 333 // not), but that the alternative is worse. 334 // FIXME: this is probably unneeded with GlobalISel. 335 if ((Opcode == Instruction::SDiv || Opcode == Instruction::UDiv || 336 Opcode == Instruction::SRem || Opcode == Instruction::URem) && 337 Idx == 1) 338 return 0; 339 340 if (Opcode == Instruction::And) { 341 // UXTB/UXTH 342 if (Imm == 255 || Imm == 65535) 343 return 0; 344 // Conversion to BIC is free, and means we can use ~Imm instead. 345 return std::min(getIntImmCost(Imm, Ty, CostKind), 346 getIntImmCost(~Imm, Ty, CostKind)); 347 } 348 349 if (Opcode == Instruction::Add) 350 // Conversion to SUB is free, and means we can use -Imm instead. 
    return std::min(getIntImmCost(Imm, Ty, CostKind),
                    getIntImmCost(-Imm, Ty, CostKind));

  if (Opcode == Instruction::ICmp && Imm.isNegative() &&
      Ty->getIntegerBitWidth() == 32) {
    int64_t NegImm = -Imm.getSExtValue();
    if (ST->isThumb2() && NegImm < 1<<12)
      // icmp X, #-C -> cmn X, #C
      return 0;
    if (ST->isThumb() && NegImm < 1<<8)
      // icmp X, #-C -> adds X, #C
      return 0;
  }

  // xor a, -1 can always be folded to MVN
  if (Opcode == Instruction::Xor && Imm.isAllOnesValue())
    return 0;

  // Ensure that negative constants of min(max()) or max(min()) patterns that
  // match to SSAT instructions don't get hoisted.
  if (Inst && ((ST->hasV6Ops() && !ST->isThumb()) || ST->isThumb2()) &&
      Ty->getIntegerBitWidth() <= 32) {
    if (isSSATMinMaxPattern(Inst, Imm) ||
        (isa<ICmpInst>(Inst) && Inst->hasOneUse() &&
         isSSATMinMaxPattern(cast<Instruction>(*Inst->user_begin()), Imm)))
      return 0;
  }

  return getIntImmCost(Imm, Ty, CostKind);
}

int ARMTTIImpl::getCFInstrCost(unsigned Opcode, TTI::TargetCostKind CostKind) {
  if (CostKind == TTI::TCK_RecipThroughput &&
      (ST->hasNEON() || ST->hasMVEIntegerOps())) {
    // FIXME: The vectorizer is highly sensitive to the cost of these
    // instructions, which suggests that it may be using the costs incorrectly.
    // But, for now, just make them free to avoid performance regressions for
    // vector targets.
    return 0;
  }
  return BaseT::getCFInstrCost(Opcode, CostKind);
}

int ARMTTIImpl::getCastInstrCost(unsigned Opcode, Type *Dst, Type *Src,
                                 TTI::CastContextHint CCH,
                                 TTI::TargetCostKind CostKind,
                                 const Instruction *I) {
  int ISD = TLI->InstructionOpcodeToISD(Opcode);
  assert(ISD && "Invalid opcode");

  // TODO: Allow non-throughput costs that aren't binary.
  auto AdjustCost = [&CostKind](int Cost) {
    if (CostKind != TTI::TCK_RecipThroughput)
      return Cost == 0 ? 0 : 1;
    return Cost;
  };
  auto IsLegalFPType = [this](EVT VT) {
    EVT EltVT = VT.getScalarType();
    return (EltVT == MVT::f32 && ST->hasVFP2Base()) ||
           (EltVT == MVT::f64 && ST->hasFP64()) ||
           (EltVT == MVT::f16 && ST->hasFullFP16());
  };

  EVT SrcTy = TLI->getValueType(DL, Src);
  EVT DstTy = TLI->getValueType(DL, Dst);

  if (!SrcTy.isSimple() || !DstTy.isSimple())
    return AdjustCost(
        BaseT::getCastInstrCost(Opcode, Dst, Src, CCH, CostKind, I));

  // Extending masked loads / truncating masked stores are expensive because
  // we currently don't split them. This means that we'll likely end up
  // loading/storing each element individually (hence the high cost).
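  // An illustrative IR shape for this case (intrinsic signature abbreviated):
  //   %l = call <8 x i16> @llvm.masked.load(...)
  //   %e = zext <8 x i16> %l to <8 x i32>
  // The extended result is wider than 128 bits, so it is charged roughly two
  // operations per destination element below.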
424 if ((ST->hasMVEIntegerOps() && 425 (Opcode == Instruction::Trunc || Opcode == Instruction::ZExt || 426 Opcode == Instruction::SExt)) || 427 (ST->hasMVEFloatOps() && 428 (Opcode == Instruction::FPExt || Opcode == Instruction::FPTrunc) && 429 IsLegalFPType(SrcTy) && IsLegalFPType(DstTy))) 430 if (CCH == TTI::CastContextHint::Masked && DstTy.getSizeInBits() > 128) 431 return 2 * DstTy.getVectorNumElements() * 432 ST->getMVEVectorCostFactor(CostKind); 433 434 // The extend of other kinds of load is free 435 if (CCH == TTI::CastContextHint::Normal || 436 CCH == TTI::CastContextHint::Masked) { 437 static const TypeConversionCostTblEntry LoadConversionTbl[] = { 438 {ISD::SIGN_EXTEND, MVT::i32, MVT::i16, 0}, 439 {ISD::ZERO_EXTEND, MVT::i32, MVT::i16, 0}, 440 {ISD::SIGN_EXTEND, MVT::i32, MVT::i8, 0}, 441 {ISD::ZERO_EXTEND, MVT::i32, MVT::i8, 0}, 442 {ISD::SIGN_EXTEND, MVT::i16, MVT::i8, 0}, 443 {ISD::ZERO_EXTEND, MVT::i16, MVT::i8, 0}, 444 {ISD::SIGN_EXTEND, MVT::i64, MVT::i32, 1}, 445 {ISD::ZERO_EXTEND, MVT::i64, MVT::i32, 1}, 446 {ISD::SIGN_EXTEND, MVT::i64, MVT::i16, 1}, 447 {ISD::ZERO_EXTEND, MVT::i64, MVT::i16, 1}, 448 {ISD::SIGN_EXTEND, MVT::i64, MVT::i8, 1}, 449 {ISD::ZERO_EXTEND, MVT::i64, MVT::i8, 1}, 450 }; 451 if (const auto *Entry = ConvertCostTableLookup( 452 LoadConversionTbl, ISD, DstTy.getSimpleVT(), SrcTy.getSimpleVT())) 453 return AdjustCost(Entry->Cost); 454 455 static const TypeConversionCostTblEntry MVELoadConversionTbl[] = { 456 {ISD::SIGN_EXTEND, MVT::v4i32, MVT::v4i16, 0}, 457 {ISD::ZERO_EXTEND, MVT::v4i32, MVT::v4i16, 0}, 458 {ISD::SIGN_EXTEND, MVT::v4i32, MVT::v4i8, 0}, 459 {ISD::ZERO_EXTEND, MVT::v4i32, MVT::v4i8, 0}, 460 {ISD::SIGN_EXTEND, MVT::v8i16, MVT::v8i8, 0}, 461 {ISD::ZERO_EXTEND, MVT::v8i16, MVT::v8i8, 0}, 462 // The following extend from a legal type to an illegal type, so need to 463 // split the load. This introduced an extra load operation, but the 464 // extend is still "free". 465 {ISD::SIGN_EXTEND, MVT::v8i32, MVT::v8i16, 1}, 466 {ISD::ZERO_EXTEND, MVT::v8i32, MVT::v8i16, 1}, 467 {ISD::SIGN_EXTEND, MVT::v16i32, MVT::v16i8, 3}, 468 {ISD::ZERO_EXTEND, MVT::v16i32, MVT::v16i8, 3}, 469 {ISD::SIGN_EXTEND, MVT::v16i16, MVT::v16i8, 1}, 470 {ISD::ZERO_EXTEND, MVT::v16i16, MVT::v16i8, 1}, 471 }; 472 if (SrcTy.isVector() && ST->hasMVEIntegerOps()) { 473 if (const auto *Entry = 474 ConvertCostTableLookup(MVELoadConversionTbl, ISD, 475 DstTy.getSimpleVT(), SrcTy.getSimpleVT())) 476 return Entry->Cost * ST->getMVEVectorCostFactor(CostKind); 477 } 478 479 static const TypeConversionCostTblEntry MVEFLoadConversionTbl[] = { 480 // FPExtends are similar but also require the VCVT instructions. 481 {ISD::FP_EXTEND, MVT::v4f32, MVT::v4f16, 1}, 482 {ISD::FP_EXTEND, MVT::v8f32, MVT::v8f16, 3}, 483 }; 484 if (SrcTy.isVector() && ST->hasMVEFloatOps()) { 485 if (const auto *Entry = 486 ConvertCostTableLookup(MVEFLoadConversionTbl, ISD, 487 DstTy.getSimpleVT(), SrcTy.getSimpleVT())) 488 return Entry->Cost * ST->getMVEVectorCostFactor(CostKind); 489 } 490 491 // The truncate of a store is free. This is the mirror of extends above. 
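    // For example, a v8i16 -> v8i8 truncate feeding a store can be folded
    // into a narrowing store (VSTRB.16 under MVE), so the truncate itself is
    // free; larger-than-legal sources only pay for the extra split stores.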
492 static const TypeConversionCostTblEntry MVEStoreConversionTbl[] = { 493 {ISD::TRUNCATE, MVT::v4i32, MVT::v4i16, 0}, 494 {ISD::TRUNCATE, MVT::v4i32, MVT::v4i8, 0}, 495 {ISD::TRUNCATE, MVT::v8i16, MVT::v8i8, 0}, 496 {ISD::TRUNCATE, MVT::v8i32, MVT::v8i16, 1}, 497 {ISD::TRUNCATE, MVT::v8i32, MVT::v8i8, 1}, 498 {ISD::TRUNCATE, MVT::v16i32, MVT::v16i8, 3}, 499 {ISD::TRUNCATE, MVT::v16i16, MVT::v16i8, 1}, 500 }; 501 if (SrcTy.isVector() && ST->hasMVEIntegerOps()) { 502 if (const auto *Entry = 503 ConvertCostTableLookup(MVEStoreConversionTbl, ISD, 504 SrcTy.getSimpleVT(), DstTy.getSimpleVT())) 505 return Entry->Cost * ST->getMVEVectorCostFactor(CostKind); 506 } 507 508 static const TypeConversionCostTblEntry MVEFStoreConversionTbl[] = { 509 {ISD::FP_ROUND, MVT::v4f32, MVT::v4f16, 1}, 510 {ISD::FP_ROUND, MVT::v8f32, MVT::v8f16, 3}, 511 }; 512 if (SrcTy.isVector() && ST->hasMVEFloatOps()) { 513 if (const auto *Entry = 514 ConvertCostTableLookup(MVEFStoreConversionTbl, ISD, 515 SrcTy.getSimpleVT(), DstTy.getSimpleVT())) 516 return Entry->Cost * ST->getMVEVectorCostFactor(CostKind); 517 } 518 } 519 520 // NEON vector operations that can extend their inputs. 521 if ((ISD == ISD::SIGN_EXTEND || ISD == ISD::ZERO_EXTEND) && 522 I && I->hasOneUse() && ST->hasNEON() && SrcTy.isVector()) { 523 static const TypeConversionCostTblEntry NEONDoubleWidthTbl[] = { 524 // vaddl 525 { ISD::ADD, MVT::v4i32, MVT::v4i16, 0 }, 526 { ISD::ADD, MVT::v8i16, MVT::v8i8, 0 }, 527 // vsubl 528 { ISD::SUB, MVT::v4i32, MVT::v4i16, 0 }, 529 { ISD::SUB, MVT::v8i16, MVT::v8i8, 0 }, 530 // vmull 531 { ISD::MUL, MVT::v4i32, MVT::v4i16, 0 }, 532 { ISD::MUL, MVT::v8i16, MVT::v8i8, 0 }, 533 // vshll 534 { ISD::SHL, MVT::v4i32, MVT::v4i16, 0 }, 535 { ISD::SHL, MVT::v8i16, MVT::v8i8, 0 }, 536 }; 537 538 auto *User = cast<Instruction>(*I->user_begin()); 539 int UserISD = TLI->InstructionOpcodeToISD(User->getOpcode()); 540 if (auto *Entry = ConvertCostTableLookup(NEONDoubleWidthTbl, UserISD, 541 DstTy.getSimpleVT(), 542 SrcTy.getSimpleVT())) { 543 return AdjustCost(Entry->Cost); 544 } 545 } 546 547 // Single to/from double precision conversions. 548 if (Src->isVectorTy() && ST->hasNEON() && 549 ((ISD == ISD::FP_ROUND && SrcTy.getScalarType() == MVT::f64 && 550 DstTy.getScalarType() == MVT::f32) || 551 (ISD == ISD::FP_EXTEND && SrcTy.getScalarType() == MVT::f32 && 552 DstTy.getScalarType() == MVT::f64))) { 553 static const CostTblEntry NEONFltDblTbl[] = { 554 // Vector fptrunc/fpext conversions. 555 {ISD::FP_ROUND, MVT::v2f64, 2}, 556 {ISD::FP_EXTEND, MVT::v2f32, 2}, 557 {ISD::FP_EXTEND, MVT::v4f32, 4}}; 558 559 std::pair<int, MVT> LT = TLI->getTypeLegalizationCost(DL, Src); 560 if (const auto *Entry = CostTableLookup(NEONFltDblTbl, ISD, LT.second)) 561 return AdjustCost(LT.first * Entry->Cost); 562 } 563 564 // Some arithmetic, load and store operations have specific instructions 565 // to cast up/down their types automatically at no extra cost. 566 // TODO: Get these tables to know at least what the related operations are. 567 static const TypeConversionCostTblEntry NEONVectorConversionTbl[] = { 568 { ISD::SIGN_EXTEND, MVT::v4i32, MVT::v4i16, 1 }, 569 { ISD::ZERO_EXTEND, MVT::v4i32, MVT::v4i16, 1 }, 570 { ISD::SIGN_EXTEND, MVT::v2i64, MVT::v2i32, 1 }, 571 { ISD::ZERO_EXTEND, MVT::v2i64, MVT::v2i32, 1 }, 572 { ISD::TRUNCATE, MVT::v4i32, MVT::v4i64, 0 }, 573 { ISD::TRUNCATE, MVT::v4i16, MVT::v4i32, 1 }, 574 575 // The number of vmovl instructions for the extension. 
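    // e.g. a v8i8 -> v8i16 sign extend is a single vmovl.s8, while
    // v4i8 -> v4i32 needs vmovl.s8 followed by vmovl.s16, hence a cost of 2.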
576 { ISD::SIGN_EXTEND, MVT::v8i16, MVT::v8i8, 1 }, 577 { ISD::ZERO_EXTEND, MVT::v8i16, MVT::v8i8, 1 }, 578 { ISD::SIGN_EXTEND, MVT::v4i32, MVT::v4i8, 2 }, 579 { ISD::ZERO_EXTEND, MVT::v4i32, MVT::v4i8, 2 }, 580 { ISD::SIGN_EXTEND, MVT::v2i64, MVT::v2i8, 3 }, 581 { ISD::ZERO_EXTEND, MVT::v2i64, MVT::v2i8, 3 }, 582 { ISD::SIGN_EXTEND, MVT::v2i64, MVT::v2i16, 2 }, 583 { ISD::ZERO_EXTEND, MVT::v2i64, MVT::v2i16, 2 }, 584 { ISD::SIGN_EXTEND, MVT::v4i64, MVT::v4i16, 3 }, 585 { ISD::ZERO_EXTEND, MVT::v4i64, MVT::v4i16, 3 }, 586 { ISD::SIGN_EXTEND, MVT::v8i32, MVT::v8i8, 3 }, 587 { ISD::ZERO_EXTEND, MVT::v8i32, MVT::v8i8, 3 }, 588 { ISD::SIGN_EXTEND, MVT::v8i64, MVT::v8i8, 7 }, 589 { ISD::ZERO_EXTEND, MVT::v8i64, MVT::v8i8, 7 }, 590 { ISD::SIGN_EXTEND, MVT::v8i64, MVT::v8i16, 6 }, 591 { ISD::ZERO_EXTEND, MVT::v8i64, MVT::v8i16, 6 }, 592 { ISD::SIGN_EXTEND, MVT::v16i32, MVT::v16i8, 6 }, 593 { ISD::ZERO_EXTEND, MVT::v16i32, MVT::v16i8, 6 }, 594 595 // Operations that we legalize using splitting. 596 { ISD::TRUNCATE, MVT::v16i8, MVT::v16i32, 6 }, 597 { ISD::TRUNCATE, MVT::v8i8, MVT::v8i32, 3 }, 598 599 // Vector float <-> i32 conversions. 600 { ISD::SINT_TO_FP, MVT::v4f32, MVT::v4i32, 1 }, 601 { ISD::UINT_TO_FP, MVT::v4f32, MVT::v4i32, 1 }, 602 603 { ISD::SINT_TO_FP, MVT::v2f32, MVT::v2i8, 3 }, 604 { ISD::UINT_TO_FP, MVT::v2f32, MVT::v2i8, 3 }, 605 { ISD::SINT_TO_FP, MVT::v2f32, MVT::v2i16, 2 }, 606 { ISD::UINT_TO_FP, MVT::v2f32, MVT::v2i16, 2 }, 607 { ISD::SINT_TO_FP, MVT::v2f32, MVT::v2i32, 1 }, 608 { ISD::UINT_TO_FP, MVT::v2f32, MVT::v2i32, 1 }, 609 { ISD::SINT_TO_FP, MVT::v4f32, MVT::v4i1, 3 }, 610 { ISD::UINT_TO_FP, MVT::v4f32, MVT::v4i1, 3 }, 611 { ISD::SINT_TO_FP, MVT::v4f32, MVT::v4i8, 3 }, 612 { ISD::UINT_TO_FP, MVT::v4f32, MVT::v4i8, 3 }, 613 { ISD::SINT_TO_FP, MVT::v4f32, MVT::v4i16, 2 }, 614 { ISD::UINT_TO_FP, MVT::v4f32, MVT::v4i16, 2 }, 615 { ISD::SINT_TO_FP, MVT::v8f32, MVT::v8i16, 4 }, 616 { ISD::UINT_TO_FP, MVT::v8f32, MVT::v8i16, 4 }, 617 { ISD::SINT_TO_FP, MVT::v8f32, MVT::v8i32, 2 }, 618 { ISD::UINT_TO_FP, MVT::v8f32, MVT::v8i32, 2 }, 619 { ISD::SINT_TO_FP, MVT::v16f32, MVT::v16i16, 8 }, 620 { ISD::UINT_TO_FP, MVT::v16f32, MVT::v16i16, 8 }, 621 { ISD::SINT_TO_FP, MVT::v16f32, MVT::v16i32, 4 }, 622 { ISD::UINT_TO_FP, MVT::v16f32, MVT::v16i32, 4 }, 623 624 { ISD::FP_TO_SINT, MVT::v4i32, MVT::v4f32, 1 }, 625 { ISD::FP_TO_UINT, MVT::v4i32, MVT::v4f32, 1 }, 626 { ISD::FP_TO_SINT, MVT::v4i8, MVT::v4f32, 3 }, 627 { ISD::FP_TO_UINT, MVT::v4i8, MVT::v4f32, 3 }, 628 { ISD::FP_TO_SINT, MVT::v4i16, MVT::v4f32, 2 }, 629 { ISD::FP_TO_UINT, MVT::v4i16, MVT::v4f32, 2 }, 630 631 // Vector double <-> i32 conversions. 
632 { ISD::SINT_TO_FP, MVT::v2f64, MVT::v2i32, 2 }, 633 { ISD::UINT_TO_FP, MVT::v2f64, MVT::v2i32, 2 }, 634 635 { ISD::SINT_TO_FP, MVT::v2f64, MVT::v2i8, 4 }, 636 { ISD::UINT_TO_FP, MVT::v2f64, MVT::v2i8, 4 }, 637 { ISD::SINT_TO_FP, MVT::v2f64, MVT::v2i16, 3 }, 638 { ISD::UINT_TO_FP, MVT::v2f64, MVT::v2i16, 3 }, 639 { ISD::SINT_TO_FP, MVT::v2f64, MVT::v2i32, 2 }, 640 { ISD::UINT_TO_FP, MVT::v2f64, MVT::v2i32, 2 }, 641 642 { ISD::FP_TO_SINT, MVT::v2i32, MVT::v2f64, 2 }, 643 { ISD::FP_TO_UINT, MVT::v2i32, MVT::v2f64, 2 }, 644 { ISD::FP_TO_SINT, MVT::v8i16, MVT::v8f32, 4 }, 645 { ISD::FP_TO_UINT, MVT::v8i16, MVT::v8f32, 4 }, 646 { ISD::FP_TO_SINT, MVT::v16i16, MVT::v16f32, 8 }, 647 { ISD::FP_TO_UINT, MVT::v16i16, MVT::v16f32, 8 } 648 }; 649 650 if (SrcTy.isVector() && ST->hasNEON()) { 651 if (const auto *Entry = ConvertCostTableLookup(NEONVectorConversionTbl, ISD, 652 DstTy.getSimpleVT(), 653 SrcTy.getSimpleVT())) 654 return AdjustCost(Entry->Cost); 655 } 656 657 // Scalar float to integer conversions. 658 static const TypeConversionCostTblEntry NEONFloatConversionTbl[] = { 659 { ISD::FP_TO_SINT, MVT::i1, MVT::f32, 2 }, 660 { ISD::FP_TO_UINT, MVT::i1, MVT::f32, 2 }, 661 { ISD::FP_TO_SINT, MVT::i1, MVT::f64, 2 }, 662 { ISD::FP_TO_UINT, MVT::i1, MVT::f64, 2 }, 663 { ISD::FP_TO_SINT, MVT::i8, MVT::f32, 2 }, 664 { ISD::FP_TO_UINT, MVT::i8, MVT::f32, 2 }, 665 { ISD::FP_TO_SINT, MVT::i8, MVT::f64, 2 }, 666 { ISD::FP_TO_UINT, MVT::i8, MVT::f64, 2 }, 667 { ISD::FP_TO_SINT, MVT::i16, MVT::f32, 2 }, 668 { ISD::FP_TO_UINT, MVT::i16, MVT::f32, 2 }, 669 { ISD::FP_TO_SINT, MVT::i16, MVT::f64, 2 }, 670 { ISD::FP_TO_UINT, MVT::i16, MVT::f64, 2 }, 671 { ISD::FP_TO_SINT, MVT::i32, MVT::f32, 2 }, 672 { ISD::FP_TO_UINT, MVT::i32, MVT::f32, 2 }, 673 { ISD::FP_TO_SINT, MVT::i32, MVT::f64, 2 }, 674 { ISD::FP_TO_UINT, MVT::i32, MVT::f64, 2 }, 675 { ISD::FP_TO_SINT, MVT::i64, MVT::f32, 10 }, 676 { ISD::FP_TO_UINT, MVT::i64, MVT::f32, 10 }, 677 { ISD::FP_TO_SINT, MVT::i64, MVT::f64, 10 }, 678 { ISD::FP_TO_UINT, MVT::i64, MVT::f64, 10 } 679 }; 680 if (SrcTy.isFloatingPoint() && ST->hasNEON()) { 681 if (const auto *Entry = ConvertCostTableLookup(NEONFloatConversionTbl, ISD, 682 DstTy.getSimpleVT(), 683 SrcTy.getSimpleVT())) 684 return AdjustCost(Entry->Cost); 685 } 686 687 // Scalar integer to float conversions. 
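  // e.g. a scalar sitofp i32 -> f32 needs a GPR-to-FP transfer (vmov) plus a
  // vcvt, which is roughly where the cost of 2 in the table below comes from.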
688 static const TypeConversionCostTblEntry NEONIntegerConversionTbl[] = { 689 { ISD::SINT_TO_FP, MVT::f32, MVT::i1, 2 }, 690 { ISD::UINT_TO_FP, MVT::f32, MVT::i1, 2 }, 691 { ISD::SINT_TO_FP, MVT::f64, MVT::i1, 2 }, 692 { ISD::UINT_TO_FP, MVT::f64, MVT::i1, 2 }, 693 { ISD::SINT_TO_FP, MVT::f32, MVT::i8, 2 }, 694 { ISD::UINT_TO_FP, MVT::f32, MVT::i8, 2 }, 695 { ISD::SINT_TO_FP, MVT::f64, MVT::i8, 2 }, 696 { ISD::UINT_TO_FP, MVT::f64, MVT::i8, 2 }, 697 { ISD::SINT_TO_FP, MVT::f32, MVT::i16, 2 }, 698 { ISD::UINT_TO_FP, MVT::f32, MVT::i16, 2 }, 699 { ISD::SINT_TO_FP, MVT::f64, MVT::i16, 2 }, 700 { ISD::UINT_TO_FP, MVT::f64, MVT::i16, 2 }, 701 { ISD::SINT_TO_FP, MVT::f32, MVT::i32, 2 }, 702 { ISD::UINT_TO_FP, MVT::f32, MVT::i32, 2 }, 703 { ISD::SINT_TO_FP, MVT::f64, MVT::i32, 2 }, 704 { ISD::UINT_TO_FP, MVT::f64, MVT::i32, 2 }, 705 { ISD::SINT_TO_FP, MVT::f32, MVT::i64, 10 }, 706 { ISD::UINT_TO_FP, MVT::f32, MVT::i64, 10 }, 707 { ISD::SINT_TO_FP, MVT::f64, MVT::i64, 10 }, 708 { ISD::UINT_TO_FP, MVT::f64, MVT::i64, 10 } 709 }; 710 711 if (SrcTy.isInteger() && ST->hasNEON()) { 712 if (const auto *Entry = ConvertCostTableLookup(NEONIntegerConversionTbl, 713 ISD, DstTy.getSimpleVT(), 714 SrcTy.getSimpleVT())) 715 return AdjustCost(Entry->Cost); 716 } 717 718 // MVE extend costs, taken from codegen tests. i8->i16 or i16->i32 is one 719 // instruction, i8->i32 is two. i64 zexts are an VAND with a constant, sext 720 // are linearised so take more. 721 static const TypeConversionCostTblEntry MVEVectorConversionTbl[] = { 722 { ISD::SIGN_EXTEND, MVT::v8i16, MVT::v8i8, 1 }, 723 { ISD::ZERO_EXTEND, MVT::v8i16, MVT::v8i8, 1 }, 724 { ISD::SIGN_EXTEND, MVT::v4i32, MVT::v4i8, 2 }, 725 { ISD::ZERO_EXTEND, MVT::v4i32, MVT::v4i8, 2 }, 726 { ISD::SIGN_EXTEND, MVT::v2i64, MVT::v2i8, 10 }, 727 { ISD::ZERO_EXTEND, MVT::v2i64, MVT::v2i8, 2 }, 728 { ISD::SIGN_EXTEND, MVT::v4i32, MVT::v4i16, 1 }, 729 { ISD::ZERO_EXTEND, MVT::v4i32, MVT::v4i16, 1 }, 730 { ISD::SIGN_EXTEND, MVT::v2i64, MVT::v2i16, 10 }, 731 { ISD::ZERO_EXTEND, MVT::v2i64, MVT::v2i16, 2 }, 732 { ISD::SIGN_EXTEND, MVT::v2i64, MVT::v2i32, 8 }, 733 { ISD::ZERO_EXTEND, MVT::v2i64, MVT::v2i32, 2 }, 734 }; 735 736 if (SrcTy.isVector() && ST->hasMVEIntegerOps()) { 737 if (const auto *Entry = ConvertCostTableLookup(MVEVectorConversionTbl, 738 ISD, DstTy.getSimpleVT(), 739 SrcTy.getSimpleVT())) 740 return Entry->Cost * ST->getMVEVectorCostFactor(CostKind); 741 } 742 743 if (ISD == ISD::FP_ROUND || ISD == ISD::FP_EXTEND) { 744 // As general rule, fp converts that were not matched above are scalarized 745 // and cost 1 vcvt for each lane, so long as the instruction is available. 746 // If not it will become a series of function calls. 747 const int CallCost = getCallInstrCost(nullptr, Dst, {Src}, CostKind); 748 int Lanes = 1; 749 if (SrcTy.isFixedLengthVector()) 750 Lanes = SrcTy.getVectorNumElements(); 751 752 if (IsLegalFPType(SrcTy) && IsLegalFPType(DstTy)) 753 return Lanes; 754 else 755 return Lanes * CallCost; 756 } 757 758 if (ISD == ISD::TRUNCATE && ST->hasMVEIntegerOps() && 759 SrcTy.isFixedLengthVector()) { 760 // Treat a truncate with larger than legal source (128bits for MVE) as 761 // expensive, 2 instructions per lane. 762 if ((SrcTy.getScalarType() == MVT::i8 || 763 SrcTy.getScalarType() == MVT::i16 || 764 SrcTy.getScalarType() == MVT::i32) && 765 SrcTy.getSizeInBits() > 128 && 766 SrcTy.getSizeInBits() > DstTy.getSizeInBits()) 767 return SrcTy.getVectorNumElements() * 2; 768 } 769 770 // Scalar integer conversion costs. 
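  // e.g. truncating an i64 to i32 simply uses the low register of the GPR
  // pair, so the i64 truncates below are free, while i16 -> i64 sign
  // extension needs two dependent operations.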
  static const TypeConversionCostTblEntry ARMIntegerConversionTbl[] = {
    // i16 -> i64 requires two dependent operations.
    { ISD::SIGN_EXTEND, MVT::i64, MVT::i16, 2 },

    // Truncates on i64 are assumed to be free.
    { ISD::TRUNCATE, MVT::i32, MVT::i64, 0 },
    { ISD::TRUNCATE, MVT::i16, MVT::i64, 0 },
    { ISD::TRUNCATE, MVT::i8,  MVT::i64, 0 },
    { ISD::TRUNCATE, MVT::i1,  MVT::i64, 0 }
  };

  if (SrcTy.isInteger()) {
    if (const auto *Entry = ConvertCostTableLookup(ARMIntegerConversionTbl, ISD,
                                                   DstTy.getSimpleVT(),
                                                   SrcTy.getSimpleVT()))
      return AdjustCost(Entry->Cost);
  }

  int BaseCost = ST->hasMVEIntegerOps() && Src->isVectorTy()
                     ? ST->getMVEVectorCostFactor(CostKind)
                     : 1;
  return AdjustCost(
      BaseCost * BaseT::getCastInstrCost(Opcode, Dst, Src, CCH, CostKind, I));
}

int ARMTTIImpl::getVectorInstrCost(unsigned Opcode, Type *ValTy,
                                   unsigned Index) {
  // Penalize inserting into a D-subregister. We end up with a three times
  // lower estimated throughput on Swift.
  if (ST->hasSlowLoadDSubregister() && Opcode == Instruction::InsertElement &&
      ValTy->isVectorTy() && ValTy->getScalarSizeInBits() <= 32)
    return 3;

  if (ST->hasNEON() && (Opcode == Instruction::InsertElement ||
                        Opcode == Instruction::ExtractElement)) {
    // Cross-class copies are expensive on many microarchitectures,
    // so assume they are expensive by default.
    if (cast<VectorType>(ValTy)->getElementType()->isIntegerTy())
      return 3;

    // Even if it's not a cross class copy, this likely leads to mixing
    // of NEON and VFP code and should therefore be penalized.
    if (ValTy->isVectorTy() &&
        ValTy->getScalarSizeInBits() <= 32)
      return std::max(BaseT::getVectorInstrCost(Opcode, ValTy, Index), 2U);
  }

  if (ST->hasMVEIntegerOps() && (Opcode == Instruction::InsertElement ||
                                 Opcode == Instruction::ExtractElement)) {
    // Integer cross-lane moves are more expensive than float, which can
    // sometimes just be vmovs. Integers involve being passed to GPR
    // registers, causing more of a delay.
    std::pair<unsigned, MVT> LT =
        getTLI()->getTypeLegalizationCost(DL, ValTy->getScalarType());
    return LT.first * (ValTy->getScalarType()->isIntegerTy() ? 4 : 1);
  }

  return BaseT::getVectorInstrCost(Opcode, ValTy, Index);
}

int ARMTTIImpl::getCmpSelInstrCost(unsigned Opcode, Type *ValTy, Type *CondTy,
                                   CmpInst::Predicate VecPred,
                                   TTI::TargetCostKind CostKind,
                                   const Instruction *I) {
  int ISD = TLI->InstructionOpcodeToISD(Opcode);

  // Thumb scalar code size cost for select.
  if (CostKind == TTI::TCK_CodeSize && ISD == ISD::SELECT &&
      ST->isThumb() && !ValTy->isVectorTy()) {
    // Assume expensive structs.
    if (TLI->getValueType(DL, ValTy, true) == MVT::Other)
      return TTI::TCC_Expensive;

    // Select costs can vary because they:
    // - may require one or more conditional mov (including an IT),
    // - can't operate directly on immediates,
    // - require live flags, which we can't copy around easily.
    int Cost = TLI->getTypeLegalizationCost(DL, ValTy).first;

    // Possible IT instruction for Thumb2, or more for Thumb1.
    ++Cost;

    // i1 values may need rematerialising by using mov immediates and/or
    // flag setting instructions.
    if (ValTy->isIntegerTy(1))
      ++Cost;

    return Cost;
  }

  // If this is a vector min/max/abs, use the cost of that intrinsic directly
  // instead.
Hopefully when min/max intrinsics are more prevalent this code 863 // will not be needed. 864 const Instruction *Sel = I; 865 if ((Opcode == Instruction::ICmp || Opcode == Instruction::FCmp) && Sel && 866 Sel->hasOneUse()) 867 Sel = cast<Instruction>(Sel->user_back()); 868 if (Sel && ValTy->isVectorTy() && 869 (ValTy->isIntOrIntVectorTy() || ValTy->isFPOrFPVectorTy())) { 870 const Value *LHS, *RHS; 871 SelectPatternFlavor SPF = matchSelectPattern(Sel, LHS, RHS).Flavor; 872 unsigned IID = 0; 873 switch (SPF) { 874 case SPF_ABS: 875 IID = Intrinsic::abs; 876 break; 877 case SPF_SMIN: 878 IID = Intrinsic::smin; 879 break; 880 case SPF_SMAX: 881 IID = Intrinsic::smax; 882 break; 883 case SPF_UMIN: 884 IID = Intrinsic::umin; 885 break; 886 case SPF_UMAX: 887 IID = Intrinsic::umax; 888 break; 889 case SPF_FMINNUM: 890 IID = Intrinsic::minnum; 891 break; 892 case SPF_FMAXNUM: 893 IID = Intrinsic::maxnum; 894 break; 895 default: 896 break; 897 } 898 if (IID) { 899 // The ICmp is free, the select gets the cost of the min/max/etc 900 if (Sel != I) 901 return 0; 902 IntrinsicCostAttributes CostAttrs(IID, ValTy, {ValTy, ValTy}); 903 return *getIntrinsicInstrCost(CostAttrs, CostKind).getValue(); 904 } 905 } 906 907 // On NEON a vector select gets lowered to vbsl. 908 if (ST->hasNEON() && ValTy->isVectorTy() && ISD == ISD::SELECT && CondTy) { 909 // Lowering of some vector selects is currently far from perfect. 910 static const TypeConversionCostTblEntry NEONVectorSelectTbl[] = { 911 { ISD::SELECT, MVT::v4i1, MVT::v4i64, 4*4 + 1*2 + 1 }, 912 { ISD::SELECT, MVT::v8i1, MVT::v8i64, 50 }, 913 { ISD::SELECT, MVT::v16i1, MVT::v16i64, 100 } 914 }; 915 916 EVT SelCondTy = TLI->getValueType(DL, CondTy); 917 EVT SelValTy = TLI->getValueType(DL, ValTy); 918 if (SelCondTy.isSimple() && SelValTy.isSimple()) { 919 if (const auto *Entry = ConvertCostTableLookup(NEONVectorSelectTbl, ISD, 920 SelCondTy.getSimpleVT(), 921 SelValTy.getSimpleVT())) 922 return Entry->Cost; 923 } 924 925 std::pair<int, MVT> LT = TLI->getTypeLegalizationCost(DL, ValTy); 926 return LT.first; 927 } 928 929 if (ST->hasMVEIntegerOps() && ValTy->isVectorTy() && 930 (Opcode == Instruction::ICmp || Opcode == Instruction::FCmp) && 931 cast<FixedVectorType>(ValTy)->getNumElements() > 1) { 932 FixedVectorType *VecValTy = cast<FixedVectorType>(ValTy); 933 FixedVectorType *VecCondTy = dyn_cast_or_null<FixedVectorType>(CondTy); 934 if (!VecCondTy) 935 VecCondTy = cast<FixedVectorType>(CmpInst::makeCmpResultType(VecValTy)); 936 937 // If we don't have mve.fp any fp operations will need to be scalarized. 938 if (Opcode == Instruction::FCmp && !ST->hasMVEFloatOps()) { 939 // One scalaization insert, one scalarization extract and the cost of the 940 // fcmps. 941 return BaseT::getScalarizationOverhead(VecValTy, false, true) + 942 BaseT::getScalarizationOverhead(VecCondTy, true, false) + 943 VecValTy->getNumElements() * 944 getCmpSelInstrCost(Opcode, ValTy->getScalarType(), 945 VecCondTy->getScalarType(), VecPred, CostKind, 946 I); 947 } 948 949 std::pair<unsigned, MVT> LT = TLI->getTypeLegalizationCost(DL, ValTy); 950 int BaseCost = ST->getMVEVectorCostFactor(CostKind); 951 // There are two types - the input that specifies the type of the compare 952 // and the output vXi1 type. Because we don't know how the output will be 953 // split, we may need an expensive shuffle to get two in sync. This has the 954 // effect of making larger than legal compares (v8i32 for example) 955 // expensive. 
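    // Illustratively, a v8i32 compare is legalised as two v4i32 VCMPs, and
    // recombining the two v4i1 results is what the extra scalarization
    // overhead below accounts for.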
956 if (LT.second.getVectorNumElements() > 2) { 957 if (LT.first > 1) 958 return LT.first * BaseCost + 959 BaseT::getScalarizationOverhead(VecCondTy, true, false); 960 return BaseCost; 961 } 962 } 963 964 // Default to cheap (throughput/size of 1 instruction) but adjust throughput 965 // for "multiple beats" potentially needed by MVE instructions. 966 int BaseCost = 1; 967 if (ST->hasMVEIntegerOps() && ValTy->isVectorTy()) 968 BaseCost = ST->getMVEVectorCostFactor(CostKind); 969 970 return BaseCost * 971 BaseT::getCmpSelInstrCost(Opcode, ValTy, CondTy, VecPred, CostKind, I); 972 } 973 974 int ARMTTIImpl::getAddressComputationCost(Type *Ty, ScalarEvolution *SE, 975 const SCEV *Ptr) { 976 // Address computations in vectorized code with non-consecutive addresses will 977 // likely result in more instructions compared to scalar code where the 978 // computation can more often be merged into the index mode. The resulting 979 // extra micro-ops can significantly decrease throughput. 980 unsigned NumVectorInstToHideOverhead = 10; 981 int MaxMergeDistance = 64; 982 983 if (ST->hasNEON()) { 984 if (Ty->isVectorTy() && SE && 985 !BaseT::isConstantStridedAccessLessThan(SE, Ptr, MaxMergeDistance + 1)) 986 return NumVectorInstToHideOverhead; 987 988 // In many cases the address computation is not merged into the instruction 989 // addressing mode. 990 return 1; 991 } 992 return BaseT::getAddressComputationCost(Ty, SE, Ptr); 993 } 994 995 bool ARMTTIImpl::isProfitableLSRChainElement(Instruction *I) { 996 if (IntrinsicInst *II = dyn_cast<IntrinsicInst>(I)) { 997 // If a VCTP is part of a chain, it's already profitable and shouldn't be 998 // optimized, else LSR may block tail-predication. 999 switch (II->getIntrinsicID()) { 1000 case Intrinsic::arm_mve_vctp8: 1001 case Intrinsic::arm_mve_vctp16: 1002 case Intrinsic::arm_mve_vctp32: 1003 case Intrinsic::arm_mve_vctp64: 1004 return true; 1005 default: 1006 break; 1007 } 1008 } 1009 return false; 1010 } 1011 1012 bool ARMTTIImpl::isLegalMaskedLoad(Type *DataTy, Align Alignment) { 1013 if (!EnableMaskedLoadStores || !ST->hasMVEIntegerOps()) 1014 return false; 1015 1016 if (auto *VecTy = dyn_cast<FixedVectorType>(DataTy)) { 1017 // Don't support v2i1 yet. 1018 if (VecTy->getNumElements() == 2) 1019 return false; 1020 1021 // We don't support extending fp types. 1022 unsigned VecWidth = DataTy->getPrimitiveSizeInBits(); 1023 if (VecWidth != 128 && VecTy->getElementType()->isFloatingPointTy()) 1024 return false; 1025 } 1026 1027 unsigned EltWidth = DataTy->getScalarSizeInBits(); 1028 return (EltWidth == 32 && Alignment >= 4) || 1029 (EltWidth == 16 && Alignment >= 2) || (EltWidth == 8); 1030 } 1031 1032 bool ARMTTIImpl::isLegalMaskedGather(Type *Ty, Align Alignment) { 1033 if (!EnableMaskedGatherScatters || !ST->hasMVEIntegerOps()) 1034 return false; 1035 1036 // This method is called in 2 places: 1037 // - from the vectorizer with a scalar type, in which case we need to get 1038 // this as good as we can with the limited info we have (and rely on the cost 1039 // model for the rest). 1040 // - from the masked intrinsic lowering pass with the actual vector type. 1041 // For MVE, we have a custom lowering pass that will already have custom 1042 // legalised any gathers that we can to MVE intrinsics, and want to expand all 1043 // the rest. The pass runs before the masked intrinsic lowering pass, so if we 1044 // are here, we know we want to expand. 
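  // Hence the asymmetry below: scalar element types (vectorizer queries) are
  // accepted based on width and alignment, while vector types (queries from
  // the lowering passes) are rejected so that any gather not already turned
  // into an MVE intrinsic gets expanded.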
  if (isa<VectorType>(Ty))
    return false;

  unsigned EltWidth = Ty->getScalarSizeInBits();
  return ((EltWidth == 32 && Alignment >= 4) ||
          (EltWidth == 16 && Alignment >= 2) || EltWidth == 8);
}

/// Given a memcpy/memset/memmove instruction, return the number of memory
/// operations performed, via querying findOptimalMemOpLowering. Returns -1 if
/// a call is used.
int ARMTTIImpl::getNumMemOps(const IntrinsicInst *I) const {
  MemOp MOp;
  unsigned DstAddrSpace = ~0u;
  unsigned SrcAddrSpace = ~0u;
  const Function *F = I->getParent()->getParent();

  if (const auto *MC = dyn_cast<MemTransferInst>(I)) {
    ConstantInt *C = dyn_cast<ConstantInt>(MC->getLength());
    // If 'size' is not a constant, a library call will be generated.
    if (!C)
      return -1;

    const unsigned Size = C->getValue().getZExtValue();
    const Align DstAlign = *MC->getDestAlign();
    const Align SrcAlign = *MC->getSourceAlign();

    MOp = MemOp::Copy(Size, /*DstAlignCanChange*/ false, DstAlign, SrcAlign,
                      /*IsVolatile*/ false);
    DstAddrSpace = MC->getDestAddressSpace();
    SrcAddrSpace = MC->getSourceAddressSpace();
  } else if (const auto *MS = dyn_cast<MemSetInst>(I)) {
    ConstantInt *C = dyn_cast<ConstantInt>(MS->getLength());
    // If 'size' is not a constant, a library call will be generated.
    if (!C)
      return -1;

    const unsigned Size = C->getValue().getZExtValue();
    const Align DstAlign = *MS->getDestAlign();

    MOp = MemOp::Set(Size, /*DstAlignCanChange*/ false, DstAlign,
                     /*IsZeroMemset*/ false, /*IsVolatile*/ false);
    DstAddrSpace = MS->getDestAddressSpace();
  } else
    llvm_unreachable("Expected a memcpy/move or memset!");

  unsigned Limit, Factor = 2;
  switch (I->getIntrinsicID()) {
  case Intrinsic::memcpy:
    Limit = TLI->getMaxStoresPerMemcpy(F->hasMinSize());
    break;
  case Intrinsic::memmove:
    Limit = TLI->getMaxStoresPerMemmove(F->hasMinSize());
    break;
  case Intrinsic::memset:
    Limit = TLI->getMaxStoresPerMemset(F->hasMinSize());
    Factor = 1;
    break;
  default:
    llvm_unreachable("Expected a memcpy/move or memset!");
  }

  // MemOps will be populated with a list of data types that need to be loaded
  // and stored. For memcpy/memmove each type is both loaded and stored, which
  // is why the number of entries is multiplied by a Factor of 2; memset only
  // needs the stores.
  std::vector<EVT> MemOps;
  if (getTLI()->findOptimalMemOpLowering(
          MemOps, Limit, MOp, DstAddrSpace,
          SrcAddrSpace, F->getAttributes()))
    return MemOps.size() * Factor;

  // If we can't find an optimal memop lowering, return the default cost.
  return -1;
}

int ARMTTIImpl::getMemcpyCost(const Instruction *I) {
  int NumOps = getNumMemOps(cast<IntrinsicInst>(I));

  // To model the cost of a library call, we assume 1 for the call, and
  // 3 for the argument setup.
  if (NumOps == -1)
    return 4;
  return NumOps;
}

int ARMTTIImpl::getShuffleCost(TTI::ShuffleKind Kind, VectorType *Tp,
                               ArrayRef<int> Mask, int Index,
                               VectorType *SubTp) {
  if (ST->hasNEON()) {
    if (Kind == TTI::SK_Broadcast) {
      static const CostTblEntry NEONDupTbl[] = {
          // VDUP handles these cases.
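          // e.g. broadcasting a scalar into v4i32 is a single vdup.32, which
          // is why each entry here costs 1.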
1139 {ISD::VECTOR_SHUFFLE, MVT::v2i32, 1}, 1140 {ISD::VECTOR_SHUFFLE, MVT::v2f32, 1}, 1141 {ISD::VECTOR_SHUFFLE, MVT::v2i64, 1}, 1142 {ISD::VECTOR_SHUFFLE, MVT::v2f64, 1}, 1143 {ISD::VECTOR_SHUFFLE, MVT::v4i16, 1}, 1144 {ISD::VECTOR_SHUFFLE, MVT::v8i8, 1}, 1145 1146 {ISD::VECTOR_SHUFFLE, MVT::v4i32, 1}, 1147 {ISD::VECTOR_SHUFFLE, MVT::v4f32, 1}, 1148 {ISD::VECTOR_SHUFFLE, MVT::v8i16, 1}, 1149 {ISD::VECTOR_SHUFFLE, MVT::v16i8, 1}}; 1150 1151 std::pair<int, MVT> LT = TLI->getTypeLegalizationCost(DL, Tp); 1152 1153 if (const auto *Entry = 1154 CostTableLookup(NEONDupTbl, ISD::VECTOR_SHUFFLE, LT.second)) 1155 return LT.first * Entry->Cost; 1156 } 1157 if (Kind == TTI::SK_Reverse) { 1158 static const CostTblEntry NEONShuffleTbl[] = { 1159 // Reverse shuffle cost one instruction if we are shuffling within a 1160 // double word (vrev) or two if we shuffle a quad word (vrev, vext). 1161 {ISD::VECTOR_SHUFFLE, MVT::v2i32, 1}, 1162 {ISD::VECTOR_SHUFFLE, MVT::v2f32, 1}, 1163 {ISD::VECTOR_SHUFFLE, MVT::v2i64, 1}, 1164 {ISD::VECTOR_SHUFFLE, MVT::v2f64, 1}, 1165 {ISD::VECTOR_SHUFFLE, MVT::v4i16, 1}, 1166 {ISD::VECTOR_SHUFFLE, MVT::v8i8, 1}, 1167 1168 {ISD::VECTOR_SHUFFLE, MVT::v4i32, 2}, 1169 {ISD::VECTOR_SHUFFLE, MVT::v4f32, 2}, 1170 {ISD::VECTOR_SHUFFLE, MVT::v8i16, 2}, 1171 {ISD::VECTOR_SHUFFLE, MVT::v16i8, 2}}; 1172 1173 std::pair<int, MVT> LT = TLI->getTypeLegalizationCost(DL, Tp); 1174 1175 if (const auto *Entry = 1176 CostTableLookup(NEONShuffleTbl, ISD::VECTOR_SHUFFLE, LT.second)) 1177 return LT.first * Entry->Cost; 1178 } 1179 if (Kind == TTI::SK_Select) { 1180 static const CostTblEntry NEONSelShuffleTbl[] = { 1181 // Select shuffle cost table for ARM. Cost is the number of 1182 // instructions 1183 // required to create the shuffled vector. 1184 1185 {ISD::VECTOR_SHUFFLE, MVT::v2f32, 1}, 1186 {ISD::VECTOR_SHUFFLE, MVT::v2i64, 1}, 1187 {ISD::VECTOR_SHUFFLE, MVT::v2f64, 1}, 1188 {ISD::VECTOR_SHUFFLE, MVT::v2i32, 1}, 1189 1190 {ISD::VECTOR_SHUFFLE, MVT::v4i32, 2}, 1191 {ISD::VECTOR_SHUFFLE, MVT::v4f32, 2}, 1192 {ISD::VECTOR_SHUFFLE, MVT::v4i16, 2}, 1193 1194 {ISD::VECTOR_SHUFFLE, MVT::v8i16, 16}, 1195 1196 {ISD::VECTOR_SHUFFLE, MVT::v16i8, 32}}; 1197 1198 std::pair<int, MVT> LT = TLI->getTypeLegalizationCost(DL, Tp); 1199 if (const auto *Entry = CostTableLookup(NEONSelShuffleTbl, 1200 ISD::VECTOR_SHUFFLE, LT.second)) 1201 return LT.first * Entry->Cost; 1202 } 1203 } 1204 if (ST->hasMVEIntegerOps()) { 1205 if (Kind == TTI::SK_Broadcast) { 1206 static const CostTblEntry MVEDupTbl[] = { 1207 // VDUP handles these cases. 1208 {ISD::VECTOR_SHUFFLE, MVT::v4i32, 1}, 1209 {ISD::VECTOR_SHUFFLE, MVT::v8i16, 1}, 1210 {ISD::VECTOR_SHUFFLE, MVT::v16i8, 1}, 1211 {ISD::VECTOR_SHUFFLE, MVT::v4f32, 1}, 1212 {ISD::VECTOR_SHUFFLE, MVT::v8f16, 1}}; 1213 1214 std::pair<int, MVT> LT = TLI->getTypeLegalizationCost(DL, Tp); 1215 1216 if (const auto *Entry = CostTableLookup(MVEDupTbl, ISD::VECTOR_SHUFFLE, 1217 LT.second)) 1218 return LT.first * Entry->Cost * 1219 ST->getMVEVectorCostFactor(TTI::TCK_RecipThroughput); 1220 } 1221 1222 if (!Mask.empty()) { 1223 std::pair<int, MVT> LT = TLI->getTypeLegalizationCost(DL, Tp); 1224 if (Mask.size() <= LT.second.getVectorNumElements() && 1225 (isVREVMask(Mask, LT.second, 16) || isVREVMask(Mask, LT.second, 32) || 1226 isVREVMask(Mask, LT.second, 64))) 1227 return ST->getMVEVectorCostFactor(TTI::TCK_RecipThroughput) * LT.first; 1228 } 1229 } 1230 1231 int BaseCost = ST->hasMVEIntegerOps() && Tp->isVectorTy() 1232 ? 
ST->getMVEVectorCostFactor(TTI::TCK_RecipThroughput) 1233 : 1; 1234 return BaseCost * BaseT::getShuffleCost(Kind, Tp, Mask, Index, SubTp); 1235 } 1236 1237 int ARMTTIImpl::getArithmeticInstrCost(unsigned Opcode, Type *Ty, 1238 TTI::TargetCostKind CostKind, 1239 TTI::OperandValueKind Op1Info, 1240 TTI::OperandValueKind Op2Info, 1241 TTI::OperandValueProperties Opd1PropInfo, 1242 TTI::OperandValueProperties Opd2PropInfo, 1243 ArrayRef<const Value *> Args, 1244 const Instruction *CxtI) { 1245 int ISDOpcode = TLI->InstructionOpcodeToISD(Opcode); 1246 if (ST->isThumb() && CostKind == TTI::TCK_CodeSize && Ty->isIntegerTy(1)) { 1247 // Make operations on i1 relatively expensive as this often involves 1248 // combining predicates. AND and XOR should be easier to handle with IT 1249 // blocks. 1250 switch (ISDOpcode) { 1251 default: 1252 break; 1253 case ISD::AND: 1254 case ISD::XOR: 1255 return 2; 1256 case ISD::OR: 1257 return 3; 1258 } 1259 } 1260 1261 std::pair<int, MVT> LT = TLI->getTypeLegalizationCost(DL, Ty); 1262 1263 if (ST->hasNEON()) { 1264 const unsigned FunctionCallDivCost = 20; 1265 const unsigned ReciprocalDivCost = 10; 1266 static const CostTblEntry CostTbl[] = { 1267 // Division. 1268 // These costs are somewhat random. Choose a cost of 20 to indicate that 1269 // vectorizing devision (added function call) is going to be very expensive. 1270 // Double registers types. 1271 { ISD::SDIV, MVT::v1i64, 1 * FunctionCallDivCost}, 1272 { ISD::UDIV, MVT::v1i64, 1 * FunctionCallDivCost}, 1273 { ISD::SREM, MVT::v1i64, 1 * FunctionCallDivCost}, 1274 { ISD::UREM, MVT::v1i64, 1 * FunctionCallDivCost}, 1275 { ISD::SDIV, MVT::v2i32, 2 * FunctionCallDivCost}, 1276 { ISD::UDIV, MVT::v2i32, 2 * FunctionCallDivCost}, 1277 { ISD::SREM, MVT::v2i32, 2 * FunctionCallDivCost}, 1278 { ISD::UREM, MVT::v2i32, 2 * FunctionCallDivCost}, 1279 { ISD::SDIV, MVT::v4i16, ReciprocalDivCost}, 1280 { ISD::UDIV, MVT::v4i16, ReciprocalDivCost}, 1281 { ISD::SREM, MVT::v4i16, 4 * FunctionCallDivCost}, 1282 { ISD::UREM, MVT::v4i16, 4 * FunctionCallDivCost}, 1283 { ISD::SDIV, MVT::v8i8, ReciprocalDivCost}, 1284 { ISD::UDIV, MVT::v8i8, ReciprocalDivCost}, 1285 { ISD::SREM, MVT::v8i8, 8 * FunctionCallDivCost}, 1286 { ISD::UREM, MVT::v8i8, 8 * FunctionCallDivCost}, 1287 // Quad register types. 1288 { ISD::SDIV, MVT::v2i64, 2 * FunctionCallDivCost}, 1289 { ISD::UDIV, MVT::v2i64, 2 * FunctionCallDivCost}, 1290 { ISD::SREM, MVT::v2i64, 2 * FunctionCallDivCost}, 1291 { ISD::UREM, MVT::v2i64, 2 * FunctionCallDivCost}, 1292 { ISD::SDIV, MVT::v4i32, 4 * FunctionCallDivCost}, 1293 { ISD::UDIV, MVT::v4i32, 4 * FunctionCallDivCost}, 1294 { ISD::SREM, MVT::v4i32, 4 * FunctionCallDivCost}, 1295 { ISD::UREM, MVT::v4i32, 4 * FunctionCallDivCost}, 1296 { ISD::SDIV, MVT::v8i16, 8 * FunctionCallDivCost}, 1297 { ISD::UDIV, MVT::v8i16, 8 * FunctionCallDivCost}, 1298 { ISD::SREM, MVT::v8i16, 8 * FunctionCallDivCost}, 1299 { ISD::UREM, MVT::v8i16, 8 * FunctionCallDivCost}, 1300 { ISD::SDIV, MVT::v16i8, 16 * FunctionCallDivCost}, 1301 { ISD::UDIV, MVT::v16i8, 16 * FunctionCallDivCost}, 1302 { ISD::SREM, MVT::v16i8, 16 * FunctionCallDivCost}, 1303 { ISD::UREM, MVT::v16i8, 16 * FunctionCallDivCost}, 1304 // Multiplication. 1305 }; 1306 1307 if (const auto *Entry = CostTableLookup(CostTbl, ISDOpcode, LT.second)) 1308 return LT.first * Entry->Cost; 1309 1310 int Cost = BaseT::getArithmeticInstrCost(Opcode, Ty, CostKind, Op1Info, 1311 Op2Info, 1312 Opd1PropInfo, Opd2PropInfo); 1313 1314 // This is somewhat of a hack. 
    // The problem that we are facing is that SROA
    // creates a sequence of shift, and, or instructions to construct values.
    // These sequences are recognized by the ISel and have zero-cost. Not so
    // for the vectorized code. Because we have support for v2i64 but not i64
    // those sequences look particularly beneficial to vectorize.
    // To work around this we increase the cost of v2i64 operations to make
    // them seem less beneficial.
    if (LT.second == MVT::v2i64 &&
        Op2Info == TargetTransformInfo::OK_UniformConstantValue)
      Cost += 4;

    return Cost;
  }

  // If this operation is a shift on arm/thumb2, it might well be folded into
  // the following instruction, hence having a cost of 0.
  auto LooksLikeAFreeShift = [&]() {
    if (ST->isThumb1Only() || Ty->isVectorTy())
      return false;

    if (!CxtI || !CxtI->hasOneUse() || !CxtI->isShift())
      return false;
    if (Op2Info != TargetTransformInfo::OK_UniformConstantValue)
      return false;

    // Folded into an ADC/ADD/AND/BIC/CMP/EOR/MVN/ORR/ORN/RSB/SBC/SUB
    switch (cast<Instruction>(CxtI->user_back())->getOpcode()) {
    case Instruction::Add:
    case Instruction::Sub:
    case Instruction::And:
    case Instruction::Xor:
    case Instruction::Or:
    case Instruction::ICmp:
      return true;
    default:
      return false;
    }
  };
  if (LooksLikeAFreeShift())
    return 0;

  // Default to cheap (throughput/size of 1 instruction) but adjust throughput
  // for "multiple beats" potentially needed by MVE instructions.
  int BaseCost = 1;
  if (ST->hasMVEIntegerOps() && Ty->isVectorTy())
    BaseCost = ST->getMVEVectorCostFactor(CostKind);

  // The rest of this mostly follows what is done in
  // BaseT::getArithmeticInstrCost, without treating floats as more expensive
  // than scalars or increasing the costs for custom operations. The result is
  // also multiplied by the MVEVectorCostFactor where appropriate.
  if (TLI->isOperationLegalOrCustomOrPromote(ISDOpcode, LT.second))
    return LT.first * BaseCost;

  // Else this is expand, assume that we need to scalarize this op.
  if (auto *VTy = dyn_cast<FixedVectorType>(Ty)) {
    unsigned Num = VTy->getNumElements();
    unsigned Cost = getArithmeticInstrCost(Opcode, Ty->getScalarType(),
                                           CostKind);
    // Return the cost of multiple scalar invocations plus the cost of
    // inserting and extracting the values.
    SmallVector<Type *> Tys(Args.size(), Ty);
    return BaseT::getScalarizationOverhead(VTy, Args, Tys) + Num * Cost;
  }

  return BaseCost;
}

int ARMTTIImpl::getMemoryOpCost(unsigned Opcode, Type *Src,
                                MaybeAlign Alignment, unsigned AddressSpace,
                                TTI::TargetCostKind CostKind,
                                const Instruction *I) {
  // TODO: Handle other cost kinds.
  if (CostKind != TTI::TCK_RecipThroughput)
    return 1;

  // Type legalization can't handle structs.
  if (TLI->getValueType(DL, Src, true) == MVT::Other)
    return BaseT::getMemoryOpCost(Opcode, Src, Alignment, AddressSpace,
                                  CostKind);

  if (ST->hasNEON() && Src->isVectorTy() &&
      (Alignment && *Alignment != Align(16)) &&
      cast<VectorType>(Src)->getElementType()->isDoubleTy()) {
    // Unaligned loads/stores are extremely inefficient.
    // We need 4 uops for vst.1/vld.1 vs 1 uop for vldr/vstr.
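    // e.g. a v2f64 load or store that is not 16-byte aligned is charged 4 per
    // legalised vector below instead of 1.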
1400 std::pair<int, MVT> LT = TLI->getTypeLegalizationCost(DL, Src); 1401 return LT.first * 4; 1402 } 1403 1404 // MVE can optimize a fpext(load(4xhalf)) using an extending integer load. 1405 // Same for stores. 1406 if (ST->hasMVEFloatOps() && isa<FixedVectorType>(Src) && I && 1407 ((Opcode == Instruction::Load && I->hasOneUse() && 1408 isa<FPExtInst>(*I->user_begin())) || 1409 (Opcode == Instruction::Store && isa<FPTruncInst>(I->getOperand(0))))) { 1410 FixedVectorType *SrcVTy = cast<FixedVectorType>(Src); 1411 Type *DstTy = 1412 Opcode == Instruction::Load 1413 ? (*I->user_begin())->getType() 1414 : cast<Instruction>(I->getOperand(0))->getOperand(0)->getType(); 1415 if (SrcVTy->getNumElements() == 4 && SrcVTy->getScalarType()->isHalfTy() && 1416 DstTy->getScalarType()->isFloatTy()) 1417 return ST->getMVEVectorCostFactor(CostKind); 1418 } 1419 1420 int BaseCost = ST->hasMVEIntegerOps() && Src->isVectorTy() 1421 ? ST->getMVEVectorCostFactor(CostKind) 1422 : 1; 1423 return BaseCost * BaseT::getMemoryOpCost(Opcode, Src, Alignment, AddressSpace, 1424 CostKind, I); 1425 } 1426 1427 unsigned ARMTTIImpl::getMaskedMemoryOpCost(unsigned Opcode, Type *Src, 1428 Align Alignment, 1429 unsigned AddressSpace, 1430 TTI::TargetCostKind CostKind) { 1431 if (ST->hasMVEIntegerOps()) { 1432 if (Opcode == Instruction::Load && isLegalMaskedLoad(Src, Alignment)) 1433 return ST->getMVEVectorCostFactor(CostKind); 1434 if (Opcode == Instruction::Store && isLegalMaskedStore(Src, Alignment)) 1435 return ST->getMVEVectorCostFactor(CostKind); 1436 } 1437 if (!isa<FixedVectorType>(Src)) 1438 return BaseT::getMaskedMemoryOpCost(Opcode, Src, Alignment, AddressSpace, 1439 CostKind); 1440 // Scalar cost, which is currently very high due to the efficiency of the 1441 // generated code. 1442 return cast<FixedVectorType>(Src)->getNumElements() * 8; 1443 } 1444 1445 int ARMTTIImpl::getInterleavedMemoryOpCost( 1446 unsigned Opcode, Type *VecTy, unsigned Factor, ArrayRef<unsigned> Indices, 1447 Align Alignment, unsigned AddressSpace, TTI::TargetCostKind CostKind, 1448 bool UseMaskForCond, bool UseMaskForGaps) { 1449 assert(Factor >= 2 && "Invalid interleave factor"); 1450 assert(isa<VectorType>(VecTy) && "Expect a vector type"); 1451 1452 // vldN/vstN doesn't support vector types of i64/f64 element. 1453 bool EltIs64Bits = DL.getTypeSizeInBits(VecTy->getScalarType()) == 64; 1454 1455 if (Factor <= TLI->getMaxSupportedInterleaveFactor() && !EltIs64Bits && 1456 !UseMaskForCond && !UseMaskForGaps) { 1457 unsigned NumElts = cast<FixedVectorType>(VecTy)->getNumElements(); 1458 auto *SubVecTy = 1459 FixedVectorType::get(VecTy->getScalarType(), NumElts / Factor); 1460 1461 // vldN/vstN only support legal vector types of size 64 or 128 in bits. 1462 // Accesses having vector types that are a multiple of 128 bits can be 1463 // matched to more than one vldN/vstN instruction. 1464 int BaseCost = 1465 ST->hasMVEIntegerOps() ? ST->getMVEVectorCostFactor(CostKind) : 1; 1466 if (NumElts % Factor == 0 && 1467 TLI->isLegalInterleavedAccessType(Factor, SubVecTy, Alignment, DL)) 1468 return Factor * BaseCost * TLI->getNumInterleavedAccesses(SubVecTy, DL); 1469 1470 // Some smaller than legal interleaved patterns are cheap as we can make 1471 // use of the vmovn or vrev patterns to interleave a standard load. This is 1472 // true for v4i8, v8i8 and v4i16 at least (but not for v4f16 as it is 1473 // promoted differently). The cost of 2 here is then a load and vrev or 1474 // vmovn. 
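    // e.g. a factor-2 interleaved group over v8i8 can be handled as one
    // ordinary load plus a vrev/vmovn to separate the lanes, hence the flat
    // 2 * BaseCost below.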
    if (ST->hasMVEIntegerOps() && Factor == 2 && NumElts / Factor > 2 &&
        VecTy->isIntOrIntVectorTy() &&
        DL.getTypeSizeInBits(SubVecTy).getFixedSize() <= 64)
      return 2 * BaseCost;
  }

  return BaseT::getInterleavedMemoryOpCost(Opcode, VecTy, Factor, Indices,
                                           Alignment, AddressSpace, CostKind,
                                           UseMaskForCond, UseMaskForGaps);
}

unsigned ARMTTIImpl::getGatherScatterOpCost(unsigned Opcode, Type *DataTy,
                                            const Value *Ptr, bool VariableMask,
                                            Align Alignment,
                                            TTI::TargetCostKind CostKind,
                                            const Instruction *I) {
  using namespace PatternMatch;
  if (!ST->hasMVEIntegerOps() || !EnableMaskedGatherScatters)
    return BaseT::getGatherScatterOpCost(Opcode, DataTy, Ptr, VariableMask,
                                         Alignment, CostKind, I);

  assert(DataTy->isVectorTy() && "Can't do gather/scatters on scalar!");
  auto *VTy = cast<FixedVectorType>(DataTy);

  // TODO: Splitting, once we do that.

  unsigned NumElems = VTy->getNumElements();
  unsigned EltSize = VTy->getScalarSizeInBits();
  std::pair<int, MVT> LT = TLI->getTypeLegalizationCost(DL, DataTy);

  // For now, it is assumed that for the MVE gather instructions the loads are
  // all effectively serialised. This means the cost is the scalar cost
  // multiplied by the number of elements being loaded. This is possibly very
  // conservative, but even so we still end up vectorising loops because the
  // cost per iteration for many loops is lower than for scalar loops.
  unsigned VectorCost =
      NumElems * LT.first * ST->getMVEVectorCostFactor(CostKind);
  // The scalarization cost should be a lot higher. We use the number of vector
  // elements plus the scalarization overhead.
  unsigned ScalarCost = NumElems * LT.first +
                        BaseT::getScalarizationOverhead(VTy, true, false) +
                        BaseT::getScalarizationOverhead(VTy, false, true);

  if (EltSize < 8 || Alignment < EltSize / 8)
    return ScalarCost;

  unsigned ExtSize = EltSize;
  // Check whether there's a single user that asks for an extended type.
  if (I != nullptr) {
    // Depending on the caller of this function, a gather instruction will
    // either have the opcode Instruction::Load or be a call to the
    // masked_gather intrinsic.
    if ((I->getOpcode() == Instruction::Load ||
         match(I, m_Intrinsic<Intrinsic::masked_gather>())) &&
        I->hasOneUse()) {
      const User *Us = *I->users().begin();
      if (isa<ZExtInst>(Us) || isa<SExtInst>(Us)) {
        // Only allow valid type combinations.
        unsigned TypeSize =
            cast<Instruction>(Us)->getType()->getScalarSizeInBits();
        if (((TypeSize == 32 && (EltSize == 8 || EltSize == 16)) ||
             (TypeSize == 16 && EltSize == 8)) &&
            TypeSize * NumElems == 128) {
          ExtSize = TypeSize;
        }
      }
    }
    // Check whether the input data needs to be truncated.
    TruncInst *T;
    if ((I->getOpcode() == Instruction::Store ||
         match(I, m_Intrinsic<Intrinsic::masked_scatter>())) &&
        (T = dyn_cast<TruncInst>(I->getOperand(0)))) {
      // Only allow valid type combinations.
      unsigned TypeSize = T->getOperand(0)->getType()->getScalarSizeInBits();
      if (((EltSize == 16 && TypeSize == 32) ||
           (EltSize == 8 && (TypeSize == 32 || TypeSize == 16))) &&
          TypeSize * NumElems == 128)
        ExtSize = TypeSize;
    }
  }

  if (ExtSize * NumElems != 128 || NumElems < 4)
    return ScalarCost;

  // Any (aligned) i32 gather will not need to be scalarised.
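  // At this point ExtSize * NumElems == 128, so a 32-bit element size means a
  // v4i32 gather, which MVE can perform directly (e.g. with a vldrw gather).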
  if (ExtSize == 32)
    return VectorCost;
  // For smaller types, we need to ensure that the gep's inputs are correctly
  // extended from a small enough value. Other sizes (including i64) are
  // scalarized for now.
  if (ExtSize != 8 && ExtSize != 16)
    return ScalarCost;

  if (const auto *BC = dyn_cast<BitCastInst>(Ptr))
    Ptr = BC->getOperand(0);
  if (const auto *GEP = dyn_cast<GetElementPtrInst>(Ptr)) {
    if (GEP->getNumOperands() != 2)
      return ScalarCost;
    unsigned Scale = DL.getTypeAllocSize(GEP->getResultElementType());
    // Scale needs to be correct (which is only relevant for i16s).
    if (Scale != 1 && Scale * 8 != ExtSize)
      return ScalarCost;
    // And we need to zext (not sext) the indexes from a small enough type.
    if (const auto *ZExt = dyn_cast<ZExtInst>(GEP->getOperand(1))) {
      if (ZExt->getOperand(0)->getType()->getScalarSizeInBits() <= ExtSize)
        return VectorCost;
    }
    return ScalarCost;
  }
  return ScalarCost;
}

int ARMTTIImpl::getArithmeticReductionCost(unsigned Opcode, VectorType *ValTy,
                                           bool IsPairwiseForm,
                                           TTI::TargetCostKind CostKind) {
  EVT ValVT = TLI->getValueType(DL, ValTy);
  int ISD = TLI->InstructionOpcodeToISD(Opcode);
  if (!ST->hasMVEIntegerOps() || !ValVT.isSimple() || ISD != ISD::ADD)
    return BaseT::getArithmeticReductionCost(Opcode, ValTy, IsPairwiseForm,
                                             CostKind);

  std::pair<int, MVT> LT = TLI->getTypeLegalizationCost(DL, ValTy);

  static const CostTblEntry CostTblAdd[]{
      {ISD::ADD, MVT::v16i8, 1},
      {ISD::ADD, MVT::v8i16, 1},
      {ISD::ADD, MVT::v4i32, 1},
  };
  if (const auto *Entry = CostTableLookup(CostTblAdd, ISD, LT.second))
    return Entry->Cost * ST->getMVEVectorCostFactor(CostKind) * LT.first;

  return BaseT::getArithmeticReductionCost(Opcode, ValTy, IsPairwiseForm,
                                           CostKind);
}

InstructionCost
ARMTTIImpl::getExtendedAddReductionCost(bool IsMLA, bool IsUnsigned,
                                        Type *ResTy, VectorType *ValTy,
                                        TTI::TargetCostKind CostKind) {
  EVT ValVT = TLI->getValueType(DL, ValTy);
  EVT ResVT = TLI->getValueType(DL, ResTy);
  if (ST->hasMVEIntegerOps() && ValVT.isSimple() && ResVT.isSimple()) {
    std::pair<int, MVT> LT = TLI->getTypeLegalizationCost(DL, ValTy);
    if ((LT.second == MVT::v16i8 && ResVT.getSizeInBits() <= 32) ||
        (LT.second == MVT::v8i16 &&
         ResVT.getSizeInBits() <= (IsMLA ? 64 : 32)) ||
        (LT.second == MVT::v4i32 && ResVT.getSizeInBits() <= 64))
      return ST->getMVEVectorCostFactor(CostKind) * LT.first;
  }

  return BaseT::getExtendedAddReductionCost(IsMLA, IsUnsigned, ResTy, ValTy,
                                            CostKind);
}

InstructionCost
ARMTTIImpl::getIntrinsicInstrCost(const IntrinsicCostAttributes &ICA,
                                  TTI::TargetCostKind CostKind) {
  switch (ICA.getID()) {
  case Intrinsic::get_active_lane_mask:
    // Currently we make a somewhat optimistic assumption that
    // active_lane_masks are always free. In reality it may be freely folded
    // into a tail predicated loop, expanded into a VCTP or expanded into a lot
    // of add/icmp code. We may need to improve this in the future, but being
    // able to detect if it is free or not involves looking at a lot of other
    // code. We currently assume that the vectorizer inserted these, and knew
    // what it was doing in adding one.
    if (ST->hasMVEIntegerOps())
      return 0;
    break;
  case Intrinsic::sadd_sat:
  case Intrinsic::ssub_sat:
  case Intrinsic::uadd_sat:
  case Intrinsic::usub_sat: {
    if (!ST->hasMVEIntegerOps())
      break;
    Type *VT = ICA.getReturnType();

    std::pair<int, MVT> LT = TLI->getTypeLegalizationCost(DL, VT);
    if (LT.second == MVT::v4i32 || LT.second == MVT::v8i16 ||
        LT.second == MVT::v16i8) {
      // This is a base cost of 1 for the vqadd, plus 3 extra shifts if we
      // need to extend the type, as it uses shr(qadd(shl, shl)).
      unsigned Instrs =
          LT.second.getScalarSizeInBits() == VT->getScalarSizeInBits() ? 1 : 4;
      return LT.first * ST->getMVEVectorCostFactor(CostKind) * Instrs;
    }
    break;
  }
  case Intrinsic::abs:
  case Intrinsic::smin:
  case Intrinsic::smax:
  case Intrinsic::umin:
  case Intrinsic::umax: {
    if (!ST->hasMVEIntegerOps())
      break;
    Type *VT = ICA.getReturnType();

    std::pair<int, MVT> LT = TLI->getTypeLegalizationCost(DL, VT);
    if (LT.second == MVT::v4i32 || LT.second == MVT::v8i16 ||
        LT.second == MVT::v16i8)
      return LT.first * ST->getMVEVectorCostFactor(CostKind);
    break;
  }
  case Intrinsic::minnum:
  case Intrinsic::maxnum: {
    if (!ST->hasMVEFloatOps())
      break;
    Type *VT = ICA.getReturnType();
    std::pair<int, MVT> LT = TLI->getTypeLegalizationCost(DL, VT);
    if (LT.second == MVT::v4f32 || LT.second == MVT::v8f16)
      return LT.first * ST->getMVEVectorCostFactor(CostKind);
    break;
  }
  }

  return BaseT::getIntrinsicInstrCost(ICA, CostKind);
}

bool ARMTTIImpl::isLoweredToCall(const Function *F) {
  if (!F->isIntrinsic())
    return BaseT::isLoweredToCall(F);

  // Assume all Arm-specific intrinsics map to an instruction.
  if (F->getName().startswith("llvm.arm"))
    return false;

  switch (F->getIntrinsicID()) {
  default: break;
  case Intrinsic::powi:
  case Intrinsic::sin:
  case Intrinsic::cos:
  case Intrinsic::pow:
  case Intrinsic::log:
  case Intrinsic::log10:
  case Intrinsic::log2:
  case Intrinsic::exp:
  case Intrinsic::exp2:
    return true;
  case Intrinsic::sqrt:
  case Intrinsic::fabs:
  case Intrinsic::copysign:
  case Intrinsic::floor:
  case Intrinsic::ceil:
  case Intrinsic::trunc:
  case Intrinsic::rint:
  case Intrinsic::nearbyint:
  case Intrinsic::round:
  case Intrinsic::canonicalize:
  case Intrinsic::lround:
  case Intrinsic::llround:
  case Intrinsic::lrint:
  case Intrinsic::llrint:
    if (F->getReturnType()->isDoubleTy() && !ST->hasFP64())
      return true;
    if (F->getReturnType()->isHalfTy() && !ST->hasFullFP16())
      return true;
    // Some operations can be handled by vector instructions and assume
    // unsupported vectors will be expanded into supported scalar ones.
    // TODO: Handle scalar operations properly.
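    // With neither an FPARMv8 nor a VFPv2 FPU base, assume these become
    // libcalls.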
    return !ST->hasFPARMv8Base() && !ST->hasVFP2Base();
  case Intrinsic::masked_store:
  case Intrinsic::masked_load:
  case Intrinsic::masked_gather:
  case Intrinsic::masked_scatter:
    return !ST->hasMVEIntegerOps();
  case Intrinsic::sadd_with_overflow:
  case Intrinsic::uadd_with_overflow:
  case Intrinsic::ssub_with_overflow:
  case Intrinsic::usub_with_overflow:
  case Intrinsic::sadd_sat:
  case Intrinsic::uadd_sat:
  case Intrinsic::ssub_sat:
  case Intrinsic::usub_sat:
    return false;
  }

  return BaseT::isLoweredToCall(F);
}

bool ARMTTIImpl::maybeLoweredToCall(Instruction &I) {
  unsigned ISD = TLI->InstructionOpcodeToISD(I.getOpcode());
  EVT VT = TLI->getValueType(DL, I.getType(), true);
  if (TLI->getOperationAction(ISD, VT) == TargetLowering::LibCall)
    return true;

  // Check if an intrinsic will be lowered to a call and assume that any
  // other CallInst will generate a bl.
  if (auto *Call = dyn_cast<CallInst>(&I)) {
    if (auto *II = dyn_cast<IntrinsicInst>(Call)) {
      switch (II->getIntrinsicID()) {
      case Intrinsic::memcpy:
      case Intrinsic::memset:
      case Intrinsic::memmove:
        return getNumMemOps(II) == -1;
      default:
        if (const Function *F = Call->getCalledFunction())
          return isLoweredToCall(F);
      }
    }
    return true;
  }

  // FPv5 provides conversions between integer, double-precision,
  // single-precision, and half-precision formats.
  switch (I.getOpcode()) {
  default:
    break;
  case Instruction::FPToSI:
  case Instruction::FPToUI:
  case Instruction::SIToFP:
  case Instruction::UIToFP:
  case Instruction::FPTrunc:
  case Instruction::FPExt:
    return !ST->hasFPARMv8Base();
  }

  // FIXME: Unfortunately the approach of checking the Operation Action does
  // not catch all cases of Legalization that use library calls. Our
  // Legalization step categorizes some transformations into library calls as
  // Custom, Expand or even Legal when doing type legalization. So for now
  // we have to special-case, for instance, the SDIV of 64-bit integers and
  // the use of floating point emulation.
  if (VT.isInteger() && VT.getSizeInBits() >= 64) {
    switch (ISD) {
    default:
      break;
    case ISD::SDIV:
    case ISD::UDIV:
    case ISD::SREM:
    case ISD::UREM:
    case ISD::SDIVREM:
    case ISD::UDIVREM:
      return true;
    }
  }

  // Assume all other non-float operations are supported.
  if (!VT.isFloatingPoint())
    return false;

  // We'll need a library call to handle most floats when using soft float.
  if (TLI->useSoftFloat()) {
    switch (I.getOpcode()) {
    default:
      return true;
    case Instruction::Alloca:
    case Instruction::Load:
    case Instruction::Store:
    case Instruction::Select:
    case Instruction::PHI:
      return false;
    }
  }

  // We'll need a libcall to perform double precision operations on a single
  // precision only FPU.
  if (I.getType()->isDoubleTy() && !ST->hasFP64())
    return true;

  // Likewise for half precision arithmetic.
  if (I.getType()->isHalfTy() && !ST->hasFullFP16())
    return true;

  return false;
}

bool ARMTTIImpl::isHardwareLoopProfitable(Loop *L, ScalarEvolution &SE,
                                          AssumptionCache &AC,
                                          TargetLibraryInfo *LibInfo,
                                          HardwareLoopInfo &HWLoopInfo) {
  // Low-overhead branches are only supported in the 'low-overhead branch'
  // extension of v8.1-m.
  if (!ST->hasLOB() || DisableLowOverheadLoops) {
    LLVM_DEBUG(dbgs() << "ARMHWLoops: Disabled\n");
    return false;
  }

  if (!SE.hasLoopInvariantBackedgeTakenCount(L)) {
    LLVM_DEBUG(dbgs() << "ARMHWLoops: No BETC\n");
    return false;
  }

  const SCEV *BackedgeTakenCount = SE.getBackedgeTakenCount(L);
  if (isa<SCEVCouldNotCompute>(BackedgeTakenCount)) {
    LLVM_DEBUG(dbgs() << "ARMHWLoops: Uncomputable BETC\n");
    return false;
  }

  const SCEV *TripCountSCEV =
      SE.getAddExpr(BackedgeTakenCount,
                    SE.getOne(BackedgeTakenCount->getType()));

  // We need to store the trip count in LR, a 32-bit register.
  if (SE.getUnsignedRangeMax(TripCountSCEV).getBitWidth() > 32) {
    LLVM_DEBUG(dbgs() << "ARMHWLoops: Trip count does not fit into 32bits\n");
    return false;
  }

  // Making a call will trash LR and clear LO_BRANCH_INFO, so there's little
  // point in generating a hardware loop if that's going to happen.

  auto IsHardwareLoopIntrinsic = [](Instruction &I) {
    if (auto *Call = dyn_cast<IntrinsicInst>(&I)) {
      switch (Call->getIntrinsicID()) {
      default:
        break;
      case Intrinsic::start_loop_iterations:
      case Intrinsic::test_start_loop_iterations:
      case Intrinsic::loop_decrement:
      case Intrinsic::loop_decrement_reg:
        return true;
      }
    }
    return false;
  };

  // Scan the instructions to see if there are any that we know will turn into
  // a call or if this loop is already a low-overhead loop or will become a
  // tail predicated loop.
  bool IsTailPredLoop = false;
  auto ScanLoop = [&](Loop *L) {
    for (auto *BB : L->getBlocks()) {
      for (auto &I : *BB) {
        if (maybeLoweredToCall(I) || IsHardwareLoopIntrinsic(I) ||
            isa<InlineAsm>(I)) {
          LLVM_DEBUG(dbgs() << "ARMHWLoops: Bad instruction: " << I << "\n");
          return false;
        }
        if (auto *II = dyn_cast<IntrinsicInst>(&I))
          IsTailPredLoop |=
              II->getIntrinsicID() == Intrinsic::get_active_lane_mask ||
              II->getIntrinsicID() == Intrinsic::arm_mve_vctp8 ||
              II->getIntrinsicID() == Intrinsic::arm_mve_vctp16 ||
              II->getIntrinsicID() == Intrinsic::arm_mve_vctp32 ||
              II->getIntrinsicID() == Intrinsic::arm_mve_vctp64;
      }
    }
    return true;
  };

  // Visit inner loops.
  for (auto Inner : *L)
    if (!ScanLoop(Inner))
      return false;

  if (!ScanLoop(L))
    return false;

  // TODO: Check whether the trip count calculation is expensive. If L is the
  // inner loop but we know it has a low trip count, calculating that trip
  // count (in the parent loop) may be detrimental.
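
  // Hardware loops can't be nested and the counter is kept in a 32-bit
  // register; only request an entry test (WLS) when WLS loops are allowed and
  // the loop isn't expected to become tail-predicated.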
  LLVMContext &C = L->getHeader()->getContext();
  HWLoopInfo.CounterInReg = true;
  HWLoopInfo.IsNestingLegal = false;
  HWLoopInfo.PerformEntryTest = AllowWLSLoops && !IsTailPredLoop;
  HWLoopInfo.CountType = Type::getInt32Ty(C);
  HWLoopInfo.LoopDecrement = ConstantInt::get(HWLoopInfo.CountType, 1);
  return true;
}

static bool canTailPredicateInstruction(Instruction &I, int &ICmpCount) {
  // We don't allow icmp's, and because we only look at single block loops,
  // we simply count the icmps, i.e. there should only be 1 for the backedge.
  if (isa<ICmpInst>(&I) && ++ICmpCount > 1)
    return false;

  if (isa<FCmpInst>(&I))
    return false;

  // We could allow extending/narrowing FP loads/stores, but codegen is
  // too inefficient so reject this for now.
  if (isa<FPExtInst>(&I) || isa<FPTruncInst>(&I))
    return false;

  // Extends have to be extending-loads.
  if (isa<SExtInst>(&I) || isa<ZExtInst>(&I))
    if (!I.getOperand(0)->hasOneUse() || !isa<LoadInst>(I.getOperand(0)))
      return false;

  // Truncs have to be narrowing-stores.
  if (isa<TruncInst>(&I))
    if (!I.hasOneUse() || !isa<StoreInst>(*I.user_begin()))
      return false;

  return true;
}

// To set up a tail-predicated loop, we need to know the total number of
// elements processed by that loop. Thus, we need to determine the element
// size and:
// 1) it should be uniform for all operations in the vector loop, so we
//    e.g. don't want any widening/narrowing operations.
// 2) it should be smaller than i64s because we don't have vector operations
//    that work on i64s.
// 3) we don't want elements to be reversed or shuffled, to make sure the
//    tail-predication masks/predicates the right lanes.
//
static bool canTailPredicateLoop(Loop *L, LoopInfo *LI, ScalarEvolution &SE,
                                 const DataLayout &DL,
                                 const LoopAccessInfo *LAI) {
  LLVM_DEBUG(dbgs() << "Tail-predication: checking allowed instructions\n");

  // If there are live-out values, it is probably a reduction. We can predicate
  // most reduction operations freely under MVE using a combination of
  // prefer-predicated-reduction-select and inloop reductions. We limit this to
  // floating point and integer reductions, but don't check for operators
  // specifically here. If the value ends up not being a reduction (and so the
  // vectorizer cannot tailfold the loop), we should fall back to standard
  // vectorization automatically.
  SmallVector<Instruction *, 8> LiveOuts;
  LiveOuts = llvm::findDefsUsedOutsideOfLoop(L);
  bool ReductionsDisabled =
      EnableTailPredication == TailPredication::EnabledNoReductions ||
      EnableTailPredication == TailPredication::ForceEnabledNoReductions;

  for (auto *I : LiveOuts) {
    if (!I->getType()->isIntegerTy() && !I->getType()->isFloatTy() &&
        !I->getType()->isHalfTy()) {
      LLVM_DEBUG(dbgs() << "Don't tail-predicate loop with non-integer/float "
                           "live-out value\n");
      return false;
    }
    if (ReductionsDisabled) {
      LLVM_DEBUG(dbgs() << "Reductions not enabled\n");
      return false;
    }
  }

  // Next, check that all instructions can be tail-predicated.
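  // Walk every (non-debug) instruction: element types must be at most 32 bits
  // wide and loads/stores need strides that tail-predication can handle.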
  PredicatedScalarEvolution PSE = LAI->getPSE();
  SmallVector<Instruction *, 16> LoadStores;
  int ICmpCount = 0;

  for (BasicBlock *BB : L->blocks()) {
    for (Instruction &I : BB->instructionsWithoutDebug()) {
      if (isa<PHINode>(&I))
        continue;
      if (!canTailPredicateInstruction(I, ICmpCount)) {
        LLVM_DEBUG(dbgs() << "Instruction not allowed: "; I.dump());
        return false;
      }

      Type *T = I.getType();
      if (T->isPointerTy())
        T = T->getPointerElementType();

      if (T->getScalarSizeInBits() > 32) {
        LLVM_DEBUG(dbgs() << "Unsupported Type: "; T->dump());
        return false;
      }
      if (isa<StoreInst>(I) || isa<LoadInst>(I)) {
        Value *Ptr = isa<LoadInst>(I) ? I.getOperand(0) : I.getOperand(1);
        int64_t NextStride = getPtrStride(PSE, Ptr, L);
        if (NextStride == 1) {
          // TODO: for now we only allow consecutive strides of 1. We could
          // support other strides as long as they are uniform, but let's keep
          // it simple for now.
          continue;
        } else if (NextStride == -1 ||
                   (NextStride == 2 && MVEMaxSupportedInterleaveFactor >= 2) ||
                   (NextStride == 4 && MVEMaxSupportedInterleaveFactor >= 4)) {
          LLVM_DEBUG(dbgs()
                     << "Consecutive strides of 2 found, vld2/vstr2 can't "
                        "be tail-predicated.\n");
          return false;
          // TODO: don't tail predicate if there is a reversed load?
        } else if (EnableMaskedGatherScatters) {
          // Gather/scatters do allow loading from arbitrary strides, at
          // least if they are loop invariant.
          // TODO: Loop variant strides should in theory work, too, but
          // this requires further testing.
          const SCEV *PtrScev =
              replaceSymbolicStrideSCEV(PSE, llvm::ValueToValueMap(), Ptr);
          if (auto AR = dyn_cast<SCEVAddRecExpr>(PtrScev)) {
            const SCEV *Step = AR->getStepRecurrence(*PSE.getSE());
            if (PSE.getSE()->isLoopInvariant(Step, L))
              continue;
          }
        }
        LLVM_DEBUG(dbgs() << "Bad stride found, can't "
                             "tail-predicate.\n");
        return false;
      }
    }
  }

  LLVM_DEBUG(dbgs() << "tail-predication: all instructions allowed!\n");
  return true;
}

bool ARMTTIImpl::preferPredicateOverEpilogue(Loop *L, LoopInfo *LI,
                                             ScalarEvolution &SE,
                                             AssumptionCache &AC,
                                             TargetLibraryInfo *TLI,
                                             DominatorTree *DT,
                                             const LoopAccessInfo *LAI) {
  if (!EnableTailPredication) {
    LLVM_DEBUG(dbgs() << "Tail-predication not enabled.\n");
    return false;
  }

  // Creating a predicated vector loop is the first step for generating a
  // tail-predicated hardware loop, for which we need the MVE masked
  // load/store instructions:
  if (!ST->hasMVEIntegerOps())
    return false;

  // For now, restrict this to single block loops.
  if (L->getNumBlocks() > 1) {
    LLVM_DEBUG(dbgs() << "preferPredicateOverEpilogue: not a single block "
                         "loop.\n");
    return false;
  }

  assert(L->isInnermost() &&
         "preferPredicateOverEpilogue: inner-loop expected");

  HardwareLoopInfo HWLoopInfo(L);
  if (!HWLoopInfo.canAnalyze(*LI)) {
    LLVM_DEBUG(dbgs() << "preferPredicateOverEpilogue: hardware-loop is not "
                         "analyzable.\n");
    return false;
  }

  // This checks if we have the low-overhead branch architecture
  // extension, and if we will create a hardware-loop:
  if (!isHardwareLoopProfitable(L, SE, AC, TLI, HWLoopInfo)) {
    LLVM_DEBUG(dbgs() << "preferPredicateOverEpilogue: hardware-loop is not "
                         "profitable.\n");
    return false;
  }

  if (!HWLoopInfo.isHardwareLoopCandidate(SE, *LI, *DT)) {
    LLVM_DEBUG(dbgs() << "preferPredicateOverEpilogue: hardware-loop is not "
                         "a candidate.\n");
    return false;
  }

  return canTailPredicateLoop(L, LI, SE, DL, LAI);
}

bool ARMTTIImpl::emitGetActiveLaneMask() const {
  if (!ST->hasMVEIntegerOps() || !EnableTailPredication)
    return false;

  // Intrinsic @llvm.get.active.lane.mask is supported.
  // It is used in the MVETailPredication pass, which requires the number of
  // elements processed by this vector loop to set up the tail-predicated
  // loop.
  return true;
}

void ARMTTIImpl::getUnrollingPreferences(Loop *L, ScalarEvolution &SE,
                                         TTI::UnrollingPreferences &UP) {
  // Enable Upper bound unrolling universally, not dependent upon the
  // conditions below.
  UP.UpperBound = true;

  // Only currently enable these preferences for M-Class cores.
  if (!ST->isMClass())
    return BasicTTIImplBase::getUnrollingPreferences(L, SE, UP);

  // Disable loop unrolling for Oz and Os.
  UP.OptSizeThreshold = 0;
  UP.PartialOptSizeThreshold = 0;
  if (L->getHeader()->getParent()->hasOptSize())
    return;

  // Only enable on Thumb-2 targets.
  if (!ST->isThumb2())
    return;

  SmallVector<BasicBlock *, 4> ExitingBlocks;
  L->getExitingBlocks(ExitingBlocks);
  LLVM_DEBUG(dbgs() << "Loop has:\n"
                    << "Blocks: " << L->getNumBlocks() << "\n"
                    << "Exit blocks: " << ExitingBlocks.size() << "\n");

  // Only allow another exit other than the latch. This acts as an early exit
  // as it mirrors the profitability calculation of the runtime unroller.
  if (ExitingBlocks.size() > 2)
    return;

  // Limit the CFG of the loop body for targets with a branch predictor.
  // Allowing 4 blocks permits if-then-else diamonds in the body.
  if (ST->hasBranchPredictor() && L->getNumBlocks() > 4)
    return;

  // Don't unroll vectorized loops, including the remainder loop.
  if (getBooleanLoopAttribute(L, "llvm.loop.isvectorized"))
    return;

  // Scan the loop: don't unroll loops with calls as this could prevent
  // inlining.
  InstructionCost Cost = 0;
  for (auto *BB : L->getBlocks()) {
    for (auto &I : *BB) {
      // Don't unroll vectorised loops. MVE does not benefit from unrolling as
      // much as scalar code.
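      // Any vector-typed value is taken as a sign that the vectorizer has
      // already been through this loop.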
      if (I.getType()->isVectorTy())
        return;

      if (isa<CallInst>(I) || isa<InvokeInst>(I)) {
        if (const Function *F = cast<CallBase>(I).getCalledFunction()) {
          if (!isLoweredToCall(F))
            continue;
        }
        return;
      }

      SmallVector<const Value *, 4> Operands(I.operand_values());
      Cost +=
          getUserCost(&I, Operands, TargetTransformInfo::TCK_SizeAndLatency);
    }
  }

  LLVM_DEBUG(dbgs() << "Cost of loop: " << Cost << "\n");

  UP.Partial = true;
  UP.Runtime = true;
  UP.UnrollRemainder = true;
  UP.DefaultUnrollRuntimeCount = 4;
  UP.UnrollAndJam = true;
  UP.UnrollAndJamInnerLoopThreshold = 60;

  // Forcing the unrolling of small loops can be very useful because of the
  // branch taken cost of the backedge.
  if (Cost < 12)
    UP.Force = true;
}

void ARMTTIImpl::getPeelingPreferences(Loop *L, ScalarEvolution &SE,
                                       TTI::PeelingPreferences &PP) {
  BaseT::getPeelingPreferences(L, SE, PP);
}

bool ARMTTIImpl::preferInLoopReduction(unsigned Opcode, Type *Ty,
                                       TTI::ReductionFlags Flags) const {
  if (!ST->hasMVEIntegerOps())
    return false;

  unsigned ScalarBits = Ty->getScalarSizeInBits();
  switch (Opcode) {
  case Instruction::Add:
    return ScalarBits <= 64;
  default:
    return false;
  }
}

bool ARMTTIImpl::preferPredicatedReductionSelect(
    unsigned Opcode, Type *Ty, TTI::ReductionFlags Flags) const {
  if (!ST->hasMVEIntegerOps())
    return false;
  return true;
}