//===-- AArch64TargetTransformInfo.cpp - AArch64 specific TTI -------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

#include "AArch64TargetTransformInfo.h"
#include "AArch64ExpandImm.h"
#include "MCTargetDesc/AArch64AddressingModes.h"
#include "llvm/Analysis/LoopInfo.h"
#include "llvm/Analysis/TargetTransformInfo.h"
#include "llvm/CodeGen/BasicTTIImpl.h"
#include "llvm/CodeGen/CostTable.h"
#include "llvm/CodeGen/TargetLowering.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/IntrinsicsAArch64.h"
#include "llvm/IR/PatternMatch.h"
#include "llvm/Support/Debug.h"
#include "llvm/Transforms/InstCombine/InstCombiner.h"
#include <algorithm>
using namespace llvm;
using namespace llvm::PatternMatch;

#define DEBUG_TYPE "aarch64tti"

// NOTE(review): the flag name suggests this gates the Falkor hardware
// prefetcher unroll workaround; the consumer is outside this chunk — confirm.
static cl::opt<bool> EnableFalkorHWPFUnrollFix("enable-falkor-hwpf-unroll-fix",
                                               cl::init(true), cl::Hidden);

/// Returns true when inlining \p Callee into \p Caller is allowed from a
/// target-feature perspective.
bool AArch64TTIImpl::areInlineCompatible(const Function *Caller,
                                         const Function *Callee) const {
  const TargetMachine &TM = getTLI()->getTargetMachine();

  const FeatureBitset &CallerBits =
      TM.getSubtargetImpl(*Caller)->getFeatureBits();
  const FeatureBitset &CalleeBits =
      TM.getSubtargetImpl(*Callee)->getFeatureBits();

  // Inline a callee if its target-features are a subset of the callers
  // target-features.
  return (CallerBits & CalleeBits) == CalleeBits;
}

/// Calculate the cost of materializing a 64-bit value. This helper
/// method might only calculate a fraction of a larger immediate. Therefore it
/// is valid to return a cost of ZERO.
InstructionCost AArch64TTIImpl::getIntImmCost(int64_t Val) {
  // Check if the immediate can be encoded within an instruction.
  if (Val == 0 || AArch64_AM::isLogicalImmediate(Val, 64))
    return 0;

  // Negative values are normalized via bitwise-not before expansion.
  if (Val < 0)
    Val = ~Val;

  // Calculate how many moves we will need to materialize this constant.
  SmallVector<AArch64_IMM::ImmInsnModel, 4> Insn;
  AArch64_IMM::expandMOVImm(Val, 64, Insn);
  return Insn.size();
}

/// Calculate the cost of materializing the given constant.
InstructionCost AArch64TTIImpl::getIntImmCost(const APInt &Imm, Type *Ty,
                                              TTI::TargetCostKind CostKind) {
  assert(Ty->isIntegerTy());

  unsigned BitSize = Ty->getPrimitiveSizeInBits();
  // Zero-width integers get a maximal cost so they are never hoisted.
  if (BitSize == 0)
    return ~0U;

  // Sign-extend all constants to a multiple of 64-bit.
  APInt ImmVal = Imm;
  if (BitSize & 0x3f)
    ImmVal = Imm.sext((BitSize + 63) & ~0x3fU);

  // Split the constant into 64-bit chunks and calculate the cost for each
  // chunk.
  InstructionCost Cost = 0;
  for (unsigned ShiftVal = 0; ShiftVal < BitSize; ShiftVal += 64) {
    APInt Tmp = ImmVal.ashr(ShiftVal).sextOrTrunc(64);
    int64_t Val = Tmp.getSExtValue();
    Cost += getIntImmCost(Val);
  }
  // We need at least one instruction to materialize the constant.
  return std::max<InstructionCost>(1, Cost);
}

InstructionCost AArch64TTIImpl::getIntImmCostInst(unsigned Opcode, unsigned Idx,
                                                  const APInt &Imm, Type *Ty,
                                                  TTI::TargetCostKind CostKind,
                                                  Instruction *Inst) {
  assert(Ty->isIntegerTy());

  unsigned BitSize = Ty->getPrimitiveSizeInBits();
  // There is no cost model for constants with a bit size of 0. Return TCC_Free
  // here, so that constant hoisting will ignore this constant.
  if (BitSize == 0)
    return TTI::TCC_Free;

  unsigned ImmIdx = ~0U;
  switch (Opcode) {
  default:
    return TTI::TCC_Free;
  case Instruction::GetElementPtr:
    // Always hoist the base address of a GetElementPtr.
    if (Idx == 0)
      return 2 * TTI::TCC_Basic;
    return TTI::TCC_Free;
  case Instruction::Store:
    ImmIdx = 0;
    break;
  case Instruction::Add:
  case Instruction::Sub:
  case Instruction::Mul:
  case Instruction::UDiv:
  case Instruction::SDiv:
  case Instruction::URem:
  case Instruction::SRem:
  case Instruction::And:
  case Instruction::Or:
  case Instruction::Xor:
  case Instruction::ICmp:
    ImmIdx = 1;
    break;
  // Always return TCC_Free for the shift value of a shift instruction.
  case Instruction::Shl:
  case Instruction::LShr:
  case Instruction::AShr:
    if (Idx == 1)
      return TTI::TCC_Free;
    break;
  case Instruction::Trunc:
  case Instruction::ZExt:
  case Instruction::SExt:
  case Instruction::IntToPtr:
  case Instruction::PtrToInt:
  case Instruction::BitCast:
  case Instruction::PHI:
  case Instruction::Call:
  case Instruction::Select:
  case Instruction::Ret:
  case Instruction::Load:
    break;
  }

  if (Idx == ImmIdx) {
    // An immediate cheap enough to rematerialize inline is reported as free
    // so constant hoisting leaves it alone.
    int NumConstants = (BitSize + 63) / 64;
    InstructionCost Cost = AArch64TTIImpl::getIntImmCost(Imm, Ty, CostKind);
    return (Cost <= NumConstants * TTI::TCC_Basic)
               ? static_cast<int>(TTI::TCC_Free)
               : Cost;
  }
  return AArch64TTIImpl::getIntImmCost(Imm, Ty, CostKind);
}

InstructionCost
AArch64TTIImpl::getIntImmCostIntrin(Intrinsic::ID IID, unsigned Idx,
                                    const APInt &Imm, Type *Ty,
                                    TTI::TargetCostKind CostKind) {
  assert(Ty->isIntegerTy());

  unsigned BitSize = Ty->getPrimitiveSizeInBits();
  // There is no cost model for constants with a bit size of 0. Return TCC_Free
  // here, so that constant hoisting will ignore this constant.
  if (BitSize == 0)
    return TTI::TCC_Free;

  // Most (all?) AArch64 intrinsics do not support folding immediates into the
  // selected instruction, so we compute the materialization cost for the
  // immediate directly.
  if (IID >= Intrinsic::aarch64_addg && IID <= Intrinsic::aarch64_udiv)
    return AArch64TTIImpl::getIntImmCost(Imm, Ty, CostKind);

  switch (IID) {
  default:
    return TTI::TCC_Free;
  case Intrinsic::sadd_with_overflow:
  case Intrinsic::uadd_with_overflow:
  case Intrinsic::ssub_with_overflow:
  case Intrinsic::usub_with_overflow:
  case Intrinsic::smul_with_overflow:
  case Intrinsic::umul_with_overflow:
    if (Idx == 1) {
      int NumConstants = (BitSize + 63) / 64;
      InstructionCost Cost = AArch64TTIImpl::getIntImmCost(Imm, Ty, CostKind);
      return (Cost <= NumConstants * TTI::TCC_Basic)
                 ? static_cast<int>(TTI::TCC_Free)
                 : Cost;
    }
    break;
  case Intrinsic::experimental_stackmap:
    if ((Idx < 2) || (Imm.getBitWidth() <= 64 && isInt<64>(Imm.getSExtValue())))
      return TTI::TCC_Free;
    break;
  case Intrinsic::experimental_patchpoint_void:
  case Intrinsic::experimental_patchpoint_i64:
    if ((Idx < 4) || (Imm.getBitWidth() <= 64 && isInt<64>(Imm.getSExtValue())))
      return TTI::TCC_Free;
    break;
  case Intrinsic::experimental_gc_statepoint:
    if ((Idx < 5) || (Imm.getBitWidth() <= 64 && isInt<64>(Imm.getSExtValue())))
      return TTI::TCC_Free;
    break;
  }
  return AArch64TTIImpl::getIntImmCost(Imm, Ty, CostKind);
}

TargetTransformInfo::PopcntSupportKind
AArch64TTIImpl::getPopcntSupport(unsigned TyWidth) {
  assert(isPowerOf2_32(TyWidth) && "Ty width must be power of 2");
  if (TyWidth == 32 || TyWidth == 64)
    return TTI::PSK_FastHardware;
  // TODO: AArch64TargetLowering::LowerCTPOP() supports 128bit popcount.
214 return TTI::PSK_Software; 215 } 216 217 InstructionCost 218 AArch64TTIImpl::getIntrinsicInstrCost(const IntrinsicCostAttributes &ICA, 219 TTI::TargetCostKind CostKind) { 220 auto *RetTy = ICA.getReturnType(); 221 switch (ICA.getID()) { 222 case Intrinsic::umin: 223 case Intrinsic::umax: { 224 auto LT = TLI->getTypeLegalizationCost(DL, RetTy); 225 // umin(x,y) -> sub(x,usubsat(x,y)) 226 // umax(x,y) -> add(x,usubsat(y,x)) 227 if (LT.second == MVT::v2i64) 228 return LT.first * 2; 229 LLVM_FALLTHROUGH; 230 } 231 case Intrinsic::smin: 232 case Intrinsic::smax: { 233 static const auto ValidMinMaxTys = {MVT::v8i8, MVT::v16i8, MVT::v4i16, 234 MVT::v8i16, MVT::v2i32, MVT::v4i32}; 235 auto LT = TLI->getTypeLegalizationCost(DL, RetTy); 236 if (any_of(ValidMinMaxTys, [<](MVT M) { return M == LT.second; })) 237 return LT.first; 238 break; 239 } 240 case Intrinsic::sadd_sat: 241 case Intrinsic::ssub_sat: 242 case Intrinsic::uadd_sat: 243 case Intrinsic::usub_sat: { 244 static const auto ValidSatTys = {MVT::v8i8, MVT::v16i8, MVT::v4i16, 245 MVT::v8i16, MVT::v2i32, MVT::v4i32, 246 MVT::v2i64}; 247 auto LT = TLI->getTypeLegalizationCost(DL, RetTy); 248 // This is a base cost of 1 for the vadd, plus 3 extract shifts if we 249 // need to extend the type, as it uses shr(qadd(shl, shl)). 250 unsigned Instrs = 251 LT.second.getScalarSizeInBits() == RetTy->getScalarSizeInBits() ? 
1 : 4; 252 if (any_of(ValidSatTys, [<](MVT M) { return M == LT.second; })) 253 return LT.first * Instrs; 254 break; 255 } 256 case Intrinsic::abs: { 257 static const auto ValidAbsTys = {MVT::v8i8, MVT::v16i8, MVT::v4i16, 258 MVT::v8i16, MVT::v2i32, MVT::v4i32, 259 MVT::v2i64}; 260 auto LT = TLI->getTypeLegalizationCost(DL, RetTy); 261 if (any_of(ValidAbsTys, [<](MVT M) { return M == LT.second; })) 262 return LT.first; 263 break; 264 } 265 case Intrinsic::experimental_stepvector: { 266 InstructionCost Cost = 1; // Cost of the `index' instruction 267 auto LT = TLI->getTypeLegalizationCost(DL, RetTy); 268 // Legalisation of illegal vectors involves an `index' instruction plus 269 // (LT.first - 1) vector adds. 270 if (LT.first > 1) { 271 Type *LegalVTy = EVT(LT.second).getTypeForEVT(RetTy->getContext()); 272 InstructionCost AddCost = 273 getArithmeticInstrCost(Instruction::Add, LegalVTy, CostKind); 274 Cost += AddCost * (LT.first - 1); 275 } 276 return Cost; 277 } 278 default: 279 break; 280 } 281 return BaseT::getIntrinsicInstrCost(ICA, CostKind); 282 } 283 284 static Optional<Instruction *> instCombineSVELast(InstCombiner &IC, 285 IntrinsicInst &II) { 286 Value *Pg = II.getArgOperand(0); 287 Value *Vec = II.getArgOperand(1); 288 bool IsAfter = II.getIntrinsicID() == Intrinsic::aarch64_sve_lasta; 289 290 auto *C = dyn_cast<Constant>(Pg); 291 if (IsAfter && C && C->isNullValue()) { 292 // The intrinsic is extracting lane 0 so use an extract instead. 
    auto *IdxTy = Type::getInt64Ty(II.getContext());
    auto *Extract = ExtractElementInst::Create(Vec, ConstantInt::get(IdxTy, 0));
    Extract->insertBefore(&II);
    Extract->takeName(&II);
    return IC.replaceInstUsesWith(II, Extract);
  }

  auto *IntrPG = dyn_cast<IntrinsicInst>(Pg);
  if (!IntrPG)
    return None;

  // Only predicates produced directly by a ptrue can be mapped to a fixed
  // last-active lane; bail out for anything else.
  if (IntrPG->getIntrinsicID() != Intrinsic::aarch64_sve_ptrue)
    return None;

  const auto PTruePattern =
      cast<ConstantInt>(IntrPG->getOperand(0))->getZExtValue();

  // Can the intrinsic's predicate be converted to a known constant index?
  unsigned Idx;
  switch (PTruePattern) {
  default:
    return None;
  case AArch64SVEPredPattern::vl1:
    Idx = 0;
    break;
  case AArch64SVEPredPattern::vl2:
    Idx = 1;
    break;
  case AArch64SVEPredPattern::vl3:
    Idx = 2;
    break;
  case AArch64SVEPredPattern::vl4:
    Idx = 3;
    break;
  case AArch64SVEPredPattern::vl5:
    Idx = 4;
    break;
  case AArch64SVEPredPattern::vl6:
    Idx = 5;
    break;
  case AArch64SVEPredPattern::vl7:
    Idx = 6;
    break;
  case AArch64SVEPredPattern::vl8:
    Idx = 7;
    break;
  case AArch64SVEPredPattern::vl16:
    Idx = 15;
    break;
  }

  // Increment the index if extracting the element after the last active
  // predicate element.
  if (IsAfter)
    ++Idx;

  // Ignore extracts whose index is larger than the known minimum vector
  // length. NOTE: This is an artificial constraint where we prefer to
  // maintain what the user asked for until an alternative is proven faster.
  auto *PgVTy = cast<ScalableVectorType>(Pg->getType());
  if (Idx >= PgVTy->getMinNumElements())
    return None;

  // The intrinsic is extracting a fixed lane so use an extract instead.
  auto *IdxTy = Type::getInt64Ty(II.getContext());
  auto *Extract = ExtractElementInst::Create(Vec, ConstantInt::get(IdxTy, Idx));
  Extract->insertBefore(&II);
  Extract->takeName(&II);
  return IC.replaceInstUsesWith(II, Extract);
}

/// Dispatches AArch64-specific InstCombine folds for target intrinsics;
/// returns None when no fold applies.
Optional<Instruction *>
AArch64TTIImpl::instCombineIntrinsic(InstCombiner &IC,
                                     IntrinsicInst &II) const {
  Intrinsic::ID IID = II.getIntrinsicID();
  switch (IID) {
  default:
    break;
  case Intrinsic::aarch64_sve_lasta:
  case Intrinsic::aarch64_sve_lastb:
    return instCombineSVELast(IC, II);
  }

  return None;
}

bool AArch64TTIImpl::isWideningInstruction(Type *DstTy, unsigned Opcode,
                                           ArrayRef<const Value *> Args) {

  // A helper that returns a vector type from the given type. The number of
  // elements in type Ty determine the vector width.
  auto toVectorTy = [&](Type *ArgTy) {
    return VectorType::get(ArgTy->getScalarType(),
                           cast<VectorType>(DstTy)->getElementCount());
  };

  // Exit early if DstTy is not a vector type whose elements are at least
  // 16-bits wide.
  if (!DstTy->isVectorTy() || DstTy->getScalarSizeInBits() < 16)
    return false;

  // Determine if the operation has a widening variant. We consider both the
  // "long" (e.g., usubl) and "wide" (e.g., usubw) versions of the
  // instructions.
  //
  // TODO: Add additional widening operations (e.g., mul, shl, etc.) once we
  // verify that their extending operands are eliminated during code
  // generation.
  switch (Opcode) {
  case Instruction::Add: // UADDL(2), SADDL(2), UADDW(2), SADDW(2).
  case Instruction::Sub: // USUBL(2), SSUBL(2), USUBW(2), SSUBW(2).
    break;
  default:
    return false;
  }

  // To be a widening instruction (either the "wide" or "long" versions), the
  // second operand must be a sign- or zero extend having a single user.
  // We only consider extends having a single user because they may otherwise
  // not be eliminated.
  if (Args.size() != 2 ||
      (!isa<SExtInst>(Args[1]) && !isa<ZExtInst>(Args[1])) ||
      !Args[1]->hasOneUse())
    return false;
  auto *Extend = cast<CastInst>(Args[1]);

  // Legalize the destination type and ensure it can be used in a widening
  // operation.
  auto DstTyL = TLI->getTypeLegalizationCost(DL, DstTy);
  unsigned DstElTySize = DstTyL.second.getScalarSizeInBits();
  if (!DstTyL.second.isVector() || DstElTySize != DstTy->getScalarSizeInBits())
    return false;

  // Legalize the source type and ensure it can be used in a widening
  // operation.
  auto *SrcTy = toVectorTy(Extend->getSrcTy());
  auto SrcTyL = TLI->getTypeLegalizationCost(DL, SrcTy);
  unsigned SrcElTySize = SrcTyL.second.getScalarSizeInBits();
  if (!SrcTyL.second.isVector() || SrcElTySize != SrcTy->getScalarSizeInBits())
    return false;

  // Get the total number of vector elements in the legalized types.
  unsigned NumDstEls = DstTyL.first * DstTyL.second.getVectorMinNumElements();
  unsigned NumSrcEls = SrcTyL.first * SrcTyL.second.getVectorMinNumElements();

  // Return true if the legalized types have the same number of vector elements
  // and the destination element type size is twice that of the source type.
  return NumDstEls == NumSrcEls && 2 * SrcElTySize == DstElTySize;
}

InstructionCost AArch64TTIImpl::getCastInstrCost(unsigned Opcode, Type *Dst,
                                                 Type *Src,
                                                 TTI::CastContextHint CCH,
                                                 TTI::TargetCostKind CostKind,
                                                 const Instruction *I) {
  int ISD = TLI->InstructionOpcodeToISD(Opcode);
  assert(ISD && "Invalid opcode");

  // If the cast is observable, and it is used by a widening instruction (e.g.,
  // uaddl, saddw, etc.), it may be free.
  if (I && I->hasOneUse()) {
    auto *SingleUser = cast<Instruction>(*I->user_begin());
    SmallVector<const Value *, 4> Operands(SingleUser->operand_values());
    if (isWideningInstruction(Dst, SingleUser->getOpcode(), Operands)) {
      // If the cast is the second operand, it is free. We will generate either
      // a "wide" or "long" version of the widening instruction.
      if (I == SingleUser->getOperand(1))
        return 0;
      // If the cast is not the second operand, it will be free if it looks the
      // same as the second operand. In this case, we will generate a "long"
      // version of the widening instruction.
      if (auto *Cast = dyn_cast<CastInst>(SingleUser->getOperand(1)))
        if (I->getOpcode() == unsigned(Cast->getOpcode()) &&
            cast<CastInst>(I)->getSrcTy() == Cast->getSrcTy())
          return 0;
    }
  }

  // TODO: Allow non-throughput costs that aren't binary.
  auto AdjustCost = [&CostKind](InstructionCost Cost) -> InstructionCost {
    if (CostKind != TTI::TCK_RecipThroughput)
      return Cost == 0 ? 0 : 1;
    return Cost;
  };

  EVT SrcTy = TLI->getValueType(DL, Src);
  EVT DstTy = TLI->getValueType(DL, Dst);

  if (!SrcTy.isSimple() || !DstTy.isSimple())
    return AdjustCost(
        BaseT::getCastInstrCost(Opcode, Dst, Src, CCH, CostKind, I));

  // Table of hand-tuned conversion costs, keyed on (ISD opcode, dst MVT,
  // src MVT). Entries not listed fall through to the base-class estimate.
  static const TypeConversionCostTblEntry
  ConversionTbl[] = {
    { ISD::TRUNCATE, MVT::v4i16, MVT::v4i32,  1 },
    { ISD::TRUNCATE, MVT::v4i32, MVT::v4i64,  0 },
    { ISD::TRUNCATE, MVT::v8i8,  MVT::v8i32,  3 },
    { ISD::TRUNCATE, MVT::v16i8, MVT::v16i32, 6 },

    // Truncations on nxvmiN
    { ISD::TRUNCATE, MVT::nxv2i1,  MVT::nxv2i16, 1 },
    { ISD::TRUNCATE, MVT::nxv2i1,  MVT::nxv2i32, 1 },
    { ISD::TRUNCATE, MVT::nxv2i1,  MVT::nxv2i64, 1 },
    { ISD::TRUNCATE, MVT::nxv4i1,  MVT::nxv4i16, 1 },
    { ISD::TRUNCATE, MVT::nxv4i1,  MVT::nxv4i32, 1 },
    { ISD::TRUNCATE, MVT::nxv4i1,  MVT::nxv4i64, 2 },
    { ISD::TRUNCATE, MVT::nxv8i1,  MVT::nxv8i16, 1 },
    { ISD::TRUNCATE, MVT::nxv8i1,  MVT::nxv8i32, 3 },
    { ISD::TRUNCATE, MVT::nxv8i1,  MVT::nxv8i64, 5 },
    { ISD::TRUNCATE, MVT::nxv2i16, MVT::nxv2i32, 1 },
    { ISD::TRUNCATE, MVT::nxv2i32, MVT::nxv2i64, 1 },
    { ISD::TRUNCATE, MVT::nxv4i16, MVT::nxv4i32, 1 },
    { ISD::TRUNCATE, MVT::nxv4i32, MVT::nxv4i64, 2 },
    { ISD::TRUNCATE, MVT::nxv8i16, MVT::nxv8i32, 3 },
    { ISD::TRUNCATE, MVT::nxv8i32, MVT::nxv8i64, 6 },

    // The number of shll instructions for the extension.
    { ISD::SIGN_EXTEND, MVT::v4i64,  MVT::v4i16, 3 },
    { ISD::ZERO_EXTEND, MVT::v4i64,  MVT::v4i16, 3 },
    { ISD::SIGN_EXTEND, MVT::v4i64,  MVT::v4i32, 2 },
    { ISD::ZERO_EXTEND, MVT::v4i64,  MVT::v4i32, 2 },
    { ISD::SIGN_EXTEND, MVT::v8i32,  MVT::v8i8,  3 },
    { ISD::ZERO_EXTEND, MVT::v8i32,  MVT::v8i8,  3 },
    { ISD::SIGN_EXTEND, MVT::v8i32,  MVT::v8i16, 2 },
    { ISD::ZERO_EXTEND, MVT::v8i32,  MVT::v8i16, 2 },
    { ISD::SIGN_EXTEND, MVT::v8i64,  MVT::v8i8,  7 },
    { ISD::ZERO_EXTEND, MVT::v8i64,  MVT::v8i8,  7 },
    { ISD::SIGN_EXTEND, MVT::v8i64,  MVT::v8i16, 6 },
    { ISD::ZERO_EXTEND, MVT::v8i64,  MVT::v8i16, 6 },
    { ISD::SIGN_EXTEND, MVT::v16i16, MVT::v16i8, 2 },
    { ISD::ZERO_EXTEND, MVT::v16i16, MVT::v16i8, 2 },
    { ISD::SIGN_EXTEND, MVT::v16i32, MVT::v16i8, 6 },
    { ISD::ZERO_EXTEND, MVT::v16i32, MVT::v16i8, 6 },

    // LowerVectorINT_TO_FP:
    { ISD::SINT_TO_FP, MVT::v2f32, MVT::v2i32, 1 },
    { ISD::SINT_TO_FP, MVT::v4f32, MVT::v4i32, 1 },
    { ISD::SINT_TO_FP, MVT::v2f64, MVT::v2i64, 1 },
    { ISD::UINT_TO_FP, MVT::v2f32, MVT::v2i32, 1 },
    { ISD::UINT_TO_FP, MVT::v4f32, MVT::v4i32, 1 },
    { ISD::UINT_TO_FP, MVT::v2f64, MVT::v2i64, 1 },

    // Complex: to v2f32
    { ISD::SINT_TO_FP, MVT::v2f32, MVT::v2i8,  3 },
    { ISD::SINT_TO_FP, MVT::v2f32, MVT::v2i16, 3 },
    { ISD::SINT_TO_FP, MVT::v2f32, MVT::v2i64, 2 },
    { ISD::UINT_TO_FP, MVT::v2f32, MVT::v2i8,  3 },
    { ISD::UINT_TO_FP, MVT::v2f32, MVT::v2i16, 3 },
    { ISD::UINT_TO_FP, MVT::v2f32, MVT::v2i64, 2 },

    // Complex: to v4f32
    { ISD::SINT_TO_FP, MVT::v4f32, MVT::v4i8,  4 },
    { ISD::SINT_TO_FP, MVT::v4f32, MVT::v4i16, 2 },
    { ISD::UINT_TO_FP, MVT::v4f32, MVT::v4i8,  3 },
    { ISD::UINT_TO_FP, MVT::v4f32, MVT::v4i16, 2 },

    // Complex: to v8f32
    { ISD::SINT_TO_FP, MVT::v8f32, MVT::v8i8,  10 },
    { ISD::SINT_TO_FP, MVT::v8f32, MVT::v8i16, 4 },
    { ISD::UINT_TO_FP, MVT::v8f32, MVT::v8i8,  10 },
    { ISD::UINT_TO_FP, MVT::v8f32, MVT::v8i16, 4 },

    // Complex: to v16f32
    { ISD::SINT_TO_FP, MVT::v16f32, MVT::v16i8, 21 },
    { ISD::UINT_TO_FP, MVT::v16f32, MVT::v16i8, 21 },

    // Complex: to v2f64
    { ISD::SINT_TO_FP, MVT::v2f64, MVT::v2i8,  4 },
    { ISD::SINT_TO_FP, MVT::v2f64, MVT::v2i16, 4 },
    { ISD::SINT_TO_FP, MVT::v2f64, MVT::v2i32, 2 },
    { ISD::UINT_TO_FP, MVT::v2f64, MVT::v2i8,  4 },
    { ISD::UINT_TO_FP, MVT::v2f64, MVT::v2i16, 4 },
    { ISD::UINT_TO_FP, MVT::v2f64, MVT::v2i32, 2 },


    // LowerVectorFP_TO_INT
    { ISD::FP_TO_SINT, MVT::v2i32, MVT::v2f32, 1 },
    { ISD::FP_TO_SINT, MVT::v4i32, MVT::v4f32, 1 },
    { ISD::FP_TO_SINT, MVT::v2i64, MVT::v2f64, 1 },
    { ISD::FP_TO_UINT, MVT::v2i32, MVT::v2f32, 1 },
    { ISD::FP_TO_UINT, MVT::v4i32, MVT::v4f32, 1 },
    { ISD::FP_TO_UINT, MVT::v2i64, MVT::v2f64, 1 },

    // Complex, from v2f32: legal type is v2i32 (no cost) or v2i64 (1 ext).
    { ISD::FP_TO_SINT, MVT::v2i64, MVT::v2f32, 2 },
    { ISD::FP_TO_SINT, MVT::v2i16, MVT::v2f32, 1 },
    { ISD::FP_TO_SINT, MVT::v2i8,  MVT::v2f32, 1 },
    { ISD::FP_TO_UINT, MVT::v2i64, MVT::v2f32, 2 },
    { ISD::FP_TO_UINT, MVT::v2i16, MVT::v2f32, 1 },
    { ISD::FP_TO_UINT, MVT::v2i8,  MVT::v2f32, 1 },

    // Complex, from v4f32: legal type is v4i16, 1 narrowing => ~2
    { ISD::FP_TO_SINT, MVT::v4i16, MVT::v4f32, 2 },
    { ISD::FP_TO_SINT, MVT::v4i8,  MVT::v4f32, 2 },
    { ISD::FP_TO_UINT, MVT::v4i16, MVT::v4f32, 2 },
    { ISD::FP_TO_UINT, MVT::v4i8,  MVT::v4f32, 2 },

    // Complex, from nxv2f32.
    { ISD::FP_TO_SINT, MVT::nxv2i64, MVT::nxv2f32, 1 },
    { ISD::FP_TO_SINT, MVT::nxv2i32, MVT::nxv2f32, 1 },
    { ISD::FP_TO_SINT, MVT::nxv2i16, MVT::nxv2f32, 1 },
    { ISD::FP_TO_SINT, MVT::nxv2i8,  MVT::nxv2f32, 1 },
    { ISD::FP_TO_UINT, MVT::nxv2i64, MVT::nxv2f32, 1 },
    { ISD::FP_TO_UINT, MVT::nxv2i32, MVT::nxv2f32, 1 },
    { ISD::FP_TO_UINT, MVT::nxv2i16, MVT::nxv2f32, 1 },
    { ISD::FP_TO_UINT, MVT::nxv2i8,  MVT::nxv2f32, 1 },

    // Complex, from v2f64: legal type is v2i32, 1 narrowing => ~2.
    { ISD::FP_TO_SINT, MVT::v2i32, MVT::v2f64, 2 },
    { ISD::FP_TO_SINT, MVT::v2i16, MVT::v2f64, 2 },
    { ISD::FP_TO_SINT, MVT::v2i8,  MVT::v2f64, 2 },
    { ISD::FP_TO_UINT, MVT::v2i32, MVT::v2f64, 2 },
    { ISD::FP_TO_UINT, MVT::v2i16, MVT::v2f64, 2 },
    { ISD::FP_TO_UINT, MVT::v2i8,  MVT::v2f64, 2 },

    // Complex, from nxv2f64.
    { ISD::FP_TO_SINT, MVT::nxv2i64, MVT::nxv2f64, 1 },
    { ISD::FP_TO_SINT, MVT::nxv2i32, MVT::nxv2f64, 1 },
    { ISD::FP_TO_SINT, MVT::nxv2i16, MVT::nxv2f64, 1 },
    { ISD::FP_TO_SINT, MVT::nxv2i8,  MVT::nxv2f64, 1 },
    { ISD::FP_TO_UINT, MVT::nxv2i64, MVT::nxv2f64, 1 },
    { ISD::FP_TO_UINT, MVT::nxv2i32, MVT::nxv2f64, 1 },
    { ISD::FP_TO_UINT, MVT::nxv2i16, MVT::nxv2f64, 1 },
    { ISD::FP_TO_UINT, MVT::nxv2i8,  MVT::nxv2f64, 1 },

    // Complex, from nxv4f32.
    { ISD::FP_TO_SINT, MVT::nxv4i64, MVT::nxv4f32, 4 },
    { ISD::FP_TO_SINT, MVT::nxv4i32, MVT::nxv4f32, 1 },
    { ISD::FP_TO_SINT, MVT::nxv4i16, MVT::nxv4f32, 1 },
    { ISD::FP_TO_SINT, MVT::nxv4i8,  MVT::nxv4f32, 1 },
    { ISD::FP_TO_UINT, MVT::nxv4i64, MVT::nxv4f32, 4 },
    { ISD::FP_TO_UINT, MVT::nxv4i32, MVT::nxv4f32, 1 },
    { ISD::FP_TO_UINT, MVT::nxv4i16, MVT::nxv4f32, 1 },
    { ISD::FP_TO_UINT, MVT::nxv4i8,  MVT::nxv4f32, 1 },

    // Complex, from nxv8f64. Illegal -> illegal conversions not required.
    { ISD::FP_TO_SINT, MVT::nxv8i16, MVT::nxv8f64, 7 },
    { ISD::FP_TO_SINT, MVT::nxv8i8,  MVT::nxv8f64, 7 },
    { ISD::FP_TO_UINT, MVT::nxv8i16, MVT::nxv8f64, 7 },
    { ISD::FP_TO_UINT, MVT::nxv8i8,  MVT::nxv8f64, 7 },

    // Complex, from nxv4f64. Illegal -> illegal conversions not required.
    { ISD::FP_TO_SINT, MVT::nxv4i32, MVT::nxv4f64, 3 },
    { ISD::FP_TO_SINT, MVT::nxv4i16, MVT::nxv4f64, 3 },
    { ISD::FP_TO_SINT, MVT::nxv4i8,  MVT::nxv4f64, 3 },
    { ISD::FP_TO_UINT, MVT::nxv4i32, MVT::nxv4f64, 3 },
    { ISD::FP_TO_UINT, MVT::nxv4i16, MVT::nxv4f64, 3 },
    { ISD::FP_TO_UINT, MVT::nxv4i8,  MVT::nxv4f64, 3 },

    // Complex, from nxv8f32. Illegal -> illegal conversions not required.
    { ISD::FP_TO_SINT, MVT::nxv8i16, MVT::nxv8f32, 3 },
    { ISD::FP_TO_SINT, MVT::nxv8i8,  MVT::nxv8f32, 3 },
    { ISD::FP_TO_UINT, MVT::nxv8i16, MVT::nxv8f32, 3 },
    { ISD::FP_TO_UINT, MVT::nxv8i8,  MVT::nxv8f32, 3 },

    // Complex, from nxv8f16.
    { ISD::FP_TO_SINT, MVT::nxv8i64, MVT::nxv8f16, 10 },
    { ISD::FP_TO_SINT, MVT::nxv8i32, MVT::nxv8f16, 4 },
    { ISD::FP_TO_SINT, MVT::nxv8i16, MVT::nxv8f16, 1 },
    { ISD::FP_TO_SINT, MVT::nxv8i8,  MVT::nxv8f16, 1 },
    { ISD::FP_TO_UINT, MVT::nxv8i64, MVT::nxv8f16, 10 },
    { ISD::FP_TO_UINT, MVT::nxv8i32, MVT::nxv8f16, 4 },
    { ISD::FP_TO_UINT, MVT::nxv8i16, MVT::nxv8f16, 1 },
    { ISD::FP_TO_UINT, MVT::nxv8i8,  MVT::nxv8f16, 1 },

    // Complex, from nxv4f16.
    { ISD::FP_TO_SINT, MVT::nxv4i64, MVT::nxv4f16, 4 },
    { ISD::FP_TO_SINT, MVT::nxv4i32, MVT::nxv4f16, 1 },
    { ISD::FP_TO_SINT, MVT::nxv4i16, MVT::nxv4f16, 1 },
    { ISD::FP_TO_SINT, MVT::nxv4i8,  MVT::nxv4f16, 1 },
    { ISD::FP_TO_UINT, MVT::nxv4i64, MVT::nxv4f16, 4 },
    { ISD::FP_TO_UINT, MVT::nxv4i32, MVT::nxv4f16, 1 },
    { ISD::FP_TO_UINT, MVT::nxv4i16, MVT::nxv4f16, 1 },
    { ISD::FP_TO_UINT, MVT::nxv4i8,  MVT::nxv4f16, 1 },

    // Complex, from nxv2f16.
    { ISD::FP_TO_SINT, MVT::nxv2i64, MVT::nxv2f16, 1 },
    { ISD::FP_TO_SINT, MVT::nxv2i32, MVT::nxv2f16, 1 },
    { ISD::FP_TO_SINT, MVT::nxv2i16, MVT::nxv2f16, 1 },
    { ISD::FP_TO_SINT, MVT::nxv2i8,  MVT::nxv2f16, 1 },
    { ISD::FP_TO_UINT, MVT::nxv2i64, MVT::nxv2f16, 1 },
    { ISD::FP_TO_UINT, MVT::nxv2i32, MVT::nxv2f16, 1 },
    { ISD::FP_TO_UINT, MVT::nxv2i16, MVT::nxv2f16, 1 },
    { ISD::FP_TO_UINT, MVT::nxv2i8,  MVT::nxv2f16, 1 },

    // Truncate from nxvmf32 to nxvmf16.
    { ISD::FP_ROUND, MVT::nxv2f16, MVT::nxv2f32, 1 },
    { ISD::FP_ROUND, MVT::nxv4f16, MVT::nxv4f32, 1 },
    { ISD::FP_ROUND, MVT::nxv8f16, MVT::nxv8f32, 3 },

    // Truncate from nxvmf64 to nxvmf16.
    { ISD::FP_ROUND, MVT::nxv2f16, MVT::nxv2f64, 1 },
    { ISD::FP_ROUND, MVT::nxv4f16, MVT::nxv4f64, 3 },
    { ISD::FP_ROUND, MVT::nxv8f16, MVT::nxv8f64, 7 },

    // Truncate from nxvmf64 to nxvmf32.
    { ISD::FP_ROUND, MVT::nxv2f32, MVT::nxv2f64, 1 },
    { ISD::FP_ROUND, MVT::nxv4f32, MVT::nxv4f64, 3 },
    { ISD::FP_ROUND, MVT::nxv8f32, MVT::nxv8f64, 6 },

    // Extend from nxvmf16 to nxvmf32.
    { ISD::FP_EXTEND, MVT::nxv2f32, MVT::nxv2f16, 1},
    { ISD::FP_EXTEND, MVT::nxv4f32, MVT::nxv4f16, 1},
    { ISD::FP_EXTEND, MVT::nxv8f32, MVT::nxv8f16, 2},

    // Extend from nxvmf16 to nxvmf64.
    { ISD::FP_EXTEND, MVT::nxv2f64, MVT::nxv2f16, 1},
    { ISD::FP_EXTEND, MVT::nxv4f64, MVT::nxv4f16, 2},
    { ISD::FP_EXTEND, MVT::nxv8f64, MVT::nxv8f16, 4},

    // Extend from nxvmf32 to nxvmf64.
    { ISD::FP_EXTEND, MVT::nxv2f64, MVT::nxv2f32, 1},
    { ISD::FP_EXTEND, MVT::nxv4f64, MVT::nxv4f32, 2},
    { ISD::FP_EXTEND, MVT::nxv8f64, MVT::nxv8f32, 6},

  };

  if (const auto *Entry = ConvertCostTableLookup(ConversionTbl, ISD,
                                                 DstTy.getSimpleVT(),
                                                 SrcTy.getSimpleVT()))
    return AdjustCost(Entry->Cost);

  return AdjustCost(
      BaseT::getCastInstrCost(Opcode, Dst, Src, CCH, CostKind, I));
}

InstructionCost AArch64TTIImpl::getExtractWithExtendCost(unsigned Opcode,
                                                         Type *Dst,
                                                         VectorType *VecTy,
                                                         unsigned Index) {

  // Make sure we were given a valid extend opcode.
  assert((Opcode == Instruction::SExt || Opcode == Instruction::ZExt) &&
         "Invalid opcode");

  // We are extending an element we extract from a vector, so the source type
  // of the extend is the element type of the vector.
  auto *Src = VecTy->getElementType();

  // Sign- and zero-extends are for integer types only.
  assert(isa<IntegerType>(Dst) && isa<IntegerType>(Src) && "Invalid type");

  // Get the cost for the extract. We compute the cost (if any) for the extend
  // below.
  InstructionCost Cost =
      getVectorInstrCost(Instruction::ExtractElement, VecTy, Index);

  // Legalize the types.
  auto VecLT = TLI->getTypeLegalizationCost(DL, VecTy);
  auto DstVT = TLI->getValueType(DL, Dst);
  auto SrcVT = TLI->getValueType(DL, Src);
  TTI::TargetCostKind CostKind = TTI::TCK_RecipThroughput;

  // If the resulting type is still a vector and the destination type is legal,
  // we may get the extension for free. If not, get the default cost for the
  // extend.
  if (!VecLT.second.isVector() || !TLI->isTypeLegal(DstVT))
    return Cost + getCastInstrCost(Opcode, Dst, Src, TTI::CastContextHint::None,
                                   CostKind);

  // The destination type should be larger than the element type. If not, get
  // the default cost for the extend.
  if (DstVT.getFixedSizeInBits() < SrcVT.getFixedSizeInBits())
    return Cost + getCastInstrCost(Opcode, Dst, Src, TTI::CastContextHint::None,
                                   CostKind);

  switch (Opcode) {
  default:
    llvm_unreachable("Opcode should be either SExt or ZExt");

  // For sign-extends, we only need a smov, which performs the extension
  // automatically.
  case Instruction::SExt:
    return Cost;

  // For zero-extends, the extend is performed automatically by a umov unless
  // the destination type is i64 and the element type is i8 or i16.
  case Instruction::ZExt:
    if (DstVT.getSizeInBits() != 64u || SrcVT.getSizeInBits() == 32u)
      return Cost;
  }

  // If we are unable to perform the extend for free, get the default cost.
  return Cost + getCastInstrCost(Opcode, Dst, Src, TTI::CastContextHint::None,
                                 CostKind);
}

InstructionCost AArch64TTIImpl::getCFInstrCost(unsigned Opcode,
                                               TTI::TargetCostKind CostKind,
                                               const Instruction *I) {
  // For non-throughput cost kinds, only PHI nodes are modeled as free.
  if (CostKind != TTI::TCK_RecipThroughput)
    return Opcode == Instruction::PHI ? 0 : 1;
  assert(CostKind == TTI::TCK_RecipThroughput && "unexpected CostKind");
  // Branches are assumed to be predicted.
  return 0;
}

InstructionCost AArch64TTIImpl::getVectorInstrCost(unsigned Opcode, Type *Val,
                                                   unsigned Index) {
  assert(Val->isVectorTy() && "This must be a vector type");

  // Index == -1U means "unknown lane"; in that case fall through to the
  // generic insert/extract base cost below.
  if (Index != -1U) {
    // Legalize the type.
    std::pair<int, MVT> LT = TLI->getTypeLegalizationCost(DL, Val);

    // This type is legalized to a scalar type.
    if (!LT.second.isVector())
      return 0;

    // The type may be split. Normalize the index to the new type.
    unsigned Width = LT.second.getVectorNumElements();
    Index = Index % Width;

    // The element at index zero is already inside the vector.
    if (Index == 0)
      return 0;
  }

  // All other insert/extracts cost this much.
  return ST->getVectorInsertExtractBaseCost();
}

/// Cost of an arithmetic instruction. Divisions by constants are costed as
/// the instruction sequences they are actually expanded to, and widening
/// instructions absorb the (free) cost of their feeding extends.
InstructionCost AArch64TTIImpl::getArithmeticInstrCost(
    unsigned Opcode, Type *Ty, TTI::TargetCostKind CostKind,
    TTI::OperandValueKind Opd1Info, TTI::OperandValueKind Opd2Info,
    TTI::OperandValueProperties Opd1PropInfo,
    TTI::OperandValueProperties Opd2PropInfo, ArrayRef<const Value *> Args,
    const Instruction *CxtI) {
  // TODO: Handle more cost kinds.
  if (CostKind != TTI::TCK_RecipThroughput)
    return BaseT::getArithmeticInstrCost(Opcode, Ty, CostKind, Opd1Info,
                                         Opd2Info, Opd1PropInfo,
                                         Opd2PropInfo, Args, CxtI);

  // Legalize the type.
  std::pair<int, MVT> LT = TLI->getTypeLegalizationCost(DL, Ty);

  // If the instruction is a widening instruction (e.g., uaddl, saddw, etc.),
  // add in the widening overhead specified by the sub-target. Since the
  // extends feeding widening instructions are performed automatically, they
  // aren't present in the generated code and have a zero cost. By adding a
  // widening overhead here, we attach the total cost of the combined operation
  // to the widening instruction.
  InstructionCost Cost = 0;
  if (isWideningInstruction(Ty, Opcode, Args))
    Cost += ST->getWideningBaseCost();

  int ISD = TLI->InstructionOpcodeToISD(Opcode);

  switch (ISD) {
  default:
    return Cost + BaseT::getArithmeticInstrCost(Opcode, Ty, CostKind, Opd1Info,
                                                Opd2Info,
                                                Opd1PropInfo, Opd2PropInfo);
  case ISD::SDIV:
    if (Opd2Info == TargetTransformInfo::OK_UniformConstantValue &&
        Opd2PropInfo == TargetTransformInfo::OP_PowerOf2) {
      // On AArch64, scalar signed division by constants power-of-two are
      // normally expanded to the sequence ADD + CMP + SELECT + SRA.
      // The OperandValue properties many not be same as that of previous
      // operation; conservatively assume OP_None.
      Cost += getArithmeticInstrCost(Instruction::Add, Ty, CostKind,
                                     Opd1Info, Opd2Info,
                                     TargetTransformInfo::OP_None,
                                     TargetTransformInfo::OP_None);
      Cost += getArithmeticInstrCost(Instruction::Sub, Ty, CostKind,
                                     Opd1Info, Opd2Info,
                                     TargetTransformInfo::OP_None,
                                     TargetTransformInfo::OP_None);
      Cost += getArithmeticInstrCost(Instruction::Select, Ty, CostKind,
                                     Opd1Info, Opd2Info,
                                     TargetTransformInfo::OP_None,
                                     TargetTransformInfo::OP_None);
      Cost += getArithmeticInstrCost(Instruction::AShr, Ty, CostKind,
                                     Opd1Info, Opd2Info,
                                     TargetTransformInfo::OP_None,
                                     TargetTransformInfo::OP_None);
      return Cost;
    }
    LLVM_FALLTHROUGH;
  case ISD::UDIV:
    if (Opd2Info == TargetTransformInfo::OK_UniformConstantValue) {
      auto VT = TLI->getValueType(DL, Ty);
      if (TLI->isOperationLegalOrCustom(ISD::MULHU, VT)) {
        // Vector signed division by constant are expanded to the
        // sequence MULHS + ADD/SUB + SRA + SRL + ADD, and unsigned division
        // to MULHS + SUB + SRL + ADD + SRL.
        InstructionCost MulCost = getArithmeticInstrCost(
            Instruction::Mul, Ty, CostKind, Opd1Info, Opd2Info,
            TargetTransformInfo::OP_None, TargetTransformInfo::OP_None);
        InstructionCost AddCost = getArithmeticInstrCost(
            Instruction::Add, Ty, CostKind, Opd1Info, Opd2Info,
            TargetTransformInfo::OP_None, TargetTransformInfo::OP_None);
        InstructionCost ShrCost = getArithmeticInstrCost(
            Instruction::AShr, Ty, CostKind, Opd1Info, Opd2Info,
            TargetTransformInfo::OP_None, TargetTransformInfo::OP_None);
        return MulCost * 2 + AddCost * 2 + ShrCost * 2 + 1;
      }
    }

    Cost += BaseT::getArithmeticInstrCost(Opcode, Ty, CostKind, Opd1Info,
                                          Opd2Info,
                                          Opd1PropInfo, Opd2PropInfo);
    if (Ty->isVectorTy()) {
      // On AArch64, vector divisions are not supported natively and are
      // expanded into scalar divisions of each pair of elements.
      Cost += getArithmeticInstrCost(Instruction::ExtractElement, Ty, CostKind,
                                     Opd1Info, Opd2Info, Opd1PropInfo,
                                     Opd2PropInfo);
      Cost += getArithmeticInstrCost(Instruction::InsertElement, Ty, CostKind,
                                     Opd1Info, Opd2Info, Opd1PropInfo,
                                     Opd2PropInfo);
      // TODO: if one of the arguments is scalar, then it's not necessary to
      // double the cost of handling the vector elements.
      Cost += Cost;
    }
    return Cost;

  case ISD::MUL:
    if (LT.second != MVT::v2i64)
      return (Cost + 1) * LT.first;
    // Since we do not have a MUL.2d instruction, a mul <2 x i64> is expensive
    // as elements are extracted from the vectors and the muls scalarized.
    // As getScalarizationOverhead is a bit too pessimistic, we estimate the
    // cost for a i64 vector directly here, which is:
    // - four i64 extracts,
    // - two i64 inserts, and
    // - two muls.
    // So, for a v2i64 with LT.First = 1 the cost is 8, and for a v4i64 with
    // LT.first = 2 the cost is 16.
    return LT.first * 8;
  case ISD::ADD:
  case ISD::XOR:
  case ISD::OR:
  case ISD::AND:
    // These nodes are marked as 'custom' for combining purposes only.
    // We know that they are legal. See LowerAdd in ISelLowering.
    return (Cost + 1) * LT.first;

  case ISD::FADD:
    // These nodes are marked as 'custom' just to lower them to SVE.
    // We know said lowering will incur no additional cost.
935 if (isa<FixedVectorType>(Ty) && !Ty->getScalarType()->isFP128Ty()) 936 return (Cost + 2) * LT.first; 937 938 return Cost + BaseT::getArithmeticInstrCost(Opcode, Ty, CostKind, Opd1Info, 939 Opd2Info, 940 Opd1PropInfo, Opd2PropInfo); 941 } 942 } 943 944 InstructionCost AArch64TTIImpl::getAddressComputationCost(Type *Ty, 945 ScalarEvolution *SE, 946 const SCEV *Ptr) { 947 // Address computations in vectorized code with non-consecutive addresses will 948 // likely result in more instructions compared to scalar code where the 949 // computation can more often be merged into the index mode. The resulting 950 // extra micro-ops can significantly decrease throughput. 951 unsigned NumVectorInstToHideOverhead = 10; 952 int MaxMergeDistance = 64; 953 954 if (Ty->isVectorTy() && SE && 955 !BaseT::isConstantStridedAccessLessThan(SE, Ptr, MaxMergeDistance + 1)) 956 return NumVectorInstToHideOverhead; 957 958 // In many cases the address computation is not merged into the instruction 959 // addressing mode. 960 return 1; 961 } 962 963 InstructionCost AArch64TTIImpl::getCmpSelInstrCost(unsigned Opcode, Type *ValTy, 964 Type *CondTy, 965 CmpInst::Predicate VecPred, 966 TTI::TargetCostKind CostKind, 967 const Instruction *I) { 968 // TODO: Handle other cost kinds. 969 if (CostKind != TTI::TCK_RecipThroughput) 970 return BaseT::getCmpSelInstrCost(Opcode, ValTy, CondTy, VecPred, CostKind, 971 I); 972 973 int ISD = TLI->InstructionOpcodeToISD(Opcode); 974 // We don't lower some vector selects well that are wider than the register 975 // width. 976 if (isa<FixedVectorType>(ValTy) && ISD == ISD::SELECT) { 977 // We would need this many instructions to hide the scalarization happening. 978 const int AmortizationCost = 20; 979 980 // If VecPred is not set, check if we can get a predicate from the context 981 // instruction, if its type matches the requested ValTy. 
982 if (VecPred == CmpInst::BAD_ICMP_PREDICATE && I && I->getType() == ValTy) { 983 CmpInst::Predicate CurrentPred; 984 if (match(I, m_Select(m_Cmp(CurrentPred, m_Value(), m_Value()), m_Value(), 985 m_Value()))) 986 VecPred = CurrentPred; 987 } 988 // Check if we have a compare/select chain that can be lowered using CMxx & 989 // BFI pair. 990 if (CmpInst::isIntPredicate(VecPred)) { 991 static const auto ValidMinMaxTys = {MVT::v8i8, MVT::v16i8, MVT::v4i16, 992 MVT::v8i16, MVT::v2i32, MVT::v4i32, 993 MVT::v2i64}; 994 auto LT = TLI->getTypeLegalizationCost(DL, ValTy); 995 if (any_of(ValidMinMaxTys, [<](MVT M) { return M == LT.second; })) 996 return LT.first; 997 } 998 999 static const TypeConversionCostTblEntry 1000 VectorSelectTbl[] = { 1001 { ISD::SELECT, MVT::v16i1, MVT::v16i16, 16 }, 1002 { ISD::SELECT, MVT::v8i1, MVT::v8i32, 8 }, 1003 { ISD::SELECT, MVT::v16i1, MVT::v16i32, 16 }, 1004 { ISD::SELECT, MVT::v4i1, MVT::v4i64, 4 * AmortizationCost }, 1005 { ISD::SELECT, MVT::v8i1, MVT::v8i64, 8 * AmortizationCost }, 1006 { ISD::SELECT, MVT::v16i1, MVT::v16i64, 16 * AmortizationCost } 1007 }; 1008 1009 EVT SelCondTy = TLI->getValueType(DL, CondTy); 1010 EVT SelValTy = TLI->getValueType(DL, ValTy); 1011 if (SelCondTy.isSimple() && SelValTy.isSimple()) { 1012 if (const auto *Entry = ConvertCostTableLookup(VectorSelectTbl, ISD, 1013 SelCondTy.getSimpleVT(), 1014 SelValTy.getSimpleVT())) 1015 return Entry->Cost; 1016 } 1017 } 1018 // The base case handles scalable vectors fine for now, since it treats the 1019 // cost as 1 * legalization cost. 1020 return BaseT::getCmpSelInstrCost(Opcode, ValTy, CondTy, VecPred, CostKind, I); 1021 } 1022 1023 AArch64TTIImpl::TTI::MemCmpExpansionOptions 1024 AArch64TTIImpl::enableMemCmpExpansion(bool OptSize, bool IsZeroCmp) const { 1025 TTI::MemCmpExpansionOptions Options; 1026 if (ST->requiresStrictAlign()) { 1027 // TODO: Add cost modeling for strict align. 
Misaligned loads expand to 1028 // a bunch of instructions when strict align is enabled. 1029 return Options; 1030 } 1031 Options.AllowOverlappingLoads = true; 1032 Options.MaxNumLoads = TLI->getMaxExpandSizeMemcmp(OptSize); 1033 Options.NumLoadsPerBlock = Options.MaxNumLoads; 1034 // TODO: Though vector loads usually perform well on AArch64, in some targets 1035 // they may wake up the FP unit, which raises the power consumption. Perhaps 1036 // they could be used with no holds barred (-O3). 1037 Options.LoadSizes = {8, 4, 2, 1}; 1038 return Options; 1039 } 1040 1041 InstructionCost 1042 AArch64TTIImpl::getMaskedMemoryOpCost(unsigned Opcode, Type *Src, 1043 Align Alignment, unsigned AddressSpace, 1044 TTI::TargetCostKind CostKind) { 1045 if (!isa<ScalableVectorType>(Src)) 1046 return BaseT::getMaskedMemoryOpCost(Opcode, Src, Alignment, AddressSpace, 1047 CostKind); 1048 auto LT = TLI->getTypeLegalizationCost(DL, Src); 1049 return LT.first * 2; 1050 } 1051 1052 InstructionCost AArch64TTIImpl::getGatherScatterOpCost( 1053 unsigned Opcode, Type *DataTy, const Value *Ptr, bool VariableMask, 1054 Align Alignment, TTI::TargetCostKind CostKind, const Instruction *I) { 1055 1056 if (!isa<ScalableVectorType>(DataTy)) 1057 return BaseT::getGatherScatterOpCost(Opcode, DataTy, Ptr, VariableMask, 1058 Alignment, CostKind, I); 1059 auto *VT = cast<VectorType>(DataTy); 1060 auto LT = TLI->getTypeLegalizationCost(DL, DataTy); 1061 ElementCount LegalVF = LT.second.getVectorElementCount(); 1062 Optional<unsigned> MaxNumVScale = getMaxVScale(); 1063 assert(MaxNumVScale && "Expected valid max vscale value"); 1064 1065 InstructionCost MemOpCost = 1066 getMemoryOpCost(Opcode, VT->getElementType(), Alignment, 0, CostKind, I); 1067 unsigned MaxNumElementsPerGather = 1068 MaxNumVScale.getValue() * LegalVF.getKnownMinValue(); 1069 return LT.first * MaxNumElementsPerGather * MemOpCost; 1070 } 1071 1072 bool AArch64TTIImpl::useNeonVector(const Type *Ty) const { 1073 return 
isa<FixedVectorType>(Ty) && !ST->useSVEForFixedLengthVectors(); 1074 } 1075 1076 InstructionCost AArch64TTIImpl::getMemoryOpCost(unsigned Opcode, Type *Ty, 1077 MaybeAlign Alignment, 1078 unsigned AddressSpace, 1079 TTI::TargetCostKind CostKind, 1080 const Instruction *I) { 1081 // Type legalization can't handle structs 1082 if (TLI->getValueType(DL, Ty, true) == MVT::Other) 1083 return BaseT::getMemoryOpCost(Opcode, Ty, Alignment, AddressSpace, 1084 CostKind); 1085 1086 auto LT = TLI->getTypeLegalizationCost(DL, Ty); 1087 1088 // TODO: consider latency as well for TCK_SizeAndLatency. 1089 if (CostKind == TTI::TCK_CodeSize || CostKind == TTI::TCK_SizeAndLatency) 1090 return LT.first; 1091 1092 if (CostKind != TTI::TCK_RecipThroughput) 1093 return 1; 1094 1095 if (ST->isMisaligned128StoreSlow() && Opcode == Instruction::Store && 1096 LT.second.is128BitVector() && (!Alignment || *Alignment < Align(16))) { 1097 // Unaligned stores are extremely inefficient. We don't split all 1098 // unaligned 128-bit stores because the negative impact that has shown in 1099 // practice on inlined block copy code. 1100 // We make such stores expensive so that we will only vectorize if there 1101 // are 6 other instructions getting vectorized. 1102 const int AmortizationCost = 6; 1103 1104 return LT.first * 2 * AmortizationCost; 1105 } 1106 1107 if (useNeonVector(Ty) && 1108 cast<VectorType>(Ty)->getElementType()->isIntegerTy(8)) { 1109 unsigned ProfitableNumElements; 1110 if (Opcode == Instruction::Store) 1111 // We use a custom trunc store lowering so v.4b should be profitable. 1112 ProfitableNumElements = 4; 1113 else 1114 // We scalarize the loads because there is not v.4b register and we 1115 // have to promote the elements to v.2. 
      ProfitableNumElements = 8;

    if (cast<FixedVectorType>(Ty)->getNumElements() < ProfitableNumElements) {
      unsigned NumVecElts = cast<FixedVectorType>(Ty)->getNumElements();
      unsigned NumVectorizableInstsToAmortize = NumVecElts * 2;
      // We generate 2 instructions per vector element.
      return NumVectorizableInstsToAmortize * NumVecElts * 2;
    }
  }

  return LT.first;
}

/// Cost of an interleaved load/store group. Legal groups map directly onto
/// ldN/stN instructions; anything else falls back to the base model.
InstructionCost AArch64TTIImpl::getInterleavedMemoryOpCost(
    unsigned Opcode, Type *VecTy, unsigned Factor, ArrayRef<unsigned> Indices,
    Align Alignment, unsigned AddressSpace, TTI::TargetCostKind CostKind,
    bool UseMaskForCond, bool UseMaskForGaps) {
  assert(Factor >= 2 && "Invalid interleave factor");
  auto *VecVTy = cast<FixedVectorType>(VecTy);

  if (!UseMaskForCond && !UseMaskForGaps &&
      Factor <= TLI->getMaxSupportedInterleaveFactor()) {
    unsigned NumElts = VecVTy->getNumElements();
    auto *SubVecTy =
        FixedVectorType::get(VecTy->getScalarType(), NumElts / Factor);

    // ldN/stN only support legal vector types of size 64 or 128 in bits.
    // Accesses having vector types that are a multiple of 128 bits can be
    // matched to more than one ldN/stN instruction.
    if (NumElts % Factor == 0 &&
        TLI->isLegalInterleavedAccessType(SubVecTy, DL))
      return Factor * TLI->getNumInterleavedAccesses(SubVecTy, DL);
  }

  return BaseT::getInterleavedMemoryOpCost(Opcode, VecTy, Factor, Indices,
                                           Alignment, AddressSpace, CostKind,
                                           UseMaskForCond, UseMaskForGaps);
}

/// Cost of spilling/reloading values that are live across a call: each
/// 128-bit vector type is charged one store plus one load.
int AArch64TTIImpl::getCostOfKeepingLiveOverCall(ArrayRef<Type *> Tys) {
  InstructionCost Cost = 0;
  TTI::TargetCostKind CostKind = TTI::TCK_RecipThroughput;
  for (auto *I : Tys) {
    if (!I->isVectorTy())
      continue;
    if (I->getScalarSizeInBits() * cast<FixedVectorType>(I)->getNumElements() ==
        128)
      Cost += getMemoryOpCost(Instruction::Store, I, Align(128), 0, CostKind) +
              getMemoryOpCost(Instruction::Load, I, Align(128), 0, CostKind);
  }
  return *Cost.getValue();
}

unsigned AArch64TTIImpl::getMaxInterleaveFactor(unsigned VF) {
  return ST->getMaxInterleaveFactor();
}

// For Falkor, we want to avoid having too many strided loads in a loop since
// that can exhaust the HW prefetcher resources. We adjust the unroller
// MaxCount preference below to attempt to ensure unrolling doesn't create too
// many strided loads.
static void
getFalkorUnrollingPreferences(Loop *L, ScalarEvolution &SE,
                              TargetTransformInfo::UnrollingPreferences &UP) {
  enum { MaxStridedLoads = 7 };
  // Count affine strided loads in the loop, giving up early once the count
  // exceeds MaxStridedLoads / 2 since more makes no difference below.
  auto countStridedLoads = [](Loop *L, ScalarEvolution &SE) {
    int StridedLoads = 0;
    // FIXME? We could make this more precise by looking at the CFG and
    // e.g. not counting loads in each side of an if-then-else diamond.
    for (const auto BB : L->blocks()) {
      for (auto &I : *BB) {
        LoadInst *LMemI = dyn_cast<LoadInst>(&I);
        if (!LMemI)
          continue;

        Value *PtrValue = LMemI->getPointerOperand();
        if (L->isLoopInvariant(PtrValue))
          continue;

        const SCEV *LSCEV = SE.getSCEV(PtrValue);
        const SCEVAddRecExpr *LSCEVAddRec = dyn_cast<SCEVAddRecExpr>(LSCEV);
        if (!LSCEVAddRec || !LSCEVAddRec->isAffine())
          continue;

        // FIXME? We could take pairing of unrolled load copies into account
        // by looking at the AddRec, but we would probably have to limit this
        // to loops with no stores or other memory optimization barriers.
        ++StridedLoads;
        // We've seen enough strided loads that seeing more won't make a
        // difference.
        if (StridedLoads > MaxStridedLoads / 2)
          return StridedLoads;
      }
    }
    return StridedLoads;
  };

  int StridedLoads = countStridedLoads(L, SE);
  LLVM_DEBUG(dbgs() << "falkor-hwpf: detected " << StridedLoads
                    << " strided loads\n");
  // Pick the largest power of 2 unroll count that won't result in too many
  // strided loads.
  if (StridedLoads) {
    UP.MaxCount = 1 << Log2_32(MaxStridedLoads / StridedLoads);
    LLVM_DEBUG(dbgs() << "falkor-hwpf: setting unroll MaxCount to "
                      << UP.MaxCount << '\n');
  }
}

void AArch64TTIImpl::getUnrollingPreferences(Loop *L, ScalarEvolution &SE,
                                             TTI::UnrollingPreferences &UP) {
  // Enable partial unrolling and runtime unrolling.
  BaseT::getUnrollingPreferences(L, SE, UP);

  // For inner loop, it is more likely to be a hot one, and the runtime check
  // can be promoted out from LICM pass, so the overhead is less, let's try
  // a larger threshold to unroll more loops.
  if (L->getLoopDepth() > 1)
    UP.PartialThreshold *= 2;

  // Disable partial & runtime unrolling on -Os.
  UP.PartialOptSizeThreshold = 0;

  if (ST->getProcFamily() == AArch64Subtarget::Falkor &&
      EnableFalkorHWPFUnrollFix)
    getFalkorUnrollingPreferences(L, SE, UP);

  // Scan the loop: don't unroll loops with calls as this could prevent
  // inlining. Don't unroll vector loops either, as they don't benefit much from
  // unrolling.
  for (auto *BB : L->getBlocks()) {
    for (auto &I : *BB) {
      // Don't unroll vectorised loop.
      if (I.getType()->isVectorTy())
        return;

      if (isa<CallInst>(I) || isa<InvokeInst>(I)) {
        if (const Function *F = cast<CallBase>(I).getCalledFunction()) {
          if (!isLoweredToCall(F))
            continue;
        }
        return;
      }
    }
  }

  // Enable runtime unrolling for in-order models
  // If mcpu is omitted, getProcFamily() returns AArch64Subtarget::Others, so by
  // checking for that case, we can ensure that the default behaviour is
  // unchanged
  if (ST->getProcFamily() != AArch64Subtarget::Others &&
      !ST->getSchedModel().isOutOfOrder()) {
    UP.Runtime = true;
    UP.Partial = true;
    UP.UpperBound = true;
    UP.UnrollRemainder = true;
    UP.DefaultUnrollRuntimeCount = 4;
  }
}

void AArch64TTIImpl::getPeelingPreferences(Loop *L, ScalarEvolution &SE,
                                           TTI::PeelingPreferences &PP) {
  BaseT::getPeelingPreferences(L, SE, PP);
}

/// Rebuild the result of an ldN/stN intrinsic as a value of \p ExpectedType
/// so a redundant load can be forwarded; returns nullptr when the types
/// don't line up.
Value *AArch64TTIImpl::getOrCreateResultFromMemIntrinsic(IntrinsicInst *Inst,
                                                         Type *ExpectedType) {
  switch (Inst->getIntrinsicID()) {
  default:
    return nullptr;
  case Intrinsic::aarch64_neon_st2:
  case Intrinsic::aarch64_neon_st3:
  case Intrinsic::aarch64_neon_st4: {
    // Create a struct type
    StructType *ST = dyn_cast<StructType>(ExpectedType);
    if (!ST)
      return nullptr;
    // The last operand of stN is the pointer; the rest are the stored values.
    unsigned NumElts = Inst->getNumArgOperands() - 1;
    if (ST->getNumElements() != NumElts)
      return nullptr;
    for (unsigned i = 0, e = NumElts; i != e; ++i) {
      if (Inst->getArgOperand(i)->getType() != ST->getElementType(i))
        return nullptr;
    }
    // Re-assemble the stored values into a struct matching ExpectedType.
    Value *Res = UndefValue::get(ExpectedType);
    IRBuilder<> Builder(Inst);
    for (unsigned i = 0, e = NumElts; i != e; ++i) {
      Value *L = Inst->getArgOperand(i);
      Res = Builder.CreateInsertValue(Res, L, i);
    }
    return Res;
  }
  case Intrinsic::aarch64_neon_ld2:
  case Intrinsic::aarch64_neon_ld3:
  case Intrinsic::aarch64_neon_ld4:
    if (Inst->getType() == ExpectedType)
      return Inst;
    return nullptr;
  }
}

/// Describe the memory behavior of the NEON ldN/stN intrinsics so passes
/// like EarlyCSE can reason about them.
bool AArch64TTIImpl::getTgtMemIntrinsic(IntrinsicInst *Inst,
                                        MemIntrinsicInfo &Info) {
  switch (Inst->getIntrinsicID()) {
  default:
    break;
  case Intrinsic::aarch64_neon_ld2:
  case Intrinsic::aarch64_neon_ld3:
  case Intrinsic::aarch64_neon_ld4:
    Info.ReadMem = true;
    Info.WriteMem = false;
    Info.PtrVal = Inst->getArgOperand(0);
    break;
  case Intrinsic::aarch64_neon_st2:
  case Intrinsic::aarch64_neon_st3:
  case Intrinsic::aarch64_neon_st4:
    Info.ReadMem = false;
    Info.WriteMem = true;
    Info.PtrVal = Inst->getArgOperand(Inst->getNumArgOperands() - 1);
    break;
  }

  switch (Inst->getIntrinsicID()) {
  default:
    return false;
  case Intrinsic::aarch64_neon_ld2:
  case Intrinsic::aarch64_neon_st2:
    Info.MatchingId = VECTOR_LDST_TWO_ELEMENTS;
    break;
  case Intrinsic::aarch64_neon_ld3:
  case Intrinsic::aarch64_neon_st3:
    Info.MatchingId = VECTOR_LDST_THREE_ELEMENTS;
    break;
  case Intrinsic::aarch64_neon_ld4:
  case Intrinsic::aarch64_neon_st4:
    Info.MatchingId = VECTOR_LDST_FOUR_ELEMENTS;
    break;
  }
  return true;
}

/// See if \p I should be considered for address type promotion. We check if \p
/// I is a sext with right type and used in memory accesses. If it used in a
/// "complex" getelementptr, we allow it to be promoted without finding other
/// sext instructions that sign extended the same initial value. A getelementptr
/// is considered as "complex" if it has more than 2 operands.
bool AArch64TTIImpl::shouldConsiderAddressTypePromotion(
    const Instruction &I, bool &AllowPromotionWithoutCommonHeader) {
  bool Considerable = false;
  AllowPromotionWithoutCommonHeader = false;
  if (!isa<SExtInst>(&I))
    return false;
  Type *ConsideredSExtType =
      Type::getInt64Ty(I.getParent()->getParent()->getContext());
  if (I.getType() != ConsideredSExtType)
    return false;
  // See if the sext is the one with the right type and used in at least one
  // GetElementPtrInst.
  for (const User *U : I.users()) {
    if (const GetElementPtrInst *GEPInst = dyn_cast<GetElementPtrInst>(U)) {
      Considerable = true;
      // A getelementptr is considered as "complex" if it has more than 2
      // operands. We will promote a SExt used in such complex GEP as we
      // expect some computation to be merged if they are done on 64 bits.
      if (GEPInst->getNumOperands() > 2) {
        AllowPromotionWithoutCommonHeader = true;
        break;
      }
    }
  }
  return Considerable;
}

/// Whether a reduction of kind \p RdxDesc can be vectorized for \p VF.
/// Fixed-width VFs are always fine; scalable VFs are restricted to SVE-legal
/// element types and the recurrence kinds listed below.
bool AArch64TTIImpl::isLegalToVectorizeReduction(RecurrenceDescriptor RdxDesc,
                                                 ElementCount VF) const {
  if (!VF.isScalable())
    return true;

  Type *Ty = RdxDesc.getRecurrenceType();
  if (Ty->isBFloatTy() || !isLegalElementTypeForSVE(Ty))
    return false;

  switch (RdxDesc.getRecurrenceKind()) {
  case RecurKind::Add:
  case RecurKind::FAdd:
  case RecurKind::And:
  case RecurKind::Or:
  case RecurKind::Xor:
  case RecurKind::SMin:
  case RecurKind::SMax:
  case RecurKind::UMin:
  case RecurKind::UMax:
  case RecurKind::FMin:
  case RecurKind::FMax:
    return true;
  default:
    return false;
  }
}

/// Cost of a min/max reduction. Only scalable vectors are handled here; any
/// splitting during legalization is charged as extra compare+select pairs.
InstructionCost
AArch64TTIImpl::getMinMaxReductionCost(VectorType *Ty, VectorType *CondTy,
                                       bool IsPairwise, bool IsUnsigned,
                                       TTI::TargetCostKind CostKind) {
  if (!isa<ScalableVectorType>(Ty))
    return BaseT::getMinMaxReductionCost(Ty, CondTy, IsPairwise, IsUnsigned,
                                         CostKind);
  assert((isa<ScalableVectorType>(Ty) && isa<ScalableVectorType>(CondTy)) &&
         "Both vector needs to be scalable");

  std::pair<int, MVT> LT = TLI->getTypeLegalizationCost(DL, Ty);
  InstructionCost LegalizationCost = 0;
  if (LT.first > 1) {
    // A split type needs LT.first - 1 compare+select pairs to combine parts.
    Type *LegalVTy = EVT(LT.second).getTypeForEVT(Ty->getContext());
    unsigned CmpOpcode =
        Ty->isFPOrFPVectorTy() ? Instruction::FCmp : Instruction::ICmp;
    LegalizationCost =
        getCmpSelInstrCost(CmpOpcode, LegalVTy, LegalVTy,
                           CmpInst::BAD_ICMP_PREDICATE, CostKind) +
        getCmpSelInstrCost(Instruction::Select, LegalVTy, LegalVTy,
                           CmpInst::BAD_ICMP_PREDICATE, CostKind);
    LegalizationCost *= LT.first - 1;
  }

  return LegalizationCost + /*Cost of horizontal reduction*/ 2;
}

/// Cost of a scalable-vector arithmetic reduction: combining split parts plus
/// a fixed cost for the final horizontal reduction.
InstructionCost AArch64TTIImpl::getArithmeticReductionCostSVE(
    unsigned Opcode, VectorType *ValTy, bool IsPairwise,
    TTI::TargetCostKind CostKind) {
  assert(!IsPairwise && "Cannot be pair wise to continue");

  std::pair<int, MVT> LT = TLI->getTypeLegalizationCost(DL, ValTy);
  InstructionCost LegalizationCost = 0;
  if (LT.first > 1) {
    Type *LegalVTy = EVT(LT.second).getTypeForEVT(ValTy->getContext());
    LegalizationCost = getArithmeticInstrCost(Opcode, LegalVTy, CostKind);
    LegalizationCost *= LT.first - 1;
  }

  int ISD = TLI->InstructionOpcodeToISD(Opcode);
  assert(ISD && "Invalid opcode");
  // Add the final reduction cost for the legal horizontal reduction
  switch (ISD) {
  case ISD::ADD:
  case ISD::AND:
  case ISD::OR:
  case ISD::XOR:
  case ISD::FADD:
    return LegalizationCost + 2;
  default:
    return InstructionCost::getInvalid();
  }
}

InstructionCost
AArch64TTIImpl::getArithmeticReductionCost(unsigned Opcode, VectorType *ValTy,
                                           bool IsPairwiseForm,
                                           TTI::TargetCostKind CostKind) {

  if (isa<ScalableVectorType>(ValTy))
    return getArithmeticReductionCostSVE(Opcode, ValTy, IsPairwiseForm,
                                         CostKind);
  if (IsPairwiseForm)
    return BaseT::getArithmeticReductionCost(Opcode, ValTy, IsPairwiseForm,
                                             CostKind);

  std::pair<int, MVT> LT = TLI->getTypeLegalizationCost(DL, ValTy);
  MVT MTy = LT.second;
  int ISD = TLI->InstructionOpcodeToISD(Opcode);
  assert(ISD && "Invalid opcode");

  // Horizontal adds can use the 'addv' instruction. We model the cost of these
  // instructions as normal vector adds. This is the only arithmetic vector
  // reduction operation for which we have an instruction.
  static const CostTblEntry CostTblNoPairwise[]{
      {ISD::ADD, MVT::v8i8, 1},
      {ISD::ADD, MVT::v16i8, 1},
      {ISD::ADD, MVT::v4i16, 1},
      {ISD::ADD, MVT::v8i16, 1},
      {ISD::ADD, MVT::v4i32, 1},
  };

  if (const auto *Entry = CostTableLookup(CostTblNoPairwise, ISD, MTy))
    return LT.first * Entry->Cost;

  return BaseT::getArithmeticReductionCost(Opcode, ValTy, IsPairwiseForm,
                                           CostKind);
}

/// Cost of a shuffle. Shuffle kinds with single-instruction lowerings (dup,
/// trn1/trn2, zip1/zip2, rev, mov, ...) are looked up in the table below;
/// anything else falls back to the base implementation.
InstructionCost AArch64TTIImpl::getShuffleCost(TTI::ShuffleKind Kind,
                                               VectorType *Tp,
                                               ArrayRef<int> Mask, int Index,
                                               VectorType *SubTp) {
  if (Kind == TTI::SK_Broadcast || Kind == TTI::SK_Transpose ||
      Kind == TTI::SK_Select || Kind == TTI::SK_PermuteSingleSrc ||
      Kind == TTI::SK_Reverse) {
    static const CostTblEntry ShuffleTbl[] = {
      // Broadcast shuffle kinds can be performed with 'dup'.
      { TTI::SK_Broadcast, MVT::v8i8,  1 },
      { TTI::SK_Broadcast, MVT::v16i8, 1 },
      { TTI::SK_Broadcast, MVT::v4i16, 1 },
      { TTI::SK_Broadcast, MVT::v8i16, 1 },
      { TTI::SK_Broadcast, MVT::v2i32, 1 },
      { TTI::SK_Broadcast, MVT::v4i32, 1 },
      { TTI::SK_Broadcast, MVT::v2i64, 1 },
      { TTI::SK_Broadcast, MVT::v2f32, 1 },
      { TTI::SK_Broadcast, MVT::v4f32, 1 },
      { TTI::SK_Broadcast, MVT::v2f64, 1 },
      // Transpose shuffle kinds can be performed with 'trn1/trn2' and
      // 'zip1/zip2' instructions.
      { TTI::SK_Transpose, MVT::v8i8,  1 },
      { TTI::SK_Transpose, MVT::v16i8, 1 },
      { TTI::SK_Transpose, MVT::v4i16, 1 },
      { TTI::SK_Transpose, MVT::v8i16, 1 },
      { TTI::SK_Transpose, MVT::v2i32, 1 },
      { TTI::SK_Transpose, MVT::v4i32, 1 },
      { TTI::SK_Transpose, MVT::v2i64, 1 },
      { TTI::SK_Transpose, MVT::v2f32, 1 },
      { TTI::SK_Transpose, MVT::v4f32, 1 },
      { TTI::SK_Transpose, MVT::v2f64, 1 },
      // Select shuffle kinds.
      // TODO: handle vXi8/vXi16.
      { TTI::SK_Select, MVT::v2i32, 1 }, // mov.
      { TTI::SK_Select, MVT::v4i32, 2 }, // rev+trn (or similar).
      { TTI::SK_Select, MVT::v2i64, 1 }, // mov.
      { TTI::SK_Select, MVT::v2f32, 1 }, // mov.
      { TTI::SK_Select, MVT::v4f32, 2 }, // rev+trn (or similar).
      { TTI::SK_Select, MVT::v2f64, 1 }, // mov.
      // PermuteSingleSrc shuffle kinds.
      // TODO: handle vXi8/vXi16.
      { TTI::SK_PermuteSingleSrc, MVT::v2i32, 1 }, // mov.
      { TTI::SK_PermuteSingleSrc, MVT::v4i32, 3 }, // perfectshuffle worst case.
      { TTI::SK_PermuteSingleSrc, MVT::v2i64, 1 }, // mov.
      { TTI::SK_PermuteSingleSrc, MVT::v2f32, 1 }, // mov.
      { TTI::SK_PermuteSingleSrc, MVT::v4f32, 3 }, // perfectshuffle worst case.
      { TTI::SK_PermuteSingleSrc, MVT::v2f64, 1 }, // mov.
      // Reverse can be lowered with `rev`.
      { TTI::SK_Reverse, MVT::v2i32, 1 }, // mov.
      { TTI::SK_Reverse, MVT::v4i32, 2 }, // REV64; EXT
      { TTI::SK_Reverse, MVT::v2i64, 1 }, // mov.
      { TTI::SK_Reverse, MVT::v2f32, 1 }, // mov.
      { TTI::SK_Reverse, MVT::v4f32, 2 }, // REV64; EXT
      { TTI::SK_Reverse, MVT::v2f64, 1 }, // mov.
      // Broadcast shuffle kinds for scalable vectors
      { TTI::SK_Broadcast, MVT::nxv16i8,  1 },
      { TTI::SK_Broadcast, MVT::nxv8i16,  1 },
      { TTI::SK_Broadcast, MVT::nxv4i32,  1 },
      { TTI::SK_Broadcast, MVT::nxv2i64,  1 },
      { TTI::SK_Broadcast, MVT::nxv8f16,  1 },
      { TTI::SK_Broadcast, MVT::nxv8bf16, 1 },
      { TTI::SK_Broadcast, MVT::nxv4f32,  1 },
      { TTI::SK_Broadcast, MVT::nxv2f64,  1 },
      // Handle the cases for vector.reverse with scalable vectors
      { TTI::SK_Reverse, MVT::nxv16i8,  1 },
      { TTI::SK_Reverse, MVT::nxv8i16,  1 },
      { TTI::SK_Reverse, MVT::nxv4i32,  1 },
      { TTI::SK_Reverse, MVT::nxv2i64,  1 },
      { TTI::SK_Reverse, MVT::nxv8f16,  1 },
      { TTI::SK_Reverse, MVT::nxv8bf16, 1 },
      { TTI::SK_Reverse, MVT::nxv4f32,  1 },
      { TTI::SK_Reverse, MVT::nxv2f64,  1 },
      { TTI::SK_Reverse, MVT::nxv16i1,  1 },
      { TTI::SK_Reverse, MVT::nxv8i1,   1 },
      { TTI::SK_Reverse, MVT::nxv4i1,   1 },
      { TTI::SK_Reverse, MVT::nxv2i1,   1 },
    };
    std::pair<int, MVT> LT = TLI->getTypeLegalizationCost(DL, Tp);
    if (const auto *Entry = CostTableLookup(ShuffleTbl, Kind, LT.second))
      return LT.first * Entry->Cost;
  }

  return BaseT::getShuffleCost(Kind, Tp, Mask, Index, SubTp);
}