//===-- AArch64TargetTransformInfo.cpp - AArch64 specific TTI -------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//

#include "AArch64TargetTransformInfo.h"
#include "MCTargetDesc/AArch64AddressingModes.h"
#include "llvm/Analysis/LoopInfo.h"
#include "llvm/Analysis/TargetTransformInfo.h"
#include "llvm/CodeGen/BasicTTIImpl.h"
#include "llvm/Support/Debug.h"
#include "llvm/Target/CostTable.h"
#include "llvm/Target/TargetLowering.h"
#include <algorithm>
using namespace llvm;

#define DEBUG_TYPE "aarch64tti"

static cl::opt<bool> EnableFalkorHWPFUnrollFix("enable-falkor-hwpf-unroll-fix",
                                               cl::init(true), cl::Hidden);

bool AArch64TTIImpl::areInlineCompatible(const Function *Caller,
                                         const Function *Callee) const {
  const TargetMachine &TM = getTLI()->getTargetMachine();

  const FeatureBitset &CallerBits =
      TM.getSubtargetImpl(*Caller)->getFeatureBits();
  const FeatureBitset &CalleeBits =
      TM.getSubtargetImpl(*Callee)->getFeatureBits();

  // Inline a callee if its target-features are a subset of the caller's
  // target-features.
  return (CallerBits & CalleeBits) == CalleeBits;
}

/// \brief Calculate the cost of materializing a 64-bit value. This helper
/// method might only calculate a fraction of a larger immediate. Therefore it
/// is valid to return a cost of ZERO.
int AArch64TTIImpl::getIntImmCost(int64_t Val) {
  // Check if the immediate can be encoded within an instruction.
  if (Val == 0 || AArch64_AM::isLogicalImmediate(Val, 64))
    return 0;

  if (Val < 0)
    Val = ~Val;

  // Calculate how many moves we will need to materialize this constant.
  unsigned LZ = countLeadingZeros((uint64_t)Val);
  return (64 - LZ + 15) / 16;
}

/// \brief Calculate the cost of materializing the given constant.
int AArch64TTIImpl::getIntImmCost(const APInt &Imm, Type *Ty) {
  assert(Ty->isIntegerTy());

  unsigned BitSize = Ty->getPrimitiveSizeInBits();
  if (BitSize == 0)
    return ~0U;

  // Sign-extend all constants to a multiple of 64 bits.
  APInt ImmVal = Imm;
  if (BitSize & 0x3f)
    ImmVal = Imm.sext((BitSize + 63) & ~0x3fU);

  // Split the constant into 64-bit chunks and calculate the cost for each
  // chunk.
  int Cost = 0;
  for (unsigned ShiftVal = 0; ShiftVal < BitSize; ShiftVal += 64) {
    APInt Tmp = ImmVal.ashr(ShiftVal).sextOrTrunc(64);
    int64_t Val = Tmp.getSExtValue();
    Cost += getIntImmCost(Val);
  }
  // We need at least one instruction to materialize the constant.
  return std::max(1, Cost);
}

int AArch64TTIImpl::getIntImmCost(unsigned Opcode, unsigned Idx,
                                  const APInt &Imm, Type *Ty) {
  assert(Ty->isIntegerTy());

  unsigned BitSize = Ty->getPrimitiveSizeInBits();
  // There is no cost model for constants with a bit size of 0. Return TCC_Free
  // here, so that constant hoisting will ignore this constant.
  if (BitSize == 0)
    return TTI::TCC_Free;

  unsigned ImmIdx = ~0U;
  switch (Opcode) {
  default:
    return TTI::TCC_Free;
  case Instruction::GetElementPtr:
    // Always hoist the base address of a GetElementPtr.
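    // Returning a cost above TCC_Basic here tells the constant hoisting pass
    // that hoisting a constant base address is likely to pay off.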
    if (Idx == 0)
      return 2 * TTI::TCC_Basic;
    return TTI::TCC_Free;
  case Instruction::Store:
    ImmIdx = 0;
    break;
  case Instruction::Add:
  case Instruction::Sub:
  case Instruction::Mul:
  case Instruction::UDiv:
  case Instruction::SDiv:
  case Instruction::URem:
  case Instruction::SRem:
  case Instruction::And:
  case Instruction::Or:
  case Instruction::Xor:
  case Instruction::ICmp:
    ImmIdx = 1;
    break;
  // Always return TCC_Free for the shift value of a shift instruction.
  case Instruction::Shl:
  case Instruction::LShr:
  case Instruction::AShr:
    if (Idx == 1)
      return TTI::TCC_Free;
    break;
  case Instruction::Trunc:
  case Instruction::ZExt:
  case Instruction::SExt:
  case Instruction::IntToPtr:
  case Instruction::PtrToInt:
  case Instruction::BitCast:
  case Instruction::PHI:
  case Instruction::Call:
  case Instruction::Select:
  case Instruction::Ret:
  case Instruction::Load:
    break;
  }

  if (Idx == ImmIdx) {
    int NumConstants = (BitSize + 63) / 64;
    int Cost = AArch64TTIImpl::getIntImmCost(Imm, Ty);
    return (Cost <= NumConstants * TTI::TCC_Basic)
               ? static_cast<int>(TTI::TCC_Free)
               : Cost;
  }
  return AArch64TTIImpl::getIntImmCost(Imm, Ty);
}

int AArch64TTIImpl::getIntImmCost(Intrinsic::ID IID, unsigned Idx,
                                  const APInt &Imm, Type *Ty) {
  assert(Ty->isIntegerTy());

  unsigned BitSize = Ty->getPrimitiveSizeInBits();
  // There is no cost model for constants with a bit size of 0. Return TCC_Free
  // here, so that constant hoisting will ignore this constant.
  if (BitSize == 0)
    return TTI::TCC_Free;

  switch (IID) {
  default:
    return TTI::TCC_Free;
  case Intrinsic::sadd_with_overflow:
  case Intrinsic::uadd_with_overflow:
  case Intrinsic::ssub_with_overflow:
  case Intrinsic::usub_with_overflow:
  case Intrinsic::smul_with_overflow:
  case Intrinsic::umul_with_overflow:
    if (Idx == 1) {
      int NumConstants = (BitSize + 63) / 64;
      int Cost = AArch64TTIImpl::getIntImmCost(Imm, Ty);
      return (Cost <= NumConstants * TTI::TCC_Basic)
                 ? static_cast<int>(TTI::TCC_Free)
                 : Cost;
    }
    break;
  case Intrinsic::experimental_stackmap:
    if ((Idx < 2) || (Imm.getBitWidth() <= 64 && isInt<64>(Imm.getSExtValue())))
      return TTI::TCC_Free;
    break;
  case Intrinsic::experimental_patchpoint_void:
  case Intrinsic::experimental_patchpoint_i64:
    if ((Idx < 4) || (Imm.getBitWidth() <= 64 && isInt<64>(Imm.getSExtValue())))
      return TTI::TCC_Free;
    break;
  }
  return AArch64TTIImpl::getIntImmCost(Imm, Ty);
}

TargetTransformInfo::PopcntSupportKind
AArch64TTIImpl::getPopcntSupport(unsigned TyWidth) {
  assert(isPowerOf2_32(TyWidth) && "Ty width must be power of 2");
  if (TyWidth == 32 || TyWidth == 64)
    return TTI::PSK_FastHardware;
  // TODO: AArch64TargetLowering::LowerCTPOP() supports 128bit popcount.
  return TTI::PSK_Software;
}

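// Returns true if an operation with the given opcode and operands, producing
// a vector value of type DstTy, can be matched to an AArch64 widening
// instruction. For example, an add of two zero-extended <8 x i8> vectors can
// lower to uaddl, and an add of an <8 x i16> vector and a zero-extended
// <8 x i8> vector can lower to uaddw.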
bool AArch64TTIImpl::isWideningInstruction(Type *DstTy, unsigned Opcode,
                                           ArrayRef<const Value *> Args) {

  // A helper that returns a vector type from the given type. The number of
  // elements in type DstTy determines the vector width.
  auto toVectorTy = [&](Type *ArgTy) {
    return VectorType::get(ArgTy->getScalarType(),
                           DstTy->getVectorNumElements());
  };

  // Exit early if DstTy is not a vector type whose elements are at least
  // 16 bits wide.
  if (!DstTy->isVectorTy() || DstTy->getScalarSizeInBits() < 16)
    return false;

  // Determine if the operation has a widening variant. We consider both the
  // "long" (e.g., usubl) and "wide" (e.g., usubw) versions of the
  // instructions.
  //
  // TODO: Add additional widening operations (e.g., mul, shl, etc.) once we
  //       verify that their extending operands are eliminated during code
  //       generation.
  switch (Opcode) {
  case Instruction::Add: // UADDL(2), SADDL(2), UADDW(2), SADDW(2).
  case Instruction::Sub: // USUBL(2), SSUBL(2), USUBW(2), SSUBW(2).
    break;
  default:
    return false;
  }

  // To be a widening instruction (either the "wide" or "long" version), the
  // second operand must be a sign- or zero-extend having a single user. We
  // only consider extends having a single user because they may otherwise not
  // be eliminated.
  if (Args.size() != 2 ||
      (!isa<SExtInst>(Args[1]) && !isa<ZExtInst>(Args[1])) ||
      !Args[1]->hasOneUse())
    return false;
  auto *Extend = cast<CastInst>(Args[1]);

  // Legalize the destination type and ensure it can be used in a widening
  // operation.
  auto DstTyL = TLI->getTypeLegalizationCost(DL, DstTy);
  unsigned DstElTySize = DstTyL.second.getScalarSizeInBits();
  if (!DstTyL.second.isVector() || DstElTySize != DstTy->getScalarSizeInBits())
    return false;

  // Legalize the source type and ensure it can be used in a widening
  // operation.
  Type *SrcTy = toVectorTy(Extend->getSrcTy());
  auto SrcTyL = TLI->getTypeLegalizationCost(DL, SrcTy);
  unsigned SrcElTySize = SrcTyL.second.getScalarSizeInBits();
  if (!SrcTyL.second.isVector() || SrcElTySize != SrcTy->getScalarSizeInBits())
    return false;

  // Get the total number of vector elements in the legalized types.
  unsigned NumDstEls = DstTyL.first * DstTyL.second.getVectorNumElements();
  unsigned NumSrcEls = SrcTyL.first * SrcTyL.second.getVectorNumElements();

  // Return true if the legalized types have the same number of vector elements
  // and the destination element type size is twice that of the source type.
  return NumDstEls == NumSrcEls && 2 * SrcElTySize == DstElTySize;
}

int AArch64TTIImpl::getCastInstrCost(unsigned Opcode, Type *Dst, Type *Src,
                                     const Instruction *I) {
  int ISD = TLI->InstructionOpcodeToISD(Opcode);
  assert(ISD && "Invalid opcode");

  // If the cast is observable, and it is used by a widening instruction (e.g.,
  // uaddl, saddw, etc.), it may be free.
  if (I && I->hasOneUse()) {
    auto *SingleUser = cast<Instruction>(*I->user_begin());
    SmallVector<const Value *, 4> Operands(SingleUser->operand_values());
    if (isWideningInstruction(Dst, SingleUser->getOpcode(), Operands)) {
      // If the cast is the second operand, it is free. We will generate either
      // a "wide" or "long" version of the widening instruction.
      if (I == SingleUser->getOperand(1))
        return 0;
      // If the cast is not the second operand, it will be free if it looks the
      // same as the second operand. In this case, we will generate a "long"
      // version of the widening instruction.
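      // For example, in add(sext(a), sext(b)) with matching source types, both
      // extends end up free: the pair lowers to a single saddl.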
      if (auto *Cast = dyn_cast<CastInst>(SingleUser->getOperand(1)))
        if (I->getOpcode() == Cast->getOpcode() &&
            cast<CastInst>(I)->getSrcTy() == Cast->getSrcTy())
          return 0;
    }
  }

  EVT SrcTy = TLI->getValueType(DL, Src);
  EVT DstTy = TLI->getValueType(DL, Dst);

  if (!SrcTy.isSimple() || !DstTy.isSimple())
    return BaseT::getCastInstrCost(Opcode, Dst, Src);

  static const TypeConversionCostTblEntry
  ConversionTbl[] = {
    { ISD::TRUNCATE, MVT::v4i16, MVT::v4i32,  1 },
    { ISD::TRUNCATE, MVT::v4i32, MVT::v4i64,  0 },
    { ISD::TRUNCATE, MVT::v8i8,  MVT::v8i32,  3 },
    { ISD::TRUNCATE, MVT::v16i8, MVT::v16i32, 6 },

    // The number of shll instructions for the extension.
    { ISD::SIGN_EXTEND, MVT::v4i64,  MVT::v4i16, 3 },
    { ISD::ZERO_EXTEND, MVT::v4i64,  MVT::v4i16, 3 },
    { ISD::SIGN_EXTEND, MVT::v4i64,  MVT::v4i32, 2 },
    { ISD::ZERO_EXTEND, MVT::v4i64,  MVT::v4i32, 2 },
    { ISD::SIGN_EXTEND, MVT::v8i32,  MVT::v8i8,  3 },
    { ISD::ZERO_EXTEND, MVT::v8i32,  MVT::v8i8,  3 },
    { ISD::SIGN_EXTEND, MVT::v8i32,  MVT::v8i16, 2 },
    { ISD::ZERO_EXTEND, MVT::v8i32,  MVT::v8i16, 2 },
    { ISD::SIGN_EXTEND, MVT::v8i64,  MVT::v8i8,  7 },
    { ISD::ZERO_EXTEND, MVT::v8i64,  MVT::v8i8,  7 },
    { ISD::SIGN_EXTEND, MVT::v8i64,  MVT::v8i16, 6 },
    { ISD::ZERO_EXTEND, MVT::v8i64,  MVT::v8i16, 6 },
    { ISD::SIGN_EXTEND, MVT::v16i16, MVT::v16i8, 2 },
    { ISD::ZERO_EXTEND, MVT::v16i16, MVT::v16i8, 2 },
    { ISD::SIGN_EXTEND, MVT::v16i32, MVT::v16i8, 6 },
    { ISD::ZERO_EXTEND, MVT::v16i32, MVT::v16i8, 6 },

    // LowerVectorINT_TO_FP:
    { ISD::SINT_TO_FP, MVT::v2f32, MVT::v2i32, 1 },
    { ISD::SINT_TO_FP, MVT::v4f32, MVT::v4i32, 1 },
    { ISD::SINT_TO_FP, MVT::v2f64, MVT::v2i64, 1 },
    { ISD::UINT_TO_FP, MVT::v2f32, MVT::v2i32, 1 },
    { ISD::UINT_TO_FP, MVT::v4f32, MVT::v4i32, 1 },
    { ISD::UINT_TO_FP, MVT::v2f64, MVT::v2i64, 1 },

    // Complex: to v2f32
    { ISD::SINT_TO_FP, MVT::v2f32, MVT::v2i8,  3 },
    { ISD::SINT_TO_FP, MVT::v2f32, MVT::v2i16, 3 },
    { ISD::SINT_TO_FP, MVT::v2f32, MVT::v2i64, 2 },
    { ISD::UINT_TO_FP, MVT::v2f32, MVT::v2i8,  3 },
    { ISD::UINT_TO_FP, MVT::v2f32, MVT::v2i16, 3 },
    { ISD::UINT_TO_FP, MVT::v2f32, MVT::v2i64, 2 },

    // Complex: to v4f32
    { ISD::SINT_TO_FP, MVT::v4f32, MVT::v4i8,  4 },
    { ISD::SINT_TO_FP, MVT::v4f32, MVT::v4i16, 2 },
    { ISD::UINT_TO_FP, MVT::v4f32, MVT::v4i8,  3 },
    { ISD::UINT_TO_FP, MVT::v4f32, MVT::v4i16, 2 },

    // Complex: to v8f32
    { ISD::SINT_TO_FP, MVT::v8f32, MVT::v8i8,  10 },
    { ISD::SINT_TO_FP, MVT::v8f32, MVT::v8i16, 4 },
    { ISD::UINT_TO_FP, MVT::v8f32, MVT::v8i8,  10 },
    { ISD::UINT_TO_FP, MVT::v8f32, MVT::v8i16, 4 },

    // Complex: to v16f32
    { ISD::SINT_TO_FP, MVT::v16f32, MVT::v16i8, 21 },
    { ISD::UINT_TO_FP, MVT::v16f32, MVT::v16i8, 21 },

    // Complex: to v2f64
    { ISD::SINT_TO_FP, MVT::v2f64, MVT::v2i8,  4 },
    { ISD::SINT_TO_FP, MVT::v2f64, MVT::v2i16, 4 },
    { ISD::SINT_TO_FP, MVT::v2f64, MVT::v2i32, 2 },
    { ISD::UINT_TO_FP, MVT::v2f64, MVT::v2i8,  4 },
    { ISD::UINT_TO_FP, MVT::v2f64, MVT::v2i16, 4 },
    { ISD::UINT_TO_FP, MVT::v2f64, MVT::v2i32, 2 },

    // LowerVectorFP_TO_INT
    { ISD::FP_TO_SINT, MVT::v2i32, MVT::v2f32, 1 },
    { ISD::FP_TO_SINT, MVT::v4i32, MVT::v4f32, 1 },
    { ISD::FP_TO_SINT, MVT::v2i64, MVT::v2f64, 1 },
    { ISD::FP_TO_UINT, MVT::v2i32, MVT::v2f32, 1 },
    { ISD::FP_TO_UINT, MVT::v4i32, MVT::v4f32, 1 },
    { ISD::FP_TO_UINT, MVT::v2i64, MVT::v2f64, 1 },

    // Complex, from v2f32: legal type is v2i32 (no cost) or v2i64 (1 ext).
    { ISD::FP_TO_SINT, MVT::v2i64, MVT::v2f32, 2 },
    { ISD::FP_TO_SINT, MVT::v2i16, MVT::v2f32, 1 },
    { ISD::FP_TO_SINT, MVT::v2i8,  MVT::v2f32, 1 },
    { ISD::FP_TO_UINT, MVT::v2i64, MVT::v2f32, 2 },
    { ISD::FP_TO_UINT, MVT::v2i16, MVT::v2f32, 1 },
    { ISD::FP_TO_UINT, MVT::v2i8,  MVT::v2f32, 1 },

    // Complex, from v4f32: legal type is v4i16, 1 narrowing => ~2
    { ISD::FP_TO_SINT, MVT::v4i16, MVT::v4f32, 2 },
    { ISD::FP_TO_SINT, MVT::v4i8,  MVT::v4f32, 2 },
    { ISD::FP_TO_UINT, MVT::v4i16, MVT::v4f32, 2 },
    { ISD::FP_TO_UINT, MVT::v4i8,  MVT::v4f32, 2 },

    // Complex, from v2f64: legal type is v2i32, 1 narrowing => ~2.
    { ISD::FP_TO_SINT, MVT::v2i32, MVT::v2f64, 2 },
    { ISD::FP_TO_SINT, MVT::v2i16, MVT::v2f64, 2 },
    { ISD::FP_TO_SINT, MVT::v2i8,  MVT::v2f64, 2 },
    { ISD::FP_TO_UINT, MVT::v2i32, MVT::v2f64, 2 },
    { ISD::FP_TO_UINT, MVT::v2i16, MVT::v2f64, 2 },
    { ISD::FP_TO_UINT, MVT::v2i8,  MVT::v2f64, 2 },
  };

  if (const auto *Entry = ConvertCostTableLookup(ConversionTbl, ISD,
                                                 DstTy.getSimpleVT(),
                                                 SrcTy.getSimpleVT()))
    return Entry->Cost;

  return BaseT::getCastInstrCost(Opcode, Dst, Src);
}

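// Cost of extracting a vector element and then sign- or zero-extending it to
// a scalar. For example, extracting an i16 lane and sign-extending it to i32
// is a single smov, so only the extract itself is charged.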
int AArch64TTIImpl::getExtractWithExtendCost(unsigned Opcode, Type *Dst,
                                             VectorType *VecTy,
                                             unsigned Index) {

  // Make sure we were given a valid extend opcode.
  assert((Opcode == Instruction::SExt || Opcode == Instruction::ZExt) &&
         "Invalid opcode");

  // We are extending an element we extract from a vector, so the source type
  // of the extend is the element type of the vector.
  auto *Src = VecTy->getElementType();

  // Sign- and zero-extends are for integer types only.
  assert(isa<IntegerType>(Dst) && isa<IntegerType>(Src) && "Invalid type");

  // Get the cost for the extract. We compute the cost (if any) for the extend
  // below.
  auto Cost = getVectorInstrCost(Instruction::ExtractElement, VecTy, Index);

  // Legalize the types.
  auto VecLT = TLI->getTypeLegalizationCost(DL, VecTy);
  auto DstVT = TLI->getValueType(DL, Dst);
  auto SrcVT = TLI->getValueType(DL, Src);

  // If the resulting type is still a vector and the destination type is legal,
  // we may get the extension for free. If not, get the default cost for the
  // extend.
  if (!VecLT.second.isVector() || !TLI->isTypeLegal(DstVT))
    return Cost + getCastInstrCost(Opcode, Dst, Src);

  // The destination type should be larger than the element type. If not, get
  // the default cost for the extend.
  if (DstVT.getSizeInBits() < SrcVT.getSizeInBits())
    return Cost + getCastInstrCost(Opcode, Dst, Src);

  switch (Opcode) {
  default:
    llvm_unreachable("Opcode should be either SExt or ZExt");

  // For sign-extends, we only need a smov, which performs the extension
  // automatically.
  case Instruction::SExt:
    return Cost;

  // For zero-extends, the extend is performed automatically by a umov unless
  // the destination type is i64 and the element type is i8 or i16.
  case Instruction::ZExt:
    if (DstVT.getSizeInBits() != 64u || SrcVT.getSizeInBits() == 32u)
      return Cost;
  }

  // If we are unable to perform the extend for free, get the default cost.
  return Cost + getCastInstrCost(Opcode, Dst, Src);
}

int AArch64TTIImpl::getVectorInstrCost(unsigned Opcode, Type *Val,
                                       unsigned Index) {
  assert(Val->isVectorTy() && "This must be a vector type");

  if (Index != -1U) {
    // Legalize the type.
    std::pair<int, MVT> LT = TLI->getTypeLegalizationCost(DL, Val);

    // This type is legalized to a scalar type.
    if (!LT.second.isVector())
      return 0;

    // The type may be split. Normalize the index to the new type.
    unsigned Width = LT.second.getVectorNumElements();
    Index = Index % Width;

    // The element at index zero is already inside the vector.
    if (Index == 0)
      return 0;
  }

  // All other insert/extracts cost this much.
  return ST->getVectorInsertExtractBaseCost();
}

int AArch64TTIImpl::getArithmeticInstrCost(
    unsigned Opcode, Type *Ty, TTI::OperandValueKind Opd1Info,
    TTI::OperandValueKind Opd2Info, TTI::OperandValueProperties Opd1PropInfo,
    TTI::OperandValueProperties Opd2PropInfo, ArrayRef<const Value *> Args) {
  // Legalize the type.
  std::pair<int, MVT> LT = TLI->getTypeLegalizationCost(DL, Ty);

  // If the instruction is a widening instruction (e.g., uaddl, saddw, etc.),
  // add in the widening overhead specified by the sub-target. Since the
  // extends feeding widening instructions are performed automatically, they
  // aren't present in the generated code and have a zero cost. By adding a
  // widening overhead here, we attach the total cost of the combined operation
  // to the widening instruction.
  int Cost = 0;
  if (isWideningInstruction(Ty, Opcode, Args))
    Cost += ST->getWideningBaseCost();

  int ISD = TLI->InstructionOpcodeToISD(Opcode);

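  // Scalar signed division by a power of two expands to ADD + CMP + CSEL + ASR
  // (see below); e.g., "sdiv i32 %x, 8" becomes roughly:
  //   add  w8, w0, #7
  //   cmp  w0, #0
  //   csel w8, w8, w0, lt
  //   asr  w0, w8, #3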
  if (ISD == ISD::SDIV &&
      Opd2Info == TargetTransformInfo::OK_UniformConstantValue &&
      Opd2PropInfo == TargetTransformInfo::OP_PowerOf2) {
    // On AArch64, scalar signed division by a power-of-two constant is
    // normally expanded to the sequence ADD + CMP + SELECT + SRA.
    // The OperandValue properties may not be the same as those of the
    // previous operation; conservatively assume OP_None.
    Cost += getArithmeticInstrCost(Instruction::Add, Ty, Opd1Info, Opd2Info,
                                   TargetTransformInfo::OP_None,
                                   TargetTransformInfo::OP_None);
    Cost += getArithmeticInstrCost(Instruction::Sub, Ty, Opd1Info, Opd2Info,
                                   TargetTransformInfo::OP_None,
                                   TargetTransformInfo::OP_None);
    Cost += getArithmeticInstrCost(Instruction::Select, Ty, Opd1Info, Opd2Info,
                                   TargetTransformInfo::OP_None,
                                   TargetTransformInfo::OP_None);
    Cost += getArithmeticInstrCost(Instruction::AShr, Ty, Opd1Info, Opd2Info,
                                   TargetTransformInfo::OP_None,
                                   TargetTransformInfo::OP_None);
    return Cost;
  }

  switch (ISD) {
  default:
    return Cost + BaseT::getArithmeticInstrCost(Opcode, Ty, Opd1Info, Opd2Info,
                                                Opd1PropInfo, Opd2PropInfo);
  case ISD::ADD:
  case ISD::MUL:
  case ISD::XOR:
  case ISD::OR:
  case ISD::AND:
    // These nodes are marked as 'custom' for combining purposes only.
    // We know that they are legal. See LowerAdd in ISelLowering.
    return (Cost + 1) * LT.first;
  }
}

int AArch64TTIImpl::getAddressComputationCost(Type *Ty, ScalarEvolution *SE,
                                              const SCEV *Ptr) {
  // Address computations in vectorized code with non-consecutive addresses
  // will likely result in more instructions compared to scalar code where the
  // computation can more often be merged into the index mode. The resulting
  // extra micro-ops can significantly decrease throughput.
  unsigned NumVectorInstToHideOverhead = 10;
  int MaxMergeDistance = 64;

  if (Ty->isVectorTy() && SE &&
      !BaseT::isConstantStridedAccessLessThan(SE, Ptr, MaxMergeDistance + 1))
    return NumVectorInstToHideOverhead;

  // In many cases the address computation is not merged into the instruction
  // addressing mode.
  return 1;
}

int AArch64TTIImpl::getCmpSelInstrCost(unsigned Opcode, Type *ValTy,
                                       Type *CondTy, const Instruction *I) {

  int ISD = TLI->InstructionOpcodeToISD(Opcode);
  // We don't lower vector selects well when they are wider than the register
  // width.
  if (ValTy->isVectorTy() && ISD == ISD::SELECT) {
    // We would need this many instructions to hide the scalarization happening.
    const int AmortizationCost = 20;
    static const TypeConversionCostTblEntry
    VectorSelectTbl[] = {
      { ISD::SELECT, MVT::v16i1, MVT::v16i16, 16 },
      { ISD::SELECT, MVT::v8i1,  MVT::v8i32,  8 },
      { ISD::SELECT, MVT::v16i1, MVT::v16i32, 16 },
      { ISD::SELECT, MVT::v4i1,  MVT::v4i64,  4 * AmortizationCost },
      { ISD::SELECT, MVT::v8i1,  MVT::v8i64,  8 * AmortizationCost },
      { ISD::SELECT, MVT::v16i1, MVT::v16i64, 16 * AmortizationCost }
    };

    EVT SelCondTy = TLI->getValueType(DL, CondTy);
    EVT SelValTy = TLI->getValueType(DL, ValTy);
    if (SelCondTy.isSimple() && SelValTy.isSimple()) {
      if (const auto *Entry = ConvertCostTableLookup(VectorSelectTbl, ISD,
                                                     SelCondTy.getSimpleVT(),
                                                     SelValTy.getSimpleVT()))
        return Entry->Cost;
    }
  }
  return BaseT::getCmpSelInstrCost(Opcode, ValTy, CondTy, I);
}

int AArch64TTIImpl::getMemoryOpCost(unsigned Opcode, Type *Ty,
                                    unsigned Alignment, unsigned AddressSpace,
                                    const Instruction *I) {
  auto LT = TLI->getTypeLegalizationCost(DL, Ty);

  if (ST->isMisaligned128StoreSlow() && Opcode == Instruction::Store &&
      LT.second.is128BitVector() && Alignment < 16) {
    // Unaligned stores are extremely inefficient. We don't split all
    // unaligned 128-bit stores because of the negative impact that has been
    // observed in practice on inlined block copy code.
    // We make such stores expensive so that we will only vectorize if there
    // are 6 other instructions getting vectorized.
    const int AmortizationCost = 6;

    return LT.first * 2 * AmortizationCost;
  }

  if (Ty->isVectorTy() && Ty->getVectorElementType()->isIntegerTy(8) &&
      Ty->getVectorNumElements() < 8) {
    // We scalarize the loads/stores because there is no v.4b register and we
    // have to promote the elements to v.4h.
    unsigned NumVecElts = Ty->getVectorNumElements();
    unsigned NumVectorizableInstsToAmortize = NumVecElts * 2;
    // We generate 2 instructions per vector element.
    return NumVectorizableInstsToAmortize * NumVecElts * 2;
  }

  return LT.first;
}

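// Cost of an interleaved memory access that can be matched to AArch64
// ldN/stN instructions; e.g., a factor-2 interleaved load of <8 x i16> can be
// lowered to a single "ld2 { v0.4h, v1.4h }, [x0]".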
int AArch64TTIImpl::getInterleavedMemoryOpCost(unsigned Opcode, Type *VecTy,
                                               unsigned Factor,
                                               ArrayRef<unsigned> Indices,
                                               unsigned Alignment,
                                               unsigned AddressSpace) {
  assert(Factor >= 2 && "Invalid interleave factor");
  assert(isa<VectorType>(VecTy) && "Expect a vector type");

  if (Factor <= TLI->getMaxSupportedInterleaveFactor()) {
    unsigned NumElts = VecTy->getVectorNumElements();
    auto *SubVecTy = VectorType::get(VecTy->getScalarType(), NumElts / Factor);

    // ldN/stN only support legal vector types of size 64 or 128 in bits.
    // Accesses having vector types that are a multiple of 128 bits can be
    // matched to more than one ldN/stN instruction.
    if (NumElts % Factor == 0 &&
        TLI->isLegalInterleavedAccessType(SubVecTy, DL))
      return Factor * TLI->getNumInterleavedAccesses(SubVecTy, DL);
  }

  return BaseT::getInterleavedMemoryOpCost(Opcode, VecTy, Factor, Indices,
                                           Alignment, AddressSpace);
}

int AArch64TTIImpl::getCostOfKeepingLiveOverCall(ArrayRef<Type *> Tys) {
  int Cost = 0;
  for (auto *I : Tys) {
    if (!I->isVectorTy())
      continue;
    if (I->getScalarSizeInBits() * I->getVectorNumElements() == 128)
      Cost += getMemoryOpCost(Instruction::Store, I, 128, 0) +
              getMemoryOpCost(Instruction::Load, I, 128, 0);
  }
  return Cost;
}

unsigned AArch64TTIImpl::getMaxInterleaveFactor(unsigned VF) {
  return ST->getMaxInterleaveFactor();
}

// For Falkor, we want to avoid having too many strided loads in a loop since
// that can exhaust the HW prefetcher resources. We adjust the unroller
// MaxCount preference below to attempt to ensure unrolling doesn't create too
// many strided loads.
static void
getFalkorUnrollingPreferences(Loop *L, ScalarEvolution &SE,
                              TargetTransformInfo::UnrollingPreferences &UP) {
  enum { MaxStridedLoads = 7 };
  auto countStridedLoads = [](Loop *L, ScalarEvolution &SE) {
    int StridedLoads = 0;
    // FIXME? We could make this more precise by looking at the CFG and
    // e.g. not counting loads in each side of an if-then-else diamond.
    for (const auto BB : L->blocks()) {
      for (auto &I : *BB) {
        LoadInst *LMemI = dyn_cast<LoadInst>(&I);
        if (!LMemI)
          continue;

        Value *PtrValue = LMemI->getPointerOperand();
        if (L->isLoopInvariant(PtrValue))
          continue;

        const SCEV *LSCEV = SE.getSCEV(PtrValue);
        const SCEVAddRecExpr *LSCEVAddRec = dyn_cast<SCEVAddRecExpr>(LSCEV);
        if (!LSCEVAddRec || !LSCEVAddRec->isAffine())
          continue;

        // FIXME? We could take pairing of unrolled load copies into account
        // by looking at the AddRec, but we would probably have to limit this
        // to loops with no stores or other memory optimization barriers.
        ++StridedLoads;
        // We've seen enough strided loads that seeing more won't make a
        // difference.
        if (StridedLoads > MaxStridedLoads / 2)
          return StridedLoads;
      }
    }
    return StridedLoads;
  };

  int StridedLoads = countStridedLoads(L, SE);
  DEBUG(dbgs() << "falkor-hwpf: detected " << StridedLoads
               << " strided loads\n");
  // Pick the largest power of 2 unroll count that won't result in too many
  // strided loads.
  if (StridedLoads) {
    UP.MaxCount = 1 << Log2_32(MaxStridedLoads / StridedLoads);
    DEBUG(dbgs() << "falkor-hwpf: setting unroll MaxCount to " << UP.MaxCount
                 << '\n');
  }
}

void AArch64TTIImpl::getUnrollingPreferences(Loop *L, ScalarEvolution &SE,
                                             TTI::UnrollingPreferences &UP) {
  // Enable partial unrolling and runtime unrolling.
  BaseT::getUnrollingPreferences(L, SE, UP);

  // Inner loops are more likely to be hot, and their runtime checks can often
  // be hoisted out by LICM, so the overhead is lower; use a larger threshold
  // to unroll more of them.
  if (L->getLoopDepth() > 1)
    UP.PartialThreshold *= 2;

  // Disable partial & runtime unrolling on -Os.
  UP.PartialOptSizeThreshold = 0;

  if (ST->getProcFamily() == AArch64Subtarget::Falkor &&
      EnableFalkorHWPFUnrollFix)
    getFalkorUnrollingPreferences(L, SE, UP);
}

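// Try to express the result of a recognized neon ldN/stN intrinsic as a value
// of type ExpectedType: for stN, rebuild the stored struct from the
// intrinsic's vector arguments; for ldN, the intrinsic itself already is the
// result if the types match.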
Value *AArch64TTIImpl::getOrCreateResultFromMemIntrinsic(IntrinsicInst *Inst,
                                                         Type *ExpectedType) {
  switch (Inst->getIntrinsicID()) {
  default:
    return nullptr;
  case Intrinsic::aarch64_neon_st2:
  case Intrinsic::aarch64_neon_st3:
  case Intrinsic::aarch64_neon_st4: {
    // Create a struct type
    StructType *ST = dyn_cast<StructType>(ExpectedType);
    if (!ST)
      return nullptr;
    unsigned NumElts = Inst->getNumArgOperands() - 1;
    if (ST->getNumElements() != NumElts)
      return nullptr;
    for (unsigned i = 0, e = NumElts; i != e; ++i) {
      if (Inst->getArgOperand(i)->getType() != ST->getElementType(i))
        return nullptr;
    }
    Value *Res = UndefValue::get(ExpectedType);
    IRBuilder<> Builder(Inst);
    for (unsigned i = 0, e = NumElts; i != e; ++i) {
      Value *L = Inst->getArgOperand(i);
      Res = Builder.CreateInsertValue(Res, L, i);
    }
    return Res;
  }
  case Intrinsic::aarch64_neon_ld2:
  case Intrinsic::aarch64_neon_ld3:
  case Intrinsic::aarch64_neon_ld4:
    if (Inst->getType() == ExpectedType)
      return Inst;
    return nullptr;
  }
}

bool AArch64TTIImpl::getTgtMemIntrinsic(IntrinsicInst *Inst,
                                        MemIntrinsicInfo &Info) {
  switch (Inst->getIntrinsicID()) {
  default:
    break;
  case Intrinsic::aarch64_neon_ld2:
  case Intrinsic::aarch64_neon_ld3:
  case Intrinsic::aarch64_neon_ld4:
    Info.ReadMem = true;
    Info.WriteMem = false;
    Info.PtrVal = Inst->getArgOperand(0);
    break;
  case Intrinsic::aarch64_neon_st2:
  case Intrinsic::aarch64_neon_st3:
  case Intrinsic::aarch64_neon_st4:
    Info.ReadMem = false;
    Info.WriteMem = true;
    Info.PtrVal = Inst->getArgOperand(Inst->getNumArgOperands() - 1);
    break;
  }

  switch (Inst->getIntrinsicID()) {
  default:
    return false;
  case Intrinsic::aarch64_neon_ld2:
  case Intrinsic::aarch64_neon_st2:
    Info.MatchingId = VECTOR_LDST_TWO_ELEMENTS;
    break;
  case Intrinsic::aarch64_neon_ld3:
  case Intrinsic::aarch64_neon_st3:
    Info.MatchingId = VECTOR_LDST_THREE_ELEMENTS;
    break;
  case Intrinsic::aarch64_neon_ld4:
  case Intrinsic::aarch64_neon_st4:
    Info.MatchingId = VECTOR_LDST_FOUR_ELEMENTS;
    break;
  }
  return true;
}

/// See if \p I should be considered for address type promotion. We check if
/// \p I is a sext of the right type that is used in memory accesses. If it is
/// used in a "complex" getelementptr, we allow it to be promoted without
/// finding other sext instructions that sign extended the same initial value.
/// A getelementptr is considered "complex" if it has more than 2 operands.
bool AArch64TTIImpl::shouldConsiderAddressTypePromotion(
    const Instruction &I, bool &AllowPromotionWithoutCommonHeader) {
  bool Considerable = false;
  AllowPromotionWithoutCommonHeader = false;
  if (!isa<SExtInst>(&I))
    return false;
  Type *ConsideredSExtType =
      Type::getInt64Ty(I.getParent()->getParent()->getContext());
  if (I.getType() != ConsideredSExtType)
    return false;
  // See if the sext is the one with the right type and used in at least one
  // GetElementPtrInst.
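  // For example, a sext whose result is the last index of
  // "getelementptr inbounds %struct.S, %struct.S* %p, i64 0, i32 1, i64 %idx"
  // (more than two operands) can be promoted without requiring other sext
  // instructions that share the same initial value.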
  for (const User *U : I.users()) {
    if (const GetElementPtrInst *GEPInst = dyn_cast<GetElementPtrInst>(U)) {
      Considerable = true;
      // A getelementptr is considered "complex" if it has more than 2
      // operands. We will promote a SExt used in such a complex GEP, as we
      // expect some computation to be merged if it is done on 64 bits.
      if (GEPInst->getNumOperands() > 2) {
        AllowPromotionWithoutCommonHeader = true;
        break;
      }
    }
  }
  return Considerable;
}

unsigned AArch64TTIImpl::getCacheLineSize() {
  return ST->getCacheLineSize();
}

unsigned AArch64TTIImpl::getPrefetchDistance() {
  return ST->getPrefetchDistance();
}

unsigned AArch64TTIImpl::getMinPrefetchStride() {
  return ST->getMinPrefetchStride();
}

unsigned AArch64TTIImpl::getMaxPrefetchIterationsAhead() {
  return ST->getMaxPrefetchIterationsAhead();
}

bool AArch64TTIImpl::useReductionIntrinsic(unsigned Opcode, Type *Ty,
                                           TTI::ReductionFlags Flags) const {
  assert(isa<VectorType>(Ty) && "Expected Ty to be a vector type");
  unsigned ScalarBits = Ty->getScalarSizeInBits();
  switch (Opcode) {
  case Instruction::FAdd:
  case Instruction::FMul:
  case Instruction::And:
  case Instruction::Or:
  case Instruction::Xor:
  case Instruction::Mul:
    return false;
  case Instruction::Add:
    return ScalarBits * Ty->getVectorNumElements() >= 128;
  case Instruction::ICmp:
    return (ScalarBits < 64) &&
           (ScalarBits * Ty->getVectorNumElements() >= 128);
  case Instruction::FCmp:
    return Flags.NoNaN;
  default:
    llvm_unreachable("Unhandled reduction opcode");
  }
  return false;
}