//===-- AArch64TargetTransformInfo.cpp - AArch64 specific TTI -------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//

#include "AArch64TargetTransformInfo.h"
#include "MCTargetDesc/AArch64AddressingModes.h"
#include "llvm/Analysis/TargetTransformInfo.h"
#include "llvm/Analysis/LoopInfo.h"
#include "llvm/CodeGen/BasicTTIImpl.h"
#include "llvm/Support/Debug.h"
#include "llvm/Target/CostTable.h"
#include "llvm/Target/TargetLowering.h"
#include <algorithm>
using namespace llvm;

#define DEBUG_TYPE "aarch64tti"

/// \brief Calculate the cost of materializing a 64-bit value. This helper
/// method might only calculate a fraction of a larger immediate. Therefore it
/// is valid to return a cost of ZERO.
int AArch64TTIImpl::getIntImmCost(int64_t Val) {
  // Check if the immediate can be encoded within an instruction.
  if (Val == 0 || AArch64_AM::isLogicalImmediate(Val, 64))
    return 0;

  if (Val < 0)
    Val = ~Val;

  // Calculate how many moves we will need to materialize this constant.
  unsigned LZ = countLeadingZeros((uint64_t)Val);
  return (64 - LZ + 15) / 16;
}

/// \brief Calculate the cost of materializing the given constant.
int AArch64TTIImpl::getIntImmCost(const APInt &Imm, Type *Ty) {
  assert(Ty->isIntegerTy());

  unsigned BitSize = Ty->getPrimitiveSizeInBits();
  if (BitSize == 0)
    return ~0U;

  // Sign-extend all constants to a multiple of 64 bits.
  APInt ImmVal = Imm;
  if (BitSize & 0x3f)
    ImmVal = Imm.sext((BitSize + 63) & ~0x3fU);

  // Split the constant into 64-bit chunks and calculate the cost for each
  // chunk.
  int Cost = 0;
  for (unsigned ShiftVal = 0; ShiftVal < BitSize; ShiftVal += 64) {
    APInt Tmp = ImmVal.ashr(ShiftVal).sextOrTrunc(64);
    int64_t Val = Tmp.getSExtValue();
    Cost += getIntImmCost(Val);
  }
  // We need at least one instruction to materialize the constant.
  return std::max(1, Cost);
}

int AArch64TTIImpl::getIntImmCost(unsigned Opcode, unsigned Idx,
                                  const APInt &Imm, Type *Ty) {
  assert(Ty->isIntegerTy());

  unsigned BitSize = Ty->getPrimitiveSizeInBits();
  // There is no cost model for constants with a bit size of 0. Return TCC_Free
  // here, so that constant hoisting will ignore this constant.
  if (BitSize == 0)
    return TTI::TCC_Free;

  unsigned ImmIdx = ~0U;
  switch (Opcode) {
  default:
    return TTI::TCC_Free;
  case Instruction::GetElementPtr:
    // Always hoist the base address of a GetElementPtr.
    if (Idx == 0)
      return 2 * TTI::TCC_Basic;
    return TTI::TCC_Free;
  case Instruction::Store:
    ImmIdx = 0;
    break;
  case Instruction::Add:
  case Instruction::Sub:
  case Instruction::Mul:
  case Instruction::UDiv:
  case Instruction::SDiv:
  case Instruction::URem:
  case Instruction::SRem:
  case Instruction::And:
  case Instruction::Or:
  case Instruction::Xor:
  case Instruction::ICmp:
    ImmIdx = 1;
    break;
  // Always return TCC_Free for the shift value of a shift instruction.
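  // Illustrative example (editorial note, not part of the original comments):
  // for `x >> 5`, the shift amount 5 is encoded directly in the immediate form
  // of the shift (an alias of SBFM/UBFM), so hoisting the constant into a
  // register would only add overhead.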
  case Instruction::Shl:
  case Instruction::LShr:
  case Instruction::AShr:
    if (Idx == 1)
      return TTI::TCC_Free;
    break;
  case Instruction::Trunc:
  case Instruction::ZExt:
  case Instruction::SExt:
  case Instruction::IntToPtr:
  case Instruction::PtrToInt:
  case Instruction::BitCast:
  case Instruction::PHI:
  case Instruction::Call:
  case Instruction::Select:
  case Instruction::Ret:
  case Instruction::Load:
    break;
  }

  if (Idx == ImmIdx) {
    int NumConstants = (BitSize + 63) / 64;
    int Cost = AArch64TTIImpl::getIntImmCost(Imm, Ty);
    return (Cost <= NumConstants * TTI::TCC_Basic)
               ? static_cast<int>(TTI::TCC_Free)
               : Cost;
  }
  return AArch64TTIImpl::getIntImmCost(Imm, Ty);
}

int AArch64TTIImpl::getIntImmCost(Intrinsic::ID IID, unsigned Idx,
                                  const APInt &Imm, Type *Ty) {
  assert(Ty->isIntegerTy());

  unsigned BitSize = Ty->getPrimitiveSizeInBits();
  // There is no cost model for constants with a bit size of 0. Return TCC_Free
  // here, so that constant hoisting will ignore this constant.
  if (BitSize == 0)
    return TTI::TCC_Free;

  switch (IID) {
  default:
    return TTI::TCC_Free;
  case Intrinsic::sadd_with_overflow:
  case Intrinsic::uadd_with_overflow:
  case Intrinsic::ssub_with_overflow:
  case Intrinsic::usub_with_overflow:
  case Intrinsic::smul_with_overflow:
  case Intrinsic::umul_with_overflow:
    if (Idx == 1) {
      int NumConstants = (BitSize + 63) / 64;
      int Cost = AArch64TTIImpl::getIntImmCost(Imm, Ty);
      return (Cost <= NumConstants * TTI::TCC_Basic)
                 ? static_cast<int>(TTI::TCC_Free)
                 : Cost;
    }
    break;
  case Intrinsic::experimental_stackmap:
    if ((Idx < 2) || (Imm.getBitWidth() <= 64 && isInt<64>(Imm.getSExtValue())))
      return TTI::TCC_Free;
    break;
  case Intrinsic::experimental_patchpoint_void:
  case Intrinsic::experimental_patchpoint_i64:
    if ((Idx < 4) || (Imm.getBitWidth() <= 64 && isInt<64>(Imm.getSExtValue())))
      return TTI::TCC_Free;
    break;
  }
  return AArch64TTIImpl::getIntImmCost(Imm, Ty);
}

TargetTransformInfo::PopcntSupportKind
AArch64TTIImpl::getPopcntSupport(unsigned TyWidth) {
  assert(isPowerOf2_32(TyWidth) && "Ty width must be power of 2");
  if (TyWidth == 32 || TyWidth == 64)
    return TTI::PSK_FastHardware;
  // TODO: AArch64TargetLowering::LowerCTPOP() supports 128bit popcount.
  return TTI::PSK_Software;
}

int AArch64TTIImpl::getCastInstrCost(unsigned Opcode, Type *Dst, Type *Src) {
  int ISD = TLI->InstructionOpcodeToISD(Opcode);
  assert(ISD && "Invalid opcode");

  EVT SrcTy = TLI->getValueType(DL, Src);
  EVT DstTy = TLI->getValueType(DL, Dst);

  if (!SrcTy.isSimple() || !DstTy.isSimple())
    return BaseT::getCastInstrCost(Opcode, Dst, Src);

  static const TypeConversionCostTblEntry
  ConversionTbl[] = {
    { ISD::TRUNCATE, MVT::v4i16, MVT::v4i32,  1 },
    { ISD::TRUNCATE, MVT::v4i32, MVT::v4i64,  0 },
    { ISD::TRUNCATE, MVT::v8i8,  MVT::v8i32,  3 },
    { ISD::TRUNCATE, MVT::v16i8, MVT::v16i32, 6 },

    // The number of shll instructions for the extension.
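    // Illustrative example (editorial note, not part of the original table
    // comments): extending v4i16 to v4i64 is expected to take roughly three
    // widening steps (v4i16 -> v4i32, then the low and high halves to v2i64),
    // which matches the cost of 3 in the first two entries below.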
    { ISD::SIGN_EXTEND, MVT::v4i64,  MVT::v4i16, 3 },
    { ISD::ZERO_EXTEND, MVT::v4i64,  MVT::v4i16, 3 },
    { ISD::SIGN_EXTEND, MVT::v4i64,  MVT::v4i32, 2 },
    { ISD::ZERO_EXTEND, MVT::v4i64,  MVT::v4i32, 2 },
    { ISD::SIGN_EXTEND, MVT::v8i32,  MVT::v8i8,  3 },
    { ISD::ZERO_EXTEND, MVT::v8i32,  MVT::v8i8,  3 },
    { ISD::SIGN_EXTEND, MVT::v8i32,  MVT::v8i16, 2 },
    { ISD::ZERO_EXTEND, MVT::v8i32,  MVT::v8i16, 2 },
    { ISD::SIGN_EXTEND, MVT::v8i64,  MVT::v8i8,  7 },
    { ISD::ZERO_EXTEND, MVT::v8i64,  MVT::v8i8,  7 },
    { ISD::SIGN_EXTEND, MVT::v8i64,  MVT::v8i16, 6 },
    { ISD::ZERO_EXTEND, MVT::v8i64,  MVT::v8i16, 6 },
    { ISD::SIGN_EXTEND, MVT::v16i16, MVT::v16i8, 2 },
    { ISD::ZERO_EXTEND, MVT::v16i16, MVT::v16i8, 2 },
    { ISD::SIGN_EXTEND, MVT::v16i32, MVT::v16i8, 6 },
    { ISD::ZERO_EXTEND, MVT::v16i32, MVT::v16i8, 6 },

    // LowerVectorINT_TO_FP:
    { ISD::SINT_TO_FP, MVT::v2f32, MVT::v2i32, 1 },
    { ISD::SINT_TO_FP, MVT::v4f32, MVT::v4i32, 1 },
    { ISD::SINT_TO_FP, MVT::v2f64, MVT::v2i64, 1 },
    { ISD::UINT_TO_FP, MVT::v2f32, MVT::v2i32, 1 },
    { ISD::UINT_TO_FP, MVT::v4f32, MVT::v4i32, 1 },
    { ISD::UINT_TO_FP, MVT::v2f64, MVT::v2i64, 1 },

    // Complex: to v2f32
    { ISD::SINT_TO_FP, MVT::v2f32, MVT::v2i8,  3 },
    { ISD::SINT_TO_FP, MVT::v2f32, MVT::v2i16, 3 },
    { ISD::SINT_TO_FP, MVT::v2f32, MVT::v2i64, 2 },
    { ISD::UINT_TO_FP, MVT::v2f32, MVT::v2i8,  3 },
    { ISD::UINT_TO_FP, MVT::v2f32, MVT::v2i16, 3 },
    { ISD::UINT_TO_FP, MVT::v2f32, MVT::v2i64, 2 },

    // Complex: to v4f32
    { ISD::SINT_TO_FP, MVT::v4f32, MVT::v4i8,  4 },
    { ISD::SINT_TO_FP, MVT::v4f32, MVT::v4i16, 2 },
    { ISD::UINT_TO_FP, MVT::v4f32, MVT::v4i8,  3 },
    { ISD::UINT_TO_FP, MVT::v4f32, MVT::v4i16, 2 },

    // Complex: to v8f32
    { ISD::SINT_TO_FP, MVT::v8f32, MVT::v8i8,  10 },
    { ISD::SINT_TO_FP, MVT::v8f32, MVT::v8i16, 4 },
    { ISD::UINT_TO_FP, MVT::v8f32, MVT::v8i8,  10 },
    { ISD::UINT_TO_FP, MVT::v8f32, MVT::v8i16, 4 },

    // Complex: to v16f32
    { ISD::SINT_TO_FP, MVT::v16f32, MVT::v16i8, 21 },
    { ISD::UINT_TO_FP, MVT::v16f32, MVT::v16i8, 21 },

    // Complex: to v2f64
    { ISD::SINT_TO_FP, MVT::v2f64, MVT::v2i8,  4 },
    { ISD::SINT_TO_FP, MVT::v2f64, MVT::v2i16, 4 },
    { ISD::SINT_TO_FP, MVT::v2f64, MVT::v2i32, 2 },
    { ISD::UINT_TO_FP, MVT::v2f64, MVT::v2i8,  4 },
    { ISD::UINT_TO_FP, MVT::v2f64, MVT::v2i16, 4 },
    { ISD::UINT_TO_FP, MVT::v2f64, MVT::v2i32, 2 },

    // LowerVectorFP_TO_INT
    { ISD::FP_TO_SINT, MVT::v2i32, MVT::v2f32, 1 },
    { ISD::FP_TO_SINT, MVT::v4i32, MVT::v4f32, 1 },
    { ISD::FP_TO_SINT, MVT::v2i64, MVT::v2f64, 1 },
    { ISD::FP_TO_UINT, MVT::v2i32, MVT::v2f32, 1 },
    { ISD::FP_TO_UINT, MVT::v4i32, MVT::v4f32, 1 },
    { ISD::FP_TO_UINT, MVT::v2i64, MVT::v2f64, 1 },

    // Complex, from v2f32: legal type is v2i32 (no cost) or v2i64 (1 ext).
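    // Illustrative example (editorial note, not part of the original table
    // comments): v2f32 -> v2i64 is expected to widen to v2f64 first and then
    // convert, hence the cost of 2 in the v2i64 entries below, while the
    // v2i16/v2i8 destinations reuse the legal v2i32 result and stay at cost 1.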
    { ISD::FP_TO_SINT, MVT::v2i64, MVT::v2f32, 2 },
    { ISD::FP_TO_SINT, MVT::v2i16, MVT::v2f32, 1 },
    { ISD::FP_TO_SINT, MVT::v2i8,  MVT::v2f32, 1 },
    { ISD::FP_TO_UINT, MVT::v2i64, MVT::v2f32, 2 },
    { ISD::FP_TO_UINT, MVT::v2i16, MVT::v2f32, 1 },
    { ISD::FP_TO_UINT, MVT::v2i8,  MVT::v2f32, 1 },

    // Complex, from v4f32: legal type is v4i16, 1 narrowing => ~2
    { ISD::FP_TO_SINT, MVT::v4i16, MVT::v4f32, 2 },
    { ISD::FP_TO_SINT, MVT::v4i8,  MVT::v4f32, 2 },
    { ISD::FP_TO_UINT, MVT::v4i16, MVT::v4f32, 2 },
    { ISD::FP_TO_UINT, MVT::v4i8,  MVT::v4f32, 2 },

    // Complex, from v2f64: legal type is v2i32, 1 narrowing => ~2.
    { ISD::FP_TO_SINT, MVT::v2i32, MVT::v2f64, 2 },
    { ISD::FP_TO_SINT, MVT::v2i16, MVT::v2f64, 2 },
    { ISD::FP_TO_SINT, MVT::v2i8,  MVT::v2f64, 2 },
    { ISD::FP_TO_UINT, MVT::v2i32, MVT::v2f64, 2 },
    { ISD::FP_TO_UINT, MVT::v2i16, MVT::v2f64, 2 },
    { ISD::FP_TO_UINT, MVT::v2i8,  MVT::v2f64, 2 },
  };

  if (const auto *Entry = ConvertCostTableLookup(ConversionTbl, ISD,
                                                 DstTy.getSimpleVT(),
                                                 SrcTy.getSimpleVT()))
    return Entry->Cost;

  return BaseT::getCastInstrCost(Opcode, Dst, Src);
}

int AArch64TTIImpl::getExtractWithExtendCost(unsigned Opcode, Type *Dst,
                                             VectorType *VecTy,
                                             unsigned Index) {

  // Make sure we were given a valid extend opcode.
  assert((Opcode == Instruction::SExt || Opcode == Instruction::ZExt) &&
         "Invalid opcode");

  // We are extending an element we extract from a vector, so the source type
  // of the extend is the element type of the vector.
  auto *Src = VecTy->getElementType();

  // Sign- and zero-extends are for integer types only.
  assert(isa<IntegerType>(Dst) && isa<IntegerType>(Src) && "Invalid type");

  // Get the cost for the extract. We compute the cost (if any) for the extend
  // below.
  auto Cost = getVectorInstrCost(Instruction::ExtractElement, VecTy, Index);

  // Legalize the types.
  auto VecLT = TLI->getTypeLegalizationCost(DL, VecTy);
  auto DstVT = TLI->getValueType(DL, Dst);
  auto SrcVT = TLI->getValueType(DL, Src);

  // If the resulting type is still a vector and the destination type is legal,
  // we may get the extension for free. If not, get the default cost for the
  // extend.
  if (!VecLT.second.isVector() || !TLI->isTypeLegal(DstVT))
    return Cost + getCastInstrCost(Opcode, Dst, Src);

  // The destination type should be larger than the element type. If not, get
  // the default cost for the extend.
  if (DstVT.getSizeInBits() < SrcVT.getSizeInBits())
    return Cost + getCastInstrCost(Opcode, Dst, Src);

  switch (Opcode) {
  default:
    llvm_unreachable("Opcode should be either SExt or ZExt");

  // For sign-extends, we only need a smov, which performs the extension
  // automatically.
  case Instruction::SExt:
    return Cost;

  // For zero-extends, the extend is performed automatically by a umov unless
  // the destination type is i64 and the element type is i8 or i16.
  case Instruction::ZExt:
    if (DstVT.getSizeInBits() != 64u || SrcVT.getSizeInBits() == 32u)
      return Cost;
  }

  // If we are unable to perform the extend for free, get the default cost.
  return Cost + getCastInstrCost(Opcode, Dst, Src);
}

int AArch64TTIImpl::getVectorInstrCost(unsigned Opcode, Type *Val,
                                       unsigned Index) {
  assert(Val->isVectorTy() && "This must be a vector type");

  if (Index != -1U) {
    // Legalize the type.
    std::pair<int, MVT> LT = TLI->getTypeLegalizationCost(DL, Val);

    // This type is legalized to a scalar type.
    if (!LT.second.isVector())
      return 0;

    // The type may be split. Normalize the index to the new type.
    unsigned Width = LT.second.getVectorNumElements();
    Index = Index % Width;

    // The element at index zero is already inside the vector.
    if (Index == 0)
      return 0;
  }

  // All other inserts/extracts cost this much.
  if (ST->isKryo())
    return 2;
  return 3;
}

int AArch64TTIImpl::getArithmeticInstrCost(
    unsigned Opcode, Type *Ty, TTI::OperandValueKind Opd1Info,
    TTI::OperandValueKind Opd2Info, TTI::OperandValueProperties Opd1PropInfo,
    TTI::OperandValueProperties Opd2PropInfo) {
  // Legalize the type.
  std::pair<int, MVT> LT = TLI->getTypeLegalizationCost(DL, Ty);

  int ISD = TLI->InstructionOpcodeToISD(Opcode);

  if (ISD == ISD::SDIV &&
      Opd2Info == TargetTransformInfo::OK_UniformConstantValue &&
      Opd2PropInfo == TargetTransformInfo::OP_PowerOf2) {
    // On AArch64, scalar signed division by a power-of-two constant is
    // normally expanded to the sequence ADD + CMP + SELECT + SRA.
    // The OperandValue properties may not be the same as those of the
    // previous operation; conservatively assume OP_None.
    int Cost = getArithmeticInstrCost(Instruction::Add, Ty, Opd1Info, Opd2Info,
                                      TargetTransformInfo::OP_None,
                                      TargetTransformInfo::OP_None);
    Cost += getArithmeticInstrCost(Instruction::Sub, Ty, Opd1Info, Opd2Info,
                                   TargetTransformInfo::OP_None,
                                   TargetTransformInfo::OP_None);
    Cost += getArithmeticInstrCost(Instruction::Select, Ty, Opd1Info, Opd2Info,
                                   TargetTransformInfo::OP_None,
                                   TargetTransformInfo::OP_None);
    Cost += getArithmeticInstrCost(Instruction::AShr, Ty, Opd1Info, Opd2Info,
                                   TargetTransformInfo::OP_None,
                                   TargetTransformInfo::OP_None);
    return Cost;
  }

  switch (ISD) {
  default:
    return BaseT::getArithmeticInstrCost(Opcode, Ty, Opd1Info, Opd2Info,
                                         Opd1PropInfo, Opd2PropInfo);
  case ISD::ADD:
  case ISD::MUL:
  case ISD::XOR:
  case ISD::OR:
  case ISD::AND:
    // These nodes are marked as 'custom' for combining purposes only.
    // We know that they are legal. See LowerAdd in ISelLowering.
    return 1 * LT.first;
  }
}

int AArch64TTIImpl::getAddressComputationCost(Type *Ty, bool IsComplex) {
  // Address computations in vectorized code with non-consecutive addresses
  // will likely result in more instructions compared to scalar code, where
  // the computation can more often be folded into the addressing mode. The
  // resulting extra micro-ops can significantly decrease throughput.
  unsigned NumVectorInstToHideOverhead = 10;

  if (Ty->isVectorTy() && IsComplex)
    return NumVectorInstToHideOverhead;

  // In many cases the address computation is not merged into the instruction
  // addressing mode.
  return 1;
}

int AArch64TTIImpl::getCmpSelInstrCost(unsigned Opcode, Type *ValTy,
                                       Type *CondTy) {

  int ISD = TLI->InstructionOpcodeToISD(Opcode);
  // We don't lower vector selects well when they are wider than the register
  // width.
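  // Illustrative example (editorial note, not part of the original comments):
  // a select on v16i32 operands spans four 128-bit NEON registers, so it
  // cannot be handled by a single bsl-style operation and ends up considerably
  // more expensive, as the table below reflects.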
  if (ValTy->isVectorTy() && ISD == ISD::SELECT) {
    // We would need this many instructions to hide the scalarization happening.
    const int AmortizationCost = 20;
    static const TypeConversionCostTblEntry
    VectorSelectTbl[] = {
      { ISD::SELECT, MVT::v16i1, MVT::v16i16, 16 },
      { ISD::SELECT, MVT::v8i1,  MVT::v8i32,  8 },
      { ISD::SELECT, MVT::v16i1, MVT::v16i32, 16 },
      { ISD::SELECT, MVT::v4i1,  MVT::v4i64,  4 * AmortizationCost },
      { ISD::SELECT, MVT::v8i1,  MVT::v8i64,  8 * AmortizationCost },
      { ISD::SELECT, MVT::v16i1, MVT::v16i64, 16 * AmortizationCost }
    };

    EVT SelCondTy = TLI->getValueType(DL, CondTy);
    EVT SelValTy = TLI->getValueType(DL, ValTy);
    if (SelCondTy.isSimple() && SelValTy.isSimple()) {
      if (const auto *Entry = ConvertCostTableLookup(VectorSelectTbl, ISD,
                                                     SelCondTy.getSimpleVT(),
                                                     SelValTy.getSimpleVT()))
        return Entry->Cost;
    }
  }
  return BaseT::getCmpSelInstrCost(Opcode, ValTy, CondTy);
}

int AArch64TTIImpl::getMemoryOpCost(unsigned Opcode, Type *Src,
                                    unsigned Alignment, unsigned AddressSpace) {
  std::pair<int, MVT> LT = TLI->getTypeLegalizationCost(DL, Src);

  if (Opcode == Instruction::Store && Src->isVectorTy() && Alignment != 16 &&
      Src->getVectorElementType()->isIntegerTy(64)) {
    // Unaligned stores are extremely inefficient. We don't split unaligned
    // v2i64 stores because of the negative impact that has been shown in
    // practice on inlined memcpy code.
    // We make v2i64 stores expensive so that we will only vectorize if there
    // are 6 other instructions getting vectorized.
    int AmortizationCost = 6;

    return LT.first * 2 * AmortizationCost;
  }

  if (Src->isVectorTy() && Src->getVectorElementType()->isIntegerTy(8) &&
      Src->getVectorNumElements() < 8) {
    // We scalarize the loads/stores because there is no v.4b register and we
    // have to promote the elements to v.4h.
    unsigned NumVecElts = Src->getVectorNumElements();
    unsigned NumVectorizableInstsToAmortize = NumVecElts * 2;
    // We generate 2 instructions per vector element.
    return NumVectorizableInstsToAmortize * NumVecElts * 2;
  }

  return LT.first;
}

int AArch64TTIImpl::getInterleavedMemoryOpCost(unsigned Opcode, Type *VecTy,
                                               unsigned Factor,
                                               ArrayRef<unsigned> Indices,
                                               unsigned Alignment,
                                               unsigned AddressSpace) {
  assert(Factor >= 2 && "Invalid interleave factor");
  assert(isa<VectorType>(VecTy) && "Expect a vector type");

  if (Factor <= TLI->getMaxSupportedInterleaveFactor()) {
    unsigned NumElts = VecTy->getVectorNumElements();
    Type *SubVecTy = VectorType::get(VecTy->getScalarType(), NumElts / Factor);
    unsigned SubVecSize = DL.getTypeSizeInBits(SubVecTy);

    // ldN/stN only support legal vector types of size 64 or 128 in bits.
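    // Illustrative example (editorial note, not part of the original
    // comments): an interleaved access of v8i32 with Factor == 2 splits into
    // two v4i32 sub-vectors of 128 bits each, so it can map onto a single
    // ld2/st2 and is costed at Factor below.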
    if (NumElts % Factor == 0 && (SubVecSize == 64 || SubVecSize == 128))
      return Factor;
  }

  return BaseT::getInterleavedMemoryOpCost(Opcode, VecTy, Factor, Indices,
                                           Alignment, AddressSpace);
}

int AArch64TTIImpl::getCostOfKeepingLiveOverCall(ArrayRef<Type *> Tys) {
  int Cost = 0;
  for (auto *I : Tys) {
    if (!I->isVectorTy())
      continue;
    if (I->getScalarSizeInBits() * I->getVectorNumElements() == 128)
      Cost += getMemoryOpCost(Instruction::Store, I, 128, 0) +
              getMemoryOpCost(Instruction::Load, I, 128, 0);
  }
  return Cost;
}

unsigned AArch64TTIImpl::getMaxInterleaveFactor(unsigned VF) {
  if (ST->isCortexA57() || ST->isKryo())
    return 4;
  return 2;
}

void AArch64TTIImpl::getUnrollingPreferences(Loop *L,
                                             TTI::UnrollingPreferences &UP) {
  // Enable partial unrolling and runtime unrolling.
  BaseT::getUnrollingPreferences(L, UP);

  // An inner loop is more likely to be hot, and its runtime checks can be
  // hoisted out by the LICM pass, so the overhead is lower; try a larger
  // threshold to unroll more loops.
  if (L->getLoopDepth() > 1)
    UP.PartialThreshold *= 2;

  // Disable partial & runtime unrolling on -Os.
  UP.PartialOptSizeThreshold = 0;
}

Value *AArch64TTIImpl::getOrCreateResultFromMemIntrinsic(IntrinsicInst *Inst,
                                                         Type *ExpectedType) {
  switch (Inst->getIntrinsicID()) {
  default:
    return nullptr;
  case Intrinsic::aarch64_neon_st2:
  case Intrinsic::aarch64_neon_st3:
  case Intrinsic::aarch64_neon_st4: {
    // Create a struct type.
    StructType *ST = dyn_cast<StructType>(ExpectedType);
    if (!ST)
      return nullptr;
    unsigned NumElts = Inst->getNumArgOperands() - 1;
    if (ST->getNumElements() != NumElts)
      return nullptr;
    for (unsigned i = 0, e = NumElts; i != e; ++i) {
      if (Inst->getArgOperand(i)->getType() != ST->getElementType(i))
        return nullptr;
    }
    Value *Res = UndefValue::get(ExpectedType);
    IRBuilder<> Builder(Inst);
    for (unsigned i = 0, e = NumElts; i != e; ++i) {
      Value *L = Inst->getArgOperand(i);
      Res = Builder.CreateInsertValue(Res, L, i);
    }
    return Res;
  }
  case Intrinsic::aarch64_neon_ld2:
  case Intrinsic::aarch64_neon_ld3:
  case Intrinsic::aarch64_neon_ld4:
    if (Inst->getType() == ExpectedType)
      return Inst;
    return nullptr;
  }
}

bool AArch64TTIImpl::getTgtMemIntrinsic(IntrinsicInst *Inst,
                                        MemIntrinsicInfo &Info) {
  switch (Inst->getIntrinsicID()) {
  default:
    break;
  case Intrinsic::aarch64_neon_ld2:
  case Intrinsic::aarch64_neon_ld3:
  case Intrinsic::aarch64_neon_ld4:
    Info.ReadMem = true;
    Info.WriteMem = false;
    Info.IsSimple = true;
    Info.NumMemRefs = 1;
    Info.PtrVal = Inst->getArgOperand(0);
    break;
  case Intrinsic::aarch64_neon_st2:
  case Intrinsic::aarch64_neon_st3:
  case Intrinsic::aarch64_neon_st4:
    Info.ReadMem = false;
    Info.WriteMem = true;
    Info.IsSimple = true;
    Info.NumMemRefs = 1;
    Info.PtrVal = Inst->getArgOperand(Inst->getNumArgOperands() - 1);
    break;
  }

  switch (Inst->getIntrinsicID()) {
  default:
    return false;
  case Intrinsic::aarch64_neon_ld2:
  case Intrinsic::aarch64_neon_st2:
    Info.MatchingId = VECTOR_LDST_TWO_ELEMENTS;
    break;
  case Intrinsic::aarch64_neon_ld3:
  case Intrinsic::aarch64_neon_st3:
    Info.MatchingId = VECTOR_LDST_THREE_ELEMENTS;
    break;
  case Intrinsic::aarch64_neon_ld4:
  case Intrinsic::aarch64_neon_st4:
    Info.MatchingId = VECTOR_LDST_FOUR_ELEMENTS;
    break;
  }
  return true;
}

unsigned AArch64TTIImpl::getCacheLineSize() {
  if (ST->isCyclone())
    return 64;
  return BaseT::getCacheLineSize();
}

unsigned AArch64TTIImpl::getPrefetchDistance() {
  if (ST->isCyclone())
    return 280;
  return BaseT::getPrefetchDistance();
}

unsigned AArch64TTIImpl::getMinPrefetchStride() {
  if (ST->isCyclone())
    // The HW prefetcher handles accesses with strides up to 2KB.
    return 2048;
  return BaseT::getMinPrefetchStride();
}

unsigned AArch64TTIImpl::getMaxPrefetchIterationsAhead() {
  if (ST->isCyclone())
    // Be conservative for now and don't prefetch ahead too much since the loop
    // may terminate early.
    return 3;
  return BaseT::getMaxPrefetchIterationsAhead();
}