//===- ARMTargetTransformInfo.cpp - ARM specific TTI ----------------------===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//

#include "ARMTargetTransformInfo.h"
#include "ARMSubtarget.h"
#include "MCTargetDesc/ARMAddressingModes.h"
#include "llvm/ADT/APInt.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/Analysis/LoopInfo.h"
#include "llvm/CodeGen/CostTable.h"
#include "llvm/CodeGen/ISDOpcodes.h"
#include "llvm/CodeGen/MachineValueType.h"
#include "llvm/CodeGen/ValueTypes.h"
#include "llvm/IR/BasicBlock.h"
#include "llvm/IR/CallSite.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/Instruction.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/Type.h"
#include "llvm/MC/SubtargetFeature.h"
#include "llvm/Support/Casting.h"
#include "llvm/Target/TargetMachine.h"
#include <algorithm>
#include <cassert>
#include <cstdint>
#include <utility>

using namespace llvm;

#define DEBUG_TYPE "armtti"

bool ARMTTIImpl::areInlineCompatible(const Function *Caller,
                                     const Function *Callee) const {
  const TargetMachine &TM = getTLI()->getTargetMachine();
  const FeatureBitset &CallerBits =
      TM.getSubtargetImpl(*Caller)->getFeatureBits();
  const FeatureBitset &CalleeBits =
      TM.getSubtargetImpl(*Callee)->getFeatureBits();

  // To inline a callee, all features not in the whitelist must match exactly.
  bool MatchExact = (CallerBits & ~InlineFeatureWhitelist) ==
                    (CalleeBits & ~InlineFeatureWhitelist);
  // For features in the whitelist, the callee's features must be a subset of
  // the caller's.
  bool MatchSubset = ((CallerBits & CalleeBits) & InlineFeatureWhitelist) ==
                     (CalleeBits & InlineFeatureWhitelist);
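  // For example, a callee carrying a whitelisted feature the caller lacks
  // fails MatchSubset and is rejected, whereas a caller that merely has
  // extra whitelisted features the callee never uses remains compatible.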
  return MatchExact && MatchSubset;
}

int ARMTTIImpl::getIntImmCost(const APInt &Imm, Type *Ty) {
  assert(Ty->isIntegerTy());

  unsigned Bits = Ty->getPrimitiveSizeInBits();
  if (Bits == 0 || Imm.getActiveBits() >= 64)
    return 4;

  int64_t SImmVal = Imm.getSExtValue();
  uint64_t ZImmVal = Imm.getZExtValue();
  if (!ST->isThumb()) {
    if ((SImmVal >= 0 && SImmVal < 65536) ||
        (ARM_AM::getSOImmVal(ZImmVal) != -1) ||
        (ARM_AM::getSOImmVal(~ZImmVal) != -1))
      return 1;
    return ST->hasV6T2Ops() ? 2 : 3;
  }
  if (ST->isThumb2()) {
    if ((SImmVal >= 0 && SImmVal < 65536) ||
        (ARM_AM::getT2SOImmVal(ZImmVal) != -1) ||
        (ARM_AM::getT2SOImmVal(~ZImmVal) != -1))
      return 1;
    return ST->hasV6T2Ops() ? 2 : 3;
  }
  // Thumb1.
  if (SImmVal >= 0 && SImmVal < 256)
    return 1;
  if ((~SImmVal < 256) || ARM_AM::isThumbImmShiftedVal(ZImmVal))
    return 2;
  // Load from constantpool.
  return 3;
}

// Constants smaller than 256 fit in the immediate field of
// Thumb1 instructions so we return a zero cost and 1 otherwise.
int ARMTTIImpl::getIntImmCodeSizeCost(unsigned Opcode, unsigned Idx,
                                      const APInt &Imm, Type *Ty) {
  if (Imm.isNonNegative() && Imm.getLimitedValue() < 256)
    return 0;

  return 1;
}

int ARMTTIImpl::getIntImmCost(unsigned Opcode, unsigned Idx, const APInt &Imm,
                              Type *Ty) {
  // Division by a constant can be turned into multiplication, but only if we
  // know it's constant. So it's not so much that the immediate is cheap (it's
  // not), but that the alternative is worse.
  // FIXME: this is probably unneeded with GlobalISel.
  if ((Opcode == Instruction::SDiv || Opcode == Instruction::UDiv ||
       Opcode == Instruction::SRem || Opcode == Instruction::URem) &&
      Idx == 1)
    return 0;

  if (Opcode == Instruction::And)
    // Conversion to BIC is free, and means we can use ~Imm instead.
    return std::min(getIntImmCost(Imm, Ty), getIntImmCost(~Imm, Ty));

  if (Opcode == Instruction::Add)
    // Conversion to SUB is free, and means we can use -Imm instead.
    return std::min(getIntImmCost(Imm, Ty), getIntImmCost(-Imm, Ty));

  if (Opcode == Instruction::ICmp && Imm.isNegative() &&
      Ty->getIntegerBitWidth() == 32) {
    int64_t NegImm = -Imm.getSExtValue();
    if (ST->isThumb2() && NegImm < 1<<12)
      // icmp X, #-C -> cmn X, #C
      return 0;
    if (ST->isThumb() && NegImm < 1<<8)
      // icmp X, #-C -> adds X, #C
      return 0;
  }

  return getIntImmCost(Imm, Ty);
}

int ARMTTIImpl::getCastInstrCost(unsigned Opcode, Type *Dst, Type *Src,
                                 const Instruction *I) {
  int ISD = TLI->InstructionOpcodeToISD(Opcode);
  assert(ISD && "Invalid opcode");

  // Single to/from double precision conversions.
  static const CostTblEntry NEONFltDblTbl[] = {
    // Vector fptrunc/fpext conversions.
    { ISD::FP_ROUND,  MVT::v2f64, 2 },
    { ISD::FP_EXTEND, MVT::v2f32, 2 },
    { ISD::FP_EXTEND, MVT::v4f32, 4 }
  };

  if (Src->isVectorTy() && ST->hasNEON() && (ISD == ISD::FP_ROUND ||
                                             ISD == ISD::FP_EXTEND)) {
    std::pair<int, MVT> LT = TLI->getTypeLegalizationCost(DL, Src);
    if (const auto *Entry = CostTableLookup(NEONFltDblTbl, ISD, LT.second))
      return LT.first * Entry->Cost;
  }

  EVT SrcTy = TLI->getValueType(DL, Src);
  EVT DstTy = TLI->getValueType(DL, Dst);

  if (!SrcTy.isSimple() || !DstTy.isSimple())
    return BaseT::getCastInstrCost(Opcode, Dst, Src);

  // Some arithmetic, load and store operations have specific instructions
  // to cast up/down their types automatically at no extra cost.
  // TODO: Get these tables to know at least what the related operations are.
  static const TypeConversionCostTblEntry NEONVectorConversionTbl[] = {
    { ISD::SIGN_EXTEND, MVT::v4i32, MVT::v4i16, 0 },
    { ISD::ZERO_EXTEND, MVT::v4i32, MVT::v4i16, 0 },
    { ISD::SIGN_EXTEND, MVT::v2i64, MVT::v2i32, 1 },
    { ISD::ZERO_EXTEND, MVT::v2i64, MVT::v2i32, 1 },
    { ISD::TRUNCATE,    MVT::v4i32, MVT::v4i64, 0 },
    { ISD::TRUNCATE,    MVT::v4i16, MVT::v4i32, 1 },

    // The number of vmovl instructions for the extension.
    { ISD::SIGN_EXTEND, MVT::v4i64, MVT::v4i16, 3 },
    { ISD::ZERO_EXTEND, MVT::v4i64, MVT::v4i16, 3 },
    { ISD::SIGN_EXTEND, MVT::v8i32, MVT::v8i8, 3 },
    { ISD::ZERO_EXTEND, MVT::v8i32, MVT::v8i8, 3 },
    { ISD::SIGN_EXTEND, MVT::v8i64, MVT::v8i8, 7 },
    { ISD::ZERO_EXTEND, MVT::v8i64, MVT::v8i8, 7 },
    { ISD::SIGN_EXTEND, MVT::v8i64, MVT::v8i16, 6 },
    { ISD::ZERO_EXTEND, MVT::v8i64, MVT::v8i16, 6 },
    { ISD::SIGN_EXTEND, MVT::v16i32, MVT::v16i8, 6 },
    { ISD::ZERO_EXTEND, MVT::v16i32, MVT::v16i8, 6 },

    // Operations that we legalize using splitting.
    { ISD::TRUNCATE,    MVT::v16i8, MVT::v16i32, 6 },
    { ISD::TRUNCATE,    MVT::v8i8, MVT::v8i32, 3 },

    // Vector float <-> i32 conversions.
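    // (vcvt converts v2i32/v4i32 sources directly, hence cost 1 below;
    // narrower integer sources are assumed to need lengthening moves first,
    // which the higher costs reflect.)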
    { ISD::SINT_TO_FP,  MVT::v4f32, MVT::v4i32, 1 },
    { ISD::UINT_TO_FP,  MVT::v4f32, MVT::v4i32, 1 },

    { ISD::SINT_TO_FP,  MVT::v2f32, MVT::v2i8, 3 },
    { ISD::UINT_TO_FP,  MVT::v2f32, MVT::v2i8, 3 },
    { ISD::SINT_TO_FP,  MVT::v2f32, MVT::v2i16, 2 },
    { ISD::UINT_TO_FP,  MVT::v2f32, MVT::v2i16, 2 },
    { ISD::SINT_TO_FP,  MVT::v2f32, MVT::v2i32, 1 },
    { ISD::UINT_TO_FP,  MVT::v2f32, MVT::v2i32, 1 },
    { ISD::SINT_TO_FP,  MVT::v4f32, MVT::v4i1, 3 },
    { ISD::UINT_TO_FP,  MVT::v4f32, MVT::v4i1, 3 },
    { ISD::SINT_TO_FP,  MVT::v4f32, MVT::v4i8, 3 },
    { ISD::UINT_TO_FP,  MVT::v4f32, MVT::v4i8, 3 },
    { ISD::SINT_TO_FP,  MVT::v4f32, MVT::v4i16, 2 },
    { ISD::UINT_TO_FP,  MVT::v4f32, MVT::v4i16, 2 },
    { ISD::SINT_TO_FP,  MVT::v8f32, MVT::v8i16, 4 },
    { ISD::UINT_TO_FP,  MVT::v8f32, MVT::v8i16, 4 },
    { ISD::SINT_TO_FP,  MVT::v8f32, MVT::v8i32, 2 },
    { ISD::UINT_TO_FP,  MVT::v8f32, MVT::v8i32, 2 },
    { ISD::SINT_TO_FP,  MVT::v16f32, MVT::v16i16, 8 },
    { ISD::UINT_TO_FP,  MVT::v16f32, MVT::v16i16, 8 },
    { ISD::SINT_TO_FP,  MVT::v16f32, MVT::v16i32, 4 },
    { ISD::UINT_TO_FP,  MVT::v16f32, MVT::v16i32, 4 },

    { ISD::FP_TO_SINT,  MVT::v4i32, MVT::v4f32, 1 },
    { ISD::FP_TO_UINT,  MVT::v4i32, MVT::v4f32, 1 },
    { ISD::FP_TO_SINT,  MVT::v4i8, MVT::v4f32, 3 },
    { ISD::FP_TO_UINT,  MVT::v4i8, MVT::v4f32, 3 },
    { ISD::FP_TO_SINT,  MVT::v4i16, MVT::v4f32, 2 },
    { ISD::FP_TO_UINT,  MVT::v4i16, MVT::v4f32, 2 },

    // Vector double <-> i32 conversions.
    { ISD::SINT_TO_FP,  MVT::v2f64, MVT::v2i32, 2 },
    { ISD::UINT_TO_FP,  MVT::v2f64, MVT::v2i32, 2 },

    { ISD::SINT_TO_FP,  MVT::v2f64, MVT::v2i8, 4 },
    { ISD::UINT_TO_FP,  MVT::v2f64, MVT::v2i8, 4 },
    { ISD::SINT_TO_FP,  MVT::v2f64, MVT::v2i16, 3 },
    { ISD::UINT_TO_FP,  MVT::v2f64, MVT::v2i16, 3 },
    { ISD::SINT_TO_FP,  MVT::v2f64, MVT::v2i32, 2 },
    { ISD::UINT_TO_FP,  MVT::v2f64, MVT::v2i32, 2 },

    { ISD::FP_TO_SINT,  MVT::v2i32, MVT::v2f64, 2 },
    { ISD::FP_TO_UINT,  MVT::v2i32, MVT::v2f64, 2 },
    { ISD::FP_TO_SINT,  MVT::v8i16, MVT::v8f32, 4 },
    { ISD::FP_TO_UINT,  MVT::v8i16, MVT::v8f32, 4 },
    { ISD::FP_TO_SINT,  MVT::v16i16, MVT::v16f32, 8 },
    { ISD::FP_TO_UINT,  MVT::v16i16, MVT::v16f32, 8 }
  };

  if (SrcTy.isVector() && ST->hasNEON()) {
    if (const auto *Entry = ConvertCostTableLookup(NEONVectorConversionTbl, ISD,
                                                   DstTy.getSimpleVT(),
                                                   SrcTy.getSimpleVT()))
      return Entry->Cost;
  }

  // Scalar float to integer conversions.
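  // (A scalar conversion is assumed to be a vcvt plus a vmov between the FP
  // and integer register files, hence the baseline cost of 2 below; i64
  // results go through a library call, hence 10.)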
  static const TypeConversionCostTblEntry NEONFloatConversionTbl[] = {
    { ISD::FP_TO_SINT, MVT::i1, MVT::f32, 2 },
    { ISD::FP_TO_UINT, MVT::i1, MVT::f32, 2 },
    { ISD::FP_TO_SINT, MVT::i1, MVT::f64, 2 },
    { ISD::FP_TO_UINT, MVT::i1, MVT::f64, 2 },
    { ISD::FP_TO_SINT, MVT::i8, MVT::f32, 2 },
    { ISD::FP_TO_UINT, MVT::i8, MVT::f32, 2 },
    { ISD::FP_TO_SINT, MVT::i8, MVT::f64, 2 },
    { ISD::FP_TO_UINT, MVT::i8, MVT::f64, 2 },
    { ISD::FP_TO_SINT, MVT::i16, MVT::f32, 2 },
    { ISD::FP_TO_UINT, MVT::i16, MVT::f32, 2 },
    { ISD::FP_TO_SINT, MVT::i16, MVT::f64, 2 },
    { ISD::FP_TO_UINT, MVT::i16, MVT::f64, 2 },
    { ISD::FP_TO_SINT, MVT::i32, MVT::f32, 2 },
    { ISD::FP_TO_UINT, MVT::i32, MVT::f32, 2 },
    { ISD::FP_TO_SINT, MVT::i32, MVT::f64, 2 },
    { ISD::FP_TO_UINT, MVT::i32, MVT::f64, 2 },
    { ISD::FP_TO_SINT, MVT::i64, MVT::f32, 10 },
    { ISD::FP_TO_UINT, MVT::i64, MVT::f32, 10 },
    { ISD::FP_TO_SINT, MVT::i64, MVT::f64, 10 },
    { ISD::FP_TO_UINT, MVT::i64, MVT::f64, 10 }
  };
  if (SrcTy.isFloatingPoint() && ST->hasNEON()) {
    if (const auto *Entry = ConvertCostTableLookup(NEONFloatConversionTbl, ISD,
                                                   DstTy.getSimpleVT(),
                                                   SrcTy.getSimpleVT()))
      return Entry->Cost;
  }

  // Scalar integer to float conversions.
  static const TypeConversionCostTblEntry NEONIntegerConversionTbl[] = {
    { ISD::SINT_TO_FP, MVT::f32, MVT::i1, 2 },
    { ISD::UINT_TO_FP, MVT::f32, MVT::i1, 2 },
    { ISD::SINT_TO_FP, MVT::f64, MVT::i1, 2 },
    { ISD::UINT_TO_FP, MVT::f64, MVT::i1, 2 },
    { ISD::SINT_TO_FP, MVT::f32, MVT::i8, 2 },
    { ISD::UINT_TO_FP, MVT::f32, MVT::i8, 2 },
    { ISD::SINT_TO_FP, MVT::f64, MVT::i8, 2 },
    { ISD::UINT_TO_FP, MVT::f64, MVT::i8, 2 },
    { ISD::SINT_TO_FP, MVT::f32, MVT::i16, 2 },
    { ISD::UINT_TO_FP, MVT::f32, MVT::i16, 2 },
    { ISD::SINT_TO_FP, MVT::f64, MVT::i16, 2 },
    { ISD::UINT_TO_FP, MVT::f64, MVT::i16, 2 },
    { ISD::SINT_TO_FP, MVT::f32, MVT::i32, 2 },
    { ISD::UINT_TO_FP, MVT::f32, MVT::i32, 2 },
    { ISD::SINT_TO_FP, MVT::f64, MVT::i32, 2 },
    { ISD::UINT_TO_FP, MVT::f64, MVT::i32, 2 },
    { ISD::SINT_TO_FP, MVT::f32, MVT::i64, 10 },
    { ISD::UINT_TO_FP, MVT::f32, MVT::i64, 10 },
    { ISD::SINT_TO_FP, MVT::f64, MVT::i64, 10 },
    { ISD::UINT_TO_FP, MVT::f64, MVT::i64, 10 }
  };

  if (SrcTy.isInteger() && ST->hasNEON()) {
    if (const auto *Entry = ConvertCostTableLookup(NEONIntegerConversionTbl,
                                                   ISD, DstTy.getSimpleVT(),
                                                   SrcTy.getSimpleVT()))
      return Entry->Cost;
  }

  // Scalar integer conversion costs.
  static const TypeConversionCostTblEntry ARMIntegerConversionTbl[] = {
    // i16 -> i64 requires two dependent operations.
    { ISD::SIGN_EXTEND, MVT::i64, MVT::i16, 2 },

    // Truncates on i64 are assumed to be free.
    { ISD::TRUNCATE,    MVT::i32, MVT::i64, 0 },
    { ISD::TRUNCATE,    MVT::i16, MVT::i64, 0 },
    { ISD::TRUNCATE,    MVT::i8,  MVT::i64, 0 },
    { ISD::TRUNCATE,    MVT::i1,  MVT::i64, 0 }
  };

  if (SrcTy.isInteger()) {
    if (const auto *Entry = ConvertCostTableLookup(ARMIntegerConversionTbl, ISD,
                                                   DstTy.getSimpleVT(),
                                                   SrcTy.getSimpleVT()))
      return Entry->Cost;
  }

  return BaseT::getCastInstrCost(Opcode, Dst, Src);
}

int ARMTTIImpl::getVectorInstrCost(unsigned Opcode, Type *ValTy,
                                   unsigned Index) {
  // Penalize inserting into a D-subregister. We end up with a three times
  // lower estimated throughput on Swift.
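  // (Writing one D half of a Q register is assumed to create a
  // partial-register dependency on Swift, hence the flat cost of 3.)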
  if (ST->hasSlowLoadDSubregister() && Opcode == Instruction::InsertElement &&
      ValTy->isVectorTy() && ValTy->getScalarSizeInBits() <= 32)
    return 3;

  if ((Opcode == Instruction::InsertElement ||
       Opcode == Instruction::ExtractElement)) {
    // Cross-class copies are expensive on many microarchitectures,
    // so assume they are expensive by default.
    if (ValTy->getVectorElementType()->isIntegerTy())
      return 3;

    // Even if it's not a cross-class copy, this likely leads to mixing
    // of NEON and VFP code and should therefore be penalized.
    if (ValTy->isVectorTy() &&
        ValTy->getScalarSizeInBits() <= 32)
      return std::max(BaseT::getVectorInstrCost(Opcode, ValTy, Index), 2U);
  }

  return BaseT::getVectorInstrCost(Opcode, ValTy, Index);
}

int ARMTTIImpl::getCmpSelInstrCost(unsigned Opcode, Type *ValTy, Type *CondTy,
                                   const Instruction *I) {
  int ISD = TLI->InstructionOpcodeToISD(Opcode);
  // On NEON a vector select gets lowered to vbsl.
  if (ST->hasNEON() && ValTy->isVectorTy() && ISD == ISD::SELECT) {
    // Lowering of some vector selects is currently far from perfect.
    static const TypeConversionCostTblEntry NEONVectorSelectTbl[] = {
      { ISD::SELECT, MVT::v4i1, MVT::v4i64, 4*4 + 1*2 + 1 },
      { ISD::SELECT, MVT::v8i1, MVT::v8i64, 50 },
      { ISD::SELECT, MVT::v16i1, MVT::v16i64, 100 }
    };

    EVT SelCondTy = TLI->getValueType(DL, CondTy);
    EVT SelValTy = TLI->getValueType(DL, ValTy);
    if (SelCondTy.isSimple() && SelValTy.isSimple()) {
      if (const auto *Entry = ConvertCostTableLookup(NEONVectorSelectTbl, ISD,
                                                     SelCondTy.getSimpleVT(),
                                                     SelValTy.getSimpleVT()))
        return Entry->Cost;
    }

    std::pair<int, MVT> LT = TLI->getTypeLegalizationCost(DL, ValTy);
    return LT.first;
  }

  return BaseT::getCmpSelInstrCost(Opcode, ValTy, CondTy, I);
}

int ARMTTIImpl::getAddressComputationCost(Type *Ty, ScalarEvolution *SE,
                                          const SCEV *Ptr) {
  // Address computations in vectorized code with non-consecutive addresses will
  // likely result in more instructions compared to scalar code where the
  // computation can more often be merged into the index mode. The resulting
  // extra micro-ops can significantly decrease throughput.
  unsigned NumVectorInstToHideOverhead = 10;
  int MaxMergeDistance = 64;

  if (Ty->isVectorTy() && SE &&
      !BaseT::isConstantStridedAccessLessThan(SE, Ptr, MaxMergeDistance + 1))
    return NumVectorInstToHideOverhead;

  // In many cases the address computation is not merged into the instruction
  // addressing mode.
  return 1;
}

int ARMTTIImpl::getFPOpCost(Type *Ty) {
  // Use similar logic to that in ARMISelLowering:
  // Any ARM CPU with VFP2 has floating point, but Thumb1 didn't have access
  // to VFP.

  if (ST->hasVFP2() && !ST->isThumb1Only()) {
    if (Ty->isFloatTy()) {
      return TargetTransformInfo::TCC_Basic;
    }

    if (Ty->isDoubleTy()) {
      return ST->isFPOnlySP() ? TargetTransformInfo::TCC_Expensive :
        TargetTransformInfo::TCC_Basic;
    }
  }

  return TargetTransformInfo::TCC_Expensive;
}

int ARMTTIImpl::getShuffleCost(TTI::ShuffleKind Kind, Type *Tp, int Index,
                               Type *SubTp) {
  // We only handle costs of reverse and alternate shuffles for now.
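  // (For example, reversing a <4 x i32> uses the mask <3,2,1,0>; on NEON
  // this lowers to a vrev plus a vext for quad registers, matching the cost
  // of 2 in the table below.)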
  if (Kind != TTI::SK_Reverse && Kind != TTI::SK_Alternate)
    return BaseT::getShuffleCost(Kind, Tp, Index, SubTp);

  if (Kind == TTI::SK_Reverse) {
    static const CostTblEntry NEONShuffleTbl[] = {
        // Reverse shuffle cost one instruction if we are shuffling within a
        // double word (vrev) or two if we shuffle a quad word (vrev, vext).
        {ISD::VECTOR_SHUFFLE, MVT::v2i32, 1},
        {ISD::VECTOR_SHUFFLE, MVT::v2f32, 1},
        {ISD::VECTOR_SHUFFLE, MVT::v2i64, 1},
        {ISD::VECTOR_SHUFFLE, MVT::v2f64, 1},

        {ISD::VECTOR_SHUFFLE, MVT::v4i32, 2},
        {ISD::VECTOR_SHUFFLE, MVT::v4f32, 2},
        {ISD::VECTOR_SHUFFLE, MVT::v8i16, 2},
        {ISD::VECTOR_SHUFFLE, MVT::v16i8, 2}};

    std::pair<int, MVT> LT = TLI->getTypeLegalizationCost(DL, Tp);

    if (const auto *Entry = CostTableLookup(NEONShuffleTbl, ISD::VECTOR_SHUFFLE,
                                            LT.second))
      return LT.first * Entry->Cost;

    return BaseT::getShuffleCost(Kind, Tp, Index, SubTp);
  }
  if (Kind == TTI::SK_Alternate) {
    static const CostTblEntry NEONAltShuffleTbl[] = {
        // Alt shuffle cost table for ARM. Cost is the number of instructions
        // required to create the shuffled vector.

        {ISD::VECTOR_SHUFFLE, MVT::v2f32, 1},
        {ISD::VECTOR_SHUFFLE, MVT::v2i64, 1},
        {ISD::VECTOR_SHUFFLE, MVT::v2f64, 1},
        {ISD::VECTOR_SHUFFLE, MVT::v2i32, 1},

        {ISD::VECTOR_SHUFFLE, MVT::v4i32, 2},
        {ISD::VECTOR_SHUFFLE, MVT::v4f32, 2},
        {ISD::VECTOR_SHUFFLE, MVT::v4i16, 2},

        {ISD::VECTOR_SHUFFLE, MVT::v8i16, 16},

        {ISD::VECTOR_SHUFFLE, MVT::v16i8, 32}};

    std::pair<int, MVT> LT = TLI->getTypeLegalizationCost(DL, Tp);
    if (const auto *Entry = CostTableLookup(NEONAltShuffleTbl,
                                            ISD::VECTOR_SHUFFLE, LT.second))
      return LT.first * Entry->Cost;
    return BaseT::getShuffleCost(Kind, Tp, Index, SubTp);
  }
  return BaseT::getShuffleCost(Kind, Tp, Index, SubTp);
}

int ARMTTIImpl::getArithmeticInstrCost(
    unsigned Opcode, Type *Ty, TTI::OperandValueKind Op1Info,
    TTI::OperandValueKind Op2Info, TTI::OperandValueProperties Opd1PropInfo,
    TTI::OperandValueProperties Opd2PropInfo,
    ArrayRef<const Value *> Args) {
  int ISDOpcode = TLI->InstructionOpcodeToISD(Opcode);
  std::pair<int, MVT> LT = TLI->getTypeLegalizationCost(DL, Ty);

  const unsigned FunctionCallDivCost = 20;
  const unsigned ReciprocalDivCost = 10;
  static const CostTblEntry CostTbl[] = {
    // Division.
    // These costs are somewhat random. Choose a cost of 20 to indicate that
    // vectorizing division (added function call) is going to be very expensive.
    // Double register types.
    { ISD::SDIV, MVT::v1i64, 1 * FunctionCallDivCost},
    { ISD::UDIV, MVT::v1i64, 1 * FunctionCallDivCost},
    { ISD::SREM, MVT::v1i64, 1 * FunctionCallDivCost},
    { ISD::UREM, MVT::v1i64, 1 * FunctionCallDivCost},
    { ISD::SDIV, MVT::v2i32, 2 * FunctionCallDivCost},
    { ISD::UDIV, MVT::v2i32, 2 * FunctionCallDivCost},
    { ISD::SREM, MVT::v2i32, 2 * FunctionCallDivCost},
    { ISD::UREM, MVT::v2i32, 2 * FunctionCallDivCost},
    { ISD::SDIV, MVT::v4i16,     ReciprocalDivCost},
    { ISD::UDIV, MVT::v4i16,     ReciprocalDivCost},
    { ISD::SREM, MVT::v4i16, 4 * FunctionCallDivCost},
    { ISD::UREM, MVT::v4i16, 4 * FunctionCallDivCost},
    { ISD::SDIV, MVT::v8i8,      ReciprocalDivCost},
    { ISD::UDIV, MVT::v8i8,      ReciprocalDivCost},
    { ISD::SREM, MVT::v8i8,  8 * FunctionCallDivCost},
    { ISD::UREM, MVT::v8i8,  8 * FunctionCallDivCost},
    // Quad register types.
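    // (Each of these is assumed to scalarize into one library call per
    // element, e.g. a v4i32 SDIV becomes four calls, hence the element-count
    // multiplier on FunctionCallDivCost.)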
    { ISD::SDIV, MVT::v2i64, 2 * FunctionCallDivCost},
    { ISD::UDIV, MVT::v2i64, 2 * FunctionCallDivCost},
    { ISD::SREM, MVT::v2i64, 2 * FunctionCallDivCost},
    { ISD::UREM, MVT::v2i64, 2 * FunctionCallDivCost},
    { ISD::SDIV, MVT::v4i32, 4 * FunctionCallDivCost},
    { ISD::UDIV, MVT::v4i32, 4 * FunctionCallDivCost},
    { ISD::SREM, MVT::v4i32, 4 * FunctionCallDivCost},
    { ISD::UREM, MVT::v4i32, 4 * FunctionCallDivCost},
    { ISD::SDIV, MVT::v8i16, 8 * FunctionCallDivCost},
    { ISD::UDIV, MVT::v8i16, 8 * FunctionCallDivCost},
    { ISD::SREM, MVT::v8i16, 8 * FunctionCallDivCost},
    { ISD::UREM, MVT::v8i16, 8 * FunctionCallDivCost},
    { ISD::SDIV, MVT::v16i8, 16 * FunctionCallDivCost},
    { ISD::UDIV, MVT::v16i8, 16 * FunctionCallDivCost},
    { ISD::SREM, MVT::v16i8, 16 * FunctionCallDivCost},
    { ISD::UREM, MVT::v16i8, 16 * FunctionCallDivCost},
    // Multiplication.
  };

  if (ST->hasNEON())
    if (const auto *Entry = CostTableLookup(CostTbl, ISDOpcode, LT.second))
      return LT.first * Entry->Cost;

  int Cost = BaseT::getArithmeticInstrCost(Opcode, Ty, Op1Info, Op2Info,
                                           Opd1PropInfo, Opd2PropInfo);

  // This is somewhat of a hack. The problem that we are facing is that SROA
  // creates a sequence of shift, and, or instructions to construct values.
  // These sequences are recognized by the ISel and have zero cost. Not so for
  // the vectorized code. Because we have support for v2i64 but not i64, those
  // sequences look particularly beneficial to vectorize.
  // To work around this we increase the cost of v2i64 operations to make them
  // seem less beneficial.
  if (LT.second == MVT::v2i64 &&
      Op2Info == TargetTransformInfo::OK_UniformConstantValue)
    Cost += 4;

  return Cost;
}

int ARMTTIImpl::getMemoryOpCost(unsigned Opcode, Type *Src, unsigned Alignment,
                                unsigned AddressSpace, const Instruction *I) {
  std::pair<int, MVT> LT = TLI->getTypeLegalizationCost(DL, Src);

  if (Src->isVectorTy() && Alignment != 16 &&
      Src->getVectorElementType()->isDoubleTy()) {
    // Unaligned loads/stores are extremely inefficient.
    // We need 4 uops for vst.1/vld.1 vs 1 uop for vldr/vstr.
    return LT.first * 4;
  }
  return LT.first;
}

int ARMTTIImpl::getInterleavedMemoryOpCost(unsigned Opcode, Type *VecTy,
                                           unsigned Factor,
                                           ArrayRef<unsigned> Indices,
                                           unsigned Alignment,
                                           unsigned AddressSpace) {
  assert(Factor >= 2 && "Invalid interleave factor");
  assert(isa<VectorType>(VecTy) && "Expect a vector type");

  // vldN/vstN doesn't support vector types of i64/f64 elements.
  bool EltIs64Bits = DL.getTypeSizeInBits(VecTy->getScalarType()) == 64;

  if (Factor <= TLI->getMaxSupportedInterleaveFactor() && !EltIs64Bits) {
    unsigned NumElts = VecTy->getVectorNumElements();
    auto *SubVecTy = VectorType::get(VecTy->getScalarType(), NumElts / Factor);

    // vldN/vstN only support legal vector types of size 64 or 128 in bits.
    // Accesses having vector types that are a multiple of 128 bits can be
    // matched to more than one vldN/vstN instruction.
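    // (For instance, a <16 x i32> access with Factor == 4 yields a v4i32
    // SubVecTy; since that legalizes to a single 128-bit access, the cost
    // comes out as Factor times one vld4/vst4.)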
    if (NumElts % Factor == 0 &&
        TLI->isLegalInterleavedAccessType(SubVecTy, DL))
      return Factor * TLI->getNumInterleavedAccesses(SubVecTy, DL);
  }

  return BaseT::getInterleavedMemoryOpCost(Opcode, VecTy, Factor, Indices,
                                           Alignment, AddressSpace);
}

void ARMTTIImpl::getUnrollingPreferences(Loop *L, ScalarEvolution &SE,
                                         TTI::UnrollingPreferences &UP) {
  // Only currently enable these preferences for M-Class cores.
  if (!ST->isMClass())
    return BasicTTIImplBase::getUnrollingPreferences(L, SE, UP);

  // Disable loop unrolling for Oz and Os.
  UP.OptSizeThreshold = 0;
  UP.PartialOptSizeThreshold = 0;
  if (L->getHeader()->getParent()->optForSize())
    return;

  // Only enable on Thumb-2 targets.
  if (!ST->isThumb2())
    return;

  SmallVector<BasicBlock*, 4> ExitingBlocks;
  L->getExitingBlocks(ExitingBlocks);
  DEBUG(dbgs() << "Loop has:\n"
               << "Blocks: " << L->getNumBlocks() << "\n"
               << "Exit blocks: " << ExitingBlocks.size() << "\n");

  // Only allow one exit other than the latch. This acts as an early exit, as
  // it mirrors the profitability calculation of the runtime unroller.
  if (ExitingBlocks.size() > 2)
    return;

  // Limit the CFG of the loop body for targets with a branch predictor.
  // Allowing 4 blocks permits if-then-else diamonds in the body.
  if (ST->hasBranchPredictor() && L->getNumBlocks() > 4)
    return;

  // Scan the loop: don't unroll loops with calls as this could prevent
  // inlining.
  unsigned Cost = 0;
  for (auto *BB : L->getBlocks()) {
    for (auto &I : *BB) {
      if (isa<CallInst>(I) || isa<InvokeInst>(I)) {
        ImmutableCallSite CS(&I);
        if (const Function *F = CS.getCalledFunction()) {
          if (!isLoweredToCall(F))
            continue;
        }
        return;
      }
      SmallVector<const Value*, 4> Operands(I.value_op_begin(),
                                            I.value_op_end());
      Cost += getUserCost(&I, Operands);
    }
  }

  DEBUG(dbgs() << "Cost of loop: " << Cost << "\n");

  UP.Partial = true;
  UP.Runtime = true;
  UP.UnrollRemainder = true;
  UP.DefaultUnrollRuntimeCount = 4;

  // Force-unrolling small loops can be very useful because of the branch
  // taken cost of the backedge.
  if (Cost < 12)
    UP.Force = true;
}