//===-- ARMTargetTransformInfo.cpp - ARM specific TTI ---------------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//

#include "ARMTargetTransformInfo.h"
#include "llvm/Support/Debug.h"
#include "llvm/Target/CostTable.h"
#include "llvm/Target/TargetLowering.h"
using namespace llvm;

#define DEBUG_TYPE "armtti"

int ARMTTIImpl::getIntImmCost(const APInt &Imm, Type *Ty) {
  assert(Ty->isIntegerTy());

  unsigned Bits = Ty->getPrimitiveSizeInBits();
  if (Bits == 0 || Imm.getActiveBits() >= 64)
    return 4;

  int64_t SImmVal = Imm.getSExtValue();
  uint64_t ZImmVal = Imm.getZExtValue();
  if (!ST->isThumb()) {
    if ((SImmVal >= 0 && SImmVal < 65536) ||
        (ARM_AM::getSOImmVal(ZImmVal) != -1) ||
        (ARM_AM::getSOImmVal(~ZImmVal) != -1))
      return 1;
    return ST->hasV6T2Ops() ? 2 : 3;
  }
  if (ST->isThumb2()) {
    if ((SImmVal >= 0 && SImmVal < 65536) ||
        (ARM_AM::getT2SOImmVal(ZImmVal) != -1) ||
        (ARM_AM::getT2SOImmVal(~ZImmVal) != -1))
      return 1;
    return ST->hasV6T2Ops() ? 2 : 3;
  }
  // Thumb1.
  if (SImmVal >= 0 && SImmVal < 256)
    return 1;
  if ((~SImmVal < 256) || ARM_AM::isThumbImmShiftedVal(ZImmVal))
    return 2;
  // Load from constantpool.
  return 3;
}

// Constants smaller than 256 fit in the immediate field of Thumb1
// instructions, so we return a cost of zero for them and one otherwise.
int ARMTTIImpl::getIntImmCodeSizeCost(unsigned Opcode, unsigned Idx,
                                      const APInt &Imm, Type *Ty) {
  if (Imm.isNonNegative() && Imm.getLimitedValue() < 256)
    return 0;

  return 1;
}

int ARMTTIImpl::getIntImmCost(unsigned Opcode, unsigned Idx, const APInt &Imm,
                              Type *Ty) {
  // Division by a constant can be turned into multiplication, but only if we
  // know it's constant. So it's not so much that the immediate is cheap (it's
  // not), but that the alternative is worse.
  // FIXME: this is probably unneeded with GlobalISel.
  if ((Opcode == Instruction::SDiv || Opcode == Instruction::UDiv ||
       Opcode == Instruction::SRem || Opcode == Instruction::URem) &&
      Idx == 1)
    return 0;

  if (Opcode == Instruction::And)
    // Conversion to BIC is free, and means we can use ~Imm instead.
    return std::min(getIntImmCost(Imm, Ty), getIntImmCost(~Imm, Ty));

  if (Opcode == Instruction::Add)
    // Conversion to SUB is free, and means we can use -Imm instead.
    return std::min(getIntImmCost(Imm, Ty), getIntImmCost(-Imm, Ty));

  if (Opcode == Instruction::ICmp && Imm.isNegative() &&
      Ty->getIntegerBitWidth() == 32) {
    int64_t NegImm = -Imm.getSExtValue();
    if (ST->isThumb2() && NegImm < 1<<12)
      // icmp X, #-C -> cmn X, #C
      return 0;
    if (ST->isThumb() && NegImm < 1<<8)
      // icmp X, #-C -> adds X, #C
      return 0;
  }

  return getIntImmCost(Imm, Ty);
}

int ARMTTIImpl::getCastInstrCost(unsigned Opcode, Type *Dst, Type *Src) {
  int ISD = TLI->InstructionOpcodeToISD(Opcode);
  assert(ISD && "Invalid opcode");

  // Single to/from double precision conversions.
  static const CostTblEntry NEONFltDblTbl[] = {
    // Vector fptrunc/fpext conversions.
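    // As a rough illustration (an estimate, not taken from a scheduler
    // model): a v2f64 -> v2f32 fptrunc has no single NEON instruction and is
    // expected to lower to two scalar VCVT.F32.F64 operations, which is
    // where the cost of 2 comes from.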
    { ISD::FP_ROUND,  MVT::v2f64, 2 },
    { ISD::FP_EXTEND, MVT::v2f32, 2 },
    { ISD::FP_EXTEND, MVT::v4f32, 4 }
  };

  if (Src->isVectorTy() && ST->hasNEON() && (ISD == ISD::FP_ROUND ||
                                             ISD == ISD::FP_EXTEND)) {
    std::pair<int, MVT> LT = TLI->getTypeLegalizationCost(DL, Src);
    if (const auto *Entry = CostTableLookup(NEONFltDblTbl, ISD, LT.second))
      return LT.first * Entry->Cost;
  }

  EVT SrcTy = TLI->getValueType(DL, Src);
  EVT DstTy = TLI->getValueType(DL, Dst);

  if (!SrcTy.isSimple() || !DstTy.isSimple())
    return BaseT::getCastInstrCost(Opcode, Dst, Src);

  // Some arithmetic, load and store operations have specific instructions
  // to cast up/down their types automatically at no extra cost.
  // TODO: Get these tables to know at least what the related operations are.
  static const TypeConversionCostTblEntry NEONVectorConversionTbl[] = {
    { ISD::SIGN_EXTEND, MVT::v4i32, MVT::v4i16, 0 },
    { ISD::ZERO_EXTEND, MVT::v4i32, MVT::v4i16, 0 },
    { ISD::SIGN_EXTEND, MVT::v2i64, MVT::v2i32, 1 },
    { ISD::ZERO_EXTEND, MVT::v2i64, MVT::v2i32, 1 },
    { ISD::TRUNCATE,    MVT::v4i32, MVT::v4i64, 0 },
    { ISD::TRUNCATE,    MVT::v4i16, MVT::v4i32, 1 },

    // The number of vmovl instructions for the extension.
    { ISD::SIGN_EXTEND, MVT::v4i64, MVT::v4i16, 3 },
    { ISD::ZERO_EXTEND, MVT::v4i64, MVT::v4i16, 3 },
    { ISD::SIGN_EXTEND, MVT::v8i32, MVT::v8i8, 3 },
    { ISD::ZERO_EXTEND, MVT::v8i32, MVT::v8i8, 3 },
    { ISD::SIGN_EXTEND, MVT::v8i64, MVT::v8i8, 7 },
    { ISD::ZERO_EXTEND, MVT::v8i64, MVT::v8i8, 7 },
    { ISD::SIGN_EXTEND, MVT::v8i64, MVT::v8i16, 6 },
    { ISD::ZERO_EXTEND, MVT::v8i64, MVT::v8i16, 6 },
    { ISD::SIGN_EXTEND, MVT::v16i32, MVT::v16i8, 6 },
    { ISD::ZERO_EXTEND, MVT::v16i32, MVT::v16i8, 6 },

    // Operations that we legalize using splitting.
    { ISD::TRUNCATE,    MVT::v16i8, MVT::v16i32, 6 },
    { ISD::TRUNCATE,    MVT::v8i8, MVT::v8i32, 3 },

    // Vector float <-> i32 conversions.
    { ISD::SINT_TO_FP,  MVT::v4f32, MVT::v4i32, 1 },
    { ISD::UINT_TO_FP,  MVT::v4f32, MVT::v4i32, 1 },

    { ISD::SINT_TO_FP,  MVT::v2f32, MVT::v2i8, 3 },
    { ISD::UINT_TO_FP,  MVT::v2f32, MVT::v2i8, 3 },
    { ISD::SINT_TO_FP,  MVT::v2f32, MVT::v2i16, 2 },
    { ISD::UINT_TO_FP,  MVT::v2f32, MVT::v2i16, 2 },
    { ISD::SINT_TO_FP,  MVT::v2f32, MVT::v2i32, 1 },
    { ISD::UINT_TO_FP,  MVT::v2f32, MVT::v2i32, 1 },
    { ISD::SINT_TO_FP,  MVT::v4f32, MVT::v4i1, 3 },
    { ISD::UINT_TO_FP,  MVT::v4f32, MVT::v4i1, 3 },
    { ISD::SINT_TO_FP,  MVT::v4f32, MVT::v4i8, 3 },
    { ISD::UINT_TO_FP,  MVT::v4f32, MVT::v4i8, 3 },
    { ISD::SINT_TO_FP,  MVT::v4f32, MVT::v4i16, 2 },
    { ISD::UINT_TO_FP,  MVT::v4f32, MVT::v4i16, 2 },
    { ISD::SINT_TO_FP,  MVT::v8f32, MVT::v8i16, 4 },
    { ISD::UINT_TO_FP,  MVT::v8f32, MVT::v8i16, 4 },
    { ISD::SINT_TO_FP,  MVT::v8f32, MVT::v8i32, 2 },
    { ISD::UINT_TO_FP,  MVT::v8f32, MVT::v8i32, 2 },
    { ISD::SINT_TO_FP,  MVT::v16f32, MVT::v16i16, 8 },
    { ISD::UINT_TO_FP,  MVT::v16f32, MVT::v16i16, 8 },
    { ISD::SINT_TO_FP,  MVT::v16f32, MVT::v16i32, 4 },
    { ISD::UINT_TO_FP,  MVT::v16f32, MVT::v16i32, 4 },

    { ISD::FP_TO_SINT,  MVT::v4i32, MVT::v4f32, 1 },
    { ISD::FP_TO_UINT,  MVT::v4i32, MVT::v4f32, 1 },
    { ISD::FP_TO_SINT,  MVT::v4i8, MVT::v4f32, 3 },
    { ISD::FP_TO_UINT,  MVT::v4i8, MVT::v4f32, 3 },
    { ISD::FP_TO_SINT,  MVT::v4i16, MVT::v4f32, 2 },
    { ISD::FP_TO_UINT,  MVT::v4i16, MVT::v4f32, 2 },

    // Vector double <-> i32 conversions.
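    // NEON has no double-precision arithmetic, so these conversions are
    // assumed to go through per-element VFP VCVTs; treat the entries below
    // as estimates of those sequences rather than exact instruction counts.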
    { ISD::SINT_TO_FP, MVT::v2f64, MVT::v2i32, 2 },
    { ISD::UINT_TO_FP, MVT::v2f64, MVT::v2i32, 2 },

    { ISD::SINT_TO_FP, MVT::v2f64, MVT::v2i8, 4 },
    { ISD::UINT_TO_FP, MVT::v2f64, MVT::v2i8, 4 },
    { ISD::SINT_TO_FP, MVT::v2f64, MVT::v2i16, 3 },
    { ISD::UINT_TO_FP, MVT::v2f64, MVT::v2i16, 3 },

    { ISD::FP_TO_SINT, MVT::v2i32, MVT::v2f64, 2 },
    { ISD::FP_TO_UINT, MVT::v2i32, MVT::v2f64, 2 },
    { ISD::FP_TO_SINT, MVT::v8i16, MVT::v8f32, 4 },
    { ISD::FP_TO_UINT, MVT::v8i16, MVT::v8f32, 4 },
    { ISD::FP_TO_SINT, MVT::v16i16, MVT::v16f32, 8 },
    { ISD::FP_TO_UINT, MVT::v16i16, MVT::v16f32, 8 }
  };

  if (SrcTy.isVector() && ST->hasNEON()) {
    if (const auto *Entry = ConvertCostTableLookup(NEONVectorConversionTbl, ISD,
                                                   DstTy.getSimpleVT(),
                                                   SrcTy.getSimpleVT()))
      return Entry->Cost;
  }

  // Scalar float to integer conversions.
  static const TypeConversionCostTblEntry NEONFloatConversionTbl[] = {
    { ISD::FP_TO_SINT, MVT::i1, MVT::f32, 2 },
    { ISD::FP_TO_UINT, MVT::i1, MVT::f32, 2 },
    { ISD::FP_TO_SINT, MVT::i1, MVT::f64, 2 },
    { ISD::FP_TO_UINT, MVT::i1, MVT::f64, 2 },
    { ISD::FP_TO_SINT, MVT::i8, MVT::f32, 2 },
    { ISD::FP_TO_UINT, MVT::i8, MVT::f32, 2 },
    { ISD::FP_TO_SINT, MVT::i8, MVT::f64, 2 },
    { ISD::FP_TO_UINT, MVT::i8, MVT::f64, 2 },
    { ISD::FP_TO_SINT, MVT::i16, MVT::f32, 2 },
    { ISD::FP_TO_UINT, MVT::i16, MVT::f32, 2 },
    { ISD::FP_TO_SINT, MVT::i16, MVT::f64, 2 },
    { ISD::FP_TO_UINT, MVT::i16, MVT::f64, 2 },
    { ISD::FP_TO_SINT, MVT::i32, MVT::f32, 2 },
    { ISD::FP_TO_UINT, MVT::i32, MVT::f32, 2 },
    { ISD::FP_TO_SINT, MVT::i32, MVT::f64, 2 },
    { ISD::FP_TO_UINT, MVT::i32, MVT::f64, 2 },
    { ISD::FP_TO_SINT, MVT::i64, MVT::f32, 10 },
    { ISD::FP_TO_UINT, MVT::i64, MVT::f32, 10 },
    { ISD::FP_TO_SINT, MVT::i64, MVT::f64, 10 },
    { ISD::FP_TO_UINT, MVT::i64, MVT::f64, 10 }
  };
  if (SrcTy.isFloatingPoint() && ST->hasNEON()) {
    if (const auto *Entry = ConvertCostTableLookup(NEONFloatConversionTbl, ISD,
                                                   DstTy.getSimpleVT(),
                                                   SrcTy.getSimpleVT()))
      return Entry->Cost;
  }

  // Scalar integer to float conversions.
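  // The 32-bit-or-narrower cases below cost 2 because the source value must
  // first be transferred from a core register to a VFP register (VMOV)
  // before the VCVT can execute; i64 sources are assumed to expand into much
  // longer sequences, hence the cost of 10.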
  static const TypeConversionCostTblEntry NEONIntegerConversionTbl[] = {
    { ISD::SINT_TO_FP, MVT::f32, MVT::i1, 2 },
    { ISD::UINT_TO_FP, MVT::f32, MVT::i1, 2 },
    { ISD::SINT_TO_FP, MVT::f64, MVT::i1, 2 },
    { ISD::UINT_TO_FP, MVT::f64, MVT::i1, 2 },
    { ISD::SINT_TO_FP, MVT::f32, MVT::i8, 2 },
    { ISD::UINT_TO_FP, MVT::f32, MVT::i8, 2 },
    { ISD::SINT_TO_FP, MVT::f64, MVT::i8, 2 },
    { ISD::UINT_TO_FP, MVT::f64, MVT::i8, 2 },
    { ISD::SINT_TO_FP, MVT::f32, MVT::i16, 2 },
    { ISD::UINT_TO_FP, MVT::f32, MVT::i16, 2 },
    { ISD::SINT_TO_FP, MVT::f64, MVT::i16, 2 },
    { ISD::UINT_TO_FP, MVT::f64, MVT::i16, 2 },
    { ISD::SINT_TO_FP, MVT::f32, MVT::i32, 2 },
    { ISD::UINT_TO_FP, MVT::f32, MVT::i32, 2 },
    { ISD::SINT_TO_FP, MVT::f64, MVT::i32, 2 },
    { ISD::UINT_TO_FP, MVT::f64, MVT::i32, 2 },
    { ISD::SINT_TO_FP, MVT::f32, MVT::i64, 10 },
    { ISD::UINT_TO_FP, MVT::f32, MVT::i64, 10 },
    { ISD::SINT_TO_FP, MVT::f64, MVT::i64, 10 },
    { ISD::UINT_TO_FP, MVT::f64, MVT::i64, 10 }
  };

  if (SrcTy.isInteger() && ST->hasNEON()) {
    if (const auto *Entry = ConvertCostTableLookup(NEONIntegerConversionTbl,
                                                   ISD, DstTy.getSimpleVT(),
                                                   SrcTy.getSimpleVT()))
      return Entry->Cost;
  }

  // Scalar integer conversion costs.
  static const TypeConversionCostTblEntry ARMIntegerConversionTbl[] = {
    // i16 -> i64 requires two dependent operations.
    { ISD::SIGN_EXTEND, MVT::i64, MVT::i16, 2 },

    // Truncates on i64 are assumed to be free.
    { ISD::TRUNCATE,    MVT::i32, MVT::i64, 0 },
    { ISD::TRUNCATE,    MVT::i16, MVT::i64, 0 },
    { ISD::TRUNCATE,    MVT::i8,  MVT::i64, 0 },
    { ISD::TRUNCATE,    MVT::i1,  MVT::i64, 0 }
  };

  if (SrcTy.isInteger()) {
    if (const auto *Entry = ConvertCostTableLookup(ARMIntegerConversionTbl, ISD,
                                                   DstTy.getSimpleVT(),
                                                   SrcTy.getSimpleVT()))
      return Entry->Cost;
  }

  return BaseT::getCastInstrCost(Opcode, Dst, Src);
}

int ARMTTIImpl::getVectorInstrCost(unsigned Opcode, Type *ValTy,
                                   unsigned Index) {
  // Penalize inserting into a D-subregister. We end up with a three times
  // lower estimated throughput on Swift.
  if (ST->hasSlowLoadDSubregister() && Opcode == Instruction::InsertElement &&
      ValTy->isVectorTy() && ValTy->getScalarSizeInBits() <= 32)
    return 3;

  if ((Opcode == Instruction::InsertElement ||
       Opcode == Instruction::ExtractElement)) {
    // Cross-class copies are expensive on many microarchitectures,
    // so assume they are expensive by default.
    if (ValTy->getVectorElementType()->isIntegerTy())
      return 3;

    // Even if it's not a cross-class copy, this likely leads to mixing
    // of NEON and VFP code and should therefore be penalized.
    if (ValTy->isVectorTy() &&
        ValTy->getScalarSizeInBits() <= 32)
      return std::max(BaseT::getVectorInstrCost(Opcode, ValTy, Index), 2U);
  }

  return BaseT::getVectorInstrCost(Opcode, ValTy, Index);
}

int ARMTTIImpl::getCmpSelInstrCost(unsigned Opcode, Type *ValTy, Type *CondTy) {
  int ISD = TLI->InstructionOpcodeToISD(Opcode);
  // On NEON a vector select gets lowered to vbsl.
  if (ST->hasNEON() && ValTy->isVectorTy() && ISD == ISD::SELECT) {
    // Lowering of some vector selects is currently far from perfect.
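    // The entries below are deliberately pessimistic estimates: wide i64
    // selects are split into many d-register operations, plus the code
    // needed to widen the i1 condition vector, so they are rated as very
    // expensive.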
    static const TypeConversionCostTblEntry NEONVectorSelectTbl[] = {
      { ISD::SELECT, MVT::v4i1,  MVT::v4i64,  4*4 + 1*2 + 1 },
      { ISD::SELECT, MVT::v8i1,  MVT::v8i64,  50 },
      { ISD::SELECT, MVT::v16i1, MVT::v16i64, 100 }
    };

    EVT SelCondTy = TLI->getValueType(DL, CondTy);
    EVT SelValTy = TLI->getValueType(DL, ValTy);
    if (SelCondTy.isSimple() && SelValTy.isSimple()) {
      if (const auto *Entry = ConvertCostTableLookup(NEONVectorSelectTbl, ISD,
                                                     SelCondTy.getSimpleVT(),
                                                     SelValTy.getSimpleVT()))
        return Entry->Cost;
    }

    std::pair<int, MVT> LT = TLI->getTypeLegalizationCost(DL, ValTy);
    return LT.first;
  }

  return BaseT::getCmpSelInstrCost(Opcode, ValTy, CondTy);
}

int ARMTTIImpl::getAddressComputationCost(Type *Ty, bool IsComplex) {
  // Address computations in vectorized code with non-consecutive addresses
  // will likely result in more instructions compared to scalar code where
  // the computation can more often be merged into the index mode. The
  // resulting extra micro-ops can significantly decrease throughput.
  unsigned NumVectorInstToHideOverhead = 10;

  if (Ty->isVectorTy() && IsComplex)
    return NumVectorInstToHideOverhead;

  // In many cases the address computation is not merged into the instruction
  // addressing mode.
  return 1;
}

int ARMTTIImpl::getFPOpCost(Type *Ty) {
  // Use logic similar to that in ARMISelLowering:
  // Any ARM CPU with VFP2 has floating point, but Thumb1 didn't have access
  // to VFP.

  if (ST->hasVFP2() && !ST->isThumb1Only()) {
    if (Ty->isFloatTy()) {
      return TargetTransformInfo::TCC_Basic;
    }

    if (Ty->isDoubleTy()) {
      return ST->isFPOnlySP() ? TargetTransformInfo::TCC_Expensive :
        TargetTransformInfo::TCC_Basic;
    }
  }

  return TargetTransformInfo::TCC_Expensive;
}

int ARMTTIImpl::getShuffleCost(TTI::ShuffleKind Kind, Type *Tp, int Index,
                               Type *SubTp) {
  // We only handle costs of reverse and alternate shuffles for now.
  if (Kind != TTI::SK_Reverse && Kind != TTI::SK_Alternate)
    return BaseT::getShuffleCost(Kind, Tp, Index, SubTp);

  if (Kind == TTI::SK_Reverse) {
    static const CostTblEntry NEONShuffleTbl[] = {
        // Reverse shuffle costs one instruction if we are shuffling within a
        // double word (vrev) or two if we shuffle a quad word (vrev, vext).
        {ISD::VECTOR_SHUFFLE, MVT::v2i32, 1},
        {ISD::VECTOR_SHUFFLE, MVT::v2f32, 1},
        {ISD::VECTOR_SHUFFLE, MVT::v2i64, 1},
        {ISD::VECTOR_SHUFFLE, MVT::v2f64, 1},

        {ISD::VECTOR_SHUFFLE, MVT::v4i32, 2},
        {ISD::VECTOR_SHUFFLE, MVT::v4f32, 2},
        {ISD::VECTOR_SHUFFLE, MVT::v8i16, 2},
        {ISD::VECTOR_SHUFFLE, MVT::v16i8, 2}};

    std::pair<int, MVT> LT = TLI->getTypeLegalizationCost(DL, Tp);

    if (const auto *Entry = CostTableLookup(NEONShuffleTbl, ISD::VECTOR_SHUFFLE,
                                            LT.second))
      return LT.first * Entry->Cost;

    return BaseT::getShuffleCost(Kind, Tp, Index, SubTp);
  }
  if (Kind == TTI::SK_Alternate) {
    static const CostTblEntry NEONAltShuffleTbl[] = {
        // Alternate shuffle cost table for ARM. Cost is the number of
        // instructions required to create the shuffled vector.
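        // The one- and two-instruction entries below are consistent with
        // VTRN/VUZP-style lowerings; the v8i16 and v16i8 costs of 16 and 32
        // match full scalarization (an extract plus an insert per lane),
        // though all of these numbers are estimates.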
        {ISD::VECTOR_SHUFFLE, MVT::v2f32, 1},
        {ISD::VECTOR_SHUFFLE, MVT::v2i64, 1},
        {ISD::VECTOR_SHUFFLE, MVT::v2f64, 1},
        {ISD::VECTOR_SHUFFLE, MVT::v2i32, 1},

        {ISD::VECTOR_SHUFFLE, MVT::v4i32, 2},
        {ISD::VECTOR_SHUFFLE, MVT::v4f32, 2},
        {ISD::VECTOR_SHUFFLE, MVT::v4i16, 2},

        {ISD::VECTOR_SHUFFLE, MVT::v8i16, 16},

        {ISD::VECTOR_SHUFFLE, MVT::v16i8, 32}};

    std::pair<int, MVT> LT = TLI->getTypeLegalizationCost(DL, Tp);
    if (const auto *Entry = CostTableLookup(NEONAltShuffleTbl,
                                            ISD::VECTOR_SHUFFLE, LT.second))
      return LT.first * Entry->Cost;
    return BaseT::getShuffleCost(Kind, Tp, Index, SubTp);
  }
  return BaseT::getShuffleCost(Kind, Tp, Index, SubTp);
}

int ARMTTIImpl::getArithmeticInstrCost(
    unsigned Opcode, Type *Ty, TTI::OperandValueKind Op1Info,
    TTI::OperandValueKind Op2Info, TTI::OperandValueProperties Opd1PropInfo,
    TTI::OperandValueProperties Opd2PropInfo) {

  int ISDOpcode = TLI->InstructionOpcodeToISD(Opcode);
  std::pair<int, MVT> LT = TLI->getTypeLegalizationCost(DL, Ty);

  const unsigned FunctionCallDivCost = 20;
  const unsigned ReciprocalDivCost = 10;
  static const CostTblEntry CostTbl[] = {
    // Division.
    // These costs are somewhat arbitrary. Choose a cost of 20 to indicate
    // that vectorizing division (which adds function calls) is going to be
    // very expensive.
    // Double register types.
    { ISD::SDIV, MVT::v1i64, 1 * FunctionCallDivCost},
    { ISD::UDIV, MVT::v1i64, 1 * FunctionCallDivCost},
    { ISD::SREM, MVT::v1i64, 1 * FunctionCallDivCost},
    { ISD::UREM, MVT::v1i64, 1 * FunctionCallDivCost},
    { ISD::SDIV, MVT::v2i32, 2 * FunctionCallDivCost},
    { ISD::UDIV, MVT::v2i32, 2 * FunctionCallDivCost},
    { ISD::SREM, MVT::v2i32, 2 * FunctionCallDivCost},
    { ISD::UREM, MVT::v2i32, 2 * FunctionCallDivCost},
    { ISD::SDIV, MVT::v4i16, ReciprocalDivCost},
    { ISD::UDIV, MVT::v4i16, ReciprocalDivCost},
    { ISD::SREM, MVT::v4i16, 4 * FunctionCallDivCost},
    { ISD::UREM, MVT::v4i16, 4 * FunctionCallDivCost},
    { ISD::SDIV, MVT::v8i8, ReciprocalDivCost},
    { ISD::UDIV, MVT::v8i8, ReciprocalDivCost},
    { ISD::SREM, MVT::v8i8, 8 * FunctionCallDivCost},
    { ISD::UREM, MVT::v8i8, 8 * FunctionCallDivCost},
    // Quad register types.
    { ISD::SDIV, MVT::v2i64, 2 * FunctionCallDivCost},
    { ISD::UDIV, MVT::v2i64, 2 * FunctionCallDivCost},
    { ISD::SREM, MVT::v2i64, 2 * FunctionCallDivCost},
    { ISD::UREM, MVT::v2i64, 2 * FunctionCallDivCost},
    { ISD::SDIV, MVT::v4i32, 4 * FunctionCallDivCost},
    { ISD::UDIV, MVT::v4i32, 4 * FunctionCallDivCost},
    { ISD::SREM, MVT::v4i32, 4 * FunctionCallDivCost},
    { ISD::UREM, MVT::v4i32, 4 * FunctionCallDivCost},
    { ISD::SDIV, MVT::v8i16, 8 * FunctionCallDivCost},
    { ISD::UDIV, MVT::v8i16, 8 * FunctionCallDivCost},
    { ISD::SREM, MVT::v8i16, 8 * FunctionCallDivCost},
    { ISD::UREM, MVT::v8i16, 8 * FunctionCallDivCost},
    { ISD::SDIV, MVT::v16i8, 16 * FunctionCallDivCost},
    { ISD::UDIV, MVT::v16i8, 16 * FunctionCallDivCost},
    { ISD::SREM, MVT::v16i8, 16 * FunctionCallDivCost},
    { ISD::UREM, MVT::v16i8, 16 * FunctionCallDivCost},
    // Multiplication.
  };

  if (ST->hasNEON())
    if (const auto *Entry = CostTableLookup(CostTbl, ISDOpcode, LT.second))
      return LT.first * Entry->Cost;

  int Cost = BaseT::getArithmeticInstrCost(Opcode, Ty, Op1Info, Op2Info,
                                           Opd1PropInfo, Opd2PropInfo);

  // This is somewhat of a hack. The problem we are facing is that SROA
  // creates sequences of shift, and, and or instructions to construct
  // values. ISel recognizes these sequences and assigns them zero cost, but
  // the vectorized code gets no such treatment. Because we have support for
  // v2i64 but not i64, those sequences look particularly beneficial to
  // vectorize. To work around this we increase the cost of v2i64 operations
  // to make them seem less beneficial.
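  // For instance (an illustrative sketch, not a pattern taken verbatim from
  // SROA output), a value reconstructed as
  //   %hi = shl i64 %a, 32
  //   %lo = and i64 %b, 4294967295
  //   %v  = or i64 %hi, %lo
  // folds to nothing during scalar ISel, while the equivalent v2i64 sequence
  // is costed at full price.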
  if (LT.second == MVT::v2i64 &&
      Op2Info == TargetTransformInfo::OK_UniformConstantValue)
    Cost += 4;

  return Cost;
}

int ARMTTIImpl::getMemoryOpCost(unsigned Opcode, Type *Src, unsigned Alignment,
                                unsigned AddressSpace) {
  std::pair<int, MVT> LT = TLI->getTypeLegalizationCost(DL, Src);

  if (Src->isVectorTy() && Alignment != 16 &&
      Src->getVectorElementType()->isDoubleTy()) {
    // Unaligned loads/stores are extremely inefficient.
    // We need 4 uops for vst.1/vld.1 vs. 1 uop for vldr/vstr.
    return LT.first * 4;
  }
  return LT.first;
}

int ARMTTIImpl::getInterleavedMemoryOpCost(unsigned Opcode, Type *VecTy,
                                           unsigned Factor,
                                           ArrayRef<unsigned> Indices,
                                           unsigned Alignment,
                                           unsigned AddressSpace) {
  assert(Factor >= 2 && "Invalid interleave factor");
  assert(isa<VectorType>(VecTy) && "Expect a vector type");

  // vldN/vstN don't support vector types with i64/f64 elements.
  bool EltIs64Bits = DL.getTypeSizeInBits(VecTy->getScalarType()) == 64;

  if (Factor <= TLI->getMaxSupportedInterleaveFactor() && !EltIs64Bits) {
    unsigned NumElts = VecTy->getVectorNumElements();
    Type *SubVecTy = VectorType::get(VecTy->getScalarType(), NumElts / Factor);
    unsigned SubVecSize = DL.getTypeSizeInBits(SubVecTy);

    // vldN/vstN only support legal vector types whose size is 64 or 128 bits.
    if (NumElts % Factor == 0 && (SubVecSize == 64 || SubVecSize == 128))
      return Factor;
  }

  return BaseT::getInterleavedMemoryOpCost(Opcode, VecTy, Factor, Indices,
                                           Alignment, AddressSpace);
}