//===-- ARMTargetTransformInfo.cpp - ARM specific TTI ---------------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//

#include "ARMTargetTransformInfo.h"
#include "llvm/Support/Debug.h"
#include "llvm/Target/CostTable.h"
#include "llvm/Target/TargetLowering.h"
using namespace llvm;

#define DEBUG_TYPE "armtti"

int ARMTTIImpl::getIntImmCost(const APInt &Imm, Type *Ty) {
  assert(Ty->isIntegerTy());

  unsigned Bits = Ty->getPrimitiveSizeInBits();
  if (Bits == 0 || Bits > 64)
    return 4;

  int64_t SImmVal = Imm.getSExtValue();
  uint64_t ZImmVal = Imm.getZExtValue();
  if (!ST->isThumb()) {
    if ((SImmVal >= 0 && SImmVal < 65536) ||
        (ARM_AM::getSOImmVal(ZImmVal) != -1) ||
        (ARM_AM::getSOImmVal(~ZImmVal) != -1))
      return 1;
    return ST->hasV6T2Ops() ? 2 : 3;
  }
  if (ST->isThumb2()) {
    if ((SImmVal >= 0 && SImmVal < 65536) ||
        (ARM_AM::getT2SOImmVal(ZImmVal) != -1) ||
        (ARM_AM::getT2SOImmVal(~ZImmVal) != -1))
      return 1;
    return ST->hasV6T2Ops() ? 2 : 3;
  }
  // Thumb1.
  if (SImmVal >= 0 && SImmVal < 256)
    return 1;
  if ((~ZImmVal < 256) || ARM_AM::isThumbImmShiftedVal(ZImmVal))
    return 2;
  // Load from constant pool.
  return 3;
}

int ARMTTIImpl::getIntImmCost(unsigned Opcode, unsigned Idx, const APInt &Imm,
                              Type *Ty) {
  // Division by a constant can be turned into multiplication, but only if we
  // know it's constant. So it's not so much that the immediate is cheap (it's
  // not), but that the alternative is worse.
  // FIXME: this is probably unneeded with GlobalISel.
  if ((Opcode == Instruction::SDiv || Opcode == Instruction::UDiv ||
       Opcode == Instruction::SRem || Opcode == Instruction::URem) &&
      Idx == 1)
    return 0;

  return getIntImmCost(Imm, Ty);
}

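// Note on the conversion costs below: the single<->double precision table is
// keyed on the legalized source type and scaled by the number of legal-sized
// pieces, so e.g. an fpext from <8 x float> is legalized as two <4 x float>
// halves and costed at roughly 2 * 4 = 8. The remaining conversion tables are
// keyed directly on the simple source/destination types.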
int ARMTTIImpl::getCastInstrCost(unsigned Opcode, Type *Dst, Type *Src) {
  int ISD = TLI->InstructionOpcodeToISD(Opcode);
  assert(ISD && "Invalid opcode");

  // Single to/from double precision conversions.
  static const CostTblEntry NEONFltDblTbl[] = {
    // Vector fptrunc/fpext conversions.
    { ISD::FP_ROUND, MVT::v2f64, 2 },
    { ISD::FP_EXTEND, MVT::v2f32, 2 },
    { ISD::FP_EXTEND, MVT::v4f32, 4 }
  };

  if (Src->isVectorTy() && ST->hasNEON() && (ISD == ISD::FP_ROUND ||
                                             ISD == ISD::FP_EXTEND)) {
    std::pair<int, MVT> LT = TLI->getTypeLegalizationCost(DL, Src);
    if (const auto *Entry = CostTableLookup(NEONFltDblTbl, ISD, LT.second))
      return LT.first * Entry->Cost;
  }

  EVT SrcTy = TLI->getValueType(DL, Src);
  EVT DstTy = TLI->getValueType(DL, Dst);

  if (!SrcTy.isSimple() || !DstTy.isSimple())
    return BaseT::getCastInstrCost(Opcode, Dst, Src);

  // Some arithmetic, load and store operations have specific instructions
  // to cast up/down their types automatically at no extra cost.
  // TODO: Get these tables to know at least what the related operations are.
  static const TypeConversionCostTblEntry NEONVectorConversionTbl[] = {
    { ISD::SIGN_EXTEND, MVT::v4i32, MVT::v4i16, 0 },
    { ISD::ZERO_EXTEND, MVT::v4i32, MVT::v4i16, 0 },
    { ISD::SIGN_EXTEND, MVT::v2i64, MVT::v2i32, 1 },
    { ISD::ZERO_EXTEND, MVT::v2i64, MVT::v2i32, 1 },
    { ISD::TRUNCATE,    MVT::v4i32, MVT::v4i64, 0 },
    { ISD::TRUNCATE,    MVT::v4i16, MVT::v4i32, 1 },

    // The number of vmovl instructions for the extension.
    { ISD::SIGN_EXTEND, MVT::v4i64, MVT::v4i16, 3 },
    { ISD::ZERO_EXTEND, MVT::v4i64, MVT::v4i16, 3 },
    { ISD::SIGN_EXTEND, MVT::v8i32, MVT::v8i8, 3 },
    { ISD::ZERO_EXTEND, MVT::v8i32, MVT::v8i8, 3 },
    { ISD::SIGN_EXTEND, MVT::v8i64, MVT::v8i8, 7 },
    { ISD::ZERO_EXTEND, MVT::v8i64, MVT::v8i8, 7 },
    { ISD::SIGN_EXTEND, MVT::v8i64, MVT::v8i16, 6 },
    { ISD::ZERO_EXTEND, MVT::v8i64, MVT::v8i16, 6 },
    { ISD::SIGN_EXTEND, MVT::v16i32, MVT::v16i8, 6 },
    { ISD::ZERO_EXTEND, MVT::v16i32, MVT::v16i8, 6 },

    // Operations that we legalize using splitting.
    { ISD::TRUNCATE,    MVT::v16i8, MVT::v16i32, 6 },
    { ISD::TRUNCATE,    MVT::v8i8, MVT::v8i32, 3 },

    // Vector float <-> i32 conversions.
    { ISD::SINT_TO_FP,  MVT::v4f32, MVT::v4i32, 1 },
    { ISD::UINT_TO_FP,  MVT::v4f32, MVT::v4i32, 1 },

    { ISD::SINT_TO_FP,  MVT::v2f32, MVT::v2i8, 3 },
    { ISD::UINT_TO_FP,  MVT::v2f32, MVT::v2i8, 3 },
    { ISD::SINT_TO_FP,  MVT::v2f32, MVT::v2i16, 2 },
    { ISD::UINT_TO_FP,  MVT::v2f32, MVT::v2i16, 2 },
    { ISD::SINT_TO_FP,  MVT::v2f32, MVT::v2i32, 1 },
    { ISD::UINT_TO_FP,  MVT::v2f32, MVT::v2i32, 1 },
    { ISD::SINT_TO_FP,  MVT::v4f32, MVT::v4i1, 3 },
    { ISD::UINT_TO_FP,  MVT::v4f32, MVT::v4i1, 3 },
    { ISD::SINT_TO_FP,  MVT::v4f32, MVT::v4i8, 3 },
    { ISD::UINT_TO_FP,  MVT::v4f32, MVT::v4i8, 3 },
    { ISD::SINT_TO_FP,  MVT::v4f32, MVT::v4i16, 2 },
    { ISD::UINT_TO_FP,  MVT::v4f32, MVT::v4i16, 2 },
    { ISD::SINT_TO_FP,  MVT::v8f32, MVT::v8i16, 4 },
    { ISD::UINT_TO_FP,  MVT::v8f32, MVT::v8i16, 4 },
    { ISD::SINT_TO_FP,  MVT::v8f32, MVT::v8i32, 2 },
    { ISD::UINT_TO_FP,  MVT::v8f32, MVT::v8i32, 2 },
    { ISD::SINT_TO_FP,  MVT::v16f32, MVT::v16i16, 8 },
    { ISD::UINT_TO_FP,  MVT::v16f32, MVT::v16i16, 8 },
    { ISD::SINT_TO_FP,  MVT::v16f32, MVT::v16i32, 4 },
    { ISD::UINT_TO_FP,  MVT::v16f32, MVT::v16i32, 4 },

    { ISD::FP_TO_SINT,  MVT::v4i32, MVT::v4f32, 1 },
    { ISD::FP_TO_UINT,  MVT::v4i32, MVT::v4f32, 1 },
    { ISD::FP_TO_SINT,  MVT::v4i8, MVT::v4f32, 3 },
    { ISD::FP_TO_UINT,  MVT::v4i8, MVT::v4f32, 3 },
    { ISD::FP_TO_SINT,  MVT::v4i16, MVT::v4f32, 2 },
    { ISD::FP_TO_UINT,  MVT::v4i16, MVT::v4f32, 2 },

    // Vector double <-> i32 conversions.
    { ISD::SINT_TO_FP,  MVT::v2f64, MVT::v2i32, 2 },
    { ISD::UINT_TO_FP,  MVT::v2f64, MVT::v2i32, 2 },

    { ISD::SINT_TO_FP,  MVT::v2f64, MVT::v2i8, 4 },
    { ISD::UINT_TO_FP,  MVT::v2f64, MVT::v2i8, 4 },
    { ISD::SINT_TO_FP,  MVT::v2f64, MVT::v2i16, 3 },
    { ISD::UINT_TO_FP,  MVT::v2f64, MVT::v2i16, 3 },
    { ISD::SINT_TO_FP,  MVT::v2f64, MVT::v2i32, 2 },
    { ISD::UINT_TO_FP,  MVT::v2f64, MVT::v2i32, 2 },

    { ISD::FP_TO_SINT,  MVT::v2i32, MVT::v2f64, 2 },
    { ISD::FP_TO_UINT,  MVT::v2i32, MVT::v2f64, 2 },
    { ISD::FP_TO_SINT,  MVT::v8i16, MVT::v8f32, 4 },
    { ISD::FP_TO_UINT,  MVT::v8i16, MVT::v8f32, 4 },
    { ISD::FP_TO_SINT,  MVT::v16i16, MVT::v16f32, 8 },
    { ISD::FP_TO_UINT,  MVT::v16i16, MVT::v16f32, 8 }
  };

  if (SrcTy.isVector() && ST->hasNEON()) {
    if (const auto *Entry = ConvertCostTableLookup(NEONVectorConversionTbl, ISD,
                                                   DstTy.getSimpleVT(),
                                                   SrcTy.getSimpleVT()))
      return Entry->Cost;
  }

  // Scalar float to integer conversions.
  static const TypeConversionCostTblEntry NEONFloatConversionTbl[] = {
    { ISD::FP_TO_SINT, MVT::i1, MVT::f32, 2 },
    { ISD::FP_TO_UINT, MVT::i1, MVT::f32, 2 },
    { ISD::FP_TO_SINT, MVT::i1, MVT::f64, 2 },
    { ISD::FP_TO_UINT, MVT::i1, MVT::f64, 2 },
    { ISD::FP_TO_SINT, MVT::i8, MVT::f32, 2 },
    { ISD::FP_TO_UINT, MVT::i8, MVT::f32, 2 },
    { ISD::FP_TO_SINT, MVT::i8, MVT::f64, 2 },
    { ISD::FP_TO_UINT, MVT::i8, MVT::f64, 2 },
    { ISD::FP_TO_SINT, MVT::i16, MVT::f32, 2 },
    { ISD::FP_TO_UINT, MVT::i16, MVT::f32, 2 },
    { ISD::FP_TO_SINT, MVT::i16, MVT::f64, 2 },
    { ISD::FP_TO_UINT, MVT::i16, MVT::f64, 2 },
    { ISD::FP_TO_SINT, MVT::i32, MVT::f32, 2 },
    { ISD::FP_TO_UINT, MVT::i32, MVT::f32, 2 },
    { ISD::FP_TO_SINT, MVT::i32, MVT::f64, 2 },
    { ISD::FP_TO_UINT, MVT::i32, MVT::f64, 2 },
    { ISD::FP_TO_SINT, MVT::i64, MVT::f32, 10 },
    { ISD::FP_TO_UINT, MVT::i64, MVT::f32, 10 },
    { ISD::FP_TO_SINT, MVT::i64, MVT::f64, 10 },
    { ISD::FP_TO_UINT, MVT::i64, MVT::f64, 10 }
  };
  if (SrcTy.isFloatingPoint() && ST->hasNEON()) {
    if (const auto *Entry = ConvertCostTableLookup(NEONFloatConversionTbl, ISD,
                                                   DstTy.getSimpleVT(),
                                                   SrcTy.getSimpleVT()))
      return Entry->Cost;
  }

  // Scalar integer to float conversions.
  static const TypeConversionCostTblEntry NEONIntegerConversionTbl[] = {
    { ISD::SINT_TO_FP, MVT::f32, MVT::i1, 2 },
    { ISD::UINT_TO_FP, MVT::f32, MVT::i1, 2 },
    { ISD::SINT_TO_FP, MVT::f64, MVT::i1, 2 },
    { ISD::UINT_TO_FP, MVT::f64, MVT::i1, 2 },
    { ISD::SINT_TO_FP, MVT::f32, MVT::i8, 2 },
    { ISD::UINT_TO_FP, MVT::f32, MVT::i8, 2 },
    { ISD::SINT_TO_FP, MVT::f64, MVT::i8, 2 },
    { ISD::UINT_TO_FP, MVT::f64, MVT::i8, 2 },
    { ISD::SINT_TO_FP, MVT::f32, MVT::i16, 2 },
    { ISD::UINT_TO_FP, MVT::f32, MVT::i16, 2 },
    { ISD::SINT_TO_FP, MVT::f64, MVT::i16, 2 },
    { ISD::UINT_TO_FP, MVT::f64, MVT::i16, 2 },
    { ISD::SINT_TO_FP, MVT::f32, MVT::i32, 2 },
    { ISD::UINT_TO_FP, MVT::f32, MVT::i32, 2 },
    { ISD::SINT_TO_FP, MVT::f64, MVT::i32, 2 },
    { ISD::UINT_TO_FP, MVT::f64, MVT::i32, 2 },
    { ISD::SINT_TO_FP, MVT::f32, MVT::i64, 10 },
    { ISD::UINT_TO_FP, MVT::f32, MVT::i64, 10 },
    { ISD::SINT_TO_FP, MVT::f64, MVT::i64, 10 },
    { ISD::UINT_TO_FP, MVT::f64, MVT::i64, 10 }
  };

  if (SrcTy.isInteger() && ST->hasNEON()) {
    if (const auto *Entry = ConvertCostTableLookup(NEONIntegerConversionTbl,
                                                   ISD, DstTy.getSimpleVT(),
                                                   SrcTy.getSimpleVT()))
      return Entry->Cost;
  }

  // Scalar integer conversion costs.
  static const TypeConversionCostTblEntry ARMIntegerConversionTbl[] = {
    // i16 -> i64 requires two dependent operations.
    { ISD::SIGN_EXTEND, MVT::i64, MVT::i16, 2 },

    // Truncates on i64 are assumed to be free.
    { ISD::TRUNCATE,    MVT::i32, MVT::i64, 0 },
    { ISD::TRUNCATE,    MVT::i16, MVT::i64, 0 },
    { ISD::TRUNCATE,    MVT::i8,  MVT::i64, 0 },
    { ISD::TRUNCATE,    MVT::i1,  MVT::i64, 0 }
  };

  if (SrcTy.isInteger()) {
    if (const auto *Entry = ConvertCostTableLookup(ARMIntegerConversionTbl, ISD,
                                                   DstTy.getSimpleVT(),
                                                   SrcTy.getSimpleVT()))
      return Entry->Cost;
  }

  return BaseT::getCastInstrCost(Opcode, Dst, Src);
}

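// Note on the insert/extract costs below: moving a scalar between a NEON
// vector lane and an ARM core (or VFP) register is a cross-register-file
// transfer, which is comparatively slow on the targeted microarchitectures;
// the constants used here are coarse estimates rather than per-CPU
// measurements.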
int ARMTTIImpl::getVectorInstrCost(unsigned Opcode, Type *ValTy,
                                   unsigned Index) {
  // Penalize inserting into a D-subregister. We end up with a three times
  // lower estimated throughput on Swift.
  if (ST->isSwift() &&
      Opcode == Instruction::InsertElement &&
      ValTy->isVectorTy() &&
      ValTy->getScalarSizeInBits() <= 32)
    return 3;

  if ((Opcode == Instruction::InsertElement ||
       Opcode == Instruction::ExtractElement)) {
    // Cross-class copies are expensive on many microarchitectures,
    // so assume they are expensive by default.
    if (ValTy->getVectorElementType()->isIntegerTy())
      return 3;

    // Even if it's not a cross class copy, this likely leads to mixing
    // of NEON and VFP code and should therefore be penalized.
    if (ValTy->isVectorTy() &&
        ValTy->getScalarSizeInBits() <= 32)
      return std::max(BaseT::getVectorInstrCost(Opcode, ValTy, Index), 2U);
  }

  return BaseT::getVectorInstrCost(Opcode, ValTy, Index);
}

int ARMTTIImpl::getCmpSelInstrCost(unsigned Opcode, Type *ValTy, Type *CondTy) {

  int ISD = TLI->InstructionOpcodeToISD(Opcode);
  // On NEON a vector select gets lowered to vbsl.
  if (ST->hasNEON() && ValTy->isVectorTy() && ISD == ISD::SELECT) {
    // Lowering of some vector selects is currently far from perfect.
    static const TypeConversionCostTblEntry NEONVectorSelectTbl[] = {
      { ISD::SELECT, MVT::v4i1, MVT::v4i64, 4*4 + 1*2 + 1 },
      { ISD::SELECT, MVT::v8i1, MVT::v8i64, 50 },
      { ISD::SELECT, MVT::v16i1, MVT::v16i64, 100 }
    };

    EVT SelCondTy = TLI->getValueType(DL, CondTy);
    EVT SelValTy = TLI->getValueType(DL, ValTy);
    if (SelCondTy.isSimple() && SelValTy.isSimple()) {
      if (const auto *Entry = ConvertCostTableLookup(NEONVectorSelectTbl, ISD,
                                                     SelCondTy.getSimpleVT(),
                                                     SelValTy.getSimpleVT()))
        return Entry->Cost;
    }

    std::pair<int, MVT> LT = TLI->getTypeLegalizationCost(DL, ValTy);
    return LT.first;
  }

  return BaseT::getCmpSelInstrCost(Opcode, ValTy, CondTy);
}

int ARMTTIImpl::getAddressComputationCost(Type *Ty, bool IsComplex) {
  // Address computations in vectorized code with non-consecutive addresses
  // will likely result in more instructions compared to scalar code where the
  // computation can more often be merged into the index mode. The resulting
  // extra micro-ops can significantly decrease throughput.
  unsigned NumVectorInstToHideOverhead = 10;

  if (Ty->isVectorTy() && IsComplex)
    return NumVectorInstToHideOverhead;

  // In many cases the address computation is not merged into the instruction
  // addressing mode.
  return 1;
}

int ARMTTIImpl::getFPOpCost(Type *Ty) {
  // Use similar logic to that in ARMISelLowering:
  // Any ARM CPU with VFP2 has floating point, but Thumb1 didn't have access
  // to VFP.

  if (ST->hasVFP2() && !ST->isThumb1Only()) {
    if (Ty->isFloatTy()) {
      return TargetTransformInfo::TCC_Basic;
    }

    if (Ty->isDoubleTy()) {
      return ST->isFPOnlySP() ? TargetTransformInfo::TCC_Expensive :
                                TargetTransformInfo::TCC_Basic;
    }
  }

  return TargetTransformInfo::TCC_Expensive;
}

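// Note on the shuffle costs below: table entries are scaled by the
// legalization factor, so e.g. a reverse of <8 x i32> is legalized as two
// <4 x i32> halves and costed at roughly 2 * 2 = 4 (a vrev/vext pair per
// half).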
int ARMTTIImpl::getShuffleCost(TTI::ShuffleKind Kind, Type *Tp, int Index,
                               Type *SubTp) {
  // We only handle costs of reverse and alternate shuffles for now.
  if (Kind != TTI::SK_Reverse && Kind != TTI::SK_Alternate)
    return BaseT::getShuffleCost(Kind, Tp, Index, SubTp);

  if (Kind == TTI::SK_Reverse) {
    static const CostTblEntry NEONShuffleTbl[] = {
        // Reverse shuffles cost one instruction if we are shuffling within a
        // double word (vrev) or two if we shuffle a quad word (vrev, vext).
        {ISD::VECTOR_SHUFFLE, MVT::v2i32, 1},
        {ISD::VECTOR_SHUFFLE, MVT::v2f32, 1},
        {ISD::VECTOR_SHUFFLE, MVT::v2i64, 1},
        {ISD::VECTOR_SHUFFLE, MVT::v2f64, 1},

        {ISD::VECTOR_SHUFFLE, MVT::v4i32, 2},
        {ISD::VECTOR_SHUFFLE, MVT::v4f32, 2},
        {ISD::VECTOR_SHUFFLE, MVT::v8i16, 2},
        {ISD::VECTOR_SHUFFLE, MVT::v16i8, 2}};

    std::pair<int, MVT> LT = TLI->getTypeLegalizationCost(DL, Tp);

    if (const auto *Entry = CostTableLookup(NEONShuffleTbl, ISD::VECTOR_SHUFFLE,
                                            LT.second))
      return LT.first * Entry->Cost;

    return BaseT::getShuffleCost(Kind, Tp, Index, SubTp);
  }
  if (Kind == TTI::SK_Alternate) {
    static const CostTblEntry NEONAltShuffleTbl[] = {
        // Alt shuffle cost table for ARM. Cost is the number of instructions
        // required to create the shuffled vector.

        {ISD::VECTOR_SHUFFLE, MVT::v2f32, 1},
        {ISD::VECTOR_SHUFFLE, MVT::v2i64, 1},
        {ISD::VECTOR_SHUFFLE, MVT::v2f64, 1},
        {ISD::VECTOR_SHUFFLE, MVT::v2i32, 1},

        {ISD::VECTOR_SHUFFLE, MVT::v4i32, 2},
        {ISD::VECTOR_SHUFFLE, MVT::v4f32, 2},
        {ISD::VECTOR_SHUFFLE, MVT::v4i16, 2},

        {ISD::VECTOR_SHUFFLE, MVT::v8i16, 16},

        {ISD::VECTOR_SHUFFLE, MVT::v16i8, 32}};

    std::pair<int, MVT> LT = TLI->getTypeLegalizationCost(DL, Tp);
    if (const auto *Entry = CostTableLookup(NEONAltShuffleTbl,
                                            ISD::VECTOR_SHUFFLE, LT.second))
      return LT.first * Entry->Cost;
    return BaseT::getShuffleCost(Kind, Tp, Index, SubTp);
  }
  return BaseT::getShuffleCost(Kind, Tp, Index, SubTp);
}

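// Note on the division costs below: NEON has no vector integer divide, so a
// vectorized division is modelled either as one "function call" per lane
// (e.g. sdiv <4 x i32> is costed at 4 * 20 = 80) or, for the narrow types
// given ReciprocalDivCost, as a cheaper reciprocal-style sequence, as the
// constant's name suggests.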
int ARMTTIImpl::getArithmeticInstrCost(
    unsigned Opcode, Type *Ty, TTI::OperandValueKind Op1Info,
    TTI::OperandValueKind Op2Info, TTI::OperandValueProperties Opd1PropInfo,
    TTI::OperandValueProperties Opd2PropInfo) {

  int ISDOpcode = TLI->InstructionOpcodeToISD(Opcode);
  std::pair<int, MVT> LT = TLI->getTypeLegalizationCost(DL, Ty);

  const unsigned FunctionCallDivCost = 20;
  const unsigned ReciprocalDivCost = 10;
  static const CostTblEntry CostTbl[] = {
    // Division.
    // These costs are somewhat arbitrary. Choose a cost of 20 to indicate that
    // vectorizing division (added function call) is going to be very expensive.
    // Double register types.
    { ISD::SDIV, MVT::v1i64, 1 * FunctionCallDivCost},
    { ISD::UDIV, MVT::v1i64, 1 * FunctionCallDivCost},
    { ISD::SREM, MVT::v1i64, 1 * FunctionCallDivCost},
    { ISD::UREM, MVT::v1i64, 1 * FunctionCallDivCost},
    { ISD::SDIV, MVT::v2i32, 2 * FunctionCallDivCost},
    { ISD::UDIV, MVT::v2i32, 2 * FunctionCallDivCost},
    { ISD::SREM, MVT::v2i32, 2 * FunctionCallDivCost},
    { ISD::UREM, MVT::v2i32, 2 * FunctionCallDivCost},
    { ISD::SDIV, MVT::v4i16, ReciprocalDivCost},
    { ISD::UDIV, MVT::v4i16, ReciprocalDivCost},
    { ISD::SREM, MVT::v4i16, 4 * FunctionCallDivCost},
    { ISD::UREM, MVT::v4i16, 4 * FunctionCallDivCost},
    { ISD::SDIV, MVT::v8i8, ReciprocalDivCost},
    { ISD::UDIV, MVT::v8i8, ReciprocalDivCost},
    { ISD::SREM, MVT::v8i8, 8 * FunctionCallDivCost},
    { ISD::UREM, MVT::v8i8, 8 * FunctionCallDivCost},
    // Quad register types.
    { ISD::SDIV, MVT::v2i64, 2 * FunctionCallDivCost},
    { ISD::UDIV, MVT::v2i64, 2 * FunctionCallDivCost},
    { ISD::SREM, MVT::v2i64, 2 * FunctionCallDivCost},
    { ISD::UREM, MVT::v2i64, 2 * FunctionCallDivCost},
    { ISD::SDIV, MVT::v4i32, 4 * FunctionCallDivCost},
    { ISD::UDIV, MVT::v4i32, 4 * FunctionCallDivCost},
    { ISD::SREM, MVT::v4i32, 4 * FunctionCallDivCost},
    { ISD::UREM, MVT::v4i32, 4 * FunctionCallDivCost},
    { ISD::SDIV, MVT::v8i16, 8 * FunctionCallDivCost},
    { ISD::UDIV, MVT::v8i16, 8 * FunctionCallDivCost},
    { ISD::SREM, MVT::v8i16, 8 * FunctionCallDivCost},
    { ISD::UREM, MVT::v8i16, 8 * FunctionCallDivCost},
    { ISD::SDIV, MVT::v16i8, 16 * FunctionCallDivCost},
    { ISD::UDIV, MVT::v16i8, 16 * FunctionCallDivCost},
    { ISD::SREM, MVT::v16i8, 16 * FunctionCallDivCost},
    { ISD::UREM, MVT::v16i8, 16 * FunctionCallDivCost},
    // Multiplication.
  };

  if (ST->hasNEON())
    if (const auto *Entry = CostTableLookup(CostTbl, ISDOpcode, LT.second))
      return LT.first * Entry->Cost;

  int Cost = BaseT::getArithmeticInstrCost(Opcode, Ty, Op1Info, Op2Info,
                                           Opd1PropInfo, Opd2PropInfo);

  // This is somewhat of a hack. The problem that we are facing is that SROA
  // creates a sequence of shift, and, or instructions to construct values.
  // These sequences are recognized by ISel and have zero cost. Not so for
  // the vectorized code. Because we have support for v2i64 but not i64 those
  // sequences look particularly beneficial to vectorize.
  // To work around this we increase the cost of v2i64 operations to make them
  // seem less beneficial.
  if (LT.second == MVT::v2i64 &&
      Op2Info == TargetTransformInfo::OK_UniformConstantValue)
    Cost += 4;

  return Cost;
}

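// Note on the memory-op cost below: only an alignment of 16 bytes is treated
// as "aligned" here, so an unaligned access to a vector of f64 elements has
// to go through vld1/vst1 instead of a single vldr/vstr and is costed at
// roughly four times the aligned cost.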
int ARMTTIImpl::getMemoryOpCost(unsigned Opcode, Type *Src, unsigned Alignment,
                                unsigned AddressSpace) {
  std::pair<int, MVT> LT = TLI->getTypeLegalizationCost(DL, Src);

  if (Src->isVectorTy() && Alignment != 16 &&
      Src->getVectorElementType()->isDoubleTy()) {
    // Unaligned loads/stores are extremely inefficient.
    // We need 4 uops for vst.1/vld.1 vs. 1 uop for vldr/vstr.
    return LT.first * 4;
  }
  return LT.first;
}

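// Note on interleaved accesses below: when the factor and element type are
// supported, the whole interleaved group is assumed to map onto a single
// vldN/vstN and is costed at the factor itself; e.g. a factor-3 load of
// <12 x i32> (three <4 x i32> sub-vectors) is costed at 3.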
int ARMTTIImpl::getInterleavedMemoryOpCost(unsigned Opcode, Type *VecTy,
                                           unsigned Factor,
                                           ArrayRef<unsigned> Indices,
                                           unsigned Alignment,
                                           unsigned AddressSpace) {
  assert(Factor >= 2 && "Invalid interleave factor");
  assert(isa<VectorType>(VecTy) && "Expect a vector type");

  // vldN/vstN don't support vector types with i64/f64 elements.
  bool EltIs64Bits = DL.getTypeSizeInBits(VecTy->getScalarType()) == 64;

  if (Factor <= TLI->getMaxSupportedInterleaveFactor() && !EltIs64Bits) {
    unsigned NumElts = VecTy->getVectorNumElements();
    Type *SubVecTy = VectorType::get(VecTy->getScalarType(), NumElts / Factor);
    unsigned SubVecSize = DL.getTypeSizeInBits(SubVecTy);

    // vldN/vstN only support legal vector types whose size is 64 or 128 bits.
    if (NumElts % Factor == 0 && (SubVecSize == 64 || SubVecSize == 128))
      return Factor;
  }

  return BaseT::getInterleavedMemoryOpCost(Opcode, VecTy, Factor, Indices,
                                           Alignment, AddressSpace);
}