//===-- AArch64TargetTransformInfo.cpp - AArch64 specific TTI -------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

#include "AArch64TargetTransformInfo.h"
#include "AArch64ExpandImm.h"
#include "AArch64PerfectShuffle.h"
#include "MCTargetDesc/AArch64AddressingModes.h"
#include "llvm/Analysis/IVDescriptors.h"
#include "llvm/Analysis/LoopInfo.h"
#include "llvm/Analysis/TargetTransformInfo.h"
#include "llvm/CodeGen/BasicTTIImpl.h"
#include "llvm/CodeGen/CostTable.h"
#include "llvm/CodeGen/TargetLowering.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/Intrinsics.h"
#include "llvm/IR/IntrinsicsAArch64.h"
#include "llvm/IR/PatternMatch.h"
#include "llvm/Support/Debug.h"
#include "llvm/Transforms/InstCombine/InstCombiner.h"
#include <algorithm>
using namespace llvm;
using namespace llvm::PatternMatch;

#define DEBUG_TYPE "aarch64tti"

static cl::opt<bool> EnableFalkorHWPFUnrollFix("enable-falkor-hwpf-unroll-fix",
                                               cl::init(true), cl::Hidden);

// Extra cost (in abstract cost units) charged for an SVE gather load.
static cl::opt<unsigned> SVEGatherOverhead("sve-gather-overhead", cl::init(10),
                                           cl::Hidden);

// Extra cost (in abstract cost units) charged for an SVE scatter store.
static cl::opt<unsigned> SVEScatterOverhead("sve-scatter-overhead",
                                            cl::init(10), cl::Hidden);

/// Decide whether \p Callee may be inlined into \p Caller based on their
/// subtarget feature sets.
bool AArch64TTIImpl::areInlineCompatible(const Function *Caller,
                                         const Function *Callee) const {
  const TargetMachine &TM = getTLI()->getTargetMachine();

  const FeatureBitset &CallerBits =
      TM.getSubtargetImpl(*Caller)->getFeatureBits();
  const FeatureBitset &CalleeBits =
      TM.getSubtargetImpl(*Callee)->getFeatureBits();

  // Inline a callee if its target-features are a subset of the callers
  // target-features.
  return (CallerBits & CalleeBits) == CalleeBits;
}

/// Calculate the cost of materializing a 64-bit value. This helper
/// method might only calculate a fraction of a larger immediate. Therefore it
/// is valid to return a cost of ZERO.
InstructionCost AArch64TTIImpl::getIntImmCost(int64_t Val) {
  // Check if the immediate can be encoded within an instruction.
  if (Val == 0 || AArch64_AM::isLogicalImmediate(Val, 64))
    return 0;

  // Negative values are materialized with MOVN/MOVK; the expansion logic
  // below works on the bitwise complement.
  if (Val < 0)
    Val = ~Val;

  // Calculate how many moves we will need to materialize this constant.
  SmallVector<AArch64_IMM::ImmInsnModel, 4> Insn;
  AArch64_IMM::expandMOVImm(Val, 64, Insn);
  return Insn.size();
}

/// Calculate the cost of materializing the given constant.
InstructionCost AArch64TTIImpl::getIntImmCost(const APInt &Imm, Type *Ty,
                                              TTI::TargetCostKind CostKind) {
  assert(Ty->isIntegerTy());

  unsigned BitSize = Ty->getPrimitiveSizeInBits();
  if (BitSize == 0)
    return ~0U;

  // Sign-extend all constants to a multiple of 64-bit.
  APInt ImmVal = Imm;
  if (BitSize & 0x3f)
    ImmVal = Imm.sext((BitSize + 63) & ~0x3fU);

  // Split the constant into 64-bit chunks and calculate the cost for each
  // chunk.
  InstructionCost Cost = 0;
  for (unsigned ShiftVal = 0; ShiftVal < BitSize; ShiftVal += 64) {
    APInt Tmp = ImmVal.ashr(ShiftVal).sextOrTrunc(64);
    int64_t Val = Tmp.getSExtValue();
    Cost += getIntImmCost(Val);
  }
  // We need at least one instruction to materialize the constant.
  return std::max<InstructionCost>(1, Cost);
}

/// Return the cost of immediate \p Imm when it appears as operand \p Idx of
/// instruction \p Opcode. Operands that the instruction can fold for free
/// return TCC_Free so constant hoisting leaves them in place.
InstructionCost AArch64TTIImpl::getIntImmCostInst(unsigned Opcode, unsigned Idx,
                                                  const APInt &Imm, Type *Ty,
                                                  TTI::TargetCostKind CostKind,
                                                  Instruction *Inst) {
  assert(Ty->isIntegerTy());

  unsigned BitSize = Ty->getPrimitiveSizeInBits();
  // There is no cost model for constants with a bit size of 0. Return TCC_Free
  // here, so that constant hoisting will ignore this constant.
  if (BitSize == 0)
    return TTI::TCC_Free;

  unsigned ImmIdx = ~0U;
  switch (Opcode) {
  default:
    return TTI::TCC_Free;
  case Instruction::GetElementPtr:
    // Always hoist the base address of a GetElementPtr.
    if (Idx == 0)
      return 2 * TTI::TCC_Basic;
    return TTI::TCC_Free;
  case Instruction::Store:
    ImmIdx = 0;
    break;
  case Instruction::Add:
  case Instruction::Sub:
  case Instruction::Mul:
  case Instruction::UDiv:
  case Instruction::SDiv:
  case Instruction::URem:
  case Instruction::SRem:
  case Instruction::And:
  case Instruction::Or:
  case Instruction::Xor:
  case Instruction::ICmp:
    ImmIdx = 1;
    break;
  // Always return TCC_Free for the shift value of a shift instruction.
  case Instruction::Shl:
  case Instruction::LShr:
  case Instruction::AShr:
    if (Idx == 1)
      return TTI::TCC_Free;
    break;
  case Instruction::Trunc:
  case Instruction::ZExt:
  case Instruction::SExt:
  case Instruction::IntToPtr:
  case Instruction::PtrToInt:
  case Instruction::BitCast:
  case Instruction::PHI:
  case Instruction::Call:
  case Instruction::Select:
  case Instruction::Ret:
  case Instruction::Load:
    break;
  }

  if (Idx == ImmIdx) {
    // If materializing the constant is no more expensive than one basic op
    // per 64-bit chunk, treat it as free so it is not hoisted.
    int NumConstants = (BitSize + 63) / 64;
    InstructionCost Cost = AArch64TTIImpl::getIntImmCost(Imm, Ty, CostKind);
    return (Cost <= NumConstants * TTI::TCC_Basic)
               ? static_cast<int>(TTI::TCC_Free)
               : Cost;
  }
  return AArch64TTIImpl::getIntImmCost(Imm, Ty, CostKind);
}

/// Return the cost of immediate \p Imm when used as operand \p Idx of
/// intrinsic \p IID.
InstructionCost
AArch64TTIImpl::getIntImmCostIntrin(Intrinsic::ID IID, unsigned Idx,
                                    const APInt &Imm, Type *Ty,
                                    TTI::TargetCostKind CostKind) {
  assert(Ty->isIntegerTy());

  unsigned BitSize = Ty->getPrimitiveSizeInBits();
  // There is no cost model for constants with a bit size of 0. Return TCC_Free
  // here, so that constant hoisting will ignore this constant.
  if (BitSize == 0)
    return TTI::TCC_Free;

  // Most (all?) AArch64 intrinsics do not support folding immediates into the
  // selected instruction, so we compute the materialization cost for the
  // immediate directly.
  if (IID >= Intrinsic::aarch64_addg && IID <= Intrinsic::aarch64_udiv)
    return AArch64TTIImpl::getIntImmCost(Imm, Ty, CostKind);

  switch (IID) {
  default:
    return TTI::TCC_Free;
  case Intrinsic::sadd_with_overflow:
  case Intrinsic::uadd_with_overflow:
  case Intrinsic::ssub_with_overflow:
  case Intrinsic::usub_with_overflow:
  case Intrinsic::smul_with_overflow:
  case Intrinsic::umul_with_overflow:
    if (Idx == 1) {
      int NumConstants = (BitSize + 63) / 64;
      InstructionCost Cost = AArch64TTIImpl::getIntImmCost(Imm, Ty, CostKind);
      return (Cost <= NumConstants * TTI::TCC_Basic)
                 ? static_cast<int>(TTI::TCC_Free)
                 : Cost;
    }
    break;
  case Intrinsic::experimental_stackmap:
    if ((Idx < 2) || (Imm.getBitWidth() <= 64 && isInt<64>(Imm.getSExtValue())))
      return TTI::TCC_Free;
    break;
  case Intrinsic::experimental_patchpoint_void:
  case Intrinsic::experimental_patchpoint_i64:
    if ((Idx < 4) || (Imm.getBitWidth() <= 64 && isInt<64>(Imm.getSExtValue())))
      return TTI::TCC_Free;
    break;
  case Intrinsic::experimental_gc_statepoint:
    if ((Idx < 5) || (Imm.getBitWidth() <= 64 && isInt<64>(Imm.getSExtValue())))
      return TTI::TCC_Free;
    break;
  }
  return AArch64TTIImpl::getIntImmCost(Imm, Ty, CostKind);
}

TargetTransformInfo::PopcntSupportKind
AArch64TTIImpl::getPopcntSupport(unsigned TyWidth) {
  assert(isPowerOf2_32(TyWidth) && "Ty width must be power of 2");
  if (TyWidth == 32 || TyWidth == 64)
    return TTI::PSK_FastHardware;
  // TODO: AArch64TargetLowering::LowerCTPOP() supports 128bit popcount.
223 return TTI::PSK_Software; 224 } 225 226 InstructionCost 227 AArch64TTIImpl::getIntrinsicInstrCost(const IntrinsicCostAttributes &ICA, 228 TTI::TargetCostKind CostKind) { 229 auto *RetTy = ICA.getReturnType(); 230 switch (ICA.getID()) { 231 case Intrinsic::umin: 232 case Intrinsic::umax: 233 case Intrinsic::smin: 234 case Intrinsic::smax: { 235 static const auto ValidMinMaxTys = {MVT::v8i8, MVT::v16i8, MVT::v4i16, 236 MVT::v8i16, MVT::v2i32, MVT::v4i32}; 237 auto LT = TLI->getTypeLegalizationCost(DL, RetTy); 238 // v2i64 types get converted to cmp+bif hence the cost of 2 239 if (LT.second == MVT::v2i64) 240 return LT.first * 2; 241 if (any_of(ValidMinMaxTys, [<](MVT M) { return M == LT.second; })) 242 return LT.first; 243 break; 244 } 245 case Intrinsic::sadd_sat: 246 case Intrinsic::ssub_sat: 247 case Intrinsic::uadd_sat: 248 case Intrinsic::usub_sat: { 249 static const auto ValidSatTys = {MVT::v8i8, MVT::v16i8, MVT::v4i16, 250 MVT::v8i16, MVT::v2i32, MVT::v4i32, 251 MVT::v2i64}; 252 auto LT = TLI->getTypeLegalizationCost(DL, RetTy); 253 // This is a base cost of 1 for the vadd, plus 3 extract shifts if we 254 // need to extend the type, as it uses shr(qadd(shl, shl)). 255 unsigned Instrs = 256 LT.second.getScalarSizeInBits() == RetTy->getScalarSizeInBits() ? 
1 : 4; 257 if (any_of(ValidSatTys, [<](MVT M) { return M == LT.second; })) 258 return LT.first * Instrs; 259 break; 260 } 261 case Intrinsic::abs: { 262 static const auto ValidAbsTys = {MVT::v8i8, MVT::v16i8, MVT::v4i16, 263 MVT::v8i16, MVT::v2i32, MVT::v4i32, 264 MVT::v2i64}; 265 auto LT = TLI->getTypeLegalizationCost(DL, RetTy); 266 if (any_of(ValidAbsTys, [<](MVT M) { return M == LT.second; })) 267 return LT.first; 268 break; 269 } 270 case Intrinsic::experimental_stepvector: { 271 InstructionCost Cost = 1; // Cost of the `index' instruction 272 auto LT = TLI->getTypeLegalizationCost(DL, RetTy); 273 // Legalisation of illegal vectors involves an `index' instruction plus 274 // (LT.first - 1) vector adds. 275 if (LT.first > 1) { 276 Type *LegalVTy = EVT(LT.second).getTypeForEVT(RetTy->getContext()); 277 InstructionCost AddCost = 278 getArithmeticInstrCost(Instruction::Add, LegalVTy, CostKind); 279 Cost += AddCost * (LT.first - 1); 280 } 281 return Cost; 282 } 283 case Intrinsic::bitreverse: { 284 static const CostTblEntry BitreverseTbl[] = { 285 {Intrinsic::bitreverse, MVT::i32, 1}, 286 {Intrinsic::bitreverse, MVT::i64, 1}, 287 {Intrinsic::bitreverse, MVT::v8i8, 1}, 288 {Intrinsic::bitreverse, MVT::v16i8, 1}, 289 {Intrinsic::bitreverse, MVT::v4i16, 2}, 290 {Intrinsic::bitreverse, MVT::v8i16, 2}, 291 {Intrinsic::bitreverse, MVT::v2i32, 2}, 292 {Intrinsic::bitreverse, MVT::v4i32, 2}, 293 {Intrinsic::bitreverse, MVT::v1i64, 2}, 294 {Intrinsic::bitreverse, MVT::v2i64, 2}, 295 }; 296 const auto LegalisationCost = TLI->getTypeLegalizationCost(DL, RetTy); 297 const auto *Entry = 298 CostTableLookup(BitreverseTbl, ICA.getID(), LegalisationCost.second); 299 if (Entry) { 300 // Cost Model is using the legal type(i32) that i8 and i16 will be 301 // converted to +1 so that we match the actual lowering cost 302 if (TLI->getValueType(DL, RetTy, true) == MVT::i8 || 303 TLI->getValueType(DL, RetTy, true) == MVT::i16) 304 return LegalisationCost.first * Entry->Cost + 1; 305 306 
return LegalisationCost.first * Entry->Cost; 307 } 308 break; 309 } 310 case Intrinsic::ctpop: { 311 static const CostTblEntry CtpopCostTbl[] = { 312 {ISD::CTPOP, MVT::v2i64, 4}, 313 {ISD::CTPOP, MVT::v4i32, 3}, 314 {ISD::CTPOP, MVT::v8i16, 2}, 315 {ISD::CTPOP, MVT::v16i8, 1}, 316 {ISD::CTPOP, MVT::i64, 4}, 317 {ISD::CTPOP, MVT::v2i32, 3}, 318 {ISD::CTPOP, MVT::v4i16, 2}, 319 {ISD::CTPOP, MVT::v8i8, 1}, 320 {ISD::CTPOP, MVT::i32, 5}, 321 }; 322 auto LT = TLI->getTypeLegalizationCost(DL, RetTy); 323 MVT MTy = LT.second; 324 if (const auto *Entry = CostTableLookup(CtpopCostTbl, ISD::CTPOP, MTy)) { 325 // Extra cost of +1 when illegal vector types are legalized by promoting 326 // the integer type. 327 int ExtraCost = MTy.isVector() && MTy.getScalarSizeInBits() != 328 RetTy->getScalarSizeInBits() 329 ? 1 330 : 0; 331 return LT.first * Entry->Cost + ExtraCost; 332 } 333 break; 334 } 335 case Intrinsic::sadd_with_overflow: 336 case Intrinsic::uadd_with_overflow: 337 case Intrinsic::ssub_with_overflow: 338 case Intrinsic::usub_with_overflow: 339 case Intrinsic::smul_with_overflow: 340 case Intrinsic::umul_with_overflow: { 341 static const CostTblEntry WithOverflowCostTbl[] = { 342 {Intrinsic::sadd_with_overflow, MVT::i8, 3}, 343 {Intrinsic::uadd_with_overflow, MVT::i8, 3}, 344 {Intrinsic::sadd_with_overflow, MVT::i16, 3}, 345 {Intrinsic::uadd_with_overflow, MVT::i16, 3}, 346 {Intrinsic::sadd_with_overflow, MVT::i32, 1}, 347 {Intrinsic::uadd_with_overflow, MVT::i32, 1}, 348 {Intrinsic::sadd_with_overflow, MVT::i64, 1}, 349 {Intrinsic::uadd_with_overflow, MVT::i64, 1}, 350 {Intrinsic::ssub_with_overflow, MVT::i8, 3}, 351 {Intrinsic::usub_with_overflow, MVT::i8, 3}, 352 {Intrinsic::ssub_with_overflow, MVT::i16, 3}, 353 {Intrinsic::usub_with_overflow, MVT::i16, 3}, 354 {Intrinsic::ssub_with_overflow, MVT::i32, 1}, 355 {Intrinsic::usub_with_overflow, MVT::i32, 1}, 356 {Intrinsic::ssub_with_overflow, MVT::i64, 1}, 357 {Intrinsic::usub_with_overflow, MVT::i64, 1}, 358 
{Intrinsic::smul_with_overflow, MVT::i8, 5}, 359 {Intrinsic::umul_with_overflow, MVT::i8, 4}, 360 {Intrinsic::smul_with_overflow, MVT::i16, 5}, 361 {Intrinsic::umul_with_overflow, MVT::i16, 4}, 362 {Intrinsic::smul_with_overflow, MVT::i32, 2}, // eg umull;tst 363 {Intrinsic::umul_with_overflow, MVT::i32, 2}, // eg umull;cmp sxtw 364 {Intrinsic::smul_with_overflow, MVT::i64, 3}, // eg mul;smulh;cmp 365 {Intrinsic::umul_with_overflow, MVT::i64, 3}, // eg mul;umulh;cmp asr 366 }; 367 EVT MTy = TLI->getValueType(DL, RetTy->getContainedType(0), true); 368 if (MTy.isSimple()) 369 if (const auto *Entry = CostTableLookup(WithOverflowCostTbl, ICA.getID(), 370 MTy.getSimpleVT())) 371 return Entry->Cost; 372 break; 373 } 374 case Intrinsic::fptosi_sat: 375 case Intrinsic::fptoui_sat: { 376 if (ICA.getArgTypes().empty()) 377 break; 378 bool IsSigned = ICA.getID() == Intrinsic::fptosi_sat; 379 auto LT = TLI->getTypeLegalizationCost(DL, ICA.getArgTypes()[0]); 380 EVT MTy = TLI->getValueType(DL, RetTy); 381 // Check for the legal types, which are where the size of the input and the 382 // output are the same, or we are using cvt f64->i32 or f32->i64. 
383 if ((LT.second == MVT::f32 || LT.second == MVT::f64 || 384 LT.second == MVT::v2f32 || LT.second == MVT::v4f32 || 385 LT.second == MVT::v2f64) && 386 (LT.second.getScalarSizeInBits() == MTy.getScalarSizeInBits() || 387 (LT.second == MVT::f64 && MTy == MVT::i32) || 388 (LT.second == MVT::f32 && MTy == MVT::i64))) 389 return LT.first; 390 // Similarly for fp16 sizes 391 if (ST->hasFullFP16() && 392 ((LT.second == MVT::f16 && MTy == MVT::i32) || 393 ((LT.second == MVT::v4f16 || LT.second == MVT::v8f16) && 394 (LT.second.getScalarSizeInBits() == MTy.getScalarSizeInBits())))) 395 return LT.first; 396 397 // Otherwise we use a legal convert followed by a min+max 398 if ((LT.second.getScalarType() == MVT::f32 || 399 LT.second.getScalarType() == MVT::f64 || 400 (ST->hasFullFP16() && LT.second.getScalarType() == MVT::f16)) && 401 LT.second.getScalarSizeInBits() >= MTy.getScalarSizeInBits()) { 402 Type *LegalTy = 403 Type::getIntNTy(RetTy->getContext(), LT.second.getScalarSizeInBits()); 404 if (LT.second.isVector()) 405 LegalTy = VectorType::get(LegalTy, LT.second.getVectorElementCount()); 406 InstructionCost Cost = 1; 407 IntrinsicCostAttributes Attrs1(IsSigned ? Intrinsic::smin : Intrinsic::umin, 408 LegalTy, {LegalTy, LegalTy}); 409 Cost += getIntrinsicInstrCost(Attrs1, CostKind); 410 IntrinsicCostAttributes Attrs2(IsSigned ? 
Intrinsic::smax : Intrinsic::umax, 411 LegalTy, {LegalTy, LegalTy}); 412 Cost += getIntrinsicInstrCost(Attrs2, CostKind); 413 return LT.first * Cost; 414 } 415 break; 416 } 417 default: 418 break; 419 } 420 return BaseT::getIntrinsicInstrCost(ICA, CostKind); 421 } 422 423 /// The function will remove redundant reinterprets casting in the presence 424 /// of the control flow 425 static Optional<Instruction *> processPhiNode(InstCombiner &IC, 426 IntrinsicInst &II) { 427 SmallVector<Instruction *, 32> Worklist; 428 auto RequiredType = II.getType(); 429 430 auto *PN = dyn_cast<PHINode>(II.getArgOperand(0)); 431 assert(PN && "Expected Phi Node!"); 432 433 // Don't create a new Phi unless we can remove the old one. 434 if (!PN->hasOneUse()) 435 return None; 436 437 for (Value *IncValPhi : PN->incoming_values()) { 438 auto *Reinterpret = dyn_cast<IntrinsicInst>(IncValPhi); 439 if (!Reinterpret || 440 Reinterpret->getIntrinsicID() != 441 Intrinsic::aarch64_sve_convert_to_svbool || 442 RequiredType != Reinterpret->getArgOperand(0)->getType()) 443 return None; 444 } 445 446 // Create the new Phi 447 LLVMContext &Ctx = PN->getContext(); 448 IRBuilder<> Builder(Ctx); 449 Builder.SetInsertPoint(PN); 450 PHINode *NPN = Builder.CreatePHI(RequiredType, PN->getNumIncomingValues()); 451 Worklist.push_back(PN); 452 453 for (unsigned I = 0; I < PN->getNumIncomingValues(); I++) { 454 auto *Reinterpret = cast<Instruction>(PN->getIncomingValue(I)); 455 NPN->addIncoming(Reinterpret->getOperand(0), PN->getIncomingBlock(I)); 456 Worklist.push_back(Reinterpret); 457 } 458 459 // Cleanup Phi Node and reinterprets 460 return IC.replaceInstUsesWith(II, NPN); 461 } 462 463 // (from_svbool (binop (to_svbool pred) (svbool_t _) (svbool_t _)))) 464 // => (binop (pred) (from_svbool _) (from_svbool _)) 465 // 466 // The above transformation eliminates a `to_svbool` in the predicate 467 // operand of bitwise operation `binop` by narrowing the vector width of 468 // the operation. 
For example, it would convert a `<vscale x 16 x i1> 469 // and` into a `<vscale x 4 x i1> and`. This is profitable because 470 // to_svbool must zero the new lanes during widening, whereas 471 // from_svbool is free. 472 static Optional<Instruction *> tryCombineFromSVBoolBinOp(InstCombiner &IC, 473 IntrinsicInst &II) { 474 auto BinOp = dyn_cast<IntrinsicInst>(II.getOperand(0)); 475 if (!BinOp) 476 return None; 477 478 auto IntrinsicID = BinOp->getIntrinsicID(); 479 switch (IntrinsicID) { 480 case Intrinsic::aarch64_sve_and_z: 481 case Intrinsic::aarch64_sve_bic_z: 482 case Intrinsic::aarch64_sve_eor_z: 483 case Intrinsic::aarch64_sve_nand_z: 484 case Intrinsic::aarch64_sve_nor_z: 485 case Intrinsic::aarch64_sve_orn_z: 486 case Intrinsic::aarch64_sve_orr_z: 487 break; 488 default: 489 return None; 490 } 491 492 auto BinOpPred = BinOp->getOperand(0); 493 auto BinOpOp1 = BinOp->getOperand(1); 494 auto BinOpOp2 = BinOp->getOperand(2); 495 496 auto PredIntr = dyn_cast<IntrinsicInst>(BinOpPred); 497 if (!PredIntr || 498 PredIntr->getIntrinsicID() != Intrinsic::aarch64_sve_convert_to_svbool) 499 return None; 500 501 auto PredOp = PredIntr->getOperand(0); 502 auto PredOpTy = cast<VectorType>(PredOp->getType()); 503 if (PredOpTy != II.getType()) 504 return None; 505 506 IRBuilder<> Builder(II.getContext()); 507 Builder.SetInsertPoint(&II); 508 509 SmallVector<Value *> NarrowedBinOpArgs = {PredOp}; 510 auto NarrowBinOpOp1 = Builder.CreateIntrinsic( 511 Intrinsic::aarch64_sve_convert_from_svbool, {PredOpTy}, {BinOpOp1}); 512 NarrowedBinOpArgs.push_back(NarrowBinOpOp1); 513 if (BinOpOp1 == BinOpOp2) 514 NarrowedBinOpArgs.push_back(NarrowBinOpOp1); 515 else 516 NarrowedBinOpArgs.push_back(Builder.CreateIntrinsic( 517 Intrinsic::aarch64_sve_convert_from_svbool, {PredOpTy}, {BinOpOp2})); 518 519 auto NarrowedBinOp = 520 Builder.CreateIntrinsic(IntrinsicID, {PredOpTy}, NarrowedBinOpArgs); 521 return IC.replaceInstUsesWith(II, NarrowedBinOp); 522 } 523 524 static 
Optional<Instruction *> instCombineConvertFromSVBool(InstCombiner &IC, 525 IntrinsicInst &II) { 526 // If the reinterpret instruction operand is a PHI Node 527 if (isa<PHINode>(II.getArgOperand(0))) 528 return processPhiNode(IC, II); 529 530 if (auto BinOpCombine = tryCombineFromSVBoolBinOp(IC, II)) 531 return BinOpCombine; 532 533 SmallVector<Instruction *, 32> CandidatesForRemoval; 534 Value *Cursor = II.getOperand(0), *EarliestReplacement = nullptr; 535 536 const auto *IVTy = cast<VectorType>(II.getType()); 537 538 // Walk the chain of conversions. 539 while (Cursor) { 540 // If the type of the cursor has fewer lanes than the final result, zeroing 541 // must take place, which breaks the equivalence chain. 542 const auto *CursorVTy = cast<VectorType>(Cursor->getType()); 543 if (CursorVTy->getElementCount().getKnownMinValue() < 544 IVTy->getElementCount().getKnownMinValue()) 545 break; 546 547 // If the cursor has the same type as I, it is a viable replacement. 548 if (Cursor->getType() == IVTy) 549 EarliestReplacement = Cursor; 550 551 auto *IntrinsicCursor = dyn_cast<IntrinsicInst>(Cursor); 552 553 // If this is not an SVE conversion intrinsic, this is the end of the chain. 554 if (!IntrinsicCursor || !(IntrinsicCursor->getIntrinsicID() == 555 Intrinsic::aarch64_sve_convert_to_svbool || 556 IntrinsicCursor->getIntrinsicID() == 557 Intrinsic::aarch64_sve_convert_from_svbool)) 558 break; 559 560 CandidatesForRemoval.insert(CandidatesForRemoval.begin(), IntrinsicCursor); 561 Cursor = IntrinsicCursor->getOperand(0); 562 } 563 564 // If no viable replacement in the conversion chain was found, there is 565 // nothing to do. 
  if (!EarliestReplacement)
    return None;

  return IC.replaceInstUsesWith(II, EarliestReplacement);
}

/// Lower an SVE sel intrinsic to a plain IR select instruction.
static Optional<Instruction *> instCombineSVESel(InstCombiner &IC,
                                                 IntrinsicInst &II) {
  IRBuilder<> Builder(&II);
  auto Select = Builder.CreateSelect(II.getOperand(0), II.getOperand(1),
                                     II.getOperand(2));
  return IC.replaceInstUsesWith(II, Select);
}

/// Fold a dup governed by a ptrue with the vl1 pattern into an
/// insertelement at lane zero.
static Optional<Instruction *> instCombineSVEDup(InstCombiner &IC,
                                                 IntrinsicInst &II) {
  IntrinsicInst *Pg = dyn_cast<IntrinsicInst>(II.getArgOperand(1));
  if (!Pg)
    return None;

  if (Pg->getIntrinsicID() != Intrinsic::aarch64_sve_ptrue)
    return None;

  const auto PTruePattern =
      cast<ConstantInt>(Pg->getOperand(0))->getZExtValue();
  if (PTruePattern != AArch64SVEPredPattern::vl1)
    return None;

  // The intrinsic is inserting into lane zero so use an insert instead.
  auto *IdxTy = Type::getInt64Ty(II.getContext());
  auto *Insert = InsertElementInst::Create(
      II.getArgOperand(0), II.getArgOperand(2), ConstantInt::get(IdxTy, 0));
  Insert->insertBefore(&II);
  Insert->takeName(&II);

  return IC.replaceInstUsesWith(II, Insert);
}

static Optional<Instruction *> instCombineSVEDupX(InstCombiner &IC,
                                                  IntrinsicInst &II) {
  // Replace DupX with a regular IR splat.
  IRBuilder<> Builder(II.getContext());
  Builder.SetInsertPoint(&II);
  auto *RetTy = cast<ScalableVectorType>(II.getType());
  Value *Splat =
      Builder.CreateVectorSplat(RetTy->getElementCount(), II.getArgOperand(0));
  Splat->takeName(&II);
  return IC.replaceInstUsesWith(II, Splat);
}

/// Try to replace an all-active cmpne against a dupq'd constant vector with
/// a ptrue of the narrowest predicate element type that covers the same
/// active lanes.
static Optional<Instruction *> instCombineSVECmpNE(InstCombiner &IC,
                                                   IntrinsicInst &II) {
  LLVMContext &Ctx = II.getContext();
  IRBuilder<> Builder(Ctx);
  Builder.SetInsertPoint(&II);

  // Check that the predicate is all active
  auto *Pg = dyn_cast<IntrinsicInst>(II.getArgOperand(0));
  if (!Pg || Pg->getIntrinsicID() != Intrinsic::aarch64_sve_ptrue)
    return None;

  const auto PTruePattern =
      cast<ConstantInt>(Pg->getOperand(0))->getZExtValue();
  if (PTruePattern != AArch64SVEPredPattern::all)
    return None;

  // Check that we have a compare of zero..
  auto *SplatValue =
      dyn_cast_or_null<ConstantInt>(getSplatValue(II.getArgOperand(2)));
  if (!SplatValue || !SplatValue->isZero())
    return None;

  // ..against a dupq
  auto *DupQLane = dyn_cast<IntrinsicInst>(II.getArgOperand(1));
  if (!DupQLane ||
      DupQLane->getIntrinsicID() != Intrinsic::aarch64_sve_dupq_lane)
    return None;

  // Where the dupq is a lane 0 replicate of a vector insert
  if (!cast<ConstantInt>(DupQLane->getArgOperand(1))->isZero())
    return None;

  auto *VecIns = dyn_cast<IntrinsicInst>(DupQLane->getArgOperand(0));
  if (!VecIns ||
      VecIns->getIntrinsicID() != Intrinsic::experimental_vector_insert)
    return None;

  // Where the vector insert is a fixed constant vector insert into undef at
  // index zero
  if (!isa<UndefValue>(VecIns->getArgOperand(0)))
    return None;

  if (!cast<ConstantInt>(VecIns->getArgOperand(2))->isZero())
    return None;

  auto *ConstVec = dyn_cast<Constant>(VecIns->getArgOperand(1));
  if (!ConstVec)
    return None;

  auto *VecTy = dyn_cast<FixedVectorType>(ConstVec->getType());
  auto *OutTy = dyn_cast<ScalableVectorType>(II.getType());
  if (!VecTy || !OutTy || VecTy->getNumElements() != OutTy->getMinNumElements())
    return None;

  unsigned NumElts = VecTy->getNumElements();
  unsigned PredicateBits = 0;

  // Expand intrinsic operands to a 16-bit byte level predicate
  for (unsigned I = 0; I < NumElts; ++I) {
    auto *Arg = dyn_cast<ConstantInt>(ConstVec->getAggregateElement(I));
    if (!Arg)
      return None;
    if (!Arg->isZero())
      PredicateBits |= 1 << (I * (16 / NumElts));
  }

  // If all bits are zero bail early with an empty predicate
  if (PredicateBits == 0) {
    auto *PFalse = Constant::getNullValue(II.getType());
    PFalse->takeName(&II);
    return IC.replaceInstUsesWith(II, PFalse);
  }

  // Calculate largest predicate type used (where byte predicate is largest)
  unsigned Mask = 8;
  for (unsigned I = 0; I < 16; ++I)
    if ((PredicateBits & (1 << I)) != 0)
      Mask |= (I % 8);

  unsigned PredSize = Mask & -Mask;
  auto *PredType = ScalableVectorType::get(
      Type::getInt1Ty(Ctx), AArch64::SVEBitsPerBlock / (PredSize * 8));

  // Ensure all relevant bits are set
  for (unsigned I = 0; I < 16; I += PredSize)
    if ((PredicateBits & (1 << I)) == 0)
      return None;

  auto *PTruePat =
      ConstantInt::get(Type::getInt32Ty(Ctx), AArch64SVEPredPattern::all);
  auto *PTrue = Builder.CreateIntrinsic(Intrinsic::aarch64_sve_ptrue,
                                        {PredType}, {PTruePat});
  auto *ConvertToSVBool = Builder.CreateIntrinsic(
      Intrinsic::aarch64_sve_convert_to_svbool, {PredType}, {PTrue});
  auto *ConvertFromSVBool =
      Builder.CreateIntrinsic(Intrinsic::aarch64_sve_convert_from_svbool,
                              {II.getType()}, {ConvertToSVBool});

  ConvertFromSVBool->takeName(&II);
  return IC.replaceInstUsesWith(II, ConvertFromSVBool);
}

/// Fold lasta/lastb into extractelement or simpler forms where the result
/// lane is statically known.
static Optional<Instruction *> instCombineSVELast(InstCombiner &IC,
                                                  IntrinsicInst &II) {
  IRBuilder<> Builder(II.getContext());
  Builder.SetInsertPoint(&II);
  Value *Pg = II.getArgOperand(0);
  Value *Vec = II.getArgOperand(1);
  auto IntrinsicID = II.getIntrinsicID();
  bool IsAfter = IntrinsicID == Intrinsic::aarch64_sve_lasta;

  // lastX(splat(X)) --> X
  if (auto *SplatVal = getSplatValue(Vec))
    return IC.replaceInstUsesWith(II, SplatVal);

  // If x and/or y is a splat value then:
  // lastX (binop (x, y)) --> binop(lastX(x), lastX(y))
  Value *LHS, *RHS;
  if (match(Vec, m_OneUse(m_BinOp(m_Value(LHS), m_Value(RHS))))) {
    if (isSplatValue(LHS) || isSplatValue(RHS)) {
      auto *OldBinOp = cast<BinaryOperator>(Vec);
      auto OpC = OldBinOp->getOpcode();
      auto *NewLHS =
          Builder.CreateIntrinsic(IntrinsicID, {Vec->getType()}, {Pg, LHS});
      auto *NewRHS =
          Builder.CreateIntrinsic(IntrinsicID, {Vec->getType()}, {Pg, RHS});
      auto *NewBinOp = BinaryOperator::CreateWithCopiedFlags(
          OpC, NewLHS, NewRHS, OldBinOp, OldBinOp->getName(), &II);
      return IC.replaceInstUsesWith(II, NewBinOp);
    }
  }

  auto *C = dyn_cast<Constant>(Pg);
  if (IsAfter && C && C->isNullValue()) {
    // The intrinsic is extracting lane 0 so use an extract instead.
    auto *IdxTy = Type::getInt64Ty(II.getContext());
    auto *Extract = ExtractElementInst::Create(Vec, ConstantInt::get(IdxTy, 0));
    Extract->insertBefore(&II);
    Extract->takeName(&II);
    return IC.replaceInstUsesWith(II, Extract);
  }

  auto *IntrPG = dyn_cast<IntrinsicInst>(Pg);
  if (!IntrPG)
    return None;

  if (IntrPG->getIntrinsicID() != Intrinsic::aarch64_sve_ptrue)
    return None;

  const auto PTruePattern =
      cast<ConstantInt>(IntrPG->getOperand(0))->getZExtValue();

  // Can the intrinsic's predicate be converted to a known constant index?
  unsigned MinNumElts = getNumElementsFromSVEPredPattern(PTruePattern);
  if (!MinNumElts)
    return None;

  unsigned Idx = MinNumElts - 1;
  // Increment the index if extracting the element after the last active
  // predicate element.
  if (IsAfter)
    ++Idx;

  // Ignore extracts whose index is larger than the known minimum vector
  // length. NOTE: This is an artificial constraint where we prefer to
  // maintain what the user asked for until an alternative is proven faster.
  auto *PgVTy = cast<ScalableVectorType>(Pg->getType());
  if (Idx >= PgVTy->getMinNumElements())
    return None;

  // The intrinsic is extracting a fixed lane so use an extract instead.
  auto *IdxTy = Type::getInt64Ty(II.getContext());
  auto *Extract = ExtractElementInst::Create(Vec, ConstantInt::get(IdxTy, Idx));
  Extract->insertBefore(&II);
  Extract->takeName(&II);
  return IC.replaceInstUsesWith(II, Extract);
}

static Optional<Instruction *> instCombineRDFFR(InstCombiner &IC,
                                                IntrinsicInst &II) {
  LLVMContext &Ctx = II.getContext();
  IRBuilder<> Builder(Ctx);
  Builder.SetInsertPoint(&II);
  // Replace rdffr with predicated rdffr.z intrinsic, so that optimizePTestInstr
  // can work with RDFFR_PP for ptest elimination.
  auto *AllPat =
      ConstantInt::get(Type::getInt32Ty(Ctx), AArch64SVEPredPattern::all);
  auto *PTrue = Builder.CreateIntrinsic(Intrinsic::aarch64_sve_ptrue,
                                        {II.getType()}, {AllPat});
  auto *RDFFR =
      Builder.CreateIntrinsic(Intrinsic::aarch64_sve_rdffr_z, {}, {PTrue});
  RDFFR->takeName(&II);
  return IC.replaceInstUsesWith(II, RDFFR);
}

/// Fold a cntb/cnth/cntw/cntd intrinsic (with \p NumElts elements per
/// 128-bit granule) to a vscale multiple or a constant when the pattern
/// makes the count statically known.
static Optional<Instruction *>
instCombineSVECntElts(InstCombiner &IC, IntrinsicInst &II, unsigned NumElts) {
  const auto Pattern = cast<ConstantInt>(II.getArgOperand(0))->getZExtValue();

  if (Pattern == AArch64SVEPredPattern::all) {
    LLVMContext &Ctx = II.getContext();
    IRBuilder<> Builder(Ctx);
    Builder.SetInsertPoint(&II);

    Constant *StepVal = ConstantInt::get(II.getType(), NumElts);
    auto *VScale = Builder.CreateVScale(StepVal);
    VScale->takeName(&II);
    return IC.replaceInstUsesWith(II, VScale);
  }

  unsigned MinNumElts = getNumElementsFromSVEPredPattern(Pattern);

  // Fixed-count patterns that are guaranteed to fit are a compile-time
  // constant; anything else is left alone.
  return MinNumElts && NumElts >= MinNumElts
             ? Optional<Instruction *>(IC.replaceInstUsesWith(
                   II, ConstantInt::get(II.getType(), MinNumElts)))
             : None;
}

/// Narrow a ptest whose operands are both widened via convert_to_svbool from
/// the same source type, so the test runs on the unwidened predicates.
static Optional<Instruction *> instCombineSVEPTest(InstCombiner &IC,
                                                   IntrinsicInst &II) {
  IntrinsicInst *Op1 = dyn_cast<IntrinsicInst>(II.getArgOperand(0));
  IntrinsicInst *Op2 = dyn_cast<IntrinsicInst>(II.getArgOperand(1));

  if (Op1 && Op2 &&
      Op1->getIntrinsicID() == Intrinsic::aarch64_sve_convert_to_svbool &&
      Op2->getIntrinsicID() == Intrinsic::aarch64_sve_convert_to_svbool &&
      Op1->getArgOperand(0)->getType() == Op2->getArgOperand(0)->getType()) {

    IRBuilder<> Builder(II.getContext());
    Builder.SetInsertPoint(&II);

    Value *Ops[] = {Op1->getArgOperand(0), Op2->getArgOperand(0)};
    Type *Tys[] = {Op1->getArgOperand(0)->getType()};

    auto *PTest = Builder.CreateIntrinsic(II.getIntrinsicID(), Tys, Ops);

    PTest->takeName(&II);
    return IC.replaceInstUsesWith(II, PTest);
  }

  return None;
}

static Optional<Instruction *> instCombineSVEVectorFMLA(InstCombiner &IC,
                                                        IntrinsicInst &II) {
  // fold (fadd p a (fmul p b c)) -> (fma p a b c)
  Value *P = II.getOperand(0);
  Value *A = II.getOperand(1);
  auto FMul = II.getOperand(2);
  Value *B, *C;
  if (!match(FMul, m_Intrinsic<Intrinsic::aarch64_sve_fmul>(
                       m_Specific(P), m_Value(B), m_Value(C))))
    return None;

  if (!FMul->hasOneUse())
    return None;

  llvm::FastMathFlags FAddFlags = II.getFastMathFlags();
  // Stop the combine when the flags on the inputs differ in case dropping flags
  // would lead to us missing out on more beneficial optimizations.
  if (FAddFlags != cast<CallInst>(FMul)->getFastMathFlags())
    return None;
  // Contraction of fmul+fadd into fma is only legal with the contract flag.
  if (!FAddFlags.allowContract())
    return None;

  IRBuilder<> Builder(II.getContext());
  Builder.SetInsertPoint(&II);
  auto FMLA = Builder.CreateIntrinsic(Intrinsic::aarch64_sve_fmla,
                                      {II.getType()}, {P, A, B, C}, &II);
  FMLA->setFastMathFlags(FAddFlags);
  return IC.replaceInstUsesWith(II, FMLA);
}

/// Return true when \p Pred is known to be an all-lanes-active SVE
/// predicate (a ptrue with the `all` pattern, possibly wrapped in a
/// lane-preserving svbool round trip).
static bool isAllActivePredicate(Value *Pred) {
  // Look through convert.from.svbool(convert.to.svbool(...) chain.
  Value *UncastedPred;
  if (match(Pred, m_Intrinsic<Intrinsic::aarch64_sve_convert_from_svbool>(
                      m_Intrinsic<Intrinsic::aarch64_sve_convert_to_svbool>(
                          m_Value(UncastedPred)))))
    // If the predicate has the same or less lanes than the uncasted
    // predicate then we know the casting has no effect.
    if (cast<ScalableVectorType>(Pred->getType())->getMinNumElements() <=
        cast<ScalableVectorType>(UncastedPred->getType())->getMinNumElements())
      Pred = UncastedPred;

  return match(Pred, m_Intrinsic<Intrinsic::aarch64_sve_ptrue>(
                         m_ConstantInt<AArch64SVEPredPattern::all>()));
}

/// Lower an SVE ld1 to a plain load when the predicate is all-active, or to
/// a masked load otherwise.
static Optional<Instruction *>
instCombineSVELD1(InstCombiner &IC, IntrinsicInst &II, const DataLayout &DL) {
  IRBuilder<> Builder(II.getContext());
  Builder.SetInsertPoint(&II);

  Value *Pred = II.getOperand(0);
  Value *PtrOp = II.getOperand(1);
  Type *VecTy = II.getType();
  Value *VecPtr = Builder.CreateBitCast(PtrOp, VecTy->getPointerTo());

  if (isAllActivePredicate(Pred)) {
    LoadInst *Load = Builder.CreateLoad(VecTy, VecPtr);
    Load->copyMetadata(II);
    return IC.replaceInstUsesWith(II, Load);
  }

  // Inactive lanes are zeroed, matching the ld1 semantics of producing zero
  // for unpredicated elements.
  CallInst *MaskedLoad =
      Builder.CreateMaskedLoad(VecTy, VecPtr, PtrOp->getPointerAlignment(DL),
                               Pred, ConstantAggregateZero::get(VecTy));
  MaskedLoad->copyMetadata(II);
  return IC.replaceInstUsesWith(II, MaskedLoad);
}

static Optional<Instruction *>
instCombineSVEST1(InstCombiner &IC, IntrinsicInst &II, const DataLayout &DL) { 930 IRBuilder<> Builder(II.getContext()); 931 Builder.SetInsertPoint(&II); 932 933 Value *VecOp = II.getOperand(0); 934 Value *Pred = II.getOperand(1); 935 Value *PtrOp = II.getOperand(2); 936 Value *VecPtr = 937 Builder.CreateBitCast(PtrOp, VecOp->getType()->getPointerTo()); 938 939 if (isAllActivePredicate(Pred)) { 940 StoreInst *Store = Builder.CreateStore(VecOp, VecPtr); 941 Store->copyMetadata(II); 942 return IC.eraseInstFromFunction(II); 943 } 944 945 CallInst *MaskedStore = Builder.CreateMaskedStore( 946 VecOp, VecPtr, PtrOp->getPointerAlignment(DL), Pred); 947 MaskedStore->copyMetadata(II); 948 return IC.eraseInstFromFunction(II); 949 } 950 951 static Instruction::BinaryOps intrinsicIDToBinOpCode(unsigned Intrinsic) { 952 switch (Intrinsic) { 953 case Intrinsic::aarch64_sve_fmul: 954 return Instruction::BinaryOps::FMul; 955 case Intrinsic::aarch64_sve_fadd: 956 return Instruction::BinaryOps::FAdd; 957 case Intrinsic::aarch64_sve_fsub: 958 return Instruction::BinaryOps::FSub; 959 default: 960 return Instruction::BinaryOpsEnd; 961 } 962 } 963 964 static Optional<Instruction *> instCombineSVEVectorBinOp(InstCombiner &IC, 965 IntrinsicInst &II) { 966 auto *OpPredicate = II.getOperand(0); 967 auto BinOpCode = intrinsicIDToBinOpCode(II.getIntrinsicID()); 968 if (BinOpCode == Instruction::BinaryOpsEnd || 969 !match(OpPredicate, m_Intrinsic<Intrinsic::aarch64_sve_ptrue>( 970 m_ConstantInt<AArch64SVEPredPattern::all>()))) 971 return None; 972 IRBuilder<> Builder(II.getContext()); 973 Builder.SetInsertPoint(&II); 974 Builder.setFastMathFlags(II.getFastMathFlags()); 975 auto BinOp = 976 Builder.CreateBinOp(BinOpCode, II.getOperand(1), II.getOperand(2)); 977 return IC.replaceInstUsesWith(II, BinOp); 978 } 979 980 static Optional<Instruction *> instCombineSVEVectorFAdd(InstCombiner &IC, 981 IntrinsicInst &II) { 982 if (auto FMLA = instCombineSVEVectorFMLA(IC, II)) 983 return FMLA; 984 return 
instCombineSVEVectorBinOp(IC, II); 985 } 986 987 static Optional<Instruction *> instCombineSVEVectorMul(InstCombiner &IC, 988 IntrinsicInst &II) { 989 auto *OpPredicate = II.getOperand(0); 990 auto *OpMultiplicand = II.getOperand(1); 991 auto *OpMultiplier = II.getOperand(2); 992 993 IRBuilder<> Builder(II.getContext()); 994 Builder.SetInsertPoint(&II); 995 996 // Return true if a given instruction is a unit splat value, false otherwise. 997 auto IsUnitSplat = [](auto *I) { 998 auto *SplatValue = getSplatValue(I); 999 if (!SplatValue) 1000 return false; 1001 return match(SplatValue, m_FPOne()) || match(SplatValue, m_One()); 1002 }; 1003 1004 // Return true if a given instruction is an aarch64_sve_dup intrinsic call 1005 // with a unit splat value, false otherwise. 1006 auto IsUnitDup = [](auto *I) { 1007 auto *IntrI = dyn_cast<IntrinsicInst>(I); 1008 if (!IntrI || IntrI->getIntrinsicID() != Intrinsic::aarch64_sve_dup) 1009 return false; 1010 1011 auto *SplatValue = IntrI->getOperand(2); 1012 return match(SplatValue, m_FPOne()) || match(SplatValue, m_One()); 1013 }; 1014 1015 if (IsUnitSplat(OpMultiplier)) { 1016 // [f]mul pg %n, (dupx 1) => %n 1017 OpMultiplicand->takeName(&II); 1018 return IC.replaceInstUsesWith(II, OpMultiplicand); 1019 } else if (IsUnitDup(OpMultiplier)) { 1020 // [f]mul pg %n, (dup pg 1) => %n 1021 auto *DupInst = cast<IntrinsicInst>(OpMultiplier); 1022 auto *DupPg = DupInst->getOperand(1); 1023 // TODO: this is naive. The optimization is still valid if DupPg 1024 // 'encompasses' OpPredicate, not only if they're the same predicate. 
1025 if (OpPredicate == DupPg) { 1026 OpMultiplicand->takeName(&II); 1027 return IC.replaceInstUsesWith(II, OpMultiplicand); 1028 } 1029 } 1030 1031 return instCombineSVEVectorBinOp(IC, II); 1032 } 1033 1034 static Optional<Instruction *> instCombineSVEUnpack(InstCombiner &IC, 1035 IntrinsicInst &II) { 1036 IRBuilder<> Builder(II.getContext()); 1037 Builder.SetInsertPoint(&II); 1038 Value *UnpackArg = II.getArgOperand(0); 1039 auto *RetTy = cast<ScalableVectorType>(II.getType()); 1040 bool IsSigned = II.getIntrinsicID() == Intrinsic::aarch64_sve_sunpkhi || 1041 II.getIntrinsicID() == Intrinsic::aarch64_sve_sunpklo; 1042 1043 // Hi = uunpkhi(splat(X)) --> Hi = splat(extend(X)) 1044 // Lo = uunpklo(splat(X)) --> Lo = splat(extend(X)) 1045 if (auto *ScalarArg = getSplatValue(UnpackArg)) { 1046 ScalarArg = 1047 Builder.CreateIntCast(ScalarArg, RetTy->getScalarType(), IsSigned); 1048 Value *NewVal = 1049 Builder.CreateVectorSplat(RetTy->getElementCount(), ScalarArg); 1050 NewVal->takeName(&II); 1051 return IC.replaceInstUsesWith(II, NewVal); 1052 } 1053 1054 return None; 1055 } 1056 static Optional<Instruction *> instCombineSVETBL(InstCombiner &IC, 1057 IntrinsicInst &II) { 1058 auto *OpVal = II.getOperand(0); 1059 auto *OpIndices = II.getOperand(1); 1060 VectorType *VTy = cast<VectorType>(II.getType()); 1061 1062 // Check whether OpIndices is a constant splat value < minimal element count 1063 // of result. 1064 auto *SplatValue = dyn_cast_or_null<ConstantInt>(getSplatValue(OpIndices)); 1065 if (!SplatValue || 1066 SplatValue->getValue().uge(VTy->getElementCount().getKnownMinValue())) 1067 return None; 1068 1069 // Convert sve_tbl(OpVal sve_dup_x(SplatValue)) to 1070 // splat_vector(extractelement(OpVal, SplatValue)) for further optimization. 
1071 IRBuilder<> Builder(II.getContext()); 1072 Builder.SetInsertPoint(&II); 1073 auto *Extract = Builder.CreateExtractElement(OpVal, SplatValue); 1074 auto *VectorSplat = 1075 Builder.CreateVectorSplat(VTy->getElementCount(), Extract); 1076 1077 VectorSplat->takeName(&II); 1078 return IC.replaceInstUsesWith(II, VectorSplat); 1079 } 1080 1081 static Optional<Instruction *> instCombineSVETupleGet(InstCombiner &IC, 1082 IntrinsicInst &II) { 1083 // Try to remove sequences of tuple get/set. 1084 Value *SetTuple, *SetIndex, *SetValue; 1085 auto *GetTuple = II.getArgOperand(0); 1086 auto *GetIndex = II.getArgOperand(1); 1087 // Check that we have tuple_get(GetTuple, GetIndex) where GetTuple is a 1088 // call to tuple_set i.e. tuple_set(SetTuple, SetIndex, SetValue). 1089 // Make sure that the types of the current intrinsic and SetValue match 1090 // in order to safely remove the sequence. 1091 if (!match(GetTuple, 1092 m_Intrinsic<Intrinsic::aarch64_sve_tuple_set>( 1093 m_Value(SetTuple), m_Value(SetIndex), m_Value(SetValue))) || 1094 SetValue->getType() != II.getType()) 1095 return None; 1096 // Case where we get the same index right after setting it. 1097 // tuple_get(tuple_set(SetTuple, SetIndex, SetValue), GetIndex) --> SetValue 1098 if (GetIndex == SetIndex) 1099 return IC.replaceInstUsesWith(II, SetValue); 1100 // If we are getting a different index than what was set in the tuple_set 1101 // intrinsic. We can just set the input tuple to the one up in the chain. 
1102 // tuple_get(tuple_set(SetTuple, SetIndex, SetValue), GetIndex) 1103 // --> tuple_get(SetTuple, GetIndex) 1104 return IC.replaceOperand(II, 0, SetTuple); 1105 } 1106 1107 static Optional<Instruction *> instCombineSVEZip(InstCombiner &IC, 1108 IntrinsicInst &II) { 1109 // zip1(uzp1(A, B), uzp2(A, B)) --> A 1110 // zip2(uzp1(A, B), uzp2(A, B)) --> B 1111 Value *A, *B; 1112 if (match(II.getArgOperand(0), 1113 m_Intrinsic<Intrinsic::aarch64_sve_uzp1>(m_Value(A), m_Value(B))) && 1114 match(II.getArgOperand(1), m_Intrinsic<Intrinsic::aarch64_sve_uzp2>( 1115 m_Specific(A), m_Specific(B)))) 1116 return IC.replaceInstUsesWith( 1117 II, (II.getIntrinsicID() == Intrinsic::aarch64_sve_zip1 ? A : B)); 1118 1119 return None; 1120 } 1121 1122 static Optional<Instruction *> instCombineLD1GatherIndex(InstCombiner &IC, 1123 IntrinsicInst &II) { 1124 Value *Mask = II.getOperand(0); 1125 Value *BasePtr = II.getOperand(1); 1126 Value *Index = II.getOperand(2); 1127 Type *Ty = II.getType(); 1128 Value *PassThru = ConstantAggregateZero::get(Ty); 1129 1130 // Contiguous gather => masked load. 
1131 // (sve.ld1.gather.index Mask BasePtr (sve.index IndexBase 1)) 1132 // => (masked.load (gep BasePtr IndexBase) Align Mask zeroinitializer) 1133 Value *IndexBase; 1134 if (match(Index, m_Intrinsic<Intrinsic::aarch64_sve_index>( 1135 m_Value(IndexBase), m_SpecificInt(1)))) { 1136 IRBuilder<> Builder(II.getContext()); 1137 Builder.SetInsertPoint(&II); 1138 1139 Align Alignment = 1140 BasePtr->getPointerAlignment(II.getModule()->getDataLayout()); 1141 1142 Type *VecPtrTy = PointerType::getUnqual(Ty); 1143 Value *Ptr = Builder.CreateGEP( 1144 cast<VectorType>(Ty)->getElementType(), BasePtr, IndexBase); 1145 Ptr = Builder.CreateBitCast(Ptr, VecPtrTy); 1146 CallInst *MaskedLoad = 1147 Builder.CreateMaskedLoad(Ty, Ptr, Alignment, Mask, PassThru); 1148 MaskedLoad->takeName(&II); 1149 return IC.replaceInstUsesWith(II, MaskedLoad); 1150 } 1151 1152 return None; 1153 } 1154 1155 static Optional<Instruction *> instCombineST1ScatterIndex(InstCombiner &IC, 1156 IntrinsicInst &II) { 1157 Value *Val = II.getOperand(0); 1158 Value *Mask = II.getOperand(1); 1159 Value *BasePtr = II.getOperand(2); 1160 Value *Index = II.getOperand(3); 1161 Type *Ty = Val->getType(); 1162 1163 // Contiguous scatter => masked store. 
1164 // (sve.st1.scatter.index Value Mask BasePtr (sve.index IndexBase 1)) 1165 // => (masked.store Value (gep BasePtr IndexBase) Align Mask) 1166 Value *IndexBase; 1167 if (match(Index, m_Intrinsic<Intrinsic::aarch64_sve_index>( 1168 m_Value(IndexBase), m_SpecificInt(1)))) { 1169 IRBuilder<> Builder(II.getContext()); 1170 Builder.SetInsertPoint(&II); 1171 1172 Align Alignment = 1173 BasePtr->getPointerAlignment(II.getModule()->getDataLayout()); 1174 1175 Value *Ptr = Builder.CreateGEP( 1176 cast<VectorType>(Ty)->getElementType(), BasePtr, IndexBase); 1177 Type *VecPtrTy = PointerType::getUnqual(Ty); 1178 Ptr = Builder.CreateBitCast(Ptr, VecPtrTy); 1179 1180 (void)Builder.CreateMaskedStore(Val, Ptr, Alignment, Mask); 1181 1182 return IC.eraseInstFromFunction(II); 1183 } 1184 1185 return None; 1186 } 1187 1188 static Optional<Instruction *> instCombineSVESDIV(InstCombiner &IC, 1189 IntrinsicInst &II) { 1190 IRBuilder<> Builder(II.getContext()); 1191 Builder.SetInsertPoint(&II); 1192 Type *Int32Ty = Builder.getInt32Ty(); 1193 Value *Pred = II.getOperand(0); 1194 Value *Vec = II.getOperand(1); 1195 Value *DivVec = II.getOperand(2); 1196 1197 Value *SplatValue = getSplatValue(DivVec); 1198 ConstantInt *SplatConstantInt = dyn_cast_or_null<ConstantInt>(SplatValue); 1199 if (!SplatConstantInt) 1200 return None; 1201 APInt Divisor = SplatConstantInt->getValue(); 1202 1203 if (Divisor.isPowerOf2()) { 1204 Constant *DivisorLog2 = ConstantInt::get(Int32Ty, Divisor.logBase2()); 1205 auto ASRD = Builder.CreateIntrinsic( 1206 Intrinsic::aarch64_sve_asrd, {II.getType()}, {Pred, Vec, DivisorLog2}); 1207 return IC.replaceInstUsesWith(II, ASRD); 1208 } 1209 if (Divisor.isNegatedPowerOf2()) { 1210 Divisor.negate(); 1211 Constant *DivisorLog2 = ConstantInt::get(Int32Ty, Divisor.logBase2()); 1212 auto ASRD = Builder.CreateIntrinsic( 1213 Intrinsic::aarch64_sve_asrd, {II.getType()}, {Pred, Vec, DivisorLog2}); 1214 auto NEG = Builder.CreateIntrinsic(Intrinsic::aarch64_sve_neg, 1215 
{ASRD->getType()}, {ASRD, Pred, ASRD}); 1216 return IC.replaceInstUsesWith(II, NEG); 1217 } 1218 1219 return None; 1220 } 1221 1222 Optional<Instruction *> 1223 AArch64TTIImpl::instCombineIntrinsic(InstCombiner &IC, 1224 IntrinsicInst &II) const { 1225 Intrinsic::ID IID = II.getIntrinsicID(); 1226 switch (IID) { 1227 default: 1228 break; 1229 case Intrinsic::aarch64_sve_convert_from_svbool: 1230 return instCombineConvertFromSVBool(IC, II); 1231 case Intrinsic::aarch64_sve_dup: 1232 return instCombineSVEDup(IC, II); 1233 case Intrinsic::aarch64_sve_dup_x: 1234 return instCombineSVEDupX(IC, II); 1235 case Intrinsic::aarch64_sve_cmpne: 1236 case Intrinsic::aarch64_sve_cmpne_wide: 1237 return instCombineSVECmpNE(IC, II); 1238 case Intrinsic::aarch64_sve_rdffr: 1239 return instCombineRDFFR(IC, II); 1240 case Intrinsic::aarch64_sve_lasta: 1241 case Intrinsic::aarch64_sve_lastb: 1242 return instCombineSVELast(IC, II); 1243 case Intrinsic::aarch64_sve_cntd: 1244 return instCombineSVECntElts(IC, II, 2); 1245 case Intrinsic::aarch64_sve_cntw: 1246 return instCombineSVECntElts(IC, II, 4); 1247 case Intrinsic::aarch64_sve_cnth: 1248 return instCombineSVECntElts(IC, II, 8); 1249 case Intrinsic::aarch64_sve_cntb: 1250 return instCombineSVECntElts(IC, II, 16); 1251 case Intrinsic::aarch64_sve_ptest_any: 1252 case Intrinsic::aarch64_sve_ptest_first: 1253 case Intrinsic::aarch64_sve_ptest_last: 1254 return instCombineSVEPTest(IC, II); 1255 case Intrinsic::aarch64_sve_mul: 1256 case Intrinsic::aarch64_sve_fmul: 1257 return instCombineSVEVectorMul(IC, II); 1258 case Intrinsic::aarch64_sve_fadd: 1259 return instCombineSVEVectorFAdd(IC, II); 1260 case Intrinsic::aarch64_sve_fsub: 1261 return instCombineSVEVectorBinOp(IC, II); 1262 case Intrinsic::aarch64_sve_tbl: 1263 return instCombineSVETBL(IC, II); 1264 case Intrinsic::aarch64_sve_uunpkhi: 1265 case Intrinsic::aarch64_sve_uunpklo: 1266 case Intrinsic::aarch64_sve_sunpkhi: 1267 case Intrinsic::aarch64_sve_sunpklo: 1268 return 
instCombineSVEUnpack(IC, II); 1269 case Intrinsic::aarch64_sve_tuple_get: 1270 return instCombineSVETupleGet(IC, II); 1271 case Intrinsic::aarch64_sve_zip1: 1272 case Intrinsic::aarch64_sve_zip2: 1273 return instCombineSVEZip(IC, II); 1274 case Intrinsic::aarch64_sve_ld1_gather_index: 1275 return instCombineLD1GatherIndex(IC, II); 1276 case Intrinsic::aarch64_sve_st1_scatter_index: 1277 return instCombineST1ScatterIndex(IC, II); 1278 case Intrinsic::aarch64_sve_ld1: 1279 return instCombineSVELD1(IC, II, DL); 1280 case Intrinsic::aarch64_sve_st1: 1281 return instCombineSVEST1(IC, II, DL); 1282 case Intrinsic::aarch64_sve_sdiv: 1283 return instCombineSVESDIV(IC, II); 1284 case Intrinsic::aarch64_sve_sel: 1285 return instCombineSVESel(IC, II); 1286 } 1287 1288 return None; 1289 } 1290 1291 Optional<Value *> AArch64TTIImpl::simplifyDemandedVectorEltsIntrinsic( 1292 InstCombiner &IC, IntrinsicInst &II, APInt OrigDemandedElts, 1293 APInt &UndefElts, APInt &UndefElts2, APInt &UndefElts3, 1294 std::function<void(Instruction *, unsigned, APInt, APInt &)> 1295 SimplifyAndSetOp) const { 1296 switch (II.getIntrinsicID()) { 1297 default: 1298 break; 1299 case Intrinsic::aarch64_neon_fcvtxn: 1300 case Intrinsic::aarch64_neon_rshrn: 1301 case Intrinsic::aarch64_neon_sqrshrn: 1302 case Intrinsic::aarch64_neon_sqrshrun: 1303 case Intrinsic::aarch64_neon_sqshrn: 1304 case Intrinsic::aarch64_neon_sqshrun: 1305 case Intrinsic::aarch64_neon_sqxtn: 1306 case Intrinsic::aarch64_neon_sqxtun: 1307 case Intrinsic::aarch64_neon_uqrshrn: 1308 case Intrinsic::aarch64_neon_uqshrn: 1309 case Intrinsic::aarch64_neon_uqxtn: 1310 SimplifyAndSetOp(&II, 0, OrigDemandedElts, UndefElts); 1311 break; 1312 } 1313 1314 return None; 1315 } 1316 1317 bool AArch64TTIImpl::isWideningInstruction(Type *DstTy, unsigned Opcode, 1318 ArrayRef<const Value *> Args) { 1319 1320 // A helper that returns a vector type from the given type. The number of 1321 // elements in type Ty determines the vector width. 
1322 auto toVectorTy = [&](Type *ArgTy) { 1323 return VectorType::get(ArgTy->getScalarType(), 1324 cast<VectorType>(DstTy)->getElementCount()); 1325 }; 1326 1327 // Exit early if DstTy is not a vector type whose elements are at least 1328 // 16-bits wide. 1329 if (!DstTy->isVectorTy() || DstTy->getScalarSizeInBits() < 16) 1330 return false; 1331 1332 // Determine if the operation has a widening variant. We consider both the 1333 // "long" (e.g., usubl) and "wide" (e.g., usubw) versions of the 1334 // instructions. 1335 // 1336 // TODO: Add additional widening operations (e.g., shl, etc.) once we 1337 // verify that their extending operands are eliminated during code 1338 // generation. 1339 switch (Opcode) { 1340 case Instruction::Add: // UADDL(2), SADDL(2), UADDW(2), SADDW(2). 1341 case Instruction::Sub: // USUBL(2), SSUBL(2), USUBW(2), SSUBW(2). 1342 case Instruction::Mul: // SMULL(2), UMULL(2) 1343 break; 1344 default: 1345 return false; 1346 } 1347 1348 // To be a widening instruction (either the "wide" or "long" versions), the 1349 // second operand must be a sign- or zero extend. 1350 if (Args.size() != 2 || 1351 (!isa<SExtInst>(Args[1]) && !isa<ZExtInst>(Args[1]))) 1352 return false; 1353 auto *Extend = cast<CastInst>(Args[1]); 1354 auto *Arg0 = dyn_cast<CastInst>(Args[0]); 1355 1356 // A mul only has a mull version (not like addw). Both operands need to be 1357 // extending and the same type. 1358 if (Opcode == Instruction::Mul && 1359 (!Arg0 || Arg0->getOpcode() != Extend->getOpcode() || 1360 Arg0->getOperand(0)->getType() != Extend->getOperand(0)->getType())) 1361 return false; 1362 1363 // Legalize the destination type and ensure it can be used in a widening 1364 // operation. 
1365 auto DstTyL = TLI->getTypeLegalizationCost(DL, DstTy); 1366 unsigned DstElTySize = DstTyL.second.getScalarSizeInBits(); 1367 if (!DstTyL.second.isVector() || DstElTySize != DstTy->getScalarSizeInBits()) 1368 return false; 1369 1370 // Legalize the source type and ensure it can be used in a widening 1371 // operation. 1372 auto *SrcTy = toVectorTy(Extend->getSrcTy()); 1373 auto SrcTyL = TLI->getTypeLegalizationCost(DL, SrcTy); 1374 unsigned SrcElTySize = SrcTyL.second.getScalarSizeInBits(); 1375 if (!SrcTyL.second.isVector() || SrcElTySize != SrcTy->getScalarSizeInBits()) 1376 return false; 1377 1378 // Get the total number of vector elements in the legalized types. 1379 InstructionCost NumDstEls = 1380 DstTyL.first * DstTyL.second.getVectorMinNumElements(); 1381 InstructionCost NumSrcEls = 1382 SrcTyL.first * SrcTyL.second.getVectorMinNumElements(); 1383 1384 // Return true if the legalized types have the same number of vector elements 1385 // and the destination element type size is twice that of the source type. 1386 return NumDstEls == NumSrcEls && 2 * SrcElTySize == DstElTySize; 1387 } 1388 1389 InstructionCost AArch64TTIImpl::getCastInstrCost(unsigned Opcode, Type *Dst, 1390 Type *Src, 1391 TTI::CastContextHint CCH, 1392 TTI::TargetCostKind CostKind, 1393 const Instruction *I) { 1394 int ISD = TLI->InstructionOpcodeToISD(Opcode); 1395 assert(ISD && "Invalid opcode"); 1396 1397 // If the cast is observable, and it is used by a widening instruction (e.g., 1398 // uaddl, saddw, etc.), it may be free. 1399 if (I && I->hasOneUser()) { 1400 auto *SingleUser = cast<Instruction>(*I->user_begin()); 1401 SmallVector<const Value *, 4> Operands(SingleUser->operand_values()); 1402 if (isWideningInstruction(Dst, SingleUser->getOpcode(), Operands)) { 1403 // If the cast is the second operand, it is free. We will generate either 1404 // a "wide" or "long" version of the widening instruction. 
1405 if (I == SingleUser->getOperand(1)) 1406 return 0; 1407 // If the cast is not the second operand, it will be free if it looks the 1408 // same as the second operand. In this case, we will generate a "long" 1409 // version of the widening instruction. 1410 if (auto *Cast = dyn_cast<CastInst>(SingleUser->getOperand(1))) 1411 if (I->getOpcode() == unsigned(Cast->getOpcode()) && 1412 cast<CastInst>(I)->getSrcTy() == Cast->getSrcTy()) 1413 return 0; 1414 } 1415 } 1416 1417 // TODO: Allow non-throughput costs that aren't binary. 1418 auto AdjustCost = [&CostKind](InstructionCost Cost) -> InstructionCost { 1419 if (CostKind != TTI::TCK_RecipThroughput) 1420 return Cost == 0 ? 0 : 1; 1421 return Cost; 1422 }; 1423 1424 EVT SrcTy = TLI->getValueType(DL, Src); 1425 EVT DstTy = TLI->getValueType(DL, Dst); 1426 1427 if (!SrcTy.isSimple() || !DstTy.isSimple()) 1428 return AdjustCost( 1429 BaseT::getCastInstrCost(Opcode, Dst, Src, CCH, CostKind, I)); 1430 1431 static const TypeConversionCostTblEntry 1432 ConversionTbl[] = { 1433 { ISD::TRUNCATE, MVT::v4i16, MVT::v4i32, 1 }, 1434 { ISD::TRUNCATE, MVT::v4i32, MVT::v4i64, 0 }, 1435 { ISD::TRUNCATE, MVT::v8i8, MVT::v8i32, 3 }, 1436 { ISD::TRUNCATE, MVT::v16i8, MVT::v16i32, 6 }, 1437 1438 // Truncations on nxvmiN 1439 { ISD::TRUNCATE, MVT::nxv2i1, MVT::nxv2i16, 1 }, 1440 { ISD::TRUNCATE, MVT::nxv2i1, MVT::nxv2i32, 1 }, 1441 { ISD::TRUNCATE, MVT::nxv2i1, MVT::nxv2i64, 1 }, 1442 { ISD::TRUNCATE, MVT::nxv4i1, MVT::nxv4i16, 1 }, 1443 { ISD::TRUNCATE, MVT::nxv4i1, MVT::nxv4i32, 1 }, 1444 { ISD::TRUNCATE, MVT::nxv4i1, MVT::nxv4i64, 2 }, 1445 { ISD::TRUNCATE, MVT::nxv8i1, MVT::nxv8i16, 1 }, 1446 { ISD::TRUNCATE, MVT::nxv8i1, MVT::nxv8i32, 3 }, 1447 { ISD::TRUNCATE, MVT::nxv8i1, MVT::nxv8i64, 5 }, 1448 { ISD::TRUNCATE, MVT::nxv16i1, MVT::nxv16i8, 1 }, 1449 { ISD::TRUNCATE, MVT::nxv2i16, MVT::nxv2i32, 1 }, 1450 { ISD::TRUNCATE, MVT::nxv2i32, MVT::nxv2i64, 1 }, 1451 { ISD::TRUNCATE, MVT::nxv4i16, MVT::nxv4i32, 1 }, 1452 { ISD::TRUNCATE, 
MVT::nxv4i32, MVT::nxv4i64, 2 }, 1453 { ISD::TRUNCATE, MVT::nxv8i16, MVT::nxv8i32, 3 }, 1454 { ISD::TRUNCATE, MVT::nxv8i32, MVT::nxv8i64, 6 }, 1455 1456 // The number of shll instructions for the extension. 1457 { ISD::SIGN_EXTEND, MVT::v4i64, MVT::v4i16, 3 }, 1458 { ISD::ZERO_EXTEND, MVT::v4i64, MVT::v4i16, 3 }, 1459 { ISD::SIGN_EXTEND, MVT::v4i64, MVT::v4i32, 2 }, 1460 { ISD::ZERO_EXTEND, MVT::v4i64, MVT::v4i32, 2 }, 1461 { ISD::SIGN_EXTEND, MVT::v8i32, MVT::v8i8, 3 }, 1462 { ISD::ZERO_EXTEND, MVT::v8i32, MVT::v8i8, 3 }, 1463 { ISD::SIGN_EXTEND, MVT::v8i32, MVT::v8i16, 2 }, 1464 { ISD::ZERO_EXTEND, MVT::v8i32, MVT::v8i16, 2 }, 1465 { ISD::SIGN_EXTEND, MVT::v8i64, MVT::v8i8, 7 }, 1466 { ISD::ZERO_EXTEND, MVT::v8i64, MVT::v8i8, 7 }, 1467 { ISD::SIGN_EXTEND, MVT::v8i64, MVT::v8i16, 6 }, 1468 { ISD::ZERO_EXTEND, MVT::v8i64, MVT::v8i16, 6 }, 1469 { ISD::SIGN_EXTEND, MVT::v16i16, MVT::v16i8, 2 }, 1470 { ISD::ZERO_EXTEND, MVT::v16i16, MVT::v16i8, 2 }, 1471 { ISD::SIGN_EXTEND, MVT::v16i32, MVT::v16i8, 6 }, 1472 { ISD::ZERO_EXTEND, MVT::v16i32, MVT::v16i8, 6 }, 1473 1474 // LowerVectorINT_TO_FP: 1475 { ISD::SINT_TO_FP, MVT::v2f32, MVT::v2i32, 1 }, 1476 { ISD::SINT_TO_FP, MVT::v4f32, MVT::v4i32, 1 }, 1477 { ISD::SINT_TO_FP, MVT::v2f64, MVT::v2i64, 1 }, 1478 { ISD::UINT_TO_FP, MVT::v2f32, MVT::v2i32, 1 }, 1479 { ISD::UINT_TO_FP, MVT::v4f32, MVT::v4i32, 1 }, 1480 { ISD::UINT_TO_FP, MVT::v2f64, MVT::v2i64, 1 }, 1481 1482 // Complex: to v2f32 1483 { ISD::SINT_TO_FP, MVT::v2f32, MVT::v2i8, 3 }, 1484 { ISD::SINT_TO_FP, MVT::v2f32, MVT::v2i16, 3 }, 1485 { ISD::SINT_TO_FP, MVT::v2f32, MVT::v2i64, 2 }, 1486 { ISD::UINT_TO_FP, MVT::v2f32, MVT::v2i8, 3 }, 1487 { ISD::UINT_TO_FP, MVT::v2f32, MVT::v2i16, 3 }, 1488 { ISD::UINT_TO_FP, MVT::v2f32, MVT::v2i64, 2 }, 1489 1490 // Complex: to v4f32 1491 { ISD::SINT_TO_FP, MVT::v4f32, MVT::v4i8, 4 }, 1492 { ISD::SINT_TO_FP, MVT::v4f32, MVT::v4i16, 2 }, 1493 { ISD::UINT_TO_FP, MVT::v4f32, MVT::v4i8, 3 }, 1494 { ISD::UINT_TO_FP, MVT::v4f32, 
MVT::v4i16, 2 }, 1495 1496 // Complex: to v8f32 1497 { ISD::SINT_TO_FP, MVT::v8f32, MVT::v8i8, 10 }, 1498 { ISD::SINT_TO_FP, MVT::v8f32, MVT::v8i16, 4 }, 1499 { ISD::UINT_TO_FP, MVT::v8f32, MVT::v8i8, 10 }, 1500 { ISD::UINT_TO_FP, MVT::v8f32, MVT::v8i16, 4 }, 1501 1502 // Complex: to v16f32 1503 { ISD::SINT_TO_FP, MVT::v16f32, MVT::v16i8, 21 }, 1504 { ISD::UINT_TO_FP, MVT::v16f32, MVT::v16i8, 21 }, 1505 1506 // Complex: to v2f64 1507 { ISD::SINT_TO_FP, MVT::v2f64, MVT::v2i8, 4 }, 1508 { ISD::SINT_TO_FP, MVT::v2f64, MVT::v2i16, 4 }, 1509 { ISD::SINT_TO_FP, MVT::v2f64, MVT::v2i32, 2 }, 1510 { ISD::UINT_TO_FP, MVT::v2f64, MVT::v2i8, 4 }, 1511 { ISD::UINT_TO_FP, MVT::v2f64, MVT::v2i16, 4 }, 1512 { ISD::UINT_TO_FP, MVT::v2f64, MVT::v2i32, 2 }, 1513 1514 1515 // LowerVectorFP_TO_INT 1516 { ISD::FP_TO_SINT, MVT::v2i32, MVT::v2f32, 1 }, 1517 { ISD::FP_TO_SINT, MVT::v4i32, MVT::v4f32, 1 }, 1518 { ISD::FP_TO_SINT, MVT::v2i64, MVT::v2f64, 1 }, 1519 { ISD::FP_TO_UINT, MVT::v2i32, MVT::v2f32, 1 }, 1520 { ISD::FP_TO_UINT, MVT::v4i32, MVT::v4f32, 1 }, 1521 { ISD::FP_TO_UINT, MVT::v2i64, MVT::v2f64, 1 }, 1522 1523 // Complex, from v2f32: legal type is v2i32 (no cost) or v2i64 (1 ext). 1524 { ISD::FP_TO_SINT, MVT::v2i64, MVT::v2f32, 2 }, 1525 { ISD::FP_TO_SINT, MVT::v2i16, MVT::v2f32, 1 }, 1526 { ISD::FP_TO_SINT, MVT::v2i8, MVT::v2f32, 1 }, 1527 { ISD::FP_TO_UINT, MVT::v2i64, MVT::v2f32, 2 }, 1528 { ISD::FP_TO_UINT, MVT::v2i16, MVT::v2f32, 1 }, 1529 { ISD::FP_TO_UINT, MVT::v2i8, MVT::v2f32, 1 }, 1530 1531 // Complex, from v4f32: legal type is v4i16, 1 narrowing => ~2 1532 { ISD::FP_TO_SINT, MVT::v4i16, MVT::v4f32, 2 }, 1533 { ISD::FP_TO_SINT, MVT::v4i8, MVT::v4f32, 2 }, 1534 { ISD::FP_TO_UINT, MVT::v4i16, MVT::v4f32, 2 }, 1535 { ISD::FP_TO_UINT, MVT::v4i8, MVT::v4f32, 2 }, 1536 1537 // Complex, from nxv2f32. 
1538 { ISD::FP_TO_SINT, MVT::nxv2i64, MVT::nxv2f32, 1 }, 1539 { ISD::FP_TO_SINT, MVT::nxv2i32, MVT::nxv2f32, 1 }, 1540 { ISD::FP_TO_SINT, MVT::nxv2i16, MVT::nxv2f32, 1 }, 1541 { ISD::FP_TO_SINT, MVT::nxv2i8, MVT::nxv2f32, 1 }, 1542 { ISD::FP_TO_UINT, MVT::nxv2i64, MVT::nxv2f32, 1 }, 1543 { ISD::FP_TO_UINT, MVT::nxv2i32, MVT::nxv2f32, 1 }, 1544 { ISD::FP_TO_UINT, MVT::nxv2i16, MVT::nxv2f32, 1 }, 1545 { ISD::FP_TO_UINT, MVT::nxv2i8, MVT::nxv2f32, 1 }, 1546 1547 // Complex, from v2f64: legal type is v2i32, 1 narrowing => ~2. 1548 { ISD::FP_TO_SINT, MVT::v2i32, MVT::v2f64, 2 }, 1549 { ISD::FP_TO_SINT, MVT::v2i16, MVT::v2f64, 2 }, 1550 { ISD::FP_TO_SINT, MVT::v2i8, MVT::v2f64, 2 }, 1551 { ISD::FP_TO_UINT, MVT::v2i32, MVT::v2f64, 2 }, 1552 { ISD::FP_TO_UINT, MVT::v2i16, MVT::v2f64, 2 }, 1553 { ISD::FP_TO_UINT, MVT::v2i8, MVT::v2f64, 2 }, 1554 1555 // Complex, from nxv2f64. 1556 { ISD::FP_TO_SINT, MVT::nxv2i64, MVT::nxv2f64, 1 }, 1557 { ISD::FP_TO_SINT, MVT::nxv2i32, MVT::nxv2f64, 1 }, 1558 { ISD::FP_TO_SINT, MVT::nxv2i16, MVT::nxv2f64, 1 }, 1559 { ISD::FP_TO_SINT, MVT::nxv2i8, MVT::nxv2f64, 1 }, 1560 { ISD::FP_TO_UINT, MVT::nxv2i64, MVT::nxv2f64, 1 }, 1561 { ISD::FP_TO_UINT, MVT::nxv2i32, MVT::nxv2f64, 1 }, 1562 { ISD::FP_TO_UINT, MVT::nxv2i16, MVT::nxv2f64, 1 }, 1563 { ISD::FP_TO_UINT, MVT::nxv2i8, MVT::nxv2f64, 1 }, 1564 1565 // Complex, from nxv4f32. 1566 { ISD::FP_TO_SINT, MVT::nxv4i64, MVT::nxv4f32, 4 }, 1567 { ISD::FP_TO_SINT, MVT::nxv4i32, MVT::nxv4f32, 1 }, 1568 { ISD::FP_TO_SINT, MVT::nxv4i16, MVT::nxv4f32, 1 }, 1569 { ISD::FP_TO_SINT, MVT::nxv4i8, MVT::nxv4f32, 1 }, 1570 { ISD::FP_TO_UINT, MVT::nxv4i64, MVT::nxv4f32, 4 }, 1571 { ISD::FP_TO_UINT, MVT::nxv4i32, MVT::nxv4f32, 1 }, 1572 { ISD::FP_TO_UINT, MVT::nxv4i16, MVT::nxv4f32, 1 }, 1573 { ISD::FP_TO_UINT, MVT::nxv4i8, MVT::nxv4f32, 1 }, 1574 1575 // Complex, from nxv8f64. Illegal -> illegal conversions not required. 
1576 { ISD::FP_TO_SINT, MVT::nxv8i16, MVT::nxv8f64, 7 }, 1577 { ISD::FP_TO_SINT, MVT::nxv8i8, MVT::nxv8f64, 7 }, 1578 { ISD::FP_TO_UINT, MVT::nxv8i16, MVT::nxv8f64, 7 }, 1579 { ISD::FP_TO_UINT, MVT::nxv8i8, MVT::nxv8f64, 7 }, 1580 1581 // Complex, from nxv4f64. Illegal -> illegal conversions not required. 1582 { ISD::FP_TO_SINT, MVT::nxv4i32, MVT::nxv4f64, 3 }, 1583 { ISD::FP_TO_SINT, MVT::nxv4i16, MVT::nxv4f64, 3 }, 1584 { ISD::FP_TO_SINT, MVT::nxv4i8, MVT::nxv4f64, 3 }, 1585 { ISD::FP_TO_UINT, MVT::nxv4i32, MVT::nxv4f64, 3 }, 1586 { ISD::FP_TO_UINT, MVT::nxv4i16, MVT::nxv4f64, 3 }, 1587 { ISD::FP_TO_UINT, MVT::nxv4i8, MVT::nxv4f64, 3 }, 1588 1589 // Complex, from nxv8f32. Illegal -> illegal conversions not required. 1590 { ISD::FP_TO_SINT, MVT::nxv8i16, MVT::nxv8f32, 3 }, 1591 { ISD::FP_TO_SINT, MVT::nxv8i8, MVT::nxv8f32, 3 }, 1592 { ISD::FP_TO_UINT, MVT::nxv8i16, MVT::nxv8f32, 3 }, 1593 { ISD::FP_TO_UINT, MVT::nxv8i8, MVT::nxv8f32, 3 }, 1594 1595 // Complex, from nxv8f16. 1596 { ISD::FP_TO_SINT, MVT::nxv8i64, MVT::nxv8f16, 10 }, 1597 { ISD::FP_TO_SINT, MVT::nxv8i32, MVT::nxv8f16, 4 }, 1598 { ISD::FP_TO_SINT, MVT::nxv8i16, MVT::nxv8f16, 1 }, 1599 { ISD::FP_TO_SINT, MVT::nxv8i8, MVT::nxv8f16, 1 }, 1600 { ISD::FP_TO_UINT, MVT::nxv8i64, MVT::nxv8f16, 10 }, 1601 { ISD::FP_TO_UINT, MVT::nxv8i32, MVT::nxv8f16, 4 }, 1602 { ISD::FP_TO_UINT, MVT::nxv8i16, MVT::nxv8f16, 1 }, 1603 { ISD::FP_TO_UINT, MVT::nxv8i8, MVT::nxv8f16, 1 }, 1604 1605 // Complex, from nxv4f16. 1606 { ISD::FP_TO_SINT, MVT::nxv4i64, MVT::nxv4f16, 4 }, 1607 { ISD::FP_TO_SINT, MVT::nxv4i32, MVT::nxv4f16, 1 }, 1608 { ISD::FP_TO_SINT, MVT::nxv4i16, MVT::nxv4f16, 1 }, 1609 { ISD::FP_TO_SINT, MVT::nxv4i8, MVT::nxv4f16, 1 }, 1610 { ISD::FP_TO_UINT, MVT::nxv4i64, MVT::nxv4f16, 4 }, 1611 { ISD::FP_TO_UINT, MVT::nxv4i32, MVT::nxv4f16, 1 }, 1612 { ISD::FP_TO_UINT, MVT::nxv4i16, MVT::nxv4f16, 1 }, 1613 { ISD::FP_TO_UINT, MVT::nxv4i8, MVT::nxv4f16, 1 }, 1614 1615 // Complex, from nxv2f16. 
1616 { ISD::FP_TO_SINT, MVT::nxv2i64, MVT::nxv2f16, 1 }, 1617 { ISD::FP_TO_SINT, MVT::nxv2i32, MVT::nxv2f16, 1 }, 1618 { ISD::FP_TO_SINT, MVT::nxv2i16, MVT::nxv2f16, 1 }, 1619 { ISD::FP_TO_SINT, MVT::nxv2i8, MVT::nxv2f16, 1 }, 1620 { ISD::FP_TO_UINT, MVT::nxv2i64, MVT::nxv2f16, 1 }, 1621 { ISD::FP_TO_UINT, MVT::nxv2i32, MVT::nxv2f16, 1 }, 1622 { ISD::FP_TO_UINT, MVT::nxv2i16, MVT::nxv2f16, 1 }, 1623 { ISD::FP_TO_UINT, MVT::nxv2i8, MVT::nxv2f16, 1 }, 1624 1625 // Truncate from nxvmf32 to nxvmf16. 1626 { ISD::FP_ROUND, MVT::nxv2f16, MVT::nxv2f32, 1 }, 1627 { ISD::FP_ROUND, MVT::nxv4f16, MVT::nxv4f32, 1 }, 1628 { ISD::FP_ROUND, MVT::nxv8f16, MVT::nxv8f32, 3 }, 1629 1630 // Truncate from nxvmf64 to nxvmf16. 1631 { ISD::FP_ROUND, MVT::nxv2f16, MVT::nxv2f64, 1 }, 1632 { ISD::FP_ROUND, MVT::nxv4f16, MVT::nxv4f64, 3 }, 1633 { ISD::FP_ROUND, MVT::nxv8f16, MVT::nxv8f64, 7 }, 1634 1635 // Truncate from nxvmf64 to nxvmf32. 1636 { ISD::FP_ROUND, MVT::nxv2f32, MVT::nxv2f64, 1 }, 1637 { ISD::FP_ROUND, MVT::nxv4f32, MVT::nxv4f64, 3 }, 1638 { ISD::FP_ROUND, MVT::nxv8f32, MVT::nxv8f64, 6 }, 1639 1640 // Extend from nxvmf16 to nxvmf32. 1641 { ISD::FP_EXTEND, MVT::nxv2f32, MVT::nxv2f16, 1}, 1642 { ISD::FP_EXTEND, MVT::nxv4f32, MVT::nxv4f16, 1}, 1643 { ISD::FP_EXTEND, MVT::nxv8f32, MVT::nxv8f16, 2}, 1644 1645 // Extend from nxvmf16 to nxvmf64. 1646 { ISD::FP_EXTEND, MVT::nxv2f64, MVT::nxv2f16, 1}, 1647 { ISD::FP_EXTEND, MVT::nxv4f64, MVT::nxv4f16, 2}, 1648 { ISD::FP_EXTEND, MVT::nxv8f64, MVT::nxv8f16, 4}, 1649 1650 // Extend from nxvmf32 to nxvmf64. 
1651 { ISD::FP_EXTEND, MVT::nxv2f64, MVT::nxv2f32, 1}, 1652 { ISD::FP_EXTEND, MVT::nxv4f64, MVT::nxv4f32, 2}, 1653 { ISD::FP_EXTEND, MVT::nxv8f64, MVT::nxv8f32, 6}, 1654 1655 // Bitcasts from float to integer 1656 { ISD::BITCAST, MVT::nxv2f16, MVT::nxv2i16, 0 }, 1657 { ISD::BITCAST, MVT::nxv4f16, MVT::nxv4i16, 0 }, 1658 { ISD::BITCAST, MVT::nxv2f32, MVT::nxv2i32, 0 }, 1659 1660 // Bitcasts from integer to float 1661 { ISD::BITCAST, MVT::nxv2i16, MVT::nxv2f16, 0 }, 1662 { ISD::BITCAST, MVT::nxv4i16, MVT::nxv4f16, 0 }, 1663 { ISD::BITCAST, MVT::nxv2i32, MVT::nxv2f32, 0 }, 1664 }; 1665 1666 if (const auto *Entry = ConvertCostTableLookup(ConversionTbl, ISD, 1667 DstTy.getSimpleVT(), 1668 SrcTy.getSimpleVT())) 1669 return AdjustCost(Entry->Cost); 1670 1671 static const TypeConversionCostTblEntry FP16Tbl[] = { 1672 {ISD::FP_TO_SINT, MVT::v4i8, MVT::v4f16, 1}, // fcvtzs 1673 {ISD::FP_TO_UINT, MVT::v4i8, MVT::v4f16, 1}, 1674 {ISD::FP_TO_SINT, MVT::v4i16, MVT::v4f16, 1}, // fcvtzs 1675 {ISD::FP_TO_UINT, MVT::v4i16, MVT::v4f16, 1}, 1676 {ISD::FP_TO_SINT, MVT::v4i32, MVT::v4f16, 2}, // fcvtl+fcvtzs 1677 {ISD::FP_TO_UINT, MVT::v4i32, MVT::v4f16, 2}, 1678 {ISD::FP_TO_SINT, MVT::v8i8, MVT::v8f16, 2}, // fcvtzs+xtn 1679 {ISD::FP_TO_UINT, MVT::v8i8, MVT::v8f16, 2}, 1680 {ISD::FP_TO_SINT, MVT::v8i16, MVT::v8f16, 1}, // fcvtzs 1681 {ISD::FP_TO_UINT, MVT::v8i16, MVT::v8f16, 1}, 1682 {ISD::FP_TO_SINT, MVT::v8i32, MVT::v8f16, 4}, // 2*fcvtl+2*fcvtzs 1683 {ISD::FP_TO_UINT, MVT::v8i32, MVT::v8f16, 4}, 1684 {ISD::FP_TO_SINT, MVT::v16i8, MVT::v16f16, 3}, // 2*fcvtzs+xtn 1685 {ISD::FP_TO_UINT, MVT::v16i8, MVT::v16f16, 3}, 1686 {ISD::FP_TO_SINT, MVT::v16i16, MVT::v16f16, 2}, // 2*fcvtzs 1687 {ISD::FP_TO_UINT, MVT::v16i16, MVT::v16f16, 2}, 1688 {ISD::FP_TO_SINT, MVT::v16i32, MVT::v16f16, 8}, // 4*fcvtl+4*fcvtzs 1689 {ISD::FP_TO_UINT, MVT::v16i32, MVT::v16f16, 8}, 1690 {ISD::UINT_TO_FP, MVT::v8f16, MVT::v8i8, 2}, // ushll + ucvtf 1691 {ISD::SINT_TO_FP, MVT::v8f16, MVT::v8i8, 2}, // sshll + 
scvtf 1692 {ISD::UINT_TO_FP, MVT::v16f16, MVT::v16i8, 4}, // 2 * ushl(2) + 2 * ucvtf 1693 {ISD::SINT_TO_FP, MVT::v16f16, MVT::v16i8, 4}, // 2 * sshl(2) + 2 * scvtf 1694 }; 1695 1696 if (ST->hasFullFP16()) 1697 if (const auto *Entry = ConvertCostTableLookup( 1698 FP16Tbl, ISD, DstTy.getSimpleVT(), SrcTy.getSimpleVT())) 1699 return AdjustCost(Entry->Cost); 1700 1701 return AdjustCost( 1702 BaseT::getCastInstrCost(Opcode, Dst, Src, CCH, CostKind, I)); 1703 } 1704 1705 InstructionCost AArch64TTIImpl::getExtractWithExtendCost(unsigned Opcode, 1706 Type *Dst, 1707 VectorType *VecTy, 1708 unsigned Index) { 1709 1710 // Make sure we were given a valid extend opcode. 1711 assert((Opcode == Instruction::SExt || Opcode == Instruction::ZExt) && 1712 "Invalid opcode"); 1713 1714 // We are extending an element we extract from a vector, so the source type 1715 // of the extend is the element type of the vector. 1716 auto *Src = VecTy->getElementType(); 1717 1718 // Sign- and zero-extends are for integer types only. 1719 assert(isa<IntegerType>(Dst) && isa<IntegerType>(Src) && "Invalid type"); 1720 1721 // Get the cost for the extract. We compute the cost (if any) for the extend 1722 // below. 1723 InstructionCost Cost = 1724 getVectorInstrCost(Instruction::ExtractElement, VecTy, Index); 1725 1726 // Legalize the types. 1727 auto VecLT = TLI->getTypeLegalizationCost(DL, VecTy); 1728 auto DstVT = TLI->getValueType(DL, Dst); 1729 auto SrcVT = TLI->getValueType(DL, Src); 1730 TTI::TargetCostKind CostKind = TTI::TCK_RecipThroughput; 1731 1732 // If the resulting type is still a vector and the destination type is legal, 1733 // we may get the extension for free. If not, get the default cost for the 1734 // extend. 1735 if (!VecLT.second.isVector() || !TLI->isTypeLegal(DstVT)) 1736 return Cost + getCastInstrCost(Opcode, Dst, Src, TTI::CastContextHint::None, 1737 CostKind); 1738 1739 // The destination type should be larger than the element type. 
If not, get 1740 // the default cost for the extend. 1741 if (DstVT.getFixedSizeInBits() < SrcVT.getFixedSizeInBits()) 1742 return Cost + getCastInstrCost(Opcode, Dst, Src, TTI::CastContextHint::None, 1743 CostKind); 1744 1745 switch (Opcode) { 1746 default: 1747 llvm_unreachable("Opcode should be either SExt or ZExt"); 1748 1749 // For sign-extends, we only need a smov, which performs the extension 1750 // automatically. 1751 case Instruction::SExt: 1752 return Cost; 1753 1754 // For zero-extends, the extend is performed automatically by a umov unless 1755 // the destination type is i64 and the element type is i8 or i16. 1756 case Instruction::ZExt: 1757 if (DstVT.getSizeInBits() != 64u || SrcVT.getSizeInBits() == 32u) 1758 return Cost; 1759 } 1760 1761 // If we are unable to perform the extend for free, get the default cost. 1762 return Cost + getCastInstrCost(Opcode, Dst, Src, TTI::CastContextHint::None, 1763 CostKind); 1764 } 1765 1766 InstructionCost AArch64TTIImpl::getCFInstrCost(unsigned Opcode, 1767 TTI::TargetCostKind CostKind, 1768 const Instruction *I) { 1769 if (CostKind != TTI::TCK_RecipThroughput) 1770 return Opcode == Instruction::PHI ? 0 : 1; 1771 assert(CostKind == TTI::TCK_RecipThroughput && "unexpected CostKind"); 1772 // Branches are assumed to be predicted. 1773 return 0; 1774 } 1775 1776 InstructionCost AArch64TTIImpl::getVectorInstrCost(unsigned Opcode, Type *Val, 1777 unsigned Index) { 1778 assert(Val->isVectorTy() && "This must be a vector type"); 1779 1780 if (Index != -1U) { 1781 // Legalize the type. 1782 std::pair<InstructionCost, MVT> LT = TLI->getTypeLegalizationCost(DL, Val); 1783 1784 // This type is legalized to a scalar type. 1785 if (!LT.second.isVector()) 1786 return 0; 1787 1788 // The type may be split. For fixed-width vectors we can normalize the 1789 // index to the new type. 
    if (LT.second.isFixedLengthVector()) {
      unsigned Width = LT.second.getVectorNumElements();
      Index = Index % Width;
    }

    // The element at index zero is already inside the vector.
    if (Index == 0)
      return 0;
  }

  // All other insert/extracts cost this much.
  return ST->getVectorInsertExtractBaseCost();
}

/// Cost of an arithmetic instruction of \p Opcode on \p Ty. Special-cases
/// divisions by constants, i64 vector multiplies, and SVE-lowered FP ops;
/// everything else defers to the base implementation.
InstructionCost AArch64TTIImpl::getArithmeticInstrCost(
    unsigned Opcode, Type *Ty, TTI::TargetCostKind CostKind,
    TTI::OperandValueKind Opd1Info, TTI::OperandValueKind Opd2Info,
    TTI::OperandValueProperties Opd1PropInfo,
    TTI::OperandValueProperties Opd2PropInfo, ArrayRef<const Value *> Args,
    const Instruction *CxtI) {
  // TODO: Handle more cost kinds.
  if (CostKind != TTI::TCK_RecipThroughput)
    return BaseT::getArithmeticInstrCost(Opcode, Ty, CostKind, Opd1Info,
                                         Opd2Info, Opd1PropInfo,
                                         Opd2PropInfo, Args, CxtI);

  // Legalize the type.
  std::pair<InstructionCost, MVT> LT = TLI->getTypeLegalizationCost(DL, Ty);
  int ISD = TLI->InstructionOpcodeToISD(Opcode);

  switch (ISD) {
  default:
    return BaseT::getArithmeticInstrCost(Opcode, Ty, CostKind, Opd1Info,
                                         Opd2Info, Opd1PropInfo, Opd2PropInfo);
  case ISD::SDIV:
    if (Opd2Info == TargetTransformInfo::OK_UniformConstantValue &&
        Opd2PropInfo == TargetTransformInfo::OP_PowerOf2) {
      // On AArch64, scalar signed division by constants power-of-two are
      // normally expanded to the sequence ADD + CMP + SELECT + SRA.
      // The OperandValue properties may not be the same as that of the
      // previous operation; conservatively assume OP_None.
      InstructionCost Cost = getArithmeticInstrCost(
          Instruction::Add, Ty, CostKind, Opd1Info, Opd2Info,
          TargetTransformInfo::OP_None, TargetTransformInfo::OP_None);
      Cost += getArithmeticInstrCost(Instruction::Sub, Ty, CostKind, Opd1Info,
                                     Opd2Info, TargetTransformInfo::OP_None,
                                     TargetTransformInfo::OP_None);
      Cost += getArithmeticInstrCost(
          Instruction::Select, Ty, CostKind, Opd1Info, Opd2Info,
          TargetTransformInfo::OP_None, TargetTransformInfo::OP_None);
      Cost += getArithmeticInstrCost(Instruction::AShr, Ty, CostKind, Opd1Info,
                                     Opd2Info, TargetTransformInfo::OP_None,
                                     TargetTransformInfo::OP_None);
      return Cost;
    }
    LLVM_FALLTHROUGH;
  case ISD::UDIV: {
    if (Opd2Info == TargetTransformInfo::OK_UniformConstantValue) {
      auto VT = TLI->getValueType(DL, Ty);
      if (TLI->isOperationLegalOrCustom(ISD::MULHU, VT)) {
        // Vector signed division by constant are expanded to the
        // sequence MULHS + ADD/SUB + SRA + SRL + ADD, and unsigned division
        // to MULHS + SUB + SRL + ADD + SRL.
        InstructionCost MulCost = getArithmeticInstrCost(
            Instruction::Mul, Ty, CostKind, Opd1Info, Opd2Info,
            TargetTransformInfo::OP_None, TargetTransformInfo::OP_None);
        InstructionCost AddCost = getArithmeticInstrCost(
            Instruction::Add, Ty, CostKind, Opd1Info, Opd2Info,
            TargetTransformInfo::OP_None, TargetTransformInfo::OP_None);
        InstructionCost ShrCost = getArithmeticInstrCost(
            Instruction::AShr, Ty, CostKind, Opd1Info, Opd2Info,
            TargetTransformInfo::OP_None, TargetTransformInfo::OP_None);
        return MulCost * 2 + AddCost * 2 + ShrCost * 2 + 1;
      }
    }

    InstructionCost Cost = BaseT::getArithmeticInstrCost(
        Opcode, Ty, CostKind, Opd1Info, Opd2Info, Opd1PropInfo, Opd2PropInfo);
    if (Ty->isVectorTy()) {
      // On AArch64, vector divisions are not supported natively and are
      // expanded into scalar divisions of each pair of elements.
      Cost += getArithmeticInstrCost(Instruction::ExtractElement, Ty, CostKind,
                                     Opd1Info, Opd2Info, Opd1PropInfo,
                                     Opd2PropInfo);
      Cost += getArithmeticInstrCost(Instruction::InsertElement, Ty, CostKind,
                                     Opd1Info, Opd2Info, Opd1PropInfo,
                                     Opd2PropInfo);
      // TODO: if one of the arguments is scalar, then it's not necessary to
      // double the cost of handling the vector elements.
      Cost += Cost;
    }
    return Cost;
  }
  case ISD::MUL:
    // Since we do not have a MUL.2d instruction, a mul <2 x i64> is expensive
    // as elements are extracted from the vectors and the muls scalarized.
    // As getScalarizationOverhead is a bit too pessimistic, we estimate the
    // cost for a i64 vector directly here, which is:
    // - four 2-cost i64 extracts,
    // - two 2-cost i64 inserts, and
    // - two 1-cost muls.
    // So, for a v2i64 with LT.First = 1 the cost is 14, and for a v4i64 with
    // LT.first = 2 the cost is 28. If both operands are extensions it will not
    // need to scalarize so the cost can be cheaper (smull or umull).
    if (LT.second != MVT::v2i64 || isWideningInstruction(Ty, Opcode, Args))
      return LT.first;
    return LT.first * 14;
  case ISD::ADD:
  case ISD::XOR:
  case ISD::OR:
  case ISD::AND:
  case ISD::SRL:
  case ISD::SRA:
  case ISD::SHL:
    // These nodes are marked as 'custom' for combining purposes only.
    // We know that they are legal. See LowerAdd in ISelLowering.
    return LT.first;

  case ISD::FADD:
  case ISD::FSUB:
  case ISD::FMUL:
  case ISD::FDIV:
  case ISD::FNEG:
    // These nodes are marked as 'custom' just to lower them to SVE.
    // We know said lowering will incur no additional cost.
    if (!Ty->getScalarType()->isFP128Ty())
      return 2 * LT.first;

    return BaseT::getArithmeticInstrCost(Opcode, Ty, CostKind, Opd1Info,
                                         Opd2Info, Opd1PropInfo, Opd2PropInfo);
  }
}

/// Cost of computing an address for a memory access of \p Ty.
InstructionCost AArch64TTIImpl::getAddressComputationCost(Type *Ty,
                                                          ScalarEvolution *SE,
                                                          const SCEV *Ptr) {
  // Address computations in vectorized code with non-consecutive addresses will
  // likely result in more instructions compared to scalar code where the
  // computation can more often be merged into the index mode. The resulting
  // extra micro-ops can significantly decrease throughput.
  unsigned NumVectorInstToHideOverhead = 10;
  int MaxMergeDistance = 64;

  if (Ty->isVectorTy() && SE &&
      !BaseT::isConstantStridedAccessLessThan(SE, Ptr, MaxMergeDistance + 1))
    return NumVectorInstToHideOverhead;

  // In many cases the address computation is not merged into the instruction
  // addressing mode.
  return 1;
}

/// Cost of a compare or select of \p ValTy. Special-cases fixed-width vector
/// selects, which AArch64 does not always lower well.
InstructionCost AArch64TTIImpl::getCmpSelInstrCost(unsigned Opcode, Type *ValTy,
                                                   Type *CondTy,
                                                   CmpInst::Predicate VecPred,
                                                   TTI::TargetCostKind CostKind,
                                                   const Instruction *I) {
  // TODO: Handle other cost kinds.
  if (CostKind != TTI::TCK_RecipThroughput)
    return BaseT::getCmpSelInstrCost(Opcode, ValTy, CondTy, VecPred, CostKind,
                                     I);

  int ISD = TLI->InstructionOpcodeToISD(Opcode);
  // We don't lower some vector selects well that are wider than the register
  // width.
  if (isa<FixedVectorType>(ValTy) && ISD == ISD::SELECT) {
    // We would need this many instructions to hide the scalarization happening.
    const int AmortizationCost = 20;

    // If VecPred is not set, check if we can get a predicate from the context
    // instruction, if its type matches the requested ValTy.
1961 if (VecPred == CmpInst::BAD_ICMP_PREDICATE && I && I->getType() == ValTy) { 1962 CmpInst::Predicate CurrentPred; 1963 if (match(I, m_Select(m_Cmp(CurrentPred, m_Value(), m_Value()), m_Value(), 1964 m_Value()))) 1965 VecPred = CurrentPred; 1966 } 1967 // Check if we have a compare/select chain that can be lowered using 1968 // a (F)CMxx & BFI pair. 1969 if (CmpInst::isIntPredicate(VecPred) || VecPred == CmpInst::FCMP_OLE || 1970 VecPred == CmpInst::FCMP_OLT || VecPred == CmpInst::FCMP_OGT || 1971 VecPred == CmpInst::FCMP_OGE || VecPred == CmpInst::FCMP_OEQ || 1972 VecPred == CmpInst::FCMP_UNE) { 1973 static const auto ValidMinMaxTys = { 1974 MVT::v8i8, MVT::v16i8, MVT::v4i16, MVT::v8i16, MVT::v2i32, 1975 MVT::v4i32, MVT::v2i64, MVT::v2f32, MVT::v4f32, MVT::v2f64}; 1976 static const auto ValidFP16MinMaxTys = {MVT::v4f16, MVT::v8f16}; 1977 1978 auto LT = TLI->getTypeLegalizationCost(DL, ValTy); 1979 if (any_of(ValidMinMaxTys, [<](MVT M) { return M == LT.second; }) || 1980 (ST->hasFullFP16() && 1981 any_of(ValidFP16MinMaxTys, [<](MVT M) { return M == LT.second; }))) 1982 return LT.first; 1983 } 1984 1985 static const TypeConversionCostTblEntry 1986 VectorSelectTbl[] = { 1987 { ISD::SELECT, MVT::v16i1, MVT::v16i16, 16 }, 1988 { ISD::SELECT, MVT::v8i1, MVT::v8i32, 8 }, 1989 { ISD::SELECT, MVT::v16i1, MVT::v16i32, 16 }, 1990 { ISD::SELECT, MVT::v4i1, MVT::v4i64, 4 * AmortizationCost }, 1991 { ISD::SELECT, MVT::v8i1, MVT::v8i64, 8 * AmortizationCost }, 1992 { ISD::SELECT, MVT::v16i1, MVT::v16i64, 16 * AmortizationCost } 1993 }; 1994 1995 EVT SelCondTy = TLI->getValueType(DL, CondTy); 1996 EVT SelValTy = TLI->getValueType(DL, ValTy); 1997 if (SelCondTy.isSimple() && SelValTy.isSimple()) { 1998 if (const auto *Entry = ConvertCostTableLookup(VectorSelectTbl, ISD, 1999 SelCondTy.getSimpleVT(), 2000 SelValTy.getSimpleVT())) 2001 return Entry->Cost; 2002 } 2003 } 2004 // The base case handles scalable vectors fine for now, since it treats the 2005 // cost as 1 * 
// legalization cost.
  return BaseT::getCmpSelInstrCost(Opcode, ValTy, CondTy, VecPred, CostKind, I);
}

/// Enable memcmp expansion with overlapping loads; disabled under strict
/// alignment, where misaligned loads expand to many instructions.
AArch64TTIImpl::TTI::MemCmpExpansionOptions
AArch64TTIImpl::enableMemCmpExpansion(bool OptSize, bool IsZeroCmp) const {
  TTI::MemCmpExpansionOptions Options;
  if (ST->requiresStrictAlign()) {
    // TODO: Add cost modeling for strict align. Misaligned loads expand to
    // a bunch of instructions when strict align is enabled.
    return Options;
  }
  Options.AllowOverlappingLoads = true;
  Options.MaxNumLoads = TLI->getMaxExpandSizeMemcmp(OptSize);
  Options.NumLoadsPerBlock = Options.MaxNumLoads;
  // TODO: Though vector loads usually perform well on AArch64, in some targets
  // they may wake up the FP unit, which raises the power consumption. Perhaps
  // they could be used with no holds barred (-O3).
  Options.LoadSizes = {8, 4, 2, 1};
  return Options;
}

/// Cost of a masked load/store. Only costed natively for SVE; fixed-width
/// NEON vectors defer to the (scalarizing) base implementation.
InstructionCost
AArch64TTIImpl::getMaskedMemoryOpCost(unsigned Opcode, Type *Src,
                                      Align Alignment, unsigned AddressSpace,
                                      TTI::TargetCostKind CostKind) {
  if (useNeonVector(Src))
    return BaseT::getMaskedMemoryOpCost(Opcode, Src, Alignment, AddressSpace,
                                        CostKind);
  auto LT = TLI->getTypeLegalizationCost(DL, Src);
  if (!LT.first.isValid())
    return InstructionCost::getInvalid();

  // The code-generator is currently not able to handle scalable vectors
  // of <vscale x 1 x eltty> yet, so return an invalid cost to avoid selecting
  // it. This change will be removed when code-generation for these types is
  // sufficiently reliable.
  if (cast<VectorType>(Src)->getElementCount() == ElementCount::getScalable(1))
    return InstructionCost::getInvalid();

  return LT.first * 2;
}

/// Per-element overhead factor for SVE gathers (loads) and scatters (stores),
/// tunable via -sve-gather-overhead / -sve-scatter-overhead.
static unsigned getSVEGatherScatterOverhead(unsigned Opcode) {
  return Opcode == Instruction::Load ?
SVEGatherOverhead : SVEScatterOverhead;
}

/// Cost of a gather/scatter: one scalar memory op per legal element, scaled
/// by the SVE gather/scatter overhead factor.
InstructionCost AArch64TTIImpl::getGatherScatterOpCost(
    unsigned Opcode, Type *DataTy, const Value *Ptr, bool VariableMask,
    Align Alignment, TTI::TargetCostKind CostKind, const Instruction *I) {
  // Fixed-width NEON has no gather/scatter; use the scalarizing base cost.
  if (useNeonVector(DataTy))
    return BaseT::getGatherScatterOpCost(Opcode, DataTy, Ptr, VariableMask,
                                         Alignment, CostKind, I);
  auto *VT = cast<VectorType>(DataTy);
  auto LT = TLI->getTypeLegalizationCost(DL, DataTy);
  if (!LT.first.isValid())
    return InstructionCost::getInvalid();

  // The code-generator is currently not able to handle scalable vectors
  // of <vscale x 1 x eltty> yet, so return an invalid cost to avoid selecting
  // it. This change will be removed when code-generation for these types is
  // sufficiently reliable.
  if (cast<VectorType>(DataTy)->getElementCount() ==
      ElementCount::getScalable(1))
    return InstructionCost::getInvalid();

  ElementCount LegalVF = LT.second.getVectorElementCount();
  InstructionCost MemOpCost =
      getMemoryOpCost(Opcode, VT->getElementType(), Alignment, 0, CostKind, I);
  // Add on an overhead cost for using gathers/scatters.
  // TODO: At the moment this is applied unilaterally for all CPUs, but at some
  // point we may want a per-CPU overhead.
  MemOpCost *= getSVEGatherScatterOverhead(Opcode);
  return LT.first * MemOpCost * getMaxNumElements(LegalVF);
}

/// True when \p Ty is a fixed-width vector that will be lowered with NEON
/// (i.e. SVE is not being used for fixed-length vectors).
bool AArch64TTIImpl::useNeonVector(const Type *Ty) const {
  return isa<FixedVectorType>(Ty) && !ST->useSVEForFixedLengthVectors();
}

/// Cost of a plain load/store of \p Ty.
InstructionCost AArch64TTIImpl::getMemoryOpCost(unsigned Opcode, Type *Ty,
                                                MaybeAlign Alignment,
                                                unsigned AddressSpace,
                                                TTI::TargetCostKind CostKind,
                                                const Instruction *I) {
  EVT VT = TLI->getValueType(DL, Ty, true);
  // Type legalization can't handle structs
  if (VT == MVT::Other)
    return BaseT::getMemoryOpCost(Opcode, Ty, Alignment, AddressSpace,
                                  CostKind);

  auto LT = TLI->getTypeLegalizationCost(DL, Ty);
  if (!LT.first.isValid())
    return InstructionCost::getInvalid();

  // The code-generator is currently not able to handle scalable vectors
  // of <vscale x 1 x eltty> yet, so return an invalid cost to avoid selecting
  // it. This change will be removed when code-generation for these types is
  // sufficiently reliable.
  if (auto *VTy = dyn_cast<ScalableVectorType>(Ty))
    if (VTy->getElementCount() == ElementCount::getScalable(1))
      return InstructionCost::getInvalid();

  // TODO: consider latency as well for TCK_SizeAndLatency.
  if (CostKind == TTI::TCK_CodeSize || CostKind == TTI::TCK_SizeAndLatency)
    return LT.first;

  if (CostKind != TTI::TCK_RecipThroughput)
    return 1;

  if (ST->isMisaligned128StoreSlow() && Opcode == Instruction::Store &&
      LT.second.is128BitVector() && (!Alignment || *Alignment < Align(16))) {
    // Unaligned stores are extremely inefficient. We don't split all
    // unaligned 128-bit stores because the negative impact that has shown in
    // practice on inlined block copy code.
    // We make such stores expensive so that we will only vectorize if there
    // are 6 other instructions getting vectorized.
    const int AmortizationCost = 6;

    return LT.first * 2 * AmortizationCost;
  }

  // Check truncating stores and extending loads.
  if (useNeonVector(Ty) &&
      Ty->getScalarSizeInBits() != LT.second.getScalarSizeInBits()) {
    // v4i8 types are lowered to scalar a load/store and sshll/xtn.
    if (VT == MVT::v4i8)
      return 2;
    // Otherwise we need to scalarize.
    return cast<FixedVectorType>(Ty)->getNumElements() * 2;
  }

  return LT.first;
}

/// Cost of an interleaved access of \p Factor sub-vectors; matched accesses
/// map to AArch64's ldN/stN instructions.
InstructionCost AArch64TTIImpl::getInterleavedMemoryOpCost(
    unsigned Opcode, Type *VecTy, unsigned Factor, ArrayRef<unsigned> Indices,
    Align Alignment, unsigned AddressSpace, TTI::TargetCostKind CostKind,
    bool UseMaskForCond, bool UseMaskForGaps) {
  assert(Factor >= 2 && "Invalid interleave factor");
  auto *VecVTy = cast<FixedVectorType>(VecTy);

  if (!UseMaskForCond && !UseMaskForGaps &&
      Factor <= TLI->getMaxSupportedInterleaveFactor()) {
    unsigned NumElts = VecVTy->getNumElements();
    auto *SubVecTy =
        FixedVectorType::get(VecTy->getScalarType(), NumElts / Factor);

    // ldN/stN only support legal vector types of size 64 or 128 in bits.
    // Accesses having vector types that are a multiple of 128 bits can be
    // matched to more than one ldN/stN instruction.
    bool UseScalable;
    if (NumElts % Factor == 0 &&
        TLI->isLegalInterleavedAccessType(SubVecTy, DL, UseScalable))
      return Factor * TLI->getNumInterleavedAccesses(SubVecTy, DL, UseScalable);
  }

  return BaseT::getInterleavedMemoryOpCost(Opcode, VecTy, Factor, Indices,
                                           Alignment, AddressSpace, CostKind,
                                           UseMaskForCond, UseMaskForGaps);
}

/// Cost of spilling/refilling 128-bit vector values that live across a call:
/// one store plus one load per such value.
InstructionCost
AArch64TTIImpl::getCostOfKeepingLiveOverCall(ArrayRef<Type *> Tys) {
  InstructionCost Cost = 0;
  TTI::TargetCostKind CostKind = TTI::TCK_RecipThroughput;
  for (auto *I : Tys) {
    if (!I->isVectorTy())
      continue;
    if (I->getScalarSizeInBits() * cast<FixedVectorType>(I)->getNumElements() ==
        128)
      Cost += getMemoryOpCost(Instruction::Store, I, Align(128), 0, CostKind) +
              getMemoryOpCost(Instruction::Load, I, Align(128), 0, CostKind);
  }
  return Cost;
}

unsigned AArch64TTIImpl::getMaxInterleaveFactor(unsigned VF) {
  return ST->getMaxInterleaveFactor();
}

// For Falkor, we want to avoid having too many strided loads in a loop since
// that can exhaust the HW prefetcher resources. We adjust the unroller
// MaxCount preference below to attempt to ensure unrolling doesn't create too
// many strided loads.
static void
getFalkorUnrollingPreferences(Loop *L, ScalarEvolution &SE,
                              TargetTransformInfo::UnrollingPreferences &UP) {
  enum { MaxStridedLoads = 7 };
  // Count affine strided loads in the loop, bailing early once more than
  // MaxStridedLoads / 2 are seen (further counting cannot change the result).
  auto countStridedLoads = [](Loop *L, ScalarEvolution &SE) {
    int StridedLoads = 0;
    // FIXME? We could make this more precise by looking at the CFG and
    // e.g. not counting loads in each side of an if-then-else diamond.
    for (const auto BB : L->blocks()) {
      for (auto &I : *BB) {
        LoadInst *LMemI = dyn_cast<LoadInst>(&I);
        if (!LMemI)
          continue;

        Value *PtrValue = LMemI->getPointerOperand();
        // Loop-invariant addresses are not strided accesses.
        if (L->isLoopInvariant(PtrValue))
          continue;

        const SCEV *LSCEV = SE.getSCEV(PtrValue);
        const SCEVAddRecExpr *LSCEVAddRec = dyn_cast<SCEVAddRecExpr>(LSCEV);
        if (!LSCEVAddRec || !LSCEVAddRec->isAffine())
          continue;

        // FIXME? We could take pairing of unrolled load copies into account
        // by looking at the AddRec, but we would probably have to limit this
        // to loops with no stores or other memory optimization barriers.
        ++StridedLoads;
        // We've seen enough strided loads that seeing more won't make a
        // difference.
        if (StridedLoads > MaxStridedLoads / 2)
          return StridedLoads;
      }
    }
    return StridedLoads;
  };

  int StridedLoads = countStridedLoads(L, SE);
  LLVM_DEBUG(dbgs() << "falkor-hwpf: detected " << StridedLoads
                    << " strided loads\n");
  // Pick the largest power of 2 unroll count that won't result in too many
  // strided loads.
  if (StridedLoads) {
    UP.MaxCount = 1 << Log2_32(MaxStridedLoads / StridedLoads);
    LLVM_DEBUG(dbgs() << "falkor-hwpf: setting unroll MaxCount to "
                      << UP.MaxCount << '\n');
  }
}

/// Tune unrolling: partial/runtime unrolling, inner-loop boost, Falkor
/// prefetcher workaround, and no unrolling of loops with calls or vector code.
void AArch64TTIImpl::getUnrollingPreferences(Loop *L, ScalarEvolution &SE,
                                             TTI::UnrollingPreferences &UP,
                                             OptimizationRemarkEmitter *ORE) {
  // Enable partial unrolling and runtime unrolling.
  BaseT::getUnrollingPreferences(L, SE, UP, ORE);

  UP.UpperBound = true;

  // For inner loop, it is more likely to be a hot one, and the runtime check
  // can be promoted out from LICM pass, so the overhead is less, let's try
  // a larger threshold to unroll more loops.
  if (L->getLoopDepth() > 1)
    UP.PartialThreshold *= 2;

  // Disable partial & runtime unrolling on -Os.
  UP.PartialOptSizeThreshold = 0;

  if (ST->getProcFamily() == AArch64Subtarget::Falkor &&
      EnableFalkorHWPFUnrollFix)
    getFalkorUnrollingPreferences(L, SE, UP);

  // Scan the loop: don't unroll loops with calls as this could prevent
  // inlining. Don't unroll vector loops either, as they don't benefit much from
  // unrolling.
  for (auto *BB : L->getBlocks()) {
    for (auto &I : *BB) {
      // Don't unroll vectorised loop.
      if (I.getType()->isVectorTy())
        return;

      if (isa<CallInst>(I) || isa<InvokeInst>(I)) {
        if (const Function *F = cast<CallBase>(I).getCalledFunction()) {
          // Calls that don't lower to a real call (e.g. some intrinsics) are
          // fine to unroll across.
          if (!isLoweredToCall(F))
            continue;
        }
        return;
      }
    }
  }

  // Enable runtime unrolling for in-order models
  // If mcpu is omitted, getProcFamily() returns AArch64Subtarget::Others, so by
  // checking for that case, we can ensure that the default behaviour is
  // unchanged
  if (ST->getProcFamily() != AArch64Subtarget::Others &&
      !ST->getSchedModel().isOutOfOrder()) {
    UP.Runtime = true;
    UP.Partial = true;
    UP.UnrollRemainder = true;
    UP.DefaultUnrollRuntimeCount = 4;

    UP.UnrollAndJam = true;
    UP.UnrollAndJamInnerLoopThreshold = 60;
  }
}

void AArch64TTIImpl::getPeelingPreferences(Loop *L, ScalarEvolution &SE,
                                           TTI::PeelingPreferences &PP) {
  BaseT::getPeelingPreferences(L, SE, PP);
}

/// Try to produce a value of \p ExpectedType equivalent to the result of a
/// NEON ldN/stN memory intrinsic, so redundant loads can be eliminated.
Value *AArch64TTIImpl::getOrCreateResultFromMemIntrinsic(IntrinsicInst *Inst,
                                                         Type *ExpectedType) {
  switch (Inst->getIntrinsicID()) {
  default:
    return nullptr;
  case Intrinsic::aarch64_neon_st2:
  case Intrinsic::aarch64_neon_st3:
  case Intrinsic::aarch64_neon_st4: {
    // Create a struct type
    StructType *ST = dyn_cast<StructType>(ExpectedType);
    if (!ST)
      return nullptr;
    // All arguments except the trailing pointer are the stored values.
    unsigned NumElts = Inst->arg_size() - 1;
    if (ST->getNumElements() != NumElts)
      return nullptr;
    for (unsigned i = 0, e = NumElts; i != e; ++i) {
      if (Inst->getArgOperand(i)->getType() != ST->getElementType(i))
        return nullptr;
    }
    // Rebuild the stored aggregate from the stN value operands.
    Value *Res = UndefValue::get(ExpectedType);
    IRBuilder<> Builder(Inst);
    for (unsigned i = 0, e = NumElts; i != e; ++i) {
      Value *L = Inst->getArgOperand(i);
      Res = Builder.CreateInsertValue(Res, L, i);
    }
    return Res;
  }
  case Intrinsic::aarch64_neon_ld2:
  case Intrinsic::aarch64_neon_ld3:
  case Intrinsic::aarch64_neon_ld4:
    if (Inst->getType() == ExpectedType)
      return Inst;
    return nullptr;
  }
}

/// Describe NEON ldN/stN intrinsics to the memory-intrinsic framework:
/// read/write direction, pointer operand, and a matching id used to pair
/// loads with stores of the same arity.
bool AArch64TTIImpl::getTgtMemIntrinsic(IntrinsicInst *Inst,
                                        MemIntrinsicInfo &Info) {
  switch (Inst->getIntrinsicID()) {
  default:
    break;
  case Intrinsic::aarch64_neon_ld2:
  case Intrinsic::aarch64_neon_ld3:
  case Intrinsic::aarch64_neon_ld4:
    Info.ReadMem = true;
    Info.WriteMem = false;
    Info.PtrVal = Inst->getArgOperand(0);
    break;
  case Intrinsic::aarch64_neon_st2:
  case Intrinsic::aarch64_neon_st3:
  case Intrinsic::aarch64_neon_st4:
    Info.ReadMem = false;
    Info.WriteMem = true;
    // For the stN intrinsics the pointer is the last argument.
    Info.PtrVal = Inst->getArgOperand(Inst->arg_size() - 1);
    break;
  }

  switch (Inst->getIntrinsicID()) {
  default:
    return false;
  case Intrinsic::aarch64_neon_ld2:
  case Intrinsic::aarch64_neon_st2:
    Info.MatchingId = VECTOR_LDST_TWO_ELEMENTS;
    break;
  case Intrinsic::aarch64_neon_ld3:
  case Intrinsic::aarch64_neon_st3:
    Info.MatchingId = VECTOR_LDST_THREE_ELEMENTS;
    break;
  case Intrinsic::aarch64_neon_ld4:
  case Intrinsic::aarch64_neon_st4:
    Info.MatchingId = VECTOR_LDST_FOUR_ELEMENTS;
    break;
  }
  return true;
}

/// See if \p I should be considered for address type promotion. We check if \p
/// I is a sext with right type and used in memory accesses.
/// If it is used in a
/// "complex" getelementptr, we allow it to be promoted without finding other
/// sext instructions that sign extended the same initial value. A getelementptr
/// is considered as "complex" if it has more than 2 operands.
bool AArch64TTIImpl::shouldConsiderAddressTypePromotion(
    const Instruction &I, bool &AllowPromotionWithoutCommonHeader) {
  bool Considerable = false;
  AllowPromotionWithoutCommonHeader = false;
  if (!isa<SExtInst>(&I))
    return false;
  // Only sexts producing i64 (pointer-width) are interesting.
  Type *ConsideredSExtType =
      Type::getInt64Ty(I.getParent()->getParent()->getContext());
  if (I.getType() != ConsideredSExtType)
    return false;
  // See if the sext is the one with the right type and used in at least one
  // GetElementPtrInst.
  for (const User *U : I.users()) {
    if (const GetElementPtrInst *GEPInst = dyn_cast<GetElementPtrInst>(U)) {
      Considerable = true;
      // A getelementptr is considered as "complex" if it has more than 2
      // operands. We will promote a SExt used in such complex GEP as we
      // expect some computation to be merged if they are done on 64 bits.
      if (GEPInst->getNumOperands() > 2) {
        AllowPromotionWithoutCommonHeader = true;
        break;
      }
    }
  }
  return Considerable;
}

/// Whether a reduction of kind \p RdxDesc can be vectorized with VF \p VF.
/// Fixed-width is always fine; scalable VFs require a legal element type and
/// one of the recurrence kinds listed below.
bool AArch64TTIImpl::isLegalToVectorizeReduction(
    const RecurrenceDescriptor &RdxDesc, ElementCount VF) const {
  if (!VF.isScalable())
    return true;

  Type *Ty = RdxDesc.getRecurrenceType();
  if (Ty->isBFloatTy() || !isElementTypeLegalForScalableVector(Ty))
    return false;

  switch (RdxDesc.getRecurrenceKind()) {
  case RecurKind::Add:
  case RecurKind::FAdd:
  case RecurKind::And:
  case RecurKind::Or:
  case RecurKind::Xor:
  case RecurKind::SMin:
  case RecurKind::SMax:
  case RecurKind::UMin:
  case RecurKind::UMax:
  case RecurKind::FMin:
  case RecurKind::FMax:
  case RecurKind::SelectICmp:
  case RecurKind::SelectFCmp:
  case RecurKind::FMulAdd:
    return true;
  default:
    return false;
  }
}

/// Cost of a min/max reduction: a fixed horizontal-reduction cost of 2, plus
/// one pairwise min/max per extra legal vector when the type is split.
InstructionCost
AArch64TTIImpl::getMinMaxReductionCost(VectorType *Ty, VectorType *CondTy,
                                       bool IsUnsigned,
                                       TTI::TargetCostKind CostKind) {
  std::pair<InstructionCost, MVT> LT = TLI->getTypeLegalizationCost(DL, Ty);

  if (LT.second.getScalarType() == MVT::f16 && !ST->hasFullFP16())
    return BaseT::getMinMaxReductionCost(Ty, CondTy, IsUnsigned, CostKind);

  assert((isa<ScalableVectorType>(Ty) == isa<ScalableVectorType>(CondTy)) &&
         "Both vector needs to be equally scalable");

  InstructionCost LegalizationCost = 0;
  if (LT.first > 1) {
    Type *LegalVTy = EVT(LT.second).getTypeForEVT(Ty->getContext());
    // A representative min/max intrinsic is used purely for costing the
    // pairwise combining steps.
    unsigned MinMaxOpcode =
        Ty->isFPOrFPVectorTy()
            ? Intrinsic::maxnum
            : (IsUnsigned ? Intrinsic::umin : Intrinsic::smin);
    IntrinsicCostAttributes Attrs(MinMaxOpcode, LegalVTy, {LegalVTy, LegalVTy});
    LegalizationCost = getIntrinsicInstrCost(Attrs, CostKind) * (LT.first - 1);
  }

  return LegalizationCost + /*Cost of horizontal reduction*/ 2;
}

/// SVE arithmetic reduction: legalization steps plus a fixed horizontal
/// reduction cost of 2; unsupported opcodes are invalid.
InstructionCost AArch64TTIImpl::getArithmeticReductionCostSVE(
    unsigned Opcode, VectorType *ValTy, TTI::TargetCostKind CostKind) {
  std::pair<InstructionCost, MVT> LT = TLI->getTypeLegalizationCost(DL, ValTy);
  InstructionCost LegalizationCost = 0;
  if (LT.first > 1) {
    Type *LegalVTy = EVT(LT.second).getTypeForEVT(ValTy->getContext());
    LegalizationCost = getArithmeticInstrCost(Opcode, LegalVTy, CostKind);
    LegalizationCost *= LT.first - 1;
  }

  int ISD = TLI->InstructionOpcodeToISD(Opcode);
  assert(ISD && "Invalid opcode");
  // Add the final reduction cost for the legal horizontal reduction
  switch (ISD) {
  case ISD::ADD:
  case ISD::AND:
  case ISD::OR:
  case ISD::XOR:
  case ISD::FADD:
    return LegalizationCost + 2;
  default:
    return InstructionCost::getInvalid();
  }
}

/// Cost of an arithmetic reduction, handling ordered (strict FP) reductions,
/// SVE reductions, and the NEON cost table that follows.
InstructionCost
AArch64TTIImpl::getArithmeticReductionCost(unsigned Opcode, VectorType *ValTy,
                                           Optional<FastMathFlags> FMF,
                                           TTI::TargetCostKind CostKind) {
  if (TTI::requiresOrderedReduction(FMF)) {
    if (auto *FixedVTy = dyn_cast<FixedVectorType>(ValTy)) {
      InstructionCost BaseCost =
          BaseT::getArithmeticReductionCost(Opcode, ValTy, FMF, CostKind);
      // Add on extra cost to reflect the extra overhead on some CPUs. We still
      // end up vectorizing for more computationally intensive loops.
2498 return BaseCost + FixedVTy->getNumElements(); 2499 } 2500 2501 if (Opcode != Instruction::FAdd) 2502 return InstructionCost::getInvalid(); 2503 2504 auto *VTy = cast<ScalableVectorType>(ValTy); 2505 InstructionCost Cost = 2506 getArithmeticInstrCost(Opcode, VTy->getScalarType(), CostKind); 2507 Cost *= getMaxNumElements(VTy->getElementCount()); 2508 return Cost; 2509 } 2510 2511 if (isa<ScalableVectorType>(ValTy)) 2512 return getArithmeticReductionCostSVE(Opcode, ValTy, CostKind); 2513 2514 std::pair<InstructionCost, MVT> LT = TLI->getTypeLegalizationCost(DL, ValTy); 2515 MVT MTy = LT.second; 2516 int ISD = TLI->InstructionOpcodeToISD(Opcode); 2517 assert(ISD && "Invalid opcode"); 2518 2519 // Horizontal adds can use the 'addv' instruction. We model the cost of these 2520 // instructions as twice a normal vector add, plus 1 for each legalization 2521 // step (LT.first). This is the only arithmetic vector reduction operation for 2522 // which we have an instruction. 2523 // OR, XOR and AND costs should match the codegen from: 2524 // OR: llvm/test/CodeGen/AArch64/reduce-or.ll 2525 // XOR: llvm/test/CodeGen/AArch64/reduce-xor.ll 2526 // AND: llvm/test/CodeGen/AArch64/reduce-and.ll 2527 static const CostTblEntry CostTblNoPairwise[]{ 2528 {ISD::ADD, MVT::v8i8, 2}, 2529 {ISD::ADD, MVT::v16i8, 2}, 2530 {ISD::ADD, MVT::v4i16, 2}, 2531 {ISD::ADD, MVT::v8i16, 2}, 2532 {ISD::ADD, MVT::v4i32, 2}, 2533 {ISD::OR, MVT::v8i8, 15}, 2534 {ISD::OR, MVT::v16i8, 17}, 2535 {ISD::OR, MVT::v4i16, 7}, 2536 {ISD::OR, MVT::v8i16, 9}, 2537 {ISD::OR, MVT::v2i32, 3}, 2538 {ISD::OR, MVT::v4i32, 5}, 2539 {ISD::OR, MVT::v2i64, 3}, 2540 {ISD::XOR, MVT::v8i8, 15}, 2541 {ISD::XOR, MVT::v16i8, 17}, 2542 {ISD::XOR, MVT::v4i16, 7}, 2543 {ISD::XOR, MVT::v8i16, 9}, 2544 {ISD::XOR, MVT::v2i32, 3}, 2545 {ISD::XOR, MVT::v4i32, 5}, 2546 {ISD::XOR, MVT::v2i64, 3}, 2547 {ISD::AND, MVT::v8i8, 15}, 2548 {ISD::AND, MVT::v16i8, 17}, 2549 {ISD::AND, MVT::v4i16, 7}, 2550 {ISD::AND, MVT::v8i16, 9}, 2551 
{ISD::AND, MVT::v2i32, 3}, 2552 {ISD::AND, MVT::v4i32, 5}, 2553 {ISD::AND, MVT::v2i64, 3}, 2554 }; 2555 switch (ISD) { 2556 default: 2557 break; 2558 case ISD::ADD: 2559 if (const auto *Entry = CostTableLookup(CostTblNoPairwise, ISD, MTy)) 2560 return (LT.first - 1) + Entry->Cost; 2561 break; 2562 case ISD::XOR: 2563 case ISD::AND: 2564 case ISD::OR: 2565 const auto *Entry = CostTableLookup(CostTblNoPairwise, ISD, MTy); 2566 if (!Entry) 2567 break; 2568 auto *ValVTy = cast<FixedVectorType>(ValTy); 2569 if (!ValVTy->getElementType()->isIntegerTy(1) && 2570 MTy.getVectorNumElements() <= ValVTy->getNumElements() && 2571 isPowerOf2_32(ValVTy->getNumElements())) { 2572 InstructionCost ExtraCost = 0; 2573 if (LT.first != 1) { 2574 // Type needs to be split, so there is an extra cost of LT.first - 1 2575 // arithmetic ops. 2576 auto *Ty = FixedVectorType::get(ValTy->getElementType(), 2577 MTy.getVectorNumElements()); 2578 ExtraCost = getArithmeticInstrCost(Opcode, Ty, CostKind); 2579 ExtraCost *= LT.first - 1; 2580 } 2581 return Entry->Cost + ExtraCost; 2582 } 2583 break; 2584 } 2585 return BaseT::getArithmeticReductionCost(Opcode, ValTy, FMF, CostKind); 2586 } 2587 2588 InstructionCost AArch64TTIImpl::getSpliceCost(VectorType *Tp, int Index) { 2589 static const CostTblEntry ShuffleTbl[] = { 2590 { TTI::SK_Splice, MVT::nxv16i8, 1 }, 2591 { TTI::SK_Splice, MVT::nxv8i16, 1 }, 2592 { TTI::SK_Splice, MVT::nxv4i32, 1 }, 2593 { TTI::SK_Splice, MVT::nxv2i64, 1 }, 2594 { TTI::SK_Splice, MVT::nxv2f16, 1 }, 2595 { TTI::SK_Splice, MVT::nxv4f16, 1 }, 2596 { TTI::SK_Splice, MVT::nxv8f16, 1 }, 2597 { TTI::SK_Splice, MVT::nxv2bf16, 1 }, 2598 { TTI::SK_Splice, MVT::nxv4bf16, 1 }, 2599 { TTI::SK_Splice, MVT::nxv8bf16, 1 }, 2600 { TTI::SK_Splice, MVT::nxv2f32, 1 }, 2601 { TTI::SK_Splice, MVT::nxv4f32, 1 }, 2602 { TTI::SK_Splice, MVT::nxv2f64, 1 }, 2603 }; 2604 2605 std::pair<InstructionCost, MVT> LT = TLI->getTypeLegalizationCost(DL, Tp); 2606 Type *LegalVTy = 
EVT(LT.second).getTypeForEVT(Tp->getContext()); 2607 TTI::TargetCostKind CostKind = TTI::TCK_RecipThroughput; 2608 EVT PromotedVT = LT.second.getScalarType() == MVT::i1 2609 ? TLI->getPromotedVTForPredicate(EVT(LT.second)) 2610 : LT.second; 2611 Type *PromotedVTy = EVT(PromotedVT).getTypeForEVT(Tp->getContext()); 2612 InstructionCost LegalizationCost = 0; 2613 if (Index < 0) { 2614 LegalizationCost = 2615 getCmpSelInstrCost(Instruction::ICmp, PromotedVTy, PromotedVTy, 2616 CmpInst::BAD_ICMP_PREDICATE, CostKind) + 2617 getCmpSelInstrCost(Instruction::Select, PromotedVTy, LegalVTy, 2618 CmpInst::BAD_ICMP_PREDICATE, CostKind); 2619 } 2620 2621 // Predicated splice are promoted when lowering. See AArch64ISelLowering.cpp 2622 // Cost performed on a promoted type. 2623 if (LT.second.getScalarType() == MVT::i1) { 2624 LegalizationCost += 2625 getCastInstrCost(Instruction::ZExt, PromotedVTy, LegalVTy, 2626 TTI::CastContextHint::None, CostKind) + 2627 getCastInstrCost(Instruction::Trunc, LegalVTy, PromotedVTy, 2628 TTI::CastContextHint::None, CostKind); 2629 } 2630 const auto *Entry = 2631 CostTableLookup(ShuffleTbl, TTI::SK_Splice, PromotedVT.getSimpleVT()); 2632 assert(Entry && "Illegal Type for Splice"); 2633 LegalizationCost += Entry->Cost; 2634 return LegalizationCost * LT.first; 2635 } 2636 2637 InstructionCost AArch64TTIImpl::getShuffleCost(TTI::ShuffleKind Kind, 2638 VectorType *Tp, 2639 ArrayRef<int> Mask, int Index, 2640 VectorType *SubTp, 2641 ArrayRef<const Value *> Args) { 2642 std::pair<InstructionCost, MVT> LT = TLI->getTypeLegalizationCost(DL, Tp); 2643 // If we have a Mask, and the LT is being legalized somehow, split the Mask 2644 // into smaller vectors and sum the cost of each shuffle. 
2645 if (!Mask.empty() && isa<FixedVectorType>(Tp) && LT.second.isVector() && 2646 Tp->getScalarSizeInBits() == LT.second.getScalarSizeInBits() && 2647 cast<FixedVectorType>(Tp)->getNumElements() > 2648 LT.second.getVectorNumElements() && 2649 !Index && !SubTp) { 2650 unsigned TpNumElts = cast<FixedVectorType>(Tp)->getNumElements(); 2651 assert(Mask.size() == TpNumElts && "Expected Mask and Tp size to match!"); 2652 unsigned LTNumElts = LT.second.getVectorNumElements(); 2653 unsigned NumVecs = (TpNumElts + LTNumElts - 1) / LTNumElts; 2654 VectorType *NTp = 2655 VectorType::get(Tp->getScalarType(), LT.second.getVectorElementCount()); 2656 InstructionCost Cost; 2657 for (unsigned N = 0; N < NumVecs; N++) { 2658 SmallVector<int> NMask; 2659 // Split the existing mask into chunks of size LTNumElts. Track the source 2660 // sub-vectors to ensure the result has at most 2 inputs. 2661 unsigned Source1, Source2; 2662 unsigned NumSources = 0; 2663 for (unsigned E = 0; E < LTNumElts; E++) { 2664 int MaskElt = (N * LTNumElts + E < TpNumElts) ? Mask[N * LTNumElts + E] 2665 : UndefMaskElem; 2666 if (MaskElt < 0) { 2667 NMask.push_back(UndefMaskElem); 2668 continue; 2669 } 2670 2671 // Calculate which source from the input this comes from and whether it 2672 // is new to us. 2673 unsigned Source = MaskElt / LTNumElts; 2674 if (NumSources == 0) { 2675 Source1 = Source; 2676 NumSources = 1; 2677 } else if (NumSources == 1 && Source != Source1) { 2678 Source2 = Source; 2679 NumSources = 2; 2680 } else if (NumSources >= 2 && Source != Source1 && Source != Source2) { 2681 NumSources++; 2682 } 2683 2684 // Add to the new mask. For the NumSources>2 case these are not correct, 2685 // but are only used for the modular lane number. 
2686 if (Source == Source1) 2687 NMask.push_back(MaskElt % LTNumElts); 2688 else if (Source == Source2) 2689 NMask.push_back(MaskElt % LTNumElts + LTNumElts); 2690 else 2691 NMask.push_back(MaskElt % LTNumElts); 2692 } 2693 // If the sub-mask has at most 2 input sub-vectors then re-cost it using 2694 // getShuffleCost. If not then cost it using the worst case. 2695 if (NumSources <= 2) 2696 Cost += getShuffleCost(NumSources <= 1 ? TTI::SK_PermuteSingleSrc 2697 : TTI::SK_PermuteTwoSrc, 2698 NTp, NMask, 0, nullptr, Args); 2699 else if (any_of(enumerate(NMask), [&](const auto &ME) { 2700 return ME.value() % LTNumElts == ME.index(); 2701 })) 2702 Cost += LTNumElts - 1; 2703 else 2704 Cost += LTNumElts; 2705 } 2706 return Cost; 2707 } 2708 2709 Kind = improveShuffleKindFromMask(Kind, Mask); 2710 2711 // Check for broadcast loads. 2712 if (Kind == TTI::SK_Broadcast) { 2713 bool IsLoad = !Args.empty() && isa<LoadInst>(Args[0]); 2714 if (IsLoad && LT.second.isVector() && 2715 isLegalBroadcastLoad(Tp->getElementType(), 2716 LT.second.getVectorElementCount())) 2717 return 0; // broadcast is handled by ld1r 2718 } 2719 2720 // If we have 4 elements for the shuffle and a Mask, get the cost straight 2721 // from the perfect shuffle tables. 2722 if (Mask.size() == 4 && Tp->getElementCount() == ElementCount::getFixed(4) && 2723 (Tp->getScalarSizeInBits() == 16 || Tp->getScalarSizeInBits() == 32) && 2724 all_of(Mask, [](int E) { return E < 8; })) 2725 return getPerfectShuffleCost(Mask); 2726 2727 if (Kind == TTI::SK_Broadcast || Kind == TTI::SK_Transpose || 2728 Kind == TTI::SK_Select || Kind == TTI::SK_PermuteSingleSrc || 2729 Kind == TTI::SK_Reverse) { 2730 2731 static const CostTblEntry ShuffleTbl[] = { 2732 // Broadcast shuffle kinds can be performed with 'dup'. 
2733 { TTI::SK_Broadcast, MVT::v8i8, 1 }, 2734 { TTI::SK_Broadcast, MVT::v16i8, 1 }, 2735 { TTI::SK_Broadcast, MVT::v4i16, 1 }, 2736 { TTI::SK_Broadcast, MVT::v8i16, 1 }, 2737 { TTI::SK_Broadcast, MVT::v2i32, 1 }, 2738 { TTI::SK_Broadcast, MVT::v4i32, 1 }, 2739 { TTI::SK_Broadcast, MVT::v2i64, 1 }, 2740 { TTI::SK_Broadcast, MVT::v2f32, 1 }, 2741 { TTI::SK_Broadcast, MVT::v4f32, 1 }, 2742 { TTI::SK_Broadcast, MVT::v2f64, 1 }, 2743 // Transpose shuffle kinds can be performed with 'trn1/trn2' and 2744 // 'zip1/zip2' instructions. 2745 { TTI::SK_Transpose, MVT::v8i8, 1 }, 2746 { TTI::SK_Transpose, MVT::v16i8, 1 }, 2747 { TTI::SK_Transpose, MVT::v4i16, 1 }, 2748 { TTI::SK_Transpose, MVT::v8i16, 1 }, 2749 { TTI::SK_Transpose, MVT::v2i32, 1 }, 2750 { TTI::SK_Transpose, MVT::v4i32, 1 }, 2751 { TTI::SK_Transpose, MVT::v2i64, 1 }, 2752 { TTI::SK_Transpose, MVT::v2f32, 1 }, 2753 { TTI::SK_Transpose, MVT::v4f32, 1 }, 2754 { TTI::SK_Transpose, MVT::v2f64, 1 }, 2755 // Select shuffle kinds. 2756 // TODO: handle vXi8/vXi16. 2757 { TTI::SK_Select, MVT::v2i32, 1 }, // mov. 2758 { TTI::SK_Select, MVT::v4i32, 2 }, // rev+trn (or similar). 2759 { TTI::SK_Select, MVT::v2i64, 1 }, // mov. 2760 { TTI::SK_Select, MVT::v2f32, 1 }, // mov. 2761 { TTI::SK_Select, MVT::v4f32, 2 }, // rev+trn (or similar). 2762 { TTI::SK_Select, MVT::v2f64, 1 }, // mov. 2763 // PermuteSingleSrc shuffle kinds. 2764 { TTI::SK_PermuteSingleSrc, MVT::v2i32, 1 }, // mov. 2765 { TTI::SK_PermuteSingleSrc, MVT::v4i32, 3 }, // perfectshuffle worst case. 2766 { TTI::SK_PermuteSingleSrc, MVT::v2i64, 1 }, // mov. 2767 { TTI::SK_PermuteSingleSrc, MVT::v2f32, 1 }, // mov. 2768 { TTI::SK_PermuteSingleSrc, MVT::v4f32, 3 }, // perfectshuffle worst case. 2769 { TTI::SK_PermuteSingleSrc, MVT::v2f64, 1 }, // mov. 2770 { TTI::SK_PermuteSingleSrc, MVT::v4i16, 3 }, // perfectshuffle worst case. 2771 { TTI::SK_PermuteSingleSrc, MVT::v4f16, 3 }, // perfectshuffle worst case. 
2772 { TTI::SK_PermuteSingleSrc, MVT::v4bf16, 3 }, // perfectshuffle worst case. 2773 { TTI::SK_PermuteSingleSrc, MVT::v8i16, 8 }, // constpool + load + tbl 2774 { TTI::SK_PermuteSingleSrc, MVT::v8f16, 8 }, // constpool + load + tbl 2775 { TTI::SK_PermuteSingleSrc, MVT::v8bf16, 8 }, // constpool + load + tbl 2776 { TTI::SK_PermuteSingleSrc, MVT::v8i8, 8 }, // constpool + load + tbl 2777 { TTI::SK_PermuteSingleSrc, MVT::v16i8, 8 }, // constpool + load + tbl 2778 // Reverse can be lowered with `rev`. 2779 { TTI::SK_Reverse, MVT::v2i32, 1 }, // mov. 2780 { TTI::SK_Reverse, MVT::v4i32, 2 }, // REV64; EXT 2781 { TTI::SK_Reverse, MVT::v2i64, 1 }, // mov. 2782 { TTI::SK_Reverse, MVT::v2f32, 1 }, // mov. 2783 { TTI::SK_Reverse, MVT::v4f32, 2 }, // REV64; EXT 2784 { TTI::SK_Reverse, MVT::v2f64, 1 }, // mov. 2785 // Broadcast shuffle kinds for scalable vectors 2786 { TTI::SK_Broadcast, MVT::nxv16i8, 1 }, 2787 { TTI::SK_Broadcast, MVT::nxv8i16, 1 }, 2788 { TTI::SK_Broadcast, MVT::nxv4i32, 1 }, 2789 { TTI::SK_Broadcast, MVT::nxv2i64, 1 }, 2790 { TTI::SK_Broadcast, MVT::nxv2f16, 1 }, 2791 { TTI::SK_Broadcast, MVT::nxv4f16, 1 }, 2792 { TTI::SK_Broadcast, MVT::nxv8f16, 1 }, 2793 { TTI::SK_Broadcast, MVT::nxv2bf16, 1 }, 2794 { TTI::SK_Broadcast, MVT::nxv4bf16, 1 }, 2795 { TTI::SK_Broadcast, MVT::nxv8bf16, 1 }, 2796 { TTI::SK_Broadcast, MVT::nxv2f32, 1 }, 2797 { TTI::SK_Broadcast, MVT::nxv4f32, 1 }, 2798 { TTI::SK_Broadcast, MVT::nxv2f64, 1 }, 2799 { TTI::SK_Broadcast, MVT::nxv16i1, 1 }, 2800 { TTI::SK_Broadcast, MVT::nxv8i1, 1 }, 2801 { TTI::SK_Broadcast, MVT::nxv4i1, 1 }, 2802 { TTI::SK_Broadcast, MVT::nxv2i1, 1 }, 2803 // Handle the cases for vector.reverse with scalable vectors 2804 { TTI::SK_Reverse, MVT::nxv16i8, 1 }, 2805 { TTI::SK_Reverse, MVT::nxv8i16, 1 }, 2806 { TTI::SK_Reverse, MVT::nxv4i32, 1 }, 2807 { TTI::SK_Reverse, MVT::nxv2i64, 1 }, 2808 { TTI::SK_Reverse, MVT::nxv2f16, 1 }, 2809 { TTI::SK_Reverse, MVT::nxv4f16, 1 }, 2810 { TTI::SK_Reverse, MVT::nxv8f16, 1 }, 2811 
{ TTI::SK_Reverse, MVT::nxv2bf16, 1 }, 2812 { TTI::SK_Reverse, MVT::nxv4bf16, 1 }, 2813 { TTI::SK_Reverse, MVT::nxv8bf16, 1 }, 2814 { TTI::SK_Reverse, MVT::nxv2f32, 1 }, 2815 { TTI::SK_Reverse, MVT::nxv4f32, 1 }, 2816 { TTI::SK_Reverse, MVT::nxv2f64, 1 }, 2817 { TTI::SK_Reverse, MVT::nxv16i1, 1 }, 2818 { TTI::SK_Reverse, MVT::nxv8i1, 1 }, 2819 { TTI::SK_Reverse, MVT::nxv4i1, 1 }, 2820 { TTI::SK_Reverse, MVT::nxv2i1, 1 }, 2821 }; 2822 if (const auto *Entry = CostTableLookup(ShuffleTbl, Kind, LT.second)) 2823 return LT.first * Entry->Cost; 2824 } 2825 2826 if (Kind == TTI::SK_Splice && isa<ScalableVectorType>(Tp)) 2827 return getSpliceCost(Tp, Index); 2828 2829 // Inserting a subvector can often be done with either a D, S or H register 2830 // move, so long as the inserted vector is "aligned". 2831 if (Kind == TTI::SK_InsertSubvector && LT.second.isFixedLengthVector() && 2832 LT.second.getSizeInBits() <= 128 && SubTp) { 2833 std::pair<InstructionCost, MVT> SubLT = 2834 TLI->getTypeLegalizationCost(DL, SubTp); 2835 if (SubLT.second.isVector()) { 2836 int NumElts = LT.second.getVectorNumElements(); 2837 int NumSubElts = SubLT.second.getVectorNumElements(); 2838 if ((Index % NumSubElts) == 0 && (NumElts % NumSubElts) == 0) 2839 return SubLT.first; 2840 } 2841 } 2842 2843 return BaseT::getShuffleCost(Kind, Tp, Mask, Index, SubTp); 2844 } 2845