1 //===-- AArch64TargetTransformInfo.cpp - AArch64 specific TTI -------------===// 2 // 3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. 4 // See https://llvm.org/LICENSE.txt for license information. 5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception 6 // 7 //===----------------------------------------------------------------------===// 8 9 #include "AArch64TargetTransformInfo.h" 10 #include "AArch64ExpandImm.h" 11 #include "MCTargetDesc/AArch64AddressingModes.h" 12 #include "llvm/Analysis/IVDescriptors.h" 13 #include "llvm/Analysis/LoopInfo.h" 14 #include "llvm/Analysis/TargetTransformInfo.h" 15 #include "llvm/CodeGen/BasicTTIImpl.h" 16 #include "llvm/CodeGen/CostTable.h" 17 #include "llvm/CodeGen/TargetLowering.h" 18 #include "llvm/IR/Intrinsics.h" 19 #include "llvm/IR/IntrinsicInst.h" 20 #include "llvm/IR/IntrinsicsAArch64.h" 21 #include "llvm/IR/PatternMatch.h" 22 #include "llvm/Support/Debug.h" 23 #include "llvm/Transforms/InstCombine/InstCombiner.h" 24 #include <algorithm> 25 using namespace llvm; 26 using namespace llvm::PatternMatch; 27 28 #define DEBUG_TYPE "aarch64tti" 29 30 static cl::opt<bool> EnableFalkorHWPFUnrollFix("enable-falkor-hwpf-unroll-fix", 31 cl::init(true), cl::Hidden); 32 33 static cl::opt<unsigned> SVEGatherOverhead("sve-gather-overhead", cl::init(10), 34 cl::Hidden); 35 36 static cl::opt<unsigned> SVEScatterOverhead("sve-scatter-overhead", 37 cl::init(10), cl::Hidden); 38 39 bool AArch64TTIImpl::areInlineCompatible(const Function *Caller, 40 const Function *Callee) const { 41 const TargetMachine &TM = getTLI()->getTargetMachine(); 42 43 const FeatureBitset &CallerBits = 44 TM.getSubtargetImpl(*Caller)->getFeatureBits(); 45 const FeatureBitset &CalleeBits = 46 TM.getSubtargetImpl(*Callee)->getFeatureBits(); 47 48 // Inline a callee if its target-features are a subset of the caller's 49 // target-features. 50 return (CallerBits & CalleeBits) == CalleeBits; 51 } 52 53 /// Calculate the cost of materializing a 64-bit value. This helper 54 /// method might only calculate a fraction of a larger immediate. Therefore it 55 /// is valid to return a cost of ZERO. 56 InstructionCost AArch64TTIImpl::getIntImmCost(int64_t Val) { 57 // Check if the immediate can be encoded within an instruction. 58 if (Val == 0 || AArch64_AM::isLogicalImmediate(Val, 64)) 59 return 0; 60 61 if (Val < 0) 62 Val = ~Val; 63 64 // Calculate how many moves we will need to materialize this constant. 65 SmallVector<AArch64_IMM::ImmInsnModel, 4> Insn; 66 AArch64_IMM::expandMOVImm(Val, 64, Insn); 67 return Insn.size(); 68 } 69 70 /// Calculate the cost of materializing the given constant. 71 InstructionCost AArch64TTIImpl::getIntImmCost(const APInt &Imm, Type *Ty, 72 TTI::TargetCostKind CostKind) { 73 assert(Ty->isIntegerTy()); 74 75 unsigned BitSize = Ty->getPrimitiveSizeInBits(); 76 if (BitSize == 0) 77 return ~0U; 78 79 // Sign-extend all constants to a multiple of 64-bit. 80 APInt ImmVal = Imm; 81 if (BitSize & 0x3f) 82 ImmVal = Imm.sext((BitSize + 63) & ~0x3fU); 83 84 // Split the constant into 64-bit chunks and calculate the cost for each 85 // chunk. 86 InstructionCost Cost = 0; 87 for (unsigned ShiftVal = 0; ShiftVal < BitSize; ShiftVal += 64) { 88 APInt Tmp = ImmVal.ashr(ShiftVal).sextOrTrunc(64); 89 int64_t Val = Tmp.getSExtValue(); 90 Cost += getIntImmCost(Val); 91 } 92 // We need at least one instruction to materialize the constant.
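// For illustration only (not from the original source): a 128-bit constant is
// split into two 64-bit chunks above; if one chunk is a valid logical
// immediate (cost 0) and the other needs a MOVZ+MOVK pair (cost 2), the
// result below is max(1, 0 + 2) = 2.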
93 return std::max<InstructionCost>(1, Cost); 94 } 95 96 InstructionCost AArch64TTIImpl::getIntImmCostInst(unsigned Opcode, unsigned Idx, 97 const APInt &Imm, Type *Ty, 98 TTI::TargetCostKind CostKind, 99 Instruction *Inst) { 100 assert(Ty->isIntegerTy()); 101 102 unsigned BitSize = Ty->getPrimitiveSizeInBits(); 103 // There is no cost model for constants with a bit size of 0. Return TCC_Free 104 // here, so that constant hoisting will ignore this constant. 105 if (BitSize == 0) 106 return TTI::TCC_Free; 107 108 unsigned ImmIdx = ~0U; 109 switch (Opcode) { 110 default: 111 return TTI::TCC_Free; 112 case Instruction::GetElementPtr: 113 // Always hoist the base address of a GetElementPtr. 114 if (Idx == 0) 115 return 2 * TTI::TCC_Basic; 116 return TTI::TCC_Free; 117 case Instruction::Store: 118 ImmIdx = 0; 119 break; 120 case Instruction::Add: 121 case Instruction::Sub: 122 case Instruction::Mul: 123 case Instruction::UDiv: 124 case Instruction::SDiv: 125 case Instruction::URem: 126 case Instruction::SRem: 127 case Instruction::And: 128 case Instruction::Or: 129 case Instruction::Xor: 130 case Instruction::ICmp: 131 ImmIdx = 1; 132 break; 133 // Always return TCC_Free for the shift value of a shift instruction. 134 case Instruction::Shl: 135 case Instruction::LShr: 136 case Instruction::AShr: 137 if (Idx == 1) 138 return TTI::TCC_Free; 139 break; 140 case Instruction::Trunc: 141 case Instruction::ZExt: 142 case Instruction::SExt: 143 case Instruction::IntToPtr: 144 case Instruction::PtrToInt: 145 case Instruction::BitCast: 146 case Instruction::PHI: 147 case Instruction::Call: 148 case Instruction::Select: 149 case Instruction::Ret: 150 case Instruction::Load: 151 break; 152 } 153 154 if (Idx == ImmIdx) { 155 int NumConstants = (BitSize + 63) / 64; 156 InstructionCost Cost = AArch64TTIImpl::getIntImmCost(Imm, Ty, CostKind); 157 return (Cost <= NumConstants * TTI::TCC_Basic) 158 ? static_cast<int>(TTI::TCC_Free) 159 : Cost; 160 } 161 return AArch64TTIImpl::getIntImmCost(Imm, Ty, CostKind); 162 } 163 164 InstructionCost 165 AArch64TTIImpl::getIntImmCostIntrin(Intrinsic::ID IID, unsigned Idx, 166 const APInt &Imm, Type *Ty, 167 TTI::TargetCostKind CostKind) { 168 assert(Ty->isIntegerTy()); 169 170 unsigned BitSize = Ty->getPrimitiveSizeInBits(); 171 // There is no cost model for constants with a bit size of 0. Return TCC_Free 172 // here, so that constant hoisting will ignore this constant. 173 if (BitSize == 0) 174 return TTI::TCC_Free; 175 176 // Most (all?) AArch64 intrinsics do not support folding immediates into the 177 // selected instruction, so we compute the materialization cost for the 178 // immediate directly. 179 if (IID >= Intrinsic::aarch64_addg && IID <= Intrinsic::aarch64_udiv) 180 return AArch64TTIImpl::getIntImmCost(Imm, Ty, CostKind); 181 182 switch (IID) { 183 default: 184 return TTI::TCC_Free; 185 case Intrinsic::sadd_with_overflow: 186 case Intrinsic::uadd_with_overflow: 187 case Intrinsic::ssub_with_overflow: 188 case Intrinsic::usub_with_overflow: 189 case Intrinsic::smul_with_overflow: 190 case Intrinsic::umul_with_overflow: 191 if (Idx == 1) { 192 int NumConstants = (BitSize + 63) / 64; 193 InstructionCost Cost = AArch64TTIImpl::getIntImmCost(Imm, Ty, CostKind); 194 return (Cost <= NumConstants * TTI::TCC_Basic) 195 ? 
static_cast<int>(TTI::TCC_Free) 196 : Cost; 197 } 198 break; 199 case Intrinsic::experimental_stackmap: 200 if ((Idx < 2) || (Imm.getBitWidth() <= 64 && isInt<64>(Imm.getSExtValue()))) 201 return TTI::TCC_Free; 202 break; 203 case Intrinsic::experimental_patchpoint_void: 204 case Intrinsic::experimental_patchpoint_i64: 205 if ((Idx < 4) || (Imm.getBitWidth() <= 64 && isInt<64>(Imm.getSExtValue()))) 206 return TTI::TCC_Free; 207 break; 208 case Intrinsic::experimental_gc_statepoint: 209 if ((Idx < 5) || (Imm.getBitWidth() <= 64 && isInt<64>(Imm.getSExtValue()))) 210 return TTI::TCC_Free; 211 break; 212 } 213 return AArch64TTIImpl::getIntImmCost(Imm, Ty, CostKind); 214 } 215 216 TargetTransformInfo::PopcntSupportKind 217 AArch64TTIImpl::getPopcntSupport(unsigned TyWidth) { 218 assert(isPowerOf2_32(TyWidth) && "Ty width must be power of 2"); 219 if (TyWidth == 32 || TyWidth == 64) 220 return TTI::PSK_FastHardware; 221 // TODO: AArch64TargetLowering::LowerCTPOP() supports 128bit popcount. 222 return TTI::PSK_Software; 223 } 224 225 InstructionCost 226 AArch64TTIImpl::getIntrinsicInstrCost(const IntrinsicCostAttributes &ICA, 227 TTI::TargetCostKind CostKind) { 228 auto *RetTy = ICA.getReturnType(); 229 switch (ICA.getID()) { 230 case Intrinsic::umin: 231 case Intrinsic::umax: 232 case Intrinsic::smin: 233 case Intrinsic::smax: { 234 static const auto ValidMinMaxTys = {MVT::v8i8, MVT::v16i8, MVT::v4i16, 235 MVT::v8i16, MVT::v2i32, MVT::v4i32}; 236 auto LT = TLI->getTypeLegalizationCost(DL, RetTy); 237 // v2i64 types get converted to cmp+bif hence the cost of 2 238 if (LT.second == MVT::v2i64) 239 return LT.first * 2; 240 if (any_of(ValidMinMaxTys, [&LT](MVT M) { return M == LT.second; })) 241 return LT.first; 242 break; 243 } 244 case Intrinsic::sadd_sat: 245 case Intrinsic::ssub_sat: 246 case Intrinsic::uadd_sat: 247 case Intrinsic::usub_sat: { 248 static const auto ValidSatTys = {MVT::v8i8, MVT::v16i8, MVT::v4i16, 249 MVT::v8i16, MVT::v2i32, MVT::v4i32, 250 MVT::v2i64}; 251 auto LT = TLI->getTypeLegalizationCost(DL, RetTy); 252 // This is a base cost of 1 for the vadd, plus 3 extract shifts if we 253 // need to extend the type, as it uses shr(qadd(shl, shl)). 254 unsigned Instrs = 255 LT.second.getScalarSizeInBits() == RetTy->getScalarSizeInBits() ? 1 : 4; 256 if (any_of(ValidSatTys, [&LT](MVT M) { return M == LT.second; })) 257 return LT.first * Instrs; 258 break; 259 } 260 case Intrinsic::abs: { 261 static const auto ValidAbsTys = {MVT::v8i8, MVT::v16i8, MVT::v4i16, 262 MVT::v8i16, MVT::v2i32, MVT::v4i32, 263 MVT::v2i64}; 264 auto LT = TLI->getTypeLegalizationCost(DL, RetTy); 265 if (any_of(ValidAbsTys, [&LT](MVT M) { return M == LT.second; })) 266 return LT.first; 267 break; 268 } 269 case Intrinsic::experimental_stepvector: { 270 InstructionCost Cost = 1; // Cost of the `index' instruction 271 auto LT = TLI->getTypeLegalizationCost(DL, RetTy); 272 // Legalisation of illegal vectors involves an `index' instruction plus 273 // (LT.first - 1) vector adds.
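// For example (assuming typical SVE legalization, for illustration only): a
// stepvector returning <vscale x 8 x i32> is split into two <vscale x 4 x i32>
// halves, so LT.first == 2 and the cost is one `index' plus one vector add.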
274 if (LT.first > 1) { 275 Type *LegalVTy = EVT(LT.second).getTypeForEVT(RetTy->getContext()); 276 InstructionCost AddCost = 277 getArithmeticInstrCost(Instruction::Add, LegalVTy, CostKind); 278 Cost += AddCost * (LT.first - 1); 279 } 280 return Cost; 281 } 282 case Intrinsic::bitreverse: { 283 static const CostTblEntry BitreverseTbl[] = { 284 {Intrinsic::bitreverse, MVT::i32, 1}, 285 {Intrinsic::bitreverse, MVT::i64, 1}, 286 {Intrinsic::bitreverse, MVT::v8i8, 1}, 287 {Intrinsic::bitreverse, MVT::v16i8, 1}, 288 {Intrinsic::bitreverse, MVT::v4i16, 2}, 289 {Intrinsic::bitreverse, MVT::v8i16, 2}, 290 {Intrinsic::bitreverse, MVT::v2i32, 2}, 291 {Intrinsic::bitreverse, MVT::v4i32, 2}, 292 {Intrinsic::bitreverse, MVT::v1i64, 2}, 293 {Intrinsic::bitreverse, MVT::v2i64, 2}, 294 }; 295 const auto LegalisationCost = TLI->getTypeLegalizationCost(DL, RetTy); 296 const auto *Entry = 297 CostTableLookup(BitreverseTbl, ICA.getID(), LegalisationCost.second); 298 if (Entry) { 299 // Cost Model is using the legal type(i32) that i8 and i16 will be 300 // converted to +1 so that we match the actual lowering cost 301 if (TLI->getValueType(DL, RetTy, true) == MVT::i8 || 302 TLI->getValueType(DL, RetTy, true) == MVT::i16) 303 return LegalisationCost.first * Entry->Cost + 1; 304 305 return LegalisationCost.first * Entry->Cost; 306 } 307 break; 308 } 309 case Intrinsic::ctpop: { 310 static const CostTblEntry CtpopCostTbl[] = { 311 {ISD::CTPOP, MVT::v2i64, 4}, 312 {ISD::CTPOP, MVT::v4i32, 3}, 313 {ISD::CTPOP, MVT::v8i16, 2}, 314 {ISD::CTPOP, MVT::v16i8, 1}, 315 {ISD::CTPOP, MVT::i64, 4}, 316 {ISD::CTPOP, MVT::v2i32, 3}, 317 {ISD::CTPOP, MVT::v4i16, 2}, 318 {ISD::CTPOP, MVT::v8i8, 1}, 319 {ISD::CTPOP, MVT::i32, 5}, 320 }; 321 auto LT = TLI->getTypeLegalizationCost(DL, RetTy); 322 MVT MTy = LT.second; 323 if (const auto *Entry = CostTableLookup(CtpopCostTbl, ISD::CTPOP, MTy)) { 324 // Extra cost of +1 when illegal vector types are legalized by promoting 325 // the integer type. 326 int ExtraCost = MTy.isVector() && MTy.getScalarSizeInBits() != 327 RetTy->getScalarSizeInBits() 328 ? 
1 329 : 0; 330 return LT.first * Entry->Cost + ExtraCost; 331 } 332 break; 333 } 334 case Intrinsic::sadd_with_overflow: 335 case Intrinsic::uadd_with_overflow: 336 case Intrinsic::ssub_with_overflow: 337 case Intrinsic::usub_with_overflow: 338 case Intrinsic::smul_with_overflow: 339 case Intrinsic::umul_with_overflow: { 340 static const CostTblEntry WithOverflowCostTbl[] = { 341 {Intrinsic::sadd_with_overflow, MVT::i8, 3}, 342 {Intrinsic::uadd_with_overflow, MVT::i8, 3}, 343 {Intrinsic::sadd_with_overflow, MVT::i16, 3}, 344 {Intrinsic::uadd_with_overflow, MVT::i16, 3}, 345 {Intrinsic::sadd_with_overflow, MVT::i32, 1}, 346 {Intrinsic::uadd_with_overflow, MVT::i32, 1}, 347 {Intrinsic::sadd_with_overflow, MVT::i64, 1}, 348 {Intrinsic::uadd_with_overflow, MVT::i64, 1}, 349 {Intrinsic::ssub_with_overflow, MVT::i8, 3}, 350 {Intrinsic::usub_with_overflow, MVT::i8, 3}, 351 {Intrinsic::ssub_with_overflow, MVT::i16, 3}, 352 {Intrinsic::usub_with_overflow, MVT::i16, 3}, 353 {Intrinsic::ssub_with_overflow, MVT::i32, 1}, 354 {Intrinsic::usub_with_overflow, MVT::i32, 1}, 355 {Intrinsic::ssub_with_overflow, MVT::i64, 1}, 356 {Intrinsic::usub_with_overflow, MVT::i64, 1}, 357 {Intrinsic::smul_with_overflow, MVT::i8, 5}, 358 {Intrinsic::umul_with_overflow, MVT::i8, 4}, 359 {Intrinsic::smul_with_overflow, MVT::i16, 5}, 360 {Intrinsic::umul_with_overflow, MVT::i16, 4}, 361 {Intrinsic::smul_with_overflow, MVT::i32, 2}, // eg umull;tst 362 {Intrinsic::umul_with_overflow, MVT::i32, 2}, // eg umull;cmp sxtw 363 {Intrinsic::smul_with_overflow, MVT::i64, 3}, // eg mul;smulh;cmp 364 {Intrinsic::umul_with_overflow, MVT::i64, 3}, // eg mul;umulh;cmp asr 365 }; 366 EVT MTy = TLI->getValueType(DL, RetTy->getContainedType(0), true); 367 if (MTy.isSimple()) 368 if (const auto *Entry = CostTableLookup(WithOverflowCostTbl, ICA.getID(), 369 MTy.getSimpleVT())) 370 return Entry->Cost; 371 break; 372 } 373 default: 374 break; 375 } 376 return BaseT::getIntrinsicInstrCost(ICA, CostKind); 377 } 378 379 /// The function will remove redundant reinterprets casting in the presence 380 /// of the control flow 381 static Optional<Instruction *> processPhiNode(InstCombiner &IC, 382 IntrinsicInst &II) { 383 SmallVector<Instruction *, 32> Worklist; 384 auto RequiredType = II.getType(); 385 386 auto *PN = dyn_cast<PHINode>(II.getArgOperand(0)); 387 assert(PN && "Expected Phi Node!"); 388 389 // Don't create a new Phi unless we can remove the old one. 
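// Overall rewrite performed here, sketched with illustrative types:
//   %p = phi <vscale x 16 x i1> [ to_svbool(%a), ... ], [ to_svbool(%b), ... ]
//   %r = from_svbool %p to <vscale x 4 x i1>
// becomes a single phi over %a and %b when both already have the
// <vscale x 4 x i1> result type.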
390 if (!PN->hasOneUse()) 391 return None; 392 393 for (Value *IncValPhi : PN->incoming_values()) { 394 auto *Reinterpret = dyn_cast<IntrinsicInst>(IncValPhi); 395 if (!Reinterpret || 396 Reinterpret->getIntrinsicID() != 397 Intrinsic::aarch64_sve_convert_to_svbool || 398 RequiredType != Reinterpret->getArgOperand(0)->getType()) 399 return None; 400 } 401 402 // Create the new Phi 403 LLVMContext &Ctx = PN->getContext(); 404 IRBuilder<> Builder(Ctx); 405 Builder.SetInsertPoint(PN); 406 PHINode *NPN = Builder.CreatePHI(RequiredType, PN->getNumIncomingValues()); 407 Worklist.push_back(PN); 408 409 for (unsigned I = 0; I < PN->getNumIncomingValues(); I++) { 410 auto *Reinterpret = cast<Instruction>(PN->getIncomingValue(I)); 411 NPN->addIncoming(Reinterpret->getOperand(0), PN->getIncomingBlock(I)); 412 Worklist.push_back(Reinterpret); 413 } 414 415 // Cleanup Phi Node and reinterprets 416 return IC.replaceInstUsesWith(II, NPN); 417 } 418 419 // (from_svbool (binop (to_svbool pred) (svbool_t _) (svbool_t _)))) 420 // => (binop (pred) (from_svbool _) (from_svbool _)) 421 // 422 // The above transformation eliminates a `to_svbool` in the predicate 423 // operand of bitwise operation `binop` by narrowing the vector width of 424 // the operation. For example, it would convert a `<vscale x 16 x i1> 425 // and` into a `<vscale x 4 x i1> and`. This is profitable because 426 // to_svbool must zero the new lanes during widening, whereas 427 // from_svbool is free. 428 static Optional<Instruction *> tryCombineFromSVBoolBinOp(InstCombiner &IC, 429 IntrinsicInst &II) { 430 auto BinOp = dyn_cast<IntrinsicInst>(II.getOperand(0)); 431 if (!BinOp) 432 return None; 433 434 auto IntrinsicID = BinOp->getIntrinsicID(); 435 switch (IntrinsicID) { 436 case Intrinsic::aarch64_sve_and_z: 437 case Intrinsic::aarch64_sve_bic_z: 438 case Intrinsic::aarch64_sve_eor_z: 439 case Intrinsic::aarch64_sve_nand_z: 440 case Intrinsic::aarch64_sve_nor_z: 441 case Intrinsic::aarch64_sve_orn_z: 442 case Intrinsic::aarch64_sve_orr_z: 443 break; 444 default: 445 return None; 446 } 447 448 auto BinOpPred = BinOp->getOperand(0); 449 auto BinOpOp1 = BinOp->getOperand(1); 450 auto BinOpOp2 = BinOp->getOperand(2); 451 452 auto PredIntr = dyn_cast<IntrinsicInst>(BinOpPred); 453 if (!PredIntr || 454 PredIntr->getIntrinsicID() != Intrinsic::aarch64_sve_convert_to_svbool) 455 return None; 456 457 auto PredOp = PredIntr->getOperand(0); 458 auto PredOpTy = cast<VectorType>(PredOp->getType()); 459 if (PredOpTy != II.getType()) 460 return None; 461 462 IRBuilder<> Builder(II.getContext()); 463 Builder.SetInsertPoint(&II); 464 465 SmallVector<Value *> NarrowedBinOpArgs = {PredOp}; 466 auto NarrowBinOpOp1 = Builder.CreateIntrinsic( 467 Intrinsic::aarch64_sve_convert_from_svbool, {PredOpTy}, {BinOpOp1}); 468 NarrowedBinOpArgs.push_back(NarrowBinOpOp1); 469 if (BinOpOp1 == BinOpOp2) 470 NarrowedBinOpArgs.push_back(NarrowBinOpOp1); 471 else 472 NarrowedBinOpArgs.push_back(Builder.CreateIntrinsic( 473 Intrinsic::aarch64_sve_convert_from_svbool, {PredOpTy}, {BinOpOp2})); 474 475 auto NarrowedBinOp = 476 Builder.CreateIntrinsic(IntrinsicID, {PredOpTy}, NarrowedBinOpArgs); 477 return IC.replaceInstUsesWith(II, NarrowedBinOp); 478 } 479 480 static Optional<Instruction *> instCombineConvertFromSVBool(InstCombiner &IC, 481 IntrinsicInst &II) { 482 // If the reinterpret instruction operand is a PHI Node 483 if (isa<PHINode>(II.getArgOperand(0))) 484 return processPhiNode(IC, II); 485 486 if (auto BinOpCombine = tryCombineFromSVBoolBinOp(IC, II)) 487 return 
BinOpCombine; 488 489 SmallVector<Instruction *, 32> CandidatesForRemoval; 490 Value *Cursor = II.getOperand(0), *EarliestReplacement = nullptr; 491 492 const auto *IVTy = cast<VectorType>(II.getType()); 493 494 // Walk the chain of conversions. 495 while (Cursor) { 496 // If the type of the cursor has fewer lanes than the final result, zeroing 497 // must take place, which breaks the equivalence chain. 498 const auto *CursorVTy = cast<VectorType>(Cursor->getType()); 499 if (CursorVTy->getElementCount().getKnownMinValue() < 500 IVTy->getElementCount().getKnownMinValue()) 501 break; 502 503 // If the cursor has the same type as I, it is a viable replacement. 504 if (Cursor->getType() == IVTy) 505 EarliestReplacement = Cursor; 506 507 auto *IntrinsicCursor = dyn_cast<IntrinsicInst>(Cursor); 508 509 // If this is not an SVE conversion intrinsic, this is the end of the chain. 510 if (!IntrinsicCursor || !(IntrinsicCursor->getIntrinsicID() == 511 Intrinsic::aarch64_sve_convert_to_svbool || 512 IntrinsicCursor->getIntrinsicID() == 513 Intrinsic::aarch64_sve_convert_from_svbool)) 514 break; 515 516 CandidatesForRemoval.insert(CandidatesForRemoval.begin(), IntrinsicCursor); 517 Cursor = IntrinsicCursor->getOperand(0); 518 } 519 520 // If no viable replacement in the conversion chain was found, there is 521 // nothing to do. 522 if (!EarliestReplacement) 523 return None; 524 525 return IC.replaceInstUsesWith(II, EarliestReplacement); 526 } 527 528 static Optional<Instruction *> instCombineSVEDup(InstCombiner &IC, 529 IntrinsicInst &II) { 530 IntrinsicInst *Pg = dyn_cast<IntrinsicInst>(II.getArgOperand(1)); 531 if (!Pg) 532 return None; 533 534 if (Pg->getIntrinsicID() != Intrinsic::aarch64_sve_ptrue) 535 return None; 536 537 const auto PTruePattern = 538 cast<ConstantInt>(Pg->getOperand(0))->getZExtValue(); 539 if (PTruePattern != AArch64SVEPredPattern::vl1) 540 return None; 541 542 // The intrinsic is inserting into lane zero so use an insert instead. 543 auto *IdxTy = Type::getInt64Ty(II.getContext()); 544 auto *Insert = InsertElementInst::Create( 545 II.getArgOperand(0), II.getArgOperand(2), ConstantInt::get(IdxTy, 0)); 546 Insert->insertBefore(&II); 547 Insert->takeName(&II); 548 549 return IC.replaceInstUsesWith(II, Insert); 550 } 551 552 static Optional<Instruction *> instCombineSVEDupX(InstCombiner &IC, 553 IntrinsicInst &II) { 554 // Replace DupX with a regular IR splat. 555 IRBuilder<> Builder(II.getContext()); 556 Builder.SetInsertPoint(&II); 557 auto *RetTy = cast<ScalableVectorType>(II.getType()); 558 Value *Splat = 559 Builder.CreateVectorSplat(RetTy->getElementCount(), II.getArgOperand(0)); 560 Splat->takeName(&II); 561 return IC.replaceInstUsesWith(II, Splat); 562 } 563 564 static Optional<Instruction *> instCombineSVECmpNE(InstCombiner &IC, 565 IntrinsicInst &II) { 566 LLVMContext &Ctx = II.getContext(); 567 IRBuilder<> Builder(Ctx); 568 Builder.SetInsertPoint(&II); 569 570 // Check that the predicate is all active 571 auto *Pg = dyn_cast<IntrinsicInst>(II.getArgOperand(0)); 572 if (!Pg || Pg->getIntrinsicID() != Intrinsic::aarch64_sve_ptrue) 573 return None; 574 575 const auto PTruePattern = 576 cast<ConstantInt>(Pg->getOperand(0))->getZExtValue(); 577 if (PTruePattern != AArch64SVEPredPattern::all) 578 return None; 579 580 // Check that we have a compare of zero.. 
581 auto *SplatValue = 582 dyn_cast_or_null<ConstantInt>(getSplatValue(II.getArgOperand(2))); 583 if (!SplatValue || !SplatValue->isZero()) 584 return None; 585 586 // ..against a dupq 587 auto *DupQLane = dyn_cast<IntrinsicInst>(II.getArgOperand(1)); 588 if (!DupQLane || 589 DupQLane->getIntrinsicID() != Intrinsic::aarch64_sve_dupq_lane) 590 return None; 591 592 // Where the dupq is a lane 0 replicate of a vector insert 593 if (!cast<ConstantInt>(DupQLane->getArgOperand(1))->isZero()) 594 return None; 595 596 auto *VecIns = dyn_cast<IntrinsicInst>(DupQLane->getArgOperand(0)); 597 if (!VecIns || 598 VecIns->getIntrinsicID() != Intrinsic::experimental_vector_insert) 599 return None; 600 601 // Where the vector insert is a fixed constant vector insert into undef at 602 // index zero 603 if (!isa<UndefValue>(VecIns->getArgOperand(0))) 604 return None; 605 606 if (!cast<ConstantInt>(VecIns->getArgOperand(2))->isZero()) 607 return None; 608 609 auto *ConstVec = dyn_cast<Constant>(VecIns->getArgOperand(1)); 610 if (!ConstVec) 611 return None; 612 613 auto *VecTy = dyn_cast<FixedVectorType>(ConstVec->getType()); 614 auto *OutTy = dyn_cast<ScalableVectorType>(II.getType()); 615 if (!VecTy || !OutTy || VecTy->getNumElements() != OutTy->getMinNumElements()) 616 return None; 617 618 unsigned NumElts = VecTy->getNumElements(); 619 unsigned PredicateBits = 0; 620 621 // Expand intrinsic operands to a 16-bit byte level predicate 622 for (unsigned I = 0; I < NumElts; ++I) { 623 auto *Arg = dyn_cast<ConstantInt>(ConstVec->getAggregateElement(I)); 624 if (!Arg) 625 return None; 626 if (!Arg->isZero()) 627 PredicateBits |= 1 << (I * (16 / NumElts)); 628 } 629 630 // If all bits are zero bail early with an empty predicate 631 if (PredicateBits == 0) { 632 auto *PFalse = Constant::getNullValue(II.getType()); 633 PFalse->takeName(&II); 634 return IC.replaceInstUsesWith(II, PFalse); 635 } 636 637 // Calculate largest predicate type used (where byte predicate is largest) 638 unsigned Mask = 8; 639 for (unsigned I = 0; I < 16; ++I) 640 if ((PredicateBits & (1 << I)) != 0) 641 Mask |= (I % 8); 642 643 unsigned PredSize = Mask & -Mask; 644 auto *PredType = ScalableVectorType::get( 645 Type::getInt1Ty(Ctx), AArch64::SVEBitsPerBlock / (PredSize * 8)); 646 647 // Ensure all relevant bits are set 648 for (unsigned I = 0; I < 16; I += PredSize) 649 if ((PredicateBits & (1 << I)) == 0) 650 return None; 651 652 auto *PTruePat = 653 ConstantInt::get(Type::getInt32Ty(Ctx), AArch64SVEPredPattern::all); 654 auto *PTrue = Builder.CreateIntrinsic(Intrinsic::aarch64_sve_ptrue, 655 {PredType}, {PTruePat}); 656 auto *ConvertToSVBool = Builder.CreateIntrinsic( 657 Intrinsic::aarch64_sve_convert_to_svbool, {PredType}, {PTrue}); 658 auto *ConvertFromSVBool = 659 Builder.CreateIntrinsic(Intrinsic::aarch64_sve_convert_from_svbool, 660 {II.getType()}, {ConvertToSVBool}); 661 662 ConvertFromSVBool->takeName(&II); 663 return IC.replaceInstUsesWith(II, ConvertFromSVBool); 664 } 665 666 static Optional<Instruction *> instCombineSVELast(InstCombiner &IC, 667 IntrinsicInst &II) { 668 IRBuilder<> Builder(II.getContext()); 669 Builder.SetInsertPoint(&II); 670 Value *Pg = II.getArgOperand(0); 671 Value *Vec = II.getArgOperand(1); 672 auto IntrinsicID = II.getIntrinsicID(); 673 bool IsAfter = IntrinsicID == Intrinsic::aarch64_sve_lasta; 674 675 // lastX(splat(X)) --> X 676 if (auto *SplatVal = getSplatValue(Vec)) 677 return IC.replaceInstUsesWith(II, SplatVal); 678 679 // If x and/or y is a splat value then: 680 // lastX (binop (x, y)) --> 
binop(lastX(x), lastX(y)) 681 Value *LHS, *RHS; 682 if (match(Vec, m_OneUse(m_BinOp(m_Value(LHS), m_Value(RHS))))) { 683 if (isSplatValue(LHS) || isSplatValue(RHS)) { 684 auto *OldBinOp = cast<BinaryOperator>(Vec); 685 auto OpC = OldBinOp->getOpcode(); 686 auto *NewLHS = 687 Builder.CreateIntrinsic(IntrinsicID, {Vec->getType()}, {Pg, LHS}); 688 auto *NewRHS = 689 Builder.CreateIntrinsic(IntrinsicID, {Vec->getType()}, {Pg, RHS}); 690 auto *NewBinOp = BinaryOperator::CreateWithCopiedFlags( 691 OpC, NewLHS, NewRHS, OldBinOp, OldBinOp->getName(), &II); 692 return IC.replaceInstUsesWith(II, NewBinOp); 693 } 694 } 695 696 auto *C = dyn_cast<Constant>(Pg); 697 if (IsAfter && C && C->isNullValue()) { 698 // The intrinsic is extracting lane 0 so use an extract instead. 699 auto *IdxTy = Type::getInt64Ty(II.getContext()); 700 auto *Extract = ExtractElementInst::Create(Vec, ConstantInt::get(IdxTy, 0)); 701 Extract->insertBefore(&II); 702 Extract->takeName(&II); 703 return IC.replaceInstUsesWith(II, Extract); 704 } 705 706 auto *IntrPG = dyn_cast<IntrinsicInst>(Pg); 707 if (!IntrPG) 708 return None; 709 710 if (IntrPG->getIntrinsicID() != Intrinsic::aarch64_sve_ptrue) 711 return None; 712 713 const auto PTruePattern = 714 cast<ConstantInt>(IntrPG->getOperand(0))->getZExtValue(); 715 716 // Can the intrinsic's predicate be converted to a known constant index? 717 unsigned MinNumElts = getNumElementsFromSVEPredPattern(PTruePattern); 718 if (!MinNumElts) 719 return None; 720 721 unsigned Idx = MinNumElts - 1; 722 // Increment the index if extracting the element after the last active 723 // predicate element. 724 if (IsAfter) 725 ++Idx; 726 727 // Ignore extracts whose index is larger than the known minimum vector 728 // length. NOTE: This is an artificial constraint where we prefer to 729 // maintain what the user asked for until an alternative is proven faster. 730 auto *PgVTy = cast<ScalableVectorType>(Pg->getType()); 731 if (Idx >= PgVTy->getMinNumElements()) 732 return None; 733 734 // The intrinsic is extracting a fixed lane so use an extract instead. 735 auto *IdxTy = Type::getInt64Ty(II.getContext()); 736 auto *Extract = ExtractElementInst::Create(Vec, ConstantInt::get(IdxTy, Idx)); 737 Extract->insertBefore(&II); 738 Extract->takeName(&II); 739 return IC.replaceInstUsesWith(II, Extract); 740 } 741 742 static Optional<Instruction *> instCombineRDFFR(InstCombiner &IC, 743 IntrinsicInst &II) { 744 LLVMContext &Ctx = II.getContext(); 745 IRBuilder<> Builder(Ctx); 746 Builder.SetInsertPoint(&II); 747 // Replace rdffr with predicated rdffr.z intrinsic, so that optimizePTestInstr 748 // can work with RDFFR_PP for ptest elimination. 
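// i.e. rdffr() ==> rdffr.z(ptrue(all)); with an all-active governing
// predicate the zeroing form reads the same FFR value, so the rewrite
// preserves the result.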
749 auto *AllPat = 750 ConstantInt::get(Type::getInt32Ty(Ctx), AArch64SVEPredPattern::all); 751 auto *PTrue = Builder.CreateIntrinsic(Intrinsic::aarch64_sve_ptrue, 752 {II.getType()}, {AllPat}); 753 auto *RDFFR = 754 Builder.CreateIntrinsic(Intrinsic::aarch64_sve_rdffr_z, {}, {PTrue}); 755 RDFFR->takeName(&II); 756 return IC.replaceInstUsesWith(II, RDFFR); 757 } 758 759 static Optional<Instruction *> 760 instCombineSVECntElts(InstCombiner &IC, IntrinsicInst &II, unsigned NumElts) { 761 const auto Pattern = cast<ConstantInt>(II.getArgOperand(0))->getZExtValue(); 762 763 if (Pattern == AArch64SVEPredPattern::all) { 764 LLVMContext &Ctx = II.getContext(); 765 IRBuilder<> Builder(Ctx); 766 Builder.SetInsertPoint(&II); 767 768 Constant *StepVal = ConstantInt::get(II.getType(), NumElts); 769 auto *VScale = Builder.CreateVScale(StepVal); 770 VScale->takeName(&II); 771 return IC.replaceInstUsesWith(II, VScale); 772 } 773 774 unsigned MinNumElts = getNumElementsFromSVEPredPattern(Pattern); 775 776 return MinNumElts && NumElts >= MinNumElts 777 ? Optional<Instruction *>(IC.replaceInstUsesWith( 778 II, ConstantInt::get(II.getType(), MinNumElts))) 779 : None; 780 } 781 782 static Optional<Instruction *> instCombineSVEPTest(InstCombiner &IC, 783 IntrinsicInst &II) { 784 IntrinsicInst *Op1 = dyn_cast<IntrinsicInst>(II.getArgOperand(0)); 785 IntrinsicInst *Op2 = dyn_cast<IntrinsicInst>(II.getArgOperand(1)); 786 787 if (Op1 && Op2 && 788 Op1->getIntrinsicID() == Intrinsic::aarch64_sve_convert_to_svbool && 789 Op2->getIntrinsicID() == Intrinsic::aarch64_sve_convert_to_svbool && 790 Op1->getArgOperand(0)->getType() == Op2->getArgOperand(0)->getType()) { 791 792 IRBuilder<> Builder(II.getContext()); 793 Builder.SetInsertPoint(&II); 794 795 Value *Ops[] = {Op1->getArgOperand(0), Op2->getArgOperand(0)}; 796 Type *Tys[] = {Op1->getArgOperand(0)->getType()}; 797 798 auto *PTest = Builder.CreateIntrinsic(II.getIntrinsicID(), Tys, Ops); 799 800 PTest->takeName(&II); 801 return IC.replaceInstUsesWith(II, PTest); 802 } 803 804 return None; 805 } 806 807 static Optional<Instruction *> instCombineSVEVectorFMLA(InstCombiner &IC, 808 IntrinsicInst &II) { 809 // fold (fadd p a (fmul p b c)) -> (fma p a b c) 810 Value *P = II.getOperand(0); 811 Value *A = II.getOperand(1); 812 auto FMul = II.getOperand(2); 813 Value *B, *C; 814 if (!match(FMul, m_Intrinsic<Intrinsic::aarch64_sve_fmul>( 815 m_Specific(P), m_Value(B), m_Value(C)))) 816 return None; 817 818 if (!FMul->hasOneUse()) 819 return None; 820 821 llvm::FastMathFlags FAddFlags = II.getFastMathFlags(); 822 // Stop the combine when the flags on the inputs differ in case dropping flags 823 // would lead to us missing out on more beneficial optimizations. 824 if (FAddFlags != cast<CallInst>(FMul)->getFastMathFlags()) 825 return None; 826 if (!FAddFlags.allowContract()) 827 return None; 828 829 IRBuilder<> Builder(II.getContext()); 830 Builder.SetInsertPoint(&II); 831 auto FMLA = Builder.CreateIntrinsic(Intrinsic::aarch64_sve_fmla, 832 {II.getType()}, {P, A, B, C}, &II); 833 FMLA->setFastMathFlags(FAddFlags); 834 return IC.replaceInstUsesWith(II, FMLA); 835 } 836 837 static bool isAllActivePredicate(Value *Pred) { 838 // Look through convert.from.svbool(convert.to.svbool(...) chain. 
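// e.g. a predicate produced as from_svbool(to_svbool(%p)) with the same
// element count as %p is treated as %p for this check.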
839 Value *UncastedPred; 840 if (match(Pred, m_Intrinsic<Intrinsic::aarch64_sve_convert_from_svbool>( 841 m_Intrinsic<Intrinsic::aarch64_sve_convert_to_svbool>( 842 m_Value(UncastedPred))))) 843 // If the predicate has the same or less lanes than the uncasted 844 // predicate then we know the casting has no effect. 845 if (cast<ScalableVectorType>(Pred->getType())->getMinNumElements() <= 846 cast<ScalableVectorType>(UncastedPred->getType())->getMinNumElements()) 847 Pred = UncastedPred; 848 849 return match(Pred, m_Intrinsic<Intrinsic::aarch64_sve_ptrue>( 850 m_ConstantInt<AArch64SVEPredPattern::all>())); 851 } 852 853 static Optional<Instruction *> 854 instCombineSVELD1(InstCombiner &IC, IntrinsicInst &II, const DataLayout &DL) { 855 IRBuilder<> Builder(II.getContext()); 856 Builder.SetInsertPoint(&II); 857 858 Value *Pred = II.getOperand(0); 859 Value *PtrOp = II.getOperand(1); 860 Type *VecTy = II.getType(); 861 Value *VecPtr = Builder.CreateBitCast(PtrOp, VecTy->getPointerTo()); 862 863 if (isAllActivePredicate(Pred)) { 864 LoadInst *Load = Builder.CreateLoad(VecTy, VecPtr); 865 Load->copyMetadata(II); 866 return IC.replaceInstUsesWith(II, Load); 867 } 868 869 CallInst *MaskedLoad = 870 Builder.CreateMaskedLoad(VecTy, VecPtr, PtrOp->getPointerAlignment(DL), 871 Pred, ConstantAggregateZero::get(VecTy)); 872 MaskedLoad->copyMetadata(II); 873 return IC.replaceInstUsesWith(II, MaskedLoad); 874 } 875 876 static Optional<Instruction *> 877 instCombineSVEST1(InstCombiner &IC, IntrinsicInst &II, const DataLayout &DL) { 878 IRBuilder<> Builder(II.getContext()); 879 Builder.SetInsertPoint(&II); 880 881 Value *VecOp = II.getOperand(0); 882 Value *Pred = II.getOperand(1); 883 Value *PtrOp = II.getOperand(2); 884 Value *VecPtr = 885 Builder.CreateBitCast(PtrOp, VecOp->getType()->getPointerTo()); 886 887 if (isAllActivePredicate(Pred)) { 888 StoreInst *Store = Builder.CreateStore(VecOp, VecPtr); 889 Store->copyMetadata(II); 890 return IC.eraseInstFromFunction(II); 891 } 892 893 CallInst *MaskedStore = Builder.CreateMaskedStore( 894 VecOp, VecPtr, PtrOp->getPointerAlignment(DL), Pred); 895 MaskedStore->copyMetadata(II); 896 return IC.eraseInstFromFunction(II); 897 } 898 899 static Instruction::BinaryOps intrinsicIDToBinOpCode(unsigned Intrinsic) { 900 switch (Intrinsic) { 901 case Intrinsic::aarch64_sve_fmul: 902 return Instruction::BinaryOps::FMul; 903 case Intrinsic::aarch64_sve_fadd: 904 return Instruction::BinaryOps::FAdd; 905 case Intrinsic::aarch64_sve_fsub: 906 return Instruction::BinaryOps::FSub; 907 default: 908 return Instruction::BinaryOpsEnd; 909 } 910 } 911 912 static Optional<Instruction *> instCombineSVEVectorBinOp(InstCombiner &IC, 913 IntrinsicInst &II) { 914 auto *OpPredicate = II.getOperand(0); 915 auto BinOpCode = intrinsicIDToBinOpCode(II.getIntrinsicID()); 916 if (BinOpCode == Instruction::BinaryOpsEnd || 917 !match(OpPredicate, m_Intrinsic<Intrinsic::aarch64_sve_ptrue>( 918 m_ConstantInt<AArch64SVEPredPattern::all>()))) 919 return None; 920 IRBuilder<> Builder(II.getContext()); 921 Builder.SetInsertPoint(&II); 922 Builder.setFastMathFlags(II.getFastMathFlags()); 923 auto BinOp = 924 Builder.CreateBinOp(BinOpCode, II.getOperand(1), II.getOperand(2)); 925 return IC.replaceInstUsesWith(II, BinOp); 926 } 927 928 static Optional<Instruction *> instCombineSVEVectorFAdd(InstCombiner &IC, 929 IntrinsicInst &II) { 930 if (auto FMLA = instCombineSVEVectorFMLA(IC, II)) 931 return FMLA; 932 return instCombineSVEVectorBinOp(IC, II); 933 } 934 935 static Optional<Instruction *> 
instCombineSVEVectorMul(InstCombiner &IC, 936 IntrinsicInst &II) { 937 auto *OpPredicate = II.getOperand(0); 938 auto *OpMultiplicand = II.getOperand(1); 939 auto *OpMultiplier = II.getOperand(2); 940 941 IRBuilder<> Builder(II.getContext()); 942 Builder.SetInsertPoint(&II); 943 944 // Return true if a given instruction is a unit splat value, false otherwise. 945 auto IsUnitSplat = [](auto *I) { 946 auto *SplatValue = getSplatValue(I); 947 if (!SplatValue) 948 return false; 949 return match(SplatValue, m_FPOne()) || match(SplatValue, m_One()); 950 }; 951 952 // Return true if a given instruction is an aarch64_sve_dup intrinsic call 953 // with a unit splat value, false otherwise. 954 auto IsUnitDup = [](auto *I) { 955 auto *IntrI = dyn_cast<IntrinsicInst>(I); 956 if (!IntrI || IntrI->getIntrinsicID() != Intrinsic::aarch64_sve_dup) 957 return false; 958 959 auto *SplatValue = IntrI->getOperand(2); 960 return match(SplatValue, m_FPOne()) || match(SplatValue, m_One()); 961 }; 962 963 if (IsUnitSplat(OpMultiplier)) { 964 // [f]mul pg %n, (dupx 1) => %n 965 OpMultiplicand->takeName(&II); 966 return IC.replaceInstUsesWith(II, OpMultiplicand); 967 } else if (IsUnitDup(OpMultiplier)) { 968 // [f]mul pg %n, (dup pg 1) => %n 969 auto *DupInst = cast<IntrinsicInst>(OpMultiplier); 970 auto *DupPg = DupInst->getOperand(1); 971 // TODO: this is naive. The optimization is still valid if DupPg 972 // 'encompasses' OpPredicate, not only if they're the same predicate. 973 if (OpPredicate == DupPg) { 974 OpMultiplicand->takeName(&II); 975 return IC.replaceInstUsesWith(II, OpMultiplicand); 976 } 977 } 978 979 return instCombineSVEVectorBinOp(IC, II); 980 } 981 982 static Optional<Instruction *> instCombineSVEUnpack(InstCombiner &IC, 983 IntrinsicInst &II) { 984 IRBuilder<> Builder(II.getContext()); 985 Builder.SetInsertPoint(&II); 986 Value *UnpackArg = II.getArgOperand(0); 987 auto *RetTy = cast<ScalableVectorType>(II.getType()); 988 bool IsSigned = II.getIntrinsicID() == Intrinsic::aarch64_sve_sunpkhi || 989 II.getIntrinsicID() == Intrinsic::aarch64_sve_sunpklo; 990 991 // Hi = uunpkhi(splat(X)) --> Hi = splat(extend(X)) 992 // Lo = uunpklo(splat(X)) --> Lo = splat(extend(X)) 993 if (auto *ScalarArg = getSplatValue(UnpackArg)) { 994 ScalarArg = 995 Builder.CreateIntCast(ScalarArg, RetTy->getScalarType(), IsSigned); 996 Value *NewVal = 997 Builder.CreateVectorSplat(RetTy->getElementCount(), ScalarArg); 998 NewVal->takeName(&II); 999 return IC.replaceInstUsesWith(II, NewVal); 1000 } 1001 1002 return None; 1003 } 1004 static Optional<Instruction *> instCombineSVETBL(InstCombiner &IC, 1005 IntrinsicInst &II) { 1006 auto *OpVal = II.getOperand(0); 1007 auto *OpIndices = II.getOperand(1); 1008 VectorType *VTy = cast<VectorType>(II.getType()); 1009 1010 // Check whether OpIndices is a constant splat value < minimal element count 1011 // of result. 1012 auto *SplatValue = dyn_cast_or_null<ConstantInt>(getSplatValue(OpIndices)); 1013 if (!SplatValue || 1014 SplatValue->getValue().uge(VTy->getElementCount().getKnownMinValue())) 1015 return None; 1016 1017 // Convert sve_tbl(OpVal sve_dup_x(SplatValue)) to 1018 // splat_vector(extractelement(OpVal, SplatValue)) for further optimization. 
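// For example (illustration only): sve_tbl(%v, splat(2)), with 2 below the
// minimum element count, becomes splat(extractelement %v, 2).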
1019 IRBuilder<> Builder(II.getContext()); 1020 Builder.SetInsertPoint(&II); 1021 auto *Extract = Builder.CreateExtractElement(OpVal, SplatValue); 1022 auto *VectorSplat = 1023 Builder.CreateVectorSplat(VTy->getElementCount(), Extract); 1024 1025 VectorSplat->takeName(&II); 1026 return IC.replaceInstUsesWith(II, VectorSplat); 1027 } 1028 1029 static Optional<Instruction *> instCombineSVETupleGet(InstCombiner &IC, 1030 IntrinsicInst &II) { 1031 // Try to remove sequences of tuple get/set. 1032 Value *SetTuple, *SetIndex, *SetValue; 1033 auto *GetTuple = II.getArgOperand(0); 1034 auto *GetIndex = II.getArgOperand(1); 1035 // Check that we have tuple_get(GetTuple, GetIndex) where GetTuple is a 1036 // call to tuple_set i.e. tuple_set(SetTuple, SetIndex, SetValue). 1037 // Make sure that the types of the current intrinsic and SetValue match 1038 // in order to safely remove the sequence. 1039 if (!match(GetTuple, 1040 m_Intrinsic<Intrinsic::aarch64_sve_tuple_set>( 1041 m_Value(SetTuple), m_Value(SetIndex), m_Value(SetValue))) || 1042 SetValue->getType() != II.getType()) 1043 return None; 1044 // Case where we get the same index right after setting it. 1045 // tuple_get(tuple_set(SetTuple, SetIndex, SetValue), GetIndex) --> SetValue 1046 if (GetIndex == SetIndex) 1047 return IC.replaceInstUsesWith(II, SetValue); 1048 // If we are getting a different index than what was set in the tuple_set 1049 // intrinsic. We can just set the input tuple to the one up in the chain. 1050 // tuple_get(tuple_set(SetTuple, SetIndex, SetValue), GetIndex) 1051 // --> tuple_get(SetTuple, GetIndex) 1052 return IC.replaceOperand(II, 0, SetTuple); 1053 } 1054 1055 static Optional<Instruction *> instCombineSVEZip(InstCombiner &IC, 1056 IntrinsicInst &II) { 1057 // zip1(uzp1(A, B), uzp2(A, B)) --> A 1058 // zip2(uzp1(A, B), uzp2(A, B)) --> B 1059 Value *A, *B; 1060 if (match(II.getArgOperand(0), 1061 m_Intrinsic<Intrinsic::aarch64_sve_uzp1>(m_Value(A), m_Value(B))) && 1062 match(II.getArgOperand(1), m_Intrinsic<Intrinsic::aarch64_sve_uzp2>( 1063 m_Specific(A), m_Specific(B)))) 1064 return IC.replaceInstUsesWith( 1065 II, (II.getIntrinsicID() == Intrinsic::aarch64_sve_zip1 ? A : B)); 1066 1067 return None; 1068 } 1069 1070 static Optional<Instruction *> instCombineLD1GatherIndex(InstCombiner &IC, 1071 IntrinsicInst &II) { 1072 Value *Mask = II.getOperand(0); 1073 Value *BasePtr = II.getOperand(1); 1074 Value *Index = II.getOperand(2); 1075 Type *Ty = II.getType(); 1076 Value *PassThru = ConstantAggregateZero::get(Ty); 1077 1078 // Contiguous gather => masked load. 
1079 // (sve.ld1.gather.index Mask BasePtr (sve.index IndexBase 1)) 1080 // => (masked.load (gep BasePtr IndexBase) Align Mask zeroinitializer) 1081 Value *IndexBase; 1082 if (match(Index, m_Intrinsic<Intrinsic::aarch64_sve_index>( 1083 m_Value(IndexBase), m_SpecificInt(1)))) { 1084 IRBuilder<> Builder(II.getContext()); 1085 Builder.SetInsertPoint(&II); 1086 1087 Align Alignment = 1088 BasePtr->getPointerAlignment(II.getModule()->getDataLayout()); 1089 1090 Type *VecPtrTy = PointerType::getUnqual(Ty); 1091 Value *Ptr = Builder.CreateGEP( 1092 cast<VectorType>(Ty)->getElementType(), BasePtr, IndexBase); 1093 Ptr = Builder.CreateBitCast(Ptr, VecPtrTy); 1094 CallInst *MaskedLoad = 1095 Builder.CreateMaskedLoad(Ty, Ptr, Alignment, Mask, PassThru); 1096 MaskedLoad->takeName(&II); 1097 return IC.replaceInstUsesWith(II, MaskedLoad); 1098 } 1099 1100 return None; 1101 } 1102 1103 static Optional<Instruction *> instCombineST1ScatterIndex(InstCombiner &IC, 1104 IntrinsicInst &II) { 1105 Value *Val = II.getOperand(0); 1106 Value *Mask = II.getOperand(1); 1107 Value *BasePtr = II.getOperand(2); 1108 Value *Index = II.getOperand(3); 1109 Type *Ty = Val->getType(); 1110 1111 // Contiguous scatter => masked store. 1112 // (sve.st1.scatter.index Value Mask BasePtr (sve.index IndexBase 1)) 1113 // => (masked.store Value (gep BasePtr IndexBase) Align Mask) 1114 Value *IndexBase; 1115 if (match(Index, m_Intrinsic<Intrinsic::aarch64_sve_index>( 1116 m_Value(IndexBase), m_SpecificInt(1)))) { 1117 IRBuilder<> Builder(II.getContext()); 1118 Builder.SetInsertPoint(&II); 1119 1120 Align Alignment = 1121 BasePtr->getPointerAlignment(II.getModule()->getDataLayout()); 1122 1123 Value *Ptr = Builder.CreateGEP( 1124 cast<VectorType>(Ty)->getElementType(), BasePtr, IndexBase); 1125 Type *VecPtrTy = PointerType::getUnqual(Ty); 1126 Ptr = Builder.CreateBitCast(Ptr, VecPtrTy); 1127 1128 (void)Builder.CreateMaskedStore(Val, Ptr, Alignment, Mask); 1129 1130 return IC.eraseInstFromFunction(II); 1131 } 1132 1133 return None; 1134 } 1135 1136 static Optional<Instruction *> instCombineSVESDIV(InstCombiner &IC, 1137 IntrinsicInst &II) { 1138 IRBuilder<> Builder(II.getContext()); 1139 Builder.SetInsertPoint(&II); 1140 Type *Int32Ty = Builder.getInt32Ty(); 1141 Value *Pred = II.getOperand(0); 1142 Value *Vec = II.getOperand(1); 1143 Value *DivVec = II.getOperand(2); 1144 1145 Value *SplatValue = getSplatValue(DivVec); 1146 ConstantInt *SplatConstantInt = dyn_cast_or_null<ConstantInt>(SplatValue); 1147 if (!SplatConstantInt) 1148 return None; 1149 APInt Divisor = SplatConstantInt->getValue(); 1150 1151 if (Divisor.isPowerOf2()) { 1152 Constant *DivisorLog2 = ConstantInt::get(Int32Ty, Divisor.logBase2()); 1153 auto ASRD = Builder.CreateIntrinsic( 1154 Intrinsic::aarch64_sve_asrd, {II.getType()}, {Pred, Vec, DivisorLog2}); 1155 return IC.replaceInstUsesWith(II, ASRD); 1156 } 1157 if (Divisor.isNegatedPowerOf2()) { 1158 Divisor.negate(); 1159 Constant *DivisorLog2 = ConstantInt::get(Int32Ty, Divisor.logBase2()); 1160 auto ASRD = Builder.CreateIntrinsic( 1161 Intrinsic::aarch64_sve_asrd, {II.getType()}, {Pred, Vec, DivisorLog2}); 1162 auto NEG = Builder.CreateIntrinsic(Intrinsic::aarch64_sve_neg, 1163 {ASRD->getType()}, {ASRD, Pred, ASRD}); 1164 return IC.replaceInstUsesWith(II, NEG); 1165 } 1166 1167 return None; 1168 } 1169 1170 Optional<Instruction *> 1171 AArch64TTIImpl::instCombineIntrinsic(InstCombiner &IC, 1172 IntrinsicInst &II) const { 1173 Intrinsic::ID IID = II.getIntrinsicID(); 1174 switch (IID) { 1175 default: 1176 break; 
1177 case Intrinsic::aarch64_sve_convert_from_svbool: 1178 return instCombineConvertFromSVBool(IC, II); 1179 case Intrinsic::aarch64_sve_dup: 1180 return instCombineSVEDup(IC, II); 1181 case Intrinsic::aarch64_sve_dup_x: 1182 return instCombineSVEDupX(IC, II); 1183 case Intrinsic::aarch64_sve_cmpne: 1184 case Intrinsic::aarch64_sve_cmpne_wide: 1185 return instCombineSVECmpNE(IC, II); 1186 case Intrinsic::aarch64_sve_rdffr: 1187 return instCombineRDFFR(IC, II); 1188 case Intrinsic::aarch64_sve_lasta: 1189 case Intrinsic::aarch64_sve_lastb: 1190 return instCombineSVELast(IC, II); 1191 case Intrinsic::aarch64_sve_cntd: 1192 return instCombineSVECntElts(IC, II, 2); 1193 case Intrinsic::aarch64_sve_cntw: 1194 return instCombineSVECntElts(IC, II, 4); 1195 case Intrinsic::aarch64_sve_cnth: 1196 return instCombineSVECntElts(IC, II, 8); 1197 case Intrinsic::aarch64_sve_cntb: 1198 return instCombineSVECntElts(IC, II, 16); 1199 case Intrinsic::aarch64_sve_ptest_any: 1200 case Intrinsic::aarch64_sve_ptest_first: 1201 case Intrinsic::aarch64_sve_ptest_last: 1202 return instCombineSVEPTest(IC, II); 1203 case Intrinsic::aarch64_sve_mul: 1204 case Intrinsic::aarch64_sve_fmul: 1205 return instCombineSVEVectorMul(IC, II); 1206 case Intrinsic::aarch64_sve_fadd: 1207 return instCombineSVEVectorFAdd(IC, II); 1208 case Intrinsic::aarch64_sve_fsub: 1209 return instCombineSVEVectorBinOp(IC, II); 1210 case Intrinsic::aarch64_sve_tbl: 1211 return instCombineSVETBL(IC, II); 1212 case Intrinsic::aarch64_sve_uunpkhi: 1213 case Intrinsic::aarch64_sve_uunpklo: 1214 case Intrinsic::aarch64_sve_sunpkhi: 1215 case Intrinsic::aarch64_sve_sunpklo: 1216 return instCombineSVEUnpack(IC, II); 1217 case Intrinsic::aarch64_sve_tuple_get: 1218 return instCombineSVETupleGet(IC, II); 1219 case Intrinsic::aarch64_sve_zip1: 1220 case Intrinsic::aarch64_sve_zip2: 1221 return instCombineSVEZip(IC, II); 1222 case Intrinsic::aarch64_sve_ld1_gather_index: 1223 return instCombineLD1GatherIndex(IC, II); 1224 case Intrinsic::aarch64_sve_st1_scatter_index: 1225 return instCombineST1ScatterIndex(IC, II); 1226 case Intrinsic::aarch64_sve_ld1: 1227 return instCombineSVELD1(IC, II, DL); 1228 case Intrinsic::aarch64_sve_st1: 1229 return instCombineSVEST1(IC, II, DL); 1230 case Intrinsic::aarch64_sve_sdiv: 1231 return instCombineSVESDIV(IC, II); 1232 } 1233 1234 return None; 1235 } 1236 1237 Optional<Value *> AArch64TTIImpl::simplifyDemandedVectorEltsIntrinsic( 1238 InstCombiner &IC, IntrinsicInst &II, APInt OrigDemandedElts, 1239 APInt &UndefElts, APInt &UndefElts2, APInt &UndefElts3, 1240 std::function<void(Instruction *, unsigned, APInt, APInt &)> 1241 SimplifyAndSetOp) const { 1242 switch (II.getIntrinsicID()) { 1243 default: 1244 break; 1245 case Intrinsic::aarch64_neon_fcvtxn: 1246 case Intrinsic::aarch64_neon_rshrn: 1247 case Intrinsic::aarch64_neon_sqrshrn: 1248 case Intrinsic::aarch64_neon_sqrshrun: 1249 case Intrinsic::aarch64_neon_sqshrn: 1250 case Intrinsic::aarch64_neon_sqshrun: 1251 case Intrinsic::aarch64_neon_sqxtn: 1252 case Intrinsic::aarch64_neon_sqxtun: 1253 case Intrinsic::aarch64_neon_uqrshrn: 1254 case Intrinsic::aarch64_neon_uqshrn: 1255 case Intrinsic::aarch64_neon_uqxtn: 1256 SimplifyAndSetOp(&II, 0, OrigDemandedElts, UndefElts); 1257 break; 1258 } 1259 1260 return None; 1261 } 1262 1263 bool AArch64TTIImpl::isWideningInstruction(Type *DstTy, unsigned Opcode, 1264 ArrayRef<const Value *> Args) { 1265 1266 // A helper that returns a vector type from the given type. 
The number of 1267 // elements in type Ty determine the vector width. 1268 auto toVectorTy = [&](Type *ArgTy) { 1269 return VectorType::get(ArgTy->getScalarType(), 1270 cast<VectorType>(DstTy)->getElementCount()); 1271 }; 1272 1273 // Exit early if DstTy is not a vector type whose elements are at least 1274 // 16-bits wide. 1275 if (!DstTy->isVectorTy() || DstTy->getScalarSizeInBits() < 16) 1276 return false; 1277 1278 // Determine if the operation has a widening variant. We consider both the 1279 // "long" (e.g., usubl) and "wide" (e.g., usubw) versions of the 1280 // instructions. 1281 // 1282 // TODO: Add additional widening operations (e.g., mul, shl, etc.) once we 1283 // verify that their extending operands are eliminated during code 1284 // generation. 1285 switch (Opcode) { 1286 case Instruction::Add: // UADDL(2), SADDL(2), UADDW(2), SADDW(2). 1287 case Instruction::Sub: // USUBL(2), SSUBL(2), USUBW(2), SSUBW(2). 1288 break; 1289 default: 1290 return false; 1291 } 1292 1293 // To be a widening instruction (either the "wide" or "long" versions), the 1294 // second operand must be a sign- or zero extend having a single user. We 1295 // only consider extends having a single user because they may otherwise not 1296 // be eliminated. 1297 if (Args.size() != 2 || 1298 (!isa<SExtInst>(Args[1]) && !isa<ZExtInst>(Args[1])) || 1299 !Args[1]->hasOneUse()) 1300 return false; 1301 auto *Extend = cast<CastInst>(Args[1]); 1302 1303 // Legalize the destination type and ensure it can be used in a widening 1304 // operation. 1305 auto DstTyL = TLI->getTypeLegalizationCost(DL, DstTy); 1306 unsigned DstElTySize = DstTyL.second.getScalarSizeInBits(); 1307 if (!DstTyL.second.isVector() || DstElTySize != DstTy->getScalarSizeInBits()) 1308 return false; 1309 1310 // Legalize the source type and ensure it can be used in a widening 1311 // operation. 1312 auto *SrcTy = toVectorTy(Extend->getSrcTy()); 1313 auto SrcTyL = TLI->getTypeLegalizationCost(DL, SrcTy); 1314 unsigned SrcElTySize = SrcTyL.second.getScalarSizeInBits(); 1315 if (!SrcTyL.second.isVector() || SrcElTySize != SrcTy->getScalarSizeInBits()) 1316 return false; 1317 1318 // Get the total number of vector elements in the legalized types. 1319 InstructionCost NumDstEls = 1320 DstTyL.first * DstTyL.second.getVectorMinNumElements(); 1321 InstructionCost NumSrcEls = 1322 SrcTyL.first * SrcTyL.second.getVectorMinNumElements(); 1323 1324 // Return true if the legalized types have the same number of vector elements 1325 // and the destination element type size is twice that of the source type. 1326 return NumDstEls == NumSrcEls && 2 * SrcElTySize == DstElTySize; 1327 } 1328 1329 InstructionCost AArch64TTIImpl::getCastInstrCost(unsigned Opcode, Type *Dst, 1330 Type *Src, 1331 TTI::CastContextHint CCH, 1332 TTI::TargetCostKind CostKind, 1333 const Instruction *I) { 1334 int ISD = TLI->InstructionOpcodeToISD(Opcode); 1335 assert(ISD && "Invalid opcode"); 1336 1337 // If the cast is observable, and it is used by a widening instruction (e.g., 1338 // uaddl, saddw, etc.), it may be free. 1339 if (I && I->hasOneUse()) { 1340 auto *SingleUser = cast<Instruction>(*I->user_begin()); 1341 SmallVector<const Value *, 4> Operands(SingleUser->operand_values()); 1342 if (isWideningInstruction(Dst, SingleUser->getOpcode(), Operands)) { 1343 // If the cast is the second operand, it is free. We will generate either 1344 // a "wide" or "long" version of the widening instruction. 
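// Illustrative IR (assumed for this example): with
//   %e = zext <8 x i8> %b to <8 x i16>
//   %s = add <8 x i16> %a, %e
// codegen can select uaddw, so the zext itself costs nothing here.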
1345 if (I == SingleUser->getOperand(1)) 1346 return 0; 1347 // If the cast is not the second operand, it will be free if it looks the 1348 // same as the second operand. In this case, we will generate a "long" 1349 // version of the widening instruction. 1350 if (auto *Cast = dyn_cast<CastInst>(SingleUser->getOperand(1))) 1351 if (I->getOpcode() == unsigned(Cast->getOpcode()) && 1352 cast<CastInst>(I)->getSrcTy() == Cast->getSrcTy()) 1353 return 0; 1354 } 1355 } 1356 1357 // TODO: Allow non-throughput costs that aren't binary. 1358 auto AdjustCost = [&CostKind](InstructionCost Cost) -> InstructionCost { 1359 if (CostKind != TTI::TCK_RecipThroughput) 1360 return Cost == 0 ? 0 : 1; 1361 return Cost; 1362 }; 1363 1364 EVT SrcTy = TLI->getValueType(DL, Src); 1365 EVT DstTy = TLI->getValueType(DL, Dst); 1366 1367 if (!SrcTy.isSimple() || !DstTy.isSimple()) 1368 return AdjustCost( 1369 BaseT::getCastInstrCost(Opcode, Dst, Src, CCH, CostKind, I)); 1370 1371 static const TypeConversionCostTblEntry 1372 ConversionTbl[] = { 1373 { ISD::TRUNCATE, MVT::v4i16, MVT::v4i32, 1 }, 1374 { ISD::TRUNCATE, MVT::v4i32, MVT::v4i64, 0 }, 1375 { ISD::TRUNCATE, MVT::v8i8, MVT::v8i32, 3 }, 1376 { ISD::TRUNCATE, MVT::v16i8, MVT::v16i32, 6 }, 1377 1378 // Truncations on nxvmiN 1379 { ISD::TRUNCATE, MVT::nxv2i1, MVT::nxv2i16, 1 }, 1380 { ISD::TRUNCATE, MVT::nxv2i1, MVT::nxv2i32, 1 }, 1381 { ISD::TRUNCATE, MVT::nxv2i1, MVT::nxv2i64, 1 }, 1382 { ISD::TRUNCATE, MVT::nxv4i1, MVT::nxv4i16, 1 }, 1383 { ISD::TRUNCATE, MVT::nxv4i1, MVT::nxv4i32, 1 }, 1384 { ISD::TRUNCATE, MVT::nxv4i1, MVT::nxv4i64, 2 }, 1385 { ISD::TRUNCATE, MVT::nxv8i1, MVT::nxv8i16, 1 }, 1386 { ISD::TRUNCATE, MVT::nxv8i1, MVT::nxv8i32, 3 }, 1387 { ISD::TRUNCATE, MVT::nxv8i1, MVT::nxv8i64, 5 }, 1388 { ISD::TRUNCATE, MVT::nxv16i1, MVT::nxv16i8, 1 }, 1389 { ISD::TRUNCATE, MVT::nxv2i16, MVT::nxv2i32, 1 }, 1390 { ISD::TRUNCATE, MVT::nxv2i32, MVT::nxv2i64, 1 }, 1391 { ISD::TRUNCATE, MVT::nxv4i16, MVT::nxv4i32, 1 }, 1392 { ISD::TRUNCATE, MVT::nxv4i32, MVT::nxv4i64, 2 }, 1393 { ISD::TRUNCATE, MVT::nxv8i16, MVT::nxv8i32, 3 }, 1394 { ISD::TRUNCATE, MVT::nxv8i32, MVT::nxv8i64, 6 }, 1395 1396 // The number of shll instructions for the extension. 
1397 { ISD::SIGN_EXTEND, MVT::v4i64, MVT::v4i16, 3 }, 1398 { ISD::ZERO_EXTEND, MVT::v4i64, MVT::v4i16, 3 }, 1399 { ISD::SIGN_EXTEND, MVT::v4i64, MVT::v4i32, 2 }, 1400 { ISD::ZERO_EXTEND, MVT::v4i64, MVT::v4i32, 2 }, 1401 { ISD::SIGN_EXTEND, MVT::v8i32, MVT::v8i8, 3 }, 1402 { ISD::ZERO_EXTEND, MVT::v8i32, MVT::v8i8, 3 }, 1403 { ISD::SIGN_EXTEND, MVT::v8i32, MVT::v8i16, 2 }, 1404 { ISD::ZERO_EXTEND, MVT::v8i32, MVT::v8i16, 2 }, 1405 { ISD::SIGN_EXTEND, MVT::v8i64, MVT::v8i8, 7 }, 1406 { ISD::ZERO_EXTEND, MVT::v8i64, MVT::v8i8, 7 }, 1407 { ISD::SIGN_EXTEND, MVT::v8i64, MVT::v8i16, 6 }, 1408 { ISD::ZERO_EXTEND, MVT::v8i64, MVT::v8i16, 6 }, 1409 { ISD::SIGN_EXTEND, MVT::v16i16, MVT::v16i8, 2 }, 1410 { ISD::ZERO_EXTEND, MVT::v16i16, MVT::v16i8, 2 }, 1411 { ISD::SIGN_EXTEND, MVT::v16i32, MVT::v16i8, 6 }, 1412 { ISD::ZERO_EXTEND, MVT::v16i32, MVT::v16i8, 6 }, 1413 1414 // LowerVectorINT_TO_FP: 1415 { ISD::SINT_TO_FP, MVT::v2f32, MVT::v2i32, 1 }, 1416 { ISD::SINT_TO_FP, MVT::v4f32, MVT::v4i32, 1 }, 1417 { ISD::SINT_TO_FP, MVT::v2f64, MVT::v2i64, 1 }, 1418 { ISD::UINT_TO_FP, MVT::v2f32, MVT::v2i32, 1 }, 1419 { ISD::UINT_TO_FP, MVT::v4f32, MVT::v4i32, 1 }, 1420 { ISD::UINT_TO_FP, MVT::v2f64, MVT::v2i64, 1 }, 1421 1422 // Complex: to v2f32 1423 { ISD::SINT_TO_FP, MVT::v2f32, MVT::v2i8, 3 }, 1424 { ISD::SINT_TO_FP, MVT::v2f32, MVT::v2i16, 3 }, 1425 { ISD::SINT_TO_FP, MVT::v2f32, MVT::v2i64, 2 }, 1426 { ISD::UINT_TO_FP, MVT::v2f32, MVT::v2i8, 3 }, 1427 { ISD::UINT_TO_FP, MVT::v2f32, MVT::v2i16, 3 }, 1428 { ISD::UINT_TO_FP, MVT::v2f32, MVT::v2i64, 2 }, 1429 1430 // Complex: to v4f32 1431 { ISD::SINT_TO_FP, MVT::v4f32, MVT::v4i8, 4 }, 1432 { ISD::SINT_TO_FP, MVT::v4f32, MVT::v4i16, 2 }, 1433 { ISD::UINT_TO_FP, MVT::v4f32, MVT::v4i8, 3 }, 1434 { ISD::UINT_TO_FP, MVT::v4f32, MVT::v4i16, 2 }, 1435 1436 // Complex: to v8f32 1437 { ISD::SINT_TO_FP, MVT::v8f32, MVT::v8i8, 10 }, 1438 { ISD::SINT_TO_FP, MVT::v8f32, MVT::v8i16, 4 }, 1439 { ISD::UINT_TO_FP, MVT::v8f32, MVT::v8i8, 10 }, 1440 { ISD::UINT_TO_FP, MVT::v8f32, MVT::v8i16, 4 }, 1441 1442 // Complex: to v16f32 1443 { ISD::SINT_TO_FP, MVT::v16f32, MVT::v16i8, 21 }, 1444 { ISD::UINT_TO_FP, MVT::v16f32, MVT::v16i8, 21 }, 1445 1446 // Complex: to v2f64 1447 { ISD::SINT_TO_FP, MVT::v2f64, MVT::v2i8, 4 }, 1448 { ISD::SINT_TO_FP, MVT::v2f64, MVT::v2i16, 4 }, 1449 { ISD::SINT_TO_FP, MVT::v2f64, MVT::v2i32, 2 }, 1450 { ISD::UINT_TO_FP, MVT::v2f64, MVT::v2i8, 4 }, 1451 { ISD::UINT_TO_FP, MVT::v2f64, MVT::v2i16, 4 }, 1452 { ISD::UINT_TO_FP, MVT::v2f64, MVT::v2i32, 2 }, 1453 1454 1455 // LowerVectorFP_TO_INT 1456 { ISD::FP_TO_SINT, MVT::v2i32, MVT::v2f32, 1 }, 1457 { ISD::FP_TO_SINT, MVT::v4i32, MVT::v4f32, 1 }, 1458 { ISD::FP_TO_SINT, MVT::v2i64, MVT::v2f64, 1 }, 1459 { ISD::FP_TO_UINT, MVT::v2i32, MVT::v2f32, 1 }, 1460 { ISD::FP_TO_UINT, MVT::v4i32, MVT::v4f32, 1 }, 1461 { ISD::FP_TO_UINT, MVT::v2i64, MVT::v2f64, 1 }, 1462 1463 // Complex, from v2f32: legal type is v2i32 (no cost) or v2i64 (1 ext). 
1464 { ISD::FP_TO_SINT, MVT::v2i64, MVT::v2f32, 2 }, 1465 { ISD::FP_TO_SINT, MVT::v2i16, MVT::v2f32, 1 }, 1466 { ISD::FP_TO_SINT, MVT::v2i8, MVT::v2f32, 1 }, 1467 { ISD::FP_TO_UINT, MVT::v2i64, MVT::v2f32, 2 }, 1468 { ISD::FP_TO_UINT, MVT::v2i16, MVT::v2f32, 1 }, 1469 { ISD::FP_TO_UINT, MVT::v2i8, MVT::v2f32, 1 }, 1470 1471 // Complex, from v4f32: legal type is v4i16, 1 narrowing => ~2 1472 { ISD::FP_TO_SINT, MVT::v4i16, MVT::v4f32, 2 }, 1473 { ISD::FP_TO_SINT, MVT::v4i8, MVT::v4f32, 2 }, 1474 { ISD::FP_TO_UINT, MVT::v4i16, MVT::v4f32, 2 }, 1475 { ISD::FP_TO_UINT, MVT::v4i8, MVT::v4f32, 2 }, 1476 1477 // Complex, from nxv2f32. 1478 { ISD::FP_TO_SINT, MVT::nxv2i64, MVT::nxv2f32, 1 }, 1479 { ISD::FP_TO_SINT, MVT::nxv2i32, MVT::nxv2f32, 1 }, 1480 { ISD::FP_TO_SINT, MVT::nxv2i16, MVT::nxv2f32, 1 }, 1481 { ISD::FP_TO_SINT, MVT::nxv2i8, MVT::nxv2f32, 1 }, 1482 { ISD::FP_TO_UINT, MVT::nxv2i64, MVT::nxv2f32, 1 }, 1483 { ISD::FP_TO_UINT, MVT::nxv2i32, MVT::nxv2f32, 1 }, 1484 { ISD::FP_TO_UINT, MVT::nxv2i16, MVT::nxv2f32, 1 }, 1485 { ISD::FP_TO_UINT, MVT::nxv2i8, MVT::nxv2f32, 1 }, 1486 1487 // Complex, from v2f64: legal type is v2i32, 1 narrowing => ~2. 1488 { ISD::FP_TO_SINT, MVT::v2i32, MVT::v2f64, 2 }, 1489 { ISD::FP_TO_SINT, MVT::v2i16, MVT::v2f64, 2 }, 1490 { ISD::FP_TO_SINT, MVT::v2i8, MVT::v2f64, 2 }, 1491 { ISD::FP_TO_UINT, MVT::v2i32, MVT::v2f64, 2 }, 1492 { ISD::FP_TO_UINT, MVT::v2i16, MVT::v2f64, 2 }, 1493 { ISD::FP_TO_UINT, MVT::v2i8, MVT::v2f64, 2 }, 1494 1495 // Complex, from nxv2f64. 1496 { ISD::FP_TO_SINT, MVT::nxv2i64, MVT::nxv2f64, 1 }, 1497 { ISD::FP_TO_SINT, MVT::nxv2i32, MVT::nxv2f64, 1 }, 1498 { ISD::FP_TO_SINT, MVT::nxv2i16, MVT::nxv2f64, 1 }, 1499 { ISD::FP_TO_SINT, MVT::nxv2i8, MVT::nxv2f64, 1 }, 1500 { ISD::FP_TO_UINT, MVT::nxv2i64, MVT::nxv2f64, 1 }, 1501 { ISD::FP_TO_UINT, MVT::nxv2i32, MVT::nxv2f64, 1 }, 1502 { ISD::FP_TO_UINT, MVT::nxv2i16, MVT::nxv2f64, 1 }, 1503 { ISD::FP_TO_UINT, MVT::nxv2i8, MVT::nxv2f64, 1 }, 1504 1505 // Complex, from nxv4f32. 1506 { ISD::FP_TO_SINT, MVT::nxv4i64, MVT::nxv4f32, 4 }, 1507 { ISD::FP_TO_SINT, MVT::nxv4i32, MVT::nxv4f32, 1 }, 1508 { ISD::FP_TO_SINT, MVT::nxv4i16, MVT::nxv4f32, 1 }, 1509 { ISD::FP_TO_SINT, MVT::nxv4i8, MVT::nxv4f32, 1 }, 1510 { ISD::FP_TO_UINT, MVT::nxv4i64, MVT::nxv4f32, 4 }, 1511 { ISD::FP_TO_UINT, MVT::nxv4i32, MVT::nxv4f32, 1 }, 1512 { ISD::FP_TO_UINT, MVT::nxv4i16, MVT::nxv4f32, 1 }, 1513 { ISD::FP_TO_UINT, MVT::nxv4i8, MVT::nxv4f32, 1 }, 1514 1515 // Complex, from nxv8f64. Illegal -> illegal conversions not required. 1516 { ISD::FP_TO_SINT, MVT::nxv8i16, MVT::nxv8f64, 7 }, 1517 { ISD::FP_TO_SINT, MVT::nxv8i8, MVT::nxv8f64, 7 }, 1518 { ISD::FP_TO_UINT, MVT::nxv8i16, MVT::nxv8f64, 7 }, 1519 { ISD::FP_TO_UINT, MVT::nxv8i8, MVT::nxv8f64, 7 }, 1520 1521 // Complex, from nxv4f64. Illegal -> illegal conversions not required. 1522 { ISD::FP_TO_SINT, MVT::nxv4i32, MVT::nxv4f64, 3 }, 1523 { ISD::FP_TO_SINT, MVT::nxv4i16, MVT::nxv4f64, 3 }, 1524 { ISD::FP_TO_SINT, MVT::nxv4i8, MVT::nxv4f64, 3 }, 1525 { ISD::FP_TO_UINT, MVT::nxv4i32, MVT::nxv4f64, 3 }, 1526 { ISD::FP_TO_UINT, MVT::nxv4i16, MVT::nxv4f64, 3 }, 1527 { ISD::FP_TO_UINT, MVT::nxv4i8, MVT::nxv4f64, 3 }, 1528 1529 // Complex, from nxv8f32. Illegal -> illegal conversions not required. 1530 { ISD::FP_TO_SINT, MVT::nxv8i16, MVT::nxv8f32, 3 }, 1531 { ISD::FP_TO_SINT, MVT::nxv8i8, MVT::nxv8f32, 3 }, 1532 { ISD::FP_TO_UINT, MVT::nxv8i16, MVT::nxv8f32, 3 }, 1533 { ISD::FP_TO_UINT, MVT::nxv8i8, MVT::nxv8f32, 3 }, 1534 1535 // Complex, from nxv8f16. 
1536 { ISD::FP_TO_SINT, MVT::nxv8i64, MVT::nxv8f16, 10 }, 1537 { ISD::FP_TO_SINT, MVT::nxv8i32, MVT::nxv8f16, 4 }, 1538 { ISD::FP_TO_SINT, MVT::nxv8i16, MVT::nxv8f16, 1 }, 1539 { ISD::FP_TO_SINT, MVT::nxv8i8, MVT::nxv8f16, 1 }, 1540 { ISD::FP_TO_UINT, MVT::nxv8i64, MVT::nxv8f16, 10 }, 1541 { ISD::FP_TO_UINT, MVT::nxv8i32, MVT::nxv8f16, 4 }, 1542 { ISD::FP_TO_UINT, MVT::nxv8i16, MVT::nxv8f16, 1 }, 1543 { ISD::FP_TO_UINT, MVT::nxv8i8, MVT::nxv8f16, 1 }, 1544 1545 // Complex, from nxv4f16. 1546 { ISD::FP_TO_SINT, MVT::nxv4i64, MVT::nxv4f16, 4 }, 1547 { ISD::FP_TO_SINT, MVT::nxv4i32, MVT::nxv4f16, 1 }, 1548 { ISD::FP_TO_SINT, MVT::nxv4i16, MVT::nxv4f16, 1 }, 1549 { ISD::FP_TO_SINT, MVT::nxv4i8, MVT::nxv4f16, 1 }, 1550 { ISD::FP_TO_UINT, MVT::nxv4i64, MVT::nxv4f16, 4 }, 1551 { ISD::FP_TO_UINT, MVT::nxv4i32, MVT::nxv4f16, 1 }, 1552 { ISD::FP_TO_UINT, MVT::nxv4i16, MVT::nxv4f16, 1 }, 1553 { ISD::FP_TO_UINT, MVT::nxv4i8, MVT::nxv4f16, 1 }, 1554 1555 // Complex, from nxv2f16. 1556 { ISD::FP_TO_SINT, MVT::nxv2i64, MVT::nxv2f16, 1 }, 1557 { ISD::FP_TO_SINT, MVT::nxv2i32, MVT::nxv2f16, 1 }, 1558 { ISD::FP_TO_SINT, MVT::nxv2i16, MVT::nxv2f16, 1 }, 1559 { ISD::FP_TO_SINT, MVT::nxv2i8, MVT::nxv2f16, 1 }, 1560 { ISD::FP_TO_UINT, MVT::nxv2i64, MVT::nxv2f16, 1 }, 1561 { ISD::FP_TO_UINT, MVT::nxv2i32, MVT::nxv2f16, 1 }, 1562 { ISD::FP_TO_UINT, MVT::nxv2i16, MVT::nxv2f16, 1 }, 1563 { ISD::FP_TO_UINT, MVT::nxv2i8, MVT::nxv2f16, 1 }, 1564 1565 // Truncate from nxvmf32 to nxvmf16. 1566 { ISD::FP_ROUND, MVT::nxv2f16, MVT::nxv2f32, 1 }, 1567 { ISD::FP_ROUND, MVT::nxv4f16, MVT::nxv4f32, 1 }, 1568 { ISD::FP_ROUND, MVT::nxv8f16, MVT::nxv8f32, 3 }, 1569 1570 // Truncate from nxvmf64 to nxvmf16. 1571 { ISD::FP_ROUND, MVT::nxv2f16, MVT::nxv2f64, 1 }, 1572 { ISD::FP_ROUND, MVT::nxv4f16, MVT::nxv4f64, 3 }, 1573 { ISD::FP_ROUND, MVT::nxv8f16, MVT::nxv8f64, 7 }, 1574 1575 // Truncate from nxvmf64 to nxvmf32. 1576 { ISD::FP_ROUND, MVT::nxv2f32, MVT::nxv2f64, 1 }, 1577 { ISD::FP_ROUND, MVT::nxv4f32, MVT::nxv4f64, 3 }, 1578 { ISD::FP_ROUND, MVT::nxv8f32, MVT::nxv8f64, 6 }, 1579 1580 // Extend from nxvmf16 to nxvmf32. 1581 { ISD::FP_EXTEND, MVT::nxv2f32, MVT::nxv2f16, 1}, 1582 { ISD::FP_EXTEND, MVT::nxv4f32, MVT::nxv4f16, 1}, 1583 { ISD::FP_EXTEND, MVT::nxv8f32, MVT::nxv8f16, 2}, 1584 1585 // Extend from nxvmf16 to nxvmf64. 1586 { ISD::FP_EXTEND, MVT::nxv2f64, MVT::nxv2f16, 1}, 1587 { ISD::FP_EXTEND, MVT::nxv4f64, MVT::nxv4f16, 2}, 1588 { ISD::FP_EXTEND, MVT::nxv8f64, MVT::nxv8f16, 4}, 1589 1590 // Extend from nxvmf32 to nxvmf64. 
1591 { ISD::FP_EXTEND, MVT::nxv2f64, MVT::nxv2f32, 1}, 1592 { ISD::FP_EXTEND, MVT::nxv4f64, MVT::nxv4f32, 2}, 1593 { ISD::FP_EXTEND, MVT::nxv8f64, MVT::nxv8f32, 6}, 1594 1595 // Bitcasts from float to integer 1596 { ISD::BITCAST, MVT::nxv2f16, MVT::nxv2i16, 0 }, 1597 { ISD::BITCAST, MVT::nxv4f16, MVT::nxv4i16, 0 }, 1598 { ISD::BITCAST, MVT::nxv2f32, MVT::nxv2i32, 0 }, 1599 1600 // Bitcasts from integer to float 1601 { ISD::BITCAST, MVT::nxv2i16, MVT::nxv2f16, 0 }, 1602 { ISD::BITCAST, MVT::nxv4i16, MVT::nxv4f16, 0 }, 1603 { ISD::BITCAST, MVT::nxv2i32, MVT::nxv2f32, 0 }, 1604 }; 1605 1606 if (const auto *Entry = ConvertCostTableLookup(ConversionTbl, ISD, 1607 DstTy.getSimpleVT(), 1608 SrcTy.getSimpleVT())) 1609 return AdjustCost(Entry->Cost); 1610 1611 static const TypeConversionCostTblEntry FP16Tbl[] = { 1612 {ISD::FP_TO_SINT, MVT::v4i8, MVT::v4f16, 1}, // fcvtzs 1613 {ISD::FP_TO_UINT, MVT::v4i8, MVT::v4f16, 1}, 1614 {ISD::FP_TO_SINT, MVT::v4i16, MVT::v4f16, 1}, // fcvtzs 1615 {ISD::FP_TO_UINT, MVT::v4i16, MVT::v4f16, 1}, 1616 {ISD::FP_TO_SINT, MVT::v4i32, MVT::v4f16, 2}, // fcvtl+fcvtzs 1617 {ISD::FP_TO_UINT, MVT::v4i32, MVT::v4f16, 2}, 1618 {ISD::FP_TO_SINT, MVT::v8i8, MVT::v8f16, 2}, // fcvtzs+xtn 1619 {ISD::FP_TO_UINT, MVT::v8i8, MVT::v8f16, 2}, 1620 {ISD::FP_TO_SINT, MVT::v8i16, MVT::v8f16, 1}, // fcvtzs 1621 {ISD::FP_TO_UINT, MVT::v8i16, MVT::v8f16, 1}, 1622 {ISD::FP_TO_SINT, MVT::v8i32, MVT::v8f16, 4}, // 2*fcvtl+2*fcvtzs 1623 {ISD::FP_TO_UINT, MVT::v8i32, MVT::v8f16, 4}, 1624 {ISD::FP_TO_SINT, MVT::v16i8, MVT::v16f16, 3}, // 2*fcvtzs+xtn 1625 {ISD::FP_TO_UINT, MVT::v16i8, MVT::v16f16, 3}, 1626 {ISD::FP_TO_SINT, MVT::v16i16, MVT::v16f16, 2}, // 2*fcvtzs 1627 {ISD::FP_TO_UINT, MVT::v16i16, MVT::v16f16, 2}, 1628 {ISD::FP_TO_SINT, MVT::v16i32, MVT::v16f16, 8}, // 4*fcvtl+4*fcvtzs 1629 {ISD::FP_TO_UINT, MVT::v16i32, MVT::v16f16, 8}, 1630 {ISD::UINT_TO_FP, MVT::v8f16, MVT::v8i8, 2}, // ushll + ucvtf 1631 {ISD::SINT_TO_FP, MVT::v8f16, MVT::v8i8, 2}, // sshll + scvtf 1632 {ISD::UINT_TO_FP, MVT::v16f16, MVT::v16i8, 4}, // 2 * ushl(2) + 2 * ucvtf 1633 {ISD::SINT_TO_FP, MVT::v16f16, MVT::v16i8, 4}, // 2 * sshl(2) + 2 * scvtf 1634 }; 1635 1636 if (ST->hasFullFP16()) 1637 if (const auto *Entry = ConvertCostTableLookup( 1638 FP16Tbl, ISD, DstTy.getSimpleVT(), SrcTy.getSimpleVT())) 1639 return AdjustCost(Entry->Cost); 1640 1641 return AdjustCost( 1642 BaseT::getCastInstrCost(Opcode, Dst, Src, CCH, CostKind, I)); 1643 } 1644 1645 InstructionCost AArch64TTIImpl::getExtractWithExtendCost(unsigned Opcode, 1646 Type *Dst, 1647 VectorType *VecTy, 1648 unsigned Index) { 1649 1650 // Make sure we were given a valid extend opcode. 1651 assert((Opcode == Instruction::SExt || Opcode == Instruction::ZExt) && 1652 "Invalid opcode"); 1653 1654 // We are extending an element we extract from a vector, so the source type 1655 // of the extend is the element type of the vector. 1656 auto *Src = VecTy->getElementType(); 1657 1658 // Sign- and zero-extends are for integer types only. 1659 assert(isa<IntegerType>(Dst) && isa<IntegerType>(Src) && "Invalid type"); 1660 1661 // Get the cost for the extract. We compute the cost (if any) for the extend 1662 // below. 1663 InstructionCost Cost = 1664 getVectorInstrCost(Instruction::ExtractElement, VecTy, Index); 1665 1666 // Legalize the types. 
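// As an illustrative sketch of the pattern this function costs (the exact
// selection depends on the subtarget), consider:
//   %elt = extractelement <8 x i16> %vec, i32 3
//   %ext = sext i16 %elt to i32
// which can be selected as a single SMOV, so only the extract is charged.
// Zero-extends are likewise free via UMOV, except when an i8/i16 element is
// extended all the way to i64, which the model charges as a separate extend.
// The legalized types computed below are used to make that decision.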
1667 auto VecLT = TLI->getTypeLegalizationCost(DL, VecTy); 1668 auto DstVT = TLI->getValueType(DL, Dst); 1669 auto SrcVT = TLI->getValueType(DL, Src); 1670 TTI::TargetCostKind CostKind = TTI::TCK_RecipThroughput; 1671 1672 // If the resulting type is still a vector and the destination type is legal, 1673 // we may get the extension for free. If not, get the default cost for the 1674 // extend. 1675 if (!VecLT.second.isVector() || !TLI->isTypeLegal(DstVT)) 1676 return Cost + getCastInstrCost(Opcode, Dst, Src, TTI::CastContextHint::None, 1677 CostKind); 1678 1679 // The destination type should be larger than the element type. If not, get 1680 // the default cost for the extend. 1681 if (DstVT.getFixedSizeInBits() < SrcVT.getFixedSizeInBits()) 1682 return Cost + getCastInstrCost(Opcode, Dst, Src, TTI::CastContextHint::None, 1683 CostKind); 1684 1685 switch (Opcode) { 1686 default: 1687 llvm_unreachable("Opcode should be either SExt or ZExt"); 1688 1689 // For sign-extends, we only need a smov, which performs the extension 1690 // automatically. 1691 case Instruction::SExt: 1692 return Cost; 1693 1694 // For zero-extends, the extend is performed automatically by a umov unless 1695 // the destination type is i64 and the element type is i8 or i16. 1696 case Instruction::ZExt: 1697 if (DstVT.getSizeInBits() != 64u || SrcVT.getSizeInBits() == 32u) 1698 return Cost; 1699 } 1700 1701 // If we are unable to perform the extend for free, get the default cost. 1702 return Cost + getCastInstrCost(Opcode, Dst, Src, TTI::CastContextHint::None, 1703 CostKind); 1704 } 1705 1706 InstructionCost AArch64TTIImpl::getCFInstrCost(unsigned Opcode, 1707 TTI::TargetCostKind CostKind, 1708 const Instruction *I) { 1709 if (CostKind != TTI::TCK_RecipThroughput) 1710 return Opcode == Instruction::PHI ? 0 : 1; 1711 assert(CostKind == TTI::TCK_RecipThroughput && "unexpected CostKind"); 1712 // Branches are assumed to be predicted. 1713 return 0; 1714 } 1715 1716 InstructionCost AArch64TTIImpl::getVectorInstrCost(unsigned Opcode, Type *Val, 1717 unsigned Index) { 1718 assert(Val->isVectorTy() && "This must be a vector type"); 1719 1720 if (Index != -1U) { 1721 // Legalize the type. 1722 std::pair<InstructionCost, MVT> LT = TLI->getTypeLegalizationCost(DL, Val); 1723 1724 // This type is legalized to a scalar type. 1725 if (!LT.second.isVector()) 1726 return 0; 1727 1728 // The type may be split. For fixed-width vectors we can normalize the 1729 // index to the new type. 1730 if (LT.second.isFixedLengthVector()) { 1731 unsigned Width = LT.second.getVectorNumElements(); 1732 Index = Index % Width; 1733 } 1734 1735 // The element at index zero is already inside the vector. 1736 if (Index == 0) 1737 return 0; 1738 } 1739 1740 // All other insert/extracts cost this much. 1741 return ST->getVectorInsertExtractBaseCost(); 1742 } 1743 1744 InstructionCost AArch64TTIImpl::getArithmeticInstrCost( 1745 unsigned Opcode, Type *Ty, TTI::TargetCostKind CostKind, 1746 TTI::OperandValueKind Opd1Info, TTI::OperandValueKind Opd2Info, 1747 TTI::OperandValueProperties Opd1PropInfo, 1748 TTI::OperandValueProperties Opd2PropInfo, ArrayRef<const Value *> Args, 1749 const Instruction *CxtI) { 1750 // TODO: Handle more cost kinds. 1751 if (CostKind != TTI::TCK_RecipThroughput) 1752 return BaseT::getArithmeticInstrCost(Opcode, Ty, CostKind, Opd1Info, 1753 Opd2Info, Opd1PropInfo, 1754 Opd2PropInfo, Args, CxtI); 1755 1756 // Legalize the type. 
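// For example (a rough sketch; the exact numbers depend on the subtarget):
//   %sum = add <8 x i32> %a, %b
// is legalized on a 128-bit NEON target into two v4i32 adds, so LT.first is
// 2 below and the per-operation cost is scaled by that factor.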
1757 std::pair<InstructionCost, MVT> LT = TLI->getTypeLegalizationCost(DL, Ty);
1758
1759 // If the instruction is a widening instruction (e.g., uaddl, saddw, etc.),
1760 // add in the widening overhead specified by the sub-target. Since the
1761 // extends feeding widening instructions are performed automatically, they
1762 // aren't present in the generated code and have a zero cost. By adding a
1763 // widening overhead here, we attach the total cost of the combined operation
1764 // to the widening instruction.
1765 InstructionCost Cost = 0;
1766 if (isWideningInstruction(Ty, Opcode, Args))
1767 Cost += ST->getWideningBaseCost();
1768
1769 int ISD = TLI->InstructionOpcodeToISD(Opcode);
1770
1771 switch (ISD) {
1772 default:
1773 return Cost + BaseT::getArithmeticInstrCost(Opcode, Ty, CostKind, Opd1Info,
1774 Opd2Info,
1775 Opd1PropInfo, Opd2PropInfo);
1776 case ISD::SDIV:
1777 if (Opd2Info == TargetTransformInfo::OK_UniformConstantValue &&
1778 Opd2PropInfo == TargetTransformInfo::OP_PowerOf2) {
1779 // On AArch64, scalar signed division by a power-of-two constant is
1780 // normally expanded to the sequence ADD + CMP + SELECT + SRA.
1781 // The OperandValue properties may not be the same as those of the
1782 // previous operation; conservatively assume OP_None.
1783 Cost += getArithmeticInstrCost(Instruction::Add, Ty, CostKind,
1784 Opd1Info, Opd2Info,
1785 TargetTransformInfo::OP_None,
1786 TargetTransformInfo::OP_None);
1787 Cost += getArithmeticInstrCost(Instruction::Sub, Ty, CostKind,
1788 Opd1Info, Opd2Info,
1789 TargetTransformInfo::OP_None,
1790 TargetTransformInfo::OP_None);
1791 Cost += getArithmeticInstrCost(Instruction::Select, Ty, CostKind,
1792 Opd1Info, Opd2Info,
1793 TargetTransformInfo::OP_None,
1794 TargetTransformInfo::OP_None);
1795 Cost += getArithmeticInstrCost(Instruction::AShr, Ty, CostKind,
1796 Opd1Info, Opd2Info,
1797 TargetTransformInfo::OP_None,
1798 TargetTransformInfo::OP_None);
1799 return Cost;
1800 }
1801 LLVM_FALLTHROUGH;
1802 case ISD::UDIV:
1803 if (Opd2Info == TargetTransformInfo::OK_UniformConstantValue) {
1804 auto VT = TLI->getValueType(DL, Ty);
1805 if (TLI->isOperationLegalOrCustom(ISD::MULHU, VT)) {
1806 // Vector signed division by a constant is expanded to the
1807 // sequence MULHS + ADD/SUB + SRA + SRL + ADD, and unsigned division
1808 // to MULHS + SUB + SRL + ADD + SRL.
1809 InstructionCost MulCost = getArithmeticInstrCost(
1810 Instruction::Mul, Ty, CostKind, Opd1Info, Opd2Info,
1811 TargetTransformInfo::OP_None, TargetTransformInfo::OP_None);
1812 InstructionCost AddCost = getArithmeticInstrCost(
1813 Instruction::Add, Ty, CostKind, Opd1Info, Opd2Info,
1814 TargetTransformInfo::OP_None, TargetTransformInfo::OP_None);
1815 InstructionCost ShrCost = getArithmeticInstrCost(
1816 Instruction::AShr, Ty, CostKind, Opd1Info, Opd2Info,
1817 TargetTransformInfo::OP_None, TargetTransformInfo::OP_None);
1818 return MulCost * 2 + AddCost * 2 + ShrCost * 2 + 1;
1819 }
1820 }
1821
1822 Cost += BaseT::getArithmeticInstrCost(Opcode, Ty, CostKind, Opd1Info,
1823 Opd2Info,
1824 Opd1PropInfo, Opd2PropInfo);
1825 if (Ty->isVectorTy()) {
1826 // On AArch64, vector divisions are not supported natively and are
1827 // expanded into scalar divisions of each pair of elements.
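// For example (a rough sketch): a 'udiv <4 x i32>' with a non-constant
// divisor pays the base division cost plus the cost of moving lanes out of
// and back into vector registers; the doubling below conservatively assumes
// both operands need to be unpacked (see the TODO).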
1828 Cost += getArithmeticInstrCost(Instruction::ExtractElement, Ty, CostKind, 1829 Opd1Info, Opd2Info, Opd1PropInfo, 1830 Opd2PropInfo); 1831 Cost += getArithmeticInstrCost(Instruction::InsertElement, Ty, CostKind, 1832 Opd1Info, Opd2Info, Opd1PropInfo, 1833 Opd2PropInfo); 1834 // TODO: if one of the arguments is scalar, then it's not necessary to 1835 // double the cost of handling the vector elements. 1836 Cost += Cost; 1837 } 1838 return Cost; 1839 1840 case ISD::MUL: 1841 if (LT.second != MVT::v2i64) 1842 return (Cost + 1) * LT.first; 1843 // Since we do not have a MUL.2d instruction, a mul <2 x i64> is expensive 1844 // as elements are extracted from the vectors and the muls scalarized. 1845 // As getScalarizationOverhead is a bit too pessimistic, we estimate the 1846 // cost for a i64 vector directly here, which is: 1847 // - four i64 extracts, 1848 // - two i64 inserts, and 1849 // - two muls. 1850 // So, for a v2i64 with LT.First = 1 the cost is 8, and for a v4i64 with 1851 // LT.first = 2 the cost is 16. 1852 return LT.first * 8; 1853 case ISD::ADD: 1854 case ISD::XOR: 1855 case ISD::OR: 1856 case ISD::AND: 1857 case ISD::SRL: 1858 case ISD::SRA: 1859 case ISD::SHL: 1860 // These nodes are marked as 'custom' for combining purposes only. 1861 // We know that they are legal. See LowerAdd in ISelLowering. 1862 return (Cost + 1) * LT.first; 1863 1864 case ISD::FADD: 1865 case ISD::FSUB: 1866 case ISD::FMUL: 1867 case ISD::FDIV: 1868 case ISD::FNEG: 1869 // These nodes are marked as 'custom' just to lower them to SVE. 1870 // We know said lowering will incur no additional cost. 1871 if (!Ty->getScalarType()->isFP128Ty()) 1872 return (Cost + 2) * LT.first; 1873 1874 return Cost + BaseT::getArithmeticInstrCost(Opcode, Ty, CostKind, Opd1Info, 1875 Opd2Info, 1876 Opd1PropInfo, Opd2PropInfo); 1877 } 1878 } 1879 1880 InstructionCost AArch64TTIImpl::getAddressComputationCost(Type *Ty, 1881 ScalarEvolution *SE, 1882 const SCEV *Ptr) { 1883 // Address computations in vectorized code with non-consecutive addresses will 1884 // likely result in more instructions compared to scalar code where the 1885 // computation can more often be merged into the index mode. The resulting 1886 // extra micro-ops can significantly decrease throughput. 1887 unsigned NumVectorInstToHideOverhead = 10; 1888 int MaxMergeDistance = 64; 1889 1890 if (Ty->isVectorTy() && SE && 1891 !BaseT::isConstantStridedAccessLessThan(SE, Ptr, MaxMergeDistance + 1)) 1892 return NumVectorInstToHideOverhead; 1893 1894 // In many cases the address computation is not merged into the instruction 1895 // addressing mode. 1896 return 1; 1897 } 1898 1899 InstructionCost AArch64TTIImpl::getCmpSelInstrCost(unsigned Opcode, Type *ValTy, 1900 Type *CondTy, 1901 CmpInst::Predicate VecPred, 1902 TTI::TargetCostKind CostKind, 1903 const Instruction *I) { 1904 // TODO: Handle other cost kinds. 1905 if (CostKind != TTI::TCK_RecipThroughput) 1906 return BaseT::getCmpSelInstrCost(Opcode, ValTy, CondTy, VecPred, CostKind, 1907 I); 1908 1909 int ISD = TLI->InstructionOpcodeToISD(Opcode); 1910 // We don't lower some vector selects well that are wider than the register 1911 // width. 1912 if (isa<FixedVectorType>(ValTy) && ISD == ISD::SELECT) { 1913 // We would need this many instructions to hide the scalarization happening. 1914 const int AmortizationCost = 20; 1915 1916 // If VecPred is not set, check if we can get a predicate from the context 1917 // instruction, if its type matches the requested ValTy. 
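// For example (an illustrative sketch), given
//   %cmp = fcmp olt <4 x float> %a, %b
//   %sel = select <4 x i1> %cmp, <4 x float> %a, <4 x float> %b
// the FCMP_OLT predicate is recovered from the fcmp feeding the select, and
// the pair is then recognised below as one that lowers to a (F)CMxx & BFI
// sequence, costed as a single legalized operation.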
1918 if (VecPred == CmpInst::BAD_ICMP_PREDICATE && I && I->getType() == ValTy) {
1919 CmpInst::Predicate CurrentPred;
1920 if (match(I, m_Select(m_Cmp(CurrentPred, m_Value(), m_Value()), m_Value(),
1921 m_Value())))
1922 VecPred = CurrentPred;
1923 }
1924 // Check if we have a compare/select chain that can be lowered using
1925 // a (F)CMxx & BFI pair.
1926 if (CmpInst::isIntPredicate(VecPred) || VecPred == CmpInst::FCMP_OLE ||
1927 VecPred == CmpInst::FCMP_OLT || VecPred == CmpInst::FCMP_OGT ||
1928 VecPred == CmpInst::FCMP_OGE || VecPred == CmpInst::FCMP_OEQ ||
1929 VecPred == CmpInst::FCMP_UNE) {
1930 static const auto ValidMinMaxTys = {
1931 MVT::v8i8, MVT::v16i8, MVT::v4i16, MVT::v8i16, MVT::v2i32,
1932 MVT::v4i32, MVT::v2i64, MVT::v2f32, MVT::v4f32, MVT::v2f64};
1933 static const auto ValidFP16MinMaxTys = {MVT::v4f16, MVT::v8f16};
1934
1935 auto LT = TLI->getTypeLegalizationCost(DL, ValTy);
1936 if (any_of(ValidMinMaxTys, [&LT](MVT M) { return M == LT.second; }) ||
1937 (ST->hasFullFP16() &&
1938 any_of(ValidFP16MinMaxTys, [&LT](MVT M) { return M == LT.second; })))
1939 return LT.first;
1940 }
1941
1942 static const TypeConversionCostTblEntry
1943 VectorSelectTbl[] = {
1944 { ISD::SELECT, MVT::v16i1, MVT::v16i16, 16 },
1945 { ISD::SELECT, MVT::v8i1, MVT::v8i32, 8 },
1946 { ISD::SELECT, MVT::v16i1, MVT::v16i32, 16 },
1947 { ISD::SELECT, MVT::v4i1, MVT::v4i64, 4 * AmortizationCost },
1948 { ISD::SELECT, MVT::v8i1, MVT::v8i64, 8 * AmortizationCost },
1949 { ISD::SELECT, MVT::v16i1, MVT::v16i64, 16 * AmortizationCost }
1950 };
1951
1952 EVT SelCondTy = TLI->getValueType(DL, CondTy);
1953 EVT SelValTy = TLI->getValueType(DL, ValTy);
1954 if (SelCondTy.isSimple() && SelValTy.isSimple()) {
1955 if (const auto *Entry = ConvertCostTableLookup(VectorSelectTbl, ISD,
1956 SelCondTy.getSimpleVT(),
1957 SelValTy.getSimpleVT()))
1958 return Entry->Cost;
1959 }
1960 }
1961 // The base case handles scalable vectors fine for now, since it treats the
1962 // cost as 1 * legalization cost.
1963 return BaseT::getCmpSelInstrCost(Opcode, ValTy, CondTy, VecPred, CostKind, I);
1964 }
1965
1966 AArch64TTIImpl::TTI::MemCmpExpansionOptions
1967 AArch64TTIImpl::enableMemCmpExpansion(bool OptSize, bool IsZeroCmp) const {
1968 TTI::MemCmpExpansionOptions Options;
1969 if (ST->requiresStrictAlign()) {
1970 // TODO: Add cost modeling for strict align. Misaligned loads expand to
1971 // a bunch of instructions when strict align is enabled.
1972 return Options;
1973 }
1974 Options.AllowOverlappingLoads = true;
1975 Options.MaxNumLoads = TLI->getMaxExpandSizeMemcmp(OptSize);
1976 Options.NumLoadsPerBlock = Options.MaxNumLoads;
1977 // TODO: Though vector loads usually perform well on AArch64, on some targets
1978 // they may wake up the FP unit, which raises the power consumption. Perhaps
1979 // they could be used with no holds barred (-O3).
1980 Options.LoadSizes = {8, 4, 2, 1};
1981 return Options;
1982 }
1983
1984 InstructionCost
1985 AArch64TTIImpl::getMaskedMemoryOpCost(unsigned Opcode, Type *Src,
1986 Align Alignment, unsigned AddressSpace,
1987 TTI::TargetCostKind CostKind) {
1988 if (useNeonVector(Src))
1989 return BaseT::getMaskedMemoryOpCost(Opcode, Src, Alignment, AddressSpace,
1990 CostKind);
1991 auto LT = TLI->getTypeLegalizationCost(DL, Src);
1992 if (!LT.first.isValid())
1993 return InstructionCost::getInvalid();
1994
1995 // The code-generator is currently not able to handle scalable vectors
1996 // of <vscale x 1 x eltty> yet, so return an invalid cost to avoid selecting
1997 // it.
This change will be removed when code-generation for these types is 1998 // sufficiently reliable. 1999 if (cast<VectorType>(Src)->getElementCount() == ElementCount::getScalable(1)) 2000 return InstructionCost::getInvalid(); 2001 2002 return LT.first * 2; 2003 } 2004 2005 static unsigned getSVEGatherScatterOverhead(unsigned Opcode) { 2006 return Opcode == Instruction::Load ? SVEGatherOverhead : SVEScatterOverhead; 2007 } 2008 2009 InstructionCost AArch64TTIImpl::getGatherScatterOpCost( 2010 unsigned Opcode, Type *DataTy, const Value *Ptr, bool VariableMask, 2011 Align Alignment, TTI::TargetCostKind CostKind, const Instruction *I) { 2012 if (useNeonVector(DataTy)) 2013 return BaseT::getGatherScatterOpCost(Opcode, DataTy, Ptr, VariableMask, 2014 Alignment, CostKind, I); 2015 auto *VT = cast<VectorType>(DataTy); 2016 auto LT = TLI->getTypeLegalizationCost(DL, DataTy); 2017 if (!LT.first.isValid()) 2018 return InstructionCost::getInvalid(); 2019 2020 // The code-generator is currently not able to handle scalable vectors 2021 // of <vscale x 1 x eltty> yet, so return an invalid cost to avoid selecting 2022 // it. This change will be removed when code-generation for these types is 2023 // sufficiently reliable. 2024 if (cast<VectorType>(DataTy)->getElementCount() == 2025 ElementCount::getScalable(1)) 2026 return InstructionCost::getInvalid(); 2027 2028 ElementCount LegalVF = LT.second.getVectorElementCount(); 2029 InstructionCost MemOpCost = 2030 getMemoryOpCost(Opcode, VT->getElementType(), Alignment, 0, CostKind, I); 2031 // Add on an overhead cost for using gathers/scatters. 2032 // TODO: At the moment this is applied unilaterally for all CPUs, but at some 2033 // point we may want a per-CPU overhead. 2034 MemOpCost *= getSVEGatherScatterOverhead(Opcode); 2035 return LT.first * MemOpCost * getMaxNumElements(LegalVF); 2036 } 2037 2038 bool AArch64TTIImpl::useNeonVector(const Type *Ty) const { 2039 return isa<FixedVectorType>(Ty) && !ST->useSVEForFixedLengthVectors(); 2040 } 2041 2042 InstructionCost AArch64TTIImpl::getMemoryOpCost(unsigned Opcode, Type *Ty, 2043 MaybeAlign Alignment, 2044 unsigned AddressSpace, 2045 TTI::TargetCostKind CostKind, 2046 const Instruction *I) { 2047 EVT VT = TLI->getValueType(DL, Ty, true); 2048 // Type legalization can't handle structs 2049 if (VT == MVT::Other) 2050 return BaseT::getMemoryOpCost(Opcode, Ty, Alignment, AddressSpace, 2051 CostKind); 2052 2053 auto LT = TLI->getTypeLegalizationCost(DL, Ty); 2054 if (!LT.first.isValid()) 2055 return InstructionCost::getInvalid(); 2056 2057 // The code-generator is currently not able to handle scalable vectors 2058 // of <vscale x 1 x eltty> yet, so return an invalid cost to avoid selecting 2059 // it. This change will be removed when code-generation for these types is 2060 // sufficiently reliable. 2061 if (auto *VTy = dyn_cast<ScalableVectorType>(Ty)) 2062 if (VTy->getElementCount() == ElementCount::getScalable(1)) 2063 return InstructionCost::getInvalid(); 2064 2065 // TODO: consider latency as well for TCK_SizeAndLatency. 2066 if (CostKind == TTI::TCK_CodeSize || CostKind == TTI::TCK_SizeAndLatency) 2067 return LT.first; 2068 2069 if (CostKind != TTI::TCK_RecipThroughput) 2070 return 1; 2071 2072 if (ST->isMisaligned128StoreSlow() && Opcode == Instruction::Store && 2073 LT.second.is128BitVector() && (!Alignment || *Alignment < Align(16))) { 2074 // Unaligned stores are extremely inefficient. 
We don't split all
2075 // unaligned 128-bit stores because of the negative impact that has been
2076 // observed in practice on inlined block copy code.
2077 // We make such stores expensive so that we will only vectorize if there
2078 // are 6 other instructions getting vectorized.
2079 const int AmortizationCost = 6;
2080
2081 return LT.first * 2 * AmortizationCost;
2082 }
2083
2084 // Check truncating stores and extending loads.
2085 if (useNeonVector(Ty) &&
2086 Ty->getScalarSizeInBits() != LT.second.getScalarSizeInBits()) {
2087 // v4i8 types are lowered to a scalar load/store and sshll/xtn.
2088 if (VT == MVT::v4i8)
2089 return 2;
2090 // Otherwise we need to scalarize.
2091 return cast<FixedVectorType>(Ty)->getNumElements() * 2;
2092 }
2093
2094 return LT.first;
2095 }
2096
2097 InstructionCost AArch64TTIImpl::getInterleavedMemoryOpCost(
2098 unsigned Opcode, Type *VecTy, unsigned Factor, ArrayRef<unsigned> Indices,
2099 Align Alignment, unsigned AddressSpace, TTI::TargetCostKind CostKind,
2100 bool UseMaskForCond, bool UseMaskForGaps) {
2101 assert(Factor >= 2 && "Invalid interleave factor");
2102 auto *VecVTy = cast<FixedVectorType>(VecTy);
2103
2104 if (!UseMaskForCond && !UseMaskForGaps &&
2105 Factor <= TLI->getMaxSupportedInterleaveFactor()) {
2106 unsigned NumElts = VecVTy->getNumElements();
2107 auto *SubVecTy =
2108 FixedVectorType::get(VecTy->getScalarType(), NumElts / Factor);
2109
2110 // ldN/stN only support legal vector types of size 64 or 128 in bits.
2111 // Accesses having vector types that are a multiple of 128 bits can be
2112 // matched to more than one ldN/stN instruction.
2113 bool UseScalable;
2114 if (NumElts % Factor == 0 &&
2115 TLI->isLegalInterleavedAccessType(SubVecTy, DL, UseScalable))
2116 return Factor * TLI->getNumInterleavedAccesses(SubVecTy, DL, UseScalable);
2117 }
2118
2119 return BaseT::getInterleavedMemoryOpCost(Opcode, VecTy, Factor, Indices,
2120 Alignment, AddressSpace, CostKind,
2121 UseMaskForCond, UseMaskForGaps);
2122 }
2123
2124 InstructionCost
2125 AArch64TTIImpl::getCostOfKeepingLiveOverCall(ArrayRef<Type *> Tys) {
2126 InstructionCost Cost = 0;
2127 TTI::TargetCostKind CostKind = TTI::TCK_RecipThroughput;
2128 for (auto *I : Tys) {
2129 if (!I->isVectorTy())
2130 continue;
2131 if (I->getScalarSizeInBits() * cast<FixedVectorType>(I)->getNumElements() ==
2132 128)
2133 Cost += getMemoryOpCost(Instruction::Store, I, Align(128), 0, CostKind) +
2134 getMemoryOpCost(Instruction::Load, I, Align(128), 0, CostKind);
2135 }
2136 return Cost;
2137 }
2138
2139 unsigned AArch64TTIImpl::getMaxInterleaveFactor(unsigned VF) {
2140 return ST->getMaxInterleaveFactor();
2141 }
2142
2143 // For Falkor, we want to avoid having too many strided loads in a loop since
2144 // that can exhaust the HW prefetcher resources. We adjust the unroller
2145 // MaxCount preference below to attempt to ensure unrolling doesn't create too
2146 // many strided loads.
2147 static void
2148 getFalkorUnrollingPreferences(Loop *L, ScalarEvolution &SE,
2149 TargetTransformInfo::UnrollingPreferences &UP) {
2150 enum { MaxStridedLoads = 7 };
2151 auto countStridedLoads = [](Loop *L, ScalarEvolution &SE) {
2152 int StridedLoads = 0;
2153 // FIXME? We could make this more precise by looking at the CFG and
2154 // e.g. not counting loads in each side of an if-then-else diamond.
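// For example (a sketch): a load of a[i] or a[4*i + 1] inside the loop has a
// pointer SCEV of the form {base,+,step}, i.e. an affine add recurrence, and
// is counted as strided below; loads through loop-invariant pointers are
// skipped.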
2155 for (const auto BB : L->blocks()) { 2156 for (auto &I : *BB) { 2157 LoadInst *LMemI = dyn_cast<LoadInst>(&I); 2158 if (!LMemI) 2159 continue; 2160 2161 Value *PtrValue = LMemI->getPointerOperand(); 2162 if (L->isLoopInvariant(PtrValue)) 2163 continue; 2164 2165 const SCEV *LSCEV = SE.getSCEV(PtrValue); 2166 const SCEVAddRecExpr *LSCEVAddRec = dyn_cast<SCEVAddRecExpr>(LSCEV); 2167 if (!LSCEVAddRec || !LSCEVAddRec->isAffine()) 2168 continue; 2169 2170 // FIXME? We could take pairing of unrolled load copies into account 2171 // by looking at the AddRec, but we would probably have to limit this 2172 // to loops with no stores or other memory optimization barriers. 2173 ++StridedLoads; 2174 // We've seen enough strided loads that seeing more won't make a 2175 // difference. 2176 if (StridedLoads > MaxStridedLoads / 2) 2177 return StridedLoads; 2178 } 2179 } 2180 return StridedLoads; 2181 }; 2182 2183 int StridedLoads = countStridedLoads(L, SE); 2184 LLVM_DEBUG(dbgs() << "falkor-hwpf: detected " << StridedLoads 2185 << " strided loads\n"); 2186 // Pick the largest power of 2 unroll count that won't result in too many 2187 // strided loads. 2188 if (StridedLoads) { 2189 UP.MaxCount = 1 << Log2_32(MaxStridedLoads / StridedLoads); 2190 LLVM_DEBUG(dbgs() << "falkor-hwpf: setting unroll MaxCount to " 2191 << UP.MaxCount << '\n'); 2192 } 2193 } 2194 2195 void AArch64TTIImpl::getUnrollingPreferences(Loop *L, ScalarEvolution &SE, 2196 TTI::UnrollingPreferences &UP, 2197 OptimizationRemarkEmitter *ORE) { 2198 // Enable partial unrolling and runtime unrolling. 2199 BaseT::getUnrollingPreferences(L, SE, UP, ORE); 2200 2201 UP.UpperBound = true; 2202 2203 // For inner loop, it is more likely to be a hot one, and the runtime check 2204 // can be promoted out from LICM pass, so the overhead is less, let's try 2205 // a larger threshold to unroll more loops. 2206 if (L->getLoopDepth() > 1) 2207 UP.PartialThreshold *= 2; 2208 2209 // Disable partial & runtime unrolling on -Os. 2210 UP.PartialOptSizeThreshold = 0; 2211 2212 if (ST->getProcFamily() == AArch64Subtarget::Falkor && 2213 EnableFalkorHWPFUnrollFix) 2214 getFalkorUnrollingPreferences(L, SE, UP); 2215 2216 // Scan the loop: don't unroll loops with calls as this could prevent 2217 // inlining. Don't unroll vector loops either, as they don't benefit much from 2218 // unrolling. 2219 for (auto *BB : L->getBlocks()) { 2220 for (auto &I : *BB) { 2221 // Don't unroll vectorised loop. 
2222 if (I.getType()->isVectorTy()) 2223 return; 2224 2225 if (isa<CallInst>(I) || isa<InvokeInst>(I)) { 2226 if (const Function *F = cast<CallBase>(I).getCalledFunction()) { 2227 if (!isLoweredToCall(F)) 2228 continue; 2229 } 2230 return; 2231 } 2232 } 2233 } 2234 2235 // Enable runtime unrolling for in-order models 2236 // If mcpu is omitted, getProcFamily() returns AArch64Subtarget::Others, so by 2237 // checking for that case, we can ensure that the default behaviour is 2238 // unchanged 2239 if (ST->getProcFamily() != AArch64Subtarget::Others && 2240 !ST->getSchedModel().isOutOfOrder()) { 2241 UP.Runtime = true; 2242 UP.Partial = true; 2243 UP.UnrollRemainder = true; 2244 UP.DefaultUnrollRuntimeCount = 4; 2245 2246 UP.UnrollAndJam = true; 2247 UP.UnrollAndJamInnerLoopThreshold = 60; 2248 } 2249 } 2250 2251 void AArch64TTIImpl::getPeelingPreferences(Loop *L, ScalarEvolution &SE, 2252 TTI::PeelingPreferences &PP) { 2253 BaseT::getPeelingPreferences(L, SE, PP); 2254 } 2255 2256 Value *AArch64TTIImpl::getOrCreateResultFromMemIntrinsic(IntrinsicInst *Inst, 2257 Type *ExpectedType) { 2258 switch (Inst->getIntrinsicID()) { 2259 default: 2260 return nullptr; 2261 case Intrinsic::aarch64_neon_st2: 2262 case Intrinsic::aarch64_neon_st3: 2263 case Intrinsic::aarch64_neon_st4: { 2264 // Create a struct type 2265 StructType *ST = dyn_cast<StructType>(ExpectedType); 2266 if (!ST) 2267 return nullptr; 2268 unsigned NumElts = Inst->arg_size() - 1; 2269 if (ST->getNumElements() != NumElts) 2270 return nullptr; 2271 for (unsigned i = 0, e = NumElts; i != e; ++i) { 2272 if (Inst->getArgOperand(i)->getType() != ST->getElementType(i)) 2273 return nullptr; 2274 } 2275 Value *Res = UndefValue::get(ExpectedType); 2276 IRBuilder<> Builder(Inst); 2277 for (unsigned i = 0, e = NumElts; i != e; ++i) { 2278 Value *L = Inst->getArgOperand(i); 2279 Res = Builder.CreateInsertValue(Res, L, i); 2280 } 2281 return Res; 2282 } 2283 case Intrinsic::aarch64_neon_ld2: 2284 case Intrinsic::aarch64_neon_ld3: 2285 case Intrinsic::aarch64_neon_ld4: 2286 if (Inst->getType() == ExpectedType) 2287 return Inst; 2288 return nullptr; 2289 } 2290 } 2291 2292 bool AArch64TTIImpl::getTgtMemIntrinsic(IntrinsicInst *Inst, 2293 MemIntrinsicInfo &Info) { 2294 switch (Inst->getIntrinsicID()) { 2295 default: 2296 break; 2297 case Intrinsic::aarch64_neon_ld2: 2298 case Intrinsic::aarch64_neon_ld3: 2299 case Intrinsic::aarch64_neon_ld4: 2300 Info.ReadMem = true; 2301 Info.WriteMem = false; 2302 Info.PtrVal = Inst->getArgOperand(0); 2303 break; 2304 case Intrinsic::aarch64_neon_st2: 2305 case Intrinsic::aarch64_neon_st3: 2306 case Intrinsic::aarch64_neon_st4: 2307 Info.ReadMem = false; 2308 Info.WriteMem = true; 2309 Info.PtrVal = Inst->getArgOperand(Inst->arg_size() - 1); 2310 break; 2311 } 2312 2313 switch (Inst->getIntrinsicID()) { 2314 default: 2315 return false; 2316 case Intrinsic::aarch64_neon_ld2: 2317 case Intrinsic::aarch64_neon_st2: 2318 Info.MatchingId = VECTOR_LDST_TWO_ELEMENTS; 2319 break; 2320 case Intrinsic::aarch64_neon_ld3: 2321 case Intrinsic::aarch64_neon_st3: 2322 Info.MatchingId = VECTOR_LDST_THREE_ELEMENTS; 2323 break; 2324 case Intrinsic::aarch64_neon_ld4: 2325 case Intrinsic::aarch64_neon_st4: 2326 Info.MatchingId = VECTOR_LDST_FOUR_ELEMENTS; 2327 break; 2328 } 2329 return true; 2330 } 2331 2332 /// See if \p I should be considered for address type promotion. We check if \p 2333 /// I is a sext with right type and used in memory accesses. 
If it used in a 2334 /// "complex" getelementptr, we allow it to be promoted without finding other 2335 /// sext instructions that sign extended the same initial value. A getelementptr 2336 /// is considered as "complex" if it has more than 2 operands. 2337 bool AArch64TTIImpl::shouldConsiderAddressTypePromotion( 2338 const Instruction &I, bool &AllowPromotionWithoutCommonHeader) { 2339 bool Considerable = false; 2340 AllowPromotionWithoutCommonHeader = false; 2341 if (!isa<SExtInst>(&I)) 2342 return false; 2343 Type *ConsideredSExtType = 2344 Type::getInt64Ty(I.getParent()->getParent()->getContext()); 2345 if (I.getType() != ConsideredSExtType) 2346 return false; 2347 // See if the sext is the one with the right type and used in at least one 2348 // GetElementPtrInst. 2349 for (const User *U : I.users()) { 2350 if (const GetElementPtrInst *GEPInst = dyn_cast<GetElementPtrInst>(U)) { 2351 Considerable = true; 2352 // A getelementptr is considered as "complex" if it has more than 2 2353 // operands. We will promote a SExt used in such complex GEP as we 2354 // expect some computation to be merged if they are done on 64 bits. 2355 if (GEPInst->getNumOperands() > 2) { 2356 AllowPromotionWithoutCommonHeader = true; 2357 break; 2358 } 2359 } 2360 } 2361 return Considerable; 2362 } 2363 2364 bool AArch64TTIImpl::isLegalToVectorizeReduction( 2365 const RecurrenceDescriptor &RdxDesc, ElementCount VF) const { 2366 if (!VF.isScalable()) 2367 return true; 2368 2369 Type *Ty = RdxDesc.getRecurrenceType(); 2370 if (Ty->isBFloatTy() || !isElementTypeLegalForScalableVector(Ty)) 2371 return false; 2372 2373 switch (RdxDesc.getRecurrenceKind()) { 2374 case RecurKind::Add: 2375 case RecurKind::FAdd: 2376 case RecurKind::And: 2377 case RecurKind::Or: 2378 case RecurKind::Xor: 2379 case RecurKind::SMin: 2380 case RecurKind::SMax: 2381 case RecurKind::UMin: 2382 case RecurKind::UMax: 2383 case RecurKind::FMin: 2384 case RecurKind::FMax: 2385 case RecurKind::SelectICmp: 2386 case RecurKind::SelectFCmp: 2387 case RecurKind::FMulAdd: 2388 return true; 2389 default: 2390 return false; 2391 } 2392 } 2393 2394 InstructionCost 2395 AArch64TTIImpl::getMinMaxReductionCost(VectorType *Ty, VectorType *CondTy, 2396 bool IsUnsigned, 2397 TTI::TargetCostKind CostKind) { 2398 std::pair<InstructionCost, MVT> LT = TLI->getTypeLegalizationCost(DL, Ty); 2399 2400 if (LT.second.getScalarType() == MVT::f16 && !ST->hasFullFP16()) 2401 return BaseT::getMinMaxReductionCost(Ty, CondTy, IsUnsigned, CostKind); 2402 2403 assert((isa<ScalableVectorType>(Ty) == isa<ScalableVectorType>(CondTy)) && 2404 "Both vector needs to be equally scalable"); 2405 2406 InstructionCost LegalizationCost = 0; 2407 if (LT.first > 1) { 2408 Type *LegalVTy = EVT(LT.second).getTypeForEVT(Ty->getContext()); 2409 unsigned MinMaxOpcode = 2410 Ty->isFPOrFPVectorTy() 2411 ? Intrinsic::maxnum 2412 : (IsUnsigned ? 
Intrinsic::umin : Intrinsic::smin); 2413 IntrinsicCostAttributes Attrs(MinMaxOpcode, LegalVTy, {LegalVTy, LegalVTy}); 2414 LegalizationCost = getIntrinsicInstrCost(Attrs, CostKind) * (LT.first - 1); 2415 } 2416 2417 return LegalizationCost + /*Cost of horizontal reduction*/ 2; 2418 } 2419 2420 InstructionCost AArch64TTIImpl::getArithmeticReductionCostSVE( 2421 unsigned Opcode, VectorType *ValTy, TTI::TargetCostKind CostKind) { 2422 std::pair<InstructionCost, MVT> LT = TLI->getTypeLegalizationCost(DL, ValTy); 2423 InstructionCost LegalizationCost = 0; 2424 if (LT.first > 1) { 2425 Type *LegalVTy = EVT(LT.second).getTypeForEVT(ValTy->getContext()); 2426 LegalizationCost = getArithmeticInstrCost(Opcode, LegalVTy, CostKind); 2427 LegalizationCost *= LT.first - 1; 2428 } 2429 2430 int ISD = TLI->InstructionOpcodeToISD(Opcode); 2431 assert(ISD && "Invalid opcode"); 2432 // Add the final reduction cost for the legal horizontal reduction 2433 switch (ISD) { 2434 case ISD::ADD: 2435 case ISD::AND: 2436 case ISD::OR: 2437 case ISD::XOR: 2438 case ISD::FADD: 2439 return LegalizationCost + 2; 2440 default: 2441 return InstructionCost::getInvalid(); 2442 } 2443 } 2444 2445 InstructionCost 2446 AArch64TTIImpl::getArithmeticReductionCost(unsigned Opcode, VectorType *ValTy, 2447 Optional<FastMathFlags> FMF, 2448 TTI::TargetCostKind CostKind) { 2449 if (TTI::requiresOrderedReduction(FMF)) { 2450 if (auto *FixedVTy = dyn_cast<FixedVectorType>(ValTy)) { 2451 InstructionCost BaseCost = 2452 BaseT::getArithmeticReductionCost(Opcode, ValTy, FMF, CostKind); 2453 // Add on extra cost to reflect the extra overhead on some CPUs. We still 2454 // end up vectorizing for more computationally intensive loops. 2455 return BaseCost + FixedVTy->getNumElements(); 2456 } 2457 2458 if (Opcode != Instruction::FAdd) 2459 return InstructionCost::getInvalid(); 2460 2461 auto *VTy = cast<ScalableVectorType>(ValTy); 2462 InstructionCost Cost = 2463 getArithmeticInstrCost(Opcode, VTy->getScalarType(), CostKind); 2464 Cost *= getMaxNumElements(VTy->getElementCount()); 2465 return Cost; 2466 } 2467 2468 if (isa<ScalableVectorType>(ValTy)) 2469 return getArithmeticReductionCostSVE(Opcode, ValTy, CostKind); 2470 2471 std::pair<InstructionCost, MVT> LT = TLI->getTypeLegalizationCost(DL, ValTy); 2472 MVT MTy = LT.second; 2473 int ISD = TLI->InstructionOpcodeToISD(Opcode); 2474 assert(ISD && "Invalid opcode"); 2475 2476 // Horizontal adds can use the 'addv' instruction. We model the cost of these 2477 // instructions as twice a normal vector add, plus 1 for each legalization 2478 // step (LT.first). This is the only arithmetic vector reduction operation for 2479 // which we have an instruction. 
2480 // OR, XOR and AND costs should match the codegen from: 2481 // OR: llvm/test/CodeGen/AArch64/reduce-or.ll 2482 // XOR: llvm/test/CodeGen/AArch64/reduce-xor.ll 2483 // AND: llvm/test/CodeGen/AArch64/reduce-and.ll 2484 static const CostTblEntry CostTblNoPairwise[]{ 2485 {ISD::ADD, MVT::v8i8, 2}, 2486 {ISD::ADD, MVT::v16i8, 2}, 2487 {ISD::ADD, MVT::v4i16, 2}, 2488 {ISD::ADD, MVT::v8i16, 2}, 2489 {ISD::ADD, MVT::v4i32, 2}, 2490 {ISD::OR, MVT::v8i8, 15}, 2491 {ISD::OR, MVT::v16i8, 17}, 2492 {ISD::OR, MVT::v4i16, 7}, 2493 {ISD::OR, MVT::v8i16, 9}, 2494 {ISD::OR, MVT::v2i32, 3}, 2495 {ISD::OR, MVT::v4i32, 5}, 2496 {ISD::OR, MVT::v2i64, 3}, 2497 {ISD::XOR, MVT::v8i8, 15}, 2498 {ISD::XOR, MVT::v16i8, 17}, 2499 {ISD::XOR, MVT::v4i16, 7}, 2500 {ISD::XOR, MVT::v8i16, 9}, 2501 {ISD::XOR, MVT::v2i32, 3}, 2502 {ISD::XOR, MVT::v4i32, 5}, 2503 {ISD::XOR, MVT::v2i64, 3}, 2504 {ISD::AND, MVT::v8i8, 15}, 2505 {ISD::AND, MVT::v16i8, 17}, 2506 {ISD::AND, MVT::v4i16, 7}, 2507 {ISD::AND, MVT::v8i16, 9}, 2508 {ISD::AND, MVT::v2i32, 3}, 2509 {ISD::AND, MVT::v4i32, 5}, 2510 {ISD::AND, MVT::v2i64, 3}, 2511 }; 2512 switch (ISD) { 2513 default: 2514 break; 2515 case ISD::ADD: 2516 if (const auto *Entry = CostTableLookup(CostTblNoPairwise, ISD, MTy)) 2517 return (LT.first - 1) + Entry->Cost; 2518 break; 2519 case ISD::XOR: 2520 case ISD::AND: 2521 case ISD::OR: 2522 const auto *Entry = CostTableLookup(CostTblNoPairwise, ISD, MTy); 2523 if (!Entry) 2524 break; 2525 auto *ValVTy = cast<FixedVectorType>(ValTy); 2526 if (!ValVTy->getElementType()->isIntegerTy(1) && 2527 MTy.getVectorNumElements() <= ValVTy->getNumElements() && 2528 isPowerOf2_32(ValVTy->getNumElements())) { 2529 InstructionCost ExtraCost = 0; 2530 if (LT.first != 1) { 2531 // Type needs to be split, so there is an extra cost of LT.first - 1 2532 // arithmetic ops. 2533 auto *Ty = FixedVectorType::get(ValTy->getElementType(), 2534 MTy.getVectorNumElements()); 2535 ExtraCost = getArithmeticInstrCost(Opcode, Ty, CostKind); 2536 ExtraCost *= LT.first - 1; 2537 } 2538 return Entry->Cost + ExtraCost; 2539 } 2540 break; 2541 } 2542 return BaseT::getArithmeticReductionCost(Opcode, ValTy, FMF, CostKind); 2543 } 2544 2545 InstructionCost AArch64TTIImpl::getSpliceCost(VectorType *Tp, int Index) { 2546 static const CostTblEntry ShuffleTbl[] = { 2547 { TTI::SK_Splice, MVT::nxv16i8, 1 }, 2548 { TTI::SK_Splice, MVT::nxv8i16, 1 }, 2549 { TTI::SK_Splice, MVT::nxv4i32, 1 }, 2550 { TTI::SK_Splice, MVT::nxv2i64, 1 }, 2551 { TTI::SK_Splice, MVT::nxv2f16, 1 }, 2552 { TTI::SK_Splice, MVT::nxv4f16, 1 }, 2553 { TTI::SK_Splice, MVT::nxv8f16, 1 }, 2554 { TTI::SK_Splice, MVT::nxv2bf16, 1 }, 2555 { TTI::SK_Splice, MVT::nxv4bf16, 1 }, 2556 { TTI::SK_Splice, MVT::nxv8bf16, 1 }, 2557 { TTI::SK_Splice, MVT::nxv2f32, 1 }, 2558 { TTI::SK_Splice, MVT::nxv4f32, 1 }, 2559 { TTI::SK_Splice, MVT::nxv2f64, 1 }, 2560 }; 2561 2562 std::pair<InstructionCost, MVT> LT = TLI->getTypeLegalizationCost(DL, Tp); 2563 Type *LegalVTy = EVT(LT.second).getTypeForEVT(Tp->getContext()); 2564 TTI::TargetCostKind CostKind = TTI::TCK_RecipThroughput; 2565 EVT PromotedVT = LT.second.getScalarType() == MVT::i1 2566 ? 
TLI->getPromotedVTForPredicate(EVT(LT.second)) 2567 : LT.second; 2568 Type *PromotedVTy = EVT(PromotedVT).getTypeForEVT(Tp->getContext()); 2569 InstructionCost LegalizationCost = 0; 2570 if (Index < 0) { 2571 LegalizationCost = 2572 getCmpSelInstrCost(Instruction::ICmp, PromotedVTy, PromotedVTy, 2573 CmpInst::BAD_ICMP_PREDICATE, CostKind) + 2574 getCmpSelInstrCost(Instruction::Select, PromotedVTy, LegalVTy, 2575 CmpInst::BAD_ICMP_PREDICATE, CostKind); 2576 } 2577 2578 // Predicated splice are promoted when lowering. See AArch64ISelLowering.cpp 2579 // Cost performed on a promoted type. 2580 if (LT.second.getScalarType() == MVT::i1) { 2581 LegalizationCost += 2582 getCastInstrCost(Instruction::ZExt, PromotedVTy, LegalVTy, 2583 TTI::CastContextHint::None, CostKind) + 2584 getCastInstrCost(Instruction::Trunc, LegalVTy, PromotedVTy, 2585 TTI::CastContextHint::None, CostKind); 2586 } 2587 const auto *Entry = 2588 CostTableLookup(ShuffleTbl, TTI::SK_Splice, PromotedVT.getSimpleVT()); 2589 assert(Entry && "Illegal Type for Splice"); 2590 LegalizationCost += Entry->Cost; 2591 return LegalizationCost * LT.first; 2592 } 2593 2594 InstructionCost AArch64TTIImpl::getShuffleCost(TTI::ShuffleKind Kind, 2595 VectorType *Tp, 2596 ArrayRef<int> Mask, int Index, 2597 VectorType *SubTp) { 2598 Kind = improveShuffleKindFromMask(Kind, Mask); 2599 if (Kind == TTI::SK_Broadcast || Kind == TTI::SK_Transpose || 2600 Kind == TTI::SK_Select || Kind == TTI::SK_PermuteSingleSrc || 2601 Kind == TTI::SK_Reverse) { 2602 static const CostTblEntry ShuffleTbl[] = { 2603 // Broadcast shuffle kinds can be performed with 'dup'. 2604 { TTI::SK_Broadcast, MVT::v8i8, 1 }, 2605 { TTI::SK_Broadcast, MVT::v16i8, 1 }, 2606 { TTI::SK_Broadcast, MVT::v4i16, 1 }, 2607 { TTI::SK_Broadcast, MVT::v8i16, 1 }, 2608 { TTI::SK_Broadcast, MVT::v2i32, 1 }, 2609 { TTI::SK_Broadcast, MVT::v4i32, 1 }, 2610 { TTI::SK_Broadcast, MVT::v2i64, 1 }, 2611 { TTI::SK_Broadcast, MVT::v2f32, 1 }, 2612 { TTI::SK_Broadcast, MVT::v4f32, 1 }, 2613 { TTI::SK_Broadcast, MVT::v2f64, 1 }, 2614 // Transpose shuffle kinds can be performed with 'trn1/trn2' and 2615 // 'zip1/zip2' instructions. 2616 { TTI::SK_Transpose, MVT::v8i8, 1 }, 2617 { TTI::SK_Transpose, MVT::v16i8, 1 }, 2618 { TTI::SK_Transpose, MVT::v4i16, 1 }, 2619 { TTI::SK_Transpose, MVT::v8i16, 1 }, 2620 { TTI::SK_Transpose, MVT::v2i32, 1 }, 2621 { TTI::SK_Transpose, MVT::v4i32, 1 }, 2622 { TTI::SK_Transpose, MVT::v2i64, 1 }, 2623 { TTI::SK_Transpose, MVT::v2f32, 1 }, 2624 { TTI::SK_Transpose, MVT::v4f32, 1 }, 2625 { TTI::SK_Transpose, MVT::v2f64, 1 }, 2626 // Select shuffle kinds. 2627 // TODO: handle vXi8/vXi16. 2628 { TTI::SK_Select, MVT::v2i32, 1 }, // mov. 2629 { TTI::SK_Select, MVT::v4i32, 2 }, // rev+trn (or similar). 2630 { TTI::SK_Select, MVT::v2i64, 1 }, // mov. 2631 { TTI::SK_Select, MVT::v2f32, 1 }, // mov. 2632 { TTI::SK_Select, MVT::v4f32, 2 }, // rev+trn (or similar). 2633 { TTI::SK_Select, MVT::v2f64, 1 }, // mov. 2634 // PermuteSingleSrc shuffle kinds. 2635 { TTI::SK_PermuteSingleSrc, MVT::v2i32, 1 }, // mov. 2636 { TTI::SK_PermuteSingleSrc, MVT::v4i32, 3 }, // perfectshuffle worst case. 2637 { TTI::SK_PermuteSingleSrc, MVT::v2i64, 1 }, // mov. 2638 { TTI::SK_PermuteSingleSrc, MVT::v2f32, 1 }, // mov. 2639 { TTI::SK_PermuteSingleSrc, MVT::v4f32, 3 }, // perfectshuffle worst case. 2640 { TTI::SK_PermuteSingleSrc, MVT::v2f64, 1 }, // mov. 2641 { TTI::SK_PermuteSingleSrc, MVT::v4i16, 3 }, // perfectshuffle worst case. 
2642 { TTI::SK_PermuteSingleSrc, MVT::v4f16, 3 }, // perfectshuffle worst case. 2643 { TTI::SK_PermuteSingleSrc, MVT::v4bf16, 3 }, // perfectshuffle worst case. 2644 { TTI::SK_PermuteSingleSrc, MVT::v8i16, 8 }, // constpool + load + tbl 2645 { TTI::SK_PermuteSingleSrc, MVT::v8f16, 8 }, // constpool + load + tbl 2646 { TTI::SK_PermuteSingleSrc, MVT::v8bf16, 8 }, // constpool + load + tbl 2647 { TTI::SK_PermuteSingleSrc, MVT::v8i8, 8 }, // constpool + load + tbl 2648 { TTI::SK_PermuteSingleSrc, MVT::v16i8, 8 }, // constpool + load + tbl 2649 // Reverse can be lowered with `rev`. 2650 { TTI::SK_Reverse, MVT::v2i32, 1 }, // mov. 2651 { TTI::SK_Reverse, MVT::v4i32, 2 }, // REV64; EXT 2652 { TTI::SK_Reverse, MVT::v2i64, 1 }, // mov. 2653 { TTI::SK_Reverse, MVT::v2f32, 1 }, // mov. 2654 { TTI::SK_Reverse, MVT::v4f32, 2 }, // REV64; EXT 2655 { TTI::SK_Reverse, MVT::v2f64, 1 }, // mov. 2656 // Broadcast shuffle kinds for scalable vectors 2657 { TTI::SK_Broadcast, MVT::nxv16i8, 1 }, 2658 { TTI::SK_Broadcast, MVT::nxv8i16, 1 }, 2659 { TTI::SK_Broadcast, MVT::nxv4i32, 1 }, 2660 { TTI::SK_Broadcast, MVT::nxv2i64, 1 }, 2661 { TTI::SK_Broadcast, MVT::nxv2f16, 1 }, 2662 { TTI::SK_Broadcast, MVT::nxv4f16, 1 }, 2663 { TTI::SK_Broadcast, MVT::nxv8f16, 1 }, 2664 { TTI::SK_Broadcast, MVT::nxv2bf16, 1 }, 2665 { TTI::SK_Broadcast, MVT::nxv4bf16, 1 }, 2666 { TTI::SK_Broadcast, MVT::nxv8bf16, 1 }, 2667 { TTI::SK_Broadcast, MVT::nxv2f32, 1 }, 2668 { TTI::SK_Broadcast, MVT::nxv4f32, 1 }, 2669 { TTI::SK_Broadcast, MVT::nxv2f64, 1 }, 2670 { TTI::SK_Broadcast, MVT::nxv16i1, 1 }, 2671 { TTI::SK_Broadcast, MVT::nxv8i1, 1 }, 2672 { TTI::SK_Broadcast, MVT::nxv4i1, 1 }, 2673 { TTI::SK_Broadcast, MVT::nxv2i1, 1 }, 2674 // Handle the cases for vector.reverse with scalable vectors 2675 { TTI::SK_Reverse, MVT::nxv16i8, 1 }, 2676 { TTI::SK_Reverse, MVT::nxv8i16, 1 }, 2677 { TTI::SK_Reverse, MVT::nxv4i32, 1 }, 2678 { TTI::SK_Reverse, MVT::nxv2i64, 1 }, 2679 { TTI::SK_Reverse, MVT::nxv2f16, 1 }, 2680 { TTI::SK_Reverse, MVT::nxv4f16, 1 }, 2681 { TTI::SK_Reverse, MVT::nxv8f16, 1 }, 2682 { TTI::SK_Reverse, MVT::nxv2bf16, 1 }, 2683 { TTI::SK_Reverse, MVT::nxv4bf16, 1 }, 2684 { TTI::SK_Reverse, MVT::nxv8bf16, 1 }, 2685 { TTI::SK_Reverse, MVT::nxv2f32, 1 }, 2686 { TTI::SK_Reverse, MVT::nxv4f32, 1 }, 2687 { TTI::SK_Reverse, MVT::nxv2f64, 1 }, 2688 { TTI::SK_Reverse, MVT::nxv16i1, 1 }, 2689 { TTI::SK_Reverse, MVT::nxv8i1, 1 }, 2690 { TTI::SK_Reverse, MVT::nxv4i1, 1 }, 2691 { TTI::SK_Reverse, MVT::nxv2i1, 1 }, 2692 }; 2693 std::pair<InstructionCost, MVT> LT = TLI->getTypeLegalizationCost(DL, Tp); 2694 if (const auto *Entry = CostTableLookup(ShuffleTbl, Kind, LT.second)) 2695 return LT.first * Entry->Cost; 2696 } 2697 if (Kind == TTI::SK_Splice && isa<ScalableVectorType>(Tp)) 2698 return getSpliceCost(Tp, Index); 2699 return BaseT::getShuffleCost(Kind, Tp, Mask, Index, SubTp); 2700 } 2701
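// For example (a sketch of how the table above is consulted): reversing a
// <4 x i32> vector maps to SK_Reverse on the legalized type v4i32 and is
// costed at 2 (REV64 + EXT), while a splat shuffle of <2 x double> maps to
// SK_Broadcast on v2f64 and is costed at 1 (a single DUP).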