//===-- SystemZTargetTransformInfo.cpp - SystemZ-specific TTI -------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file implements a TargetTransformInfo analysis pass specific to the
// SystemZ target machine. It uses the target's detailed information to provide
// more precise answers to certain TTI queries, while letting the target
// independent and default TTI implementations handle the rest.
//
//===----------------------------------------------------------------------===//

#include "SystemZTargetTransformInfo.h"
#include "llvm/Analysis/TargetTransformInfo.h"
#include "llvm/CodeGen/BasicTTIImpl.h"
#include "llvm/CodeGen/CostTable.h"
#include "llvm/CodeGen/TargetLowering.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/Support/Debug.h"
using namespace llvm;

#define DEBUG_TYPE "systemztti"

//===----------------------------------------------------------------------===//
//
// SystemZ cost model.
//
//===----------------------------------------------------------------------===//

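// Return true if V is used (possibly through bitcasts or GEPs) as the source
// operand of a non-volatile memcpy. Any other instruction use encountered
// while walking the users is reported through OtherUse.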
static bool isUsedAsMemCpySource(const Value *V, bool &OtherUse) {
  bool UsedAsMemCpySource = false;
  for (const User *U : V->users())
    if (const Instruction *User = dyn_cast<Instruction>(U)) {
      if (isa<BitCastInst>(User) || isa<GetElementPtrInst>(User)) {
        UsedAsMemCpySource |= isUsedAsMemCpySource(User, OtherUse);
        continue;
      }
      if (const MemCpyInst *Memcpy = dyn_cast<MemCpyInst>(User)) {
        if (Memcpy->getOperand(1) == V && !Memcpy->isVolatile()) {
          UsedAsMemCpySource = true;
          continue;
        }
      }
      OtherUse = true;
    }
  return UsedAsMemCpySource;
}

unsigned SystemZTTIImpl::adjustInliningThreshold(const CallBase *CB) const {
  unsigned Bonus = 0;

  // Increase the threshold if an incoming argument is used only as a memcpy
  // source.
  if (Function *Callee = CB->getCalledFunction())
    for (Argument &Arg : Callee->args()) {
      bool OtherUse = false;
      if (isUsedAsMemCpySource(&Arg, OtherUse) && !OtherUse)
        Bonus += 150;
    }

  LLVM_DEBUG(if (Bonus)
               dbgs() << "++ SZTTI Adding inlining bonus: " << Bonus << "\n";);
  return Bonus;
}

InstructionCost SystemZTTIImpl::getIntImmCost(const APInt &Imm, Type *Ty,
                                              TTI::TargetCostKind CostKind) {
  assert(Ty->isIntegerTy());

  unsigned BitSize = Ty->getPrimitiveSizeInBits();
  // There is no cost model for constants with a bit size of 0. Return TCC_Free
  // here, so that constant hoisting will ignore this constant.
  if (BitSize == 0)
    return TTI::TCC_Free;
  // No cost model is implemented yet for operations on integers wider than
  // 128 bits.
  if ((!ST->hasVector() && BitSize > 64) || BitSize > 128)
    return TTI::TCC_Free;

  if (Imm == 0)
    return TTI::TCC_Free;

  if (Imm.getBitWidth() <= 64) {
    // Constants loaded via lgfi.
    if (isInt<32>(Imm.getSExtValue()))
      return TTI::TCC_Basic;
    // Constants loaded via llilf.
    if (isUInt<32>(Imm.getZExtValue()))
      return TTI::TCC_Basic;
    // Constants loaded via llihf:
    if ((Imm.getZExtValue() & 0xffffffff) == 0)
      return TTI::TCC_Basic;

    return 2 * TTI::TCC_Basic;
  }

  // i128 immediates are loaded from the constant pool.
  return 2 * TTI::TCC_Basic;
}

InstructionCost SystemZTTIImpl::getIntImmCostInst(unsigned Opcode, unsigned Idx,
                                                  const APInt &Imm, Type *Ty,
                                                  TTI::TargetCostKind CostKind,
                                                  Instruction *Inst) {
  assert(Ty->isIntegerTy());

  unsigned BitSize = Ty->getPrimitiveSizeInBits();
  // There is no cost model for constants with a bit size of 0. Return TCC_Free
  // here, so that constant hoisting will ignore this constant.
  if (BitSize == 0)
    return TTI::TCC_Free;
  // No cost model is implemented yet for operations on integers wider than
  // 64 bits.
  if (BitSize > 64)
    return TTI::TCC_Free;

  switch (Opcode) {
  default:
    return TTI::TCC_Free;
  case Instruction::GetElementPtr:
    // Always hoist the base address of a GetElementPtr. This prevents the
    // creation of new constants for every base constant that gets constant
    // folded with the offset.
    if (Idx == 0)
      return 2 * TTI::TCC_Basic;
    return TTI::TCC_Free;
  case Instruction::Store:
    if (Idx == 0 && Imm.getBitWidth() <= 64) {
      // Any 8-bit immediate store can be implemented via mvi.
      if (BitSize == 8)
        return TTI::TCC_Free;
      // 16-bit immediate values can be stored via mvhhi/mvhi/mvghi.
      if (isInt<16>(Imm.getSExtValue()))
        return TTI::TCC_Free;
    }
    break;
  case Instruction::ICmp:
    if (Idx == 1 && Imm.getBitWidth() <= 64) {
      // Comparisons against signed 32-bit immediates implemented via cgfi.
      if (isInt<32>(Imm.getSExtValue()))
        return TTI::TCC_Free;
      // Comparisons against unsigned 32-bit immediates implemented via clgfi.
      if (isUInt<32>(Imm.getZExtValue()))
        return TTI::TCC_Free;
    }
    break;
  case Instruction::Add:
  case Instruction::Sub:
    if (Idx == 1 && Imm.getBitWidth() <= 64) {
      // We use algfi/slgfi to add/subtract 32-bit unsigned immediates.
      if (isUInt<32>(Imm.getZExtValue()))
        return TTI::TCC_Free;
      // Or their negation, by swapping addition vs. subtraction.
      if (isUInt<32>(-Imm.getSExtValue()))
        return TTI::TCC_Free;
    }
    break;
  case Instruction::Mul:
    if (Idx == 1 && Imm.getBitWidth() <= 64) {
      // We use msgfi to multiply by 32-bit signed immediates.
      if (isInt<32>(Imm.getSExtValue()))
        return TTI::TCC_Free;
    }
    break;
  case Instruction::Or:
  case Instruction::Xor:
    if (Idx == 1 && Imm.getBitWidth() <= 64) {
      // Masks supported by oilf/xilf.
      if (isUInt<32>(Imm.getZExtValue()))
        return TTI::TCC_Free;
      // Masks supported by oihf/xihf.
      if ((Imm.getZExtValue() & 0xffffffff) == 0)
        return TTI::TCC_Free;
    }
    break;
  case Instruction::And:
    if (Idx == 1 && Imm.getBitWidth() <= 64) {
      // Any 32-bit AND operation can be implemented via nilf.
      if (BitSize <= 32)
        return TTI::TCC_Free;
      // 64-bit masks supported by nilf.
      if (isUInt<32>(~Imm.getZExtValue()))
        return TTI::TCC_Free;
      // 64-bit masks supported by nilh.
      if ((Imm.getZExtValue() & 0xffffffff) == 0xffffffff)
        return TTI::TCC_Free;
      // Some 64-bit AND operations can be implemented via risbg.
      const SystemZInstrInfo *TII = ST->getInstrInfo();
      unsigned Start, End;
      if (TII->isRxSBGMask(Imm.getZExtValue(), BitSize, Start, End))
        return TTI::TCC_Free;
    }
    break;
  case Instruction::Shl:
  case Instruction::LShr:
  case Instruction::AShr:
    // Always return TCC_Free for the shift value of a shift instruction.
    if (Idx == 1)
      return TTI::TCC_Free;
    break;
  case Instruction::UDiv:
  case Instruction::SDiv:
  case Instruction::URem:
  case Instruction::SRem:
  case Instruction::Trunc:
  case Instruction::ZExt:
  case Instruction::SExt:
  case Instruction::IntToPtr:
  case Instruction::PtrToInt:
  case Instruction::BitCast:
  case Instruction::PHI:
  case Instruction::Call:
  case Instruction::Select:
  case Instruction::Ret:
  case Instruction::Load:
    break;
  }

  return SystemZTTIImpl::getIntImmCost(Imm, Ty, CostKind);
}

InstructionCost
SystemZTTIImpl::getIntImmCostIntrin(Intrinsic::ID IID, unsigned Idx,
                                    const APInt &Imm, Type *Ty,
                                    TTI::TargetCostKind CostKind) {
  assert(Ty->isIntegerTy());

  unsigned BitSize = Ty->getPrimitiveSizeInBits();
  // There is no cost model for constants with a bit size of 0. Return TCC_Free
  // here, so that constant hoisting will ignore this constant.
  if (BitSize == 0)
    return TTI::TCC_Free;
  // No cost model is implemented yet for operations on integers wider than
  // 64 bits.
  if (BitSize > 64)
    return TTI::TCC_Free;

  switch (IID) {
  default:
    return TTI::TCC_Free;
  case Intrinsic::sadd_with_overflow:
  case Intrinsic::uadd_with_overflow:
  case Intrinsic::ssub_with_overflow:
  case Intrinsic::usub_with_overflow:
    // These get expanded to include a normal addition/subtraction.
    if (Idx == 1 && Imm.getBitWidth() <= 64) {
      if (isUInt<32>(Imm.getZExtValue()))
        return TTI::TCC_Free;
      if (isUInt<32>(-Imm.getSExtValue()))
        return TTI::TCC_Free;
    }
    break;
  case Intrinsic::smul_with_overflow:
  case Intrinsic::umul_with_overflow:
    // These get expanded to include a normal multiplication.
    if (Idx == 1 && Imm.getBitWidth() <= 64) {
      if (isInt<32>(Imm.getSExtValue()))
        return TTI::TCC_Free;
    }
    break;
  case Intrinsic::experimental_stackmap:
    if ((Idx < 2) || (Imm.getBitWidth() <= 64 && isInt<64>(Imm.getSExtValue())))
      return TTI::TCC_Free;
    break;
  case Intrinsic::experimental_patchpoint_void:
  case Intrinsic::experimental_patchpoint_i64:
    if ((Idx < 4) || (Imm.getBitWidth() <= 64 && isInt<64>(Imm.getSExtValue())))
      return TTI::TCC_Free;
    break;
  }
  return SystemZTTIImpl::getIntImmCost(Imm, Ty, CostKind);
}

TargetTransformInfo::PopcntSupportKind
SystemZTTIImpl::getPopcntSupport(unsigned TyWidth) {
  assert(isPowerOf2_32(TyWidth) && "Type width must be power of 2");
  if (ST->hasPopulationCount() && TyWidth <= 64)
    return TTI::PSK_FastHardware;
  return TTI::PSK_Software;
}

void SystemZTTIImpl::getUnrollingPreferences(Loop *L, ScalarEvolution &SE,
                                             TTI::UnrollingPreferences &UP,
                                             OptimizationRemarkEmitter *ORE) {
  // Find out if L contains a call, what the machine instruction count
  // estimate is, and how many stores there are.
  bool HasCall = false;
  InstructionCost NumStores = 0;
  for (auto &BB : L->blocks())
    for (auto &I : *BB) {
      if (isa<CallInst>(&I) || isa<InvokeInst>(&I)) {
        if (const Function *F = cast<CallBase>(I).getCalledFunction()) {
          if (isLoweredToCall(F))
            HasCall = true;
          if (F->getIntrinsicID() == Intrinsic::memcpy ||
              F->getIntrinsicID() == Intrinsic::memset)
            NumStores++;
        } else { // indirect call.
          HasCall = true;
        }
      }
      if (isa<StoreInst>(&I)) {
        Type *MemAccessTy = I.getOperand(0)->getType();
        NumStores += getMemoryOpCost(Instruction::Store, MemAccessTy,
                                     std::nullopt, 0, TTI::TCK_RecipThroughput);
      }
    }

  // The z13 processor will run out of store tags if too many stores
  // are fed into it too quickly. Therefore make sure there are not
  // too many stores in the resulting unrolled loop.
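  // Cap the unroll count so that the unrolled loop is estimated to contain at
  // most about 12 stores (a memcpy/memset intrinsic counts as one store).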
  unsigned const NumStoresVal = *NumStores.getValue();
  unsigned const Max = (NumStoresVal ? (12 / NumStoresVal) : UINT_MAX);

  if (HasCall) {
    // If the loop has any calls, only allow full unrolling (up to Max) and
    // disable partial unrolling.
    UP.FullUnrollMaxCount = Max;
    UP.MaxCount = 1;
    return;
  }

  UP.MaxCount = Max;
  if (UP.MaxCount <= 1)
    return;

  // Allow partial and runtime trip count unrolling.
  UP.Partial = UP.Runtime = true;

  UP.PartialThreshold = 75;
  UP.DefaultUnrollRuntimeCount = 4;

  // Allow expensive instructions in the pre-header of the loop.
  UP.AllowExpensiveTripCount = true;

  UP.Force = true;
}

void SystemZTTIImpl::getPeelingPreferences(Loop *L, ScalarEvolution &SE,
                                           TTI::PeelingPreferences &PP) {
  BaseT::getPeelingPreferences(L, SE, PP);
}

bool SystemZTTIImpl::isLSRCostLess(const TargetTransformInfo::LSRCost &C1,
                                   const TargetTransformInfo::LSRCost &C2) {
  // SystemZ specific: check instruction count (first), and don't care about
  // ImmCost, since offsets are checked explicitly.
  return std::tie(C1.Insns, C1.NumRegs, C1.AddRecCost,
                  C1.NumIVMuls, C1.NumBaseAdds,
                  C1.ScaleCost, C1.SetupCost) <
         std::tie(C2.Insns, C2.NumRegs, C2.AddRecCost,
                  C2.NumIVMuls, C2.NumBaseAdds,
                  C2.ScaleCost, C2.SetupCost);
}

unsigned SystemZTTIImpl::getNumberOfRegisters(unsigned ClassID) const {
  bool Vector = (ClassID == 1);
  if (!Vector)
    // Discount the stack pointer. Also leave out %r0, since it can't
    // be used in an address.
    return 14;
  if (ST->hasVector())
    return 32;
  return 0;
}

TypeSize
SystemZTTIImpl::getRegisterBitWidth(TargetTransformInfo::RegisterKind K) const {
  switch (K) {
  case TargetTransformInfo::RGK_Scalar:
    return TypeSize::getFixed(64);
  case TargetTransformInfo::RGK_FixedWidthVector:
    return TypeSize::getFixed(ST->hasVector() ? 128 : 0);
  case TargetTransformInfo::RGK_ScalableVector:
    return TypeSize::getScalable(0);
  }

  llvm_unreachable("Unsupported register kind");
}

unsigned SystemZTTIImpl::getMinPrefetchStride(unsigned NumMemAccesses,
                                              unsigned NumStridedMemAccesses,
                                              unsigned NumPrefetches,
                                              bool HasCall) const {
  // Don't prefetch a loop with many far apart accesses.
  if (NumPrefetches > 16)
    return UINT_MAX;

  // Emit prefetch instructions for smaller strides in cases where we think
  // the hardware prefetcher might not be able to keep up.
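  // Heuristic: many strided accesses, no calls in the loop, and at most one
  // non-strided access per 32 strided ones.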
  if (NumStridedMemAccesses > 32 && !HasCall &&
      (NumMemAccesses - NumStridedMemAccesses) * 32 <= NumStridedMemAccesses)
    return 1;

  return ST->hasMiscellaneousExtensions3() ? 8192 : 2048;
}

bool SystemZTTIImpl::hasDivRemOp(Type *DataType, bool IsSigned) {
  EVT VT = TLI->getValueType(DL, DataType);
  return (VT.isScalarInteger() && TLI->isTypeLegal(VT));
}

// Return the bit size for the scalar type or vector element
// type. getScalarSizeInBits() returns 0 for a pointer type.
static unsigned getScalarSizeInBits(Type *Ty) {
  unsigned Size =
      (Ty->isPtrOrPtrVectorTy() ? 64U : Ty->getScalarSizeInBits());
  assert(Size > 0 && "Element must have non-zero size.");
  return Size;
}

// getNumberOfParts() calls getTypeLegalizationCost() which splits the vector
// type until it is legal. This would e.g. return 4 for <6 x i64>, instead of
// 3.
static unsigned getNumVectorRegs(Type *Ty) {
  auto *VTy = cast<FixedVectorType>(Ty);
  unsigned WideBits = getScalarSizeInBits(Ty) * VTy->getNumElements();
  assert(WideBits > 0 && "Could not compute size of vector");
  return ((WideBits % 128U) ? ((WideBits / 128U) + 1) : (WideBits / 128U));
}

InstructionCost SystemZTTIImpl::getArithmeticInstrCost(
    unsigned Opcode, Type *Ty, TTI::TargetCostKind CostKind,
    TTI::OperandValueInfo Op1Info, TTI::OperandValueInfo Op2Info,
    ArrayRef<const Value *> Args,
    const Instruction *CxtI) {

  // TODO: Handle more cost kinds.
  if (CostKind != TTI::TCK_RecipThroughput)
    return BaseT::getArithmeticInstrCost(Opcode, Ty, CostKind, Op1Info,
                                         Op2Info, Args, CxtI);

  // TODO: return a good value for BB-VECTORIZER that includes the
  // immediate loads, which we do not want to count for the loop
  // vectorizer, since they are hopefully hoisted out of the loop. This
  // would require a new parameter 'InLoop', but not sure if constant
  // args are common enough to motivate this.

  unsigned ScalarBits = Ty->getScalarSizeInBits();

  // There are three cases of division and remainder: Dividing with a register
  // needs a divide instruction. A divisor which is a power of two constant
  // can be implemented with a sequence of shifts. Any other constant needs a
  // multiply and shifts.
  const unsigned DivInstrCost = 20;
  const unsigned DivMulSeqCost = 10;
  const unsigned SDivPow2Cost = 4;

  bool SignedDivRem =
      Opcode == Instruction::SDiv || Opcode == Instruction::SRem;
  bool UnsignedDivRem =
      Opcode == Instruction::UDiv || Opcode == Instruction::URem;

  // Check for a constant divisor.
  bool DivRemConst = false;
  bool DivRemConstPow2 = false;
  if ((SignedDivRem || UnsignedDivRem) && Args.size() == 2) {
    if (const Constant *C = dyn_cast<Constant>(Args[1])) {
      const ConstantInt *CVal =
          (C->getType()->isVectorTy()
               ? dyn_cast_or_null<const ConstantInt>(C->getSplatValue())
               : dyn_cast<const ConstantInt>(C));
      if (CVal && (CVal->getValue().isPowerOf2() ||
                   CVal->getValue().isNegatedPowerOf2()))
        DivRemConstPow2 = true;
      else
        DivRemConst = true;
    }
  }

  if (!Ty->isVectorTy()) {
    // These FP operations are supported with a dedicated instruction for
    // float, double and fp128 (base implementation assumes float generally
    // costs 2).
    if (Opcode == Instruction::FAdd || Opcode == Instruction::FSub ||
        Opcode == Instruction::FMul || Opcode == Instruction::FDiv)
      return 1;

    // There is no native support for FRem.
    if (Opcode == Instruction::FRem)
      return LIBCALL_COST;

    // Give discount for some combined logical operations if supported.
    if (Args.size() == 2) {
      if (Opcode == Instruction::Xor) {
        for (const Value *A : Args) {
          if (const Instruction *I = dyn_cast<Instruction>(A))
            if (I->hasOneUse() &&
                (I->getOpcode() == Instruction::Or ||
                 I->getOpcode() == Instruction::And ||
                 I->getOpcode() == Instruction::Xor))
              if ((ScalarBits <= 64 && ST->hasMiscellaneousExtensions3()) ||
                  (isInt128InVR(Ty) &&
                   (I->getOpcode() == Instruction::Or ||
                    ST->hasVectorEnhancements1())))
                return 0;
        }
      }
      else if (Opcode == Instruction::And || Opcode == Instruction::Or) {
        for (const Value *A : Args) {
          if (const Instruction *I = dyn_cast<Instruction>(A))
            if ((I->hasOneUse() && I->getOpcode() == Instruction::Xor) &&
                ((ScalarBits <= 64 && ST->hasMiscellaneousExtensions3()) ||
                 (isInt128InVR(Ty) &&
                  (Opcode == Instruction::And ||
                   ST->hasVectorEnhancements1()))))
              return 0;
        }
      }
    }

    // Or requires one instruction, although it has custom handling for i64.
    if (Opcode == Instruction::Or)
      return 1;

    if (Opcode == Instruction::Xor && ScalarBits == 1) {
      if (ST->hasLoadStoreOnCond2())
        return 5; // 2 * (li 0; loc 1); xor
      return 7; // 2 * ipm sequences ; xor ; shift ; compare
    }

    if (DivRemConstPow2)
      return (SignedDivRem ? SDivPow2Cost : 1);
    if (DivRemConst)
      return DivMulSeqCost;
    if (SignedDivRem || UnsignedDivRem)
      return DivInstrCost;
  }
  else if (ST->hasVector()) {
    auto *VTy = cast<FixedVectorType>(Ty);
    unsigned VF = VTy->getNumElements();
    unsigned NumVectors = getNumVectorRegs(Ty);

    // These vector operations are custom handled, but are still supported
    // with one instruction per vector, regardless of element size.
    if (Opcode == Instruction::Shl || Opcode == Instruction::LShr ||
        Opcode == Instruction::AShr) {
      return NumVectors;
    }

    if (DivRemConstPow2)
      return (NumVectors * (SignedDivRem ? SDivPow2Cost : 1));
    if (DivRemConst) {
      SmallVector<Type *> Tys(Args.size(), Ty);
      return VF * DivMulSeqCost +
             getScalarizationOverhead(VTy, Args, Tys, CostKind);
    }
    if ((SignedDivRem || UnsignedDivRem) && VF > 4)
      // Temporary hack: disable high vectorization factors with integer
      // division/remainder, which will get scalarized and handled with
      // GR128 registers. The mischeduler is not clever enough to avoid
      // spilling yet.
      return 1000;

    // These FP operations are supported with a single vector instruction for
    // double (base implementation assumes float generally costs 2). For
    // FP128, the scalar cost is 1, and there is no overhead since the values
    // are already in scalar registers.
    if (Opcode == Instruction::FAdd || Opcode == Instruction::FSub ||
        Opcode == Instruction::FMul || Opcode == Instruction::FDiv) {
      switch (ScalarBits) {
      case 32: {
        // The vector enhancements facility 1 provides v4f32 instructions.
        if (ST->hasVectorEnhancements1())
          return NumVectors;
        // Return the cost of multiple scalar invocation plus the cost of
        // inserting and extracting the values.
        InstructionCost ScalarCost =
            getArithmeticInstrCost(Opcode, Ty->getScalarType(), CostKind);
        SmallVector<Type *> Tys(Args.size(), Ty);
        InstructionCost Cost =
            (VF * ScalarCost) +
            getScalarizationOverhead(VTy, Args, Tys, CostKind);
        // FIXME: VF 2 for these FP operations are currently just as
        // expensive as for VF 4.
        if (VF == 2)
          Cost *= 2;
        return Cost;
      }
      case 64:
      case 128:
        return NumVectors;
      default:
        break;
      }
    }

    // There is no native support for FRem.
    if (Opcode == Instruction::FRem) {
      SmallVector<Type *> Tys(Args.size(), Ty);
      InstructionCost Cost = (VF * LIBCALL_COST) +
                             getScalarizationOverhead(VTy, Args, Tys, CostKind);
      // FIXME: VF 2 for float is currently just as expensive as for VF 4.
      if (VF == 2 && ScalarBits == 32)
        Cost *= 2;
      return Cost;
    }
  }

  // Fallback to the default implementation.
  return BaseT::getArithmeticInstrCost(Opcode, Ty, CostKind, Op1Info, Op2Info,
                                       Args, CxtI);
}

InstructionCost SystemZTTIImpl::getShuffleCost(TTI::ShuffleKind Kind,
                                               VectorType *Tp,
                                               ArrayRef<int> Mask,
                                               TTI::TargetCostKind CostKind,
                                               int Index, VectorType *SubTp,
                                               ArrayRef<const Value *> Args) {
  Kind = improveShuffleKindFromMask(Kind, Mask, Tp, Index, SubTp);
  if (ST->hasVector()) {
    unsigned NumVectors = getNumVectorRegs(Tp);

    // TODO: Since fp32 is expanded, the shuffle cost should always be 0.

    // FP128 values are always in scalar registers, so there is no work
    // involved with a shuffle, except for broadcast. In that case register
    // moves are done with a single instruction per element.
    if (Tp->getScalarType()->isFP128Ty())
      return (Kind == TargetTransformInfo::SK_Broadcast ? NumVectors - 1 : 0);

    switch (Kind) {
    case TargetTransformInfo::SK_ExtractSubvector:
      // ExtractSubvector Index indicates start offset.

      // Extracting a subvector from first index is a noop.
      return (Index == 0 ? 0 : NumVectors);

    case TargetTransformInfo::SK_Broadcast:
      // Loop vectorizer calls here to figure out the extra cost of
      // broadcasting a loaded value to all elements of a vector. Since vlrep
      // loads and replicates with a single instruction, adjust the returned
      // value.
      return NumVectors - 1;

    default:

      // SystemZ supports single instruction permutation / replication.
      return NumVectors;
    }
  }

  return BaseT::getShuffleCost(Kind, Tp, Mask, CostKind, Index, SubTp);
}

// Return the log2 difference of the element sizes of the two vector types.
static unsigned getElSizeLog2Diff(Type *Ty0, Type *Ty1) {
  unsigned Bits0 = Ty0->getScalarSizeInBits();
  unsigned Bits1 = Ty1->getScalarSizeInBits();

  if (Bits1 > Bits0)
    return (Log2_32(Bits1) - Log2_32(Bits0));

  return (Log2_32(Bits0) - Log2_32(Bits1));
}

// Return the number of instructions needed to truncate SrcTy to DstTy.
unsigned SystemZTTIImpl::
getVectorTruncCost(Type *SrcTy, Type *DstTy) {
  assert(SrcTy->isVectorTy() && DstTy->isVectorTy());
  assert(SrcTy->getPrimitiveSizeInBits().getFixedValue() >
             DstTy->getPrimitiveSizeInBits().getFixedValue() &&
         "Packing must reduce size of vector type.");
  assert(cast<FixedVectorType>(SrcTy)->getNumElements() ==
             cast<FixedVectorType>(DstTy)->getNumElements() &&
         "Packing should not change number of elements.");

  // TODO: Since fp32 is expanded, the extract cost should always be 0.

  unsigned NumParts = getNumVectorRegs(SrcTy);
  if (NumParts <= 2)
    // Up to 2 vector registers can be truncated efficiently with pack or
    // permute. The latter requires an immediate mask to be loaded, which
    // typically gets hoisted out of a loop. TODO: return a good value for
    // BB-VECTORIZER that includes the immediate loads, which we do not want
    // to count for the loop vectorizer.
    return 1;

  unsigned Cost = 0;
  unsigned Log2Diff = getElSizeLog2Diff(SrcTy, DstTy);
  unsigned VF = cast<FixedVectorType>(SrcTy)->getNumElements();
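  // Each step halves the element width; one pack (or permute) is needed per
  // remaining vector register part at that step.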
  for (unsigned P = 0; P < Log2Diff; ++P) {
    if (NumParts > 1)
      NumParts /= 2;
    Cost += NumParts;
  }

  // Currently, a general mix of permutes and pack instructions is output by
  // isel, which follow the cost computation above except for this case which
  // is one instruction less:
  if (VF == 8 && SrcTy->getScalarSizeInBits() == 64 &&
      DstTy->getScalarSizeInBits() == 8)
    Cost--;

  return Cost;
}

// Return the cost of converting a vector bitmask produced by a compare
// (SrcTy), to the type of the select or extend instruction (DstTy).
unsigned SystemZTTIImpl::
getVectorBitmaskConversionCost(Type *SrcTy, Type *DstTy) {
  assert(SrcTy->isVectorTy() && DstTy->isVectorTy() &&
         "Should only be called with vector types.");

  unsigned PackCost = 0;
  unsigned SrcScalarBits = SrcTy->getScalarSizeInBits();
  unsigned DstScalarBits = DstTy->getScalarSizeInBits();
  unsigned Log2Diff = getElSizeLog2Diff(SrcTy, DstTy);
  if (SrcScalarBits > DstScalarBits)
    // The bitmask will be truncated.
    PackCost = getVectorTruncCost(SrcTy, DstTy);
  else if (SrcScalarBits < DstScalarBits) {
    unsigned DstNumParts = getNumVectorRegs(DstTy);
    // Each vector select needs its part of the bitmask unpacked.
    PackCost = Log2Diff * DstNumParts;
    // Extra cost for moving part of mask before unpacking.
    PackCost += DstNumParts - 1;
  }

  return PackCost;
}

// Return the type of the compared operands. This is needed to compute the
// cost for a Select / ZExt or SExt instruction.
static Type *getCmpOpsType(const Instruction *I, unsigned VF = 1) {
  Type *OpTy = nullptr;
  if (CmpInst *CI = dyn_cast<CmpInst>(I->getOperand(0)))
    OpTy = CI->getOperand(0)->getType();
  else if (Instruction *LogicI = dyn_cast<Instruction>(I->getOperand(0)))
    if (LogicI->getNumOperands() == 2)
      if (CmpInst *CI0 = dyn_cast<CmpInst>(LogicI->getOperand(0)))
        if (isa<CmpInst>(LogicI->getOperand(1)))
          OpTy = CI0->getOperand(0)->getType();

  if (OpTy != nullptr) {
    if (VF == 1) {
      assert(!OpTy->isVectorTy() && "Expected scalar type");
      return OpTy;
    }
    // Return the potentially vectorized type based on 'I' and 'VF'. 'I' may
    // be either scalar or already vectorized with a same or lesser VF.
    Type *ElTy = OpTy->getScalarType();
    return FixedVectorType::get(ElTy, VF);
  }

  return nullptr;
}

// Get the cost of converting a boolean vector to a vector with same width
// and element size as Dst, plus the cost of zero extending if needed.
unsigned SystemZTTIImpl::
getBoolVecToIntConversionCost(unsigned Opcode, Type *Dst,
                              const Instruction *I) {
  auto *DstVTy = cast<FixedVectorType>(Dst);
  unsigned VF = DstVTy->getNumElements();
  unsigned Cost = 0;
  // If we know the widths of the compared operands, get any cost of
  // converting them to match Dst. Otherwise assume the same widths.
7570b57cec5SDimitry Andric Type *CmpOpTy = ((I != nullptr) ? getCmpOpsType(I, VF) : nullptr);
7580b57cec5SDimitry Andric if (CmpOpTy != nullptr)
7590b57cec5SDimitry Andric Cost = getVectorBitmaskConversionCost(CmpOpTy, Dst);
7600b57cec5SDimitry Andric if (Opcode == Instruction::ZExt || Opcode == Instruction::UIToFP)
7610b57cec5SDimitry Andric // One 'vn' per dst vector with an immediate mask.
7620b57cec5SDimitry Andric Cost += getNumVectorRegs(Dst);
7630b57cec5SDimitry Andric return Cost;
7640b57cec5SDimitry Andric }
7650b57cec5SDimitry Andric
getCastInstrCost(unsigned Opcode,Type * Dst,Type * Src,TTI::CastContextHint CCH,TTI::TargetCostKind CostKind,const Instruction * I)766fe6060f1SDimitry Andric InstructionCost SystemZTTIImpl::getCastInstrCost(unsigned Opcode, Type *Dst,
767fe6060f1SDimitry Andric Type *Src,
768e8d8bef9SDimitry Andric TTI::CastContextHint CCH,
7695ffd83dbSDimitry Andric TTI::TargetCostKind CostKind,
7700b57cec5SDimitry Andric const Instruction *I) {
7715ffd83dbSDimitry Andric // FIXME: Can the logic below also be used for these cost kinds?
7725ffd83dbSDimitry Andric if (CostKind == TTI::TCK_CodeSize || CostKind == TTI::TCK_SizeAndLatency) {
773fe6060f1SDimitry Andric auto BaseCost = BaseT::getCastInstrCost(Opcode, Dst, Src, CCH, CostKind, I);
7745ffd83dbSDimitry Andric return BaseCost == 0 ? BaseCost : 1;
7755ffd83dbSDimitry Andric }
7765ffd83dbSDimitry Andric
7770b57cec5SDimitry Andric unsigned DstScalarBits = Dst->getScalarSizeInBits();
7780b57cec5SDimitry Andric unsigned SrcScalarBits = Src->getScalarSizeInBits();
7790b57cec5SDimitry Andric
7805ffd83dbSDimitry Andric if (!Src->isVectorTy()) {
7815ffd83dbSDimitry Andric assert (!Dst->isVectorTy());
7825ffd83dbSDimitry Andric
7835ffd83dbSDimitry Andric if (Opcode == Instruction::SIToFP || Opcode == Instruction::UIToFP) {
784*a58f00eaSDimitry Andric if (Src->isIntegerTy(128))
785*a58f00eaSDimitry Andric return LIBCALL_COST;
7865ffd83dbSDimitry Andric if (SrcScalarBits >= 32 ||
7875ffd83dbSDimitry Andric (I != nullptr && isa<LoadInst>(I->getOperand(0))))
7885ffd83dbSDimitry Andric return 1;
7895ffd83dbSDimitry Andric return SrcScalarBits > 1 ? 2 /*i8/i16 extend*/ : 5 /*branch seq.*/;
7905ffd83dbSDimitry Andric }
7915ffd83dbSDimitry Andric
792*a58f00eaSDimitry Andric if ((Opcode == Instruction::FPToSI || Opcode == Instruction::FPToUI) &&
793*a58f00eaSDimitry Andric Dst->isIntegerTy(128))
794*a58f00eaSDimitry Andric return LIBCALL_COST;
795*a58f00eaSDimitry Andric
796*a58f00eaSDimitry Andric if ((Opcode == Instruction::ZExt || Opcode == Instruction::SExt)) {
797*a58f00eaSDimitry Andric if (Src->isIntegerTy(1)) {
798*a58f00eaSDimitry Andric if (DstScalarBits == 128)
799*a58f00eaSDimitry Andric return 5 /*branch seq.*/;
800*a58f00eaSDimitry Andric
8015ffd83dbSDimitry Andric if (ST->hasLoadStoreOnCond2())
8025ffd83dbSDimitry Andric return 2; // li 0; loc 1
8035ffd83dbSDimitry Andric
8045ffd83dbSDimitry Andric // This should be extension of a compare i1 result, which is done with
8055ffd83dbSDimitry Andric // ipm and a varying sequence of instructions.
8065ffd83dbSDimitry Andric unsigned Cost = 0;
8075ffd83dbSDimitry Andric if (Opcode == Instruction::SExt)
8085ffd83dbSDimitry Andric Cost = (DstScalarBits < 64 ? 3 : 4);
8095ffd83dbSDimitry Andric if (Opcode == Instruction::ZExt)
8105ffd83dbSDimitry Andric Cost = 3;
8115ffd83dbSDimitry Andric Type *CmpOpTy = ((I != nullptr) ? getCmpOpsType(I) : nullptr);
8125ffd83dbSDimitry Andric if (CmpOpTy != nullptr && CmpOpTy->isFloatingPointTy())
8135ffd83dbSDimitry Andric // If operands of an fp-type was compared, this costs +1.
8145ffd83dbSDimitry Andric Cost++;
8155ffd83dbSDimitry Andric return Cost;
8165ffd83dbSDimitry Andric }
817*a58f00eaSDimitry Andric else if (isInt128InVR(Dst)) {
818*a58f00eaSDimitry Andric // Extensions from GPR to i128 (in VR) typically costs two instructions,
819*a58f00eaSDimitry Andric // but a zero-extending load would be just one extra instruction.
820*a58f00eaSDimitry Andric if (Opcode == Instruction::ZExt && I != nullptr)
821*a58f00eaSDimitry Andric if (LoadInst *Ld = dyn_cast<LoadInst>(I->getOperand(0)))
822*a58f00eaSDimitry Andric if (Ld->hasOneUse())
823*a58f00eaSDimitry Andric return 1;
824*a58f00eaSDimitry Andric return 2;
825*a58f00eaSDimitry Andric }
826*a58f00eaSDimitry Andric }
827*a58f00eaSDimitry Andric
828*a58f00eaSDimitry Andric if (Opcode == Instruction::Trunc && isInt128InVR(Src) && I != nullptr) {
829*a58f00eaSDimitry Andric if (LoadInst *Ld = dyn_cast<LoadInst>(I->getOperand(0)))
830*a58f00eaSDimitry Andric if (Ld->hasOneUse())
831*a58f00eaSDimitry Andric return 0; // Will be converted to GPR load.
832*a58f00eaSDimitry Andric bool OnlyTruncatingStores = true;
833*a58f00eaSDimitry Andric for (const User *U : I->users())
834*a58f00eaSDimitry Andric if (!isa<StoreInst>(U)) {
835*a58f00eaSDimitry Andric OnlyTruncatingStores = false;
836*a58f00eaSDimitry Andric break;
837*a58f00eaSDimitry Andric }
838*a58f00eaSDimitry Andric if (OnlyTruncatingStores)
839*a58f00eaSDimitry Andric return 0;
840*a58f00eaSDimitry Andric return 2; // Vector element extraction.
841*a58f00eaSDimitry Andric }
8425ffd83dbSDimitry Andric }
8435ffd83dbSDimitry Andric else if (ST->hasVector()) {
844fe6060f1SDimitry Andric // Vector to scalar cast.
8455ffd83dbSDimitry Andric auto *SrcVecTy = cast<FixedVectorType>(Src);
846fe6060f1SDimitry Andric auto *DstVecTy = dyn_cast<FixedVectorType>(Dst);
847fe6060f1SDimitry Andric if (!DstVecTy) {
848fe6060f1SDimitry Andric // TODO: tune vector-to-scalar cast.
849fe6060f1SDimitry Andric return BaseT::getCastInstrCost(Opcode, Dst, Src, CCH, CostKind, I);
850fe6060f1SDimitry Andric }
8515ffd83dbSDimitry Andric unsigned VF = SrcVecTy->getNumElements();
8520b57cec5SDimitry Andric unsigned NumDstVectors = getNumVectorRegs(Dst);
8530b57cec5SDimitry Andric unsigned NumSrcVectors = getNumVectorRegs(Src);
8540b57cec5SDimitry Andric
8550b57cec5SDimitry Andric if (Opcode == Instruction::Trunc) {
8560b57cec5SDimitry Andric if (Src->getScalarSizeInBits() == Dst->getScalarSizeInBits())
8570b57cec5SDimitry Andric return 0; // Check for NOOP conversions.
8580b57cec5SDimitry Andric return getVectorTruncCost(Src, Dst);
8590b57cec5SDimitry Andric }
8600b57cec5SDimitry Andric
8610b57cec5SDimitry Andric if (Opcode == Instruction::ZExt || Opcode == Instruction::SExt) {
8620b57cec5SDimitry Andric if (SrcScalarBits >= 8) {
86381ad6265SDimitry Andric // ZExt will use either a single unpack or a vector permute.
86481ad6265SDimitry Andric if (Opcode == Instruction::ZExt)
86581ad6265SDimitry Andric return NumDstVectors;
86681ad6265SDimitry Andric
86781ad6265SDimitry Andric // SExt will be handled with one unpack per doubling of width.
8680b57cec5SDimitry Andric unsigned NumUnpacks = getElSizeLog2Diff(Src, Dst);
8690b57cec5SDimitry Andric
8700b57cec5SDimitry Andric // For types that spans multiple vector registers, some additional
8710b57cec5SDimitry Andric // instructions are used to setup the unpacking.
8720b57cec5SDimitry Andric unsigned NumSrcVectorOps =
8730b57cec5SDimitry Andric (NumUnpacks > 1 ? (NumDstVectors - NumSrcVectors)
8740b57cec5SDimitry Andric : (NumDstVectors / 2));
8750b57cec5SDimitry Andric
8760b57cec5SDimitry Andric return (NumUnpacks * NumDstVectors) + NumSrcVectorOps;
8770b57cec5SDimitry Andric }
8780b57cec5SDimitry Andric else if (SrcScalarBits == 1)
8790b57cec5SDimitry Andric return getBoolVecToIntConversionCost(Opcode, Dst, I);
8800b57cec5SDimitry Andric }
8810b57cec5SDimitry Andric
8820b57cec5SDimitry Andric if (Opcode == Instruction::SIToFP || Opcode == Instruction::UIToFP ||
8830b57cec5SDimitry Andric Opcode == Instruction::FPToSI || Opcode == Instruction::FPToUI) {
8840b57cec5SDimitry Andric // TODO: Fix base implementation which could simplify things a bit here
8850b57cec5SDimitry Andric // (seems to miss on differentiating on scalar/vector types).
8860b57cec5SDimitry Andric
8878bcb0991SDimitry Andric // Only 64 bit vector conversions are natively supported before z15.
8880b57cec5SDimitry Andric if (DstScalarBits == 64 || ST->hasVectorEnhancements2()) {
8890b57cec5SDimitry Andric if (SrcScalarBits == DstScalarBits)
8900b57cec5SDimitry Andric return NumDstVectors;
8910b57cec5SDimitry Andric
8920b57cec5SDimitry Andric if (SrcScalarBits == 1)
8930b57cec5SDimitry Andric return getBoolVecToIntConversionCost(Opcode, Dst, I) + NumDstVectors;
8940b57cec5SDimitry Andric }
8950b57cec5SDimitry Andric
8960b57cec5SDimitry Andric // Return the cost of multiple scalar invocation plus the cost of
8970b57cec5SDimitry Andric // inserting and extracting the values. Base implementation does not
8980b57cec5SDimitry Andric // realize float->int gets scalarized.
899fe6060f1SDimitry Andric InstructionCost ScalarCost = getCastInstrCost(
900e8d8bef9SDimitry Andric Opcode, Dst->getScalarType(), Src->getScalarType(), CCH, CostKind);
901fe6060f1SDimitry Andric InstructionCost TotCost = VF * ScalarCost;
9020b57cec5SDimitry Andric bool NeedsInserts = true, NeedsExtracts = true;
9030b57cec5SDimitry Andric // FP128 registers do not get inserted or extracted.
9040b57cec5SDimitry Andric if (DstScalarBits == 128 &&
9050b57cec5SDimitry Andric (Opcode == Instruction::SIToFP || Opcode == Instruction::UIToFP))
9060b57cec5SDimitry Andric NeedsInserts = false;
9070b57cec5SDimitry Andric if (SrcScalarBits == 128 &&
9080b57cec5SDimitry Andric (Opcode == Instruction::FPToSI || Opcode == Instruction::FPToUI))
9090b57cec5SDimitry Andric NeedsExtracts = false;
9100b57cec5SDimitry Andric
911bdd1243dSDimitry Andric TotCost += getScalarizationOverhead(SrcVecTy, /*Insert*/ false,
912bdd1243dSDimitry Andric NeedsExtracts, CostKind);
913bdd1243dSDimitry Andric TotCost += getScalarizationOverhead(DstVecTy, NeedsInserts,
914bdd1243dSDimitry Andric /*Extract*/ false, CostKind);
9150b57cec5SDimitry Andric
9160b57cec5SDimitry Andric // FIXME: VF 2 for float<->i32 is currently just as expensive as for VF 4.
9170b57cec5SDimitry Andric if (VF == 2 && SrcScalarBits == 32 && DstScalarBits == 32)
9180b57cec5SDimitry Andric TotCost *= 2;
9190b57cec5SDimitry Andric
9200b57cec5SDimitry Andric return TotCost;
9210b57cec5SDimitry Andric }
9220b57cec5SDimitry Andric
9230b57cec5SDimitry Andric if (Opcode == Instruction::FPTrunc) {
9240b57cec5SDimitry Andric if (SrcScalarBits == 128) // fp128 -> double/float + inserts of elements.
9255ffd83dbSDimitry Andric return VF /*ldxbr/lexbr*/ +
926bdd1243dSDimitry Andric getScalarizationOverhead(DstVecTy, /*Insert*/ true,
927bdd1243dSDimitry Andric /*Extract*/ false, CostKind);
9280b57cec5SDimitry Andric else // double -> float
9290b57cec5SDimitry Andric return VF / 2 /*vledb*/ + std::max(1U, VF / 4 /*vperm*/);
9300b57cec5SDimitry Andric }
9310b57cec5SDimitry Andric
9320b57cec5SDimitry Andric if (Opcode == Instruction::FPExt) {
9330b57cec5SDimitry Andric if (SrcScalarBits == 32 && DstScalarBits == 64) {
9340b57cec5SDimitry Andric // float -> double is very rare and currently unoptimized. Instead of
9350b57cec5SDimitry Andric // using vldeb, which can do two at a time, all conversions are
9360b57cec5SDimitry Andric // scalarized.
9370b57cec5SDimitry Andric return VF * 2;
9380b57cec5SDimitry Andric }
9390b57cec5SDimitry Andric // -> fp128. VF * lxdb/lxeb + extraction of elements.
940bdd1243dSDimitry Andric return VF + getScalarizationOverhead(SrcVecTy, /*Insert*/ false,
941bdd1243dSDimitry Andric /*Extract*/ true, CostKind);
9420b57cec5SDimitry Andric }
9430b57cec5SDimitry Andric }
9440b57cec5SDimitry Andric
945e8d8bef9SDimitry Andric return BaseT::getCastInstrCost(Opcode, Dst, Src, CCH, CostKind, I);
9460b57cec5SDimitry Andric }
9470b57cec5SDimitry Andric
9480b57cec5SDimitry Andric // Scalar i8 / i16 operations are typically performed after first extending
9490b57cec5SDimitry Andric // the operands to i32.
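// For example: in an i16 icmp where one operand is a load and the other is
// neither a load nor a constant, only the non-load operand is counted as
// needing an explicit extension, for an extra cost of 1.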
getOperandsExtensionCost(const Instruction * I)9500b57cec5SDimitry Andric static unsigned getOperandsExtensionCost(const Instruction *I) {
9510b57cec5SDimitry Andric unsigned ExtCost = 0;
9520b57cec5SDimitry Andric for (Value *Op : I->operands())
9530b57cec5SDimitry Andric // A load of i8 or i16 sign/zero extends to i32.
9540b57cec5SDimitry Andric if (!isa<LoadInst>(Op) && !isa<ConstantInt>(Op))
9550b57cec5SDimitry Andric ExtCost++;
9560b57cec5SDimitry Andric
9570b57cec5SDimitry Andric return ExtCost;
9580b57cec5SDimitry Andric }
9590b57cec5SDimitry Andric
getCmpSelInstrCost(unsigned Opcode,Type * ValTy,Type * CondTy,CmpInst::Predicate VecPred,TTI::TargetCostKind CostKind,const Instruction * I)960fe6060f1SDimitry Andric InstructionCost SystemZTTIImpl::getCmpSelInstrCost(unsigned Opcode, Type *ValTy,
961fe6060f1SDimitry Andric Type *CondTy,
962fe6060f1SDimitry Andric CmpInst::Predicate VecPred,
9635ffd83dbSDimitry Andric TTI::TargetCostKind CostKind,
9645ffd83dbSDimitry Andric const Instruction *I) {
9655ffd83dbSDimitry Andric if (CostKind != TTI::TCK_RecipThroughput)
966e8d8bef9SDimitry Andric return BaseT::getCmpSelInstrCost(Opcode, ValTy, CondTy, VecPred, CostKind);
9675ffd83dbSDimitry Andric
9685ffd83dbSDimitry Andric if (!ValTy->isVectorTy()) {
9695ffd83dbSDimitry Andric switch (Opcode) {
9705ffd83dbSDimitry Andric case Instruction::ICmp: {
9715ffd83dbSDimitry Andric // A loaded value compared with 0, where the load has multiple users, becomes
9725ffd83dbSDimitry Andric // Load And Test. The load is then not foldable, so return 0 cost for the ICmp.
9735ffd83dbSDimitry Andric unsigned ScalarBits = ValTy->getScalarSizeInBits();
974*a58f00eaSDimitry Andric if (I != nullptr && (ScalarBits == 32 || ScalarBits == 64))
9755ffd83dbSDimitry Andric if (LoadInst *Ld = dyn_cast<LoadInst>(I->getOperand(0)))
9765ffd83dbSDimitry Andric if (const ConstantInt *C = dyn_cast<ConstantInt>(I->getOperand(1)))
9775ffd83dbSDimitry Andric if (!Ld->hasOneUse() && Ld->getParent() == I->getParent() &&
978e8d8bef9SDimitry Andric C->isZero())
9795ffd83dbSDimitry Andric return 0;
9805ffd83dbSDimitry Andric
9815ffd83dbSDimitry Andric unsigned Cost = 1;
9825ffd83dbSDimitry Andric if (ValTy->isIntegerTy() && ValTy->getScalarSizeInBits() <= 16)
9835ffd83dbSDimitry Andric Cost += (I != nullptr ? getOperandsExtensionCost(I) : 2);
9845ffd83dbSDimitry Andric return Cost;
9855ffd83dbSDimitry Andric }
9865ffd83dbSDimitry Andric case Instruction::Select:
987*a58f00eaSDimitry Andric if (ValTy->isFloatingPointTy() || isInt128InVR(ValTy))
988*a58f00eaSDimitry Andric return 4; // No LOC for FP / i128 - costs a conditional jump.
9895ffd83dbSDimitry Andric return 1; // Load On Condition / Select Register.
9905ffd83dbSDimitry Andric }
9915ffd83dbSDimitry Andric }
9925ffd83dbSDimitry Andric else if (ST->hasVector()) {
9935ffd83dbSDimitry Andric unsigned VF = cast<FixedVectorType>(ValTy)->getNumElements();
9940b57cec5SDimitry Andric
9950b57cec5SDimitry Andric // Called with a compare instruction.
9960b57cec5SDimitry Andric if (Opcode == Instruction::ICmp || Opcode == Instruction::FCmp) {
9970b57cec5SDimitry Andric unsigned PredicateExtraCost = 0;
9980b57cec5SDimitry Andric if (I != nullptr) {
9990b57cec5SDimitry Andric // Some predicates cost one or two extra instructions.
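// For instance, integer NE is typically an equality compare followed by a
// complement of the result mask (one extra instruction), while the FP
// predicates listed below are modeled as needing two extra instructions.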
10000b57cec5SDimitry Andric switch (cast<CmpInst>(I)->getPredicate()) {
10010b57cec5SDimitry Andric case CmpInst::Predicate::ICMP_NE:
10020b57cec5SDimitry Andric case CmpInst::Predicate::ICMP_UGE:
10030b57cec5SDimitry Andric case CmpInst::Predicate::ICMP_ULE:
10040b57cec5SDimitry Andric case CmpInst::Predicate::ICMP_SGE:
10050b57cec5SDimitry Andric case CmpInst::Predicate::ICMP_SLE:
10060b57cec5SDimitry Andric PredicateExtraCost = 1;
10070b57cec5SDimitry Andric break;
10080b57cec5SDimitry Andric case CmpInst::Predicate::FCMP_ONE:
10090b57cec5SDimitry Andric case CmpInst::Predicate::FCMP_ORD:
10100b57cec5SDimitry Andric case CmpInst::Predicate::FCMP_UEQ:
10110b57cec5SDimitry Andric case CmpInst::Predicate::FCMP_UNO:
10120b57cec5SDimitry Andric PredicateExtraCost = 2;
10130b57cec5SDimitry Andric break;
10140b57cec5SDimitry Andric default:
10150b57cec5SDimitry Andric break;
10160b57cec5SDimitry Andric }
10170b57cec5SDimitry Andric }
10180b57cec5SDimitry Andric
10190b57cec5SDimitry Andric // Float is handled with 2*vmr[lh]f + 2*vldeb + vfchdb for each pair of
10200b57cec5SDimitry Andric // floats. FIXME: <2 x float> generates the same code as <4 x float>.
10210b57cec5SDimitry Andric unsigned CmpCostPerVector = (ValTy->getScalarType()->isFloatTy() ? 10 : 1);
10220b57cec5SDimitry Andric unsigned NumVecs_cmp = getNumVectorRegs(ValTy);
10230b57cec5SDimitry Andric
10240b57cec5SDimitry Andric unsigned Cost = (NumVecs_cmp * (CmpCostPerVector + PredicateExtraCost));
10250b57cec5SDimitry Andric return Cost;
10260b57cec5SDimitry Andric }
10270b57cec5SDimitry Andric else { // Called with a select instruction.
10280b57cec5SDimitry Andric assert(Opcode == Instruction::Select);
10290b57cec5SDimitry Andric
10300b57cec5SDimitry Andric // We can figure out the extra cost of packing / unpacking if the
10310b57cec5SDimitry Andric // instruction was passed and the compare instruction is found.
10320b57cec5SDimitry Andric unsigned PackCost = 0;
10330b57cec5SDimitry Andric Type *CmpOpTy = ((I != nullptr) ? getCmpOpsType(I, VF) : nullptr);
10340b57cec5SDimitry Andric if (CmpOpTy != nullptr)
10350b57cec5SDimitry Andric PackCost =
10360b57cec5SDimitry Andric getVectorBitmaskConversionCost(CmpOpTy, ValTy);
10370b57cec5SDimitry Andric
10380b57cec5SDimitry Andric return getNumVectorRegs(ValTy) /*vsel*/ + PackCost;
10390b57cec5SDimitry Andric }
10400b57cec5SDimitry Andric }
10410b57cec5SDimitry Andric
1042e8d8bef9SDimitry Andric return BaseT::getCmpSelInstrCost(Opcode, ValTy, CondTy, VecPred, CostKind);
10430b57cec5SDimitry Andric }
10440b57cec5SDimitry Andric
getVectorInstrCost(unsigned Opcode,Type * Val,TTI::TargetCostKind CostKind,unsigned Index,Value * Op0,Value * Op1)1045fe6060f1SDimitry Andric InstructionCost SystemZTTIImpl::getVectorInstrCost(unsigned Opcode, Type *Val,
1046bdd1243dSDimitry Andric TTI::TargetCostKind CostKind,
1047bdd1243dSDimitry Andric unsigned Index, Value *Op0,
1048bdd1243dSDimitry Andric Value *Op1) {
10490b57cec5SDimitry Andric // vlvgp will insert two GPRs into a vector register, so only count half the
10500b57cec5SDimitry Andric // number of instructions.
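// E.g. building a <2 x i64> from two GPRs: the element-0 insert is charged 1
// (the vlvgp itself) and the element-1 insert 0, since a single vlvgp covers
// both elements.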
10510b57cec5SDimitry Andric if (Opcode == Instruction::InsertElement && Val->isIntOrIntVectorTy(64))
10520b57cec5SDimitry Andric return ((Index % 2 == 0) ? 1 : 0);
10530b57cec5SDimitry Andric
10540b57cec5SDimitry Andric if (Opcode == Instruction::ExtractElement) {
10550b57cec5SDimitry Andric int Cost = ((getScalarSizeInBits(Val) == 1) ? 2 /*+test-under-mask*/ : 1);
10560b57cec5SDimitry Andric
10570b57cec5SDimitry Andric // Give a slight penalty for moving out of the vector pipeline to the FXU unit.
10580b57cec5SDimitry Andric if (Index == 0 && Val->isIntOrIntVectorTy())
10590b57cec5SDimitry Andric Cost += 1;
10600b57cec5SDimitry Andric
10610b57cec5SDimitry Andric return Cost;
10620b57cec5SDimitry Andric }
10630b57cec5SDimitry Andric
1064bdd1243dSDimitry Andric return BaseT::getVectorInstrCost(Opcode, Val, CostKind, Index, Op0, Op1);
10650b57cec5SDimitry Andric }
10660b57cec5SDimitry Andric
10670b57cec5SDimitry Andric // Check if a load may be folded as a memory operand in its user.
10680b57cec5SDimitry Andric bool SystemZTTIImpl::
isFoldableLoad(const LoadInst * Ld,const Instruction * & FoldedValue)10690b57cec5SDimitry Andric isFoldableLoad(const LoadInst *Ld, const Instruction *&FoldedValue) {
10700b57cec5SDimitry Andric if (!Ld->hasOneUse())
10710b57cec5SDimitry Andric return false;
10720b57cec5SDimitry Andric FoldedValue = Ld;
10730b57cec5SDimitry Andric const Instruction *UserI = cast<Instruction>(*Ld->user_begin());
10740b57cec5SDimitry Andric unsigned LoadedBits = getScalarSizeInBits(Ld->getType());
10750b57cec5SDimitry Andric unsigned TruncBits = 0;
10760b57cec5SDimitry Andric unsigned SExtBits = 0;
10770b57cec5SDimitry Andric unsigned ZExtBits = 0;
10780b57cec5SDimitry Andric if (UserI->hasOneUse()) {
10790b57cec5SDimitry Andric unsigned UserBits = UserI->getType()->getScalarSizeInBits();
10800b57cec5SDimitry Andric if (isa<TruncInst>(UserI))
10810b57cec5SDimitry Andric TruncBits = UserBits;
10820b57cec5SDimitry Andric else if (isa<SExtInst>(UserI))
10830b57cec5SDimitry Andric SExtBits = UserBits;
10840b57cec5SDimitry Andric else if (isa<ZExtInst>(UserI))
10850b57cec5SDimitry Andric ZExtBits = UserBits;
10860b57cec5SDimitry Andric }
10870b57cec5SDimitry Andric if (TruncBits || SExtBits || ZExtBits) {
10880b57cec5SDimitry Andric FoldedValue = UserI;
10890b57cec5SDimitry Andric UserI = cast<Instruction>(*UserI->user_begin());
10900b57cec5SDimitry Andric // Load (single use) -> trunc/extend (single use) -> UserI
10910b57cec5SDimitry Andric }
10920b57cec5SDimitry Andric if ((UserI->getOpcode() == Instruction::Sub ||
10930b57cec5SDimitry Andric UserI->getOpcode() == Instruction::SDiv ||
10940b57cec5SDimitry Andric UserI->getOpcode() == Instruction::UDiv) &&
10950b57cec5SDimitry Andric UserI->getOperand(1) != FoldedValue)
10960b57cec5SDimitry Andric return false; // Not commutative, only RHS foldable.
10970b57cec5SDimitry Andric // LoadOrTruncBits holds the number of effectively loaded bits, but 0 if an
10980b57cec5SDimitry Andric // extension was made of the load.
10990b57cec5SDimitry Andric unsigned LoadOrTruncBits =
11000b57cec5SDimitry Andric ((SExtBits || ZExtBits) ? 0 : (TruncBits ? TruncBits : LoadedBits));
11010b57cec5SDimitry Andric switch (UserI->getOpcode()) {
11020b57cec5SDimitry Andric case Instruction::Add: // SE: 16->32, 16/32->64, z14:16->64. ZE: 32->64
11030b57cec5SDimitry Andric case Instruction::Sub:
11040b57cec5SDimitry Andric case Instruction::ICmp:
11050b57cec5SDimitry Andric if (LoadedBits == 32 && ZExtBits == 64)
11060b57cec5SDimitry Andric return true;
1107bdd1243dSDimitry Andric [[fallthrough]];
11080b57cec5SDimitry Andric case Instruction::Mul: // SE: 16->32, 32->64, z14:16->64
11090b57cec5SDimitry Andric if (UserI->getOpcode() != Instruction::ICmp) {
11100b57cec5SDimitry Andric if (LoadedBits == 16 &&
11110b57cec5SDimitry Andric (SExtBits == 32 ||
11120b57cec5SDimitry Andric (SExtBits == 64 && ST->hasMiscellaneousExtensions2())))
11130b57cec5SDimitry Andric return true;
11140b57cec5SDimitry Andric if (LoadOrTruncBits == 16)
11150b57cec5SDimitry Andric return true;
11160b57cec5SDimitry Andric }
1117bdd1243dSDimitry Andric [[fallthrough]];
11180b57cec5SDimitry Andric case Instruction::SDiv: // SE: 32->64
11190b57cec5SDimitry Andric if (LoadedBits == 32 && SExtBits == 64)
11200b57cec5SDimitry Andric return true;
1121bdd1243dSDimitry Andric [[fallthrough]];
11220b57cec5SDimitry Andric case Instruction::UDiv:
11230b57cec5SDimitry Andric case Instruction::And:
11240b57cec5SDimitry Andric case Instruction::Or:
11250b57cec5SDimitry Andric case Instruction::Xor:
11260b57cec5SDimitry Andric // This also makes sense for float operations, but is disabled for now due
11270b57cec5SDimitry Andric // to regressions.
11280b57cec5SDimitry Andric // case Instruction::FCmp:
11290b57cec5SDimitry Andric // case Instruction::FAdd:
11300b57cec5SDimitry Andric // case Instruction::FSub:
11310b57cec5SDimitry Andric // case Instruction::FMul:
11320b57cec5SDimitry Andric // case Instruction::FDiv:
11330b57cec5SDimitry Andric
11340b57cec5SDimitry Andric // All possible extensions of memory operands have been checked above.
11350b57cec5SDimitry Andric
11360b57cec5SDimitry Andric // Comparison between memory and immediate.
11370b57cec5SDimitry Andric if (UserI->getOpcode() == Instruction::ICmp)
11380b57cec5SDimitry Andric if (ConstantInt *CI = dyn_cast<ConstantInt>(UserI->getOperand(1)))
1139e8d8bef9SDimitry Andric if (CI->getValue().isIntN(16))
11400b57cec5SDimitry Andric return true;
11410b57cec5SDimitry Andric return (LoadOrTruncBits == 32 || LoadOrTruncBits == 64);
11430b57cec5SDimitry Andric }
11440b57cec5SDimitry Andric return false;
11450b57cec5SDimitry Andric }
11460b57cec5SDimitry Andric
isBswapIntrinsicCall(const Value * V)11470b57cec5SDimitry Andric static bool isBswapIntrinsicCall(const Value *V) {
11480b57cec5SDimitry Andric if (const Instruction *I = dyn_cast<Instruction>(V))
11490b57cec5SDimitry Andric if (auto *CI = dyn_cast<CallInst>(I))
11500b57cec5SDimitry Andric if (auto *F = CI->getCalledFunction())
11510b57cec5SDimitry Andric if (F->getIntrinsicID() == Intrinsic::bswap)
11520b57cec5SDimitry Andric return true;
11530b57cec5SDimitry Andric return false;
11540b57cec5SDimitry Andric }
11550b57cec5SDimitry Andric
getMemoryOpCost(unsigned Opcode,Type * Src,MaybeAlign Alignment,unsigned AddressSpace,TTI::TargetCostKind CostKind,TTI::OperandValueInfo OpInfo,const Instruction * I)1156fe6060f1SDimitry Andric InstructionCost SystemZTTIImpl::getMemoryOpCost(unsigned Opcode, Type *Src,
1157fe6060f1SDimitry Andric MaybeAlign Alignment,
1158fe6060f1SDimitry Andric unsigned AddressSpace,
11595ffd83dbSDimitry Andric TTI::TargetCostKind CostKind,
1160bdd1243dSDimitry Andric TTI::OperandValueInfo OpInfo,
11610b57cec5SDimitry Andric const Instruction *I) {
11620b57cec5SDimitry Andric assert(!Src->isVoidTy() && "Invalid type");
11630b57cec5SDimitry Andric
11645ffd83dbSDimitry Andric // TODO: Handle other cost kinds.
11655ffd83dbSDimitry Andric if (CostKind != TTI::TCK_RecipThroughput)
11665ffd83dbSDimitry Andric return 1;
11675ffd83dbSDimitry Andric
11680b57cec5SDimitry Andric if (!Src->isVectorTy() && Opcode == Instruction::Load && I != nullptr) {
11690b57cec5SDimitry Andric // Store the load or its truncated or extended value in FoldedValue.
11700b57cec5SDimitry Andric const Instruction *FoldedValue = nullptr;
11710b57cec5SDimitry Andric if (isFoldableLoad(cast<LoadInst>(I), FoldedValue)) {
11720b57cec5SDimitry Andric const Instruction *UserI = cast<Instruction>(*FoldedValue->user_begin());
11730b57cec5SDimitry Andric assert (UserI->getNumOperands() == 2 && "Expected a binop.");
11740b57cec5SDimitry Andric
11750b57cec5SDimitry Andric // UserI can't fold two loads, so in that case return 0 cost only
11760b57cec5SDimitry Andric // half of the time.
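// In effect, when both operands are foldable loads, the load feeding
// operand 0 is treated as folded (cost 0) and the load feeding operand 1
// is charged as a normal load (cost 1).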
11770b57cec5SDimitry Andric for (unsigned i = 0; i < 2; ++i) {
11780b57cec5SDimitry Andric if (UserI->getOperand(i) == FoldedValue)
11790b57cec5SDimitry Andric continue;
11800b57cec5SDimitry Andric
11810b57cec5SDimitry Andric if (Instruction *OtherOp = dyn_cast<Instruction>(UserI->getOperand(i))){
11820b57cec5SDimitry Andric LoadInst *OtherLoad = dyn_cast<LoadInst>(OtherOp);
11830b57cec5SDimitry Andric if (!OtherLoad &&
11840b57cec5SDimitry Andric (isa<TruncInst>(OtherOp) || isa<SExtInst>(OtherOp) ||
11850b57cec5SDimitry Andric isa<ZExtInst>(OtherOp)))
11860b57cec5SDimitry Andric OtherLoad = dyn_cast<LoadInst>(OtherOp->getOperand(0));
11870b57cec5SDimitry Andric if (OtherLoad && isFoldableLoad(OtherLoad, FoldedValue/*dummy*/))
11880b57cec5SDimitry Andric return i == 0; // Both operands foldable.
11890b57cec5SDimitry Andric }
11900b57cec5SDimitry Andric }
11910b57cec5SDimitry Andric
11920b57cec5SDimitry Andric return 0; // Only I is foldable in user.
11930b57cec5SDimitry Andric }
11940b57cec5SDimitry Andric }
11950b57cec5SDimitry Andric
1196271697daSDimitry Andric // Type legalization (via getNumberOfParts) can't handle structs.
1197271697daSDimitry Andric if (TLI->getValueType(DL, Src, true) == MVT::Other)
1198271697daSDimitry Andric return BaseT::getMemoryOpCost(Opcode, Src, Alignment, AddressSpace,
1199271697daSDimitry Andric CostKind);
1200271697daSDimitry Andric
1201*a58f00eaSDimitry Andric // FP128 is a legal type but kept in a register pair on older CPUs.
1202*a58f00eaSDimitry Andric if (Src->isFP128Ty() && !ST->hasVectorEnhancements1())
1203*a58f00eaSDimitry Andric return 2;
1204*a58f00eaSDimitry Andric
12050b57cec5SDimitry Andric unsigned NumOps =
12060b57cec5SDimitry Andric (Src->isVectorTy() ? getNumVectorRegs(Src) : getNumberOfParts(Src));
12070b57cec5SDimitry Andric
12080b57cec5SDimitry Andric // Store/Load reversed saves one instruction.
12090b57cec5SDimitry Andric if (((!Src->isVectorTy() && NumOps == 1) || ST->hasVectorEnhancements2()) &&
12100b57cec5SDimitry Andric I != nullptr) {
12110b57cec5SDimitry Andric if (Opcode == Instruction::Load && I->hasOneUse()) {
12120b57cec5SDimitry Andric const Instruction *LdUser = cast<Instruction>(*I->user_begin());
12130b57cec5SDimitry Andric // In case of load -> bswap -> store, return normal cost for the load.
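// (If the bswap result is only stored, the byte reversal is instead
// attributed to the store, which is costed as 0 in the store case below,
// so the load keeps its normal cost here.)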
12140b57cec5SDimitry Andric if (isBswapIntrinsicCall(LdUser) &&
12150b57cec5SDimitry Andric (!LdUser->hasOneUse() || !isa<StoreInst>(*LdUser->user_begin())))
12160b57cec5SDimitry Andric return 0;
12170b57cec5SDimitry Andric }
12180b57cec5SDimitry Andric else if (const StoreInst *SI = dyn_cast<StoreInst>(I)) {
12190b57cec5SDimitry Andric const Value *StoredVal = SI->getValueOperand();
12200b57cec5SDimitry Andric if (StoredVal->hasOneUse() && isBswapIntrinsicCall(StoredVal))
12210b57cec5SDimitry Andric return 0;
12220b57cec5SDimitry Andric }
12230b57cec5SDimitry Andric }
12240b57cec5SDimitry Andric
12250b57cec5SDimitry Andric return NumOps;
12260b57cec5SDimitry Andric }
12270b57cec5SDimitry Andric
12280b57cec5SDimitry Andric // The generic implementation of getInterleavedMemoryOpCost() is based on
12290b57cec5SDimitry Andric // adding costs of the memory operations plus all the extracts and inserts
12300b57cec5SDimitry Andric // needed for using / defining the vector operands. The SystemZ version does
12310b57cec5SDimitry Andric // roughly the same but bases the computations on vector permutations
12320b57cec5SDimitry Andric // instead.
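// As a rough example (assuming both indices are used): de-interleaving a
// <8 x i32> load with Factor == 2 needs 2 vector loads plus one vperm per
// extracted <4 x i32> value, for a total cost of 4.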
getInterleavedMemoryOpCost(unsigned Opcode,Type * VecTy,unsigned Factor,ArrayRef<unsigned> Indices,Align Alignment,unsigned AddressSpace,TTI::TargetCostKind CostKind,bool UseMaskForCond,bool UseMaskForGaps)1233fe6060f1SDimitry Andric InstructionCost SystemZTTIImpl::getInterleavedMemoryOpCost(
12345ffd83dbSDimitry Andric unsigned Opcode, Type *VecTy, unsigned Factor, ArrayRef<unsigned> Indices,
12355ffd83dbSDimitry Andric Align Alignment, unsigned AddressSpace, TTI::TargetCostKind CostKind,
12365ffd83dbSDimitry Andric bool UseMaskForCond, bool UseMaskForGaps) {
12370b57cec5SDimitry Andric if (UseMaskForCond || UseMaskForGaps)
12380b57cec5SDimitry Andric return BaseT::getInterleavedMemoryOpCost(Opcode, VecTy, Factor, Indices,
12395ffd83dbSDimitry Andric Alignment, AddressSpace, CostKind,
12400b57cec5SDimitry Andric UseMaskForCond, UseMaskForGaps);
12410b57cec5SDimitry Andric assert(isa<VectorType>(VecTy) &&
12420b57cec5SDimitry Andric "Expect a vector type for interleaved memory op");
12430b57cec5SDimitry Andric
12445ffd83dbSDimitry Andric unsigned NumElts = cast<FixedVectorType>(VecTy)->getNumElements();
12450b57cec5SDimitry Andric assert(Factor > 1 && NumElts % Factor == 0 && "Invalid interleave factor");
12460b57cec5SDimitry Andric unsigned VF = NumElts / Factor;
12470b57cec5SDimitry Andric unsigned NumEltsPerVecReg = (128U / getScalarSizeInBits(VecTy));
12480b57cec5SDimitry Andric unsigned NumVectorMemOps = getNumVectorRegs(VecTy);
12490b57cec5SDimitry Andric unsigned NumPermutes = 0;
12500b57cec5SDimitry Andric
12510b57cec5SDimitry Andric if (Opcode == Instruction::Load) {
12520b57cec5SDimitry Andric // Loading interleave groups may have gaps, which may mean fewer
12530b57cec5SDimitry Andric // loads. Find out how many vectors will be loaded in total, and how
12540b57cec5SDimitry Andric // many of them each value will be in.
12550b57cec5SDimitry Andric BitVector UsedInsts(NumVectorMemOps, false);
12560b57cec5SDimitry Andric std::vector<BitVector> ValueVecs(Factor, BitVector(NumVectorMemOps, false));
12570b57cec5SDimitry Andric for (unsigned Index : Indices)
12580b57cec5SDimitry Andric for (unsigned Elt = 0; Elt < VF; ++Elt) {
12590b57cec5SDimitry Andric unsigned Vec = (Index + Elt * Factor) / NumEltsPerVecReg;
12600b57cec5SDimitry Andric UsedInsts.set(Vec);
12610b57cec5SDimitry Andric ValueVecs[Index].set(Vec);
12620b57cec5SDimitry Andric }
12630b57cec5SDimitry Andric NumVectorMemOps = UsedInsts.count();
12640b57cec5SDimitry Andric
12650b57cec5SDimitry Andric for (unsigned Index : Indices) {
12660b57cec5SDimitry Andric // Estimate that each loaded source vector containing this Index
12670b57cec5SDimitry Andric // requires one operation, except that vperm can handle two input
12680b57cec5SDimitry Andric // registers the first time for each dst vector.
12690b57cec5SDimitry Andric unsigned NumSrcVecs = ValueVecs[Index].count();
1270fe6060f1SDimitry Andric unsigned NumDstVecs = divideCeil(VF * getScalarSizeInBits(VecTy), 128U);
12710b57cec5SDimitry Andric assert (NumSrcVecs >= NumDstVecs && "Expected at least as many sources");
12720b57cec5SDimitry Andric NumPermutes += std::max(1U, NumSrcVecs - NumDstVecs);
12730b57cec5SDimitry Andric }
12740b57cec5SDimitry Andric } else {
12750b57cec5SDimitry Andric // Estimate the permutes for each stored vector as the smaller of the
12760b57cec5SDimitry Andric // number of elements and the number of source vectors. Subtract one per
12770b57cec5SDimitry Andric // dst vector for vperm (see above).
12780b57cec5SDimitry Andric unsigned NumSrcVecs = std::min(NumEltsPerVecReg, Factor);
12790b57cec5SDimitry Andric unsigned NumDstVecs = NumVectorMemOps;
12800b57cec5SDimitry Andric NumPermutes += (NumDstVecs * NumSrcVecs) - NumDstVecs;
12810b57cec5SDimitry Andric }
12820b57cec5SDimitry Andric
12830b57cec5SDimitry Andric // Cost of load/store operations and the permutations needed.
12840b57cec5SDimitry Andric return NumVectorMemOps + NumPermutes;
12850b57cec5SDimitry Andric }
12860b57cec5SDimitry Andric
getVectorIntrinsicInstrCost(Intrinsic::ID ID,Type * RetTy)12870b57cec5SDimitry Andric static int getVectorIntrinsicInstrCost(Intrinsic::ID ID, Type *RetTy) {
12880b57cec5SDimitry Andric if (RetTy->isVectorTy() && ID == Intrinsic::bswap)
12890b57cec5SDimitry Andric return getNumVectorRegs(RetTy); // VPERM
12900b57cec5SDimitry Andric return -1;
12910b57cec5SDimitry Andric }
12920b57cec5SDimitry Andric
1293fe6060f1SDimitry Andric InstructionCost
getIntrinsicInstrCost(const IntrinsicCostAttributes & ICA,TTI::TargetCostKind CostKind)1294fe6060f1SDimitry Andric SystemZTTIImpl::getIntrinsicInstrCost(const IntrinsicCostAttributes &ICA,
12955ffd83dbSDimitry Andric TTI::TargetCostKind CostKind) {
1296fe6060f1SDimitry Andric InstructionCost Cost =
1297fe6060f1SDimitry Andric getVectorIntrinsicInstrCost(ICA.getID(), ICA.getReturnType());
12980b57cec5SDimitry Andric if (Cost != -1)
12990b57cec5SDimitry Andric return Cost;
13005ffd83dbSDimitry Andric return BaseT::getIntrinsicInstrCost(ICA, CostKind);
13010b57cec5SDimitry Andric }