//===-- SystemZTargetTransformInfo.cpp - SystemZ-specific TTI -------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file implements a TargetTransformInfo analysis pass specific to the
// SystemZ target machine. It uses the target's detailed information to provide
// more precise answers to certain TTI queries, while letting the target
// independent and default TTI implementations handle the rest.
//
//===----------------------------------------------------------------------===//

#include "SystemZTargetTransformInfo.h"
#include "llvm/Analysis/TargetTransformInfo.h"
#include "llvm/CodeGen/BasicTTIImpl.h"
#include "llvm/CodeGen/CostTable.h"
#include "llvm/CodeGen/TargetLowering.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/Support/Debug.h"
using namespace llvm;

#define DEBUG_TYPE "systemztti"

//===----------------------------------------------------------------------===//
//
// SystemZ cost model.
//
//===----------------------------------------------------------------------===//

int SystemZTTIImpl::getIntImmCost(const APInt &Imm, Type *Ty,
                                  TTI::TargetCostKind CostKind) {
  assert(Ty->isIntegerTy());

  unsigned BitSize = Ty->getPrimitiveSizeInBits();
  // There is no cost model for constants with a bit size of 0. Return TCC_Free
  // here, so that constant hoisting will ignore this constant.
  if (BitSize == 0)
    return TTI::TCC_Free;
  // No cost model yet for operations on integers wider than 64 bits.
  if (BitSize > 64)
    return TTI::TCC_Free;

  if (Imm == 0)
    return TTI::TCC_Free;
  if (Imm.getBitWidth() <= 64) {
    // Constants loaded via lgfi.
    if (isInt<32>(Imm.getSExtValue()))
      return TTI::TCC_Basic;
    // Constants loaded via llilf.
    if (isUInt<32>(Imm.getZExtValue()))
      return TTI::TCC_Basic;
    // Constants loaded via llihf.
    if ((Imm.getZExtValue() & 0xffffffff) == 0)
      return TTI::TCC_Basic;

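    // Anything else needs a two-instruction sequence to materialize, e.g.
    // (one possible lowering) llihf for the high 32 bits followed by oilf
    // for the low 32 bits.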
    return 2 * TTI::TCC_Basic;
  }

  return 4 * TTI::TCC_Basic;
}

int SystemZTTIImpl::getIntImmCostInst(unsigned Opcode, unsigned Idx,
                                      const APInt &Imm, Type *Ty,
                                      TTI::TargetCostKind CostKind) {
  assert(Ty->isIntegerTy());

  unsigned BitSize = Ty->getPrimitiveSizeInBits();
  // There is no cost model for constants with a bit size of 0. Return TCC_Free
  // here, so that constant hoisting will ignore this constant.
  if (BitSize == 0)
    return TTI::TCC_Free;
  // No cost model yet for operations on integers wider than 64 bits.
  if (BitSize > 64)
    return TTI::TCC_Free;

  switch (Opcode) {
  default:
    return TTI::TCC_Free;
  case Instruction::GetElementPtr:
    // Always hoist the base address of a GetElementPtr. This prevents the
    // creation of new constants for every base constant that gets constant
    // folded with the offset.
    if (Idx == 0)
      return 2 * TTI::TCC_Basic;
    return TTI::TCC_Free;
  case Instruction::Store:
    if (Idx == 0 && Imm.getBitWidth() <= 64) {
      // Any 8-bit immediate store can be implemented via mvi.
      if (BitSize == 8)
        return TTI::TCC_Free;
      // 16-bit immediate values can be stored via mvhhi/mvhi/mvghi.
      if (isInt<16>(Imm.getSExtValue()))
        return TTI::TCC_Free;
    }
    break;
  case Instruction::ICmp:
    if (Idx == 1 && Imm.getBitWidth() <= 64) {
      // Comparisons against signed 32-bit immediates implemented via cgfi.
      if (isInt<32>(Imm.getSExtValue()))
        return TTI::TCC_Free;
      // Comparisons against unsigned 32-bit immediates implemented via clgfi.
      if (isUInt<32>(Imm.getZExtValue()))
        return TTI::TCC_Free;
    }
    break;
  case Instruction::Add:
  case Instruction::Sub:
    if (Idx == 1 && Imm.getBitWidth() <= 64) {
      // We use algfi/slgfi to add/subtract 32-bit unsigned immediates.
      if (isUInt<32>(Imm.getZExtValue()))
        return TTI::TCC_Free;
      // Or their negation, by swapping addition vs. subtraction.
      if (isUInt<32>(-Imm.getSExtValue()))
        return TTI::TCC_Free;
    }
    break;
  case Instruction::Mul:
    if (Idx == 1 && Imm.getBitWidth() <= 64) {
      // We use msgfi to multiply by 32-bit signed immediates.
      if (isInt<32>(Imm.getSExtValue()))
        return TTI::TCC_Free;
    }
    break;
  case Instruction::Or:
  case Instruction::Xor:
    if (Idx == 1 && Imm.getBitWidth() <= 64) {
      // Masks supported by oilf/xilf.
      if (isUInt<32>(Imm.getZExtValue()))
        return TTI::TCC_Free;
      // Masks supported by oihf/xihf.
      if ((Imm.getZExtValue() & 0xffffffff) == 0)
        return TTI::TCC_Free;
    }
    break;
  case Instruction::And:
    if (Idx == 1 && Imm.getBitWidth() <= 64) {
      // Any 32-bit AND operation can be implemented via nilf.
      if (BitSize <= 32)
        return TTI::TCC_Free;
      // 64-bit masks supported by nilf.
      if (isUInt<32>(~Imm.getZExtValue()))
        return TTI::TCC_Free;
      // 64-bit masks supported by nilh.
      if ((Imm.getZExtValue() & 0xffffffff) == 0xffffffff)
        return TTI::TCC_Free;
      // Some 64-bit AND operations can be implemented via risbg.
      const SystemZInstrInfo *TII = ST->getInstrInfo();
      unsigned Start, End;
      if (TII->isRxSBGMask(Imm.getZExtValue(), BitSize, Start, End))
        return TTI::TCC_Free;
    }
    break;
  case Instruction::Shl:
  case Instruction::LShr:
  case Instruction::AShr:
    // Always return TCC_Free for the shift value of a shift instruction.
    if (Idx == 1)
      return TTI::TCC_Free;
    break;
  case Instruction::UDiv:
  case Instruction::SDiv:
  case Instruction::URem:
  case Instruction::SRem:
  case Instruction::Trunc:
  case Instruction::ZExt:
  case Instruction::SExt:
  case Instruction::IntToPtr:
  case Instruction::PtrToInt:
  case Instruction::BitCast:
  case Instruction::PHI:
  case Instruction::Call:
  case Instruction::Select:
  case Instruction::Ret:
  case Instruction::Load:
    break;
  }

  return SystemZTTIImpl::getIntImmCost(Imm, Ty, CostKind);
}

int SystemZTTIImpl::getIntImmCostIntrin(Intrinsic::ID IID, unsigned Idx,
                                        const APInt &Imm, Type *Ty,
                                        TTI::TargetCostKind CostKind) {
  assert(Ty->isIntegerTy());

  unsigned BitSize = Ty->getPrimitiveSizeInBits();
  // There is no cost model for constants with a bit size of 0. Return TCC_Free
  // here, so that constant hoisting will ignore this constant.
  if (BitSize == 0)
    return TTI::TCC_Free;
  // No cost model yet for operations on integers wider than 64 bits.
  if (BitSize > 64)
    return TTI::TCC_Free;

  switch (IID) {
  default:
    return TTI::TCC_Free;
  case Intrinsic::sadd_with_overflow:
  case Intrinsic::uadd_with_overflow:
  case Intrinsic::ssub_with_overflow:
  case Intrinsic::usub_with_overflow:
    // These get expanded to include a normal addition/subtraction.
    if (Idx == 1 && Imm.getBitWidth() <= 64) {
      if (isUInt<32>(Imm.getZExtValue()))
        return TTI::TCC_Free;
      if (isUInt<32>(-Imm.getSExtValue()))
        return TTI::TCC_Free;
    }
    break;
  case Intrinsic::smul_with_overflow:
  case Intrinsic::umul_with_overflow:
    // These get expanded to include a normal multiplication.
    if (Idx == 1 && Imm.getBitWidth() <= 64) {
      if (isInt<32>(Imm.getSExtValue()))
        return TTI::TCC_Free;
    }
    break;
  case Intrinsic::experimental_stackmap:
    if ((Idx < 2) || (Imm.getBitWidth() <= 64 && isInt<64>(Imm.getSExtValue())))
      return TTI::TCC_Free;
    break;
  case Intrinsic::experimental_patchpoint_void:
  case Intrinsic::experimental_patchpoint_i64:
    if ((Idx < 4) || (Imm.getBitWidth() <= 64 && isInt<64>(Imm.getSExtValue())))
      return TTI::TCC_Free;
    break;
  }
  return SystemZTTIImpl::getIntImmCost(Imm, Ty, CostKind);
}

TargetTransformInfo::PopcntSupportKind
SystemZTTIImpl::getPopcntSupport(unsigned TyWidth) {
  assert(isPowerOf2_32(TyWidth) && "Type width must be power of 2");
  if (ST->hasPopulationCount() && TyWidth <= 64)
    return TTI::PSK_FastHardware;
  return TTI::PSK_Software;
}

void SystemZTTIImpl::getUnrollingPreferences(Loop *L, ScalarEvolution &SE,
                                             TTI::UnrollingPreferences &UP) {
  // Find out if L contains a call, what the machine instruction count
  // estimate is, and how many stores there are.
  bool HasCall = false;
  unsigned NumStores = 0;
  for (auto &BB : L->blocks())
    for (auto &I : *BB) {
      if (isa<CallInst>(&I) || isa<InvokeInst>(&I)) {
        if (const Function *F = cast<CallBase>(I).getCalledFunction()) {
          if (isLoweredToCall(F))
            HasCall = true;
          if (F->getIntrinsicID() == Intrinsic::memcpy ||
              F->getIntrinsicID() == Intrinsic::memset)
            NumStores++;
        } else { // indirect call.
          HasCall = true;
        }
      }
      if (isa<StoreInst>(&I)) {
        Type *MemAccessTy = I.getOperand(0)->getType();
        NumStores += getMemoryOpCost(Instruction::Store, MemAccessTy, None, 0,
                                     TTI::TCK_RecipThroughput);
      }
    }

  // The z13 processor will run out of store tags if too many stores
  // are fed into it too quickly. Therefore make sure there are not
  // too many stores in the resulting unrolled loop.
  unsigned const Max = (NumStores ? (12 / NumStores) : UINT_MAX);
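  // For example, a loop body with an estimated three stores would be
  // unrolled at most 12 / 3 = 4 times.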

  if (HasCall) {
    // If the loop contains calls, allow only full unrolling; disable
    // partial unrolling.
    UP.FullUnrollMaxCount = Max;
    UP.MaxCount = 1;
    return;
  }

  UP.MaxCount = Max;
  if (UP.MaxCount <= 1)
    return;

  // Allow partial and runtime trip count unrolling.
  UP.Partial = UP.Runtime = true;

  UP.PartialThreshold = 75;
  UP.DefaultUnrollRuntimeCount = 4;

  // Allow expensive instructions in the pre-header of the loop.
  UP.AllowExpensiveTripCount = true;

  UP.Force = true;
}

void SystemZTTIImpl::getPeelingPreferences(Loop *L, ScalarEvolution &SE,
                                           TTI::PeelingPreferences &PP) {
  BaseT::getPeelingPreferences(L, SE, PP);
}

bool SystemZTTIImpl::isLSRCostLess(TargetTransformInfo::LSRCost &C1,
                                   TargetTransformInfo::LSRCost &C2) {
  // SystemZ specific: check instruction count (first), and don't care about
  // ImmCost, since offsets are checked explicitly.
  return std::tie(C1.Insns, C1.NumRegs, C1.AddRecCost,
                  C1.NumIVMuls, C1.NumBaseAdds,
                  C1.ScaleCost, C1.SetupCost) <
    std::tie(C2.Insns, C2.NumRegs, C2.AddRecCost,
             C2.NumIVMuls, C2.NumBaseAdds,
             C2.ScaleCost, C2.SetupCost);
}

unsigned SystemZTTIImpl::getNumberOfRegisters(unsigned ClassID) const {
  bool Vector = (ClassID == 1);
  if (!Vector)
    // Discount the stack pointer.  Also leave out %r0, since it can't
    // be used in an address.
    return 14;
  if (ST->hasVector())
    return 32;
  return 0;
}

unsigned SystemZTTIImpl::getRegisterBitWidth(bool Vector) const {
  if (!Vector)
    return 64;
  if (ST->hasVector())
    return 128;
  return 0;
}

unsigned SystemZTTIImpl::getMinPrefetchStride(unsigned NumMemAccesses,
                                              unsigned NumStridedMemAccesses,
                                              unsigned NumPrefetches,
                                              bool HasCall) const {
  // Don't prefetch in a loop with many far-apart accesses.
  if (NumPrefetches > 16)
    return UINT_MAX;

  // Emit prefetch instructions for smaller strides in cases where we think
  // the hardware prefetcher might not be able to keep up.
  if (NumStridedMemAccesses > 32 &&
      NumStridedMemAccesses == NumMemAccesses && !HasCall)
    return 1;

  return ST->hasMiscellaneousExtensions3() ? 8192 : 2048;
}

bool SystemZTTIImpl::hasDivRemOp(Type *DataType, bool IsSigned) {
  EVT VT = TLI->getValueType(DL, DataType);
  return (VT.isScalarInteger() && TLI->isTypeLegal(VT));
}

// Return the bit size for the scalar type or vector element
// type. getScalarSizeInBits() returns 0 for a pointer type.
static unsigned getScalarSizeInBits(Type *Ty) {
  unsigned Size =
    (Ty->isPtrOrPtrVectorTy() ? 64U : Ty->getScalarSizeInBits());
  assert(Size > 0 && "Element must have non-zero size.");
  return Size;
}

// getNumberOfParts() calls getTypeLegalizationCost() which splits the vector
// type until it is legal, and would e.g. return 4 for <6 x i64> instead of
// the 3 vector registers that type actually occupies.
static unsigned getNumVectorRegs(Type *Ty) {
  auto *VTy = cast<FixedVectorType>(Ty);
  unsigned WideBits = getScalarSizeInBits(Ty) * VTy->getNumElements();
  assert(WideBits > 0 && "Could not compute size of vector");
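  // Round the total bit width up to a whole number of 128-bit vector
  // registers.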
  return ((WideBits % 128U) ? ((WideBits / 128U) + 1) : (WideBits / 128U));
}

int SystemZTTIImpl::getArithmeticInstrCost(
    unsigned Opcode, Type *Ty, TTI::TargetCostKind CostKind,
    TTI::OperandValueKind Op1Info,
    TTI::OperandValueKind Op2Info, TTI::OperandValueProperties Opd1PropInfo,
    TTI::OperandValueProperties Opd2PropInfo, ArrayRef<const Value *> Args,
    const Instruction *CxtI) {

  // TODO: Handle more cost kinds.
  if (CostKind != TTI::TCK_RecipThroughput)
    return BaseT::getArithmeticInstrCost(Opcode, Ty, CostKind, Op1Info,
                                         Op2Info, Opd1PropInfo,
                                         Opd2PropInfo, Args, CxtI);

  // TODO: return a good value for BB-VECTORIZER that includes the
  // immediate loads, which we do not want to count for the loop
  // vectorizer, since they are hopefully hoisted out of the loop. This
  // would require a new parameter 'InLoop', but not sure if constant
  // args are common enough to motivate this.

  unsigned ScalarBits = Ty->getScalarSizeInBits();

  // There are three cases of division and remainder: Dividing with a register
  // needs a divide instruction. A divisor which is a power of two constant
  // can be implemented with a sequence of shifts. Any other constant needs a
  // multiply and shifts.
  const unsigned DivInstrCost = 20;
  const unsigned DivMulSeqCost = 10;
  const unsigned SDivPow2Cost = 4;

  bool SignedDivRem =
      Opcode == Instruction::SDiv || Opcode == Instruction::SRem;
  bool UnsignedDivRem =
      Opcode == Instruction::UDiv || Opcode == Instruction::URem;

  // Check for a constant divisor.
  bool DivRemConst = false;
  bool DivRemConstPow2 = false;
  if ((SignedDivRem || UnsignedDivRem) && Args.size() == 2) {
    if (const Constant *C = dyn_cast<Constant>(Args[1])) {
      const ConstantInt *CVal =
          (C->getType()->isVectorTy()
               ? dyn_cast_or_null<const ConstantInt>(C->getSplatValue())
               : dyn_cast<const ConstantInt>(C));
      if (CVal != nullptr &&
          (CVal->getValue().isPowerOf2() || (-CVal->getValue()).isPowerOf2()))
        DivRemConstPow2 = true;
      else
        DivRemConst = true;
    }
  }
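  // For example, a divisor of 16 (or -16 for a signed operation) sets
  // DivRemConstPow2, while a divisor such as 10 sets DivRemConst.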

  if (!Ty->isVectorTy()) {
    // These FP operations are supported with a dedicated instruction for
    // float, double and fp128 (base implementation assumes float generally
    // costs 2).
    if (Opcode == Instruction::FAdd || Opcode == Instruction::FSub ||
        Opcode == Instruction::FMul || Opcode == Instruction::FDiv)
      return 1;

    // There is no native support for FRem.
    if (Opcode == Instruction::FRem)
      return LIBCALL_COST;

    // Give a discount for some combined logical operations if supported.
    if (Args.size() == 2 && ST->hasMiscellaneousExtensions3()) {
      if (Opcode == Instruction::Xor) {
        for (const Value *A : Args) {
          if (const Instruction *I = dyn_cast<Instruction>(A))
            if (I->hasOneUse() &&
                (I->getOpcode() == Instruction::And ||
                 I->getOpcode() == Instruction::Or ||
                 I->getOpcode() == Instruction::Xor))
              return 0;
        }
      }
      else if (Opcode == Instruction::Or || Opcode == Instruction::And) {
        for (const Value *A : Args) {
          if (const Instruction *I = dyn_cast<Instruction>(A))
            if (I->hasOneUse() && I->getOpcode() == Instruction::Xor)
              return 0;
        }
      }
    }

    // Or requires one instruction, although it has custom handling for i64.
    if (Opcode == Instruction::Or)
      return 1;

    if (Opcode == Instruction::Xor && ScalarBits == 1) {
      if (ST->hasLoadStoreOnCond2())
        return 5; // 2 * (li 0; loc 1); xor
      return 7; // 2 * ipm sequences ; xor ; shift ; compare
    }

    if (DivRemConstPow2)
      return (SignedDivRem ? SDivPow2Cost : 1);
    if (DivRemConst)
      return DivMulSeqCost;
    if (SignedDivRem || UnsignedDivRem)
      return DivInstrCost;
  }
  else if (ST->hasVector()) {
    auto *VTy = cast<FixedVectorType>(Ty);
    unsigned VF = VTy->getNumElements();
    unsigned NumVectors = getNumVectorRegs(Ty);

    // These vector operations are custom handled, but are still supported
    // with one instruction per vector, regardless of element size.
    if (Opcode == Instruction::Shl || Opcode == Instruction::LShr ||
        Opcode == Instruction::AShr) {
      return NumVectors;
    }

    if (DivRemConstPow2)
      return (NumVectors * (SignedDivRem ? SDivPow2Cost : 1));
    if (DivRemConst)
      return VF * DivMulSeqCost + getScalarizationOverhead(VTy, Args);
    if ((SignedDivRem || UnsignedDivRem) && VF > 4)
      // Temporary hack: disable high vectorization factors with integer
      // division/remainder, which will get scalarized and handled with
      // GR128 registers. The machine scheduler is not clever enough to
      // avoid spilling yet.
      return 1000;

    // These FP operations are supported with a single vector instruction for
    // double (base implementation assumes float generally costs 2). For
    // FP128, the scalar cost is 1, and there is no overhead since the values
    // are already in scalar registers.
    if (Opcode == Instruction::FAdd || Opcode == Instruction::FSub ||
        Opcode == Instruction::FMul || Opcode == Instruction::FDiv) {
      switch (ScalarBits) {
      case 32: {
        // The vector enhancements facility 1 provides v4f32 instructions.
        if (ST->hasVectorEnhancements1())
          return NumVectors;
        // Return the cost of multiple scalar invocations plus the cost of
        // inserting and extracting the values.
        unsigned ScalarCost =
            getArithmeticInstrCost(Opcode, Ty->getScalarType(), CostKind);
        unsigned Cost = (VF * ScalarCost) + getScalarizationOverhead(VTy, Args);
        // FIXME: VF 2 for these FP operations is currently just as
        // expensive as for VF 4.
        if (VF == 2)
          Cost *= 2;
        return Cost;
      }
      case 64:
      case 128:
        return NumVectors;
      default:
        break;
      }
    }

    // There is no native support for FRem.
    if (Opcode == Instruction::FRem) {
      unsigned Cost = (VF * LIBCALL_COST) + getScalarizationOverhead(VTy, Args);
      // FIXME: VF 2 for float is currently just as expensive as for VF 4.
      if (VF == 2 && ScalarBits == 32)
        Cost *= 2;
      return Cost;
    }
  }

  // Fallback to the default implementation.
  return BaseT::getArithmeticInstrCost(Opcode, Ty, CostKind, Op1Info, Op2Info,
                                       Opd1PropInfo, Opd2PropInfo, Args, CxtI);
}

int SystemZTTIImpl::getShuffleCost(TTI::ShuffleKind Kind, VectorType *Tp,
                                   int Index, VectorType *SubTp) {
  if (ST->hasVector()) {
    unsigned NumVectors = getNumVectorRegs(Tp);

    // TODO: Since fp32 is expanded, the shuffle cost should always be 0.

    // FP128 values are always in scalar registers, so there is no work
    // involved with a shuffle, except for broadcast. In that case register
    // moves are done with a single instruction per element.
    if (Tp->getScalarType()->isFP128Ty())
      return (Kind == TargetTransformInfo::SK_Broadcast ? NumVectors - 1 : 0);

    switch (Kind) {
    case TargetTransformInfo::SK_ExtractSubvector:
      // ExtractSubvector Index indicates start offset.

      // Extracting a subvector at index 0 is a no-op.
      return (Index == 0 ? 0 : NumVectors);

    case TargetTransformInfo::SK_Broadcast:
      // Loop vectorizer calls here to figure out the extra cost of
      // broadcasting a loaded value to all elements of a vector. Since vlrep
      // loads and replicates with a single instruction, adjust the returned
      // value.
      return NumVectors - 1;

    default:

      // SystemZ supports single instruction permutation / replication.
      return NumVectors;
    }
  }

  return BaseT::getShuffleCost(Kind, Tp, Index, SubTp);
}

// Return the log2 difference of the element sizes of the two vector types.
static unsigned getElSizeLog2Diff(Type *Ty0, Type *Ty1) {
  unsigned Bits0 = Ty0->getScalarSizeInBits();
  unsigned Bits1 = Ty1->getScalarSizeInBits();

  if (Bits1 > Bits0)
    return (Log2_32(Bits1) - Log2_32(Bits0));

  return (Log2_32(Bits0) - Log2_32(Bits1));
}

// Return the number of instructions needed to truncate SrcTy to DstTy.
unsigned SystemZTTIImpl::
getVectorTruncCost(Type *SrcTy, Type *DstTy) {
  assert(SrcTy->isVectorTy() && DstTy->isVectorTy());
  assert(SrcTy->getPrimitiveSizeInBits() > DstTy->getPrimitiveSizeInBits() &&
         "Packing must reduce size of vector type.");
  assert(cast<FixedVectorType>(SrcTy)->getNumElements() ==
             cast<FixedVectorType>(DstTy)->getNumElements() &&
         "Packing should not change number of elements.");

  // TODO: Since fp32 is expanded, the extract cost should always be 0.

  unsigned NumParts = getNumVectorRegs(SrcTy);
  if (NumParts <= 2)
    // Up to 2 vector registers can be truncated efficiently with pack or
    // permute. The latter requires an immediate mask to be loaded, which
    // typically gets hoisted out of a loop.  TODO: return a good value for
    // BB-VECTORIZER that includes the immediate loads, which we do not want
    // to count for the loop vectorizer.
    return 1;

  unsigned Cost = 0;
  unsigned Log2Diff = getElSizeLog2Diff(SrcTy, DstTy);
  unsigned VF = cast<FixedVectorType>(SrcTy)->getNumElements();
  for (unsigned P = 0; P < Log2Diff; ++P) {
    if (NumParts > 1)
      NumParts /= 2;
    Cost += NumParts;
  }
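  // For example (illustrative), truncating <16 x i32> to <16 x i8> starts
  // with NumParts == 4 and Log2Diff == 2, so the loop adds 2 + 1 = 3 packs.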

  // Currently, isel emits a general mix of permute and pack instructions
  // that follows the cost computation above, except for this case, which
  // is one instruction less:
  if (VF == 8 && SrcTy->getScalarSizeInBits() == 64 &&
      DstTy->getScalarSizeInBits() == 8)
    Cost--;

  return Cost;
}

// Return the cost of converting a vector bitmask produced by a compare
// (SrcTy), to the type of the select or extend instruction (DstTy).
unsigned SystemZTTIImpl::
getVectorBitmaskConversionCost(Type *SrcTy, Type *DstTy) {
  assert(SrcTy->isVectorTy() && DstTy->isVectorTy() &&
         "Should only be called with vector types.");

  unsigned PackCost = 0;
  unsigned SrcScalarBits = SrcTy->getScalarSizeInBits();
  unsigned DstScalarBits = DstTy->getScalarSizeInBits();
  unsigned Log2Diff = getElSizeLog2Diff(SrcTy, DstTy);
  if (SrcScalarBits > DstScalarBits)
    // The bitmask will be truncated.
    PackCost = getVectorTruncCost(SrcTy, DstTy);
  else if (SrcScalarBits < DstScalarBits) {
    unsigned DstNumParts = getNumVectorRegs(DstTy);
    // Each vector select needs its part of the bitmask unpacked.
    PackCost = Log2Diff * DstNumParts;
    // Extra cost for moving part of mask before unpacking.
    PackCost += DstNumParts - 1;
  }
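  // E.g. (illustrative) widening an i32 compare mask for a <4 x i64> select:
  // Log2Diff == 1 and DstNumParts == 2, giving a PackCost of 2 + 1 = 3.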

  return PackCost;
}

// Return the type of the compared operands. This is needed to compute the
// cost for a Select / ZExt or SExt instruction.
static Type *getCmpOpsType(const Instruction *I, unsigned VF = 1) {
  Type *OpTy = nullptr;
  if (CmpInst *CI = dyn_cast<CmpInst>(I->getOperand(0)))
    OpTy = CI->getOperand(0)->getType();
  else if (Instruction *LogicI = dyn_cast<Instruction>(I->getOperand(0)))
    if (LogicI->getNumOperands() == 2)
      if (CmpInst *CI0 = dyn_cast<CmpInst>(LogicI->getOperand(0)))
        if (isa<CmpInst>(LogicI->getOperand(1)))
          OpTy = CI0->getOperand(0)->getType();

  if (OpTy != nullptr) {
    if (VF == 1) {
      assert(!OpTy->isVectorTy() && "Expected scalar type");
      return OpTy;
    }
    // Return the potentially vectorized type based on 'I' and 'VF'.  'I' may
    // be either scalar or already vectorized with the same or a smaller VF.
    Type *ElTy = OpTy->getScalarType();
    return FixedVectorType::get(ElTy, VF);
  }

  return nullptr;
}

// Get the cost of converting a boolean vector to a vector with same width
// and element size as Dst, plus the cost of zero extending if needed.
unsigned SystemZTTIImpl::
getBoolVecToIntConversionCost(unsigned Opcode, Type *Dst,
                              const Instruction *I) {
  auto *DstVTy = cast<FixedVectorType>(Dst);
  unsigned VF = DstVTy->getNumElements();
  unsigned Cost = 0;
  // If we know the widths of the compared operands, get the cost of
  // converting them to match Dst. Otherwise assume same widths.
  Type *CmpOpTy = ((I != nullptr) ? getCmpOpsType(I, VF) : nullptr);
  if (CmpOpTy != nullptr)
    Cost = getVectorBitmaskConversionCost(CmpOpTy, Dst);
  if (Opcode == Instruction::ZExt || Opcode == Instruction::UIToFP)
    // One 'vn' per dst vector with an immediate mask.
    Cost += getNumVectorRegs(Dst);
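  // E.g. (illustrative) a zext of a <4 x i1> produced by an i32 compare to
  // <4 x i32> costs just the single 'vn' against a mask of ones.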
  return Cost;
}

int SystemZTTIImpl::getCastInstrCost(unsigned Opcode, Type *Dst, Type *Src,
                                     TTI::CastContextHint CCH,
                                     TTI::TargetCostKind CostKind,
                                     const Instruction *I) {
  // FIXME: Can the logic below also be used for these cost kinds?
  if (CostKind == TTI::TCK_CodeSize || CostKind == TTI::TCK_SizeAndLatency) {
    int BaseCost = BaseT::getCastInstrCost(Opcode, Dst, Src, CCH, CostKind, I);
    return BaseCost == 0 ? BaseCost : 1;
  }

  unsigned DstScalarBits = Dst->getScalarSizeInBits();
  unsigned SrcScalarBits = Src->getScalarSizeInBits();

  if (!Src->isVectorTy()) {
    assert(!Dst->isVectorTy());

    if (Opcode == Instruction::SIToFP || Opcode == Instruction::UIToFP) {
      if (SrcScalarBits >= 32 ||
          (I != nullptr && isa<LoadInst>(I->getOperand(0))))
        return 1;
      return SrcScalarBits > 1 ? 2 /*i8/i16 extend*/ : 5 /*branch seq.*/;
    }

    if ((Opcode == Instruction::ZExt || Opcode == Instruction::SExt) &&
        Src->isIntegerTy(1)) {
      if (ST->hasLoadStoreOnCond2())
        return 2; // li 0; loc 1

      // This should be extension of a compare i1 result, which is done with
      // ipm and a varying sequence of instructions.
      unsigned Cost = 0;
      if (Opcode == Instruction::SExt)
        Cost = (DstScalarBits < 64 ? 3 : 4);
      if (Opcode == Instruction::ZExt)
        Cost = 3;
      Type *CmpOpTy = ((I != nullptr) ? getCmpOpsType(I) : nullptr);
      if (CmpOpTy != nullptr && CmpOpTy->isFloatingPointTy())
        // If operands of fp type were compared, this costs one more.
        Cost++;
      return Cost;
    }
  }
  }
  else if (ST->hasVector()) {
    auto *SrcVecTy = cast<FixedVectorType>(Src);
    auto *DstVecTy = cast<FixedVectorType>(Dst);
    unsigned VF = SrcVecTy->getNumElements();
    unsigned NumDstVectors = getNumVectorRegs(Dst);
    unsigned NumSrcVectors = getNumVectorRegs(Src);

    if (Opcode == Instruction::Trunc) {
      if (Src->getScalarSizeInBits() == Dst->getScalarSizeInBits())
        return 0; // Check for NOOP conversions.
      return getVectorTruncCost(Src, Dst);
    }

    if (Opcode == Instruction::ZExt || Opcode == Instruction::SExt) {
      if (SrcScalarBits >= 8) {
        // ZExt/SExt will be handled with one unpack per doubling of width.
        unsigned NumUnpacks = getElSizeLog2Diff(Src, Dst);

        // For types that span multiple vector registers, some additional
        // instructions are used to set up the unpacking.
        unsigned NumSrcVectorOps =
          (NumUnpacks > 1 ? (NumDstVectors - NumSrcVectors)
                          : (NumDstVectors / 2));

        return (NumUnpacks * NumDstVectors) + NumSrcVectorOps;
      }
      else if (SrcScalarBits == 1)
        return getBoolVecToIntConversionCost(Opcode, Dst, I);
    }

    if (Opcode == Instruction::SIToFP || Opcode == Instruction::UIToFP ||
        Opcode == Instruction::FPToSI || Opcode == Instruction::FPToUI) {
      // TODO: Fix base implementation which could simplify things a bit here
      // (seems to miss on differentiating on scalar/vector types).

      // Only 64 bit vector conversions are natively supported before z15.
      if (DstScalarBits == 64 || ST->hasVectorEnhancements2()) {
        if (SrcScalarBits == DstScalarBits)
          return NumDstVectors;

        if (SrcScalarBits == 1)
          return getBoolVecToIntConversionCost(Opcode, Dst, I) + NumDstVectors;
      }

      // Return the cost of multiple scalar invocations plus the cost of
      // inserting and extracting the values. Base implementation does not
      // realize float->int gets scalarized.
      unsigned ScalarCost = getCastInstrCost(
          Opcode, Dst->getScalarType(), Src->getScalarType(), CCH, CostKind);
      unsigned TotCost = VF * ScalarCost;
      bool NeedsInserts = true, NeedsExtracts = true;
      // FP128 registers do not get inserted or extracted.
      if (DstScalarBits == 128 &&
          (Opcode == Instruction::SIToFP || Opcode == Instruction::UIToFP))
        NeedsInserts = false;
      if (SrcScalarBits == 128 &&
          (Opcode == Instruction::FPToSI || Opcode == Instruction::FPToUI))
        NeedsExtracts = false;

      TotCost += getScalarizationOverhead(SrcVecTy, false, NeedsExtracts);
      TotCost += getScalarizationOverhead(DstVecTy, NeedsInserts, false);

      // FIXME: VF 2 for float<->i32 is currently just as expensive as for VF 4.
      if (VF == 2 && SrcScalarBits == 32 && DstScalarBits == 32)
        TotCost *= 2;

      return TotCost;
    }

    if (Opcode == Instruction::FPTrunc) {
      if (SrcScalarBits == 128)  // fp128 -> double/float + inserts of elements.
        return VF /*ldxbr/lexbr*/ +
               getScalarizationOverhead(DstVecTy, true, false);
      else // double -> float
        return VF / 2 /*vledb*/ + std::max(1U, VF / 4 /*vperm*/);
    }

    if (Opcode == Instruction::FPExt) {
      if (SrcScalarBits == 32 && DstScalarBits == 64) {
        // float -> double is very rare and currently unoptimized. Instead of
        // using vldeb, which can do two at a time, all conversions are
        // scalarized.
        return VF * 2;
      }
      // -> fp128.  VF * lxdb/lxeb + extraction of elements.
      return VF + getScalarizationOverhead(SrcVecTy, false, true);
    }
  }

  return BaseT::getCastInstrCost(Opcode, Dst, Src, CCH, CostKind, I);
}

// Scalar i8 / i16 operations will typically be performed after first
// extending the operands to i32.
static unsigned getOperandsExtensionCost(const Instruction *I) {
  unsigned ExtCost = 0;
  for (Value *Op : I->operands())
    // A load of i8 or i16 sign/zero extends to i32.
    if (!isa<LoadInst>(Op) && !isa<ConstantInt>(Op))
      ExtCost++;

  return ExtCost;
}

int SystemZTTIImpl::getCmpSelInstrCost(unsigned Opcode, Type *ValTy,
                                       Type *CondTy,
                                       TTI::TargetCostKind CostKind,
                                       const Instruction *I) {
  if (CostKind != TTI::TCK_RecipThroughput)
    return BaseT::getCmpSelInstrCost(Opcode, ValTy, CondTy, CostKind);

  if (!ValTy->isVectorTy()) {
    switch (Opcode) {
    case Instruction::ICmp: {
      // A loaded value compared with 0 with multiple users becomes Load and
      // Test. The load is then not foldable, so return 0 cost for the ICmp.
      unsigned ScalarBits = ValTy->getScalarSizeInBits();
      if (I != nullptr && ScalarBits >= 32)
        if (LoadInst *Ld = dyn_cast<LoadInst>(I->getOperand(0)))
          if (const ConstantInt *C = dyn_cast<ConstantInt>(I->getOperand(1)))
            if (!Ld->hasOneUse() && Ld->getParent() == I->getParent() &&
                C->getZExtValue() == 0)
              return 0;

      unsigned Cost = 1;
      if (ValTy->isIntegerTy() && ValTy->getScalarSizeInBits() <= 16)
        Cost += (I != nullptr ? getOperandsExtensionCost(I) : 2);
      return Cost;
    }
    case Instruction::Select:
      if (ValTy->isFloatingPointTy())
        return 4; // No load on condition for FP - costs a conditional jump.
      return 1; // Load On Condition / Select Register.
    }
  }
  else if (ST->hasVector()) {
    unsigned VF = cast<FixedVectorType>(ValTy)->getNumElements();

    // Called with a compare instruction.
    if (Opcode == Instruction::ICmp || Opcode == Instruction::FCmp) {
      unsigned PredicateExtraCost = 0;
      if (I != nullptr) {
        // Some predicates cost one or two extra instructions.
        switch (cast<CmpInst>(I)->getPredicate()) {
        case CmpInst::Predicate::ICMP_NE:
        case CmpInst::Predicate::ICMP_UGE:
        case CmpInst::Predicate::ICMP_ULE:
        case CmpInst::Predicate::ICMP_SGE:
        case CmpInst::Predicate::ICMP_SLE:
          PredicateExtraCost = 1;
          break;
        case CmpInst::Predicate::FCMP_ONE:
        case CmpInst::Predicate::FCMP_ORD:
        case CmpInst::Predicate::FCMP_UEQ:
        case CmpInst::Predicate::FCMP_UNO:
          PredicateExtraCost = 2;
          break;
        default:
          break;
        }
      }

      // Float is handled with 2*vmr[lh]f + 2*vldeb + vfchdb for each pair of
      // floats.  FIXME: <2 x float> generates same code as <4 x float>.
      unsigned CmpCostPerVector = (ValTy->getScalarType()->isFloatTy() ? 10 : 1);
      unsigned NumVecs_cmp = getNumVectorRegs(ValTy);

      unsigned Cost = (NumVecs_cmp * (CmpCostPerVector + PredicateExtraCost));
      return Cost;
    }
    else { // Called with a select instruction.
      assert(Opcode == Instruction::Select);

      // We can figure out the extra cost of packing / unpacking if the
      // instruction was passed and the compare instruction is found.
      unsigned PackCost = 0;
      Type *CmpOpTy = ((I != nullptr) ? getCmpOpsType(I, VF) : nullptr);
      if (CmpOpTy != nullptr)
        PackCost =
          getVectorBitmaskConversionCost(CmpOpTy, ValTy);

      return getNumVectorRegs(ValTy) /*vsel*/ + PackCost;
    }
  }

  return BaseT::getCmpSelInstrCost(Opcode, ValTy, CondTy, CostKind);
}

int SystemZTTIImpl::
getVectorInstrCost(unsigned Opcode, Type *Val, unsigned Index) {
  // vlvgp will insert two GPRs into a vector register, so only count half the
  // number of instructions.
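  // E.g. building a <2 x i64> from two GPRs is costed as 1 (Index 0) plus 0
  // (Index 1), matching the single vlvgp instruction.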
  if (Opcode == Instruction::InsertElement && Val->isIntOrIntVectorTy(64))
    return ((Index % 2 == 0) ? 1 : 0);

  if (Opcode == Instruction::ExtractElement) {
    int Cost = ((getScalarSizeInBits(Val) == 1) ? 2 /*+test-under-mask*/ : 1);

    // Give a slight penalty for moving out of vector pipeline to FXU unit.
    if (Index == 0 && Val->isIntOrIntVectorTy())
      Cost += 1;

    return Cost;
  }

  return BaseT::getVectorInstrCost(Opcode, Val, Index);
}

// Check if a load may be folded as a memory operand in its user.
bool SystemZTTIImpl::
isFoldableLoad(const LoadInst *Ld, const Instruction *&FoldedValue) {
  if (!Ld->hasOneUse())
    return false;
  FoldedValue = Ld;
  const Instruction *UserI = cast<Instruction>(*Ld->user_begin());
  unsigned LoadedBits = getScalarSizeInBits(Ld->getType());
  unsigned TruncBits = 0;
  unsigned SExtBits = 0;
  unsigned ZExtBits = 0;
  if (UserI->hasOneUse()) {
    unsigned UserBits = UserI->getType()->getScalarSizeInBits();
    if (isa<TruncInst>(UserI))
      TruncBits = UserBits;
    else if (isa<SExtInst>(UserI))
      SExtBits = UserBits;
    else if (isa<ZExtInst>(UserI))
      ZExtBits = UserBits;
  }
  if (TruncBits || SExtBits || ZExtBits) {
    FoldedValue = UserI;
    UserI = cast<Instruction>(*UserI->user_begin());
    // Load (single use) -> trunc/extend (single use) -> UserI
  }
  if ((UserI->getOpcode() == Instruction::Sub ||
       UserI->getOpcode() == Instruction::SDiv ||
       UserI->getOpcode() == Instruction::UDiv) &&
      UserI->getOperand(1) != FoldedValue)
    return false; // Not commutative, only RHS foldable.
  // LoadOrTruncBits holds the number of effectively loaded bits, but 0 if an
  // extension was made of the load.
  unsigned LoadOrTruncBits =
      ((SExtBits || ZExtBits) ? 0 : (TruncBits ? TruncBits : LoadedBits));
  switch (UserI->getOpcode()) {
  case Instruction::Add: // SE: 16->32, 16/32->64, z14:16->64. ZE: 32->64
  case Instruction::Sub:
  case Instruction::ICmp:
    if (LoadedBits == 32 && ZExtBits == 64)
      return true;
    LLVM_FALLTHROUGH;
  case Instruction::Mul: // SE: 16->32, 32->64, z14:16->64
    if (UserI->getOpcode() != Instruction::ICmp) {
      if (LoadedBits == 16 &&
          (SExtBits == 32 ||
           (SExtBits == 64 && ST->hasMiscellaneousExtensions2())))
        return true;
      if (LoadOrTruncBits == 16)
        return true;
    }
    LLVM_FALLTHROUGH;
  case Instruction::SDiv: // SE: 32->64
    if (LoadedBits == 32 && SExtBits == 64)
      return true;
    LLVM_FALLTHROUGH;
  case Instruction::UDiv:
  case Instruction::And:
  case Instruction::Or:
  case Instruction::Xor:
    // This also makes sense for float operations, but disabled for now due
    // to regressions.
    // case Instruction::FCmp:
    // case Instruction::FAdd:
    // case Instruction::FSub:
    // case Instruction::FMul:
    // case Instruction::FDiv:

    // All possible extensions of memory checked above.

    // Comparison between memory and immediate.
    if (UserI->getOpcode() == Instruction::ICmp)
      if (ConstantInt *CI = dyn_cast<ConstantInt>(UserI->getOperand(1)))
        if (isUInt<16>(CI->getZExtValue()))
          return true;
    return (LoadOrTruncBits == 32 || LoadOrTruncBits == 64);
    break;
  }
  return false;
}

static bool isBswapIntrinsicCall(const Value *V) {
  if (const Instruction *I = dyn_cast<Instruction>(V))
    if (auto *CI = dyn_cast<CallInst>(I))
      if (auto *F = CI->getCalledFunction())
        if (F->getIntrinsicID() == Intrinsic::bswap)
          return true;
  return false;
}

int SystemZTTIImpl::getMemoryOpCost(unsigned Opcode, Type *Src,
                                    MaybeAlign Alignment, unsigned AddressSpace,
                                    TTI::TargetCostKind CostKind,
                                    const Instruction *I) {
  assert(!Src->isVoidTy() && "Invalid type");

  // TODO: Handle other cost kinds.
  if (CostKind != TTI::TCK_RecipThroughput)
    return 1;

  if (!Src->isVectorTy() && Opcode == Instruction::Load && I != nullptr) {
    // Store the load or its truncated or extended value in FoldedValue.
    const Instruction *FoldedValue = nullptr;
    if (isFoldableLoad(cast<LoadInst>(I), FoldedValue)) {
      const Instruction *UserI = cast<Instruction>(*FoldedValue->user_begin());
      assert(UserI->getNumOperands() == 2 && "Expected a binop.");

      // UserI can't fold two loads, so in that case return 0 cost only
      // half of the time.
      for (unsigned i = 0; i < 2; ++i) {
        if (UserI->getOperand(i) == FoldedValue)
          continue;

        if (Instruction *OtherOp = dyn_cast<Instruction>(UserI->getOperand(i))){
          LoadInst *OtherLoad = dyn_cast<LoadInst>(OtherOp);
          if (!OtherLoad &&
              (isa<TruncInst>(OtherOp) || isa<SExtInst>(OtherOp) ||
               isa<ZExtInst>(OtherOp)))
            OtherLoad = dyn_cast<LoadInst>(OtherOp->getOperand(0));
          if (OtherLoad && isFoldableLoad(OtherLoad, FoldedValue/*dummy*/))
            return i == 0; // Both operands foldable.
        }
      }

      return 0; // Only I is foldable in user.
    }
  }

  unsigned NumOps =
    (Src->isVectorTy() ? getNumVectorRegs(Src) : getNumberOfParts(Src));

  // Store/Load reversed saves one instruction.
  if (((!Src->isVectorTy() && NumOps == 1) || ST->hasVectorEnhancements2()) &&
      I != nullptr) {
    if (Opcode == Instruction::Load && I->hasOneUse()) {
      const Instruction *LdUser = cast<Instruction>(*I->user_begin());
      // In case of load -> bswap -> store, return normal cost for the load.
      if (isBswapIntrinsicCall(LdUser) &&
          (!LdUser->hasOneUse() || !isa<StoreInst>(*LdUser->user_begin())))
        return 0;
    }
    else if (const StoreInst *SI = dyn_cast<StoreInst>(I)) {
      const Value *StoredVal = SI->getValueOperand();
      if (StoredVal->hasOneUse() && isBswapIntrinsicCall(StoredVal))
        return 0;
    }
  }

  if (Src->getScalarSizeInBits() == 128)
    // 128 bit scalars are held in a pair of 64-bit registers.
    NumOps *= 2;

  return NumOps;
}

// The generic implementation of getInterleavedMemoryOpCost() is based on
// adding costs of the memory operations plus all the extracts and inserts
// needed for using / defining the vector operands. The SystemZ version does
// roughly the same but bases the computations on vector permutations
// instead.
int SystemZTTIImpl::getInterleavedMemoryOpCost(
    unsigned Opcode, Type *VecTy, unsigned Factor, ArrayRef<unsigned> Indices,
    Align Alignment, unsigned AddressSpace, TTI::TargetCostKind CostKind,
    bool UseMaskForCond, bool UseMaskForGaps) {
  if (UseMaskForCond || UseMaskForGaps)
    return BaseT::getInterleavedMemoryOpCost(Opcode, VecTy, Factor, Indices,
                                             Alignment, AddressSpace, CostKind,
                                             UseMaskForCond, UseMaskForGaps);
  assert(isa<VectorType>(VecTy) &&
         "Expect a vector type for interleaved memory op");

  // Return the ceiling of dividing A by B.
  auto ceil = [](unsigned A, unsigned B) { return (A + B - 1) / B; };

  unsigned NumElts = cast<FixedVectorType>(VecTy)->getNumElements();
  assert(Factor > 1 && NumElts % Factor == 0 && "Invalid interleave factor");
  unsigned VF = NumElts / Factor;
  unsigned NumEltsPerVecReg = (128U / getScalarSizeInBits(VecTy));
  unsigned NumVectorMemOps = getNumVectorRegs(VecTy);
  unsigned NumPermutes = 0;

  if (Opcode == Instruction::Load) {
    // Loading interleave groups may have gaps, which may mean fewer
    // loads. Find out how many vectors will be loaded in total, and in how
    // many of them each value will be.
    BitVector UsedInsts(NumVectorMemOps, false);
    std::vector<BitVector> ValueVecs(Factor, BitVector(NumVectorMemOps, false));
    for (unsigned Index : Indices)
      for (unsigned Elt = 0; Elt < VF; ++Elt) {
        unsigned Vec = (Index + Elt * Factor) / NumEltsPerVecReg;
        UsedInsts.set(Vec);
        ValueVecs[Index].set(Vec);
      }
    NumVectorMemOps = UsedInsts.count();

    for (unsigned Index : Indices) {
      // Estimate that each loaded source vector containing this Index
      // requires one operation, except that vperm can handle two input
      // registers the first time for each dst vector.
      unsigned NumSrcVecs = ValueVecs[Index].count();
      unsigned NumDstVecs = ceil(VF * getScalarSizeInBits(VecTy), 128U);
      assert(NumSrcVecs >= NumDstVecs && "Expected at least as many sources");
      NumPermutes += std::max(1U, NumSrcVecs - NumDstVecs);
    }
  } else {
    // Estimate the permutes for each stored vector as the smaller of the
    // number of elements and the number of source vectors. Subtract one per
    // dst vector for vperm (see above).
    unsigned NumSrcVecs = std::min(NumEltsPerVecReg, Factor);
    unsigned NumDstVecs = NumVectorMemOps;
    assert(NumSrcVecs > 1 && "Expected at least two source vectors.");
    NumPermutes += (NumDstVecs * NumSrcVecs) - NumDstVecs;
  }
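  // E.g. (illustrative) a load of <8 x i32> with Factor == 2 and both
  // indices used: 2 vector loads plus 2 vperms give a total cost of 4.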

  // Cost of load/store operations and the permutations needed.
  return NumVectorMemOps + NumPermutes;
}

static int getVectorIntrinsicInstrCost(Intrinsic::ID ID, Type *RetTy) {
  if (RetTy->isVectorTy() && ID == Intrinsic::bswap)
    return getNumVectorRegs(RetTy); // VPERM
  return -1;
}

int SystemZTTIImpl::getIntrinsicInstrCost(const IntrinsicCostAttributes &ICA,
                                          TTI::TargetCostKind CostKind) {
  int Cost = getVectorIntrinsicInstrCost(ICA.getID(), ICA.getReturnType());
  if (Cost != -1)
    return Cost;
  return BaseT::getIntrinsicInstrCost(ICA, CostKind);
}