//===-- SystemZTargetTransformInfo.cpp - SystemZ-specific TTI -------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file implements a TargetTransformInfo analysis pass specific to the
// SystemZ target machine. It uses the target's detailed information to provide
// more precise answers to certain TTI queries, while letting the target
// independent and default TTI implementations handle the rest.
//
//===----------------------------------------------------------------------===//

#include "SystemZTargetTransformInfo.h"
#include "llvm/Analysis/TargetTransformInfo.h"
#include "llvm/CodeGen/BasicTTIImpl.h"
#include "llvm/CodeGen/CostTable.h"
#include "llvm/CodeGen/TargetLowering.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/Support/Debug.h"
using namespace llvm;

#define DEBUG_TYPE "systemztti"

//===----------------------------------------------------------------------===//
//
// SystemZ cost model.
//
//===----------------------------------------------------------------------===//
int SystemZTTIImpl::getIntImmCost(const APInt &Imm, Type *Ty) {
  assert(Ty->isIntegerTy());

  unsigned BitSize = Ty->getPrimitiveSizeInBits();
  // There is no cost model for constants with a bit size of 0. Return TCC_Free
  // here, so that constant hoisting will ignore this constant.
  if (BitSize == 0)
    return TTI::TCC_Free;
  // No cost model implemented yet for operations on integers wider than
  // 64 bits.
  if (BitSize > 64)
    return TTI::TCC_Free;

  if (Imm == 0)
    return TTI::TCC_Free;

  if (Imm.getBitWidth() <= 64) {
    // Constants loaded via lgfi.
    if (isInt<32>(Imm.getSExtValue()))
      return TTI::TCC_Basic;
    // Constants loaded via llilf.
    if (isUInt<32>(Imm.getZExtValue()))
      return TTI::TCC_Basic;
    // Constants loaded via llihf.
    if ((Imm.getZExtValue() & 0xffffffff) == 0)
      return TTI::TCC_Basic;
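    // Anything else is assumed to need two instructions, e.g. a low/high
    // pair such as llilf + iihf (illustrative): 0xf0000000f0000000 matches
    // none of the patterns above.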
    return 2 * TTI::TCC_Basic;
  }

  return 4 * TTI::TCC_Basic;
}

int SystemZTTIImpl::getIntImmCostInst(unsigned Opcode, unsigned Idx,
                                      const APInt &Imm, Type *Ty) {
  assert(Ty->isIntegerTy());

  unsigned BitSize = Ty->getPrimitiveSizeInBits();
  // There is no cost model for constants with a bit size of 0. Return TCC_Free
  // here, so that constant hoisting will ignore this constant.
  if (BitSize == 0)
    return TTI::TCC_Free;
  // No cost model implemented yet for operations on integers wider than
  // 64 bits.
  if (BitSize > 64)
    return TTI::TCC_Free;

  switch (Opcode) {
  default:
    return TTI::TCC_Free;
  case Instruction::GetElementPtr:
    // Always hoist the base address of a GetElementPtr. This prevents the
    // creation of new constants for every base constant that gets constant
    // folded with the offset.
    if (Idx == 0)
      return 2 * TTI::TCC_Basic;
    return TTI::TCC_Free;
  case Instruction::Store:
    if (Idx == 0 && Imm.getBitWidth() <= 64) {
      // Any 8-bit immediate store can be implemented via mvi.
      if (BitSize == 8)
        return TTI::TCC_Free;
      // 16-bit immediate values can be stored via mvhhi/mvhi/mvghi.
      if (isInt<16>(Imm.getSExtValue()))
        return TTI::TCC_Free;
    }
    break;
  case Instruction::ICmp:
    if (Idx == 1 && Imm.getBitWidth() <= 64) {
      // Comparisons against signed 32-bit immediates implemented via cgfi.
      if (isInt<32>(Imm.getSExtValue()))
        return TTI::TCC_Free;
      // Comparisons against unsigned 32-bit immediates implemented via clgfi.
      if (isUInt<32>(Imm.getZExtValue()))
        return TTI::TCC_Free;
    }
    break;
  case Instruction::Add:
  case Instruction::Sub:
    if (Idx == 1 && Imm.getBitWidth() <= 64) {
      // We use algfi/slgfi to add/subtract 32-bit unsigned immediates.
      if (isUInt<32>(Imm.getZExtValue()))
        return TTI::TCC_Free;
      // Or their negation, by swapping addition vs. subtraction.
      if (isUInt<32>(-Imm.getSExtValue()))
        return TTI::TCC_Free;
    }
    break;
  case Instruction::Mul:
    if (Idx == 1 && Imm.getBitWidth() <= 64) {
      // We use msgfi to multiply by 32-bit signed immediates.
      if (isInt<32>(Imm.getSExtValue()))
        return TTI::TCC_Free;
    }
    break;
  case Instruction::Or:
  case Instruction::Xor:
    if (Idx == 1 && Imm.getBitWidth() <= 64) {
      // Masks supported by oilf/xilf.
      if (isUInt<32>(Imm.getZExtValue()))
        return TTI::TCC_Free;
      // Masks supported by oihf/xihf.
      if ((Imm.getZExtValue() & 0xffffffff) == 0)
        return TTI::TCC_Free;
    }
    break;
  case Instruction::And:
    if (Idx == 1 && Imm.getBitWidth() <= 64) {
      // Any 32-bit AND operation can be implemented via nilf.
      if (BitSize <= 32)
        return TTI::TCC_Free;
      // 64-bit masks supported by nilf.
      if (isUInt<32>(~Imm.getZExtValue()))
        return TTI::TCC_Free;
      // 64-bit masks supported by nihf.
      if ((Imm.getZExtValue() & 0xffffffff) == 0xffffffff)
        return TTI::TCC_Free;
      // Some 64-bit AND operations can be implemented via risbg.
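      // (For example, a single contiguous run of ones such as
      // 0x0000ffffffff0000 should be a valid rotate-and-insert mask;
      // illustrative.)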
      const SystemZInstrInfo *TII = ST->getInstrInfo();
      unsigned Start, End;
      if (TII->isRxSBGMask(Imm.getZExtValue(), BitSize, Start, End))
        return TTI::TCC_Free;
    }
    break;
  case Instruction::Shl:
  case Instruction::LShr:
  case Instruction::AShr:
    // Always return TCC_Free for the shift value of a shift instruction.
    if (Idx == 1)
      return TTI::TCC_Free;
    break;
  case Instruction::UDiv:
  case Instruction::SDiv:
  case Instruction::URem:
  case Instruction::SRem:
  case Instruction::Trunc:
  case Instruction::ZExt:
  case Instruction::SExt:
  case Instruction::IntToPtr:
  case Instruction::PtrToInt:
  case Instruction::BitCast:
  case Instruction::PHI:
  case Instruction::Call:
  case Instruction::Select:
  case Instruction::Ret:
  case Instruction::Load:
    break;
  }

  return SystemZTTIImpl::getIntImmCost(Imm, Ty);
}

int SystemZTTIImpl::getIntImmCostIntrin(Intrinsic::ID IID, unsigned Idx,
                                        const APInt &Imm, Type *Ty) {
  assert(Ty->isIntegerTy());

  unsigned BitSize = Ty->getPrimitiveSizeInBits();
  // There is no cost model for constants with a bit size of 0. Return TCC_Free
  // here, so that constant hoisting will ignore this constant.
  if (BitSize == 0)
    return TTI::TCC_Free;
  // No cost model implemented yet for operations on integers wider than
  // 64 bits.
  if (BitSize > 64)
    return TTI::TCC_Free;

  switch (IID) {
  default:
    return TTI::TCC_Free;
  case Intrinsic::sadd_with_overflow:
  case Intrinsic::uadd_with_overflow:
  case Intrinsic::ssub_with_overflow:
  case Intrinsic::usub_with_overflow:
    // These get expanded to include a normal addition/subtraction.
    if (Idx == 1 && Imm.getBitWidth() <= 64) {
      if (isUInt<32>(Imm.getZExtValue()))
        return TTI::TCC_Free;
      if (isUInt<32>(-Imm.getSExtValue()))
        return TTI::TCC_Free;
    }
    break;
  case Intrinsic::smul_with_overflow:
  case Intrinsic::umul_with_overflow:
    // These get expanded to include a normal multiplication.
    if (Idx == 1 && Imm.getBitWidth() <= 64) {
      if (isInt<32>(Imm.getSExtValue()))
        return TTI::TCC_Free;
    }
    break;
  case Intrinsic::experimental_stackmap:
    if ((Idx < 2) || (Imm.getBitWidth() <= 64 && isInt<64>(Imm.getSExtValue())))
      return TTI::TCC_Free;
    break;
  case Intrinsic::experimental_patchpoint_void:
  case Intrinsic::experimental_patchpoint_i64:
    if ((Idx < 4) || (Imm.getBitWidth() <= 64 && isInt<64>(Imm.getSExtValue())))
      return TTI::TCC_Free;
    break;
  }
  return SystemZTTIImpl::getIntImmCost(Imm, Ty);
}

TargetTransformInfo::PopcntSupportKind
SystemZTTIImpl::getPopcntSupport(unsigned TyWidth) {
  assert(isPowerOf2_32(TyWidth) && "Type width must be power of 2");
  if (ST->hasPopulationCount() && TyWidth <= 64)
    return TTI::PSK_FastHardware;
  return TTI::PSK_Software;
}

void SystemZTTIImpl::getUnrollingPreferences(Loop *L, ScalarEvolution &SE,
                                             TTI::UnrollingPreferences &UP) {
  // Find out if L contains a call, what the machine instruction count
  // estimate is, and how many stores there are.
  bool HasCall = false;
  unsigned NumStores = 0;
  for (auto &BB : L->blocks())
    for (auto &I : *BB) {
      if (isa<CallInst>(&I) || isa<InvokeInst>(&I)) {
        ImmutableCallSite CS(&I);
        if (const Function *F = CS.getCalledFunction()) {
          if (isLoweredToCall(F))
            HasCall = true;
          if (F->getIntrinsicID() == Intrinsic::memcpy ||
              F->getIntrinsicID() == Intrinsic::memset)
            NumStores++;
        } else { // indirect call.
          HasCall = true;
        }
      }
      if (isa<StoreInst>(&I)) {
        Type *MemAccessTy = I.getOperand(0)->getType();
        NumStores += getMemoryOpCost(Instruction::Store, MemAccessTy, None, 0);
      }
    }

  // The z13 processor will run out of store tags if too many stores
  // are fed into it too quickly. Therefore make sure there are not
  // too many stores in the resulting unrolled loop.
  unsigned const Max = (NumStores ? (12 / NumStores) : UINT_MAX);
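  // (For example, an estimated four stores per iteration caps the unroll
  // count at 12 / 4 = 3.)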

  if (HasCall) {
    // If the loop contains calls, only allow full unrolling.
    UP.FullUnrollMaxCount = Max;
    UP.MaxCount = 1;
    return;
  }

  UP.MaxCount = Max;
  if (UP.MaxCount <= 1)
    return;

  // Allow partial and runtime trip count unrolling.
  UP.Partial = UP.Runtime = true;

  UP.PartialThreshold = 75;
  UP.DefaultUnrollRuntimeCount = 4;

  // Allow expensive instructions in the pre-header of the loop.
  UP.AllowExpensiveTripCount = true;

  UP.Force = true;
}


bool SystemZTTIImpl::isLSRCostLess(TargetTransformInfo::LSRCost &C1,
                                   TargetTransformInfo::LSRCost &C2) {
  // SystemZ specific: check instruction count (first), and don't care about
  // ImmCost, since offsets are checked explicitly.
  return std::tie(C1.Insns, C1.NumRegs, C1.AddRecCost,
                  C1.NumIVMuls, C1.NumBaseAdds,
                  C1.ScaleCost, C1.SetupCost) <
    std::tie(C2.Insns, C2.NumRegs, C2.AddRecCost,
             C2.NumIVMuls, C2.NumBaseAdds,
             C2.ScaleCost, C2.SetupCost);
}

unsigned SystemZTTIImpl::getNumberOfRegisters(unsigned ClassID) const {
  bool Vector = (ClassID == 1);
  if (!Vector)
    // Discount the stack pointer.  Also leave out %r0, since it can't
    // be used in an address.
    return 14;
  if (ST->hasVector())
    return 32;
  return 0;
}

unsigned SystemZTTIImpl::getRegisterBitWidth(bool Vector) const {
  if (!Vector)
    return 64;
  if (ST->hasVector())
    return 128;
  return 0;
}

bool SystemZTTIImpl::hasDivRemOp(Type *DataType, bool IsSigned) {
  EVT VT = TLI->getValueType(DL, DataType);
  return (VT.isScalarInteger() && TLI->isTypeLegal(VT));
}

// Return the bit size for the scalar type or vector element
// type. getScalarSizeInBits() returns 0 for a pointer type.
static unsigned getScalarSizeInBits(Type *Ty) {
  unsigned Size =
    (Ty->isPtrOrPtrVectorTy() ? 64U : Ty->getScalarSizeInBits());
  assert(Size > 0 && "Element must have non-zero size.");
  return Size;
}

// getNumberOfParts() calls getTypeLegalizationCost() which splits the vector
// type until it is legal. This would e.g. return 4 for <6 x i64>, instead of
// 3.
static unsigned getNumVectorRegs(Type *Ty) {
  assert(Ty->isVectorTy() && "Expected vector type");
  unsigned WideBits = getScalarSizeInBits(Ty) * Ty->getVectorNumElements();
  assert(WideBits > 0 && "Could not compute size of vector");
  return ((WideBits % 128U) ? ((WideBits / 128U) + 1) : (WideBits / 128U));
}

int SystemZTTIImpl::getArithmeticInstrCost(
    unsigned Opcode, Type *Ty, TTI::OperandValueKind Op1Info,
    TTI::OperandValueKind Op2Info, TTI::OperandValueProperties Opd1PropInfo,
    TTI::OperandValueProperties Opd2PropInfo, ArrayRef<const Value *> Args,
    const Instruction *CxtI) {

  // TODO: return a good value for BB-VECTORIZER that includes the
  // immediate loads, which we do not want to count for the loop
  // vectorizer, since they are hopefully hoisted out of the loop. This
  // would require a new parameter 'InLoop', but not sure if constant
  // args are common enough to motivate this.

  unsigned ScalarBits = Ty->getScalarSizeInBits();

  // There are three cases of division and remainder: Dividing with a register
  // needs a divide instruction. A divisor which is a power of two constant
  // can be implemented with a sequence of shifts. Any other constant needs a
  // multiply and shifts.
  const unsigned DivInstrCost = 20;
  const unsigned DivMulSeqCost = 10;
  const unsigned SDivPow2Cost = 4;

  bool SignedDivRem =
      Opcode == Instruction::SDiv || Opcode == Instruction::SRem;
  bool UnsignedDivRem =
      Opcode == Instruction::UDiv || Opcode == Instruction::URem;

  // Check for a constant divisor.
  bool DivRemConst = false;
  bool DivRemConstPow2 = false;
  if ((SignedDivRem || UnsignedDivRem) && Args.size() == 2) {
    if (const Constant *C = dyn_cast<Constant>(Args[1])) {
      const ConstantInt *CVal =
          (C->getType()->isVectorTy()
               ? dyn_cast_or_null<const ConstantInt>(C->getSplatValue())
               : dyn_cast<const ConstantInt>(C));
      if (CVal != nullptr &&
          (CVal->getValue().isPowerOf2() || (-CVal->getValue()).isPowerOf2()))
        DivRemConstPow2 = true;
      else
        DivRemConst = true;
    }
  }
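  // (For example, a divisor constant of 16, or -16 in the signed case, counts
  // as a power-of-2 divisor and is handled with shifts; illustrative.)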

  if (!Ty->isVectorTy()) {
    // These FP operations are supported with a dedicated instruction for
    // float, double and fp128 (base implementation assumes float generally
    // costs 2).
    if (Opcode == Instruction::FAdd || Opcode == Instruction::FSub ||
        Opcode == Instruction::FMul || Opcode == Instruction::FDiv)
      return 1;

    // There is no native support for FRem.
    if (Opcode == Instruction::FRem)
      return LIBCALL_COST;

    // Give a discount for some combined logical operations if supported.
    if (Args.size() == 2 && ST->hasMiscellaneousExtensions3()) {
      if (Opcode == Instruction::Xor) {
        for (const Value *A : Args) {
          if (const Instruction *I = dyn_cast<Instruction>(A))
            if (I->hasOneUse() &&
                (I->getOpcode() == Instruction::And ||
                 I->getOpcode() == Instruction::Or ||
                 I->getOpcode() == Instruction::Xor))
              return 0;
        }
      }
      else if (Opcode == Instruction::Or || Opcode == Instruction::And) {
        for (const Value *A : Args) {
          if (const Instruction *I = dyn_cast<Instruction>(A))
            if (I->hasOneUse() && I->getOpcode() == Instruction::Xor)
              return 0;
        }
      }
    }

    // Or requires one instruction, although it has custom handling for i64.
    if (Opcode == Instruction::Or)
      return 1;

    if (Opcode == Instruction::Xor && ScalarBits == 1) {
      if (ST->hasLoadStoreOnCond2())
        return 5; // 2 * (li 0; loc 1); xor
      return 7; // 2 * ipm sequences ; xor ; shift ; compare
    }

    if (DivRemConstPow2)
      return (SignedDivRem ? SDivPow2Cost : 1);
    if (DivRemConst)
      return DivMulSeqCost;
    if (SignedDivRem || UnsignedDivRem)
      return DivInstrCost;
  }
  else if (ST->hasVector()) {
    unsigned VF = Ty->getVectorNumElements();
    unsigned NumVectors = getNumVectorRegs(Ty);

    // These vector operations are custom handled, but are still supported
    // with one instruction per vector, regardless of element size.
    if (Opcode == Instruction::Shl || Opcode == Instruction::LShr ||
        Opcode == Instruction::AShr) {
      return NumVectors;
    }

    if (DivRemConstPow2)
      return (NumVectors * (SignedDivRem ? SDivPow2Cost : 1));
    if (DivRemConst)
      return VF * DivMulSeqCost + getScalarizationOverhead(Ty, Args);
    if ((SignedDivRem || UnsignedDivRem) && VF > 4)
      // Temporary hack: disable high vectorization factors with integer
      // division/remainder, which will get scalarized and handled with
      // GR128 registers. The mischeduler is not clever enough to avoid
      // spilling yet.
      return 1000;

    // These FP operations are supported with a single vector instruction for
    // double (base implementation assumes float generally costs 2). For
    // FP128, the scalar cost is 1, and there is no overhead since the values
    // are already in scalar registers.
    if (Opcode == Instruction::FAdd || Opcode == Instruction::FSub ||
        Opcode == Instruction::FMul || Opcode == Instruction::FDiv) {
      switch (ScalarBits) {
      case 32: {
        // The vector enhancements facility 1 provides v4f32 instructions.
        if (ST->hasVectorEnhancements1())
          return NumVectors;
        // Return the cost of multiple scalar invocations plus the cost of
        // inserting and extracting the values.
        unsigned ScalarCost =
            getArithmeticInstrCost(Opcode, Ty->getScalarType());
        unsigned Cost = (VF * ScalarCost) + getScalarizationOverhead(Ty, Args);
        // FIXME: VF 2 for these FP operations is currently just as
        // expensive as for VF 4.
        if (VF == 2)
          Cost *= 2;
        return Cost;
      }
      case 64:
      case 128:
        return NumVectors;
      default:
        break;
      }
    }

    // There is no native support for FRem.
    if (Opcode == Instruction::FRem) {
      unsigned Cost = (VF * LIBCALL_COST) + getScalarizationOverhead(Ty, Args);
      // FIXME: VF 2 for float is currently just as expensive as for VF 4.
      if (VF == 2 && ScalarBits == 32)
        Cost *= 2;
      return Cost;
    }
  }

  // Fallback to the default implementation.
  return BaseT::getArithmeticInstrCost(Opcode, Ty, Op1Info, Op2Info,
                                       Opd1PropInfo, Opd2PropInfo, Args, CxtI);
}

int SystemZTTIImpl::getShuffleCost(TTI::ShuffleKind Kind, Type *Tp, int Index,
                                   Type *SubTp) {
  assert (Tp->isVectorTy());
  if (ST->hasVector()) {
    unsigned NumVectors = getNumVectorRegs(Tp);

    // TODO: Since fp32 is expanded, the shuffle cost should always be 0.

    // FP128 values are always in scalar registers, so there is no work
    // involved with a shuffle, except for broadcast. In that case register
    // moves are done with a single instruction per element.
    if (Tp->getScalarType()->isFP128Ty())
      return (Kind == TargetTransformInfo::SK_Broadcast ? NumVectors - 1 : 0);

    switch (Kind) {
    case TargetTransformInfo::SK_ExtractSubvector:
      // ExtractSubvector Index indicates start offset.

      // Extracting a subvector from first index is a noop.
      return (Index == 0 ? 0 : NumVectors);

    case TargetTransformInfo::SK_Broadcast:
      // Loop vectorizer calls here to figure out the extra cost of
      // broadcasting a loaded value to all elements of a vector. Since vlrep
      // loads and replicates with a single instruction, adjust the returned
      // value.
      return NumVectors - 1;

    default:
      // SystemZ supports single instruction permutation / replication.
      return NumVectors;
    }
  }

  return BaseT::getShuffleCost(Kind, Tp, Index, SubTp);
}

// Return the log2 difference of the element sizes of the two vector types.
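// For example, between i8 and i64 element types this is
// Log2(64) - Log2(8) = 3.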
static unsigned getElSizeLog2Diff(Type *Ty0, Type *Ty1) {
  unsigned Bits0 = Ty0->getScalarSizeInBits();
  unsigned Bits1 = Ty1->getScalarSizeInBits();

  if (Bits1 > Bits0)
    return (Log2_32(Bits1) - Log2_32(Bits0));

  return (Log2_32(Bits0) - Log2_32(Bits1));
}

// Return the number of instructions needed to truncate SrcTy to DstTy.
unsigned SystemZTTIImpl::
getVectorTruncCost(Type *SrcTy, Type *DstTy) {
  assert (SrcTy->isVectorTy() && DstTy->isVectorTy());
  assert (SrcTy->getPrimitiveSizeInBits() > DstTy->getPrimitiveSizeInBits() &&
          "Packing must reduce size of vector type.");
  assert (SrcTy->getVectorNumElements() == DstTy->getVectorNumElements() &&
          "Packing should not change number of elements.");

  // TODO: Since fp32 is expanded, the extract cost should always be 0.

  unsigned NumParts = getNumVectorRegs(SrcTy);
  if (NumParts <= 2)
    // Up to 2 vector registers can be truncated efficiently with pack or
    // permute. The latter requires an immediate mask to be loaded, which
    // typically gets hoisted out of a loop.  TODO: return a good value for
    // BB-VECTORIZER that includes the immediate loads, which we do not want
    // to count for the loop vectorizer.
    return 1;

  unsigned Cost = 0;
  unsigned Log2Diff = getElSizeLog2Diff(SrcTy, DstTy);
  unsigned VF = SrcTy->getVectorNumElements();
  for (unsigned P = 0; P < Log2Diff; ++P) {
    if (NumParts > 1)
      NumParts /= 2;
    Cost += NumParts;
  }
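  // (Illustrative: truncating <16 x i32> to <16 x i8> starts from 4 source
  // parts with Log2Diff == 2, accumulating 2 + 1 = 3 pack operations.)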

  // Currently, a general mix of permutes and pack instructions is output by
  // isel, which follows the cost computation above except for this case,
  // which is one instruction less:
  if (VF == 8 && SrcTy->getScalarSizeInBits() == 64 &&
      DstTy->getScalarSizeInBits() == 8)
    Cost--;

  return Cost;
}

// Return the cost of converting a vector bitmask produced by a compare
// (SrcTy), to the type of the select or extend instruction (DstTy).
unsigned SystemZTTIImpl::
getVectorBitmaskConversionCost(Type *SrcTy, Type *DstTy) {
  assert (SrcTy->isVectorTy() && DstTy->isVectorTy() &&
          "Should only be called with vector types.");

  unsigned PackCost = 0;
  unsigned SrcScalarBits = SrcTy->getScalarSizeInBits();
  unsigned DstScalarBits = DstTy->getScalarSizeInBits();
  unsigned Log2Diff = getElSizeLog2Diff(SrcTy, DstTy);
  if (SrcScalarBits > DstScalarBits)
    // The bitmask will be truncated.
    PackCost = getVectorTruncCost(SrcTy, DstTy);
  else if (SrcScalarBits < DstScalarBits) {
    unsigned DstNumParts = getNumVectorRegs(DstTy);
    // Each vector select needs its part of the bitmask unpacked.
    PackCost = Log2Diff * DstNumParts;
    // Extra cost for moving part of mask before unpacking.
    PackCost += DstNumParts - 1;
  }

  return PackCost;
}

// Return the type of the compared operands. This is needed to compute the
// cost for a Select / ZExt or SExt instruction.
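// For example (illustrative), for 'select (icmp eq i64 %a, %b), ...' this
// returns i64, or <VF x i64> once vectorized.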
static Type *getCmpOpsType(const Instruction *I, unsigned VF = 1) {
  Type *OpTy = nullptr;
  if (CmpInst *CI = dyn_cast<CmpInst>(I->getOperand(0)))
    OpTy = CI->getOperand(0)->getType();
  else if (Instruction *LogicI = dyn_cast<Instruction>(I->getOperand(0)))
    if (LogicI->getNumOperands() == 2)
      if (CmpInst *CI0 = dyn_cast<CmpInst>(LogicI->getOperand(0)))
        if (isa<CmpInst>(LogicI->getOperand(1)))
          OpTy = CI0->getOperand(0)->getType();

  if (OpTy != nullptr) {
    if (VF == 1) {
      assert (!OpTy->isVectorTy() && "Expected scalar type");
      return OpTy;
    }
    // Return the potentially vectorized type based on 'I' and 'VF'.  'I' may
    // be either scalar or already vectorized with the same or a lesser VF.
    Type *ElTy = OpTy->getScalarType();
    return VectorType::get(ElTy, VF);
  }

  return nullptr;
}

// Get the cost of converting a boolean vector to a vector with same width
// and element size as Dst, plus the cost of zero extending if needed.
unsigned SystemZTTIImpl::
getBoolVecToIntConversionCost(unsigned Opcode, Type *Dst,
                              const Instruction *I) {
  assert (Dst->isVectorTy());
  unsigned VF = Dst->getVectorNumElements();
  unsigned Cost = 0;
  // If we know the widths of the compared operands, get the cost of
  // converting the bitmask to match Dst. Otherwise assume the same widths.
  Type *CmpOpTy = ((I != nullptr) ? getCmpOpsType(I, VF) : nullptr);
  if (CmpOpTy != nullptr)
    Cost = getVectorBitmaskConversionCost(CmpOpTy, Dst);
  if (Opcode == Instruction::ZExt || Opcode == Instruction::UIToFP)
    // One 'vn' per dst vector with an immediate mask.
    Cost += getNumVectorRegs(Dst);
  return Cost;
}

int SystemZTTIImpl::getCastInstrCost(unsigned Opcode, Type *Dst, Type *Src,
                                     const Instruction *I) {
  unsigned DstScalarBits = Dst->getScalarSizeInBits();
  unsigned SrcScalarBits = Src->getScalarSizeInBits();

  if (!Src->isVectorTy()) {
    assert (!Dst->isVectorTy());

    if (Opcode == Instruction::SIToFP || Opcode == Instruction::UIToFP) {
      if (SrcScalarBits >= 32 ||
          (I != nullptr && isa<LoadInst>(I->getOperand(0))))
        return 1;
      return SrcScalarBits > 1 ? 2 /*i8/i16 extend*/ : 5 /*branch seq.*/;
    }

    if ((Opcode == Instruction::ZExt || Opcode == Instruction::SExt) &&
        Src->isIntegerTy(1)) {
      if (ST->hasLoadStoreOnCond2())
        return 2; // li 0; loc 1

      // This should be extension of a compare i1 result, which is done with
      // ipm and a varying sequence of instructions.
      unsigned Cost = 0;
      if (Opcode == Instruction::SExt)
        Cost = (DstScalarBits < 64 ? 3 : 4);
      if (Opcode == Instruction::ZExt)
        Cost = 3;
      Type *CmpOpTy = ((I != nullptr) ? getCmpOpsType(I) : nullptr);
      if (CmpOpTy != nullptr && CmpOpTy->isFloatingPointTy())
        // If operands of fp type were compared, this costs +1.
        Cost++;
      return Cost;
    }
  }
  else if (ST->hasVector()) {
    assert (Dst->isVectorTy());
    unsigned VF = Src->getVectorNumElements();
    unsigned NumDstVectors = getNumVectorRegs(Dst);
    unsigned NumSrcVectors = getNumVectorRegs(Src);

    if (Opcode == Instruction::Trunc) {
      if (Src->getScalarSizeInBits() == Dst->getScalarSizeInBits())
        return 0; // Check for NOOP conversions.
      return getVectorTruncCost(Src, Dst);
    }

    if (Opcode == Instruction::ZExt || Opcode == Instruction::SExt) {
      if (SrcScalarBits >= 8) {
        // ZExt/SExt will be handled with one unpack per doubling of width.
        unsigned NumUnpacks = getElSizeLog2Diff(Src, Dst);

        // For types that span multiple vector registers, some additional
        // instructions are used to set up the unpacking.
        unsigned NumSrcVectorOps =
          (NumUnpacks > 1 ? (NumDstVectors - NumSrcVectors)
                          : (NumDstVectors / 2));
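        // (Illustrative: zext <8 x i16> to <8 x i64> takes 2 unpack steps
        // into 4 destination vectors plus 3 setup operations, for a cost of
        // 2 * 4 + 3 = 11.)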

        return (NumUnpacks * NumDstVectors) + NumSrcVectorOps;
      }
      else if (SrcScalarBits == 1)
        return getBoolVecToIntConversionCost(Opcode, Dst, I);
    }

    if (Opcode == Instruction::SIToFP || Opcode == Instruction::UIToFP ||
        Opcode == Instruction::FPToSI || Opcode == Instruction::FPToUI) {
      // TODO: Fix base implementation which could simplify things a bit here
      // (seems to miss on differentiating on scalar/vector types).

      // Only 64 bit vector conversions are natively supported before z15.
      if (DstScalarBits == 64 || ST->hasVectorEnhancements2()) {
        if (SrcScalarBits == DstScalarBits)
          return NumDstVectors;

        if (SrcScalarBits == 1)
          return getBoolVecToIntConversionCost(Opcode, Dst, I) + NumDstVectors;
      }

      // Return the cost of multiple scalar invocations plus the cost of
      // inserting and extracting the values. Base implementation does not
      // realize float->int gets scalarized.
      unsigned ScalarCost = getCastInstrCost(Opcode, Dst->getScalarType(),
                                             Src->getScalarType());
      unsigned TotCost = VF * ScalarCost;
      bool NeedsInserts = true, NeedsExtracts = true;
      // FP128 registers do not get inserted or extracted.
      if (DstScalarBits == 128 &&
          (Opcode == Instruction::SIToFP || Opcode == Instruction::UIToFP))
        NeedsInserts = false;
      if (SrcScalarBits == 128 &&
          (Opcode == Instruction::FPToSI || Opcode == Instruction::FPToUI))
        NeedsExtracts = false;

      TotCost += getScalarizationOverhead(Src, false, NeedsExtracts);
      TotCost += getScalarizationOverhead(Dst, NeedsInserts, false);

      // FIXME: VF 2 for float<->i32 is currently just as expensive as for VF 4.
      if (VF == 2 && SrcScalarBits == 32 && DstScalarBits == 32)
        TotCost *= 2;

      return TotCost;
    }

    if (Opcode == Instruction::FPTrunc) {
      if (SrcScalarBits == 128)  // fp128 -> double/float + inserts of elements.
        return VF /*ldxbr/lexbr*/ + getScalarizationOverhead(Dst, true, false);
      else // double -> float
        return VF / 2 /*vledb*/ + std::max(1U, VF / 4 /*vperm*/);
    }

    if (Opcode == Instruction::FPExt) {
      if (SrcScalarBits == 32 && DstScalarBits == 64) {
        // float -> double is very rare and currently unoptimized. Instead of
        // using vldeb, which can do two at a time, all conversions are
        // scalarized.
        return VF * 2;
      }
      // -> fp128.  VF * lxdb/lxeb + extraction of elements.
      return VF + getScalarizationOverhead(Src, false, true);
    }
  }

  return BaseT::getCastInstrCost(Opcode, Dst, Src, I);
}

// Scalar i8 / i16 operations will typically be made after first extending
// the operands to i32.
static unsigned getOperandsExtensionCost(const Instruction *I) {
  unsigned ExtCost = 0;
  for (Value *Op : I->operands())
    // A load of i8 or i16 sign/zero extends to i32.
    if (!isa<LoadInst>(Op) && !isa<ConstantInt>(Op))
      ExtCost++;

  return ExtCost;
}

int SystemZTTIImpl::getCmpSelInstrCost(unsigned Opcode, Type *ValTy,
                                       Type *CondTy, const Instruction *I) {
  if (!ValTy->isVectorTy()) {
    switch (Opcode) {
    case Instruction::ICmp: {
      // A loaded value compared with 0 with multiple users becomes Load and
      // Test. The load is then not foldable, so return 0 cost for the ICmp.
      unsigned ScalarBits = ValTy->getScalarSizeInBits();
      if (I != nullptr && ScalarBits >= 32)
        if (LoadInst *Ld = dyn_cast<LoadInst>(I->getOperand(0)))
          if (const ConstantInt *C = dyn_cast<ConstantInt>(I->getOperand(1)))
            if (!Ld->hasOneUse() && Ld->getParent() == I->getParent() &&
                C->getZExtValue() == 0)
              return 0;

      unsigned Cost = 1;
      if (ValTy->isIntegerTy() && ValTy->getScalarSizeInBits() <= 16)
        Cost += (I != nullptr ? getOperandsExtensionCost(I) : 2);
      return Cost;
    }
    case Instruction::Select:
      if (ValTy->isFloatingPointTy())
        return 4; // No load on condition for FP - costs a conditional jump.
      return 1; // Load On Condition / Select Register.
    }
  }
  else if (ST->hasVector()) {
    unsigned VF = ValTy->getVectorNumElements();

    // Called with a compare instruction.
    if (Opcode == Instruction::ICmp || Opcode == Instruction::FCmp) {
      unsigned PredicateExtraCost = 0;
      if (I != nullptr) {
        // Some predicates cost one or two extra instructions.
        switch (cast<CmpInst>(I)->getPredicate()) {
        case CmpInst::Predicate::ICMP_NE:
        case CmpInst::Predicate::ICMP_UGE:
        case CmpInst::Predicate::ICMP_ULE:
        case CmpInst::Predicate::ICMP_SGE:
        case CmpInst::Predicate::ICMP_SLE:
          PredicateExtraCost = 1;
          break;
        case CmpInst::Predicate::FCMP_ONE:
        case CmpInst::Predicate::FCMP_ORD:
        case CmpInst::Predicate::FCMP_UEQ:
        case CmpInst::Predicate::FCMP_UNO:
          PredicateExtraCost = 2;
          break;
        default:
          break;
        }
      }

      // Float is handled with 2*vmr[lh]f + 2*vldeb + vfchdb for each pair of
      // floats.  FIXME: <2 x float> generates same code as <4 x float>.
      unsigned CmpCostPerVector = (ValTy->getScalarType()->isFloatTy() ? 10 : 1);
      unsigned NumVecs_cmp = getNumVectorRegs(ValTy);

      unsigned Cost = (NumVecs_cmp * (CmpCostPerVector + PredicateExtraCost));
      return Cost;
    }
    else { // Called with a select instruction.
      assert (Opcode == Instruction::Select);

      // We can figure out the extra cost of packing / unpacking if the
      // instruction was passed and the compare instruction is found.
      unsigned PackCost = 0;
      Type *CmpOpTy = ((I != nullptr) ? getCmpOpsType(I, VF) : nullptr);
      if (CmpOpTy != nullptr)
        PackCost =
          getVectorBitmaskConversionCost(CmpOpTy, ValTy);

      return getNumVectorRegs(ValTy) /*vsel*/ + PackCost;
    }
  }

  return BaseT::getCmpSelInstrCost(Opcode, ValTy, CondTy, nullptr);
}

int SystemZTTIImpl::
getVectorInstrCost(unsigned Opcode, Type *Val, unsigned Index) {
  // vlvgp will insert two GRs into a vector register, so only count half the
  // number of instructions.
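  // (The even-indexed insert is charged for the vlvgp and the following
  // odd-indexed insert of the pair is then free.)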
  if (Opcode == Instruction::InsertElement && Val->isIntOrIntVectorTy(64))
    return ((Index % 2 == 0) ? 1 : 0);

  if (Opcode == Instruction::ExtractElement) {
    int Cost = ((getScalarSizeInBits(Val) == 1) ? 2 /*+test-under-mask*/ : 1);

    // Give a slight penalty for moving out of vector pipeline to FXU unit.
    if (Index == 0 && Val->isIntOrIntVectorTy())
      Cost += 1;

    return Cost;
  }

  return BaseT::getVectorInstrCost(Opcode, Val, Index);
}

// Check if a load may be folded as a memory operand in its user.
bool SystemZTTIImpl::
isFoldableLoad(const LoadInst *Ld, const Instruction *&FoldedValue) {
  if (!Ld->hasOneUse())
    return false;
  FoldedValue = Ld;
  const Instruction *UserI = cast<Instruction>(*Ld->user_begin());
  unsigned LoadedBits = getScalarSizeInBits(Ld->getType());
  unsigned TruncBits = 0;
  unsigned SExtBits = 0;
  unsigned ZExtBits = 0;
  if (UserI->hasOneUse()) {
    unsigned UserBits = UserI->getType()->getScalarSizeInBits();
    if (isa<TruncInst>(UserI))
      TruncBits = UserBits;
    else if (isa<SExtInst>(UserI))
      SExtBits = UserBits;
    else if (isa<ZExtInst>(UserI))
      ZExtBits = UserBits;
  }
  if (TruncBits || SExtBits || ZExtBits) {
    FoldedValue = UserI;
    UserI = cast<Instruction>(*UserI->user_begin());
    // Load (single use) -> trunc/extend (single use) -> UserI
  }
  if ((UserI->getOpcode() == Instruction::Sub ||
       UserI->getOpcode() == Instruction::SDiv ||
       UserI->getOpcode() == Instruction::UDiv) &&
      UserI->getOperand(1) != FoldedValue)
    return false; // Not commutative, only RHS foldable.
  // LoadOrTruncBits holds the number of effectively loaded bits, or 0 if the
  // load was extended.
  unsigned LoadOrTruncBits =
      ((SExtBits || ZExtBits) ? 0 : (TruncBits ? TruncBits : LoadedBits));
  switch (UserI->getOpcode()) {
  case Instruction::Add: // SE: 16->32, 16/32->64, z14:16->64. ZE: 32->64
  case Instruction::Sub:
  case Instruction::ICmp:
    if (LoadedBits == 32 && ZExtBits == 64)
      return true;
    LLVM_FALLTHROUGH;
  case Instruction::Mul: // SE: 16->32, 32->64, z14:16->64
    if (UserI->getOpcode() != Instruction::ICmp) {
      if (LoadedBits == 16 &&
          (SExtBits == 32 ||
           (SExtBits == 64 && ST->hasMiscellaneousExtensions2())))
        return true;
      if (LoadOrTruncBits == 16)
        return true;
    }
    LLVM_FALLTHROUGH;
  case Instruction::SDiv: // SE: 32->64
    if (LoadedBits == 32 && SExtBits == 64)
      return true;
    LLVM_FALLTHROUGH;
  case Instruction::UDiv:
  case Instruction::And:
  case Instruction::Or:
  case Instruction::Xor:
    // This also makes sense for float operations, but disabled for now due
    // to regressions.
    // case Instruction::FCmp:
    // case Instruction::FAdd:
    // case Instruction::FSub:
    // case Instruction::FMul:
    // case Instruction::FDiv:

    // All possible extensions of memory checked above.

    // Comparison between memory and immediate.
    if (UserI->getOpcode() == Instruction::ICmp)
      if (ConstantInt *CI = dyn_cast<ConstantInt>(UserI->getOperand(1)))
        if (isUInt<16>(CI->getZExtValue()))
          return true;
    return (LoadOrTruncBits == 32 || LoadOrTruncBits == 64);
    break;
  }
  return false;
}

static bool isBswapIntrinsicCall(const Value *V) {
  if (const Instruction *I = dyn_cast<Instruction>(V))
    if (auto *CI = dyn_cast<CallInst>(I))
      if (auto *F = CI->getCalledFunction())
        if (F->getIntrinsicID() == Intrinsic::bswap)
          return true;
  return false;
}

int SystemZTTIImpl::getMemoryOpCost(unsigned Opcode, Type *Src,
                                    MaybeAlign Alignment, unsigned AddressSpace,
                                    const Instruction *I) {
  assert(!Src->isVoidTy() && "Invalid type");

  if (!Src->isVectorTy() && Opcode == Instruction::Load && I != nullptr) {
    // Store the load or its truncated or extended value in FoldedValue.
    const Instruction *FoldedValue = nullptr;
    if (isFoldableLoad(cast<LoadInst>(I), FoldedValue)) {
      const Instruction *UserI = cast<Instruction>(*FoldedValue->user_begin());
      assert (UserI->getNumOperands() == 2 && "Expected a binop.");

      // UserI can't fold two loads, so in that case return 0 cost only
      // half of the time.
      for (unsigned i = 0; i < 2; ++i) {
        if (UserI->getOperand(i) == FoldedValue)
          continue;

        if (Instruction *OtherOp =
                dyn_cast<Instruction>(UserI->getOperand(i))) {
          LoadInst *OtherLoad = dyn_cast<LoadInst>(OtherOp);
          if (!OtherLoad &&
              (isa<TruncInst>(OtherOp) || isa<SExtInst>(OtherOp) ||
               isa<ZExtInst>(OtherOp)))
            OtherLoad = dyn_cast<LoadInst>(OtherOp->getOperand(0));
          if (OtherLoad && isFoldableLoad(OtherLoad, FoldedValue/*dummy*/))
            return i == 0; // Both operands foldable.
        }
      }

      return 0; // Only I is foldable in user.
    }
  }

  unsigned NumOps =
    (Src->isVectorTy() ? getNumVectorRegs(Src) : getNumberOfParts(Src));

  // Store/Load reversed saves one instruction.
  if (((!Src->isVectorTy() && NumOps == 1) || ST->hasVectorEnhancements2()) &&
      I != nullptr) {
    if (Opcode == Instruction::Load && I->hasOneUse()) {
      const Instruction *LdUser = cast<Instruction>(*I->user_begin());
      // In case of load -> bswap -> store, return normal cost for the load.
      if (isBswapIntrinsicCall(LdUser) &&
          (!LdUser->hasOneUse() || !isa<StoreInst>(*LdUser->user_begin())))
        return 0;
    }
    else if (const StoreInst *SI = dyn_cast<StoreInst>(I)) {
      const Value *StoredVal = SI->getValueOperand();
      if (StoredVal->hasOneUse() && isBswapIntrinsicCall(StoredVal))
        return 0;
    }
  }

  if (Src->getScalarSizeInBits() == 128)
    // 128 bit scalars are held in a pair of two 64 bit registers.
    NumOps *= 2;

  return NumOps;
}

// The generic implementation of getInterleavedMemoryOpCost() is based on
// adding costs of the memory operations plus all the extracts and inserts
// needed for using / defining the vector operands. The SystemZ version does
// roughly the same but bases the computations on vector permutations
// instead.
int SystemZTTIImpl::getInterleavedMemoryOpCost(unsigned Opcode, Type *VecTy,
                                               unsigned Factor,
                                               ArrayRef<unsigned> Indices,
                                               unsigned Alignment,
                                               unsigned AddressSpace,
                                               bool UseMaskForCond,
                                               bool UseMaskForGaps) {
  if (UseMaskForCond || UseMaskForGaps)
    return BaseT::getInterleavedMemoryOpCost(Opcode, VecTy, Factor, Indices,
                                             Alignment, AddressSpace,
                                             UseMaskForCond, UseMaskForGaps);
  assert(isa<VectorType>(VecTy) &&
         "Expect a vector type for interleaved memory op");

  // Return the ceiling of dividing A by B.
  auto ceil = [](unsigned A, unsigned B) { return (A + B - 1) / B; };

  unsigned NumElts = VecTy->getVectorNumElements();
  assert(Factor > 1 && NumElts % Factor == 0 && "Invalid interleave factor");
  unsigned VF = NumElts / Factor;
  unsigned NumEltsPerVecReg = (128U / getScalarSizeInBits(VecTy));
  unsigned NumVectorMemOps = getNumVectorRegs(VecTy);
  unsigned NumPermutes = 0;

  if (Opcode == Instruction::Load) {
    // Loading interleave groups may have gaps, which may mean fewer
    // loads. Find out how many vectors will be loaded in total, and in how
    // many of them each value will be in.
    BitVector UsedInsts(NumVectorMemOps, false);
    std::vector<BitVector> ValueVecs(Factor, BitVector(NumVectorMemOps, false));
    for (unsigned Index : Indices)
      for (unsigned Elt = 0; Elt < VF; ++Elt) {
        unsigned Vec = (Index + Elt * Factor) / NumEltsPerVecReg;
        UsedInsts.set(Vec);
        ValueVecs[Index].set(Vec);
      }
    NumVectorMemOps = UsedInsts.count();

    for (unsigned Index : Indices) {
      // Estimate that each loaded source vector containing this Index
      // requires one operation, except that vperm can handle two input
      // registers the first time for each destination vector.
      unsigned NumSrcVecs = ValueVecs[Index].count();
      unsigned NumDstVecs = ceil(VF * getScalarSizeInBits(VecTy), 128U);
      assert (NumSrcVecs >= NumDstVecs && "Expected at least as many sources");
      NumPermutes += std::max(1U, NumSrcVecs - NumDstVecs);
    }
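    // (Illustrative: a factor-2 interleaved load of <8 x i32> with both
    // indices used loads 2 vectors and needs one permute per index, giving
    // a total cost of 2 + 2 = 4.)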
  } else {
    // Estimate the permutes for each stored vector as the smaller of the
    // number of elements and the number of source vectors. Subtract one per
    // dst vector for vperm (see above).
    unsigned NumSrcVecs = std::min(NumEltsPerVecReg, Factor);
    unsigned NumDstVecs = NumVectorMemOps;
    assert (NumSrcVecs > 1 && "Expected at least two source vectors.");
    NumPermutes += (NumDstVecs * NumSrcVecs) - NumDstVecs;
  }

  // Cost of load/store operations and the permutations needed.
  return NumVectorMemOps + NumPermutes;
}

static int getVectorIntrinsicInstrCost(Intrinsic::ID ID, Type *RetTy) {
  if (RetTy->isVectorTy() && ID == Intrinsic::bswap)
    return getNumVectorRegs(RetTy); // VPERM
  return -1;
}

int SystemZTTIImpl::getIntrinsicInstrCost(Intrinsic::ID ID, Type *RetTy,
                                          ArrayRef<Value *> Args,
                                          FastMathFlags FMF, unsigned VF) {
  int Cost = getVectorIntrinsicInstrCost(ID, RetTy);
  if (Cost != -1)
    return Cost;
  return BaseT::getIntrinsicInstrCost(ID, RetTy, Args, FMF, VF);
}

int SystemZTTIImpl::getIntrinsicInstrCost(Intrinsic::ID ID, Type *RetTy,
                                          ArrayRef<Type *> Tys,
                                          FastMathFlags FMF,
                                          unsigned ScalarizationCostPassed) {
  int Cost = getVectorIntrinsicInstrCost(ID, RetTy);
  if (Cost != -1)
    return Cost;
  return BaseT::getIntrinsicInstrCost(ID, RetTy, Tys,
                                      FMF, ScalarizationCostPassed);
}