1 //===-- PPCISelLowering.cpp - PPC DAG Lowering Implementation -------------===//
2 //
3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4 // See https://llvm.org/LICENSE.txt for license information.
5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6 //
7 //===----------------------------------------------------------------------===//
8 //
9 // This file implements the PPCISelLowering class.
10 //
11 //===----------------------------------------------------------------------===//
12 
13 #include "PPCISelLowering.h"
14 #include "MCTargetDesc/PPCPredicates.h"
15 #include "PPC.h"
16 #include "PPCCCState.h"
17 #include "PPCCallingConv.h"
18 #include "PPCFrameLowering.h"
19 #include "PPCInstrInfo.h"
20 #include "PPCMachineFunctionInfo.h"
21 #include "PPCPerfectShuffle.h"
22 #include "PPCRegisterInfo.h"
23 #include "PPCSubtarget.h"
24 #include "PPCTargetMachine.h"
25 #include "llvm/ADT/APFloat.h"
26 #include "llvm/ADT/APInt.h"
27 #include "llvm/ADT/ArrayRef.h"
28 #include "llvm/ADT/DenseMap.h"
29 #include "llvm/ADT/None.h"
30 #include "llvm/ADT/STLExtras.h"
31 #include "llvm/ADT/SmallPtrSet.h"
32 #include "llvm/ADT/SmallSet.h"
33 #include "llvm/ADT/SmallVector.h"
34 #include "llvm/ADT/Statistic.h"
35 #include "llvm/ADT/StringRef.h"
36 #include "llvm/ADT/StringSwitch.h"
37 #include "llvm/CodeGen/CallingConvLower.h"
38 #include "llvm/CodeGen/ISDOpcodes.h"
39 #include "llvm/CodeGen/MachineBasicBlock.h"
40 #include "llvm/CodeGen/MachineFrameInfo.h"
41 #include "llvm/CodeGen/MachineFunction.h"
42 #include "llvm/CodeGen/MachineInstr.h"
43 #include "llvm/CodeGen/MachineInstrBuilder.h"
44 #include "llvm/CodeGen/MachineJumpTableInfo.h"
45 #include "llvm/CodeGen/MachineLoopInfo.h"
46 #include "llvm/CodeGen/MachineMemOperand.h"
47 #include "llvm/CodeGen/MachineModuleInfo.h"
48 #include "llvm/CodeGen/MachineOperand.h"
49 #include "llvm/CodeGen/MachineRegisterInfo.h"
50 #include "llvm/CodeGen/RuntimeLibcalls.h"
51 #include "llvm/CodeGen/SelectionDAG.h"
52 #include "llvm/CodeGen/SelectionDAGNodes.h"
53 #include "llvm/CodeGen/TargetInstrInfo.h"
54 #include "llvm/CodeGen/TargetLowering.h"
55 #include "llvm/CodeGen/TargetLoweringObjectFileImpl.h"
56 #include "llvm/CodeGen/TargetRegisterInfo.h"
57 #include "llvm/CodeGen/ValueTypes.h"
58 #include "llvm/IR/CallingConv.h"
59 #include "llvm/IR/Constant.h"
60 #include "llvm/IR/Constants.h"
61 #include "llvm/IR/DataLayout.h"
62 #include "llvm/IR/DebugLoc.h"
63 #include "llvm/IR/DerivedTypes.h"
64 #include "llvm/IR/Function.h"
65 #include "llvm/IR/GlobalValue.h"
66 #include "llvm/IR/IRBuilder.h"
67 #include "llvm/IR/Instructions.h"
68 #include "llvm/IR/Intrinsics.h"
69 #include "llvm/IR/IntrinsicsPowerPC.h"
70 #include "llvm/IR/Module.h"
71 #include "llvm/IR/Type.h"
72 #include "llvm/IR/Use.h"
73 #include "llvm/IR/Value.h"
74 #include "llvm/MC/MCContext.h"
75 #include "llvm/MC/MCExpr.h"
76 #include "llvm/MC/MCRegisterInfo.h"
77 #include "llvm/MC/MCSectionXCOFF.h"
78 #include "llvm/MC/MCSymbolXCOFF.h"
79 #include "llvm/Support/AtomicOrdering.h"
80 #include "llvm/Support/BranchProbability.h"
81 #include "llvm/Support/Casting.h"
82 #include "llvm/Support/CodeGen.h"
83 #include "llvm/Support/CommandLine.h"
84 #include "llvm/Support/Compiler.h"
85 #include "llvm/Support/Debug.h"
86 #include "llvm/Support/ErrorHandling.h"
87 #include "llvm/Support/Format.h"
88 #include "llvm/Support/KnownBits.h"
89 #include "llvm/Support/MachineValueType.h"
90 #include "llvm/Support/MathExtras.h"
91 #include "llvm/Support/raw_ostream.h"
92 #include "llvm/Target/TargetMachine.h"
93 #include "llvm/Target/TargetOptions.h"
94 #include <algorithm>
95 #include <cassert>
96 #include <cstdint>
97 #include <iterator>
98 #include <list>
99 #include <utility>
100 #include <vector>
101 
102 using namespace llvm;
103 
104 #define DEBUG_TYPE "ppc-lowering"
105 
106 static cl::opt<bool> DisablePPCPreinc("disable-ppc-preinc",
107 cl::desc("disable preincrement load/store generation on PPC"), cl::Hidden);
108 
109 static cl::opt<bool> DisableILPPref("disable-ppc-ilp-pref",
110 cl::desc("disable setting the node scheduling preference to ILP on PPC"), cl::Hidden);
111 
112 static cl::opt<bool> DisablePPCUnaligned("disable-ppc-unaligned",
113 cl::desc("disable unaligned load/store generation on PPC"), cl::Hidden);
114 
115 static cl::opt<bool> DisableSCO("disable-ppc-sco",
116 cl::desc("disable sibling call optimization on ppc"), cl::Hidden);
117 
118 static cl::opt<bool> DisableInnermostLoopAlign32("disable-ppc-innermost-loop-align32",
119 cl::desc("don't always align innermost loop to 32 bytes on ppc"), cl::Hidden);
120 
121 static cl::opt<bool> UseAbsoluteJumpTables("ppc-use-absolute-jumptables",
122 cl::desc("use absolute jump tables on ppc"), cl::Hidden);
123 
124 static cl::opt<bool> EnablePPCPCRelTLS(
125     "enable-ppc-pcrel-tls",
126     cl::desc("enable the use of PC relative memops in TLS instructions on PPC"),
127     cl::Hidden);
128 
129 STATISTIC(NumTailCalls, "Number of tail calls");
130 STATISTIC(NumSiblingCalls, "Number of sibling calls");
131 STATISTIC(ShufflesHandledWithVPERM, "Number of shuffles lowered to a VPERM");
132 STATISTIC(NumDynamicAllocaProbed, "Number of dynamic stack allocation probed");
133 
134 static bool isNByteElemShuffleMask(ShuffleVectorSDNode *, unsigned, int);
135 
136 static SDValue widenVec(SelectionDAG &DAG, SDValue Vec, const SDLoc &dl);
137 
138 // FIXME: Remove this once the bug has been fixed!
139 extern cl::opt<bool> ANDIGlueBug;
140 
141 PPCTargetLowering::PPCTargetLowering(const PPCTargetMachine &TM,
142                                      const PPCSubtarget &STI)
143     : TargetLowering(TM), Subtarget(STI) {
144   // On PPC32/64, arguments smaller than 4/8 bytes are extended, so all
145   // arguments are at least 4/8 bytes aligned.
146   bool isPPC64 = Subtarget.isPPC64();
147   setMinStackArgumentAlignment(isPPC64 ? Align(8) : Align(4));
148 
149   // Set up the register classes.
150   addRegisterClass(MVT::i32, &PPC::GPRCRegClass);
151   if (!useSoftFloat()) {
152     if (hasSPE()) {
153       addRegisterClass(MVT::f32, &PPC::GPRCRegClass);
154       addRegisterClass(MVT::f64, &PPC::SPERCRegClass);
155     } else {
156       addRegisterClass(MVT::f32, &PPC::F4RCRegClass);
157       addRegisterClass(MVT::f64, &PPC::F8RCRegClass);
158     }
159   }
160 
161   // Match BITREVERSE to customized fast code sequence in the td file.
162   setOperationAction(ISD::BITREVERSE, MVT::i32, Legal);
163   setOperationAction(ISD::BITREVERSE, MVT::i64, Legal);
164 
165   // Sub-word ATOMIC_CMP_SWAP need to ensure that the input is zero-extended.
166   setOperationAction(ISD::ATOMIC_CMP_SWAP, MVT::i32, Custom);
167 
168   // PowerPC has an i16 but no i8 (or i1) SEXTLOAD.
169   for (MVT VT : MVT::integer_valuetypes()) {
170     setLoadExtAction(ISD::SEXTLOAD, VT, MVT::i1, Promote);
171     setLoadExtAction(ISD::SEXTLOAD, VT, MVT::i8, Expand);
172   }
173 
174   if (Subtarget.isISA3_0()) {
175     setLoadExtAction(ISD::EXTLOAD, MVT::f64, MVT::f16, Legal);
176     setLoadExtAction(ISD::EXTLOAD, MVT::f32, MVT::f16, Legal);
177     setTruncStoreAction(MVT::f64, MVT::f16, Legal);
178     setTruncStoreAction(MVT::f32, MVT::f16, Legal);
179   } else {
180     // No extending loads from f16 or HW conversions back and forth.
181     setLoadExtAction(ISD::EXTLOAD, MVT::f64, MVT::f16, Expand);
182     setOperationAction(ISD::FP16_TO_FP, MVT::f64, Expand);
183     setOperationAction(ISD::FP_TO_FP16, MVT::f64, Expand);
184     setLoadExtAction(ISD::EXTLOAD, MVT::f32, MVT::f16, Expand);
185     setOperationAction(ISD::FP16_TO_FP, MVT::f32, Expand);
186     setOperationAction(ISD::FP_TO_FP16, MVT::f32, Expand);
187     setTruncStoreAction(MVT::f64, MVT::f16, Expand);
188     setTruncStoreAction(MVT::f32, MVT::f16, Expand);
189   }
190 
191   setTruncStoreAction(MVT::f64, MVT::f32, Expand);
192 
193   // PowerPC has pre-inc load and store's.
194   setIndexedLoadAction(ISD::PRE_INC, MVT::i1, Legal);
195   setIndexedLoadAction(ISD::PRE_INC, MVT::i8, Legal);
196   setIndexedLoadAction(ISD::PRE_INC, MVT::i16, Legal);
197   setIndexedLoadAction(ISD::PRE_INC, MVT::i32, Legal);
198   setIndexedLoadAction(ISD::PRE_INC, MVT::i64, Legal);
199   setIndexedStoreAction(ISD::PRE_INC, MVT::i1, Legal);
200   setIndexedStoreAction(ISD::PRE_INC, MVT::i8, Legal);
201   setIndexedStoreAction(ISD::PRE_INC, MVT::i16, Legal);
202   setIndexedStoreAction(ISD::PRE_INC, MVT::i32, Legal);
203   setIndexedStoreAction(ISD::PRE_INC, MVT::i64, Legal);
204   if (!Subtarget.hasSPE()) {
205     setIndexedLoadAction(ISD::PRE_INC, MVT::f32, Legal);
206     setIndexedLoadAction(ISD::PRE_INC, MVT::f64, Legal);
207     setIndexedStoreAction(ISD::PRE_INC, MVT::f32, Legal);
208     setIndexedStoreAction(ISD::PRE_INC, MVT::f64, Legal);
209   }
210 
211   // PowerPC uses ADDC/ADDE/SUBC/SUBE to propagate carry.
212   const MVT ScalarIntVTs[] = { MVT::i32, MVT::i64 };
213   for (MVT VT : ScalarIntVTs) {
214     setOperationAction(ISD::ADDC, VT, Legal);
215     setOperationAction(ISD::ADDE, VT, Legal);
216     setOperationAction(ISD::SUBC, VT, Legal);
217     setOperationAction(ISD::SUBE, VT, Legal);
218   }
219 
220   if (Subtarget.useCRBits()) {
221     setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i1, Expand);
222 
223     if (isPPC64 || Subtarget.hasFPCVT()) {
224       setOperationAction(ISD::STRICT_SINT_TO_FP, MVT::i1, Promote);
225       AddPromotedToType(ISD::STRICT_SINT_TO_FP, MVT::i1,
226                         isPPC64 ? MVT::i64 : MVT::i32);
227       setOperationAction(ISD::STRICT_UINT_TO_FP, MVT::i1, Promote);
228       AddPromotedToType(ISD::STRICT_UINT_TO_FP, MVT::i1,
229                         isPPC64 ? MVT::i64 : MVT::i32);
230 
231       setOperationAction(ISD::SINT_TO_FP, MVT::i1, Promote);
232       AddPromotedToType (ISD::SINT_TO_FP, MVT::i1,
233                          isPPC64 ? MVT::i64 : MVT::i32);
234       setOperationAction(ISD::UINT_TO_FP, MVT::i1, Promote);
235       AddPromotedToType(ISD::UINT_TO_FP, MVT::i1,
236                         isPPC64 ? MVT::i64 : MVT::i32);
237     } else {
238       setOperationAction(ISD::STRICT_SINT_TO_FP, MVT::i1, Custom);
239       setOperationAction(ISD::STRICT_UINT_TO_FP, MVT::i1, Custom);
240       setOperationAction(ISD::SINT_TO_FP, MVT::i1, Custom);
241       setOperationAction(ISD::UINT_TO_FP, MVT::i1, Custom);
242     }
243 
244     // PowerPC does not support direct load/store of condition registers.
245     setOperationAction(ISD::LOAD, MVT::i1, Custom);
246     setOperationAction(ISD::STORE, MVT::i1, Custom);
247 
248     // FIXME: Remove this once the ANDI glue bug is fixed:
249     if (ANDIGlueBug)
250       setOperationAction(ISD::TRUNCATE, MVT::i1, Custom);
251 
252     for (MVT VT : MVT::integer_valuetypes()) {
253       setLoadExtAction(ISD::SEXTLOAD, VT, MVT::i1, Promote);
254       setLoadExtAction(ISD::ZEXTLOAD, VT, MVT::i1, Promote);
255       setTruncStoreAction(VT, MVT::i1, Expand);
256     }
257 
258     addRegisterClass(MVT::i1, &PPC::CRBITRCRegClass);
259   }
260 
261   // Expand ppcf128 to i32 by hand for the benefit of llvm-gcc bootstrap on
262   // PPC (the libcall is not available).
263   setOperationAction(ISD::FP_TO_SINT, MVT::ppcf128, Custom);
264   setOperationAction(ISD::FP_TO_UINT, MVT::ppcf128, Custom);
265 
266   // We do not currently implement these libm ops for PowerPC.
267   setOperationAction(ISD::FFLOOR, MVT::ppcf128, Expand);
268   setOperationAction(ISD::FCEIL,  MVT::ppcf128, Expand);
269   setOperationAction(ISD::FTRUNC, MVT::ppcf128, Expand);
270   setOperationAction(ISD::FRINT,  MVT::ppcf128, Expand);
271   setOperationAction(ISD::FNEARBYINT, MVT::ppcf128, Expand);
272   setOperationAction(ISD::FREM, MVT::ppcf128, Expand);
273 
274   // PowerPC has no SREM/UREM instructions unless we are on P9
275   // On P9 we may use a hardware instruction to compute the remainder.
276   // When the result of both the remainder and the division is required it is
277   // more efficient to compute the remainder from the result of the division
278   // rather than use the remainder instruction. The instructions are legalized
279   // directly because the DivRemPairsPass performs the transformation at the IR
280   // level.
281   if (Subtarget.isISA3_0()) {
282     setOperationAction(ISD::SREM, MVT::i32, Legal);
283     setOperationAction(ISD::UREM, MVT::i32, Legal);
284     setOperationAction(ISD::SREM, MVT::i64, Legal);
285     setOperationAction(ISD::UREM, MVT::i64, Legal);
286   } else {
287     setOperationAction(ISD::SREM, MVT::i32, Expand);
288     setOperationAction(ISD::UREM, MVT::i32, Expand);
289     setOperationAction(ISD::SREM, MVT::i64, Expand);
290     setOperationAction(ISD::UREM, MVT::i64, Expand);
291   }
292 
293   // Don't use SMUL_LOHI/UMUL_LOHI or SDIVREM/UDIVREM to lower SREM/UREM.
294   setOperationAction(ISD::UMUL_LOHI, MVT::i32, Expand);
295   setOperationAction(ISD::SMUL_LOHI, MVT::i32, Expand);
296   setOperationAction(ISD::UMUL_LOHI, MVT::i64, Expand);
297   setOperationAction(ISD::SMUL_LOHI, MVT::i64, Expand);
298   setOperationAction(ISD::UDIVREM, MVT::i32, Expand);
299   setOperationAction(ISD::SDIVREM, MVT::i32, Expand);
300   setOperationAction(ISD::UDIVREM, MVT::i64, Expand);
301   setOperationAction(ISD::SDIVREM, MVT::i64, Expand);
302 
303   // Handle constrained floating-point operations of scalar.
304   // TODO: Handle SPE specific operation.
305   setOperationAction(ISD::STRICT_FADD, MVT::f32, Legal);
306   setOperationAction(ISD::STRICT_FSUB, MVT::f32, Legal);
307   setOperationAction(ISD::STRICT_FMUL, MVT::f32, Legal);
308   setOperationAction(ISD::STRICT_FDIV, MVT::f32, Legal);
309   setOperationAction(ISD::STRICT_FMA, MVT::f32, Legal);
310   setOperationAction(ISD::STRICT_FP_ROUND, MVT::f32, Legal);
311 
312   setOperationAction(ISD::STRICT_FADD, MVT::f64, Legal);
313   setOperationAction(ISD::STRICT_FSUB, MVT::f64, Legal);
314   setOperationAction(ISD::STRICT_FMUL, MVT::f64, Legal);
315   setOperationAction(ISD::STRICT_FDIV, MVT::f64, Legal);
316   setOperationAction(ISD::STRICT_FMA, MVT::f64, Legal);
317   if (Subtarget.hasVSX())
318     setOperationAction(ISD::STRICT_FNEARBYINT, MVT::f64, Legal);
319 
320   if (Subtarget.hasFSQRT()) {
321     setOperationAction(ISD::STRICT_FSQRT, MVT::f32, Legal);
322     setOperationAction(ISD::STRICT_FSQRT, MVT::f64, Legal);
323   }
324 
325   if (Subtarget.hasFPRND()) {
326     setOperationAction(ISD::STRICT_FFLOOR, MVT::f32, Legal);
327     setOperationAction(ISD::STRICT_FCEIL,  MVT::f32, Legal);
328     setOperationAction(ISD::STRICT_FTRUNC, MVT::f32, Legal);
329     setOperationAction(ISD::STRICT_FROUND, MVT::f32, Legal);
330 
331     setOperationAction(ISD::STRICT_FFLOOR, MVT::f64, Legal);
332     setOperationAction(ISD::STRICT_FCEIL,  MVT::f64, Legal);
333     setOperationAction(ISD::STRICT_FTRUNC, MVT::f64, Legal);
334     setOperationAction(ISD::STRICT_FROUND, MVT::f64, Legal);
335   }
336 
337   // We don't support sin/cos/sqrt/fmod/pow
338   setOperationAction(ISD::FSIN , MVT::f64, Expand);
339   setOperationAction(ISD::FCOS , MVT::f64, Expand);
340   setOperationAction(ISD::FSINCOS, MVT::f64, Expand);
341   setOperationAction(ISD::FREM , MVT::f64, Expand);
342   setOperationAction(ISD::FPOW , MVT::f64, Expand);
343   setOperationAction(ISD::FSIN , MVT::f32, Expand);
344   setOperationAction(ISD::FCOS , MVT::f32, Expand);
345   setOperationAction(ISD::FSINCOS, MVT::f32, Expand);
346   setOperationAction(ISD::FREM , MVT::f32, Expand);
347   setOperationAction(ISD::FPOW , MVT::f32, Expand);
348   if (Subtarget.hasSPE()) {
349     setOperationAction(ISD::FMA  , MVT::f64, Expand);
350     setOperationAction(ISD::FMA  , MVT::f32, Expand);
351   } else {
352     setOperationAction(ISD::FMA  , MVT::f64, Legal);
353     setOperationAction(ISD::FMA  , MVT::f32, Legal);
354   }
355 
356   if (Subtarget.hasSPE())
357     setLoadExtAction(ISD::EXTLOAD, MVT::f64, MVT::f32, Expand);
358 
359   setOperationAction(ISD::FLT_ROUNDS_, MVT::i32, Custom);
360 
361   // If we're enabling GP optimizations, use hardware square root
362   if (!Subtarget.hasFSQRT() &&
363       !(TM.Options.UnsafeFPMath && Subtarget.hasFRSQRTE() &&
364         Subtarget.hasFRE()))
365     setOperationAction(ISD::FSQRT, MVT::f64, Expand);
366 
367   if (!Subtarget.hasFSQRT() &&
368       !(TM.Options.UnsafeFPMath && Subtarget.hasFRSQRTES() &&
369         Subtarget.hasFRES()))
370     setOperationAction(ISD::FSQRT, MVT::f32, Expand);
371 
372   if (Subtarget.hasFCPSGN()) {
373     setOperationAction(ISD::FCOPYSIGN, MVT::f64, Legal);
374     setOperationAction(ISD::FCOPYSIGN, MVT::f32, Legal);
375   } else {
376     setOperationAction(ISD::FCOPYSIGN, MVT::f64, Expand);
377     setOperationAction(ISD::FCOPYSIGN, MVT::f32, Expand);
378   }
379 
380   if (Subtarget.hasFPRND()) {
381     setOperationAction(ISD::FFLOOR, MVT::f64, Legal);
382     setOperationAction(ISD::FCEIL,  MVT::f64, Legal);
383     setOperationAction(ISD::FTRUNC, MVT::f64, Legal);
384     setOperationAction(ISD::FROUND, MVT::f64, Legal);
385 
386     setOperationAction(ISD::FFLOOR, MVT::f32, Legal);
387     setOperationAction(ISD::FCEIL,  MVT::f32, Legal);
388     setOperationAction(ISD::FTRUNC, MVT::f32, Legal);
389     setOperationAction(ISD::FROUND, MVT::f32, Legal);
390   }
391 
392   // PowerPC does not have BSWAP, but we can use vector BSWAP instruction xxbrd
393   // to speed up scalar BSWAP64.
394   // CTPOP or CTTZ were introduced in P8/P9 respectively
395   setOperationAction(ISD::BSWAP, MVT::i32  , Expand);
396   if (Subtarget.hasP9Vector())
397     setOperationAction(ISD::BSWAP, MVT::i64  , Custom);
398   else
399     setOperationAction(ISD::BSWAP, MVT::i64  , Expand);
400   if (Subtarget.isISA3_0()) {
401     setOperationAction(ISD::CTTZ , MVT::i32  , Legal);
402     setOperationAction(ISD::CTTZ , MVT::i64  , Legal);
403   } else {
404     setOperationAction(ISD::CTTZ , MVT::i32  , Expand);
405     setOperationAction(ISD::CTTZ , MVT::i64  , Expand);
406   }
407 
408   if (Subtarget.hasPOPCNTD() == PPCSubtarget::POPCNTD_Fast) {
409     setOperationAction(ISD::CTPOP, MVT::i32  , Legal);
410     setOperationAction(ISD::CTPOP, MVT::i64  , Legal);
411   } else {
412     setOperationAction(ISD::CTPOP, MVT::i32  , Expand);
413     setOperationAction(ISD::CTPOP, MVT::i64  , Expand);
414   }
415 
416   // PowerPC does not have ROTR
417   setOperationAction(ISD::ROTR, MVT::i32   , Expand);
418   setOperationAction(ISD::ROTR, MVT::i64   , Expand);
419 
420   if (!Subtarget.useCRBits()) {
421     // PowerPC does not have Select
422     setOperationAction(ISD::SELECT, MVT::i32, Expand);
423     setOperationAction(ISD::SELECT, MVT::i64, Expand);
424     setOperationAction(ISD::SELECT, MVT::f32, Expand);
425     setOperationAction(ISD::SELECT, MVT::f64, Expand);
426   }
427 
428   // PowerPC wants to turn select_cc of FP into fsel when possible.
429   setOperationAction(ISD::SELECT_CC, MVT::f32, Custom);
430   setOperationAction(ISD::SELECT_CC, MVT::f64, Custom);
431 
432   // PowerPC wants to optimize integer setcc a bit
433   if (!Subtarget.useCRBits())
434     setOperationAction(ISD::SETCC, MVT::i32, Custom);
435 
436   if (Subtarget.hasFPU()) {
437     setOperationAction(ISD::STRICT_FSETCC, MVT::f32, Legal);
438     setOperationAction(ISD::STRICT_FSETCC, MVT::f64, Legal);
439     setOperationAction(ISD::STRICT_FSETCC, MVT::f128, Legal);
440 
441     setOperationAction(ISD::STRICT_FSETCCS, MVT::f32, Legal);
442     setOperationAction(ISD::STRICT_FSETCCS, MVT::f64, Legal);
443     setOperationAction(ISD::STRICT_FSETCCS, MVT::f128, Legal);
444   }
445 
446   // PowerPC does not have BRCOND which requires SetCC
447   if (!Subtarget.useCRBits())
448     setOperationAction(ISD::BRCOND, MVT::Other, Expand);
449 
450   setOperationAction(ISD::BR_JT,  MVT::Other, Expand);
451 
452   if (Subtarget.hasSPE()) {
453     // SPE has built-in conversions
454     setOperationAction(ISD::STRICT_FP_TO_SINT, MVT::i32, Legal);
455     setOperationAction(ISD::STRICT_SINT_TO_FP, MVT::i32, Legal);
456     setOperationAction(ISD::STRICT_UINT_TO_FP, MVT::i32, Legal);
457     setOperationAction(ISD::FP_TO_SINT, MVT::i32, Legal);
458     setOperationAction(ISD::SINT_TO_FP, MVT::i32, Legal);
459     setOperationAction(ISD::UINT_TO_FP, MVT::i32, Legal);
460   } else {
461     // PowerPC turns FP_TO_SINT into FCTIWZ and some load/stores.
462     setOperationAction(ISD::STRICT_FP_TO_SINT, MVT::i32, Custom);
463     setOperationAction(ISD::FP_TO_SINT, MVT::i32, Custom);
464 
465     // PowerPC does not have [U|S]INT_TO_FP
466     setOperationAction(ISD::STRICT_SINT_TO_FP, MVT::i32, Expand);
467     setOperationAction(ISD::STRICT_UINT_TO_FP, MVT::i32, Expand);
468     setOperationAction(ISD::SINT_TO_FP, MVT::i32, Expand);
469     setOperationAction(ISD::UINT_TO_FP, MVT::i32, Expand);
470   }
471 
472   if (Subtarget.hasDirectMove() && isPPC64) {
473     setOperationAction(ISD::BITCAST, MVT::f32, Legal);
474     setOperationAction(ISD::BITCAST, MVT::i32, Legal);
475     setOperationAction(ISD::BITCAST, MVT::i64, Legal);
476     setOperationAction(ISD::BITCAST, MVT::f64, Legal);
477     if (TM.Options.UnsafeFPMath) {
478       setOperationAction(ISD::LRINT, MVT::f64, Legal);
479       setOperationAction(ISD::LRINT, MVT::f32, Legal);
480       setOperationAction(ISD::LLRINT, MVT::f64, Legal);
481       setOperationAction(ISD::LLRINT, MVT::f32, Legal);
482       setOperationAction(ISD::LROUND, MVT::f64, Legal);
483       setOperationAction(ISD::LROUND, MVT::f32, Legal);
484       setOperationAction(ISD::LLROUND, MVT::f64, Legal);
485       setOperationAction(ISD::LLROUND, MVT::f32, Legal);
486     }
487   } else {
488     setOperationAction(ISD::BITCAST, MVT::f32, Expand);
489     setOperationAction(ISD::BITCAST, MVT::i32, Expand);
490     setOperationAction(ISD::BITCAST, MVT::i64, Expand);
491     setOperationAction(ISD::BITCAST, MVT::f64, Expand);
492   }
493 
494   // We cannot sextinreg(i1).  Expand to shifts.
495   setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i1, Expand);
496 
497   // NOTE: EH_SJLJ_SETJMP/_LONGJMP supported here is NOT intended to support
498   // SjLj exception handling but a light-weight setjmp/longjmp replacement to
499   // support continuation, user-level threading, and etc.. As a result, no
500   // other SjLj exception interfaces are implemented and please don't build
501   // your own exception handling based on them.
502   // LLVM/Clang supports zero-cost DWARF exception handling.
503   setOperationAction(ISD::EH_SJLJ_SETJMP, MVT::i32, Custom);
504   setOperationAction(ISD::EH_SJLJ_LONGJMP, MVT::Other, Custom);
505 
506   // We want to legalize GlobalAddress and ConstantPool nodes into the
507   // appropriate instructions to materialize the address.
508   setOperationAction(ISD::GlobalAddress, MVT::i32, Custom);
509   setOperationAction(ISD::GlobalTLSAddress, MVT::i32, Custom);
510   setOperationAction(ISD::BlockAddress,  MVT::i32, Custom);
511   setOperationAction(ISD::ConstantPool,  MVT::i32, Custom);
512   setOperationAction(ISD::JumpTable,     MVT::i32, Custom);
513   setOperationAction(ISD::GlobalAddress, MVT::i64, Custom);
514   setOperationAction(ISD::GlobalTLSAddress, MVT::i64, Custom);
515   setOperationAction(ISD::BlockAddress,  MVT::i64, Custom);
516   setOperationAction(ISD::ConstantPool,  MVT::i64, Custom);
517   setOperationAction(ISD::JumpTable,     MVT::i64, Custom);
518 
519   // TRAP is legal.
520   setOperationAction(ISD::TRAP, MVT::Other, Legal);
521 
522   // TRAMPOLINE is custom lowered.
523   setOperationAction(ISD::INIT_TRAMPOLINE, MVT::Other, Custom);
524   setOperationAction(ISD::ADJUST_TRAMPOLINE, MVT::Other, Custom);
525 
526   // VASTART needs to be custom lowered to use the VarArgsFrameIndex
527   setOperationAction(ISD::VASTART           , MVT::Other, Custom);
528 
529   if (Subtarget.is64BitELFABI()) {
530     // VAARG always uses double-word chunks, so promote anything smaller.
531     setOperationAction(ISD::VAARG, MVT::i1, Promote);
532     AddPromotedToType(ISD::VAARG, MVT::i1, MVT::i64);
533     setOperationAction(ISD::VAARG, MVT::i8, Promote);
534     AddPromotedToType(ISD::VAARG, MVT::i8, MVT::i64);
535     setOperationAction(ISD::VAARG, MVT::i16, Promote);
536     AddPromotedToType(ISD::VAARG, MVT::i16, MVT::i64);
537     setOperationAction(ISD::VAARG, MVT::i32, Promote);
538     AddPromotedToType(ISD::VAARG, MVT::i32, MVT::i64);
539     setOperationAction(ISD::VAARG, MVT::Other, Expand);
540   } else if (Subtarget.is32BitELFABI()) {
541     // VAARG is custom lowered with the 32-bit SVR4 ABI.
542     setOperationAction(ISD::VAARG, MVT::Other, Custom);
543     setOperationAction(ISD::VAARG, MVT::i64, Custom);
544   } else
545     setOperationAction(ISD::VAARG, MVT::Other, Expand);
546 
547   // VACOPY is custom lowered with the 32-bit SVR4 ABI.
548   if (Subtarget.is32BitELFABI())
549     setOperationAction(ISD::VACOPY            , MVT::Other, Custom);
550   else
551     setOperationAction(ISD::VACOPY            , MVT::Other, Expand);
552 
553   // Use the default implementation.
554   setOperationAction(ISD::VAEND             , MVT::Other, Expand);
555   setOperationAction(ISD::STACKSAVE         , MVT::Other, Expand);
556   setOperationAction(ISD::STACKRESTORE      , MVT::Other, Custom);
557   setOperationAction(ISD::DYNAMIC_STACKALLOC, MVT::i32  , Custom);
558   setOperationAction(ISD::DYNAMIC_STACKALLOC, MVT::i64  , Custom);
559   setOperationAction(ISD::GET_DYNAMIC_AREA_OFFSET, MVT::i32, Custom);
560   setOperationAction(ISD::GET_DYNAMIC_AREA_OFFSET, MVT::i64, Custom);
561   setOperationAction(ISD::EH_DWARF_CFA, MVT::i32, Custom);
562   setOperationAction(ISD::EH_DWARF_CFA, MVT::i64, Custom);
563 
564   // We want to custom lower some of our intrinsics.
565   setOperationAction(ISD::INTRINSIC_WO_CHAIN, MVT::Other, Custom);
566 
567   // To handle counter-based loop conditions.
568   setOperationAction(ISD::INTRINSIC_W_CHAIN, MVT::i1, Custom);
569 
570   setOperationAction(ISD::INTRINSIC_VOID, MVT::i8, Custom);
571   setOperationAction(ISD::INTRINSIC_VOID, MVT::i16, Custom);
572   setOperationAction(ISD::INTRINSIC_VOID, MVT::i32, Custom);
573   setOperationAction(ISD::INTRINSIC_VOID, MVT::Other, Custom);
574 
575   // Comparisons that require checking two conditions.
576   if (Subtarget.hasSPE()) {
577     setCondCodeAction(ISD::SETO, MVT::f32, Expand);
578     setCondCodeAction(ISD::SETO, MVT::f64, Expand);
579     setCondCodeAction(ISD::SETUO, MVT::f32, Expand);
580     setCondCodeAction(ISD::SETUO, MVT::f64, Expand);
581   }
582   setCondCodeAction(ISD::SETULT, MVT::f32, Expand);
583   setCondCodeAction(ISD::SETULT, MVT::f64, Expand);
584   setCondCodeAction(ISD::SETUGT, MVT::f32, Expand);
585   setCondCodeAction(ISD::SETUGT, MVT::f64, Expand);
586   setCondCodeAction(ISD::SETUEQ, MVT::f32, Expand);
587   setCondCodeAction(ISD::SETUEQ, MVT::f64, Expand);
588   setCondCodeAction(ISD::SETOGE, MVT::f32, Expand);
589   setCondCodeAction(ISD::SETOGE, MVT::f64, Expand);
590   setCondCodeAction(ISD::SETOLE, MVT::f32, Expand);
591   setCondCodeAction(ISD::SETOLE, MVT::f64, Expand);
592   setCondCodeAction(ISD::SETONE, MVT::f32, Expand);
593   setCondCodeAction(ISD::SETONE, MVT::f64, Expand);
594 
595   if (Subtarget.has64BitSupport()) {
596     // They also have instructions for converting between i64 and fp.
597     setOperationAction(ISD::STRICT_FP_TO_SINT, MVT::i64, Custom);
598     setOperationAction(ISD::STRICT_FP_TO_UINT, MVT::i64, Expand);
599     setOperationAction(ISD::STRICT_SINT_TO_FP, MVT::i64, Custom);
600     setOperationAction(ISD::STRICT_UINT_TO_FP, MVT::i64, Expand);
601     setOperationAction(ISD::FP_TO_SINT, MVT::i64, Custom);
602     setOperationAction(ISD::FP_TO_UINT, MVT::i64, Expand);
603     setOperationAction(ISD::SINT_TO_FP, MVT::i64, Custom);
604     setOperationAction(ISD::UINT_TO_FP, MVT::i64, Expand);
605     // This is just the low 32 bits of a (signed) fp->i64 conversion.
606     // We cannot do this with Promote because i64 is not a legal type.
607     setOperationAction(ISD::STRICT_FP_TO_UINT, MVT::i32, Custom);
608     setOperationAction(ISD::FP_TO_UINT, MVT::i32, Custom);
609 
610     if (Subtarget.hasLFIWAX() || Subtarget.isPPC64()) {
611       setOperationAction(ISD::SINT_TO_FP, MVT::i32, Custom);
612       setOperationAction(ISD::STRICT_SINT_TO_FP, MVT::i32, Custom);
613     }
614   } else {
615     // PowerPC does not have FP_TO_UINT on 32-bit implementations.
616     if (Subtarget.hasSPE()) {
617       setOperationAction(ISD::STRICT_FP_TO_UINT, MVT::i32, Legal);
618       setOperationAction(ISD::FP_TO_UINT, MVT::i32, Legal);
619     } else {
620       setOperationAction(ISD::STRICT_FP_TO_UINT, MVT::i32, Expand);
621       setOperationAction(ISD::FP_TO_UINT, MVT::i32, Expand);
622     }
623   }
624 
625   // With the instructions enabled under FPCVT, we can do everything.
626   if (Subtarget.hasFPCVT()) {
627     if (Subtarget.has64BitSupport()) {
628       setOperationAction(ISD::STRICT_FP_TO_SINT, MVT::i64, Custom);
629       setOperationAction(ISD::STRICT_FP_TO_UINT, MVT::i64, Custom);
630       setOperationAction(ISD::STRICT_SINT_TO_FP, MVT::i64, Custom);
631       setOperationAction(ISD::STRICT_UINT_TO_FP, MVT::i64, Custom);
632       setOperationAction(ISD::FP_TO_SINT, MVT::i64, Custom);
633       setOperationAction(ISD::FP_TO_UINT, MVT::i64, Custom);
634       setOperationAction(ISD::SINT_TO_FP, MVT::i64, Custom);
635       setOperationAction(ISD::UINT_TO_FP, MVT::i64, Custom);
636     }
637 
638     setOperationAction(ISD::STRICT_FP_TO_SINT, MVT::i32, Custom);
639     setOperationAction(ISD::STRICT_FP_TO_UINT, MVT::i32, Custom);
640     setOperationAction(ISD::STRICT_SINT_TO_FP, MVT::i32, Custom);
641     setOperationAction(ISD::STRICT_UINT_TO_FP, MVT::i32, Custom);
642     setOperationAction(ISD::FP_TO_SINT, MVT::i32, Custom);
643     setOperationAction(ISD::FP_TO_UINT, MVT::i32, Custom);
644     setOperationAction(ISD::SINT_TO_FP, MVT::i32, Custom);
645     setOperationAction(ISD::UINT_TO_FP, MVT::i32, Custom);
646   }
647 
648   if (Subtarget.use64BitRegs()) {
649     // 64-bit PowerPC implementations can support i64 types directly
650     addRegisterClass(MVT::i64, &PPC::G8RCRegClass);
651     // BUILD_PAIR can't be handled natively, and should be expanded to shl/or
652     setOperationAction(ISD::BUILD_PAIR, MVT::i64, Expand);
653     // 64-bit PowerPC wants to expand i128 shifts itself.
654     setOperationAction(ISD::SHL_PARTS, MVT::i64, Custom);
655     setOperationAction(ISD::SRA_PARTS, MVT::i64, Custom);
656     setOperationAction(ISD::SRL_PARTS, MVT::i64, Custom);
657   } else {
658     // 32-bit PowerPC wants to expand i64 shifts itself.
659     setOperationAction(ISD::SHL_PARTS, MVT::i32, Custom);
660     setOperationAction(ISD::SRA_PARTS, MVT::i32, Custom);
661     setOperationAction(ISD::SRL_PARTS, MVT::i32, Custom);
662   }
663 
664   // PowerPC has better expansions for funnel shifts than the generic
665   // TargetLowering::expandFunnelShift.
666   if (Subtarget.has64BitSupport()) {
667     setOperationAction(ISD::FSHL, MVT::i64, Custom);
668     setOperationAction(ISD::FSHR, MVT::i64, Custom);
669   }
670   setOperationAction(ISD::FSHL, MVT::i32, Custom);
671   setOperationAction(ISD::FSHR, MVT::i32, Custom);
672 
673   if (Subtarget.hasVSX()) {
674     setOperationAction(ISD::FMAXNUM_IEEE, MVT::f64, Legal);
675     setOperationAction(ISD::FMAXNUM_IEEE, MVT::f32, Legal);
676     setOperationAction(ISD::FMINNUM_IEEE, MVT::f64, Legal);
677     setOperationAction(ISD::FMINNUM_IEEE, MVT::f32, Legal);
678   }
679 
680   if (Subtarget.hasAltivec()) {
681     for (MVT VT : { MVT::v16i8, MVT::v8i16, MVT::v4i32 }) {
682       setOperationAction(ISD::SADDSAT, VT, Legal);
683       setOperationAction(ISD::SSUBSAT, VT, Legal);
684       setOperationAction(ISD::UADDSAT, VT, Legal);
685       setOperationAction(ISD::USUBSAT, VT, Legal);
686     }
687     // First set operation action for all vector types to expand. Then we
688     // will selectively turn on ones that can be effectively codegen'd.
689     for (MVT VT : MVT::fixedlen_vector_valuetypes()) {
690       // add/sub are legal for all supported vector VT's.
691       setOperationAction(ISD::ADD, VT, Legal);
692       setOperationAction(ISD::SUB, VT, Legal);
693 
694       // For v2i64, these are only valid with P8Vector. This is corrected after
695       // the loop.
696       if (VT.getSizeInBits() <= 128 && VT.getScalarSizeInBits() <= 64) {
697         setOperationAction(ISD::SMAX, VT, Legal);
698         setOperationAction(ISD::SMIN, VT, Legal);
699         setOperationAction(ISD::UMAX, VT, Legal);
700         setOperationAction(ISD::UMIN, VT, Legal);
701       }
702       else {
703         setOperationAction(ISD::SMAX, VT, Expand);
704         setOperationAction(ISD::SMIN, VT, Expand);
705         setOperationAction(ISD::UMAX, VT, Expand);
706         setOperationAction(ISD::UMIN, VT, Expand);
707       }
708 
709       if (Subtarget.hasVSX()) {
710         setOperationAction(ISD::FMAXNUM, VT, Legal);
711         setOperationAction(ISD::FMINNUM, VT, Legal);
712       }
713 
714       // Vector instructions introduced in P8
715       if (Subtarget.hasP8Altivec() && (VT.SimpleTy != MVT::v1i128)) {
716         setOperationAction(ISD::CTPOP, VT, Legal);
717         setOperationAction(ISD::CTLZ, VT, Legal);
718       }
719       else {
720         setOperationAction(ISD::CTPOP, VT, Expand);
721         setOperationAction(ISD::CTLZ, VT, Expand);
722       }
723 
724       // Vector instructions introduced in P9
725       if (Subtarget.hasP9Altivec() && (VT.SimpleTy != MVT::v1i128))
726         setOperationAction(ISD::CTTZ, VT, Legal);
727       else
728         setOperationAction(ISD::CTTZ, VT, Expand);
729 
730       // We promote all shuffles to v16i8.
731       setOperationAction(ISD::VECTOR_SHUFFLE, VT, Promote);
732       AddPromotedToType (ISD::VECTOR_SHUFFLE, VT, MVT::v16i8);
733 
734       // We promote all non-typed operations to v4i32.
735       setOperationAction(ISD::AND   , VT, Promote);
736       AddPromotedToType (ISD::AND   , VT, MVT::v4i32);
737       setOperationAction(ISD::OR    , VT, Promote);
738       AddPromotedToType (ISD::OR    , VT, MVT::v4i32);
739       setOperationAction(ISD::XOR   , VT, Promote);
740       AddPromotedToType (ISD::XOR   , VT, MVT::v4i32);
741       setOperationAction(ISD::LOAD  , VT, Promote);
742       AddPromotedToType (ISD::LOAD  , VT, MVT::v4i32);
743       setOperationAction(ISD::SELECT, VT, Promote);
744       AddPromotedToType (ISD::SELECT, VT, MVT::v4i32);
745       setOperationAction(ISD::VSELECT, VT, Legal);
746       setOperationAction(ISD::SELECT_CC, VT, Promote);
747       AddPromotedToType (ISD::SELECT_CC, VT, MVT::v4i32);
748       setOperationAction(ISD::STORE, VT, Promote);
749       AddPromotedToType (ISD::STORE, VT, MVT::v4i32);
750 
751       // No other operations are legal.
752       setOperationAction(ISD::MUL , VT, Expand);
753       setOperationAction(ISD::SDIV, VT, Expand);
754       setOperationAction(ISD::SREM, VT, Expand);
755       setOperationAction(ISD::UDIV, VT, Expand);
756       setOperationAction(ISD::UREM, VT, Expand);
757       setOperationAction(ISD::FDIV, VT, Expand);
758       setOperationAction(ISD::FREM, VT, Expand);
759       setOperationAction(ISD::FNEG, VT, Expand);
760       setOperationAction(ISD::FSQRT, VT, Expand);
761       setOperationAction(ISD::FLOG, VT, Expand);
762       setOperationAction(ISD::FLOG10, VT, Expand);
763       setOperationAction(ISD::FLOG2, VT, Expand);
764       setOperationAction(ISD::FEXP, VT, Expand);
765       setOperationAction(ISD::FEXP2, VT, Expand);
766       setOperationAction(ISD::FSIN, VT, Expand);
767       setOperationAction(ISD::FCOS, VT, Expand);
768       setOperationAction(ISD::FABS, VT, Expand);
769       setOperationAction(ISD::FFLOOR, VT, Expand);
770       setOperationAction(ISD::FCEIL,  VT, Expand);
771       setOperationAction(ISD::FTRUNC, VT, Expand);
772       setOperationAction(ISD::FRINT,  VT, Expand);
773       setOperationAction(ISD::FNEARBYINT, VT, Expand);
774       setOperationAction(ISD::EXTRACT_VECTOR_ELT, VT, Expand);
775       setOperationAction(ISD::INSERT_VECTOR_ELT, VT, Expand);
776       setOperationAction(ISD::BUILD_VECTOR, VT, Expand);
777       setOperationAction(ISD::MULHU, VT, Expand);
778       setOperationAction(ISD::MULHS, VT, Expand);
779       setOperationAction(ISD::UMUL_LOHI, VT, Expand);
780       setOperationAction(ISD::SMUL_LOHI, VT, Expand);
781       setOperationAction(ISD::UDIVREM, VT, Expand);
782       setOperationAction(ISD::SDIVREM, VT, Expand);
783       setOperationAction(ISD::SCALAR_TO_VECTOR, VT, Expand);
784       setOperationAction(ISD::FPOW, VT, Expand);
785       setOperationAction(ISD::BSWAP, VT, Expand);
786       setOperationAction(ISD::SIGN_EXTEND_INREG, VT, Expand);
787       setOperationAction(ISD::ROTL, VT, Expand);
788       setOperationAction(ISD::ROTR, VT, Expand);
789 
790       for (MVT InnerVT : MVT::fixedlen_vector_valuetypes()) {
791         setTruncStoreAction(VT, InnerVT, Expand);
792         setLoadExtAction(ISD::SEXTLOAD, VT, InnerVT, Expand);
793         setLoadExtAction(ISD::ZEXTLOAD, VT, InnerVT, Expand);
794         setLoadExtAction(ISD::EXTLOAD, VT, InnerVT, Expand);
795       }
796     }
797     setOperationAction(ISD::SELECT_CC, MVT::v4i32, Expand);
798     if (!Subtarget.hasP8Vector()) {
799       setOperationAction(ISD::SMAX, MVT::v2i64, Expand);
800       setOperationAction(ISD::SMIN, MVT::v2i64, Expand);
801       setOperationAction(ISD::UMAX, MVT::v2i64, Expand);
802       setOperationAction(ISD::UMIN, MVT::v2i64, Expand);
803     }
804 
805     for (auto VT : {MVT::v2i64, MVT::v4i32, MVT::v8i16, MVT::v16i8})
806       setOperationAction(ISD::ABS, VT, Custom);
807 
808     // We can custom expand all VECTOR_SHUFFLEs to VPERM, others we can handle
809     // with merges, splats, etc.
810     setOperationAction(ISD::VECTOR_SHUFFLE, MVT::v16i8, Custom);
811 
812     // Vector truncates to sub-word integer that fit in an Altivec/VSX register
813     // are cheap, so handle them before they get expanded to scalar.
814     setOperationAction(ISD::TRUNCATE, MVT::v8i8, Custom);
815     setOperationAction(ISD::TRUNCATE, MVT::v4i8, Custom);
816     setOperationAction(ISD::TRUNCATE, MVT::v2i8, Custom);
817     setOperationAction(ISD::TRUNCATE, MVT::v4i16, Custom);
818     setOperationAction(ISD::TRUNCATE, MVT::v2i16, Custom);
819 
820     setOperationAction(ISD::AND   , MVT::v4i32, Legal);
821     setOperationAction(ISD::OR    , MVT::v4i32, Legal);
822     setOperationAction(ISD::XOR   , MVT::v4i32, Legal);
823     setOperationAction(ISD::LOAD  , MVT::v4i32, Legal);
824     setOperationAction(ISD::SELECT, MVT::v4i32,
825                        Subtarget.useCRBits() ? Legal : Expand);
826     setOperationAction(ISD::STORE , MVT::v4i32, Legal);
827     setOperationAction(ISD::STRICT_FP_TO_SINT, MVT::v4i32, Legal);
828     setOperationAction(ISD::STRICT_FP_TO_UINT, MVT::v4i32, Legal);
829     setOperationAction(ISD::STRICT_SINT_TO_FP, MVT::v4i32, Legal);
830     setOperationAction(ISD::STRICT_UINT_TO_FP, MVT::v4i32, Legal);
831     setOperationAction(ISD::FP_TO_SINT, MVT::v4i32, Legal);
832     setOperationAction(ISD::FP_TO_UINT, MVT::v4i32, Legal);
833     setOperationAction(ISD::SINT_TO_FP, MVT::v4i32, Legal);
834     setOperationAction(ISD::UINT_TO_FP, MVT::v4i32, Legal);
835     setOperationAction(ISD::FFLOOR, MVT::v4f32, Legal);
836     setOperationAction(ISD::FCEIL, MVT::v4f32, Legal);
837     setOperationAction(ISD::FTRUNC, MVT::v4f32, Legal);
838     setOperationAction(ISD::FNEARBYINT, MVT::v4f32, Legal);
839 
840     // Without hasP8Altivec set, v2i64 SMAX isn't available.
841     // But ABS custom lowering requires SMAX support.
842     if (!Subtarget.hasP8Altivec())
843       setOperationAction(ISD::ABS, MVT::v2i64, Expand);
844 
845     // Custom lowering ROTL v1i128 to VECTOR_SHUFFLE v16i8.
846     setOperationAction(ISD::ROTL, MVT::v1i128, Custom);
847     // With hasAltivec set, we can lower ISD::ROTL to vrl(b|h|w).
848     if (Subtarget.hasAltivec())
849       for (auto VT : {MVT::v4i32, MVT::v8i16, MVT::v16i8})
850         setOperationAction(ISD::ROTL, VT, Legal);
851     // With hasP8Altivec set, we can lower ISD::ROTL to vrld.
852     if (Subtarget.hasP8Altivec())
853       setOperationAction(ISD::ROTL, MVT::v2i64, Legal);
854 
855     addRegisterClass(MVT::v4f32, &PPC::VRRCRegClass);
856     addRegisterClass(MVT::v4i32, &PPC::VRRCRegClass);
857     addRegisterClass(MVT::v8i16, &PPC::VRRCRegClass);
858     addRegisterClass(MVT::v16i8, &PPC::VRRCRegClass);
859 
860     setOperationAction(ISD::MUL, MVT::v4f32, Legal);
861     setOperationAction(ISD::FMA, MVT::v4f32, Legal);
862 
863     if (Subtarget.hasVSX()) {
864       setOperationAction(ISD::FDIV, MVT::v4f32, Legal);
865       setOperationAction(ISD::FSQRT, MVT::v4f32, Legal);
866     }
867 
868     if (Subtarget.hasP8Altivec())
869       setOperationAction(ISD::MUL, MVT::v4i32, Legal);
870     else
871       setOperationAction(ISD::MUL, MVT::v4i32, Custom);
872 
873     if (Subtarget.isISA3_1()) {
874       setOperationAction(ISD::MUL, MVT::v2i64, Legal);
875       setOperationAction(ISD::MULHS, MVT::v2i64, Legal);
876       setOperationAction(ISD::MULHU, MVT::v2i64, Legal);
877       setOperationAction(ISD::MULHS, MVT::v4i32, Legal);
878       setOperationAction(ISD::MULHU, MVT::v4i32, Legal);
879       setOperationAction(ISD::UDIV, MVT::v2i64, Legal);
880       setOperationAction(ISD::SDIV, MVT::v2i64, Legal);
881       setOperationAction(ISD::UDIV, MVT::v4i32, Legal);
882       setOperationAction(ISD::SDIV, MVT::v4i32, Legal);
883       setOperationAction(ISD::UREM, MVT::v2i64, Legal);
884       setOperationAction(ISD::SREM, MVT::v2i64, Legal);
885       setOperationAction(ISD::UREM, MVT::v4i32, Legal);
886       setOperationAction(ISD::SREM, MVT::v4i32, Legal);
887     }
888 
889     setOperationAction(ISD::MUL, MVT::v8i16, Legal);
890     setOperationAction(ISD::MUL, MVT::v16i8, Custom);
891 
892     setOperationAction(ISD::SCALAR_TO_VECTOR, MVT::v4f32, Custom);
893     setOperationAction(ISD::SCALAR_TO_VECTOR, MVT::v4i32, Custom);
894 
895     setOperationAction(ISD::BUILD_VECTOR, MVT::v16i8, Custom);
896     setOperationAction(ISD::BUILD_VECTOR, MVT::v8i16, Custom);
897     setOperationAction(ISD::BUILD_VECTOR, MVT::v4i32, Custom);
898     setOperationAction(ISD::BUILD_VECTOR, MVT::v4f32, Custom);
899 
900     // Altivec does not contain unordered floating-point compare instructions
901     setCondCodeAction(ISD::SETUO, MVT::v4f32, Expand);
902     setCondCodeAction(ISD::SETUEQ, MVT::v4f32, Expand);
903     setCondCodeAction(ISD::SETO,   MVT::v4f32, Expand);
904     setCondCodeAction(ISD::SETONE, MVT::v4f32, Expand);
905 
906     if (Subtarget.hasVSX()) {
907       setOperationAction(ISD::SCALAR_TO_VECTOR, MVT::v2f64, Legal);
908       setOperationAction(ISD::EXTRACT_VECTOR_ELT, MVT::v2f64, Legal);
909       if (Subtarget.hasP8Vector()) {
910         setOperationAction(ISD::SCALAR_TO_VECTOR, MVT::v4f32, Legal);
911         setOperationAction(ISD::EXTRACT_VECTOR_ELT, MVT::v4f32, Legal);
912       }
913       if (Subtarget.hasDirectMove() && isPPC64) {
914         setOperationAction(ISD::SCALAR_TO_VECTOR, MVT::v16i8, Legal);
915         setOperationAction(ISD::SCALAR_TO_VECTOR, MVT::v8i16, Legal);
916         setOperationAction(ISD::SCALAR_TO_VECTOR, MVT::v4i32, Legal);
917         setOperationAction(ISD::SCALAR_TO_VECTOR, MVT::v2i64, Legal);
918         setOperationAction(ISD::EXTRACT_VECTOR_ELT, MVT::v16i8, Legal);
919         setOperationAction(ISD::EXTRACT_VECTOR_ELT, MVT::v8i16, Legal);
920         setOperationAction(ISD::EXTRACT_VECTOR_ELT, MVT::v4i32, Legal);
921         setOperationAction(ISD::EXTRACT_VECTOR_ELT, MVT::v2i64, Legal);
922       }
923       setOperationAction(ISD::EXTRACT_VECTOR_ELT, MVT::v2f64, Legal);
924 
925       // The nearbyint variants are not allowed to raise the inexact exception
926       // so we can only code-gen them with unsafe math.
927       if (TM.Options.UnsafeFPMath) {
928         setOperationAction(ISD::FNEARBYINT, MVT::f64, Legal);
929         setOperationAction(ISD::FNEARBYINT, MVT::f32, Legal);
930       }
931 
932       setOperationAction(ISD::FFLOOR, MVT::v2f64, Legal);
933       setOperationAction(ISD::FCEIL, MVT::v2f64, Legal);
934       setOperationAction(ISD::FTRUNC, MVT::v2f64, Legal);
935       setOperationAction(ISD::FNEARBYINT, MVT::v2f64, Legal);
936       setOperationAction(ISD::FRINT, MVT::v2f64, Legal);
937       setOperationAction(ISD::FROUND, MVT::v2f64, Legal);
938       setOperationAction(ISD::FROUND, MVT::f64, Legal);
939       setOperationAction(ISD::FRINT, MVT::f64, Legal);
940 
941       setOperationAction(ISD::FNEARBYINT, MVT::v4f32, Legal);
942       setOperationAction(ISD::FRINT, MVT::v4f32, Legal);
943       setOperationAction(ISD::FROUND, MVT::v4f32, Legal);
944       setOperationAction(ISD::FROUND, MVT::f32, Legal);
945       setOperationAction(ISD::FRINT, MVT::f32, Legal);
946 
947       setOperationAction(ISD::MUL, MVT::v2f64, Legal);
948       setOperationAction(ISD::FMA, MVT::v2f64, Legal);
949 
950       setOperationAction(ISD::FDIV, MVT::v2f64, Legal);
951       setOperationAction(ISD::FSQRT, MVT::v2f64, Legal);
952 
953       // Share the Altivec comparison restrictions.
954       setCondCodeAction(ISD::SETUO, MVT::v2f64, Expand);
955       setCondCodeAction(ISD::SETUEQ, MVT::v2f64, Expand);
956       setCondCodeAction(ISD::SETO,   MVT::v2f64, Expand);
957       setCondCodeAction(ISD::SETONE, MVT::v2f64, Expand);
958 
959       setOperationAction(ISD::LOAD, MVT::v2f64, Legal);
960       setOperationAction(ISD::STORE, MVT::v2f64, Legal);
961 
962       setOperationAction(ISD::VECTOR_SHUFFLE, MVT::v2f64, Legal);
963 
964       if (Subtarget.hasP8Vector())
965         addRegisterClass(MVT::f32, &PPC::VSSRCRegClass);
966 
967       addRegisterClass(MVT::f64, &PPC::VSFRCRegClass);
968 
969       addRegisterClass(MVT::v4i32, &PPC::VSRCRegClass);
970       addRegisterClass(MVT::v4f32, &PPC::VSRCRegClass);
971       addRegisterClass(MVT::v2f64, &PPC::VSRCRegClass);
972 
973       if (Subtarget.hasP8Altivec()) {
974         setOperationAction(ISD::SHL, MVT::v2i64, Legal);
975         setOperationAction(ISD::SRA, MVT::v2i64, Legal);
976         setOperationAction(ISD::SRL, MVT::v2i64, Legal);
977 
978         // 128 bit shifts can be accomplished via 3 instructions for SHL and
979         // SRL, but not for SRA because of the instructions available:
980         // VS{RL} and VS{RL}O. However due to direct move costs, it's not worth
981         // doing
982         setOperationAction(ISD::SHL, MVT::v1i128, Expand);
983         setOperationAction(ISD::SRL, MVT::v1i128, Expand);
984         setOperationAction(ISD::SRA, MVT::v1i128, Expand);
985 
986         setOperationAction(ISD::SETCC, MVT::v2i64, Legal);
987       }
988       else {
989         setOperationAction(ISD::SHL, MVT::v2i64, Expand);
990         setOperationAction(ISD::SRA, MVT::v2i64, Expand);
991         setOperationAction(ISD::SRL, MVT::v2i64, Expand);
992 
993         setOperationAction(ISD::SETCC, MVT::v2i64, Custom);
994 
995         // VSX v2i64 only supports non-arithmetic operations.
996         setOperationAction(ISD::ADD, MVT::v2i64, Expand);
997         setOperationAction(ISD::SUB, MVT::v2i64, Expand);
998       }
999 
1000       setOperationAction(ISD::SETCC, MVT::v1i128, Expand);
1001 
1002       setOperationAction(ISD::LOAD, MVT::v2i64, Promote);
1003       AddPromotedToType (ISD::LOAD, MVT::v2i64, MVT::v2f64);
1004       setOperationAction(ISD::STORE, MVT::v2i64, Promote);
1005       AddPromotedToType (ISD::STORE, MVT::v2i64, MVT::v2f64);
1006 
1007       setOperationAction(ISD::VECTOR_SHUFFLE, MVT::v2i64, Legal);
1008 
1009       setOperationAction(ISD::STRICT_SINT_TO_FP, MVT::v2i64, Legal);
1010       setOperationAction(ISD::STRICT_UINT_TO_FP, MVT::v2i64, Legal);
1011       setOperationAction(ISD::STRICT_FP_TO_SINT, MVT::v2i64, Legal);
1012       setOperationAction(ISD::STRICT_FP_TO_UINT, MVT::v2i64, Legal);
1013       setOperationAction(ISD::SINT_TO_FP, MVT::v2i64, Legal);
1014       setOperationAction(ISD::UINT_TO_FP, MVT::v2i64, Legal);
1015       setOperationAction(ISD::FP_TO_SINT, MVT::v2i64, Legal);
1016       setOperationAction(ISD::FP_TO_UINT, MVT::v2i64, Legal);
1017 
1018       // Custom handling for partial vectors of integers converted to
1019       // floating point. We already have optimal handling for v2i32 through
1020       // the DAG combine, so those aren't necessary.
1021       setOperationAction(ISD::STRICT_UINT_TO_FP, MVT::v2i8, Custom);
1022       setOperationAction(ISD::STRICT_UINT_TO_FP, MVT::v4i8, Custom);
1023       setOperationAction(ISD::STRICT_UINT_TO_FP, MVT::v2i16, Custom);
1024       setOperationAction(ISD::STRICT_UINT_TO_FP, MVT::v4i16, Custom);
1025       setOperationAction(ISD::STRICT_SINT_TO_FP, MVT::v2i8, Custom);
1026       setOperationAction(ISD::STRICT_SINT_TO_FP, MVT::v4i8, Custom);
1027       setOperationAction(ISD::STRICT_SINT_TO_FP, MVT::v2i16, Custom);
1028       setOperationAction(ISD::STRICT_SINT_TO_FP, MVT::v4i16, Custom);
1029       setOperationAction(ISD::UINT_TO_FP, MVT::v2i8, Custom);
1030       setOperationAction(ISD::UINT_TO_FP, MVT::v4i8, Custom);
1031       setOperationAction(ISD::UINT_TO_FP, MVT::v2i16, Custom);
1032       setOperationAction(ISD::UINT_TO_FP, MVT::v4i16, Custom);
1033       setOperationAction(ISD::SINT_TO_FP, MVT::v2i8, Custom);
1034       setOperationAction(ISD::SINT_TO_FP, MVT::v4i8, Custom);
1035       setOperationAction(ISD::SINT_TO_FP, MVT::v2i16, Custom);
1036       setOperationAction(ISD::SINT_TO_FP, MVT::v4i16, Custom);
1037 
1038       setOperationAction(ISD::FNEG, MVT::v4f32, Legal);
1039       setOperationAction(ISD::FNEG, MVT::v2f64, Legal);
1040       setOperationAction(ISD::FABS, MVT::v4f32, Legal);
1041       setOperationAction(ISD::FABS, MVT::v2f64, Legal);
1042       setOperationAction(ISD::FCOPYSIGN, MVT::v4f32, Legal);
1043       setOperationAction(ISD::FCOPYSIGN, MVT::v2f64, Legal);
1044 
1045       if (Subtarget.hasDirectMove())
1046         setOperationAction(ISD::BUILD_VECTOR, MVT::v2i64, Custom);
1047       setOperationAction(ISD::BUILD_VECTOR, MVT::v2f64, Custom);
1048 
1049       // Handle constrained floating-point operations of vector.
1050       // The predictor is `hasVSX` because altivec instruction has
1051       // no exception but VSX vector instruction has.
1052       setOperationAction(ISD::STRICT_FADD, MVT::v4f32, Legal);
1053       setOperationAction(ISD::STRICT_FSUB, MVT::v4f32, Legal);
1054       setOperationAction(ISD::STRICT_FMUL, MVT::v4f32, Legal);
1055       setOperationAction(ISD::STRICT_FDIV, MVT::v4f32, Legal);
1056       setOperationAction(ISD::STRICT_FMA, MVT::v4f32, Legal);
1057       setOperationAction(ISD::STRICT_FSQRT, MVT::v4f32, Legal);
1058       setOperationAction(ISD::STRICT_FMAXNUM, MVT::v4f32, Legal);
1059       setOperationAction(ISD::STRICT_FMINNUM, MVT::v4f32, Legal);
1060       setOperationAction(ISD::STRICT_FNEARBYINT, MVT::v4f32, Legal);
1061       setOperationAction(ISD::STRICT_FFLOOR, MVT::v4f32, Legal);
1062       setOperationAction(ISD::STRICT_FCEIL,  MVT::v4f32, Legal);
1063       setOperationAction(ISD::STRICT_FTRUNC, MVT::v4f32, Legal);
1064       setOperationAction(ISD::STRICT_FROUND, MVT::v4f32, Legal);
1065 
1066       setOperationAction(ISD::STRICT_FADD, MVT::v2f64, Legal);
1067       setOperationAction(ISD::STRICT_FSUB, MVT::v2f64, Legal);
1068       setOperationAction(ISD::STRICT_FMUL, MVT::v2f64, Legal);
1069       setOperationAction(ISD::STRICT_FDIV, MVT::v2f64, Legal);
1070       setOperationAction(ISD::STRICT_FMA, MVT::v2f64, Legal);
1071       setOperationAction(ISD::STRICT_FSQRT, MVT::v2f64, Legal);
1072       setOperationAction(ISD::STRICT_FMAXNUM, MVT::v2f64, Legal);
1073       setOperationAction(ISD::STRICT_FMINNUM, MVT::v2f64, Legal);
1074       setOperationAction(ISD::STRICT_FNEARBYINT, MVT::v2f64, Legal);
1075       setOperationAction(ISD::STRICT_FFLOOR, MVT::v2f64, Legal);
1076       setOperationAction(ISD::STRICT_FCEIL,  MVT::v2f64, Legal);
1077       setOperationAction(ISD::STRICT_FTRUNC, MVT::v2f64, Legal);
1078       setOperationAction(ISD::STRICT_FROUND, MVT::v2f64, Legal);
1079 
1080       addRegisterClass(MVT::v2i64, &PPC::VSRCRegClass);
1081     }
1082 
1083     if (Subtarget.hasP8Altivec()) {
1084       addRegisterClass(MVT::v2i64, &PPC::VRRCRegClass);
1085       addRegisterClass(MVT::v1i128, &PPC::VRRCRegClass);
1086     }
1087 
1088     if (Subtarget.hasP9Vector()) {
1089       setOperationAction(ISD::INSERT_VECTOR_ELT, MVT::v4i32, Custom);
1090       setOperationAction(ISD::INSERT_VECTOR_ELT, MVT::v4f32, Custom);
1091 
1092       // 128 bit shifts can be accomplished via 3 instructions for SHL and
1093       // SRL, but not for SRA because of the instructions available:
1094       // VS{RL} and VS{RL}O.
1095       setOperationAction(ISD::SHL, MVT::v1i128, Legal);
1096       setOperationAction(ISD::SRL, MVT::v1i128, Legal);
1097       setOperationAction(ISD::SRA, MVT::v1i128, Expand);
1098 
1099       addRegisterClass(MVT::f128, &PPC::VRRCRegClass);
1100       setOperationAction(ISD::FADD, MVT::f128, Legal);
1101       setOperationAction(ISD::FSUB, MVT::f128, Legal);
1102       setOperationAction(ISD::FDIV, MVT::f128, Legal);
1103       setOperationAction(ISD::FMUL, MVT::f128, Legal);
1104       setOperationAction(ISD::FP_EXTEND, MVT::f128, Legal);
1105       // No extending loads to f128 on PPC.
1106       for (MVT FPT : MVT::fp_valuetypes())
1107         setLoadExtAction(ISD::EXTLOAD, MVT::f128, FPT, Expand);
1108       setOperationAction(ISD::FMA, MVT::f128, Legal);
1109       setCondCodeAction(ISD::SETULT, MVT::f128, Expand);
1110       setCondCodeAction(ISD::SETUGT, MVT::f128, Expand);
1111       setCondCodeAction(ISD::SETUEQ, MVT::f128, Expand);
1112       setCondCodeAction(ISD::SETOGE, MVT::f128, Expand);
1113       setCondCodeAction(ISD::SETOLE, MVT::f128, Expand);
1114       setCondCodeAction(ISD::SETONE, MVT::f128, Expand);
1115 
1116       setOperationAction(ISD::FTRUNC, MVT::f128, Legal);
1117       setOperationAction(ISD::FRINT, MVT::f128, Legal);
1118       setOperationAction(ISD::FFLOOR, MVT::f128, Legal);
1119       setOperationAction(ISD::FCEIL, MVT::f128, Legal);
1120       setOperationAction(ISD::FNEARBYINT, MVT::f128, Legal);
1121       setOperationAction(ISD::FROUND, MVT::f128, Legal);
1122 
1123       setOperationAction(ISD::SELECT, MVT::f128, Expand);
1124       setOperationAction(ISD::FP_ROUND, MVT::f64, Legal);
1125       setOperationAction(ISD::FP_ROUND, MVT::f32, Legal);
1126       setTruncStoreAction(MVT::f128, MVT::f64, Expand);
1127       setTruncStoreAction(MVT::f128, MVT::f32, Expand);
1128       setOperationAction(ISD::BITCAST, MVT::i128, Custom);
1129       // No implementation for these ops for PowerPC.
1130       setOperationAction(ISD::FSIN, MVT::f128, Expand);
1131       setOperationAction(ISD::FCOS, MVT::f128, Expand);
1132       setOperationAction(ISD::FPOW, MVT::f128, Expand);
1133       setOperationAction(ISD::FPOWI, MVT::f128, Expand);
1134       setOperationAction(ISD::FREM, MVT::f128, Expand);
1135 
1136       // Handle constrained floating-point operations of fp128
1137       setOperationAction(ISD::STRICT_FADD, MVT::f128, Legal);
1138       setOperationAction(ISD::STRICT_FSUB, MVT::f128, Legal);
1139       setOperationAction(ISD::STRICT_FMUL, MVT::f128, Legal);
1140       setOperationAction(ISD::STRICT_FDIV, MVT::f128, Legal);
1141       setOperationAction(ISD::STRICT_FMA, MVT::f128, Legal);
1142       setOperationAction(ISD::STRICT_FSQRT, MVT::f128, Legal);
1143       setOperationAction(ISD::STRICT_FP_EXTEND, MVT::f128, Legal);
1144       setOperationAction(ISD::STRICT_FP_ROUND, MVT::f64, Legal);
1145       setOperationAction(ISD::STRICT_FP_ROUND, MVT::f32, Legal);
1146       setOperationAction(ISD::STRICT_FRINT, MVT::f128, Legal);
1147       setOperationAction(ISD::STRICT_FNEARBYINT, MVT::f128, Legal);
1148       setOperationAction(ISD::STRICT_FFLOOR, MVT::f128, Legal);
1149       setOperationAction(ISD::STRICT_FCEIL, MVT::f128, Legal);
1150       setOperationAction(ISD::STRICT_FTRUNC, MVT::f128, Legal);
1151       setOperationAction(ISD::STRICT_FROUND, MVT::f128, Legal);
1152       setOperationAction(ISD::FP_EXTEND, MVT::v2f32, Custom);
1153       setOperationAction(ISD::BSWAP, MVT::v8i16, Legal);
1154       setOperationAction(ISD::BSWAP, MVT::v4i32, Legal);
1155       setOperationAction(ISD::BSWAP, MVT::v2i64, Legal);
1156       setOperationAction(ISD::BSWAP, MVT::v1i128, Legal);
1157     }
1158 
1159     if (Subtarget.hasP9Altivec()) {
1160       setOperationAction(ISD::INSERT_VECTOR_ELT, MVT::v8i16, Custom);
1161       setOperationAction(ISD::INSERT_VECTOR_ELT, MVT::v16i8, Custom);
1162 
1163       setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::v4i8,  Legal);
1164       setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::v4i16, Legal);
1165       setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::v4i32, Legal);
1166       setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::v2i8,  Legal);
1167       setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::v2i16, Legal);
1168       setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::v2i32, Legal);
1169       setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::v2i64, Legal);
1170     }
1171   }
1172 
1173   if (Subtarget.has64BitSupport())
1174     setOperationAction(ISD::PREFETCH, MVT::Other, Legal);
1175 
1176   if (Subtarget.isISA3_1())
1177     setOperationAction(ISD::SRA, MVT::v1i128, Legal);
1178 
1179   setOperationAction(ISD::READCYCLECOUNTER, MVT::i64, isPPC64 ? Legal : Custom);
1180 
1181   if (!isPPC64) {
1182     setOperationAction(ISD::ATOMIC_LOAD,  MVT::i64, Expand);
1183     setOperationAction(ISD::ATOMIC_STORE, MVT::i64, Expand);
1184   }
1185 
1186   setBooleanContents(ZeroOrOneBooleanContent);
1187 
1188   if (Subtarget.hasAltivec()) {
1189     // Altivec instructions set fields to all zeros or all ones.
1190     setBooleanVectorContents(ZeroOrNegativeOneBooleanContent);
1191   }
1192 
1193   if (!isPPC64) {
1194     // These libcalls are not available in 32-bit.
1195     setLibcallName(RTLIB::SHL_I128, nullptr);
1196     setLibcallName(RTLIB::SRL_I128, nullptr);
1197     setLibcallName(RTLIB::SRA_I128, nullptr);
1198   }
1199 
1200   setStackPointerRegisterToSaveRestore(isPPC64 ? PPC::X1 : PPC::R1);
1201 
1202   // We have target-specific dag combine patterns for the following nodes:
1203   setTargetDAGCombine(ISD::ADD);
1204   setTargetDAGCombine(ISD::SHL);
1205   setTargetDAGCombine(ISD::SRA);
1206   setTargetDAGCombine(ISD::SRL);
1207   setTargetDAGCombine(ISD::MUL);
1208   setTargetDAGCombine(ISD::FMA);
1209   setTargetDAGCombine(ISD::SINT_TO_FP);
1210   setTargetDAGCombine(ISD::BUILD_VECTOR);
1211   if (Subtarget.hasFPCVT())
1212     setTargetDAGCombine(ISD::UINT_TO_FP);
1213   setTargetDAGCombine(ISD::LOAD);
1214   setTargetDAGCombine(ISD::STORE);
1215   setTargetDAGCombine(ISD::BR_CC);
1216   if (Subtarget.useCRBits())
1217     setTargetDAGCombine(ISD::BRCOND);
1218   setTargetDAGCombine(ISD::BSWAP);
1219   setTargetDAGCombine(ISD::INTRINSIC_WO_CHAIN);
1220   setTargetDAGCombine(ISD::INTRINSIC_W_CHAIN);
1221   setTargetDAGCombine(ISD::INTRINSIC_VOID);
1222 
1223   setTargetDAGCombine(ISD::SIGN_EXTEND);
1224   setTargetDAGCombine(ISD::ZERO_EXTEND);
1225   setTargetDAGCombine(ISD::ANY_EXTEND);
1226 
1227   setTargetDAGCombine(ISD::TRUNCATE);
1228   setTargetDAGCombine(ISD::VECTOR_SHUFFLE);
1229 
1230 
1231   if (Subtarget.useCRBits()) {
1232     setTargetDAGCombine(ISD::TRUNCATE);
1233     setTargetDAGCombine(ISD::SETCC);
1234     setTargetDAGCombine(ISD::SELECT_CC);
1235   }
1236 
1237   if (Subtarget.hasP9Altivec()) {
1238     setTargetDAGCombine(ISD::ABS);
1239     setTargetDAGCombine(ISD::VSELECT);
1240   }
1241 
1242   setLibcallName(RTLIB::LOG_F128, "logf128");
1243   setLibcallName(RTLIB::LOG2_F128, "log2f128");
1244   setLibcallName(RTLIB::LOG10_F128, "log10f128");
1245   setLibcallName(RTLIB::EXP_F128, "expf128");
1246   setLibcallName(RTLIB::EXP2_F128, "exp2f128");
1247   setLibcallName(RTLIB::SIN_F128, "sinf128");
1248   setLibcallName(RTLIB::COS_F128, "cosf128");
1249   setLibcallName(RTLIB::POW_F128, "powf128");
1250   setLibcallName(RTLIB::FMIN_F128, "fminf128");
1251   setLibcallName(RTLIB::FMAX_F128, "fmaxf128");
1252   setLibcallName(RTLIB::POWI_F128, "__powikf2");
1253   setLibcallName(RTLIB::REM_F128, "fmodf128");
1254 
1255   // With 32 condition bits, we don't need to sink (and duplicate) compares
1256   // aggressively in CodeGenPrep.
1257   if (Subtarget.useCRBits()) {
1258     setHasMultipleConditionRegisters();
1259     setJumpIsExpensive();
1260   }
1261 
1262   setMinFunctionAlignment(Align(4));
1263 
1264   switch (Subtarget.getCPUDirective()) {
1265   default: break;
1266   case PPC::DIR_970:
1267   case PPC::DIR_A2:
1268   case PPC::DIR_E500:
1269   case PPC::DIR_E500mc:
1270   case PPC::DIR_E5500:
1271   case PPC::DIR_PWR4:
1272   case PPC::DIR_PWR5:
1273   case PPC::DIR_PWR5X:
1274   case PPC::DIR_PWR6:
1275   case PPC::DIR_PWR6X:
1276   case PPC::DIR_PWR7:
1277   case PPC::DIR_PWR8:
1278   case PPC::DIR_PWR9:
1279   case PPC::DIR_PWR10:
1280   case PPC::DIR_PWR_FUTURE:
1281     setPrefLoopAlignment(Align(16));
1282     setPrefFunctionAlignment(Align(16));
1283     break;
1284   }
1285 
1286   if (Subtarget.enableMachineScheduler())
1287     setSchedulingPreference(Sched::Source);
1288   else
1289     setSchedulingPreference(Sched::Hybrid);
1290 
1291   computeRegisterProperties(STI.getRegisterInfo());
1292 
1293   // The Freescale cores do better with aggressive inlining of memcpy and
1294   // friends. GCC uses same threshold of 128 bytes (= 32 word stores).
1295   if (Subtarget.getCPUDirective() == PPC::DIR_E500mc ||
1296       Subtarget.getCPUDirective() == PPC::DIR_E5500) {
1297     MaxStoresPerMemset = 32;
1298     MaxStoresPerMemsetOptSize = 16;
1299     MaxStoresPerMemcpy = 32;
1300     MaxStoresPerMemcpyOptSize = 8;
1301     MaxStoresPerMemmove = 32;
1302     MaxStoresPerMemmoveOptSize = 8;
1303   } else if (Subtarget.getCPUDirective() == PPC::DIR_A2) {
1304     // The A2 also benefits from (very) aggressive inlining of memcpy and
1305     // friends. The overhead of a the function call, even when warm, can be
1306     // over one hundred cycles.
1307     MaxStoresPerMemset = 128;
1308     MaxStoresPerMemcpy = 128;
1309     MaxStoresPerMemmove = 128;
1310     MaxLoadsPerMemcmp = 128;
1311   } else {
1312     MaxLoadsPerMemcmp = 8;
1313     MaxLoadsPerMemcmpOptSize = 4;
1314   }
1315 
1316   // Let the subtarget (CPU) decide if a predictable select is more expensive
1317   // than the corresponding branch. This information is used in CGP to decide
1318   // when to convert selects into branches.
1319   PredictableSelectIsExpensive = Subtarget.isPredictableSelectIsExpensive();
1320 }
1321 
1322 /// getMaxByValAlign - Helper for getByValTypeAlignment to determine
1323 /// the desired ByVal argument alignment.
1324 static void getMaxByValAlign(Type *Ty, Align &MaxAlign, Align MaxMaxAlign) {
1325   if (MaxAlign == MaxMaxAlign)
1326     return;
1327   if (VectorType *VTy = dyn_cast<VectorType>(Ty)) {
1328     if (MaxMaxAlign >= 32 &&
1329         VTy->getPrimitiveSizeInBits().getFixedSize() >= 256)
1330       MaxAlign = Align(32);
1331     else if (VTy->getPrimitiveSizeInBits().getFixedSize() >= 128 &&
1332              MaxAlign < 16)
1333       MaxAlign = Align(16);
1334   } else if (ArrayType *ATy = dyn_cast<ArrayType>(Ty)) {
1335     Align EltAlign;
1336     getMaxByValAlign(ATy->getElementType(), EltAlign, MaxMaxAlign);
1337     if (EltAlign > MaxAlign)
1338       MaxAlign = EltAlign;
1339   } else if (StructType *STy = dyn_cast<StructType>(Ty)) {
1340     for (auto *EltTy : STy->elements()) {
1341       Align EltAlign;
1342       getMaxByValAlign(EltTy, EltAlign, MaxMaxAlign);
1343       if (EltAlign > MaxAlign)
1344         MaxAlign = EltAlign;
1345       if (MaxAlign == MaxMaxAlign)
1346         break;
1347     }
1348   }
1349 }
1350 
1351 /// getByValTypeAlignment - Return the desired alignment for ByVal aggregate
1352 /// function arguments in the caller parameter area.
1353 unsigned PPCTargetLowering::getByValTypeAlignment(Type *Ty,
1354                                                   const DataLayout &DL) const {
1355   // 16byte and wider vectors are passed on 16byte boundary.
1356   // The rest is 8 on PPC64 and 4 on PPC32 boundary.
1357   Align Alignment = Subtarget.isPPC64() ? Align(8) : Align(4);
1358   if (Subtarget.hasAltivec())
1359     getMaxByValAlign(Ty, Alignment, Align(16));
1360   return Alignment.value();
1361 }
1362 
1363 bool PPCTargetLowering::useSoftFloat() const {
1364   return Subtarget.useSoftFloat();
1365 }
1366 
1367 bool PPCTargetLowering::hasSPE() const {
1368   return Subtarget.hasSPE();
1369 }
1370 
1371 bool PPCTargetLowering::preferIncOfAddToSubOfNot(EVT VT) const {
1372   return VT.isScalarInteger();
1373 }
1374 
1375 /// isMulhCheaperThanMulShift - Return true if a mulh[s|u] node for a specific
1376 /// type is cheaper than a multiply followed by a shift.
1377 /// This is true for words and doublewords on 64-bit PowerPC.
1378 bool PPCTargetLowering::isMulhCheaperThanMulShift(EVT Type) const {
1379   if (Subtarget.isPPC64() && (isOperationLegal(ISD::MULHS, Type) ||
1380                               isOperationLegal(ISD::MULHU, Type)))
1381     return true;
1382   return TargetLowering::isMulhCheaperThanMulShift(Type);
1383 }
1384 
1385 const char *PPCTargetLowering::getTargetNodeName(unsigned Opcode) const {
1386   switch ((PPCISD::NodeType)Opcode) {
1387   case PPCISD::FIRST_NUMBER:    break;
1388   case PPCISD::FSEL:            return "PPCISD::FSEL";
1389   case PPCISD::XSMAXCDP:        return "PPCISD::XSMAXCDP";
1390   case PPCISD::XSMINCDP:        return "PPCISD::XSMINCDP";
1391   case PPCISD::FCFID:           return "PPCISD::FCFID";
1392   case PPCISD::FCFIDU:          return "PPCISD::FCFIDU";
1393   case PPCISD::FCFIDS:          return "PPCISD::FCFIDS";
1394   case PPCISD::FCFIDUS:         return "PPCISD::FCFIDUS";
1395   case PPCISD::FCTIDZ:          return "PPCISD::FCTIDZ";
1396   case PPCISD::FCTIWZ:          return "PPCISD::FCTIWZ";
1397   case PPCISD::FCTIDUZ:         return "PPCISD::FCTIDUZ";
1398   case PPCISD::FCTIWUZ:         return "PPCISD::FCTIWUZ";
1399   case PPCISD::FP_TO_UINT_IN_VSR:
1400                                 return "PPCISD::FP_TO_UINT_IN_VSR,";
1401   case PPCISD::FP_TO_SINT_IN_VSR:
1402                                 return "PPCISD::FP_TO_SINT_IN_VSR";
1403   case PPCISD::FRE:             return "PPCISD::FRE";
1404   case PPCISD::FRSQRTE:         return "PPCISD::FRSQRTE";
1405   case PPCISD::STFIWX:          return "PPCISD::STFIWX";
1406   case PPCISD::VPERM:           return "PPCISD::VPERM";
1407   case PPCISD::XXSPLT:          return "PPCISD::XXSPLT";
1408   case PPCISD::XXSPLTI_SP_TO_DP:
1409     return "PPCISD::XXSPLTI_SP_TO_DP";
1410   case PPCISD::XXSPLTI32DX:
1411     return "PPCISD::XXSPLTI32DX";
1412   case PPCISD::VECINSERT:       return "PPCISD::VECINSERT";
1413   case PPCISD::XXPERMDI:        return "PPCISD::XXPERMDI";
1414   case PPCISD::VECSHL:          return "PPCISD::VECSHL";
1415   case PPCISD::CMPB:            return "PPCISD::CMPB";
1416   case PPCISD::Hi:              return "PPCISD::Hi";
1417   case PPCISD::Lo:              return "PPCISD::Lo";
1418   case PPCISD::TOC_ENTRY:       return "PPCISD::TOC_ENTRY";
1419   case PPCISD::ATOMIC_CMP_SWAP_8: return "PPCISD::ATOMIC_CMP_SWAP_8";
1420   case PPCISD::ATOMIC_CMP_SWAP_16: return "PPCISD::ATOMIC_CMP_SWAP_16";
1421   case PPCISD::DYNALLOC:        return "PPCISD::DYNALLOC";
1422   case PPCISD::DYNAREAOFFSET:   return "PPCISD::DYNAREAOFFSET";
1423   case PPCISD::PROBED_ALLOCA:   return "PPCISD::PROBED_ALLOCA";
1424   case PPCISD::GlobalBaseReg:   return "PPCISD::GlobalBaseReg";
1425   case PPCISD::SRL:             return "PPCISD::SRL";
1426   case PPCISD::SRA:             return "PPCISD::SRA";
1427   case PPCISD::SHL:             return "PPCISD::SHL";
1428   case PPCISD::SRA_ADDZE:       return "PPCISD::SRA_ADDZE";
1429   case PPCISD::CALL:            return "PPCISD::CALL";
1430   case PPCISD::CALL_NOP:        return "PPCISD::CALL_NOP";
1431   case PPCISD::CALL_NOTOC:      return "PPCISD::CALL_NOTOC";
1432   case PPCISD::MTCTR:           return "PPCISD::MTCTR";
1433   case PPCISD::BCTRL:           return "PPCISD::BCTRL";
1434   case PPCISD::BCTRL_LOAD_TOC:  return "PPCISD::BCTRL_LOAD_TOC";
1435   case PPCISD::RET_FLAG:        return "PPCISD::RET_FLAG";
1436   case PPCISD::READ_TIME_BASE:  return "PPCISD::READ_TIME_BASE";
1437   case PPCISD::EH_SJLJ_SETJMP:  return "PPCISD::EH_SJLJ_SETJMP";
1438   case PPCISD::EH_SJLJ_LONGJMP: return "PPCISD::EH_SJLJ_LONGJMP";
1439   case PPCISD::MFOCRF:          return "PPCISD::MFOCRF";
1440   case PPCISD::MFVSR:           return "PPCISD::MFVSR";
1441   case PPCISD::MTVSRA:          return "PPCISD::MTVSRA";
1442   case PPCISD::MTVSRZ:          return "PPCISD::MTVSRZ";
1443   case PPCISD::SINT_VEC_TO_FP:  return "PPCISD::SINT_VEC_TO_FP";
1444   case PPCISD::UINT_VEC_TO_FP:  return "PPCISD::UINT_VEC_TO_FP";
1445   case PPCISD::SCALAR_TO_VECTOR_PERMUTED:
1446     return "PPCISD::SCALAR_TO_VECTOR_PERMUTED";
1447   case PPCISD::ANDI_rec_1_EQ_BIT:
1448     return "PPCISD::ANDI_rec_1_EQ_BIT";
1449   case PPCISD::ANDI_rec_1_GT_BIT:
1450     return "PPCISD::ANDI_rec_1_GT_BIT";
1451   case PPCISD::VCMP:            return "PPCISD::VCMP";
1452   case PPCISD::VCMPo:           return "PPCISD::VCMPo";
1453   case PPCISD::LBRX:            return "PPCISD::LBRX";
1454   case PPCISD::STBRX:           return "PPCISD::STBRX";
1455   case PPCISD::LFIWAX:          return "PPCISD::LFIWAX";
1456   case PPCISD::LFIWZX:          return "PPCISD::LFIWZX";
1457   case PPCISD::LXSIZX:          return "PPCISD::LXSIZX";
1458   case PPCISD::STXSIX:          return "PPCISD::STXSIX";
1459   case PPCISD::VEXTS:           return "PPCISD::VEXTS";
1460   case PPCISD::LXVD2X:          return "PPCISD::LXVD2X";
1461   case PPCISD::STXVD2X:         return "PPCISD::STXVD2X";
1462   case PPCISD::LOAD_VEC_BE:     return "PPCISD::LOAD_VEC_BE";
1463   case PPCISD::STORE_VEC_BE:    return "PPCISD::STORE_VEC_BE";
1464   case PPCISD::ST_VSR_SCAL_INT:
1465                                 return "PPCISD::ST_VSR_SCAL_INT";
1466   case PPCISD::COND_BRANCH:     return "PPCISD::COND_BRANCH";
1467   case PPCISD::BDNZ:            return "PPCISD::BDNZ";
1468   case PPCISD::BDZ:             return "PPCISD::BDZ";
1469   case PPCISD::MFFS:            return "PPCISD::MFFS";
1470   case PPCISD::FADDRTZ:         return "PPCISD::FADDRTZ";
1471   case PPCISD::TC_RETURN:       return "PPCISD::TC_RETURN";
1472   case PPCISD::CR6SET:          return "PPCISD::CR6SET";
1473   case PPCISD::CR6UNSET:        return "PPCISD::CR6UNSET";
1474   case PPCISD::PPC32_GOT:       return "PPCISD::PPC32_GOT";
1475   case PPCISD::PPC32_PICGOT:    return "PPCISD::PPC32_PICGOT";
1476   case PPCISD::ADDIS_GOT_TPREL_HA: return "PPCISD::ADDIS_GOT_TPREL_HA";
1477   case PPCISD::LD_GOT_TPREL_L:  return "PPCISD::LD_GOT_TPREL_L";
1478   case PPCISD::ADD_TLS:         return "PPCISD::ADD_TLS";
1479   case PPCISD::ADDIS_TLSGD_HA:  return "PPCISD::ADDIS_TLSGD_HA";
1480   case PPCISD::ADDI_TLSGD_L:    return "PPCISD::ADDI_TLSGD_L";
1481   case PPCISD::GET_TLS_ADDR:    return "PPCISD::GET_TLS_ADDR";
1482   case PPCISD::ADDI_TLSGD_L_ADDR: return "PPCISD::ADDI_TLSGD_L_ADDR";
1483   case PPCISD::ADDIS_TLSLD_HA:  return "PPCISD::ADDIS_TLSLD_HA";
1484   case PPCISD::ADDI_TLSLD_L:    return "PPCISD::ADDI_TLSLD_L";
1485   case PPCISD::GET_TLSLD_ADDR:  return "PPCISD::GET_TLSLD_ADDR";
1486   case PPCISD::ADDI_TLSLD_L_ADDR: return "PPCISD::ADDI_TLSLD_L_ADDR";
1487   case PPCISD::ADDIS_DTPREL_HA: return "PPCISD::ADDIS_DTPREL_HA";
1488   case PPCISD::ADDI_DTPREL_L:   return "PPCISD::ADDI_DTPREL_L";
1489   case PPCISD::VADD_SPLAT:      return "PPCISD::VADD_SPLAT";
1490   case PPCISD::SC:              return "PPCISD::SC";
1491   case PPCISD::CLRBHRB:         return "PPCISD::CLRBHRB";
1492   case PPCISD::MFBHRBE:         return "PPCISD::MFBHRBE";
1493   case PPCISD::RFEBB:           return "PPCISD::RFEBB";
1494   case PPCISD::XXSWAPD:         return "PPCISD::XXSWAPD";
1495   case PPCISD::SWAP_NO_CHAIN:   return "PPCISD::SWAP_NO_CHAIN";
1496   case PPCISD::VABSD:           return "PPCISD::VABSD";
1497   case PPCISD::BUILD_FP128:     return "PPCISD::BUILD_FP128";
1498   case PPCISD::BUILD_SPE64:     return "PPCISD::BUILD_SPE64";
1499   case PPCISD::EXTRACT_SPE:     return "PPCISD::EXTRACT_SPE";
1500   case PPCISD::EXTSWSLI:        return "PPCISD::EXTSWSLI";
1501   case PPCISD::LD_VSX_LH:       return "PPCISD::LD_VSX_LH";
1502   case PPCISD::FP_EXTEND_HALF:  return "PPCISD::FP_EXTEND_HALF";
1503   case PPCISD::MAT_PCREL_ADDR:  return "PPCISD::MAT_PCREL_ADDR";
1504   case PPCISD::TLS_DYNAMIC_MAT_PCREL_ADDR:
1505     return "PPCISD::TLS_DYNAMIC_MAT_PCREL_ADDR";
1506   case PPCISD::LD_SPLAT:        return "PPCISD::LD_SPLAT";
1507   case PPCISD::FNMSUB:          return "PPCISD::FNMSUB";
1508   case PPCISD::STRICT_FCTIDZ:
1509     return "PPCISD::STRICT_FCTIDZ";
1510   case PPCISD::STRICT_FCTIWZ:
1511     return "PPCISD::STRICT_FCTIWZ";
1512   case PPCISD::STRICT_FCTIDUZ:
1513     return "PPCISD::STRICT_FCTIDUZ";
1514   case PPCISD::STRICT_FCTIWUZ:
1515     return "PPCISD::STRICT_FCTIWUZ";
1516   case PPCISD::STRICT_FCFID:
1517     return "PPCISD::STRICT_FCFID";
1518   case PPCISD::STRICT_FCFIDU:
1519     return "PPCISD::STRICT_FCFIDU";
1520   case PPCISD::STRICT_FCFIDS:
1521     return "PPCISD::STRICT_FCFIDS";
1522   case PPCISD::STRICT_FCFIDUS:
1523     return "PPCISD::STRICT_FCFIDUS";
1524   case PPCISD::LXVRZX:          return "PPCISD::LXVRZX";
1525   }
1526   return nullptr;
1527 }
1528 
1529 EVT PPCTargetLowering::getSetCCResultType(const DataLayout &DL, LLVMContext &C,
1530                                           EVT VT) const {
1531   if (!VT.isVector())
1532     return Subtarget.useCRBits() ? MVT::i1 : MVT::i32;
1533 
1534   return VT.changeVectorElementTypeToInteger();
1535 }
1536 
1537 bool PPCTargetLowering::enableAggressiveFMAFusion(EVT VT) const {
1538   assert(VT.isFloatingPoint() && "Non-floating-point FMA?");
1539   return true;
1540 }
1541 
1542 //===----------------------------------------------------------------------===//
1543 // Node matching predicates, for use by the tblgen matching code.
1544 //===----------------------------------------------------------------------===//
1545 
1546 /// isFloatingPointZero - Return true if this is 0.0 or -0.0.
1547 static bool isFloatingPointZero(SDValue Op) {
1548   if (ConstantFPSDNode *CFP = dyn_cast<ConstantFPSDNode>(Op))
1549     return CFP->getValueAPF().isZero();
1550   else if (ISD::isEXTLoad(Op.getNode()) || ISD::isNON_EXTLoad(Op.getNode())) {
1551     // Maybe this has already been legalized into the constant pool?
1552     if (ConstantPoolSDNode *CP = dyn_cast<ConstantPoolSDNode>(Op.getOperand(1)))
1553       if (const ConstantFP *CFP = dyn_cast<ConstantFP>(CP->getConstVal()))
1554         return CFP->getValueAPF().isZero();
1555   }
1556   return false;
1557 }
1558 
1559 /// isConstantOrUndef - Op is either an undef node or a ConstantSDNode.  Return
1560 /// true if Op is undef or if it matches the specified value.
1561 static bool isConstantOrUndef(int Op, int Val) {
1562   return Op < 0 || Op == Val;
1563 }
1564 
1565 /// isVPKUHUMShuffleMask - Return true if this is the shuffle mask for a
1566 /// VPKUHUM instruction.
1567 /// The ShuffleKind distinguishes between big-endian operations with
1568 /// two different inputs (0), either-endian operations with two identical
1569 /// inputs (1), and little-endian operations with two different inputs (2).
1570 /// For the latter, the input operands are swapped (see PPCInstrAltivec.td).
1571 bool PPC::isVPKUHUMShuffleMask(ShuffleVectorSDNode *N, unsigned ShuffleKind,
1572                                SelectionDAG &DAG) {
1573   bool IsLE = DAG.getDataLayout().isLittleEndian();
1574   if (ShuffleKind == 0) {
1575     if (IsLE)
1576       return false;
1577     for (unsigned i = 0; i != 16; ++i)
1578       if (!isConstantOrUndef(N->getMaskElt(i), i*2+1))
1579         return false;
1580   } else if (ShuffleKind == 2) {
1581     if (!IsLE)
1582       return false;
1583     for (unsigned i = 0; i != 16; ++i)
1584       if (!isConstantOrUndef(N->getMaskElt(i), i*2))
1585         return false;
1586   } else if (ShuffleKind == 1) {
1587     unsigned j = IsLE ? 0 : 1;
1588     for (unsigned i = 0; i != 8; ++i)
1589       if (!isConstantOrUndef(N->getMaskElt(i),    i*2+j) ||
1590           !isConstantOrUndef(N->getMaskElt(i+8),  i*2+j))
1591         return false;
1592   }
1593   return true;
1594 }
1595 
1596 /// isVPKUWUMShuffleMask - Return true if this is the shuffle mask for a
1597 /// VPKUWUM instruction.
1598 /// The ShuffleKind distinguishes between big-endian operations with
1599 /// two different inputs (0), either-endian operations with two identical
1600 /// inputs (1), and little-endian operations with two different inputs (2).
1601 /// For the latter, the input operands are swapped (see PPCInstrAltivec.td).
1602 bool PPC::isVPKUWUMShuffleMask(ShuffleVectorSDNode *N, unsigned ShuffleKind,
1603                                SelectionDAG &DAG) {
1604   bool IsLE = DAG.getDataLayout().isLittleEndian();
1605   if (ShuffleKind == 0) {
1606     if (IsLE)
1607       return false;
1608     for (unsigned i = 0; i != 16; i += 2)
1609       if (!isConstantOrUndef(N->getMaskElt(i  ),  i*2+2) ||
1610           !isConstantOrUndef(N->getMaskElt(i+1),  i*2+3))
1611         return false;
1612   } else if (ShuffleKind == 2) {
1613     if (!IsLE)
1614       return false;
1615     for (unsigned i = 0; i != 16; i += 2)
1616       if (!isConstantOrUndef(N->getMaskElt(i  ),  i*2) ||
1617           !isConstantOrUndef(N->getMaskElt(i+1),  i*2+1))
1618         return false;
1619   } else if (ShuffleKind == 1) {
1620     unsigned j = IsLE ? 0 : 2;
1621     for (unsigned i = 0; i != 8; i += 2)
1622       if (!isConstantOrUndef(N->getMaskElt(i  ),  i*2+j)   ||
1623           !isConstantOrUndef(N->getMaskElt(i+1),  i*2+j+1) ||
1624           !isConstantOrUndef(N->getMaskElt(i+8),  i*2+j)   ||
1625           !isConstantOrUndef(N->getMaskElt(i+9),  i*2+j+1))
1626         return false;
1627   }
1628   return true;
1629 }
1630 
1631 /// isVPKUDUMShuffleMask - Return true if this is the shuffle mask for a
1632 /// VPKUDUM instruction, AND the VPKUDUM instruction exists for the
1633 /// current subtarget.
1634 ///
1635 /// The ShuffleKind distinguishes between big-endian operations with
1636 /// two different inputs (0), either-endian operations with two identical
1637 /// inputs (1), and little-endian operations with two different inputs (2).
1638 /// For the latter, the input operands are swapped (see PPCInstrAltivec.td).
1639 bool PPC::isVPKUDUMShuffleMask(ShuffleVectorSDNode *N, unsigned ShuffleKind,
1640                                SelectionDAG &DAG) {
1641   const PPCSubtarget& Subtarget =
1642       static_cast<const PPCSubtarget&>(DAG.getSubtarget());
1643   if (!Subtarget.hasP8Vector())
1644     return false;
1645 
1646   bool IsLE = DAG.getDataLayout().isLittleEndian();
1647   if (ShuffleKind == 0) {
1648     if (IsLE)
1649       return false;
1650     for (unsigned i = 0; i != 16; i += 4)
1651       if (!isConstantOrUndef(N->getMaskElt(i  ),  i*2+4) ||
1652           !isConstantOrUndef(N->getMaskElt(i+1),  i*2+5) ||
1653           !isConstantOrUndef(N->getMaskElt(i+2),  i*2+6) ||
1654           !isConstantOrUndef(N->getMaskElt(i+3),  i*2+7))
1655         return false;
1656   } else if (ShuffleKind == 2) {
1657     if (!IsLE)
1658       return false;
1659     for (unsigned i = 0; i != 16; i += 4)
1660       if (!isConstantOrUndef(N->getMaskElt(i  ),  i*2) ||
1661           !isConstantOrUndef(N->getMaskElt(i+1),  i*2+1) ||
1662           !isConstantOrUndef(N->getMaskElt(i+2),  i*2+2) ||
1663           !isConstantOrUndef(N->getMaskElt(i+3),  i*2+3))
1664         return false;
1665   } else if (ShuffleKind == 1) {
1666     unsigned j = IsLE ? 0 : 4;
1667     for (unsigned i = 0; i != 8; i += 4)
1668       if (!isConstantOrUndef(N->getMaskElt(i  ),  i*2+j)   ||
1669           !isConstantOrUndef(N->getMaskElt(i+1),  i*2+j+1) ||
1670           !isConstantOrUndef(N->getMaskElt(i+2),  i*2+j+2) ||
1671           !isConstantOrUndef(N->getMaskElt(i+3),  i*2+j+3) ||
1672           !isConstantOrUndef(N->getMaskElt(i+8),  i*2+j)   ||
1673           !isConstantOrUndef(N->getMaskElt(i+9),  i*2+j+1) ||
1674           !isConstantOrUndef(N->getMaskElt(i+10), i*2+j+2) ||
1675           !isConstantOrUndef(N->getMaskElt(i+11), i*2+j+3))
1676         return false;
1677   }
1678   return true;
1679 }
1680 
1681 /// isVMerge - Common function, used to match vmrg* shuffles.
1682 ///
1683 static bool isVMerge(ShuffleVectorSDNode *N, unsigned UnitSize,
1684                      unsigned LHSStart, unsigned RHSStart) {
1685   if (N->getValueType(0) != MVT::v16i8)
1686     return false;
1687   assert((UnitSize == 1 || UnitSize == 2 || UnitSize == 4) &&
1688          "Unsupported merge size!");
1689 
1690   for (unsigned i = 0; i != 8/UnitSize; ++i)     // Step over units
1691     for (unsigned j = 0; j != UnitSize; ++j) {   // Step over bytes within unit
1692       if (!isConstantOrUndef(N->getMaskElt(i*UnitSize*2+j),
1693                              LHSStart+j+i*UnitSize) ||
1694           !isConstantOrUndef(N->getMaskElt(i*UnitSize*2+UnitSize+j),
1695                              RHSStart+j+i*UnitSize))
1696         return false;
1697     }
1698   return true;
1699 }
1700 
1701 /// isVMRGLShuffleMask - Return true if this is a shuffle mask suitable for
1702 /// a VMRGL* instruction with the specified unit size (1,2 or 4 bytes).
1703 /// The ShuffleKind distinguishes between big-endian merges with two
1704 /// different inputs (0), either-endian merges with two identical inputs (1),
1705 /// and little-endian merges with two different inputs (2).  For the latter,
1706 /// the input operands are swapped (see PPCInstrAltivec.td).
1707 bool PPC::isVMRGLShuffleMask(ShuffleVectorSDNode *N, unsigned UnitSize,
1708                              unsigned ShuffleKind, SelectionDAG &DAG) {
1709   if (DAG.getDataLayout().isLittleEndian()) {
1710     if (ShuffleKind == 1) // unary
1711       return isVMerge(N, UnitSize, 0, 0);
1712     else if (ShuffleKind == 2) // swapped
1713       return isVMerge(N, UnitSize, 0, 16);
1714     else
1715       return false;
1716   } else {
1717     if (ShuffleKind == 1) // unary
1718       return isVMerge(N, UnitSize, 8, 8);
1719     else if (ShuffleKind == 0) // normal
1720       return isVMerge(N, UnitSize, 8, 24);
1721     else
1722       return false;
1723   }
1724 }
1725 
1726 /// isVMRGHShuffleMask - Return true if this is a shuffle mask suitable for
1727 /// a VMRGH* instruction with the specified unit size (1,2 or 4 bytes).
1728 /// The ShuffleKind distinguishes between big-endian merges with two
1729 /// different inputs (0), either-endian merges with two identical inputs (1),
1730 /// and little-endian merges with two different inputs (2).  For the latter,
1731 /// the input operands are swapped (see PPCInstrAltivec.td).
1732 bool PPC::isVMRGHShuffleMask(ShuffleVectorSDNode *N, unsigned UnitSize,
1733                              unsigned ShuffleKind, SelectionDAG &DAG) {
1734   if (DAG.getDataLayout().isLittleEndian()) {
1735     if (ShuffleKind == 1) // unary
1736       return isVMerge(N, UnitSize, 8, 8);
1737     else if (ShuffleKind == 2) // swapped
1738       return isVMerge(N, UnitSize, 8, 24);
1739     else
1740       return false;
1741   } else {
1742     if (ShuffleKind == 1) // unary
1743       return isVMerge(N, UnitSize, 0, 0);
1744     else if (ShuffleKind == 0) // normal
1745       return isVMerge(N, UnitSize, 0, 16);
1746     else
1747       return false;
1748   }
1749 }
1750 
1751 /**
1752  * Common function used to match vmrgew and vmrgow shuffles
1753  *
1754  * The indexOffset determines whether to look for even or odd words in
1755  * the shuffle mask. This is based on the of the endianness of the target
1756  * machine.
1757  *   - Little Endian:
1758  *     - Use offset of 0 to check for odd elements
1759  *     - Use offset of 4 to check for even elements
1760  *   - Big Endian:
1761  *     - Use offset of 0 to check for even elements
1762  *     - Use offset of 4 to check for odd elements
1763  * A detailed description of the vector element ordering for little endian and
1764  * big endian can be found at
1765  * http://www.ibm.com/developerworks/library/l-ibm-xl-c-cpp-compiler/index.html
1766  * Targeting your applications - what little endian and big endian IBM XL C/C++
1767  * compiler differences mean to you
1768  *
1769  * The mask to the shuffle vector instruction specifies the indices of the
1770  * elements from the two input vectors to place in the result. The elements are
1771  * numbered in array-access order, starting with the first vector. These vectors
1772  * are always of type v16i8, thus each vector will contain 16 elements of size
1773  * 8. More info on the shuffle vector can be found in the
1774  * http://llvm.org/docs/LangRef.html#shufflevector-instruction
1775  * Language Reference.
1776  *
1777  * The RHSStartValue indicates whether the same input vectors are used (unary)
1778  * or two different input vectors are used, based on the following:
1779  *   - If the instruction uses the same vector for both inputs, the range of the
1780  *     indices will be 0 to 15. In this case, the RHSStart value passed should
1781  *     be 0.
1782  *   - If the instruction has two different vectors then the range of the
1783  *     indices will be 0 to 31. In this case, the RHSStart value passed should
1784  *     be 16 (indices 0-15 specify elements in the first vector while indices 16
1785  *     to 31 specify elements in the second vector).
1786  *
1787  * \param[in] N The shuffle vector SD Node to analyze
1788  * \param[in] IndexOffset Specifies whether to look for even or odd elements
1789  * \param[in] RHSStartValue Specifies the starting index for the righthand input
1790  * vector to the shuffle_vector instruction
1791  * \return true iff this shuffle vector represents an even or odd word merge
1792  */
1793 static bool isVMerge(ShuffleVectorSDNode *N, unsigned IndexOffset,
1794                      unsigned RHSStartValue) {
1795   if (N->getValueType(0) != MVT::v16i8)
1796     return false;
1797 
1798   for (unsigned i = 0; i < 2; ++i)
1799     for (unsigned j = 0; j < 4; ++j)
1800       if (!isConstantOrUndef(N->getMaskElt(i*4+j),
1801                              i*RHSStartValue+j+IndexOffset) ||
1802           !isConstantOrUndef(N->getMaskElt(i*4+j+8),
1803                              i*RHSStartValue+j+IndexOffset+8))
1804         return false;
1805   return true;
1806 }
1807 
1808 /**
1809  * Determine if the specified shuffle mask is suitable for the vmrgew or
1810  * vmrgow instructions.
1811  *
1812  * \param[in] N The shuffle vector SD Node to analyze
1813  * \param[in] CheckEven Check for an even merge (true) or an odd merge (false)
1814  * \param[in] ShuffleKind Identify the type of merge:
1815  *   - 0 = big-endian merge with two different inputs;
1816  *   - 1 = either-endian merge with two identical inputs;
1817  *   - 2 = little-endian merge with two different inputs (inputs are swapped for
1818  *     little-endian merges).
1819  * \param[in] DAG The current SelectionDAG
1820  * \return true iff this shuffle mask
1821  */
1822 bool PPC::isVMRGEOShuffleMask(ShuffleVectorSDNode *N, bool CheckEven,
1823                               unsigned ShuffleKind, SelectionDAG &DAG) {
1824   if (DAG.getDataLayout().isLittleEndian()) {
1825     unsigned indexOffset = CheckEven ? 4 : 0;
1826     if (ShuffleKind == 1) // Unary
1827       return isVMerge(N, indexOffset, 0);
1828     else if (ShuffleKind == 2) // swapped
1829       return isVMerge(N, indexOffset, 16);
1830     else
1831       return false;
1832   }
1833   else {
1834     unsigned indexOffset = CheckEven ? 0 : 4;
1835     if (ShuffleKind == 1) // Unary
1836       return isVMerge(N, indexOffset, 0);
1837     else if (ShuffleKind == 0) // Normal
1838       return isVMerge(N, indexOffset, 16);
1839     else
1840       return false;
1841   }
1842   return false;
1843 }
1844 
1845 /// isVSLDOIShuffleMask - If this is a vsldoi shuffle mask, return the shift
1846 /// amount, otherwise return -1.
1847 /// The ShuffleKind distinguishes between big-endian operations with two
1848 /// different inputs (0), either-endian operations with two identical inputs
1849 /// (1), and little-endian operations with two different inputs (2).  For the
1850 /// latter, the input operands are swapped (see PPCInstrAltivec.td).
1851 int PPC::isVSLDOIShuffleMask(SDNode *N, unsigned ShuffleKind,
1852                              SelectionDAG &DAG) {
1853   if (N->getValueType(0) != MVT::v16i8)
1854     return -1;
1855 
1856   ShuffleVectorSDNode *SVOp = cast<ShuffleVectorSDNode>(N);
1857 
1858   // Find the first non-undef value in the shuffle mask.
1859   unsigned i;
1860   for (i = 0; i != 16 && SVOp->getMaskElt(i) < 0; ++i)
1861     /*search*/;
1862 
1863   if (i == 16) return -1;  // all undef.
1864 
1865   // Otherwise, check to see if the rest of the elements are consecutively
1866   // numbered from this value.
1867   unsigned ShiftAmt = SVOp->getMaskElt(i);
1868   if (ShiftAmt < i) return -1;
1869 
1870   ShiftAmt -= i;
1871   bool isLE = DAG.getDataLayout().isLittleEndian();
1872 
1873   if ((ShuffleKind == 0 && !isLE) || (ShuffleKind == 2 && isLE)) {
1874     // Check the rest of the elements to see if they are consecutive.
1875     for (++i; i != 16; ++i)
1876       if (!isConstantOrUndef(SVOp->getMaskElt(i), ShiftAmt+i))
1877         return -1;
1878   } else if (ShuffleKind == 1) {
1879     // Check the rest of the elements to see if they are consecutive.
1880     for (++i; i != 16; ++i)
1881       if (!isConstantOrUndef(SVOp->getMaskElt(i), (ShiftAmt+i) & 15))
1882         return -1;
1883   } else
1884     return -1;
1885 
1886   if (isLE)
1887     ShiftAmt = 16 - ShiftAmt;
1888 
1889   return ShiftAmt;
1890 }
1891 
1892 /// isSplatShuffleMask - Return true if the specified VECTOR_SHUFFLE operand
1893 /// specifies a splat of a single element that is suitable for input to
1894 /// one of the splat operations (VSPLTB/VSPLTH/VSPLTW/XXSPLTW/LXVDSX/etc.).
1895 bool PPC::isSplatShuffleMask(ShuffleVectorSDNode *N, unsigned EltSize) {
1896   assert(N->getValueType(0) == MVT::v16i8 && isPowerOf2_32(EltSize) &&
1897          EltSize <= 8 && "Can only handle 1,2,4,8 byte element sizes");
1898 
1899   // The consecutive indices need to specify an element, not part of two
1900   // different elements.  So abandon ship early if this isn't the case.
1901   if (N->getMaskElt(0) % EltSize != 0)
1902     return false;
1903 
1904   // This is a splat operation if each element of the permute is the same, and
1905   // if the value doesn't reference the second vector.
1906   unsigned ElementBase = N->getMaskElt(0);
1907 
1908   // FIXME: Handle UNDEF elements too!
1909   if (ElementBase >= 16)
1910     return false;
1911 
1912   // Check that the indices are consecutive, in the case of a multi-byte element
1913   // splatted with a v16i8 mask.
1914   for (unsigned i = 1; i != EltSize; ++i)
1915     if (N->getMaskElt(i) < 0 || N->getMaskElt(i) != (int)(i+ElementBase))
1916       return false;
1917 
1918   for (unsigned i = EltSize, e = 16; i != e; i += EltSize) {
1919     if (N->getMaskElt(i) < 0) continue;
1920     for (unsigned j = 0; j != EltSize; ++j)
1921       if (N->getMaskElt(i+j) != N->getMaskElt(j))
1922         return false;
1923   }
1924   return true;
1925 }
1926 
1927 /// Check that the mask is shuffling N byte elements. Within each N byte
1928 /// element of the mask, the indices could be either in increasing or
1929 /// decreasing order as long as they are consecutive.
1930 /// \param[in] N the shuffle vector SD Node to analyze
1931 /// \param[in] Width the element width in bytes, could be 2/4/8/16 (HalfWord/
1932 /// Word/DoubleWord/QuadWord).
1933 /// \param[in] StepLen the delta indices number among the N byte element, if
1934 /// the mask is in increasing/decreasing order then it is 1/-1.
1935 /// \return true iff the mask is shuffling N byte elements.
1936 static bool isNByteElemShuffleMask(ShuffleVectorSDNode *N, unsigned Width,
1937                                    int StepLen) {
1938   assert((Width == 2 || Width == 4 || Width == 8 || Width == 16) &&
1939          "Unexpected element width.");
1940   assert((StepLen == 1 || StepLen == -1) && "Unexpected element width.");
1941 
1942   unsigned NumOfElem = 16 / Width;
1943   unsigned MaskVal[16]; //  Width is never greater than 16
1944   for (unsigned i = 0; i < NumOfElem; ++i) {
1945     MaskVal[0] = N->getMaskElt(i * Width);
1946     if ((StepLen == 1) && (MaskVal[0] % Width)) {
1947       return false;
1948     } else if ((StepLen == -1) && ((MaskVal[0] + 1) % Width)) {
1949       return false;
1950     }
1951 
1952     for (unsigned int j = 1; j < Width; ++j) {
1953       MaskVal[j] = N->getMaskElt(i * Width + j);
1954       if (MaskVal[j] != MaskVal[j-1] + StepLen) {
1955         return false;
1956       }
1957     }
1958   }
1959 
1960   return true;
1961 }
1962 
1963 bool PPC::isXXINSERTWMask(ShuffleVectorSDNode *N, unsigned &ShiftElts,
1964                           unsigned &InsertAtByte, bool &Swap, bool IsLE) {
1965   if (!isNByteElemShuffleMask(N, 4, 1))
1966     return false;
1967 
1968   // Now we look at mask elements 0,4,8,12
1969   unsigned M0 = N->getMaskElt(0) / 4;
1970   unsigned M1 = N->getMaskElt(4) / 4;
1971   unsigned M2 = N->getMaskElt(8) / 4;
1972   unsigned M3 = N->getMaskElt(12) / 4;
1973   unsigned LittleEndianShifts[] = { 2, 1, 0, 3 };
1974   unsigned BigEndianShifts[] = { 3, 0, 1, 2 };
1975 
1976   // Below, let H and L be arbitrary elements of the shuffle mask
1977   // where H is in the range [4,7] and L is in the range [0,3].
1978   // H, 1, 2, 3 or L, 5, 6, 7
1979   if ((M0 > 3 && M1 == 1 && M2 == 2 && M3 == 3) ||
1980       (M0 < 4 && M1 == 5 && M2 == 6 && M3 == 7)) {
1981     ShiftElts = IsLE ? LittleEndianShifts[M0 & 0x3] : BigEndianShifts[M0 & 0x3];
1982     InsertAtByte = IsLE ? 12 : 0;
1983     Swap = M0 < 4;
1984     return true;
1985   }
1986   // 0, H, 2, 3 or 4, L, 6, 7
1987   if ((M1 > 3 && M0 == 0 && M2 == 2 && M3 == 3) ||
1988       (M1 < 4 && M0 == 4 && M2 == 6 && M3 == 7)) {
1989     ShiftElts = IsLE ? LittleEndianShifts[M1 & 0x3] : BigEndianShifts[M1 & 0x3];
1990     InsertAtByte = IsLE ? 8 : 4;
1991     Swap = M1 < 4;
1992     return true;
1993   }
1994   // 0, 1, H, 3 or 4, 5, L, 7
1995   if ((M2 > 3 && M0 == 0 && M1 == 1 && M3 == 3) ||
1996       (M2 < 4 && M0 == 4 && M1 == 5 && M3 == 7)) {
1997     ShiftElts = IsLE ? LittleEndianShifts[M2 & 0x3] : BigEndianShifts[M2 & 0x3];
1998     InsertAtByte = IsLE ? 4 : 8;
1999     Swap = M2 < 4;
2000     return true;
2001   }
2002   // 0, 1, 2, H or 4, 5, 6, L
2003   if ((M3 > 3 && M0 == 0 && M1 == 1 && M2 == 2) ||
2004       (M3 < 4 && M0 == 4 && M1 == 5 && M2 == 6)) {
2005     ShiftElts = IsLE ? LittleEndianShifts[M3 & 0x3] : BigEndianShifts[M3 & 0x3];
2006     InsertAtByte = IsLE ? 0 : 12;
2007     Swap = M3 < 4;
2008     return true;
2009   }
2010 
2011   // If both vector operands for the shuffle are the same vector, the mask will
2012   // contain only elements from the first one and the second one will be undef.
2013   if (N->getOperand(1).isUndef()) {
2014     ShiftElts = 0;
2015     Swap = true;
2016     unsigned XXINSERTWSrcElem = IsLE ? 2 : 1;
2017     if (M0 == XXINSERTWSrcElem && M1 == 1 && M2 == 2 && M3 == 3) {
2018       InsertAtByte = IsLE ? 12 : 0;
2019       return true;
2020     }
2021     if (M0 == 0 && M1 == XXINSERTWSrcElem && M2 == 2 && M3 == 3) {
2022       InsertAtByte = IsLE ? 8 : 4;
2023       return true;
2024     }
2025     if (M0 == 0 && M1 == 1 && M2 == XXINSERTWSrcElem && M3 == 3) {
2026       InsertAtByte = IsLE ? 4 : 8;
2027       return true;
2028     }
2029     if (M0 == 0 && M1 == 1 && M2 == 2 && M3 == XXINSERTWSrcElem) {
2030       InsertAtByte = IsLE ? 0 : 12;
2031       return true;
2032     }
2033   }
2034 
2035   return false;
2036 }
2037 
2038 bool PPC::isXXSLDWIShuffleMask(ShuffleVectorSDNode *N, unsigned &ShiftElts,
2039                                bool &Swap, bool IsLE) {
2040   assert(N->getValueType(0) == MVT::v16i8 && "Shuffle vector expects v16i8");
2041   // Ensure each byte index of the word is consecutive.
2042   if (!isNByteElemShuffleMask(N, 4, 1))
2043     return false;
2044 
2045   // Now we look at mask elements 0,4,8,12, which are the beginning of words.
2046   unsigned M0 = N->getMaskElt(0) / 4;
2047   unsigned M1 = N->getMaskElt(4) / 4;
2048   unsigned M2 = N->getMaskElt(8) / 4;
2049   unsigned M3 = N->getMaskElt(12) / 4;
2050 
2051   // If both vector operands for the shuffle are the same vector, the mask will
2052   // contain only elements from the first one and the second one will be undef.
2053   if (N->getOperand(1).isUndef()) {
2054     assert(M0 < 4 && "Indexing into an undef vector?");
2055     if (M1 != (M0 + 1) % 4 || M2 != (M1 + 1) % 4 || M3 != (M2 + 1) % 4)
2056       return false;
2057 
2058     ShiftElts = IsLE ? (4 - M0) % 4 : M0;
2059     Swap = false;
2060     return true;
2061   }
2062 
2063   // Ensure each word index of the ShuffleVector Mask is consecutive.
2064   if (M1 != (M0 + 1) % 8 || M2 != (M1 + 1) % 8 || M3 != (M2 + 1) % 8)
2065     return false;
2066 
2067   if (IsLE) {
2068     if (M0 == 0 || M0 == 7 || M0 == 6 || M0 == 5) {
2069       // Input vectors don't need to be swapped if the leading element
2070       // of the result is one of the 3 left elements of the second vector
2071       // (or if there is no shift to be done at all).
2072       Swap = false;
2073       ShiftElts = (8 - M0) % 8;
2074     } else if (M0 == 4 || M0 == 3 || M0 == 2 || M0 == 1) {
2075       // Input vectors need to be swapped if the leading element
2076       // of the result is one of the 3 left elements of the first vector
2077       // (or if we're shifting by 4 - thereby simply swapping the vectors).
2078       Swap = true;
2079       ShiftElts = (4 - M0) % 4;
2080     }
2081 
2082     return true;
2083   } else {                                          // BE
2084     if (M0 == 0 || M0 == 1 || M0 == 2 || M0 == 3) {
2085       // Input vectors don't need to be swapped if the leading element
2086       // of the result is one of the 4 elements of the first vector.
2087       Swap = false;
2088       ShiftElts = M0;
2089     } else if (M0 == 4 || M0 == 5 || M0 == 6 || M0 == 7) {
2090       // Input vectors need to be swapped if the leading element
2091       // of the result is one of the 4 elements of the right vector.
2092       Swap = true;
2093       ShiftElts = M0 - 4;
2094     }
2095 
2096     return true;
2097   }
2098 }
2099 
2100 bool static isXXBRShuffleMaskHelper(ShuffleVectorSDNode *N, int Width) {
2101   assert(N->getValueType(0) == MVT::v16i8 && "Shuffle vector expects v16i8");
2102 
2103   if (!isNByteElemShuffleMask(N, Width, -1))
2104     return false;
2105 
2106   for (int i = 0; i < 16; i += Width)
2107     if (N->getMaskElt(i) != i + Width - 1)
2108       return false;
2109 
2110   return true;
2111 }
2112 
2113 bool PPC::isXXBRHShuffleMask(ShuffleVectorSDNode *N) {
2114   return isXXBRShuffleMaskHelper(N, 2);
2115 }
2116 
2117 bool PPC::isXXBRWShuffleMask(ShuffleVectorSDNode *N) {
2118   return isXXBRShuffleMaskHelper(N, 4);
2119 }
2120 
2121 bool PPC::isXXBRDShuffleMask(ShuffleVectorSDNode *N) {
2122   return isXXBRShuffleMaskHelper(N, 8);
2123 }
2124 
2125 bool PPC::isXXBRQShuffleMask(ShuffleVectorSDNode *N) {
2126   return isXXBRShuffleMaskHelper(N, 16);
2127 }
2128 
2129 /// Can node \p N be lowered to an XXPERMDI instruction? If so, set \p Swap
2130 /// if the inputs to the instruction should be swapped and set \p DM to the
2131 /// value for the immediate.
2132 /// Specifically, set \p Swap to true only if \p N can be lowered to XXPERMDI
2133 /// AND element 0 of the result comes from the first input (LE) or second input
2134 /// (BE). Set \p DM to the calculated result (0-3) only if \p N can be lowered.
2135 /// \return true iff the given mask of shuffle node \p N is a XXPERMDI shuffle
2136 /// mask.
2137 bool PPC::isXXPERMDIShuffleMask(ShuffleVectorSDNode *N, unsigned &DM,
2138                                bool &Swap, bool IsLE) {
2139   assert(N->getValueType(0) == MVT::v16i8 && "Shuffle vector expects v16i8");
2140 
2141   // Ensure each byte index of the double word is consecutive.
2142   if (!isNByteElemShuffleMask(N, 8, 1))
2143     return false;
2144 
2145   unsigned M0 = N->getMaskElt(0) / 8;
2146   unsigned M1 = N->getMaskElt(8) / 8;
2147   assert(((M0 | M1) < 4) && "A mask element out of bounds?");
2148 
2149   // If both vector operands for the shuffle are the same vector, the mask will
2150   // contain only elements from the first one and the second one will be undef.
2151   if (N->getOperand(1).isUndef()) {
2152     if ((M0 | M1) < 2) {
2153       DM = IsLE ? (((~M1) & 1) << 1) + ((~M0) & 1) : (M0 << 1) + (M1 & 1);
2154       Swap = false;
2155       return true;
2156     } else
2157       return false;
2158   }
2159 
2160   if (IsLE) {
2161     if (M0 > 1 && M1 < 2) {
2162       Swap = false;
2163     } else if (M0 < 2 && M1 > 1) {
2164       M0 = (M0 + 2) % 4;
2165       M1 = (M1 + 2) % 4;
2166       Swap = true;
2167     } else
2168       return false;
2169 
2170     // Note: if control flow comes here that means Swap is already set above
2171     DM = (((~M1) & 1) << 1) + ((~M0) & 1);
2172     return true;
2173   } else { // BE
2174     if (M0 < 2 && M1 > 1) {
2175       Swap = false;
2176     } else if (M0 > 1 && M1 < 2) {
2177       M0 = (M0 + 2) % 4;
2178       M1 = (M1 + 2) % 4;
2179       Swap = true;
2180     } else
2181       return false;
2182 
2183     // Note: if control flow comes here that means Swap is already set above
2184     DM = (M0 << 1) + (M1 & 1);
2185     return true;
2186   }
2187 }
2188 
2189 
2190 /// getSplatIdxForPPCMnemonics - Return the splat index as a value that is
2191 /// appropriate for PPC mnemonics (which have a big endian bias - namely
2192 /// elements are counted from the left of the vector register).
2193 unsigned PPC::getSplatIdxForPPCMnemonics(SDNode *N, unsigned EltSize,
2194                                          SelectionDAG &DAG) {
2195   ShuffleVectorSDNode *SVOp = cast<ShuffleVectorSDNode>(N);
2196   assert(isSplatShuffleMask(SVOp, EltSize));
2197   if (DAG.getDataLayout().isLittleEndian())
2198     return (16 / EltSize) - 1 - (SVOp->getMaskElt(0) / EltSize);
2199   else
2200     return SVOp->getMaskElt(0) / EltSize;
2201 }
2202 
2203 /// get_VSPLTI_elt - If this is a build_vector of constants which can be formed
2204 /// by using a vspltis[bhw] instruction of the specified element size, return
2205 /// the constant being splatted.  The ByteSize field indicates the number of
2206 /// bytes of each element [124] -> [bhw].
2207 SDValue PPC::get_VSPLTI_elt(SDNode *N, unsigned ByteSize, SelectionDAG &DAG) {
2208   SDValue OpVal(nullptr, 0);
2209 
2210   // If ByteSize of the splat is bigger than the element size of the
2211   // build_vector, then we have a case where we are checking for a splat where
2212   // multiple elements of the buildvector are folded together into a single
2213   // logical element of the splat (e.g. "vsplish 1" to splat {0,1}*8).
2214   unsigned EltSize = 16/N->getNumOperands();
2215   if (EltSize < ByteSize) {
2216     unsigned Multiple = ByteSize/EltSize;   // Number of BV entries per spltval.
2217     SDValue UniquedVals[4];
2218     assert(Multiple > 1 && Multiple <= 4 && "How can this happen?");
2219 
2220     // See if all of the elements in the buildvector agree across.
2221     for (unsigned i = 0, e = N->getNumOperands(); i != e; ++i) {
2222       if (N->getOperand(i).isUndef()) continue;
2223       // If the element isn't a constant, bail fully out.
2224       if (!isa<ConstantSDNode>(N->getOperand(i))) return SDValue();
2225 
2226       if (!UniquedVals[i&(Multiple-1)].getNode())
2227         UniquedVals[i&(Multiple-1)] = N->getOperand(i);
2228       else if (UniquedVals[i&(Multiple-1)] != N->getOperand(i))
2229         return SDValue();  // no match.
2230     }
2231 
2232     // Okay, if we reached this point, UniquedVals[0..Multiple-1] contains
2233     // either constant or undef values that are identical for each chunk.  See
2234     // if these chunks can form into a larger vspltis*.
2235 
2236     // Check to see if all of the leading entries are either 0 or -1.  If
2237     // neither, then this won't fit into the immediate field.
2238     bool LeadingZero = true;
2239     bool LeadingOnes = true;
2240     for (unsigned i = 0; i != Multiple-1; ++i) {
2241       if (!UniquedVals[i].getNode()) continue;  // Must have been undefs.
2242 
2243       LeadingZero &= isNullConstant(UniquedVals[i]);
2244       LeadingOnes &= isAllOnesConstant(UniquedVals[i]);
2245     }
2246     // Finally, check the least significant entry.
2247     if (LeadingZero) {
2248       if (!UniquedVals[Multiple-1].getNode())
2249         return DAG.getTargetConstant(0, SDLoc(N), MVT::i32);  // 0,0,0,undef
2250       int Val = cast<ConstantSDNode>(UniquedVals[Multiple-1])->getZExtValue();
2251       if (Val < 16)                                   // 0,0,0,4 -> vspltisw(4)
2252         return DAG.getTargetConstant(Val, SDLoc(N), MVT::i32);
2253     }
2254     if (LeadingOnes) {
2255       if (!UniquedVals[Multiple-1].getNode())
2256         return DAG.getTargetConstant(~0U, SDLoc(N), MVT::i32); // -1,-1,-1,undef
2257       int Val =cast<ConstantSDNode>(UniquedVals[Multiple-1])->getSExtValue();
2258       if (Val >= -16)                            // -1,-1,-1,-2 -> vspltisw(-2)
2259         return DAG.getTargetConstant(Val, SDLoc(N), MVT::i32);
2260     }
2261 
2262     return SDValue();
2263   }
2264 
2265   // Check to see if this buildvec has a single non-undef value in its elements.
2266   for (unsigned i = 0, e = N->getNumOperands(); i != e; ++i) {
2267     if (N->getOperand(i).isUndef()) continue;
2268     if (!OpVal.getNode())
2269       OpVal = N->getOperand(i);
2270     else if (OpVal != N->getOperand(i))
2271       return SDValue();
2272   }
2273 
2274   if (!OpVal.getNode()) return SDValue();  // All UNDEF: use implicit def.
2275 
2276   unsigned ValSizeInBytes = EltSize;
2277   uint64_t Value = 0;
2278   if (ConstantSDNode *CN = dyn_cast<ConstantSDNode>(OpVal)) {
2279     Value = CN->getZExtValue();
2280   } else if (ConstantFPSDNode *CN = dyn_cast<ConstantFPSDNode>(OpVal)) {
2281     assert(CN->getValueType(0) == MVT::f32 && "Only one legal FP vector type!");
2282     Value = FloatToBits(CN->getValueAPF().convertToFloat());
2283   }
2284 
2285   // If the splat value is larger than the element value, then we can never do
2286   // this splat.  The only case that we could fit the replicated bits into our
2287   // immediate field for would be zero, and we prefer to use vxor for it.
2288   if (ValSizeInBytes < ByteSize) return SDValue();
2289 
2290   // If the element value is larger than the splat value, check if it consists
2291   // of a repeated bit pattern of size ByteSize.
2292   if (!APInt(ValSizeInBytes * 8, Value).isSplat(ByteSize * 8))
2293     return SDValue();
2294 
2295   // Properly sign extend the value.
2296   int MaskVal = SignExtend32(Value, ByteSize * 8);
2297 
2298   // If this is zero, don't match, zero matches ISD::isBuildVectorAllZeros.
2299   if (MaskVal == 0) return SDValue();
2300 
2301   // Finally, if this value fits in a 5 bit sext field, return it
2302   if (SignExtend32<5>(MaskVal) == MaskVal)
2303     return DAG.getTargetConstant(MaskVal, SDLoc(N), MVT::i32);
2304   return SDValue();
2305 }
2306 
2307 /// isQVALIGNIShuffleMask - If this is a qvaligni shuffle mask, return the shift
2308 /// amount, otherwise return -1.
2309 int PPC::isQVALIGNIShuffleMask(SDNode *N) {
2310   EVT VT = N->getValueType(0);
2311   if (VT != MVT::v4f64 && VT != MVT::v4f32 && VT != MVT::v4i1)
2312     return -1;
2313 
2314   ShuffleVectorSDNode *SVOp = cast<ShuffleVectorSDNode>(N);
2315 
2316   // Find the first non-undef value in the shuffle mask.
2317   unsigned i;
2318   for (i = 0; i != 4 && SVOp->getMaskElt(i) < 0; ++i)
2319     /*search*/;
2320 
2321   if (i == 4) return -1;  // all undef.
2322 
2323   // Otherwise, check to see if the rest of the elements are consecutively
2324   // numbered from this value.
2325   unsigned ShiftAmt = SVOp->getMaskElt(i);
2326   if (ShiftAmt < i) return -1;
2327   ShiftAmt -= i;
2328 
2329   // Check the rest of the elements to see if they are consecutive.
2330   for (++i; i != 4; ++i)
2331     if (!isConstantOrUndef(SVOp->getMaskElt(i), ShiftAmt+i))
2332       return -1;
2333 
2334   return ShiftAmt;
2335 }
2336 
2337 //===----------------------------------------------------------------------===//
2338 //  Addressing Mode Selection
2339 //===----------------------------------------------------------------------===//
2340 
2341 /// isIntS16Immediate - This method tests to see if the node is either a 32-bit
2342 /// or 64-bit immediate, and if the value can be accurately represented as a
2343 /// sign extension from a 16-bit value.  If so, this returns true and the
2344 /// immediate.
2345 bool llvm::isIntS16Immediate(SDNode *N, int16_t &Imm) {
2346   if (!isa<ConstantSDNode>(N))
2347     return false;
2348 
2349   Imm = (int16_t)cast<ConstantSDNode>(N)->getZExtValue();
2350   if (N->getValueType(0) == MVT::i32)
2351     return Imm == (int32_t)cast<ConstantSDNode>(N)->getZExtValue();
2352   else
2353     return Imm == (int64_t)cast<ConstantSDNode>(N)->getZExtValue();
2354 }
2355 bool llvm::isIntS16Immediate(SDValue Op, int16_t &Imm) {
2356   return isIntS16Immediate(Op.getNode(), Imm);
2357 }
2358 
2359 
2360 /// SelectAddressEVXRegReg - Given the specified address, check to see if it can
2361 /// be represented as an indexed [r+r] operation.
2362 bool PPCTargetLowering::SelectAddressEVXRegReg(SDValue N, SDValue &Base,
2363                                                SDValue &Index,
2364                                                SelectionDAG &DAG) const {
2365   for (SDNode::use_iterator UI = N->use_begin(), E = N->use_end();
2366       UI != E; ++UI) {
2367     if (MemSDNode *Memop = dyn_cast<MemSDNode>(*UI)) {
2368       if (Memop->getMemoryVT() == MVT::f64) {
2369           Base = N.getOperand(0);
2370           Index = N.getOperand(1);
2371           return true;
2372       }
2373     }
2374   }
2375   return false;
2376 }
2377 
2378 /// SelectAddressRegReg - Given the specified addressed, check to see if it
2379 /// can be represented as an indexed [r+r] operation.  Returns false if it
2380 /// can be more efficiently represented as [r+imm]. If \p EncodingAlignment is
2381 /// non-zero and N can be represented by a base register plus a signed 16-bit
2382 /// displacement, make a more precise judgement by checking (displacement % \p
2383 /// EncodingAlignment).
2384 bool PPCTargetLowering::SelectAddressRegReg(
2385     SDValue N, SDValue &Base, SDValue &Index, SelectionDAG &DAG,
2386     MaybeAlign EncodingAlignment) const {
2387   // If we have a PC Relative target flag don't select as [reg+reg]. It will be
2388   // a [pc+imm].
2389   if (SelectAddressPCRel(N, Base))
2390     return false;
2391 
2392   int16_t Imm = 0;
2393   if (N.getOpcode() == ISD::ADD) {
2394     // Is there any SPE load/store (f64), which can't handle 16bit offset?
2395     // SPE load/store can only handle 8-bit offsets.
2396     if (hasSPE() && SelectAddressEVXRegReg(N, Base, Index, DAG))
2397         return true;
2398     if (isIntS16Immediate(N.getOperand(1), Imm) &&
2399         (!EncodingAlignment || isAligned(*EncodingAlignment, Imm)))
2400       return false; // r+i
2401     if (N.getOperand(1).getOpcode() == PPCISD::Lo)
2402       return false;    // r+i
2403 
2404     Base = N.getOperand(0);
2405     Index = N.getOperand(1);
2406     return true;
2407   } else if (N.getOpcode() == ISD::OR) {
2408     if (isIntS16Immediate(N.getOperand(1), Imm) &&
2409         (!EncodingAlignment || isAligned(*EncodingAlignment, Imm)))
2410       return false; // r+i can fold it if we can.
2411 
2412     // If this is an or of disjoint bitfields, we can codegen this as an add
2413     // (for better address arithmetic) if the LHS and RHS of the OR are provably
2414     // disjoint.
2415     KnownBits LHSKnown = DAG.computeKnownBits(N.getOperand(0));
2416 
2417     if (LHSKnown.Zero.getBoolValue()) {
2418       KnownBits RHSKnown = DAG.computeKnownBits(N.getOperand(1));
2419       // If all of the bits are known zero on the LHS or RHS, the add won't
2420       // carry.
2421       if (~(LHSKnown.Zero | RHSKnown.Zero) == 0) {
2422         Base = N.getOperand(0);
2423         Index = N.getOperand(1);
2424         return true;
2425       }
2426     }
2427   }
2428 
2429   return false;
2430 }
2431 
2432 // If we happen to be doing an i64 load or store into a stack slot that has
2433 // less than a 4-byte alignment, then the frame-index elimination may need to
2434 // use an indexed load or store instruction (because the offset may not be a
2435 // multiple of 4). The extra register needed to hold the offset comes from the
2436 // register scavenger, and it is possible that the scavenger will need to use
2437 // an emergency spill slot. As a result, we need to make sure that a spill slot
2438 // is allocated when doing an i64 load/store into a less-than-4-byte-aligned
2439 // stack slot.
2440 static void fixupFuncForFI(SelectionDAG &DAG, int FrameIdx, EVT VT) {
2441   // FIXME: This does not handle the LWA case.
2442   if (VT != MVT::i64)
2443     return;
2444 
2445   // NOTE: We'll exclude negative FIs here, which come from argument
2446   // lowering, because there are no known test cases triggering this problem
2447   // using packed structures (or similar). We can remove this exclusion if
2448   // we find such a test case. The reason why this is so test-case driven is
2449   // because this entire 'fixup' is only to prevent crashes (from the
2450   // register scavenger) on not-really-valid inputs. For example, if we have:
2451   //   %a = alloca i1
2452   //   %b = bitcast i1* %a to i64*
2453   //   store i64* a, i64 b
2454   // then the store should really be marked as 'align 1', but is not. If it
2455   // were marked as 'align 1' then the indexed form would have been
2456   // instruction-selected initially, and the problem this 'fixup' is preventing
2457   // won't happen regardless.
2458   if (FrameIdx < 0)
2459     return;
2460 
2461   MachineFunction &MF = DAG.getMachineFunction();
2462   MachineFrameInfo &MFI = MF.getFrameInfo();
2463 
2464   if (MFI.getObjectAlign(FrameIdx) >= Align(4))
2465     return;
2466 
2467   PPCFunctionInfo *FuncInfo = MF.getInfo<PPCFunctionInfo>();
2468   FuncInfo->setHasNonRISpills();
2469 }
2470 
2471 /// Returns true if the address N can be represented by a base register plus
2472 /// a signed 16-bit displacement [r+imm], and if it is not better
2473 /// represented as reg+reg.  If \p EncodingAlignment is non-zero, only accept
2474 /// displacements that are multiples of that value.
2475 bool PPCTargetLowering::SelectAddressRegImm(
2476     SDValue N, SDValue &Disp, SDValue &Base, SelectionDAG &DAG,
2477     MaybeAlign EncodingAlignment) const {
2478   // FIXME dl should come from parent load or store, not from address
2479   SDLoc dl(N);
2480 
2481   // If we have a PC Relative target flag don't select as [reg+imm]. It will be
2482   // a [pc+imm].
2483   if (SelectAddressPCRel(N, Base))
2484     return false;
2485 
2486   // If this can be more profitably realized as r+r, fail.
2487   if (SelectAddressRegReg(N, Disp, Base, DAG, EncodingAlignment))
2488     return false;
2489 
2490   if (N.getOpcode() == ISD::ADD) {
2491     int16_t imm = 0;
2492     if (isIntS16Immediate(N.getOperand(1), imm) &&
2493         (!EncodingAlignment || isAligned(*EncodingAlignment, imm))) {
2494       Disp = DAG.getTargetConstant(imm, dl, N.getValueType());
2495       if (FrameIndexSDNode *FI = dyn_cast<FrameIndexSDNode>(N.getOperand(0))) {
2496         Base = DAG.getTargetFrameIndex(FI->getIndex(), N.getValueType());
2497         fixupFuncForFI(DAG, FI->getIndex(), N.getValueType());
2498       } else {
2499         Base = N.getOperand(0);
2500       }
2501       return true; // [r+i]
2502     } else if (N.getOperand(1).getOpcode() == PPCISD::Lo) {
2503       // Match LOAD (ADD (X, Lo(G))).
2504       assert(!cast<ConstantSDNode>(N.getOperand(1).getOperand(1))->getZExtValue()
2505              && "Cannot handle constant offsets yet!");
2506       Disp = N.getOperand(1).getOperand(0);  // The global address.
2507       assert(Disp.getOpcode() == ISD::TargetGlobalAddress ||
2508              Disp.getOpcode() == ISD::TargetGlobalTLSAddress ||
2509              Disp.getOpcode() == ISD::TargetConstantPool ||
2510              Disp.getOpcode() == ISD::TargetJumpTable);
2511       Base = N.getOperand(0);
2512       return true;  // [&g+r]
2513     }
2514   } else if (N.getOpcode() == ISD::OR) {
2515     int16_t imm = 0;
2516     if (isIntS16Immediate(N.getOperand(1), imm) &&
2517         (!EncodingAlignment || isAligned(*EncodingAlignment, imm))) {
2518       // If this is an or of disjoint bitfields, we can codegen this as an add
2519       // (for better address arithmetic) if the LHS and RHS of the OR are
2520       // provably disjoint.
2521       KnownBits LHSKnown = DAG.computeKnownBits(N.getOperand(0));
2522 
2523       if ((LHSKnown.Zero.getZExtValue()|~(uint64_t)imm) == ~0ULL) {
2524         // If all of the bits are known zero on the LHS or RHS, the add won't
2525         // carry.
2526         if (FrameIndexSDNode *FI =
2527               dyn_cast<FrameIndexSDNode>(N.getOperand(0))) {
2528           Base = DAG.getTargetFrameIndex(FI->getIndex(), N.getValueType());
2529           fixupFuncForFI(DAG, FI->getIndex(), N.getValueType());
2530         } else {
2531           Base = N.getOperand(0);
2532         }
2533         Disp = DAG.getTargetConstant(imm, dl, N.getValueType());
2534         return true;
2535       }
2536     }
2537   } else if (ConstantSDNode *CN = dyn_cast<ConstantSDNode>(N)) {
2538     // Loading from a constant address.
2539 
2540     // If this address fits entirely in a 16-bit sext immediate field, codegen
2541     // this as "d, 0"
2542     int16_t Imm;
2543     if (isIntS16Immediate(CN, Imm) &&
2544         (!EncodingAlignment || isAligned(*EncodingAlignment, Imm))) {
2545       Disp = DAG.getTargetConstant(Imm, dl, CN->getValueType(0));
2546       Base = DAG.getRegister(Subtarget.isPPC64() ? PPC::ZERO8 : PPC::ZERO,
2547                              CN->getValueType(0));
2548       return true;
2549     }
2550 
2551     // Handle 32-bit sext immediates with LIS + addr mode.
2552     if ((CN->getValueType(0) == MVT::i32 ||
2553          (int64_t)CN->getZExtValue() == (int)CN->getZExtValue()) &&
2554         (!EncodingAlignment ||
2555          isAligned(*EncodingAlignment, CN->getZExtValue()))) {
2556       int Addr = (int)CN->getZExtValue();
2557 
2558       // Otherwise, break this down into an LIS + disp.
2559       Disp = DAG.getTargetConstant((short)Addr, dl, MVT::i32);
2560 
2561       Base = DAG.getTargetConstant((Addr - (signed short)Addr) >> 16, dl,
2562                                    MVT::i32);
2563       unsigned Opc = CN->getValueType(0) == MVT::i32 ? PPC::LIS : PPC::LIS8;
2564       Base = SDValue(DAG.getMachineNode(Opc, dl, CN->getValueType(0), Base), 0);
2565       return true;
2566     }
2567   }
2568 
2569   Disp = DAG.getTargetConstant(0, dl, getPointerTy(DAG.getDataLayout()));
2570   if (FrameIndexSDNode *FI = dyn_cast<FrameIndexSDNode>(N)) {
2571     Base = DAG.getTargetFrameIndex(FI->getIndex(), N.getValueType());
2572     fixupFuncForFI(DAG, FI->getIndex(), N.getValueType());
2573   } else
2574     Base = N;
2575   return true;      // [r+0]
2576 }
2577 
2578 /// SelectAddressRegRegOnly - Given the specified addressed, force it to be
2579 /// represented as an indexed [r+r] operation.
2580 bool PPCTargetLowering::SelectAddressRegRegOnly(SDValue N, SDValue &Base,
2581                                                 SDValue &Index,
2582                                                 SelectionDAG &DAG) const {
2583   // Check to see if we can easily represent this as an [r+r] address.  This
2584   // will fail if it thinks that the address is more profitably represented as
2585   // reg+imm, e.g. where imm = 0.
2586   if (SelectAddressRegReg(N, Base, Index, DAG))
2587     return true;
2588 
2589   // If the address is the result of an add, we will utilize the fact that the
2590   // address calculation includes an implicit add.  However, we can reduce
2591   // register pressure if we do not materialize a constant just for use as the
2592   // index register.  We only get rid of the add if it is not an add of a
2593   // value and a 16-bit signed constant and both have a single use.
2594   int16_t imm = 0;
2595   if (N.getOpcode() == ISD::ADD &&
2596       (!isIntS16Immediate(N.getOperand(1), imm) ||
2597        !N.getOperand(1).hasOneUse() || !N.getOperand(0).hasOneUse())) {
2598     Base = N.getOperand(0);
2599     Index = N.getOperand(1);
2600     return true;
2601   }
2602 
2603   // Otherwise, do it the hard way, using R0 as the base register.
2604   Base = DAG.getRegister(Subtarget.isPPC64() ? PPC::ZERO8 : PPC::ZERO,
2605                          N.getValueType());
2606   Index = N;
2607   return true;
2608 }
2609 
2610 template <typename Ty> static bool isValidPCRelNode(SDValue N) {
2611   Ty *PCRelCand = dyn_cast<Ty>(N);
2612   return PCRelCand && (PCRelCand->getTargetFlags() & PPCII::MO_PCREL_FLAG);
2613 }
2614 
2615 /// Returns true if this address is a PC Relative address.
2616 /// PC Relative addresses are marked with the flag PPCII::MO_PCREL_FLAG
2617 /// or if the node opcode is PPCISD::MAT_PCREL_ADDR.
2618 bool PPCTargetLowering::SelectAddressPCRel(SDValue N, SDValue &Base) const {
2619   // This is a materialize PC Relative node. Always select this as PC Relative.
2620   Base = N;
2621   if (N.getOpcode() == PPCISD::MAT_PCREL_ADDR)
2622     return true;
2623   if (isValidPCRelNode<ConstantPoolSDNode>(N) ||
2624       isValidPCRelNode<GlobalAddressSDNode>(N) ||
2625       isValidPCRelNode<JumpTableSDNode>(N) ||
2626       isValidPCRelNode<BlockAddressSDNode>(N))
2627     return true;
2628   return false;
2629 }
2630 
2631 /// Returns true if we should use a direct load into vector instruction
2632 /// (such as lxsd or lfd), instead of a load into gpr + direct move sequence.
2633 static bool usePartialVectorLoads(SDNode *N, const PPCSubtarget& ST) {
2634 
2635   // If there are any other uses other than scalar to vector, then we should
2636   // keep it as a scalar load -> direct move pattern to prevent multiple
2637   // loads.
2638   LoadSDNode *LD = dyn_cast<LoadSDNode>(N);
2639   if (!LD)
2640     return false;
2641 
2642   EVT MemVT = LD->getMemoryVT();
2643   if (!MemVT.isSimple())
2644     return false;
2645   switch(MemVT.getSimpleVT().SimpleTy) {
2646   case MVT::i64:
2647     break;
2648   case MVT::i32:
2649     if (!ST.hasP8Vector())
2650       return false;
2651     break;
2652   case MVT::i16:
2653   case MVT::i8:
2654     if (!ST.hasP9Vector())
2655       return false;
2656     break;
2657   default:
2658     return false;
2659   }
2660 
2661   SDValue LoadedVal(N, 0);
2662   if (!LoadedVal.hasOneUse())
2663     return false;
2664 
2665   for (SDNode::use_iterator UI = LD->use_begin(), UE = LD->use_end();
2666        UI != UE; ++UI)
2667     if (UI.getUse().get().getResNo() == 0 &&
2668         UI->getOpcode() != ISD::SCALAR_TO_VECTOR &&
2669         UI->getOpcode() != PPCISD::SCALAR_TO_VECTOR_PERMUTED)
2670       return false;
2671 
2672   return true;
2673 }
2674 
2675 /// getPreIndexedAddressParts - returns true by value, base pointer and
2676 /// offset pointer and addressing mode by reference if the node's address
2677 /// can be legally represented as pre-indexed load / store address.
2678 bool PPCTargetLowering::getPreIndexedAddressParts(SDNode *N, SDValue &Base,
2679                                                   SDValue &Offset,
2680                                                   ISD::MemIndexedMode &AM,
2681                                                   SelectionDAG &DAG) const {
2682   if (DisablePPCPreinc) return false;
2683 
2684   bool isLoad = true;
2685   SDValue Ptr;
2686   EVT VT;
2687   unsigned Alignment;
2688   if (LoadSDNode *LD = dyn_cast<LoadSDNode>(N)) {
2689     Ptr = LD->getBasePtr();
2690     VT = LD->getMemoryVT();
2691     Alignment = LD->getAlignment();
2692   } else if (StoreSDNode *ST = dyn_cast<StoreSDNode>(N)) {
2693     Ptr = ST->getBasePtr();
2694     VT  = ST->getMemoryVT();
2695     Alignment = ST->getAlignment();
2696     isLoad = false;
2697   } else
2698     return false;
2699 
2700   // Do not generate pre-inc forms for specific loads that feed scalar_to_vector
2701   // instructions because we can fold these into a more efficient instruction
2702   // instead, (such as LXSD).
2703   if (isLoad && usePartialVectorLoads(N, Subtarget)) {
2704     return false;
2705   }
2706 
2707   // PowerPC doesn't have preinc load/store instructions for vectors
2708   if (VT.isVector())
2709     return false;
2710 
2711   if (SelectAddressRegReg(Ptr, Base, Offset, DAG)) {
2712     // Common code will reject creating a pre-inc form if the base pointer
2713     // is a frame index, or if N is a store and the base pointer is either
2714     // the same as or a predecessor of the value being stored.  Check for
2715     // those situations here, and try with swapped Base/Offset instead.
2716     bool Swap = false;
2717 
2718     if (isa<FrameIndexSDNode>(Base) || isa<RegisterSDNode>(Base))
2719       Swap = true;
2720     else if (!isLoad) {
2721       SDValue Val = cast<StoreSDNode>(N)->getValue();
2722       if (Val == Base || Base.getNode()->isPredecessorOf(Val.getNode()))
2723         Swap = true;
2724     }
2725 
2726     if (Swap)
2727       std::swap(Base, Offset);
2728 
2729     AM = ISD::PRE_INC;
2730     return true;
2731   }
2732 
2733   // LDU/STU can only handle immediates that are a multiple of 4.
2734   if (VT != MVT::i64) {
2735     if (!SelectAddressRegImm(Ptr, Offset, Base, DAG, None))
2736       return false;
2737   } else {
2738     // LDU/STU need an address with at least 4-byte alignment.
2739     if (Alignment < 4)
2740       return false;
2741 
2742     if (!SelectAddressRegImm(Ptr, Offset, Base, DAG, Align(4)))
2743       return false;
2744   }
2745 
2746   if (LoadSDNode *LD = dyn_cast<LoadSDNode>(N)) {
2747     // PPC64 doesn't have lwau, but it does have lwaux.  Reject preinc load of
2748     // sext i32 to i64 when addr mode is r+i.
2749     if (LD->getValueType(0) == MVT::i64 && LD->getMemoryVT() == MVT::i32 &&
2750         LD->getExtensionType() == ISD::SEXTLOAD &&
2751         isa<ConstantSDNode>(Offset))
2752       return false;
2753   }
2754 
2755   AM = ISD::PRE_INC;
2756   return true;
2757 }
2758 
2759 //===----------------------------------------------------------------------===//
2760 //  LowerOperation implementation
2761 //===----------------------------------------------------------------------===//
2762 
2763 /// Return true if we should reference labels using a PICBase, set the HiOpFlags
2764 /// and LoOpFlags to the target MO flags.
2765 static void getLabelAccessInfo(bool IsPIC, const PPCSubtarget &Subtarget,
2766                                unsigned &HiOpFlags, unsigned &LoOpFlags,
2767                                const GlobalValue *GV = nullptr) {
2768   HiOpFlags = PPCII::MO_HA;
2769   LoOpFlags = PPCII::MO_LO;
2770 
2771   // Don't use the pic base if not in PIC relocation model.
2772   if (IsPIC) {
2773     HiOpFlags |= PPCII::MO_PIC_FLAG;
2774     LoOpFlags |= PPCII::MO_PIC_FLAG;
2775   }
2776 }
2777 
2778 static SDValue LowerLabelRef(SDValue HiPart, SDValue LoPart, bool isPIC,
2779                              SelectionDAG &DAG) {
2780   SDLoc DL(HiPart);
2781   EVT PtrVT = HiPart.getValueType();
2782   SDValue Zero = DAG.getConstant(0, DL, PtrVT);
2783 
2784   SDValue Hi = DAG.getNode(PPCISD::Hi, DL, PtrVT, HiPart, Zero);
2785   SDValue Lo = DAG.getNode(PPCISD::Lo, DL, PtrVT, LoPart, Zero);
2786 
2787   // With PIC, the first instruction is actually "GR+hi(&G)".
2788   if (isPIC)
2789     Hi = DAG.getNode(ISD::ADD, DL, PtrVT,
2790                      DAG.getNode(PPCISD::GlobalBaseReg, DL, PtrVT), Hi);
2791 
2792   // Generate non-pic code that has direct accesses to the constant pool.
2793   // The address of the global is just (hi(&g)+lo(&g)).
2794   return DAG.getNode(ISD::ADD, DL, PtrVT, Hi, Lo);
2795 }
2796 
2797 static void setUsesTOCBasePtr(MachineFunction &MF) {
2798   PPCFunctionInfo *FuncInfo = MF.getInfo<PPCFunctionInfo>();
2799   FuncInfo->setUsesTOCBasePtr();
2800 }
2801 
2802 static void setUsesTOCBasePtr(SelectionDAG &DAG) {
2803   setUsesTOCBasePtr(DAG.getMachineFunction());
2804 }
2805 
2806 SDValue PPCTargetLowering::getTOCEntry(SelectionDAG &DAG, const SDLoc &dl,
2807                                        SDValue GA) const {
2808   const bool Is64Bit = Subtarget.isPPC64();
2809   EVT VT = Is64Bit ? MVT::i64 : MVT::i32;
2810   SDValue Reg = Is64Bit ? DAG.getRegister(PPC::X2, VT)
2811                         : Subtarget.isAIXABI()
2812                               ? DAG.getRegister(PPC::R2, VT)
2813                               : DAG.getNode(PPCISD::GlobalBaseReg, dl, VT);
2814   SDValue Ops[] = { GA, Reg };
2815   return DAG.getMemIntrinsicNode(
2816       PPCISD::TOC_ENTRY, dl, DAG.getVTList(VT, MVT::Other), Ops, VT,
2817       MachinePointerInfo::getGOT(DAG.getMachineFunction()), None,
2818       MachineMemOperand::MOLoad);
2819 }
2820 
2821 SDValue PPCTargetLowering::LowerConstantPool(SDValue Op,
2822                                              SelectionDAG &DAG) const {
2823   EVT PtrVT = Op.getValueType();
2824   ConstantPoolSDNode *CP = cast<ConstantPoolSDNode>(Op);
2825   const Constant *C = CP->getConstVal();
2826 
2827   // 64-bit SVR4 ABI and AIX ABI code are always position-independent.
2828   // The actual address of the GlobalValue is stored in the TOC.
2829   if (Subtarget.is64BitELFABI() || Subtarget.isAIXABI()) {
2830     if (Subtarget.isUsingPCRelativeCalls()) {
2831       SDLoc DL(CP);
2832       EVT Ty = getPointerTy(DAG.getDataLayout());
2833       SDValue ConstPool = DAG.getTargetConstantPool(
2834           C, Ty, CP->getAlign(), CP->getOffset(), PPCII::MO_PCREL_FLAG);
2835       return DAG.getNode(PPCISD::MAT_PCREL_ADDR, DL, Ty, ConstPool);
2836     }
2837     setUsesTOCBasePtr(DAG);
2838     SDValue GA = DAG.getTargetConstantPool(C, PtrVT, CP->getAlign(), 0);
2839     return getTOCEntry(DAG, SDLoc(CP), GA);
2840   }
2841 
2842   unsigned MOHiFlag, MOLoFlag;
2843   bool IsPIC = isPositionIndependent();
2844   getLabelAccessInfo(IsPIC, Subtarget, MOHiFlag, MOLoFlag);
2845 
2846   if (IsPIC && Subtarget.isSVR4ABI()) {
2847     SDValue GA =
2848         DAG.getTargetConstantPool(C, PtrVT, CP->getAlign(), PPCII::MO_PIC_FLAG);
2849     return getTOCEntry(DAG, SDLoc(CP), GA);
2850   }
2851 
2852   SDValue CPIHi =
2853       DAG.getTargetConstantPool(C, PtrVT, CP->getAlign(), 0, MOHiFlag);
2854   SDValue CPILo =
2855       DAG.getTargetConstantPool(C, PtrVT, CP->getAlign(), 0, MOLoFlag);
2856   return LowerLabelRef(CPIHi, CPILo, IsPIC, DAG);
2857 }
2858 
2859 // For 64-bit PowerPC, prefer the more compact relative encodings.
2860 // This trades 32 bits per jump table entry for one or two instructions
2861 // on the jump site.
2862 unsigned PPCTargetLowering::getJumpTableEncoding() const {
2863   if (isJumpTableRelative())
2864     return MachineJumpTableInfo::EK_LabelDifference32;
2865 
2866   return TargetLowering::getJumpTableEncoding();
2867 }
2868 
2869 bool PPCTargetLowering::isJumpTableRelative() const {
2870   if (UseAbsoluteJumpTables)
2871     return false;
2872   if (Subtarget.isPPC64() || Subtarget.isAIXABI())
2873     return true;
2874   return TargetLowering::isJumpTableRelative();
2875 }
2876 
2877 SDValue PPCTargetLowering::getPICJumpTableRelocBase(SDValue Table,
2878                                                     SelectionDAG &DAG) const {
2879   if (!Subtarget.isPPC64() || Subtarget.isAIXABI())
2880     return TargetLowering::getPICJumpTableRelocBase(Table, DAG);
2881 
2882   switch (getTargetMachine().getCodeModel()) {
2883   case CodeModel::Small:
2884   case CodeModel::Medium:
2885     return TargetLowering::getPICJumpTableRelocBase(Table, DAG);
2886   default:
2887     return DAG.getNode(PPCISD::GlobalBaseReg, SDLoc(),
2888                        getPointerTy(DAG.getDataLayout()));
2889   }
2890 }
2891 
2892 const MCExpr *
2893 PPCTargetLowering::getPICJumpTableRelocBaseExpr(const MachineFunction *MF,
2894                                                 unsigned JTI,
2895                                                 MCContext &Ctx) const {
2896   if (!Subtarget.isPPC64() || Subtarget.isAIXABI())
2897     return TargetLowering::getPICJumpTableRelocBaseExpr(MF, JTI, Ctx);
2898 
2899   switch (getTargetMachine().getCodeModel()) {
2900   case CodeModel::Small:
2901   case CodeModel::Medium:
2902     return TargetLowering::getPICJumpTableRelocBaseExpr(MF, JTI, Ctx);
2903   default:
2904     return MCSymbolRefExpr::create(MF->getPICBaseSymbol(), Ctx);
2905   }
2906 }
2907 
2908 SDValue PPCTargetLowering::LowerJumpTable(SDValue Op, SelectionDAG &DAG) const {
2909   EVT PtrVT = Op.getValueType();
2910   JumpTableSDNode *JT = cast<JumpTableSDNode>(Op);
2911 
2912   // isUsingPCRelativeCalls() returns true when PCRelative is enabled
2913   if (Subtarget.isUsingPCRelativeCalls()) {
2914     SDLoc DL(JT);
2915     EVT Ty = getPointerTy(DAG.getDataLayout());
2916     SDValue GA =
2917         DAG.getTargetJumpTable(JT->getIndex(), Ty, PPCII::MO_PCREL_FLAG);
2918     SDValue MatAddr = DAG.getNode(PPCISD::MAT_PCREL_ADDR, DL, Ty, GA);
2919     return MatAddr;
2920   }
2921 
2922   // 64-bit SVR4 ABI and AIX ABI code are always position-independent.
2923   // The actual address of the GlobalValue is stored in the TOC.
2924   if (Subtarget.is64BitELFABI() || Subtarget.isAIXABI()) {
2925     setUsesTOCBasePtr(DAG);
2926     SDValue GA = DAG.getTargetJumpTable(JT->getIndex(), PtrVT);
2927     return getTOCEntry(DAG, SDLoc(JT), GA);
2928   }
2929 
2930   unsigned MOHiFlag, MOLoFlag;
2931   bool IsPIC = isPositionIndependent();
2932   getLabelAccessInfo(IsPIC, Subtarget, MOHiFlag, MOLoFlag);
2933 
2934   if (IsPIC && Subtarget.isSVR4ABI()) {
2935     SDValue GA = DAG.getTargetJumpTable(JT->getIndex(), PtrVT,
2936                                         PPCII::MO_PIC_FLAG);
2937     return getTOCEntry(DAG, SDLoc(GA), GA);
2938   }
2939 
2940   SDValue JTIHi = DAG.getTargetJumpTable(JT->getIndex(), PtrVT, MOHiFlag);
2941   SDValue JTILo = DAG.getTargetJumpTable(JT->getIndex(), PtrVT, MOLoFlag);
2942   return LowerLabelRef(JTIHi, JTILo, IsPIC, DAG);
2943 }
2944 
2945 SDValue PPCTargetLowering::LowerBlockAddress(SDValue Op,
2946                                              SelectionDAG &DAG) const {
2947   EVT PtrVT = Op.getValueType();
2948   BlockAddressSDNode *BASDN = cast<BlockAddressSDNode>(Op);
2949   const BlockAddress *BA = BASDN->getBlockAddress();
2950 
2951   // isUsingPCRelativeCalls() returns true when PCRelative is enabled
2952   if (Subtarget.isUsingPCRelativeCalls()) {
2953     SDLoc DL(BASDN);
2954     EVT Ty = getPointerTy(DAG.getDataLayout());
2955     SDValue GA = DAG.getTargetBlockAddress(BA, Ty, BASDN->getOffset(),
2956                                            PPCII::MO_PCREL_FLAG);
2957     SDValue MatAddr = DAG.getNode(PPCISD::MAT_PCREL_ADDR, DL, Ty, GA);
2958     return MatAddr;
2959   }
2960 
2961   // 64-bit SVR4 ABI and AIX ABI code are always position-independent.
2962   // The actual BlockAddress is stored in the TOC.
2963   if (Subtarget.is64BitELFABI() || Subtarget.isAIXABI()) {
2964     setUsesTOCBasePtr(DAG);
2965     SDValue GA = DAG.getTargetBlockAddress(BA, PtrVT, BASDN->getOffset());
2966     return getTOCEntry(DAG, SDLoc(BASDN), GA);
2967   }
2968 
2969   // 32-bit position-independent ELF stores the BlockAddress in the .got.
2970   if (Subtarget.is32BitELFABI() && isPositionIndependent())
2971     return getTOCEntry(
2972         DAG, SDLoc(BASDN),
2973         DAG.getTargetBlockAddress(BA, PtrVT, BASDN->getOffset()));
2974 
2975   unsigned MOHiFlag, MOLoFlag;
2976   bool IsPIC = isPositionIndependent();
2977   getLabelAccessInfo(IsPIC, Subtarget, MOHiFlag, MOLoFlag);
2978   SDValue TgtBAHi = DAG.getTargetBlockAddress(BA, PtrVT, 0, MOHiFlag);
2979   SDValue TgtBALo = DAG.getTargetBlockAddress(BA, PtrVT, 0, MOLoFlag);
2980   return LowerLabelRef(TgtBAHi, TgtBALo, IsPIC, DAG);
2981 }
2982 
2983 SDValue PPCTargetLowering::LowerGlobalTLSAddress(SDValue Op,
2984                                               SelectionDAG &DAG) const {
2985   // FIXME: TLS addresses currently use medium model code sequences,
2986   // which is the most useful form.  Eventually support for small and
2987   // large models could be added if users need it, at the cost of
2988   // additional complexity.
2989   if (Subtarget.isUsingPCRelativeCalls() && !EnablePPCPCRelTLS)
2990     report_fatal_error("Thread local storage is not supported with pc-relative"
2991                        " addressing - please compile with -mno-pcrel");
2992   GlobalAddressSDNode *GA = cast<GlobalAddressSDNode>(Op);
2993   if (DAG.getTarget().useEmulatedTLS())
2994     return LowerToTLSEmulatedModel(GA, DAG);
2995 
2996   SDLoc dl(GA);
2997   const GlobalValue *GV = GA->getGlobal();
2998   EVT PtrVT = getPointerTy(DAG.getDataLayout());
2999   bool is64bit = Subtarget.isPPC64();
3000   const Module *M = DAG.getMachineFunction().getFunction().getParent();
3001   PICLevel::Level picLevel = M->getPICLevel();
3002 
3003   const TargetMachine &TM = getTargetMachine();
3004   TLSModel::Model Model = TM.getTLSModel(GV);
3005 
3006   if (Model == TLSModel::LocalExec) {
3007     SDValue TGAHi = DAG.getTargetGlobalAddress(GV, dl, PtrVT, 0,
3008                                                PPCII::MO_TPREL_HA);
3009     SDValue TGALo = DAG.getTargetGlobalAddress(GV, dl, PtrVT, 0,
3010                                                PPCII::MO_TPREL_LO);
3011     SDValue TLSReg = is64bit ? DAG.getRegister(PPC::X13, MVT::i64)
3012                              : DAG.getRegister(PPC::R2, MVT::i32);
3013 
3014     SDValue Hi = DAG.getNode(PPCISD::Hi, dl, PtrVT, TGAHi, TLSReg);
3015     return DAG.getNode(PPCISD::Lo, dl, PtrVT, TGALo, Hi);
3016   }
3017 
3018   if (Model == TLSModel::InitialExec) {
3019     bool IsPCRel = Subtarget.isUsingPCRelativeCalls();
3020     SDValue TGA = DAG.getTargetGlobalAddress(
3021         GV, dl, PtrVT, 0, IsPCRel ? PPCII::MO_GOT_TPREL_PCREL_FLAG : 0);
3022     SDValue TGATLS = DAG.getTargetGlobalAddress(
3023         GV, dl, PtrVT, 0,
3024         IsPCRel ? (PPCII::MO_TLS | PPCII::MO_PCREL_FLAG) : PPCII::MO_TLS);
3025     SDValue TPOffset;
3026     if (IsPCRel) {
3027       SDValue MatPCRel = DAG.getNode(PPCISD::MAT_PCREL_ADDR, dl, PtrVT, TGA);
3028       TPOffset = DAG.getLoad(MVT::i64, dl, DAG.getEntryNode(), MatPCRel,
3029                              MachinePointerInfo());
3030     } else {
3031       SDValue GOTPtr;
3032       if (is64bit) {
3033         setUsesTOCBasePtr(DAG);
3034         SDValue GOTReg = DAG.getRegister(PPC::X2, MVT::i64);
3035         GOTPtr =
3036             DAG.getNode(PPCISD::ADDIS_GOT_TPREL_HA, dl, PtrVT, GOTReg, TGA);
3037       } else {
3038         if (!TM.isPositionIndependent())
3039           GOTPtr = DAG.getNode(PPCISD::PPC32_GOT, dl, PtrVT);
3040         else if (picLevel == PICLevel::SmallPIC)
3041           GOTPtr = DAG.getNode(PPCISD::GlobalBaseReg, dl, PtrVT);
3042         else
3043           GOTPtr = DAG.getNode(PPCISD::PPC32_PICGOT, dl, PtrVT);
3044       }
3045       TPOffset = DAG.getNode(PPCISD::LD_GOT_TPREL_L, dl, PtrVT, TGA, GOTPtr);
3046     }
3047     return DAG.getNode(PPCISD::ADD_TLS, dl, PtrVT, TPOffset, TGATLS);
3048   }
3049 
3050   if (Model == TLSModel::GeneralDynamic) {
3051     if (Subtarget.isUsingPCRelativeCalls()) {
3052       SDValue TGA = DAG.getTargetGlobalAddress(GV, dl, PtrVT, 0,
3053                                                PPCII::MO_GOT_TLSGD_PCREL_FLAG);
3054       return DAG.getNode(PPCISD::TLS_DYNAMIC_MAT_PCREL_ADDR, dl, PtrVT, TGA);
3055     }
3056 
3057     SDValue TGA = DAG.getTargetGlobalAddress(GV, dl, PtrVT, 0, 0);
3058     SDValue GOTPtr;
3059     if (is64bit) {
3060       setUsesTOCBasePtr(DAG);
3061       SDValue GOTReg = DAG.getRegister(PPC::X2, MVT::i64);
3062       GOTPtr = DAG.getNode(PPCISD::ADDIS_TLSGD_HA, dl, PtrVT,
3063                                    GOTReg, TGA);
3064     } else {
3065       if (picLevel == PICLevel::SmallPIC)
3066         GOTPtr = DAG.getNode(PPCISD::GlobalBaseReg, dl, PtrVT);
3067       else
3068         GOTPtr = DAG.getNode(PPCISD::PPC32_PICGOT, dl, PtrVT);
3069     }
3070     return DAG.getNode(PPCISD::ADDI_TLSGD_L_ADDR, dl, PtrVT,
3071                        GOTPtr, TGA, TGA);
3072   }
3073 
3074   if (Model == TLSModel::LocalDynamic) {
3075     SDValue TGA = DAG.getTargetGlobalAddress(GV, dl, PtrVT, 0, 0);
3076     SDValue GOTPtr;
3077     if (is64bit) {
3078       setUsesTOCBasePtr(DAG);
3079       SDValue GOTReg = DAG.getRegister(PPC::X2, MVT::i64);
3080       GOTPtr = DAG.getNode(PPCISD::ADDIS_TLSLD_HA, dl, PtrVT,
3081                            GOTReg, TGA);
3082     } else {
3083       if (picLevel == PICLevel::SmallPIC)
3084         GOTPtr = DAG.getNode(PPCISD::GlobalBaseReg, dl, PtrVT);
3085       else
3086         GOTPtr = DAG.getNode(PPCISD::PPC32_PICGOT, dl, PtrVT);
3087     }
3088     SDValue TLSAddr = DAG.getNode(PPCISD::ADDI_TLSLD_L_ADDR, dl,
3089                                   PtrVT, GOTPtr, TGA, TGA);
3090     SDValue DtvOffsetHi = DAG.getNode(PPCISD::ADDIS_DTPREL_HA, dl,
3091                                       PtrVT, TLSAddr, TGA);
3092     return DAG.getNode(PPCISD::ADDI_DTPREL_L, dl, PtrVT, DtvOffsetHi, TGA);
3093   }
3094 
3095   llvm_unreachable("Unknown TLS model!");
3096 }
3097 
3098 SDValue PPCTargetLowering::LowerGlobalAddress(SDValue Op,
3099                                               SelectionDAG &DAG) const {
3100   EVT PtrVT = Op.getValueType();
3101   GlobalAddressSDNode *GSDN = cast<GlobalAddressSDNode>(Op);
3102   SDLoc DL(GSDN);
3103   const GlobalValue *GV = GSDN->getGlobal();
3104 
3105   // 64-bit SVR4 ABI & AIX ABI code is always position-independent.
3106   // The actual address of the GlobalValue is stored in the TOC.
3107   if (Subtarget.is64BitELFABI() || Subtarget.isAIXABI()) {
3108     if (Subtarget.isUsingPCRelativeCalls()) {
3109       EVT Ty = getPointerTy(DAG.getDataLayout());
3110       if (isAccessedAsGotIndirect(Op)) {
3111         SDValue GA = DAG.getTargetGlobalAddress(GV, DL, Ty, GSDN->getOffset(),
3112                                                 PPCII::MO_PCREL_FLAG |
3113                                                     PPCII::MO_GOT_FLAG);
3114         SDValue MatPCRel = DAG.getNode(PPCISD::MAT_PCREL_ADDR, DL, Ty, GA);
3115         SDValue Load = DAG.getLoad(MVT::i64, DL, DAG.getEntryNode(), MatPCRel,
3116                                    MachinePointerInfo());
3117         return Load;
3118       } else {
3119         SDValue GA = DAG.getTargetGlobalAddress(GV, DL, Ty, GSDN->getOffset(),
3120                                                 PPCII::MO_PCREL_FLAG);
3121         return DAG.getNode(PPCISD::MAT_PCREL_ADDR, DL, Ty, GA);
3122       }
3123     }
3124     setUsesTOCBasePtr(DAG);
3125     SDValue GA = DAG.getTargetGlobalAddress(GV, DL, PtrVT, GSDN->getOffset());
3126     return getTOCEntry(DAG, DL, GA);
3127   }
3128 
3129   unsigned MOHiFlag, MOLoFlag;
3130   bool IsPIC = isPositionIndependent();
3131   getLabelAccessInfo(IsPIC, Subtarget, MOHiFlag, MOLoFlag, GV);
3132 
3133   if (IsPIC && Subtarget.isSVR4ABI()) {
3134     SDValue GA = DAG.getTargetGlobalAddress(GV, DL, PtrVT,
3135                                             GSDN->getOffset(),
3136                                             PPCII::MO_PIC_FLAG);
3137     return getTOCEntry(DAG, DL, GA);
3138   }
3139 
3140   SDValue GAHi =
3141     DAG.getTargetGlobalAddress(GV, DL, PtrVT, GSDN->getOffset(), MOHiFlag);
3142   SDValue GALo =
3143     DAG.getTargetGlobalAddress(GV, DL, PtrVT, GSDN->getOffset(), MOLoFlag);
3144 
3145   return LowerLabelRef(GAHi, GALo, IsPIC, DAG);
3146 }
3147 
3148 SDValue PPCTargetLowering::LowerSETCC(SDValue Op, SelectionDAG &DAG) const {
3149   ISD::CondCode CC = cast<CondCodeSDNode>(Op.getOperand(2))->get();
3150   SDLoc dl(Op);
3151 
3152   if (Op.getValueType() == MVT::v2i64) {
3153     // When the operands themselves are v2i64 values, we need to do something
3154     // special because VSX has no underlying comparison operations for these.
3155     if (Op.getOperand(0).getValueType() == MVT::v2i64) {
3156       // Equality can be handled by casting to the legal type for Altivec
3157       // comparisons, everything else needs to be expanded.
3158       if (CC == ISD::SETEQ || CC == ISD::SETNE) {
3159         return DAG.getNode(ISD::BITCAST, dl, MVT::v2i64,
3160                  DAG.getSetCC(dl, MVT::v4i32,
3161                    DAG.getNode(ISD::BITCAST, dl, MVT::v4i32, Op.getOperand(0)),
3162                    DAG.getNode(ISD::BITCAST, dl, MVT::v4i32, Op.getOperand(1)),
3163                    CC));
3164       }
3165 
3166       return SDValue();
3167     }
3168 
3169     // We handle most of these in the usual way.
3170     return Op;
3171   }
3172 
3173   // If we're comparing for equality to zero, expose the fact that this is
3174   // implemented as a ctlz/srl pair on ppc, so that the dag combiner can
3175   // fold the new nodes.
3176   if (SDValue V = lowerCmpEqZeroToCtlzSrl(Op, DAG))
3177     return V;
3178 
3179   if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op.getOperand(1))) {
3180     // Leave comparisons against 0 and -1 alone for now, since they're usually
3181     // optimized.  FIXME: revisit this when we can custom lower all setcc
3182     // optimizations.
3183     if (C->isAllOnesValue() || C->isNullValue())
3184       return SDValue();
3185   }
3186 
3187   // If we have an integer seteq/setne, turn it into a compare against zero
3188   // by xor'ing the rhs with the lhs, which is faster than setting a
3189   // condition register, reading it back out, and masking the correct bit.  The
3190   // normal approach here uses sub to do this instead of xor.  Using xor exposes
3191   // the result to other bit-twiddling opportunities.
3192   EVT LHSVT = Op.getOperand(0).getValueType();
3193   if (LHSVT.isInteger() && (CC == ISD::SETEQ || CC == ISD::SETNE)) {
3194     EVT VT = Op.getValueType();
3195     SDValue Sub = DAG.getNode(ISD::XOR, dl, LHSVT, Op.getOperand(0),
3196                                 Op.getOperand(1));
3197     return DAG.getSetCC(dl, VT, Sub, DAG.getConstant(0, dl, LHSVT), CC);
3198   }
3199   return SDValue();
3200 }
3201 
3202 SDValue PPCTargetLowering::LowerVAARG(SDValue Op, SelectionDAG &DAG) const {
3203   SDNode *Node = Op.getNode();
3204   EVT VT = Node->getValueType(0);
3205   EVT PtrVT = getPointerTy(DAG.getDataLayout());
3206   SDValue InChain = Node->getOperand(0);
3207   SDValue VAListPtr = Node->getOperand(1);
3208   const Value *SV = cast<SrcValueSDNode>(Node->getOperand(2))->getValue();
3209   SDLoc dl(Node);
3210 
3211   assert(!Subtarget.isPPC64() && "LowerVAARG is PPC32 only");
3212 
3213   // gpr_index
3214   SDValue GprIndex = DAG.getExtLoad(ISD::ZEXTLOAD, dl, MVT::i32, InChain,
3215                                     VAListPtr, MachinePointerInfo(SV), MVT::i8);
3216   InChain = GprIndex.getValue(1);
3217 
3218   if (VT == MVT::i64) {
3219     // Check if GprIndex is even
3220     SDValue GprAnd = DAG.getNode(ISD::AND, dl, MVT::i32, GprIndex,
3221                                  DAG.getConstant(1, dl, MVT::i32));
3222     SDValue CC64 = DAG.getSetCC(dl, MVT::i32, GprAnd,
3223                                 DAG.getConstant(0, dl, MVT::i32), ISD::SETNE);
3224     SDValue GprIndexPlusOne = DAG.getNode(ISD::ADD, dl, MVT::i32, GprIndex,
3225                                           DAG.getConstant(1, dl, MVT::i32));
3226     // Align GprIndex to be even if it isn't
3227     GprIndex = DAG.getNode(ISD::SELECT, dl, MVT::i32, CC64, GprIndexPlusOne,
3228                            GprIndex);
3229   }
3230 
3231   // fpr index is 1 byte after gpr
3232   SDValue FprPtr = DAG.getNode(ISD::ADD, dl, PtrVT, VAListPtr,
3233                                DAG.getConstant(1, dl, MVT::i32));
3234 
3235   // fpr
3236   SDValue FprIndex = DAG.getExtLoad(ISD::ZEXTLOAD, dl, MVT::i32, InChain,
3237                                     FprPtr, MachinePointerInfo(SV), MVT::i8);
3238   InChain = FprIndex.getValue(1);
3239 
3240   SDValue RegSaveAreaPtr = DAG.getNode(ISD::ADD, dl, PtrVT, VAListPtr,
3241                                        DAG.getConstant(8, dl, MVT::i32));
3242 
3243   SDValue OverflowAreaPtr = DAG.getNode(ISD::ADD, dl, PtrVT, VAListPtr,
3244                                         DAG.getConstant(4, dl, MVT::i32));
3245 
3246   // areas
3247   SDValue OverflowArea =
3248       DAG.getLoad(MVT::i32, dl, InChain, OverflowAreaPtr, MachinePointerInfo());
3249   InChain = OverflowArea.getValue(1);
3250 
3251   SDValue RegSaveArea =
3252       DAG.getLoad(MVT::i32, dl, InChain, RegSaveAreaPtr, MachinePointerInfo());
3253   InChain = RegSaveArea.getValue(1);
3254 
3255   // select overflow_area if index > 8
3256   SDValue CC = DAG.getSetCC(dl, MVT::i32, VT.isInteger() ? GprIndex : FprIndex,
3257                             DAG.getConstant(8, dl, MVT::i32), ISD::SETLT);
3258 
3259   // adjustment constant gpr_index * 4/8
3260   SDValue RegConstant = DAG.getNode(ISD::MUL, dl, MVT::i32,
3261                                     VT.isInteger() ? GprIndex : FprIndex,
3262                                     DAG.getConstant(VT.isInteger() ? 4 : 8, dl,
3263                                                     MVT::i32));
3264 
3265   // OurReg = RegSaveArea + RegConstant
3266   SDValue OurReg = DAG.getNode(ISD::ADD, dl, PtrVT, RegSaveArea,
3267                                RegConstant);
3268 
3269   // Floating types are 32 bytes into RegSaveArea
3270   if (VT.isFloatingPoint())
3271     OurReg = DAG.getNode(ISD::ADD, dl, PtrVT, OurReg,
3272                          DAG.getConstant(32, dl, MVT::i32));
3273 
3274   // increase {f,g}pr_index by 1 (or 2 if VT is i64)
3275   SDValue IndexPlus1 = DAG.getNode(ISD::ADD, dl, MVT::i32,
3276                                    VT.isInteger() ? GprIndex : FprIndex,
3277                                    DAG.getConstant(VT == MVT::i64 ? 2 : 1, dl,
3278                                                    MVT::i32));
3279 
3280   InChain = DAG.getTruncStore(InChain, dl, IndexPlus1,
3281                               VT.isInteger() ? VAListPtr : FprPtr,
3282                               MachinePointerInfo(SV), MVT::i8);
3283 
3284   // determine if we should load from reg_save_area or overflow_area
3285   SDValue Result = DAG.getNode(ISD::SELECT, dl, PtrVT, CC, OurReg, OverflowArea);
3286 
3287   // increase overflow_area by 4/8 if gpr/fpr > 8
3288   SDValue OverflowAreaPlusN = DAG.getNode(ISD::ADD, dl, PtrVT, OverflowArea,
3289                                           DAG.getConstant(VT.isInteger() ? 4 : 8,
3290                                           dl, MVT::i32));
3291 
3292   OverflowArea = DAG.getNode(ISD::SELECT, dl, MVT::i32, CC, OverflowArea,
3293                              OverflowAreaPlusN);
3294 
3295   InChain = DAG.getTruncStore(InChain, dl, OverflowArea, OverflowAreaPtr,
3296                               MachinePointerInfo(), MVT::i32);
3297 
3298   return DAG.getLoad(VT, dl, InChain, Result, MachinePointerInfo());
3299 }
3300 
3301 SDValue PPCTargetLowering::LowerVACOPY(SDValue Op, SelectionDAG &DAG) const {
3302   assert(!Subtarget.isPPC64() && "LowerVACOPY is PPC32 only");
3303 
3304   // We have to copy the entire va_list struct:
3305   // 2*sizeof(char) + 2 Byte alignment + 2*sizeof(char*) = 12 Byte
3306   return DAG.getMemcpy(Op.getOperand(0), Op, Op.getOperand(1), Op.getOperand(2),
3307                        DAG.getConstant(12, SDLoc(Op), MVT::i32), Align(8),
3308                        false, true, false, MachinePointerInfo(),
3309                        MachinePointerInfo());
3310 }
3311 
3312 SDValue PPCTargetLowering::LowerADJUST_TRAMPOLINE(SDValue Op,
3313                                                   SelectionDAG &DAG) const {
3314   if (Subtarget.isAIXABI())
3315     report_fatal_error("ADJUST_TRAMPOLINE operation is not supported on AIX.");
3316 
3317   return Op.getOperand(0);
3318 }
3319 
3320 SDValue PPCTargetLowering::LowerINIT_TRAMPOLINE(SDValue Op,
3321                                                 SelectionDAG &DAG) const {
3322   if (Subtarget.isAIXABI())
3323     report_fatal_error("INIT_TRAMPOLINE operation is not supported on AIX.");
3324 
3325   SDValue Chain = Op.getOperand(0);
3326   SDValue Trmp = Op.getOperand(1); // trampoline
3327   SDValue FPtr = Op.getOperand(2); // nested function
3328   SDValue Nest = Op.getOperand(3); // 'nest' parameter value
3329   SDLoc dl(Op);
3330 
3331   EVT PtrVT = getPointerTy(DAG.getDataLayout());
3332   bool isPPC64 = (PtrVT == MVT::i64);
3333   Type *IntPtrTy = DAG.getDataLayout().getIntPtrType(*DAG.getContext());
3334 
3335   TargetLowering::ArgListTy Args;
3336   TargetLowering::ArgListEntry Entry;
3337 
3338   Entry.Ty = IntPtrTy;
3339   Entry.Node = Trmp; Args.push_back(Entry);
3340 
3341   // TrampSize == (isPPC64 ? 48 : 40);
3342   Entry.Node = DAG.getConstant(isPPC64 ? 48 : 40, dl,
3343                                isPPC64 ? MVT::i64 : MVT::i32);
3344   Args.push_back(Entry);
3345 
3346   Entry.Node = FPtr; Args.push_back(Entry);
3347   Entry.Node = Nest; Args.push_back(Entry);
3348 
3349   // Lower to a call to __trampoline_setup(Trmp, TrampSize, FPtr, ctx_reg)
3350   TargetLowering::CallLoweringInfo CLI(DAG);
3351   CLI.setDebugLoc(dl).setChain(Chain).setLibCallee(
3352       CallingConv::C, Type::getVoidTy(*DAG.getContext()),
3353       DAG.getExternalSymbol("__trampoline_setup", PtrVT), std::move(Args));
3354 
3355   std::pair<SDValue, SDValue> CallResult = LowerCallTo(CLI);
3356   return CallResult.second;
3357 }
3358 
3359 SDValue PPCTargetLowering::LowerVASTART(SDValue Op, SelectionDAG &DAG) const {
3360   MachineFunction &MF = DAG.getMachineFunction();
3361   PPCFunctionInfo *FuncInfo = MF.getInfo<PPCFunctionInfo>();
3362   EVT PtrVT = getPointerTy(MF.getDataLayout());
3363 
3364   SDLoc dl(Op);
3365 
3366   if (Subtarget.isPPC64() || Subtarget.isAIXABI()) {
3367     // vastart just stores the address of the VarArgsFrameIndex slot into the
3368     // memory location argument.
3369     SDValue FR = DAG.getFrameIndex(FuncInfo->getVarArgsFrameIndex(), PtrVT);
3370     const Value *SV = cast<SrcValueSDNode>(Op.getOperand(2))->getValue();
3371     return DAG.getStore(Op.getOperand(0), dl, FR, Op.getOperand(1),
3372                         MachinePointerInfo(SV));
3373   }
3374 
3375   // For the 32-bit SVR4 ABI we follow the layout of the va_list struct.
3376   // We suppose the given va_list is already allocated.
3377   //
3378   // typedef struct {
3379   //  char gpr;     /* index into the array of 8 GPRs
3380   //                 * stored in the register save area
3381   //                 * gpr=0 corresponds to r3,
3382   //                 * gpr=1 to r4, etc.
3383   //                 */
3384   //  char fpr;     /* index into the array of 8 FPRs
3385   //                 * stored in the register save area
3386   //                 * fpr=0 corresponds to f1,
3387   //                 * fpr=1 to f2, etc.
3388   //                 */
3389   //  char *overflow_arg_area;
3390   //                /* location on stack that holds
3391   //                 * the next overflow argument
3392   //                 */
3393   //  char *reg_save_area;
3394   //               /* where r3:r10 and f1:f8 (if saved)
3395   //                * are stored
3396   //                */
3397   // } va_list[1];
3398 
3399   SDValue ArgGPR = DAG.getConstant(FuncInfo->getVarArgsNumGPR(), dl, MVT::i32);
3400   SDValue ArgFPR = DAG.getConstant(FuncInfo->getVarArgsNumFPR(), dl, MVT::i32);
3401   SDValue StackOffsetFI = DAG.getFrameIndex(FuncInfo->getVarArgsStackOffset(),
3402                                             PtrVT);
3403   SDValue FR = DAG.getFrameIndex(FuncInfo->getVarArgsFrameIndex(),
3404                                  PtrVT);
3405 
3406   uint64_t FrameOffset = PtrVT.getSizeInBits()/8;
3407   SDValue ConstFrameOffset = DAG.getConstant(FrameOffset, dl, PtrVT);
3408 
3409   uint64_t StackOffset = PtrVT.getSizeInBits()/8 - 1;
3410   SDValue ConstStackOffset = DAG.getConstant(StackOffset, dl, PtrVT);
3411 
3412   uint64_t FPROffset = 1;
3413   SDValue ConstFPROffset = DAG.getConstant(FPROffset, dl, PtrVT);
3414 
3415   const Value *SV = cast<SrcValueSDNode>(Op.getOperand(2))->getValue();
3416 
3417   // Store first byte : number of int regs
3418   SDValue firstStore =
3419       DAG.getTruncStore(Op.getOperand(0), dl, ArgGPR, Op.getOperand(1),
3420                         MachinePointerInfo(SV), MVT::i8);
3421   uint64_t nextOffset = FPROffset;
3422   SDValue nextPtr = DAG.getNode(ISD::ADD, dl, PtrVT, Op.getOperand(1),
3423                                   ConstFPROffset);
3424 
3425   // Store second byte : number of float regs
3426   SDValue secondStore =
3427       DAG.getTruncStore(firstStore, dl, ArgFPR, nextPtr,
3428                         MachinePointerInfo(SV, nextOffset), MVT::i8);
3429   nextOffset += StackOffset;
3430   nextPtr = DAG.getNode(ISD::ADD, dl, PtrVT, nextPtr, ConstStackOffset);
3431 
3432   // Store second word : arguments given on stack
3433   SDValue thirdStore = DAG.getStore(secondStore, dl, StackOffsetFI, nextPtr,
3434                                     MachinePointerInfo(SV, nextOffset));
3435   nextOffset += FrameOffset;
3436   nextPtr = DAG.getNode(ISD::ADD, dl, PtrVT, nextPtr, ConstFrameOffset);
3437 
3438   // Store third word : arguments given in registers
3439   return DAG.getStore(thirdStore, dl, FR, nextPtr,
3440                       MachinePointerInfo(SV, nextOffset));
3441 }
3442 
3443 /// FPR - The set of FP registers that should be allocated for arguments
3444 /// on Darwin and AIX.
3445 static const MCPhysReg FPR[] = {PPC::F1,  PPC::F2,  PPC::F3, PPC::F4, PPC::F5,
3446                                 PPC::F6,  PPC::F7,  PPC::F8, PPC::F9, PPC::F10,
3447                                 PPC::F11, PPC::F12, PPC::F13};
3448 
3449 /// CalculateStackSlotSize - Calculates the size reserved for this argument on
3450 /// the stack.
3451 static unsigned CalculateStackSlotSize(EVT ArgVT, ISD::ArgFlagsTy Flags,
3452                                        unsigned PtrByteSize) {
3453   unsigned ArgSize = ArgVT.getStoreSize();
3454   if (Flags.isByVal())
3455     ArgSize = Flags.getByValSize();
3456 
3457   // Round up to multiples of the pointer size, except for array members,
3458   // which are always packed.
3459   if (!Flags.isInConsecutiveRegs())
3460     ArgSize = ((ArgSize + PtrByteSize - 1)/PtrByteSize) * PtrByteSize;
3461 
3462   return ArgSize;
3463 }
3464 
3465 /// CalculateStackSlotAlignment - Calculates the alignment of this argument
3466 /// on the stack.
3467 static Align CalculateStackSlotAlignment(EVT ArgVT, EVT OrigVT,
3468                                          ISD::ArgFlagsTy Flags,
3469                                          unsigned PtrByteSize) {
3470   Align Alignment(PtrByteSize);
3471 
3472   // Altivec parameters are padded to a 16 byte boundary.
3473   if (ArgVT == MVT::v4f32 || ArgVT == MVT::v4i32 ||
3474       ArgVT == MVT::v8i16 || ArgVT == MVT::v16i8 ||
3475       ArgVT == MVT::v2f64 || ArgVT == MVT::v2i64 ||
3476       ArgVT == MVT::v1i128 || ArgVT == MVT::f128)
3477     Alignment = Align(16);
3478 
3479   // ByVal parameters are aligned as requested.
3480   if (Flags.isByVal()) {
3481     auto BVAlign = Flags.getNonZeroByValAlign();
3482     if (BVAlign > PtrByteSize) {
3483       if (BVAlign.value() % PtrByteSize != 0)
3484         llvm_unreachable(
3485             "ByVal alignment is not a multiple of the pointer size");
3486 
3487       Alignment = BVAlign;
3488     }
3489   }
3490 
3491   // Array members are always packed to their original alignment.
3492   if (Flags.isInConsecutiveRegs()) {
3493     // If the array member was split into multiple registers, the first
3494     // needs to be aligned to the size of the full type.  (Except for
3495     // ppcf128, which is only aligned as its f64 components.)
3496     if (Flags.isSplit() && OrigVT != MVT::ppcf128)
3497       Alignment = Align(OrigVT.getStoreSize());
3498     else
3499       Alignment = Align(ArgVT.getStoreSize());
3500   }
3501 
3502   return Alignment;
3503 }
3504 
3505 /// CalculateStackSlotUsed - Return whether this argument will use its
3506 /// stack slot (instead of being passed in registers).  ArgOffset,
3507 /// AvailableFPRs, and AvailableVRs must hold the current argument
3508 /// position, and will be updated to account for this argument.
3509 static bool CalculateStackSlotUsed(EVT ArgVT, EVT OrigVT, ISD::ArgFlagsTy Flags,
3510                                    unsigned PtrByteSize, unsigned LinkageSize,
3511                                    unsigned ParamAreaSize, unsigned &ArgOffset,
3512                                    unsigned &AvailableFPRs,
3513                                    unsigned &AvailableVRs) {
3514   bool UseMemory = false;
3515 
3516   // Respect alignment of argument on the stack.
3517   Align Alignment =
3518       CalculateStackSlotAlignment(ArgVT, OrigVT, Flags, PtrByteSize);
3519   ArgOffset = alignTo(ArgOffset, Alignment);
3520   // If there's no space left in the argument save area, we must
3521   // use memory (this check also catches zero-sized arguments).
3522   if (ArgOffset >= LinkageSize + ParamAreaSize)
3523     UseMemory = true;
3524 
3525   // Allocate argument on the stack.
3526   ArgOffset += CalculateStackSlotSize(ArgVT, Flags, PtrByteSize);
3527   if (Flags.isInConsecutiveRegsLast())
3528     ArgOffset = ((ArgOffset + PtrByteSize - 1)/PtrByteSize) * PtrByteSize;
3529   // If we overran the argument save area, we must use memory
3530   // (this check catches arguments passed partially in memory)
3531   if (ArgOffset > LinkageSize + ParamAreaSize)
3532     UseMemory = true;
3533 
3534   // However, if the argument is actually passed in an FPR or a VR,
3535   // we don't use memory after all.
3536   if (!Flags.isByVal()) {
3537     if (ArgVT == MVT::f32 || ArgVT == MVT::f64)
3538       if (AvailableFPRs > 0) {
3539         --AvailableFPRs;
3540         return false;
3541       }
3542     if (ArgVT == MVT::v4f32 || ArgVT == MVT::v4i32 ||
3543         ArgVT == MVT::v8i16 || ArgVT == MVT::v16i8 ||
3544         ArgVT == MVT::v2f64 || ArgVT == MVT::v2i64 ||
3545         ArgVT == MVT::v1i128 || ArgVT == MVT::f128)
3546       if (AvailableVRs > 0) {
3547         --AvailableVRs;
3548         return false;
3549       }
3550   }
3551 
3552   return UseMemory;
3553 }
3554 
3555 /// EnsureStackAlignment - Round stack frame size up from NumBytes to
3556 /// ensure minimum alignment required for target.
3557 static unsigned EnsureStackAlignment(const PPCFrameLowering *Lowering,
3558                                      unsigned NumBytes) {
3559   return alignTo(NumBytes, Lowering->getStackAlign());
3560 }
3561 
3562 SDValue PPCTargetLowering::LowerFormalArguments(
3563     SDValue Chain, CallingConv::ID CallConv, bool isVarArg,
3564     const SmallVectorImpl<ISD::InputArg> &Ins, const SDLoc &dl,
3565     SelectionDAG &DAG, SmallVectorImpl<SDValue> &InVals) const {
3566   if (Subtarget.isAIXABI())
3567     return LowerFormalArguments_AIX(Chain, CallConv, isVarArg, Ins, dl, DAG,
3568                                     InVals);
3569   if (Subtarget.is64BitELFABI())
3570     return LowerFormalArguments_64SVR4(Chain, CallConv, isVarArg, Ins, dl, DAG,
3571                                        InVals);
3572   if (Subtarget.is32BitELFABI())
3573     return LowerFormalArguments_32SVR4(Chain, CallConv, isVarArg, Ins, dl, DAG,
3574                                        InVals);
3575 
3576   return LowerFormalArguments_Darwin(Chain, CallConv, isVarArg, Ins, dl, DAG,
3577                                      InVals);
3578 }
3579 
3580 SDValue PPCTargetLowering::LowerFormalArguments_32SVR4(
3581     SDValue Chain, CallingConv::ID CallConv, bool isVarArg,
3582     const SmallVectorImpl<ISD::InputArg> &Ins, const SDLoc &dl,
3583     SelectionDAG &DAG, SmallVectorImpl<SDValue> &InVals) const {
3584 
3585   // 32-bit SVR4 ABI Stack Frame Layout:
3586   //              +-----------------------------------+
3587   //        +-->  |            Back chain             |
3588   //        |     +-----------------------------------+
3589   //        |     | Floating-point register save area |
3590   //        |     +-----------------------------------+
3591   //        |     |    General register save area     |
3592   //        |     +-----------------------------------+
3593   //        |     |          CR save word             |
3594   //        |     +-----------------------------------+
3595   //        |     |         VRSAVE save word          |
3596   //        |     +-----------------------------------+
3597   //        |     |         Alignment padding         |
3598   //        |     +-----------------------------------+
3599   //        |     |     Vector register save area     |
3600   //        |     +-----------------------------------+
3601   //        |     |       Local variable space        |
3602   //        |     +-----------------------------------+
3603   //        |     |        Parameter list area        |
3604   //        |     +-----------------------------------+
3605   //        |     |           LR save word            |
3606   //        |     +-----------------------------------+
3607   // SP-->  +---  |            Back chain             |
3608   //              +-----------------------------------+
3609   //
3610   // Specifications:
3611   //   System V Application Binary Interface PowerPC Processor Supplement
3612   //   AltiVec Technology Programming Interface Manual
3613 
3614   MachineFunction &MF = DAG.getMachineFunction();
3615   MachineFrameInfo &MFI = MF.getFrameInfo();
3616   PPCFunctionInfo *FuncInfo = MF.getInfo<PPCFunctionInfo>();
3617 
3618   EVT PtrVT = getPointerTy(MF.getDataLayout());
3619   // Potential tail calls could cause overwriting of argument stack slots.
3620   bool isImmutable = !(getTargetMachine().Options.GuaranteedTailCallOpt &&
3621                        (CallConv == CallingConv::Fast));
3622   const Align PtrAlign(4);
3623 
3624   // Assign locations to all of the incoming arguments.
3625   SmallVector<CCValAssign, 16> ArgLocs;
3626   PPCCCState CCInfo(CallConv, isVarArg, DAG.getMachineFunction(), ArgLocs,
3627                  *DAG.getContext());
3628 
3629   // Reserve space for the linkage area on the stack.
3630   unsigned LinkageSize = Subtarget.getFrameLowering()->getLinkageSize();
3631   CCInfo.AllocateStack(LinkageSize, PtrAlign);
3632   if (useSoftFloat())
3633     CCInfo.PreAnalyzeFormalArguments(Ins);
3634 
3635   CCInfo.AnalyzeFormalArguments(Ins, CC_PPC32_SVR4);
3636   CCInfo.clearWasPPCF128();
3637 
3638   for (unsigned i = 0, e = ArgLocs.size(); i != e; ++i) {
3639     CCValAssign &VA = ArgLocs[i];
3640 
3641     // Arguments stored in registers.
3642     if (VA.isRegLoc()) {
3643       const TargetRegisterClass *RC;
3644       EVT ValVT = VA.getValVT();
3645 
3646       switch (ValVT.getSimpleVT().SimpleTy) {
3647         default:
3648           llvm_unreachable("ValVT not supported by formal arguments Lowering");
3649         case MVT::i1:
3650         case MVT::i32:
3651           RC = &PPC::GPRCRegClass;
3652           break;
3653         case MVT::f32:
3654           if (Subtarget.hasP8Vector())
3655             RC = &PPC::VSSRCRegClass;
3656           else if (Subtarget.hasSPE())
3657             RC = &PPC::GPRCRegClass;
3658           else
3659             RC = &PPC::F4RCRegClass;
3660           break;
3661         case MVT::f64:
3662           if (Subtarget.hasVSX())
3663             RC = &PPC::VSFRCRegClass;
3664           else if (Subtarget.hasSPE())
3665             // SPE passes doubles in GPR pairs.
3666             RC = &PPC::GPRCRegClass;
3667           else
3668             RC = &PPC::F8RCRegClass;
3669           break;
3670         case MVT::v16i8:
3671         case MVT::v8i16:
3672         case MVT::v4i32:
3673           RC = &PPC::VRRCRegClass;
3674           break;
3675         case MVT::v4f32:
3676           RC = &PPC::VRRCRegClass;
3677           break;
3678         case MVT::v2f64:
3679         case MVT::v2i64:
3680           RC = &PPC::VRRCRegClass;
3681           break;
3682       }
3683 
3684       SDValue ArgValue;
3685       // Transform the arguments stored in physical registers into
3686       // virtual ones.
3687       if (VA.getLocVT() == MVT::f64 && Subtarget.hasSPE()) {
3688         assert(i + 1 < e && "No second half of double precision argument");
3689         unsigned RegLo = MF.addLiveIn(VA.getLocReg(), RC);
3690         unsigned RegHi = MF.addLiveIn(ArgLocs[++i].getLocReg(), RC);
3691         SDValue ArgValueLo = DAG.getCopyFromReg(Chain, dl, RegLo, MVT::i32);
3692         SDValue ArgValueHi = DAG.getCopyFromReg(Chain, dl, RegHi, MVT::i32);
3693         if (!Subtarget.isLittleEndian())
3694           std::swap (ArgValueLo, ArgValueHi);
3695         ArgValue = DAG.getNode(PPCISD::BUILD_SPE64, dl, MVT::f64, ArgValueLo,
3696                                ArgValueHi);
3697       } else {
3698         unsigned Reg = MF.addLiveIn(VA.getLocReg(), RC);
3699         ArgValue = DAG.getCopyFromReg(Chain, dl, Reg,
3700                                       ValVT == MVT::i1 ? MVT::i32 : ValVT);
3701         if (ValVT == MVT::i1)
3702           ArgValue = DAG.getNode(ISD::TRUNCATE, dl, MVT::i1, ArgValue);
3703       }
3704 
3705       InVals.push_back(ArgValue);
3706     } else {
3707       // Argument stored in memory.
3708       assert(VA.isMemLoc());
3709 
3710       // Get the extended size of the argument type in stack
3711       unsigned ArgSize = VA.getLocVT().getStoreSize();
3712       // Get the actual size of the argument type
3713       unsigned ObjSize = VA.getValVT().getStoreSize();
3714       unsigned ArgOffset = VA.getLocMemOffset();
3715       // Stack objects in PPC32 are right justified.
3716       ArgOffset += ArgSize - ObjSize;
3717       int FI = MFI.CreateFixedObject(ArgSize, ArgOffset, isImmutable);
3718 
3719       // Create load nodes to retrieve arguments from the stack.
3720       SDValue FIN = DAG.getFrameIndex(FI, PtrVT);
3721       InVals.push_back(
3722           DAG.getLoad(VA.getValVT(), dl, Chain, FIN, MachinePointerInfo()));
3723     }
3724   }
3725 
3726   // Assign locations to all of the incoming aggregate by value arguments.
3727   // Aggregates passed by value are stored in the local variable space of the
3728   // caller's stack frame, right above the parameter list area.
3729   SmallVector<CCValAssign, 16> ByValArgLocs;
3730   CCState CCByValInfo(CallConv, isVarArg, DAG.getMachineFunction(),
3731                       ByValArgLocs, *DAG.getContext());
3732 
3733   // Reserve stack space for the allocations in CCInfo.
3734   CCByValInfo.AllocateStack(CCInfo.getNextStackOffset(), PtrAlign);
3735 
3736   CCByValInfo.AnalyzeFormalArguments(Ins, CC_PPC32_SVR4_ByVal);
3737 
3738   // Area that is at least reserved in the caller of this function.
3739   unsigned MinReservedArea = CCByValInfo.getNextStackOffset();
3740   MinReservedArea = std::max(MinReservedArea, LinkageSize);
3741 
3742   // Set the size that is at least reserved in caller of this function.  Tail
3743   // call optimized function's reserved stack space needs to be aligned so that
3744   // taking the difference between two stack areas will result in an aligned
3745   // stack.
3746   MinReservedArea =
3747       EnsureStackAlignment(Subtarget.getFrameLowering(), MinReservedArea);
3748   FuncInfo->setMinReservedArea(MinReservedArea);
3749 
3750   SmallVector<SDValue, 8> MemOps;
3751 
3752   // If the function takes variable number of arguments, make a frame index for
3753   // the start of the first vararg value... for expansion of llvm.va_start.
3754   if (isVarArg) {
3755     static const MCPhysReg GPArgRegs[] = {
3756       PPC::R3, PPC::R4, PPC::R5, PPC::R6,
3757       PPC::R7, PPC::R8, PPC::R9, PPC::R10,
3758     };
3759     const unsigned NumGPArgRegs = array_lengthof(GPArgRegs);
3760 
3761     static const MCPhysReg FPArgRegs[] = {
3762       PPC::F1, PPC::F2, PPC::F3, PPC::F4, PPC::F5, PPC::F6, PPC::F7,
3763       PPC::F8
3764     };
3765     unsigned NumFPArgRegs = array_lengthof(FPArgRegs);
3766 
3767     if (useSoftFloat() || hasSPE())
3768        NumFPArgRegs = 0;
3769 
3770     FuncInfo->setVarArgsNumGPR(CCInfo.getFirstUnallocated(GPArgRegs));
3771     FuncInfo->setVarArgsNumFPR(CCInfo.getFirstUnallocated(FPArgRegs));
3772 
3773     // Make room for NumGPArgRegs and NumFPArgRegs.
3774     int Depth = NumGPArgRegs * PtrVT.getSizeInBits()/8 +
3775                 NumFPArgRegs * MVT(MVT::f64).getSizeInBits()/8;
3776 
3777     FuncInfo->setVarArgsStackOffset(
3778       MFI.CreateFixedObject(PtrVT.getSizeInBits()/8,
3779                             CCInfo.getNextStackOffset(), true));
3780 
3781     FuncInfo->setVarArgsFrameIndex(
3782         MFI.CreateStackObject(Depth, Align(8), false));
3783     SDValue FIN = DAG.getFrameIndex(FuncInfo->getVarArgsFrameIndex(), PtrVT);
3784 
3785     // The fixed integer arguments of a variadic function are stored to the
3786     // VarArgsFrameIndex on the stack so that they may be loaded by
3787     // dereferencing the result of va_next.
3788     for (unsigned GPRIndex = 0; GPRIndex != NumGPArgRegs; ++GPRIndex) {
3789       // Get an existing live-in vreg, or add a new one.
3790       unsigned VReg = MF.getRegInfo().getLiveInVirtReg(GPArgRegs[GPRIndex]);
3791       if (!VReg)
3792         VReg = MF.addLiveIn(GPArgRegs[GPRIndex], &PPC::GPRCRegClass);
3793 
3794       SDValue Val = DAG.getCopyFromReg(Chain, dl, VReg, PtrVT);
3795       SDValue Store =
3796           DAG.getStore(Val.getValue(1), dl, Val, FIN, MachinePointerInfo());
3797       MemOps.push_back(Store);
3798       // Increment the address by four for the next argument to store
3799       SDValue PtrOff = DAG.getConstant(PtrVT.getSizeInBits()/8, dl, PtrVT);
3800       FIN = DAG.getNode(ISD::ADD, dl, PtrOff.getValueType(), FIN, PtrOff);
3801     }
3802 
3803     // FIXME 32-bit SVR4: We only need to save FP argument registers if CR bit 6
3804     // is set.
3805     // The double arguments are stored to the VarArgsFrameIndex
3806     // on the stack.
3807     for (unsigned FPRIndex = 0; FPRIndex != NumFPArgRegs; ++FPRIndex) {
3808       // Get an existing live-in vreg, or add a new one.
3809       unsigned VReg = MF.getRegInfo().getLiveInVirtReg(FPArgRegs[FPRIndex]);
3810       if (!VReg)
3811         VReg = MF.addLiveIn(FPArgRegs[FPRIndex], &PPC::F8RCRegClass);
3812 
3813       SDValue Val = DAG.getCopyFromReg(Chain, dl, VReg, MVT::f64);
3814       SDValue Store =
3815           DAG.getStore(Val.getValue(1), dl, Val, FIN, MachinePointerInfo());
3816       MemOps.push_back(Store);
3817       // Increment the address by eight for the next argument to store
3818       SDValue PtrOff = DAG.getConstant(MVT(MVT::f64).getSizeInBits()/8, dl,
3819                                          PtrVT);
3820       FIN = DAG.getNode(ISD::ADD, dl, PtrOff.getValueType(), FIN, PtrOff);
3821     }
3822   }
3823 
3824   if (!MemOps.empty())
3825     Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, MemOps);
3826 
3827   return Chain;
3828 }
3829 
3830 // PPC64 passes i8, i16, and i32 values in i64 registers. Promote
3831 // value to MVT::i64 and then truncate to the correct register size.
3832 SDValue PPCTargetLowering::extendArgForPPC64(ISD::ArgFlagsTy Flags,
3833                                              EVT ObjectVT, SelectionDAG &DAG,
3834                                              SDValue ArgVal,
3835                                              const SDLoc &dl) const {
3836   if (Flags.isSExt())
3837     ArgVal = DAG.getNode(ISD::AssertSext, dl, MVT::i64, ArgVal,
3838                          DAG.getValueType(ObjectVT));
3839   else if (Flags.isZExt())
3840     ArgVal = DAG.getNode(ISD::AssertZext, dl, MVT::i64, ArgVal,
3841                          DAG.getValueType(ObjectVT));
3842 
3843   return DAG.getNode(ISD::TRUNCATE, dl, ObjectVT, ArgVal);
3844 }
3845 
3846 SDValue PPCTargetLowering::LowerFormalArguments_64SVR4(
3847     SDValue Chain, CallingConv::ID CallConv, bool isVarArg,
3848     const SmallVectorImpl<ISD::InputArg> &Ins, const SDLoc &dl,
3849     SelectionDAG &DAG, SmallVectorImpl<SDValue> &InVals) const {
3850   // TODO: add description of PPC stack frame format, or at least some docs.
3851   //
3852   bool isELFv2ABI = Subtarget.isELFv2ABI();
3853   bool isLittleEndian = Subtarget.isLittleEndian();
3854   MachineFunction &MF = DAG.getMachineFunction();
3855   MachineFrameInfo &MFI = MF.getFrameInfo();
3856   PPCFunctionInfo *FuncInfo = MF.getInfo<PPCFunctionInfo>();
3857 
3858   assert(!(CallConv == CallingConv::Fast && isVarArg) &&
3859          "fastcc not supported on varargs functions");
3860 
3861   EVT PtrVT = getPointerTy(MF.getDataLayout());
3862   // Potential tail calls could cause overwriting of argument stack slots.
3863   bool isImmutable = !(getTargetMachine().Options.GuaranteedTailCallOpt &&
3864                        (CallConv == CallingConv::Fast));
3865   unsigned PtrByteSize = 8;
3866   unsigned LinkageSize = Subtarget.getFrameLowering()->getLinkageSize();
3867 
3868   static const MCPhysReg GPR[] = {
3869     PPC::X3, PPC::X4, PPC::X5, PPC::X6,
3870     PPC::X7, PPC::X8, PPC::X9, PPC::X10,
3871   };
3872   static const MCPhysReg VR[] = {
3873     PPC::V2, PPC::V3, PPC::V4, PPC::V5, PPC::V6, PPC::V7, PPC::V8,
3874     PPC::V9, PPC::V10, PPC::V11, PPC::V12, PPC::V13
3875   };
3876 
3877   const unsigned Num_GPR_Regs = array_lengthof(GPR);
3878   const unsigned Num_FPR_Regs = useSoftFloat() ? 0 : 13;
3879   const unsigned Num_VR_Regs  = array_lengthof(VR);
3880 
3881   // Do a first pass over the arguments to determine whether the ABI
3882   // guarantees that our caller has allocated the parameter save area
3883   // on its stack frame.  In the ELFv1 ABI, this is always the case;
3884   // in the ELFv2 ABI, it is true if this is a vararg function or if
3885   // any parameter is located in a stack slot.
3886 
3887   bool HasParameterArea = !isELFv2ABI || isVarArg;
3888   unsigned ParamAreaSize = Num_GPR_Regs * PtrByteSize;
3889   unsigned NumBytes = LinkageSize;
3890   unsigned AvailableFPRs = Num_FPR_Regs;
3891   unsigned AvailableVRs = Num_VR_Regs;
3892   for (unsigned i = 0, e = Ins.size(); i != e; ++i) {
3893     if (Ins[i].Flags.isNest())
3894       continue;
3895 
3896     if (CalculateStackSlotUsed(Ins[i].VT, Ins[i].ArgVT, Ins[i].Flags,
3897                                PtrByteSize, LinkageSize, ParamAreaSize,
3898                                NumBytes, AvailableFPRs, AvailableVRs))
3899       HasParameterArea = true;
3900   }
3901 
3902   // Add DAG nodes to load the arguments or copy them out of registers.  On
3903   // entry to a function on PPC, the arguments start after the linkage area,
3904   // although the first ones are often in registers.
3905 
3906   unsigned ArgOffset = LinkageSize;
3907   unsigned GPR_idx = 0, FPR_idx = 0, VR_idx = 0;
3908   SmallVector<SDValue, 8> MemOps;
3909   Function::const_arg_iterator FuncArg = MF.getFunction().arg_begin();
3910   unsigned CurArgIdx = 0;
3911   for (unsigned ArgNo = 0, e = Ins.size(); ArgNo != e; ++ArgNo) {
3912     SDValue ArgVal;
3913     bool needsLoad = false;
3914     EVT ObjectVT = Ins[ArgNo].VT;
3915     EVT OrigVT = Ins[ArgNo].ArgVT;
3916     unsigned ObjSize = ObjectVT.getStoreSize();
3917     unsigned ArgSize = ObjSize;
3918     ISD::ArgFlagsTy Flags = Ins[ArgNo].Flags;
3919     if (Ins[ArgNo].isOrigArg()) {
3920       std::advance(FuncArg, Ins[ArgNo].getOrigArgIndex() - CurArgIdx);
3921       CurArgIdx = Ins[ArgNo].getOrigArgIndex();
3922     }
3923     // We re-align the argument offset for each argument, except when using the
3924     // fast calling convention, when we need to make sure we do that only when
3925     // we'll actually use a stack slot.
3926     unsigned CurArgOffset;
3927     Align Alignment;
3928     auto ComputeArgOffset = [&]() {
3929       /* Respect alignment of argument on the stack.  */
3930       Alignment =
3931           CalculateStackSlotAlignment(ObjectVT, OrigVT, Flags, PtrByteSize);
3932       ArgOffset = alignTo(ArgOffset, Alignment);
3933       CurArgOffset = ArgOffset;
3934     };
3935 
3936     if (CallConv != CallingConv::Fast) {
3937       ComputeArgOffset();
3938 
3939       /* Compute GPR index associated with argument offset.  */
3940       GPR_idx = (ArgOffset - LinkageSize) / PtrByteSize;
3941       GPR_idx = std::min(GPR_idx, Num_GPR_Regs);
3942     }
3943 
3944     // FIXME the codegen can be much improved in some cases.
3945     // We do not have to keep everything in memory.
3946     if (Flags.isByVal()) {
3947       assert(Ins[ArgNo].isOrigArg() && "Byval arguments cannot be implicit");
3948 
3949       if (CallConv == CallingConv::Fast)
3950         ComputeArgOffset();
3951 
3952       // ObjSize is the true size, ArgSize rounded up to multiple of registers.
3953       ObjSize = Flags.getByValSize();
3954       ArgSize = ((ObjSize + PtrByteSize - 1)/PtrByteSize) * PtrByteSize;
3955       // Empty aggregate parameters do not take up registers.  Examples:
3956       //   struct { } a;
3957       //   union  { } b;
3958       //   int c[0];
3959       // etc.  However, we have to provide a place-holder in InVals, so
3960       // pretend we have an 8-byte item at the current address for that
3961       // purpose.
3962       if (!ObjSize) {
3963         int FI = MFI.CreateFixedObject(PtrByteSize, ArgOffset, true);
3964         SDValue FIN = DAG.getFrameIndex(FI, PtrVT);
3965         InVals.push_back(FIN);
3966         continue;
3967       }
3968 
3969       // Create a stack object covering all stack doublewords occupied
3970       // by the argument.  If the argument is (fully or partially) on
3971       // the stack, or if the argument is fully in registers but the
3972       // caller has allocated the parameter save anyway, we can refer
3973       // directly to the caller's stack frame.  Otherwise, create a
3974       // local copy in our own frame.
3975       int FI;
3976       if (HasParameterArea ||
3977           ArgSize + ArgOffset > LinkageSize + Num_GPR_Regs * PtrByteSize)
3978         FI = MFI.CreateFixedObject(ArgSize, ArgOffset, false, true);
3979       else
3980         FI = MFI.CreateStackObject(ArgSize, Alignment, false);
3981       SDValue FIN = DAG.getFrameIndex(FI, PtrVT);
3982 
3983       // Handle aggregates smaller than 8 bytes.
3984       if (ObjSize < PtrByteSize) {
3985         // The value of the object is its address, which differs from the
3986         // address of the enclosing doubleword on big-endian systems.
3987         SDValue Arg = FIN;
3988         if (!isLittleEndian) {
3989           SDValue ArgOff = DAG.getConstant(PtrByteSize - ObjSize, dl, PtrVT);
3990           Arg = DAG.getNode(ISD::ADD, dl, ArgOff.getValueType(), Arg, ArgOff);
3991         }
3992         InVals.push_back(Arg);
3993 
3994         if (GPR_idx != Num_GPR_Regs) {
3995           unsigned VReg = MF.addLiveIn(GPR[GPR_idx++], &PPC::G8RCRegClass);
3996           FuncInfo->addLiveInAttr(VReg, Flags);
3997           SDValue Val = DAG.getCopyFromReg(Chain, dl, VReg, PtrVT);
3998           SDValue Store;
3999 
4000           if (ObjSize==1 || ObjSize==2 || ObjSize==4) {
4001             EVT ObjType = (ObjSize == 1 ? MVT::i8 :
4002                            (ObjSize == 2 ? MVT::i16 : MVT::i32));
4003             Store = DAG.getTruncStore(Val.getValue(1), dl, Val, Arg,
4004                                       MachinePointerInfo(&*FuncArg), ObjType);
4005           } else {
4006             // For sizes that don't fit a truncating store (3, 5, 6, 7),
4007             // store the whole register as-is to the parameter save area
4008             // slot.
4009             Store = DAG.getStore(Val.getValue(1), dl, Val, FIN,
4010                                  MachinePointerInfo(&*FuncArg));
4011           }
4012 
4013           MemOps.push_back(Store);
4014         }
4015         // Whether we copied from a register or not, advance the offset
4016         // into the parameter save area by a full doubleword.
4017         ArgOffset += PtrByteSize;
4018         continue;
4019       }
4020 
4021       // The value of the object is its address, which is the address of
4022       // its first stack doubleword.
4023       InVals.push_back(FIN);
4024 
4025       // Store whatever pieces of the object are in registers to memory.
4026       for (unsigned j = 0; j < ArgSize; j += PtrByteSize) {
4027         if (GPR_idx == Num_GPR_Regs)
4028           break;
4029 
4030         unsigned VReg = MF.addLiveIn(GPR[GPR_idx], &PPC::G8RCRegClass);
4031         FuncInfo->addLiveInAttr(VReg, Flags);
4032         SDValue Val = DAG.getCopyFromReg(Chain, dl, VReg, PtrVT);
4033         SDValue Addr = FIN;
4034         if (j) {
4035           SDValue Off = DAG.getConstant(j, dl, PtrVT);
4036           Addr = DAG.getNode(ISD::ADD, dl, Off.getValueType(), Addr, Off);
4037         }
4038         SDValue Store = DAG.getStore(Val.getValue(1), dl, Val, Addr,
4039                                      MachinePointerInfo(&*FuncArg, j));
4040         MemOps.push_back(Store);
4041         ++GPR_idx;
4042       }
4043       ArgOffset += ArgSize;
4044       continue;
4045     }
4046 
4047     switch (ObjectVT.getSimpleVT().SimpleTy) {
4048     default: llvm_unreachable("Unhandled argument type!");
4049     case MVT::i1:
4050     case MVT::i32:
4051     case MVT::i64:
4052       if (Flags.isNest()) {
4053         // The 'nest' parameter, if any, is passed in R11.
4054         unsigned VReg = MF.addLiveIn(PPC::X11, &PPC::G8RCRegClass);
4055         ArgVal = DAG.getCopyFromReg(Chain, dl, VReg, MVT::i64);
4056 
4057         if (ObjectVT == MVT::i32 || ObjectVT == MVT::i1)
4058           ArgVal = extendArgForPPC64(Flags, ObjectVT, DAG, ArgVal, dl);
4059 
4060         break;
4061       }
4062 
4063       // These can be scalar arguments or elements of an integer array type
4064       // passed directly.  Clang may use those instead of "byval" aggregate
4065       // types to avoid forcing arguments to memory unnecessarily.
4066       if (GPR_idx != Num_GPR_Regs) {
4067         unsigned VReg = MF.addLiveIn(GPR[GPR_idx++], &PPC::G8RCRegClass);
4068         FuncInfo->addLiveInAttr(VReg, Flags);
4069         ArgVal = DAG.getCopyFromReg(Chain, dl, VReg, MVT::i64);
4070 
4071         if (ObjectVT == MVT::i32 || ObjectVT == MVT::i1)
4072           // PPC64 passes i8, i16, and i32 values in i64 registers. Promote
4073           // value to MVT::i64 and then truncate to the correct register size.
4074           ArgVal = extendArgForPPC64(Flags, ObjectVT, DAG, ArgVal, dl);
4075       } else {
4076         if (CallConv == CallingConv::Fast)
4077           ComputeArgOffset();
4078 
4079         needsLoad = true;
4080         ArgSize = PtrByteSize;
4081       }
4082       if (CallConv != CallingConv::Fast || needsLoad)
4083         ArgOffset += 8;
4084       break;
4085 
4086     case MVT::f32:
4087     case MVT::f64:
4088       // These can be scalar arguments or elements of a float array type
4089       // passed directly.  The latter are used to implement ELFv2 homogenous
4090       // float aggregates.
4091       if (FPR_idx != Num_FPR_Regs) {
4092         unsigned VReg;
4093 
4094         if (ObjectVT == MVT::f32)
4095           VReg = MF.addLiveIn(FPR[FPR_idx],
4096                               Subtarget.hasP8Vector()
4097                                   ? &PPC::VSSRCRegClass
4098                                   : &PPC::F4RCRegClass);
4099         else
4100           VReg = MF.addLiveIn(FPR[FPR_idx], Subtarget.hasVSX()
4101                                                 ? &PPC::VSFRCRegClass
4102                                                 : &PPC::F8RCRegClass);
4103 
4104         ArgVal = DAG.getCopyFromReg(Chain, dl, VReg, ObjectVT);
4105         ++FPR_idx;
4106       } else if (GPR_idx != Num_GPR_Regs && CallConv != CallingConv::Fast) {
4107         // FIXME: We may want to re-enable this for CallingConv::Fast on the P8
4108         // once we support fp <-> gpr moves.
4109 
4110         // This can only ever happen in the presence of f32 array types,
4111         // since otherwise we never run out of FPRs before running out
4112         // of GPRs.
4113         unsigned VReg = MF.addLiveIn(GPR[GPR_idx++], &PPC::G8RCRegClass);
4114         FuncInfo->addLiveInAttr(VReg, Flags);
4115         ArgVal = DAG.getCopyFromReg(Chain, dl, VReg, MVT::i64);
4116 
4117         if (ObjectVT == MVT::f32) {
4118           if ((ArgOffset % PtrByteSize) == (isLittleEndian ? 4 : 0))
4119             ArgVal = DAG.getNode(ISD::SRL, dl, MVT::i64, ArgVal,
4120                                  DAG.getConstant(32, dl, MVT::i32));
4121           ArgVal = DAG.getNode(ISD::TRUNCATE, dl, MVT::i32, ArgVal);
4122         }
4123 
4124         ArgVal = DAG.getNode(ISD::BITCAST, dl, ObjectVT, ArgVal);
4125       } else {
4126         if (CallConv == CallingConv::Fast)
4127           ComputeArgOffset();
4128 
4129         needsLoad = true;
4130       }
4131 
4132       // When passing an array of floats, the array occupies consecutive
4133       // space in the argument area; only round up to the next doubleword
4134       // at the end of the array.  Otherwise, each float takes 8 bytes.
4135       if (CallConv != CallingConv::Fast || needsLoad) {
4136         ArgSize = Flags.isInConsecutiveRegs() ? ObjSize : PtrByteSize;
4137         ArgOffset += ArgSize;
4138         if (Flags.isInConsecutiveRegsLast())
4139           ArgOffset = ((ArgOffset + PtrByteSize - 1)/PtrByteSize) * PtrByteSize;
4140       }
4141       break;
4142     case MVT::v4f32:
4143     case MVT::v4i32:
4144     case MVT::v8i16:
4145     case MVT::v16i8:
4146     case MVT::v2f64:
4147     case MVT::v2i64:
4148     case MVT::v1i128:
4149     case MVT::f128:
4150       // These can be scalar arguments or elements of a vector array type
4151       // passed directly.  The latter are used to implement ELFv2 homogenous
4152       // vector aggregates.
4153       if (VR_idx != Num_VR_Regs) {
4154         unsigned VReg = MF.addLiveIn(VR[VR_idx], &PPC::VRRCRegClass);
4155         ArgVal = DAG.getCopyFromReg(Chain, dl, VReg, ObjectVT);
4156         ++VR_idx;
4157       } else {
4158         if (CallConv == CallingConv::Fast)
4159           ComputeArgOffset();
4160         needsLoad = true;
4161       }
4162       if (CallConv != CallingConv::Fast || needsLoad)
4163         ArgOffset += 16;
4164       break;
4165     }
4166 
4167     // We need to load the argument to a virtual register if we determined
4168     // above that we ran out of physical registers of the appropriate type.
4169     if (needsLoad) {
4170       if (ObjSize < ArgSize && !isLittleEndian)
4171         CurArgOffset += ArgSize - ObjSize;
4172       int FI = MFI.CreateFixedObject(ObjSize, CurArgOffset, isImmutable);
4173       SDValue FIN = DAG.getFrameIndex(FI, PtrVT);
4174       ArgVal = DAG.getLoad(ObjectVT, dl, Chain, FIN, MachinePointerInfo());
4175     }
4176 
4177     InVals.push_back(ArgVal);
4178   }
4179 
4180   // Area that is at least reserved in the caller of this function.
4181   unsigned MinReservedArea;
4182   if (HasParameterArea)
4183     MinReservedArea = std::max(ArgOffset, LinkageSize + 8 * PtrByteSize);
4184   else
4185     MinReservedArea = LinkageSize;
4186 
4187   // Set the size that is at least reserved in caller of this function.  Tail
4188   // call optimized functions' reserved stack space needs to be aligned so that
4189   // taking the difference between two stack areas will result in an aligned
4190   // stack.
4191   MinReservedArea =
4192       EnsureStackAlignment(Subtarget.getFrameLowering(), MinReservedArea);
4193   FuncInfo->setMinReservedArea(MinReservedArea);
4194 
4195   // If the function takes variable number of arguments, make a frame index for
4196   // the start of the first vararg value... for expansion of llvm.va_start.
4197   // On ELFv2ABI spec, it writes:
4198   // C programs that are intended to be *portable* across different compilers
4199   // and architectures must use the header file <stdarg.h> to deal with variable
4200   // argument lists.
4201   if (isVarArg && MFI.hasVAStart()) {
4202     int Depth = ArgOffset;
4203 
4204     FuncInfo->setVarArgsFrameIndex(
4205       MFI.CreateFixedObject(PtrByteSize, Depth, true));
4206     SDValue FIN = DAG.getFrameIndex(FuncInfo->getVarArgsFrameIndex(), PtrVT);
4207 
4208     // If this function is vararg, store any remaining integer argument regs
4209     // to their spots on the stack so that they may be loaded by dereferencing
4210     // the result of va_next.
4211     for (GPR_idx = (ArgOffset - LinkageSize) / PtrByteSize;
4212          GPR_idx < Num_GPR_Regs; ++GPR_idx) {
4213       unsigned VReg = MF.addLiveIn(GPR[GPR_idx], &PPC::G8RCRegClass);
4214       SDValue Val = DAG.getCopyFromReg(Chain, dl, VReg, PtrVT);
4215       SDValue Store =
4216           DAG.getStore(Val.getValue(1), dl, Val, FIN, MachinePointerInfo());
4217       MemOps.push_back(Store);
4218       // Increment the address by four for the next argument to store
4219       SDValue PtrOff = DAG.getConstant(PtrByteSize, dl, PtrVT);
4220       FIN = DAG.getNode(ISD::ADD, dl, PtrOff.getValueType(), FIN, PtrOff);
4221     }
4222   }
4223 
4224   if (!MemOps.empty())
4225     Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, MemOps);
4226 
4227   return Chain;
4228 }
4229 
4230 SDValue PPCTargetLowering::LowerFormalArguments_Darwin(
4231     SDValue Chain, CallingConv::ID CallConv, bool isVarArg,
4232     const SmallVectorImpl<ISD::InputArg> &Ins, const SDLoc &dl,
4233     SelectionDAG &DAG, SmallVectorImpl<SDValue> &InVals) const {
4234   // TODO: add description of PPC stack frame format, or at least some docs.
4235   //
4236   MachineFunction &MF = DAG.getMachineFunction();
4237   MachineFrameInfo &MFI = MF.getFrameInfo();
4238   PPCFunctionInfo *FuncInfo = MF.getInfo<PPCFunctionInfo>();
4239 
4240   EVT PtrVT = getPointerTy(MF.getDataLayout());
4241   bool isPPC64 = PtrVT == MVT::i64;
4242   // Potential tail calls could cause overwriting of argument stack slots.
4243   bool isImmutable = !(getTargetMachine().Options.GuaranteedTailCallOpt &&
4244                        (CallConv == CallingConv::Fast));
4245   unsigned PtrByteSize = isPPC64 ? 8 : 4;
4246   unsigned LinkageSize = Subtarget.getFrameLowering()->getLinkageSize();
4247   unsigned ArgOffset = LinkageSize;
4248   // Area that is at least reserved in caller of this function.
4249   unsigned MinReservedArea = ArgOffset;
4250 
4251   static const MCPhysReg GPR_32[] = {           // 32-bit registers.
4252     PPC::R3, PPC::R4, PPC::R5, PPC::R6,
4253     PPC::R7, PPC::R8, PPC::R9, PPC::R10,
4254   };
4255   static const MCPhysReg GPR_64[] = {           // 64-bit registers.
4256     PPC::X3, PPC::X4, PPC::X5, PPC::X6,
4257     PPC::X7, PPC::X8, PPC::X9, PPC::X10,
4258   };
4259   static const MCPhysReg VR[] = {
4260     PPC::V2, PPC::V3, PPC::V4, PPC::V5, PPC::V6, PPC::V7, PPC::V8,
4261     PPC::V9, PPC::V10, PPC::V11, PPC::V12, PPC::V13
4262   };
4263 
4264   const unsigned Num_GPR_Regs = array_lengthof(GPR_32);
4265   const unsigned Num_FPR_Regs = useSoftFloat() ? 0 : 13;
4266   const unsigned Num_VR_Regs  = array_lengthof( VR);
4267 
4268   unsigned GPR_idx = 0, FPR_idx = 0, VR_idx = 0;
4269 
4270   const MCPhysReg *GPR = isPPC64 ? GPR_64 : GPR_32;
4271 
4272   // In 32-bit non-varargs functions, the stack space for vectors is after the
4273   // stack space for non-vectors.  We do not use this space unless we have
4274   // too many vectors to fit in registers, something that only occurs in
4275   // constructed examples:), but we have to walk the arglist to figure
4276   // that out...for the pathological case, compute VecArgOffset as the
4277   // start of the vector parameter area.  Computing VecArgOffset is the
4278   // entire point of the following loop.
4279   unsigned VecArgOffset = ArgOffset;
4280   if (!isVarArg && !isPPC64) {
4281     for (unsigned ArgNo = 0, e = Ins.size(); ArgNo != e;
4282          ++ArgNo) {
4283       EVT ObjectVT = Ins[ArgNo].VT;
4284       ISD::ArgFlagsTy Flags = Ins[ArgNo].Flags;
4285 
4286       if (Flags.isByVal()) {
4287         // ObjSize is the true size, ArgSize rounded up to multiple of regs.
4288         unsigned ObjSize = Flags.getByValSize();
4289         unsigned ArgSize =
4290                 ((ObjSize + PtrByteSize - 1)/PtrByteSize) * PtrByteSize;
4291         VecArgOffset += ArgSize;
4292         continue;
4293       }
4294 
4295       switch(ObjectVT.getSimpleVT().SimpleTy) {
4296       default: llvm_unreachable("Unhandled argument type!");
4297       case MVT::i1:
4298       case MVT::i32:
4299       case MVT::f32:
4300         VecArgOffset += 4;
4301         break;
4302       case MVT::i64:  // PPC64
4303       case MVT::f64:
4304         // FIXME: We are guaranteed to be !isPPC64 at this point.
4305         // Does MVT::i64 apply?
4306         VecArgOffset += 8;
4307         break;
4308       case MVT::v4f32:
4309       case MVT::v4i32:
4310       case MVT::v8i16:
4311       case MVT::v16i8:
4312         // Nothing to do, we're only looking at Nonvector args here.
4313         break;
4314       }
4315     }
4316   }
4317   // We've found where the vector parameter area in memory is.  Skip the
4318   // first 12 parameters; these don't use that memory.
4319   VecArgOffset = ((VecArgOffset+15)/16)*16;
4320   VecArgOffset += 12*16;
4321 
4322   // Add DAG nodes to load the arguments or copy them out of registers.  On
4323   // entry to a function on PPC, the arguments start after the linkage area,
4324   // although the first ones are often in registers.
4325 
4326   SmallVector<SDValue, 8> MemOps;
4327   unsigned nAltivecParamsAtEnd = 0;
4328   Function::const_arg_iterator FuncArg = MF.getFunction().arg_begin();
4329   unsigned CurArgIdx = 0;
4330   for (unsigned ArgNo = 0, e = Ins.size(); ArgNo != e; ++ArgNo) {
4331     SDValue ArgVal;
4332     bool needsLoad = false;
4333     EVT ObjectVT = Ins[ArgNo].VT;
4334     unsigned ObjSize = ObjectVT.getSizeInBits()/8;
4335     unsigned ArgSize = ObjSize;
4336     ISD::ArgFlagsTy Flags = Ins[ArgNo].Flags;
4337     if (Ins[ArgNo].isOrigArg()) {
4338       std::advance(FuncArg, Ins[ArgNo].getOrigArgIndex() - CurArgIdx);
4339       CurArgIdx = Ins[ArgNo].getOrigArgIndex();
4340     }
4341     unsigned CurArgOffset = ArgOffset;
4342 
4343     // Varargs or 64 bit Altivec parameters are padded to a 16 byte boundary.
4344     if (ObjectVT==MVT::v4f32 || ObjectVT==MVT::v4i32 ||
4345         ObjectVT==MVT::v8i16 || ObjectVT==MVT::v16i8) {
4346       if (isVarArg || isPPC64) {
4347         MinReservedArea = ((MinReservedArea+15)/16)*16;
4348         MinReservedArea += CalculateStackSlotSize(ObjectVT,
4349                                                   Flags,
4350                                                   PtrByteSize);
4351       } else  nAltivecParamsAtEnd++;
4352     } else
4353       // Calculate min reserved area.
4354       MinReservedArea += CalculateStackSlotSize(Ins[ArgNo].VT,
4355                                                 Flags,
4356                                                 PtrByteSize);
4357 
4358     // FIXME the codegen can be much improved in some cases.
4359     // We do not have to keep everything in memory.
4360     if (Flags.isByVal()) {
4361       assert(Ins[ArgNo].isOrigArg() && "Byval arguments cannot be implicit");
4362 
4363       // ObjSize is the true size, ArgSize rounded up to multiple of registers.
4364       ObjSize = Flags.getByValSize();
4365       ArgSize = ((ObjSize + PtrByteSize - 1)/PtrByteSize) * PtrByteSize;
4366       // Objects of size 1 and 2 are right justified, everything else is
4367       // left justified.  This means the memory address is adjusted forwards.
4368       if (ObjSize==1 || ObjSize==2) {
4369         CurArgOffset = CurArgOffset + (4 - ObjSize);
4370       }
4371       // The value of the object is its address.
4372       int FI = MFI.CreateFixedObject(ObjSize, CurArgOffset, false, true);
4373       SDValue FIN = DAG.getFrameIndex(FI, PtrVT);
4374       InVals.push_back(FIN);
4375       if (ObjSize==1 || ObjSize==2) {
4376         if (GPR_idx != Num_GPR_Regs) {
4377           unsigned VReg;
4378           if (isPPC64)
4379             VReg = MF.addLiveIn(GPR[GPR_idx], &PPC::G8RCRegClass);
4380           else
4381             VReg = MF.addLiveIn(GPR[GPR_idx], &PPC::GPRCRegClass);
4382           SDValue Val = DAG.getCopyFromReg(Chain, dl, VReg, PtrVT);
4383           EVT ObjType = ObjSize == 1 ? MVT::i8 : MVT::i16;
4384           SDValue Store =
4385               DAG.getTruncStore(Val.getValue(1), dl, Val, FIN,
4386                                 MachinePointerInfo(&*FuncArg), ObjType);
4387           MemOps.push_back(Store);
4388           ++GPR_idx;
4389         }
4390 
4391         ArgOffset += PtrByteSize;
4392 
4393         continue;
4394       }
4395       for (unsigned j = 0; j < ArgSize; j += PtrByteSize) {
4396         // Store whatever pieces of the object are in registers
4397         // to memory.  ArgOffset will be the address of the beginning
4398         // of the object.
4399         if (GPR_idx != Num_GPR_Regs) {
4400           unsigned VReg;
4401           if (isPPC64)
4402             VReg = MF.addLiveIn(GPR[GPR_idx], &PPC::G8RCRegClass);
4403           else
4404             VReg = MF.addLiveIn(GPR[GPR_idx], &PPC::GPRCRegClass);
4405           int FI = MFI.CreateFixedObject(PtrByteSize, ArgOffset, true);
4406           SDValue FIN = DAG.getFrameIndex(FI, PtrVT);
4407           SDValue Val = DAG.getCopyFromReg(Chain, dl, VReg, PtrVT);
4408           SDValue Store = DAG.getStore(Val.getValue(1), dl, Val, FIN,
4409                                        MachinePointerInfo(&*FuncArg, j));
4410           MemOps.push_back(Store);
4411           ++GPR_idx;
4412           ArgOffset += PtrByteSize;
4413         } else {
4414           ArgOffset += ArgSize - (ArgOffset-CurArgOffset);
4415           break;
4416         }
4417       }
4418       continue;
4419     }
4420 
4421     switch (ObjectVT.getSimpleVT().SimpleTy) {
4422     default: llvm_unreachable("Unhandled argument type!");
4423     case MVT::i1:
4424     case MVT::i32:
4425       if (!isPPC64) {
4426         if (GPR_idx != Num_GPR_Regs) {
4427           unsigned VReg = MF.addLiveIn(GPR[GPR_idx], &PPC::GPRCRegClass);
4428           ArgVal = DAG.getCopyFromReg(Chain, dl, VReg, MVT::i32);
4429 
4430           if (ObjectVT == MVT::i1)
4431             ArgVal = DAG.getNode(ISD::TRUNCATE, dl, MVT::i1, ArgVal);
4432 
4433           ++GPR_idx;
4434         } else {
4435           needsLoad = true;
4436           ArgSize = PtrByteSize;
4437         }
4438         // All int arguments reserve stack space in the Darwin ABI.
4439         ArgOffset += PtrByteSize;
4440         break;
4441       }
4442       LLVM_FALLTHROUGH;
4443     case MVT::i64:  // PPC64
4444       if (GPR_idx != Num_GPR_Regs) {
4445         unsigned VReg = MF.addLiveIn(GPR[GPR_idx], &PPC::G8RCRegClass);
4446         ArgVal = DAG.getCopyFromReg(Chain, dl, VReg, MVT::i64);
4447 
4448         if (ObjectVT == MVT::i32 || ObjectVT == MVT::i1)
4449           // PPC64 passes i8, i16, and i32 values in i64 registers. Promote
4450           // value to MVT::i64 and then truncate to the correct register size.
4451           ArgVal = extendArgForPPC64(Flags, ObjectVT, DAG, ArgVal, dl);
4452 
4453         ++GPR_idx;
4454       } else {
4455         needsLoad = true;
4456         ArgSize = PtrByteSize;
4457       }
4458       // All int arguments reserve stack space in the Darwin ABI.
4459       ArgOffset += 8;
4460       break;
4461 
4462     case MVT::f32:
4463     case MVT::f64:
4464       // Every 4 bytes of argument space consumes one of the GPRs available for
4465       // argument passing.
4466       if (GPR_idx != Num_GPR_Regs) {
4467         ++GPR_idx;
4468         if (ObjSize == 8 && GPR_idx != Num_GPR_Regs && !isPPC64)
4469           ++GPR_idx;
4470       }
4471       if (FPR_idx != Num_FPR_Regs) {
4472         unsigned VReg;
4473 
4474         if (ObjectVT == MVT::f32)
4475           VReg = MF.addLiveIn(FPR[FPR_idx], &PPC::F4RCRegClass);
4476         else
4477           VReg = MF.addLiveIn(FPR[FPR_idx], &PPC::F8RCRegClass);
4478 
4479         ArgVal = DAG.getCopyFromReg(Chain, dl, VReg, ObjectVT);
4480         ++FPR_idx;
4481       } else {
4482         needsLoad = true;
4483       }
4484 
4485       // All FP arguments reserve stack space in the Darwin ABI.
4486       ArgOffset += isPPC64 ? 8 : ObjSize;
4487       break;
4488     case MVT::v4f32:
4489     case MVT::v4i32:
4490     case MVT::v8i16:
4491     case MVT::v16i8:
4492       // Note that vector arguments in registers don't reserve stack space,
4493       // except in varargs functions.
4494       if (VR_idx != Num_VR_Regs) {
4495         unsigned VReg = MF.addLiveIn(VR[VR_idx], &PPC::VRRCRegClass);
4496         ArgVal = DAG.getCopyFromReg(Chain, dl, VReg, ObjectVT);
4497         if (isVarArg) {
4498           while ((ArgOffset % 16) != 0) {
4499             ArgOffset += PtrByteSize;
4500             if (GPR_idx != Num_GPR_Regs)
4501               GPR_idx++;
4502           }
4503           ArgOffset += 16;
4504           GPR_idx = std::min(GPR_idx+4, Num_GPR_Regs); // FIXME correct for ppc64?
4505         }
4506         ++VR_idx;
4507       } else {
4508         if (!isVarArg && !isPPC64) {
4509           // Vectors go after all the nonvectors.
4510           CurArgOffset = VecArgOffset;
4511           VecArgOffset += 16;
4512         } else {
4513           // Vectors are aligned.
4514           ArgOffset = ((ArgOffset+15)/16)*16;
4515           CurArgOffset = ArgOffset;
4516           ArgOffset += 16;
4517         }
4518         needsLoad = true;
4519       }
4520       break;
4521     }
4522 
4523     // We need to load the argument to a virtual register if we determined above
4524     // that we ran out of physical registers of the appropriate type.
4525     if (needsLoad) {
4526       int FI = MFI.CreateFixedObject(ObjSize,
4527                                      CurArgOffset + (ArgSize - ObjSize),
4528                                      isImmutable);
4529       SDValue FIN = DAG.getFrameIndex(FI, PtrVT);
4530       ArgVal = DAG.getLoad(ObjectVT, dl, Chain, FIN, MachinePointerInfo());
4531     }
4532 
4533     InVals.push_back(ArgVal);
4534   }
4535 
4536   // Allow for Altivec parameters at the end, if needed.
4537   if (nAltivecParamsAtEnd) {
4538     MinReservedArea = ((MinReservedArea+15)/16)*16;
4539     MinReservedArea += 16*nAltivecParamsAtEnd;
4540   }
4541 
4542   // Area that is at least reserved in the caller of this function.
4543   MinReservedArea = std::max(MinReservedArea, LinkageSize + 8 * PtrByteSize);
4544 
4545   // Set the size that is at least reserved in caller of this function.  Tail
4546   // call optimized functions' reserved stack space needs to be aligned so that
4547   // taking the difference between two stack areas will result in an aligned
4548   // stack.
4549   MinReservedArea =
4550       EnsureStackAlignment(Subtarget.getFrameLowering(), MinReservedArea);
4551   FuncInfo->setMinReservedArea(MinReservedArea);
4552 
4553   // If the function takes variable number of arguments, make a frame index for
4554   // the start of the first vararg value... for expansion of llvm.va_start.
4555   if (isVarArg) {
4556     int Depth = ArgOffset;
4557 
4558     FuncInfo->setVarArgsFrameIndex(
4559       MFI.CreateFixedObject(PtrVT.getSizeInBits()/8,
4560                             Depth, true));
4561     SDValue FIN = DAG.getFrameIndex(FuncInfo->getVarArgsFrameIndex(), PtrVT);
4562 
4563     // If this function is vararg, store any remaining integer argument regs
4564     // to their spots on the stack so that they may be loaded by dereferencing
4565     // the result of va_next.
4566     for (; GPR_idx != Num_GPR_Regs; ++GPR_idx) {
4567       unsigned VReg;
4568 
4569       if (isPPC64)
4570         VReg = MF.addLiveIn(GPR[GPR_idx], &PPC::G8RCRegClass);
4571       else
4572         VReg = MF.addLiveIn(GPR[GPR_idx], &PPC::GPRCRegClass);
4573 
4574       SDValue Val = DAG.getCopyFromReg(Chain, dl, VReg, PtrVT);
4575       SDValue Store =
4576           DAG.getStore(Val.getValue(1), dl, Val, FIN, MachinePointerInfo());
4577       MemOps.push_back(Store);
4578       // Increment the address by four for the next argument to store
4579       SDValue PtrOff = DAG.getConstant(PtrVT.getSizeInBits()/8, dl, PtrVT);
4580       FIN = DAG.getNode(ISD::ADD, dl, PtrOff.getValueType(), FIN, PtrOff);
4581     }
4582   }
4583 
4584   if (!MemOps.empty())
4585     Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, MemOps);
4586 
4587   return Chain;
4588 }
4589 
4590 /// CalculateTailCallSPDiff - Get the amount the stack pointer has to be
4591 /// adjusted to accommodate the arguments for the tailcall.
4592 static int CalculateTailCallSPDiff(SelectionDAG& DAG, bool isTailCall,
4593                                    unsigned ParamSize) {
4594 
4595   if (!isTailCall) return 0;
4596 
4597   PPCFunctionInfo *FI = DAG.getMachineFunction().getInfo<PPCFunctionInfo>();
4598   unsigned CallerMinReservedArea = FI->getMinReservedArea();
4599   int SPDiff = (int)CallerMinReservedArea - (int)ParamSize;
4600   // Remember only if the new adjustment is bigger.
4601   if (SPDiff < FI->getTailCallSPDelta())
4602     FI->setTailCallSPDelta(SPDiff);
4603 
4604   return SPDiff;
4605 }
4606 
4607 static bool isFunctionGlobalAddress(SDValue Callee);
4608 
4609 static bool callsShareTOCBase(const Function *Caller, SDValue Callee,
4610                               const TargetMachine &TM) {
4611   // It does not make sense to call callsShareTOCBase() with a caller that
4612   // is PC Relative since PC Relative callers do not have a TOC.
4613 #ifndef NDEBUG
4614   const PPCSubtarget *STICaller = &TM.getSubtarget<PPCSubtarget>(*Caller);
4615   assert(!STICaller->isUsingPCRelativeCalls() &&
4616          "PC Relative callers do not have a TOC and cannot share a TOC Base");
4617 #endif
4618 
4619   // Callee is either a GlobalAddress or an ExternalSymbol. ExternalSymbols
4620   // don't have enough information to determine if the caller and callee share
4621   // the same  TOC base, so we have to pessimistically assume they don't for
4622   // correctness.
4623   GlobalAddressSDNode *G = dyn_cast<GlobalAddressSDNode>(Callee);
4624   if (!G)
4625     return false;
4626 
4627   const GlobalValue *GV = G->getGlobal();
4628 
4629   // If the callee is preemptable, then the static linker will use a plt-stub
4630   // which saves the toc to the stack, and needs a nop after the call
4631   // instruction to convert to a toc-restore.
4632   if (!TM.shouldAssumeDSOLocal(*Caller->getParent(), GV))
4633     return false;
4634 
4635   // Functions with PC Relative enabled may clobber the TOC in the same DSO.
4636   // We may need a TOC restore in the situation where the caller requires a
4637   // valid TOC but the callee is PC Relative and does not.
4638   const Function *F = dyn_cast<Function>(GV);
4639   const GlobalAlias *Alias = dyn_cast<GlobalAlias>(GV);
4640 
4641   // If we have an Alias we can try to get the function from there.
4642   if (Alias) {
4643     const GlobalObject *GlobalObj = Alias->getBaseObject();
4644     F = dyn_cast<Function>(GlobalObj);
4645   }
4646 
4647   // If we still have no valid function pointer we do not have enough
4648   // information to determine if the callee uses PC Relative calls so we must
4649   // assume that it does.
4650   if (!F)
4651     return false;
4652 
4653   // If the callee uses PC Relative we cannot guarantee that the callee won't
4654   // clobber the TOC of the caller and so we must assume that the two
4655   // functions do not share a TOC base.
4656   const PPCSubtarget *STICallee = &TM.getSubtarget<PPCSubtarget>(*F);
4657   if (STICallee->isUsingPCRelativeCalls())
4658     return false;
4659 
4660   // The medium and large code models are expected to provide a sufficiently
4661   // large TOC to provide all data addressing needs of a module with a
4662   // single TOC.
4663   if (CodeModel::Medium == TM.getCodeModel() ||
4664       CodeModel::Large == TM.getCodeModel())
4665     return true;
4666 
4667   // Otherwise we need to ensure callee and caller are in the same section,
4668   // since the linker may allocate multiple TOCs, and we don't know which
4669   // sections will belong to the same TOC base.
4670   if (!GV->isStrongDefinitionForLinker())
4671     return false;
4672 
4673   // Any explicitly-specified sections and section prefixes must also match.
4674   // Also, if we're using -ffunction-sections, then each function is always in
4675   // a different section (the same is true for COMDAT functions).
4676   if (TM.getFunctionSections() || GV->hasComdat() || Caller->hasComdat() ||
4677       GV->getSection() != Caller->getSection())
4678     return false;
4679   if (const auto *F = dyn_cast<Function>(GV)) {
4680     if (F->getSectionPrefix() != Caller->getSectionPrefix())
4681       return false;
4682   }
4683 
4684   return true;
4685 }
4686 
4687 static bool
4688 needStackSlotPassParameters(const PPCSubtarget &Subtarget,
4689                             const SmallVectorImpl<ISD::OutputArg> &Outs) {
4690   assert(Subtarget.is64BitELFABI());
4691 
4692   const unsigned PtrByteSize = 8;
4693   const unsigned LinkageSize = Subtarget.getFrameLowering()->getLinkageSize();
4694 
4695   static const MCPhysReg GPR[] = {
4696     PPC::X3, PPC::X4, PPC::X5, PPC::X6,
4697     PPC::X7, PPC::X8, PPC::X9, PPC::X10,
4698   };
4699   static const MCPhysReg VR[] = {
4700     PPC::V2, PPC::V3, PPC::V4, PPC::V5, PPC::V6, PPC::V7, PPC::V8,
4701     PPC::V9, PPC::V10, PPC::V11, PPC::V12, PPC::V13
4702   };
4703 
4704   const unsigned NumGPRs = array_lengthof(GPR);
4705   const unsigned NumFPRs = 13;
4706   const unsigned NumVRs = array_lengthof(VR);
4707   const unsigned ParamAreaSize = NumGPRs * PtrByteSize;
4708 
4709   unsigned NumBytes = LinkageSize;
4710   unsigned AvailableFPRs = NumFPRs;
4711   unsigned AvailableVRs = NumVRs;
4712 
4713   for (const ISD::OutputArg& Param : Outs) {
4714     if (Param.Flags.isNest()) continue;
4715 
4716     if (CalculateStackSlotUsed(Param.VT, Param.ArgVT, Param.Flags, PtrByteSize,
4717                                LinkageSize, ParamAreaSize, NumBytes,
4718                                AvailableFPRs, AvailableVRs))
4719       return true;
4720   }
4721   return false;
4722 }
4723 
4724 static bool hasSameArgumentList(const Function *CallerFn, const CallBase &CB) {
4725   if (CB.arg_size() != CallerFn->arg_size())
4726     return false;
4727 
4728   auto CalleeArgIter = CB.arg_begin();
4729   auto CalleeArgEnd = CB.arg_end();
4730   Function::const_arg_iterator CallerArgIter = CallerFn->arg_begin();
4731 
4732   for (; CalleeArgIter != CalleeArgEnd; ++CalleeArgIter, ++CallerArgIter) {
4733     const Value* CalleeArg = *CalleeArgIter;
4734     const Value* CallerArg = &(*CallerArgIter);
4735     if (CalleeArg == CallerArg)
4736       continue;
4737 
4738     // e.g. @caller([4 x i64] %a, [4 x i64] %b) {
4739     //        tail call @callee([4 x i64] undef, [4 x i64] %b)
4740     //      }
4741     // 1st argument of callee is undef and has the same type as caller.
4742     if (CalleeArg->getType() == CallerArg->getType() &&
4743         isa<UndefValue>(CalleeArg))
4744       continue;
4745 
4746     return false;
4747   }
4748 
4749   return true;
4750 }
4751 
4752 // Returns true if TCO is possible between the callers and callees
4753 // calling conventions.
4754 static bool
4755 areCallingConvEligibleForTCO_64SVR4(CallingConv::ID CallerCC,
4756                                     CallingConv::ID CalleeCC) {
4757   // Tail calls are possible with fastcc and ccc.
4758   auto isTailCallableCC  = [] (CallingConv::ID CC){
4759       return  CC == CallingConv::C || CC == CallingConv::Fast;
4760   };
4761   if (!isTailCallableCC(CallerCC) || !isTailCallableCC(CalleeCC))
4762     return false;
4763 
4764   // We can safely tail call both fastcc and ccc callees from a c calling
4765   // convention caller. If the caller is fastcc, we may have less stack space
4766   // than a non-fastcc caller with the same signature so disable tail-calls in
4767   // that case.
4768   return CallerCC == CallingConv::C || CallerCC == CalleeCC;
4769 }
4770 
4771 bool PPCTargetLowering::IsEligibleForTailCallOptimization_64SVR4(
4772     SDValue Callee, CallingConv::ID CalleeCC, const CallBase *CB, bool isVarArg,
4773     const SmallVectorImpl<ISD::OutputArg> &Outs,
4774     const SmallVectorImpl<ISD::InputArg> &Ins, SelectionDAG &DAG) const {
4775   bool TailCallOpt = getTargetMachine().Options.GuaranteedTailCallOpt;
4776 
4777   if (DisableSCO && !TailCallOpt) return false;
4778 
4779   // Variadic argument functions are not supported.
4780   if (isVarArg) return false;
4781 
4782   auto &Caller = DAG.getMachineFunction().getFunction();
4783   // Check that the calling conventions are compatible for tco.
4784   if (!areCallingConvEligibleForTCO_64SVR4(Caller.getCallingConv(), CalleeCC))
4785     return false;
4786 
4787   // Caller contains any byval parameter is not supported.
4788   if (any_of(Ins, [](const ISD::InputArg &IA) { return IA.Flags.isByVal(); }))
4789     return false;
4790 
4791   // Callee contains any byval parameter is not supported, too.
4792   // Note: This is a quick work around, because in some cases, e.g.
4793   // caller's stack size > callee's stack size, we are still able to apply
4794   // sibling call optimization. For example, gcc is able to do SCO for caller1
4795   // in the following example, but not for caller2.
4796   //   struct test {
4797   //     long int a;
4798   //     char ary[56];
4799   //   } gTest;
4800   //   __attribute__((noinline)) int callee(struct test v, struct test *b) {
4801   //     b->a = v.a;
4802   //     return 0;
4803   //   }
4804   //   void caller1(struct test a, struct test c, struct test *b) {
4805   //     callee(gTest, b); }
4806   //   void caller2(struct test *b) { callee(gTest, b); }
4807   if (any_of(Outs, [](const ISD::OutputArg& OA) { return OA.Flags.isByVal(); }))
4808     return false;
4809 
4810   // If callee and caller use different calling conventions, we cannot pass
4811   // parameters on stack since offsets for the parameter area may be different.
4812   if (Caller.getCallingConv() != CalleeCC &&
4813       needStackSlotPassParameters(Subtarget, Outs))
4814     return false;
4815 
4816   // All variants of 64-bit ELF ABIs without PC-Relative addressing require that
4817   // the caller and callee share the same TOC for TCO/SCO. If the caller and
4818   // callee potentially have different TOC bases then we cannot tail call since
4819   // we need to restore the TOC pointer after the call.
4820   // ref: https://bugzilla.mozilla.org/show_bug.cgi?id=973977
4821   // We cannot guarantee this for indirect calls or calls to external functions.
4822   // When PC-Relative addressing is used, the concept of the TOC is no longer
4823   // applicable so this check is not required.
4824   // Check first for indirect calls.
4825   if (!Subtarget.isUsingPCRelativeCalls() &&
4826       !isFunctionGlobalAddress(Callee) && !isa<ExternalSymbolSDNode>(Callee))
4827     return false;
4828 
4829   // Check if we share the TOC base.
4830   if (!Subtarget.isUsingPCRelativeCalls() &&
4831       !callsShareTOCBase(&Caller, Callee, getTargetMachine()))
4832     return false;
4833 
4834   // TCO allows altering callee ABI, so we don't have to check further.
4835   if (CalleeCC == CallingConv::Fast && TailCallOpt)
4836     return true;
4837 
4838   if (DisableSCO) return false;
4839 
4840   // If callee use the same argument list that caller is using, then we can
4841   // apply SCO on this case. If it is not, then we need to check if callee needs
4842   // stack for passing arguments.
4843   // PC Relative tail calls may not have a CallBase.
4844   // If there is no CallBase we cannot verify if we have the same argument
4845   // list so assume that we don't have the same argument list.
4846   if (CB && !hasSameArgumentList(&Caller, *CB) &&
4847       needStackSlotPassParameters(Subtarget, Outs))
4848     return false;
4849   else if (!CB && needStackSlotPassParameters(Subtarget, Outs))
4850     return false;
4851 
4852   return true;
4853 }
4854 
4855 /// IsEligibleForTailCallOptimization - Check whether the call is eligible
4856 /// for tail call optimization. Targets which want to do tail call
4857 /// optimization should implement this function.
4858 bool
4859 PPCTargetLowering::IsEligibleForTailCallOptimization(SDValue Callee,
4860                                                      CallingConv::ID CalleeCC,
4861                                                      bool isVarArg,
4862                                       const SmallVectorImpl<ISD::InputArg> &Ins,
4863                                                      SelectionDAG& DAG) const {
4864   if (!getTargetMachine().Options.GuaranteedTailCallOpt)
4865     return false;
4866 
4867   // Variable argument functions are not supported.
4868   if (isVarArg)
4869     return false;
4870 
4871   MachineFunction &MF = DAG.getMachineFunction();
4872   CallingConv::ID CallerCC = MF.getFunction().getCallingConv();
4873   if (CalleeCC == CallingConv::Fast && CallerCC == CalleeCC) {
4874     // Functions containing by val parameters are not supported.
4875     for (unsigned i = 0; i != Ins.size(); i++) {
4876        ISD::ArgFlagsTy Flags = Ins[i].Flags;
4877        if (Flags.isByVal()) return false;
4878     }
4879 
4880     // Non-PIC/GOT tail calls are supported.
4881     if (getTargetMachine().getRelocationModel() != Reloc::PIC_)
4882       return true;
4883 
4884     // At the moment we can only do local tail calls (in same module, hidden
4885     // or protected) if we are generating PIC.
4886     if (GlobalAddressSDNode *G = dyn_cast<GlobalAddressSDNode>(Callee))
4887       return G->getGlobal()->hasHiddenVisibility()
4888           || G->getGlobal()->hasProtectedVisibility();
4889   }
4890 
4891   return false;
4892 }
4893 
4894 /// isCallCompatibleAddress - Return the immediate to use if the specified
4895 /// 32-bit value is representable in the immediate field of a BxA instruction.
4896 static SDNode *isBLACompatibleAddress(SDValue Op, SelectionDAG &DAG) {
4897   ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op);
4898   if (!C) return nullptr;
4899 
4900   int Addr = C->getZExtValue();
4901   if ((Addr & 3) != 0 ||  // Low 2 bits are implicitly zero.
4902       SignExtend32<26>(Addr) != Addr)
4903     return nullptr;  // Top 6 bits have to be sext of immediate.
4904 
4905   return DAG
4906       .getConstant(
4907           (int)C->getZExtValue() >> 2, SDLoc(Op),
4908           DAG.getTargetLoweringInfo().getPointerTy(DAG.getDataLayout()))
4909       .getNode();
4910 }
4911 
4912 namespace {
4913 
4914 struct TailCallArgumentInfo {
4915   SDValue Arg;
4916   SDValue FrameIdxOp;
4917   int FrameIdx = 0;
4918 
4919   TailCallArgumentInfo() = default;
4920 };
4921 
4922 } // end anonymous namespace
4923 
4924 /// StoreTailCallArgumentsToStackSlot - Stores arguments to their stack slot.
4925 static void StoreTailCallArgumentsToStackSlot(
4926     SelectionDAG &DAG, SDValue Chain,
4927     const SmallVectorImpl<TailCallArgumentInfo> &TailCallArgs,
4928     SmallVectorImpl<SDValue> &MemOpChains, const SDLoc &dl) {
4929   for (unsigned i = 0, e = TailCallArgs.size(); i != e; ++i) {
4930     SDValue Arg = TailCallArgs[i].Arg;
4931     SDValue FIN = TailCallArgs[i].FrameIdxOp;
4932     int FI = TailCallArgs[i].FrameIdx;
4933     // Store relative to framepointer.
4934     MemOpChains.push_back(DAG.getStore(
4935         Chain, dl, Arg, FIN,
4936         MachinePointerInfo::getFixedStack(DAG.getMachineFunction(), FI)));
4937   }
4938 }
4939 
4940 /// EmitTailCallStoreFPAndRetAddr - Move the frame pointer and return address to
4941 /// the appropriate stack slot for the tail call optimized function call.
4942 static SDValue EmitTailCallStoreFPAndRetAddr(SelectionDAG &DAG, SDValue Chain,
4943                                              SDValue OldRetAddr, SDValue OldFP,
4944                                              int SPDiff, const SDLoc &dl) {
4945   if (SPDiff) {
4946     // Calculate the new stack slot for the return address.
4947     MachineFunction &MF = DAG.getMachineFunction();
4948     const PPCSubtarget &Subtarget = MF.getSubtarget<PPCSubtarget>();
4949     const PPCFrameLowering *FL = Subtarget.getFrameLowering();
4950     bool isPPC64 = Subtarget.isPPC64();
4951     int SlotSize = isPPC64 ? 8 : 4;
4952     int NewRetAddrLoc = SPDiff + FL->getReturnSaveOffset();
4953     int NewRetAddr = MF.getFrameInfo().CreateFixedObject(SlotSize,
4954                                                          NewRetAddrLoc, true);
4955     EVT VT = isPPC64 ? MVT::i64 : MVT::i32;
4956     SDValue NewRetAddrFrIdx = DAG.getFrameIndex(NewRetAddr, VT);
4957     Chain = DAG.getStore(Chain, dl, OldRetAddr, NewRetAddrFrIdx,
4958                          MachinePointerInfo::getFixedStack(MF, NewRetAddr));
4959   }
4960   return Chain;
4961 }
4962 
4963 /// CalculateTailCallArgDest - Remember Argument for later processing. Calculate
4964 /// the position of the argument.
4965 static void
4966 CalculateTailCallArgDest(SelectionDAG &DAG, MachineFunction &MF, bool isPPC64,
4967                          SDValue Arg, int SPDiff, unsigned ArgOffset,
4968                      SmallVectorImpl<TailCallArgumentInfo>& TailCallArguments) {
4969   int Offset = ArgOffset + SPDiff;
4970   uint32_t OpSize = (Arg.getValueSizeInBits() + 7) / 8;
4971   int FI = MF.getFrameInfo().CreateFixedObject(OpSize, Offset, true);
4972   EVT VT = isPPC64 ? MVT::i64 : MVT::i32;
4973   SDValue FIN = DAG.getFrameIndex(FI, VT);
4974   TailCallArgumentInfo Info;
4975   Info.Arg = Arg;
4976   Info.FrameIdxOp = FIN;
4977   Info.FrameIdx = FI;
4978   TailCallArguments.push_back(Info);
4979 }
4980 
4981 /// EmitTCFPAndRetAddrLoad - Emit load from frame pointer and return address
4982 /// stack slot. Returns the chain as result and the loaded frame pointers in
4983 /// LROpOut/FPOpout. Used when tail calling.
4984 SDValue PPCTargetLowering::EmitTailCallLoadFPAndRetAddr(
4985     SelectionDAG &DAG, int SPDiff, SDValue Chain, SDValue &LROpOut,
4986     SDValue &FPOpOut, const SDLoc &dl) const {
4987   if (SPDiff) {
4988     // Load the LR and FP stack slot for later adjusting.
4989     EVT VT = Subtarget.isPPC64() ? MVT::i64 : MVT::i32;
4990     LROpOut = getReturnAddrFrameIndex(DAG);
4991     LROpOut = DAG.getLoad(VT, dl, Chain, LROpOut, MachinePointerInfo());
4992     Chain = SDValue(LROpOut.getNode(), 1);
4993   }
4994   return Chain;
4995 }
4996 
4997 /// CreateCopyOfByValArgument - Make a copy of an aggregate at address specified
4998 /// by "Src" to address "Dst" of size "Size".  Alignment information is
4999 /// specified by the specific parameter attribute. The copy will be passed as
5000 /// a byval function parameter.
5001 /// Sometimes what we are copying is the end of a larger object, the part that
5002 /// does not fit in registers.
5003 static SDValue CreateCopyOfByValArgument(SDValue Src, SDValue Dst,
5004                                          SDValue Chain, ISD::ArgFlagsTy Flags,
5005                                          SelectionDAG &DAG, const SDLoc &dl) {
5006   SDValue SizeNode = DAG.getConstant(Flags.getByValSize(), dl, MVT::i32);
5007   return DAG.getMemcpy(Chain, dl, Dst, Src, SizeNode,
5008                        Flags.getNonZeroByValAlign(), false, false, false,
5009                        MachinePointerInfo(), MachinePointerInfo());
5010 }
5011 
5012 /// LowerMemOpCallTo - Store the argument to the stack or remember it in case of
5013 /// tail calls.
5014 static void LowerMemOpCallTo(
5015     SelectionDAG &DAG, MachineFunction &MF, SDValue Chain, SDValue Arg,
5016     SDValue PtrOff, int SPDiff, unsigned ArgOffset, bool isPPC64,
5017     bool isTailCall, bool isVector, SmallVectorImpl<SDValue> &MemOpChains,
5018     SmallVectorImpl<TailCallArgumentInfo> &TailCallArguments, const SDLoc &dl) {
5019   EVT PtrVT = DAG.getTargetLoweringInfo().getPointerTy(DAG.getDataLayout());
5020   if (!isTailCall) {
5021     if (isVector) {
5022       SDValue StackPtr;
5023       if (isPPC64)
5024         StackPtr = DAG.getRegister(PPC::X1, MVT::i64);
5025       else
5026         StackPtr = DAG.getRegister(PPC::R1, MVT::i32);
5027       PtrOff = DAG.getNode(ISD::ADD, dl, PtrVT, StackPtr,
5028                            DAG.getConstant(ArgOffset, dl, PtrVT));
5029     }
5030     MemOpChains.push_back(
5031         DAG.getStore(Chain, dl, Arg, PtrOff, MachinePointerInfo()));
5032     // Calculate and remember argument location.
5033   } else CalculateTailCallArgDest(DAG, MF, isPPC64, Arg, SPDiff, ArgOffset,
5034                                   TailCallArguments);
5035 }
5036 
5037 static void
5038 PrepareTailCall(SelectionDAG &DAG, SDValue &InFlag, SDValue &Chain,
5039                 const SDLoc &dl, int SPDiff, unsigned NumBytes, SDValue LROp,
5040                 SDValue FPOp,
5041                 SmallVectorImpl<TailCallArgumentInfo> &TailCallArguments) {
5042   // Emit a sequence of copyto/copyfrom virtual registers for arguments that
5043   // might overwrite each other in case of tail call optimization.
5044   SmallVector<SDValue, 8> MemOpChains2;
5045   // Do not flag preceding copytoreg stuff together with the following stuff.
5046   InFlag = SDValue();
5047   StoreTailCallArgumentsToStackSlot(DAG, Chain, TailCallArguments,
5048                                     MemOpChains2, dl);
5049   if (!MemOpChains2.empty())
5050     Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, MemOpChains2);
5051 
5052   // Store the return address to the appropriate stack slot.
5053   Chain = EmitTailCallStoreFPAndRetAddr(DAG, Chain, LROp, FPOp, SPDiff, dl);
5054 
5055   // Emit callseq_end just before tailcall node.
5056   Chain = DAG.getCALLSEQ_END(Chain, DAG.getIntPtrConstant(NumBytes, dl, true),
5057                              DAG.getIntPtrConstant(0, dl, true), InFlag, dl);
5058   InFlag = Chain.getValue(1);
5059 }
5060 
5061 // Is this global address that of a function that can be called by name? (as
5062 // opposed to something that must hold a descriptor for an indirect call).
5063 static bool isFunctionGlobalAddress(SDValue Callee) {
5064   if (GlobalAddressSDNode *G = dyn_cast<GlobalAddressSDNode>(Callee)) {
5065     if (Callee.getOpcode() == ISD::GlobalTLSAddress ||
5066         Callee.getOpcode() == ISD::TargetGlobalTLSAddress)
5067       return false;
5068 
5069     return G->getGlobal()->getValueType()->isFunctionTy();
5070   }
5071 
5072   return false;
5073 }
5074 
5075 SDValue PPCTargetLowering::LowerCallResult(
5076     SDValue Chain, SDValue InFlag, CallingConv::ID CallConv, bool isVarArg,
5077     const SmallVectorImpl<ISD::InputArg> &Ins, const SDLoc &dl,
5078     SelectionDAG &DAG, SmallVectorImpl<SDValue> &InVals) const {
5079   SmallVector<CCValAssign, 16> RVLocs;
5080   CCState CCRetInfo(CallConv, isVarArg, DAG.getMachineFunction(), RVLocs,
5081                     *DAG.getContext());
5082 
5083   CCRetInfo.AnalyzeCallResult(
5084       Ins, (Subtarget.isSVR4ABI() && CallConv == CallingConv::Cold)
5085                ? RetCC_PPC_Cold
5086                : RetCC_PPC);
5087 
5088   // Copy all of the result registers out of their specified physreg.
5089   for (unsigned i = 0, e = RVLocs.size(); i != e; ++i) {
5090     CCValAssign &VA = RVLocs[i];
5091     assert(VA.isRegLoc() && "Can only return in registers!");
5092 
5093     SDValue Val;
5094 
5095     if (Subtarget.hasSPE() && VA.getLocVT() == MVT::f64) {
5096       SDValue Lo = DAG.getCopyFromReg(Chain, dl, VA.getLocReg(), MVT::i32,
5097                                       InFlag);
5098       Chain = Lo.getValue(1);
5099       InFlag = Lo.getValue(2);
5100       VA = RVLocs[++i]; // skip ahead to next loc
5101       SDValue Hi = DAG.getCopyFromReg(Chain, dl, VA.getLocReg(), MVT::i32,
5102                                       InFlag);
5103       Chain = Hi.getValue(1);
5104       InFlag = Hi.getValue(2);
5105       if (!Subtarget.isLittleEndian())
5106         std::swap (Lo, Hi);
5107       Val = DAG.getNode(PPCISD::BUILD_SPE64, dl, MVT::f64, Lo, Hi);
5108     } else {
5109       Val = DAG.getCopyFromReg(Chain, dl,
5110                                VA.getLocReg(), VA.getLocVT(), InFlag);
5111       Chain = Val.getValue(1);
5112       InFlag = Val.getValue(2);
5113     }
5114 
5115     switch (VA.getLocInfo()) {
5116     default: llvm_unreachable("Unknown loc info!");
5117     case CCValAssign::Full: break;
5118     case CCValAssign::AExt:
5119       Val = DAG.getNode(ISD::TRUNCATE, dl, VA.getValVT(), Val);
5120       break;
5121     case CCValAssign::ZExt:
5122       Val = DAG.getNode(ISD::AssertZext, dl, VA.getLocVT(), Val,
5123                         DAG.getValueType(VA.getValVT()));
5124       Val = DAG.getNode(ISD::TRUNCATE, dl, VA.getValVT(), Val);
5125       break;
5126     case CCValAssign::SExt:
5127       Val = DAG.getNode(ISD::AssertSext, dl, VA.getLocVT(), Val,
5128                         DAG.getValueType(VA.getValVT()));
5129       Val = DAG.getNode(ISD::TRUNCATE, dl, VA.getValVT(), Val);
5130       break;
5131     }
5132 
5133     InVals.push_back(Val);
5134   }
5135 
5136   return Chain;
5137 }
5138 
5139 static bool isIndirectCall(const SDValue &Callee, SelectionDAG &DAG,
5140                            const PPCSubtarget &Subtarget, bool isPatchPoint) {
5141   // PatchPoint calls are not indirect.
5142   if (isPatchPoint)
5143     return false;
5144 
5145   if (isFunctionGlobalAddress(Callee) || dyn_cast<ExternalSymbolSDNode>(Callee))
5146     return false;
5147 
5148   // Darwin, and 32-bit ELF can use a BLA. The descriptor based ABIs can not
5149   // becuase the immediate function pointer points to a descriptor instead of
5150   // a function entry point. The ELFv2 ABI cannot use a BLA because the function
5151   // pointer immediate points to the global entry point, while the BLA would
5152   // need to jump to the local entry point (see rL211174).
5153   if (!Subtarget.usesFunctionDescriptors() && !Subtarget.isELFv2ABI() &&
5154       isBLACompatibleAddress(Callee, DAG))
5155     return false;
5156 
5157   return true;
5158 }
5159 
5160 // AIX and 64-bit ELF ABIs w/o PCRel require a TOC save/restore around calls.
5161 static inline bool isTOCSaveRestoreRequired(const PPCSubtarget &Subtarget) {
5162   return Subtarget.isAIXABI() ||
5163          (Subtarget.is64BitELFABI() && !Subtarget.isUsingPCRelativeCalls());
5164 }
5165 
5166 static unsigned getCallOpcode(PPCTargetLowering::CallFlags CFlags,
5167                               const Function &Caller,
5168                               const SDValue &Callee,
5169                               const PPCSubtarget &Subtarget,
5170                               const TargetMachine &TM) {
5171   if (CFlags.IsTailCall)
5172     return PPCISD::TC_RETURN;
5173 
5174   // This is a call through a function pointer.
5175   if (CFlags.IsIndirect) {
5176     // AIX and the 64-bit ELF ABIs need to maintain the TOC pointer accross
5177     // indirect calls. The save of the caller's TOC pointer to the stack will be
5178     // inserted into the DAG as part of call lowering. The restore of the TOC
5179     // pointer is modeled by using a pseudo instruction for the call opcode that
5180     // represents the 2 instruction sequence of an indirect branch and link,
5181     // immediately followed by a load of the TOC pointer from the the stack save
5182     // slot into gpr2. For 64-bit ELFv2 ABI with PCRel, do not restore the TOC
5183     // as it is not saved or used.
5184     return isTOCSaveRestoreRequired(Subtarget) ? PPCISD::BCTRL_LOAD_TOC
5185                                                : PPCISD::BCTRL;
5186   }
5187 
5188   if (Subtarget.isUsingPCRelativeCalls()) {
5189     assert(Subtarget.is64BitELFABI() && "PC Relative is only on ELF ABI.");
5190     return PPCISD::CALL_NOTOC;
5191   }
5192 
5193   // The ABIs that maintain a TOC pointer accross calls need to have a nop
5194   // immediately following the call instruction if the caller and callee may
5195   // have different TOC bases. At link time if the linker determines the calls
5196   // may not share a TOC base, the call is redirected to a trampoline inserted
5197   // by the linker. The trampoline will (among other things) save the callers
5198   // TOC pointer at an ABI designated offset in the linkage area and the linker
5199   // will rewrite the nop to be a load of the TOC pointer from the linkage area
5200   // into gpr2.
5201   if (Subtarget.isAIXABI() || Subtarget.is64BitELFABI())
5202     return callsShareTOCBase(&Caller, Callee, TM) ? PPCISD::CALL
5203                                                   : PPCISD::CALL_NOP;
5204 
5205   return PPCISD::CALL;
5206 }
5207 
5208 static SDValue transformCallee(const SDValue &Callee, SelectionDAG &DAG,
5209                                const SDLoc &dl, const PPCSubtarget &Subtarget) {
5210   if (!Subtarget.usesFunctionDescriptors() && !Subtarget.isELFv2ABI())
5211     if (SDNode *Dest = isBLACompatibleAddress(Callee, DAG))
5212       return SDValue(Dest, 0);
5213 
5214   // Returns true if the callee is local, and false otherwise.
5215   auto isLocalCallee = [&]() {
5216     const GlobalAddressSDNode *G = dyn_cast<GlobalAddressSDNode>(Callee);
5217     const Module *Mod = DAG.getMachineFunction().getFunction().getParent();
5218     const GlobalValue *GV = G ? G->getGlobal() : nullptr;
5219 
5220     return DAG.getTarget().shouldAssumeDSOLocal(*Mod, GV) &&
5221            !dyn_cast_or_null<GlobalIFunc>(GV);
5222   };
5223 
5224   // The PLT is only used in 32-bit ELF PIC mode.  Attempting to use the PLT in
5225   // a static relocation model causes some versions of GNU LD (2.17.50, at
5226   // least) to force BSS-PLT, instead of secure-PLT, even if all objects are
5227   // built with secure-PLT.
5228   bool UsePlt =
5229       Subtarget.is32BitELFABI() && !isLocalCallee() &&
5230       Subtarget.getTargetMachine().getRelocationModel() == Reloc::PIC_;
5231 
5232   const auto getAIXFuncEntryPointSymbolSDNode = [&](const GlobalValue *GV) {
5233     const TargetMachine &TM = Subtarget.getTargetMachine();
5234     const TargetLoweringObjectFile *TLOF = TM.getObjFileLowering();
5235     MCSymbolXCOFF *S =
5236         cast<MCSymbolXCOFF>(TLOF->getFunctionEntryPointSymbol(GV, TM));
5237 
5238     MVT PtrVT = DAG.getTargetLoweringInfo().getPointerTy(DAG.getDataLayout());
5239     return DAG.getMCSymbol(S, PtrVT);
5240   };
5241 
5242   if (isFunctionGlobalAddress(Callee)) {
5243     const GlobalValue *GV = cast<GlobalAddressSDNode>(Callee)->getGlobal();
5244 
5245     if (Subtarget.isAIXABI()) {
5246       assert(!isa<GlobalIFunc>(GV) && "IFunc is not supported on AIX.");
5247       return getAIXFuncEntryPointSymbolSDNode(GV);
5248     }
5249     return DAG.getTargetGlobalAddress(GV, dl, Callee.getValueType(), 0,
5250                                       UsePlt ? PPCII::MO_PLT : 0);
5251   }
5252 
5253   if (ExternalSymbolSDNode *S = dyn_cast<ExternalSymbolSDNode>(Callee)) {
5254     const char *SymName = S->getSymbol();
5255     if (Subtarget.isAIXABI()) {
5256       // If there exists a user-declared function whose name is the same as the
5257       // ExternalSymbol's, then we pick up the user-declared version.
5258       const Module *Mod = DAG.getMachineFunction().getFunction().getParent();
5259       if (const Function *F =
5260               dyn_cast_or_null<Function>(Mod->getNamedValue(SymName)))
5261         return getAIXFuncEntryPointSymbolSDNode(F);
5262 
5263       // On AIX, direct function calls reference the symbol for the function's
5264       // entry point, which is named by prepending a "." before the function's
5265       // C-linkage name. A Qualname is returned here because an external
5266       // function entry point is a csect with XTY_ER property.
5267       const auto getExternalFunctionEntryPointSymbol = [&](StringRef SymName) {
5268         auto &Context = DAG.getMachineFunction().getMMI().getContext();
5269         MCSectionXCOFF *Sec = Context.getXCOFFSection(
5270             (Twine(".") + Twine(SymName)).str(), XCOFF::XMC_PR, XCOFF::XTY_ER,
5271             SectionKind::getMetadata());
5272         return Sec->getQualNameSymbol();
5273       };
5274 
5275       SymName = getExternalFunctionEntryPointSymbol(SymName)->getName().data();
5276     }
5277     return DAG.getTargetExternalSymbol(SymName, Callee.getValueType(),
5278                                        UsePlt ? PPCII::MO_PLT : 0);
5279   }
5280 
5281   // No transformation needed.
5282   assert(Callee.getNode() && "What no callee?");
5283   return Callee;
5284 }
5285 
5286 static SDValue getOutputChainFromCallSeq(SDValue CallSeqStart) {
5287   assert(CallSeqStart.getOpcode() == ISD::CALLSEQ_START &&
5288          "Expected a CALLSEQ_STARTSDNode.");
5289 
5290   // The last operand is the chain, except when the node has glue. If the node
5291   // has glue, then the last operand is the glue, and the chain is the second
5292   // last operand.
5293   SDValue LastValue = CallSeqStart.getValue(CallSeqStart->getNumValues() - 1);
5294   if (LastValue.getValueType() != MVT::Glue)
5295     return LastValue;
5296 
5297   return CallSeqStart.getValue(CallSeqStart->getNumValues() - 2);
5298 }
5299 
5300 // Creates the node that moves a functions address into the count register
5301 // to prepare for an indirect call instruction.
5302 static void prepareIndirectCall(SelectionDAG &DAG, SDValue &Callee,
5303                                 SDValue &Glue, SDValue &Chain,
5304                                 const SDLoc &dl) {
5305   SDValue MTCTROps[] = {Chain, Callee, Glue};
5306   EVT ReturnTypes[] = {MVT::Other, MVT::Glue};
5307   Chain = DAG.getNode(PPCISD::MTCTR, dl, makeArrayRef(ReturnTypes, 2),
5308                       makeArrayRef(MTCTROps, Glue.getNode() ? 3 : 2));
5309   // The glue is the second value produced.
5310   Glue = Chain.getValue(1);
5311 }
5312 
5313 static void prepareDescriptorIndirectCall(SelectionDAG &DAG, SDValue &Callee,
5314                                           SDValue &Glue, SDValue &Chain,
5315                                           SDValue CallSeqStart,
5316                                           const CallBase *CB, const SDLoc &dl,
5317                                           bool hasNest,
5318                                           const PPCSubtarget &Subtarget) {
5319   // Function pointers in the 64-bit SVR4 ABI do not point to the function
5320   // entry point, but to the function descriptor (the function entry point
5321   // address is part of the function descriptor though).
5322   // The function descriptor is a three doubleword structure with the
5323   // following fields: function entry point, TOC base address and
5324   // environment pointer.
5325   // Thus for a call through a function pointer, the following actions need
5326   // to be performed:
5327   //   1. Save the TOC of the caller in the TOC save area of its stack
5328   //      frame (this is done in LowerCall_Darwin() or LowerCall_64SVR4()).
5329   //   2. Load the address of the function entry point from the function
5330   //      descriptor.
5331   //   3. Load the TOC of the callee from the function descriptor into r2.
5332   //   4. Load the environment pointer from the function descriptor into
5333   //      r11.
5334   //   5. Branch to the function entry point address.
5335   //   6. On return of the callee, the TOC of the caller needs to be
5336   //      restored (this is done in FinishCall()).
5337   //
5338   // The loads are scheduled at the beginning of the call sequence, and the
5339   // register copies are flagged together to ensure that no other
5340   // operations can be scheduled in between. E.g. without flagging the
5341   // copies together, a TOC access in the caller could be scheduled between
5342   // the assignment of the callee TOC and the branch to the callee, which leads
5343   // to incorrect code.
5344 
5345   // Start by loading the function address from the descriptor.
5346   SDValue LDChain = getOutputChainFromCallSeq(CallSeqStart);
5347   auto MMOFlags = Subtarget.hasInvariantFunctionDescriptors()
5348                       ? (MachineMemOperand::MODereferenceable |
5349                          MachineMemOperand::MOInvariant)
5350                       : MachineMemOperand::MONone;
5351 
5352   MachinePointerInfo MPI(CB ? CB->getCalledOperand() : nullptr);
5353 
5354   // Registers used in building the DAG.
5355   const MCRegister EnvPtrReg = Subtarget.getEnvironmentPointerRegister();
5356   const MCRegister TOCReg = Subtarget.getTOCPointerRegister();
5357 
5358   // Offsets of descriptor members.
5359   const unsigned TOCAnchorOffset = Subtarget.descriptorTOCAnchorOffset();
5360   const unsigned EnvPtrOffset = Subtarget.descriptorEnvironmentPointerOffset();
5361 
5362   const MVT RegVT = Subtarget.isPPC64() ? MVT::i64 : MVT::i32;
5363   const unsigned Alignment = Subtarget.isPPC64() ? 8 : 4;
5364 
5365   // One load for the functions entry point address.
5366   SDValue LoadFuncPtr = DAG.getLoad(RegVT, dl, LDChain, Callee, MPI,
5367                                     Alignment, MMOFlags);
5368 
5369   // One for loading the TOC anchor for the module that contains the called
5370   // function.
5371   SDValue TOCOff = DAG.getIntPtrConstant(TOCAnchorOffset, dl);
5372   SDValue AddTOC = DAG.getNode(ISD::ADD, dl, RegVT, Callee, TOCOff);
5373   SDValue TOCPtr =
5374       DAG.getLoad(RegVT, dl, LDChain, AddTOC,
5375                   MPI.getWithOffset(TOCAnchorOffset), Alignment, MMOFlags);
5376 
5377   // One for loading the environment pointer.
5378   SDValue PtrOff = DAG.getIntPtrConstant(EnvPtrOffset, dl);
5379   SDValue AddPtr = DAG.getNode(ISD::ADD, dl, RegVT, Callee, PtrOff);
5380   SDValue LoadEnvPtr =
5381       DAG.getLoad(RegVT, dl, LDChain, AddPtr,
5382                   MPI.getWithOffset(EnvPtrOffset), Alignment, MMOFlags);
5383 
5384 
5385   // Then copy the newly loaded TOC anchor to the TOC pointer.
5386   SDValue TOCVal = DAG.getCopyToReg(Chain, dl, TOCReg, TOCPtr, Glue);
5387   Chain = TOCVal.getValue(0);
5388   Glue = TOCVal.getValue(1);
5389 
5390   // If the function call has an explicit 'nest' parameter, it takes the
5391   // place of the environment pointer.
5392   assert((!hasNest || !Subtarget.isAIXABI()) &&
5393          "Nest parameter is not supported on AIX.");
5394   if (!hasNest) {
5395     SDValue EnvVal = DAG.getCopyToReg(Chain, dl, EnvPtrReg, LoadEnvPtr, Glue);
5396     Chain = EnvVal.getValue(0);
5397     Glue = EnvVal.getValue(1);
5398   }
5399 
5400   // The rest of the indirect call sequence is the same as the non-descriptor
5401   // DAG.
5402   prepareIndirectCall(DAG, LoadFuncPtr, Glue, Chain, dl);
5403 }
5404 
5405 static void
5406 buildCallOperands(SmallVectorImpl<SDValue> &Ops,
5407                   PPCTargetLowering::CallFlags CFlags, const SDLoc &dl,
5408                   SelectionDAG &DAG,
5409                   SmallVector<std::pair<unsigned, SDValue>, 8> &RegsToPass,
5410                   SDValue Glue, SDValue Chain, SDValue &Callee, int SPDiff,
5411                   const PPCSubtarget &Subtarget) {
5412   const bool IsPPC64 = Subtarget.isPPC64();
5413   // MVT for a general purpose register.
5414   const MVT RegVT = IsPPC64 ? MVT::i64 : MVT::i32;
5415 
5416   // First operand is always the chain.
5417   Ops.push_back(Chain);
5418 
5419   // If it's a direct call pass the callee as the second operand.
5420   if (!CFlags.IsIndirect)
5421     Ops.push_back(Callee);
5422   else {
5423     assert(!CFlags.IsPatchPoint && "Patch point calls are not indirect.");
5424 
5425     // For the TOC based ABIs, we have saved the TOC pointer to the linkage area
5426     // on the stack (this would have been done in `LowerCall_64SVR4` or
5427     // `LowerCall_AIX`). The call instruction is a pseudo instruction that
5428     // represents both the indirect branch and a load that restores the TOC
5429     // pointer from the linkage area. The operand for the TOC restore is an add
5430     // of the TOC save offset to the stack pointer. This must be the second
5431     // operand: after the chain input but before any other variadic arguments.
5432     // For 64-bit ELFv2 ABI with PCRel, do not restore the TOC as it is not
5433     // saved or used.
5434     if (isTOCSaveRestoreRequired(Subtarget)) {
5435       const MCRegister StackPtrReg = Subtarget.getStackPointerRegister();
5436 
5437       SDValue StackPtr = DAG.getRegister(StackPtrReg, RegVT);
5438       unsigned TOCSaveOffset = Subtarget.getFrameLowering()->getTOCSaveOffset();
5439       SDValue TOCOff = DAG.getIntPtrConstant(TOCSaveOffset, dl);
5440       SDValue AddTOC = DAG.getNode(ISD::ADD, dl, RegVT, StackPtr, TOCOff);
5441       Ops.push_back(AddTOC);
5442     }
5443 
5444     // Add the register used for the environment pointer.
5445     if (Subtarget.usesFunctionDescriptors() && !CFlags.HasNest)
5446       Ops.push_back(DAG.getRegister(Subtarget.getEnvironmentPointerRegister(),
5447                                     RegVT));
5448 
5449 
5450     // Add CTR register as callee so a bctr can be emitted later.
5451     if (CFlags.IsTailCall)
5452       Ops.push_back(DAG.getRegister(IsPPC64 ? PPC::CTR8 : PPC::CTR, RegVT));
5453   }
5454 
5455   // If this is a tail call add stack pointer delta.
5456   if (CFlags.IsTailCall)
5457     Ops.push_back(DAG.getConstant(SPDiff, dl, MVT::i32));
5458 
5459   // Add argument registers to the end of the list so that they are known live
5460   // into the call.
5461   for (unsigned i = 0, e = RegsToPass.size(); i != e; ++i)
5462     Ops.push_back(DAG.getRegister(RegsToPass[i].first,
5463                                   RegsToPass[i].second.getValueType()));
5464 
5465   // We cannot add R2/X2 as an operand here for PATCHPOINT, because there is
5466   // no way to mark dependencies as implicit here.
5467   // We will add the R2/X2 dependency in EmitInstrWithCustomInserter.
5468   if ((Subtarget.is64BitELFABI() || Subtarget.isAIXABI()) &&
5469        !CFlags.IsPatchPoint && !Subtarget.isUsingPCRelativeCalls())
5470     Ops.push_back(DAG.getRegister(Subtarget.getTOCPointerRegister(), RegVT));
5471 
5472   // Add implicit use of CR bit 6 for 32-bit SVR4 vararg calls
5473   if (CFlags.IsVarArg && Subtarget.is32BitELFABI())
5474     Ops.push_back(DAG.getRegister(PPC::CR1EQ, MVT::i32));
5475 
5476   // Add a register mask operand representing the call-preserved registers.
5477   const TargetRegisterInfo *TRI = Subtarget.getRegisterInfo();
5478   const uint32_t *Mask =
5479       TRI->getCallPreservedMask(DAG.getMachineFunction(), CFlags.CallConv);
5480   assert(Mask && "Missing call preserved mask for calling convention");
5481   Ops.push_back(DAG.getRegisterMask(Mask));
5482 
5483   // If the glue is valid, it is the last operand.
5484   if (Glue.getNode())
5485     Ops.push_back(Glue);
5486 }
5487 
5488 SDValue PPCTargetLowering::FinishCall(
5489     CallFlags CFlags, const SDLoc &dl, SelectionDAG &DAG,
5490     SmallVector<std::pair<unsigned, SDValue>, 8> &RegsToPass, SDValue Glue,
5491     SDValue Chain, SDValue CallSeqStart, SDValue &Callee, int SPDiff,
5492     unsigned NumBytes, const SmallVectorImpl<ISD::InputArg> &Ins,
5493     SmallVectorImpl<SDValue> &InVals, const CallBase *CB) const {
5494 
5495   if ((Subtarget.is64BitELFABI() && !Subtarget.isUsingPCRelativeCalls()) ||
5496       Subtarget.isAIXABI())
5497     setUsesTOCBasePtr(DAG);
5498 
5499   unsigned CallOpc =
5500       getCallOpcode(CFlags, DAG.getMachineFunction().getFunction(), Callee,
5501                     Subtarget, DAG.getTarget());
5502 
5503   if (!CFlags.IsIndirect)
5504     Callee = transformCallee(Callee, DAG, dl, Subtarget);
5505   else if (Subtarget.usesFunctionDescriptors())
5506     prepareDescriptorIndirectCall(DAG, Callee, Glue, Chain, CallSeqStart, CB,
5507                                   dl, CFlags.HasNest, Subtarget);
5508   else
5509     prepareIndirectCall(DAG, Callee, Glue, Chain, dl);
5510 
5511   // Build the operand list for the call instruction.
5512   SmallVector<SDValue, 8> Ops;
5513   buildCallOperands(Ops, CFlags, dl, DAG, RegsToPass, Glue, Chain, Callee,
5514                     SPDiff, Subtarget);
5515 
5516   // Emit tail call.
5517   if (CFlags.IsTailCall) {
5518     // Indirect tail call when using PC Relative calls do not have the same
5519     // constraints.
5520     assert(((Callee.getOpcode() == ISD::Register &&
5521              cast<RegisterSDNode>(Callee)->getReg() == PPC::CTR) ||
5522             Callee.getOpcode() == ISD::TargetExternalSymbol ||
5523             Callee.getOpcode() == ISD::TargetGlobalAddress ||
5524             isa<ConstantSDNode>(Callee) ||
5525             (CFlags.IsIndirect && Subtarget.isUsingPCRelativeCalls())) &&
5526            "Expecting a global address, external symbol, absolute value, "
5527            "register or an indirect tail call when PC Relative calls are "
5528            "used.");
5529     // PC Relative calls also use TC_RETURN as the way to mark tail calls.
5530     assert(CallOpc == PPCISD::TC_RETURN &&
5531            "Unexpected call opcode for a tail call.");
5532     DAG.getMachineFunction().getFrameInfo().setHasTailCall();
5533     return DAG.getNode(CallOpc, dl, MVT::Other, Ops);
5534   }
5535 
5536   std::array<EVT, 2> ReturnTypes = {{MVT::Other, MVT::Glue}};
5537   Chain = DAG.getNode(CallOpc, dl, ReturnTypes, Ops);
5538   DAG.addNoMergeSiteInfo(Chain.getNode(), CFlags.NoMerge);
5539   Glue = Chain.getValue(1);
5540 
5541   // When performing tail call optimization the callee pops its arguments off
5542   // the stack. Account for this here so these bytes can be pushed back on in
5543   // PPCFrameLowering::eliminateCallFramePseudoInstr.
5544   int BytesCalleePops = (CFlags.CallConv == CallingConv::Fast &&
5545                          getTargetMachine().Options.GuaranteedTailCallOpt)
5546                             ? NumBytes
5547                             : 0;
5548 
5549   Chain = DAG.getCALLSEQ_END(Chain, DAG.getIntPtrConstant(NumBytes, dl, true),
5550                              DAG.getIntPtrConstant(BytesCalleePops, dl, true),
5551                              Glue, dl);
5552   Glue = Chain.getValue(1);
5553 
5554   return LowerCallResult(Chain, Glue, CFlags.CallConv, CFlags.IsVarArg, Ins, dl,
5555                          DAG, InVals);
5556 }
5557 
5558 SDValue
5559 PPCTargetLowering::LowerCall(TargetLowering::CallLoweringInfo &CLI,
5560                              SmallVectorImpl<SDValue> &InVals) const {
5561   SelectionDAG &DAG                     = CLI.DAG;
5562   SDLoc &dl                             = CLI.DL;
5563   SmallVectorImpl<ISD::OutputArg> &Outs = CLI.Outs;
5564   SmallVectorImpl<SDValue> &OutVals     = CLI.OutVals;
5565   SmallVectorImpl<ISD::InputArg> &Ins   = CLI.Ins;
5566   SDValue Chain                         = CLI.Chain;
5567   SDValue Callee                        = CLI.Callee;
5568   bool &isTailCall                      = CLI.IsTailCall;
5569   CallingConv::ID CallConv              = CLI.CallConv;
5570   bool isVarArg                         = CLI.IsVarArg;
5571   bool isPatchPoint                     = CLI.IsPatchPoint;
5572   const CallBase *CB                    = CLI.CB;
5573 
5574   if (isTailCall) {
5575     if (Subtarget.useLongCalls() && !(CB && CB->isMustTailCall()))
5576       isTailCall = false;
5577     else if (Subtarget.isSVR4ABI() && Subtarget.isPPC64())
5578       isTailCall = IsEligibleForTailCallOptimization_64SVR4(
5579           Callee, CallConv, CB, isVarArg, Outs, Ins, DAG);
5580     else
5581       isTailCall = IsEligibleForTailCallOptimization(Callee, CallConv, isVarArg,
5582                                                      Ins, DAG);
5583     if (isTailCall) {
5584       ++NumTailCalls;
5585       if (!getTargetMachine().Options.GuaranteedTailCallOpt)
5586         ++NumSiblingCalls;
5587 
5588       // PC Relative calls no longer guarantee that the callee is a Global
5589       // Address Node. The callee could be an indirect tail call in which
5590       // case the SDValue for the callee could be a load (to load the address
5591       // of a function pointer) or it may be a register copy (to move the
5592       // address of the callee from a function parameter into a virtual
5593       // register). It may also be an ExternalSymbolSDNode (ex memcopy).
5594       assert((Subtarget.isUsingPCRelativeCalls() ||
5595               isa<GlobalAddressSDNode>(Callee)) &&
5596              "Callee should be an llvm::Function object.");
5597 
5598       LLVM_DEBUG(dbgs() << "TCO caller: " << DAG.getMachineFunction().getName()
5599                         << "\nTCO callee: ");
5600       LLVM_DEBUG(Callee.dump());
5601     }
5602   }
5603 
5604   if (!isTailCall && CB && CB->isMustTailCall())
5605     report_fatal_error("failed to perform tail call elimination on a call "
5606                        "site marked musttail");
5607 
5608   // When long calls (i.e. indirect calls) are always used, calls are always
5609   // made via function pointer. If we have a function name, first translate it
5610   // into a pointer.
5611   if (Subtarget.useLongCalls() && isa<GlobalAddressSDNode>(Callee) &&
5612       !isTailCall)
5613     Callee = LowerGlobalAddress(Callee, DAG);
5614 
5615   CallFlags CFlags(
5616       CallConv, isTailCall, isVarArg, isPatchPoint,
5617       isIndirectCall(Callee, DAG, Subtarget, isPatchPoint),
5618       // hasNest
5619       Subtarget.is64BitELFABI() &&
5620           any_of(Outs, [](ISD::OutputArg Arg) { return Arg.Flags.isNest(); }),
5621       CLI.NoMerge);
5622 
5623   if (Subtarget.isSVR4ABI() && Subtarget.isPPC64())
5624     return LowerCall_64SVR4(Chain, Callee, CFlags, Outs, OutVals, Ins, dl, DAG,
5625                             InVals, CB);
5626 
5627   if (Subtarget.isSVR4ABI())
5628     return LowerCall_32SVR4(Chain, Callee, CFlags, Outs, OutVals, Ins, dl, DAG,
5629                             InVals, CB);
5630 
5631   if (Subtarget.isAIXABI())
5632     return LowerCall_AIX(Chain, Callee, CFlags, Outs, OutVals, Ins, dl, DAG,
5633                          InVals, CB);
5634 
5635   return LowerCall_Darwin(Chain, Callee, CFlags, Outs, OutVals, Ins, dl, DAG,
5636                           InVals, CB);
5637 }
5638 
5639 SDValue PPCTargetLowering::LowerCall_32SVR4(
5640     SDValue Chain, SDValue Callee, CallFlags CFlags,
5641     const SmallVectorImpl<ISD::OutputArg> &Outs,
5642     const SmallVectorImpl<SDValue> &OutVals,
5643     const SmallVectorImpl<ISD::InputArg> &Ins, const SDLoc &dl,
5644     SelectionDAG &DAG, SmallVectorImpl<SDValue> &InVals,
5645     const CallBase *CB) const {
5646   // See PPCTargetLowering::LowerFormalArguments_32SVR4() for a description
5647   // of the 32-bit SVR4 ABI stack frame layout.
5648 
5649   const CallingConv::ID CallConv = CFlags.CallConv;
5650   const bool IsVarArg = CFlags.IsVarArg;
5651   const bool IsTailCall = CFlags.IsTailCall;
5652 
5653   assert((CallConv == CallingConv::C ||
5654           CallConv == CallingConv::Cold ||
5655           CallConv == CallingConv::Fast) && "Unknown calling convention!");
5656 
5657   const Align PtrAlign(4);
5658 
5659   MachineFunction &MF = DAG.getMachineFunction();
5660 
5661   // Mark this function as potentially containing a function that contains a
5662   // tail call. As a consequence the frame pointer will be used for dynamicalloc
5663   // and restoring the callers stack pointer in this functions epilog. This is
5664   // done because by tail calling the called function might overwrite the value
5665   // in this function's (MF) stack pointer stack slot 0(SP).
5666   if (getTargetMachine().Options.GuaranteedTailCallOpt &&
5667       CallConv == CallingConv::Fast)
5668     MF.getInfo<PPCFunctionInfo>()->setHasFastCall();
5669 
5670   // Count how many bytes are to be pushed on the stack, including the linkage
5671   // area, parameter list area and the part of the local variable space which
5672   // contains copies of aggregates which are passed by value.
5673 
5674   // Assign locations to all of the outgoing arguments.
5675   SmallVector<CCValAssign, 16> ArgLocs;
5676   PPCCCState CCInfo(CallConv, IsVarArg, MF, ArgLocs, *DAG.getContext());
5677 
5678   // Reserve space for the linkage area on the stack.
5679   CCInfo.AllocateStack(Subtarget.getFrameLowering()->getLinkageSize(),
5680                        PtrAlign);
5681   if (useSoftFloat())
5682     CCInfo.PreAnalyzeCallOperands(Outs);
5683 
5684   if (IsVarArg) {
5685     // Handle fixed and variable vector arguments differently.
5686     // Fixed vector arguments go into registers as long as registers are
5687     // available. Variable vector arguments always go into memory.
5688     unsigned NumArgs = Outs.size();
5689 
5690     for (unsigned i = 0; i != NumArgs; ++i) {
5691       MVT ArgVT = Outs[i].VT;
5692       ISD::ArgFlagsTy ArgFlags = Outs[i].Flags;
5693       bool Result;
5694 
5695       if (Outs[i].IsFixed) {
5696         Result = CC_PPC32_SVR4(i, ArgVT, ArgVT, CCValAssign::Full, ArgFlags,
5697                                CCInfo);
5698       } else {
5699         Result = CC_PPC32_SVR4_VarArg(i, ArgVT, ArgVT, CCValAssign::Full,
5700                                       ArgFlags, CCInfo);
5701       }
5702 
5703       if (Result) {
5704 #ifndef NDEBUG
5705         errs() << "Call operand #" << i << " has unhandled type "
5706              << EVT(ArgVT).getEVTString() << "\n";
5707 #endif
5708         llvm_unreachable(nullptr);
5709       }
5710     }
5711   } else {
5712     // All arguments are treated the same.
5713     CCInfo.AnalyzeCallOperands(Outs, CC_PPC32_SVR4);
5714   }
5715   CCInfo.clearWasPPCF128();
5716 
5717   // Assign locations to all of the outgoing aggregate by value arguments.
5718   SmallVector<CCValAssign, 16> ByValArgLocs;
5719   CCState CCByValInfo(CallConv, IsVarArg, MF, ByValArgLocs, *DAG.getContext());
5720 
5721   // Reserve stack space for the allocations in CCInfo.
5722   CCByValInfo.AllocateStack(CCInfo.getNextStackOffset(), PtrAlign);
5723 
5724   CCByValInfo.AnalyzeCallOperands(Outs, CC_PPC32_SVR4_ByVal);
5725 
5726   // Size of the linkage area, parameter list area and the part of the local
5727   // space variable where copies of aggregates which are passed by value are
5728   // stored.
5729   unsigned NumBytes = CCByValInfo.getNextStackOffset();
5730 
5731   // Calculate by how many bytes the stack has to be adjusted in case of tail
5732   // call optimization.
5733   int SPDiff = CalculateTailCallSPDiff(DAG, IsTailCall, NumBytes);
5734 
5735   // Adjust the stack pointer for the new arguments...
5736   // These operations are automatically eliminated by the prolog/epilog pass
5737   Chain = DAG.getCALLSEQ_START(Chain, NumBytes, 0, dl);
5738   SDValue CallSeqStart = Chain;
5739 
5740   // Load the return address and frame pointer so it can be moved somewhere else
5741   // later.
5742   SDValue LROp, FPOp;
5743   Chain = EmitTailCallLoadFPAndRetAddr(DAG, SPDiff, Chain, LROp, FPOp, dl);
5744 
5745   // Set up a copy of the stack pointer for use loading and storing any
5746   // arguments that may not fit in the registers available for argument
5747   // passing.
5748   SDValue StackPtr = DAG.getRegister(PPC::R1, MVT::i32);
5749 
5750   SmallVector<std::pair<unsigned, SDValue>, 8> RegsToPass;
5751   SmallVector<TailCallArgumentInfo, 8> TailCallArguments;
5752   SmallVector<SDValue, 8> MemOpChains;
5753 
5754   bool seenFloatArg = false;
5755   // Walk the register/memloc assignments, inserting copies/loads.
5756   // i - Tracks the index into the list of registers allocated for the call
5757   // RealArgIdx - Tracks the index into the list of actual function arguments
5758   // j - Tracks the index into the list of byval arguments
5759   for (unsigned i = 0, RealArgIdx = 0, j = 0, e = ArgLocs.size();
5760        i != e;
5761        ++i, ++RealArgIdx) {
5762     CCValAssign &VA = ArgLocs[i];
5763     SDValue Arg = OutVals[RealArgIdx];
5764     ISD::ArgFlagsTy Flags = Outs[RealArgIdx].Flags;
5765 
5766     if (Flags.isByVal()) {
5767       // Argument is an aggregate which is passed by value, thus we need to
5768       // create a copy of it in the local variable space of the current stack
5769       // frame (which is the stack frame of the caller) and pass the address of
5770       // this copy to the callee.
5771       assert((j < ByValArgLocs.size()) && "Index out of bounds!");
5772       CCValAssign &ByValVA = ByValArgLocs[j++];
5773       assert((VA.getValNo() == ByValVA.getValNo()) && "ValNo mismatch!");
5774 
5775       // Memory reserved in the local variable space of the callers stack frame.
5776       unsigned LocMemOffset = ByValVA.getLocMemOffset();
5777 
5778       SDValue PtrOff = DAG.getIntPtrConstant(LocMemOffset, dl);
5779       PtrOff = DAG.getNode(ISD::ADD, dl, getPointerTy(MF.getDataLayout()),
5780                            StackPtr, PtrOff);
5781 
5782       // Create a copy of the argument in the local area of the current
5783       // stack frame.
5784       SDValue MemcpyCall =
5785         CreateCopyOfByValArgument(Arg, PtrOff,
5786                                   CallSeqStart.getNode()->getOperand(0),
5787                                   Flags, DAG, dl);
5788 
5789       // This must go outside the CALLSEQ_START..END.
5790       SDValue NewCallSeqStart = DAG.getCALLSEQ_START(MemcpyCall, NumBytes, 0,
5791                                                      SDLoc(MemcpyCall));
5792       DAG.ReplaceAllUsesWith(CallSeqStart.getNode(),
5793                              NewCallSeqStart.getNode());
5794       Chain = CallSeqStart = NewCallSeqStart;
5795 
5796       // Pass the address of the aggregate copy on the stack either in a
5797       // physical register or in the parameter list area of the current stack
5798       // frame to the callee.
5799       Arg = PtrOff;
5800     }
5801 
5802     // When useCRBits() is true, there can be i1 arguments.
5803     // It is because getRegisterType(MVT::i1) => MVT::i1,
5804     // and for other integer types getRegisterType() => MVT::i32.
5805     // Extend i1 and ensure callee will get i32.
5806     if (Arg.getValueType() == MVT::i1)
5807       Arg = DAG.getNode(Flags.isSExt() ? ISD::SIGN_EXTEND : ISD::ZERO_EXTEND,
5808                         dl, MVT::i32, Arg);
5809 
5810     if (VA.isRegLoc()) {
5811       seenFloatArg |= VA.getLocVT().isFloatingPoint();
5812       // Put argument in a physical register.
5813       if (Subtarget.hasSPE() && Arg.getValueType() == MVT::f64) {
5814         bool IsLE = Subtarget.isLittleEndian();
5815         SDValue SVal = DAG.getNode(PPCISD::EXTRACT_SPE, dl, MVT::i32, Arg,
5816                         DAG.getIntPtrConstant(IsLE ? 0 : 1, dl));
5817         RegsToPass.push_back(std::make_pair(VA.getLocReg(), SVal.getValue(0)));
5818         SVal = DAG.getNode(PPCISD::EXTRACT_SPE, dl, MVT::i32, Arg,
5819                            DAG.getIntPtrConstant(IsLE ? 1 : 0, dl));
5820         RegsToPass.push_back(std::make_pair(ArgLocs[++i].getLocReg(),
5821                              SVal.getValue(0)));
5822       } else
5823         RegsToPass.push_back(std::make_pair(VA.getLocReg(), Arg));
5824     } else {
5825       // Put argument in the parameter list area of the current stack frame.
5826       assert(VA.isMemLoc());
5827       unsigned LocMemOffset = VA.getLocMemOffset();
5828 
5829       if (!IsTailCall) {
5830         SDValue PtrOff = DAG.getIntPtrConstant(LocMemOffset, dl);
5831         PtrOff = DAG.getNode(ISD::ADD, dl, getPointerTy(MF.getDataLayout()),
5832                              StackPtr, PtrOff);
5833 
5834         MemOpChains.push_back(
5835             DAG.getStore(Chain, dl, Arg, PtrOff, MachinePointerInfo()));
5836       } else {
5837         // Calculate and remember argument location.
5838         CalculateTailCallArgDest(DAG, MF, false, Arg, SPDiff, LocMemOffset,
5839                                  TailCallArguments);
5840       }
5841     }
5842   }
5843 
5844   if (!MemOpChains.empty())
5845     Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, MemOpChains);
5846 
5847   // Build a sequence of copy-to-reg nodes chained together with token chain
5848   // and flag operands which copy the outgoing args into the appropriate regs.
5849   SDValue InFlag;
5850   for (unsigned i = 0, e = RegsToPass.size(); i != e; ++i) {
5851     Chain = DAG.getCopyToReg(Chain, dl, RegsToPass[i].first,
5852                              RegsToPass[i].second, InFlag);
5853     InFlag = Chain.getValue(1);
5854   }
5855 
5856   // Set CR bit 6 to true if this is a vararg call with floating args passed in
5857   // registers.
5858   if (IsVarArg) {
5859     SDVTList VTs = DAG.getVTList(MVT::Other, MVT::Glue);
5860     SDValue Ops[] = { Chain, InFlag };
5861 
5862     Chain = DAG.getNode(seenFloatArg ? PPCISD::CR6SET : PPCISD::CR6UNSET,
5863                         dl, VTs, makeArrayRef(Ops, InFlag.getNode() ? 2 : 1));
5864 
5865     InFlag = Chain.getValue(1);
5866   }
5867 
5868   if (IsTailCall)
5869     PrepareTailCall(DAG, InFlag, Chain, dl, SPDiff, NumBytes, LROp, FPOp,
5870                     TailCallArguments);
5871 
5872   return FinishCall(CFlags, dl, DAG, RegsToPass, InFlag, Chain, CallSeqStart,
5873                     Callee, SPDiff, NumBytes, Ins, InVals, CB);
5874 }
5875 
5876 // Copy an argument into memory, being careful to do this outside the
5877 // call sequence for the call to which the argument belongs.
5878 SDValue PPCTargetLowering::createMemcpyOutsideCallSeq(
5879     SDValue Arg, SDValue PtrOff, SDValue CallSeqStart, ISD::ArgFlagsTy Flags,
5880     SelectionDAG &DAG, const SDLoc &dl) const {
5881   SDValue MemcpyCall = CreateCopyOfByValArgument(Arg, PtrOff,
5882                         CallSeqStart.getNode()->getOperand(0),
5883                         Flags, DAG, dl);
5884   // The MEMCPY must go outside the CALLSEQ_START..END.
5885   int64_t FrameSize = CallSeqStart.getConstantOperandVal(1);
5886   SDValue NewCallSeqStart = DAG.getCALLSEQ_START(MemcpyCall, FrameSize, 0,
5887                                                  SDLoc(MemcpyCall));
5888   DAG.ReplaceAllUsesWith(CallSeqStart.getNode(),
5889                          NewCallSeqStart.getNode());
5890   return NewCallSeqStart;
5891 }
5892 
5893 SDValue PPCTargetLowering::LowerCall_64SVR4(
5894     SDValue Chain, SDValue Callee, CallFlags CFlags,
5895     const SmallVectorImpl<ISD::OutputArg> &Outs,
5896     const SmallVectorImpl<SDValue> &OutVals,
5897     const SmallVectorImpl<ISD::InputArg> &Ins, const SDLoc &dl,
5898     SelectionDAG &DAG, SmallVectorImpl<SDValue> &InVals,
5899     const CallBase *CB) const {
5900   bool isELFv2ABI = Subtarget.isELFv2ABI();
5901   bool isLittleEndian = Subtarget.isLittleEndian();
5902   unsigned NumOps = Outs.size();
5903   bool IsSibCall = false;
5904   bool IsFastCall = CFlags.CallConv == CallingConv::Fast;
5905 
5906   EVT PtrVT = getPointerTy(DAG.getDataLayout());
5907   unsigned PtrByteSize = 8;
5908 
5909   MachineFunction &MF = DAG.getMachineFunction();
5910 
5911   if (CFlags.IsTailCall && !getTargetMachine().Options.GuaranteedTailCallOpt)
5912     IsSibCall = true;
5913 
5914   // Mark this function as potentially containing a function that contains a
5915   // tail call. As a consequence the frame pointer will be used for dynamicalloc
5916   // and restoring the callers stack pointer in this functions epilog. This is
5917   // done because by tail calling the called function might overwrite the value
5918   // in this function's (MF) stack pointer stack slot 0(SP).
5919   if (getTargetMachine().Options.GuaranteedTailCallOpt && IsFastCall)
5920     MF.getInfo<PPCFunctionInfo>()->setHasFastCall();
5921 
5922   assert(!(IsFastCall && CFlags.IsVarArg) &&
5923          "fastcc not supported on varargs functions");
5924 
5925   // Count how many bytes are to be pushed on the stack, including the linkage
5926   // area, and parameter passing area.  On ELFv1, the linkage area is 48 bytes
5927   // reserved space for [SP][CR][LR][2 x unused][TOC]; on ELFv2, the linkage
5928   // area is 32 bytes reserved space for [SP][CR][LR][TOC].
5929   unsigned LinkageSize = Subtarget.getFrameLowering()->getLinkageSize();
5930   unsigned NumBytes = LinkageSize;
5931   unsigned GPR_idx = 0, FPR_idx = 0, VR_idx = 0;
5932 
5933   static const MCPhysReg GPR[] = {
5934     PPC::X3, PPC::X4, PPC::X5, PPC::X6,
5935     PPC::X7, PPC::X8, PPC::X9, PPC::X10,
5936   };
5937   static const MCPhysReg VR[] = {
5938     PPC::V2, PPC::V3, PPC::V4, PPC::V5, PPC::V6, PPC::V7, PPC::V8,
5939     PPC::V9, PPC::V10, PPC::V11, PPC::V12, PPC::V13
5940   };
5941 
5942   const unsigned NumGPRs = array_lengthof(GPR);
5943   const unsigned NumFPRs = useSoftFloat() ? 0 : 13;
5944   const unsigned NumVRs  = array_lengthof(VR);
5945 
5946   // On ELFv2, we can avoid allocating the parameter area if all the arguments
5947   // can be passed to the callee in registers.
5948   // For the fast calling convention, there is another check below.
5949   // Note: We should keep consistent with LowerFormalArguments_64SVR4()
5950   bool HasParameterArea = !isELFv2ABI || CFlags.IsVarArg || IsFastCall;
5951   if (!HasParameterArea) {
5952     unsigned ParamAreaSize = NumGPRs * PtrByteSize;
5953     unsigned AvailableFPRs = NumFPRs;
5954     unsigned AvailableVRs = NumVRs;
5955     unsigned NumBytesTmp = NumBytes;
5956     for (unsigned i = 0; i != NumOps; ++i) {
5957       if (Outs[i].Flags.isNest()) continue;
5958       if (CalculateStackSlotUsed(Outs[i].VT, Outs[i].ArgVT, Outs[i].Flags,
5959                                  PtrByteSize, LinkageSize, ParamAreaSize,
5960                                  NumBytesTmp, AvailableFPRs, AvailableVRs))
5961         HasParameterArea = true;
5962     }
5963   }
5964 
5965   // When using the fast calling convention, we don't provide backing for
5966   // arguments that will be in registers.
5967   unsigned NumGPRsUsed = 0, NumFPRsUsed = 0, NumVRsUsed = 0;
5968 
5969   // Avoid allocating parameter area for fastcc functions if all the arguments
5970   // can be passed in the registers.
5971   if (IsFastCall)
5972     HasParameterArea = false;
5973 
5974   // Add up all the space actually used.
5975   for (unsigned i = 0; i != NumOps; ++i) {
5976     ISD::ArgFlagsTy Flags = Outs[i].Flags;
5977     EVT ArgVT = Outs[i].VT;
5978     EVT OrigVT = Outs[i].ArgVT;
5979 
5980     if (Flags.isNest())
5981       continue;
5982 
5983     if (IsFastCall) {
5984       if (Flags.isByVal()) {
5985         NumGPRsUsed += (Flags.getByValSize()+7)/8;
5986         if (NumGPRsUsed > NumGPRs)
5987           HasParameterArea = true;
5988       } else {
5989         switch (ArgVT.getSimpleVT().SimpleTy) {
5990         default: llvm_unreachable("Unexpected ValueType for argument!");
5991         case MVT::i1:
5992         case MVT::i32:
5993         case MVT::i64:
5994           if (++NumGPRsUsed <= NumGPRs)
5995             continue;
5996           break;
5997         case MVT::v4i32:
5998         case MVT::v8i16:
5999         case MVT::v16i8:
6000         case MVT::v2f64:
6001         case MVT::v2i64:
6002         case MVT::v1i128:
6003         case MVT::f128:
6004           if (++NumVRsUsed <= NumVRs)
6005             continue;
6006           break;
6007         case MVT::v4f32:
6008           if (++NumVRsUsed <= NumVRs)
6009             continue;
6010           break;
6011         case MVT::f32:
6012         case MVT::f64:
6013           if (++NumFPRsUsed <= NumFPRs)
6014             continue;
6015           break;
6016         }
6017         HasParameterArea = true;
6018       }
6019     }
6020 
6021     /* Respect alignment of argument on the stack.  */
6022     auto Alignement =
6023         CalculateStackSlotAlignment(ArgVT, OrigVT, Flags, PtrByteSize);
6024     NumBytes = alignTo(NumBytes, Alignement);
6025 
6026     NumBytes += CalculateStackSlotSize(ArgVT, Flags, PtrByteSize);
6027     if (Flags.isInConsecutiveRegsLast())
6028       NumBytes = ((NumBytes + PtrByteSize - 1)/PtrByteSize) * PtrByteSize;
6029   }
6030 
6031   unsigned NumBytesActuallyUsed = NumBytes;
6032 
6033   // In the old ELFv1 ABI,
6034   // the prolog code of the callee may store up to 8 GPR argument registers to
6035   // the stack, allowing va_start to index over them in memory if its varargs.
6036   // Because we cannot tell if this is needed on the caller side, we have to
6037   // conservatively assume that it is needed.  As such, make sure we have at
6038   // least enough stack space for the caller to store the 8 GPRs.
6039   // In the ELFv2 ABI, we allocate the parameter area iff a callee
6040   // really requires memory operands, e.g. a vararg function.
6041   if (HasParameterArea)
6042     NumBytes = std::max(NumBytes, LinkageSize + 8 * PtrByteSize);
6043   else
6044     NumBytes = LinkageSize;
6045 
6046   // Tail call needs the stack to be aligned.
6047   if (getTargetMachine().Options.GuaranteedTailCallOpt && IsFastCall)
6048     NumBytes = EnsureStackAlignment(Subtarget.getFrameLowering(), NumBytes);
6049 
6050   int SPDiff = 0;
6051 
6052   // Calculate by how many bytes the stack has to be adjusted in case of tail
6053   // call optimization.
6054   if (!IsSibCall)
6055     SPDiff = CalculateTailCallSPDiff(DAG, CFlags.IsTailCall, NumBytes);
6056 
6057   // To protect arguments on the stack from being clobbered in a tail call,
6058   // force all the loads to happen before doing any other lowering.
6059   if (CFlags.IsTailCall)
6060     Chain = DAG.getStackArgumentTokenFactor(Chain);
6061 
6062   // Adjust the stack pointer for the new arguments...
6063   // These operations are automatically eliminated by the prolog/epilog pass
6064   if (!IsSibCall)
6065     Chain = DAG.getCALLSEQ_START(Chain, NumBytes, 0, dl);
6066   SDValue CallSeqStart = Chain;
6067 
6068   // Load the return address and frame pointer so it can be move somewhere else
6069   // later.
6070   SDValue LROp, FPOp;
6071   Chain = EmitTailCallLoadFPAndRetAddr(DAG, SPDiff, Chain, LROp, FPOp, dl);
6072 
6073   // Set up a copy of the stack pointer for use loading and storing any
6074   // arguments that may not fit in the registers available for argument
6075   // passing.
6076   SDValue StackPtr = DAG.getRegister(PPC::X1, MVT::i64);
6077 
6078   // Figure out which arguments are going to go in registers, and which in
6079   // memory.  Also, if this is a vararg function, floating point operations
6080   // must be stored to our stack, and loaded into integer regs as well, if
6081   // any integer regs are available for argument passing.
6082   unsigned ArgOffset = LinkageSize;
6083 
6084   SmallVector<std::pair<unsigned, SDValue>, 8> RegsToPass;
6085   SmallVector<TailCallArgumentInfo, 8> TailCallArguments;
6086 
6087   SmallVector<SDValue, 8> MemOpChains;
6088   for (unsigned i = 0; i != NumOps; ++i) {
6089     SDValue Arg = OutVals[i];
6090     ISD::ArgFlagsTy Flags = Outs[i].Flags;
6091     EVT ArgVT = Outs[i].VT;
6092     EVT OrigVT = Outs[i].ArgVT;
6093 
6094     // PtrOff will be used to store the current argument to the stack if a
6095     // register cannot be found for it.
6096     SDValue PtrOff;
6097 
6098     // We re-align the argument offset for each argument, except when using the
6099     // fast calling convention, when we need to make sure we do that only when
6100     // we'll actually use a stack slot.
6101     auto ComputePtrOff = [&]() {
6102       /* Respect alignment of argument on the stack.  */
6103       auto Alignment =
6104           CalculateStackSlotAlignment(ArgVT, OrigVT, Flags, PtrByteSize);
6105       ArgOffset = alignTo(ArgOffset, Alignment);
6106 
6107       PtrOff = DAG.getConstant(ArgOffset, dl, StackPtr.getValueType());
6108 
6109       PtrOff = DAG.getNode(ISD::ADD, dl, PtrVT, StackPtr, PtrOff);
6110     };
6111 
6112     if (!IsFastCall) {
6113       ComputePtrOff();
6114 
6115       /* Compute GPR index associated with argument offset.  */
6116       GPR_idx = (ArgOffset - LinkageSize) / PtrByteSize;
6117       GPR_idx = std::min(GPR_idx, NumGPRs);
6118     }
6119 
6120     // Promote integers to 64-bit values.
6121     if (Arg.getValueType() == MVT::i32 || Arg.getValueType() == MVT::i1) {
6122       // FIXME: Should this use ANY_EXTEND if neither sext nor zext?
6123       unsigned ExtOp = Flags.isSExt() ? ISD::SIGN_EXTEND : ISD::ZERO_EXTEND;
6124       Arg = DAG.getNode(ExtOp, dl, MVT::i64, Arg);
6125     }
6126 
6127     // FIXME memcpy is used way more than necessary.  Correctness first.
6128     // Note: "by value" is code for passing a structure by value, not
6129     // basic types.
6130     if (Flags.isByVal()) {
6131       // Note: Size includes alignment padding, so
6132       //   struct x { short a; char b; }
6133       // will have Size = 4.  With #pragma pack(1), it will have Size = 3.
6134       // These are the proper values we need for right-justifying the
6135       // aggregate in a parameter register.
6136       unsigned Size = Flags.getByValSize();
6137 
6138       // An empty aggregate parameter takes up no storage and no
6139       // registers.
6140       if (Size == 0)
6141         continue;
6142 
6143       if (IsFastCall)
6144         ComputePtrOff();
6145 
6146       // All aggregates smaller than 8 bytes must be passed right-justified.
6147       if (Size==1 || Size==2 || Size==4) {
6148         EVT VT = (Size==1) ? MVT::i8 : ((Size==2) ? MVT::i16 : MVT::i32);
6149         if (GPR_idx != NumGPRs) {
6150           SDValue Load = DAG.getExtLoad(ISD::EXTLOAD, dl, PtrVT, Chain, Arg,
6151                                         MachinePointerInfo(), VT);
6152           MemOpChains.push_back(Load.getValue(1));
6153           RegsToPass.push_back(std::make_pair(GPR[GPR_idx++], Load));
6154 
6155           ArgOffset += PtrByteSize;
6156           continue;
6157         }
6158       }
6159 
6160       if (GPR_idx == NumGPRs && Size < 8) {
6161         SDValue AddPtr = PtrOff;
6162         if (!isLittleEndian) {
6163           SDValue Const = DAG.getConstant(PtrByteSize - Size, dl,
6164                                           PtrOff.getValueType());
6165           AddPtr = DAG.getNode(ISD::ADD, dl, PtrVT, PtrOff, Const);
6166         }
6167         Chain = CallSeqStart = createMemcpyOutsideCallSeq(Arg, AddPtr,
6168                                                           CallSeqStart,
6169                                                           Flags, DAG, dl);
6170         ArgOffset += PtrByteSize;
6171         continue;
6172       }
6173       // Copy entire object into memory.  There are cases where gcc-generated
6174       // code assumes it is there, even if it could be put entirely into
6175       // registers.  (This is not what the doc says.)
6176 
6177       // FIXME: The above statement is likely due to a misunderstanding of the
6178       // documents.  All arguments must be copied into the parameter area BY
6179       // THE CALLEE in the event that the callee takes the address of any
6180       // formal argument.  That has not yet been implemented.  However, it is
6181       // reasonable to use the stack area as a staging area for the register
6182       // load.
6183 
6184       // Skip this for small aggregates, as we will use the same slot for a
6185       // right-justified copy, below.
6186       if (Size >= 8)
6187         Chain = CallSeqStart = createMemcpyOutsideCallSeq(Arg, PtrOff,
6188                                                           CallSeqStart,
6189                                                           Flags, DAG, dl);
6190 
6191       // When a register is available, pass a small aggregate right-justified.
6192       if (Size < 8 && GPR_idx != NumGPRs) {
6193         // The easiest way to get this right-justified in a register
6194         // is to copy the structure into the rightmost portion of a
6195         // local variable slot, then load the whole slot into the
6196         // register.
6197         // FIXME: The memcpy seems to produce pretty awful code for
6198         // small aggregates, particularly for packed ones.
6199         // FIXME: It would be preferable to use the slot in the
6200         // parameter save area instead of a new local variable.
6201         SDValue AddPtr = PtrOff;
6202         if (!isLittleEndian) {
6203           SDValue Const = DAG.getConstant(8 - Size, dl, PtrOff.getValueType());
6204           AddPtr = DAG.getNode(ISD::ADD, dl, PtrVT, PtrOff, Const);
6205         }
6206         Chain = CallSeqStart = createMemcpyOutsideCallSeq(Arg, AddPtr,
6207                                                           CallSeqStart,
6208                                                           Flags, DAG, dl);
6209 
6210         // Load the slot into the register.
6211         SDValue Load =
6212             DAG.getLoad(PtrVT, dl, Chain, PtrOff, MachinePointerInfo());
6213         MemOpChains.push_back(Load.getValue(1));
6214         RegsToPass.push_back(std::make_pair(GPR[GPR_idx++], Load));
6215 
6216         // Done with this argument.
6217         ArgOffset += PtrByteSize;
6218         continue;
6219       }
6220 
6221       // For aggregates larger than PtrByteSize, copy the pieces of the
6222       // object that fit into registers from the parameter save area.
6223       for (unsigned j=0; j<Size; j+=PtrByteSize) {
6224         SDValue Const = DAG.getConstant(j, dl, PtrOff.getValueType());
6225         SDValue AddArg = DAG.getNode(ISD::ADD, dl, PtrVT, Arg, Const);
6226         if (GPR_idx != NumGPRs) {
6227           SDValue Load =
6228               DAG.getLoad(PtrVT, dl, Chain, AddArg, MachinePointerInfo());
6229           MemOpChains.push_back(Load.getValue(1));
6230           RegsToPass.push_back(std::make_pair(GPR[GPR_idx++], Load));
6231           ArgOffset += PtrByteSize;
6232         } else {
6233           ArgOffset += ((Size - j + PtrByteSize-1)/PtrByteSize)*PtrByteSize;
6234           break;
6235         }
6236       }
6237       continue;
6238     }
6239 
6240     switch (Arg.getSimpleValueType().SimpleTy) {
6241     default: llvm_unreachable("Unexpected ValueType for argument!");
6242     case MVT::i1:
6243     case MVT::i32:
6244     case MVT::i64:
6245       if (Flags.isNest()) {
6246         // The 'nest' parameter, if any, is passed in R11.
6247         RegsToPass.push_back(std::make_pair(PPC::X11, Arg));
6248         break;
6249       }
6250 
6251       // These can be scalar arguments or elements of an integer array type
6252       // passed directly.  Clang may use those instead of "byval" aggregate
6253       // types to avoid forcing arguments to memory unnecessarily.
6254       if (GPR_idx != NumGPRs) {
6255         RegsToPass.push_back(std::make_pair(GPR[GPR_idx++], Arg));
6256       } else {
6257         if (IsFastCall)
6258           ComputePtrOff();
6259 
6260         assert(HasParameterArea &&
6261                "Parameter area must exist to pass an argument in memory.");
6262         LowerMemOpCallTo(DAG, MF, Chain, Arg, PtrOff, SPDiff, ArgOffset,
6263                          true, CFlags.IsTailCall, false, MemOpChains,
6264                          TailCallArguments, dl);
6265         if (IsFastCall)
6266           ArgOffset += PtrByteSize;
6267       }
6268       if (!IsFastCall)
6269         ArgOffset += PtrByteSize;
6270       break;
6271     case MVT::f32:
6272     case MVT::f64: {
6273       // These can be scalar arguments or elements of a float array type
6274       // passed directly.  The latter are used to implement ELFv2 homogenous
6275       // float aggregates.
6276 
6277       // Named arguments go into FPRs first, and once they overflow, the
6278       // remaining arguments go into GPRs and then the parameter save area.
6279       // Unnamed arguments for vararg functions always go to GPRs and
6280       // then the parameter save area.  For now, put all arguments to vararg
6281       // routines always in both locations (FPR *and* GPR or stack slot).
6282       bool NeedGPROrStack = CFlags.IsVarArg || FPR_idx == NumFPRs;
6283       bool NeededLoad = false;
6284 
6285       // First load the argument into the next available FPR.
6286       if (FPR_idx != NumFPRs)
6287         RegsToPass.push_back(std::make_pair(FPR[FPR_idx++], Arg));
6288 
6289       // Next, load the argument into GPR or stack slot if needed.
6290       if (!NeedGPROrStack)
6291         ;
6292       else if (GPR_idx != NumGPRs && !IsFastCall) {
6293         // FIXME: We may want to re-enable this for CallingConv::Fast on the P8
6294         // once we support fp <-> gpr moves.
6295 
6296         // In the non-vararg case, this can only ever happen in the
6297         // presence of f32 array types, since otherwise we never run
6298         // out of FPRs before running out of GPRs.
6299         SDValue ArgVal;
6300 
6301         // Double values are always passed in a single GPR.
6302         if (Arg.getValueType() != MVT::f32) {
6303           ArgVal = DAG.getNode(ISD::BITCAST, dl, MVT::i64, Arg);
6304 
6305         // Non-array float values are extended and passed in a GPR.
6306         } else if (!Flags.isInConsecutiveRegs()) {
6307           ArgVal = DAG.getNode(ISD::BITCAST, dl, MVT::i32, Arg);
6308           ArgVal = DAG.getNode(ISD::ANY_EXTEND, dl, MVT::i64, ArgVal);
6309 
6310         // If we have an array of floats, we collect every odd element
6311         // together with its predecessor into one GPR.
6312         } else if (ArgOffset % PtrByteSize != 0) {
6313           SDValue Lo, Hi;
6314           Lo = DAG.getNode(ISD::BITCAST, dl, MVT::i32, OutVals[i - 1]);
6315           Hi = DAG.getNode(ISD::BITCAST, dl, MVT::i32, Arg);
6316           if (!isLittleEndian)
6317             std::swap(Lo, Hi);
6318           ArgVal = DAG.getNode(ISD::BUILD_PAIR, dl, MVT::i64, Lo, Hi);
6319 
6320         // The final element, if even, goes into the first half of a GPR.
6321         } else if (Flags.isInConsecutiveRegsLast()) {
6322           ArgVal = DAG.getNode(ISD::BITCAST, dl, MVT::i32, Arg);
6323           ArgVal = DAG.getNode(ISD::ANY_EXTEND, dl, MVT::i64, ArgVal);
6324           if (!isLittleEndian)
6325             ArgVal = DAG.getNode(ISD::SHL, dl, MVT::i64, ArgVal,
6326                                  DAG.getConstant(32, dl, MVT::i32));
6327 
6328         // Non-final even elements are skipped; they will be handled
6329         // together the with subsequent argument on the next go-around.
6330         } else
6331           ArgVal = SDValue();
6332 
6333         if (ArgVal.getNode())
6334           RegsToPass.push_back(std::make_pair(GPR[GPR_idx++], ArgVal));
6335       } else {
6336         if (IsFastCall)
6337           ComputePtrOff();
6338 
6339         // Single-precision floating-point values are mapped to the
6340         // second (rightmost) word of the stack doubleword.
6341         if (Arg.getValueType() == MVT::f32 &&
6342             !isLittleEndian && !Flags.isInConsecutiveRegs()) {
6343           SDValue ConstFour = DAG.getConstant(4, dl, PtrOff.getValueType());
6344           PtrOff = DAG.getNode(ISD::ADD, dl, PtrVT, PtrOff, ConstFour);
6345         }
6346 
6347         assert(HasParameterArea &&
6348                "Parameter area must exist to pass an argument in memory.");
6349         LowerMemOpCallTo(DAG, MF, Chain, Arg, PtrOff, SPDiff, ArgOffset,
6350                          true, CFlags.IsTailCall, false, MemOpChains,
6351                          TailCallArguments, dl);
6352 
6353         NeededLoad = true;
6354       }
6355       // When passing an array of floats, the array occupies consecutive
6356       // space in the argument area; only round up to the next doubleword
6357       // at the end of the array.  Otherwise, each float takes 8 bytes.
6358       if (!IsFastCall || NeededLoad) {
6359         ArgOffset += (Arg.getValueType() == MVT::f32 &&
6360                       Flags.isInConsecutiveRegs()) ? 4 : 8;
6361         if (Flags.isInConsecutiveRegsLast())
6362           ArgOffset = ((ArgOffset + PtrByteSize - 1)/PtrByteSize) * PtrByteSize;
6363       }
6364       break;
6365     }
6366     case MVT::v4f32:
6367     case MVT::v4i32:
6368     case MVT::v8i16:
6369     case MVT::v16i8:
6370     case MVT::v2f64:
6371     case MVT::v2i64:
6372     case MVT::v1i128:
6373     case MVT::f128:
6374       // These can be scalar arguments or elements of a vector array type
6375       // passed directly.  The latter are used to implement ELFv2 homogenous
6376       // vector aggregates.
6377 
6378       // For a varargs call, named arguments go into VRs or on the stack as
6379       // usual; unnamed arguments always go to the stack or the corresponding
6380       // GPRs when within range.  For now, we always put the value in both
6381       // locations (or even all three).
6382       if (CFlags.IsVarArg) {
6383         assert(HasParameterArea &&
6384                "Parameter area must exist if we have a varargs call.");
6385         // We could elide this store in the case where the object fits
6386         // entirely in R registers.  Maybe later.
6387         SDValue Store =
6388             DAG.getStore(Chain, dl, Arg, PtrOff, MachinePointerInfo());
6389         MemOpChains.push_back(Store);
6390         if (VR_idx != NumVRs) {
6391           SDValue Load =
6392               DAG.getLoad(MVT::v4f32, dl, Store, PtrOff, MachinePointerInfo());
6393           MemOpChains.push_back(Load.getValue(1));
6394           RegsToPass.push_back(std::make_pair(VR[VR_idx++], Load));
6395         }
6396         ArgOffset += 16;
6397         for (unsigned i=0; i<16; i+=PtrByteSize) {
6398           if (GPR_idx == NumGPRs)
6399             break;
6400           SDValue Ix = DAG.getNode(ISD::ADD, dl, PtrVT, PtrOff,
6401                                    DAG.getConstant(i, dl, PtrVT));
6402           SDValue Load =
6403               DAG.getLoad(PtrVT, dl, Store, Ix, MachinePointerInfo());
6404           MemOpChains.push_back(Load.getValue(1));
6405           RegsToPass.push_back(std::make_pair(GPR[GPR_idx++], Load));
6406         }
6407         break;
6408       }
6409 
6410       // Non-varargs Altivec params go into VRs or on the stack.
6411       if (VR_idx != NumVRs) {
6412         RegsToPass.push_back(std::make_pair(VR[VR_idx++], Arg));
6413       } else {
6414         if (IsFastCall)
6415           ComputePtrOff();
6416 
6417         assert(HasParameterArea &&
6418                "Parameter area must exist to pass an argument in memory.");
6419         LowerMemOpCallTo(DAG, MF, Chain, Arg, PtrOff, SPDiff, ArgOffset,
6420                          true, CFlags.IsTailCall, true, MemOpChains,
6421                          TailCallArguments, dl);
6422         if (IsFastCall)
6423           ArgOffset += 16;
6424       }
6425 
6426       if (!IsFastCall)
6427         ArgOffset += 16;
6428       break;
6429     }
6430   }
6431 
6432   assert((!HasParameterArea || NumBytesActuallyUsed == ArgOffset) &&
6433          "mismatch in size of parameter area");
6434   (void)NumBytesActuallyUsed;
6435 
6436   if (!MemOpChains.empty())
6437     Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, MemOpChains);
6438 
6439   // Check if this is an indirect call (MTCTR/BCTRL).
6440   // See prepareDescriptorIndirectCall and buildCallOperands for more
6441   // information about calls through function pointers in the 64-bit SVR4 ABI.
6442   if (CFlags.IsIndirect) {
6443     // For 64-bit ELFv2 ABI with PCRel, do not save the TOC of the
6444     // caller in the TOC save area.
6445     if (isTOCSaveRestoreRequired(Subtarget)) {
6446       assert(!CFlags.IsTailCall && "Indirect tails calls not supported");
6447       // Load r2 into a virtual register and store it to the TOC save area.
6448       setUsesTOCBasePtr(DAG);
6449       SDValue Val = DAG.getCopyFromReg(Chain, dl, PPC::X2, MVT::i64);
6450       // TOC save area offset.
6451       unsigned TOCSaveOffset = Subtarget.getFrameLowering()->getTOCSaveOffset();
6452       SDValue PtrOff = DAG.getIntPtrConstant(TOCSaveOffset, dl);
6453       SDValue AddPtr = DAG.getNode(ISD::ADD, dl, PtrVT, StackPtr, PtrOff);
6454       Chain = DAG.getStore(Val.getValue(1), dl, Val, AddPtr,
6455                            MachinePointerInfo::getStack(
6456                                DAG.getMachineFunction(), TOCSaveOffset));
6457     }
6458     // In the ELFv2 ABI, R12 must contain the address of an indirect callee.
6459     // This does not mean the MTCTR instruction must use R12; it's easier
6460     // to model this as an extra parameter, so do that.
6461     if (isELFv2ABI && !CFlags.IsPatchPoint)
6462       RegsToPass.push_back(std::make_pair((unsigned)PPC::X12, Callee));
6463   }
6464 
6465   // Build a sequence of copy-to-reg nodes chained together with token chain
6466   // and flag operands which copy the outgoing args into the appropriate regs.
6467   SDValue InFlag;
6468   for (unsigned i = 0, e = RegsToPass.size(); i != e; ++i) {
6469     Chain = DAG.getCopyToReg(Chain, dl, RegsToPass[i].first,
6470                              RegsToPass[i].second, InFlag);
6471     InFlag = Chain.getValue(1);
6472   }
6473 
6474   if (CFlags.IsTailCall && !IsSibCall)
6475     PrepareTailCall(DAG, InFlag, Chain, dl, SPDiff, NumBytes, LROp, FPOp,
6476                     TailCallArguments);
6477 
6478   return FinishCall(CFlags, dl, DAG, RegsToPass, InFlag, Chain, CallSeqStart,
6479                     Callee, SPDiff, NumBytes, Ins, InVals, CB);
6480 }
6481 
6482 SDValue PPCTargetLowering::LowerCall_Darwin(
6483     SDValue Chain, SDValue Callee, CallFlags CFlags,
6484     const SmallVectorImpl<ISD::OutputArg> &Outs,
6485     const SmallVectorImpl<SDValue> &OutVals,
6486     const SmallVectorImpl<ISD::InputArg> &Ins, const SDLoc &dl,
6487     SelectionDAG &DAG, SmallVectorImpl<SDValue> &InVals,
6488     const CallBase *CB) const {
6489   unsigned NumOps = Outs.size();
6490 
6491   EVT PtrVT = getPointerTy(DAG.getDataLayout());
6492   bool isPPC64 = PtrVT == MVT::i64;
6493   unsigned PtrByteSize = isPPC64 ? 8 : 4;
6494 
6495   MachineFunction &MF = DAG.getMachineFunction();
6496 
6497   // Mark this function as potentially containing a function that contains a
6498   // tail call. As a consequence the frame pointer will be used for dynamicalloc
6499   // and restoring the callers stack pointer in this functions epilog. This is
6500   // done because by tail calling the called function might overwrite the value
6501   // in this function's (MF) stack pointer stack slot 0(SP).
6502   if (getTargetMachine().Options.GuaranteedTailCallOpt &&
6503       CFlags.CallConv == CallingConv::Fast)
6504     MF.getInfo<PPCFunctionInfo>()->setHasFastCall();
6505 
6506   // Count how many bytes are to be pushed on the stack, including the linkage
6507   // area, and parameter passing area.  We start with 24/48 bytes, which is
6508   // prereserved space for [SP][CR][LR][3 x unused].
6509   unsigned LinkageSize = Subtarget.getFrameLowering()->getLinkageSize();
6510   unsigned NumBytes = LinkageSize;
6511 
6512   // Add up all the space actually used.
6513   // In 32-bit non-varargs calls, Altivec parameters all go at the end; usually
6514   // they all go in registers, but we must reserve stack space for them for
6515   // possible use by the caller.  In varargs or 64-bit calls, parameters are
6516   // assigned stack space in order, with padding so Altivec parameters are
6517   // 16-byte aligned.
6518   unsigned nAltivecParamsAtEnd = 0;
6519   for (unsigned i = 0; i != NumOps; ++i) {
6520     ISD::ArgFlagsTy Flags = Outs[i].Flags;
6521     EVT ArgVT = Outs[i].VT;
6522     // Varargs Altivec parameters are padded to a 16 byte boundary.
6523     if (ArgVT == MVT::v4f32 || ArgVT == MVT::v4i32 ||
6524         ArgVT == MVT::v8i16 || ArgVT == MVT::v16i8 ||
6525         ArgVT == MVT::v2f64 || ArgVT == MVT::v2i64) {
6526       if (!CFlags.IsVarArg && !isPPC64) {
6527         // Non-varargs Altivec parameters go after all the non-Altivec
6528         // parameters; handle those later so we know how much padding we need.
6529         nAltivecParamsAtEnd++;
6530         continue;
6531       }
6532       // Varargs and 64-bit Altivec parameters are padded to 16 byte boundary.
6533       NumBytes = ((NumBytes+15)/16)*16;
6534     }
6535     NumBytes += CalculateStackSlotSize(ArgVT, Flags, PtrByteSize);
6536   }
6537 
6538   // Allow for Altivec parameters at the end, if needed.
6539   if (nAltivecParamsAtEnd) {
6540     NumBytes = ((NumBytes+15)/16)*16;
6541     NumBytes += 16*nAltivecParamsAtEnd;
6542   }
6543 
6544   // The prolog code of the callee may store up to 8 GPR argument registers to
6545   // the stack, allowing va_start to index over them in memory if its varargs.
6546   // Because we cannot tell if this is needed on the caller side, we have to
6547   // conservatively assume that it is needed.  As such, make sure we have at
6548   // least enough stack space for the caller to store the 8 GPRs.
6549   NumBytes = std::max(NumBytes, LinkageSize + 8 * PtrByteSize);
6550 
6551   // Tail call needs the stack to be aligned.
6552   if (getTargetMachine().Options.GuaranteedTailCallOpt &&
6553       CFlags.CallConv == CallingConv::Fast)
6554     NumBytes = EnsureStackAlignment(Subtarget.getFrameLowering(), NumBytes);
6555 
6556   // Calculate by how many bytes the stack has to be adjusted in case of tail
6557   // call optimization.
6558   int SPDiff = CalculateTailCallSPDiff(DAG, CFlags.IsTailCall, NumBytes);
6559 
6560   // To protect arguments on the stack from being clobbered in a tail call,
6561   // force all the loads to happen before doing any other lowering.
6562   if (CFlags.IsTailCall)
6563     Chain = DAG.getStackArgumentTokenFactor(Chain);
6564 
6565   // Adjust the stack pointer for the new arguments...
6566   // These operations are automatically eliminated by the prolog/epilog pass
6567   Chain = DAG.getCALLSEQ_START(Chain, NumBytes, 0, dl);
6568   SDValue CallSeqStart = Chain;
6569 
6570   // Load the return address and frame pointer so it can be move somewhere else
6571   // later.
6572   SDValue LROp, FPOp;
6573   Chain = EmitTailCallLoadFPAndRetAddr(DAG, SPDiff, Chain, LROp, FPOp, dl);
6574 
6575   // Set up a copy of the stack pointer for use loading and storing any
6576   // arguments that may not fit in the registers available for argument
6577   // passing.
6578   SDValue StackPtr;
6579   if (isPPC64)
6580     StackPtr = DAG.getRegister(PPC::X1, MVT::i64);
6581   else
6582     StackPtr = DAG.getRegister(PPC::R1, MVT::i32);
6583 
6584   // Figure out which arguments are going to go in registers, and which in
6585   // memory.  Also, if this is a vararg function, floating point operations
6586   // must be stored to our stack, and loaded into integer regs as well, if
6587   // any integer regs are available for argument passing.
6588   unsigned ArgOffset = LinkageSize;
6589   unsigned GPR_idx = 0, FPR_idx = 0, VR_idx = 0;
6590 
6591   static const MCPhysReg GPR_32[] = {           // 32-bit registers.
6592     PPC::R3, PPC::R4, PPC::R5, PPC::R6,
6593     PPC::R7, PPC::R8, PPC::R9, PPC::R10,
6594   };
6595   static const MCPhysReg GPR_64[] = {           // 64-bit registers.
6596     PPC::X3, PPC::X4, PPC::X5, PPC::X6,
6597     PPC::X7, PPC::X8, PPC::X9, PPC::X10,
6598   };
6599   static const MCPhysReg VR[] = {
6600     PPC::V2, PPC::V3, PPC::V4, PPC::V5, PPC::V6, PPC::V7, PPC::V8,
6601     PPC::V9, PPC::V10, PPC::V11, PPC::V12, PPC::V13
6602   };
6603   const unsigned NumGPRs = array_lengthof(GPR_32);
6604   const unsigned NumFPRs = 13;
6605   const unsigned NumVRs  = array_lengthof(VR);
6606 
6607   const MCPhysReg *GPR = isPPC64 ? GPR_64 : GPR_32;
6608 
6609   SmallVector<std::pair<unsigned, SDValue>, 8> RegsToPass;
6610   SmallVector<TailCallArgumentInfo, 8> TailCallArguments;
6611 
6612   SmallVector<SDValue, 8> MemOpChains;
6613   for (unsigned i = 0; i != NumOps; ++i) {
6614     SDValue Arg = OutVals[i];
6615     ISD::ArgFlagsTy Flags = Outs[i].Flags;
6616 
6617     // PtrOff will be used to store the current argument to the stack if a
6618     // register cannot be found for it.
6619     SDValue PtrOff;
6620 
6621     PtrOff = DAG.getConstant(ArgOffset, dl, StackPtr.getValueType());
6622 
6623     PtrOff = DAG.getNode(ISD::ADD, dl, PtrVT, StackPtr, PtrOff);
6624 
6625     // On PPC64, promote integers to 64-bit values.
6626     if (isPPC64 && Arg.getValueType() == MVT::i32) {
6627       // FIXME: Should this use ANY_EXTEND if neither sext nor zext?
6628       unsigned ExtOp = Flags.isSExt() ? ISD::SIGN_EXTEND : ISD::ZERO_EXTEND;
6629       Arg = DAG.getNode(ExtOp, dl, MVT::i64, Arg);
6630     }
6631 
6632     // FIXME memcpy is used way more than necessary.  Correctness first.
6633     // Note: "by value" is code for passing a structure by value, not
6634     // basic types.
6635     if (Flags.isByVal()) {
6636       unsigned Size = Flags.getByValSize();
6637       // Very small objects are passed right-justified.  Everything else is
6638       // passed left-justified.
6639       if (Size==1 || Size==2) {
6640         EVT VT = (Size==1) ? MVT::i8 : MVT::i16;
6641         if (GPR_idx != NumGPRs) {
6642           SDValue Load = DAG.getExtLoad(ISD::EXTLOAD, dl, PtrVT, Chain, Arg,
6643                                         MachinePointerInfo(), VT);
6644           MemOpChains.push_back(Load.getValue(1));
6645           RegsToPass.push_back(std::make_pair(GPR[GPR_idx++], Load));
6646 
6647           ArgOffset += PtrByteSize;
6648         } else {
6649           SDValue Const = DAG.getConstant(PtrByteSize - Size, dl,
6650                                           PtrOff.getValueType());
6651           SDValue AddPtr = DAG.getNode(ISD::ADD, dl, PtrVT, PtrOff, Const);
6652           Chain = CallSeqStart = createMemcpyOutsideCallSeq(Arg, AddPtr,
6653                                                             CallSeqStart,
6654                                                             Flags, DAG, dl);
6655           ArgOffset += PtrByteSize;
6656         }
6657         continue;
6658       }
6659       // Copy entire object into memory.  There are cases where gcc-generated
6660       // code assumes it is there, even if it could be put entirely into
6661       // registers.  (This is not what the doc says.)
6662       Chain = CallSeqStart = createMemcpyOutsideCallSeq(Arg, PtrOff,
6663                                                         CallSeqStart,
6664                                                         Flags, DAG, dl);
6665 
6666       // For small aggregates (Darwin only) and aggregates >= PtrByteSize,
6667       // copy the pieces of the object that fit into registers from the
6668       // parameter save area.
6669       for (unsigned j=0; j<Size; j+=PtrByteSize) {
6670         SDValue Const = DAG.getConstant(j, dl, PtrOff.getValueType());
6671         SDValue AddArg = DAG.getNode(ISD::ADD, dl, PtrVT, Arg, Const);
6672         if (GPR_idx != NumGPRs) {
6673           SDValue Load =
6674               DAG.getLoad(PtrVT, dl, Chain, AddArg, MachinePointerInfo());
6675           MemOpChains.push_back(Load.getValue(1));
6676           RegsToPass.push_back(std::make_pair(GPR[GPR_idx++], Load));
6677           ArgOffset += PtrByteSize;
6678         } else {
6679           ArgOffset += ((Size - j + PtrByteSize-1)/PtrByteSize)*PtrByteSize;
6680           break;
6681         }
6682       }
6683       continue;
6684     }
6685 
6686     switch (Arg.getSimpleValueType().SimpleTy) {
6687     default: llvm_unreachable("Unexpected ValueType for argument!");
6688     case MVT::i1:
6689     case MVT::i32:
6690     case MVT::i64:
6691       if (GPR_idx != NumGPRs) {
6692         if (Arg.getValueType() == MVT::i1)
6693           Arg = DAG.getNode(ISD::ZERO_EXTEND, dl, PtrVT, Arg);
6694 
6695         RegsToPass.push_back(std::make_pair(GPR[GPR_idx++], Arg));
6696       } else {
6697         LowerMemOpCallTo(DAG, MF, Chain, Arg, PtrOff, SPDiff, ArgOffset,
6698                          isPPC64, CFlags.IsTailCall, false, MemOpChains,
6699                          TailCallArguments, dl);
6700       }
6701       ArgOffset += PtrByteSize;
6702       break;
6703     case MVT::f32:
6704     case MVT::f64:
6705       if (FPR_idx != NumFPRs) {
6706         RegsToPass.push_back(std::make_pair(FPR[FPR_idx++], Arg));
6707 
6708         if (CFlags.IsVarArg) {
6709           SDValue Store =
6710               DAG.getStore(Chain, dl, Arg, PtrOff, MachinePointerInfo());
6711           MemOpChains.push_back(Store);
6712 
6713           // Float varargs are always shadowed in available integer registers
6714           if (GPR_idx != NumGPRs) {
6715             SDValue Load =
6716                 DAG.getLoad(PtrVT, dl, Store, PtrOff, MachinePointerInfo());
6717             MemOpChains.push_back(Load.getValue(1));
6718             RegsToPass.push_back(std::make_pair(GPR[GPR_idx++], Load));
6719           }
6720           if (GPR_idx != NumGPRs && Arg.getValueType() == MVT::f64 && !isPPC64){
6721             SDValue ConstFour = DAG.getConstant(4, dl, PtrOff.getValueType());
6722             PtrOff = DAG.getNode(ISD::ADD, dl, PtrVT, PtrOff, ConstFour);
6723             SDValue Load =
6724                 DAG.getLoad(PtrVT, dl, Store, PtrOff, MachinePointerInfo());
6725             MemOpChains.push_back(Load.getValue(1));
6726             RegsToPass.push_back(std::make_pair(GPR[GPR_idx++], Load));
6727           }
6728         } else {
6729           // If we have any FPRs remaining, we may also have GPRs remaining.
6730           // Args passed in FPRs consume either 1 (f32) or 2 (f64) available
6731           // GPRs.
6732           if (GPR_idx != NumGPRs)
6733             ++GPR_idx;
6734           if (GPR_idx != NumGPRs && Arg.getValueType() == MVT::f64 &&
6735               !isPPC64)  // PPC64 has 64-bit GPR's obviously :)
6736             ++GPR_idx;
6737         }
6738       } else
6739         LowerMemOpCallTo(DAG, MF, Chain, Arg, PtrOff, SPDiff, ArgOffset,
6740                          isPPC64, CFlags.IsTailCall, false, MemOpChains,
6741                          TailCallArguments, dl);
6742       if (isPPC64)
6743         ArgOffset += 8;
6744       else
6745         ArgOffset += Arg.getValueType() == MVT::f32 ? 4 : 8;
6746       break;
6747     case MVT::v4f32:
6748     case MVT::v4i32:
6749     case MVT::v8i16:
6750     case MVT::v16i8:
6751       if (CFlags.IsVarArg) {
6752         // These go aligned on the stack, or in the corresponding R registers
6753         // when within range.  The Darwin PPC ABI doc claims they also go in
6754         // V registers; in fact gcc does this only for arguments that are
6755         // prototyped, not for those that match the ...  We do it for all
6756         // arguments, seems to work.
6757         while (ArgOffset % 16 !=0) {
6758           ArgOffset += PtrByteSize;
6759           if (GPR_idx != NumGPRs)
6760             GPR_idx++;
6761         }
6762         // We could elide this store in the case where the object fits
6763         // entirely in R registers.  Maybe later.
6764         PtrOff = DAG.getNode(ISD::ADD, dl, PtrVT, StackPtr,
6765                              DAG.getConstant(ArgOffset, dl, PtrVT));
6766         SDValue Store =
6767             DAG.getStore(Chain, dl, Arg, PtrOff, MachinePointerInfo());
6768         MemOpChains.push_back(Store);
6769         if (VR_idx != NumVRs) {
6770           SDValue Load =
6771               DAG.getLoad(MVT::v4f32, dl, Store, PtrOff, MachinePointerInfo());
6772           MemOpChains.push_back(Load.getValue(1));
6773           RegsToPass.push_back(std::make_pair(VR[VR_idx++], Load));
6774         }
6775         ArgOffset += 16;
6776         for (unsigned i=0; i<16; i+=PtrByteSize) {
6777           if (GPR_idx == NumGPRs)
6778             break;
6779           SDValue Ix = DAG.getNode(ISD::ADD, dl, PtrVT, PtrOff,
6780                                    DAG.getConstant(i, dl, PtrVT));
6781           SDValue Load =
6782               DAG.getLoad(PtrVT, dl, Store, Ix, MachinePointerInfo());
6783           MemOpChains.push_back(Load.getValue(1));
6784           RegsToPass.push_back(std::make_pair(GPR[GPR_idx++], Load));
6785         }
6786         break;
6787       }
6788 
6789       // Non-varargs Altivec params generally go in registers, but have
6790       // stack space allocated at the end.
6791       if (VR_idx != NumVRs) {
6792         // Doesn't have GPR space allocated.
6793         RegsToPass.push_back(std::make_pair(VR[VR_idx++], Arg));
6794       } else if (nAltivecParamsAtEnd==0) {
6795         // We are emitting Altivec params in order.
6796         LowerMemOpCallTo(DAG, MF, Chain, Arg, PtrOff, SPDiff, ArgOffset,
6797                          isPPC64, CFlags.IsTailCall, true, MemOpChains,
6798                          TailCallArguments, dl);
6799         ArgOffset += 16;
6800       }
6801       break;
6802     }
6803   }
6804   // If all Altivec parameters fit in registers, as they usually do,
6805   // they get stack space following the non-Altivec parameters.  We
6806   // don't track this here because nobody below needs it.
6807   // If there are more Altivec parameters than fit in registers emit
6808   // the stores here.
6809   if (!CFlags.IsVarArg && nAltivecParamsAtEnd > NumVRs) {
6810     unsigned j = 0;
6811     // Offset is aligned; skip 1st 12 params which go in V registers.
6812     ArgOffset = ((ArgOffset+15)/16)*16;
6813     ArgOffset += 12*16;
6814     for (unsigned i = 0; i != NumOps; ++i) {
6815       SDValue Arg = OutVals[i];
6816       EVT ArgType = Outs[i].VT;
6817       if (ArgType==MVT::v4f32 || ArgType==MVT::v4i32 ||
6818           ArgType==MVT::v8i16 || ArgType==MVT::v16i8) {
6819         if (++j > NumVRs) {
6820           SDValue PtrOff;
6821           // We are emitting Altivec params in order.
6822           LowerMemOpCallTo(DAG, MF, Chain, Arg, PtrOff, SPDiff, ArgOffset,
6823                            isPPC64, CFlags.IsTailCall, true, MemOpChains,
6824                            TailCallArguments, dl);
6825           ArgOffset += 16;
6826         }
6827       }
6828     }
6829   }
6830 
6831   if (!MemOpChains.empty())
6832     Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, MemOpChains);
6833 
6834   // On Darwin, R12 must contain the address of an indirect callee.  This does
6835   // not mean the MTCTR instruction must use R12; it's easier to model this as
6836   // an extra parameter, so do that.
6837   if (CFlags.IsIndirect) {
6838     assert(!CFlags.IsTailCall && "Indirect tail-calls not supported.");
6839     RegsToPass.push_back(std::make_pair((unsigned)(isPPC64 ? PPC::X12 :
6840                                                    PPC::R12), Callee));
6841   }
6842 
6843   // Build a sequence of copy-to-reg nodes chained together with token chain
6844   // and flag operands which copy the outgoing args into the appropriate regs.
6845   SDValue InFlag;
6846   for (unsigned i = 0, e = RegsToPass.size(); i != e; ++i) {
6847     Chain = DAG.getCopyToReg(Chain, dl, RegsToPass[i].first,
6848                              RegsToPass[i].second, InFlag);
6849     InFlag = Chain.getValue(1);
6850   }
6851 
6852   if (CFlags.IsTailCall)
6853     PrepareTailCall(DAG, InFlag, Chain, dl, SPDiff, NumBytes, LROp, FPOp,
6854                     TailCallArguments);
6855 
6856   return FinishCall(CFlags, dl, DAG, RegsToPass, InFlag, Chain, CallSeqStart,
6857                     Callee, SPDiff, NumBytes, Ins, InVals, CB);
6858 }
6859 
6860 static bool CC_AIX(unsigned ValNo, MVT ValVT, MVT LocVT,
6861                    CCValAssign::LocInfo LocInfo, ISD::ArgFlagsTy ArgFlags,
6862                    CCState &State) {
6863 
6864   const PPCSubtarget &Subtarget = static_cast<const PPCSubtarget &>(
6865       State.getMachineFunction().getSubtarget());
6866   const bool IsPPC64 = Subtarget.isPPC64();
6867   const Align PtrAlign = IsPPC64 ? Align(8) : Align(4);
6868   const MVT RegVT = IsPPC64 ? MVT::i64 : MVT::i32;
6869 
6870   assert((!ValVT.isInteger() ||
6871           (ValVT.getSizeInBits() <= RegVT.getSizeInBits())) &&
6872          "Integer argument exceeds register size: should have been legalized");
6873 
6874   if (ValVT == MVT::f128)
6875     report_fatal_error("f128 is unimplemented on AIX.");
6876 
6877   if (ArgFlags.isNest())
6878     report_fatal_error("Nest arguments are unimplemented.");
6879 
6880   if (ValVT.isVector() || LocVT.isVector())
6881     report_fatal_error("Vector arguments are unimplemented on AIX.");
6882 
6883   static const MCPhysReg GPR_32[] = {// 32-bit registers.
6884                                      PPC::R3, PPC::R4, PPC::R5, PPC::R6,
6885                                      PPC::R7, PPC::R8, PPC::R9, PPC::R10};
6886   static const MCPhysReg GPR_64[] = {// 64-bit registers.
6887                                      PPC::X3, PPC::X4, PPC::X5, PPC::X6,
6888                                      PPC::X7, PPC::X8, PPC::X9, PPC::X10};
6889 
6890   if (ArgFlags.isByVal()) {
6891     if (ArgFlags.getNonZeroByValAlign() > PtrAlign)
6892       report_fatal_error("Pass-by-value arguments with alignment greater than "
6893                          "register width are not supported.");
6894 
6895     const unsigned ByValSize = ArgFlags.getByValSize();
6896 
6897     // An empty aggregate parameter takes up no storage and no registers,
6898     // but needs a MemLoc for a stack slot for the formal arguments side.
6899     if (ByValSize == 0) {
6900       State.addLoc(CCValAssign::getMem(ValNo, MVT::INVALID_SIMPLE_VALUE_TYPE,
6901                                        State.getNextStackOffset(), RegVT,
6902                                        LocInfo));
6903       return false;
6904     }
6905 
6906     const unsigned StackSize = alignTo(ByValSize, PtrAlign);
6907     unsigned Offset = State.AllocateStack(StackSize, PtrAlign);
6908     for (const unsigned E = Offset + StackSize; Offset < E;
6909          Offset += PtrAlign.value()) {
6910       if (unsigned Reg = State.AllocateReg(IsPPC64 ? GPR_64 : GPR_32))
6911         State.addLoc(CCValAssign::getReg(ValNo, ValVT, Reg, RegVT, LocInfo));
6912       else {
6913         State.addLoc(CCValAssign::getMem(ValNo, MVT::INVALID_SIMPLE_VALUE_TYPE,
6914                                          Offset, MVT::INVALID_SIMPLE_VALUE_TYPE,
6915                                          LocInfo));
6916         break;
6917       }
6918     }
6919     return false;
6920   }
6921 
6922   // Arguments always reserve parameter save area.
6923   switch (ValVT.SimpleTy) {
6924   default:
6925     report_fatal_error("Unhandled value type for argument.");
6926   case MVT::i64:
6927     // i64 arguments should have been split to i32 for PPC32.
6928     assert(IsPPC64 && "PPC32 should have split i64 values.");
6929     LLVM_FALLTHROUGH;
6930   case MVT::i1:
6931   case MVT::i32: {
6932     const unsigned Offset = State.AllocateStack(PtrAlign.value(), PtrAlign);
6933     // AIX integer arguments are always passed in register width.
6934     if (ValVT.getSizeInBits() < RegVT.getSizeInBits())
6935       LocInfo = ArgFlags.isSExt() ? CCValAssign::LocInfo::SExt
6936                                   : CCValAssign::LocInfo::ZExt;
6937     if (unsigned Reg = State.AllocateReg(IsPPC64 ? GPR_64 : GPR_32))
6938       State.addLoc(CCValAssign::getReg(ValNo, ValVT, Reg, RegVT, LocInfo));
6939     else
6940       State.addLoc(CCValAssign::getMem(ValNo, ValVT, Offset, RegVT, LocInfo));
6941 
6942     return false;
6943   }
6944   case MVT::f32:
6945   case MVT::f64: {
6946     // Parameter save area (PSA) is reserved even if the float passes in fpr.
6947     const unsigned StoreSize = LocVT.getStoreSize();
6948     // Floats are always 4-byte aligned in the PSA on AIX.
6949     // This includes f64 in 64-bit mode for ABI compatibility.
6950     const unsigned Offset =
6951         State.AllocateStack(IsPPC64 ? 8 : StoreSize, Align(4));
6952     unsigned FReg = State.AllocateReg(FPR);
6953     if (FReg)
6954       State.addLoc(CCValAssign::getReg(ValNo, ValVT, FReg, LocVT, LocInfo));
6955 
6956     // Reserve and initialize GPRs or initialize the PSA as required.
6957     for (unsigned I = 0; I < StoreSize; I += PtrAlign.value()) {
6958       if (unsigned Reg = State.AllocateReg(IsPPC64 ? GPR_64 : GPR_32)) {
6959         assert(FReg && "An FPR should be available when a GPR is reserved.");
6960         if (State.isVarArg()) {
6961           // Successfully reserved GPRs are only initialized for vararg calls.
6962           // Custom handling is required for:
6963           //   f64 in PPC32 needs to be split into 2 GPRs.
6964           //   f32 in PPC64 needs to occupy only lower 32 bits of 64-bit GPR.
6965           State.addLoc(
6966               CCValAssign::getCustomReg(ValNo, ValVT, Reg, RegVT, LocInfo));
6967         }
6968       } else {
6969         // If there are insufficient GPRs, the PSA needs to be initialized.
6970         // Initialization occurs even if an FPR was initialized for
6971         // compatibility with the AIX XL compiler. The full memory for the
6972         // argument will be initialized even if a prior word is saved in GPR.
6973         // A custom memLoc is used when the argument also passes in FPR so
6974         // that the callee handling can skip over it easily.
6975         State.addLoc(
6976             FReg ? CCValAssign::getCustomMem(ValNo, ValVT, Offset, LocVT,
6977                                              LocInfo)
6978                  : CCValAssign::getMem(ValNo, ValVT, Offset, LocVT, LocInfo));
6979         break;
6980       }
6981     }
6982 
6983     return false;
6984   }
6985   }
6986   return true;
6987 }
6988 
6989 static const TargetRegisterClass *getRegClassForSVT(MVT::SimpleValueType SVT,
6990                                                     bool IsPPC64) {
6991   assert((IsPPC64 || SVT != MVT::i64) &&
6992          "i64 should have been split for 32-bit codegen.");
6993 
6994   switch (SVT) {
6995   default:
6996     report_fatal_error("Unexpected value type for formal argument");
6997   case MVT::i1:
6998   case MVT::i32:
6999   case MVT::i64:
7000     return IsPPC64 ? &PPC::G8RCRegClass : &PPC::GPRCRegClass;
7001   case MVT::f32:
7002     return &PPC::F4RCRegClass;
7003   case MVT::f64:
7004     return &PPC::F8RCRegClass;
7005   }
7006 }
7007 
7008 static SDValue truncateScalarIntegerArg(ISD::ArgFlagsTy Flags, EVT ValVT,
7009                                         SelectionDAG &DAG, SDValue ArgValue,
7010                                         MVT LocVT, const SDLoc &dl) {
7011   assert(ValVT.isScalarInteger() && LocVT.isScalarInteger());
7012   assert(ValVT.getSizeInBits() < LocVT.getSizeInBits());
7013 
7014   if (Flags.isSExt())
7015     ArgValue = DAG.getNode(ISD::AssertSext, dl, LocVT, ArgValue,
7016                            DAG.getValueType(ValVT));
7017   else if (Flags.isZExt())
7018     ArgValue = DAG.getNode(ISD::AssertZext, dl, LocVT, ArgValue,
7019                            DAG.getValueType(ValVT));
7020 
7021   return DAG.getNode(ISD::TRUNCATE, dl, ValVT, ArgValue);
7022 }
7023 
7024 static unsigned mapArgRegToOffsetAIX(unsigned Reg, const PPCFrameLowering *FL) {
7025   const unsigned LASize = FL->getLinkageSize();
7026 
7027   if (PPC::GPRCRegClass.contains(Reg)) {
7028     assert(Reg >= PPC::R3 && Reg <= PPC::R10 &&
7029            "Reg must be a valid argument register!");
7030     return LASize + 4 * (Reg - PPC::R3);
7031   }
7032 
7033   if (PPC::G8RCRegClass.contains(Reg)) {
7034     assert(Reg >= PPC::X3 && Reg <= PPC::X10 &&
7035            "Reg must be a valid argument register!");
7036     return LASize + 8 * (Reg - PPC::X3);
7037   }
7038 
7039   llvm_unreachable("Only general purpose registers expected.");
7040 }
7041 
7042 //   AIX ABI Stack Frame Layout:
7043 //
7044 //   Low Memory +--------------------------------------------+
7045 //   SP   +---> | Back chain                                 | ---+
7046 //        |     +--------------------------------------------+    |
7047 //        |     | Saved Condition Register                   |    |
7048 //        |     +--------------------------------------------+    |
7049 //        |     | Saved Linkage Register                     |    |
7050 //        |     +--------------------------------------------+    | Linkage Area
7051 //        |     | Reserved for compilers                     |    |
7052 //        |     +--------------------------------------------+    |
7053 //        |     | Reserved for binders                       |    |
7054 //        |     +--------------------------------------------+    |
7055 //        |     | Saved TOC pointer                          | ---+
7056 //        |     +--------------------------------------------+
7057 //        |     | Parameter save area                        |
7058 //        |     +--------------------------------------------+
7059 //        |     | Alloca space                               |
7060 //        |     +--------------------------------------------+
7061 //        |     | Local variable space                       |
7062 //        |     +--------------------------------------------+
7063 //        |     | Float/int conversion temporary             |
7064 //        |     +--------------------------------------------+
7065 //        |     | Save area for AltiVec registers            |
7066 //        |     +--------------------------------------------+
7067 //        |     | AltiVec alignment padding                  |
7068 //        |     +--------------------------------------------+
7069 //        |     | Save area for VRSAVE register              |
7070 //        |     +--------------------------------------------+
7071 //        |     | Save area for General Purpose registers    |
7072 //        |     +--------------------------------------------+
7073 //        |     | Save area for Floating Point registers     |
7074 //        |     +--------------------------------------------+
7075 //        +---- | Back chain                                 |
7076 // High Memory  +--------------------------------------------+
7077 //
7078 //  Specifications:
7079 //  AIX 7.2 Assembler Language Reference
7080 //  Subroutine linkage convention
7081 
7082 SDValue PPCTargetLowering::LowerFormalArguments_AIX(
7083     SDValue Chain, CallingConv::ID CallConv, bool isVarArg,
7084     const SmallVectorImpl<ISD::InputArg> &Ins, const SDLoc &dl,
7085     SelectionDAG &DAG, SmallVectorImpl<SDValue> &InVals) const {
7086 
7087   assert((CallConv == CallingConv::C || CallConv == CallingConv::Cold ||
7088           CallConv == CallingConv::Fast) &&
7089          "Unexpected calling convention!");
7090 
7091   if (getTargetMachine().Options.GuaranteedTailCallOpt)
7092     report_fatal_error("Tail call support is unimplemented on AIX.");
7093 
7094   if (useSoftFloat())
7095     report_fatal_error("Soft float support is unimplemented on AIX.");
7096 
7097   const PPCSubtarget &Subtarget =
7098       static_cast<const PPCSubtarget &>(DAG.getSubtarget());
7099 
7100   const bool IsPPC64 = Subtarget.isPPC64();
7101   const unsigned PtrByteSize = IsPPC64 ? 8 : 4;
7102 
7103   // Assign locations to all of the incoming arguments.
7104   SmallVector<CCValAssign, 16> ArgLocs;
7105   MachineFunction &MF = DAG.getMachineFunction();
7106   MachineFrameInfo &MFI = MF.getFrameInfo();
7107   CCState CCInfo(CallConv, isVarArg, MF, ArgLocs, *DAG.getContext());
7108 
7109   const EVT PtrVT = getPointerTy(MF.getDataLayout());
7110   // Reserve space for the linkage area on the stack.
7111   const unsigned LinkageSize = Subtarget.getFrameLowering()->getLinkageSize();
7112   CCInfo.AllocateStack(LinkageSize, Align(PtrByteSize));
7113   CCInfo.AnalyzeFormalArguments(Ins, CC_AIX);
7114 
7115   SmallVector<SDValue, 8> MemOps;
7116 
7117   for (size_t I = 0, End = ArgLocs.size(); I != End; /* No increment here */) {
7118     CCValAssign &VA = ArgLocs[I++];
7119     MVT LocVT = VA.getLocVT();
7120     ISD::ArgFlagsTy Flags = Ins[VA.getValNo()].Flags;
7121 
7122     // For compatibility with the AIX XL compiler, the float args in the
7123     // parameter save area are initialized even if the argument is available
7124     // in register.  The caller is required to initialize both the register
7125     // and memory, however, the callee can choose to expect it in either.
7126     // The memloc is dismissed here because the argument is retrieved from
7127     // the register.
7128     if (VA.isMemLoc() && VA.needsCustom())
7129       continue;
7130 
7131     if (Flags.isByVal() && VA.isMemLoc()) {
7132       const unsigned Size =
7133           alignTo(Flags.getByValSize() ? Flags.getByValSize() : PtrByteSize,
7134                   PtrByteSize);
7135       const int FI = MF.getFrameInfo().CreateFixedObject(
7136           Size, VA.getLocMemOffset(), /* IsImmutable */ false,
7137           /* IsAliased */ true);
7138       SDValue FIN = DAG.getFrameIndex(FI, PtrVT);
7139       InVals.push_back(FIN);
7140 
7141       continue;
7142     }
7143 
7144     if (Flags.isByVal()) {
7145       assert(VA.isRegLoc() && "MemLocs should already be handled.");
7146 
7147       const MCPhysReg ArgReg = VA.getLocReg();
7148       const PPCFrameLowering *FL = Subtarget.getFrameLowering();
7149 
7150       if (Flags.getNonZeroByValAlign() > PtrByteSize)
7151         report_fatal_error("Over aligned byvals not supported yet.");
7152 
7153       const unsigned StackSize = alignTo(Flags.getByValSize(), PtrByteSize);
7154       const int FI = MF.getFrameInfo().CreateFixedObject(
7155           StackSize, mapArgRegToOffsetAIX(ArgReg, FL), /* IsImmutable */ false,
7156           /* IsAliased */ true);
7157       SDValue FIN = DAG.getFrameIndex(FI, PtrVT);
7158       InVals.push_back(FIN);
7159 
7160       // Add live ins for all the RegLocs for the same ByVal.
7161       const TargetRegisterClass *RegClass =
7162           IsPPC64 ? &PPC::G8RCRegClass : &PPC::GPRCRegClass;
7163 
7164       auto HandleRegLoc = [&, RegClass, LocVT](const MCPhysReg PhysReg,
7165                                                unsigned Offset) {
7166         const unsigned VReg = MF.addLiveIn(PhysReg, RegClass);
7167         // Since the callers side has left justified the aggregate in the
7168         // register, we can simply store the entire register into the stack
7169         // slot.
7170         SDValue CopyFrom = DAG.getCopyFromReg(Chain, dl, VReg, LocVT);
7171         // The store to the fixedstack object is needed becuase accessing a
7172         // field of the ByVal will use a gep and load. Ideally we will optimize
7173         // to extracting the value from the register directly, and elide the
7174         // stores when the arguments address is not taken, but that will need to
7175         // be future work.
7176         SDValue Store = DAG.getStore(
7177             CopyFrom.getValue(1), dl, CopyFrom,
7178             DAG.getObjectPtrOffset(dl, FIN, TypeSize::Fixed(Offset)),
7179             MachinePointerInfo::getFixedStack(MF, FI, Offset));
7180 
7181         MemOps.push_back(Store);
7182       };
7183 
7184       unsigned Offset = 0;
7185       HandleRegLoc(VA.getLocReg(), Offset);
7186       Offset += PtrByteSize;
7187       for (; Offset != StackSize && ArgLocs[I].isRegLoc();
7188            Offset += PtrByteSize) {
7189         assert(ArgLocs[I].getValNo() == VA.getValNo() &&
7190                "RegLocs should be for ByVal argument.");
7191 
7192         const CCValAssign RL = ArgLocs[I++];
7193         HandleRegLoc(RL.getLocReg(), Offset);
7194       }
7195 
7196       if (Offset != StackSize) {
7197         assert(ArgLocs[I].getValNo() == VA.getValNo() &&
7198                "Expected MemLoc for remaining bytes.");
7199         assert(ArgLocs[I].isMemLoc() && "Expected MemLoc for remaining bytes.");
7200         // Consume the MemLoc.The InVal has already been emitted, so nothing
7201         // more needs to be done.
7202         ++I;
7203       }
7204 
7205       continue;
7206     }
7207 
7208     EVT ValVT = VA.getValVT();
7209     if (VA.isRegLoc() && !VA.needsCustom()) {
7210       MVT::SimpleValueType SVT = ValVT.getSimpleVT().SimpleTy;
7211       unsigned VReg =
7212           MF.addLiveIn(VA.getLocReg(), getRegClassForSVT(SVT, IsPPC64));
7213       SDValue ArgValue = DAG.getCopyFromReg(Chain, dl, VReg, LocVT);
7214       if (ValVT.isScalarInteger() &&
7215           (ValVT.getSizeInBits() < LocVT.getSizeInBits())) {
7216         ArgValue =
7217             truncateScalarIntegerArg(Flags, ValVT, DAG, ArgValue, LocVT, dl);
7218       }
7219       InVals.push_back(ArgValue);
7220       continue;
7221     }
7222     if (VA.isMemLoc()) {
7223       const unsigned LocSize = LocVT.getStoreSize();
7224       const unsigned ValSize = ValVT.getStoreSize();
7225       assert((ValSize <= LocSize) &&
7226              "Object size is larger than size of MemLoc");
7227       int CurArgOffset = VA.getLocMemOffset();
7228       // Objects are right-justified because AIX is big-endian.
7229       if (LocSize > ValSize)
7230         CurArgOffset += LocSize - ValSize;
7231       // Potential tail calls could cause overwriting of argument stack slots.
7232       const bool IsImmutable =
7233           !(getTargetMachine().Options.GuaranteedTailCallOpt &&
7234             (CallConv == CallingConv::Fast));
7235       int FI = MFI.CreateFixedObject(ValSize, CurArgOffset, IsImmutable);
7236       SDValue FIN = DAG.getFrameIndex(FI, PtrVT);
7237       SDValue ArgValue =
7238           DAG.getLoad(ValVT, dl, Chain, FIN, MachinePointerInfo());
7239       InVals.push_back(ArgValue);
7240       continue;
7241     }
7242   }
7243 
7244   // On AIX a minimum of 8 words is saved to the parameter save area.
7245   const unsigned MinParameterSaveArea = 8 * PtrByteSize;
7246   // Area that is at least reserved in the caller of this function.
7247   unsigned CallerReservedArea =
7248       std::max(CCInfo.getNextStackOffset(), LinkageSize + MinParameterSaveArea);
7249 
7250   // Set the size that is at least reserved in caller of this function. Tail
7251   // call optimized function's reserved stack space needs to be aligned so
7252   // that taking the difference between two stack areas will result in an
7253   // aligned stack.
7254   CallerReservedArea =
7255       EnsureStackAlignment(Subtarget.getFrameLowering(), CallerReservedArea);
7256   PPCFunctionInfo *FuncInfo = MF.getInfo<PPCFunctionInfo>();
7257   FuncInfo->setMinReservedArea(CallerReservedArea);
7258 
7259   if (isVarArg) {
7260     FuncInfo->setVarArgsFrameIndex(
7261         MFI.CreateFixedObject(PtrByteSize, CCInfo.getNextStackOffset(), true));
7262     SDValue FIN = DAG.getFrameIndex(FuncInfo->getVarArgsFrameIndex(), PtrVT);
7263 
7264     static const MCPhysReg GPR_32[] = {PPC::R3, PPC::R4, PPC::R5, PPC::R6,
7265                                        PPC::R7, PPC::R8, PPC::R9, PPC::R10};
7266 
7267     static const MCPhysReg GPR_64[] = {PPC::X3, PPC::X4, PPC::X5, PPC::X6,
7268                                        PPC::X7, PPC::X8, PPC::X9, PPC::X10};
7269     const unsigned NumGPArgRegs = array_lengthof(IsPPC64 ? GPR_64 : GPR_32);
7270 
7271     // The fixed integer arguments of a variadic function are stored to the
7272     // VarArgsFrameIndex on the stack so that they may be loaded by
7273     // dereferencing the result of va_next.
7274     for (unsigned GPRIndex =
7275              (CCInfo.getNextStackOffset() - LinkageSize) / PtrByteSize;
7276          GPRIndex < NumGPArgRegs; ++GPRIndex) {
7277 
7278       const unsigned VReg =
7279           IsPPC64 ? MF.addLiveIn(GPR_64[GPRIndex], &PPC::G8RCRegClass)
7280                   : MF.addLiveIn(GPR_32[GPRIndex], &PPC::GPRCRegClass);
7281 
7282       SDValue Val = DAG.getCopyFromReg(Chain, dl, VReg, PtrVT);
7283       SDValue Store =
7284           DAG.getStore(Val.getValue(1), dl, Val, FIN, MachinePointerInfo());
7285       MemOps.push_back(Store);
7286       // Increment the address for the next argument to store.
7287       SDValue PtrOff = DAG.getConstant(PtrByteSize, dl, PtrVT);
7288       FIN = DAG.getNode(ISD::ADD, dl, PtrOff.getValueType(), FIN, PtrOff);
7289     }
7290   }
7291 
7292   if (!MemOps.empty())
7293     Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, MemOps);
7294 
7295   return Chain;
7296 }
7297 
7298 SDValue PPCTargetLowering::LowerCall_AIX(
7299     SDValue Chain, SDValue Callee, CallFlags CFlags,
7300     const SmallVectorImpl<ISD::OutputArg> &Outs,
7301     const SmallVectorImpl<SDValue> &OutVals,
7302     const SmallVectorImpl<ISD::InputArg> &Ins, const SDLoc &dl,
7303     SelectionDAG &DAG, SmallVectorImpl<SDValue> &InVals,
7304     const CallBase *CB) const {
7305   // See PPCTargetLowering::LowerFormalArguments_AIX() for a description of the
7306   // AIX ABI stack frame layout.
7307 
7308   assert((CFlags.CallConv == CallingConv::C ||
7309           CFlags.CallConv == CallingConv::Cold ||
7310           CFlags.CallConv == CallingConv::Fast) &&
7311          "Unexpected calling convention!");
7312 
7313   if (CFlags.IsPatchPoint)
7314     report_fatal_error("This call type is unimplemented on AIX.");
7315 
7316   const PPCSubtarget& Subtarget =
7317       static_cast<const PPCSubtarget&>(DAG.getSubtarget());
7318   if (Subtarget.hasAltivec())
7319     report_fatal_error("Altivec support is unimplemented on AIX.");
7320 
7321   MachineFunction &MF = DAG.getMachineFunction();
7322   SmallVector<CCValAssign, 16> ArgLocs;
7323   CCState CCInfo(CFlags.CallConv, CFlags.IsVarArg, MF, ArgLocs,
7324                  *DAG.getContext());
7325 
7326   // Reserve space for the linkage save area (LSA) on the stack.
7327   // In both PPC32 and PPC64 there are 6 reserved slots in the LSA:
7328   //   [SP][CR][LR][2 x reserved][TOC].
7329   // The LSA is 24 bytes (6x4) in PPC32 and 48 bytes (6x8) in PPC64.
7330   const unsigned LinkageSize = Subtarget.getFrameLowering()->getLinkageSize();
7331   const bool IsPPC64 = Subtarget.isPPC64();
7332   const EVT PtrVT = getPointerTy(DAG.getDataLayout());
7333   const unsigned PtrByteSize = IsPPC64 ? 8 : 4;
7334   CCInfo.AllocateStack(LinkageSize, Align(PtrByteSize));
7335   CCInfo.AnalyzeCallOperands(Outs, CC_AIX);
7336 
7337   // The prolog code of the callee may store up to 8 GPR argument registers to
7338   // the stack, allowing va_start to index over them in memory if the callee
7339   // is variadic.
7340   // Because we cannot tell if this is needed on the caller side, we have to
7341   // conservatively assume that it is needed.  As such, make sure we have at
7342   // least enough stack space for the caller to store the 8 GPRs.
7343   const unsigned MinParameterSaveAreaSize = 8 * PtrByteSize;
7344   const unsigned NumBytes = std::max(LinkageSize + MinParameterSaveAreaSize,
7345                                      CCInfo.getNextStackOffset());
7346 
7347   // Adjust the stack pointer for the new arguments...
7348   // These operations are automatically eliminated by the prolog/epilog pass.
7349   Chain = DAG.getCALLSEQ_START(Chain, NumBytes, 0, dl);
7350   SDValue CallSeqStart = Chain;
7351 
7352   SmallVector<std::pair<unsigned, SDValue>, 8> RegsToPass;
7353   SmallVector<SDValue, 8> MemOpChains;
7354 
7355   // Set up a copy of the stack pointer for loading and storing any
7356   // arguments that may not fit in the registers available for argument
7357   // passing.
7358   const SDValue StackPtr = IsPPC64 ? DAG.getRegister(PPC::X1, MVT::i64)
7359                                    : DAG.getRegister(PPC::R1, MVT::i32);
7360 
7361   for (unsigned I = 0, E = ArgLocs.size(); I != E;) {
7362     const unsigned ValNo = ArgLocs[I].getValNo();
7363     SDValue Arg = OutVals[ValNo];
7364     ISD::ArgFlagsTy Flags = Outs[ValNo].Flags;
7365 
7366     if (Flags.isByVal()) {
7367       const unsigned ByValSize = Flags.getByValSize();
7368 
7369       // Nothing to do for zero-sized ByVals on the caller side.
7370       if (!ByValSize) {
7371         ++I;
7372         continue;
7373       }
7374 
7375       auto GetLoad = [&](EVT VT, unsigned LoadOffset) {
7376         return DAG.getExtLoad(
7377             ISD::ZEXTLOAD, dl, PtrVT, Chain,
7378             (LoadOffset != 0)
7379                 ? DAG.getObjectPtrOffset(dl, Arg, TypeSize::Fixed(LoadOffset))
7380                 : Arg,
7381             MachinePointerInfo(), VT);
7382       };
7383 
7384       unsigned LoadOffset = 0;
7385 
7386       // Initialize registers, which are fully occupied by the by-val argument.
7387       while (LoadOffset + PtrByteSize <= ByValSize && ArgLocs[I].isRegLoc()) {
7388         SDValue Load = GetLoad(PtrVT, LoadOffset);
7389         MemOpChains.push_back(Load.getValue(1));
7390         LoadOffset += PtrByteSize;
7391         const CCValAssign &ByValVA = ArgLocs[I++];
7392         assert(ByValVA.getValNo() == ValNo &&
7393                "Unexpected location for pass-by-value argument.");
7394         RegsToPass.push_back(std::make_pair(ByValVA.getLocReg(), Load));
7395       }
7396 
7397       if (LoadOffset == ByValSize)
7398         continue;
7399 
7400       // There must be one more loc to handle the remainder.
7401       assert(ArgLocs[I].getValNo() == ValNo &&
7402              "Expected additional location for by-value argument.");
7403 
7404       if (ArgLocs[I].isMemLoc()) {
7405         assert(LoadOffset < ByValSize && "Unexpected memloc for by-val arg.");
7406         const CCValAssign &ByValVA = ArgLocs[I++];
7407         ISD::ArgFlagsTy MemcpyFlags = Flags;
7408         // Only memcpy the bytes that don't pass in register.
7409         MemcpyFlags.setByValSize(ByValSize - LoadOffset);
7410         Chain = CallSeqStart = createMemcpyOutsideCallSeq(
7411             (LoadOffset != 0)
7412                 ? DAG.getObjectPtrOffset(dl, Arg, TypeSize::Fixed(LoadOffset))
7413                 : Arg,
7414             DAG.getObjectPtrOffset(dl, StackPtr,
7415                                    TypeSize::Fixed(ByValVA.getLocMemOffset())),
7416             CallSeqStart, MemcpyFlags, DAG, dl);
7417         continue;
7418       }
7419 
7420       // Initialize the final register residue.
7421       // Any residue that occupies the final by-val arg register must be
7422       // left-justified on AIX. Loads must be a power-of-2 size and cannot be
7423       // larger than the ByValSize. For example: a 7 byte by-val arg requires 4,
7424       // 2 and 1 byte loads.
7425       const unsigned ResidueBytes = ByValSize % PtrByteSize;
7426       assert(ResidueBytes != 0 && LoadOffset + PtrByteSize > ByValSize &&
7427              "Unexpected register residue for by-value argument.");
7428       SDValue ResidueVal;
7429       for (unsigned Bytes = 0; Bytes != ResidueBytes;) {
7430         const unsigned N = PowerOf2Floor(ResidueBytes - Bytes);
7431         const MVT VT =
7432             N == 1 ? MVT::i8
7433                    : ((N == 2) ? MVT::i16 : (N == 4 ? MVT::i32 : MVT::i64));
7434         SDValue Load = GetLoad(VT, LoadOffset);
7435         MemOpChains.push_back(Load.getValue(1));
7436         LoadOffset += N;
7437         Bytes += N;
7438 
7439         // By-val arguments are passed left-justfied in register.
7440         // Every load here needs to be shifted, otherwise a full register load
7441         // should have been used.
7442         assert(PtrVT.getSimpleVT().getSizeInBits() > (Bytes * 8) &&
7443                "Unexpected load emitted during handling of pass-by-value "
7444                "argument.");
7445         unsigned NumSHLBits = PtrVT.getSimpleVT().getSizeInBits() - (Bytes * 8);
7446         EVT ShiftAmountTy =
7447             getShiftAmountTy(Load->getValueType(0), DAG.getDataLayout());
7448         SDValue SHLAmt = DAG.getConstant(NumSHLBits, dl, ShiftAmountTy);
7449         SDValue ShiftedLoad =
7450             DAG.getNode(ISD::SHL, dl, Load.getValueType(), Load, SHLAmt);
7451         ResidueVal = ResidueVal ? DAG.getNode(ISD::OR, dl, PtrVT, ResidueVal,
7452                                               ShiftedLoad)
7453                                 : ShiftedLoad;
7454       }
7455 
7456       const CCValAssign &ByValVA = ArgLocs[I++];
7457       RegsToPass.push_back(std::make_pair(ByValVA.getLocReg(), ResidueVal));
7458       continue;
7459     }
7460 
7461     CCValAssign &VA = ArgLocs[I++];
7462     const MVT LocVT = VA.getLocVT();
7463     const MVT ValVT = VA.getValVT();
7464 
7465     switch (VA.getLocInfo()) {
7466     default:
7467       report_fatal_error("Unexpected argument extension type.");
7468     case CCValAssign::Full:
7469       break;
7470     case CCValAssign::ZExt:
7471       Arg = DAG.getNode(ISD::ZERO_EXTEND, dl, VA.getLocVT(), Arg);
7472       break;
7473     case CCValAssign::SExt:
7474       Arg = DAG.getNode(ISD::SIGN_EXTEND, dl, VA.getLocVT(), Arg);
7475       break;
7476     }
7477 
7478     if (VA.isRegLoc() && !VA.needsCustom()) {
7479       RegsToPass.push_back(std::make_pair(VA.getLocReg(), Arg));
7480       continue;
7481     }
7482 
7483     if (VA.isMemLoc()) {
7484       SDValue PtrOff =
7485           DAG.getConstant(VA.getLocMemOffset(), dl, StackPtr.getValueType());
7486       PtrOff = DAG.getNode(ISD::ADD, dl, PtrVT, StackPtr, PtrOff);
7487       MemOpChains.push_back(
7488           DAG.getStore(Chain, dl, Arg, PtrOff, MachinePointerInfo()));
7489 
7490       continue;
7491     }
7492 
7493     // Custom handling is used for GPR initializations for vararg float
7494     // arguments.
7495     assert(VA.isRegLoc() && VA.needsCustom() && CFlags.IsVarArg &&
7496            ValVT.isFloatingPoint() && LocVT.isInteger() &&
7497            "Unexpected register handling for calling convention.");
7498 
7499     SDValue ArgAsInt =
7500         DAG.getBitcast(MVT::getIntegerVT(ValVT.getSizeInBits()), Arg);
7501 
7502     if (Arg.getValueType().getStoreSize() == LocVT.getStoreSize())
7503       // f32 in 32-bit GPR
7504       // f64 in 64-bit GPR
7505       RegsToPass.push_back(std::make_pair(VA.getLocReg(), ArgAsInt));
7506     else if (Arg.getValueType().getSizeInBits() < LocVT.getSizeInBits())
7507       // f32 in 64-bit GPR.
7508       RegsToPass.push_back(std::make_pair(
7509           VA.getLocReg(), DAG.getZExtOrTrunc(ArgAsInt, dl, LocVT)));
7510     else {
7511       // f64 in two 32-bit GPRs
7512       // The 2 GPRs are marked custom and expected to be adjacent in ArgLocs.
7513       assert(Arg.getValueType() == MVT::f64 && CFlags.IsVarArg && !IsPPC64 &&
7514              "Unexpected custom register for argument!");
7515       CCValAssign &GPR1 = VA;
7516       SDValue MSWAsI64 = DAG.getNode(ISD::SRL, dl, MVT::i64, ArgAsInt,
7517                                      DAG.getConstant(32, dl, MVT::i8));
7518       RegsToPass.push_back(std::make_pair(
7519           GPR1.getLocReg(), DAG.getZExtOrTrunc(MSWAsI64, dl, MVT::i32)));
7520 
7521       if (I != E) {
7522         // If only 1 GPR was available, there will only be one custom GPR and
7523         // the argument will also pass in memory.
7524         CCValAssign &PeekArg = ArgLocs[I];
7525         if (PeekArg.isRegLoc() && PeekArg.getValNo() == PeekArg.getValNo()) {
7526           assert(PeekArg.needsCustom() && "A second custom GPR is expected.");
7527           CCValAssign &GPR2 = ArgLocs[I++];
7528           RegsToPass.push_back(std::make_pair(
7529               GPR2.getLocReg(), DAG.getZExtOrTrunc(ArgAsInt, dl, MVT::i32)));
7530         }
7531       }
7532     }
7533   }
7534 
7535   if (!MemOpChains.empty())
7536     Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, MemOpChains);
7537 
7538   // For indirect calls, we need to save the TOC base to the stack for
7539   // restoration after the call.
7540   if (CFlags.IsIndirect) {
7541     assert(!CFlags.IsTailCall && "Indirect tail-calls not supported.");
7542     const MCRegister TOCBaseReg = Subtarget.getTOCPointerRegister();
7543     const MCRegister StackPtrReg = Subtarget.getStackPointerRegister();
7544     const MVT PtrVT = Subtarget.isPPC64() ? MVT::i64 : MVT::i32;
7545     const unsigned TOCSaveOffset =
7546         Subtarget.getFrameLowering()->getTOCSaveOffset();
7547 
7548     setUsesTOCBasePtr(DAG);
7549     SDValue Val = DAG.getCopyFromReg(Chain, dl, TOCBaseReg, PtrVT);
7550     SDValue PtrOff = DAG.getIntPtrConstant(TOCSaveOffset, dl);
7551     SDValue StackPtr = DAG.getRegister(StackPtrReg, PtrVT);
7552     SDValue AddPtr = DAG.getNode(ISD::ADD, dl, PtrVT, StackPtr, PtrOff);
7553     Chain = DAG.getStore(
7554         Val.getValue(1), dl, Val, AddPtr,
7555         MachinePointerInfo::getStack(DAG.getMachineFunction(), TOCSaveOffset));
7556   }
7557 
7558   // Build a sequence of copy-to-reg nodes chained together with token chain
7559   // and flag operands which copy the outgoing args into the appropriate regs.
7560   SDValue InFlag;
7561   for (auto Reg : RegsToPass) {
7562     Chain = DAG.getCopyToReg(Chain, dl, Reg.first, Reg.second, InFlag);
7563     InFlag = Chain.getValue(1);
7564   }
7565 
7566   const int SPDiff = 0;
7567   return FinishCall(CFlags, dl, DAG, RegsToPass, InFlag, Chain, CallSeqStart,
7568                     Callee, SPDiff, NumBytes, Ins, InVals, CB);
7569 }
7570 
7571 bool
7572 PPCTargetLowering::CanLowerReturn(CallingConv::ID CallConv,
7573                                   MachineFunction &MF, bool isVarArg,
7574                                   const SmallVectorImpl<ISD::OutputArg> &Outs,
7575                                   LLVMContext &Context) const {
7576   SmallVector<CCValAssign, 16> RVLocs;
7577   CCState CCInfo(CallConv, isVarArg, MF, RVLocs, Context);
7578   return CCInfo.CheckReturn(
7579       Outs, (Subtarget.isSVR4ABI() && CallConv == CallingConv::Cold)
7580                 ? RetCC_PPC_Cold
7581                 : RetCC_PPC);
7582 }
7583 
7584 SDValue
7585 PPCTargetLowering::LowerReturn(SDValue Chain, CallingConv::ID CallConv,
7586                                bool isVarArg,
7587                                const SmallVectorImpl<ISD::OutputArg> &Outs,
7588                                const SmallVectorImpl<SDValue> &OutVals,
7589                                const SDLoc &dl, SelectionDAG &DAG) const {
7590   SmallVector<CCValAssign, 16> RVLocs;
7591   CCState CCInfo(CallConv, isVarArg, DAG.getMachineFunction(), RVLocs,
7592                  *DAG.getContext());
7593   CCInfo.AnalyzeReturn(Outs,
7594                        (Subtarget.isSVR4ABI() && CallConv == CallingConv::Cold)
7595                            ? RetCC_PPC_Cold
7596                            : RetCC_PPC);
7597 
7598   SDValue Flag;
7599   SmallVector<SDValue, 4> RetOps(1, Chain);
7600 
7601   // Copy the result values into the output registers.
7602   for (unsigned i = 0, RealResIdx = 0; i != RVLocs.size(); ++i, ++RealResIdx) {
7603     CCValAssign &VA = RVLocs[i];
7604     assert(VA.isRegLoc() && "Can only return in registers!");
7605 
7606     SDValue Arg = OutVals[RealResIdx];
7607 
7608     switch (VA.getLocInfo()) {
7609     default: llvm_unreachable("Unknown loc info!");
7610     case CCValAssign::Full: break;
7611     case CCValAssign::AExt:
7612       Arg = DAG.getNode(ISD::ANY_EXTEND, dl, VA.getLocVT(), Arg);
7613       break;
7614     case CCValAssign::ZExt:
7615       Arg = DAG.getNode(ISD::ZERO_EXTEND, dl, VA.getLocVT(), Arg);
7616       break;
7617     case CCValAssign::SExt:
7618       Arg = DAG.getNode(ISD::SIGN_EXTEND, dl, VA.getLocVT(), Arg);
7619       break;
7620     }
7621     if (Subtarget.hasSPE() && VA.getLocVT() == MVT::f64) {
7622       bool isLittleEndian = Subtarget.isLittleEndian();
7623       // Legalize ret f64 -> ret 2 x i32.
7624       SDValue SVal =
7625           DAG.getNode(PPCISD::EXTRACT_SPE, dl, MVT::i32, Arg,
7626                       DAG.getIntPtrConstant(isLittleEndian ? 0 : 1, dl));
7627       Chain = DAG.getCopyToReg(Chain, dl, VA.getLocReg(), SVal, Flag);
7628       RetOps.push_back(DAG.getRegister(VA.getLocReg(), VA.getLocVT()));
7629       SVal = DAG.getNode(PPCISD::EXTRACT_SPE, dl, MVT::i32, Arg,
7630                          DAG.getIntPtrConstant(isLittleEndian ? 1 : 0, dl));
7631       Flag = Chain.getValue(1);
7632       VA = RVLocs[++i]; // skip ahead to next loc
7633       Chain = DAG.getCopyToReg(Chain, dl, VA.getLocReg(), SVal, Flag);
7634     } else
7635       Chain = DAG.getCopyToReg(Chain, dl, VA.getLocReg(), Arg, Flag);
7636     Flag = Chain.getValue(1);
7637     RetOps.push_back(DAG.getRegister(VA.getLocReg(), VA.getLocVT()));
7638   }
7639 
7640   RetOps[0] = Chain;  // Update chain.
7641 
7642   // Add the flag if we have it.
7643   if (Flag.getNode())
7644     RetOps.push_back(Flag);
7645 
7646   return DAG.getNode(PPCISD::RET_FLAG, dl, MVT::Other, RetOps);
7647 }
7648 
7649 SDValue
7650 PPCTargetLowering::LowerGET_DYNAMIC_AREA_OFFSET(SDValue Op,
7651                                                 SelectionDAG &DAG) const {
7652   SDLoc dl(Op);
7653 
7654   // Get the correct type for integers.
7655   EVT IntVT = Op.getValueType();
7656 
7657   // Get the inputs.
7658   SDValue Chain = Op.getOperand(0);
7659   SDValue FPSIdx = getFramePointerFrameIndex(DAG);
7660   // Build a DYNAREAOFFSET node.
7661   SDValue Ops[2] = {Chain, FPSIdx};
7662   SDVTList VTs = DAG.getVTList(IntVT);
7663   return DAG.getNode(PPCISD::DYNAREAOFFSET, dl, VTs, Ops);
7664 }
7665 
7666 SDValue PPCTargetLowering::LowerSTACKRESTORE(SDValue Op,
7667                                              SelectionDAG &DAG) const {
7668   // When we pop the dynamic allocation we need to restore the SP link.
7669   SDLoc dl(Op);
7670 
7671   // Get the correct type for pointers.
7672   EVT PtrVT = getPointerTy(DAG.getDataLayout());
7673 
7674   // Construct the stack pointer operand.
7675   bool isPPC64 = Subtarget.isPPC64();
7676   unsigned SP = isPPC64 ? PPC::X1 : PPC::R1;
7677   SDValue StackPtr = DAG.getRegister(SP, PtrVT);
7678 
7679   // Get the operands for the STACKRESTORE.
7680   SDValue Chain = Op.getOperand(0);
7681   SDValue SaveSP = Op.getOperand(1);
7682 
7683   // Load the old link SP.
7684   SDValue LoadLinkSP =
7685       DAG.getLoad(PtrVT, dl, Chain, StackPtr, MachinePointerInfo());
7686 
7687   // Restore the stack pointer.
7688   Chain = DAG.getCopyToReg(LoadLinkSP.getValue(1), dl, SP, SaveSP);
7689 
7690   // Store the old link SP.
7691   return DAG.getStore(Chain, dl, LoadLinkSP, StackPtr, MachinePointerInfo());
7692 }
7693 
7694 SDValue PPCTargetLowering::getReturnAddrFrameIndex(SelectionDAG &DAG) const {
7695   MachineFunction &MF = DAG.getMachineFunction();
7696   bool isPPC64 = Subtarget.isPPC64();
7697   EVT PtrVT = getPointerTy(MF.getDataLayout());
7698 
7699   // Get current frame pointer save index.  The users of this index will be
7700   // primarily DYNALLOC instructions.
7701   PPCFunctionInfo *FI = MF.getInfo<PPCFunctionInfo>();
7702   int RASI = FI->getReturnAddrSaveIndex();
7703 
7704   // If the frame pointer save index hasn't been defined yet.
7705   if (!RASI) {
7706     // Find out what the fix offset of the frame pointer save area.
7707     int LROffset = Subtarget.getFrameLowering()->getReturnSaveOffset();
7708     // Allocate the frame index for frame pointer save area.
7709     RASI = MF.getFrameInfo().CreateFixedObject(isPPC64? 8 : 4, LROffset, false);
7710     // Save the result.
7711     FI->setReturnAddrSaveIndex(RASI);
7712   }
7713   return DAG.getFrameIndex(RASI, PtrVT);
7714 }
7715 
7716 SDValue
7717 PPCTargetLowering::getFramePointerFrameIndex(SelectionDAG & DAG) const {
7718   MachineFunction &MF = DAG.getMachineFunction();
7719   bool isPPC64 = Subtarget.isPPC64();
7720   EVT PtrVT = getPointerTy(MF.getDataLayout());
7721 
7722   // Get current frame pointer save index.  The users of this index will be
7723   // primarily DYNALLOC instructions.
7724   PPCFunctionInfo *FI = MF.getInfo<PPCFunctionInfo>();
7725   int FPSI = FI->getFramePointerSaveIndex();
7726 
7727   // If the frame pointer save index hasn't been defined yet.
7728   if (!FPSI) {
7729     // Find out what the fix offset of the frame pointer save area.
7730     int FPOffset = Subtarget.getFrameLowering()->getFramePointerSaveOffset();
7731     // Allocate the frame index for frame pointer save area.
7732     FPSI = MF.getFrameInfo().CreateFixedObject(isPPC64? 8 : 4, FPOffset, true);
7733     // Save the result.
7734     FI->setFramePointerSaveIndex(FPSI);
7735   }
7736   return DAG.getFrameIndex(FPSI, PtrVT);
7737 }
7738 
7739 SDValue PPCTargetLowering::LowerDYNAMIC_STACKALLOC(SDValue Op,
7740                                                    SelectionDAG &DAG) const {
7741   MachineFunction &MF = DAG.getMachineFunction();
7742   // Get the inputs.
7743   SDValue Chain = Op.getOperand(0);
7744   SDValue Size  = Op.getOperand(1);
7745   SDLoc dl(Op);
7746 
7747   // Get the correct type for pointers.
7748   EVT PtrVT = getPointerTy(DAG.getDataLayout());
7749   // Negate the size.
7750   SDValue NegSize = DAG.getNode(ISD::SUB, dl, PtrVT,
7751                                 DAG.getConstant(0, dl, PtrVT), Size);
7752   // Construct a node for the frame pointer save index.
7753   SDValue FPSIdx = getFramePointerFrameIndex(DAG);
7754   SDValue Ops[3] = { Chain, NegSize, FPSIdx };
7755   SDVTList VTs = DAG.getVTList(PtrVT, MVT::Other);
7756   if (hasInlineStackProbe(MF))
7757     return DAG.getNode(PPCISD::PROBED_ALLOCA, dl, VTs, Ops);
7758   return DAG.getNode(PPCISD::DYNALLOC, dl, VTs, Ops);
7759 }
7760 
7761 SDValue PPCTargetLowering::LowerEH_DWARF_CFA(SDValue Op,
7762                                                      SelectionDAG &DAG) const {
7763   MachineFunction &MF = DAG.getMachineFunction();
7764 
7765   bool isPPC64 = Subtarget.isPPC64();
7766   EVT PtrVT = getPointerTy(DAG.getDataLayout());
7767 
7768   int FI = MF.getFrameInfo().CreateFixedObject(isPPC64 ? 8 : 4, 0, false);
7769   return DAG.getFrameIndex(FI, PtrVT);
7770 }
7771 
7772 SDValue PPCTargetLowering::lowerEH_SJLJ_SETJMP(SDValue Op,
7773                                                SelectionDAG &DAG) const {
7774   SDLoc DL(Op);
7775   return DAG.getNode(PPCISD::EH_SJLJ_SETJMP, DL,
7776                      DAG.getVTList(MVT::i32, MVT::Other),
7777                      Op.getOperand(0), Op.getOperand(1));
7778 }
7779 
7780 SDValue PPCTargetLowering::lowerEH_SJLJ_LONGJMP(SDValue Op,
7781                                                 SelectionDAG &DAG) const {
7782   SDLoc DL(Op);
7783   return DAG.getNode(PPCISD::EH_SJLJ_LONGJMP, DL, MVT::Other,
7784                      Op.getOperand(0), Op.getOperand(1));
7785 }
7786 
7787 SDValue PPCTargetLowering::LowerLOAD(SDValue Op, SelectionDAG &DAG) const {
7788 
7789   assert(Op.getValueType() == MVT::i1 &&
7790          "Custom lowering only for i1 loads");
7791 
7792   // First, load 8 bits into 32 bits, then truncate to 1 bit.
7793 
7794   SDLoc dl(Op);
7795   LoadSDNode *LD = cast<LoadSDNode>(Op);
7796 
7797   SDValue Chain = LD->getChain();
7798   SDValue BasePtr = LD->getBasePtr();
7799   MachineMemOperand *MMO = LD->getMemOperand();
7800 
7801   SDValue NewLD =
7802       DAG.getExtLoad(ISD::EXTLOAD, dl, getPointerTy(DAG.getDataLayout()), Chain,
7803                      BasePtr, MVT::i8, MMO);
7804   SDValue Result = DAG.getNode(ISD::TRUNCATE, dl, MVT::i1, NewLD);
7805 
7806   SDValue Ops[] = { Result, SDValue(NewLD.getNode(), 1) };
7807   return DAG.getMergeValues(Ops, dl);
7808 }
7809 
7810 SDValue PPCTargetLowering::LowerSTORE(SDValue Op, SelectionDAG &DAG) const {
7811   assert(Op.getOperand(1).getValueType() == MVT::i1 &&
7812          "Custom lowering only for i1 stores");
7813 
7814   // First, zero extend to 32 bits, then use a truncating store to 8 bits.
7815 
7816   SDLoc dl(Op);
7817   StoreSDNode *ST = cast<StoreSDNode>(Op);
7818 
7819   SDValue Chain = ST->getChain();
7820   SDValue BasePtr = ST->getBasePtr();
7821   SDValue Value = ST->getValue();
7822   MachineMemOperand *MMO = ST->getMemOperand();
7823 
7824   Value = DAG.getNode(ISD::ZERO_EXTEND, dl, getPointerTy(DAG.getDataLayout()),
7825                       Value);
7826   return DAG.getTruncStore(Chain, dl, Value, BasePtr, MVT::i8, MMO);
7827 }
7828 
7829 // FIXME: Remove this once the ANDI glue bug is fixed:
7830 SDValue PPCTargetLowering::LowerTRUNCATE(SDValue Op, SelectionDAG &DAG) const {
7831   assert(Op.getValueType() == MVT::i1 &&
7832          "Custom lowering only for i1 results");
7833 
7834   SDLoc DL(Op);
7835   return DAG.getNode(PPCISD::ANDI_rec_1_GT_BIT, DL, MVT::i1, Op.getOperand(0));
7836 }
7837 
7838 SDValue PPCTargetLowering::LowerTRUNCATEVector(SDValue Op,
7839                                                SelectionDAG &DAG) const {
7840 
7841   // Implements a vector truncate that fits in a vector register as a shuffle.
7842   // We want to legalize vector truncates down to where the source fits in
7843   // a vector register (and target is therefore smaller than vector register
7844   // size).  At that point legalization will try to custom lower the sub-legal
7845   // result and get here - where we can contain the truncate as a single target
7846   // operation.
7847 
7848   // For example a trunc <2 x i16> to <2 x i8> could be visualized as follows:
7849   //   <MSB1|LSB1, MSB2|LSB2> to <LSB1, LSB2>
7850   //
7851   // We will implement it for big-endian ordering as this (where x denotes
7852   // undefined):
7853   //   < MSB1|LSB1, MSB2|LSB2, uu, uu, uu, uu, uu, uu> to
7854   //   < LSB1, LSB2, u, u, u, u, u, u, u, u, u, u, u, u, u, u>
7855   //
7856   // The same operation in little-endian ordering will be:
7857   //   <uu, uu, uu, uu, uu, uu, LSB2|MSB2, LSB1|MSB1> to
7858   //   <u, u, u, u, u, u, u, u, u, u, u, u, u, u, LSB2, LSB1>
7859 
7860   EVT TrgVT = Op.getValueType();
7861   assert(TrgVT.isVector() && "Vector type expected.");
7862   unsigned TrgNumElts = TrgVT.getVectorNumElements();
7863   EVT EltVT = TrgVT.getVectorElementType();
7864   if (!isOperationCustom(Op.getOpcode(), TrgVT) ||
7865       TrgVT.getSizeInBits() > 128 || !isPowerOf2_32(TrgNumElts) ||
7866       !isPowerOf2_32(EltVT.getSizeInBits()))
7867     return SDValue();
7868 
7869   SDValue N1 = Op.getOperand(0);
7870   EVT SrcVT = N1.getValueType();
7871   unsigned SrcSize = SrcVT.getSizeInBits();
7872   if (SrcSize > 256 ||
7873       !isPowerOf2_32(SrcVT.getVectorNumElements()) ||
7874       !isPowerOf2_32(SrcVT.getVectorElementType().getSizeInBits()))
7875     return SDValue();
7876   if (SrcSize == 256 && SrcVT.getVectorNumElements() < 2)
7877     return SDValue();
7878 
7879   unsigned WideNumElts = 128 / EltVT.getSizeInBits();
7880   EVT WideVT = EVT::getVectorVT(*DAG.getContext(), EltVT, WideNumElts);
7881 
7882   SDLoc DL(Op);
7883   SDValue Op1, Op2;
7884   if (SrcSize == 256) {
7885     EVT VecIdxTy = getVectorIdxTy(DAG.getDataLayout());
7886     EVT SplitVT =
7887         N1.getValueType().getHalfNumVectorElementsVT(*DAG.getContext());
7888     unsigned SplitNumElts = SplitVT.getVectorNumElements();
7889     Op1 = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, SplitVT, N1,
7890                       DAG.getConstant(0, DL, VecIdxTy));
7891     Op2 = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, SplitVT, N1,
7892                       DAG.getConstant(SplitNumElts, DL, VecIdxTy));
7893   }
7894   else {
7895     Op1 = SrcSize == 128 ? N1 : widenVec(DAG, N1, DL);
7896     Op2 = DAG.getUNDEF(WideVT);
7897   }
7898 
7899   // First list the elements we want to keep.
7900   unsigned SizeMult = SrcSize / TrgVT.getSizeInBits();
7901   SmallVector<int, 16> ShuffV;
7902   if (Subtarget.isLittleEndian())
7903     for (unsigned i = 0; i < TrgNumElts; ++i)
7904       ShuffV.push_back(i * SizeMult);
7905   else
7906     for (unsigned i = 1; i <= TrgNumElts; ++i)
7907       ShuffV.push_back(i * SizeMult - 1);
7908 
7909   // Populate the remaining elements with undefs.
7910   for (unsigned i = TrgNumElts; i < WideNumElts; ++i)
7911     // ShuffV.push_back(i + WideNumElts);
7912     ShuffV.push_back(WideNumElts + 1);
7913 
7914   Op1 = DAG.getNode(ISD::BITCAST, DL, WideVT, Op1);
7915   Op2 = DAG.getNode(ISD::BITCAST, DL, WideVT, Op2);
7916   return DAG.getVectorShuffle(WideVT, DL, Op1, Op2, ShuffV);
7917 }
7918 
7919 /// LowerSELECT_CC - Lower floating point select_cc's into fsel instruction when
7920 /// possible.
7921 SDValue PPCTargetLowering::LowerSELECT_CC(SDValue Op, SelectionDAG &DAG) const {
7922   // Not FP, or using SPE? Not a fsel.
7923   if (!Op.getOperand(0).getValueType().isFloatingPoint() ||
7924       !Op.getOperand(2).getValueType().isFloatingPoint() || Subtarget.hasSPE())
7925     return Op;
7926 
7927   ISD::CondCode CC = cast<CondCodeSDNode>(Op.getOperand(4))->get();
7928 
7929   EVT ResVT = Op.getValueType();
7930   EVT CmpVT = Op.getOperand(0).getValueType();
7931   SDValue LHS = Op.getOperand(0), RHS = Op.getOperand(1);
7932   SDValue TV  = Op.getOperand(2), FV  = Op.getOperand(3);
7933   SDLoc dl(Op);
7934   SDNodeFlags Flags = Op.getNode()->getFlags();
7935 
7936   // We have xsmaxcdp/xsmincdp which are OK to emit even in the
7937   // presence of infinities.
7938   if (Subtarget.hasP9Vector() && LHS == TV && RHS == FV) {
7939     switch (CC) {
7940     default:
7941       break;
7942     case ISD::SETOGT:
7943     case ISD::SETGT:
7944       return DAG.getNode(PPCISD::XSMAXCDP, dl, Op.getValueType(), LHS, RHS);
7945     case ISD::SETOLT:
7946     case ISD::SETLT:
7947       return DAG.getNode(PPCISD::XSMINCDP, dl, Op.getValueType(), LHS, RHS);
7948     }
7949   }
7950 
7951   // We might be able to do better than this under some circumstances, but in
7952   // general, fsel-based lowering of select is a finite-math-only optimization.
7953   // For more information, see section F.3 of the 2.06 ISA specification.
7954   // With ISA 3.0
7955   if ((!DAG.getTarget().Options.NoInfsFPMath && !Flags.hasNoInfs()) ||
7956       (!DAG.getTarget().Options.NoNaNsFPMath && !Flags.hasNoNaNs()))
7957     return Op;
7958 
7959   // If the RHS of the comparison is a 0.0, we don't need to do the
7960   // subtraction at all.
7961   SDValue Sel1;
7962   if (isFloatingPointZero(RHS))
7963     switch (CC) {
7964     default: break;       // SETUO etc aren't handled by fsel.
7965     case ISD::SETNE:
7966       std::swap(TV, FV);
7967       LLVM_FALLTHROUGH;
7968     case ISD::SETEQ:
7969       if (LHS.getValueType() == MVT::f32)   // Comparison is always 64-bits
7970         LHS = DAG.getNode(ISD::FP_EXTEND, dl, MVT::f64, LHS);
7971       Sel1 = DAG.getNode(PPCISD::FSEL, dl, ResVT, LHS, TV, FV);
7972       if (Sel1.getValueType() == MVT::f32)   // Comparison is always 64-bits
7973         Sel1 = DAG.getNode(ISD::FP_EXTEND, dl, MVT::f64, Sel1);
7974       return DAG.getNode(PPCISD::FSEL, dl, ResVT,
7975                          DAG.getNode(ISD::FNEG, dl, MVT::f64, LHS), Sel1, FV);
7976     case ISD::SETULT:
7977     case ISD::SETLT:
7978       std::swap(TV, FV);  // fsel is natively setge, swap operands for setlt
7979       LLVM_FALLTHROUGH;
7980     case ISD::SETOGE:
7981     case ISD::SETGE:
7982       if (LHS.getValueType() == MVT::f32)   // Comparison is always 64-bits
7983         LHS = DAG.getNode(ISD::FP_EXTEND, dl, MVT::f64, LHS);
7984       return DAG.getNode(PPCISD::FSEL, dl, ResVT, LHS, TV, FV);
7985     case ISD::SETUGT:
7986     case ISD::SETGT:
7987       std::swap(TV, FV);  // fsel is natively setge, swap operands for setlt
7988       LLVM_FALLTHROUGH;
7989     case ISD::SETOLE:
7990     case ISD::SETLE:
7991       if (LHS.getValueType() == MVT::f32)   // Comparison is always 64-bits
7992         LHS = DAG.getNode(ISD::FP_EXTEND, dl, MVT::f64, LHS);
7993       return DAG.getNode(PPCISD::FSEL, dl, ResVT,
7994                          DAG.getNode(ISD::FNEG, dl, MVT::f64, LHS), TV, FV);
7995     }
7996 
7997   SDValue Cmp;
7998   switch (CC) {
7999   default: break;       // SETUO etc aren't handled by fsel.
8000   case ISD::SETNE:
8001     std::swap(TV, FV);
8002     LLVM_FALLTHROUGH;
8003   case ISD::SETEQ:
8004     Cmp = DAG.getNode(ISD::FSUB, dl, CmpVT, LHS, RHS, Flags);
8005     if (Cmp.getValueType() == MVT::f32)   // Comparison is always 64-bits
8006       Cmp = DAG.getNode(ISD::FP_EXTEND, dl, MVT::f64, Cmp);
8007     Sel1 = DAG.getNode(PPCISD::FSEL, dl, ResVT, Cmp, TV, FV);
8008     if (Sel1.getValueType() == MVT::f32)   // Comparison is always 64-bits
8009       Sel1 = DAG.getNode(ISD::FP_EXTEND, dl, MVT::f64, Sel1);
8010     return DAG.getNode(PPCISD::FSEL, dl, ResVT,
8011                        DAG.getNode(ISD::FNEG, dl, MVT::f64, Cmp), Sel1, FV);
8012   case ISD::SETULT:
8013   case ISD::SETLT:
8014     Cmp = DAG.getNode(ISD::FSUB, dl, CmpVT, LHS, RHS, Flags);
8015     if (Cmp.getValueType() == MVT::f32)   // Comparison is always 64-bits
8016       Cmp = DAG.getNode(ISD::FP_EXTEND, dl, MVT::f64, Cmp);
8017     return DAG.getNode(PPCISD::FSEL, dl, ResVT, Cmp, FV, TV);
8018   case ISD::SETOGE:
8019   case ISD::SETGE:
8020     Cmp = DAG.getNode(ISD::FSUB, dl, CmpVT, LHS, RHS, Flags);
8021     if (Cmp.getValueType() == MVT::f32)   // Comparison is always 64-bits
8022       Cmp = DAG.getNode(ISD::FP_EXTEND, dl, MVT::f64, Cmp);
8023     return DAG.getNode(PPCISD::FSEL, dl, ResVT, Cmp, TV, FV);
8024   case ISD::SETUGT:
8025   case ISD::SETGT:
8026     Cmp = DAG.getNode(ISD::FSUB, dl, CmpVT, RHS, LHS, Flags);
8027     if (Cmp.getValueType() == MVT::f32)   // Comparison is always 64-bits
8028       Cmp = DAG.getNode(ISD::FP_EXTEND, dl, MVT::f64, Cmp);
8029     return DAG.getNode(PPCISD::FSEL, dl, ResVT, Cmp, FV, TV);
8030   case ISD::SETOLE:
8031   case ISD::SETLE:
8032     Cmp = DAG.getNode(ISD::FSUB, dl, CmpVT, RHS, LHS, Flags);
8033     if (Cmp.getValueType() == MVT::f32)   // Comparison is always 64-bits
8034       Cmp = DAG.getNode(ISD::FP_EXTEND, dl, MVT::f64, Cmp);
8035     return DAG.getNode(PPCISD::FSEL, dl, ResVT, Cmp, TV, FV);
8036   }
8037   return Op;
8038 }
8039 
8040 static unsigned getPPCStrictOpcode(unsigned Opc) {
8041   switch (Opc) {
8042   default:
8043     llvm_unreachable("No strict version of this opcode!");
8044   case PPCISD::FCTIDZ:
8045     return PPCISD::STRICT_FCTIDZ;
8046   case PPCISD::FCTIWZ:
8047     return PPCISD::STRICT_FCTIWZ;
8048   case PPCISD::FCTIDUZ:
8049     return PPCISD::STRICT_FCTIDUZ;
8050   case PPCISD::FCTIWUZ:
8051     return PPCISD::STRICT_FCTIWUZ;
8052   case PPCISD::FCFID:
8053     return PPCISD::STRICT_FCFID;
8054   case PPCISD::FCFIDU:
8055     return PPCISD::STRICT_FCFIDU;
8056   case PPCISD::FCFIDS:
8057     return PPCISD::STRICT_FCFIDS;
8058   case PPCISD::FCFIDUS:
8059     return PPCISD::STRICT_FCFIDUS;
8060   }
8061 }
8062 
8063 static SDValue convertFPToInt(SDValue Op, SelectionDAG &DAG,
8064                               const PPCSubtarget &Subtarget) {
8065   SDLoc dl(Op);
8066   bool IsStrict = Op->isStrictFPOpcode();
8067   bool IsSigned = Op.getOpcode() == ISD::FP_TO_SINT ||
8068                   Op.getOpcode() == ISD::STRICT_FP_TO_SINT;
8069   // For strict nodes, source is the second operand.
8070   SDValue Src = Op.getOperand(IsStrict ? 1 : 0);
8071   SDValue Chain = IsStrict ? Op.getOperand(0) : SDValue();
8072   assert(Src.getValueType().isFloatingPoint());
8073   if (Src.getValueType() == MVT::f32) {
8074     if (IsStrict) {
8075       Src = DAG.getNode(ISD::STRICT_FP_EXTEND, dl, {MVT::f64, MVT::Other},
8076                         {Chain, Src});
8077       Chain = Src.getValue(1);
8078     } else
8079       Src = DAG.getNode(ISD::FP_EXTEND, dl, MVT::f64, Src);
8080   }
8081   SDValue Conv;
8082   unsigned Opc = ISD::DELETED_NODE;
8083   switch (Op.getSimpleValueType().SimpleTy) {
8084   default: llvm_unreachable("Unhandled FP_TO_INT type in custom expander!");
8085   case MVT::i32:
8086     Opc = IsSigned ? PPCISD::FCTIWZ
8087                    : (Subtarget.hasFPCVT() ? PPCISD::FCTIWUZ : PPCISD::FCTIDZ);
8088     break;
8089   case MVT::i64:
8090     assert((IsSigned || Subtarget.hasFPCVT()) &&
8091            "i64 FP_TO_UINT is supported only with FPCVT");
8092     Opc = IsSigned ? PPCISD::FCTIDZ : PPCISD::FCTIDUZ;
8093   }
8094   if (IsStrict) {
8095     Opc = getPPCStrictOpcode(Opc);
8096     Conv = DAG.getNode(Opc, dl, {MVT::f64, MVT::Other}, {Chain, Src});
8097   } else {
8098     Conv = DAG.getNode(Opc, dl, MVT::f64, Src);
8099   }
8100   return Conv;
8101 }
8102 
8103 void PPCTargetLowering::LowerFP_TO_INTForReuse(SDValue Op, ReuseLoadInfo &RLI,
8104                                                SelectionDAG &DAG,
8105                                                const SDLoc &dl) const {
8106   SDValue Tmp = convertFPToInt(Op, DAG, Subtarget);
8107   bool IsSigned = Op.getOpcode() == ISD::FP_TO_SINT ||
8108                   Op.getOpcode() == ISD::STRICT_FP_TO_SINT;
8109   bool IsStrict = Op->isStrictFPOpcode();
8110 
8111   // Convert the FP value to an int value through memory.
8112   bool i32Stack = Op.getValueType() == MVT::i32 && Subtarget.hasSTFIWX() &&
8113                   (IsSigned || Subtarget.hasFPCVT());
8114   SDValue FIPtr = DAG.CreateStackTemporary(i32Stack ? MVT::i32 : MVT::f64);
8115   int FI = cast<FrameIndexSDNode>(FIPtr)->getIndex();
8116   MachinePointerInfo MPI =
8117       MachinePointerInfo::getFixedStack(DAG.getMachineFunction(), FI);
8118 
8119   // Emit a store to the stack slot.
8120   SDValue Chain = IsStrict ? Tmp.getValue(1) : DAG.getEntryNode();
8121   Align Alignment(DAG.getEVTAlign(Tmp.getValueType()));
8122   if (i32Stack) {
8123     MachineFunction &MF = DAG.getMachineFunction();
8124     Alignment = Align(4);
8125     MachineMemOperand *MMO =
8126         MF.getMachineMemOperand(MPI, MachineMemOperand::MOStore, 4, Alignment);
8127     SDValue Ops[] = { Chain, Tmp, FIPtr };
8128     Chain = DAG.getMemIntrinsicNode(PPCISD::STFIWX, dl,
8129               DAG.getVTList(MVT::Other), Ops, MVT::i32, MMO);
8130   } else
8131     Chain = DAG.getStore(Chain, dl, Tmp, FIPtr, MPI, Alignment);
8132 
8133   // Result is a load from the stack slot.  If loading 4 bytes, make sure to
8134   // add in a bias on big endian.
8135   if (Op.getValueType() == MVT::i32 && !i32Stack) {
8136     FIPtr = DAG.getNode(ISD::ADD, dl, FIPtr.getValueType(), FIPtr,
8137                         DAG.getConstant(4, dl, FIPtr.getValueType()));
8138     MPI = MPI.getWithOffset(Subtarget.isLittleEndian() ? 0 : 4);
8139   }
8140 
8141   RLI.Chain = Chain;
8142   RLI.Ptr = FIPtr;
8143   RLI.MPI = MPI;
8144   RLI.Alignment = Alignment;
8145 }
8146 
8147 /// Custom lowers floating point to integer conversions to use
8148 /// the direct move instructions available in ISA 2.07 to avoid the
8149 /// need for load/store combinations.
8150 SDValue PPCTargetLowering::LowerFP_TO_INTDirectMove(SDValue Op,
8151                                                     SelectionDAG &DAG,
8152                                                     const SDLoc &dl) const {
8153   SDValue Conv = convertFPToInt(Op, DAG, Subtarget);
8154   SDValue Mov = DAG.getNode(PPCISD::MFVSR, dl, Op.getValueType(), Conv);
8155   if (Op->isStrictFPOpcode())
8156     return DAG.getMergeValues({Mov, Conv.getValue(1)}, dl);
8157   else
8158     return Mov;
8159 }
8160 
8161 SDValue PPCTargetLowering::LowerFP_TO_INT(SDValue Op, SelectionDAG &DAG,
8162                                           const SDLoc &dl) const {
8163   bool IsStrict = Op->isStrictFPOpcode();
8164   bool IsSigned = Op.getOpcode() == ISD::FP_TO_SINT ||
8165                   Op.getOpcode() == ISD::STRICT_FP_TO_SINT;
8166   SDValue Src = Op.getOperand(IsStrict ? 1 : 0);
8167   // FP to INT conversions are legal for f128.
8168   if (Src.getValueType() == MVT::f128)
8169     return Op;
8170 
8171   // Expand ppcf128 to i32 by hand for the benefit of llvm-gcc bootstrap on
8172   // PPC (the libcall is not available).
8173   if (Src.getValueType() == MVT::ppcf128 && !IsStrict) {
8174     if (Op.getValueType() == MVT::i32) {
8175       if (IsSigned) {
8176         SDValue Lo = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, MVT::f64, Src,
8177                                  DAG.getIntPtrConstant(0, dl));
8178         SDValue Hi = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, MVT::f64, Src,
8179                                  DAG.getIntPtrConstant(1, dl));
8180 
8181         // Add the two halves of the long double in round-to-zero mode.
8182         SDValue Res = DAG.getNode(PPCISD::FADDRTZ, dl, MVT::f64, Lo, Hi);
8183 
8184         // Now use a smaller FP_TO_SINT.
8185         return DAG.getNode(ISD::FP_TO_SINT, dl, MVT::i32, Res);
8186       } else {
8187         const uint64_t TwoE31[] = {0x41e0000000000000LL, 0};
8188         APFloat APF = APFloat(APFloat::PPCDoubleDouble(), APInt(128, TwoE31));
8189         SDValue Tmp = DAG.getConstantFP(APF, dl, MVT::ppcf128);
8190         //  X>=2^31 ? (int)(X-2^31)+0x80000000 : (int)X
8191         // FIXME: generated code sucks.
8192         // TODO: Are there fast-math-flags to propagate to this FSUB?
8193         SDValue True = DAG.getNode(ISD::FSUB, dl, MVT::ppcf128, Src, Tmp);
8194         True = DAG.getNode(ISD::FP_TO_SINT, dl, MVT::i32, True);
8195         True = DAG.getNode(ISD::ADD, dl, MVT::i32, True,
8196                            DAG.getConstant(0x80000000, dl, MVT::i32));
8197         SDValue False = DAG.getNode(ISD::FP_TO_SINT, dl, MVT::i32, Src);
8198         return DAG.getSelectCC(dl, Src, Tmp, True, False, ISD::SETGE);
8199       }
8200     }
8201 
8202     return SDValue();
8203   }
8204 
8205   if (Subtarget.hasDirectMove() && Subtarget.isPPC64())
8206     return LowerFP_TO_INTDirectMove(Op, DAG, dl);
8207 
8208   ReuseLoadInfo RLI;
8209   LowerFP_TO_INTForReuse(Op, RLI, DAG, dl);
8210 
8211   return DAG.getLoad(Op.getValueType(), dl, RLI.Chain, RLI.Ptr, RLI.MPI,
8212                      RLI.Alignment, RLI.MMOFlags(), RLI.AAInfo, RLI.Ranges);
8213 }
8214 
8215 // We're trying to insert a regular store, S, and then a load, L. If the
8216 // incoming value, O, is a load, we might just be able to have our load use the
8217 // address used by O. However, we don't know if anything else will store to
8218 // that address before we can load from it. To prevent this situation, we need
8219 // to insert our load, L, into the chain as a peer of O. To do this, we give L
8220 // the same chain operand as O, we create a token factor from the chain results
8221 // of O and L, and we replace all uses of O's chain result with that token
8222 // factor (see spliceIntoChain below for this last part).
8223 bool PPCTargetLowering::canReuseLoadAddress(SDValue Op, EVT MemVT,
8224                                             ReuseLoadInfo &RLI,
8225                                             SelectionDAG &DAG,
8226                                             ISD::LoadExtType ET) const {
8227   // Conservatively skip reusing for constrained FP nodes.
8228   if (Op->isStrictFPOpcode())
8229     return false;
8230 
8231   SDLoc dl(Op);
8232   bool ValidFPToUint = Op.getOpcode() == ISD::FP_TO_UINT &&
8233                        (Subtarget.hasFPCVT() || Op.getValueType() == MVT::i32);
8234   if (ET == ISD::NON_EXTLOAD &&
8235       (ValidFPToUint || Op.getOpcode() == ISD::FP_TO_SINT) &&
8236       isOperationLegalOrCustom(Op.getOpcode(),
8237                                Op.getOperand(0).getValueType())) {
8238 
8239     LowerFP_TO_INTForReuse(Op, RLI, DAG, dl);
8240     return true;
8241   }
8242 
8243   LoadSDNode *LD = dyn_cast<LoadSDNode>(Op);
8244   if (!LD || LD->getExtensionType() != ET || LD->isVolatile() ||
8245       LD->isNonTemporal())
8246     return false;
8247   if (LD->getMemoryVT() != MemVT)
8248     return false;
8249 
8250   RLI.Ptr = LD->getBasePtr();
8251   if (LD->isIndexed() && !LD->getOffset().isUndef()) {
8252     assert(LD->getAddressingMode() == ISD::PRE_INC &&
8253            "Non-pre-inc AM on PPC?");
8254     RLI.Ptr = DAG.getNode(ISD::ADD, dl, RLI.Ptr.getValueType(), RLI.Ptr,
8255                           LD->getOffset());
8256   }
8257 
8258   RLI.Chain = LD->getChain();
8259   RLI.MPI = LD->getPointerInfo();
8260   RLI.IsDereferenceable = LD->isDereferenceable();
8261   RLI.IsInvariant = LD->isInvariant();
8262   RLI.Alignment = LD->getAlign();
8263   RLI.AAInfo = LD->getAAInfo();
8264   RLI.Ranges = LD->getRanges();
8265 
8266   RLI.ResChain = SDValue(LD, LD->isIndexed() ? 2 : 1);
8267   return true;
8268 }
8269 
8270 // Given the head of the old chain, ResChain, insert a token factor containing
8271 // it and NewResChain, and make users of ResChain now be users of that token
8272 // factor.
8273 // TODO: Remove and use DAG::makeEquivalentMemoryOrdering() instead.
8274 void PPCTargetLowering::spliceIntoChain(SDValue ResChain,
8275                                         SDValue NewResChain,
8276                                         SelectionDAG &DAG) const {
8277   if (!ResChain)
8278     return;
8279 
8280   SDLoc dl(NewResChain);
8281 
8282   SDValue TF = DAG.getNode(ISD::TokenFactor, dl, MVT::Other,
8283                            NewResChain, DAG.getUNDEF(MVT::Other));
8284   assert(TF.getNode() != NewResChain.getNode() &&
8285          "A new TF really is required here");
8286 
8287   DAG.ReplaceAllUsesOfValueWith(ResChain, TF);
8288   DAG.UpdateNodeOperands(TF.getNode(), ResChain, NewResChain);
8289 }
8290 
8291 /// Analyze profitability of direct move
8292 /// prefer float load to int load plus direct move
8293 /// when there is no integer use of int load
8294 bool PPCTargetLowering::directMoveIsProfitable(const SDValue &Op) const {
8295   SDNode *Origin = Op.getOperand(0).getNode();
8296   if (Origin->getOpcode() != ISD::LOAD)
8297     return true;
8298 
8299   // If there is no LXSIBZX/LXSIHZX, like Power8,
8300   // prefer direct move if the memory size is 1 or 2 bytes.
8301   MachineMemOperand *MMO = cast<LoadSDNode>(Origin)->getMemOperand();
8302   if (!Subtarget.hasP9Vector() && MMO->getSize() <= 2)
8303     return true;
8304 
8305   for (SDNode::use_iterator UI = Origin->use_begin(),
8306                             UE = Origin->use_end();
8307        UI != UE; ++UI) {
8308 
8309     // Only look at the users of the loaded value.
8310     if (UI.getUse().get().getResNo() != 0)
8311       continue;
8312 
8313     if (UI->getOpcode() != ISD::SINT_TO_FP &&
8314         UI->getOpcode() != ISD::UINT_TO_FP &&
8315         UI->getOpcode() != ISD::STRICT_SINT_TO_FP &&
8316         UI->getOpcode() != ISD::STRICT_UINT_TO_FP)
8317       return true;
8318   }
8319 
8320   return false;
8321 }
8322 
8323 static SDValue convertIntToFP(SDValue Op, SDValue Src, SelectionDAG &DAG,
8324                               const PPCSubtarget &Subtarget,
8325                               SDValue Chain = SDValue()) {
8326   bool IsSigned = Op.getOpcode() == ISD::SINT_TO_FP ||
8327                   Op.getOpcode() == ISD::STRICT_SINT_TO_FP;
8328   SDLoc dl(Op);
8329   // If we have FCFIDS, then use it when converting to single-precision.
8330   // Otherwise, convert to double-precision and then round.
8331   bool IsSingle = Op.getValueType() == MVT::f32 && Subtarget.hasFPCVT();
8332   unsigned ConvOpc = IsSingle ? (IsSigned ? PPCISD::FCFIDS : PPCISD::FCFIDUS)
8333                               : (IsSigned ? PPCISD::FCFID : PPCISD::FCFIDU);
8334   EVT ConvTy = IsSingle ? MVT::f32 : MVT::f64;
8335   if (Op->isStrictFPOpcode()) {
8336     if (!Chain)
8337       Chain = Op.getOperand(0);
8338     return DAG.getNode(getPPCStrictOpcode(ConvOpc), dl, {ConvTy, MVT::Other},
8339                        {Chain, Src});
8340   } else
8341     return DAG.getNode(ConvOpc, dl, ConvTy, Src);
8342 }
8343 
8344 /// Custom lowers integer to floating point conversions to use
8345 /// the direct move instructions available in ISA 2.07 to avoid the
8346 /// need for load/store combinations.
8347 SDValue PPCTargetLowering::LowerINT_TO_FPDirectMove(SDValue Op,
8348                                                     SelectionDAG &DAG,
8349                                                     const SDLoc &dl) const {
8350   assert((Op.getValueType() == MVT::f32 ||
8351           Op.getValueType() == MVT::f64) &&
8352          "Invalid floating point type as target of conversion");
8353   assert(Subtarget.hasFPCVT() &&
8354          "Int to FP conversions with direct moves require FPCVT");
8355   SDValue Src = Op.getOperand(Op->isStrictFPOpcode() ? 1 : 0);
8356   bool WordInt = Src.getSimpleValueType().SimpleTy == MVT::i32;
8357   bool Signed = Op.getOpcode() == ISD::SINT_TO_FP ||
8358                 Op.getOpcode() == ISD::STRICT_SINT_TO_FP;
8359   unsigned MovOpc = (WordInt && !Signed) ? PPCISD::MTVSRZ : PPCISD::MTVSRA;
8360   SDValue Mov = DAG.getNode(MovOpc, dl, MVT::f64, Src);
8361   return convertIntToFP(Op, Mov, DAG, Subtarget);
8362 }
8363 
8364 static SDValue widenVec(SelectionDAG &DAG, SDValue Vec, const SDLoc &dl) {
8365 
8366   EVT VecVT = Vec.getValueType();
8367   assert(VecVT.isVector() && "Expected a vector type.");
8368   assert(VecVT.getSizeInBits() < 128 && "Vector is already full width.");
8369 
8370   EVT EltVT = VecVT.getVectorElementType();
8371   unsigned WideNumElts = 128 / EltVT.getSizeInBits();
8372   EVT WideVT = EVT::getVectorVT(*DAG.getContext(), EltVT, WideNumElts);
8373 
8374   unsigned NumConcat = WideNumElts / VecVT.getVectorNumElements();
8375   SmallVector<SDValue, 16> Ops(NumConcat);
8376   Ops[0] = Vec;
8377   SDValue UndefVec = DAG.getUNDEF(VecVT);
8378   for (unsigned i = 1; i < NumConcat; ++i)
8379     Ops[i] = UndefVec;
8380 
8381   return DAG.getNode(ISD::CONCAT_VECTORS, dl, WideVT, Ops);
8382 }
8383 
8384 SDValue PPCTargetLowering::LowerINT_TO_FPVector(SDValue Op, SelectionDAG &DAG,
8385                                                 const SDLoc &dl) const {
8386   bool IsStrict = Op->isStrictFPOpcode();
8387   unsigned Opc = Op.getOpcode();
8388   SDValue Src = Op.getOperand(IsStrict ? 1 : 0);
8389   assert((Opc == ISD::UINT_TO_FP || Opc == ISD::SINT_TO_FP ||
8390           Opc == ISD::STRICT_UINT_TO_FP || Opc == ISD::STRICT_SINT_TO_FP) &&
8391          "Unexpected conversion type");
8392   assert((Op.getValueType() == MVT::v2f64 || Op.getValueType() == MVT::v4f32) &&
8393          "Supports conversions to v2f64/v4f32 only.");
8394 
8395   bool SignedConv = Opc == ISD::SINT_TO_FP || Opc == ISD::STRICT_SINT_TO_FP;
8396   bool FourEltRes = Op.getValueType() == MVT::v4f32;
8397 
8398   SDValue Wide = widenVec(DAG, Src, dl);
8399   EVT WideVT = Wide.getValueType();
8400   unsigned WideNumElts = WideVT.getVectorNumElements();
8401   MVT IntermediateVT = FourEltRes ? MVT::v4i32 : MVT::v2i64;
8402 
8403   SmallVector<int, 16> ShuffV;
8404   for (unsigned i = 0; i < WideNumElts; ++i)
8405     ShuffV.push_back(i + WideNumElts);
8406 
8407   int Stride = FourEltRes ? WideNumElts / 4 : WideNumElts / 2;
8408   int SaveElts = FourEltRes ? 4 : 2;
8409   if (Subtarget.isLittleEndian())
8410     for (int i = 0; i < SaveElts; i++)
8411       ShuffV[i * Stride] = i;
8412   else
8413     for (int i = 1; i <= SaveElts; i++)
8414       ShuffV[i * Stride - 1] = i - 1;
8415 
8416   SDValue ShuffleSrc2 =
8417       SignedConv ? DAG.getUNDEF(WideVT) : DAG.getConstant(0, dl, WideVT);
8418   SDValue Arrange = DAG.getVectorShuffle(WideVT, dl, Wide, ShuffleSrc2, ShuffV);
8419 
8420   SDValue Extend;
8421   if (SignedConv) {
8422     Arrange = DAG.getBitcast(IntermediateVT, Arrange);
8423     EVT ExtVT = Src.getValueType();
8424     if (Subtarget.hasP9Altivec())
8425       ExtVT = EVT::getVectorVT(*DAG.getContext(), WideVT.getVectorElementType(),
8426                                IntermediateVT.getVectorNumElements());
8427 
8428     Extend = DAG.getNode(ISD::SIGN_EXTEND_INREG, dl, IntermediateVT, Arrange,
8429                          DAG.getValueType(ExtVT));
8430   } else
8431     Extend = DAG.getNode(ISD::BITCAST, dl, IntermediateVT, Arrange);
8432 
8433   if (IsStrict)
8434     return DAG.getNode(Opc, dl, {Op.getValueType(), MVT::Other},
8435                        {Op.getOperand(0), Extend});
8436 
8437   return DAG.getNode(Opc, dl, Op.getValueType(), Extend);
8438 }
8439 
8440 SDValue PPCTargetLowering::LowerINT_TO_FP(SDValue Op,
8441                                           SelectionDAG &DAG) const {
8442   SDLoc dl(Op);
8443   bool IsSigned = Op.getOpcode() == ISD::SINT_TO_FP ||
8444                   Op.getOpcode() == ISD::STRICT_SINT_TO_FP;
8445   bool IsStrict = Op->isStrictFPOpcode();
8446   SDValue Src = Op.getOperand(IsStrict ? 1 : 0);
8447   SDValue Chain = IsStrict ? Op.getOperand(0) : DAG.getEntryNode();
8448 
8449   EVT InVT = Src.getValueType();
8450   EVT OutVT = Op.getValueType();
8451   if (OutVT.isVector() && OutVT.isFloatingPoint() &&
8452       isOperationCustom(Op.getOpcode(), InVT))
8453     return LowerINT_TO_FPVector(Op, DAG, dl);
8454 
8455   // Conversions to f128 are legal.
8456   if (Op.getValueType() == MVT::f128)
8457     return Op;
8458 
8459   // Don't handle ppc_fp128 here; let it be lowered to a libcall.
8460   if (Op.getValueType() != MVT::f32 && Op.getValueType() != MVT::f64)
8461     return SDValue();
8462 
8463   if (Src.getValueType() == MVT::i1)
8464     return DAG.getNode(ISD::SELECT, dl, Op.getValueType(), Src,
8465                        DAG.getConstantFP(1.0, dl, Op.getValueType()),
8466                        DAG.getConstantFP(0.0, dl, Op.getValueType()));
8467 
8468   // If we have direct moves, we can do all the conversion, skip the store/load
8469   // however, without FPCVT we can't do most conversions.
8470   if (Subtarget.hasDirectMove() && directMoveIsProfitable(Op) &&
8471       Subtarget.isPPC64() && Subtarget.hasFPCVT())
8472     return LowerINT_TO_FPDirectMove(Op, DAG, dl);
8473 
8474   assert((IsSigned || Subtarget.hasFPCVT()) &&
8475          "UINT_TO_FP is supported only with FPCVT");
8476 
8477   if (Src.getValueType() == MVT::i64) {
8478     SDValue SINT = Src;
8479     // When converting to single-precision, we actually need to convert
8480     // to double-precision first and then round to single-precision.
8481     // To avoid double-rounding effects during that operation, we have
8482     // to prepare the input operand.  Bits that might be truncated when
8483     // converting to double-precision are replaced by a bit that won't
8484     // be lost at this stage, but is below the single-precision rounding
8485     // position.
8486     //
8487     // However, if -enable-unsafe-fp-math is in effect, accept double
8488     // rounding to avoid the extra overhead.
8489     if (Op.getValueType() == MVT::f32 &&
8490         !Subtarget.hasFPCVT() &&
8491         !DAG.getTarget().Options.UnsafeFPMath) {
8492 
8493       // Twiddle input to make sure the low 11 bits are zero.  (If this
8494       // is the case, we are guaranteed the value will fit into the 53 bit
8495       // mantissa of an IEEE double-precision value without rounding.)
8496       // If any of those low 11 bits were not zero originally, make sure
8497       // bit 12 (value 2048) is set instead, so that the final rounding
8498       // to single-precision gets the correct result.
8499       SDValue Round = DAG.getNode(ISD::AND, dl, MVT::i64,
8500                                   SINT, DAG.getConstant(2047, dl, MVT::i64));
8501       Round = DAG.getNode(ISD::ADD, dl, MVT::i64,
8502                           Round, DAG.getConstant(2047, dl, MVT::i64));
8503       Round = DAG.getNode(ISD::OR, dl, MVT::i64, Round, SINT);
8504       Round = DAG.getNode(ISD::AND, dl, MVT::i64,
8505                           Round, DAG.getConstant(-2048, dl, MVT::i64));
8506 
8507       // However, we cannot use that value unconditionally: if the magnitude
8508       // of the input value is small, the bit-twiddling we did above might
8509       // end up visibly changing the output.  Fortunately, in that case, we
8510       // don't need to twiddle bits since the original input will convert
8511       // exactly to double-precision floating-point already.  Therefore,
8512       // construct a conditional to use the original value if the top 11
8513       // bits are all sign-bit copies, and use the rounded value computed
8514       // above otherwise.
8515       SDValue Cond = DAG.getNode(ISD::SRA, dl, MVT::i64,
8516                                  SINT, DAG.getConstant(53, dl, MVT::i32));
8517       Cond = DAG.getNode(ISD::ADD, dl, MVT::i64,
8518                          Cond, DAG.getConstant(1, dl, MVT::i64));
8519       Cond = DAG.getSetCC(
8520           dl,
8521           getSetCCResultType(DAG.getDataLayout(), *DAG.getContext(), MVT::i64),
8522           Cond, DAG.getConstant(1, dl, MVT::i64), ISD::SETUGT);
8523 
8524       SINT = DAG.getNode(ISD::SELECT, dl, MVT::i64, Cond, Round, SINT);
8525     }
8526 
8527     ReuseLoadInfo RLI;
8528     SDValue Bits;
8529 
8530     MachineFunction &MF = DAG.getMachineFunction();
8531     if (canReuseLoadAddress(SINT, MVT::i64, RLI, DAG)) {
8532       Bits = DAG.getLoad(MVT::f64, dl, RLI.Chain, RLI.Ptr, RLI.MPI,
8533                          RLI.Alignment, RLI.MMOFlags(), RLI.AAInfo, RLI.Ranges);
8534       spliceIntoChain(RLI.ResChain, Bits.getValue(1), DAG);
8535     } else if (Subtarget.hasLFIWAX() &&
8536                canReuseLoadAddress(SINT, MVT::i32, RLI, DAG, ISD::SEXTLOAD)) {
8537       MachineMemOperand *MMO =
8538         MF.getMachineMemOperand(RLI.MPI, MachineMemOperand::MOLoad, 4,
8539                                 RLI.Alignment, RLI.AAInfo, RLI.Ranges);
8540       SDValue Ops[] = { RLI.Chain, RLI.Ptr };
8541       Bits = DAG.getMemIntrinsicNode(PPCISD::LFIWAX, dl,
8542                                      DAG.getVTList(MVT::f64, MVT::Other),
8543                                      Ops, MVT::i32, MMO);
8544       spliceIntoChain(RLI.ResChain, Bits.getValue(1), DAG);
8545     } else if (Subtarget.hasFPCVT() &&
8546                canReuseLoadAddress(SINT, MVT::i32, RLI, DAG, ISD::ZEXTLOAD)) {
8547       MachineMemOperand *MMO =
8548         MF.getMachineMemOperand(RLI.MPI, MachineMemOperand::MOLoad, 4,
8549                                 RLI.Alignment, RLI.AAInfo, RLI.Ranges);
8550       SDValue Ops[] = { RLI.Chain, RLI.Ptr };
8551       Bits = DAG.getMemIntrinsicNode(PPCISD::LFIWZX, dl,
8552                                      DAG.getVTList(MVT::f64, MVT::Other),
8553                                      Ops, MVT::i32, MMO);
8554       spliceIntoChain(RLI.ResChain, Bits.getValue(1), DAG);
8555     } else if (((Subtarget.hasLFIWAX() &&
8556                  SINT.getOpcode() == ISD::SIGN_EXTEND) ||
8557                 (Subtarget.hasFPCVT() &&
8558                  SINT.getOpcode() == ISD::ZERO_EXTEND)) &&
8559                SINT.getOperand(0).getValueType() == MVT::i32) {
8560       MachineFrameInfo &MFI = MF.getFrameInfo();
8561       EVT PtrVT = getPointerTy(DAG.getDataLayout());
8562 
8563       int FrameIdx = MFI.CreateStackObject(4, Align(4), false);
8564       SDValue FIdx = DAG.getFrameIndex(FrameIdx, PtrVT);
8565 
8566       SDValue Store = DAG.getStore(Chain, dl, SINT.getOperand(0), FIdx,
8567                                    MachinePointerInfo::getFixedStack(
8568                                        DAG.getMachineFunction(), FrameIdx));
8569       Chain = Store;
8570 
8571       assert(cast<StoreSDNode>(Store)->getMemoryVT() == MVT::i32 &&
8572              "Expected an i32 store");
8573 
8574       RLI.Ptr = FIdx;
8575       RLI.Chain = Chain;
8576       RLI.MPI =
8577           MachinePointerInfo::getFixedStack(DAG.getMachineFunction(), FrameIdx);
8578       RLI.Alignment = Align(4);
8579 
8580       MachineMemOperand *MMO =
8581         MF.getMachineMemOperand(RLI.MPI, MachineMemOperand::MOLoad, 4,
8582                                 RLI.Alignment, RLI.AAInfo, RLI.Ranges);
8583       SDValue Ops[] = { RLI.Chain, RLI.Ptr };
8584       Bits = DAG.getMemIntrinsicNode(SINT.getOpcode() == ISD::ZERO_EXTEND ?
8585                                      PPCISD::LFIWZX : PPCISD::LFIWAX,
8586                                      dl, DAG.getVTList(MVT::f64, MVT::Other),
8587                                      Ops, MVT::i32, MMO);
8588       Chain = Bits.getValue(1);
8589     } else
8590       Bits = DAG.getNode(ISD::BITCAST, dl, MVT::f64, SINT);
8591 
8592     SDValue FP = convertIntToFP(Op, Bits, DAG, Subtarget, Chain);
8593     if (IsStrict)
8594       Chain = FP.getValue(1);
8595 
8596     if (Op.getValueType() == MVT::f32 && !Subtarget.hasFPCVT()) {
8597       if (IsStrict)
8598         FP = DAG.getNode(ISD::STRICT_FP_ROUND, dl, {MVT::f32, MVT::Other},
8599                          {Chain, FP, DAG.getIntPtrConstant(0, dl)});
8600       else
8601         FP = DAG.getNode(ISD::FP_ROUND, dl, MVT::f32, FP,
8602                          DAG.getIntPtrConstant(0, dl));
8603     }
8604     return FP;
8605   }
8606 
8607   assert(Src.getValueType() == MVT::i32 &&
8608          "Unhandled INT_TO_FP type in custom expander!");
8609   // Since we only generate this in 64-bit mode, we can take advantage of
8610   // 64-bit registers.  In particular, sign extend the input value into the
8611   // 64-bit register with extsw, store the WHOLE 64-bit value into the stack
8612   // then lfd it and fcfid it.
8613   MachineFunction &MF = DAG.getMachineFunction();
8614   MachineFrameInfo &MFI = MF.getFrameInfo();
8615   EVT PtrVT = getPointerTy(MF.getDataLayout());
8616 
8617   SDValue Ld;
8618   if (Subtarget.hasLFIWAX() || Subtarget.hasFPCVT()) {
8619     ReuseLoadInfo RLI;
8620     bool ReusingLoad;
8621     if (!(ReusingLoad = canReuseLoadAddress(Src, MVT::i32, RLI, DAG))) {
8622       int FrameIdx = MFI.CreateStackObject(4, Align(4), false);
8623       SDValue FIdx = DAG.getFrameIndex(FrameIdx, PtrVT);
8624 
8625       SDValue Store = DAG.getStore(Chain, dl, Src, FIdx,
8626                                    MachinePointerInfo::getFixedStack(
8627                                        DAG.getMachineFunction(), FrameIdx));
8628       Chain = Store;
8629 
8630       assert(cast<StoreSDNode>(Store)->getMemoryVT() == MVT::i32 &&
8631              "Expected an i32 store");
8632 
8633       RLI.Ptr = FIdx;
8634       RLI.Chain = Chain;
8635       RLI.MPI =
8636           MachinePointerInfo::getFixedStack(DAG.getMachineFunction(), FrameIdx);
8637       RLI.Alignment = Align(4);
8638     }
8639 
8640     MachineMemOperand *MMO =
8641       MF.getMachineMemOperand(RLI.MPI, MachineMemOperand::MOLoad, 4,
8642                               RLI.Alignment, RLI.AAInfo, RLI.Ranges);
8643     SDValue Ops[] = { RLI.Chain, RLI.Ptr };
8644     Ld = DAG.getMemIntrinsicNode(IsSigned ? PPCISD::LFIWAX : PPCISD::LFIWZX, dl,
8645                                  DAG.getVTList(MVT::f64, MVT::Other), Ops,
8646                                  MVT::i32, MMO);
8647     Chain = Ld.getValue(1);
8648     if (ReusingLoad)
8649       spliceIntoChain(RLI.ResChain, Ld.getValue(1), DAG);
8650   } else {
8651     assert(Subtarget.isPPC64() &&
8652            "i32->FP without LFIWAX supported only on PPC64");
8653 
8654     int FrameIdx = MFI.CreateStackObject(8, Align(8), false);
8655     SDValue FIdx = DAG.getFrameIndex(FrameIdx, PtrVT);
8656 
8657     SDValue Ext64 = DAG.getNode(ISD::SIGN_EXTEND, dl, MVT::i64, Src);
8658 
8659     // STD the extended value into the stack slot.
8660     SDValue Store = DAG.getStore(
8661         Chain, dl, Ext64, FIdx,
8662         MachinePointerInfo::getFixedStack(DAG.getMachineFunction(), FrameIdx));
8663     Chain = Store;
8664 
8665     // Load the value as a double.
8666     Ld = DAG.getLoad(
8667         MVT::f64, dl, Chain, FIdx,
8668         MachinePointerInfo::getFixedStack(DAG.getMachineFunction(), FrameIdx));
8669     Chain = Ld.getValue(1);
8670   }
8671 
8672   // FCFID it and return it.
8673   SDValue FP = convertIntToFP(Op, Ld, DAG, Subtarget, Chain);
8674   if (IsStrict)
8675     Chain = FP.getValue(1);
8676   if (Op.getValueType() == MVT::f32 && !Subtarget.hasFPCVT()) {
8677     if (IsStrict)
8678       FP = DAG.getNode(ISD::STRICT_FP_ROUND, dl, {MVT::f32, MVT::Other},
8679                        {Chain, FP, DAG.getIntPtrConstant(0, dl)});
8680     else
8681       FP = DAG.getNode(ISD::FP_ROUND, dl, MVT::f32, FP,
8682                        DAG.getIntPtrConstant(0, dl));
8683   }
8684   return FP;
8685 }
8686 
8687 SDValue PPCTargetLowering::LowerFLT_ROUNDS_(SDValue Op,
8688                                             SelectionDAG &DAG) const {
8689   SDLoc dl(Op);
8690   /*
8691    The rounding mode is in bits 30:31 of FPSR, and has the following
8692    settings:
8693      00 Round to nearest
8694      01 Round to 0
8695      10 Round to +inf
8696      11 Round to -inf
8697 
8698   FLT_ROUNDS, on the other hand, expects the following:
8699     -1 Undefined
8700      0 Round to 0
8701      1 Round to nearest
8702      2 Round to +inf
8703      3 Round to -inf
8704 
8705   To perform the conversion, we do:
8706     ((FPSCR & 0x3) ^ ((~FPSCR & 0x3) >> 1))
8707   */
8708 
8709   MachineFunction &MF = DAG.getMachineFunction();
8710   EVT VT = Op.getValueType();
8711   EVT PtrVT = getPointerTy(MF.getDataLayout());
8712 
8713   // Save FP Control Word to register
8714   SDValue Chain = Op.getOperand(0);
8715   SDValue MFFS = DAG.getNode(PPCISD::MFFS, dl, {MVT::f64, MVT::Other}, Chain);
8716   Chain = MFFS.getValue(1);
8717 
8718   // Save FP register to stack slot
8719   int SSFI = MF.getFrameInfo().CreateStackObject(8, Align(8), false);
8720   SDValue StackSlot = DAG.getFrameIndex(SSFI, PtrVT);
8721   Chain = DAG.getStore(Chain, dl, MFFS, StackSlot, MachinePointerInfo());
8722 
8723   // Load FP Control Word from low 32 bits of stack slot.
8724   SDValue Four = DAG.getConstant(4, dl, PtrVT);
8725   SDValue Addr = DAG.getNode(ISD::ADD, dl, PtrVT, StackSlot, Four);
8726   SDValue CWD = DAG.getLoad(MVT::i32, dl, Chain, Addr, MachinePointerInfo());
8727   Chain = CWD.getValue(1);
8728 
8729   // Transform as necessary
8730   SDValue CWD1 =
8731     DAG.getNode(ISD::AND, dl, MVT::i32,
8732                 CWD, DAG.getConstant(3, dl, MVT::i32));
8733   SDValue CWD2 =
8734     DAG.getNode(ISD::SRL, dl, MVT::i32,
8735                 DAG.getNode(ISD::AND, dl, MVT::i32,
8736                             DAG.getNode(ISD::XOR, dl, MVT::i32,
8737                                         CWD, DAG.getConstant(3, dl, MVT::i32)),
8738                             DAG.getConstant(3, dl, MVT::i32)),
8739                 DAG.getConstant(1, dl, MVT::i32));
8740 
8741   SDValue RetVal =
8742     DAG.getNode(ISD::XOR, dl, MVT::i32, CWD1, CWD2);
8743 
8744   RetVal =
8745       DAG.getNode((VT.getSizeInBits() < 16 ? ISD::TRUNCATE : ISD::ZERO_EXTEND),
8746                   dl, VT, RetVal);
8747 
8748   return DAG.getMergeValues({RetVal, Chain}, dl);
8749 }
8750 
8751 SDValue PPCTargetLowering::LowerSHL_PARTS(SDValue Op, SelectionDAG &DAG) const {
8752   EVT VT = Op.getValueType();
8753   unsigned BitWidth = VT.getSizeInBits();
8754   SDLoc dl(Op);
8755   assert(Op.getNumOperands() == 3 &&
8756          VT == Op.getOperand(1).getValueType() &&
8757          "Unexpected SHL!");
8758 
8759   // Expand into a bunch of logical ops.  Note that these ops
8760   // depend on the PPC behavior for oversized shift amounts.
8761   SDValue Lo = Op.getOperand(0);
8762   SDValue Hi = Op.getOperand(1);
8763   SDValue Amt = Op.getOperand(2);
8764   EVT AmtVT = Amt.getValueType();
8765 
8766   SDValue Tmp1 = DAG.getNode(ISD::SUB, dl, AmtVT,
8767                              DAG.getConstant(BitWidth, dl, AmtVT), Amt);
8768   SDValue Tmp2 = DAG.getNode(PPCISD::SHL, dl, VT, Hi, Amt);
8769   SDValue Tmp3 = DAG.getNode(PPCISD::SRL, dl, VT, Lo, Tmp1);
8770   SDValue Tmp4 = DAG.getNode(ISD::OR , dl, VT, Tmp2, Tmp3);
8771   SDValue Tmp5 = DAG.getNode(ISD::ADD, dl, AmtVT, Amt,
8772                              DAG.getConstant(-BitWidth, dl, AmtVT));
8773   SDValue Tmp6 = DAG.getNode(PPCISD::SHL, dl, VT, Lo, Tmp5);
8774   SDValue OutHi = DAG.getNode(ISD::OR, dl, VT, Tmp4, Tmp6);
8775   SDValue OutLo = DAG.getNode(PPCISD::SHL, dl, VT, Lo, Amt);
8776   SDValue OutOps[] = { OutLo, OutHi };
8777   return DAG.getMergeValues(OutOps, dl);
8778 }
8779 
8780 SDValue PPCTargetLowering::LowerSRL_PARTS(SDValue Op, SelectionDAG &DAG) const {
8781   EVT VT = Op.getValueType();
8782   SDLoc dl(Op);
8783   unsigned BitWidth = VT.getSizeInBits();
8784   assert(Op.getNumOperands() == 3 &&
8785          VT == Op.getOperand(1).getValueType() &&
8786          "Unexpected SRL!");
8787 
8788   // Expand into a bunch of logical ops.  Note that these ops
8789   // depend on the PPC behavior for oversized shift amounts.
8790   SDValue Lo = Op.getOperand(0);
8791   SDValue Hi = Op.getOperand(1);
8792   SDValue Amt = Op.getOperand(2);
8793   EVT AmtVT = Amt.getValueType();
8794 
8795   SDValue Tmp1 = DAG.getNode(ISD::SUB, dl, AmtVT,
8796                              DAG.getConstant(BitWidth, dl, AmtVT), Amt);
8797   SDValue Tmp2 = DAG.getNode(PPCISD::SRL, dl, VT, Lo, Amt);
8798   SDValue Tmp3 = DAG.getNode(PPCISD::SHL, dl, VT, Hi, Tmp1);
8799   SDValue Tmp4 = DAG.getNode(ISD::OR, dl, VT, Tmp2, Tmp3);
8800   SDValue Tmp5 = DAG.getNode(ISD::ADD, dl, AmtVT, Amt,
8801                              DAG.getConstant(-BitWidth, dl, AmtVT));
8802   SDValue Tmp6 = DAG.getNode(PPCISD::SRL, dl, VT, Hi, Tmp5);
8803   SDValue OutLo = DAG.getNode(ISD::OR, dl, VT, Tmp4, Tmp6);
8804   SDValue OutHi = DAG.getNode(PPCISD::SRL, dl, VT, Hi, Amt);
8805   SDValue OutOps[] = { OutLo, OutHi };
8806   return DAG.getMergeValues(OutOps, dl);
8807 }
8808 
8809 SDValue PPCTargetLowering::LowerSRA_PARTS(SDValue Op, SelectionDAG &DAG) const {
8810   SDLoc dl(Op);
8811   EVT VT = Op.getValueType();
8812   unsigned BitWidth = VT.getSizeInBits();
8813   assert(Op.getNumOperands() == 3 &&
8814          VT == Op.getOperand(1).getValueType() &&
8815          "Unexpected SRA!");
8816 
8817   // Expand into a bunch of logical ops, followed by a select_cc.
8818   SDValue Lo = Op.getOperand(0);
8819   SDValue Hi = Op.getOperand(1);
8820   SDValue Amt = Op.getOperand(2);
8821   EVT AmtVT = Amt.getValueType();
8822 
8823   SDValue Tmp1 = DAG.getNode(ISD::SUB, dl, AmtVT,
8824                              DAG.getConstant(BitWidth, dl, AmtVT), Amt);
8825   SDValue Tmp2 = DAG.getNode(PPCISD::SRL, dl, VT, Lo, Amt);
8826   SDValue Tmp3 = DAG.getNode(PPCISD::SHL, dl, VT, Hi, Tmp1);
8827   SDValue Tmp4 = DAG.getNode(ISD::OR, dl, VT, Tmp2, Tmp3);
8828   SDValue Tmp5 = DAG.getNode(ISD::ADD, dl, AmtVT, Amt,
8829                              DAG.getConstant(-BitWidth, dl, AmtVT));
8830   SDValue Tmp6 = DAG.getNode(PPCISD::SRA, dl, VT, Hi, Tmp5);
8831   SDValue OutHi = DAG.getNode(PPCISD::SRA, dl, VT, Hi, Amt);
8832   SDValue OutLo = DAG.getSelectCC(dl, Tmp5, DAG.getConstant(0, dl, AmtVT),
8833                                   Tmp4, Tmp6, ISD::SETLE);
8834   SDValue OutOps[] = { OutLo, OutHi };
8835   return DAG.getMergeValues(OutOps, dl);
8836 }
8837 
8838 SDValue PPCTargetLowering::LowerFunnelShift(SDValue Op,
8839                                             SelectionDAG &DAG) const {
8840   SDLoc dl(Op);
8841   EVT VT = Op.getValueType();
8842   unsigned BitWidth = VT.getSizeInBits();
8843 
8844   bool IsFSHL = Op.getOpcode() == ISD::FSHL;
8845   SDValue X = Op.getOperand(0);
8846   SDValue Y = Op.getOperand(1);
8847   SDValue Z = Op.getOperand(2);
8848   EVT AmtVT = Z.getValueType();
8849 
8850   // fshl: (X << (Z % BW)) | (Y >> (BW - (Z % BW)))
8851   // fshr: (X << (BW - (Z % BW))) | (Y >> (Z % BW))
8852   // This is simpler than TargetLowering::expandFunnelShift because we can rely
8853   // on PowerPC shift by BW being well defined.
8854   Z = DAG.getNode(ISD::AND, dl, AmtVT, Z,
8855                   DAG.getConstant(BitWidth - 1, dl, AmtVT));
8856   SDValue SubZ =
8857       DAG.getNode(ISD::SUB, dl, AmtVT, DAG.getConstant(BitWidth, dl, AmtVT), Z);
8858   X = DAG.getNode(PPCISD::SHL, dl, VT, X, IsFSHL ? Z : SubZ);
8859   Y = DAG.getNode(PPCISD::SRL, dl, VT, Y, IsFSHL ? SubZ : Z);
8860   return DAG.getNode(ISD::OR, dl, VT, X, Y);
8861 }
8862 
8863 //===----------------------------------------------------------------------===//
8864 // Vector related lowering.
8865 //
8866 
8867 /// getCanonicalConstSplat - Build a canonical splat immediate of Val with an
8868 /// element size of SplatSize. Cast the result to VT.
8869 static SDValue getCanonicalConstSplat(uint64_t Val, unsigned SplatSize, EVT VT,
8870                                       SelectionDAG &DAG, const SDLoc &dl) {
8871   static const MVT VTys[] = { // canonical VT to use for each size.
8872     MVT::v16i8, MVT::v8i16, MVT::Other, MVT::v4i32
8873   };
8874 
8875   EVT ReqVT = VT != MVT::Other ? VT : VTys[SplatSize-1];
8876 
8877   // For a splat with all ones, turn it to vspltisb 0xFF to canonicalize.
8878   if (Val == ((1LU << (SplatSize * 8)) - 1)) {
8879     SplatSize = 1;
8880     Val = 0xFF;
8881   }
8882 
8883   EVT CanonicalVT = VTys[SplatSize-1];
8884 
8885   // Build a canonical splat for this value.
8886   return DAG.getBitcast(ReqVT, DAG.getConstant(Val, dl, CanonicalVT));
8887 }
8888 
8889 /// BuildIntrinsicOp - Return a unary operator intrinsic node with the
8890 /// specified intrinsic ID.
8891 static SDValue BuildIntrinsicOp(unsigned IID, SDValue Op, SelectionDAG &DAG,
8892                                 const SDLoc &dl, EVT DestVT = MVT::Other) {
8893   if (DestVT == MVT::Other) DestVT = Op.getValueType();
8894   return DAG.getNode(ISD::INTRINSIC_WO_CHAIN, dl, DestVT,
8895                      DAG.getConstant(IID, dl, MVT::i32), Op);
8896 }
8897 
8898 /// BuildIntrinsicOp - Return a binary operator intrinsic node with the
8899 /// specified intrinsic ID.
8900 static SDValue BuildIntrinsicOp(unsigned IID, SDValue LHS, SDValue RHS,
8901                                 SelectionDAG &DAG, const SDLoc &dl,
8902                                 EVT DestVT = MVT::Other) {
8903   if (DestVT == MVT::Other) DestVT = LHS.getValueType();
8904   return DAG.getNode(ISD::INTRINSIC_WO_CHAIN, dl, DestVT,
8905                      DAG.getConstant(IID, dl, MVT::i32), LHS, RHS);
8906 }
8907 
8908 /// BuildIntrinsicOp - Return a ternary operator intrinsic node with the
8909 /// specified intrinsic ID.
8910 static SDValue BuildIntrinsicOp(unsigned IID, SDValue Op0, SDValue Op1,
8911                                 SDValue Op2, SelectionDAG &DAG, const SDLoc &dl,
8912                                 EVT DestVT = MVT::Other) {
8913   if (DestVT == MVT::Other) DestVT = Op0.getValueType();
8914   return DAG.getNode(ISD::INTRINSIC_WO_CHAIN, dl, DestVT,
8915                      DAG.getConstant(IID, dl, MVT::i32), Op0, Op1, Op2);
8916 }
8917 
8918 /// BuildVSLDOI - Return a VECTOR_SHUFFLE that is a vsldoi of the specified
8919 /// amount.  The result has the specified value type.
8920 static SDValue BuildVSLDOI(SDValue LHS, SDValue RHS, unsigned Amt, EVT VT,
8921                            SelectionDAG &DAG, const SDLoc &dl) {
8922   // Force LHS/RHS to be the right type.
8923   LHS = DAG.getNode(ISD::BITCAST, dl, MVT::v16i8, LHS);
8924   RHS = DAG.getNode(ISD::BITCAST, dl, MVT::v16i8, RHS);
8925 
8926   int Ops[16];
8927   for (unsigned i = 0; i != 16; ++i)
8928     Ops[i] = i + Amt;
8929   SDValue T = DAG.getVectorShuffle(MVT::v16i8, dl, LHS, RHS, Ops);
8930   return DAG.getNode(ISD::BITCAST, dl, VT, T);
8931 }
8932 
8933 /// Do we have an efficient pattern in a .td file for this node?
8934 ///
8935 /// \param V - pointer to the BuildVectorSDNode being matched
8936 /// \param HasDirectMove - does this subtarget have VSR <-> GPR direct moves?
8937 ///
8938 /// There are some patterns where it is beneficial to keep a BUILD_VECTOR
8939 /// node as a BUILD_VECTOR node rather than expanding it. The patterns where
8940 /// the opposite is true (expansion is beneficial) are:
8941 /// - The node builds a vector out of integers that are not 32 or 64-bits
8942 /// - The node builds a vector out of constants
8943 /// - The node is a "load-and-splat"
8944 /// In all other cases, we will choose to keep the BUILD_VECTOR.
8945 static bool haveEfficientBuildVectorPattern(BuildVectorSDNode *V,
8946                                             bool HasDirectMove,
8947                                             bool HasP8Vector) {
8948   EVT VecVT = V->getValueType(0);
8949   bool RightType = VecVT == MVT::v2f64 ||
8950     (HasP8Vector && VecVT == MVT::v4f32) ||
8951     (HasDirectMove && (VecVT == MVT::v2i64 || VecVT == MVT::v4i32));
8952   if (!RightType)
8953     return false;
8954 
8955   bool IsSplat = true;
8956   bool IsLoad = false;
8957   SDValue Op0 = V->getOperand(0);
8958 
8959   // This function is called in a block that confirms the node is not a constant
8960   // splat. So a constant BUILD_VECTOR here means the vector is built out of
8961   // different constants.
8962   if (V->isConstant())
8963     return false;
8964   for (int i = 0, e = V->getNumOperands(); i < e; ++i) {
8965     if (V->getOperand(i).isUndef())
8966       return false;
8967     // We want to expand nodes that represent load-and-splat even if the
8968     // loaded value is a floating point truncation or conversion to int.
8969     if (V->getOperand(i).getOpcode() == ISD::LOAD ||
8970         (V->getOperand(i).getOpcode() == ISD::FP_ROUND &&
8971          V->getOperand(i).getOperand(0).getOpcode() == ISD::LOAD) ||
8972         (V->getOperand(i).getOpcode() == ISD::FP_TO_SINT &&
8973          V->getOperand(i).getOperand(0).getOpcode() == ISD::LOAD) ||
8974         (V->getOperand(i).getOpcode() == ISD::FP_TO_UINT &&
8975          V->getOperand(i).getOperand(0).getOpcode() == ISD::LOAD))
8976       IsLoad = true;
8977     // If the operands are different or the input is not a load and has more
8978     // uses than just this BV node, then it isn't a splat.
8979     if (V->getOperand(i) != Op0 ||
8980         (!IsLoad && !V->isOnlyUserOf(V->getOperand(i).getNode())))
8981       IsSplat = false;
8982   }
8983   return !(IsSplat && IsLoad);
8984 }
8985 
8986 // Lower BITCAST(f128, (build_pair i64, i64)) to BUILD_FP128.
8987 SDValue PPCTargetLowering::LowerBITCAST(SDValue Op, SelectionDAG &DAG) const {
8988 
8989   SDLoc dl(Op);
8990   SDValue Op0 = Op->getOperand(0);
8991 
8992   if ((Op.getValueType() != MVT::f128) ||
8993       (Op0.getOpcode() != ISD::BUILD_PAIR) ||
8994       (Op0.getOperand(0).getValueType() != MVT::i64) ||
8995       (Op0.getOperand(1).getValueType() != MVT::i64))
8996     return SDValue();
8997 
8998   return DAG.getNode(PPCISD::BUILD_FP128, dl, MVT::f128, Op0.getOperand(0),
8999                      Op0.getOperand(1));
9000 }
9001 
9002 static const SDValue *getNormalLoadInput(const SDValue &Op, bool &IsPermuted) {
9003   const SDValue *InputLoad = &Op;
9004   if (InputLoad->getOpcode() == ISD::BITCAST)
9005     InputLoad = &InputLoad->getOperand(0);
9006   if (InputLoad->getOpcode() == ISD::SCALAR_TO_VECTOR ||
9007       InputLoad->getOpcode() == PPCISD::SCALAR_TO_VECTOR_PERMUTED) {
9008     IsPermuted = InputLoad->getOpcode() == PPCISD::SCALAR_TO_VECTOR_PERMUTED;
9009     InputLoad = &InputLoad->getOperand(0);
9010   }
9011   if (InputLoad->getOpcode() != ISD::LOAD)
9012     return nullptr;
9013   LoadSDNode *LD = cast<LoadSDNode>(*InputLoad);
9014   return ISD::isNormalLoad(LD) ? InputLoad : nullptr;
9015 }
9016 
9017 // Convert the argument APFloat to a single precision APFloat if there is no
9018 // loss in information during the conversion to single precision APFloat and the
9019 // resulting number is not a denormal number. Return true if successful.
9020 bool llvm::convertToNonDenormSingle(APFloat &ArgAPFloat) {
9021   APFloat APFloatToConvert = ArgAPFloat;
9022   bool LosesInfo = true;
9023   APFloatToConvert.convert(APFloat::IEEEsingle(), APFloat::rmNearestTiesToEven,
9024                            &LosesInfo);
9025   bool Success = (!LosesInfo && !APFloatToConvert.isDenormal());
9026   if (Success)
9027     ArgAPFloat = APFloatToConvert;
9028   return Success;
9029 }
9030 
9031 // Bitcast the argument APInt to a double and convert it to a single precision
9032 // APFloat, bitcast the APFloat to an APInt and assign it to the original
9033 // argument if there is no loss in information during the conversion from
9034 // double to single precision APFloat and the resulting number is not a denormal
9035 // number. Return true if successful.
9036 bool llvm::convertToNonDenormSingle(APInt &ArgAPInt) {
9037   double DpValue = ArgAPInt.bitsToDouble();
9038   APFloat APFloatDp(DpValue);
9039   bool Success = convertToNonDenormSingle(APFloatDp);
9040   if (Success)
9041     ArgAPInt = APFloatDp.bitcastToAPInt();
9042   return Success;
9043 }
9044 
9045 // If this is a case we can't handle, return null and let the default
9046 // expansion code take care of it.  If we CAN select this case, and if it
9047 // selects to a single instruction, return Op.  Otherwise, if we can codegen
9048 // this case more efficiently than a constant pool load, lower it to the
9049 // sequence of ops that should be used.
9050 SDValue PPCTargetLowering::LowerBUILD_VECTOR(SDValue Op,
9051                                              SelectionDAG &DAG) const {
9052   SDLoc dl(Op);
9053   BuildVectorSDNode *BVN = dyn_cast<BuildVectorSDNode>(Op.getNode());
9054   assert(BVN && "Expected a BuildVectorSDNode in LowerBUILD_VECTOR");
9055 
9056   // Check if this is a splat of a constant value.
9057   APInt APSplatBits, APSplatUndef;
9058   unsigned SplatBitSize;
9059   bool HasAnyUndefs;
9060   bool BVNIsConstantSplat =
9061       BVN->isConstantSplat(APSplatBits, APSplatUndef, SplatBitSize,
9062                            HasAnyUndefs, 0, !Subtarget.isLittleEndian());
9063 
9064   // If it is a splat of a double, check if we can shrink it to a 32 bit
9065   // non-denormal float which when converted back to double gives us the same
9066   // double. This is to exploit the XXSPLTIDP instruction.
9067   if (BVNIsConstantSplat && Subtarget.hasPrefixInstrs() &&
9068       (SplatBitSize == 64) && (Op->getValueType(0) == MVT::v2f64) &&
9069       convertToNonDenormSingle(APSplatBits)) {
9070     SDValue SplatNode = DAG.getNode(
9071         PPCISD::XXSPLTI_SP_TO_DP, dl, MVT::v2f64,
9072         DAG.getTargetConstant(APSplatBits.getZExtValue(), dl, MVT::i32));
9073     return DAG.getBitcast(Op.getValueType(), SplatNode);
9074   }
9075 
9076   if (!BVNIsConstantSplat || SplatBitSize > 32) {
9077 
9078     bool IsPermutedLoad = false;
9079     const SDValue *InputLoad =
9080         getNormalLoadInput(Op.getOperand(0), IsPermutedLoad);
9081     // Handle load-and-splat patterns as we have instructions that will do this
9082     // in one go.
9083     if (InputLoad && DAG.isSplatValue(Op, true)) {
9084       LoadSDNode *LD = cast<LoadSDNode>(*InputLoad);
9085 
9086       // We have handling for 4 and 8 byte elements.
9087       unsigned ElementSize = LD->getMemoryVT().getScalarSizeInBits();
9088 
9089       // Checking for a single use of this load, we have to check for vector
9090       // width (128 bits) / ElementSize uses (since each operand of the
9091       // BUILD_VECTOR is a separate use of the value.
9092       if (InputLoad->getNode()->hasNUsesOfValue(128 / ElementSize, 0) &&
9093           ((Subtarget.hasVSX() && ElementSize == 64) ||
9094            (Subtarget.hasP9Vector() && ElementSize == 32))) {
9095         SDValue Ops[] = {
9096           LD->getChain(),    // Chain
9097           LD->getBasePtr(),  // Ptr
9098           DAG.getValueType(Op.getValueType()) // VT
9099         };
9100         return
9101           DAG.getMemIntrinsicNode(PPCISD::LD_SPLAT, dl,
9102                                   DAG.getVTList(Op.getValueType(), MVT::Other),
9103                                   Ops, LD->getMemoryVT(), LD->getMemOperand());
9104       }
9105     }
9106 
9107     // BUILD_VECTOR nodes that are not constant splats of up to 32-bits can be
9108     // lowered to VSX instructions under certain conditions.
9109     // Without VSX, there is no pattern more efficient than expanding the node.
9110     if (Subtarget.hasVSX() &&
9111         haveEfficientBuildVectorPattern(BVN, Subtarget.hasDirectMove(),
9112                                         Subtarget.hasP8Vector()))
9113       return Op;
9114     return SDValue();
9115   }
9116 
9117   uint64_t SplatBits = APSplatBits.getZExtValue();
9118   uint64_t SplatUndef = APSplatUndef.getZExtValue();
9119   unsigned SplatSize = SplatBitSize / 8;
9120 
9121   // First, handle single instruction cases.
9122 
9123   // All zeros?
9124   if (SplatBits == 0) {
9125     // Canonicalize all zero vectors to be v4i32.
9126     if (Op.getValueType() != MVT::v4i32 || HasAnyUndefs) {
9127       SDValue Z = DAG.getConstant(0, dl, MVT::v4i32);
9128       Op = DAG.getNode(ISD::BITCAST, dl, Op.getValueType(), Z);
9129     }
9130     return Op;
9131   }
9132 
9133   // We have XXSPLTIW for constant splats four bytes wide.
9134   // Given vector length is a multiple of 4, 2-byte splats can be replaced
9135   // with 4-byte splats. We replicate the SplatBits in case of 2-byte splat to
9136   // make a 4-byte splat element. For example: 2-byte splat of 0xABAB can be
9137   // turned into a 4-byte splat of 0xABABABAB.
9138   if (Subtarget.hasPrefixInstrs() && SplatSize == 2)
9139     return getCanonicalConstSplat((SplatBits |= SplatBits << 16), SplatSize * 2,
9140                                   Op.getValueType(), DAG, dl);
9141 
9142   if (Subtarget.hasPrefixInstrs() && SplatSize == 4)
9143     return getCanonicalConstSplat(SplatBits, SplatSize, Op.getValueType(), DAG,
9144                                   dl);
9145 
9146   // We have XXSPLTIB for constant splats one byte wide.
9147   if (Subtarget.hasP9Vector() && SplatSize == 1)
9148     return getCanonicalConstSplat(SplatBits, SplatSize, Op.getValueType(), DAG,
9149                                   dl);
9150 
9151   // If the sign extended value is in the range [-16,15], use VSPLTI[bhw].
9152   int32_t SextVal= (int32_t(SplatBits << (32-SplatBitSize)) >>
9153                     (32-SplatBitSize));
9154   if (SextVal >= -16 && SextVal <= 15)
9155     return getCanonicalConstSplat(SextVal, SplatSize, Op.getValueType(), DAG,
9156                                   dl);
9157 
9158   // Two instruction sequences.
9159 
9160   // If this value is in the range [-32,30] and is even, use:
9161   //     VSPLTI[bhw](val/2) + VSPLTI[bhw](val/2)
9162   // If this value is in the range [17,31] and is odd, use:
9163   //     VSPLTI[bhw](val-16) - VSPLTI[bhw](-16)
9164   // If this value is in the range [-31,-17] and is odd, use:
9165   //     VSPLTI[bhw](val+16) + VSPLTI[bhw](-16)
9166   // Note the last two are three-instruction sequences.
9167   if (SextVal >= -32 && SextVal <= 31) {
9168     // To avoid having these optimizations undone by constant folding,
9169     // we convert to a pseudo that will be expanded later into one of
9170     // the above forms.
9171     SDValue Elt = DAG.getConstant(SextVal, dl, MVT::i32);
9172     EVT VT = (SplatSize == 1 ? MVT::v16i8 :
9173               (SplatSize == 2 ? MVT::v8i16 : MVT::v4i32));
9174     SDValue EltSize = DAG.getConstant(SplatSize, dl, MVT::i32);
9175     SDValue RetVal = DAG.getNode(PPCISD::VADD_SPLAT, dl, VT, Elt, EltSize);
9176     if (VT == Op.getValueType())
9177       return RetVal;
9178     else
9179       return DAG.getNode(ISD::BITCAST, dl, Op.getValueType(), RetVal);
9180   }
9181 
9182   // If this is 0x8000_0000 x 4, turn into vspltisw + vslw.  If it is
9183   // 0x7FFF_FFFF x 4, turn it into not(0x8000_0000).  This is important
9184   // for fneg/fabs.
9185   if (SplatSize == 4 && SplatBits == (0x7FFFFFFF&~SplatUndef)) {
9186     // Make -1 and vspltisw -1:
9187     SDValue OnesV = getCanonicalConstSplat(-1, 4, MVT::v4i32, DAG, dl);
9188 
9189     // Make the VSLW intrinsic, computing 0x8000_0000.
9190     SDValue Res = BuildIntrinsicOp(Intrinsic::ppc_altivec_vslw, OnesV,
9191                                    OnesV, DAG, dl);
9192 
9193     // xor by OnesV to invert it.
9194     Res = DAG.getNode(ISD::XOR, dl, MVT::v4i32, Res, OnesV);
9195     return DAG.getNode(ISD::BITCAST, dl, Op.getValueType(), Res);
9196   }
9197 
9198   // Check to see if this is a wide variety of vsplti*, binop self cases.
9199   static const signed char SplatCsts[] = {
9200     -1, 1, -2, 2, -3, 3, -4, 4, -5, 5, -6, 6, -7, 7,
9201     -8, 8, -9, 9, -10, 10, -11, 11, -12, 12, -13, 13, 14, -14, 15, -15, -16
9202   };
9203 
9204   for (unsigned idx = 0; idx < array_lengthof(SplatCsts); ++idx) {
9205     // Indirect through the SplatCsts array so that we favor 'vsplti -1' for
9206     // cases which are ambiguous (e.g. formation of 0x8000_0000).  'vsplti -1'
9207     int i = SplatCsts[idx];
9208 
9209     // Figure out what shift amount will be used by altivec if shifted by i in
9210     // this splat size.
9211     unsigned TypeShiftAmt = i & (SplatBitSize-1);
9212 
9213     // vsplti + shl self.
9214     if (SextVal == (int)((unsigned)i << TypeShiftAmt)) {
9215       SDValue Res = getCanonicalConstSplat(i, SplatSize, MVT::Other, DAG, dl);
9216       static const unsigned IIDs[] = { // Intrinsic to use for each size.
9217         Intrinsic::ppc_altivec_vslb, Intrinsic::ppc_altivec_vslh, 0,
9218         Intrinsic::ppc_altivec_vslw
9219       };
9220       Res = BuildIntrinsicOp(IIDs[SplatSize-1], Res, Res, DAG, dl);
9221       return DAG.getNode(ISD::BITCAST, dl, Op.getValueType(), Res);
9222     }
9223 
9224     // vsplti + srl self.
9225     if (SextVal == (int)((unsigned)i >> TypeShiftAmt)) {
9226       SDValue Res = getCanonicalConstSplat(i, SplatSize, MVT::Other, DAG, dl);
9227       static const unsigned IIDs[] = { // Intrinsic to use for each size.
9228         Intrinsic::ppc_altivec_vsrb, Intrinsic::ppc_altivec_vsrh, 0,
9229         Intrinsic::ppc_altivec_vsrw
9230       };
9231       Res = BuildIntrinsicOp(IIDs[SplatSize-1], Res, Res, DAG, dl);
9232       return DAG.getNode(ISD::BITCAST, dl, Op.getValueType(), Res);
9233     }
9234 
9235     // vsplti + sra self.
9236     if (SextVal == (int)((unsigned)i >> TypeShiftAmt)) {
9237       SDValue Res = getCanonicalConstSplat(i, SplatSize, MVT::Other, DAG, dl);
9238       static const unsigned IIDs[] = { // Intrinsic to use for each size.
9239         Intrinsic::ppc_altivec_vsrab, Intrinsic::ppc_altivec_vsrah, 0,
9240         Intrinsic::ppc_altivec_vsraw
9241       };
9242       Res = BuildIntrinsicOp(IIDs[SplatSize-1], Res, Res, DAG, dl);
9243       return DAG.getNode(ISD::BITCAST, dl, Op.getValueType(), Res);
9244     }
9245 
9246     // vsplti + rol self.
9247     if (SextVal == (int)(((unsigned)i << TypeShiftAmt) |
9248                          ((unsigned)i >> (SplatBitSize-TypeShiftAmt)))) {
9249       SDValue Res = getCanonicalConstSplat(i, SplatSize, MVT::Other, DAG, dl);
9250       static const unsigned IIDs[] = { // Intrinsic to use for each size.
9251         Intrinsic::ppc_altivec_vrlb, Intrinsic::ppc_altivec_vrlh, 0,
9252         Intrinsic::ppc_altivec_vrlw
9253       };
9254       Res = BuildIntrinsicOp(IIDs[SplatSize-1], Res, Res, DAG, dl);
9255       return DAG.getNode(ISD::BITCAST, dl, Op.getValueType(), Res);
9256     }
9257 
9258     // t = vsplti c, result = vsldoi t, t, 1
9259     if (SextVal == (int)(((unsigned)i << 8) | (i < 0 ? 0xFF : 0))) {
9260       SDValue T = getCanonicalConstSplat(i, SplatSize, MVT::v16i8, DAG, dl);
9261       unsigned Amt = Subtarget.isLittleEndian() ? 15 : 1;
9262       return BuildVSLDOI(T, T, Amt, Op.getValueType(), DAG, dl);
9263     }
9264     // t = vsplti c, result = vsldoi t, t, 2
9265     if (SextVal == (int)(((unsigned)i << 16) | (i < 0 ? 0xFFFF : 0))) {
9266       SDValue T = getCanonicalConstSplat(i, SplatSize, MVT::v16i8, DAG, dl);
9267       unsigned Amt = Subtarget.isLittleEndian() ? 14 : 2;
9268       return BuildVSLDOI(T, T, Amt, Op.getValueType(), DAG, dl);
9269     }
9270     // t = vsplti c, result = vsldoi t, t, 3
9271     if (SextVal == (int)(((unsigned)i << 24) | (i < 0 ? 0xFFFFFF : 0))) {
9272       SDValue T = getCanonicalConstSplat(i, SplatSize, MVT::v16i8, DAG, dl);
9273       unsigned Amt = Subtarget.isLittleEndian() ? 13 : 3;
9274       return BuildVSLDOI(T, T, Amt, Op.getValueType(), DAG, dl);
9275     }
9276   }
9277 
9278   return SDValue();
9279 }
9280 
9281 /// GeneratePerfectShuffle - Given an entry in the perfect-shuffle table, emit
9282 /// the specified operations to build the shuffle.
9283 static SDValue GeneratePerfectShuffle(unsigned PFEntry, SDValue LHS,
9284                                       SDValue RHS, SelectionDAG &DAG,
9285                                       const SDLoc &dl) {
9286   unsigned OpNum = (PFEntry >> 26) & 0x0F;
9287   unsigned LHSID = (PFEntry >> 13) & ((1 << 13)-1);
9288   unsigned RHSID = (PFEntry >>  0) & ((1 << 13)-1);
9289 
9290   enum {
9291     OP_COPY = 0,  // Copy, used for things like <u,u,u,3> to say it is <0,1,2,3>
9292     OP_VMRGHW,
9293     OP_VMRGLW,
9294     OP_VSPLTISW0,
9295     OP_VSPLTISW1,
9296     OP_VSPLTISW2,
9297     OP_VSPLTISW3,
9298     OP_VSLDOI4,
9299     OP_VSLDOI8,
9300     OP_VSLDOI12
9301   };
9302 
9303   if (OpNum == OP_COPY) {
9304     if (LHSID == (1*9+2)*9+3) return LHS;
9305     assert(LHSID == ((4*9+5)*9+6)*9+7 && "Illegal OP_COPY!");
9306     return RHS;
9307   }
9308 
9309   SDValue OpLHS, OpRHS;
9310   OpLHS = GeneratePerfectShuffle(PerfectShuffleTable[LHSID], LHS, RHS, DAG, dl);
9311   OpRHS = GeneratePerfectShuffle(PerfectShuffleTable[RHSID], LHS, RHS, DAG, dl);
9312 
9313   int ShufIdxs[16];
9314   switch (OpNum) {
9315   default: llvm_unreachable("Unknown i32 permute!");
9316   case OP_VMRGHW:
9317     ShufIdxs[ 0] =  0; ShufIdxs[ 1] =  1; ShufIdxs[ 2] =  2; ShufIdxs[ 3] =  3;
9318     ShufIdxs[ 4] = 16; ShufIdxs[ 5] = 17; ShufIdxs[ 6] = 18; ShufIdxs[ 7] = 19;
9319     ShufIdxs[ 8] =  4; ShufIdxs[ 9] =  5; ShufIdxs[10] =  6; ShufIdxs[11] =  7;
9320     ShufIdxs[12] = 20; ShufIdxs[13] = 21; ShufIdxs[14] = 22; ShufIdxs[15] = 23;
9321     break;
9322   case OP_VMRGLW:
9323     ShufIdxs[ 0] =  8; ShufIdxs[ 1] =  9; ShufIdxs[ 2] = 10; ShufIdxs[ 3] = 11;
9324     ShufIdxs[ 4] = 24; ShufIdxs[ 5] = 25; ShufIdxs[ 6] = 26; ShufIdxs[ 7] = 27;
9325     ShufIdxs[ 8] = 12; ShufIdxs[ 9] = 13; ShufIdxs[10] = 14; ShufIdxs[11] = 15;
9326     ShufIdxs[12] = 28; ShufIdxs[13] = 29; ShufIdxs[14] = 30; ShufIdxs[15] = 31;
9327     break;
9328   case OP_VSPLTISW0:
9329     for (unsigned i = 0; i != 16; ++i)
9330       ShufIdxs[i] = (i&3)+0;
9331     break;
9332   case OP_VSPLTISW1:
9333     for (unsigned i = 0; i != 16; ++i)
9334       ShufIdxs[i] = (i&3)+4;
9335     break;
9336   case OP_VSPLTISW2:
9337     for (unsigned i = 0; i != 16; ++i)
9338       ShufIdxs[i] = (i&3)+8;
9339     break;
9340   case OP_VSPLTISW3:
9341     for (unsigned i = 0; i != 16; ++i)
9342       ShufIdxs[i] = (i&3)+12;
9343     break;
9344   case OP_VSLDOI4:
9345     return BuildVSLDOI(OpLHS, OpRHS, 4, OpLHS.getValueType(), DAG, dl);
9346   case OP_VSLDOI8:
9347     return BuildVSLDOI(OpLHS, OpRHS, 8, OpLHS.getValueType(), DAG, dl);
9348   case OP_VSLDOI12:
9349     return BuildVSLDOI(OpLHS, OpRHS, 12, OpLHS.getValueType(), DAG, dl);
9350   }
9351   EVT VT = OpLHS.getValueType();
9352   OpLHS = DAG.getNode(ISD::BITCAST, dl, MVT::v16i8, OpLHS);
9353   OpRHS = DAG.getNode(ISD::BITCAST, dl, MVT::v16i8, OpRHS);
9354   SDValue T = DAG.getVectorShuffle(MVT::v16i8, dl, OpLHS, OpRHS, ShufIdxs);
9355   return DAG.getNode(ISD::BITCAST, dl, VT, T);
9356 }
9357 
9358 /// lowerToVINSERTB - Return the SDValue if this VECTOR_SHUFFLE can be handled
9359 /// by the VINSERTB instruction introduced in ISA 3.0, else just return default
9360 /// SDValue.
9361 SDValue PPCTargetLowering::lowerToVINSERTB(ShuffleVectorSDNode *N,
9362                                            SelectionDAG &DAG) const {
9363   const unsigned BytesInVector = 16;
9364   bool IsLE = Subtarget.isLittleEndian();
9365   SDLoc dl(N);
9366   SDValue V1 = N->getOperand(0);
9367   SDValue V2 = N->getOperand(1);
9368   unsigned ShiftElts = 0, InsertAtByte = 0;
9369   bool Swap = false;
9370 
9371   // Shifts required to get the byte we want at element 7.
9372   unsigned LittleEndianShifts[] = {8, 7,  6,  5,  4,  3,  2,  1,
9373                                    0, 15, 14, 13, 12, 11, 10, 9};
9374   unsigned BigEndianShifts[] = {9, 10, 11, 12, 13, 14, 15, 0,
9375                                 1, 2,  3,  4,  5,  6,  7,  8};
9376 
9377   ArrayRef<int> Mask = N->getMask();
9378   int OriginalOrder[] = {0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15};
9379 
9380   // For each mask element, find out if we're just inserting something
9381   // from V2 into V1 or vice versa.
9382   // Possible permutations inserting an element from V2 into V1:
9383   //   X, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15
9384   //   0, X, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15
9385   //   ...
9386   //   0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, X
9387   // Inserting from V1 into V2 will be similar, except mask range will be
9388   // [16,31].
9389 
9390   bool FoundCandidate = false;
9391   // If both vector operands for the shuffle are the same vector, the mask
9392   // will contain only elements from the first one and the second one will be
9393   // undef.
9394   unsigned VINSERTBSrcElem = IsLE ? 8 : 7;
9395   // Go through the mask of half-words to find an element that's being moved
9396   // from one vector to the other.
9397   for (unsigned i = 0; i < BytesInVector; ++i) {
9398     unsigned CurrentElement = Mask[i];
9399     // If 2nd operand is undefined, we should only look for element 7 in the
9400     // Mask.
9401     if (V2.isUndef() && CurrentElement != VINSERTBSrcElem)
9402       continue;
9403 
9404     bool OtherElementsInOrder = true;
9405     // Examine the other elements in the Mask to see if they're in original
9406     // order.
9407     for (unsigned j = 0; j < BytesInVector; ++j) {
9408       if (j == i)
9409         continue;
9410       // If CurrentElement is from V1 [0,15], then we the rest of the Mask to be
9411       // from V2 [16,31] and vice versa.  Unless the 2nd operand is undefined,
9412       // in which we always assume we're always picking from the 1st operand.
9413       int MaskOffset =
9414           (!V2.isUndef() && CurrentElement < BytesInVector) ? BytesInVector : 0;
9415       if (Mask[j] != OriginalOrder[j] + MaskOffset) {
9416         OtherElementsInOrder = false;
9417         break;
9418       }
9419     }
9420     // If other elements are in original order, we record the number of shifts
9421     // we need to get the element we want into element 7. Also record which byte
9422     // in the vector we should insert into.
9423     if (OtherElementsInOrder) {
9424       // If 2nd operand is undefined, we assume no shifts and no swapping.
9425       if (V2.isUndef()) {
9426         ShiftElts = 0;
9427         Swap = false;
9428       } else {
9429         // Only need the last 4-bits for shifts because operands will be swapped if CurrentElement is >= 2^4.
9430         ShiftElts = IsLE ? LittleEndianShifts[CurrentElement & 0xF]
9431                          : BigEndianShifts[CurrentElement & 0xF];
9432         Swap = CurrentElement < BytesInVector;
9433       }
9434       InsertAtByte = IsLE ? BytesInVector - (i + 1) : i;
9435       FoundCandidate = true;
9436       break;
9437     }
9438   }
9439 
9440   if (!FoundCandidate)
9441     return SDValue();
9442 
9443   // Candidate found, construct the proper SDAG sequence with VINSERTB,
9444   // optionally with VECSHL if shift is required.
9445   if (Swap)
9446     std::swap(V1, V2);
9447   if (V2.isUndef())
9448     V2 = V1;
9449   if (ShiftElts) {
9450     SDValue Shl = DAG.getNode(PPCISD::VECSHL, dl, MVT::v16i8, V2, V2,
9451                               DAG.getConstant(ShiftElts, dl, MVT::i32));
9452     return DAG.getNode(PPCISD::VECINSERT, dl, MVT::v16i8, V1, Shl,
9453                        DAG.getConstant(InsertAtByte, dl, MVT::i32));
9454   }
9455   return DAG.getNode(PPCISD::VECINSERT, dl, MVT::v16i8, V1, V2,
9456                      DAG.getConstant(InsertAtByte, dl, MVT::i32));
9457 }
9458 
9459 /// lowerToVINSERTH - Return the SDValue if this VECTOR_SHUFFLE can be handled
9460 /// by the VINSERTH instruction introduced in ISA 3.0, else just return default
9461 /// SDValue.
9462 SDValue PPCTargetLowering::lowerToVINSERTH(ShuffleVectorSDNode *N,
9463                                            SelectionDAG &DAG) const {
9464   const unsigned NumHalfWords = 8;
9465   const unsigned BytesInVector = NumHalfWords * 2;
9466   // Check that the shuffle is on half-words.
9467   if (!isNByteElemShuffleMask(N, 2, 1))
9468     return SDValue();
9469 
9470   bool IsLE = Subtarget.isLittleEndian();
9471   SDLoc dl(N);
9472   SDValue V1 = N->getOperand(0);
9473   SDValue V2 = N->getOperand(1);
9474   unsigned ShiftElts = 0, InsertAtByte = 0;
9475   bool Swap = false;
9476 
9477   // Shifts required to get the half-word we want at element 3.
9478   unsigned LittleEndianShifts[] = {4, 3, 2, 1, 0, 7, 6, 5};
9479   unsigned BigEndianShifts[] = {5, 6, 7, 0, 1, 2, 3, 4};
9480 
9481   uint32_t Mask = 0;
9482   uint32_t OriginalOrderLow = 0x1234567;
9483   uint32_t OriginalOrderHigh = 0x89ABCDEF;
9484   // Now we look at mask elements 0,2,4,6,8,10,12,14.  Pack the mask into a
9485   // 32-bit space, only need 4-bit nibbles per element.
9486   for (unsigned i = 0; i < NumHalfWords; ++i) {
9487     unsigned MaskShift = (NumHalfWords - 1 - i) * 4;
9488     Mask |= ((uint32_t)(N->getMaskElt(i * 2) / 2) << MaskShift);
9489   }
9490 
9491   // For each mask element, find out if we're just inserting something
9492   // from V2 into V1 or vice versa.  Possible permutations inserting an element
9493   // from V2 into V1:
9494   //   X, 1, 2, 3, 4, 5, 6, 7
9495   //   0, X, 2, 3, 4, 5, 6, 7
9496   //   0, 1, X, 3, 4, 5, 6, 7
9497   //   0, 1, 2, X, 4, 5, 6, 7
9498   //   0, 1, 2, 3, X, 5, 6, 7
9499   //   0, 1, 2, 3, 4, X, 6, 7
9500   //   0, 1, 2, 3, 4, 5, X, 7
9501   //   0, 1, 2, 3, 4, 5, 6, X
9502   // Inserting from V1 into V2 will be similar, except mask range will be [8,15].
9503 
9504   bool FoundCandidate = false;
9505   // Go through the mask of half-words to find an element that's being moved
9506   // from one vector to the other.
9507   for (unsigned i = 0; i < NumHalfWords; ++i) {
9508     unsigned MaskShift = (NumHalfWords - 1 - i) * 4;
9509     uint32_t MaskOneElt = (Mask >> MaskShift) & 0xF;
9510     uint32_t MaskOtherElts = ~(0xF << MaskShift);
9511     uint32_t TargetOrder = 0x0;
9512 
9513     // If both vector operands for the shuffle are the same vector, the mask
9514     // will contain only elements from the first one and the second one will be
9515     // undef.
9516     if (V2.isUndef()) {
9517       ShiftElts = 0;
9518       unsigned VINSERTHSrcElem = IsLE ? 4 : 3;
9519       TargetOrder = OriginalOrderLow;
9520       Swap = false;
9521       // Skip if not the correct element or mask of other elements don't equal
9522       // to our expected order.
9523       if (MaskOneElt == VINSERTHSrcElem &&
9524           (Mask & MaskOtherElts) == (TargetOrder & MaskOtherElts)) {
9525         InsertAtByte = IsLE ? BytesInVector - (i + 1) * 2 : i * 2;
9526         FoundCandidate = true;
9527         break;
9528       }
9529     } else { // If both operands are defined.
9530       // Target order is [8,15] if the current mask is between [0,7].
9531       TargetOrder =
9532           (MaskOneElt < NumHalfWords) ? OriginalOrderHigh : OriginalOrderLow;
9533       // Skip if mask of other elements don't equal our expected order.
9534       if ((Mask & MaskOtherElts) == (TargetOrder & MaskOtherElts)) {
9535         // We only need the last 3 bits for the number of shifts.
9536         ShiftElts = IsLE ? LittleEndianShifts[MaskOneElt & 0x7]
9537                          : BigEndianShifts[MaskOneElt & 0x7];
9538         InsertAtByte = IsLE ? BytesInVector - (i + 1) * 2 : i * 2;
9539         Swap = MaskOneElt < NumHalfWords;
9540         FoundCandidate = true;
9541         break;
9542       }
9543     }
9544   }
9545 
9546   if (!FoundCandidate)
9547     return SDValue();
9548 
9549   // Candidate found, construct the proper SDAG sequence with VINSERTH,
9550   // optionally with VECSHL if shift is required.
9551   if (Swap)
9552     std::swap(V1, V2);
9553   if (V2.isUndef())
9554     V2 = V1;
9555   SDValue Conv1 = DAG.getNode(ISD::BITCAST, dl, MVT::v8i16, V1);
9556   if (ShiftElts) {
9557     // Double ShiftElts because we're left shifting on v16i8 type.
9558     SDValue Shl = DAG.getNode(PPCISD::VECSHL, dl, MVT::v16i8, V2, V2,
9559                               DAG.getConstant(2 * ShiftElts, dl, MVT::i32));
9560     SDValue Conv2 = DAG.getNode(ISD::BITCAST, dl, MVT::v8i16, Shl);
9561     SDValue Ins = DAG.getNode(PPCISD::VECINSERT, dl, MVT::v8i16, Conv1, Conv2,
9562                               DAG.getConstant(InsertAtByte, dl, MVT::i32));
9563     return DAG.getNode(ISD::BITCAST, dl, MVT::v16i8, Ins);
9564   }
9565   SDValue Conv2 = DAG.getNode(ISD::BITCAST, dl, MVT::v8i16, V2);
9566   SDValue Ins = DAG.getNode(PPCISD::VECINSERT, dl, MVT::v8i16, Conv1, Conv2,
9567                             DAG.getConstant(InsertAtByte, dl, MVT::i32));
9568   return DAG.getNode(ISD::BITCAST, dl, MVT::v16i8, Ins);
9569 }
9570 
9571 /// lowerToXXSPLTI32DX - Return the SDValue if this VECTOR_SHUFFLE can be
9572 /// handled by the XXSPLTI32DX instruction introduced in ISA 3.1, otherwise
9573 /// return the default SDValue.
9574 SDValue PPCTargetLowering::lowerToXXSPLTI32DX(ShuffleVectorSDNode *SVN,
9575                                               SelectionDAG &DAG) const {
9576   // The LHS and RHS may be bitcasts to v16i8 as we canonicalize shuffles
9577   // to v16i8. Peek through the bitcasts to get the actual operands.
9578   SDValue LHS = peekThroughBitcasts(SVN->getOperand(0));
9579   SDValue RHS = peekThroughBitcasts(SVN->getOperand(1));
9580 
9581   auto ShuffleMask = SVN->getMask();
9582   SDValue VecShuffle(SVN, 0);
9583   SDLoc DL(SVN);
9584 
9585   // Check that we have a four byte shuffle.
9586   if (!isNByteElemShuffleMask(SVN, 4, 1))
9587     return SDValue();
9588 
9589   // Canonicalize the RHS being a BUILD_VECTOR when lowering to xxsplti32dx.
9590   if (RHS->getOpcode() != ISD::BUILD_VECTOR) {
9591     std::swap(LHS, RHS);
9592     VecShuffle = DAG.getCommutedVectorShuffle(*SVN);
9593     ShuffleMask = cast<ShuffleVectorSDNode>(VecShuffle)->getMask();
9594   }
9595 
9596   // Ensure that the RHS is a vector of constants.
9597   BuildVectorSDNode *BVN = dyn_cast<BuildVectorSDNode>(RHS.getNode());
9598   if (!BVN)
9599     return SDValue();
9600 
9601   // Check if RHS is a splat of 4-bytes (or smaller).
9602   APInt APSplatValue, APSplatUndef;
9603   unsigned SplatBitSize;
9604   bool HasAnyUndefs;
9605   if (!BVN->isConstantSplat(APSplatValue, APSplatUndef, SplatBitSize,
9606                             HasAnyUndefs, 0, !Subtarget.isLittleEndian()) ||
9607       SplatBitSize > 32)
9608     return SDValue();
9609 
9610   // Check that the shuffle mask matches the semantics of XXSPLTI32DX.
9611   // The instruction splats a constant C into two words of the source vector
9612   // producing { C, Unchanged, C, Unchanged } or { Unchanged, C, Unchanged, C }.
9613   // Thus we check that the shuffle mask is the equivalent  of
9614   // <0, [4-7], 2, [4-7]> or <[4-7], 1, [4-7], 3> respectively.
9615   // Note: the check above of isNByteElemShuffleMask() ensures that the bytes
9616   // within each word are consecutive, so we only need to check the first byte.
9617   SDValue Index;
9618   bool IsLE = Subtarget.isLittleEndian();
9619   if ((ShuffleMask[0] == 0 && ShuffleMask[8] == 8) &&
9620       (ShuffleMask[4] % 4 == 0 && ShuffleMask[12] % 4 == 0 &&
9621        ShuffleMask[4] > 15 && ShuffleMask[12] > 15))
9622     Index = DAG.getTargetConstant(IsLE ? 0 : 1, DL, MVT::i32);
9623   else if ((ShuffleMask[4] == 4 && ShuffleMask[12] == 12) &&
9624            (ShuffleMask[0] % 4 == 0 && ShuffleMask[8] % 4 == 0 &&
9625             ShuffleMask[0] > 15 && ShuffleMask[8] > 15))
9626     Index = DAG.getTargetConstant(IsLE ? 1 : 0, DL, MVT::i32);
9627   else
9628     return SDValue();
9629 
9630   // If the splat is narrower than 32-bits, we need to get the 32-bit value
9631   // for XXSPLTI32DX.
9632   unsigned SplatVal = APSplatValue.getZExtValue();
9633   for (; SplatBitSize < 32; SplatBitSize <<= 1)
9634     SplatVal |= (SplatVal << SplatBitSize);
9635 
9636   SDValue SplatNode = DAG.getNode(
9637       PPCISD::XXSPLTI32DX, DL, MVT::v2i64, DAG.getBitcast(MVT::v2i64, LHS),
9638       Index, DAG.getTargetConstant(SplatVal, DL, MVT::i32));
9639   return DAG.getNode(ISD::BITCAST, DL, MVT::v16i8, SplatNode);
9640 }
9641 
9642 /// LowerROTL - Custom lowering for ROTL(v1i128) to vector_shuffle(v16i8).
9643 /// We lower ROTL(v1i128) to vector_shuffle(v16i8) only if shift amount is
9644 /// a multiple of 8. Otherwise convert it to a scalar rotation(i128)
9645 /// i.e (or (shl x, C1), (srl x, 128-C1)).
9646 SDValue PPCTargetLowering::LowerROTL(SDValue Op, SelectionDAG &DAG) const {
9647   assert(Op.getOpcode() == ISD::ROTL && "Should only be called for ISD::ROTL");
9648   assert(Op.getValueType() == MVT::v1i128 &&
9649          "Only set v1i128 as custom, other type shouldn't reach here!");
9650   SDLoc dl(Op);
9651   SDValue N0 = peekThroughBitcasts(Op.getOperand(0));
9652   SDValue N1 = peekThroughBitcasts(Op.getOperand(1));
9653   unsigned SHLAmt = N1.getConstantOperandVal(0);
9654   if (SHLAmt % 8 == 0) {
9655     SmallVector<int, 16> Mask(16, 0);
9656     std::iota(Mask.begin(), Mask.end(), 0);
9657     std::rotate(Mask.begin(), Mask.begin() + SHLAmt / 8, Mask.end());
9658     if (SDValue Shuffle =
9659             DAG.getVectorShuffle(MVT::v16i8, dl, DAG.getBitcast(MVT::v16i8, N0),
9660                                  DAG.getUNDEF(MVT::v16i8), Mask))
9661       return DAG.getNode(ISD::BITCAST, dl, MVT::v1i128, Shuffle);
9662   }
9663   SDValue ArgVal = DAG.getBitcast(MVT::i128, N0);
9664   SDValue SHLOp = DAG.getNode(ISD::SHL, dl, MVT::i128, ArgVal,
9665                               DAG.getConstant(SHLAmt, dl, MVT::i32));
9666   SDValue SRLOp = DAG.getNode(ISD::SRL, dl, MVT::i128, ArgVal,
9667                               DAG.getConstant(128 - SHLAmt, dl, MVT::i32));
9668   SDValue OROp = DAG.getNode(ISD::OR, dl, MVT::i128, SHLOp, SRLOp);
9669   return DAG.getNode(ISD::BITCAST, dl, MVT::v1i128, OROp);
9670 }
9671 
9672 /// LowerVECTOR_SHUFFLE - Return the code we lower for VECTOR_SHUFFLE.  If this
9673 /// is a shuffle we can handle in a single instruction, return it.  Otherwise,
9674 /// return the code it can be lowered into.  Worst case, it can always be
9675 /// lowered into a vperm.
9676 SDValue PPCTargetLowering::LowerVECTOR_SHUFFLE(SDValue Op,
9677                                                SelectionDAG &DAG) const {
9678   SDLoc dl(Op);
9679   SDValue V1 = Op.getOperand(0);
9680   SDValue V2 = Op.getOperand(1);
9681   ShuffleVectorSDNode *SVOp = cast<ShuffleVectorSDNode>(Op);
9682 
9683   // Any nodes that were combined in the target-independent combiner prior
9684   // to vector legalization will not be sent to the target combine. Try to
9685   // combine it here.
9686   if (SDValue NewShuffle = combineVectorShuffle(SVOp, DAG)) {
9687     if (!isa<ShuffleVectorSDNode>(NewShuffle))
9688       return NewShuffle;
9689     Op = NewShuffle;
9690     SVOp = cast<ShuffleVectorSDNode>(Op);
9691     V1 = Op.getOperand(0);
9692     V2 = Op.getOperand(1);
9693   }
9694   EVT VT = Op.getValueType();
9695   bool isLittleEndian = Subtarget.isLittleEndian();
9696 
9697   unsigned ShiftElts, InsertAtByte;
9698   bool Swap = false;
9699 
9700   // If this is a load-and-splat, we can do that with a single instruction
9701   // in some cases. However if the load has multiple uses, we don't want to
9702   // combine it because that will just produce multiple loads.
9703   bool IsPermutedLoad = false;
9704   const SDValue *InputLoad = getNormalLoadInput(V1, IsPermutedLoad);
9705   if (InputLoad && Subtarget.hasVSX() && V2.isUndef() &&
9706       (PPC::isSplatShuffleMask(SVOp, 4) || PPC::isSplatShuffleMask(SVOp, 8)) &&
9707       InputLoad->hasOneUse()) {
9708     bool IsFourByte = PPC::isSplatShuffleMask(SVOp, 4);
9709     int SplatIdx =
9710       PPC::getSplatIdxForPPCMnemonics(SVOp, IsFourByte ? 4 : 8, DAG);
9711 
9712     // The splat index for permuted loads will be in the left half of the vector
9713     // which is strictly wider than the loaded value by 8 bytes. So we need to
9714     // adjust the splat index to point to the correct address in memory.
9715     if (IsPermutedLoad) {
9716       assert(isLittleEndian && "Unexpected permuted load on big endian target");
9717       SplatIdx += IsFourByte ? 2 : 1;
9718       assert((SplatIdx < (IsFourByte ? 4 : 2)) &&
9719              "Splat of a value outside of the loaded memory");
9720     }
9721 
9722     LoadSDNode *LD = cast<LoadSDNode>(*InputLoad);
9723     // For 4-byte load-and-splat, we need Power9.
9724     if ((IsFourByte && Subtarget.hasP9Vector()) || !IsFourByte) {
9725       uint64_t Offset = 0;
9726       if (IsFourByte)
9727         Offset = isLittleEndian ? (3 - SplatIdx) * 4 : SplatIdx * 4;
9728       else
9729         Offset = isLittleEndian ? (1 - SplatIdx) * 8 : SplatIdx * 8;
9730 
9731       SDValue BasePtr = LD->getBasePtr();
9732       if (Offset != 0)
9733         BasePtr = DAG.getNode(ISD::ADD, dl, getPointerTy(DAG.getDataLayout()),
9734                               BasePtr, DAG.getIntPtrConstant(Offset, dl));
9735       SDValue Ops[] = {
9736         LD->getChain(),    // Chain
9737         BasePtr,           // BasePtr
9738         DAG.getValueType(Op.getValueType()) // VT
9739       };
9740       SDVTList VTL =
9741         DAG.getVTList(IsFourByte ? MVT::v4i32 : MVT::v2i64, MVT::Other);
9742       SDValue LdSplt =
9743         DAG.getMemIntrinsicNode(PPCISD::LD_SPLAT, dl, VTL,
9744                                 Ops, LD->getMemoryVT(), LD->getMemOperand());
9745       if (LdSplt.getValueType() != SVOp->getValueType(0))
9746         LdSplt = DAG.getBitcast(SVOp->getValueType(0), LdSplt);
9747       return LdSplt;
9748     }
9749   }
9750   if (Subtarget.hasP9Vector() &&
9751       PPC::isXXINSERTWMask(SVOp, ShiftElts, InsertAtByte, Swap,
9752                            isLittleEndian)) {
9753     if (Swap)
9754       std::swap(V1, V2);
9755     SDValue Conv1 = DAG.getNode(ISD::BITCAST, dl, MVT::v4i32, V1);
9756     SDValue Conv2 = DAG.getNode(ISD::BITCAST, dl, MVT::v4i32, V2);
9757     if (ShiftElts) {
9758       SDValue Shl = DAG.getNode(PPCISD::VECSHL, dl, MVT::v4i32, Conv2, Conv2,
9759                                 DAG.getConstant(ShiftElts, dl, MVT::i32));
9760       SDValue Ins = DAG.getNode(PPCISD::VECINSERT, dl, MVT::v4i32, Conv1, Shl,
9761                                 DAG.getConstant(InsertAtByte, dl, MVT::i32));
9762       return DAG.getNode(ISD::BITCAST, dl, MVT::v16i8, Ins);
9763     }
9764     SDValue Ins = DAG.getNode(PPCISD::VECINSERT, dl, MVT::v4i32, Conv1, Conv2,
9765                               DAG.getConstant(InsertAtByte, dl, MVT::i32));
9766     return DAG.getNode(ISD::BITCAST, dl, MVT::v16i8, Ins);
9767   }
9768 
9769   if (Subtarget.hasPrefixInstrs()) {
9770     SDValue SplatInsertNode;
9771     if ((SplatInsertNode = lowerToXXSPLTI32DX(SVOp, DAG)))
9772       return SplatInsertNode;
9773   }
9774 
9775   if (Subtarget.hasP9Altivec()) {
9776     SDValue NewISDNode;
9777     if ((NewISDNode = lowerToVINSERTH(SVOp, DAG)))
9778       return NewISDNode;
9779 
9780     if ((NewISDNode = lowerToVINSERTB(SVOp, DAG)))
9781       return NewISDNode;
9782   }
9783 
9784   if (Subtarget.hasVSX() &&
9785       PPC::isXXSLDWIShuffleMask(SVOp, ShiftElts, Swap, isLittleEndian)) {
9786     if (Swap)
9787       std::swap(V1, V2);
9788     SDValue Conv1 = DAG.getNode(ISD::BITCAST, dl, MVT::v4i32, V1);
9789     SDValue Conv2 =
9790         DAG.getNode(ISD::BITCAST, dl, MVT::v4i32, V2.isUndef() ? V1 : V2);
9791 
9792     SDValue Shl = DAG.getNode(PPCISD::VECSHL, dl, MVT::v4i32, Conv1, Conv2,
9793                               DAG.getConstant(ShiftElts, dl, MVT::i32));
9794     return DAG.getNode(ISD::BITCAST, dl, MVT::v16i8, Shl);
9795   }
9796 
9797   if (Subtarget.hasVSX() &&
9798     PPC::isXXPERMDIShuffleMask(SVOp, ShiftElts, Swap, isLittleEndian)) {
9799     if (Swap)
9800       std::swap(V1, V2);
9801     SDValue Conv1 = DAG.getNode(ISD::BITCAST, dl, MVT::v2i64, V1);
9802     SDValue Conv2 =
9803         DAG.getNode(ISD::BITCAST, dl, MVT::v2i64, V2.isUndef() ? V1 : V2);
9804 
9805     SDValue PermDI = DAG.getNode(PPCISD::XXPERMDI, dl, MVT::v2i64, Conv1, Conv2,
9806                               DAG.getConstant(ShiftElts, dl, MVT::i32));
9807     return DAG.getNode(ISD::BITCAST, dl, MVT::v16i8, PermDI);
9808   }
9809 
9810   if (Subtarget.hasP9Vector()) {
9811      if (PPC::isXXBRHShuffleMask(SVOp)) {
9812       SDValue Conv = DAG.getNode(ISD::BITCAST, dl, MVT::v8i16, V1);
9813       SDValue ReveHWord = DAG.getNode(ISD::BSWAP, dl, MVT::v8i16, Conv);
9814       return DAG.getNode(ISD::BITCAST, dl, MVT::v16i8, ReveHWord);
9815     } else if (PPC::isXXBRWShuffleMask(SVOp)) {
9816       SDValue Conv = DAG.getNode(ISD::BITCAST, dl, MVT::v4i32, V1);
9817       SDValue ReveWord = DAG.getNode(ISD::BSWAP, dl, MVT::v4i32, Conv);
9818       return DAG.getNode(ISD::BITCAST, dl, MVT::v16i8, ReveWord);
9819     } else if (PPC::isXXBRDShuffleMask(SVOp)) {
9820       SDValue Conv = DAG.getNode(ISD::BITCAST, dl, MVT::v2i64, V1);
9821       SDValue ReveDWord = DAG.getNode(ISD::BSWAP, dl, MVT::v2i64, Conv);
9822       return DAG.getNode(ISD::BITCAST, dl, MVT::v16i8, ReveDWord);
9823     } else if (PPC::isXXBRQShuffleMask(SVOp)) {
9824       SDValue Conv = DAG.getNode(ISD::BITCAST, dl, MVT::v1i128, V1);
9825       SDValue ReveQWord = DAG.getNode(ISD::BSWAP, dl, MVT::v1i128, Conv);
9826       return DAG.getNode(ISD::BITCAST, dl, MVT::v16i8, ReveQWord);
9827     }
9828   }
9829 
9830   if (Subtarget.hasVSX()) {
9831     if (V2.isUndef() && PPC::isSplatShuffleMask(SVOp, 4)) {
9832       int SplatIdx = PPC::getSplatIdxForPPCMnemonics(SVOp, 4, DAG);
9833 
9834       SDValue Conv = DAG.getNode(ISD::BITCAST, dl, MVT::v4i32, V1);
9835       SDValue Splat = DAG.getNode(PPCISD::XXSPLT, dl, MVT::v4i32, Conv,
9836                                   DAG.getConstant(SplatIdx, dl, MVT::i32));
9837       return DAG.getNode(ISD::BITCAST, dl, MVT::v16i8, Splat);
9838     }
9839 
9840     // Left shifts of 8 bytes are actually swaps. Convert accordingly.
9841     if (V2.isUndef() && PPC::isVSLDOIShuffleMask(SVOp, 1, DAG) == 8) {
9842       SDValue Conv = DAG.getNode(ISD::BITCAST, dl, MVT::v2f64, V1);
9843       SDValue Swap = DAG.getNode(PPCISD::SWAP_NO_CHAIN, dl, MVT::v2f64, Conv);
9844       return DAG.getNode(ISD::BITCAST, dl, MVT::v16i8, Swap);
9845     }
9846   }
9847 
9848   // Cases that are handled by instructions that take permute immediates
9849   // (such as vsplt*) should be left as VECTOR_SHUFFLE nodes so they can be
9850   // selected by the instruction selector.
9851   if (V2.isUndef()) {
9852     if (PPC::isSplatShuffleMask(SVOp, 1) ||
9853         PPC::isSplatShuffleMask(SVOp, 2) ||
9854         PPC::isSplatShuffleMask(SVOp, 4) ||
9855         PPC::isVPKUWUMShuffleMask(SVOp, 1, DAG) ||
9856         PPC::isVPKUHUMShuffleMask(SVOp, 1, DAG) ||
9857         PPC::isVSLDOIShuffleMask(SVOp, 1, DAG) != -1 ||
9858         PPC::isVMRGLShuffleMask(SVOp, 1, 1, DAG) ||
9859         PPC::isVMRGLShuffleMask(SVOp, 2, 1, DAG) ||
9860         PPC::isVMRGLShuffleMask(SVOp, 4, 1, DAG) ||
9861         PPC::isVMRGHShuffleMask(SVOp, 1, 1, DAG) ||
9862         PPC::isVMRGHShuffleMask(SVOp, 2, 1, DAG) ||
9863         PPC::isVMRGHShuffleMask(SVOp, 4, 1, DAG) ||
9864         (Subtarget.hasP8Altivec() && (
9865          PPC::isVPKUDUMShuffleMask(SVOp, 1, DAG) ||
9866          PPC::isVMRGEOShuffleMask(SVOp, true, 1, DAG) ||
9867          PPC::isVMRGEOShuffleMask(SVOp, false, 1, DAG)))) {
9868       return Op;
9869     }
9870   }
9871 
9872   // Altivec has a variety of "shuffle immediates" that take two vector inputs
9873   // and produce a fixed permutation.  If any of these match, do not lower to
9874   // VPERM.
9875   unsigned int ShuffleKind = isLittleEndian ? 2 : 0;
9876   if (PPC::isVPKUWUMShuffleMask(SVOp, ShuffleKind, DAG) ||
9877       PPC::isVPKUHUMShuffleMask(SVOp, ShuffleKind, DAG) ||
9878       PPC::isVSLDOIShuffleMask(SVOp, ShuffleKind, DAG) != -1 ||
9879       PPC::isVMRGLShuffleMask(SVOp, 1, ShuffleKind, DAG) ||
9880       PPC::isVMRGLShuffleMask(SVOp, 2, ShuffleKind, DAG) ||
9881       PPC::isVMRGLShuffleMask(SVOp, 4, ShuffleKind, DAG) ||
9882       PPC::isVMRGHShuffleMask(SVOp, 1, ShuffleKind, DAG) ||
9883       PPC::isVMRGHShuffleMask(SVOp, 2, ShuffleKind, DAG) ||
9884       PPC::isVMRGHShuffleMask(SVOp, 4, ShuffleKind, DAG) ||
9885       (Subtarget.hasP8Altivec() && (
9886        PPC::isVPKUDUMShuffleMask(SVOp, ShuffleKind, DAG) ||
9887        PPC::isVMRGEOShuffleMask(SVOp, true, ShuffleKind, DAG) ||
9888        PPC::isVMRGEOShuffleMask(SVOp, false, ShuffleKind, DAG))))
9889     return Op;
9890 
9891   // Check to see if this is a shuffle of 4-byte values.  If so, we can use our
9892   // perfect shuffle table to emit an optimal matching sequence.
9893   ArrayRef<int> PermMask = SVOp->getMask();
9894 
9895   unsigned PFIndexes[4];
9896   bool isFourElementShuffle = true;
9897   for (unsigned i = 0; i != 4 && isFourElementShuffle; ++i) { // Element number
9898     unsigned EltNo = 8;   // Start out undef.
9899     for (unsigned j = 0; j != 4; ++j) {  // Intra-element byte.
9900       if (PermMask[i*4+j] < 0)
9901         continue;   // Undef, ignore it.
9902 
9903       unsigned ByteSource = PermMask[i*4+j];
9904       if ((ByteSource & 3) != j) {
9905         isFourElementShuffle = false;
9906         break;
9907       }
9908 
9909       if (EltNo == 8) {
9910         EltNo = ByteSource/4;
9911       } else if (EltNo != ByteSource/4) {
9912         isFourElementShuffle = false;
9913         break;
9914       }
9915     }
9916     PFIndexes[i] = EltNo;
9917   }
9918 
9919   // If this shuffle can be expressed as a shuffle of 4-byte elements, use the
9920   // perfect shuffle vector to determine if it is cost effective to do this as
9921   // discrete instructions, or whether we should use a vperm.
9922   // For now, we skip this for little endian until such time as we have a
9923   // little-endian perfect shuffle table.
9924   if (isFourElementShuffle && !isLittleEndian) {
9925     // Compute the index in the perfect shuffle table.
9926     unsigned PFTableIndex =
9927       PFIndexes[0]*9*9*9+PFIndexes[1]*9*9+PFIndexes[2]*9+PFIndexes[3];
9928 
9929     unsigned PFEntry = PerfectShuffleTable[PFTableIndex];
9930     unsigned Cost  = (PFEntry >> 30);
9931 
9932     // Determining when to avoid vperm is tricky.  Many things affect the cost
9933     // of vperm, particularly how many times the perm mask needs to be computed.
9934     // For example, if the perm mask can be hoisted out of a loop or is already
9935     // used (perhaps because there are multiple permutes with the same shuffle
9936     // mask?) the vperm has a cost of 1.  OTOH, hoisting the permute mask out of
9937     // the loop requires an extra register.
9938     //
9939     // As a compromise, we only emit discrete instructions if the shuffle can be
9940     // generated in 3 or fewer operations.  When we have loop information
9941     // available, if this block is within a loop, we should avoid using vperm
9942     // for 3-operation perms and use a constant pool load instead.
9943     if (Cost < 3)
9944       return GeneratePerfectShuffle(PFEntry, V1, V2, DAG, dl);
9945   }
9946 
9947   // Lower this to a VPERM(V1, V2, V3) expression, where V3 is a constant
9948   // vector that will get spilled to the constant pool.
9949   if (V2.isUndef()) V2 = V1;
9950 
9951   // The SHUFFLE_VECTOR mask is almost exactly what we want for vperm, except
9952   // that it is in input element units, not in bytes.  Convert now.
9953 
9954   // For little endian, the order of the input vectors is reversed, and
9955   // the permutation mask is complemented with respect to 31.  This is
9956   // necessary to produce proper semantics with the big-endian-biased vperm
9957   // instruction.
9958   EVT EltVT = V1.getValueType().getVectorElementType();
9959   unsigned BytesPerElement = EltVT.getSizeInBits()/8;
9960 
9961   SmallVector<SDValue, 16> ResultMask;
9962   for (unsigned i = 0, e = VT.getVectorNumElements(); i != e; ++i) {
9963     unsigned SrcElt = PermMask[i] < 0 ? 0 : PermMask[i];
9964 
9965     for (unsigned j = 0; j != BytesPerElement; ++j)
9966       if (isLittleEndian)
9967         ResultMask.push_back(DAG.getConstant(31 - (SrcElt*BytesPerElement + j),
9968                                              dl, MVT::i32));
9969       else
9970         ResultMask.push_back(DAG.getConstant(SrcElt*BytesPerElement + j, dl,
9971                                              MVT::i32));
9972   }
9973 
9974   ShufflesHandledWithVPERM++;
9975   SDValue VPermMask = DAG.getBuildVector(MVT::v16i8, dl, ResultMask);
9976   LLVM_DEBUG(dbgs() << "Emitting a VPERM for the following shuffle:\n");
9977   LLVM_DEBUG(SVOp->dump());
9978   LLVM_DEBUG(dbgs() << "With the following permute control vector:\n");
9979   LLVM_DEBUG(VPermMask.dump());
9980 
9981   if (isLittleEndian)
9982     return DAG.getNode(PPCISD::VPERM, dl, V1.getValueType(),
9983                        V2, V1, VPermMask);
9984   else
9985     return DAG.getNode(PPCISD::VPERM, dl, V1.getValueType(),
9986                        V1, V2, VPermMask);
9987 }
9988 
9989 /// getVectorCompareInfo - Given an intrinsic, return false if it is not a
9990 /// vector comparison.  If it is, return true and fill in Opc/isDot with
9991 /// information about the intrinsic.
9992 static bool getVectorCompareInfo(SDValue Intrin, int &CompareOpc,
9993                                  bool &isDot, const PPCSubtarget &Subtarget) {
9994   unsigned IntrinsicID =
9995       cast<ConstantSDNode>(Intrin.getOperand(0))->getZExtValue();
9996   CompareOpc = -1;
9997   isDot = false;
9998   switch (IntrinsicID) {
9999   default:
10000     return false;
10001   // Comparison predicates.
10002   case Intrinsic::ppc_altivec_vcmpbfp_p:
10003     CompareOpc = 966;
10004     isDot = true;
10005     break;
10006   case Intrinsic::ppc_altivec_vcmpeqfp_p:
10007     CompareOpc = 198;
10008     isDot = true;
10009     break;
10010   case Intrinsic::ppc_altivec_vcmpequb_p:
10011     CompareOpc = 6;
10012     isDot = true;
10013     break;
10014   case Intrinsic::ppc_altivec_vcmpequh_p:
10015     CompareOpc = 70;
10016     isDot = true;
10017     break;
10018   case Intrinsic::ppc_altivec_vcmpequw_p:
10019     CompareOpc = 134;
10020     isDot = true;
10021     break;
10022   case Intrinsic::ppc_altivec_vcmpequd_p:
10023     if (Subtarget.hasP8Altivec()) {
10024       CompareOpc = 199;
10025       isDot = true;
10026     } else
10027       return false;
10028     break;
10029   case Intrinsic::ppc_altivec_vcmpneb_p:
10030   case Intrinsic::ppc_altivec_vcmpneh_p:
10031   case Intrinsic::ppc_altivec_vcmpnew_p:
10032   case Intrinsic::ppc_altivec_vcmpnezb_p:
10033   case Intrinsic::ppc_altivec_vcmpnezh_p:
10034   case Intrinsic::ppc_altivec_vcmpnezw_p:
10035     if (Subtarget.hasP9Altivec()) {
10036       switch (IntrinsicID) {
10037       default:
10038         llvm_unreachable("Unknown comparison intrinsic.");
10039       case Intrinsic::ppc_altivec_vcmpneb_p:
10040         CompareOpc = 7;
10041         break;
10042       case Intrinsic::ppc_altivec_vcmpneh_p:
10043         CompareOpc = 71;
10044         break;
10045       case Intrinsic::ppc_altivec_vcmpnew_p:
10046         CompareOpc = 135;
10047         break;
10048       case Intrinsic::ppc_altivec_vcmpnezb_p:
10049         CompareOpc = 263;
10050         break;
10051       case Intrinsic::ppc_altivec_vcmpnezh_p:
10052         CompareOpc = 327;
10053         break;
10054       case Intrinsic::ppc_altivec_vcmpnezw_p:
10055         CompareOpc = 391;
10056         break;
10057       }
10058       isDot = true;
10059     } else
10060       return false;
10061     break;
10062   case Intrinsic::ppc_altivec_vcmpgefp_p:
10063     CompareOpc = 454;
10064     isDot = true;
10065     break;
10066   case Intrinsic::ppc_altivec_vcmpgtfp_p:
10067     CompareOpc = 710;
10068     isDot = true;
10069     break;
10070   case Intrinsic::ppc_altivec_vcmpgtsb_p:
10071     CompareOpc = 774;
10072     isDot = true;
10073     break;
10074   case Intrinsic::ppc_altivec_vcmpgtsh_p:
10075     CompareOpc = 838;
10076     isDot = true;
10077     break;
10078   case Intrinsic::ppc_altivec_vcmpgtsw_p:
10079     CompareOpc = 902;
10080     isDot = true;
10081     break;
10082   case Intrinsic::ppc_altivec_vcmpgtsd_p:
10083     if (Subtarget.hasP8Altivec()) {
10084       CompareOpc = 967;
10085       isDot = true;
10086     } else
10087       return false;
10088     break;
10089   case Intrinsic::ppc_altivec_vcmpgtub_p:
10090     CompareOpc = 518;
10091     isDot = true;
10092     break;
10093   case Intrinsic::ppc_altivec_vcmpgtuh_p:
10094     CompareOpc = 582;
10095     isDot = true;
10096     break;
10097   case Intrinsic::ppc_altivec_vcmpgtuw_p:
10098     CompareOpc = 646;
10099     isDot = true;
10100     break;
10101   case Intrinsic::ppc_altivec_vcmpgtud_p:
10102     if (Subtarget.hasP8Altivec()) {
10103       CompareOpc = 711;
10104       isDot = true;
10105     } else
10106       return false;
10107     break;
10108 
10109   // VSX predicate comparisons use the same infrastructure
10110   case Intrinsic::ppc_vsx_xvcmpeqdp_p:
10111   case Intrinsic::ppc_vsx_xvcmpgedp_p:
10112   case Intrinsic::ppc_vsx_xvcmpgtdp_p:
10113   case Intrinsic::ppc_vsx_xvcmpeqsp_p:
10114   case Intrinsic::ppc_vsx_xvcmpgesp_p:
10115   case Intrinsic::ppc_vsx_xvcmpgtsp_p:
10116     if (Subtarget.hasVSX()) {
10117       switch (IntrinsicID) {
10118       case Intrinsic::ppc_vsx_xvcmpeqdp_p:
10119         CompareOpc = 99;
10120         break;
10121       case Intrinsic::ppc_vsx_xvcmpgedp_p:
10122         CompareOpc = 115;
10123         break;
10124       case Intrinsic::ppc_vsx_xvcmpgtdp_p:
10125         CompareOpc = 107;
10126         break;
10127       case Intrinsic::ppc_vsx_xvcmpeqsp_p:
10128         CompareOpc = 67;
10129         break;
10130       case Intrinsic::ppc_vsx_xvcmpgesp_p:
10131         CompareOpc = 83;
10132         break;
10133       case Intrinsic::ppc_vsx_xvcmpgtsp_p:
10134         CompareOpc = 75;
10135         break;
10136       }
10137       isDot = true;
10138     } else
10139       return false;
10140     break;
10141 
10142   // Normal Comparisons.
10143   case Intrinsic::ppc_altivec_vcmpbfp:
10144     CompareOpc = 966;
10145     break;
10146   case Intrinsic::ppc_altivec_vcmpeqfp:
10147     CompareOpc = 198;
10148     break;
10149   case Intrinsic::ppc_altivec_vcmpequb:
10150     CompareOpc = 6;
10151     break;
10152   case Intrinsic::ppc_altivec_vcmpequh:
10153     CompareOpc = 70;
10154     break;
10155   case Intrinsic::ppc_altivec_vcmpequw:
10156     CompareOpc = 134;
10157     break;
10158   case Intrinsic::ppc_altivec_vcmpequd:
10159     if (Subtarget.hasP8Altivec())
10160       CompareOpc = 199;
10161     else
10162       return false;
10163     break;
10164   case Intrinsic::ppc_altivec_vcmpneb:
10165   case Intrinsic::ppc_altivec_vcmpneh:
10166   case Intrinsic::ppc_altivec_vcmpnew:
10167   case Intrinsic::ppc_altivec_vcmpnezb:
10168   case Intrinsic::ppc_altivec_vcmpnezh:
10169   case Intrinsic::ppc_altivec_vcmpnezw:
10170     if (Subtarget.hasP9Altivec())
10171       switch (IntrinsicID) {
10172       default:
10173         llvm_unreachable("Unknown comparison intrinsic.");
10174       case Intrinsic::ppc_altivec_vcmpneb:
10175         CompareOpc = 7;
10176         break;
10177       case Intrinsic::ppc_altivec_vcmpneh:
10178         CompareOpc = 71;
10179         break;
10180       case Intrinsic::ppc_altivec_vcmpnew:
10181         CompareOpc = 135;
10182         break;
10183       case Intrinsic::ppc_altivec_vcmpnezb:
10184         CompareOpc = 263;
10185         break;
10186       case Intrinsic::ppc_altivec_vcmpnezh:
10187         CompareOpc = 327;
10188         break;
10189       case Intrinsic::ppc_altivec_vcmpnezw:
10190         CompareOpc = 391;
10191         break;
10192       }
10193     else
10194       return false;
10195     break;
10196   case Intrinsic::ppc_altivec_vcmpgefp:
10197     CompareOpc = 454;
10198     break;
10199   case Intrinsic::ppc_altivec_vcmpgtfp:
10200     CompareOpc = 710;
10201     break;
10202   case Intrinsic::ppc_altivec_vcmpgtsb:
10203     CompareOpc = 774;
10204     break;
10205   case Intrinsic::ppc_altivec_vcmpgtsh:
10206     CompareOpc = 838;
10207     break;
10208   case Intrinsic::ppc_altivec_vcmpgtsw:
10209     CompareOpc = 902;
10210     break;
10211   case Intrinsic::ppc_altivec_vcmpgtsd:
10212     if (Subtarget.hasP8Altivec())
10213       CompareOpc = 967;
10214     else
10215       return false;
10216     break;
10217   case Intrinsic::ppc_altivec_vcmpgtub:
10218     CompareOpc = 518;
10219     break;
10220   case Intrinsic::ppc_altivec_vcmpgtuh:
10221     CompareOpc = 582;
10222     break;
10223   case Intrinsic::ppc_altivec_vcmpgtuw:
10224     CompareOpc = 646;
10225     break;
10226   case Intrinsic::ppc_altivec_vcmpgtud:
10227     if (Subtarget.hasP8Altivec())
10228       CompareOpc = 711;
10229     else
10230       return false;
10231     break;
10232   }
10233   return true;
10234 }
10235 
10236 /// LowerINTRINSIC_WO_CHAIN - If this is an intrinsic that we want to custom
10237 /// lower, do it, otherwise return null.
10238 SDValue PPCTargetLowering::LowerINTRINSIC_WO_CHAIN(SDValue Op,
10239                                                    SelectionDAG &DAG) const {
10240   unsigned IntrinsicID =
10241     cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue();
10242 
10243   SDLoc dl(Op);
10244 
10245   if (IntrinsicID == Intrinsic::thread_pointer) {
10246     // Reads the thread pointer register, used for __builtin_thread_pointer.
10247     if (Subtarget.isPPC64())
10248       return DAG.getRegister(PPC::X13, MVT::i64);
10249     return DAG.getRegister(PPC::R2, MVT::i32);
10250   }
10251 
10252   // If this is a lowered altivec predicate compare, CompareOpc is set to the
10253   // opcode number of the comparison.
10254   int CompareOpc;
10255   bool isDot;
10256   if (!getVectorCompareInfo(Op, CompareOpc, isDot, Subtarget))
10257     return SDValue();    // Don't custom lower most intrinsics.
10258 
10259   // If this is a non-dot comparison, make the VCMP node and we are done.
10260   if (!isDot) {
10261     SDValue Tmp = DAG.getNode(PPCISD::VCMP, dl, Op.getOperand(2).getValueType(),
10262                               Op.getOperand(1), Op.getOperand(2),
10263                               DAG.getConstant(CompareOpc, dl, MVT::i32));
10264     return DAG.getNode(ISD::BITCAST, dl, Op.getValueType(), Tmp);
10265   }
10266 
10267   // Create the PPCISD altivec 'dot' comparison node.
10268   SDValue Ops[] = {
10269     Op.getOperand(2),  // LHS
10270     Op.getOperand(3),  // RHS
10271     DAG.getConstant(CompareOpc, dl, MVT::i32)
10272   };
10273   EVT VTs[] = { Op.getOperand(2).getValueType(), MVT::Glue };
10274   SDValue CompNode = DAG.getNode(PPCISD::VCMPo, dl, VTs, Ops);
10275 
10276   // Now that we have the comparison, emit a copy from the CR to a GPR.
10277   // This is flagged to the above dot comparison.
10278   SDValue Flags = DAG.getNode(PPCISD::MFOCRF, dl, MVT::i32,
10279                                 DAG.getRegister(PPC::CR6, MVT::i32),
10280                                 CompNode.getValue(1));
10281 
10282   // Unpack the result based on how the target uses it.
10283   unsigned BitNo;   // Bit # of CR6.
10284   bool InvertBit;   // Invert result?
10285   switch (cast<ConstantSDNode>(Op.getOperand(1))->getZExtValue()) {
10286   default:  // Can't happen, don't crash on invalid number though.
10287   case 0:   // Return the value of the EQ bit of CR6.
10288     BitNo = 0; InvertBit = false;
10289     break;
10290   case 1:   // Return the inverted value of the EQ bit of CR6.
10291     BitNo = 0; InvertBit = true;
10292     break;
10293   case 2:   // Return the value of the LT bit of CR6.
10294     BitNo = 2; InvertBit = false;
10295     break;
10296   case 3:   // Return the inverted value of the LT bit of CR6.
10297     BitNo = 2; InvertBit = true;
10298     break;
10299   }
10300 
10301   // Shift the bit into the low position.
10302   Flags = DAG.getNode(ISD::SRL, dl, MVT::i32, Flags,
10303                       DAG.getConstant(8 - (3 - BitNo), dl, MVT::i32));
10304   // Isolate the bit.
10305   Flags = DAG.getNode(ISD::AND, dl, MVT::i32, Flags,
10306                       DAG.getConstant(1, dl, MVT::i32));
10307 
10308   // If we are supposed to, toggle the bit.
10309   if (InvertBit)
10310     Flags = DAG.getNode(ISD::XOR, dl, MVT::i32, Flags,
10311                         DAG.getConstant(1, dl, MVT::i32));
10312   return Flags;
10313 }
10314 
10315 SDValue PPCTargetLowering::LowerINTRINSIC_VOID(SDValue Op,
10316                                                SelectionDAG &DAG) const {
10317   // SelectionDAGBuilder::visitTargetIntrinsic may insert one extra chain to
10318   // the beginning of the argument list.
10319   int ArgStart = isa<ConstantSDNode>(Op.getOperand(0)) ? 0 : 1;
10320   SDLoc DL(Op);
10321   switch (cast<ConstantSDNode>(Op.getOperand(ArgStart))->getZExtValue()) {
10322   case Intrinsic::ppc_cfence: {
10323     assert(ArgStart == 1 && "llvm.ppc.cfence must carry a chain argument.");
10324     assert(Subtarget.isPPC64() && "Only 64-bit is supported for now.");
10325     return SDValue(DAG.getMachineNode(PPC::CFENCE8, DL, MVT::Other,
10326                                       DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i64,
10327                                                   Op.getOperand(ArgStart + 1)),
10328                                       Op.getOperand(0)),
10329                    0);
10330   }
10331   default:
10332     break;
10333   }
10334   return SDValue();
10335 }
10336 
10337 // Lower scalar BSWAP64 to xxbrd.
10338 SDValue PPCTargetLowering::LowerBSWAP(SDValue Op, SelectionDAG &DAG) const {
10339   SDLoc dl(Op);
10340   // MTVSRDD
10341   Op = DAG.getNode(ISD::BUILD_VECTOR, dl, MVT::v2i64, Op.getOperand(0),
10342                    Op.getOperand(0));
10343   // XXBRD
10344   Op = DAG.getNode(ISD::BSWAP, dl, MVT::v2i64, Op);
10345   // MFVSRD
10346   int VectorIndex = 0;
10347   if (Subtarget.isLittleEndian())
10348     VectorIndex = 1;
10349   Op = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, MVT::i64, Op,
10350                    DAG.getTargetConstant(VectorIndex, dl, MVT::i32));
10351   return Op;
10352 }
10353 
10354 // ATOMIC_CMP_SWAP for i8/i16 needs to zero-extend its input since it will be
10355 // compared to a value that is atomically loaded (atomic loads zero-extend).
10356 SDValue PPCTargetLowering::LowerATOMIC_CMP_SWAP(SDValue Op,
10357                                                 SelectionDAG &DAG) const {
10358   assert(Op.getOpcode() == ISD::ATOMIC_CMP_SWAP &&
10359          "Expecting an atomic compare-and-swap here.");
10360   SDLoc dl(Op);
10361   auto *AtomicNode = cast<AtomicSDNode>(Op.getNode());
10362   EVT MemVT = AtomicNode->getMemoryVT();
10363   if (MemVT.getSizeInBits() >= 32)
10364     return Op;
10365 
10366   SDValue CmpOp = Op.getOperand(2);
10367   // If this is already correctly zero-extended, leave it alone.
10368   auto HighBits = APInt::getHighBitsSet(32, 32 - MemVT.getSizeInBits());
10369   if (DAG.MaskedValueIsZero(CmpOp, HighBits))
10370     return Op;
10371 
10372   // Clear the high bits of the compare operand.
10373   unsigned MaskVal = (1 << MemVT.getSizeInBits()) - 1;
10374   SDValue NewCmpOp =
10375     DAG.getNode(ISD::AND, dl, MVT::i32, CmpOp,
10376                 DAG.getConstant(MaskVal, dl, MVT::i32));
10377 
10378   // Replace the existing compare operand with the properly zero-extended one.
10379   SmallVector<SDValue, 4> Ops;
10380   for (int i = 0, e = AtomicNode->getNumOperands(); i < e; i++)
10381     Ops.push_back(AtomicNode->getOperand(i));
10382   Ops[2] = NewCmpOp;
10383   MachineMemOperand *MMO = AtomicNode->getMemOperand();
10384   SDVTList Tys = DAG.getVTList(MVT::i32, MVT::Other);
10385   auto NodeTy =
10386     (MemVT == MVT::i8) ? PPCISD::ATOMIC_CMP_SWAP_8 : PPCISD::ATOMIC_CMP_SWAP_16;
10387   return DAG.getMemIntrinsicNode(NodeTy, dl, Tys, Ops, MemVT, MMO);
10388 }
10389 
10390 SDValue PPCTargetLowering::LowerSCALAR_TO_VECTOR(SDValue Op,
10391                                                  SelectionDAG &DAG) const {
10392   SDLoc dl(Op);
10393   // Create a stack slot that is 16-byte aligned.
10394   MachineFrameInfo &MFI = DAG.getMachineFunction().getFrameInfo();
10395   int FrameIdx = MFI.CreateStackObject(16, Align(16), false);
10396   EVT PtrVT = getPointerTy(DAG.getDataLayout());
10397   SDValue FIdx = DAG.getFrameIndex(FrameIdx, PtrVT);
10398 
10399   // Store the input value into Value#0 of the stack slot.
10400   SDValue Store = DAG.getStore(DAG.getEntryNode(), dl, Op.getOperand(0), FIdx,
10401                                MachinePointerInfo());
10402   // Load it out.
10403   return DAG.getLoad(Op.getValueType(), dl, Store, FIdx, MachinePointerInfo());
10404 }
10405 
10406 SDValue PPCTargetLowering::LowerINSERT_VECTOR_ELT(SDValue Op,
10407                                                   SelectionDAG &DAG) const {
10408   assert(Op.getOpcode() == ISD::INSERT_VECTOR_ELT &&
10409          "Should only be called for ISD::INSERT_VECTOR_ELT");
10410 
10411   ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op.getOperand(2));
10412   // We have legal lowering for constant indices but not for variable ones.
10413   if (!C)
10414     return SDValue();
10415 
10416   EVT VT = Op.getValueType();
10417   SDLoc dl(Op);
10418   SDValue V1 = Op.getOperand(0);
10419   SDValue V2 = Op.getOperand(1);
10420   // We can use MTVSRZ + VECINSERT for v8i16 and v16i8 types.
10421   if (VT == MVT::v8i16 || VT == MVT::v16i8) {
10422     SDValue Mtvsrz = DAG.getNode(PPCISD::MTVSRZ, dl, VT, V2);
10423     unsigned BytesInEachElement = VT.getVectorElementType().getSizeInBits() / 8;
10424     unsigned InsertAtElement = C->getZExtValue();
10425     unsigned InsertAtByte = InsertAtElement * BytesInEachElement;
10426     if (Subtarget.isLittleEndian()) {
10427       InsertAtByte = (16 - BytesInEachElement) - InsertAtByte;
10428     }
10429     return DAG.getNode(PPCISD::VECINSERT, dl, VT, V1, Mtvsrz,
10430                        DAG.getConstant(InsertAtByte, dl, MVT::i32));
10431   }
10432   return Op;
10433 }
10434 
10435 SDValue PPCTargetLowering::LowerMUL(SDValue Op, SelectionDAG &DAG) const {
10436   SDLoc dl(Op);
10437   if (Op.getValueType() == MVT::v4i32) {
10438     SDValue LHS = Op.getOperand(0), RHS = Op.getOperand(1);
10439 
10440     SDValue Zero = getCanonicalConstSplat(0, 1, MVT::v4i32, DAG, dl);
10441     // +16 as shift amt.
10442     SDValue Neg16 = getCanonicalConstSplat(-16, 4, MVT::v4i32, DAG, dl);
10443     SDValue RHSSwap =   // = vrlw RHS, 16
10444       BuildIntrinsicOp(Intrinsic::ppc_altivec_vrlw, RHS, Neg16, DAG, dl);
10445 
10446     // Shrinkify inputs to v8i16.
10447     LHS = DAG.getNode(ISD::BITCAST, dl, MVT::v8i16, LHS);
10448     RHS = DAG.getNode(ISD::BITCAST, dl, MVT::v8i16, RHS);
10449     RHSSwap = DAG.getNode(ISD::BITCAST, dl, MVT::v8i16, RHSSwap);
10450 
10451     // Low parts multiplied together, generating 32-bit results (we ignore the
10452     // top parts).
10453     SDValue LoProd = BuildIntrinsicOp(Intrinsic::ppc_altivec_vmulouh,
10454                                         LHS, RHS, DAG, dl, MVT::v4i32);
10455 
10456     SDValue HiProd = BuildIntrinsicOp(Intrinsic::ppc_altivec_vmsumuhm,
10457                                       LHS, RHSSwap, Zero, DAG, dl, MVT::v4i32);
10458     // Shift the high parts up 16 bits.
10459     HiProd = BuildIntrinsicOp(Intrinsic::ppc_altivec_vslw, HiProd,
10460                               Neg16, DAG, dl);
10461     return DAG.getNode(ISD::ADD, dl, MVT::v4i32, LoProd, HiProd);
10462   } else if (Op.getValueType() == MVT::v16i8) {
10463     SDValue LHS = Op.getOperand(0), RHS = Op.getOperand(1);
10464     bool isLittleEndian = Subtarget.isLittleEndian();
10465 
10466     // Multiply the even 8-bit parts, producing 16-bit sums.
10467     SDValue EvenParts = BuildIntrinsicOp(Intrinsic::ppc_altivec_vmuleub,
10468                                            LHS, RHS, DAG, dl, MVT::v8i16);
10469     EvenParts = DAG.getNode(ISD::BITCAST, dl, MVT::v16i8, EvenParts);
10470 
10471     // Multiply the odd 8-bit parts, producing 16-bit sums.
10472     SDValue OddParts = BuildIntrinsicOp(Intrinsic::ppc_altivec_vmuloub,
10473                                           LHS, RHS, DAG, dl, MVT::v8i16);
10474     OddParts = DAG.getNode(ISD::BITCAST, dl, MVT::v16i8, OddParts);
10475 
10476     // Merge the results together.  Because vmuleub and vmuloub are
10477     // instructions with a big-endian bias, we must reverse the
10478     // element numbering and reverse the meaning of "odd" and "even"
10479     // when generating little endian code.
10480     int Ops[16];
10481     for (unsigned i = 0; i != 8; ++i) {
10482       if (isLittleEndian) {
10483         Ops[i*2  ] = 2*i;
10484         Ops[i*2+1] = 2*i+16;
10485       } else {
10486         Ops[i*2  ] = 2*i+1;
10487         Ops[i*2+1] = 2*i+1+16;
10488       }
10489     }
10490     if (isLittleEndian)
10491       return DAG.getVectorShuffle(MVT::v16i8, dl, OddParts, EvenParts, Ops);
10492     else
10493       return DAG.getVectorShuffle(MVT::v16i8, dl, EvenParts, OddParts, Ops);
10494   } else {
10495     llvm_unreachable("Unknown mul to lower!");
10496   }
10497 }
10498 
10499 SDValue PPCTargetLowering::LowerABS(SDValue Op, SelectionDAG &DAG) const {
10500 
10501   assert(Op.getOpcode() == ISD::ABS && "Should only be called for ISD::ABS");
10502 
10503   EVT VT = Op.getValueType();
10504   assert(VT.isVector() &&
10505          "Only set vector abs as custom, scalar abs shouldn't reach here!");
10506   assert((VT == MVT::v2i64 || VT == MVT::v4i32 || VT == MVT::v8i16 ||
10507           VT == MVT::v16i8) &&
10508          "Unexpected vector element type!");
10509   assert((VT != MVT::v2i64 || Subtarget.hasP8Altivec()) &&
10510          "Current subtarget doesn't support smax v2i64!");
10511 
10512   // For vector abs, it can be lowered to:
10513   // abs x
10514   // ==>
10515   // y = -x
10516   // smax(x, y)
10517 
10518   SDLoc dl(Op);
10519   SDValue X = Op.getOperand(0);
10520   SDValue Zero = DAG.getConstant(0, dl, VT);
10521   SDValue Y = DAG.getNode(ISD::SUB, dl, VT, Zero, X);
10522 
10523   // SMAX patch https://reviews.llvm.org/D47332
10524   // hasn't landed yet, so use intrinsic first here.
10525   // TODO: Should use SMAX directly once SMAX patch landed
10526   Intrinsic::ID BifID = Intrinsic::ppc_altivec_vmaxsw;
10527   if (VT == MVT::v2i64)
10528     BifID = Intrinsic::ppc_altivec_vmaxsd;
10529   else if (VT == MVT::v8i16)
10530     BifID = Intrinsic::ppc_altivec_vmaxsh;
10531   else if (VT == MVT::v16i8)
10532     BifID = Intrinsic::ppc_altivec_vmaxsb;
10533 
10534   return BuildIntrinsicOp(BifID, X, Y, DAG, dl, VT);
10535 }
10536 
10537 // Custom lowering for fpext vf32 to v2f64
10538 SDValue PPCTargetLowering::LowerFP_EXTEND(SDValue Op, SelectionDAG &DAG) const {
10539 
10540   assert(Op.getOpcode() == ISD::FP_EXTEND &&
10541          "Should only be called for ISD::FP_EXTEND");
10542 
10543   // FIXME: handle extends from half precision float vectors on P9.
10544   // We only want to custom lower an extend from v2f32 to v2f64.
10545   if (Op.getValueType() != MVT::v2f64 ||
10546       Op.getOperand(0).getValueType() != MVT::v2f32)
10547     return SDValue();
10548 
10549   SDLoc dl(Op);
10550   SDValue Op0 = Op.getOperand(0);
10551 
10552   switch (Op0.getOpcode()) {
10553   default:
10554     return SDValue();
10555   case ISD::EXTRACT_SUBVECTOR: {
10556     assert(Op0.getNumOperands() == 2 &&
10557            isa<ConstantSDNode>(Op0->getOperand(1)) &&
10558            "Node should have 2 operands with second one being a constant!");
10559 
10560     if (Op0.getOperand(0).getValueType() != MVT::v4f32)
10561       return SDValue();
10562 
10563     // Custom lower is only done for high or low doubleword.
10564     int Idx = cast<ConstantSDNode>(Op0.getOperand(1))->getZExtValue();
10565     if (Idx % 2 != 0)
10566       return SDValue();
10567 
10568     // Since input is v4f32, at this point Idx is either 0 or 2.
10569     // Shift to get the doubleword position we want.
10570     int DWord = Idx >> 1;
10571 
10572     // High and low word positions are different on little endian.
10573     if (Subtarget.isLittleEndian())
10574       DWord ^= 0x1;
10575 
10576     return DAG.getNode(PPCISD::FP_EXTEND_HALF, dl, MVT::v2f64,
10577                        Op0.getOperand(0), DAG.getConstant(DWord, dl, MVT::i32));
10578   }
10579   case ISD::FADD:
10580   case ISD::FMUL:
10581   case ISD::FSUB: {
10582     SDValue NewLoad[2];
10583     for (unsigned i = 0, ie = Op0.getNumOperands(); i != ie; ++i) {
10584       // Ensure both input are loads.
10585       SDValue LdOp = Op0.getOperand(i);
10586       if (LdOp.getOpcode() != ISD::LOAD)
10587         return SDValue();
10588       // Generate new load node.
10589       LoadSDNode *LD = cast<LoadSDNode>(LdOp);
10590       SDValue LoadOps[] = {LD->getChain(), LD->getBasePtr()};
10591       NewLoad[i] = DAG.getMemIntrinsicNode(
10592           PPCISD::LD_VSX_LH, dl, DAG.getVTList(MVT::v4f32, MVT::Other), LoadOps,
10593           LD->getMemoryVT(), LD->getMemOperand());
10594     }
10595     SDValue NewOp =
10596         DAG.getNode(Op0.getOpcode(), SDLoc(Op0), MVT::v4f32, NewLoad[0],
10597                     NewLoad[1], Op0.getNode()->getFlags());
10598     return DAG.getNode(PPCISD::FP_EXTEND_HALF, dl, MVT::v2f64, NewOp,
10599                        DAG.getConstant(0, dl, MVT::i32));
10600   }
10601   case ISD::LOAD: {
10602     LoadSDNode *LD = cast<LoadSDNode>(Op0);
10603     SDValue LoadOps[] = {LD->getChain(), LD->getBasePtr()};
10604     SDValue NewLd = DAG.getMemIntrinsicNode(
10605         PPCISD::LD_VSX_LH, dl, DAG.getVTList(MVT::v4f32, MVT::Other), LoadOps,
10606         LD->getMemoryVT(), LD->getMemOperand());
10607     return DAG.getNode(PPCISD::FP_EXTEND_HALF, dl, MVT::v2f64, NewLd,
10608                        DAG.getConstant(0, dl, MVT::i32));
10609   }
10610   }
10611   llvm_unreachable("ERROR:Should return for all cases within swtich.");
10612 }
10613 
10614 /// LowerOperation - Provide custom lowering hooks for some operations.
10615 ///
10616 SDValue PPCTargetLowering::LowerOperation(SDValue Op, SelectionDAG &DAG) const {
10617   switch (Op.getOpcode()) {
10618   default: llvm_unreachable("Wasn't expecting to be able to lower this!");
10619   case ISD::ConstantPool:       return LowerConstantPool(Op, DAG);
10620   case ISD::BlockAddress:       return LowerBlockAddress(Op, DAG);
10621   case ISD::GlobalAddress:      return LowerGlobalAddress(Op, DAG);
10622   case ISD::GlobalTLSAddress:   return LowerGlobalTLSAddress(Op, DAG);
10623   case ISD::JumpTable:          return LowerJumpTable(Op, DAG);
10624   case ISD::SETCC:              return LowerSETCC(Op, DAG);
10625   case ISD::INIT_TRAMPOLINE:    return LowerINIT_TRAMPOLINE(Op, DAG);
10626   case ISD::ADJUST_TRAMPOLINE:  return LowerADJUST_TRAMPOLINE(Op, DAG);
10627 
10628   // Variable argument lowering.
10629   case ISD::VASTART:            return LowerVASTART(Op, DAG);
10630   case ISD::VAARG:              return LowerVAARG(Op, DAG);
10631   case ISD::VACOPY:             return LowerVACOPY(Op, DAG);
10632 
10633   case ISD::STACKRESTORE:       return LowerSTACKRESTORE(Op, DAG);
10634   case ISD::DYNAMIC_STACKALLOC: return LowerDYNAMIC_STACKALLOC(Op, DAG);
10635   case ISD::GET_DYNAMIC_AREA_OFFSET:
10636     return LowerGET_DYNAMIC_AREA_OFFSET(Op, DAG);
10637 
10638   // Exception handling lowering.
10639   case ISD::EH_DWARF_CFA:       return LowerEH_DWARF_CFA(Op, DAG);
10640   case ISD::EH_SJLJ_SETJMP:     return lowerEH_SJLJ_SETJMP(Op, DAG);
10641   case ISD::EH_SJLJ_LONGJMP:    return lowerEH_SJLJ_LONGJMP(Op, DAG);
10642 
10643   case ISD::LOAD:               return LowerLOAD(Op, DAG);
10644   case ISD::STORE:              return LowerSTORE(Op, DAG);
10645   case ISD::TRUNCATE:           return LowerTRUNCATE(Op, DAG);
10646   case ISD::SELECT_CC:          return LowerSELECT_CC(Op, DAG);
10647   case ISD::STRICT_FP_TO_UINT:
10648   case ISD::STRICT_FP_TO_SINT:
10649   case ISD::FP_TO_UINT:
10650   case ISD::FP_TO_SINT:         return LowerFP_TO_INT(Op, DAG, SDLoc(Op));
10651   case ISD::STRICT_UINT_TO_FP:
10652   case ISD::STRICT_SINT_TO_FP:
10653   case ISD::UINT_TO_FP:
10654   case ISD::SINT_TO_FP:         return LowerINT_TO_FP(Op, DAG);
10655   case ISD::FLT_ROUNDS_:        return LowerFLT_ROUNDS_(Op, DAG);
10656 
10657   // Lower 64-bit shifts.
10658   case ISD::SHL_PARTS:          return LowerSHL_PARTS(Op, DAG);
10659   case ISD::SRL_PARTS:          return LowerSRL_PARTS(Op, DAG);
10660   case ISD::SRA_PARTS:          return LowerSRA_PARTS(Op, DAG);
10661 
10662   case ISD::FSHL:               return LowerFunnelShift(Op, DAG);
10663   case ISD::FSHR:               return LowerFunnelShift(Op, DAG);
10664 
10665   // Vector-related lowering.
10666   case ISD::BUILD_VECTOR:       return LowerBUILD_VECTOR(Op, DAG);
10667   case ISD::VECTOR_SHUFFLE:     return LowerVECTOR_SHUFFLE(Op, DAG);
10668   case ISD::INTRINSIC_WO_CHAIN: return LowerINTRINSIC_WO_CHAIN(Op, DAG);
10669   case ISD::SCALAR_TO_VECTOR:   return LowerSCALAR_TO_VECTOR(Op, DAG);
10670   case ISD::INSERT_VECTOR_ELT:  return LowerINSERT_VECTOR_ELT(Op, DAG);
10671   case ISD::MUL:                return LowerMUL(Op, DAG);
10672   case ISD::ABS:                return LowerABS(Op, DAG);
10673   case ISD::FP_EXTEND:          return LowerFP_EXTEND(Op, DAG);
10674   case ISD::ROTL:               return LowerROTL(Op, DAG);
10675 
10676   // For counter-based loop handling.
10677   case ISD::INTRINSIC_W_CHAIN:  return SDValue();
10678 
10679   case ISD::BITCAST:            return LowerBITCAST(Op, DAG);
10680 
10681   // Frame & Return address.
10682   case ISD::RETURNADDR:         return LowerRETURNADDR(Op, DAG);
10683   case ISD::FRAMEADDR:          return LowerFRAMEADDR(Op, DAG);
10684 
10685   case ISD::INTRINSIC_VOID:
10686     return LowerINTRINSIC_VOID(Op, DAG);
10687   case ISD::BSWAP:
10688     return LowerBSWAP(Op, DAG);
10689   case ISD::ATOMIC_CMP_SWAP:
10690     return LowerATOMIC_CMP_SWAP(Op, DAG);
10691   }
10692 }
10693 
10694 void PPCTargetLowering::LowerOperationWrapper(SDNode *N,
10695                                               SmallVectorImpl<SDValue> &Results,
10696                                               SelectionDAG &DAG) const {
10697   SDValue Res = LowerOperation(SDValue(N, 0), DAG);
10698 
10699   if (!Res.getNode())
10700     return;
10701 
10702   // Take the return value as-is if original node has only one result.
10703   if (N->getNumValues() == 1) {
10704     Results.push_back(Res);
10705     return;
10706   }
10707 
10708   // New node should have the same number of results.
10709   assert((N->getNumValues() == Res->getNumValues()) &&
10710       "Lowering returned the wrong number of results!");
10711 
10712   for (unsigned i = 0; i < N->getNumValues(); ++i)
10713     Results.push_back(Res.getValue(i));
10714 }
10715 
10716 void PPCTargetLowering::ReplaceNodeResults(SDNode *N,
10717                                            SmallVectorImpl<SDValue>&Results,
10718                                            SelectionDAG &DAG) const {
10719   SDLoc dl(N);
10720   switch (N->getOpcode()) {
10721   default:
10722     llvm_unreachable("Do not know how to custom type legalize this operation!");
10723   case ISD::READCYCLECOUNTER: {
10724     SDVTList VTs = DAG.getVTList(MVT::i32, MVT::i32, MVT::Other);
10725     SDValue RTB = DAG.getNode(PPCISD::READ_TIME_BASE, dl, VTs, N->getOperand(0));
10726 
10727     Results.push_back(
10728         DAG.getNode(ISD::BUILD_PAIR, dl, MVT::i64, RTB, RTB.getValue(1)));
10729     Results.push_back(RTB.getValue(2));
10730     break;
10731   }
10732   case ISD::INTRINSIC_W_CHAIN: {
10733     if (cast<ConstantSDNode>(N->getOperand(1))->getZExtValue() !=
10734         Intrinsic::loop_decrement)
10735       break;
10736 
10737     assert(N->getValueType(0) == MVT::i1 &&
10738            "Unexpected result type for CTR decrement intrinsic");
10739     EVT SVT = getSetCCResultType(DAG.getDataLayout(), *DAG.getContext(),
10740                                  N->getValueType(0));
10741     SDVTList VTs = DAG.getVTList(SVT, MVT::Other);
10742     SDValue NewInt = DAG.getNode(N->getOpcode(), dl, VTs, N->getOperand(0),
10743                                  N->getOperand(1));
10744 
10745     Results.push_back(DAG.getNode(ISD::TRUNCATE, dl, MVT::i1, NewInt));
10746     Results.push_back(NewInt.getValue(1));
10747     break;
10748   }
10749   case ISD::VAARG: {
10750     if (!Subtarget.isSVR4ABI() || Subtarget.isPPC64())
10751       return;
10752 
10753     EVT VT = N->getValueType(0);
10754 
10755     if (VT == MVT::i64) {
10756       SDValue NewNode = LowerVAARG(SDValue(N, 1), DAG);
10757 
10758       Results.push_back(NewNode);
10759       Results.push_back(NewNode.getValue(1));
10760     }
10761     return;
10762   }
10763   case ISD::STRICT_FP_TO_SINT:
10764   case ISD::STRICT_FP_TO_UINT:
10765   case ISD::FP_TO_SINT:
10766   case ISD::FP_TO_UINT:
10767     // LowerFP_TO_INT() can only handle f32 and f64.
10768     if (N->getOperand(N->isStrictFPOpcode() ? 1 : 0).getValueType() ==
10769         MVT::ppcf128)
10770       return;
10771     Results.push_back(LowerFP_TO_INT(SDValue(N, 0), DAG, dl));
10772     return;
10773   case ISD::TRUNCATE: {
10774     if (!N->getValueType(0).isVector())
10775       return;
10776     SDValue Lowered = LowerTRUNCATEVector(SDValue(N, 0), DAG);
10777     if (Lowered)
10778       Results.push_back(Lowered);
10779     return;
10780   }
10781   case ISD::BITCAST:
10782     // Don't handle bitcast here.
10783     return;
10784   case ISD::FP_EXTEND:
10785     SDValue Lowered = LowerFP_EXTEND(SDValue(N, 0), DAG);
10786     if (Lowered)
10787       Results.push_back(Lowered);
10788     return;
10789   }
10790 }
10791 
10792 //===----------------------------------------------------------------------===//
10793 //  Other Lowering Code
10794 //===----------------------------------------------------------------------===//
10795 
10796 static Instruction* callIntrinsic(IRBuilder<> &Builder, Intrinsic::ID Id) {
10797   Module *M = Builder.GetInsertBlock()->getParent()->getParent();
10798   Function *Func = Intrinsic::getDeclaration(M, Id);
10799   return Builder.CreateCall(Func, {});
10800 }
10801 
10802 // The mappings for emitLeading/TrailingFence is taken from
10803 // http://www.cl.cam.ac.uk/~pes20/cpp/cpp0xmappings.html
10804 Instruction *PPCTargetLowering::emitLeadingFence(IRBuilder<> &Builder,
10805                                                  Instruction *Inst,
10806                                                  AtomicOrdering Ord) const {
10807   if (Ord == AtomicOrdering::SequentiallyConsistent)
10808     return callIntrinsic(Builder, Intrinsic::ppc_sync);
10809   if (isReleaseOrStronger(Ord))
10810     return callIntrinsic(Builder, Intrinsic::ppc_lwsync);
10811   return nullptr;
10812 }
10813 
10814 Instruction *PPCTargetLowering::emitTrailingFence(IRBuilder<> &Builder,
10815                                                   Instruction *Inst,
10816                                                   AtomicOrdering Ord) const {
10817   if (Inst->hasAtomicLoad() && isAcquireOrStronger(Ord)) {
10818     // See http://www.cl.cam.ac.uk/~pes20/cpp/cpp0xmappings.html and
10819     // http://www.rdrop.com/users/paulmck/scalability/paper/N2745r.2011.03.04a.html
10820     // and http://www.cl.cam.ac.uk/~pes20/cppppc/ for justification.
10821     if (isa<LoadInst>(Inst) && Subtarget.isPPC64())
10822       return Builder.CreateCall(
10823           Intrinsic::getDeclaration(
10824               Builder.GetInsertBlock()->getParent()->getParent(),
10825               Intrinsic::ppc_cfence, {Inst->getType()}),
10826           {Inst});
10827     // FIXME: Can use isync for rmw operation.
10828     return callIntrinsic(Builder, Intrinsic::ppc_lwsync);
10829   }
10830   return nullptr;
10831 }
10832 
10833 MachineBasicBlock *
10834 PPCTargetLowering::EmitAtomicBinary(MachineInstr &MI, MachineBasicBlock *BB,
10835                                     unsigned AtomicSize,
10836                                     unsigned BinOpcode,
10837                                     unsigned CmpOpcode,
10838                                     unsigned CmpPred) const {
10839   // This also handles ATOMIC_SWAP, indicated by BinOpcode==0.
10840   const TargetInstrInfo *TII = Subtarget.getInstrInfo();
10841 
10842   auto LoadMnemonic = PPC::LDARX;
10843   auto StoreMnemonic = PPC::STDCX;
10844   switch (AtomicSize) {
10845   default:
10846     llvm_unreachable("Unexpected size of atomic entity");
10847   case 1:
10848     LoadMnemonic = PPC::LBARX;
10849     StoreMnemonic = PPC::STBCX;
10850     assert(Subtarget.hasPartwordAtomics() && "Call this only with size >=4");
10851     break;
10852   case 2:
10853     LoadMnemonic = PPC::LHARX;
10854     StoreMnemonic = PPC::STHCX;
10855     assert(Subtarget.hasPartwordAtomics() && "Call this only with size >=4");
10856     break;
10857   case 4:
10858     LoadMnemonic = PPC::LWARX;
10859     StoreMnemonic = PPC::STWCX;
10860     break;
10861   case 8:
10862     LoadMnemonic = PPC::LDARX;
10863     StoreMnemonic = PPC::STDCX;
10864     break;
10865   }
10866 
10867   const BasicBlock *LLVM_BB = BB->getBasicBlock();
10868   MachineFunction *F = BB->getParent();
10869   MachineFunction::iterator It = ++BB->getIterator();
10870 
10871   Register dest = MI.getOperand(0).getReg();
10872   Register ptrA = MI.getOperand(1).getReg();
10873   Register ptrB = MI.getOperand(2).getReg();
10874   Register incr = MI.getOperand(3).getReg();
10875   DebugLoc dl = MI.getDebugLoc();
10876 
10877   MachineBasicBlock *loopMBB = F->CreateMachineBasicBlock(LLVM_BB);
10878   MachineBasicBlock *loop2MBB =
10879     CmpOpcode ? F->CreateMachineBasicBlock(LLVM_BB) : nullptr;
10880   MachineBasicBlock *exitMBB = F->CreateMachineBasicBlock(LLVM_BB);
10881   F->insert(It, loopMBB);
10882   if (CmpOpcode)
10883     F->insert(It, loop2MBB);
10884   F->insert(It, exitMBB);
10885   exitMBB->splice(exitMBB->begin(), BB,
10886                   std::next(MachineBasicBlock::iterator(MI)), BB->end());
10887   exitMBB->transferSuccessorsAndUpdatePHIs(BB);
10888 
10889   MachineRegisterInfo &RegInfo = F->getRegInfo();
10890   Register TmpReg = (!BinOpcode) ? incr :
10891     RegInfo.createVirtualRegister( AtomicSize == 8 ? &PPC::G8RCRegClass
10892                                            : &PPC::GPRCRegClass);
10893 
10894   //  thisMBB:
10895   //   ...
10896   //   fallthrough --> loopMBB
10897   BB->addSuccessor(loopMBB);
10898 
10899   //  loopMBB:
10900   //   l[wd]arx dest, ptr
10901   //   add r0, dest, incr
10902   //   st[wd]cx. r0, ptr
10903   //   bne- loopMBB
10904   //   fallthrough --> exitMBB
10905 
10906   // For max/min...
10907   //  loopMBB:
10908   //   l[wd]arx dest, ptr
10909   //   cmpl?[wd] incr, dest
10910   //   bgt exitMBB
10911   //  loop2MBB:
10912   //   st[wd]cx. dest, ptr
10913   //   bne- loopMBB
10914   //   fallthrough --> exitMBB
10915 
10916   BB = loopMBB;
10917   BuildMI(BB, dl, TII->get(LoadMnemonic), dest)
10918     .addReg(ptrA).addReg(ptrB);
10919   if (BinOpcode)
10920     BuildMI(BB, dl, TII->get(BinOpcode), TmpReg).addReg(incr).addReg(dest);
10921   if (CmpOpcode) {
10922     // Signed comparisons of byte or halfword values must be sign-extended.
10923     if (CmpOpcode == PPC::CMPW && AtomicSize < 4) {
10924       Register ExtReg = RegInfo.createVirtualRegister(&PPC::GPRCRegClass);
10925       BuildMI(BB, dl, TII->get(AtomicSize == 1 ? PPC::EXTSB : PPC::EXTSH),
10926               ExtReg).addReg(dest);
10927       BuildMI(BB, dl, TII->get(CmpOpcode), PPC::CR0)
10928         .addReg(incr).addReg(ExtReg);
10929     } else
10930       BuildMI(BB, dl, TII->get(CmpOpcode), PPC::CR0)
10931         .addReg(incr).addReg(dest);
10932 
10933     BuildMI(BB, dl, TII->get(PPC::BCC))
10934       .addImm(CmpPred).addReg(PPC::CR0).addMBB(exitMBB);
10935     BB->addSuccessor(loop2MBB);
10936     BB->addSuccessor(exitMBB);
10937     BB = loop2MBB;
10938   }
10939   BuildMI(BB, dl, TII->get(StoreMnemonic))
10940     .addReg(TmpReg).addReg(ptrA).addReg(ptrB);
10941   BuildMI(BB, dl, TII->get(PPC::BCC))
10942     .addImm(PPC::PRED_NE).addReg(PPC::CR0).addMBB(loopMBB);
10943   BB->addSuccessor(loopMBB);
10944   BB->addSuccessor(exitMBB);
10945 
10946   //  exitMBB:
10947   //   ...
10948   BB = exitMBB;
10949   return BB;
10950 }
10951 
10952 MachineBasicBlock *PPCTargetLowering::EmitPartwordAtomicBinary(
10953     MachineInstr &MI, MachineBasicBlock *BB,
10954     bool is8bit, // operation
10955     unsigned BinOpcode, unsigned CmpOpcode, unsigned CmpPred) const {
10956   // If we support part-word atomic mnemonics, just use them
10957   if (Subtarget.hasPartwordAtomics())
10958     return EmitAtomicBinary(MI, BB, is8bit ? 1 : 2, BinOpcode, CmpOpcode,
10959                             CmpPred);
10960 
10961   // This also handles ATOMIC_SWAP, indicated by BinOpcode==0.
10962   const TargetInstrInfo *TII = Subtarget.getInstrInfo();
10963   // In 64 bit mode we have to use 64 bits for addresses, even though the
10964   // lwarx/stwcx are 32 bits.  With the 32-bit atomics we can use address
10965   // registers without caring whether they're 32 or 64, but here we're
10966   // doing actual arithmetic on the addresses.
10967   bool is64bit = Subtarget.isPPC64();
10968   bool isLittleEndian = Subtarget.isLittleEndian();
10969   unsigned ZeroReg = is64bit ? PPC::ZERO8 : PPC::ZERO;
10970 
10971   const BasicBlock *LLVM_BB = BB->getBasicBlock();
10972   MachineFunction *F = BB->getParent();
10973   MachineFunction::iterator It = ++BB->getIterator();
10974 
10975   Register dest = MI.getOperand(0).getReg();
10976   Register ptrA = MI.getOperand(1).getReg();
10977   Register ptrB = MI.getOperand(2).getReg();
10978   Register incr = MI.getOperand(3).getReg();
10979   DebugLoc dl = MI.getDebugLoc();
10980 
10981   MachineBasicBlock *loopMBB = F->CreateMachineBasicBlock(LLVM_BB);
10982   MachineBasicBlock *loop2MBB =
10983       CmpOpcode ? F->CreateMachineBasicBlock(LLVM_BB) : nullptr;
10984   MachineBasicBlock *exitMBB = F->CreateMachineBasicBlock(LLVM_BB);
10985   F->insert(It, loopMBB);
10986   if (CmpOpcode)
10987     F->insert(It, loop2MBB);
10988   F->insert(It, exitMBB);
10989   exitMBB->splice(exitMBB->begin(), BB,
10990                   std::next(MachineBasicBlock::iterator(MI)), BB->end());
10991   exitMBB->transferSuccessorsAndUpdatePHIs(BB);
10992 
10993   MachineRegisterInfo &RegInfo = F->getRegInfo();
10994   const TargetRegisterClass *RC =
10995       is64bit ? &PPC::G8RCRegClass : &PPC::GPRCRegClass;
10996   const TargetRegisterClass *GPRC = &PPC::GPRCRegClass;
10997 
10998   Register PtrReg = RegInfo.createVirtualRegister(RC);
10999   Register Shift1Reg = RegInfo.createVirtualRegister(GPRC);
11000   Register ShiftReg =
11001       isLittleEndian ? Shift1Reg : RegInfo.createVirtualRegister(GPRC);
11002   Register Incr2Reg = RegInfo.createVirtualRegister(GPRC);
11003   Register MaskReg = RegInfo.createVirtualRegister(GPRC);
11004   Register Mask2Reg = RegInfo.createVirtualRegister(GPRC);
11005   Register Mask3Reg = RegInfo.createVirtualRegister(GPRC);
11006   Register Tmp2Reg = RegInfo.createVirtualRegister(GPRC);
11007   Register Tmp3Reg = RegInfo.createVirtualRegister(GPRC);
11008   Register Tmp4Reg = RegInfo.createVirtualRegister(GPRC);
11009   Register TmpDestReg = RegInfo.createVirtualRegister(GPRC);
11010   Register Ptr1Reg;
11011   Register TmpReg =
11012       (!BinOpcode) ? Incr2Reg : RegInfo.createVirtualRegister(GPRC);
11013 
11014   //  thisMBB:
11015   //   ...
11016   //   fallthrough --> loopMBB
11017   BB->addSuccessor(loopMBB);
11018 
11019   // The 4-byte load must be aligned, while a char or short may be
11020   // anywhere in the word.  Hence all this nasty bookkeeping code.
11021   //   add ptr1, ptrA, ptrB [copy if ptrA==0]
11022   //   rlwinm shift1, ptr1, 3, 27, 28 [3, 27, 27]
11023   //   xori shift, shift1, 24 [16]
11024   //   rlwinm ptr, ptr1, 0, 0, 29
11025   //   slw incr2, incr, shift
11026   //   li mask2, 255 [li mask3, 0; ori mask2, mask3, 65535]
11027   //   slw mask, mask2, shift
11028   //  loopMBB:
11029   //   lwarx tmpDest, ptr
11030   //   add tmp, tmpDest, incr2
11031   //   andc tmp2, tmpDest, mask
11032   //   and tmp3, tmp, mask
11033   //   or tmp4, tmp3, tmp2
11034   //   stwcx. tmp4, ptr
11035   //   bne- loopMBB
11036   //   fallthrough --> exitMBB
11037   //   srw dest, tmpDest, shift
11038   if (ptrA != ZeroReg) {
11039     Ptr1Reg = RegInfo.createVirtualRegister(RC);
11040     BuildMI(BB, dl, TII->get(is64bit ? PPC::ADD8 : PPC::ADD4), Ptr1Reg)
11041         .addReg(ptrA)
11042         .addReg(ptrB);
11043   } else {
11044     Ptr1Reg = ptrB;
11045   }
11046   // We need use 32-bit subregister to avoid mismatch register class in 64-bit
11047   // mode.
11048   BuildMI(BB, dl, TII->get(PPC::RLWINM), Shift1Reg)
11049       .addReg(Ptr1Reg, 0, is64bit ? PPC::sub_32 : 0)
11050       .addImm(3)
11051       .addImm(27)
11052       .addImm(is8bit ? 28 : 27);
11053   if (!isLittleEndian)
11054     BuildMI(BB, dl, TII->get(PPC::XORI), ShiftReg)
11055         .addReg(Shift1Reg)
11056         .addImm(is8bit ? 24 : 16);
11057   if (is64bit)
11058     BuildMI(BB, dl, TII->get(PPC::RLDICR), PtrReg)
11059         .addReg(Ptr1Reg)
11060         .addImm(0)
11061         .addImm(61);
11062   else
11063     BuildMI(BB, dl, TII->get(PPC::RLWINM), PtrReg)
11064         .addReg(Ptr1Reg)
11065         .addImm(0)
11066         .addImm(0)
11067         .addImm(29);
11068   BuildMI(BB, dl, TII->get(PPC::SLW), Incr2Reg).addReg(incr).addReg(ShiftReg);
11069   if (is8bit)
11070     BuildMI(BB, dl, TII->get(PPC::LI), Mask2Reg).addImm(255);
11071   else {
11072     BuildMI(BB, dl, TII->get(PPC::LI), Mask3Reg).addImm(0);
11073     BuildMI(BB, dl, TII->get(PPC::ORI), Mask2Reg)
11074         .addReg(Mask3Reg)
11075         .addImm(65535);
11076   }
11077   BuildMI(BB, dl, TII->get(PPC::SLW), MaskReg)
11078       .addReg(Mask2Reg)
11079       .addReg(ShiftReg);
11080 
11081   BB = loopMBB;
11082   BuildMI(BB, dl, TII->get(PPC::LWARX), TmpDestReg)
11083       .addReg(ZeroReg)
11084       .addReg(PtrReg);
11085   if (BinOpcode)
11086     BuildMI(BB, dl, TII->get(BinOpcode), TmpReg)
11087         .addReg(Incr2Reg)
11088         .addReg(TmpDestReg);
11089   BuildMI(BB, dl, TII->get(PPC::ANDC), Tmp2Reg)
11090       .addReg(TmpDestReg)
11091       .addReg(MaskReg);
11092   BuildMI(BB, dl, TII->get(PPC::AND), Tmp3Reg).addReg(TmpReg).addReg(MaskReg);
11093   if (CmpOpcode) {
11094     // For unsigned comparisons, we can directly compare the shifted values.
11095     // For signed comparisons we shift and sign extend.
11096     Register SReg = RegInfo.createVirtualRegister(GPRC);
11097     BuildMI(BB, dl, TII->get(PPC::AND), SReg)
11098         .addReg(TmpDestReg)
11099         .addReg(MaskReg);
11100     unsigned ValueReg = SReg;
11101     unsigned CmpReg = Incr2Reg;
11102     if (CmpOpcode == PPC::CMPW) {
11103       ValueReg = RegInfo.createVirtualRegister(GPRC);
11104       BuildMI(BB, dl, TII->get(PPC::SRW), ValueReg)
11105           .addReg(SReg)
11106           .addReg(ShiftReg);
11107       Register ValueSReg = RegInfo.createVirtualRegister(GPRC);
11108       BuildMI(BB, dl, TII->get(is8bit ? PPC::EXTSB : PPC::EXTSH), ValueSReg)
11109           .addReg(ValueReg);
11110       ValueReg = ValueSReg;
11111       CmpReg = incr;
11112     }
11113     BuildMI(BB, dl, TII->get(CmpOpcode), PPC::CR0)
11114         .addReg(CmpReg)
11115         .addReg(ValueReg);
11116     BuildMI(BB, dl, TII->get(PPC::BCC))
11117         .addImm(CmpPred)
11118         .addReg(PPC::CR0)
11119         .addMBB(exitMBB);
11120     BB->addSuccessor(loop2MBB);
11121     BB->addSuccessor(exitMBB);
11122     BB = loop2MBB;
11123   }
11124   BuildMI(BB, dl, TII->get(PPC::OR), Tmp4Reg).addReg(Tmp3Reg).addReg(Tmp2Reg);
11125   BuildMI(BB, dl, TII->get(PPC::STWCX))
11126       .addReg(Tmp4Reg)
11127       .addReg(ZeroReg)
11128       .addReg(PtrReg);
11129   BuildMI(BB, dl, TII->get(PPC::BCC))
11130       .addImm(PPC::PRED_NE)
11131       .addReg(PPC::CR0)
11132       .addMBB(loopMBB);
11133   BB->addSuccessor(loopMBB);
11134   BB->addSuccessor(exitMBB);
11135 
11136   //  exitMBB:
11137   //   ...
11138   BB = exitMBB;
11139   BuildMI(*BB, BB->begin(), dl, TII->get(PPC::SRW), dest)
11140       .addReg(TmpDestReg)
11141       .addReg(ShiftReg);
11142   return BB;
11143 }
11144 
11145 llvm::MachineBasicBlock *
11146 PPCTargetLowering::emitEHSjLjSetJmp(MachineInstr &MI,
11147                                     MachineBasicBlock *MBB) const {
11148   DebugLoc DL = MI.getDebugLoc();
11149   const TargetInstrInfo *TII = Subtarget.getInstrInfo();
11150   const PPCRegisterInfo *TRI = Subtarget.getRegisterInfo();
11151 
11152   MachineFunction *MF = MBB->getParent();
11153   MachineRegisterInfo &MRI = MF->getRegInfo();
11154 
11155   const BasicBlock *BB = MBB->getBasicBlock();
11156   MachineFunction::iterator I = ++MBB->getIterator();
11157 
11158   Register DstReg = MI.getOperand(0).getReg();
11159   const TargetRegisterClass *RC = MRI.getRegClass(DstReg);
11160   assert(TRI->isTypeLegalForClass(*RC, MVT::i32) && "Invalid destination!");
11161   Register mainDstReg = MRI.createVirtualRegister(RC);
11162   Register restoreDstReg = MRI.createVirtualRegister(RC);
11163 
11164   MVT PVT = getPointerTy(MF->getDataLayout());
11165   assert((PVT == MVT::i64 || PVT == MVT::i32) &&
11166          "Invalid Pointer Size!");
11167   // For v = setjmp(buf), we generate
11168   //
11169   // thisMBB:
11170   //  SjLjSetup mainMBB
11171   //  bl mainMBB
11172   //  v_restore = 1
11173   //  b sinkMBB
11174   //
11175   // mainMBB:
11176   //  buf[LabelOffset] = LR
11177   //  v_main = 0
11178   //
11179   // sinkMBB:
11180   //  v = phi(main, restore)
11181   //
11182 
11183   MachineBasicBlock *thisMBB = MBB;
11184   MachineBasicBlock *mainMBB = MF->CreateMachineBasicBlock(BB);
11185   MachineBasicBlock *sinkMBB = MF->CreateMachineBasicBlock(BB);
11186   MF->insert(I, mainMBB);
11187   MF->insert(I, sinkMBB);
11188 
11189   MachineInstrBuilder MIB;
11190 
11191   // Transfer the remainder of BB and its successor edges to sinkMBB.
11192   sinkMBB->splice(sinkMBB->begin(), MBB,
11193                   std::next(MachineBasicBlock::iterator(MI)), MBB->end());
11194   sinkMBB->transferSuccessorsAndUpdatePHIs(MBB);
11195 
11196   // Note that the structure of the jmp_buf used here is not compatible
11197   // with that used by libc, and is not designed to be. Specifically, it
11198   // stores only those 'reserved' registers that LLVM does not otherwise
11199   // understand how to spill. Also, by convention, by the time this
11200   // intrinsic is called, Clang has already stored the frame address in the
11201   // first slot of the buffer and stack address in the third. Following the
11202   // X86 target code, we'll store the jump address in the second slot. We also
11203   // need to save the TOC pointer (R2) to handle jumps between shared
11204   // libraries, and that will be stored in the fourth slot. The thread
11205   // identifier (R13) is not affected.
11206 
11207   // thisMBB:
11208   const int64_t LabelOffset = 1 * PVT.getStoreSize();
11209   const int64_t TOCOffset   = 3 * PVT.getStoreSize();
11210   const int64_t BPOffset    = 4 * PVT.getStoreSize();
11211 
11212   // Prepare IP either in reg.
11213   const TargetRegisterClass *PtrRC = getRegClassFor(PVT);
11214   Register LabelReg = MRI.createVirtualRegister(PtrRC);
11215   Register BufReg = MI.getOperand(1).getReg();
11216 
11217   if (Subtarget.is64BitELFABI()) {
11218     setUsesTOCBasePtr(*MBB->getParent());
11219     MIB = BuildMI(*thisMBB, MI, DL, TII->get(PPC::STD))
11220               .addReg(PPC::X2)
11221               .addImm(TOCOffset)
11222               .addReg(BufReg)
11223               .cloneMemRefs(MI);
11224   }
11225 
11226   // Naked functions never have a base pointer, and so we use r1. For all
11227   // other functions, this decision must be delayed until during PEI.
11228   unsigned BaseReg;
11229   if (MF->getFunction().hasFnAttribute(Attribute::Naked))
11230     BaseReg = Subtarget.isPPC64() ? PPC::X1 : PPC::R1;
11231   else
11232     BaseReg = Subtarget.isPPC64() ? PPC::BP8 : PPC::BP;
11233 
11234   MIB = BuildMI(*thisMBB, MI, DL,
11235                 TII->get(Subtarget.isPPC64() ? PPC::STD : PPC::STW))
11236             .addReg(BaseReg)
11237             .addImm(BPOffset)
11238             .addReg(BufReg)
11239             .cloneMemRefs(MI);
11240 
11241   // Setup
11242   MIB = BuildMI(*thisMBB, MI, DL, TII->get(PPC::BCLalways)).addMBB(mainMBB);
11243   MIB.addRegMask(TRI->getNoPreservedMask());
11244 
11245   BuildMI(*thisMBB, MI, DL, TII->get(PPC::LI), restoreDstReg).addImm(1);
11246 
11247   MIB = BuildMI(*thisMBB, MI, DL, TII->get(PPC::EH_SjLj_Setup))
11248           .addMBB(mainMBB);
11249   MIB = BuildMI(*thisMBB, MI, DL, TII->get(PPC::B)).addMBB(sinkMBB);
11250 
11251   thisMBB->addSuccessor(mainMBB, BranchProbability::getZero());
11252   thisMBB->addSuccessor(sinkMBB, BranchProbability::getOne());
11253 
11254   // mainMBB:
11255   //  mainDstReg = 0
11256   MIB =
11257       BuildMI(mainMBB, DL,
11258               TII->get(Subtarget.isPPC64() ? PPC::MFLR8 : PPC::MFLR), LabelReg);
11259 
11260   // Store IP
11261   if (Subtarget.isPPC64()) {
11262     MIB = BuildMI(mainMBB, DL, TII->get(PPC::STD))
11263             .addReg(LabelReg)
11264             .addImm(LabelOffset)
11265             .addReg(BufReg);
11266   } else {
11267     MIB = BuildMI(mainMBB, DL, TII->get(PPC::STW))
11268             .addReg(LabelReg)
11269             .addImm(LabelOffset)
11270             .addReg(BufReg);
11271   }
11272   MIB.cloneMemRefs(MI);
11273 
11274   BuildMI(mainMBB, DL, TII->get(PPC::LI), mainDstReg).addImm(0);
11275   mainMBB->addSuccessor(sinkMBB);
11276 
11277   // sinkMBB:
11278   BuildMI(*sinkMBB, sinkMBB->begin(), DL,
11279           TII->get(PPC::PHI), DstReg)
11280     .addReg(mainDstReg).addMBB(mainMBB)
11281     .addReg(restoreDstReg).addMBB(thisMBB);
11282 
11283   MI.eraseFromParent();
11284   return sinkMBB;
11285 }
11286 
11287 MachineBasicBlock *
11288 PPCTargetLowering::emitEHSjLjLongJmp(MachineInstr &MI,
11289                                      MachineBasicBlock *MBB) const {
11290   DebugLoc DL = MI.getDebugLoc();
11291   const TargetInstrInfo *TII = Subtarget.getInstrInfo();
11292 
11293   MachineFunction *MF = MBB->getParent();
11294   MachineRegisterInfo &MRI = MF->getRegInfo();
11295 
11296   MVT PVT = getPointerTy(MF->getDataLayout());
11297   assert((PVT == MVT::i64 || PVT == MVT::i32) &&
11298          "Invalid Pointer Size!");
11299 
11300   const TargetRegisterClass *RC =
11301     (PVT == MVT::i64) ? &PPC::G8RCRegClass : &PPC::GPRCRegClass;
11302   Register Tmp = MRI.createVirtualRegister(RC);
11303   // Since FP is only updated here but NOT referenced, it's treated as GPR.
11304   unsigned FP  = (PVT == MVT::i64) ? PPC::X31 : PPC::R31;
11305   unsigned SP  = (PVT == MVT::i64) ? PPC::X1 : PPC::R1;
11306   unsigned BP =
11307       (PVT == MVT::i64)
11308           ? PPC::X30
11309           : (Subtarget.isSVR4ABI() && isPositionIndependent() ? PPC::R29
11310                                                               : PPC::R30);
11311 
11312   MachineInstrBuilder MIB;
11313 
11314   const int64_t LabelOffset = 1 * PVT.getStoreSize();
11315   const int64_t SPOffset    = 2 * PVT.getStoreSize();
11316   const int64_t TOCOffset   = 3 * PVT.getStoreSize();
11317   const int64_t BPOffset    = 4 * PVT.getStoreSize();
11318 
11319   Register BufReg = MI.getOperand(0).getReg();
11320 
11321   // Reload FP (the jumped-to function may not have had a
11322   // frame pointer, and if so, then its r31 will be restored
11323   // as necessary).
11324   if (PVT == MVT::i64) {
11325     MIB = BuildMI(*MBB, MI, DL, TII->get(PPC::LD), FP)
11326             .addImm(0)
11327             .addReg(BufReg);
11328   } else {
11329     MIB = BuildMI(*MBB, MI, DL, TII->get(PPC::LWZ), FP)
11330             .addImm(0)
11331             .addReg(BufReg);
11332   }
11333   MIB.cloneMemRefs(MI);
11334 
11335   // Reload IP
11336   if (PVT == MVT::i64) {
11337     MIB = BuildMI(*MBB, MI, DL, TII->get(PPC::LD), Tmp)
11338             .addImm(LabelOffset)
11339             .addReg(BufReg);
11340   } else {
11341     MIB = BuildMI(*MBB, MI, DL, TII->get(PPC::LWZ), Tmp)
11342             .addImm(LabelOffset)
11343             .addReg(BufReg);
11344   }
11345   MIB.cloneMemRefs(MI);
11346 
11347   // Reload SP
11348   if (PVT == MVT::i64) {
11349     MIB = BuildMI(*MBB, MI, DL, TII->get(PPC::LD), SP)
11350             .addImm(SPOffset)
11351             .addReg(BufReg);
11352   } else {
11353     MIB = BuildMI(*MBB, MI, DL, TII->get(PPC::LWZ), SP)
11354             .addImm(SPOffset)
11355             .addReg(BufReg);
11356   }
11357   MIB.cloneMemRefs(MI);
11358 
11359   // Reload BP
11360   if (PVT == MVT::i64) {
11361     MIB = BuildMI(*MBB, MI, DL, TII->get(PPC::LD), BP)
11362             .addImm(BPOffset)
11363             .addReg(BufReg);
11364   } else {
11365     MIB = BuildMI(*MBB, MI, DL, TII->get(PPC::LWZ), BP)
11366             .addImm(BPOffset)
11367             .addReg(BufReg);
11368   }
11369   MIB.cloneMemRefs(MI);
11370 
11371   // Reload TOC
11372   if (PVT == MVT::i64 && Subtarget.isSVR4ABI()) {
11373     setUsesTOCBasePtr(*MBB->getParent());
11374     MIB = BuildMI(*MBB, MI, DL, TII->get(PPC::LD), PPC::X2)
11375               .addImm(TOCOffset)
11376               .addReg(BufReg)
11377               .cloneMemRefs(MI);
11378   }
11379 
11380   // Jump
11381   BuildMI(*MBB, MI, DL,
11382           TII->get(PVT == MVT::i64 ? PPC::MTCTR8 : PPC::MTCTR)).addReg(Tmp);
11383   BuildMI(*MBB, MI, DL, TII->get(PVT == MVT::i64 ? PPC::BCTR8 : PPC::BCTR));
11384 
11385   MI.eraseFromParent();
11386   return MBB;
11387 }
11388 
11389 bool PPCTargetLowering::hasInlineStackProbe(MachineFunction &MF) const {
11390   // If the function specifically requests inline stack probes, emit them.
11391   if (MF.getFunction().hasFnAttribute("probe-stack"))
11392     return MF.getFunction().getFnAttribute("probe-stack").getValueAsString() ==
11393            "inline-asm";
11394   return false;
11395 }
11396 
11397 unsigned PPCTargetLowering::getStackProbeSize(MachineFunction &MF) const {
11398   const TargetFrameLowering *TFI = Subtarget.getFrameLowering();
11399   unsigned StackAlign = TFI->getStackAlignment();
11400   assert(StackAlign >= 1 && isPowerOf2_32(StackAlign) &&
11401          "Unexpected stack alignment");
11402   // The default stack probe size is 4096 if the function has no
11403   // stack-probe-size attribute.
11404   unsigned StackProbeSize = 4096;
11405   const Function &Fn = MF.getFunction();
11406   if (Fn.hasFnAttribute("stack-probe-size"))
11407     Fn.getFnAttribute("stack-probe-size")
11408         .getValueAsString()
11409         .getAsInteger(0, StackProbeSize);
11410   // Round down to the stack alignment.
11411   StackProbeSize &= ~(StackAlign - 1);
11412   return StackProbeSize ? StackProbeSize : StackAlign;
11413 }
11414 
11415 // Lower dynamic stack allocation with probing. `emitProbedAlloca` is splitted
11416 // into three phases. In the first phase, it uses pseudo instruction
11417 // PREPARE_PROBED_ALLOCA to get the future result of actual FramePointer and
11418 // FinalStackPtr. In the second phase, it generates a loop for probing blocks.
11419 // At last, it uses pseudo instruction DYNAREAOFFSET to get the future result of
11420 // MaxCallFrameSize so that it can calculate correct data area pointer.
11421 MachineBasicBlock *
11422 PPCTargetLowering::emitProbedAlloca(MachineInstr &MI,
11423                                     MachineBasicBlock *MBB) const {
11424   const bool isPPC64 = Subtarget.isPPC64();
11425   MachineFunction *MF = MBB->getParent();
11426   const TargetInstrInfo *TII = Subtarget.getInstrInfo();
11427   DebugLoc DL = MI.getDebugLoc();
11428   const unsigned ProbeSize = getStackProbeSize(*MF);
11429   const BasicBlock *ProbedBB = MBB->getBasicBlock();
11430   MachineRegisterInfo &MRI = MF->getRegInfo();
11431   // The CFG of probing stack looks as
11432   //         +-----+
11433   //         | MBB |
11434   //         +--+--+
11435   //            |
11436   //       +----v----+
11437   //  +--->+ TestMBB +---+
11438   //  |    +----+----+   |
11439   //  |         |        |
11440   //  |   +-----v----+   |
11441   //  +---+ BlockMBB |   |
11442   //      +----------+   |
11443   //                     |
11444   //       +---------+   |
11445   //       | TailMBB +<--+
11446   //       +---------+
11447   // In MBB, calculate previous frame pointer and final stack pointer.
11448   // In TestMBB, test if sp is equal to final stack pointer, if so, jump to
11449   // TailMBB. In BlockMBB, update the sp atomically and jump back to TestMBB.
11450   // TailMBB is spliced via \p MI.
11451   MachineBasicBlock *TestMBB = MF->CreateMachineBasicBlock(ProbedBB);
11452   MachineBasicBlock *TailMBB = MF->CreateMachineBasicBlock(ProbedBB);
11453   MachineBasicBlock *BlockMBB = MF->CreateMachineBasicBlock(ProbedBB);
11454 
11455   MachineFunction::iterator MBBIter = ++MBB->getIterator();
11456   MF->insert(MBBIter, TestMBB);
11457   MF->insert(MBBIter, BlockMBB);
11458   MF->insert(MBBIter, TailMBB);
11459 
11460   const TargetRegisterClass *G8RC = &PPC::G8RCRegClass;
11461   const TargetRegisterClass *GPRC = &PPC::GPRCRegClass;
11462 
11463   Register DstReg = MI.getOperand(0).getReg();
11464   Register NegSizeReg = MI.getOperand(1).getReg();
11465   Register SPReg = isPPC64 ? PPC::X1 : PPC::R1;
11466   Register FinalStackPtr = MRI.createVirtualRegister(isPPC64 ? G8RC : GPRC);
11467   Register FramePointer = MRI.createVirtualRegister(isPPC64 ? G8RC : GPRC);
11468   Register ActualNegSizeReg = MRI.createVirtualRegister(isPPC64 ? G8RC : GPRC);
11469 
11470   // Since value of NegSizeReg might be realigned in prologepilog, insert a
11471   // PREPARE_PROBED_ALLOCA pseudo instruction to get actual FramePointer and
11472   // NegSize.
11473   unsigned ProbeOpc;
11474   if (!MRI.hasOneNonDBGUse(NegSizeReg))
11475     ProbeOpc =
11476         isPPC64 ? PPC::PREPARE_PROBED_ALLOCA_64 : PPC::PREPARE_PROBED_ALLOCA_32;
11477   else
11478     // By introducing PREPARE_PROBED_ALLOCA_NEGSIZE_OPT, ActualNegSizeReg
11479     // and NegSizeReg will be allocated in the same phyreg to avoid
11480     // redundant copy when NegSizeReg has only one use which is current MI and
11481     // will be replaced by PREPARE_PROBED_ALLOCA then.
11482     ProbeOpc = isPPC64 ? PPC::PREPARE_PROBED_ALLOCA_NEGSIZE_SAME_REG_64
11483                        : PPC::PREPARE_PROBED_ALLOCA_NEGSIZE_SAME_REG_32;
11484   BuildMI(*MBB, {MI}, DL, TII->get(ProbeOpc), FramePointer)
11485       .addDef(ActualNegSizeReg)
11486       .addReg(NegSizeReg)
11487       .add(MI.getOperand(2))
11488       .add(MI.getOperand(3));
11489 
11490   // Calculate final stack pointer, which equals to SP + ActualNegSize.
11491   BuildMI(*MBB, {MI}, DL, TII->get(isPPC64 ? PPC::ADD8 : PPC::ADD4),
11492           FinalStackPtr)
11493       .addReg(SPReg)
11494       .addReg(ActualNegSizeReg);
11495 
11496   // Materialize a scratch register for update.
11497   int64_t NegProbeSize = -(int64_t)ProbeSize;
11498   assert(isInt<32>(NegProbeSize) && "Unhandled probe size!");
11499   Register ScratchReg = MRI.createVirtualRegister(isPPC64 ? G8RC : GPRC);
11500   if (!isInt<16>(NegProbeSize)) {
11501     Register TempReg = MRI.createVirtualRegister(isPPC64 ? G8RC : GPRC);
11502     BuildMI(*MBB, {MI}, DL, TII->get(isPPC64 ? PPC::LIS8 : PPC::LIS), TempReg)
11503         .addImm(NegProbeSize >> 16);
11504     BuildMI(*MBB, {MI}, DL, TII->get(isPPC64 ? PPC::ORI8 : PPC::ORI),
11505             ScratchReg)
11506         .addReg(TempReg)
11507         .addImm(NegProbeSize & 0xFFFF);
11508   } else
11509     BuildMI(*MBB, {MI}, DL, TII->get(isPPC64 ? PPC::LI8 : PPC::LI), ScratchReg)
11510         .addImm(NegProbeSize);
11511 
11512   {
11513     // Probing leading residual part.
11514     Register Div = MRI.createVirtualRegister(isPPC64 ? G8RC : GPRC);
11515     BuildMI(*MBB, {MI}, DL, TII->get(isPPC64 ? PPC::DIVD : PPC::DIVW), Div)
11516         .addReg(ActualNegSizeReg)
11517         .addReg(ScratchReg);
11518     Register Mul = MRI.createVirtualRegister(isPPC64 ? G8RC : GPRC);
11519     BuildMI(*MBB, {MI}, DL, TII->get(isPPC64 ? PPC::MULLD : PPC::MULLW), Mul)
11520         .addReg(Div)
11521         .addReg(ScratchReg);
11522     Register NegMod = MRI.createVirtualRegister(isPPC64 ? G8RC : GPRC);
11523     BuildMI(*MBB, {MI}, DL, TII->get(isPPC64 ? PPC::SUBF8 : PPC::SUBF), NegMod)
11524         .addReg(Mul)
11525         .addReg(ActualNegSizeReg);
11526     BuildMI(*MBB, {MI}, DL, TII->get(isPPC64 ? PPC::STDUX : PPC::STWUX), SPReg)
11527         .addReg(FramePointer)
11528         .addReg(SPReg)
11529         .addReg(NegMod);
11530   }
11531 
11532   {
11533     // Remaining part should be multiple of ProbeSize.
11534     Register CmpResult = MRI.createVirtualRegister(&PPC::CRRCRegClass);
11535     BuildMI(TestMBB, DL, TII->get(isPPC64 ? PPC::CMPD : PPC::CMPW), CmpResult)
11536         .addReg(SPReg)
11537         .addReg(FinalStackPtr);
11538     BuildMI(TestMBB, DL, TII->get(PPC::BCC))
11539         .addImm(PPC::PRED_EQ)
11540         .addReg(CmpResult)
11541         .addMBB(TailMBB);
11542     TestMBB->addSuccessor(BlockMBB);
11543     TestMBB->addSuccessor(TailMBB);
11544   }
11545 
11546   {
11547     // Touch the block.
11548     // |P...|P...|P...
11549     BuildMI(BlockMBB, DL, TII->get(isPPC64 ? PPC::STDUX : PPC::STWUX), SPReg)
11550         .addReg(FramePointer)
11551         .addReg(SPReg)
11552         .addReg(ScratchReg);
11553     BuildMI(BlockMBB, DL, TII->get(PPC::B)).addMBB(TestMBB);
11554     BlockMBB->addSuccessor(TestMBB);
11555   }
11556 
11557   // Calculation of MaxCallFrameSize is deferred to prologepilog, use
11558   // DYNAREAOFFSET pseudo instruction to get the future result.
11559   Register MaxCallFrameSizeReg =
11560       MRI.createVirtualRegister(isPPC64 ? G8RC : GPRC);
11561   BuildMI(TailMBB, DL,
11562           TII->get(isPPC64 ? PPC::DYNAREAOFFSET8 : PPC::DYNAREAOFFSET),
11563           MaxCallFrameSizeReg)
11564       .add(MI.getOperand(2))
11565       .add(MI.getOperand(3));
11566   BuildMI(TailMBB, DL, TII->get(isPPC64 ? PPC::ADD8 : PPC::ADD4), DstReg)
11567       .addReg(SPReg)
11568       .addReg(MaxCallFrameSizeReg);
11569 
11570   // Splice instructions after MI to TailMBB.
11571   TailMBB->splice(TailMBB->end(), MBB,
11572                   std::next(MachineBasicBlock::iterator(MI)), MBB->end());
11573   TailMBB->transferSuccessorsAndUpdatePHIs(MBB);
11574   MBB->addSuccessor(TestMBB);
11575 
11576   // Delete the pseudo instruction.
11577   MI.eraseFromParent();
11578 
11579   ++NumDynamicAllocaProbed;
11580   return TailMBB;
11581 }
11582 
11583 MachineBasicBlock *
11584 PPCTargetLowering::EmitInstrWithCustomInserter(MachineInstr &MI,
11585                                                MachineBasicBlock *BB) const {
11586   if (MI.getOpcode() == TargetOpcode::STACKMAP ||
11587       MI.getOpcode() == TargetOpcode::PATCHPOINT) {
11588     if (Subtarget.is64BitELFABI() &&
11589         MI.getOpcode() == TargetOpcode::PATCHPOINT &&
11590         !Subtarget.isUsingPCRelativeCalls()) {
11591       // Call lowering should have added an r2 operand to indicate a dependence
11592       // on the TOC base pointer value. It can't however, because there is no
11593       // way to mark the dependence as implicit there, and so the stackmap code
11594       // will confuse it with a regular operand. Instead, add the dependence
11595       // here.
11596       MI.addOperand(MachineOperand::CreateReg(PPC::X2, false, true));
11597     }
11598 
11599     return emitPatchPoint(MI, BB);
11600   }
11601 
11602   if (MI.getOpcode() == PPC::EH_SjLj_SetJmp32 ||
11603       MI.getOpcode() == PPC::EH_SjLj_SetJmp64) {
11604     return emitEHSjLjSetJmp(MI, BB);
11605   } else if (MI.getOpcode() == PPC::EH_SjLj_LongJmp32 ||
11606              MI.getOpcode() == PPC::EH_SjLj_LongJmp64) {
11607     return emitEHSjLjLongJmp(MI, BB);
11608   }
11609 
11610   const TargetInstrInfo *TII = Subtarget.getInstrInfo();
11611 
11612   // To "insert" these instructions we actually have to insert their
11613   // control-flow patterns.
11614   const BasicBlock *LLVM_BB = BB->getBasicBlock();
11615   MachineFunction::iterator It = ++BB->getIterator();
11616 
11617   MachineFunction *F = BB->getParent();
11618 
11619   if (MI.getOpcode() == PPC::SELECT_CC_I4 ||
11620       MI.getOpcode() == PPC::SELECT_CC_I8 || MI.getOpcode() == PPC::SELECT_I4 ||
11621       MI.getOpcode() == PPC::SELECT_I8) {
11622     SmallVector<MachineOperand, 2> Cond;
11623     if (MI.getOpcode() == PPC::SELECT_CC_I4 ||
11624         MI.getOpcode() == PPC::SELECT_CC_I8)
11625       Cond.push_back(MI.getOperand(4));
11626     else
11627       Cond.push_back(MachineOperand::CreateImm(PPC::PRED_BIT_SET));
11628     Cond.push_back(MI.getOperand(1));
11629 
11630     DebugLoc dl = MI.getDebugLoc();
11631     TII->insertSelect(*BB, MI, dl, MI.getOperand(0).getReg(), Cond,
11632                       MI.getOperand(2).getReg(), MI.getOperand(3).getReg());
11633   } else if (MI.getOpcode() == PPC::SELECT_CC_F4 ||
11634              MI.getOpcode() == PPC::SELECT_CC_F8 ||
11635              MI.getOpcode() == PPC::SELECT_CC_F16 ||
11636              MI.getOpcode() == PPC::SELECT_CC_VRRC ||
11637              MI.getOpcode() == PPC::SELECT_CC_VSFRC ||
11638              MI.getOpcode() == PPC::SELECT_CC_VSSRC ||
11639              MI.getOpcode() == PPC::SELECT_CC_VSRC ||
11640              MI.getOpcode() == PPC::SELECT_CC_SPE4 ||
11641              MI.getOpcode() == PPC::SELECT_CC_SPE ||
11642              MI.getOpcode() == PPC::SELECT_F4 ||
11643              MI.getOpcode() == PPC::SELECT_F8 ||
11644              MI.getOpcode() == PPC::SELECT_F16 ||
11645              MI.getOpcode() == PPC::SELECT_SPE ||
11646              MI.getOpcode() == PPC::SELECT_SPE4 ||
11647              MI.getOpcode() == PPC::SELECT_VRRC ||
11648              MI.getOpcode() == PPC::SELECT_VSFRC ||
11649              MI.getOpcode() == PPC::SELECT_VSSRC ||
11650              MI.getOpcode() == PPC::SELECT_VSRC) {
11651     // The incoming instruction knows the destination vreg to set, the
11652     // condition code register to branch on, the true/false values to
11653     // select between, and a branch opcode to use.
11654 
11655     //  thisMBB:
11656     //  ...
11657     //   TrueVal = ...
11658     //   cmpTY ccX, r1, r2
11659     //   bCC copy1MBB
11660     //   fallthrough --> copy0MBB
11661     MachineBasicBlock *thisMBB = BB;
11662     MachineBasicBlock *copy0MBB = F->CreateMachineBasicBlock(LLVM_BB);
11663     MachineBasicBlock *sinkMBB = F->CreateMachineBasicBlock(LLVM_BB);
11664     DebugLoc dl = MI.getDebugLoc();
11665     F->insert(It, copy0MBB);
11666     F->insert(It, sinkMBB);
11667 
11668     // Transfer the remainder of BB and its successor edges to sinkMBB.
11669     sinkMBB->splice(sinkMBB->begin(), BB,
11670                     std::next(MachineBasicBlock::iterator(MI)), BB->end());
11671     sinkMBB->transferSuccessorsAndUpdatePHIs(BB);
11672 
11673     // Next, add the true and fallthrough blocks as its successors.
11674     BB->addSuccessor(copy0MBB);
11675     BB->addSuccessor(sinkMBB);
11676 
11677     if (MI.getOpcode() == PPC::SELECT_I4 || MI.getOpcode() == PPC::SELECT_I8 ||
11678         MI.getOpcode() == PPC::SELECT_F4 || MI.getOpcode() == PPC::SELECT_F8 ||
11679         MI.getOpcode() == PPC::SELECT_F16 ||
11680         MI.getOpcode() == PPC::SELECT_SPE4 ||
11681         MI.getOpcode() == PPC::SELECT_SPE ||
11682         MI.getOpcode() == PPC::SELECT_VRRC ||
11683         MI.getOpcode() == PPC::SELECT_VSFRC ||
11684         MI.getOpcode() == PPC::SELECT_VSSRC ||
11685         MI.getOpcode() == PPC::SELECT_VSRC) {
11686       BuildMI(BB, dl, TII->get(PPC::BC))
11687           .addReg(MI.getOperand(1).getReg())
11688           .addMBB(sinkMBB);
11689     } else {
11690       unsigned SelectPred = MI.getOperand(4).getImm();
11691       BuildMI(BB, dl, TII->get(PPC::BCC))
11692           .addImm(SelectPred)
11693           .addReg(MI.getOperand(1).getReg())
11694           .addMBB(sinkMBB);
11695     }
11696 
11697     //  copy0MBB:
11698     //   %FalseValue = ...
11699     //   # fallthrough to sinkMBB
11700     BB = copy0MBB;
11701 
11702     // Update machine-CFG edges
11703     BB->addSuccessor(sinkMBB);
11704 
11705     //  sinkMBB:
11706     //   %Result = phi [ %FalseValue, copy0MBB ], [ %TrueValue, thisMBB ]
11707     //  ...
11708     BB = sinkMBB;
11709     BuildMI(*BB, BB->begin(), dl, TII->get(PPC::PHI), MI.getOperand(0).getReg())
11710         .addReg(MI.getOperand(3).getReg())
11711         .addMBB(copy0MBB)
11712         .addReg(MI.getOperand(2).getReg())
11713         .addMBB(thisMBB);
11714   } else if (MI.getOpcode() == PPC::ReadTB) {
11715     // To read the 64-bit time-base register on a 32-bit target, we read the
11716     // two halves. Should the counter have wrapped while it was being read, we
11717     // need to try again.
11718     // ...
11719     // readLoop:
11720     // mfspr Rx,TBU # load from TBU
11721     // mfspr Ry,TB  # load from TB
11722     // mfspr Rz,TBU # load from TBU
11723     // cmpw crX,Rx,Rz # check if 'old'='new'
11724     // bne readLoop   # branch if they're not equal
11725     // ...
11726 
11727     MachineBasicBlock *readMBB = F->CreateMachineBasicBlock(LLVM_BB);
11728     MachineBasicBlock *sinkMBB = F->CreateMachineBasicBlock(LLVM_BB);
11729     DebugLoc dl = MI.getDebugLoc();
11730     F->insert(It, readMBB);
11731     F->insert(It, sinkMBB);
11732 
11733     // Transfer the remainder of BB and its successor edges to sinkMBB.
11734     sinkMBB->splice(sinkMBB->begin(), BB,
11735                     std::next(MachineBasicBlock::iterator(MI)), BB->end());
11736     sinkMBB->transferSuccessorsAndUpdatePHIs(BB);
11737 
11738     BB->addSuccessor(readMBB);
11739     BB = readMBB;
11740 
11741     MachineRegisterInfo &RegInfo = F->getRegInfo();
11742     Register ReadAgainReg = RegInfo.createVirtualRegister(&PPC::GPRCRegClass);
11743     Register LoReg = MI.getOperand(0).getReg();
11744     Register HiReg = MI.getOperand(1).getReg();
11745 
11746     BuildMI(BB, dl, TII->get(PPC::MFSPR), HiReg).addImm(269);
11747     BuildMI(BB, dl, TII->get(PPC::MFSPR), LoReg).addImm(268);
11748     BuildMI(BB, dl, TII->get(PPC::MFSPR), ReadAgainReg).addImm(269);
11749 
11750     Register CmpReg = RegInfo.createVirtualRegister(&PPC::CRRCRegClass);
11751 
11752     BuildMI(BB, dl, TII->get(PPC::CMPW), CmpReg)
11753         .addReg(HiReg)
11754         .addReg(ReadAgainReg);
11755     BuildMI(BB, dl, TII->get(PPC::BCC))
11756         .addImm(PPC::PRED_NE)
11757         .addReg(CmpReg)
11758         .addMBB(readMBB);
11759 
11760     BB->addSuccessor(readMBB);
11761     BB->addSuccessor(sinkMBB);
11762   } else if (MI.getOpcode() == PPC::ATOMIC_LOAD_ADD_I8)
11763     BB = EmitPartwordAtomicBinary(MI, BB, true, PPC::ADD4);
11764   else if (MI.getOpcode() == PPC::ATOMIC_LOAD_ADD_I16)
11765     BB = EmitPartwordAtomicBinary(MI, BB, false, PPC::ADD4);
11766   else if (MI.getOpcode() == PPC::ATOMIC_LOAD_ADD_I32)
11767     BB = EmitAtomicBinary(MI, BB, 4, PPC::ADD4);
11768   else if (MI.getOpcode() == PPC::ATOMIC_LOAD_ADD_I64)
11769     BB = EmitAtomicBinary(MI, BB, 8, PPC::ADD8);
11770 
11771   else if (MI.getOpcode() == PPC::ATOMIC_LOAD_AND_I8)
11772     BB = EmitPartwordAtomicBinary(MI, BB, true, PPC::AND);
11773   else if (MI.getOpcode() == PPC::ATOMIC_LOAD_AND_I16)
11774     BB = EmitPartwordAtomicBinary(MI, BB, false, PPC::AND);
11775   else if (MI.getOpcode() == PPC::ATOMIC_LOAD_AND_I32)
11776     BB = EmitAtomicBinary(MI, BB, 4, PPC::AND);
11777   else if (MI.getOpcode() == PPC::ATOMIC_LOAD_AND_I64)
11778     BB = EmitAtomicBinary(MI, BB, 8, PPC::AND8);
11779 
11780   else if (MI.getOpcode() == PPC::ATOMIC_LOAD_OR_I8)
11781     BB = EmitPartwordAtomicBinary(MI, BB, true, PPC::OR);
11782   else if (MI.getOpcode() == PPC::ATOMIC_LOAD_OR_I16)
11783     BB = EmitPartwordAtomicBinary(MI, BB, false, PPC::OR);
11784   else if (MI.getOpcode() == PPC::ATOMIC_LOAD_OR_I32)
11785     BB = EmitAtomicBinary(MI, BB, 4, PPC::OR);
11786   else if (MI.getOpcode() == PPC::ATOMIC_LOAD_OR_I64)
11787     BB = EmitAtomicBinary(MI, BB, 8, PPC::OR8);
11788 
11789   else if (MI.getOpcode() == PPC::ATOMIC_LOAD_XOR_I8)
11790     BB = EmitPartwordAtomicBinary(MI, BB, true, PPC::XOR);
11791   else if (MI.getOpcode() == PPC::ATOMIC_LOAD_XOR_I16)
11792     BB = EmitPartwordAtomicBinary(MI, BB, false, PPC::XOR);
11793   else if (MI.getOpcode() == PPC::ATOMIC_LOAD_XOR_I32)
11794     BB = EmitAtomicBinary(MI, BB, 4, PPC::XOR);
11795   else if (MI.getOpcode() == PPC::ATOMIC_LOAD_XOR_I64)
11796     BB = EmitAtomicBinary(MI, BB, 8, PPC::XOR8);
11797 
11798   else if (MI.getOpcode() == PPC::ATOMIC_LOAD_NAND_I8)
11799     BB = EmitPartwordAtomicBinary(MI, BB, true, PPC::NAND);
11800   else if (MI.getOpcode() == PPC::ATOMIC_LOAD_NAND_I16)
11801     BB = EmitPartwordAtomicBinary(MI, BB, false, PPC::NAND);
11802   else if (MI.getOpcode() == PPC::ATOMIC_LOAD_NAND_I32)
11803     BB = EmitAtomicBinary(MI, BB, 4, PPC::NAND);
11804   else if (MI.getOpcode() == PPC::ATOMIC_LOAD_NAND_I64)
11805     BB = EmitAtomicBinary(MI, BB, 8, PPC::NAND8);
11806 
11807   else if (MI.getOpcode() == PPC::ATOMIC_LOAD_SUB_I8)
11808     BB = EmitPartwordAtomicBinary(MI, BB, true, PPC::SUBF);
11809   else if (MI.getOpcode() == PPC::ATOMIC_LOAD_SUB_I16)
11810     BB = EmitPartwordAtomicBinary(MI, BB, false, PPC::SUBF);
11811   else if (MI.getOpcode() == PPC::ATOMIC_LOAD_SUB_I32)
11812     BB = EmitAtomicBinary(MI, BB, 4, PPC::SUBF);
11813   else if (MI.getOpcode() == PPC::ATOMIC_LOAD_SUB_I64)
11814     BB = EmitAtomicBinary(MI, BB, 8, PPC::SUBF8);
11815 
11816   else if (MI.getOpcode() == PPC::ATOMIC_LOAD_MIN_I8)
11817     BB = EmitPartwordAtomicBinary(MI, BB, true, 0, PPC::CMPW, PPC::PRED_GE);
11818   else if (MI.getOpcode() == PPC::ATOMIC_LOAD_MIN_I16)
11819     BB = EmitPartwordAtomicBinary(MI, BB, false, 0, PPC::CMPW, PPC::PRED_GE);
11820   else if (MI.getOpcode() == PPC::ATOMIC_LOAD_MIN_I32)
11821     BB = EmitAtomicBinary(MI, BB, 4, 0, PPC::CMPW, PPC::PRED_GE);
11822   else if (MI.getOpcode() == PPC::ATOMIC_LOAD_MIN_I64)
11823     BB = EmitAtomicBinary(MI, BB, 8, 0, PPC::CMPD, PPC::PRED_GE);
11824 
11825   else if (MI.getOpcode() == PPC::ATOMIC_LOAD_MAX_I8)
11826     BB = EmitPartwordAtomicBinary(MI, BB, true, 0, PPC::CMPW, PPC::PRED_LE);
11827   else if (MI.getOpcode() == PPC::ATOMIC_LOAD_MAX_I16)
11828     BB = EmitPartwordAtomicBinary(MI, BB, false, 0, PPC::CMPW, PPC::PRED_LE);
11829   else if (MI.getOpcode() == PPC::ATOMIC_LOAD_MAX_I32)
11830     BB = EmitAtomicBinary(MI, BB, 4, 0, PPC::CMPW, PPC::PRED_LE);
11831   else if (MI.getOpcode() == PPC::ATOMIC_LOAD_MAX_I64)
11832     BB = EmitAtomicBinary(MI, BB, 8, 0, PPC::CMPD, PPC::PRED_LE);
11833 
11834   else if (MI.getOpcode() == PPC::ATOMIC_LOAD_UMIN_I8)
11835     BB = EmitPartwordAtomicBinary(MI, BB, true, 0, PPC::CMPLW, PPC::PRED_GE);
11836   else if (MI.getOpcode() == PPC::ATOMIC_LOAD_UMIN_I16)
11837     BB = EmitPartwordAtomicBinary(MI, BB, false, 0, PPC::CMPLW, PPC::PRED_GE);
11838   else if (MI.getOpcode() == PPC::ATOMIC_LOAD_UMIN_I32)
11839     BB = EmitAtomicBinary(MI, BB, 4, 0, PPC::CMPLW, PPC::PRED_GE);
11840   else if (MI.getOpcode() == PPC::ATOMIC_LOAD_UMIN_I64)
11841     BB = EmitAtomicBinary(MI, BB, 8, 0, PPC::CMPLD, PPC::PRED_GE);
11842 
11843   else if (MI.getOpcode() == PPC::ATOMIC_LOAD_UMAX_I8)
11844     BB = EmitPartwordAtomicBinary(MI, BB, true, 0, PPC::CMPLW, PPC::PRED_LE);
11845   else if (MI.getOpcode() == PPC::ATOMIC_LOAD_UMAX_I16)
11846     BB = EmitPartwordAtomicBinary(MI, BB, false, 0, PPC::CMPLW, PPC::PRED_LE);
11847   else if (MI.getOpcode() == PPC::ATOMIC_LOAD_UMAX_I32)
11848     BB = EmitAtomicBinary(MI, BB, 4, 0, PPC::CMPLW, PPC::PRED_LE);
11849   else if (MI.getOpcode() == PPC::ATOMIC_LOAD_UMAX_I64)
11850     BB = EmitAtomicBinary(MI, BB, 8, 0, PPC::CMPLD, PPC::PRED_LE);
11851 
11852   else if (MI.getOpcode() == PPC::ATOMIC_SWAP_I8)
11853     BB = EmitPartwordAtomicBinary(MI, BB, true, 0);
11854   else if (MI.getOpcode() == PPC::ATOMIC_SWAP_I16)
11855     BB = EmitPartwordAtomicBinary(MI, BB, false, 0);
11856   else if (MI.getOpcode() == PPC::ATOMIC_SWAP_I32)
11857     BB = EmitAtomicBinary(MI, BB, 4, 0);
11858   else if (MI.getOpcode() == PPC::ATOMIC_SWAP_I64)
11859     BB = EmitAtomicBinary(MI, BB, 8, 0);
11860   else if (MI.getOpcode() == PPC::ATOMIC_CMP_SWAP_I32 ||
11861            MI.getOpcode() == PPC::ATOMIC_CMP_SWAP_I64 ||
11862            (Subtarget.hasPartwordAtomics() &&
11863             MI.getOpcode() == PPC::ATOMIC_CMP_SWAP_I8) ||
11864            (Subtarget.hasPartwordAtomics() &&
11865             MI.getOpcode() == PPC::ATOMIC_CMP_SWAP_I16)) {
11866     bool is64bit = MI.getOpcode() == PPC::ATOMIC_CMP_SWAP_I64;
11867 
11868     auto LoadMnemonic = PPC::LDARX;
11869     auto StoreMnemonic = PPC::STDCX;
11870     switch (MI.getOpcode()) {
11871     default:
11872       llvm_unreachable("Compare and swap of unknown size");
11873     case PPC::ATOMIC_CMP_SWAP_I8:
11874       LoadMnemonic = PPC::LBARX;
11875       StoreMnemonic = PPC::STBCX;
11876       assert(Subtarget.hasPartwordAtomics() && "No support partword atomics.");
11877       break;
11878     case PPC::ATOMIC_CMP_SWAP_I16:
11879       LoadMnemonic = PPC::LHARX;
11880       StoreMnemonic = PPC::STHCX;
11881       assert(Subtarget.hasPartwordAtomics() && "No support partword atomics.");
11882       break;
11883     case PPC::ATOMIC_CMP_SWAP_I32:
11884       LoadMnemonic = PPC::LWARX;
11885       StoreMnemonic = PPC::STWCX;
11886       break;
11887     case PPC::ATOMIC_CMP_SWAP_I64:
11888       LoadMnemonic = PPC::LDARX;
11889       StoreMnemonic = PPC::STDCX;
11890       break;
11891     }
11892     Register dest = MI.getOperand(0).getReg();
11893     Register ptrA = MI.getOperand(1).getReg();
11894     Register ptrB = MI.getOperand(2).getReg();
11895     Register oldval = MI.getOperand(3).getReg();
11896     Register newval = MI.getOperand(4).getReg();
11897     DebugLoc dl = MI.getDebugLoc();
11898 
11899     MachineBasicBlock *loop1MBB = F->CreateMachineBasicBlock(LLVM_BB);
11900     MachineBasicBlock *loop2MBB = F->CreateMachineBasicBlock(LLVM_BB);
11901     MachineBasicBlock *midMBB = F->CreateMachineBasicBlock(LLVM_BB);
11902     MachineBasicBlock *exitMBB = F->CreateMachineBasicBlock(LLVM_BB);
11903     F->insert(It, loop1MBB);
11904     F->insert(It, loop2MBB);
11905     F->insert(It, midMBB);
11906     F->insert(It, exitMBB);
11907     exitMBB->splice(exitMBB->begin(), BB,
11908                     std::next(MachineBasicBlock::iterator(MI)), BB->end());
11909     exitMBB->transferSuccessorsAndUpdatePHIs(BB);
11910 
11911     //  thisMBB:
11912     //   ...
11913     //   fallthrough --> loopMBB
11914     BB->addSuccessor(loop1MBB);
11915 
11916     // loop1MBB:
11917     //   l[bhwd]arx dest, ptr
11918     //   cmp[wd] dest, oldval
11919     //   bne- midMBB
11920     // loop2MBB:
11921     //   st[bhwd]cx. newval, ptr
11922     //   bne- loopMBB
11923     //   b exitBB
11924     // midMBB:
11925     //   st[bhwd]cx. dest, ptr
11926     // exitBB:
11927     BB = loop1MBB;
11928     BuildMI(BB, dl, TII->get(LoadMnemonic), dest).addReg(ptrA).addReg(ptrB);
11929     BuildMI(BB, dl, TII->get(is64bit ? PPC::CMPD : PPC::CMPW), PPC::CR0)
11930         .addReg(oldval)
11931         .addReg(dest);
11932     BuildMI(BB, dl, TII->get(PPC::BCC))
11933         .addImm(PPC::PRED_NE)
11934         .addReg(PPC::CR0)
11935         .addMBB(midMBB);
11936     BB->addSuccessor(loop2MBB);
11937     BB->addSuccessor(midMBB);
11938 
11939     BB = loop2MBB;
11940     BuildMI(BB, dl, TII->get(StoreMnemonic))
11941         .addReg(newval)
11942         .addReg(ptrA)
11943         .addReg(ptrB);
11944     BuildMI(BB, dl, TII->get(PPC::BCC))
11945         .addImm(PPC::PRED_NE)
11946         .addReg(PPC::CR0)
11947         .addMBB(loop1MBB);
11948     BuildMI(BB, dl, TII->get(PPC::B)).addMBB(exitMBB);
11949     BB->addSuccessor(loop1MBB);
11950     BB->addSuccessor(exitMBB);
11951 
11952     BB = midMBB;
11953     BuildMI(BB, dl, TII->get(StoreMnemonic))
11954         .addReg(dest)
11955         .addReg(ptrA)
11956         .addReg(ptrB);
11957     BB->addSuccessor(exitMBB);
11958 
11959     //  exitMBB:
11960     //   ...
11961     BB = exitMBB;
11962   } else if (MI.getOpcode() == PPC::ATOMIC_CMP_SWAP_I8 ||
11963              MI.getOpcode() == PPC::ATOMIC_CMP_SWAP_I16) {
11964     // We must use 64-bit registers for addresses when targeting 64-bit,
11965     // since we're actually doing arithmetic on them.  Other registers
11966     // can be 32-bit.
11967     bool is64bit = Subtarget.isPPC64();
11968     bool isLittleEndian = Subtarget.isLittleEndian();
11969     bool is8bit = MI.getOpcode() == PPC::ATOMIC_CMP_SWAP_I8;
11970 
11971     Register dest = MI.getOperand(0).getReg();
11972     Register ptrA = MI.getOperand(1).getReg();
11973     Register ptrB = MI.getOperand(2).getReg();
11974     Register oldval = MI.getOperand(3).getReg();
11975     Register newval = MI.getOperand(4).getReg();
11976     DebugLoc dl = MI.getDebugLoc();
11977 
11978     MachineBasicBlock *loop1MBB = F->CreateMachineBasicBlock(LLVM_BB);
11979     MachineBasicBlock *loop2MBB = F->CreateMachineBasicBlock(LLVM_BB);
11980     MachineBasicBlock *midMBB = F->CreateMachineBasicBlock(LLVM_BB);
11981     MachineBasicBlock *exitMBB = F->CreateMachineBasicBlock(LLVM_BB);
11982     F->insert(It, loop1MBB);
11983     F->insert(It, loop2MBB);
11984     F->insert(It, midMBB);
11985     F->insert(It, exitMBB);
11986     exitMBB->splice(exitMBB->begin(), BB,
11987                     std::next(MachineBasicBlock::iterator(MI)), BB->end());
11988     exitMBB->transferSuccessorsAndUpdatePHIs(BB);
11989 
11990     MachineRegisterInfo &RegInfo = F->getRegInfo();
11991     const TargetRegisterClass *RC =
11992         is64bit ? &PPC::G8RCRegClass : &PPC::GPRCRegClass;
11993     const TargetRegisterClass *GPRC = &PPC::GPRCRegClass;
11994 
11995     Register PtrReg = RegInfo.createVirtualRegister(RC);
11996     Register Shift1Reg = RegInfo.createVirtualRegister(GPRC);
11997     Register ShiftReg =
11998         isLittleEndian ? Shift1Reg : RegInfo.createVirtualRegister(GPRC);
11999     Register NewVal2Reg = RegInfo.createVirtualRegister(GPRC);
12000     Register NewVal3Reg = RegInfo.createVirtualRegister(GPRC);
12001     Register OldVal2Reg = RegInfo.createVirtualRegister(GPRC);
12002     Register OldVal3Reg = RegInfo.createVirtualRegister(GPRC);
12003     Register MaskReg = RegInfo.createVirtualRegister(GPRC);
12004     Register Mask2Reg = RegInfo.createVirtualRegister(GPRC);
12005     Register Mask3Reg = RegInfo.createVirtualRegister(GPRC);
12006     Register Tmp2Reg = RegInfo.createVirtualRegister(GPRC);
12007     Register Tmp4Reg = RegInfo.createVirtualRegister(GPRC);
12008     Register TmpDestReg = RegInfo.createVirtualRegister(GPRC);
12009     Register Ptr1Reg;
12010     Register TmpReg = RegInfo.createVirtualRegister(GPRC);
12011     Register ZeroReg = is64bit ? PPC::ZERO8 : PPC::ZERO;
12012     //  thisMBB:
12013     //   ...
12014     //   fallthrough --> loopMBB
12015     BB->addSuccessor(loop1MBB);
12016 
12017     // The 4-byte load must be aligned, while a char or short may be
12018     // anywhere in the word.  Hence all this nasty bookkeeping code.
12019     //   add ptr1, ptrA, ptrB [copy if ptrA==0]
12020     //   rlwinm shift1, ptr1, 3, 27, 28 [3, 27, 27]
12021     //   xori shift, shift1, 24 [16]
12022     //   rlwinm ptr, ptr1, 0, 0, 29
12023     //   slw newval2, newval, shift
12024     //   slw oldval2, oldval,shift
12025     //   li mask2, 255 [li mask3, 0; ori mask2, mask3, 65535]
12026     //   slw mask, mask2, shift
12027     //   and newval3, newval2, mask
12028     //   and oldval3, oldval2, mask
12029     // loop1MBB:
12030     //   lwarx tmpDest, ptr
12031     //   and tmp, tmpDest, mask
12032     //   cmpw tmp, oldval3
12033     //   bne- midMBB
12034     // loop2MBB:
12035     //   andc tmp2, tmpDest, mask
12036     //   or tmp4, tmp2, newval3
12037     //   stwcx. tmp4, ptr
12038     //   bne- loop1MBB
12039     //   b exitBB
12040     // midMBB:
12041     //   stwcx. tmpDest, ptr
12042     // exitBB:
12043     //   srw dest, tmpDest, shift
12044     if (ptrA != ZeroReg) {
12045       Ptr1Reg = RegInfo.createVirtualRegister(RC);
12046       BuildMI(BB, dl, TII->get(is64bit ? PPC::ADD8 : PPC::ADD4), Ptr1Reg)
12047           .addReg(ptrA)
12048           .addReg(ptrB);
12049     } else {
12050       Ptr1Reg = ptrB;
12051     }
12052 
12053     // We need use 32-bit subregister to avoid mismatch register class in 64-bit
12054     // mode.
12055     BuildMI(BB, dl, TII->get(PPC::RLWINM), Shift1Reg)
12056         .addReg(Ptr1Reg, 0, is64bit ? PPC::sub_32 : 0)
12057         .addImm(3)
12058         .addImm(27)
12059         .addImm(is8bit ? 28 : 27);
12060     if (!isLittleEndian)
12061       BuildMI(BB, dl, TII->get(PPC::XORI), ShiftReg)
12062           .addReg(Shift1Reg)
12063           .addImm(is8bit ? 24 : 16);
12064     if (is64bit)
12065       BuildMI(BB, dl, TII->get(PPC::RLDICR), PtrReg)
12066           .addReg(Ptr1Reg)
12067           .addImm(0)
12068           .addImm(61);
12069     else
12070       BuildMI(BB, dl, TII->get(PPC::RLWINM), PtrReg)
12071           .addReg(Ptr1Reg)
12072           .addImm(0)
12073           .addImm(0)
12074           .addImm(29);
12075     BuildMI(BB, dl, TII->get(PPC::SLW), NewVal2Reg)
12076         .addReg(newval)
12077         .addReg(ShiftReg);
12078     BuildMI(BB, dl, TII->get(PPC::SLW), OldVal2Reg)
12079         .addReg(oldval)
12080         .addReg(ShiftReg);
12081     if (is8bit)
12082       BuildMI(BB, dl, TII->get(PPC::LI), Mask2Reg).addImm(255);
12083     else {
12084       BuildMI(BB, dl, TII->get(PPC::LI), Mask3Reg).addImm(0);
12085       BuildMI(BB, dl, TII->get(PPC::ORI), Mask2Reg)
12086           .addReg(Mask3Reg)
12087           .addImm(65535);
12088     }
12089     BuildMI(BB, dl, TII->get(PPC::SLW), MaskReg)
12090         .addReg(Mask2Reg)
12091         .addReg(ShiftReg);
12092     BuildMI(BB, dl, TII->get(PPC::AND), NewVal3Reg)
12093         .addReg(NewVal2Reg)
12094         .addReg(MaskReg);
12095     BuildMI(BB, dl, TII->get(PPC::AND), OldVal3Reg)
12096         .addReg(OldVal2Reg)
12097         .addReg(MaskReg);
12098 
12099     BB = loop1MBB;
12100     BuildMI(BB, dl, TII->get(PPC::LWARX), TmpDestReg)
12101         .addReg(ZeroReg)
12102         .addReg(PtrReg);
12103     BuildMI(BB, dl, TII->get(PPC::AND), TmpReg)
12104         .addReg(TmpDestReg)
12105         .addReg(MaskReg);
12106     BuildMI(BB, dl, TII->get(PPC::CMPW), PPC::CR0)
12107         .addReg(TmpReg)
12108         .addReg(OldVal3Reg);
12109     BuildMI(BB, dl, TII->get(PPC::BCC))
12110         .addImm(PPC::PRED_NE)
12111         .addReg(PPC::CR0)
12112         .addMBB(midMBB);
12113     BB->addSuccessor(loop2MBB);
12114     BB->addSuccessor(midMBB);
12115 
12116     BB = loop2MBB;
12117     BuildMI(BB, dl, TII->get(PPC::ANDC), Tmp2Reg)
12118         .addReg(TmpDestReg)
12119         .addReg(MaskReg);
12120     BuildMI(BB, dl, TII->get(PPC::OR), Tmp4Reg)
12121         .addReg(Tmp2Reg)
12122         .addReg(NewVal3Reg);
12123     BuildMI(BB, dl, TII->get(PPC::STWCX))
12124         .addReg(Tmp4Reg)
12125         .addReg(ZeroReg)
12126         .addReg(PtrReg);
12127     BuildMI(BB, dl, TII->get(PPC::BCC))
12128         .addImm(PPC::PRED_NE)
12129         .addReg(PPC::CR0)
12130         .addMBB(loop1MBB);
12131     BuildMI(BB, dl, TII->get(PPC::B)).addMBB(exitMBB);
12132     BB->addSuccessor(loop1MBB);
12133     BB->addSuccessor(exitMBB);
12134 
12135     BB = midMBB;
12136     BuildMI(BB, dl, TII->get(PPC::STWCX))
12137         .addReg(TmpDestReg)
12138         .addReg(ZeroReg)
12139         .addReg(PtrReg);
12140     BB->addSuccessor(exitMBB);
12141 
12142     //  exitMBB:
12143     //   ...
12144     BB = exitMBB;
12145     BuildMI(*BB, BB->begin(), dl, TII->get(PPC::SRW), dest)
12146         .addReg(TmpReg)
12147         .addReg(ShiftReg);
12148   } else if (MI.getOpcode() == PPC::FADDrtz) {
12149     // This pseudo performs an FADD with rounding mode temporarily forced
12150     // to round-to-zero.  We emit this via custom inserter since the FPSCR
12151     // is not modeled at the SelectionDAG level.
12152     Register Dest = MI.getOperand(0).getReg();
12153     Register Src1 = MI.getOperand(1).getReg();
12154     Register Src2 = MI.getOperand(2).getReg();
12155     DebugLoc dl = MI.getDebugLoc();
12156 
12157     MachineRegisterInfo &RegInfo = F->getRegInfo();
12158     Register MFFSReg = RegInfo.createVirtualRegister(&PPC::F8RCRegClass);
12159 
12160     // Save FPSCR value.
12161     BuildMI(*BB, MI, dl, TII->get(PPC::MFFS), MFFSReg);
12162 
12163     // Set rounding mode to round-to-zero.
12164     BuildMI(*BB, MI, dl, TII->get(PPC::MTFSB1))
12165         .addImm(31)
12166         .addReg(PPC::RM, RegState::ImplicitDefine);
12167 
12168     BuildMI(*BB, MI, dl, TII->get(PPC::MTFSB0))
12169         .addImm(30)
12170         .addReg(PPC::RM, RegState::ImplicitDefine);
12171 
12172     // Perform addition.
12173     BuildMI(*BB, MI, dl, TII->get(PPC::FADD), Dest).addReg(Src1).addReg(Src2);
12174 
12175     // Restore FPSCR value.
12176     BuildMI(*BB, MI, dl, TII->get(PPC::MTFSFb)).addImm(1).addReg(MFFSReg);
12177   } else if (MI.getOpcode() == PPC::ANDI_rec_1_EQ_BIT ||
12178              MI.getOpcode() == PPC::ANDI_rec_1_GT_BIT ||
12179              MI.getOpcode() == PPC::ANDI_rec_1_EQ_BIT8 ||
12180              MI.getOpcode() == PPC::ANDI_rec_1_GT_BIT8) {
12181     unsigned Opcode = (MI.getOpcode() == PPC::ANDI_rec_1_EQ_BIT8 ||
12182                        MI.getOpcode() == PPC::ANDI_rec_1_GT_BIT8)
12183                           ? PPC::ANDI8_rec
12184                           : PPC::ANDI_rec;
12185     bool IsEQ = (MI.getOpcode() == PPC::ANDI_rec_1_EQ_BIT ||
12186                  MI.getOpcode() == PPC::ANDI_rec_1_EQ_BIT8);
12187 
12188     MachineRegisterInfo &RegInfo = F->getRegInfo();
12189     Register Dest = RegInfo.createVirtualRegister(
12190         Opcode == PPC::ANDI_rec ? &PPC::GPRCRegClass : &PPC::G8RCRegClass);
12191 
12192     DebugLoc Dl = MI.getDebugLoc();
12193     BuildMI(*BB, MI, Dl, TII->get(Opcode), Dest)
12194         .addReg(MI.getOperand(1).getReg())
12195         .addImm(1);
12196     BuildMI(*BB, MI, Dl, TII->get(TargetOpcode::COPY),
12197             MI.getOperand(0).getReg())
12198         .addReg(IsEQ ? PPC::CR0EQ : PPC::CR0GT);
12199   } else if (MI.getOpcode() == PPC::TCHECK_RET) {
12200     DebugLoc Dl = MI.getDebugLoc();
12201     MachineRegisterInfo &RegInfo = F->getRegInfo();
12202     Register CRReg = RegInfo.createVirtualRegister(&PPC::CRRCRegClass);
12203     BuildMI(*BB, MI, Dl, TII->get(PPC::TCHECK), CRReg);
12204     BuildMI(*BB, MI, Dl, TII->get(TargetOpcode::COPY),
12205             MI.getOperand(0).getReg())
12206         .addReg(CRReg);
12207   } else if (MI.getOpcode() == PPC::TBEGIN_RET) {
12208     DebugLoc Dl = MI.getDebugLoc();
12209     unsigned Imm = MI.getOperand(1).getImm();
12210     BuildMI(*BB, MI, Dl, TII->get(PPC::TBEGIN)).addImm(Imm);
12211     BuildMI(*BB, MI, Dl, TII->get(TargetOpcode::COPY),
12212             MI.getOperand(0).getReg())
12213         .addReg(PPC::CR0EQ);
12214   } else if (MI.getOpcode() == PPC::SETRNDi) {
12215     DebugLoc dl = MI.getDebugLoc();
12216     Register OldFPSCRReg = MI.getOperand(0).getReg();
12217 
12218     // Save FPSCR value.
12219     BuildMI(*BB, MI, dl, TII->get(PPC::MFFS), OldFPSCRReg);
12220 
12221     // The floating point rounding mode is in the bits 62:63 of FPCSR, and has
12222     // the following settings:
12223     //   00 Round to nearest
12224     //   01 Round to 0
12225     //   10 Round to +inf
12226     //   11 Round to -inf
12227 
12228     // When the operand is immediate, using the two least significant bits of
12229     // the immediate to set the bits 62:63 of FPSCR.
12230     unsigned Mode = MI.getOperand(1).getImm();
12231     BuildMI(*BB, MI, dl, TII->get((Mode & 1) ? PPC::MTFSB1 : PPC::MTFSB0))
12232         .addImm(31)
12233         .addReg(PPC::RM, RegState::ImplicitDefine);
12234 
12235     BuildMI(*BB, MI, dl, TII->get((Mode & 2) ? PPC::MTFSB1 : PPC::MTFSB0))
12236         .addImm(30)
12237         .addReg(PPC::RM, RegState::ImplicitDefine);
12238   } else if (MI.getOpcode() == PPC::SETRND) {
12239     DebugLoc dl = MI.getDebugLoc();
12240 
12241     // Copy register from F8RCRegClass::SrcReg to G8RCRegClass::DestReg
12242     // or copy register from G8RCRegClass::SrcReg to F8RCRegClass::DestReg.
12243     // If the target doesn't have DirectMove, we should use stack to do the
12244     // conversion, because the target doesn't have the instructions like mtvsrd
12245     // or mfvsrd to do this conversion directly.
12246     auto copyRegFromG8RCOrF8RC = [&] (unsigned DestReg, unsigned SrcReg) {
12247       if (Subtarget.hasDirectMove()) {
12248         BuildMI(*BB, MI, dl, TII->get(TargetOpcode::COPY), DestReg)
12249           .addReg(SrcReg);
12250       } else {
12251         // Use stack to do the register copy.
12252         unsigned StoreOp = PPC::STD, LoadOp = PPC::LFD;
12253         MachineRegisterInfo &RegInfo = F->getRegInfo();
12254         const TargetRegisterClass *RC = RegInfo.getRegClass(SrcReg);
12255         if (RC == &PPC::F8RCRegClass) {
12256           // Copy register from F8RCRegClass to G8RCRegclass.
12257           assert((RegInfo.getRegClass(DestReg) == &PPC::G8RCRegClass) &&
12258                  "Unsupported RegClass.");
12259 
12260           StoreOp = PPC::STFD;
12261           LoadOp = PPC::LD;
12262         } else {
12263           // Copy register from G8RCRegClass to F8RCRegclass.
12264           assert((RegInfo.getRegClass(SrcReg) == &PPC::G8RCRegClass) &&
12265                  (RegInfo.getRegClass(DestReg) == &PPC::F8RCRegClass) &&
12266                  "Unsupported RegClass.");
12267         }
12268 
12269         MachineFrameInfo &MFI = F->getFrameInfo();
12270         int FrameIdx = MFI.CreateStackObject(8, Align(8), false);
12271 
12272         MachineMemOperand *MMOStore = F->getMachineMemOperand(
12273             MachinePointerInfo::getFixedStack(*F, FrameIdx, 0),
12274             MachineMemOperand::MOStore, MFI.getObjectSize(FrameIdx),
12275             MFI.getObjectAlign(FrameIdx));
12276 
12277         // Store the SrcReg into the stack.
12278         BuildMI(*BB, MI, dl, TII->get(StoreOp))
12279           .addReg(SrcReg)
12280           .addImm(0)
12281           .addFrameIndex(FrameIdx)
12282           .addMemOperand(MMOStore);
12283 
12284         MachineMemOperand *MMOLoad = F->getMachineMemOperand(
12285             MachinePointerInfo::getFixedStack(*F, FrameIdx, 0),
12286             MachineMemOperand::MOLoad, MFI.getObjectSize(FrameIdx),
12287             MFI.getObjectAlign(FrameIdx));
12288 
12289         // Load from the stack where SrcReg is stored, and save to DestReg,
12290         // so we have done the RegClass conversion from RegClass::SrcReg to
12291         // RegClass::DestReg.
12292         BuildMI(*BB, MI, dl, TII->get(LoadOp), DestReg)
12293           .addImm(0)
12294           .addFrameIndex(FrameIdx)
12295           .addMemOperand(MMOLoad);
12296       }
12297     };
12298 
12299     Register OldFPSCRReg = MI.getOperand(0).getReg();
12300 
12301     // Save FPSCR value.
12302     BuildMI(*BB, MI, dl, TII->get(PPC::MFFS), OldFPSCRReg);
12303 
12304     // When the operand is gprc register, use two least significant bits of the
12305     // register and mtfsf instruction to set the bits 62:63 of FPSCR.
12306     //
12307     // copy OldFPSCRTmpReg, OldFPSCRReg
12308     // (INSERT_SUBREG ExtSrcReg, (IMPLICIT_DEF ImDefReg), SrcOp, 1)
12309     // rldimi NewFPSCRTmpReg, ExtSrcReg, OldFPSCRReg, 0, 62
12310     // copy NewFPSCRReg, NewFPSCRTmpReg
12311     // mtfsf 255, NewFPSCRReg
12312     MachineOperand SrcOp = MI.getOperand(1);
12313     MachineRegisterInfo &RegInfo = F->getRegInfo();
12314     Register OldFPSCRTmpReg = RegInfo.createVirtualRegister(&PPC::G8RCRegClass);
12315 
12316     copyRegFromG8RCOrF8RC(OldFPSCRTmpReg, OldFPSCRReg);
12317 
12318     Register ImDefReg = RegInfo.createVirtualRegister(&PPC::G8RCRegClass);
12319     Register ExtSrcReg = RegInfo.createVirtualRegister(&PPC::G8RCRegClass);
12320 
12321     // The first operand of INSERT_SUBREG should be a register which has
12322     // subregisters, we only care about its RegClass, so we should use an
12323     // IMPLICIT_DEF register.
12324     BuildMI(*BB, MI, dl, TII->get(TargetOpcode::IMPLICIT_DEF), ImDefReg);
12325     BuildMI(*BB, MI, dl, TII->get(PPC::INSERT_SUBREG), ExtSrcReg)
12326       .addReg(ImDefReg)
12327       .add(SrcOp)
12328       .addImm(1);
12329 
12330     Register NewFPSCRTmpReg = RegInfo.createVirtualRegister(&PPC::G8RCRegClass);
12331     BuildMI(*BB, MI, dl, TII->get(PPC::RLDIMI), NewFPSCRTmpReg)
12332       .addReg(OldFPSCRTmpReg)
12333       .addReg(ExtSrcReg)
12334       .addImm(0)
12335       .addImm(62);
12336 
12337     Register NewFPSCRReg = RegInfo.createVirtualRegister(&PPC::F8RCRegClass);
12338     copyRegFromG8RCOrF8RC(NewFPSCRReg, NewFPSCRTmpReg);
12339 
12340     // The mask 255 means that put the 32:63 bits of NewFPSCRReg to the 32:63
12341     // bits of FPSCR.
12342     BuildMI(*BB, MI, dl, TII->get(PPC::MTFSF))
12343       .addImm(255)
12344       .addReg(NewFPSCRReg)
12345       .addImm(0)
12346       .addImm(0);
12347   } else if (MI.getOpcode() == PPC::SETFLM) {
12348     DebugLoc Dl = MI.getDebugLoc();
12349 
12350     // Result of setflm is previous FPSCR content, so we need to save it first.
12351     Register OldFPSCRReg = MI.getOperand(0).getReg();
12352     BuildMI(*BB, MI, Dl, TII->get(PPC::MFFS), OldFPSCRReg);
12353 
12354     // Put bits in 32:63 to FPSCR.
12355     Register NewFPSCRReg = MI.getOperand(1).getReg();
12356     BuildMI(*BB, MI, Dl, TII->get(PPC::MTFSF))
12357         .addImm(255)
12358         .addReg(NewFPSCRReg)
12359         .addImm(0)
12360         .addImm(0);
12361   } else if (MI.getOpcode() == PPC::PROBED_ALLOCA_32 ||
12362              MI.getOpcode() == PPC::PROBED_ALLOCA_64) {
12363     return emitProbedAlloca(MI, BB);
12364   } else {
12365     llvm_unreachable("Unexpected instr type to insert");
12366   }
12367 
12368   MI.eraseFromParent(); // The pseudo instruction is gone now.
12369   return BB;
12370 }
12371 
12372 //===----------------------------------------------------------------------===//
12373 // Target Optimization Hooks
12374 //===----------------------------------------------------------------------===//
12375 
12376 static int getEstimateRefinementSteps(EVT VT, const PPCSubtarget &Subtarget) {
12377   // For the estimates, convergence is quadratic, so we essentially double the
12378   // number of digits correct after every iteration. For both FRE and FRSQRTE,
12379   // the minimum architected relative accuracy is 2^-5. When hasRecipPrec(),
12380   // this is 2^-14. IEEE float has 23 digits and double has 52 digits.
12381   int RefinementSteps = Subtarget.hasRecipPrec() ? 1 : 3;
12382   if (VT.getScalarType() == MVT::f64)
12383     RefinementSteps++;
12384   return RefinementSteps;
12385 }
12386 
12387 SDValue PPCTargetLowering::getSqrtEstimate(SDValue Operand, SelectionDAG &DAG,
12388                                            int Enabled, int &RefinementSteps,
12389                                            bool &UseOneConstNR,
12390                                            bool Reciprocal) const {
12391   EVT VT = Operand.getValueType();
12392   if ((VT == MVT::f32 && Subtarget.hasFRSQRTES()) ||
12393       (VT == MVT::f64 && Subtarget.hasFRSQRTE()) ||
12394       (VT == MVT::v4f32 && Subtarget.hasAltivec()) ||
12395       (VT == MVT::v2f64 && Subtarget.hasVSX())) {
12396     if (RefinementSteps == ReciprocalEstimate::Unspecified)
12397       RefinementSteps = getEstimateRefinementSteps(VT, Subtarget);
12398 
12399     // The Newton-Raphson computation with a single constant does not provide
12400     // enough accuracy on some CPUs.
12401     UseOneConstNR = !Subtarget.needsTwoConstNR();
12402     return DAG.getNode(PPCISD::FRSQRTE, SDLoc(Operand), VT, Operand);
12403   }
12404   return SDValue();
12405 }
12406 
12407 SDValue PPCTargetLowering::getRecipEstimate(SDValue Operand, SelectionDAG &DAG,
12408                                             int Enabled,
12409                                             int &RefinementSteps) const {
12410   EVT VT = Operand.getValueType();
12411   if ((VT == MVT::f32 && Subtarget.hasFRES()) ||
12412       (VT == MVT::f64 && Subtarget.hasFRE()) ||
12413       (VT == MVT::v4f32 && Subtarget.hasAltivec()) ||
12414       (VT == MVT::v2f64 && Subtarget.hasVSX())) {
12415     if (RefinementSteps == ReciprocalEstimate::Unspecified)
12416       RefinementSteps = getEstimateRefinementSteps(VT, Subtarget);
12417     return DAG.getNode(PPCISD::FRE, SDLoc(Operand), VT, Operand);
12418   }
12419   return SDValue();
12420 }
12421 
12422 unsigned PPCTargetLowering::combineRepeatedFPDivisors() const {
12423   // Note: This functionality is used only when unsafe-fp-math is enabled, and
12424   // on cores with reciprocal estimates (which are used when unsafe-fp-math is
12425   // enabled for division), this functionality is redundant with the default
12426   // combiner logic (once the division -> reciprocal/multiply transformation
12427   // has taken place). As a result, this matters more for older cores than for
12428   // newer ones.
12429 
12430   // Combine multiple FDIVs with the same divisor into multiple FMULs by the
12431   // reciprocal if there are two or more FDIVs (for embedded cores with only
12432   // one FP pipeline) for three or more FDIVs (for generic OOO cores).
12433   switch (Subtarget.getCPUDirective()) {
12434   default:
12435     return 3;
12436   case PPC::DIR_440:
12437   case PPC::DIR_A2:
12438   case PPC::DIR_E500:
12439   case PPC::DIR_E500mc:
12440   case PPC::DIR_E5500:
12441     return 2;
12442   }
12443 }
12444 
12445 // isConsecutiveLSLoc needs to work even if all adds have not yet been
12446 // collapsed, and so we need to look through chains of them.
12447 static void getBaseWithConstantOffset(SDValue Loc, SDValue &Base,
12448                                      int64_t& Offset, SelectionDAG &DAG) {
12449   if (DAG.isBaseWithConstantOffset(Loc)) {
12450     Base = Loc.getOperand(0);
12451     Offset += cast<ConstantSDNode>(Loc.getOperand(1))->getSExtValue();
12452 
12453     // The base might itself be a base plus an offset, and if so, accumulate
12454     // that as well.
12455     getBaseWithConstantOffset(Loc.getOperand(0), Base, Offset, DAG);
12456   }
12457 }
12458 
12459 static bool isConsecutiveLSLoc(SDValue Loc, EVT VT, LSBaseSDNode *Base,
12460                             unsigned Bytes, int Dist,
12461                             SelectionDAG &DAG) {
12462   if (VT.getSizeInBits() / 8 != Bytes)
12463     return false;
12464 
12465   SDValue BaseLoc = Base->getBasePtr();
12466   if (Loc.getOpcode() == ISD::FrameIndex) {
12467     if (BaseLoc.getOpcode() != ISD::FrameIndex)
12468       return false;
12469     const MachineFrameInfo &MFI = DAG.getMachineFunction().getFrameInfo();
12470     int FI  = cast<FrameIndexSDNode>(Loc)->getIndex();
12471     int BFI = cast<FrameIndexSDNode>(BaseLoc)->getIndex();
12472     int FS  = MFI.getObjectSize(FI);
12473     int BFS = MFI.getObjectSize(BFI);
12474     if (FS != BFS || FS != (int)Bytes) return false;
12475     return MFI.getObjectOffset(FI) == (MFI.getObjectOffset(BFI) + Dist*Bytes);
12476   }
12477 
12478   SDValue Base1 = Loc, Base2 = BaseLoc;
12479   int64_t Offset1 = 0, Offset2 = 0;
12480   getBaseWithConstantOffset(Loc, Base1, Offset1, DAG);
12481   getBaseWithConstantOffset(BaseLoc, Base2, Offset2, DAG);
12482   if (Base1 == Base2 && Offset1 == (Offset2 + Dist * Bytes))
12483     return true;
12484 
12485   const TargetLowering &TLI = DAG.getTargetLoweringInfo();
12486   const GlobalValue *GV1 = nullptr;
12487   const GlobalValue *GV2 = nullptr;
12488   Offset1 = 0;
12489   Offset2 = 0;
12490   bool isGA1 = TLI.isGAPlusOffset(Loc.getNode(), GV1, Offset1);
12491   bool isGA2 = TLI.isGAPlusOffset(BaseLoc.getNode(), GV2, Offset2);
12492   if (isGA1 && isGA2 && GV1 == GV2)
12493     return Offset1 == (Offset2 + Dist*Bytes);
12494   return false;
12495 }
12496 
12497 // Like SelectionDAG::isConsecutiveLoad, but also works for stores, and does
12498 // not enforce equality of the chain operands.
12499 static bool isConsecutiveLS(SDNode *N, LSBaseSDNode *Base,
12500                             unsigned Bytes, int Dist,
12501                             SelectionDAG &DAG) {
12502   if (LSBaseSDNode *LS = dyn_cast<LSBaseSDNode>(N)) {
12503     EVT VT = LS->getMemoryVT();
12504     SDValue Loc = LS->getBasePtr();
12505     return isConsecutiveLSLoc(Loc, VT, Base, Bytes, Dist, DAG);
12506   }
12507 
12508   if (N->getOpcode() == ISD::INTRINSIC_W_CHAIN) {
12509     EVT VT;
12510     switch (cast<ConstantSDNode>(N->getOperand(1))->getZExtValue()) {
12511     default: return false;
12512     case Intrinsic::ppc_altivec_lvx:
12513     case Intrinsic::ppc_altivec_lvxl:
12514     case Intrinsic::ppc_vsx_lxvw4x:
12515     case Intrinsic::ppc_vsx_lxvw4x_be:
12516       VT = MVT::v4i32;
12517       break;
12518     case Intrinsic::ppc_vsx_lxvd2x:
12519     case Intrinsic::ppc_vsx_lxvd2x_be:
12520       VT = MVT::v2f64;
12521       break;
12522     case Intrinsic::ppc_altivec_lvebx:
12523       VT = MVT::i8;
12524       break;
12525     case Intrinsic::ppc_altivec_lvehx:
12526       VT = MVT::i16;
12527       break;
12528     case Intrinsic::ppc_altivec_lvewx:
12529       VT = MVT::i32;
12530       break;
12531     }
12532 
12533     return isConsecutiveLSLoc(N->getOperand(2), VT, Base, Bytes, Dist, DAG);
12534   }
12535 
12536   if (N->getOpcode() == ISD::INTRINSIC_VOID) {
12537     EVT VT;
12538     switch (cast<ConstantSDNode>(N->getOperand(1))->getZExtValue()) {
12539     default: return false;
12540     case Intrinsic::ppc_altivec_stvx:
12541     case Intrinsic::ppc_altivec_stvxl:
12542     case Intrinsic::ppc_vsx_stxvw4x:
12543       VT = MVT::v4i32;
12544       break;
12545     case Intrinsic::ppc_vsx_stxvd2x:
12546       VT = MVT::v2f64;
12547       break;
12548     case Intrinsic::ppc_vsx_stxvw4x_be:
12549       VT = MVT::v4i32;
12550       break;
12551     case Intrinsic::ppc_vsx_stxvd2x_be:
12552       VT = MVT::v2f64;
12553       break;
12554     case Intrinsic::ppc_altivec_stvebx:
12555       VT = MVT::i8;
12556       break;
12557     case Intrinsic::ppc_altivec_stvehx:
12558       VT = MVT::i16;
12559       break;
12560     case Intrinsic::ppc_altivec_stvewx:
12561       VT = MVT::i32;
12562       break;
12563     }
12564 
12565     return isConsecutiveLSLoc(N->getOperand(3), VT, Base, Bytes, Dist, DAG);
12566   }
12567 
12568   return false;
12569 }
12570 
12571 // Return true is there is a nearyby consecutive load to the one provided
12572 // (regardless of alignment). We search up and down the chain, looking though
12573 // token factors and other loads (but nothing else). As a result, a true result
12574 // indicates that it is safe to create a new consecutive load adjacent to the
12575 // load provided.
12576 static bool findConsecutiveLoad(LoadSDNode *LD, SelectionDAG &DAG) {
12577   SDValue Chain = LD->getChain();
12578   EVT VT = LD->getMemoryVT();
12579 
12580   SmallSet<SDNode *, 16> LoadRoots;
12581   SmallVector<SDNode *, 8> Queue(1, Chain.getNode());
12582   SmallSet<SDNode *, 16> Visited;
12583 
12584   // First, search up the chain, branching to follow all token-factor operands.
12585   // If we find a consecutive load, then we're done, otherwise, record all
12586   // nodes just above the top-level loads and token factors.
12587   while (!Queue.empty()) {
12588     SDNode *ChainNext = Queue.pop_back_val();
12589     if (!Visited.insert(ChainNext).second)
12590       continue;
12591 
12592     if (MemSDNode *ChainLD = dyn_cast<MemSDNode>(ChainNext)) {
12593       if (isConsecutiveLS(ChainLD, LD, VT.getStoreSize(), 1, DAG))
12594         return true;
12595 
12596       if (!Visited.count(ChainLD->getChain().getNode()))
12597         Queue.push_back(ChainLD->getChain().getNode());
12598     } else if (ChainNext->getOpcode() == ISD::TokenFactor) {
12599       for (const SDUse &O : ChainNext->ops())
12600         if (!Visited.count(O.getNode()))
12601           Queue.push_back(O.getNode());
12602     } else
12603       LoadRoots.insert(ChainNext);
12604   }
12605 
12606   // Second, search down the chain, starting from the top-level nodes recorded
12607   // in the first phase. These top-level nodes are the nodes just above all
12608   // loads and token factors. Starting with their uses, recursively look though
12609   // all loads (just the chain uses) and token factors to find a consecutive
12610   // load.
12611   Visited.clear();
12612   Queue.clear();
12613 
12614   for (SmallSet<SDNode *, 16>::iterator I = LoadRoots.begin(),
12615        IE = LoadRoots.end(); I != IE; ++I) {
12616     Queue.push_back(*I);
12617 
12618     while (!Queue.empty()) {
12619       SDNode *LoadRoot = Queue.pop_back_val();
12620       if (!Visited.insert(LoadRoot).second)
12621         continue;
12622 
12623       if (MemSDNode *ChainLD = dyn_cast<MemSDNode>(LoadRoot))
12624         if (isConsecutiveLS(ChainLD, LD, VT.getStoreSize(), 1, DAG))
12625           return true;
12626 
12627       for (SDNode::use_iterator UI = LoadRoot->use_begin(),
12628            UE = LoadRoot->use_end(); UI != UE; ++UI)
12629         if (((isa<MemSDNode>(*UI) &&
12630             cast<MemSDNode>(*UI)->getChain().getNode() == LoadRoot) ||
12631             UI->getOpcode() == ISD::TokenFactor) && !Visited.count(*UI))
12632           Queue.push_back(*UI);
12633     }
12634   }
12635 
12636   return false;
12637 }
12638 
12639 /// This function is called when we have proved that a SETCC node can be replaced
12640 /// by subtraction (and other supporting instructions) so that the result of
12641 /// comparison is kept in a GPR instead of CR. This function is purely for
12642 /// codegen purposes and has some flags to guide the codegen process.
12643 static SDValue generateEquivalentSub(SDNode *N, int Size, bool Complement,
12644                                      bool Swap, SDLoc &DL, SelectionDAG &DAG) {
12645   assert(N->getOpcode() == ISD::SETCC && "ISD::SETCC Expected.");
12646 
12647   // Zero extend the operands to the largest legal integer. Originally, they
12648   // must be of a strictly smaller size.
12649   auto Op0 = DAG.getNode(ISD::ZERO_EXTEND, DL, MVT::i64, N->getOperand(0),
12650                          DAG.getConstant(Size, DL, MVT::i32));
12651   auto Op1 = DAG.getNode(ISD::ZERO_EXTEND, DL, MVT::i64, N->getOperand(1),
12652                          DAG.getConstant(Size, DL, MVT::i32));
12653 
12654   // Swap if needed. Depends on the condition code.
12655   if (Swap)
12656     std::swap(Op0, Op1);
12657 
12658   // Subtract extended integers.
12659   auto SubNode = DAG.getNode(ISD::SUB, DL, MVT::i64, Op0, Op1);
12660 
12661   // Move the sign bit to the least significant position and zero out the rest.
12662   // Now the least significant bit carries the result of original comparison.
12663   auto Shifted = DAG.getNode(ISD::SRL, DL, MVT::i64, SubNode,
12664                              DAG.getConstant(Size - 1, DL, MVT::i32));
12665   auto Final = Shifted;
12666 
12667   // Complement the result if needed. Based on the condition code.
12668   if (Complement)
12669     Final = DAG.getNode(ISD::XOR, DL, MVT::i64, Shifted,
12670                         DAG.getConstant(1, DL, MVT::i64));
12671 
12672   return DAG.getNode(ISD::TRUNCATE, DL, MVT::i1, Final);
12673 }
12674 
12675 SDValue PPCTargetLowering::ConvertSETCCToSubtract(SDNode *N,
12676                                                   DAGCombinerInfo &DCI) const {
12677   assert(N->getOpcode() == ISD::SETCC && "ISD::SETCC Expected.");
12678 
12679   SelectionDAG &DAG = DCI.DAG;
12680   SDLoc DL(N);
12681 
12682   // Size of integers being compared has a critical role in the following
12683   // analysis, so we prefer to do this when all types are legal.
12684   if (!DCI.isAfterLegalizeDAG())
12685     return SDValue();
12686 
12687   // If all users of SETCC extend its value to a legal integer type
12688   // then we replace SETCC with a subtraction
12689   for (SDNode::use_iterator UI = N->use_begin(),
12690        UE = N->use_end(); UI != UE; ++UI) {
12691     if (UI->getOpcode() != ISD::ZERO_EXTEND)
12692       return SDValue();
12693   }
12694 
12695   ISD::CondCode CC = cast<CondCodeSDNode>(N->getOperand(2))->get();
12696   auto OpSize = N->getOperand(0).getValueSizeInBits();
12697 
12698   unsigned Size = DAG.getDataLayout().getLargestLegalIntTypeSizeInBits();
12699 
12700   if (OpSize < Size) {
12701     switch (CC) {
12702     default: break;
12703     case ISD::SETULT:
12704       return generateEquivalentSub(N, Size, false, false, DL, DAG);
12705     case ISD::SETULE:
12706       return generateEquivalentSub(N, Size, true, true, DL, DAG);
12707     case ISD::SETUGT:
12708       return generateEquivalentSub(N, Size, false, true, DL, DAG);
12709     case ISD::SETUGE:
12710       return generateEquivalentSub(N, Size, true, false, DL, DAG);
12711     }
12712   }
12713 
12714   return SDValue();
12715 }
12716 
12717 SDValue PPCTargetLowering::DAGCombineTruncBoolExt(SDNode *N,
12718                                                   DAGCombinerInfo &DCI) const {
12719   SelectionDAG &DAG = DCI.DAG;
12720   SDLoc dl(N);
12721 
12722   assert(Subtarget.useCRBits() && "Expecting to be tracking CR bits");
12723   // If we're tracking CR bits, we need to be careful that we don't have:
12724   //   trunc(binary-ops(zext(x), zext(y)))
12725   // or
12726   //   trunc(binary-ops(binary-ops(zext(x), zext(y)), ...)
12727   // such that we're unnecessarily moving things into GPRs when it would be
12728   // better to keep them in CR bits.
12729 
12730   // Note that trunc here can be an actual i1 trunc, or can be the effective
12731   // truncation that comes from a setcc or select_cc.
12732   if (N->getOpcode() == ISD::TRUNCATE &&
12733       N->getValueType(0) != MVT::i1)
12734     return SDValue();
12735 
12736   if (N->getOperand(0).getValueType() != MVT::i32 &&
12737       N->getOperand(0).getValueType() != MVT::i64)
12738     return SDValue();
12739 
12740   if (N->getOpcode() == ISD::SETCC ||
12741       N->getOpcode() == ISD::SELECT_CC) {
12742     // If we're looking at a comparison, then we need to make sure that the
12743     // high bits (all except for the first) don't matter the result.
12744     ISD::CondCode CC =
12745       cast<CondCodeSDNode>(N->getOperand(
12746         N->getOpcode() == ISD::SETCC ? 2 : 4))->get();
12747     unsigned OpBits = N->getOperand(0).getValueSizeInBits();
12748 
12749     if (ISD::isSignedIntSetCC(CC)) {
12750       if (DAG.ComputeNumSignBits(N->getOperand(0)) != OpBits ||
12751           DAG.ComputeNumSignBits(N->getOperand(1)) != OpBits)
12752         return SDValue();
12753     } else if (ISD::isUnsignedIntSetCC(CC)) {
12754       if (!DAG.MaskedValueIsZero(N->getOperand(0),
12755                                  APInt::getHighBitsSet(OpBits, OpBits-1)) ||
12756           !DAG.MaskedValueIsZero(N->getOperand(1),
12757                                  APInt::getHighBitsSet(OpBits, OpBits-1)))
12758         return (N->getOpcode() == ISD::SETCC ? ConvertSETCCToSubtract(N, DCI)
12759                                              : SDValue());
12760     } else {
12761       // This is neither a signed nor an unsigned comparison, just make sure
12762       // that the high bits are equal.
12763       KnownBits Op1Known = DAG.computeKnownBits(N->getOperand(0));
12764       KnownBits Op2Known = DAG.computeKnownBits(N->getOperand(1));
12765 
12766       // We don't really care about what is known about the first bit (if
12767       // anything), so clear it in all masks prior to comparing them.
12768       Op1Known.Zero.clearBit(0); Op1Known.One.clearBit(0);
12769       Op2Known.Zero.clearBit(0); Op2Known.One.clearBit(0);
12770 
12771       if (Op1Known.Zero != Op2Known.Zero || Op1Known.One != Op2Known.One)
12772         return SDValue();
12773     }
12774   }
12775 
12776   // We now know that the higher-order bits are irrelevant, we just need to
12777   // make sure that all of the intermediate operations are bit operations, and
12778   // all inputs are extensions.
12779   if (N->getOperand(0).getOpcode() != ISD::AND &&
12780       N->getOperand(0).getOpcode() != ISD::OR  &&
12781       N->getOperand(0).getOpcode() != ISD::XOR &&
12782       N->getOperand(0).getOpcode() != ISD::SELECT &&
12783       N->getOperand(0).getOpcode() != ISD::SELECT_CC &&
12784       N->getOperand(0).getOpcode() != ISD::TRUNCATE &&
12785       N->getOperand(0).getOpcode() != ISD::SIGN_EXTEND &&
12786       N->getOperand(0).getOpcode() != ISD::ZERO_EXTEND &&
12787       N->getOperand(0).getOpcode() != ISD::ANY_EXTEND)
12788     return SDValue();
12789 
12790   if ((N->getOpcode() == ISD::SETCC || N->getOpcode() == ISD::SELECT_CC) &&
12791       N->getOperand(1).getOpcode() != ISD::AND &&
12792       N->getOperand(1).getOpcode() != ISD::OR  &&
12793       N->getOperand(1).getOpcode() != ISD::XOR &&
12794       N->getOperand(1).getOpcode() != ISD::SELECT &&
12795       N->getOperand(1).getOpcode() != ISD::SELECT_CC &&
12796       N->getOperand(1).getOpcode() != ISD::TRUNCATE &&
12797       N->getOperand(1).getOpcode() != ISD::SIGN_EXTEND &&
12798       N->getOperand(1).getOpcode() != ISD::ZERO_EXTEND &&
12799       N->getOperand(1).getOpcode() != ISD::ANY_EXTEND)
12800     return SDValue();
12801 
12802   SmallVector<SDValue, 4> Inputs;
12803   SmallVector<SDValue, 8> BinOps, PromOps;
12804   SmallPtrSet<SDNode *, 16> Visited;
12805 
12806   for (unsigned i = 0; i < 2; ++i) {
12807     if (((N->getOperand(i).getOpcode() == ISD::SIGN_EXTEND ||
12808           N->getOperand(i).getOpcode() == ISD::ZERO_EXTEND ||
12809           N->getOperand(i).getOpcode() == ISD::ANY_EXTEND) &&
12810           N->getOperand(i).getOperand(0).getValueType() == MVT::i1) ||
12811         isa<ConstantSDNode>(N->getOperand(i)))
12812       Inputs.push_back(N->getOperand(i));
12813     else
12814       BinOps.push_back(N->getOperand(i));
12815 
12816     if (N->getOpcode() == ISD::TRUNCATE)
12817       break;
12818   }
12819 
12820   // Visit all inputs, collect all binary operations (and, or, xor and
12821   // select) that are all fed by extensions.
12822   while (!BinOps.empty()) {
12823     SDValue BinOp = BinOps.back();
12824     BinOps.pop_back();
12825 
12826     if (!Visited.insert(BinOp.getNode()).second)
12827       continue;
12828 
12829     PromOps.push_back(BinOp);
12830 
12831     for (unsigned i = 0, ie = BinOp.getNumOperands(); i != ie; ++i) {
12832       // The condition of the select is not promoted.
12833       if (BinOp.getOpcode() == ISD::SELECT && i == 0)
12834         continue;
12835       if (BinOp.getOpcode() == ISD::SELECT_CC && i != 2 && i != 3)
12836         continue;
12837 
12838       if (((BinOp.getOperand(i).getOpcode() == ISD::SIGN_EXTEND ||
12839             BinOp.getOperand(i).getOpcode() == ISD::ZERO_EXTEND ||
12840             BinOp.getOperand(i).getOpcode() == ISD::ANY_EXTEND) &&
12841            BinOp.getOperand(i).getOperand(0).getValueType() == MVT::i1) ||
12842           isa<ConstantSDNode>(BinOp.getOperand(i))) {
12843         Inputs.push_back(BinOp.getOperand(i));
12844       } else if (BinOp.getOperand(i).getOpcode() == ISD::AND ||
12845                  BinOp.getOperand(i).getOpcode() == ISD::OR  ||
12846                  BinOp.getOperand(i).getOpcode() == ISD::XOR ||
12847                  BinOp.getOperand(i).getOpcode() == ISD::SELECT ||
12848                  BinOp.getOperand(i).getOpcode() == ISD::SELECT_CC ||
12849                  BinOp.getOperand(i).getOpcode() == ISD::TRUNCATE ||
12850                  BinOp.getOperand(i).getOpcode() == ISD::SIGN_EXTEND ||
12851                  BinOp.getOperand(i).getOpcode() == ISD::ZERO_EXTEND ||
12852                  BinOp.getOperand(i).getOpcode() == ISD::ANY_EXTEND) {
12853         BinOps.push_back(BinOp.getOperand(i));
12854       } else {
12855         // We have an input that is not an extension or another binary
12856         // operation; we'll abort this transformation.
12857         return SDValue();
12858       }
12859     }
12860   }
12861 
12862   // Make sure that this is a self-contained cluster of operations (which
12863   // is not quite the same thing as saying that everything has only one
12864   // use).
12865   for (unsigned i = 0, ie = Inputs.size(); i != ie; ++i) {
12866     if (isa<ConstantSDNode>(Inputs[i]))
12867       continue;
12868 
12869     for (SDNode::use_iterator UI = Inputs[i].getNode()->use_begin(),
12870                               UE = Inputs[i].getNode()->use_end();
12871          UI != UE; ++UI) {
12872       SDNode *User = *UI;
12873       if (User != N && !Visited.count(User))
12874         return SDValue();
12875 
12876       // Make sure that we're not going to promote the non-output-value
12877       // operand(s) or SELECT or SELECT_CC.
12878       // FIXME: Although we could sometimes handle this, and it does occur in
12879       // practice that one of the condition inputs to the select is also one of
12880       // the outputs, we currently can't deal with this.
12881       if (User->getOpcode() == ISD::SELECT) {
12882         if (User->getOperand(0) == Inputs[i])
12883           return SDValue();
12884       } else if (User->getOpcode() == ISD::SELECT_CC) {
12885         if (User->getOperand(0) == Inputs[i] ||
12886             User->getOperand(1) == Inputs[i])
12887           return SDValue();
12888       }
12889     }
12890   }
12891 
12892   for (unsigned i = 0, ie = PromOps.size(); i != ie; ++i) {
12893     for (SDNode::use_iterator UI = PromOps[i].getNode()->use_begin(),
12894                               UE = PromOps[i].getNode()->use_end();
12895          UI != UE; ++UI) {
12896       SDNode *User = *UI;
12897       if (User != N && !Visited.count(User))
12898         return SDValue();
12899 
12900       // Make sure that we're not going to promote the non-output-value
12901       // operand(s) or SELECT or SELECT_CC.
12902       // FIXME: Although we could sometimes handle this, and it does occur in
12903       // practice that one of the condition inputs to the select is also one of
12904       // the outputs, we currently can't deal with this.
12905       if (User->getOpcode() == ISD::SELECT) {
12906         if (User->getOperand(0) == PromOps[i])
12907           return SDValue();
12908       } else if (User->getOpcode() == ISD::SELECT_CC) {
12909         if (User->getOperand(0) == PromOps[i] ||
12910             User->getOperand(1) == PromOps[i])
12911           return SDValue();
12912       }
12913     }
12914   }
12915 
12916   // Replace all inputs with the extension operand.
12917   for (unsigned i = 0, ie = Inputs.size(); i != ie; ++i) {
12918     // Constants may have users outside the cluster of to-be-promoted nodes,
12919     // and so we need to replace those as we do the promotions.
12920     if (isa<ConstantSDNode>(Inputs[i]))
12921       continue;
12922     else
12923       DAG.ReplaceAllUsesOfValueWith(Inputs[i], Inputs[i].getOperand(0));
12924   }
12925 
12926   std::list<HandleSDNode> PromOpHandles;
12927   for (auto &PromOp : PromOps)
12928     PromOpHandles.emplace_back(PromOp);
12929 
12930   // Replace all operations (these are all the same, but have a different
12931   // (i1) return type). DAG.getNode will validate that the types of
12932   // a binary operator match, so go through the list in reverse so that
12933   // we've likely promoted both operands first. Any intermediate truncations or
12934   // extensions disappear.
12935   while (!PromOpHandles.empty()) {
12936     SDValue PromOp = PromOpHandles.back().getValue();
12937     PromOpHandles.pop_back();
12938 
12939     if (PromOp.getOpcode() == ISD::TRUNCATE ||
12940         PromOp.getOpcode() == ISD::SIGN_EXTEND ||
12941         PromOp.getOpcode() == ISD::ZERO_EXTEND ||
12942         PromOp.getOpcode() == ISD::ANY_EXTEND) {
12943       if (!isa<ConstantSDNode>(PromOp.getOperand(0)) &&
12944           PromOp.getOperand(0).getValueType() != MVT::i1) {
12945         // The operand is not yet ready (see comment below).
12946         PromOpHandles.emplace_front(PromOp);
12947         continue;
12948       }
12949 
12950       SDValue RepValue = PromOp.getOperand(0);
12951       if (isa<ConstantSDNode>(RepValue))
12952         RepValue = DAG.getNode(ISD::TRUNCATE, dl, MVT::i1, RepValue);
12953 
12954       DAG.ReplaceAllUsesOfValueWith(PromOp, RepValue);
12955       continue;
12956     }
12957 
12958     unsigned C;
12959     switch (PromOp.getOpcode()) {
12960     default:             C = 0; break;
12961     case ISD::SELECT:    C = 1; break;
12962     case ISD::SELECT_CC: C = 2; break;
12963     }
12964 
12965     if ((!isa<ConstantSDNode>(PromOp.getOperand(C)) &&
12966          PromOp.getOperand(C).getValueType() != MVT::i1) ||
12967         (!isa<ConstantSDNode>(PromOp.getOperand(C+1)) &&
12968          PromOp.getOperand(C+1).getValueType() != MVT::i1)) {
12969       // The to-be-promoted operands of this node have not yet been
12970       // promoted (this should be rare because we're going through the
12971       // list backward, but if one of the operands has several users in
12972       // this cluster of to-be-promoted nodes, it is possible).
12973       PromOpHandles.emplace_front(PromOp);
12974       continue;
12975     }
12976 
12977     SmallVector<SDValue, 3> Ops(PromOp.getNode()->op_begin(),
12978                                 PromOp.getNode()->op_end());
12979 
12980     // If there are any constant inputs, make sure they're replaced now.
12981     for (unsigned i = 0; i < 2; ++i)
12982       if (isa<ConstantSDNode>(Ops[C+i]))
12983         Ops[C+i] = DAG.getNode(ISD::TRUNCATE, dl, MVT::i1, Ops[C+i]);
12984 
12985     DAG.ReplaceAllUsesOfValueWith(PromOp,
12986       DAG.getNode(PromOp.getOpcode(), dl, MVT::i1, Ops));
12987   }
12988 
12989   // Now we're left with the initial truncation itself.
12990   if (N->getOpcode() == ISD::TRUNCATE)
12991     return N->getOperand(0);
12992 
12993   // Otherwise, this is a comparison. The operands to be compared have just
12994   // changed type (to i1), but everything else is the same.
12995   return SDValue(N, 0);
12996 }
12997 
12998 SDValue PPCTargetLowering::DAGCombineExtBoolTrunc(SDNode *N,
12999                                                   DAGCombinerInfo &DCI) const {
13000   SelectionDAG &DAG = DCI.DAG;
13001   SDLoc dl(N);
13002 
13003   // If we're tracking CR bits, we need to be careful that we don't have:
13004   //   zext(binary-ops(trunc(x), trunc(y)))
13005   // or
13006   //   zext(binary-ops(binary-ops(trunc(x), trunc(y)), ...)
13007   // such that we're unnecessarily moving things into CR bits that can more
13008   // efficiently stay in GPRs. Note that if we're not certain that the high
13009   // bits are set as required by the final extension, we still may need to do
13010   // some masking to get the proper behavior.
13011 
13012   // This same functionality is important on PPC64 when dealing with
13013   // 32-to-64-bit extensions; these occur often when 32-bit values are used as
13014   // the return values of functions. Because it is so similar, it is handled
13015   // here as well.
13016 
13017   if (N->getValueType(0) != MVT::i32 &&
13018       N->getValueType(0) != MVT::i64)
13019     return SDValue();
13020 
13021   if (!((N->getOperand(0).getValueType() == MVT::i1 && Subtarget.useCRBits()) ||
13022         (N->getOperand(0).getValueType() == MVT::i32 && Subtarget.isPPC64())))
13023     return SDValue();
13024 
13025   if (N->getOperand(0).getOpcode() != ISD::AND &&
13026       N->getOperand(0).getOpcode() != ISD::OR  &&
13027       N->getOperand(0).getOpcode() != ISD::XOR &&
13028       N->getOperand(0).getOpcode() != ISD::SELECT &&
13029       N->getOperand(0).getOpcode() != ISD::SELECT_CC)
13030     return SDValue();
13031 
13032   SmallVector<SDValue, 4> Inputs;
13033   SmallVector<SDValue, 8> BinOps(1, N->getOperand(0)), PromOps;
13034   SmallPtrSet<SDNode *, 16> Visited;
13035 
13036   // Visit all inputs, collect all binary operations (and, or, xor and
13037   // select) that are all fed by truncations.
13038   while (!BinOps.empty()) {
13039     SDValue BinOp = BinOps.back();
13040     BinOps.pop_back();
13041 
13042     if (!Visited.insert(BinOp.getNode()).second)
13043       continue;
13044 
13045     PromOps.push_back(BinOp);
13046 
13047     for (unsigned i = 0, ie = BinOp.getNumOperands(); i != ie; ++i) {
13048       // The condition of the select is not promoted.
13049       if (BinOp.getOpcode() == ISD::SELECT && i == 0)
13050         continue;
13051       if (BinOp.getOpcode() == ISD::SELECT_CC && i != 2 && i != 3)
13052         continue;
13053 
13054       if (BinOp.getOperand(i).getOpcode() == ISD::TRUNCATE ||
13055           isa<ConstantSDNode>(BinOp.getOperand(i))) {
13056         Inputs.push_back(BinOp.getOperand(i));
13057       } else if (BinOp.getOperand(i).getOpcode() == ISD::AND ||
13058                  BinOp.getOperand(i).getOpcode() == ISD::OR  ||
13059                  BinOp.getOperand(i).getOpcode() == ISD::XOR ||
13060                  BinOp.getOperand(i).getOpcode() == ISD::SELECT ||
13061                  BinOp.getOperand(i).getOpcode() == ISD::SELECT_CC) {
13062         BinOps.push_back(BinOp.getOperand(i));
13063       } else {
13064         // We have an input that is not a truncation or another binary
13065         // operation; we'll abort this transformation.
13066         return SDValue();
13067       }
13068     }
13069   }
13070 
13071   // The operands of a select that must be truncated when the select is
13072   // promoted because the operand is actually part of the to-be-promoted set.
13073   DenseMap<SDNode *, EVT> SelectTruncOp[2];
13074 
13075   // Make sure that this is a self-contained cluster of operations (which
13076   // is not quite the same thing as saying that everything has only one
13077   // use).
13078   for (unsigned i = 0, ie = Inputs.size(); i != ie; ++i) {
13079     if (isa<ConstantSDNode>(Inputs[i]))
13080       continue;
13081 
13082     for (SDNode::use_iterator UI = Inputs[i].getNode()->use_begin(),
13083                               UE = Inputs[i].getNode()->use_end();
13084          UI != UE; ++UI) {
13085       SDNode *User = *UI;
13086       if (User != N && !Visited.count(User))
13087         return SDValue();
13088 
13089       // If we're going to promote the non-output-value operand(s) or SELECT or
13090       // SELECT_CC, record them for truncation.
13091       if (User->getOpcode() == ISD::SELECT) {
13092         if (User->getOperand(0) == Inputs[i])
13093           SelectTruncOp[0].insert(std::make_pair(User,
13094                                     User->getOperand(0).getValueType()));
13095       } else if (User->getOpcode() == ISD::SELECT_CC) {
13096         if (User->getOperand(0) == Inputs[i])
13097           SelectTruncOp[0].insert(std::make_pair(User,
13098                                     User->getOperand(0).getValueType()));
13099         if (User->getOperand(1) == Inputs[i])
13100           SelectTruncOp[1].insert(std::make_pair(User,
13101                                     User->getOperand(1).getValueType()));
13102       }
13103     }
13104   }
13105 
13106   for (unsigned i = 0, ie = PromOps.size(); i != ie; ++i) {
13107     for (SDNode::use_iterator UI = PromOps[i].getNode()->use_begin(),
13108                               UE = PromOps[i].getNode()->use_end();
13109          UI != UE; ++UI) {
13110       SDNode *User = *UI;
13111       if (User != N && !Visited.count(User))
13112         return SDValue();
13113 
13114       // If we're going to promote the non-output-value operand(s) or SELECT or
13115       // SELECT_CC, record them for truncation.
13116       if (User->getOpcode() == ISD::SELECT) {
13117         if (User->getOperand(0) == PromOps[i])
13118           SelectTruncOp[0].insert(std::make_pair(User,
13119                                     User->getOperand(0).getValueType()));
13120       } else if (User->getOpcode() == ISD::SELECT_CC) {
13121         if (User->getOperand(0) == PromOps[i])
13122           SelectTruncOp[0].insert(std::make_pair(User,
13123                                     User->getOperand(0).getValueType()));
13124         if (User->getOperand(1) == PromOps[i])
13125           SelectTruncOp[1].insert(std::make_pair(User,
13126                                     User->getOperand(1).getValueType()));
13127       }
13128     }
13129   }
13130 
13131   unsigned PromBits = N->getOperand(0).getValueSizeInBits();
13132   bool ReallyNeedsExt = false;
13133   if (N->getOpcode() != ISD::ANY_EXTEND) {
13134     // If all of the inputs are not already sign/zero extended, then
13135     // we'll still need to do that at the end.
13136     for (unsigned i = 0, ie = Inputs.size(); i != ie; ++i) {
13137       if (isa<ConstantSDNode>(Inputs[i]))
13138         continue;
13139 
13140       unsigned OpBits =
13141         Inputs[i].getOperand(0).getValueSizeInBits();
13142       assert(PromBits < OpBits && "Truncation not to a smaller bit count?");
13143 
13144       if ((N->getOpcode() == ISD::ZERO_EXTEND &&
13145            !DAG.MaskedValueIsZero(Inputs[i].getOperand(0),
13146                                   APInt::getHighBitsSet(OpBits,
13147                                                         OpBits-PromBits))) ||
13148           (N->getOpcode() == ISD::SIGN_EXTEND &&
13149            DAG.ComputeNumSignBits(Inputs[i].getOperand(0)) <
13150              (OpBits-(PromBits-1)))) {
13151         ReallyNeedsExt = true;
13152         break;
13153       }
13154     }
13155   }
13156 
13157   // Replace all inputs, either with the truncation operand, or a
13158   // truncation or extension to the final output type.
13159   for (unsigned i = 0, ie = Inputs.size(); i != ie; ++i) {
13160     // Constant inputs need to be replaced with the to-be-promoted nodes that
13161     // use them because they might have users outside of the cluster of
13162     // promoted nodes.
13163     if (isa<ConstantSDNode>(Inputs[i]))
13164       continue;
13165 
13166     SDValue InSrc = Inputs[i].getOperand(0);
13167     if (Inputs[i].getValueType() == N->getValueType(0))
13168       DAG.ReplaceAllUsesOfValueWith(Inputs[i], InSrc);
13169     else if (N->getOpcode() == ISD::SIGN_EXTEND)
13170       DAG.ReplaceAllUsesOfValueWith(Inputs[i],
13171         DAG.getSExtOrTrunc(InSrc, dl, N->getValueType(0)));
13172     else if (N->getOpcode() == ISD::ZERO_EXTEND)
13173       DAG.ReplaceAllUsesOfValueWith(Inputs[i],
13174         DAG.getZExtOrTrunc(InSrc, dl, N->getValueType(0)));
13175     else
13176       DAG.ReplaceAllUsesOfValueWith(Inputs[i],
13177         DAG.getAnyExtOrTrunc(InSrc, dl, N->getValueType(0)));
13178   }
13179 
13180   std::list<HandleSDNode> PromOpHandles;
13181   for (auto &PromOp : PromOps)
13182     PromOpHandles.emplace_back(PromOp);
13183 
13184   // Replace all operations (these are all the same, but have a different
13185   // (promoted) return type). DAG.getNode will validate that the types of
13186   // a binary operator match, so go through the list in reverse so that
13187   // we've likely promoted both operands first.
13188   while (!PromOpHandles.empty()) {
13189     SDValue PromOp = PromOpHandles.back().getValue();
13190     PromOpHandles.pop_back();
13191 
13192     unsigned C;
13193     switch (PromOp.getOpcode()) {
13194     default:             C = 0; break;
13195     case ISD::SELECT:    C = 1; break;
13196     case ISD::SELECT_CC: C = 2; break;
13197     }
13198 
13199     if ((!isa<ConstantSDNode>(PromOp.getOperand(C)) &&
13200          PromOp.getOperand(C).getValueType() != N->getValueType(0)) ||
13201         (!isa<ConstantSDNode>(PromOp.getOperand(C+1)) &&
13202          PromOp.getOperand(C+1).getValueType() != N->getValueType(0))) {
13203       // The to-be-promoted operands of this node have not yet been
13204       // promoted (this should be rare because we're going through the
13205       // list backward, but if one of the operands has several users in
13206       // this cluster of to-be-promoted nodes, it is possible).
13207       PromOpHandles.emplace_front(PromOp);
13208       continue;
13209     }
13210 
13211     // For SELECT and SELECT_CC nodes, we do a similar check for any
13212     // to-be-promoted comparison inputs.
13213     if (PromOp.getOpcode() == ISD::SELECT ||
13214         PromOp.getOpcode() == ISD::SELECT_CC) {
13215       if ((SelectTruncOp[0].count(PromOp.getNode()) &&
13216            PromOp.getOperand(0).getValueType() != N->getValueType(0)) ||
13217           (SelectTruncOp[1].count(PromOp.getNode()) &&
13218            PromOp.getOperand(1).getValueType() != N->getValueType(0))) {
13219         PromOpHandles.emplace_front(PromOp);
13220         continue;
13221       }
13222     }
13223 
13224     SmallVector<SDValue, 3> Ops(PromOp.getNode()->op_begin(),
13225                                 PromOp.getNode()->op_end());
13226 
13227     // If this node has constant inputs, then they'll need to be promoted here.
13228     for (unsigned i = 0; i < 2; ++i) {
13229       if (!isa<ConstantSDNode>(Ops[C+i]))
13230         continue;
13231       if (Ops[C+i].getValueType() == N->getValueType(0))
13232         continue;
13233 
13234       if (N->getOpcode() == ISD::SIGN_EXTEND)
13235         Ops[C+i] = DAG.getSExtOrTrunc(Ops[C+i], dl, N->getValueType(0));
13236       else if (N->getOpcode() == ISD::ZERO_EXTEND)
13237         Ops[C+i] = DAG.getZExtOrTrunc(Ops[C+i], dl, N->getValueType(0));
13238       else
13239         Ops[C+i] = DAG.getAnyExtOrTrunc(Ops[C+i], dl, N->getValueType(0));
13240     }
13241 
13242     // If we've promoted the comparison inputs of a SELECT or SELECT_CC,
13243     // truncate them again to the original value type.
13244     if (PromOp.getOpcode() == ISD::SELECT ||
13245         PromOp.getOpcode() == ISD::SELECT_CC) {
13246       auto SI0 = SelectTruncOp[0].find(PromOp.getNode());
13247       if (SI0 != SelectTruncOp[0].end())
13248         Ops[0] = DAG.getNode(ISD::TRUNCATE, dl, SI0->second, Ops[0]);
13249       auto SI1 = SelectTruncOp[1].find(PromOp.getNode());
13250       if (SI1 != SelectTruncOp[1].end())
13251         Ops[1] = DAG.getNode(ISD::TRUNCATE, dl, SI1->second, Ops[1]);
13252     }
13253 
13254     DAG.ReplaceAllUsesOfValueWith(PromOp,
13255       DAG.getNode(PromOp.getOpcode(), dl, N->getValueType(0), Ops));
13256   }
13257 
13258   // Now we're left with the initial extension itself.
13259   if (!ReallyNeedsExt)
13260     return N->getOperand(0);
13261 
13262   // To zero extend, just mask off everything except for the first bit (in the
13263   // i1 case).
13264   if (N->getOpcode() == ISD::ZERO_EXTEND)
13265     return DAG.getNode(ISD::AND, dl, N->getValueType(0), N->getOperand(0),
13266                        DAG.getConstant(APInt::getLowBitsSet(
13267                                          N->getValueSizeInBits(0), PromBits),
13268                                        dl, N->getValueType(0)));
13269 
13270   assert(N->getOpcode() == ISD::SIGN_EXTEND &&
13271          "Invalid extension type");
13272   EVT ShiftAmountTy = getShiftAmountTy(N->getValueType(0), DAG.getDataLayout());
13273   SDValue ShiftCst =
13274       DAG.getConstant(N->getValueSizeInBits(0) - PromBits, dl, ShiftAmountTy);
13275   return DAG.getNode(
13276       ISD::SRA, dl, N->getValueType(0),
13277       DAG.getNode(ISD::SHL, dl, N->getValueType(0), N->getOperand(0), ShiftCst),
13278       ShiftCst);
13279 }
13280 
13281 SDValue PPCTargetLowering::combineSetCC(SDNode *N,
13282                                         DAGCombinerInfo &DCI) const {
13283   assert(N->getOpcode() == ISD::SETCC &&
13284          "Should be called with a SETCC node");
13285 
13286   ISD::CondCode CC = cast<CondCodeSDNode>(N->getOperand(2))->get();
13287   if (CC == ISD::SETNE || CC == ISD::SETEQ) {
13288     SDValue LHS = N->getOperand(0);
13289     SDValue RHS = N->getOperand(1);
13290 
13291     // If there is a '0 - y' pattern, canonicalize the pattern to the RHS.
13292     if (LHS.getOpcode() == ISD::SUB && isNullConstant(LHS.getOperand(0)) &&
13293         LHS.hasOneUse())
13294       std::swap(LHS, RHS);
13295 
13296     // x == 0-y --> x+y == 0
13297     // x != 0-y --> x+y != 0
13298     if (RHS.getOpcode() == ISD::SUB && isNullConstant(RHS.getOperand(0)) &&
13299         RHS.hasOneUse()) {
13300       SDLoc DL(N);
13301       SelectionDAG &DAG = DCI.DAG;
13302       EVT VT = N->getValueType(0);
13303       EVT OpVT = LHS.getValueType();
13304       SDValue Add = DAG.getNode(ISD::ADD, DL, OpVT, LHS, RHS.getOperand(1));
13305       return DAG.getSetCC(DL, VT, Add, DAG.getConstant(0, DL, OpVT), CC);
13306     }
13307   }
13308 
13309   return DAGCombineTruncBoolExt(N, DCI);
13310 }
13311 
13312 // Is this an extending load from an f32 to an f64?
13313 static bool isFPExtLoad(SDValue Op) {
13314   if (LoadSDNode *LD = dyn_cast<LoadSDNode>(Op.getNode()))
13315     return LD->getExtensionType() == ISD::EXTLOAD &&
13316       Op.getValueType() == MVT::f64;
13317   return false;
13318 }
13319 
13320 /// Reduces the number of fp-to-int conversion when building a vector.
13321 ///
13322 /// If this vector is built out of floating to integer conversions,
13323 /// transform it to a vector built out of floating point values followed by a
13324 /// single floating to integer conversion of the vector.
13325 /// Namely  (build_vector (fptosi $A), (fptosi $B), ...)
13326 /// becomes (fptosi (build_vector ($A, $B, ...)))
13327 SDValue PPCTargetLowering::
13328 combineElementTruncationToVectorTruncation(SDNode *N,
13329                                            DAGCombinerInfo &DCI) const {
13330   assert(N->getOpcode() == ISD::BUILD_VECTOR &&
13331          "Should be called with a BUILD_VECTOR node");
13332 
13333   SelectionDAG &DAG = DCI.DAG;
13334   SDLoc dl(N);
13335 
13336   SDValue FirstInput = N->getOperand(0);
13337   assert(FirstInput.getOpcode() == PPCISD::MFVSR &&
13338          "The input operand must be an fp-to-int conversion.");
13339 
13340   // This combine happens after legalization so the fp_to_[su]i nodes are
13341   // already converted to PPCSISD nodes.
13342   unsigned FirstConversion = FirstInput.getOperand(0).getOpcode();
13343   if (FirstConversion == PPCISD::FCTIDZ ||
13344       FirstConversion == PPCISD::FCTIDUZ ||
13345       FirstConversion == PPCISD::FCTIWZ ||
13346       FirstConversion == PPCISD::FCTIWUZ) {
13347     bool IsSplat = true;
13348     bool Is32Bit = FirstConversion == PPCISD::FCTIWZ ||
13349       FirstConversion == PPCISD::FCTIWUZ;
13350     EVT SrcVT = FirstInput.getOperand(0).getValueType();
13351     SmallVector<SDValue, 4> Ops;
13352     EVT TargetVT = N->getValueType(0);
13353     for (int i = 0, e = N->getNumOperands(); i < e; ++i) {
13354       SDValue NextOp = N->getOperand(i);
13355       if (NextOp.getOpcode() != PPCISD::MFVSR)
13356         return SDValue();
13357       unsigned NextConversion = NextOp.getOperand(0).getOpcode();
13358       if (NextConversion != FirstConversion)
13359         return SDValue();
13360       // If we are converting to 32-bit integers, we need to add an FP_ROUND.
13361       // This is not valid if the input was originally double precision. It is
13362       // also not profitable to do unless this is an extending load in which
13363       // case doing this combine will allow us to combine consecutive loads.
13364       if (Is32Bit && !isFPExtLoad(NextOp.getOperand(0).getOperand(0)))
13365         return SDValue();
13366       if (N->getOperand(i) != FirstInput)
13367         IsSplat = false;
13368     }
13369 
13370     // If this is a splat, we leave it as-is since there will be only a single
13371     // fp-to-int conversion followed by a splat of the integer. This is better
13372     // for 32-bit and smaller ints and neutral for 64-bit ints.
13373     if (IsSplat)
13374       return SDValue();
13375 
13376     // Now that we know we have the right type of node, get its operands
13377     for (int i = 0, e = N->getNumOperands(); i < e; ++i) {
13378       SDValue In = N->getOperand(i).getOperand(0);
13379       if (Is32Bit) {
13380         // For 32-bit values, we need to add an FP_ROUND node (if we made it
13381         // here, we know that all inputs are extending loads so this is safe).
13382         if (In.isUndef())
13383           Ops.push_back(DAG.getUNDEF(SrcVT));
13384         else {
13385           SDValue Trunc = DAG.getNode(ISD::FP_ROUND, dl,
13386                                       MVT::f32, In.getOperand(0),
13387                                       DAG.getIntPtrConstant(1, dl));
13388           Ops.push_back(Trunc);
13389         }
13390       } else
13391         Ops.push_back(In.isUndef() ? DAG.getUNDEF(SrcVT) : In.getOperand(0));
13392     }
13393 
13394     unsigned Opcode;
13395     if (FirstConversion == PPCISD::FCTIDZ ||
13396         FirstConversion == PPCISD::FCTIWZ)
13397       Opcode = ISD::FP_TO_SINT;
13398     else
13399       Opcode = ISD::FP_TO_UINT;
13400 
13401     EVT NewVT = TargetVT == MVT::v2i64 ? MVT::v2f64 : MVT::v4f32;
13402     SDValue BV = DAG.getBuildVector(NewVT, dl, Ops);
13403     return DAG.getNode(Opcode, dl, TargetVT, BV);
13404   }
13405   return SDValue();
13406 }
13407 
13408 /// Reduce the number of loads when building a vector.
13409 ///
13410 /// Building a vector out of multiple loads can be converted to a load
13411 /// of the vector type if the loads are consecutive. If the loads are
13412 /// consecutive but in descending order, a shuffle is added at the end
13413 /// to reorder the vector.
13414 static SDValue combineBVOfConsecutiveLoads(SDNode *N, SelectionDAG &DAG) {
13415   assert(N->getOpcode() == ISD::BUILD_VECTOR &&
13416          "Should be called with a BUILD_VECTOR node");
13417 
13418   SDLoc dl(N);
13419 
13420   // Return early for non byte-sized type, as they can't be consecutive.
13421   if (!N->getValueType(0).getVectorElementType().isByteSized())
13422     return SDValue();
13423 
13424   bool InputsAreConsecutiveLoads = true;
13425   bool InputsAreReverseConsecutive = true;
13426   unsigned ElemSize = N->getValueType(0).getScalarType().getStoreSize();
13427   SDValue FirstInput = N->getOperand(0);
13428   bool IsRoundOfExtLoad = false;
13429 
13430   if (FirstInput.getOpcode() == ISD::FP_ROUND &&
13431       FirstInput.getOperand(0).getOpcode() == ISD::LOAD) {
13432     LoadSDNode *LD = dyn_cast<LoadSDNode>(FirstInput.getOperand(0));
13433     IsRoundOfExtLoad = LD->getExtensionType() == ISD::EXTLOAD;
13434   }
13435   // Not a build vector of (possibly fp_rounded) loads.
13436   if ((!IsRoundOfExtLoad && FirstInput.getOpcode() != ISD::LOAD) ||
13437       N->getNumOperands() == 1)
13438     return SDValue();
13439 
13440   for (int i = 1, e = N->getNumOperands(); i < e; ++i) {
13441     // If any inputs are fp_round(extload), they all must be.
13442     if (IsRoundOfExtLoad && N->getOperand(i).getOpcode() != ISD::FP_ROUND)
13443       return SDValue();
13444 
13445     SDValue NextInput = IsRoundOfExtLoad ? N->getOperand(i).getOperand(0) :
13446       N->getOperand(i);
13447     if (NextInput.getOpcode() != ISD::LOAD)
13448       return SDValue();
13449 
13450     SDValue PreviousInput =
13451       IsRoundOfExtLoad ? N->getOperand(i-1).getOperand(0) : N->getOperand(i-1);
13452     LoadSDNode *LD1 = dyn_cast<LoadSDNode>(PreviousInput);
13453     LoadSDNode *LD2 = dyn_cast<LoadSDNode>(NextInput);
13454 
13455     // If any inputs are fp_round(extload), they all must be.
13456     if (IsRoundOfExtLoad && LD2->getExtensionType() != ISD::EXTLOAD)
13457       return SDValue();
13458 
13459     if (!isConsecutiveLS(LD2, LD1, ElemSize, 1, DAG))
13460       InputsAreConsecutiveLoads = false;
13461     if (!isConsecutiveLS(LD1, LD2, ElemSize, 1, DAG))
13462       InputsAreReverseConsecutive = false;
13463 
13464     // Exit early if the loads are neither consecutive nor reverse consecutive.
13465     if (!InputsAreConsecutiveLoads && !InputsAreReverseConsecutive)
13466       return SDValue();
13467   }
13468 
13469   assert(!(InputsAreConsecutiveLoads && InputsAreReverseConsecutive) &&
13470          "The loads cannot be both consecutive and reverse consecutive.");
13471 
13472   SDValue FirstLoadOp =
13473     IsRoundOfExtLoad ? FirstInput.getOperand(0) : FirstInput;
13474   SDValue LastLoadOp =
13475     IsRoundOfExtLoad ? N->getOperand(N->getNumOperands()-1).getOperand(0) :
13476                        N->getOperand(N->getNumOperands()-1);
13477 
13478   LoadSDNode *LD1 = dyn_cast<LoadSDNode>(FirstLoadOp);
13479   LoadSDNode *LDL = dyn_cast<LoadSDNode>(LastLoadOp);
13480   if (InputsAreConsecutiveLoads) {
13481     assert(LD1 && "Input needs to be a LoadSDNode.");
13482     return DAG.getLoad(N->getValueType(0), dl, LD1->getChain(),
13483                        LD1->getBasePtr(), LD1->getPointerInfo(),
13484                        LD1->getAlignment());
13485   }
13486   if (InputsAreReverseConsecutive) {
13487     assert(LDL && "Input needs to be a LoadSDNode.");
13488     SDValue Load = DAG.getLoad(N->getValueType(0), dl, LDL->getChain(),
13489                                LDL->getBasePtr(), LDL->getPointerInfo(),
13490                                LDL->getAlignment());
13491     SmallVector<int, 16> Ops;
13492     for (int i = N->getNumOperands() - 1; i >= 0; i--)
13493       Ops.push_back(i);
13494 
13495     return DAG.getVectorShuffle(N->getValueType(0), dl, Load,
13496                                 DAG.getUNDEF(N->getValueType(0)), Ops);
13497   }
13498   return SDValue();
13499 }
13500 
13501 // This function adds the required vector_shuffle needed to get
13502 // the elements of the vector extract in the correct position
13503 // as specified by the CorrectElems encoding.
13504 static SDValue addShuffleForVecExtend(SDNode *N, SelectionDAG &DAG,
13505                                       SDValue Input, uint64_t Elems,
13506                                       uint64_t CorrectElems) {
13507   SDLoc dl(N);
13508 
13509   unsigned NumElems = Input.getValueType().getVectorNumElements();
13510   SmallVector<int, 16> ShuffleMask(NumElems, -1);
13511 
13512   // Knowing the element indices being extracted from the original
13513   // vector and the order in which they're being inserted, just put
13514   // them at element indices required for the instruction.
13515   for (unsigned i = 0; i < N->getNumOperands(); i++) {
13516     if (DAG.getDataLayout().isLittleEndian())
13517       ShuffleMask[CorrectElems & 0xF] = Elems & 0xF;
13518     else
13519       ShuffleMask[(CorrectElems & 0xF0) >> 4] = (Elems & 0xF0) >> 4;
13520     CorrectElems = CorrectElems >> 8;
13521     Elems = Elems >> 8;
13522   }
13523 
13524   SDValue Shuffle =
13525       DAG.getVectorShuffle(Input.getValueType(), dl, Input,
13526                            DAG.getUNDEF(Input.getValueType()), ShuffleMask);
13527 
13528   EVT VT = N->getValueType(0);
13529   SDValue Conv = DAG.getBitcast(VT, Shuffle);
13530 
13531   EVT ExtVT = EVT::getVectorVT(*DAG.getContext(),
13532                                Input.getValueType().getVectorElementType(),
13533                                VT.getVectorNumElements());
13534   return DAG.getNode(ISD::SIGN_EXTEND_INREG, dl, VT, Conv,
13535                      DAG.getValueType(ExtVT));
13536 }
13537 
13538 // Look for build vector patterns where input operands come from sign
13539 // extended vector_extract elements of specific indices. If the correct indices
13540 // aren't used, add a vector shuffle to fix up the indices and create
13541 // SIGN_EXTEND_INREG node which selects the vector sign extend instructions
13542 // during instruction selection.
13543 static SDValue combineBVOfVecSExt(SDNode *N, SelectionDAG &DAG) {
13544   // This array encodes the indices that the vector sign extend instructions
13545   // extract from when extending from one type to another for both BE and LE.
13546   // The right nibble of each byte corresponds to the LE incides.
13547   // and the left nibble of each byte corresponds to the BE incides.
13548   // For example: 0x3074B8FC  byte->word
13549   // For LE: the allowed indices are: 0x0,0x4,0x8,0xC
13550   // For BE: the allowed indices are: 0x3,0x7,0xB,0xF
13551   // For example: 0x000070F8  byte->double word
13552   // For LE: the allowed indices are: 0x0,0x8
13553   // For BE: the allowed indices are: 0x7,0xF
13554   uint64_t TargetElems[] = {
13555       0x3074B8FC, // b->w
13556       0x000070F8, // b->d
13557       0x10325476, // h->w
13558       0x00003074, // h->d
13559       0x00001032, // w->d
13560   };
13561 
13562   uint64_t Elems = 0;
13563   int Index;
13564   SDValue Input;
13565 
13566   auto isSExtOfVecExtract = [&](SDValue Op) -> bool {
13567     if (!Op)
13568       return false;
13569     if (Op.getOpcode() != ISD::SIGN_EXTEND &&
13570         Op.getOpcode() != ISD::SIGN_EXTEND_INREG)
13571       return false;
13572 
13573     // A SIGN_EXTEND_INREG might be fed by an ANY_EXTEND to produce a value
13574     // of the right width.
13575     SDValue Extract = Op.getOperand(0);
13576     if (Extract.getOpcode() == ISD::ANY_EXTEND)
13577       Extract = Extract.getOperand(0);
13578     if (Extract.getOpcode() != ISD::EXTRACT_VECTOR_ELT)
13579       return false;
13580 
13581     ConstantSDNode *ExtOp = dyn_cast<ConstantSDNode>(Extract.getOperand(1));
13582     if (!ExtOp)
13583       return false;
13584 
13585     Index = ExtOp->getZExtValue();
13586     if (Input && Input != Extract.getOperand(0))
13587       return false;
13588 
13589     if (!Input)
13590       Input = Extract.getOperand(0);
13591 
13592     Elems = Elems << 8;
13593     Index = DAG.getDataLayout().isLittleEndian() ? Index : Index << 4;
13594     Elems |= Index;
13595 
13596     return true;
13597   };
13598 
13599   // If the build vector operands aren't sign extended vector extracts,
13600   // of the same input vector, then return.
13601   for (unsigned i = 0; i < N->getNumOperands(); i++) {
13602     if (!isSExtOfVecExtract(N->getOperand(i))) {
13603       return SDValue();
13604     }
13605   }
13606 
13607   // If the vector extract indicies are not correct, add the appropriate
13608   // vector_shuffle.
13609   int TgtElemArrayIdx;
13610   int InputSize = Input.getValueType().getScalarSizeInBits();
13611   int OutputSize = N->getValueType(0).getScalarSizeInBits();
13612   if (InputSize + OutputSize == 40)
13613     TgtElemArrayIdx = 0;
13614   else if (InputSize + OutputSize == 72)
13615     TgtElemArrayIdx = 1;
13616   else if (InputSize + OutputSize == 48)
13617     TgtElemArrayIdx = 2;
13618   else if (InputSize + OutputSize == 80)
13619     TgtElemArrayIdx = 3;
13620   else if (InputSize + OutputSize == 96)
13621     TgtElemArrayIdx = 4;
13622   else
13623     return SDValue();
13624 
13625   uint64_t CorrectElems = TargetElems[TgtElemArrayIdx];
13626   CorrectElems = DAG.getDataLayout().isLittleEndian()
13627                      ? CorrectElems & 0x0F0F0F0F0F0F0F0F
13628                      : CorrectElems & 0xF0F0F0F0F0F0F0F0;
13629   if (Elems != CorrectElems) {
13630     return addShuffleForVecExtend(N, DAG, Input, Elems, CorrectElems);
13631   }
13632 
13633   // Regular lowering will catch cases where a shuffle is not needed.
13634   return SDValue();
13635 }
13636 
13637 // Look for the pattern of a load from a narrow width to i128, feeding
13638 // into a BUILD_VECTOR of v1i128. Replace this sequence with a PPCISD node
13639 // (LXVRZX). This node represents a zero extending load that will be matched
13640 // to the Load VSX Vector Rightmost instructions.
13641 static SDValue combineBVZEXTLOAD(SDNode *N, SelectionDAG &DAG) {
13642   SDLoc DL(N);
13643 
13644   // This combine is only eligible for a BUILD_VECTOR of v1i128.
13645   if (N->getValueType(0) != MVT::v1i128)
13646     return SDValue();
13647 
13648   SDValue Operand = N->getOperand(0);
13649   // Proceed with the transformation if the operand to the BUILD_VECTOR
13650   // is a load instruction.
13651   if (Operand.getOpcode() != ISD::LOAD)
13652     return SDValue();
13653 
13654   LoadSDNode *LD = dyn_cast<LoadSDNode>(Operand);
13655   EVT MemoryType = LD->getMemoryVT();
13656 
13657   // This transformation is only valid if the we are loading either a byte,
13658   // halfword, word, or doubleword.
13659   bool ValidLDType = MemoryType == MVT::i8 || MemoryType == MVT::i16 ||
13660                      MemoryType == MVT::i32 || MemoryType == MVT::i64;
13661 
13662   // Ensure that the load from the narrow width is being zero extended to i128.
13663   if (!ValidLDType ||
13664       (LD->getExtensionType() != ISD::ZEXTLOAD &&
13665        LD->getExtensionType() != ISD::EXTLOAD))
13666     return SDValue();
13667 
13668   SDValue LoadOps[] = {
13669       LD->getChain(), LD->getBasePtr(),
13670       DAG.getIntPtrConstant(MemoryType.getScalarSizeInBits(), DL)};
13671 
13672   return DAG.getMemIntrinsicNode(PPCISD::LXVRZX, DL,
13673                                  DAG.getVTList(MVT::v1i128, MVT::Other),
13674                                  LoadOps, MemoryType, LD->getMemOperand());
13675 }
13676 
13677 SDValue PPCTargetLowering::DAGCombineBuildVector(SDNode *N,
13678                                                  DAGCombinerInfo &DCI) const {
13679   assert(N->getOpcode() == ISD::BUILD_VECTOR &&
13680          "Should be called with a BUILD_VECTOR node");
13681 
13682   SelectionDAG &DAG = DCI.DAG;
13683   SDLoc dl(N);
13684 
13685   if (!Subtarget.hasVSX())
13686     return SDValue();
13687 
13688   // The target independent DAG combiner will leave a build_vector of
13689   // float-to-int conversions intact. We can generate MUCH better code for
13690   // a float-to-int conversion of a vector of floats.
13691   SDValue FirstInput = N->getOperand(0);
13692   if (FirstInput.getOpcode() == PPCISD::MFVSR) {
13693     SDValue Reduced = combineElementTruncationToVectorTruncation(N, DCI);
13694     if (Reduced)
13695       return Reduced;
13696   }
13697 
13698   // If we're building a vector out of consecutive loads, just load that
13699   // vector type.
13700   SDValue Reduced = combineBVOfConsecutiveLoads(N, DAG);
13701   if (Reduced)
13702     return Reduced;
13703 
13704   // If we're building a vector out of extended elements from another vector
13705   // we have P9 vector integer extend instructions. The code assumes legal
13706   // input types (i.e. it can't handle things like v4i16) so do not run before
13707   // legalization.
13708   if (Subtarget.hasP9Altivec() && !DCI.isBeforeLegalize()) {
13709     Reduced = combineBVOfVecSExt(N, DAG);
13710     if (Reduced)
13711       return Reduced;
13712   }
13713 
13714   // On Power10, the Load VSX Vector Rightmost instructions can be utilized
13715   // if this is a BUILD_VECTOR of v1i128, and if the operand to the BUILD_VECTOR
13716   // is a load from <valid narrow width> to i128.
13717   if (Subtarget.isISA3_1()) {
13718     SDValue BVOfZLoad = combineBVZEXTLOAD(N, DAG);
13719     if (BVOfZLoad)
13720       return BVOfZLoad;
13721   }
13722 
13723   if (N->getValueType(0) != MVT::v2f64)
13724     return SDValue();
13725 
13726   // Looking for:
13727   // (build_vector ([su]int_to_fp (extractelt 0)), [su]int_to_fp (extractelt 1))
13728   if (FirstInput.getOpcode() != ISD::SINT_TO_FP &&
13729       FirstInput.getOpcode() != ISD::UINT_TO_FP)
13730     return SDValue();
13731   if (N->getOperand(1).getOpcode() != ISD::SINT_TO_FP &&
13732       N->getOperand(1).getOpcode() != ISD::UINT_TO_FP)
13733     return SDValue();
13734   if (FirstInput.getOpcode() != N->getOperand(1).getOpcode())
13735     return SDValue();
13736 
13737   SDValue Ext1 = FirstInput.getOperand(0);
13738   SDValue Ext2 = N->getOperand(1).getOperand(0);
13739   if(Ext1.getOpcode() != ISD::EXTRACT_VECTOR_ELT ||
13740      Ext2.getOpcode() != ISD::EXTRACT_VECTOR_ELT)
13741     return SDValue();
13742 
13743   ConstantSDNode *Ext1Op = dyn_cast<ConstantSDNode>(Ext1.getOperand(1));
13744   ConstantSDNode *Ext2Op = dyn_cast<ConstantSDNode>(Ext2.getOperand(1));
13745   if (!Ext1Op || !Ext2Op)
13746     return SDValue();
13747   if (Ext1.getOperand(0).getValueType() != MVT::v4i32 ||
13748       Ext1.getOperand(0) != Ext2.getOperand(0))
13749     return SDValue();
13750 
13751   int FirstElem = Ext1Op->getZExtValue();
13752   int SecondElem = Ext2Op->getZExtValue();
13753   int SubvecIdx;
13754   if (FirstElem == 0 && SecondElem == 1)
13755     SubvecIdx = Subtarget.isLittleEndian() ? 1 : 0;
13756   else if (FirstElem == 2 && SecondElem == 3)
13757     SubvecIdx = Subtarget.isLittleEndian() ? 0 : 1;
13758   else
13759     return SDValue();
13760 
13761   SDValue SrcVec = Ext1.getOperand(0);
13762   auto NodeType = (N->getOperand(1).getOpcode() == ISD::SINT_TO_FP) ?
13763     PPCISD::SINT_VEC_TO_FP : PPCISD::UINT_VEC_TO_FP;
13764   return DAG.getNode(NodeType, dl, MVT::v2f64,
13765                      SrcVec, DAG.getIntPtrConstant(SubvecIdx, dl));
13766 }
13767 
13768 SDValue PPCTargetLowering::combineFPToIntToFP(SDNode *N,
13769                                               DAGCombinerInfo &DCI) const {
13770   assert((N->getOpcode() == ISD::SINT_TO_FP ||
13771           N->getOpcode() == ISD::UINT_TO_FP) &&
13772          "Need an int -> FP conversion node here");
13773 
13774   if (useSoftFloat() || !Subtarget.has64BitSupport())
13775     return SDValue();
13776 
13777   SelectionDAG &DAG = DCI.DAG;
13778   SDLoc dl(N);
13779   SDValue Op(N, 0);
13780 
13781   // Don't handle ppc_fp128 here or conversions that are out-of-range capable
13782   // from the hardware.
13783   if (Op.getValueType() != MVT::f32 && Op.getValueType() != MVT::f64)
13784     return SDValue();
13785   if (Op.getOperand(0).getValueType().getSimpleVT() <= MVT(MVT::i1) ||
13786       Op.getOperand(0).getValueType().getSimpleVT() > MVT(MVT::i64))
13787     return SDValue();
13788 
13789   SDValue FirstOperand(Op.getOperand(0));
13790   bool SubWordLoad = FirstOperand.getOpcode() == ISD::LOAD &&
13791     (FirstOperand.getValueType() == MVT::i8 ||
13792      FirstOperand.getValueType() == MVT::i16);
13793   if (Subtarget.hasP9Vector() && Subtarget.hasP9Altivec() && SubWordLoad) {
13794     bool Signed = N->getOpcode() == ISD::SINT_TO_FP;
13795     bool DstDouble = Op.getValueType() == MVT::f64;
13796     unsigned ConvOp = Signed ?
13797       (DstDouble ? PPCISD::FCFID  : PPCISD::FCFIDS) :
13798       (DstDouble ? PPCISD::FCFIDU : PPCISD::FCFIDUS);
13799     SDValue WidthConst =
13800       DAG.getIntPtrConstant(FirstOperand.getValueType() == MVT::i8 ? 1 : 2,
13801                             dl, false);
13802     LoadSDNode *LDN = cast<LoadSDNode>(FirstOperand.getNode());
13803     SDValue Ops[] = { LDN->getChain(), LDN->getBasePtr(), WidthConst };
13804     SDValue Ld = DAG.getMemIntrinsicNode(PPCISD::LXSIZX, dl,
13805                                          DAG.getVTList(MVT::f64, MVT::Other),
13806                                          Ops, MVT::i8, LDN->getMemOperand());
13807 
13808     // For signed conversion, we need to sign-extend the value in the VSR
13809     if (Signed) {
13810       SDValue ExtOps[] = { Ld, WidthConst };
13811       SDValue Ext = DAG.getNode(PPCISD::VEXTS, dl, MVT::f64, ExtOps);
13812       return DAG.getNode(ConvOp, dl, DstDouble ? MVT::f64 : MVT::f32, Ext);
13813     } else
13814       return DAG.getNode(ConvOp, dl, DstDouble ? MVT::f64 : MVT::f32, Ld);
13815   }
13816 
13817 
13818   // For i32 intermediate values, unfortunately, the conversion functions
13819   // leave the upper 32 bits of the value are undefined. Within the set of
13820   // scalar instructions, we have no method for zero- or sign-extending the
13821   // value. Thus, we cannot handle i32 intermediate values here.
13822   if (Op.getOperand(0).getValueType() == MVT::i32)
13823     return SDValue();
13824 
13825   assert((Op.getOpcode() == ISD::SINT_TO_FP || Subtarget.hasFPCVT()) &&
13826          "UINT_TO_FP is supported only with FPCVT");
13827 
13828   // If we have FCFIDS, then use it when converting to single-precision.
13829   // Otherwise, convert to double-precision and then round.
13830   unsigned FCFOp = (Subtarget.hasFPCVT() && Op.getValueType() == MVT::f32)
13831                        ? (Op.getOpcode() == ISD::UINT_TO_FP ? PPCISD::FCFIDUS
13832                                                             : PPCISD::FCFIDS)
13833                        : (Op.getOpcode() == ISD::UINT_TO_FP ? PPCISD::FCFIDU
13834                                                             : PPCISD::FCFID);
13835   MVT FCFTy = (Subtarget.hasFPCVT() && Op.getValueType() == MVT::f32)
13836                   ? MVT::f32
13837                   : MVT::f64;
13838 
13839   // If we're converting from a float, to an int, and back to a float again,
13840   // then we don't need the store/load pair at all.
13841   if ((Op.getOperand(0).getOpcode() == ISD::FP_TO_UINT &&
13842        Subtarget.hasFPCVT()) ||
13843       (Op.getOperand(0).getOpcode() == ISD::FP_TO_SINT)) {
13844     SDValue Src = Op.getOperand(0).getOperand(0);
13845     if (Src.getValueType() == MVT::f32) {
13846       Src = DAG.getNode(ISD::FP_EXTEND, dl, MVT::f64, Src);
13847       DCI.AddToWorklist(Src.getNode());
13848     } else if (Src.getValueType() != MVT::f64) {
13849       // Make sure that we don't pick up a ppc_fp128 source value.
13850       return SDValue();
13851     }
13852 
13853     unsigned FCTOp =
13854       Op.getOperand(0).getOpcode() == ISD::FP_TO_SINT ? PPCISD::FCTIDZ :
13855                                                         PPCISD::FCTIDUZ;
13856 
13857     SDValue Tmp = DAG.getNode(FCTOp, dl, MVT::f64, Src);
13858     SDValue FP = DAG.getNode(FCFOp, dl, FCFTy, Tmp);
13859 
13860     if (Op.getValueType() == MVT::f32 && !Subtarget.hasFPCVT()) {
13861       FP = DAG.getNode(ISD::FP_ROUND, dl,
13862                        MVT::f32, FP, DAG.getIntPtrConstant(0, dl));
13863       DCI.AddToWorklist(FP.getNode());
13864     }
13865 
13866     return FP;
13867   }
13868 
13869   return SDValue();
13870 }
13871 
13872 // expandVSXLoadForLE - Convert VSX loads (which may be intrinsics for
13873 // builtins) into loads with swaps.
13874 SDValue PPCTargetLowering::expandVSXLoadForLE(SDNode *N,
13875                                               DAGCombinerInfo &DCI) const {
13876   SelectionDAG &DAG = DCI.DAG;
13877   SDLoc dl(N);
13878   SDValue Chain;
13879   SDValue Base;
13880   MachineMemOperand *MMO;
13881 
13882   switch (N->getOpcode()) {
13883   default:
13884     llvm_unreachable("Unexpected opcode for little endian VSX load");
13885   case ISD::LOAD: {
13886     LoadSDNode *LD = cast<LoadSDNode>(N);
13887     Chain = LD->getChain();
13888     Base = LD->getBasePtr();
13889     MMO = LD->getMemOperand();
13890     // If the MMO suggests this isn't a load of a full vector, leave
13891     // things alone.  For a built-in, we have to make the change for
13892     // correctness, so if there is a size problem that will be a bug.
13893     if (MMO->getSize() < 16)
13894       return SDValue();
13895     break;
13896   }
13897   case ISD::INTRINSIC_W_CHAIN: {
13898     MemIntrinsicSDNode *Intrin = cast<MemIntrinsicSDNode>(N);
13899     Chain = Intrin->getChain();
13900     // Similarly to the store case below, Intrin->getBasePtr() doesn't get
13901     // us what we want. Get operand 2 instead.
13902     Base = Intrin->getOperand(2);
13903     MMO = Intrin->getMemOperand();
13904     break;
13905   }
13906   }
13907 
13908   MVT VecTy = N->getValueType(0).getSimpleVT();
13909 
13910   // Do not expand to PPCISD::LXVD2X + PPCISD::XXSWAPD when the load is
13911   // aligned and the type is a vector with elements up to 4 bytes
13912   if (Subtarget.needsSwapsForVSXMemOps() && MMO->getAlign() >= Align(16) &&
13913       VecTy.getScalarSizeInBits() <= 32) {
13914     return SDValue();
13915   }
13916 
13917   SDValue LoadOps[] = { Chain, Base };
13918   SDValue Load = DAG.getMemIntrinsicNode(PPCISD::LXVD2X, dl,
13919                                          DAG.getVTList(MVT::v2f64, MVT::Other),
13920                                          LoadOps, MVT::v2f64, MMO);
13921 
13922   DCI.AddToWorklist(Load.getNode());
13923   Chain = Load.getValue(1);
13924   SDValue Swap = DAG.getNode(
13925       PPCISD::XXSWAPD, dl, DAG.getVTList(MVT::v2f64, MVT::Other), Chain, Load);
13926   DCI.AddToWorklist(Swap.getNode());
13927 
13928   // Add a bitcast if the resulting load type doesn't match v2f64.
13929   if (VecTy != MVT::v2f64) {
13930     SDValue N = DAG.getNode(ISD::BITCAST, dl, VecTy, Swap);
13931     DCI.AddToWorklist(N.getNode());
13932     // Package {bitcast value, swap's chain} to match Load's shape.
13933     return DAG.getNode(ISD::MERGE_VALUES, dl, DAG.getVTList(VecTy, MVT::Other),
13934                        N, Swap.getValue(1));
13935   }
13936 
13937   return Swap;
13938 }
13939 
13940 // expandVSXStoreForLE - Convert VSX stores (which may be intrinsics for
13941 // builtins) into stores with swaps.
13942 SDValue PPCTargetLowering::expandVSXStoreForLE(SDNode *N,
13943                                                DAGCombinerInfo &DCI) const {
13944   SelectionDAG &DAG = DCI.DAG;
13945   SDLoc dl(N);
13946   SDValue Chain;
13947   SDValue Base;
13948   unsigned SrcOpnd;
13949   MachineMemOperand *MMO;
13950 
13951   switch (N->getOpcode()) {
13952   default:
13953     llvm_unreachable("Unexpected opcode for little endian VSX store");
13954   case ISD::STORE: {
13955     StoreSDNode *ST = cast<StoreSDNode>(N);
13956     Chain = ST->getChain();
13957     Base = ST->getBasePtr();
13958     MMO = ST->getMemOperand();
13959     SrcOpnd = 1;
13960     // If the MMO suggests this isn't a store of a full vector, leave
13961     // things alone.  For a built-in, we have to make the change for
13962     // correctness, so if there is a size problem that will be a bug.
13963     if (MMO->getSize() < 16)
13964       return SDValue();
13965     break;
13966   }
13967   case ISD::INTRINSIC_VOID: {
13968     MemIntrinsicSDNode *Intrin = cast<MemIntrinsicSDNode>(N);
13969     Chain = Intrin->getChain();
13970     // Intrin->getBasePtr() oddly does not get what we want.
13971     Base = Intrin->getOperand(3);
13972     MMO = Intrin->getMemOperand();
13973     SrcOpnd = 2;
13974     break;
13975   }
13976   }
13977 
13978   SDValue Src = N->getOperand(SrcOpnd);
13979   MVT VecTy = Src.getValueType().getSimpleVT();
13980 
13981   // Do not expand to PPCISD::XXSWAPD and PPCISD::STXVD2X when the load is
13982   // aligned and the type is a vector with elements up to 4 bytes
13983   if (Subtarget.needsSwapsForVSXMemOps() && MMO->getAlign() >= Align(16) &&
13984       VecTy.getScalarSizeInBits() <= 32) {
13985     return SDValue();
13986   }
13987 
13988   // All stores are done as v2f64 and possible bit cast.
13989   if (VecTy != MVT::v2f64) {
13990     Src = DAG.getNode(ISD::BITCAST, dl, MVT::v2f64, Src);
13991     DCI.AddToWorklist(Src.getNode());
13992   }
13993 
13994   SDValue Swap = DAG.getNode(PPCISD::XXSWAPD, dl,
13995                              DAG.getVTList(MVT::v2f64, MVT::Other), Chain, Src);
13996   DCI.AddToWorklist(Swap.getNode());
13997   Chain = Swap.getValue(1);
13998   SDValue StoreOps[] = { Chain, Swap, Base };
13999   SDValue Store = DAG.getMemIntrinsicNode(PPCISD::STXVD2X, dl,
14000                                           DAG.getVTList(MVT::Other),
14001                                           StoreOps, VecTy, MMO);
14002   DCI.AddToWorklist(Store.getNode());
14003   return Store;
14004 }
14005 
14006 // Handle DAG combine for STORE (FP_TO_INT F).
14007 SDValue PPCTargetLowering::combineStoreFPToInt(SDNode *N,
14008                                                DAGCombinerInfo &DCI) const {
14009 
14010   SelectionDAG &DAG = DCI.DAG;
14011   SDLoc dl(N);
14012   unsigned Opcode = N->getOperand(1).getOpcode();
14013 
14014   assert((Opcode == ISD::FP_TO_SINT || Opcode == ISD::FP_TO_UINT)
14015          && "Not a FP_TO_INT Instruction!");
14016 
14017   SDValue Val = N->getOperand(1).getOperand(0);
14018   EVT Op1VT = N->getOperand(1).getValueType();
14019   EVT ResVT = Val.getValueType();
14020 
14021   // Floating point types smaller than 32 bits are not legal on Power.
14022   if (ResVT.getScalarSizeInBits() < 32)
14023     return SDValue();
14024 
14025   // Only perform combine for conversion to i64/i32 or power9 i16/i8.
14026   bool ValidTypeForStoreFltAsInt =
14027         (Op1VT == MVT::i32 || Op1VT == MVT::i64 ||
14028          (Subtarget.hasP9Vector() && (Op1VT == MVT::i16 || Op1VT == MVT::i8)));
14029 
14030   if (ResVT == MVT::ppcf128 || !Subtarget.hasP8Vector() ||
14031       cast<StoreSDNode>(N)->isTruncatingStore() || !ValidTypeForStoreFltAsInt)
14032     return SDValue();
14033 
14034   // Extend f32 values to f64
14035   if (ResVT.getScalarSizeInBits() == 32) {
14036     Val = DAG.getNode(ISD::FP_EXTEND, dl, MVT::f64, Val);
14037     DCI.AddToWorklist(Val.getNode());
14038   }
14039 
14040   // Set signed or unsigned conversion opcode.
14041   unsigned ConvOpcode = (Opcode == ISD::FP_TO_SINT) ?
14042                           PPCISD::FP_TO_SINT_IN_VSR :
14043                           PPCISD::FP_TO_UINT_IN_VSR;
14044 
14045   Val = DAG.getNode(ConvOpcode,
14046                     dl, ResVT == MVT::f128 ? MVT::f128 : MVT::f64, Val);
14047   DCI.AddToWorklist(Val.getNode());
14048 
14049   // Set number of bytes being converted.
14050   unsigned ByteSize = Op1VT.getScalarSizeInBits() / 8;
14051   SDValue Ops[] = { N->getOperand(0), Val, N->getOperand(2),
14052                     DAG.getIntPtrConstant(ByteSize, dl, false),
14053                     DAG.getValueType(Op1VT) };
14054 
14055   Val = DAG.getMemIntrinsicNode(PPCISD::ST_VSR_SCAL_INT, dl,
14056           DAG.getVTList(MVT::Other), Ops,
14057           cast<StoreSDNode>(N)->getMemoryVT(),
14058           cast<StoreSDNode>(N)->getMemOperand());
14059 
14060   DCI.AddToWorklist(Val.getNode());
14061   return Val;
14062 }
14063 
14064 static bool isAlternatingShuffMask(const ArrayRef<int> &Mask, int NumElts) {
14065   // Check that the source of the element keeps flipping
14066   // (i.e. Mask[i] < NumElts -> Mask[i+i] >= NumElts).
14067   bool PrevElemFromFirstVec = Mask[0] < NumElts;
14068   for (int i = 1, e = Mask.size(); i < e; i++) {
14069     if (PrevElemFromFirstVec && Mask[i] < NumElts)
14070       return false;
14071     if (!PrevElemFromFirstVec && Mask[i] >= NumElts)
14072       return false;
14073     PrevElemFromFirstVec = !PrevElemFromFirstVec;
14074   }
14075   return true;
14076 }
14077 
14078 static bool isSplatBV(SDValue Op) {
14079   if (Op.getOpcode() != ISD::BUILD_VECTOR)
14080     return false;
14081   SDValue FirstOp;
14082 
14083   // Find first non-undef input.
14084   for (int i = 0, e = Op.getNumOperands(); i < e; i++) {
14085     FirstOp = Op.getOperand(i);
14086     if (!FirstOp.isUndef())
14087       break;
14088   }
14089 
14090   // All inputs are undef or the same as the first non-undef input.
14091   for (int i = 1, e = Op.getNumOperands(); i < e; i++)
14092     if (Op.getOperand(i) != FirstOp && !Op.getOperand(i).isUndef())
14093       return false;
14094   return true;
14095 }
14096 
14097 static SDValue isScalarToVec(SDValue Op) {
14098   if (Op.getOpcode() == ISD::SCALAR_TO_VECTOR)
14099     return Op;
14100   if (Op.getOpcode() != ISD::BITCAST)
14101     return SDValue();
14102   Op = Op.getOperand(0);
14103   if (Op.getOpcode() == ISD::SCALAR_TO_VECTOR)
14104     return Op;
14105   return SDValue();
14106 }
14107 
14108 static void fixupShuffleMaskForPermutedSToV(SmallVectorImpl<int> &ShuffV,
14109                                             int LHSMaxIdx, int RHSMinIdx,
14110                                             int RHSMaxIdx, int HalfVec) {
14111   for (int i = 0, e = ShuffV.size(); i < e; i++) {
14112     int Idx = ShuffV[i];
14113     if ((Idx >= 0 && Idx < LHSMaxIdx) || (Idx >= RHSMinIdx && Idx < RHSMaxIdx))
14114       ShuffV[i] += HalfVec;
14115   }
14116   return;
14117 }
14118 
14119 // Replace a SCALAR_TO_VECTOR with a SCALAR_TO_VECTOR_PERMUTED except if
14120 // the original is:
14121 // (<n x Ty> (scalar_to_vector (Ty (extract_elt <n x Ty> %a, C))))
14122 // In such a case, just change the shuffle mask to extract the element
14123 // from the permuted index.
14124 static SDValue getSToVPermuted(SDValue OrigSToV, SelectionDAG &DAG) {
14125   SDLoc dl(OrigSToV);
14126   EVT VT = OrigSToV.getValueType();
14127   assert(OrigSToV.getOpcode() == ISD::SCALAR_TO_VECTOR &&
14128          "Expecting a SCALAR_TO_VECTOR here");
14129   SDValue Input = OrigSToV.getOperand(0);
14130 
14131   if (Input.getOpcode() == ISD::EXTRACT_VECTOR_ELT) {
14132     ConstantSDNode *Idx = dyn_cast<ConstantSDNode>(Input.getOperand(1));
14133     SDValue OrigVector = Input.getOperand(0);
14134 
14135     // Can't handle non-const element indices or different vector types
14136     // for the input to the extract and the output of the scalar_to_vector.
14137     if (Idx && VT == OrigVector.getValueType()) {
14138       SmallVector<int, 16> NewMask(VT.getVectorNumElements(), -1);
14139       NewMask[VT.getVectorNumElements() / 2] = Idx->getZExtValue();
14140       return DAG.getVectorShuffle(VT, dl, OrigVector, OrigVector, NewMask);
14141     }
14142   }
14143   return DAG.getNode(PPCISD::SCALAR_TO_VECTOR_PERMUTED, dl, VT,
14144                      OrigSToV.getOperand(0));
14145 }
14146 
14147 // On little endian subtargets, combine shuffles such as:
14148 // vector_shuffle<16,1,17,3,18,5,19,7,20,9,21,11,22,13,23,15>, <zero>, %b
14149 // into:
14150 // vector_shuffle<16,0,17,1,18,2,19,3,20,4,21,5,22,6,23,7>, <zero>, %b
14151 // because the latter can be matched to a single instruction merge.
14152 // Furthermore, SCALAR_TO_VECTOR on little endian always involves a permute
14153 // to put the value into element zero. Adjust the shuffle mask so that the
14154 // vector can remain in permuted form (to prevent a swap prior to a shuffle).
14155 SDValue PPCTargetLowering::combineVectorShuffle(ShuffleVectorSDNode *SVN,
14156                                                 SelectionDAG &DAG) const {
14157   SDValue LHS = SVN->getOperand(0);
14158   SDValue RHS = SVN->getOperand(1);
14159   auto Mask = SVN->getMask();
14160   int NumElts = LHS.getValueType().getVectorNumElements();
14161   SDValue Res(SVN, 0);
14162   SDLoc dl(SVN);
14163 
14164   // None of these combines are useful on big endian systems since the ISA
14165   // already has a big endian bias.
14166   if (!Subtarget.isLittleEndian() || !Subtarget.hasVSX())
14167     return Res;
14168 
14169   // If this is not a shuffle of a shuffle and the first element comes from
14170   // the second vector, canonicalize to the commuted form. This will make it
14171   // more likely to match one of the single instruction patterns.
14172   if (Mask[0] >= NumElts && LHS.getOpcode() != ISD::VECTOR_SHUFFLE &&
14173       RHS.getOpcode() != ISD::VECTOR_SHUFFLE) {
14174     std::swap(LHS, RHS);
14175     Res = DAG.getCommutedVectorShuffle(*SVN);
14176     Mask = cast<ShuffleVectorSDNode>(Res)->getMask();
14177   }
14178 
14179   // Adjust the shuffle mask if either input vector comes from a
14180   // SCALAR_TO_VECTOR and keep the respective input vector in permuted
14181   // form (to prevent the need for a swap).
14182   SmallVector<int, 16> ShuffV(Mask.begin(), Mask.end());
14183   SDValue SToVLHS = isScalarToVec(LHS);
14184   SDValue SToVRHS = isScalarToVec(RHS);
14185   if (SToVLHS || SToVRHS) {
14186     int NumEltsIn = SToVLHS ? SToVLHS.getValueType().getVectorNumElements()
14187                             : SToVRHS.getValueType().getVectorNumElements();
14188     int NumEltsOut = ShuffV.size();
14189 
14190     // Initially assume that neither input is permuted. These will be adjusted
14191     // accordingly if either input is.
14192     int LHSMaxIdx = -1;
14193     int RHSMinIdx = -1;
14194     int RHSMaxIdx = -1;
14195     int HalfVec = LHS.getValueType().getVectorNumElements() / 2;
14196 
14197     // Get the permuted scalar to vector nodes for the source(s) that come from
14198     // ISD::SCALAR_TO_VECTOR.
14199     if (SToVLHS) {
14200       // Set up the values for the shuffle vector fixup.
14201       LHSMaxIdx = NumEltsOut / NumEltsIn;
14202       SToVLHS = getSToVPermuted(SToVLHS, DAG);
14203       if (SToVLHS.getValueType() != LHS.getValueType())
14204         SToVLHS = DAG.getBitcast(LHS.getValueType(), SToVLHS);
14205       LHS = SToVLHS;
14206     }
14207     if (SToVRHS) {
14208       RHSMinIdx = NumEltsOut;
14209       RHSMaxIdx = NumEltsOut / NumEltsIn + RHSMinIdx;
14210       SToVRHS = getSToVPermuted(SToVRHS, DAG);
14211       if (SToVRHS.getValueType() != RHS.getValueType())
14212         SToVRHS = DAG.getBitcast(RHS.getValueType(), SToVRHS);
14213       RHS = SToVRHS;
14214     }
14215 
14216     // Fix up the shuffle mask to reflect where the desired element actually is.
14217     // The minimum and maximum indices that correspond to element zero for both
14218     // the LHS and RHS are computed and will control which shuffle mask entries
14219     // are to be changed. For example, if the RHS is permuted, any shuffle mask
14220     // entries in the range [RHSMinIdx,RHSMaxIdx) will be incremented by
14221     // HalfVec to refer to the corresponding element in the permuted vector.
14222     fixupShuffleMaskForPermutedSToV(ShuffV, LHSMaxIdx, RHSMinIdx, RHSMaxIdx,
14223                                     HalfVec);
14224     Res = DAG.getVectorShuffle(SVN->getValueType(0), dl, LHS, RHS, ShuffV);
14225 
14226     // We may have simplified away the shuffle. We won't be able to do anything
14227     // further with it here.
14228     if (!isa<ShuffleVectorSDNode>(Res))
14229       return Res;
14230     Mask = cast<ShuffleVectorSDNode>(Res)->getMask();
14231   }
14232 
14233   // The common case after we commuted the shuffle is that the RHS is a splat
14234   // and we have elements coming in from the splat at indices that are not
14235   // conducive to using a merge.
14236   // Example:
14237   // vector_shuffle<0,17,1,19,2,21,3,23,4,25,5,27,6,29,7,31> t1, <zero>
14238   if (!isSplatBV(RHS))
14239     return Res;
14240 
14241   // We are looking for a mask such that all even elements are from
14242   // one vector and all odd elements from the other.
14243   if (!isAlternatingShuffMask(Mask, NumElts))
14244     return Res;
14245 
14246   // Adjust the mask so we are pulling in the same index from the splat
14247   // as the index from the interesting vector in consecutive elements.
14248   // Example (even elements from first vector):
14249   // vector_shuffle<0,16,1,17,2,18,3,19,4,20,5,21,6,22,7,23> t1, <zero>
14250   if (Mask[0] < NumElts)
14251     for (int i = 1, e = Mask.size(); i < e; i += 2)
14252       ShuffV[i] = (ShuffV[i - 1] + NumElts);
14253   // Example (odd elements from first vector):
14254   // vector_shuffle<16,0,17,1,18,2,19,3,20,4,21,5,22,6,23,7> t1, <zero>
14255   else
14256     for (int i = 0, e = Mask.size(); i < e; i += 2)
14257       ShuffV[i] = (ShuffV[i + 1] + NumElts);
14258 
14259   // If the RHS has undefs, we need to remove them since we may have created
14260   // a shuffle that adds those instead of the splat value.
14261   SDValue SplatVal = cast<BuildVectorSDNode>(RHS.getNode())->getSplatValue();
14262   RHS = DAG.getSplatBuildVector(RHS.getValueType(), dl, SplatVal);
14263 
14264   Res = DAG.getVectorShuffle(SVN->getValueType(0), dl, LHS, RHS, ShuffV);
14265   return Res;
14266 }
14267 
14268 SDValue PPCTargetLowering::combineVReverseMemOP(ShuffleVectorSDNode *SVN,
14269                                                 LSBaseSDNode *LSBase,
14270                                                 DAGCombinerInfo &DCI) const {
14271   assert((ISD::isNormalLoad(LSBase) || ISD::isNormalStore(LSBase)) &&
14272         "Not a reverse memop pattern!");
14273 
14274   auto IsElementReverse = [](const ShuffleVectorSDNode *SVN) -> bool {
14275     auto Mask = SVN->getMask();
14276     int i = 0;
14277     auto I = Mask.rbegin();
14278     auto E = Mask.rend();
14279 
14280     for (; I != E; ++I) {
14281       if (*I != i)
14282         return false;
14283       i++;
14284     }
14285     return true;
14286   };
14287 
14288   SelectionDAG &DAG = DCI.DAG;
14289   EVT VT = SVN->getValueType(0);
14290 
14291   if (!isTypeLegal(VT) || !Subtarget.isLittleEndian() || !Subtarget.hasVSX())
14292     return SDValue();
14293 
14294   // Before P9, we have PPCVSXSwapRemoval pass to hack the element order.
14295   // See comment in PPCVSXSwapRemoval.cpp.
14296   // It is conflict with PPCVSXSwapRemoval opt. So we don't do it.
14297   if (!Subtarget.hasP9Vector())
14298     return SDValue();
14299 
14300   if(!IsElementReverse(SVN))
14301     return SDValue();
14302 
14303   if (LSBase->getOpcode() == ISD::LOAD) {
14304     SDLoc dl(SVN);
14305     SDValue LoadOps[] = {LSBase->getChain(), LSBase->getBasePtr()};
14306     return DAG.getMemIntrinsicNode(
14307         PPCISD::LOAD_VEC_BE, dl, DAG.getVTList(VT, MVT::Other), LoadOps,
14308         LSBase->getMemoryVT(), LSBase->getMemOperand());
14309   }
14310 
14311   if (LSBase->getOpcode() == ISD::STORE) {
14312     SDLoc dl(LSBase);
14313     SDValue StoreOps[] = {LSBase->getChain(), SVN->getOperand(0),
14314                           LSBase->getBasePtr()};
14315     return DAG.getMemIntrinsicNode(
14316         PPCISD::STORE_VEC_BE, dl, DAG.getVTList(MVT::Other), StoreOps,
14317         LSBase->getMemoryVT(), LSBase->getMemOperand());
14318   }
14319 
14320   llvm_unreachable("Expected a load or store node here");
14321 }
14322 
14323 SDValue PPCTargetLowering::PerformDAGCombine(SDNode *N,
14324                                              DAGCombinerInfo &DCI) const {
14325   SelectionDAG &DAG = DCI.DAG;
14326   SDLoc dl(N);
14327   switch (N->getOpcode()) {
14328   default: break;
14329   case ISD::ADD:
14330     return combineADD(N, DCI);
14331   case ISD::SHL:
14332     return combineSHL(N, DCI);
14333   case ISD::SRA:
14334     return combineSRA(N, DCI);
14335   case ISD::SRL:
14336     return combineSRL(N, DCI);
14337   case ISD::MUL:
14338     return combineMUL(N, DCI);
14339   case ISD::FMA:
14340   case PPCISD::FNMSUB:
14341     return combineFMALike(N, DCI);
14342   case PPCISD::SHL:
14343     if (isNullConstant(N->getOperand(0))) // 0 << V -> 0.
14344         return N->getOperand(0);
14345     break;
14346   case PPCISD::SRL:
14347     if (isNullConstant(N->getOperand(0))) // 0 >>u V -> 0.
14348         return N->getOperand(0);
14349     break;
14350   case PPCISD::SRA:
14351     if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(N->getOperand(0))) {
14352       if (C->isNullValue() ||   //  0 >>s V -> 0.
14353           C->isAllOnesValue())    // -1 >>s V -> -1.
14354         return N->getOperand(0);
14355     }
14356     break;
14357   case ISD::SIGN_EXTEND:
14358   case ISD::ZERO_EXTEND:
14359   case ISD::ANY_EXTEND:
14360     return DAGCombineExtBoolTrunc(N, DCI);
14361   case ISD::TRUNCATE:
14362     return combineTRUNCATE(N, DCI);
14363   case ISD::SETCC:
14364     if (SDValue CSCC = combineSetCC(N, DCI))
14365       return CSCC;
14366     LLVM_FALLTHROUGH;
14367   case ISD::SELECT_CC:
14368     return DAGCombineTruncBoolExt(N, DCI);
14369   case ISD::SINT_TO_FP:
14370   case ISD::UINT_TO_FP:
14371     return combineFPToIntToFP(N, DCI);
14372   case ISD::VECTOR_SHUFFLE:
14373     if (ISD::isNormalLoad(N->getOperand(0).getNode())) {
14374       LSBaseSDNode* LSBase = cast<LSBaseSDNode>(N->getOperand(0));
14375       return combineVReverseMemOP(cast<ShuffleVectorSDNode>(N), LSBase, DCI);
14376     }
14377     return combineVectorShuffle(cast<ShuffleVectorSDNode>(N), DCI.DAG);
14378   case ISD::STORE: {
14379 
14380     EVT Op1VT = N->getOperand(1).getValueType();
14381     unsigned Opcode = N->getOperand(1).getOpcode();
14382 
14383     if (Opcode == ISD::FP_TO_SINT || Opcode == ISD::FP_TO_UINT) {
14384       SDValue Val= combineStoreFPToInt(N, DCI);
14385       if (Val)
14386         return Val;
14387     }
14388 
14389     if (Opcode == ISD::VECTOR_SHUFFLE && ISD::isNormalStore(N)) {
14390       ShuffleVectorSDNode *SVN = cast<ShuffleVectorSDNode>(N->getOperand(1));
14391       SDValue Val= combineVReverseMemOP(SVN, cast<LSBaseSDNode>(N), DCI);
14392       if (Val)
14393         return Val;
14394     }
14395 
14396     // Turn STORE (BSWAP) -> sthbrx/stwbrx.
14397     if (cast<StoreSDNode>(N)->isUnindexed() && Opcode == ISD::BSWAP &&
14398         N->getOperand(1).getNode()->hasOneUse() &&
14399         (Op1VT == MVT::i32 || Op1VT == MVT::i16 ||
14400          (Subtarget.hasLDBRX() && Subtarget.isPPC64() && Op1VT == MVT::i64))) {
14401 
14402       // STBRX can only handle simple types and it makes no sense to store less
14403       // two bytes in byte-reversed order.
14404       EVT mVT = cast<StoreSDNode>(N)->getMemoryVT();
14405       if (mVT.isExtended() || mVT.getSizeInBits() < 16)
14406         break;
14407 
14408       SDValue BSwapOp = N->getOperand(1).getOperand(0);
14409       // Do an any-extend to 32-bits if this is a half-word input.
14410       if (BSwapOp.getValueType() == MVT::i16)
14411         BSwapOp = DAG.getNode(ISD::ANY_EXTEND, dl, MVT::i32, BSwapOp);
14412 
14413       // If the type of BSWAP operand is wider than stored memory width
14414       // it need to be shifted to the right side before STBRX.
14415       if (Op1VT.bitsGT(mVT)) {
14416         int Shift = Op1VT.getSizeInBits() - mVT.getSizeInBits();
14417         BSwapOp = DAG.getNode(ISD::SRL, dl, Op1VT, BSwapOp,
14418                               DAG.getConstant(Shift, dl, MVT::i32));
14419         // Need to truncate if this is a bswap of i64 stored as i32/i16.
14420         if (Op1VT == MVT::i64)
14421           BSwapOp = DAG.getNode(ISD::TRUNCATE, dl, MVT::i32, BSwapOp);
14422       }
14423 
14424       SDValue Ops[] = {
14425         N->getOperand(0), BSwapOp, N->getOperand(2), DAG.getValueType(mVT)
14426       };
14427       return
14428         DAG.getMemIntrinsicNode(PPCISD::STBRX, dl, DAG.getVTList(MVT::Other),
14429                                 Ops, cast<StoreSDNode>(N)->getMemoryVT(),
14430                                 cast<StoreSDNode>(N)->getMemOperand());
14431     }
14432 
14433     // STORE Constant:i32<0>  ->  STORE<trunc to i32> Constant:i64<0>
14434     // So it can increase the chance of CSE constant construction.
14435     if (Subtarget.isPPC64() && !DCI.isBeforeLegalize() &&
14436         isa<ConstantSDNode>(N->getOperand(1)) && Op1VT == MVT::i32) {
14437       // Need to sign-extended to 64-bits to handle negative values.
14438       EVT MemVT = cast<StoreSDNode>(N)->getMemoryVT();
14439       uint64_t Val64 = SignExtend64(N->getConstantOperandVal(1),
14440                                     MemVT.getSizeInBits());
14441       SDValue Const64 = DAG.getConstant(Val64, dl, MVT::i64);
14442 
14443       // DAG.getTruncStore() can't be used here because it doesn't accept
14444       // the general (base + offset) addressing mode.
14445       // So we use UpdateNodeOperands and setTruncatingStore instead.
14446       DAG.UpdateNodeOperands(N, N->getOperand(0), Const64, N->getOperand(2),
14447                              N->getOperand(3));
14448       cast<StoreSDNode>(N)->setTruncatingStore(true);
14449       return SDValue(N, 0);
14450     }
14451 
14452     // For little endian, VSX stores require generating xxswapd/lxvd2x.
14453     // Not needed on ISA 3.0 based CPUs since we have a non-permuting store.
14454     if (Op1VT.isSimple()) {
14455       MVT StoreVT = Op1VT.getSimpleVT();
14456       if (Subtarget.needsSwapsForVSXMemOps() &&
14457           (StoreVT == MVT::v2f64 || StoreVT == MVT::v2i64 ||
14458            StoreVT == MVT::v4f32 || StoreVT == MVT::v4i32))
14459         return expandVSXStoreForLE(N, DCI);
14460     }
14461     break;
14462   }
14463   case ISD::LOAD: {
14464     LoadSDNode *LD = cast<LoadSDNode>(N);
14465     EVT VT = LD->getValueType(0);
14466 
14467     // For little endian, VSX loads require generating lxvd2x/xxswapd.
14468     // Not needed on ISA 3.0 based CPUs since we have a non-permuting load.
14469     if (VT.isSimple()) {
14470       MVT LoadVT = VT.getSimpleVT();
14471       if (Subtarget.needsSwapsForVSXMemOps() &&
14472           (LoadVT == MVT::v2f64 || LoadVT == MVT::v2i64 ||
14473            LoadVT == MVT::v4f32 || LoadVT == MVT::v4i32))
14474         return expandVSXLoadForLE(N, DCI);
14475     }
14476 
14477     // We sometimes end up with a 64-bit integer load, from which we extract
14478     // two single-precision floating-point numbers. This happens with
14479     // std::complex<float>, and other similar structures, because of the way we
14480     // canonicalize structure copies. However, if we lack direct moves,
14481     // then the final bitcasts from the extracted integer values to the
14482     // floating-point numbers turn into store/load pairs. Even with direct moves,
14483     // just loading the two floating-point numbers is likely better.
14484     auto ReplaceTwoFloatLoad = [&]() {
14485       if (VT != MVT::i64)
14486         return false;
14487 
14488       if (LD->getExtensionType() != ISD::NON_EXTLOAD ||
14489           LD->isVolatile())
14490         return false;
14491 
14492       //  We're looking for a sequence like this:
14493       //  t13: i64,ch = load<LD8[%ref.tmp]> t0, t6, undef:i64
14494       //      t16: i64 = srl t13, Constant:i32<32>
14495       //    t17: i32 = truncate t16
14496       //  t18: f32 = bitcast t17
14497       //    t19: i32 = truncate t13
14498       //  t20: f32 = bitcast t19
14499 
14500       if (!LD->hasNUsesOfValue(2, 0))
14501         return false;
14502 
14503       auto UI = LD->use_begin();
14504       while (UI.getUse().getResNo() != 0) ++UI;
14505       SDNode *Trunc = *UI++;
14506       while (UI.getUse().getResNo() != 0) ++UI;
14507       SDNode *RightShift = *UI;
14508       if (Trunc->getOpcode() != ISD::TRUNCATE)
14509         std::swap(Trunc, RightShift);
14510 
14511       if (Trunc->getOpcode() != ISD::TRUNCATE ||
14512           Trunc->getValueType(0) != MVT::i32 ||
14513           !Trunc->hasOneUse())
14514         return false;
14515       if (RightShift->getOpcode() != ISD::SRL ||
14516           !isa<ConstantSDNode>(RightShift->getOperand(1)) ||
14517           RightShift->getConstantOperandVal(1) != 32 ||
14518           !RightShift->hasOneUse())
14519         return false;
14520 
14521       SDNode *Trunc2 = *RightShift->use_begin();
14522       if (Trunc2->getOpcode() != ISD::TRUNCATE ||
14523           Trunc2->getValueType(0) != MVT::i32 ||
14524           !Trunc2->hasOneUse())
14525         return false;
14526 
14527       SDNode *Bitcast = *Trunc->use_begin();
14528       SDNode *Bitcast2 = *Trunc2->use_begin();
14529 
14530       if (Bitcast->getOpcode() != ISD::BITCAST ||
14531           Bitcast->getValueType(0) != MVT::f32)
14532         return false;
14533       if (Bitcast2->getOpcode() != ISD::BITCAST ||
14534           Bitcast2->getValueType(0) != MVT::f32)
14535         return false;
14536 
14537       if (Subtarget.isLittleEndian())
14538         std::swap(Bitcast, Bitcast2);
14539 
14540       // Bitcast has the second float (in memory-layout order) and Bitcast2
14541       // has the first one.
14542 
14543       SDValue BasePtr = LD->getBasePtr();
14544       if (LD->isIndexed()) {
14545         assert(LD->getAddressingMode() == ISD::PRE_INC &&
14546                "Non-pre-inc AM on PPC?");
14547         BasePtr =
14548           DAG.getNode(ISD::ADD, dl, BasePtr.getValueType(), BasePtr,
14549                       LD->getOffset());
14550       }
14551 
14552       auto MMOFlags =
14553           LD->getMemOperand()->getFlags() & ~MachineMemOperand::MOVolatile;
14554       SDValue FloatLoad = DAG.getLoad(MVT::f32, dl, LD->getChain(), BasePtr,
14555                                       LD->getPointerInfo(), LD->getAlignment(),
14556                                       MMOFlags, LD->getAAInfo());
14557       SDValue AddPtr =
14558         DAG.getNode(ISD::ADD, dl, BasePtr.getValueType(),
14559                     BasePtr, DAG.getIntPtrConstant(4, dl));
14560       SDValue FloatLoad2 = DAG.getLoad(
14561           MVT::f32, dl, SDValue(FloatLoad.getNode(), 1), AddPtr,
14562           LD->getPointerInfo().getWithOffset(4),
14563           MinAlign(LD->getAlignment(), 4), MMOFlags, LD->getAAInfo());
14564 
14565       if (LD->isIndexed()) {
14566         // Note that DAGCombine should re-form any pre-increment load(s) from
14567         // what is produced here if that makes sense.
14568         DAG.ReplaceAllUsesOfValueWith(SDValue(LD, 1), BasePtr);
14569       }
14570 
14571       DCI.CombineTo(Bitcast2, FloatLoad);
14572       DCI.CombineTo(Bitcast, FloatLoad2);
14573 
14574       DAG.ReplaceAllUsesOfValueWith(SDValue(LD, LD->isIndexed() ? 2 : 1),
14575                                     SDValue(FloatLoad2.getNode(), 1));
14576       return true;
14577     };
14578 
14579     if (ReplaceTwoFloatLoad())
14580       return SDValue(N, 0);
14581 
14582     EVT MemVT = LD->getMemoryVT();
14583     Type *Ty = MemVT.getTypeForEVT(*DAG.getContext());
14584     Align ABIAlignment = DAG.getDataLayout().getABITypeAlign(Ty);
14585     if (LD->isUnindexed() && VT.isVector() &&
14586         ((Subtarget.hasAltivec() && ISD::isNON_EXTLoad(N) &&
14587           // P8 and later hardware should just use LOAD.
14588           !Subtarget.hasP8Vector() &&
14589           (VT == MVT::v16i8 || VT == MVT::v8i16 || VT == MVT::v4i32 ||
14590            VT == MVT::v4f32))) &&
14591         LD->getAlign() < ABIAlignment) {
14592       // This is a type-legal unaligned Altivec load.
14593       SDValue Chain = LD->getChain();
14594       SDValue Ptr = LD->getBasePtr();
14595       bool isLittleEndian = Subtarget.isLittleEndian();
14596 
14597       // This implements the loading of unaligned vectors as described in
14598       // the venerable Apple Velocity Engine overview. Specifically:
14599       // https://developer.apple.com/hardwaredrivers/ve/alignment.html
14600       // https://developer.apple.com/hardwaredrivers/ve/code_optimization.html
14601       //
14602       // The general idea is to expand a sequence of one or more unaligned
14603       // loads into an alignment-based permutation-control instruction (lvsl
14604       // or lvsr), a series of regular vector loads (which always truncate
14605       // their input address to an aligned address), and a series of
14606       // permutations.  The results of these permutations are the requested
14607       // loaded values.  The trick is that the last "extra" load is not taken
14608       // from the address you might suspect (sizeof(vector) bytes after the
14609       // last requested load), but rather sizeof(vector) - 1 bytes after the
14610       // last requested vector. The point of this is to avoid a page fault if
14611       // the base address happened to be aligned. This works because if the
14612       // base address is aligned, then adding less than a full vector length
14613       // will cause the last vector in the sequence to be (re)loaded.
14614       // Otherwise, the next vector will be fetched as you might suspect was
14615       // necessary.
14616 
14617       // We might be able to reuse the permutation generation from
14618       // a different base address offset from this one by an aligned amount.
14619       // The INTRINSIC_WO_CHAIN DAG combine will attempt to perform this
14620       // optimization later.
14621       Intrinsic::ID Intr, IntrLD, IntrPerm;
14622       MVT PermCntlTy, PermTy, LDTy;
14623       Intr = isLittleEndian ? Intrinsic::ppc_altivec_lvsr
14624                             : Intrinsic::ppc_altivec_lvsl;
14625       IntrLD = Intrinsic::ppc_altivec_lvx;
14626       IntrPerm = Intrinsic::ppc_altivec_vperm;
14627       PermCntlTy = MVT::v16i8;
14628       PermTy = MVT::v4i32;
14629       LDTy = MVT::v4i32;
14630 
14631       SDValue PermCntl = BuildIntrinsicOp(Intr, Ptr, DAG, dl, PermCntlTy);
14632 
14633       // Create the new MMO for the new base load. It is like the original MMO,
14634       // but represents an area in memory almost twice the vector size centered
14635       // on the original address. If the address is unaligned, we might start
14636       // reading up to (sizeof(vector)-1) bytes below the address of the
14637       // original unaligned load.
14638       MachineFunction &MF = DAG.getMachineFunction();
14639       MachineMemOperand *BaseMMO =
14640         MF.getMachineMemOperand(LD->getMemOperand(),
14641                                 -(long)MemVT.getStoreSize()+1,
14642                                 2*MemVT.getStoreSize()-1);
14643 
14644       // Create the new base load.
14645       SDValue LDXIntID =
14646           DAG.getTargetConstant(IntrLD, dl, getPointerTy(MF.getDataLayout()));
14647       SDValue BaseLoadOps[] = { Chain, LDXIntID, Ptr };
14648       SDValue BaseLoad =
14649         DAG.getMemIntrinsicNode(ISD::INTRINSIC_W_CHAIN, dl,
14650                                 DAG.getVTList(PermTy, MVT::Other),
14651                                 BaseLoadOps, LDTy, BaseMMO);
14652 
14653       // Note that the value of IncOffset (which is provided to the next
14654       // load's pointer info offset value, and thus used to calculate the
14655       // alignment), and the value of IncValue (which is actually used to
14656       // increment the pointer value) are different! This is because we
14657       // require the next load to appear to be aligned, even though it
14658       // is actually offset from the base pointer by a lesser amount.
14659       int IncOffset = VT.getSizeInBits() / 8;
14660       int IncValue = IncOffset;
14661 
14662       // Walk (both up and down) the chain looking for another load at the real
14663       // (aligned) offset (the alignment of the other load does not matter in
14664       // this case). If found, then do not use the offset reduction trick, as
14665       // that will prevent the loads from being later combined (as they would
14666       // otherwise be duplicates).
14667       if (!findConsecutiveLoad(LD, DAG))
14668         --IncValue;
14669 
14670       SDValue Increment =
14671           DAG.getConstant(IncValue, dl, getPointerTy(MF.getDataLayout()));
14672       Ptr = DAG.getNode(ISD::ADD, dl, Ptr.getValueType(), Ptr, Increment);
14673 
14674       MachineMemOperand *ExtraMMO =
14675         MF.getMachineMemOperand(LD->getMemOperand(),
14676                                 1, 2*MemVT.getStoreSize()-1);
14677       SDValue ExtraLoadOps[] = { Chain, LDXIntID, Ptr };
14678       SDValue ExtraLoad =
14679         DAG.getMemIntrinsicNode(ISD::INTRINSIC_W_CHAIN, dl,
14680                                 DAG.getVTList(PermTy, MVT::Other),
14681                                 ExtraLoadOps, LDTy, ExtraMMO);
14682 
14683       SDValue TF = DAG.getNode(ISD::TokenFactor, dl, MVT::Other,
14684         BaseLoad.getValue(1), ExtraLoad.getValue(1));
14685 
14686       // Because vperm has a big-endian bias, we must reverse the order
14687       // of the input vectors and complement the permute control vector
14688       // when generating little endian code.  We have already handled the
14689       // latter by using lvsr instead of lvsl, so just reverse BaseLoad
14690       // and ExtraLoad here.
14691       SDValue Perm;
14692       if (isLittleEndian)
14693         Perm = BuildIntrinsicOp(IntrPerm,
14694                                 ExtraLoad, BaseLoad, PermCntl, DAG, dl);
14695       else
14696         Perm = BuildIntrinsicOp(IntrPerm,
14697                                 BaseLoad, ExtraLoad, PermCntl, DAG, dl);
14698 
14699       if (VT != PermTy)
14700         Perm = Subtarget.hasAltivec()
14701                    ? DAG.getNode(ISD::BITCAST, dl, VT, Perm)
14702                    : DAG.getNode(ISD::FP_ROUND, dl, VT, Perm,
14703                                  DAG.getTargetConstant(1, dl, MVT::i64));
14704                                // second argument is 1 because this rounding
14705                                // is always exact.
14706 
14707       // The output of the permutation is our loaded result, the TokenFactor is
14708       // our new chain.
14709       DCI.CombineTo(N, Perm, TF);
14710       return SDValue(N, 0);
14711     }
14712     }
14713     break;
14714     case ISD::INTRINSIC_WO_CHAIN: {
14715       bool isLittleEndian = Subtarget.isLittleEndian();
14716       unsigned IID = cast<ConstantSDNode>(N->getOperand(0))->getZExtValue();
14717       Intrinsic::ID Intr = (isLittleEndian ? Intrinsic::ppc_altivec_lvsr
14718                                            : Intrinsic::ppc_altivec_lvsl);
14719       if (IID == Intr && N->getOperand(1)->getOpcode() == ISD::ADD) {
14720         SDValue Add = N->getOperand(1);
14721 
14722         int Bits = 4 /* 16 byte alignment */;
14723 
14724         if (DAG.MaskedValueIsZero(Add->getOperand(1),
14725                                   APInt::getAllOnesValue(Bits /* alignment */)
14726                                       .zext(Add.getScalarValueSizeInBits()))) {
14727           SDNode *BasePtr = Add->getOperand(0).getNode();
14728           for (SDNode::use_iterator UI = BasePtr->use_begin(),
14729                                     UE = BasePtr->use_end();
14730                UI != UE; ++UI) {
14731             if (UI->getOpcode() == ISD::INTRINSIC_WO_CHAIN &&
14732                 cast<ConstantSDNode>(UI->getOperand(0))->getZExtValue() ==
14733                     IID) {
14734               // We've found another LVSL/LVSR, and this address is an aligned
14735               // multiple of that one. The results will be the same, so use the
14736               // one we've just found instead.
14737 
14738               return SDValue(*UI, 0);
14739             }
14740           }
14741         }
14742 
14743         if (isa<ConstantSDNode>(Add->getOperand(1))) {
14744           SDNode *BasePtr = Add->getOperand(0).getNode();
14745           for (SDNode::use_iterator UI = BasePtr->use_begin(),
14746                UE = BasePtr->use_end(); UI != UE; ++UI) {
14747             if (UI->getOpcode() == ISD::ADD &&
14748                 isa<ConstantSDNode>(UI->getOperand(1)) &&
14749                 (cast<ConstantSDNode>(Add->getOperand(1))->getZExtValue() -
14750                  cast<ConstantSDNode>(UI->getOperand(1))->getZExtValue()) %
14751                 (1ULL << Bits) == 0) {
14752               SDNode *OtherAdd = *UI;
14753               for (SDNode::use_iterator VI = OtherAdd->use_begin(),
14754                    VE = OtherAdd->use_end(); VI != VE; ++VI) {
14755                 if (VI->getOpcode() == ISD::INTRINSIC_WO_CHAIN &&
14756                     cast<ConstantSDNode>(VI->getOperand(0))->getZExtValue() == IID) {
14757                   return SDValue(*VI, 0);
14758                 }
14759               }
14760             }
14761           }
14762         }
14763       }
14764 
14765       // Combine vmaxsw/h/b(a, a's negation) to abs(a)
14766       // Expose the vabsduw/h/b opportunity for down stream
14767       if (!DCI.isAfterLegalizeDAG() && Subtarget.hasP9Altivec() &&
14768           (IID == Intrinsic::ppc_altivec_vmaxsw ||
14769            IID == Intrinsic::ppc_altivec_vmaxsh ||
14770            IID == Intrinsic::ppc_altivec_vmaxsb)) {
14771         SDValue V1 = N->getOperand(1);
14772         SDValue V2 = N->getOperand(2);
14773         if ((V1.getSimpleValueType() == MVT::v4i32 ||
14774              V1.getSimpleValueType() == MVT::v8i16 ||
14775              V1.getSimpleValueType() == MVT::v16i8) &&
14776             V1.getSimpleValueType() == V2.getSimpleValueType()) {
14777           // (0-a, a)
14778           if (V1.getOpcode() == ISD::SUB &&
14779               ISD::isBuildVectorAllZeros(V1.getOperand(0).getNode()) &&
14780               V1.getOperand(1) == V2) {
14781             return DAG.getNode(ISD::ABS, dl, V2.getValueType(), V2);
14782           }
14783           // (a, 0-a)
14784           if (V2.getOpcode() == ISD::SUB &&
14785               ISD::isBuildVectorAllZeros(V2.getOperand(0).getNode()) &&
14786               V2.getOperand(1) == V1) {
14787             return DAG.getNode(ISD::ABS, dl, V1.getValueType(), V1);
14788           }
14789           // (x-y, y-x)
14790           if (V1.getOpcode() == ISD::SUB && V2.getOpcode() == ISD::SUB &&
14791               V1.getOperand(0) == V2.getOperand(1) &&
14792               V1.getOperand(1) == V2.getOperand(0)) {
14793             return DAG.getNode(ISD::ABS, dl, V1.getValueType(), V1);
14794           }
14795         }
14796       }
14797     }
14798 
14799     break;
14800   case ISD::INTRINSIC_W_CHAIN:
14801     // For little endian, VSX loads require generating lxvd2x/xxswapd.
14802     // Not needed on ISA 3.0 based CPUs since we have a non-permuting load.
14803     if (Subtarget.needsSwapsForVSXMemOps()) {
14804       switch (cast<ConstantSDNode>(N->getOperand(1))->getZExtValue()) {
14805       default:
14806         break;
14807       case Intrinsic::ppc_vsx_lxvw4x:
14808       case Intrinsic::ppc_vsx_lxvd2x:
14809         return expandVSXLoadForLE(N, DCI);
14810       }
14811     }
14812     break;
14813   case ISD::INTRINSIC_VOID:
14814     // For little endian, VSX stores require generating xxswapd/stxvd2x.
14815     // Not needed on ISA 3.0 based CPUs since we have a non-permuting store.
14816     if (Subtarget.needsSwapsForVSXMemOps()) {
14817       switch (cast<ConstantSDNode>(N->getOperand(1))->getZExtValue()) {
14818       default:
14819         break;
14820       case Intrinsic::ppc_vsx_stxvw4x:
14821       case Intrinsic::ppc_vsx_stxvd2x:
14822         return expandVSXStoreForLE(N, DCI);
14823       }
14824     }
14825     break;
14826   case ISD::BSWAP:
14827     // Turn BSWAP (LOAD) -> lhbrx/lwbrx.
14828     if (ISD::isNON_EXTLoad(N->getOperand(0).getNode()) &&
14829         N->getOperand(0).hasOneUse() &&
14830         (N->getValueType(0) == MVT::i32 || N->getValueType(0) == MVT::i16 ||
14831          (Subtarget.hasLDBRX() && Subtarget.isPPC64() &&
14832           N->getValueType(0) == MVT::i64))) {
14833       SDValue Load = N->getOperand(0);
14834       LoadSDNode *LD = cast<LoadSDNode>(Load);
14835       // Create the byte-swapping load.
14836       SDValue Ops[] = {
14837         LD->getChain(),    // Chain
14838         LD->getBasePtr(),  // Ptr
14839         DAG.getValueType(N->getValueType(0)) // VT
14840       };
14841       SDValue BSLoad =
14842         DAG.getMemIntrinsicNode(PPCISD::LBRX, dl,
14843                                 DAG.getVTList(N->getValueType(0) == MVT::i64 ?
14844                                               MVT::i64 : MVT::i32, MVT::Other),
14845                                 Ops, LD->getMemoryVT(), LD->getMemOperand());
14846 
14847       // If this is an i16 load, insert the truncate.
14848       SDValue ResVal = BSLoad;
14849       if (N->getValueType(0) == MVT::i16)
14850         ResVal = DAG.getNode(ISD::TRUNCATE, dl, MVT::i16, BSLoad);
14851 
14852       // First, combine the bswap away.  This makes the value produced by the
14853       // load dead.
14854       DCI.CombineTo(N, ResVal);
14855 
14856       // Next, combine the load away, we give it a bogus result value but a real
14857       // chain result.  The result value is dead because the bswap is dead.
14858       DCI.CombineTo(Load.getNode(), ResVal, BSLoad.getValue(1));
14859 
14860       // Return N so it doesn't get rechecked!
14861       return SDValue(N, 0);
14862     }
14863     break;
14864   case PPCISD::VCMP:
14865     // If a VCMPo node already exists with exactly the same operands as this
14866     // node, use its result instead of this node (VCMPo computes both a CR6 and
14867     // a normal output).
14868     //
14869     if (!N->getOperand(0).hasOneUse() &&
14870         !N->getOperand(1).hasOneUse() &&
14871         !N->getOperand(2).hasOneUse()) {
14872 
14873       // Scan all of the users of the LHS, looking for VCMPo's that match.
14874       SDNode *VCMPoNode = nullptr;
14875 
14876       SDNode *LHSN = N->getOperand(0).getNode();
14877       for (SDNode::use_iterator UI = LHSN->use_begin(), E = LHSN->use_end();
14878            UI != E; ++UI)
14879         if (UI->getOpcode() == PPCISD::VCMPo &&
14880             UI->getOperand(1) == N->getOperand(1) &&
14881             UI->getOperand(2) == N->getOperand(2) &&
14882             UI->getOperand(0) == N->getOperand(0)) {
14883           VCMPoNode = *UI;
14884           break;
14885         }
14886 
14887       // If there is no VCMPo node, or if the flag value has a single use, don't
14888       // transform this.
14889       if (!VCMPoNode || VCMPoNode->hasNUsesOfValue(0, 1))
14890         break;
14891 
14892       // Look at the (necessarily single) use of the flag value.  If it has a
14893       // chain, this transformation is more complex.  Note that multiple things
14894       // could use the value result, which we should ignore.
14895       SDNode *FlagUser = nullptr;
14896       for (SDNode::use_iterator UI = VCMPoNode->use_begin();
14897            FlagUser == nullptr; ++UI) {
14898         assert(UI != VCMPoNode->use_end() && "Didn't find user!");
14899         SDNode *User = *UI;
14900         for (unsigned i = 0, e = User->getNumOperands(); i != e; ++i) {
14901           if (User->getOperand(i) == SDValue(VCMPoNode, 1)) {
14902             FlagUser = User;
14903             break;
14904           }
14905         }
14906       }
14907 
14908       // If the user is a MFOCRF instruction, we know this is safe.
14909       // Otherwise we give up for right now.
14910       if (FlagUser->getOpcode() == PPCISD::MFOCRF)
14911         return SDValue(VCMPoNode, 0);
14912     }
14913     break;
14914   case ISD::BRCOND: {
14915     SDValue Cond = N->getOperand(1);
14916     SDValue Target = N->getOperand(2);
14917 
14918     if (Cond.getOpcode() == ISD::INTRINSIC_W_CHAIN &&
14919         cast<ConstantSDNode>(Cond.getOperand(1))->getZExtValue() ==
14920           Intrinsic::loop_decrement) {
14921 
14922       // We now need to make the intrinsic dead (it cannot be instruction
14923       // selected).
14924       DAG.ReplaceAllUsesOfValueWith(Cond.getValue(1), Cond.getOperand(0));
14925       assert(Cond.getNode()->hasOneUse() &&
14926              "Counter decrement has more than one use");
14927 
14928       return DAG.getNode(PPCISD::BDNZ, dl, MVT::Other,
14929                          N->getOperand(0), Target);
14930     }
14931   }
14932   break;
14933   case ISD::BR_CC: {
14934     // If this is a branch on an altivec predicate comparison, lower this so
14935     // that we don't have to do a MFOCRF: instead, branch directly on CR6.  This
14936     // lowering is done pre-legalize, because the legalizer lowers the predicate
14937     // compare down to code that is difficult to reassemble.
14938     ISD::CondCode CC = cast<CondCodeSDNode>(N->getOperand(1))->get();
14939     SDValue LHS = N->getOperand(2), RHS = N->getOperand(3);
14940 
14941     // Sometimes the promoted value of the intrinsic is ANDed by some non-zero
14942     // value. If so, pass-through the AND to get to the intrinsic.
14943     if (LHS.getOpcode() == ISD::AND &&
14944         LHS.getOperand(0).getOpcode() == ISD::INTRINSIC_W_CHAIN &&
14945         cast<ConstantSDNode>(LHS.getOperand(0).getOperand(1))->getZExtValue() ==
14946           Intrinsic::loop_decrement &&
14947         isa<ConstantSDNode>(LHS.getOperand(1)) &&
14948         !isNullConstant(LHS.getOperand(1)))
14949       LHS = LHS.getOperand(0);
14950 
14951     if (LHS.getOpcode() == ISD::INTRINSIC_W_CHAIN &&
14952         cast<ConstantSDNode>(LHS.getOperand(1))->getZExtValue() ==
14953           Intrinsic::loop_decrement &&
14954         isa<ConstantSDNode>(RHS)) {
14955       assert((CC == ISD::SETEQ || CC == ISD::SETNE) &&
14956              "Counter decrement comparison is not EQ or NE");
14957 
14958       unsigned Val = cast<ConstantSDNode>(RHS)->getZExtValue();
14959       bool isBDNZ = (CC == ISD::SETEQ && Val) ||
14960                     (CC == ISD::SETNE && !Val);
14961 
14962       // We now need to make the intrinsic dead (it cannot be instruction
14963       // selected).
14964       DAG.ReplaceAllUsesOfValueWith(LHS.getValue(1), LHS.getOperand(0));
14965       assert(LHS.getNode()->hasOneUse() &&
14966              "Counter decrement has more than one use");
14967 
14968       return DAG.getNode(isBDNZ ? PPCISD::BDNZ : PPCISD::BDZ, dl, MVT::Other,
14969                          N->getOperand(0), N->getOperand(4));
14970     }
14971 
14972     int CompareOpc;
14973     bool isDot;
14974 
14975     if (LHS.getOpcode() == ISD::INTRINSIC_WO_CHAIN &&
14976         isa<ConstantSDNode>(RHS) && (CC == ISD::SETEQ || CC == ISD::SETNE) &&
14977         getVectorCompareInfo(LHS, CompareOpc, isDot, Subtarget)) {
14978       assert(isDot && "Can't compare against a vector result!");
14979 
14980       // If this is a comparison against something other than 0/1, then we know
14981       // that the condition is never/always true.
14982       unsigned Val = cast<ConstantSDNode>(RHS)->getZExtValue();
14983       if (Val != 0 && Val != 1) {
14984         if (CC == ISD::SETEQ)      // Cond never true, remove branch.
14985           return N->getOperand(0);
14986         // Always !=, turn it into an unconditional branch.
14987         return DAG.getNode(ISD::BR, dl, MVT::Other,
14988                            N->getOperand(0), N->getOperand(4));
14989       }
14990 
14991       bool BranchOnWhenPredTrue = (CC == ISD::SETEQ) ^ (Val == 0);
14992 
14993       // Create the PPCISD altivec 'dot' comparison node.
14994       SDValue Ops[] = {
14995         LHS.getOperand(2),  // LHS of compare
14996         LHS.getOperand(3),  // RHS of compare
14997         DAG.getConstant(CompareOpc, dl, MVT::i32)
14998       };
14999       EVT VTs[] = { LHS.getOperand(2).getValueType(), MVT::Glue };
15000       SDValue CompNode = DAG.getNode(PPCISD::VCMPo, dl, VTs, Ops);
15001 
15002       // Unpack the result based on how the target uses it.
15003       PPC::Predicate CompOpc;
15004       switch (cast<ConstantSDNode>(LHS.getOperand(1))->getZExtValue()) {
15005       default:  // Can't happen, don't crash on invalid number though.
15006       case 0:   // Branch on the value of the EQ bit of CR6.
15007         CompOpc = BranchOnWhenPredTrue ? PPC::PRED_EQ : PPC::PRED_NE;
15008         break;
15009       case 1:   // Branch on the inverted value of the EQ bit of CR6.
15010         CompOpc = BranchOnWhenPredTrue ? PPC::PRED_NE : PPC::PRED_EQ;
15011         break;
15012       case 2:   // Branch on the value of the LT bit of CR6.
15013         CompOpc = BranchOnWhenPredTrue ? PPC::PRED_LT : PPC::PRED_GE;
15014         break;
15015       case 3:   // Branch on the inverted value of the LT bit of CR6.
15016         CompOpc = BranchOnWhenPredTrue ? PPC::PRED_GE : PPC::PRED_LT;
15017         break;
15018       }
15019 
15020       return DAG.getNode(PPCISD::COND_BRANCH, dl, MVT::Other, N->getOperand(0),
15021                          DAG.getConstant(CompOpc, dl, MVT::i32),
15022                          DAG.getRegister(PPC::CR6, MVT::i32),
15023                          N->getOperand(4), CompNode.getValue(1));
15024     }
15025     break;
15026   }
15027   case ISD::BUILD_VECTOR:
15028     return DAGCombineBuildVector(N, DCI);
15029   case ISD::ABS:
15030     return combineABS(N, DCI);
15031   case ISD::VSELECT:
15032     return combineVSelect(N, DCI);
15033   }
15034 
15035   return SDValue();
15036 }
15037 
15038 SDValue
15039 PPCTargetLowering::BuildSDIVPow2(SDNode *N, const APInt &Divisor,
15040                                  SelectionDAG &DAG,
15041                                  SmallVectorImpl<SDNode *> &Created) const {
15042   // fold (sdiv X, pow2)
15043   EVT VT = N->getValueType(0);
15044   if (VT == MVT::i64 && !Subtarget.isPPC64())
15045     return SDValue();
15046   if ((VT != MVT::i32 && VT != MVT::i64) ||
15047       !(Divisor.isPowerOf2() || (-Divisor).isPowerOf2()))
15048     return SDValue();
15049 
15050   SDLoc DL(N);
15051   SDValue N0 = N->getOperand(0);
15052 
15053   bool IsNegPow2 = (-Divisor).isPowerOf2();
15054   unsigned Lg2 = (IsNegPow2 ? -Divisor : Divisor).countTrailingZeros();
15055   SDValue ShiftAmt = DAG.getConstant(Lg2, DL, VT);
15056 
15057   SDValue Op = DAG.getNode(PPCISD::SRA_ADDZE, DL, VT, N0, ShiftAmt);
15058   Created.push_back(Op.getNode());
15059 
15060   if (IsNegPow2) {
15061     Op = DAG.getNode(ISD::SUB, DL, VT, DAG.getConstant(0, DL, VT), Op);
15062     Created.push_back(Op.getNode());
15063   }
15064 
15065   return Op;
15066 }
15067 
15068 //===----------------------------------------------------------------------===//
15069 // Inline Assembly Support
15070 //===----------------------------------------------------------------------===//
15071 
15072 void PPCTargetLowering::computeKnownBitsForTargetNode(const SDValue Op,
15073                                                       KnownBits &Known,
15074                                                       const APInt &DemandedElts,
15075                                                       const SelectionDAG &DAG,
15076                                                       unsigned Depth) const {
15077   Known.resetAll();
15078   switch (Op.getOpcode()) {
15079   default: break;
15080   case PPCISD::LBRX: {
15081     // lhbrx is known to have the top bits cleared out.
15082     if (cast<VTSDNode>(Op.getOperand(2))->getVT() == MVT::i16)
15083       Known.Zero = 0xFFFF0000;
15084     break;
15085   }
15086   case ISD::INTRINSIC_WO_CHAIN: {
15087     switch (cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue()) {
15088     default: break;
15089     case Intrinsic::ppc_altivec_vcmpbfp_p:
15090     case Intrinsic::ppc_altivec_vcmpeqfp_p:
15091     case Intrinsic::ppc_altivec_vcmpequb_p:
15092     case Intrinsic::ppc_altivec_vcmpequh_p:
15093     case Intrinsic::ppc_altivec_vcmpequw_p:
15094     case Intrinsic::ppc_altivec_vcmpequd_p:
15095     case Intrinsic::ppc_altivec_vcmpgefp_p:
15096     case Intrinsic::ppc_altivec_vcmpgtfp_p:
15097     case Intrinsic::ppc_altivec_vcmpgtsb_p:
15098     case Intrinsic::ppc_altivec_vcmpgtsh_p:
15099     case Intrinsic::ppc_altivec_vcmpgtsw_p:
15100     case Intrinsic::ppc_altivec_vcmpgtsd_p:
15101     case Intrinsic::ppc_altivec_vcmpgtub_p:
15102     case Intrinsic::ppc_altivec_vcmpgtuh_p:
15103     case Intrinsic::ppc_altivec_vcmpgtuw_p:
15104     case Intrinsic::ppc_altivec_vcmpgtud_p:
15105       Known.Zero = ~1U;  // All bits but the low one are known to be zero.
15106       break;
15107     }
15108   }
15109   }
15110 }
15111 
15112 Align PPCTargetLowering::getPrefLoopAlignment(MachineLoop *ML) const {
15113   switch (Subtarget.getCPUDirective()) {
15114   default: break;
15115   case PPC::DIR_970:
15116   case PPC::DIR_PWR4:
15117   case PPC::DIR_PWR5:
15118   case PPC::DIR_PWR5X:
15119   case PPC::DIR_PWR6:
15120   case PPC::DIR_PWR6X:
15121   case PPC::DIR_PWR7:
15122   case PPC::DIR_PWR8:
15123   case PPC::DIR_PWR9:
15124   case PPC::DIR_PWR10:
15125   case PPC::DIR_PWR_FUTURE: {
15126     if (!ML)
15127       break;
15128 
15129     if (!DisableInnermostLoopAlign32) {
15130       // If the nested loop is an innermost loop, prefer to a 32-byte alignment,
15131       // so that we can decrease cache misses and branch-prediction misses.
15132       // Actual alignment of the loop will depend on the hotness check and other
15133       // logic in alignBlocks.
15134       if (ML->getLoopDepth() > 1 && ML->getSubLoops().empty())
15135         return Align(32);
15136     }
15137 
15138     const PPCInstrInfo *TII = Subtarget.getInstrInfo();
15139 
15140     // For small loops (between 5 and 8 instructions), align to a 32-byte
15141     // boundary so that the entire loop fits in one instruction-cache line.
15142     uint64_t LoopSize = 0;
15143     for (auto I = ML->block_begin(), IE = ML->block_end(); I != IE; ++I)
15144       for (auto J = (*I)->begin(), JE = (*I)->end(); J != JE; ++J) {
15145         LoopSize += TII->getInstSizeInBytes(*J);
15146         if (LoopSize > 32)
15147           break;
15148       }
15149 
15150     if (LoopSize > 16 && LoopSize <= 32)
15151       return Align(32);
15152 
15153     break;
15154   }
15155   }
15156 
15157   return TargetLowering::getPrefLoopAlignment(ML);
15158 }
15159 
15160 /// getConstraintType - Given a constraint, return the type of
15161 /// constraint it is for this target.
15162 PPCTargetLowering::ConstraintType
15163 PPCTargetLowering::getConstraintType(StringRef Constraint) const {
15164   if (Constraint.size() == 1) {
15165     switch (Constraint[0]) {
15166     default: break;
15167     case 'b':
15168     case 'r':
15169     case 'f':
15170     case 'd':
15171     case 'v':
15172     case 'y':
15173       return C_RegisterClass;
15174     case 'Z':
15175       // FIXME: While Z does indicate a memory constraint, it specifically
15176       // indicates an r+r address (used in conjunction with the 'y' modifier
15177       // in the replacement string). Currently, we're forcing the base
15178       // register to be r0 in the asm printer (which is interpreted as zero)
15179       // and forming the complete address in the second register. This is
15180       // suboptimal.
15181       return C_Memory;
15182     }
15183   } else if (Constraint == "wc") { // individual CR bits.
15184     return C_RegisterClass;
15185   } else if (Constraint == "wa" || Constraint == "wd" ||
15186              Constraint == "wf" || Constraint == "ws" ||
15187              Constraint == "wi" || Constraint == "ww") {
15188     return C_RegisterClass; // VSX registers.
15189   }
15190   return TargetLowering::getConstraintType(Constraint);
15191 }
15192 
15193 /// Examine constraint type and operand type and determine a weight value.
15194 /// This object must already have been set up with the operand type
15195 /// and the current alternative constraint selected.
15196 TargetLowering::ConstraintWeight
15197 PPCTargetLowering::getSingleConstraintMatchWeight(
15198     AsmOperandInfo &info, const char *constraint) const {
15199   ConstraintWeight weight = CW_Invalid;
15200   Value *CallOperandVal = info.CallOperandVal;
15201     // If we don't have a value, we can't do a match,
15202     // but allow it at the lowest weight.
15203   if (!CallOperandVal)
15204     return CW_Default;
15205   Type *type = CallOperandVal->getType();
15206 
15207   // Look at the constraint type.
15208   if (StringRef(constraint) == "wc" && type->isIntegerTy(1))
15209     return CW_Register; // an individual CR bit.
15210   else if ((StringRef(constraint) == "wa" ||
15211             StringRef(constraint) == "wd" ||
15212             StringRef(constraint) == "wf") &&
15213            type->isVectorTy())
15214     return CW_Register;
15215   else if (StringRef(constraint) == "wi" && type->isIntegerTy(64))
15216     return CW_Register; // just hold 64-bit integers data.
15217   else if (StringRef(constraint) == "ws" && type->isDoubleTy())
15218     return CW_Register;
15219   else if (StringRef(constraint) == "ww" && type->isFloatTy())
15220     return CW_Register;
15221 
15222   switch (*constraint) {
15223   default:
15224     weight = TargetLowering::getSingleConstraintMatchWeight(info, constraint);
15225     break;
15226   case 'b':
15227     if (type->isIntegerTy())
15228       weight = CW_Register;
15229     break;
15230   case 'f':
15231     if (type->isFloatTy())
15232       weight = CW_Register;
15233     break;
15234   case 'd':
15235     if (type->isDoubleTy())
15236       weight = CW_Register;
15237     break;
15238   case 'v':
15239     if (type->isVectorTy())
15240       weight = CW_Register;
15241     break;
15242   case 'y':
15243     weight = CW_Register;
15244     break;
15245   case 'Z':
15246     weight = CW_Memory;
15247     break;
15248   }
15249   return weight;
15250 }
15251 
15252 std::pair<unsigned, const TargetRegisterClass *>
15253 PPCTargetLowering::getRegForInlineAsmConstraint(const TargetRegisterInfo *TRI,
15254                                                 StringRef Constraint,
15255                                                 MVT VT) const {
15256   if (Constraint.size() == 1) {
15257     // GCC RS6000 Constraint Letters
15258     switch (Constraint[0]) {
15259     case 'b':   // R1-R31
15260       if (VT == MVT::i64 && Subtarget.isPPC64())
15261         return std::make_pair(0U, &PPC::G8RC_NOX0RegClass);
15262       return std::make_pair(0U, &PPC::GPRC_NOR0RegClass);
15263     case 'r':   // R0-R31
15264       if (VT == MVT::i64 && Subtarget.isPPC64())
15265         return std::make_pair(0U, &PPC::G8RCRegClass);
15266       return std::make_pair(0U, &PPC::GPRCRegClass);
15267     // 'd' and 'f' constraints are both defined to be "the floating point
15268     // registers", where one is for 32-bit and the other for 64-bit. We don't
15269     // really care overly much here so just give them all the same reg classes.
15270     case 'd':
15271     case 'f':
15272       if (Subtarget.hasSPE()) {
15273         if (VT == MVT::f32 || VT == MVT::i32)
15274           return std::make_pair(0U, &PPC::GPRCRegClass);
15275         if (VT == MVT::f64 || VT == MVT::i64)
15276           return std::make_pair(0U, &PPC::SPERCRegClass);
15277       } else {
15278         if (VT == MVT::f32 || VT == MVT::i32)
15279           return std::make_pair(0U, &PPC::F4RCRegClass);
15280         if (VT == MVT::f64 || VT == MVT::i64)
15281           return std::make_pair(0U, &PPC::F8RCRegClass);
15282       }
15283       break;
15284     case 'v':
15285       if (Subtarget.hasAltivec())
15286         return std::make_pair(0U, &PPC::VRRCRegClass);
15287       break;
15288     case 'y':   // crrc
15289       return std::make_pair(0U, &PPC::CRRCRegClass);
15290     }
15291   } else if (Constraint == "wc" && Subtarget.useCRBits()) {
15292     // An individual CR bit.
15293     return std::make_pair(0U, &PPC::CRBITRCRegClass);
15294   } else if ((Constraint == "wa" || Constraint == "wd" ||
15295              Constraint == "wf" || Constraint == "wi") &&
15296              Subtarget.hasVSX()) {
15297     return std::make_pair(0U, &PPC::VSRCRegClass);
15298   } else if ((Constraint == "ws" || Constraint == "ww") && Subtarget.hasVSX()) {
15299     if (VT == MVT::f32 && Subtarget.hasP8Vector())
15300       return std::make_pair(0U, &PPC::VSSRCRegClass);
15301     else
15302       return std::make_pair(0U, &PPC::VSFRCRegClass);
15303   }
15304 
15305   // If we name a VSX register, we can't defer to the base class because it
15306   // will not recognize the correct register (their names will be VSL{0-31}
15307   // and V{0-31} so they won't match). So we match them here.
15308   if (Constraint.size() > 3 && Constraint[1] == 'v' && Constraint[2] == 's') {
15309     int VSNum = atoi(Constraint.data() + 3);
15310     assert(VSNum >= 0 && VSNum <= 63 &&
15311            "Attempted to access a vsr out of range");
15312     if (VSNum < 32)
15313       return std::make_pair(PPC::VSL0 + VSNum, &PPC::VSRCRegClass);
15314     return std::make_pair(PPC::V0 + VSNum - 32, &PPC::VSRCRegClass);
15315   }
15316   std::pair<unsigned, const TargetRegisterClass *> R =
15317       TargetLowering::getRegForInlineAsmConstraint(TRI, Constraint, VT);
15318 
15319   // r[0-9]+ are used, on PPC64, to refer to the corresponding 64-bit registers
15320   // (which we call X[0-9]+). If a 64-bit value has been requested, and a
15321   // 32-bit GPR has been selected, then 'upgrade' it to the 64-bit parent
15322   // register.
15323   // FIXME: If TargetLowering::getRegForInlineAsmConstraint could somehow use
15324   // the AsmName field from *RegisterInfo.td, then this would not be necessary.
15325   if (R.first && VT == MVT::i64 && Subtarget.isPPC64() &&
15326       PPC::GPRCRegClass.contains(R.first))
15327     return std::make_pair(TRI->getMatchingSuperReg(R.first,
15328                             PPC::sub_32, &PPC::G8RCRegClass),
15329                           &PPC::G8RCRegClass);
15330 
15331   // GCC accepts 'cc' as an alias for 'cr0', and we need to do the same.
15332   if (!R.second && StringRef("{cc}").equals_lower(Constraint)) {
15333     R.first = PPC::CR0;
15334     R.second = &PPC::CRRCRegClass;
15335   }
15336 
15337   return R;
15338 }
15339 
15340 /// LowerAsmOperandForConstraint - Lower the specified operand into the Ops
15341 /// vector.  If it is invalid, don't add anything to Ops.
15342 void PPCTargetLowering::LowerAsmOperandForConstraint(SDValue Op,
15343                                                      std::string &Constraint,
15344                                                      std::vector<SDValue>&Ops,
15345                                                      SelectionDAG &DAG) const {
15346   SDValue Result;
15347 
15348   // Only support length 1 constraints.
15349   if (Constraint.length() > 1) return;
15350 
15351   char Letter = Constraint[0];
15352   switch (Letter) {
15353   default: break;
15354   case 'I':
15355   case 'J':
15356   case 'K':
15357   case 'L':
15358   case 'M':
15359   case 'N':
15360   case 'O':
15361   case 'P': {
15362     ConstantSDNode *CST = dyn_cast<ConstantSDNode>(Op);
15363     if (!CST) return; // Must be an immediate to match.
15364     SDLoc dl(Op);
15365     int64_t Value = CST->getSExtValue();
15366     EVT TCVT = MVT::i64; // All constants taken to be 64 bits so that negative
15367                          // numbers are printed as such.
15368     switch (Letter) {
15369     default: llvm_unreachable("Unknown constraint letter!");
15370     case 'I':  // "I" is a signed 16-bit constant.
15371       if (isInt<16>(Value))
15372         Result = DAG.getTargetConstant(Value, dl, TCVT);
15373       break;
15374     case 'J':  // "J" is a constant with only the high-order 16 bits nonzero.
15375       if (isShiftedUInt<16, 16>(Value))
15376         Result = DAG.getTargetConstant(Value, dl, TCVT);
15377       break;
15378     case 'L':  // "L" is a signed 16-bit constant shifted left 16 bits.
15379       if (isShiftedInt<16, 16>(Value))
15380         Result = DAG.getTargetConstant(Value, dl, TCVT);
15381       break;
15382     case 'K':  // "K" is a constant with only the low-order 16 bits nonzero.
15383       if (isUInt<16>(Value))
15384         Result = DAG.getTargetConstant(Value, dl, TCVT);
15385       break;
15386     case 'M':  // "M" is a constant that is greater than 31.
15387       if (Value > 31)
15388         Result = DAG.getTargetConstant(Value, dl, TCVT);
15389       break;
15390     case 'N':  // "N" is a positive constant that is an exact power of two.
15391       if (Value > 0 && isPowerOf2_64(Value))
15392         Result = DAG.getTargetConstant(Value, dl, TCVT);
15393       break;
15394     case 'O':  // "O" is the constant zero.
15395       if (Value == 0)
15396         Result = DAG.getTargetConstant(Value, dl, TCVT);
15397       break;
15398     case 'P':  // "P" is a constant whose negation is a signed 16-bit constant.
15399       if (isInt<16>(-Value))
15400         Result = DAG.getTargetConstant(Value, dl, TCVT);
15401       break;
15402     }
15403     break;
15404   }
15405   }
15406 
15407   if (Result.getNode()) {
15408     Ops.push_back(Result);
15409     return;
15410   }
15411 
15412   // Handle standard constraint letters.
15413   TargetLowering::LowerAsmOperandForConstraint(Op, Constraint, Ops, DAG);
15414 }
15415 
15416 // isLegalAddressingMode - Return true if the addressing mode represented
15417 // by AM is legal for this target, for a load/store of the specified type.
15418 bool PPCTargetLowering::isLegalAddressingMode(const DataLayout &DL,
15419                                               const AddrMode &AM, Type *Ty,
15420                                               unsigned AS,
15421                                               Instruction *I) const {
15422   // Vector type r+i form is supported since power9 as DQ form. We don't check
15423   // the offset matching DQ form requirement(off % 16 == 0), because on PowerPC,
15424   // imm form is preferred and the offset can be adjusted to use imm form later
15425   // in pass PPCLoopInstrFormPrep. Also in LSR, for one LSRUse, it uses min and
15426   // max offset to check legal addressing mode, we should be a little aggressive
15427   // to contain other offsets for that LSRUse.
15428   if (Ty->isVectorTy() && AM.BaseOffs != 0 && !Subtarget.hasP9Vector())
15429     return false;
15430 
15431   // PPC allows a sign-extended 16-bit immediate field.
15432   if (AM.BaseOffs <= -(1LL << 16) || AM.BaseOffs >= (1LL << 16)-1)
15433     return false;
15434 
15435   // No global is ever allowed as a base.
15436   if (AM.BaseGV)
15437     return false;
15438 
15439   // PPC only support r+r,
15440   switch (AM.Scale) {
15441   case 0:  // "r+i" or just "i", depending on HasBaseReg.
15442     break;
15443   case 1:
15444     if (AM.HasBaseReg && AM.BaseOffs)  // "r+r+i" is not allowed.
15445       return false;
15446     // Otherwise we have r+r or r+i.
15447     break;
15448   case 2:
15449     if (AM.HasBaseReg || AM.BaseOffs)  // 2*r+r  or  2*r+i is not allowed.
15450       return false;
15451     // Allow 2*r as r+r.
15452     break;
15453   default:
15454     // No other scales are supported.
15455     return false;
15456   }
15457 
15458   return true;
15459 }
15460 
15461 SDValue PPCTargetLowering::LowerRETURNADDR(SDValue Op,
15462                                            SelectionDAG &DAG) const {
15463   MachineFunction &MF = DAG.getMachineFunction();
15464   MachineFrameInfo &MFI = MF.getFrameInfo();
15465   MFI.setReturnAddressIsTaken(true);
15466 
15467   if (verifyReturnAddressArgumentIsConstant(Op, DAG))
15468     return SDValue();
15469 
15470   SDLoc dl(Op);
15471   unsigned Depth = cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue();
15472 
15473   // Make sure the function does not optimize away the store of the RA to
15474   // the stack.
15475   PPCFunctionInfo *FuncInfo = MF.getInfo<PPCFunctionInfo>();
15476   FuncInfo->setLRStoreRequired();
15477   bool isPPC64 = Subtarget.isPPC64();
15478   auto PtrVT = getPointerTy(MF.getDataLayout());
15479 
15480   if (Depth > 0) {
15481     SDValue FrameAddr = LowerFRAMEADDR(Op, DAG);
15482     SDValue Offset =
15483         DAG.getConstant(Subtarget.getFrameLowering()->getReturnSaveOffset(), dl,
15484                         isPPC64 ? MVT::i64 : MVT::i32);
15485     return DAG.getLoad(PtrVT, dl, DAG.getEntryNode(),
15486                        DAG.getNode(ISD::ADD, dl, PtrVT, FrameAddr, Offset),
15487                        MachinePointerInfo());
15488   }
15489 
15490   // Just load the return address off the stack.
15491   SDValue RetAddrFI = getReturnAddrFrameIndex(DAG);
15492   return DAG.getLoad(PtrVT, dl, DAG.getEntryNode(), RetAddrFI,
15493                      MachinePointerInfo());
15494 }
15495 
15496 SDValue PPCTargetLowering::LowerFRAMEADDR(SDValue Op,
15497                                           SelectionDAG &DAG) const {
15498   SDLoc dl(Op);
15499   unsigned Depth = cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue();
15500 
15501   MachineFunction &MF = DAG.getMachineFunction();
15502   MachineFrameInfo &MFI = MF.getFrameInfo();
15503   MFI.setFrameAddressIsTaken(true);
15504 
15505   EVT PtrVT = getPointerTy(MF.getDataLayout());
15506   bool isPPC64 = PtrVT == MVT::i64;
15507 
15508   // Naked functions never have a frame pointer, and so we use r1. For all
15509   // other functions, this decision must be delayed until during PEI.
15510   unsigned FrameReg;
15511   if (MF.getFunction().hasFnAttribute(Attribute::Naked))
15512     FrameReg = isPPC64 ? PPC::X1 : PPC::R1;
15513   else
15514     FrameReg = isPPC64 ? PPC::FP8 : PPC::FP;
15515 
15516   SDValue FrameAddr = DAG.getCopyFromReg(DAG.getEntryNode(), dl, FrameReg,
15517                                          PtrVT);
15518   while (Depth--)
15519     FrameAddr = DAG.getLoad(Op.getValueType(), dl, DAG.getEntryNode(),
15520                             FrameAddr, MachinePointerInfo());
15521   return FrameAddr;
15522 }
15523 
15524 // FIXME? Maybe this could be a TableGen attribute on some registers and
15525 // this table could be generated automatically from RegInfo.
15526 Register PPCTargetLowering::getRegisterByName(const char* RegName, LLT VT,
15527                                               const MachineFunction &MF) const {
15528   bool isPPC64 = Subtarget.isPPC64();
15529 
15530   bool is64Bit = isPPC64 && VT == LLT::scalar(64);
15531   if (!is64Bit && VT != LLT::scalar(32))
15532     report_fatal_error("Invalid register global variable type");
15533 
15534   Register Reg = StringSwitch<Register>(RegName)
15535                      .Case("r1", is64Bit ? PPC::X1 : PPC::R1)
15536                      .Case("r2", isPPC64 ? Register() : PPC::R2)
15537                      .Case("r13", (is64Bit ? PPC::X13 : PPC::R13))
15538                      .Default(Register());
15539 
15540   if (Reg)
15541     return Reg;
15542   report_fatal_error("Invalid register name global variable");
15543 }
15544 
15545 bool PPCTargetLowering::isAccessedAsGotIndirect(SDValue GA) const {
15546   // 32-bit SVR4 ABI access everything as got-indirect.
15547   if (Subtarget.is32BitELFABI())
15548     return true;
15549 
15550   // AIX accesses everything indirectly through the TOC, which is similar to
15551   // the GOT.
15552   if (Subtarget.isAIXABI())
15553     return true;
15554 
15555   CodeModel::Model CModel = getTargetMachine().getCodeModel();
15556   // If it is small or large code model, module locals are accessed
15557   // indirectly by loading their address from .toc/.got.
15558   if (CModel == CodeModel::Small || CModel == CodeModel::Large)
15559     return true;
15560 
15561   // JumpTable and BlockAddress are accessed as got-indirect.
15562   if (isa<JumpTableSDNode>(GA) || isa<BlockAddressSDNode>(GA))
15563     return true;
15564 
15565   if (GlobalAddressSDNode *G = dyn_cast<GlobalAddressSDNode>(GA))
15566     return Subtarget.isGVIndirectSymbol(G->getGlobal());
15567 
15568   return false;
15569 }
15570 
15571 bool
15572 PPCTargetLowering::isOffsetFoldingLegal(const GlobalAddressSDNode *GA) const {
15573   // The PowerPC target isn't yet aware of offsets.
15574   return false;
15575 }
15576 
15577 bool PPCTargetLowering::getTgtMemIntrinsic(IntrinsicInfo &Info,
15578                                            const CallInst &I,
15579                                            MachineFunction &MF,
15580                                            unsigned Intrinsic) const {
15581   switch (Intrinsic) {
15582   case Intrinsic::ppc_altivec_lvx:
15583   case Intrinsic::ppc_altivec_lvxl:
15584   case Intrinsic::ppc_altivec_lvebx:
15585   case Intrinsic::ppc_altivec_lvehx:
15586   case Intrinsic::ppc_altivec_lvewx:
15587   case Intrinsic::ppc_vsx_lxvd2x:
15588   case Intrinsic::ppc_vsx_lxvw4x: {
15589     EVT VT;
15590     switch (Intrinsic) {
15591     case Intrinsic::ppc_altivec_lvebx:
15592       VT = MVT::i8;
15593       break;
15594     case Intrinsic::ppc_altivec_lvehx:
15595       VT = MVT::i16;
15596       break;
15597     case Intrinsic::ppc_altivec_lvewx:
15598       VT = MVT::i32;
15599       break;
15600     case Intrinsic::ppc_vsx_lxvd2x:
15601       VT = MVT::v2f64;
15602       break;
15603     default:
15604       VT = MVT::v4i32;
15605       break;
15606     }
15607 
15608     Info.opc = ISD::INTRINSIC_W_CHAIN;
15609     Info.memVT = VT;
15610     Info.ptrVal = I.getArgOperand(0);
15611     Info.offset = -VT.getStoreSize()+1;
15612     Info.size = 2*VT.getStoreSize()-1;
15613     Info.align = Align(1);
15614     Info.flags = MachineMemOperand::MOLoad;
15615     return true;
15616   }
15617   case Intrinsic::ppc_altivec_stvx:
15618   case Intrinsic::ppc_altivec_stvxl:
15619   case Intrinsic::ppc_altivec_stvebx:
15620   case Intrinsic::ppc_altivec_stvehx:
15621   case Intrinsic::ppc_altivec_stvewx:
15622   case Intrinsic::ppc_vsx_stxvd2x:
15623   case Intrinsic::ppc_vsx_stxvw4x: {
15624     EVT VT;
15625     switch (Intrinsic) {
15626     case Intrinsic::ppc_altivec_stvebx:
15627       VT = MVT::i8;
15628       break;
15629     case Intrinsic::ppc_altivec_stvehx:
15630       VT = MVT::i16;
15631       break;
15632     case Intrinsic::ppc_altivec_stvewx:
15633       VT = MVT::i32;
15634       break;
15635     case Intrinsic::ppc_vsx_stxvd2x:
15636       VT = MVT::v2f64;
15637       break;
15638     default:
15639       VT = MVT::v4i32;
15640       break;
15641     }
15642 
15643     Info.opc = ISD::INTRINSIC_VOID;
15644     Info.memVT = VT;
15645     Info.ptrVal = I.getArgOperand(1);
15646     Info.offset = -VT.getStoreSize()+1;
15647     Info.size = 2*VT.getStoreSize()-1;
15648     Info.align = Align(1);
15649     Info.flags = MachineMemOperand::MOStore;
15650     return true;
15651   }
15652   default:
15653     break;
15654   }
15655 
15656   return false;
15657 }
15658 
15659 /// It returns EVT::Other if the type should be determined using generic
15660 /// target-independent logic.
15661 EVT PPCTargetLowering::getOptimalMemOpType(
15662     const MemOp &Op, const AttributeList &FuncAttributes) const {
15663   if (getTargetMachine().getOptLevel() != CodeGenOpt::None) {
15664     // We should use Altivec/VSX loads and stores when available. For unaligned
15665     // addresses, unaligned VSX loads are only fast starting with the P8.
15666     if (Subtarget.hasAltivec() && Op.size() >= 16 &&
15667         (Op.isAligned(Align(16)) ||
15668          ((Op.isMemset() && Subtarget.hasVSX()) || Subtarget.hasP8Vector())))
15669       return MVT::v4i32;
15670   }
15671 
15672   if (Subtarget.isPPC64()) {
15673     return MVT::i64;
15674   }
15675 
15676   return MVT::i32;
15677 }
15678 
15679 /// Returns true if it is beneficial to convert a load of a constant
15680 /// to just the constant itself.
15681 bool PPCTargetLowering::shouldConvertConstantLoadToIntImm(const APInt &Imm,
15682                                                           Type *Ty) const {
15683   assert(Ty->isIntegerTy());
15684 
15685   unsigned BitSize = Ty->getPrimitiveSizeInBits();
15686   return !(BitSize == 0 || BitSize > 64);
15687 }
15688 
15689 bool PPCTargetLowering::isTruncateFree(Type *Ty1, Type *Ty2) const {
15690   if (!Ty1->isIntegerTy() || !Ty2->isIntegerTy())
15691     return false;
15692   unsigned NumBits1 = Ty1->getPrimitiveSizeInBits();
15693   unsigned NumBits2 = Ty2->getPrimitiveSizeInBits();
15694   return NumBits1 == 64 && NumBits2 == 32;
15695 }
15696 
15697 bool PPCTargetLowering::isTruncateFree(EVT VT1, EVT VT2) const {
15698   if (!VT1.isInteger() || !VT2.isInteger())
15699     return false;
15700   unsigned NumBits1 = VT1.getSizeInBits();
15701   unsigned NumBits2 = VT2.getSizeInBits();
15702   return NumBits1 == 64 && NumBits2 == 32;
15703 }
15704 
15705 bool PPCTargetLowering::isZExtFree(SDValue Val, EVT VT2) const {
15706   // Generally speaking, zexts are not free, but they are free when they can be
15707   // folded with other operations.
15708   if (LoadSDNode *LD = dyn_cast<LoadSDNode>(Val)) {
15709     EVT MemVT = LD->getMemoryVT();
15710     if ((MemVT == MVT::i1 || MemVT == MVT::i8 || MemVT == MVT::i16 ||
15711          (Subtarget.isPPC64() && MemVT == MVT::i32)) &&
15712         (LD->getExtensionType() == ISD::NON_EXTLOAD ||
15713          LD->getExtensionType() == ISD::ZEXTLOAD))
15714       return true;
15715   }
15716 
15717   // FIXME: Add other cases...
15718   //  - 32-bit shifts with a zext to i64
15719   //  - zext after ctlz, bswap, etc.
15720   //  - zext after and by a constant mask
15721 
15722   return TargetLowering::isZExtFree(Val, VT2);
15723 }
15724 
15725 bool PPCTargetLowering::isFPExtFree(EVT DestVT, EVT SrcVT) const {
15726   assert(DestVT.isFloatingPoint() && SrcVT.isFloatingPoint() &&
15727          "invalid fpext types");
15728   // Extending to float128 is not free.
15729   if (DestVT == MVT::f128)
15730     return false;
15731   return true;
15732 }
15733 
15734 bool PPCTargetLowering::isLegalICmpImmediate(int64_t Imm) const {
15735   return isInt<16>(Imm) || isUInt<16>(Imm);
15736 }
15737 
15738 bool PPCTargetLowering::isLegalAddImmediate(int64_t Imm) const {
15739   return isInt<16>(Imm) || isUInt<16>(Imm);
15740 }
15741 
15742 bool PPCTargetLowering::allowsMisalignedMemoryAccesses(EVT VT,
15743                                                        unsigned,
15744                                                        unsigned,
15745                                                        MachineMemOperand::Flags,
15746                                                        bool *Fast) const {
15747   if (DisablePPCUnaligned)
15748     return false;
15749 
15750   // PowerPC supports unaligned memory access for simple non-vector types.
15751   // Although accessing unaligned addresses is not as efficient as accessing
15752   // aligned addresses, it is generally more efficient than manual expansion,
15753   // and generally only traps for software emulation when crossing page
15754   // boundaries.
15755 
15756   if (!VT.isSimple())
15757     return false;
15758 
15759   if (VT.isFloatingPoint() && !VT.isVector() &&
15760       !Subtarget.allowsUnalignedFPAccess())
15761     return false;
15762 
15763   if (VT.getSimpleVT().isVector()) {
15764     if (Subtarget.hasVSX()) {
15765       if (VT != MVT::v2f64 && VT != MVT::v2i64 &&
15766           VT != MVT::v4f32 && VT != MVT::v4i32)
15767         return false;
15768     } else {
15769       return false;
15770     }
15771   }
15772 
15773   if (VT == MVT::ppcf128)
15774     return false;
15775 
15776   if (Fast)
15777     *Fast = true;
15778 
15779   return true;
15780 }
15781 
15782 bool PPCTargetLowering::isFMAFasterThanFMulAndFAdd(const MachineFunction &MF,
15783                                                    EVT VT) const {
15784   return isFMAFasterThanFMulAndFAdd(
15785       MF.getFunction(), VT.getTypeForEVT(MF.getFunction().getContext()));
15786 }
15787 
15788 bool PPCTargetLowering::isFMAFasterThanFMulAndFAdd(const Function &F,
15789                                                    Type *Ty) const {
15790   switch (Ty->getScalarType()->getTypeID()) {
15791   case Type::FloatTyID:
15792   case Type::DoubleTyID:
15793     return true;
15794   case Type::FP128TyID:
15795     return Subtarget.hasP9Vector();
15796   default:
15797     return false;
15798   }
15799 }
15800 
15801 // FIXME: add more patterns which are not profitable to hoist.
15802 bool PPCTargetLowering::isProfitableToHoist(Instruction *I) const {
15803   if (!I->hasOneUse())
15804     return true;
15805 
15806   Instruction *User = I->user_back();
15807   assert(User && "A single use instruction with no uses.");
15808 
15809   switch (I->getOpcode()) {
15810   case Instruction::FMul: {
15811     // Don't break FMA, PowerPC prefers FMA.
15812     if (User->getOpcode() != Instruction::FSub &&
15813         User->getOpcode() != Instruction::FAdd)
15814       return true;
15815 
15816     const TargetOptions &Options = getTargetMachine().Options;
15817     const Function *F = I->getFunction();
15818     const DataLayout &DL = F->getParent()->getDataLayout();
15819     Type *Ty = User->getOperand(0)->getType();
15820 
15821     return !(
15822         isFMAFasterThanFMulAndFAdd(*F, Ty) &&
15823         isOperationLegalOrCustom(ISD::FMA, getValueType(DL, Ty)) &&
15824         (Options.AllowFPOpFusion == FPOpFusion::Fast || Options.UnsafeFPMath));
15825   }
15826   case Instruction::Load: {
15827     // Don't break "store (load float*)" pattern, this pattern will be combined
15828     // to "store (load int32)" in later InstCombine pass. See function
15829     // combineLoadToOperationType. On PowerPC, loading a float point takes more
15830     // cycles than loading a 32 bit integer.
15831     LoadInst *LI = cast<LoadInst>(I);
15832     // For the loads that combineLoadToOperationType does nothing, like
15833     // ordered load, it should be profitable to hoist them.
15834     // For swifterror load, it can only be used for pointer to pointer type, so
15835     // later type check should get rid of this case.
15836     if (!LI->isUnordered())
15837       return true;
15838 
15839     if (User->getOpcode() != Instruction::Store)
15840       return true;
15841 
15842     if (I->getType()->getTypeID() != Type::FloatTyID)
15843       return true;
15844 
15845     return false;
15846   }
15847   default:
15848     return true;
15849   }
15850   return true;
15851 }
15852 
15853 const MCPhysReg *
15854 PPCTargetLowering::getScratchRegisters(CallingConv::ID) const {
15855   // LR is a callee-save register, but we must treat it as clobbered by any call
15856   // site. Hence we include LR in the scratch registers, which are in turn added
15857   // as implicit-defs for stackmaps and patchpoints. The same reasoning applies
15858   // to CTR, which is used by any indirect call.
15859   static const MCPhysReg ScratchRegs[] = {
15860     PPC::X12, PPC::LR8, PPC::CTR8, 0
15861   };
15862 
15863   return ScratchRegs;
15864 }
15865 
15866 Register PPCTargetLowering::getExceptionPointerRegister(
15867     const Constant *PersonalityFn) const {
15868   return Subtarget.isPPC64() ? PPC::X3 : PPC::R3;
15869 }
15870 
15871 Register PPCTargetLowering::getExceptionSelectorRegister(
15872     const Constant *PersonalityFn) const {
15873   return Subtarget.isPPC64() ? PPC::X4 : PPC::R4;
15874 }
15875 
15876 bool
15877 PPCTargetLowering::shouldExpandBuildVectorWithShuffles(
15878                      EVT VT , unsigned DefinedValues) const {
15879   if (VT == MVT::v2i64)
15880     return Subtarget.hasDirectMove(); // Don't need stack ops with direct moves
15881 
15882   if (Subtarget.hasVSX())
15883     return true;
15884 
15885   return TargetLowering::shouldExpandBuildVectorWithShuffles(VT, DefinedValues);
15886 }
15887 
15888 Sched::Preference PPCTargetLowering::getSchedulingPreference(SDNode *N) const {
15889   if (DisableILPPref || Subtarget.enableMachineScheduler())
15890     return TargetLowering::getSchedulingPreference(N);
15891 
15892   return Sched::ILP;
15893 }
15894 
15895 // Create a fast isel object.
15896 FastISel *
15897 PPCTargetLowering::createFastISel(FunctionLoweringInfo &FuncInfo,
15898                                   const TargetLibraryInfo *LibInfo) const {
15899   return PPC::createFastISel(FuncInfo, LibInfo);
15900 }
15901 
15902 // 'Inverted' means the FMA opcode after negating one multiplicand.
15903 // For example, (fma -a b c) = (fnmsub a b c)
15904 static unsigned invertFMAOpcode(unsigned Opc) {
15905   switch (Opc) {
15906   default:
15907     llvm_unreachable("Invalid FMA opcode for PowerPC!");
15908   case ISD::FMA:
15909     return PPCISD::FNMSUB;
15910   case PPCISD::FNMSUB:
15911     return ISD::FMA;
15912   }
15913 }
15914 
15915 SDValue PPCTargetLowering::getNegatedExpression(SDValue Op, SelectionDAG &DAG,
15916                                                 bool LegalOps, bool OptForSize,
15917                                                 NegatibleCost &Cost,
15918                                                 unsigned Depth) const {
15919   if (Depth > SelectionDAG::MaxRecursionDepth)
15920     return SDValue();
15921 
15922   unsigned Opc = Op.getOpcode();
15923   EVT VT = Op.getValueType();
15924   SDNodeFlags Flags = Op.getNode()->getFlags();
15925 
15926   switch (Opc) {
15927   case PPCISD::FNMSUB:
15928     if (!Op.hasOneUse() || !isTypeLegal(VT))
15929       break;
15930 
15931     const TargetOptions &Options = getTargetMachine().Options;
15932     SDValue N0 = Op.getOperand(0);
15933     SDValue N1 = Op.getOperand(1);
15934     SDValue N2 = Op.getOperand(2);
15935     SDLoc Loc(Op);
15936 
15937     NegatibleCost N2Cost = NegatibleCost::Expensive;
15938     SDValue NegN2 =
15939         getNegatedExpression(N2, DAG, LegalOps, OptForSize, N2Cost, Depth + 1);
15940 
15941     if (!NegN2)
15942       return SDValue();
15943 
15944     // (fneg (fnmsub a b c)) => (fnmsub (fneg a) b (fneg c))
15945     // (fneg (fnmsub a b c)) => (fnmsub a (fneg b) (fneg c))
15946     // These transformations may change sign of zeroes. For example,
15947     // -(-ab-(-c))=-0 while -(-(ab-c))=+0 when a=b=c=1.
15948     if (Flags.hasNoSignedZeros() || Options.NoSignedZerosFPMath) {
15949       // Try and choose the cheaper one to negate.
15950       NegatibleCost N0Cost = NegatibleCost::Expensive;
15951       SDValue NegN0 = getNegatedExpression(N0, DAG, LegalOps, OptForSize,
15952                                            N0Cost, Depth + 1);
15953 
15954       NegatibleCost N1Cost = NegatibleCost::Expensive;
15955       SDValue NegN1 = getNegatedExpression(N1, DAG, LegalOps, OptForSize,
15956                                            N1Cost, Depth + 1);
15957 
15958       if (NegN0 && N0Cost <= N1Cost) {
15959         Cost = std::min(N0Cost, N2Cost);
15960         return DAG.getNode(Opc, Loc, VT, NegN0, N1, NegN2, Flags);
15961       } else if (NegN1) {
15962         Cost = std::min(N1Cost, N2Cost);
15963         return DAG.getNode(Opc, Loc, VT, N0, NegN1, NegN2, Flags);
15964       }
15965     }
15966 
15967     // (fneg (fnmsub a b c)) => (fma a b (fneg c))
15968     if (isOperationLegal(ISD::FMA, VT)) {
15969       Cost = N2Cost;
15970       return DAG.getNode(ISD::FMA, Loc, VT, N0, N1, NegN2, Flags);
15971     }
15972 
15973     break;
15974   }
15975 
15976   return TargetLowering::getNegatedExpression(Op, DAG, LegalOps, OptForSize,
15977                                               Cost, Depth);
15978 }
15979 
15980 // Override to enable LOAD_STACK_GUARD lowering on Linux.
15981 bool PPCTargetLowering::useLoadStackGuardNode() const {
15982   if (!Subtarget.isTargetLinux())
15983     return TargetLowering::useLoadStackGuardNode();
15984   return true;
15985 }
15986 
15987 // Override to disable global variable loading on Linux.
15988 void PPCTargetLowering::insertSSPDeclarations(Module &M) const {
15989   if (!Subtarget.isTargetLinux())
15990     return TargetLowering::insertSSPDeclarations(M);
15991 }
15992 
15993 bool PPCTargetLowering::isFPImmLegal(const APFloat &Imm, EVT VT,
15994                                      bool ForCodeSize) const {
15995   if (!VT.isSimple() || !Subtarget.hasVSX())
15996     return false;
15997 
15998   switch(VT.getSimpleVT().SimpleTy) {
15999   default:
16000     // For FP types that are currently not supported by PPC backend, return
16001     // false. Examples: f16, f80.
16002     return false;
16003   case MVT::f32:
16004   case MVT::f64:
16005     if (Subtarget.hasPrefixInstrs()) {
16006       // With prefixed instructions, we can materialize anything that can be
16007       // represented with a 32-bit immediate, not just positive zero.
16008       APFloat APFloatOfImm = Imm;
16009       return convertToNonDenormSingle(APFloatOfImm);
16010     }
16011     LLVM_FALLTHROUGH;
16012   case MVT::ppcf128:
16013     return Imm.isPosZero();
16014   }
16015 }
16016 
16017 // For vector shift operation op, fold
16018 // (op x, (and y, ((1 << numbits(x)) - 1))) -> (target op x, y)
16019 static SDValue stripModuloOnShift(const TargetLowering &TLI, SDNode *N,
16020                                   SelectionDAG &DAG) {
16021   SDValue N0 = N->getOperand(0);
16022   SDValue N1 = N->getOperand(1);
16023   EVT VT = N0.getValueType();
16024   unsigned OpSizeInBits = VT.getScalarSizeInBits();
16025   unsigned Opcode = N->getOpcode();
16026   unsigned TargetOpcode;
16027 
16028   switch (Opcode) {
16029   default:
16030     llvm_unreachable("Unexpected shift operation");
16031   case ISD::SHL:
16032     TargetOpcode = PPCISD::SHL;
16033     break;
16034   case ISD::SRL:
16035     TargetOpcode = PPCISD::SRL;
16036     break;
16037   case ISD::SRA:
16038     TargetOpcode = PPCISD::SRA;
16039     break;
16040   }
16041 
16042   if (VT.isVector() && TLI.isOperationLegal(Opcode, VT) &&
16043       N1->getOpcode() == ISD::AND)
16044     if (ConstantSDNode *Mask = isConstOrConstSplat(N1->getOperand(1)))
16045       if (Mask->getZExtValue() == OpSizeInBits - 1)
16046         return DAG.getNode(TargetOpcode, SDLoc(N), VT, N0, N1->getOperand(0));
16047 
16048   return SDValue();
16049 }
16050 
16051 SDValue PPCTargetLowering::combineSHL(SDNode *N, DAGCombinerInfo &DCI) const {
16052   if (auto Value = stripModuloOnShift(*this, N, DCI.DAG))
16053     return Value;
16054 
16055   SDValue N0 = N->getOperand(0);
16056   ConstantSDNode *CN1 = dyn_cast<ConstantSDNode>(N->getOperand(1));
16057   if (!Subtarget.isISA3_0() ||
16058       N0.getOpcode() != ISD::SIGN_EXTEND ||
16059       N0.getOperand(0).getValueType() != MVT::i32 ||
16060       CN1 == nullptr || N->getValueType(0) != MVT::i64)
16061     return SDValue();
16062 
16063   // We can't save an operation here if the value is already extended, and
16064   // the existing shift is easier to combine.
16065   SDValue ExtsSrc = N0.getOperand(0);
16066   if (ExtsSrc.getOpcode() == ISD::TRUNCATE &&
16067       ExtsSrc.getOperand(0).getOpcode() == ISD::AssertSext)
16068     return SDValue();
16069 
16070   SDLoc DL(N0);
16071   SDValue ShiftBy = SDValue(CN1, 0);
16072   // We want the shift amount to be i32 on the extswli, but the shift could
16073   // have an i64.
16074   if (ShiftBy.getValueType() == MVT::i64)
16075     ShiftBy = DCI.DAG.getConstant(CN1->getZExtValue(), DL, MVT::i32);
16076 
16077   return DCI.DAG.getNode(PPCISD::EXTSWSLI, DL, MVT::i64, N0->getOperand(0),
16078                          ShiftBy);
16079 }
16080 
16081 SDValue PPCTargetLowering::combineSRA(SDNode *N, DAGCombinerInfo &DCI) const {
16082   if (auto Value = stripModuloOnShift(*this, N, DCI.DAG))
16083     return Value;
16084 
16085   return SDValue();
16086 }
16087 
16088 SDValue PPCTargetLowering::combineSRL(SDNode *N, DAGCombinerInfo &DCI) const {
16089   if (auto Value = stripModuloOnShift(*this, N, DCI.DAG))
16090     return Value;
16091 
16092   return SDValue();
16093 }
16094 
16095 // Transform (add X, (zext(setne Z, C))) -> (addze X, (addic (addi Z, -C), -1))
16096 // Transform (add X, (zext(sete  Z, C))) -> (addze X, (subfic (addi Z, -C), 0))
16097 // When C is zero, the equation (addi Z, -C) can be simplified to Z
16098 // Requirement: -C in [-32768, 32767], X and Z are MVT::i64 types
16099 static SDValue combineADDToADDZE(SDNode *N, SelectionDAG &DAG,
16100                                  const PPCSubtarget &Subtarget) {
16101   if (!Subtarget.isPPC64())
16102     return SDValue();
16103 
16104   SDValue LHS = N->getOperand(0);
16105   SDValue RHS = N->getOperand(1);
16106 
16107   auto isZextOfCompareWithConstant = [](SDValue Op) {
16108     if (Op.getOpcode() != ISD::ZERO_EXTEND || !Op.hasOneUse() ||
16109         Op.getValueType() != MVT::i64)
16110       return false;
16111 
16112     SDValue Cmp = Op.getOperand(0);
16113     if (Cmp.getOpcode() != ISD::SETCC || !Cmp.hasOneUse() ||
16114         Cmp.getOperand(0).getValueType() != MVT::i64)
16115       return false;
16116 
16117     if (auto *Constant = dyn_cast<ConstantSDNode>(Cmp.getOperand(1))) {
16118       int64_t NegConstant = 0 - Constant->getSExtValue();
16119       // Due to the limitations of the addi instruction,
16120       // -C is required to be [-32768, 32767].
16121       return isInt<16>(NegConstant);
16122     }
16123 
16124     return false;
16125   };
16126 
16127   bool LHSHasPattern = isZextOfCompareWithConstant(LHS);
16128   bool RHSHasPattern = isZextOfCompareWithConstant(RHS);
16129 
16130   // If there is a pattern, canonicalize a zext operand to the RHS.
16131   if (LHSHasPattern && !RHSHasPattern)
16132     std::swap(LHS, RHS);
16133   else if (!LHSHasPattern && !RHSHasPattern)
16134     return SDValue();
16135 
16136   SDLoc DL(N);
16137   SDVTList VTs = DAG.getVTList(MVT::i64, MVT::Glue);
16138   SDValue Cmp = RHS.getOperand(0);
16139   SDValue Z = Cmp.getOperand(0);
16140   auto *Constant = dyn_cast<ConstantSDNode>(Cmp.getOperand(1));
16141 
16142   assert(Constant && "Constant Should not be a null pointer.");
16143   int64_t NegConstant = 0 - Constant->getSExtValue();
16144 
16145   switch(cast<CondCodeSDNode>(Cmp.getOperand(2))->get()) {
16146   default: break;
16147   case ISD::SETNE: {
16148     //                                 when C == 0
16149     //                             --> addze X, (addic Z, -1).carry
16150     //                            /
16151     // add X, (zext(setne Z, C))--
16152     //                            \    when -32768 <= -C <= 32767 && C != 0
16153     //                             --> addze X, (addic (addi Z, -C), -1).carry
16154     SDValue Add = DAG.getNode(ISD::ADD, DL, MVT::i64, Z,
16155                               DAG.getConstant(NegConstant, DL, MVT::i64));
16156     SDValue AddOrZ = NegConstant != 0 ? Add : Z;
16157     SDValue Addc = DAG.getNode(ISD::ADDC, DL, DAG.getVTList(MVT::i64, MVT::Glue),
16158                                AddOrZ, DAG.getConstant(-1ULL, DL, MVT::i64));
16159     return DAG.getNode(ISD::ADDE, DL, VTs, LHS, DAG.getConstant(0, DL, MVT::i64),
16160                        SDValue(Addc.getNode(), 1));
16161     }
16162   case ISD::SETEQ: {
16163     //                                 when C == 0
16164     //                             --> addze X, (subfic Z, 0).carry
16165     //                            /
16166     // add X, (zext(sete  Z, C))--
16167     //                            \    when -32768 <= -C <= 32767 && C != 0
16168     //                             --> addze X, (subfic (addi Z, -C), 0).carry
16169     SDValue Add = DAG.getNode(ISD::ADD, DL, MVT::i64, Z,
16170                               DAG.getConstant(NegConstant, DL, MVT::i64));
16171     SDValue AddOrZ = NegConstant != 0 ? Add : Z;
16172     SDValue Subc = DAG.getNode(ISD::SUBC, DL, DAG.getVTList(MVT::i64, MVT::Glue),
16173                                DAG.getConstant(0, DL, MVT::i64), AddOrZ);
16174     return DAG.getNode(ISD::ADDE, DL, VTs, LHS, DAG.getConstant(0, DL, MVT::i64),
16175                        SDValue(Subc.getNode(), 1));
16176     }
16177   }
16178 
16179   return SDValue();
16180 }
16181 
16182 // Transform
16183 // (add C1, (MAT_PCREL_ADDR GlobalAddr+C2)) to
16184 // (MAT_PCREL_ADDR GlobalAddr+(C1+C2))
16185 // In this case both C1 and C2 must be known constants.
16186 // C1+C2 must fit into a 34 bit signed integer.
16187 static SDValue combineADDToMAT_PCREL_ADDR(SDNode *N, SelectionDAG &DAG,
16188                                           const PPCSubtarget &Subtarget) {
16189   if (!Subtarget.isUsingPCRelativeCalls())
16190     return SDValue();
16191 
16192   // Check both Operand 0 and Operand 1 of the ADD node for the PCRel node.
16193   // If we find that node try to cast the Global Address and the Constant.
16194   SDValue LHS = N->getOperand(0);
16195   SDValue RHS = N->getOperand(1);
16196 
16197   if (LHS.getOpcode() != PPCISD::MAT_PCREL_ADDR)
16198     std::swap(LHS, RHS);
16199 
16200   if (LHS.getOpcode() != PPCISD::MAT_PCREL_ADDR)
16201     return SDValue();
16202 
16203   // Operand zero of PPCISD::MAT_PCREL_ADDR is the GA node.
16204   GlobalAddressSDNode *GSDN = dyn_cast<GlobalAddressSDNode>(LHS.getOperand(0));
16205   ConstantSDNode* ConstNode = dyn_cast<ConstantSDNode>(RHS);
16206 
16207   // Check that both casts succeeded.
16208   if (!GSDN || !ConstNode)
16209     return SDValue();
16210 
16211   int64_t NewOffset = GSDN->getOffset() + ConstNode->getSExtValue();
16212   SDLoc DL(GSDN);
16213 
16214   // The signed int offset needs to fit in 34 bits.
16215   if (!isInt<34>(NewOffset))
16216     return SDValue();
16217 
16218   // The new global address is a copy of the old global address except
16219   // that it has the updated Offset.
16220   SDValue GA =
16221       DAG.getTargetGlobalAddress(GSDN->getGlobal(), DL, GSDN->getValueType(0),
16222                                  NewOffset, GSDN->getTargetFlags());
16223   SDValue MatPCRel =
16224       DAG.getNode(PPCISD::MAT_PCREL_ADDR, DL, GSDN->getValueType(0), GA);
16225   return MatPCRel;
16226 }
16227 
16228 SDValue PPCTargetLowering::combineADD(SDNode *N, DAGCombinerInfo &DCI) const {
16229   if (auto Value = combineADDToADDZE(N, DCI.DAG, Subtarget))
16230     return Value;
16231 
16232   if (auto Value = combineADDToMAT_PCREL_ADDR(N, DCI.DAG, Subtarget))
16233     return Value;
16234 
16235   return SDValue();
16236 }
16237 
16238 // Detect TRUNCATE operations on bitcasts of float128 values.
16239 // What we are looking for here is the situtation where we extract a subset
16240 // of bits from a 128 bit float.
16241 // This can be of two forms:
16242 // 1) BITCAST of f128 feeding TRUNCATE
16243 // 2) BITCAST of f128 feeding SRL (a shift) feeding TRUNCATE
16244 // The reason this is required is because we do not have a legal i128 type
16245 // and so we want to prevent having to store the f128 and then reload part
16246 // of it.
16247 SDValue PPCTargetLowering::combineTRUNCATE(SDNode *N,
16248                                            DAGCombinerInfo &DCI) const {
16249   // If we are using CRBits then try that first.
16250   if (Subtarget.useCRBits()) {
16251     // Check if CRBits did anything and return that if it did.
16252     if (SDValue CRTruncValue = DAGCombineTruncBoolExt(N, DCI))
16253       return CRTruncValue;
16254   }
16255 
16256   SDLoc dl(N);
16257   SDValue Op0 = N->getOperand(0);
16258 
16259   // fold (truncate (abs (sub (zext a), (zext b)))) -> (vabsd a, b)
16260   if (Subtarget.hasP9Altivec() && Op0.getOpcode() == ISD::ABS) {
16261     EVT VT = N->getValueType(0);
16262     if (VT != MVT::v4i32 && VT != MVT::v8i16 && VT != MVT::v16i8)
16263       return SDValue();
16264     SDValue Sub = Op0.getOperand(0);
16265     if (Sub.getOpcode() == ISD::SUB) {
16266       SDValue SubOp0 = Sub.getOperand(0);
16267       SDValue SubOp1 = Sub.getOperand(1);
16268       if ((SubOp0.getOpcode() == ISD::ZERO_EXTEND) &&
16269           (SubOp1.getOpcode() == ISD::ZERO_EXTEND)) {
16270         return DCI.DAG.getNode(PPCISD::VABSD, dl, VT, SubOp0.getOperand(0),
16271                                SubOp1.getOperand(0),
16272                                DCI.DAG.getTargetConstant(0, dl, MVT::i32));
16273       }
16274     }
16275   }
16276 
16277   // Looking for a truncate of i128 to i64.
16278   if (Op0.getValueType() != MVT::i128 || N->getValueType(0) != MVT::i64)
16279     return SDValue();
16280 
16281   int EltToExtract = DCI.DAG.getDataLayout().isBigEndian() ? 1 : 0;
16282 
16283   // SRL feeding TRUNCATE.
16284   if (Op0.getOpcode() == ISD::SRL) {
16285     ConstantSDNode *ConstNode = dyn_cast<ConstantSDNode>(Op0.getOperand(1));
16286     // The right shift has to be by 64 bits.
16287     if (!ConstNode || ConstNode->getZExtValue() != 64)
16288       return SDValue();
16289 
16290     // Switch the element number to extract.
16291     EltToExtract = EltToExtract ? 0 : 1;
16292     // Update Op0 past the SRL.
16293     Op0 = Op0.getOperand(0);
16294   }
16295 
16296   // BITCAST feeding a TRUNCATE possibly via SRL.
16297   if (Op0.getOpcode() == ISD::BITCAST &&
16298       Op0.getValueType() == MVT::i128 &&
16299       Op0.getOperand(0).getValueType() == MVT::f128) {
16300     SDValue Bitcast = DCI.DAG.getBitcast(MVT::v2i64, Op0.getOperand(0));
16301     return DCI.DAG.getNode(
16302         ISD::EXTRACT_VECTOR_ELT, dl, MVT::i64, Bitcast,
16303         DCI.DAG.getTargetConstant(EltToExtract, dl, MVT::i32));
16304   }
16305   return SDValue();
16306 }
16307 
16308 SDValue PPCTargetLowering::combineMUL(SDNode *N, DAGCombinerInfo &DCI) const {
16309   SelectionDAG &DAG = DCI.DAG;
16310 
16311   ConstantSDNode *ConstOpOrElement = isConstOrConstSplat(N->getOperand(1));
16312   if (!ConstOpOrElement)
16313     return SDValue();
16314 
16315   // An imul is usually smaller than the alternative sequence for legal type.
16316   if (DAG.getMachineFunction().getFunction().hasMinSize() &&
16317       isOperationLegal(ISD::MUL, N->getValueType(0)))
16318     return SDValue();
16319 
16320   auto IsProfitable = [this](bool IsNeg, bool IsAddOne, EVT VT) -> bool {
16321     switch (this->Subtarget.getCPUDirective()) {
16322     default:
16323       // TODO: enhance the condition for subtarget before pwr8
16324       return false;
16325     case PPC::DIR_PWR8:
16326       //  type        mul     add    shl
16327       // scalar        4       1      1
16328       // vector        7       2      2
16329       return true;
16330     case PPC::DIR_PWR9:
16331     case PPC::DIR_PWR10:
16332     case PPC::DIR_PWR_FUTURE:
16333       //  type        mul     add    shl
16334       // scalar        5       2      2
16335       // vector        7       2      2
16336 
16337       // The cycle RATIO of related operations are showed as a table above.
16338       // Because mul is 5(scalar)/7(vector), add/sub/shl are all 2 for both
16339       // scalar and vector type. For 2 instrs patterns, add/sub + shl
16340       // are 4, it is always profitable; but for 3 instrs patterns
16341       // (mul x, -(2^N + 1)) => -(add (shl x, N), x), sub + add + shl are 6.
16342       // So we should only do it for vector type.
16343       return IsAddOne && IsNeg ? VT.isVector() : true;
16344     }
16345   };
16346 
16347   EVT VT = N->getValueType(0);
16348   SDLoc DL(N);
16349 
16350   const APInt &MulAmt = ConstOpOrElement->getAPIntValue();
16351   bool IsNeg = MulAmt.isNegative();
16352   APInt MulAmtAbs = MulAmt.abs();
16353 
16354   if ((MulAmtAbs - 1).isPowerOf2()) {
16355     // (mul x, 2^N + 1) => (add (shl x, N), x)
16356     // (mul x, -(2^N + 1)) => -(add (shl x, N), x)
16357 
16358     if (!IsProfitable(IsNeg, true, VT))
16359       return SDValue();
16360 
16361     SDValue Op0 = N->getOperand(0);
16362     SDValue Op1 =
16363         DAG.getNode(ISD::SHL, DL, VT, N->getOperand(0),
16364                     DAG.getConstant((MulAmtAbs - 1).logBase2(), DL, VT));
16365     SDValue Res = DAG.getNode(ISD::ADD, DL, VT, Op0, Op1);
16366 
16367     if (!IsNeg)
16368       return Res;
16369 
16370     return DAG.getNode(ISD::SUB, DL, VT, DAG.getConstant(0, DL, VT), Res);
16371   } else if ((MulAmtAbs + 1).isPowerOf2()) {
16372     // (mul x, 2^N - 1) => (sub (shl x, N), x)
16373     // (mul x, -(2^N - 1)) => (sub x, (shl x, N))
16374 
16375     if (!IsProfitable(IsNeg, false, VT))
16376       return SDValue();
16377 
16378     SDValue Op0 = N->getOperand(0);
16379     SDValue Op1 =
16380         DAG.getNode(ISD::SHL, DL, VT, N->getOperand(0),
16381                     DAG.getConstant((MulAmtAbs + 1).logBase2(), DL, VT));
16382 
16383     if (!IsNeg)
16384       return DAG.getNode(ISD::SUB, DL, VT, Op1, Op0);
16385     else
16386       return DAG.getNode(ISD::SUB, DL, VT, Op0, Op1);
16387 
16388   } else {
16389     return SDValue();
16390   }
16391 }
16392 
16393 // Combine fma-like op (like fnmsub) with fnegs to appropriate op. Do this
16394 // in combiner since we need to check SD flags and other subtarget features.
16395 SDValue PPCTargetLowering::combineFMALike(SDNode *N,
16396                                           DAGCombinerInfo &DCI) const {
16397   SDValue N0 = N->getOperand(0);
16398   SDValue N1 = N->getOperand(1);
16399   SDValue N2 = N->getOperand(2);
16400   SDNodeFlags Flags = N->getFlags();
16401   EVT VT = N->getValueType(0);
16402   SelectionDAG &DAG = DCI.DAG;
16403   const TargetOptions &Options = getTargetMachine().Options;
16404   unsigned Opc = N->getOpcode();
16405   bool CodeSize = DAG.getMachineFunction().getFunction().hasOptSize();
16406   bool LegalOps = !DCI.isBeforeLegalizeOps();
16407   SDLoc Loc(N);
16408 
16409   if (!isOperationLegal(ISD::FMA, VT))
16410     return SDValue();
16411 
16412   // Allowing transformation to FNMSUB may change sign of zeroes when ab-c=0
16413   // since (fnmsub a b c)=-0 while c-ab=+0.
16414   if (!Flags.hasNoSignedZeros() && !Options.NoSignedZerosFPMath)
16415     return SDValue();
16416 
16417   // (fma (fneg a) b c) => (fnmsub a b c)
16418   // (fnmsub (fneg a) b c) => (fma a b c)
16419   if (SDValue NegN0 = getCheaperNegatedExpression(N0, DAG, LegalOps, CodeSize))
16420     return DAG.getNode(invertFMAOpcode(Opc), Loc, VT, NegN0, N1, N2, Flags);
16421 
16422   // (fma a (fneg b) c) => (fnmsub a b c)
16423   // (fnmsub a (fneg b) c) => (fma a b c)
16424   if (SDValue NegN1 = getCheaperNegatedExpression(N1, DAG, LegalOps, CodeSize))
16425     return DAG.getNode(invertFMAOpcode(Opc), Loc, VT, N0, NegN1, N2, Flags);
16426 
16427   return SDValue();
16428 }
16429 
16430 bool PPCTargetLowering::mayBeEmittedAsTailCall(const CallInst *CI) const {
16431   // Only duplicate to increase tail-calls for the 64bit SysV ABIs.
16432   if (!Subtarget.is64BitELFABI())
16433     return false;
16434 
16435   // If not a tail call then no need to proceed.
16436   if (!CI->isTailCall())
16437     return false;
16438 
16439   // If sibling calls have been disabled and tail-calls aren't guaranteed
16440   // there is no reason to duplicate.
16441   auto &TM = getTargetMachine();
16442   if (!TM.Options.GuaranteedTailCallOpt && DisableSCO)
16443     return false;
16444 
16445   // Can't tail call a function called indirectly, or if it has variadic args.
16446   const Function *Callee = CI->getCalledFunction();
16447   if (!Callee || Callee->isVarArg())
16448     return false;
16449 
16450   // Make sure the callee and caller calling conventions are eligible for tco.
16451   const Function *Caller = CI->getParent()->getParent();
16452   if (!areCallingConvEligibleForTCO_64SVR4(Caller->getCallingConv(),
16453                                            CI->getCallingConv()))
16454       return false;
16455 
16456   // If the function is local then we have a good chance at tail-calling it
16457   return getTargetMachine().shouldAssumeDSOLocal(*Caller->getParent(), Callee);
16458 }
16459 
16460 bool PPCTargetLowering::hasBitPreservingFPLogic(EVT VT) const {
16461   if (!Subtarget.hasVSX())
16462     return false;
16463   if (Subtarget.hasP9Vector() && VT == MVT::f128)
16464     return true;
16465   return VT == MVT::f32 || VT == MVT::f64 ||
16466     VT == MVT::v4f32 || VT == MVT::v2f64;
16467 }
16468 
16469 bool PPCTargetLowering::
16470 isMaskAndCmp0FoldingBeneficial(const Instruction &AndI) const {
16471   const Value *Mask = AndI.getOperand(1);
16472   // If the mask is suitable for andi. or andis. we should sink the and.
16473   if (const ConstantInt *CI = dyn_cast<ConstantInt>(Mask)) {
16474     // Can't handle constants wider than 64-bits.
16475     if (CI->getBitWidth() > 64)
16476       return false;
16477     int64_t ConstVal = CI->getZExtValue();
16478     return isUInt<16>(ConstVal) ||
16479       (isUInt<16>(ConstVal >> 16) && !(ConstVal & 0xFFFF));
16480   }
16481 
16482   // For non-constant masks, we can always use the record-form and.
16483   return true;
16484 }
16485 
16486 // Transform (abs (sub (zext a), (zext b))) to (vabsd a b 0)
16487 // Transform (abs (sub (zext a), (zext_invec b))) to (vabsd a b 0)
16488 // Transform (abs (sub (zext_invec a), (zext_invec b))) to (vabsd a b 0)
16489 // Transform (abs (sub (zext_invec a), (zext b))) to (vabsd a b 0)
16490 // Transform (abs (sub a, b) to (vabsd a b 1)) if a & b of type v4i32
16491 SDValue PPCTargetLowering::combineABS(SDNode *N, DAGCombinerInfo &DCI) const {
16492   assert((N->getOpcode() == ISD::ABS) && "Need ABS node here");
16493   assert(Subtarget.hasP9Altivec() &&
16494          "Only combine this when P9 altivec supported!");
16495   EVT VT = N->getValueType(0);
16496   if (VT != MVT::v4i32 && VT != MVT::v8i16 && VT != MVT::v16i8)
16497     return SDValue();
16498 
16499   SelectionDAG &DAG = DCI.DAG;
16500   SDLoc dl(N);
16501   if (N->getOperand(0).getOpcode() == ISD::SUB) {
16502     // Even for signed integers, if it's known to be positive (as signed
16503     // integer) due to zero-extended inputs.
16504     unsigned SubOpcd0 = N->getOperand(0)->getOperand(0).getOpcode();
16505     unsigned SubOpcd1 = N->getOperand(0)->getOperand(1).getOpcode();
16506     if ((SubOpcd0 == ISD::ZERO_EXTEND ||
16507          SubOpcd0 == ISD::ZERO_EXTEND_VECTOR_INREG) &&
16508         (SubOpcd1 == ISD::ZERO_EXTEND ||
16509          SubOpcd1 == ISD::ZERO_EXTEND_VECTOR_INREG)) {
16510       return DAG.getNode(PPCISD::VABSD, dl, N->getOperand(0).getValueType(),
16511                          N->getOperand(0)->getOperand(0),
16512                          N->getOperand(0)->getOperand(1),
16513                          DAG.getTargetConstant(0, dl, MVT::i32));
16514     }
16515 
16516     // For type v4i32, it can be optimized with xvnegsp + vabsduw
16517     if (N->getOperand(0).getValueType() == MVT::v4i32 &&
16518         N->getOperand(0).hasOneUse()) {
16519       return DAG.getNode(PPCISD::VABSD, dl, N->getOperand(0).getValueType(),
16520                          N->getOperand(0)->getOperand(0),
16521                          N->getOperand(0)->getOperand(1),
16522                          DAG.getTargetConstant(1, dl, MVT::i32));
16523     }
16524   }
16525 
16526   return SDValue();
16527 }
16528 
16529 // For type v4i32/v8ii16/v16i8, transform
16530 // from (vselect (setcc a, b, setugt), (sub a, b), (sub b, a)) to (vabsd a, b)
16531 // from (vselect (setcc a, b, setuge), (sub a, b), (sub b, a)) to (vabsd a, b)
16532 // from (vselect (setcc a, b, setult), (sub b, a), (sub a, b)) to (vabsd a, b)
16533 // from (vselect (setcc a, b, setule), (sub b, a), (sub a, b)) to (vabsd a, b)
16534 SDValue PPCTargetLowering::combineVSelect(SDNode *N,
16535                                           DAGCombinerInfo &DCI) const {
16536   assert((N->getOpcode() == ISD::VSELECT) && "Need VSELECT node here");
16537   assert(Subtarget.hasP9Altivec() &&
16538          "Only combine this when P9 altivec supported!");
16539 
16540   SelectionDAG &DAG = DCI.DAG;
16541   SDLoc dl(N);
16542   SDValue Cond = N->getOperand(0);
16543   SDValue TrueOpnd = N->getOperand(1);
16544   SDValue FalseOpnd = N->getOperand(2);
16545   EVT VT = N->getOperand(1).getValueType();
16546 
16547   if (Cond.getOpcode() != ISD::SETCC || TrueOpnd.getOpcode() != ISD::SUB ||
16548       FalseOpnd.getOpcode() != ISD::SUB)
16549     return SDValue();
16550 
16551   // ABSD only available for type v4i32/v8i16/v16i8
16552   if (VT != MVT::v4i32 && VT != MVT::v8i16 && VT != MVT::v16i8)
16553     return SDValue();
16554 
16555   // At least to save one more dependent computation
16556   if (!(Cond.hasOneUse() || TrueOpnd.hasOneUse() || FalseOpnd.hasOneUse()))
16557     return SDValue();
16558 
16559   ISD::CondCode CC = cast<CondCodeSDNode>(Cond.getOperand(2))->get();
16560 
16561   // Can only handle unsigned comparison here
16562   switch (CC) {
16563   default:
16564     return SDValue();
16565   case ISD::SETUGT:
16566   case ISD::SETUGE:
16567     break;
16568   case ISD::SETULT:
16569   case ISD::SETULE:
16570     std::swap(TrueOpnd, FalseOpnd);
16571     break;
16572   }
16573 
16574   SDValue CmpOpnd1 = Cond.getOperand(0);
16575   SDValue CmpOpnd2 = Cond.getOperand(1);
16576 
16577   // SETCC CmpOpnd1 CmpOpnd2 cond
16578   // TrueOpnd = CmpOpnd1 - CmpOpnd2
16579   // FalseOpnd = CmpOpnd2 - CmpOpnd1
16580   if (TrueOpnd.getOperand(0) == CmpOpnd1 &&
16581       TrueOpnd.getOperand(1) == CmpOpnd2 &&
16582       FalseOpnd.getOperand(0) == CmpOpnd2 &&
16583       FalseOpnd.getOperand(1) == CmpOpnd1) {
16584     return DAG.getNode(PPCISD::VABSD, dl, N->getOperand(1).getValueType(),
16585                        CmpOpnd1, CmpOpnd2,
16586                        DAG.getTargetConstant(0, dl, MVT::i32));
16587   }
16588 
16589   return SDValue();
16590 }
16591