//===-- PPCISelLowering.cpp - PPC DAG Lowering Implementation -------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file implements the PPCISelLowering class.
//
//===----------------------------------------------------------------------===//

#include "PPCISelLowering.h"
#include "MCTargetDesc/PPCPredicates.h"
#include "PPC.h"
#include "PPCCCState.h"
#include "PPCCallingConv.h"
#include "PPCFrameLowering.h"
#include "PPCInstrInfo.h"
#include "PPCMachineFunctionInfo.h"
#include "PPCPerfectShuffle.h"
#include "PPCRegisterInfo.h"
#include "PPCSubtarget.h"
#include "PPCTargetMachine.h"
#include "llvm/ADT/APFloat.h"
#include "llvm/ADT/APInt.h"
#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/None.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/SmallSet.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/ADT/StringRef.h"
#include "llvm/ADT/StringSwitch.h"
#include "llvm/CodeGen/CallingConvLower.h"
#include "llvm/CodeGen/ISDOpcodes.h"
#include "llvm/CodeGen/MachineBasicBlock.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineInstr.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineJumpTableInfo.h"
#include "llvm/CodeGen/MachineLoopInfo.h"
#include "llvm/CodeGen/MachineMemOperand.h"
#include "llvm/CodeGen/MachineModuleInfo.h"
#include "llvm/CodeGen/MachineOperand.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/RuntimeLibcalls.h"
#include "llvm/CodeGen/SelectionDAG.h"
#include "llvm/CodeGen/SelectionDAGNodes.h"
#include "llvm/CodeGen/TargetInstrInfo.h"
#include "llvm/CodeGen/TargetLowering.h"
#include "llvm/CodeGen/TargetLoweringObjectFileImpl.h"
#include "llvm/CodeGen/TargetRegisterInfo.h"
#include "llvm/CodeGen/ValueTypes.h"
#include "llvm/IR/CallingConv.h"
#include "llvm/IR/Constant.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/DebugLoc.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/GlobalValue.h"
#include "llvm/IR/IRBuilder.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/Intrinsics.h"
#include "llvm/IR/IntrinsicsPowerPC.h"
#include "llvm/IR/Module.h"
#include "llvm/IR/Type.h"
#include "llvm/IR/Use.h"
#include "llvm/IR/Value.h"
#include "llvm/MC/MCContext.h"
#include "llvm/MC/MCExpr.h"
#include "llvm/MC/MCRegisterInfo.h"
#include "llvm/MC/MCSectionXCOFF.h"
#include "llvm/MC/MCSymbolXCOFF.h"
#include "llvm/Support/AtomicOrdering.h"
#include "llvm/Support/BranchProbability.h"
#include "llvm/Support/Casting.h"
#include "llvm/Support/CodeGen.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Compiler.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/Format.h"
#include "llvm/Support/KnownBits.h"
#include "llvm/Support/MachineValueType.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Target/TargetMachine.h"
#include "llvm/Target/TargetOptions.h"
#include <algorithm>
#include <cassert>
#include <cstdint>
#include <iterator>
#include <list>
#include <utility>
#include <vector>

using namespace llvm;

#define DEBUG_TYPE "ppc-lowering"
static cl::opt<bool> DisablePPCPreinc("disable-ppc-preinc",
    cl::desc("disable preincrement load/store generation on PPC"), cl::Hidden);

static cl::opt<bool> DisableILPPref("disable-ppc-ilp-pref",
    cl::desc("disable setting the node scheduling preference to ILP on PPC"),
    cl::Hidden);

static cl::opt<bool> DisablePPCUnaligned("disable-ppc-unaligned",
    cl::desc("disable unaligned load/store generation on PPC"), cl::Hidden);

static cl::opt<bool> DisableSCO("disable-ppc-sco",
    cl::desc("disable sibling call optimization on ppc"), cl::Hidden);

static cl::opt<bool> DisableInnermostLoopAlign32(
    "disable-ppc-innermost-loop-align32",
    cl::desc("don't always align innermost loop to 32 bytes on ppc"),
    cl::Hidden);

static cl::opt<bool> UseAbsoluteJumpTables("ppc-use-absolute-jumptables",
    cl::desc("use absolute jump tables on ppc"), cl::Hidden);
// TODO: Remove this option once soft fp128 is fully supported.
static cl::opt<bool>
    EnableSoftFP128("enable-soft-fp128",
                    cl::desc("temp option to enable soft fp128"), cl::Hidden);

STATISTIC(NumTailCalls, "Number of tail calls");
STATISTIC(NumSiblingCalls, "Number of sibling calls");
STATISTIC(ShufflesHandledWithVPERM, "Number of shuffles lowered to a VPERM");
STATISTIC(NumDynamicAllocaProbed, "Number of dynamic stack allocations probed");

static bool isNByteElemShuffleMask(ShuffleVectorSDNode *, unsigned, int);

static SDValue widenVec(SelectionDAG &DAG, SDValue Vec, const SDLoc &dl);

// FIXME: Remove this once the bug has been fixed!
extern cl::opt<bool> ANDIGlueBug;

PPCTargetLowering::PPCTargetLowering(const PPCTargetMachine &TM,
                                     const PPCSubtarget &STI)
    : TargetLowering(TM), Subtarget(STI) {
  // On PPC32/64, arguments smaller than 4/8 bytes are extended, so all
  // arguments are at least 4/8 bytes aligned.
  bool isPPC64 = Subtarget.isPPC64();
  setMinStackArgumentAlignment(isPPC64 ? Align(8) : Align(4));

  // Set up the register classes.
  addRegisterClass(MVT::i32, &PPC::GPRCRegClass);
  if (!useSoftFloat()) {
    if (hasSPE()) {
      addRegisterClass(MVT::f32, &PPC::GPRCRegClass);
      addRegisterClass(MVT::f64, &PPC::SPERCRegClass);
    } else {
      addRegisterClass(MVT::f32, &PPC::F4RCRegClass);
      addRegisterClass(MVT::f64, &PPC::F8RCRegClass);
    }
  }

  // Match BITREVERSE to a customized fast code sequence in the .td file.
  setOperationAction(ISD::BITREVERSE, MVT::i32, Legal);
  setOperationAction(ISD::BITREVERSE, MVT::i64, Legal);

  // Sub-word ATOMIC_CMP_SWAP needs to ensure that the input is zero-extended.
  setOperationAction(ISD::ATOMIC_CMP_SWAP, MVT::i32, Custom);

  // PowerPC has an i16 but no i8 (or i1) SEXTLOAD.
  for (MVT VT : MVT::integer_valuetypes()) {
    setLoadExtAction(ISD::SEXTLOAD, VT, MVT::i1, Promote);
    setLoadExtAction(ISD::SEXTLOAD, VT, MVT::i8, Expand);
  }

  if (Subtarget.isISA3_0()) {
    setLoadExtAction(ISD::EXTLOAD, MVT::f64, MVT::f16, Legal);
    setLoadExtAction(ISD::EXTLOAD, MVT::f32, MVT::f16, Legal);
    setTruncStoreAction(MVT::f64, MVT::f16, Legal);
    setTruncStoreAction(MVT::f32, MVT::f16, Legal);
  } else {
    // No extending loads from f16 or HW conversions back and forth.
    setLoadExtAction(ISD::EXTLOAD, MVT::f64, MVT::f16, Expand);
    setOperationAction(ISD::FP16_TO_FP, MVT::f64, Expand);
    setOperationAction(ISD::FP_TO_FP16, MVT::f64, Expand);
    setLoadExtAction(ISD::EXTLOAD, MVT::f32, MVT::f16, Expand);
    setOperationAction(ISD::FP16_TO_FP, MVT::f32, Expand);
    setOperationAction(ISD::FP_TO_FP16, MVT::f32, Expand);
    setTruncStoreAction(MVT::f64, MVT::f16, Expand);
    setTruncStoreAction(MVT::f32, MVT::f16, Expand);
  }

  setTruncStoreAction(MVT::f64, MVT::f32, Expand);

  // PowerPC has pre-inc loads and stores.
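  // (Illustrative) These select the update-form memory instructions; e.g.
  // "lwzu r3, 4(r4)" loads from r4+4 and writes the incremented address back
  // into r4, folding the pointer bump into the memory access.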
  setIndexedLoadAction(ISD::PRE_INC, MVT::i1, Legal);
  setIndexedLoadAction(ISD::PRE_INC, MVT::i8, Legal);
  setIndexedLoadAction(ISD::PRE_INC, MVT::i16, Legal);
  setIndexedLoadAction(ISD::PRE_INC, MVT::i32, Legal);
  setIndexedLoadAction(ISD::PRE_INC, MVT::i64, Legal);
  setIndexedStoreAction(ISD::PRE_INC, MVT::i1, Legal);
  setIndexedStoreAction(ISD::PRE_INC, MVT::i8, Legal);
  setIndexedStoreAction(ISD::PRE_INC, MVT::i16, Legal);
  setIndexedStoreAction(ISD::PRE_INC, MVT::i32, Legal);
  setIndexedStoreAction(ISD::PRE_INC, MVT::i64, Legal);
  if (!Subtarget.hasSPE()) {
    setIndexedLoadAction(ISD::PRE_INC, MVT::f32, Legal);
    setIndexedLoadAction(ISD::PRE_INC, MVT::f64, Legal);
    setIndexedStoreAction(ISD::PRE_INC, MVT::f32, Legal);
    setIndexedStoreAction(ISD::PRE_INC, MVT::f64, Legal);
  }

  // PowerPC uses ADDC/ADDE/SUBC/SUBE to propagate carry.
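  // (Illustrative) These correspond to the carrying/extended forms (addc/adde
  // and subfc/subfe), which thread the carry through the XER[CA] bit when
  // legalizing wider-than-register arithmetic.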
  const MVT ScalarIntVTs[] = { MVT::i32, MVT::i64 };
  for (MVT VT : ScalarIntVTs) {
    setOperationAction(ISD::ADDC, VT, Legal);
    setOperationAction(ISD::ADDE, VT, Legal);
    setOperationAction(ISD::SUBC, VT, Legal);
    setOperationAction(ISD::SUBE, VT, Legal);
  }

  if (Subtarget.useCRBits()) {
    setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i1, Expand);

    if (isPPC64 || Subtarget.hasFPCVT()) {
      setOperationAction(ISD::STRICT_SINT_TO_FP, MVT::i1, Promote);
      AddPromotedToType(ISD::STRICT_SINT_TO_FP, MVT::i1,
                        isPPC64 ? MVT::i64 : MVT::i32);
      setOperationAction(ISD::STRICT_UINT_TO_FP, MVT::i1, Promote);
      AddPromotedToType(ISD::STRICT_UINT_TO_FP, MVT::i1,
                        isPPC64 ? MVT::i64 : MVT::i32);

      setOperationAction(ISD::SINT_TO_FP, MVT::i1, Promote);
      AddPromotedToType(ISD::SINT_TO_FP, MVT::i1,
                        isPPC64 ? MVT::i64 : MVT::i32);
      setOperationAction(ISD::UINT_TO_FP, MVT::i1, Promote);
      AddPromotedToType(ISD::UINT_TO_FP, MVT::i1,
                        isPPC64 ? MVT::i64 : MVT::i32);

      setOperationAction(ISD::STRICT_FP_TO_SINT, MVT::i1, Promote);
      AddPromotedToType(ISD::STRICT_FP_TO_SINT, MVT::i1,
                        isPPC64 ? MVT::i64 : MVT::i32);
      setOperationAction(ISD::STRICT_FP_TO_UINT, MVT::i1, Promote);
      AddPromotedToType(ISD::STRICT_FP_TO_UINT, MVT::i1,
                        isPPC64 ? MVT::i64 : MVT::i32);

      setOperationAction(ISD::FP_TO_SINT, MVT::i1, Promote);
      AddPromotedToType(ISD::FP_TO_SINT, MVT::i1,
                        isPPC64 ? MVT::i64 : MVT::i32);
      setOperationAction(ISD::FP_TO_UINT, MVT::i1, Promote);
      AddPromotedToType(ISD::FP_TO_UINT, MVT::i1,
                        isPPC64 ? MVT::i64 : MVT::i32);
    } else {
      setOperationAction(ISD::STRICT_SINT_TO_FP, MVT::i1, Custom);
      setOperationAction(ISD::STRICT_UINT_TO_FP, MVT::i1, Custom);
      setOperationAction(ISD::SINT_TO_FP, MVT::i1, Custom);
      setOperationAction(ISD::UINT_TO_FP, MVT::i1, Custom);
    }

    // PowerPC does not support direct load/store of condition registers.
    setOperationAction(ISD::LOAD, MVT::i1, Custom);
    setOperationAction(ISD::STORE, MVT::i1, Custom);

    // FIXME: Remove this once the ANDI glue bug is fixed:
    if (ANDIGlueBug)
      setOperationAction(ISD::TRUNCATE, MVT::i1, Custom);

    for (MVT VT : MVT::integer_valuetypes()) {
      setLoadExtAction(ISD::SEXTLOAD, VT, MVT::i1, Promote);
      setLoadExtAction(ISD::ZEXTLOAD, VT, MVT::i1, Promote);
      setTruncStoreAction(VT, MVT::i1, Expand);
    }

    addRegisterClass(MVT::i1, &PPC::CRBITRCRegClass);
  }

  // Expand ppcf128 to i32 by hand for the benefit of llvm-gcc bootstrap on
  // PPC (the libcall is not available).
  setOperationAction(ISD::FP_TO_SINT, MVT::ppcf128, Custom);
  setOperationAction(ISD::FP_TO_UINT, MVT::ppcf128, Custom);
  setOperationAction(ISD::STRICT_FP_TO_SINT, MVT::ppcf128, Custom);
  setOperationAction(ISD::STRICT_FP_TO_UINT, MVT::ppcf128, Custom);

  // We do not currently implement these libm ops for PowerPC.
  setOperationAction(ISD::FFLOOR, MVT::ppcf128, Expand);
  setOperationAction(ISD::FCEIL,  MVT::ppcf128, Expand);
  setOperationAction(ISD::FTRUNC, MVT::ppcf128, Expand);
  setOperationAction(ISD::FRINT,  MVT::ppcf128, Expand);
  setOperationAction(ISD::FNEARBYINT, MVT::ppcf128, Expand);
  setOperationAction(ISD::FREM, MVT::ppcf128, Expand);

  // PowerPC has no SREM/UREM instructions unless we are on P9.
  // On P9 we may use a hardware instruction to compute the remainder.
  // When the result of both the remainder and the division is required, it is
  // more efficient to compute the remainder from the result of the division
  // rather than use the remainder instruction. The instructions are marked
  // legal directly because the DivRemPairsPass performs the transformation at
  // the IR level.
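  // (Illustrative) An expanded remainder is rebuilt from the quotient via the
  // identity a % b == a - (a / b) * b; ISA 3.0 instead provides the
  // mods[wd]/modu[wd] instructions directly.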
  if (Subtarget.isISA3_0()) {
    setOperationAction(ISD::SREM, MVT::i32, Legal);
    setOperationAction(ISD::UREM, MVT::i32, Legal);
    setOperationAction(ISD::SREM, MVT::i64, Legal);
    setOperationAction(ISD::UREM, MVT::i64, Legal);
  } else {
    setOperationAction(ISD::SREM, MVT::i32, Expand);
    setOperationAction(ISD::UREM, MVT::i32, Expand);
    setOperationAction(ISD::SREM, MVT::i64, Expand);
    setOperationAction(ISD::UREM, MVT::i64, Expand);
  }

  // Don't use SMUL_LOHI/UMUL_LOHI or SDIVREM/UDIVREM to lower SREM/UREM.
  setOperationAction(ISD::UMUL_LOHI, MVT::i32, Expand);
  setOperationAction(ISD::SMUL_LOHI, MVT::i32, Expand);
  setOperationAction(ISD::UMUL_LOHI, MVT::i64, Expand);
  setOperationAction(ISD::SMUL_LOHI, MVT::i64, Expand);
  setOperationAction(ISD::UDIVREM, MVT::i32, Expand);
  setOperationAction(ISD::SDIVREM, MVT::i32, Expand);
  setOperationAction(ISD::UDIVREM, MVT::i64, Expand);
  setOperationAction(ISD::SDIVREM, MVT::i64, Expand);

  // Handle constrained floating-point operations for scalar types.
  // TODO: Handle SPE-specific operations.
  setOperationAction(ISD::STRICT_FADD, MVT::f32, Legal);
  setOperationAction(ISD::STRICT_FSUB, MVT::f32, Legal);
  setOperationAction(ISD::STRICT_FMUL, MVT::f32, Legal);
  setOperationAction(ISD::STRICT_FDIV, MVT::f32, Legal);
  setOperationAction(ISD::STRICT_FMA, MVT::f32, Legal);
  setOperationAction(ISD::STRICT_FP_ROUND, MVT::f32, Legal);

  setOperationAction(ISD::STRICT_FADD, MVT::f64, Legal);
  setOperationAction(ISD::STRICT_FSUB, MVT::f64, Legal);
  setOperationAction(ISD::STRICT_FMUL, MVT::f64, Legal);
  setOperationAction(ISD::STRICT_FDIV, MVT::f64, Legal);
  setOperationAction(ISD::STRICT_FMA, MVT::f64, Legal);
  if (Subtarget.hasVSX()) {
    setOperationAction(ISD::STRICT_FRINT, MVT::f32, Legal);
    setOperationAction(ISD::STRICT_FRINT, MVT::f64, Legal);
  }

  if (Subtarget.hasFSQRT()) {
    setOperationAction(ISD::STRICT_FSQRT, MVT::f32, Legal);
    setOperationAction(ISD::STRICT_FSQRT, MVT::f64, Legal);
  }

  if (Subtarget.hasFPRND()) {
    setOperationAction(ISD::STRICT_FFLOOR, MVT::f32, Legal);
    setOperationAction(ISD::STRICT_FCEIL,  MVT::f32, Legal);
    setOperationAction(ISD::STRICT_FTRUNC, MVT::f32, Legal);
    setOperationAction(ISD::STRICT_FROUND, MVT::f32, Legal);

    setOperationAction(ISD::STRICT_FFLOOR, MVT::f64, Legal);
    setOperationAction(ISD::STRICT_FCEIL,  MVT::f64, Legal);
    setOperationAction(ISD::STRICT_FTRUNC, MVT::f64, Legal);
    setOperationAction(ISD::STRICT_FROUND, MVT::f64, Legal);
  }

  // We don't support sin/cos/sqrt/fmod/pow
  setOperationAction(ISD::FSIN , MVT::f64, Expand);
  setOperationAction(ISD::FCOS , MVT::f64, Expand);
  setOperationAction(ISD::FSINCOS, MVT::f64, Expand);
  setOperationAction(ISD::FREM , MVT::f64, Expand);
  setOperationAction(ISD::FPOW , MVT::f64, Expand);
  setOperationAction(ISD::FSIN , MVT::f32, Expand);
  setOperationAction(ISD::FCOS , MVT::f32, Expand);
  setOperationAction(ISD::FSINCOS, MVT::f32, Expand);
  setOperationAction(ISD::FREM , MVT::f32, Expand);
  setOperationAction(ISD::FPOW , MVT::f32, Expand);
  if (Subtarget.hasSPE()) {
    setOperationAction(ISD::FMA  , MVT::f64, Expand);
    setOperationAction(ISD::FMA  , MVT::f32, Expand);
  } else {
    setOperationAction(ISD::FMA  , MVT::f64, Legal);
    setOperationAction(ISD::FMA  , MVT::f32, Legal);
  }

  if (Subtarget.hasSPE())
    setLoadExtAction(ISD::EXTLOAD, MVT::f64, MVT::f32, Expand);

  setOperationAction(ISD::FLT_ROUNDS_, MVT::i32, Custom);

  // If we're enabling GP optimizations, use hardware square root
  if (!Subtarget.hasFSQRT() &&
      !(TM.Options.UnsafeFPMath && Subtarget.hasFRSQRTE() &&
        Subtarget.hasFRE()))
    setOperationAction(ISD::FSQRT, MVT::f64, Expand);

  if (!Subtarget.hasFSQRT() &&
      !(TM.Options.UnsafeFPMath && Subtarget.hasFRSQRTES() &&
        Subtarget.hasFRES()))
    setOperationAction(ISD::FSQRT, MVT::f32, Expand);

  if (Subtarget.hasFCPSGN()) {
    setOperationAction(ISD::FCOPYSIGN, MVT::f64, Legal);
    setOperationAction(ISD::FCOPYSIGN, MVT::f32, Legal);
  } else {
    setOperationAction(ISD::FCOPYSIGN, MVT::f64, Expand);
    setOperationAction(ISD::FCOPYSIGN, MVT::f32, Expand);
  }

  if (Subtarget.hasFPRND()) {
    setOperationAction(ISD::FFLOOR, MVT::f64, Legal);
    setOperationAction(ISD::FCEIL,  MVT::f64, Legal);
    setOperationAction(ISD::FTRUNC, MVT::f64, Legal);
    setOperationAction(ISD::FROUND, MVT::f64, Legal);

    setOperationAction(ISD::FFLOOR, MVT::f32, Legal);
    setOperationAction(ISD::FCEIL,  MVT::f32, Legal);
    setOperationAction(ISD::FTRUNC, MVT::f32, Legal);
    setOperationAction(ISD::FROUND, MVT::f32, Legal);
  }

  // PowerPC does not have BSWAP, but we can use the vector BSWAP instruction
  // xxbrd to speed up scalar BSWAP64.
  // CTPOP and CTTZ were introduced in P8 and P9, respectively.
  setOperationAction(ISD::BSWAP, MVT::i32  , Expand);
  if (Subtarget.hasP9Vector())
    setOperationAction(ISD::BSWAP, MVT::i64  , Custom);
  else
    setOperationAction(ISD::BSWAP, MVT::i64  , Expand);
  if (Subtarget.isISA3_0()) {
    setOperationAction(ISD::CTTZ , MVT::i32  , Legal);
    setOperationAction(ISD::CTTZ , MVT::i64  , Legal);
  } else {
    setOperationAction(ISD::CTTZ , MVT::i32  , Expand);
    setOperationAction(ISD::CTTZ , MVT::i64  , Expand);
  }

  if (Subtarget.hasPOPCNTD() == PPCSubtarget::POPCNTD_Fast) {
    setOperationAction(ISD::CTPOP, MVT::i32  , Legal);
    setOperationAction(ISD::CTPOP, MVT::i64  , Legal);
  } else {
    setOperationAction(ISD::CTPOP, MVT::i32  , Expand);
    setOperationAction(ISD::CTPOP, MVT::i64  , Expand);
  }

  // PowerPC does not have ROTR
  setOperationAction(ISD::ROTR, MVT::i32   , Expand);
  setOperationAction(ISD::ROTR, MVT::i64   , Expand);

  if (!Subtarget.useCRBits()) {
    // PowerPC does not have Select
    setOperationAction(ISD::SELECT, MVT::i32, Expand);
    setOperationAction(ISD::SELECT, MVT::i64, Expand);
    setOperationAction(ISD::SELECT, MVT::f32, Expand);
    setOperationAction(ISD::SELECT, MVT::f64, Expand);
  }

  // PowerPC wants to turn select_cc of FP into fsel when possible.
  setOperationAction(ISD::SELECT_CC, MVT::f32, Custom);
  setOperationAction(ISD::SELECT_CC, MVT::f64, Custom);

  // PowerPC wants to optimize integer setcc a bit
  if (!Subtarget.useCRBits())
    setOperationAction(ISD::SETCC, MVT::i32, Custom);

  if (Subtarget.hasFPU()) {
    setOperationAction(ISD::STRICT_FSETCC, MVT::f32, Legal);
    setOperationAction(ISD::STRICT_FSETCC, MVT::f64, Legal);
    setOperationAction(ISD::STRICT_FSETCC, MVT::f128, Legal);

    setOperationAction(ISD::STRICT_FSETCCS, MVT::f32, Legal);
    setOperationAction(ISD::STRICT_FSETCCS, MVT::f64, Legal);
    setOperationAction(ISD::STRICT_FSETCCS, MVT::f128, Legal);
  }

  // PowerPC does not have BRCOND, which requires SetCC.
  if (!Subtarget.useCRBits())
    setOperationAction(ISD::BRCOND, MVT::Other, Expand);

  setOperationAction(ISD::BR_JT,  MVT::Other, Expand);

  if (Subtarget.hasSPE()) {
    // SPE has built-in conversions
    setOperationAction(ISD::STRICT_FP_TO_SINT, MVT::i32, Legal);
    setOperationAction(ISD::STRICT_SINT_TO_FP, MVT::i32, Legal);
    setOperationAction(ISD::STRICT_UINT_TO_FP, MVT::i32, Legal);
    setOperationAction(ISD::FP_TO_SINT, MVT::i32, Legal);
    setOperationAction(ISD::SINT_TO_FP, MVT::i32, Legal);
    setOperationAction(ISD::UINT_TO_FP, MVT::i32, Legal);
  } else {
    // PowerPC turns FP_TO_SINT into FCTIWZ and some load/stores.
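    // (Illustrative) The sequence is roughly: fctiwz converts within an FPR,
    // the result is stored to a stack slot and reloaded as an integer, since
    // pre-direct-move subtargets have no FPR-to-GPR register move.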
    setOperationAction(ISD::STRICT_FP_TO_SINT, MVT::i32, Custom);
    setOperationAction(ISD::FP_TO_SINT, MVT::i32, Custom);

    // PowerPC does not have [U|S]INT_TO_FP
    setOperationAction(ISD::STRICT_SINT_TO_FP, MVT::i32, Expand);
    setOperationAction(ISD::STRICT_UINT_TO_FP, MVT::i32, Expand);
    setOperationAction(ISD::SINT_TO_FP, MVT::i32, Expand);
    setOperationAction(ISD::UINT_TO_FP, MVT::i32, Expand);
  }

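  // Direct moves (e.g. mtvsrd/mfvsrd on ISA 2.07 and later) transfer values
  // between GPRs and vector registers without going through memory, which is
  // what makes these bitcasts cheap register-to-register operations.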
  if (Subtarget.hasDirectMove() && isPPC64) {
    setOperationAction(ISD::BITCAST, MVT::f32, Legal);
    setOperationAction(ISD::BITCAST, MVT::i32, Legal);
    setOperationAction(ISD::BITCAST, MVT::i64, Legal);
    setOperationAction(ISD::BITCAST, MVT::f64, Legal);
    if (TM.Options.UnsafeFPMath) {
      setOperationAction(ISD::LRINT, MVT::f64, Legal);
      setOperationAction(ISD::LRINT, MVT::f32, Legal);
      setOperationAction(ISD::LLRINT, MVT::f64, Legal);
      setOperationAction(ISD::LLRINT, MVT::f32, Legal);
      setOperationAction(ISD::LROUND, MVT::f64, Legal);
      setOperationAction(ISD::LROUND, MVT::f32, Legal);
      setOperationAction(ISD::LLROUND, MVT::f64, Legal);
      setOperationAction(ISD::LLROUND, MVT::f32, Legal);
    }
  } else {
    setOperationAction(ISD::BITCAST, MVT::f32, Expand);
    setOperationAction(ISD::BITCAST, MVT::i32, Expand);
    setOperationAction(ISD::BITCAST, MVT::i64, Expand);
    setOperationAction(ISD::BITCAST, MVT::f64, Expand);
  }

  // We cannot sextinreg(i1).  Expand to shifts.
  setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i1, Expand);

  // NOTE: EH_SJLJ_SETJMP/_LONGJMP is NOT intended to support SjLj exception
  // handling; it is a lightweight setjmp/longjmp replacement used to support
  // continuations, user-level threading, and the like. As a result, no other
  // SjLj exception interfaces are implemented, so please do not build your
  // own exception handling on top of them.
  // LLVM/Clang supports zero-cost DWARF exception handling.
  setOperationAction(ISD::EH_SJLJ_SETJMP, MVT::i32, Custom);
  setOperationAction(ISD::EH_SJLJ_LONGJMP, MVT::Other, Custom);

  // We want to legalize GlobalAddress and ConstantPool nodes into the
  // appropriate instructions to materialize the address.
  setOperationAction(ISD::GlobalAddress, MVT::i32, Custom);
  setOperationAction(ISD::GlobalTLSAddress, MVT::i32, Custom);
  setOperationAction(ISD::BlockAddress,  MVT::i32, Custom);
  setOperationAction(ISD::ConstantPool,  MVT::i32, Custom);
  setOperationAction(ISD::JumpTable,     MVT::i32, Custom);
  setOperationAction(ISD::GlobalAddress, MVT::i64, Custom);
  setOperationAction(ISD::GlobalTLSAddress, MVT::i64, Custom);
  setOperationAction(ISD::BlockAddress,  MVT::i64, Custom);
  setOperationAction(ISD::ConstantPool,  MVT::i64, Custom);
  setOperationAction(ISD::JumpTable,     MVT::i64, Custom);

  // TRAP is legal.
  setOperationAction(ISD::TRAP, MVT::Other, Legal);

  // TRAMPOLINE is custom lowered.
  setOperationAction(ISD::INIT_TRAMPOLINE, MVT::Other, Custom);
  setOperationAction(ISD::ADJUST_TRAMPOLINE, MVT::Other, Custom);

  // VASTART needs to be custom lowered to use the VarArgsFrameIndex
  setOperationAction(ISD::VASTART           , MVT::Other, Custom);

  if (Subtarget.is64BitELFABI()) {
    // VAARG always uses double-word chunks, so promote anything smaller.
    setOperationAction(ISD::VAARG, MVT::i1, Promote);
    AddPromotedToType(ISD::VAARG, MVT::i1, MVT::i64);
    setOperationAction(ISD::VAARG, MVT::i8, Promote);
    AddPromotedToType(ISD::VAARG, MVT::i8, MVT::i64);
    setOperationAction(ISD::VAARG, MVT::i16, Promote);
    AddPromotedToType(ISD::VAARG, MVT::i16, MVT::i64);
    setOperationAction(ISD::VAARG, MVT::i32, Promote);
    AddPromotedToType(ISD::VAARG, MVT::i32, MVT::i64);
    setOperationAction(ISD::VAARG, MVT::Other, Expand);
  } else if (Subtarget.is32BitELFABI()) {
    // VAARG is custom lowered with the 32-bit SVR4 ABI.
    setOperationAction(ISD::VAARG, MVT::Other, Custom);
    setOperationAction(ISD::VAARG, MVT::i64, Custom);
  } else
    setOperationAction(ISD::VAARG, MVT::Other, Expand);

  // VACOPY is custom lowered with the 32-bit SVR4 ABI.
  if (Subtarget.is32BitELFABI())
    setOperationAction(ISD::VACOPY            , MVT::Other, Custom);
  else
    setOperationAction(ISD::VACOPY            , MVT::Other, Expand);

  // Use the default implementation.
  setOperationAction(ISD::VAEND             , MVT::Other, Expand);
  setOperationAction(ISD::STACKSAVE         , MVT::Other, Expand);
  setOperationAction(ISD::STACKRESTORE      , MVT::Other, Custom);
  setOperationAction(ISD::DYNAMIC_STACKALLOC, MVT::i32  , Custom);
  setOperationAction(ISD::DYNAMIC_STACKALLOC, MVT::i64  , Custom);
  setOperationAction(ISD::GET_DYNAMIC_AREA_OFFSET, MVT::i32, Custom);
  setOperationAction(ISD::GET_DYNAMIC_AREA_OFFSET, MVT::i64, Custom);
  setOperationAction(ISD::EH_DWARF_CFA, MVT::i32, Custom);
  setOperationAction(ISD::EH_DWARF_CFA, MVT::i64, Custom);

  // We want to custom lower some of our intrinsics.
  setOperationAction(ISD::INTRINSIC_WO_CHAIN, MVT::Other, Custom);

  // To handle counter-based loop conditions.
  setOperationAction(ISD::INTRINSIC_W_CHAIN, MVT::i1, Custom);

  setOperationAction(ISD::INTRINSIC_VOID, MVT::i8, Custom);
  setOperationAction(ISD::INTRINSIC_VOID, MVT::i16, Custom);
  setOperationAction(ISD::INTRINSIC_VOID, MVT::i32, Custom);
  setOperationAction(ISD::INTRINSIC_VOID, MVT::Other, Custom);

  // Comparisons that require checking two conditions.
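  // A compare with an unsupported predicate is split by legalization into two
  // legal compares joined with AND/OR; e.g. SETUEQ ("unordered or equal") can
  // be rebuilt as SETUO || SETOEQ.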
  if (Subtarget.hasSPE()) {
    setCondCodeAction(ISD::SETO, MVT::f32, Expand);
    setCondCodeAction(ISD::SETO, MVT::f64, Expand);
    setCondCodeAction(ISD::SETUO, MVT::f32, Expand);
    setCondCodeAction(ISD::SETUO, MVT::f64, Expand);
  }
  setCondCodeAction(ISD::SETULT, MVT::f32, Expand);
  setCondCodeAction(ISD::SETULT, MVT::f64, Expand);
  setCondCodeAction(ISD::SETUGT, MVT::f32, Expand);
  setCondCodeAction(ISD::SETUGT, MVT::f64, Expand);
  setCondCodeAction(ISD::SETUEQ, MVT::f32, Expand);
  setCondCodeAction(ISD::SETUEQ, MVT::f64, Expand);
  setCondCodeAction(ISD::SETOGE, MVT::f32, Expand);
  setCondCodeAction(ISD::SETOGE, MVT::f64, Expand);
  setCondCodeAction(ISD::SETOLE, MVT::f32, Expand);
  setCondCodeAction(ISD::SETOLE, MVT::f64, Expand);
  setCondCodeAction(ISD::SETONE, MVT::f32, Expand);
  setCondCodeAction(ISD::SETONE, MVT::f64, Expand);

  if (Subtarget.has64BitSupport()) {
    // 64-bit processors also have instructions for converting between i64
    // and fp.
    setOperationAction(ISD::STRICT_FP_TO_SINT, MVT::i64, Custom);
    setOperationAction(ISD::STRICT_FP_TO_UINT, MVT::i64, Expand);
    setOperationAction(ISD::STRICT_SINT_TO_FP, MVT::i64, Custom);
    setOperationAction(ISD::STRICT_UINT_TO_FP, MVT::i64, Expand);
    setOperationAction(ISD::FP_TO_SINT, MVT::i64, Custom);
    setOperationAction(ISD::FP_TO_UINT, MVT::i64, Expand);
    setOperationAction(ISD::SINT_TO_FP, MVT::i64, Custom);
    setOperationAction(ISD::UINT_TO_FP, MVT::i64, Expand);
    // This is just the low 32 bits of a (signed) fp->i64 conversion.
    // We cannot do this with Promote because i64 is not a legal type.
    setOperationAction(ISD::STRICT_FP_TO_UINT, MVT::i32, Custom);
    setOperationAction(ISD::FP_TO_UINT, MVT::i32, Custom);

    if (Subtarget.hasLFIWAX() || Subtarget.isPPC64()) {
      setOperationAction(ISD::SINT_TO_FP, MVT::i32, Custom);
      setOperationAction(ISD::STRICT_SINT_TO_FP, MVT::i32, Custom);
    }
  } else {
    // PowerPC does not have FP_TO_UINT on 32-bit implementations.
    if (Subtarget.hasSPE()) {
      setOperationAction(ISD::STRICT_FP_TO_UINT, MVT::i32, Legal);
      setOperationAction(ISD::FP_TO_UINT, MVT::i32, Legal);
    } else {
      setOperationAction(ISD::STRICT_FP_TO_UINT, MVT::i32, Expand);
      setOperationAction(ISD::FP_TO_UINT, MVT::i32, Expand);
    }
  }

  // With the instructions enabled under FPCVT, we can do everything.
  if (Subtarget.hasFPCVT()) {
    if (Subtarget.has64BitSupport()) {
      setOperationAction(ISD::STRICT_FP_TO_SINT, MVT::i64, Custom);
      setOperationAction(ISD::STRICT_FP_TO_UINT, MVT::i64, Custom);
      setOperationAction(ISD::STRICT_SINT_TO_FP, MVT::i64, Custom);
      setOperationAction(ISD::STRICT_UINT_TO_FP, MVT::i64, Custom);
      setOperationAction(ISD::FP_TO_SINT, MVT::i64, Custom);
      setOperationAction(ISD::FP_TO_UINT, MVT::i64, Custom);
      setOperationAction(ISD::SINT_TO_FP, MVT::i64, Custom);
      setOperationAction(ISD::UINT_TO_FP, MVT::i64, Custom);
    }

    setOperationAction(ISD::STRICT_FP_TO_SINT, MVT::i32, Custom);
    setOperationAction(ISD::STRICT_FP_TO_UINT, MVT::i32, Custom);
    setOperationAction(ISD::STRICT_SINT_TO_FP, MVT::i32, Custom);
    setOperationAction(ISD::STRICT_UINT_TO_FP, MVT::i32, Custom);
    setOperationAction(ISD::FP_TO_SINT, MVT::i32, Custom);
    setOperationAction(ISD::FP_TO_UINT, MVT::i32, Custom);
    setOperationAction(ISD::SINT_TO_FP, MVT::i32, Custom);
    setOperationAction(ISD::UINT_TO_FP, MVT::i32, Custom);
  }

  if (Subtarget.use64BitRegs()) {
    // 64-bit PowerPC implementations can support i64 types directly
    addRegisterClass(MVT::i64, &PPC::G8RCRegClass);
    // BUILD_PAIR can't be handled natively, and should be expanded to shl/or
    setOperationAction(ISD::BUILD_PAIR, MVT::i64, Expand);
    // 64-bit PowerPC wants to expand i128 shifts itself.
    setOperationAction(ISD::SHL_PARTS, MVT::i64, Custom);
    setOperationAction(ISD::SRA_PARTS, MVT::i64, Custom);
    setOperationAction(ISD::SRL_PARTS, MVT::i64, Custom);
  } else {
    // 32-bit PowerPC wants to expand i64 shifts itself.
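    // (Illustrative) The expanded form shifts each 32-bit half, ORs in the
    // bits that cross the word boundary, and selects on shift amounts >= 32.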
    setOperationAction(ISD::SHL_PARTS, MVT::i32, Custom);
    setOperationAction(ISD::SRA_PARTS, MVT::i32, Custom);
    setOperationAction(ISD::SRL_PARTS, MVT::i32, Custom);
  }

  // PowerPC has better expansions for funnel shifts than the generic
  // TargetLowering::expandFunnelShift.
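  // (Recall that fshl(X, Y, Z) returns the high half of the double-width
  // value (X ++ Y) shifted left by Z modulo the bit width; on PPC this maps
  // well onto the rotate-and-mask style instructions.)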
  if (Subtarget.has64BitSupport()) {
    setOperationAction(ISD::FSHL, MVT::i64, Custom);
    setOperationAction(ISD::FSHR, MVT::i64, Custom);
  }
  setOperationAction(ISD::FSHL, MVT::i32, Custom);
  setOperationAction(ISD::FSHR, MVT::i32, Custom);

  if (Subtarget.hasVSX()) {
    setOperationAction(ISD::FMAXNUM_IEEE, MVT::f64, Legal);
    setOperationAction(ISD::FMAXNUM_IEEE, MVT::f32, Legal);
    setOperationAction(ISD::FMINNUM_IEEE, MVT::f64, Legal);
    setOperationAction(ISD::FMINNUM_IEEE, MVT::f32, Legal);
  }

  if (Subtarget.hasAltivec()) {
    for (MVT VT : { MVT::v16i8, MVT::v8i16, MVT::v4i32 }) {
      setOperationAction(ISD::SADDSAT, VT, Legal);
      setOperationAction(ISD::SSUBSAT, VT, Legal);
      setOperationAction(ISD::UADDSAT, VT, Legal);
      setOperationAction(ISD::USUBSAT, VT, Legal);
    }
    // First set the operation action for all vector types to Expand. Then we
    // will selectively turn on ones that can be effectively codegen'd.
    for (MVT VT : MVT::fixedlen_vector_valuetypes()) {
      // add/sub are legal for all supported vector VT's.
      setOperationAction(ISD::ADD, VT, Legal);
      setOperationAction(ISD::SUB, VT, Legal);

      // For v2i64, these are only valid with P8Vector. This is corrected after
      // the loop.
      if (VT.getSizeInBits() <= 128 && VT.getScalarSizeInBits() <= 64) {
        setOperationAction(ISD::SMAX, VT, Legal);
        setOperationAction(ISD::SMIN, VT, Legal);
        setOperationAction(ISD::UMAX, VT, Legal);
        setOperationAction(ISD::UMIN, VT, Legal);
      } else {
        setOperationAction(ISD::SMAX, VT, Expand);
        setOperationAction(ISD::SMIN, VT, Expand);
        setOperationAction(ISD::UMAX, VT, Expand);
        setOperationAction(ISD::UMIN, VT, Expand);
      }

      if (Subtarget.hasVSX()) {
        setOperationAction(ISD::FMAXNUM, VT, Legal);
        setOperationAction(ISD::FMINNUM, VT, Legal);
      }

      // Vector instructions introduced in P8
      if (Subtarget.hasP8Altivec() && (VT.SimpleTy != MVT::v1i128)) {
        setOperationAction(ISD::CTPOP, VT, Legal);
        setOperationAction(ISD::CTLZ, VT, Legal);
      } else {
        setOperationAction(ISD::CTPOP, VT, Expand);
        setOperationAction(ISD::CTLZ, VT, Expand);
      }

      // Vector instructions introduced in P9
      if (Subtarget.hasP9Altivec() && (VT.SimpleTy != MVT::v1i128))
        setOperationAction(ISD::CTTZ, VT, Legal);
      else
        setOperationAction(ISD::CTTZ, VT, Expand);

      // We promote all shuffles to v16i8.
      setOperationAction(ISD::VECTOR_SHUFFLE, VT, Promote);
      AddPromotedToType (ISD::VECTOR_SHUFFLE, VT, MVT::v16i8);

      // We promote all non-typed operations to v4i32.
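      // (This works because bitwise operations, loads, stores, and selects are
      // insensitive to the element type: any 128-bit vector value can be
      // bitcast to v4i32, operated on there, and bitcast back for free.)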
      setOperationAction(ISD::AND   , VT, Promote);
      AddPromotedToType (ISD::AND   , VT, MVT::v4i32);
      setOperationAction(ISD::OR    , VT, Promote);
      AddPromotedToType (ISD::OR    , VT, MVT::v4i32);
      setOperationAction(ISD::XOR   , VT, Promote);
      AddPromotedToType (ISD::XOR   , VT, MVT::v4i32);
      setOperationAction(ISD::LOAD  , VT, Promote);
      AddPromotedToType (ISD::LOAD  , VT, MVT::v4i32);
      setOperationAction(ISD::SELECT, VT, Promote);
      AddPromotedToType (ISD::SELECT, VT, MVT::v4i32);
      setOperationAction(ISD::VSELECT, VT, Legal);
      setOperationAction(ISD::SELECT_CC, VT, Promote);
      AddPromotedToType (ISD::SELECT_CC, VT, MVT::v4i32);
      setOperationAction(ISD::STORE, VT, Promote);
      AddPromotedToType (ISD::STORE, VT, MVT::v4i32);

      // No other operations are legal.
      setOperationAction(ISD::MUL , VT, Expand);
      setOperationAction(ISD::SDIV, VT, Expand);
      setOperationAction(ISD::SREM, VT, Expand);
      setOperationAction(ISD::UDIV, VT, Expand);
      setOperationAction(ISD::UREM, VT, Expand);
      setOperationAction(ISD::FDIV, VT, Expand);
      setOperationAction(ISD::FREM, VT, Expand);
      setOperationAction(ISD::FNEG, VT, Expand);
      setOperationAction(ISD::FSQRT, VT, Expand);
      setOperationAction(ISD::FLOG, VT, Expand);
      setOperationAction(ISD::FLOG10, VT, Expand);
      setOperationAction(ISD::FLOG2, VT, Expand);
      setOperationAction(ISD::FEXP, VT, Expand);
      setOperationAction(ISD::FEXP2, VT, Expand);
      setOperationAction(ISD::FSIN, VT, Expand);
      setOperationAction(ISD::FCOS, VT, Expand);
      setOperationAction(ISD::FABS, VT, Expand);
      setOperationAction(ISD::FFLOOR, VT, Expand);
      setOperationAction(ISD::FCEIL,  VT, Expand);
      setOperationAction(ISD::FTRUNC, VT, Expand);
      setOperationAction(ISD::FRINT,  VT, Expand);
      setOperationAction(ISD::FNEARBYINT, VT, Expand);
      setOperationAction(ISD::EXTRACT_VECTOR_ELT, VT, Expand);
      setOperationAction(ISD::INSERT_VECTOR_ELT, VT, Expand);
      setOperationAction(ISD::BUILD_VECTOR, VT, Expand);
      setOperationAction(ISD::MULHU, VT, Expand);
      setOperationAction(ISD::MULHS, VT, Expand);
      setOperationAction(ISD::UMUL_LOHI, VT, Expand);
      setOperationAction(ISD::SMUL_LOHI, VT, Expand);
      setOperationAction(ISD::UDIVREM, VT, Expand);
      setOperationAction(ISD::SDIVREM, VT, Expand);
      setOperationAction(ISD::SCALAR_TO_VECTOR, VT, Expand);
      setOperationAction(ISD::FPOW, VT, Expand);
      setOperationAction(ISD::BSWAP, VT, Expand);
      setOperationAction(ISD::SIGN_EXTEND_INREG, VT, Expand);
      setOperationAction(ISD::ROTL, VT, Expand);
      setOperationAction(ISD::ROTR, VT, Expand);

      for (MVT InnerVT : MVT::fixedlen_vector_valuetypes()) {
        setTruncStoreAction(VT, InnerVT, Expand);
        setLoadExtAction(ISD::SEXTLOAD, VT, InnerVT, Expand);
        setLoadExtAction(ISD::ZEXTLOAD, VT, InnerVT, Expand);
        setLoadExtAction(ISD::EXTLOAD, VT, InnerVT, Expand);
      }
    }
    setOperationAction(ISD::SELECT_CC, MVT::v4i32, Expand);
    if (!Subtarget.hasP8Vector()) {
      setOperationAction(ISD::SMAX, MVT::v2i64, Expand);
      setOperationAction(ISD::SMIN, MVT::v2i64, Expand);
      setOperationAction(ISD::UMAX, MVT::v2i64, Expand);
      setOperationAction(ISD::UMIN, MVT::v2i64, Expand);
    }

    // We can custom expand all VECTOR_SHUFFLEs to VPERM; others we can handle
    // with merges, splats, etc.
    setOperationAction(ISD::VECTOR_SHUFFLE, MVT::v16i8, Custom);

    // Vector truncates to sub-word integers that fit in an Altivec/VSX
    // register are cheap, so handle them before they get expanded to scalar.
    setOperationAction(ISD::TRUNCATE, MVT::v8i8, Custom);
    setOperationAction(ISD::TRUNCATE, MVT::v4i8, Custom);
    setOperationAction(ISD::TRUNCATE, MVT::v2i8, Custom);
    setOperationAction(ISD::TRUNCATE, MVT::v4i16, Custom);
    setOperationAction(ISD::TRUNCATE, MVT::v2i16, Custom);

    setOperationAction(ISD::AND   , MVT::v4i32, Legal);
    setOperationAction(ISD::OR    , MVT::v4i32, Legal);
    setOperationAction(ISD::XOR   , MVT::v4i32, Legal);
    setOperationAction(ISD::LOAD  , MVT::v4i32, Legal);
    setOperationAction(ISD::SELECT, MVT::v4i32,
                       Subtarget.useCRBits() ? Legal : Expand);
    setOperationAction(ISD::STORE , MVT::v4i32, Legal);
    setOperationAction(ISD::STRICT_FP_TO_SINT, MVT::v4i32, Legal);
    setOperationAction(ISD::STRICT_FP_TO_UINT, MVT::v4i32, Legal);
    setOperationAction(ISD::STRICT_SINT_TO_FP, MVT::v4i32, Legal);
    setOperationAction(ISD::STRICT_UINT_TO_FP, MVT::v4i32, Legal);
    setOperationAction(ISD::FP_TO_SINT, MVT::v4i32, Legal);
    setOperationAction(ISD::FP_TO_UINT, MVT::v4i32, Legal);
    setOperationAction(ISD::SINT_TO_FP, MVT::v4i32, Legal);
    setOperationAction(ISD::UINT_TO_FP, MVT::v4i32, Legal);
    setOperationAction(ISD::FFLOOR, MVT::v4f32, Legal);
    setOperationAction(ISD::FCEIL, MVT::v4f32, Legal);
    setOperationAction(ISD::FTRUNC, MVT::v4f32, Legal);
    setOperationAction(ISD::FNEARBYINT, MVT::v4f32, Legal);

    // Custom-lower ROTL of v1i128 to a VECTOR_SHUFFLE of v16i8.
    setOperationAction(ISD::ROTL, MVT::v1i128, Custom);
    // With hasAltivec set, we can lower ISD::ROTL to vrl(b|h|w).
    if (Subtarget.hasAltivec())
      for (auto VT : {MVT::v4i32, MVT::v8i16, MVT::v16i8})
        setOperationAction(ISD::ROTL, VT, Legal);
    // With hasP8Altivec set, we can lower ISD::ROTL to vrld.
    if (Subtarget.hasP8Altivec())
      setOperationAction(ISD::ROTL, MVT::v2i64, Legal);

    addRegisterClass(MVT::v4f32, &PPC::VRRCRegClass);
    addRegisterClass(MVT::v4i32, &PPC::VRRCRegClass);
    addRegisterClass(MVT::v8i16, &PPC::VRRCRegClass);
    addRegisterClass(MVT::v16i8, &PPC::VRRCRegClass);

    setOperationAction(ISD::MUL, MVT::v4f32, Legal);
    setOperationAction(ISD::FMA, MVT::v4f32, Legal);

    if (Subtarget.hasVSX()) {
      setOperationAction(ISD::FDIV, MVT::v4f32, Legal);
      setOperationAction(ISD::FSQRT, MVT::v4f32, Legal);
    }

    if (Subtarget.hasP8Altivec())
      setOperationAction(ISD::MUL, MVT::v4i32, Legal);
    else
      setOperationAction(ISD::MUL, MVT::v4i32, Custom);

    if (Subtarget.isISA3_1()) {
      setOperationAction(ISD::MUL, MVT::v2i64, Legal);
      setOperationAction(ISD::MULHS, MVT::v2i64, Legal);
      setOperationAction(ISD::MULHU, MVT::v2i64, Legal);
      setOperationAction(ISD::MULHS, MVT::v4i32, Legal);
      setOperationAction(ISD::MULHU, MVT::v4i32, Legal);
      setOperationAction(ISD::UDIV, MVT::v2i64, Legal);
      setOperationAction(ISD::SDIV, MVT::v2i64, Legal);
      setOperationAction(ISD::UDIV, MVT::v4i32, Legal);
      setOperationAction(ISD::SDIV, MVT::v4i32, Legal);
      setOperationAction(ISD::UREM, MVT::v2i64, Legal);
      setOperationAction(ISD::SREM, MVT::v2i64, Legal);
      setOperationAction(ISD::UREM, MVT::v4i32, Legal);
      setOperationAction(ISD::SREM, MVT::v4i32, Legal);
      setOperationAction(ISD::UREM, MVT::v1i128, Legal);
      setOperationAction(ISD::SREM, MVT::v1i128, Legal);
      setOperationAction(ISD::UDIV, MVT::v1i128, Legal);
      setOperationAction(ISD::SDIV, MVT::v1i128, Legal);
      setOperationAction(ISD::ROTL, MVT::v1i128, Legal);
    }

    setOperationAction(ISD::MUL, MVT::v8i16, Legal);
    setOperationAction(ISD::MUL, MVT::v16i8, Custom);

    setOperationAction(ISD::SCALAR_TO_VECTOR, MVT::v4f32, Custom);
    setOperationAction(ISD::SCALAR_TO_VECTOR, MVT::v4i32, Custom);

    setOperationAction(ISD::BUILD_VECTOR, MVT::v16i8, Custom);
    setOperationAction(ISD::BUILD_VECTOR, MVT::v8i16, Custom);
    setOperationAction(ISD::BUILD_VECTOR, MVT::v4i32, Custom);
    setOperationAction(ISD::BUILD_VECTOR, MVT::v4f32, Custom);

    // Altivec does not contain unordered floating-point compare instructions
    setCondCodeAction(ISD::SETUO, MVT::v4f32, Expand);
    setCondCodeAction(ISD::SETUEQ, MVT::v4f32, Expand);
    setCondCodeAction(ISD::SETO,   MVT::v4f32, Expand);
    setCondCodeAction(ISD::SETONE, MVT::v4f32, Expand);

    if (Subtarget.hasVSX()) {
      setOperationAction(ISD::SCALAR_TO_VECTOR, MVT::v2f64, Legal);
      setOperationAction(ISD::EXTRACT_VECTOR_ELT, MVT::v2f64, Legal);
      if (Subtarget.hasP8Vector()) {
        setOperationAction(ISD::SCALAR_TO_VECTOR, MVT::v4f32, Legal);
        setOperationAction(ISD::EXTRACT_VECTOR_ELT, MVT::v4f32, Legal);
      }
      if (Subtarget.hasDirectMove() && isPPC64) {
        setOperationAction(ISD::SCALAR_TO_VECTOR, MVT::v16i8, Legal);
        setOperationAction(ISD::SCALAR_TO_VECTOR, MVT::v8i16, Legal);
        setOperationAction(ISD::SCALAR_TO_VECTOR, MVT::v4i32, Legal);
        setOperationAction(ISD::SCALAR_TO_VECTOR, MVT::v2i64, Legal);
        setOperationAction(ISD::EXTRACT_VECTOR_ELT, MVT::v16i8, Legal);
        setOperationAction(ISD::EXTRACT_VECTOR_ELT, MVT::v8i16, Legal);
        setOperationAction(ISD::EXTRACT_VECTOR_ELT, MVT::v4i32, Legal);
        setOperationAction(ISD::EXTRACT_VECTOR_ELT, MVT::v2i64, Legal);
      }
      setOperationAction(ISD::EXTRACT_VECTOR_ELT, MVT::v2f64, Legal);

      // The nearbyint variants are not allowed to raise the inexact exception,
      // so we can only code-gen them with unsafe math.
      if (TM.Options.UnsafeFPMath) {
        setOperationAction(ISD::FNEARBYINT, MVT::f64, Legal);
        setOperationAction(ISD::FNEARBYINT, MVT::f32, Legal);
      }

      setOperationAction(ISD::FFLOOR, MVT::v2f64, Legal);
      setOperationAction(ISD::FCEIL, MVT::v2f64, Legal);
      setOperationAction(ISD::FTRUNC, MVT::v2f64, Legal);
      setOperationAction(ISD::FNEARBYINT, MVT::v2f64, Legal);
      setOperationAction(ISD::FRINT, MVT::v2f64, Legal);
      setOperationAction(ISD::FROUND, MVT::v2f64, Legal);
      setOperationAction(ISD::FROUND, MVT::f64, Legal);
      setOperationAction(ISD::FRINT, MVT::f64, Legal);

      setOperationAction(ISD::FNEARBYINT, MVT::v4f32, Legal);
      setOperationAction(ISD::FRINT, MVT::v4f32, Legal);
      setOperationAction(ISD::FROUND, MVT::v4f32, Legal);
      setOperationAction(ISD::FROUND, MVT::f32, Legal);
      setOperationAction(ISD::FRINT, MVT::f32, Legal);

      setOperationAction(ISD::MUL, MVT::v2f64, Legal);
      setOperationAction(ISD::FMA, MVT::v2f64, Legal);

      setOperationAction(ISD::FDIV, MVT::v2f64, Legal);
      setOperationAction(ISD::FSQRT, MVT::v2f64, Legal);

      // Share the Altivec comparison restrictions.
      setCondCodeAction(ISD::SETUO, MVT::v2f64, Expand);
      setCondCodeAction(ISD::SETUEQ, MVT::v2f64, Expand);
      setCondCodeAction(ISD::SETO,   MVT::v2f64, Expand);
      setCondCodeAction(ISD::SETONE, MVT::v2f64, Expand);

      setOperationAction(ISD::LOAD, MVT::v2f64, Legal);
      setOperationAction(ISD::STORE, MVT::v2f64, Legal);

      setOperationAction(ISD::VECTOR_SHUFFLE, MVT::v2f64, Legal);

      if (Subtarget.hasP8Vector())
        addRegisterClass(MVT::f32, &PPC::VSSRCRegClass);

      addRegisterClass(MVT::f64, &PPC::VSFRCRegClass);

      addRegisterClass(MVT::v4i32, &PPC::VSRCRegClass);
      addRegisterClass(MVT::v4f32, &PPC::VSRCRegClass);
      addRegisterClass(MVT::v2f64, &PPC::VSRCRegClass);

      if (Subtarget.hasP8Altivec()) {
        setOperationAction(ISD::SHL, MVT::v2i64, Legal);
        setOperationAction(ISD::SRA, MVT::v2i64, Legal);
        setOperationAction(ISD::SRL, MVT::v2i64, Legal);

        // 128-bit shifts can be accomplished via 3 instructions for SHL and
        // SRL, but not for SRA because of the instructions available:
        // VS{RL} and VS{RL}O. However, due to direct move costs, it's not
        // worth doing.
        setOperationAction(ISD::SHL, MVT::v1i128, Expand);
        setOperationAction(ISD::SRL, MVT::v1i128, Expand);
        setOperationAction(ISD::SRA, MVT::v1i128, Expand);

        setOperationAction(ISD::SETCC, MVT::v2i64, Legal);
      } else {
        setOperationAction(ISD::SHL, MVT::v2i64, Expand);
        setOperationAction(ISD::SRA, MVT::v2i64, Expand);
        setOperationAction(ISD::SRL, MVT::v2i64, Expand);

        setOperationAction(ISD::SETCC, MVT::v2i64, Custom);

        // VSX v2i64 only supports non-arithmetic operations.
        setOperationAction(ISD::ADD, MVT::v2i64, Expand);
        setOperationAction(ISD::SUB, MVT::v2i64, Expand);
      }

      if (Subtarget.isISA3_1())
        setOperationAction(ISD::SETCC, MVT::v1i128, Legal);
      else
        setOperationAction(ISD::SETCC, MVT::v1i128, Expand);

      setOperationAction(ISD::LOAD, MVT::v2i64, Promote);
      AddPromotedToType (ISD::LOAD, MVT::v2i64, MVT::v2f64);
      setOperationAction(ISD::STORE, MVT::v2i64, Promote);
      AddPromotedToType (ISD::STORE, MVT::v2i64, MVT::v2f64);

      setOperationAction(ISD::VECTOR_SHUFFLE, MVT::v2i64, Legal);

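      // (Illustrative) With VSX, v2i64 <-> v2f64 conversions map onto the
      // xvcvsxddp/xvcvuxddp and xvcvdpsxds/xvcvdpuxds instructions, so both
      // the strict and non-strict forms can be marked Legal.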
      setOperationAction(ISD::STRICT_SINT_TO_FP, MVT::v2i64, Legal);
      setOperationAction(ISD::STRICT_UINT_TO_FP, MVT::v2i64, Legal);
      setOperationAction(ISD::STRICT_FP_TO_SINT, MVT::v2i64, Legal);
      setOperationAction(ISD::STRICT_FP_TO_UINT, MVT::v2i64, Legal);
      setOperationAction(ISD::SINT_TO_FP, MVT::v2i64, Legal);
      setOperationAction(ISD::UINT_TO_FP, MVT::v2i64, Legal);
      setOperationAction(ISD::FP_TO_SINT, MVT::v2i64, Legal);
      setOperationAction(ISD::FP_TO_UINT, MVT::v2i64, Legal);

      // Custom handling for partial vectors of integers converted to
      // floating point. We already have optimal handling for v2i32 through
      // the DAG combine, so those aren't necessary.
      setOperationAction(ISD::STRICT_UINT_TO_FP, MVT::v2i8, Custom);
      setOperationAction(ISD::STRICT_UINT_TO_FP, MVT::v4i8, Custom);
      setOperationAction(ISD::STRICT_UINT_TO_FP, MVT::v2i16, Custom);
      setOperationAction(ISD::STRICT_UINT_TO_FP, MVT::v4i16, Custom);
      setOperationAction(ISD::STRICT_SINT_TO_FP, MVT::v2i8, Custom);
      setOperationAction(ISD::STRICT_SINT_TO_FP, MVT::v4i8, Custom);
      setOperationAction(ISD::STRICT_SINT_TO_FP, MVT::v2i16, Custom);
      setOperationAction(ISD::STRICT_SINT_TO_FP, MVT::v4i16, Custom);
      setOperationAction(ISD::UINT_TO_FP, MVT::v2i8, Custom);
      setOperationAction(ISD::UINT_TO_FP, MVT::v4i8, Custom);
      setOperationAction(ISD::UINT_TO_FP, MVT::v2i16, Custom);
      setOperationAction(ISD::UINT_TO_FP, MVT::v4i16, Custom);
      setOperationAction(ISD::SINT_TO_FP, MVT::v2i8, Custom);
      setOperationAction(ISD::SINT_TO_FP, MVT::v4i8, Custom);
      setOperationAction(ISD::SINT_TO_FP, MVT::v2i16, Custom);
      setOperationAction(ISD::SINT_TO_FP, MVT::v4i16, Custom);

      setOperationAction(ISD::FNEG, MVT::v4f32, Legal);
      setOperationAction(ISD::FNEG, MVT::v2f64, Legal);
      setOperationAction(ISD::FABS, MVT::v4f32, Legal);
      setOperationAction(ISD::FABS, MVT::v2f64, Legal);
      setOperationAction(ISD::FCOPYSIGN, MVT::v4f32, Legal);
      setOperationAction(ISD::FCOPYSIGN, MVT::v2f64, Legal);

      if (Subtarget.hasDirectMove())
        setOperationAction(ISD::BUILD_VECTOR, MVT::v2i64, Custom);
      setOperationAction(ISD::BUILD_VECTOR, MVT::v2f64, Custom);

      // Handle constrained floating-point operations for vector types.
      // These are guarded by `hasVSX` because Altivec instructions do not
      // raise floating-point exceptions, whereas VSX vector instructions do.
      setOperationAction(ISD::STRICT_FADD, MVT::v4f32, Legal);
      setOperationAction(ISD::STRICT_FSUB, MVT::v4f32, Legal);
      setOperationAction(ISD::STRICT_FMUL, MVT::v4f32, Legal);
      setOperationAction(ISD::STRICT_FDIV, MVT::v4f32, Legal);
      setOperationAction(ISD::STRICT_FMA, MVT::v4f32, Legal);
      setOperationAction(ISD::STRICT_FSQRT, MVT::v4f32, Legal);
      setOperationAction(ISD::STRICT_FMAXNUM, MVT::v4f32, Legal);
      setOperationAction(ISD::STRICT_FMINNUM, MVT::v4f32, Legal);
      setOperationAction(ISD::STRICT_FRINT, MVT::v4f32, Legal);
      setOperationAction(ISD::STRICT_FFLOOR, MVT::v4f32, Legal);
      setOperationAction(ISD::STRICT_FCEIL,  MVT::v4f32, Legal);
      setOperationAction(ISD::STRICT_FTRUNC, MVT::v4f32, Legal);
      setOperationAction(ISD::STRICT_FROUND, MVT::v4f32, Legal);

      setOperationAction(ISD::STRICT_FADD, MVT::v2f64, Legal);
      setOperationAction(ISD::STRICT_FSUB, MVT::v2f64, Legal);
      setOperationAction(ISD::STRICT_FMUL, MVT::v2f64, Legal);
      setOperationAction(ISD::STRICT_FDIV, MVT::v2f64, Legal);
      setOperationAction(ISD::STRICT_FMA, MVT::v2f64, Legal);
      setOperationAction(ISD::STRICT_FSQRT, MVT::v2f64, Legal);
      setOperationAction(ISD::STRICT_FMAXNUM, MVT::v2f64, Legal);
      setOperationAction(ISD::STRICT_FMINNUM, MVT::v2f64, Legal);
      setOperationAction(ISD::STRICT_FRINT, MVT::v2f64, Legal);
      setOperationAction(ISD::STRICT_FFLOOR, MVT::v2f64, Legal);
      setOperationAction(ISD::STRICT_FCEIL,  MVT::v2f64, Legal);
      setOperationAction(ISD::STRICT_FTRUNC, MVT::v2f64, Legal);
      setOperationAction(ISD::STRICT_FROUND, MVT::v2f64, Legal);

      addRegisterClass(MVT::v2i64, &PPC::VSRCRegClass);
    }

    if (Subtarget.hasP8Altivec()) {
      addRegisterClass(MVT::v2i64, &PPC::VRRCRegClass);
      addRegisterClass(MVT::v1i128, &PPC::VRRCRegClass);
    }

    if (Subtarget.hasP9Vector()) {
      setOperationAction(ISD::INSERT_VECTOR_ELT, MVT::v4i32, Custom);
      setOperationAction(ISD::INSERT_VECTOR_ELT, MVT::v4f32, Custom);

      // 128-bit shifts can be accomplished via 3 instructions for SHL and
      // SRL, but not for SRA because of the instructions available:
      // VS{RL} and VS{RL}O.
      setOperationAction(ISD::SHL, MVT::v1i128, Legal);
      setOperationAction(ISD::SRL, MVT::v1i128, Legal);
      setOperationAction(ISD::SRA, MVT::v1i128, Expand);

      addRegisterClass(MVT::f128, &PPC::VRRCRegClass);
      setOperationAction(ISD::FADD, MVT::f128, Legal);
      setOperationAction(ISD::FSUB, MVT::f128, Legal);
      setOperationAction(ISD::FDIV, MVT::f128, Legal);
      setOperationAction(ISD::FMUL, MVT::f128, Legal);
      setOperationAction(ISD::FP_EXTEND, MVT::f128, Legal);
      // No extending loads to f128 on PPC.
      for (MVT FPT : MVT::fp_valuetypes())
        setLoadExtAction(ISD::EXTLOAD, MVT::f128, FPT, Expand);
      setOperationAction(ISD::FMA, MVT::f128, Legal);
      setCondCodeAction(ISD::SETULT, MVT::f128, Expand);
      setCondCodeAction(ISD::SETUGT, MVT::f128, Expand);
      setCondCodeAction(ISD::SETUEQ, MVT::f128, Expand);
      setCondCodeAction(ISD::SETOGE, MVT::f128, Expand);
      setCondCodeAction(ISD::SETOLE, MVT::f128, Expand);
      setCondCodeAction(ISD::SETONE, MVT::f128, Expand);

      setOperationAction(ISD::FTRUNC, MVT::f128, Legal);
      setOperationAction(ISD::FRINT, MVT::f128, Legal);
      setOperationAction(ISD::FFLOOR, MVT::f128, Legal);
      setOperationAction(ISD::FCEIL, MVT::f128, Legal);
      setOperationAction(ISD::FNEARBYINT, MVT::f128, Legal);
      setOperationAction(ISD::FROUND, MVT::f128, Legal);

      setOperationAction(ISD::SELECT, MVT::f128, Expand);
      setOperationAction(ISD::FP_ROUND, MVT::f64, Legal);
      setOperationAction(ISD::FP_ROUND, MVT::f32, Legal);
      setTruncStoreAction(MVT::f128, MVT::f64, Expand);
      setTruncStoreAction(MVT::f128, MVT::f32, Expand);
      setOperationAction(ISD::BITCAST, MVT::i128, Custom);
      // No implementation for these ops on PowerPC.
      setOperationAction(ISD::FSIN, MVT::f128, Expand);
      setOperationAction(ISD::FCOS, MVT::f128, Expand);
      setOperationAction(ISD::FPOW, MVT::f128, Expand);
      setOperationAction(ISD::FPOWI, MVT::f128, Expand);
      setOperationAction(ISD::FREM, MVT::f128, Expand);

      // Handle constrained floating-point operations for fp128.
      setOperationAction(ISD::STRICT_FADD, MVT::f128, Legal);
      setOperationAction(ISD::STRICT_FSUB, MVT::f128, Legal);
      setOperationAction(ISD::STRICT_FMUL, MVT::f128, Legal);
      setOperationAction(ISD::STRICT_FDIV, MVT::f128, Legal);
      setOperationAction(ISD::STRICT_FMA, MVT::f128, Legal);
      setOperationAction(ISD::STRICT_FSQRT, MVT::f128, Legal);
      setOperationAction(ISD::STRICT_FP_EXTEND, MVT::f128, Legal);
      setOperationAction(ISD::STRICT_FP_ROUND, MVT::f64, Legal);
      setOperationAction(ISD::STRICT_FP_ROUND, MVT::f32, Legal);
      setOperationAction(ISD::STRICT_FRINT, MVT::f128, Legal);
      setOperationAction(ISD::STRICT_FNEARBYINT, MVT::f128, Legal);
      setOperationAction(ISD::STRICT_FFLOOR, MVT::f128, Legal);
      setOperationAction(ISD::STRICT_FCEIL, MVT::f128, Legal);
      setOperationAction(ISD::STRICT_FTRUNC, MVT::f128, Legal);
      setOperationAction(ISD::STRICT_FROUND, MVT::f128, Legal);
      setOperationAction(ISD::FP_EXTEND, MVT::v2f32, Custom);
      setOperationAction(ISD::BSWAP, MVT::v8i16, Legal);
      setOperationAction(ISD::BSWAP, MVT::v4i32, Legal);
      setOperationAction(ISD::BSWAP, MVT::v2i64, Legal);
      setOperationAction(ISD::BSWAP, MVT::v1i128, Legal);
1175     } else if (Subtarget.hasAltivec() && EnableSoftFP128) {
1176       addRegisterClass(MVT::f128, &PPC::VRRCRegClass);
1177 
1178       for (MVT FPT : MVT::fp_valuetypes())
1179         setLoadExtAction(ISD::EXTLOAD, MVT::f128, FPT, Expand);
1180 
1181       setOperationAction(ISD::LOAD, MVT::f128, Promote);
1182       setOperationAction(ISD::STORE, MVT::f128, Promote);
1183 
1184       AddPromotedToType(ISD::LOAD, MVT::f128, MVT::v4i32);
1185       AddPromotedToType(ISD::STORE, MVT::f128, MVT::v4i32);
1186 
1187       setOperationAction(ISD::FADD, MVT::f128, Expand);
1188       setOperationAction(ISD::FSUB, MVT::f128, Expand);
1189       setOperationAction(ISD::FMUL, MVT::f128, Expand);
1190       setOperationAction(ISD::FDIV, MVT::f128, Expand);
1191       setOperationAction(ISD::FNEG, MVT::f128, Expand);
1192       setOperationAction(ISD::FABS, MVT::f128, Expand);
1193       setOperationAction(ISD::FSIN, MVT::f128, Expand);
1194       setOperationAction(ISD::FCOS, MVT::f128, Expand);
1195       setOperationAction(ISD::FPOW, MVT::f128, Expand);
1196       setOperationAction(ISD::FPOWI, MVT::f128, Expand);
1197       setOperationAction(ISD::FREM, MVT::f128, Expand);
1198       setOperationAction(ISD::FSQRT, MVT::f128, Expand);
1199       setOperationAction(ISD::FMA, MVT::f128, Expand);
1200       setOperationAction(ISD::FCOPYSIGN, MVT::f128, Expand);
1201     }
1202 
1203     if (Subtarget.hasP9Altivec()) {
1204       setOperationAction(ISD::INSERT_VECTOR_ELT, MVT::v8i16, Custom);
1205       setOperationAction(ISD::INSERT_VECTOR_ELT, MVT::v16i8, Custom);
1206 
1207       setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::v4i8,  Legal);
1208       setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::v4i16, Legal);
1209       setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::v4i32, Legal);
1210       setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::v2i8,  Legal);
1211       setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::v2i16, Legal);
1212       setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::v2i32, Legal);
1213       setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::v2i64, Legal);
1214     }
1215   }
1216 
1217   if (Subtarget.pairedVectorMemops()) {
1218     addRegisterClass(MVT::v256i1, &PPC::VSRpRCRegClass);
1219     setOperationAction(ISD::LOAD, MVT::v256i1, Custom);
1220     setOperationAction(ISD::STORE, MVT::v256i1, Custom);
1221   }
1222   if (Subtarget.hasMMA()) {
1223     addRegisterClass(MVT::v512i1, &PPC::UACCRCRegClass);
1224     setOperationAction(ISD::LOAD, MVT::v512i1, Custom);
1225     setOperationAction(ISD::STORE, MVT::v512i1, Custom);
1226     setOperationAction(ISD::BUILD_VECTOR, MVT::v512i1, Custom);
1227   }
1228 
1229   if (Subtarget.has64BitSupport())
1230     setOperationAction(ISD::PREFETCH, MVT::Other, Legal);
1231 
1232   if (Subtarget.isISA3_1())
1233     setOperationAction(ISD::SRA, MVT::v1i128, Legal);
1234 
1235   setOperationAction(ISD::READCYCLECOUNTER, MVT::i64, isPPC64 ? Legal : Custom);
1236 
1237   if (!isPPC64) {
1238     setOperationAction(ISD::ATOMIC_LOAD,  MVT::i64, Expand);
1239     setOperationAction(ISD::ATOMIC_STORE, MVT::i64, Expand);
1240   }
1241 
1242   setBooleanContents(ZeroOrOneBooleanContent);
1243 
1244   if (Subtarget.hasAltivec()) {
1245     // Altivec instructions set fields to all zeros or all ones.
1246     setBooleanVectorContents(ZeroOrNegativeOneBooleanContent);
1247   }
1248 
1249   if (!isPPC64) {
    // These libcalls are not available in 32-bit mode.
1251     setLibcallName(RTLIB::SHL_I128, nullptr);
1252     setLibcallName(RTLIB::SRL_I128, nullptr);
1253     setLibcallName(RTLIB::SRA_I128, nullptr);
1254   }
1255 
1256   if (!isPPC64)
1257     setMaxAtomicSizeInBitsSupported(32);
1258 
1259   setStackPointerRegisterToSaveRestore(isPPC64 ? PPC::X1 : PPC::R1);
1260 
1261   // We have target-specific dag combine patterns for the following nodes:
1262   setTargetDAGCombine(ISD::ADD);
1263   setTargetDAGCombine(ISD::SHL);
1264   setTargetDAGCombine(ISD::SRA);
1265   setTargetDAGCombine(ISD::SRL);
1266   setTargetDAGCombine(ISD::MUL);
1267   setTargetDAGCombine(ISD::FMA);
1268   setTargetDAGCombine(ISD::SINT_TO_FP);
1269   setTargetDAGCombine(ISD::BUILD_VECTOR);
1270   if (Subtarget.hasFPCVT())
1271     setTargetDAGCombine(ISD::UINT_TO_FP);
1272   setTargetDAGCombine(ISD::LOAD);
1273   setTargetDAGCombine(ISD::STORE);
1274   setTargetDAGCombine(ISD::BR_CC);
1275   if (Subtarget.useCRBits())
1276     setTargetDAGCombine(ISD::BRCOND);
1277   setTargetDAGCombine(ISD::BSWAP);
1278   setTargetDAGCombine(ISD::INTRINSIC_WO_CHAIN);
1279   setTargetDAGCombine(ISD::INTRINSIC_W_CHAIN);
1280   setTargetDAGCombine(ISD::INTRINSIC_VOID);
1281 
1282   setTargetDAGCombine(ISD::SIGN_EXTEND);
1283   setTargetDAGCombine(ISD::ZERO_EXTEND);
1284   setTargetDAGCombine(ISD::ANY_EXTEND);
1285 
1286   setTargetDAGCombine(ISD::TRUNCATE);
  setTargetDAGCombine(ISD::VECTOR_SHUFFLE);

  if (Subtarget.useCRBits()) {
    setTargetDAGCombine(ISD::SETCC);
    setTargetDAGCombine(ISD::SELECT_CC);
  }
1295 
1296   if (Subtarget.hasP9Altivec()) {
1297     setTargetDAGCombine(ISD::ABS);
1298     setTargetDAGCombine(ISD::VSELECT);
1299   }
1300 
1301   setLibcallName(RTLIB::LOG_F128, "logf128");
1302   setLibcallName(RTLIB::LOG2_F128, "log2f128");
1303   setLibcallName(RTLIB::LOG10_F128, "log10f128");
1304   setLibcallName(RTLIB::EXP_F128, "expf128");
1305   setLibcallName(RTLIB::EXP2_F128, "exp2f128");
1306   setLibcallName(RTLIB::SIN_F128, "sinf128");
1307   setLibcallName(RTLIB::COS_F128, "cosf128");
1308   setLibcallName(RTLIB::POW_F128, "powf128");
1309   setLibcallName(RTLIB::FMIN_F128, "fminf128");
1310   setLibcallName(RTLIB::FMAX_F128, "fmaxf128");
1311   setLibcallName(RTLIB::POWI_F128, "__powikf2");
1312   setLibcallName(RTLIB::REM_F128, "fmodf128");
1313 
1314   // With 32 condition bits, we don't need to sink (and duplicate) compares
1315   // aggressively in CodeGenPrep.
1316   if (Subtarget.useCRBits()) {
1317     setHasMultipleConditionRegisters();
1318     setJumpIsExpensive();
1319   }
1320 
1321   setMinFunctionAlignment(Align(4));
1322 
1323   switch (Subtarget.getCPUDirective()) {
1324   default: break;
1325   case PPC::DIR_970:
1326   case PPC::DIR_A2:
1327   case PPC::DIR_E500:
1328   case PPC::DIR_E500mc:
1329   case PPC::DIR_E5500:
1330   case PPC::DIR_PWR4:
1331   case PPC::DIR_PWR5:
1332   case PPC::DIR_PWR5X:
1333   case PPC::DIR_PWR6:
1334   case PPC::DIR_PWR6X:
1335   case PPC::DIR_PWR7:
1336   case PPC::DIR_PWR8:
1337   case PPC::DIR_PWR9:
1338   case PPC::DIR_PWR10:
1339   case PPC::DIR_PWR_FUTURE:
1340     setPrefLoopAlignment(Align(16));
1341     setPrefFunctionAlignment(Align(16));
1342     break;
1343   }
1344 
1345   if (Subtarget.enableMachineScheduler())
1346     setSchedulingPreference(Sched::Source);
1347   else
1348     setSchedulingPreference(Sched::Hybrid);
1349 
1350   computeRegisterProperties(STI.getRegisterInfo());
1351 
  // The Freescale cores do better with aggressive inlining of memcpy and
  // friends. GCC uses the same threshold of 128 bytes (= 32 word stores).
1354   if (Subtarget.getCPUDirective() == PPC::DIR_E500mc ||
1355       Subtarget.getCPUDirective() == PPC::DIR_E5500) {
1356     MaxStoresPerMemset = 32;
1357     MaxStoresPerMemsetOptSize = 16;
1358     MaxStoresPerMemcpy = 32;
1359     MaxStoresPerMemcpyOptSize = 8;
1360     MaxStoresPerMemmove = 32;
1361     MaxStoresPerMemmoveOptSize = 8;
1362   } else if (Subtarget.getCPUDirective() == PPC::DIR_A2) {
    // The A2 also benefits from (very) aggressive inlining of memcpy and
    // friends. The overhead of the function call, even when warm, can be
    // over one hundred cycles.
1366     MaxStoresPerMemset = 128;
1367     MaxStoresPerMemcpy = 128;
1368     MaxStoresPerMemmove = 128;
1369     MaxLoadsPerMemcmp = 128;
1370   } else {
1371     MaxLoadsPerMemcmp = 8;
1372     MaxLoadsPerMemcmpOptSize = 4;
1373   }
1374 
1375   IsStrictFPEnabled = true;
1376 
1377   // Let the subtarget (CPU) decide if a predictable select is more expensive
1378   // than the corresponding branch. This information is used in CGP to decide
1379   // when to convert selects into branches.
1380   PredictableSelectIsExpensive = Subtarget.isPredictableSelectIsExpensive();
1381 }
1382 
1383 /// getMaxByValAlign - Helper for getByValTypeAlignment to determine
1384 /// the desired ByVal argument alignment.
1385 static void getMaxByValAlign(Type *Ty, Align &MaxAlign, Align MaxMaxAlign) {
1386   if (MaxAlign == MaxMaxAlign)
1387     return;
1388   if (VectorType *VTy = dyn_cast<VectorType>(Ty)) {
1389     if (MaxMaxAlign >= 32 &&
1390         VTy->getPrimitiveSizeInBits().getFixedSize() >= 256)
1391       MaxAlign = Align(32);
1392     else if (VTy->getPrimitiveSizeInBits().getFixedSize() >= 128 &&
1393              MaxAlign < 16)
1394       MaxAlign = Align(16);
1395   } else if (ArrayType *ATy = dyn_cast<ArrayType>(Ty)) {
1396     Align EltAlign;
1397     getMaxByValAlign(ATy->getElementType(), EltAlign, MaxMaxAlign);
1398     if (EltAlign > MaxAlign)
1399       MaxAlign = EltAlign;
1400   } else if (StructType *STy = dyn_cast<StructType>(Ty)) {
1401     for (auto *EltTy : STy->elements()) {
1402       Align EltAlign;
1403       getMaxByValAlign(EltTy, EltAlign, MaxMaxAlign);
1404       if (EltAlign > MaxAlign)
1405         MaxAlign = EltAlign;
1406       if (MaxAlign == MaxMaxAlign)
1407         break;
1408     }
1409   }
1410 }
1411 
1412 /// getByValTypeAlignment - Return the desired alignment for ByVal aggregate
1413 /// function arguments in the caller parameter area.
1414 unsigned PPCTargetLowering::getByValTypeAlignment(Type *Ty,
1415                                                   const DataLayout &DL) const {
  // 16-byte and wider vectors are passed on a 16-byte boundary.
  // Everything else is aligned to 8 bytes on PPC64 and 4 bytes on PPC32.
1418   Align Alignment = Subtarget.isPPC64() ? Align(8) : Align(4);
1419   if (Subtarget.hasAltivec())
1420     getMaxByValAlign(Ty, Alignment, Align(16));
1421   return Alignment.value();
1422 }
1423 
1424 bool PPCTargetLowering::useSoftFloat() const {
1425   return Subtarget.useSoftFloat();
1426 }
1427 
1428 bool PPCTargetLowering::hasSPE() const {
1429   return Subtarget.hasSPE();
1430 }
1431 
1432 bool PPCTargetLowering::preferIncOfAddToSubOfNot(EVT VT) const {
1433   return VT.isScalarInteger();
1434 }
1435 
1436 const char *PPCTargetLowering::getTargetNodeName(unsigned Opcode) const {
1437   switch ((PPCISD::NodeType)Opcode) {
1438   case PPCISD::FIRST_NUMBER:    break;
1439   case PPCISD::FSEL:            return "PPCISD::FSEL";
1440   case PPCISD::XSMAXCDP:        return "PPCISD::XSMAXCDP";
1441   case PPCISD::XSMINCDP:        return "PPCISD::XSMINCDP";
1442   case PPCISD::FCFID:           return "PPCISD::FCFID";
1443   case PPCISD::FCFIDU:          return "PPCISD::FCFIDU";
1444   case PPCISD::FCFIDS:          return "PPCISD::FCFIDS";
1445   case PPCISD::FCFIDUS:         return "PPCISD::FCFIDUS";
1446   case PPCISD::FCTIDZ:          return "PPCISD::FCTIDZ";
1447   case PPCISD::FCTIWZ:          return "PPCISD::FCTIWZ";
1448   case PPCISD::FCTIDUZ:         return "PPCISD::FCTIDUZ";
1449   case PPCISD::FCTIWUZ:         return "PPCISD::FCTIWUZ";
1450   case PPCISD::FP_TO_UINT_IN_VSR:
1451                                 return "PPCISD::FP_TO_UINT_IN_VSR,";
1452   case PPCISD::FP_TO_SINT_IN_VSR:
1453                                 return "PPCISD::FP_TO_SINT_IN_VSR";
1454   case PPCISD::FRE:             return "PPCISD::FRE";
1455   case PPCISD::FRSQRTE:         return "PPCISD::FRSQRTE";
1456   case PPCISD::FTSQRT:
1457     return "PPCISD::FTSQRT";
1458   case PPCISD::FSQRT:
1459     return "PPCISD::FSQRT";
1460   case PPCISD::STFIWX:          return "PPCISD::STFIWX";
1461   case PPCISD::VPERM:           return "PPCISD::VPERM";
1462   case PPCISD::XXSPLT:          return "PPCISD::XXSPLT";
1463   case PPCISD::XXSPLTI_SP_TO_DP:
1464     return "PPCISD::XXSPLTI_SP_TO_DP";
1465   case PPCISD::XXSPLTI32DX:
1466     return "PPCISD::XXSPLTI32DX";
1467   case PPCISD::VECINSERT:       return "PPCISD::VECINSERT";
1468   case PPCISD::XXPERMDI:        return "PPCISD::XXPERMDI";
1469   case PPCISD::VECSHL:          return "PPCISD::VECSHL";
1470   case PPCISD::CMPB:            return "PPCISD::CMPB";
1471   case PPCISD::Hi:              return "PPCISD::Hi";
1472   case PPCISD::Lo:              return "PPCISD::Lo";
1473   case PPCISD::TOC_ENTRY:       return "PPCISD::TOC_ENTRY";
1474   case PPCISD::ATOMIC_CMP_SWAP_8: return "PPCISD::ATOMIC_CMP_SWAP_8";
1475   case PPCISD::ATOMIC_CMP_SWAP_16: return "PPCISD::ATOMIC_CMP_SWAP_16";
1476   case PPCISD::DYNALLOC:        return "PPCISD::DYNALLOC";
1477   case PPCISD::DYNAREAOFFSET:   return "PPCISD::DYNAREAOFFSET";
1478   case PPCISD::PROBED_ALLOCA:   return "PPCISD::PROBED_ALLOCA";
1479   case PPCISD::GlobalBaseReg:   return "PPCISD::GlobalBaseReg";
1480   case PPCISD::SRL:             return "PPCISD::SRL";
1481   case PPCISD::SRA:             return "PPCISD::SRA";
1482   case PPCISD::SHL:             return "PPCISD::SHL";
1483   case PPCISD::SRA_ADDZE:       return "PPCISD::SRA_ADDZE";
1484   case PPCISD::CALL:            return "PPCISD::CALL";
1485   case PPCISD::CALL_NOP:        return "PPCISD::CALL_NOP";
1486   case PPCISD::CALL_NOTOC:      return "PPCISD::CALL_NOTOC";
1487   case PPCISD::MTCTR:           return "PPCISD::MTCTR";
1488   case PPCISD::BCTRL:           return "PPCISD::BCTRL";
1489   case PPCISD::BCTRL_LOAD_TOC:  return "PPCISD::BCTRL_LOAD_TOC";
1490   case PPCISD::RET_FLAG:        return "PPCISD::RET_FLAG";
1491   case PPCISD::READ_TIME_BASE:  return "PPCISD::READ_TIME_BASE";
1492   case PPCISD::EH_SJLJ_SETJMP:  return "PPCISD::EH_SJLJ_SETJMP";
1493   case PPCISD::EH_SJLJ_LONGJMP: return "PPCISD::EH_SJLJ_LONGJMP";
1494   case PPCISD::MFOCRF:          return "PPCISD::MFOCRF";
1495   case PPCISD::MFVSR:           return "PPCISD::MFVSR";
1496   case PPCISD::MTVSRA:          return "PPCISD::MTVSRA";
1497   case PPCISD::MTVSRZ:          return "PPCISD::MTVSRZ";
1498   case PPCISD::SINT_VEC_TO_FP:  return "PPCISD::SINT_VEC_TO_FP";
1499   case PPCISD::UINT_VEC_TO_FP:  return "PPCISD::UINT_VEC_TO_FP";
1500   case PPCISD::SCALAR_TO_VECTOR_PERMUTED:
1501     return "PPCISD::SCALAR_TO_VECTOR_PERMUTED";
1502   case PPCISD::ANDI_rec_1_EQ_BIT:
1503     return "PPCISD::ANDI_rec_1_EQ_BIT";
1504   case PPCISD::ANDI_rec_1_GT_BIT:
1505     return "PPCISD::ANDI_rec_1_GT_BIT";
1506   case PPCISD::VCMP:            return "PPCISD::VCMP";
1507   case PPCISD::VCMP_rec:        return "PPCISD::VCMP_rec";
1508   case PPCISD::LBRX:            return "PPCISD::LBRX";
1509   case PPCISD::STBRX:           return "PPCISD::STBRX";
1510   case PPCISD::LFIWAX:          return "PPCISD::LFIWAX";
1511   case PPCISD::LFIWZX:          return "PPCISD::LFIWZX";
1512   case PPCISD::LXSIZX:          return "PPCISD::LXSIZX";
1513   case PPCISD::STXSIX:          return "PPCISD::STXSIX";
1514   case PPCISD::VEXTS:           return "PPCISD::VEXTS";
1515   case PPCISD::LXVD2X:          return "PPCISD::LXVD2X";
1516   case PPCISD::STXVD2X:         return "PPCISD::STXVD2X";
1517   case PPCISD::LOAD_VEC_BE:     return "PPCISD::LOAD_VEC_BE";
1518   case PPCISD::STORE_VEC_BE:    return "PPCISD::STORE_VEC_BE";
1519   case PPCISD::ST_VSR_SCAL_INT:
1520                                 return "PPCISD::ST_VSR_SCAL_INT";
1521   case PPCISD::COND_BRANCH:     return "PPCISD::COND_BRANCH";
1522   case PPCISD::BDNZ:            return "PPCISD::BDNZ";
1523   case PPCISD::BDZ:             return "PPCISD::BDZ";
1524   case PPCISD::MFFS:            return "PPCISD::MFFS";
1525   case PPCISD::FADDRTZ:         return "PPCISD::FADDRTZ";
1526   case PPCISD::TC_RETURN:       return "PPCISD::TC_RETURN";
1527   case PPCISD::CR6SET:          return "PPCISD::CR6SET";
1528   case PPCISD::CR6UNSET:        return "PPCISD::CR6UNSET";
1529   case PPCISD::PPC32_GOT:       return "PPCISD::PPC32_GOT";
1530   case PPCISD::PPC32_PICGOT:    return "PPCISD::PPC32_PICGOT";
1531   case PPCISD::ADDIS_GOT_TPREL_HA: return "PPCISD::ADDIS_GOT_TPREL_HA";
1532   case PPCISD::LD_GOT_TPREL_L:  return "PPCISD::LD_GOT_TPREL_L";
1533   case PPCISD::ADD_TLS:         return "PPCISD::ADD_TLS";
1534   case PPCISD::ADDIS_TLSGD_HA:  return "PPCISD::ADDIS_TLSGD_HA";
1535   case PPCISD::ADDI_TLSGD_L:    return "PPCISD::ADDI_TLSGD_L";
1536   case PPCISD::GET_TLS_ADDR:    return "PPCISD::GET_TLS_ADDR";
1537   case PPCISD::ADDI_TLSGD_L_ADDR: return "PPCISD::ADDI_TLSGD_L_ADDR";
1538   case PPCISD::ADDIS_TLSLD_HA:  return "PPCISD::ADDIS_TLSLD_HA";
1539   case PPCISD::ADDI_TLSLD_L:    return "PPCISD::ADDI_TLSLD_L";
1540   case PPCISD::GET_TLSLD_ADDR:  return "PPCISD::GET_TLSLD_ADDR";
1541   case PPCISD::ADDI_TLSLD_L_ADDR: return "PPCISD::ADDI_TLSLD_L_ADDR";
1542   case PPCISD::ADDIS_DTPREL_HA: return "PPCISD::ADDIS_DTPREL_HA";
1543   case PPCISD::ADDI_DTPREL_L:   return "PPCISD::ADDI_DTPREL_L";
1544   case PPCISD::PADDI_DTPREL:
1545     return "PPCISD::PADDI_DTPREL";
1546   case PPCISD::VADD_SPLAT:      return "PPCISD::VADD_SPLAT";
1547   case PPCISD::SC:              return "PPCISD::SC";
1548   case PPCISD::CLRBHRB:         return "PPCISD::CLRBHRB";
1549   case PPCISD::MFBHRBE:         return "PPCISD::MFBHRBE";
1550   case PPCISD::RFEBB:           return "PPCISD::RFEBB";
1551   case PPCISD::XXSWAPD:         return "PPCISD::XXSWAPD";
1552   case PPCISD::SWAP_NO_CHAIN:   return "PPCISD::SWAP_NO_CHAIN";
1553   case PPCISD::VABSD:           return "PPCISD::VABSD";
1554   case PPCISD::BUILD_FP128:     return "PPCISD::BUILD_FP128";
1555   case PPCISD::BUILD_SPE64:     return "PPCISD::BUILD_SPE64";
1556   case PPCISD::EXTRACT_SPE:     return "PPCISD::EXTRACT_SPE";
1557   case PPCISD::EXTSWSLI:        return "PPCISD::EXTSWSLI";
1558   case PPCISD::LD_VSX_LH:       return "PPCISD::LD_VSX_LH";
1559   case PPCISD::FP_EXTEND_HALF:  return "PPCISD::FP_EXTEND_HALF";
1560   case PPCISD::MAT_PCREL_ADDR:  return "PPCISD::MAT_PCREL_ADDR";
1561   case PPCISD::TLS_DYNAMIC_MAT_PCREL_ADDR:
1562     return "PPCISD::TLS_DYNAMIC_MAT_PCREL_ADDR";
1563   case PPCISD::TLS_LOCAL_EXEC_MAT_ADDR:
1564     return "PPCISD::TLS_LOCAL_EXEC_MAT_ADDR";
1565   case PPCISD::ACC_BUILD:       return "PPCISD::ACC_BUILD";
1566   case PPCISD::PAIR_BUILD:      return "PPCISD::PAIR_BUILD";
1567   case PPCISD::EXTRACT_VSX_REG: return "PPCISD::EXTRACT_VSX_REG";
1568   case PPCISD::XXMFACC:         return "PPCISD::XXMFACC";
1569   case PPCISD::LD_SPLAT:        return "PPCISD::LD_SPLAT";
1570   case PPCISD::FNMSUB:          return "PPCISD::FNMSUB";
1571   case PPCISD::STRICT_FADDRTZ:
1572     return "PPCISD::STRICT_FADDRTZ";
1573   case PPCISD::STRICT_FCTIDZ:
1574     return "PPCISD::STRICT_FCTIDZ";
1575   case PPCISD::STRICT_FCTIWZ:
1576     return "PPCISD::STRICT_FCTIWZ";
1577   case PPCISD::STRICT_FCTIDUZ:
1578     return "PPCISD::STRICT_FCTIDUZ";
1579   case PPCISD::STRICT_FCTIWUZ:
1580     return "PPCISD::STRICT_FCTIWUZ";
1581   case PPCISD::STRICT_FCFID:
1582     return "PPCISD::STRICT_FCFID";
1583   case PPCISD::STRICT_FCFIDU:
1584     return "PPCISD::STRICT_FCFIDU";
1585   case PPCISD::STRICT_FCFIDS:
1586     return "PPCISD::STRICT_FCFIDS";
1587   case PPCISD::STRICT_FCFIDUS:
1588     return "PPCISD::STRICT_FCFIDUS";
1589   case PPCISD::LXVRZX:          return "PPCISD::LXVRZX";
1590   }
1591   return nullptr;
1592 }
1593 
1594 EVT PPCTargetLowering::getSetCCResultType(const DataLayout &DL, LLVMContext &C,
1595                                           EVT VT) const {
1596   if (!VT.isVector())
1597     return Subtarget.useCRBits() ? MVT::i1 : MVT::i32;
1598 
1599   return VT.changeVectorElementTypeToInteger();
1600 }
1601 
1602 bool PPCTargetLowering::enableAggressiveFMAFusion(EVT VT) const {
1603   assert(VT.isFloatingPoint() && "Non-floating-point FMA?");
1604   return true;
1605 }
1606 
1607 //===----------------------------------------------------------------------===//
1608 // Node matching predicates, for use by the tblgen matching code.
1609 //===----------------------------------------------------------------------===//
1610 
1611 /// isFloatingPointZero - Return true if this is 0.0 or -0.0.
1612 static bool isFloatingPointZero(SDValue Op) {
1613   if (ConstantFPSDNode *CFP = dyn_cast<ConstantFPSDNode>(Op))
1614     return CFP->getValueAPF().isZero();
1615   else if (ISD::isEXTLoad(Op.getNode()) || ISD::isNON_EXTLoad(Op.getNode())) {
1616     // Maybe this has already been legalized into the constant pool?
1617     if (ConstantPoolSDNode *CP = dyn_cast<ConstantPoolSDNode>(Op.getOperand(1)))
1618       if (const ConstantFP *CFP = dyn_cast<ConstantFP>(CP->getConstVal()))
1619         return CFP->getValueAPF().isZero();
1620   }
1621   return false;
1622 }
1623 
1624 /// isConstantOrUndef - Op is either an undef node or a ConstantSDNode.  Return
1625 /// true if Op is undef or if it matches the specified value.
1626 static bool isConstantOrUndef(int Op, int Val) {
1627   return Op < 0 || Op == Val;
1628 }
1629 
1630 /// isVPKUHUMShuffleMask - Return true if this is the shuffle mask for a
1631 /// VPKUHUM instruction.
1632 /// The ShuffleKind distinguishes between big-endian operations with
1633 /// two different inputs (0), either-endian operations with two identical
1634 /// inputs (1), and little-endian operations with two different inputs (2).
1635 /// For the latter, the input operands are swapped (see PPCInstrAltivec.td).
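/// For example (derived from the checks below), a big-endian VPKUHUM of two
/// distinct inputs (ShuffleKind 0) keeps the low-order (odd-numbered) byte of
/// each halfword, i.e. the mask
/// <1,3,5,7,9,11,13,15,17,19,21,23,25,27,29,31>.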
1636 bool PPC::isVPKUHUMShuffleMask(ShuffleVectorSDNode *N, unsigned ShuffleKind,
1637                                SelectionDAG &DAG) {
1638   bool IsLE = DAG.getDataLayout().isLittleEndian();
1639   if (ShuffleKind == 0) {
1640     if (IsLE)
1641       return false;
1642     for (unsigned i = 0; i != 16; ++i)
1643       if (!isConstantOrUndef(N->getMaskElt(i), i*2+1))
1644         return false;
1645   } else if (ShuffleKind == 2) {
1646     if (!IsLE)
1647       return false;
1648     for (unsigned i = 0; i != 16; ++i)
1649       if (!isConstantOrUndef(N->getMaskElt(i), i*2))
1650         return false;
1651   } else if (ShuffleKind == 1) {
1652     unsigned j = IsLE ? 0 : 1;
1653     for (unsigned i = 0; i != 8; ++i)
1654       if (!isConstantOrUndef(N->getMaskElt(i),    i*2+j) ||
1655           !isConstantOrUndef(N->getMaskElt(i+8),  i*2+j))
1656         return false;
1657   }
1658   return true;
1659 }
1660 
1661 /// isVPKUWUMShuffleMask - Return true if this is the shuffle mask for a
1662 /// VPKUWUM instruction.
1663 /// The ShuffleKind distinguishes between big-endian operations with
1664 /// two different inputs (0), either-endian operations with two identical
1665 /// inputs (1), and little-endian operations with two different inputs (2).
1666 /// For the latter, the input operands are swapped (see PPCInstrAltivec.td).
1667 bool PPC::isVPKUWUMShuffleMask(ShuffleVectorSDNode *N, unsigned ShuffleKind,
1668                                SelectionDAG &DAG) {
1669   bool IsLE = DAG.getDataLayout().isLittleEndian();
1670   if (ShuffleKind == 0) {
1671     if (IsLE)
1672       return false;
1673     for (unsigned i = 0; i != 16; i += 2)
1674       if (!isConstantOrUndef(N->getMaskElt(i  ),  i*2+2) ||
1675           !isConstantOrUndef(N->getMaskElt(i+1),  i*2+3))
1676         return false;
1677   } else if (ShuffleKind == 2) {
1678     if (!IsLE)
1679       return false;
1680     for (unsigned i = 0; i != 16; i += 2)
1681       if (!isConstantOrUndef(N->getMaskElt(i  ),  i*2) ||
1682           !isConstantOrUndef(N->getMaskElt(i+1),  i*2+1))
1683         return false;
1684   } else if (ShuffleKind == 1) {
1685     unsigned j = IsLE ? 0 : 2;
1686     for (unsigned i = 0; i != 8; i += 2)
1687       if (!isConstantOrUndef(N->getMaskElt(i  ),  i*2+j)   ||
1688           !isConstantOrUndef(N->getMaskElt(i+1),  i*2+j+1) ||
1689           !isConstantOrUndef(N->getMaskElt(i+8),  i*2+j)   ||
1690           !isConstantOrUndef(N->getMaskElt(i+9),  i*2+j+1))
1691         return false;
1692   }
1693   return true;
1694 }
1695 
1696 /// isVPKUDUMShuffleMask - Return true if this is the shuffle mask for a
1697 /// VPKUDUM instruction, AND the VPKUDUM instruction exists for the
1698 /// current subtarget.
1699 ///
1700 /// The ShuffleKind distinguishes between big-endian operations with
1701 /// two different inputs (0), either-endian operations with two identical
1702 /// inputs (1), and little-endian operations with two different inputs (2).
1703 /// For the latter, the input operands are swapped (see PPCInstrAltivec.td).
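/// For example (derived from the checks below), a big-endian VPKUDUM of two
/// distinct inputs (ShuffleKind 0) keeps the low word of each doubleword,
/// i.e. the mask <4,5,6,7, 12,13,14,15, 20,21,22,23, 28,29,30,31>.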
1704 bool PPC::isVPKUDUMShuffleMask(ShuffleVectorSDNode *N, unsigned ShuffleKind,
1705                                SelectionDAG &DAG) {
1706   const PPCSubtarget& Subtarget =
1707       static_cast<const PPCSubtarget&>(DAG.getSubtarget());
1708   if (!Subtarget.hasP8Vector())
1709     return false;
1710 
1711   bool IsLE = DAG.getDataLayout().isLittleEndian();
1712   if (ShuffleKind == 0) {
1713     if (IsLE)
1714       return false;
1715     for (unsigned i = 0; i != 16; i += 4)
1716       if (!isConstantOrUndef(N->getMaskElt(i  ),  i*2+4) ||
1717           !isConstantOrUndef(N->getMaskElt(i+1),  i*2+5) ||
1718           !isConstantOrUndef(N->getMaskElt(i+2),  i*2+6) ||
1719           !isConstantOrUndef(N->getMaskElt(i+3),  i*2+7))
1720         return false;
1721   } else if (ShuffleKind == 2) {
1722     if (!IsLE)
1723       return false;
1724     for (unsigned i = 0; i != 16; i += 4)
1725       if (!isConstantOrUndef(N->getMaskElt(i  ),  i*2) ||
1726           !isConstantOrUndef(N->getMaskElt(i+1),  i*2+1) ||
1727           !isConstantOrUndef(N->getMaskElt(i+2),  i*2+2) ||
1728           !isConstantOrUndef(N->getMaskElt(i+3),  i*2+3))
1729         return false;
1730   } else if (ShuffleKind == 1) {
1731     unsigned j = IsLE ? 0 : 4;
1732     for (unsigned i = 0; i != 8; i += 4)
1733       if (!isConstantOrUndef(N->getMaskElt(i  ),  i*2+j)   ||
1734           !isConstantOrUndef(N->getMaskElt(i+1),  i*2+j+1) ||
1735           !isConstantOrUndef(N->getMaskElt(i+2),  i*2+j+2) ||
1736           !isConstantOrUndef(N->getMaskElt(i+3),  i*2+j+3) ||
1737           !isConstantOrUndef(N->getMaskElt(i+8),  i*2+j)   ||
1738           !isConstantOrUndef(N->getMaskElt(i+9),  i*2+j+1) ||
1739           !isConstantOrUndef(N->getMaskElt(i+10), i*2+j+2) ||
1740           !isConstantOrUndef(N->getMaskElt(i+11), i*2+j+3))
1741         return false;
1742   }
1743   return true;
1744 }
1745 
1746 /// isVMerge - Common function, used to match vmrg* shuffles.
1747 ///
1748 static bool isVMerge(ShuffleVectorSDNode *N, unsigned UnitSize,
1749                      unsigned LHSStart, unsigned RHSStart) {
1750   if (N->getValueType(0) != MVT::v16i8)
1751     return false;
1752   assert((UnitSize == 1 || UnitSize == 2 || UnitSize == 4) &&
1753          "Unsupported merge size!");
1754 
1755   for (unsigned i = 0; i != 8/UnitSize; ++i)     // Step over units
1756     for (unsigned j = 0; j != UnitSize; ++j) {   // Step over bytes within unit
1757       if (!isConstantOrUndef(N->getMaskElt(i*UnitSize*2+j),
1758                              LHSStart+j+i*UnitSize) ||
1759           !isConstantOrUndef(N->getMaskElt(i*UnitSize*2+UnitSize+j),
1760                              RHSStart+j+i*UnitSize))
1761         return false;
1762     }
1763   return true;
1764 }
1765 
1766 /// isVMRGLShuffleMask - Return true if this is a shuffle mask suitable for
1767 /// a VMRGL* instruction with the specified unit size (1,2 or 4 bytes).
1768 /// The ShuffleKind distinguishes between big-endian merges with two
1769 /// different inputs (0), either-endian merges with two identical inputs (1),
1770 /// and little-endian merges with two different inputs (2).  For the latter,
1771 /// the input operands are swapped (see PPCInstrAltivec.td).
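/// For example (derived from isVMerge above), a big-endian vmrglw of two
/// distinct inputs (UnitSize 4, ShuffleKind 0) corresponds to the mask
/// <8,9,10,11, 24,25,26,27, 12,13,14,15, 28,29,30,31>.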
1772 bool PPC::isVMRGLShuffleMask(ShuffleVectorSDNode *N, unsigned UnitSize,
1773                              unsigned ShuffleKind, SelectionDAG &DAG) {
1774   if (DAG.getDataLayout().isLittleEndian()) {
1775     if (ShuffleKind == 1) // unary
1776       return isVMerge(N, UnitSize, 0, 0);
1777     else if (ShuffleKind == 2) // swapped
1778       return isVMerge(N, UnitSize, 0, 16);
1779     else
1780       return false;
1781   } else {
1782     if (ShuffleKind == 1) // unary
1783       return isVMerge(N, UnitSize, 8, 8);
1784     else if (ShuffleKind == 0) // normal
1785       return isVMerge(N, UnitSize, 8, 24);
1786     else
1787       return false;
1788   }
1789 }
1790 
1791 /// isVMRGHShuffleMask - Return true if this is a shuffle mask suitable for
1792 /// a VMRGH* instruction with the specified unit size (1,2 or 4 bytes).
1793 /// The ShuffleKind distinguishes between big-endian merges with two
1794 /// different inputs (0), either-endian merges with two identical inputs (1),
1795 /// and little-endian merges with two different inputs (2).  For the latter,
1796 /// the input operands are swapped (see PPCInstrAltivec.td).
1797 bool PPC::isVMRGHShuffleMask(ShuffleVectorSDNode *N, unsigned UnitSize,
1798                              unsigned ShuffleKind, SelectionDAG &DAG) {
1799   if (DAG.getDataLayout().isLittleEndian()) {
1800     if (ShuffleKind == 1) // unary
1801       return isVMerge(N, UnitSize, 8, 8);
1802     else if (ShuffleKind == 2) // swapped
1803       return isVMerge(N, UnitSize, 8, 24);
1804     else
1805       return false;
1806   } else {
1807     if (ShuffleKind == 1) // unary
1808       return isVMerge(N, UnitSize, 0, 0);
1809     else if (ShuffleKind == 0) // normal
1810       return isVMerge(N, UnitSize, 0, 16);
1811     else
1812       return false;
1813   }
1814 }
1815 
1816 /**
1817  * Common function used to match vmrgew and vmrgow shuffles
1818  *
1819  * The indexOffset determines whether to look for even or odd words in
 * the shuffle mask. This is based on the endianness of the target
1821  * machine.
1822  *   - Little Endian:
1823  *     - Use offset of 0 to check for odd elements
1824  *     - Use offset of 4 to check for even elements
1825  *   - Big Endian:
1826  *     - Use offset of 0 to check for even elements
1827  *     - Use offset of 4 to check for odd elements
 * A detailed description of the vector element ordering for little endian and
 * big endian can be found in the article "Targeting your applications - what
 * little endian and big endian IBM XL C/C++ compiler differences mean to you"
 * at
 * http://www.ibm.com/developerworks/library/l-ibm-xl-c-cpp-compiler/index.html
1833  *
1834  * The mask to the shuffle vector instruction specifies the indices of the
1835  * elements from the two input vectors to place in the result. The elements are
 * numbered in array-access order, starting with the first vector. These vectors
 * are always of type v16i8, thus each vector will contain 16 byte-sized
 * elements. More info on shuffle vectors can be found in the Language
 * Reference: http://llvm.org/docs/LangRef.html#shufflevector-instruction
1841  *
1842  * The RHSStartValue indicates whether the same input vectors are used (unary)
1843  * or two different input vectors are used, based on the following:
 *   - If the instruction uses the same vector for both inputs, the range of the
 *     indices will be 0 to 15. In this case, the RHSStartValue passed should
 *     be 0.
 *   - If the instruction has two different vectors then the range of the
 *     indices will be 0 to 31. In this case, the RHSStartValue passed should
 *     be 16 (indices 0-15 specify elements in the first vector while indices 16
 *     to 31 specify elements in the second vector).
1851  *
1852  * \param[in] N The shuffle vector SD Node to analyze
1853  * \param[in] IndexOffset Specifies whether to look for even or odd elements
1854  * \param[in] RHSStartValue Specifies the starting index for the righthand input
1855  * vector to the shuffle_vector instruction
1856  * \return true iff this shuffle vector represents an even or odd word merge
1857  */
1858 static bool isVMerge(ShuffleVectorSDNode *N, unsigned IndexOffset,
1859                      unsigned RHSStartValue) {
1860   if (N->getValueType(0) != MVT::v16i8)
1861     return false;
1862 
1863   for (unsigned i = 0; i < 2; ++i)
1864     for (unsigned j = 0; j < 4; ++j)
1865       if (!isConstantOrUndef(N->getMaskElt(i*4+j),
1866                              i*RHSStartValue+j+IndexOffset) ||
1867           !isConstantOrUndef(N->getMaskElt(i*4+j+8),
1868                              i*RHSStartValue+j+IndexOffset+8))
1869         return false;
1870   return true;
1871 }
1872 
1873 /**
1874  * Determine if the specified shuffle mask is suitable for the vmrgew or
1875  * vmrgow instructions.
1876  *
1877  * \param[in] N The shuffle vector SD Node to analyze
1878  * \param[in] CheckEven Check for an even merge (true) or an odd merge (false)
1879  * \param[in] ShuffleKind Identify the type of merge:
1880  *   - 0 = big-endian merge with two different inputs;
1881  *   - 1 = either-endian merge with two identical inputs;
1882  *   - 2 = little-endian merge with two different inputs (inputs are swapped for
1883  *     little-endian merges).
1884  * \param[in] DAG The current SelectionDAG
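 *
 * For example (worked from the checks below), a big-endian even merge of two
 * distinct inputs (CheckEven == true, ShuffleKind == 0) corresponds to the
 * byte mask <0,1,2,3, 16,17,18,19, 8,9,10,11, 24,25,26,27>.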
 * \return true iff this shuffle mask is suitable for the requested vmrgew or
 * vmrgow merge
1886  */
1887 bool PPC::isVMRGEOShuffleMask(ShuffleVectorSDNode *N, bool CheckEven,
1888                               unsigned ShuffleKind, SelectionDAG &DAG) {
1889   if (DAG.getDataLayout().isLittleEndian()) {
1890     unsigned indexOffset = CheckEven ? 4 : 0;
1891     if (ShuffleKind == 1) // Unary
1892       return isVMerge(N, indexOffset, 0);
1893     else if (ShuffleKind == 2) // swapped
1894       return isVMerge(N, indexOffset, 16);
1895     else
1896       return false;
1897   }
1898   else {
1899     unsigned indexOffset = CheckEven ? 0 : 4;
1900     if (ShuffleKind == 1) // Unary
1901       return isVMerge(N, indexOffset, 0);
1902     else if (ShuffleKind == 0) // Normal
1903       return isVMerge(N, indexOffset, 16);
1904     else
1905       return false;
1906   }
1907   return false;
1908 }
1909 
1910 /// isVSLDOIShuffleMask - If this is a vsldoi shuffle mask, return the shift
1911 /// amount, otherwise return -1.
1912 /// The ShuffleKind distinguishes between big-endian operations with two
1913 /// different inputs (0), either-endian operations with two identical inputs
1914 /// (1), and little-endian operations with two different inputs (2).  For the
1915 /// latter, the input operands are swapped (see PPCInstrAltivec.td).
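/// For example, on a big-endian subtarget with ShuffleKind 0, the mask
/// <3,4,5,...,18> (elements numbered consecutively from 3) yields a shift
/// amount of 3; the equivalent ShuffleKind 2 mask on little-endian yields
/// 16 - 3 = 13.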
1916 int PPC::isVSLDOIShuffleMask(SDNode *N, unsigned ShuffleKind,
1917                              SelectionDAG &DAG) {
1918   if (N->getValueType(0) != MVT::v16i8)
1919     return -1;
1920 
1921   ShuffleVectorSDNode *SVOp = cast<ShuffleVectorSDNode>(N);
1922 
1923   // Find the first non-undef value in the shuffle mask.
1924   unsigned i;
1925   for (i = 0; i != 16 && SVOp->getMaskElt(i) < 0; ++i)
1926     /*search*/;
1927 
1928   if (i == 16) return -1;  // all undef.
1929 
1930   // Otherwise, check to see if the rest of the elements are consecutively
1931   // numbered from this value.
1932   unsigned ShiftAmt = SVOp->getMaskElt(i);
1933   if (ShiftAmt < i) return -1;
1934 
1935   ShiftAmt -= i;
1936   bool isLE = DAG.getDataLayout().isLittleEndian();
1937 
1938   if ((ShuffleKind == 0 && !isLE) || (ShuffleKind == 2 && isLE)) {
1939     // Check the rest of the elements to see if they are consecutive.
1940     for (++i; i != 16; ++i)
1941       if (!isConstantOrUndef(SVOp->getMaskElt(i), ShiftAmt+i))
1942         return -1;
1943   } else if (ShuffleKind == 1) {
1944     // Check the rest of the elements to see if they are consecutive.
1945     for (++i; i != 16; ++i)
1946       if (!isConstantOrUndef(SVOp->getMaskElt(i), (ShiftAmt+i) & 15))
1947         return -1;
1948   } else
1949     return -1;
1950 
1951   if (isLE)
1952     ShiftAmt = 16 - ShiftAmt;
1953 
1954   return ShiftAmt;
1955 }
1956 
1957 /// isSplatShuffleMask - Return true if the specified VECTOR_SHUFFLE operand
1958 /// specifies a splat of a single element that is suitable for input to
1959 /// one of the splat operations (VSPLTB/VSPLTH/VSPLTW/XXSPLTW/LXVDSX/etc.).
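/// For example, with EltSize == 4 a splat of word element 2 appears as the
/// byte mask <8,9,10,11> repeated four times.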
1960 bool PPC::isSplatShuffleMask(ShuffleVectorSDNode *N, unsigned EltSize) {
1961   assert(N->getValueType(0) == MVT::v16i8 && isPowerOf2_32(EltSize) &&
1962          EltSize <= 8 && "Can only handle 1,2,4,8 byte element sizes");
1963 
1964   // The consecutive indices need to specify an element, not part of two
1965   // different elements.  So abandon ship early if this isn't the case.
1966   if (N->getMaskElt(0) % EltSize != 0)
1967     return false;
1968 
1969   // This is a splat operation if each element of the permute is the same, and
1970   // if the value doesn't reference the second vector.
1971   unsigned ElementBase = N->getMaskElt(0);
1972 
1973   // FIXME: Handle UNDEF elements too!
1974   if (ElementBase >= 16)
1975     return false;
1976 
1977   // Check that the indices are consecutive, in the case of a multi-byte element
1978   // splatted with a v16i8 mask.
1979   for (unsigned i = 1; i != EltSize; ++i)
1980     if (N->getMaskElt(i) < 0 || N->getMaskElt(i) != (int)(i+ElementBase))
1981       return false;
1982 
1983   for (unsigned i = EltSize, e = 16; i != e; i += EltSize) {
1984     if (N->getMaskElt(i) < 0) continue;
1985     for (unsigned j = 0; j != EltSize; ++j)
1986       if (N->getMaskElt(i+j) != N->getMaskElt(j))
1987         return false;
1988   }
1989   return true;
1990 }
1991 
1992 /// Check that the mask is shuffling N byte elements. Within each N byte
1993 /// element of the mask, the indices could be either in increasing or
1994 /// decreasing order as long as they are consecutive.
1995 /// \param[in] N the shuffle vector SD Node to analyze
1996 /// \param[in] Width the element width in bytes, could be 2/4/8/16 (HalfWord/
1997 /// Word/DoubleWord/QuadWord).
1998 /// \param[in] StepLen the delta indices number among the N byte element, if
1999 /// the mask is in increasing/decreasing order then it is 1/-1.
2000 /// \return true iff the mask is shuffling N byte elements.
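/// For example, with Width == 4 and StepLen == 1 the mask
/// <4,5,6,7, 0,1,2,3, 12,13,14,15, 8,9,10,11> qualifies, while with
/// StepLen == -1 the byte-reversed-word mask
/// <3,2,1,0, 7,6,5,4, 11,10,9,8, 15,14,13,12> qualifies.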
2001 static bool isNByteElemShuffleMask(ShuffleVectorSDNode *N, unsigned Width,
2002                                    int StepLen) {
2003   assert((Width == 2 || Width == 4 || Width == 8 || Width == 16) &&
2004          "Unexpected element width.");
  assert((StepLen == 1 || StepLen == -1) && "Unexpected step length.");
2006 
2007   unsigned NumOfElem = 16 / Width;
2008   unsigned MaskVal[16]; //  Width is never greater than 16
2009   for (unsigned i = 0; i < NumOfElem; ++i) {
2010     MaskVal[0] = N->getMaskElt(i * Width);
2011     if ((StepLen == 1) && (MaskVal[0] % Width)) {
2012       return false;
2013     } else if ((StepLen == -1) && ((MaskVal[0] + 1) % Width)) {
2014       return false;
2015     }
2016 
2017     for (unsigned int j = 1; j < Width; ++j) {
2018       MaskVal[j] = N->getMaskElt(i * Width + j);
2019       if (MaskVal[j] != MaskVal[j-1] + StepLen) {
2020         return false;
2021       }
2022     }
2023   }
2024 
2025   return true;
2026 }
2027 
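/// isXXINSERTWMask - Return true if this VECTOR_SHUFFLE can be handled by
/// the VSX word-insert pattern; on success, ShiftElts, InsertAtByte and Swap
/// describe the rotation, insertion point and operand order to use.
/// For example (worked from the cases below), on little-endian a shuffle
/// whose word-level mask is <0, 1, 7, 3> yields ShiftElts = 3,
/// InsertAtByte = 4 and Swap = false.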
2028 bool PPC::isXXINSERTWMask(ShuffleVectorSDNode *N, unsigned &ShiftElts,
2029                           unsigned &InsertAtByte, bool &Swap, bool IsLE) {
2030   if (!isNByteElemShuffleMask(N, 4, 1))
2031     return false;
2032 
2033   // Now we look at mask elements 0,4,8,12
2034   unsigned M0 = N->getMaskElt(0) / 4;
2035   unsigned M1 = N->getMaskElt(4) / 4;
2036   unsigned M2 = N->getMaskElt(8) / 4;
2037   unsigned M3 = N->getMaskElt(12) / 4;
2038   unsigned LittleEndianShifts[] = { 2, 1, 0, 3 };
2039   unsigned BigEndianShifts[] = { 3, 0, 1, 2 };
2040 
2041   // Below, let H and L be arbitrary elements of the shuffle mask
2042   // where H is in the range [4,7] and L is in the range [0,3].
2043   // H, 1, 2, 3 or L, 5, 6, 7
2044   if ((M0 > 3 && M1 == 1 && M2 == 2 && M3 == 3) ||
2045       (M0 < 4 && M1 == 5 && M2 == 6 && M3 == 7)) {
2046     ShiftElts = IsLE ? LittleEndianShifts[M0 & 0x3] : BigEndianShifts[M0 & 0x3];
2047     InsertAtByte = IsLE ? 12 : 0;
2048     Swap = M0 < 4;
2049     return true;
2050   }
2051   // 0, H, 2, 3 or 4, L, 6, 7
2052   if ((M1 > 3 && M0 == 0 && M2 == 2 && M3 == 3) ||
2053       (M1 < 4 && M0 == 4 && M2 == 6 && M3 == 7)) {
2054     ShiftElts = IsLE ? LittleEndianShifts[M1 & 0x3] : BigEndianShifts[M1 & 0x3];
2055     InsertAtByte = IsLE ? 8 : 4;
2056     Swap = M1 < 4;
2057     return true;
2058   }
2059   // 0, 1, H, 3 or 4, 5, L, 7
2060   if ((M2 > 3 && M0 == 0 && M1 == 1 && M3 == 3) ||
2061       (M2 < 4 && M0 == 4 && M1 == 5 && M3 == 7)) {
2062     ShiftElts = IsLE ? LittleEndianShifts[M2 & 0x3] : BigEndianShifts[M2 & 0x3];
2063     InsertAtByte = IsLE ? 4 : 8;
2064     Swap = M2 < 4;
2065     return true;
2066   }
2067   // 0, 1, 2, H or 4, 5, 6, L
2068   if ((M3 > 3 && M0 == 0 && M1 == 1 && M2 == 2) ||
2069       (M3 < 4 && M0 == 4 && M1 == 5 && M2 == 6)) {
2070     ShiftElts = IsLE ? LittleEndianShifts[M3 & 0x3] : BigEndianShifts[M3 & 0x3];
2071     InsertAtByte = IsLE ? 0 : 12;
2072     Swap = M3 < 4;
2073     return true;
2074   }
2075 
2076   // If both vector operands for the shuffle are the same vector, the mask will
2077   // contain only elements from the first one and the second one will be undef.
2078   if (N->getOperand(1).isUndef()) {
2079     ShiftElts = 0;
2080     Swap = true;
2081     unsigned XXINSERTWSrcElem = IsLE ? 2 : 1;
2082     if (M0 == XXINSERTWSrcElem && M1 == 1 && M2 == 2 && M3 == 3) {
2083       InsertAtByte = IsLE ? 12 : 0;
2084       return true;
2085     }
2086     if (M0 == 0 && M1 == XXINSERTWSrcElem && M2 == 2 && M3 == 3) {
2087       InsertAtByte = IsLE ? 8 : 4;
2088       return true;
2089     }
2090     if (M0 == 0 && M1 == 1 && M2 == XXINSERTWSrcElem && M3 == 3) {
2091       InsertAtByte = IsLE ? 4 : 8;
2092       return true;
2093     }
2094     if (M0 == 0 && M1 == 1 && M2 == 2 && M3 == XXINSERTWSrcElem) {
2095       InsertAtByte = IsLE ? 0 : 12;
2096       return true;
2097     }
2098   }
2099 
2100   return false;
2101 }
2102 
2103 bool PPC::isXXSLDWIShuffleMask(ShuffleVectorSDNode *N, unsigned &ShiftElts,
2104                                bool &Swap, bool IsLE) {
2105   assert(N->getValueType(0) == MVT::v16i8 && "Shuffle vector expects v16i8");
2106   // Ensure each byte index of the word is consecutive.
2107   if (!isNByteElemShuffleMask(N, 4, 1))
2108     return false;
2109 
2110   // Now we look at mask elements 0,4,8,12, which are the beginning of words.
2111   unsigned M0 = N->getMaskElt(0) / 4;
2112   unsigned M1 = N->getMaskElt(4) / 4;
2113   unsigned M2 = N->getMaskElt(8) / 4;
2114   unsigned M3 = N->getMaskElt(12) / 4;
2115 
2116   // If both vector operands for the shuffle are the same vector, the mask will
2117   // contain only elements from the first one and the second one will be undef.
2118   if (N->getOperand(1).isUndef()) {
2119     assert(M0 < 4 && "Indexing into an undef vector?");
2120     if (M1 != (M0 + 1) % 4 || M2 != (M1 + 1) % 4 || M3 != (M2 + 1) % 4)
2121       return false;
2122 
2123     ShiftElts = IsLE ? (4 - M0) % 4 : M0;
2124     Swap = false;
2125     return true;
2126   }
2127 
2128   // Ensure each word index of the ShuffleVector Mask is consecutive.
2129   if (M1 != (M0 + 1) % 8 || M2 != (M1 + 1) % 8 || M3 != (M2 + 1) % 8)
2130     return false;
2131 
2132   if (IsLE) {
2133     if (M0 == 0 || M0 == 7 || M0 == 6 || M0 == 5) {
2134       // Input vectors don't need to be swapped if the leading element
2135       // of the result is one of the 3 left elements of the second vector
2136       // (or if there is no shift to be done at all).
2137       Swap = false;
2138       ShiftElts = (8 - M0) % 8;
2139     } else if (M0 == 4 || M0 == 3 || M0 == 2 || M0 == 1) {
2140       // Input vectors need to be swapped if the leading element
2141       // of the result is one of the 3 left elements of the first vector
2142       // (or if we're shifting by 4 - thereby simply swapping the vectors).
2143       Swap = true;
2144       ShiftElts = (4 - M0) % 4;
2145     }
2146 
2147     return true;
2148   } else {                                          // BE
2149     if (M0 == 0 || M0 == 1 || M0 == 2 || M0 == 3) {
2150       // Input vectors don't need to be swapped if the leading element
2151       // of the result is one of the 4 elements of the first vector.
2152       Swap = false;
2153       ShiftElts = M0;
2154     } else if (M0 == 4 || M0 == 5 || M0 == 6 || M0 == 7) {
2155       // Input vectors need to be swapped if the leading element
2156       // of the result is one of the 4 elements of the right vector.
2157       Swap = true;
2158       ShiftElts = M0 - 4;
2159     }
2160 
2161     return true;
2162   }
2163 }
2164 
static bool isXXBRShuffleMaskHelper(ShuffleVectorSDNode *N, int Width) {
2166   assert(N->getValueType(0) == MVT::v16i8 && "Shuffle vector expects v16i8");
2167 
2168   if (!isNByteElemShuffleMask(N, Width, -1))
2169     return false;
2170 
2171   for (int i = 0; i < 16; i += Width)
2172     if (N->getMaskElt(i) != i + Width - 1)
2173       return false;
2174 
2175   return true;
2176 }
2177 
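// The following predicates match the byte-reversal masks used by the XXBR*
// instructions: each Width-byte element has its bytes in descending order.
// For example, isXXBRWShuffleMask accepts
// <3,2,1,0, 7,6,5,4, 11,10,9,8, 15,14,13,12>.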
2178 bool PPC::isXXBRHShuffleMask(ShuffleVectorSDNode *N) {
2179   return isXXBRShuffleMaskHelper(N, 2);
2180 }
2181 
2182 bool PPC::isXXBRWShuffleMask(ShuffleVectorSDNode *N) {
2183   return isXXBRShuffleMaskHelper(N, 4);
2184 }
2185 
2186 bool PPC::isXXBRDShuffleMask(ShuffleVectorSDNode *N) {
2187   return isXXBRShuffleMaskHelper(N, 8);
2188 }
2189 
2190 bool PPC::isXXBRQShuffleMask(ShuffleVectorSDNode *N) {
2191   return isXXBRShuffleMaskHelper(N, 16);
2192 }
2193 
2194 /// Can node \p N be lowered to an XXPERMDI instruction? If so, set \p Swap
2195 /// if the inputs to the instruction should be swapped and set \p DM to the
2196 /// value for the immediate.
2197 /// Specifically, set \p Swap to true only if \p N can be lowered to XXPERMDI
2198 /// AND element 0 of the result comes from the first input (LE) or second input
2199 /// (BE). Set \p DM to the calculated result (0-3) only if \p N can be lowered.
2200 /// \return true iff the given mask of shuffle node \p N is a XXPERMDI shuffle
2201 /// mask.
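/// For example (worked from the arithmetic below), on little-endian a mask
/// selecting doublewords <2, 1> of the concatenated inputs yields
/// Swap = false and DM = 1.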
2202 bool PPC::isXXPERMDIShuffleMask(ShuffleVectorSDNode *N, unsigned &DM,
2203                                bool &Swap, bool IsLE) {
2204   assert(N->getValueType(0) == MVT::v16i8 && "Shuffle vector expects v16i8");
2205 
2206   // Ensure each byte index of the double word is consecutive.
2207   if (!isNByteElemShuffleMask(N, 8, 1))
2208     return false;
2209 
2210   unsigned M0 = N->getMaskElt(0) / 8;
2211   unsigned M1 = N->getMaskElt(8) / 8;
2212   assert(((M0 | M1) < 4) && "A mask element out of bounds?");
2213 
2214   // If both vector operands for the shuffle are the same vector, the mask will
2215   // contain only elements from the first one and the second one will be undef.
2216   if (N->getOperand(1).isUndef()) {
2217     if ((M0 | M1) < 2) {
2218       DM = IsLE ? (((~M1) & 1) << 1) + ((~M0) & 1) : (M0 << 1) + (M1 & 1);
2219       Swap = false;
2220       return true;
2221     } else
2222       return false;
2223   }
2224 
2225   if (IsLE) {
2226     if (M0 > 1 && M1 < 2) {
2227       Swap = false;
2228     } else if (M0 < 2 && M1 > 1) {
2229       M0 = (M0 + 2) % 4;
2230       M1 = (M1 + 2) % 4;
2231       Swap = true;
2232     } else
2233       return false;
2234 
    // Note: if control flow reaches here, Swap has already been set above.
2236     DM = (((~M1) & 1) << 1) + ((~M0) & 1);
2237     return true;
2238   } else { // BE
2239     if (M0 < 2 && M1 > 1) {
2240       Swap = false;
2241     } else if (M0 > 1 && M1 < 2) {
2242       M0 = (M0 + 2) % 4;
2243       M1 = (M1 + 2) % 4;
2244       Swap = true;
2245     } else
2246       return false;
2247 
    // Note: if control flow reaches here, Swap has already been set above.
2249     DM = (M0 << 1) + (M1 & 1);
2250     return true;
2251   }
2252 }
2253 
2254 
2255 /// getSplatIdxForPPCMnemonics - Return the splat index as a value that is
2256 /// appropriate for PPC mnemonics (which have a big endian bias - namely
2257 /// elements are counted from the left of the vector register).
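/// For example, a v16i8 mask splatting word element 2 (EltSize == 4, mask
/// element 0 == 8) returns 2 on big-endian but 1 on little-endian.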
2258 unsigned PPC::getSplatIdxForPPCMnemonics(SDNode *N, unsigned EltSize,
2259                                          SelectionDAG &DAG) {
2260   ShuffleVectorSDNode *SVOp = cast<ShuffleVectorSDNode>(N);
2261   assert(isSplatShuffleMask(SVOp, EltSize));
2262   if (DAG.getDataLayout().isLittleEndian())
2263     return (16 / EltSize) - 1 - (SVOp->getMaskElt(0) / EltSize);
2264   else
2265     return SVOp->getMaskElt(0) / EltSize;
2266 }
2267 
2268 /// get_VSPLTI_elt - If this is a build_vector of constants which can be formed
2269 /// by using a vspltis[bhw] instruction of the specified element size, return
2270 /// the constant being splatted.  The ByteSize field indicates the number of
2271 /// bytes of each element [124] -> [bhw].
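/// For example (following the logic below), a v8i16 build_vector of eight
/// 0xFFFE constants queried with ByteSize == 2 returns the constant -2,
/// suitable for "vspltish -2".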
2272 SDValue PPC::get_VSPLTI_elt(SDNode *N, unsigned ByteSize, SelectionDAG &DAG) {
2273   SDValue OpVal(nullptr, 0);
2274 
2275   // If ByteSize of the splat is bigger than the element size of the
2276   // build_vector, then we have a case where we are checking for a splat where
2277   // multiple elements of the buildvector are folded together into a single
  // logical element of the splat (e.g. "vspltish 1" to splat {0,1}*8).
2279   unsigned EltSize = 16/N->getNumOperands();
2280   if (EltSize < ByteSize) {
2281     unsigned Multiple = ByteSize/EltSize;   // Number of BV entries per spltval.
2282     SDValue UniquedVals[4];
2283     assert(Multiple > 1 && Multiple <= 4 && "How can this happen?");
2284 
    // See if corresponding elements of each chunk in the buildvector agree.
    for (unsigned i = 0, e = N->getNumOperands(); i != e; ++i) {
      if (N->getOperand(i).isUndef()) continue;
      // If the element isn't a constant, bail out completely.
2289       if (!isa<ConstantSDNode>(N->getOperand(i))) return SDValue();
2290 
2291       if (!UniquedVals[i&(Multiple-1)].getNode())
2292         UniquedVals[i&(Multiple-1)] = N->getOperand(i);
2293       else if (UniquedVals[i&(Multiple-1)] != N->getOperand(i))
2294         return SDValue();  // no match.
2295     }
2296 
2297     // Okay, if we reached this point, UniquedVals[0..Multiple-1] contains
2298     // either constant or undef values that are identical for each chunk.  See
2299     // if these chunks can form into a larger vspltis*.
2300 
2301     // Check to see if all of the leading entries are either 0 or -1.  If
2302     // neither, then this won't fit into the immediate field.
2303     bool LeadingZero = true;
2304     bool LeadingOnes = true;
2305     for (unsigned i = 0; i != Multiple-1; ++i) {
2306       if (!UniquedVals[i].getNode()) continue;  // Must have been undefs.
2307 
2308       LeadingZero &= isNullConstant(UniquedVals[i]);
2309       LeadingOnes &= isAllOnesConstant(UniquedVals[i]);
2310     }
2311     // Finally, check the least significant entry.
2312     if (LeadingZero) {
2313       if (!UniquedVals[Multiple-1].getNode())
2314         return DAG.getTargetConstant(0, SDLoc(N), MVT::i32);  // 0,0,0,undef
2315       int Val = cast<ConstantSDNode>(UniquedVals[Multiple-1])->getZExtValue();
2316       if (Val < 16)                                   // 0,0,0,4 -> vspltisw(4)
2317         return DAG.getTargetConstant(Val, SDLoc(N), MVT::i32);
2318     }
2319     if (LeadingOnes) {
2320       if (!UniquedVals[Multiple-1].getNode())
2321         return DAG.getTargetConstant(~0U, SDLoc(N), MVT::i32); // -1,-1,-1,undef
      int Val = cast<ConstantSDNode>(UniquedVals[Multiple-1])->getSExtValue();
2323       if (Val >= -16)                            // -1,-1,-1,-2 -> vspltisw(-2)
2324         return DAG.getTargetConstant(Val, SDLoc(N), MVT::i32);
2325     }
2326 
2327     return SDValue();
2328   }
2329 
2330   // Check to see if this buildvec has a single non-undef value in its elements.
2331   for (unsigned i = 0, e = N->getNumOperands(); i != e; ++i) {
2332     if (N->getOperand(i).isUndef()) continue;
2333     if (!OpVal.getNode())
2334       OpVal = N->getOperand(i);
2335     else if (OpVal != N->getOperand(i))
2336       return SDValue();
2337   }
2338 
2339   if (!OpVal.getNode()) return SDValue();  // All UNDEF: use implicit def.
2340 
2341   unsigned ValSizeInBytes = EltSize;
2342   uint64_t Value = 0;
2343   if (ConstantSDNode *CN = dyn_cast<ConstantSDNode>(OpVal)) {
2344     Value = CN->getZExtValue();
2345   } else if (ConstantFPSDNode *CN = dyn_cast<ConstantFPSDNode>(OpVal)) {
2346     assert(CN->getValueType(0) == MVT::f32 && "Only one legal FP vector type!");
2347     Value = FloatToBits(CN->getValueAPF().convertToFloat());
2348   }
2349 
2350   // If the splat value is larger than the element value, then we can never do
2351   // this splat.  The only case that we could fit the replicated bits into our
2352   // immediate field for would be zero, and we prefer to use vxor for it.
2353   if (ValSizeInBytes < ByteSize) return SDValue();
2354 
2355   // If the element value is larger than the splat value, check if it consists
2356   // of a repeated bit pattern of size ByteSize.
2357   if (!APInt(ValSizeInBytes * 8, Value).isSplat(ByteSize * 8))
2358     return SDValue();
2359 
2360   // Properly sign extend the value.
2361   int MaskVal = SignExtend32(Value, ByteSize * 8);
2362 
2363   // If this is zero, don't match, zero matches ISD::isBuildVectorAllZeros.
2364   if (MaskVal == 0) return SDValue();
2365 
  // Finally, if this value fits in a 5-bit sext field, return it.
2367   if (SignExtend32<5>(MaskVal) == MaskVal)
2368     return DAG.getTargetConstant(MaskVal, SDLoc(N), MVT::i32);
2369   return SDValue();
2370 }
2371 
2372 /// isQVALIGNIShuffleMask - If this is a qvaligni shuffle mask, return the shift
2373 /// amount, otherwise return -1.
2374 int PPC::isQVALIGNIShuffleMask(SDNode *N) {
2375   EVT VT = N->getValueType(0);
2376   if (VT != MVT::v4f64 && VT != MVT::v4f32 && VT != MVT::v4i1)
2377     return -1;
2378 
2379   ShuffleVectorSDNode *SVOp = cast<ShuffleVectorSDNode>(N);
2380 
2381   // Find the first non-undef value in the shuffle mask.
2382   unsigned i;
2383   for (i = 0; i != 4 && SVOp->getMaskElt(i) < 0; ++i)
2384     /*search*/;
2385 
2386   if (i == 4) return -1;  // all undef.
2387 
2388   // Otherwise, check to see if the rest of the elements are consecutively
2389   // numbered from this value.
2390   unsigned ShiftAmt = SVOp->getMaskElt(i);
2391   if (ShiftAmt < i) return -1;
2392   ShiftAmt -= i;
2393 
2394   // Check the rest of the elements to see if they are consecutive.
2395   for (++i; i != 4; ++i)
2396     if (!isConstantOrUndef(SVOp->getMaskElt(i), ShiftAmt+i))
2397       return -1;
2398 
2399   return ShiftAmt;
2400 }
2401 
2402 //===----------------------------------------------------------------------===//
2403 //  Addressing Mode Selection
2404 //===----------------------------------------------------------------------===//
2405 
2406 /// isIntS16Immediate - This method tests to see if the node is either a 32-bit
2407 /// or 64-bit immediate, and if the value can be accurately represented as a
/// sign extension from a 16-bit value.  If so, this returns true and sets Imm
/// to the immediate value.
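/// For example, an i32 constant -32768 (0xFFFF8000) returns true with
/// Imm == -32768, while an i32 constant 32768 (0x00008000) returns false
/// because truncating it to 16 bits does not round-trip.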
2410 bool llvm::isIntS16Immediate(SDNode *N, int16_t &Imm) {
2411   if (!isa<ConstantSDNode>(N))
2412     return false;
2413 
2414   Imm = (int16_t)cast<ConstantSDNode>(N)->getZExtValue();
2415   if (N->getValueType(0) == MVT::i32)
2416     return Imm == (int32_t)cast<ConstantSDNode>(N)->getZExtValue();
2417   else
2418     return Imm == (int64_t)cast<ConstantSDNode>(N)->getZExtValue();
2419 }
2420 bool llvm::isIntS16Immediate(SDValue Op, int16_t &Imm) {
2421   return isIntS16Immediate(Op.getNode(), Imm);
2422 }
2423 
2424 
2425 /// SelectAddressEVXRegReg - Given the specified address, check to see if it can
2426 /// be represented as an indexed [r+r] operation.
2427 bool PPCTargetLowering::SelectAddressEVXRegReg(SDValue N, SDValue &Base,
2428                                                SDValue &Index,
2429                                                SelectionDAG &DAG) const {
  for (SDNode::use_iterator UI = N->use_begin(), E = N->use_end();
       UI != E; ++UI) {
    if (MemSDNode *Memop = dyn_cast<MemSDNode>(*UI)) {
      if (Memop->getMemoryVT() == MVT::f64) {
        Base = N.getOperand(0);
        Index = N.getOperand(1);
        return true;
      }
    }
2439   }
2440   return false;
2441 }
2442 
/// isIntS34Immediate - This method tests whether the value of the given node
/// can be accurately represented as a sign extension from a 34-bit value.  If
/// so, this returns true and sets Imm to the immediate value.
2446 bool llvm::isIntS34Immediate(SDNode *N, int64_t &Imm) {
2447   if (!isa<ConstantSDNode>(N))
2448     return false;
2449 
2450   Imm = (int64_t)cast<ConstantSDNode>(N)->getZExtValue();
2451   return isInt<34>(Imm);
2452 }
2453 bool llvm::isIntS34Immediate(SDValue Op, int64_t &Imm) {
2454   return isIntS34Immediate(Op.getNode(), Imm);
2455 }
2456 
/// SelectAddressRegReg - Given the specified address, check to see if it
2458 /// can be represented as an indexed [r+r] operation.  Returns false if it
2459 /// can be more efficiently represented as [r+imm]. If \p EncodingAlignment is
2460 /// non-zero and N can be represented by a base register plus a signed 16-bit
2461 /// displacement, make a more precise judgement by checking (displacement % \p
2462 /// EncodingAlignment).
2463 bool PPCTargetLowering::SelectAddressRegReg(
2464     SDValue N, SDValue &Base, SDValue &Index, SelectionDAG &DAG,
2465     MaybeAlign EncodingAlignment) const {
  // If we have a PC Relative target flag, don't select as [reg+reg]. It will
  // be a [pc+imm].
2468   if (SelectAddressPCRel(N, Base))
2469     return false;
2470 
2471   int16_t Imm = 0;
2472   if (N.getOpcode() == ISD::ADD) {
    // Is there any SPE f64 load/store that can't handle a 16-bit offset?
    // SPE load/store instructions can only handle 8-bit offsets.
    if (hasSPE() && SelectAddressEVXRegReg(N, Base, Index, DAG))
      return true;
2477     if (isIntS16Immediate(N.getOperand(1), Imm) &&
2478         (!EncodingAlignment || isAligned(*EncodingAlignment, Imm)))
2479       return false; // r+i
2480     if (N.getOperand(1).getOpcode() == PPCISD::Lo)
2481       return false;    // r+i
2482 
2483     Base = N.getOperand(0);
2484     Index = N.getOperand(1);
2485     return true;
2486   } else if (N.getOpcode() == ISD::OR) {
2487     if (isIntS16Immediate(N.getOperand(1), Imm) &&
2488         (!EncodingAlignment || isAligned(*EncodingAlignment, Imm)))
      return false; // r+i; fold it if we can.
2490 
2491     // If this is an or of disjoint bitfields, we can codegen this as an add
2492     // (for better address arithmetic) if the LHS and RHS of the OR are provably
2493     // disjoint.
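    // For example (illustrative), with LHS == (x << 16) and RHS ==
    // (y & 0xFFFF), every bit is known zero on one side or the other, so
    // (or LHS, RHS) equals (add LHS, RHS) and can be selected as [r+r].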
2494     KnownBits LHSKnown = DAG.computeKnownBits(N.getOperand(0));
2495 
2496     if (LHSKnown.Zero.getBoolValue()) {
2497       KnownBits RHSKnown = DAG.computeKnownBits(N.getOperand(1));
2498       // If all of the bits are known zero on the LHS or RHS, the add won't
2499       // carry.
2500       if (~(LHSKnown.Zero | RHSKnown.Zero) == 0) {
2501         Base = N.getOperand(0);
2502         Index = N.getOperand(1);
2503         return true;
2504       }
2505     }
2506   }
2507 
2508   return false;
2509 }
2510 
2511 // If we happen to be doing an i64 load or store into a stack slot that has
2512 // less than a 4-byte alignment, then the frame-index elimination may need to
2513 // use an indexed load or store instruction (because the offset may not be a
2514 // multiple of 4). The extra register needed to hold the offset comes from the
2515 // register scavenger, and it is possible that the scavenger will need to use
2516 // an emergency spill slot. As a result, we need to make sure that a spill slot
2517 // is allocated when doing an i64 load/store into a less-than-4-byte-aligned
2518 // stack slot.
2519 static void fixupFuncForFI(SelectionDAG &DAG, int FrameIdx, EVT VT) {
2520   // FIXME: This does not handle the LWA case.
2521   if (VT != MVT::i64)
2522     return;
2523 
2524   // NOTE: We'll exclude negative FIs here, which come from argument
2525   // lowering, because there are no known test cases triggering this problem
2526   // using packed structures (or similar). We can remove this exclusion if
  // we find such a test case. The reason this is so test-case driven is
  // that this entire 'fixup' exists only to prevent crashes (from the
  // register scavenger) on not-really-valid inputs. For example, if we have:
  //   %a = alloca i1
  //   %b = bitcast i1* %a to i64*
  //   store i64 0, i64* %b
2533   // then the store should really be marked as 'align 1', but is not. If it
2534   // were marked as 'align 1' then the indexed form would have been
2535   // instruction-selected initially, and the problem this 'fixup' is preventing
2536   // won't happen regardless.
2537   if (FrameIdx < 0)
2538     return;
2539 
2540   MachineFunction &MF = DAG.getMachineFunction();
2541   MachineFrameInfo &MFI = MF.getFrameInfo();
2542 
2543   if (MFI.getObjectAlign(FrameIdx) >= Align(4))
2544     return;
2545 
2546   PPCFunctionInfo *FuncInfo = MF.getInfo<PPCFunctionInfo>();
2547   FuncInfo->setHasNonRISpills();
2548 }
2549 
2550 /// Returns true if the address N can be represented by a base register plus
2551 /// a signed 16-bit displacement [r+imm], and if it is not better
2552 /// represented as reg+reg.  If \p EncodingAlignment is non-zero, only accept
2553 /// displacements that are multiples of that value.
2554 bool PPCTargetLowering::SelectAddressRegImm(
2555     SDValue N, SDValue &Disp, SDValue &Base, SelectionDAG &DAG,
2556     MaybeAlign EncodingAlignment) const {
  // FIXME: dl should come from the parent load or store, not from the address.
2558   SDLoc dl(N);
2559 
  // If we have a PC Relative target flag, don't select as [reg+imm]. It will
  // be a [pc+imm].
2562   if (SelectAddressPCRel(N, Base))
2563     return false;
2564 
2565   // If this can be more profitably realized as r+r, fail.
2566   if (SelectAddressRegReg(N, Disp, Base, DAG, EncodingAlignment))
2567     return false;
2568 
2569   if (N.getOpcode() == ISD::ADD) {
2570     int16_t imm = 0;
2571     if (isIntS16Immediate(N.getOperand(1), imm) &&
2572         (!EncodingAlignment || isAligned(*EncodingAlignment, imm))) {
2573       Disp = DAG.getTargetConstant(imm, dl, N.getValueType());
2574       if (FrameIndexSDNode *FI = dyn_cast<FrameIndexSDNode>(N.getOperand(0))) {
2575         Base = DAG.getTargetFrameIndex(FI->getIndex(), N.getValueType());
2576         fixupFuncForFI(DAG, FI->getIndex(), N.getValueType());
2577       } else {
2578         Base = N.getOperand(0);
2579       }
2580       return true; // [r+i]
2581     } else if (N.getOperand(1).getOpcode() == PPCISD::Lo) {
2582       // Match LOAD (ADD (X, Lo(G))).
2583       assert(!cast<ConstantSDNode>(N.getOperand(1).getOperand(1))->getZExtValue()
2584              && "Cannot handle constant offsets yet!");
2585       Disp = N.getOperand(1).getOperand(0);  // The global address.
2586       assert(Disp.getOpcode() == ISD::TargetGlobalAddress ||
2587              Disp.getOpcode() == ISD::TargetGlobalTLSAddress ||
2588              Disp.getOpcode() == ISD::TargetConstantPool ||
2589              Disp.getOpcode() == ISD::TargetJumpTable);
2590       Base = N.getOperand(0);
2591       return true;  // [&g+r]
2592     }
2593   } else if (N.getOpcode() == ISD::OR) {
2594     int16_t imm = 0;
2595     if (isIntS16Immediate(N.getOperand(1), imm) &&
2596         (!EncodingAlignment || isAligned(*EncodingAlignment, imm))) {
2597       // If this is an or of disjoint bitfields, we can codegen this as an add
2598       // (for better address arithmetic) if the LHS and RHS of the OR are
2599       // provably disjoint.
2600       KnownBits LHSKnown = DAG.computeKnownBits(N.getOperand(0));
2601 
      if ((LHSKnown.Zero.getZExtValue() | ~(uint64_t)imm) == ~0ULL) {
2603         // If all of the bits are known zero on the LHS or RHS, the add won't
2604         // carry.
2605         if (FrameIndexSDNode *FI =
2606               dyn_cast<FrameIndexSDNode>(N.getOperand(0))) {
2607           Base = DAG.getTargetFrameIndex(FI->getIndex(), N.getValueType());
2608           fixupFuncForFI(DAG, FI->getIndex(), N.getValueType());
2609         } else {
2610           Base = N.getOperand(0);
2611         }
2612         Disp = DAG.getTargetConstant(imm, dl, N.getValueType());
2613         return true;
2614       }
2615     }
2616   } else if (ConstantSDNode *CN = dyn_cast<ConstantSDNode>(N)) {
2617     // Loading from a constant address.
2618 
2619     // If this address fits entirely in a 16-bit sext immediate field, codegen
2620     // this as "d, 0"
2621     int16_t Imm;
2622     if (isIntS16Immediate(CN, Imm) &&
2623         (!EncodingAlignment || isAligned(*EncodingAlignment, Imm))) {
2624       Disp = DAG.getTargetConstant(Imm, dl, CN->getValueType(0));
2625       Base = DAG.getRegister(Subtarget.isPPC64() ? PPC::ZERO8 : PPC::ZERO,
2626                              CN->getValueType(0));
2627       return true;
2628     }
2629 
2630     // Handle 32-bit sext immediates with LIS + addr mode.
2631     if ((CN->getValueType(0) == MVT::i32 ||
2632          (int64_t)CN->getZExtValue() == (int)CN->getZExtValue()) &&
2633         (!EncodingAlignment ||
2634          isAligned(*EncodingAlignment, CN->getZExtValue()))) {
2635       int Addr = (int)CN->getZExtValue();
2636 
2637       // Otherwise, break this down into an LIS + disp.
2638       Disp = DAG.getTargetConstant((short)Addr, dl, MVT::i32);
2639 
2640       Base = DAG.getTargetConstant((Addr - (signed short)Addr) >> 16, dl,
2641                                    MVT::i32);
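      // For example (illustrative), Addr == 0x1234FFF0 yields
      // Disp == sext16(0xFFF0) == -16 and Base == 0x1235; LIS then
      // materializes 0x12350000, and 0x12350000 + (-16) == 0x1234FFF0.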
2642       unsigned Opc = CN->getValueType(0) == MVT::i32 ? PPC::LIS : PPC::LIS8;
2643       Base = SDValue(DAG.getMachineNode(Opc, dl, CN->getValueType(0), Base), 0);
2644       return true;
2645     }
2646   }
2647 
2648   Disp = DAG.getTargetConstant(0, dl, getPointerTy(DAG.getDataLayout()));
2649   if (FrameIndexSDNode *FI = dyn_cast<FrameIndexSDNode>(N)) {
2650     Base = DAG.getTargetFrameIndex(FI->getIndex(), N.getValueType());
2651     fixupFuncForFI(DAG, FI->getIndex(), N.getValueType());
2652   } else
2653     Base = N;
2654   return true;      // [r+0]
2655 }
2656 
2657 /// Similar to the 16-bit case but for instructions that take a 34-bit
2658 /// displacement field (prefixed loads/stores).
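/// For example (illustrative), a Power10 prefixed load such as
/// plwz r4, 131072(r3) encodes a displacement too large for a 16-bit
/// D-form field.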
2659 bool PPCTargetLowering::SelectAddressRegImm34(SDValue N, SDValue &Disp,
2660                                               SDValue &Base,
2661                                               SelectionDAG &DAG) const {
2662   // Only on 64-bit targets.
2663   if (N.getValueType() != MVT::i64)
2664     return false;
2665 
2666   SDLoc dl(N);
2667   int64_t Imm = 0;
2668 
2669   if (N.getOpcode() == ISD::ADD) {
2670     if (!isIntS34Immediate(N.getOperand(1), Imm))
2671       return false;
2672     Disp = DAG.getTargetConstant(Imm, dl, N.getValueType());
2673     if (FrameIndexSDNode *FI = dyn_cast<FrameIndexSDNode>(N.getOperand(0)))
2674       Base = DAG.getTargetFrameIndex(FI->getIndex(), N.getValueType());
2675     else
2676       Base = N.getOperand(0);
2677     return true;
2678   }
2679 
2680   if (N.getOpcode() == ISD::OR) {
2681     if (!isIntS34Immediate(N.getOperand(1), Imm))
2682       return false;
2683     // If this is an or of disjoint bitfields, we can codegen this as an add
2684     // (for better address arithmetic) if the LHS and RHS of the OR are
2685     // provably disjoint.
2686     KnownBits LHSKnown = DAG.computeKnownBits(N.getOperand(0));
2687     if ((LHSKnown.Zero.getZExtValue() | ~(uint64_t)Imm) != ~0ULL)
2688       return false;
2689     if (FrameIndexSDNode *FI = dyn_cast<FrameIndexSDNode>(N.getOperand(0)))
2690       Base = DAG.getTargetFrameIndex(FI->getIndex(), N.getValueType());
2691     else
2692       Base = N.getOperand(0);
2693     Disp = DAG.getTargetConstant(Imm, dl, N.getValueType());
2694     return true;
2695   }
2696 
2697   if (isIntS34Immediate(N, Imm)) { // If the address is a 34-bit const.
2698     Disp = DAG.getTargetConstant(Imm, dl, N.getValueType());
2699     Base = DAG.getRegister(PPC::ZERO8, N.getValueType());
2700     return true;
2701   }
2702 
2703   return false;
2704 }
2705 
/// SelectAddressRegRegOnly - Given the specified address, force it to be
2707 /// represented as an indexed [r+r] operation.
2708 bool PPCTargetLowering::SelectAddressRegRegOnly(SDValue N, SDValue &Base,
2709                                                 SDValue &Index,
2710                                                 SelectionDAG &DAG) const {
2711   // Check to see if we can easily represent this as an [r+r] address.  This
2712   // will fail if it thinks that the address is more profitably represented as
2713   // reg+imm, e.g. where imm = 0.
2714   if (SelectAddressRegReg(N, Base, Index, DAG))
2715     return true;
2716 
  // If the address is the result of an add, we can fold the implicit add into
  // the [r+r] form by using its operands as Base and Index.  However, we skip
  // this when the add combines a value with a 16-bit signed constant and both
  // operands have a single use, so that we do not materialize the constant
  // just for use as the index register.
2722   int16_t imm = 0;
2723   if (N.getOpcode() == ISD::ADD &&
2724       (!isIntS16Immediate(N.getOperand(1), imm) ||
2725        !N.getOperand(1).hasOneUse() || !N.getOperand(0).hasOneUse())) {
2726     Base = N.getOperand(0);
2727     Index = N.getOperand(1);
2728     return true;
2729   }
2730 
2731   // Otherwise, do it the hard way, using R0 as the base register.
2732   Base = DAG.getRegister(Subtarget.isPPC64() ? PPC::ZERO8 : PPC::ZERO,
2733                          N.getValueType());
2734   Index = N;
2735   return true;
2736 }
2737 
2738 template <typename Ty> static bool isValidPCRelNode(SDValue N) {
2739   Ty *PCRelCand = dyn_cast<Ty>(N);
2740   return PCRelCand && (PCRelCand->getTargetFlags() & PPCII::MO_PCREL_FLAG);
2741 }
2742 
2743 /// Returns true if this address is a PC Relative address.
2744 /// PC Relative addresses are marked with the flag PPCII::MO_PCREL_FLAG
2745 /// or if the node opcode is PPCISD::MAT_PCREL_ADDR.
2746 bool PPCTargetLowering::SelectAddressPCRel(SDValue N, SDValue &Base) const {
2747   // This is a materialize PC Relative node. Always select this as PC Relative.
2748   Base = N;
2749   if (N.getOpcode() == PPCISD::MAT_PCREL_ADDR)
2750     return true;
2751   if (isValidPCRelNode<ConstantPoolSDNode>(N) ||
2752       isValidPCRelNode<GlobalAddressSDNode>(N) ||
2753       isValidPCRelNode<JumpTableSDNode>(N) ||
2754       isValidPCRelNode<BlockAddressSDNode>(N))
2755     return true;
2756   return false;
2757 }
2758 
/// Returns true if we should use a direct load-into-vector instruction
/// (such as lxsd or lfd) instead of a load-into-GPR + direct-move sequence.
static bool usePartialVectorLoads(SDNode *N, const PPCSubtarget &ST) {
  // If there are any uses other than scalar_to_vector, then we should keep
  // this as a scalar load -> direct move pattern to prevent multiple loads.
2766   LoadSDNode *LD = dyn_cast<LoadSDNode>(N);
2767   if (!LD)
2768     return false;
2769 
2770   EVT MemVT = LD->getMemoryVT();
2771   if (!MemVT.isSimple())
2772     return false;
  switch (MemVT.getSimpleVT().SimpleTy) {
2774   case MVT::i64:
2775     break;
2776   case MVT::i32:
2777     if (!ST.hasP8Vector())
2778       return false;
2779     break;
2780   case MVT::i16:
2781   case MVT::i8:
2782     if (!ST.hasP9Vector())
2783       return false;
2784     break;
2785   default:
2786     return false;
2787   }
2788 
2789   SDValue LoadedVal(N, 0);
2790   if (!LoadedVal.hasOneUse())
2791     return false;
2792 
2793   for (SDNode::use_iterator UI = LD->use_begin(), UE = LD->use_end();
2794        UI != UE; ++UI)
2795     if (UI.getUse().get().getResNo() == 0 &&
2796         UI->getOpcode() != ISD::SCALAR_TO_VECTOR &&
2797         UI->getOpcode() != PPCISD::SCALAR_TO_VECTOR_PERMUTED)
2798       return false;
2799 
2800   return true;
2801 }
2802 
/// getPreIndexedAddressParts - Returns true if the node's address can be
/// legally represented as a pre-indexed load/store address; if so, it also
/// sets the base pointer, offset, and addressing mode by reference.
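/// For example (illustrative), a pre-incremented load such as lwzu r4, 4(r3)
/// loads from r3+4 and also updates r3 to r3+4.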
2806 bool PPCTargetLowering::getPreIndexedAddressParts(SDNode *N, SDValue &Base,
2807                                                   SDValue &Offset,
2808                                                   ISD::MemIndexedMode &AM,
2809                                                   SelectionDAG &DAG) const {
2810   if (DisablePPCPreinc) return false;
2811 
2812   bool isLoad = true;
2813   SDValue Ptr;
2814   EVT VT;
2815   unsigned Alignment;
2816   if (LoadSDNode *LD = dyn_cast<LoadSDNode>(N)) {
2817     Ptr = LD->getBasePtr();
2818     VT = LD->getMemoryVT();
2819     Alignment = LD->getAlignment();
2820   } else if (StoreSDNode *ST = dyn_cast<StoreSDNode>(N)) {
2821     Ptr = ST->getBasePtr();
2822     VT  = ST->getMemoryVT();
2823     Alignment = ST->getAlignment();
2824     isLoad = false;
2825   } else
2826     return false;
2827 
  // Do not generate pre-inc forms for specific loads that feed scalar_to_vector
  // instructions, because we can fold these into a more efficient instruction
  // (such as LXSD) instead.
  if (isLoad && usePartialVectorLoads(N, Subtarget))
    return false;
2834 
  // PowerPC doesn't have preinc load/store instructions for vectors.
2836   if (VT.isVector())
2837     return false;
2838 
2839   if (SelectAddressRegReg(Ptr, Base, Offset, DAG)) {
2840     // Common code will reject creating a pre-inc form if the base pointer
2841     // is a frame index, or if N is a store and the base pointer is either
2842     // the same as or a predecessor of the value being stored.  Check for
2843     // those situations here, and try with swapped Base/Offset instead.
2844     bool Swap = false;
2845 
2846     if (isa<FrameIndexSDNode>(Base) || isa<RegisterSDNode>(Base))
2847       Swap = true;
2848     else if (!isLoad) {
2849       SDValue Val = cast<StoreSDNode>(N)->getValue();
2850       if (Val == Base || Base.getNode()->isPredecessorOf(Val.getNode()))
2851         Swap = true;
2852     }
2853 
2854     if (Swap)
2855       std::swap(Base, Offset);
2856 
2857     AM = ISD::PRE_INC;
2858     return true;
2859   }
2860 
2861   // LDU/STU can only handle immediates that are a multiple of 4.
2862   if (VT != MVT::i64) {
2863     if (!SelectAddressRegImm(Ptr, Offset, Base, DAG, None))
2864       return false;
2865   } else {
2866     // LDU/STU need an address with at least 4-byte alignment.
2867     if (Alignment < 4)
2868       return false;
2869 
2870     if (!SelectAddressRegImm(Ptr, Offset, Base, DAG, Align(4)))
2871       return false;
2872   }
2873 
2874   if (LoadSDNode *LD = dyn_cast<LoadSDNode>(N)) {
2875     // PPC64 doesn't have lwau, but it does have lwaux.  Reject preinc load of
2876     // sext i32 to i64 when addr mode is r+i.
2877     if (LD->getValueType(0) == MVT::i64 && LD->getMemoryVT() == MVT::i32 &&
2878         LD->getExtensionType() == ISD::SEXTLOAD &&
2879         isa<ConstantSDNode>(Offset))
2880       return false;
2881   }
2882 
2883   AM = ISD::PRE_INC;
2884   return true;
2885 }
2886 
2887 //===----------------------------------------------------------------------===//
2888 //  LowerOperation implementation
2889 //===----------------------------------------------------------------------===//
2890 
/// Set HiOpFlags and LoOpFlags to the target MO flags for a label reference,
/// adding the PIC flag when we should reference labels using a PICBase.
2893 static void getLabelAccessInfo(bool IsPIC, const PPCSubtarget &Subtarget,
2894                                unsigned &HiOpFlags, unsigned &LoOpFlags,
2895                                const GlobalValue *GV = nullptr) {
2896   HiOpFlags = PPCII::MO_HA;
2897   LoOpFlags = PPCII::MO_LO;
2898 
  // Only use the PIC base when in the PIC relocation model.
2900   if (IsPIC) {
2901     HiOpFlags |= PPCII::MO_PIC_FLAG;
2902     LoOpFlags |= PPCII::MO_PIC_FLAG;
2903   }
2904 }
2905 
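/// Build the Hi/Lo pair that materializes the address of a label reference.
/// The non-PIC result is roughly (illustrative):
///   lis  rT, sym@ha
///   addi rT, rT, sym@l
/// With PIC, the high part is added to the GlobalBaseReg instead.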
2906 static SDValue LowerLabelRef(SDValue HiPart, SDValue LoPart, bool isPIC,
2907                              SelectionDAG &DAG) {
2908   SDLoc DL(HiPart);
2909   EVT PtrVT = HiPart.getValueType();
2910   SDValue Zero = DAG.getConstant(0, DL, PtrVT);
2911 
2912   SDValue Hi = DAG.getNode(PPCISD::Hi, DL, PtrVT, HiPart, Zero);
2913   SDValue Lo = DAG.getNode(PPCISD::Lo, DL, PtrVT, LoPart, Zero);
2914 
2915   // With PIC, the first instruction is actually "GR+hi(&G)".
2916   if (isPIC)
2917     Hi = DAG.getNode(ISD::ADD, DL, PtrVT,
2918                      DAG.getNode(PPCISD::GlobalBaseReg, DL, PtrVT), Hi);
2919 
2920   // Generate non-pic code that has direct accesses to the constant pool.
2921   // The address of the global is just (hi(&g)+lo(&g)).
2922   return DAG.getNode(ISD::ADD, DL, PtrVT, Hi, Lo);
2923 }
2924 
2925 static void setUsesTOCBasePtr(MachineFunction &MF) {
2926   PPCFunctionInfo *FuncInfo = MF.getInfo<PPCFunctionInfo>();
2927   FuncInfo->setUsesTOCBasePtr();
2928 }
2929 
2930 static void setUsesTOCBasePtr(SelectionDAG &DAG) {
2931   setUsesTOCBasePtr(DAG.getMachineFunction());
2932 }
2933 
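/// Return a load of the TOC entry for the given symbol operand. The base is
/// the TOC pointer in X2 on 64-bit targets (R2 on 32-bit AIX), e.g. (small
/// code model, illustrative): ld rT, sym@toc(r2).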
2934 SDValue PPCTargetLowering::getTOCEntry(SelectionDAG &DAG, const SDLoc &dl,
2935                                        SDValue GA) const {
2936   const bool Is64Bit = Subtarget.isPPC64();
2937   EVT VT = Is64Bit ? MVT::i64 : MVT::i32;
2938   SDValue Reg = Is64Bit ? DAG.getRegister(PPC::X2, VT)
2939                         : Subtarget.isAIXABI()
2940                               ? DAG.getRegister(PPC::R2, VT)
2941                               : DAG.getNode(PPCISD::GlobalBaseReg, dl, VT);
2942   SDValue Ops[] = { GA, Reg };
2943   return DAG.getMemIntrinsicNode(
2944       PPCISD::TOC_ENTRY, dl, DAG.getVTList(VT, MVT::Other), Ops, VT,
2945       MachinePointerInfo::getGOT(DAG.getMachineFunction()), None,
2946       MachineMemOperand::MOLoad);
2947 }
2948 
2949 SDValue PPCTargetLowering::LowerConstantPool(SDValue Op,
2950                                              SelectionDAG &DAG) const {
2951   EVT PtrVT = Op.getValueType();
2952   ConstantPoolSDNode *CP = cast<ConstantPoolSDNode>(Op);
2953   const Constant *C = CP->getConstVal();
2954 
2955   // 64-bit SVR4 ABI and AIX ABI code are always position-independent.
2956   // The actual address of the GlobalValue is stored in the TOC.
2957   if (Subtarget.is64BitELFABI() || Subtarget.isAIXABI()) {
2958     if (Subtarget.isUsingPCRelativeCalls()) {
2959       SDLoc DL(CP);
2960       EVT Ty = getPointerTy(DAG.getDataLayout());
2961       SDValue ConstPool = DAG.getTargetConstantPool(
2962           C, Ty, CP->getAlign(), CP->getOffset(), PPCII::MO_PCREL_FLAG);
2963       return DAG.getNode(PPCISD::MAT_PCREL_ADDR, DL, Ty, ConstPool);
2964     }
2965     setUsesTOCBasePtr(DAG);
2966     SDValue GA = DAG.getTargetConstantPool(C, PtrVT, CP->getAlign(), 0);
2967     return getTOCEntry(DAG, SDLoc(CP), GA);
2968   }
2969 
2970   unsigned MOHiFlag, MOLoFlag;
2971   bool IsPIC = isPositionIndependent();
2972   getLabelAccessInfo(IsPIC, Subtarget, MOHiFlag, MOLoFlag);
2973 
2974   if (IsPIC && Subtarget.isSVR4ABI()) {
2975     SDValue GA =
2976         DAG.getTargetConstantPool(C, PtrVT, CP->getAlign(), PPCII::MO_PIC_FLAG);
2977     return getTOCEntry(DAG, SDLoc(CP), GA);
2978   }
2979 
2980   SDValue CPIHi =
2981       DAG.getTargetConstantPool(C, PtrVT, CP->getAlign(), 0, MOHiFlag);
2982   SDValue CPILo =
2983       DAG.getTargetConstantPool(C, PtrVT, CP->getAlign(), 0, MOLoFlag);
2984   return LowerLabelRef(CPIHi, CPILo, IsPIC, DAG);
2985 }
2986 
// For 64-bit PowerPC, prefer the more compact relative encodings.
// This trades 32 bits per jump table entry for one or two instructions
// at the jump site.
2990 unsigned PPCTargetLowering::getJumpTableEncoding() const {
2991   if (isJumpTableRelative())
2992     return MachineJumpTableInfo::EK_LabelDifference32;
2993 
2994   return TargetLowering::getJumpTableEncoding();
2995 }
2996 
2997 bool PPCTargetLowering::isJumpTableRelative() const {
2998   if (UseAbsoluteJumpTables)
2999     return false;
3000   if (Subtarget.isPPC64() || Subtarget.isAIXABI())
3001     return true;
3002   return TargetLowering::isJumpTableRelative();
3003 }
3004 
3005 SDValue PPCTargetLowering::getPICJumpTableRelocBase(SDValue Table,
3006                                                     SelectionDAG &DAG) const {
3007   if (!Subtarget.isPPC64() || Subtarget.isAIXABI())
3008     return TargetLowering::getPICJumpTableRelocBase(Table, DAG);
3009 
3010   switch (getTargetMachine().getCodeModel()) {
3011   case CodeModel::Small:
3012   case CodeModel::Medium:
3013     return TargetLowering::getPICJumpTableRelocBase(Table, DAG);
3014   default:
3015     return DAG.getNode(PPCISD::GlobalBaseReg, SDLoc(),
3016                        getPointerTy(DAG.getDataLayout()));
3017   }
3018 }
3019 
3020 const MCExpr *
3021 PPCTargetLowering::getPICJumpTableRelocBaseExpr(const MachineFunction *MF,
3022                                                 unsigned JTI,
3023                                                 MCContext &Ctx) const {
3024   if (!Subtarget.isPPC64() || Subtarget.isAIXABI())
3025     return TargetLowering::getPICJumpTableRelocBaseExpr(MF, JTI, Ctx);
3026 
3027   switch (getTargetMachine().getCodeModel()) {
3028   case CodeModel::Small:
3029   case CodeModel::Medium:
3030     return TargetLowering::getPICJumpTableRelocBaseExpr(MF, JTI, Ctx);
3031   default:
3032     return MCSymbolRefExpr::create(MF->getPICBaseSymbol(), Ctx);
3033   }
3034 }
3035 
3036 SDValue PPCTargetLowering::LowerJumpTable(SDValue Op, SelectionDAG &DAG) const {
3037   EVT PtrVT = Op.getValueType();
3038   JumpTableSDNode *JT = cast<JumpTableSDNode>(Op);
3039 
3040   // isUsingPCRelativeCalls() returns true when PCRelative is enabled
3041   if (Subtarget.isUsingPCRelativeCalls()) {
3042     SDLoc DL(JT);
3043     EVT Ty = getPointerTy(DAG.getDataLayout());
3044     SDValue GA =
3045         DAG.getTargetJumpTable(JT->getIndex(), Ty, PPCII::MO_PCREL_FLAG);
3046     SDValue MatAddr = DAG.getNode(PPCISD::MAT_PCREL_ADDR, DL, Ty, GA);
3047     return MatAddr;
3048   }
3049 
3050   // 64-bit SVR4 ABI and AIX ABI code are always position-independent.
3051   // The actual address of the GlobalValue is stored in the TOC.
3052   if (Subtarget.is64BitELFABI() || Subtarget.isAIXABI()) {
3053     setUsesTOCBasePtr(DAG);
3054     SDValue GA = DAG.getTargetJumpTable(JT->getIndex(), PtrVT);
3055     return getTOCEntry(DAG, SDLoc(JT), GA);
3056   }
3057 
3058   unsigned MOHiFlag, MOLoFlag;
3059   bool IsPIC = isPositionIndependent();
3060   getLabelAccessInfo(IsPIC, Subtarget, MOHiFlag, MOLoFlag);
3061 
3062   if (IsPIC && Subtarget.isSVR4ABI()) {
3063     SDValue GA = DAG.getTargetJumpTable(JT->getIndex(), PtrVT,
3064                                         PPCII::MO_PIC_FLAG);
3065     return getTOCEntry(DAG, SDLoc(GA), GA);
3066   }
3067 
3068   SDValue JTIHi = DAG.getTargetJumpTable(JT->getIndex(), PtrVT, MOHiFlag);
3069   SDValue JTILo = DAG.getTargetJumpTable(JT->getIndex(), PtrVT, MOLoFlag);
3070   return LowerLabelRef(JTIHi, JTILo, IsPIC, DAG);
3071 }
3072 
3073 SDValue PPCTargetLowering::LowerBlockAddress(SDValue Op,
3074                                              SelectionDAG &DAG) const {
3075   EVT PtrVT = Op.getValueType();
3076   BlockAddressSDNode *BASDN = cast<BlockAddressSDNode>(Op);
3077   const BlockAddress *BA = BASDN->getBlockAddress();
3078 
3079   // isUsingPCRelativeCalls() returns true when PCRelative is enabled
3080   if (Subtarget.isUsingPCRelativeCalls()) {
3081     SDLoc DL(BASDN);
3082     EVT Ty = getPointerTy(DAG.getDataLayout());
3083     SDValue GA = DAG.getTargetBlockAddress(BA, Ty, BASDN->getOffset(),
3084                                            PPCII::MO_PCREL_FLAG);
3085     SDValue MatAddr = DAG.getNode(PPCISD::MAT_PCREL_ADDR, DL, Ty, GA);
3086     return MatAddr;
3087   }
3088 
3089   // 64-bit SVR4 ABI and AIX ABI code are always position-independent.
3090   // The actual BlockAddress is stored in the TOC.
3091   if (Subtarget.is64BitELFABI() || Subtarget.isAIXABI()) {
3092     setUsesTOCBasePtr(DAG);
3093     SDValue GA = DAG.getTargetBlockAddress(BA, PtrVT, BASDN->getOffset());
3094     return getTOCEntry(DAG, SDLoc(BASDN), GA);
3095   }
3096 
3097   // 32-bit position-independent ELF stores the BlockAddress in the .got.
3098   if (Subtarget.is32BitELFABI() && isPositionIndependent())
3099     return getTOCEntry(
3100         DAG, SDLoc(BASDN),
3101         DAG.getTargetBlockAddress(BA, PtrVT, BASDN->getOffset()));
3102 
3103   unsigned MOHiFlag, MOLoFlag;
3104   bool IsPIC = isPositionIndependent();
3105   getLabelAccessInfo(IsPIC, Subtarget, MOHiFlag, MOLoFlag);
3106   SDValue TgtBAHi = DAG.getTargetBlockAddress(BA, PtrVT, 0, MOHiFlag);
3107   SDValue TgtBALo = DAG.getTargetBlockAddress(BA, PtrVT, 0, MOLoFlag);
3108   return LowerLabelRef(TgtBAHi, TgtBALo, IsPIC, DAG);
3109 }
3110 
3111 SDValue PPCTargetLowering::LowerGlobalTLSAddress(SDValue Op,
3112                                               SelectionDAG &DAG) const {
3113   // FIXME: TLS addresses currently use medium model code sequences,
3114   // which is the most useful form.  Eventually support for small and
3115   // large models could be added if users need it, at the cost of
3116   // additional complexity.
3117   GlobalAddressSDNode *GA = cast<GlobalAddressSDNode>(Op);
3118   if (DAG.getTarget().useEmulatedTLS())
3119     return LowerToTLSEmulatedModel(GA, DAG);
3120 
3121   SDLoc dl(GA);
3122   const GlobalValue *GV = GA->getGlobal();
3123   EVT PtrVT = getPointerTy(DAG.getDataLayout());
3124   bool is64bit = Subtarget.isPPC64();
3125   const Module *M = DAG.getMachineFunction().getFunction().getParent();
3126   PICLevel::Level picLevel = M->getPICLevel();
3127 
3128   const TargetMachine &TM = getTargetMachine();
3129   TLSModel::Model Model = TM.getTLSModel(GV);
3130 
3131   if (Model == TLSModel::LocalExec) {
3132     if (Subtarget.isUsingPCRelativeCalls()) {
3133       SDValue TLSReg = DAG.getRegister(PPC::X13, MVT::i64);
3134       SDValue TGA = DAG.getTargetGlobalAddress(
3135           GV, dl, PtrVT, 0, (PPCII::MO_PCREL_FLAG | PPCII::MO_TPREL_FLAG));
3136       SDValue MatAddr =
3137           DAG.getNode(PPCISD::TLS_LOCAL_EXEC_MAT_ADDR, dl, PtrVT, TGA);
3138       return DAG.getNode(PPCISD::ADD_TLS, dl, PtrVT, TLSReg, MatAddr);
3139     }
3140 
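    // The non-PC-relative local-exec sequence is roughly (illustrative):
    //   addis rT, r13, sym@tprel@ha   (r2 instead of r13 on 32-bit)
    //   addi  rT, rT, sym@tprel@l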
3141     SDValue TGAHi = DAG.getTargetGlobalAddress(GV, dl, PtrVT, 0,
3142                                                PPCII::MO_TPREL_HA);
3143     SDValue TGALo = DAG.getTargetGlobalAddress(GV, dl, PtrVT, 0,
3144                                                PPCII::MO_TPREL_LO);
3145     SDValue TLSReg = is64bit ? DAG.getRegister(PPC::X13, MVT::i64)
3146                              : DAG.getRegister(PPC::R2, MVT::i32);
3147 
3148     SDValue Hi = DAG.getNode(PPCISD::Hi, dl, PtrVT, TGAHi, TLSReg);
3149     return DAG.getNode(PPCISD::Lo, dl, PtrVT, TGALo, Hi);
3150   }
3151 
3152   if (Model == TLSModel::InitialExec) {
3153     bool IsPCRel = Subtarget.isUsingPCRelativeCalls();
3154     SDValue TGA = DAG.getTargetGlobalAddress(
3155         GV, dl, PtrVT, 0, IsPCRel ? PPCII::MO_GOT_TPREL_PCREL_FLAG : 0);
3156     SDValue TGATLS = DAG.getTargetGlobalAddress(
3157         GV, dl, PtrVT, 0,
3158         IsPCRel ? (PPCII::MO_TLS | PPCII::MO_PCREL_FLAG) : PPCII::MO_TLS);
3159     SDValue TPOffset;
3160     if (IsPCRel) {
3161       SDValue MatPCRel = DAG.getNode(PPCISD::MAT_PCREL_ADDR, dl, PtrVT, TGA);
3162       TPOffset = DAG.getLoad(MVT::i64, dl, DAG.getEntryNode(), MatPCRel,
3163                              MachinePointerInfo());
3164     } else {
3165       SDValue GOTPtr;
3166       if (is64bit) {
3167         setUsesTOCBasePtr(DAG);
3168         SDValue GOTReg = DAG.getRegister(PPC::X2, MVT::i64);
3169         GOTPtr =
3170             DAG.getNode(PPCISD::ADDIS_GOT_TPREL_HA, dl, PtrVT, GOTReg, TGA);
3171       } else {
3172         if (!TM.isPositionIndependent())
3173           GOTPtr = DAG.getNode(PPCISD::PPC32_GOT, dl, PtrVT);
3174         else if (picLevel == PICLevel::SmallPIC)
3175           GOTPtr = DAG.getNode(PPCISD::GlobalBaseReg, dl, PtrVT);
3176         else
3177           GOTPtr = DAG.getNode(PPCISD::PPC32_PICGOT, dl, PtrVT);
3178       }
3179       TPOffset = DAG.getNode(PPCISD::LD_GOT_TPREL_L, dl, PtrVT, TGA, GOTPtr);
3180     }
3181     return DAG.getNode(PPCISD::ADD_TLS, dl, PtrVT, TPOffset, TGATLS);
3182   }
3183 
3184   if (Model == TLSModel::GeneralDynamic) {
3185     if (Subtarget.isUsingPCRelativeCalls()) {
3186       SDValue TGA = DAG.getTargetGlobalAddress(GV, dl, PtrVT, 0,
3187                                                PPCII::MO_GOT_TLSGD_PCREL_FLAG);
3188       return DAG.getNode(PPCISD::TLS_DYNAMIC_MAT_PCREL_ADDR, dl, PtrVT, TGA);
3189     }
3190 
3191     SDValue TGA = DAG.getTargetGlobalAddress(GV, dl, PtrVT, 0, 0);
3192     SDValue GOTPtr;
3193     if (is64bit) {
3194       setUsesTOCBasePtr(DAG);
3195       SDValue GOTReg = DAG.getRegister(PPC::X2, MVT::i64);
      GOTPtr = DAG.getNode(PPCISD::ADDIS_TLSGD_HA, dl, PtrVT, GOTReg, TGA);
3198     } else {
3199       if (picLevel == PICLevel::SmallPIC)
3200         GOTPtr = DAG.getNode(PPCISD::GlobalBaseReg, dl, PtrVT);
3201       else
3202         GOTPtr = DAG.getNode(PPCISD::PPC32_PICGOT, dl, PtrVT);
3203     }
3204     return DAG.getNode(PPCISD::ADDI_TLSGD_L_ADDR, dl, PtrVT,
3205                        GOTPtr, TGA, TGA);
3206   }
3207 
3208   if (Model == TLSModel::LocalDynamic) {
3209     if (Subtarget.isUsingPCRelativeCalls()) {
3210       SDValue TGA = DAG.getTargetGlobalAddress(GV, dl, PtrVT, 0,
3211                                                PPCII::MO_GOT_TLSLD_PCREL_FLAG);
3212       SDValue MatPCRel =
3213           DAG.getNode(PPCISD::TLS_DYNAMIC_MAT_PCREL_ADDR, dl, PtrVT, TGA);
3214       return DAG.getNode(PPCISD::PADDI_DTPREL, dl, PtrVT, MatPCRel, TGA);
3215     }
3216 
3217     SDValue TGA = DAG.getTargetGlobalAddress(GV, dl, PtrVT, 0, 0);
3218     SDValue GOTPtr;
3219     if (is64bit) {
3220       setUsesTOCBasePtr(DAG);
3221       SDValue GOTReg = DAG.getRegister(PPC::X2, MVT::i64);
3222       GOTPtr = DAG.getNode(PPCISD::ADDIS_TLSLD_HA, dl, PtrVT,
3223                            GOTReg, TGA);
3224     } else {
3225       if (picLevel == PICLevel::SmallPIC)
3226         GOTPtr = DAG.getNode(PPCISD::GlobalBaseReg, dl, PtrVT);
3227       else
3228         GOTPtr = DAG.getNode(PPCISD::PPC32_PICGOT, dl, PtrVT);
3229     }
3230     SDValue TLSAddr = DAG.getNode(PPCISD::ADDI_TLSLD_L_ADDR, dl,
3231                                   PtrVT, GOTPtr, TGA, TGA);
3232     SDValue DtvOffsetHi = DAG.getNode(PPCISD::ADDIS_DTPREL_HA, dl,
3233                                       PtrVT, TLSAddr, TGA);
3234     return DAG.getNode(PPCISD::ADDI_DTPREL_L, dl, PtrVT, DtvOffsetHi, TGA);
3235   }
3236 
3237   llvm_unreachable("Unknown TLS model!");
3238 }
3239 
3240 SDValue PPCTargetLowering::LowerGlobalAddress(SDValue Op,
3241                                               SelectionDAG &DAG) const {
3242   EVT PtrVT = Op.getValueType();
3243   GlobalAddressSDNode *GSDN = cast<GlobalAddressSDNode>(Op);
3244   SDLoc DL(GSDN);
3245   const GlobalValue *GV = GSDN->getGlobal();
3246 
3247   // 64-bit SVR4 ABI & AIX ABI code is always position-independent.
3248   // The actual address of the GlobalValue is stored in the TOC.
3249   if (Subtarget.is64BitELFABI() || Subtarget.isAIXABI()) {
3250     if (Subtarget.isUsingPCRelativeCalls()) {
3251       EVT Ty = getPointerTy(DAG.getDataLayout());
3252       if (isAccessedAsGotIndirect(Op)) {
3253         SDValue GA = DAG.getTargetGlobalAddress(GV, DL, Ty, GSDN->getOffset(),
3254                                                 PPCII::MO_PCREL_FLAG |
3255                                                     PPCII::MO_GOT_FLAG);
3256         SDValue MatPCRel = DAG.getNode(PPCISD::MAT_PCREL_ADDR, DL, Ty, GA);
3257         SDValue Load = DAG.getLoad(MVT::i64, DL, DAG.getEntryNode(), MatPCRel,
3258                                    MachinePointerInfo());
3259         return Load;
3260       } else {
3261         SDValue GA = DAG.getTargetGlobalAddress(GV, DL, Ty, GSDN->getOffset(),
3262                                                 PPCII::MO_PCREL_FLAG);
3263         return DAG.getNode(PPCISD::MAT_PCREL_ADDR, DL, Ty, GA);
3264       }
3265     }
3266     setUsesTOCBasePtr(DAG);
3267     SDValue GA = DAG.getTargetGlobalAddress(GV, DL, PtrVT, GSDN->getOffset());
3268     return getTOCEntry(DAG, DL, GA);
3269   }
3270 
3271   unsigned MOHiFlag, MOLoFlag;
3272   bool IsPIC = isPositionIndependent();
3273   getLabelAccessInfo(IsPIC, Subtarget, MOHiFlag, MOLoFlag, GV);
3274 
3275   if (IsPIC && Subtarget.isSVR4ABI()) {
3276     SDValue GA = DAG.getTargetGlobalAddress(GV, DL, PtrVT,
3277                                             GSDN->getOffset(),
3278                                             PPCII::MO_PIC_FLAG);
3279     return getTOCEntry(DAG, DL, GA);
3280   }
3281 
3282   SDValue GAHi =
3283     DAG.getTargetGlobalAddress(GV, DL, PtrVT, GSDN->getOffset(), MOHiFlag);
3284   SDValue GALo =
3285     DAG.getTargetGlobalAddress(GV, DL, PtrVT, GSDN->getOffset(), MOLoFlag);
3286 
3287   return LowerLabelRef(GAHi, GALo, IsPIC, DAG);
3288 }
3289 
3290 SDValue PPCTargetLowering::LowerSETCC(SDValue Op, SelectionDAG &DAG) const {
3291   ISD::CondCode CC = cast<CondCodeSDNode>(Op.getOperand(2))->get();
3292   SDLoc dl(Op);
3293 
3294   if (Op.getValueType() == MVT::v2i64) {
3295     // When the operands themselves are v2i64 values, we need to do something
3296     // special because VSX has no underlying comparison operations for these.
3297     if (Op.getOperand(0).getValueType() == MVT::v2i64) {
3298       // Equality can be handled by casting to the legal type for Altivec
3299       // comparisons, everything else needs to be expanded.
3300       if (CC == ISD::SETEQ || CC == ISD::SETNE) {
3301         return DAG.getNode(ISD::BITCAST, dl, MVT::v2i64,
3302                  DAG.getSetCC(dl, MVT::v4i32,
3303                    DAG.getNode(ISD::BITCAST, dl, MVT::v4i32, Op.getOperand(0)),
3304                    DAG.getNode(ISD::BITCAST, dl, MVT::v4i32, Op.getOperand(1)),
3305                    CC));
3306       }
3307 
3308       return SDValue();
3309     }
3310 
3311     // We handle most of these in the usual way.
3312     return Op;
3313   }
3314 
3315   // If we're comparing for equality to zero, expose the fact that this is
3316   // implemented as a ctlz/srl pair on ppc, so that the dag combiner can
3317   // fold the new nodes.
3318   if (SDValue V = lowerCmpEqZeroToCtlzSrl(Op, DAG))
3319     return V;
3320 
3321   if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op.getOperand(1))) {
3322     // Leave comparisons against 0 and -1 alone for now, since they're usually
3323     // optimized.  FIXME: revisit this when we can custom lower all setcc
3324     // optimizations.
3325     if (C->isAllOnesValue() || C->isNullValue())
3326       return SDValue();
3327   }
3328 
3329   // If we have an integer seteq/setne, turn it into a compare against zero
3330   // by xor'ing the rhs with the lhs, which is faster than setting a
3331   // condition register, reading it back out, and masking the correct bit.  The
3332   // normal approach here uses sub to do this instead of xor.  Using xor exposes
3333   // the result to other bit-twiddling opportunities.
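  // For example (illustrative), (seteq a, b) becomes (seteq (xor a, b), 0),
  // which can then use the ctlz/srl lowering mentioned above.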
3334   EVT LHSVT = Op.getOperand(0).getValueType();
3335   if (LHSVT.isInteger() && (CC == ISD::SETEQ || CC == ISD::SETNE)) {
3336     EVT VT = Op.getValueType();
3337     SDValue Sub = DAG.getNode(ISD::XOR, dl, LHSVT, Op.getOperand(0),
3338                                 Op.getOperand(1));
3339     return DAG.getSetCC(dl, VT, Sub, DAG.getConstant(0, dl, LHSVT), CC);
3340   }
3341   return SDValue();
3342 }
3343 
3344 SDValue PPCTargetLowering::LowerVAARG(SDValue Op, SelectionDAG &DAG) const {
3345   SDNode *Node = Op.getNode();
3346   EVT VT = Node->getValueType(0);
3347   EVT PtrVT = getPointerTy(DAG.getDataLayout());
3348   SDValue InChain = Node->getOperand(0);
3349   SDValue VAListPtr = Node->getOperand(1);
3350   const Value *SV = cast<SrcValueSDNode>(Node->getOperand(2))->getValue();
3351   SDLoc dl(Node);
3352 
3353   assert(!Subtarget.isPPC64() && "LowerVAARG is PPC32 only");
3354 
3355   // gpr_index
3356   SDValue GprIndex = DAG.getExtLoad(ISD::ZEXTLOAD, dl, MVT::i32, InChain,
3357                                     VAListPtr, MachinePointerInfo(SV), MVT::i8);
3358   InChain = GprIndex.getValue(1);
3359 
3360   if (VT == MVT::i64) {
3361     // Check if GprIndex is even
3362     SDValue GprAnd = DAG.getNode(ISD::AND, dl, MVT::i32, GprIndex,
3363                                  DAG.getConstant(1, dl, MVT::i32));
3364     SDValue CC64 = DAG.getSetCC(dl, MVT::i32, GprAnd,
3365                                 DAG.getConstant(0, dl, MVT::i32), ISD::SETNE);
3366     SDValue GprIndexPlusOne = DAG.getNode(ISD::ADD, dl, MVT::i32, GprIndex,
3367                                           DAG.getConstant(1, dl, MVT::i32));
3368     // Align GprIndex to be even if it isn't
3369     GprIndex = DAG.getNode(ISD::SELECT, dl, MVT::i32, CC64, GprIndexPlusOne,
3370                            GprIndex);
3371   }
3372 
3373   // fpr index is 1 byte after gpr
3374   SDValue FprPtr = DAG.getNode(ISD::ADD, dl, PtrVT, VAListPtr,
3375                                DAG.getConstant(1, dl, MVT::i32));
3376 
3377   // fpr
3378   SDValue FprIndex = DAG.getExtLoad(ISD::ZEXTLOAD, dl, MVT::i32, InChain,
3379                                     FprPtr, MachinePointerInfo(SV), MVT::i8);
3380   InChain = FprIndex.getValue(1);
3381 
3382   SDValue RegSaveAreaPtr = DAG.getNode(ISD::ADD, dl, PtrVT, VAListPtr,
3383                                        DAG.getConstant(8, dl, MVT::i32));
3384 
3385   SDValue OverflowAreaPtr = DAG.getNode(ISD::ADD, dl, PtrVT, VAListPtr,
3386                                         DAG.getConstant(4, dl, MVT::i32));
3387 
3388   // areas
3389   SDValue OverflowArea =
3390       DAG.getLoad(MVT::i32, dl, InChain, OverflowAreaPtr, MachinePointerInfo());
3391   InChain = OverflowArea.getValue(1);
3392 
3393   SDValue RegSaveArea =
3394       DAG.getLoad(MVT::i32, dl, InChain, RegSaveAreaPtr, MachinePointerInfo());
3395   InChain = RegSaveArea.getValue(1);
3396 
  // select overflow_area if the index >= 8
3398   SDValue CC = DAG.getSetCC(dl, MVT::i32, VT.isInteger() ? GprIndex : FprIndex,
3399                             DAG.getConstant(8, dl, MVT::i32), ISD::SETLT);
3400 
3401   // adjustment constant gpr_index * 4/8
3402   SDValue RegConstant = DAG.getNode(ISD::MUL, dl, MVT::i32,
3403                                     VT.isInteger() ? GprIndex : FprIndex,
3404                                     DAG.getConstant(VT.isInteger() ? 4 : 8, dl,
3405                                                     MVT::i32));
3406 
3407   // OurReg = RegSaveArea + RegConstant
3408   SDValue OurReg = DAG.getNode(ISD::ADD, dl, PtrVT, RegSaveArea,
3409                                RegConstant);
3410 
3411   // Floating types are 32 bytes into RegSaveArea
3412   if (VT.isFloatingPoint())
3413     OurReg = DAG.getNode(ISD::ADD, dl, PtrVT, OurReg,
3414                          DAG.getConstant(32, dl, MVT::i32));
3415 
3416   // increase {f,g}pr_index by 1 (or 2 if VT is i64)
3417   SDValue IndexPlus1 = DAG.getNode(ISD::ADD, dl, MVT::i32,
3418                                    VT.isInteger() ? GprIndex : FprIndex,
3419                                    DAG.getConstant(VT == MVT::i64 ? 2 : 1, dl,
3420                                                    MVT::i32));
3421 
3422   InChain = DAG.getTruncStore(InChain, dl, IndexPlus1,
3423                               VT.isInteger() ? VAListPtr : FprPtr,
3424                               MachinePointerInfo(SV), MVT::i8);
3425 
3426   // determine if we should load from reg_save_area or overflow_area
3427   SDValue Result = DAG.getNode(ISD::SELECT, dl, PtrVT, CC, OurReg, OverflowArea);
3428 
  // increase overflow_area by 4/8 if the gpr/fpr index >= 8
3430   SDValue OverflowAreaPlusN = DAG.getNode(ISD::ADD, dl, PtrVT, OverflowArea,
3431                                           DAG.getConstant(VT.isInteger() ? 4 : 8,
3432                                           dl, MVT::i32));
3433 
3434   OverflowArea = DAG.getNode(ISD::SELECT, dl, MVT::i32, CC, OverflowArea,
3435                              OverflowAreaPlusN);
3436 
3437   InChain = DAG.getTruncStore(InChain, dl, OverflowArea, OverflowAreaPtr,
3438                               MachinePointerInfo(), MVT::i32);
3439 
3440   return DAG.getLoad(VT, dl, InChain, Result, MachinePointerInfo());
3441 }
3442 
3443 SDValue PPCTargetLowering::LowerVACOPY(SDValue Op, SelectionDAG &DAG) const {
3444   assert(!Subtarget.isPPC64() && "LowerVACOPY is PPC32 only");
3445 
  // We have to copy the entire va_list struct:
  // 2*sizeof(char) + 2 bytes of padding + 2*sizeof(char*) = 12 bytes.
3448   return DAG.getMemcpy(Op.getOperand(0), Op, Op.getOperand(1), Op.getOperand(2),
3449                        DAG.getConstant(12, SDLoc(Op), MVT::i32), Align(8),
3450                        false, true, false, MachinePointerInfo(),
3451                        MachinePointerInfo());
3452 }
3453 
3454 SDValue PPCTargetLowering::LowerADJUST_TRAMPOLINE(SDValue Op,
3455                                                   SelectionDAG &DAG) const {
3456   if (Subtarget.isAIXABI())
3457     report_fatal_error("ADJUST_TRAMPOLINE operation is not supported on AIX.");
3458 
3459   return Op.getOperand(0);
3460 }
3461 
3462 SDValue PPCTargetLowering::LowerINIT_TRAMPOLINE(SDValue Op,
3463                                                 SelectionDAG &DAG) const {
3464   if (Subtarget.isAIXABI())
3465     report_fatal_error("INIT_TRAMPOLINE operation is not supported on AIX.");
3466 
3467   SDValue Chain = Op.getOperand(0);
3468   SDValue Trmp = Op.getOperand(1); // trampoline
3469   SDValue FPtr = Op.getOperand(2); // nested function
3470   SDValue Nest = Op.getOperand(3); // 'nest' parameter value
3471   SDLoc dl(Op);
3472 
3473   EVT PtrVT = getPointerTy(DAG.getDataLayout());
3474   bool isPPC64 = (PtrVT == MVT::i64);
3475   Type *IntPtrTy = DAG.getDataLayout().getIntPtrType(*DAG.getContext());
3476 
3477   TargetLowering::ArgListTy Args;
3478   TargetLowering::ArgListEntry Entry;
3479 
3480   Entry.Ty = IntPtrTy;
3481   Entry.Node = Trmp; Args.push_back(Entry);
3482 
3483   // TrampSize == (isPPC64 ? 48 : 40);
3484   Entry.Node = DAG.getConstant(isPPC64 ? 48 : 40, dl,
3485                                isPPC64 ? MVT::i64 : MVT::i32);
3486   Args.push_back(Entry);
3487 
3488   Entry.Node = FPtr; Args.push_back(Entry);
3489   Entry.Node = Nest; Args.push_back(Entry);
3490 
3491   // Lower to a call to __trampoline_setup(Trmp, TrampSize, FPtr, ctx_reg)
3492   TargetLowering::CallLoweringInfo CLI(DAG);
3493   CLI.setDebugLoc(dl).setChain(Chain).setLibCallee(
3494       CallingConv::C, Type::getVoidTy(*DAG.getContext()),
3495       DAG.getExternalSymbol("__trampoline_setup", PtrVT), std::move(Args));
3496 
3497   std::pair<SDValue, SDValue> CallResult = LowerCallTo(CLI);
3498   return CallResult.second;
3499 }
3500 
3501 SDValue PPCTargetLowering::LowerVASTART(SDValue Op, SelectionDAG &DAG) const {
3502   MachineFunction &MF = DAG.getMachineFunction();
3503   PPCFunctionInfo *FuncInfo = MF.getInfo<PPCFunctionInfo>();
3504   EVT PtrVT = getPointerTy(MF.getDataLayout());
3505 
3506   SDLoc dl(Op);
3507 
3508   if (Subtarget.isPPC64() || Subtarget.isAIXABI()) {
3509     // vastart just stores the address of the VarArgsFrameIndex slot into the
3510     // memory location argument.
3511     SDValue FR = DAG.getFrameIndex(FuncInfo->getVarArgsFrameIndex(), PtrVT);
3512     const Value *SV = cast<SrcValueSDNode>(Op.getOperand(2))->getValue();
3513     return DAG.getStore(Op.getOperand(0), dl, FR, Op.getOperand(1),
3514                         MachinePointerInfo(SV));
3515   }
3516 
  // For the 32-bit SVR4 ABI we follow the layout of the va_list struct.
  // We assume the given va_list is already allocated.
3519   //
3520   // typedef struct {
3521   //  char gpr;     /* index into the array of 8 GPRs
3522   //                 * stored in the register save area
3523   //                 * gpr=0 corresponds to r3,
3524   //                 * gpr=1 to r4, etc.
3525   //                 */
3526   //  char fpr;     /* index into the array of 8 FPRs
3527   //                 * stored in the register save area
3528   //                 * fpr=0 corresponds to f1,
3529   //                 * fpr=1 to f2, etc.
3530   //                 */
3531   //  char *overflow_arg_area;
3532   //                /* location on stack that holds
3533   //                 * the next overflow argument
3534   //                 */
3535   //  char *reg_save_area;
3536   //               /* where r3:r10 and f1:f8 (if saved)
3537   //                * are stored
3538   //                */
3539   // } va_list[1];
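  //
  // The resulting byte layout is (illustrative): gpr at offset 0, fpr at
  // offset 1, two bytes of padding, overflow_arg_area at offset 4, and
  // reg_save_area at offset 8, for 12 bytes total (see LowerVACOPY).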
3540 
3541   SDValue ArgGPR = DAG.getConstant(FuncInfo->getVarArgsNumGPR(), dl, MVT::i32);
3542   SDValue ArgFPR = DAG.getConstant(FuncInfo->getVarArgsNumFPR(), dl, MVT::i32);
3543   SDValue StackOffsetFI = DAG.getFrameIndex(FuncInfo->getVarArgsStackOffset(),
3544                                             PtrVT);
3545   SDValue FR = DAG.getFrameIndex(FuncInfo->getVarArgsFrameIndex(),
3546                                  PtrVT);
3547 
3548   uint64_t FrameOffset = PtrVT.getSizeInBits()/8;
3549   SDValue ConstFrameOffset = DAG.getConstant(FrameOffset, dl, PtrVT);
3550 
3551   uint64_t StackOffset = PtrVT.getSizeInBits()/8 - 1;
3552   SDValue ConstStackOffset = DAG.getConstant(StackOffset, dl, PtrVT);
3553 
3554   uint64_t FPROffset = 1;
3555   SDValue ConstFPROffset = DAG.getConstant(FPROffset, dl, PtrVT);
3556 
3557   const Value *SV = cast<SrcValueSDNode>(Op.getOperand(2))->getValue();
3558 
3559   // Store first byte : number of int regs
3560   SDValue firstStore =
3561       DAG.getTruncStore(Op.getOperand(0), dl, ArgGPR, Op.getOperand(1),
3562                         MachinePointerInfo(SV), MVT::i8);
3563   uint64_t nextOffset = FPROffset;
3564   SDValue nextPtr = DAG.getNode(ISD::ADD, dl, PtrVT, Op.getOperand(1),
3565                                   ConstFPROffset);
3566 
3567   // Store second byte : number of float regs
3568   SDValue secondStore =
3569       DAG.getTruncStore(firstStore, dl, ArgFPR, nextPtr,
3570                         MachinePointerInfo(SV, nextOffset), MVT::i8);
3571   nextOffset += StackOffset;
3572   nextPtr = DAG.getNode(ISD::ADD, dl, PtrVT, nextPtr, ConstStackOffset);
3573 
3574   // Store second word : arguments given on stack
3575   SDValue thirdStore = DAG.getStore(secondStore, dl, StackOffsetFI, nextPtr,
3576                                     MachinePointerInfo(SV, nextOffset));
3577   nextOffset += FrameOffset;
3578   nextPtr = DAG.getNode(ISD::ADD, dl, PtrVT, nextPtr, ConstFrameOffset);
3579 
3580   // Store third word : arguments given in registers
3581   return DAG.getStore(thirdStore, dl, FR, nextPtr,
3582                       MachinePointerInfo(SV, nextOffset));
3583 }
3584 
3585 /// FPR - The set of FP registers that should be allocated for arguments
3586 /// on Darwin and AIX.
3587 static const MCPhysReg FPR[] = {PPC::F1,  PPC::F2,  PPC::F3, PPC::F4, PPC::F5,
3588                                 PPC::F6,  PPC::F7,  PPC::F8, PPC::F9, PPC::F10,
3589                                 PPC::F11, PPC::F12, PPC::F13};
3590 
3591 /// CalculateStackSlotSize - Calculates the size reserved for this argument on
3592 /// the stack.
3593 static unsigned CalculateStackSlotSize(EVT ArgVT, ISD::ArgFlagsTy Flags,
3594                                        unsigned PtrByteSize) {
3595   unsigned ArgSize = ArgVT.getStoreSize();
3596   if (Flags.isByVal())
3597     ArgSize = Flags.getByValSize();
3598 
3599   // Round up to multiples of the pointer size, except for array members,
3600   // which are always packed.
3601   if (!Flags.isInConsecutiveRegs())
3602     ArgSize = ((ArgSize + PtrByteSize - 1)/PtrByteSize) * PtrByteSize;
3603 
3604   return ArgSize;
3605 }
3606 
3607 /// CalculateStackSlotAlignment - Calculates the alignment of this argument
3608 /// on the stack.
3609 static Align CalculateStackSlotAlignment(EVT ArgVT, EVT OrigVT,
3610                                          ISD::ArgFlagsTy Flags,
3611                                          unsigned PtrByteSize) {
3612   Align Alignment(PtrByteSize);
3613 
3614   // Altivec parameters are padded to a 16 byte boundary.
3615   if (ArgVT == MVT::v4f32 || ArgVT == MVT::v4i32 ||
3616       ArgVT == MVT::v8i16 || ArgVT == MVT::v16i8 ||
3617       ArgVT == MVT::v2f64 || ArgVT == MVT::v2i64 ||
3618       ArgVT == MVT::v1i128 || ArgVT == MVT::f128)
3619     Alignment = Align(16);
3620 
3621   // ByVal parameters are aligned as requested.
3622   if (Flags.isByVal()) {
3623     auto BVAlign = Flags.getNonZeroByValAlign();
3624     if (BVAlign > PtrByteSize) {
3625       if (BVAlign.value() % PtrByteSize != 0)
3626         llvm_unreachable(
3627             "ByVal alignment is not a multiple of the pointer size");
3628 
3629       Alignment = BVAlign;
3630     }
3631   }
3632 
3633   // Array members are always packed to their original alignment.
3634   if (Flags.isInConsecutiveRegs()) {
3635     // If the array member was split into multiple registers, the first
3636     // needs to be aligned to the size of the full type.  (Except for
3637     // ppcf128, which is only aligned as its f64 components.)
3638     if (Flags.isSplit() && OrigVT != MVT::ppcf128)
3639       Alignment = Align(OrigVT.getStoreSize());
3640     else
3641       Alignment = Align(ArgVT.getStoreSize());
3642   }
3643 
3644   return Alignment;
3645 }
3646 
3647 /// CalculateStackSlotUsed - Return whether this argument will use its
3648 /// stack slot (instead of being passed in registers).  ArgOffset,
3649 /// AvailableFPRs, and AvailableVRs must hold the current argument
3650 /// position, and will be updated to account for this argument.
3651 static bool CalculateStackSlotUsed(EVT ArgVT, EVT OrigVT, ISD::ArgFlagsTy Flags,
3652                                    unsigned PtrByteSize, unsigned LinkageSize,
3653                                    unsigned ParamAreaSize, unsigned &ArgOffset,
3654                                    unsigned &AvailableFPRs,
3655                                    unsigned &AvailableVRs) {
3656   bool UseMemory = false;
3657 
3658   // Respect alignment of argument on the stack.
3659   Align Alignment =
3660       CalculateStackSlotAlignment(ArgVT, OrigVT, Flags, PtrByteSize);
3661   ArgOffset = alignTo(ArgOffset, Alignment);
3662   // If there's no space left in the argument save area, we must
3663   // use memory (this check also catches zero-sized arguments).
3664   if (ArgOffset >= LinkageSize + ParamAreaSize)
3665     UseMemory = true;
3666 
3667   // Allocate argument on the stack.
3668   ArgOffset += CalculateStackSlotSize(ArgVT, Flags, PtrByteSize);
3669   if (Flags.isInConsecutiveRegsLast())
3670     ArgOffset = ((ArgOffset + PtrByteSize - 1)/PtrByteSize) * PtrByteSize;
3671   // If we overran the argument save area, we must use memory
3672   // (this check catches arguments passed partially in memory)
3673   if (ArgOffset > LinkageSize + ParamAreaSize)
3674     UseMemory = true;
3675 
3676   // However, if the argument is actually passed in an FPR or a VR,
3677   // we don't use memory after all.
3678   if (!Flags.isByVal()) {
3679     if (ArgVT == MVT::f32 || ArgVT == MVT::f64)
3680       if (AvailableFPRs > 0) {
3681         --AvailableFPRs;
3682         return false;
3683       }
3684     if (ArgVT == MVT::v4f32 || ArgVT == MVT::v4i32 ||
3685         ArgVT == MVT::v8i16 || ArgVT == MVT::v16i8 ||
3686         ArgVT == MVT::v2f64 || ArgVT == MVT::v2i64 ||
3687         ArgVT == MVT::v1i128 || ArgVT == MVT::f128)
3688       if (AvailableVRs > 0) {
3689         --AvailableVRs;
3690         return false;
3691       }
3692   }
3693 
3694   return UseMemory;
3695 }
3696 
3697 /// EnsureStackAlignment - Round stack frame size up from NumBytes to
3698 /// ensure minimum alignment required for target.
3699 static unsigned EnsureStackAlignment(const PPCFrameLowering *Lowering,
3700                                      unsigned NumBytes) {
3701   return alignTo(NumBytes, Lowering->getStackAlign());
3702 }
3703 
3704 SDValue PPCTargetLowering::LowerFormalArguments(
3705     SDValue Chain, CallingConv::ID CallConv, bool isVarArg,
3706     const SmallVectorImpl<ISD::InputArg> &Ins, const SDLoc &dl,
3707     SelectionDAG &DAG, SmallVectorImpl<SDValue> &InVals) const {
3708   if (Subtarget.isAIXABI())
3709     return LowerFormalArguments_AIX(Chain, CallConv, isVarArg, Ins, dl, DAG,
3710                                     InVals);
3711   if (Subtarget.is64BitELFABI())
3712     return LowerFormalArguments_64SVR4(Chain, CallConv, isVarArg, Ins, dl, DAG,
3713                                        InVals);
3714   if (Subtarget.is32BitELFABI())
3715     return LowerFormalArguments_32SVR4(Chain, CallConv, isVarArg, Ins, dl, DAG,
3716                                        InVals);
3717 
3718   return LowerFormalArguments_Darwin(Chain, CallConv, isVarArg, Ins, dl, DAG,
3719                                      InVals);
3720 }
3721 
3722 SDValue PPCTargetLowering::LowerFormalArguments_32SVR4(
3723     SDValue Chain, CallingConv::ID CallConv, bool isVarArg,
3724     const SmallVectorImpl<ISD::InputArg> &Ins, const SDLoc &dl,
3725     SelectionDAG &DAG, SmallVectorImpl<SDValue> &InVals) const {
3726 
3727   // 32-bit SVR4 ABI Stack Frame Layout:
3728   //              +-----------------------------------+
3729   //        +-->  |            Back chain             |
3730   //        |     +-----------------------------------+
3731   //        |     | Floating-point register save area |
3732   //        |     +-----------------------------------+
3733   //        |     |    General register save area     |
3734   //        |     +-----------------------------------+
3735   //        |     |          CR save word             |
3736   //        |     +-----------------------------------+
3737   //        |     |         VRSAVE save word          |
3738   //        |     +-----------------------------------+
3739   //        |     |         Alignment padding         |
3740   //        |     +-----------------------------------+
3741   //        |     |     Vector register save area     |
3742   //        |     +-----------------------------------+
3743   //        |     |       Local variable space        |
3744   //        |     +-----------------------------------+
3745   //        |     |        Parameter list area        |
3746   //        |     +-----------------------------------+
3747   //        |     |           LR save word            |
3748   //        |     +-----------------------------------+
3749   // SP-->  +---  |            Back chain             |
3750   //              +-----------------------------------+
3751   //
3752   // Specifications:
3753   //   System V Application Binary Interface PowerPC Processor Supplement
3754   //   AltiVec Technology Programming Interface Manual
3755 
3756   MachineFunction &MF = DAG.getMachineFunction();
3757   MachineFrameInfo &MFI = MF.getFrameInfo();
3758   PPCFunctionInfo *FuncInfo = MF.getInfo<PPCFunctionInfo>();
3759 
3760   EVT PtrVT = getPointerTy(MF.getDataLayout());
3761   // Potential tail calls could cause overwriting of argument stack slots.
3762   bool isImmutable = !(getTargetMachine().Options.GuaranteedTailCallOpt &&
3763                        (CallConv == CallingConv::Fast));
3764   const Align PtrAlign(4);
3765 
3766   // Assign locations to all of the incoming arguments.
3767   SmallVector<CCValAssign, 16> ArgLocs;
  PPCCCState CCInfo(CallConv, isVarArg, DAG.getMachineFunction(), ArgLocs,
                    *DAG.getContext());
3770 
3771   // Reserve space for the linkage area on the stack.
3772   unsigned LinkageSize = Subtarget.getFrameLowering()->getLinkageSize();
3773   CCInfo.AllocateStack(LinkageSize, PtrAlign);
3774   if (useSoftFloat())
3775     CCInfo.PreAnalyzeFormalArguments(Ins);
3776 
3777   CCInfo.AnalyzeFormalArguments(Ins, CC_PPC32_SVR4);
3778   CCInfo.clearWasPPCF128();
3779 
3780   for (unsigned i = 0, e = ArgLocs.size(); i != e; ++i) {
3781     CCValAssign &VA = ArgLocs[i];
3782 
3783     // Arguments stored in registers.
3784     if (VA.isRegLoc()) {
3785       const TargetRegisterClass *RC;
3786       EVT ValVT = VA.getValVT();
3787 
3788       switch (ValVT.getSimpleVT().SimpleTy) {
3789         default:
3790           llvm_unreachable("ValVT not supported by formal arguments Lowering");
3791         case MVT::i1:
3792         case MVT::i32:
3793           RC = &PPC::GPRCRegClass;
3794           break;
3795         case MVT::f32:
3796           if (Subtarget.hasP8Vector())
3797             RC = &PPC::VSSRCRegClass;
3798           else if (Subtarget.hasSPE())
3799             RC = &PPC::GPRCRegClass;
3800           else
3801             RC = &PPC::F4RCRegClass;
3802           break;
3803         case MVT::f64:
3804           if (Subtarget.hasVSX())
3805             RC = &PPC::VSFRCRegClass;
3806           else if (Subtarget.hasSPE())
3807             // SPE passes doubles in GPR pairs.
3808             RC = &PPC::GPRCRegClass;
3809           else
3810             RC = &PPC::F8RCRegClass;
3811           break;
3812         case MVT::v16i8:
3813         case MVT::v8i16:
3814         case MVT::v4i32:
3815           RC = &PPC::VRRCRegClass;
3816           break;
3817         case MVT::v4f32:
3818           RC = &PPC::VRRCRegClass;
3819           break;
3820         case MVT::v2f64:
3821         case MVT::v2i64:
3822           RC = &PPC::VRRCRegClass;
3823           break;
3824       }
3825 
3826       SDValue ArgValue;
3827       // Transform the arguments stored in physical registers into
3828       // virtual ones.
3829       if (VA.getLocVT() == MVT::f64 && Subtarget.hasSPE()) {
3830         assert(i + 1 < e && "No second half of double precision argument");
3831         unsigned RegLo = MF.addLiveIn(VA.getLocReg(), RC);
3832         unsigned RegHi = MF.addLiveIn(ArgLocs[++i].getLocReg(), RC);
3833         SDValue ArgValueLo = DAG.getCopyFromReg(Chain, dl, RegLo, MVT::i32);
3834         SDValue ArgValueHi = DAG.getCopyFromReg(Chain, dl, RegHi, MVT::i32);
3835         if (!Subtarget.isLittleEndian())
          std::swap(ArgValueLo, ArgValueHi);
3837         ArgValue = DAG.getNode(PPCISD::BUILD_SPE64, dl, MVT::f64, ArgValueLo,
3838                                ArgValueHi);
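        // Illustrative SPE case: an f64 arriving in the GPR pair r3/r4 is
        // reassembled here via BUILD_SPE64; on big-endian targets the first
        // GPR of the pair holds the high half, hence the swap above.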
3839       } else {
3840         unsigned Reg = MF.addLiveIn(VA.getLocReg(), RC);
3841         ArgValue = DAG.getCopyFromReg(Chain, dl, Reg,
3842                                       ValVT == MVT::i1 ? MVT::i32 : ValVT);
3843         if (ValVT == MVT::i1)
3844           ArgValue = DAG.getNode(ISD::TRUNCATE, dl, MVT::i1, ArgValue);
3845       }
3846 
3847       InVals.push_back(ArgValue);
3848     } else {
3849       // Argument stored in memory.
3850       assert(VA.isMemLoc());
3851 
      // Get the extended size the argument occupies on the stack.
      unsigned ArgSize = VA.getLocVT().getStoreSize();
      // Get the actual size of the argument type.
3855       unsigned ObjSize = VA.getValVT().getStoreSize();
3856       unsigned ArgOffset = VA.getLocMemOffset();
3857       // Stack objects in PPC32 are right justified.
3858       ArgOffset += ArgSize - ObjSize;
3859       int FI = MFI.CreateFixedObject(ArgSize, ArgOffset, isImmutable);
3860 
3861       // Create load nodes to retrieve arguments from the stack.
3862       SDValue FIN = DAG.getFrameIndex(FI, PtrVT);
3863       InVals.push_back(
3864           DAG.getLoad(VA.getValVT(), dl, Chain, FIN, MachinePointerInfo()));
3865     }
3866   }
3867 
3868   // Assign locations to all of the incoming aggregate by value arguments.
3869   // Aggregates passed by value are stored in the local variable space of the
3870   // caller's stack frame, right above the parameter list area.
3871   SmallVector<CCValAssign, 16> ByValArgLocs;
3872   CCState CCByValInfo(CallConv, isVarArg, DAG.getMachineFunction(),
3873                       ByValArgLocs, *DAG.getContext());
3874 
3875   // Reserve stack space for the allocations in CCInfo.
3876   CCByValInfo.AllocateStack(CCInfo.getNextStackOffset(), PtrAlign);
3877 
3878   CCByValInfo.AnalyzeFormalArguments(Ins, CC_PPC32_SVR4_ByVal);
3879 
3880   // Area that is at least reserved in the caller of this function.
3881   unsigned MinReservedArea = CCByValInfo.getNextStackOffset();
3882   MinReservedArea = std::max(MinReservedArea, LinkageSize);
3883 
3884   // Set the size that is at least reserved in caller of this function.  Tail
3885   // call optimized function's reserved stack space needs to be aligned so that
3886   // taking the difference between two stack areas will result in an aligned
3887   // stack.
3888   MinReservedArea =
3889       EnsureStackAlignment(Subtarget.getFrameLowering(), MinReservedArea);
3890   FuncInfo->setMinReservedArea(MinReservedArea);
3891 
3892   SmallVector<SDValue, 8> MemOps;
3893 
3894   // If the function takes variable number of arguments, make a frame index for
3895   // the start of the first vararg value... for expansion of llvm.va_start.
3896   if (isVarArg) {
3897     static const MCPhysReg GPArgRegs[] = {
3898       PPC::R3, PPC::R4, PPC::R5, PPC::R6,
3899       PPC::R7, PPC::R8, PPC::R9, PPC::R10,
3900     };
3901     const unsigned NumGPArgRegs = array_lengthof(GPArgRegs);
3902 
3903     static const MCPhysReg FPArgRegs[] = {
3904       PPC::F1, PPC::F2, PPC::F3, PPC::F4, PPC::F5, PPC::F6, PPC::F7,
3905       PPC::F8
3906     };
3907     unsigned NumFPArgRegs = array_lengthof(FPArgRegs);
3908 
3909     if (useSoftFloat() || hasSPE())
3910        NumFPArgRegs = 0;
3911 
3912     FuncInfo->setVarArgsNumGPR(CCInfo.getFirstUnallocated(GPArgRegs));
3913     FuncInfo->setVarArgsNumFPR(CCInfo.getFirstUnallocated(FPArgRegs));
3914 
3915     // Make room for NumGPArgRegs and NumFPArgRegs.
3916     int Depth = NumGPArgRegs * PtrVT.getSizeInBits()/8 +
3917                 NumFPArgRegs * MVT(MVT::f64).getSizeInBits()/8;
3918 
3919     FuncInfo->setVarArgsStackOffset(
3920       MFI.CreateFixedObject(PtrVT.getSizeInBits()/8,
3921                             CCInfo.getNextStackOffset(), true));
3922 
3923     FuncInfo->setVarArgsFrameIndex(
3924         MFI.CreateStackObject(Depth, Align(8), false));
3925     SDValue FIN = DAG.getFrameIndex(FuncInfo->getVarArgsFrameIndex(), PtrVT);
3926 
3927     // The fixed integer arguments of a variadic function are stored to the
3928     // VarArgsFrameIndex on the stack so that they may be loaded by
3929     // dereferencing the result of va_next.
3930     for (unsigned GPRIndex = 0; GPRIndex != NumGPArgRegs; ++GPRIndex) {
3931       // Get an existing live-in vreg, or add a new one.
3932       unsigned VReg = MF.getRegInfo().getLiveInVirtReg(GPArgRegs[GPRIndex]);
3933       if (!VReg)
3934         VReg = MF.addLiveIn(GPArgRegs[GPRIndex], &PPC::GPRCRegClass);
3935 
3936       SDValue Val = DAG.getCopyFromReg(Chain, dl, VReg, PtrVT);
3937       SDValue Store =
3938           DAG.getStore(Val.getValue(1), dl, Val, FIN, MachinePointerInfo());
3939       MemOps.push_back(Store);
3940       // Increment the address by four for the next argument to store
3941       SDValue PtrOff = DAG.getConstant(PtrVT.getSizeInBits()/8, dl, PtrVT);
3942       FIN = DAG.getNode(ISD::ADD, dl, PtrOff.getValueType(), FIN, PtrOff);
3943     }
3944 
3945     // FIXME 32-bit SVR4: We only need to save FP argument registers if CR bit 6
3946     // is set.
3947     // The double arguments are stored to the VarArgsFrameIndex
3948     // on the stack.
3949     for (unsigned FPRIndex = 0; FPRIndex != NumFPArgRegs; ++FPRIndex) {
3950       // Get an existing live-in vreg, or add a new one.
3951       unsigned VReg = MF.getRegInfo().getLiveInVirtReg(FPArgRegs[FPRIndex]);
3952       if (!VReg)
3953         VReg = MF.addLiveIn(FPArgRegs[FPRIndex], &PPC::F8RCRegClass);
3954 
3955       SDValue Val = DAG.getCopyFromReg(Chain, dl, VReg, MVT::f64);
3956       SDValue Store =
3957           DAG.getStore(Val.getValue(1), dl, Val, FIN, MachinePointerInfo());
3958       MemOps.push_back(Store);
3959       // Increment the address by eight for the next argument to store
3960       SDValue PtrOff = DAG.getConstant(MVT(MVT::f64).getSizeInBits()/8, dl,
3961                                          PtrVT);
3962       FIN = DAG.getNode(ISD::ADD, dl, PtrOff.getValueType(), FIN, PtrOff);
3963     }
3964   }
3965 
3966   if (!MemOps.empty())
3967     Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, MemOps);
3968 
3969   return Chain;
3970 }
3971 
3972 // PPC64 passes i8, i16, and i32 values in i64 registers. Promote
3973 // value to MVT::i64 and then truncate to the correct register size.
3974 SDValue PPCTargetLowering::extendArgForPPC64(ISD::ArgFlagsTy Flags,
3975                                              EVT ObjectVT, SelectionDAG &DAG,
3976                                              SDValue ArgVal,
3977                                              const SDLoc &dl) const {
3978   if (Flags.isSExt())
3979     ArgVal = DAG.getNode(ISD::AssertSext, dl, MVT::i64, ArgVal,
3980                          DAG.getValueType(ObjectVT));
3981   else if (Flags.isZExt())
3982     ArgVal = DAG.getNode(ISD::AssertZext, dl, MVT::i64, ArgVal,
3983                          DAG.getValueType(ObjectVT));
3984 
3985   return DAG.getNode(ISD::TRUNCATE, dl, ObjectVT, ArgVal);
3986 }
3987 
3988 SDValue PPCTargetLowering::LowerFormalArguments_64SVR4(
3989     SDValue Chain, CallingConv::ID CallConv, bool isVarArg,
3990     const SmallVectorImpl<ISD::InputArg> &Ins, const SDLoc &dl,
3991     SelectionDAG &DAG, SmallVectorImpl<SDValue> &InVals) const {
3992   // TODO: add description of PPC stack frame format, or at least some docs.
3993   //
3994   bool isELFv2ABI = Subtarget.isELFv2ABI();
3995   bool isLittleEndian = Subtarget.isLittleEndian();
3996   MachineFunction &MF = DAG.getMachineFunction();
3997   MachineFrameInfo &MFI = MF.getFrameInfo();
3998   PPCFunctionInfo *FuncInfo = MF.getInfo<PPCFunctionInfo>();
3999 
4000   assert(!(CallConv == CallingConv::Fast && isVarArg) &&
4001          "fastcc not supported on varargs functions");
4002 
4003   EVT PtrVT = getPointerTy(MF.getDataLayout());
4004   // Potential tail calls could cause overwriting of argument stack slots.
4005   bool isImmutable = !(getTargetMachine().Options.GuaranteedTailCallOpt &&
4006                        (CallConv == CallingConv::Fast));
4007   unsigned PtrByteSize = 8;
4008   unsigned LinkageSize = Subtarget.getFrameLowering()->getLinkageSize();
4009 
4010   static const MCPhysReg GPR[] = {
4011     PPC::X3, PPC::X4, PPC::X5, PPC::X6,
4012     PPC::X7, PPC::X8, PPC::X9, PPC::X10,
4013   };
4014   static const MCPhysReg VR[] = {
4015     PPC::V2, PPC::V3, PPC::V4, PPC::V5, PPC::V6, PPC::V7, PPC::V8,
4016     PPC::V9, PPC::V10, PPC::V11, PPC::V12, PPC::V13
4017   };
4018 
4019   const unsigned Num_GPR_Regs = array_lengthof(GPR);
4020   const unsigned Num_FPR_Regs = useSoftFloat() ? 0 : 13;
4021   const unsigned Num_VR_Regs  = array_lengthof(VR);
4022 
4023   // Do a first pass over the arguments to determine whether the ABI
4024   // guarantees that our caller has allocated the parameter save area
4025   // on its stack frame.  In the ELFv1 ABI, this is always the case;
4026   // in the ELFv2 ABI, it is true if this is a vararg function or if
4027   // any parameter is located in a stack slot.
4028 
4029   bool HasParameterArea = !isELFv2ABI || isVarArg;
4030   unsigned ParamAreaSize = Num_GPR_Regs * PtrByteSize;
4031   unsigned NumBytes = LinkageSize;
4032   unsigned AvailableFPRs = Num_FPR_Regs;
4033   unsigned AvailableVRs = Num_VR_Regs;
4034   for (unsigned i = 0, e = Ins.size(); i != e; ++i) {
4035     if (Ins[i].Flags.isNest())
4036       continue;
4037 
4038     if (CalculateStackSlotUsed(Ins[i].VT, Ins[i].ArgVT, Ins[i].Flags,
4039                                PtrByteSize, LinkageSize, ParamAreaSize,
4040                                NumBytes, AvailableFPRs, AvailableVRs))
4041       HasParameterArea = true;
4042   }
4043 
4044   // Add DAG nodes to load the arguments or copy them out of registers.  On
4045   // entry to a function on PPC, the arguments start after the linkage area,
4046   // although the first ones are often in registers.
4047 
4048   unsigned ArgOffset = LinkageSize;
4049   unsigned GPR_idx = 0, FPR_idx = 0, VR_idx = 0;
4050   SmallVector<SDValue, 8> MemOps;
4051   Function::const_arg_iterator FuncArg = MF.getFunction().arg_begin();
4052   unsigned CurArgIdx = 0;
4053   for (unsigned ArgNo = 0, e = Ins.size(); ArgNo != e; ++ArgNo) {
4054     SDValue ArgVal;
4055     bool needsLoad = false;
4056     EVT ObjectVT = Ins[ArgNo].VT;
4057     EVT OrigVT = Ins[ArgNo].ArgVT;
4058     unsigned ObjSize = ObjectVT.getStoreSize();
4059     unsigned ArgSize = ObjSize;
4060     ISD::ArgFlagsTy Flags = Ins[ArgNo].Flags;
4061     if (Ins[ArgNo].isOrigArg()) {
4062       std::advance(FuncArg, Ins[ArgNo].getOrigArgIndex() - CurArgIdx);
4063       CurArgIdx = Ins[ArgNo].getOrigArgIndex();
4064     }
    // We re-align the argument offset for each argument, except when using the
    // fast calling convention, where we must make sure to do so only when
    // we'll actually use a stack slot.
4068     unsigned CurArgOffset;
4069     Align Alignment;
4070     auto ComputeArgOffset = [&]() {
4071       /* Respect alignment of argument on the stack.  */
4072       Alignment =
4073           CalculateStackSlotAlignment(ObjectVT, OrigVT, Flags, PtrByteSize);
4074       ArgOffset = alignTo(ArgOffset, Alignment);
4075       CurArgOffset = ArgOffset;
4076     };
4077 
4078     if (CallConv != CallingConv::Fast) {
4079       ComputeArgOffset();
4080 
4081       /* Compute GPR index associated with argument offset.  */
4082       GPR_idx = (ArgOffset - LinkageSize) / PtrByteSize;
4083       GPR_idx = std::min(GPR_idx, Num_GPR_Regs);
4084     }
4085 
4086     // FIXME the codegen can be much improved in some cases.
4087     // We do not have to keep everything in memory.
4088     if (Flags.isByVal()) {
4089       assert(Ins[ArgNo].isOrigArg() && "Byval arguments cannot be implicit");
4090 
4091       if (CallConv == CallingConv::Fast)
4092         ComputeArgOffset();
4093 
      // ObjSize is the true size; ArgSize is ObjSize rounded up to a
      // multiple of registers.
4095       ObjSize = Flags.getByValSize();
4096       ArgSize = ((ObjSize + PtrByteSize - 1)/PtrByteSize) * PtrByteSize;
4097       // Empty aggregate parameters do not take up registers.  Examples:
4098       //   struct { } a;
4099       //   union  { } b;
4100       //   int c[0];
4101       // etc.  However, we have to provide a place-holder in InVals, so
4102       // pretend we have an 8-byte item at the current address for that
4103       // purpose.
4104       if (!ObjSize) {
4105         int FI = MFI.CreateFixedObject(PtrByteSize, ArgOffset, true);
4106         SDValue FIN = DAG.getFrameIndex(FI, PtrVT);
4107         InVals.push_back(FIN);
4108         continue;
4109       }
4110 
4111       // Create a stack object covering all stack doublewords occupied
4112       // by the argument.  If the argument is (fully or partially) on
4113       // the stack, or if the argument is fully in registers but the
      // caller has allocated the parameter save area anyway, we can refer
4115       // directly to the caller's stack frame.  Otherwise, create a
4116       // local copy in our own frame.
4117       int FI;
4118       if (HasParameterArea ||
4119           ArgSize + ArgOffset > LinkageSize + Num_GPR_Regs * PtrByteSize)
4120         FI = MFI.CreateFixedObject(ArgSize, ArgOffset, false, true);
4121       else
4122         FI = MFI.CreateStackObject(ArgSize, Alignment, false);
4123       SDValue FIN = DAG.getFrameIndex(FI, PtrVT);
4124 
4125       // Handle aggregates smaller than 8 bytes.
4126       if (ObjSize < PtrByteSize) {
4127         // The value of the object is its address, which differs from the
4128         // address of the enclosing doubleword on big-endian systems.
4129         SDValue Arg = FIN;
4130         if (!isLittleEndian) {
4131           SDValue ArgOff = DAG.getConstant(PtrByteSize - ObjSize, dl, PtrVT);
4132           Arg = DAG.getNode(ISD::ADD, dl, ArgOff.getValueType(), Arg, ArgOff);
4133         }
4134         InVals.push_back(Arg);
4135 
4136         if (GPR_idx != Num_GPR_Regs) {
4137           unsigned VReg = MF.addLiveIn(GPR[GPR_idx++], &PPC::G8RCRegClass);
4138           FuncInfo->addLiveInAttr(VReg, Flags);
4139           SDValue Val = DAG.getCopyFromReg(Chain, dl, VReg, PtrVT);
4140           SDValue Store;
4141 
4142           if (ObjSize==1 || ObjSize==2 || ObjSize==4) {
4143             EVT ObjType = (ObjSize == 1 ? MVT::i8 :
4144                            (ObjSize == 2 ? MVT::i16 : MVT::i32));
4145             Store = DAG.getTruncStore(Val.getValue(1), dl, Val, Arg,
4146                                       MachinePointerInfo(&*FuncArg), ObjType);
4147           } else {
4148             // For sizes that don't fit a truncating store (3, 5, 6, 7),
4149             // store the whole register as-is to the parameter save area
4150             // slot.
4151             Store = DAG.getStore(Val.getValue(1), dl, Val, FIN,
4152                                  MachinePointerInfo(&*FuncArg));
4153           }
4154 
4155           MemOps.push_back(Store);
4156         }
4157         // Whether we copied from a register or not, advance the offset
4158         // into the parameter save area by a full doubleword.
4159         ArgOffset += PtrByteSize;
4160         continue;
4161       }
4162 
4163       // The value of the object is its address, which is the address of
4164       // its first stack doubleword.
4165       InVals.push_back(FIN);
4166 
4167       // Store whatever pieces of the object are in registers to memory.
4168       for (unsigned j = 0; j < ArgSize; j += PtrByteSize) {
4169         if (GPR_idx == Num_GPR_Regs)
4170           break;
4171 
4172         unsigned VReg = MF.addLiveIn(GPR[GPR_idx], &PPC::G8RCRegClass);
4173         FuncInfo->addLiveInAttr(VReg, Flags);
4174         SDValue Val = DAG.getCopyFromReg(Chain, dl, VReg, PtrVT);
4175         SDValue Addr = FIN;
4176         if (j) {
4177           SDValue Off = DAG.getConstant(j, dl, PtrVT);
4178           Addr = DAG.getNode(ISD::ADD, dl, Off.getValueType(), Addr, Off);
4179         }
4180         SDValue Store = DAG.getStore(Val.getValue(1), dl, Val, Addr,
4181                                      MachinePointerInfo(&*FuncArg, j));
4182         MemOps.push_back(Store);
4183         ++GPR_idx;
4184       }
4185       ArgOffset += ArgSize;
4186       continue;
4187     }
4188 
4189     switch (ObjectVT.getSimpleVT().SimpleTy) {
4190     default: llvm_unreachable("Unhandled argument type!");
4191     case MVT::i1:
4192     case MVT::i32:
4193     case MVT::i64:
4194       if (Flags.isNest()) {
4195         // The 'nest' parameter, if any, is passed in R11.
4196         unsigned VReg = MF.addLiveIn(PPC::X11, &PPC::G8RCRegClass);
4197         ArgVal = DAG.getCopyFromReg(Chain, dl, VReg, MVT::i64);
4198 
4199         if (ObjectVT == MVT::i32 || ObjectVT == MVT::i1)
4200           ArgVal = extendArgForPPC64(Flags, ObjectVT, DAG, ArgVal, dl);
4201 
4202         break;
4203       }
4204 
4205       // These can be scalar arguments or elements of an integer array type
4206       // passed directly.  Clang may use those instead of "byval" aggregate
4207       // types to avoid forcing arguments to memory unnecessarily.
4208       if (GPR_idx != Num_GPR_Regs) {
4209         unsigned VReg = MF.addLiveIn(GPR[GPR_idx++], &PPC::G8RCRegClass);
4210         FuncInfo->addLiveInAttr(VReg, Flags);
4211         ArgVal = DAG.getCopyFromReg(Chain, dl, VReg, MVT::i64);
4212 
4213         if (ObjectVT == MVT::i32 || ObjectVT == MVT::i1)
4214           // PPC64 passes i8, i16, and i32 values in i64 registers. Promote
4215           // value to MVT::i64 and then truncate to the correct register size.
4216           ArgVal = extendArgForPPC64(Flags, ObjectVT, DAG, ArgVal, dl);
4217       } else {
4218         if (CallConv == CallingConv::Fast)
4219           ComputeArgOffset();
4220 
4221         needsLoad = true;
4222         ArgSize = PtrByteSize;
4223       }
4224       if (CallConv != CallingConv::Fast || needsLoad)
4225         ArgOffset += 8;
4226       break;
4227 
4228     case MVT::f32:
4229     case MVT::f64:
4230       // These can be scalar arguments or elements of a float array type
      // passed directly.  The latter are used to implement ELFv2 homogeneous
4232       // float aggregates.
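      // Illustrative ELFv2 example:
      //   struct HFA { float a, b, c, d; };
      // is lowered as four f32 arguments marked InConsecutiveRegs and lands
      // in four consecutive FPRs instead of being forced to memory.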
4233       if (FPR_idx != Num_FPR_Regs) {
4234         unsigned VReg;
4235 
4236         if (ObjectVT == MVT::f32)
4237           VReg = MF.addLiveIn(FPR[FPR_idx],
4238                               Subtarget.hasP8Vector()
4239                                   ? &PPC::VSSRCRegClass
4240                                   : &PPC::F4RCRegClass);
4241         else
4242           VReg = MF.addLiveIn(FPR[FPR_idx], Subtarget.hasVSX()
4243                                                 ? &PPC::VSFRCRegClass
4244                                                 : &PPC::F8RCRegClass);
4245 
4246         ArgVal = DAG.getCopyFromReg(Chain, dl, VReg, ObjectVT);
4247         ++FPR_idx;
4248       } else if (GPR_idx != Num_GPR_Regs && CallConv != CallingConv::Fast) {
4249         // FIXME: We may want to re-enable this for CallingConv::Fast on the P8
4250         // once we support fp <-> gpr moves.
4251 
4252         // This can only ever happen in the presence of f32 array types,
4253         // since otherwise we never run out of FPRs before running out
4254         // of GPRs.
4255         unsigned VReg = MF.addLiveIn(GPR[GPR_idx++], &PPC::G8RCRegClass);
4256         FuncInfo->addLiveInAttr(VReg, Flags);
4257         ArgVal = DAG.getCopyFromReg(Chain, dl, VReg, MVT::i64);
4258 
4259         if (ObjectVT == MVT::f32) {
4260           if ((ArgOffset % PtrByteSize) == (isLittleEndian ? 4 : 0))
4261             ArgVal = DAG.getNode(ISD::SRL, dl, MVT::i64, ArgVal,
4262                                  DAG.getConstant(32, dl, MVT::i32));
4263           ArgVal = DAG.getNode(ISD::TRUNCATE, dl, MVT::i32, ArgVal);
4264         }
4265 
4266         ArgVal = DAG.getNode(ISD::BITCAST, dl, ObjectVT, ArgVal);
4267       } else {
4268         if (CallConv == CallingConv::Fast)
4269           ComputeArgOffset();
4270 
4271         needsLoad = true;
4272       }
4273 
4274       // When passing an array of floats, the array occupies consecutive
4275       // space in the argument area; only round up to the next doubleword
4276       // at the end of the array.  Otherwise, each float takes 8 bytes.
4277       if (CallConv != CallingConv::Fast || needsLoad) {
4278         ArgSize = Flags.isInConsecutiveRegs() ? ObjSize : PtrByteSize;
4279         ArgOffset += ArgSize;
4280         if (Flags.isInConsecutiveRegsLast())
4281           ArgOffset = ((ArgOffset + PtrByteSize - 1)/PtrByteSize) * PtrByteSize;
4282       }
4283       break;
4284     case MVT::v4f32:
4285     case MVT::v4i32:
4286     case MVT::v8i16:
4287     case MVT::v16i8:
4288     case MVT::v2f64:
4289     case MVT::v2i64:
4290     case MVT::v1i128:
4291     case MVT::f128:
4292       // These can be scalar arguments or elements of a vector array type
      // passed directly.  The latter are used to implement ELFv2 homogeneous
4294       // vector aggregates.
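      // Illustrative ELFv2 example: a homogeneous aggregate of two vectors,
      //   struct HVA { vector int x, y; };
      // is lowered as two v4i32 arguments occupying consecutive VRs
      // (v2 and v3 when it is the first parameter).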
4295       if (VR_idx != Num_VR_Regs) {
4296         unsigned VReg = MF.addLiveIn(VR[VR_idx], &PPC::VRRCRegClass);
4297         ArgVal = DAG.getCopyFromReg(Chain, dl, VReg, ObjectVT);
4298         ++VR_idx;
4299       } else {
4300         if (CallConv == CallingConv::Fast)
4301           ComputeArgOffset();
4302         needsLoad = true;
4303       }
4304       if (CallConv != CallingConv::Fast || needsLoad)
4305         ArgOffset += 16;
4306       break;
4307     }
4308 
4309     // We need to load the argument to a virtual register if we determined
4310     // above that we ran out of physical registers of the appropriate type.
4311     if (needsLoad) {
4312       if (ObjSize < ArgSize && !isLittleEndian)
4313         CurArgOffset += ArgSize - ObjSize;
4314       int FI = MFI.CreateFixedObject(ObjSize, CurArgOffset, isImmutable);
4315       SDValue FIN = DAG.getFrameIndex(FI, PtrVT);
4316       ArgVal = DAG.getLoad(ObjectVT, dl, Chain, FIN, MachinePointerInfo());
4317     }
4318 
4319     InVals.push_back(ArgVal);
4320   }
4321 
4322   // Area that is at least reserved in the caller of this function.
4323   unsigned MinReservedArea;
4324   if (HasParameterArea)
4325     MinReservedArea = std::max(ArgOffset, LinkageSize + 8 * PtrByteSize);
4326   else
4327     MinReservedArea = LinkageSize;
4328 
4329   // Set the size that is at least reserved in caller of this function.  Tail
4330   // call optimized functions' reserved stack space needs to be aligned so that
4331   // taking the difference between two stack areas will result in an aligned
4332   // stack.
4333   MinReservedArea =
4334       EnsureStackAlignment(Subtarget.getFrameLowering(), MinReservedArea);
4335   FuncInfo->setMinReservedArea(MinReservedArea);
4336 
4337   // If the function takes variable number of arguments, make a frame index for
4338   // the start of the first vararg value... for expansion of llvm.va_start.
  // The ELFv2 ABI spec states:
4340   // C programs that are intended to be *portable* across different compilers
4341   // and architectures must use the header file <stdarg.h> to deal with variable
4342   // argument lists.
4343   if (isVarArg && MFI.hasVAStart()) {
4344     int Depth = ArgOffset;
4345 
4346     FuncInfo->setVarArgsFrameIndex(
4347       MFI.CreateFixedObject(PtrByteSize, Depth, true));
4348     SDValue FIN = DAG.getFrameIndex(FuncInfo->getVarArgsFrameIndex(), PtrVT);
4349 
4350     // If this function is vararg, store any remaining integer argument regs
4351     // to their spots on the stack so that they may be loaded by dereferencing
4352     // the result of va_next.
4353     for (GPR_idx = (ArgOffset - LinkageSize) / PtrByteSize;
4354          GPR_idx < Num_GPR_Regs; ++GPR_idx) {
4355       unsigned VReg = MF.addLiveIn(GPR[GPR_idx], &PPC::G8RCRegClass);
4356       SDValue Val = DAG.getCopyFromReg(Chain, dl, VReg, PtrVT);
4357       SDValue Store =
4358           DAG.getStore(Val.getValue(1), dl, Val, FIN, MachinePointerInfo());
4359       MemOps.push_back(Store);
      // Increment the address by the pointer size (eight bytes) for the next
      // argument to store.
4361       SDValue PtrOff = DAG.getConstant(PtrByteSize, dl, PtrVT);
4362       FIN = DAG.getNode(ISD::ADD, dl, PtrOff.getValueType(), FIN, PtrOff);
4363     }
4364   }
4365 
4366   if (!MemOps.empty())
4367     Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, MemOps);
4368 
4369   return Chain;
4370 }
4371 
4372 SDValue PPCTargetLowering::LowerFormalArguments_Darwin(
4373     SDValue Chain, CallingConv::ID CallConv, bool isVarArg,
4374     const SmallVectorImpl<ISD::InputArg> &Ins, const SDLoc &dl,
4375     SelectionDAG &DAG, SmallVectorImpl<SDValue> &InVals) const {
4376   // TODO: add description of PPC stack frame format, or at least some docs.
4377   //
4378   MachineFunction &MF = DAG.getMachineFunction();
4379   MachineFrameInfo &MFI = MF.getFrameInfo();
4380   PPCFunctionInfo *FuncInfo = MF.getInfo<PPCFunctionInfo>();
4381 
4382   EVT PtrVT = getPointerTy(MF.getDataLayout());
4383   bool isPPC64 = PtrVT == MVT::i64;
4384   // Potential tail calls could cause overwriting of argument stack slots.
4385   bool isImmutable = !(getTargetMachine().Options.GuaranteedTailCallOpt &&
4386                        (CallConv == CallingConv::Fast));
4387   unsigned PtrByteSize = isPPC64 ? 8 : 4;
4388   unsigned LinkageSize = Subtarget.getFrameLowering()->getLinkageSize();
4389   unsigned ArgOffset = LinkageSize;
4390   // Area that is at least reserved in caller of this function.
4391   unsigned MinReservedArea = ArgOffset;
4392 
4393   static const MCPhysReg GPR_32[] = {           // 32-bit registers.
4394     PPC::R3, PPC::R4, PPC::R5, PPC::R6,
4395     PPC::R7, PPC::R8, PPC::R9, PPC::R10,
4396   };
4397   static const MCPhysReg GPR_64[] = {           // 64-bit registers.
4398     PPC::X3, PPC::X4, PPC::X5, PPC::X6,
4399     PPC::X7, PPC::X8, PPC::X9, PPC::X10,
4400   };
4401   static const MCPhysReg VR[] = {
4402     PPC::V2, PPC::V3, PPC::V4, PPC::V5, PPC::V6, PPC::V7, PPC::V8,
4403     PPC::V9, PPC::V10, PPC::V11, PPC::V12, PPC::V13
4404   };
4405 
4406   const unsigned Num_GPR_Regs = array_lengthof(GPR_32);
4407   const unsigned Num_FPR_Regs = useSoftFloat() ? 0 : 13;
  const unsigned Num_VR_Regs  = array_lengthof(VR);
4409 
4410   unsigned GPR_idx = 0, FPR_idx = 0, VR_idx = 0;
4411 
4412   const MCPhysReg *GPR = isPPC64 ? GPR_64 : GPR_32;
4413 
  // In 32-bit non-varargs functions, the stack space for vectors is after the
  // stack space for non-vectors.  We do not use this space unless we have
  // too many vectors to fit in registers, something that only occurs in
  // constructed examples, but we still have to walk the argument list to
  // figure that out.  For that pathological case, compute VecArgOffset as
  // the start of the vector parameter area.  Computing VecArgOffset is the
  // entire point of the following loop.
4421   unsigned VecArgOffset = ArgOffset;
4422   if (!isVarArg && !isPPC64) {
4423     for (unsigned ArgNo = 0, e = Ins.size(); ArgNo != e;
4424          ++ArgNo) {
4425       EVT ObjectVT = Ins[ArgNo].VT;
4426       ISD::ArgFlagsTy Flags = Ins[ArgNo].Flags;
4427 
4428       if (Flags.isByVal()) {
        // ObjSize is the true size; ArgSize is ObjSize rounded up to a
        // multiple of registers.
4430         unsigned ObjSize = Flags.getByValSize();
4431         unsigned ArgSize =
4432                 ((ObjSize + PtrByteSize - 1)/PtrByteSize) * PtrByteSize;
4433         VecArgOffset += ArgSize;
4434         continue;
4435       }
4436 
      switch (ObjectVT.getSimpleVT().SimpleTy) {
4438       default: llvm_unreachable("Unhandled argument type!");
4439       case MVT::i1:
4440       case MVT::i32:
4441       case MVT::f32:
4442         VecArgOffset += 4;
4443         break;
4444       case MVT::i64:  // PPC64
4445       case MVT::f64:
4446         // FIXME: We are guaranteed to be !isPPC64 at this point.
4447         // Does MVT::i64 apply?
4448         VecArgOffset += 8;
4449         break;
4450       case MVT::v4f32:
4451       case MVT::v4i32:
4452       case MVT::v8i16:
4453       case MVT::v16i8:
        // Nothing to do, we're only looking at non-vector args here.
4455         break;
4456       }
4457     }
4458   }
4459   // We've found where the vector parameter area in memory is.  Skip the
4460   // first 12 parameters; these don't use that memory.
4461   VecArgOffset = ((VecArgOffset+15)/16)*16;
4462   VecArgOffset += 12*16;
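
  // Illustrative example (assuming Darwin's 24-byte 32-bit linkage area): for
  // a non-varargs function taking (int, double, vector float), the loop
  // above leaves VecArgOffset at 24 + 4 + 8 = 36, which rounds up to 48 and
  // then becomes 48 + 192 = 240 after skipping the twelve register-passed
  // vector parameter slots.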
4463 
4464   // Add DAG nodes to load the arguments or copy them out of registers.  On
4465   // entry to a function on PPC, the arguments start after the linkage area,
4466   // although the first ones are often in registers.
4467 
4468   SmallVector<SDValue, 8> MemOps;
4469   unsigned nAltivecParamsAtEnd = 0;
4470   Function::const_arg_iterator FuncArg = MF.getFunction().arg_begin();
4471   unsigned CurArgIdx = 0;
4472   for (unsigned ArgNo = 0, e = Ins.size(); ArgNo != e; ++ArgNo) {
4473     SDValue ArgVal;
4474     bool needsLoad = false;
4475     EVT ObjectVT = Ins[ArgNo].VT;
4476     unsigned ObjSize = ObjectVT.getSizeInBits()/8;
4477     unsigned ArgSize = ObjSize;
4478     ISD::ArgFlagsTy Flags = Ins[ArgNo].Flags;
4479     if (Ins[ArgNo].isOrigArg()) {
4480       std::advance(FuncArg, Ins[ArgNo].getOrigArgIndex() - CurArgIdx);
4481       CurArgIdx = Ins[ArgNo].getOrigArgIndex();
4482     }
4483     unsigned CurArgOffset = ArgOffset;
4484 
    // Varargs or 64-bit Altivec parameters are padded to a 16-byte boundary.
4486     if (ObjectVT==MVT::v4f32 || ObjectVT==MVT::v4i32 ||
4487         ObjectVT==MVT::v8i16 || ObjectVT==MVT::v16i8) {
4488       if (isVarArg || isPPC64) {
4489         MinReservedArea = ((MinReservedArea+15)/16)*16;
4490         MinReservedArea += CalculateStackSlotSize(ObjectVT,
4491                                                   Flags,
4492                                                   PtrByteSize);
      } else
        nAltivecParamsAtEnd++;
4494     } else
4495       // Calculate min reserved area.
4496       MinReservedArea += CalculateStackSlotSize(Ins[ArgNo].VT,
4497                                                 Flags,
4498                                                 PtrByteSize);
4499 
4500     // FIXME the codegen can be much improved in some cases.
4501     // We do not have to keep everything in memory.
4502     if (Flags.isByVal()) {
4503       assert(Ins[ArgNo].isOrigArg() && "Byval arguments cannot be implicit");
4504 
4505       // ObjSize is the true size, ArgSize rounded up to multiple of registers.
4506       ObjSize = Flags.getByValSize();
4507       ArgSize = ((ObjSize + PtrByteSize - 1)/PtrByteSize) * PtrByteSize;
4508       // Objects of size 1 and 2 are right justified, everything else is
4509       // left justified.  This means the memory address is adjusted forwards.
4510       if (ObjSize==1 || ObjSize==2) {
4511         CurArgOffset = CurArgOffset + (4 - ObjSize);
4512       }
4513       // The value of the object is its address.
4514       int FI = MFI.CreateFixedObject(ObjSize, CurArgOffset, false, true);
4515       SDValue FIN = DAG.getFrameIndex(FI, PtrVT);
4516       InVals.push_back(FIN);
4517       if (ObjSize==1 || ObjSize==2) {
4518         if (GPR_idx != Num_GPR_Regs) {
4519           unsigned VReg;
4520           if (isPPC64)
4521             VReg = MF.addLiveIn(GPR[GPR_idx], &PPC::G8RCRegClass);
4522           else
4523             VReg = MF.addLiveIn(GPR[GPR_idx], &PPC::GPRCRegClass);
4524           SDValue Val = DAG.getCopyFromReg(Chain, dl, VReg, PtrVT);
4525           EVT ObjType = ObjSize == 1 ? MVT::i8 : MVT::i16;
4526           SDValue Store =
4527               DAG.getTruncStore(Val.getValue(1), dl, Val, FIN,
4528                                 MachinePointerInfo(&*FuncArg), ObjType);
4529           MemOps.push_back(Store);
4530           ++GPR_idx;
4531         }
4532 
4533         ArgOffset += PtrByteSize;
4534 
4535         continue;
4536       }
4537       for (unsigned j = 0; j < ArgSize; j += PtrByteSize) {
4538         // Store whatever pieces of the object are in registers
4539         // to memory.  ArgOffset will be the address of the beginning
4540         // of the object.
4541         if (GPR_idx != Num_GPR_Regs) {
4542           unsigned VReg;
4543           if (isPPC64)
4544             VReg = MF.addLiveIn(GPR[GPR_idx], &PPC::G8RCRegClass);
4545           else
4546             VReg = MF.addLiveIn(GPR[GPR_idx], &PPC::GPRCRegClass);
4547           int FI = MFI.CreateFixedObject(PtrByteSize, ArgOffset, true);
4548           SDValue FIN = DAG.getFrameIndex(FI, PtrVT);
4549           SDValue Val = DAG.getCopyFromReg(Chain, dl, VReg, PtrVT);
4550           SDValue Store = DAG.getStore(Val.getValue(1), dl, Val, FIN,
4551                                        MachinePointerInfo(&*FuncArg, j));
4552           MemOps.push_back(Store);
4553           ++GPR_idx;
4554           ArgOffset += PtrByteSize;
4555         } else {
4556           ArgOffset += ArgSize - (ArgOffset-CurArgOffset);
4557           break;
4558         }
4559       }
4560       continue;
4561     }
4562 
4563     switch (ObjectVT.getSimpleVT().SimpleTy) {
4564     default: llvm_unreachable("Unhandled argument type!");
4565     case MVT::i1:
4566     case MVT::i32:
4567       if (!isPPC64) {
4568         if (GPR_idx != Num_GPR_Regs) {
4569           unsigned VReg = MF.addLiveIn(GPR[GPR_idx], &PPC::GPRCRegClass);
4570           ArgVal = DAG.getCopyFromReg(Chain, dl, VReg, MVT::i32);
4571 
4572           if (ObjectVT == MVT::i1)
4573             ArgVal = DAG.getNode(ISD::TRUNCATE, dl, MVT::i1, ArgVal);
4574 
4575           ++GPR_idx;
4576         } else {
4577           needsLoad = true;
4578           ArgSize = PtrByteSize;
4579         }
4580         // All int arguments reserve stack space in the Darwin ABI.
4581         ArgOffset += PtrByteSize;
4582         break;
4583       }
4584       LLVM_FALLTHROUGH;
4585     case MVT::i64:  // PPC64
4586       if (GPR_idx != Num_GPR_Regs) {
4587         unsigned VReg = MF.addLiveIn(GPR[GPR_idx], &PPC::G8RCRegClass);
4588         ArgVal = DAG.getCopyFromReg(Chain, dl, VReg, MVT::i64);
4589 
4590         if (ObjectVT == MVT::i32 || ObjectVT == MVT::i1)
4591           // PPC64 passes i8, i16, and i32 values in i64 registers. Promote
4592           // value to MVT::i64 and then truncate to the correct register size.
4593           ArgVal = extendArgForPPC64(Flags, ObjectVT, DAG, ArgVal, dl);
4594 
4595         ++GPR_idx;
4596       } else {
4597         needsLoad = true;
4598         ArgSize = PtrByteSize;
4599       }
4600       // All int arguments reserve stack space in the Darwin ABI.
4601       ArgOffset += 8;
4602       break;
4603 
4604     case MVT::f32:
4605     case MVT::f64:
4606       // Every 4 bytes of argument space consumes one of the GPRs available for
4607       // argument passing.
4608       if (GPR_idx != Num_GPR_Regs) {
4609         ++GPR_idx;
4610         if (ObjSize == 8 && GPR_idx != Num_GPR_Regs && !isPPC64)
4611           ++GPR_idx;
4612       }
4613       if (FPR_idx != Num_FPR_Regs) {
4614         unsigned VReg;
4615 
4616         if (ObjectVT == MVT::f32)
4617           VReg = MF.addLiveIn(FPR[FPR_idx], &PPC::F4RCRegClass);
4618         else
4619           VReg = MF.addLiveIn(FPR[FPR_idx], &PPC::F8RCRegClass);
4620 
4621         ArgVal = DAG.getCopyFromReg(Chain, dl, VReg, ObjectVT);
4622         ++FPR_idx;
4623       } else {
4624         needsLoad = true;
4625       }
4626 
4627       // All FP arguments reserve stack space in the Darwin ABI.
4628       ArgOffset += isPPC64 ? 8 : ObjSize;
4629       break;
4630     case MVT::v4f32:
4631     case MVT::v4i32:
4632     case MVT::v8i16:
4633     case MVT::v16i8:
4634       // Note that vector arguments in registers don't reserve stack space,
4635       // except in varargs functions.
4636       if (VR_idx != Num_VR_Regs) {
4637         unsigned VReg = MF.addLiveIn(VR[VR_idx], &PPC::VRRCRegClass);
4638         ArgVal = DAG.getCopyFromReg(Chain, dl, VReg, ObjectVT);
4639         if (isVarArg) {
4640           while ((ArgOffset % 16) != 0) {
4641             ArgOffset += PtrByteSize;
4642             if (GPR_idx != Num_GPR_Regs)
4643               GPR_idx++;
4644           }
4645           ArgOffset += 16;
4646           GPR_idx = std::min(GPR_idx+4, Num_GPR_Regs); // FIXME correct for ppc64?
4647         }
4648         ++VR_idx;
4649       } else {
4650         if (!isVarArg && !isPPC64) {
4651           // Vectors go after all the nonvectors.
4652           CurArgOffset = VecArgOffset;
4653           VecArgOffset += 16;
4654         } else {
4655           // Vectors are aligned.
4656           ArgOffset = ((ArgOffset+15)/16)*16;
4657           CurArgOffset = ArgOffset;
4658           ArgOffset += 16;
4659         }
4660         needsLoad = true;
4661       }
4662       break;
4663     }
4664 
4665     // We need to load the argument to a virtual register if we determined above
4666     // that we ran out of physical registers of the appropriate type.
4667     if (needsLoad) {
4668       int FI = MFI.CreateFixedObject(ObjSize,
4669                                      CurArgOffset + (ArgSize - ObjSize),
4670                                      isImmutable);
4671       SDValue FIN = DAG.getFrameIndex(FI, PtrVT);
4672       ArgVal = DAG.getLoad(ObjectVT, dl, Chain, FIN, MachinePointerInfo());
4673     }
4674 
4675     InVals.push_back(ArgVal);
4676   }
4677 
4678   // Allow for Altivec parameters at the end, if needed.
4679   if (nAltivecParamsAtEnd) {
4680     MinReservedArea = ((MinReservedArea+15)/16)*16;
4681     MinReservedArea += 16*nAltivecParamsAtEnd;
4682   }
4683 
4684   // Area that is at least reserved in the caller of this function.
4685   MinReservedArea = std::max(MinReservedArea, LinkageSize + 8 * PtrByteSize);
4686 
4687   // Set the size that is at least reserved in caller of this function.  Tail
4688   // call optimized functions' reserved stack space needs to be aligned so that
4689   // taking the difference between two stack areas will result in an aligned
4690   // stack.
4691   MinReservedArea =
4692       EnsureStackAlignment(Subtarget.getFrameLowering(), MinReservedArea);
4693   FuncInfo->setMinReservedArea(MinReservedArea);
4694 
4695   // If the function takes variable number of arguments, make a frame index for
4696   // the start of the first vararg value... for expansion of llvm.va_start.
4697   if (isVarArg) {
4698     int Depth = ArgOffset;
4699 
4700     FuncInfo->setVarArgsFrameIndex(
4701       MFI.CreateFixedObject(PtrVT.getSizeInBits()/8,
4702                             Depth, true));
4703     SDValue FIN = DAG.getFrameIndex(FuncInfo->getVarArgsFrameIndex(), PtrVT);
4704 
4705     // If this function is vararg, store any remaining integer argument regs
4706     // to their spots on the stack so that they may be loaded by dereferencing
4707     // the result of va_next.
4708     for (; GPR_idx != Num_GPR_Regs; ++GPR_idx) {
4709       unsigned VReg;
4710 
4711       if (isPPC64)
4712         VReg = MF.addLiveIn(GPR[GPR_idx], &PPC::G8RCRegClass);
4713       else
4714         VReg = MF.addLiveIn(GPR[GPR_idx], &PPC::GPRCRegClass);
4715 
4716       SDValue Val = DAG.getCopyFromReg(Chain, dl, VReg, PtrVT);
4717       SDValue Store =
4718           DAG.getStore(Val.getValue(1), dl, Val, FIN, MachinePointerInfo());
4719       MemOps.push_back(Store);
      // Increment the address by the pointer size for the next argument to
      // store.
4721       SDValue PtrOff = DAG.getConstant(PtrVT.getSizeInBits()/8, dl, PtrVT);
4722       FIN = DAG.getNode(ISD::ADD, dl, PtrOff.getValueType(), FIN, PtrOff);
4723     }
4724   }
4725 
4726   if (!MemOps.empty())
4727     Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, MemOps);
4728 
4729   return Chain;
4730 }
4731 
4732 /// CalculateTailCallSPDiff - Get the amount the stack pointer has to be
4733 /// adjusted to accommodate the arguments for the tailcall.
4734 static int CalculateTailCallSPDiff(SelectionDAG& DAG, bool isTailCall,
4735                                    unsigned ParamSize) {
4736 
4737   if (!isTailCall) return 0;
4738 
4739   PPCFunctionInfo *FI = DAG.getMachineFunction().getInfo<PPCFunctionInfo>();
4740   unsigned CallerMinReservedArea = FI->getMinReservedArea();
4741   int SPDiff = (int)CallerMinReservedArea - (int)ParamSize;
4742   // Remember only if the new adjustment is bigger.
4743   if (SPDiff < FI->getTailCallSPDelta())
4744     FI->setTailCallSPDelta(SPDiff);
4745 
4746   return SPDiff;
4747 }
4748 
4749 static bool isFunctionGlobalAddress(SDValue Callee);
4750 
4751 static bool callsShareTOCBase(const Function *Caller, SDValue Callee,
4752                               const TargetMachine &TM) {
4753   // It does not make sense to call callsShareTOCBase() with a caller that
4754   // is PC Relative since PC Relative callers do not have a TOC.
4755 #ifndef NDEBUG
4756   const PPCSubtarget *STICaller = &TM.getSubtarget<PPCSubtarget>(*Caller);
4757   assert(!STICaller->isUsingPCRelativeCalls() &&
4758          "PC Relative callers do not have a TOC and cannot share a TOC Base");
4759 #endif
4760 
4761   // Callee is either a GlobalAddress or an ExternalSymbol. ExternalSymbols
4762   // don't have enough information to determine if the caller and callee share
  // the same TOC base, so we have to pessimistically assume they don't for
4764   // correctness.
4765   GlobalAddressSDNode *G = dyn_cast<GlobalAddressSDNode>(Callee);
4766   if (!G)
4767     return false;
4768 
4769   const GlobalValue *GV = G->getGlobal();
4770 
4771   // If the callee is preemptable, then the static linker will use a plt-stub
4772   // which saves the toc to the stack, and needs a nop after the call
4773   // instruction to convert to a toc-restore.
4774   if (!TM.shouldAssumeDSOLocal(*Caller->getParent(), GV))
4775     return false;
4776 
4777   // Functions with PC Relative enabled may clobber the TOC in the same DSO.
4778   // We may need a TOC restore in the situation where the caller requires a
4779   // valid TOC but the callee is PC Relative and does not.
4780   const Function *F = dyn_cast<Function>(GV);
4781   const GlobalAlias *Alias = dyn_cast<GlobalAlias>(GV);
4782 
4783   // If we have an Alias we can try to get the function from there.
4784   if (Alias) {
4785     const GlobalObject *GlobalObj = Alias->getBaseObject();
4786     F = dyn_cast<Function>(GlobalObj);
4787   }
4788 
4789   // If we still have no valid function pointer we do not have enough
4790   // information to determine if the callee uses PC Relative calls so we must
4791   // assume that it does.
4792   if (!F)
4793     return false;
4794 
4795   // If the callee uses PC Relative we cannot guarantee that the callee won't
4796   // clobber the TOC of the caller and so we must assume that the two
4797   // functions do not share a TOC base.
4798   const PPCSubtarget *STICallee = &TM.getSubtarget<PPCSubtarget>(*F);
4799   if (STICallee->isUsingPCRelativeCalls())
4800     return false;
4801 
4802   // If the GV is not a strong definition then we need to assume it can be
4803   // replaced by another function at link time. The function that replaces
4804   // it may not share the same TOC as the caller since the callee may be
4805   // replaced by a PC Relative version of the same function.
4806   if (!GV->isStrongDefinitionForLinker())
4807     return false;
4808 
4809   // The medium and large code models are expected to provide a sufficiently
4810   // large TOC to provide all data addressing needs of a module with a
4811   // single TOC.
4812   if (CodeModel::Medium == TM.getCodeModel() ||
4813       CodeModel::Large == TM.getCodeModel())
4814     return true;
4815 
4816   // Any explicitly-specified sections and section prefixes must also match.
4817   // Also, if we're using -ffunction-sections, then each function is always in
4818   // a different section (the same is true for COMDAT functions).
4819   if (TM.getFunctionSections() || GV->hasComdat() || Caller->hasComdat() ||
4820       GV->getSection() != Caller->getSection())
4821     return false;
4822   if (const auto *F = dyn_cast<Function>(GV)) {
4823     if (F->getSectionPrefix() != Caller->getSectionPrefix())
4824       return false;
4825   }
4826 
4827   return true;
4828 }
4829 
4830 static bool
4831 needStackSlotPassParameters(const PPCSubtarget &Subtarget,
4832                             const SmallVectorImpl<ISD::OutputArg> &Outs) {
4833   assert(Subtarget.is64BitELFABI());
4834 
4835   const unsigned PtrByteSize = 8;
4836   const unsigned LinkageSize = Subtarget.getFrameLowering()->getLinkageSize();
4837 
4838   static const MCPhysReg GPR[] = {
4839     PPC::X3, PPC::X4, PPC::X5, PPC::X6,
4840     PPC::X7, PPC::X8, PPC::X9, PPC::X10,
4841   };
4842   static const MCPhysReg VR[] = {
4843     PPC::V2, PPC::V3, PPC::V4, PPC::V5, PPC::V6, PPC::V7, PPC::V8,
4844     PPC::V9, PPC::V10, PPC::V11, PPC::V12, PPC::V13
4845   };
4846 
4847   const unsigned NumGPRs = array_lengthof(GPR);
4848   const unsigned NumFPRs = 13;
4849   const unsigned NumVRs = array_lengthof(VR);
4850   const unsigned ParamAreaSize = NumGPRs * PtrByteSize;
4851 
4852   unsigned NumBytes = LinkageSize;
4853   unsigned AvailableFPRs = NumFPRs;
4854   unsigned AvailableVRs = NumVRs;
4855 
4856   for (const ISD::OutputArg& Param : Outs) {
4857     if (Param.Flags.isNest()) continue;
4858 
4859     if (CalculateStackSlotUsed(Param.VT, Param.ArgVT, Param.Flags, PtrByteSize,
4860                                LinkageSize, ParamAreaSize, NumBytes,
4861                                AvailableFPRs, AvailableVRs))
4862       return true;
4863   }
4864   return false;
4865 }
4866 
4867 static bool hasSameArgumentList(const Function *CallerFn, const CallBase &CB) {
4868   if (CB.arg_size() != CallerFn->arg_size())
4869     return false;
4870 
4871   auto CalleeArgIter = CB.arg_begin();
4872   auto CalleeArgEnd = CB.arg_end();
4873   Function::const_arg_iterator CallerArgIter = CallerFn->arg_begin();
4874 
4875   for (; CalleeArgIter != CalleeArgEnd; ++CalleeArgIter, ++CallerArgIter) {
4876     const Value* CalleeArg = *CalleeArgIter;
4877     const Value* CallerArg = &(*CallerArgIter);
4878     if (CalleeArg == CallerArg)
4879       continue;
4880 
4881     // e.g. @caller([4 x i64] %a, [4 x i64] %b) {
4882     //        tail call @callee([4 x i64] undef, [4 x i64] %b)
4883     //      }
4884     // 1st argument of callee is undef and has the same type as caller.
4885     if (CalleeArg->getType() == CallerArg->getType() &&
4886         isa<UndefValue>(CalleeArg))
4887       continue;
4888 
4889     return false;
4890   }
4891 
4892   return true;
4893 }
4894 
4895 // Returns true if TCO is possible between the callers and callees
4896 // calling conventions.
4897 static bool
4898 areCallingConvEligibleForTCO_64SVR4(CallingConv::ID CallerCC,
4899                                     CallingConv::ID CalleeCC) {
4900   // Tail calls are possible with fastcc and ccc.
  auto isTailCallableCC = [](CallingConv::ID CC) {
    return CC == CallingConv::C || CC == CallingConv::Fast;
  };
4904   if (!isTailCallableCC(CallerCC) || !isTailCallableCC(CalleeCC))
4905     return false;
4906 
4907   // We can safely tail call both fastcc and ccc callees from a c calling
4908   // convention caller. If the caller is fastcc, we may have less stack space
4909   // than a non-fastcc caller with the same signature so disable tail-calls in
4910   // that case.
4911   return CallerCC == CallingConv::C || CallerCC == CalleeCC;
4912 }
4913 
4914 bool PPCTargetLowering::IsEligibleForTailCallOptimization_64SVR4(
4915     SDValue Callee, CallingConv::ID CalleeCC, const CallBase *CB, bool isVarArg,
4916     const SmallVectorImpl<ISD::OutputArg> &Outs,
4917     const SmallVectorImpl<ISD::InputArg> &Ins, SelectionDAG &DAG) const {
4918   bool TailCallOpt = getTargetMachine().Options.GuaranteedTailCallOpt;
4919 
4920   if (DisableSCO && !TailCallOpt) return false;
4921 
4922   // Variadic argument functions are not supported.
4923   if (isVarArg) return false;
4924 
4925   auto &Caller = DAG.getMachineFunction().getFunction();
4926   // Check that the calling conventions are compatible for tco.
4927   if (!areCallingConvEligibleForTCO_64SVR4(Caller.getCallingConv(), CalleeCC))
4928     return false;
4929 
  // A caller with any byval parameters is not supported.
4931   if (any_of(Ins, [](const ISD::InputArg &IA) { return IA.Flags.isByVal(); }))
4932     return false;
4933 
  // A callee with any byval parameters is not supported either.
  // Note: This is a quick workaround, because in some cases, e.g.
4936   // caller's stack size > callee's stack size, we are still able to apply
4937   // sibling call optimization. For example, gcc is able to do SCO for caller1
4938   // in the following example, but not for caller2.
4939   //   struct test {
4940   //     long int a;
4941   //     char ary[56];
4942   //   } gTest;
4943   //   __attribute__((noinline)) int callee(struct test v, struct test *b) {
4944   //     b->a = v.a;
4945   //     return 0;
4946   //   }
4947   //   void caller1(struct test a, struct test c, struct test *b) {
4948   //     callee(gTest, b); }
4949   //   void caller2(struct test *b) { callee(gTest, b); }
4950   if (any_of(Outs, [](const ISD::OutputArg& OA) { return OA.Flags.isByVal(); }))
4951     return false;
4952 
4953   // If callee and caller use different calling conventions, we cannot pass
4954   // parameters on stack since offsets for the parameter area may be different.
4955   if (Caller.getCallingConv() != CalleeCC &&
4956       needStackSlotPassParameters(Subtarget, Outs))
4957     return false;
4958 
4959   // All variants of 64-bit ELF ABIs without PC-Relative addressing require that
4960   // the caller and callee share the same TOC for TCO/SCO. If the caller and
4961   // callee potentially have different TOC bases then we cannot tail call since
4962   // we need to restore the TOC pointer after the call.
4963   // ref: https://bugzilla.mozilla.org/show_bug.cgi?id=973977
4964   // We cannot guarantee this for indirect calls or calls to external functions.
4965   // When PC-Relative addressing is used, the concept of the TOC is no longer
4966   // applicable so this check is not required.
4967   // Check first for indirect calls.
4968   if (!Subtarget.isUsingPCRelativeCalls() &&
4969       !isFunctionGlobalAddress(Callee) && !isa<ExternalSymbolSDNode>(Callee))
4970     return false;
4971 
4972   // Check if we share the TOC base.
4973   if (!Subtarget.isUsingPCRelativeCalls() &&
4974       !callsShareTOCBase(&Caller, Callee, getTargetMachine()))
4975     return false;
4976 
4977   // TCO allows altering callee ABI, so we don't have to check further.
4978   if (CalleeCC == CallingConv::Fast && TailCallOpt)
4979     return true;
4980 
4981   if (DisableSCO) return false;
4982 
  // If the callee uses the same argument list as the caller, we can apply SCO
  // directly. Otherwise, we need to check whether the callee needs stack
  // slots for passing arguments.
  // PC-Relative tail calls may not have a CallBase; without one we cannot
  // verify that the argument lists match, so conservatively assume they
  // differ.
4989   if (CB && !hasSameArgumentList(&Caller, *CB) &&
4990       needStackSlotPassParameters(Subtarget, Outs))
4991     return false;
4992   else if (!CB && needStackSlotPassParameters(Subtarget, Outs))
4993     return false;
4994 
4995   return true;
4996 }
4997 
4998 /// IsEligibleForTailCallOptimization - Check whether the call is eligible
4999 /// for tail call optimization. Targets which want to do tail call
5000 /// optimization should implement this function.
5001 bool
5002 PPCTargetLowering::IsEligibleForTailCallOptimization(SDValue Callee,
5003                                                      CallingConv::ID CalleeCC,
5004                                                      bool isVarArg,
5005                                       const SmallVectorImpl<ISD::InputArg> &Ins,
5006                                                      SelectionDAG& DAG) const {
5007   if (!getTargetMachine().Options.GuaranteedTailCallOpt)
5008     return false;
5009 
5010   // Variable argument functions are not supported.
5011   if (isVarArg)
5012     return false;
5013 
5014   MachineFunction &MF = DAG.getMachineFunction();
5015   CallingConv::ID CallerCC = MF.getFunction().getCallingConv();
5016   if (CalleeCC == CallingConv::Fast && CallerCC == CalleeCC) {
5017     // Functions containing by val parameters are not supported.
    for (unsigned i = 0; i != Ins.size(); i++) {
      ISD::ArgFlagsTy Flags = Ins[i].Flags;
      if (Flags.isByVal()) return false;
    }
5022 
5023     // Non-PIC/GOT tail calls are supported.
5024     if (getTargetMachine().getRelocationModel() != Reloc::PIC_)
5025       return true;
5026 
    // At the moment we can only do local tail calls (in the same module,
    // hidden or protected) if we are generating PIC.
5029     if (GlobalAddressSDNode *G = dyn_cast<GlobalAddressSDNode>(Callee))
5030       return G->getGlobal()->hasHiddenVisibility()
5031           || G->getGlobal()->hasProtectedVisibility();
5032   }
5033 
5034   return false;
5035 }
5036 
/// isBLACompatibleAddress - Return the immediate to use if the specified
/// 32-bit value is representable in the immediate field of a B[L]A
/// instruction.
5039 static SDNode *isBLACompatibleAddress(SDValue Op, SelectionDAG &DAG) {
5040   ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op);
5041   if (!C) return nullptr;
5042 
5043   int Addr = C->getZExtValue();
5044   if ((Addr & 3) != 0 ||  // Low 2 bits are implicitly zero.
5045       SignExtend32<26>(Addr) != Addr)
5046     return nullptr;  // Top 6 bits have to be sext of immediate.
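  // For example, 0x01FFFFFC is encodable (4-byte aligned and within the
  // signed 26-bit range), while 0x02000000 is just past the positive limit.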
5047 
5048   return DAG
5049       .getConstant(
5050           (int)C->getZExtValue() >> 2, SDLoc(Op),
5051           DAG.getTargetLoweringInfo().getPointerTy(DAG.getDataLayout()))
5052       .getNode();
5053 }
5054 
5055 namespace {
5056 
5057 struct TailCallArgumentInfo {
5058   SDValue Arg;
5059   SDValue FrameIdxOp;
5060   int FrameIdx = 0;
5061 
5062   TailCallArgumentInfo() = default;
5063 };
5064 
5065 } // end anonymous namespace
5066 
5067 /// StoreTailCallArgumentsToStackSlot - Stores arguments to their stack slot.
5068 static void StoreTailCallArgumentsToStackSlot(
5069     SelectionDAG &DAG, SDValue Chain,
5070     const SmallVectorImpl<TailCallArgumentInfo> &TailCallArgs,
5071     SmallVectorImpl<SDValue> &MemOpChains, const SDLoc &dl) {
5072   for (unsigned i = 0, e = TailCallArgs.size(); i != e; ++i) {
5073     SDValue Arg = TailCallArgs[i].Arg;
5074     SDValue FIN = TailCallArgs[i].FrameIdxOp;
5075     int FI = TailCallArgs[i].FrameIdx;
5076     // Store relative to framepointer.
5077     MemOpChains.push_back(DAG.getStore(
5078         Chain, dl, Arg, FIN,
5079         MachinePointerInfo::getFixedStack(DAG.getMachineFunction(), FI)));
5080   }
5081 }
5082 
5083 /// EmitTailCallStoreFPAndRetAddr - Move the frame pointer and return address to
5084 /// the appropriate stack slot for the tail call optimized function call.
5085 static SDValue EmitTailCallStoreFPAndRetAddr(SelectionDAG &DAG, SDValue Chain,
5086                                              SDValue OldRetAddr, SDValue OldFP,
5087                                              int SPDiff, const SDLoc &dl) {
5088   if (SPDiff) {
5089     // Calculate the new stack slot for the return address.
5090     MachineFunction &MF = DAG.getMachineFunction();
5091     const PPCSubtarget &Subtarget = MF.getSubtarget<PPCSubtarget>();
5092     const PPCFrameLowering *FL = Subtarget.getFrameLowering();
5093     bool isPPC64 = Subtarget.isPPC64();
5094     int SlotSize = isPPC64 ? 8 : 4;
5095     int NewRetAddrLoc = SPDiff + FL->getReturnSaveOffset();
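    // E.g. on 64-bit ELF the return save offset is 16, so SPDiff = -32 places
    // the new slot at -32 + 16 = -16 relative to the adjusted stack pointer.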
5096     int NewRetAddr = MF.getFrameInfo().CreateFixedObject(SlotSize,
5097                                                          NewRetAddrLoc, true);
5098     EVT VT = isPPC64 ? MVT::i64 : MVT::i32;
5099     SDValue NewRetAddrFrIdx = DAG.getFrameIndex(NewRetAddr, VT);
5100     Chain = DAG.getStore(Chain, dl, OldRetAddr, NewRetAddrFrIdx,
5101                          MachinePointerInfo::getFixedStack(MF, NewRetAddr));
5102   }
5103   return Chain;
5104 }
5105 
/// CalculateTailCallArgDest - Remember the argument for later processing and
/// calculate its position on the stack.
5108 static void
5109 CalculateTailCallArgDest(SelectionDAG &DAG, MachineFunction &MF, bool isPPC64,
5110                          SDValue Arg, int SPDiff, unsigned ArgOffset,
5111                      SmallVectorImpl<TailCallArgumentInfo>& TailCallArguments) {
5112   int Offset = ArgOffset + SPDiff;
5113   uint32_t OpSize = (Arg.getValueSizeInBits() + 7) / 8;
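  // E.g. a 64-bit argument yields OpSize = (64 + 7) / 8 = 8 bytes.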
5114   int FI = MF.getFrameInfo().CreateFixedObject(OpSize, Offset, true);
5115   EVT VT = isPPC64 ? MVT::i64 : MVT::i32;
5116   SDValue FIN = DAG.getFrameIndex(FI, VT);
5117   TailCallArgumentInfo Info;
5118   Info.Arg = Arg;
5119   Info.FrameIdxOp = FIN;
5120   Info.FrameIdx = FI;
5121   TailCallArguments.push_back(Info);
5122 }
5123 
/// EmitTailCallLoadFPAndRetAddr - Emit loads from the frame pointer and return
/// address stack slots. Returns the chain as result and the loaded values in
/// LROpOut/FPOpOut. Used when tail calling.
5127 SDValue PPCTargetLowering::EmitTailCallLoadFPAndRetAddr(
5128     SelectionDAG &DAG, int SPDiff, SDValue Chain, SDValue &LROpOut,
5129     SDValue &FPOpOut, const SDLoc &dl) const {
5130   if (SPDiff) {
5131     // Load the LR and FP stack slot for later adjusting.
5132     EVT VT = Subtarget.isPPC64() ? MVT::i64 : MVT::i32;
5133     LROpOut = getReturnAddrFrameIndex(DAG);
5134     LROpOut = DAG.getLoad(VT, dl, Chain, LROpOut, MachinePointerInfo());
5135     Chain = SDValue(LROpOut.getNode(), 1);
5136   }
5137   return Chain;
5138 }
5139 
/// CreateCopyOfByValArgument - Make a copy of an aggregate at the address
/// specified by "Src" to the address "Dst" of size "Size". Alignment
/// information is specified by the parameter attribute. The copy will be
/// passed as a byval function parameter.
5144 /// Sometimes what we are copying is the end of a larger object, the part that
5145 /// does not fit in registers.
5146 static SDValue CreateCopyOfByValArgument(SDValue Src, SDValue Dst,
5147                                          SDValue Chain, ISD::ArgFlagsTy Flags,
5148                                          SelectionDAG &DAG, const SDLoc &dl) {
5149   SDValue SizeNode = DAG.getConstant(Flags.getByValSize(), dl, MVT::i32);
5150   return DAG.getMemcpy(Chain, dl, Dst, Src, SizeNode,
5151                        Flags.getNonZeroByValAlign(), false, false, false,
5152                        MachinePointerInfo(), MachinePointerInfo());
5153 }
5154 
5155 /// LowerMemOpCallTo - Store the argument to the stack or remember it in case of
5156 /// tail calls.
5157 static void LowerMemOpCallTo(
5158     SelectionDAG &DAG, MachineFunction &MF, SDValue Chain, SDValue Arg,
5159     SDValue PtrOff, int SPDiff, unsigned ArgOffset, bool isPPC64,
5160     bool isTailCall, bool isVector, SmallVectorImpl<SDValue> &MemOpChains,
5161     SmallVectorImpl<TailCallArgumentInfo> &TailCallArguments, const SDLoc &dl) {
5162   EVT PtrVT = DAG.getTargetLoweringInfo().getPointerTy(DAG.getDataLayout());
5163   if (!isTailCall) {
5164     if (isVector) {
5165       SDValue StackPtr;
5166       if (isPPC64)
5167         StackPtr = DAG.getRegister(PPC::X1, MVT::i64);
5168       else
5169         StackPtr = DAG.getRegister(PPC::R1, MVT::i32);
5170       PtrOff = DAG.getNode(ISD::ADD, dl, PtrVT, StackPtr,
5171                            DAG.getConstant(ArgOffset, dl, PtrVT));
5172     }
    MemOpChains.push_back(
        DAG.getStore(Chain, dl, Arg, PtrOff, MachinePointerInfo()));
  } else {
    // Calculate and remember the argument location.
    CalculateTailCallArgDest(DAG, MF, isPPC64, Arg, SPDiff, ArgOffset,
                             TailCallArguments);
  }
5178 }
5179 
5180 static void
5181 PrepareTailCall(SelectionDAG &DAG, SDValue &InFlag, SDValue &Chain,
5182                 const SDLoc &dl, int SPDiff, unsigned NumBytes, SDValue LROp,
5183                 SDValue FPOp,
5184                 SmallVectorImpl<TailCallArgumentInfo> &TailCallArguments) {
5185   // Emit a sequence of copyto/copyfrom virtual registers for arguments that
5186   // might overwrite each other in case of tail call optimization.
5187   SmallVector<SDValue, 8> MemOpChains2;
5188   // Do not flag preceding copytoreg stuff together with the following stuff.
5189   InFlag = SDValue();
5190   StoreTailCallArgumentsToStackSlot(DAG, Chain, TailCallArguments,
5191                                     MemOpChains2, dl);
5192   if (!MemOpChains2.empty())
5193     Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, MemOpChains2);
5194 
5195   // Store the return address to the appropriate stack slot.
5196   Chain = EmitTailCallStoreFPAndRetAddr(DAG, Chain, LROp, FPOp, SPDiff, dl);
5197 
5198   // Emit callseq_end just before tailcall node.
5199   Chain = DAG.getCALLSEQ_END(Chain, DAG.getIntPtrConstant(NumBytes, dl, true),
5200                              DAG.getIntPtrConstant(0, dl, true), InFlag, dl);
5201   InFlag = Chain.getValue(1);
5202 }
5203 
// Is this global address that of a function that can be called by name (as
// opposed to something that must hold a descriptor for an indirect call)?
5206 static bool isFunctionGlobalAddress(SDValue Callee) {
5207   if (GlobalAddressSDNode *G = dyn_cast<GlobalAddressSDNode>(Callee)) {
5208     if (Callee.getOpcode() == ISD::GlobalTLSAddress ||
5209         Callee.getOpcode() == ISD::TargetGlobalTLSAddress)
5210       return false;
5211 
5212     return G->getGlobal()->getValueType()->isFunctionTy();
5213   }
5214 
5215   return false;
5216 }
5217 
5218 SDValue PPCTargetLowering::LowerCallResult(
5219     SDValue Chain, SDValue InFlag, CallingConv::ID CallConv, bool isVarArg,
5220     const SmallVectorImpl<ISD::InputArg> &Ins, const SDLoc &dl,
5221     SelectionDAG &DAG, SmallVectorImpl<SDValue> &InVals) const {
5222   SmallVector<CCValAssign, 16> RVLocs;
5223   CCState CCRetInfo(CallConv, isVarArg, DAG.getMachineFunction(), RVLocs,
5224                     *DAG.getContext());
5225 
5226   CCRetInfo.AnalyzeCallResult(
5227       Ins, (Subtarget.isSVR4ABI() && CallConv == CallingConv::Cold)
5228                ? RetCC_PPC_Cold
5229                : RetCC_PPC);
5230 
5231   // Copy all of the result registers out of their specified physreg.
5232   for (unsigned i = 0, e = RVLocs.size(); i != e; ++i) {
5233     CCValAssign &VA = RVLocs[i];
5234     assert(VA.isRegLoc() && "Can only return in registers!");
5235 
5236     SDValue Val;
5237 
5238     if (Subtarget.hasSPE() && VA.getLocVT() == MVT::f64) {
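      // SPE returns an f64 as two i32 halves in consecutive return registers;
      // recombine them (respecting endianness) into a single f64.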
5239       SDValue Lo = DAG.getCopyFromReg(Chain, dl, VA.getLocReg(), MVT::i32,
5240                                       InFlag);
5241       Chain = Lo.getValue(1);
5242       InFlag = Lo.getValue(2);
5243       VA = RVLocs[++i]; // skip ahead to next loc
5244       SDValue Hi = DAG.getCopyFromReg(Chain, dl, VA.getLocReg(), MVT::i32,
5245                                       InFlag);
5246       Chain = Hi.getValue(1);
5247       InFlag = Hi.getValue(2);
5248       if (!Subtarget.isLittleEndian())
        std::swap(Lo, Hi);
5250       Val = DAG.getNode(PPCISD::BUILD_SPE64, dl, MVT::f64, Lo, Hi);
5251     } else {
5252       Val = DAG.getCopyFromReg(Chain, dl,
5253                                VA.getLocReg(), VA.getLocVT(), InFlag);
5254       Chain = Val.getValue(1);
5255       InFlag = Val.getValue(2);
5256     }
5257 
5258     switch (VA.getLocInfo()) {
5259     default: llvm_unreachable("Unknown loc info!");
5260     case CCValAssign::Full: break;
5261     case CCValAssign::AExt:
5262       Val = DAG.getNode(ISD::TRUNCATE, dl, VA.getValVT(), Val);
5263       break;
5264     case CCValAssign::ZExt:
5265       Val = DAG.getNode(ISD::AssertZext, dl, VA.getLocVT(), Val,
5266                         DAG.getValueType(VA.getValVT()));
5267       Val = DAG.getNode(ISD::TRUNCATE, dl, VA.getValVT(), Val);
5268       break;
5269     case CCValAssign::SExt:
5270       Val = DAG.getNode(ISD::AssertSext, dl, VA.getLocVT(), Val,
5271                         DAG.getValueType(VA.getValVT()));
5272       Val = DAG.getNode(ISD::TRUNCATE, dl, VA.getValVT(), Val);
5273       break;
5274     }
5275 
5276     InVals.push_back(Val);
5277   }
5278 
5279   return Chain;
5280 }
5281 
5282 static bool isIndirectCall(const SDValue &Callee, SelectionDAG &DAG,
5283                            const PPCSubtarget &Subtarget, bool isPatchPoint) {
5284   // PatchPoint calls are not indirect.
5285   if (isPatchPoint)
5286     return false;
5287 
  if (isFunctionGlobalAddress(Callee) || isa<ExternalSymbolSDNode>(Callee))
5289     return false;
5290 
  // Darwin and 32-bit ELF can use a BLA. The descriptor-based ABIs cannot
  // because the immediate function pointer points to a descriptor instead of
  // a function entry point. The ELFv2 ABI cannot use a BLA because the function
  // pointer immediate points to the global entry point, while the BLA would
  // need to jump to the local entry point (see rL211174).
5296   if (!Subtarget.usesFunctionDescriptors() && !Subtarget.isELFv2ABI() &&
5297       isBLACompatibleAddress(Callee, DAG))
5298     return false;
5299 
5300   return true;
5301 }
5302 
5303 // AIX and 64-bit ELF ABIs w/o PCRel require a TOC save/restore around calls.
5304 static inline bool isTOCSaveRestoreRequired(const PPCSubtarget &Subtarget) {
5305   return Subtarget.isAIXABI() ||
5306          (Subtarget.is64BitELFABI() && !Subtarget.isUsingPCRelativeCalls());
5307 }
5308 
5309 static unsigned getCallOpcode(PPCTargetLowering::CallFlags CFlags,
5310                               const Function &Caller,
5311                               const SDValue &Callee,
5312                               const PPCSubtarget &Subtarget,
5313                               const TargetMachine &TM) {
5314   if (CFlags.IsTailCall)
5315     return PPCISD::TC_RETURN;
5316 
5317   // This is a call through a function pointer.
5318   if (CFlags.IsIndirect) {
    // AIX and the 64-bit ELF ABIs need to maintain the TOC pointer across
    // indirect calls. The save of the caller's TOC pointer to the stack will be
    // inserted into the DAG as part of call lowering. The restore of the TOC
    // pointer is modeled by using a pseudo instruction for the call opcode that
    // represents the 2-instruction sequence of an indirect branch and link,
    // immediately followed by a load of the TOC pointer from the stack save
    // slot into gpr2. For the 64-bit ELFv2 ABI with PCRel, do not restore the
    // TOC as it is not saved or used.
5327     return isTOCSaveRestoreRequired(Subtarget) ? PPCISD::BCTRL_LOAD_TOC
5328                                                : PPCISD::BCTRL;
5329   }
5330 
5331   if (Subtarget.isUsingPCRelativeCalls()) {
5332     assert(Subtarget.is64BitELFABI() && "PC Relative is only on ELF ABI.");
5333     return PPCISD::CALL_NOTOC;
5334   }
5335 
  // The ABIs that maintain a TOC pointer across calls need to have a nop
  // immediately following the call instruction if the caller and callee may
  // have different TOC bases. At link time, if the linker determines the calls
  // may not share a TOC base, the call is redirected to a trampoline inserted
  // by the linker. The trampoline will (among other things) save the caller's
  // TOC pointer at an ABI-designated offset in the linkage area and the linker
  // will rewrite the nop to be a load of the TOC pointer from the linkage area
  // into gpr2.
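  // For example, on ELFv2 the emitted sequence is:
  //   bl callee
  //   nop          # may be rewritten by the linker to: ld 2, 24(1)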
5344   if (Subtarget.isAIXABI() || Subtarget.is64BitELFABI())
5345     return callsShareTOCBase(&Caller, Callee, TM) ? PPCISD::CALL
5346                                                   : PPCISD::CALL_NOP;
5347 
5348   return PPCISD::CALL;
5349 }
5350 
5351 static SDValue transformCallee(const SDValue &Callee, SelectionDAG &DAG,
5352                                const SDLoc &dl, const PPCSubtarget &Subtarget) {
5353   if (!Subtarget.usesFunctionDescriptors() && !Subtarget.isELFv2ABI())
5354     if (SDNode *Dest = isBLACompatibleAddress(Callee, DAG))
5355       return SDValue(Dest, 0);
5356 
5357   // Returns true if the callee is local, and false otherwise.
5358   auto isLocalCallee = [&]() {
5359     const GlobalAddressSDNode *G = dyn_cast<GlobalAddressSDNode>(Callee);
5360     const Module *Mod = DAG.getMachineFunction().getFunction().getParent();
5361     const GlobalValue *GV = G ? G->getGlobal() : nullptr;
5362 
5363     return DAG.getTarget().shouldAssumeDSOLocal(*Mod, GV) &&
5364            !dyn_cast_or_null<GlobalIFunc>(GV);
5365   };
5366 
5367   // The PLT is only used in 32-bit ELF PIC mode.  Attempting to use the PLT in
5368   // a static relocation model causes some versions of GNU LD (2.17.50, at
5369   // least) to force BSS-PLT, instead of secure-PLT, even if all objects are
5370   // built with secure-PLT.
5371   bool UsePlt =
5372       Subtarget.is32BitELFABI() && !isLocalCallee() &&
5373       Subtarget.getTargetMachine().getRelocationModel() == Reloc::PIC_;
5374 
5375   const auto getAIXFuncEntryPointSymbolSDNode = [&](const GlobalValue *GV) {
5376     const TargetMachine &TM = Subtarget.getTargetMachine();
5377     const TargetLoweringObjectFile *TLOF = TM.getObjFileLowering();
5378     MCSymbolXCOFF *S =
5379         cast<MCSymbolXCOFF>(TLOF->getFunctionEntryPointSymbol(GV, TM));
5380 
5381     MVT PtrVT = DAG.getTargetLoweringInfo().getPointerTy(DAG.getDataLayout());
5382     return DAG.getMCSymbol(S, PtrVT);
5383   };
5384 
5385   if (isFunctionGlobalAddress(Callee)) {
5386     const GlobalValue *GV = cast<GlobalAddressSDNode>(Callee)->getGlobal();
5387 
5388     if (Subtarget.isAIXABI()) {
5389       assert(!isa<GlobalIFunc>(GV) && "IFunc is not supported on AIX.");
5390       return getAIXFuncEntryPointSymbolSDNode(GV);
5391     }
5392     return DAG.getTargetGlobalAddress(GV, dl, Callee.getValueType(), 0,
5393                                       UsePlt ? PPCII::MO_PLT : 0);
5394   }
5395 
5396   if (ExternalSymbolSDNode *S = dyn_cast<ExternalSymbolSDNode>(Callee)) {
5397     const char *SymName = S->getSymbol();
5398     if (Subtarget.isAIXABI()) {
5399       // If there exists a user-declared function whose name is the same as the
5400       // ExternalSymbol's, then we pick up the user-declared version.
5401       const Module *Mod = DAG.getMachineFunction().getFunction().getParent();
5402       if (const Function *F =
5403               dyn_cast_or_null<Function>(Mod->getNamedValue(SymName)))
5404         return getAIXFuncEntryPointSymbolSDNode(F);
5405 
5406       // On AIX, direct function calls reference the symbol for the function's
5407       // entry point, which is named by prepending a "." before the function's
5408       // C-linkage name. A Qualname is returned here because an external
5409       // function entry point is a csect with XTY_ER property.
5410       const auto getExternalFunctionEntryPointSymbol = [&](StringRef SymName) {
5411         auto &Context = DAG.getMachineFunction().getMMI().getContext();
5412         MCSectionXCOFF *Sec = Context.getXCOFFSection(
5413             (Twine(".") + Twine(SymName)).str(), XCOFF::XMC_PR, XCOFF::XTY_ER,
5414             SectionKind::getMetadata());
5415         return Sec->getQualNameSymbol();
5416       };
5417 
5418       SymName = getExternalFunctionEntryPointSymbol(SymName)->getName().data();
5419     }
5420     return DAG.getTargetExternalSymbol(SymName, Callee.getValueType(),
5421                                        UsePlt ? PPCII::MO_PLT : 0);
5422   }
5423 
5424   // No transformation needed.
  assert(Callee.getNode() && "Unexpected null callee!");
5426   return Callee;
5427 }
5428 
5429 static SDValue getOutputChainFromCallSeq(SDValue CallSeqStart) {
5430   assert(CallSeqStart.getOpcode() == ISD::CALLSEQ_START &&
5431          "Expected a CALLSEQ_STARTSDNode.");
5432 
5433   // The last operand is the chain, except when the node has glue. If the node
5434   // has glue, then the last operand is the glue, and the chain is the second
5435   // last operand.
5436   SDValue LastValue = CallSeqStart.getValue(CallSeqStart->getNumValues() - 1);
5437   if (LastValue.getValueType() != MVT::Glue)
5438     return LastValue;
5439 
5440   return CallSeqStart.getValue(CallSeqStart->getNumValues() - 2);
5441 }
5442 
// Creates the node that moves a function's address into the count register
5444 // to prepare for an indirect call instruction.
5445 static void prepareIndirectCall(SelectionDAG &DAG, SDValue &Callee,
5446                                 SDValue &Glue, SDValue &Chain,
5447                                 const SDLoc &dl) {
5448   SDValue MTCTROps[] = {Chain, Callee, Glue};
5449   EVT ReturnTypes[] = {MVT::Other, MVT::Glue};
5450   Chain = DAG.getNode(PPCISD::MTCTR, dl, makeArrayRef(ReturnTypes, 2),
5451                       makeArrayRef(MTCTROps, Glue.getNode() ? 3 : 2));
5452   // The glue is the second value produced.
5453   Glue = Chain.getValue(1);
5454 }
5455 
5456 static void prepareDescriptorIndirectCall(SelectionDAG &DAG, SDValue &Callee,
5457                                           SDValue &Glue, SDValue &Chain,
5458                                           SDValue CallSeqStart,
5459                                           const CallBase *CB, const SDLoc &dl,
5460                                           bool hasNest,
5461                                           const PPCSubtarget &Subtarget) {
5462   // Function pointers in the 64-bit SVR4 ABI do not point to the function
5463   // entry point, but to the function descriptor (the function entry point
5464   // address is part of the function descriptor though).
5465   // The function descriptor is a three doubleword structure with the
5466   // following fields: function entry point, TOC base address and
5467   // environment pointer.
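  // Illustratively, in C terms the descriptor is:
  //   struct FuncDesc { void *EntryPoint; void *TOCBase; void *EnvPtr; };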
5468   // Thus for a call through a function pointer, the following actions need
5469   // to be performed:
5470   //   1. Save the TOC of the caller in the TOC save area of its stack
5471   //      frame (this is done in LowerCall_Darwin() or LowerCall_64SVR4()).
5472   //   2. Load the address of the function entry point from the function
5473   //      descriptor.
5474   //   3. Load the TOC of the callee from the function descriptor into r2.
5475   //   4. Load the environment pointer from the function descriptor into
5476   //      r11.
5477   //   5. Branch to the function entry point address.
5478   //   6. On return of the callee, the TOC of the caller needs to be
5479   //      restored (this is done in FinishCall()).
5480   //
5481   // The loads are scheduled at the beginning of the call sequence, and the
5482   // register copies are flagged together to ensure that no other
5483   // operations can be scheduled in between. E.g. without flagging the
5484   // copies together, a TOC access in the caller could be scheduled between
5485   // the assignment of the callee TOC and the branch to the callee, which leads
5486   // to incorrect code.
5487 
5488   // Start by loading the function address from the descriptor.
5489   SDValue LDChain = getOutputChainFromCallSeq(CallSeqStart);
5490   auto MMOFlags = Subtarget.hasInvariantFunctionDescriptors()
5491                       ? (MachineMemOperand::MODereferenceable |
5492                          MachineMemOperand::MOInvariant)
5493                       : MachineMemOperand::MONone;
5494 
5495   MachinePointerInfo MPI(CB ? CB->getCalledOperand() : nullptr);
5496 
5497   // Registers used in building the DAG.
5498   const MCRegister EnvPtrReg = Subtarget.getEnvironmentPointerRegister();
5499   const MCRegister TOCReg = Subtarget.getTOCPointerRegister();
5500 
5501   // Offsets of descriptor members.
5502   const unsigned TOCAnchorOffset = Subtarget.descriptorTOCAnchorOffset();
5503   const unsigned EnvPtrOffset = Subtarget.descriptorEnvironmentPointerOffset();
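  // On 64-bit these are typically 8 and 16 respectively, matching the
  // three-doubleword descriptor layout (entry point at offset 0).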
5504 
5505   const MVT RegVT = Subtarget.isPPC64() ? MVT::i64 : MVT::i32;
5506   const unsigned Alignment = Subtarget.isPPC64() ? 8 : 4;
5507 
  // One load for the function's entry point address.
5509   SDValue LoadFuncPtr = DAG.getLoad(RegVT, dl, LDChain, Callee, MPI,
5510                                     Alignment, MMOFlags);
5511 
5512   // One for loading the TOC anchor for the module that contains the called
5513   // function.
5514   SDValue TOCOff = DAG.getIntPtrConstant(TOCAnchorOffset, dl);
5515   SDValue AddTOC = DAG.getNode(ISD::ADD, dl, RegVT, Callee, TOCOff);
5516   SDValue TOCPtr =
5517       DAG.getLoad(RegVT, dl, LDChain, AddTOC,
5518                   MPI.getWithOffset(TOCAnchorOffset), Alignment, MMOFlags);
5519 
5520   // One for loading the environment pointer.
5521   SDValue PtrOff = DAG.getIntPtrConstant(EnvPtrOffset, dl);
5522   SDValue AddPtr = DAG.getNode(ISD::ADD, dl, RegVT, Callee, PtrOff);
5523   SDValue LoadEnvPtr =
5524       DAG.getLoad(RegVT, dl, LDChain, AddPtr,
                  MPI.getWithOffset(EnvPtrOffset), Alignment, MMOFlags);

  // Then copy the newly loaded TOC anchor to the TOC pointer.
5529   SDValue TOCVal = DAG.getCopyToReg(Chain, dl, TOCReg, TOCPtr, Glue);
5530   Chain = TOCVal.getValue(0);
5531   Glue = TOCVal.getValue(1);
5532 
5533   // If the function call has an explicit 'nest' parameter, it takes the
5534   // place of the environment pointer.
5535   assert((!hasNest || !Subtarget.isAIXABI()) &&
5536          "Nest parameter is not supported on AIX.");
5537   if (!hasNest) {
5538     SDValue EnvVal = DAG.getCopyToReg(Chain, dl, EnvPtrReg, LoadEnvPtr, Glue);
5539     Chain = EnvVal.getValue(0);
5540     Glue = EnvVal.getValue(1);
5541   }
5542 
5543   // The rest of the indirect call sequence is the same as the non-descriptor
5544   // DAG.
5545   prepareIndirectCall(DAG, LoadFuncPtr, Glue, Chain, dl);
5546 }
5547 
5548 static void
5549 buildCallOperands(SmallVectorImpl<SDValue> &Ops,
5550                   PPCTargetLowering::CallFlags CFlags, const SDLoc &dl,
5551                   SelectionDAG &DAG,
5552                   SmallVector<std::pair<unsigned, SDValue>, 8> &RegsToPass,
5553                   SDValue Glue, SDValue Chain, SDValue &Callee, int SPDiff,
5554                   const PPCSubtarget &Subtarget) {
5555   const bool IsPPC64 = Subtarget.isPPC64();
5556   // MVT for a general purpose register.
5557   const MVT RegVT = IsPPC64 ? MVT::i64 : MVT::i32;
5558 
5559   // First operand is always the chain.
5560   Ops.push_back(Chain);
5561 
5562   // If it's a direct call pass the callee as the second operand.
5563   if (!CFlags.IsIndirect)
5564     Ops.push_back(Callee);
5565   else {
5566     assert(!CFlags.IsPatchPoint && "Patch point calls are not indirect.");
5567 
5568     // For the TOC based ABIs, we have saved the TOC pointer to the linkage area
5569     // on the stack (this would have been done in `LowerCall_64SVR4` or
5570     // `LowerCall_AIX`). The call instruction is a pseudo instruction that
5571     // represents both the indirect branch and a load that restores the TOC
5572     // pointer from the linkage area. The operand for the TOC restore is an add
5573     // of the TOC save offset to the stack pointer. This must be the second
5574     // operand: after the chain input but before any other variadic arguments.
5575     // For 64-bit ELFv2 ABI with PCRel, do not restore the TOC as it is not
5576     // saved or used.
5577     if (isTOCSaveRestoreRequired(Subtarget)) {
5578       const MCRegister StackPtrReg = Subtarget.getStackPointerRegister();
5579 
5580       SDValue StackPtr = DAG.getRegister(StackPtrReg, RegVT);
5581       unsigned TOCSaveOffset = Subtarget.getFrameLowering()->getTOCSaveOffset();
5582       SDValue TOCOff = DAG.getIntPtrConstant(TOCSaveOffset, dl);
5583       SDValue AddTOC = DAG.getNode(ISD::ADD, dl, RegVT, StackPtr, TOCOff);
5584       Ops.push_back(AddTOC);
5585     }
5586 
5587     // Add the register used for the environment pointer.
5588     if (Subtarget.usesFunctionDescriptors() && !CFlags.HasNest)
5589       Ops.push_back(DAG.getRegister(Subtarget.getEnvironmentPointerRegister(),
                                    RegVT));

    // Add CTR register as callee so a bctr can be emitted later.
5594     if (CFlags.IsTailCall)
5595       Ops.push_back(DAG.getRegister(IsPPC64 ? PPC::CTR8 : PPC::CTR, RegVT));
5596   }
5597 
5598   // If this is a tail call add stack pointer delta.
5599   if (CFlags.IsTailCall)
5600     Ops.push_back(DAG.getConstant(SPDiff, dl, MVT::i32));
5601 
5602   // Add argument registers to the end of the list so that they are known live
5603   // into the call.
5604   for (unsigned i = 0, e = RegsToPass.size(); i != e; ++i)
5605     Ops.push_back(DAG.getRegister(RegsToPass[i].first,
5606                                   RegsToPass[i].second.getValueType()));
5607 
5608   // We cannot add R2/X2 as an operand here for PATCHPOINT, because there is
5609   // no way to mark dependencies as implicit here.
5610   // We will add the R2/X2 dependency in EmitInstrWithCustomInserter.
5611   if ((Subtarget.is64BitELFABI() || Subtarget.isAIXABI()) &&
5612        !CFlags.IsPatchPoint && !Subtarget.isUsingPCRelativeCalls())
5613     Ops.push_back(DAG.getRegister(Subtarget.getTOCPointerRegister(), RegVT));
5614 
5615   // Add implicit use of CR bit 6 for 32-bit SVR4 vararg calls
5616   if (CFlags.IsVarArg && Subtarget.is32BitELFABI())
5617     Ops.push_back(DAG.getRegister(PPC::CR1EQ, MVT::i32));
5618 
5619   // Add a register mask operand representing the call-preserved registers.
5620   const TargetRegisterInfo *TRI = Subtarget.getRegisterInfo();
5621   const uint32_t *Mask =
5622       TRI->getCallPreservedMask(DAG.getMachineFunction(), CFlags.CallConv);
5623   assert(Mask && "Missing call preserved mask for calling convention");
5624   Ops.push_back(DAG.getRegisterMask(Mask));
5625 
5626   // If the glue is valid, it is the last operand.
5627   if (Glue.getNode())
5628     Ops.push_back(Glue);
5629 }
5630 
5631 SDValue PPCTargetLowering::FinishCall(
5632     CallFlags CFlags, const SDLoc &dl, SelectionDAG &DAG,
5633     SmallVector<std::pair<unsigned, SDValue>, 8> &RegsToPass, SDValue Glue,
5634     SDValue Chain, SDValue CallSeqStart, SDValue &Callee, int SPDiff,
5635     unsigned NumBytes, const SmallVectorImpl<ISD::InputArg> &Ins,
5636     SmallVectorImpl<SDValue> &InVals, const CallBase *CB) const {
5637 
5638   if ((Subtarget.is64BitELFABI() && !Subtarget.isUsingPCRelativeCalls()) ||
5639       Subtarget.isAIXABI())
5640     setUsesTOCBasePtr(DAG);
5641 
5642   unsigned CallOpc =
5643       getCallOpcode(CFlags, DAG.getMachineFunction().getFunction(), Callee,
5644                     Subtarget, DAG.getTarget());
5645 
5646   if (!CFlags.IsIndirect)
5647     Callee = transformCallee(Callee, DAG, dl, Subtarget);
5648   else if (Subtarget.usesFunctionDescriptors())
5649     prepareDescriptorIndirectCall(DAG, Callee, Glue, Chain, CallSeqStart, CB,
5650                                   dl, CFlags.HasNest, Subtarget);
5651   else
5652     prepareIndirectCall(DAG, Callee, Glue, Chain, dl);
5653 
5654   // Build the operand list for the call instruction.
5655   SmallVector<SDValue, 8> Ops;
5656   buildCallOperands(Ops, CFlags, dl, DAG, RegsToPass, Glue, Chain, Callee,
5657                     SPDiff, Subtarget);
5658 
5659   // Emit tail call.
5660   if (CFlags.IsTailCall) {
    // Indirect tail calls when using PC-Relative calls do not have the same
    // constraints.
5663     assert(((Callee.getOpcode() == ISD::Register &&
5664              cast<RegisterSDNode>(Callee)->getReg() == PPC::CTR) ||
5665             Callee.getOpcode() == ISD::TargetExternalSymbol ||
5666             Callee.getOpcode() == ISD::TargetGlobalAddress ||
5667             isa<ConstantSDNode>(Callee) ||
5668             (CFlags.IsIndirect && Subtarget.isUsingPCRelativeCalls())) &&
5669            "Expecting a global address, external symbol, absolute value, "
5670            "register or an indirect tail call when PC Relative calls are "
5671            "used.");
5672     // PC Relative calls also use TC_RETURN as the way to mark tail calls.
5673     assert(CallOpc == PPCISD::TC_RETURN &&
5674            "Unexpected call opcode for a tail call.");
5675     DAG.getMachineFunction().getFrameInfo().setHasTailCall();
5676     return DAG.getNode(CallOpc, dl, MVT::Other, Ops);
5677   }
5678 
5679   std::array<EVT, 2> ReturnTypes = {{MVT::Other, MVT::Glue}};
5680   Chain = DAG.getNode(CallOpc, dl, ReturnTypes, Ops);
5681   DAG.addNoMergeSiteInfo(Chain.getNode(), CFlags.NoMerge);
5682   Glue = Chain.getValue(1);
5683 
5684   // When performing tail call optimization the callee pops its arguments off
5685   // the stack. Account for this here so these bytes can be pushed back on in
5686   // PPCFrameLowering::eliminateCallFramePseudoInstr.
5687   int BytesCalleePops = (CFlags.CallConv == CallingConv::Fast &&
5688                          getTargetMachine().Options.GuaranteedTailCallOpt)
5689                             ? NumBytes
5690                             : 0;
5691 
5692   Chain = DAG.getCALLSEQ_END(Chain, DAG.getIntPtrConstant(NumBytes, dl, true),
5693                              DAG.getIntPtrConstant(BytesCalleePops, dl, true),
5694                              Glue, dl);
5695   Glue = Chain.getValue(1);
5696 
5697   return LowerCallResult(Chain, Glue, CFlags.CallConv, CFlags.IsVarArg, Ins, dl,
5698                          DAG, InVals);
5699 }
5700 
5701 SDValue
5702 PPCTargetLowering::LowerCall(TargetLowering::CallLoweringInfo &CLI,
5703                              SmallVectorImpl<SDValue> &InVals) const {
5704   SelectionDAG &DAG                     = CLI.DAG;
5705   SDLoc &dl                             = CLI.DL;
5706   SmallVectorImpl<ISD::OutputArg> &Outs = CLI.Outs;
5707   SmallVectorImpl<SDValue> &OutVals     = CLI.OutVals;
5708   SmallVectorImpl<ISD::InputArg> &Ins   = CLI.Ins;
5709   SDValue Chain                         = CLI.Chain;
5710   SDValue Callee                        = CLI.Callee;
5711   bool &isTailCall                      = CLI.IsTailCall;
5712   CallingConv::ID CallConv              = CLI.CallConv;
5713   bool isVarArg                         = CLI.IsVarArg;
5714   bool isPatchPoint                     = CLI.IsPatchPoint;
5715   const CallBase *CB                    = CLI.CB;
5716 
5717   if (isTailCall) {
5718     if (Subtarget.useLongCalls() && !(CB && CB->isMustTailCall()))
5719       isTailCall = false;
5720     else if (Subtarget.isSVR4ABI() && Subtarget.isPPC64())
5721       isTailCall = IsEligibleForTailCallOptimization_64SVR4(
5722           Callee, CallConv, CB, isVarArg, Outs, Ins, DAG);
5723     else
5724       isTailCall = IsEligibleForTailCallOptimization(Callee, CallConv, isVarArg,
5725                                                      Ins, DAG);
5726     if (isTailCall) {
5727       ++NumTailCalls;
5728       if (!getTargetMachine().Options.GuaranteedTailCallOpt)
5729         ++NumSiblingCalls;
5730 
5731       // PC Relative calls no longer guarantee that the callee is a Global
5732       // Address Node. The callee could be an indirect tail call in which
5733       // case the SDValue for the callee could be a load (to load the address
5734       // of a function pointer) or it may be a register copy (to move the
5735       // address of the callee from a function parameter into a virtual
      // register). It may also be an ExternalSymbolSDNode (e.g. memcpy).
5737       assert((Subtarget.isUsingPCRelativeCalls() ||
5738               isa<GlobalAddressSDNode>(Callee)) &&
5739              "Callee should be an llvm::Function object.");
5740 
5741       LLVM_DEBUG(dbgs() << "TCO caller: " << DAG.getMachineFunction().getName()
5742                         << "\nTCO callee: ");
5743       LLVM_DEBUG(Callee.dump());
5744     }
5745   }
5746 
5747   if (!isTailCall && CB && CB->isMustTailCall())
5748     report_fatal_error("failed to perform tail call elimination on a call "
5749                        "site marked musttail");
5750 
  // When long calls (i.e. indirect calls) are always used, every call is made
  // via a function pointer. If we have a function name, first translate it
  // into a pointer.
5754   if (Subtarget.useLongCalls() && isa<GlobalAddressSDNode>(Callee) &&
5755       !isTailCall)
5756     Callee = LowerGlobalAddress(Callee, DAG);
5757 
5758   CallFlags CFlags(
5759       CallConv, isTailCall, isVarArg, isPatchPoint,
5760       isIndirectCall(Callee, DAG, Subtarget, isPatchPoint),
5761       // hasNest
5762       Subtarget.is64BitELFABI() &&
5763           any_of(Outs, [](ISD::OutputArg Arg) { return Arg.Flags.isNest(); }),
5764       CLI.NoMerge);
5765 
5766   if (Subtarget.isSVR4ABI() && Subtarget.isPPC64())
5767     return LowerCall_64SVR4(Chain, Callee, CFlags, Outs, OutVals, Ins, dl, DAG,
5768                             InVals, CB);
5769 
5770   if (Subtarget.isSVR4ABI())
5771     return LowerCall_32SVR4(Chain, Callee, CFlags, Outs, OutVals, Ins, dl, DAG,
5772                             InVals, CB);
5773 
5774   if (Subtarget.isAIXABI())
5775     return LowerCall_AIX(Chain, Callee, CFlags, Outs, OutVals, Ins, dl, DAG,
5776                          InVals, CB);
5777 
5778   return LowerCall_Darwin(Chain, Callee, CFlags, Outs, OutVals, Ins, dl, DAG,
5779                           InVals, CB);
5780 }
5781 
5782 SDValue PPCTargetLowering::LowerCall_32SVR4(
5783     SDValue Chain, SDValue Callee, CallFlags CFlags,
5784     const SmallVectorImpl<ISD::OutputArg> &Outs,
5785     const SmallVectorImpl<SDValue> &OutVals,
5786     const SmallVectorImpl<ISD::InputArg> &Ins, const SDLoc &dl,
5787     SelectionDAG &DAG, SmallVectorImpl<SDValue> &InVals,
5788     const CallBase *CB) const {
5789   // See PPCTargetLowering::LowerFormalArguments_32SVR4() for a description
5790   // of the 32-bit SVR4 ABI stack frame layout.
5791 
5792   const CallingConv::ID CallConv = CFlags.CallConv;
5793   const bool IsVarArg = CFlags.IsVarArg;
5794   const bool IsTailCall = CFlags.IsTailCall;
5795 
5796   assert((CallConv == CallingConv::C ||
5797           CallConv == CallingConv::Cold ||
5798           CallConv == CallingConv::Fast) && "Unknown calling convention!");
5799 
5800   const Align PtrAlign(4);
5801 
5802   MachineFunction &MF = DAG.getMachineFunction();
5803 
  // Mark this function as potentially containing a function that contains a
  // tail call. As a consequence, the frame pointer will be used for dynamic
  // stack allocation and for restoring the caller's stack pointer in this
  // function's epilogue. This is done because the tail-called function might
  // overwrite the value in this function's (MF) stack pointer stack slot
  // 0(SP).
5809   if (getTargetMachine().Options.GuaranteedTailCallOpt &&
5810       CallConv == CallingConv::Fast)
5811     MF.getInfo<PPCFunctionInfo>()->setHasFastCall();
5812 
5813   // Count how many bytes are to be pushed on the stack, including the linkage
5814   // area, parameter list area and the part of the local variable space which
5815   // contains copies of aggregates which are passed by value.
5816 
5817   // Assign locations to all of the outgoing arguments.
5818   SmallVector<CCValAssign, 16> ArgLocs;
5819   PPCCCState CCInfo(CallConv, IsVarArg, MF, ArgLocs, *DAG.getContext());
5820 
5821   // Reserve space for the linkage area on the stack.
5822   CCInfo.AllocateStack(Subtarget.getFrameLowering()->getLinkageSize(),
5823                        PtrAlign);
5824   if (useSoftFloat())
5825     CCInfo.PreAnalyzeCallOperands(Outs);
5826 
5827   if (IsVarArg) {
5828     // Handle fixed and variable vector arguments differently.
5829     // Fixed vector arguments go into registers as long as registers are
5830     // available. Variable vector arguments always go into memory.
5831     unsigned NumArgs = Outs.size();
5832 
5833     for (unsigned i = 0; i != NumArgs; ++i) {
5834       MVT ArgVT = Outs[i].VT;
5835       ISD::ArgFlagsTy ArgFlags = Outs[i].Flags;
5836       bool Result;
5837 
5838       if (Outs[i].IsFixed) {
5839         Result = CC_PPC32_SVR4(i, ArgVT, ArgVT, CCValAssign::Full, ArgFlags,
5840                                CCInfo);
5841       } else {
5842         Result = CC_PPC32_SVR4_VarArg(i, ArgVT, ArgVT, CCValAssign::Full,
5843                                       ArgFlags, CCInfo);
5844       }
5845 
5846       if (Result) {
5847 #ifndef NDEBUG
5848         errs() << "Call operand #" << i << " has unhandled type "
5849              << EVT(ArgVT).getEVTString() << "\n";
5850 #endif
5851         llvm_unreachable(nullptr);
5852       }
5853     }
5854   } else {
5855     // All arguments are treated the same.
5856     CCInfo.AnalyzeCallOperands(Outs, CC_PPC32_SVR4);
5857   }
5858   CCInfo.clearWasPPCF128();
5859 
5860   // Assign locations to all of the outgoing aggregate by value arguments.
5861   SmallVector<CCValAssign, 16> ByValArgLocs;
5862   CCState CCByValInfo(CallConv, IsVarArg, MF, ByValArgLocs, *DAG.getContext());
5863 
5864   // Reserve stack space for the allocations in CCInfo.
5865   CCByValInfo.AllocateStack(CCInfo.getNextStackOffset(), PtrAlign);
5866 
5867   CCByValInfo.AnalyzeCallOperands(Outs, CC_PPC32_SVR4_ByVal);
5868 
  // Size of the linkage area, parameter list area, and the part of the local
  // variable space where copies of aggregates which are passed by value are
  // stored.
5872   unsigned NumBytes = CCByValInfo.getNextStackOffset();
5873 
5874   // Calculate by how many bytes the stack has to be adjusted in case of tail
5875   // call optimization.
5876   int SPDiff = CalculateTailCallSPDiff(DAG, IsTailCall, NumBytes);
5877 
5878   // Adjust the stack pointer for the new arguments...
5879   // These operations are automatically eliminated by the prolog/epilog pass
5880   Chain = DAG.getCALLSEQ_START(Chain, NumBytes, 0, dl);
5881   SDValue CallSeqStart = Chain;
5882 
  // Load the return address and frame pointer so they can be moved somewhere
  // else later.
5885   SDValue LROp, FPOp;
5886   Chain = EmitTailCallLoadFPAndRetAddr(DAG, SPDiff, Chain, LROp, FPOp, dl);
5887 
5888   // Set up a copy of the stack pointer for use loading and storing any
5889   // arguments that may not fit in the registers available for argument
5890   // passing.
5891   SDValue StackPtr = DAG.getRegister(PPC::R1, MVT::i32);
5892 
5893   SmallVector<std::pair<unsigned, SDValue>, 8> RegsToPass;
5894   SmallVector<TailCallArgumentInfo, 8> TailCallArguments;
5895   SmallVector<SDValue, 8> MemOpChains;
5896 
5897   bool seenFloatArg = false;
5898   // Walk the register/memloc assignments, inserting copies/loads.
5899   // i - Tracks the index into the list of registers allocated for the call
5900   // RealArgIdx - Tracks the index into the list of actual function arguments
5901   // j - Tracks the index into the list of byval arguments
5902   for (unsigned i = 0, RealArgIdx = 0, j = 0, e = ArgLocs.size();
5903        i != e;
5904        ++i, ++RealArgIdx) {
5905     CCValAssign &VA = ArgLocs[i];
5906     SDValue Arg = OutVals[RealArgIdx];
5907     ISD::ArgFlagsTy Flags = Outs[RealArgIdx].Flags;
5908 
5909     if (Flags.isByVal()) {
5910       // Argument is an aggregate which is passed by value, thus we need to
5911       // create a copy of it in the local variable space of the current stack
5912       // frame (which is the stack frame of the caller) and pass the address of
5913       // this copy to the callee.
5914       assert((j < ByValArgLocs.size()) && "Index out of bounds!");
5915       CCValAssign &ByValVA = ByValArgLocs[j++];
5916       assert((VA.getValNo() == ByValVA.getValNo()) && "ValNo mismatch!");
5917 
5918       // Memory reserved in the local variable space of the callers stack frame.
5919       unsigned LocMemOffset = ByValVA.getLocMemOffset();
5920 
5921       SDValue PtrOff = DAG.getIntPtrConstant(LocMemOffset, dl);
5922       PtrOff = DAG.getNode(ISD::ADD, dl, getPointerTy(MF.getDataLayout()),
5923                            StackPtr, PtrOff);
5924 
5925       // Create a copy of the argument in the local area of the current
5926       // stack frame.
5927       SDValue MemcpyCall =
5928         CreateCopyOfByValArgument(Arg, PtrOff,
5929                                   CallSeqStart.getNode()->getOperand(0),
5930                                   Flags, DAG, dl);
5931 
5932       // This must go outside the CALLSEQ_START..END.
5933       SDValue NewCallSeqStart = DAG.getCALLSEQ_START(MemcpyCall, NumBytes, 0,
5934                                                      SDLoc(MemcpyCall));
5935       DAG.ReplaceAllUsesWith(CallSeqStart.getNode(),
5936                              NewCallSeqStart.getNode());
5937       Chain = CallSeqStart = NewCallSeqStart;
5938 
5939       // Pass the address of the aggregate copy on the stack either in a
5940       // physical register or in the parameter list area of the current stack
5941       // frame to the callee.
5942       Arg = PtrOff;
5943     }
5944 
    // When useCRBits() is true, there can be i1 arguments.
    // This is because getRegisterType(MVT::i1) => MVT::i1,
    // while for other integer types getRegisterType() => MVT::i32.
    // Extend i1 arguments to ensure the callee gets an i32.
5949     if (Arg.getValueType() == MVT::i1)
5950       Arg = DAG.getNode(Flags.isSExt() ? ISD::SIGN_EXTEND : ISD::ZERO_EXTEND,
5951                         dl, MVT::i32, Arg);
5952 
5953     if (VA.isRegLoc()) {
5954       seenFloatArg |= VA.getLocVT().isFloatingPoint();
5955       // Put argument in a physical register.
5956       if (Subtarget.hasSPE() && Arg.getValueType() == MVT::f64) {
5957         bool IsLE = Subtarget.isLittleEndian();
5958         SDValue SVal = DAG.getNode(PPCISD::EXTRACT_SPE, dl, MVT::i32, Arg,
5959                         DAG.getIntPtrConstant(IsLE ? 0 : 1, dl));
5960         RegsToPass.push_back(std::make_pair(VA.getLocReg(), SVal.getValue(0)));
5961         SVal = DAG.getNode(PPCISD::EXTRACT_SPE, dl, MVT::i32, Arg,
5962                            DAG.getIntPtrConstant(IsLE ? 1 : 0, dl));
5963         RegsToPass.push_back(std::make_pair(ArgLocs[++i].getLocReg(),
5964                              SVal.getValue(0)));
5965       } else
5966         RegsToPass.push_back(std::make_pair(VA.getLocReg(), Arg));
5967     } else {
5968       // Put argument in the parameter list area of the current stack frame.
5969       assert(VA.isMemLoc());
5970       unsigned LocMemOffset = VA.getLocMemOffset();
5971 
5972       if (!IsTailCall) {
5973         SDValue PtrOff = DAG.getIntPtrConstant(LocMemOffset, dl);
5974         PtrOff = DAG.getNode(ISD::ADD, dl, getPointerTy(MF.getDataLayout()),
5975                              StackPtr, PtrOff);
5976 
5977         MemOpChains.push_back(
5978             DAG.getStore(Chain, dl, Arg, PtrOff, MachinePointerInfo()));
5979       } else {
5980         // Calculate and remember argument location.
5981         CalculateTailCallArgDest(DAG, MF, false, Arg, SPDiff, LocMemOffset,
5982                                  TailCallArguments);
5983       }
5984     }
5985   }
5986 
5987   if (!MemOpChains.empty())
5988     Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, MemOpChains);
5989 
5990   // Build a sequence of copy-to-reg nodes chained together with token chain
5991   // and flag operands which copy the outgoing args into the appropriate regs.
5992   SDValue InFlag;
5993   for (unsigned i = 0, e = RegsToPass.size(); i != e; ++i) {
5994     Chain = DAG.getCopyToReg(Chain, dl, RegsToPass[i].first,
5995                              RegsToPass[i].second, InFlag);
5996     InFlag = Chain.getValue(1);
5997   }
5998 
5999   // Set CR bit 6 to true if this is a vararg call with floating args passed in
6000   // registers.
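  // (A vararg callee reads CR bit 6 to decide whether it must spill the FP
  // argument registers for va_arg processing.)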
6001   if (IsVarArg) {
6002     SDVTList VTs = DAG.getVTList(MVT::Other, MVT::Glue);
6003     SDValue Ops[] = { Chain, InFlag };
6004 
6005     Chain = DAG.getNode(seenFloatArg ? PPCISD::CR6SET : PPCISD::CR6UNSET,
6006                         dl, VTs, makeArrayRef(Ops, InFlag.getNode() ? 2 : 1));
6007 
6008     InFlag = Chain.getValue(1);
6009   }
6010 
6011   if (IsTailCall)
6012     PrepareTailCall(DAG, InFlag, Chain, dl, SPDiff, NumBytes, LROp, FPOp,
6013                     TailCallArguments);
6014 
6015   return FinishCall(CFlags, dl, DAG, RegsToPass, InFlag, Chain, CallSeqStart,
6016                     Callee, SPDiff, NumBytes, Ins, InVals, CB);
6017 }
6018 
6019 // Copy an argument into memory, being careful to do this outside the
6020 // call sequence for the call to which the argument belongs.
6021 SDValue PPCTargetLowering::createMemcpyOutsideCallSeq(
6022     SDValue Arg, SDValue PtrOff, SDValue CallSeqStart, ISD::ArgFlagsTy Flags,
6023     SelectionDAG &DAG, const SDLoc &dl) const {
6024   SDValue MemcpyCall = CreateCopyOfByValArgument(Arg, PtrOff,
6025                         CallSeqStart.getNode()->getOperand(0),
6026                         Flags, DAG, dl);
6027   // The MEMCPY must go outside the CALLSEQ_START..END.
6028   int64_t FrameSize = CallSeqStart.getConstantOperandVal(1);
6029   SDValue NewCallSeqStart = DAG.getCALLSEQ_START(MemcpyCall, FrameSize, 0,
6030                                                  SDLoc(MemcpyCall));
6031   DAG.ReplaceAllUsesWith(CallSeqStart.getNode(),
6032                          NewCallSeqStart.getNode());
6033   return NewCallSeqStart;
6034 }
6035 
6036 SDValue PPCTargetLowering::LowerCall_64SVR4(
6037     SDValue Chain, SDValue Callee, CallFlags CFlags,
6038     const SmallVectorImpl<ISD::OutputArg> &Outs,
6039     const SmallVectorImpl<SDValue> &OutVals,
6040     const SmallVectorImpl<ISD::InputArg> &Ins, const SDLoc &dl,
6041     SelectionDAG &DAG, SmallVectorImpl<SDValue> &InVals,
6042     const CallBase *CB) const {
6043   bool isELFv2ABI = Subtarget.isELFv2ABI();
6044   bool isLittleEndian = Subtarget.isLittleEndian();
6045   unsigned NumOps = Outs.size();
6046   bool IsSibCall = false;
6047   bool IsFastCall = CFlags.CallConv == CallingConv::Fast;
6048 
6049   EVT PtrVT = getPointerTy(DAG.getDataLayout());
6050   unsigned PtrByteSize = 8;
6051 
6052   MachineFunction &MF = DAG.getMachineFunction();
6053 
6054   if (CFlags.IsTailCall && !getTargetMachine().Options.GuaranteedTailCallOpt)
6055     IsSibCall = true;
6056 
  // Mark this function as potentially containing a function that contains a
  // tail call. As a consequence, the frame pointer will be used for dynamic
  // stack allocation and for restoring the caller's stack pointer in this
  // function's epilogue. This is done because the tail-called function might
  // overwrite the value in this function's (MF) stack pointer stack slot
  // 0(SP).
6062   if (getTargetMachine().Options.GuaranteedTailCallOpt && IsFastCall)
6063     MF.getInfo<PPCFunctionInfo>()->setHasFastCall();
6064 
6065   assert(!(IsFastCall && CFlags.IsVarArg) &&
6066          "fastcc not supported on varargs functions");
6067 
6068   // Count how many bytes are to be pushed on the stack, including the linkage
6069   // area, and parameter passing area.  On ELFv1, the linkage area is 48 bytes
6070   // reserved space for [SP][CR][LR][2 x unused][TOC]; on ELFv2, the linkage
6071   // area is 32 bytes reserved space for [SP][CR][LR][TOC].
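  // ELFv1 linkage area offsets: SP = 0, CR = 8, LR = 16, reserved = 24/32,
  // TOC = 40. ELFv2 linkage area offsets: SP = 0, CR = 8, LR = 16, TOC = 24.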
6072   unsigned LinkageSize = Subtarget.getFrameLowering()->getLinkageSize();
6073   unsigned NumBytes = LinkageSize;
6074   unsigned GPR_idx = 0, FPR_idx = 0, VR_idx = 0;
6075 
6076   static const MCPhysReg GPR[] = {
6077     PPC::X3, PPC::X4, PPC::X5, PPC::X6,
6078     PPC::X7, PPC::X8, PPC::X9, PPC::X10,
6079   };
6080   static const MCPhysReg VR[] = {
6081     PPC::V2, PPC::V3, PPC::V4, PPC::V5, PPC::V6, PPC::V7, PPC::V8,
6082     PPC::V9, PPC::V10, PPC::V11, PPC::V12, PPC::V13
6083   };
6084 
6085   const unsigned NumGPRs = array_lengthof(GPR);
6086   const unsigned NumFPRs = useSoftFloat() ? 0 : 13;
6087   const unsigned NumVRs  = array_lengthof(VR);
6088 
6089   // On ELFv2, we can avoid allocating the parameter area if all the arguments
6090   // can be passed to the callee in registers.
6091   // For the fast calling convention, there is another check below.
  // Note: We should keep this consistent with LowerFormalArguments_64SVR4().
6093   bool HasParameterArea = !isELFv2ABI || CFlags.IsVarArg || IsFastCall;
6094   if (!HasParameterArea) {
6095     unsigned ParamAreaSize = NumGPRs * PtrByteSize;
6096     unsigned AvailableFPRs = NumFPRs;
6097     unsigned AvailableVRs = NumVRs;
6098     unsigned NumBytesTmp = NumBytes;
6099     for (unsigned i = 0; i != NumOps; ++i) {
6100       if (Outs[i].Flags.isNest()) continue;
6101       if (CalculateStackSlotUsed(Outs[i].VT, Outs[i].ArgVT, Outs[i].Flags,
6102                                  PtrByteSize, LinkageSize, ParamAreaSize,
6103                                  NumBytesTmp, AvailableFPRs, AvailableVRs))
6104         HasParameterArea = true;
6105     }
6106   }
6107 
6108   // When using the fast calling convention, we don't provide backing for
6109   // arguments that will be in registers.
6110   unsigned NumGPRsUsed = 0, NumFPRsUsed = 0, NumVRsUsed = 0;
6111 
6112   // Avoid allocating parameter area for fastcc functions if all the arguments
6113   // can be passed in the registers.
6114   if (IsFastCall)
6115     HasParameterArea = false;
6116 
6117   // Add up all the space actually used.
6118   for (unsigned i = 0; i != NumOps; ++i) {
6119     ISD::ArgFlagsTy Flags = Outs[i].Flags;
6120     EVT ArgVT = Outs[i].VT;
6121     EVT OrigVT = Outs[i].ArgVT;
6122 
6123     if (Flags.isNest())
6124       continue;
6125 
6126     if (IsFastCall) {
6127       if (Flags.isByVal()) {
        NumGPRsUsed += (Flags.getByValSize() + 7) / 8;
6129         if (NumGPRsUsed > NumGPRs)
6130           HasParameterArea = true;
6131       } else {
6132         switch (ArgVT.getSimpleVT().SimpleTy) {
6133         default: llvm_unreachable("Unexpected ValueType for argument!");
6134         case MVT::i1:
6135         case MVT::i32:
6136         case MVT::i64:
6137           if (++NumGPRsUsed <= NumGPRs)
6138             continue;
6139           break;
        case MVT::v4i32:
        case MVT::v8i16:
        case MVT::v16i8:
        case MVT::v2f64:
        case MVT::v2i64:
        case MVT::v1i128:
        case MVT::f128:
        case MVT::v4f32:
          if (++NumVRsUsed <= NumVRs)
            continue;
          break;
6154         case MVT::f32:
6155         case MVT::f64:
6156           if (++NumFPRsUsed <= NumFPRs)
6157             continue;
6158           break;
6159         }
6160         HasParameterArea = true;
6161       }
6162     }
6163 
    // Respect alignment of argument on the stack.
    auto Alignment =
        CalculateStackSlotAlignment(ArgVT, OrigVT, Flags, PtrByteSize);
    NumBytes = alignTo(NumBytes, Alignment);
6168 
6169     NumBytes += CalculateStackSlotSize(ArgVT, Flags, PtrByteSize);
6170     if (Flags.isInConsecutiveRegsLast())
      NumBytes = alignTo(NumBytes, PtrByteSize);
6172   }
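  // A worked sketch of the loop above for a hypothetical ELFv2 call
  // f(i64, double, <4 x i32>) with PtrByteSize = 8:
  //   NumBytes = 32                       // linkage area
  //   i64:    alignTo(32, 8)  + 8  -> 40
  //   double: alignTo(40, 8)  + 8  -> 48
  //   vector: alignTo(48, 16) + 16 -> 64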
6173 
6174   unsigned NumBytesActuallyUsed = NumBytes;
6175 
  // In the old ELFv1 ABI, the prolog code of the callee may store up to 8
  // GPR argument registers to the stack, allowing va_start to index over
  // them in memory if it is a varargs function.  Because we cannot tell if
  // this is needed on the caller side, we have to conservatively assume that
  // it is needed.  As such, make sure we have at least enough stack space
  // for the caller to store the 8 GPRs.
  // In the ELFv2 ABI, we allocate the parameter area iff a callee
  // really requires memory operands, e.g. a vararg function.
6184   if (HasParameterArea)
6185     NumBytes = std::max(NumBytes, LinkageSize + 8 * PtrByteSize);
6186   else
6187     NumBytes = LinkageSize;
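  // E.g. when the parameter area exists and PtrByteSize = 8, the floor above
  // is LinkageSize + 64 bytes: 112 on ELFv1 and 96 on ELFv2.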
6188 
6189   // Tail call needs the stack to be aligned.
6190   if (getTargetMachine().Options.GuaranteedTailCallOpt && IsFastCall)
6191     NumBytes = EnsureStackAlignment(Subtarget.getFrameLowering(), NumBytes);
6192 
6193   int SPDiff = 0;
6194 
6195   // Calculate by how many bytes the stack has to be adjusted in case of tail
6196   // call optimization.
6197   if (!IsSibCall)
6198     SPDiff = CalculateTailCallSPDiff(DAG, CFlags.IsTailCall, NumBytes);
6199 
6200   // To protect arguments on the stack from being clobbered in a tail call,
6201   // force all the loads to happen before doing any other lowering.
6202   if (CFlags.IsTailCall)
6203     Chain = DAG.getStackArgumentTokenFactor(Chain);
6204 
6205   // Adjust the stack pointer for the new arguments...
6206   // These operations are automatically eliminated by the prolog/epilog pass
6207   if (!IsSibCall)
6208     Chain = DAG.getCALLSEQ_START(Chain, NumBytes, 0, dl);
6209   SDValue CallSeqStart = Chain;
6210 
  // Load the return address and frame pointer so they can be moved somewhere
  // else later.
6213   SDValue LROp, FPOp;
6214   Chain = EmitTailCallLoadFPAndRetAddr(DAG, SPDiff, Chain, LROp, FPOp, dl);
6215 
6216   // Set up a copy of the stack pointer for use loading and storing any
6217   // arguments that may not fit in the registers available for argument
6218   // passing.
6219   SDValue StackPtr = DAG.getRegister(PPC::X1, MVT::i64);
6220 
6221   // Figure out which arguments are going to go in registers, and which in
6222   // memory.  Also, if this is a vararg function, floating point operations
6223   // must be stored to our stack, and loaded into integer regs as well, if
6224   // any integer regs are available for argument passing.
6225   unsigned ArgOffset = LinkageSize;
6226 
6227   SmallVector<std::pair<unsigned, SDValue>, 8> RegsToPass;
6228   SmallVector<TailCallArgumentInfo, 8> TailCallArguments;
6229 
6230   SmallVector<SDValue, 8> MemOpChains;
6231   for (unsigned i = 0; i != NumOps; ++i) {
6232     SDValue Arg = OutVals[i];
6233     ISD::ArgFlagsTy Flags = Outs[i].Flags;
6234     EVT ArgVT = Outs[i].VT;
6235     EVT OrigVT = Outs[i].ArgVT;
6236 
6237     // PtrOff will be used to store the current argument to the stack if a
6238     // register cannot be found for it.
6239     SDValue PtrOff;
6240 
6241     // We re-align the argument offset for each argument, except when using the
6242     // fast calling convention, when we need to make sure we do that only when
6243     // we'll actually use a stack slot.
6244     auto ComputePtrOff = [&]() {
      // Respect alignment of argument on the stack.
6246       auto Alignment =
6247           CalculateStackSlotAlignment(ArgVT, OrigVT, Flags, PtrByteSize);
6248       ArgOffset = alignTo(ArgOffset, Alignment);
6249 
6250       PtrOff = DAG.getConstant(ArgOffset, dl, StackPtr.getValueType());
6251 
6252       PtrOff = DAG.getNode(ISD::ADD, dl, PtrVT, StackPtr, PtrOff);
6253     };
6254 
6255     if (!IsFastCall) {
6256       ComputePtrOff();
6257 
      // Compute GPR index associated with argument offset.
6259       GPR_idx = (ArgOffset - LinkageSize) / PtrByteSize;
6260       GPR_idx = std::min(GPR_idx, NumGPRs);
6261     }
6262 
6263     // Promote integers to 64-bit values.
6264     if (Arg.getValueType() == MVT::i32 || Arg.getValueType() == MVT::i1) {
6265       // FIXME: Should this use ANY_EXTEND if neither sext nor zext?
6266       unsigned ExtOp = Flags.isSExt() ? ISD::SIGN_EXTEND : ISD::ZERO_EXTEND;
6267       Arg = DAG.getNode(ExtOp, dl, MVT::i64, Arg);
6268     }
6269 
6270     // FIXME memcpy is used way more than necessary.  Correctness first.
6271     // Note: "by value" is code for passing a structure by value, not
6272     // basic types.
6273     if (Flags.isByVal()) {
6274       // Note: Size includes alignment padding, so
6275       //   struct x { short a; char b; }
6276       // will have Size = 4.  With #pragma pack(1), it will have Size = 3.
6277       // These are the proper values we need for right-justifying the
6278       // aggregate in a parameter register.
6279       unsigned Size = Flags.getByValSize();
6280 
6281       // An empty aggregate parameter takes up no storage and no
6282       // registers.
6283       if (Size == 0)
6284         continue;
6285 
6286       if (IsFastCall)
6287         ComputePtrOff();
6288 
6289       // All aggregates smaller than 8 bytes must be passed right-justified.
6290       if (Size==1 || Size==2 || Size==4) {
6291         EVT VT = (Size==1) ? MVT::i8 : ((Size==2) ? MVT::i16 : MVT::i32);
6292         if (GPR_idx != NumGPRs) {
6293           SDValue Load = DAG.getExtLoad(ISD::EXTLOAD, dl, PtrVT, Chain, Arg,
6294                                         MachinePointerInfo(), VT);
6295           MemOpChains.push_back(Load.getValue(1));
6296           RegsToPass.push_back(std::make_pair(GPR[GPR_idx++], Load));
6297 
6298           ArgOffset += PtrByteSize;
6299           continue;
6300         }
6301       }
6302 
6303       if (GPR_idx == NumGPRs && Size < 8) {
6304         SDValue AddPtr = PtrOff;
6305         if (!isLittleEndian) {
6306           SDValue Const = DAG.getConstant(PtrByteSize - Size, dl,
6307                                           PtrOff.getValueType());
6308           AddPtr = DAG.getNode(ISD::ADD, dl, PtrVT, PtrOff, Const);
6309         }
6310         Chain = CallSeqStart = createMemcpyOutsideCallSeq(Arg, AddPtr,
6311                                                           CallSeqStart,
6312                                                           Flags, DAG, dl);
6313         ArgOffset += PtrByteSize;
6314         continue;
6315       }
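      // E.g. (big-endian sketch): a 3-byte aggregate with no free GPRs is
      // memcpy'd to PtrOff + (PtrByteSize - 3), landing it in the rightmost
      // bytes of its doubleword slot, as the ABI requires.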
6316       // Copy entire object into memory.  There are cases where gcc-generated
6317       // code assumes it is there, even if it could be put entirely into
6318       // registers.  (This is not what the doc says.)
6319 
6320       // FIXME: The above statement is likely due to a misunderstanding of the
6321       // documents.  All arguments must be copied into the parameter area BY
6322       // THE CALLEE in the event that the callee takes the address of any
6323       // formal argument.  That has not yet been implemented.  However, it is
6324       // reasonable to use the stack area as a staging area for the register
6325       // load.
6326 
6327       // Skip this for small aggregates, as we will use the same slot for a
6328       // right-justified copy, below.
6329       if (Size >= 8)
6330         Chain = CallSeqStart = createMemcpyOutsideCallSeq(Arg, PtrOff,
6331                                                           CallSeqStart,
6332                                                           Flags, DAG, dl);
6333 
6334       // When a register is available, pass a small aggregate right-justified.
6335       if (Size < 8 && GPR_idx != NumGPRs) {
6336         // The easiest way to get this right-justified in a register
6337         // is to copy the structure into the rightmost portion of a
6338         // local variable slot, then load the whole slot into the
6339         // register.
6340         // FIXME: The memcpy seems to produce pretty awful code for
6341         // small aggregates, particularly for packed ones.
6342         // FIXME: It would be preferable to use the slot in the
6343         // parameter save area instead of a new local variable.
6344         SDValue AddPtr = PtrOff;
6345         if (!isLittleEndian) {
6346           SDValue Const = DAG.getConstant(8 - Size, dl, PtrOff.getValueType());
6347           AddPtr = DAG.getNode(ISD::ADD, dl, PtrVT, PtrOff, Const);
6348         }
6349         Chain = CallSeqStart = createMemcpyOutsideCallSeq(Arg, AddPtr,
6350                                                           CallSeqStart,
6351                                                           Flags, DAG, dl);
6352 
6353         // Load the slot into the register.
6354         SDValue Load =
6355             DAG.getLoad(PtrVT, dl, Chain, PtrOff, MachinePointerInfo());
6356         MemOpChains.push_back(Load.getValue(1));
6357         RegsToPass.push_back(std::make_pair(GPR[GPR_idx++], Load));
6358 
6359         // Done with this argument.
6360         ArgOffset += PtrByteSize;
6361         continue;
6362       }
6363 
6364       // For aggregates larger than PtrByteSize, copy the pieces of the
6365       // object that fit into registers from the parameter save area.
6366       for (unsigned j=0; j<Size; j+=PtrByteSize) {
6367         SDValue Const = DAG.getConstant(j, dl, PtrOff.getValueType());
6368         SDValue AddArg = DAG.getNode(ISD::ADD, dl, PtrVT, Arg, Const);
6369         if (GPR_idx != NumGPRs) {
6370           SDValue Load =
6371               DAG.getLoad(PtrVT, dl, Chain, AddArg, MachinePointerInfo());
6372           MemOpChains.push_back(Load.getValue(1));
6373           RegsToPass.push_back(std::make_pair(GPR[GPR_idx++], Load));
6374           ArgOffset += PtrByteSize;
6375         } else {
          ArgOffset += alignTo(Size - j, PtrByteSize);
6377           break;
6378         }
6379       }
6380       continue;
6381     }
6382 
6383     switch (Arg.getSimpleValueType().SimpleTy) {
6384     default: llvm_unreachable("Unexpected ValueType for argument!");
6385     case MVT::i1:
6386     case MVT::i32:
6387     case MVT::i64:
6388       if (Flags.isNest()) {
6389         // The 'nest' parameter, if any, is passed in R11.
6390         RegsToPass.push_back(std::make_pair(PPC::X11, Arg));
6391         break;
6392       }
6393 
6394       // These can be scalar arguments or elements of an integer array type
6395       // passed directly.  Clang may use those instead of "byval" aggregate
6396       // types to avoid forcing arguments to memory unnecessarily.
6397       if (GPR_idx != NumGPRs) {
6398         RegsToPass.push_back(std::make_pair(GPR[GPR_idx++], Arg));
6399       } else {
6400         if (IsFastCall)
6401           ComputePtrOff();
6402 
6403         assert(HasParameterArea &&
6404                "Parameter area must exist to pass an argument in memory.");
6405         LowerMemOpCallTo(DAG, MF, Chain, Arg, PtrOff, SPDiff, ArgOffset,
6406                          true, CFlags.IsTailCall, false, MemOpChains,
6407                          TailCallArguments, dl);
6408         if (IsFastCall)
6409           ArgOffset += PtrByteSize;
6410       }
6411       if (!IsFastCall)
6412         ArgOffset += PtrByteSize;
6413       break;
6414     case MVT::f32:
6415     case MVT::f64: {
6416       // These can be scalar arguments or elements of a float array type
      // passed directly.  The latter are used to implement ELFv2 homogeneous
6418       // float aggregates.
6419 
6420       // Named arguments go into FPRs first, and once they overflow, the
6421       // remaining arguments go into GPRs and then the parameter save area.
6422       // Unnamed arguments for vararg functions always go to GPRs and
      // then the parameter save area.  For now, always put arguments to
      // vararg routines in both locations (FPR *and* GPR or stack slot).
6425       bool NeedGPROrStack = CFlags.IsVarArg || FPR_idx == NumFPRs;
6426       bool NeededLoad = false;
6427 
6428       // First load the argument into the next available FPR.
6429       if (FPR_idx != NumFPRs)
6430         RegsToPass.push_back(std::make_pair(FPR[FPR_idx++], Arg));
6431 
6432       // Next, load the argument into GPR or stack slot if needed.
6433       if (!NeedGPROrStack)
        ; // Nothing to do; the argument already resides in an FPR.
6435       else if (GPR_idx != NumGPRs && !IsFastCall) {
6436         // FIXME: We may want to re-enable this for CallingConv::Fast on the P8
6437         // once we support fp <-> gpr moves.
6438 
6439         // In the non-vararg case, this can only ever happen in the
6440         // presence of f32 array types, since otherwise we never run
6441         // out of FPRs before running out of GPRs.
6442         SDValue ArgVal;
6443 
6444         // Double values are always passed in a single GPR.
6445         if (Arg.getValueType() != MVT::f32) {
6446           ArgVal = DAG.getNode(ISD::BITCAST, dl, MVT::i64, Arg);
6447 
6448         // Non-array float values are extended and passed in a GPR.
6449         } else if (!Flags.isInConsecutiveRegs()) {
6450           ArgVal = DAG.getNode(ISD::BITCAST, dl, MVT::i32, Arg);
6451           ArgVal = DAG.getNode(ISD::ANY_EXTEND, dl, MVT::i64, ArgVal);
6452 
6453         // If we have an array of floats, we collect every odd element
6454         // together with its predecessor into one GPR.
6455         } else if (ArgOffset % PtrByteSize != 0) {
6456           SDValue Lo, Hi;
6457           Lo = DAG.getNode(ISD::BITCAST, dl, MVT::i32, OutVals[i - 1]);
6458           Hi = DAG.getNode(ISD::BITCAST, dl, MVT::i32, Arg);
6459           if (!isLittleEndian)
6460             std::swap(Lo, Hi);
6461           ArgVal = DAG.getNode(ISD::BUILD_PAIR, dl, MVT::i64, Lo, Hi);
6462 
6463         // The final element, if even, goes into the first half of a GPR.
6464         } else if (Flags.isInConsecutiveRegsLast()) {
6465           ArgVal = DAG.getNode(ISD::BITCAST, dl, MVT::i32, Arg);
6466           ArgVal = DAG.getNode(ISD::ANY_EXTEND, dl, MVT::i64, ArgVal);
6467           if (!isLittleEndian)
6468             ArgVal = DAG.getNode(ISD::SHL, dl, MVT::i64, ArgVal,
6469                                  DAG.getConstant(32, dl, MVT::i32));
6470 
        // Non-final even elements are skipped; they will be handled
        // together with the subsequent argument on the next iteration.
6473         } else
6474           ArgVal = SDValue();
6475 
6476         if (ArgVal.getNode())
6477           RegsToPass.push_back(std::make_pair(GPR[GPR_idx++], ArgVal));
6478       } else {
6479         if (IsFastCall)
6480           ComputePtrOff();
6481 
6482         // Single-precision floating-point values are mapped to the
6483         // second (rightmost) word of the stack doubleword.
6484         if (Arg.getValueType() == MVT::f32 &&
6485             !isLittleEndian && !Flags.isInConsecutiveRegs()) {
6486           SDValue ConstFour = DAG.getConstant(4, dl, PtrOff.getValueType());
6487           PtrOff = DAG.getNode(ISD::ADD, dl, PtrVT, PtrOff, ConstFour);
6488         }
6489 
6490         assert(HasParameterArea &&
6491                "Parameter area must exist to pass an argument in memory.");
6492         LowerMemOpCallTo(DAG, MF, Chain, Arg, PtrOff, SPDiff, ArgOffset,
6493                          true, CFlags.IsTailCall, false, MemOpChains,
6494                          TailCallArguments, dl);
6495 
6496         NeededLoad = true;
6497       }
6498       // When passing an array of floats, the array occupies consecutive
6499       // space in the argument area; only round up to the next doubleword
6500       // at the end of the array.  Otherwise, each float takes 8 bytes.
6501       if (!IsFastCall || NeededLoad) {
6502         ArgOffset += (Arg.getValueType() == MVT::f32 &&
6503                       Flags.isInConsecutiveRegs()) ? 4 : 8;
6504         if (Flags.isInConsecutiveRegsLast())
          ArgOffset = alignTo(ArgOffset, PtrByteSize);
6506       }
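      // Sketch for a hypothetical ELFv2 f(float x[3]) once FPRs are
      // exhausted: elements 0 and 1 share one GPR via BUILD_PAIR, element 2
      // occupies the first half of the next GPR, and ArgOffset grows by 4
      // per element before rounding up to the next doubleword at the end.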
6507       break;
6508     }
6509     case MVT::v4f32:
6510     case MVT::v4i32:
6511     case MVT::v8i16:
6512     case MVT::v16i8:
6513     case MVT::v2f64:
6514     case MVT::v2i64:
6515     case MVT::v1i128:
6516     case MVT::f128:
6517       // These can be scalar arguments or elements of a vector array type
      // passed directly.  The latter are used to implement ELFv2 homogeneous
6519       // vector aggregates.
6520 
6521       // For a varargs call, named arguments go into VRs or on the stack as
6522       // usual; unnamed arguments always go to the stack or the corresponding
6523       // GPRs when within range.  For now, we always put the value in both
6524       // locations (or even all three).
6525       if (CFlags.IsVarArg) {
6526         assert(HasParameterArea &&
6527                "Parameter area must exist if we have a varargs call.");
6528         // We could elide this store in the case where the object fits
6529         // entirely in R registers.  Maybe later.
6530         SDValue Store =
6531             DAG.getStore(Chain, dl, Arg, PtrOff, MachinePointerInfo());
6532         MemOpChains.push_back(Store);
6533         if (VR_idx != NumVRs) {
6534           SDValue Load =
6535               DAG.getLoad(MVT::v4f32, dl, Store, PtrOff, MachinePointerInfo());
6536           MemOpChains.push_back(Load.getValue(1));
6537           RegsToPass.push_back(std::make_pair(VR[VR_idx++], Load));
6538         }
6539         ArgOffset += 16;
6540         for (unsigned i=0; i<16; i+=PtrByteSize) {
6541           if (GPR_idx == NumGPRs)
6542             break;
6543           SDValue Ix = DAG.getNode(ISD::ADD, dl, PtrVT, PtrOff,
6544                                    DAG.getConstant(i, dl, PtrVT));
6545           SDValue Load =
6546               DAG.getLoad(PtrVT, dl, Store, Ix, MachinePointerInfo());
6547           MemOpChains.push_back(Load.getValue(1));
6548           RegsToPass.push_back(std::make_pair(GPR[GPR_idx++], Load));
6549         }
6550         break;
6551       }
6552 
6553       // Non-varargs Altivec params go into VRs or on the stack.
6554       if (VR_idx != NumVRs) {
6555         RegsToPass.push_back(std::make_pair(VR[VR_idx++], Arg));
6556       } else {
6557         if (IsFastCall)
6558           ComputePtrOff();
6559 
6560         assert(HasParameterArea &&
6561                "Parameter area must exist to pass an argument in memory.");
6562         LowerMemOpCallTo(DAG, MF, Chain, Arg, PtrOff, SPDiff, ArgOffset,
6563                          true, CFlags.IsTailCall, true, MemOpChains,
6564                          TailCallArguments, dl);
6565         if (IsFastCall)
6566           ArgOffset += 16;
6567       }
6568 
6569       if (!IsFastCall)
6570         ArgOffset += 16;
6571       break;
6572     }
6573   }
6574 
6575   assert((!HasParameterArea || NumBytesActuallyUsed == ArgOffset) &&
6576          "mismatch in size of parameter area");
6577   (void)NumBytesActuallyUsed;
6578 
6579   if (!MemOpChains.empty())
6580     Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, MemOpChains);
6581 
6582   // Check if this is an indirect call (MTCTR/BCTRL).
6583   // See prepareDescriptorIndirectCall and buildCallOperands for more
6584   // information about calls through function pointers in the 64-bit SVR4 ABI.
6585   if (CFlags.IsIndirect) {
6586     // For 64-bit ELFv2 ABI with PCRel, do not save the TOC of the
6587     // caller in the TOC save area.
6588     if (isTOCSaveRestoreRequired(Subtarget)) {
      assert(!CFlags.IsTailCall && "Indirect tail calls not supported");
6590       // Load r2 into a virtual register and store it to the TOC save area.
6591       setUsesTOCBasePtr(DAG);
6592       SDValue Val = DAG.getCopyFromReg(Chain, dl, PPC::X2, MVT::i64);
6593       // TOC save area offset.
6594       unsigned TOCSaveOffset = Subtarget.getFrameLowering()->getTOCSaveOffset();
6595       SDValue PtrOff = DAG.getIntPtrConstant(TOCSaveOffset, dl);
6596       SDValue AddPtr = DAG.getNode(ISD::ADD, dl, PtrVT, StackPtr, PtrOff);
6597       Chain = DAG.getStore(Val.getValue(1), dl, Val, AddPtr,
6598                            MachinePointerInfo::getStack(
6599                                DAG.getMachineFunction(), TOCSaveOffset));
6600     }
6601     // In the ELFv2 ABI, R12 must contain the address of an indirect callee.
6602     // This does not mean the MTCTR instruction must use R12; it's easier
6603     // to model this as an extra parameter, so do that.
6604     if (isELFv2ABI && !CFlags.IsPatchPoint)
6605       RegsToPass.push_back(std::make_pair((unsigned)PPC::X12, Callee));
6606   }
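  // (A note on the slot used above: the TOC save doubleword is part of the
  // linkage area; getTOCSaveOffset() should return 40 under ELFv1 and 24
  // under ELFv2, per the layouts sketched earlier in this function.)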
6607 
6608   // Build a sequence of copy-to-reg nodes chained together with token chain
6609   // and flag operands which copy the outgoing args into the appropriate regs.
6610   SDValue InFlag;
6611   for (unsigned i = 0, e = RegsToPass.size(); i != e; ++i) {
6612     Chain = DAG.getCopyToReg(Chain, dl, RegsToPass[i].first,
6613                              RegsToPass[i].second, InFlag);
6614     InFlag = Chain.getValue(1);
6615   }
6616 
6617   if (CFlags.IsTailCall && !IsSibCall)
6618     PrepareTailCall(DAG, InFlag, Chain, dl, SPDiff, NumBytes, LROp, FPOp,
6619                     TailCallArguments);
6620 
6621   return FinishCall(CFlags, dl, DAG, RegsToPass, InFlag, Chain, CallSeqStart,
6622                     Callee, SPDiff, NumBytes, Ins, InVals, CB);
6623 }
6624 
6625 SDValue PPCTargetLowering::LowerCall_Darwin(
6626     SDValue Chain, SDValue Callee, CallFlags CFlags,
6627     const SmallVectorImpl<ISD::OutputArg> &Outs,
6628     const SmallVectorImpl<SDValue> &OutVals,
6629     const SmallVectorImpl<ISD::InputArg> &Ins, const SDLoc &dl,
6630     SelectionDAG &DAG, SmallVectorImpl<SDValue> &InVals,
6631     const CallBase *CB) const {
6632   unsigned NumOps = Outs.size();
6633 
6634   EVT PtrVT = getPointerTy(DAG.getDataLayout());
6635   bool isPPC64 = PtrVT == MVT::i64;
6636   unsigned PtrByteSize = isPPC64 ? 8 : 4;
6637 
6638   MachineFunction &MF = DAG.getMachineFunction();
6639 
  // Mark this function as potentially containing a tail call. As a
  // consequence, the frame pointer will be used for dynamic alloca and for
  // restoring the caller's stack pointer in this function's epilog. This is
  // done because a tail call might overwrite the value in this function's
  // (MF) stack pointer stack slot 0(SP).
6645   if (getTargetMachine().Options.GuaranteedTailCallOpt &&
6646       CFlags.CallConv == CallingConv::Fast)
6647     MF.getInfo<PPCFunctionInfo>()->setHasFastCall();
6648 
  // Count how many bytes are to be pushed on the stack, including the linkage
  // area and parameter passing area.  We start with 24/48 bytes, which is
  // pre-reserved space for [SP][CR][LR][3 x unused].
6652   unsigned LinkageSize = Subtarget.getFrameLowering()->getLinkageSize();
6653   unsigned NumBytes = LinkageSize;
6654 
6655   // Add up all the space actually used.
6656   // In 32-bit non-varargs calls, Altivec parameters all go at the end; usually
6657   // they all go in registers, but we must reserve stack space for them for
6658   // possible use by the caller.  In varargs or 64-bit calls, parameters are
6659   // assigned stack space in order, with padding so Altivec parameters are
6660   // 16-byte aligned.
6661   unsigned nAltivecParamsAtEnd = 0;
6662   for (unsigned i = 0; i != NumOps; ++i) {
6663     ISD::ArgFlagsTy Flags = Outs[i].Flags;
6664     EVT ArgVT = Outs[i].VT;
    // Varargs Altivec parameters are padded to a 16-byte boundary.
6666     if (ArgVT == MVT::v4f32 || ArgVT == MVT::v4i32 ||
6667         ArgVT == MVT::v8i16 || ArgVT == MVT::v16i8 ||
6668         ArgVT == MVT::v2f64 || ArgVT == MVT::v2i64) {
6669       if (!CFlags.IsVarArg && !isPPC64) {
6670         // Non-varargs Altivec parameters go after all the non-Altivec
6671         // parameters; handle those later so we know how much padding we need.
6672         nAltivecParamsAtEnd++;
6673         continue;
6674       }
      // Varargs and 64-bit Altivec parameters are padded to a 16-byte
      // boundary.
      NumBytes = alignTo(NumBytes, 16);
6677     }
6678     NumBytes += CalculateStackSlotSize(ArgVT, Flags, PtrByteSize);
6679   }
6680 
6681   // Allow for Altivec parameters at the end, if needed.
6682   if (nAltivecParamsAtEnd) {
    NumBytes = alignTo(NumBytes, 16);
    NumBytes += 16 * nAltivecParamsAtEnd;
6685   }
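  // E.g. (sketch): with NumBytes = 40 and two Altivec params held to the
  // end, NumBytes rounds up to 48 and then grows by 32, giving 80 bytes.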
6686 
  // The prolog code of the callee may store up to 8 GPR argument registers to
  // the stack, allowing va_start to index over them in memory if it is a
  // varargs function.
6689   // Because we cannot tell if this is needed on the caller side, we have to
6690   // conservatively assume that it is needed.  As such, make sure we have at
6691   // least enough stack space for the caller to store the 8 GPRs.
6692   NumBytes = std::max(NumBytes, LinkageSize + 8 * PtrByteSize);
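  // E.g. this floor is 24 + 32 = 56 bytes for 32-bit Darwin and
  // 48 + 64 = 112 bytes for 64-bit Darwin.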
6693 
6694   // Tail call needs the stack to be aligned.
6695   if (getTargetMachine().Options.GuaranteedTailCallOpt &&
6696       CFlags.CallConv == CallingConv::Fast)
6697     NumBytes = EnsureStackAlignment(Subtarget.getFrameLowering(), NumBytes);
6698 
6699   // Calculate by how many bytes the stack has to be adjusted in case of tail
6700   // call optimization.
6701   int SPDiff = CalculateTailCallSPDiff(DAG, CFlags.IsTailCall, NumBytes);
6702 
6703   // To protect arguments on the stack from being clobbered in a tail call,
6704   // force all the loads to happen before doing any other lowering.
6705   if (CFlags.IsTailCall)
6706     Chain = DAG.getStackArgumentTokenFactor(Chain);
6707 
6708   // Adjust the stack pointer for the new arguments...
6709   // These operations are automatically eliminated by the prolog/epilog pass
6710   Chain = DAG.getCALLSEQ_START(Chain, NumBytes, 0, dl);
6711   SDValue CallSeqStart = Chain;
6712 
  // Load the return address and frame pointer so they can be moved somewhere
  // else later.
6715   SDValue LROp, FPOp;
6716   Chain = EmitTailCallLoadFPAndRetAddr(DAG, SPDiff, Chain, LROp, FPOp, dl);
6717 
6718   // Set up a copy of the stack pointer for use loading and storing any
6719   // arguments that may not fit in the registers available for argument
6720   // passing.
6721   SDValue StackPtr;
6722   if (isPPC64)
6723     StackPtr = DAG.getRegister(PPC::X1, MVT::i64);
6724   else
6725     StackPtr = DAG.getRegister(PPC::R1, MVT::i32);
6726 
6727   // Figure out which arguments are going to go in registers, and which in
6728   // memory.  Also, if this is a vararg function, floating point operations
6729   // must be stored to our stack, and loaded into integer regs as well, if
6730   // any integer regs are available for argument passing.
6731   unsigned ArgOffset = LinkageSize;
6732   unsigned GPR_idx = 0, FPR_idx = 0, VR_idx = 0;
6733 
6734   static const MCPhysReg GPR_32[] = {           // 32-bit registers.
6735     PPC::R3, PPC::R4, PPC::R5, PPC::R6,
6736     PPC::R7, PPC::R8, PPC::R9, PPC::R10,
6737   };
6738   static const MCPhysReg GPR_64[] = {           // 64-bit registers.
6739     PPC::X3, PPC::X4, PPC::X5, PPC::X6,
6740     PPC::X7, PPC::X8, PPC::X9, PPC::X10,
6741   };
6742   static const MCPhysReg VR[] = {
6743     PPC::V2, PPC::V3, PPC::V4, PPC::V5, PPC::V6, PPC::V7, PPC::V8,
6744     PPC::V9, PPC::V10, PPC::V11, PPC::V12, PPC::V13
6745   };
6746   const unsigned NumGPRs = array_lengthof(GPR_32);
6747   const unsigned NumFPRs = 13;
6748   const unsigned NumVRs  = array_lengthof(VR);
6749 
6750   const MCPhysReg *GPR = isPPC64 ? GPR_64 : GPR_32;
6751 
6752   SmallVector<std::pair<unsigned, SDValue>, 8> RegsToPass;
6753   SmallVector<TailCallArgumentInfo, 8> TailCallArguments;
6754 
6755   SmallVector<SDValue, 8> MemOpChains;
6756   for (unsigned i = 0; i != NumOps; ++i) {
6757     SDValue Arg = OutVals[i];
6758     ISD::ArgFlagsTy Flags = Outs[i].Flags;
6759 
6760     // PtrOff will be used to store the current argument to the stack if a
6761     // register cannot be found for it.
6762     SDValue PtrOff;
6763 
6764     PtrOff = DAG.getConstant(ArgOffset, dl, StackPtr.getValueType());
6765 
6766     PtrOff = DAG.getNode(ISD::ADD, dl, PtrVT, StackPtr, PtrOff);
6767 
6768     // On PPC64, promote integers to 64-bit values.
6769     if (isPPC64 && Arg.getValueType() == MVT::i32) {
6770       // FIXME: Should this use ANY_EXTEND if neither sext nor zext?
6771       unsigned ExtOp = Flags.isSExt() ? ISD::SIGN_EXTEND : ISD::ZERO_EXTEND;
6772       Arg = DAG.getNode(ExtOp, dl, MVT::i64, Arg);
6773     }
6774 
6775     // FIXME memcpy is used way more than necessary.  Correctness first.
6776     // Note: "by value" is code for passing a structure by value, not
6777     // basic types.
6778     if (Flags.isByVal()) {
6779       unsigned Size = Flags.getByValSize();
6780       // Very small objects are passed right-justified.  Everything else is
6781       // passed left-justified.
6782       if (Size==1 || Size==2) {
6783         EVT VT = (Size==1) ? MVT::i8 : MVT::i16;
6784         if (GPR_idx != NumGPRs) {
6785           SDValue Load = DAG.getExtLoad(ISD::EXTLOAD, dl, PtrVT, Chain, Arg,
6786                                         MachinePointerInfo(), VT);
6787           MemOpChains.push_back(Load.getValue(1));
6788           RegsToPass.push_back(std::make_pair(GPR[GPR_idx++], Load));
6789 
6790           ArgOffset += PtrByteSize;
6791         } else {
6792           SDValue Const = DAG.getConstant(PtrByteSize - Size, dl,
6793                                           PtrOff.getValueType());
6794           SDValue AddPtr = DAG.getNode(ISD::ADD, dl, PtrVT, PtrOff, Const);
6795           Chain = CallSeqStart = createMemcpyOutsideCallSeq(Arg, AddPtr,
6796                                                             CallSeqStart,
6797                                                             Flags, DAG, dl);
6798           ArgOffset += PtrByteSize;
6799         }
6800         continue;
6801       }
6802       // Copy entire object into memory.  There are cases where gcc-generated
6803       // code assumes it is there, even if it could be put entirely into
6804       // registers.  (This is not what the doc says.)
6805       Chain = CallSeqStart = createMemcpyOutsideCallSeq(Arg, PtrOff,
6806                                                         CallSeqStart,
6807                                                         Flags, DAG, dl);
6808 
6809       // For small aggregates (Darwin only) and aggregates >= PtrByteSize,
6810       // copy the pieces of the object that fit into registers from the
6811       // parameter save area.
6812       for (unsigned j=0; j<Size; j+=PtrByteSize) {
6813         SDValue Const = DAG.getConstant(j, dl, PtrOff.getValueType());
6814         SDValue AddArg = DAG.getNode(ISD::ADD, dl, PtrVT, Arg, Const);
6815         if (GPR_idx != NumGPRs) {
6816           SDValue Load =
6817               DAG.getLoad(PtrVT, dl, Chain, AddArg, MachinePointerInfo());
6818           MemOpChains.push_back(Load.getValue(1));
6819           RegsToPass.push_back(std::make_pair(GPR[GPR_idx++], Load));
6820           ArgOffset += PtrByteSize;
6821         } else {
          ArgOffset += alignTo(Size - j, PtrByteSize);
6823           break;
6824         }
6825       }
6826       continue;
6827     }
6828 
6829     switch (Arg.getSimpleValueType().SimpleTy) {
6830     default: llvm_unreachable("Unexpected ValueType for argument!");
6831     case MVT::i1:
6832     case MVT::i32:
6833     case MVT::i64:
6834       if (GPR_idx != NumGPRs) {
6835         if (Arg.getValueType() == MVT::i1)
6836           Arg = DAG.getNode(ISD::ZERO_EXTEND, dl, PtrVT, Arg);
6837 
6838         RegsToPass.push_back(std::make_pair(GPR[GPR_idx++], Arg));
6839       } else {
6840         LowerMemOpCallTo(DAG, MF, Chain, Arg, PtrOff, SPDiff, ArgOffset,
6841                          isPPC64, CFlags.IsTailCall, false, MemOpChains,
6842                          TailCallArguments, dl);
6843       }
6844       ArgOffset += PtrByteSize;
6845       break;
6846     case MVT::f32:
6847     case MVT::f64:
6848       if (FPR_idx != NumFPRs) {
6849         RegsToPass.push_back(std::make_pair(FPR[FPR_idx++], Arg));
6850 
6851         if (CFlags.IsVarArg) {
6852           SDValue Store =
6853               DAG.getStore(Chain, dl, Arg, PtrOff, MachinePointerInfo());
6854           MemOpChains.push_back(Store);
6855 
          // Float varargs are always shadowed in available integer registers.
6857           if (GPR_idx != NumGPRs) {
6858             SDValue Load =
6859                 DAG.getLoad(PtrVT, dl, Store, PtrOff, MachinePointerInfo());
6860             MemOpChains.push_back(Load.getValue(1));
6861             RegsToPass.push_back(std::make_pair(GPR[GPR_idx++], Load));
6862           }
6863           if (GPR_idx != NumGPRs && Arg.getValueType() == MVT::f64 && !isPPC64){
6864             SDValue ConstFour = DAG.getConstant(4, dl, PtrOff.getValueType());
6865             PtrOff = DAG.getNode(ISD::ADD, dl, PtrVT, PtrOff, ConstFour);
6866             SDValue Load =
6867                 DAG.getLoad(PtrVT, dl, Store, PtrOff, MachinePointerInfo());
6868             MemOpChains.push_back(Load.getValue(1));
6869             RegsToPass.push_back(std::make_pair(GPR[GPR_idx++], Load));
6870           }
6871         } else {
6872           // If we have any FPRs remaining, we may also have GPRs remaining.
6873           // Args passed in FPRs consume either 1 (f32) or 2 (f64) available
6874           // GPRs.
6875           if (GPR_idx != NumGPRs)
6876             ++GPR_idx;
6877           if (GPR_idx != NumGPRs && Arg.getValueType() == MVT::f64 &&
              !isPPC64)  // PPC64 has 64-bit GPRs, obviously :)
6879             ++GPR_idx;
6880         }
6881       } else
6882         LowerMemOpCallTo(DAG, MF, Chain, Arg, PtrOff, SPDiff, ArgOffset,
6883                          isPPC64, CFlags.IsTailCall, false, MemOpChains,
6884                          TailCallArguments, dl);
6885       if (isPPC64)
6886         ArgOffset += 8;
6887       else
6888         ArgOffset += Arg.getValueType() == MVT::f32 ? 4 : 8;
6889       break;
6890     case MVT::v4f32:
6891     case MVT::v4i32:
6892     case MVT::v8i16:
6893     case MVT::v16i8:
6894       if (CFlags.IsVarArg) {
6895         // These go aligned on the stack, or in the corresponding R registers
6896         // when within range.  The Darwin PPC ABI doc claims they also go in
6897         // V registers; in fact gcc does this only for arguments that are
        // prototyped, not for those that match the "...".  We do it for all
        // arguments, which seems to work.
        while (ArgOffset % 16 != 0) {
6901           ArgOffset += PtrByteSize;
6902           if (GPR_idx != NumGPRs)
6903             GPR_idx++;
6904         }
6905         // We could elide this store in the case where the object fits
6906         // entirely in R registers.  Maybe later.
6907         PtrOff = DAG.getNode(ISD::ADD, dl, PtrVT, StackPtr,
6908                              DAG.getConstant(ArgOffset, dl, PtrVT));
6909         SDValue Store =
6910             DAG.getStore(Chain, dl, Arg, PtrOff, MachinePointerInfo());
6911         MemOpChains.push_back(Store);
6912         if (VR_idx != NumVRs) {
6913           SDValue Load =
6914               DAG.getLoad(MVT::v4f32, dl, Store, PtrOff, MachinePointerInfo());
6915           MemOpChains.push_back(Load.getValue(1));
6916           RegsToPass.push_back(std::make_pair(VR[VR_idx++], Load));
6917         }
6918         ArgOffset += 16;
6919         for (unsigned i=0; i<16; i+=PtrByteSize) {
6920           if (GPR_idx == NumGPRs)
6921             break;
6922           SDValue Ix = DAG.getNode(ISD::ADD, dl, PtrVT, PtrOff,
6923                                    DAG.getConstant(i, dl, PtrVT));
6924           SDValue Load =
6925               DAG.getLoad(PtrVT, dl, Store, Ix, MachinePointerInfo());
6926           MemOpChains.push_back(Load.getValue(1));
6927           RegsToPass.push_back(std::make_pair(GPR[GPR_idx++], Load));
6928         }
6929         break;
6930       }
6931 
6932       // Non-varargs Altivec params generally go in registers, but have
6933       // stack space allocated at the end.
6934       if (VR_idx != NumVRs) {
6935         // Doesn't have GPR space allocated.
6936         RegsToPass.push_back(std::make_pair(VR[VR_idx++], Arg));
6937       } else if (nAltivecParamsAtEnd==0) {
6938         // We are emitting Altivec params in order.
6939         LowerMemOpCallTo(DAG, MF, Chain, Arg, PtrOff, SPDiff, ArgOffset,
6940                          isPPC64, CFlags.IsTailCall, true, MemOpChains,
6941                          TailCallArguments, dl);
6942         ArgOffset += 16;
6943       }
6944       break;
6945     }
6946   }
6947   // If all Altivec parameters fit in registers, as they usually do,
6948   // they get stack space following the non-Altivec parameters.  We
6949   // don't track this here because nobody below needs it.
6950   // If there are more Altivec parameters than fit in registers emit
6951   // the stores here.
6952   if (!CFlags.IsVarArg && nAltivecParamsAtEnd > NumVRs) {
6953     unsigned j = 0;
    // Offset is aligned; skip the first 12 params, which go in V registers.
    ArgOffset = alignTo(ArgOffset, 16);
    ArgOffset += 12 * 16;
6957     for (unsigned i = 0; i != NumOps; ++i) {
6958       SDValue Arg = OutVals[i];
6959       EVT ArgType = Outs[i].VT;
6960       if (ArgType==MVT::v4f32 || ArgType==MVT::v4i32 ||
6961           ArgType==MVT::v8i16 || ArgType==MVT::v16i8) {
6962         if (++j > NumVRs) {
6963           SDValue PtrOff;
6964           // We are emitting Altivec params in order.
6965           LowerMemOpCallTo(DAG, MF, Chain, Arg, PtrOff, SPDiff, ArgOffset,
6966                            isPPC64, CFlags.IsTailCall, true, MemOpChains,
6967                            TailCallArguments, dl);
6968           ArgOffset += 16;
6969         }
6970       }
6971     }
6972   }
6973 
6974   if (!MemOpChains.empty())
6975     Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, MemOpChains);
6976 
6977   // On Darwin, R12 must contain the address of an indirect callee.  This does
6978   // not mean the MTCTR instruction must use R12; it's easier to model this as
6979   // an extra parameter, so do that.
6980   if (CFlags.IsIndirect) {
6981     assert(!CFlags.IsTailCall && "Indirect tail-calls not supported.");
6982     RegsToPass.push_back(std::make_pair((unsigned)(isPPC64 ? PPC::X12 :
6983                                                    PPC::R12), Callee));
6984   }
6985 
6986   // Build a sequence of copy-to-reg nodes chained together with token chain
6987   // and flag operands which copy the outgoing args into the appropriate regs.
6988   SDValue InFlag;
6989   for (unsigned i = 0, e = RegsToPass.size(); i != e; ++i) {
6990     Chain = DAG.getCopyToReg(Chain, dl, RegsToPass[i].first,
6991                              RegsToPass[i].second, InFlag);
6992     InFlag = Chain.getValue(1);
6993   }
6994 
6995   if (CFlags.IsTailCall)
6996     PrepareTailCall(DAG, InFlag, Chain, dl, SPDiff, NumBytes, LROp, FPOp,
6997                     TailCallArguments);
6998 
6999   return FinishCall(CFlags, dl, DAG, RegsToPass, InFlag, Chain, CallSeqStart,
7000                     Callee, SPDiff, NumBytes, Ins, InVals, CB);
7001 }
7002 
7003 static bool CC_AIX(unsigned ValNo, MVT ValVT, MVT LocVT,
7004                    CCValAssign::LocInfo LocInfo, ISD::ArgFlagsTy ArgFlags,
7005                    CCState &State) {
7006 
7007   const PPCSubtarget &Subtarget = static_cast<const PPCSubtarget &>(
7008       State.getMachineFunction().getSubtarget());
7009   const bool IsPPC64 = Subtarget.isPPC64();
7010   const Align PtrAlign = IsPPC64 ? Align(8) : Align(4);
7011   const MVT RegVT = IsPPC64 ? MVT::i64 : MVT::i32;
7012 
7013   if (ValVT.isVector() && !State.getMachineFunction()
7014                                .getTarget()
7015                                .Options.EnableAIXExtendedAltivecABI)
7016     report_fatal_error("the default Altivec AIX ABI is not yet supported");
7017 
7018   if (ValVT == MVT::f128)
7019     report_fatal_error("f128 is unimplemented on AIX.");
7020 
7021   if (ArgFlags.isNest())
7022     report_fatal_error("Nest arguments are unimplemented.");
7023 
7024   static const MCPhysReg GPR_32[] = {// 32-bit registers.
7025                                      PPC::R3, PPC::R4, PPC::R5, PPC::R6,
7026                                      PPC::R7, PPC::R8, PPC::R9, PPC::R10};
7027   static const MCPhysReg GPR_64[] = {// 64-bit registers.
7028                                      PPC::X3, PPC::X4, PPC::X5, PPC::X6,
7029                                      PPC::X7, PPC::X8, PPC::X9, PPC::X10};
7030 
7031   static const MCPhysReg VR[] = {// Vector registers.
7032                                  PPC::V2,  PPC::V3,  PPC::V4,  PPC::V5,
7033                                  PPC::V6,  PPC::V7,  PPC::V8,  PPC::V9,
7034                                  PPC::V10, PPC::V11, PPC::V12, PPC::V13};
7035 
7036   if (ArgFlags.isByVal()) {
7037     if (ArgFlags.getNonZeroByValAlign() > PtrAlign)
7038       report_fatal_error("Pass-by-value arguments with alignment greater than "
7039                          "register width are not supported.");
7040 
7041     const unsigned ByValSize = ArgFlags.getByValSize();
7042 
7043     // An empty aggregate parameter takes up no storage and no registers,
7044     // but needs a MemLoc for a stack slot for the formal arguments side.
7045     if (ByValSize == 0) {
7046       State.addLoc(CCValAssign::getMem(ValNo, MVT::INVALID_SIMPLE_VALUE_TYPE,
7047                                        State.getNextStackOffset(), RegVT,
7048                                        LocInfo));
7049       return false;
7050     }
7051 
7052     const unsigned StackSize = alignTo(ByValSize, PtrAlign);
7053     unsigned Offset = State.AllocateStack(StackSize, PtrAlign);
7054     for (const unsigned E = Offset + StackSize; Offset < E;
7055          Offset += PtrAlign.value()) {
7056       if (unsigned Reg = State.AllocateReg(IsPPC64 ? GPR_64 : GPR_32))
7057         State.addLoc(CCValAssign::getReg(ValNo, ValVT, Reg, RegVT, LocInfo));
7058       else {
7059         State.addLoc(CCValAssign::getMem(ValNo, MVT::INVALID_SIMPLE_VALUE_TYPE,
7060                                          Offset, MVT::INVALID_SIMPLE_VALUE_TYPE,
7061                                          LocInfo));
7062         break;
7063       }
7064     }
7065     return false;
7066   }
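  // E.g. (sketch, 64-bit): a 10-byte byval rounds up to StackSize = 16, so
  // the loop above claims two GPRs (or one GPR plus a trailing MemLoc once
  // the GPRs run out).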
7067 
  // Arguments always reserve space in the parameter save area.
7069   switch (ValVT.SimpleTy) {
7070   default:
7071     report_fatal_error("Unhandled value type for argument.");
7072   case MVT::i64:
7073     // i64 arguments should have been split to i32 for PPC32.
7074     assert(IsPPC64 && "PPC32 should have split i64 values.");
7075     LLVM_FALLTHROUGH;
7076   case MVT::i1:
7077   case MVT::i32: {
7078     const unsigned Offset = State.AllocateStack(PtrAlign.value(), PtrAlign);
7079     // AIX integer arguments are always passed in register width.
7080     if (ValVT.getFixedSizeInBits() < RegVT.getFixedSizeInBits())
7081       LocInfo = ArgFlags.isSExt() ? CCValAssign::LocInfo::SExt
7082                                   : CCValAssign::LocInfo::ZExt;
7083     if (unsigned Reg = State.AllocateReg(IsPPC64 ? GPR_64 : GPR_32))
7084       State.addLoc(CCValAssign::getReg(ValNo, ValVT, Reg, RegVT, LocInfo));
7085     else
7086       State.addLoc(CCValAssign::getMem(ValNo, ValVT, Offset, RegVT, LocInfo));
7087 
7088     return false;
7089   }
7090   case MVT::f32:
7091   case MVT::f64: {
    // The parameter save area (PSA) is reserved even if the float passes in
    // an FPR.
7093     const unsigned StoreSize = LocVT.getStoreSize();
7094     // Floats are always 4-byte aligned in the PSA on AIX.
7095     // This includes f64 in 64-bit mode for ABI compatibility.
7096     const unsigned Offset =
7097         State.AllocateStack(IsPPC64 ? 8 : StoreSize, Align(4));
7098     unsigned FReg = State.AllocateReg(FPR);
7099     if (FReg)
7100       State.addLoc(CCValAssign::getReg(ValNo, ValVT, FReg, LocVT, LocInfo));
7101 
7102     // Reserve and initialize GPRs or initialize the PSA as required.
7103     for (unsigned I = 0; I < StoreSize; I += PtrAlign.value()) {
7104       if (unsigned Reg = State.AllocateReg(IsPPC64 ? GPR_64 : GPR_32)) {
7105         assert(FReg && "An FPR should be available when a GPR is reserved.");
7106         if (State.isVarArg()) {
7107           // Successfully reserved GPRs are only initialized for vararg calls.
7108           // Custom handling is required for:
7109           //   f64 in PPC32 needs to be split into 2 GPRs.
7110           //   f32 in PPC64 needs to occupy only lower 32 bits of 64-bit GPR.
7111           State.addLoc(
7112               CCValAssign::getCustomReg(ValNo, ValVT, Reg, RegVT, LocInfo));
7113         }
7114       } else {
        // If there are insufficient GPRs, the PSA needs to be initialized.
        // For compatibility with the AIX XL compiler, initialization occurs
        // even if an FPR was initialized. The full memory for the argument
        // will be initialized even if a prior word is saved in a GPR.
        // A custom MemLoc is used when the argument also passes in an FPR so
        // that the callee handling can skip over it easily.
7121         State.addLoc(
7122             FReg ? CCValAssign::getCustomMem(ValNo, ValVT, Offset, LocVT,
7123                                              LocInfo)
7124                  : CCValAssign::getMem(ValNo, ValVT, Offset, LocVT, LocInfo));
7125         break;
7126       }
7127     }
7128 
7129     return false;
7130   }
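  // E.g. (sketch, 32-bit vararg): an f64 has StoreSize = 8, so the loop
  // above reserves two GPRs and records two custom RegLocs for the split
  // halves, matching the custom handling described above.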
7131   case MVT::v4f32:
7132   case MVT::v4i32:
7133   case MVT::v8i16:
7134   case MVT::v16i8:
7135   case MVT::v2i64:
7136   case MVT::v2f64:
7137   case MVT::v1i128: {
7138     if (State.isVarArg())
7139       report_fatal_error(
7140           "variadic arguments for vector types are unimplemented for AIX");
7141 
7142     if (unsigned VReg = State.AllocateReg(VR))
7143       State.addLoc(CCValAssign::getReg(ValNo, ValVT, VReg, LocVT, LocInfo));
7144     else {
7145       report_fatal_error(
7146           "passing vector parameters to the stack is unimplemented for AIX");
7147     }
7148     return false;
7149   }
7150   }
7151   return true;
7152 }
7153 
7154 static const TargetRegisterClass *getRegClassForSVT(MVT::SimpleValueType SVT,
7155                                                     bool IsPPC64) {
7156   assert((IsPPC64 || SVT != MVT::i64) &&
7157          "i64 should have been split for 32-bit codegen.");
7158 
7159   switch (SVT) {
7160   default:
7161     report_fatal_error("Unexpected value type for formal argument");
7162   case MVT::i1:
7163   case MVT::i32:
7164   case MVT::i64:
7165     return IsPPC64 ? &PPC::G8RCRegClass : &PPC::GPRCRegClass;
7166   case MVT::f32:
7167     return &PPC::F4RCRegClass;
7168   case MVT::f64:
7169     return &PPC::F8RCRegClass;
7170   case MVT::v4f32:
7171   case MVT::v4i32:
7172   case MVT::v8i16:
7173   case MVT::v16i8:
7174   case MVT::v2i64:
7175   case MVT::v2f64:
7176   case MVT::v1i128:
7177     return &PPC::VRRCRegClass;
7178   }
7179 }
7180 
7181 static SDValue truncateScalarIntegerArg(ISD::ArgFlagsTy Flags, EVT ValVT,
7182                                         SelectionDAG &DAG, SDValue ArgValue,
7183                                         MVT LocVT, const SDLoc &dl) {
7184   assert(ValVT.isScalarInteger() && LocVT.isScalarInteger());
7185   assert(ValVT.getFixedSizeInBits() < LocVT.getFixedSizeInBits());
7186 
7187   if (Flags.isSExt())
7188     ArgValue = DAG.getNode(ISD::AssertSext, dl, LocVT, ArgValue,
7189                            DAG.getValueType(ValVT));
7190   else if (Flags.isZExt())
7191     ArgValue = DAG.getNode(ISD::AssertZext, dl, LocVT, ArgValue,
7192                            DAG.getValueType(ValVT));
7193 
7194   return DAG.getNode(ISD::TRUNCATE, dl, ValVT, ArgValue);
7195 }
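// E.g. (sketch): an i32 formal argument arriving in a 64-bit GPR on 64-bit
// AIX reaches here with ValVT = i32 and LocVT = i64; if the argument was
// sign- or zero-extended, an AssertSext/AssertZext node records that fact
// before the value is truncated back to i32.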
7196 
7197 static unsigned mapArgRegToOffsetAIX(unsigned Reg, const PPCFrameLowering *FL) {
7198   const unsigned LASize = FL->getLinkageSize();
7199 
7200   if (PPC::GPRCRegClass.contains(Reg)) {
7201     assert(Reg >= PPC::R3 && Reg <= PPC::R10 &&
7202            "Reg must be a valid argument register!");
7203     return LASize + 4 * (Reg - PPC::R3);
7204   }
7205 
7206   if (PPC::G8RCRegClass.contains(Reg)) {
7207     assert(Reg >= PPC::X3 && Reg <= PPC::X10 &&
7208            "Reg must be a valid argument register!");
7209     return LASize + 8 * (Reg - PPC::X3);
7210   }
7211 
7212   llvm_unreachable("Only general purpose registers expected.");
7213 }
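// E.g. mapArgRegToOffsetAIX(PPC::R5, FL) yields LinkageSize + 8, the third
// word of the 32-bit parameter save area.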
7214 
7215 //   AIX ABI Stack Frame Layout:
7216 //
7217 //   Low Memory +--------------------------------------------+
7218 //   SP   +---> | Back chain                                 | ---+
7219 //        |     +--------------------------------------------+    |
7220 //        |     | Saved Condition Register                   |    |
7221 //        |     +--------------------------------------------+    |
7222 //        |     | Saved Linkage Register                     |    |
7223 //        |     +--------------------------------------------+    | Linkage Area
7224 //        |     | Reserved for compilers                     |    |
7225 //        |     +--------------------------------------------+    |
7226 //        |     | Reserved for binders                       |    |
7227 //        |     +--------------------------------------------+    |
7228 //        |     | Saved TOC pointer                          | ---+
7229 //        |     +--------------------------------------------+
7230 //        |     | Parameter save area                        |
7231 //        |     +--------------------------------------------+
7232 //        |     | Alloca space                               |
7233 //        |     +--------------------------------------------+
7234 //        |     | Local variable space                       |
7235 //        |     +--------------------------------------------+
7236 //        |     | Float/int conversion temporary             |
7237 //        |     +--------------------------------------------+
7238 //        |     | Save area for AltiVec registers            |
7239 //        |     +--------------------------------------------+
7240 //        |     | AltiVec alignment padding                  |
7241 //        |     +--------------------------------------------+
7242 //        |     | Save area for VRSAVE register              |
7243 //        |     +--------------------------------------------+
7244 //        |     | Save area for General Purpose registers    |
7245 //        |     +--------------------------------------------+
7246 //        |     | Save area for Floating Point registers     |
7247 //        |     +--------------------------------------------+
7248 //        +---- | Back chain                                 |
7249 // High Memory  +--------------------------------------------+
7250 //
7251 //  Specifications:
7252 //  AIX 7.2 Assembler Language Reference
7253 //  Subroutine linkage convention
7254 
7255 SDValue PPCTargetLowering::LowerFormalArguments_AIX(
7256     SDValue Chain, CallingConv::ID CallConv, bool isVarArg,
7257     const SmallVectorImpl<ISD::InputArg> &Ins, const SDLoc &dl,
7258     SelectionDAG &DAG, SmallVectorImpl<SDValue> &InVals) const {
7259 
7260   assert((CallConv == CallingConv::C || CallConv == CallingConv::Cold ||
7261           CallConv == CallingConv::Fast) &&
7262          "Unexpected calling convention!");
7263 
7264   if (getTargetMachine().Options.GuaranteedTailCallOpt)
7265     report_fatal_error("Tail call support is unimplemented on AIX.");
7266 
7267   if (useSoftFloat())
7268     report_fatal_error("Soft float support is unimplemented on AIX.");
7269 
7270   const PPCSubtarget &Subtarget =
7271       static_cast<const PPCSubtarget &>(DAG.getSubtarget());
7272 
7273   const bool IsPPC64 = Subtarget.isPPC64();
7274   const unsigned PtrByteSize = IsPPC64 ? 8 : 4;
7275 
7276   // Assign locations to all of the incoming arguments.
7277   SmallVector<CCValAssign, 16> ArgLocs;
7278   MachineFunction &MF = DAG.getMachineFunction();
7279   MachineFrameInfo &MFI = MF.getFrameInfo();
7280   CCState CCInfo(CallConv, isVarArg, MF, ArgLocs, *DAG.getContext());
7281 
7282   const EVT PtrVT = getPointerTy(MF.getDataLayout());
7283   // Reserve space for the linkage area on the stack.
7284   const unsigned LinkageSize = Subtarget.getFrameLowering()->getLinkageSize();
7285   CCInfo.AllocateStack(LinkageSize, Align(PtrByteSize));
7286   CCInfo.AnalyzeFormalArguments(Ins, CC_AIX);
7287 
7288   SmallVector<SDValue, 8> MemOps;
7289 
7290   for (size_t I = 0, End = ArgLocs.size(); I != End; /* No increment here */) {
7291     CCValAssign &VA = ArgLocs[I++];
7292     MVT LocVT = VA.getLocVT();
7293     ISD::ArgFlagsTy Flags = Ins[VA.getValNo()].Flags;
7294     if (VA.isMemLoc() && VA.getValVT().isVector())
7295       report_fatal_error(
7296           "passing vector parameters to the stack is unimplemented for AIX");
7297 
    // For compatibility with the AIX XL compiler, the float args in the
    // parameter save area are initialized even if the argument is available
    // in a register.  The caller is required to initialize both the register
    // and memory; however, the callee can choose to expect it in either.
    // The MemLoc is dismissed here because the argument is retrieved from
    // the register.
7304     if (VA.isMemLoc() && VA.needsCustom())
7305       continue;
7306 
7307     if (Flags.isByVal() && VA.isMemLoc()) {
7308       const unsigned Size =
7309           alignTo(Flags.getByValSize() ? Flags.getByValSize() : PtrByteSize,
7310                   PtrByteSize);
7311       const int FI = MF.getFrameInfo().CreateFixedObject(
7312           Size, VA.getLocMemOffset(), /* IsImmutable */ false,
7313           /* IsAliased */ true);
7314       SDValue FIN = DAG.getFrameIndex(FI, PtrVT);
7315       InVals.push_back(FIN);
7316 
7317       continue;
7318     }
7319 
7320     if (Flags.isByVal()) {
7321       assert(VA.isRegLoc() && "MemLocs should already be handled.");
7322 
7323       const MCPhysReg ArgReg = VA.getLocReg();
7324       const PPCFrameLowering *FL = Subtarget.getFrameLowering();
7325 
7326       if (Flags.getNonZeroByValAlign() > PtrByteSize)
        report_fatal_error("Overaligned byvals not supported yet.");
7328 
7329       const unsigned StackSize = alignTo(Flags.getByValSize(), PtrByteSize);
7330       const int FI = MF.getFrameInfo().CreateFixedObject(
7331           StackSize, mapArgRegToOffsetAIX(ArgReg, FL), /* IsImmutable */ false,
7332           /* IsAliased */ true);
7333       SDValue FIN = DAG.getFrameIndex(FI, PtrVT);
7334       InVals.push_back(FIN);
7335 
7336       // Add live ins for all the RegLocs for the same ByVal.
7337       const TargetRegisterClass *RegClass =
7338           IsPPC64 ? &PPC::G8RCRegClass : &PPC::GPRCRegClass;
7339 
7340       auto HandleRegLoc = [&, RegClass, LocVT](const MCPhysReg PhysReg,
7341                                                unsigned Offset) {
7342         const unsigned VReg = MF.addLiveIn(PhysReg, RegClass);
        // Since the caller's side has left-justified the aggregate in the
        // register, we can simply store the entire register into the stack
        // slot.
        SDValue CopyFrom = DAG.getCopyFromReg(Chain, dl, VReg, LocVT);
        // The store to the fixed-stack object is needed because accessing a
        // field of the ByVal will use a GEP and load. Ideally we would
        // extract the value from the register directly, and elide the store
        // when the argument's address is not taken, but that will need to be
        // future work.
7352         SDValue Store = DAG.getStore(
7353             CopyFrom.getValue(1), dl, CopyFrom,
7354             DAG.getObjectPtrOffset(dl, FIN, TypeSize::Fixed(Offset)),
7355             MachinePointerInfo::getFixedStack(MF, FI, Offset));
7356 
7357         MemOps.push_back(Store);
7358       };
7359 
7360       unsigned Offset = 0;
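      // For example (a sketch, 64-bit): a 13-byte by-val assigned X3 and X4
      // occupies a 16-byte slot; X3 is stored at offset 0 and X4 at offset 8.
      // If the GPRs run out mid-aggregate, the remaining bytes are covered by
      // a MemLoc instead, which is consumed below without emitting any code.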
7361       HandleRegLoc(VA.getLocReg(), Offset);
7362       Offset += PtrByteSize;
7363       for (; Offset != StackSize && ArgLocs[I].isRegLoc();
7364            Offset += PtrByteSize) {
7365         assert(ArgLocs[I].getValNo() == VA.getValNo() &&
7366                "RegLocs should be for ByVal argument.");
7367 
7368         const CCValAssign RL = ArgLocs[I++];
7369         HandleRegLoc(RL.getLocReg(), Offset);
7370       }
7371 
7372       if (Offset != StackSize) {
7373         assert(ArgLocs[I].getValNo() == VA.getValNo() &&
7374                "Expected MemLoc for remaining bytes.");
7375         assert(ArgLocs[I].isMemLoc() && "Expected MemLoc for remaining bytes.");
        // Consume the MemLoc.  The InVal has already been emitted, so nothing
        // more needs to be done.
7378         ++I;
7379       }
7380 
7381       continue;
7382     }
7383 
7384     EVT ValVT = VA.getValVT();
7385     if (VA.isRegLoc() && !VA.needsCustom()) {
7386       MVT::SimpleValueType SVT = ValVT.getSimpleVT().SimpleTy;
7387       unsigned VReg =
7388           MF.addLiveIn(VA.getLocReg(), getRegClassForSVT(SVT, IsPPC64));
7389       SDValue ArgValue = DAG.getCopyFromReg(Chain, dl, VReg, LocVT);
7390       if (ValVT.isScalarInteger() &&
7391           (ValVT.getFixedSizeInBits() < LocVT.getFixedSizeInBits())) {
7392         ArgValue =
7393             truncateScalarIntegerArg(Flags, ValVT, DAG, ArgValue, LocVT, dl);
7394       }
7395       InVals.push_back(ArgValue);
7396       continue;
7397     }
7398     if (VA.isMemLoc()) {
7399       const unsigned LocSize = LocVT.getStoreSize();
7400       const unsigned ValSize = ValVT.getStoreSize();
7401       assert((ValSize <= LocSize) &&
7402              "Object size is larger than size of MemLoc");
7403       int CurArgOffset = VA.getLocMemOffset();
7404       // Objects are right-justified because AIX is big-endian.
7405       if (LocSize > ValSize)
7406         CurArgOffset += LocSize - ValSize;
7407       // Potential tail calls could cause overwriting of argument stack slots.
7408       const bool IsImmutable =
7409           !(getTargetMachine().Options.GuaranteedTailCallOpt &&
7410             (CallConv == CallingConv::Fast));
7411       int FI = MFI.CreateFixedObject(ValSize, CurArgOffset, IsImmutable);
7412       SDValue FIN = DAG.getFrameIndex(FI, PtrVT);
7413       SDValue ArgValue =
7414           DAG.getLoad(ValVT, dl, Chain, FIN, MachinePointerInfo());
7415       InVals.push_back(ArgValue);
7416       continue;
7417     }
7418   }
7419 
7420   // On AIX a minimum of 8 words is saved to the parameter save area.
7421   const unsigned MinParameterSaveArea = 8 * PtrByteSize;
7422   // Area that is at least reserved in the caller of this function.
7423   unsigned CallerReservedArea =
7424       std::max(CCInfo.getNextStackOffset(), LinkageSize + MinParameterSaveArea);
7425 
7426   // Set the size that is at least reserved in caller of this function. Tail
7427   // call optimized function's reserved stack space needs to be aligned so
7428   // that taking the difference between two stack areas will result in an
7429   // aligned stack.
7430   CallerReservedArea =
7431       EnsureStackAlignment(Subtarget.getFrameLowering(), CallerReservedArea);
7432   PPCFunctionInfo *FuncInfo = MF.getInfo<PPCFunctionInfo>();
7433   FuncInfo->setMinReservedArea(CallerReservedArea);
7434 
7435   if (isVarArg) {
7436     FuncInfo->setVarArgsFrameIndex(
7437         MFI.CreateFixedObject(PtrByteSize, CCInfo.getNextStackOffset(), true));
7438     SDValue FIN = DAG.getFrameIndex(FuncInfo->getVarArgsFrameIndex(), PtrVT);
7439 
7440     static const MCPhysReg GPR_32[] = {PPC::R3, PPC::R4, PPC::R5, PPC::R6,
7441                                        PPC::R7, PPC::R8, PPC::R9, PPC::R10};
7442 
7443     static const MCPhysReg GPR_64[] = {PPC::X3, PPC::X4, PPC::X5, PPC::X6,
7444                                        PPC::X7, PPC::X8, PPC::X9, PPC::X10};
7445     const unsigned NumGPArgRegs = array_lengthof(IsPPC64 ? GPR_64 : GPR_32);
7446 
7447     // The fixed integer arguments of a variadic function are stored to the
7448     // VarArgsFrameIndex on the stack so that they may be loaded by
7449     // dereferencing the result of va_next.
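    // For example (a sketch, 64-bit): if the fixed arguments consumed X3 and
    // X4, GPRIndex starts at 2 and X5..X10 are stored to consecutive
    // PtrByteSize slots beginning at the VarArgsFrameIndex.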
7450     for (unsigned GPRIndex =
7451              (CCInfo.getNextStackOffset() - LinkageSize) / PtrByteSize;
7452          GPRIndex < NumGPArgRegs; ++GPRIndex) {
7453 
7454       const unsigned VReg =
7455           IsPPC64 ? MF.addLiveIn(GPR_64[GPRIndex], &PPC::G8RCRegClass)
7456                   : MF.addLiveIn(GPR_32[GPRIndex], &PPC::GPRCRegClass);
7457 
7458       SDValue Val = DAG.getCopyFromReg(Chain, dl, VReg, PtrVT);
7459       SDValue Store =
7460           DAG.getStore(Val.getValue(1), dl, Val, FIN, MachinePointerInfo());
7461       MemOps.push_back(Store);
7462       // Increment the address for the next argument to store.
7463       SDValue PtrOff = DAG.getConstant(PtrByteSize, dl, PtrVT);
7464       FIN = DAG.getNode(ISD::ADD, dl, PtrOff.getValueType(), FIN, PtrOff);
7465     }
7466   }
7467 
7468   if (!MemOps.empty())
7469     Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, MemOps);
7470 
7471   return Chain;
7472 }
7473 
7474 SDValue PPCTargetLowering::LowerCall_AIX(
7475     SDValue Chain, SDValue Callee, CallFlags CFlags,
7476     const SmallVectorImpl<ISD::OutputArg> &Outs,
7477     const SmallVectorImpl<SDValue> &OutVals,
7478     const SmallVectorImpl<ISD::InputArg> &Ins, const SDLoc &dl,
7479     SelectionDAG &DAG, SmallVectorImpl<SDValue> &InVals,
7480     const CallBase *CB) const {
7481   // See PPCTargetLowering::LowerFormalArguments_AIX() for a description of the
7482   // AIX ABI stack frame layout.
7483 
7484   assert((CFlags.CallConv == CallingConv::C ||
7485           CFlags.CallConv == CallingConv::Cold ||
7486           CFlags.CallConv == CallingConv::Fast) &&
7487          "Unexpected calling convention!");
7488 
7489   if (CFlags.IsPatchPoint)
7490     report_fatal_error("This call type is unimplemented on AIX.");
7491 
7492   const PPCSubtarget& Subtarget =
7493       static_cast<const PPCSubtarget&>(DAG.getSubtarget());
7494 
7495   MachineFunction &MF = DAG.getMachineFunction();
7496   SmallVector<CCValAssign, 16> ArgLocs;
7497   CCState CCInfo(CFlags.CallConv, CFlags.IsVarArg, MF, ArgLocs,
7498                  *DAG.getContext());
7499 
7500   // Reserve space for the linkage save area (LSA) on the stack.
7501   // In both PPC32 and PPC64 there are 6 reserved slots in the LSA:
7502   //   [SP][CR][LR][2 x reserved][TOC].
7503   // The LSA is 24 bytes (6x4) in PPC32 and 48 bytes (6x8) in PPC64.
7504   const unsigned LinkageSize = Subtarget.getFrameLowering()->getLinkageSize();
7505   const bool IsPPC64 = Subtarget.isPPC64();
7506   const EVT PtrVT = getPointerTy(DAG.getDataLayout());
7507   const unsigned PtrByteSize = IsPPC64 ? 8 : 4;
7508   CCInfo.AllocateStack(LinkageSize, Align(PtrByteSize));
7509   CCInfo.AnalyzeCallOperands(Outs, CC_AIX);
7510 
7511   // The prolog code of the callee may store up to 8 GPR argument registers to
7512   // the stack, allowing va_start to index over them in memory if the callee
7513   // is variadic.
7514   // Because we cannot tell if this is needed on the caller side, we have to
7515   // conservatively assume that it is needed.  As such, make sure we have at
7516   // least enough stack space for the caller to store the 8 GPRs.
7517   const unsigned MinParameterSaveAreaSize = 8 * PtrByteSize;
7518   const unsigned NumBytes = std::max(LinkageSize + MinParameterSaveAreaSize,
7519                                      CCInfo.getNextStackOffset());
7520 
7521   // Adjust the stack pointer for the new arguments...
7522   // These operations are automatically eliminated by the prolog/epilog pass.
7523   Chain = DAG.getCALLSEQ_START(Chain, NumBytes, 0, dl);
7524   SDValue CallSeqStart = Chain;
7525 
7526   SmallVector<std::pair<unsigned, SDValue>, 8> RegsToPass;
7527   SmallVector<SDValue, 8> MemOpChains;
7528 
7529   // Set up a copy of the stack pointer for loading and storing any
7530   // arguments that may not fit in the registers available for argument
7531   // passing.
7532   const SDValue StackPtr = IsPPC64 ? DAG.getRegister(PPC::X1, MVT::i64)
7533                                    : DAG.getRegister(PPC::R1, MVT::i32);
7534 
7535   for (unsigned I = 0, E = ArgLocs.size(); I != E;) {
7536     const unsigned ValNo = ArgLocs[I].getValNo();
7537     SDValue Arg = OutVals[ValNo];
7538     ISD::ArgFlagsTy Flags = Outs[ValNo].Flags;
7539 
7540     if (Flags.isByVal()) {
7541       const unsigned ByValSize = Flags.getByValSize();
7542 
7543       // Nothing to do for zero-sized ByVals on the caller side.
7544       if (!ByValSize) {
7545         ++I;
7546         continue;
7547       }
7548 
7549       auto GetLoad = [&](EVT VT, unsigned LoadOffset) {
7550         return DAG.getExtLoad(
7551             ISD::ZEXTLOAD, dl, PtrVT, Chain,
7552             (LoadOffset != 0)
7553                 ? DAG.getObjectPtrOffset(dl, Arg, TypeSize::Fixed(LoadOffset))
7554                 : Arg,
7555             MachinePointerInfo(), VT);
7556       };
7557 
7558       unsigned LoadOffset = 0;
7559 
      // Initialize the registers that are fully occupied by the by-val
      // argument.
7561       while (LoadOffset + PtrByteSize <= ByValSize && ArgLocs[I].isRegLoc()) {
7562         SDValue Load = GetLoad(PtrVT, LoadOffset);
7563         MemOpChains.push_back(Load.getValue(1));
7564         LoadOffset += PtrByteSize;
7565         const CCValAssign &ByValVA = ArgLocs[I++];
7566         assert(ByValVA.getValNo() == ValNo &&
7567                "Unexpected location for pass-by-value argument.");
7568         RegsToPass.push_back(std::make_pair(ByValVA.getLocReg(), Load));
7569       }
7570 
7571       if (LoadOffset == ByValSize)
7572         continue;
7573 
7574       // There must be one more loc to handle the remainder.
7575       assert(ArgLocs[I].getValNo() == ValNo &&
7576              "Expected additional location for by-value argument.");
7577 
7578       if (ArgLocs[I].isMemLoc()) {
7579         assert(LoadOffset < ByValSize && "Unexpected memloc for by-val arg.");
7580         const CCValAssign &ByValVA = ArgLocs[I++];
7581         ISD::ArgFlagsTy MemcpyFlags = Flags;
        // Only memcpy the bytes that are not passed in registers.
7583         MemcpyFlags.setByValSize(ByValSize - LoadOffset);
7584         Chain = CallSeqStart = createMemcpyOutsideCallSeq(
7585             (LoadOffset != 0)
7586                 ? DAG.getObjectPtrOffset(dl, Arg, TypeSize::Fixed(LoadOffset))
7587                 : Arg,
7588             DAG.getObjectPtrOffset(dl, StackPtr,
7589                                    TypeSize::Fixed(ByValVA.getLocMemOffset())),
7590             CallSeqStart, MemcpyFlags, DAG, dl);
7591         continue;
7592       }
7593 
      // Initialize the final register residue.
      // Any residue that occupies the final by-val arg register must be
      // left-justified on AIX. Loads must be a power-of-2 size and cannot be
      // larger than the ByValSize. For example: a 7-byte by-val arg requires
      // 4-, 2- and 1-byte loads.
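      // Concretely (a sketch, 64-bit): a 7-byte residue is assembled as
      //   (zext i32 load @0) << 32 | (zext i16 load @4) << 16
      //                            | (zext i8 load @6) << 8,
      // leaving the bytes left-justified in the final GPR.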
7599       const unsigned ResidueBytes = ByValSize % PtrByteSize;
7600       assert(ResidueBytes != 0 && LoadOffset + PtrByteSize > ByValSize &&
7601              "Unexpected register residue for by-value argument.");
7602       SDValue ResidueVal;
7603       for (unsigned Bytes = 0; Bytes != ResidueBytes;) {
7604         const unsigned N = PowerOf2Floor(ResidueBytes - Bytes);
7605         const MVT VT =
7606             N == 1 ? MVT::i8
7607                    : ((N == 2) ? MVT::i16 : (N == 4 ? MVT::i32 : MVT::i64));
7608         SDValue Load = GetLoad(VT, LoadOffset);
7609         MemOpChains.push_back(Load.getValue(1));
7610         LoadOffset += N;
7611         Bytes += N;
7612 
        // By-val arguments are passed left-justified in registers.
        // Every load here needs to be shifted; otherwise, a full register
        // load would have been used.
7616         assert(PtrVT.getSimpleVT().getSizeInBits() > (Bytes * 8) &&
7617                "Unexpected load emitted during handling of pass-by-value "
7618                "argument.");
7619         unsigned NumSHLBits = PtrVT.getSimpleVT().getSizeInBits() - (Bytes * 8);
7620         EVT ShiftAmountTy =
7621             getShiftAmountTy(Load->getValueType(0), DAG.getDataLayout());
7622         SDValue SHLAmt = DAG.getConstant(NumSHLBits, dl, ShiftAmountTy);
7623         SDValue ShiftedLoad =
7624             DAG.getNode(ISD::SHL, dl, Load.getValueType(), Load, SHLAmt);
7625         ResidueVal = ResidueVal ? DAG.getNode(ISD::OR, dl, PtrVT, ResidueVal,
7626                                               ShiftedLoad)
7627                                 : ShiftedLoad;
7628       }
7629 
7630       const CCValAssign &ByValVA = ArgLocs[I++];
7631       RegsToPass.push_back(std::make_pair(ByValVA.getLocReg(), ResidueVal));
7632       continue;
7633     }
7634 
7635     CCValAssign &VA = ArgLocs[I++];
7636     const MVT LocVT = VA.getLocVT();
7637     const MVT ValVT = VA.getValVT();
7638 
7639     if (VA.isMemLoc() && VA.getValVT().isVector())
7640       report_fatal_error(
7641           "passing vector parameters to the stack is unimplemented for AIX");
7642 
7643     switch (VA.getLocInfo()) {
7644     default:
7645       report_fatal_error("Unexpected argument extension type.");
7646     case CCValAssign::Full:
7647       break;
7648     case CCValAssign::ZExt:
7649       Arg = DAG.getNode(ISD::ZERO_EXTEND, dl, VA.getLocVT(), Arg);
7650       break;
7651     case CCValAssign::SExt:
7652       Arg = DAG.getNode(ISD::SIGN_EXTEND, dl, VA.getLocVT(), Arg);
7653       break;
7654     }
7655 
7656     if (VA.isRegLoc() && !VA.needsCustom()) {
7657       RegsToPass.push_back(std::make_pair(VA.getLocReg(), Arg));
7658       continue;
7659     }
7660 
7661     if (VA.isMemLoc()) {
7662       SDValue PtrOff =
7663           DAG.getConstant(VA.getLocMemOffset(), dl, StackPtr.getValueType());
7664       PtrOff = DAG.getNode(ISD::ADD, dl, PtrVT, StackPtr, PtrOff);
7665       MemOpChains.push_back(
7666           DAG.getStore(Chain, dl, Arg, PtrOff, MachinePointerInfo()));
7667 
7668       continue;
7669     }
7670 
7671     // Custom handling is used for GPR initializations for vararg float
7672     // arguments.
7673     assert(VA.isRegLoc() && VA.needsCustom() && CFlags.IsVarArg &&
7674            ValVT.isFloatingPoint() && LocVT.isInteger() &&
7675            "Unexpected register handling for calling convention.");
7676 
7677     SDValue ArgAsInt =
7678         DAG.getBitcast(MVT::getIntegerVT(ValVT.getSizeInBits()), Arg);
7679 
7680     if (Arg.getValueType().getStoreSize() == LocVT.getStoreSize())
7681       // f32 in 32-bit GPR
7682       // f64 in 64-bit GPR
7683       RegsToPass.push_back(std::make_pair(VA.getLocReg(), ArgAsInt));
7684     else if (Arg.getValueType().getFixedSizeInBits() <
7685              LocVT.getFixedSizeInBits())
7686       // f32 in 64-bit GPR.
7687       RegsToPass.push_back(std::make_pair(
7688           VA.getLocReg(), DAG.getZExtOrTrunc(ArgAsInt, dl, LocVT)));
7689     else {
7690       // f64 in two 32-bit GPRs
7691       // The 2 GPRs are marked custom and expected to be adjacent in ArgLocs.
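      // e.g. (a sketch): ArgAsInt is the f64 bitcast to i64; the high word
      // (ArgAsInt >> 32) goes in the first GPR and the truncated low word in
      // the second, matching big-endian word order.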
7692       assert(Arg.getValueType() == MVT::f64 && CFlags.IsVarArg && !IsPPC64 &&
7693              "Unexpected custom register for argument!");
7694       CCValAssign &GPR1 = VA;
7695       SDValue MSWAsI64 = DAG.getNode(ISD::SRL, dl, MVT::i64, ArgAsInt,
7696                                      DAG.getConstant(32, dl, MVT::i8));
7697       RegsToPass.push_back(std::make_pair(
7698           GPR1.getLocReg(), DAG.getZExtOrTrunc(MSWAsI64, dl, MVT::i32)));
7699 
7700       if (I != E) {
        // If only one GPR was available, there will be only one custom GPR
        // and the argument will also be passed in memory.
        CCValAssign &PeekArg = ArgLocs[I];
        if (PeekArg.isRegLoc() && PeekArg.getValNo() == ValNo) {
7705           assert(PeekArg.needsCustom() && "A second custom GPR is expected.");
7706           CCValAssign &GPR2 = ArgLocs[I++];
7707           RegsToPass.push_back(std::make_pair(
7708               GPR2.getLocReg(), DAG.getZExtOrTrunc(ArgAsInt, dl, MVT::i32)));
7709         }
7710       }
7711     }
7712   }
7713 
7714   if (!MemOpChains.empty())
7715     Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, MemOpChains);
7716 
7717   // For indirect calls, we need to save the TOC base to the stack for
7718   // restoration after the call.
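  // Given the linkage area layout described above, the TOC save slot lives at
  // 5 * PtrByteSize from the stack pointer, e.g. 40(R1) on 64-bit AIX (a
  // sketch; the authoritative offset comes from getTOCSaveOffset()).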
7719   if (CFlags.IsIndirect) {
7720     assert(!CFlags.IsTailCall && "Indirect tail-calls not supported.");
7721     const MCRegister TOCBaseReg = Subtarget.getTOCPointerRegister();
7722     const MCRegister StackPtrReg = Subtarget.getStackPointerRegister();
7723     const MVT PtrVT = Subtarget.isPPC64() ? MVT::i64 : MVT::i32;
7724     const unsigned TOCSaveOffset =
7725         Subtarget.getFrameLowering()->getTOCSaveOffset();
7726 
7727     setUsesTOCBasePtr(DAG);
7728     SDValue Val = DAG.getCopyFromReg(Chain, dl, TOCBaseReg, PtrVT);
7729     SDValue PtrOff = DAG.getIntPtrConstant(TOCSaveOffset, dl);
7730     SDValue StackPtr = DAG.getRegister(StackPtrReg, PtrVT);
7731     SDValue AddPtr = DAG.getNode(ISD::ADD, dl, PtrVT, StackPtr, PtrOff);
7732     Chain = DAG.getStore(
7733         Val.getValue(1), dl, Val, AddPtr,
7734         MachinePointerInfo::getStack(DAG.getMachineFunction(), TOCSaveOffset));
7735   }
7736 
7737   // Build a sequence of copy-to-reg nodes chained together with token chain
7738   // and flag operands which copy the outgoing args into the appropriate regs.
7739   SDValue InFlag;
7740   for (auto Reg : RegsToPass) {
7741     Chain = DAG.getCopyToReg(Chain, dl, Reg.first, Reg.second, InFlag);
7742     InFlag = Chain.getValue(1);
7743   }
7744 
7745   const int SPDiff = 0;
7746   return FinishCall(CFlags, dl, DAG, RegsToPass, InFlag, Chain, CallSeqStart,
7747                     Callee, SPDiff, NumBytes, Ins, InVals, CB);
7748 }
7749 
7750 bool
7751 PPCTargetLowering::CanLowerReturn(CallingConv::ID CallConv,
7752                                   MachineFunction &MF, bool isVarArg,
7753                                   const SmallVectorImpl<ISD::OutputArg> &Outs,
7754                                   LLVMContext &Context) const {
7755   SmallVector<CCValAssign, 16> RVLocs;
7756   CCState CCInfo(CallConv, isVarArg, MF, RVLocs, Context);
7757   return CCInfo.CheckReturn(
7758       Outs, (Subtarget.isSVR4ABI() && CallConv == CallingConv::Cold)
7759                 ? RetCC_PPC_Cold
7760                 : RetCC_PPC);
7761 }
7762 
7763 SDValue
7764 PPCTargetLowering::LowerReturn(SDValue Chain, CallingConv::ID CallConv,
7765                                bool isVarArg,
7766                                const SmallVectorImpl<ISD::OutputArg> &Outs,
7767                                const SmallVectorImpl<SDValue> &OutVals,
7768                                const SDLoc &dl, SelectionDAG &DAG) const {
7769   SmallVector<CCValAssign, 16> RVLocs;
7770   CCState CCInfo(CallConv, isVarArg, DAG.getMachineFunction(), RVLocs,
7771                  *DAG.getContext());
7772   CCInfo.AnalyzeReturn(Outs,
7773                        (Subtarget.isSVR4ABI() && CallConv == CallingConv::Cold)
7774                            ? RetCC_PPC_Cold
7775                            : RetCC_PPC);
7776 
7777   SDValue Flag;
7778   SmallVector<SDValue, 4> RetOps(1, Chain);
7779 
7780   // Copy the result values into the output registers.
7781   for (unsigned i = 0, RealResIdx = 0; i != RVLocs.size(); ++i, ++RealResIdx) {
7782     CCValAssign &VA = RVLocs[i];
7783     assert(VA.isRegLoc() && "Can only return in registers!");
7784 
7785     SDValue Arg = OutVals[RealResIdx];
7786 
7787     switch (VA.getLocInfo()) {
7788     default: llvm_unreachable("Unknown loc info!");
7789     case CCValAssign::Full: break;
7790     case CCValAssign::AExt:
7791       Arg = DAG.getNode(ISD::ANY_EXTEND, dl, VA.getLocVT(), Arg);
7792       break;
7793     case CCValAssign::ZExt:
7794       Arg = DAG.getNode(ISD::ZERO_EXTEND, dl, VA.getLocVT(), Arg);
7795       break;
7796     case CCValAssign::SExt:
7797       Arg = DAG.getNode(ISD::SIGN_EXTEND, dl, VA.getLocVT(), Arg);
7798       break;
7799     }
7800     if (Subtarget.hasSPE() && VA.getLocVT() == MVT::f64) {
7801       bool isLittleEndian = Subtarget.isLittleEndian();
7802       // Legalize ret f64 -> ret 2 x i32.
7803       SDValue SVal =
7804           DAG.getNode(PPCISD::EXTRACT_SPE, dl, MVT::i32, Arg,
7805                       DAG.getIntPtrConstant(isLittleEndian ? 0 : 1, dl));
7806       Chain = DAG.getCopyToReg(Chain, dl, VA.getLocReg(), SVal, Flag);
7807       RetOps.push_back(DAG.getRegister(VA.getLocReg(), VA.getLocVT()));
7808       SVal = DAG.getNode(PPCISD::EXTRACT_SPE, dl, MVT::i32, Arg,
7809                          DAG.getIntPtrConstant(isLittleEndian ? 1 : 0, dl));
7810       Flag = Chain.getValue(1);
7811       VA = RVLocs[++i]; // skip ahead to next loc
7812       Chain = DAG.getCopyToReg(Chain, dl, VA.getLocReg(), SVal, Flag);
7813     } else
7814       Chain = DAG.getCopyToReg(Chain, dl, VA.getLocReg(), Arg, Flag);
7815     Flag = Chain.getValue(1);
7816     RetOps.push_back(DAG.getRegister(VA.getLocReg(), VA.getLocVT()));
7817   }
7818 
7819   RetOps[0] = Chain;  // Update chain.
7820 
7821   // Add the flag if we have it.
7822   if (Flag.getNode())
7823     RetOps.push_back(Flag);
7824 
7825   return DAG.getNode(PPCISD::RET_FLAG, dl, MVT::Other, RetOps);
7826 }
7827 
7828 SDValue
7829 PPCTargetLowering::LowerGET_DYNAMIC_AREA_OFFSET(SDValue Op,
7830                                                 SelectionDAG &DAG) const {
7831   SDLoc dl(Op);
7832 
7833   // Get the correct type for integers.
7834   EVT IntVT = Op.getValueType();
7835 
7836   // Get the inputs.
7837   SDValue Chain = Op.getOperand(0);
7838   SDValue FPSIdx = getFramePointerFrameIndex(DAG);
7839   // Build a DYNAREAOFFSET node.
7840   SDValue Ops[2] = {Chain, FPSIdx};
7841   SDVTList VTs = DAG.getVTList(IntVT);
7842   return DAG.getNode(PPCISD::DYNAREAOFFSET, dl, VTs, Ops);
7843 }
7844 
7845 SDValue PPCTargetLowering::LowerSTACKRESTORE(SDValue Op,
7846                                              SelectionDAG &DAG) const {
7847   // When we pop the dynamic allocation we need to restore the SP link.
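  // The resulting sequence is roughly (a sketch, 32-bit):
  //   lwz r0, 0(r1)    ; load the old back-chain link
  //   mr  r1, rSaveSP  ; restore the stack pointer
  //   stw r0, 0(r1)    ; store the link at the new stack top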
7848   SDLoc dl(Op);
7849 
7850   // Get the correct type for pointers.
7851   EVT PtrVT = getPointerTy(DAG.getDataLayout());
7852 
7853   // Construct the stack pointer operand.
7854   bool isPPC64 = Subtarget.isPPC64();
7855   unsigned SP = isPPC64 ? PPC::X1 : PPC::R1;
7856   SDValue StackPtr = DAG.getRegister(SP, PtrVT);
7857 
7858   // Get the operands for the STACKRESTORE.
7859   SDValue Chain = Op.getOperand(0);
7860   SDValue SaveSP = Op.getOperand(1);
7861 
7862   // Load the old link SP.
7863   SDValue LoadLinkSP =
7864       DAG.getLoad(PtrVT, dl, Chain, StackPtr, MachinePointerInfo());
7865 
7866   // Restore the stack pointer.
7867   Chain = DAG.getCopyToReg(LoadLinkSP.getValue(1), dl, SP, SaveSP);
7868 
7869   // Store the old link SP.
7870   return DAG.getStore(Chain, dl, LoadLinkSP, StackPtr, MachinePointerInfo());
7871 }
7872 
7873 SDValue PPCTargetLowering::getReturnAddrFrameIndex(SelectionDAG &DAG) const {
7874   MachineFunction &MF = DAG.getMachineFunction();
7875   bool isPPC64 = Subtarget.isPPC64();
7876   EVT PtrVT = getPointerTy(MF.getDataLayout());
7877 
  // Get the current return address save index.
7880   PPCFunctionInfo *FI = MF.getInfo<PPCFunctionInfo>();
7881   int RASI = FI->getReturnAddrSaveIndex();
7882 
  // If the return address save index hasn't been defined yet.
7884   if (!RASI) {
    // Find the fixed offset of the return address save area.
7886     int LROffset = Subtarget.getFrameLowering()->getReturnSaveOffset();
    // Allocate the frame index for the return address save area.
7888     RASI = MF.getFrameInfo().CreateFixedObject(isPPC64? 8 : 4, LROffset, false);
7889     // Save the result.
7890     FI->setReturnAddrSaveIndex(RASI);
7891   }
7892   return DAG.getFrameIndex(RASI, PtrVT);
7893 }
7894 
7895 SDValue
7896 PPCTargetLowering::getFramePointerFrameIndex(SelectionDAG & DAG) const {
7897   MachineFunction &MF = DAG.getMachineFunction();
7898   bool isPPC64 = Subtarget.isPPC64();
7899   EVT PtrVT = getPointerTy(MF.getDataLayout());
7900 
7901   // Get current frame pointer save index.  The users of this index will be
7902   // primarily DYNALLOC instructions.
7903   PPCFunctionInfo *FI = MF.getInfo<PPCFunctionInfo>();
7904   int FPSI = FI->getFramePointerSaveIndex();
7905 
7906   // If the frame pointer save index hasn't been defined yet.
7907   if (!FPSI) {
    // Find the fixed offset of the frame pointer save area.
7909     int FPOffset = Subtarget.getFrameLowering()->getFramePointerSaveOffset();
7910     // Allocate the frame index for frame pointer save area.
7911     FPSI = MF.getFrameInfo().CreateFixedObject(isPPC64? 8 : 4, FPOffset, true);
7912     // Save the result.
7913     FI->setFramePointerSaveIndex(FPSI);
7914   }
7915   return DAG.getFrameIndex(FPSI, PtrVT);
7916 }
7917 
7918 SDValue PPCTargetLowering::LowerDYNAMIC_STACKALLOC(SDValue Op,
7919                                                    SelectionDAG &DAG) const {
7920   MachineFunction &MF = DAG.getMachineFunction();
7921   // Get the inputs.
7922   SDValue Chain = Op.getOperand(0);
7923   SDValue Size  = Op.getOperand(1);
7924   SDLoc dl(Op);
7925 
7926   // Get the correct type for pointers.
7927   EVT PtrVT = getPointerTy(DAG.getDataLayout());
7928   // Negate the size.
7929   SDValue NegSize = DAG.getNode(ISD::SUB, dl, PtrVT,
7930                                 DAG.getConstant(0, dl, PtrVT), Size);
7931   // Construct a node for the frame pointer save index.
7932   SDValue FPSIdx = getFramePointerFrameIndex(DAG);
7933   SDValue Ops[3] = { Chain, NegSize, FPSIdx };
7934   SDVTList VTs = DAG.getVTList(PtrVT, MVT::Other);
7935   if (hasInlineStackProbe(MF))
7936     return DAG.getNode(PPCISD::PROBED_ALLOCA, dl, VTs, Ops);
7937   return DAG.getNode(PPCISD::DYNALLOC, dl, VTs, Ops);
7938 }
7939 
7940 SDValue PPCTargetLowering::LowerEH_DWARF_CFA(SDValue Op,
7941                                                      SelectionDAG &DAG) const {
7942   MachineFunction &MF = DAG.getMachineFunction();
7943 
7944   bool isPPC64 = Subtarget.isPPC64();
7945   EVT PtrVT = getPointerTy(DAG.getDataLayout());
7946 
7947   int FI = MF.getFrameInfo().CreateFixedObject(isPPC64 ? 8 : 4, 0, false);
7948   return DAG.getFrameIndex(FI, PtrVT);
7949 }
7950 
7951 SDValue PPCTargetLowering::lowerEH_SJLJ_SETJMP(SDValue Op,
7952                                                SelectionDAG &DAG) const {
7953   SDLoc DL(Op);
7954   return DAG.getNode(PPCISD::EH_SJLJ_SETJMP, DL,
7955                      DAG.getVTList(MVT::i32, MVT::Other),
7956                      Op.getOperand(0), Op.getOperand(1));
7957 }
7958 
7959 SDValue PPCTargetLowering::lowerEH_SJLJ_LONGJMP(SDValue Op,
7960                                                 SelectionDAG &DAG) const {
7961   SDLoc DL(Op);
7962   return DAG.getNode(PPCISD::EH_SJLJ_LONGJMP, DL, MVT::Other,
7963                      Op.getOperand(0), Op.getOperand(1));
7964 }
7965 
7966 SDValue PPCTargetLowering::LowerLOAD(SDValue Op, SelectionDAG &DAG) const {
7967   if (Op.getValueType().isVector())
7968     return LowerVectorLoad(Op, DAG);
7969 
7970   assert(Op.getValueType() == MVT::i1 &&
7971          "Custom lowering only for i1 loads");
7972 
  // First, extend-load the 8 bits into a pointer-sized integer, then truncate
  // to 1 bit.
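  // e.g. (a sketch): "%b = load i1, i1* %p" becomes an EXTLOAD of i8 from %p
  // followed by a TRUNCATE of the loaded value down to i1.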
7974 
7975   SDLoc dl(Op);
7976   LoadSDNode *LD = cast<LoadSDNode>(Op);
7977 
7978   SDValue Chain = LD->getChain();
7979   SDValue BasePtr = LD->getBasePtr();
7980   MachineMemOperand *MMO = LD->getMemOperand();
7981 
7982   SDValue NewLD =
7983       DAG.getExtLoad(ISD::EXTLOAD, dl, getPointerTy(DAG.getDataLayout()), Chain,
7984                      BasePtr, MVT::i8, MMO);
7985   SDValue Result = DAG.getNode(ISD::TRUNCATE, dl, MVT::i1, NewLD);
7986 
7987   SDValue Ops[] = { Result, SDValue(NewLD.getNode(), 1) };
7988   return DAG.getMergeValues(Ops, dl);
7989 }
7990 
7991 SDValue PPCTargetLowering::LowerSTORE(SDValue Op, SelectionDAG &DAG) const {
7992   if (Op.getOperand(1).getValueType().isVector())
7993     return LowerVectorStore(Op, DAG);
7994 
7995   assert(Op.getOperand(1).getValueType() == MVT::i1 &&
7996          "Custom lowering only for i1 stores");
7997 
7998   // First, zero extend to 32 bits, then use a truncating store to 8 bits.
7999 
8000   SDLoc dl(Op);
8001   StoreSDNode *ST = cast<StoreSDNode>(Op);
8002 
8003   SDValue Chain = ST->getChain();
8004   SDValue BasePtr = ST->getBasePtr();
8005   SDValue Value = ST->getValue();
8006   MachineMemOperand *MMO = ST->getMemOperand();
8007 
8008   Value = DAG.getNode(ISD::ZERO_EXTEND, dl, getPointerTy(DAG.getDataLayout()),
8009                       Value);
8010   return DAG.getTruncStore(Chain, dl, Value, BasePtr, MVT::i8, MMO);
8011 }
8012 
8013 // FIXME: Remove this once the ANDI glue bug is fixed:
8014 SDValue PPCTargetLowering::LowerTRUNCATE(SDValue Op, SelectionDAG &DAG) const {
8015   assert(Op.getValueType() == MVT::i1 &&
8016          "Custom lowering only for i1 results");
8017 
8018   SDLoc DL(Op);
8019   return DAG.getNode(PPCISD::ANDI_rec_1_GT_BIT, DL, MVT::i1, Op.getOperand(0));
8020 }
8021 
8022 SDValue PPCTargetLowering::LowerTRUNCATEVector(SDValue Op,
8023                                                SelectionDAG &DAG) const {
8024 
  // Implements a vector truncate that fits in a vector register as a shuffle.
  // We want to legalize vector truncates down to where the source fits in
  // a vector register (and the target is therefore smaller than the vector
  // register size).  At that point legalization will try to custom lower the
  // sub-legal result and get here, where we can contain the truncate as a
  // single target operation.
8031 
8032   // For example a trunc <2 x i16> to <2 x i8> could be visualized as follows:
8033   //   <MSB1|LSB1, MSB2|LSB2> to <LSB1, LSB2>
8034   //
8035   // We will implement it for big-endian ordering as this (where x denotes
8036   // undefined):
8037   //   < MSB1|LSB1, MSB2|LSB2, uu, uu, uu, uu, uu, uu> to
8038   //   < LSB1, LSB2, u, u, u, u, u, u, u, u, u, u, u, u, u, u>
8039   //
8040   // The same operation in little-endian ordering will be:
8041   //   <uu, uu, uu, uu, uu, uu, LSB2|MSB2, LSB1|MSB1> to
8042   //   <u, u, u, u, u, u, u, u, u, u, u, u, u, u, LSB2, LSB1>
8043 
8044   EVT TrgVT = Op.getValueType();
8045   assert(TrgVT.isVector() && "Vector type expected.");
8046   unsigned TrgNumElts = TrgVT.getVectorNumElements();
8047   EVT EltVT = TrgVT.getVectorElementType();
8048   if (!isOperationCustom(Op.getOpcode(), TrgVT) ||
8049       TrgVT.getSizeInBits() > 128 || !isPowerOf2_32(TrgNumElts) ||
8050       !isPowerOf2_32(EltVT.getSizeInBits()))
8051     return SDValue();
8052 
8053   SDValue N1 = Op.getOperand(0);
8054   EVT SrcVT = N1.getValueType();
8055   unsigned SrcSize = SrcVT.getSizeInBits();
8056   if (SrcSize > 256 ||
8057       !isPowerOf2_32(SrcVT.getVectorNumElements()) ||
8058       !isPowerOf2_32(SrcVT.getVectorElementType().getSizeInBits()))
8059     return SDValue();
8060   if (SrcSize == 256 && SrcVT.getVectorNumElements() < 2)
8061     return SDValue();
8062 
8063   unsigned WideNumElts = 128 / EltVT.getSizeInBits();
8064   EVT WideVT = EVT::getVectorVT(*DAG.getContext(), EltVT, WideNumElts);
8065 
8066   SDLoc DL(Op);
8067   SDValue Op1, Op2;
8068   if (SrcSize == 256) {
8069     EVT VecIdxTy = getVectorIdxTy(DAG.getDataLayout());
8070     EVT SplitVT =
8071         N1.getValueType().getHalfNumVectorElementsVT(*DAG.getContext());
8072     unsigned SplitNumElts = SplitVT.getVectorNumElements();
8073     Op1 = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, SplitVT, N1,
8074                       DAG.getConstant(0, DL, VecIdxTy));
8075     Op2 = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, SplitVT, N1,
8076                       DAG.getConstant(SplitNumElts, DL, VecIdxTy));
8077   }
8078   else {
8079     Op1 = SrcSize == 128 ? N1 : widenVec(DAG, N1, DL);
8080     Op2 = DAG.getUNDEF(WideVT);
8081   }
8082 
8083   // First list the elements we want to keep.
8084   unsigned SizeMult = SrcSize / TrgVT.getSizeInBits();
8085   SmallVector<int, 16> ShuffV;
8086   if (Subtarget.isLittleEndian())
8087     for (unsigned i = 0; i < TrgNumElts; ++i)
8088       ShuffV.push_back(i * SizeMult);
8089   else
8090     for (unsigned i = 1; i <= TrgNumElts; ++i)
8091       ShuffV.push_back(i * SizeMult - 1);
8092 
8093   // Populate the remaining elements with undefs.
8094   for (unsigned i = TrgNumElts; i < WideNumElts; ++i)
    ShuffV.push_back(WideNumElts + 1);
8097 
8098   Op1 = DAG.getNode(ISD::BITCAST, DL, WideVT, Op1);
8099   Op2 = DAG.getNode(ISD::BITCAST, DL, WideVT, Op2);
8100   return DAG.getVectorShuffle(WideVT, DL, Op1, Op2, ShuffV);
8101 }
8102 
/// LowerSELECT_CC - Lower floating-point select_cc's into an fsel instruction
/// when possible.
8105 SDValue PPCTargetLowering::LowerSELECT_CC(SDValue Op, SelectionDAG &DAG) const {
  // Not FP, or using SPE? Then this is not an fsel candidate.
8107   if (!Op.getOperand(0).getValueType().isFloatingPoint() ||
8108       !Op.getOperand(2).getValueType().isFloatingPoint() || Subtarget.hasSPE())
8109     return Op;
8110 
8111   ISD::CondCode CC = cast<CondCodeSDNode>(Op.getOperand(4))->get();
8112 
8113   EVT ResVT = Op.getValueType();
8114   EVT CmpVT = Op.getOperand(0).getValueType();
8115   SDValue LHS = Op.getOperand(0), RHS = Op.getOperand(1);
8116   SDValue TV  = Op.getOperand(2), FV  = Op.getOperand(3);
8117   SDLoc dl(Op);
8118   SDNodeFlags Flags = Op.getNode()->getFlags();
8119 
8120   // We have xsmaxcdp/xsmincdp which are OK to emit even in the
8121   // presence of infinities.
8122   if (Subtarget.hasP9Vector() && LHS == TV && RHS == FV) {
8123     switch (CC) {
8124     default:
8125       break;
8126     case ISD::SETOGT:
8127     case ISD::SETGT:
8128       return DAG.getNode(PPCISD::XSMAXCDP, dl, Op.getValueType(), LHS, RHS);
8129     case ISD::SETOLT:
8130     case ISD::SETLT:
8131       return DAG.getNode(PPCISD::XSMINCDP, dl, Op.getValueType(), LHS, RHS);
8132     }
8133   }
8134 
8135   // We might be able to do better than this under some circumstances, but in
8136   // general, fsel-based lowering of select is a finite-math-only optimization.
8137   // For more information, see section F.3 of the 2.06 ISA specification.
  // (With ISA 3.0, the xsmaxcdp/xsmincdp cases above are already handled
  // without this restriction.)
8139   if ((!DAG.getTarget().Options.NoInfsFPMath && !Flags.hasNoInfs()) ||
8140       (!DAG.getTarget().Options.NoNaNsFPMath && !Flags.hasNoNaNs()))
8141     return Op;
8142 
8143   // If the RHS of the comparison is a 0.0, we don't need to do the
8144   // subtraction at all.
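  // Recall the fsel semantics (a sketch): fsel(a, t, f) yields t when
  // a >= 0.0 and f otherwise, so select_cc(lhs, 0.0, tv, fv, setge) maps
  // directly onto fsel(lhs, tv, fv).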
8145   SDValue Sel1;
8146   if (isFloatingPointZero(RHS))
8147     switch (CC) {
8148     default: break;       // SETUO etc aren't handled by fsel.
8149     case ISD::SETNE:
8150       std::swap(TV, FV);
8151       LLVM_FALLTHROUGH;
8152     case ISD::SETEQ:
8153       if (LHS.getValueType() == MVT::f32)   // Comparison is always 64-bits
8154         LHS = DAG.getNode(ISD::FP_EXTEND, dl, MVT::f64, LHS);
8155       Sel1 = DAG.getNode(PPCISD::FSEL, dl, ResVT, LHS, TV, FV);
8156       if (Sel1.getValueType() == MVT::f32)   // Comparison is always 64-bits
8157         Sel1 = DAG.getNode(ISD::FP_EXTEND, dl, MVT::f64, Sel1);
8158       return DAG.getNode(PPCISD::FSEL, dl, ResVT,
8159                          DAG.getNode(ISD::FNEG, dl, MVT::f64, LHS), Sel1, FV);
8160     case ISD::SETULT:
8161     case ISD::SETLT:
8162       std::swap(TV, FV);  // fsel is natively setge, swap operands for setlt
8163       LLVM_FALLTHROUGH;
8164     case ISD::SETOGE:
8165     case ISD::SETGE:
8166       if (LHS.getValueType() == MVT::f32)   // Comparison is always 64-bits
8167         LHS = DAG.getNode(ISD::FP_EXTEND, dl, MVT::f64, LHS);
8168       return DAG.getNode(PPCISD::FSEL, dl, ResVT, LHS, TV, FV);
8169     case ISD::SETUGT:
8170     case ISD::SETGT:
8171       std::swap(TV, FV);  // fsel is natively setge, swap operands for setlt
8172       LLVM_FALLTHROUGH;
8173     case ISD::SETOLE:
8174     case ISD::SETLE:
8175       if (LHS.getValueType() == MVT::f32)   // Comparison is always 64-bits
8176         LHS = DAG.getNode(ISD::FP_EXTEND, dl, MVT::f64, LHS);
8177       return DAG.getNode(PPCISD::FSEL, dl, ResVT,
8178                          DAG.getNode(ISD::FNEG, dl, MVT::f64, LHS), TV, FV);
8179     }
8180 
8181   SDValue Cmp;
8182   switch (CC) {
8183   default: break;       // SETUO etc aren't handled by fsel.
8184   case ISD::SETNE:
8185     std::swap(TV, FV);
8186     LLVM_FALLTHROUGH;
8187   case ISD::SETEQ:
8188     Cmp = DAG.getNode(ISD::FSUB, dl, CmpVT, LHS, RHS, Flags);
8189     if (Cmp.getValueType() == MVT::f32)   // Comparison is always 64-bits
8190       Cmp = DAG.getNode(ISD::FP_EXTEND, dl, MVT::f64, Cmp);
8191     Sel1 = DAG.getNode(PPCISD::FSEL, dl, ResVT, Cmp, TV, FV);
8192     if (Sel1.getValueType() == MVT::f32)   // Comparison is always 64-bits
8193       Sel1 = DAG.getNode(ISD::FP_EXTEND, dl, MVT::f64, Sel1);
8194     return DAG.getNode(PPCISD::FSEL, dl, ResVT,
8195                        DAG.getNode(ISD::FNEG, dl, MVT::f64, Cmp), Sel1, FV);
8196   case ISD::SETULT:
8197   case ISD::SETLT:
8198     Cmp = DAG.getNode(ISD::FSUB, dl, CmpVT, LHS, RHS, Flags);
8199     if (Cmp.getValueType() == MVT::f32)   // Comparison is always 64-bits
8200       Cmp = DAG.getNode(ISD::FP_EXTEND, dl, MVT::f64, Cmp);
8201     return DAG.getNode(PPCISD::FSEL, dl, ResVT, Cmp, FV, TV);
8202   case ISD::SETOGE:
8203   case ISD::SETGE:
8204     Cmp = DAG.getNode(ISD::FSUB, dl, CmpVT, LHS, RHS, Flags);
8205     if (Cmp.getValueType() == MVT::f32)   // Comparison is always 64-bits
8206       Cmp = DAG.getNode(ISD::FP_EXTEND, dl, MVT::f64, Cmp);
8207     return DAG.getNode(PPCISD::FSEL, dl, ResVT, Cmp, TV, FV);
8208   case ISD::SETUGT:
8209   case ISD::SETGT:
8210     Cmp = DAG.getNode(ISD::FSUB, dl, CmpVT, RHS, LHS, Flags);
8211     if (Cmp.getValueType() == MVT::f32)   // Comparison is always 64-bits
8212       Cmp = DAG.getNode(ISD::FP_EXTEND, dl, MVT::f64, Cmp);
8213     return DAG.getNode(PPCISD::FSEL, dl, ResVT, Cmp, FV, TV);
8214   case ISD::SETOLE:
8215   case ISD::SETLE:
8216     Cmp = DAG.getNode(ISD::FSUB, dl, CmpVT, RHS, LHS, Flags);
8217     if (Cmp.getValueType() == MVT::f32)   // Comparison is always 64-bits
8218       Cmp = DAG.getNode(ISD::FP_EXTEND, dl, MVT::f64, Cmp);
8219     return DAG.getNode(PPCISD::FSEL, dl, ResVT, Cmp, TV, FV);
8220   }
8221   return Op;
8222 }
8223 
8224 static unsigned getPPCStrictOpcode(unsigned Opc) {
8225   switch (Opc) {
8226   default:
8227     llvm_unreachable("No strict version of this opcode!");
8228   case PPCISD::FCTIDZ:
8229     return PPCISD::STRICT_FCTIDZ;
8230   case PPCISD::FCTIWZ:
8231     return PPCISD::STRICT_FCTIWZ;
8232   case PPCISD::FCTIDUZ:
8233     return PPCISD::STRICT_FCTIDUZ;
8234   case PPCISD::FCTIWUZ:
8235     return PPCISD::STRICT_FCTIWUZ;
8236   case PPCISD::FCFID:
8237     return PPCISD::STRICT_FCFID;
8238   case PPCISD::FCFIDU:
8239     return PPCISD::STRICT_FCFIDU;
8240   case PPCISD::FCFIDS:
8241     return PPCISD::STRICT_FCFIDS;
8242   case PPCISD::FCFIDUS:
8243     return PPCISD::STRICT_FCFIDUS;
8244   }
8245 }
8246 
8247 static SDValue convertFPToInt(SDValue Op, SelectionDAG &DAG,
8248                               const PPCSubtarget &Subtarget) {
8249   SDLoc dl(Op);
8250   bool IsStrict = Op->isStrictFPOpcode();
8251   bool IsSigned = Op.getOpcode() == ISD::FP_TO_SINT ||
8252                   Op.getOpcode() == ISD::STRICT_FP_TO_SINT;
8253 
8254   // TODO: Any other flags to propagate?
8255   SDNodeFlags Flags;
8256   Flags.setNoFPExcept(Op->getFlags().hasNoFPExcept());
8257 
8258   // For strict nodes, source is the second operand.
8259   SDValue Src = Op.getOperand(IsStrict ? 1 : 0);
8260   SDValue Chain = IsStrict ? Op.getOperand(0) : SDValue();
8261   assert(Src.getValueType().isFloatingPoint());
8262   if (Src.getValueType() == MVT::f32) {
8263     if (IsStrict) {
8264       Src =
8265           DAG.getNode(ISD::STRICT_FP_EXTEND, dl,
8266                       DAG.getVTList(MVT::f64, MVT::Other), {Chain, Src}, Flags);
8267       Chain = Src.getValue(1);
8268     } else
8269       Src = DAG.getNode(ISD::FP_EXTEND, dl, MVT::f64, Src);
8270   }
8271   SDValue Conv;
8272   unsigned Opc = ISD::DELETED_NODE;
8273   switch (Op.getSimpleValueType().SimpleTy) {
8274   default: llvm_unreachable("Unhandled FP_TO_INT type in custom expander!");
8275   case MVT::i32:
8276     Opc = IsSigned ? PPCISD::FCTIWZ
8277                    : (Subtarget.hasFPCVT() ? PPCISD::FCTIWUZ : PPCISD::FCTIDZ);
8278     break;
8279   case MVT::i64:
8280     assert((IsSigned || Subtarget.hasFPCVT()) &&
8281            "i64 FP_TO_UINT is supported only with FPCVT");
8282     Opc = IsSigned ? PPCISD::FCTIDZ : PPCISD::FCTIDUZ;
8283   }
8284   if (IsStrict) {
8285     Opc = getPPCStrictOpcode(Opc);
8286     Conv = DAG.getNode(Opc, dl, DAG.getVTList(MVT::f64, MVT::Other),
8287                        {Chain, Src}, Flags);
8288   } else {
8289     Conv = DAG.getNode(Opc, dl, MVT::f64, Src);
8290   }
8291   return Conv;
8292 }
8293 
8294 void PPCTargetLowering::LowerFP_TO_INTForReuse(SDValue Op, ReuseLoadInfo &RLI,
8295                                                SelectionDAG &DAG,
8296                                                const SDLoc &dl) const {
8297   SDValue Tmp = convertFPToInt(Op, DAG, Subtarget);
8298   bool IsSigned = Op.getOpcode() == ISD::FP_TO_SINT ||
8299                   Op.getOpcode() == ISD::STRICT_FP_TO_SINT;
8300   bool IsStrict = Op->isStrictFPOpcode();
8301 
8302   // Convert the FP value to an int value through memory.
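  // On a subtarget with STFIWX this is roughly (a sketch, fp-to-i32):
  //   fctiwz f0, f1     ; convert to integer, rounding toward zero
  //   stfiwx f0, 0, rT  ; spill the integer word to the stack slot
  // after which the caller reloads the result with an ordinary integer load.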
8303   bool i32Stack = Op.getValueType() == MVT::i32 && Subtarget.hasSTFIWX() &&
8304                   (IsSigned || Subtarget.hasFPCVT());
8305   SDValue FIPtr = DAG.CreateStackTemporary(i32Stack ? MVT::i32 : MVT::f64);
8306   int FI = cast<FrameIndexSDNode>(FIPtr)->getIndex();
8307   MachinePointerInfo MPI =
8308       MachinePointerInfo::getFixedStack(DAG.getMachineFunction(), FI);
8309 
8310   // Emit a store to the stack slot.
8311   SDValue Chain = IsStrict ? Tmp.getValue(1) : DAG.getEntryNode();
8312   Align Alignment(DAG.getEVTAlign(Tmp.getValueType()));
8313   if (i32Stack) {
8314     MachineFunction &MF = DAG.getMachineFunction();
8315     Alignment = Align(4);
8316     MachineMemOperand *MMO =
8317         MF.getMachineMemOperand(MPI, MachineMemOperand::MOStore, 4, Alignment);
8318     SDValue Ops[] = { Chain, Tmp, FIPtr };
8319     Chain = DAG.getMemIntrinsicNode(PPCISD::STFIWX, dl,
8320               DAG.getVTList(MVT::Other), Ops, MVT::i32, MMO);
8321   } else
8322     Chain = DAG.getStore(Chain, dl, Tmp, FIPtr, MPI, Alignment);
8323 
  // Result is a load from the stack slot.  If loading 4 bytes from the 8-byte
  // f64 slot, make sure to add in a bias on big-endian targets.
8326   if (Op.getValueType() == MVT::i32 && !i32Stack) {
8327     FIPtr = DAG.getNode(ISD::ADD, dl, FIPtr.getValueType(), FIPtr,
8328                         DAG.getConstant(4, dl, FIPtr.getValueType()));
8329     MPI = MPI.getWithOffset(Subtarget.isLittleEndian() ? 0 : 4);
8330   }
8331 
8332   RLI.Chain = Chain;
8333   RLI.Ptr = FIPtr;
8334   RLI.MPI = MPI;
8335   RLI.Alignment = Alignment;
8336 }
8337 
8338 /// Custom lowers floating point to integer conversions to use
8339 /// the direct move instructions available in ISA 2.07 to avoid the
8340 /// need for load/store combinations.
8341 SDValue PPCTargetLowering::LowerFP_TO_INTDirectMove(SDValue Op,
8342                                                     SelectionDAG &DAG,
8343                                                     const SDLoc &dl) const {
8344   SDValue Conv = convertFPToInt(Op, DAG, Subtarget);
8345   SDValue Mov = DAG.getNode(PPCISD::MFVSR, dl, Op.getValueType(), Conv);
8346   if (Op->isStrictFPOpcode())
8347     return DAG.getMergeValues({Mov, Conv.getValue(1)}, dl);
8348   else
8349     return Mov;
8350 }
8351 
8352 SDValue PPCTargetLowering::LowerFP_TO_INT(SDValue Op, SelectionDAG &DAG,
8353                                           const SDLoc &dl) const {
8354   bool IsStrict = Op->isStrictFPOpcode();
8355   bool IsSigned = Op.getOpcode() == ISD::FP_TO_SINT ||
8356                   Op.getOpcode() == ISD::STRICT_FP_TO_SINT;
8357   SDValue Src = Op.getOperand(IsStrict ? 1 : 0);
8358   EVT SrcVT = Src.getValueType();
8359   EVT DstVT = Op.getValueType();
8360 
8361   // FP to INT conversions are legal for f128.
8362   if (SrcVT == MVT::f128)
8363     return Op;
8364 
8365   // Expand ppcf128 to i32 by hand for the benefit of llvm-gcc bootstrap on
8366   // PPC (the libcall is not available).
8367   if (SrcVT == MVT::ppcf128) {
8368     if (DstVT == MVT::i32) {
8369       // TODO: Conservatively pass only nofpexcept flag here. Need to check and
8370       // set other fast-math flags to FP operations in both strict and
8371       // non-strict cases. (FP_TO_SINT, FSUB)
8372       SDNodeFlags Flags;
8373       Flags.setNoFPExcept(Op->getFlags().hasNoFPExcept());
8374 
8375       if (IsSigned) {
8376         SDValue Lo = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, MVT::f64, Src,
8377                                  DAG.getIntPtrConstant(0, dl));
8378         SDValue Hi = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, MVT::f64, Src,
8379                                  DAG.getIntPtrConstant(1, dl));
8380 
8381         // Add the two halves of the long double in round-to-zero mode, and use
8382         // a smaller FP_TO_SINT.
8383         if (IsStrict) {
8384           SDValue Res = DAG.getNode(PPCISD::STRICT_FADDRTZ, dl,
8385                                     DAG.getVTList(MVT::f64, MVT::Other),
8386                                     {Op.getOperand(0), Lo, Hi}, Flags);
8387           return DAG.getNode(ISD::STRICT_FP_TO_SINT, dl,
8388                              DAG.getVTList(MVT::i32, MVT::Other),
8389                              {Res.getValue(1), Res}, Flags);
8390         } else {
8391           SDValue Res = DAG.getNode(PPCISD::FADDRTZ, dl, MVT::f64, Lo, Hi);
8392           return DAG.getNode(ISD::FP_TO_SINT, dl, MVT::i32, Res);
8393         }
8394       } else {
8395         const uint64_t TwoE31[] = {0x41e0000000000000LL, 0};
8396         APFloat APF = APFloat(APFloat::PPCDoubleDouble(), APInt(128, TwoE31));
8397         SDValue Cst = DAG.getConstantFP(APF, dl, SrcVT);
8398         SDValue SignMask = DAG.getConstant(0x80000000, dl, DstVT);
8399         if (IsStrict) {
8400           // Sel = Src < 0x80000000
8401           // FltOfs = select Sel, 0.0, 0x80000000
8402           // IntOfs = select Sel, 0, 0x80000000
8403           // Result = fp_to_sint(Src - FltOfs) ^ IntOfs
8404           SDValue Chain = Op.getOperand(0);
8405           EVT SetCCVT =
8406               getSetCCResultType(DAG.getDataLayout(), *DAG.getContext(), SrcVT);
8407           EVT DstSetCCVT =
8408               getSetCCResultType(DAG.getDataLayout(), *DAG.getContext(), DstVT);
8409           SDValue Sel = DAG.getSetCC(dl, SetCCVT, Src, Cst, ISD::SETLT,
8410                                      Chain, true);
8411           Chain = Sel.getValue(1);
8412 
8413           SDValue FltOfs = DAG.getSelect(
8414               dl, SrcVT, Sel, DAG.getConstantFP(0.0, dl, SrcVT), Cst);
8415           Sel = DAG.getBoolExtOrTrunc(Sel, dl, DstSetCCVT, DstVT);
8416 
8417           SDValue Val = DAG.getNode(ISD::STRICT_FSUB, dl,
8418                                     DAG.getVTList(SrcVT, MVT::Other),
8419                                     {Chain, Src, FltOfs}, Flags);
8420           Chain = Val.getValue(1);
8421           SDValue SInt = DAG.getNode(ISD::STRICT_FP_TO_SINT, dl,
8422                                      DAG.getVTList(DstVT, MVT::Other),
8423                                      {Chain, Val}, Flags);
8424           Chain = SInt.getValue(1);
8425           SDValue IntOfs = DAG.getSelect(
8426               dl, DstVT, Sel, DAG.getConstant(0, dl, DstVT), SignMask);
8427           SDValue Result = DAG.getNode(ISD::XOR, dl, DstVT, SInt, IntOfs);
8428           return DAG.getMergeValues({Result, Chain}, dl);
8429         } else {
8430           // X>=2^31 ? (int)(X-2^31)+0x80000000 : (int)X
8431           // FIXME: generated code sucks.
8432           SDValue True = DAG.getNode(ISD::FSUB, dl, MVT::ppcf128, Src, Cst);
8433           True = DAG.getNode(ISD::FP_TO_SINT, dl, MVT::i32, True);
8434           True = DAG.getNode(ISD::ADD, dl, MVT::i32, True, SignMask);
8435           SDValue False = DAG.getNode(ISD::FP_TO_SINT, dl, MVT::i32, Src);
8436           return DAG.getSelectCC(dl, Src, Cst, True, False, ISD::SETGE);
8437         }
8438       }
8439     }
8440 
8441     return SDValue();
8442   }
8443 
8444   if (Subtarget.hasDirectMove() && Subtarget.isPPC64())
8445     return LowerFP_TO_INTDirectMove(Op, DAG, dl);
8446 
8447   ReuseLoadInfo RLI;
8448   LowerFP_TO_INTForReuse(Op, RLI, DAG, dl);
8449 
8450   return DAG.getLoad(Op.getValueType(), dl, RLI.Chain, RLI.Ptr, RLI.MPI,
8451                      RLI.Alignment, RLI.MMOFlags(), RLI.AAInfo, RLI.Ranges);
8452 }
8453 
8454 // We're trying to insert a regular store, S, and then a load, L. If the
8455 // incoming value, O, is a load, we might just be able to have our load use the
8456 // address used by O. However, we don't know if anything else will store to
8457 // that address before we can load from it. To prevent this situation, we need
8458 // to insert our load, L, into the chain as a peer of O. To do this, we give L
8459 // the same chain operand as O, we create a token factor from the chain results
8460 // of O and L, and we replace all uses of O's chain result with that token
8461 // factor (see spliceIntoChain below for this last part).
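// For example (a sketch): given O = load @a with chain Ch, we emit L with
// chain operand Ch as well, build TF = TokenFactor(O.chain, L.chain), and
// redirect all former users of O's chain result to TF.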
8462 bool PPCTargetLowering::canReuseLoadAddress(SDValue Op, EVT MemVT,
8463                                             ReuseLoadInfo &RLI,
8464                                             SelectionDAG &DAG,
8465                                             ISD::LoadExtType ET) const {
8466   // Conservatively skip reusing for constrained FP nodes.
8467   if (Op->isStrictFPOpcode())
8468     return false;
8469 
8470   SDLoc dl(Op);
8471   bool ValidFPToUint = Op.getOpcode() == ISD::FP_TO_UINT &&
8472                        (Subtarget.hasFPCVT() || Op.getValueType() == MVT::i32);
8473   if (ET == ISD::NON_EXTLOAD &&
8474       (ValidFPToUint || Op.getOpcode() == ISD::FP_TO_SINT) &&
8475       isOperationLegalOrCustom(Op.getOpcode(),
8476                                Op.getOperand(0).getValueType())) {
8477 
8478     LowerFP_TO_INTForReuse(Op, RLI, DAG, dl);
8479     return true;
8480   }
8481 
8482   LoadSDNode *LD = dyn_cast<LoadSDNode>(Op);
8483   if (!LD || LD->getExtensionType() != ET || LD->isVolatile() ||
8484       LD->isNonTemporal())
8485     return false;
8486   if (LD->getMemoryVT() != MemVT)
8487     return false;
8488 
  // If the result of the load is an illegal type, then we can't build a
  // valid chain for reuse, since the legalized loads and the token factor
  // node that ties them together use a different output chain than the
  // illegal load.
8493   if (!isTypeLegal(LD->getValueType(0)))
8494     return false;
8495 
8496   RLI.Ptr = LD->getBasePtr();
8497   if (LD->isIndexed() && !LD->getOffset().isUndef()) {
8498     assert(LD->getAddressingMode() == ISD::PRE_INC &&
8499            "Non-pre-inc AM on PPC?");
8500     RLI.Ptr = DAG.getNode(ISD::ADD, dl, RLI.Ptr.getValueType(), RLI.Ptr,
8501                           LD->getOffset());
8502   }
8503 
8504   RLI.Chain = LD->getChain();
8505   RLI.MPI = LD->getPointerInfo();
8506   RLI.IsDereferenceable = LD->isDereferenceable();
8507   RLI.IsInvariant = LD->isInvariant();
8508   RLI.Alignment = LD->getAlign();
8509   RLI.AAInfo = LD->getAAInfo();
8510   RLI.Ranges = LD->getRanges();
8511 
8512   RLI.ResChain = SDValue(LD, LD->isIndexed() ? 2 : 1);
8513   return true;
8514 }
8515 
8516 // Given the head of the old chain, ResChain, insert a token factor containing
8517 // it and NewResChain, and make users of ResChain now be users of that token
8518 // factor.
8519 // TODO: Remove and use DAG::makeEquivalentMemoryOrdering() instead.
8520 void PPCTargetLowering::spliceIntoChain(SDValue ResChain,
8521                                         SDValue NewResChain,
8522                                         SelectionDAG &DAG) const {
8523   if (!ResChain)
8524     return;
8525 
8526   SDLoc dl(NewResChain);
8527 
8528   SDValue TF = DAG.getNode(ISD::TokenFactor, dl, MVT::Other,
8529                            NewResChain, DAG.getUNDEF(MVT::Other));
8530   assert(TF.getNode() != NewResChain.getNode() &&
8531          "A new TF really is required here");
8532 
8533   DAG.ReplaceAllUsesOfValueWith(ResChain, TF);
8534   DAG.UpdateNodeOperands(TF.getNode(), ResChain, NewResChain);
8535 }
8536 
/// Analyze the profitability of a direct move: prefer a float load over an
/// int load plus a direct move when the loaded integer value has no integer
/// uses.
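/// For example (a sketch): an i32 load whose only users are (strict)
/// SINT_TO_FP/UINT_TO_FP nodes is better served by a single FP-side load
/// (lfiwax-style) than by an integer load followed by a GPR-to-VSR direct
/// move.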
8540 bool PPCTargetLowering::directMoveIsProfitable(const SDValue &Op) const {
8541   SDNode *Origin = Op.getOperand(0).getNode();
8542   if (Origin->getOpcode() != ISD::LOAD)
8543     return true;
8544 
  // If the subtarget lacks LXSIBZX/LXSIHZX (e.g. Power8), prefer a direct
  // move when the memory size is 1 or 2 bytes.
8547   MachineMemOperand *MMO = cast<LoadSDNode>(Origin)->getMemOperand();
8548   if (!Subtarget.hasP9Vector() && MMO->getSize() <= 2)
8549     return true;
8550 
8551   for (SDNode::use_iterator UI = Origin->use_begin(),
8552                             UE = Origin->use_end();
8553        UI != UE; ++UI) {
8554 
8555     // Only look at the users of the loaded value.
8556     if (UI.getUse().get().getResNo() != 0)
8557       continue;
8558 
8559     if (UI->getOpcode() != ISD::SINT_TO_FP &&
8560         UI->getOpcode() != ISD::UINT_TO_FP &&
8561         UI->getOpcode() != ISD::STRICT_SINT_TO_FP &&
8562         UI->getOpcode() != ISD::STRICT_UINT_TO_FP)
8563       return true;
8564   }
8565 
8566   return false;
8567 }
8568 
8569 static SDValue convertIntToFP(SDValue Op, SDValue Src, SelectionDAG &DAG,
8570                               const PPCSubtarget &Subtarget,
8571                               SDValue Chain = SDValue()) {
8572   bool IsSigned = Op.getOpcode() == ISD::SINT_TO_FP ||
8573                   Op.getOpcode() == ISD::STRICT_SINT_TO_FP;
8574   SDLoc dl(Op);
8575 
8576   // TODO: Any other flags to propagate?
8577   SDNodeFlags Flags;
8578   Flags.setNoFPExcept(Op->getFlags().hasNoFPExcept());
8579 
8580   // If we have FCFIDS, then use it when converting to single-precision.
8581   // Otherwise, convert to double-precision and then round.
8582   bool IsSingle = Op.getValueType() == MVT::f32 && Subtarget.hasFPCVT();
8583   unsigned ConvOpc = IsSingle ? (IsSigned ? PPCISD::FCFIDS : PPCISD::FCFIDUS)
8584                               : (IsSigned ? PPCISD::FCFID : PPCISD::FCFIDU);
8585   EVT ConvTy = IsSingle ? MVT::f32 : MVT::f64;
8586   if (Op->isStrictFPOpcode()) {
8587     if (!Chain)
8588       Chain = Op.getOperand(0);
8589     return DAG.getNode(getPPCStrictOpcode(ConvOpc), dl,
8590                        DAG.getVTList(ConvTy, MVT::Other), {Chain, Src}, Flags);
8591   } else
8592     return DAG.getNode(ConvOpc, dl, ConvTy, Src);
8593 }
8594 
8595 /// Custom lowers integer to floating point conversions to use
8596 /// the direct move instructions available in ISA 2.07 to avoid the
8597 /// need for load/store combinations.
8598 SDValue PPCTargetLowering::LowerINT_TO_FPDirectMove(SDValue Op,
8599                                                     SelectionDAG &DAG,
8600                                                     const SDLoc &dl) const {
8601   assert((Op.getValueType() == MVT::f32 ||
8602           Op.getValueType() == MVT::f64) &&
8603          "Invalid floating point type as target of conversion");
8604   assert(Subtarget.hasFPCVT() &&
8605          "Int to FP conversions with direct moves require FPCVT");
8606   SDValue Src = Op.getOperand(Op->isStrictFPOpcode() ? 1 : 0);
8607   bool WordInt = Src.getSimpleValueType().SimpleTy == MVT::i32;
8608   bool Signed = Op.getOpcode() == ISD::SINT_TO_FP ||
8609                 Op.getOpcode() == ISD::STRICT_SINT_TO_FP;
8610   unsigned MovOpc = (WordInt && !Signed) ? PPCISD::MTVSRZ : PPCISD::MTVSRA;
8611   SDValue Mov = DAG.getNode(MovOpc, dl, MVT::f64, Src);
8612   return convertIntToFP(Op, Mov, DAG, Subtarget);
8613 }
8614 
8615 static SDValue widenVec(SelectionDAG &DAG, SDValue Vec, const SDLoc &dl) {
8616 
8617   EVT VecVT = Vec.getValueType();
8618   assert(VecVT.isVector() && "Expected a vector type.");
8619   assert(VecVT.getSizeInBits() < 128 && "Vector is already full width.");
8620 
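  // For example, a v2i32 input is widened to v4i32 by concatenating it with
  // a single undef v2i32.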
8621   EVT EltVT = VecVT.getVectorElementType();
8622   unsigned WideNumElts = 128 / EltVT.getSizeInBits();
8623   EVT WideVT = EVT::getVectorVT(*DAG.getContext(), EltVT, WideNumElts);
8624 
8625   unsigned NumConcat = WideNumElts / VecVT.getVectorNumElements();
8626   SmallVector<SDValue, 16> Ops(NumConcat);
8627   Ops[0] = Vec;
8628   SDValue UndefVec = DAG.getUNDEF(VecVT);
8629   for (unsigned i = 1; i < NumConcat; ++i)
8630     Ops[i] = UndefVec;
8631 
8632   return DAG.getNode(ISD::CONCAT_VECTORS, dl, WideVT, Ops);
8633 }
8634 
8635 SDValue PPCTargetLowering::LowerINT_TO_FPVector(SDValue Op, SelectionDAG &DAG,
8636                                                 const SDLoc &dl) const {
8637   bool IsStrict = Op->isStrictFPOpcode();
8638   unsigned Opc = Op.getOpcode();
8639   SDValue Src = Op.getOperand(IsStrict ? 1 : 0);
8640   assert((Opc == ISD::UINT_TO_FP || Opc == ISD::SINT_TO_FP ||
8641           Opc == ISD::STRICT_UINT_TO_FP || Opc == ISD::STRICT_SINT_TO_FP) &&
8642          "Unexpected conversion type");
8643   assert((Op.getValueType() == MVT::v2f64 || Op.getValueType() == MVT::v4f32) &&
8644          "Supports conversions to v2f64/v4f32 only.");
8645 
8646   // TODO: Any other flags to propagate?
8647   SDNodeFlags Flags;
8648   Flags.setNoFPExcept(Op->getFlags().hasNoFPExcept());
8649 
8650   bool SignedConv = Opc == ISD::SINT_TO_FP || Opc == ISD::STRICT_SINT_TO_FP;
8651   bool FourEltRes = Op.getValueType() == MVT::v4f32;
8652 
8653   SDValue Wide = widenVec(DAG, Src, dl);
8654   EVT WideVT = Wide.getValueType();
8655   unsigned WideNumElts = WideVT.getVectorNumElements();
8656   MVT IntermediateVT = FourEltRes ? MVT::v4i32 : MVT::v2i64;
8657 
8658   SmallVector<int, 16> ShuffV;
8659   for (unsigned i = 0; i < WideNumElts; ++i)
8660     ShuffV.push_back(i + WideNumElts);
8661 
8662   int Stride = FourEltRes ? WideNumElts / 4 : WideNumElts / 2;
8663   int SaveElts = FourEltRes ? 4 : 2;
8664   if (Subtarget.isLittleEndian())
8665     for (int i = 0; i < SaveElts; i++)
8666       ShuffV[i * Stride] = i;
8667   else
8668     for (int i = 1; i <= SaveElts; i++)
8669       ShuffV[i * Stride - 1] = i - 1;
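
  // For example, converting v2i16 to v2f64 on a little-endian target: Wide is
  // v8i16 and ShuffV becomes <0, 9, 10, 11, 1, 13, 14, 15>, placing each
  // source half-word in the least-significant half-word of its i64 lane; the
  // other elements come from ShuffleSrc2 below (zero for unsigned conversions,
  // undef for signed ones, which the SIGN_EXTEND_INREG then fixes up).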
8670 
8671   SDValue ShuffleSrc2 =
8672       SignedConv ? DAG.getUNDEF(WideVT) : DAG.getConstant(0, dl, WideVT);
8673   SDValue Arrange = DAG.getVectorShuffle(WideVT, dl, Wide, ShuffleSrc2, ShuffV);
8674 
8675   SDValue Extend;
8676   if (SignedConv) {
8677     Arrange = DAG.getBitcast(IntermediateVT, Arrange);
8678     EVT ExtVT = Src.getValueType();
8679     if (Subtarget.hasP9Altivec())
8680       ExtVT = EVT::getVectorVT(*DAG.getContext(), WideVT.getVectorElementType(),
8681                                IntermediateVT.getVectorNumElements());
8682 
8683     Extend = DAG.getNode(ISD::SIGN_EXTEND_INREG, dl, IntermediateVT, Arrange,
8684                          DAG.getValueType(ExtVT));
8685   } else
8686     Extend = DAG.getNode(ISD::BITCAST, dl, IntermediateVT, Arrange);
8687 
8688   if (IsStrict)
8689     return DAG.getNode(Opc, dl, DAG.getVTList(Op.getValueType(), MVT::Other),
8690                        {Op.getOperand(0), Extend}, Flags);
8691 
8692   return DAG.getNode(Opc, dl, Op.getValueType(), Extend);
8693 }
8694 
8695 SDValue PPCTargetLowering::LowerINT_TO_FP(SDValue Op,
8696                                           SelectionDAG &DAG) const {
8697   SDLoc dl(Op);
8698   bool IsSigned = Op.getOpcode() == ISD::SINT_TO_FP ||
8699                   Op.getOpcode() == ISD::STRICT_SINT_TO_FP;
8700   bool IsStrict = Op->isStrictFPOpcode();
8701   SDValue Src = Op.getOperand(IsStrict ? 1 : 0);
8702   SDValue Chain = IsStrict ? Op.getOperand(0) : DAG.getEntryNode();
8703 
8704   // TODO: Any other flags to propagate?
8705   SDNodeFlags Flags;
8706   Flags.setNoFPExcept(Op->getFlags().hasNoFPExcept());
8707 
8708   EVT InVT = Src.getValueType();
8709   EVT OutVT = Op.getValueType();
8710   if (OutVT.isVector() && OutVT.isFloatingPoint() &&
8711       isOperationCustom(Op.getOpcode(), InVT))
8712     return LowerINT_TO_FPVector(Op, DAG, dl);
8713 
8714   // Conversions to f128 are legal.
8715   if (Op.getValueType() == MVT::f128)
8716     return Op;
8717 
8718   // Don't handle ppc_fp128 here; let it be lowered to a libcall.
8719   if (Op.getValueType() != MVT::f32 && Op.getValueType() != MVT::f64)
8720     return SDValue();
8721 
8722   if (Src.getValueType() == MVT::i1) {
8723     SDValue Sel = DAG.getNode(ISD::SELECT, dl, Op.getValueType(), Src,
8724                               DAG.getConstantFP(1.0, dl, Op.getValueType()),
8725                               DAG.getConstantFP(0.0, dl, Op.getValueType()));
8726     if (IsStrict)
8727       return DAG.getMergeValues({Sel, Chain}, dl);
8728     else
8729       return Sel;
8730   }
8731 
  // If we have direct moves, we can do the entire conversion in registers and
  // skip the store/load. However, without FPCVT we can't do most conversions.
8734   if (Subtarget.hasDirectMove() && directMoveIsProfitable(Op) &&
8735       Subtarget.isPPC64() && Subtarget.hasFPCVT())
8736     return LowerINT_TO_FPDirectMove(Op, DAG, dl);
8737 
8738   assert((IsSigned || Subtarget.hasFPCVT()) &&
8739          "UINT_TO_FP is supported only with FPCVT");
8740 
8741   if (Src.getValueType() == MVT::i64) {
8742     SDValue SINT = Src;
8743     // When converting to single-precision, we actually need to convert
8744     // to double-precision first and then round to single-precision.
8745     // To avoid double-rounding effects during that operation, we have
8746     // to prepare the input operand.  Bits that might be truncated when
8747     // converting to double-precision are replaced by a bit that won't
8748     // be lost at this stage, but is below the single-precision rounding
8749     // position.
8750     //
8751     // However, if -enable-unsafe-fp-math is in effect, accept double
8752     // rounding to avoid the extra overhead.
8753     if (Op.getValueType() == MVT::f32 &&
8754         !Subtarget.hasFPCVT() &&
8755         !DAG.getTarget().Options.UnsafeFPMath) {
8756 
8757       // Twiddle input to make sure the low 11 bits are zero.  (If this
8758       // is the case, we are guaranteed the value will fit into the 53 bit
8759       // mantissa of an IEEE double-precision value without rounding.)
8760       // If any of those low 11 bits were not zero originally, make sure
8761       // bit 12 (value 2048) is set instead, so that the final rounding
8762       // to single-precision gets the correct result.
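      //
      // For example, if the low 12 bits of the input are 0x001, then
      // (0x001 & 2047) + 2047 = 0x800 sets bit 12 (value 2048); OR-ing that
      // back into the input and clearing the low 11 bits leaves the sticky
      // bit set.  If the low 11 bits are already zero, 0 + 2047 = 0x7FF only
      // touches bits that the final AND clears, leaving the input unchanged.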
8763       SDValue Round = DAG.getNode(ISD::AND, dl, MVT::i64,
8764                                   SINT, DAG.getConstant(2047, dl, MVT::i64));
8765       Round = DAG.getNode(ISD::ADD, dl, MVT::i64,
8766                           Round, DAG.getConstant(2047, dl, MVT::i64));
8767       Round = DAG.getNode(ISD::OR, dl, MVT::i64, Round, SINT);
8768       Round = DAG.getNode(ISD::AND, dl, MVT::i64,
8769                           Round, DAG.getConstant(-2048, dl, MVT::i64));
8770 
8771       // However, we cannot use that value unconditionally: if the magnitude
8772       // of the input value is small, the bit-twiddling we did above might
8773       // end up visibly changing the output.  Fortunately, in that case, we
8774       // don't need to twiddle bits since the original input will convert
8775       // exactly to double-precision floating-point already.  Therefore,
8776       // construct a conditional to use the original value if the top 11
8777       // bits are all sign-bit copies, and use the rounded value computed
8778       // above otherwise.
8779       SDValue Cond = DAG.getNode(ISD::SRA, dl, MVT::i64,
8780                                  SINT, DAG.getConstant(53, dl, MVT::i32));
8781       Cond = DAG.getNode(ISD::ADD, dl, MVT::i64,
8782                          Cond, DAG.getConstant(1, dl, MVT::i64));
8783       Cond = DAG.getSetCC(
8784           dl,
8785           getSetCCResultType(DAG.getDataLayout(), *DAG.getContext(), MVT::i64),
8786           Cond, DAG.getConstant(1, dl, MVT::i64), ISD::SETUGT);
8787 
8788       SINT = DAG.getNode(ISD::SELECT, dl, MVT::i64, Cond, Round, SINT);
8789     }
8790 
8791     ReuseLoadInfo RLI;
8792     SDValue Bits;
8793 
8794     MachineFunction &MF = DAG.getMachineFunction();
8795     if (canReuseLoadAddress(SINT, MVT::i64, RLI, DAG)) {
8796       Bits = DAG.getLoad(MVT::f64, dl, RLI.Chain, RLI.Ptr, RLI.MPI,
8797                          RLI.Alignment, RLI.MMOFlags(), RLI.AAInfo, RLI.Ranges);
8798       spliceIntoChain(RLI.ResChain, Bits.getValue(1), DAG);
8799     } else if (Subtarget.hasLFIWAX() &&
8800                canReuseLoadAddress(SINT, MVT::i32, RLI, DAG, ISD::SEXTLOAD)) {
8801       MachineMemOperand *MMO =
8802         MF.getMachineMemOperand(RLI.MPI, MachineMemOperand::MOLoad, 4,
8803                                 RLI.Alignment, RLI.AAInfo, RLI.Ranges);
8804       SDValue Ops[] = { RLI.Chain, RLI.Ptr };
8805       Bits = DAG.getMemIntrinsicNode(PPCISD::LFIWAX, dl,
8806                                      DAG.getVTList(MVT::f64, MVT::Other),
8807                                      Ops, MVT::i32, MMO);
8808       spliceIntoChain(RLI.ResChain, Bits.getValue(1), DAG);
8809     } else if (Subtarget.hasFPCVT() &&
8810                canReuseLoadAddress(SINT, MVT::i32, RLI, DAG, ISD::ZEXTLOAD)) {
8811       MachineMemOperand *MMO =
8812         MF.getMachineMemOperand(RLI.MPI, MachineMemOperand::MOLoad, 4,
8813                                 RLI.Alignment, RLI.AAInfo, RLI.Ranges);
8814       SDValue Ops[] = { RLI.Chain, RLI.Ptr };
8815       Bits = DAG.getMemIntrinsicNode(PPCISD::LFIWZX, dl,
8816                                      DAG.getVTList(MVT::f64, MVT::Other),
8817                                      Ops, MVT::i32, MMO);
8818       spliceIntoChain(RLI.ResChain, Bits.getValue(1), DAG);
8819     } else if (((Subtarget.hasLFIWAX() &&
8820                  SINT.getOpcode() == ISD::SIGN_EXTEND) ||
8821                 (Subtarget.hasFPCVT() &&
8822                  SINT.getOpcode() == ISD::ZERO_EXTEND)) &&
8823                SINT.getOperand(0).getValueType() == MVT::i32) {
8824       MachineFrameInfo &MFI = MF.getFrameInfo();
8825       EVT PtrVT = getPointerTy(DAG.getDataLayout());
8826 
8827       int FrameIdx = MFI.CreateStackObject(4, Align(4), false);
8828       SDValue FIdx = DAG.getFrameIndex(FrameIdx, PtrVT);
8829 
8830       SDValue Store = DAG.getStore(Chain, dl, SINT.getOperand(0), FIdx,
8831                                    MachinePointerInfo::getFixedStack(
8832                                        DAG.getMachineFunction(), FrameIdx));
8833       Chain = Store;
8834 
8835       assert(cast<StoreSDNode>(Store)->getMemoryVT() == MVT::i32 &&
8836              "Expected an i32 store");
8837 
8838       RLI.Ptr = FIdx;
8839       RLI.Chain = Chain;
8840       RLI.MPI =
8841           MachinePointerInfo::getFixedStack(DAG.getMachineFunction(), FrameIdx);
8842       RLI.Alignment = Align(4);
8843 
8844       MachineMemOperand *MMO =
8845         MF.getMachineMemOperand(RLI.MPI, MachineMemOperand::MOLoad, 4,
8846                                 RLI.Alignment, RLI.AAInfo, RLI.Ranges);
8847       SDValue Ops[] = { RLI.Chain, RLI.Ptr };
8848       Bits = DAG.getMemIntrinsicNode(SINT.getOpcode() == ISD::ZERO_EXTEND ?
8849                                      PPCISD::LFIWZX : PPCISD::LFIWAX,
8850                                      dl, DAG.getVTList(MVT::f64, MVT::Other),
8851                                      Ops, MVT::i32, MMO);
8852       Chain = Bits.getValue(1);
8853     } else
8854       Bits = DAG.getNode(ISD::BITCAST, dl, MVT::f64, SINT);
8855 
8856     SDValue FP = convertIntToFP(Op, Bits, DAG, Subtarget, Chain);
8857     if (IsStrict)
8858       Chain = FP.getValue(1);
8859 
8860     if (Op.getValueType() == MVT::f32 && !Subtarget.hasFPCVT()) {
8861       if (IsStrict)
8862         FP = DAG.getNode(ISD::STRICT_FP_ROUND, dl,
8863                          DAG.getVTList(MVT::f32, MVT::Other),
8864                          {Chain, FP, DAG.getIntPtrConstant(0, dl)}, Flags);
8865       else
8866         FP = DAG.getNode(ISD::FP_ROUND, dl, MVT::f32, FP,
8867                          DAG.getIntPtrConstant(0, dl));
8868     }
8869     return FP;
8870   }
8871 
8872   assert(Src.getValueType() == MVT::i32 &&
8873          "Unhandled INT_TO_FP type in custom expander!");
8874   // Since we only generate this in 64-bit mode, we can take advantage of
8875   // 64-bit registers.  In particular, sign extend the input value into the
  // 64-bit register with extsw, store the WHOLE 64-bit value into the stack,
  // then lfd it and fcfid it.
8878   MachineFunction &MF = DAG.getMachineFunction();
8879   MachineFrameInfo &MFI = MF.getFrameInfo();
8880   EVT PtrVT = getPointerTy(MF.getDataLayout());
8881 
8882   SDValue Ld;
8883   if (Subtarget.hasLFIWAX() || Subtarget.hasFPCVT()) {
8884     ReuseLoadInfo RLI;
8885     bool ReusingLoad;
8886     if (!(ReusingLoad = canReuseLoadAddress(Src, MVT::i32, RLI, DAG))) {
8887       int FrameIdx = MFI.CreateStackObject(4, Align(4), false);
8888       SDValue FIdx = DAG.getFrameIndex(FrameIdx, PtrVT);
8889 
8890       SDValue Store = DAG.getStore(Chain, dl, Src, FIdx,
8891                                    MachinePointerInfo::getFixedStack(
8892                                        DAG.getMachineFunction(), FrameIdx));
8893       Chain = Store;
8894 
8895       assert(cast<StoreSDNode>(Store)->getMemoryVT() == MVT::i32 &&
8896              "Expected an i32 store");
8897 
8898       RLI.Ptr = FIdx;
8899       RLI.Chain = Chain;
8900       RLI.MPI =
8901           MachinePointerInfo::getFixedStack(DAG.getMachineFunction(), FrameIdx);
8902       RLI.Alignment = Align(4);
8903     }
8904 
8905     MachineMemOperand *MMO =
8906       MF.getMachineMemOperand(RLI.MPI, MachineMemOperand::MOLoad, 4,
8907                               RLI.Alignment, RLI.AAInfo, RLI.Ranges);
8908     SDValue Ops[] = { RLI.Chain, RLI.Ptr };
8909     Ld = DAG.getMemIntrinsicNode(IsSigned ? PPCISD::LFIWAX : PPCISD::LFIWZX, dl,
8910                                  DAG.getVTList(MVT::f64, MVT::Other), Ops,
8911                                  MVT::i32, MMO);
8912     Chain = Ld.getValue(1);
8913     if (ReusingLoad)
8914       spliceIntoChain(RLI.ResChain, Ld.getValue(1), DAG);
8915   } else {
8916     assert(Subtarget.isPPC64() &&
8917            "i32->FP without LFIWAX supported only on PPC64");
8918 
8919     int FrameIdx = MFI.CreateStackObject(8, Align(8), false);
8920     SDValue FIdx = DAG.getFrameIndex(FrameIdx, PtrVT);
8921 
8922     SDValue Ext64 = DAG.getNode(ISD::SIGN_EXTEND, dl, MVT::i64, Src);
8923 
8924     // STD the extended value into the stack slot.
8925     SDValue Store = DAG.getStore(
8926         Chain, dl, Ext64, FIdx,
8927         MachinePointerInfo::getFixedStack(DAG.getMachineFunction(), FrameIdx));
8928     Chain = Store;
8929 
8930     // Load the value as a double.
8931     Ld = DAG.getLoad(
8932         MVT::f64, dl, Chain, FIdx,
8933         MachinePointerInfo::getFixedStack(DAG.getMachineFunction(), FrameIdx));
8934     Chain = Ld.getValue(1);
8935   }
8936 
8937   // FCFID it and return it.
8938   SDValue FP = convertIntToFP(Op, Ld, DAG, Subtarget, Chain);
8939   if (IsStrict)
8940     Chain = FP.getValue(1);
8941   if (Op.getValueType() == MVT::f32 && !Subtarget.hasFPCVT()) {
8942     if (IsStrict)
8943       FP = DAG.getNode(ISD::STRICT_FP_ROUND, dl,
8944                        DAG.getVTList(MVT::f32, MVT::Other),
8945                        {Chain, FP, DAG.getIntPtrConstant(0, dl)}, Flags);
8946     else
8947       FP = DAG.getNode(ISD::FP_ROUND, dl, MVT::f32, FP,
8948                        DAG.getIntPtrConstant(0, dl));
8949   }
8950   return FP;
8951 }
8952 
8953 SDValue PPCTargetLowering::LowerFLT_ROUNDS_(SDValue Op,
8954                                             SelectionDAG &DAG) const {
8955   SDLoc dl(Op);
8956   /*
   The rounding mode is in bits 30:31 of FPSCR, and has the following
8958    settings:
8959      00 Round to nearest
8960      01 Round to 0
8961      10 Round to +inf
8962      11 Round to -inf
8963 
8964   FLT_ROUNDS, on the other hand, expects the following:
8965     -1 Undefined
8966      0 Round to 0
8967      1 Round to nearest
8968      2 Round to +inf
8969      3 Round to -inf
8970 
8971   To perform the conversion, we do:
8972     ((FPSCR & 0x3) ^ ((~FPSCR & 0x3) >> 1))
8973   */
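
  // Hand-checking the formula above for each FPSCR setting:
  //   FPSCR=00: (0 & 3) ^ (((~0) & 3) >> 1) = 0 ^ 1 = 1  (round to nearest)
  //   FPSCR=01: (1 & 3) ^ (((~1) & 3) >> 1) = 1 ^ 1 = 0  (round to 0)
  //   FPSCR=10: (2 & 3) ^ (((~2) & 3) >> 1) = 2 ^ 0 = 2  (round to +inf)
  //   FPSCR=11: (3 & 3) ^ (((~3) & 3) >> 1) = 3 ^ 0 = 3  (round to -inf)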
8974 
8975   MachineFunction &MF = DAG.getMachineFunction();
8976   EVT VT = Op.getValueType();
8977   EVT PtrVT = getPointerTy(MF.getDataLayout());
8978 
8979   // Save FP Control Word to register
8980   SDValue Chain = Op.getOperand(0);
8981   SDValue MFFS = DAG.getNode(PPCISD::MFFS, dl, {MVT::f64, MVT::Other}, Chain);
8982   Chain = MFFS.getValue(1);
8983 
8984   SDValue CWD;
8985   if (isTypeLegal(MVT::i64)) {
8986     CWD = DAG.getNode(ISD::TRUNCATE, dl, MVT::i32,
8987                       DAG.getNode(ISD::BITCAST, dl, MVT::i64, MFFS));
8988   } else {
8989     // Save FP register to stack slot
8990     int SSFI = MF.getFrameInfo().CreateStackObject(8, Align(8), false);
8991     SDValue StackSlot = DAG.getFrameIndex(SSFI, PtrVT);
8992     Chain = DAG.getStore(Chain, dl, MFFS, StackSlot, MachinePointerInfo());
8993 
8994     // Load FP Control Word from low 32 bits of stack slot.
8995     assert(hasBigEndianPartOrdering(MVT::i64, MF.getDataLayout()) &&
8996            "Stack slot adjustment is valid only on big endian subtargets!");
8997     SDValue Four = DAG.getConstant(4, dl, PtrVT);
8998     SDValue Addr = DAG.getNode(ISD::ADD, dl, PtrVT, StackSlot, Four);
8999     CWD = DAG.getLoad(MVT::i32, dl, Chain, Addr, MachinePointerInfo());
9000     Chain = CWD.getValue(1);
9001   }
9002 
9003   // Transform as necessary
9004   SDValue CWD1 =
9005     DAG.getNode(ISD::AND, dl, MVT::i32,
9006                 CWD, DAG.getConstant(3, dl, MVT::i32));
9007   SDValue CWD2 =
9008     DAG.getNode(ISD::SRL, dl, MVT::i32,
9009                 DAG.getNode(ISD::AND, dl, MVT::i32,
9010                             DAG.getNode(ISD::XOR, dl, MVT::i32,
9011                                         CWD, DAG.getConstant(3, dl, MVT::i32)),
9012                             DAG.getConstant(3, dl, MVT::i32)),
9013                 DAG.getConstant(1, dl, MVT::i32));
9014 
9015   SDValue RetVal =
9016     DAG.getNode(ISD::XOR, dl, MVT::i32, CWD1, CWD2);
9017 
9018   RetVal =
9019       DAG.getNode((VT.getSizeInBits() < 16 ? ISD::TRUNCATE : ISD::ZERO_EXTEND),
9020                   dl, VT, RetVal);
9021 
9022   return DAG.getMergeValues({RetVal, Chain}, dl);
9023 }
9024 
9025 SDValue PPCTargetLowering::LowerSHL_PARTS(SDValue Op, SelectionDAG &DAG) const {
9026   EVT VT = Op.getValueType();
9027   unsigned BitWidth = VT.getSizeInBits();
9028   SDLoc dl(Op);
9029   assert(Op.getNumOperands() == 3 &&
9030          VT == Op.getOperand(1).getValueType() &&
9031          "Unexpected SHL!");
9032 
9033   // Expand into a bunch of logical ops.  Note that these ops
9034   // depend on the PPC behavior for oversized shift amounts.
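  // For Amt in [0, BitWidth), OutHi = (Hi << Amt) | (Lo >> (BitWidth - Amt))
  // and the Lo << (Amt - BitWidth) term is zero; for Amt in
  // [BitWidth, 2*BitWidth), OutHi evaluates to Lo << (Amt - BitWidth).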
9035   SDValue Lo = Op.getOperand(0);
9036   SDValue Hi = Op.getOperand(1);
9037   SDValue Amt = Op.getOperand(2);
9038   EVT AmtVT = Amt.getValueType();
9039 
9040   SDValue Tmp1 = DAG.getNode(ISD::SUB, dl, AmtVT,
9041                              DAG.getConstant(BitWidth, dl, AmtVT), Amt);
9042   SDValue Tmp2 = DAG.getNode(PPCISD::SHL, dl, VT, Hi, Amt);
9043   SDValue Tmp3 = DAG.getNode(PPCISD::SRL, dl, VT, Lo, Tmp1);
9044   SDValue Tmp4 = DAG.getNode(ISD::OR , dl, VT, Tmp2, Tmp3);
9045   SDValue Tmp5 = DAG.getNode(ISD::ADD, dl, AmtVT, Amt,
9046                              DAG.getConstant(-BitWidth, dl, AmtVT));
9047   SDValue Tmp6 = DAG.getNode(PPCISD::SHL, dl, VT, Lo, Tmp5);
9048   SDValue OutHi = DAG.getNode(ISD::OR, dl, VT, Tmp4, Tmp6);
9049   SDValue OutLo = DAG.getNode(PPCISD::SHL, dl, VT, Lo, Amt);
9050   SDValue OutOps[] = { OutLo, OutHi };
9051   return DAG.getMergeValues(OutOps, dl);
9052 }
9053 
9054 SDValue PPCTargetLowering::LowerSRL_PARTS(SDValue Op, SelectionDAG &DAG) const {
9055   EVT VT = Op.getValueType();
9056   SDLoc dl(Op);
9057   unsigned BitWidth = VT.getSizeInBits();
9058   assert(Op.getNumOperands() == 3 &&
9059          VT == Op.getOperand(1).getValueType() &&
9060          "Unexpected SRL!");
9061 
9062   // Expand into a bunch of logical ops.  Note that these ops
9063   // depend on the PPC behavior for oversized shift amounts.
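  // Mirroring LowerSHL_PARTS: for Amt in [0, BitWidth),
  // OutLo = (Lo >> Amt) | (Hi << (BitWidth - Amt)); for Amt in
  // [BitWidth, 2*BitWidth), OutLo evaluates to Hi >> (Amt - BitWidth).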
9064   SDValue Lo = Op.getOperand(0);
9065   SDValue Hi = Op.getOperand(1);
9066   SDValue Amt = Op.getOperand(2);
9067   EVT AmtVT = Amt.getValueType();
9068 
9069   SDValue Tmp1 = DAG.getNode(ISD::SUB, dl, AmtVT,
9070                              DAG.getConstant(BitWidth, dl, AmtVT), Amt);
9071   SDValue Tmp2 = DAG.getNode(PPCISD::SRL, dl, VT, Lo, Amt);
9072   SDValue Tmp3 = DAG.getNode(PPCISD::SHL, dl, VT, Hi, Tmp1);
9073   SDValue Tmp4 = DAG.getNode(ISD::OR, dl, VT, Tmp2, Tmp3);
9074   SDValue Tmp5 = DAG.getNode(ISD::ADD, dl, AmtVT, Amt,
9075                              DAG.getConstant(-BitWidth, dl, AmtVT));
9076   SDValue Tmp6 = DAG.getNode(PPCISD::SRL, dl, VT, Hi, Tmp5);
9077   SDValue OutLo = DAG.getNode(ISD::OR, dl, VT, Tmp4, Tmp6);
9078   SDValue OutHi = DAG.getNode(PPCISD::SRL, dl, VT, Hi, Amt);
9079   SDValue OutOps[] = { OutLo, OutHi };
9080   return DAG.getMergeValues(OutOps, dl);
9081 }
9082 
9083 SDValue PPCTargetLowering::LowerSRA_PARTS(SDValue Op, SelectionDAG &DAG) const {
9084   SDLoc dl(Op);
9085   EVT VT = Op.getValueType();
9086   unsigned BitWidth = VT.getSizeInBits();
9087   assert(Op.getNumOperands() == 3 &&
9088          VT == Op.getOperand(1).getValueType() &&
9089          "Unexpected SRA!");
9090 
9091   // Expand into a bunch of logical ops, followed by a select_cc.
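  // Unlike the logical shifts above, the low result must be filled from the
  // sign-extended Hi once the shift amount exceeds BitWidth, so OutLo selects
  // between (Lo >> Amt) | (Hi << (BitWidth - Amt)) when Amt - BitWidth <= 0
  // and the sign-filling Hi >> (Amt - BitWidth) otherwise.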
9092   SDValue Lo = Op.getOperand(0);
9093   SDValue Hi = Op.getOperand(1);
9094   SDValue Amt = Op.getOperand(2);
9095   EVT AmtVT = Amt.getValueType();
9096 
9097   SDValue Tmp1 = DAG.getNode(ISD::SUB, dl, AmtVT,
9098                              DAG.getConstant(BitWidth, dl, AmtVT), Amt);
9099   SDValue Tmp2 = DAG.getNode(PPCISD::SRL, dl, VT, Lo, Amt);
9100   SDValue Tmp3 = DAG.getNode(PPCISD::SHL, dl, VT, Hi, Tmp1);
9101   SDValue Tmp4 = DAG.getNode(ISD::OR, dl, VT, Tmp2, Tmp3);
9102   SDValue Tmp5 = DAG.getNode(ISD::ADD, dl, AmtVT, Amt,
9103                              DAG.getConstant(-BitWidth, dl, AmtVT));
9104   SDValue Tmp6 = DAG.getNode(PPCISD::SRA, dl, VT, Hi, Tmp5);
9105   SDValue OutHi = DAG.getNode(PPCISD::SRA, dl, VT, Hi, Amt);
9106   SDValue OutLo = DAG.getSelectCC(dl, Tmp5, DAG.getConstant(0, dl, AmtVT),
9107                                   Tmp4, Tmp6, ISD::SETLE);
9108   SDValue OutOps[] = { OutLo, OutHi };
9109   return DAG.getMergeValues(OutOps, dl);
9110 }
9111 
9112 SDValue PPCTargetLowering::LowerFunnelShift(SDValue Op,
9113                                             SelectionDAG &DAG) const {
9114   SDLoc dl(Op);
9115   EVT VT = Op.getValueType();
9116   unsigned BitWidth = VT.getSizeInBits();
9117 
9118   bool IsFSHL = Op.getOpcode() == ISD::FSHL;
9119   SDValue X = Op.getOperand(0);
9120   SDValue Y = Op.getOperand(1);
9121   SDValue Z = Op.getOperand(2);
9122   EVT AmtVT = Z.getValueType();
9123 
9124   // fshl: (X << (Z % BW)) | (Y >> (BW - (Z % BW)))
9125   // fshr: (X << (BW - (Z % BW))) | (Y >> (Z % BW))
9126   // This is simpler than TargetLowering::expandFunnelShift because we can rely
9127   // on PowerPC shift by BW being well defined.
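  // For example, a 32-bit fshl by Z = 8 computes (X << 8) | (Y >> 24); with
  // Z = 0, SubZ is 32 and the PPC shift by 32 zeroes the Y term, leaving
  // just X as required.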
9128   Z = DAG.getNode(ISD::AND, dl, AmtVT, Z,
9129                   DAG.getConstant(BitWidth - 1, dl, AmtVT));
9130   SDValue SubZ =
9131       DAG.getNode(ISD::SUB, dl, AmtVT, DAG.getConstant(BitWidth, dl, AmtVT), Z);
9132   X = DAG.getNode(PPCISD::SHL, dl, VT, X, IsFSHL ? Z : SubZ);
9133   Y = DAG.getNode(PPCISD::SRL, dl, VT, Y, IsFSHL ? SubZ : Z);
9134   return DAG.getNode(ISD::OR, dl, VT, X, Y);
9135 }
9136 
9137 //===----------------------------------------------------------------------===//
9138 // Vector related lowering.
9139 //
9140 
9141 /// getCanonicalConstSplat - Build a canonical splat immediate of Val with an
9142 /// element size of SplatSize. Cast the result to VT.
9143 static SDValue getCanonicalConstSplat(uint64_t Val, unsigned SplatSize, EVT VT,
9144                                       SelectionDAG &DAG, const SDLoc &dl) {
9145   static const MVT VTys[] = { // canonical VT to use for each size.
9146     MVT::v16i8, MVT::v8i16, MVT::Other, MVT::v4i32
9147   };
9148 
9149   EVT ReqVT = VT != MVT::Other ? VT : VTys[SplatSize-1];
9150 
  // For a splat with all ones, turn it into vspltisb 0xFF to canonicalize.
  if (Val == ((1ULL << (SplatSize * 8)) - 1)) {
9153     SplatSize = 1;
9154     Val = 0xFF;
9155   }
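
  // For example, an all-ones v8i16 splat (Val == 0xFFFF, SplatSize == 2) is
  // canonicalized to a one-byte splat of 0xFF, i.e. vspltisb -1.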
9156 
9157   EVT CanonicalVT = VTys[SplatSize-1];
9158 
9159   // Build a canonical splat for this value.
9160   return DAG.getBitcast(ReqVT, DAG.getConstant(Val, dl, CanonicalVT));
9161 }
9162 
9163 /// BuildIntrinsicOp - Return a unary operator intrinsic node with the
9164 /// specified intrinsic ID.
9165 static SDValue BuildIntrinsicOp(unsigned IID, SDValue Op, SelectionDAG &DAG,
9166                                 const SDLoc &dl, EVT DestVT = MVT::Other) {
9167   if (DestVT == MVT::Other) DestVT = Op.getValueType();
9168   return DAG.getNode(ISD::INTRINSIC_WO_CHAIN, dl, DestVT,
9169                      DAG.getConstant(IID, dl, MVT::i32), Op);
9170 }
9171 
9172 /// BuildIntrinsicOp - Return a binary operator intrinsic node with the
9173 /// specified intrinsic ID.
9174 static SDValue BuildIntrinsicOp(unsigned IID, SDValue LHS, SDValue RHS,
9175                                 SelectionDAG &DAG, const SDLoc &dl,
9176                                 EVT DestVT = MVT::Other) {
9177   if (DestVT == MVT::Other) DestVT = LHS.getValueType();
9178   return DAG.getNode(ISD::INTRINSIC_WO_CHAIN, dl, DestVT,
9179                      DAG.getConstant(IID, dl, MVT::i32), LHS, RHS);
9180 }
9181 
9182 /// BuildIntrinsicOp - Return a ternary operator intrinsic node with the
9183 /// specified intrinsic ID.
9184 static SDValue BuildIntrinsicOp(unsigned IID, SDValue Op0, SDValue Op1,
9185                                 SDValue Op2, SelectionDAG &DAG, const SDLoc &dl,
9186                                 EVT DestVT = MVT::Other) {
9187   if (DestVT == MVT::Other) DestVT = Op0.getValueType();
9188   return DAG.getNode(ISD::INTRINSIC_WO_CHAIN, dl, DestVT,
9189                      DAG.getConstant(IID, dl, MVT::i32), Op0, Op1, Op2);
9190 }
9191 
9192 /// BuildVSLDOI - Return a VECTOR_SHUFFLE that is a vsldoi of the specified
9193 /// amount.  The result has the specified value type.
9194 static SDValue BuildVSLDOI(SDValue LHS, SDValue RHS, unsigned Amt, EVT VT,
9195                            SelectionDAG &DAG, const SDLoc &dl) {
9196   // Force LHS/RHS to be the right type.
9197   LHS = DAG.getNode(ISD::BITCAST, dl, MVT::v16i8, LHS);
9198   RHS = DAG.getNode(ISD::BITCAST, dl, MVT::v16i8, RHS);
9199 
9200   int Ops[16];
9201   for (unsigned i = 0; i != 16; ++i)
9202     Ops[i] = i + Amt;
9203   SDValue T = DAG.getVectorShuffle(MVT::v16i8, dl, LHS, RHS, Ops);
9204   return DAG.getNode(ISD::BITCAST, dl, VT, T);
9205 }
9206 
9207 /// Do we have an efficient pattern in a .td file for this node?
9208 ///
9209 /// \param V - pointer to the BuildVectorSDNode being matched
9210 /// \param HasDirectMove - does this subtarget have VSR <-> GPR direct moves?
9211 ///
9212 /// There are some patterns where it is beneficial to keep a BUILD_VECTOR
9213 /// node as a BUILD_VECTOR node rather than expanding it. The patterns where
9214 /// the opposite is true (expansion is beneficial) are:
9215 /// - The node builds a vector out of integers that are not 32 or 64-bits
9216 /// - The node builds a vector out of constants
9217 /// - The node is a "load-and-splat"
9218 /// In all other cases, we will choose to keep the BUILD_VECTOR.
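/// For example, a v2f64 built from two different non-constant scalars is
/// kept as a BUILD_VECTOR, while a vector splatting one loaded value is
/// expanded so it can be matched as a load-and-splat.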
9219 static bool haveEfficientBuildVectorPattern(BuildVectorSDNode *V,
9220                                             bool HasDirectMove,
9221                                             bool HasP8Vector) {
9222   EVT VecVT = V->getValueType(0);
9223   bool RightType = VecVT == MVT::v2f64 ||
9224     (HasP8Vector && VecVT == MVT::v4f32) ||
9225     (HasDirectMove && (VecVT == MVT::v2i64 || VecVT == MVT::v4i32));
9226   if (!RightType)
9227     return false;
9228 
9229   bool IsSplat = true;
9230   bool IsLoad = false;
9231   SDValue Op0 = V->getOperand(0);
9232 
9233   // This function is called in a block that confirms the node is not a constant
9234   // splat. So a constant BUILD_VECTOR here means the vector is built out of
9235   // different constants.
9236   if (V->isConstant())
9237     return false;
9238   for (int i = 0, e = V->getNumOperands(); i < e; ++i) {
9239     if (V->getOperand(i).isUndef())
9240       return false;
9241     // We want to expand nodes that represent load-and-splat even if the
9242     // loaded value is a floating point truncation or conversion to int.
9243     if (V->getOperand(i).getOpcode() == ISD::LOAD ||
9244         (V->getOperand(i).getOpcode() == ISD::FP_ROUND &&
9245          V->getOperand(i).getOperand(0).getOpcode() == ISD::LOAD) ||
9246         (V->getOperand(i).getOpcode() == ISD::FP_TO_SINT &&
9247          V->getOperand(i).getOperand(0).getOpcode() == ISD::LOAD) ||
9248         (V->getOperand(i).getOpcode() == ISD::FP_TO_UINT &&
9249          V->getOperand(i).getOperand(0).getOpcode() == ISD::LOAD))
9250       IsLoad = true;
9251     // If the operands are different or the input is not a load and has more
9252     // uses than just this BV node, then it isn't a splat.
9253     if (V->getOperand(i) != Op0 ||
9254         (!IsLoad && !V->isOnlyUserOf(V->getOperand(i).getNode())))
9255       IsSplat = false;
9256   }
9257   return !(IsSplat && IsLoad);
9258 }
9259 
9260 // Lower BITCAST(f128, (build_pair i64, i64)) to BUILD_FP128.
9261 SDValue PPCTargetLowering::LowerBITCAST(SDValue Op, SelectionDAG &DAG) const {
9262 
9263   SDLoc dl(Op);
9264   SDValue Op0 = Op->getOperand(0);
9265 
9266   if ((Op.getValueType() != MVT::f128) ||
9267       (Op0.getOpcode() != ISD::BUILD_PAIR) ||
9268       (Op0.getOperand(0).getValueType() != MVT::i64) ||
9269       (Op0.getOperand(1).getValueType() != MVT::i64))
9270     return SDValue();
9271 
9272   return DAG.getNode(PPCISD::BUILD_FP128, dl, MVT::f128, Op0.getOperand(0),
9273                      Op0.getOperand(1));
9274 }
9275 
9276 static const SDValue *getNormalLoadInput(const SDValue &Op, bool &IsPermuted) {
9277   const SDValue *InputLoad = &Op;
9278   if (InputLoad->getOpcode() == ISD::BITCAST)
9279     InputLoad = &InputLoad->getOperand(0);
9280   if (InputLoad->getOpcode() == ISD::SCALAR_TO_VECTOR ||
9281       InputLoad->getOpcode() == PPCISD::SCALAR_TO_VECTOR_PERMUTED) {
9282     IsPermuted = InputLoad->getOpcode() == PPCISD::SCALAR_TO_VECTOR_PERMUTED;
9283     InputLoad = &InputLoad->getOperand(0);
9284   }
9285   if (InputLoad->getOpcode() != ISD::LOAD)
9286     return nullptr;
9287   LoadSDNode *LD = cast<LoadSDNode>(*InputLoad);
9288   return ISD::isNormalLoad(LD) ? InputLoad : nullptr;
9289 }
9290 
9291 // Convert the argument APFloat to a single precision APFloat if there is no
9292 // loss in information during the conversion to single precision APFloat and the
9293 // resulting number is not a denormal number. Return true if successful.
9294 bool llvm::convertToNonDenormSingle(APFloat &ArgAPFloat) {
9295   APFloat APFloatToConvert = ArgAPFloat;
9296   bool LosesInfo = true;
9297   APFloatToConvert.convert(APFloat::IEEEsingle(), APFloat::rmNearestTiesToEven,
9298                            &LosesInfo);
9299   bool Success = (!LosesInfo && !APFloatToConvert.isDenormal());
9300   if (Success)
9301     ArgAPFloat = APFloatToConvert;
9302   return Success;
9303 }
9304 
9305 // Bitcast the argument APInt to a double and convert it to a single precision
9306 // APFloat, bitcast the APFloat to an APInt and assign it to the original
9307 // argument if there is no loss in information during the conversion from
9308 // double to single precision APFloat and the resulting number is not a denormal
9309 // number. Return true if successful.
9310 bool llvm::convertToNonDenormSingle(APInt &ArgAPInt) {
9311   double DpValue = ArgAPInt.bitsToDouble();
9312   APFloat APFloatDp(DpValue);
9313   bool Success = convertToNonDenormSingle(APFloatDp);
9314   if (Success)
9315     ArgAPInt = APFloatDp.bitcastToAPInt();
9316   return Success;
9317 }
9318 
9319 // If this is a case we can't handle, return null and let the default
9320 // expansion code take care of it.  If we CAN select this case, and if it
9321 // selects to a single instruction, return Op.  Otherwise, if we can codegen
9322 // this case more efficiently than a constant pool load, lower it to the
9323 // sequence of ops that should be used.
9324 SDValue PPCTargetLowering::LowerBUILD_VECTOR(SDValue Op,
9325                                              SelectionDAG &DAG) const {
9326   SDLoc dl(Op);
9327   BuildVectorSDNode *BVN = dyn_cast<BuildVectorSDNode>(Op.getNode());
9328   assert(BVN && "Expected a BuildVectorSDNode in LowerBUILD_VECTOR");
9329 
9330   // Check if this is a splat of a constant value.
9331   APInt APSplatBits, APSplatUndef;
9332   unsigned SplatBitSize;
9333   bool HasAnyUndefs;
9334   bool BVNIsConstantSplat =
9335       BVN->isConstantSplat(APSplatBits, APSplatUndef, SplatBitSize,
9336                            HasAnyUndefs, 0, !Subtarget.isLittleEndian());
9337 
9338   // If it is a splat of a double, check if we can shrink it to a 32 bit
9339   // non-denormal float which when converted back to double gives us the same
9340   // double. This is to exploit the XXSPLTIDP instruction.
9341   if (BVNIsConstantSplat && Subtarget.hasPrefixInstrs() &&
9342       (SplatBitSize == 64) && (Op->getValueType(0) == MVT::v2f64) &&
9343       convertToNonDenormSingle(APSplatBits)) {
9344     SDValue SplatNode = DAG.getNode(
9345         PPCISD::XXSPLTI_SP_TO_DP, dl, MVT::v2f64,
9346         DAG.getTargetConstant(APSplatBits.getZExtValue(), dl, MVT::i32));
9347     return DAG.getBitcast(Op.getValueType(), SplatNode);
9348   }
9349 
9350   if (!BVNIsConstantSplat || SplatBitSize > 32) {
9351 
9352     bool IsPermutedLoad = false;
9353     const SDValue *InputLoad =
9354         getNormalLoadInput(Op.getOperand(0), IsPermutedLoad);
9355     // Handle load-and-splat patterns as we have instructions that will do this
9356     // in one go.
9357     if (InputLoad && DAG.isSplatValue(Op, true)) {
9358       LoadSDNode *LD = cast<LoadSDNode>(*InputLoad);
9359 
9360       // We have handling for 4 and 8 byte elements.
9361       unsigned ElementSize = LD->getMemoryVT().getScalarSizeInBits();
9362 
      // To verify that this load has no other users, we expect exactly
      // (vector width (128 bits) / ElementSize) uses, since each operand of
      // the BUILD_VECTOR is a separate use of the loaded value.
9366       unsigned NumUsesOfInputLD = 128 / ElementSize;
9367       for (SDValue BVInOp : Op->ops())
9368         if (BVInOp.isUndef())
9369           NumUsesOfInputLD--;
9370       assert(NumUsesOfInputLD > 0 && "No uses of input LD of a build_vector?");
9371       if (InputLoad->getNode()->hasNUsesOfValue(NumUsesOfInputLD, 0) &&
9372           ((Subtarget.hasVSX() && ElementSize == 64) ||
9373            (Subtarget.hasP9Vector() && ElementSize == 32))) {
9374         SDValue Ops[] = {
9375           LD->getChain(),    // Chain
9376           LD->getBasePtr(),  // Ptr
9377           DAG.getValueType(Op.getValueType()) // VT
9378         };
9379         SDValue LdSplt = DAG.getMemIntrinsicNode(
9380             PPCISD::LD_SPLAT, dl, DAG.getVTList(Op.getValueType(), MVT::Other),
9381             Ops, LD->getMemoryVT(), LD->getMemOperand());
9382         // Replace all uses of the output chain of the original load with the
9383         // output chain of the new load.
9384         DAG.ReplaceAllUsesOfValueWith(InputLoad->getValue(1),
9385                                       LdSplt.getValue(1));
9386         return LdSplt;
9387       }
9388     }
9389 
9390     // BUILD_VECTOR nodes that are not constant splats of up to 32-bits can be
9391     // lowered to VSX instructions under certain conditions.
9392     // Without VSX, there is no pattern more efficient than expanding the node.
9393     if (Subtarget.hasVSX() &&
9394         haveEfficientBuildVectorPattern(BVN, Subtarget.hasDirectMove(),
9395                                         Subtarget.hasP8Vector()))
9396       return Op;
9397     return SDValue();
9398   }
9399 
9400   uint64_t SplatBits = APSplatBits.getZExtValue();
9401   uint64_t SplatUndef = APSplatUndef.getZExtValue();
9402   unsigned SplatSize = SplatBitSize / 8;
9403 
9404   // First, handle single instruction cases.
9405 
9406   // All zeros?
9407   if (SplatBits == 0) {
9408     // Canonicalize all zero vectors to be v4i32.
9409     if (Op.getValueType() != MVT::v4i32 || HasAnyUndefs) {
9410       SDValue Z = DAG.getConstant(0, dl, MVT::v4i32);
9411       Op = DAG.getNode(ISD::BITCAST, dl, Op.getValueType(), Z);
9412     }
9413     return Op;
9414   }
9415 
9416   // We have XXSPLTIW for constant splats four bytes wide.
  // Since the vector length is a multiple of 4 bytes, 2-byte splats can be
  // replaced with 4-byte splats: we replicate the SplatBits of a 2-byte splat
  // to make a 4-byte splat element. For example, a 2-byte splat of 0xABAB
  // becomes a 4-byte splat of 0xABABABAB.
9421   if (Subtarget.hasPrefixInstrs() && SplatSize == 2)
9422     return getCanonicalConstSplat(SplatBits | (SplatBits << 16), SplatSize * 2,
9423                                   Op.getValueType(), DAG, dl);
9424 
9425   if (Subtarget.hasPrefixInstrs() && SplatSize == 4)
9426     return getCanonicalConstSplat(SplatBits, SplatSize, Op.getValueType(), DAG,
9427                                   dl);
9428 
9429   // We have XXSPLTIB for constant splats one byte wide.
9430   if (Subtarget.hasP9Vector() && SplatSize == 1)
9431     return getCanonicalConstSplat(SplatBits, SplatSize, Op.getValueType(), DAG,
9432                                   dl);
9433 
9434   // If the sign extended value is in the range [-16,15], use VSPLTI[bhw].
9435   int32_t SextVal= (int32_t(SplatBits << (32-SplatBitSize)) >>
9436                     (32-SplatBitSize));
9437   if (SextVal >= -16 && SextVal <= 15)
9438     return getCanonicalConstSplat(SextVal, SplatSize, Op.getValueType(), DAG,
9439                                   dl);
9440 
9441   // Two instruction sequences.
9442 
9443   // If this value is in the range [-32,30] and is even, use:
9444   //     VSPLTI[bhw](val/2) + VSPLTI[bhw](val/2)
9445   // If this value is in the range [17,31] and is odd, use:
9446   //     VSPLTI[bhw](val-16) - VSPLTI[bhw](-16)
9447   // If this value is in the range [-31,-17] and is odd, use:
9448   //     VSPLTI[bhw](val+16) + VSPLTI[bhw](-16)
9449   // Note the last two are three-instruction sequences.
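  // For example, 18 = 9 + 9 is built as vsplti(9) + vsplti(9), and
  // 17 = 1 - (-16) as vsplti(1) - vsplti(-16).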
9450   if (SextVal >= -32 && SextVal <= 31) {
9451     // To avoid having these optimizations undone by constant folding,
9452     // we convert to a pseudo that will be expanded later into one of
9453     // the above forms.
9454     SDValue Elt = DAG.getConstant(SextVal, dl, MVT::i32);
9455     EVT VT = (SplatSize == 1 ? MVT::v16i8 :
9456               (SplatSize == 2 ? MVT::v8i16 : MVT::v4i32));
9457     SDValue EltSize = DAG.getConstant(SplatSize, dl, MVT::i32);
9458     SDValue RetVal = DAG.getNode(PPCISD::VADD_SPLAT, dl, VT, Elt, EltSize);
9459     if (VT == Op.getValueType())
9460       return RetVal;
9461     else
9462       return DAG.getNode(ISD::BITCAST, dl, Op.getValueType(), RetVal);
9463   }
9464 
9465   // If this is 0x8000_0000 x 4, turn into vspltisw + vslw.  If it is
9466   // 0x7FFF_FFFF x 4, turn it into not(0x8000_0000).  This is important
9467   // for fneg/fabs.
9468   if (SplatSize == 4 && SplatBits == (0x7FFFFFFF&~SplatUndef)) {
9469     // Make -1 and vspltisw -1:
9470     SDValue OnesV = getCanonicalConstSplat(-1, 4, MVT::v4i32, DAG, dl);
9471 
9472     // Make the VSLW intrinsic, computing 0x8000_0000.
9473     SDValue Res = BuildIntrinsicOp(Intrinsic::ppc_altivec_vslw, OnesV,
9474                                    OnesV, DAG, dl);
9475 
9476     // xor by OnesV to invert it.
9477     Res = DAG.getNode(ISD::XOR, dl, MVT::v4i32, Res, OnesV);
9478     return DAG.getNode(ISD::BITCAST, dl, Op.getValueType(), Res);
9479   }
9480 
9481   // Check to see if this is a wide variety of vsplti*, binop self cases.
9482   static const signed char SplatCsts[] = {
9483     -1, 1, -2, 2, -3, 3, -4, 4, -5, 5, -6, 6, -7, 7,
9484     -8, 8, -9, 9, -10, 10, -11, 11, -12, 12, -13, 13, 14, -14, 15, -15, -16
9485   };
9486 
9487   for (unsigned idx = 0; idx < array_lengthof(SplatCsts); ++idx) {
    // Indirect through the SplatCsts array so that we favor 'vsplti -1' for
    // cases which are ambiguous (e.g. formation of 0x8000_0000).
9490     int i = SplatCsts[idx];
9491 
9492     // Figure out what shift amount will be used by altivec if shifted by i in
9493     // this splat size.
9494     unsigned TypeShiftAmt = i & (SplatBitSize-1);
9495 
9496     // vsplti + shl self.
9497     if (SextVal == (int)((unsigned)i << TypeShiftAmt)) {
9498       SDValue Res = getCanonicalConstSplat(i, SplatSize, MVT::Other, DAG, dl);
9499       static const unsigned IIDs[] = { // Intrinsic to use for each size.
9500         Intrinsic::ppc_altivec_vslb, Intrinsic::ppc_altivec_vslh, 0,
9501         Intrinsic::ppc_altivec_vslw
9502       };
9503       Res = BuildIntrinsicOp(IIDs[SplatSize-1], Res, Res, DAG, dl);
9504       return DAG.getNode(ISD::BITCAST, dl, Op.getValueType(), Res);
9505     }
9506 
9507     // vsplti + srl self.
9508     if (SextVal == (int)((unsigned)i >> TypeShiftAmt)) {
9509       SDValue Res = getCanonicalConstSplat(i, SplatSize, MVT::Other, DAG, dl);
9510       static const unsigned IIDs[] = { // Intrinsic to use for each size.
9511         Intrinsic::ppc_altivec_vsrb, Intrinsic::ppc_altivec_vsrh, 0,
9512         Intrinsic::ppc_altivec_vsrw
9513       };
9514       Res = BuildIntrinsicOp(IIDs[SplatSize-1], Res, Res, DAG, dl);
9515       return DAG.getNode(ISD::BITCAST, dl, Op.getValueType(), Res);
9516     }
9517 
    // vsplti + sra self.  Note: this must use an arithmetic shift of the
    // signed value; a logical shift would just repeat the srl case above.
    if (SextVal == ((int)i >> TypeShiftAmt)) {
9520       SDValue Res = getCanonicalConstSplat(i, SplatSize, MVT::Other, DAG, dl);
9521       static const unsigned IIDs[] = { // Intrinsic to use for each size.
9522         Intrinsic::ppc_altivec_vsrab, Intrinsic::ppc_altivec_vsrah, 0,
9523         Intrinsic::ppc_altivec_vsraw
9524       };
9525       Res = BuildIntrinsicOp(IIDs[SplatSize-1], Res, Res, DAG, dl);
9526       return DAG.getNode(ISD::BITCAST, dl, Op.getValueType(), Res);
9527     }
9528 
9529     // vsplti + rol self.
9530     if (SextVal == (int)(((unsigned)i << TypeShiftAmt) |
9531                          ((unsigned)i >> (SplatBitSize-TypeShiftAmt)))) {
9532       SDValue Res = getCanonicalConstSplat(i, SplatSize, MVT::Other, DAG, dl);
9533       static const unsigned IIDs[] = { // Intrinsic to use for each size.
9534         Intrinsic::ppc_altivec_vrlb, Intrinsic::ppc_altivec_vrlh, 0,
9535         Intrinsic::ppc_altivec_vrlw
9536       };
9537       Res = BuildIntrinsicOp(IIDs[SplatSize-1], Res, Res, DAG, dl);
9538       return DAG.getNode(ISD::BITCAST, dl, Op.getValueType(), Res);
9539     }
9540 
9541     // t = vsplti c, result = vsldoi t, t, 1
9542     if (SextVal == (int)(((unsigned)i << 8) | (i < 0 ? 0xFF : 0))) {
9543       SDValue T = getCanonicalConstSplat(i, SplatSize, MVT::v16i8, DAG, dl);
9544       unsigned Amt = Subtarget.isLittleEndian() ? 15 : 1;
9545       return BuildVSLDOI(T, T, Amt, Op.getValueType(), DAG, dl);
9546     }
9547     // t = vsplti c, result = vsldoi t, t, 2
9548     if (SextVal == (int)(((unsigned)i << 16) | (i < 0 ? 0xFFFF : 0))) {
9549       SDValue T = getCanonicalConstSplat(i, SplatSize, MVT::v16i8, DAG, dl);
9550       unsigned Amt = Subtarget.isLittleEndian() ? 14 : 2;
9551       return BuildVSLDOI(T, T, Amt, Op.getValueType(), DAG, dl);
9552     }
9553     // t = vsplti c, result = vsldoi t, t, 3
9554     if (SextVal == (int)(((unsigned)i << 24) | (i < 0 ? 0xFFFFFF : 0))) {
9555       SDValue T = getCanonicalConstSplat(i, SplatSize, MVT::v16i8, DAG, dl);
9556       unsigned Amt = Subtarget.isLittleEndian() ? 13 : 3;
9557       return BuildVSLDOI(T, T, Amt, Op.getValueType(), DAG, dl);
9558     }
9559   }
9560 
9561   return SDValue();
9562 }
9563 
9564 /// GeneratePerfectShuffle - Given an entry in the perfect-shuffle table, emit
9565 /// the specified operations to build the shuffle.
9566 static SDValue GeneratePerfectShuffle(unsigned PFEntry, SDValue LHS,
9567                                       SDValue RHS, SelectionDAG &DAG,
9568                                       const SDLoc &dl) {
9569   unsigned OpNum = (PFEntry >> 26) & 0x0F;
9570   unsigned LHSID = (PFEntry >> 13) & ((1 << 13)-1);
9571   unsigned RHSID = (PFEntry >>  0) & ((1 << 13)-1);
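
  // For OP_COPY, the 13-bit IDs appear to encode four shuffle element indices
  // as base-9 digits, so the checks below compare against
  // <0,1,2,3> = (1*9+2)*9+3 (copy LHS) and <4,5,6,7> = ((4*9+5)*9+6)*9+7
  // (copy RHS).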
9572 
9573   enum {
9574     OP_COPY = 0,  // Copy, used for things like <u,u,u,3> to say it is <0,1,2,3>
9575     OP_VMRGHW,
9576     OP_VMRGLW,
9577     OP_VSPLTISW0,
9578     OP_VSPLTISW1,
9579     OP_VSPLTISW2,
9580     OP_VSPLTISW3,
9581     OP_VSLDOI4,
9582     OP_VSLDOI8,
9583     OP_VSLDOI12
9584   };
9585 
9586   if (OpNum == OP_COPY) {
9587     if (LHSID == (1*9+2)*9+3) return LHS;
9588     assert(LHSID == ((4*9+5)*9+6)*9+7 && "Illegal OP_COPY!");
9589     return RHS;
9590   }
9591 
9592   SDValue OpLHS, OpRHS;
9593   OpLHS = GeneratePerfectShuffle(PerfectShuffleTable[LHSID], LHS, RHS, DAG, dl);
9594   OpRHS = GeneratePerfectShuffle(PerfectShuffleTable[RHSID], LHS, RHS, DAG, dl);
9595 
9596   int ShufIdxs[16];
9597   switch (OpNum) {
9598   default: llvm_unreachable("Unknown i32 permute!");
9599   case OP_VMRGHW:
9600     ShufIdxs[ 0] =  0; ShufIdxs[ 1] =  1; ShufIdxs[ 2] =  2; ShufIdxs[ 3] =  3;
9601     ShufIdxs[ 4] = 16; ShufIdxs[ 5] = 17; ShufIdxs[ 6] = 18; ShufIdxs[ 7] = 19;
9602     ShufIdxs[ 8] =  4; ShufIdxs[ 9] =  5; ShufIdxs[10] =  6; ShufIdxs[11] =  7;
9603     ShufIdxs[12] = 20; ShufIdxs[13] = 21; ShufIdxs[14] = 22; ShufIdxs[15] = 23;
9604     break;
9605   case OP_VMRGLW:
9606     ShufIdxs[ 0] =  8; ShufIdxs[ 1] =  9; ShufIdxs[ 2] = 10; ShufIdxs[ 3] = 11;
9607     ShufIdxs[ 4] = 24; ShufIdxs[ 5] = 25; ShufIdxs[ 6] = 26; ShufIdxs[ 7] = 27;
9608     ShufIdxs[ 8] = 12; ShufIdxs[ 9] = 13; ShufIdxs[10] = 14; ShufIdxs[11] = 15;
9609     ShufIdxs[12] = 28; ShufIdxs[13] = 29; ShufIdxs[14] = 30; ShufIdxs[15] = 31;
9610     break;
9611   case OP_VSPLTISW0:
9612     for (unsigned i = 0; i != 16; ++i)
9613       ShufIdxs[i] = (i&3)+0;
9614     break;
9615   case OP_VSPLTISW1:
9616     for (unsigned i = 0; i != 16; ++i)
9617       ShufIdxs[i] = (i&3)+4;
9618     break;
9619   case OP_VSPLTISW2:
9620     for (unsigned i = 0; i != 16; ++i)
9621       ShufIdxs[i] = (i&3)+8;
9622     break;
9623   case OP_VSPLTISW3:
9624     for (unsigned i = 0; i != 16; ++i)
9625       ShufIdxs[i] = (i&3)+12;
9626     break;
9627   case OP_VSLDOI4:
9628     return BuildVSLDOI(OpLHS, OpRHS, 4, OpLHS.getValueType(), DAG, dl);
9629   case OP_VSLDOI8:
9630     return BuildVSLDOI(OpLHS, OpRHS, 8, OpLHS.getValueType(), DAG, dl);
9631   case OP_VSLDOI12:
9632     return BuildVSLDOI(OpLHS, OpRHS, 12, OpLHS.getValueType(), DAG, dl);
9633   }
9634   EVT VT = OpLHS.getValueType();
9635   OpLHS = DAG.getNode(ISD::BITCAST, dl, MVT::v16i8, OpLHS);
9636   OpRHS = DAG.getNode(ISD::BITCAST, dl, MVT::v16i8, OpRHS);
9637   SDValue T = DAG.getVectorShuffle(MVT::v16i8, dl, OpLHS, OpRHS, ShufIdxs);
9638   return DAG.getNode(ISD::BITCAST, dl, VT, T);
9639 }
9640 
9641 /// lowerToVINSERTB - Return the SDValue if this VECTOR_SHUFFLE can be handled
9642 /// by the VINSERTB instruction introduced in ISA 3.0, else just return default
9643 /// SDValue.
9644 SDValue PPCTargetLowering::lowerToVINSERTB(ShuffleVectorSDNode *N,
9645                                            SelectionDAG &DAG) const {
9646   const unsigned BytesInVector = 16;
9647   bool IsLE = Subtarget.isLittleEndian();
9648   SDLoc dl(N);
9649   SDValue V1 = N->getOperand(0);
9650   SDValue V2 = N->getOperand(1);
9651   unsigned ShiftElts = 0, InsertAtByte = 0;
9652   bool Swap = false;
9653 
9654   // Shifts required to get the byte we want at element 7.
9655   unsigned LittleEndianShifts[] = {8, 7,  6,  5,  4,  3,  2,  1,
9656                                    0, 15, 14, 13, 12, 11, 10, 9};
9657   unsigned BigEndianShifts[] = {9, 10, 11, 12, 13, 14, 15, 0,
9658                                 1, 2,  3,  4,  5,  6,  7,  8};
9659 
9660   ArrayRef<int> Mask = N->getMask();
9661   int OriginalOrder[] = {0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15};
9662 
9663   // For each mask element, find out if we're just inserting something
9664   // from V2 into V1 or vice versa.
9665   // Possible permutations inserting an element from V2 into V1:
9666   //   X, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15
9667   //   0, X, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15
9668   //   ...
9669   //   0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, X
9670   // Inserting from V1 into V2 will be similar, except mask range will be
9671   // [16,31].
9672 
9673   bool FoundCandidate = false;
9674   // If both vector operands for the shuffle are the same vector, the mask
9675   // will contain only elements from the first one and the second one will be
9676   // undef.
9677   unsigned VINSERTBSrcElem = IsLE ? 8 : 7;
9678   // Go through the mask of half-words to find an element that's being moved
9679   // from one vector to the other.
9680   for (unsigned i = 0; i < BytesInVector; ++i) {
9681     unsigned CurrentElement = Mask[i];
    // If the 2nd operand is undefined, we should only look for the source
    // element (7 for big-endian, 8 for little-endian) in the Mask.
9684     if (V2.isUndef() && CurrentElement != VINSERTBSrcElem)
9685       continue;
9686 
9687     bool OtherElementsInOrder = true;
9688     // Examine the other elements in the Mask to see if they're in original
9689     // order.
9690     for (unsigned j = 0; j < BytesInVector; ++j) {
9691       if (j == i)
9692         continue;
      // If CurrentElement is from V1 [0,15], then we expect the rest of the
      // Mask to be from V2 [16,31] and vice versa, unless the 2nd operand is
      // undefined, in which case we always assume we're picking from the 1st
      // operand.
9696       int MaskOffset =
9697           (!V2.isUndef() && CurrentElement < BytesInVector) ? BytesInVector : 0;
9698       if (Mask[j] != OriginalOrder[j] + MaskOffset) {
9699         OtherElementsInOrder = false;
9700         break;
9701       }
9702     }
9703     // If other elements are in original order, we record the number of shifts
9704     // we need to get the element we want into element 7. Also record which byte
9705     // in the vector we should insert into.
9706     if (OtherElementsInOrder) {
9707       // If 2nd operand is undefined, we assume no shifts and no swapping.
9708       if (V2.isUndef()) {
9709         ShiftElts = 0;
9710         Swap = false;
9711       } else {
        // We only need the last 4 bits of CurrentElement to index the shift
        // tables; whether the element comes from V1 or V2 is handled by the
        // Swap flag below.
9713         ShiftElts = IsLE ? LittleEndianShifts[CurrentElement & 0xF]
9714                          : BigEndianShifts[CurrentElement & 0xF];
9715         Swap = CurrentElement < BytesInVector;
9716       }
9717       InsertAtByte = IsLE ? BytesInVector - (i + 1) : i;
9718       FoundCandidate = true;
9719       break;
9720     }
9721   }
9722 
9723   if (!FoundCandidate)
9724     return SDValue();
9725 
9726   // Candidate found, construct the proper SDAG sequence with VINSERTB,
9727   // optionally with VECSHL if shift is required.
9728   if (Swap)
9729     std::swap(V1, V2);
9730   if (V2.isUndef())
9731     V2 = V1;
9732   if (ShiftElts) {
9733     SDValue Shl = DAG.getNode(PPCISD::VECSHL, dl, MVT::v16i8, V2, V2,
9734                               DAG.getConstant(ShiftElts, dl, MVT::i32));
9735     return DAG.getNode(PPCISD::VECINSERT, dl, MVT::v16i8, V1, Shl,
9736                        DAG.getConstant(InsertAtByte, dl, MVT::i32));
9737   }
9738   return DAG.getNode(PPCISD::VECINSERT, dl, MVT::v16i8, V1, V2,
9739                      DAG.getConstant(InsertAtByte, dl, MVT::i32));
9740 }
9741 
9742 /// lowerToVINSERTH - Return the SDValue if this VECTOR_SHUFFLE can be handled
9743 /// by the VINSERTH instruction introduced in ISA 3.0, else just return default
9744 /// SDValue.
9745 SDValue PPCTargetLowering::lowerToVINSERTH(ShuffleVectorSDNode *N,
9746                                            SelectionDAG &DAG) const {
9747   const unsigned NumHalfWords = 8;
9748   const unsigned BytesInVector = NumHalfWords * 2;
9749   // Check that the shuffle is on half-words.
9750   if (!isNByteElemShuffleMask(N, 2, 1))
9751     return SDValue();
9752 
9753   bool IsLE = Subtarget.isLittleEndian();
9754   SDLoc dl(N);
9755   SDValue V1 = N->getOperand(0);
9756   SDValue V2 = N->getOperand(1);
9757   unsigned ShiftElts = 0, InsertAtByte = 0;
9758   bool Swap = false;
9759 
9760   // Shifts required to get the half-word we want at element 3.
9761   unsigned LittleEndianShifts[] = {4, 3, 2, 1, 0, 7, 6, 5};
9762   unsigned BigEndianShifts[] = {5, 6, 7, 0, 1, 2, 3, 4};
9763 
9764   uint32_t Mask = 0;
9765   uint32_t OriginalOrderLow = 0x1234567;
9766   uint32_t OriginalOrderHigh = 0x89ABCDEF;
  // Now we look at mask elements 0,2,4,6,8,10,12,14.  Pack the mask into a
  // 32-bit space, since we only need a 4-bit nibble per element.
9769   for (unsigned i = 0; i < NumHalfWords; ++i) {
9770     unsigned MaskShift = (NumHalfWords - 1 - i) * 4;
9771     Mask |= ((uint32_t)(N->getMaskElt(i * 2) / 2) << MaskShift);
9772   }
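  // For example, an identity shuffle (byte mask <0,1,...,15>) packs to
  // Mask == 0x01234567, matching OriginalOrderLow.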
9773 
9774   // For each mask element, find out if we're just inserting something
9775   // from V2 into V1 or vice versa.  Possible permutations inserting an element
9776   // from V2 into V1:
9777   //   X, 1, 2, 3, 4, 5, 6, 7
9778   //   0, X, 2, 3, 4, 5, 6, 7
9779   //   0, 1, X, 3, 4, 5, 6, 7
9780   //   0, 1, 2, X, 4, 5, 6, 7
9781   //   0, 1, 2, 3, X, 5, 6, 7
9782   //   0, 1, 2, 3, 4, X, 6, 7
9783   //   0, 1, 2, 3, 4, 5, X, 7
9784   //   0, 1, 2, 3, 4, 5, 6, X
9785   // Inserting from V1 into V2 will be similar, except mask range will be [8,15].
9786 
9787   bool FoundCandidate = false;
9788   // Go through the mask of half-words to find an element that's being moved
9789   // from one vector to the other.
9790   for (unsigned i = 0; i < NumHalfWords; ++i) {
9791     unsigned MaskShift = (NumHalfWords - 1 - i) * 4;
9792     uint32_t MaskOneElt = (Mask >> MaskShift) & 0xF;
9793     uint32_t MaskOtherElts = ~(0xF << MaskShift);
9794     uint32_t TargetOrder = 0x0;
9795 
9796     // If both vector operands for the shuffle are the same vector, the mask
9797     // will contain only elements from the first one and the second one will be
9798     // undef.
9799     if (V2.isUndef()) {
9800       ShiftElts = 0;
9801       unsigned VINSERTHSrcElem = IsLE ? 4 : 3;
9802       TargetOrder = OriginalOrderLow;
9803       Swap = false;
      // Skip if this is not the correct element or the mask of the other
      // elements doesn't match our expected order.
9806       if (MaskOneElt == VINSERTHSrcElem &&
9807           (Mask & MaskOtherElts) == (TargetOrder & MaskOtherElts)) {
9808         InsertAtByte = IsLE ? BytesInVector - (i + 1) * 2 : i * 2;
9809         FoundCandidate = true;
9810         break;
9811       }
9812     } else { // If both operands are defined.
9813       // Target order is [8,15] if the current mask is between [0,7].
9814       TargetOrder =
9815           (MaskOneElt < NumHalfWords) ? OriginalOrderHigh : OriginalOrderLow;
      // Skip if the mask of the other elements doesn't match our expected
      // order.
9817       if ((Mask & MaskOtherElts) == (TargetOrder & MaskOtherElts)) {
9818         // We only need the last 3 bits for the number of shifts.
9819         ShiftElts = IsLE ? LittleEndianShifts[MaskOneElt & 0x7]
9820                          : BigEndianShifts[MaskOneElt & 0x7];
9821         InsertAtByte = IsLE ? BytesInVector - (i + 1) * 2 : i * 2;
9822         Swap = MaskOneElt < NumHalfWords;
9823         FoundCandidate = true;
9824         break;
9825       }
9826     }
9827   }
9828 
9829   if (!FoundCandidate)
9830     return SDValue();
9831 
9832   // Candidate found, construct the proper SDAG sequence with VINSERTH,
9833   // optionally with VECSHL if shift is required.
9834   if (Swap)
9835     std::swap(V1, V2);
9836   if (V2.isUndef())
9837     V2 = V1;
9838   SDValue Conv1 = DAG.getNode(ISD::BITCAST, dl, MVT::v8i16, V1);
9839   if (ShiftElts) {
9840     // Double ShiftElts because we're left shifting on v16i8 type.
9841     SDValue Shl = DAG.getNode(PPCISD::VECSHL, dl, MVT::v16i8, V2, V2,
9842                               DAG.getConstant(2 * ShiftElts, dl, MVT::i32));
9843     SDValue Conv2 = DAG.getNode(ISD::BITCAST, dl, MVT::v8i16, Shl);
9844     SDValue Ins = DAG.getNode(PPCISD::VECINSERT, dl, MVT::v8i16, Conv1, Conv2,
9845                               DAG.getConstant(InsertAtByte, dl, MVT::i32));
9846     return DAG.getNode(ISD::BITCAST, dl, MVT::v16i8, Ins);
9847   }
9848   SDValue Conv2 = DAG.getNode(ISD::BITCAST, dl, MVT::v8i16, V2);
9849   SDValue Ins = DAG.getNode(PPCISD::VECINSERT, dl, MVT::v8i16, Conv1, Conv2,
9850                             DAG.getConstant(InsertAtByte, dl, MVT::i32));
9851   return DAG.getNode(ISD::BITCAST, dl, MVT::v16i8, Ins);
9852 }
9853 
9854 /// lowerToXXSPLTI32DX - Return the SDValue if this VECTOR_SHUFFLE can be
9855 /// handled by the XXSPLTI32DX instruction introduced in ISA 3.1, otherwise
9856 /// return the default SDValue.
9857 SDValue PPCTargetLowering::lowerToXXSPLTI32DX(ShuffleVectorSDNode *SVN,
9858                                               SelectionDAG &DAG) const {
9859   // The LHS and RHS may be bitcasts to v16i8 as we canonicalize shuffles
9860   // to v16i8. Peek through the bitcasts to get the actual operands.
9861   SDValue LHS = peekThroughBitcasts(SVN->getOperand(0));
9862   SDValue RHS = peekThroughBitcasts(SVN->getOperand(1));
9863 
9864   auto ShuffleMask = SVN->getMask();
9865   SDValue VecShuffle(SVN, 0);
9866   SDLoc DL(SVN);
9867 
  // Check that we have a four-byte shuffle.
9869   if (!isNByteElemShuffleMask(SVN, 4, 1))
9870     return SDValue();
9871 
  // Canonicalize the RHS to be a BUILD_VECTOR when lowering to xxsplti32dx.
9873   if (RHS->getOpcode() != ISD::BUILD_VECTOR) {
9874     std::swap(LHS, RHS);
9875     VecShuffle = DAG.getCommutedVectorShuffle(*SVN);
9876     ShuffleMask = cast<ShuffleVectorSDNode>(VecShuffle)->getMask();
9877   }
9878 
9879   // Ensure that the RHS is a vector of constants.
9880   BuildVectorSDNode *BVN = dyn_cast<BuildVectorSDNode>(RHS.getNode());
9881   if (!BVN)
9882     return SDValue();
9883 
9884   // Check if RHS is a splat of 4-bytes (or smaller).
9885   APInt APSplatValue, APSplatUndef;
9886   unsigned SplatBitSize;
9887   bool HasAnyUndefs;
9888   if (!BVN->isConstantSplat(APSplatValue, APSplatUndef, SplatBitSize,
9889                             HasAnyUndefs, 0, !Subtarget.isLittleEndian()) ||
9890       SplatBitSize > 32)
9891     return SDValue();
9892 
9893   // Check that the shuffle mask matches the semantics of XXSPLTI32DX.
9894   // The instruction splats a constant C into two words of the source vector
9895   // producing { C, Unchanged, C, Unchanged } or { Unchanged, C, Unchanged, C }.
  // Thus we check that the shuffle mask is the equivalent of
9897   // <0, [4-7], 2, [4-7]> or <[4-7], 1, [4-7], 3> respectively.
9898   // Note: the check above of isNByteElemShuffleMask() ensures that the bytes
9899   // within each word are consecutive, so we only need to check the first byte.
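  // For example (in byte terms), the mask <0,1,2,3, 16,17,18,19, 8,9,10,11,
  // 24,25,26,27> keeps words 0 and 2 of the LHS and takes words 1 and 3 from
  // the constant RHS.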
9900   SDValue Index;
9901   bool IsLE = Subtarget.isLittleEndian();
9902   if ((ShuffleMask[0] == 0 && ShuffleMask[8] == 8) &&
9903       (ShuffleMask[4] % 4 == 0 && ShuffleMask[12] % 4 == 0 &&
9904        ShuffleMask[4] > 15 && ShuffleMask[12] > 15))
9905     Index = DAG.getTargetConstant(IsLE ? 0 : 1, DL, MVT::i32);
9906   else if ((ShuffleMask[4] == 4 && ShuffleMask[12] == 12) &&
9907            (ShuffleMask[0] % 4 == 0 && ShuffleMask[8] % 4 == 0 &&
9908             ShuffleMask[0] > 15 && ShuffleMask[8] > 15))
9909     Index = DAG.getTargetConstant(IsLE ? 1 : 0, DL, MVT::i32);
9910   else
9911     return SDValue();
9912 
  // If the splat is narrower than 32 bits, we need to get the 32-bit value
9914   // for XXSPLTI32DX.
9915   unsigned SplatVal = APSplatValue.getZExtValue();
9916   for (; SplatBitSize < 32; SplatBitSize <<= 1)
9917     SplatVal |= (SplatVal << SplatBitSize);
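  // For example, an 8-bit splat value of 0xAB widens to 0xABAB and then to
  // 0xABABABAB.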
9918 
9919   SDValue SplatNode = DAG.getNode(
9920       PPCISD::XXSPLTI32DX, DL, MVT::v2i64, DAG.getBitcast(MVT::v2i64, LHS),
9921       Index, DAG.getTargetConstant(SplatVal, DL, MVT::i32));
9922   return DAG.getNode(ISD::BITCAST, DL, MVT::v16i8, SplatNode);
9923 }
9924 
9925 /// LowerROTL - Custom lowering for ROTL(v1i128) to vector_shuffle(v16i8).
9926 /// We lower ROTL(v1i128) to vector_shuffle(v16i8) only if shift amount is
9927 /// a multiple of 8. Otherwise convert it to a scalar rotation(i128)
/// i.e., (or (shl x, C1), (srl x, 128-C1)).
9929 SDValue PPCTargetLowering::LowerROTL(SDValue Op, SelectionDAG &DAG) const {
9930   assert(Op.getOpcode() == ISD::ROTL && "Should only be called for ISD::ROTL");
9931   assert(Op.getValueType() == MVT::v1i128 &&
9932          "Only set v1i128 as custom, other type shouldn't reach here!");
9933   SDLoc dl(Op);
9934   SDValue N0 = peekThroughBitcasts(Op.getOperand(0));
9935   SDValue N1 = peekThroughBitcasts(Op.getOperand(1));
9936   unsigned SHLAmt = N1.getConstantOperandVal(0);
9937   if (SHLAmt % 8 == 0) {
9938     SmallVector<int, 16> Mask(16, 0);
9939     std::iota(Mask.begin(), Mask.end(), 0);
9940     std::rotate(Mask.begin(), Mask.begin() + SHLAmt / 8, Mask.end());
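    // For example, SHLAmt == 16 rotates the mask to <2, 3, ..., 15, 0, 1>,
    // i.e., a rotation by two bytes.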
9941     if (SDValue Shuffle =
9942             DAG.getVectorShuffle(MVT::v16i8, dl, DAG.getBitcast(MVT::v16i8, N0),
9943                                  DAG.getUNDEF(MVT::v16i8), Mask))
9944       return DAG.getNode(ISD::BITCAST, dl, MVT::v1i128, Shuffle);
9945   }
9946   SDValue ArgVal = DAG.getBitcast(MVT::i128, N0);
9947   SDValue SHLOp = DAG.getNode(ISD::SHL, dl, MVT::i128, ArgVal,
9948                               DAG.getConstant(SHLAmt, dl, MVT::i32));
9949   SDValue SRLOp = DAG.getNode(ISD::SRL, dl, MVT::i128, ArgVal,
9950                               DAG.getConstant(128 - SHLAmt, dl, MVT::i32));
9951   SDValue OROp = DAG.getNode(ISD::OR, dl, MVT::i128, SHLOp, SRLOp);
9952   return DAG.getNode(ISD::BITCAST, dl, MVT::v1i128, OROp);
9953 }
9954 
9955 /// LowerVECTOR_SHUFFLE - Return the code we lower for VECTOR_SHUFFLE.  If this
9956 /// is a shuffle we can handle in a single instruction, return it.  Otherwise,
9957 /// return the code it can be lowered into.  Worst case, it can always be
9958 /// lowered into a vperm.
9959 SDValue PPCTargetLowering::LowerVECTOR_SHUFFLE(SDValue Op,
9960                                                SelectionDAG &DAG) const {
9961   SDLoc dl(Op);
9962   SDValue V1 = Op.getOperand(0);
9963   SDValue V2 = Op.getOperand(1);
9964   ShuffleVectorSDNode *SVOp = cast<ShuffleVectorSDNode>(Op);
9965 
9966   // Any nodes that were combined in the target-independent combiner prior
9967   // to vector legalization will not be sent to the target combine. Try to
9968   // combine it here.
9969   if (SDValue NewShuffle = combineVectorShuffle(SVOp, DAG)) {
9970     if (!isa<ShuffleVectorSDNode>(NewShuffle))
9971       return NewShuffle;
9972     Op = NewShuffle;
9973     SVOp = cast<ShuffleVectorSDNode>(Op);
9974     V1 = Op.getOperand(0);
9975     V2 = Op.getOperand(1);
9976   }
9977   EVT VT = Op.getValueType();
9978   bool isLittleEndian = Subtarget.isLittleEndian();
9979 
9980   unsigned ShiftElts, InsertAtByte;
9981   bool Swap = false;
9982 
9983   // If this is a load-and-splat, we can do that with a single instruction
9984   // in some cases. However if the load has multiple uses, we don't want to
9985   // combine it because that will just produce multiple loads.
9986   bool IsPermutedLoad = false;
9987   const SDValue *InputLoad = getNormalLoadInput(V1, IsPermutedLoad);
9988   if (InputLoad && Subtarget.hasVSX() && V2.isUndef() &&
9989       (PPC::isSplatShuffleMask(SVOp, 4) || PPC::isSplatShuffleMask(SVOp, 8)) &&
9990       InputLoad->hasOneUse()) {
9991     bool IsFourByte = PPC::isSplatShuffleMask(SVOp, 4);
9992     int SplatIdx =
9993       PPC::getSplatIdxForPPCMnemonics(SVOp, IsFourByte ? 4 : 8, DAG);
9994 
9995     // The splat index for permuted loads will be in the left half of the vector
9996     // which is strictly wider than the loaded value by 8 bytes. So we need to
9997     // adjust the splat index to point to the correct address in memory.
9998     if (IsPermutedLoad) {
9999       assert(isLittleEndian && "Unexpected permuted load on big endian target");
10000       SplatIdx += IsFourByte ? 2 : 1;
10001       assert((SplatIdx < (IsFourByte ? 4 : 2)) &&
10002              "Splat of a value outside of the loaded memory");
10003     }
10004 
10005     LoadSDNode *LD = cast<LoadSDNode>(*InputLoad);
10006     // For 4-byte load-and-splat, we need Power9.
10007     if ((IsFourByte && Subtarget.hasP9Vector()) || !IsFourByte) {
10008       uint64_t Offset = 0;
10009       if (IsFourByte)
10010         Offset = isLittleEndian ? (3 - SplatIdx) * 4 : SplatIdx * 4;
10011       else
10012         Offset = isLittleEndian ? (1 - SplatIdx) * 8 : SplatIdx * 8;
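      // SplatIdx is in big-endian (PPC mnemonic) element order, so on
      // little-endian targets the byte offset counts from the opposite end of
      // the vector; e.g., a 4-byte splat of element 1 loads from offset
      // (3 - 1) * 4 = 8.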
10013 
10014       SDValue BasePtr = LD->getBasePtr();
10015       if (Offset != 0)
10016         BasePtr = DAG.getNode(ISD::ADD, dl, getPointerTy(DAG.getDataLayout()),
10017                               BasePtr, DAG.getIntPtrConstant(Offset, dl));
10018       SDValue Ops[] = {
10019         LD->getChain(),    // Chain
10020         BasePtr,           // BasePtr
10021         DAG.getValueType(Op.getValueType()) // VT
10022       };
10023       SDVTList VTL =
10024         DAG.getVTList(IsFourByte ? MVT::v4i32 : MVT::v2i64, MVT::Other);
10025       SDValue LdSplt =
10026         DAG.getMemIntrinsicNode(PPCISD::LD_SPLAT, dl, VTL,
10027                                 Ops, LD->getMemoryVT(), LD->getMemOperand());
10028       DAG.ReplaceAllUsesOfValueWith(InputLoad->getValue(1), LdSplt.getValue(1));
10029       if (LdSplt.getValueType() != SVOp->getValueType(0))
10030         LdSplt = DAG.getBitcast(SVOp->getValueType(0), LdSplt);
10031       return LdSplt;
10032     }
10033   }
10034   if (Subtarget.hasP9Vector() &&
10035       PPC::isXXINSERTWMask(SVOp, ShiftElts, InsertAtByte, Swap,
10036                            isLittleEndian)) {
10037     if (Swap)
10038       std::swap(V1, V2);
10039     SDValue Conv1 = DAG.getNode(ISD::BITCAST, dl, MVT::v4i32, V1);
10040     SDValue Conv2 = DAG.getNode(ISD::BITCAST, dl, MVT::v4i32, V2);
10041     if (ShiftElts) {
10042       SDValue Shl = DAG.getNode(PPCISD::VECSHL, dl, MVT::v4i32, Conv2, Conv2,
10043                                 DAG.getConstant(ShiftElts, dl, MVT::i32));
10044       SDValue Ins = DAG.getNode(PPCISD::VECINSERT, dl, MVT::v4i32, Conv1, Shl,
10045                                 DAG.getConstant(InsertAtByte, dl, MVT::i32));
10046       return DAG.getNode(ISD::BITCAST, dl, MVT::v16i8, Ins);
10047     }
10048     SDValue Ins = DAG.getNode(PPCISD::VECINSERT, dl, MVT::v4i32, Conv1, Conv2,
10049                               DAG.getConstant(InsertAtByte, dl, MVT::i32));
10050     return DAG.getNode(ISD::BITCAST, dl, MVT::v16i8, Ins);
10051   }
10052 
  if (Subtarget.hasPrefixInstrs()) {
    if (SDValue SplatInsertNode = lowerToXXSPLTI32DX(SVOp, DAG))
      return SplatInsertNode;
  }
10058 
  if (Subtarget.hasP9Altivec()) {
    if (SDValue NewISDNode = lowerToVINSERTH(SVOp, DAG))
      return NewISDNode;

    if (SDValue NewISDNode = lowerToVINSERTB(SVOp, DAG))
      return NewISDNode;
  }
10067 
10068   if (Subtarget.hasVSX() &&
10069       PPC::isXXSLDWIShuffleMask(SVOp, ShiftElts, Swap, isLittleEndian)) {
10070     if (Swap)
10071       std::swap(V1, V2);
10072     SDValue Conv1 = DAG.getNode(ISD::BITCAST, dl, MVT::v4i32, V1);
10073     SDValue Conv2 =
10074         DAG.getNode(ISD::BITCAST, dl, MVT::v4i32, V2.isUndef() ? V1 : V2);
10075 
10076     SDValue Shl = DAG.getNode(PPCISD::VECSHL, dl, MVT::v4i32, Conv1, Conv2,
10077                               DAG.getConstant(ShiftElts, dl, MVT::i32));
10078     return DAG.getNode(ISD::BITCAST, dl, MVT::v16i8, Shl);
10079   }
10080 
  if (Subtarget.hasVSX() &&
      PPC::isXXPERMDIShuffleMask(SVOp, ShiftElts, Swap, isLittleEndian)) {
10083     if (Swap)
10084       std::swap(V1, V2);
10085     SDValue Conv1 = DAG.getNode(ISD::BITCAST, dl, MVT::v2i64, V1);
10086     SDValue Conv2 =
10087         DAG.getNode(ISD::BITCAST, dl, MVT::v2i64, V2.isUndef() ? V1 : V2);
10088 
    SDValue PermDI = DAG.getNode(PPCISD::XXPERMDI, dl, MVT::v2i64, Conv1, Conv2,
                                 DAG.getConstant(ShiftElts, dl, MVT::i32));
10091     return DAG.getNode(ISD::BITCAST, dl, MVT::v16i8, PermDI);
10092   }
10093 
10094   if (Subtarget.hasP9Vector()) {
    if (PPC::isXXBRHShuffleMask(SVOp)) {
10096       SDValue Conv = DAG.getNode(ISD::BITCAST, dl, MVT::v8i16, V1);
10097       SDValue ReveHWord = DAG.getNode(ISD::BSWAP, dl, MVT::v8i16, Conv);
10098       return DAG.getNode(ISD::BITCAST, dl, MVT::v16i8, ReveHWord);
10099     } else if (PPC::isXXBRWShuffleMask(SVOp)) {
10100       SDValue Conv = DAG.getNode(ISD::BITCAST, dl, MVT::v4i32, V1);
10101       SDValue ReveWord = DAG.getNode(ISD::BSWAP, dl, MVT::v4i32, Conv);
10102       return DAG.getNode(ISD::BITCAST, dl, MVT::v16i8, ReveWord);
10103     } else if (PPC::isXXBRDShuffleMask(SVOp)) {
10104       SDValue Conv = DAG.getNode(ISD::BITCAST, dl, MVT::v2i64, V1);
10105       SDValue ReveDWord = DAG.getNode(ISD::BSWAP, dl, MVT::v2i64, Conv);
10106       return DAG.getNode(ISD::BITCAST, dl, MVT::v16i8, ReveDWord);
10107     } else if (PPC::isXXBRQShuffleMask(SVOp)) {
10108       SDValue Conv = DAG.getNode(ISD::BITCAST, dl, MVT::v1i128, V1);
10109       SDValue ReveQWord = DAG.getNode(ISD::BSWAP, dl, MVT::v1i128, Conv);
10110       return DAG.getNode(ISD::BITCAST, dl, MVT::v16i8, ReveQWord);
10111     }
10112   }
10113 
10114   if (Subtarget.hasVSX()) {
10115     if (V2.isUndef() && PPC::isSplatShuffleMask(SVOp, 4)) {
10116       int SplatIdx = PPC::getSplatIdxForPPCMnemonics(SVOp, 4, DAG);
10117 
10118       SDValue Conv = DAG.getNode(ISD::BITCAST, dl, MVT::v4i32, V1);
10119       SDValue Splat = DAG.getNode(PPCISD::XXSPLT, dl, MVT::v4i32, Conv,
10120                                   DAG.getConstant(SplatIdx, dl, MVT::i32));
10121       return DAG.getNode(ISD::BITCAST, dl, MVT::v16i8, Splat);
10122     }
10123 
10124     // Left shifts of 8 bytes are actually swaps. Convert accordingly.
10125     if (V2.isUndef() && PPC::isVSLDOIShuffleMask(SVOp, 1, DAG) == 8) {
10126       SDValue Conv = DAG.getNode(ISD::BITCAST, dl, MVT::v2f64, V1);
10127       SDValue Swap = DAG.getNode(PPCISD::SWAP_NO_CHAIN, dl, MVT::v2f64, Conv);
10128       return DAG.getNode(ISD::BITCAST, dl, MVT::v16i8, Swap);
10129     }
10130   }
10131 
10132   // Cases that are handled by instructions that take permute immediates
10133   // (such as vsplt*) should be left as VECTOR_SHUFFLE nodes so they can be
10134   // selected by the instruction selector.
10135   if (V2.isUndef()) {
10136     if (PPC::isSplatShuffleMask(SVOp, 1) ||
10137         PPC::isSplatShuffleMask(SVOp, 2) ||
10138         PPC::isSplatShuffleMask(SVOp, 4) ||
10139         PPC::isVPKUWUMShuffleMask(SVOp, 1, DAG) ||
10140         PPC::isVPKUHUMShuffleMask(SVOp, 1, DAG) ||
10141         PPC::isVSLDOIShuffleMask(SVOp, 1, DAG) != -1 ||
10142         PPC::isVMRGLShuffleMask(SVOp, 1, 1, DAG) ||
10143         PPC::isVMRGLShuffleMask(SVOp, 2, 1, DAG) ||
10144         PPC::isVMRGLShuffleMask(SVOp, 4, 1, DAG) ||
10145         PPC::isVMRGHShuffleMask(SVOp, 1, 1, DAG) ||
10146         PPC::isVMRGHShuffleMask(SVOp, 2, 1, DAG) ||
10147         PPC::isVMRGHShuffleMask(SVOp, 4, 1, DAG) ||
10148         (Subtarget.hasP8Altivec() && (
10149          PPC::isVPKUDUMShuffleMask(SVOp, 1, DAG) ||
10150          PPC::isVMRGEOShuffleMask(SVOp, true, 1, DAG) ||
10151          PPC::isVMRGEOShuffleMask(SVOp, false, 1, DAG)))) {
10152       return Op;
10153     }
10154   }
10155 
10156   // Altivec has a variety of "shuffle immediates" that take two vector inputs
10157   // and produce a fixed permutation.  If any of these match, do not lower to
10158   // VPERM.
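  // ShuffleKind 0 selects the natural big-endian form of these checks and 2
  // the swapped form used for little-endian; the unary form (1) was handled
  // above for the single-input case.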
  unsigned ShuffleKind = isLittleEndian ? 2 : 0;
10160   if (PPC::isVPKUWUMShuffleMask(SVOp, ShuffleKind, DAG) ||
10161       PPC::isVPKUHUMShuffleMask(SVOp, ShuffleKind, DAG) ||
10162       PPC::isVSLDOIShuffleMask(SVOp, ShuffleKind, DAG) != -1 ||
10163       PPC::isVMRGLShuffleMask(SVOp, 1, ShuffleKind, DAG) ||
10164       PPC::isVMRGLShuffleMask(SVOp, 2, ShuffleKind, DAG) ||
10165       PPC::isVMRGLShuffleMask(SVOp, 4, ShuffleKind, DAG) ||
10166       PPC::isVMRGHShuffleMask(SVOp, 1, ShuffleKind, DAG) ||
10167       PPC::isVMRGHShuffleMask(SVOp, 2, ShuffleKind, DAG) ||
10168       PPC::isVMRGHShuffleMask(SVOp, 4, ShuffleKind, DAG) ||
10169       (Subtarget.hasP8Altivec() && (
10170        PPC::isVPKUDUMShuffleMask(SVOp, ShuffleKind, DAG) ||
10171        PPC::isVMRGEOShuffleMask(SVOp, true, ShuffleKind, DAG) ||
10172        PPC::isVMRGEOShuffleMask(SVOp, false, ShuffleKind, DAG))))
10173     return Op;
10174 
10175   // Check to see if this is a shuffle of 4-byte values.  If so, we can use our
10176   // perfect shuffle table to emit an optimal matching sequence.
10177   ArrayRef<int> PermMask = SVOp->getMask();
10178 
10179   unsigned PFIndexes[4];
10180   bool isFourElementShuffle = true;
10181   for (unsigned i = 0; i != 4 && isFourElementShuffle; ++i) { // Element number
10182     unsigned EltNo = 8;   // Start out undef.
10183     for (unsigned j = 0; j != 4; ++j) {  // Intra-element byte.
10184       if (PermMask[i*4+j] < 0)
10185         continue;   // Undef, ignore it.
10186 
10187       unsigned ByteSource = PermMask[i*4+j];
10188       if ((ByteSource & 3) != j) {
10189         isFourElementShuffle = false;
10190         break;
10191       }
10192 
10193       if (EltNo == 8) {
10194         EltNo = ByteSource/4;
10195       } else if (EltNo != ByteSource/4) {
10196         isFourElementShuffle = false;
10197         break;
10198       }
10199     }
10200     PFIndexes[i] = EltNo;
10201   }
10202 
10203   // If this shuffle can be expressed as a shuffle of 4-byte elements, use the
10204   // perfect shuffle vector to determine if it is cost effective to do this as
10205   // discrete instructions, or whether we should use a vperm.
10206   // For now, we skip this for little endian until such time as we have a
10207   // little-endian perfect shuffle table.
10208   if (isFourElementShuffle && !isLittleEndian) {
10209     // Compute the index in the perfect shuffle table.
10210     unsigned PFTableIndex =
10211       PFIndexes[0]*9*9*9+PFIndexes[1]*9*9+PFIndexes[2]*9+PFIndexes[3];
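    // Each PFIndex is in [0, 8] (elements 0-7 of the two inputs, or 8 for
    // undef), hence the base-9 encoding of the four indices.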
10212 
10213     unsigned PFEntry = PerfectShuffleTable[PFTableIndex];
10214     unsigned Cost  = (PFEntry >> 30);
10215 
10216     // Determining when to avoid vperm is tricky.  Many things affect the cost
10217     // of vperm, particularly how many times the perm mask needs to be computed.
10218     // For example, if the perm mask can be hoisted out of a loop or is already
10219     // used (perhaps because there are multiple permutes with the same shuffle
10220     // mask?) the vperm has a cost of 1.  OTOH, hoisting the permute mask out of
10221     // the loop requires an extra register.
10222     //
10223     // As a compromise, we only emit discrete instructions if the shuffle can be
10224     // generated in 3 or fewer operations.  When we have loop information
10225     // available, if this block is within a loop, we should avoid using vperm
10226     // for 3-operation perms and use a constant pool load instead.
10227     if (Cost < 3)
10228       return GeneratePerfectShuffle(PFEntry, V1, V2, DAG, dl);
10229   }
10230 
10231   // Lower this to a VPERM(V1, V2, V3) expression, where V3 is a constant
10232   // vector that will get spilled to the constant pool.
10233   if (V2.isUndef()) V2 = V1;
10234 
  // The VECTOR_SHUFFLE mask is almost exactly what we want for vperm, except
10236   // that it is in input element units, not in bytes.  Convert now.
10237 
10238   // For little endian, the order of the input vectors is reversed, and
10239   // the permutation mask is complemented with respect to 31.  This is
10240   // necessary to produce proper semantics with the big-endian-biased vperm
10241   // instruction.
10242   EVT EltVT = V1.getValueType().getVectorElementType();
10243   unsigned BytesPerElement = EltVT.getSizeInBits()/8;
10244 
10245   SmallVector<SDValue, 16> ResultMask;
10246   for (unsigned i = 0, e = VT.getVectorNumElements(); i != e; ++i) {
10247     unsigned SrcElt = PermMask[i] < 0 ? 0 : PermMask[i];
10248 
10249     for (unsigned j = 0; j != BytesPerElement; ++j)
10250       if (isLittleEndian)
10251         ResultMask.push_back(DAG.getConstant(31 - (SrcElt*BytesPerElement + j),
10252                                              dl, MVT::i32));
10253       else
10254         ResultMask.push_back(DAG.getConstant(SrcElt*BytesPerElement + j, dl,
10255                                              MVT::i32));
10256   }
10257 
10258   ShufflesHandledWithVPERM++;
10259   SDValue VPermMask = DAG.getBuildVector(MVT::v16i8, dl, ResultMask);
10260   LLVM_DEBUG(dbgs() << "Emitting a VPERM for the following shuffle:\n");
10261   LLVM_DEBUG(SVOp->dump());
10262   LLVM_DEBUG(dbgs() << "With the following permute control vector:\n");
10263   LLVM_DEBUG(VPermMask.dump());
10264 
10265   if (isLittleEndian)
10266     return DAG.getNode(PPCISD::VPERM, dl, V1.getValueType(),
10267                        V2, V1, VPermMask);
10268   else
10269     return DAG.getNode(PPCISD::VPERM, dl, V1.getValueType(),
10270                        V1, V2, VPermMask);
10271 }
10272 
/// getVectorCompareInfo - Given an intrinsic, return false if it is not a
/// vector comparison.  If it is, return true and fill in CompareOpc/isDot with
10275 /// information about the intrinsic.
10276 static bool getVectorCompareInfo(SDValue Intrin, int &CompareOpc,
10277                                  bool &isDot, const PPCSubtarget &Subtarget) {
10278   unsigned IntrinsicID =
10279       cast<ConstantSDNode>(Intrin.getOperand(0))->getZExtValue();
10280   CompareOpc = -1;
10281   isDot = false;
10282   switch (IntrinsicID) {
10283   default:
10284     return false;
10285   // Comparison predicates.
10286   case Intrinsic::ppc_altivec_vcmpbfp_p:
10287     CompareOpc = 966;
10288     isDot = true;
10289     break;
10290   case Intrinsic::ppc_altivec_vcmpeqfp_p:
10291     CompareOpc = 198;
10292     isDot = true;
10293     break;
10294   case Intrinsic::ppc_altivec_vcmpequb_p:
10295     CompareOpc = 6;
10296     isDot = true;
10297     break;
10298   case Intrinsic::ppc_altivec_vcmpequh_p:
10299     CompareOpc = 70;
10300     isDot = true;
10301     break;
10302   case Intrinsic::ppc_altivec_vcmpequw_p:
10303     CompareOpc = 134;
10304     isDot = true;
10305     break;
10306   case Intrinsic::ppc_altivec_vcmpequd_p:
10307     if (Subtarget.hasP8Altivec()) {
10308       CompareOpc = 199;
10309       isDot = true;
10310     } else
10311       return false;
10312     break;
10313   case Intrinsic::ppc_altivec_vcmpneb_p:
10314   case Intrinsic::ppc_altivec_vcmpneh_p:
10315   case Intrinsic::ppc_altivec_vcmpnew_p:
10316   case Intrinsic::ppc_altivec_vcmpnezb_p:
10317   case Intrinsic::ppc_altivec_vcmpnezh_p:
10318   case Intrinsic::ppc_altivec_vcmpnezw_p:
10319     if (Subtarget.hasP9Altivec()) {
10320       switch (IntrinsicID) {
10321       default:
10322         llvm_unreachable("Unknown comparison intrinsic.");
10323       case Intrinsic::ppc_altivec_vcmpneb_p:
10324         CompareOpc = 7;
10325         break;
10326       case Intrinsic::ppc_altivec_vcmpneh_p:
10327         CompareOpc = 71;
10328         break;
10329       case Intrinsic::ppc_altivec_vcmpnew_p:
10330         CompareOpc = 135;
10331         break;
10332       case Intrinsic::ppc_altivec_vcmpnezb_p:
10333         CompareOpc = 263;
10334         break;
10335       case Intrinsic::ppc_altivec_vcmpnezh_p:
10336         CompareOpc = 327;
10337         break;
10338       case Intrinsic::ppc_altivec_vcmpnezw_p:
10339         CompareOpc = 391;
10340         break;
10341       }
10342       isDot = true;
10343     } else
10344       return false;
10345     break;
10346   case Intrinsic::ppc_altivec_vcmpgefp_p:
10347     CompareOpc = 454;
10348     isDot = true;
10349     break;
10350   case Intrinsic::ppc_altivec_vcmpgtfp_p:
10351     CompareOpc = 710;
10352     isDot = true;
10353     break;
10354   case Intrinsic::ppc_altivec_vcmpgtsb_p:
10355     CompareOpc = 774;
10356     isDot = true;
10357     break;
10358   case Intrinsic::ppc_altivec_vcmpgtsh_p:
10359     CompareOpc = 838;
10360     isDot = true;
10361     break;
10362   case Intrinsic::ppc_altivec_vcmpgtsw_p:
10363     CompareOpc = 902;
10364     isDot = true;
10365     break;
10366   case Intrinsic::ppc_altivec_vcmpgtsd_p:
10367     if (Subtarget.hasP8Altivec()) {
10368       CompareOpc = 967;
10369       isDot = true;
10370     } else
10371       return false;
10372     break;
10373   case Intrinsic::ppc_altivec_vcmpgtub_p:
10374     CompareOpc = 518;
10375     isDot = true;
10376     break;
10377   case Intrinsic::ppc_altivec_vcmpgtuh_p:
10378     CompareOpc = 582;
10379     isDot = true;
10380     break;
10381   case Intrinsic::ppc_altivec_vcmpgtuw_p:
10382     CompareOpc = 646;
10383     isDot = true;
10384     break;
10385   case Intrinsic::ppc_altivec_vcmpgtud_p:
10386     if (Subtarget.hasP8Altivec()) {
10387       CompareOpc = 711;
10388       isDot = true;
10389     } else
10390       return false;
10391     break;
10392 
10393   case Intrinsic::ppc_altivec_vcmpequq:
10394   case Intrinsic::ppc_altivec_vcmpgtsq:
10395   case Intrinsic::ppc_altivec_vcmpgtuq:
10396     if (!Subtarget.isISA3_1())
10397       return false;
10398     switch (IntrinsicID) {
10399     default:
10400       llvm_unreachable("Unknown comparison intrinsic.");
10401     case Intrinsic::ppc_altivec_vcmpequq:
10402       CompareOpc = 455;
10403       break;
10404     case Intrinsic::ppc_altivec_vcmpgtsq:
10405       CompareOpc = 903;
10406       break;
10407     case Intrinsic::ppc_altivec_vcmpgtuq:
10408       CompareOpc = 647;
10409       break;
10410     }
10411     break;
10412 
  // VSX predicate comparisons use the same infrastructure.
10414   case Intrinsic::ppc_vsx_xvcmpeqdp_p:
10415   case Intrinsic::ppc_vsx_xvcmpgedp_p:
10416   case Intrinsic::ppc_vsx_xvcmpgtdp_p:
10417   case Intrinsic::ppc_vsx_xvcmpeqsp_p:
10418   case Intrinsic::ppc_vsx_xvcmpgesp_p:
10419   case Intrinsic::ppc_vsx_xvcmpgtsp_p:
10420     if (Subtarget.hasVSX()) {
10421       switch (IntrinsicID) {
10422       case Intrinsic::ppc_vsx_xvcmpeqdp_p:
10423         CompareOpc = 99;
10424         break;
10425       case Intrinsic::ppc_vsx_xvcmpgedp_p:
10426         CompareOpc = 115;
10427         break;
10428       case Intrinsic::ppc_vsx_xvcmpgtdp_p:
10429         CompareOpc = 107;
10430         break;
10431       case Intrinsic::ppc_vsx_xvcmpeqsp_p:
10432         CompareOpc = 67;
10433         break;
10434       case Intrinsic::ppc_vsx_xvcmpgesp_p:
10435         CompareOpc = 83;
10436         break;
10437       case Intrinsic::ppc_vsx_xvcmpgtsp_p:
10438         CompareOpc = 75;
10439         break;
10440       }
10441       isDot = true;
10442     } else
10443       return false;
10444     break;
10445 
10446   // Normal Comparisons.
10447   case Intrinsic::ppc_altivec_vcmpbfp:
10448     CompareOpc = 966;
10449     break;
10450   case Intrinsic::ppc_altivec_vcmpeqfp:
10451     CompareOpc = 198;
10452     break;
10453   case Intrinsic::ppc_altivec_vcmpequb:
10454     CompareOpc = 6;
10455     break;
10456   case Intrinsic::ppc_altivec_vcmpequh:
10457     CompareOpc = 70;
10458     break;
10459   case Intrinsic::ppc_altivec_vcmpequw:
10460     CompareOpc = 134;
10461     break;
10462   case Intrinsic::ppc_altivec_vcmpequd:
10463     if (Subtarget.hasP8Altivec())
10464       CompareOpc = 199;
10465     else
10466       return false;
10467     break;
10468   case Intrinsic::ppc_altivec_vcmpneb:
10469   case Intrinsic::ppc_altivec_vcmpneh:
10470   case Intrinsic::ppc_altivec_vcmpnew:
10471   case Intrinsic::ppc_altivec_vcmpnezb:
10472   case Intrinsic::ppc_altivec_vcmpnezh:
10473   case Intrinsic::ppc_altivec_vcmpnezw:
10474     if (Subtarget.hasP9Altivec())
10475       switch (IntrinsicID) {
10476       default:
10477         llvm_unreachable("Unknown comparison intrinsic.");
10478       case Intrinsic::ppc_altivec_vcmpneb:
10479         CompareOpc = 7;
10480         break;
10481       case Intrinsic::ppc_altivec_vcmpneh:
10482         CompareOpc = 71;
10483         break;
10484       case Intrinsic::ppc_altivec_vcmpnew:
10485         CompareOpc = 135;
10486         break;
10487       case Intrinsic::ppc_altivec_vcmpnezb:
10488         CompareOpc = 263;
10489         break;
10490       case Intrinsic::ppc_altivec_vcmpnezh:
10491         CompareOpc = 327;
10492         break;
10493       case Intrinsic::ppc_altivec_vcmpnezw:
10494         CompareOpc = 391;
10495         break;
10496       }
10497     else
10498       return false;
10499     break;
10500   case Intrinsic::ppc_altivec_vcmpgefp:
10501     CompareOpc = 454;
10502     break;
10503   case Intrinsic::ppc_altivec_vcmpgtfp:
10504     CompareOpc = 710;
10505     break;
10506   case Intrinsic::ppc_altivec_vcmpgtsb:
10507     CompareOpc = 774;
10508     break;
10509   case Intrinsic::ppc_altivec_vcmpgtsh:
10510     CompareOpc = 838;
10511     break;
10512   case Intrinsic::ppc_altivec_vcmpgtsw:
10513     CompareOpc = 902;
10514     break;
10515   case Intrinsic::ppc_altivec_vcmpgtsd:
10516     if (Subtarget.hasP8Altivec())
10517       CompareOpc = 967;
10518     else
10519       return false;
10520     break;
10521   case Intrinsic::ppc_altivec_vcmpgtub:
10522     CompareOpc = 518;
10523     break;
10524   case Intrinsic::ppc_altivec_vcmpgtuh:
10525     CompareOpc = 582;
10526     break;
10527   case Intrinsic::ppc_altivec_vcmpgtuw:
10528     CompareOpc = 646;
10529     break;
10530   case Intrinsic::ppc_altivec_vcmpgtud:
10531     if (Subtarget.hasP8Altivec())
10532       CompareOpc = 711;
10533     else
10534       return false;
10535     break;
10536   case Intrinsic::ppc_altivec_vcmpequq_p:
10537   case Intrinsic::ppc_altivec_vcmpgtsq_p:
10538   case Intrinsic::ppc_altivec_vcmpgtuq_p:
10539     if (!Subtarget.isISA3_1())
10540       return false;
10541     switch (IntrinsicID) {
10542     default:
10543       llvm_unreachable("Unknown comparison intrinsic.");
10544     case Intrinsic::ppc_altivec_vcmpequq_p:
10545       CompareOpc = 455;
10546       break;
10547     case Intrinsic::ppc_altivec_vcmpgtsq_p:
10548       CompareOpc = 903;
10549       break;
10550     case Intrinsic::ppc_altivec_vcmpgtuq_p:
10551       CompareOpc = 647;
10552       break;
10553     }
10554     isDot = true;
10555     break;
10556   }
10557   return true;
10558 }
10559 
10560 /// LowerINTRINSIC_WO_CHAIN - If this is an intrinsic that we want to custom
10561 /// lower, do it, otherwise return null.
10562 SDValue PPCTargetLowering::LowerINTRINSIC_WO_CHAIN(SDValue Op,
10563                                                    SelectionDAG &DAG) const {
10564   unsigned IntrinsicID =
10565     cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue();
10566 
10567   SDLoc dl(Op);
10568 
10569   switch (IntrinsicID) {
10570   case Intrinsic::thread_pointer:
10571     // Reads the thread pointer register, used for __builtin_thread_pointer.
10572     if (Subtarget.isPPC64())
10573       return DAG.getRegister(PPC::X13, MVT::i64);
10574     return DAG.getRegister(PPC::R2, MVT::i32);
10575 
10576   case Intrinsic::ppc_mma_disassemble_acc:
10577   case Intrinsic::ppc_mma_disassemble_pair: {
10578     int NumVecs = 2;
10579     SDValue WideVec = Op.getOperand(1);
10580     if (IntrinsicID == Intrinsic::ppc_mma_disassemble_acc) {
10581       NumVecs = 4;
10582       WideVec = DAG.getNode(PPCISD::XXMFACC, dl, MVT::v512i1, WideVec);
10583     }
10584     SmallVector<SDValue, 4> RetOps;
10585     for (int VecNo = 0; VecNo < NumVecs; VecNo++) {
10586       SDValue Extract = DAG.getNode(
10587           PPCISD::EXTRACT_VSX_REG, dl, MVT::v16i8, WideVec,
10588           DAG.getConstant(Subtarget.isLittleEndian() ? NumVecs - 1 - VecNo
10589                                                      : VecNo,
10590                           dl, MVT::i64));
10591       RetOps.push_back(Extract);
10592     }
10593     return DAG.getMergeValues(RetOps, dl);
10594   }
10595   }
10596 
10597   // If this is a lowered altivec predicate compare, CompareOpc is set to the
10598   // opcode number of the comparison.
10599   int CompareOpc;
10600   bool isDot;
10601   if (!getVectorCompareInfo(Op, CompareOpc, isDot, Subtarget))
10602     return SDValue();    // Don't custom lower most intrinsics.
10603 
10604   // If this is a non-dot comparison, make the VCMP node and we are done.
10605   if (!isDot) {
10606     SDValue Tmp = DAG.getNode(PPCISD::VCMP, dl, Op.getOperand(2).getValueType(),
10607                               Op.getOperand(1), Op.getOperand(2),
10608                               DAG.getConstant(CompareOpc, dl, MVT::i32));
10609     return DAG.getNode(ISD::BITCAST, dl, Op.getValueType(), Tmp);
10610   }
10611 
10612   // Create the PPCISD altivec 'dot' comparison node.
10613   SDValue Ops[] = {
10614     Op.getOperand(2),  // LHS
10615     Op.getOperand(3),  // RHS
10616     DAG.getConstant(CompareOpc, dl, MVT::i32)
10617   };
10618   EVT VTs[] = { Op.getOperand(2).getValueType(), MVT::Glue };
10619   SDValue CompNode = DAG.getNode(PPCISD::VCMP_rec, dl, VTs, Ops);
10620 
10621   // Now that we have the comparison, emit a copy from the CR to a GPR.
10622   // This is flagged to the above dot comparison.
10623   SDValue Flags = DAG.getNode(PPCISD::MFOCRF, dl, MVT::i32,
10624                                 DAG.getRegister(PPC::CR6, MVT::i32),
10625                                 CompNode.getValue(1));
10626 
10627   // Unpack the result based on how the target uses it.
10628   unsigned BitNo;   // Bit # of CR6.
10629   bool InvertBit;   // Invert result?
10630   switch (cast<ConstantSDNode>(Op.getOperand(1))->getZExtValue()) {
10631   default:  // Can't happen, don't crash on invalid number though.
10632   case 0:   // Return the value of the EQ bit of CR6.
10633     BitNo = 0; InvertBit = false;
10634     break;
10635   case 1:   // Return the inverted value of the EQ bit of CR6.
10636     BitNo = 0; InvertBit = true;
10637     break;
10638   case 2:   // Return the value of the LT bit of CR6.
10639     BitNo = 2; InvertBit = false;
10640     break;
10641   case 3:   // Return the inverted value of the LT bit of CR6.
10642     BitNo = 2; InvertBit = true;
10643     break;
10644   }
10645 
10646   // Shift the bit into the low position.
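  // MFOCRF places the CR6 field in bits 4-7 of the result (counting from the
  // LSB): LT at bit 7, GT at 6, EQ at 5, SO at 4. So the shift amount
  // 8 - (3 - BitNo) moves the selected EQ or LT bit down to bit 0.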
10647   Flags = DAG.getNode(ISD::SRL, dl, MVT::i32, Flags,
10648                       DAG.getConstant(8 - (3 - BitNo), dl, MVT::i32));
10649   // Isolate the bit.
10650   Flags = DAG.getNode(ISD::AND, dl, MVT::i32, Flags,
10651                       DAG.getConstant(1, dl, MVT::i32));
10652 
10653   // If we are supposed to, toggle the bit.
10654   if (InvertBit)
10655     Flags = DAG.getNode(ISD::XOR, dl, MVT::i32, Flags,
10656                         DAG.getConstant(1, dl, MVT::i32));
10657   return Flags;
10658 }
10659 
10660 SDValue PPCTargetLowering::LowerINTRINSIC_VOID(SDValue Op,
10661                                                SelectionDAG &DAG) const {
10662   // SelectionDAGBuilder::visitTargetIntrinsic may insert one extra chain to
10663   // the beginning of the argument list.
10664   int ArgStart = isa<ConstantSDNode>(Op.getOperand(0)) ? 0 : 1;
10665   SDLoc DL(Op);
10666   switch (cast<ConstantSDNode>(Op.getOperand(ArgStart))->getZExtValue()) {
10667   case Intrinsic::ppc_cfence: {
10668     assert(ArgStart == 1 && "llvm.ppc.cfence must carry a chain argument.");
10669     assert(Subtarget.isPPC64() && "Only 64-bit is supported for now.");
10670     return SDValue(DAG.getMachineNode(PPC::CFENCE8, DL, MVT::Other,
10671                                       DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i64,
10672                                                   Op.getOperand(ArgStart + 1)),
10673                                       Op.getOperand(0)),
10674                    0);
10675   }
10676   default:
10677     break;
10678   }
10679   return SDValue();
10680 }
10681 
10682 // Lower scalar BSWAP64 to xxbrd.
10683 SDValue PPCTargetLowering::LowerBSWAP(SDValue Op, SelectionDAG &DAG) const {
10684   SDLoc dl(Op);
10685   // MTVSRDD
10686   Op = DAG.getNode(ISD::BUILD_VECTOR, dl, MVT::v2i64, Op.getOperand(0),
10687                    Op.getOperand(0));
10688   // XXBRD
10689   Op = DAG.getNode(ISD::BSWAP, dl, MVT::v2i64, Op);
10690   // MFVSRD
10691   int VectorIndex = 0;
10692   if (Subtarget.isLittleEndian())
10693     VectorIndex = 1;
10694   Op = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, MVT::i64, Op,
10695                    DAG.getTargetConstant(VectorIndex, dl, MVT::i32));
10696   return Op;
10697 }
10698 
10699 // ATOMIC_CMP_SWAP for i8/i16 needs to zero-extend its input since it will be
10700 // compared to a value that is atomically loaded (atomic loads zero-extend).
10701 SDValue PPCTargetLowering::LowerATOMIC_CMP_SWAP(SDValue Op,
10702                                                 SelectionDAG &DAG) const {
10703   assert(Op.getOpcode() == ISD::ATOMIC_CMP_SWAP &&
10704          "Expecting an atomic compare-and-swap here.");
10705   SDLoc dl(Op);
10706   auto *AtomicNode = cast<AtomicSDNode>(Op.getNode());
10707   EVT MemVT = AtomicNode->getMemoryVT();
10708   if (MemVT.getSizeInBits() >= 32)
10709     return Op;
10710 
10711   SDValue CmpOp = Op.getOperand(2);
10712   // If this is already correctly zero-extended, leave it alone.
10713   auto HighBits = APInt::getHighBitsSet(32, 32 - MemVT.getSizeInBits());
10714   if (DAG.MaskedValueIsZero(CmpOp, HighBits))
10715     return Op;
10716 
10717   // Clear the high bits of the compare operand.
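  // (The mask is 0xFF for i8 and 0xFFFF for i16.)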
10718   unsigned MaskVal = (1 << MemVT.getSizeInBits()) - 1;
10719   SDValue NewCmpOp =
10720     DAG.getNode(ISD::AND, dl, MVT::i32, CmpOp,
10721                 DAG.getConstant(MaskVal, dl, MVT::i32));
10722 
10723   // Replace the existing compare operand with the properly zero-extended one.
10724   SmallVector<SDValue, 4> Ops;
10725   for (int i = 0, e = AtomicNode->getNumOperands(); i < e; i++)
10726     Ops.push_back(AtomicNode->getOperand(i));
10727   Ops[2] = NewCmpOp;
10728   MachineMemOperand *MMO = AtomicNode->getMemOperand();
10729   SDVTList Tys = DAG.getVTList(MVT::i32, MVT::Other);
10730   auto NodeTy =
10731     (MemVT == MVT::i8) ? PPCISD::ATOMIC_CMP_SWAP_8 : PPCISD::ATOMIC_CMP_SWAP_16;
10732   return DAG.getMemIntrinsicNode(NodeTy, dl, Tys, Ops, MemVT, MMO);
10733 }
10734 
10735 SDValue PPCTargetLowering::LowerSCALAR_TO_VECTOR(SDValue Op,
10736                                                  SelectionDAG &DAG) const {
10737   SDLoc dl(Op);
10738   // Create a stack slot that is 16-byte aligned.
10739   MachineFrameInfo &MFI = DAG.getMachineFunction().getFrameInfo();
10740   int FrameIdx = MFI.CreateStackObject(16, Align(16), false);
10741   EVT PtrVT = getPointerTy(DAG.getDataLayout());
10742   SDValue FIdx = DAG.getFrameIndex(FrameIdx, PtrVT);
10743 
10744   // Store the input value into Value#0 of the stack slot.
10745   SDValue Store = DAG.getStore(DAG.getEntryNode(), dl, Op.getOperand(0), FIdx,
10746                                MachinePointerInfo());
10747   // Load it out.
10748   return DAG.getLoad(Op.getValueType(), dl, Store, FIdx, MachinePointerInfo());
10749 }
10750 
10751 SDValue PPCTargetLowering::LowerINSERT_VECTOR_ELT(SDValue Op,
10752                                                   SelectionDAG &DAG) const {
10753   assert(Op.getOpcode() == ISD::INSERT_VECTOR_ELT &&
10754          "Should only be called for ISD::INSERT_VECTOR_ELT");
10755 
10756   ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op.getOperand(2));
10757   // We have legal lowering for constant indices but not for variable ones.
10758   if (!C)
10759     return SDValue();
10760 
10761   EVT VT = Op.getValueType();
10762   SDLoc dl(Op);
10763   SDValue V1 = Op.getOperand(0);
10764   SDValue V2 = Op.getOperand(1);
10765   // We can use MTVSRZ + VECINSERT for v8i16 and v16i8 types.
10766   if (VT == MVT::v8i16 || VT == MVT::v16i8) {
10767     SDValue Mtvsrz = DAG.getNode(PPCISD::MTVSRZ, dl, VT, V2);
10768     unsigned BytesInEachElement = VT.getVectorElementType().getSizeInBits() / 8;
10769     unsigned InsertAtElement = C->getZExtValue();
10770     unsigned InsertAtByte = InsertAtElement * BytesInEachElement;
10771     if (Subtarget.isLittleEndian()) {
10772       InsertAtByte = (16 - BytesInEachElement) - InsertAtByte;
10773     }
10774     return DAG.getNode(PPCISD::VECINSERT, dl, VT, V1, Mtvsrz,
10775                        DAG.getConstant(InsertAtByte, dl, MVT::i32));
10776   }
10777   return Op;
10778 }
10779 
10780 SDValue PPCTargetLowering::LowerVectorLoad(SDValue Op,
10781                                            SelectionDAG &DAG) const {
10782   SDLoc dl(Op);
10783   LoadSDNode *LN = cast<LoadSDNode>(Op.getNode());
10784   SDValue LoadChain = LN->getChain();
10785   SDValue BasePtr = LN->getBasePtr();
10786   EVT VT = Op.getValueType();
10787 
10788   if (VT != MVT::v256i1 && VT != MVT::v512i1)
10789     return Op;
10790 
  // Type v256i1 is used for pairs and v512i1 is used for accumulators.
  // Here we create 2 or 4 v16i8 loads to load the pair or accumulator value
  // into 2 or 4 VSX registers.
10794   assert((VT != MVT::v512i1 || Subtarget.hasMMA()) &&
10795          "Type unsupported without MMA");
10796   assert((VT != MVT::v256i1 || Subtarget.pairedVectorMemops()) &&
10797          "Type unsupported without paired vector support");
10798   Align Alignment = LN->getAlign();
10799   SmallVector<SDValue, 4> Loads;
10800   SmallVector<SDValue, 4> LoadChains;
10801   unsigned NumVecs = VT.getSizeInBits() / 128;
10802   for (unsigned Idx = 0; Idx < NumVecs; ++Idx) {
10803     SDValue Load =
10804         DAG.getLoad(MVT::v16i8, dl, LoadChain, BasePtr,
10805                     LN->getPointerInfo().getWithOffset(Idx * 16),
10806                     commonAlignment(Alignment, Idx * 16),
10807                     LN->getMemOperand()->getFlags(), LN->getAAInfo());
10808     BasePtr = DAG.getNode(ISD::ADD, dl, BasePtr.getValueType(), BasePtr,
10809                           DAG.getConstant(16, dl, BasePtr.getValueType()));
10810     Loads.push_back(Load);
10811     LoadChains.push_back(Load.getValue(1));
10812   }
10813   if (Subtarget.isLittleEndian()) {
10814     std::reverse(Loads.begin(), Loads.end());
10815     std::reverse(LoadChains.begin(), LoadChains.end());
10816   }
10817   SDValue TF = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, LoadChains);
10818   SDValue Value =
10819       DAG.getNode(VT == MVT::v512i1 ? PPCISD::ACC_BUILD : PPCISD::PAIR_BUILD,
10820                   dl, VT, Loads);
10821   SDValue RetOps[] = {Value, TF};
10822   return DAG.getMergeValues(RetOps, dl);
10823 }
10824 
10825 SDValue PPCTargetLowering::LowerVectorStore(SDValue Op,
10826                                             SelectionDAG &DAG) const {
10827   SDLoc dl(Op);
10828   StoreSDNode *SN = cast<StoreSDNode>(Op.getNode());
10829   SDValue StoreChain = SN->getChain();
10830   SDValue BasePtr = SN->getBasePtr();
10831   SDValue Value = SN->getValue();
10832   EVT StoreVT = Value.getValueType();
10833 
10834   if (StoreVT != MVT::v256i1 && StoreVT != MVT::v512i1)
10835     return Op;
10836 
  // Type v256i1 is used for pairs and v512i1 is used for accumulators.
  // Here we create 2 or 4 v16i8 stores to store the pair's or accumulator's
  // underlying registers individually.
10840   assert((StoreVT != MVT::v512i1 || Subtarget.hasMMA()) &&
10841          "Type unsupported without MMA");
10842   assert((StoreVT != MVT::v256i1 || Subtarget.pairedVectorMemops()) &&
10843          "Type unsupported without paired vector support");
10844   Align Alignment = SN->getAlign();
10845   SmallVector<SDValue, 4> Stores;
10846   unsigned NumVecs = 2;
10847   if (StoreVT == MVT::v512i1) {
10848     Value = DAG.getNode(PPCISD::XXMFACC, dl, MVT::v512i1, Value);
10849     NumVecs = 4;
10850   }
10851   for (unsigned Idx = 0; Idx < NumVecs; ++Idx) {
10852     unsigned VecNum = Subtarget.isLittleEndian() ? NumVecs - 1 - Idx : Idx;
10853     SDValue Elt = DAG.getNode(PPCISD::EXTRACT_VSX_REG, dl, MVT::v16i8, Value,
10854                               DAG.getConstant(VecNum, dl, MVT::i64));
10855     SDValue Store =
10856         DAG.getStore(StoreChain, dl, Elt, BasePtr,
10857                      SN->getPointerInfo().getWithOffset(Idx * 16),
10858                      commonAlignment(Alignment, Idx * 16),
10859                      SN->getMemOperand()->getFlags(), SN->getAAInfo());
10860     BasePtr = DAG.getNode(ISD::ADD, dl, BasePtr.getValueType(), BasePtr,
10861                           DAG.getConstant(16, dl, BasePtr.getValueType()));
10862     Stores.push_back(Store);
10863   }
10864   SDValue TF = DAG.getTokenFactor(dl, Stores);
10865   return TF;
10866 }
10867 
10868 SDValue PPCTargetLowering::LowerMUL(SDValue Op, SelectionDAG &DAG) const {
10869   SDLoc dl(Op);
10870   if (Op.getValueType() == MVT::v4i32) {
10871     SDValue LHS = Op.getOperand(0), RHS = Op.getOperand(1);
10872 
10873     SDValue Zero = getCanonicalConstSplat(0, 1, MVT::v4i32, DAG, dl);
    // A splat of -16 acts as a +16 shift amount, since vector shift and
    // rotate instructions use only the low 5 bits of each element.
    SDValue Neg16 = getCanonicalConstSplat(-16, 4, MVT::v4i32, DAG, dl);
10876     SDValue RHSSwap =   // = vrlw RHS, 16
10877       BuildIntrinsicOp(Intrinsic::ppc_altivec_vrlw, RHS, Neg16, DAG, dl);
10878 
10879     // Shrinkify inputs to v8i16.
10880     LHS = DAG.getNode(ISD::BITCAST, dl, MVT::v8i16, LHS);
10881     RHS = DAG.getNode(ISD::BITCAST, dl, MVT::v8i16, RHS);
10882     RHSSwap = DAG.getNode(ISD::BITCAST, dl, MVT::v8i16, RHSSwap);
10883 
10884     // Low parts multiplied together, generating 32-bit results (we ignore the
10885     // top parts).
10886     SDValue LoProd = BuildIntrinsicOp(Intrinsic::ppc_altivec_vmulouh,
10887                                         LHS, RHS, DAG, dl, MVT::v4i32);
10888 
10889     SDValue HiProd = BuildIntrinsicOp(Intrinsic::ppc_altivec_vmsumuhm,
10890                                       LHS, RHSSwap, Zero, DAG, dl, MVT::v4i32);
10891     // Shift the high parts up 16 bits.
10892     HiProd = BuildIntrinsicOp(Intrinsic::ppc_altivec_vslw, HiProd,
10893                               Neg16, DAG, dl);
10894     return DAG.getNode(ISD::ADD, dl, MVT::v4i32, LoProd, HiProd);
10895   } else if (Op.getValueType() == MVT::v16i8) {
10896     SDValue LHS = Op.getOperand(0), RHS = Op.getOperand(1);
10897     bool isLittleEndian = Subtarget.isLittleEndian();
10898 
10899     // Multiply the even 8-bit parts, producing 16-bit sums.
10900     SDValue EvenParts = BuildIntrinsicOp(Intrinsic::ppc_altivec_vmuleub,
10901                                            LHS, RHS, DAG, dl, MVT::v8i16);
10902     EvenParts = DAG.getNode(ISD::BITCAST, dl, MVT::v16i8, EvenParts);
10903 
10904     // Multiply the odd 8-bit parts, producing 16-bit sums.
10905     SDValue OddParts = BuildIntrinsicOp(Intrinsic::ppc_altivec_vmuloub,
10906                                           LHS, RHS, DAG, dl, MVT::v8i16);
10907     OddParts = DAG.getNode(ISD::BITCAST, dl, MVT::v16i8, OddParts);
10908 
10909     // Merge the results together.  Because vmuleub and vmuloub are
10910     // instructions with a big-endian bias, we must reverse the
10911     // element numbering and reverse the meaning of "odd" and "even"
10912     // when generating little endian code.
10913     int Ops[16];
10914     for (unsigned i = 0; i != 8; ++i) {
10915       if (isLittleEndian) {
10916         Ops[i*2  ] = 2*i;
10917         Ops[i*2+1] = 2*i+16;
10918       } else {
10919         Ops[i*2  ] = 2*i+1;
10920         Ops[i*2+1] = 2*i+1+16;
10921       }
10922     }
10923     if (isLittleEndian)
10924       return DAG.getVectorShuffle(MVT::v16i8, dl, OddParts, EvenParts, Ops);
10925     else
10926       return DAG.getVectorShuffle(MVT::v16i8, dl, EvenParts, OddParts, Ops);
10927   } else {
10928     llvm_unreachable("Unknown mul to lower!");
10929   }
10930 }
10931 
// Custom lowering for fpext v2f32 to v2f64
SDValue PPCTargetLowering::LowerFP_EXTEND(SDValue Op, SelectionDAG &DAG) const {
10935   assert(Op.getOpcode() == ISD::FP_EXTEND &&
10936          "Should only be called for ISD::FP_EXTEND");
10937 
10938   // FIXME: handle extends from half precision float vectors on P9.
10939   // We only want to custom lower an extend from v2f32 to v2f64.
10940   if (Op.getValueType() != MVT::v2f64 ||
10941       Op.getOperand(0).getValueType() != MVT::v2f32)
10942     return SDValue();
10943 
10944   SDLoc dl(Op);
10945   SDValue Op0 = Op.getOperand(0);
10946 
10947   switch (Op0.getOpcode()) {
10948   default:
10949     return SDValue();
10950   case ISD::EXTRACT_SUBVECTOR: {
10951     assert(Op0.getNumOperands() == 2 &&
10952            isa<ConstantSDNode>(Op0->getOperand(1)) &&
10953            "Node should have 2 operands with second one being a constant!");
10954 
10955     if (Op0.getOperand(0).getValueType() != MVT::v4f32)
10956       return SDValue();
10957 
10958     // Custom lower is only done for high or low doubleword.
10959     int Idx = cast<ConstantSDNode>(Op0.getOperand(1))->getZExtValue();
10960     if (Idx % 2 != 0)
10961       return SDValue();
10962 
10963     // Since input is v4f32, at this point Idx is either 0 or 2.
10964     // Shift to get the doubleword position we want.
10965     int DWord = Idx >> 1;
10966 
10967     // High and low word positions are different on little endian.
10968     if (Subtarget.isLittleEndian())
10969       DWord ^= 0x1;
10970 
10971     return DAG.getNode(PPCISD::FP_EXTEND_HALF, dl, MVT::v2f64,
10972                        Op0.getOperand(0), DAG.getConstant(DWord, dl, MVT::i32));
10973   }
10974   case ISD::FADD:
10975   case ISD::FMUL:
10976   case ISD::FSUB: {
10977     SDValue NewLoad[2];
10978     for (unsigned i = 0, ie = Op0.getNumOperands(); i != ie; ++i) {
      // Ensure both inputs are loads.
10980       SDValue LdOp = Op0.getOperand(i);
10981       if (LdOp.getOpcode() != ISD::LOAD)
10982         return SDValue();
10983       // Generate new load node.
10984       LoadSDNode *LD = cast<LoadSDNode>(LdOp);
10985       SDValue LoadOps[] = {LD->getChain(), LD->getBasePtr()};
10986       NewLoad[i] = DAG.getMemIntrinsicNode(
10987           PPCISD::LD_VSX_LH, dl, DAG.getVTList(MVT::v4f32, MVT::Other), LoadOps,
10988           LD->getMemoryVT(), LD->getMemOperand());
10989     }
10990     SDValue NewOp =
10991         DAG.getNode(Op0.getOpcode(), SDLoc(Op0), MVT::v4f32, NewLoad[0],
10992                     NewLoad[1], Op0.getNode()->getFlags());
10993     return DAG.getNode(PPCISD::FP_EXTEND_HALF, dl, MVT::v2f64, NewOp,
10994                        DAG.getConstant(0, dl, MVT::i32));
10995   }
10996   case ISD::LOAD: {
10997     LoadSDNode *LD = cast<LoadSDNode>(Op0);
10998     SDValue LoadOps[] = {LD->getChain(), LD->getBasePtr()};
10999     SDValue NewLd = DAG.getMemIntrinsicNode(
11000         PPCISD::LD_VSX_LH, dl, DAG.getVTList(MVT::v4f32, MVT::Other), LoadOps,
11001         LD->getMemoryVT(), LD->getMemOperand());
11002     return DAG.getNode(PPCISD::FP_EXTEND_HALF, dl, MVT::v2f64, NewLd,
11003                        DAG.getConstant(0, dl, MVT::i32));
11004   }
11005   }
11006   llvm_unreachable("ERROR:Should return for all cases within swtich.");
11007 }
11008 
11009 /// LowerOperation - Provide custom lowering hooks for some operations.
11010 ///
11011 SDValue PPCTargetLowering::LowerOperation(SDValue Op, SelectionDAG &DAG) const {
11012   switch (Op.getOpcode()) {
11013   default: llvm_unreachable("Wasn't expecting to be able to lower this!");
11014   case ISD::ConstantPool:       return LowerConstantPool(Op, DAG);
11015   case ISD::BlockAddress:       return LowerBlockAddress(Op, DAG);
11016   case ISD::GlobalAddress:      return LowerGlobalAddress(Op, DAG);
11017   case ISD::GlobalTLSAddress:   return LowerGlobalTLSAddress(Op, DAG);
11018   case ISD::JumpTable:          return LowerJumpTable(Op, DAG);
11019   case ISD::SETCC:              return LowerSETCC(Op, DAG);
11020   case ISD::INIT_TRAMPOLINE:    return LowerINIT_TRAMPOLINE(Op, DAG);
11021   case ISD::ADJUST_TRAMPOLINE:  return LowerADJUST_TRAMPOLINE(Op, DAG);
11022 
11023   // Variable argument lowering.
11024   case ISD::VASTART:            return LowerVASTART(Op, DAG);
11025   case ISD::VAARG:              return LowerVAARG(Op, DAG);
11026   case ISD::VACOPY:             return LowerVACOPY(Op, DAG);
11027 
11028   case ISD::STACKRESTORE:       return LowerSTACKRESTORE(Op, DAG);
11029   case ISD::DYNAMIC_STACKALLOC: return LowerDYNAMIC_STACKALLOC(Op, DAG);
11030   case ISD::GET_DYNAMIC_AREA_OFFSET:
11031     return LowerGET_DYNAMIC_AREA_OFFSET(Op, DAG);
11032 
11033   // Exception handling lowering.
11034   case ISD::EH_DWARF_CFA:       return LowerEH_DWARF_CFA(Op, DAG);
11035   case ISD::EH_SJLJ_SETJMP:     return lowerEH_SJLJ_SETJMP(Op, DAG);
11036   case ISD::EH_SJLJ_LONGJMP:    return lowerEH_SJLJ_LONGJMP(Op, DAG);
11037 
11038   case ISD::LOAD:               return LowerLOAD(Op, DAG);
11039   case ISD::STORE:              return LowerSTORE(Op, DAG);
11040   case ISD::TRUNCATE:           return LowerTRUNCATE(Op, DAG);
11041   case ISD::SELECT_CC:          return LowerSELECT_CC(Op, DAG);
11042   case ISD::STRICT_FP_TO_UINT:
11043   case ISD::STRICT_FP_TO_SINT:
11044   case ISD::FP_TO_UINT:
11045   case ISD::FP_TO_SINT:         return LowerFP_TO_INT(Op, DAG, SDLoc(Op));
11046   case ISD::STRICT_UINT_TO_FP:
11047   case ISD::STRICT_SINT_TO_FP:
11048   case ISD::UINT_TO_FP:
11049   case ISD::SINT_TO_FP:         return LowerINT_TO_FP(Op, DAG);
11050   case ISD::FLT_ROUNDS_:        return LowerFLT_ROUNDS_(Op, DAG);
11051 
11052   // Lower 64-bit shifts.
11053   case ISD::SHL_PARTS:          return LowerSHL_PARTS(Op, DAG);
11054   case ISD::SRL_PARTS:          return LowerSRL_PARTS(Op, DAG);
11055   case ISD::SRA_PARTS:          return LowerSRA_PARTS(Op, DAG);
11056 
11057   case ISD::FSHL:               return LowerFunnelShift(Op, DAG);
11058   case ISD::FSHR:               return LowerFunnelShift(Op, DAG);
11059 
11060   // Vector-related lowering.
11061   case ISD::BUILD_VECTOR:       return LowerBUILD_VECTOR(Op, DAG);
11062   case ISD::VECTOR_SHUFFLE:     return LowerVECTOR_SHUFFLE(Op, DAG);
11063   case ISD::INTRINSIC_WO_CHAIN: return LowerINTRINSIC_WO_CHAIN(Op, DAG);
11064   case ISD::SCALAR_TO_VECTOR:   return LowerSCALAR_TO_VECTOR(Op, DAG);
11065   case ISD::INSERT_VECTOR_ELT:  return LowerINSERT_VECTOR_ELT(Op, DAG);
11066   case ISD::MUL:                return LowerMUL(Op, DAG);
11067   case ISD::FP_EXTEND:          return LowerFP_EXTEND(Op, DAG);
11068   case ISD::ROTL:               return LowerROTL(Op, DAG);
11069 
11070   // For counter-based loop handling.
11071   case ISD::INTRINSIC_W_CHAIN:  return SDValue();
11072 
11073   case ISD::BITCAST:            return LowerBITCAST(Op, DAG);
11074 
11075   // Frame & Return address.
11076   case ISD::RETURNADDR:         return LowerRETURNADDR(Op, DAG);
11077   case ISD::FRAMEADDR:          return LowerFRAMEADDR(Op, DAG);
11078 
11079   case ISD::INTRINSIC_VOID:
11080     return LowerINTRINSIC_VOID(Op, DAG);
11081   case ISD::BSWAP:
11082     return LowerBSWAP(Op, DAG);
11083   case ISD::ATOMIC_CMP_SWAP:
11084     return LowerATOMIC_CMP_SWAP(Op, DAG);
11085   }
11086 }
11087 
11088 void PPCTargetLowering::ReplaceNodeResults(SDNode *N,
                                           SmallVectorImpl<SDValue> &Results,
11090                                            SelectionDAG &DAG) const {
11091   SDLoc dl(N);
11092   switch (N->getOpcode()) {
11093   default:
11094     llvm_unreachable("Do not know how to custom type legalize this operation!");
11095   case ISD::READCYCLECOUNTER: {
11096     SDVTList VTs = DAG.getVTList(MVT::i32, MVT::i32, MVT::Other);
11097     SDValue RTB = DAG.getNode(PPCISD::READ_TIME_BASE, dl, VTs, N->getOperand(0));
11098 
11099     Results.push_back(
11100         DAG.getNode(ISD::BUILD_PAIR, dl, MVT::i64, RTB, RTB.getValue(1)));
11101     Results.push_back(RTB.getValue(2));
11102     break;
11103   }
11104   case ISD::INTRINSIC_W_CHAIN: {
11105     if (cast<ConstantSDNode>(N->getOperand(1))->getZExtValue() !=
11106         Intrinsic::loop_decrement)
11107       break;
11108 
11109     assert(N->getValueType(0) == MVT::i1 &&
11110            "Unexpected result type for CTR decrement intrinsic");
11111     EVT SVT = getSetCCResultType(DAG.getDataLayout(), *DAG.getContext(),
11112                                  N->getValueType(0));
11113     SDVTList VTs = DAG.getVTList(SVT, MVT::Other);
11114     SDValue NewInt = DAG.getNode(N->getOpcode(), dl, VTs, N->getOperand(0),
11115                                  N->getOperand(1));
11116 
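    // Truncate the legalized setcc-typed result back to the original i1.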
11117     Results.push_back(DAG.getNode(ISD::TRUNCATE, dl, MVT::i1, NewInt));
11118     Results.push_back(NewInt.getValue(1));
11119     break;
11120   }
11121   case ISD::VAARG: {
11122     if (!Subtarget.isSVR4ABI() || Subtarget.isPPC64())
11123       return;
11124 
11125     EVT VT = N->getValueType(0);
11126 
11127     if (VT == MVT::i64) {
11128       SDValue NewNode = LowerVAARG(SDValue(N, 1), DAG);
11129 
11130       Results.push_back(NewNode);
11131       Results.push_back(NewNode.getValue(1));
11132     }
11133     return;
11134   }
11135   case ISD::STRICT_FP_TO_SINT:
11136   case ISD::STRICT_FP_TO_UINT:
11137   case ISD::FP_TO_SINT:
11138   case ISD::FP_TO_UINT:
11139     // LowerFP_TO_INT() can only handle f32 and f64.
11140     if (N->getOperand(N->isStrictFPOpcode() ? 1 : 0).getValueType() ==
11141         MVT::ppcf128)
11142       return;
11143     Results.push_back(LowerFP_TO_INT(SDValue(N, 0), DAG, dl));
11144     return;
11145   case ISD::TRUNCATE: {
11146     if (!N->getValueType(0).isVector())
11147       return;
11148     SDValue Lowered = LowerTRUNCATEVector(SDValue(N, 0), DAG);
11149     if (Lowered)
11150       Results.push_back(Lowered);
11151     return;
11152   }
11153   case ISD::FSHL:
11154   case ISD::FSHR:
11155     // Don't handle funnel shifts here.
11156     return;
11157   case ISD::BITCAST:
11158     // Don't handle bitcast here.
11159     return;
  case ISD::FP_EXTEND: {
    SDValue Lowered = LowerFP_EXTEND(SDValue(N, 0), DAG);
    if (Lowered)
      Results.push_back(Lowered);
    return;
  }
11165   }
11166 }
11167 
11168 //===----------------------------------------------------------------------===//
11169 //  Other Lowering Code
11170 //===----------------------------------------------------------------------===//
11171 
11172 static Instruction* callIntrinsic(IRBuilder<> &Builder, Intrinsic::ID Id) {
11173   Module *M = Builder.GetInsertBlock()->getParent()->getParent();
11174   Function *Func = Intrinsic::getDeclaration(M, Id);
11175   return Builder.CreateCall(Func, {});
11176 }
11177 
// The mappings for emitLeadingFence and emitTrailingFence are taken from
11179 // http://www.cl.cam.ac.uk/~pes20/cpp/cpp0xmappings.html
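// For example, under this mapping a seq_cst atomic store is bracketed by a
// leading "sync" and a release store by a leading "lwsync"; loads additionally
// get a trailing fence from emitTrailingFence below.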
11180 Instruction *PPCTargetLowering::emitLeadingFence(IRBuilder<> &Builder,
11181                                                  Instruction *Inst,
11182                                                  AtomicOrdering Ord) const {
11183   if (Ord == AtomicOrdering::SequentiallyConsistent)
11184     return callIntrinsic(Builder, Intrinsic::ppc_sync);
11185   if (isReleaseOrStronger(Ord))
11186     return callIntrinsic(Builder, Intrinsic::ppc_lwsync);
11187   return nullptr;
11188 }
11189 
11190 Instruction *PPCTargetLowering::emitTrailingFence(IRBuilder<> &Builder,
11191                                                   Instruction *Inst,
11192                                                   AtomicOrdering Ord) const {
11193   if (Inst->hasAtomicLoad() && isAcquireOrStronger(Ord)) {
11194     // See http://www.cl.cam.ac.uk/~pes20/cpp/cpp0xmappings.html and
11195     // http://www.rdrop.com/users/paulmck/scalability/paper/N2745r.2011.03.04a.html
11196     // and http://www.cl.cam.ac.uk/~pes20/cppppc/ for justification.
11197     if (isa<LoadInst>(Inst) && Subtarget.isPPC64())
11198       return Builder.CreateCall(
11199           Intrinsic::getDeclaration(
11200               Builder.GetInsertBlock()->getParent()->getParent(),
11201               Intrinsic::ppc_cfence, {Inst->getType()}),
11202           {Inst});
11203     // FIXME: Can use isync for rmw operation.
11204     return callIntrinsic(Builder, Intrinsic::ppc_lwsync);
11205   }
11206   return nullptr;
11207 }
11208 
11209 MachineBasicBlock *
11210 PPCTargetLowering::EmitAtomicBinary(MachineInstr &MI, MachineBasicBlock *BB,
11211                                     unsigned AtomicSize,
11212                                     unsigned BinOpcode,
11213                                     unsigned CmpOpcode,
11214                                     unsigned CmpPred) const {
11215   // This also handles ATOMIC_SWAP, indicated by BinOpcode==0.
11216   const TargetInstrInfo *TII = Subtarget.getInstrInfo();
11217 
11218   auto LoadMnemonic = PPC::LDARX;
11219   auto StoreMnemonic = PPC::STDCX;
11220   switch (AtomicSize) {
11221   default:
11222     llvm_unreachable("Unexpected size of atomic entity");
11223   case 1:
11224     LoadMnemonic = PPC::LBARX;
11225     StoreMnemonic = PPC::STBCX;
    assert(Subtarget.hasPartwordAtomics() &&
           "Partword atomics are required for sizes < 4");
11227     break;
11228   case 2:
11229     LoadMnemonic = PPC::LHARX;
11230     StoreMnemonic = PPC::STHCX;
    assert(Subtarget.hasPartwordAtomics() &&
           "Partword atomics are required for sizes < 4");
11232     break;
11233   case 4:
11234     LoadMnemonic = PPC::LWARX;
11235     StoreMnemonic = PPC::STWCX;
11236     break;
11237   case 8:
11238     LoadMnemonic = PPC::LDARX;
11239     StoreMnemonic = PPC::STDCX;
11240     break;
11241   }
11242 
11243   const BasicBlock *LLVM_BB = BB->getBasicBlock();
11244   MachineFunction *F = BB->getParent();
11245   MachineFunction::iterator It = ++BB->getIterator();
11246 
11247   Register dest = MI.getOperand(0).getReg();
11248   Register ptrA = MI.getOperand(1).getReg();
11249   Register ptrB = MI.getOperand(2).getReg();
11250   Register incr = MI.getOperand(3).getReg();
11251   DebugLoc dl = MI.getDebugLoc();
11252 
11253   MachineBasicBlock *loopMBB = F->CreateMachineBasicBlock(LLVM_BB);
11254   MachineBasicBlock *loop2MBB =
11255     CmpOpcode ? F->CreateMachineBasicBlock(LLVM_BB) : nullptr;
11256   MachineBasicBlock *exitMBB = F->CreateMachineBasicBlock(LLVM_BB);
11257   F->insert(It, loopMBB);
11258   if (CmpOpcode)
11259     F->insert(It, loop2MBB);
11260   F->insert(It, exitMBB);
11261   exitMBB->splice(exitMBB->begin(), BB,
11262                   std::next(MachineBasicBlock::iterator(MI)), BB->end());
11263   exitMBB->transferSuccessorsAndUpdatePHIs(BB);
11264 
11265   MachineRegisterInfo &RegInfo = F->getRegInfo();
  Register TmpReg = (!BinOpcode) ? incr
                                 : RegInfo.createVirtualRegister(
                                       AtomicSize == 8 ? &PPC::G8RCRegClass
                                                       : &PPC::GPRCRegClass);
11269 
11270   //  thisMBB:
11271   //   ...
11272   //   fallthrough --> loopMBB
11273   BB->addSuccessor(loopMBB);
11274 
11275   //  loopMBB:
11276   //   l[wd]arx dest, ptr
11277   //   add r0, dest, incr
11278   //   st[wd]cx. r0, ptr
11279   //   bne- loopMBB
11280   //   fallthrough --> exitMBB
11281 
11282   // For max/min...
11283   //  loopMBB:
11284   //   l[wd]arx dest, ptr
11285   //   cmpl?[wd] incr, dest
11286   //   bgt exitMBB
11287   //  loop2MBB:
11288   //   st[wd]cx. dest, ptr
11289   //   bne- loopMBB
11290   //   fallthrough --> exitMBB
11291 
11292   BB = loopMBB;
11293   BuildMI(BB, dl, TII->get(LoadMnemonic), dest)
11294     .addReg(ptrA).addReg(ptrB);
11295   if (BinOpcode)
11296     BuildMI(BB, dl, TII->get(BinOpcode), TmpReg).addReg(incr).addReg(dest);
11297   if (CmpOpcode) {
11298     // Signed comparisons of byte or halfword values must be sign-extended.
11299     if (CmpOpcode == PPC::CMPW && AtomicSize < 4) {
11300       Register ExtReg = RegInfo.createVirtualRegister(&PPC::GPRCRegClass);
11301       BuildMI(BB, dl, TII->get(AtomicSize == 1 ? PPC::EXTSB : PPC::EXTSH),
11302               ExtReg).addReg(dest);
11303       BuildMI(BB, dl, TII->get(CmpOpcode), PPC::CR0)
11304         .addReg(incr).addReg(ExtReg);
11305     } else
11306       BuildMI(BB, dl, TII->get(CmpOpcode), PPC::CR0)
11307         .addReg(incr).addReg(dest);
11308 
11309     BuildMI(BB, dl, TII->get(PPC::BCC))
11310       .addImm(CmpPred).addReg(PPC::CR0).addMBB(exitMBB);
11311     BB->addSuccessor(loop2MBB);
11312     BB->addSuccessor(exitMBB);
11313     BB = loop2MBB;
11314   }
11315   BuildMI(BB, dl, TII->get(StoreMnemonic))
11316     .addReg(TmpReg).addReg(ptrA).addReg(ptrB);
11317   BuildMI(BB, dl, TII->get(PPC::BCC))
11318     .addImm(PPC::PRED_NE).addReg(PPC::CR0).addMBB(loopMBB);
11319   BB->addSuccessor(loopMBB);
11320   BB->addSuccessor(exitMBB);
11321 
11322   //  exitMBB:
11323   //   ...
11324   BB = exitMBB;
11325   return BB;
11326 }
11327 
11328 MachineBasicBlock *PPCTargetLowering::EmitPartwordAtomicBinary(
11329     MachineInstr &MI, MachineBasicBlock *BB,
11330     bool is8bit, // operation
11331     unsigned BinOpcode, unsigned CmpOpcode, unsigned CmpPred) const {
  // If we support part-word atomic mnemonics, just use them.
11333   if (Subtarget.hasPartwordAtomics())
11334     return EmitAtomicBinary(MI, BB, is8bit ? 1 : 2, BinOpcode, CmpOpcode,
11335                             CmpPred);
11336 
11337   // This also handles ATOMIC_SWAP, indicated by BinOpcode==0.
11338   const TargetInstrInfo *TII = Subtarget.getInstrInfo();
  // In 64-bit mode we have to use 64 bits for addresses, even though the
11340   // lwarx/stwcx are 32 bits.  With the 32-bit atomics we can use address
11341   // registers without caring whether they're 32 or 64, but here we're
11342   // doing actual arithmetic on the addresses.
11343   bool is64bit = Subtarget.isPPC64();
11344   bool isLittleEndian = Subtarget.isLittleEndian();
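  // ZERO/ZERO8 stand for the literal zero that r0 denotes when used as the RA
  // operand of X-form memory instructions.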
11345   unsigned ZeroReg = is64bit ? PPC::ZERO8 : PPC::ZERO;
11346 
11347   const BasicBlock *LLVM_BB = BB->getBasicBlock();
11348   MachineFunction *F = BB->getParent();
11349   MachineFunction::iterator It = ++BB->getIterator();
11350 
11351   Register dest = MI.getOperand(0).getReg();
11352   Register ptrA = MI.getOperand(1).getReg();
11353   Register ptrB = MI.getOperand(2).getReg();
11354   Register incr = MI.getOperand(3).getReg();
11355   DebugLoc dl = MI.getDebugLoc();
11356 
11357   MachineBasicBlock *loopMBB = F->CreateMachineBasicBlock(LLVM_BB);
11358   MachineBasicBlock *loop2MBB =
11359       CmpOpcode ? F->CreateMachineBasicBlock(LLVM_BB) : nullptr;
11360   MachineBasicBlock *exitMBB = F->CreateMachineBasicBlock(LLVM_BB);
11361   F->insert(It, loopMBB);
11362   if (CmpOpcode)
11363     F->insert(It, loop2MBB);
11364   F->insert(It, exitMBB);
11365   exitMBB->splice(exitMBB->begin(), BB,
11366                   std::next(MachineBasicBlock::iterator(MI)), BB->end());
11367   exitMBB->transferSuccessorsAndUpdatePHIs(BB);
11368 
11369   MachineRegisterInfo &RegInfo = F->getRegInfo();
11370   const TargetRegisterClass *RC =
11371       is64bit ? &PPC::G8RCRegClass : &PPC::GPRCRegClass;
11372   const TargetRegisterClass *GPRC = &PPC::GPRCRegClass;
11373 
11374   Register PtrReg = RegInfo.createVirtualRegister(RC);
11375   Register Shift1Reg = RegInfo.createVirtualRegister(GPRC);
11376   Register ShiftReg =
11377       isLittleEndian ? Shift1Reg : RegInfo.createVirtualRegister(GPRC);
11378   Register Incr2Reg = RegInfo.createVirtualRegister(GPRC);
11379   Register MaskReg = RegInfo.createVirtualRegister(GPRC);
11380   Register Mask2Reg = RegInfo.createVirtualRegister(GPRC);
11381   Register Mask3Reg = RegInfo.createVirtualRegister(GPRC);
11382   Register Tmp2Reg = RegInfo.createVirtualRegister(GPRC);
11383   Register Tmp3Reg = RegInfo.createVirtualRegister(GPRC);
11384   Register Tmp4Reg = RegInfo.createVirtualRegister(GPRC);
11385   Register TmpDestReg = RegInfo.createVirtualRegister(GPRC);
11386   Register Ptr1Reg;
11387   Register TmpReg =
11388       (!BinOpcode) ? Incr2Reg : RegInfo.createVirtualRegister(GPRC);
11389 
11390   //  thisMBB:
11391   //   ...
11392   //   fallthrough --> loopMBB
11393   BB->addSuccessor(loopMBB);
11394 
11395   // The 4-byte load must be aligned, while a char or short may be
11396   // anywhere in the word.  Hence all this nasty bookkeeping code.
11397   //   add ptr1, ptrA, ptrB [copy if ptrA==0]
11398   //   rlwinm shift1, ptr1, 3, 27, 28 [3, 27, 27]
11399   //   xori shift, shift1, 24 [16]
11400   //   rlwinm ptr, ptr1, 0, 0, 29
11401   //   slw incr2, incr, shift
11402   //   li mask2, 255 [li mask3, 0; ori mask2, mask3, 65535]
11403   //   slw mask, mask2, shift
11404   //  loopMBB:
11405   //   lwarx tmpDest, ptr
11406   //   add tmp, tmpDest, incr2
11407   //   andc tmp2, tmpDest, mask
11408   //   and tmp3, tmp, mask
11409   //   or tmp4, tmp3, tmp2
11410   //   stwcx. tmp4, ptr
11411   //   bne- loopMBB
11412   //   fallthrough --> exitMBB
11413   //   srw dest, tmpDest, shift
11414   if (ptrA != ZeroReg) {
11415     Ptr1Reg = RegInfo.createVirtualRegister(RC);
11416     BuildMI(BB, dl, TII->get(is64bit ? PPC::ADD8 : PPC::ADD4), Ptr1Reg)
11417         .addReg(ptrA)
11418         .addReg(ptrB);
11419   } else {
11420     Ptr1Reg = ptrB;
11421   }
  // We need to use a 32-bit subregister here to avoid a register class
  // mismatch in 64-bit mode.
11424   BuildMI(BB, dl, TII->get(PPC::RLWINM), Shift1Reg)
11425       .addReg(Ptr1Reg, 0, is64bit ? PPC::sub_32 : 0)
11426       .addImm(3)
11427       .addImm(27)
11428       .addImm(is8bit ? 28 : 27);
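  // On big-endian targets the narrow value lives at the opposite end of the
  // word, so flip the bit offset computed above.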
11429   if (!isLittleEndian)
11430     BuildMI(BB, dl, TII->get(PPC::XORI), ShiftReg)
11431         .addReg(Shift1Reg)
11432         .addImm(is8bit ? 24 : 16);
11433   if (is64bit)
11434     BuildMI(BB, dl, TII->get(PPC::RLDICR), PtrReg)
11435         .addReg(Ptr1Reg)
11436         .addImm(0)
11437         .addImm(61);
11438   else
11439     BuildMI(BB, dl, TII->get(PPC::RLWINM), PtrReg)
11440         .addReg(Ptr1Reg)
11441         .addImm(0)
11442         .addImm(0)
11443         .addImm(29);
11444   BuildMI(BB, dl, TII->get(PPC::SLW), Incr2Reg).addReg(incr).addReg(ShiftReg);
11445   if (is8bit)
11446     BuildMI(BB, dl, TII->get(PPC::LI), Mask2Reg).addImm(255);
11447   else {
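    // LI sign-extends its 16-bit immediate, so 65535 does not fit; build the
    // halfword mask as (0 | 65535) using ORI's zero-extended immediate.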
11448     BuildMI(BB, dl, TII->get(PPC::LI), Mask3Reg).addImm(0);
11449     BuildMI(BB, dl, TII->get(PPC::ORI), Mask2Reg)
11450         .addReg(Mask3Reg)
11451         .addImm(65535);
11452   }
11453   BuildMI(BB, dl, TII->get(PPC::SLW), MaskReg)
11454       .addReg(Mask2Reg)
11455       .addReg(ShiftReg);
11456 
11457   BB = loopMBB;
11458   BuildMI(BB, dl, TII->get(PPC::LWARX), TmpDestReg)
11459       .addReg(ZeroReg)
11460       .addReg(PtrReg);
11461   if (BinOpcode)
11462     BuildMI(BB, dl, TII->get(BinOpcode), TmpReg)
11463         .addReg(Incr2Reg)
11464         .addReg(TmpDestReg);
11465   BuildMI(BB, dl, TII->get(PPC::ANDC), Tmp2Reg)
11466       .addReg(TmpDestReg)
11467       .addReg(MaskReg);
11468   BuildMI(BB, dl, TII->get(PPC::AND), Tmp3Reg).addReg(TmpReg).addReg(MaskReg);
11469   if (CmpOpcode) {
11470     // For unsigned comparisons, we can directly compare the shifted values.
11471     // For signed comparisons we shift and sign extend.
11472     Register SReg = RegInfo.createVirtualRegister(GPRC);
11473     BuildMI(BB, dl, TII->get(PPC::AND), SReg)
11474         .addReg(TmpDestReg)
11475         .addReg(MaskReg);
11476     unsigned ValueReg = SReg;
11477     unsigned CmpReg = Incr2Reg;
11478     if (CmpOpcode == PPC::CMPW) {
11479       ValueReg = RegInfo.createVirtualRegister(GPRC);
11480       BuildMI(BB, dl, TII->get(PPC::SRW), ValueReg)
11481           .addReg(SReg)
11482           .addReg(ShiftReg);
11483       Register ValueSReg = RegInfo.createVirtualRegister(GPRC);
11484       BuildMI(BB, dl, TII->get(is8bit ? PPC::EXTSB : PPC::EXTSH), ValueSReg)
11485           .addReg(ValueReg);
11486       ValueReg = ValueSReg;
11487       CmpReg = incr;
11488     }
11489     BuildMI(BB, dl, TII->get(CmpOpcode), PPC::CR0)
11490         .addReg(CmpReg)
11491         .addReg(ValueReg);
11492     BuildMI(BB, dl, TII->get(PPC::BCC))
11493         .addImm(CmpPred)
11494         .addReg(PPC::CR0)
11495         .addMBB(exitMBB);
11496     BB->addSuccessor(loop2MBB);
11497     BB->addSuccessor(exitMBB);
11498     BB = loop2MBB;
11499   }
11500   BuildMI(BB, dl, TII->get(PPC::OR), Tmp4Reg).addReg(Tmp3Reg).addReg(Tmp2Reg);
11501   BuildMI(BB, dl, TII->get(PPC::STWCX))
11502       .addReg(Tmp4Reg)
11503       .addReg(ZeroReg)
11504       .addReg(PtrReg);
11505   BuildMI(BB, dl, TII->get(PPC::BCC))
11506       .addImm(PPC::PRED_NE)
11507       .addReg(PPC::CR0)
11508       .addMBB(loopMBB);
11509   BB->addSuccessor(loopMBB);
11510   BB->addSuccessor(exitMBB);
11511 
11512   //  exitMBB:
11513   //   ...
11514   BB = exitMBB;
11515   BuildMI(*BB, BB->begin(), dl, TII->get(PPC::SRW), dest)
11516       .addReg(TmpDestReg)
11517       .addReg(ShiftReg);
11518   return BB;
11519 }
11520 
11521 llvm::MachineBasicBlock *
11522 PPCTargetLowering::emitEHSjLjSetJmp(MachineInstr &MI,
11523                                     MachineBasicBlock *MBB) const {
11524   DebugLoc DL = MI.getDebugLoc();
11525   const TargetInstrInfo *TII = Subtarget.getInstrInfo();
11526   const PPCRegisterInfo *TRI = Subtarget.getRegisterInfo();
11527 
11528   MachineFunction *MF = MBB->getParent();
11529   MachineRegisterInfo &MRI = MF->getRegInfo();
11530 
11531   const BasicBlock *BB = MBB->getBasicBlock();
11532   MachineFunction::iterator I = ++MBB->getIterator();
11533 
11534   Register DstReg = MI.getOperand(0).getReg();
11535   const TargetRegisterClass *RC = MRI.getRegClass(DstReg);
11536   assert(TRI->isTypeLegalForClass(*RC, MVT::i32) && "Invalid destination!");
11537   Register mainDstReg = MRI.createVirtualRegister(RC);
11538   Register restoreDstReg = MRI.createVirtualRegister(RC);
11539 
11540   MVT PVT = getPointerTy(MF->getDataLayout());
11541   assert((PVT == MVT::i64 || PVT == MVT::i32) &&
11542          "Invalid Pointer Size!");
11543   // For v = setjmp(buf), we generate
11544   //
11545   // thisMBB:
11546   //  SjLjSetup mainMBB
11547   //  bl mainMBB
11548   //  v_restore = 1
11549   //  b sinkMBB
11550   //
11551   // mainMBB:
11552   //  buf[LabelOffset] = LR
11553   //  v_main = 0
11554   //
11555   // sinkMBB:
11556   //  v = phi(main, restore)
11557   //
11558 
11559   MachineBasicBlock *thisMBB = MBB;
11560   MachineBasicBlock *mainMBB = MF->CreateMachineBasicBlock(BB);
11561   MachineBasicBlock *sinkMBB = MF->CreateMachineBasicBlock(BB);
11562   MF->insert(I, mainMBB);
11563   MF->insert(I, sinkMBB);
11564 
11565   MachineInstrBuilder MIB;
11566 
11567   // Transfer the remainder of BB and its successor edges to sinkMBB.
11568   sinkMBB->splice(sinkMBB->begin(), MBB,
11569                   std::next(MachineBasicBlock::iterator(MI)), MBB->end());
11570   sinkMBB->transferSuccessorsAndUpdatePHIs(MBB);
11571 
11572   // Note that the structure of the jmp_buf used here is not compatible
11573   // with that used by libc, and is not designed to be. Specifically, it
11574   // stores only those 'reserved' registers that LLVM does not otherwise
11575   // understand how to spill. Also, by convention, by the time this
11576   // intrinsic is called, Clang has already stored the frame address in the
11577   // first slot of the buffer and stack address in the third. Following the
11578   // X86 target code, we'll store the jump address in the second slot. We also
11579   // need to save the TOC pointer (R2) to handle jumps between shared
11580   // libraries, and that will be stored in the fourth slot. The thread
11581   // identifier (R13) is not affected.
11582 
11583   // thisMBB:
11584   const int64_t LabelOffset = 1 * PVT.getStoreSize();
11585   const int64_t TOCOffset   = 3 * PVT.getStoreSize();
11586   const int64_t BPOffset    = 4 * PVT.getStoreSize();
11587 
  // Prepare the IP in a register.
11589   const TargetRegisterClass *PtrRC = getRegClassFor(PVT);
11590   Register LabelReg = MRI.createVirtualRegister(PtrRC);
11591   Register BufReg = MI.getOperand(1).getReg();
11592 
11593   if (Subtarget.is64BitELFABI()) {
11594     setUsesTOCBasePtr(*MBB->getParent());
11595     MIB = BuildMI(*thisMBB, MI, DL, TII->get(PPC::STD))
11596               .addReg(PPC::X2)
11597               .addImm(TOCOffset)
11598               .addReg(BufReg)
11599               .cloneMemRefs(MI);
11600   }
11601 
  // Naked functions never have a base pointer, and so we use r1. For all
  // other functions, this decision must be deferred until PEI.
11604   unsigned BaseReg;
11605   if (MF->getFunction().hasFnAttribute(Attribute::Naked))
11606     BaseReg = Subtarget.isPPC64() ? PPC::X1 : PPC::R1;
11607   else
11608     BaseReg = Subtarget.isPPC64() ? PPC::BP8 : PPC::BP;
11609 
11610   MIB = BuildMI(*thisMBB, MI, DL,
11611                 TII->get(Subtarget.isPPC64() ? PPC::STD : PPC::STW))
11612             .addReg(BaseReg)
11613             .addImm(BPOffset)
11614             .addReg(BufReg)
11615             .cloneMemRefs(MI);
11616 
11617   // Setup
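  // BCLalways sets LR (read back via MFLR in mainMBB below); the no-preserved
  // regmask models that any register may be clobbered across the setjmp.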
11618   MIB = BuildMI(*thisMBB, MI, DL, TII->get(PPC::BCLalways)).addMBB(mainMBB);
11619   MIB.addRegMask(TRI->getNoPreservedMask());
11620 
11621   BuildMI(*thisMBB, MI, DL, TII->get(PPC::LI), restoreDstReg).addImm(1);
11622 
11623   MIB = BuildMI(*thisMBB, MI, DL, TII->get(PPC::EH_SjLj_Setup))
11624           .addMBB(mainMBB);
11625   MIB = BuildMI(*thisMBB, MI, DL, TII->get(PPC::B)).addMBB(sinkMBB);
11626 
11627   thisMBB->addSuccessor(mainMBB, BranchProbability::getZero());
11628   thisMBB->addSuccessor(sinkMBB, BranchProbability::getOne());
11629 
11630   // mainMBB:
11631   //  mainDstReg = 0
11632   MIB =
11633       BuildMI(mainMBB, DL,
11634               TII->get(Subtarget.isPPC64() ? PPC::MFLR8 : PPC::MFLR), LabelReg);
11635 
11636   // Store IP
11637   if (Subtarget.isPPC64()) {
11638     MIB = BuildMI(mainMBB, DL, TII->get(PPC::STD))
11639             .addReg(LabelReg)
11640             .addImm(LabelOffset)
11641             .addReg(BufReg);
11642   } else {
11643     MIB = BuildMI(mainMBB, DL, TII->get(PPC::STW))
11644             .addReg(LabelReg)
11645             .addImm(LabelOffset)
11646             .addReg(BufReg);
11647   }
11648   MIB.cloneMemRefs(MI);
11649 
11650   BuildMI(mainMBB, DL, TII->get(PPC::LI), mainDstReg).addImm(0);
11651   mainMBB->addSuccessor(sinkMBB);
11652 
11653   // sinkMBB:
11654   BuildMI(*sinkMBB, sinkMBB->begin(), DL,
11655           TII->get(PPC::PHI), DstReg)
11656     .addReg(mainDstReg).addMBB(mainMBB)
11657     .addReg(restoreDstReg).addMBB(thisMBB);
11658 
11659   MI.eraseFromParent();
11660   return sinkMBB;
11661 }
11662 
11663 MachineBasicBlock *
11664 PPCTargetLowering::emitEHSjLjLongJmp(MachineInstr &MI,
11665                                      MachineBasicBlock *MBB) const {
11666   DebugLoc DL = MI.getDebugLoc();
11667   const TargetInstrInfo *TII = Subtarget.getInstrInfo();
11668 
11669   MachineFunction *MF = MBB->getParent();
11670   MachineRegisterInfo &MRI = MF->getRegInfo();
11671 
11672   MVT PVT = getPointerTy(MF->getDataLayout());
11673   assert((PVT == MVT::i64 || PVT == MVT::i32) &&
11674          "Invalid Pointer Size!");
11675 
11676   const TargetRegisterClass *RC =
11677     (PVT == MVT::i64) ? &PPC::G8RCRegClass : &PPC::GPRCRegClass;
11678   Register Tmp = MRI.createVirtualRegister(RC);
  // Since FP is only updated here and NOT referenced, it is treated as a GPR.
11680   unsigned FP  = (PVT == MVT::i64) ? PPC::X31 : PPC::R31;
11681   unsigned SP  = (PVT == MVT::i64) ? PPC::X1 : PPC::R1;
11682   unsigned BP =
11683       (PVT == MVT::i64)
11684           ? PPC::X30
11685           : (Subtarget.isSVR4ABI() && isPositionIndependent() ? PPC::R29
11686                                                               : PPC::R30);
11687 
11688   MachineInstrBuilder MIB;
11689 
11690   const int64_t LabelOffset = 1 * PVT.getStoreSize();
11691   const int64_t SPOffset    = 2 * PVT.getStoreSize();
11692   const int64_t TOCOffset   = 3 * PVT.getStoreSize();
11693   const int64_t BPOffset    = 4 * PVT.getStoreSize();
11694 
11695   Register BufReg = MI.getOperand(0).getReg();
11696 
11697   // Reload FP (the jumped-to function may not have had a
11698   // frame pointer, and if so, then its r31 will be restored
11699   // as necessary).
11700   if (PVT == MVT::i64) {
11701     MIB = BuildMI(*MBB, MI, DL, TII->get(PPC::LD), FP)
11702             .addImm(0)
11703             .addReg(BufReg);
11704   } else {
11705     MIB = BuildMI(*MBB, MI, DL, TII->get(PPC::LWZ), FP)
11706             .addImm(0)
11707             .addReg(BufReg);
11708   }
11709   MIB.cloneMemRefs(MI);
11710 
11711   // Reload IP
11712   if (PVT == MVT::i64) {
11713     MIB = BuildMI(*MBB, MI, DL, TII->get(PPC::LD), Tmp)
11714             .addImm(LabelOffset)
11715             .addReg(BufReg);
11716   } else {
11717     MIB = BuildMI(*MBB, MI, DL, TII->get(PPC::LWZ), Tmp)
11718             .addImm(LabelOffset)
11719             .addReg(BufReg);
11720   }
11721   MIB.cloneMemRefs(MI);
11722 
11723   // Reload SP
11724   if (PVT == MVT::i64) {
11725     MIB = BuildMI(*MBB, MI, DL, TII->get(PPC::LD), SP)
11726             .addImm(SPOffset)
11727             .addReg(BufReg);
11728   } else {
11729     MIB = BuildMI(*MBB, MI, DL, TII->get(PPC::LWZ), SP)
11730             .addImm(SPOffset)
11731             .addReg(BufReg);
11732   }
11733   MIB.cloneMemRefs(MI);
11734 
11735   // Reload BP
11736   if (PVT == MVT::i64) {
11737     MIB = BuildMI(*MBB, MI, DL, TII->get(PPC::LD), BP)
11738             .addImm(BPOffset)
11739             .addReg(BufReg);
11740   } else {
11741     MIB = BuildMI(*MBB, MI, DL, TII->get(PPC::LWZ), BP)
11742             .addImm(BPOffset)
11743             .addReg(BufReg);
11744   }
11745   MIB.cloneMemRefs(MI);
11746 
11747   // Reload TOC
11748   if (PVT == MVT::i64 && Subtarget.isSVR4ABI()) {
11749     setUsesTOCBasePtr(*MBB->getParent());
11750     MIB = BuildMI(*MBB, MI, DL, TII->get(PPC::LD), PPC::X2)
11751               .addImm(TOCOffset)
11752               .addReg(BufReg)
11753               .cloneMemRefs(MI);
11754   }
11755 
11756   // Jump
11757   BuildMI(*MBB, MI, DL,
11758           TII->get(PVT == MVT::i64 ? PPC::MTCTR8 : PPC::MTCTR)).addReg(Tmp);
11759   BuildMI(*MBB, MI, DL, TII->get(PVT == MVT::i64 ? PPC::BCTR8 : PPC::BCTR));
11760 
11761   MI.eraseFromParent();
11762   return MBB;
11763 }
11764 
11765 bool PPCTargetLowering::hasInlineStackProbe(MachineFunction &MF) const {
11766   // If the function specifically requests inline stack probes, emit them.
11767   if (MF.getFunction().hasFnAttribute("probe-stack"))
11768     return MF.getFunction().getFnAttribute("probe-stack").getValueAsString() ==
11769            "inline-asm";
11770   return false;
11771 }
11772 
11773 unsigned PPCTargetLowering::getStackProbeSize(MachineFunction &MF) const {
11774   const TargetFrameLowering *TFI = Subtarget.getFrameLowering();
11775   unsigned StackAlign = TFI->getStackAlignment();
11776   assert(StackAlign >= 1 && isPowerOf2_32(StackAlign) &&
11777          "Unexpected stack alignment");
11778   // The default stack probe size is 4096 if the function has no
11779   // stack-probe-size attribute.
11780   unsigned StackProbeSize = 4096;
11781   const Function &Fn = MF.getFunction();
11782   if (Fn.hasFnAttribute("stack-probe-size"))
11783     Fn.getFnAttribute("stack-probe-size")
11784         .getValueAsString()
11785         .getAsInteger(0, StackProbeSize);
11786   // Round down to the stack alignment.
11787   StackProbeSize &= ~(StackAlign - 1);
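  // For example, a "stack-probe-size" of 5000 with 16-byte stack alignment
  // rounds down to 4992.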
11788   return StackProbeSize ? StackProbeSize : StackAlign;
11789 }
11790 
// Lower dynamic stack allocation with probing. `emitProbedAlloca` is split
// into three phases. In the first phase, it uses the pseudo instruction
// PREPARE_PROBED_ALLOCA to get the future results of the actual FramePointer
// and FinalStackPtr. In the second phase, it generates a loop that probes the
// blocks. Finally, it uses the pseudo instruction DYNAREAOFFSET to get the
// future result of MaxCallFrameSize so that it can calculate the correct data
// area pointer.
11797 MachineBasicBlock *
11798 PPCTargetLowering::emitProbedAlloca(MachineInstr &MI,
11799                                     MachineBasicBlock *MBB) const {
11800   const bool isPPC64 = Subtarget.isPPC64();
11801   MachineFunction *MF = MBB->getParent();
11802   const TargetInstrInfo *TII = Subtarget.getInstrInfo();
11803   DebugLoc DL = MI.getDebugLoc();
11804   const unsigned ProbeSize = getStackProbeSize(*MF);
11805   const BasicBlock *ProbedBB = MBB->getBasicBlock();
11806   MachineRegisterInfo &MRI = MF->getRegInfo();
  // The CFG for stack probing looks like:
11808   //         +-----+
11809   //         | MBB |
11810   //         +--+--+
11811   //            |
11812   //       +----v----+
11813   //  +--->+ TestMBB +---+
11814   //  |    +----+----+   |
11815   //  |         |        |
11816   //  |   +-----v----+   |
11817   //  +---+ BlockMBB |   |
11818   //      +----------+   |
11819   //                     |
11820   //       +---------+   |
11821   //       | TailMBB +<--+
11822   //       +---------+
  // In MBB, calculate the previous frame pointer and the final stack pointer.
  // In TestMBB, test whether SP equals the final stack pointer; if so, jump to
  // TailMBB. In BlockMBB, update SP atomically and jump back to TestMBB.
  // TailMBB is spliced via \p MI.
11827   MachineBasicBlock *TestMBB = MF->CreateMachineBasicBlock(ProbedBB);
11828   MachineBasicBlock *TailMBB = MF->CreateMachineBasicBlock(ProbedBB);
11829   MachineBasicBlock *BlockMBB = MF->CreateMachineBasicBlock(ProbedBB);
11830 
11831   MachineFunction::iterator MBBIter = ++MBB->getIterator();
11832   MF->insert(MBBIter, TestMBB);
11833   MF->insert(MBBIter, BlockMBB);
11834   MF->insert(MBBIter, TailMBB);
11835 
11836   const TargetRegisterClass *G8RC = &PPC::G8RCRegClass;
11837   const TargetRegisterClass *GPRC = &PPC::GPRCRegClass;
11838 
11839   Register DstReg = MI.getOperand(0).getReg();
11840   Register NegSizeReg = MI.getOperand(1).getReg();
11841   Register SPReg = isPPC64 ? PPC::X1 : PPC::R1;
11842   Register FinalStackPtr = MRI.createVirtualRegister(isPPC64 ? G8RC : GPRC);
11843   Register FramePointer = MRI.createVirtualRegister(isPPC64 ? G8RC : GPRC);
11844   Register ActualNegSizeReg = MRI.createVirtualRegister(isPPC64 ? G8RC : GPRC);
11845 
  // Since the value of NegSizeReg might be realigned during prologue/epilogue
  // insertion, insert a PREPARE_PROBED_ALLOCA pseudo instruction to get the
  // actual FramePointer and NegSize.
11849   unsigned ProbeOpc;
11850   if (!MRI.hasOneNonDBGUse(NegSizeReg))
11851     ProbeOpc =
11852         isPPC64 ? PPC::PREPARE_PROBED_ALLOCA_64 : PPC::PREPARE_PROBED_ALLOCA_32;
11853   else
    // When NegSizeReg has only one use (the current MI, which will be replaced
    // by PREPARE_PROBED_ALLOCA), use the NEGSIZE_SAME_REG variant so that
    // ActualNegSizeReg and NegSizeReg are allocated to the same physical
    // register, avoiding a redundant copy.
11858     ProbeOpc = isPPC64 ? PPC::PREPARE_PROBED_ALLOCA_NEGSIZE_SAME_REG_64
11859                        : PPC::PREPARE_PROBED_ALLOCA_NEGSIZE_SAME_REG_32;
11860   BuildMI(*MBB, {MI}, DL, TII->get(ProbeOpc), FramePointer)
11861       .addDef(ActualNegSizeReg)
11862       .addReg(NegSizeReg)
11863       .add(MI.getOperand(2))
11864       .add(MI.getOperand(3));
11865 
  // Calculate the final stack pointer, which equals SP + ActualNegSize.
11867   BuildMI(*MBB, {MI}, DL, TII->get(isPPC64 ? PPC::ADD8 : PPC::ADD4),
11868           FinalStackPtr)
11869       .addReg(SPReg)
11870       .addReg(ActualNegSizeReg);
11871 
11872   // Materialize a scratch register for update.
11873   int64_t NegProbeSize = -(int64_t)ProbeSize;
11874   assert(isInt<32>(NegProbeSize) && "Unhandled probe size!");
11875   Register ScratchReg = MRI.createVirtualRegister(isPPC64 ? G8RC : GPRC);
11876   if (!isInt<16>(NegProbeSize)) {
11877     Register TempReg = MRI.createVirtualRegister(isPPC64 ? G8RC : GPRC);
11878     BuildMI(*MBB, {MI}, DL, TII->get(isPPC64 ? PPC::LIS8 : PPC::LIS), TempReg)
11879         .addImm(NegProbeSize >> 16);
11880     BuildMI(*MBB, {MI}, DL, TII->get(isPPC64 ? PPC::ORI8 : PPC::ORI),
11881             ScratchReg)
11882         .addReg(TempReg)
11883         .addImm(NegProbeSize & 0xFFFF);
11884   } else
11885     BuildMI(*MBB, {MI}, DL, TII->get(isPPC64 ? PPC::LI8 : PPC::LI), ScratchReg)
11886         .addImm(NegProbeSize);
11887 
11888   {
    // Probe the leading residual part.
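    // NegMod = ActualNegSize - (ActualNegSize / NegProbeSize) * NegProbeSize,
    // i.e. the (negative) remainder smaller in magnitude than ProbeSize, which
    // is probed first with a single store-with-update.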
11890     Register Div = MRI.createVirtualRegister(isPPC64 ? G8RC : GPRC);
11891     BuildMI(*MBB, {MI}, DL, TII->get(isPPC64 ? PPC::DIVD : PPC::DIVW), Div)
11892         .addReg(ActualNegSizeReg)
11893         .addReg(ScratchReg);
11894     Register Mul = MRI.createVirtualRegister(isPPC64 ? G8RC : GPRC);
11895     BuildMI(*MBB, {MI}, DL, TII->get(isPPC64 ? PPC::MULLD : PPC::MULLW), Mul)
11896         .addReg(Div)
11897         .addReg(ScratchReg);
11898     Register NegMod = MRI.createVirtualRegister(isPPC64 ? G8RC : GPRC);
11899     BuildMI(*MBB, {MI}, DL, TII->get(isPPC64 ? PPC::SUBF8 : PPC::SUBF), NegMod)
11900         .addReg(Mul)
11901         .addReg(ActualNegSizeReg);
11902     BuildMI(*MBB, {MI}, DL, TII->get(isPPC64 ? PPC::STDUX : PPC::STWUX), SPReg)
11903         .addReg(FramePointer)
11904         .addReg(SPReg)
11905         .addReg(NegMod);
11906   }
11907 
11908   {
    // The remaining part should be a multiple of ProbeSize.
11910     Register CmpResult = MRI.createVirtualRegister(&PPC::CRRCRegClass);
11911     BuildMI(TestMBB, DL, TII->get(isPPC64 ? PPC::CMPD : PPC::CMPW), CmpResult)
11912         .addReg(SPReg)
11913         .addReg(FinalStackPtr);
11914     BuildMI(TestMBB, DL, TII->get(PPC::BCC))
11915         .addImm(PPC::PRED_EQ)
11916         .addReg(CmpResult)
11917         .addMBB(TailMBB);
11918     TestMBB->addSuccessor(BlockMBB);
11919     TestMBB->addSuccessor(TailMBB);
11920   }
11921 
11922   {
11923     // Touch the block.
11924     // |P...|P...|P...
11925     BuildMI(BlockMBB, DL, TII->get(isPPC64 ? PPC::STDUX : PPC::STWUX), SPReg)
11926         .addReg(FramePointer)
11927         .addReg(SPReg)
11928         .addReg(ScratchReg);
11929     BuildMI(BlockMBB, DL, TII->get(PPC::B)).addMBB(TestMBB);
11930     BlockMBB->addSuccessor(TestMBB);
11931   }
11932 
  // The calculation of MaxCallFrameSize is deferred to prologue/epilogue
  // insertion, so use the DYNAREAOFFSET pseudo instruction to get the future
  // result.
11935   Register MaxCallFrameSizeReg =
11936       MRI.createVirtualRegister(isPPC64 ? G8RC : GPRC);
11937   BuildMI(TailMBB, DL,
11938           TII->get(isPPC64 ? PPC::DYNAREAOFFSET8 : PPC::DYNAREAOFFSET),
11939           MaxCallFrameSizeReg)
11940       .add(MI.getOperand(2))
11941       .add(MI.getOperand(3));
11942   BuildMI(TailMBB, DL, TII->get(isPPC64 ? PPC::ADD8 : PPC::ADD4), DstReg)
11943       .addReg(SPReg)
11944       .addReg(MaxCallFrameSizeReg);
11945 
11946   // Splice instructions after MI to TailMBB.
11947   TailMBB->splice(TailMBB->end(), MBB,
11948                   std::next(MachineBasicBlock::iterator(MI)), MBB->end());
11949   TailMBB->transferSuccessorsAndUpdatePHIs(MBB);
11950   MBB->addSuccessor(TestMBB);
11951 
11952   // Delete the pseudo instruction.
11953   MI.eraseFromParent();
11954 
11955   ++NumDynamicAllocaProbed;
11956   return TailMBB;
11957 }
11958 
11959 MachineBasicBlock *
11960 PPCTargetLowering::EmitInstrWithCustomInserter(MachineInstr &MI,
11961                                                MachineBasicBlock *BB) const {
11962   if (MI.getOpcode() == TargetOpcode::STACKMAP ||
11963       MI.getOpcode() == TargetOpcode::PATCHPOINT) {
11964     if (Subtarget.is64BitELFABI() &&
11965         MI.getOpcode() == TargetOpcode::PATCHPOINT &&
11966         !Subtarget.isUsingPCRelativeCalls()) {
11967       // Call lowering should have added an r2 operand to indicate a dependence
      // on the TOC base pointer value. It can't, however, because there is no
11969       // way to mark the dependence as implicit there, and so the stackmap code
11970       // will confuse it with a regular operand. Instead, add the dependence
11971       // here.
11972       MI.addOperand(MachineOperand::CreateReg(PPC::X2, false, true));
11973     }
11974 
11975     return emitPatchPoint(MI, BB);
11976   }
11977 
11978   if (MI.getOpcode() == PPC::EH_SjLj_SetJmp32 ||
11979       MI.getOpcode() == PPC::EH_SjLj_SetJmp64) {
11980     return emitEHSjLjSetJmp(MI, BB);
11981   } else if (MI.getOpcode() == PPC::EH_SjLj_LongJmp32 ||
11982              MI.getOpcode() == PPC::EH_SjLj_LongJmp64) {
11983     return emitEHSjLjLongJmp(MI, BB);
11984   }
11985 
11986   const TargetInstrInfo *TII = Subtarget.getInstrInfo();
11987 
11988   // To "insert" these instructions we actually have to insert their
11989   // control-flow patterns.
11990   const BasicBlock *LLVM_BB = BB->getBasicBlock();
11991   MachineFunction::iterator It = ++BB->getIterator();
11992 
11993   MachineFunction *F = BB->getParent();
11994 
11995   if (MI.getOpcode() == PPC::SELECT_CC_I4 ||
11996       MI.getOpcode() == PPC::SELECT_CC_I8 || MI.getOpcode() == PPC::SELECT_I4 ||
11997       MI.getOpcode() == PPC::SELECT_I8) {
11998     SmallVector<MachineOperand, 2> Cond;
11999     if (MI.getOpcode() == PPC::SELECT_CC_I4 ||
12000         MI.getOpcode() == PPC::SELECT_CC_I8)
12001       Cond.push_back(MI.getOperand(4));
12002     else
12003       Cond.push_back(MachineOperand::CreateImm(PPC::PRED_BIT_SET));
12004     Cond.push_back(MI.getOperand(1));
12005 
12006     DebugLoc dl = MI.getDebugLoc();
12007     TII->insertSelect(*BB, MI, dl, MI.getOperand(0).getReg(), Cond,
12008                       MI.getOperand(2).getReg(), MI.getOperand(3).getReg());
12009   } else if (MI.getOpcode() == PPC::SELECT_CC_F4 ||
12010              MI.getOpcode() == PPC::SELECT_CC_F8 ||
12011              MI.getOpcode() == PPC::SELECT_CC_F16 ||
12012              MI.getOpcode() == PPC::SELECT_CC_VRRC ||
12013              MI.getOpcode() == PPC::SELECT_CC_VSFRC ||
12014              MI.getOpcode() == PPC::SELECT_CC_VSSRC ||
12015              MI.getOpcode() == PPC::SELECT_CC_VSRC ||
12016              MI.getOpcode() == PPC::SELECT_CC_SPE4 ||
12017              MI.getOpcode() == PPC::SELECT_CC_SPE ||
12018              MI.getOpcode() == PPC::SELECT_F4 ||
12019              MI.getOpcode() == PPC::SELECT_F8 ||
12020              MI.getOpcode() == PPC::SELECT_F16 ||
12021              MI.getOpcode() == PPC::SELECT_SPE ||
12022              MI.getOpcode() == PPC::SELECT_SPE4 ||
12023              MI.getOpcode() == PPC::SELECT_VRRC ||
12024              MI.getOpcode() == PPC::SELECT_VSFRC ||
12025              MI.getOpcode() == PPC::SELECT_VSSRC ||
12026              MI.getOpcode() == PPC::SELECT_VSRC) {
12027     // The incoming instruction knows the destination vreg to set, the
12028     // condition code register to branch on, the true/false values to
12029     // select between, and a branch opcode to use.
12030 
12031     //  thisMBB:
12032     //  ...
12033     //   TrueVal = ...
12034     //   cmpTY ccX, r1, r2
12035     //   bCC copy1MBB
12036     //   fallthrough --> copy0MBB
12037     MachineBasicBlock *thisMBB = BB;
12038     MachineBasicBlock *copy0MBB = F->CreateMachineBasicBlock(LLVM_BB);
12039     MachineBasicBlock *sinkMBB = F->CreateMachineBasicBlock(LLVM_BB);
12040     DebugLoc dl = MI.getDebugLoc();
12041     F->insert(It, copy0MBB);
12042     F->insert(It, sinkMBB);
12043 
12044     // Transfer the remainder of BB and its successor edges to sinkMBB.
12045     sinkMBB->splice(sinkMBB->begin(), BB,
12046                     std::next(MachineBasicBlock::iterator(MI)), BB->end());
12047     sinkMBB->transferSuccessorsAndUpdatePHIs(BB);
12048 
12049     // Next, add the true and fallthrough blocks as its successors.
12050     BB->addSuccessor(copy0MBB);
12051     BB->addSuccessor(sinkMBB);
12052 
12053     if (MI.getOpcode() == PPC::SELECT_I4 || MI.getOpcode() == PPC::SELECT_I8 ||
12054         MI.getOpcode() == PPC::SELECT_F4 || MI.getOpcode() == PPC::SELECT_F8 ||
12055         MI.getOpcode() == PPC::SELECT_F16 ||
12056         MI.getOpcode() == PPC::SELECT_SPE4 ||
12057         MI.getOpcode() == PPC::SELECT_SPE ||
12058         MI.getOpcode() == PPC::SELECT_VRRC ||
12059         MI.getOpcode() == PPC::SELECT_VSFRC ||
12060         MI.getOpcode() == PPC::SELECT_VSSRC ||
12061         MI.getOpcode() == PPC::SELECT_VSRC) {
12062       BuildMI(BB, dl, TII->get(PPC::BC))
12063           .addReg(MI.getOperand(1).getReg())
12064           .addMBB(sinkMBB);
12065     } else {
12066       unsigned SelectPred = MI.getOperand(4).getImm();
12067       BuildMI(BB, dl, TII->get(PPC::BCC))
12068           .addImm(SelectPred)
12069           .addReg(MI.getOperand(1).getReg())
12070           .addMBB(sinkMBB);
12071     }
12072 
12073     //  copy0MBB:
12074     //   %FalseValue = ...
12075     //   # fallthrough to sinkMBB
12076     BB = copy0MBB;
12077 
12078     // Update machine-CFG edges
12079     BB->addSuccessor(sinkMBB);
12080 
12081     //  sinkMBB:
12082     //   %Result = phi [ %FalseValue, copy0MBB ], [ %TrueValue, thisMBB ]
12083     //  ...
12084     BB = sinkMBB;
12085     BuildMI(*BB, BB->begin(), dl, TII->get(PPC::PHI), MI.getOperand(0).getReg())
12086         .addReg(MI.getOperand(3).getReg())
12087         .addMBB(copy0MBB)
12088         .addReg(MI.getOperand(2).getReg())
12089         .addMBB(thisMBB);
12090   } else if (MI.getOpcode() == PPC::ReadTB) {
12091     // To read the 64-bit time-base register on a 32-bit target, we read the
12092     // two halves. Should the counter have wrapped while it was being read, we
12093     // need to try again.
12094     // ...
12095     // readLoop:
12096     // mfspr Rx,TBU # load from TBU
12097     // mfspr Ry,TB  # load from TB
12098     // mfspr Rz,TBU # load from TBU
12099     // cmpw crX,Rx,Rz # check if 'old'='new'
12100     // bne readLoop   # branch if they're not equal
12101     // ...
12102 
12103     MachineBasicBlock *readMBB = F->CreateMachineBasicBlock(LLVM_BB);
12104     MachineBasicBlock *sinkMBB = F->CreateMachineBasicBlock(LLVM_BB);
12105     DebugLoc dl = MI.getDebugLoc();
12106     F->insert(It, readMBB);
12107     F->insert(It, sinkMBB);
12108 
12109     // Transfer the remainder of BB and its successor edges to sinkMBB.
12110     sinkMBB->splice(sinkMBB->begin(), BB,
12111                     std::next(MachineBasicBlock::iterator(MI)), BB->end());
12112     sinkMBB->transferSuccessorsAndUpdatePHIs(BB);
12113 
12114     BB->addSuccessor(readMBB);
12115     BB = readMBB;
12116 
12117     MachineRegisterInfo &RegInfo = F->getRegInfo();
12118     Register ReadAgainReg = RegInfo.createVirtualRegister(&PPC::GPRCRegClass);
12119     Register LoReg = MI.getOperand(0).getReg();
12120     Register HiReg = MI.getOperand(1).getReg();
12121 
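    // SPR 269 is TBU (upper half of the time base); SPR 268 is TBL (lower).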
12122     BuildMI(BB, dl, TII->get(PPC::MFSPR), HiReg).addImm(269);
12123     BuildMI(BB, dl, TII->get(PPC::MFSPR), LoReg).addImm(268);
12124     BuildMI(BB, dl, TII->get(PPC::MFSPR), ReadAgainReg).addImm(269);
12125 
12126     Register CmpReg = RegInfo.createVirtualRegister(&PPC::CRRCRegClass);
12127 
12128     BuildMI(BB, dl, TII->get(PPC::CMPW), CmpReg)
12129         .addReg(HiReg)
12130         .addReg(ReadAgainReg);
12131     BuildMI(BB, dl, TII->get(PPC::BCC))
12132         .addImm(PPC::PRED_NE)
12133         .addReg(CmpReg)
12134         .addMBB(readMBB);
12135 
12136     BB->addSuccessor(readMBB);
12137     BB->addSuccessor(sinkMBB);
12138   } else if (MI.getOpcode() == PPC::ATOMIC_LOAD_ADD_I8)
12139     BB = EmitPartwordAtomicBinary(MI, BB, true, PPC::ADD4);
12140   else if (MI.getOpcode() == PPC::ATOMIC_LOAD_ADD_I16)
12141     BB = EmitPartwordAtomicBinary(MI, BB, false, PPC::ADD4);
12142   else if (MI.getOpcode() == PPC::ATOMIC_LOAD_ADD_I32)
12143     BB = EmitAtomicBinary(MI, BB, 4, PPC::ADD4);
12144   else if (MI.getOpcode() == PPC::ATOMIC_LOAD_ADD_I64)
12145     BB = EmitAtomicBinary(MI, BB, 8, PPC::ADD8);
12146 
12147   else if (MI.getOpcode() == PPC::ATOMIC_LOAD_AND_I8)
12148     BB = EmitPartwordAtomicBinary(MI, BB, true, PPC::AND);
12149   else if (MI.getOpcode() == PPC::ATOMIC_LOAD_AND_I16)
12150     BB = EmitPartwordAtomicBinary(MI, BB, false, PPC::AND);
12151   else if (MI.getOpcode() == PPC::ATOMIC_LOAD_AND_I32)
12152     BB = EmitAtomicBinary(MI, BB, 4, PPC::AND);
12153   else if (MI.getOpcode() == PPC::ATOMIC_LOAD_AND_I64)
12154     BB = EmitAtomicBinary(MI, BB, 8, PPC::AND8);
12155 
12156   else if (MI.getOpcode() == PPC::ATOMIC_LOAD_OR_I8)
12157     BB = EmitPartwordAtomicBinary(MI, BB, true, PPC::OR);
12158   else if (MI.getOpcode() == PPC::ATOMIC_LOAD_OR_I16)
12159     BB = EmitPartwordAtomicBinary(MI, BB, false, PPC::OR);
12160   else if (MI.getOpcode() == PPC::ATOMIC_LOAD_OR_I32)
12161     BB = EmitAtomicBinary(MI, BB, 4, PPC::OR);
12162   else if (MI.getOpcode() == PPC::ATOMIC_LOAD_OR_I64)
12163     BB = EmitAtomicBinary(MI, BB, 8, PPC::OR8);
12164 
12165   else if (MI.getOpcode() == PPC::ATOMIC_LOAD_XOR_I8)
12166     BB = EmitPartwordAtomicBinary(MI, BB, true, PPC::XOR);
12167   else if (MI.getOpcode() == PPC::ATOMIC_LOAD_XOR_I16)
12168     BB = EmitPartwordAtomicBinary(MI, BB, false, PPC::XOR);
12169   else if (MI.getOpcode() == PPC::ATOMIC_LOAD_XOR_I32)
12170     BB = EmitAtomicBinary(MI, BB, 4, PPC::XOR);
12171   else if (MI.getOpcode() == PPC::ATOMIC_LOAD_XOR_I64)
12172     BB = EmitAtomicBinary(MI, BB, 8, PPC::XOR8);
12173 
12174   else if (MI.getOpcode() == PPC::ATOMIC_LOAD_NAND_I8)
12175     BB = EmitPartwordAtomicBinary(MI, BB, true, PPC::NAND);
12176   else if (MI.getOpcode() == PPC::ATOMIC_LOAD_NAND_I16)
12177     BB = EmitPartwordAtomicBinary(MI, BB, false, PPC::NAND);
12178   else if (MI.getOpcode() == PPC::ATOMIC_LOAD_NAND_I32)
12179     BB = EmitAtomicBinary(MI, BB, 4, PPC::NAND);
12180   else if (MI.getOpcode() == PPC::ATOMIC_LOAD_NAND_I64)
12181     BB = EmitAtomicBinary(MI, BB, 8, PPC::NAND8);
12182 
12183   else if (MI.getOpcode() == PPC::ATOMIC_LOAD_SUB_I8)
12184     BB = EmitPartwordAtomicBinary(MI, BB, true, PPC::SUBF);
12185   else if (MI.getOpcode() == PPC::ATOMIC_LOAD_SUB_I16)
12186     BB = EmitPartwordAtomicBinary(MI, BB, false, PPC::SUBF);
12187   else if (MI.getOpcode() == PPC::ATOMIC_LOAD_SUB_I32)
12188     BB = EmitAtomicBinary(MI, BB, 4, PPC::SUBF);
12189   else if (MI.getOpcode() == PPC::ATOMIC_LOAD_SUB_I64)
12190     BB = EmitAtomicBinary(MI, BB, 8, PPC::SUBF8);
12191 
12192   else if (MI.getOpcode() == PPC::ATOMIC_LOAD_MIN_I8)
12193     BB = EmitPartwordAtomicBinary(MI, BB, true, 0, PPC::CMPW, PPC::PRED_GE);
12194   else if (MI.getOpcode() == PPC::ATOMIC_LOAD_MIN_I16)
12195     BB = EmitPartwordAtomicBinary(MI, BB, false, 0, PPC::CMPW, PPC::PRED_GE);
12196   else if (MI.getOpcode() == PPC::ATOMIC_LOAD_MIN_I32)
12197     BB = EmitAtomicBinary(MI, BB, 4, 0, PPC::CMPW, PPC::PRED_GE);
12198   else if (MI.getOpcode() == PPC::ATOMIC_LOAD_MIN_I64)
12199     BB = EmitAtomicBinary(MI, BB, 8, 0, PPC::CMPD, PPC::PRED_GE);
12200 
12201   else if (MI.getOpcode() == PPC::ATOMIC_LOAD_MAX_I8)
12202     BB = EmitPartwordAtomicBinary(MI, BB, true, 0, PPC::CMPW, PPC::PRED_LE);
12203   else if (MI.getOpcode() == PPC::ATOMIC_LOAD_MAX_I16)
12204     BB = EmitPartwordAtomicBinary(MI, BB, false, 0, PPC::CMPW, PPC::PRED_LE);
12205   else if (MI.getOpcode() == PPC::ATOMIC_LOAD_MAX_I32)
12206     BB = EmitAtomicBinary(MI, BB, 4, 0, PPC::CMPW, PPC::PRED_LE);
12207   else if (MI.getOpcode() == PPC::ATOMIC_LOAD_MAX_I64)
12208     BB = EmitAtomicBinary(MI, BB, 8, 0, PPC::CMPD, PPC::PRED_LE);
12209 
12210   else if (MI.getOpcode() == PPC::ATOMIC_LOAD_UMIN_I8)
12211     BB = EmitPartwordAtomicBinary(MI, BB, true, 0, PPC::CMPLW, PPC::PRED_GE);
12212   else if (MI.getOpcode() == PPC::ATOMIC_LOAD_UMIN_I16)
12213     BB = EmitPartwordAtomicBinary(MI, BB, false, 0, PPC::CMPLW, PPC::PRED_GE);
12214   else if (MI.getOpcode() == PPC::ATOMIC_LOAD_UMIN_I32)
12215     BB = EmitAtomicBinary(MI, BB, 4, 0, PPC::CMPLW, PPC::PRED_GE);
12216   else if (MI.getOpcode() == PPC::ATOMIC_LOAD_UMIN_I64)
12217     BB = EmitAtomicBinary(MI, BB, 8, 0, PPC::CMPLD, PPC::PRED_GE);
12218 
12219   else if (MI.getOpcode() == PPC::ATOMIC_LOAD_UMAX_I8)
12220     BB = EmitPartwordAtomicBinary(MI, BB, true, 0, PPC::CMPLW, PPC::PRED_LE);
12221   else if (MI.getOpcode() == PPC::ATOMIC_LOAD_UMAX_I16)
12222     BB = EmitPartwordAtomicBinary(MI, BB, false, 0, PPC::CMPLW, PPC::PRED_LE);
12223   else if (MI.getOpcode() == PPC::ATOMIC_LOAD_UMAX_I32)
12224     BB = EmitAtomicBinary(MI, BB, 4, 0, PPC::CMPLW, PPC::PRED_LE);
12225   else if (MI.getOpcode() == PPC::ATOMIC_LOAD_UMAX_I64)
12226     BB = EmitAtomicBinary(MI, BB, 8, 0, PPC::CMPLD, PPC::PRED_LE);
12227 
12228   else if (MI.getOpcode() == PPC::ATOMIC_SWAP_I8)
12229     BB = EmitPartwordAtomicBinary(MI, BB, true, 0);
12230   else if (MI.getOpcode() == PPC::ATOMIC_SWAP_I16)
12231     BB = EmitPartwordAtomicBinary(MI, BB, false, 0);
12232   else if (MI.getOpcode() == PPC::ATOMIC_SWAP_I32)
12233     BB = EmitAtomicBinary(MI, BB, 4, 0);
12234   else if (MI.getOpcode() == PPC::ATOMIC_SWAP_I64)
12235     BB = EmitAtomicBinary(MI, BB, 8, 0);
12236   else if (MI.getOpcode() == PPC::ATOMIC_CMP_SWAP_I32 ||
12237            MI.getOpcode() == PPC::ATOMIC_CMP_SWAP_I64 ||
12238            (Subtarget.hasPartwordAtomics() &&
12239             MI.getOpcode() == PPC::ATOMIC_CMP_SWAP_I8) ||
12240            (Subtarget.hasPartwordAtomics() &&
12241             MI.getOpcode() == PPC::ATOMIC_CMP_SWAP_I16)) {
12242     bool is64bit = MI.getOpcode() == PPC::ATOMIC_CMP_SWAP_I64;
12243 
12244     auto LoadMnemonic = PPC::LDARX;
12245     auto StoreMnemonic = PPC::STDCX;
12246     switch (MI.getOpcode()) {
12247     default:
12248       llvm_unreachable("Compare and swap of unknown size");
12249     case PPC::ATOMIC_CMP_SWAP_I8:
12250       LoadMnemonic = PPC::LBARX;
12251       StoreMnemonic = PPC::STBCX;
      assert(Subtarget.hasPartwordAtomics() &&
             "No support for partword atomics.");
12253       break;
12254     case PPC::ATOMIC_CMP_SWAP_I16:
12255       LoadMnemonic = PPC::LHARX;
12256       StoreMnemonic = PPC::STHCX;
      assert(Subtarget.hasPartwordAtomics() &&
             "No support for partword atomics.");
12258       break;
12259     case PPC::ATOMIC_CMP_SWAP_I32:
12260       LoadMnemonic = PPC::LWARX;
12261       StoreMnemonic = PPC::STWCX;
12262       break;
12263     case PPC::ATOMIC_CMP_SWAP_I64:
12264       LoadMnemonic = PPC::LDARX;
12265       StoreMnemonic = PPC::STDCX;
12266       break;
12267     }
12268     Register dest = MI.getOperand(0).getReg();
12269     Register ptrA = MI.getOperand(1).getReg();
12270     Register ptrB = MI.getOperand(2).getReg();
12271     Register oldval = MI.getOperand(3).getReg();
12272     Register newval = MI.getOperand(4).getReg();
12273     DebugLoc dl = MI.getDebugLoc();
12274 
12275     MachineBasicBlock *loop1MBB = F->CreateMachineBasicBlock(LLVM_BB);
12276     MachineBasicBlock *loop2MBB = F->CreateMachineBasicBlock(LLVM_BB);
12277     MachineBasicBlock *midMBB = F->CreateMachineBasicBlock(LLVM_BB);
12278     MachineBasicBlock *exitMBB = F->CreateMachineBasicBlock(LLVM_BB);
12279     F->insert(It, loop1MBB);
12280     F->insert(It, loop2MBB);
12281     F->insert(It, midMBB);
12282     F->insert(It, exitMBB);
12283     exitMBB->splice(exitMBB->begin(), BB,
12284                     std::next(MachineBasicBlock::iterator(MI)), BB->end());
12285     exitMBB->transferSuccessorsAndUpdatePHIs(BB);
12286 
12287     //  thisMBB:
12288     //   ...
    //   fallthrough --> loop1MBB
12290     BB->addSuccessor(loop1MBB);
12291 
12292     // loop1MBB:
12293     //   l[bhwd]arx dest, ptr
12294     //   cmp[wd] dest, oldval
12295     //   bne- midMBB
12296     // loop2MBB:
12297     //   st[bhwd]cx. newval, ptr
    //   bne- loop1MBB
12299     //   b exitBB
12300     // midMBB:
12301     //   st[bhwd]cx. dest, ptr
12302     // exitBB:
12303     BB = loop1MBB;
12304     BuildMI(BB, dl, TII->get(LoadMnemonic), dest).addReg(ptrA).addReg(ptrB);
12305     BuildMI(BB, dl, TII->get(is64bit ? PPC::CMPD : PPC::CMPW), PPC::CR0)
12306         .addReg(oldval)
12307         .addReg(dest);
12308     BuildMI(BB, dl, TII->get(PPC::BCC))
12309         .addImm(PPC::PRED_NE)
12310         .addReg(PPC::CR0)
12311         .addMBB(midMBB);
12312     BB->addSuccessor(loop2MBB);
12313     BB->addSuccessor(midMBB);
12314 
12315     BB = loop2MBB;
12316     BuildMI(BB, dl, TII->get(StoreMnemonic))
12317         .addReg(newval)
12318         .addReg(ptrA)
12319         .addReg(ptrB);
12320     BuildMI(BB, dl, TII->get(PPC::BCC))
12321         .addImm(PPC::PRED_NE)
12322         .addReg(PPC::CR0)
12323         .addMBB(loop1MBB);
12324     BuildMI(BB, dl, TII->get(PPC::B)).addMBB(exitMBB);
12325     BB->addSuccessor(loop1MBB);
12326     BB->addSuccessor(exitMBB);
12327 
12328     BB = midMBB;
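    // On the failure path, still execute a st[bhwd]cx. of the just-loaded
    // value so that the reservation does not remain outstanding.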
12329     BuildMI(BB, dl, TII->get(StoreMnemonic))
12330         .addReg(dest)
12331         .addReg(ptrA)
12332         .addReg(ptrB);
12333     BB->addSuccessor(exitMBB);
12334 
12335     //  exitMBB:
12336     //   ...
12337     BB = exitMBB;
12338   } else if (MI.getOpcode() == PPC::ATOMIC_CMP_SWAP_I8 ||
12339              MI.getOpcode() == PPC::ATOMIC_CMP_SWAP_I16) {
12340     // We must use 64-bit registers for addresses when targeting 64-bit,
12341     // since we're actually doing arithmetic on them.  Other registers
12342     // can be 32-bit.
12343     bool is64bit = Subtarget.isPPC64();
12344     bool isLittleEndian = Subtarget.isLittleEndian();
12345     bool is8bit = MI.getOpcode() == PPC::ATOMIC_CMP_SWAP_I8;
12346 
12347     Register dest = MI.getOperand(0).getReg();
12348     Register ptrA = MI.getOperand(1).getReg();
12349     Register ptrB = MI.getOperand(2).getReg();
12350     Register oldval = MI.getOperand(3).getReg();
12351     Register newval = MI.getOperand(4).getReg();
12352     DebugLoc dl = MI.getDebugLoc();
12353 
12354     MachineBasicBlock *loop1MBB = F->CreateMachineBasicBlock(LLVM_BB);
12355     MachineBasicBlock *loop2MBB = F->CreateMachineBasicBlock(LLVM_BB);
12356     MachineBasicBlock *midMBB = F->CreateMachineBasicBlock(LLVM_BB);
12357     MachineBasicBlock *exitMBB = F->CreateMachineBasicBlock(LLVM_BB);
12358     F->insert(It, loop1MBB);
12359     F->insert(It, loop2MBB);
12360     F->insert(It, midMBB);
12361     F->insert(It, exitMBB);
12362     exitMBB->splice(exitMBB->begin(), BB,
12363                     std::next(MachineBasicBlock::iterator(MI)), BB->end());
12364     exitMBB->transferSuccessorsAndUpdatePHIs(BB);
12365 
12366     MachineRegisterInfo &RegInfo = F->getRegInfo();
12367     const TargetRegisterClass *RC =
12368         is64bit ? &PPC::G8RCRegClass : &PPC::GPRCRegClass;
12369     const TargetRegisterClass *GPRC = &PPC::GPRCRegClass;
12370 
12371     Register PtrReg = RegInfo.createVirtualRegister(RC);
12372     Register Shift1Reg = RegInfo.createVirtualRegister(GPRC);
12373     Register ShiftReg =
12374         isLittleEndian ? Shift1Reg : RegInfo.createVirtualRegister(GPRC);
12375     Register NewVal2Reg = RegInfo.createVirtualRegister(GPRC);
12376     Register NewVal3Reg = RegInfo.createVirtualRegister(GPRC);
12377     Register OldVal2Reg = RegInfo.createVirtualRegister(GPRC);
12378     Register OldVal3Reg = RegInfo.createVirtualRegister(GPRC);
12379     Register MaskReg = RegInfo.createVirtualRegister(GPRC);
12380     Register Mask2Reg = RegInfo.createVirtualRegister(GPRC);
12381     Register Mask3Reg = RegInfo.createVirtualRegister(GPRC);
12382     Register Tmp2Reg = RegInfo.createVirtualRegister(GPRC);
12383     Register Tmp4Reg = RegInfo.createVirtualRegister(GPRC);
12384     Register TmpDestReg = RegInfo.createVirtualRegister(GPRC);
12385     Register Ptr1Reg;
12386     Register TmpReg = RegInfo.createVirtualRegister(GPRC);
12387     Register ZeroReg = is64bit ? PPC::ZERO8 : PPC::ZERO;
12388     //  thisMBB:
12389     //   ...
    //   fallthrough --> loop1MBB
12391     BB->addSuccessor(loop1MBB);
12392 
12393     // The 4-byte load must be aligned, while a char or short may be
12394     // anywhere in the word.  Hence all this nasty bookkeeping code.
12395     //   add ptr1, ptrA, ptrB [copy if ptrA==0]
12396     //   rlwinm shift1, ptr1, 3, 27, 28 [3, 27, 27]
12397     //   xori shift, shift1, 24 [16]
12398     //   rlwinm ptr, ptr1, 0, 0, 29
12399     //   slw newval2, newval, shift
    //   slw oldval2, oldval, shift
12401     //   li mask2, 255 [li mask3, 0; ori mask2, mask3, 65535]
12402     //   slw mask, mask2, shift
12403     //   and newval3, newval2, mask
12404     //   and oldval3, oldval2, mask
12405     // loop1MBB:
12406     //   lwarx tmpDest, ptr
12407     //   and tmp, tmpDest, mask
12408     //   cmpw tmp, oldval3
12409     //   bne- midMBB
12410     // loop2MBB:
12411     //   andc tmp2, tmpDest, mask
12412     //   or tmp4, tmp2, newval3
12413     //   stwcx. tmp4, ptr
12414     //   bne- loop1MBB
12415     //   b exitBB
12416     // midMBB:
12417     //   stwcx. tmpDest, ptr
12418     // exitBB:
    //   srw dest, tmp, shift
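    // As a worked illustration (not emitted code): for a halfword at a
    // big-endian address with (ptr1 & 3) == 2, the rlwinm in the sequence
    // above computes shift1 = (ptr1 << 3) & 0x10 = 16 and the xori flips it
    // to shift = 16 ^ 16 = 0, since the halfword occupies bits 15:0 of the
    // word; on little-endian the xori is skipped and shift = 16 is used
    // directly. For bytes, the 0x18 mask yields shifts of 0, 8, 16 or 24.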
12420     if (ptrA != ZeroReg) {
12421       Ptr1Reg = RegInfo.createVirtualRegister(RC);
12422       BuildMI(BB, dl, TII->get(is64bit ? PPC::ADD8 : PPC::ADD4), Ptr1Reg)
12423           .addReg(ptrA)
12424           .addReg(ptrB);
12425     } else {
12426       Ptr1Reg = ptrB;
12427     }
12428 
    // We need to use a 32-bit subregister here to avoid a register class
    // mismatch in 64-bit mode.
12431     BuildMI(BB, dl, TII->get(PPC::RLWINM), Shift1Reg)
12432         .addReg(Ptr1Reg, 0, is64bit ? PPC::sub_32 : 0)
12433         .addImm(3)
12434         .addImm(27)
12435         .addImm(is8bit ? 28 : 27);
12436     if (!isLittleEndian)
12437       BuildMI(BB, dl, TII->get(PPC::XORI), ShiftReg)
12438           .addReg(Shift1Reg)
12439           .addImm(is8bit ? 24 : 16);
12440     if (is64bit)
12441       BuildMI(BB, dl, TII->get(PPC::RLDICR), PtrReg)
12442           .addReg(Ptr1Reg)
12443           .addImm(0)
12444           .addImm(61);
12445     else
12446       BuildMI(BB, dl, TII->get(PPC::RLWINM), PtrReg)
12447           .addReg(Ptr1Reg)
12448           .addImm(0)
12449           .addImm(0)
12450           .addImm(29);
12451     BuildMI(BB, dl, TII->get(PPC::SLW), NewVal2Reg)
12452         .addReg(newval)
12453         .addReg(ShiftReg);
12454     BuildMI(BB, dl, TII->get(PPC::SLW), OldVal2Reg)
12455         .addReg(oldval)
12456         .addReg(ShiftReg);
12457     if (is8bit)
12458       BuildMI(BB, dl, TII->get(PPC::LI), Mask2Reg).addImm(255);
12459     else {
12460       BuildMI(BB, dl, TII->get(PPC::LI), Mask3Reg).addImm(0);
12461       BuildMI(BB, dl, TII->get(PPC::ORI), Mask2Reg)
12462           .addReg(Mask3Reg)
12463           .addImm(65535);
12464     }
12465     BuildMI(BB, dl, TII->get(PPC::SLW), MaskReg)
12466         .addReg(Mask2Reg)
12467         .addReg(ShiftReg);
12468     BuildMI(BB, dl, TII->get(PPC::AND), NewVal3Reg)
12469         .addReg(NewVal2Reg)
12470         .addReg(MaskReg);
12471     BuildMI(BB, dl, TII->get(PPC::AND), OldVal3Reg)
12472         .addReg(OldVal2Reg)
12473         .addReg(MaskReg);
12474 
12475     BB = loop1MBB;
12476     BuildMI(BB, dl, TII->get(PPC::LWARX), TmpDestReg)
12477         .addReg(ZeroReg)
12478         .addReg(PtrReg);
12479     BuildMI(BB, dl, TII->get(PPC::AND), TmpReg)
12480         .addReg(TmpDestReg)
12481         .addReg(MaskReg);
12482     BuildMI(BB, dl, TII->get(PPC::CMPW), PPC::CR0)
12483         .addReg(TmpReg)
12484         .addReg(OldVal3Reg);
12485     BuildMI(BB, dl, TII->get(PPC::BCC))
12486         .addImm(PPC::PRED_NE)
12487         .addReg(PPC::CR0)
12488         .addMBB(midMBB);
12489     BB->addSuccessor(loop2MBB);
12490     BB->addSuccessor(midMBB);
12491 
12492     BB = loop2MBB;
12493     BuildMI(BB, dl, TII->get(PPC::ANDC), Tmp2Reg)
12494         .addReg(TmpDestReg)
12495         .addReg(MaskReg);
12496     BuildMI(BB, dl, TII->get(PPC::OR), Tmp4Reg)
12497         .addReg(Tmp2Reg)
12498         .addReg(NewVal3Reg);
12499     BuildMI(BB, dl, TII->get(PPC::STWCX))
12500         .addReg(Tmp4Reg)
12501         .addReg(ZeroReg)
12502         .addReg(PtrReg);
12503     BuildMI(BB, dl, TII->get(PPC::BCC))
12504         .addImm(PPC::PRED_NE)
12505         .addReg(PPC::CR0)
12506         .addMBB(loop1MBB);
12507     BuildMI(BB, dl, TII->get(PPC::B)).addMBB(exitMBB);
12508     BB->addSuccessor(loop1MBB);
12509     BB->addSuccessor(exitMBB);
12510 
12511     BB = midMBB;
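    // The comparison failed; store the original value back. A successful
    // stwcx. here serves only to clear the reservation obtained by the
    // lwarx, leaving the memory contents unchanged.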
12512     BuildMI(BB, dl, TII->get(PPC::STWCX))
12513         .addReg(TmpDestReg)
12514         .addReg(ZeroReg)
12515         .addReg(PtrReg);
12516     BB->addSuccessor(exitMBB);
12517 
12518     //  exitMBB:
12519     //   ...
12520     BB = exitMBB;
12521     BuildMI(*BB, BB->begin(), dl, TII->get(PPC::SRW), dest)
12522         .addReg(TmpReg)
12523         .addReg(ShiftReg);
12524   } else if (MI.getOpcode() == PPC::FADDrtz) {
12525     // This pseudo performs an FADD with rounding mode temporarily forced
12526     // to round-to-zero.  We emit this via custom inserter since the FPSCR
12527     // is not modeled at the SelectionDAG level.
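    // Round-to-zero is RN = 0b01 in FPSCR bits 62:63 (see the rounding-mode
    // table in the SETRNDi handling below): the mtfsb1/mtfsb0 pair below
    // sets the low RN bit and clears the high one.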
12528     Register Dest = MI.getOperand(0).getReg();
12529     Register Src1 = MI.getOperand(1).getReg();
12530     Register Src2 = MI.getOperand(2).getReg();
12531     DebugLoc dl = MI.getDebugLoc();
12532 
12533     MachineRegisterInfo &RegInfo = F->getRegInfo();
12534     Register MFFSReg = RegInfo.createVirtualRegister(&PPC::F8RCRegClass);
12535 
12536     // Save FPSCR value.
12537     BuildMI(*BB, MI, dl, TII->get(PPC::MFFS), MFFSReg);
12538 
12539     // Set rounding mode to round-to-zero.
12540     BuildMI(*BB, MI, dl, TII->get(PPC::MTFSB1))
12541         .addImm(31)
12542         .addReg(PPC::RM, RegState::ImplicitDefine);
12543 
12544     BuildMI(*BB, MI, dl, TII->get(PPC::MTFSB0))
12545         .addImm(30)
12546         .addReg(PPC::RM, RegState::ImplicitDefine);
12547 
12548     // Perform addition.
12549     auto MIB = BuildMI(*BB, MI, dl, TII->get(PPC::FADD), Dest)
12550                    .addReg(Src1)
12551                    .addReg(Src2);
12552     if (MI.getFlag(MachineInstr::NoFPExcept))
12553       MIB.setMIFlag(MachineInstr::NoFPExcept);
12554 
12555     // Restore FPSCR value.
12556     BuildMI(*BB, MI, dl, TII->get(PPC::MTFSFb)).addImm(1).addReg(MFFSReg);
12557   } else if (MI.getOpcode() == PPC::ANDI_rec_1_EQ_BIT ||
12558              MI.getOpcode() == PPC::ANDI_rec_1_GT_BIT ||
12559              MI.getOpcode() == PPC::ANDI_rec_1_EQ_BIT8 ||
12560              MI.getOpcode() == PPC::ANDI_rec_1_GT_BIT8) {
12561     unsigned Opcode = (MI.getOpcode() == PPC::ANDI_rec_1_EQ_BIT8 ||
12562                        MI.getOpcode() == PPC::ANDI_rec_1_GT_BIT8)
12563                           ? PPC::ANDI8_rec
12564                           : PPC::ANDI_rec;
12565     bool IsEQ = (MI.getOpcode() == PPC::ANDI_rec_1_EQ_BIT ||
12566                  MI.getOpcode() == PPC::ANDI_rec_1_EQ_BIT8);
12567 
12568     MachineRegisterInfo &RegInfo = F->getRegInfo();
12569     Register Dest = RegInfo.createVirtualRegister(
12570         Opcode == PPC::ANDI_rec ? &PPC::GPRCRegClass : &PPC::G8RCRegClass);
12571 
12572     DebugLoc Dl = MI.getDebugLoc();
12573     BuildMI(*BB, MI, Dl, TII->get(Opcode), Dest)
12574         .addReg(MI.getOperand(1).getReg())
12575         .addImm(1);
12576     BuildMI(*BB, MI, Dl, TII->get(TargetOpcode::COPY),
12577             MI.getOperand(0).getReg())
12578         .addReg(IsEQ ? PPC::CR0EQ : PPC::CR0GT);
12579   } else if (MI.getOpcode() == PPC::TCHECK_RET) {
12580     DebugLoc Dl = MI.getDebugLoc();
12581     MachineRegisterInfo &RegInfo = F->getRegInfo();
12582     Register CRReg = RegInfo.createVirtualRegister(&PPC::CRRCRegClass);
12583     BuildMI(*BB, MI, Dl, TII->get(PPC::TCHECK), CRReg);
12584     BuildMI(*BB, MI, Dl, TII->get(TargetOpcode::COPY),
12585             MI.getOperand(0).getReg())
12586         .addReg(CRReg);
12587   } else if (MI.getOpcode() == PPC::TBEGIN_RET) {
12588     DebugLoc Dl = MI.getDebugLoc();
12589     unsigned Imm = MI.getOperand(1).getImm();
12590     BuildMI(*BB, MI, Dl, TII->get(PPC::TBEGIN)).addImm(Imm);
12591     BuildMI(*BB, MI, Dl, TII->get(TargetOpcode::COPY),
12592             MI.getOperand(0).getReg())
12593         .addReg(PPC::CR0EQ);
12594   } else if (MI.getOpcode() == PPC::SETRNDi) {
12595     DebugLoc dl = MI.getDebugLoc();
12596     Register OldFPSCRReg = MI.getOperand(0).getReg();
12597 
12598     // Save FPSCR value.
12599     BuildMI(*BB, MI, dl, TII->get(PPC::MFFS), OldFPSCRReg);
12600 
    // The floating point rounding mode is in bits 62:63 of the FPSCR, and has
    // the following settings:
12603     //   00 Round to nearest
12604     //   01 Round to 0
12605     //   10 Round to +inf
12606     //   11 Round to -inf
12607 
    // When the operand is an immediate, use its two least significant bits to
    // set bits 62:63 of the FPSCR.
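    // For example (illustrative only), Mode == 3 emits "mtfsb1 31; mtfsb1 30"
    // and so leaves RN = 0b11 (round to -inf), while Mode == 0 emits
    // "mtfsb0 31; mtfsb0 30" for round-to-nearest.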
12610     unsigned Mode = MI.getOperand(1).getImm();
12611     BuildMI(*BB, MI, dl, TII->get((Mode & 1) ? PPC::MTFSB1 : PPC::MTFSB0))
12612         .addImm(31)
12613         .addReg(PPC::RM, RegState::ImplicitDefine);
12614 
12615     BuildMI(*BB, MI, dl, TII->get((Mode & 2) ? PPC::MTFSB1 : PPC::MTFSB0))
12616         .addImm(30)
12617         .addReg(PPC::RM, RegState::ImplicitDefine);
12618   } else if (MI.getOpcode() == PPC::SETRND) {
12619     DebugLoc dl = MI.getDebugLoc();
12620 
    // Copy a register from F8RCRegClass (SrcReg) to G8RCRegClass (DestReg),
    // or from G8RCRegClass (SrcReg) to F8RCRegClass (DestReg). If the target
    // doesn't have DirectMove, we have to go through the stack, because the
    // target lacks instructions such as mtvsrd and mfvsrd that could do the
    // conversion directly.
12626     auto copyRegFromG8RCOrF8RC = [&] (unsigned DestReg, unsigned SrcReg) {
12627       if (Subtarget.hasDirectMove()) {
12628         BuildMI(*BB, MI, dl, TII->get(TargetOpcode::COPY), DestReg)
12629           .addReg(SrcReg);
12630       } else {
12631         // Use stack to do the register copy.
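        // Sketched as assembly for the FPR -> GPR direction (the offset is
        // illustrative; the real code addresses a frame index):
        //   stfd fSrc, off(r1)
        //   ld   rDest, off(r1)
        // The GPR -> FPR direction uses std/lfd symmetrically.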
12632         unsigned StoreOp = PPC::STD, LoadOp = PPC::LFD;
12633         MachineRegisterInfo &RegInfo = F->getRegInfo();
12634         const TargetRegisterClass *RC = RegInfo.getRegClass(SrcReg);
12635         if (RC == &PPC::F8RCRegClass) {
          // Copy register from F8RCRegClass to G8RCRegClass.
12637           assert((RegInfo.getRegClass(DestReg) == &PPC::G8RCRegClass) &&
12638                  "Unsupported RegClass.");
12639 
12640           StoreOp = PPC::STFD;
12641           LoadOp = PPC::LD;
12642         } else {
          // Copy register from G8RCRegClass to F8RCRegClass.
12644           assert((RegInfo.getRegClass(SrcReg) == &PPC::G8RCRegClass) &&
12645                  (RegInfo.getRegClass(DestReg) == &PPC::F8RCRegClass) &&
12646                  "Unsupported RegClass.");
12647         }
12648 
12649         MachineFrameInfo &MFI = F->getFrameInfo();
12650         int FrameIdx = MFI.CreateStackObject(8, Align(8), false);
12651 
12652         MachineMemOperand *MMOStore = F->getMachineMemOperand(
12653             MachinePointerInfo::getFixedStack(*F, FrameIdx, 0),
12654             MachineMemOperand::MOStore, MFI.getObjectSize(FrameIdx),
12655             MFI.getObjectAlign(FrameIdx));
12656 
12657         // Store the SrcReg into the stack.
12658         BuildMI(*BB, MI, dl, TII->get(StoreOp))
12659           .addReg(SrcReg)
12660           .addImm(0)
12661           .addFrameIndex(FrameIdx)
12662           .addMemOperand(MMOStore);
12663 
12664         MachineMemOperand *MMOLoad = F->getMachineMemOperand(
12665             MachinePointerInfo::getFixedStack(*F, FrameIdx, 0),
12666             MachineMemOperand::MOLoad, MFI.getObjectSize(FrameIdx),
12667             MFI.getObjectAlign(FrameIdx));
12668 
12669         // Load from the stack where SrcReg is stored, and save to DestReg,
12670         // so we have done the RegClass conversion from RegClass::SrcReg to
12671         // RegClass::DestReg.
12672         BuildMI(*BB, MI, dl, TII->get(LoadOp), DestReg)
12673           .addImm(0)
12674           .addFrameIndex(FrameIdx)
12675           .addMemOperand(MMOLoad);
12676       }
12677     };
12678 
12679     Register OldFPSCRReg = MI.getOperand(0).getReg();
12680 
12681     // Save FPSCR value.
12682     BuildMI(*BB, MI, dl, TII->get(PPC::MFFS), OldFPSCRReg);
12683 
    // When the operand is a gprc register, use its two least significant bits
    // together with the mtfsf instruction to set bits 62:63 of the FPSCR.
12686     //
12687     // copy OldFPSCRTmpReg, OldFPSCRReg
12688     // (INSERT_SUBREG ExtSrcReg, (IMPLICIT_DEF ImDefReg), SrcOp, 1)
12689     // rldimi NewFPSCRTmpReg, ExtSrcReg, OldFPSCRReg, 0, 62
12690     // copy NewFPSCRReg, NewFPSCRTmpReg
12691     // mtfsf 255, NewFPSCRReg
12692     MachineOperand SrcOp = MI.getOperand(1);
12693     MachineRegisterInfo &RegInfo = F->getRegInfo();
12694     Register OldFPSCRTmpReg = RegInfo.createVirtualRegister(&PPC::G8RCRegClass);
12695 
12696     copyRegFromG8RCOrF8RC(OldFPSCRTmpReg, OldFPSCRReg);
12697 
12698     Register ImDefReg = RegInfo.createVirtualRegister(&PPC::G8RCRegClass);
12699     Register ExtSrcReg = RegInfo.createVirtualRegister(&PPC::G8RCRegClass);
12700 
    // The first operand of INSERT_SUBREG should be a register that has
    // subregisters. Since we only care about its register class, we can use
    // an IMPLICIT_DEF register.
12704     BuildMI(*BB, MI, dl, TII->get(TargetOpcode::IMPLICIT_DEF), ImDefReg);
12705     BuildMI(*BB, MI, dl, TII->get(PPC::INSERT_SUBREG), ExtSrcReg)
12706       .addReg(ImDefReg)
12707       .add(SrcOp)
12708       .addImm(1);
12709 
12710     Register NewFPSCRTmpReg = RegInfo.createVirtualRegister(&PPC::G8RCRegClass);
12711     BuildMI(*BB, MI, dl, TII->get(PPC::RLDIMI), NewFPSCRTmpReg)
12712       .addReg(OldFPSCRTmpReg)
12713       .addReg(ExtSrcReg)
12714       .addImm(0)
12715       .addImm(62);
12716 
12717     Register NewFPSCRReg = RegInfo.createVirtualRegister(&PPC::F8RCRegClass);
12718     copyRegFromG8RCOrF8RC(NewFPSCRReg, NewFPSCRTmpReg);
12719 
    // The field mask 255 selects all eight 4-bit fields of the FPSCR, i.e.
    // bits 32:63 of NewFPSCRReg are written to bits 32:63 of the FPSCR.
12722     BuildMI(*BB, MI, dl, TII->get(PPC::MTFSF))
12723       .addImm(255)
12724       .addReg(NewFPSCRReg)
12725       .addImm(0)
12726       .addImm(0);
12727   } else if (MI.getOpcode() == PPC::SETFLM) {
12728     DebugLoc Dl = MI.getDebugLoc();
12729 
12730     // Result of setflm is previous FPSCR content, so we need to save it first.
12731     Register OldFPSCRReg = MI.getOperand(0).getReg();
12732     BuildMI(*BB, MI, Dl, TII->get(PPC::MFFS), OldFPSCRReg);
12733 
12734     // Put bits in 32:63 to FPSCR.
12735     Register NewFPSCRReg = MI.getOperand(1).getReg();
12736     BuildMI(*BB, MI, Dl, TII->get(PPC::MTFSF))
12737         .addImm(255)
12738         .addReg(NewFPSCRReg)
12739         .addImm(0)
12740         .addImm(0);
12741   } else if (MI.getOpcode() == PPC::PROBED_ALLOCA_32 ||
12742              MI.getOpcode() == PPC::PROBED_ALLOCA_64) {
12743     return emitProbedAlloca(MI, BB);
12744   } else {
12745     llvm_unreachable("Unexpected instr type to insert");
12746   }
12747 
12748   MI.eraseFromParent(); // The pseudo instruction is gone now.
12749   return BB;
12750 }
12751 
12752 //===----------------------------------------------------------------------===//
12753 // Target Optimization Hooks
12754 //===----------------------------------------------------------------------===//
12755 
12756 static int getEstimateRefinementSteps(EVT VT, const PPCSubtarget &Subtarget) {
12757   // For the estimates, convergence is quadratic, so we essentially double the
12758   // number of digits correct after every iteration. For both FRE and FRSQRTE,
12759   // the minimum architected relative accuracy is 2^-5. When hasRecipPrec(),
  // this is 2^-14. IEEE float has 23 fraction bits and double has 52.
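  // Concretely: starting from 5 correct bits, three iterations give
  // 5 -> 10 -> 20 -> 40 bits, enough for f32's 24-bit significand, and a
  // fourth reaches 80 bits for f64's 53 bits. With hasRecipPrec(), one
  // iteration (14 -> 28) covers f32 and a second (-> 56) covers f64.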
12761   int RefinementSteps = Subtarget.hasRecipPrec() ? 1 : 3;
12762   if (VT.getScalarType() == MVT::f64)
12763     RefinementSteps++;
12764   return RefinementSteps;
12765 }
12766 
12767 SDValue PPCTargetLowering::getSqrtInputTest(SDValue Op, SelectionDAG &DAG,
12768                                             const DenormalMode &Mode) const {
12769   // We only have VSX Vector Test for software Square Root.
12770   EVT VT = Op.getValueType();
12771   if (!isTypeLegal(MVT::i1) ||
12772       (VT != MVT::f64 &&
12773        ((VT != MVT::v2f64 && VT != MVT::v4f32) || !Subtarget.hasVSX())))
12774     return SDValue();
12775 
12776   SDLoc DL(Op);
  // The output register of FTSQRT is a CR field.
12778   SDValue FTSQRT = DAG.getNode(PPCISD::FTSQRT, DL, MVT::i32, Op);
12779   // ftsqrt BF,FRB
12780   // Let e_b be the unbiased exponent of the double-precision
12781   // floating-point operand in register FRB.
12782   // fe_flag is set to 1 if either of the following conditions occurs.
  //   - The double-precision floating-point operand in register FRB is a
  //     zero, a NaN, an infinity, or a negative value.
12785   //   - e_b is less than or equal to -970.
12786   // Otherwise fe_flag is set to 0.
  // Both VSX and non-VSX versions would set the EQ bit in the CR if the
  // number is not eligible for iteration (zero/negative/infinity/NaN, or the
  // unbiased exponent is less than -970).
12790   SDValue SRIdxVal = DAG.getTargetConstant(PPC::sub_eq, DL, MVT::i32);
12791   return SDValue(DAG.getMachineNode(TargetOpcode::EXTRACT_SUBREG, DL, MVT::i1,
12792                                     FTSQRT, SRIdxVal),
12793                  0);
12794 }
12795 
12796 SDValue
12797 PPCTargetLowering::getSqrtResultForDenormInput(SDValue Op,
12798                                                SelectionDAG &DAG) const {
12799   // We only have VSX Vector Square Root.
12800   EVT VT = Op.getValueType();
12801   if (VT != MVT::f64 &&
12802       ((VT != MVT::v2f64 && VT != MVT::v4f32) || !Subtarget.hasVSX()))
12803     return TargetLowering::getSqrtResultForDenormInput(Op, DAG);
12804 
12805   return DAG.getNode(PPCISD::FSQRT, SDLoc(Op), VT, Op);
12806 }
12807 
12808 SDValue PPCTargetLowering::getSqrtEstimate(SDValue Operand, SelectionDAG &DAG,
12809                                            int Enabled, int &RefinementSteps,
12810                                            bool &UseOneConstNR,
12811                                            bool Reciprocal) const {
12812   EVT VT = Operand.getValueType();
12813   if ((VT == MVT::f32 && Subtarget.hasFRSQRTES()) ||
12814       (VT == MVT::f64 && Subtarget.hasFRSQRTE()) ||
12815       (VT == MVT::v4f32 && Subtarget.hasAltivec()) ||
12816       (VT == MVT::v2f64 && Subtarget.hasVSX())) {
12817     if (RefinementSteps == ReciprocalEstimate::Unspecified)
12818       RefinementSteps = getEstimateRefinementSteps(VT, Subtarget);
12819 
12820     // The Newton-Raphson computation with a single constant does not provide
12821     // enough accuracy on some CPUs.
12822     UseOneConstNR = !Subtarget.needsTwoConstNR();
12823     return DAG.getNode(PPCISD::FRSQRTE, SDLoc(Operand), VT, Operand);
12824   }
12825   return SDValue();
12826 }
12827 
12828 SDValue PPCTargetLowering::getRecipEstimate(SDValue Operand, SelectionDAG &DAG,
12829                                             int Enabled,
12830                                             int &RefinementSteps) const {
12831   EVT VT = Operand.getValueType();
12832   if ((VT == MVT::f32 && Subtarget.hasFRES()) ||
12833       (VT == MVT::f64 && Subtarget.hasFRE()) ||
12834       (VT == MVT::v4f32 && Subtarget.hasAltivec()) ||
12835       (VT == MVT::v2f64 && Subtarget.hasVSX())) {
12836     if (RefinementSteps == ReciprocalEstimate::Unspecified)
12837       RefinementSteps = getEstimateRefinementSteps(VT, Subtarget);
12838     return DAG.getNode(PPCISD::FRE, SDLoc(Operand), VT, Operand);
12839   }
12840   return SDValue();
12841 }
12842 
12843 unsigned PPCTargetLowering::combineRepeatedFPDivisors() const {
12844   // Note: This functionality is used only when unsafe-fp-math is enabled, and
12845   // on cores with reciprocal estimates (which are used when unsafe-fp-math is
12846   // enabled for division), this functionality is redundant with the default
12847   // combiner logic (once the division -> reciprocal/multiply transformation
12848   // has taken place). As a result, this matters more for older cores than for
12849   // newer ones.
12850 
  // Combine multiple FDIVs with the same divisor into multiple FMULs by the
  // reciprocal if there are two or more FDIVs (for embedded cores with only
  // one FP pipeline) or three or more FDIVs (for generic OOO cores).
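  // For example, "a/d; b/d" can become "r = 1.0/d; a*r; b*r", trading the
  // second (and any further) division by d for a multiply.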
12854   switch (Subtarget.getCPUDirective()) {
12855   default:
12856     return 3;
12857   case PPC::DIR_440:
12858   case PPC::DIR_A2:
12859   case PPC::DIR_E500:
12860   case PPC::DIR_E500mc:
12861   case PPC::DIR_E5500:
12862     return 2;
12863   }
12864 }
12865 
12866 // isConsecutiveLSLoc needs to work even if all adds have not yet been
12867 // collapsed, and so we need to look through chains of them.
static void getBaseWithConstantOffset(SDValue Loc, SDValue &Base,
                                      int64_t &Offset, SelectionDAG &DAG) {
12870   if (DAG.isBaseWithConstantOffset(Loc)) {
12871     Base = Loc.getOperand(0);
12872     Offset += cast<ConstantSDNode>(Loc.getOperand(1))->getSExtValue();
12873 
12874     // The base might itself be a base plus an offset, and if so, accumulate
12875     // that as well.
12876     getBaseWithConstantOffset(Loc.getOperand(0), Base, Offset, DAG);
12877   }
12878 }
12879 
static bool isConsecutiveLSLoc(SDValue Loc, EVT VT, LSBaseSDNode *Base,
                               unsigned Bytes, int Dist, SelectionDAG &DAG) {
12883   if (VT.getSizeInBits() / 8 != Bytes)
12884     return false;
12885 
12886   SDValue BaseLoc = Base->getBasePtr();
12887   if (Loc.getOpcode() == ISD::FrameIndex) {
12888     if (BaseLoc.getOpcode() != ISD::FrameIndex)
12889       return false;
12890     const MachineFrameInfo &MFI = DAG.getMachineFunction().getFrameInfo();
12891     int FI  = cast<FrameIndexSDNode>(Loc)->getIndex();
12892     int BFI = cast<FrameIndexSDNode>(BaseLoc)->getIndex();
12893     int FS  = MFI.getObjectSize(FI);
12894     int BFS = MFI.getObjectSize(BFI);
12895     if (FS != BFS || FS != (int)Bytes) return false;
12896     return MFI.getObjectOffset(FI) == (MFI.getObjectOffset(BFI) + Dist*Bytes);
12897   }
12898 
12899   SDValue Base1 = Loc, Base2 = BaseLoc;
12900   int64_t Offset1 = 0, Offset2 = 0;
12901   getBaseWithConstantOffset(Loc, Base1, Offset1, DAG);
12902   getBaseWithConstantOffset(BaseLoc, Base2, Offset2, DAG);
12903   if (Base1 == Base2 && Offset1 == (Offset2 + Dist * Bytes))
12904     return true;
12905 
12906   const TargetLowering &TLI = DAG.getTargetLoweringInfo();
12907   const GlobalValue *GV1 = nullptr;
12908   const GlobalValue *GV2 = nullptr;
12909   Offset1 = 0;
12910   Offset2 = 0;
12911   bool isGA1 = TLI.isGAPlusOffset(Loc.getNode(), GV1, Offset1);
12912   bool isGA2 = TLI.isGAPlusOffset(BaseLoc.getNode(), GV2, Offset2);
12913   if (isGA1 && isGA2 && GV1 == GV2)
12914     return Offset1 == (Offset2 + Dist*Bytes);
12915   return false;
12916 }
12917 
12918 // Like SelectionDAG::isConsecutiveLoad, but also works for stores, and does
12919 // not enforce equality of the chain operands.
12920 static bool isConsecutiveLS(SDNode *N, LSBaseSDNode *Base,
12921                             unsigned Bytes, int Dist,
12922                             SelectionDAG &DAG) {
12923   if (LSBaseSDNode *LS = dyn_cast<LSBaseSDNode>(N)) {
12924     EVT VT = LS->getMemoryVT();
12925     SDValue Loc = LS->getBasePtr();
12926     return isConsecutiveLSLoc(Loc, VT, Base, Bytes, Dist, DAG);
12927   }
12928 
12929   if (N->getOpcode() == ISD::INTRINSIC_W_CHAIN) {
12930     EVT VT;
12931     switch (cast<ConstantSDNode>(N->getOperand(1))->getZExtValue()) {
12932     default: return false;
12933     case Intrinsic::ppc_altivec_lvx:
12934     case Intrinsic::ppc_altivec_lvxl:
12935     case Intrinsic::ppc_vsx_lxvw4x:
12936     case Intrinsic::ppc_vsx_lxvw4x_be:
12937       VT = MVT::v4i32;
12938       break;
12939     case Intrinsic::ppc_vsx_lxvd2x:
12940     case Intrinsic::ppc_vsx_lxvd2x_be:
12941       VT = MVT::v2f64;
12942       break;
12943     case Intrinsic::ppc_altivec_lvebx:
12944       VT = MVT::i8;
12945       break;
12946     case Intrinsic::ppc_altivec_lvehx:
12947       VT = MVT::i16;
12948       break;
12949     case Intrinsic::ppc_altivec_lvewx:
12950       VT = MVT::i32;
12951       break;
12952     }
12953 
12954     return isConsecutiveLSLoc(N->getOperand(2), VT, Base, Bytes, Dist, DAG);
12955   }
12956 
12957   if (N->getOpcode() == ISD::INTRINSIC_VOID) {
12958     EVT VT;
12959     switch (cast<ConstantSDNode>(N->getOperand(1))->getZExtValue()) {
12960     default: return false;
12961     case Intrinsic::ppc_altivec_stvx:
12962     case Intrinsic::ppc_altivec_stvxl:
12963     case Intrinsic::ppc_vsx_stxvw4x:
12964       VT = MVT::v4i32;
12965       break;
12966     case Intrinsic::ppc_vsx_stxvd2x:
12967       VT = MVT::v2f64;
12968       break;
12969     case Intrinsic::ppc_vsx_stxvw4x_be:
12970       VT = MVT::v4i32;
12971       break;
12972     case Intrinsic::ppc_vsx_stxvd2x_be:
12973       VT = MVT::v2f64;
12974       break;
12975     case Intrinsic::ppc_altivec_stvebx:
12976       VT = MVT::i8;
12977       break;
12978     case Intrinsic::ppc_altivec_stvehx:
12979       VT = MVT::i16;
12980       break;
12981     case Intrinsic::ppc_altivec_stvewx:
12982       VT = MVT::i32;
12983       break;
12984     }
12985 
12986     return isConsecutiveLSLoc(N->getOperand(3), VT, Base, Bytes, Dist, DAG);
12987   }
12988 
12989   return false;
12990 }
12991 
// Return true if there is a nearby consecutive load to the one provided
// (regardless of alignment). We search up and down the chain, looking through
// token factors and other loads (but nothing else). As a result, a true
// result indicates that it is safe to create a new consecutive load adjacent
// to the load provided.
12997 static bool findConsecutiveLoad(LoadSDNode *LD, SelectionDAG &DAG) {
12998   SDValue Chain = LD->getChain();
12999   EVT VT = LD->getMemoryVT();
13000 
13001   SmallSet<SDNode *, 16> LoadRoots;
13002   SmallVector<SDNode *, 8> Queue(1, Chain.getNode());
13003   SmallSet<SDNode *, 16> Visited;
13004 
13005   // First, search up the chain, branching to follow all token-factor operands.
  // If we find a consecutive load, then we're done; otherwise, record all
13007   // nodes just above the top-level loads and token factors.
13008   while (!Queue.empty()) {
13009     SDNode *ChainNext = Queue.pop_back_val();
13010     if (!Visited.insert(ChainNext).second)
13011       continue;
13012 
13013     if (MemSDNode *ChainLD = dyn_cast<MemSDNode>(ChainNext)) {
13014       if (isConsecutiveLS(ChainLD, LD, VT.getStoreSize(), 1, DAG))
13015         return true;
13016 
13017       if (!Visited.count(ChainLD->getChain().getNode()))
13018         Queue.push_back(ChainLD->getChain().getNode());
13019     } else if (ChainNext->getOpcode() == ISD::TokenFactor) {
13020       for (const SDUse &O : ChainNext->ops())
13021         if (!Visited.count(O.getNode()))
13022           Queue.push_back(O.getNode());
13023     } else
13024       LoadRoots.insert(ChainNext);
13025   }
13026 
13027   // Second, search down the chain, starting from the top-level nodes recorded
13028   // in the first phase. These top-level nodes are the nodes just above all
  // loads and token factors. Starting with their uses, recursively look
13030   // all loads (just the chain uses) and token factors to find a consecutive
13031   // load.
13032   Visited.clear();
13033   Queue.clear();
13034 
13035   for (SmallSet<SDNode *, 16>::iterator I = LoadRoots.begin(),
13036        IE = LoadRoots.end(); I != IE; ++I) {
13037     Queue.push_back(*I);
13038 
13039     while (!Queue.empty()) {
13040       SDNode *LoadRoot = Queue.pop_back_val();
13041       if (!Visited.insert(LoadRoot).second)
13042         continue;
13043 
13044       if (MemSDNode *ChainLD = dyn_cast<MemSDNode>(LoadRoot))
13045         if (isConsecutiveLS(ChainLD, LD, VT.getStoreSize(), 1, DAG))
13046           return true;
13047 
13048       for (SDNode::use_iterator UI = LoadRoot->use_begin(),
13049            UE = LoadRoot->use_end(); UI != UE; ++UI)
13050         if (((isa<MemSDNode>(*UI) &&
13051             cast<MemSDNode>(*UI)->getChain().getNode() == LoadRoot) ||
13052             UI->getOpcode() == ISD::TokenFactor) && !Visited.count(*UI))
13053           Queue.push_back(*UI);
13054     }
13055   }
13056 
13057   return false;
13058 }
13059 
/// This function is called when we have proved that a SETCC node can be
/// replaced by subtraction (and other supporting instructions) so that the
/// result of the comparison is kept in a GPR instead of a CR. This function
/// is purely for codegen purposes and has some flags to guide the codegen
/// process.
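/// As an illustrative example: for 32-bit operands compared with SETULT in a
/// 64-bit context, both operands are zero-extended to i64 and subtracted; the
/// sign bit of the 64-bit difference is 1 exactly when the first operand is
/// unsigned-less-than the second, and shifting it down to bit 0 yields the
/// i1 result. Swap and Complement derive the other unsigned predicates from
/// this one.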
13064 static SDValue generateEquivalentSub(SDNode *N, int Size, bool Complement,
13065                                      bool Swap, SDLoc &DL, SelectionDAG &DAG) {
13066   assert(N->getOpcode() == ISD::SETCC && "ISD::SETCC Expected.");
13067 
13068   // Zero extend the operands to the largest legal integer. Originally, they
13069   // must be of a strictly smaller size.
13070   auto Op0 = DAG.getNode(ISD::ZERO_EXTEND, DL, MVT::i64, N->getOperand(0),
13071                          DAG.getConstant(Size, DL, MVT::i32));
13072   auto Op1 = DAG.getNode(ISD::ZERO_EXTEND, DL, MVT::i64, N->getOperand(1),
13073                          DAG.getConstant(Size, DL, MVT::i32));
13074 
13075   // Swap if needed. Depends on the condition code.
13076   if (Swap)
13077     std::swap(Op0, Op1);
13078 
13079   // Subtract extended integers.
13080   auto SubNode = DAG.getNode(ISD::SUB, DL, MVT::i64, Op0, Op1);
13081 
13082   // Move the sign bit to the least significant position and zero out the rest.
13083   // Now the least significant bit carries the result of original comparison.
13084   auto Shifted = DAG.getNode(ISD::SRL, DL, MVT::i64, SubNode,
13085                              DAG.getConstant(Size - 1, DL, MVT::i32));
13086   auto Final = Shifted;
13087 
13088   // Complement the result if needed. Based on the condition code.
13089   if (Complement)
13090     Final = DAG.getNode(ISD::XOR, DL, MVT::i64, Shifted,
13091                         DAG.getConstant(1, DL, MVT::i64));
13092 
13093   return DAG.getNode(ISD::TRUNCATE, DL, MVT::i1, Final);
13094 }
13095 
13096 SDValue PPCTargetLowering::ConvertSETCCToSubtract(SDNode *N,
13097                                                   DAGCombinerInfo &DCI) const {
13098   assert(N->getOpcode() == ISD::SETCC && "ISD::SETCC Expected.");
13099 
13100   SelectionDAG &DAG = DCI.DAG;
13101   SDLoc DL(N);
13102 
  // The size of the integers being compared has a critical role in the
  // following analysis, so we prefer to do this when all types are legal.
13105   if (!DCI.isAfterLegalizeDAG())
13106     return SDValue();
13107 
  // If all users of the SETCC extend its value to a legal integer type,
  // then we replace the SETCC with a subtraction.
13110   for (SDNode::use_iterator UI = N->use_begin(),
13111        UE = N->use_end(); UI != UE; ++UI) {
13112     if (UI->getOpcode() != ISD::ZERO_EXTEND)
13113       return SDValue();
13114   }
13115 
13116   ISD::CondCode CC = cast<CondCodeSDNode>(N->getOperand(2))->get();
13117   auto OpSize = N->getOperand(0).getValueSizeInBits();
13118 
13119   unsigned Size = DAG.getDataLayout().getLargestLegalIntTypeSizeInBits();
13120 
13121   if (OpSize < Size) {
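    // Each unsigned predicate maps onto the subtraction-based SETULT recipe:
    // SETULE is !(y < x) (swap + complement), SETUGT is y < x (swap), and
    // SETUGE is !(x < y) (complement).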
13122     switch (CC) {
13123     default: break;
13124     case ISD::SETULT:
13125       return generateEquivalentSub(N, Size, false, false, DL, DAG);
13126     case ISD::SETULE:
13127       return generateEquivalentSub(N, Size, true, true, DL, DAG);
13128     case ISD::SETUGT:
13129       return generateEquivalentSub(N, Size, false, true, DL, DAG);
13130     case ISD::SETUGE:
13131       return generateEquivalentSub(N, Size, true, false, DL, DAG);
13132     }
13133   }
13134 
13135   return SDValue();
13136 }
13137 
13138 SDValue PPCTargetLowering::DAGCombineTruncBoolExt(SDNode *N,
13139                                                   DAGCombinerInfo &DCI) const {
13140   SelectionDAG &DAG = DCI.DAG;
13141   SDLoc dl(N);
13142 
13143   assert(Subtarget.useCRBits() && "Expecting to be tracking CR bits");
13144   // If we're tracking CR bits, we need to be careful that we don't have:
13145   //   trunc(binary-ops(zext(x), zext(y)))
13146   // or
  //   trunc(binary-ops(binary-ops(zext(x), zext(y)), ...))
13148   // such that we're unnecessarily moving things into GPRs when it would be
13149   // better to keep them in CR bits.
13150 
13151   // Note that trunc here can be an actual i1 trunc, or can be the effective
13152   // truncation that comes from a setcc or select_cc.
13153   if (N->getOpcode() == ISD::TRUNCATE &&
13154       N->getValueType(0) != MVT::i1)
13155     return SDValue();
13156 
13157   if (N->getOperand(0).getValueType() != MVT::i32 &&
13158       N->getOperand(0).getValueType() != MVT::i64)
13159     return SDValue();
13160 
13161   if (N->getOpcode() == ISD::SETCC ||
13162       N->getOpcode() == ISD::SELECT_CC) {
    // If we're looking at a comparison, then we need to make sure that the
    // high bits (all except for the first) don't affect the result.
13165     ISD::CondCode CC =
13166       cast<CondCodeSDNode>(N->getOperand(
13167         N->getOpcode() == ISD::SETCC ? 2 : 4))->get();
13168     unsigned OpBits = N->getOperand(0).getValueSizeInBits();
13169 
13170     if (ISD::isSignedIntSetCC(CC)) {
13171       if (DAG.ComputeNumSignBits(N->getOperand(0)) != OpBits ||
13172           DAG.ComputeNumSignBits(N->getOperand(1)) != OpBits)
13173         return SDValue();
13174     } else if (ISD::isUnsignedIntSetCC(CC)) {
13175       if (!DAG.MaskedValueIsZero(N->getOperand(0),
13176                                  APInt::getHighBitsSet(OpBits, OpBits-1)) ||
13177           !DAG.MaskedValueIsZero(N->getOperand(1),
13178                                  APInt::getHighBitsSet(OpBits, OpBits-1)))
13179         return (N->getOpcode() == ISD::SETCC ? ConvertSETCCToSubtract(N, DCI)
13180                                              : SDValue());
13181     } else {
      // This is neither a signed nor an unsigned comparison; just make sure
      // that the high bits are equal.
13184       KnownBits Op1Known = DAG.computeKnownBits(N->getOperand(0));
13185       KnownBits Op2Known = DAG.computeKnownBits(N->getOperand(1));
13186 
13187       // We don't really care about what is known about the first bit (if
13188       // anything), so clear it in all masks prior to comparing them.
13189       Op1Known.Zero.clearBit(0); Op1Known.One.clearBit(0);
13190       Op2Known.Zero.clearBit(0); Op2Known.One.clearBit(0);
13191 
13192       if (Op1Known.Zero != Op2Known.Zero || Op1Known.One != Op2Known.One)
13193         return SDValue();
13194     }
13195   }
13196 
  // We now know that the higher-order bits are irrelevant; we just need to
13198   // make sure that all of the intermediate operations are bit operations, and
13199   // all inputs are extensions.
13200   if (N->getOperand(0).getOpcode() != ISD::AND &&
13201       N->getOperand(0).getOpcode() != ISD::OR  &&
13202       N->getOperand(0).getOpcode() != ISD::XOR &&
13203       N->getOperand(0).getOpcode() != ISD::SELECT &&
13204       N->getOperand(0).getOpcode() != ISD::SELECT_CC &&
13205       N->getOperand(0).getOpcode() != ISD::TRUNCATE &&
13206       N->getOperand(0).getOpcode() != ISD::SIGN_EXTEND &&
13207       N->getOperand(0).getOpcode() != ISD::ZERO_EXTEND &&
13208       N->getOperand(0).getOpcode() != ISD::ANY_EXTEND)
13209     return SDValue();
13210 
13211   if ((N->getOpcode() == ISD::SETCC || N->getOpcode() == ISD::SELECT_CC) &&
13212       N->getOperand(1).getOpcode() != ISD::AND &&
13213       N->getOperand(1).getOpcode() != ISD::OR  &&
13214       N->getOperand(1).getOpcode() != ISD::XOR &&
13215       N->getOperand(1).getOpcode() != ISD::SELECT &&
13216       N->getOperand(1).getOpcode() != ISD::SELECT_CC &&
13217       N->getOperand(1).getOpcode() != ISD::TRUNCATE &&
13218       N->getOperand(1).getOpcode() != ISD::SIGN_EXTEND &&
13219       N->getOperand(1).getOpcode() != ISD::ZERO_EXTEND &&
13220       N->getOperand(1).getOpcode() != ISD::ANY_EXTEND)
13221     return SDValue();
13222 
13223   SmallVector<SDValue, 4> Inputs;
13224   SmallVector<SDValue, 8> BinOps, PromOps;
13225   SmallPtrSet<SDNode *, 16> Visited;
13226 
13227   for (unsigned i = 0; i < 2; ++i) {
13228     if (((N->getOperand(i).getOpcode() == ISD::SIGN_EXTEND ||
13229           N->getOperand(i).getOpcode() == ISD::ZERO_EXTEND ||
13230           N->getOperand(i).getOpcode() == ISD::ANY_EXTEND) &&
13231           N->getOperand(i).getOperand(0).getValueType() == MVT::i1) ||
13232         isa<ConstantSDNode>(N->getOperand(i)))
13233       Inputs.push_back(N->getOperand(i));
13234     else
13235       BinOps.push_back(N->getOperand(i));
13236 
13237     if (N->getOpcode() == ISD::TRUNCATE)
13238       break;
13239   }
13240 
13241   // Visit all inputs, collect all binary operations (and, or, xor and
13242   // select) that are all fed by extensions.
13243   while (!BinOps.empty()) {
13244     SDValue BinOp = BinOps.back();
13245     BinOps.pop_back();
13246 
13247     if (!Visited.insert(BinOp.getNode()).second)
13248       continue;
13249 
13250     PromOps.push_back(BinOp);
13251 
13252     for (unsigned i = 0, ie = BinOp.getNumOperands(); i != ie; ++i) {
13253       // The condition of the select is not promoted.
13254       if (BinOp.getOpcode() == ISD::SELECT && i == 0)
13255         continue;
13256       if (BinOp.getOpcode() == ISD::SELECT_CC && i != 2 && i != 3)
13257         continue;
13258 
13259       if (((BinOp.getOperand(i).getOpcode() == ISD::SIGN_EXTEND ||
13260             BinOp.getOperand(i).getOpcode() == ISD::ZERO_EXTEND ||
13261             BinOp.getOperand(i).getOpcode() == ISD::ANY_EXTEND) &&
13262            BinOp.getOperand(i).getOperand(0).getValueType() == MVT::i1) ||
13263           isa<ConstantSDNode>(BinOp.getOperand(i))) {
13264         Inputs.push_back(BinOp.getOperand(i));
13265       } else if (BinOp.getOperand(i).getOpcode() == ISD::AND ||
13266                  BinOp.getOperand(i).getOpcode() == ISD::OR  ||
13267                  BinOp.getOperand(i).getOpcode() == ISD::XOR ||
13268                  BinOp.getOperand(i).getOpcode() == ISD::SELECT ||
13269                  BinOp.getOperand(i).getOpcode() == ISD::SELECT_CC ||
13270                  BinOp.getOperand(i).getOpcode() == ISD::TRUNCATE ||
13271                  BinOp.getOperand(i).getOpcode() == ISD::SIGN_EXTEND ||
13272                  BinOp.getOperand(i).getOpcode() == ISD::ZERO_EXTEND ||
13273                  BinOp.getOperand(i).getOpcode() == ISD::ANY_EXTEND) {
13274         BinOps.push_back(BinOp.getOperand(i));
13275       } else {
13276         // We have an input that is not an extension or another binary
13277         // operation; we'll abort this transformation.
13278         return SDValue();
13279       }
13280     }
13281   }
13282 
13283   // Make sure that this is a self-contained cluster of operations (which
13284   // is not quite the same thing as saying that everything has only one
13285   // use).
13286   for (unsigned i = 0, ie = Inputs.size(); i != ie; ++i) {
13287     if (isa<ConstantSDNode>(Inputs[i]))
13288       continue;
13289 
13290     for (SDNode::use_iterator UI = Inputs[i].getNode()->use_begin(),
13291                               UE = Inputs[i].getNode()->use_end();
13292          UI != UE; ++UI) {
13293       SDNode *User = *UI;
13294       if (User != N && !Visited.count(User))
13295         return SDValue();
13296 
13297       // Make sure that we're not going to promote the non-output-value
      // operand(s) of SELECT or SELECT_CC.
13299       // FIXME: Although we could sometimes handle this, and it does occur in
13300       // practice that one of the condition inputs to the select is also one of
13301       // the outputs, we currently can't deal with this.
13302       if (User->getOpcode() == ISD::SELECT) {
13303         if (User->getOperand(0) == Inputs[i])
13304           return SDValue();
13305       } else if (User->getOpcode() == ISD::SELECT_CC) {
13306         if (User->getOperand(0) == Inputs[i] ||
13307             User->getOperand(1) == Inputs[i])
13308           return SDValue();
13309       }
13310     }
13311   }
13312 
13313   for (unsigned i = 0, ie = PromOps.size(); i != ie; ++i) {
13314     for (SDNode::use_iterator UI = PromOps[i].getNode()->use_begin(),
13315                               UE = PromOps[i].getNode()->use_end();
13316          UI != UE; ++UI) {
13317       SDNode *User = *UI;
13318       if (User != N && !Visited.count(User))
13319         return SDValue();
13320 
13321       // Make sure that we're not going to promote the non-output-value
      // operand(s) of SELECT or SELECT_CC.
13323       // FIXME: Although we could sometimes handle this, and it does occur in
13324       // practice that one of the condition inputs to the select is also one of
13325       // the outputs, we currently can't deal with this.
13326       if (User->getOpcode() == ISD::SELECT) {
13327         if (User->getOperand(0) == PromOps[i])
13328           return SDValue();
13329       } else if (User->getOpcode() == ISD::SELECT_CC) {
13330         if (User->getOperand(0) == PromOps[i] ||
13331             User->getOperand(1) == PromOps[i])
13332           return SDValue();
13333       }
13334     }
13335   }
13336 
13337   // Replace all inputs with the extension operand.
13338   for (unsigned i = 0, ie = Inputs.size(); i != ie; ++i) {
13339     // Constants may have users outside the cluster of to-be-promoted nodes,
13340     // and so we need to replace those as we do the promotions.
    if (isa<ConstantSDNode>(Inputs[i]))
      continue;

    DAG.ReplaceAllUsesOfValueWith(Inputs[i], Inputs[i].getOperand(0));
13345   }
13346 
13347   std::list<HandleSDNode> PromOpHandles;
13348   for (auto &PromOp : PromOps)
13349     PromOpHandles.emplace_back(PromOp);
13350 
13351   // Replace all operations (these are all the same, but have a different
13352   // (i1) return type). DAG.getNode will validate that the types of
13353   // a binary operator match, so go through the list in reverse so that
13354   // we've likely promoted both operands first. Any intermediate truncations or
13355   // extensions disappear.
13356   while (!PromOpHandles.empty()) {
13357     SDValue PromOp = PromOpHandles.back().getValue();
13358     PromOpHandles.pop_back();
13359 
13360     if (PromOp.getOpcode() == ISD::TRUNCATE ||
13361         PromOp.getOpcode() == ISD::SIGN_EXTEND ||
13362         PromOp.getOpcode() == ISD::ZERO_EXTEND ||
13363         PromOp.getOpcode() == ISD::ANY_EXTEND) {
13364       if (!isa<ConstantSDNode>(PromOp.getOperand(0)) &&
13365           PromOp.getOperand(0).getValueType() != MVT::i1) {
13366         // The operand is not yet ready (see comment below).
13367         PromOpHandles.emplace_front(PromOp);
13368         continue;
13369       }
13370 
13371       SDValue RepValue = PromOp.getOperand(0);
13372       if (isa<ConstantSDNode>(RepValue))
13373         RepValue = DAG.getNode(ISD::TRUNCATE, dl, MVT::i1, RepValue);
13374 
13375       DAG.ReplaceAllUsesOfValueWith(PromOp, RepValue);
13376       continue;
13377     }
13378 
13379     unsigned C;
13380     switch (PromOp.getOpcode()) {
13381     default:             C = 0; break;
13382     case ISD::SELECT:    C = 1; break;
13383     case ISD::SELECT_CC: C = 2; break;
13384     }
13385 
13386     if ((!isa<ConstantSDNode>(PromOp.getOperand(C)) &&
13387          PromOp.getOperand(C).getValueType() != MVT::i1) ||
13388         (!isa<ConstantSDNode>(PromOp.getOperand(C+1)) &&
13389          PromOp.getOperand(C+1).getValueType() != MVT::i1)) {
13390       // The to-be-promoted operands of this node have not yet been
13391       // promoted (this should be rare because we're going through the
13392       // list backward, but if one of the operands has several users in
13393       // this cluster of to-be-promoted nodes, it is possible).
13394       PromOpHandles.emplace_front(PromOp);
13395       continue;
13396     }
13397 
13398     SmallVector<SDValue, 3> Ops(PromOp.getNode()->op_begin(),
13399                                 PromOp.getNode()->op_end());
13400 
13401     // If there are any constant inputs, make sure they're replaced now.
13402     for (unsigned i = 0; i < 2; ++i)
13403       if (isa<ConstantSDNode>(Ops[C+i]))
13404         Ops[C+i] = DAG.getNode(ISD::TRUNCATE, dl, MVT::i1, Ops[C+i]);
13405 
13406     DAG.ReplaceAllUsesOfValueWith(PromOp,
13407       DAG.getNode(PromOp.getOpcode(), dl, MVT::i1, Ops));
13408   }
13409 
13410   // Now we're left with the initial truncation itself.
13411   if (N->getOpcode() == ISD::TRUNCATE)
13412     return N->getOperand(0);
13413 
13414   // Otherwise, this is a comparison. The operands to be compared have just
13415   // changed type (to i1), but everything else is the same.
13416   return SDValue(N, 0);
13417 }
13418 
13419 SDValue PPCTargetLowering::DAGCombineExtBoolTrunc(SDNode *N,
13420                                                   DAGCombinerInfo &DCI) const {
13421   SelectionDAG &DAG = DCI.DAG;
13422   SDLoc dl(N);
13423 
13424   // If we're tracking CR bits, we need to be careful that we don't have:
13425   //   zext(binary-ops(trunc(x), trunc(y)))
13426   // or
  //   zext(binary-ops(binary-ops(trunc(x), trunc(y)), ...))
13428   // such that we're unnecessarily moving things into CR bits that can more
13429   // efficiently stay in GPRs. Note that if we're not certain that the high
13430   // bits are set as required by the final extension, we still may need to do
13431   // some masking to get the proper behavior.
13432 
13433   // This same functionality is important on PPC64 when dealing with
13434   // 32-to-64-bit extensions; these occur often when 32-bit values are used as
13435   // the return values of functions. Because it is so similar, it is handled
13436   // here as well.
13437 
13438   if (N->getValueType(0) != MVT::i32 &&
13439       N->getValueType(0) != MVT::i64)
13440     return SDValue();
13441 
13442   if (!((N->getOperand(0).getValueType() == MVT::i1 && Subtarget.useCRBits()) ||
13443         (N->getOperand(0).getValueType() == MVT::i32 && Subtarget.isPPC64())))
13444     return SDValue();
13445 
13446   if (N->getOperand(0).getOpcode() != ISD::AND &&
13447       N->getOperand(0).getOpcode() != ISD::OR  &&
13448       N->getOperand(0).getOpcode() != ISD::XOR &&
13449       N->getOperand(0).getOpcode() != ISD::SELECT &&
13450       N->getOperand(0).getOpcode() != ISD::SELECT_CC)
13451     return SDValue();
13452 
13453   SmallVector<SDValue, 4> Inputs;
13454   SmallVector<SDValue, 8> BinOps(1, N->getOperand(0)), PromOps;
13455   SmallPtrSet<SDNode *, 16> Visited;
13456 
13457   // Visit all inputs, collect all binary operations (and, or, xor and
13458   // select) that are all fed by truncations.
13459   while (!BinOps.empty()) {
13460     SDValue BinOp = BinOps.back();
13461     BinOps.pop_back();
13462 
13463     if (!Visited.insert(BinOp.getNode()).second)
13464       continue;
13465 
13466     PromOps.push_back(BinOp);
13467 
13468     for (unsigned i = 0, ie = BinOp.getNumOperands(); i != ie; ++i) {
13469       // The condition of the select is not promoted.
13470       if (BinOp.getOpcode() == ISD::SELECT && i == 0)
13471         continue;
13472       if (BinOp.getOpcode() == ISD::SELECT_CC && i != 2 && i != 3)
13473         continue;
13474 
13475       if (BinOp.getOperand(i).getOpcode() == ISD::TRUNCATE ||
13476           isa<ConstantSDNode>(BinOp.getOperand(i))) {
13477         Inputs.push_back(BinOp.getOperand(i));
13478       } else if (BinOp.getOperand(i).getOpcode() == ISD::AND ||
13479                  BinOp.getOperand(i).getOpcode() == ISD::OR  ||
13480                  BinOp.getOperand(i).getOpcode() == ISD::XOR ||
13481                  BinOp.getOperand(i).getOpcode() == ISD::SELECT ||
13482                  BinOp.getOperand(i).getOpcode() == ISD::SELECT_CC) {
13483         BinOps.push_back(BinOp.getOperand(i));
13484       } else {
13485         // We have an input that is not a truncation or another binary
13486         // operation; we'll abort this transformation.
13487         return SDValue();
13488       }
13489     }
13490   }
13491 
13492   // The operands of a select that must be truncated when the select is
13493   // promoted because the operand is actually part of the to-be-promoted set.
13494   DenseMap<SDNode *, EVT> SelectTruncOp[2];
13495 
13496   // Make sure that this is a self-contained cluster of operations (which
13497   // is not quite the same thing as saying that everything has only one
13498   // use).
13499   for (unsigned i = 0, ie = Inputs.size(); i != ie; ++i) {
13500     if (isa<ConstantSDNode>(Inputs[i]))
13501       continue;
13502 
13503     for (SDNode::use_iterator UI = Inputs[i].getNode()->use_begin(),
13504                               UE = Inputs[i].getNode()->use_end();
13505          UI != UE; ++UI) {
13506       SDNode *User = *UI;
13507       if (User != N && !Visited.count(User))
13508         return SDValue();
13509 
      // If we're going to promote the non-output-value operand(s) of SELECT or
13511       // SELECT_CC, record them for truncation.
13512       if (User->getOpcode() == ISD::SELECT) {
13513         if (User->getOperand(0) == Inputs[i])
13514           SelectTruncOp[0].insert(std::make_pair(User,
13515                                     User->getOperand(0).getValueType()));
13516       } else if (User->getOpcode() == ISD::SELECT_CC) {
13517         if (User->getOperand(0) == Inputs[i])
13518           SelectTruncOp[0].insert(std::make_pair(User,
13519                                     User->getOperand(0).getValueType()));
13520         if (User->getOperand(1) == Inputs[i])
13521           SelectTruncOp[1].insert(std::make_pair(User,
13522                                     User->getOperand(1).getValueType()));
13523       }
13524     }
13525   }
13526 
13527   for (unsigned i = 0, ie = PromOps.size(); i != ie; ++i) {
13528     for (SDNode::use_iterator UI = PromOps[i].getNode()->use_begin(),
13529                               UE = PromOps[i].getNode()->use_end();
13530          UI != UE; ++UI) {
13531       SDNode *User = *UI;
13532       if (User != N && !Visited.count(User))
13533         return SDValue();
13534 
      // If we're going to promote the non-output-value operand(s) of SELECT or
13536       // SELECT_CC, record them for truncation.
13537       if (User->getOpcode() == ISD::SELECT) {
13538         if (User->getOperand(0) == PromOps[i])
13539           SelectTruncOp[0].insert(std::make_pair(User,
13540                                     User->getOperand(0).getValueType()));
13541       } else if (User->getOpcode() == ISD::SELECT_CC) {
13542         if (User->getOperand(0) == PromOps[i])
13543           SelectTruncOp[0].insert(std::make_pair(User,
13544                                     User->getOperand(0).getValueType()));
13545         if (User->getOperand(1) == PromOps[i])
13546           SelectTruncOp[1].insert(std::make_pair(User,
13547                                     User->getOperand(1).getValueType()));
13548       }
13549     }
13550   }
13551 
13552   unsigned PromBits = N->getOperand(0).getValueSizeInBits();
13553   bool ReallyNeedsExt = false;
13554   if (N->getOpcode() != ISD::ANY_EXTEND) {
    // If the inputs are not all already sign/zero-extended, then
    // we'll still need to do that at the end.
13557     for (unsigned i = 0, ie = Inputs.size(); i != ie; ++i) {
13558       if (isa<ConstantSDNode>(Inputs[i]))
13559         continue;
13560 
13561       unsigned OpBits =
13562         Inputs[i].getOperand(0).getValueSizeInBits();
13563       assert(PromBits < OpBits && "Truncation not to a smaller bit count?");
13564 
13565       if ((N->getOpcode() == ISD::ZERO_EXTEND &&
13566            !DAG.MaskedValueIsZero(Inputs[i].getOperand(0),
13567                                   APInt::getHighBitsSet(OpBits,
13568                                                         OpBits-PromBits))) ||
13569           (N->getOpcode() == ISD::SIGN_EXTEND &&
13570            DAG.ComputeNumSignBits(Inputs[i].getOperand(0)) <
13571              (OpBits-(PromBits-1)))) {
13572         ReallyNeedsExt = true;
13573         break;
13574       }
13575     }
13576   }
13577 
13578   // Replace all inputs, either with the truncation operand, or a
13579   // truncation or extension to the final output type.
13580   for (unsigned i = 0, ie = Inputs.size(); i != ie; ++i) {
13581     // Constant inputs need to be replaced with the to-be-promoted nodes that
13582     // use them because they might have users outside of the cluster of
13583     // promoted nodes.
13584     if (isa<ConstantSDNode>(Inputs[i]))
13585       continue;
13586 
13587     SDValue InSrc = Inputs[i].getOperand(0);
13588     if (Inputs[i].getValueType() == N->getValueType(0))
13589       DAG.ReplaceAllUsesOfValueWith(Inputs[i], InSrc);
13590     else if (N->getOpcode() == ISD::SIGN_EXTEND)
13591       DAG.ReplaceAllUsesOfValueWith(Inputs[i],
13592         DAG.getSExtOrTrunc(InSrc, dl, N->getValueType(0)));
13593     else if (N->getOpcode() == ISD::ZERO_EXTEND)
13594       DAG.ReplaceAllUsesOfValueWith(Inputs[i],
13595         DAG.getZExtOrTrunc(InSrc, dl, N->getValueType(0)));
13596     else
13597       DAG.ReplaceAllUsesOfValueWith(Inputs[i],
13598         DAG.getAnyExtOrTrunc(InSrc, dl, N->getValueType(0)));
13599   }
13600 
13601   std::list<HandleSDNode> PromOpHandles;
13602   for (auto &PromOp : PromOps)
13603     PromOpHandles.emplace_back(PromOp);
13604 
13605   // Replace all operations (these are all the same, but have a different
13606   // (promoted) return type). DAG.getNode will validate that the types of
13607   // a binary operator match, so go through the list in reverse so that
13608   // we've likely promoted both operands first.
13609   while (!PromOpHandles.empty()) {
13610     SDValue PromOp = PromOpHandles.back().getValue();
13611     PromOpHandles.pop_back();
13612 
13613     unsigned C;
13614     switch (PromOp.getOpcode()) {
13615     default:             C = 0; break;
13616     case ISD::SELECT:    C = 1; break;
13617     case ISD::SELECT_CC: C = 2; break;
13618     }
13619 
13620     if ((!isa<ConstantSDNode>(PromOp.getOperand(C)) &&
13621          PromOp.getOperand(C).getValueType() != N->getValueType(0)) ||
13622         (!isa<ConstantSDNode>(PromOp.getOperand(C+1)) &&
13623          PromOp.getOperand(C+1).getValueType() != N->getValueType(0))) {
13624       // The to-be-promoted operands of this node have not yet been
13625       // promoted (this should be rare because we're going through the
13626       // list backward, but if one of the operands has several users in
13627       // this cluster of to-be-promoted nodes, it is possible).
13628       PromOpHandles.emplace_front(PromOp);
13629       continue;
13630     }
13631 
13632     // For SELECT and SELECT_CC nodes, we do a similar check for any
13633     // to-be-promoted comparison inputs.
13634     if (PromOp.getOpcode() == ISD::SELECT ||
13635         PromOp.getOpcode() == ISD::SELECT_CC) {
13636       if ((SelectTruncOp[0].count(PromOp.getNode()) &&
13637            PromOp.getOperand(0).getValueType() != N->getValueType(0)) ||
13638           (SelectTruncOp[1].count(PromOp.getNode()) &&
13639            PromOp.getOperand(1).getValueType() != N->getValueType(0))) {
13640         PromOpHandles.emplace_front(PromOp);
13641         continue;
13642       }
13643     }
13644 
13645     SmallVector<SDValue, 3> Ops(PromOp.getNode()->op_begin(),
13646                                 PromOp.getNode()->op_end());
13647 
13648     // If this node has constant inputs, then they'll need to be promoted here.
13649     for (unsigned i = 0; i < 2; ++i) {
13650       if (!isa<ConstantSDNode>(Ops[C+i]))
13651         continue;
13652       if (Ops[C+i].getValueType() == N->getValueType(0))
13653         continue;
13654 
13655       if (N->getOpcode() == ISD::SIGN_EXTEND)
13656         Ops[C+i] = DAG.getSExtOrTrunc(Ops[C+i], dl, N->getValueType(0));
13657       else if (N->getOpcode() == ISD::ZERO_EXTEND)
13658         Ops[C+i] = DAG.getZExtOrTrunc(Ops[C+i], dl, N->getValueType(0));
13659       else
13660         Ops[C+i] = DAG.getAnyExtOrTrunc(Ops[C+i], dl, N->getValueType(0));
13661     }
13662 
13663     // If we've promoted the comparison inputs of a SELECT or SELECT_CC,
13664     // truncate them again to the original value type.
13665     if (PromOp.getOpcode() == ISD::SELECT ||
13666         PromOp.getOpcode() == ISD::SELECT_CC) {
13667       auto SI0 = SelectTruncOp[0].find(PromOp.getNode());
13668       if (SI0 != SelectTruncOp[0].end())
13669         Ops[0] = DAG.getNode(ISD::TRUNCATE, dl, SI0->second, Ops[0]);
13670       auto SI1 = SelectTruncOp[1].find(PromOp.getNode());
13671       if (SI1 != SelectTruncOp[1].end())
13672         Ops[1] = DAG.getNode(ISD::TRUNCATE, dl, SI1->second, Ops[1]);
13673     }
13674 
13675     DAG.ReplaceAllUsesOfValueWith(PromOp,
13676       DAG.getNode(PromOp.getOpcode(), dl, N->getValueType(0), Ops));
13677   }
13678 
13679   // Now we're left with the initial extension itself.
13680   if (!ReallyNeedsExt)
13681     return N->getOperand(0);
13682 
13683   // To zero extend, just mask off everything except for the first bit (in the
13684   // i1 case).
13685   if (N->getOpcode() == ISD::ZERO_EXTEND)
13686     return DAG.getNode(ISD::AND, dl, N->getValueType(0), N->getOperand(0),
13687                        DAG.getConstant(APInt::getLowBitsSet(
13688                                          N->getValueSizeInBits(0), PromBits),
13689                                        dl, N->getValueType(0)));
13690 
13691   assert(N->getOpcode() == ISD::SIGN_EXTEND &&
13692          "Invalid extension type");
13693   EVT ShiftAmountTy = getShiftAmountTy(N->getValueType(0), DAG.getDataLayout());
13694   SDValue ShiftCst =
13695       DAG.getConstant(N->getValueSizeInBits(0) - PromBits, dl, ShiftAmountTy);
13696   return DAG.getNode(
13697       ISD::SRA, dl, N->getValueType(0),
13698       DAG.getNode(ISD::SHL, dl, N->getValueType(0), N->getOperand(0), ShiftCst),
13699       ShiftCst);
13700 }
13701 
13702 SDValue PPCTargetLowering::combineSetCC(SDNode *N,
13703                                         DAGCombinerInfo &DCI) const {
13704   assert(N->getOpcode() == ISD::SETCC &&
13705          "Should be called with a SETCC node");
13706 
13707   ISD::CondCode CC = cast<CondCodeSDNode>(N->getOperand(2))->get();
13708   if (CC == ISD::SETNE || CC == ISD::SETEQ) {
13709     SDValue LHS = N->getOperand(0);
13710     SDValue RHS = N->getOperand(1);
13711 
13712     // If there is a '0 - y' pattern, canonicalize the pattern to the RHS.
13713     if (LHS.getOpcode() == ISD::SUB && isNullConstant(LHS.getOperand(0)) &&
13714         LHS.hasOneUse())
13715       std::swap(LHS, RHS);
13716 
13717     // x == 0-y --> x+y == 0
13718     // x != 0-y --> x+y != 0
13719     if (RHS.getOpcode() == ISD::SUB && isNullConstant(RHS.getOperand(0)) &&
13720         RHS.hasOneUse()) {
13721       SDLoc DL(N);
13722       SelectionDAG &DAG = DCI.DAG;
13723       EVT VT = N->getValueType(0);
13724       EVT OpVT = LHS.getValueType();
13725       SDValue Add = DAG.getNode(ISD::ADD, DL, OpVT, LHS, RHS.getOperand(1));
13726       return DAG.getSetCC(DL, VT, Add, DAG.getConstant(0, DL, OpVT), CC);
13727     }
13728   }
13729 
13730   return DAGCombineTruncBoolExt(N, DCI);
13731 }
13732 
13733 // Is this an extending load from an f32 to an f64?
13734 static bool isFPExtLoad(SDValue Op) {
13735   if (LoadSDNode *LD = dyn_cast<LoadSDNode>(Op.getNode()))
13736     return LD->getExtensionType() == ISD::EXTLOAD &&
13737       Op.getValueType() == MVT::f64;
13738   return false;
13739 }
13740 
/// Reduces the number of fp-to-int conversions when building a vector.
13742 ///
13743 /// If this vector is built out of floating to integer conversions,
13744 /// transform it to a vector built out of floating point values followed by a
13745 /// single floating to integer conversion of the vector.
13746 /// Namely  (build_vector (fptosi $A), (fptosi $B), ...)
13747 /// becomes (fptosi (build_vector ($A, $B, ...)))
13748 SDValue PPCTargetLowering::
13749 combineElementTruncationToVectorTruncation(SDNode *N,
13750                                            DAGCombinerInfo &DCI) const {
13751   assert(N->getOpcode() == ISD::BUILD_VECTOR &&
13752          "Should be called with a BUILD_VECTOR node");
13753 
13754   SelectionDAG &DAG = DCI.DAG;
13755   SDLoc dl(N);
13756 
13757   SDValue FirstInput = N->getOperand(0);
13758   assert(FirstInput.getOpcode() == PPCISD::MFVSR &&
13759          "The input operand must be an fp-to-int conversion.");
13760 
  // This combine happens after legalization, so the fp_to_[su]i nodes are
  // already converted to PPCISD nodes.
13763   unsigned FirstConversion = FirstInput.getOperand(0).getOpcode();
13764   if (FirstConversion == PPCISD::FCTIDZ ||
13765       FirstConversion == PPCISD::FCTIDUZ ||
13766       FirstConversion == PPCISD::FCTIWZ ||
13767       FirstConversion == PPCISD::FCTIWUZ) {
13768     bool IsSplat = true;
13769     bool Is32Bit = FirstConversion == PPCISD::FCTIWZ ||
13770       FirstConversion == PPCISD::FCTIWUZ;
13771     EVT SrcVT = FirstInput.getOperand(0).getValueType();
13772     SmallVector<SDValue, 4> Ops;
13773     EVT TargetVT = N->getValueType(0);
13774     for (int i = 0, e = N->getNumOperands(); i < e; ++i) {
13775       SDValue NextOp = N->getOperand(i);
13776       if (NextOp.getOpcode() != PPCISD::MFVSR)
13777         return SDValue();
13778       unsigned NextConversion = NextOp.getOperand(0).getOpcode();
13779       if (NextConversion != FirstConversion)
13780         return SDValue();
13781       // If we are converting to 32-bit integers, we need to add an FP_ROUND.
13782       // This is not valid if the input was originally double precision. It is
13783       // also not profitable to do unless this is an extending load in which
13784       // case doing this combine will allow us to combine consecutive loads.
13785       if (Is32Bit && !isFPExtLoad(NextOp.getOperand(0).getOperand(0)))
13786         return SDValue();
13787       if (N->getOperand(i) != FirstInput)
13788         IsSplat = false;
13789     }
13790 
13791     // If this is a splat, we leave it as-is since there will be only a single
13792     // fp-to-int conversion followed by a splat of the integer. This is better
13793     // for 32-bit and smaller ints and neutral for 64-bit ints.
13794     if (IsSplat)
13795       return SDValue();
13796 
    // Now that we know we have the right type of node, get its operands.
13798     for (int i = 0, e = N->getNumOperands(); i < e; ++i) {
13799       SDValue In = N->getOperand(i).getOperand(0);
13800       if (Is32Bit) {
13801         // For 32-bit values, we need to add an FP_ROUND node (if we made it
13802         // here, we know that all inputs are extending loads so this is safe).
13803         if (In.isUndef())
13804           Ops.push_back(DAG.getUNDEF(SrcVT));
13805         else {
13806           SDValue Trunc = DAG.getNode(ISD::FP_ROUND, dl,
13807                                       MVT::f32, In.getOperand(0),
13808                                       DAG.getIntPtrConstant(1, dl));
13809           Ops.push_back(Trunc);
13810         }
13811       } else
13812         Ops.push_back(In.isUndef() ? DAG.getUNDEF(SrcVT) : In.getOperand(0));
13813     }
13814 
13815     unsigned Opcode;
13816     if (FirstConversion == PPCISD::FCTIDZ ||
13817         FirstConversion == PPCISD::FCTIWZ)
13818       Opcode = ISD::FP_TO_SINT;
13819     else
13820       Opcode = ISD::FP_TO_UINT;
13821 
13822     EVT NewVT = TargetVT == MVT::v2i64 ? MVT::v2f64 : MVT::v4f32;
13823     SDValue BV = DAG.getBuildVector(NewVT, dl, Ops);
13824     return DAG.getNode(Opcode, dl, TargetVT, BV);
13825   }
13826   return SDValue();
13827 }
13828 
13829 /// Reduce the number of loads when building a vector.
13830 ///
13831 /// Building a vector out of multiple loads can be converted to a load
13832 /// of the vector type if the loads are consecutive. If the loads are
13833 /// consecutive but in descending order, a shuffle is added at the end
13834 /// to reorder the vector.
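///
/// For example (illustrative, with i32 elements):
///   (build_vector (load a), (load a+4), (load a+8), (load a+12))
/// becomes a single (v4i32 (load a)). If the operands instead load from
/// descending addresses, the wide load is taken from the lowest address
/// and followed by an element-reversing vector_shuffle.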
13835 static SDValue combineBVOfConsecutiveLoads(SDNode *N, SelectionDAG &DAG) {
13836   assert(N->getOpcode() == ISD::BUILD_VECTOR &&
13837          "Should be called with a BUILD_VECTOR node");
13838 
13839   SDLoc dl(N);
13840 
  // Return early for non-byte-sized types, as they can't be consecutive.
13842   if (!N->getValueType(0).getVectorElementType().isByteSized())
13843     return SDValue();
13844 
13845   bool InputsAreConsecutiveLoads = true;
13846   bool InputsAreReverseConsecutive = true;
13847   unsigned ElemSize = N->getValueType(0).getScalarType().getStoreSize();
13848   SDValue FirstInput = N->getOperand(0);
13849   bool IsRoundOfExtLoad = false;
13850 
13851   if (FirstInput.getOpcode() == ISD::FP_ROUND &&
13852       FirstInput.getOperand(0).getOpcode() == ISD::LOAD) {
    LoadSDNode *LD = cast<LoadSDNode>(FirstInput.getOperand(0));
13854     IsRoundOfExtLoad = LD->getExtensionType() == ISD::EXTLOAD;
13855   }
13856   // Not a build vector of (possibly fp_rounded) loads.
13857   if ((!IsRoundOfExtLoad && FirstInput.getOpcode() != ISD::LOAD) ||
13858       N->getNumOperands() == 1)
13859     return SDValue();
13860 
13861   for (int i = 1, e = N->getNumOperands(); i < e; ++i) {
13862     // If any inputs are fp_round(extload), they all must be.
13863     if (IsRoundOfExtLoad && N->getOperand(i).getOpcode() != ISD::FP_ROUND)
13864       return SDValue();
13865 
13866     SDValue NextInput = IsRoundOfExtLoad ? N->getOperand(i).getOperand(0) :
13867       N->getOperand(i);
13868     if (NextInput.getOpcode() != ISD::LOAD)
13869       return SDValue();
13870 
13871     SDValue PreviousInput =
13872       IsRoundOfExtLoad ? N->getOperand(i-1).getOperand(0) : N->getOperand(i-1);
    LoadSDNode *LD1 = cast<LoadSDNode>(PreviousInput);
    LoadSDNode *LD2 = cast<LoadSDNode>(NextInput);
13875 
13876     // If any inputs are fp_round(extload), they all must be.
13877     if (IsRoundOfExtLoad && LD2->getExtensionType() != ISD::EXTLOAD)
13878       return SDValue();
13879 
13880     if (!isConsecutiveLS(LD2, LD1, ElemSize, 1, DAG))
13881       InputsAreConsecutiveLoads = false;
13882     if (!isConsecutiveLS(LD1, LD2, ElemSize, 1, DAG))
13883       InputsAreReverseConsecutive = false;
13884 
13885     // Exit early if the loads are neither consecutive nor reverse consecutive.
13886     if (!InputsAreConsecutiveLoads && !InputsAreReverseConsecutive)
13887       return SDValue();
13888   }
13889 
13890   assert(!(InputsAreConsecutiveLoads && InputsAreReverseConsecutive) &&
13891          "The loads cannot be both consecutive and reverse consecutive.");
13892 
13893   SDValue FirstLoadOp =
13894     IsRoundOfExtLoad ? FirstInput.getOperand(0) : FirstInput;
13895   SDValue LastLoadOp =
13896     IsRoundOfExtLoad ? N->getOperand(N->getNumOperands()-1).getOperand(0) :
13897                        N->getOperand(N->getNumOperands()-1);
13898 
13899   LoadSDNode *LD1 = dyn_cast<LoadSDNode>(FirstLoadOp);
13900   LoadSDNode *LDL = dyn_cast<LoadSDNode>(LastLoadOp);
13901   if (InputsAreConsecutiveLoads) {
13902     assert(LD1 && "Input needs to be a LoadSDNode.");
13903     return DAG.getLoad(N->getValueType(0), dl, LD1->getChain(),
13904                        LD1->getBasePtr(), LD1->getPointerInfo(),
13905                        LD1->getAlignment());
13906   }
13907   if (InputsAreReverseConsecutive) {
13908     assert(LDL && "Input needs to be a LoadSDNode.");
13909     SDValue Load = DAG.getLoad(N->getValueType(0), dl, LDL->getChain(),
13910                                LDL->getBasePtr(), LDL->getPointerInfo(),
13911                                LDL->getAlignment());
13912     SmallVector<int, 16> Ops;
13913     for (int i = N->getNumOperands() - 1; i >= 0; i--)
13914       Ops.push_back(i);
13915 
13916     return DAG.getVectorShuffle(N->getValueType(0), dl, Load,
13917                                 DAG.getUNDEF(N->getValueType(0)), Ops);
13918   }
13919   return SDValue();
13920 }
13921 
// This function adds the vector_shuffle needed to get the elements of the
// vector extracts into the positions specified by the CorrectElems encoding.
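//
// For example (illustrative, LE byte->word): if the build_vector extracts
// bytes 1, 5, 9, 13 (Elems = 0x0105090D) but the instruction expects bytes
// 0, 4, 8, 12 (CorrectElems = 0x0004080C), the loop below sets
// ShuffleMask[0] = 1, ShuffleMask[4] = 5, ShuffleMask[8] = 9, and
// ShuffleMask[12] = 13 so that each extracted byte lands in its expected
// slot.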
13925 static SDValue addShuffleForVecExtend(SDNode *N, SelectionDAG &DAG,
13926                                       SDValue Input, uint64_t Elems,
13927                                       uint64_t CorrectElems) {
13928   SDLoc dl(N);
13929 
13930   unsigned NumElems = Input.getValueType().getVectorNumElements();
13931   SmallVector<int, 16> ShuffleMask(NumElems, -1);
13932 
13933   // Knowing the element indices being extracted from the original
13934   // vector and the order in which they're being inserted, just put
  // them at the element indices required for the instruction.
13936   for (unsigned i = 0; i < N->getNumOperands(); i++) {
13937     if (DAG.getDataLayout().isLittleEndian())
13938       ShuffleMask[CorrectElems & 0xF] = Elems & 0xF;
13939     else
13940       ShuffleMask[(CorrectElems & 0xF0) >> 4] = (Elems & 0xF0) >> 4;
13941     CorrectElems = CorrectElems >> 8;
13942     Elems = Elems >> 8;
13943   }
13944 
13945   SDValue Shuffle =
13946       DAG.getVectorShuffle(Input.getValueType(), dl, Input,
13947                            DAG.getUNDEF(Input.getValueType()), ShuffleMask);
13948 
13949   EVT VT = N->getValueType(0);
13950   SDValue Conv = DAG.getBitcast(VT, Shuffle);
13951 
13952   EVT ExtVT = EVT::getVectorVT(*DAG.getContext(),
13953                                Input.getValueType().getVectorElementType(),
13954                                VT.getVectorNumElements());
13955   return DAG.getNode(ISD::SIGN_EXTEND_INREG, dl, VT, Conv,
13956                      DAG.getValueType(ExtVT));
13957 }
13958 
13959 // Look for build vector patterns where input operands come from sign
13960 // extended vector_extract elements of specific indices. If the correct indices
// aren't used, add a vector shuffle to fix up the indices and create a
// SIGN_EXTEND_INREG node, which selects the vector sign extend instructions
13963 // during instruction selection.
13964 static SDValue combineBVOfVecSExt(SDNode *N, SelectionDAG &DAG) {
13965   // This array encodes the indices that the vector sign extend instructions
13966   // extract from when extending from one type to another for both BE and LE.
  // The right nibble of each byte corresponds to the LE indices,
  // and the left nibble of each byte corresponds to the BE indices.
13969   // For example: 0x3074B8FC  byte->word
13970   // For LE: the allowed indices are: 0x0,0x4,0x8,0xC
13971   // For BE: the allowed indices are: 0x3,0x7,0xB,0xF
13972   // For example: 0x000070F8  byte->double word
13973   // For LE: the allowed indices are: 0x0,0x8
13974   // For BE: the allowed indices are: 0x7,0xF
13975   uint64_t TargetElems[] = {
13976       0x3074B8FC, // b->w
13977       0x000070F8, // b->d
13978       0x10325476, // h->w
13979       0x00003074, // h->d
13980       0x00001032, // w->d
13981   };
13982 
13983   uint64_t Elems = 0;
13984   int Index;
13985   SDValue Input;
13986 
13987   auto isSExtOfVecExtract = [&](SDValue Op) -> bool {
13988     if (!Op)
13989       return false;
13990     if (Op.getOpcode() != ISD::SIGN_EXTEND &&
13991         Op.getOpcode() != ISD::SIGN_EXTEND_INREG)
13992       return false;
13993 
13994     // A SIGN_EXTEND_INREG might be fed by an ANY_EXTEND to produce a value
13995     // of the right width.
13996     SDValue Extract = Op.getOperand(0);
13997     if (Extract.getOpcode() == ISD::ANY_EXTEND)
13998       Extract = Extract.getOperand(0);
13999     if (Extract.getOpcode() != ISD::EXTRACT_VECTOR_ELT)
14000       return false;
14001 
14002     ConstantSDNode *ExtOp = dyn_cast<ConstantSDNode>(Extract.getOperand(1));
14003     if (!ExtOp)
14004       return false;
14005 
14006     Index = ExtOp->getZExtValue();
14007     if (Input && Input != Extract.getOperand(0))
14008       return false;
14009 
14010     if (!Input)
14011       Input = Extract.getOperand(0);
14012 
14013     Elems = Elems << 8;
14014     Index = DAG.getDataLayout().isLittleEndian() ? Index : Index << 4;
14015     Elems |= Index;
14016 
14017     return true;
14018   };
14019 
  // If the build vector operands aren't sign-extended vector extracts
  // of the same input vector, then return.
14022   for (unsigned i = 0; i < N->getNumOperands(); i++) {
14023     if (!isSExtOfVecExtract(N->getOperand(i))) {
14024       return SDValue();
14025     }
14026   }
14027 
  // If the vector extract indices are not correct, add the appropriate
14029   // vector_shuffle.
14030   int TgtElemArrayIdx;
14031   int InputSize = Input.getValueType().getScalarSizeInBits();
14032   int OutputSize = N->getValueType(0).getScalarSizeInBits();
14033   if (InputSize + OutputSize == 40)
14034     TgtElemArrayIdx = 0;
14035   else if (InputSize + OutputSize == 72)
14036     TgtElemArrayIdx = 1;
14037   else if (InputSize + OutputSize == 48)
14038     TgtElemArrayIdx = 2;
14039   else if (InputSize + OutputSize == 80)
14040     TgtElemArrayIdx = 3;
14041   else if (InputSize + OutputSize == 96)
14042     TgtElemArrayIdx = 4;
14043   else
14044     return SDValue();
14045 
14046   uint64_t CorrectElems = TargetElems[TgtElemArrayIdx];
14047   CorrectElems = DAG.getDataLayout().isLittleEndian()
14048                      ? CorrectElems & 0x0F0F0F0F0F0F0F0F
14049                      : CorrectElems & 0xF0F0F0F0F0F0F0F0;
14050   if (Elems != CorrectElems) {
14051     return addShuffleForVecExtend(N, DAG, Input, Elems, CorrectElems);
14052   }
14053 
14054   // Regular lowering will catch cases where a shuffle is not needed.
14055   return SDValue();
14056 }
14057 
14058 // Look for the pattern of a load from a narrow width to i128, feeding
14059 // into a BUILD_VECTOR of v1i128. Replace this sequence with a PPCISD node
14060 // (LXVRZX). This node represents a zero extending load that will be matched
14061 // to the Load VSX Vector Rightmost instructions.
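//
// For example (illustrative):
//   (v1i128 (build_vector (i128 (zextload<i32> ptr))))
// becomes (v1i128 (LXVRZX chain, ptr, 32)), where the constant operand is
// the width of the load in bits.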
14062 static SDValue combineBVZEXTLOAD(SDNode *N, SelectionDAG &DAG) {
14063   SDLoc DL(N);
14064 
14065   // This combine is only eligible for a BUILD_VECTOR of v1i128.
14066   if (N->getValueType(0) != MVT::v1i128)
14067     return SDValue();
14068 
14069   SDValue Operand = N->getOperand(0);
14070   // Proceed with the transformation if the operand to the BUILD_VECTOR
14071   // is a load instruction.
14072   if (Operand.getOpcode() != ISD::LOAD)
14073     return SDValue();
14074 
  LoadSDNode *LD = cast<LoadSDNode>(Operand);
14076   EVT MemoryType = LD->getMemoryVT();
14077 
  // This transformation is only valid if we are loading either a byte,
14079   // halfword, word, or doubleword.
14080   bool ValidLDType = MemoryType == MVT::i8 || MemoryType == MVT::i16 ||
14081                      MemoryType == MVT::i32 || MemoryType == MVT::i64;
14082 
14083   // Ensure that the load from the narrow width is being zero extended to i128.
14084   if (!ValidLDType ||
14085       (LD->getExtensionType() != ISD::ZEXTLOAD &&
14086        LD->getExtensionType() != ISD::EXTLOAD))
14087     return SDValue();
14088 
14089   SDValue LoadOps[] = {
14090       LD->getChain(), LD->getBasePtr(),
14091       DAG.getIntPtrConstant(MemoryType.getScalarSizeInBits(), DL)};
14092 
14093   return DAG.getMemIntrinsicNode(PPCISD::LXVRZX, DL,
14094                                  DAG.getVTList(MVT::v1i128, MVT::Other),
14095                                  LoadOps, MemoryType, LD->getMemOperand());
14096 }
14097 
14098 SDValue PPCTargetLowering::DAGCombineBuildVector(SDNode *N,
14099                                                  DAGCombinerInfo &DCI) const {
14100   assert(N->getOpcode() == ISD::BUILD_VECTOR &&
14101          "Should be called with a BUILD_VECTOR node");
14102 
14103   SelectionDAG &DAG = DCI.DAG;
14104   SDLoc dl(N);
14105 
14106   if (!Subtarget.hasVSX())
14107     return SDValue();
14108 
14109   // The target independent DAG combiner will leave a build_vector of
14110   // float-to-int conversions intact. We can generate MUCH better code for
14111   // a float-to-int conversion of a vector of floats.
14112   SDValue FirstInput = N->getOperand(0);
14113   if (FirstInput.getOpcode() == PPCISD::MFVSR) {
14114     SDValue Reduced = combineElementTruncationToVectorTruncation(N, DCI);
14115     if (Reduced)
14116       return Reduced;
14117   }
14118 
14119   // If we're building a vector out of consecutive loads, just load that
14120   // vector type.
14121   SDValue Reduced = combineBVOfConsecutiveLoads(N, DAG);
14122   if (Reduced)
14123     return Reduced;
14124 
14125   // If we're building a vector out of extended elements from another vector
14126   // we have P9 vector integer extend instructions. The code assumes legal
14127   // input types (i.e. it can't handle things like v4i16) so do not run before
14128   // legalization.
14129   if (Subtarget.hasP9Altivec() && !DCI.isBeforeLegalize()) {
14130     Reduced = combineBVOfVecSExt(N, DAG);
14131     if (Reduced)
14132       return Reduced;
14133   }
14134 
14135   // On Power10, the Load VSX Vector Rightmost instructions can be utilized
14136   // if this is a BUILD_VECTOR of v1i128, and if the operand to the BUILD_VECTOR
14137   // is a load from <valid narrow width> to i128.
14138   if (Subtarget.isISA3_1()) {
14139     SDValue BVOfZLoad = combineBVZEXTLOAD(N, DAG);
14140     if (BVOfZLoad)
14141       return BVOfZLoad;
14142   }
14143 
14144   if (N->getValueType(0) != MVT::v2f64)
14145     return SDValue();
14146 
14147   // Looking for:
  // (build_vector ([su]int_to_fp (extractelt 0)), ([su]int_to_fp (extractelt 1)))
14149   if (FirstInput.getOpcode() != ISD::SINT_TO_FP &&
14150       FirstInput.getOpcode() != ISD::UINT_TO_FP)
14151     return SDValue();
14152   if (N->getOperand(1).getOpcode() != ISD::SINT_TO_FP &&
14153       N->getOperand(1).getOpcode() != ISD::UINT_TO_FP)
14154     return SDValue();
14155   if (FirstInput.getOpcode() != N->getOperand(1).getOpcode())
14156     return SDValue();
14157 
14158   SDValue Ext1 = FirstInput.getOperand(0);
14159   SDValue Ext2 = N->getOperand(1).getOperand(0);
  if (Ext1.getOpcode() != ISD::EXTRACT_VECTOR_ELT ||
      Ext2.getOpcode() != ISD::EXTRACT_VECTOR_ELT)
14162     return SDValue();
14163 
14164   ConstantSDNode *Ext1Op = dyn_cast<ConstantSDNode>(Ext1.getOperand(1));
14165   ConstantSDNode *Ext2Op = dyn_cast<ConstantSDNode>(Ext2.getOperand(1));
14166   if (!Ext1Op || !Ext2Op)
14167     return SDValue();
14168   if (Ext1.getOperand(0).getValueType() != MVT::v4i32 ||
14169       Ext1.getOperand(0) != Ext2.getOperand(0))
14170     return SDValue();
14171 
14172   int FirstElem = Ext1Op->getZExtValue();
14173   int SecondElem = Ext2Op->getZExtValue();
14174   int SubvecIdx;
14175   if (FirstElem == 0 && SecondElem == 1)
14176     SubvecIdx = Subtarget.isLittleEndian() ? 1 : 0;
14177   else if (FirstElem == 2 && SecondElem == 3)
14178     SubvecIdx = Subtarget.isLittleEndian() ? 0 : 1;
14179   else
14180     return SDValue();
14181 
14182   SDValue SrcVec = Ext1.getOperand(0);
14183   auto NodeType = (N->getOperand(1).getOpcode() == ISD::SINT_TO_FP) ?
14184     PPCISD::SINT_VEC_TO_FP : PPCISD::UINT_VEC_TO_FP;
14185   return DAG.getNode(NodeType, dl, MVT::v2f64,
14186                      SrcVec, DAG.getIntPtrConstant(SubvecIdx, dl));
14187 }
14188 
14189 SDValue PPCTargetLowering::combineFPToIntToFP(SDNode *N,
14190                                               DAGCombinerInfo &DCI) const {
14191   assert((N->getOpcode() == ISD::SINT_TO_FP ||
14192           N->getOpcode() == ISD::UINT_TO_FP) &&
14193          "Need an int -> FP conversion node here");
14194 
14195   if (useSoftFloat() || !Subtarget.has64BitSupport())
14196     return SDValue();
14197 
14198   SelectionDAG &DAG = DCI.DAG;
14199   SDLoc dl(N);
14200   SDValue Op(N, 0);
14201 
  // Don't handle ppc_fp128 here, or conversions that the hardware cannot
  // handle without going out of range.
14204   if (Op.getValueType() != MVT::f32 && Op.getValueType() != MVT::f64)
14205     return SDValue();
14206   if (!Op.getOperand(0).getValueType().isSimple())
14207     return SDValue();
14208   if (Op.getOperand(0).getValueType().getSimpleVT() <= MVT(MVT::i1) ||
14209       Op.getOperand(0).getValueType().getSimpleVT() > MVT(MVT::i64))
14210     return SDValue();
14211 
14212   SDValue FirstOperand(Op.getOperand(0));
14213   bool SubWordLoad = FirstOperand.getOpcode() == ISD::LOAD &&
14214     (FirstOperand.getValueType() == MVT::i8 ||
14215      FirstOperand.getValueType() == MVT::i16);
14216   if (Subtarget.hasP9Vector() && Subtarget.hasP9Altivec() && SubWordLoad) {
14217     bool Signed = N->getOpcode() == ISD::SINT_TO_FP;
14218     bool DstDouble = Op.getValueType() == MVT::f64;
14219     unsigned ConvOp = Signed ?
14220       (DstDouble ? PPCISD::FCFID  : PPCISD::FCFIDS) :
14221       (DstDouble ? PPCISD::FCFIDU : PPCISD::FCFIDUS);
14222     SDValue WidthConst =
14223       DAG.getIntPtrConstant(FirstOperand.getValueType() == MVT::i8 ? 1 : 2,
14224                             dl, false);
14225     LoadSDNode *LDN = cast<LoadSDNode>(FirstOperand.getNode());
14226     SDValue Ops[] = { LDN->getChain(), LDN->getBasePtr(), WidthConst };
14227     SDValue Ld = DAG.getMemIntrinsicNode(PPCISD::LXSIZX, dl,
14228                                          DAG.getVTList(MVT::f64, MVT::Other),
14229                                          Ops, MVT::i8, LDN->getMemOperand());
14230 
    // For signed conversion, we need to sign-extend the value in the VSR.
14232     if (Signed) {
14233       SDValue ExtOps[] = { Ld, WidthConst };
14234       SDValue Ext = DAG.getNode(PPCISD::VEXTS, dl, MVT::f64, ExtOps);
14235       return DAG.getNode(ConvOp, dl, DstDouble ? MVT::f64 : MVT::f32, Ext);
14236     } else
14237       return DAG.getNode(ConvOp, dl, DstDouble ? MVT::f64 : MVT::f32, Ld);
14238   }
14239 
14241   // For i32 intermediate values, unfortunately, the conversion functions
  // leave the upper 32 bits of the value undefined. Within the set of
14243   // scalar instructions, we have no method for zero- or sign-extending the
14244   // value. Thus, we cannot handle i32 intermediate values here.
14245   if (Op.getOperand(0).getValueType() == MVT::i32)
14246     return SDValue();
14247 
14248   assert((Op.getOpcode() == ISD::SINT_TO_FP || Subtarget.hasFPCVT()) &&
14249          "UINT_TO_FP is supported only with FPCVT");
14250 
14251   // If we have FCFIDS, then use it when converting to single-precision.
14252   // Otherwise, convert to double-precision and then round.
14253   unsigned FCFOp = (Subtarget.hasFPCVT() && Op.getValueType() == MVT::f32)
14254                        ? (Op.getOpcode() == ISD::UINT_TO_FP ? PPCISD::FCFIDUS
14255                                                             : PPCISD::FCFIDS)
14256                        : (Op.getOpcode() == ISD::UINT_TO_FP ? PPCISD::FCFIDU
14257                                                             : PPCISD::FCFID);
14258   MVT FCFTy = (Subtarget.hasFPCVT() && Op.getValueType() == MVT::f32)
14259                   ? MVT::f32
14260                   : MVT::f64;
14261 
  // If we're converting from a float to an int and back to a float again,
  // then we don't need the store/load pair at all.
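  // For example (illustrative): (f64 (sint_to_fp (i64 (fp_to_sint f64:x))))
  // becomes (f64 (FCFID (FCTIDZ x))), keeping the value in floating-point
  // registers throughout.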
14264   if ((Op.getOperand(0).getOpcode() == ISD::FP_TO_UINT &&
14265        Subtarget.hasFPCVT()) ||
14266       (Op.getOperand(0).getOpcode() == ISD::FP_TO_SINT)) {
14267     SDValue Src = Op.getOperand(0).getOperand(0);
14268     if (Src.getValueType() == MVT::f32) {
14269       Src = DAG.getNode(ISD::FP_EXTEND, dl, MVT::f64, Src);
14270       DCI.AddToWorklist(Src.getNode());
14271     } else if (Src.getValueType() != MVT::f64) {
14272       // Make sure that we don't pick up a ppc_fp128 source value.
14273       return SDValue();
14274     }
14275 
14276     unsigned FCTOp =
14277       Op.getOperand(0).getOpcode() == ISD::FP_TO_SINT ? PPCISD::FCTIDZ :
14278                                                         PPCISD::FCTIDUZ;
14279 
14280     SDValue Tmp = DAG.getNode(FCTOp, dl, MVT::f64, Src);
14281     SDValue FP = DAG.getNode(FCFOp, dl, FCFTy, Tmp);
14282 
14283     if (Op.getValueType() == MVT::f32 && !Subtarget.hasFPCVT()) {
14284       FP = DAG.getNode(ISD::FP_ROUND, dl,
14285                        MVT::f32, FP, DAG.getIntPtrConstant(0, dl));
14286       DCI.AddToWorklist(FP.getNode());
14287     }
14288 
14289     return FP;
14290   }
14291 
14292   return SDValue();
14293 }
14294 
14295 // expandVSXLoadForLE - Convert VSX loads (which may be intrinsics for
14296 // builtins) into loads with swaps.
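//
// For example (illustrative), on a little-endian subtarget that needs swaps:
//   (v4i32 (load ptr))
// becomes (v4i32 (bitcast (XXSWAPD (LXVD2X ptr)))), where the XXSWAPD
// undoes the doubleword reversal performed by LXVD2X on little endian.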
14297 SDValue PPCTargetLowering::expandVSXLoadForLE(SDNode *N,
14298                                               DAGCombinerInfo &DCI) const {
14299   SelectionDAG &DAG = DCI.DAG;
14300   SDLoc dl(N);
14301   SDValue Chain;
14302   SDValue Base;
14303   MachineMemOperand *MMO;
14304 
14305   switch (N->getOpcode()) {
14306   default:
14307     llvm_unreachable("Unexpected opcode for little endian VSX load");
14308   case ISD::LOAD: {
14309     LoadSDNode *LD = cast<LoadSDNode>(N);
14310     Chain = LD->getChain();
14311     Base = LD->getBasePtr();
14312     MMO = LD->getMemOperand();
14313     // If the MMO suggests this isn't a load of a full vector, leave
14314     // things alone.  For a built-in, we have to make the change for
    // correctness, so if there is a size problem, it will be a bug.
14316     if (MMO->getSize() < 16)
14317       return SDValue();
14318     break;
14319   }
14320   case ISD::INTRINSIC_W_CHAIN: {
14321     MemIntrinsicSDNode *Intrin = cast<MemIntrinsicSDNode>(N);
14322     Chain = Intrin->getChain();
14323     // Similarly to the store case below, Intrin->getBasePtr() doesn't get
14324     // us what we want. Get operand 2 instead.
14325     Base = Intrin->getOperand(2);
14326     MMO = Intrin->getMemOperand();
14327     break;
14328   }
14329   }
14330 
14331   MVT VecTy = N->getValueType(0).getSimpleVT();
14332 
14333   // Do not expand to PPCISD::LXVD2X + PPCISD::XXSWAPD when the load is
  // aligned and the type is a vector with elements up to 4 bytes.
14335   if (Subtarget.needsSwapsForVSXMemOps() && MMO->getAlign() >= Align(16) &&
14336       VecTy.getScalarSizeInBits() <= 32) {
14337     return SDValue();
14338   }
14339 
14340   SDValue LoadOps[] = { Chain, Base };
14341   SDValue Load = DAG.getMemIntrinsicNode(PPCISD::LXVD2X, dl,
14342                                          DAG.getVTList(MVT::v2f64, MVT::Other),
14343                                          LoadOps, MVT::v2f64, MMO);
14344 
14345   DCI.AddToWorklist(Load.getNode());
14346   Chain = Load.getValue(1);
14347   SDValue Swap = DAG.getNode(
14348       PPCISD::XXSWAPD, dl, DAG.getVTList(MVT::v2f64, MVT::Other), Chain, Load);
14349   DCI.AddToWorklist(Swap.getNode());
14350 
14351   // Add a bitcast if the resulting load type doesn't match v2f64.
14352   if (VecTy != MVT::v2f64) {
14353     SDValue N = DAG.getNode(ISD::BITCAST, dl, VecTy, Swap);
14354     DCI.AddToWorklist(N.getNode());
14355     // Package {bitcast value, swap's chain} to match Load's shape.
14356     return DAG.getNode(ISD::MERGE_VALUES, dl, DAG.getVTList(VecTy, MVT::Other),
14357                        N, Swap.getValue(1));
14358   }
14359 
14360   return Swap;
14361 }
14362 
14363 // expandVSXStoreForLE - Convert VSX stores (which may be intrinsics for
14364 // builtins) into stores with swaps.
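//
// For example (illustrative), on a little-endian subtarget that needs swaps:
//   (store v4i32:x, ptr)
// becomes (STXVD2X (XXSWAPD (bitcast v2f64 x)), ptr).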
14365 SDValue PPCTargetLowering::expandVSXStoreForLE(SDNode *N,
14366                                                DAGCombinerInfo &DCI) const {
14367   SelectionDAG &DAG = DCI.DAG;
14368   SDLoc dl(N);
14369   SDValue Chain;
14370   SDValue Base;
14371   unsigned SrcOpnd;
14372   MachineMemOperand *MMO;
14373 
14374   switch (N->getOpcode()) {
14375   default:
14376     llvm_unreachable("Unexpected opcode for little endian VSX store");
14377   case ISD::STORE: {
14378     StoreSDNode *ST = cast<StoreSDNode>(N);
14379     Chain = ST->getChain();
14380     Base = ST->getBasePtr();
14381     MMO = ST->getMemOperand();
14382     SrcOpnd = 1;
14383     // If the MMO suggests this isn't a store of a full vector, leave
14384     // things alone.  For a built-in, we have to make the change for
    // correctness, so if there is a size problem, it will be a bug.
14386     if (MMO->getSize() < 16)
14387       return SDValue();
14388     break;
14389   }
14390   case ISD::INTRINSIC_VOID: {
14391     MemIntrinsicSDNode *Intrin = cast<MemIntrinsicSDNode>(N);
14392     Chain = Intrin->getChain();
14393     // Intrin->getBasePtr() oddly does not get what we want.
14394     Base = Intrin->getOperand(3);
14395     MMO = Intrin->getMemOperand();
14396     SrcOpnd = 2;
14397     break;
14398   }
14399   }
14400 
14401   SDValue Src = N->getOperand(SrcOpnd);
14402   MVT VecTy = Src.getValueType().getSimpleVT();
14403 
  // Do not expand to PPCISD::XXSWAPD and PPCISD::STXVD2X when the store is
  // aligned and the type is a vector with elements up to 4 bytes.
14406   if (Subtarget.needsSwapsForVSXMemOps() && MMO->getAlign() >= Align(16) &&
14407       VecTy.getScalarSizeInBits() <= 32) {
14408     return SDValue();
14409   }
14410 
  // All stores are done as v2f64, with a bitcast added if needed.
14412   if (VecTy != MVT::v2f64) {
14413     Src = DAG.getNode(ISD::BITCAST, dl, MVT::v2f64, Src);
14414     DCI.AddToWorklist(Src.getNode());
14415   }
14416 
14417   SDValue Swap = DAG.getNode(PPCISD::XXSWAPD, dl,
14418                              DAG.getVTList(MVT::v2f64, MVT::Other), Chain, Src);
14419   DCI.AddToWorklist(Swap.getNode());
14420   Chain = Swap.getValue(1);
14421   SDValue StoreOps[] = { Chain, Swap, Base };
14422   SDValue Store = DAG.getMemIntrinsicNode(PPCISD::STXVD2X, dl,
14423                                           DAG.getVTList(MVT::Other),
14424                                           StoreOps, VecTy, MMO);
14425   DCI.AddToWorklist(Store.getNode());
14426   return Store;
14427 }
14428 
14429 // Handle DAG combine for STORE (FP_TO_INT F).
14430 SDValue PPCTargetLowering::combineStoreFPToInt(SDNode *N,
14431                                                DAGCombinerInfo &DCI) const {
14433   SelectionDAG &DAG = DCI.DAG;
14434   SDLoc dl(N);
14435   unsigned Opcode = N->getOperand(1).getOpcode();
14436 
14437   assert((Opcode == ISD::FP_TO_SINT || Opcode == ISD::FP_TO_UINT)
14438          && "Not a FP_TO_INT Instruction!");
14439 
14440   SDValue Val = N->getOperand(1).getOperand(0);
14441   EVT Op1VT = N->getOperand(1).getValueType();
14442   EVT ResVT = Val.getValueType();
14443 
14444   if (!isTypeLegal(ResVT))
14445     return SDValue();
14446 
  // Only perform the combine for conversions to i64/i32, or to i16/i8 on
  // Power9.
14448   bool ValidTypeForStoreFltAsInt =
14449         (Op1VT == MVT::i32 || Op1VT == MVT::i64 ||
14450          (Subtarget.hasP9Vector() && (Op1VT == MVT::i16 || Op1VT == MVT::i8)));
14451 
14452   if (ResVT == MVT::ppcf128 || !Subtarget.hasP8Vector() ||
14453       cast<StoreSDNode>(N)->isTruncatingStore() || !ValidTypeForStoreFltAsInt)
14454     return SDValue();
14455 
14456   // Extend f32 values to f64
14457   if (ResVT.getScalarSizeInBits() == 32) {
14458     Val = DAG.getNode(ISD::FP_EXTEND, dl, MVT::f64, Val);
14459     DCI.AddToWorklist(Val.getNode());
14460   }
14461 
14462   // Set signed or unsigned conversion opcode.
14463   unsigned ConvOpcode = (Opcode == ISD::FP_TO_SINT) ?
14464                           PPCISD::FP_TO_SINT_IN_VSR :
14465                           PPCISD::FP_TO_UINT_IN_VSR;
14466 
14467   Val = DAG.getNode(ConvOpcode,
14468                     dl, ResVT == MVT::f128 ? MVT::f128 : MVT::f64, Val);
14469   DCI.AddToWorklist(Val.getNode());
14470 
14471   // Set number of bytes being converted.
14472   unsigned ByteSize = Op1VT.getScalarSizeInBits() / 8;
14473   SDValue Ops[] = { N->getOperand(0), Val, N->getOperand(2),
14474                     DAG.getIntPtrConstant(ByteSize, dl, false),
14475                     DAG.getValueType(Op1VT) };
14476 
14477   Val = DAG.getMemIntrinsicNode(PPCISD::ST_VSR_SCAL_INT, dl,
14478           DAG.getVTList(MVT::Other), Ops,
14479           cast<StoreSDNode>(N)->getMemoryVT(),
14480           cast<StoreSDNode>(N)->getMemOperand());
14481 
14482   DCI.AddToWorklist(Val.getNode());
14483   return Val;
14484 }
14485 
static bool isAlternatingShuffMask(ArrayRef<int> Mask, int NumElts) {
14487   // Check that the source of the element keeps flipping
  // (i.e. Mask[i] < NumElts -> Mask[i+1] >= NumElts).
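  // For example (illustrative, NumElts = 4): <0, 5, 2, 7> alternates between
  // the two source vectors, while <0, 1, 6, 7> does not.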
14489   bool PrevElemFromFirstVec = Mask[0] < NumElts;
14490   for (int i = 1, e = Mask.size(); i < e; i++) {
14491     if (PrevElemFromFirstVec && Mask[i] < NumElts)
14492       return false;
14493     if (!PrevElemFromFirstVec && Mask[i] >= NumElts)
14494       return false;
14495     PrevElemFromFirstVec = !PrevElemFromFirstVec;
14496   }
14497   return true;
14498 }
14499 
14500 static bool isSplatBV(SDValue Op) {
14501   if (Op.getOpcode() != ISD::BUILD_VECTOR)
14502     return false;
14503   SDValue FirstOp;
14504 
14505   // Find first non-undef input.
14506   for (int i = 0, e = Op.getNumOperands(); i < e; i++) {
14507     FirstOp = Op.getOperand(i);
14508     if (!FirstOp.isUndef())
14509       break;
14510   }
14511 
14512   // All inputs are undef or the same as the first non-undef input.
14513   for (int i = 1, e = Op.getNumOperands(); i < e; i++)
14514     if (Op.getOperand(i) != FirstOp && !Op.getOperand(i).isUndef())
14515       return false;
14516   return true;
14517 }
14518 
14519 static SDValue isScalarToVec(SDValue Op) {
14520   if (Op.getOpcode() == ISD::SCALAR_TO_VECTOR)
14521     return Op;
14522   if (Op.getOpcode() != ISD::BITCAST)
14523     return SDValue();
14524   Op = Op.getOperand(0);
14525   if (Op.getOpcode() == ISD::SCALAR_TO_VECTOR)
14526     return Op;
14527   return SDValue();
14528 }
14529 
14530 static void fixupShuffleMaskForPermutedSToV(SmallVectorImpl<int> &ShuffV,
14531                                             int LHSMaxIdx, int RHSMinIdx,
14532                                             int RHSMaxIdx, int HalfVec) {
14533   for (int i = 0, e = ShuffV.size(); i < e; i++) {
14534     int Idx = ShuffV[i];
14535     if ((Idx >= 0 && Idx < LHSMaxIdx) || (Idx >= RHSMinIdx && Idx < RHSMaxIdx))
14536       ShuffV[i] += HalfVec;
14537   }
14539 }
14540 
14541 // Replace a SCALAR_TO_VECTOR with a SCALAR_TO_VECTOR_PERMUTED except if
14542 // the original is:
14543 // (<n x Ty> (scalar_to_vector (Ty (extract_elt <n x Ty> %a, C))))
14544 // In such a case, just change the shuffle mask to extract the element
14545 // from the permuted index.
14546 static SDValue getSToVPermuted(SDValue OrigSToV, SelectionDAG &DAG) {
14547   SDLoc dl(OrigSToV);
14548   EVT VT = OrigSToV.getValueType();
14549   assert(OrigSToV.getOpcode() == ISD::SCALAR_TO_VECTOR &&
14550          "Expecting a SCALAR_TO_VECTOR here");
14551   SDValue Input = OrigSToV.getOperand(0);
14552 
14553   if (Input.getOpcode() == ISD::EXTRACT_VECTOR_ELT) {
14554     ConstantSDNode *Idx = dyn_cast<ConstantSDNode>(Input.getOperand(1));
14555     SDValue OrigVector = Input.getOperand(0);
14556 
14557     // Can't handle non-const element indices or different vector types
14558     // for the input to the extract and the output of the scalar_to_vector.
14559     if (Idx && VT == OrigVector.getValueType()) {
14560       SmallVector<int, 16> NewMask(VT.getVectorNumElements(), -1);
14561       NewMask[VT.getVectorNumElements() / 2] = Idx->getZExtValue();
14562       return DAG.getVectorShuffle(VT, dl, OrigVector, OrigVector, NewMask);
14563     }
14564   }
14565   return DAG.getNode(PPCISD::SCALAR_TO_VECTOR_PERMUTED, dl, VT,
14566                      OrigSToV.getOperand(0));
14567 }
14568 
14569 // On little endian subtargets, combine shuffles such as:
14570 // vector_shuffle<16,1,17,3,18,5,19,7,20,9,21,11,22,13,23,15>, <zero>, %b
14571 // into:
14572 // vector_shuffle<16,0,17,1,18,2,19,3,20,4,21,5,22,6,23,7>, <zero>, %b
// because the latter can be matched to a single vector merge instruction.
14574 // Furthermore, SCALAR_TO_VECTOR on little endian always involves a permute
14575 // to put the value into element zero. Adjust the shuffle mask so that the
14576 // vector can remain in permuted form (to prevent a swap prior to a shuffle).
14577 SDValue PPCTargetLowering::combineVectorShuffle(ShuffleVectorSDNode *SVN,
14578                                                 SelectionDAG &DAG) const {
14579   SDValue LHS = SVN->getOperand(0);
14580   SDValue RHS = SVN->getOperand(1);
14581   auto Mask = SVN->getMask();
14582   int NumElts = LHS.getValueType().getVectorNumElements();
14583   SDValue Res(SVN, 0);
14584   SDLoc dl(SVN);
14585 
14586   // None of these combines are useful on big endian systems since the ISA
14587   // already has a big endian bias.
14588   if (!Subtarget.isLittleEndian() || !Subtarget.hasVSX())
14589     return Res;
14590 
14591   // If this is not a shuffle of a shuffle and the first element comes from
14592   // the second vector, canonicalize to the commuted form. This will make it
14593   // more likely to match one of the single instruction patterns.
14594   if (Mask[0] >= NumElts && LHS.getOpcode() != ISD::VECTOR_SHUFFLE &&
14595       RHS.getOpcode() != ISD::VECTOR_SHUFFLE) {
14596     std::swap(LHS, RHS);
14597     Res = DAG.getCommutedVectorShuffle(*SVN);
14598     Mask = cast<ShuffleVectorSDNode>(Res)->getMask();
14599   }
14600 
14601   // Adjust the shuffle mask if either input vector comes from a
14602   // SCALAR_TO_VECTOR and keep the respective input vector in permuted
14603   // form (to prevent the need for a swap).
14604   SmallVector<int, 16> ShuffV(Mask.begin(), Mask.end());
14605   SDValue SToVLHS = isScalarToVec(LHS);
14606   SDValue SToVRHS = isScalarToVec(RHS);
14607   if (SToVLHS || SToVRHS) {
14608     int NumEltsIn = SToVLHS ? SToVLHS.getValueType().getVectorNumElements()
14609                             : SToVRHS.getValueType().getVectorNumElements();
14610     int NumEltsOut = ShuffV.size();
14611 
14612     // Initially assume that neither input is permuted. These will be adjusted
14613     // accordingly if either input is.
14614     int LHSMaxIdx = -1;
14615     int RHSMinIdx = -1;
14616     int RHSMaxIdx = -1;
14617     int HalfVec = LHS.getValueType().getVectorNumElements() / 2;
14618 
14619     // Get the permuted scalar to vector nodes for the source(s) that come from
14620     // ISD::SCALAR_TO_VECTOR.
14621     if (SToVLHS) {
14622       // Set up the values for the shuffle vector fixup.
14623       LHSMaxIdx = NumEltsOut / NumEltsIn;
14624       SToVLHS = getSToVPermuted(SToVLHS, DAG);
14625       if (SToVLHS.getValueType() != LHS.getValueType())
14626         SToVLHS = DAG.getBitcast(LHS.getValueType(), SToVLHS);
14627       LHS = SToVLHS;
14628     }
14629     if (SToVRHS) {
14630       RHSMinIdx = NumEltsOut;
14631       RHSMaxIdx = NumEltsOut / NumEltsIn + RHSMinIdx;
14632       SToVRHS = getSToVPermuted(SToVRHS, DAG);
14633       if (SToVRHS.getValueType() != RHS.getValueType())
14634         SToVRHS = DAG.getBitcast(RHS.getValueType(), SToVRHS);
14635       RHS = SToVRHS;
14636     }
14637 
14638     // Fix up the shuffle mask to reflect where the desired element actually is.
14639     // The minimum and maximum indices that correspond to element zero for both
14640     // the LHS and RHS are computed and will control which shuffle mask entries
14641     // are to be changed. For example, if the RHS is permuted, any shuffle mask
14642     // entries in the range [RHSMinIdx,RHSMaxIdx) will be incremented by
14643     // HalfVec to refer to the corresponding element in the permuted vector.
14644     fixupShuffleMaskForPermutedSToV(ShuffV, LHSMaxIdx, RHSMinIdx, RHSMaxIdx,
14645                                     HalfVec);
14646     Res = DAG.getVectorShuffle(SVN->getValueType(0), dl, LHS, RHS, ShuffV);
14647 
14648     // We may have simplified away the shuffle. We won't be able to do anything
14649     // further with it here.
14650     if (!isa<ShuffleVectorSDNode>(Res))
14651       return Res;
14652     Mask = cast<ShuffleVectorSDNode>(Res)->getMask();
14653   }
14654 
14655   // The common case after we commuted the shuffle is that the RHS is a splat
14656   // and we have elements coming in from the splat at indices that are not
14657   // conducive to using a merge.
14658   // Example:
14659   // vector_shuffle<0,17,1,19,2,21,3,23,4,25,5,27,6,29,7,31> t1, <zero>
14660   if (!isSplatBV(RHS))
14661     return Res;
14662 
14663   // We are looking for a mask such that all even elements are from
14664   // one vector and all odd elements from the other.
14665   if (!isAlternatingShuffMask(Mask, NumElts))
14666     return Res;
14667 
14668   // Adjust the mask so we are pulling in the same index from the splat
14669   // as the index from the interesting vector in consecutive elements.
14670   // Example (even elements from first vector):
14671   // vector_shuffle<0,16,1,17,2,18,3,19,4,20,5,21,6,22,7,23> t1, <zero>
14672   if (Mask[0] < NumElts)
14673     for (int i = 1, e = Mask.size(); i < e; i += 2)
14674       ShuffV[i] = (ShuffV[i - 1] + NumElts);
14675   // Example (odd elements from first vector):
14676   // vector_shuffle<16,0,17,1,18,2,19,3,20,4,21,5,22,6,23,7> t1, <zero>
14677   else
14678     for (int i = 0, e = Mask.size(); i < e; i += 2)
14679       ShuffV[i] = (ShuffV[i + 1] + NumElts);
14680 
14681   // If the RHS has undefs, we need to remove them since we may have created
14682   // a shuffle that adds those instead of the splat value.
14683   SDValue SplatVal = cast<BuildVectorSDNode>(RHS.getNode())->getSplatValue();
14684   RHS = DAG.getSplatBuildVector(RHS.getValueType(), dl, SplatVal);
14685 
14686   Res = DAG.getVectorShuffle(SVN->getValueType(0), dl, LHS, RHS, ShuffV);
14687   return Res;
14688 }
14689 
14690 SDValue PPCTargetLowering::combineVReverseMemOP(ShuffleVectorSDNode *SVN,
14691                                                 LSBaseSDNode *LSBase,
14692                                                 DAGCombinerInfo &DCI) const {
14693   assert((ISD::isNormalLoad(LSBase) || ISD::isNormalStore(LSBase)) &&
14694         "Not a reverse memop pattern!");
14695 
14696   auto IsElementReverse = [](const ShuffleVectorSDNode *SVN) -> bool {
14697     auto Mask = SVN->getMask();
14698     int i = 0;
14699     auto I = Mask.rbegin();
14700     auto E = Mask.rend();
14701 
14702     for (; I != E; ++I) {
14703       if (*I != i)
14704         return false;
14705       i++;
14706     }
14707     return true;
14708   };
14709 
14710   SelectionDAG &DAG = DCI.DAG;
14711   EVT VT = SVN->getValueType(0);
14712 
14713   if (!isTypeLegal(VT) || !Subtarget.isLittleEndian() || !Subtarget.hasVSX())
14714     return SDValue();
14715 
  // Before Power9, the PPCVSXSwapRemoval pass rewrites the element order
  // (see the comment in PPCVSXSwapRemoval.cpp). This combine conflicts with
  // that optimization, so we don't do it there.
14719   if (!Subtarget.hasP9Vector())
14720     return SDValue();
14721 
  if (!IsElementReverse(SVN))
14723     return SDValue();
14724 
14725   if (LSBase->getOpcode() == ISD::LOAD) {
14726     SDLoc dl(SVN);
14727     SDValue LoadOps[] = {LSBase->getChain(), LSBase->getBasePtr()};
14728     return DAG.getMemIntrinsicNode(
14729         PPCISD::LOAD_VEC_BE, dl, DAG.getVTList(VT, MVT::Other), LoadOps,
14730         LSBase->getMemoryVT(), LSBase->getMemOperand());
14731   }
14732 
14733   if (LSBase->getOpcode() == ISD::STORE) {
14734     SDLoc dl(LSBase);
14735     SDValue StoreOps[] = {LSBase->getChain(), SVN->getOperand(0),
14736                           LSBase->getBasePtr()};
14737     return DAG.getMemIntrinsicNode(
14738         PPCISD::STORE_VEC_BE, dl, DAG.getVTList(MVT::Other), StoreOps,
14739         LSBase->getMemoryVT(), LSBase->getMemOperand());
14740   }
14741 
14742   llvm_unreachable("Expected a load or store node here");
14743 }
14744 
14745 SDValue PPCTargetLowering::PerformDAGCombine(SDNode *N,
14746                                              DAGCombinerInfo &DCI) const {
14747   SelectionDAG &DAG = DCI.DAG;
14748   SDLoc dl(N);
14749   switch (N->getOpcode()) {
14750   default: break;
14751   case ISD::ADD:
14752     return combineADD(N, DCI);
14753   case ISD::SHL:
14754     return combineSHL(N, DCI);
14755   case ISD::SRA:
14756     return combineSRA(N, DCI);
14757   case ISD::SRL:
14758     return combineSRL(N, DCI);
14759   case ISD::MUL:
14760     return combineMUL(N, DCI);
14761   case ISD::FMA:
14762   case PPCISD::FNMSUB:
14763     return combineFMALike(N, DCI);
14764   case PPCISD::SHL:
14765     if (isNullConstant(N->getOperand(0))) // 0 << V -> 0.
      return N->getOperand(0);
14767     break;
14768   case PPCISD::SRL:
14769     if (isNullConstant(N->getOperand(0))) // 0 >>u V -> 0.
      return N->getOperand(0);
14771     break;
14772   case PPCISD::SRA:
14773     if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(N->getOperand(0))) {
14774       if (C->isNullValue() ||   //  0 >>s V -> 0.
14775           C->isAllOnesValue())    // -1 >>s V -> -1.
14776         return N->getOperand(0);
14777     }
14778     break;
14779   case ISD::SIGN_EXTEND:
14780   case ISD::ZERO_EXTEND:
14781   case ISD::ANY_EXTEND:
14782     return DAGCombineExtBoolTrunc(N, DCI);
14783   case ISD::TRUNCATE:
14784     return combineTRUNCATE(N, DCI);
14785   case ISD::SETCC:
14786     if (SDValue CSCC = combineSetCC(N, DCI))
14787       return CSCC;
14788     LLVM_FALLTHROUGH;
14789   case ISD::SELECT_CC:
14790     return DAGCombineTruncBoolExt(N, DCI);
14791   case ISD::SINT_TO_FP:
14792   case ISD::UINT_TO_FP:
14793     return combineFPToIntToFP(N, DCI);
14794   case ISD::VECTOR_SHUFFLE:
14795     if (ISD::isNormalLoad(N->getOperand(0).getNode())) {
      LSBaseSDNode *LSBase = cast<LSBaseSDNode>(N->getOperand(0));
14797       return combineVReverseMemOP(cast<ShuffleVectorSDNode>(N), LSBase, DCI);
14798     }
14799     return combineVectorShuffle(cast<ShuffleVectorSDNode>(N), DCI.DAG);
14800   case ISD::STORE: {
14802     EVT Op1VT = N->getOperand(1).getValueType();
14803     unsigned Opcode = N->getOperand(1).getOpcode();
14804 
14805     if (Opcode == ISD::FP_TO_SINT || Opcode == ISD::FP_TO_UINT) {
      SDValue Val = combineStoreFPToInt(N, DCI);
14807       if (Val)
14808         return Val;
14809     }
14810 
14811     if (Opcode == ISD::VECTOR_SHUFFLE && ISD::isNormalStore(N)) {
14812       ShuffleVectorSDNode *SVN = cast<ShuffleVectorSDNode>(N->getOperand(1));
      SDValue Val = combineVReverseMemOP(SVN, cast<LSBaseSDNode>(N), DCI);
14814       if (Val)
14815         return Val;
14816     }
14817 
14818     // Turn STORE (BSWAP) -> sthbrx/stwbrx.
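    // For example (illustrative):
    //   (store (i32 (bswap x)), ptr) becomes a PPCISD::STBRX node that
    //   stores x byte-reversed (an stwbrx).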
14819     if (cast<StoreSDNode>(N)->isUnindexed() && Opcode == ISD::BSWAP &&
14820         N->getOperand(1).getNode()->hasOneUse() &&
14821         (Op1VT == MVT::i32 || Op1VT == MVT::i16 ||
14822          (Subtarget.hasLDBRX() && Subtarget.isPPC64() && Op1VT == MVT::i64))) {
14823 
      // STBRX can only handle simple types and it makes no sense to store
      // fewer than two bytes in byte-reversed order.
14826       EVT mVT = cast<StoreSDNode>(N)->getMemoryVT();
14827       if (mVT.isExtended() || mVT.getSizeInBits() < 16)
14828         break;
14829 
14830       SDValue BSwapOp = N->getOperand(1).getOperand(0);
14831       // Do an any-extend to 32-bits if this is a half-word input.
14832       if (BSwapOp.getValueType() == MVT::i16)
14833         BSwapOp = DAG.getNode(ISD::ANY_EXTEND, dl, MVT::i32, BSwapOp);
14834 
      // If the type of the BSWAP operand is wider than the stored memory
      // width, it needs to be shifted right before the STBRX.
14837       if (Op1VT.bitsGT(mVT)) {
14838         int Shift = Op1VT.getSizeInBits() - mVT.getSizeInBits();
14839         BSwapOp = DAG.getNode(ISD::SRL, dl, Op1VT, BSwapOp,
14840                               DAG.getConstant(Shift, dl, MVT::i32));
14841         // Need to truncate if this is a bswap of i64 stored as i32/i16.
14842         if (Op1VT == MVT::i64)
14843           BSwapOp = DAG.getNode(ISD::TRUNCATE, dl, MVT::i32, BSwapOp);
14844       }
14845 
14846       SDValue Ops[] = {
14847         N->getOperand(0), BSwapOp, N->getOperand(2), DAG.getValueType(mVT)
14848       };
14849       return
14850         DAG.getMemIntrinsicNode(PPCISD::STBRX, dl, DAG.getVTList(MVT::Other),
14851                                 Ops, cast<StoreSDNode>(N)->getMemoryVT(),
14852                                 cast<StoreSDNode>(N)->getMemOperand());
14853     }
14854 
14855     // STORE Constant:i32<0>  ->  STORE<trunc to i32> Constant:i64<0>
    // This increases the chance of CSE'ing the constant construction.
14857     if (Subtarget.isPPC64() && !DCI.isBeforeLegalize() &&
14858         isa<ConstantSDNode>(N->getOperand(1)) && Op1VT == MVT::i32) {
      // Need to sign-extend to 64 bits to handle negative values.
14860       EVT MemVT = cast<StoreSDNode>(N)->getMemoryVT();
14861       uint64_t Val64 = SignExtend64(N->getConstantOperandVal(1),
14862                                     MemVT.getSizeInBits());
14863       SDValue Const64 = DAG.getConstant(Val64, dl, MVT::i64);
14864 
14865       // DAG.getTruncStore() can't be used here because it doesn't accept
14866       // the general (base + offset) addressing mode.
14867       // So we use UpdateNodeOperands and setTruncatingStore instead.
14868       DAG.UpdateNodeOperands(N, N->getOperand(0), Const64, N->getOperand(2),
14869                              N->getOperand(3));
14870       cast<StoreSDNode>(N)->setTruncatingStore(true);
14871       return SDValue(N, 0);
14872     }
14873 
    // For little endian, VSX stores require generating xxswapd/stxvd2x.
14875     // Not needed on ISA 3.0 based CPUs since we have a non-permuting store.
14876     if (Op1VT.isSimple()) {
14877       MVT StoreVT = Op1VT.getSimpleVT();
14878       if (Subtarget.needsSwapsForVSXMemOps() &&
14879           (StoreVT == MVT::v2f64 || StoreVT == MVT::v2i64 ||
14880            StoreVT == MVT::v4f32 || StoreVT == MVT::v4i32))
14881         return expandVSXStoreForLE(N, DCI);
14882     }
14883     break;
14884   }
14885   case ISD::LOAD: {
14886     LoadSDNode *LD = cast<LoadSDNode>(N);
14887     EVT VT = LD->getValueType(0);
14888 
14889     // For little endian, VSX loads require generating lxvd2x/xxswapd.
14890     // Not needed on ISA 3.0 based CPUs since we have a non-permuting load.
14891     if (VT.isSimple()) {
14892       MVT LoadVT = VT.getSimpleVT();
14893       if (Subtarget.needsSwapsForVSXMemOps() &&
14894           (LoadVT == MVT::v2f64 || LoadVT == MVT::v2i64 ||
14895            LoadVT == MVT::v4f32 || LoadVT == MVT::v4i32))
14896         return expandVSXLoadForLE(N, DCI);
14897     }
14898 
14899     // We sometimes end up with a 64-bit integer load, from which we extract
14900     // two single-precision floating-point numbers. This happens with
14901     // std::complex<float>, and other similar structures, because of the way we
14902     // canonicalize structure copies. However, if we lack direct moves,
14903     // then the final bitcasts from the extracted integer values to the
14904     // floating-point numbers turn into store/load pairs. Even with direct moves,
14905     // just loading the two floating-point numbers is likely better.
    auto ReplaceTwoFloatLoad = [&]() {
      if (VT != MVT::i64)
        return false;

      if (LD->getExtensionType() != ISD::NON_EXTLOAD ||
          LD->isVolatile())
        return false;

      //  We're looking for a sequence like this:
      //  t13: i64,ch = load<LD8[%ref.tmp]> t0, t6, undef:i64
      //      t16: i64 = srl t13, Constant:i32<32>
      //    t17: i32 = truncate t16
      //  t18: f32 = bitcast t17
      //    t19: i32 = truncate t13
      //  t20: f32 = bitcast t19

      if (!LD->hasNUsesOfValue(2, 0))
        return false;

      auto UI = LD->use_begin();
      while (UI.getUse().getResNo() != 0) ++UI;
      SDNode *Trunc = *UI++;
      while (UI.getUse().getResNo() != 0) ++UI;
      SDNode *RightShift = *UI;
      if (Trunc->getOpcode() != ISD::TRUNCATE)
        std::swap(Trunc, RightShift);

      if (Trunc->getOpcode() != ISD::TRUNCATE ||
          Trunc->getValueType(0) != MVT::i32 ||
          !Trunc->hasOneUse())
        return false;
      if (RightShift->getOpcode() != ISD::SRL ||
          !isa<ConstantSDNode>(RightShift->getOperand(1)) ||
          RightShift->getConstantOperandVal(1) != 32 ||
          !RightShift->hasOneUse())
        return false;

      SDNode *Trunc2 = *RightShift->use_begin();
      if (Trunc2->getOpcode() != ISD::TRUNCATE ||
          Trunc2->getValueType(0) != MVT::i32 ||
          !Trunc2->hasOneUse())
        return false;

      SDNode *Bitcast = *Trunc->use_begin();
      SDNode *Bitcast2 = *Trunc2->use_begin();

      if (Bitcast->getOpcode() != ISD::BITCAST ||
          Bitcast->getValueType(0) != MVT::f32)
        return false;
      if (Bitcast2->getOpcode() != ISD::BITCAST ||
          Bitcast2->getValueType(0) != MVT::f32)
        return false;

      if (Subtarget.isLittleEndian())
        std::swap(Bitcast, Bitcast2);

      // Bitcast has the second float (in memory-layout order) and Bitcast2
      // has the first one.

      SDValue BasePtr = LD->getBasePtr();
      if (LD->isIndexed()) {
        assert(LD->getAddressingMode() == ISD::PRE_INC &&
               "Non-pre-inc AM on PPC?");
        BasePtr =
          DAG.getNode(ISD::ADD, dl, BasePtr.getValueType(), BasePtr,
                      LD->getOffset());
      }

      auto MMOFlags =
          LD->getMemOperand()->getFlags() & ~MachineMemOperand::MOVolatile;
      SDValue FloatLoad = DAG.getLoad(MVT::f32, dl, LD->getChain(), BasePtr,
                                      LD->getPointerInfo(), LD->getAlignment(),
                                      MMOFlags, LD->getAAInfo());
      SDValue AddPtr =
        DAG.getNode(ISD::ADD, dl, BasePtr.getValueType(),
                    BasePtr, DAG.getIntPtrConstant(4, dl));
      SDValue FloatLoad2 = DAG.getLoad(
          MVT::f32, dl, SDValue(FloatLoad.getNode(), 1), AddPtr,
          LD->getPointerInfo().getWithOffset(4),
          MinAlign(LD->getAlignment(), 4), MMOFlags, LD->getAAInfo());

      if (LD->isIndexed()) {
        // Note that DAGCombine should re-form any pre-increment load(s) from
        // what is produced here if that makes sense.
        DAG.ReplaceAllUsesOfValueWith(SDValue(LD, 1), BasePtr);
      }

      DCI.CombineTo(Bitcast2, FloatLoad);
      DCI.CombineTo(Bitcast, FloatLoad2);

      DAG.ReplaceAllUsesOfValueWith(SDValue(LD, LD->isIndexed() ? 2 : 1),
                                    SDValue(FloatLoad2.getNode(), 1));
      return true;
    };

    if (ReplaceTwoFloatLoad())
      return SDValue(N, 0);

    EVT MemVT = LD->getMemoryVT();
    Type *Ty = MemVT.getTypeForEVT(*DAG.getContext());
    Align ABIAlignment = DAG.getDataLayout().getABITypeAlign(Ty);
    if (LD->isUnindexed() && VT.isVector() &&
        ((Subtarget.hasAltivec() && ISD::isNON_EXTLoad(N) &&
          // P8 and later hardware should just use LOAD.
          !Subtarget.hasP8Vector() &&
          (VT == MVT::v16i8 || VT == MVT::v8i16 || VT == MVT::v4i32 ||
           VT == MVT::v4f32))) &&
        LD->getAlign() < ABIAlignment) {
      // This is a type-legal unaligned Altivec load.
      SDValue Chain = LD->getChain();
      SDValue Ptr = LD->getBasePtr();
      bool isLittleEndian = Subtarget.isLittleEndian();

      // This implements the loading of unaligned vectors as described in
      // the venerable Apple Velocity Engine overview. Specifically:
      // https://developer.apple.com/hardwaredrivers/ve/alignment.html
      // https://developer.apple.com/hardwaredrivers/ve/code_optimization.html
      //
      // The general idea is to expand a sequence of one or more unaligned
      // loads into an alignment-based permutation-control instruction (lvsl
      // or lvsr), a series of regular vector loads (which always truncate
      // their input address to an aligned address), and a series of
      // permutations.  The results of these permutations are the requested
      // loaded values.  The trick is that the last "extra" load is not taken
      // from the address you might suspect (sizeof(vector) bytes after the
      // last requested load), but rather sizeof(vector) - 1 bytes after the
      // last requested vector. The point of this is to avoid a page fault if
      // the base address happened to be aligned. This works because if the
      // base address is aligned, then adding less than a full vector length
      // will cause the last vector in the sequence to be (re)loaded.
      // Otherwise, the next vector will be fetched from the address you
      // would expect.
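      // For illustration, a big-endian expansion of a single unaligned load
      // from r3 looks roughly like (little endian uses lvsr and swaps the
      // vperm inputs):
      //   lvsl  v4, 0, r3        ; permute control from the misalignment
      //   lvx   v2, 0, r3        ; aligned vector containing the first byte
      //   addi  r4, r3, 15       ; sizeof(vector) - 1, not sizeof(vector)
      //   lvx   v3, 0, r4        ; next vector (or the same one, if aligned)
      //   vperm v2, v2, v3, v4   ; splice the two halves together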

      // We might be able to reuse the permutation generation from
      // a different base address offset from this one by an aligned amount.
      // The INTRINSIC_WO_CHAIN DAG combine will attempt to perform this
      // optimization later.
      Intrinsic::ID Intr, IntrLD, IntrPerm;
      MVT PermCntlTy, PermTy, LDTy;
      Intr = isLittleEndian ? Intrinsic::ppc_altivec_lvsr
                            : Intrinsic::ppc_altivec_lvsl;
      IntrLD = Intrinsic::ppc_altivec_lvx;
      IntrPerm = Intrinsic::ppc_altivec_vperm;
      PermCntlTy = MVT::v16i8;
      PermTy = MVT::v4i32;
      LDTy = MVT::v4i32;

      SDValue PermCntl = BuildIntrinsicOp(Intr, Ptr, DAG, dl, PermCntlTy);

      // Create the new MMO for the new base load. It is like the original MMO,
      // but represents an area in memory almost twice the vector size centered
      // on the original address. If the address is unaligned, we might start
      // reading up to (sizeof(vector)-1) bytes below the address of the
      // original unaligned load.
      MachineFunction &MF = DAG.getMachineFunction();
      MachineMemOperand *BaseMMO =
        MF.getMachineMemOperand(LD->getMemOperand(),
                                -(int64_t)MemVT.getStoreSize()+1,
                                2*MemVT.getStoreSize()-1);

      // Create the new base load.
      SDValue LDXIntID =
          DAG.getTargetConstant(IntrLD, dl, getPointerTy(MF.getDataLayout()));
      SDValue BaseLoadOps[] = { Chain, LDXIntID, Ptr };
      SDValue BaseLoad =
        DAG.getMemIntrinsicNode(ISD::INTRINSIC_W_CHAIN, dl,
                                DAG.getVTList(PermTy, MVT::Other),
                                BaseLoadOps, LDTy, BaseMMO);

      // Note that the value of IncOffset (which is provided to the next
      // load's pointer info offset value, and thus used to calculate the
      // alignment), and the value of IncValue (which is actually used to
      // increment the pointer value) are different! This is because we
      // require the next load to appear to be aligned, even though it
      // is actually offset from the base pointer by a lesser amount.
      int IncOffset = VT.getSizeInBits() / 8;
      int IncValue = IncOffset;

      // Walk (both up and down) the chain looking for another load at the real
      // (aligned) offset (the alignment of the other load does not matter in
      // this case). If found, then do not use the offset reduction trick, as
      // that will prevent the loads from being later combined (as they would
      // otherwise be duplicates).
      if (!findConsecutiveLoad(LD, DAG))
        --IncValue;

      SDValue Increment =
          DAG.getConstant(IncValue, dl, getPointerTy(MF.getDataLayout()));
      Ptr = DAG.getNode(ISD::ADD, dl, Ptr.getValueType(), Ptr, Increment);

      MachineMemOperand *ExtraMMO =
        MF.getMachineMemOperand(LD->getMemOperand(),
                                1, 2*MemVT.getStoreSize()-1);
      SDValue ExtraLoadOps[] = { Chain, LDXIntID, Ptr };
      SDValue ExtraLoad =
        DAG.getMemIntrinsicNode(ISD::INTRINSIC_W_CHAIN, dl,
                                DAG.getVTList(PermTy, MVT::Other),
                                ExtraLoadOps, LDTy, ExtraMMO);

      SDValue TF = DAG.getNode(ISD::TokenFactor, dl, MVT::Other,
        BaseLoad.getValue(1), ExtraLoad.getValue(1));

      // Because vperm has a big-endian bias, we must reverse the order
      // of the input vectors and complement the permute control vector
      // when generating little endian code.  We have already handled the
      // latter by using lvsr instead of lvsl, so just reverse BaseLoad
      // and ExtraLoad here.
      SDValue Perm;
      if (isLittleEndian)
        Perm = BuildIntrinsicOp(IntrPerm,
                                ExtraLoad, BaseLoad, PermCntl, DAG, dl);
      else
        Perm = BuildIntrinsicOp(IntrPerm,
                                BaseLoad, ExtraLoad, PermCntl, DAG, dl);

      if (VT != PermTy)
        Perm = Subtarget.hasAltivec()
                   ? DAG.getNode(ISD::BITCAST, dl, VT, Perm)
                   : DAG.getNode(ISD::FP_ROUND, dl, VT, Perm,
                                 DAG.getTargetConstant(1, dl, MVT::i64));
                               // second argument is 1 because this rounding
                               // is always exact.

      // The output of the permutation is our loaded result, the TokenFactor is
      // our new chain.
      DCI.CombineTo(N, Perm, TF);
      return SDValue(N, 0);
    }
    break;
  }
  case ISD::INTRINSIC_WO_CHAIN: {
    bool isLittleEndian = Subtarget.isLittleEndian();
    unsigned IID = cast<ConstantSDNode>(N->getOperand(0))->getZExtValue();
    Intrinsic::ID Intr = (isLittleEndian ? Intrinsic::ppc_altivec_lvsr
                                         : Intrinsic::ppc_altivec_lvsl);
    if (IID == Intr && N->getOperand(1)->getOpcode() == ISD::ADD) {
      SDValue Add = N->getOperand(1);

      int Bits = 4 /* 16-byte alignment */;

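      // If the addend's low 4 bits are known to be zero, the add does not
      // change the address modulo 16, so an lvsl/lvsr of (base + off) equals
      // that of base alone and an existing node can be reused.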
      if (DAG.MaskedValueIsZero(Add->getOperand(1),
                                APInt::getAllOnesValue(Bits /* alignment */)
                                    .zext(Add.getScalarValueSizeInBits()))) {
        SDNode *BasePtr = Add->getOperand(0).getNode();
        for (SDNode::use_iterator UI = BasePtr->use_begin(),
                                  UE = BasePtr->use_end();
             UI != UE; ++UI) {
          if (UI->getOpcode() == ISD::INTRINSIC_WO_CHAIN &&
              cast<ConstantSDNode>(UI->getOperand(0))->getZExtValue() ==
                  IID) {
            // We've found another LVSL/LVSR, and this address is an aligned
            // multiple of that one. The results will be the same, so use the
            // one we've just found instead.

            return SDValue(*UI, 0);
          }
        }
      }

      if (isa<ConstantSDNode>(Add->getOperand(1))) {
        SDNode *BasePtr = Add->getOperand(0).getNode();
        for (SDNode::use_iterator UI = BasePtr->use_begin(),
             UE = BasePtr->use_end(); UI != UE; ++UI) {
          if (UI->getOpcode() == ISD::ADD &&
              isa<ConstantSDNode>(UI->getOperand(1)) &&
              (cast<ConstantSDNode>(Add->getOperand(1))->getZExtValue() -
               cast<ConstantSDNode>(UI->getOperand(1))->getZExtValue()) %
              (1ULL << Bits) == 0) {
            SDNode *OtherAdd = *UI;
            for (SDNode::use_iterator VI = OtherAdd->use_begin(),
                 VE = OtherAdd->use_end(); VI != VE; ++VI) {
              if (VI->getOpcode() == ISD::INTRINSIC_WO_CHAIN &&
                  cast<ConstantSDNode>(VI->getOperand(0))->getZExtValue() == IID) {
                return SDValue(*VI, 0);
              }
            }
          }
        }
      }
    }

    // Combine vmaxsw/h/b(a, a's negation) into abs(a).
    // This exposes a vabsduw/h/b opportunity for downstream combines.
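    // For example, with v4i32 operands,
    //   vmaxsw (sub 0, a), a  -->  abs a
    // after which a later combine may turn abs (sub x, y) into vabsduw x, y.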
    if (!DCI.isAfterLegalizeDAG() && Subtarget.hasP9Altivec() &&
        (IID == Intrinsic::ppc_altivec_vmaxsw ||
         IID == Intrinsic::ppc_altivec_vmaxsh ||
         IID == Intrinsic::ppc_altivec_vmaxsb)) {
      SDValue V1 = N->getOperand(1);
      SDValue V2 = N->getOperand(2);
      if ((V1.getSimpleValueType() == MVT::v4i32 ||
           V1.getSimpleValueType() == MVT::v8i16 ||
           V1.getSimpleValueType() == MVT::v16i8) &&
          V1.getSimpleValueType() == V2.getSimpleValueType()) {
        // (0-a, a)
        if (V1.getOpcode() == ISD::SUB &&
            ISD::isBuildVectorAllZeros(V1.getOperand(0).getNode()) &&
            V1.getOperand(1) == V2) {
          return DAG.getNode(ISD::ABS, dl, V2.getValueType(), V2);
        }
        // (a, 0-a)
        if (V2.getOpcode() == ISD::SUB &&
            ISD::isBuildVectorAllZeros(V2.getOperand(0).getNode()) &&
            V2.getOperand(1) == V1) {
          return DAG.getNode(ISD::ABS, dl, V1.getValueType(), V1);
        }
        // (x-y, y-x)
        if (V1.getOpcode() == ISD::SUB && V2.getOpcode() == ISD::SUB &&
            V1.getOperand(0) == V2.getOperand(1) &&
            V1.getOperand(1) == V2.getOperand(0)) {
          return DAG.getNode(ISD::ABS, dl, V1.getValueType(), V1);
        }
      }
    }
    break;
  }
  case ISD::INTRINSIC_W_CHAIN:
    // For little endian, VSX loads require generating lxvd2x/xxswapd.
    // Not needed on ISA 3.0 based CPUs since we have a non-permuting load.
    if (Subtarget.needsSwapsForVSXMemOps()) {
      switch (cast<ConstantSDNode>(N->getOperand(1))->getZExtValue()) {
      default:
        break;
      case Intrinsic::ppc_vsx_lxvw4x:
      case Intrinsic::ppc_vsx_lxvd2x:
        return expandVSXLoadForLE(N, DCI);
      }
    }
    break;
  case ISD::INTRINSIC_VOID:
    // For little endian, VSX stores require generating xxswapd/stxvd2x.
    // Not needed on ISA 3.0 based CPUs since we have a non-permuting store.
    if (Subtarget.needsSwapsForVSXMemOps()) {
      switch (cast<ConstantSDNode>(N->getOperand(1))->getZExtValue()) {
      default:
        break;
      case Intrinsic::ppc_vsx_stxvw4x:
      case Intrinsic::ppc_vsx_stxvd2x:
        return expandVSXStoreForLE(N, DCI);
      }
    }
    break;
  case ISD::BSWAP:
    // Turn BSWAP (LOAD) -> lhbrx/lwbrx.
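    // For example, (i32 (bswap (load p))) becomes a single lwbrx, and an i16
    // bswap of a load becomes lhbrx plus a truncate, instead of a load
    // followed by shift/rotate byte-reversal code.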
    if (ISD::isNON_EXTLoad(N->getOperand(0).getNode()) &&
        N->getOperand(0).hasOneUse() &&
        (N->getValueType(0) == MVT::i32 || N->getValueType(0) == MVT::i16 ||
         (Subtarget.hasLDBRX() && Subtarget.isPPC64() &&
          N->getValueType(0) == MVT::i64))) {
      SDValue Load = N->getOperand(0);
      LoadSDNode *LD = cast<LoadSDNode>(Load);
      // Create the byte-swapping load.
      SDValue Ops[] = {
        LD->getChain(),    // Chain
        LD->getBasePtr(),  // Ptr
        DAG.getValueType(N->getValueType(0)) // VT
      };
      SDValue BSLoad =
        DAG.getMemIntrinsicNode(PPCISD::LBRX, dl,
                                DAG.getVTList(N->getValueType(0) == MVT::i64 ?
                                              MVT::i64 : MVT::i32, MVT::Other),
                                Ops, LD->getMemoryVT(), LD->getMemOperand());

      // If this is an i16 load, insert the truncate.
      SDValue ResVal = BSLoad;
      if (N->getValueType(0) == MVT::i16)
        ResVal = DAG.getNode(ISD::TRUNCATE, dl, MVT::i16, BSLoad);

      // First, combine the bswap away.  This makes the value produced by the
      // load dead.
      DCI.CombineTo(N, ResVal);

      // Next, combine the load away, we give it a bogus result value but a real
      // chain result.  The result value is dead because the bswap is dead.
      DCI.CombineTo(Load.getNode(), ResVal, BSLoad.getValue(1));

      // Return N so it doesn't get rechecked!
      return SDValue(N, 0);
    }
    break;
  case PPCISD::VCMP:
    // If a VCMP_rec node already exists with exactly the same operands as this
    // node, use its result instead of this node (VCMP_rec computes both a CR6
    // and a normal output).
    //
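    // For example, if the DAG contains both (PPCISD::VCMP a, b, vcmpequw) and
    // (PPCISD::VCMP_rec a, b, vcmpequw), the plain VCMP is redundant:
    // vcmpequw. produces the same vector result while also setting CR6.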
    if (!N->getOperand(0).hasOneUse() &&
        !N->getOperand(1).hasOneUse() &&
        !N->getOperand(2).hasOneUse()) {

      // Scan all of the users of the LHS, looking for VCMP_rec's that match.
      SDNode *VCMPrecNode = nullptr;

      SDNode *LHSN = N->getOperand(0).getNode();
      for (SDNode::use_iterator UI = LHSN->use_begin(), E = LHSN->use_end();
           UI != E; ++UI)
        if (UI->getOpcode() == PPCISD::VCMP_rec &&
            UI->getOperand(1) == N->getOperand(1) &&
            UI->getOperand(2) == N->getOperand(2) &&
            UI->getOperand(0) == N->getOperand(0)) {
          VCMPrecNode = *UI;
          break;
        }

      // If there is no VCMP_rec node, or if the flag value has a single use,
      // don't transform this.
      if (!VCMPrecNode || VCMPrecNode->hasNUsesOfValue(0, 1))
        break;

      // Look at the (necessarily single) use of the flag value.  If it has a
      // chain, this transformation is more complex.  Note that multiple things
      // could use the value result, which we should ignore.
      SDNode *FlagUser = nullptr;
      for (SDNode::use_iterator UI = VCMPrecNode->use_begin();
           FlagUser == nullptr; ++UI) {
        assert(UI != VCMPrecNode->use_end() && "Didn't find user!");
        SDNode *User = *UI;
        for (unsigned i = 0, e = User->getNumOperands(); i != e; ++i) {
          if (User->getOperand(i) == SDValue(VCMPrecNode, 1)) {
            FlagUser = User;
            break;
          }
        }
      }

      // If the user is a MFOCRF instruction, we know this is safe.
      // Otherwise we give up for right now.
      if (FlagUser->getOpcode() == PPCISD::MFOCRF)
        return SDValue(VCMPrecNode, 0);
    }
    break;
  case ISD::BRCOND: {
    SDValue Cond = N->getOperand(1);
    SDValue Target = N->getOperand(2);

    if (Cond.getOpcode() == ISD::INTRINSIC_W_CHAIN &&
        cast<ConstantSDNode>(Cond.getOperand(1))->getZExtValue() ==
          Intrinsic::loop_decrement) {

      // We now need to make the intrinsic dead (it cannot be instruction
      // selected).
      DAG.ReplaceAllUsesOfValueWith(Cond.getValue(1), Cond.getOperand(0));
      assert(Cond.getNode()->hasOneUse() &&
             "Counter decrement has more than one use");

      return DAG.getNode(PPCISD::BDNZ, dl, MVT::Other,
                         N->getOperand(0), Target);
    }
    break;
  }
  case ISD::BR_CC: {
    // If this is a branch on an altivec predicate comparison, lower this so
    // that we don't have to do a MFOCRF: instead, branch directly on CR6.  This
    // lowering is done pre-legalize, because the legalizer lowers the predicate
    // compare down to code that is difficult to reassemble.
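    // For example, a br_cc of (vcmpeqfp_p ...) against a constant becomes
    // vcmpeqfp. followed by a conditional branch on CR6, with no intervening
    // mfocrf/cmpw.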
    ISD::CondCode CC = cast<CondCodeSDNode>(N->getOperand(1))->get();
    SDValue LHS = N->getOperand(2), RHS = N->getOperand(3);

    // Sometimes the promoted value of the intrinsic is ANDed by some non-zero
    // value. If so, pass-through the AND to get to the intrinsic.
    if (LHS.getOpcode() == ISD::AND &&
        LHS.getOperand(0).getOpcode() == ISD::INTRINSIC_W_CHAIN &&
        cast<ConstantSDNode>(LHS.getOperand(0).getOperand(1))->getZExtValue() ==
          Intrinsic::loop_decrement &&
        isa<ConstantSDNode>(LHS.getOperand(1)) &&
        !isNullConstant(LHS.getOperand(1)))
      LHS = LHS.getOperand(0);

    if (LHS.getOpcode() == ISD::INTRINSIC_W_CHAIN &&
        cast<ConstantSDNode>(LHS.getOperand(1))->getZExtValue() ==
          Intrinsic::loop_decrement &&
        isa<ConstantSDNode>(RHS)) {
      assert((CC == ISD::SETEQ || CC == ISD::SETNE) &&
             "Counter decrement comparison is not EQ or NE");

      unsigned Val = cast<ConstantSDNode>(RHS)->getZExtValue();
      bool isBDNZ = (CC == ISD::SETEQ && Val) ||
                    (CC == ISD::SETNE && !Val);

      // We now need to make the intrinsic dead (it cannot be instruction
      // selected).
      DAG.ReplaceAllUsesOfValueWith(LHS.getValue(1), LHS.getOperand(0));
      assert(LHS.getNode()->hasOneUse() &&
             "Counter decrement has more than one use");

      return DAG.getNode(isBDNZ ? PPCISD::BDNZ : PPCISD::BDZ, dl, MVT::Other,
                         N->getOperand(0), N->getOperand(4));
    }

    int CompareOpc;
    bool isDot;

    if (LHS.getOpcode() == ISD::INTRINSIC_WO_CHAIN &&
        isa<ConstantSDNode>(RHS) && (CC == ISD::SETEQ || CC == ISD::SETNE) &&
        getVectorCompareInfo(LHS, CompareOpc, isDot, Subtarget)) {
      assert(isDot && "Can't compare against a vector result!");

      // If this is a comparison against something other than 0/1, then we know
      // that the condition is never/always true.
      unsigned Val = cast<ConstantSDNode>(RHS)->getZExtValue();
      if (Val != 0 && Val != 1) {
        if (CC == ISD::SETEQ)      // Cond never true, remove branch.
          return N->getOperand(0);
        // Always !=, turn it into an unconditional branch.
        return DAG.getNode(ISD::BR, dl, MVT::Other,
                           N->getOperand(0), N->getOperand(4));
      }

      bool BranchOnWhenPredTrue = (CC == ISD::SETEQ) ^ (Val == 0);

      // Create the PPCISD altivec 'dot' comparison node.
      SDValue Ops[] = {
        LHS.getOperand(2),  // LHS of compare
        LHS.getOperand(3),  // RHS of compare
        DAG.getConstant(CompareOpc, dl, MVT::i32)
      };
      EVT VTs[] = { LHS.getOperand(2).getValueType(), MVT::Glue };
      SDValue CompNode = DAG.getNode(PPCISD::VCMP_rec, dl, VTs, Ops);

      // Unpack the result based on how the target uses it.
      PPC::Predicate CompOpc;
      switch (cast<ConstantSDNode>(LHS.getOperand(1))->getZExtValue()) {
      default:  // Can't happen, don't crash on invalid number though.
      case 0:   // Branch on the value of the EQ bit of CR6.
        CompOpc = BranchOnWhenPredTrue ? PPC::PRED_EQ : PPC::PRED_NE;
        break;
      case 1:   // Branch on the inverted value of the EQ bit of CR6.
        CompOpc = BranchOnWhenPredTrue ? PPC::PRED_NE : PPC::PRED_EQ;
        break;
      case 2:   // Branch on the value of the LT bit of CR6.
        CompOpc = BranchOnWhenPredTrue ? PPC::PRED_LT : PPC::PRED_GE;
        break;
      case 3:   // Branch on the inverted value of the LT bit of CR6.
        CompOpc = BranchOnWhenPredTrue ? PPC::PRED_GE : PPC::PRED_LT;
        break;
      }

      return DAG.getNode(PPCISD::COND_BRANCH, dl, MVT::Other, N->getOperand(0),
                         DAG.getConstant(CompOpc, dl, MVT::i32),
                         DAG.getRegister(PPC::CR6, MVT::i32),
                         N->getOperand(4), CompNode.getValue(1));
    }
    break;
  }
  case ISD::BUILD_VECTOR:
    return DAGCombineBuildVector(N, DCI);
  case ISD::ABS:
    return combineABS(N, DCI);
  case ISD::VSELECT:
    return combineVSelect(N, DCI);
  }

  return SDValue();
}

SDValue
PPCTargetLowering::BuildSDIVPow2(SDNode *N, const APInt &Divisor,
                                 SelectionDAG &DAG,
                                 SmallVectorImpl<SDNode *> &Created) const {
  // fold (sdiv X, pow2)
  EVT VT = N->getValueType(0);
  if (VT == MVT::i64 && !Subtarget.isPPC64())
    return SDValue();
  if ((VT != MVT::i32 && VT != MVT::i64) ||
      !(Divisor.isPowerOf2() || (-Divisor).isPowerOf2()))
    return SDValue();

  SDLoc DL(N);
  SDValue N0 = N->getOperand(0);

  bool IsNegPow2 = (-Divisor).isPowerOf2();
  unsigned Lg2 = (IsNegPow2 ? -Divisor : Divisor).countTrailingZeros();
  SDValue ShiftAmt = DAG.getConstant(Lg2, DL, VT);

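  // PPCISD::SRA_ADDZE lowers to an arithmetic right shift that records the
  // rounding adjustment in the carry bit, followed by addze, e.g. for X / 8:
  //   srawi r3, r3, 3
  //   addze r3, r3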
  SDValue Op = DAG.getNode(PPCISD::SRA_ADDZE, DL, VT, N0, ShiftAmt);
  Created.push_back(Op.getNode());

  if (IsNegPow2) {
    Op = DAG.getNode(ISD::SUB, DL, VT, DAG.getConstant(0, DL, VT), Op);
    Created.push_back(Op.getNode());
  }

  return Op;
}

//===----------------------------------------------------------------------===//
// Inline Assembly Support
//===----------------------------------------------------------------------===//

void PPCTargetLowering::computeKnownBitsForTargetNode(const SDValue Op,
                                                      KnownBits &Known,
                                                      const APInt &DemandedElts,
                                                      const SelectionDAG &DAG,
                                                      unsigned Depth) const {
  Known.resetAll();
  switch (Op.getOpcode()) {
  default: break;
  case PPCISD::LBRX: {
    // lhbrx is known to have the top bits cleared out.
    if (cast<VTSDNode>(Op.getOperand(2))->getVT() == MVT::i16)
      Known.Zero = 0xFFFF0000;
    break;
  }
  case ISD::INTRINSIC_WO_CHAIN: {
    switch (cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue()) {
    default: break;
    case Intrinsic::ppc_altivec_vcmpbfp_p:
    case Intrinsic::ppc_altivec_vcmpeqfp_p:
    case Intrinsic::ppc_altivec_vcmpequb_p:
    case Intrinsic::ppc_altivec_vcmpequh_p:
    case Intrinsic::ppc_altivec_vcmpequw_p:
    case Intrinsic::ppc_altivec_vcmpequd_p:
    case Intrinsic::ppc_altivec_vcmpequq_p:
    case Intrinsic::ppc_altivec_vcmpgefp_p:
    case Intrinsic::ppc_altivec_vcmpgtfp_p:
    case Intrinsic::ppc_altivec_vcmpgtsb_p:
    case Intrinsic::ppc_altivec_vcmpgtsh_p:
    case Intrinsic::ppc_altivec_vcmpgtsw_p:
    case Intrinsic::ppc_altivec_vcmpgtsd_p:
    case Intrinsic::ppc_altivec_vcmpgtsq_p:
    case Intrinsic::ppc_altivec_vcmpgtub_p:
    case Intrinsic::ppc_altivec_vcmpgtuh_p:
    case Intrinsic::ppc_altivec_vcmpgtuw_p:
    case Intrinsic::ppc_altivec_vcmpgtud_p:
    case Intrinsic::ppc_altivec_vcmpgtuq_p:
      Known.Zero = ~1U;  // All bits but the low one are known to be zero.
      break;
    }
  }
  }
}

Align PPCTargetLowering::getPrefLoopAlignment(MachineLoop *ML) const {
  switch (Subtarget.getCPUDirective()) {
  default: break;
  case PPC::DIR_970:
  case PPC::DIR_PWR4:
  case PPC::DIR_PWR5:
  case PPC::DIR_PWR5X:
  case PPC::DIR_PWR6:
  case PPC::DIR_PWR6X:
  case PPC::DIR_PWR7:
  case PPC::DIR_PWR8:
  case PPC::DIR_PWR9:
  case PPC::DIR_PWR10:
  case PPC::DIR_PWR_FUTURE: {
    if (!ML)
      break;

    if (!DisableInnermostLoopAlign32) {
      // If the nested loop is an innermost loop, prefer a 32-byte alignment,
      // so that we can decrease cache misses and branch-prediction misses.
      // Actual alignment of the loop will depend on the hotness check and other
      // logic in alignBlocks.
      if (ML->getLoopDepth() > 1 && ML->getSubLoops().empty())
        return Align(32);
    }

    const PPCInstrInfo *TII = Subtarget.getInstrInfo();

    // For small loops (between 5 and 8 instructions), align to a 32-byte
    // boundary so that the entire loop fits in one instruction-cache line.
    uint64_t LoopSize = 0;
    for (auto I = ML->block_begin(), IE = ML->block_end(); I != IE; ++I)
      for (auto J = (*I)->begin(), JE = (*I)->end(); J != JE; ++J) {
        LoopSize += TII->getInstSizeInBytes(*J);
        if (LoopSize > 32)
          break;
      }

    if (LoopSize > 16 && LoopSize <= 32)
      return Align(32);

    break;
  }
  }

  return TargetLowering::getPrefLoopAlignment(ML);
}

/// getConstraintType - Given a constraint, return the type of
/// constraint it is for this target.
PPCTargetLowering::ConstraintType
PPCTargetLowering::getConstraintType(StringRef Constraint) const {
  if (Constraint.size() == 1) {
    switch (Constraint[0]) {
    default: break;
    case 'b':
    case 'r':
    case 'f':
    case 'd':
    case 'v':
    case 'y':
      return C_RegisterClass;
    case 'Z':
      // FIXME: While Z does indicate a memory constraint, it specifically
      // indicates an r+r address (used in conjunction with the 'y' modifier
      // in the replacement string). Currently, we're forcing the base
      // register to be r0 in the asm printer (which is interpreted as zero)
      // and forming the complete address in the second register. This is
      // suboptimal.
      return C_Memory;
    }
  } else if (Constraint == "wc") { // individual CR bits.
    return C_RegisterClass;
  } else if (Constraint == "wa" || Constraint == "wd" ||
             Constraint == "wf" || Constraint == "ws" ||
             Constraint == "wi" || Constraint == "ww") {
    return C_RegisterClass; // VSX registers.
  }
  return TargetLowering::getConstraintType(Constraint);
}

/// Examine constraint type and operand type and determine a weight value.
/// This object must already have been set up with the operand type
/// and the current alternative constraint selected.
TargetLowering::ConstraintWeight
PPCTargetLowering::getSingleConstraintMatchWeight(
    AsmOperandInfo &info, const char *constraint) const {
  ConstraintWeight weight = CW_Invalid;
  Value *CallOperandVal = info.CallOperandVal;
  // If we don't have a value, we can't do a match,
  // but allow it at the lowest weight.
  if (!CallOperandVal)
    return CW_Default;
  Type *type = CallOperandVal->getType();

  // Look at the constraint type.
  if (StringRef(constraint) == "wc" && type->isIntegerTy(1))
    return CW_Register; // an individual CR bit.
  else if ((StringRef(constraint) == "wa" ||
            StringRef(constraint) == "wd" ||
            StringRef(constraint) == "wf") &&
           type->isVectorTy())
    return CW_Register;
  else if (StringRef(constraint) == "wi" && type->isIntegerTy(64))
    return CW_Register; // 'wi' just holds 64-bit integer data.
  else if (StringRef(constraint) == "ws" && type->isDoubleTy())
    return CW_Register;
  else if (StringRef(constraint) == "ww" && type->isFloatTy())
    return CW_Register;

  switch (*constraint) {
  default:
    weight = TargetLowering::getSingleConstraintMatchWeight(info, constraint);
    break;
  case 'b':
    if (type->isIntegerTy())
      weight = CW_Register;
    break;
  case 'f':
    if (type->isFloatTy())
      weight = CW_Register;
    break;
  case 'd':
    if (type->isDoubleTy())
      weight = CW_Register;
    break;
  case 'v':
    if (type->isVectorTy())
      weight = CW_Register;
    break;
  case 'y':
    weight = CW_Register;
    break;
  case 'Z':
    weight = CW_Memory;
    break;
  }
  return weight;
}

std::pair<unsigned, const TargetRegisterClass *>
PPCTargetLowering::getRegForInlineAsmConstraint(const TargetRegisterInfo *TRI,
                                                StringRef Constraint,
                                                MVT VT) const {
  if (Constraint.size() == 1) {
    // GCC RS6000 Constraint Letters
    switch (Constraint[0]) {
    case 'b':   // R1-R31
      if (VT == MVT::i64 && Subtarget.isPPC64())
        return std::make_pair(0U, &PPC::G8RC_NOX0RegClass);
      return std::make_pair(0U, &PPC::GPRC_NOR0RegClass);
    case 'r':   // R0-R31
      if (VT == MVT::i64 && Subtarget.isPPC64())
        return std::make_pair(0U, &PPC::G8RCRegClass);
      return std::make_pair(0U, &PPC::GPRCRegClass);
    // 'd' and 'f' constraints are both defined to be "the floating point
    // registers", where one is for 32-bit and the other for 64-bit. We don't
    // really care overly much here so just give them all the same reg classes.
    case 'd':
    case 'f':
      if (Subtarget.hasSPE()) {
        if (VT == MVT::f32 || VT == MVT::i32)
          return std::make_pair(0U, &PPC::GPRCRegClass);
        if (VT == MVT::f64 || VT == MVT::i64)
          return std::make_pair(0U, &PPC::SPERCRegClass);
      } else {
        if (VT == MVT::f32 || VT == MVT::i32)
          return std::make_pair(0U, &PPC::F4RCRegClass);
        if (VT == MVT::f64 || VT == MVT::i64)
          return std::make_pair(0U, &PPC::F8RCRegClass);
      }
      break;
    case 'v':
      if (Subtarget.hasAltivec())
        return std::make_pair(0U, &PPC::VRRCRegClass);
      break;
    case 'y':   // crrc
      return std::make_pair(0U, &PPC::CRRCRegClass);
    }
  } else if (Constraint == "wc" && Subtarget.useCRBits()) {
    // An individual CR bit.
    return std::make_pair(0U, &PPC::CRBITRCRegClass);
  } else if ((Constraint == "wa" || Constraint == "wd" ||
             Constraint == "wf" || Constraint == "wi") &&
             Subtarget.hasVSX()) {
    return std::make_pair(0U, &PPC::VSRCRegClass);
  } else if ((Constraint == "ws" || Constraint == "ww") && Subtarget.hasVSX()) {
    if (VT == MVT::f32 && Subtarget.hasP8Vector())
      return std::make_pair(0U, &PPC::VSSRCRegClass);
    else
      return std::make_pair(0U, &PPC::VSFRCRegClass);
  }

  // If we name a VSX register, we can't defer to the base class because it
  // will not recognize the correct register (their names will be VSL{0-31}
  // and V{0-31} so they won't match). So we match them here.
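  // For example, "{vs35}" resolves here to V3 (the fourth Altivec register)
  // and "{vs17}" resolves to VSL17, both in VSRCRegClass.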
  if (Constraint.size() > 3 && Constraint[1] == 'v' && Constraint[2] == 's') {
    int VSNum = atoi(Constraint.data() + 3);
    assert(VSNum >= 0 && VSNum <= 63 &&
           "Attempted to access a vsr out of range");
    if (VSNum < 32)
      return std::make_pair(PPC::VSL0 + VSNum, &PPC::VSRCRegClass);
    return std::make_pair(PPC::V0 + VSNum - 32, &PPC::VSRCRegClass);
  }
  std::pair<unsigned, const TargetRegisterClass *> R =
      TargetLowering::getRegForInlineAsmConstraint(TRI, Constraint, VT);

  // r[0-9]+ are used, on PPC64, to refer to the corresponding 64-bit registers
  // (which we call X[0-9]+). If a 64-bit value has been requested, and a
  // 32-bit GPR has been selected, then 'upgrade' it to the 64-bit parent
  // register.
  // FIXME: If TargetLowering::getRegForInlineAsmConstraint could somehow use
  // the AsmName field from *RegisterInfo.td, then this would not be necessary.
  if (R.first && VT == MVT::i64 && Subtarget.isPPC64() &&
      PPC::GPRCRegClass.contains(R.first))
    return std::make_pair(TRI->getMatchingSuperReg(R.first,
                            PPC::sub_32, &PPC::G8RCRegClass),
                          &PPC::G8RCRegClass);

  // GCC accepts 'cc' as an alias for 'cr0', and we need to do the same.
  if (!R.second && StringRef("{cc}").equals_lower(Constraint)) {
    R.first = PPC::CR0;
    R.second = &PPC::CRRCRegClass;
  }

  return R;
}

/// LowerAsmOperandForConstraint - Lower the specified operand into the Ops
/// vector.  If it is invalid, don't add anything to Ops.
void PPCTargetLowering::LowerAsmOperandForConstraint(SDValue Op,
                                                     std::string &Constraint,
                                                     std::vector<SDValue>&Ops,
                                                     SelectionDAG &DAG) const {
  SDValue Result;

  // Only support length 1 constraints.
  if (Constraint.length() > 1) return;

  char Letter = Constraint[0];
  switch (Letter) {
  default: break;
  case 'I':
  case 'J':
  case 'K':
  case 'L':
  case 'M':
  case 'N':
  case 'O':
  case 'P': {
    ConstantSDNode *CST = dyn_cast<ConstantSDNode>(Op);
    if (!CST) return; // Must be an immediate to match.
    SDLoc dl(Op);
    int64_t Value = CST->getSExtValue();
    EVT TCVT = MVT::i64; // All constants taken to be 64 bits so that negative
                         // numbers are printed as such.
    switch (Letter) {
    default: llvm_unreachable("Unknown constraint letter!");
    case 'I':  // "I" is a signed 16-bit constant.
      if (isInt<16>(Value))
        Result = DAG.getTargetConstant(Value, dl, TCVT);
      break;
    case 'J':  // "J" is a constant with only the high-order 16 bits nonzero.
      if (isShiftedUInt<16, 16>(Value))
        Result = DAG.getTargetConstant(Value, dl, TCVT);
      break;
    case 'L':  // "L" is a signed 16-bit constant shifted left 16 bits.
      if (isShiftedInt<16, 16>(Value))
        Result = DAG.getTargetConstant(Value, dl, TCVT);
      break;
    case 'K':  // "K" is a constant with only the low-order 16 bits nonzero.
      if (isUInt<16>(Value))
        Result = DAG.getTargetConstant(Value, dl, TCVT);
      break;
    case 'M':  // "M" is a constant that is greater than 31.
      if (Value > 31)
        Result = DAG.getTargetConstant(Value, dl, TCVT);
      break;
    case 'N':  // "N" is a positive constant that is an exact power of two.
      if (Value > 0 && isPowerOf2_64(Value))
        Result = DAG.getTargetConstant(Value, dl, TCVT);
      break;
    case 'O':  // "O" is the constant zero.
      if (Value == 0)
        Result = DAG.getTargetConstant(Value, dl, TCVT);
      break;
    case 'P':  // "P" is a constant whose negation is a signed 16-bit constant.
      if (isInt<16>(-Value))
        Result = DAG.getTargetConstant(Value, dl, TCVT);
      break;
    }
    break;
  }
  }

  if (Result.getNode()) {
    Ops.push_back(Result);
    return;
  }

  // Handle standard constraint letters.
  TargetLowering::LowerAsmOperandForConstraint(Op, Constraint, Ops, DAG);
}

// isLegalAddressingMode - Return true if the addressing mode represented
// by AM is legal for this target, for a load/store of the specified type.
bool PPCTargetLowering::isLegalAddressingMode(const DataLayout &DL,
                                              const AddrMode &AM, Type *Ty,
                                              unsigned AS,
                                              Instruction *I) const {
  // The vector-type r+i form is supported as the DQ form only since Power9. We
  // don't check the DQ-form offset requirement (offset % 16 == 0) here because
  // on PowerPC the immediate form is preferred and the offset can be adjusted
  // to use it later, in the PPCLoopInstrFormPrep pass. Also, in LSR a single
  // LSRUse checks legality only with its min and max offsets, so we should be
  // a little aggressive here and accept the other offsets for that LSRUse.
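  // For example, "r+i" with a signed 16-bit offset and plain "r+r" are legal
  // below, "2*r" is accepted as a form of "r+r", and "r+r+i" or larger scales
  // are rejected.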
  if (Ty->isVectorTy() && AM.BaseOffs != 0 && !Subtarget.hasP9Vector())
    return false;

  // PPC allows a sign-extended 16-bit immediate field.
  if (AM.BaseOffs <= -(1LL << 16) || AM.BaseOffs >= (1LL << 16)-1)
    return false;

  // No global is ever allowed as a base.
  if (AM.BaseGV)
    return false;

  // PPC only supports r+r addressing.
  switch (AM.Scale) {
  case 0:  // "r+i" or just "i", depending on HasBaseReg.
    break;
  case 1:
    if (AM.HasBaseReg && AM.BaseOffs)  // "r+r+i" is not allowed.
      return false;
    // Otherwise we have r+r or r+i.
    break;
  case 2:
    if (AM.HasBaseReg || AM.BaseOffs)  // 2*r+r  or  2*r+i is not allowed.
      return false;
    // Allow 2*r as r+r.
    break;
  default:
    // No other scales are supported.
    return false;
  }

  return true;
}

SDValue PPCTargetLowering::LowerRETURNADDR(SDValue Op,
                                           SelectionDAG &DAG) const {
  MachineFunction &MF = DAG.getMachineFunction();
  MachineFrameInfo &MFI = MF.getFrameInfo();
  MFI.setReturnAddressIsTaken(true);

  if (verifyReturnAddressArgumentIsConstant(Op, DAG))
    return SDValue();

  SDLoc dl(Op);
  unsigned Depth = cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue();

  // Make sure the function does not optimize away the store of the RA to
  // the stack.
  PPCFunctionInfo *FuncInfo = MF.getInfo<PPCFunctionInfo>();
  FuncInfo->setLRStoreRequired();
  bool isPPC64 = Subtarget.isPPC64();
  auto PtrVT = getPointerTy(MF.getDataLayout());

  if (Depth > 0) {
    SDValue FrameAddr = LowerFRAMEADDR(Op, DAG);
    SDValue Offset =
        DAG.getConstant(Subtarget.getFrameLowering()->getReturnSaveOffset(), dl,
                        isPPC64 ? MVT::i64 : MVT::i32);
    return DAG.getLoad(PtrVT, dl, DAG.getEntryNode(),
                       DAG.getNode(ISD::ADD, dl, PtrVT, FrameAddr, Offset),
                       MachinePointerInfo());
  }

  // Just load the return address off the stack.
  SDValue RetAddrFI = getReturnAddrFrameIndex(DAG);
  return DAG.getLoad(PtrVT, dl, DAG.getEntryNode(), RetAddrFI,
                     MachinePointerInfo());
}

SDValue PPCTargetLowering::LowerFRAMEADDR(SDValue Op,
                                          SelectionDAG &DAG) const {
  SDLoc dl(Op);
  unsigned Depth = cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue();

  MachineFunction &MF = DAG.getMachineFunction();
  MachineFrameInfo &MFI = MF.getFrameInfo();
  MFI.setFrameAddressIsTaken(true);

  EVT PtrVT = getPointerTy(MF.getDataLayout());
  bool isPPC64 = PtrVT == MVT::i64;

  // Naked functions never have a frame pointer, and so we use r1. For all
  // other functions, this decision must be delayed until PEI.
  unsigned FrameReg;
  if (MF.getFunction().hasFnAttribute(Attribute::Naked))
    FrameReg = isPPC64 ? PPC::X1 : PPC::R1;
  else
    FrameReg = isPPC64 ? PPC::FP8 : PPC::FP;

  SDValue FrameAddr = DAG.getCopyFromReg(DAG.getEntryNode(), dl, FrameReg,
                                         PtrVT);
  while (Depth--)
    FrameAddr = DAG.getLoad(Op.getValueType(), dl, DAG.getEntryNode(),
                            FrameAddr, MachinePointerInfo());
  return FrameAddr;
}

// FIXME? Maybe this could be a TableGen attribute on some registers and
// this table could be generated automatically from RegInfo.
Register PPCTargetLowering::getRegisterByName(const char* RegName, LLT VT,
                                              const MachineFunction &MF) const {
  bool isPPC64 = Subtarget.isPPC64();

  bool is64Bit = isPPC64 && VT == LLT::scalar(64);
  if (!is64Bit && VT != LLT::scalar(32))
    report_fatal_error("Invalid register global variable type");

  Register Reg = StringSwitch<Register>(RegName)
                     .Case("r1", is64Bit ? PPC::X1 : PPC::R1)
                     .Case("r2", isPPC64 ? Register() : PPC::R2)
                     .Case("r13", (is64Bit ? PPC::X13 : PPC::R13))
                     .Default(Register());

  if (Reg)
    return Reg;
  report_fatal_error("Invalid register name global variable");
}

bool PPCTargetLowering::isAccessedAsGotIndirect(SDValue GA) const {
  // The 32-bit SVR4 ABI accesses everything as got-indirect.
  if (Subtarget.is32BitELFABI())
    return true;

  // AIX accesses everything indirectly through the TOC, which is similar to
  // the GOT.
  if (Subtarget.isAIXABI())
    return true;

  CodeModel::Model CModel = getTargetMachine().getCodeModel();
  // If it is small or large code model, module locals are accessed
  // indirectly by loading their address from .toc/.got.
  if (CModel == CodeModel::Small || CModel == CodeModel::Large)
    return true;

  // JumpTable and BlockAddress are accessed as got-indirect.
  if (isa<JumpTableSDNode>(GA) || isa<BlockAddressSDNode>(GA))
    return true;

  if (GlobalAddressSDNode *G = dyn_cast<GlobalAddressSDNode>(GA))
    return Subtarget.isGVIndirectSymbol(G->getGlobal());

  return false;
}

bool
PPCTargetLowering::isOffsetFoldingLegal(const GlobalAddressSDNode *GA) const {
  // The PowerPC target isn't yet aware of offsets.
  return false;
}

bool PPCTargetLowering::getTgtMemIntrinsic(IntrinsicInfo &Info,
                                           const CallInst &I,
                                           MachineFunction &MF,
                                           unsigned Intrinsic) const {
  switch (Intrinsic) {
  case Intrinsic::ppc_altivec_lvx:
  case Intrinsic::ppc_altivec_lvxl:
  case Intrinsic::ppc_altivec_lvebx:
  case Intrinsic::ppc_altivec_lvehx:
  case Intrinsic::ppc_altivec_lvewx:
  case Intrinsic::ppc_vsx_lxvd2x:
  case Intrinsic::ppc_vsx_lxvw4x:
  case Intrinsic::ppc_vsx_lxvd2x_be:
  case Intrinsic::ppc_vsx_lxvw4x_be:
  case Intrinsic::ppc_vsx_lxvl:
  case Intrinsic::ppc_vsx_lxvll: {
    EVT VT;
    switch (Intrinsic) {
    case Intrinsic::ppc_altivec_lvebx:
      VT = MVT::i8;
      break;
    case Intrinsic::ppc_altivec_lvehx:
      VT = MVT::i16;
      break;
    case Intrinsic::ppc_altivec_lvewx:
      VT = MVT::i32;
      break;
    case Intrinsic::ppc_vsx_lxvd2x:
    case Intrinsic::ppc_vsx_lxvd2x_be:
      VT = MVT::v2f64;
      break;
    default:
      VT = MVT::v4i32;
      break;
    }

    Info.opc = ISD::INTRINSIC_W_CHAIN;
    Info.memVT = VT;
    Info.ptrVal = I.getArgOperand(0);
    Info.offset = -VT.getStoreSize()+1;
    Info.size = 2*VT.getStoreSize()-1;
    Info.align = Align(1);
    Info.flags = MachineMemOperand::MOLoad;
    return true;
  }
  case Intrinsic::ppc_altivec_stvx:
  case Intrinsic::ppc_altivec_stvxl:
  case Intrinsic::ppc_altivec_stvebx:
  case Intrinsic::ppc_altivec_stvehx:
  case Intrinsic::ppc_altivec_stvewx:
  case Intrinsic::ppc_vsx_stxvd2x:
  case Intrinsic::ppc_vsx_stxvw4x:
  case Intrinsic::ppc_vsx_stxvd2x_be:
  case Intrinsic::ppc_vsx_stxvw4x_be:
  case Intrinsic::ppc_vsx_stxvl:
  case Intrinsic::ppc_vsx_stxvll: {
    EVT VT;
    switch (Intrinsic) {
    case Intrinsic::ppc_altivec_stvebx:
      VT = MVT::i8;
      break;
    case Intrinsic::ppc_altivec_stvehx:
      VT = MVT::i16;
      break;
    case Intrinsic::ppc_altivec_stvewx:
      VT = MVT::i32;
      break;
    case Intrinsic::ppc_vsx_stxvd2x:
    case Intrinsic::ppc_vsx_stxvd2x_be:
      VT = MVT::v2f64;
      break;
    default:
      VT = MVT::v4i32;
      break;
    }

    Info.opc = ISD::INTRINSIC_VOID;
    Info.memVT = VT;
    Info.ptrVal = I.getArgOperand(1);
    Info.offset = -VT.getStoreSize()+1;
    Info.size = 2*VT.getStoreSize()-1;
    Info.align = Align(1);
    Info.flags = MachineMemOperand::MOStore;
    return true;
  }
  default:
    break;
  }

  return false;
}

/// Returns the preferred memory-operation type for memcpy/memset expansion,
/// or EVT::Other if the type should be determined using generic
/// target-independent logic.
EVT PPCTargetLowering::getOptimalMemOpType(
    const MemOp &Op, const AttributeList &FuncAttributes) const {
  if (getTargetMachine().getOptLevel() != CodeGenOpt::None) {
    // We should use Altivec/VSX loads and stores when available. For unaligned
    // addresses, unaligned VSX loads are only fast starting with the P8.
    if (Subtarget.hasAltivec() && Op.size() >= 16 &&
        (Op.isAligned(Align(16)) ||
         ((Op.isMemset() && Subtarget.hasVSX()) || Subtarget.hasP8Vector())))
      return MVT::v4i32;
  }

  if (Subtarget.isPPC64()) {
    return MVT::i64;
  }

  return MVT::i32;
}

/// Returns true if it is beneficial to convert a load of a constant
/// to just the constant itself.
bool PPCTargetLowering::shouldConvertConstantLoadToIntImm(const APInt &Imm,
                                                          Type *Ty) const {
  assert(Ty->isIntegerTy());

  unsigned BitSize = Ty->getPrimitiveSizeInBits();
  return !(BitSize == 0 || BitSize > 64);
}

bool PPCTargetLowering::isTruncateFree(Type *Ty1, Type *Ty2) const {
  if (!Ty1->isIntegerTy() || !Ty2->isIntegerTy())
    return false;
  unsigned NumBits1 = Ty1->getPrimitiveSizeInBits();
  unsigned NumBits2 = Ty2->getPrimitiveSizeInBits();
  return NumBits1 == 64 && NumBits2 == 32;
}

bool PPCTargetLowering::isTruncateFree(EVT VT1, EVT VT2) const {
  if (!VT1.isInteger() || !VT2.isInteger())
    return false;
  unsigned NumBits1 = VT1.getSizeInBits();
  unsigned NumBits2 = VT2.getSizeInBits();
  return NumBits1 == 64 && NumBits2 == 32;
}

bool PPCTargetLowering::isZExtFree(SDValue Val, EVT VT2) const {
  // Generally speaking, zexts are not free, but they are free when they can be
  // folded with other operations.
  if (LoadSDNode *LD = dyn_cast<LoadSDNode>(Val)) {
    EVT MemVT = LD->getMemoryVT();
    if ((MemVT == MVT::i1 || MemVT == MVT::i8 || MemVT == MVT::i16 ||
         (Subtarget.isPPC64() && MemVT == MVT::i32)) &&
        (LD->getExtensionType() == ISD::NON_EXTLOAD ||
         LD->getExtensionType() == ISD::ZEXTLOAD))
      return true;
  }

  // FIXME: Add other cases...
  //  - 32-bit shifts with a zext to i64
  //  - zext after ctlz, bswap, etc.
  //  - zext after and by a constant mask

  return TargetLowering::isZExtFree(Val, VT2);
}

bool PPCTargetLowering::isFPExtFree(EVT DestVT, EVT SrcVT) const {
  assert(DestVT.isFloatingPoint() && SrcVT.isFloatingPoint() &&
         "invalid fpext types");
  // Extending to float128 is not free.
  if (DestVT == MVT::f128)
    return false;
  return true;
}

bool PPCTargetLowering::isLegalICmpImmediate(int64_t Imm) const {
  return isInt<16>(Imm) || isUInt<16>(Imm);
}

bool PPCTargetLowering::isLegalAddImmediate(int64_t Imm) const {
  return isInt<16>(Imm) || isUInt<16>(Imm);
}

bool PPCTargetLowering::allowsMisalignedMemoryAccesses(EVT VT,
                                                       unsigned,
                                                       unsigned,
                                                       MachineMemOperand::Flags,
                                                       bool *Fast) const {
  if (DisablePPCUnaligned)
    return false;

  // PowerPC supports unaligned memory access for simple non-vector types.
  // Although accessing unaligned addresses is not as efficient as accessing
  // aligned addresses, it is generally more efficient than manual expansion,
  // and generally only traps for software emulation when crossing page
  // boundaries.

  if (!VT.isSimple())
    return false;

  if (VT.isFloatingPoint() && !VT.isVector() &&
      !Subtarget.allowsUnalignedFPAccess())
    return false;

  if (VT.getSimpleVT().isVector()) {
    if (Subtarget.hasVSX()) {
      if (VT != MVT::v2f64 && VT != MVT::v2i64 &&
          VT != MVT::v4f32 && VT != MVT::v4i32)
        return false;
    } else {
      return false;
    }
  }

  if (VT == MVT::ppcf128)
    return false;

  if (Fast)
    *Fast = true;

  return true;
}

bool PPCTargetLowering::decomposeMulByConstant(LLVMContext &Context, EVT VT,
                                               SDValue C) const {
  // Check integral scalar types.
  if (!VT.isScalarInteger())
    return false;
  if (auto *ConstNode = dyn_cast<ConstantSDNode>(C.getNode())) {
    if (!ConstNode->getAPIntValue().isSignedIntN(64))
      return false;
    // This transformation will generate >= 2 operations. But the following
    // cases will generate <= 2 instructions during ISel, so exclude them:
    // 1. If the constant multiplier fits in 16 bits, a single MULLI
    //    handles it.
    // 2. If the multiplier fits in 16 bits after shifting out its trailing
    //    zeros, only one extra shift is needed, i.e. MULLI plus RLDICR.
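    // For example, 65537 * x = (x << 16) + x profits from decomposition
    // (65537 - 1 is a power of two), while 5 * x stays a single mulli.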
    int64_t Imm = ConstNode->getSExtValue();
    unsigned Shift = countTrailingZeros<uint64_t>(Imm);
    Imm >>= Shift;
    if (isInt<16>(Imm))
      return false;
    uint64_t UImm = static_cast<uint64_t>(Imm);
    if (isPowerOf2_64(UImm + 1) || isPowerOf2_64(UImm - 1) ||
        isPowerOf2_64(1 - UImm) || isPowerOf2_64(-1 - UImm))
      return true;
  }
  return false;
}

bool PPCTargetLowering::isFMAFasterThanFMulAndFAdd(const MachineFunction &MF,
                                                   EVT VT) const {
  return isFMAFasterThanFMulAndFAdd(
      MF.getFunction(), VT.getTypeForEVT(MF.getFunction().getContext()));
}

bool PPCTargetLowering::isFMAFasterThanFMulAndFAdd(const Function &F,
                                                   Type *Ty) const {
  switch (Ty->getScalarType()->getTypeID()) {
  case Type::FloatTyID:
  case Type::DoubleTyID:
    return true;
  case Type::FP128TyID:
    return Subtarget.hasP9Vector();
  default:
    return false;
  }
}

// FIXME: add more patterns which are not profitable to hoist.
bool PPCTargetLowering::isProfitableToHoist(Instruction *I) const {
  if (!I->hasOneUse())
    return true;

  Instruction *User = I->user_back();
  assert(User && "A single use instruction with no uses.");

  switch (I->getOpcode()) {
  case Instruction::FMul: {
    // Don't break FMA, PowerPC prefers FMA.
16274     if (User->getOpcode() != Instruction::FSub &&
16275         User->getOpcode() != Instruction::FAdd)
16276       return true;
16277 
16278     const TargetOptions &Options = getTargetMachine().Options;
16279     const Function *F = I->getFunction();
16280     const DataLayout &DL = F->getParent()->getDataLayout();
16281     Type *Ty = User->getOperand(0)->getType();
16282 
16283     return !(
16284         isFMAFasterThanFMulAndFAdd(*F, Ty) &&
16285         isOperationLegalOrCustom(ISD::FMA, getValueType(DL, Ty)) &&
16286         (Options.AllowFPOpFusion == FPOpFusion::Fast || Options.UnsafeFPMath));
16287   }
16288   case Instruction::Load: {
    // Don't break the "store (load float*)" pattern; a later InstCombine
    // pass combines it to "store (load int32)" (see function
    // combineLoadToOperationType). On PowerPC, loading a floating-point
    // value takes more cycles than loading a 32-bit integer.
    LoadInst *LI = cast<LoadInst>(I);
    // For loads that combineLoadToOperationType leaves alone, such as
    // ordered loads, it should be profitable to hoist them.
    // A swifterror load can only produce a pointer value, so the type check
    // below already rejects it.
    if (!LI->isUnordered())
      return true;

    if (User->getOpcode() != Instruction::Store)
      return true;

    if (I->getType()->getTypeID() != Type::FloatTyID)
      return true;

    return false;
  }
  default:
    return true;
  }
  return true;
}

const MCPhysReg *
PPCTargetLowering::getScratchRegisters(CallingConv::ID) const {
  // LR is a callee-save register, but we must treat it as clobbered by any call
  // site. Hence we include LR in the scratch registers, which are in turn added
  // as implicit-defs for stackmaps and patchpoints. The same reasoning applies
  // to CTR, which is used by any indirect call.
  static const MCPhysReg ScratchRegs[] = {
    PPC::X12, PPC::LR8, PPC::CTR8, 0
  };

  return ScratchRegs;
}

Register PPCTargetLowering::getExceptionPointerRegister(
    const Constant *PersonalityFn) const {
  return Subtarget.isPPC64() ? PPC::X3 : PPC::R3;
}

Register PPCTargetLowering::getExceptionSelectorRegister(
    const Constant *PersonalityFn) const {
  return Subtarget.isPPC64() ? PPC::X4 : PPC::R4;
}

bool
PPCTargetLowering::shouldExpandBuildVectorWithShuffles(
                     EVT VT, unsigned DefinedValues) const {
  if (VT == MVT::v2i64)
    return Subtarget.hasDirectMove(); // Don't need stack ops with direct moves

  if (Subtarget.hasVSX())
    return true;

  return TargetLowering::shouldExpandBuildVectorWithShuffles(VT, DefinedValues);
}

Sched::Preference PPCTargetLowering::getSchedulingPreference(SDNode *N) const {
  if (DisableILPPref || Subtarget.enableMachineScheduler())
    return TargetLowering::getSchedulingPreference(N);

  return Sched::ILP;
}

// Create a fast isel object.
FastISel *
PPCTargetLowering::createFastISel(FunctionLoweringInfo &FuncInfo,
                                  const TargetLibraryInfo *LibInfo) const {
  return PPC::createFastISel(FuncInfo, LibInfo);
}

// 'Inverted' means the FMA opcode obtained by negating one multiplicand.
// For example, (fma -a b c) = (fnmsub a b c).
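// The identity behind this (worth spelling out): PPCISD::FNMSUB computes
// -(a*b - c) = (-a)*b + c, so negating one multiplicand of an FMA yields an
// FNMSUB and vice versa; applying the inversion twice returns the original
// opcode.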
static unsigned invertFMAOpcode(unsigned Opc) {
  switch (Opc) {
  default:
    llvm_unreachable("Invalid FMA opcode for PowerPC!");
  case ISD::FMA:
    return PPCISD::FNMSUB;
  case PPCISD::FNMSUB:
    return ISD::FMA;
  }
}

SDValue PPCTargetLowering::getNegatedExpression(SDValue Op, SelectionDAG &DAG,
                                                bool LegalOps, bool OptForSize,
                                                NegatibleCost &Cost,
                                                unsigned Depth) const {
  if (Depth > SelectionDAG::MaxRecursionDepth)
    return SDValue();

  unsigned Opc = Op.getOpcode();
  EVT VT = Op.getValueType();
  SDNodeFlags Flags = Op.getNode()->getFlags();

  switch (Opc) {
  case PPCISD::FNMSUB:
    if (!Op.hasOneUse() || !isTypeLegal(VT))
      break;

    const TargetOptions &Options = getTargetMachine().Options;
    SDValue N0 = Op.getOperand(0);
    SDValue N1 = Op.getOperand(1);
    SDValue N2 = Op.getOperand(2);
    SDLoc Loc(Op);

    NegatibleCost N2Cost = NegatibleCost::Expensive;
    SDValue NegN2 =
        getNegatedExpression(N2, DAG, LegalOps, OptForSize, N2Cost, Depth + 1);

    if (!NegN2)
      return SDValue();

    // (fneg (fnmsub a b c)) => (fnmsub (fneg a) b (fneg c))
    // (fneg (fnmsub a b c)) => (fnmsub a (fneg b) (fneg c))
    // These transformations may change the sign of zeroes. For example,
    // -(-ab - (-c)) = -0 while -(-(ab - c)) = +0 when a = b = c = 1.
    if (Flags.hasNoSignedZeros() || Options.NoSignedZerosFPMath) {
      // Try and choose the cheaper one to negate.
      NegatibleCost N0Cost = NegatibleCost::Expensive;
      SDValue NegN0 = getNegatedExpression(N0, DAG, LegalOps, OptForSize,
                                           N0Cost, Depth + 1);

      NegatibleCost N1Cost = NegatibleCost::Expensive;
      SDValue NegN1 = getNegatedExpression(N1, DAG, LegalOps, OptForSize,
                                           N1Cost, Depth + 1);

      if (NegN0 && N0Cost <= N1Cost) {
        Cost = std::min(N0Cost, N2Cost);
        return DAG.getNode(Opc, Loc, VT, NegN0, N1, NegN2, Flags);
      } else if (NegN1) {
        Cost = std::min(N1Cost, N2Cost);
        return DAG.getNode(Opc, Loc, VT, N0, NegN1, NegN2, Flags);
      }
    }

    // (fneg (fnmsub a b c)) => (fma a b (fneg c))
    if (isOperationLegal(ISD::FMA, VT)) {
      Cost = N2Cost;
      return DAG.getNode(ISD::FMA, Loc, VT, N0, N1, NegN2, Flags);
    }

    break;
  }

  return TargetLowering::getNegatedExpression(Op, DAG, LegalOps, OptForSize,
                                              Cost, Depth);
}

// Override to enable LOAD_STACK_GUARD lowering on Linux.
bool PPCTargetLowering::useLoadStackGuardNode() const {
  if (!Subtarget.isTargetLinux())
    return TargetLowering::useLoadStackGuardNode();
  return true;
}

// Override to disable global variable loading on Linux.
void PPCTargetLowering::insertSSPDeclarations(Module &M) const {
  if (!Subtarget.isTargetLinux())
    return TargetLowering::insertSSPDeclarations(M);
}

bool PPCTargetLowering::isFPImmLegal(const APFloat &Imm, EVT VT,
                                     bool ForCodeSize) const {
  if (!VT.isSimple() || !Subtarget.hasVSX())
    return false;

  switch(VT.getSimpleVT().SimpleTy) {
  default:
    // For FP types that are currently not supported by the PPC backend,
    // return false. Examples: f16, f80.
    return false;
  case MVT::f32:
  case MVT::f64:
    if (Subtarget.hasPrefixInstrs()) {
      // With prefixed instructions, we can materialize anything that can be
      // represented with a 32-bit immediate, not just positive zero.
      APFloat APFloatOfImm = Imm;
      return convertToNonDenormSingle(APFloatOfImm);
    }
    LLVM_FALLTHROUGH;
  case MVT::ppcf128:
    return Imm.isPosZero();
  }
}

// For vector shift operation op, fold
// (op x, (and y, ((1 << numbits(x)) - 1))) -> (target op x, y)
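// For example (taking v4i32), numbits(x) is 32 and the mask is 31, so
// (shl x, (and y, 31)) becomes (PPCISD::SHL x, y): the vector shift
// instructions already use only the low log2(32) bits of each shift amount,
// making the explicit modulo mask redundant.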
static SDValue stripModuloOnShift(const TargetLowering &TLI, SDNode *N,
                                  SelectionDAG &DAG) {
  SDValue N0 = N->getOperand(0);
  SDValue N1 = N->getOperand(1);
  EVT VT = N0.getValueType();
  unsigned OpSizeInBits = VT.getScalarSizeInBits();
  unsigned Opcode = N->getOpcode();
  unsigned TargetOpcode;

  switch (Opcode) {
  default:
    llvm_unreachable("Unexpected shift operation");
  case ISD::SHL:
    TargetOpcode = PPCISD::SHL;
    break;
  case ISD::SRL:
    TargetOpcode = PPCISD::SRL;
    break;
  case ISD::SRA:
    TargetOpcode = PPCISD::SRA;
    break;
  }

  if (VT.isVector() && TLI.isOperationLegal(Opcode, VT) &&
      N1->getOpcode() == ISD::AND)
    if (ConstantSDNode *Mask = isConstOrConstSplat(N1->getOperand(1)))
      if (Mask->getZExtValue() == OpSizeInBits - 1)
        return DAG.getNode(TargetOpcode, SDLoc(N), VT, N0, N1->getOperand(0));

  return SDValue();
}

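// In addition to stripping the shift-amount modulo mask (above), combineSHL
// folds (shl (sext i32 x to i64), C) into PPCISD::EXTSWSLI on 64-bit ISA 3.0
// targets, so the sign-extend and the shift are performed by the single
// extswsli instruction; e.g. (shl (sext x), 3) becomes (EXTSWSLI x, 3).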
SDValue PPCTargetLowering::combineSHL(SDNode *N, DAGCombinerInfo &DCI) const {
  if (auto Value = stripModuloOnShift(*this, N, DCI.DAG))
    return Value;

  SDValue N0 = N->getOperand(0);
  ConstantSDNode *CN1 = dyn_cast<ConstantSDNode>(N->getOperand(1));
  if (!Subtarget.isISA3_0() || !Subtarget.isPPC64() ||
      N0.getOpcode() != ISD::SIGN_EXTEND ||
      N0.getOperand(0).getValueType() != MVT::i32 || CN1 == nullptr ||
      N->getValueType(0) != MVT::i64)
    return SDValue();

  // We can't save an operation here if the value is already sign-extended
  // (a truncate of an AssertSext makes the re-extension free), and the
  // existing shift is then easier to combine.
  SDValue ExtsSrc = N0.getOperand(0);
  if (ExtsSrc.getOpcode() == ISD::TRUNCATE &&
      ExtsSrc.getOperand(0).getOpcode() == ISD::AssertSext)
    return SDValue();

  SDLoc DL(N0);
  SDValue ShiftBy = SDValue(CN1, 0);
  // We want the shift amount to be i32 on the EXTSWSLI, but the shift amount
  // could be i64.
  if (ShiftBy.getValueType() == MVT::i64)
    ShiftBy = DCI.DAG.getConstant(CN1->getZExtValue(), DL, MVT::i32);

  return DCI.DAG.getNode(PPCISD::EXTSWSLI, DL, MVT::i64, N0->getOperand(0),
                         ShiftBy);
}

SDValue PPCTargetLowering::combineSRA(SDNode *N, DAGCombinerInfo &DCI) const {
  if (auto Value = stripModuloOnShift(*this, N, DCI.DAG))
    return Value;

  return SDValue();
}

SDValue PPCTargetLowering::combineSRL(SDNode *N, DAGCombinerInfo &DCI) const {
  if (auto Value = stripModuloOnShift(*this, N, DCI.DAG))
    return Value;

  return SDValue();
}

// Transform (add X, (zext(setne Z, C))) -> (addze X, (addic (addi Z, -C), -1))
// Transform (add X, (zext(sete  Z, C))) -> (addze X, (subfic (addi Z, -C), 0))
// When C is zero, the expression (addi Z, -C) simplifies to Z.
// Requirement: -C is in [-32768, 32767]; X and Z have type MVT::i64.
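// Why the carry encodes the compare (a sketch): with T = Z - C, the
// addic T, -1 computes T + 0xFFFF'FFFF'FFFF'FFFF, which carries out exactly
// when T != 0, while subfic T, 0 computes 0 - T, whose carry ("no borrow")
// is set exactly when T == 0. addze then simply adds that carry bit to X.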
static SDValue combineADDToADDZE(SDNode *N, SelectionDAG &DAG,
                                 const PPCSubtarget &Subtarget) {
  if (!Subtarget.isPPC64())
    return SDValue();

  SDValue LHS = N->getOperand(0);
  SDValue RHS = N->getOperand(1);

  auto isZextOfCompareWithConstant = [](SDValue Op) {
    if (Op.getOpcode() != ISD::ZERO_EXTEND || !Op.hasOneUse() ||
        Op.getValueType() != MVT::i64)
      return false;

    SDValue Cmp = Op.getOperand(0);
    if (Cmp.getOpcode() != ISD::SETCC || !Cmp.hasOneUse() ||
        Cmp.getOperand(0).getValueType() != MVT::i64)
      return false;

    if (auto *Constant = dyn_cast<ConstantSDNode>(Cmp.getOperand(1))) {
      int64_t NegConstant = 0 - Constant->getSExtValue();
      // Due to the limitations of the addi instruction,
      // -C is required to be in [-32768, 32767].
      return isInt<16>(NegConstant);
    }

    return false;
  };

  bool LHSHasPattern = isZextOfCompareWithConstant(LHS);
  bool RHSHasPattern = isZextOfCompareWithConstant(RHS);

  // If there is a pattern, canonicalize a zext operand to the RHS.
  if (LHSHasPattern && !RHSHasPattern)
    std::swap(LHS, RHS);
  else if (!LHSHasPattern && !RHSHasPattern)
    return SDValue();

  SDLoc DL(N);
  SDVTList VTs = DAG.getVTList(MVT::i64, MVT::Glue);
  SDValue Cmp = RHS.getOperand(0);
  SDValue Z = Cmp.getOperand(0);
  auto *Constant = dyn_cast<ConstantSDNode>(Cmp.getOperand(1));

  assert(Constant && "Constant should not be a null pointer.");
  int64_t NegConstant = 0 - Constant->getSExtValue();

  switch(cast<CondCodeSDNode>(Cmp.getOperand(2))->get()) {
  default: break;
  case ISD::SETNE: {
    //                                 when C == 0
    //                             --> addze X, (addic Z, -1).carry
    //                            /
    // add X, (zext(setne Z, C))--
    //                            \    when -32768 <= -C <= 32767 && C != 0
    //                             --> addze X, (addic (addi Z, -C), -1).carry
    SDValue Add = DAG.getNode(ISD::ADD, DL, MVT::i64, Z,
                              DAG.getConstant(NegConstant, DL, MVT::i64));
    SDValue AddOrZ = NegConstant != 0 ? Add : Z;
    SDValue Addc = DAG.getNode(ISD::ADDC, DL, DAG.getVTList(MVT::i64, MVT::Glue),
                               AddOrZ, DAG.getConstant(-1ULL, DL, MVT::i64));
    return DAG.getNode(ISD::ADDE, DL, VTs, LHS, DAG.getConstant(0, DL, MVT::i64),
                       SDValue(Addc.getNode(), 1));
    }
  case ISD::SETEQ: {
    //                                 when C == 0
    //                             --> addze X, (subfic Z, 0).carry
    //                            /
    // add X, (zext(sete  Z, C))--
    //                            \    when -32768 <= -C <= 32767 && C != 0
    //                             --> addze X, (subfic (addi Z, -C), 0).carry
    SDValue Add = DAG.getNode(ISD::ADD, DL, MVT::i64, Z,
                              DAG.getConstant(NegConstant, DL, MVT::i64));
    SDValue AddOrZ = NegConstant != 0 ? Add : Z;
    SDValue Subc = DAG.getNode(ISD::SUBC, DL, DAG.getVTList(MVT::i64, MVT::Glue),
                               DAG.getConstant(0, DL, MVT::i64), AddOrZ);
    return DAG.getNode(ISD::ADDE, DL, VTs, LHS, DAG.getConstant(0, DL, MVT::i64),
                       SDValue(Subc.getNode(), 1));
    }
  }

  return SDValue();
}

// Transform
// (add C1, (MAT_PCREL_ADDR GlobalAddr+C2)) to
// (MAT_PCREL_ADDR GlobalAddr+(C1+C2))
// In this case, both C1 and C2 must be known constants, and
// C1+C2 must fit into a 34-bit signed integer.
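// For example, (add (MAT_PCREL_ADDR foo+8), 16) folds to
// (MAT_PCREL_ADDR foo+24), since 24 still fits the 34-bit displacement of
// the prefixed paddi used to materialize the address.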
static SDValue combineADDToMAT_PCREL_ADDR(SDNode *N, SelectionDAG &DAG,
                                          const PPCSubtarget &Subtarget) {
  if (!Subtarget.isUsingPCRelativeCalls())
    return SDValue();

  // Check both Operand 0 and Operand 1 of the ADD node for the PCRel node.
  // If we find that node, try to cast the Global Address and the Constant.
  SDValue LHS = N->getOperand(0);
  SDValue RHS = N->getOperand(1);

  if (LHS.getOpcode() != PPCISD::MAT_PCREL_ADDR)
    std::swap(LHS, RHS);

  if (LHS.getOpcode() != PPCISD::MAT_PCREL_ADDR)
    return SDValue();

  // Operand zero of PPCISD::MAT_PCREL_ADDR is the GA node.
  GlobalAddressSDNode *GSDN = dyn_cast<GlobalAddressSDNode>(LHS.getOperand(0));
  ConstantSDNode* ConstNode = dyn_cast<ConstantSDNode>(RHS);

  // Check that both casts succeeded.
  if (!GSDN || !ConstNode)
    return SDValue();

  int64_t NewOffset = GSDN->getOffset() + ConstNode->getSExtValue();
  SDLoc DL(GSDN);

  // The signed int offset needs to fit in 34 bits.
  if (!isInt<34>(NewOffset))
    return SDValue();

  // The new global address is a copy of the old global address except
  // that it has the updated Offset.
  SDValue GA =
      DAG.getTargetGlobalAddress(GSDN->getGlobal(), DL, GSDN->getValueType(0),
                                 NewOffset, GSDN->getTargetFlags());
  SDValue MatPCRel =
      DAG.getNode(PPCISD::MAT_PCREL_ADDR, DL, GSDN->getValueType(0), GA);
  return MatPCRel;
}

SDValue PPCTargetLowering::combineADD(SDNode *N, DAGCombinerInfo &DCI) const {
  if (auto Value = combineADDToADDZE(N, DCI.DAG, Subtarget))
    return Value;

  if (auto Value = combineADDToMAT_PCREL_ADDR(N, DCI.DAG, Subtarget))
    return Value;

  return SDValue();
}

// Detect TRUNCATE operations on bitcasts of float128 values.
// What we are looking for here is the situation where we extract a subset
// of bits from a 128-bit float.
// This can be of two forms:
// 1) BITCAST of f128 feeding TRUNCATE
// 2) BITCAST of f128 feeding SRL (a shift) feeding TRUNCATE
// This is required because we do not have a legal i128 type, so we want to
// avoid having to store the f128 and then reload part of it.
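// For example, on a big-endian target:
// (truncate i64 (srl (bitcast f128 x to i128), 64)) extracts the high
// doubleword, i.e. element 0 of (bitcast x to v2i64); without the srl the
// truncate keeps the low doubleword, which is element 1.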
SDValue PPCTargetLowering::combineTRUNCATE(SDNode *N,
                                           DAGCombinerInfo &DCI) const {
  // If we are using CRBits then try that first.
  if (Subtarget.useCRBits()) {
    // Check if CRBits did anything and return that if it did.
    if (SDValue CRTruncValue = DAGCombineTruncBoolExt(N, DCI))
      return CRTruncValue;
  }

  SDLoc dl(N);
  SDValue Op0 = N->getOperand(0);

  // fold (truncate (abs (sub (zext a), (zext b)))) -> (vabsd a, b)
  if (Subtarget.hasP9Altivec() && Op0.getOpcode() == ISD::ABS) {
    EVT VT = N->getValueType(0);
    if (VT != MVT::v4i32 && VT != MVT::v8i16 && VT != MVT::v16i8)
      return SDValue();
    SDValue Sub = Op0.getOperand(0);
    if (Sub.getOpcode() == ISD::SUB) {
      SDValue SubOp0 = Sub.getOperand(0);
      SDValue SubOp1 = Sub.getOperand(1);
      if ((SubOp0.getOpcode() == ISD::ZERO_EXTEND) &&
          (SubOp1.getOpcode() == ISD::ZERO_EXTEND)) {
        return DCI.DAG.getNode(PPCISD::VABSD, dl, VT, SubOp0.getOperand(0),
                               SubOp1.getOperand(0),
                               DCI.DAG.getTargetConstant(0, dl, MVT::i32));
      }
    }
  }

  // Looking for a truncate of i128 to i64.
  if (Op0.getValueType() != MVT::i128 || N->getValueType(0) != MVT::i64)
    return SDValue();

  int EltToExtract = DCI.DAG.getDataLayout().isBigEndian() ? 1 : 0;

  // SRL feeding TRUNCATE.
  if (Op0.getOpcode() == ISD::SRL) {
    ConstantSDNode *ConstNode = dyn_cast<ConstantSDNode>(Op0.getOperand(1));
    // The right shift has to be by 64 bits.
    if (!ConstNode || ConstNode->getZExtValue() != 64)
      return SDValue();

    // Switch the element number to extract.
    EltToExtract = EltToExtract ? 0 : 1;
    // Update Op0 past the SRL.
    Op0 = Op0.getOperand(0);
  }

  // BITCAST feeding a TRUNCATE possibly via SRL.
  if (Op0.getOpcode() == ISD::BITCAST &&
      Op0.getValueType() == MVT::i128 &&
      Op0.getOperand(0).getValueType() == MVT::f128) {
    SDValue Bitcast = DCI.DAG.getBitcast(MVT::v2i64, Op0.getOperand(0));
    return DCI.DAG.getNode(
        ISD::EXTRACT_VECTOR_ELT, dl, MVT::i64, Bitcast,
        DCI.DAG.getTargetConstant(EltToExtract, dl, MVT::i32));
  }
  return SDValue();
}

SDValue PPCTargetLowering::combineMUL(SDNode *N, DAGCombinerInfo &DCI) const {
  SelectionDAG &DAG = DCI.DAG;

  ConstantSDNode *ConstOpOrElement = isConstOrConstSplat(N->getOperand(1));
  if (!ConstOpOrElement)
    return SDValue();

  // An imul is usually smaller than the alternative sequence for a legal type.
  if (DAG.getMachineFunction().getFunction().hasMinSize() &&
      isOperationLegal(ISD::MUL, N->getValueType(0)))
    return SDValue();

  auto IsProfitable = [this](bool IsNeg, bool IsAddOne, EVT VT) -> bool {
    switch (this->Subtarget.getCPUDirective()) {
    default:
      // TODO: enhance the condition for subtarget before pwr8
      return false;
    case PPC::DIR_PWR8:
      //  type        mul     add    shl
      // scalar        4       1      1
      // vector        7       2      2
      return true;
    case PPC::DIR_PWR9:
    case PPC::DIR_PWR10:
    case PPC::DIR_PWR_FUTURE:
      //  type        mul     add    shl
      // scalar        5       2      2
      // vector        7       2      2

      // The cycle counts of the relevant operations are shown in the table
      // above. Because mul costs 5 (scalar) / 7 (vector) while add/sub/shl
      // all cost 2 for both scalar and vector types, the two-instruction
      // patterns (add/sub + shl, cost 4) are always profitable. But the
      // three-instruction pattern
      // (mul x, -(2^N + 1)) => -(add (shl x, N), x) costs 6 (sub + add +
      // shl), so we should only use it for vector types.
      return IsAddOne && IsNeg ? VT.isVector() : true;
    }
  };

  EVT VT = N->getValueType(0);
  SDLoc DL(N);

  const APInt &MulAmt = ConstOpOrElement->getAPIntValue();
  bool IsNeg = MulAmt.isNegative();
  APInt MulAmtAbs = MulAmt.abs();

  if ((MulAmtAbs - 1).isPowerOf2()) {
    // (mul x, 2^N + 1) => (add (shl x, N), x)
    // (mul x, -(2^N + 1)) => -(add (shl x, N), x)
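    // For example, (mul x, 5): 5 - 1 = 4 = 2^2, so N = 2 and the result is
    // (add (shl x, 2), x); for (mul x, -5) the add is then subtracted from
    // zero.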

    if (!IsProfitable(IsNeg, true, VT))
      return SDValue();

    SDValue Op0 = N->getOperand(0);
    SDValue Op1 =
        DAG.getNode(ISD::SHL, DL, VT, N->getOperand(0),
                    DAG.getConstant((MulAmtAbs - 1).logBase2(), DL, VT));
    SDValue Res = DAG.getNode(ISD::ADD, DL, VT, Op0, Op1);

    if (!IsNeg)
      return Res;

    return DAG.getNode(ISD::SUB, DL, VT, DAG.getConstant(0, DL, VT), Res);
  } else if ((MulAmtAbs + 1).isPowerOf2()) {
    // (mul x, 2^N - 1) => (sub (shl x, N), x)
    // (mul x, -(2^N - 1)) => (sub x, (shl x, N))

    if (!IsProfitable(IsNeg, false, VT))
      return SDValue();

    SDValue Op0 = N->getOperand(0);
    SDValue Op1 =
        DAG.getNode(ISD::SHL, DL, VT, N->getOperand(0),
                    DAG.getConstant((MulAmtAbs + 1).logBase2(), DL, VT));

    if (!IsNeg)
      return DAG.getNode(ISD::SUB, DL, VT, Op1, Op0);
    else
      return DAG.getNode(ISD::SUB, DL, VT, Op0, Op1);

  } else {
    return SDValue();
  }
}

// Combine an FMA-like op (such as fnmsub) with fnegs into the appropriate
// opcode. Do this in the combiner since we need to check SD flags and other
// subtarget features.
SDValue PPCTargetLowering::combineFMALike(SDNode *N,
                                          DAGCombinerInfo &DCI) const {
  SDValue N0 = N->getOperand(0);
  SDValue N1 = N->getOperand(1);
  SDValue N2 = N->getOperand(2);
  SDNodeFlags Flags = N->getFlags();
  EVT VT = N->getValueType(0);
  SelectionDAG &DAG = DCI.DAG;
  const TargetOptions &Options = getTargetMachine().Options;
  unsigned Opc = N->getOpcode();
  bool CodeSize = DAG.getMachineFunction().getFunction().hasOptSize();
  bool LegalOps = !DCI.isBeforeLegalizeOps();
  SDLoc Loc(N);

  if (!isOperationLegal(ISD::FMA, VT))
    return SDValue();

  // Allowing the transformation to FNMSUB may change the sign of zeroes when
  // ab - c = 0, since (fnmsub a b c) = -0 while c - ab = +0.
  if (!Flags.hasNoSignedZeros() && !Options.NoSignedZerosFPMath)
    return SDValue();

  // (fma (fneg a) b c) => (fnmsub a b c)
  // (fnmsub (fneg a) b c) => (fma a b c)
  if (SDValue NegN0 = getCheaperNegatedExpression(N0, DAG, LegalOps, CodeSize))
    return DAG.getNode(invertFMAOpcode(Opc), Loc, VT, NegN0, N1, N2, Flags);

  // (fma a (fneg b) c) => (fnmsub a b c)
  // (fnmsub a (fneg b) c) => (fma a b c)
  if (SDValue NegN1 = getCheaperNegatedExpression(N1, DAG, LegalOps, CodeSize))
    return DAG.getNode(invertFMAOpcode(Opc), Loc, VT, N0, NegN1, N2, Flags);

  return SDValue();
}

bool PPCTargetLowering::mayBeEmittedAsTailCall(const CallInst *CI) const {
  // Only duplicate to increase tail-calls for the 64-bit SysV ABIs.
  if (!Subtarget.is64BitELFABI())
    return false;

  // If not a tail call then no need to proceed.
  if (!CI->isTailCall())
    return false;

  // If sibling calls have been disabled and tail calls aren't guaranteed,
  // there is no reason to duplicate.
  auto &TM = getTargetMachine();
  if (!TM.Options.GuaranteedTailCallOpt && DisableSCO)
    return false;

  // Can't tail call a function called indirectly, or if it has variadic args.
  const Function *Callee = CI->getCalledFunction();
  if (!Callee || Callee->isVarArg())
    return false;

  // Make sure the callee and caller calling conventions are eligible for tco.
  const Function *Caller = CI->getParent()->getParent();
  if (!areCallingConvEligibleForTCO_64SVR4(Caller->getCallingConv(),
                                           CI->getCallingConv()))
    return false;

  // If the function is local then we have a good chance at tail-calling it.
  return getTargetMachine().shouldAssumeDSOLocal(*Caller->getParent(), Callee);
}

bool PPCTargetLowering::hasBitPreservingFPLogic(EVT VT) const {
  if (!Subtarget.hasVSX())
    return false;
  if (Subtarget.hasP9Vector() && VT == MVT::f128)
    return true;
  return VT == MVT::f32 || VT == MVT::f64 ||
    VT == MVT::v4f32 || VT == MVT::v2f64;
}

bool PPCTargetLowering::
isMaskAndCmp0FoldingBeneficial(const Instruction &AndI) const {
  const Value *Mask = AndI.getOperand(1);
  // If the mask is suitable for andi. or andis., we should sink the and.
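  // (andi. takes a 16-bit unsigned immediate; andis. takes one that is
  // shifted left by 16 bits. So, for example, 0x0000FFFF and 0xFFFF0000
  // both qualify below, while 0x00FF00FF fits neither form.)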
  if (const ConstantInt *CI = dyn_cast<ConstantInt>(Mask)) {
    // Can't handle constants wider than 64 bits.
    if (CI->getBitWidth() > 64)
      return false;
    int64_t ConstVal = CI->getZExtValue();
    return isUInt<16>(ConstVal) ||
      (isUInt<16>(ConstVal >> 16) && !(ConstVal & 0xFFFF));
  }

  // For non-constant masks, we can always use the record-form and.
  return true;
}

// Transform (abs (sub (zext a), (zext b))) to (vabsd a b 0)
// Transform (abs (sub (zext a), (zext_invec b))) to (vabsd a b 0)
// Transform (abs (sub (zext_invec a), (zext_invec b))) to (vabsd a b 0)
// Transform (abs (sub (zext_invec a), (zext b))) to (vabsd a b 0)
// Transform (abs (sub a, b)) to (vabsd a b 1) if a and b are of type v4i32
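// The zero-extended cases are safe because the high bit of every element is
// known clear, so |a - b| equals the unsigned absolute difference that the
// vabsdu instructions compute; the trailing 1 in the v4i32 case requests the
// sign-bit fixup (xvnegsp + vabsduw, as noted below) during instruction
// selection.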
SDValue PPCTargetLowering::combineABS(SDNode *N, DAGCombinerInfo &DCI) const {
  assert((N->getOpcode() == ISD::ABS) && "Need ABS node here");
  assert(Subtarget.hasP9Altivec() &&
         "Only combine this when P9 altivec supported!");
  EVT VT = N->getValueType(0);
  if (VT != MVT::v4i32 && VT != MVT::v8i16 && VT != MVT::v16i8)
    return SDValue();

  SelectionDAG &DAG = DCI.DAG;
  SDLoc dl(N);
  if (N->getOperand(0).getOpcode() == ISD::SUB) {
    // Even for signed integers, we can use the unsigned form here because
    // both inputs are known to be non-negative (they are zero-extended).
    unsigned SubOpcd0 = N->getOperand(0)->getOperand(0).getOpcode();
    unsigned SubOpcd1 = N->getOperand(0)->getOperand(1).getOpcode();
    if ((SubOpcd0 == ISD::ZERO_EXTEND ||
         SubOpcd0 == ISD::ZERO_EXTEND_VECTOR_INREG) &&
        (SubOpcd1 == ISD::ZERO_EXTEND ||
         SubOpcd1 == ISD::ZERO_EXTEND_VECTOR_INREG)) {
      return DAG.getNode(PPCISD::VABSD, dl, N->getOperand(0).getValueType(),
                         N->getOperand(0)->getOperand(0),
                         N->getOperand(0)->getOperand(1),
                         DAG.getTargetConstant(0, dl, MVT::i32));
    }

    // For type v4i32, it can be optimized with xvnegsp + vabsduw
    if (N->getOperand(0).getValueType() == MVT::v4i32 &&
        N->getOperand(0).hasOneUse()) {
      return DAG.getNode(PPCISD::VABSD, dl, N->getOperand(0).getValueType(),
                         N->getOperand(0)->getOperand(0),
                         N->getOperand(0)->getOperand(1),
                         DAG.getTargetConstant(1, dl, MVT::i32));
    }
  }

  return SDValue();
}

// For type v4i32/v8i16/v16i8, transform
// from (vselect (setcc a, b, setugt), (sub a, b), (sub b, a)) to (vabsd a, b)
// from (vselect (setcc a, b, setuge), (sub a, b), (sub b, a)) to (vabsd a, b)
// from (vselect (setcc a, b, setult), (sub b, a), (sub a, b)) to (vabsd a, b)
// from (vselect (setcc a, b, setule), (sub b, a), (sub a, b)) to (vabsd a, b)
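// The idea (a sketch): for unsigned a and b, the selected value is always
// max(a, b) - min(a, b) = |a - b|, which is exactly what the P9 vabsdu[bhw]
// instructions compute in a single operation.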
SDValue PPCTargetLowering::combineVSelect(SDNode *N,
                                          DAGCombinerInfo &DCI) const {
  assert((N->getOpcode() == ISD::VSELECT) && "Need VSELECT node here");
  assert(Subtarget.hasP9Altivec() &&
         "Only combine this when P9 altivec supported!");

  SelectionDAG &DAG = DCI.DAG;
  SDLoc dl(N);
  SDValue Cond = N->getOperand(0);
  SDValue TrueOpnd = N->getOperand(1);
  SDValue FalseOpnd = N->getOperand(2);
  EVT VT = N->getOperand(1).getValueType();

  if (Cond.getOpcode() != ISD::SETCC || TrueOpnd.getOpcode() != ISD::SUB ||
      FalseOpnd.getOpcode() != ISD::SUB)
    return SDValue();

  // ABSD is only available for types v4i32/v8i16/v16i8.
  if (VT != MVT::v4i32 && VT != MVT::v8i16 && VT != MVT::v16i8)
    return SDValue();

  // Require at least one single-use operand so that the combine saves at
  // least one dependent computation.
  if (!(Cond.hasOneUse() || TrueOpnd.hasOneUse() || FalseOpnd.hasOneUse()))
    return SDValue();

  ISD::CondCode CC = cast<CondCodeSDNode>(Cond.getOperand(2))->get();

  // Can only handle unsigned comparison here
  switch (CC) {
  default:
    return SDValue();
  case ISD::SETUGT:
  case ISD::SETUGE:
    break;
  case ISD::SETULT:
  case ISD::SETULE:
    std::swap(TrueOpnd, FalseOpnd);
    break;
  }

  SDValue CmpOpnd1 = Cond.getOperand(0);
  SDValue CmpOpnd2 = Cond.getOperand(1);

  // SETCC CmpOpnd1 CmpOpnd2 cond
  // TrueOpnd = CmpOpnd1 - CmpOpnd2
  // FalseOpnd = CmpOpnd2 - CmpOpnd1
  if (TrueOpnd.getOperand(0) == CmpOpnd1 &&
      TrueOpnd.getOperand(1) == CmpOpnd2 &&
      FalseOpnd.getOperand(0) == CmpOpnd2 &&
      FalseOpnd.getOperand(1) == CmpOpnd1) {
    return DAG.getNode(PPCISD::VABSD, dl, N->getOperand(1).getValueType(),
                       CmpOpnd1, CmpOpnd2,
                       DAG.getTargetConstant(0, dl, MVT::i32));
  }

  return SDValue();
}
