1 //===-- PPCISelLowering.cpp - PPC DAG Lowering Implementation -------------===//
2 //
3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4 // See https://llvm.org/LICENSE.txt for license information.
5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6 //
7 //===----------------------------------------------------------------------===//
8 //
9 // This file implements the PPCISelLowering class.
10 //
11 //===----------------------------------------------------------------------===//
12 
13 #include "PPCISelLowering.h"
14 #include "MCTargetDesc/PPCPredicates.h"
15 #include "PPC.h"
16 #include "PPCCCState.h"
17 #include "PPCCallingConv.h"
18 #include "PPCFrameLowering.h"
19 #include "PPCInstrInfo.h"
20 #include "PPCMachineFunctionInfo.h"
21 #include "PPCPerfectShuffle.h"
22 #include "PPCRegisterInfo.h"
23 #include "PPCSubtarget.h"
24 #include "PPCTargetMachine.h"
25 #include "llvm/ADT/APFloat.h"
26 #include "llvm/ADT/APInt.h"
27 #include "llvm/ADT/ArrayRef.h"
28 #include "llvm/ADT/DenseMap.h"
29 #include "llvm/ADT/None.h"
30 #include "llvm/ADT/STLExtras.h"
31 #include "llvm/ADT/SmallPtrSet.h"
32 #include "llvm/ADT/SmallSet.h"
33 #include "llvm/ADT/SmallVector.h"
34 #include "llvm/ADT/Statistic.h"
35 #include "llvm/ADT/StringRef.h"
36 #include "llvm/ADT/StringSwitch.h"
37 #include "llvm/CodeGen/CallingConvLower.h"
38 #include "llvm/CodeGen/ISDOpcodes.h"
39 #include "llvm/CodeGen/MachineBasicBlock.h"
40 #include "llvm/CodeGen/MachineFrameInfo.h"
41 #include "llvm/CodeGen/MachineFunction.h"
42 #include "llvm/CodeGen/MachineInstr.h"
43 #include "llvm/CodeGen/MachineInstrBuilder.h"
44 #include "llvm/CodeGen/MachineJumpTableInfo.h"
45 #include "llvm/CodeGen/MachineLoopInfo.h"
46 #include "llvm/CodeGen/MachineMemOperand.h"
47 #include "llvm/CodeGen/MachineModuleInfo.h"
48 #include "llvm/CodeGen/MachineOperand.h"
49 #include "llvm/CodeGen/MachineRegisterInfo.h"
50 #include "llvm/CodeGen/RuntimeLibcalls.h"
51 #include "llvm/CodeGen/SelectionDAG.h"
52 #include "llvm/CodeGen/SelectionDAGNodes.h"
53 #include "llvm/CodeGen/TargetInstrInfo.h"
54 #include "llvm/CodeGen/TargetLowering.h"
55 #include "llvm/CodeGen/TargetLoweringObjectFileImpl.h"
56 #include "llvm/CodeGen/TargetRegisterInfo.h"
57 #include "llvm/CodeGen/ValueTypes.h"
58 #include "llvm/IR/CallingConv.h"
59 #include "llvm/IR/Constant.h"
60 #include "llvm/IR/Constants.h"
61 #include "llvm/IR/DataLayout.h"
62 #include "llvm/IR/DebugLoc.h"
63 #include "llvm/IR/DerivedTypes.h"
64 #include "llvm/IR/Function.h"
65 #include "llvm/IR/GlobalValue.h"
66 #include "llvm/IR/IRBuilder.h"
67 #include "llvm/IR/Instructions.h"
68 #include "llvm/IR/Intrinsics.h"
69 #include "llvm/IR/IntrinsicsPowerPC.h"
70 #include "llvm/IR/Module.h"
71 #include "llvm/IR/Type.h"
72 #include "llvm/IR/Use.h"
73 #include "llvm/IR/Value.h"
74 #include "llvm/MC/MCContext.h"
75 #include "llvm/MC/MCExpr.h"
76 #include "llvm/MC/MCRegisterInfo.h"
77 #include "llvm/MC/MCSymbolXCOFF.h"
78 #include "llvm/Support/AtomicOrdering.h"
79 #include "llvm/Support/BranchProbability.h"
80 #include "llvm/Support/Casting.h"
81 #include "llvm/Support/CodeGen.h"
82 #include "llvm/Support/CommandLine.h"
83 #include "llvm/Support/Compiler.h"
84 #include "llvm/Support/Debug.h"
85 #include "llvm/Support/ErrorHandling.h"
86 #include "llvm/Support/Format.h"
87 #include "llvm/Support/KnownBits.h"
88 #include "llvm/Support/MachineValueType.h"
89 #include "llvm/Support/MathExtras.h"
90 #include "llvm/Support/raw_ostream.h"
91 #include "llvm/Target/TargetMachine.h"
92 #include "llvm/Target/TargetOptions.h"
93 #include <algorithm>
94 #include <cassert>
95 #include <cstdint>
96 #include <iterator>
97 #include <list>
98 #include <utility>
99 #include <vector>
100 
101 using namespace llvm;
102 
103 #define DEBUG_TYPE "ppc-lowering"
104 
105 static cl::opt<bool> DisablePPCPreinc("disable-ppc-preinc",
106 cl::desc("disable preincrement load/store generation on PPC"), cl::Hidden);
107 
108 static cl::opt<bool> DisableILPPref("disable-ppc-ilp-pref",
109 cl::desc("disable setting the node scheduling preference to ILP on PPC"), cl::Hidden);
110 
111 static cl::opt<bool> DisablePPCUnaligned("disable-ppc-unaligned",
112 cl::desc("disable unaligned load/store generation on PPC"), cl::Hidden);
113 
114 static cl::opt<bool> DisableSCO("disable-ppc-sco",
115 cl::desc("disable sibling call optimization on ppc"), cl::Hidden);
116 
117 static cl::opt<bool> DisableInnermostLoopAlign32("disable-ppc-innermost-loop-align32",
118 cl::desc("don't always align innermost loop to 32 bytes on ppc"), cl::Hidden);
119 
120 static cl::opt<bool> UseAbsoluteJumpTables("ppc-use-absolute-jumptables",
121 cl::desc("use absolute jump tables on ppc"), cl::Hidden);
122 
123 STATISTIC(NumTailCalls, "Number of tail calls");
124 STATISTIC(NumSiblingCalls, "Number of sibling calls");
125 STATISTIC(ShufflesHandledWithVPERM, "Number of shuffles lowered to a VPERM");
126 STATISTIC(NumDynamicAllocaProbed, "Number of dynamic stack allocation probed");
127 
128 static bool isNByteElemShuffleMask(ShuffleVectorSDNode *, unsigned, int);
129 
130 static SDValue widenVec(SelectionDAG &DAG, SDValue Vec, const SDLoc &dl);
131 
132 // FIXME: Remove this once the bug has been fixed!
133 extern cl::opt<bool> ANDIGlueBug;
134 
135 PPCTargetLowering::PPCTargetLowering(const PPCTargetMachine &TM,
136                                      const PPCSubtarget &STI)
137     : TargetLowering(TM), Subtarget(STI) {
138   // On PPC32/64, arguments smaller than 4/8 bytes are extended, so all
139   // arguments are at least 4/8 bytes aligned.
140   bool isPPC64 = Subtarget.isPPC64();
141   setMinStackArgumentAlignment(isPPC64 ? Align(8) : Align(4));
142 
143   // Set up the register classes.
144   addRegisterClass(MVT::i32, &PPC::GPRCRegClass);
145   if (!useSoftFloat()) {
146     if (hasSPE()) {
147       addRegisterClass(MVT::f32, &PPC::GPRCRegClass);
148       addRegisterClass(MVT::f64, &PPC::SPERCRegClass);
149     } else {
150       addRegisterClass(MVT::f32, &PPC::F4RCRegClass);
151       addRegisterClass(MVT::f64, &PPC::F8RCRegClass);
152     }
153   }
154 
155   // Match BITREVERSE to customized fast code sequence in the td file.
156   setOperationAction(ISD::BITREVERSE, MVT::i32, Legal);
157   setOperationAction(ISD::BITREVERSE, MVT::i64, Legal);
158 
159   // Sub-word ATOMIC_CMP_SWAP need to ensure that the input is zero-extended.
160   setOperationAction(ISD::ATOMIC_CMP_SWAP, MVT::i32, Custom);
161 
162   // PowerPC has an i16 but no i8 (or i1) SEXTLOAD.
163   for (MVT VT : MVT::integer_valuetypes()) {
164     setLoadExtAction(ISD::SEXTLOAD, VT, MVT::i1, Promote);
165     setLoadExtAction(ISD::SEXTLOAD, VT, MVT::i8, Expand);
166   }
167 
168   if (Subtarget.isISA3_0()) {
169     setLoadExtAction(ISD::EXTLOAD, MVT::f64, MVT::f16, Legal);
170     setLoadExtAction(ISD::EXTLOAD, MVT::f32, MVT::f16, Legal);
171     setTruncStoreAction(MVT::f64, MVT::f16, Legal);
172     setTruncStoreAction(MVT::f32, MVT::f16, Legal);
173   } else {
174     // No extending loads from f16 or HW conversions back and forth.
175     setLoadExtAction(ISD::EXTLOAD, MVT::f64, MVT::f16, Expand);
176     setOperationAction(ISD::FP16_TO_FP, MVT::f64, Expand);
177     setOperationAction(ISD::FP_TO_FP16, MVT::f64, Expand);
178     setLoadExtAction(ISD::EXTLOAD, MVT::f32, MVT::f16, Expand);
179     setOperationAction(ISD::FP16_TO_FP, MVT::f32, Expand);
180     setOperationAction(ISD::FP_TO_FP16, MVT::f32, Expand);
181     setTruncStoreAction(MVT::f64, MVT::f16, Expand);
182     setTruncStoreAction(MVT::f32, MVT::f16, Expand);
183   }
184 
185   setTruncStoreAction(MVT::f64, MVT::f32, Expand);
186 
187   // PowerPC has pre-inc load and store's.
188   setIndexedLoadAction(ISD::PRE_INC, MVT::i1, Legal);
189   setIndexedLoadAction(ISD::PRE_INC, MVT::i8, Legal);
190   setIndexedLoadAction(ISD::PRE_INC, MVT::i16, Legal);
191   setIndexedLoadAction(ISD::PRE_INC, MVT::i32, Legal);
192   setIndexedLoadAction(ISD::PRE_INC, MVT::i64, Legal);
193   setIndexedStoreAction(ISD::PRE_INC, MVT::i1, Legal);
194   setIndexedStoreAction(ISD::PRE_INC, MVT::i8, Legal);
195   setIndexedStoreAction(ISD::PRE_INC, MVT::i16, Legal);
196   setIndexedStoreAction(ISD::PRE_INC, MVT::i32, Legal);
197   setIndexedStoreAction(ISD::PRE_INC, MVT::i64, Legal);
198   if (!Subtarget.hasSPE()) {
199     setIndexedLoadAction(ISD::PRE_INC, MVT::f32, Legal);
200     setIndexedLoadAction(ISD::PRE_INC, MVT::f64, Legal);
201     setIndexedStoreAction(ISD::PRE_INC, MVT::f32, Legal);
202     setIndexedStoreAction(ISD::PRE_INC, MVT::f64, Legal);
203   }
204 
205   // PowerPC uses ADDC/ADDE/SUBC/SUBE to propagate carry.
206   const MVT ScalarIntVTs[] = { MVT::i32, MVT::i64 };
207   for (MVT VT : ScalarIntVTs) {
208     setOperationAction(ISD::ADDC, VT, Legal);
209     setOperationAction(ISD::ADDE, VT, Legal);
210     setOperationAction(ISD::SUBC, VT, Legal);
211     setOperationAction(ISD::SUBE, VT, Legal);
212   }
213 
214   if (Subtarget.useCRBits()) {
215     setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i1, Expand);
216 
217     if (isPPC64 || Subtarget.hasFPCVT()) {
218       setOperationAction(ISD::SINT_TO_FP, MVT::i1, Promote);
219       AddPromotedToType (ISD::SINT_TO_FP, MVT::i1,
220                          isPPC64 ? MVT::i64 : MVT::i32);
221       setOperationAction(ISD::UINT_TO_FP, MVT::i1, Promote);
222       AddPromotedToType(ISD::UINT_TO_FP, MVT::i1,
223                         isPPC64 ? MVT::i64 : MVT::i32);
224     } else {
225       setOperationAction(ISD::SINT_TO_FP, MVT::i1, Custom);
226       setOperationAction(ISD::UINT_TO_FP, MVT::i1, Custom);
227     }
228 
229     // PowerPC does not support direct load/store of condition registers.
230     setOperationAction(ISD::LOAD, MVT::i1, Custom);
231     setOperationAction(ISD::STORE, MVT::i1, Custom);
232 
233     // FIXME: Remove this once the ANDI glue bug is fixed:
234     if (ANDIGlueBug)
235       setOperationAction(ISD::TRUNCATE, MVT::i1, Custom);
236 
237     for (MVT VT : MVT::integer_valuetypes()) {
238       setLoadExtAction(ISD::SEXTLOAD, VT, MVT::i1, Promote);
239       setLoadExtAction(ISD::ZEXTLOAD, VT, MVT::i1, Promote);
240       setTruncStoreAction(VT, MVT::i1, Expand);
241     }
242 
243     addRegisterClass(MVT::i1, &PPC::CRBITRCRegClass);
244   }
245 
246   // Expand ppcf128 to i32 by hand for the benefit of llvm-gcc bootstrap on
247   // PPC (the libcall is not available).
248   setOperationAction(ISD::FP_TO_SINT, MVT::ppcf128, Custom);
249   setOperationAction(ISD::FP_TO_UINT, MVT::ppcf128, Custom);
250 
251   // We do not currently implement these libm ops for PowerPC.
252   setOperationAction(ISD::FFLOOR, MVT::ppcf128, Expand);
253   setOperationAction(ISD::FCEIL,  MVT::ppcf128, Expand);
254   setOperationAction(ISD::FTRUNC, MVT::ppcf128, Expand);
255   setOperationAction(ISD::FRINT,  MVT::ppcf128, Expand);
256   setOperationAction(ISD::FNEARBYINT, MVT::ppcf128, Expand);
257   setOperationAction(ISD::FREM, MVT::ppcf128, Expand);
258 
259   // PowerPC has no SREM/UREM instructions unless we are on P9
260   // On P9 we may use a hardware instruction to compute the remainder.
261   // When the result of both the remainder and the division is required it is
262   // more efficient to compute the remainder from the result of the division
263   // rather than use the remainder instruction. The instructions are legalized
264   // directly because the DivRemPairsPass performs the transformation at the IR
265   // level.
266   if (Subtarget.isISA3_0()) {
267     setOperationAction(ISD::SREM, MVT::i32, Legal);
268     setOperationAction(ISD::UREM, MVT::i32, Legal);
269     setOperationAction(ISD::SREM, MVT::i64, Legal);
270     setOperationAction(ISD::UREM, MVT::i64, Legal);
271   } else {
272     setOperationAction(ISD::SREM, MVT::i32, Expand);
273     setOperationAction(ISD::UREM, MVT::i32, Expand);
274     setOperationAction(ISD::SREM, MVT::i64, Expand);
275     setOperationAction(ISD::UREM, MVT::i64, Expand);
276   }
277 
278   // Don't use SMUL_LOHI/UMUL_LOHI or SDIVREM/UDIVREM to lower SREM/UREM.
279   setOperationAction(ISD::UMUL_LOHI, MVT::i32, Expand);
280   setOperationAction(ISD::SMUL_LOHI, MVT::i32, Expand);
281   setOperationAction(ISD::UMUL_LOHI, MVT::i64, Expand);
282   setOperationAction(ISD::SMUL_LOHI, MVT::i64, Expand);
283   setOperationAction(ISD::UDIVREM, MVT::i32, Expand);
284   setOperationAction(ISD::SDIVREM, MVT::i32, Expand);
285   setOperationAction(ISD::UDIVREM, MVT::i64, Expand);
286   setOperationAction(ISD::SDIVREM, MVT::i64, Expand);
287 
288   // Handle constrained floating-point operations of scalar.
289   // TODO: Handle SPE specific operation.
290   setOperationAction(ISD::STRICT_FADD, MVT::f32, Legal);
291   setOperationAction(ISD::STRICT_FSUB, MVT::f32, Legal);
292   setOperationAction(ISD::STRICT_FMUL, MVT::f32, Legal);
293   setOperationAction(ISD::STRICT_FDIV, MVT::f32, Legal);
294   setOperationAction(ISD::STRICT_FMA, MVT::f32, Legal);
295   setOperationAction(ISD::STRICT_FP_ROUND, MVT::f32, Legal);
296 
297   setOperationAction(ISD::STRICT_FADD, MVT::f64, Legal);
298   setOperationAction(ISD::STRICT_FSUB, MVT::f64, Legal);
299   setOperationAction(ISD::STRICT_FMUL, MVT::f64, Legal);
300   setOperationAction(ISD::STRICT_FDIV, MVT::f64, Legal);
301   setOperationAction(ISD::STRICT_FMA, MVT::f64, Legal);
302   if (Subtarget.hasVSX())
303     setOperationAction(ISD::STRICT_FNEARBYINT, MVT::f64, Legal);
304 
305   if (Subtarget.hasFSQRT()) {
306     setOperationAction(ISD::STRICT_FSQRT, MVT::f32, Legal);
307     setOperationAction(ISD::STRICT_FSQRT, MVT::f64, Legal);
308   }
309 
310   if (Subtarget.hasFPRND()) {
311     setOperationAction(ISD::STRICT_FFLOOR, MVT::f32, Legal);
312     setOperationAction(ISD::STRICT_FCEIL,  MVT::f32, Legal);
313     setOperationAction(ISD::STRICT_FTRUNC, MVT::f32, Legal);
314     setOperationAction(ISD::STRICT_FROUND, MVT::f32, Legal);
315 
316     setOperationAction(ISD::STRICT_FFLOOR, MVT::f64, Legal);
317     setOperationAction(ISD::STRICT_FCEIL,  MVT::f64, Legal);
318     setOperationAction(ISD::STRICT_FTRUNC, MVT::f64, Legal);
319     setOperationAction(ISD::STRICT_FROUND, MVT::f64, Legal);
320   }
321 
322   // We don't support sin/cos/sqrt/fmod/pow
323   setOperationAction(ISD::FSIN , MVT::f64, Expand);
324   setOperationAction(ISD::FCOS , MVT::f64, Expand);
325   setOperationAction(ISD::FSINCOS, MVT::f64, Expand);
326   setOperationAction(ISD::FREM , MVT::f64, Expand);
327   setOperationAction(ISD::FPOW , MVT::f64, Expand);
328   setOperationAction(ISD::FSIN , MVT::f32, Expand);
329   setOperationAction(ISD::FCOS , MVT::f32, Expand);
330   setOperationAction(ISD::FSINCOS, MVT::f32, Expand);
331   setOperationAction(ISD::FREM , MVT::f32, Expand);
332   setOperationAction(ISD::FPOW , MVT::f32, Expand);
333   if (Subtarget.hasSPE()) {
334     setOperationAction(ISD::FMA  , MVT::f64, Expand);
335     setOperationAction(ISD::FMA  , MVT::f32, Expand);
336   } else {
337     setOperationAction(ISD::FMA  , MVT::f64, Legal);
338     setOperationAction(ISD::FMA  , MVT::f32, Legal);
339   }
340 
341   setOperationAction(ISD::FLT_ROUNDS_, MVT::i32, Custom);
342 
343   // If we're enabling GP optimizations, use hardware square root
344   if (!Subtarget.hasFSQRT() &&
345       !(TM.Options.UnsafeFPMath && Subtarget.hasFRSQRTE() &&
346         Subtarget.hasFRE()))
347     setOperationAction(ISD::FSQRT, MVT::f64, Expand);
348 
349   if (!Subtarget.hasFSQRT() &&
350       !(TM.Options.UnsafeFPMath && Subtarget.hasFRSQRTES() &&
351         Subtarget.hasFRES()))
352     setOperationAction(ISD::FSQRT, MVT::f32, Expand);
353 
354   if (Subtarget.hasFCPSGN()) {
355     setOperationAction(ISD::FCOPYSIGN, MVT::f64, Legal);
356     setOperationAction(ISD::FCOPYSIGN, MVT::f32, Legal);
357   } else {
358     setOperationAction(ISD::FCOPYSIGN, MVT::f64, Expand);
359     setOperationAction(ISD::FCOPYSIGN, MVT::f32, Expand);
360   }
361 
362   if (Subtarget.hasFPRND()) {
363     setOperationAction(ISD::FFLOOR, MVT::f64, Legal);
364     setOperationAction(ISD::FCEIL,  MVT::f64, Legal);
365     setOperationAction(ISD::FTRUNC, MVT::f64, Legal);
366     setOperationAction(ISD::FROUND, MVT::f64, Legal);
367 
368     setOperationAction(ISD::FFLOOR, MVT::f32, Legal);
369     setOperationAction(ISD::FCEIL,  MVT::f32, Legal);
370     setOperationAction(ISD::FTRUNC, MVT::f32, Legal);
371     setOperationAction(ISD::FROUND, MVT::f32, Legal);
372   }
373 
374   // PowerPC does not have BSWAP, but we can use vector BSWAP instruction xxbrd
375   // to speed up scalar BSWAP64.
376   // CTPOP or CTTZ were introduced in P8/P9 respectively
377   setOperationAction(ISD::BSWAP, MVT::i32  , Expand);
378   if (Subtarget.hasP9Vector())
379     setOperationAction(ISD::BSWAP, MVT::i64  , Custom);
380   else
381     setOperationAction(ISD::BSWAP, MVT::i64  , Expand);
382   if (Subtarget.isISA3_0()) {
383     setOperationAction(ISD::CTTZ , MVT::i32  , Legal);
384     setOperationAction(ISD::CTTZ , MVT::i64  , Legal);
385   } else {
386     setOperationAction(ISD::CTTZ , MVT::i32  , Expand);
387     setOperationAction(ISD::CTTZ , MVT::i64  , Expand);
388   }
389 
390   if (Subtarget.hasPOPCNTD() == PPCSubtarget::POPCNTD_Fast) {
391     setOperationAction(ISD::CTPOP, MVT::i32  , Legal);
392     setOperationAction(ISD::CTPOP, MVT::i64  , Legal);
393   } else {
394     setOperationAction(ISD::CTPOP, MVT::i32  , Expand);
395     setOperationAction(ISD::CTPOP, MVT::i64  , Expand);
396   }
397 
398   // PowerPC does not have ROTR
399   setOperationAction(ISD::ROTR, MVT::i32   , Expand);
400   setOperationAction(ISD::ROTR, MVT::i64   , Expand);
401 
402   if (!Subtarget.useCRBits()) {
403     // PowerPC does not have Select
404     setOperationAction(ISD::SELECT, MVT::i32, Expand);
405     setOperationAction(ISD::SELECT, MVT::i64, Expand);
406     setOperationAction(ISD::SELECT, MVT::f32, Expand);
407     setOperationAction(ISD::SELECT, MVT::f64, Expand);
408   }
409 
410   // PowerPC wants to turn select_cc of FP into fsel when possible.
411   setOperationAction(ISD::SELECT_CC, MVT::f32, Custom);
412   setOperationAction(ISD::SELECT_CC, MVT::f64, Custom);
413 
414   // PowerPC wants to optimize integer setcc a bit
415   if (!Subtarget.useCRBits())
416     setOperationAction(ISD::SETCC, MVT::i32, Custom);
417 
418   // PowerPC does not have BRCOND which requires SetCC
419   if (!Subtarget.useCRBits())
420     setOperationAction(ISD::BRCOND, MVT::Other, Expand);
421 
422   setOperationAction(ISD::BR_JT,  MVT::Other, Expand);
423 
424   if (Subtarget.hasSPE()) {
425     // SPE has built-in conversions
426     setOperationAction(ISD::STRICT_FP_TO_SINT, MVT::i32, Legal);
427     setOperationAction(ISD::STRICT_SINT_TO_FP, MVT::i32, Legal);
428     setOperationAction(ISD::STRICT_UINT_TO_FP, MVT::i32, Legal);
429     setOperationAction(ISD::FP_TO_SINT, MVT::i32, Legal);
430     setOperationAction(ISD::SINT_TO_FP, MVT::i32, Legal);
431     setOperationAction(ISD::UINT_TO_FP, MVT::i32, Legal);
432   } else {
433     // PowerPC turns FP_TO_SINT into FCTIWZ and some load/stores.
434     setOperationAction(ISD::FP_TO_SINT, MVT::i32, Custom);
435 
436     // PowerPC does not have [U|S]INT_TO_FP
437     setOperationAction(ISD::SINT_TO_FP, MVT::i32, Expand);
438     setOperationAction(ISD::UINT_TO_FP, MVT::i32, Expand);
439   }
440 
441   if (Subtarget.hasDirectMove() && isPPC64) {
442     setOperationAction(ISD::BITCAST, MVT::f32, Legal);
443     setOperationAction(ISD::BITCAST, MVT::i32, Legal);
444     setOperationAction(ISD::BITCAST, MVT::i64, Legal);
445     setOperationAction(ISD::BITCAST, MVT::f64, Legal);
446     if (TM.Options.UnsafeFPMath) {
447       setOperationAction(ISD::LRINT, MVT::f64, Legal);
448       setOperationAction(ISD::LRINT, MVT::f32, Legal);
449       setOperationAction(ISD::LLRINT, MVT::f64, Legal);
450       setOperationAction(ISD::LLRINT, MVT::f32, Legal);
451       setOperationAction(ISD::LROUND, MVT::f64, Legal);
452       setOperationAction(ISD::LROUND, MVT::f32, Legal);
453       setOperationAction(ISD::LLROUND, MVT::f64, Legal);
454       setOperationAction(ISD::LLROUND, MVT::f32, Legal);
455     }
456   } else {
457     setOperationAction(ISD::BITCAST, MVT::f32, Expand);
458     setOperationAction(ISD::BITCAST, MVT::i32, Expand);
459     setOperationAction(ISD::BITCAST, MVT::i64, Expand);
460     setOperationAction(ISD::BITCAST, MVT::f64, Expand);
461   }
462 
463   // We cannot sextinreg(i1).  Expand to shifts.
464   setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i1, Expand);
465 
466   // NOTE: EH_SJLJ_SETJMP/_LONGJMP supported here is NOT intended to support
467   // SjLj exception handling but a light-weight setjmp/longjmp replacement to
468   // support continuation, user-level threading, and etc.. As a result, no
469   // other SjLj exception interfaces are implemented and please don't build
470   // your own exception handling based on them.
471   // LLVM/Clang supports zero-cost DWARF exception handling.
472   setOperationAction(ISD::EH_SJLJ_SETJMP, MVT::i32, Custom);
473   setOperationAction(ISD::EH_SJLJ_LONGJMP, MVT::Other, Custom);
474 
475   // We want to legalize GlobalAddress and ConstantPool nodes into the
476   // appropriate instructions to materialize the address.
477   setOperationAction(ISD::GlobalAddress, MVT::i32, Custom);
478   setOperationAction(ISD::GlobalTLSAddress, MVT::i32, Custom);
479   setOperationAction(ISD::BlockAddress,  MVT::i32, Custom);
480   setOperationAction(ISD::ConstantPool,  MVT::i32, Custom);
481   setOperationAction(ISD::JumpTable,     MVT::i32, Custom);
482   setOperationAction(ISD::GlobalAddress, MVT::i64, Custom);
483   setOperationAction(ISD::GlobalTLSAddress, MVT::i64, Custom);
484   setOperationAction(ISD::BlockAddress,  MVT::i64, Custom);
485   setOperationAction(ISD::ConstantPool,  MVT::i64, Custom);
486   setOperationAction(ISD::JumpTable,     MVT::i64, Custom);
487 
488   // TRAP is legal.
489   setOperationAction(ISD::TRAP, MVT::Other, Legal);
490 
491   // TRAMPOLINE is custom lowered.
492   setOperationAction(ISD::INIT_TRAMPOLINE, MVT::Other, Custom);
493   setOperationAction(ISD::ADJUST_TRAMPOLINE, MVT::Other, Custom);
494 
495   // VASTART needs to be custom lowered to use the VarArgsFrameIndex
496   setOperationAction(ISD::VASTART           , MVT::Other, Custom);
497 
498   if (Subtarget.is64BitELFABI()) {
499     // VAARG always uses double-word chunks, so promote anything smaller.
500     setOperationAction(ISD::VAARG, MVT::i1, Promote);
501     AddPromotedToType(ISD::VAARG, MVT::i1, MVT::i64);
502     setOperationAction(ISD::VAARG, MVT::i8, Promote);
503     AddPromotedToType(ISD::VAARG, MVT::i8, MVT::i64);
504     setOperationAction(ISD::VAARG, MVT::i16, Promote);
505     AddPromotedToType(ISD::VAARG, MVT::i16, MVT::i64);
506     setOperationAction(ISD::VAARG, MVT::i32, Promote);
507     AddPromotedToType(ISD::VAARG, MVT::i32, MVT::i64);
508     setOperationAction(ISD::VAARG, MVT::Other, Expand);
509   } else if (Subtarget.is32BitELFABI()) {
510     // VAARG is custom lowered with the 32-bit SVR4 ABI.
511     setOperationAction(ISD::VAARG, MVT::Other, Custom);
512     setOperationAction(ISD::VAARG, MVT::i64, Custom);
513   } else
514     setOperationAction(ISD::VAARG, MVT::Other, Expand);
515 
516   // VACOPY is custom lowered with the 32-bit SVR4 ABI.
517   if (Subtarget.is32BitELFABI())
518     setOperationAction(ISD::VACOPY            , MVT::Other, Custom);
519   else
520     setOperationAction(ISD::VACOPY            , MVT::Other, Expand);
521 
522   // Use the default implementation.
523   setOperationAction(ISD::VAEND             , MVT::Other, Expand);
524   setOperationAction(ISD::STACKSAVE         , MVT::Other, Expand);
525   setOperationAction(ISD::STACKRESTORE      , MVT::Other, Custom);
526   setOperationAction(ISD::DYNAMIC_STACKALLOC, MVT::i32  , Custom);
527   setOperationAction(ISD::DYNAMIC_STACKALLOC, MVT::i64  , Custom);
528   setOperationAction(ISD::GET_DYNAMIC_AREA_OFFSET, MVT::i32, Custom);
529   setOperationAction(ISD::GET_DYNAMIC_AREA_OFFSET, MVT::i64, Custom);
530   setOperationAction(ISD::EH_DWARF_CFA, MVT::i32, Custom);
531   setOperationAction(ISD::EH_DWARF_CFA, MVT::i64, Custom);
532 
533   // We want to custom lower some of our intrinsics.
534   setOperationAction(ISD::INTRINSIC_WO_CHAIN, MVT::Other, Custom);
535 
536   // To handle counter-based loop conditions.
537   setOperationAction(ISD::INTRINSIC_W_CHAIN, MVT::i1, Custom);
538 
539   setOperationAction(ISD::INTRINSIC_VOID, MVT::i8, Custom);
540   setOperationAction(ISD::INTRINSIC_VOID, MVT::i16, Custom);
541   setOperationAction(ISD::INTRINSIC_VOID, MVT::i32, Custom);
542   setOperationAction(ISD::INTRINSIC_VOID, MVT::Other, Custom);
543 
544   // Comparisons that require checking two conditions.
545   if (Subtarget.hasSPE()) {
546     setCondCodeAction(ISD::SETO, MVT::f32, Expand);
547     setCondCodeAction(ISD::SETO, MVT::f64, Expand);
548     setCondCodeAction(ISD::SETUO, MVT::f32, Expand);
549     setCondCodeAction(ISD::SETUO, MVT::f64, Expand);
550   }
551   setCondCodeAction(ISD::SETULT, MVT::f32, Expand);
552   setCondCodeAction(ISD::SETULT, MVT::f64, Expand);
553   setCondCodeAction(ISD::SETUGT, MVT::f32, Expand);
554   setCondCodeAction(ISD::SETUGT, MVT::f64, Expand);
555   setCondCodeAction(ISD::SETUEQ, MVT::f32, Expand);
556   setCondCodeAction(ISD::SETUEQ, MVT::f64, Expand);
557   setCondCodeAction(ISD::SETOGE, MVT::f32, Expand);
558   setCondCodeAction(ISD::SETOGE, MVT::f64, Expand);
559   setCondCodeAction(ISD::SETOLE, MVT::f32, Expand);
560   setCondCodeAction(ISD::SETOLE, MVT::f64, Expand);
561   setCondCodeAction(ISD::SETONE, MVT::f32, Expand);
562   setCondCodeAction(ISD::SETONE, MVT::f64, Expand);
563 
564   if (Subtarget.has64BitSupport()) {
565     // They also have instructions for converting between i64 and fp.
566     setOperationAction(ISD::FP_TO_SINT, MVT::i64, Custom);
567     setOperationAction(ISD::FP_TO_UINT, MVT::i64, Expand);
568     setOperationAction(ISD::SINT_TO_FP, MVT::i64, Custom);
569     setOperationAction(ISD::UINT_TO_FP, MVT::i64, Expand);
570     // This is just the low 32 bits of a (signed) fp->i64 conversion.
571     // We cannot do this with Promote because i64 is not a legal type.
572     setOperationAction(ISD::FP_TO_UINT, MVT::i32, Custom);
573 
574     if (Subtarget.hasLFIWAX() || Subtarget.isPPC64())
575       setOperationAction(ISD::SINT_TO_FP, MVT::i32, Custom);
576   } else {
577     // PowerPC does not have FP_TO_UINT on 32-bit implementations.
578     if (Subtarget.hasSPE()) {
579       setOperationAction(ISD::STRICT_FP_TO_UINT, MVT::i32, Legal);
580       setOperationAction(ISD::FP_TO_UINT, MVT::i32, Legal);
581     } else
582       setOperationAction(ISD::FP_TO_UINT, MVT::i32, Expand);
583   }
584 
585   // With the instructions enabled under FPCVT, we can do everything.
586   if (Subtarget.hasFPCVT()) {
587     if (Subtarget.has64BitSupport()) {
588       setOperationAction(ISD::FP_TO_SINT, MVT::i64, Custom);
589       setOperationAction(ISD::FP_TO_UINT, MVT::i64, Custom);
590       setOperationAction(ISD::SINT_TO_FP, MVT::i64, Custom);
591       setOperationAction(ISD::UINT_TO_FP, MVT::i64, Custom);
592     }
593 
594     setOperationAction(ISD::FP_TO_SINT, MVT::i32, Custom);
595     setOperationAction(ISD::FP_TO_UINT, MVT::i32, Custom);
596     setOperationAction(ISD::SINT_TO_FP, MVT::i32, Custom);
597     setOperationAction(ISD::UINT_TO_FP, MVT::i32, Custom);
598   }
599 
600   if (Subtarget.use64BitRegs()) {
601     // 64-bit PowerPC implementations can support i64 types directly
602     addRegisterClass(MVT::i64, &PPC::G8RCRegClass);
603     // BUILD_PAIR can't be handled natively, and should be expanded to shl/or
604     setOperationAction(ISD::BUILD_PAIR, MVT::i64, Expand);
605     // 64-bit PowerPC wants to expand i128 shifts itself.
606     setOperationAction(ISD::SHL_PARTS, MVT::i64, Custom);
607     setOperationAction(ISD::SRA_PARTS, MVT::i64, Custom);
608     setOperationAction(ISD::SRL_PARTS, MVT::i64, Custom);
609   } else {
610     // 32-bit PowerPC wants to expand i64 shifts itself.
611     setOperationAction(ISD::SHL_PARTS, MVT::i32, Custom);
612     setOperationAction(ISD::SRA_PARTS, MVT::i32, Custom);
613     setOperationAction(ISD::SRL_PARTS, MVT::i32, Custom);
614   }
615 
616   if (Subtarget.hasVSX()) {
617     setOperationAction(ISD::FMAXNUM_IEEE, MVT::f64, Legal);
618     setOperationAction(ISD::FMAXNUM_IEEE, MVT::f32, Legal);
619     setOperationAction(ISD::FMINNUM_IEEE, MVT::f64, Legal);
620     setOperationAction(ISD::FMINNUM_IEEE, MVT::f32, Legal);
621   }
622 
623   if (Subtarget.hasAltivec()) {
624     for (MVT VT : { MVT::v16i8, MVT::v8i16, MVT::v4i32 }) {
625       setOperationAction(ISD::SADDSAT, VT, Legal);
626       setOperationAction(ISD::SSUBSAT, VT, Legal);
627       setOperationAction(ISD::UADDSAT, VT, Legal);
628       setOperationAction(ISD::USUBSAT, VT, Legal);
629     }
630     // First set operation action for all vector types to expand. Then we
631     // will selectively turn on ones that can be effectively codegen'd.
632     for (MVT VT : MVT::fixedlen_vector_valuetypes()) {
633       // add/sub are legal for all supported vector VT's.
634       setOperationAction(ISD::ADD, VT, Legal);
635       setOperationAction(ISD::SUB, VT, Legal);
636 
637       // For v2i64, these are only valid with P8Vector. This is corrected after
638       // the loop.
639       if (VT.getSizeInBits() <= 128 && VT.getScalarSizeInBits() <= 64) {
640         setOperationAction(ISD::SMAX, VT, Legal);
641         setOperationAction(ISD::SMIN, VT, Legal);
642         setOperationAction(ISD::UMAX, VT, Legal);
643         setOperationAction(ISD::UMIN, VT, Legal);
644       }
645       else {
646         setOperationAction(ISD::SMAX, VT, Expand);
647         setOperationAction(ISD::SMIN, VT, Expand);
648         setOperationAction(ISD::UMAX, VT, Expand);
649         setOperationAction(ISD::UMIN, VT, Expand);
650       }
651 
652       if (Subtarget.hasVSX()) {
653         setOperationAction(ISD::FMAXNUM, VT, Legal);
654         setOperationAction(ISD::FMINNUM, VT, Legal);
655       }
656 
657       // Vector instructions introduced in P8
658       if (Subtarget.hasP8Altivec() && (VT.SimpleTy != MVT::v1i128)) {
659         setOperationAction(ISD::CTPOP, VT, Legal);
660         setOperationAction(ISD::CTLZ, VT, Legal);
661       }
662       else {
663         setOperationAction(ISD::CTPOP, VT, Expand);
664         setOperationAction(ISD::CTLZ, VT, Expand);
665       }
666 
667       // Vector instructions introduced in P9
668       if (Subtarget.hasP9Altivec() && (VT.SimpleTy != MVT::v1i128))
669         setOperationAction(ISD::CTTZ, VT, Legal);
670       else
671         setOperationAction(ISD::CTTZ, VT, Expand);
672 
673       // We promote all shuffles to v16i8.
674       setOperationAction(ISD::VECTOR_SHUFFLE, VT, Promote);
675       AddPromotedToType (ISD::VECTOR_SHUFFLE, VT, MVT::v16i8);
676 
677       // We promote all non-typed operations to v4i32.
678       setOperationAction(ISD::AND   , VT, Promote);
679       AddPromotedToType (ISD::AND   , VT, MVT::v4i32);
680       setOperationAction(ISD::OR    , VT, Promote);
681       AddPromotedToType (ISD::OR    , VT, MVT::v4i32);
682       setOperationAction(ISD::XOR   , VT, Promote);
683       AddPromotedToType (ISD::XOR   , VT, MVT::v4i32);
684       setOperationAction(ISD::LOAD  , VT, Promote);
685       AddPromotedToType (ISD::LOAD  , VT, MVT::v4i32);
686       setOperationAction(ISD::SELECT, VT, Promote);
687       AddPromotedToType (ISD::SELECT, VT, MVT::v4i32);
688       setOperationAction(ISD::VSELECT, VT, Legal);
689       setOperationAction(ISD::SELECT_CC, VT, Promote);
690       AddPromotedToType (ISD::SELECT_CC, VT, MVT::v4i32);
691       setOperationAction(ISD::STORE, VT, Promote);
692       AddPromotedToType (ISD::STORE, VT, MVT::v4i32);
693 
694       // No other operations are legal.
695       setOperationAction(ISD::MUL , VT, Expand);
696       setOperationAction(ISD::SDIV, VT, Expand);
697       setOperationAction(ISD::SREM, VT, Expand);
698       setOperationAction(ISD::UDIV, VT, Expand);
699       setOperationAction(ISD::UREM, VT, Expand);
700       setOperationAction(ISD::FDIV, VT, Expand);
701       setOperationAction(ISD::FREM, VT, Expand);
702       setOperationAction(ISD::FNEG, VT, Expand);
703       setOperationAction(ISD::FSQRT, VT, Expand);
704       setOperationAction(ISD::FLOG, VT, Expand);
705       setOperationAction(ISD::FLOG10, VT, Expand);
706       setOperationAction(ISD::FLOG2, VT, Expand);
707       setOperationAction(ISD::FEXP, VT, Expand);
708       setOperationAction(ISD::FEXP2, VT, Expand);
709       setOperationAction(ISD::FSIN, VT, Expand);
710       setOperationAction(ISD::FCOS, VT, Expand);
711       setOperationAction(ISD::FABS, VT, Expand);
712       setOperationAction(ISD::FFLOOR, VT, Expand);
713       setOperationAction(ISD::FCEIL,  VT, Expand);
714       setOperationAction(ISD::FTRUNC, VT, Expand);
715       setOperationAction(ISD::FRINT,  VT, Expand);
716       setOperationAction(ISD::FNEARBYINT, VT, Expand);
717       setOperationAction(ISD::EXTRACT_VECTOR_ELT, VT, Expand);
718       setOperationAction(ISD::INSERT_VECTOR_ELT, VT, Expand);
719       setOperationAction(ISD::BUILD_VECTOR, VT, Expand);
720       setOperationAction(ISD::MULHU, VT, Expand);
721       setOperationAction(ISD::MULHS, VT, Expand);
722       setOperationAction(ISD::UMUL_LOHI, VT, Expand);
723       setOperationAction(ISD::SMUL_LOHI, VT, Expand);
724       setOperationAction(ISD::UDIVREM, VT, Expand);
725       setOperationAction(ISD::SDIVREM, VT, Expand);
726       setOperationAction(ISD::SCALAR_TO_VECTOR, VT, Expand);
727       setOperationAction(ISD::FPOW, VT, Expand);
728       setOperationAction(ISD::BSWAP, VT, Expand);
729       setOperationAction(ISD::SIGN_EXTEND_INREG, VT, Expand);
730       setOperationAction(ISD::ROTL, VT, Expand);
731       setOperationAction(ISD::ROTR, VT, Expand);
732 
733       for (MVT InnerVT : MVT::fixedlen_vector_valuetypes()) {
734         setTruncStoreAction(VT, InnerVT, Expand);
735         setLoadExtAction(ISD::SEXTLOAD, VT, InnerVT, Expand);
736         setLoadExtAction(ISD::ZEXTLOAD, VT, InnerVT, Expand);
737         setLoadExtAction(ISD::EXTLOAD, VT, InnerVT, Expand);
738       }
739     }
740     setOperationAction(ISD::SELECT_CC, MVT::v4i32, Expand);
741     if (!Subtarget.hasP8Vector()) {
742       setOperationAction(ISD::SMAX, MVT::v2i64, Expand);
743       setOperationAction(ISD::SMIN, MVT::v2i64, Expand);
744       setOperationAction(ISD::UMAX, MVT::v2i64, Expand);
745       setOperationAction(ISD::UMIN, MVT::v2i64, Expand);
746     }
747 
748     for (auto VT : {MVT::v2i64, MVT::v4i32, MVT::v8i16, MVT::v16i8})
749       setOperationAction(ISD::ABS, VT, Custom);
750 
751     // We can custom expand all VECTOR_SHUFFLEs to VPERM, others we can handle
752     // with merges, splats, etc.
753     setOperationAction(ISD::VECTOR_SHUFFLE, MVT::v16i8, Custom);
754 
755     // Vector truncates to sub-word integer that fit in an Altivec/VSX register
756     // are cheap, so handle them before they get expanded to scalar.
757     setOperationAction(ISD::TRUNCATE, MVT::v8i8, Custom);
758     setOperationAction(ISD::TRUNCATE, MVT::v4i8, Custom);
759     setOperationAction(ISD::TRUNCATE, MVT::v2i8, Custom);
760     setOperationAction(ISD::TRUNCATE, MVT::v4i16, Custom);
761     setOperationAction(ISD::TRUNCATE, MVT::v2i16, Custom);
762 
763     setOperationAction(ISD::AND   , MVT::v4i32, Legal);
764     setOperationAction(ISD::OR    , MVT::v4i32, Legal);
765     setOperationAction(ISD::XOR   , MVT::v4i32, Legal);
766     setOperationAction(ISD::LOAD  , MVT::v4i32, Legal);
767     setOperationAction(ISD::SELECT, MVT::v4i32,
768                        Subtarget.useCRBits() ? Legal : Expand);
769     setOperationAction(ISD::STORE , MVT::v4i32, Legal);
770     setOperationAction(ISD::FP_TO_SINT, MVT::v4i32, Legal);
771     setOperationAction(ISD::FP_TO_UINT, MVT::v4i32, Legal);
772     setOperationAction(ISD::SINT_TO_FP, MVT::v4i32, Legal);
773     setOperationAction(ISD::UINT_TO_FP, MVT::v4i32, Legal);
774     setOperationAction(ISD::FFLOOR, MVT::v4f32, Legal);
775     setOperationAction(ISD::FCEIL, MVT::v4f32, Legal);
776     setOperationAction(ISD::FTRUNC, MVT::v4f32, Legal);
777     setOperationAction(ISD::FNEARBYINT, MVT::v4f32, Legal);
778 
779     // Without hasP8Altivec set, v2i64 SMAX isn't available.
780     // But ABS custom lowering requires SMAX support.
781     if (!Subtarget.hasP8Altivec())
782       setOperationAction(ISD::ABS, MVT::v2i64, Expand);
783 
784     // Custom lowering ROTL v1i128 to VECTOR_SHUFFLE v16i8.
785     setOperationAction(ISD::ROTL, MVT::v1i128, Custom);
786     // With hasAltivec set, we can lower ISD::ROTL to vrl(b|h|w).
787     if (Subtarget.hasAltivec())
788       for (auto VT : {MVT::v4i32, MVT::v8i16, MVT::v16i8})
789         setOperationAction(ISD::ROTL, VT, Legal);
790     // With hasP8Altivec set, we can lower ISD::ROTL to vrld.
791     if (Subtarget.hasP8Altivec())
792       setOperationAction(ISD::ROTL, MVT::v2i64, Legal);
793 
794     addRegisterClass(MVT::v4f32, &PPC::VRRCRegClass);
795     addRegisterClass(MVT::v4i32, &PPC::VRRCRegClass);
796     addRegisterClass(MVT::v8i16, &PPC::VRRCRegClass);
797     addRegisterClass(MVT::v16i8, &PPC::VRRCRegClass);
798 
799     setOperationAction(ISD::MUL, MVT::v4f32, Legal);
800     setOperationAction(ISD::FMA, MVT::v4f32, Legal);
801 
802     if (TM.Options.UnsafeFPMath || Subtarget.hasVSX()) {
803       setOperationAction(ISD::FDIV, MVT::v4f32, Legal);
804       setOperationAction(ISD::FSQRT, MVT::v4f32, Legal);
805     }
806 
807     if (Subtarget.hasP8Altivec())
808       setOperationAction(ISD::MUL, MVT::v4i32, Legal);
809     else
810       setOperationAction(ISD::MUL, MVT::v4i32, Custom);
811 
812     setOperationAction(ISD::MUL, MVT::v8i16, Legal);
813     setOperationAction(ISD::MUL, MVT::v16i8, Custom);
814 
815     setOperationAction(ISD::SCALAR_TO_VECTOR, MVT::v4f32, Custom);
816     setOperationAction(ISD::SCALAR_TO_VECTOR, MVT::v4i32, Custom);
817 
818     setOperationAction(ISD::BUILD_VECTOR, MVT::v16i8, Custom);
819     setOperationAction(ISD::BUILD_VECTOR, MVT::v8i16, Custom);
820     setOperationAction(ISD::BUILD_VECTOR, MVT::v4i32, Custom);
821     setOperationAction(ISD::BUILD_VECTOR, MVT::v4f32, Custom);
822 
823     // Altivec does not contain unordered floating-point compare instructions
824     setCondCodeAction(ISD::SETUO, MVT::v4f32, Expand);
825     setCondCodeAction(ISD::SETUEQ, MVT::v4f32, Expand);
826     setCondCodeAction(ISD::SETO,   MVT::v4f32, Expand);
827     setCondCodeAction(ISD::SETONE, MVT::v4f32, Expand);
828 
829     if (Subtarget.hasVSX()) {
830       setOperationAction(ISD::SCALAR_TO_VECTOR, MVT::v2f64, Legal);
831       setOperationAction(ISD::EXTRACT_VECTOR_ELT, MVT::v2f64, Legal);
832       if (Subtarget.hasP8Vector()) {
833         setOperationAction(ISD::SCALAR_TO_VECTOR, MVT::v4f32, Legal);
834         setOperationAction(ISD::EXTRACT_VECTOR_ELT, MVT::v4f32, Legal);
835       }
836       if (Subtarget.hasDirectMove() && isPPC64) {
837         setOperationAction(ISD::SCALAR_TO_VECTOR, MVT::v16i8, Legal);
838         setOperationAction(ISD::SCALAR_TO_VECTOR, MVT::v8i16, Legal);
839         setOperationAction(ISD::SCALAR_TO_VECTOR, MVT::v4i32, Legal);
840         setOperationAction(ISD::SCALAR_TO_VECTOR, MVT::v2i64, Legal);
841         setOperationAction(ISD::EXTRACT_VECTOR_ELT, MVT::v16i8, Legal);
842         setOperationAction(ISD::EXTRACT_VECTOR_ELT, MVT::v8i16, Legal);
843         setOperationAction(ISD::EXTRACT_VECTOR_ELT, MVT::v4i32, Legal);
844         setOperationAction(ISD::EXTRACT_VECTOR_ELT, MVT::v2i64, Legal);
845       }
846       setOperationAction(ISD::EXTRACT_VECTOR_ELT, MVT::v2f64, Legal);
847 
848       // The nearbyint variants are not allowed to raise the inexact exception
849       // so we can only code-gen them with unsafe math.
850       if (TM.Options.UnsafeFPMath) {
851         setOperationAction(ISD::FNEARBYINT, MVT::f64, Legal);
852         setOperationAction(ISD::FNEARBYINT, MVT::f32, Legal);
853       }
854 
855       setOperationAction(ISD::FFLOOR, MVT::v2f64, Legal);
856       setOperationAction(ISD::FCEIL, MVT::v2f64, Legal);
857       setOperationAction(ISD::FTRUNC, MVT::v2f64, Legal);
858       setOperationAction(ISD::FNEARBYINT, MVT::v2f64, Legal);
859       setOperationAction(ISD::FRINT, MVT::v2f64, Legal);
860       setOperationAction(ISD::FROUND, MVT::v2f64, Legal);
861       setOperationAction(ISD::FROUND, MVT::f64, Legal);
862       setOperationAction(ISD::FRINT, MVT::f64, Legal);
863 
864       setOperationAction(ISD::FNEARBYINT, MVT::v4f32, Legal);
865       setOperationAction(ISD::FRINT, MVT::v4f32, Legal);
866       setOperationAction(ISD::FROUND, MVT::v4f32, Legal);
867       setOperationAction(ISD::FROUND, MVT::f32, Legal);
868       setOperationAction(ISD::FRINT, MVT::f32, Legal);
869 
870       setOperationAction(ISD::MUL, MVT::v2f64, Legal);
871       setOperationAction(ISD::FMA, MVT::v2f64, Legal);
872 
873       setOperationAction(ISD::FDIV, MVT::v2f64, Legal);
874       setOperationAction(ISD::FSQRT, MVT::v2f64, Legal);
875 
876       // Share the Altivec comparison restrictions.
877       setCondCodeAction(ISD::SETUO, MVT::v2f64, Expand);
878       setCondCodeAction(ISD::SETUEQ, MVT::v2f64, Expand);
879       setCondCodeAction(ISD::SETO,   MVT::v2f64, Expand);
880       setCondCodeAction(ISD::SETONE, MVT::v2f64, Expand);
881 
882       setOperationAction(ISD::LOAD, MVT::v2f64, Legal);
883       setOperationAction(ISD::STORE, MVT::v2f64, Legal);
884 
885       setOperationAction(ISD::VECTOR_SHUFFLE, MVT::v2f64, Legal);
886 
887       if (Subtarget.hasP8Vector())
888         addRegisterClass(MVT::f32, &PPC::VSSRCRegClass);
889 
890       addRegisterClass(MVT::f64, &PPC::VSFRCRegClass);
891 
892       addRegisterClass(MVT::v4i32, &PPC::VSRCRegClass);
893       addRegisterClass(MVT::v4f32, &PPC::VSRCRegClass);
894       addRegisterClass(MVT::v2f64, &PPC::VSRCRegClass);
895 
896       if (Subtarget.hasP8Altivec()) {
897         setOperationAction(ISD::SHL, MVT::v2i64, Legal);
898         setOperationAction(ISD::SRA, MVT::v2i64, Legal);
899         setOperationAction(ISD::SRL, MVT::v2i64, Legal);
900 
901         // 128 bit shifts can be accomplished via 3 instructions for SHL and
902         // SRL, but not for SRA because of the instructions available:
903         // VS{RL} and VS{RL}O. However due to direct move costs, it's not worth
904         // doing
905         setOperationAction(ISD::SHL, MVT::v1i128, Expand);
906         setOperationAction(ISD::SRL, MVT::v1i128, Expand);
907         setOperationAction(ISD::SRA, MVT::v1i128, Expand);
908 
909         setOperationAction(ISD::SETCC, MVT::v2i64, Legal);
910       }
911       else {
912         setOperationAction(ISD::SHL, MVT::v2i64, Expand);
913         setOperationAction(ISD::SRA, MVT::v2i64, Expand);
914         setOperationAction(ISD::SRL, MVT::v2i64, Expand);
915 
916         setOperationAction(ISD::SETCC, MVT::v2i64, Custom);
917 
918         // VSX v2i64 only supports non-arithmetic operations.
919         setOperationAction(ISD::ADD, MVT::v2i64, Expand);
920         setOperationAction(ISD::SUB, MVT::v2i64, Expand);
921       }
922 
923       setOperationAction(ISD::LOAD, MVT::v2i64, Promote);
924       AddPromotedToType (ISD::LOAD, MVT::v2i64, MVT::v2f64);
925       setOperationAction(ISD::STORE, MVT::v2i64, Promote);
926       AddPromotedToType (ISD::STORE, MVT::v2i64, MVT::v2f64);
927 
928       setOperationAction(ISD::VECTOR_SHUFFLE, MVT::v2i64, Legal);
929 
930       setOperationAction(ISD::SINT_TO_FP, MVT::v2i64, Legal);
931       setOperationAction(ISD::UINT_TO_FP, MVT::v2i64, Legal);
932       setOperationAction(ISD::FP_TO_SINT, MVT::v2i64, Legal);
933       setOperationAction(ISD::FP_TO_UINT, MVT::v2i64, Legal);
934 
935       // Custom handling for partial vectors of integers converted to
936       // floating point. We already have optimal handling for v2i32 through
937       // the DAG combine, so those aren't necessary.
938       setOperationAction(ISD::UINT_TO_FP, MVT::v2i8, Custom);
939       setOperationAction(ISD::UINT_TO_FP, MVT::v4i8, Custom);
940       setOperationAction(ISD::UINT_TO_FP, MVT::v2i16, Custom);
941       setOperationAction(ISD::UINT_TO_FP, MVT::v4i16, Custom);
942       setOperationAction(ISD::SINT_TO_FP, MVT::v2i8, Custom);
943       setOperationAction(ISD::SINT_TO_FP, MVT::v4i8, Custom);
944       setOperationAction(ISD::SINT_TO_FP, MVT::v2i16, Custom);
945       setOperationAction(ISD::SINT_TO_FP, MVT::v4i16, Custom);
946 
947       setOperationAction(ISD::FNEG, MVT::v4f32, Legal);
948       setOperationAction(ISD::FNEG, MVT::v2f64, Legal);
949       setOperationAction(ISD::FABS, MVT::v4f32, Legal);
950       setOperationAction(ISD::FABS, MVT::v2f64, Legal);
951       setOperationAction(ISD::FCOPYSIGN, MVT::v4f32, Legal);
952       setOperationAction(ISD::FCOPYSIGN, MVT::v2f64, Legal);
953 
954       if (Subtarget.hasDirectMove())
955         setOperationAction(ISD::BUILD_VECTOR, MVT::v2i64, Custom);
956       setOperationAction(ISD::BUILD_VECTOR, MVT::v2f64, Custom);
957 
958       // Handle constrained floating-point operations of vector.
959       // The predictor is `hasVSX` because altivec instruction has
960       // no exception but VSX vector instruction has.
961       setOperationAction(ISD::STRICT_FADD, MVT::v4f32, Legal);
962       setOperationAction(ISD::STRICT_FSUB, MVT::v4f32, Legal);
963       setOperationAction(ISD::STRICT_FMUL, MVT::v4f32, Legal);
964       setOperationAction(ISD::STRICT_FDIV, MVT::v4f32, Legal);
965       setOperationAction(ISD::STRICT_FMA, MVT::v4f32, Legal);
966       setOperationAction(ISD::STRICT_FSQRT, MVT::v4f32, Legal);
967       setOperationAction(ISD::STRICT_FMAXNUM, MVT::v4f32, Legal);
968       setOperationAction(ISD::STRICT_FMINNUM, MVT::v4f32, Legal);
969       setOperationAction(ISD::STRICT_FNEARBYINT, MVT::v4f32, Legal);
970       setOperationAction(ISD::STRICT_FFLOOR, MVT::v4f32, Legal);
971       setOperationAction(ISD::STRICT_FCEIL,  MVT::v4f32, Legal);
972       setOperationAction(ISD::STRICT_FTRUNC, MVT::v4f32, Legal);
973       setOperationAction(ISD::STRICT_FROUND, MVT::v4f32, Legal);
974 
975       setOperationAction(ISD::STRICT_FADD, MVT::v2f64, Legal);
976       setOperationAction(ISD::STRICT_FSUB, MVT::v2f64, Legal);
977       setOperationAction(ISD::STRICT_FMUL, MVT::v2f64, Legal);
978       setOperationAction(ISD::STRICT_FDIV, MVT::v2f64, Legal);
979       setOperationAction(ISD::STRICT_FMA, MVT::v2f64, Legal);
980       setOperationAction(ISD::STRICT_FSQRT, MVT::v2f64, Legal);
981       setOperationAction(ISD::STRICT_FMAXNUM, MVT::v2f64, Legal);
982       setOperationAction(ISD::STRICT_FMINNUM, MVT::v2f64, Legal);
983       setOperationAction(ISD::STRICT_FNEARBYINT, MVT::v2f64, Legal);
984       setOperationAction(ISD::STRICT_FFLOOR, MVT::v2f64, Legal);
985       setOperationAction(ISD::STRICT_FCEIL,  MVT::v2f64, Legal);
986       setOperationAction(ISD::STRICT_FTRUNC, MVT::v2f64, Legal);
987       setOperationAction(ISD::STRICT_FROUND, MVT::v2f64, Legal);
988 
989       addRegisterClass(MVT::v2i64, &PPC::VSRCRegClass);
990     }
991 
992     if (Subtarget.hasP8Altivec()) {
993       addRegisterClass(MVT::v2i64, &PPC::VRRCRegClass);
994       addRegisterClass(MVT::v1i128, &PPC::VRRCRegClass);
995     }
996 
997     if (Subtarget.hasP9Vector()) {
998       setOperationAction(ISD::INSERT_VECTOR_ELT, MVT::v4i32, Custom);
999       setOperationAction(ISD::INSERT_VECTOR_ELT, MVT::v4f32, Custom);
1000 
1001       // 128 bit shifts can be accomplished via 3 instructions for SHL and
1002       // SRL, but not for SRA because of the instructions available:
1003       // VS{RL} and VS{RL}O.
1004       setOperationAction(ISD::SHL, MVT::v1i128, Legal);
1005       setOperationAction(ISD::SRL, MVT::v1i128, Legal);
1006       setOperationAction(ISD::SRA, MVT::v1i128, Expand);
1007 
1008       addRegisterClass(MVT::f128, &PPC::VRRCRegClass);
1009       setOperationAction(ISD::FADD, MVT::f128, Legal);
1010       setOperationAction(ISD::FSUB, MVT::f128, Legal);
1011       setOperationAction(ISD::FDIV, MVT::f128, Legal);
1012       setOperationAction(ISD::FMUL, MVT::f128, Legal);
1013       setOperationAction(ISD::FP_EXTEND, MVT::f128, Legal);
1014       // No extending loads to f128 on PPC.
1015       for (MVT FPT : MVT::fp_valuetypes())
1016         setLoadExtAction(ISD::EXTLOAD, MVT::f128, FPT, Expand);
1017       setOperationAction(ISD::FMA, MVT::f128, Legal);
1018       setCondCodeAction(ISD::SETULT, MVT::f128, Expand);
1019       setCondCodeAction(ISD::SETUGT, MVT::f128, Expand);
1020       setCondCodeAction(ISD::SETUEQ, MVT::f128, Expand);
1021       setCondCodeAction(ISD::SETOGE, MVT::f128, Expand);
1022       setCondCodeAction(ISD::SETOLE, MVT::f128, Expand);
1023       setCondCodeAction(ISD::SETONE, MVT::f128, Expand);
1024 
1025       setOperationAction(ISD::FTRUNC, MVT::f128, Legal);
1026       setOperationAction(ISD::FRINT, MVT::f128, Legal);
1027       setOperationAction(ISD::FFLOOR, MVT::f128, Legal);
1028       setOperationAction(ISD::FCEIL, MVT::f128, Legal);
1029       setOperationAction(ISD::FNEARBYINT, MVT::f128, Legal);
1030       setOperationAction(ISD::FROUND, MVT::f128, Legal);
1031 
1032       setOperationAction(ISD::SELECT, MVT::f128, Expand);
1033       setOperationAction(ISD::FP_ROUND, MVT::f64, Legal);
1034       setOperationAction(ISD::FP_ROUND, MVT::f32, Legal);
1035       setTruncStoreAction(MVT::f128, MVT::f64, Expand);
1036       setTruncStoreAction(MVT::f128, MVT::f32, Expand);
1037       setOperationAction(ISD::BITCAST, MVT::i128, Custom);
1038       // No implementation for these ops for PowerPC.
1039       setOperationAction(ISD::FSIN, MVT::f128, Expand);
1040       setOperationAction(ISD::FCOS, MVT::f128, Expand);
1041       setOperationAction(ISD::FPOW, MVT::f128, Expand);
1042       setOperationAction(ISD::FPOWI, MVT::f128, Expand);
1043       setOperationAction(ISD::FREM, MVT::f128, Expand);
1044 
1045       // Handle constrained floating-point operations of fp128
1046       setOperationAction(ISD::STRICT_FADD, MVT::f128, Legal);
1047       setOperationAction(ISD::STRICT_FSUB, MVT::f128, Legal);
1048       setOperationAction(ISD::STRICT_FMUL, MVT::f128, Legal);
1049       setOperationAction(ISD::STRICT_FDIV, MVT::f128, Legal);
1050       setOperationAction(ISD::STRICT_FMA, MVT::f128, Legal);
1051       setOperationAction(ISD::STRICT_FSQRT, MVT::f128, Legal);
1052       setOperationAction(ISD::STRICT_FP_EXTEND, MVT::f128, Legal);
1053       setOperationAction(ISD::STRICT_FP_ROUND, MVT::f64, Legal);
1054       setOperationAction(ISD::STRICT_FP_ROUND, MVT::f32, Legal);
1055       setOperationAction(ISD::STRICT_FRINT, MVT::f128, Legal);
1056       setOperationAction(ISD::STRICT_FNEARBYINT, MVT::f128, Legal);
1057       setOperationAction(ISD::STRICT_FFLOOR, MVT::f128, Legal);
1058       setOperationAction(ISD::STRICT_FCEIL, MVT::f128, Legal);
1059       setOperationAction(ISD::STRICT_FTRUNC, MVT::f128, Legal);
1060       setOperationAction(ISD::STRICT_FROUND, MVT::f128, Legal);
1061       setOperationAction(ISD::FP_EXTEND, MVT::v2f32, Custom);
1062       setOperationAction(ISD::BSWAP, MVT::v8i16, Legal);
1063       setOperationAction(ISD::BSWAP, MVT::v4i32, Legal);
1064       setOperationAction(ISD::BSWAP, MVT::v2i64, Legal);
1065       setOperationAction(ISD::BSWAP, MVT::v1i128, Legal);
1066     }
1067 
1068     if (Subtarget.hasP9Altivec()) {
1069       setOperationAction(ISD::INSERT_VECTOR_ELT, MVT::v8i16, Custom);
1070       setOperationAction(ISD::INSERT_VECTOR_ELT, MVT::v16i8, Custom);
1071 
1072       setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::v4i8,  Legal);
1073       setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::v4i16, Legal);
1074       setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::v4i32, Legal);
1075       setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::v2i8,  Legal);
1076       setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::v2i16, Legal);
1077       setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::v2i32, Legal);
1078       setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::v2i64, Legal);
1079     }
1080   }
1081 
1082   if (Subtarget.hasQPX()) {
1083     setOperationAction(ISD::FADD, MVT::v4f64, Legal);
1084     setOperationAction(ISD::FSUB, MVT::v4f64, Legal);
1085     setOperationAction(ISD::FMUL, MVT::v4f64, Legal);
1086     setOperationAction(ISD::FREM, MVT::v4f64, Expand);
1087 
1088     setOperationAction(ISD::FCOPYSIGN, MVT::v4f64, Legal);
1089     setOperationAction(ISD::FGETSIGN, MVT::v4f64, Expand);
1090 
1091     setOperationAction(ISD::LOAD  , MVT::v4f64, Custom);
1092     setOperationAction(ISD::STORE , MVT::v4f64, Custom);
1093 
1094     setTruncStoreAction(MVT::v4f64, MVT::v4f32, Custom);
1095     setLoadExtAction(ISD::EXTLOAD, MVT::v4f64, MVT::v4f32, Custom);
1096 
1097     if (!Subtarget.useCRBits())
1098       setOperationAction(ISD::SELECT, MVT::v4f64, Expand);
1099     setOperationAction(ISD::VSELECT, MVT::v4f64, Legal);
1100 
1101     setOperationAction(ISD::EXTRACT_VECTOR_ELT , MVT::v4f64, Legal);
1102     setOperationAction(ISD::INSERT_VECTOR_ELT , MVT::v4f64, Expand);
1103     setOperationAction(ISD::CONCAT_VECTORS , MVT::v4f64, Expand);
1104     setOperationAction(ISD::EXTRACT_SUBVECTOR , MVT::v4f64, Expand);
1105     setOperationAction(ISD::VECTOR_SHUFFLE , MVT::v4f64, Custom);
1106     setOperationAction(ISD::SCALAR_TO_VECTOR, MVT::v4f64, Legal);
1107     setOperationAction(ISD::BUILD_VECTOR, MVT::v4f64, Custom);
1108 
1109     setOperationAction(ISD::FP_TO_SINT , MVT::v4f64, Legal);
1110     setOperationAction(ISD::FP_TO_UINT , MVT::v4f64, Expand);
1111 
1112     setOperationAction(ISD::FP_ROUND , MVT::v4f32, Legal);
1113     setOperationAction(ISD::FP_EXTEND, MVT::v4f64, Legal);
1114 
1115     setOperationAction(ISD::FNEG , MVT::v4f64, Legal);
1116     setOperationAction(ISD::FABS , MVT::v4f64, Legal);
1117     setOperationAction(ISD::FSIN , MVT::v4f64, Expand);
1118     setOperationAction(ISD::FCOS , MVT::v4f64, Expand);
1119     setOperationAction(ISD::FPOW , MVT::v4f64, Expand);
1120     setOperationAction(ISD::FLOG , MVT::v4f64, Expand);
1121     setOperationAction(ISD::FLOG2 , MVT::v4f64, Expand);
1122     setOperationAction(ISD::FLOG10 , MVT::v4f64, Expand);
1123     setOperationAction(ISD::FEXP , MVT::v4f64, Expand);
1124     setOperationAction(ISD::FEXP2 , MVT::v4f64, Expand);
1125 
1126     setOperationAction(ISD::FMINNUM, MVT::v4f64, Legal);
1127     setOperationAction(ISD::FMAXNUM, MVT::v4f64, Legal);
1128 
1129     setIndexedLoadAction(ISD::PRE_INC, MVT::v4f64, Legal);
1130     setIndexedStoreAction(ISD::PRE_INC, MVT::v4f64, Legal);
1131 
1132     addRegisterClass(MVT::v4f64, &PPC::QFRCRegClass);
1133 
1134     setOperationAction(ISD::FADD, MVT::v4f32, Legal);
1135     setOperationAction(ISD::FSUB, MVT::v4f32, Legal);
1136     setOperationAction(ISD::FMUL, MVT::v4f32, Legal);
1137     setOperationAction(ISD::FREM, MVT::v4f32, Expand);
1138 
1139     setOperationAction(ISD::FCOPYSIGN, MVT::v4f32, Legal);
1140     setOperationAction(ISD::FGETSIGN, MVT::v4f32, Expand);
1141 
1142     setOperationAction(ISD::LOAD  , MVT::v4f32, Custom);
1143     setOperationAction(ISD::STORE , MVT::v4f32, Custom);
1144 
1145     if (!Subtarget.useCRBits())
1146       setOperationAction(ISD::SELECT, MVT::v4f32, Expand);
1147     setOperationAction(ISD::VSELECT, MVT::v4f32, Legal);
1148 
1149     setOperationAction(ISD::EXTRACT_VECTOR_ELT , MVT::v4f32, Legal);
1150     setOperationAction(ISD::INSERT_VECTOR_ELT , MVT::v4f32, Expand);
1151     setOperationAction(ISD::CONCAT_VECTORS , MVT::v4f32, Expand);
1152     setOperationAction(ISD::EXTRACT_SUBVECTOR , MVT::v4f32, Expand);
1153     setOperationAction(ISD::VECTOR_SHUFFLE , MVT::v4f32, Custom);
1154     setOperationAction(ISD::SCALAR_TO_VECTOR, MVT::v4f32, Legal);
1155     setOperationAction(ISD::BUILD_VECTOR, MVT::v4f32, Custom);
1156 
1157     setOperationAction(ISD::FP_TO_SINT , MVT::v4f32, Legal);
1158     setOperationAction(ISD::FP_TO_UINT , MVT::v4f32, Expand);
1159 
1160     setOperationAction(ISD::FNEG , MVT::v4f32, Legal);
1161     setOperationAction(ISD::FABS , MVT::v4f32, Legal);
1162     setOperationAction(ISD::FSIN , MVT::v4f32, Expand);
1163     setOperationAction(ISD::FCOS , MVT::v4f32, Expand);
1164     setOperationAction(ISD::FPOW , MVT::v4f32, Expand);
1165     setOperationAction(ISD::FLOG , MVT::v4f32, Expand);
1166     setOperationAction(ISD::FLOG2 , MVT::v4f32, Expand);
1167     setOperationAction(ISD::FLOG10 , MVT::v4f32, Expand);
1168     setOperationAction(ISD::FEXP , MVT::v4f32, Expand);
1169     setOperationAction(ISD::FEXP2 , MVT::v4f32, Expand);
1170 
1171     setOperationAction(ISD::FMINNUM, MVT::v4f32, Legal);
1172     setOperationAction(ISD::FMAXNUM, MVT::v4f32, Legal);
1173 
1174     setIndexedLoadAction(ISD::PRE_INC, MVT::v4f32, Legal);
1175     setIndexedStoreAction(ISD::PRE_INC, MVT::v4f32, Legal);
1176 
1177     addRegisterClass(MVT::v4f32, &PPC::QSRCRegClass);
1178 
1179     setOperationAction(ISD::AND , MVT::v4i1, Legal);
1180     setOperationAction(ISD::OR , MVT::v4i1, Legal);
1181     setOperationAction(ISD::XOR , MVT::v4i1, Legal);
1182 
1183     if (!Subtarget.useCRBits())
1184       setOperationAction(ISD::SELECT, MVT::v4i1, Expand);
1185     setOperationAction(ISD::VSELECT, MVT::v4i1, Legal);
1186 
1187     setOperationAction(ISD::LOAD  , MVT::v4i1, Custom);
1188     setOperationAction(ISD::STORE , MVT::v4i1, Custom);
1189 
1190     setOperationAction(ISD::EXTRACT_VECTOR_ELT , MVT::v4i1, Custom);
1191     setOperationAction(ISD::INSERT_VECTOR_ELT , MVT::v4i1, Expand);
1192     setOperationAction(ISD::CONCAT_VECTORS , MVT::v4i1, Expand);
1193     setOperationAction(ISD::EXTRACT_SUBVECTOR , MVT::v4i1, Expand);
1194     setOperationAction(ISD::VECTOR_SHUFFLE , MVT::v4i1, Custom);
1195     setOperationAction(ISD::SCALAR_TO_VECTOR, MVT::v4i1, Expand);
1196     setOperationAction(ISD::BUILD_VECTOR, MVT::v4i1, Custom);
1197 
1198     setOperationAction(ISD::SINT_TO_FP, MVT::v4i1, Custom);
1199     setOperationAction(ISD::UINT_TO_FP, MVT::v4i1, Custom);
1200 
1201     addRegisterClass(MVT::v4i1, &PPC::QBRCRegClass);
1202 
1203     setOperationAction(ISD::FFLOOR, MVT::v4f64, Legal);
1204     setOperationAction(ISD::FCEIL,  MVT::v4f64, Legal);
1205     setOperationAction(ISD::FTRUNC, MVT::v4f64, Legal);
1206     setOperationAction(ISD::FROUND, MVT::v4f64, Legal);
1207 
1208     setOperationAction(ISD::FFLOOR, MVT::v4f32, Legal);
1209     setOperationAction(ISD::FCEIL,  MVT::v4f32, Legal);
1210     setOperationAction(ISD::FTRUNC, MVT::v4f32, Legal);
1211     setOperationAction(ISD::FROUND, MVT::v4f32, Legal);
1212 
1213     setOperationAction(ISD::FNEARBYINT, MVT::v4f64, Expand);
1214     setOperationAction(ISD::FNEARBYINT, MVT::v4f32, Expand);
1215 
1216     // These need to set FE_INEXACT, and so cannot be vectorized here.
1217     setOperationAction(ISD::FRINT, MVT::v4f64, Expand);
1218     setOperationAction(ISD::FRINT, MVT::v4f32, Expand);
1219 
1220     if (TM.Options.UnsafeFPMath) {
1221       setOperationAction(ISD::FDIV, MVT::v4f64, Legal);
1222       setOperationAction(ISD::FSQRT, MVT::v4f64, Legal);
1223 
1224       setOperationAction(ISD::FDIV, MVT::v4f32, Legal);
1225       setOperationAction(ISD::FSQRT, MVT::v4f32, Legal);
1226     } else {
1227       setOperationAction(ISD::FDIV, MVT::v4f64, Expand);
1228       setOperationAction(ISD::FSQRT, MVT::v4f64, Expand);
1229 
1230       setOperationAction(ISD::FDIV, MVT::v4f32, Expand);
1231       setOperationAction(ISD::FSQRT, MVT::v4f32, Expand);
1232     }
1233 
1234     // TODO: Handle constrained floating-point operations of v4f64
1235   }
1236 
1237   if (Subtarget.has64BitSupport())
1238     setOperationAction(ISD::PREFETCH, MVT::Other, Legal);
1239 
1240   setOperationAction(ISD::READCYCLECOUNTER, MVT::i64, isPPC64 ? Legal : Custom);
1241 
1242   if (!isPPC64) {
1243     setOperationAction(ISD::ATOMIC_LOAD,  MVT::i64, Expand);
1244     setOperationAction(ISD::ATOMIC_STORE, MVT::i64, Expand);
1245   }
1246 
1247   setBooleanContents(ZeroOrOneBooleanContent);
1248 
1249   if (Subtarget.hasAltivec()) {
1250     // Altivec instructions set fields to all zeros or all ones.
1251     setBooleanVectorContents(ZeroOrNegativeOneBooleanContent);
1252   }
1253 
1254   if (!isPPC64) {
1255     // These libcalls are not available in 32-bit.
1256     setLibcallName(RTLIB::SHL_I128, nullptr);
1257     setLibcallName(RTLIB::SRL_I128, nullptr);
1258     setLibcallName(RTLIB::SRA_I128, nullptr);
1259   }
1260 
1261   setStackPointerRegisterToSaveRestore(isPPC64 ? PPC::X1 : PPC::R1);
1262 
1263   // We have target-specific dag combine patterns for the following nodes:
1264   setTargetDAGCombine(ISD::ADD);
1265   setTargetDAGCombine(ISD::SHL);
1266   setTargetDAGCombine(ISD::SRA);
1267   setTargetDAGCombine(ISD::SRL);
1268   setTargetDAGCombine(ISD::MUL);
1269   setTargetDAGCombine(ISD::FMA);
1270   setTargetDAGCombine(ISD::SINT_TO_FP);
1271   setTargetDAGCombine(ISD::BUILD_VECTOR);
1272   if (Subtarget.hasFPCVT())
1273     setTargetDAGCombine(ISD::UINT_TO_FP);
1274   setTargetDAGCombine(ISD::LOAD);
1275   setTargetDAGCombine(ISD::STORE);
1276   setTargetDAGCombine(ISD::BR_CC);
1277   if (Subtarget.useCRBits())
1278     setTargetDAGCombine(ISD::BRCOND);
1279   setTargetDAGCombine(ISD::BSWAP);
1280   setTargetDAGCombine(ISD::INTRINSIC_WO_CHAIN);
1281   setTargetDAGCombine(ISD::INTRINSIC_W_CHAIN);
1282   setTargetDAGCombine(ISD::INTRINSIC_VOID);
1283 
1284   setTargetDAGCombine(ISD::SIGN_EXTEND);
1285   setTargetDAGCombine(ISD::ZERO_EXTEND);
1286   setTargetDAGCombine(ISD::ANY_EXTEND);
1287 
1288   setTargetDAGCombine(ISD::TRUNCATE);
1289   setTargetDAGCombine(ISD::VECTOR_SHUFFLE);
1290 
1291 
1292   if (Subtarget.useCRBits()) {
1293     setTargetDAGCombine(ISD::TRUNCATE);
1294     setTargetDAGCombine(ISD::SETCC);
1295     setTargetDAGCombine(ISD::SELECT_CC);
1296   }
1297 
1298   // Use reciprocal estimates.
1299   if (TM.Options.UnsafeFPMath) {
1300     setTargetDAGCombine(ISD::FDIV);
1301     setTargetDAGCombine(ISD::FSQRT);
1302   }
1303 
1304   if (Subtarget.hasP9Altivec()) {
1305     setTargetDAGCombine(ISD::ABS);
1306     setTargetDAGCombine(ISD::VSELECT);
1307   }
1308 
1309   setLibcallName(RTLIB::LOG_F128, "logf128");
1310   setLibcallName(RTLIB::LOG2_F128, "log2f128");
1311   setLibcallName(RTLIB::LOG10_F128, "log10f128");
1312   setLibcallName(RTLIB::EXP_F128, "expf128");
1313   setLibcallName(RTLIB::EXP2_F128, "exp2f128");
1314   setLibcallName(RTLIB::SIN_F128, "sinf128");
1315   setLibcallName(RTLIB::COS_F128, "cosf128");
1316   setLibcallName(RTLIB::POW_F128, "powf128");
1317   setLibcallName(RTLIB::FMIN_F128, "fminf128");
1318   setLibcallName(RTLIB::FMAX_F128, "fmaxf128");
1319   setLibcallName(RTLIB::POWI_F128, "__powikf2");
1320   setLibcallName(RTLIB::REM_F128, "fmodf128");
1321 
1322   // With 32 condition bits, we don't need to sink (and duplicate) compares
1323   // aggressively in CodeGenPrep.
1324   if (Subtarget.useCRBits()) {
1325     setHasMultipleConditionRegisters();
1326     setJumpIsExpensive();
1327   }
1328 
1329   setMinFunctionAlignment(Align(4));
1330 
1331   switch (Subtarget.getCPUDirective()) {
1332   default: break;
1333   case PPC::DIR_970:
1334   case PPC::DIR_A2:
1335   case PPC::DIR_E500:
1336   case PPC::DIR_E500mc:
1337   case PPC::DIR_E5500:
1338   case PPC::DIR_PWR4:
1339   case PPC::DIR_PWR5:
1340   case PPC::DIR_PWR5X:
1341   case PPC::DIR_PWR6:
1342   case PPC::DIR_PWR6X:
1343   case PPC::DIR_PWR7:
1344   case PPC::DIR_PWR8:
1345   case PPC::DIR_PWR9:
1346   case PPC::DIR_PWR10:
1347   case PPC::DIR_PWR_FUTURE:
1348     setPrefLoopAlignment(Align(16));
1349     setPrefFunctionAlignment(Align(16));
1350     break;
1351   }
1352 
1353   if (Subtarget.enableMachineScheduler())
1354     setSchedulingPreference(Sched::Source);
1355   else
1356     setSchedulingPreference(Sched::Hybrid);
1357 
1358   computeRegisterProperties(STI.getRegisterInfo());
1359 
1360   // The Freescale cores do better with aggressive inlining of memcpy and
1361   // friends. GCC uses same threshold of 128 bytes (= 32 word stores).
1362   if (Subtarget.getCPUDirective() == PPC::DIR_E500mc ||
1363       Subtarget.getCPUDirective() == PPC::DIR_E5500) {
1364     MaxStoresPerMemset = 32;
1365     MaxStoresPerMemsetOptSize = 16;
1366     MaxStoresPerMemcpy = 32;
1367     MaxStoresPerMemcpyOptSize = 8;
1368     MaxStoresPerMemmove = 32;
1369     MaxStoresPerMemmoveOptSize = 8;
1370   } else if (Subtarget.getCPUDirective() == PPC::DIR_A2) {
1371     // The A2 also benefits from (very) aggressive inlining of memcpy and
1372     // friends. The overhead of a the function call, even when warm, can be
1373     // over one hundred cycles.
1374     MaxStoresPerMemset = 128;
1375     MaxStoresPerMemcpy = 128;
1376     MaxStoresPerMemmove = 128;
1377     MaxLoadsPerMemcmp = 128;
1378   } else {
1379     MaxLoadsPerMemcmp = 8;
1380     MaxLoadsPerMemcmpOptSize = 4;
1381   }
1382 
1383   // Let the subtarget (CPU) decide if a predictable select is more expensive
1384   // than the corresponding branch. This information is used in CGP to decide
1385   // when to convert selects into branches.
1386   PredictableSelectIsExpensive = Subtarget.isPredictableSelectIsExpensive();
1387 }
1388 
1389 /// getMaxByValAlign - Helper for getByValTypeAlignment to determine
1390 /// the desired ByVal argument alignment.
1391 static void getMaxByValAlign(Type *Ty, Align &MaxAlign, Align MaxMaxAlign) {
1392   if (MaxAlign == MaxMaxAlign)
1393     return;
1394   if (VectorType *VTy = dyn_cast<VectorType>(Ty)) {
1395     if (MaxMaxAlign >= 32 &&
1396         VTy->getPrimitiveSizeInBits().getFixedSize() >= 256)
1397       MaxAlign = Align(32);
1398     else if (VTy->getPrimitiveSizeInBits().getFixedSize() >= 128 &&
1399              MaxAlign < 16)
1400       MaxAlign = Align(16);
1401   } else if (ArrayType *ATy = dyn_cast<ArrayType>(Ty)) {
1402     Align EltAlign;
1403     getMaxByValAlign(ATy->getElementType(), EltAlign, MaxMaxAlign);
1404     if (EltAlign > MaxAlign)
1405       MaxAlign = EltAlign;
1406   } else if (StructType *STy = dyn_cast<StructType>(Ty)) {
1407     for (auto *EltTy : STy->elements()) {
1408       Align EltAlign;
1409       getMaxByValAlign(EltTy, EltAlign, MaxMaxAlign);
1410       if (EltAlign > MaxAlign)
1411         MaxAlign = EltAlign;
1412       if (MaxAlign == MaxMaxAlign)
1413         break;
1414     }
1415   }
1416 }
1417 
1418 /// getByValTypeAlignment - Return the desired alignment for ByVal aggregate
1419 /// function arguments in the caller parameter area.
1420 unsigned PPCTargetLowering::getByValTypeAlignment(Type *Ty,
1421                                                   const DataLayout &DL) const {
1422   // 16byte and wider vectors are passed on 16byte boundary.
1423   // The rest is 8 on PPC64 and 4 on PPC32 boundary.
1424   Align Alignment = Subtarget.isPPC64() ? Align(8) : Align(4);
1425   if (Subtarget.hasAltivec() || Subtarget.hasQPX())
1426     getMaxByValAlign(Ty, Alignment, Subtarget.hasQPX() ? Align(32) : Align(16));
1427   return Alignment.value();
1428 }
1429 
1430 bool PPCTargetLowering::useSoftFloat() const {
1431   return Subtarget.useSoftFloat();
1432 }
1433 
1434 bool PPCTargetLowering::hasSPE() const {
1435   return Subtarget.hasSPE();
1436 }
1437 
1438 bool PPCTargetLowering::preferIncOfAddToSubOfNot(EVT VT) const {
1439   return VT.isScalarInteger();
1440 }
1441 
1442 /// isMulhCheaperThanMulShift - Return true if a mulh[s|u] node for a specific
1443 /// type is cheaper than a multiply followed by a shift.
1444 /// This is true for words and doublewords on 64-bit PowerPC.
1445 bool PPCTargetLowering::isMulhCheaperThanMulShift(EVT Type) const {
1446   if (Subtarget.isPPC64() && (isOperationLegal(ISD::MULHS, Type) ||
1447                               isOperationLegal(ISD::MULHU, Type)))
1448     return true;
1449   return TargetLowering::isMulhCheaperThanMulShift(Type);
1450 }
1451 
1452 const char *PPCTargetLowering::getTargetNodeName(unsigned Opcode) const {
1453   switch ((PPCISD::NodeType)Opcode) {
1454   case PPCISD::FIRST_NUMBER:    break;
1455   case PPCISD::FSEL:            return "PPCISD::FSEL";
1456   case PPCISD::XSMAXCDP:        return "PPCISD::XSMAXCDP";
1457   case PPCISD::XSMINCDP:        return "PPCISD::XSMINCDP";
1458   case PPCISD::FCFID:           return "PPCISD::FCFID";
1459   case PPCISD::FCFIDU:          return "PPCISD::FCFIDU";
1460   case PPCISD::FCFIDS:          return "PPCISD::FCFIDS";
1461   case PPCISD::FCFIDUS:         return "PPCISD::FCFIDUS";
1462   case PPCISD::FCTIDZ:          return "PPCISD::FCTIDZ";
1463   case PPCISD::FCTIWZ:          return "PPCISD::FCTIWZ";
1464   case PPCISD::FCTIDUZ:         return "PPCISD::FCTIDUZ";
1465   case PPCISD::FCTIWUZ:         return "PPCISD::FCTIWUZ";
1466   case PPCISD::FP_TO_UINT_IN_VSR:
1467                                 return "PPCISD::FP_TO_UINT_IN_VSR,";
1468   case PPCISD::FP_TO_SINT_IN_VSR:
1469                                 return "PPCISD::FP_TO_SINT_IN_VSR";
1470   case PPCISD::FRE:             return "PPCISD::FRE";
1471   case PPCISD::FRSQRTE:         return "PPCISD::FRSQRTE";
1472   case PPCISD::STFIWX:          return "PPCISD::STFIWX";
1473   case PPCISD::VPERM:           return "PPCISD::VPERM";
1474   case PPCISD::XXSPLT:          return "PPCISD::XXSPLT";
1475   case PPCISD::XXSPLTI_SP_TO_DP:
1476     return "PPCISD::XXSPLTI_SP_TO_DP";
1477   case PPCISD::XXSPLTI32DX:
1478     return "PPCISD::XXSPLTI32DX";
1479   case PPCISD::VECINSERT:       return "PPCISD::VECINSERT";
1480   case PPCISD::XXPERMDI:        return "PPCISD::XXPERMDI";
1481   case PPCISD::VECSHL:          return "PPCISD::VECSHL";
1482   case PPCISD::CMPB:            return "PPCISD::CMPB";
1483   case PPCISD::Hi:              return "PPCISD::Hi";
1484   case PPCISD::Lo:              return "PPCISD::Lo";
1485   case PPCISD::TOC_ENTRY:       return "PPCISD::TOC_ENTRY";
1486   case PPCISD::ATOMIC_CMP_SWAP_8: return "PPCISD::ATOMIC_CMP_SWAP_8";
1487   case PPCISD::ATOMIC_CMP_SWAP_16: return "PPCISD::ATOMIC_CMP_SWAP_16";
1488   case PPCISD::DYNALLOC:        return "PPCISD::DYNALLOC";
1489   case PPCISD::DYNAREAOFFSET:   return "PPCISD::DYNAREAOFFSET";
1490   case PPCISD::PROBED_ALLOCA:   return "PPCISD::PROBED_ALLOCA";
1491   case PPCISD::GlobalBaseReg:   return "PPCISD::GlobalBaseReg";
1492   case PPCISD::SRL:             return "PPCISD::SRL";
1493   case PPCISD::SRA:             return "PPCISD::SRA";
1494   case PPCISD::SHL:             return "PPCISD::SHL";
1495   case PPCISD::SRA_ADDZE:       return "PPCISD::SRA_ADDZE";
1496   case PPCISD::CALL:            return "PPCISD::CALL";
1497   case PPCISD::CALL_NOP:        return "PPCISD::CALL_NOP";
1498   case PPCISD::CALL_NOTOC:      return "PPCISD::CALL_NOTOC";
1499   case PPCISD::MTCTR:           return "PPCISD::MTCTR";
1500   case PPCISD::BCTRL:           return "PPCISD::BCTRL";
1501   case PPCISD::BCTRL_LOAD_TOC:  return "PPCISD::BCTRL_LOAD_TOC";
1502   case PPCISD::RET_FLAG:        return "PPCISD::RET_FLAG";
1503   case PPCISD::READ_TIME_BASE:  return "PPCISD::READ_TIME_BASE";
1504   case PPCISD::EH_SJLJ_SETJMP:  return "PPCISD::EH_SJLJ_SETJMP";
1505   case PPCISD::EH_SJLJ_LONGJMP: return "PPCISD::EH_SJLJ_LONGJMP";
1506   case PPCISD::MFOCRF:          return "PPCISD::MFOCRF";
1507   case PPCISD::MFVSR:           return "PPCISD::MFVSR";
1508   case PPCISD::MTVSRA:          return "PPCISD::MTVSRA";
1509   case PPCISD::MTVSRZ:          return "PPCISD::MTVSRZ";
1510   case PPCISD::SINT_VEC_TO_FP:  return "PPCISD::SINT_VEC_TO_FP";
1511   case PPCISD::UINT_VEC_TO_FP:  return "PPCISD::UINT_VEC_TO_FP";
1512   case PPCISD::SCALAR_TO_VECTOR_PERMUTED:
1513     return "PPCISD::SCALAR_TO_VECTOR_PERMUTED";
1514   case PPCISD::ANDI_rec_1_EQ_BIT:
1515     return "PPCISD::ANDI_rec_1_EQ_BIT";
1516   case PPCISD::ANDI_rec_1_GT_BIT:
1517     return "PPCISD::ANDI_rec_1_GT_BIT";
1518   case PPCISD::VCMP:            return "PPCISD::VCMP";
1519   case PPCISD::VCMPo:           return "PPCISD::VCMPo";
1520   case PPCISD::LBRX:            return "PPCISD::LBRX";
1521   case PPCISD::STBRX:           return "PPCISD::STBRX";
1522   case PPCISD::LFIWAX:          return "PPCISD::LFIWAX";
1523   case PPCISD::LFIWZX:          return "PPCISD::LFIWZX";
1524   case PPCISD::LXSIZX:          return "PPCISD::LXSIZX";
1525   case PPCISD::STXSIX:          return "PPCISD::STXSIX";
1526   case PPCISD::VEXTS:           return "PPCISD::VEXTS";
1527   case PPCISD::LXVD2X:          return "PPCISD::LXVD2X";
1528   case PPCISD::STXVD2X:         return "PPCISD::STXVD2X";
1529   case PPCISD::LOAD_VEC_BE:     return "PPCISD::LOAD_VEC_BE";
1530   case PPCISD::STORE_VEC_BE:    return "PPCISD::STORE_VEC_BE";
1531   case PPCISD::ST_VSR_SCAL_INT:
1532                                 return "PPCISD::ST_VSR_SCAL_INT";
1533   case PPCISD::COND_BRANCH:     return "PPCISD::COND_BRANCH";
1534   case PPCISD::BDNZ:            return "PPCISD::BDNZ";
1535   case PPCISD::BDZ:             return "PPCISD::BDZ";
1536   case PPCISD::MFFS:            return "PPCISD::MFFS";
1537   case PPCISD::FADDRTZ:         return "PPCISD::FADDRTZ";
1538   case PPCISD::TC_RETURN:       return "PPCISD::TC_RETURN";
1539   case PPCISD::CR6SET:          return "PPCISD::CR6SET";
1540   case PPCISD::CR6UNSET:        return "PPCISD::CR6UNSET";
1541   case PPCISD::PPC32_GOT:       return "PPCISD::PPC32_GOT";
1542   case PPCISD::PPC32_PICGOT:    return "PPCISD::PPC32_PICGOT";
1543   case PPCISD::ADDIS_GOT_TPREL_HA: return "PPCISD::ADDIS_GOT_TPREL_HA";
1544   case PPCISD::LD_GOT_TPREL_L:  return "PPCISD::LD_GOT_TPREL_L";
1545   case PPCISD::ADD_TLS:         return "PPCISD::ADD_TLS";
1546   case PPCISD::ADDIS_TLSGD_HA:  return "PPCISD::ADDIS_TLSGD_HA";
1547   case PPCISD::ADDI_TLSGD_L:    return "PPCISD::ADDI_TLSGD_L";
1548   case PPCISD::GET_TLS_ADDR:    return "PPCISD::GET_TLS_ADDR";
1549   case PPCISD::ADDI_TLSGD_L_ADDR: return "PPCISD::ADDI_TLSGD_L_ADDR";
1550   case PPCISD::ADDIS_TLSLD_HA:  return "PPCISD::ADDIS_TLSLD_HA";
1551   case PPCISD::ADDI_TLSLD_L:    return "PPCISD::ADDI_TLSLD_L";
1552   case PPCISD::GET_TLSLD_ADDR:  return "PPCISD::GET_TLSLD_ADDR";
1553   case PPCISD::ADDI_TLSLD_L_ADDR: return "PPCISD::ADDI_TLSLD_L_ADDR";
1554   case PPCISD::ADDIS_DTPREL_HA: return "PPCISD::ADDIS_DTPREL_HA";
1555   case PPCISD::ADDI_DTPREL_L:   return "PPCISD::ADDI_DTPREL_L";
1556   case PPCISD::VADD_SPLAT:      return "PPCISD::VADD_SPLAT";
1557   case PPCISD::SC:              return "PPCISD::SC";
1558   case PPCISD::CLRBHRB:         return "PPCISD::CLRBHRB";
1559   case PPCISD::MFBHRBE:         return "PPCISD::MFBHRBE";
1560   case PPCISD::RFEBB:           return "PPCISD::RFEBB";
1561   case PPCISD::XXSWAPD:         return "PPCISD::XXSWAPD";
1562   case PPCISD::SWAP_NO_CHAIN:   return "PPCISD::SWAP_NO_CHAIN";
1563   case PPCISD::VABSD:           return "PPCISD::VABSD";
1564   case PPCISD::QVFPERM:         return "PPCISD::QVFPERM";
1565   case PPCISD::QVGPCI:          return "PPCISD::QVGPCI";
1566   case PPCISD::QVALIGNI:        return "PPCISD::QVALIGNI";
1567   case PPCISD::QVESPLATI:       return "PPCISD::QVESPLATI";
1568   case PPCISD::QBFLT:           return "PPCISD::QBFLT";
1569   case PPCISD::QVLFSb:          return "PPCISD::QVLFSb";
1570   case PPCISD::BUILD_FP128:     return "PPCISD::BUILD_FP128";
1571   case PPCISD::BUILD_SPE64:     return "PPCISD::BUILD_SPE64";
1572   case PPCISD::EXTRACT_SPE:     return "PPCISD::EXTRACT_SPE";
1573   case PPCISD::EXTSWSLI:        return "PPCISD::EXTSWSLI";
1574   case PPCISD::LD_VSX_LH:       return "PPCISD::LD_VSX_LH";
1575   case PPCISD::FP_EXTEND_HALF:  return "PPCISD::FP_EXTEND_HALF";
1576   case PPCISD::MAT_PCREL_ADDR:  return "PPCISD::MAT_PCREL_ADDR";
1577   case PPCISD::LD_SPLAT:        return "PPCISD::LD_SPLAT";
1578   case PPCISD::FNMSUB:          return "PPCISD::FNMSUB";
1579   }
1580   return nullptr;
1581 }
1582 
1583 EVT PPCTargetLowering::getSetCCResultType(const DataLayout &DL, LLVMContext &C,
1584                                           EVT VT) const {
1585   if (!VT.isVector())
1586     return Subtarget.useCRBits() ? MVT::i1 : MVT::i32;
1587 
1588   if (Subtarget.hasQPX())
1589     return EVT::getVectorVT(C, MVT::i1, VT.getVectorNumElements());
1590 
1591   return VT.changeVectorElementTypeToInteger();
1592 }
1593 
1594 bool PPCTargetLowering::enableAggressiveFMAFusion(EVT VT) const {
1595   assert(VT.isFloatingPoint() && "Non-floating-point FMA?");
1596   return true;
1597 }
1598 
1599 //===----------------------------------------------------------------------===//
1600 // Node matching predicates, for use by the tblgen matching code.
1601 //===----------------------------------------------------------------------===//
1602 
1603 /// isFloatingPointZero - Return true if this is 0.0 or -0.0.
1604 static bool isFloatingPointZero(SDValue Op) {
1605   if (ConstantFPSDNode *CFP = dyn_cast<ConstantFPSDNode>(Op))
1606     return CFP->getValueAPF().isZero();
1607   else if (ISD::isEXTLoad(Op.getNode()) || ISD::isNON_EXTLoad(Op.getNode())) {
1608     // Maybe this has already been legalized into the constant pool?
1609     if (ConstantPoolSDNode *CP = dyn_cast<ConstantPoolSDNode>(Op.getOperand(1)))
1610       if (const ConstantFP *CFP = dyn_cast<ConstantFP>(CP->getConstVal()))
1611         return CFP->getValueAPF().isZero();
1612   }
1613   return false;
1614 }
1615 
1616 /// isConstantOrUndef - Op is either an undef node or a ConstantSDNode.  Return
1617 /// true if Op is undef or if it matches the specified value.
1618 static bool isConstantOrUndef(int Op, int Val) {
1619   return Op < 0 || Op == Val;
1620 }
1621 
1622 /// isVPKUHUMShuffleMask - Return true if this is the shuffle mask for a
1623 /// VPKUHUM instruction.
1624 /// The ShuffleKind distinguishes between big-endian operations with
1625 /// two different inputs (0), either-endian operations with two identical
1626 /// inputs (1), and little-endian operations with two different inputs (2).
1627 /// For the latter, the input operands are swapped (see PPCInstrAltivec.td).
1628 bool PPC::isVPKUHUMShuffleMask(ShuffleVectorSDNode *N, unsigned ShuffleKind,
1629                                SelectionDAG &DAG) {
1630   bool IsLE = DAG.getDataLayout().isLittleEndian();
1631   if (ShuffleKind == 0) {
1632     if (IsLE)
1633       return false;
1634     for (unsigned i = 0; i != 16; ++i)
1635       if (!isConstantOrUndef(N->getMaskElt(i), i*2+1))
1636         return false;
1637   } else if (ShuffleKind == 2) {
1638     if (!IsLE)
1639       return false;
1640     for (unsigned i = 0; i != 16; ++i)
1641       if (!isConstantOrUndef(N->getMaskElt(i), i*2))
1642         return false;
1643   } else if (ShuffleKind == 1) {
1644     unsigned j = IsLE ? 0 : 1;
1645     for (unsigned i = 0; i != 8; ++i)
1646       if (!isConstantOrUndef(N->getMaskElt(i),    i*2+j) ||
1647           !isConstantOrUndef(N->getMaskElt(i+8),  i*2+j))
1648         return false;
1649   }
1650   return true;
1651 }
1652 
1653 /// isVPKUWUMShuffleMask - Return true if this is the shuffle mask for a
1654 /// VPKUWUM instruction.
1655 /// The ShuffleKind distinguishes between big-endian operations with
1656 /// two different inputs (0), either-endian operations with two identical
1657 /// inputs (1), and little-endian operations with two different inputs (2).
1658 /// For the latter, the input operands are swapped (see PPCInstrAltivec.td).
1659 bool PPC::isVPKUWUMShuffleMask(ShuffleVectorSDNode *N, unsigned ShuffleKind,
1660                                SelectionDAG &DAG) {
1661   bool IsLE = DAG.getDataLayout().isLittleEndian();
1662   if (ShuffleKind == 0) {
1663     if (IsLE)
1664       return false;
1665     for (unsigned i = 0; i != 16; i += 2)
1666       if (!isConstantOrUndef(N->getMaskElt(i  ),  i*2+2) ||
1667           !isConstantOrUndef(N->getMaskElt(i+1),  i*2+3))
1668         return false;
1669   } else if (ShuffleKind == 2) {
1670     if (!IsLE)
1671       return false;
1672     for (unsigned i = 0; i != 16; i += 2)
1673       if (!isConstantOrUndef(N->getMaskElt(i  ),  i*2) ||
1674           !isConstantOrUndef(N->getMaskElt(i+1),  i*2+1))
1675         return false;
1676   } else if (ShuffleKind == 1) {
1677     unsigned j = IsLE ? 0 : 2;
1678     for (unsigned i = 0; i != 8; i += 2)
1679       if (!isConstantOrUndef(N->getMaskElt(i  ),  i*2+j)   ||
1680           !isConstantOrUndef(N->getMaskElt(i+1),  i*2+j+1) ||
1681           !isConstantOrUndef(N->getMaskElt(i+8),  i*2+j)   ||
1682           !isConstantOrUndef(N->getMaskElt(i+9),  i*2+j+1))
1683         return false;
1684   }
1685   return true;
1686 }
1687 
1688 /// isVPKUDUMShuffleMask - Return true if this is the shuffle mask for a
1689 /// VPKUDUM instruction, AND the VPKUDUM instruction exists for the
1690 /// current subtarget.
1691 ///
1692 /// The ShuffleKind distinguishes between big-endian operations with
1693 /// two different inputs (0), either-endian operations with two identical
1694 /// inputs (1), and little-endian operations with two different inputs (2).
1695 /// For the latter, the input operands are swapped (see PPCInstrAltivec.td).
1696 bool PPC::isVPKUDUMShuffleMask(ShuffleVectorSDNode *N, unsigned ShuffleKind,
1697                                SelectionDAG &DAG) {
1698   const PPCSubtarget& Subtarget =
1699       static_cast<const PPCSubtarget&>(DAG.getSubtarget());
1700   if (!Subtarget.hasP8Vector())
1701     return false;
1702 
1703   bool IsLE = DAG.getDataLayout().isLittleEndian();
1704   if (ShuffleKind == 0) {
1705     if (IsLE)
1706       return false;
1707     for (unsigned i = 0; i != 16; i += 4)
1708       if (!isConstantOrUndef(N->getMaskElt(i  ),  i*2+4) ||
1709           !isConstantOrUndef(N->getMaskElt(i+1),  i*2+5) ||
1710           !isConstantOrUndef(N->getMaskElt(i+2),  i*2+6) ||
1711           !isConstantOrUndef(N->getMaskElt(i+3),  i*2+7))
1712         return false;
1713   } else if (ShuffleKind == 2) {
1714     if (!IsLE)
1715       return false;
1716     for (unsigned i = 0; i != 16; i += 4)
1717       if (!isConstantOrUndef(N->getMaskElt(i  ),  i*2) ||
1718           !isConstantOrUndef(N->getMaskElt(i+1),  i*2+1) ||
1719           !isConstantOrUndef(N->getMaskElt(i+2),  i*2+2) ||
1720           !isConstantOrUndef(N->getMaskElt(i+3),  i*2+3))
1721         return false;
1722   } else if (ShuffleKind == 1) {
1723     unsigned j = IsLE ? 0 : 4;
1724     for (unsigned i = 0; i != 8; i += 4)
1725       if (!isConstantOrUndef(N->getMaskElt(i  ),  i*2+j)   ||
1726           !isConstantOrUndef(N->getMaskElt(i+1),  i*2+j+1) ||
1727           !isConstantOrUndef(N->getMaskElt(i+2),  i*2+j+2) ||
1728           !isConstantOrUndef(N->getMaskElt(i+3),  i*2+j+3) ||
1729           !isConstantOrUndef(N->getMaskElt(i+8),  i*2+j)   ||
1730           !isConstantOrUndef(N->getMaskElt(i+9),  i*2+j+1) ||
1731           !isConstantOrUndef(N->getMaskElt(i+10), i*2+j+2) ||
1732           !isConstantOrUndef(N->getMaskElt(i+11), i*2+j+3))
1733         return false;
1734   }
1735   return true;
1736 }
1737 
1738 /// isVMerge - Common function, used to match vmrg* shuffles.
1739 ///
1740 static bool isVMerge(ShuffleVectorSDNode *N, unsigned UnitSize,
1741                      unsigned LHSStart, unsigned RHSStart) {
1742   if (N->getValueType(0) != MVT::v16i8)
1743     return false;
1744   assert((UnitSize == 1 || UnitSize == 2 || UnitSize == 4) &&
1745          "Unsupported merge size!");
1746 
1747   for (unsigned i = 0; i != 8/UnitSize; ++i)     // Step over units
1748     for (unsigned j = 0; j != UnitSize; ++j) {   // Step over bytes within unit
1749       if (!isConstantOrUndef(N->getMaskElt(i*UnitSize*2+j),
1750                              LHSStart+j+i*UnitSize) ||
1751           !isConstantOrUndef(N->getMaskElt(i*UnitSize*2+UnitSize+j),
1752                              RHSStart+j+i*UnitSize))
1753         return false;
1754     }
1755   return true;
1756 }
1757 
1758 /// isVMRGLShuffleMask - Return true if this is a shuffle mask suitable for
1759 /// a VMRGL* instruction with the specified unit size (1,2 or 4 bytes).
1760 /// The ShuffleKind distinguishes between big-endian merges with two
1761 /// different inputs (0), either-endian merges with two identical inputs (1),
1762 /// and little-endian merges with two different inputs (2).  For the latter,
1763 /// the input operands are swapped (see PPCInstrAltivec.td).
1764 bool PPC::isVMRGLShuffleMask(ShuffleVectorSDNode *N, unsigned UnitSize,
1765                              unsigned ShuffleKind, SelectionDAG &DAG) {
1766   if (DAG.getDataLayout().isLittleEndian()) {
1767     if (ShuffleKind == 1) // unary
1768       return isVMerge(N, UnitSize, 0, 0);
1769     else if (ShuffleKind == 2) // swapped
1770       return isVMerge(N, UnitSize, 0, 16);
1771     else
1772       return false;
1773   } else {
1774     if (ShuffleKind == 1) // unary
1775       return isVMerge(N, UnitSize, 8, 8);
1776     else if (ShuffleKind == 0) // normal
1777       return isVMerge(N, UnitSize, 8, 24);
1778     else
1779       return false;
1780   }
1781 }
1782 
1783 /// isVMRGHShuffleMask - Return true if this is a shuffle mask suitable for
1784 /// a VMRGH* instruction with the specified unit size (1,2 or 4 bytes).
1785 /// The ShuffleKind distinguishes between big-endian merges with two
1786 /// different inputs (0), either-endian merges with two identical inputs (1),
1787 /// and little-endian merges with two different inputs (2).  For the latter,
1788 /// the input operands are swapped (see PPCInstrAltivec.td).
1789 bool PPC::isVMRGHShuffleMask(ShuffleVectorSDNode *N, unsigned UnitSize,
1790                              unsigned ShuffleKind, SelectionDAG &DAG) {
1791   if (DAG.getDataLayout().isLittleEndian()) {
1792     if (ShuffleKind == 1) // unary
1793       return isVMerge(N, UnitSize, 8, 8);
1794     else if (ShuffleKind == 2) // swapped
1795       return isVMerge(N, UnitSize, 8, 24);
1796     else
1797       return false;
1798   } else {
1799     if (ShuffleKind == 1) // unary
1800       return isVMerge(N, UnitSize, 0, 0);
1801     else if (ShuffleKind == 0) // normal
1802       return isVMerge(N, UnitSize, 0, 16);
1803     else
1804       return false;
1805   }
1806 }
1807 
1808 /**
1809  * Common function used to match vmrgew and vmrgow shuffles
1810  *
1811  * The indexOffset determines whether to look for even or odd words in
1812  * the shuffle mask. This is based on the of the endianness of the target
1813  * machine.
1814  *   - Little Endian:
1815  *     - Use offset of 0 to check for odd elements
1816  *     - Use offset of 4 to check for even elements
1817  *   - Big Endian:
1818  *     - Use offset of 0 to check for even elements
1819  *     - Use offset of 4 to check for odd elements
1820  * A detailed description of the vector element ordering for little endian and
1821  * big endian can be found at
1822  * http://www.ibm.com/developerworks/library/l-ibm-xl-c-cpp-compiler/index.html
1823  * Targeting your applications - what little endian and big endian IBM XL C/C++
1824  * compiler differences mean to you
1825  *
1826  * The mask to the shuffle vector instruction specifies the indices of the
1827  * elements from the two input vectors to place in the result. The elements are
1828  * numbered in array-access order, starting with the first vector. These vectors
1829  * are always of type v16i8, thus each vector will contain 16 elements of size
1830  * 8. More info on the shuffle vector can be found in the
1831  * http://llvm.org/docs/LangRef.html#shufflevector-instruction
1832  * Language Reference.
1833  *
1834  * The RHSStartValue indicates whether the same input vectors are used (unary)
1835  * or two different input vectors are used, based on the following:
1836  *   - If the instruction uses the same vector for both inputs, the range of the
1837  *     indices will be 0 to 15. In this case, the RHSStart value passed should
1838  *     be 0.
1839  *   - If the instruction has two different vectors then the range of the
1840  *     indices will be 0 to 31. In this case, the RHSStart value passed should
1841  *     be 16 (indices 0-15 specify elements in the first vector while indices 16
1842  *     to 31 specify elements in the second vector).
1843  *
1844  * \param[in] N The shuffle vector SD Node to analyze
1845  * \param[in] IndexOffset Specifies whether to look for even or odd elements
1846  * \param[in] RHSStartValue Specifies the starting index for the righthand input
1847  * vector to the shuffle_vector instruction
1848  * \return true iff this shuffle vector represents an even or odd word merge
1849  */
1850 static bool isVMerge(ShuffleVectorSDNode *N, unsigned IndexOffset,
1851                      unsigned RHSStartValue) {
1852   if (N->getValueType(0) != MVT::v16i8)
1853     return false;
1854 
1855   for (unsigned i = 0; i < 2; ++i)
1856     for (unsigned j = 0; j < 4; ++j)
1857       if (!isConstantOrUndef(N->getMaskElt(i*4+j),
1858                              i*RHSStartValue+j+IndexOffset) ||
1859           !isConstantOrUndef(N->getMaskElt(i*4+j+8),
1860                              i*RHSStartValue+j+IndexOffset+8))
1861         return false;
1862   return true;
1863 }
1864 
1865 /**
1866  * Determine if the specified shuffle mask is suitable for the vmrgew or
1867  * vmrgow instructions.
1868  *
1869  * \param[in] N The shuffle vector SD Node to analyze
1870  * \param[in] CheckEven Check for an even merge (true) or an odd merge (false)
1871  * \param[in] ShuffleKind Identify the type of merge:
1872  *   - 0 = big-endian merge with two different inputs;
1873  *   - 1 = either-endian merge with two identical inputs;
1874  *   - 2 = little-endian merge with two different inputs (inputs are swapped for
1875  *     little-endian merges).
1876  * \param[in] DAG The current SelectionDAG
1877  * \return true iff this shuffle mask
1878  */
1879 bool PPC::isVMRGEOShuffleMask(ShuffleVectorSDNode *N, bool CheckEven,
1880                               unsigned ShuffleKind, SelectionDAG &DAG) {
1881   if (DAG.getDataLayout().isLittleEndian()) {
1882     unsigned indexOffset = CheckEven ? 4 : 0;
1883     if (ShuffleKind == 1) // Unary
1884       return isVMerge(N, indexOffset, 0);
1885     else if (ShuffleKind == 2) // swapped
1886       return isVMerge(N, indexOffset, 16);
1887     else
1888       return false;
1889   }
1890   else {
1891     unsigned indexOffset = CheckEven ? 0 : 4;
1892     if (ShuffleKind == 1) // Unary
1893       return isVMerge(N, indexOffset, 0);
1894     else if (ShuffleKind == 0) // Normal
1895       return isVMerge(N, indexOffset, 16);
1896     else
1897       return false;
1898   }
1899   return false;
1900 }
1901 
1902 /// isVSLDOIShuffleMask - If this is a vsldoi shuffle mask, return the shift
1903 /// amount, otherwise return -1.
1904 /// The ShuffleKind distinguishes between big-endian operations with two
1905 /// different inputs (0), either-endian operations with two identical inputs
1906 /// (1), and little-endian operations with two different inputs (2).  For the
1907 /// latter, the input operands are swapped (see PPCInstrAltivec.td).
1908 int PPC::isVSLDOIShuffleMask(SDNode *N, unsigned ShuffleKind,
1909                              SelectionDAG &DAG) {
1910   if (N->getValueType(0) != MVT::v16i8)
1911     return -1;
1912 
1913   ShuffleVectorSDNode *SVOp = cast<ShuffleVectorSDNode>(N);
1914 
1915   // Find the first non-undef value in the shuffle mask.
1916   unsigned i;
1917   for (i = 0; i != 16 && SVOp->getMaskElt(i) < 0; ++i)
1918     /*search*/;
1919 
1920   if (i == 16) return -1;  // all undef.
1921 
1922   // Otherwise, check to see if the rest of the elements are consecutively
1923   // numbered from this value.
1924   unsigned ShiftAmt = SVOp->getMaskElt(i);
1925   if (ShiftAmt < i) return -1;
1926 
1927   ShiftAmt -= i;
1928   bool isLE = DAG.getDataLayout().isLittleEndian();
1929 
1930   if ((ShuffleKind == 0 && !isLE) || (ShuffleKind == 2 && isLE)) {
1931     // Check the rest of the elements to see if they are consecutive.
1932     for (++i; i != 16; ++i)
1933       if (!isConstantOrUndef(SVOp->getMaskElt(i), ShiftAmt+i))
1934         return -1;
1935   } else if (ShuffleKind == 1) {
1936     // Check the rest of the elements to see if they are consecutive.
1937     for (++i; i != 16; ++i)
1938       if (!isConstantOrUndef(SVOp->getMaskElt(i), (ShiftAmt+i) & 15))
1939         return -1;
1940   } else
1941     return -1;
1942 
1943   if (isLE)
1944     ShiftAmt = 16 - ShiftAmt;
1945 
1946   return ShiftAmt;
1947 }
1948 
1949 /// isSplatShuffleMask - Return true if the specified VECTOR_SHUFFLE operand
1950 /// specifies a splat of a single element that is suitable for input to
1951 /// one of the splat operations (VSPLTB/VSPLTH/VSPLTW/XXSPLTW/LXVDSX/etc.).
1952 bool PPC::isSplatShuffleMask(ShuffleVectorSDNode *N, unsigned EltSize) {
1953   assert(N->getValueType(0) == MVT::v16i8 && isPowerOf2_32(EltSize) &&
1954          EltSize <= 8 && "Can only handle 1,2,4,8 byte element sizes");
1955 
1956   // The consecutive indices need to specify an element, not part of two
1957   // different elements.  So abandon ship early if this isn't the case.
1958   if (N->getMaskElt(0) % EltSize != 0)
1959     return false;
1960 
1961   // This is a splat operation if each element of the permute is the same, and
1962   // if the value doesn't reference the second vector.
1963   unsigned ElementBase = N->getMaskElt(0);
1964 
1965   // FIXME: Handle UNDEF elements too!
1966   if (ElementBase >= 16)
1967     return false;
1968 
1969   // Check that the indices are consecutive, in the case of a multi-byte element
1970   // splatted with a v16i8 mask.
1971   for (unsigned i = 1; i != EltSize; ++i)
1972     if (N->getMaskElt(i) < 0 || N->getMaskElt(i) != (int)(i+ElementBase))
1973       return false;
1974 
1975   for (unsigned i = EltSize, e = 16; i != e; i += EltSize) {
1976     if (N->getMaskElt(i) < 0) continue;
1977     for (unsigned j = 0; j != EltSize; ++j)
1978       if (N->getMaskElt(i+j) != N->getMaskElt(j))
1979         return false;
1980   }
1981   return true;
1982 }
1983 
1984 /// Check that the mask is shuffling N byte elements. Within each N byte
1985 /// element of the mask, the indices could be either in increasing or
1986 /// decreasing order as long as they are consecutive.
1987 /// \param[in] N the shuffle vector SD Node to analyze
1988 /// \param[in] Width the element width in bytes, could be 2/4/8/16 (HalfWord/
1989 /// Word/DoubleWord/QuadWord).
1990 /// \param[in] StepLen the delta indices number among the N byte element, if
1991 /// the mask is in increasing/decreasing order then it is 1/-1.
1992 /// \return true iff the mask is shuffling N byte elements.
1993 static bool isNByteElemShuffleMask(ShuffleVectorSDNode *N, unsigned Width,
1994                                    int StepLen) {
1995   assert((Width == 2 || Width == 4 || Width == 8 || Width == 16) &&
1996          "Unexpected element width.");
1997   assert((StepLen == 1 || StepLen == -1) && "Unexpected element width.");
1998 
1999   unsigned NumOfElem = 16 / Width;
2000   unsigned MaskVal[16]; //  Width is never greater than 16
2001   for (unsigned i = 0; i < NumOfElem; ++i) {
2002     MaskVal[0] = N->getMaskElt(i * Width);
2003     if ((StepLen == 1) && (MaskVal[0] % Width)) {
2004       return false;
2005     } else if ((StepLen == -1) && ((MaskVal[0] + 1) % Width)) {
2006       return false;
2007     }
2008 
2009     for (unsigned int j = 1; j < Width; ++j) {
2010       MaskVal[j] = N->getMaskElt(i * Width + j);
2011       if (MaskVal[j] != MaskVal[j-1] + StepLen) {
2012         return false;
2013       }
2014     }
2015   }
2016 
2017   return true;
2018 }
2019 
2020 bool PPC::isXXINSERTWMask(ShuffleVectorSDNode *N, unsigned &ShiftElts,
2021                           unsigned &InsertAtByte, bool &Swap, bool IsLE) {
2022   if (!isNByteElemShuffleMask(N, 4, 1))
2023     return false;
2024 
2025   // Now we look at mask elements 0,4,8,12
2026   unsigned M0 = N->getMaskElt(0) / 4;
2027   unsigned M1 = N->getMaskElt(4) / 4;
2028   unsigned M2 = N->getMaskElt(8) / 4;
2029   unsigned M3 = N->getMaskElt(12) / 4;
2030   unsigned LittleEndianShifts[] = { 2, 1, 0, 3 };
2031   unsigned BigEndianShifts[] = { 3, 0, 1, 2 };
2032 
2033   // Below, let H and L be arbitrary elements of the shuffle mask
2034   // where H is in the range [4,7] and L is in the range [0,3].
2035   // H, 1, 2, 3 or L, 5, 6, 7
2036   if ((M0 > 3 && M1 == 1 && M2 == 2 && M3 == 3) ||
2037       (M0 < 4 && M1 == 5 && M2 == 6 && M3 == 7)) {
2038     ShiftElts = IsLE ? LittleEndianShifts[M0 & 0x3] : BigEndianShifts[M0 & 0x3];
2039     InsertAtByte = IsLE ? 12 : 0;
2040     Swap = M0 < 4;
2041     return true;
2042   }
2043   // 0, H, 2, 3 or 4, L, 6, 7
2044   if ((M1 > 3 && M0 == 0 && M2 == 2 && M3 == 3) ||
2045       (M1 < 4 && M0 == 4 && M2 == 6 && M3 == 7)) {
2046     ShiftElts = IsLE ? LittleEndianShifts[M1 & 0x3] : BigEndianShifts[M1 & 0x3];
2047     InsertAtByte = IsLE ? 8 : 4;
2048     Swap = M1 < 4;
2049     return true;
2050   }
2051   // 0, 1, H, 3 or 4, 5, L, 7
2052   if ((M2 > 3 && M0 == 0 && M1 == 1 && M3 == 3) ||
2053       (M2 < 4 && M0 == 4 && M1 == 5 && M3 == 7)) {
2054     ShiftElts = IsLE ? LittleEndianShifts[M2 & 0x3] : BigEndianShifts[M2 & 0x3];
2055     InsertAtByte = IsLE ? 4 : 8;
2056     Swap = M2 < 4;
2057     return true;
2058   }
2059   // 0, 1, 2, H or 4, 5, 6, L
2060   if ((M3 > 3 && M0 == 0 && M1 == 1 && M2 == 2) ||
2061       (M3 < 4 && M0 == 4 && M1 == 5 && M2 == 6)) {
2062     ShiftElts = IsLE ? LittleEndianShifts[M3 & 0x3] : BigEndianShifts[M3 & 0x3];
2063     InsertAtByte = IsLE ? 0 : 12;
2064     Swap = M3 < 4;
2065     return true;
2066   }
2067 
2068   // If both vector operands for the shuffle are the same vector, the mask will
2069   // contain only elements from the first one and the second one will be undef.
2070   if (N->getOperand(1).isUndef()) {
2071     ShiftElts = 0;
2072     Swap = true;
2073     unsigned XXINSERTWSrcElem = IsLE ? 2 : 1;
2074     if (M0 == XXINSERTWSrcElem && M1 == 1 && M2 == 2 && M3 == 3) {
2075       InsertAtByte = IsLE ? 12 : 0;
2076       return true;
2077     }
2078     if (M0 == 0 && M1 == XXINSERTWSrcElem && M2 == 2 && M3 == 3) {
2079       InsertAtByte = IsLE ? 8 : 4;
2080       return true;
2081     }
2082     if (M0 == 0 && M1 == 1 && M2 == XXINSERTWSrcElem && M3 == 3) {
2083       InsertAtByte = IsLE ? 4 : 8;
2084       return true;
2085     }
2086     if (M0 == 0 && M1 == 1 && M2 == 2 && M3 == XXINSERTWSrcElem) {
2087       InsertAtByte = IsLE ? 0 : 12;
2088       return true;
2089     }
2090   }
2091 
2092   return false;
2093 }
2094 
2095 bool PPC::isXXSLDWIShuffleMask(ShuffleVectorSDNode *N, unsigned &ShiftElts,
2096                                bool &Swap, bool IsLE) {
2097   assert(N->getValueType(0) == MVT::v16i8 && "Shuffle vector expects v16i8");
2098   // Ensure each byte index of the word is consecutive.
2099   if (!isNByteElemShuffleMask(N, 4, 1))
2100     return false;
2101 
2102   // Now we look at mask elements 0,4,8,12, which are the beginning of words.
2103   unsigned M0 = N->getMaskElt(0) / 4;
2104   unsigned M1 = N->getMaskElt(4) / 4;
2105   unsigned M2 = N->getMaskElt(8) / 4;
2106   unsigned M3 = N->getMaskElt(12) / 4;
2107 
2108   // If both vector operands for the shuffle are the same vector, the mask will
2109   // contain only elements from the first one and the second one will be undef.
2110   if (N->getOperand(1).isUndef()) {
2111     assert(M0 < 4 && "Indexing into an undef vector?");
2112     if (M1 != (M0 + 1) % 4 || M2 != (M1 + 1) % 4 || M3 != (M2 + 1) % 4)
2113       return false;
2114 
2115     ShiftElts = IsLE ? (4 - M0) % 4 : M0;
2116     Swap = false;
2117     return true;
2118   }
2119 
2120   // Ensure each word index of the ShuffleVector Mask is consecutive.
2121   if (M1 != (M0 + 1) % 8 || M2 != (M1 + 1) % 8 || M3 != (M2 + 1) % 8)
2122     return false;
2123 
2124   if (IsLE) {
2125     if (M0 == 0 || M0 == 7 || M0 == 6 || M0 == 5) {
2126       // Input vectors don't need to be swapped if the leading element
2127       // of the result is one of the 3 left elements of the second vector
2128       // (or if there is no shift to be done at all).
2129       Swap = false;
2130       ShiftElts = (8 - M0) % 8;
2131     } else if (M0 == 4 || M0 == 3 || M0 == 2 || M0 == 1) {
2132       // Input vectors need to be swapped if the leading element
2133       // of the result is one of the 3 left elements of the first vector
2134       // (or if we're shifting by 4 - thereby simply swapping the vectors).
2135       Swap = true;
2136       ShiftElts = (4 - M0) % 4;
2137     }
2138 
2139     return true;
2140   } else {                                          // BE
2141     if (M0 == 0 || M0 == 1 || M0 == 2 || M0 == 3) {
2142       // Input vectors don't need to be swapped if the leading element
2143       // of the result is one of the 4 elements of the first vector.
2144       Swap = false;
2145       ShiftElts = M0;
2146     } else if (M0 == 4 || M0 == 5 || M0 == 6 || M0 == 7) {
2147       // Input vectors need to be swapped if the leading element
2148       // of the result is one of the 4 elements of the right vector.
2149       Swap = true;
2150       ShiftElts = M0 - 4;
2151     }
2152 
2153     return true;
2154   }
2155 }
2156 
2157 bool static isXXBRShuffleMaskHelper(ShuffleVectorSDNode *N, int Width) {
2158   assert(N->getValueType(0) == MVT::v16i8 && "Shuffle vector expects v16i8");
2159 
2160   if (!isNByteElemShuffleMask(N, Width, -1))
2161     return false;
2162 
2163   for (int i = 0; i < 16; i += Width)
2164     if (N->getMaskElt(i) != i + Width - 1)
2165       return false;
2166 
2167   return true;
2168 }
2169 
2170 bool PPC::isXXBRHShuffleMask(ShuffleVectorSDNode *N) {
2171   return isXXBRShuffleMaskHelper(N, 2);
2172 }
2173 
2174 bool PPC::isXXBRWShuffleMask(ShuffleVectorSDNode *N) {
2175   return isXXBRShuffleMaskHelper(N, 4);
2176 }
2177 
2178 bool PPC::isXXBRDShuffleMask(ShuffleVectorSDNode *N) {
2179   return isXXBRShuffleMaskHelper(N, 8);
2180 }
2181 
2182 bool PPC::isXXBRQShuffleMask(ShuffleVectorSDNode *N) {
2183   return isXXBRShuffleMaskHelper(N, 16);
2184 }
2185 
2186 /// Can node \p N be lowered to an XXPERMDI instruction? If so, set \p Swap
2187 /// if the inputs to the instruction should be swapped and set \p DM to the
2188 /// value for the immediate.
2189 /// Specifically, set \p Swap to true only if \p N can be lowered to XXPERMDI
2190 /// AND element 0 of the result comes from the first input (LE) or second input
2191 /// (BE). Set \p DM to the calculated result (0-3) only if \p N can be lowered.
2192 /// \return true iff the given mask of shuffle node \p N is a XXPERMDI shuffle
2193 /// mask.
2194 bool PPC::isXXPERMDIShuffleMask(ShuffleVectorSDNode *N, unsigned &DM,
2195                                bool &Swap, bool IsLE) {
2196   assert(N->getValueType(0) == MVT::v16i8 && "Shuffle vector expects v16i8");
2197 
2198   // Ensure each byte index of the double word is consecutive.
2199   if (!isNByteElemShuffleMask(N, 8, 1))
2200     return false;
2201 
2202   unsigned M0 = N->getMaskElt(0) / 8;
2203   unsigned M1 = N->getMaskElt(8) / 8;
2204   assert(((M0 | M1) < 4) && "A mask element out of bounds?");
2205 
2206   // If both vector operands for the shuffle are the same vector, the mask will
2207   // contain only elements from the first one and the second one will be undef.
2208   if (N->getOperand(1).isUndef()) {
2209     if ((M0 | M1) < 2) {
2210       DM = IsLE ? (((~M1) & 1) << 1) + ((~M0) & 1) : (M0 << 1) + (M1 & 1);
2211       Swap = false;
2212       return true;
2213     } else
2214       return false;
2215   }
2216 
2217   if (IsLE) {
2218     if (M0 > 1 && M1 < 2) {
2219       Swap = false;
2220     } else if (M0 < 2 && M1 > 1) {
2221       M0 = (M0 + 2) % 4;
2222       M1 = (M1 + 2) % 4;
2223       Swap = true;
2224     } else
2225       return false;
2226 
2227     // Note: if control flow comes here that means Swap is already set above
2228     DM = (((~M1) & 1) << 1) + ((~M0) & 1);
2229     return true;
2230   } else { // BE
2231     if (M0 < 2 && M1 > 1) {
2232       Swap = false;
2233     } else if (M0 > 1 && M1 < 2) {
2234       M0 = (M0 + 2) % 4;
2235       M1 = (M1 + 2) % 4;
2236       Swap = true;
2237     } else
2238       return false;
2239 
2240     // Note: if control flow comes here that means Swap is already set above
2241     DM = (M0 << 1) + (M1 & 1);
2242     return true;
2243   }
2244 }
2245 
2246 
2247 /// getSplatIdxForPPCMnemonics - Return the splat index as a value that is
2248 /// appropriate for PPC mnemonics (which have a big endian bias - namely
2249 /// elements are counted from the left of the vector register).
2250 unsigned PPC::getSplatIdxForPPCMnemonics(SDNode *N, unsigned EltSize,
2251                                          SelectionDAG &DAG) {
2252   ShuffleVectorSDNode *SVOp = cast<ShuffleVectorSDNode>(N);
2253   assert(isSplatShuffleMask(SVOp, EltSize));
2254   if (DAG.getDataLayout().isLittleEndian())
2255     return (16 / EltSize) - 1 - (SVOp->getMaskElt(0) / EltSize);
2256   else
2257     return SVOp->getMaskElt(0) / EltSize;
2258 }
2259 
2260 /// get_VSPLTI_elt - If this is a build_vector of constants which can be formed
2261 /// by using a vspltis[bhw] instruction of the specified element size, return
2262 /// the constant being splatted.  The ByteSize field indicates the number of
2263 /// bytes of each element [124] -> [bhw].
2264 SDValue PPC::get_VSPLTI_elt(SDNode *N, unsigned ByteSize, SelectionDAG &DAG) {
2265   SDValue OpVal(nullptr, 0);
2266 
2267   // If ByteSize of the splat is bigger than the element size of the
2268   // build_vector, then we have a case where we are checking for a splat where
2269   // multiple elements of the buildvector are folded together into a single
2270   // logical element of the splat (e.g. "vsplish 1" to splat {0,1}*8).
2271   unsigned EltSize = 16/N->getNumOperands();
2272   if (EltSize < ByteSize) {
2273     unsigned Multiple = ByteSize/EltSize;   // Number of BV entries per spltval.
2274     SDValue UniquedVals[4];
2275     assert(Multiple > 1 && Multiple <= 4 && "How can this happen?");
2276 
2277     // See if all of the elements in the buildvector agree across.
2278     for (unsigned i = 0, e = N->getNumOperands(); i != e; ++i) {
2279       if (N->getOperand(i).isUndef()) continue;
2280       // If the element isn't a constant, bail fully out.
2281       if (!isa<ConstantSDNode>(N->getOperand(i))) return SDValue();
2282 
2283       if (!UniquedVals[i&(Multiple-1)].getNode())
2284         UniquedVals[i&(Multiple-1)] = N->getOperand(i);
2285       else if (UniquedVals[i&(Multiple-1)] != N->getOperand(i))
2286         return SDValue();  // no match.
2287     }
2288 
2289     // Okay, if we reached this point, UniquedVals[0..Multiple-1] contains
2290     // either constant or undef values that are identical for each chunk.  See
2291     // if these chunks can form into a larger vspltis*.
2292 
2293     // Check to see if all of the leading entries are either 0 or -1.  If
2294     // neither, then this won't fit into the immediate field.
2295     bool LeadingZero = true;
2296     bool LeadingOnes = true;
2297     for (unsigned i = 0; i != Multiple-1; ++i) {
2298       if (!UniquedVals[i].getNode()) continue;  // Must have been undefs.
2299 
2300       LeadingZero &= isNullConstant(UniquedVals[i]);
2301       LeadingOnes &= isAllOnesConstant(UniquedVals[i]);
2302     }
2303     // Finally, check the least significant entry.
2304     if (LeadingZero) {
2305       if (!UniquedVals[Multiple-1].getNode())
2306         return DAG.getTargetConstant(0, SDLoc(N), MVT::i32);  // 0,0,0,undef
2307       int Val = cast<ConstantSDNode>(UniquedVals[Multiple-1])->getZExtValue();
2308       if (Val < 16)                                   // 0,0,0,4 -> vspltisw(4)
2309         return DAG.getTargetConstant(Val, SDLoc(N), MVT::i32);
2310     }
2311     if (LeadingOnes) {
2312       if (!UniquedVals[Multiple-1].getNode())
2313         return DAG.getTargetConstant(~0U, SDLoc(N), MVT::i32); // -1,-1,-1,undef
2314       int Val =cast<ConstantSDNode>(UniquedVals[Multiple-1])->getSExtValue();
2315       if (Val >= -16)                            // -1,-1,-1,-2 -> vspltisw(-2)
2316         return DAG.getTargetConstant(Val, SDLoc(N), MVT::i32);
2317     }
2318 
2319     return SDValue();
2320   }
2321 
2322   // Check to see if this buildvec has a single non-undef value in its elements.
2323   for (unsigned i = 0, e = N->getNumOperands(); i != e; ++i) {
2324     if (N->getOperand(i).isUndef()) continue;
2325     if (!OpVal.getNode())
2326       OpVal = N->getOperand(i);
2327     else if (OpVal != N->getOperand(i))
2328       return SDValue();
2329   }
2330 
2331   if (!OpVal.getNode()) return SDValue();  // All UNDEF: use implicit def.
2332 
2333   unsigned ValSizeInBytes = EltSize;
2334   uint64_t Value = 0;
2335   if (ConstantSDNode *CN = dyn_cast<ConstantSDNode>(OpVal)) {
2336     Value = CN->getZExtValue();
2337   } else if (ConstantFPSDNode *CN = dyn_cast<ConstantFPSDNode>(OpVal)) {
2338     assert(CN->getValueType(0) == MVT::f32 && "Only one legal FP vector type!");
2339     Value = FloatToBits(CN->getValueAPF().convertToFloat());
2340   }
2341 
2342   // If the splat value is larger than the element value, then we can never do
2343   // this splat.  The only case that we could fit the replicated bits into our
2344   // immediate field for would be zero, and we prefer to use vxor for it.
2345   if (ValSizeInBytes < ByteSize) return SDValue();
2346 
2347   // If the element value is larger than the splat value, check if it consists
2348   // of a repeated bit pattern of size ByteSize.
2349   if (!APInt(ValSizeInBytes * 8, Value).isSplat(ByteSize * 8))
2350     return SDValue();
2351 
2352   // Properly sign extend the value.
2353   int MaskVal = SignExtend32(Value, ByteSize * 8);
2354 
2355   // If this is zero, don't match, zero matches ISD::isBuildVectorAllZeros.
2356   if (MaskVal == 0) return SDValue();
2357 
2358   // Finally, if this value fits in a 5 bit sext field, return it
2359   if (SignExtend32<5>(MaskVal) == MaskVal)
2360     return DAG.getTargetConstant(MaskVal, SDLoc(N), MVT::i32);
2361   return SDValue();
2362 }
2363 
2364 /// isQVALIGNIShuffleMask - If this is a qvaligni shuffle mask, return the shift
2365 /// amount, otherwise return -1.
2366 int PPC::isQVALIGNIShuffleMask(SDNode *N) {
2367   EVT VT = N->getValueType(0);
2368   if (VT != MVT::v4f64 && VT != MVT::v4f32 && VT != MVT::v4i1)
2369     return -1;
2370 
2371   ShuffleVectorSDNode *SVOp = cast<ShuffleVectorSDNode>(N);
2372 
2373   // Find the first non-undef value in the shuffle mask.
2374   unsigned i;
2375   for (i = 0; i != 4 && SVOp->getMaskElt(i) < 0; ++i)
2376     /*search*/;
2377 
2378   if (i == 4) return -1;  // all undef.
2379 
2380   // Otherwise, check to see if the rest of the elements are consecutively
2381   // numbered from this value.
2382   unsigned ShiftAmt = SVOp->getMaskElt(i);
2383   if (ShiftAmt < i) return -1;
2384   ShiftAmt -= i;
2385 
2386   // Check the rest of the elements to see if they are consecutive.
2387   for (++i; i != 4; ++i)
2388     if (!isConstantOrUndef(SVOp->getMaskElt(i), ShiftAmt+i))
2389       return -1;
2390 
2391   return ShiftAmt;
2392 }
2393 
2394 //===----------------------------------------------------------------------===//
2395 //  Addressing Mode Selection
2396 //===----------------------------------------------------------------------===//
2397 
2398 /// isIntS16Immediate - This method tests to see if the node is either a 32-bit
2399 /// or 64-bit immediate, and if the value can be accurately represented as a
2400 /// sign extension from a 16-bit value.  If so, this returns true and the
2401 /// immediate.
2402 bool llvm::isIntS16Immediate(SDNode *N, int16_t &Imm) {
2403   if (!isa<ConstantSDNode>(N))
2404     return false;
2405 
2406   Imm = (int16_t)cast<ConstantSDNode>(N)->getZExtValue();
2407   if (N->getValueType(0) == MVT::i32)
2408     return Imm == (int32_t)cast<ConstantSDNode>(N)->getZExtValue();
2409   else
2410     return Imm == (int64_t)cast<ConstantSDNode>(N)->getZExtValue();
2411 }
2412 bool llvm::isIntS16Immediate(SDValue Op, int16_t &Imm) {
2413   return isIntS16Immediate(Op.getNode(), Imm);
2414 }
2415 
2416 
2417 /// SelectAddressEVXRegReg - Given the specified address, check to see if it can
2418 /// be represented as an indexed [r+r] operation.
2419 bool PPCTargetLowering::SelectAddressEVXRegReg(SDValue N, SDValue &Base,
2420                                                SDValue &Index,
2421                                                SelectionDAG &DAG) const {
2422   for (SDNode::use_iterator UI = N->use_begin(), E = N->use_end();
2423       UI != E; ++UI) {
2424     if (MemSDNode *Memop = dyn_cast<MemSDNode>(*UI)) {
2425       if (Memop->getMemoryVT() == MVT::f64) {
2426           Base = N.getOperand(0);
2427           Index = N.getOperand(1);
2428           return true;
2429       }
2430     }
2431   }
2432   return false;
2433 }
2434 
2435 /// SelectAddressRegReg - Given the specified addressed, check to see if it
2436 /// can be represented as an indexed [r+r] operation.  Returns false if it
2437 /// can be more efficiently represented as [r+imm]. If \p EncodingAlignment is
2438 /// non-zero and N can be represented by a base register plus a signed 16-bit
2439 /// displacement, make a more precise judgement by checking (displacement % \p
2440 /// EncodingAlignment).
2441 bool PPCTargetLowering::SelectAddressRegReg(
2442     SDValue N, SDValue &Base, SDValue &Index, SelectionDAG &DAG,
2443     MaybeAlign EncodingAlignment) const {
2444   // If we have a PC Relative target flag don't select as [reg+reg]. It will be
2445   // a [pc+imm].
2446   if (SelectAddressPCRel(N, Base))
2447     return false;
2448 
2449   int16_t Imm = 0;
2450   if (N.getOpcode() == ISD::ADD) {
2451     // Is there any SPE load/store (f64), which can't handle 16bit offset?
2452     // SPE load/store can only handle 8-bit offsets.
2453     if (hasSPE() && SelectAddressEVXRegReg(N, Base, Index, DAG))
2454         return true;
2455     if (isIntS16Immediate(N.getOperand(1), Imm) &&
2456         (!EncodingAlignment || isAligned(*EncodingAlignment, Imm)))
2457       return false; // r+i
2458     if (N.getOperand(1).getOpcode() == PPCISD::Lo)
2459       return false;    // r+i
2460 
2461     Base = N.getOperand(0);
2462     Index = N.getOperand(1);
2463     return true;
2464   } else if (N.getOpcode() == ISD::OR) {
2465     if (isIntS16Immediate(N.getOperand(1), Imm) &&
2466         (!EncodingAlignment || isAligned(*EncodingAlignment, Imm)))
2467       return false; // r+i can fold it if we can.
2468 
2469     // If this is an or of disjoint bitfields, we can codegen this as an add
2470     // (for better address arithmetic) if the LHS and RHS of the OR are provably
2471     // disjoint.
2472     KnownBits LHSKnown = DAG.computeKnownBits(N.getOperand(0));
2473 
2474     if (LHSKnown.Zero.getBoolValue()) {
2475       KnownBits RHSKnown = DAG.computeKnownBits(N.getOperand(1));
2476       // If all of the bits are known zero on the LHS or RHS, the add won't
2477       // carry.
2478       if (~(LHSKnown.Zero | RHSKnown.Zero) == 0) {
2479         Base = N.getOperand(0);
2480         Index = N.getOperand(1);
2481         return true;
2482       }
2483     }
2484   }
2485 
2486   return false;
2487 }
2488 
2489 // If we happen to be doing an i64 load or store into a stack slot that has
2490 // less than a 4-byte alignment, then the frame-index elimination may need to
2491 // use an indexed load or store instruction (because the offset may not be a
2492 // multiple of 4). The extra register needed to hold the offset comes from the
2493 // register scavenger, and it is possible that the scavenger will need to use
2494 // an emergency spill slot. As a result, we need to make sure that a spill slot
2495 // is allocated when doing an i64 load/store into a less-than-4-byte-aligned
2496 // stack slot.
2497 static void fixupFuncForFI(SelectionDAG &DAG, int FrameIdx, EVT VT) {
2498   // FIXME: This does not handle the LWA case.
2499   if (VT != MVT::i64)
2500     return;
2501 
2502   // NOTE: We'll exclude negative FIs here, which come from argument
2503   // lowering, because there are no known test cases triggering this problem
2504   // using packed structures (or similar). We can remove this exclusion if
2505   // we find such a test case. The reason why this is so test-case driven is
2506   // because this entire 'fixup' is only to prevent crashes (from the
2507   // register scavenger) on not-really-valid inputs. For example, if we have:
2508   //   %a = alloca i1
2509   //   %b = bitcast i1* %a to i64*
2510   //   store i64* a, i64 b
2511   // then the store should really be marked as 'align 1', but is not. If it
2512   // were marked as 'align 1' then the indexed form would have been
2513   // instruction-selected initially, and the problem this 'fixup' is preventing
2514   // won't happen regardless.
2515   if (FrameIdx < 0)
2516     return;
2517 
2518   MachineFunction &MF = DAG.getMachineFunction();
2519   MachineFrameInfo &MFI = MF.getFrameInfo();
2520 
2521   if (MFI.getObjectAlign(FrameIdx) >= Align(4))
2522     return;
2523 
2524   PPCFunctionInfo *FuncInfo = MF.getInfo<PPCFunctionInfo>();
2525   FuncInfo->setHasNonRISpills();
2526 }
2527 
2528 /// Returns true if the address N can be represented by a base register plus
2529 /// a signed 16-bit displacement [r+imm], and if it is not better
2530 /// represented as reg+reg.  If \p EncodingAlignment is non-zero, only accept
2531 /// displacements that are multiples of that value.
2532 bool PPCTargetLowering::SelectAddressRegImm(
2533     SDValue N, SDValue &Disp, SDValue &Base, SelectionDAG &DAG,
2534     MaybeAlign EncodingAlignment) const {
2535   // FIXME dl should come from parent load or store, not from address
2536   SDLoc dl(N);
2537 
2538   // If we have a PC Relative target flag don't select as [reg+imm]. It will be
2539   // a [pc+imm].
2540   if (SelectAddressPCRel(N, Base))
2541     return false;
2542 
2543   // If this can be more profitably realized as r+r, fail.
2544   if (SelectAddressRegReg(N, Disp, Base, DAG, EncodingAlignment))
2545     return false;
2546 
2547   if (N.getOpcode() == ISD::ADD) {
2548     int16_t imm = 0;
2549     if (isIntS16Immediate(N.getOperand(1), imm) &&
2550         (!EncodingAlignment || isAligned(*EncodingAlignment, imm))) {
2551       Disp = DAG.getTargetConstant(imm, dl, N.getValueType());
2552       if (FrameIndexSDNode *FI = dyn_cast<FrameIndexSDNode>(N.getOperand(0))) {
2553         Base = DAG.getTargetFrameIndex(FI->getIndex(), N.getValueType());
2554         fixupFuncForFI(DAG, FI->getIndex(), N.getValueType());
2555       } else {
2556         Base = N.getOperand(0);
2557       }
2558       return true; // [r+i]
2559     } else if (N.getOperand(1).getOpcode() == PPCISD::Lo) {
2560       // Match LOAD (ADD (X, Lo(G))).
2561       assert(!cast<ConstantSDNode>(N.getOperand(1).getOperand(1))->getZExtValue()
2562              && "Cannot handle constant offsets yet!");
2563       Disp = N.getOperand(1).getOperand(0);  // The global address.
2564       assert(Disp.getOpcode() == ISD::TargetGlobalAddress ||
2565              Disp.getOpcode() == ISD::TargetGlobalTLSAddress ||
2566              Disp.getOpcode() == ISD::TargetConstantPool ||
2567              Disp.getOpcode() == ISD::TargetJumpTable);
2568       Base = N.getOperand(0);
2569       return true;  // [&g+r]
2570     }
2571   } else if (N.getOpcode() == ISD::OR) {
2572     int16_t imm = 0;
2573     if (isIntS16Immediate(N.getOperand(1), imm) &&
2574         (!EncodingAlignment || isAligned(*EncodingAlignment, imm))) {
2575       // If this is an or of disjoint bitfields, we can codegen this as an add
2576       // (for better address arithmetic) if the LHS and RHS of the OR are
2577       // provably disjoint.
2578       KnownBits LHSKnown = DAG.computeKnownBits(N.getOperand(0));
2579 
2580       if ((LHSKnown.Zero.getZExtValue()|~(uint64_t)imm) == ~0ULL) {
2581         // If all of the bits are known zero on the LHS or RHS, the add won't
2582         // carry.
2583         if (FrameIndexSDNode *FI =
2584               dyn_cast<FrameIndexSDNode>(N.getOperand(0))) {
2585           Base = DAG.getTargetFrameIndex(FI->getIndex(), N.getValueType());
2586           fixupFuncForFI(DAG, FI->getIndex(), N.getValueType());
2587         } else {
2588           Base = N.getOperand(0);
2589         }
2590         Disp = DAG.getTargetConstant(imm, dl, N.getValueType());
2591         return true;
2592       }
2593     }
2594   } else if (ConstantSDNode *CN = dyn_cast<ConstantSDNode>(N)) {
2595     // Loading from a constant address.
2596 
2597     // If this address fits entirely in a 16-bit sext immediate field, codegen
2598     // this as "d, 0"
2599     int16_t Imm;
2600     if (isIntS16Immediate(CN, Imm) &&
2601         (!EncodingAlignment || isAligned(*EncodingAlignment, Imm))) {
2602       Disp = DAG.getTargetConstant(Imm, dl, CN->getValueType(0));
2603       Base = DAG.getRegister(Subtarget.isPPC64() ? PPC::ZERO8 : PPC::ZERO,
2604                              CN->getValueType(0));
2605       return true;
2606     }
2607 
2608     // Handle 32-bit sext immediates with LIS + addr mode.
2609     if ((CN->getValueType(0) == MVT::i32 ||
2610          (int64_t)CN->getZExtValue() == (int)CN->getZExtValue()) &&
2611         (!EncodingAlignment ||
2612          isAligned(*EncodingAlignment, CN->getZExtValue()))) {
2613       int Addr = (int)CN->getZExtValue();
2614 
2615       // Otherwise, break this down into an LIS + disp.
2616       Disp = DAG.getTargetConstant((short)Addr, dl, MVT::i32);
2617 
2618       Base = DAG.getTargetConstant((Addr - (signed short)Addr) >> 16, dl,
2619                                    MVT::i32);
2620       unsigned Opc = CN->getValueType(0) == MVT::i32 ? PPC::LIS : PPC::LIS8;
2621       Base = SDValue(DAG.getMachineNode(Opc, dl, CN->getValueType(0), Base), 0);
2622       return true;
2623     }
2624   }
2625 
2626   Disp = DAG.getTargetConstant(0, dl, getPointerTy(DAG.getDataLayout()));
2627   if (FrameIndexSDNode *FI = dyn_cast<FrameIndexSDNode>(N)) {
2628     Base = DAG.getTargetFrameIndex(FI->getIndex(), N.getValueType());
2629     fixupFuncForFI(DAG, FI->getIndex(), N.getValueType());
2630   } else
2631     Base = N;
2632   return true;      // [r+0]
2633 }
2634 
2635 /// SelectAddressRegRegOnly - Given the specified addressed, force it to be
2636 /// represented as an indexed [r+r] operation.
2637 bool PPCTargetLowering::SelectAddressRegRegOnly(SDValue N, SDValue &Base,
2638                                                 SDValue &Index,
2639                                                 SelectionDAG &DAG) const {
2640   // Check to see if we can easily represent this as an [r+r] address.  This
2641   // will fail if it thinks that the address is more profitably represented as
2642   // reg+imm, e.g. where imm = 0.
2643   if (SelectAddressRegReg(N, Base, Index, DAG))
2644     return true;
2645 
2646   // If the address is the result of an add, we will utilize the fact that the
2647   // address calculation includes an implicit add.  However, we can reduce
2648   // register pressure if we do not materialize a constant just for use as the
2649   // index register.  We only get rid of the add if it is not an add of a
2650   // value and a 16-bit signed constant and both have a single use.
2651   int16_t imm = 0;
2652   if (N.getOpcode() == ISD::ADD &&
2653       (!isIntS16Immediate(N.getOperand(1), imm) ||
2654        !N.getOperand(1).hasOneUse() || !N.getOperand(0).hasOneUse())) {
2655     Base = N.getOperand(0);
2656     Index = N.getOperand(1);
2657     return true;
2658   }
2659 
2660   // Otherwise, do it the hard way, using R0 as the base register.
2661   Base = DAG.getRegister(Subtarget.isPPC64() ? PPC::ZERO8 : PPC::ZERO,
2662                          N.getValueType());
2663   Index = N;
2664   return true;
2665 }
2666 
2667 template <typename Ty> static bool isValidPCRelNode(SDValue N) {
2668   Ty *PCRelCand = dyn_cast<Ty>(N);
2669   return PCRelCand && (PCRelCand->getTargetFlags() & PPCII::MO_PCREL_FLAG);
2670 }
2671 
2672 /// Returns true if this address is a PC Relative address.
2673 /// PC Relative addresses are marked with the flag PPCII::MO_PCREL_FLAG
2674 /// or if the node opcode is PPCISD::MAT_PCREL_ADDR.
2675 bool PPCTargetLowering::SelectAddressPCRel(SDValue N, SDValue &Base) const {
2676   // This is a materialize PC Relative node. Always select this as PC Relative.
2677   Base = N;
2678   if (N.getOpcode() == PPCISD::MAT_PCREL_ADDR)
2679     return true;
2680   if (isValidPCRelNode<ConstantPoolSDNode>(N) ||
2681       isValidPCRelNode<GlobalAddressSDNode>(N) ||
2682       isValidPCRelNode<JumpTableSDNode>(N) ||
2683       isValidPCRelNode<BlockAddressSDNode>(N))
2684     return true;
2685   return false;
2686 }
2687 
2688 /// Returns true if we should use a direct load into vector instruction
2689 /// (such as lxsd or lfd), instead of a load into gpr + direct move sequence.
2690 static bool usePartialVectorLoads(SDNode *N, const PPCSubtarget& ST) {
2691 
2692   // If there are any other uses other than scalar to vector, then we should
2693   // keep it as a scalar load -> direct move pattern to prevent multiple
2694   // loads.
2695   LoadSDNode *LD = dyn_cast<LoadSDNode>(N);
2696   if (!LD)
2697     return false;
2698 
2699   EVT MemVT = LD->getMemoryVT();
2700   if (!MemVT.isSimple())
2701     return false;
2702   switch(MemVT.getSimpleVT().SimpleTy) {
2703   case MVT::i64:
2704     break;
2705   case MVT::i32:
2706     if (!ST.hasP8Vector())
2707       return false;
2708     break;
2709   case MVT::i16:
2710   case MVT::i8:
2711     if (!ST.hasP9Vector())
2712       return false;
2713     break;
2714   default:
2715     return false;
2716   }
2717 
2718   SDValue LoadedVal(N, 0);
2719   if (!LoadedVal.hasOneUse())
2720     return false;
2721 
2722   for (SDNode::use_iterator UI = LD->use_begin(), UE = LD->use_end();
2723        UI != UE; ++UI)
2724     if (UI.getUse().get().getResNo() == 0 &&
2725         UI->getOpcode() != ISD::SCALAR_TO_VECTOR &&
2726         UI->getOpcode() != PPCISD::SCALAR_TO_VECTOR_PERMUTED)
2727       return false;
2728 
2729   return true;
2730 }
2731 
2732 /// getPreIndexedAddressParts - returns true by value, base pointer and
2733 /// offset pointer and addressing mode by reference if the node's address
2734 /// can be legally represented as pre-indexed load / store address.
2735 bool PPCTargetLowering::getPreIndexedAddressParts(SDNode *N, SDValue &Base,
2736                                                   SDValue &Offset,
2737                                                   ISD::MemIndexedMode &AM,
2738                                                   SelectionDAG &DAG) const {
2739   if (DisablePPCPreinc) return false;
2740 
2741   bool isLoad = true;
2742   SDValue Ptr;
2743   EVT VT;
2744   unsigned Alignment;
2745   if (LoadSDNode *LD = dyn_cast<LoadSDNode>(N)) {
2746     Ptr = LD->getBasePtr();
2747     VT = LD->getMemoryVT();
2748     Alignment = LD->getAlignment();
2749   } else if (StoreSDNode *ST = dyn_cast<StoreSDNode>(N)) {
2750     Ptr = ST->getBasePtr();
2751     VT  = ST->getMemoryVT();
2752     Alignment = ST->getAlignment();
2753     isLoad = false;
2754   } else
2755     return false;
2756 
2757   // Do not generate pre-inc forms for specific loads that feed scalar_to_vector
2758   // instructions because we can fold these into a more efficient instruction
2759   // instead, (such as LXSD).
2760   if (isLoad && usePartialVectorLoads(N, Subtarget)) {
2761     return false;
2762   }
2763 
2764   // PowerPC doesn't have preinc load/store instructions for vectors (except
2765   // for QPX, which does have preinc r+r forms).
2766   if (VT.isVector()) {
2767     if (!Subtarget.hasQPX() || (VT != MVT::v4f64 && VT != MVT::v4f32)) {
2768       return false;
2769     } else if (SelectAddressRegRegOnly(Ptr, Offset, Base, DAG)) {
2770       AM = ISD::PRE_INC;
2771       return true;
2772     }
2773   }
2774 
2775   if (SelectAddressRegReg(Ptr, Base, Offset, DAG)) {
2776     // Common code will reject creating a pre-inc form if the base pointer
2777     // is a frame index, or if N is a store and the base pointer is either
2778     // the same as or a predecessor of the value being stored.  Check for
2779     // those situations here, and try with swapped Base/Offset instead.
2780     bool Swap = false;
2781 
2782     if (isa<FrameIndexSDNode>(Base) || isa<RegisterSDNode>(Base))
2783       Swap = true;
2784     else if (!isLoad) {
2785       SDValue Val = cast<StoreSDNode>(N)->getValue();
2786       if (Val == Base || Base.getNode()->isPredecessorOf(Val.getNode()))
2787         Swap = true;
2788     }
2789 
2790     if (Swap)
2791       std::swap(Base, Offset);
2792 
2793     AM = ISD::PRE_INC;
2794     return true;
2795   }
2796 
2797   // LDU/STU can only handle immediates that are a multiple of 4.
2798   if (VT != MVT::i64) {
2799     if (!SelectAddressRegImm(Ptr, Offset, Base, DAG, None))
2800       return false;
2801   } else {
2802     // LDU/STU need an address with at least 4-byte alignment.
2803     if (Alignment < 4)
2804       return false;
2805 
2806     if (!SelectAddressRegImm(Ptr, Offset, Base, DAG, Align(4)))
2807       return false;
2808   }
2809 
2810   if (LoadSDNode *LD = dyn_cast<LoadSDNode>(N)) {
2811     // PPC64 doesn't have lwau, but it does have lwaux.  Reject preinc load of
2812     // sext i32 to i64 when addr mode is r+i.
2813     if (LD->getValueType(0) == MVT::i64 && LD->getMemoryVT() == MVT::i32 &&
2814         LD->getExtensionType() == ISD::SEXTLOAD &&
2815         isa<ConstantSDNode>(Offset))
2816       return false;
2817   }
2818 
2819   AM = ISD::PRE_INC;
2820   return true;
2821 }
2822 
2823 //===----------------------------------------------------------------------===//
2824 //  LowerOperation implementation
2825 //===----------------------------------------------------------------------===//
2826 
2827 /// Return true if we should reference labels using a PICBase, set the HiOpFlags
2828 /// and LoOpFlags to the target MO flags.
2829 static void getLabelAccessInfo(bool IsPIC, const PPCSubtarget &Subtarget,
2830                                unsigned &HiOpFlags, unsigned &LoOpFlags,
2831                                const GlobalValue *GV = nullptr) {
2832   HiOpFlags = PPCII::MO_HA;
2833   LoOpFlags = PPCII::MO_LO;
2834 
2835   // Don't use the pic base if not in PIC relocation model.
2836   if (IsPIC) {
2837     HiOpFlags |= PPCII::MO_PIC_FLAG;
2838     LoOpFlags |= PPCII::MO_PIC_FLAG;
2839   }
2840 }
2841 
2842 static SDValue LowerLabelRef(SDValue HiPart, SDValue LoPart, bool isPIC,
2843                              SelectionDAG &DAG) {
2844   SDLoc DL(HiPart);
2845   EVT PtrVT = HiPart.getValueType();
2846   SDValue Zero = DAG.getConstant(0, DL, PtrVT);
2847 
2848   SDValue Hi = DAG.getNode(PPCISD::Hi, DL, PtrVT, HiPart, Zero);
2849   SDValue Lo = DAG.getNode(PPCISD::Lo, DL, PtrVT, LoPart, Zero);
2850 
2851   // With PIC, the first instruction is actually "GR+hi(&G)".
2852   if (isPIC)
2853     Hi = DAG.getNode(ISD::ADD, DL, PtrVT,
2854                      DAG.getNode(PPCISD::GlobalBaseReg, DL, PtrVT), Hi);
2855 
2856   // Generate non-pic code that has direct accesses to the constant pool.
2857   // The address of the global is just (hi(&g)+lo(&g)).
2858   return DAG.getNode(ISD::ADD, DL, PtrVT, Hi, Lo);
2859 }
2860 
2861 static void setUsesTOCBasePtr(MachineFunction &MF) {
2862   PPCFunctionInfo *FuncInfo = MF.getInfo<PPCFunctionInfo>();
2863   FuncInfo->setUsesTOCBasePtr();
2864 }
2865 
2866 static void setUsesTOCBasePtr(SelectionDAG &DAG) {
2867   setUsesTOCBasePtr(DAG.getMachineFunction());
2868 }
2869 
2870 SDValue PPCTargetLowering::getTOCEntry(SelectionDAG &DAG, const SDLoc &dl,
2871                                        SDValue GA) const {
2872   const bool Is64Bit = Subtarget.isPPC64();
2873   EVT VT = Is64Bit ? MVT::i64 : MVT::i32;
2874   SDValue Reg = Is64Bit ? DAG.getRegister(PPC::X2, VT)
2875                         : Subtarget.isAIXABI()
2876                               ? DAG.getRegister(PPC::R2, VT)
2877                               : DAG.getNode(PPCISD::GlobalBaseReg, dl, VT);
2878   SDValue Ops[] = { GA, Reg };
2879   return DAG.getMemIntrinsicNode(
2880       PPCISD::TOC_ENTRY, dl, DAG.getVTList(VT, MVT::Other), Ops, VT,
2881       MachinePointerInfo::getGOT(DAG.getMachineFunction()), None,
2882       MachineMemOperand::MOLoad);
2883 }
2884 
2885 SDValue PPCTargetLowering::LowerConstantPool(SDValue Op,
2886                                              SelectionDAG &DAG) const {
2887   EVT PtrVT = Op.getValueType();
2888   ConstantPoolSDNode *CP = cast<ConstantPoolSDNode>(Op);
2889   const Constant *C = CP->getConstVal();
2890 
2891   // 64-bit SVR4 ABI and AIX ABI code are always position-independent.
2892   // The actual address of the GlobalValue is stored in the TOC.
2893   if (Subtarget.is64BitELFABI() || Subtarget.isAIXABI()) {
2894     if (Subtarget.isUsingPCRelativeCalls()) {
2895       SDLoc DL(CP);
2896       EVT Ty = getPointerTy(DAG.getDataLayout());
2897       SDValue ConstPool = DAG.getTargetConstantPool(
2898           C, Ty, CP->getAlign(), CP->getOffset(), PPCII::MO_PCREL_FLAG);
2899       return DAG.getNode(PPCISD::MAT_PCREL_ADDR, DL, Ty, ConstPool);
2900     }
2901     setUsesTOCBasePtr(DAG);
2902     SDValue GA = DAG.getTargetConstantPool(C, PtrVT, CP->getAlign(), 0);
2903     return getTOCEntry(DAG, SDLoc(CP), GA);
2904   }
2905 
2906   unsigned MOHiFlag, MOLoFlag;
2907   bool IsPIC = isPositionIndependent();
2908   getLabelAccessInfo(IsPIC, Subtarget, MOHiFlag, MOLoFlag);
2909 
2910   if (IsPIC && Subtarget.isSVR4ABI()) {
2911     SDValue GA =
2912         DAG.getTargetConstantPool(C, PtrVT, CP->getAlign(), PPCII::MO_PIC_FLAG);
2913     return getTOCEntry(DAG, SDLoc(CP), GA);
2914   }
2915 
2916   SDValue CPIHi =
2917       DAG.getTargetConstantPool(C, PtrVT, CP->getAlign(), 0, MOHiFlag);
2918   SDValue CPILo =
2919       DAG.getTargetConstantPool(C, PtrVT, CP->getAlign(), 0, MOLoFlag);
2920   return LowerLabelRef(CPIHi, CPILo, IsPIC, DAG);
2921 }
2922 
2923 // For 64-bit PowerPC, prefer the more compact relative encodings.
2924 // This trades 32 bits per jump table entry for one or two instructions
2925 // on the jump site.
2926 unsigned PPCTargetLowering::getJumpTableEncoding() const {
2927   if (isJumpTableRelative())
2928     return MachineJumpTableInfo::EK_LabelDifference32;
2929 
2930   return TargetLowering::getJumpTableEncoding();
2931 }
2932 
2933 bool PPCTargetLowering::isJumpTableRelative() const {
2934   if (UseAbsoluteJumpTables)
2935     return false;
2936   if (Subtarget.isPPC64() || Subtarget.isAIXABI())
2937     return true;
2938   return TargetLowering::isJumpTableRelative();
2939 }
2940 
2941 SDValue PPCTargetLowering::getPICJumpTableRelocBase(SDValue Table,
2942                                                     SelectionDAG &DAG) const {
2943   if (!Subtarget.isPPC64() || Subtarget.isAIXABI())
2944     return TargetLowering::getPICJumpTableRelocBase(Table, DAG);
2945 
2946   switch (getTargetMachine().getCodeModel()) {
2947   case CodeModel::Small:
2948   case CodeModel::Medium:
2949     return TargetLowering::getPICJumpTableRelocBase(Table, DAG);
2950   default:
2951     return DAG.getNode(PPCISD::GlobalBaseReg, SDLoc(),
2952                        getPointerTy(DAG.getDataLayout()));
2953   }
2954 }
2955 
2956 const MCExpr *
2957 PPCTargetLowering::getPICJumpTableRelocBaseExpr(const MachineFunction *MF,
2958                                                 unsigned JTI,
2959                                                 MCContext &Ctx) const {
2960   if (!Subtarget.isPPC64() || Subtarget.isAIXABI())
2961     return TargetLowering::getPICJumpTableRelocBaseExpr(MF, JTI, Ctx);
2962 
2963   switch (getTargetMachine().getCodeModel()) {
2964   case CodeModel::Small:
2965   case CodeModel::Medium:
2966     return TargetLowering::getPICJumpTableRelocBaseExpr(MF, JTI, Ctx);
2967   default:
2968     return MCSymbolRefExpr::create(MF->getPICBaseSymbol(), Ctx);
2969   }
2970 }
2971 
2972 SDValue PPCTargetLowering::LowerJumpTable(SDValue Op, SelectionDAG &DAG) const {
2973   EVT PtrVT = Op.getValueType();
2974   JumpTableSDNode *JT = cast<JumpTableSDNode>(Op);
2975 
2976   // isUsingPCRelativeCalls() returns true when PCRelative is enabled
2977   if (Subtarget.isUsingPCRelativeCalls()) {
2978     SDLoc DL(JT);
2979     EVT Ty = getPointerTy(DAG.getDataLayout());
2980     SDValue GA =
2981         DAG.getTargetJumpTable(JT->getIndex(), Ty, PPCII::MO_PCREL_FLAG);
2982     SDValue MatAddr = DAG.getNode(PPCISD::MAT_PCREL_ADDR, DL, Ty, GA);
2983     return MatAddr;
2984   }
2985 
2986   // 64-bit SVR4 ABI and AIX ABI code are always position-independent.
2987   // The actual address of the GlobalValue is stored in the TOC.
2988   if (Subtarget.is64BitELFABI() || Subtarget.isAIXABI()) {
2989     setUsesTOCBasePtr(DAG);
2990     SDValue GA = DAG.getTargetJumpTable(JT->getIndex(), PtrVT);
2991     return getTOCEntry(DAG, SDLoc(JT), GA);
2992   }
2993 
2994   unsigned MOHiFlag, MOLoFlag;
2995   bool IsPIC = isPositionIndependent();
2996   getLabelAccessInfo(IsPIC, Subtarget, MOHiFlag, MOLoFlag);
2997 
2998   if (IsPIC && Subtarget.isSVR4ABI()) {
2999     SDValue GA = DAG.getTargetJumpTable(JT->getIndex(), PtrVT,
3000                                         PPCII::MO_PIC_FLAG);
3001     return getTOCEntry(DAG, SDLoc(GA), GA);
3002   }
3003 
3004   SDValue JTIHi = DAG.getTargetJumpTable(JT->getIndex(), PtrVT, MOHiFlag);
3005   SDValue JTILo = DAG.getTargetJumpTable(JT->getIndex(), PtrVT, MOLoFlag);
3006   return LowerLabelRef(JTIHi, JTILo, IsPIC, DAG);
3007 }
3008 
3009 SDValue PPCTargetLowering::LowerBlockAddress(SDValue Op,
3010                                              SelectionDAG &DAG) const {
3011   EVT PtrVT = Op.getValueType();
3012   BlockAddressSDNode *BASDN = cast<BlockAddressSDNode>(Op);
3013   const BlockAddress *BA = BASDN->getBlockAddress();
3014 
3015   // isUsingPCRelativeCalls() returns true when PCRelative is enabled
3016   if (Subtarget.isUsingPCRelativeCalls()) {
3017     SDLoc DL(BASDN);
3018     EVT Ty = getPointerTy(DAG.getDataLayout());
3019     SDValue GA = DAG.getTargetBlockAddress(BA, Ty, BASDN->getOffset(),
3020                                            PPCII::MO_PCREL_FLAG);
3021     SDValue MatAddr = DAG.getNode(PPCISD::MAT_PCREL_ADDR, DL, Ty, GA);
3022     return MatAddr;
3023   }
3024 
3025   // 64-bit SVR4 ABI and AIX ABI code are always position-independent.
3026   // The actual BlockAddress is stored in the TOC.
3027   if (Subtarget.is64BitELFABI() || Subtarget.isAIXABI()) {
3028     setUsesTOCBasePtr(DAG);
3029     SDValue GA = DAG.getTargetBlockAddress(BA, PtrVT, BASDN->getOffset());
3030     return getTOCEntry(DAG, SDLoc(BASDN), GA);
3031   }
3032 
3033   // 32-bit position-independent ELF stores the BlockAddress in the .got.
3034   if (Subtarget.is32BitELFABI() && isPositionIndependent())
3035     return getTOCEntry(
3036         DAG, SDLoc(BASDN),
3037         DAG.getTargetBlockAddress(BA, PtrVT, BASDN->getOffset()));
3038 
3039   unsigned MOHiFlag, MOLoFlag;
3040   bool IsPIC = isPositionIndependent();
3041   getLabelAccessInfo(IsPIC, Subtarget, MOHiFlag, MOLoFlag);
3042   SDValue TgtBAHi = DAG.getTargetBlockAddress(BA, PtrVT, 0, MOHiFlag);
3043   SDValue TgtBALo = DAG.getTargetBlockAddress(BA, PtrVT, 0, MOLoFlag);
3044   return LowerLabelRef(TgtBAHi, TgtBALo, IsPIC, DAG);
3045 }
3046 
3047 SDValue PPCTargetLowering::LowerGlobalTLSAddress(SDValue Op,
3048                                               SelectionDAG &DAG) const {
3049   // FIXME: TLS addresses currently use medium model code sequences,
3050   // which is the most useful form.  Eventually support for small and
3051   // large models could be added if users need it, at the cost of
3052   // additional complexity.
3053   GlobalAddressSDNode *GA = cast<GlobalAddressSDNode>(Op);
3054   if (DAG.getTarget().useEmulatedTLS())
3055     return LowerToTLSEmulatedModel(GA, DAG);
3056 
3057   SDLoc dl(GA);
3058   const GlobalValue *GV = GA->getGlobal();
3059   EVT PtrVT = getPointerTy(DAG.getDataLayout());
3060   bool is64bit = Subtarget.isPPC64();
3061   const Module *M = DAG.getMachineFunction().getFunction().getParent();
3062   PICLevel::Level picLevel = M->getPICLevel();
3063 
3064   const TargetMachine &TM = getTargetMachine();
3065   TLSModel::Model Model = TM.getTLSModel(GV);
3066 
3067   if (Model == TLSModel::LocalExec) {
3068     SDValue TGAHi = DAG.getTargetGlobalAddress(GV, dl, PtrVT, 0,
3069                                                PPCII::MO_TPREL_HA);
3070     SDValue TGALo = DAG.getTargetGlobalAddress(GV, dl, PtrVT, 0,
3071                                                PPCII::MO_TPREL_LO);
3072     SDValue TLSReg = is64bit ? DAG.getRegister(PPC::X13, MVT::i64)
3073                              : DAG.getRegister(PPC::R2, MVT::i32);
3074 
3075     SDValue Hi = DAG.getNode(PPCISD::Hi, dl, PtrVT, TGAHi, TLSReg);
3076     return DAG.getNode(PPCISD::Lo, dl, PtrVT, TGALo, Hi);
3077   }
3078 
3079   if (Model == TLSModel::InitialExec) {
3080     SDValue TGA = DAG.getTargetGlobalAddress(GV, dl, PtrVT, 0, 0);
3081     SDValue TGATLS = DAG.getTargetGlobalAddress(GV, dl, PtrVT, 0,
3082                                                 PPCII::MO_TLS);
3083     SDValue GOTPtr;
3084     if (is64bit) {
3085       setUsesTOCBasePtr(DAG);
3086       SDValue GOTReg = DAG.getRegister(PPC::X2, MVT::i64);
3087       GOTPtr = DAG.getNode(PPCISD::ADDIS_GOT_TPREL_HA, dl,
3088                            PtrVT, GOTReg, TGA);
3089     } else {
3090       if (!TM.isPositionIndependent())
3091         GOTPtr = DAG.getNode(PPCISD::PPC32_GOT, dl, PtrVT);
3092       else if (picLevel == PICLevel::SmallPIC)
3093         GOTPtr = DAG.getNode(PPCISD::GlobalBaseReg, dl, PtrVT);
3094       else
3095         GOTPtr = DAG.getNode(PPCISD::PPC32_PICGOT, dl, PtrVT);
3096     }
3097     SDValue TPOffset = DAG.getNode(PPCISD::LD_GOT_TPREL_L, dl,
3098                                    PtrVT, TGA, GOTPtr);
3099     return DAG.getNode(PPCISD::ADD_TLS, dl, PtrVT, TPOffset, TGATLS);
3100   }
3101 
3102   if (Model == TLSModel::GeneralDynamic) {
3103     SDValue TGA = DAG.getTargetGlobalAddress(GV, dl, PtrVT, 0, 0);
3104     SDValue GOTPtr;
3105     if (is64bit) {
3106       setUsesTOCBasePtr(DAG);
3107       SDValue GOTReg = DAG.getRegister(PPC::X2, MVT::i64);
3108       GOTPtr = DAG.getNode(PPCISD::ADDIS_TLSGD_HA, dl, PtrVT,
3109                                    GOTReg, TGA);
3110     } else {
3111       if (picLevel == PICLevel::SmallPIC)
3112         GOTPtr = DAG.getNode(PPCISD::GlobalBaseReg, dl, PtrVT);
3113       else
3114         GOTPtr = DAG.getNode(PPCISD::PPC32_PICGOT, dl, PtrVT);
3115     }
3116     return DAG.getNode(PPCISD::ADDI_TLSGD_L_ADDR, dl, PtrVT,
3117                        GOTPtr, TGA, TGA);
3118   }
3119 
3120   if (Model == TLSModel::LocalDynamic) {
3121     SDValue TGA = DAG.getTargetGlobalAddress(GV, dl, PtrVT, 0, 0);
3122     SDValue GOTPtr;
3123     if (is64bit) {
3124       setUsesTOCBasePtr(DAG);
3125       SDValue GOTReg = DAG.getRegister(PPC::X2, MVT::i64);
3126       GOTPtr = DAG.getNode(PPCISD::ADDIS_TLSLD_HA, dl, PtrVT,
3127                            GOTReg, TGA);
3128     } else {
3129       if (picLevel == PICLevel::SmallPIC)
3130         GOTPtr = DAG.getNode(PPCISD::GlobalBaseReg, dl, PtrVT);
3131       else
3132         GOTPtr = DAG.getNode(PPCISD::PPC32_PICGOT, dl, PtrVT);
3133     }
3134     SDValue TLSAddr = DAG.getNode(PPCISD::ADDI_TLSLD_L_ADDR, dl,
3135                                   PtrVT, GOTPtr, TGA, TGA);
3136     SDValue DtvOffsetHi = DAG.getNode(PPCISD::ADDIS_DTPREL_HA, dl,
3137                                       PtrVT, TLSAddr, TGA);
3138     return DAG.getNode(PPCISD::ADDI_DTPREL_L, dl, PtrVT, DtvOffsetHi, TGA);
3139   }
3140 
3141   llvm_unreachable("Unknown TLS model!");
3142 }
3143 
3144 SDValue PPCTargetLowering::LowerGlobalAddress(SDValue Op,
3145                                               SelectionDAG &DAG) const {
3146   EVT PtrVT = Op.getValueType();
3147   GlobalAddressSDNode *GSDN = cast<GlobalAddressSDNode>(Op);
3148   SDLoc DL(GSDN);
3149   const GlobalValue *GV = GSDN->getGlobal();
3150 
3151   // 64-bit SVR4 ABI & AIX ABI code is always position-independent.
3152   // The actual address of the GlobalValue is stored in the TOC.
3153   if (Subtarget.is64BitELFABI() || Subtarget.isAIXABI()) {
3154     if (Subtarget.isUsingPCRelativeCalls()) {
3155       EVT Ty = getPointerTy(DAG.getDataLayout());
3156       if (isAccessedAsGotIndirect(Op)) {
3157         SDValue GA = DAG.getTargetGlobalAddress(GV, DL, Ty, GSDN->getOffset(),
3158                                                 PPCII::MO_PCREL_FLAG |
3159                                                     PPCII::MO_GOT_FLAG);
3160         SDValue MatPCRel = DAG.getNode(PPCISD::MAT_PCREL_ADDR, DL, Ty, GA);
3161         SDValue Load = DAG.getLoad(MVT::i64, DL, DAG.getEntryNode(), MatPCRel,
3162                                    MachinePointerInfo());
3163         return Load;
3164       } else {
3165         SDValue GA = DAG.getTargetGlobalAddress(GV, DL, Ty, GSDN->getOffset(),
3166                                                 PPCII::MO_PCREL_FLAG);
3167         return DAG.getNode(PPCISD::MAT_PCREL_ADDR, DL, Ty, GA);
3168       }
3169     }
3170     setUsesTOCBasePtr(DAG);
3171     SDValue GA = DAG.getTargetGlobalAddress(GV, DL, PtrVT, GSDN->getOffset());
3172     return getTOCEntry(DAG, DL, GA);
3173   }
3174 
3175   unsigned MOHiFlag, MOLoFlag;
3176   bool IsPIC = isPositionIndependent();
3177   getLabelAccessInfo(IsPIC, Subtarget, MOHiFlag, MOLoFlag, GV);
3178 
3179   if (IsPIC && Subtarget.isSVR4ABI()) {
3180     SDValue GA = DAG.getTargetGlobalAddress(GV, DL, PtrVT,
3181                                             GSDN->getOffset(),
3182                                             PPCII::MO_PIC_FLAG);
3183     return getTOCEntry(DAG, DL, GA);
3184   }
3185 
3186   SDValue GAHi =
3187     DAG.getTargetGlobalAddress(GV, DL, PtrVT, GSDN->getOffset(), MOHiFlag);
3188   SDValue GALo =
3189     DAG.getTargetGlobalAddress(GV, DL, PtrVT, GSDN->getOffset(), MOLoFlag);
3190 
3191   return LowerLabelRef(GAHi, GALo, IsPIC, DAG);
3192 }
3193 
3194 SDValue PPCTargetLowering::LowerSETCC(SDValue Op, SelectionDAG &DAG) const {
3195   ISD::CondCode CC = cast<CondCodeSDNode>(Op.getOperand(2))->get();
3196   SDLoc dl(Op);
3197 
3198   if (Op.getValueType() == MVT::v2i64) {
3199     // When the operands themselves are v2i64 values, we need to do something
3200     // special because VSX has no underlying comparison operations for these.
3201     if (Op.getOperand(0).getValueType() == MVT::v2i64) {
3202       // Equality can be handled by casting to the legal type for Altivec
3203       // comparisons, everything else needs to be expanded.
3204       if (CC == ISD::SETEQ || CC == ISD::SETNE) {
3205         return DAG.getNode(ISD::BITCAST, dl, MVT::v2i64,
3206                  DAG.getSetCC(dl, MVT::v4i32,
3207                    DAG.getNode(ISD::BITCAST, dl, MVT::v4i32, Op.getOperand(0)),
3208                    DAG.getNode(ISD::BITCAST, dl, MVT::v4i32, Op.getOperand(1)),
3209                    CC));
3210       }
3211 
3212       return SDValue();
3213     }
3214 
3215     // We handle most of these in the usual way.
3216     return Op;
3217   }
3218 
3219   // If we're comparing for equality to zero, expose the fact that this is
3220   // implemented as a ctlz/srl pair on ppc, so that the dag combiner can
3221   // fold the new nodes.
3222   if (SDValue V = lowerCmpEqZeroToCtlzSrl(Op, DAG))
3223     return V;
3224 
3225   if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op.getOperand(1))) {
3226     // Leave comparisons against 0 and -1 alone for now, since they're usually
3227     // optimized.  FIXME: revisit this when we can custom lower all setcc
3228     // optimizations.
3229     if (C->isAllOnesValue() || C->isNullValue())
3230       return SDValue();
3231   }
3232 
3233   // If we have an integer seteq/setne, turn it into a compare against zero
3234   // by xor'ing the rhs with the lhs, which is faster than setting a
3235   // condition register, reading it back out, and masking the correct bit.  The
3236   // normal approach here uses sub to do this instead of xor.  Using xor exposes
3237   // the result to other bit-twiddling opportunities.
3238   EVT LHSVT = Op.getOperand(0).getValueType();
3239   if (LHSVT.isInteger() && (CC == ISD::SETEQ || CC == ISD::SETNE)) {
3240     EVT VT = Op.getValueType();
3241     SDValue Sub = DAG.getNode(ISD::XOR, dl, LHSVT, Op.getOperand(0),
3242                                 Op.getOperand(1));
3243     return DAG.getSetCC(dl, VT, Sub, DAG.getConstant(0, dl, LHSVT), CC);
3244   }
3245   return SDValue();
3246 }
3247 
3248 SDValue PPCTargetLowering::LowerVAARG(SDValue Op, SelectionDAG &DAG) const {
3249   SDNode *Node = Op.getNode();
3250   EVT VT = Node->getValueType(0);
3251   EVT PtrVT = getPointerTy(DAG.getDataLayout());
3252   SDValue InChain = Node->getOperand(0);
3253   SDValue VAListPtr = Node->getOperand(1);
3254   const Value *SV = cast<SrcValueSDNode>(Node->getOperand(2))->getValue();
3255   SDLoc dl(Node);
3256 
3257   assert(!Subtarget.isPPC64() && "LowerVAARG is PPC32 only");
3258 
3259   // gpr_index
3260   SDValue GprIndex = DAG.getExtLoad(ISD::ZEXTLOAD, dl, MVT::i32, InChain,
3261                                     VAListPtr, MachinePointerInfo(SV), MVT::i8);
3262   InChain = GprIndex.getValue(1);
3263 
3264   if (VT == MVT::i64) {
3265     // Check if GprIndex is even
3266     SDValue GprAnd = DAG.getNode(ISD::AND, dl, MVT::i32, GprIndex,
3267                                  DAG.getConstant(1, dl, MVT::i32));
3268     SDValue CC64 = DAG.getSetCC(dl, MVT::i32, GprAnd,
3269                                 DAG.getConstant(0, dl, MVT::i32), ISD::SETNE);
3270     SDValue GprIndexPlusOne = DAG.getNode(ISD::ADD, dl, MVT::i32, GprIndex,
3271                                           DAG.getConstant(1, dl, MVT::i32));
3272     // Align GprIndex to be even if it isn't
3273     GprIndex = DAG.getNode(ISD::SELECT, dl, MVT::i32, CC64, GprIndexPlusOne,
3274                            GprIndex);
3275   }
3276 
3277   // fpr index is 1 byte after gpr
3278   SDValue FprPtr = DAG.getNode(ISD::ADD, dl, PtrVT, VAListPtr,
3279                                DAG.getConstant(1, dl, MVT::i32));
3280 
3281   // fpr
3282   SDValue FprIndex = DAG.getExtLoad(ISD::ZEXTLOAD, dl, MVT::i32, InChain,
3283                                     FprPtr, MachinePointerInfo(SV), MVT::i8);
3284   InChain = FprIndex.getValue(1);
3285 
3286   SDValue RegSaveAreaPtr = DAG.getNode(ISD::ADD, dl, PtrVT, VAListPtr,
3287                                        DAG.getConstant(8, dl, MVT::i32));
3288 
3289   SDValue OverflowAreaPtr = DAG.getNode(ISD::ADD, dl, PtrVT, VAListPtr,
3290                                         DAG.getConstant(4, dl, MVT::i32));
3291 
3292   // areas
3293   SDValue OverflowArea =
3294       DAG.getLoad(MVT::i32, dl, InChain, OverflowAreaPtr, MachinePointerInfo());
3295   InChain = OverflowArea.getValue(1);
3296 
3297   SDValue RegSaveArea =
3298       DAG.getLoad(MVT::i32, dl, InChain, RegSaveAreaPtr, MachinePointerInfo());
3299   InChain = RegSaveArea.getValue(1);
3300 
3301   // select overflow_area if index > 8
3302   SDValue CC = DAG.getSetCC(dl, MVT::i32, VT.isInteger() ? GprIndex : FprIndex,
3303                             DAG.getConstant(8, dl, MVT::i32), ISD::SETLT);
3304 
3305   // adjustment constant gpr_index * 4/8
3306   SDValue RegConstant = DAG.getNode(ISD::MUL, dl, MVT::i32,
3307                                     VT.isInteger() ? GprIndex : FprIndex,
3308                                     DAG.getConstant(VT.isInteger() ? 4 : 8, dl,
3309                                                     MVT::i32));
3310 
3311   // OurReg = RegSaveArea + RegConstant
3312   SDValue OurReg = DAG.getNode(ISD::ADD, dl, PtrVT, RegSaveArea,
3313                                RegConstant);
3314 
3315   // Floating types are 32 bytes into RegSaveArea
3316   if (VT.isFloatingPoint())
3317     OurReg = DAG.getNode(ISD::ADD, dl, PtrVT, OurReg,
3318                          DAG.getConstant(32, dl, MVT::i32));
3319 
3320   // increase {f,g}pr_index by 1 (or 2 if VT is i64)
3321   SDValue IndexPlus1 = DAG.getNode(ISD::ADD, dl, MVT::i32,
3322                                    VT.isInteger() ? GprIndex : FprIndex,
3323                                    DAG.getConstant(VT == MVT::i64 ? 2 : 1, dl,
3324                                                    MVT::i32));
3325 
3326   InChain = DAG.getTruncStore(InChain, dl, IndexPlus1,
3327                               VT.isInteger() ? VAListPtr : FprPtr,
3328                               MachinePointerInfo(SV), MVT::i8);
3329 
3330   // determine if we should load from reg_save_area or overflow_area
3331   SDValue Result = DAG.getNode(ISD::SELECT, dl, PtrVT, CC, OurReg, OverflowArea);
3332 
3333   // increase overflow_area by 4/8 if gpr/fpr > 8
3334   SDValue OverflowAreaPlusN = DAG.getNode(ISD::ADD, dl, PtrVT, OverflowArea,
3335                                           DAG.getConstant(VT.isInteger() ? 4 : 8,
3336                                           dl, MVT::i32));
3337 
3338   OverflowArea = DAG.getNode(ISD::SELECT, dl, MVT::i32, CC, OverflowArea,
3339                              OverflowAreaPlusN);
3340 
3341   InChain = DAG.getTruncStore(InChain, dl, OverflowArea, OverflowAreaPtr,
3342                               MachinePointerInfo(), MVT::i32);
3343 
3344   return DAG.getLoad(VT, dl, InChain, Result, MachinePointerInfo());
3345 }
3346 
3347 SDValue PPCTargetLowering::LowerVACOPY(SDValue Op, SelectionDAG &DAG) const {
3348   assert(!Subtarget.isPPC64() && "LowerVACOPY is PPC32 only");
3349 
3350   // We have to copy the entire va_list struct:
3351   // 2*sizeof(char) + 2 Byte alignment + 2*sizeof(char*) = 12 Byte
3352   return DAG.getMemcpy(Op.getOperand(0), Op, Op.getOperand(1), Op.getOperand(2),
3353                        DAG.getConstant(12, SDLoc(Op), MVT::i32), Align(8),
3354                        false, true, false, MachinePointerInfo(),
3355                        MachinePointerInfo());
3356 }
3357 
3358 SDValue PPCTargetLowering::LowerADJUST_TRAMPOLINE(SDValue Op,
3359                                                   SelectionDAG &DAG) const {
3360   if (Subtarget.isAIXABI())
3361     report_fatal_error("ADJUST_TRAMPOLINE operation is not supported on AIX.");
3362 
3363   return Op.getOperand(0);
3364 }
3365 
3366 SDValue PPCTargetLowering::LowerINIT_TRAMPOLINE(SDValue Op,
3367                                                 SelectionDAG &DAG) const {
3368   if (Subtarget.isAIXABI())
3369     report_fatal_error("INIT_TRAMPOLINE operation is not supported on AIX.");
3370 
3371   SDValue Chain = Op.getOperand(0);
3372   SDValue Trmp = Op.getOperand(1); // trampoline
3373   SDValue FPtr = Op.getOperand(2); // nested function
3374   SDValue Nest = Op.getOperand(3); // 'nest' parameter value
3375   SDLoc dl(Op);
3376 
3377   EVT PtrVT = getPointerTy(DAG.getDataLayout());
3378   bool isPPC64 = (PtrVT == MVT::i64);
3379   Type *IntPtrTy = DAG.getDataLayout().getIntPtrType(*DAG.getContext());
3380 
3381   TargetLowering::ArgListTy Args;
3382   TargetLowering::ArgListEntry Entry;
3383 
3384   Entry.Ty = IntPtrTy;
3385   Entry.Node = Trmp; Args.push_back(Entry);
3386 
3387   // TrampSize == (isPPC64 ? 48 : 40);
3388   Entry.Node = DAG.getConstant(isPPC64 ? 48 : 40, dl,
3389                                isPPC64 ? MVT::i64 : MVT::i32);
3390   Args.push_back(Entry);
3391 
3392   Entry.Node = FPtr; Args.push_back(Entry);
3393   Entry.Node = Nest; Args.push_back(Entry);
3394 
3395   // Lower to a call to __trampoline_setup(Trmp, TrampSize, FPtr, ctx_reg)
3396   TargetLowering::CallLoweringInfo CLI(DAG);
3397   CLI.setDebugLoc(dl).setChain(Chain).setLibCallee(
3398       CallingConv::C, Type::getVoidTy(*DAG.getContext()),
3399       DAG.getExternalSymbol("__trampoline_setup", PtrVT), std::move(Args));
3400 
3401   std::pair<SDValue, SDValue> CallResult = LowerCallTo(CLI);
3402   return CallResult.second;
3403 }
3404 
3405 SDValue PPCTargetLowering::LowerVASTART(SDValue Op, SelectionDAG &DAG) const {
3406   MachineFunction &MF = DAG.getMachineFunction();
3407   PPCFunctionInfo *FuncInfo = MF.getInfo<PPCFunctionInfo>();
3408   EVT PtrVT = getPointerTy(MF.getDataLayout());
3409 
3410   SDLoc dl(Op);
3411 
3412   if (Subtarget.isPPC64() || Subtarget.isAIXABI()) {
3413     // vastart just stores the address of the VarArgsFrameIndex slot into the
3414     // memory location argument.
3415     SDValue FR = DAG.getFrameIndex(FuncInfo->getVarArgsFrameIndex(), PtrVT);
3416     const Value *SV = cast<SrcValueSDNode>(Op.getOperand(2))->getValue();
3417     return DAG.getStore(Op.getOperand(0), dl, FR, Op.getOperand(1),
3418                         MachinePointerInfo(SV));
3419   }
3420 
3421   // For the 32-bit SVR4 ABI we follow the layout of the va_list struct.
3422   // We suppose the given va_list is already allocated.
3423   //
3424   // typedef struct {
3425   //  char gpr;     /* index into the array of 8 GPRs
3426   //                 * stored in the register save area
3427   //                 * gpr=0 corresponds to r3,
3428   //                 * gpr=1 to r4, etc.
3429   //                 */
3430   //  char fpr;     /* index into the array of 8 FPRs
3431   //                 * stored in the register save area
3432   //                 * fpr=0 corresponds to f1,
3433   //                 * fpr=1 to f2, etc.
3434   //                 */
3435   //  char *overflow_arg_area;
3436   //                /* location on stack that holds
3437   //                 * the next overflow argument
3438   //                 */
3439   //  char *reg_save_area;
3440   //               /* where r3:r10 and f1:f8 (if saved)
3441   //                * are stored
3442   //                */
3443   // } va_list[1];
3444 
3445   SDValue ArgGPR = DAG.getConstant(FuncInfo->getVarArgsNumGPR(), dl, MVT::i32);
3446   SDValue ArgFPR = DAG.getConstant(FuncInfo->getVarArgsNumFPR(), dl, MVT::i32);
3447   SDValue StackOffsetFI = DAG.getFrameIndex(FuncInfo->getVarArgsStackOffset(),
3448                                             PtrVT);
3449   SDValue FR = DAG.getFrameIndex(FuncInfo->getVarArgsFrameIndex(),
3450                                  PtrVT);
3451 
3452   uint64_t FrameOffset = PtrVT.getSizeInBits()/8;
3453   SDValue ConstFrameOffset = DAG.getConstant(FrameOffset, dl, PtrVT);
3454 
3455   uint64_t StackOffset = PtrVT.getSizeInBits()/8 - 1;
3456   SDValue ConstStackOffset = DAG.getConstant(StackOffset, dl, PtrVT);
3457 
3458   uint64_t FPROffset = 1;
3459   SDValue ConstFPROffset = DAG.getConstant(FPROffset, dl, PtrVT);
3460 
3461   const Value *SV = cast<SrcValueSDNode>(Op.getOperand(2))->getValue();
3462 
3463   // Store first byte : number of int regs
3464   SDValue firstStore =
3465       DAG.getTruncStore(Op.getOperand(0), dl, ArgGPR, Op.getOperand(1),
3466                         MachinePointerInfo(SV), MVT::i8);
3467   uint64_t nextOffset = FPROffset;
3468   SDValue nextPtr = DAG.getNode(ISD::ADD, dl, PtrVT, Op.getOperand(1),
3469                                   ConstFPROffset);
3470 
3471   // Store second byte : number of float regs
3472   SDValue secondStore =
3473       DAG.getTruncStore(firstStore, dl, ArgFPR, nextPtr,
3474                         MachinePointerInfo(SV, nextOffset), MVT::i8);
3475   nextOffset += StackOffset;
3476   nextPtr = DAG.getNode(ISD::ADD, dl, PtrVT, nextPtr, ConstStackOffset);
3477 
3478   // Store second word : arguments given on stack
3479   SDValue thirdStore = DAG.getStore(secondStore, dl, StackOffsetFI, nextPtr,
3480                                     MachinePointerInfo(SV, nextOffset));
3481   nextOffset += FrameOffset;
3482   nextPtr = DAG.getNode(ISD::ADD, dl, PtrVT, nextPtr, ConstFrameOffset);
3483 
3484   // Store third word : arguments given in registers
3485   return DAG.getStore(thirdStore, dl, FR, nextPtr,
3486                       MachinePointerInfo(SV, nextOffset));
3487 }
3488 
3489 /// FPR - The set of FP registers that should be allocated for arguments
3490 /// on Darwin and AIX.
3491 static const MCPhysReg FPR[] = {PPC::F1,  PPC::F2,  PPC::F3, PPC::F4, PPC::F5,
3492                                 PPC::F6,  PPC::F7,  PPC::F8, PPC::F9, PPC::F10,
3493                                 PPC::F11, PPC::F12, PPC::F13};
3494 
3495 /// QFPR - The set of QPX registers that should be allocated for arguments.
3496 static const MCPhysReg QFPR[] = {
3497     PPC::QF1, PPC::QF2, PPC::QF3,  PPC::QF4,  PPC::QF5,  PPC::QF6, PPC::QF7,
3498     PPC::QF8, PPC::QF9, PPC::QF10, PPC::QF11, PPC::QF12, PPC::QF13};
3499 
3500 /// CalculateStackSlotSize - Calculates the size reserved for this argument on
3501 /// the stack.
3502 static unsigned CalculateStackSlotSize(EVT ArgVT, ISD::ArgFlagsTy Flags,
3503                                        unsigned PtrByteSize) {
3504   unsigned ArgSize = ArgVT.getStoreSize();
3505   if (Flags.isByVal())
3506     ArgSize = Flags.getByValSize();
3507 
3508   // Round up to multiples of the pointer size, except for array members,
3509   // which are always packed.
3510   if (!Flags.isInConsecutiveRegs())
3511     ArgSize = ((ArgSize + PtrByteSize - 1)/PtrByteSize) * PtrByteSize;
3512 
3513   return ArgSize;
3514 }
3515 
3516 /// CalculateStackSlotAlignment - Calculates the alignment of this argument
3517 /// on the stack.
3518 static Align CalculateStackSlotAlignment(EVT ArgVT, EVT OrigVT,
3519                                          ISD::ArgFlagsTy Flags,
3520                                          unsigned PtrByteSize) {
3521   Align Alignment(PtrByteSize);
3522 
3523   // Altivec parameters are padded to a 16 byte boundary.
3524   if (ArgVT == MVT::v4f32 || ArgVT == MVT::v4i32 ||
3525       ArgVT == MVT::v8i16 || ArgVT == MVT::v16i8 ||
3526       ArgVT == MVT::v2f64 || ArgVT == MVT::v2i64 ||
3527       ArgVT == MVT::v1i128 || ArgVT == MVT::f128)
3528     Alignment = Align(16);
3529   // QPX vector types stored in double-precision are padded to a 32 byte
3530   // boundary.
3531   else if (ArgVT == MVT::v4f64 || ArgVT == MVT::v4i1)
3532     Alignment = Align(32);
3533 
3534   // ByVal parameters are aligned as requested.
3535   if (Flags.isByVal()) {
3536     auto BVAlign = Flags.getNonZeroByValAlign();
3537     if (BVAlign > PtrByteSize) {
3538       if (BVAlign.value() % PtrByteSize != 0)
3539         llvm_unreachable(
3540             "ByVal alignment is not a multiple of the pointer size");
3541 
3542       Alignment = BVAlign;
3543     }
3544   }
3545 
3546   // Array members are always packed to their original alignment.
3547   if (Flags.isInConsecutiveRegs()) {
3548     // If the array member was split into multiple registers, the first
3549     // needs to be aligned to the size of the full type.  (Except for
3550     // ppcf128, which is only aligned as its f64 components.)
3551     if (Flags.isSplit() && OrigVT != MVT::ppcf128)
3552       Alignment = Align(OrigVT.getStoreSize());
3553     else
3554       Alignment = Align(ArgVT.getStoreSize());
3555   }
3556 
3557   return Alignment;
3558 }
3559 
3560 /// CalculateStackSlotUsed - Return whether this argument will use its
3561 /// stack slot (instead of being passed in registers).  ArgOffset,
3562 /// AvailableFPRs, and AvailableVRs must hold the current argument
3563 /// position, and will be updated to account for this argument.
3564 static bool CalculateStackSlotUsed(EVT ArgVT, EVT OrigVT,
3565                                    ISD::ArgFlagsTy Flags,
3566                                    unsigned PtrByteSize,
3567                                    unsigned LinkageSize,
3568                                    unsigned ParamAreaSize,
3569                                    unsigned &ArgOffset,
3570                                    unsigned &AvailableFPRs,
3571                                    unsigned &AvailableVRs, bool HasQPX) {
3572   bool UseMemory = false;
3573 
3574   // Respect alignment of argument on the stack.
3575   Align Alignment =
3576       CalculateStackSlotAlignment(ArgVT, OrigVT, Flags, PtrByteSize);
3577   ArgOffset = alignTo(ArgOffset, Alignment);
3578   // If there's no space left in the argument save area, we must
3579   // use memory (this check also catches zero-sized arguments).
3580   if (ArgOffset >= LinkageSize + ParamAreaSize)
3581     UseMemory = true;
3582 
3583   // Allocate argument on the stack.
3584   ArgOffset += CalculateStackSlotSize(ArgVT, Flags, PtrByteSize);
3585   if (Flags.isInConsecutiveRegsLast())
3586     ArgOffset = ((ArgOffset + PtrByteSize - 1)/PtrByteSize) * PtrByteSize;
3587   // If we overran the argument save area, we must use memory
3588   // (this check catches arguments passed partially in memory)
3589   if (ArgOffset > LinkageSize + ParamAreaSize)
3590     UseMemory = true;
3591 
3592   // However, if the argument is actually passed in an FPR or a VR,
3593   // we don't use memory after all.
3594   if (!Flags.isByVal()) {
3595     if (ArgVT == MVT::f32 || ArgVT == MVT::f64 ||
3596         // QPX registers overlap with the scalar FP registers.
3597         (HasQPX && (ArgVT == MVT::v4f32 ||
3598                     ArgVT == MVT::v4f64 ||
3599                     ArgVT == MVT::v4i1)))
3600       if (AvailableFPRs > 0) {
3601         --AvailableFPRs;
3602         return false;
3603       }
3604     if (ArgVT == MVT::v4f32 || ArgVT == MVT::v4i32 ||
3605         ArgVT == MVT::v8i16 || ArgVT == MVT::v16i8 ||
3606         ArgVT == MVT::v2f64 || ArgVT == MVT::v2i64 ||
3607         ArgVT == MVT::v1i128 || ArgVT == MVT::f128)
3608       if (AvailableVRs > 0) {
3609         --AvailableVRs;
3610         return false;
3611       }
3612   }
3613 
3614   return UseMemory;
3615 }
3616 
3617 /// EnsureStackAlignment - Round stack frame size up from NumBytes to
3618 /// ensure minimum alignment required for target.
3619 static unsigned EnsureStackAlignment(const PPCFrameLowering *Lowering,
3620                                      unsigned NumBytes) {
3621   return alignTo(NumBytes, Lowering->getStackAlign());
3622 }
3623 
3624 SDValue PPCTargetLowering::LowerFormalArguments(
3625     SDValue Chain, CallingConv::ID CallConv, bool isVarArg,
3626     const SmallVectorImpl<ISD::InputArg> &Ins, const SDLoc &dl,
3627     SelectionDAG &DAG, SmallVectorImpl<SDValue> &InVals) const {
3628   if (Subtarget.isAIXABI())
3629     return LowerFormalArguments_AIX(Chain, CallConv, isVarArg, Ins, dl, DAG,
3630                                     InVals);
3631   if (Subtarget.is64BitELFABI())
3632     return LowerFormalArguments_64SVR4(Chain, CallConv, isVarArg, Ins, dl, DAG,
3633                                        InVals);
3634   if (Subtarget.is32BitELFABI())
3635     return LowerFormalArguments_32SVR4(Chain, CallConv, isVarArg, Ins, dl, DAG,
3636                                        InVals);
3637 
3638   return LowerFormalArguments_Darwin(Chain, CallConv, isVarArg, Ins, dl, DAG,
3639                                      InVals);
3640 }
3641 
3642 SDValue PPCTargetLowering::LowerFormalArguments_32SVR4(
3643     SDValue Chain, CallingConv::ID CallConv, bool isVarArg,
3644     const SmallVectorImpl<ISD::InputArg> &Ins, const SDLoc &dl,
3645     SelectionDAG &DAG, SmallVectorImpl<SDValue> &InVals) const {
3646 
3647   // 32-bit SVR4 ABI Stack Frame Layout:
3648   //              +-----------------------------------+
3649   //        +-->  |            Back chain             |
3650   //        |     +-----------------------------------+
3651   //        |     | Floating-point register save area |
3652   //        |     +-----------------------------------+
3653   //        |     |    General register save area     |
3654   //        |     +-----------------------------------+
3655   //        |     |          CR save word             |
3656   //        |     +-----------------------------------+
3657   //        |     |         VRSAVE save word          |
3658   //        |     +-----------------------------------+
3659   //        |     |         Alignment padding         |
3660   //        |     +-----------------------------------+
3661   //        |     |     Vector register save area     |
3662   //        |     +-----------------------------------+
3663   //        |     |       Local variable space        |
3664   //        |     +-----------------------------------+
3665   //        |     |        Parameter list area        |
3666   //        |     +-----------------------------------+
3667   //        |     |           LR save word            |
3668   //        |     +-----------------------------------+
3669   // SP-->  +---  |            Back chain             |
3670   //              +-----------------------------------+
3671   //
3672   // Specifications:
3673   //   System V Application Binary Interface PowerPC Processor Supplement
3674   //   AltiVec Technology Programming Interface Manual
3675 
3676   MachineFunction &MF = DAG.getMachineFunction();
3677   MachineFrameInfo &MFI = MF.getFrameInfo();
3678   PPCFunctionInfo *FuncInfo = MF.getInfo<PPCFunctionInfo>();
3679 
3680   EVT PtrVT = getPointerTy(MF.getDataLayout());
3681   // Potential tail calls could cause overwriting of argument stack slots.
3682   bool isImmutable = !(getTargetMachine().Options.GuaranteedTailCallOpt &&
3683                        (CallConv == CallingConv::Fast));
3684   const Align PtrAlign(4);
3685 
3686   // Assign locations to all of the incoming arguments.
3687   SmallVector<CCValAssign, 16> ArgLocs;
3688   PPCCCState CCInfo(CallConv, isVarArg, DAG.getMachineFunction(), ArgLocs,
3689                  *DAG.getContext());
3690 
3691   // Reserve space for the linkage area on the stack.
3692   unsigned LinkageSize = Subtarget.getFrameLowering()->getLinkageSize();
3693   CCInfo.AllocateStack(LinkageSize, PtrAlign);
3694   if (useSoftFloat())
3695     CCInfo.PreAnalyzeFormalArguments(Ins);
3696 
3697   CCInfo.AnalyzeFormalArguments(Ins, CC_PPC32_SVR4);
3698   CCInfo.clearWasPPCF128();
3699 
3700   for (unsigned i = 0, e = ArgLocs.size(); i != e; ++i) {
3701     CCValAssign &VA = ArgLocs[i];
3702 
3703     // Arguments stored in registers.
3704     if (VA.isRegLoc()) {
3705       const TargetRegisterClass *RC;
3706       EVT ValVT = VA.getValVT();
3707 
3708       switch (ValVT.getSimpleVT().SimpleTy) {
3709         default:
3710           llvm_unreachable("ValVT not supported by formal arguments Lowering");
3711         case MVT::i1:
3712         case MVT::i32:
3713           RC = &PPC::GPRCRegClass;
3714           break;
3715         case MVT::f32:
3716           if (Subtarget.hasP8Vector())
3717             RC = &PPC::VSSRCRegClass;
3718           else if (Subtarget.hasSPE())
3719             RC = &PPC::GPRCRegClass;
3720           else
3721             RC = &PPC::F4RCRegClass;
3722           break;
3723         case MVT::f64:
3724           if (Subtarget.hasVSX())
3725             RC = &PPC::VSFRCRegClass;
3726           else if (Subtarget.hasSPE())
3727             // SPE passes doubles in GPR pairs.
3728             RC = &PPC::GPRCRegClass;
3729           else
3730             RC = &PPC::F8RCRegClass;
3731           break;
3732         case MVT::v16i8:
3733         case MVT::v8i16:
3734         case MVT::v4i32:
3735           RC = &PPC::VRRCRegClass;
3736           break;
3737         case MVT::v4f32:
3738           RC = Subtarget.hasQPX() ? &PPC::QSRCRegClass : &PPC::VRRCRegClass;
3739           break;
3740         case MVT::v2f64:
3741         case MVT::v2i64:
3742           RC = &PPC::VRRCRegClass;
3743           break;
3744         case MVT::v4f64:
3745           RC = &PPC::QFRCRegClass;
3746           break;
3747         case MVT::v4i1:
3748           RC = &PPC::QBRCRegClass;
3749           break;
3750       }
3751 
3752       SDValue ArgValue;
3753       // Transform the arguments stored in physical registers into
3754       // virtual ones.
3755       if (VA.getLocVT() == MVT::f64 && Subtarget.hasSPE()) {
3756         assert(i + 1 < e && "No second half of double precision argument");
3757         unsigned RegLo = MF.addLiveIn(VA.getLocReg(), RC);
3758         unsigned RegHi = MF.addLiveIn(ArgLocs[++i].getLocReg(), RC);
3759         SDValue ArgValueLo = DAG.getCopyFromReg(Chain, dl, RegLo, MVT::i32);
3760         SDValue ArgValueHi = DAG.getCopyFromReg(Chain, dl, RegHi, MVT::i32);
3761         if (!Subtarget.isLittleEndian())
3762           std::swap (ArgValueLo, ArgValueHi);
3763         ArgValue = DAG.getNode(PPCISD::BUILD_SPE64, dl, MVT::f64, ArgValueLo,
3764                                ArgValueHi);
3765       } else {
3766         unsigned Reg = MF.addLiveIn(VA.getLocReg(), RC);
3767         ArgValue = DAG.getCopyFromReg(Chain, dl, Reg,
3768                                       ValVT == MVT::i1 ? MVT::i32 : ValVT);
3769         if (ValVT == MVT::i1)
3770           ArgValue = DAG.getNode(ISD::TRUNCATE, dl, MVT::i1, ArgValue);
3771       }
3772 
3773       InVals.push_back(ArgValue);
3774     } else {
3775       // Argument stored in memory.
3776       assert(VA.isMemLoc());
3777 
3778       // Get the extended size of the argument type in stack
3779       unsigned ArgSize = VA.getLocVT().getStoreSize();
3780       // Get the actual size of the argument type
3781       unsigned ObjSize = VA.getValVT().getStoreSize();
3782       unsigned ArgOffset = VA.getLocMemOffset();
3783       // Stack objects in PPC32 are right justified.
3784       ArgOffset += ArgSize - ObjSize;
3785       int FI = MFI.CreateFixedObject(ArgSize, ArgOffset, isImmutable);
3786 
3787       // Create load nodes to retrieve arguments from the stack.
3788       SDValue FIN = DAG.getFrameIndex(FI, PtrVT);
3789       InVals.push_back(
3790           DAG.getLoad(VA.getValVT(), dl, Chain, FIN, MachinePointerInfo()));
3791     }
3792   }
3793 
3794   // Assign locations to all of the incoming aggregate by value arguments.
3795   // Aggregates passed by value are stored in the local variable space of the
3796   // caller's stack frame, right above the parameter list area.
3797   SmallVector<CCValAssign, 16> ByValArgLocs;
3798   CCState CCByValInfo(CallConv, isVarArg, DAG.getMachineFunction(),
3799                       ByValArgLocs, *DAG.getContext());
3800 
3801   // Reserve stack space for the allocations in CCInfo.
3802   CCByValInfo.AllocateStack(CCInfo.getNextStackOffset(), PtrAlign);
3803 
3804   CCByValInfo.AnalyzeFormalArguments(Ins, CC_PPC32_SVR4_ByVal);
3805 
3806   // Area that is at least reserved in the caller of this function.
3807   unsigned MinReservedArea = CCByValInfo.getNextStackOffset();
3808   MinReservedArea = std::max(MinReservedArea, LinkageSize);
3809 
3810   // Set the size that is at least reserved in caller of this function.  Tail
3811   // call optimized function's reserved stack space needs to be aligned so that
3812   // taking the difference between two stack areas will result in an aligned
3813   // stack.
3814   MinReservedArea =
3815       EnsureStackAlignment(Subtarget.getFrameLowering(), MinReservedArea);
3816   FuncInfo->setMinReservedArea(MinReservedArea);
3817 
3818   SmallVector<SDValue, 8> MemOps;
3819 
3820   // If the function takes variable number of arguments, make a frame index for
3821   // the start of the first vararg value... for expansion of llvm.va_start.
3822   if (isVarArg) {
3823     static const MCPhysReg GPArgRegs[] = {
3824       PPC::R3, PPC::R4, PPC::R5, PPC::R6,
3825       PPC::R7, PPC::R8, PPC::R9, PPC::R10,
3826     };
3827     const unsigned NumGPArgRegs = array_lengthof(GPArgRegs);
3828 
3829     static const MCPhysReg FPArgRegs[] = {
3830       PPC::F1, PPC::F2, PPC::F3, PPC::F4, PPC::F5, PPC::F6, PPC::F7,
3831       PPC::F8
3832     };
3833     unsigned NumFPArgRegs = array_lengthof(FPArgRegs);
3834 
3835     if (useSoftFloat() || hasSPE())
3836        NumFPArgRegs = 0;
3837 
3838     FuncInfo->setVarArgsNumGPR(CCInfo.getFirstUnallocated(GPArgRegs));
3839     FuncInfo->setVarArgsNumFPR(CCInfo.getFirstUnallocated(FPArgRegs));
3840 
3841     // Make room for NumGPArgRegs and NumFPArgRegs.
3842     int Depth = NumGPArgRegs * PtrVT.getSizeInBits()/8 +
3843                 NumFPArgRegs * MVT(MVT::f64).getSizeInBits()/8;
3844 
3845     FuncInfo->setVarArgsStackOffset(
3846       MFI.CreateFixedObject(PtrVT.getSizeInBits()/8,
3847                             CCInfo.getNextStackOffset(), true));
3848 
3849     FuncInfo->setVarArgsFrameIndex(
3850         MFI.CreateStackObject(Depth, Align(8), false));
3851     SDValue FIN = DAG.getFrameIndex(FuncInfo->getVarArgsFrameIndex(), PtrVT);
3852 
3853     // The fixed integer arguments of a variadic function are stored to the
3854     // VarArgsFrameIndex on the stack so that they may be loaded by
3855     // dereferencing the result of va_next.
3856     for (unsigned GPRIndex = 0; GPRIndex != NumGPArgRegs; ++GPRIndex) {
3857       // Get an existing live-in vreg, or add a new one.
3858       unsigned VReg = MF.getRegInfo().getLiveInVirtReg(GPArgRegs[GPRIndex]);
3859       if (!VReg)
3860         VReg = MF.addLiveIn(GPArgRegs[GPRIndex], &PPC::GPRCRegClass);
3861 
3862       SDValue Val = DAG.getCopyFromReg(Chain, dl, VReg, PtrVT);
3863       SDValue Store =
3864           DAG.getStore(Val.getValue(1), dl, Val, FIN, MachinePointerInfo());
3865       MemOps.push_back(Store);
3866       // Increment the address by four for the next argument to store
3867       SDValue PtrOff = DAG.getConstant(PtrVT.getSizeInBits()/8, dl, PtrVT);
3868       FIN = DAG.getNode(ISD::ADD, dl, PtrOff.getValueType(), FIN, PtrOff);
3869     }
3870 
3871     // FIXME 32-bit SVR4: We only need to save FP argument registers if CR bit 6
3872     // is set.
3873     // The double arguments are stored to the VarArgsFrameIndex
3874     // on the stack.
3875     for (unsigned FPRIndex = 0; FPRIndex != NumFPArgRegs; ++FPRIndex) {
3876       // Get an existing live-in vreg, or add a new one.
3877       unsigned VReg = MF.getRegInfo().getLiveInVirtReg(FPArgRegs[FPRIndex]);
3878       if (!VReg)
3879         VReg = MF.addLiveIn(FPArgRegs[FPRIndex], &PPC::F8RCRegClass);
3880 
3881       SDValue Val = DAG.getCopyFromReg(Chain, dl, VReg, MVT::f64);
3882       SDValue Store =
3883           DAG.getStore(Val.getValue(1), dl, Val, FIN, MachinePointerInfo());
3884       MemOps.push_back(Store);
3885       // Increment the address by eight for the next argument to store
3886       SDValue PtrOff = DAG.getConstant(MVT(MVT::f64).getSizeInBits()/8, dl,
3887                                          PtrVT);
3888       FIN = DAG.getNode(ISD::ADD, dl, PtrOff.getValueType(), FIN, PtrOff);
3889     }
3890   }
3891 
3892   if (!MemOps.empty())
3893     Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, MemOps);
3894 
3895   return Chain;
3896 }
3897 
3898 // PPC64 passes i8, i16, and i32 values in i64 registers. Promote
3899 // value to MVT::i64 and then truncate to the correct register size.
3900 SDValue PPCTargetLowering::extendArgForPPC64(ISD::ArgFlagsTy Flags,
3901                                              EVT ObjectVT, SelectionDAG &DAG,
3902                                              SDValue ArgVal,
3903                                              const SDLoc &dl) const {
3904   if (Flags.isSExt())
3905     ArgVal = DAG.getNode(ISD::AssertSext, dl, MVT::i64, ArgVal,
3906                          DAG.getValueType(ObjectVT));
3907   else if (Flags.isZExt())
3908     ArgVal = DAG.getNode(ISD::AssertZext, dl, MVT::i64, ArgVal,
3909                          DAG.getValueType(ObjectVT));
3910 
3911   return DAG.getNode(ISD::TRUNCATE, dl, ObjectVT, ArgVal);
3912 }
3913 
3914 SDValue PPCTargetLowering::LowerFormalArguments_64SVR4(
3915     SDValue Chain, CallingConv::ID CallConv, bool isVarArg,
3916     const SmallVectorImpl<ISD::InputArg> &Ins, const SDLoc &dl,
3917     SelectionDAG &DAG, SmallVectorImpl<SDValue> &InVals) const {
3918   // TODO: add description of PPC stack frame format, or at least some docs.
3919   //
3920   bool isELFv2ABI = Subtarget.isELFv2ABI();
3921   bool isLittleEndian = Subtarget.isLittleEndian();
3922   MachineFunction &MF = DAG.getMachineFunction();
3923   MachineFrameInfo &MFI = MF.getFrameInfo();
3924   PPCFunctionInfo *FuncInfo = MF.getInfo<PPCFunctionInfo>();
3925 
3926   assert(!(CallConv == CallingConv::Fast && isVarArg) &&
3927          "fastcc not supported on varargs functions");
3928 
3929   EVT PtrVT = getPointerTy(MF.getDataLayout());
3930   // Potential tail calls could cause overwriting of argument stack slots.
3931   bool isImmutable = !(getTargetMachine().Options.GuaranteedTailCallOpt &&
3932                        (CallConv == CallingConv::Fast));
3933   unsigned PtrByteSize = 8;
3934   unsigned LinkageSize = Subtarget.getFrameLowering()->getLinkageSize();
3935 
3936   static const MCPhysReg GPR[] = {
3937     PPC::X3, PPC::X4, PPC::X5, PPC::X6,
3938     PPC::X7, PPC::X8, PPC::X9, PPC::X10,
3939   };
3940   static const MCPhysReg VR[] = {
3941     PPC::V2, PPC::V3, PPC::V4, PPC::V5, PPC::V6, PPC::V7, PPC::V8,
3942     PPC::V9, PPC::V10, PPC::V11, PPC::V12, PPC::V13
3943   };
3944 
3945   const unsigned Num_GPR_Regs = array_lengthof(GPR);
3946   const unsigned Num_FPR_Regs = useSoftFloat() ? 0 : 13;
3947   const unsigned Num_VR_Regs  = array_lengthof(VR);
3948   const unsigned Num_QFPR_Regs = Num_FPR_Regs;
3949 
3950   // Do a first pass over the arguments to determine whether the ABI
3951   // guarantees that our caller has allocated the parameter save area
3952   // on its stack frame.  In the ELFv1 ABI, this is always the case;
3953   // in the ELFv2 ABI, it is true if this is a vararg function or if
3954   // any parameter is located in a stack slot.
3955 
3956   bool HasParameterArea = !isELFv2ABI || isVarArg;
3957   unsigned ParamAreaSize = Num_GPR_Regs * PtrByteSize;
3958   unsigned NumBytes = LinkageSize;
3959   unsigned AvailableFPRs = Num_FPR_Regs;
3960   unsigned AvailableVRs = Num_VR_Regs;
3961   for (unsigned i = 0, e = Ins.size(); i != e; ++i) {
3962     if (Ins[i].Flags.isNest())
3963       continue;
3964 
3965     if (CalculateStackSlotUsed(Ins[i].VT, Ins[i].ArgVT, Ins[i].Flags,
3966                                PtrByteSize, LinkageSize, ParamAreaSize,
3967                                NumBytes, AvailableFPRs, AvailableVRs,
3968                                Subtarget.hasQPX()))
3969       HasParameterArea = true;
3970   }
3971 
3972   // Add DAG nodes to load the arguments or copy them out of registers.  On
3973   // entry to a function on PPC, the arguments start after the linkage area,
3974   // although the first ones are often in registers.
3975 
3976   unsigned ArgOffset = LinkageSize;
3977   unsigned GPR_idx = 0, FPR_idx = 0, VR_idx = 0;
3978   unsigned &QFPR_idx = FPR_idx;
3979   SmallVector<SDValue, 8> MemOps;
3980   Function::const_arg_iterator FuncArg = MF.getFunction().arg_begin();
3981   unsigned CurArgIdx = 0;
3982   for (unsigned ArgNo = 0, e = Ins.size(); ArgNo != e; ++ArgNo) {
3983     SDValue ArgVal;
3984     bool needsLoad = false;
3985     EVT ObjectVT = Ins[ArgNo].VT;
3986     EVT OrigVT = Ins[ArgNo].ArgVT;
3987     unsigned ObjSize = ObjectVT.getStoreSize();
3988     unsigned ArgSize = ObjSize;
3989     ISD::ArgFlagsTy Flags = Ins[ArgNo].Flags;
3990     if (Ins[ArgNo].isOrigArg()) {
3991       std::advance(FuncArg, Ins[ArgNo].getOrigArgIndex() - CurArgIdx);
3992       CurArgIdx = Ins[ArgNo].getOrigArgIndex();
3993     }
3994     // We re-align the argument offset for each argument, except when using the
3995     // fast calling convention, when we need to make sure we do that only when
3996     // we'll actually use a stack slot.
3997     unsigned CurArgOffset;
3998     Align Alignment;
3999     auto ComputeArgOffset = [&]() {
4000       /* Respect alignment of argument on the stack.  */
4001       Alignment =
4002           CalculateStackSlotAlignment(ObjectVT, OrigVT, Flags, PtrByteSize);
4003       ArgOffset = alignTo(ArgOffset, Alignment);
4004       CurArgOffset = ArgOffset;
4005     };
4006 
4007     if (CallConv != CallingConv::Fast) {
4008       ComputeArgOffset();
4009 
4010       /* Compute GPR index associated with argument offset.  */
4011       GPR_idx = (ArgOffset - LinkageSize) / PtrByteSize;
4012       GPR_idx = std::min(GPR_idx, Num_GPR_Regs);
4013     }
4014 
4015     // FIXME the codegen can be much improved in some cases.
4016     // We do not have to keep everything in memory.
4017     if (Flags.isByVal()) {
4018       assert(Ins[ArgNo].isOrigArg() && "Byval arguments cannot be implicit");
4019 
4020       if (CallConv == CallingConv::Fast)
4021         ComputeArgOffset();
4022 
4023       // ObjSize is the true size, ArgSize rounded up to multiple of registers.
4024       ObjSize = Flags.getByValSize();
4025       ArgSize = ((ObjSize + PtrByteSize - 1)/PtrByteSize) * PtrByteSize;
4026       // Empty aggregate parameters do not take up registers.  Examples:
4027       //   struct { } a;
4028       //   union  { } b;
4029       //   int c[0];
4030       // etc.  However, we have to provide a place-holder in InVals, so
4031       // pretend we have an 8-byte item at the current address for that
4032       // purpose.
4033       if (!ObjSize) {
4034         int FI = MFI.CreateFixedObject(PtrByteSize, ArgOffset, true);
4035         SDValue FIN = DAG.getFrameIndex(FI, PtrVT);
4036         InVals.push_back(FIN);
4037         continue;
4038       }
4039 
4040       // Create a stack object covering all stack doublewords occupied
4041       // by the argument.  If the argument is (fully or partially) on
4042       // the stack, or if the argument is fully in registers but the
4043       // caller has allocated the parameter save anyway, we can refer
4044       // directly to the caller's stack frame.  Otherwise, create a
4045       // local copy in our own frame.
4046       int FI;
4047       if (HasParameterArea ||
4048           ArgSize + ArgOffset > LinkageSize + Num_GPR_Regs * PtrByteSize)
4049         FI = MFI.CreateFixedObject(ArgSize, ArgOffset, false, true);
4050       else
4051         FI = MFI.CreateStackObject(ArgSize, Alignment, false);
4052       SDValue FIN = DAG.getFrameIndex(FI, PtrVT);
4053 
4054       // Handle aggregates smaller than 8 bytes.
4055       if (ObjSize < PtrByteSize) {
4056         // The value of the object is its address, which differs from the
4057         // address of the enclosing doubleword on big-endian systems.
4058         SDValue Arg = FIN;
4059         if (!isLittleEndian) {
4060           SDValue ArgOff = DAG.getConstant(PtrByteSize - ObjSize, dl, PtrVT);
4061           Arg = DAG.getNode(ISD::ADD, dl, ArgOff.getValueType(), Arg, ArgOff);
4062         }
4063         InVals.push_back(Arg);
4064 
4065         if (GPR_idx != Num_GPR_Regs) {
4066           unsigned VReg = MF.addLiveIn(GPR[GPR_idx++], &PPC::G8RCRegClass);
4067           FuncInfo->addLiveInAttr(VReg, Flags);
4068           SDValue Val = DAG.getCopyFromReg(Chain, dl, VReg, PtrVT);
4069           SDValue Store;
4070 
4071           if (ObjSize==1 || ObjSize==2 || ObjSize==4) {
4072             EVT ObjType = (ObjSize == 1 ? MVT::i8 :
4073                            (ObjSize == 2 ? MVT::i16 : MVT::i32));
4074             Store = DAG.getTruncStore(Val.getValue(1), dl, Val, Arg,
4075                                       MachinePointerInfo(&*FuncArg), ObjType);
4076           } else {
4077             // For sizes that don't fit a truncating store (3, 5, 6, 7),
4078             // store the whole register as-is to the parameter save area
4079             // slot.
4080             Store = DAG.getStore(Val.getValue(1), dl, Val, FIN,
4081                                  MachinePointerInfo(&*FuncArg));
4082           }
4083 
4084           MemOps.push_back(Store);
4085         }
4086         // Whether we copied from a register or not, advance the offset
4087         // into the parameter save area by a full doubleword.
4088         ArgOffset += PtrByteSize;
4089         continue;
4090       }
4091 
4092       // The value of the object is its address, which is the address of
4093       // its first stack doubleword.
4094       InVals.push_back(FIN);
4095 
4096       // Store whatever pieces of the object are in registers to memory.
4097       for (unsigned j = 0; j < ArgSize; j += PtrByteSize) {
4098         if (GPR_idx == Num_GPR_Regs)
4099           break;
4100 
4101         unsigned VReg = MF.addLiveIn(GPR[GPR_idx], &PPC::G8RCRegClass);
4102         FuncInfo->addLiveInAttr(VReg, Flags);
4103         SDValue Val = DAG.getCopyFromReg(Chain, dl, VReg, PtrVT);
4104         SDValue Addr = FIN;
4105         if (j) {
4106           SDValue Off = DAG.getConstant(j, dl, PtrVT);
4107           Addr = DAG.getNode(ISD::ADD, dl, Off.getValueType(), Addr, Off);
4108         }
4109         SDValue Store = DAG.getStore(Val.getValue(1), dl, Val, Addr,
4110                                      MachinePointerInfo(&*FuncArg, j));
4111         MemOps.push_back(Store);
4112         ++GPR_idx;
4113       }
4114       ArgOffset += ArgSize;
4115       continue;
4116     }
4117 
4118     switch (ObjectVT.getSimpleVT().SimpleTy) {
4119     default: llvm_unreachable("Unhandled argument type!");
4120     case MVT::i1:
4121     case MVT::i32:
4122     case MVT::i64:
4123       if (Flags.isNest()) {
4124         // The 'nest' parameter, if any, is passed in R11.
4125         unsigned VReg = MF.addLiveIn(PPC::X11, &PPC::G8RCRegClass);
4126         ArgVal = DAG.getCopyFromReg(Chain, dl, VReg, MVT::i64);
4127 
4128         if (ObjectVT == MVT::i32 || ObjectVT == MVT::i1)
4129           ArgVal = extendArgForPPC64(Flags, ObjectVT, DAG, ArgVal, dl);
4130 
4131         break;
4132       }
4133 
4134       // These can be scalar arguments or elements of an integer array type
4135       // passed directly.  Clang may use those instead of "byval" aggregate
4136       // types to avoid forcing arguments to memory unnecessarily.
4137       if (GPR_idx != Num_GPR_Regs) {
4138         unsigned VReg = MF.addLiveIn(GPR[GPR_idx++], &PPC::G8RCRegClass);
4139         FuncInfo->addLiveInAttr(VReg, Flags);
4140         ArgVal = DAG.getCopyFromReg(Chain, dl, VReg, MVT::i64);
4141 
4142         if (ObjectVT == MVT::i32 || ObjectVT == MVT::i1)
4143           // PPC64 passes i8, i16, and i32 values in i64 registers. Promote
4144           // value to MVT::i64 and then truncate to the correct register size.
4145           ArgVal = extendArgForPPC64(Flags, ObjectVT, DAG, ArgVal, dl);
4146       } else {
4147         if (CallConv == CallingConv::Fast)
4148           ComputeArgOffset();
4149 
4150         needsLoad = true;
4151         ArgSize = PtrByteSize;
4152       }
4153       if (CallConv != CallingConv::Fast || needsLoad)
4154         ArgOffset += 8;
4155       break;
4156 
4157     case MVT::f32:
4158     case MVT::f64:
4159       // These can be scalar arguments or elements of a float array type
4160       // passed directly.  The latter are used to implement ELFv2 homogenous
4161       // float aggregates.
4162       if (FPR_idx != Num_FPR_Regs) {
4163         unsigned VReg;
4164 
4165         if (ObjectVT == MVT::f32)
4166           VReg = MF.addLiveIn(FPR[FPR_idx],
4167                               Subtarget.hasP8Vector()
4168                                   ? &PPC::VSSRCRegClass
4169                                   : &PPC::F4RCRegClass);
4170         else
4171           VReg = MF.addLiveIn(FPR[FPR_idx], Subtarget.hasVSX()
4172                                                 ? &PPC::VSFRCRegClass
4173                                                 : &PPC::F8RCRegClass);
4174 
4175         ArgVal = DAG.getCopyFromReg(Chain, dl, VReg, ObjectVT);
4176         ++FPR_idx;
4177       } else if (GPR_idx != Num_GPR_Regs && CallConv != CallingConv::Fast) {
4178         // FIXME: We may want to re-enable this for CallingConv::Fast on the P8
4179         // once we support fp <-> gpr moves.
4180 
4181         // This can only ever happen in the presence of f32 array types,
4182         // since otherwise we never run out of FPRs before running out
4183         // of GPRs.
4184         unsigned VReg = MF.addLiveIn(GPR[GPR_idx++], &PPC::G8RCRegClass);
4185         FuncInfo->addLiveInAttr(VReg, Flags);
4186         ArgVal = DAG.getCopyFromReg(Chain, dl, VReg, MVT::i64);
4187 
4188         if (ObjectVT == MVT::f32) {
4189           if ((ArgOffset % PtrByteSize) == (isLittleEndian ? 4 : 0))
4190             ArgVal = DAG.getNode(ISD::SRL, dl, MVT::i64, ArgVal,
4191                                  DAG.getConstant(32, dl, MVT::i32));
4192           ArgVal = DAG.getNode(ISD::TRUNCATE, dl, MVT::i32, ArgVal);
4193         }
4194 
4195         ArgVal = DAG.getNode(ISD::BITCAST, dl, ObjectVT, ArgVal);
4196       } else {
4197         if (CallConv == CallingConv::Fast)
4198           ComputeArgOffset();
4199 
4200         needsLoad = true;
4201       }
4202 
4203       // When passing an array of floats, the array occupies consecutive
4204       // space in the argument area; only round up to the next doubleword
4205       // at the end of the array.  Otherwise, each float takes 8 bytes.
4206       if (CallConv != CallingConv::Fast || needsLoad) {
4207         ArgSize = Flags.isInConsecutiveRegs() ? ObjSize : PtrByteSize;
4208         ArgOffset += ArgSize;
4209         if (Flags.isInConsecutiveRegsLast())
4210           ArgOffset = ((ArgOffset + PtrByteSize - 1)/PtrByteSize) * PtrByteSize;
4211       }
4212       break;
4213     case MVT::v4f32:
4214     case MVT::v4i32:
4215     case MVT::v8i16:
4216     case MVT::v16i8:
4217     case MVT::v2f64:
4218     case MVT::v2i64:
4219     case MVT::v1i128:
4220     case MVT::f128:
4221       if (!Subtarget.hasQPX()) {
4222         // These can be scalar arguments or elements of a vector array type
4223         // passed directly.  The latter are used to implement ELFv2 homogenous
4224         // vector aggregates.
4225         if (VR_idx != Num_VR_Regs) {
4226           unsigned VReg = MF.addLiveIn(VR[VR_idx], &PPC::VRRCRegClass);
4227           ArgVal = DAG.getCopyFromReg(Chain, dl, VReg, ObjectVT);
4228           ++VR_idx;
4229         } else {
4230           if (CallConv == CallingConv::Fast)
4231             ComputeArgOffset();
4232           needsLoad = true;
4233         }
4234         if (CallConv != CallingConv::Fast || needsLoad)
4235           ArgOffset += 16;
4236         break;
4237       } // not QPX
4238 
4239       assert(ObjectVT.getSimpleVT().SimpleTy == MVT::v4f32 &&
4240              "Invalid QPX parameter type");
4241       LLVM_FALLTHROUGH;
4242 
4243     case MVT::v4f64:
4244     case MVT::v4i1:
4245       // QPX vectors are treated like their scalar floating-point subregisters
4246       // (except that they're larger).
4247       unsigned Sz = ObjectVT.getSimpleVT().SimpleTy == MVT::v4f32 ? 16 : 32;
4248       if (QFPR_idx != Num_QFPR_Regs) {
4249         const TargetRegisterClass *RC;
4250         switch (ObjectVT.getSimpleVT().SimpleTy) {
4251         case MVT::v4f64: RC = &PPC::QFRCRegClass; break;
4252         case MVT::v4f32: RC = &PPC::QSRCRegClass; break;
4253         default:         RC = &PPC::QBRCRegClass; break;
4254         }
4255 
4256         unsigned VReg = MF.addLiveIn(QFPR[QFPR_idx], RC);
4257         ArgVal = DAG.getCopyFromReg(Chain, dl, VReg, ObjectVT);
4258         ++QFPR_idx;
4259       } else {
4260         if (CallConv == CallingConv::Fast)
4261           ComputeArgOffset();
4262         needsLoad = true;
4263       }
4264       if (CallConv != CallingConv::Fast || needsLoad)
4265         ArgOffset += Sz;
4266       break;
4267     }
4268 
4269     // We need to load the argument to a virtual register if we determined
4270     // above that we ran out of physical registers of the appropriate type.
4271     if (needsLoad) {
4272       if (ObjSize < ArgSize && !isLittleEndian)
4273         CurArgOffset += ArgSize - ObjSize;
4274       int FI = MFI.CreateFixedObject(ObjSize, CurArgOffset, isImmutable);
4275       SDValue FIN = DAG.getFrameIndex(FI, PtrVT);
4276       ArgVal = DAG.getLoad(ObjectVT, dl, Chain, FIN, MachinePointerInfo());
4277     }
4278 
4279     InVals.push_back(ArgVal);
4280   }
4281 
4282   // Area that is at least reserved in the caller of this function.
4283   unsigned MinReservedArea;
4284   if (HasParameterArea)
4285     MinReservedArea = std::max(ArgOffset, LinkageSize + 8 * PtrByteSize);
4286   else
4287     MinReservedArea = LinkageSize;
4288 
4289   // Set the size that is at least reserved in caller of this function.  Tail
4290   // call optimized functions' reserved stack space needs to be aligned so that
4291   // taking the difference between two stack areas will result in an aligned
4292   // stack.
4293   MinReservedArea =
4294       EnsureStackAlignment(Subtarget.getFrameLowering(), MinReservedArea);
4295   FuncInfo->setMinReservedArea(MinReservedArea);
4296 
4297   // If the function takes variable number of arguments, make a frame index for
4298   // the start of the first vararg value... for expansion of llvm.va_start.
4299   // On ELFv2ABI spec, it writes:
4300   // C programs that are intended to be *portable* across different compilers
4301   // and architectures must use the header file <stdarg.h> to deal with variable
4302   // argument lists.
4303   if (isVarArg && MFI.hasVAStart()) {
4304     int Depth = ArgOffset;
4305 
4306     FuncInfo->setVarArgsFrameIndex(
4307       MFI.CreateFixedObject(PtrByteSize, Depth, true));
4308     SDValue FIN = DAG.getFrameIndex(FuncInfo->getVarArgsFrameIndex(), PtrVT);
4309 
4310     // If this function is vararg, store any remaining integer argument regs
4311     // to their spots on the stack so that they may be loaded by dereferencing
4312     // the result of va_next.
4313     for (GPR_idx = (ArgOffset - LinkageSize) / PtrByteSize;
4314          GPR_idx < Num_GPR_Regs; ++GPR_idx) {
4315       unsigned VReg = MF.addLiveIn(GPR[GPR_idx], &PPC::G8RCRegClass);
4316       SDValue Val = DAG.getCopyFromReg(Chain, dl, VReg, PtrVT);
4317       SDValue Store =
4318           DAG.getStore(Val.getValue(1), dl, Val, FIN, MachinePointerInfo());
4319       MemOps.push_back(Store);
4320       // Increment the address by four for the next argument to store
4321       SDValue PtrOff = DAG.getConstant(PtrByteSize, dl, PtrVT);
4322       FIN = DAG.getNode(ISD::ADD, dl, PtrOff.getValueType(), FIN, PtrOff);
4323     }
4324   }
4325 
4326   if (!MemOps.empty())
4327     Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, MemOps);
4328 
4329   return Chain;
4330 }
4331 
4332 SDValue PPCTargetLowering::LowerFormalArguments_Darwin(
4333     SDValue Chain, CallingConv::ID CallConv, bool isVarArg,
4334     const SmallVectorImpl<ISD::InputArg> &Ins, const SDLoc &dl,
4335     SelectionDAG &DAG, SmallVectorImpl<SDValue> &InVals) const {
4336   // TODO: add description of PPC stack frame format, or at least some docs.
4337   //
4338   MachineFunction &MF = DAG.getMachineFunction();
4339   MachineFrameInfo &MFI = MF.getFrameInfo();
4340   PPCFunctionInfo *FuncInfo = MF.getInfo<PPCFunctionInfo>();
4341 
4342   EVT PtrVT = getPointerTy(MF.getDataLayout());
4343   bool isPPC64 = PtrVT == MVT::i64;
4344   // Potential tail calls could cause overwriting of argument stack slots.
4345   bool isImmutable = !(getTargetMachine().Options.GuaranteedTailCallOpt &&
4346                        (CallConv == CallingConv::Fast));
4347   unsigned PtrByteSize = isPPC64 ? 8 : 4;
4348   unsigned LinkageSize = Subtarget.getFrameLowering()->getLinkageSize();
4349   unsigned ArgOffset = LinkageSize;
4350   // Area that is at least reserved in caller of this function.
4351   unsigned MinReservedArea = ArgOffset;
4352 
4353   static const MCPhysReg GPR_32[] = {           // 32-bit registers.
4354     PPC::R3, PPC::R4, PPC::R5, PPC::R6,
4355     PPC::R7, PPC::R8, PPC::R9, PPC::R10,
4356   };
4357   static const MCPhysReg GPR_64[] = {           // 64-bit registers.
4358     PPC::X3, PPC::X4, PPC::X5, PPC::X6,
4359     PPC::X7, PPC::X8, PPC::X9, PPC::X10,
4360   };
4361   static const MCPhysReg VR[] = {
4362     PPC::V2, PPC::V3, PPC::V4, PPC::V5, PPC::V6, PPC::V7, PPC::V8,
4363     PPC::V9, PPC::V10, PPC::V11, PPC::V12, PPC::V13
4364   };
4365 
4366   const unsigned Num_GPR_Regs = array_lengthof(GPR_32);
4367   const unsigned Num_FPR_Regs = useSoftFloat() ? 0 : 13;
4368   const unsigned Num_VR_Regs  = array_lengthof( VR);
4369 
4370   unsigned GPR_idx = 0, FPR_idx = 0, VR_idx = 0;
4371 
4372   const MCPhysReg *GPR = isPPC64 ? GPR_64 : GPR_32;
4373 
4374   // In 32-bit non-varargs functions, the stack space for vectors is after the
4375   // stack space for non-vectors.  We do not use this space unless we have
4376   // too many vectors to fit in registers, something that only occurs in
4377   // constructed examples:), but we have to walk the arglist to figure
4378   // that out...for the pathological case, compute VecArgOffset as the
4379   // start of the vector parameter area.  Computing VecArgOffset is the
4380   // entire point of the following loop.
4381   unsigned VecArgOffset = ArgOffset;
4382   if (!isVarArg && !isPPC64) {
4383     for (unsigned ArgNo = 0, e = Ins.size(); ArgNo != e;
4384          ++ArgNo) {
4385       EVT ObjectVT = Ins[ArgNo].VT;
4386       ISD::ArgFlagsTy Flags = Ins[ArgNo].Flags;
4387 
4388       if (Flags.isByVal()) {
4389         // ObjSize is the true size, ArgSize rounded up to multiple of regs.
4390         unsigned ObjSize = Flags.getByValSize();
4391         unsigned ArgSize =
4392                 ((ObjSize + PtrByteSize - 1)/PtrByteSize) * PtrByteSize;
4393         VecArgOffset += ArgSize;
4394         continue;
4395       }
4396 
4397       switch(ObjectVT.getSimpleVT().SimpleTy) {
4398       default: llvm_unreachable("Unhandled argument type!");
4399       case MVT::i1:
4400       case MVT::i32:
4401       case MVT::f32:
4402         VecArgOffset += 4;
4403         break;
4404       case MVT::i64:  // PPC64
4405       case MVT::f64:
4406         // FIXME: We are guaranteed to be !isPPC64 at this point.
4407         // Does MVT::i64 apply?
4408         VecArgOffset += 8;
4409         break;
4410       case MVT::v4f32:
4411       case MVT::v4i32:
4412       case MVT::v8i16:
4413       case MVT::v16i8:
4414         // Nothing to do, we're only looking at Nonvector args here.
4415         break;
4416       }
4417     }
4418   }
4419   // We've found where the vector parameter area in memory is.  Skip the
4420   // first 12 parameters; these don't use that memory.
4421   VecArgOffset = ((VecArgOffset+15)/16)*16;
4422   VecArgOffset += 12*16;
4423 
4424   // Add DAG nodes to load the arguments or copy them out of registers.  On
4425   // entry to a function on PPC, the arguments start after the linkage area,
4426   // although the first ones are often in registers.
4427 
4428   SmallVector<SDValue, 8> MemOps;
4429   unsigned nAltivecParamsAtEnd = 0;
4430   Function::const_arg_iterator FuncArg = MF.getFunction().arg_begin();
4431   unsigned CurArgIdx = 0;
4432   for (unsigned ArgNo = 0, e = Ins.size(); ArgNo != e; ++ArgNo) {
4433     SDValue ArgVal;
4434     bool needsLoad = false;
4435     EVT ObjectVT = Ins[ArgNo].VT;
4436     unsigned ObjSize = ObjectVT.getSizeInBits()/8;
4437     unsigned ArgSize = ObjSize;
4438     ISD::ArgFlagsTy Flags = Ins[ArgNo].Flags;
4439     if (Ins[ArgNo].isOrigArg()) {
4440       std::advance(FuncArg, Ins[ArgNo].getOrigArgIndex() - CurArgIdx);
4441       CurArgIdx = Ins[ArgNo].getOrigArgIndex();
4442     }
4443     unsigned CurArgOffset = ArgOffset;
4444 
4445     // Varargs or 64 bit Altivec parameters are padded to a 16 byte boundary.
4446     if (ObjectVT==MVT::v4f32 || ObjectVT==MVT::v4i32 ||
4447         ObjectVT==MVT::v8i16 || ObjectVT==MVT::v16i8) {
4448       if (isVarArg || isPPC64) {
4449         MinReservedArea = ((MinReservedArea+15)/16)*16;
4450         MinReservedArea += CalculateStackSlotSize(ObjectVT,
4451                                                   Flags,
4452                                                   PtrByteSize);
4453       } else  nAltivecParamsAtEnd++;
4454     } else
4455       // Calculate min reserved area.
4456       MinReservedArea += CalculateStackSlotSize(Ins[ArgNo].VT,
4457                                                 Flags,
4458                                                 PtrByteSize);
4459 
4460     // FIXME the codegen can be much improved in some cases.
4461     // We do not have to keep everything in memory.
4462     if (Flags.isByVal()) {
4463       assert(Ins[ArgNo].isOrigArg() && "Byval arguments cannot be implicit");
4464 
4465       // ObjSize is the true size, ArgSize rounded up to multiple of registers.
4466       ObjSize = Flags.getByValSize();
4467       ArgSize = ((ObjSize + PtrByteSize - 1)/PtrByteSize) * PtrByteSize;
4468       // Objects of size 1 and 2 are right justified, everything else is
4469       // left justified.  This means the memory address is adjusted forwards.
4470       if (ObjSize==1 || ObjSize==2) {
4471         CurArgOffset = CurArgOffset + (4 - ObjSize);
4472       }
4473       // The value of the object is its address.
4474       int FI = MFI.CreateFixedObject(ObjSize, CurArgOffset, false, true);
4475       SDValue FIN = DAG.getFrameIndex(FI, PtrVT);
4476       InVals.push_back(FIN);
4477       if (ObjSize==1 || ObjSize==2) {
4478         if (GPR_idx != Num_GPR_Regs) {
4479           unsigned VReg;
4480           if (isPPC64)
4481             VReg = MF.addLiveIn(GPR[GPR_idx], &PPC::G8RCRegClass);
4482           else
4483             VReg = MF.addLiveIn(GPR[GPR_idx], &PPC::GPRCRegClass);
4484           SDValue Val = DAG.getCopyFromReg(Chain, dl, VReg, PtrVT);
4485           EVT ObjType = ObjSize == 1 ? MVT::i8 : MVT::i16;
4486           SDValue Store =
4487               DAG.getTruncStore(Val.getValue(1), dl, Val, FIN,
4488                                 MachinePointerInfo(&*FuncArg), ObjType);
4489           MemOps.push_back(Store);
4490           ++GPR_idx;
4491         }
4492 
4493         ArgOffset += PtrByteSize;
4494 
4495         continue;
4496       }
4497       for (unsigned j = 0; j < ArgSize; j += PtrByteSize) {
4498         // Store whatever pieces of the object are in registers
4499         // to memory.  ArgOffset will be the address of the beginning
4500         // of the object.
4501         if (GPR_idx != Num_GPR_Regs) {
4502           unsigned VReg;
4503           if (isPPC64)
4504             VReg = MF.addLiveIn(GPR[GPR_idx], &PPC::G8RCRegClass);
4505           else
4506             VReg = MF.addLiveIn(GPR[GPR_idx], &PPC::GPRCRegClass);
4507           int FI = MFI.CreateFixedObject(PtrByteSize, ArgOffset, true);
4508           SDValue FIN = DAG.getFrameIndex(FI, PtrVT);
4509           SDValue Val = DAG.getCopyFromReg(Chain, dl, VReg, PtrVT);
4510           SDValue Store = DAG.getStore(Val.getValue(1), dl, Val, FIN,
4511                                        MachinePointerInfo(&*FuncArg, j));
4512           MemOps.push_back(Store);
4513           ++GPR_idx;
4514           ArgOffset += PtrByteSize;
4515         } else {
4516           ArgOffset += ArgSize - (ArgOffset-CurArgOffset);
4517           break;
4518         }
4519       }
4520       continue;
4521     }
4522 
4523     switch (ObjectVT.getSimpleVT().SimpleTy) {
4524     default: llvm_unreachable("Unhandled argument type!");
4525     case MVT::i1:
4526     case MVT::i32:
4527       if (!isPPC64) {
4528         if (GPR_idx != Num_GPR_Regs) {
4529           unsigned VReg = MF.addLiveIn(GPR[GPR_idx], &PPC::GPRCRegClass);
4530           ArgVal = DAG.getCopyFromReg(Chain, dl, VReg, MVT::i32);
4531 
4532           if (ObjectVT == MVT::i1)
4533             ArgVal = DAG.getNode(ISD::TRUNCATE, dl, MVT::i1, ArgVal);
4534 
4535           ++GPR_idx;
4536         } else {
4537           needsLoad = true;
4538           ArgSize = PtrByteSize;
4539         }
4540         // All int arguments reserve stack space in the Darwin ABI.
4541         ArgOffset += PtrByteSize;
4542         break;
4543       }
4544       LLVM_FALLTHROUGH;
4545     case MVT::i64:  // PPC64
4546       if (GPR_idx != Num_GPR_Regs) {
4547         unsigned VReg = MF.addLiveIn(GPR[GPR_idx], &PPC::G8RCRegClass);
4548         ArgVal = DAG.getCopyFromReg(Chain, dl, VReg, MVT::i64);
4549 
4550         if (ObjectVT == MVT::i32 || ObjectVT == MVT::i1)
4551           // PPC64 passes i8, i16, and i32 values in i64 registers. Promote
4552           // value to MVT::i64 and then truncate to the correct register size.
4553           ArgVal = extendArgForPPC64(Flags, ObjectVT, DAG, ArgVal, dl);
4554 
4555         ++GPR_idx;
4556       } else {
4557         needsLoad = true;
4558         ArgSize = PtrByteSize;
4559       }
4560       // All int arguments reserve stack space in the Darwin ABI.
4561       ArgOffset += 8;
4562       break;
4563 
4564     case MVT::f32:
4565     case MVT::f64:
4566       // Every 4 bytes of argument space consumes one of the GPRs available for
4567       // argument passing.
4568       if (GPR_idx != Num_GPR_Regs) {
4569         ++GPR_idx;
4570         if (ObjSize == 8 && GPR_idx != Num_GPR_Regs && !isPPC64)
4571           ++GPR_idx;
4572       }
4573       if (FPR_idx != Num_FPR_Regs) {
4574         unsigned VReg;
4575 
4576         if (ObjectVT == MVT::f32)
4577           VReg = MF.addLiveIn(FPR[FPR_idx], &PPC::F4RCRegClass);
4578         else
4579           VReg = MF.addLiveIn(FPR[FPR_idx], &PPC::F8RCRegClass);
4580 
4581         ArgVal = DAG.getCopyFromReg(Chain, dl, VReg, ObjectVT);
4582         ++FPR_idx;
4583       } else {
4584         needsLoad = true;
4585       }
4586 
4587       // All FP arguments reserve stack space in the Darwin ABI.
4588       ArgOffset += isPPC64 ? 8 : ObjSize;
4589       break;
4590     case MVT::v4f32:
4591     case MVT::v4i32:
4592     case MVT::v8i16:
4593     case MVT::v16i8:
4594       // Note that vector arguments in registers don't reserve stack space,
4595       // except in varargs functions.
4596       if (VR_idx != Num_VR_Regs) {
4597         unsigned VReg = MF.addLiveIn(VR[VR_idx], &PPC::VRRCRegClass);
4598         ArgVal = DAG.getCopyFromReg(Chain, dl, VReg, ObjectVT);
4599         if (isVarArg) {
4600           while ((ArgOffset % 16) != 0) {
4601             ArgOffset += PtrByteSize;
4602             if (GPR_idx != Num_GPR_Regs)
4603               GPR_idx++;
4604           }
4605           ArgOffset += 16;
4606           GPR_idx = std::min(GPR_idx+4, Num_GPR_Regs); // FIXME correct for ppc64?
4607         }
4608         ++VR_idx;
4609       } else {
4610         if (!isVarArg && !isPPC64) {
4611           // Vectors go after all the nonvectors.
4612           CurArgOffset = VecArgOffset;
4613           VecArgOffset += 16;
4614         } else {
4615           // Vectors are aligned.
4616           ArgOffset = ((ArgOffset+15)/16)*16;
4617           CurArgOffset = ArgOffset;
4618           ArgOffset += 16;
4619         }
4620         needsLoad = true;
4621       }
4622       break;
4623     }
4624 
4625     // We need to load the argument to a virtual register if we determined above
4626     // that we ran out of physical registers of the appropriate type.
4627     if (needsLoad) {
4628       int FI = MFI.CreateFixedObject(ObjSize,
4629                                      CurArgOffset + (ArgSize - ObjSize),
4630                                      isImmutable);
4631       SDValue FIN = DAG.getFrameIndex(FI, PtrVT);
4632       ArgVal = DAG.getLoad(ObjectVT, dl, Chain, FIN, MachinePointerInfo());
4633     }
4634 
4635     InVals.push_back(ArgVal);
4636   }
4637 
4638   // Allow for Altivec parameters at the end, if needed.
4639   if (nAltivecParamsAtEnd) {
4640     MinReservedArea = ((MinReservedArea+15)/16)*16;
4641     MinReservedArea += 16*nAltivecParamsAtEnd;
4642   }
4643 
4644   // Area that is at least reserved in the caller of this function.
4645   MinReservedArea = std::max(MinReservedArea, LinkageSize + 8 * PtrByteSize);
4646 
4647   // Set the size that is at least reserved in caller of this function.  Tail
4648   // call optimized functions' reserved stack space needs to be aligned so that
4649   // taking the difference between two stack areas will result in an aligned
4650   // stack.
4651   MinReservedArea =
4652       EnsureStackAlignment(Subtarget.getFrameLowering(), MinReservedArea);
4653   FuncInfo->setMinReservedArea(MinReservedArea);
4654 
4655   // If the function takes variable number of arguments, make a frame index for
4656   // the start of the first vararg value... for expansion of llvm.va_start.
4657   if (isVarArg) {
4658     int Depth = ArgOffset;
4659 
4660     FuncInfo->setVarArgsFrameIndex(
4661       MFI.CreateFixedObject(PtrVT.getSizeInBits()/8,
4662                             Depth, true));
4663     SDValue FIN = DAG.getFrameIndex(FuncInfo->getVarArgsFrameIndex(), PtrVT);
4664 
4665     // If this function is vararg, store any remaining integer argument regs
4666     // to their spots on the stack so that they may be loaded by dereferencing
4667     // the result of va_next.
4668     for (; GPR_idx != Num_GPR_Regs; ++GPR_idx) {
4669       unsigned VReg;
4670 
4671       if (isPPC64)
4672         VReg = MF.addLiveIn(GPR[GPR_idx], &PPC::G8RCRegClass);
4673       else
4674         VReg = MF.addLiveIn(GPR[GPR_idx], &PPC::GPRCRegClass);
4675 
4676       SDValue Val = DAG.getCopyFromReg(Chain, dl, VReg, PtrVT);
4677       SDValue Store =
4678           DAG.getStore(Val.getValue(1), dl, Val, FIN, MachinePointerInfo());
4679       MemOps.push_back(Store);
4680       // Increment the address by four for the next argument to store
4681       SDValue PtrOff = DAG.getConstant(PtrVT.getSizeInBits()/8, dl, PtrVT);
4682       FIN = DAG.getNode(ISD::ADD, dl, PtrOff.getValueType(), FIN, PtrOff);
4683     }
4684   }
4685 
4686   if (!MemOps.empty())
4687     Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, MemOps);
4688 
4689   return Chain;
4690 }
4691 
4692 /// CalculateTailCallSPDiff - Get the amount the stack pointer has to be
4693 /// adjusted to accommodate the arguments for the tailcall.
4694 static int CalculateTailCallSPDiff(SelectionDAG& DAG, bool isTailCall,
4695                                    unsigned ParamSize) {
4696 
4697   if (!isTailCall) return 0;
4698 
4699   PPCFunctionInfo *FI = DAG.getMachineFunction().getInfo<PPCFunctionInfo>();
4700   unsigned CallerMinReservedArea = FI->getMinReservedArea();
4701   int SPDiff = (int)CallerMinReservedArea - (int)ParamSize;
4702   // Remember only if the new adjustment is bigger.
4703   if (SPDiff < FI->getTailCallSPDelta())
4704     FI->setTailCallSPDelta(SPDiff);
4705 
4706   return SPDiff;
4707 }
4708 
4709 static bool isFunctionGlobalAddress(SDValue Callee);
4710 
4711 static bool callsShareTOCBase(const Function *Caller, SDValue Callee,
4712                               const TargetMachine &TM) {
4713   // It does not make sense to call callsShareTOCBase() with a caller that
4714   // is PC Relative since PC Relative callers do not have a TOC.
4715 #ifndef NDEBUG
4716   const PPCSubtarget *STICaller = &TM.getSubtarget<PPCSubtarget>(*Caller);
4717   assert(!STICaller->isUsingPCRelativeCalls() &&
4718          "PC Relative callers do not have a TOC and cannot share a TOC Base");
4719 #endif
4720 
4721   // Callee is either a GlobalAddress or an ExternalSymbol. ExternalSymbols
4722   // don't have enough information to determine if the caller and callee share
4723   // the same  TOC base, so we have to pessimistically assume they don't for
4724   // correctness.
4725   GlobalAddressSDNode *G = dyn_cast<GlobalAddressSDNode>(Callee);
4726   if (!G)
4727     return false;
4728 
4729   const GlobalValue *GV = G->getGlobal();
4730 
4731   // If the callee is preemptable, then the static linker will use a plt-stub
4732   // which saves the toc to the stack, and needs a nop after the call
4733   // instruction to convert to a toc-restore.
4734   if (!TM.shouldAssumeDSOLocal(*Caller->getParent(), GV))
4735     return false;
4736 
4737   // Functions with PC Relative enabled may clobber the TOC in the same DSO.
4738   // We may need a TOC restore in the situation where the caller requires a
4739   // valid TOC but the callee is PC Relative and does not.
4740   const Function *F = dyn_cast<Function>(GV);
4741   const GlobalAlias *Alias = dyn_cast<GlobalAlias>(GV);
4742 
4743   // If we have an Alias we can try to get the function from there.
4744   if (Alias) {
4745     const GlobalObject *GlobalObj = Alias->getBaseObject();
4746     F = dyn_cast<Function>(GlobalObj);
4747   }
4748 
4749   // If we still have no valid function pointer we do not have enough
4750   // information to determine if the callee uses PC Relative calls so we must
4751   // assume that it does.
4752   if (!F)
4753     return false;
4754 
4755   // If the callee uses PC Relative we cannot guarantee that the callee won't
4756   // clobber the TOC of the caller and so we must assume that the two
4757   // functions do not share a TOC base.
4758   const PPCSubtarget *STICallee = &TM.getSubtarget<PPCSubtarget>(*F);
4759   if (STICallee->isUsingPCRelativeCalls())
4760     return false;
4761 
4762   // The medium and large code models are expected to provide a sufficiently
4763   // large TOC to provide all data addressing needs of a module with a
4764   // single TOC.
4765   if (CodeModel::Medium == TM.getCodeModel() ||
4766       CodeModel::Large == TM.getCodeModel())
4767     return true;
4768 
4769   // Otherwise we need to ensure callee and caller are in the same section,
4770   // since the linker may allocate multiple TOCs, and we don't know which
4771   // sections will belong to the same TOC base.
4772   if (!GV->isStrongDefinitionForLinker())
4773     return false;
4774 
4775   // Any explicitly-specified sections and section prefixes must also match.
4776   // Also, if we're using -ffunction-sections, then each function is always in
4777   // a different section (the same is true for COMDAT functions).
4778   if (TM.getFunctionSections() || GV->hasComdat() || Caller->hasComdat() ||
4779       GV->getSection() != Caller->getSection())
4780     return false;
4781   if (const auto *F = dyn_cast<Function>(GV)) {
4782     if (F->getSectionPrefix() != Caller->getSectionPrefix())
4783       return false;
4784   }
4785 
4786   return true;
4787 }
4788 
4789 static bool
4790 needStackSlotPassParameters(const PPCSubtarget &Subtarget,
4791                             const SmallVectorImpl<ISD::OutputArg> &Outs) {
4792   assert(Subtarget.is64BitELFABI());
4793 
4794   const unsigned PtrByteSize = 8;
4795   const unsigned LinkageSize = Subtarget.getFrameLowering()->getLinkageSize();
4796 
4797   static const MCPhysReg GPR[] = {
4798     PPC::X3, PPC::X4, PPC::X5, PPC::X6,
4799     PPC::X7, PPC::X8, PPC::X9, PPC::X10,
4800   };
4801   static const MCPhysReg VR[] = {
4802     PPC::V2, PPC::V3, PPC::V4, PPC::V5, PPC::V6, PPC::V7, PPC::V8,
4803     PPC::V9, PPC::V10, PPC::V11, PPC::V12, PPC::V13
4804   };
4805 
4806   const unsigned NumGPRs = array_lengthof(GPR);
4807   const unsigned NumFPRs = 13;
4808   const unsigned NumVRs = array_lengthof(VR);
4809   const unsigned ParamAreaSize = NumGPRs * PtrByteSize;
4810 
4811   unsigned NumBytes = LinkageSize;
4812   unsigned AvailableFPRs = NumFPRs;
4813   unsigned AvailableVRs = NumVRs;
4814 
4815   for (const ISD::OutputArg& Param : Outs) {
4816     if (Param.Flags.isNest()) continue;
4817 
4818     if (CalculateStackSlotUsed(Param.VT, Param.ArgVT, Param.Flags,
4819                                PtrByteSize, LinkageSize, ParamAreaSize,
4820                                NumBytes, AvailableFPRs, AvailableVRs,
4821                                Subtarget.hasQPX()))
4822       return true;
4823   }
4824   return false;
4825 }
4826 
4827 static bool hasSameArgumentList(const Function *CallerFn, const CallBase &CB) {
4828   if (CB.arg_size() != CallerFn->arg_size())
4829     return false;
4830 
4831   auto CalleeArgIter = CB.arg_begin();
4832   auto CalleeArgEnd = CB.arg_end();
4833   Function::const_arg_iterator CallerArgIter = CallerFn->arg_begin();
4834 
4835   for (; CalleeArgIter != CalleeArgEnd; ++CalleeArgIter, ++CallerArgIter) {
4836     const Value* CalleeArg = *CalleeArgIter;
4837     const Value* CallerArg = &(*CallerArgIter);
4838     if (CalleeArg == CallerArg)
4839       continue;
4840 
4841     // e.g. @caller([4 x i64] %a, [4 x i64] %b) {
4842     //        tail call @callee([4 x i64] undef, [4 x i64] %b)
4843     //      }
4844     // 1st argument of callee is undef and has the same type as caller.
4845     if (CalleeArg->getType() == CallerArg->getType() &&
4846         isa<UndefValue>(CalleeArg))
4847       continue;
4848 
4849     return false;
4850   }
4851 
4852   return true;
4853 }
4854 
4855 // Returns true if TCO is possible between the callers and callees
4856 // calling conventions.
4857 static bool
4858 areCallingConvEligibleForTCO_64SVR4(CallingConv::ID CallerCC,
4859                                     CallingConv::ID CalleeCC) {
4860   // Tail calls are possible with fastcc and ccc.
4861   auto isTailCallableCC  = [] (CallingConv::ID CC){
4862       return  CC == CallingConv::C || CC == CallingConv::Fast;
4863   };
4864   if (!isTailCallableCC(CallerCC) || !isTailCallableCC(CalleeCC))
4865     return false;
4866 
4867   // We can safely tail call both fastcc and ccc callees from a c calling
4868   // convention caller. If the caller is fastcc, we may have less stack space
4869   // than a non-fastcc caller with the same signature so disable tail-calls in
4870   // that case.
4871   return CallerCC == CallingConv::C || CallerCC == CalleeCC;
4872 }
4873 
4874 bool PPCTargetLowering::IsEligibleForTailCallOptimization_64SVR4(
4875     SDValue Callee, CallingConv::ID CalleeCC, const CallBase *CB, bool isVarArg,
4876     const SmallVectorImpl<ISD::OutputArg> &Outs,
4877     const SmallVectorImpl<ISD::InputArg> &Ins, SelectionDAG &DAG) const {
4878   bool TailCallOpt = getTargetMachine().Options.GuaranteedTailCallOpt;
4879 
4880   if (DisableSCO && !TailCallOpt) return false;
4881 
4882   // Variadic argument functions are not supported.
4883   if (isVarArg) return false;
4884 
4885   auto &Caller = DAG.getMachineFunction().getFunction();
4886   // Check that the calling conventions are compatible for tco.
4887   if (!areCallingConvEligibleForTCO_64SVR4(Caller.getCallingConv(), CalleeCC))
4888     return false;
4889 
4890   // Caller contains any byval parameter is not supported.
4891   if (any_of(Ins, [](const ISD::InputArg &IA) { return IA.Flags.isByVal(); }))
4892     return false;
4893 
4894   // Callee contains any byval parameter is not supported, too.
4895   // Note: This is a quick work around, because in some cases, e.g.
4896   // caller's stack size > callee's stack size, we are still able to apply
4897   // sibling call optimization. For example, gcc is able to do SCO for caller1
4898   // in the following example, but not for caller2.
4899   //   struct test {
4900   //     long int a;
4901   //     char ary[56];
4902   //   } gTest;
4903   //   __attribute__((noinline)) int callee(struct test v, struct test *b) {
4904   //     b->a = v.a;
4905   //     return 0;
4906   //   }
4907   //   void caller1(struct test a, struct test c, struct test *b) {
4908   //     callee(gTest, b); }
4909   //   void caller2(struct test *b) { callee(gTest, b); }
4910   if (any_of(Outs, [](const ISD::OutputArg& OA) { return OA.Flags.isByVal(); }))
4911     return false;
4912 
4913   // If callee and caller use different calling conventions, we cannot pass
4914   // parameters on stack since offsets for the parameter area may be different.
4915   if (Caller.getCallingConv() != CalleeCC &&
4916       needStackSlotPassParameters(Subtarget, Outs))
4917     return false;
4918 
4919   // All variants of 64-bit ELF ABIs without PC-Relative addressing require that
4920   // the caller and callee share the same TOC for TCO/SCO. If the caller and
4921   // callee potentially have different TOC bases then we cannot tail call since
4922   // we need to restore the TOC pointer after the call.
4923   // ref: https://bugzilla.mozilla.org/show_bug.cgi?id=973977
4924   // We cannot guarantee this for indirect calls or calls to external functions.
4925   // When PC-Relative addressing is used, the concept of the TOC is no longer
4926   // applicable so this check is not required.
4927   // Check first for indirect calls.
4928   if (!Subtarget.isUsingPCRelativeCalls() &&
4929       !isFunctionGlobalAddress(Callee) && !isa<ExternalSymbolSDNode>(Callee))
4930     return false;
4931 
4932   // Check if we share the TOC base.
4933   if (!Subtarget.isUsingPCRelativeCalls() &&
4934       !callsShareTOCBase(&Caller, Callee, getTargetMachine()))
4935     return false;
4936 
4937   // TCO allows altering callee ABI, so we don't have to check further.
4938   if (CalleeCC == CallingConv::Fast && TailCallOpt)
4939     return true;
4940 
4941   if (DisableSCO) return false;
4942 
4943   // If callee use the same argument list that caller is using, then we can
4944   // apply SCO on this case. If it is not, then we need to check if callee needs
4945   // stack for passing arguments.
4946   // PC Relative tail calls may not have a CallBase.
4947   // If there is no CallBase we cannot verify if we have the same argument
4948   // list so assume that we don't have the same argument list.
4949   if (CB && !hasSameArgumentList(&Caller, *CB) &&
4950       needStackSlotPassParameters(Subtarget, Outs))
4951     return false;
4952   else if (!CB && needStackSlotPassParameters(Subtarget, Outs))
4953     return false;
4954 
4955   return true;
4956 }
4957 
4958 /// IsEligibleForTailCallOptimization - Check whether the call is eligible
4959 /// for tail call optimization. Targets which want to do tail call
4960 /// optimization should implement this function.
4961 bool
4962 PPCTargetLowering::IsEligibleForTailCallOptimization(SDValue Callee,
4963                                                      CallingConv::ID CalleeCC,
4964                                                      bool isVarArg,
4965                                       const SmallVectorImpl<ISD::InputArg> &Ins,
4966                                                      SelectionDAG& DAG) const {
4967   if (!getTargetMachine().Options.GuaranteedTailCallOpt)
4968     return false;
4969 
4970   // Variable argument functions are not supported.
4971   if (isVarArg)
4972     return false;
4973 
4974   MachineFunction &MF = DAG.getMachineFunction();
4975   CallingConv::ID CallerCC = MF.getFunction().getCallingConv();
4976   if (CalleeCC == CallingConv::Fast && CallerCC == CalleeCC) {
4977     // Functions containing by val parameters are not supported.
4978     for (unsigned i = 0; i != Ins.size(); i++) {
4979        ISD::ArgFlagsTy Flags = Ins[i].Flags;
4980        if (Flags.isByVal()) return false;
4981     }
4982 
4983     // Non-PIC/GOT tail calls are supported.
4984     if (getTargetMachine().getRelocationModel() != Reloc::PIC_)
4985       return true;
4986 
4987     // At the moment we can only do local tail calls (in same module, hidden
4988     // or protected) if we are generating PIC.
4989     if (GlobalAddressSDNode *G = dyn_cast<GlobalAddressSDNode>(Callee))
4990       return G->getGlobal()->hasHiddenVisibility()
4991           || G->getGlobal()->hasProtectedVisibility();
4992   }
4993 
4994   return false;
4995 }
4996 
4997 /// isCallCompatibleAddress - Return the immediate to use if the specified
4998 /// 32-bit value is representable in the immediate field of a BxA instruction.
4999 static SDNode *isBLACompatibleAddress(SDValue Op, SelectionDAG &DAG) {
5000   ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op);
5001   if (!C) return nullptr;
5002 
5003   int Addr = C->getZExtValue();
5004   if ((Addr & 3) != 0 ||  // Low 2 bits are implicitly zero.
5005       SignExtend32<26>(Addr) != Addr)
5006     return nullptr;  // Top 6 bits have to be sext of immediate.
5007 
5008   return DAG
5009       .getConstant(
5010           (int)C->getZExtValue() >> 2, SDLoc(Op),
5011           DAG.getTargetLoweringInfo().getPointerTy(DAG.getDataLayout()))
5012       .getNode();
5013 }
5014 
5015 namespace {
5016 
5017 struct TailCallArgumentInfo {
5018   SDValue Arg;
5019   SDValue FrameIdxOp;
5020   int FrameIdx = 0;
5021 
5022   TailCallArgumentInfo() = default;
5023 };
5024 
5025 } // end anonymous namespace
5026 
5027 /// StoreTailCallArgumentsToStackSlot - Stores arguments to their stack slot.
5028 static void StoreTailCallArgumentsToStackSlot(
5029     SelectionDAG &DAG, SDValue Chain,
5030     const SmallVectorImpl<TailCallArgumentInfo> &TailCallArgs,
5031     SmallVectorImpl<SDValue> &MemOpChains, const SDLoc &dl) {
5032   for (unsigned i = 0, e = TailCallArgs.size(); i != e; ++i) {
5033     SDValue Arg = TailCallArgs[i].Arg;
5034     SDValue FIN = TailCallArgs[i].FrameIdxOp;
5035     int FI = TailCallArgs[i].FrameIdx;
5036     // Store relative to framepointer.
5037     MemOpChains.push_back(DAG.getStore(
5038         Chain, dl, Arg, FIN,
5039         MachinePointerInfo::getFixedStack(DAG.getMachineFunction(), FI)));
5040   }
5041 }
5042 
5043 /// EmitTailCallStoreFPAndRetAddr - Move the frame pointer and return address to
5044 /// the appropriate stack slot for the tail call optimized function call.
5045 static SDValue EmitTailCallStoreFPAndRetAddr(SelectionDAG &DAG, SDValue Chain,
5046                                              SDValue OldRetAddr, SDValue OldFP,
5047                                              int SPDiff, const SDLoc &dl) {
5048   if (SPDiff) {
5049     // Calculate the new stack slot for the return address.
5050     MachineFunction &MF = DAG.getMachineFunction();
5051     const PPCSubtarget &Subtarget = MF.getSubtarget<PPCSubtarget>();
5052     const PPCFrameLowering *FL = Subtarget.getFrameLowering();
5053     bool isPPC64 = Subtarget.isPPC64();
5054     int SlotSize = isPPC64 ? 8 : 4;
5055     int NewRetAddrLoc = SPDiff + FL->getReturnSaveOffset();
5056     int NewRetAddr = MF.getFrameInfo().CreateFixedObject(SlotSize,
5057                                                          NewRetAddrLoc, true);
5058     EVT VT = isPPC64 ? MVT::i64 : MVT::i32;
5059     SDValue NewRetAddrFrIdx = DAG.getFrameIndex(NewRetAddr, VT);
5060     Chain = DAG.getStore(Chain, dl, OldRetAddr, NewRetAddrFrIdx,
5061                          MachinePointerInfo::getFixedStack(MF, NewRetAddr));
5062   }
5063   return Chain;
5064 }
5065 
5066 /// CalculateTailCallArgDest - Remember Argument for later processing. Calculate
5067 /// the position of the argument.
5068 static void
5069 CalculateTailCallArgDest(SelectionDAG &DAG, MachineFunction &MF, bool isPPC64,
5070                          SDValue Arg, int SPDiff, unsigned ArgOffset,
5071                      SmallVectorImpl<TailCallArgumentInfo>& TailCallArguments) {
5072   int Offset = ArgOffset + SPDiff;
5073   uint32_t OpSize = (Arg.getValueSizeInBits() + 7) / 8;
5074   int FI = MF.getFrameInfo().CreateFixedObject(OpSize, Offset, true);
5075   EVT VT = isPPC64 ? MVT::i64 : MVT::i32;
5076   SDValue FIN = DAG.getFrameIndex(FI, VT);
5077   TailCallArgumentInfo Info;
5078   Info.Arg = Arg;
5079   Info.FrameIdxOp = FIN;
5080   Info.FrameIdx = FI;
5081   TailCallArguments.push_back(Info);
5082 }
5083 
5084 /// EmitTCFPAndRetAddrLoad - Emit load from frame pointer and return address
5085 /// stack slot. Returns the chain as result and the loaded frame pointers in
5086 /// LROpOut/FPOpout. Used when tail calling.
5087 SDValue PPCTargetLowering::EmitTailCallLoadFPAndRetAddr(
5088     SelectionDAG &DAG, int SPDiff, SDValue Chain, SDValue &LROpOut,
5089     SDValue &FPOpOut, const SDLoc &dl) const {
5090   if (SPDiff) {
5091     // Load the LR and FP stack slot for later adjusting.
5092     EVT VT = Subtarget.isPPC64() ? MVT::i64 : MVT::i32;
5093     LROpOut = getReturnAddrFrameIndex(DAG);
5094     LROpOut = DAG.getLoad(VT, dl, Chain, LROpOut, MachinePointerInfo());
5095     Chain = SDValue(LROpOut.getNode(), 1);
5096   }
5097   return Chain;
5098 }
5099 
5100 /// CreateCopyOfByValArgument - Make a copy of an aggregate at address specified
5101 /// by "Src" to address "Dst" of size "Size".  Alignment information is
5102 /// specified by the specific parameter attribute. The copy will be passed as
5103 /// a byval function parameter.
5104 /// Sometimes what we are copying is the end of a larger object, the part that
5105 /// does not fit in registers.
5106 static SDValue CreateCopyOfByValArgument(SDValue Src, SDValue Dst,
5107                                          SDValue Chain, ISD::ArgFlagsTy Flags,
5108                                          SelectionDAG &DAG, const SDLoc &dl) {
5109   SDValue SizeNode = DAG.getConstant(Flags.getByValSize(), dl, MVT::i32);
5110   return DAG.getMemcpy(Chain, dl, Dst, Src, SizeNode,
5111                        Flags.getNonZeroByValAlign(), false, false, false,
5112                        MachinePointerInfo(), MachinePointerInfo());
5113 }
5114 
5115 /// LowerMemOpCallTo - Store the argument to the stack or remember it in case of
5116 /// tail calls.
5117 static void LowerMemOpCallTo(
5118     SelectionDAG &DAG, MachineFunction &MF, SDValue Chain, SDValue Arg,
5119     SDValue PtrOff, int SPDiff, unsigned ArgOffset, bool isPPC64,
5120     bool isTailCall, bool isVector, SmallVectorImpl<SDValue> &MemOpChains,
5121     SmallVectorImpl<TailCallArgumentInfo> &TailCallArguments, const SDLoc &dl) {
5122   EVT PtrVT = DAG.getTargetLoweringInfo().getPointerTy(DAG.getDataLayout());
5123   if (!isTailCall) {
5124     if (isVector) {
5125       SDValue StackPtr;
5126       if (isPPC64)
5127         StackPtr = DAG.getRegister(PPC::X1, MVT::i64);
5128       else
5129         StackPtr = DAG.getRegister(PPC::R1, MVT::i32);
5130       PtrOff = DAG.getNode(ISD::ADD, dl, PtrVT, StackPtr,
5131                            DAG.getConstant(ArgOffset, dl, PtrVT));
5132     }
5133     MemOpChains.push_back(
5134         DAG.getStore(Chain, dl, Arg, PtrOff, MachinePointerInfo()));
5135     // Calculate and remember argument location.
5136   } else CalculateTailCallArgDest(DAG, MF, isPPC64, Arg, SPDiff, ArgOffset,
5137                                   TailCallArguments);
5138 }
5139 
5140 static void
5141 PrepareTailCall(SelectionDAG &DAG, SDValue &InFlag, SDValue &Chain,
5142                 const SDLoc &dl, int SPDiff, unsigned NumBytes, SDValue LROp,
5143                 SDValue FPOp,
5144                 SmallVectorImpl<TailCallArgumentInfo> &TailCallArguments) {
5145   // Emit a sequence of copyto/copyfrom virtual registers for arguments that
5146   // might overwrite each other in case of tail call optimization.
5147   SmallVector<SDValue, 8> MemOpChains2;
5148   // Do not flag preceding copytoreg stuff together with the following stuff.
5149   InFlag = SDValue();
5150   StoreTailCallArgumentsToStackSlot(DAG, Chain, TailCallArguments,
5151                                     MemOpChains2, dl);
5152   if (!MemOpChains2.empty())
5153     Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, MemOpChains2);
5154 
5155   // Store the return address to the appropriate stack slot.
5156   Chain = EmitTailCallStoreFPAndRetAddr(DAG, Chain, LROp, FPOp, SPDiff, dl);
5157 
5158   // Emit callseq_end just before tailcall node.
5159   Chain = DAG.getCALLSEQ_END(Chain, DAG.getIntPtrConstant(NumBytes, dl, true),
5160                              DAG.getIntPtrConstant(0, dl, true), InFlag, dl);
5161   InFlag = Chain.getValue(1);
5162 }
5163 
5164 // Is this global address that of a function that can be called by name? (as
5165 // opposed to something that must hold a descriptor for an indirect call).
5166 static bool isFunctionGlobalAddress(SDValue Callee) {
5167   if (GlobalAddressSDNode *G = dyn_cast<GlobalAddressSDNode>(Callee)) {
5168     if (Callee.getOpcode() == ISD::GlobalTLSAddress ||
5169         Callee.getOpcode() == ISD::TargetGlobalTLSAddress)
5170       return false;
5171 
5172     return G->getGlobal()->getValueType()->isFunctionTy();
5173   }
5174 
5175   return false;
5176 }
5177 
5178 SDValue PPCTargetLowering::LowerCallResult(
5179     SDValue Chain, SDValue InFlag, CallingConv::ID CallConv, bool isVarArg,
5180     const SmallVectorImpl<ISD::InputArg> &Ins, const SDLoc &dl,
5181     SelectionDAG &DAG, SmallVectorImpl<SDValue> &InVals) const {
5182   SmallVector<CCValAssign, 16> RVLocs;
5183   CCState CCRetInfo(CallConv, isVarArg, DAG.getMachineFunction(), RVLocs,
5184                     *DAG.getContext());
5185 
5186   CCRetInfo.AnalyzeCallResult(
5187       Ins, (Subtarget.isSVR4ABI() && CallConv == CallingConv::Cold)
5188                ? RetCC_PPC_Cold
5189                : RetCC_PPC);
5190 
5191   // Copy all of the result registers out of their specified physreg.
5192   for (unsigned i = 0, e = RVLocs.size(); i != e; ++i) {
5193     CCValAssign &VA = RVLocs[i];
5194     assert(VA.isRegLoc() && "Can only return in registers!");
5195 
5196     SDValue Val;
5197 
5198     if (Subtarget.hasSPE() && VA.getLocVT() == MVT::f64) {
5199       SDValue Lo = DAG.getCopyFromReg(Chain, dl, VA.getLocReg(), MVT::i32,
5200                                       InFlag);
5201       Chain = Lo.getValue(1);
5202       InFlag = Lo.getValue(2);
5203       VA = RVLocs[++i]; // skip ahead to next loc
5204       SDValue Hi = DAG.getCopyFromReg(Chain, dl, VA.getLocReg(), MVT::i32,
5205                                       InFlag);
5206       Chain = Hi.getValue(1);
5207       InFlag = Hi.getValue(2);
5208       if (!Subtarget.isLittleEndian())
5209         std::swap (Lo, Hi);
5210       Val = DAG.getNode(PPCISD::BUILD_SPE64, dl, MVT::f64, Lo, Hi);
5211     } else {
5212       Val = DAG.getCopyFromReg(Chain, dl,
5213                                VA.getLocReg(), VA.getLocVT(), InFlag);
5214       Chain = Val.getValue(1);
5215       InFlag = Val.getValue(2);
5216     }
5217 
5218     switch (VA.getLocInfo()) {
5219     default: llvm_unreachable("Unknown loc info!");
5220     case CCValAssign::Full: break;
5221     case CCValAssign::AExt:
5222       Val = DAG.getNode(ISD::TRUNCATE, dl, VA.getValVT(), Val);
5223       break;
5224     case CCValAssign::ZExt:
5225       Val = DAG.getNode(ISD::AssertZext, dl, VA.getLocVT(), Val,
5226                         DAG.getValueType(VA.getValVT()));
5227       Val = DAG.getNode(ISD::TRUNCATE, dl, VA.getValVT(), Val);
5228       break;
5229     case CCValAssign::SExt:
5230       Val = DAG.getNode(ISD::AssertSext, dl, VA.getLocVT(), Val,
5231                         DAG.getValueType(VA.getValVT()));
5232       Val = DAG.getNode(ISD::TRUNCATE, dl, VA.getValVT(), Val);
5233       break;
5234     }
5235 
5236     InVals.push_back(Val);
5237   }
5238 
5239   return Chain;
5240 }
5241 
5242 static bool isIndirectCall(const SDValue &Callee, SelectionDAG &DAG,
5243                            const PPCSubtarget &Subtarget, bool isPatchPoint) {
5244   // PatchPoint calls are not indirect.
5245   if (isPatchPoint)
5246     return false;
5247 
5248   if (isFunctionGlobalAddress(Callee) || dyn_cast<ExternalSymbolSDNode>(Callee))
5249     return false;
5250 
5251   // Darwin, and 32-bit ELF can use a BLA. The descriptor based ABIs can not
5252   // becuase the immediate function pointer points to a descriptor instead of
5253   // a function entry point. The ELFv2 ABI cannot use a BLA because the function
5254   // pointer immediate points to the global entry point, while the BLA would
5255   // need to jump to the local entry point (see rL211174).
5256   if (!Subtarget.usesFunctionDescriptors() && !Subtarget.isELFv2ABI() &&
5257       isBLACompatibleAddress(Callee, DAG))
5258     return false;
5259 
5260   return true;
5261 }
5262 
5263 // AIX and 64-bit ELF ABIs w/o PCRel require a TOC save/restore around calls.
5264 static inline bool isTOCSaveRestoreRequired(const PPCSubtarget &Subtarget) {
5265   return Subtarget.isAIXABI() ||
5266          (Subtarget.is64BitELFABI() && !Subtarget.isUsingPCRelativeCalls());
5267 }
5268 
5269 static unsigned getCallOpcode(PPCTargetLowering::CallFlags CFlags,
5270                               const Function &Caller,
5271                               const SDValue &Callee,
5272                               const PPCSubtarget &Subtarget,
5273                               const TargetMachine &TM) {
5274   if (CFlags.IsTailCall)
5275     return PPCISD::TC_RETURN;
5276 
5277   // This is a call through a function pointer.
5278   if (CFlags.IsIndirect) {
5279     // AIX and the 64-bit ELF ABIs need to maintain the TOC pointer accross
5280     // indirect calls. The save of the caller's TOC pointer to the stack will be
5281     // inserted into the DAG as part of call lowering. The restore of the TOC
5282     // pointer is modeled by using a pseudo instruction for the call opcode that
5283     // represents the 2 instruction sequence of an indirect branch and link,
5284     // immediately followed by a load of the TOC pointer from the the stack save
5285     // slot into gpr2. For 64-bit ELFv2 ABI with PCRel, do not restore the TOC
5286     // as it is not saved or used.
5287     return isTOCSaveRestoreRequired(Subtarget) ? PPCISD::BCTRL_LOAD_TOC
5288                                                : PPCISD::BCTRL;
5289   }
5290 
5291   if (Subtarget.isUsingPCRelativeCalls()) {
5292     assert(Subtarget.is64BitELFABI() && "PC Relative is only on ELF ABI.");
5293     return PPCISD::CALL_NOTOC;
5294   }
5295 
5296   // The ABIs that maintain a TOC pointer accross calls need to have a nop
5297   // immediately following the call instruction if the caller and callee may
5298   // have different TOC bases. At link time if the linker determines the calls
5299   // may not share a TOC base, the call is redirected to a trampoline inserted
5300   // by the linker. The trampoline will (among other things) save the callers
5301   // TOC pointer at an ABI designated offset in the linkage area and the linker
5302   // will rewrite the nop to be a load of the TOC pointer from the linkage area
5303   // into gpr2.
5304   if (Subtarget.isAIXABI() || Subtarget.is64BitELFABI())
5305     return callsShareTOCBase(&Caller, Callee, TM) ? PPCISD::CALL
5306                                                   : PPCISD::CALL_NOP;
5307 
5308   return PPCISD::CALL;
5309 }
5310 
5311 static SDValue transformCallee(const SDValue &Callee, SelectionDAG &DAG,
5312                                const SDLoc &dl, const PPCSubtarget &Subtarget) {
5313   if (!Subtarget.usesFunctionDescriptors() && !Subtarget.isELFv2ABI())
5314     if (SDNode *Dest = isBLACompatibleAddress(Callee, DAG))
5315       return SDValue(Dest, 0);
5316 
5317   // Returns true if the callee is local, and false otherwise.
5318   auto isLocalCallee = [&]() {
5319     const GlobalAddressSDNode *G = dyn_cast<GlobalAddressSDNode>(Callee);
5320     const Module *Mod = DAG.getMachineFunction().getFunction().getParent();
5321     const GlobalValue *GV = G ? G->getGlobal() : nullptr;
5322 
5323     return DAG.getTarget().shouldAssumeDSOLocal(*Mod, GV) &&
5324            !dyn_cast_or_null<GlobalIFunc>(GV);
5325   };
5326 
5327   // The PLT is only used in 32-bit ELF PIC mode.  Attempting to use the PLT in
5328   // a static relocation model causes some versions of GNU LD (2.17.50, at
5329   // least) to force BSS-PLT, instead of secure-PLT, even if all objects are
5330   // built with secure-PLT.
5331   bool UsePlt =
5332       Subtarget.is32BitELFABI() && !isLocalCallee() &&
5333       Subtarget.getTargetMachine().getRelocationModel() == Reloc::PIC_;
5334 
5335   // On AIX, direct function calls reference the symbol for the function's
5336   // entry point, which is named by prepending a "." before the function's
5337   // C-linkage name.
5338   const auto getFunctionEntryPointSymbol = [&](StringRef SymName) {
5339     auto &Context = DAG.getMachineFunction().getMMI().getContext();
5340     return cast<MCSymbolXCOFF>(
5341         Context.getOrCreateSymbol(Twine(".") + Twine(SymName)));
5342   };
5343 
5344   const auto getAIXFuncEntryPointSymbolSDNode =
5345       [&](StringRef FuncName, bool IsDeclaration,
5346           const XCOFF::StorageClass &SC) {
5347         MCSymbolXCOFF *S = getFunctionEntryPointSymbol(FuncName);
5348 
5349         auto &Context = DAG.getMachineFunction().getMMI().getContext();
5350 
5351         if (IsDeclaration && !S->hasRepresentedCsectSet()) {
5352           // On AIX, an undefined symbol needs to be associated with a
5353           // MCSectionXCOFF to get the correct storage mapping class.
5354           // In this case, XCOFF::XMC_PR.
5355           MCSectionXCOFF *Sec = Context.getXCOFFSection(
5356               S->getSymbolTableName(), XCOFF::XMC_PR, XCOFF::XTY_ER, SC,
5357               SectionKind::getMetadata());
5358           S->setRepresentedCsect(Sec);
5359         }
5360 
5361         MVT PtrVT =
5362             DAG.getTargetLoweringInfo().getPointerTy(DAG.getDataLayout());
5363         return DAG.getMCSymbol(S, PtrVT);
5364       };
5365 
5366   if (isFunctionGlobalAddress(Callee)) {
5367     const GlobalAddressSDNode *G = cast<GlobalAddressSDNode>(Callee);
5368     const GlobalValue *GV = G->getGlobal();
5369 
5370     if (!Subtarget.isAIXABI())
5371       return DAG.getTargetGlobalAddress(GV, dl, Callee.getValueType(), 0,
5372                                         UsePlt ? PPCII::MO_PLT : 0);
5373 
5374     assert(!isa<GlobalIFunc>(GV) && "IFunc is not supported on AIX.");
5375     const XCOFF::StorageClass SC =
5376         TargetLoweringObjectFileXCOFF::getStorageClassForGlobal(GV);
5377     return getAIXFuncEntryPointSymbolSDNode(GV->getName(), GV->isDeclaration(),
5378                                             SC);
5379   }
5380 
5381   if (ExternalSymbolSDNode *S = dyn_cast<ExternalSymbolSDNode>(Callee)) {
5382     const char *SymName = S->getSymbol();
5383     if (Subtarget.isAIXABI()) {
5384       // If there exists a user-declared function whose name is the same as the
5385       // ExternalSymbol's, then we pick up the user-declared version.
5386       const Module *Mod = DAG.getMachineFunction().getFunction().getParent();
5387       if (const Function *F =
5388               dyn_cast_or_null<Function>(Mod->getNamedValue(SymName))) {
5389         const XCOFF::StorageClass SC =
5390             TargetLoweringObjectFileXCOFF::getStorageClassForGlobal(F);
5391         return getAIXFuncEntryPointSymbolSDNode(F->getName(),
5392                                                 F->isDeclaration(), SC);
5393       }
5394       SymName = getFunctionEntryPointSymbol(SymName)->getName().data();
5395     }
5396     return DAG.getTargetExternalSymbol(SymName, Callee.getValueType(),
5397                                        UsePlt ? PPCII::MO_PLT : 0);
5398   }
5399 
5400   // No transformation needed.
5401   assert(Callee.getNode() && "What no callee?");
5402   return Callee;
5403 }
5404 
5405 static SDValue getOutputChainFromCallSeq(SDValue CallSeqStart) {
5406   assert(CallSeqStart.getOpcode() == ISD::CALLSEQ_START &&
5407          "Expected a CALLSEQ_STARTSDNode.");
5408 
5409   // The last operand is the chain, except when the node has glue. If the node
5410   // has glue, then the last operand is the glue, and the chain is the second
5411   // last operand.
5412   SDValue LastValue = CallSeqStart.getValue(CallSeqStart->getNumValues() - 1);
5413   if (LastValue.getValueType() != MVT::Glue)
5414     return LastValue;
5415 
5416   return CallSeqStart.getValue(CallSeqStart->getNumValues() - 2);
5417 }
5418 
5419 // Creates the node that moves a functions address into the count register
5420 // to prepare for an indirect call instruction.
5421 static void prepareIndirectCall(SelectionDAG &DAG, SDValue &Callee,
5422                                 SDValue &Glue, SDValue &Chain,
5423                                 const SDLoc &dl) {
5424   SDValue MTCTROps[] = {Chain, Callee, Glue};
5425   EVT ReturnTypes[] = {MVT::Other, MVT::Glue};
5426   Chain = DAG.getNode(PPCISD::MTCTR, dl, makeArrayRef(ReturnTypes, 2),
5427                       makeArrayRef(MTCTROps, Glue.getNode() ? 3 : 2));
5428   // The glue is the second value produced.
5429   Glue = Chain.getValue(1);
5430 }
5431 
5432 static void prepareDescriptorIndirectCall(SelectionDAG &DAG, SDValue &Callee,
5433                                           SDValue &Glue, SDValue &Chain,
5434                                           SDValue CallSeqStart,
5435                                           const CallBase *CB, const SDLoc &dl,
5436                                           bool hasNest,
5437                                           const PPCSubtarget &Subtarget) {
5438   // Function pointers in the 64-bit SVR4 ABI do not point to the function
5439   // entry point, but to the function descriptor (the function entry point
5440   // address is part of the function descriptor though).
5441   // The function descriptor is a three doubleword structure with the
5442   // following fields: function entry point, TOC base address and
5443   // environment pointer.
5444   // Thus for a call through a function pointer, the following actions need
5445   // to be performed:
5446   //   1. Save the TOC of the caller in the TOC save area of its stack
5447   //      frame (this is done in LowerCall_Darwin() or LowerCall_64SVR4()).
5448   //   2. Load the address of the function entry point from the function
5449   //      descriptor.
5450   //   3. Load the TOC of the callee from the function descriptor into r2.
5451   //   4. Load the environment pointer from the function descriptor into
5452   //      r11.
5453   //   5. Branch to the function entry point address.
5454   //   6. On return of the callee, the TOC of the caller needs to be
5455   //      restored (this is done in FinishCall()).
5456   //
5457   // The loads are scheduled at the beginning of the call sequence, and the
5458   // register copies are flagged together to ensure that no other
5459   // operations can be scheduled in between. E.g. without flagging the
5460   // copies together, a TOC access in the caller could be scheduled between
5461   // the assignment of the callee TOC and the branch to the callee, which leads
5462   // to incorrect code.
5463 
5464   // Start by loading the function address from the descriptor.
5465   SDValue LDChain = getOutputChainFromCallSeq(CallSeqStart);
5466   auto MMOFlags = Subtarget.hasInvariantFunctionDescriptors()
5467                       ? (MachineMemOperand::MODereferenceable |
5468                          MachineMemOperand::MOInvariant)
5469                       : MachineMemOperand::MONone;
5470 
5471   MachinePointerInfo MPI(CB ? CB->getCalledOperand() : nullptr);
5472 
5473   // Registers used in building the DAG.
5474   const MCRegister EnvPtrReg = Subtarget.getEnvironmentPointerRegister();
5475   const MCRegister TOCReg = Subtarget.getTOCPointerRegister();
5476 
5477   // Offsets of descriptor members.
5478   const unsigned TOCAnchorOffset = Subtarget.descriptorTOCAnchorOffset();
5479   const unsigned EnvPtrOffset = Subtarget.descriptorEnvironmentPointerOffset();
5480 
5481   const MVT RegVT = Subtarget.isPPC64() ? MVT::i64 : MVT::i32;
5482   const unsigned Alignment = Subtarget.isPPC64() ? 8 : 4;
5483 
5484   // One load for the functions entry point address.
5485   SDValue LoadFuncPtr = DAG.getLoad(RegVT, dl, LDChain, Callee, MPI,
5486                                     Alignment, MMOFlags);
5487 
5488   // One for loading the TOC anchor for the module that contains the called
5489   // function.
5490   SDValue TOCOff = DAG.getIntPtrConstant(TOCAnchorOffset, dl);
5491   SDValue AddTOC = DAG.getNode(ISD::ADD, dl, RegVT, Callee, TOCOff);
5492   SDValue TOCPtr =
5493       DAG.getLoad(RegVT, dl, LDChain, AddTOC,
5494                   MPI.getWithOffset(TOCAnchorOffset), Alignment, MMOFlags);
5495 
5496   // One for loading the environment pointer.
5497   SDValue PtrOff = DAG.getIntPtrConstant(EnvPtrOffset, dl);
5498   SDValue AddPtr = DAG.getNode(ISD::ADD, dl, RegVT, Callee, PtrOff);
5499   SDValue LoadEnvPtr =
5500       DAG.getLoad(RegVT, dl, LDChain, AddPtr,
5501                   MPI.getWithOffset(EnvPtrOffset), Alignment, MMOFlags);
5502 
5503 
5504   // Then copy the newly loaded TOC anchor to the TOC pointer.
5505   SDValue TOCVal = DAG.getCopyToReg(Chain, dl, TOCReg, TOCPtr, Glue);
5506   Chain = TOCVal.getValue(0);
5507   Glue = TOCVal.getValue(1);
5508 
5509   // If the function call has an explicit 'nest' parameter, it takes the
5510   // place of the environment pointer.
5511   assert((!hasNest || !Subtarget.isAIXABI()) &&
5512          "Nest parameter is not supported on AIX.");
5513   if (!hasNest) {
5514     SDValue EnvVal = DAG.getCopyToReg(Chain, dl, EnvPtrReg, LoadEnvPtr, Glue);
5515     Chain = EnvVal.getValue(0);
5516     Glue = EnvVal.getValue(1);
5517   }
5518 
5519   // The rest of the indirect call sequence is the same as the non-descriptor
5520   // DAG.
5521   prepareIndirectCall(DAG, LoadFuncPtr, Glue, Chain, dl);
5522 }
5523 
5524 static void
5525 buildCallOperands(SmallVectorImpl<SDValue> &Ops,
5526                   PPCTargetLowering::CallFlags CFlags, const SDLoc &dl,
5527                   SelectionDAG &DAG,
5528                   SmallVector<std::pair<unsigned, SDValue>, 8> &RegsToPass,
5529                   SDValue Glue, SDValue Chain, SDValue &Callee, int SPDiff,
5530                   const PPCSubtarget &Subtarget) {
5531   const bool IsPPC64 = Subtarget.isPPC64();
5532   // MVT for a general purpose register.
5533   const MVT RegVT = IsPPC64 ? MVT::i64 : MVT::i32;
5534 
5535   // First operand is always the chain.
5536   Ops.push_back(Chain);
5537 
5538   // If it's a direct call pass the callee as the second operand.
5539   if (!CFlags.IsIndirect)
5540     Ops.push_back(Callee);
5541   else {
5542     assert(!CFlags.IsPatchPoint && "Patch point calls are not indirect.");
5543 
5544     // For the TOC based ABIs, we have saved the TOC pointer to the linkage area
5545     // on the stack (this would have been done in `LowerCall_64SVR4` or
5546     // `LowerCall_AIX`). The call instruction is a pseudo instruction that
5547     // represents both the indirect branch and a load that restores the TOC
5548     // pointer from the linkage area. The operand for the TOC restore is an add
5549     // of the TOC save offset to the stack pointer. This must be the second
5550     // operand: after the chain input but before any other variadic arguments.
5551     // For 64-bit ELFv2 ABI with PCRel, do not restore the TOC as it is not
5552     // saved or used.
5553     if (isTOCSaveRestoreRequired(Subtarget)) {
5554       const MCRegister StackPtrReg = Subtarget.getStackPointerRegister();
5555 
5556       SDValue StackPtr = DAG.getRegister(StackPtrReg, RegVT);
5557       unsigned TOCSaveOffset = Subtarget.getFrameLowering()->getTOCSaveOffset();
5558       SDValue TOCOff = DAG.getIntPtrConstant(TOCSaveOffset, dl);
5559       SDValue AddTOC = DAG.getNode(ISD::ADD, dl, RegVT, StackPtr, TOCOff);
5560       Ops.push_back(AddTOC);
5561     }
5562 
5563     // Add the register used for the environment pointer.
5564     if (Subtarget.usesFunctionDescriptors() && !CFlags.HasNest)
5565       Ops.push_back(DAG.getRegister(Subtarget.getEnvironmentPointerRegister(),
5566                                     RegVT));
5567 
5568 
5569     // Add CTR register as callee so a bctr can be emitted later.
5570     if (CFlags.IsTailCall)
5571       Ops.push_back(DAG.getRegister(IsPPC64 ? PPC::CTR8 : PPC::CTR, RegVT));
5572   }
5573 
5574   // If this is a tail call add stack pointer delta.
5575   if (CFlags.IsTailCall)
5576     Ops.push_back(DAG.getConstant(SPDiff, dl, MVT::i32));
5577 
5578   // Add argument registers to the end of the list so that they are known live
5579   // into the call.
5580   for (unsigned i = 0, e = RegsToPass.size(); i != e; ++i)
5581     Ops.push_back(DAG.getRegister(RegsToPass[i].first,
5582                                   RegsToPass[i].second.getValueType()));
5583 
5584   // We cannot add R2/X2 as an operand here for PATCHPOINT, because there is
5585   // no way to mark dependencies as implicit here.
5586   // We will add the R2/X2 dependency in EmitInstrWithCustomInserter.
5587   if ((Subtarget.is64BitELFABI() || Subtarget.isAIXABI()) &&
5588        !CFlags.IsPatchPoint && !Subtarget.isUsingPCRelativeCalls())
5589     Ops.push_back(DAG.getRegister(Subtarget.getTOCPointerRegister(), RegVT));
5590 
5591   // Add implicit use of CR bit 6 for 32-bit SVR4 vararg calls
5592   if (CFlags.IsVarArg && Subtarget.is32BitELFABI())
5593     Ops.push_back(DAG.getRegister(PPC::CR1EQ, MVT::i32));
5594 
5595   // Add a register mask operand representing the call-preserved registers.
5596   const TargetRegisterInfo *TRI = Subtarget.getRegisterInfo();
5597   const uint32_t *Mask =
5598       TRI->getCallPreservedMask(DAG.getMachineFunction(), CFlags.CallConv);
5599   assert(Mask && "Missing call preserved mask for calling convention");
5600   Ops.push_back(DAG.getRegisterMask(Mask));
5601 
5602   // If the glue is valid, it is the last operand.
5603   if (Glue.getNode())
5604     Ops.push_back(Glue);
5605 }
5606 
5607 SDValue PPCTargetLowering::FinishCall(
5608     CallFlags CFlags, const SDLoc &dl, SelectionDAG &DAG,
5609     SmallVector<std::pair<unsigned, SDValue>, 8> &RegsToPass, SDValue Glue,
5610     SDValue Chain, SDValue CallSeqStart, SDValue &Callee, int SPDiff,
5611     unsigned NumBytes, const SmallVectorImpl<ISD::InputArg> &Ins,
5612     SmallVectorImpl<SDValue> &InVals, const CallBase *CB) const {
5613 
5614   if ((Subtarget.is64BitELFABI() && !Subtarget.isUsingPCRelativeCalls()) ||
5615       Subtarget.isAIXABI())
5616     setUsesTOCBasePtr(DAG);
5617 
5618   unsigned CallOpc =
5619       getCallOpcode(CFlags, DAG.getMachineFunction().getFunction(), Callee,
5620                     Subtarget, DAG.getTarget());
5621 
5622   if (!CFlags.IsIndirect)
5623     Callee = transformCallee(Callee, DAG, dl, Subtarget);
5624   else if (Subtarget.usesFunctionDescriptors())
5625     prepareDescriptorIndirectCall(DAG, Callee, Glue, Chain, CallSeqStart, CB,
5626                                   dl, CFlags.HasNest, Subtarget);
5627   else
5628     prepareIndirectCall(DAG, Callee, Glue, Chain, dl);
5629 
5630   // Build the operand list for the call instruction.
5631   SmallVector<SDValue, 8> Ops;
5632   buildCallOperands(Ops, CFlags, dl, DAG, RegsToPass, Glue, Chain, Callee,
5633                     SPDiff, Subtarget);
5634 
5635   // Emit tail call.
5636   if (CFlags.IsTailCall) {
5637     // Indirect tail call when using PC Relative calls do not have the same
5638     // constraints.
5639     assert(((Callee.getOpcode() == ISD::Register &&
5640              cast<RegisterSDNode>(Callee)->getReg() == PPC::CTR) ||
5641             Callee.getOpcode() == ISD::TargetExternalSymbol ||
5642             Callee.getOpcode() == ISD::TargetGlobalAddress ||
5643             isa<ConstantSDNode>(Callee) ||
5644             (CFlags.IsIndirect && Subtarget.isUsingPCRelativeCalls())) &&
5645            "Expecting a global address, external symbol, absolute value, "
5646            "register or an indirect tail call when PC Relative calls are "
5647            "used.");
5648     // PC Relative calls also use TC_RETURN as the way to mark tail calls.
5649     assert(CallOpc == PPCISD::TC_RETURN &&
5650            "Unexpected call opcode for a tail call.");
5651     DAG.getMachineFunction().getFrameInfo().setHasTailCall();
5652     return DAG.getNode(CallOpc, dl, MVT::Other, Ops);
5653   }
5654 
5655   std::array<EVT, 2> ReturnTypes = {{MVT::Other, MVT::Glue}};
5656   Chain = DAG.getNode(CallOpc, dl, ReturnTypes, Ops);
5657   DAG.addNoMergeSiteInfo(Chain.getNode(), CFlags.NoMerge);
5658   Glue = Chain.getValue(1);
5659 
5660   // When performing tail call optimization the callee pops its arguments off
5661   // the stack. Account for this here so these bytes can be pushed back on in
5662   // PPCFrameLowering::eliminateCallFramePseudoInstr.
5663   int BytesCalleePops = (CFlags.CallConv == CallingConv::Fast &&
5664                          getTargetMachine().Options.GuaranteedTailCallOpt)
5665                             ? NumBytes
5666                             : 0;
5667 
5668   Chain = DAG.getCALLSEQ_END(Chain, DAG.getIntPtrConstant(NumBytes, dl, true),
5669                              DAG.getIntPtrConstant(BytesCalleePops, dl, true),
5670                              Glue, dl);
5671   Glue = Chain.getValue(1);
5672 
5673   return LowerCallResult(Chain, Glue, CFlags.CallConv, CFlags.IsVarArg, Ins, dl,
5674                          DAG, InVals);
5675 }
5676 
5677 SDValue
5678 PPCTargetLowering::LowerCall(TargetLowering::CallLoweringInfo &CLI,
5679                              SmallVectorImpl<SDValue> &InVals) const {
5680   SelectionDAG &DAG                     = CLI.DAG;
5681   SDLoc &dl                             = CLI.DL;
5682   SmallVectorImpl<ISD::OutputArg> &Outs = CLI.Outs;
5683   SmallVectorImpl<SDValue> &OutVals     = CLI.OutVals;
5684   SmallVectorImpl<ISD::InputArg> &Ins   = CLI.Ins;
5685   SDValue Chain                         = CLI.Chain;
5686   SDValue Callee                        = CLI.Callee;
5687   bool &isTailCall                      = CLI.IsTailCall;
5688   CallingConv::ID CallConv              = CLI.CallConv;
5689   bool isVarArg                         = CLI.IsVarArg;
5690   bool isPatchPoint                     = CLI.IsPatchPoint;
5691   const CallBase *CB                    = CLI.CB;
5692 
5693   if (isTailCall) {
5694     if (Subtarget.useLongCalls() && !(CB && CB->isMustTailCall()))
5695       isTailCall = false;
5696     else if (Subtarget.isSVR4ABI() && Subtarget.isPPC64())
5697       isTailCall = IsEligibleForTailCallOptimization_64SVR4(
5698           Callee, CallConv, CB, isVarArg, Outs, Ins, DAG);
5699     else
5700       isTailCall = IsEligibleForTailCallOptimization(Callee, CallConv, isVarArg,
5701                                                      Ins, DAG);
5702     if (isTailCall) {
5703       ++NumTailCalls;
5704       if (!getTargetMachine().Options.GuaranteedTailCallOpt)
5705         ++NumSiblingCalls;
5706 
5707       // PC Relative calls no longer guarantee that the callee is a Global
5708       // Address Node. The callee could be an indirect tail call in which
5709       // case the SDValue for the callee could be a load (to load the address
5710       // of a function pointer) or it may be a register copy (to move the
5711       // address of the callee from a function parameter into a virtual
5712       // register). It may also be an ExternalSymbolSDNode (ex memcopy).
5713       assert((Subtarget.isUsingPCRelativeCalls() ||
5714               isa<GlobalAddressSDNode>(Callee)) &&
5715              "Callee should be an llvm::Function object.");
5716 
5717       LLVM_DEBUG(dbgs() << "TCO caller: " << DAG.getMachineFunction().getName()
5718                         << "\nTCO callee: ");
5719       LLVM_DEBUG(Callee.dump());
5720     }
5721   }
5722 
5723   if (!isTailCall && CB && CB->isMustTailCall())
5724     report_fatal_error("failed to perform tail call elimination on a call "
5725                        "site marked musttail");
5726 
5727   // When long calls (i.e. indirect calls) are always used, calls are always
5728   // made via function pointer. If we have a function name, first translate it
5729   // into a pointer.
5730   if (Subtarget.useLongCalls() && isa<GlobalAddressSDNode>(Callee) &&
5731       !isTailCall)
5732     Callee = LowerGlobalAddress(Callee, DAG);
5733 
5734   CallFlags CFlags(
5735       CallConv, isTailCall, isVarArg, isPatchPoint,
5736       isIndirectCall(Callee, DAG, Subtarget, isPatchPoint),
5737       // hasNest
5738       Subtarget.is64BitELFABI() &&
5739           any_of(Outs, [](ISD::OutputArg Arg) { return Arg.Flags.isNest(); }),
5740       CLI.NoMerge);
5741 
5742   if (Subtarget.isSVR4ABI() && Subtarget.isPPC64())
5743     return LowerCall_64SVR4(Chain, Callee, CFlags, Outs, OutVals, Ins, dl, DAG,
5744                             InVals, CB);
5745 
5746   if (Subtarget.isSVR4ABI())
5747     return LowerCall_32SVR4(Chain, Callee, CFlags, Outs, OutVals, Ins, dl, DAG,
5748                             InVals, CB);
5749 
5750   if (Subtarget.isAIXABI())
5751     return LowerCall_AIX(Chain, Callee, CFlags, Outs, OutVals, Ins, dl, DAG,
5752                          InVals, CB);
5753 
5754   return LowerCall_Darwin(Chain, Callee, CFlags, Outs, OutVals, Ins, dl, DAG,
5755                           InVals, CB);
5756 }
5757 
5758 SDValue PPCTargetLowering::LowerCall_32SVR4(
5759     SDValue Chain, SDValue Callee, CallFlags CFlags,
5760     const SmallVectorImpl<ISD::OutputArg> &Outs,
5761     const SmallVectorImpl<SDValue> &OutVals,
5762     const SmallVectorImpl<ISD::InputArg> &Ins, const SDLoc &dl,
5763     SelectionDAG &DAG, SmallVectorImpl<SDValue> &InVals,
5764     const CallBase *CB) const {
5765   // See PPCTargetLowering::LowerFormalArguments_32SVR4() for a description
5766   // of the 32-bit SVR4 ABI stack frame layout.
5767 
5768   const CallingConv::ID CallConv = CFlags.CallConv;
5769   const bool IsVarArg = CFlags.IsVarArg;
5770   const bool IsTailCall = CFlags.IsTailCall;
5771 
5772   assert((CallConv == CallingConv::C ||
5773           CallConv == CallingConv::Cold ||
5774           CallConv == CallingConv::Fast) && "Unknown calling convention!");
5775 
5776   const Align PtrAlign(4);
5777 
5778   MachineFunction &MF = DAG.getMachineFunction();
5779 
5780   // Mark this function as potentially containing a function that contains a
5781   // tail call. As a consequence the frame pointer will be used for dynamicalloc
5782   // and restoring the callers stack pointer in this functions epilog. This is
5783   // done because by tail calling the called function might overwrite the value
5784   // in this function's (MF) stack pointer stack slot 0(SP).
5785   if (getTargetMachine().Options.GuaranteedTailCallOpt &&
5786       CallConv == CallingConv::Fast)
5787     MF.getInfo<PPCFunctionInfo>()->setHasFastCall();
5788 
5789   // Count how many bytes are to be pushed on the stack, including the linkage
5790   // area, parameter list area and the part of the local variable space which
5791   // contains copies of aggregates which are passed by value.
5792 
5793   // Assign locations to all of the outgoing arguments.
5794   SmallVector<CCValAssign, 16> ArgLocs;
5795   PPCCCState CCInfo(CallConv, IsVarArg, MF, ArgLocs, *DAG.getContext());
5796 
5797   // Reserve space for the linkage area on the stack.
5798   CCInfo.AllocateStack(Subtarget.getFrameLowering()->getLinkageSize(),
5799                        PtrAlign);
5800   if (useSoftFloat())
5801     CCInfo.PreAnalyzeCallOperands(Outs);
5802 
5803   if (IsVarArg) {
5804     // Handle fixed and variable vector arguments differently.
5805     // Fixed vector arguments go into registers as long as registers are
5806     // available. Variable vector arguments always go into memory.
5807     unsigned NumArgs = Outs.size();
5808 
5809     for (unsigned i = 0; i != NumArgs; ++i) {
5810       MVT ArgVT = Outs[i].VT;
5811       ISD::ArgFlagsTy ArgFlags = Outs[i].Flags;
5812       bool Result;
5813 
5814       if (Outs[i].IsFixed) {
5815         Result = CC_PPC32_SVR4(i, ArgVT, ArgVT, CCValAssign::Full, ArgFlags,
5816                                CCInfo);
5817       } else {
5818         Result = CC_PPC32_SVR4_VarArg(i, ArgVT, ArgVT, CCValAssign::Full,
5819                                       ArgFlags, CCInfo);
5820       }
5821 
5822       if (Result) {
5823 #ifndef NDEBUG
5824         errs() << "Call operand #" << i << " has unhandled type "
5825              << EVT(ArgVT).getEVTString() << "\n";
5826 #endif
5827         llvm_unreachable(nullptr);
5828       }
5829     }
5830   } else {
5831     // All arguments are treated the same.
5832     CCInfo.AnalyzeCallOperands(Outs, CC_PPC32_SVR4);
5833   }
5834   CCInfo.clearWasPPCF128();
5835 
5836   // Assign locations to all of the outgoing aggregate by value arguments.
5837   SmallVector<CCValAssign, 16> ByValArgLocs;
5838   CCState CCByValInfo(CallConv, IsVarArg, MF, ByValArgLocs, *DAG.getContext());
5839 
5840   // Reserve stack space for the allocations in CCInfo.
5841   CCByValInfo.AllocateStack(CCInfo.getNextStackOffset(), PtrAlign);
5842 
5843   CCByValInfo.AnalyzeCallOperands(Outs, CC_PPC32_SVR4_ByVal);
5844 
5845   // Size of the linkage area, parameter list area and the part of the local
5846   // space variable where copies of aggregates which are passed by value are
5847   // stored.
5848   unsigned NumBytes = CCByValInfo.getNextStackOffset();
5849 
5850   // Calculate by how many bytes the stack has to be adjusted in case of tail
5851   // call optimization.
5852   int SPDiff = CalculateTailCallSPDiff(DAG, IsTailCall, NumBytes);
5853 
5854   // Adjust the stack pointer for the new arguments...
5855   // These operations are automatically eliminated by the prolog/epilog pass
5856   Chain = DAG.getCALLSEQ_START(Chain, NumBytes, 0, dl);
5857   SDValue CallSeqStart = Chain;
5858 
5859   // Load the return address and frame pointer so it can be moved somewhere else
5860   // later.
5861   SDValue LROp, FPOp;
5862   Chain = EmitTailCallLoadFPAndRetAddr(DAG, SPDiff, Chain, LROp, FPOp, dl);
5863 
5864   // Set up a copy of the stack pointer for use loading and storing any
5865   // arguments that may not fit in the registers available for argument
5866   // passing.
5867   SDValue StackPtr = DAG.getRegister(PPC::R1, MVT::i32);
5868 
5869   SmallVector<std::pair<unsigned, SDValue>, 8> RegsToPass;
5870   SmallVector<TailCallArgumentInfo, 8> TailCallArguments;
5871   SmallVector<SDValue, 8> MemOpChains;
5872 
5873   bool seenFloatArg = false;
5874   // Walk the register/memloc assignments, inserting copies/loads.
5875   // i - Tracks the index into the list of registers allocated for the call
5876   // RealArgIdx - Tracks the index into the list of actual function arguments
5877   // j - Tracks the index into the list of byval arguments
5878   for (unsigned i = 0, RealArgIdx = 0, j = 0, e = ArgLocs.size();
5879        i != e;
5880        ++i, ++RealArgIdx) {
5881     CCValAssign &VA = ArgLocs[i];
5882     SDValue Arg = OutVals[RealArgIdx];
5883     ISD::ArgFlagsTy Flags = Outs[RealArgIdx].Flags;
5884 
5885     if (Flags.isByVal()) {
5886       // Argument is an aggregate which is passed by value, thus we need to
5887       // create a copy of it in the local variable space of the current stack
5888       // frame (which is the stack frame of the caller) and pass the address of
5889       // this copy to the callee.
5890       assert((j < ByValArgLocs.size()) && "Index out of bounds!");
5891       CCValAssign &ByValVA = ByValArgLocs[j++];
5892       assert((VA.getValNo() == ByValVA.getValNo()) && "ValNo mismatch!");
5893 
5894       // Memory reserved in the local variable space of the callers stack frame.
5895       unsigned LocMemOffset = ByValVA.getLocMemOffset();
5896 
5897       SDValue PtrOff = DAG.getIntPtrConstant(LocMemOffset, dl);
5898       PtrOff = DAG.getNode(ISD::ADD, dl, getPointerTy(MF.getDataLayout()),
5899                            StackPtr, PtrOff);
5900 
5901       // Create a copy of the argument in the local area of the current
5902       // stack frame.
5903       SDValue MemcpyCall =
5904         CreateCopyOfByValArgument(Arg, PtrOff,
5905                                   CallSeqStart.getNode()->getOperand(0),
5906                                   Flags, DAG, dl);
5907 
5908       // This must go outside the CALLSEQ_START..END.
5909       SDValue NewCallSeqStart = DAG.getCALLSEQ_START(MemcpyCall, NumBytes, 0,
5910                                                      SDLoc(MemcpyCall));
5911       DAG.ReplaceAllUsesWith(CallSeqStart.getNode(),
5912                              NewCallSeqStart.getNode());
5913       Chain = CallSeqStart = NewCallSeqStart;
5914 
5915       // Pass the address of the aggregate copy on the stack either in a
5916       // physical register or in the parameter list area of the current stack
5917       // frame to the callee.
5918       Arg = PtrOff;
5919     }
5920 
5921     // When useCRBits() is true, there can be i1 arguments.
5922     // It is because getRegisterType(MVT::i1) => MVT::i1,
5923     // and for other integer types getRegisterType() => MVT::i32.
5924     // Extend i1 and ensure callee will get i32.
5925     if (Arg.getValueType() == MVT::i1)
5926       Arg = DAG.getNode(Flags.isSExt() ? ISD::SIGN_EXTEND : ISD::ZERO_EXTEND,
5927                         dl, MVT::i32, Arg);
5928 
5929     if (VA.isRegLoc()) {
5930       seenFloatArg |= VA.getLocVT().isFloatingPoint();
5931       // Put argument in a physical register.
5932       if (Subtarget.hasSPE() && Arg.getValueType() == MVT::f64) {
5933         bool IsLE = Subtarget.isLittleEndian();
5934         SDValue SVal = DAG.getNode(PPCISD::EXTRACT_SPE, dl, MVT::i32, Arg,
5935                         DAG.getIntPtrConstant(IsLE ? 0 : 1, dl));
5936         RegsToPass.push_back(std::make_pair(VA.getLocReg(), SVal.getValue(0)));
5937         SVal = DAG.getNode(PPCISD::EXTRACT_SPE, dl, MVT::i32, Arg,
5938                            DAG.getIntPtrConstant(IsLE ? 1 : 0, dl));
5939         RegsToPass.push_back(std::make_pair(ArgLocs[++i].getLocReg(),
5940                              SVal.getValue(0)));
5941       } else
5942         RegsToPass.push_back(std::make_pair(VA.getLocReg(), Arg));
5943     } else {
5944       // Put argument in the parameter list area of the current stack frame.
5945       assert(VA.isMemLoc());
5946       unsigned LocMemOffset = VA.getLocMemOffset();
5947 
5948       if (!IsTailCall) {
5949         SDValue PtrOff = DAG.getIntPtrConstant(LocMemOffset, dl);
5950         PtrOff = DAG.getNode(ISD::ADD, dl, getPointerTy(MF.getDataLayout()),
5951                              StackPtr, PtrOff);
5952 
5953         MemOpChains.push_back(
5954             DAG.getStore(Chain, dl, Arg, PtrOff, MachinePointerInfo()));
5955       } else {
5956         // Calculate and remember argument location.
5957         CalculateTailCallArgDest(DAG, MF, false, Arg, SPDiff, LocMemOffset,
5958                                  TailCallArguments);
5959       }
5960     }
5961   }
5962 
5963   if (!MemOpChains.empty())
5964     Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, MemOpChains);
5965 
5966   // Build a sequence of copy-to-reg nodes chained together with token chain
5967   // and flag operands which copy the outgoing args into the appropriate regs.
5968   SDValue InFlag;
5969   for (unsigned i = 0, e = RegsToPass.size(); i != e; ++i) {
5970     Chain = DAG.getCopyToReg(Chain, dl, RegsToPass[i].first,
5971                              RegsToPass[i].second, InFlag);
5972     InFlag = Chain.getValue(1);
5973   }
5974 
5975   // Set CR bit 6 to true if this is a vararg call with floating args passed in
5976   // registers.
5977   if (IsVarArg) {
5978     SDVTList VTs = DAG.getVTList(MVT::Other, MVT::Glue);
5979     SDValue Ops[] = { Chain, InFlag };
5980 
5981     Chain = DAG.getNode(seenFloatArg ? PPCISD::CR6SET : PPCISD::CR6UNSET,
5982                         dl, VTs, makeArrayRef(Ops, InFlag.getNode() ? 2 : 1));
5983 
5984     InFlag = Chain.getValue(1);
5985   }
5986 
5987   if (IsTailCall)
5988     PrepareTailCall(DAG, InFlag, Chain, dl, SPDiff, NumBytes, LROp, FPOp,
5989                     TailCallArguments);
5990 
5991   return FinishCall(CFlags, dl, DAG, RegsToPass, InFlag, Chain, CallSeqStart,
5992                     Callee, SPDiff, NumBytes, Ins, InVals, CB);
5993 }
5994 
5995 // Copy an argument into memory, being careful to do this outside the
5996 // call sequence for the call to which the argument belongs.
5997 SDValue PPCTargetLowering::createMemcpyOutsideCallSeq(
5998     SDValue Arg, SDValue PtrOff, SDValue CallSeqStart, ISD::ArgFlagsTy Flags,
5999     SelectionDAG &DAG, const SDLoc &dl) const {
6000   SDValue MemcpyCall = CreateCopyOfByValArgument(Arg, PtrOff,
6001                         CallSeqStart.getNode()->getOperand(0),
6002                         Flags, DAG, dl);
6003   // The MEMCPY must go outside the CALLSEQ_START..END.
6004   int64_t FrameSize = CallSeqStart.getConstantOperandVal(1);
6005   SDValue NewCallSeqStart = DAG.getCALLSEQ_START(MemcpyCall, FrameSize, 0,
6006                                                  SDLoc(MemcpyCall));
6007   DAG.ReplaceAllUsesWith(CallSeqStart.getNode(),
6008                          NewCallSeqStart.getNode());
6009   return NewCallSeqStart;
6010 }
6011 
6012 SDValue PPCTargetLowering::LowerCall_64SVR4(
6013     SDValue Chain, SDValue Callee, CallFlags CFlags,
6014     const SmallVectorImpl<ISD::OutputArg> &Outs,
6015     const SmallVectorImpl<SDValue> &OutVals,
6016     const SmallVectorImpl<ISD::InputArg> &Ins, const SDLoc &dl,
6017     SelectionDAG &DAG, SmallVectorImpl<SDValue> &InVals,
6018     const CallBase *CB) const {
6019   bool isELFv2ABI = Subtarget.isELFv2ABI();
6020   bool isLittleEndian = Subtarget.isLittleEndian();
6021   unsigned NumOps = Outs.size();
6022   bool IsSibCall = false;
6023   bool IsFastCall = CFlags.CallConv == CallingConv::Fast;
6024 
6025   EVT PtrVT = getPointerTy(DAG.getDataLayout());
6026   unsigned PtrByteSize = 8;
6027 
6028   MachineFunction &MF = DAG.getMachineFunction();
6029 
6030   if (CFlags.IsTailCall && !getTargetMachine().Options.GuaranteedTailCallOpt)
6031     IsSibCall = true;
6032 
6033   // Mark this function as potentially containing a function that contains a
6034   // tail call. As a consequence the frame pointer will be used for dynamicalloc
6035   // and restoring the callers stack pointer in this functions epilog. This is
6036   // done because by tail calling the called function might overwrite the value
6037   // in this function's (MF) stack pointer stack slot 0(SP).
6038   if (getTargetMachine().Options.GuaranteedTailCallOpt && IsFastCall)
6039     MF.getInfo<PPCFunctionInfo>()->setHasFastCall();
6040 
6041   assert(!(IsFastCall && CFlags.IsVarArg) &&
6042          "fastcc not supported on varargs functions");
6043 
6044   // Count how many bytes are to be pushed on the stack, including the linkage
6045   // area, and parameter passing area.  On ELFv1, the linkage area is 48 bytes
6046   // reserved space for [SP][CR][LR][2 x unused][TOC]; on ELFv2, the linkage
6047   // area is 32 bytes reserved space for [SP][CR][LR][TOC].
6048   unsigned LinkageSize = Subtarget.getFrameLowering()->getLinkageSize();
6049   unsigned NumBytes = LinkageSize;
6050   unsigned GPR_idx = 0, FPR_idx = 0, VR_idx = 0;
6051   unsigned &QFPR_idx = FPR_idx;
6052 
6053   static const MCPhysReg GPR[] = {
6054     PPC::X3, PPC::X4, PPC::X5, PPC::X6,
6055     PPC::X7, PPC::X8, PPC::X9, PPC::X10,
6056   };
6057   static const MCPhysReg VR[] = {
6058     PPC::V2, PPC::V3, PPC::V4, PPC::V5, PPC::V6, PPC::V7, PPC::V8,
6059     PPC::V9, PPC::V10, PPC::V11, PPC::V12, PPC::V13
6060   };
6061 
6062   const unsigned NumGPRs = array_lengthof(GPR);
6063   const unsigned NumFPRs = useSoftFloat() ? 0 : 13;
6064   const unsigned NumVRs  = array_lengthof(VR);
6065   const unsigned NumQFPRs = NumFPRs;
6066 
6067   // On ELFv2, we can avoid allocating the parameter area if all the arguments
6068   // can be passed to the callee in registers.
6069   // For the fast calling convention, there is another check below.
6070   // Note: We should keep consistent with LowerFormalArguments_64SVR4()
6071   bool HasParameterArea = !isELFv2ABI || CFlags.IsVarArg || IsFastCall;
6072   if (!HasParameterArea) {
6073     unsigned ParamAreaSize = NumGPRs * PtrByteSize;
6074     unsigned AvailableFPRs = NumFPRs;
6075     unsigned AvailableVRs = NumVRs;
6076     unsigned NumBytesTmp = NumBytes;
6077     for (unsigned i = 0; i != NumOps; ++i) {
6078       if (Outs[i].Flags.isNest()) continue;
6079       if (CalculateStackSlotUsed(Outs[i].VT, Outs[i].ArgVT, Outs[i].Flags,
6080                                 PtrByteSize, LinkageSize, ParamAreaSize,
6081                                 NumBytesTmp, AvailableFPRs, AvailableVRs,
6082                                 Subtarget.hasQPX()))
6083         HasParameterArea = true;
6084     }
6085   }
6086 
6087   // When using the fast calling convention, we don't provide backing for
6088   // arguments that will be in registers.
6089   unsigned NumGPRsUsed = 0, NumFPRsUsed = 0, NumVRsUsed = 0;
6090 
6091   // Avoid allocating parameter area for fastcc functions if all the arguments
6092   // can be passed in the registers.
6093   if (IsFastCall)
6094     HasParameterArea = false;
6095 
6096   // Add up all the space actually used.
6097   for (unsigned i = 0; i != NumOps; ++i) {
6098     ISD::ArgFlagsTy Flags = Outs[i].Flags;
6099     EVT ArgVT = Outs[i].VT;
6100     EVT OrigVT = Outs[i].ArgVT;
6101 
6102     if (Flags.isNest())
6103       continue;
6104 
6105     if (IsFastCall) {
6106       if (Flags.isByVal()) {
6107         NumGPRsUsed += (Flags.getByValSize()+7)/8;
6108         if (NumGPRsUsed > NumGPRs)
6109           HasParameterArea = true;
6110       } else {
6111         switch (ArgVT.getSimpleVT().SimpleTy) {
6112         default: llvm_unreachable("Unexpected ValueType for argument!");
6113         case MVT::i1:
6114         case MVT::i32:
6115         case MVT::i64:
6116           if (++NumGPRsUsed <= NumGPRs)
6117             continue;
6118           break;
6119         case MVT::v4i32:
6120         case MVT::v8i16:
6121         case MVT::v16i8:
6122         case MVT::v2f64:
6123         case MVT::v2i64:
6124         case MVT::v1i128:
6125         case MVT::f128:
6126           if (++NumVRsUsed <= NumVRs)
6127             continue;
6128           break;
6129         case MVT::v4f32:
6130           // When using QPX, this is handled like a FP register, otherwise, it
6131           // is an Altivec register.
6132           if (Subtarget.hasQPX()) {
6133             if (++NumFPRsUsed <= NumFPRs)
6134               continue;
6135           } else {
6136             if (++NumVRsUsed <= NumVRs)
6137               continue;
6138           }
6139           break;
6140         case MVT::f32:
6141         case MVT::f64:
6142         case MVT::v4f64: // QPX
6143         case MVT::v4i1:  // QPX
6144           if (++NumFPRsUsed <= NumFPRs)
6145             continue;
6146           break;
6147         }
6148         HasParameterArea = true;
6149       }
6150     }
6151 
6152     /* Respect alignment of argument on the stack.  */
6153     auto Alignement =
6154         CalculateStackSlotAlignment(ArgVT, OrigVT, Flags, PtrByteSize);
6155     NumBytes = alignTo(NumBytes, Alignement);
6156 
6157     NumBytes += CalculateStackSlotSize(ArgVT, Flags, PtrByteSize);
6158     if (Flags.isInConsecutiveRegsLast())
6159       NumBytes = ((NumBytes + PtrByteSize - 1)/PtrByteSize) * PtrByteSize;
6160   }
6161 
6162   unsigned NumBytesActuallyUsed = NumBytes;
6163 
6164   // In the old ELFv1 ABI,
6165   // the prolog code of the callee may store up to 8 GPR argument registers to
6166   // the stack, allowing va_start to index over them in memory if its varargs.
6167   // Because we cannot tell if this is needed on the caller side, we have to
6168   // conservatively assume that it is needed.  As such, make sure we have at
6169   // least enough stack space for the caller to store the 8 GPRs.
6170   // In the ELFv2 ABI, we allocate the parameter area iff a callee
6171   // really requires memory operands, e.g. a vararg function.
6172   if (HasParameterArea)
6173     NumBytes = std::max(NumBytes, LinkageSize + 8 * PtrByteSize);
6174   else
6175     NumBytes = LinkageSize;
6176 
6177   // Tail call needs the stack to be aligned.
6178   if (getTargetMachine().Options.GuaranteedTailCallOpt && IsFastCall)
6179     NumBytes = EnsureStackAlignment(Subtarget.getFrameLowering(), NumBytes);
6180 
6181   int SPDiff = 0;
6182 
6183   // Calculate by how many bytes the stack has to be adjusted in case of tail
6184   // call optimization.
6185   if (!IsSibCall)
6186     SPDiff = CalculateTailCallSPDiff(DAG, CFlags.IsTailCall, NumBytes);
6187 
6188   // To protect arguments on the stack from being clobbered in a tail call,
6189   // force all the loads to happen before doing any other lowering.
6190   if (CFlags.IsTailCall)
6191     Chain = DAG.getStackArgumentTokenFactor(Chain);
6192 
6193   // Adjust the stack pointer for the new arguments...
6194   // These operations are automatically eliminated by the prolog/epilog pass
6195   if (!IsSibCall)
6196     Chain = DAG.getCALLSEQ_START(Chain, NumBytes, 0, dl);
6197   SDValue CallSeqStart = Chain;
6198 
6199   // Load the return address and frame pointer so it can be move somewhere else
6200   // later.
6201   SDValue LROp, FPOp;
6202   Chain = EmitTailCallLoadFPAndRetAddr(DAG, SPDiff, Chain, LROp, FPOp, dl);
6203 
6204   // Set up a copy of the stack pointer for use loading and storing any
6205   // arguments that may not fit in the registers available for argument
6206   // passing.
6207   SDValue StackPtr = DAG.getRegister(PPC::X1, MVT::i64);
6208 
6209   // Figure out which arguments are going to go in registers, and which in
6210   // memory.  Also, if this is a vararg function, floating point operations
6211   // must be stored to our stack, and loaded into integer regs as well, if
6212   // any integer regs are available for argument passing.
6213   unsigned ArgOffset = LinkageSize;
6214 
6215   SmallVector<std::pair<unsigned, SDValue>, 8> RegsToPass;
6216   SmallVector<TailCallArgumentInfo, 8> TailCallArguments;
6217 
6218   SmallVector<SDValue, 8> MemOpChains;
6219   for (unsigned i = 0; i != NumOps; ++i) {
6220     SDValue Arg = OutVals[i];
6221     ISD::ArgFlagsTy Flags = Outs[i].Flags;
6222     EVT ArgVT = Outs[i].VT;
6223     EVT OrigVT = Outs[i].ArgVT;
6224 
6225     // PtrOff will be used to store the current argument to the stack if a
6226     // register cannot be found for it.
6227     SDValue PtrOff;
6228 
6229     // We re-align the argument offset for each argument, except when using the
6230     // fast calling convention, when we need to make sure we do that only when
6231     // we'll actually use a stack slot.
6232     auto ComputePtrOff = [&]() {
6233       /* Respect alignment of argument on the stack.  */
6234       auto Alignment =
6235           CalculateStackSlotAlignment(ArgVT, OrigVT, Flags, PtrByteSize);
6236       ArgOffset = alignTo(ArgOffset, Alignment);
6237 
6238       PtrOff = DAG.getConstant(ArgOffset, dl, StackPtr.getValueType());
6239 
6240       PtrOff = DAG.getNode(ISD::ADD, dl, PtrVT, StackPtr, PtrOff);
6241     };
6242 
6243     if (!IsFastCall) {
6244       ComputePtrOff();
6245 
6246       /* Compute GPR index associated with argument offset.  */
6247       GPR_idx = (ArgOffset - LinkageSize) / PtrByteSize;
6248       GPR_idx = std::min(GPR_idx, NumGPRs);
6249     }
6250 
6251     // Promote integers to 64-bit values.
6252     if (Arg.getValueType() == MVT::i32 || Arg.getValueType() == MVT::i1) {
6253       // FIXME: Should this use ANY_EXTEND if neither sext nor zext?
6254       unsigned ExtOp = Flags.isSExt() ? ISD::SIGN_EXTEND : ISD::ZERO_EXTEND;
6255       Arg = DAG.getNode(ExtOp, dl, MVT::i64, Arg);
6256     }
6257 
6258     // FIXME memcpy is used way more than necessary.  Correctness first.
6259     // Note: "by value" is code for passing a structure by value, not
6260     // basic types.
6261     if (Flags.isByVal()) {
6262       // Note: Size includes alignment padding, so
6263       //   struct x { short a; char b; }
6264       // will have Size = 4.  With #pragma pack(1), it will have Size = 3.
6265       // These are the proper values we need for right-justifying the
6266       // aggregate in a parameter register.
6267       unsigned Size = Flags.getByValSize();
6268 
6269       // An empty aggregate parameter takes up no storage and no
6270       // registers.
6271       if (Size == 0)
6272         continue;
6273 
6274       if (IsFastCall)
6275         ComputePtrOff();
6276 
6277       // All aggregates smaller than 8 bytes must be passed right-justified.
6278       if (Size==1 || Size==2 || Size==4) {
6279         EVT VT = (Size==1) ? MVT::i8 : ((Size==2) ? MVT::i16 : MVT::i32);
6280         if (GPR_idx != NumGPRs) {
6281           SDValue Load = DAG.getExtLoad(ISD::EXTLOAD, dl, PtrVT, Chain, Arg,
6282                                         MachinePointerInfo(), VT);
6283           MemOpChains.push_back(Load.getValue(1));
6284           RegsToPass.push_back(std::make_pair(GPR[GPR_idx++], Load));
6285 
6286           ArgOffset += PtrByteSize;
6287           continue;
6288         }
6289       }
6290 
6291       if (GPR_idx == NumGPRs && Size < 8) {
6292         SDValue AddPtr = PtrOff;
6293         if (!isLittleEndian) {
6294           SDValue Const = DAG.getConstant(PtrByteSize - Size, dl,
6295                                           PtrOff.getValueType());
6296           AddPtr = DAG.getNode(ISD::ADD, dl, PtrVT, PtrOff, Const);
6297         }
6298         Chain = CallSeqStart = createMemcpyOutsideCallSeq(Arg, AddPtr,
6299                                                           CallSeqStart,
6300                                                           Flags, DAG, dl);
6301         ArgOffset += PtrByteSize;
6302         continue;
6303       }
6304       // Copy entire object into memory.  There are cases where gcc-generated
6305       // code assumes it is there, even if it could be put entirely into
6306       // registers.  (This is not what the doc says.)
6307 
6308       // FIXME: The above statement is likely due to a misunderstanding of the
6309       // documents.  All arguments must be copied into the parameter area BY
6310       // THE CALLEE in the event that the callee takes the address of any
6311       // formal argument.  That has not yet been implemented.  However, it is
6312       // reasonable to use the stack area as a staging area for the register
6313       // load.
6314 
6315       // Skip this for small aggregates, as we will use the same slot for a
6316       // right-justified copy, below.
6317       if (Size >= 8)
6318         Chain = CallSeqStart = createMemcpyOutsideCallSeq(Arg, PtrOff,
6319                                                           CallSeqStart,
6320                                                           Flags, DAG, dl);
6321 
6322       // When a register is available, pass a small aggregate right-justified.
6323       if (Size < 8 && GPR_idx != NumGPRs) {
6324         // The easiest way to get this right-justified in a register
6325         // is to copy the structure into the rightmost portion of a
6326         // local variable slot, then load the whole slot into the
6327         // register.
6328         // FIXME: The memcpy seems to produce pretty awful code for
6329         // small aggregates, particularly for packed ones.
6330         // FIXME: It would be preferable to use the slot in the
6331         // parameter save area instead of a new local variable.
6332         SDValue AddPtr = PtrOff;
6333         if (!isLittleEndian) {
6334           SDValue Const = DAG.getConstant(8 - Size, dl, PtrOff.getValueType());
6335           AddPtr = DAG.getNode(ISD::ADD, dl, PtrVT, PtrOff, Const);
6336         }
6337         Chain = CallSeqStart = createMemcpyOutsideCallSeq(Arg, AddPtr,
6338                                                           CallSeqStart,
6339                                                           Flags, DAG, dl);
6340 
6341         // Load the slot into the register.
6342         SDValue Load =
6343             DAG.getLoad(PtrVT, dl, Chain, PtrOff, MachinePointerInfo());
6344         MemOpChains.push_back(Load.getValue(1));
6345         RegsToPass.push_back(std::make_pair(GPR[GPR_idx++], Load));
6346 
6347         // Done with this argument.
6348         ArgOffset += PtrByteSize;
6349         continue;
6350       }
6351 
6352       // For aggregates larger than PtrByteSize, copy the pieces of the
6353       // object that fit into registers from the parameter save area.
6354       for (unsigned j=0; j<Size; j+=PtrByteSize) {
6355         SDValue Const = DAG.getConstant(j, dl, PtrOff.getValueType());
6356         SDValue AddArg = DAG.getNode(ISD::ADD, dl, PtrVT, Arg, Const);
6357         if (GPR_idx != NumGPRs) {
6358           SDValue Load =
6359               DAG.getLoad(PtrVT, dl, Chain, AddArg, MachinePointerInfo());
6360           MemOpChains.push_back(Load.getValue(1));
6361           RegsToPass.push_back(std::make_pair(GPR[GPR_idx++], Load));
6362           ArgOffset += PtrByteSize;
6363         } else {
6364           ArgOffset += ((Size - j + PtrByteSize-1)/PtrByteSize)*PtrByteSize;
6365           break;
6366         }
6367       }
6368       continue;
6369     }
6370 
6371     switch (Arg.getSimpleValueType().SimpleTy) {
6372     default: llvm_unreachable("Unexpected ValueType for argument!");
6373     case MVT::i1:
6374     case MVT::i32:
6375     case MVT::i64:
6376       if (Flags.isNest()) {
6377         // The 'nest' parameter, if any, is passed in R11.
6378         RegsToPass.push_back(std::make_pair(PPC::X11, Arg));
6379         break;
6380       }
6381 
6382       // These can be scalar arguments or elements of an integer array type
6383       // passed directly.  Clang may use those instead of "byval" aggregate
6384       // types to avoid forcing arguments to memory unnecessarily.
6385       if (GPR_idx != NumGPRs) {
6386         RegsToPass.push_back(std::make_pair(GPR[GPR_idx++], Arg));
6387       } else {
6388         if (IsFastCall)
6389           ComputePtrOff();
6390 
6391         assert(HasParameterArea &&
6392                "Parameter area must exist to pass an argument in memory.");
6393         LowerMemOpCallTo(DAG, MF, Chain, Arg, PtrOff, SPDiff, ArgOffset,
6394                          true, CFlags.IsTailCall, false, MemOpChains,
6395                          TailCallArguments, dl);
6396         if (IsFastCall)
6397           ArgOffset += PtrByteSize;
6398       }
6399       if (!IsFastCall)
6400         ArgOffset += PtrByteSize;
6401       break;
6402     case MVT::f32:
6403     case MVT::f64: {
6404       // These can be scalar arguments or elements of a float array type
6405       // passed directly.  The latter are used to implement ELFv2 homogenous
6406       // float aggregates.
6407 
6408       // Named arguments go into FPRs first, and once they overflow, the
6409       // remaining arguments go into GPRs and then the parameter save area.
6410       // Unnamed arguments for vararg functions always go to GPRs and
6411       // then the parameter save area.  For now, put all arguments to vararg
6412       // routines always in both locations (FPR *and* GPR or stack slot).
6413       bool NeedGPROrStack = CFlags.IsVarArg || FPR_idx == NumFPRs;
6414       bool NeededLoad = false;
6415 
6416       // First load the argument into the next available FPR.
6417       if (FPR_idx != NumFPRs)
6418         RegsToPass.push_back(std::make_pair(FPR[FPR_idx++], Arg));
6419 
6420       // Next, load the argument into GPR or stack slot if needed.
6421       if (!NeedGPROrStack)
6422         ;
6423       else if (GPR_idx != NumGPRs && !IsFastCall) {
6424         // FIXME: We may want to re-enable this for CallingConv::Fast on the P8
6425         // once we support fp <-> gpr moves.
6426 
6427         // In the non-vararg case, this can only ever happen in the
6428         // presence of f32 array types, since otherwise we never run
6429         // out of FPRs before running out of GPRs.
6430         SDValue ArgVal;
6431 
6432         // Double values are always passed in a single GPR.
6433         if (Arg.getValueType() != MVT::f32) {
6434           ArgVal = DAG.getNode(ISD::BITCAST, dl, MVT::i64, Arg);
6435 
6436         // Non-array float values are extended and passed in a GPR.
6437         } else if (!Flags.isInConsecutiveRegs()) {
6438           ArgVal = DAG.getNode(ISD::BITCAST, dl, MVT::i32, Arg);
6439           ArgVal = DAG.getNode(ISD::ANY_EXTEND, dl, MVT::i64, ArgVal);
6440 
6441         // If we have an array of floats, we collect every odd element
6442         // together with its predecessor into one GPR.
6443         } else if (ArgOffset % PtrByteSize != 0) {
6444           SDValue Lo, Hi;
6445           Lo = DAG.getNode(ISD::BITCAST, dl, MVT::i32, OutVals[i - 1]);
6446           Hi = DAG.getNode(ISD::BITCAST, dl, MVT::i32, Arg);
6447           if (!isLittleEndian)
6448             std::swap(Lo, Hi);
6449           ArgVal = DAG.getNode(ISD::BUILD_PAIR, dl, MVT::i64, Lo, Hi);
6450 
6451         // The final element, if even, goes into the first half of a GPR.
6452         } else if (Flags.isInConsecutiveRegsLast()) {
6453           ArgVal = DAG.getNode(ISD::BITCAST, dl, MVT::i32, Arg);
6454           ArgVal = DAG.getNode(ISD::ANY_EXTEND, dl, MVT::i64, ArgVal);
6455           if (!isLittleEndian)
6456             ArgVal = DAG.getNode(ISD::SHL, dl, MVT::i64, ArgVal,
6457                                  DAG.getConstant(32, dl, MVT::i32));
6458 
6459         // Non-final even elements are skipped; they will be handled
6460         // together the with subsequent argument on the next go-around.
6461         } else
6462           ArgVal = SDValue();
6463 
6464         if (ArgVal.getNode())
6465           RegsToPass.push_back(std::make_pair(GPR[GPR_idx++], ArgVal));
6466       } else {
6467         if (IsFastCall)
6468           ComputePtrOff();
6469 
6470         // Single-precision floating-point values are mapped to the
6471         // second (rightmost) word of the stack doubleword.
6472         if (Arg.getValueType() == MVT::f32 &&
6473             !isLittleEndian && !Flags.isInConsecutiveRegs()) {
6474           SDValue ConstFour = DAG.getConstant(4, dl, PtrOff.getValueType());
6475           PtrOff = DAG.getNode(ISD::ADD, dl, PtrVT, PtrOff, ConstFour);
6476         }
6477 
6478         assert(HasParameterArea &&
6479                "Parameter area must exist to pass an argument in memory.");
6480         LowerMemOpCallTo(DAG, MF, Chain, Arg, PtrOff, SPDiff, ArgOffset,
6481                          true, CFlags.IsTailCall, false, MemOpChains,
6482                          TailCallArguments, dl);
6483 
6484         NeededLoad = true;
6485       }
6486       // When passing an array of floats, the array occupies consecutive
6487       // space in the argument area; only round up to the next doubleword
6488       // at the end of the array.  Otherwise, each float takes 8 bytes.
6489       if (!IsFastCall || NeededLoad) {
6490         ArgOffset += (Arg.getValueType() == MVT::f32 &&
6491                       Flags.isInConsecutiveRegs()) ? 4 : 8;
6492         if (Flags.isInConsecutiveRegsLast())
6493           ArgOffset = ((ArgOffset + PtrByteSize - 1)/PtrByteSize) * PtrByteSize;
6494       }
6495       break;
6496     }
6497     case MVT::v4f32:
6498     case MVT::v4i32:
6499     case MVT::v8i16:
6500     case MVT::v16i8:
6501     case MVT::v2f64:
6502     case MVT::v2i64:
6503     case MVT::v1i128:
6504     case MVT::f128:
6505       if (!Subtarget.hasQPX()) {
6506       // These can be scalar arguments or elements of a vector array type
6507       // passed directly.  The latter are used to implement ELFv2 homogenous
6508       // vector aggregates.
6509 
6510       // For a varargs call, named arguments go into VRs or on the stack as
6511       // usual; unnamed arguments always go to the stack or the corresponding
6512       // GPRs when within range.  For now, we always put the value in both
6513       // locations (or even all three).
6514       if (CFlags.IsVarArg) {
6515         assert(HasParameterArea &&
6516                "Parameter area must exist if we have a varargs call.");
6517         // We could elide this store in the case where the object fits
6518         // entirely in R registers.  Maybe later.
6519         SDValue Store =
6520             DAG.getStore(Chain, dl, Arg, PtrOff, MachinePointerInfo());
6521         MemOpChains.push_back(Store);
6522         if (VR_idx != NumVRs) {
6523           SDValue Load =
6524               DAG.getLoad(MVT::v4f32, dl, Store, PtrOff, MachinePointerInfo());
6525           MemOpChains.push_back(Load.getValue(1));
6526           RegsToPass.push_back(std::make_pair(VR[VR_idx++], Load));
6527         }
6528         ArgOffset += 16;
6529         for (unsigned i=0; i<16; i+=PtrByteSize) {
6530           if (GPR_idx == NumGPRs)
6531             break;
6532           SDValue Ix = DAG.getNode(ISD::ADD, dl, PtrVT, PtrOff,
6533                                    DAG.getConstant(i, dl, PtrVT));
6534           SDValue Load =
6535               DAG.getLoad(PtrVT, dl, Store, Ix, MachinePointerInfo());
6536           MemOpChains.push_back(Load.getValue(1));
6537           RegsToPass.push_back(std::make_pair(GPR[GPR_idx++], Load));
6538         }
6539         break;
6540       }
6541 
6542       // Non-varargs Altivec params go into VRs or on the stack.
6543       if (VR_idx != NumVRs) {
6544         RegsToPass.push_back(std::make_pair(VR[VR_idx++], Arg));
6545       } else {
6546         if (IsFastCall)
6547           ComputePtrOff();
6548 
6549         assert(HasParameterArea &&
6550                "Parameter area must exist to pass an argument in memory.");
6551         LowerMemOpCallTo(DAG, MF, Chain, Arg, PtrOff, SPDiff, ArgOffset,
6552                          true, CFlags.IsTailCall, true, MemOpChains,
6553                          TailCallArguments, dl);
6554         if (IsFastCall)
6555           ArgOffset += 16;
6556       }
6557 
6558       if (!IsFastCall)
6559         ArgOffset += 16;
6560       break;
6561       } // not QPX
6562 
6563       assert(Arg.getValueType().getSimpleVT().SimpleTy == MVT::v4f32 &&
6564              "Invalid QPX parameter type");
6565 
6566       LLVM_FALLTHROUGH;
6567     case MVT::v4f64:
6568     case MVT::v4i1: {
6569       bool IsF32 = Arg.getValueType().getSimpleVT().SimpleTy == MVT::v4f32;
6570       if (CFlags.IsVarArg) {
6571         assert(HasParameterArea &&
6572                "Parameter area must exist if we have a varargs call.");
6573         // We could elide this store in the case where the object fits
6574         // entirely in R registers.  Maybe later.
6575         SDValue Store =
6576             DAG.getStore(Chain, dl, Arg, PtrOff, MachinePointerInfo());
6577         MemOpChains.push_back(Store);
6578         if (QFPR_idx != NumQFPRs) {
6579           SDValue Load = DAG.getLoad(IsF32 ? MVT::v4f32 : MVT::v4f64, dl, Store,
6580                                      PtrOff, MachinePointerInfo());
6581           MemOpChains.push_back(Load.getValue(1));
6582           RegsToPass.push_back(std::make_pair(QFPR[QFPR_idx++], Load));
6583         }
6584         ArgOffset += (IsF32 ? 16 : 32);
6585         for (unsigned i = 0; i < (IsF32 ? 16U : 32U); i += PtrByteSize) {
6586           if (GPR_idx == NumGPRs)
6587             break;
6588           SDValue Ix = DAG.getNode(ISD::ADD, dl, PtrVT, PtrOff,
6589                                    DAG.getConstant(i, dl, PtrVT));
6590           SDValue Load =
6591               DAG.getLoad(PtrVT, dl, Store, Ix, MachinePointerInfo());
6592           MemOpChains.push_back(Load.getValue(1));
6593           RegsToPass.push_back(std::make_pair(GPR[GPR_idx++], Load));
6594         }
6595         break;
6596       }
6597 
6598       // Non-varargs QPX params go into registers or on the stack.
6599       if (QFPR_idx != NumQFPRs) {
6600         RegsToPass.push_back(std::make_pair(QFPR[QFPR_idx++], Arg));
6601       } else {
6602         if (IsFastCall)
6603           ComputePtrOff();
6604 
6605         assert(HasParameterArea &&
6606                "Parameter area must exist to pass an argument in memory.");
6607         LowerMemOpCallTo(DAG, MF, Chain, Arg, PtrOff, SPDiff, ArgOffset,
6608                          true, CFlags.IsTailCall, true, MemOpChains,
6609                          TailCallArguments, dl);
6610         if (IsFastCall)
6611           ArgOffset += (IsF32 ? 16 : 32);
6612       }
6613 
6614       if (!IsFastCall)
6615         ArgOffset += (IsF32 ? 16 : 32);
6616       break;
6617       }
6618     }
6619   }
6620 
6621   assert((!HasParameterArea || NumBytesActuallyUsed == ArgOffset) &&
6622          "mismatch in size of parameter area");
6623   (void)NumBytesActuallyUsed;
6624 
6625   if (!MemOpChains.empty())
6626     Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, MemOpChains);
6627 
6628   // Check if this is an indirect call (MTCTR/BCTRL).
6629   // See prepareDescriptorIndirectCall and buildCallOperands for more
6630   // information about calls through function pointers in the 64-bit SVR4 ABI.
6631   if (CFlags.IsIndirect) {
6632     // For 64-bit ELFv2 ABI with PCRel, do not save the TOC of the
6633     // caller in the TOC save area.
6634     if (isTOCSaveRestoreRequired(Subtarget)) {
6635       assert(!CFlags.IsTailCall && "Indirect tails calls not supported");
6636       // Load r2 into a virtual register and store it to the TOC save area.
6637       setUsesTOCBasePtr(DAG);
6638       SDValue Val = DAG.getCopyFromReg(Chain, dl, PPC::X2, MVT::i64);
6639       // TOC save area offset.
6640       unsigned TOCSaveOffset = Subtarget.getFrameLowering()->getTOCSaveOffset();
6641       SDValue PtrOff = DAG.getIntPtrConstant(TOCSaveOffset, dl);
6642       SDValue AddPtr = DAG.getNode(ISD::ADD, dl, PtrVT, StackPtr, PtrOff);
6643       Chain = DAG.getStore(Val.getValue(1), dl, Val, AddPtr,
6644                            MachinePointerInfo::getStack(
6645                                DAG.getMachineFunction(), TOCSaveOffset));
6646     }
6647     // In the ELFv2 ABI, R12 must contain the address of an indirect callee.
6648     // This does not mean the MTCTR instruction must use R12; it's easier
6649     // to model this as an extra parameter, so do that.
6650     if (isELFv2ABI && !CFlags.IsPatchPoint)
6651       RegsToPass.push_back(std::make_pair((unsigned)PPC::X12, Callee));
6652   }
6653 
6654   // Build a sequence of copy-to-reg nodes chained together with token chain
6655   // and flag operands which copy the outgoing args into the appropriate regs.
6656   SDValue InFlag;
6657   for (unsigned i = 0, e = RegsToPass.size(); i != e; ++i) {
6658     Chain = DAG.getCopyToReg(Chain, dl, RegsToPass[i].first,
6659                              RegsToPass[i].second, InFlag);
6660     InFlag = Chain.getValue(1);
6661   }
6662 
6663   if (CFlags.IsTailCall && !IsSibCall)
6664     PrepareTailCall(DAG, InFlag, Chain, dl, SPDiff, NumBytes, LROp, FPOp,
6665                     TailCallArguments);
6666 
6667   return FinishCall(CFlags, dl, DAG, RegsToPass, InFlag, Chain, CallSeqStart,
6668                     Callee, SPDiff, NumBytes, Ins, InVals, CB);
6669 }
6670 
6671 SDValue PPCTargetLowering::LowerCall_Darwin(
6672     SDValue Chain, SDValue Callee, CallFlags CFlags,
6673     const SmallVectorImpl<ISD::OutputArg> &Outs,
6674     const SmallVectorImpl<SDValue> &OutVals,
6675     const SmallVectorImpl<ISD::InputArg> &Ins, const SDLoc &dl,
6676     SelectionDAG &DAG, SmallVectorImpl<SDValue> &InVals,
6677     const CallBase *CB) const {
6678   unsigned NumOps = Outs.size();
6679 
6680   EVT PtrVT = getPointerTy(DAG.getDataLayout());
6681   bool isPPC64 = PtrVT == MVT::i64;
6682   unsigned PtrByteSize = isPPC64 ? 8 : 4;
6683 
6684   MachineFunction &MF = DAG.getMachineFunction();
6685 
6686   // Mark this function as potentially containing a function that contains a
6687   // tail call. As a consequence the frame pointer will be used for dynamicalloc
6688   // and restoring the callers stack pointer in this functions epilog. This is
6689   // done because by tail calling the called function might overwrite the value
6690   // in this function's (MF) stack pointer stack slot 0(SP).
6691   if (getTargetMachine().Options.GuaranteedTailCallOpt &&
6692       CFlags.CallConv == CallingConv::Fast)
6693     MF.getInfo<PPCFunctionInfo>()->setHasFastCall();
6694 
6695   // Count how many bytes are to be pushed on the stack, including the linkage
6696   // area, and parameter passing area.  We start with 24/48 bytes, which is
6697   // prereserved space for [SP][CR][LR][3 x unused].
6698   unsigned LinkageSize = Subtarget.getFrameLowering()->getLinkageSize();
6699   unsigned NumBytes = LinkageSize;
6700 
6701   // Add up all the space actually used.
6702   // In 32-bit non-varargs calls, Altivec parameters all go at the end; usually
6703   // they all go in registers, but we must reserve stack space for them for
6704   // possible use by the caller.  In varargs or 64-bit calls, parameters are
6705   // assigned stack space in order, with padding so Altivec parameters are
6706   // 16-byte aligned.
6707   unsigned nAltivecParamsAtEnd = 0;
6708   for (unsigned i = 0; i != NumOps; ++i) {
6709     ISD::ArgFlagsTy Flags = Outs[i].Flags;
6710     EVT ArgVT = Outs[i].VT;
6711     // Varargs Altivec parameters are padded to a 16 byte boundary.
6712     if (ArgVT == MVT::v4f32 || ArgVT == MVT::v4i32 ||
6713         ArgVT == MVT::v8i16 || ArgVT == MVT::v16i8 ||
6714         ArgVT == MVT::v2f64 || ArgVT == MVT::v2i64) {
6715       if (!CFlags.IsVarArg && !isPPC64) {
6716         // Non-varargs Altivec parameters go after all the non-Altivec
6717         // parameters; handle those later so we know how much padding we need.
6718         nAltivecParamsAtEnd++;
6719         continue;
6720       }
6721       // Varargs and 64-bit Altivec parameters are padded to 16 byte boundary.
6722       NumBytes = ((NumBytes+15)/16)*16;
6723     }
6724     NumBytes += CalculateStackSlotSize(ArgVT, Flags, PtrByteSize);
6725   }
6726 
6727   // Allow for Altivec parameters at the end, if needed.
6728   if (nAltivecParamsAtEnd) {
6729     NumBytes = ((NumBytes+15)/16)*16;
6730     NumBytes += 16*nAltivecParamsAtEnd;
6731   }
6732 
6733   // The prolog code of the callee may store up to 8 GPR argument registers to
6734   // the stack, allowing va_start to index over them in memory if its varargs.
6735   // Because we cannot tell if this is needed on the caller side, we have to
6736   // conservatively assume that it is needed.  As such, make sure we have at
6737   // least enough stack space for the caller to store the 8 GPRs.
6738   NumBytes = std::max(NumBytes, LinkageSize + 8 * PtrByteSize);
6739 
6740   // Tail call needs the stack to be aligned.
6741   if (getTargetMachine().Options.GuaranteedTailCallOpt &&
6742       CFlags.CallConv == CallingConv::Fast)
6743     NumBytes = EnsureStackAlignment(Subtarget.getFrameLowering(), NumBytes);
6744 
6745   // Calculate by how many bytes the stack has to be adjusted in case of tail
6746   // call optimization.
6747   int SPDiff = CalculateTailCallSPDiff(DAG, CFlags.IsTailCall, NumBytes);
6748 
6749   // To protect arguments on the stack from being clobbered in a tail call,
6750   // force all the loads to happen before doing any other lowering.
6751   if (CFlags.IsTailCall)
6752     Chain = DAG.getStackArgumentTokenFactor(Chain);
6753 
6754   // Adjust the stack pointer for the new arguments...
6755   // These operations are automatically eliminated by the prolog/epilog pass
6756   Chain = DAG.getCALLSEQ_START(Chain, NumBytes, 0, dl);
6757   SDValue CallSeqStart = Chain;
6758 
6759   // Load the return address and frame pointer so it can be move somewhere else
6760   // later.
6761   SDValue LROp, FPOp;
6762   Chain = EmitTailCallLoadFPAndRetAddr(DAG, SPDiff, Chain, LROp, FPOp, dl);
6763 
6764   // Set up a copy of the stack pointer for use loading and storing any
6765   // arguments that may not fit in the registers available for argument
6766   // passing.
6767   SDValue StackPtr;
6768   if (isPPC64)
6769     StackPtr = DAG.getRegister(PPC::X1, MVT::i64);
6770   else
6771     StackPtr = DAG.getRegister(PPC::R1, MVT::i32);
6772 
6773   // Figure out which arguments are going to go in registers, and which in
6774   // memory.  Also, if this is a vararg function, floating point operations
6775   // must be stored to our stack, and loaded into integer regs as well, if
6776   // any integer regs are available for argument passing.
6777   unsigned ArgOffset = LinkageSize;
6778   unsigned GPR_idx = 0, FPR_idx = 0, VR_idx = 0;
6779 
6780   static const MCPhysReg GPR_32[] = {           // 32-bit registers.
6781     PPC::R3, PPC::R4, PPC::R5, PPC::R6,
6782     PPC::R7, PPC::R8, PPC::R9, PPC::R10,
6783   };
6784   static const MCPhysReg GPR_64[] = {           // 64-bit registers.
6785     PPC::X3, PPC::X4, PPC::X5, PPC::X6,
6786     PPC::X7, PPC::X8, PPC::X9, PPC::X10,
6787   };
6788   static const MCPhysReg VR[] = {
6789     PPC::V2, PPC::V3, PPC::V4, PPC::V5, PPC::V6, PPC::V7, PPC::V8,
6790     PPC::V9, PPC::V10, PPC::V11, PPC::V12, PPC::V13
6791   };
6792   const unsigned NumGPRs = array_lengthof(GPR_32);
6793   const unsigned NumFPRs = 13;
6794   const unsigned NumVRs  = array_lengthof(VR);
6795 
6796   const MCPhysReg *GPR = isPPC64 ? GPR_64 : GPR_32;
6797 
6798   SmallVector<std::pair<unsigned, SDValue>, 8> RegsToPass;
6799   SmallVector<TailCallArgumentInfo, 8> TailCallArguments;
6800 
6801   SmallVector<SDValue, 8> MemOpChains;
6802   for (unsigned i = 0; i != NumOps; ++i) {
6803     SDValue Arg = OutVals[i];
6804     ISD::ArgFlagsTy Flags = Outs[i].Flags;
6805 
6806     // PtrOff will be used to store the current argument to the stack if a
6807     // register cannot be found for it.
6808     SDValue PtrOff;
6809 
6810     PtrOff = DAG.getConstant(ArgOffset, dl, StackPtr.getValueType());
6811 
6812     PtrOff = DAG.getNode(ISD::ADD, dl, PtrVT, StackPtr, PtrOff);
6813 
6814     // On PPC64, promote integers to 64-bit values.
6815     if (isPPC64 && Arg.getValueType() == MVT::i32) {
6816       // FIXME: Should this use ANY_EXTEND if neither sext nor zext?
6817       unsigned ExtOp = Flags.isSExt() ? ISD::SIGN_EXTEND : ISD::ZERO_EXTEND;
6818       Arg = DAG.getNode(ExtOp, dl, MVT::i64, Arg);
6819     }
6820 
6821     // FIXME memcpy is used way more than necessary.  Correctness first.
6822     // Note: "by value" is code for passing a structure by value, not
6823     // basic types.
6824     if (Flags.isByVal()) {
6825       unsigned Size = Flags.getByValSize();
6826       // Very small objects are passed right-justified.  Everything else is
6827       // passed left-justified.
6828       if (Size==1 || Size==2) {
6829         EVT VT = (Size==1) ? MVT::i8 : MVT::i16;
6830         if (GPR_idx != NumGPRs) {
6831           SDValue Load = DAG.getExtLoad(ISD::EXTLOAD, dl, PtrVT, Chain, Arg,
6832                                         MachinePointerInfo(), VT);
6833           MemOpChains.push_back(Load.getValue(1));
6834           RegsToPass.push_back(std::make_pair(GPR[GPR_idx++], Load));
6835 
6836           ArgOffset += PtrByteSize;
6837         } else {
6838           SDValue Const = DAG.getConstant(PtrByteSize - Size, dl,
6839                                           PtrOff.getValueType());
6840           SDValue AddPtr = DAG.getNode(ISD::ADD, dl, PtrVT, PtrOff, Const);
6841           Chain = CallSeqStart = createMemcpyOutsideCallSeq(Arg, AddPtr,
6842                                                             CallSeqStart,
6843                                                             Flags, DAG, dl);
6844           ArgOffset += PtrByteSize;
6845         }
6846         continue;
6847       }
6848       // Copy entire object into memory.  There are cases where gcc-generated
6849       // code assumes it is there, even if it could be put entirely into
6850       // registers.  (This is not what the doc says.)
6851       Chain = CallSeqStart = createMemcpyOutsideCallSeq(Arg, PtrOff,
6852                                                         CallSeqStart,
6853                                                         Flags, DAG, dl);
6854 
6855       // For small aggregates (Darwin only) and aggregates >= PtrByteSize,
6856       // copy the pieces of the object that fit into registers from the
6857       // parameter save area.
6858       for (unsigned j=0; j<Size; j+=PtrByteSize) {
6859         SDValue Const = DAG.getConstant(j, dl, PtrOff.getValueType());
6860         SDValue AddArg = DAG.getNode(ISD::ADD, dl, PtrVT, Arg, Const);
6861         if (GPR_idx != NumGPRs) {
6862           SDValue Load =
6863               DAG.getLoad(PtrVT, dl, Chain, AddArg, MachinePointerInfo());
6864           MemOpChains.push_back(Load.getValue(1));
6865           RegsToPass.push_back(std::make_pair(GPR[GPR_idx++], Load));
6866           ArgOffset += PtrByteSize;
6867         } else {
6868           ArgOffset += ((Size - j + PtrByteSize-1)/PtrByteSize)*PtrByteSize;
6869           break;
6870         }
6871       }
6872       continue;
6873     }
6874 
6875     switch (Arg.getSimpleValueType().SimpleTy) {
6876     default: llvm_unreachable("Unexpected ValueType for argument!");
6877     case MVT::i1:
6878     case MVT::i32:
6879     case MVT::i64:
6880       if (GPR_idx != NumGPRs) {
6881         if (Arg.getValueType() == MVT::i1)
6882           Arg = DAG.getNode(ISD::ZERO_EXTEND, dl, PtrVT, Arg);
6883 
6884         RegsToPass.push_back(std::make_pair(GPR[GPR_idx++], Arg));
6885       } else {
6886         LowerMemOpCallTo(DAG, MF, Chain, Arg, PtrOff, SPDiff, ArgOffset,
6887                          isPPC64, CFlags.IsTailCall, false, MemOpChains,
6888                          TailCallArguments, dl);
6889       }
6890       ArgOffset += PtrByteSize;
6891       break;
6892     case MVT::f32:
6893     case MVT::f64:
6894       if (FPR_idx != NumFPRs) {
6895         RegsToPass.push_back(std::make_pair(FPR[FPR_idx++], Arg));
6896 
6897         if (CFlags.IsVarArg) {
6898           SDValue Store =
6899               DAG.getStore(Chain, dl, Arg, PtrOff, MachinePointerInfo());
6900           MemOpChains.push_back(Store);
6901 
6902           // Float varargs are always shadowed in available integer registers
6903           if (GPR_idx != NumGPRs) {
6904             SDValue Load =
6905                 DAG.getLoad(PtrVT, dl, Store, PtrOff, MachinePointerInfo());
6906             MemOpChains.push_back(Load.getValue(1));
6907             RegsToPass.push_back(std::make_pair(GPR[GPR_idx++], Load));
6908           }
6909           if (GPR_idx != NumGPRs && Arg.getValueType() == MVT::f64 && !isPPC64){
6910             SDValue ConstFour = DAG.getConstant(4, dl, PtrOff.getValueType());
6911             PtrOff = DAG.getNode(ISD::ADD, dl, PtrVT, PtrOff, ConstFour);
6912             SDValue Load =
6913                 DAG.getLoad(PtrVT, dl, Store, PtrOff, MachinePointerInfo());
6914             MemOpChains.push_back(Load.getValue(1));
6915             RegsToPass.push_back(std::make_pair(GPR[GPR_idx++], Load));
6916           }
6917         } else {
6918           // If we have any FPRs remaining, we may also have GPRs remaining.
6919           // Args passed in FPRs consume either 1 (f32) or 2 (f64) available
6920           // GPRs.
6921           if (GPR_idx != NumGPRs)
6922             ++GPR_idx;
6923           if (GPR_idx != NumGPRs && Arg.getValueType() == MVT::f64 &&
6924               !isPPC64)  // PPC64 has 64-bit GPR's obviously :)
6925             ++GPR_idx;
6926         }
6927       } else
6928         LowerMemOpCallTo(DAG, MF, Chain, Arg, PtrOff, SPDiff, ArgOffset,
6929                          isPPC64, CFlags.IsTailCall, false, MemOpChains,
6930                          TailCallArguments, dl);
6931       if (isPPC64)
6932         ArgOffset += 8;
6933       else
6934         ArgOffset += Arg.getValueType() == MVT::f32 ? 4 : 8;
6935       break;
6936     case MVT::v4f32:
6937     case MVT::v4i32:
6938     case MVT::v8i16:
6939     case MVT::v16i8:
6940       if (CFlags.IsVarArg) {
6941         // These go aligned on the stack, or in the corresponding R registers
6942         // when within range.  The Darwin PPC ABI doc claims they also go in
6943         // V registers; in fact gcc does this only for arguments that are
6944         // prototyped, not for those that match the ...  We do it for all
6945         // arguments, seems to work.
6946         while (ArgOffset % 16 !=0) {
6947           ArgOffset += PtrByteSize;
6948           if (GPR_idx != NumGPRs)
6949             GPR_idx++;
6950         }
6951         // We could elide this store in the case where the object fits
6952         // entirely in R registers.  Maybe later.
6953         PtrOff = DAG.getNode(ISD::ADD, dl, PtrVT, StackPtr,
6954                              DAG.getConstant(ArgOffset, dl, PtrVT));
6955         SDValue Store =
6956             DAG.getStore(Chain, dl, Arg, PtrOff, MachinePointerInfo());
6957         MemOpChains.push_back(Store);
6958         if (VR_idx != NumVRs) {
6959           SDValue Load =
6960               DAG.getLoad(MVT::v4f32, dl, Store, PtrOff, MachinePointerInfo());
6961           MemOpChains.push_back(Load.getValue(1));
6962           RegsToPass.push_back(std::make_pair(VR[VR_idx++], Load));
6963         }
6964         ArgOffset += 16;
6965         for (unsigned i=0; i<16; i+=PtrByteSize) {
6966           if (GPR_idx == NumGPRs)
6967             break;
6968           SDValue Ix = DAG.getNode(ISD::ADD, dl, PtrVT, PtrOff,
6969                                    DAG.getConstant(i, dl, PtrVT));
6970           SDValue Load =
6971               DAG.getLoad(PtrVT, dl, Store, Ix, MachinePointerInfo());
6972           MemOpChains.push_back(Load.getValue(1));
6973           RegsToPass.push_back(std::make_pair(GPR[GPR_idx++], Load));
6974         }
6975         break;
6976       }
6977 
6978       // Non-varargs Altivec params generally go in registers, but have
6979       // stack space allocated at the end.
6980       if (VR_idx != NumVRs) {
6981         // Doesn't have GPR space allocated.
6982         RegsToPass.push_back(std::make_pair(VR[VR_idx++], Arg));
6983       } else if (nAltivecParamsAtEnd==0) {
6984         // We are emitting Altivec params in order.
6985         LowerMemOpCallTo(DAG, MF, Chain, Arg, PtrOff, SPDiff, ArgOffset,
6986                          isPPC64, CFlags.IsTailCall, true, MemOpChains,
6987                          TailCallArguments, dl);
6988         ArgOffset += 16;
6989       }
6990       break;
6991     }
6992   }
6993   // If all Altivec parameters fit in registers, as they usually do,
6994   // they get stack space following the non-Altivec parameters.  We
6995   // don't track this here because nobody below needs it.
6996   // If there are more Altivec parameters than fit in registers emit
6997   // the stores here.
6998   if (!CFlags.IsVarArg && nAltivecParamsAtEnd > NumVRs) {
6999     unsigned j = 0;
7000     // Offset is aligned; skip 1st 12 params which go in V registers.
7001     ArgOffset = ((ArgOffset+15)/16)*16;
7002     ArgOffset += 12*16;
7003     for (unsigned i = 0; i != NumOps; ++i) {
7004       SDValue Arg = OutVals[i];
7005       EVT ArgType = Outs[i].VT;
7006       if (ArgType==MVT::v4f32 || ArgType==MVT::v4i32 ||
7007           ArgType==MVT::v8i16 || ArgType==MVT::v16i8) {
7008         if (++j > NumVRs) {
7009           SDValue PtrOff;
7010           // We are emitting Altivec params in order.
7011           LowerMemOpCallTo(DAG, MF, Chain, Arg, PtrOff, SPDiff, ArgOffset,
7012                            isPPC64, CFlags.IsTailCall, true, MemOpChains,
7013                            TailCallArguments, dl);
7014           ArgOffset += 16;
7015         }
7016       }
7017     }
7018   }
7019 
7020   if (!MemOpChains.empty())
7021     Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, MemOpChains);
7022 
7023   // On Darwin, R12 must contain the address of an indirect callee.  This does
7024   // not mean the MTCTR instruction must use R12; it's easier to model this as
7025   // an extra parameter, so do that.
7026   if (CFlags.IsIndirect) {
7027     assert(!CFlags.IsTailCall && "Indirect tail-calls not supported.");
7028     RegsToPass.push_back(std::make_pair((unsigned)(isPPC64 ? PPC::X12 :
7029                                                    PPC::R12), Callee));
7030   }
7031 
7032   // Build a sequence of copy-to-reg nodes chained together with token chain
7033   // and flag operands which copy the outgoing args into the appropriate regs.
7034   SDValue InFlag;
7035   for (unsigned i = 0, e = RegsToPass.size(); i != e; ++i) {
7036     Chain = DAG.getCopyToReg(Chain, dl, RegsToPass[i].first,
7037                              RegsToPass[i].second, InFlag);
7038     InFlag = Chain.getValue(1);
7039   }
7040 
7041   if (CFlags.IsTailCall)
7042     PrepareTailCall(DAG, InFlag, Chain, dl, SPDiff, NumBytes, LROp, FPOp,
7043                     TailCallArguments);
7044 
7045   return FinishCall(CFlags, dl, DAG, RegsToPass, InFlag, Chain, CallSeqStart,
7046                     Callee, SPDiff, NumBytes, Ins, InVals, CB);
7047 }
7048 
7049 static bool CC_AIX(unsigned ValNo, MVT ValVT, MVT LocVT,
7050                    CCValAssign::LocInfo LocInfo, ISD::ArgFlagsTy ArgFlags,
7051                    CCState &State) {
7052 
7053   const PPCSubtarget &Subtarget = static_cast<const PPCSubtarget &>(
7054       State.getMachineFunction().getSubtarget());
7055   const bool IsPPC64 = Subtarget.isPPC64();
7056   const Align PtrAlign = IsPPC64 ? Align(8) : Align(4);
7057   const MVT RegVT = IsPPC64 ? MVT::i64 : MVT::i32;
7058 
7059   assert((!ValVT.isInteger() ||
7060           (ValVT.getSizeInBits() <= RegVT.getSizeInBits())) &&
7061          "Integer argument exceeds register size: should have been legalized");
7062 
7063   if (ValVT == MVT::f128)
7064     report_fatal_error("f128 is unimplemented on AIX.");
7065 
7066   if (ArgFlags.isNest())
7067     report_fatal_error("Nest arguments are unimplemented.");
7068 
7069   if (ValVT.isVector() || LocVT.isVector())
7070     report_fatal_error("Vector arguments are unimplemented on AIX.");
7071 
7072   static const MCPhysReg GPR_32[] = {// 32-bit registers.
7073                                      PPC::R3, PPC::R4, PPC::R5, PPC::R6,
7074                                      PPC::R7, PPC::R8, PPC::R9, PPC::R10};
7075   static const MCPhysReg GPR_64[] = {// 64-bit registers.
7076                                      PPC::X3, PPC::X4, PPC::X5, PPC::X6,
7077                                      PPC::X7, PPC::X8, PPC::X9, PPC::X10};
7078 
7079   if (ArgFlags.isByVal()) {
7080     if (ArgFlags.getNonZeroByValAlign() > PtrAlign)
7081       report_fatal_error("Pass-by-value arguments with alignment greater than "
7082                          "register width are not supported.");
7083 
7084     const unsigned ByValSize = ArgFlags.getByValSize();
7085 
7086     // An empty aggregate parameter takes up no storage and no registers,
7087     // but needs a MemLoc for a stack slot for the formal arguments side.
7088     if (ByValSize == 0) {
7089       State.addLoc(CCValAssign::getMem(ValNo, MVT::INVALID_SIMPLE_VALUE_TYPE,
7090                                        State.getNextStackOffset(), RegVT,
7091                                        LocInfo));
7092       return false;
7093     }
7094 
7095     const unsigned StackSize = alignTo(ByValSize, PtrAlign);
7096     unsigned Offset = State.AllocateStack(StackSize, PtrAlign);
7097     for (const unsigned E = Offset + StackSize; Offset < E;
7098          Offset += PtrAlign.value()) {
7099       if (unsigned Reg = State.AllocateReg(IsPPC64 ? GPR_64 : GPR_32))
7100         State.addLoc(CCValAssign::getReg(ValNo, ValVT, Reg, RegVT, LocInfo));
7101       else {
7102         State.addLoc(CCValAssign::getMem(ValNo, MVT::INVALID_SIMPLE_VALUE_TYPE,
7103                                          Offset, MVT::INVALID_SIMPLE_VALUE_TYPE,
7104                                          LocInfo));
7105         break;
7106       }
7107     }
7108     return false;
7109   }
7110 
7111   // Arguments always reserve parameter save area.
7112   switch (ValVT.SimpleTy) {
7113   default:
7114     report_fatal_error("Unhandled value type for argument.");
7115   case MVT::i64:
7116     // i64 arguments should have been split to i32 for PPC32.
7117     assert(IsPPC64 && "PPC32 should have split i64 values.");
7118     LLVM_FALLTHROUGH;
7119   case MVT::i1:
7120   case MVT::i32: {
7121     const unsigned Offset = State.AllocateStack(PtrAlign.value(), PtrAlign);
7122     // AIX integer arguments are always passed in register width.
7123     if (ValVT.getSizeInBits() < RegVT.getSizeInBits())
7124       LocInfo = ArgFlags.isSExt() ? CCValAssign::LocInfo::SExt
7125                                   : CCValAssign::LocInfo::ZExt;
7126     if (unsigned Reg = State.AllocateReg(IsPPC64 ? GPR_64 : GPR_32))
7127       State.addLoc(CCValAssign::getReg(ValNo, ValVT, Reg, RegVT, LocInfo));
7128     else
7129       State.addLoc(CCValAssign::getMem(ValNo, ValVT, Offset, RegVT, LocInfo));
7130 
7131     return false;
7132   }
7133   case MVT::f32:
7134   case MVT::f64: {
7135     // Parameter save area (PSA) is reserved even if the float passes in fpr.
7136     const unsigned StoreSize = LocVT.getStoreSize();
7137     // Floats are always 4-byte aligned in the PSA on AIX.
7138     // This includes f64 in 64-bit mode for ABI compatibility.
7139     const unsigned Offset =
7140         State.AllocateStack(IsPPC64 ? 8 : StoreSize, Align(4));
7141     unsigned FReg = State.AllocateReg(FPR);
7142     if (FReg)
7143       State.addLoc(CCValAssign::getReg(ValNo, ValVT, FReg, LocVT, LocInfo));
7144 
7145     // Reserve and initialize GPRs or initialize the PSA as required.
7146     for (unsigned I = 0; I < StoreSize; I += PtrAlign.value()) {
7147       if (unsigned Reg = State.AllocateReg(IsPPC64 ? GPR_64 : GPR_32)) {
7148         assert(FReg && "An FPR should be available when a GPR is reserved.");
7149         if (State.isVarArg()) {
7150           // Successfully reserved GPRs are only initialized for vararg calls.
7151           // Custom handling is required for:
7152           //   f64 in PPC32 needs to be split into 2 GPRs.
7153           //   f32 in PPC64 needs to occupy only lower 32 bits of 64-bit GPR.
7154           State.addLoc(
7155               CCValAssign::getCustomReg(ValNo, ValVT, Reg, RegVT, LocInfo));
7156         }
7157       } else {
7158         // If there are insufficient GPRs, the PSA needs to be initialized.
7159         // Initialization occurs even if an FPR was initialized for
7160         // compatibility with the AIX XL compiler. The full memory for the
7161         // argument will be initialized even if a prior word is saved in GPR.
7162         // A custom memLoc is used when the argument also passes in FPR so
7163         // that the callee handling can skip over it easily.
7164         State.addLoc(
7165             FReg ? CCValAssign::getCustomMem(ValNo, ValVT, Offset, LocVT,
7166                                              LocInfo)
7167                  : CCValAssign::getMem(ValNo, ValVT, Offset, LocVT, LocInfo));
7168         break;
7169       }
7170     }
7171 
7172     return false;
7173   }
7174   }
7175   return true;
7176 }
7177 
7178 static const TargetRegisterClass *getRegClassForSVT(MVT::SimpleValueType SVT,
7179                                                     bool IsPPC64) {
7180   assert((IsPPC64 || SVT != MVT::i64) &&
7181          "i64 should have been split for 32-bit codegen.");
7182 
7183   switch (SVT) {
7184   default:
7185     report_fatal_error("Unexpected value type for formal argument");
7186   case MVT::i1:
7187   case MVT::i32:
7188   case MVT::i64:
7189     return IsPPC64 ? &PPC::G8RCRegClass : &PPC::GPRCRegClass;
7190   case MVT::f32:
7191     return &PPC::F4RCRegClass;
7192   case MVT::f64:
7193     return &PPC::F8RCRegClass;
7194   }
7195 }
7196 
7197 static SDValue truncateScalarIntegerArg(ISD::ArgFlagsTy Flags, EVT ValVT,
7198                                         SelectionDAG &DAG, SDValue ArgValue,
7199                                         MVT LocVT, const SDLoc &dl) {
7200   assert(ValVT.isScalarInteger() && LocVT.isScalarInteger());
7201   assert(ValVT.getSizeInBits() < LocVT.getSizeInBits());
7202 
7203   if (Flags.isSExt())
7204     ArgValue = DAG.getNode(ISD::AssertSext, dl, LocVT, ArgValue,
7205                            DAG.getValueType(ValVT));
7206   else if (Flags.isZExt())
7207     ArgValue = DAG.getNode(ISD::AssertZext, dl, LocVT, ArgValue,
7208                            DAG.getValueType(ValVT));
7209 
7210   return DAG.getNode(ISD::TRUNCATE, dl, ValVT, ArgValue);
7211 }
7212 
7213 static unsigned mapArgRegToOffsetAIX(unsigned Reg, const PPCFrameLowering *FL) {
7214   const unsigned LASize = FL->getLinkageSize();
7215 
7216   if (PPC::GPRCRegClass.contains(Reg)) {
7217     assert(Reg >= PPC::R3 && Reg <= PPC::R10 &&
7218            "Reg must be a valid argument register!");
7219     return LASize + 4 * (Reg - PPC::R3);
7220   }
7221 
7222   if (PPC::G8RCRegClass.contains(Reg)) {
7223     assert(Reg >= PPC::X3 && Reg <= PPC::X10 &&
7224            "Reg must be a valid argument register!");
7225     return LASize + 8 * (Reg - PPC::X3);
7226   }
7227 
7228   llvm_unreachable("Only general purpose registers expected.");
7229 }
7230 
7231 //   AIX ABI Stack Frame Layout:
7232 //
7233 //   Low Memory +--------------------------------------------+
7234 //   SP   +---> | Back chain                                 | ---+
7235 //        |     +--------------------------------------------+    |
7236 //        |     | Saved Condition Register                   |    |
7237 //        |     +--------------------------------------------+    |
7238 //        |     | Saved Linkage Register                     |    |
7239 //        |     +--------------------------------------------+    | Linkage Area
7240 //        |     | Reserved for compilers                     |    |
7241 //        |     +--------------------------------------------+    |
7242 //        |     | Reserved for binders                       |    |
7243 //        |     +--------------------------------------------+    |
7244 //        |     | Saved TOC pointer                          | ---+
7245 //        |     +--------------------------------------------+
7246 //        |     | Parameter save area                        |
7247 //        |     +--------------------------------------------+
7248 //        |     | Alloca space                               |
7249 //        |     +--------------------------------------------+
7250 //        |     | Local variable space                       |
7251 //        |     +--------------------------------------------+
7252 //        |     | Float/int conversion temporary             |
7253 //        |     +--------------------------------------------+
7254 //        |     | Save area for AltiVec registers            |
7255 //        |     +--------------------------------------------+
7256 //        |     | AltiVec alignment padding                  |
7257 //        |     +--------------------------------------------+
7258 //        |     | Save area for VRSAVE register              |
7259 //        |     +--------------------------------------------+
7260 //        |     | Save area for General Purpose registers    |
7261 //        |     +--------------------------------------------+
7262 //        |     | Save area for Floating Point registers     |
7263 //        |     +--------------------------------------------+
7264 //        +---- | Back chain                                 |
7265 // High Memory  +--------------------------------------------+
7266 //
7267 //  Specifications:
7268 //  AIX 7.2 Assembler Language Reference
7269 //  Subroutine linkage convention
7270 
7271 SDValue PPCTargetLowering::LowerFormalArguments_AIX(
7272     SDValue Chain, CallingConv::ID CallConv, bool isVarArg,
7273     const SmallVectorImpl<ISD::InputArg> &Ins, const SDLoc &dl,
7274     SelectionDAG &DAG, SmallVectorImpl<SDValue> &InVals) const {
7275 
7276   assert((CallConv == CallingConv::C || CallConv == CallingConv::Cold ||
7277           CallConv == CallingConv::Fast) &&
7278          "Unexpected calling convention!");
7279 
7280   if (getTargetMachine().Options.GuaranteedTailCallOpt)
7281     report_fatal_error("Tail call support is unimplemented on AIX.");
7282 
7283   if (useSoftFloat())
7284     report_fatal_error("Soft float support is unimplemented on AIX.");
7285 
7286   const PPCSubtarget &Subtarget =
7287       static_cast<const PPCSubtarget &>(DAG.getSubtarget());
7288   if (Subtarget.hasQPX())
7289     report_fatal_error("QPX support is not supported on AIX.");
7290 
7291   const bool IsPPC64 = Subtarget.isPPC64();
7292   const unsigned PtrByteSize = IsPPC64 ? 8 : 4;
7293 
7294   // Assign locations to all of the incoming arguments.
7295   SmallVector<CCValAssign, 16> ArgLocs;
7296   MachineFunction &MF = DAG.getMachineFunction();
7297   MachineFrameInfo &MFI = MF.getFrameInfo();
7298   CCState CCInfo(CallConv, isVarArg, MF, ArgLocs, *DAG.getContext());
7299 
7300   const EVT PtrVT = getPointerTy(MF.getDataLayout());
7301   // Reserve space for the linkage area on the stack.
7302   const unsigned LinkageSize = Subtarget.getFrameLowering()->getLinkageSize();
7303   CCInfo.AllocateStack(LinkageSize, Align(PtrByteSize));
7304   CCInfo.AnalyzeFormalArguments(Ins, CC_AIX);
7305 
7306   SmallVector<SDValue, 8> MemOps;
7307 
7308   for (size_t I = 0, End = ArgLocs.size(); I != End; /* No increment here */) {
7309     CCValAssign &VA = ArgLocs[I++];
7310     MVT LocVT = VA.getLocVT();
7311     ISD::ArgFlagsTy Flags = Ins[VA.getValNo()].Flags;
7312 
7313     // For compatibility with the AIX XL compiler, the float args in the
7314     // parameter save area are initialized even if the argument is available
7315     // in register.  The caller is required to initialize both the register
7316     // and memory, however, the callee can choose to expect it in either.
7317     // The memloc is dismissed here because the argument is retrieved from
7318     // the register.
7319     if (VA.isMemLoc() && VA.needsCustom())
7320       continue;
7321 
7322     if (Flags.isByVal() && VA.isMemLoc()) {
7323       const unsigned Size =
7324           alignTo(Flags.getByValSize() ? Flags.getByValSize() : PtrByteSize,
7325                   PtrByteSize);
7326       const int FI = MF.getFrameInfo().CreateFixedObject(
7327           Size, VA.getLocMemOffset(), /* IsImmutable */ false,
7328           /* IsAliased */ true);
7329       SDValue FIN = DAG.getFrameIndex(FI, PtrVT);
7330       InVals.push_back(FIN);
7331 
7332       continue;
7333     }
7334 
7335     if (Flags.isByVal()) {
7336       assert(VA.isRegLoc() && "MemLocs should already be handled.");
7337 
7338       const MCPhysReg ArgReg = VA.getLocReg();
7339       const PPCFrameLowering *FL = Subtarget.getFrameLowering();
7340 
7341       if (Flags.getNonZeroByValAlign() > PtrByteSize)
7342         report_fatal_error("Over aligned byvals not supported yet.");
7343 
7344       const unsigned StackSize = alignTo(Flags.getByValSize(), PtrByteSize);
7345       const int FI = MF.getFrameInfo().CreateFixedObject(
7346           StackSize, mapArgRegToOffsetAIX(ArgReg, FL), /* IsImmutable */ false,
7347           /* IsAliased */ true);
7348       SDValue FIN = DAG.getFrameIndex(FI, PtrVT);
7349       InVals.push_back(FIN);
7350 
7351       // Add live ins for all the RegLocs for the same ByVal.
7352       const TargetRegisterClass *RegClass =
7353           IsPPC64 ? &PPC::G8RCRegClass : &PPC::GPRCRegClass;
7354 
7355       auto HandleRegLoc = [&, RegClass, LocVT](const MCPhysReg PhysReg,
7356                                                unsigned Offset) {
7357         const unsigned VReg = MF.addLiveIn(PhysReg, RegClass);
7358         // Since the callers side has left justified the aggregate in the
7359         // register, we can simply store the entire register into the stack
7360         // slot.
7361         SDValue CopyFrom = DAG.getCopyFromReg(Chain, dl, VReg, LocVT);
7362         // The store to the fixedstack object is needed becuase accessing a
7363         // field of the ByVal will use a gep and load. Ideally we will optimize
7364         // to extracting the value from the register directly, and elide the
7365         // stores when the arguments address is not taken, but that will need to
7366         // be future work.
7367         SDValue Store =
7368             DAG.getStore(CopyFrom.getValue(1), dl, CopyFrom,
7369                          DAG.getObjectPtrOffset(dl, FIN, Offset),
7370                          MachinePointerInfo::getFixedStack(MF, FI, Offset));
7371 
7372         MemOps.push_back(Store);
7373       };
7374 
7375       unsigned Offset = 0;
7376       HandleRegLoc(VA.getLocReg(), Offset);
7377       Offset += PtrByteSize;
7378       for (; Offset != StackSize && ArgLocs[I].isRegLoc();
7379            Offset += PtrByteSize) {
7380         assert(ArgLocs[I].getValNo() == VA.getValNo() &&
7381                "RegLocs should be for ByVal argument.");
7382 
7383         const CCValAssign RL = ArgLocs[I++];
7384         HandleRegLoc(RL.getLocReg(), Offset);
7385       }
7386 
7387       if (Offset != StackSize) {
7388         assert(ArgLocs[I].getValNo() == VA.getValNo() &&
7389                "Expected MemLoc for remaining bytes.");
7390         assert(ArgLocs[I].isMemLoc() && "Expected MemLoc for remaining bytes.");
7391         // Consume the MemLoc.The InVal has already been emitted, so nothing
7392         // more needs to be done.
7393         ++I;
7394       }
7395 
7396       continue;
7397     }
7398 
7399     EVT ValVT = VA.getValVT();
7400     if (VA.isRegLoc() && !VA.needsCustom()) {
7401       MVT::SimpleValueType SVT = ValVT.getSimpleVT().SimpleTy;
7402       unsigned VReg =
7403           MF.addLiveIn(VA.getLocReg(), getRegClassForSVT(SVT, IsPPC64));
7404       SDValue ArgValue = DAG.getCopyFromReg(Chain, dl, VReg, LocVT);
7405       if (ValVT.isScalarInteger() &&
7406           (ValVT.getSizeInBits() < LocVT.getSizeInBits())) {
7407         ArgValue =
7408             truncateScalarIntegerArg(Flags, ValVT, DAG, ArgValue, LocVT, dl);
7409       }
7410       InVals.push_back(ArgValue);
7411       continue;
7412     }
7413     if (VA.isMemLoc()) {
7414       const unsigned LocSize = LocVT.getStoreSize();
7415       const unsigned ValSize = ValVT.getStoreSize();
7416       assert((ValSize <= LocSize) &&
7417              "Object size is larger than size of MemLoc");
7418       int CurArgOffset = VA.getLocMemOffset();
7419       // Objects are right-justified because AIX is big-endian.
7420       if (LocSize > ValSize)
7421         CurArgOffset += LocSize - ValSize;
7422       // Potential tail calls could cause overwriting of argument stack slots.
7423       const bool IsImmutable =
7424           !(getTargetMachine().Options.GuaranteedTailCallOpt &&
7425             (CallConv == CallingConv::Fast));
7426       int FI = MFI.CreateFixedObject(ValSize, CurArgOffset, IsImmutable);
7427       SDValue FIN = DAG.getFrameIndex(FI, PtrVT);
7428       SDValue ArgValue =
7429           DAG.getLoad(ValVT, dl, Chain, FIN, MachinePointerInfo());
7430       InVals.push_back(ArgValue);
7431       continue;
7432     }
7433   }
7434 
7435   // On AIX a minimum of 8 words is saved to the parameter save area.
7436   const unsigned MinParameterSaveArea = 8 * PtrByteSize;
7437   // Area that is at least reserved in the caller of this function.
7438   unsigned CallerReservedArea =
7439       std::max(CCInfo.getNextStackOffset(), LinkageSize + MinParameterSaveArea);
7440 
7441   // Set the size that is at least reserved in caller of this function. Tail
7442   // call optimized function's reserved stack space needs to be aligned so
7443   // that taking the difference between two stack areas will result in an
7444   // aligned stack.
7445   CallerReservedArea =
7446       EnsureStackAlignment(Subtarget.getFrameLowering(), CallerReservedArea);
7447   PPCFunctionInfo *FuncInfo = MF.getInfo<PPCFunctionInfo>();
7448   FuncInfo->setMinReservedArea(CallerReservedArea);
7449 
7450   if (isVarArg) {
7451     FuncInfo->setVarArgsFrameIndex(
7452         MFI.CreateFixedObject(PtrByteSize, CCInfo.getNextStackOffset(), true));
7453     SDValue FIN = DAG.getFrameIndex(FuncInfo->getVarArgsFrameIndex(), PtrVT);
7454 
7455     static const MCPhysReg GPR_32[] = {PPC::R3, PPC::R4, PPC::R5, PPC::R6,
7456                                        PPC::R7, PPC::R8, PPC::R9, PPC::R10};
7457 
7458     static const MCPhysReg GPR_64[] = {PPC::X3, PPC::X4, PPC::X5, PPC::X6,
7459                                        PPC::X7, PPC::X8, PPC::X9, PPC::X10};
7460     const unsigned NumGPArgRegs = array_lengthof(IsPPC64 ? GPR_64 : GPR_32);
7461 
7462     // The fixed integer arguments of a variadic function are stored to the
7463     // VarArgsFrameIndex on the stack so that they may be loaded by
7464     // dereferencing the result of va_next.
7465     for (unsigned GPRIndex =
7466              (CCInfo.getNextStackOffset() - LinkageSize) / PtrByteSize;
7467          GPRIndex < NumGPArgRegs; ++GPRIndex) {
7468 
7469       const unsigned VReg =
7470           IsPPC64 ? MF.addLiveIn(GPR_64[GPRIndex], &PPC::G8RCRegClass)
7471                   : MF.addLiveIn(GPR_32[GPRIndex], &PPC::GPRCRegClass);
7472 
7473       SDValue Val = DAG.getCopyFromReg(Chain, dl, VReg, PtrVT);
7474       SDValue Store =
7475           DAG.getStore(Val.getValue(1), dl, Val, FIN, MachinePointerInfo());
7476       MemOps.push_back(Store);
7477       // Increment the address for the next argument to store.
7478       SDValue PtrOff = DAG.getConstant(PtrByteSize, dl, PtrVT);
7479       FIN = DAG.getNode(ISD::ADD, dl, PtrOff.getValueType(), FIN, PtrOff);
7480     }
7481   }
7482 
7483   if (!MemOps.empty())
7484     Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, MemOps);
7485 
7486   return Chain;
7487 }
7488 
7489 SDValue PPCTargetLowering::LowerCall_AIX(
7490     SDValue Chain, SDValue Callee, CallFlags CFlags,
7491     const SmallVectorImpl<ISD::OutputArg> &Outs,
7492     const SmallVectorImpl<SDValue> &OutVals,
7493     const SmallVectorImpl<ISD::InputArg> &Ins, const SDLoc &dl,
7494     SelectionDAG &DAG, SmallVectorImpl<SDValue> &InVals,
7495     const CallBase *CB) const {
7496   // See PPCTargetLowering::LowerFormalArguments_AIX() for a description of the
7497   // AIX ABI stack frame layout.
7498 
7499   assert((CFlags.CallConv == CallingConv::C ||
7500           CFlags.CallConv == CallingConv::Cold ||
7501           CFlags.CallConv == CallingConv::Fast) &&
7502          "Unexpected calling convention!");
7503 
7504   if (CFlags.IsPatchPoint)
7505     report_fatal_error("This call type is unimplemented on AIX.");
7506 
7507   const PPCSubtarget& Subtarget =
7508       static_cast<const PPCSubtarget&>(DAG.getSubtarget());
7509   if (Subtarget.hasQPX())
7510     report_fatal_error("QPX is not supported on AIX.");
7511   if (Subtarget.hasAltivec())
7512     report_fatal_error("Altivec support is unimplemented on AIX.");
7513 
7514   MachineFunction &MF = DAG.getMachineFunction();
7515   SmallVector<CCValAssign, 16> ArgLocs;
7516   CCState CCInfo(CFlags.CallConv, CFlags.IsVarArg, MF, ArgLocs,
7517                  *DAG.getContext());
7518 
7519   // Reserve space for the linkage save area (LSA) on the stack.
7520   // In both PPC32 and PPC64 there are 6 reserved slots in the LSA:
7521   //   [SP][CR][LR][2 x reserved][TOC].
7522   // The LSA is 24 bytes (6x4) in PPC32 and 48 bytes (6x8) in PPC64.
7523   const unsigned LinkageSize = Subtarget.getFrameLowering()->getLinkageSize();
7524   const bool IsPPC64 = Subtarget.isPPC64();
7525   const EVT PtrVT = getPointerTy(DAG.getDataLayout());
7526   const unsigned PtrByteSize = IsPPC64 ? 8 : 4;
7527   CCInfo.AllocateStack(LinkageSize, Align(PtrByteSize));
7528   CCInfo.AnalyzeCallOperands(Outs, CC_AIX);
7529 
7530   // The prolog code of the callee may store up to 8 GPR argument registers to
7531   // the stack, allowing va_start to index over them in memory if the callee
7532   // is variadic.
7533   // Because we cannot tell if this is needed on the caller side, we have to
7534   // conservatively assume that it is needed.  As such, make sure we have at
7535   // least enough stack space for the caller to store the 8 GPRs.
7536   const unsigned MinParameterSaveAreaSize = 8 * PtrByteSize;
7537   const unsigned NumBytes = std::max(LinkageSize + MinParameterSaveAreaSize,
7538                                      CCInfo.getNextStackOffset());
7539 
7540   // Adjust the stack pointer for the new arguments...
7541   // These operations are automatically eliminated by the prolog/epilog pass.
7542   Chain = DAG.getCALLSEQ_START(Chain, NumBytes, 0, dl);
7543   SDValue CallSeqStart = Chain;
7544 
7545   SmallVector<std::pair<unsigned, SDValue>, 8> RegsToPass;
7546   SmallVector<SDValue, 8> MemOpChains;
7547 
7548   // Set up a copy of the stack pointer for loading and storing any
7549   // arguments that may not fit in the registers available for argument
7550   // passing.
7551   const SDValue StackPtr = IsPPC64 ? DAG.getRegister(PPC::X1, MVT::i64)
7552                                    : DAG.getRegister(PPC::R1, MVT::i32);
7553 
7554   for (unsigned I = 0, E = ArgLocs.size(); I != E;) {
7555     const unsigned ValNo = ArgLocs[I].getValNo();
7556     SDValue Arg = OutVals[ValNo];
7557     ISD::ArgFlagsTy Flags = Outs[ValNo].Flags;
7558 
7559     if (Flags.isByVal()) {
7560       const unsigned ByValSize = Flags.getByValSize();
7561 
7562       // Nothing to do for zero-sized ByVals on the caller side.
7563       if (!ByValSize) {
7564         ++I;
7565         continue;
7566       }
7567 
7568       auto GetLoad = [&](EVT VT, unsigned LoadOffset) {
7569         return DAG.getExtLoad(ISD::ZEXTLOAD, dl, PtrVT, Chain,
7570                               (LoadOffset != 0)
7571                                   ? DAG.getObjectPtrOffset(dl, Arg, LoadOffset)
7572                                   : Arg,
7573                               MachinePointerInfo(), VT);
7574       };
7575 
7576       unsigned LoadOffset = 0;
7577 
7578       // Initialize registers, which are fully occupied by the by-val argument.
7579       while (LoadOffset + PtrByteSize <= ByValSize && ArgLocs[I].isRegLoc()) {
7580         SDValue Load = GetLoad(PtrVT, LoadOffset);
7581         MemOpChains.push_back(Load.getValue(1));
7582         LoadOffset += PtrByteSize;
7583         const CCValAssign &ByValVA = ArgLocs[I++];
7584         assert(ByValVA.getValNo() == ValNo &&
7585                "Unexpected location for pass-by-value argument.");
7586         RegsToPass.push_back(std::make_pair(ByValVA.getLocReg(), Load));
7587       }
7588 
7589       if (LoadOffset == ByValSize)
7590         continue;
7591 
7592       // There must be one more loc to handle the remainder.
7593       assert(ArgLocs[I].getValNo() == ValNo &&
7594              "Expected additional location for by-value argument.");
7595 
7596       if (ArgLocs[I].isMemLoc()) {
7597         assert(LoadOffset < ByValSize && "Unexpected memloc for by-val arg.");
7598         const CCValAssign &ByValVA = ArgLocs[I++];
7599         ISD::ArgFlagsTy MemcpyFlags = Flags;
7600         // Only memcpy the bytes that don't pass in register.
7601         MemcpyFlags.setByValSize(ByValSize - LoadOffset);
7602         Chain = CallSeqStart = createMemcpyOutsideCallSeq(
7603             (LoadOffset != 0) ? DAG.getObjectPtrOffset(dl, Arg, LoadOffset)
7604                               : Arg,
7605             DAG.getObjectPtrOffset(dl, StackPtr, ByValVA.getLocMemOffset()),
7606             CallSeqStart, MemcpyFlags, DAG, dl);
7607         continue;
7608       }
7609 
7610       // Initialize the final register residue.
7611       // Any residue that occupies the final by-val arg register must be
7612       // left-justified on AIX. Loads must be a power-of-2 size and cannot be
7613       // larger than the ByValSize. For example: a 7 byte by-val arg requires 4,
7614       // 2 and 1 byte loads.
7615       const unsigned ResidueBytes = ByValSize % PtrByteSize;
7616       assert(ResidueBytes != 0 && LoadOffset + PtrByteSize > ByValSize &&
7617              "Unexpected register residue for by-value argument.");
7618       SDValue ResidueVal;
7619       for (unsigned Bytes = 0; Bytes != ResidueBytes;) {
7620         const unsigned N = PowerOf2Floor(ResidueBytes - Bytes);
7621         const MVT VT =
7622             N == 1 ? MVT::i8
7623                    : ((N == 2) ? MVT::i16 : (N == 4 ? MVT::i32 : MVT::i64));
7624         SDValue Load = GetLoad(VT, LoadOffset);
7625         MemOpChains.push_back(Load.getValue(1));
7626         LoadOffset += N;
7627         Bytes += N;
7628 
7629         // By-val arguments are passed left-justfied in register.
7630         // Every load here needs to be shifted, otherwise a full register load
7631         // should have been used.
7632         assert(PtrVT.getSimpleVT().getSizeInBits() > (Bytes * 8) &&
7633                "Unexpected load emitted during handling of pass-by-value "
7634                "argument.");
7635         unsigned NumSHLBits = PtrVT.getSimpleVT().getSizeInBits() - (Bytes * 8);
7636         EVT ShiftAmountTy =
7637             getShiftAmountTy(Load->getValueType(0), DAG.getDataLayout());
7638         SDValue SHLAmt = DAG.getConstant(NumSHLBits, dl, ShiftAmountTy);
7639         SDValue ShiftedLoad =
7640             DAG.getNode(ISD::SHL, dl, Load.getValueType(), Load, SHLAmt);
7641         ResidueVal = ResidueVal ? DAG.getNode(ISD::OR, dl, PtrVT, ResidueVal,
7642                                               ShiftedLoad)
7643                                 : ShiftedLoad;
7644       }
7645 
7646       const CCValAssign &ByValVA = ArgLocs[I++];
7647       RegsToPass.push_back(std::make_pair(ByValVA.getLocReg(), ResidueVal));
7648       continue;
7649     }
7650 
7651     CCValAssign &VA = ArgLocs[I++];
7652     const MVT LocVT = VA.getLocVT();
7653     const MVT ValVT = VA.getValVT();
7654 
7655     switch (VA.getLocInfo()) {
7656     default:
7657       report_fatal_error("Unexpected argument extension type.");
7658     case CCValAssign::Full:
7659       break;
7660     case CCValAssign::ZExt:
7661       Arg = DAG.getNode(ISD::ZERO_EXTEND, dl, VA.getLocVT(), Arg);
7662       break;
7663     case CCValAssign::SExt:
7664       Arg = DAG.getNode(ISD::SIGN_EXTEND, dl, VA.getLocVT(), Arg);
7665       break;
7666     }
7667 
7668     if (VA.isRegLoc() && !VA.needsCustom()) {
7669       RegsToPass.push_back(std::make_pair(VA.getLocReg(), Arg));
7670       continue;
7671     }
7672 
7673     if (VA.isMemLoc()) {
7674       SDValue PtrOff =
7675           DAG.getConstant(VA.getLocMemOffset(), dl, StackPtr.getValueType());
7676       PtrOff = DAG.getNode(ISD::ADD, dl, PtrVT, StackPtr, PtrOff);
7677       MemOpChains.push_back(
7678           DAG.getStore(Chain, dl, Arg, PtrOff, MachinePointerInfo()));
7679 
7680       continue;
7681     }
7682 
7683     // Custom handling is used for GPR initializations for vararg float
7684     // arguments.
7685     assert(VA.isRegLoc() && VA.needsCustom() && CFlags.IsVarArg &&
7686            ValVT.isFloatingPoint() && LocVT.isInteger() &&
7687            "Unexpected register handling for calling convention.");
7688 
7689     SDValue ArgAsInt =
7690         DAG.getBitcast(MVT::getIntegerVT(ValVT.getSizeInBits()), Arg);
7691 
7692     if (Arg.getValueType().getStoreSize() == LocVT.getStoreSize())
7693       // f32 in 32-bit GPR
7694       // f64 in 64-bit GPR
7695       RegsToPass.push_back(std::make_pair(VA.getLocReg(), ArgAsInt));
7696     else if (Arg.getValueType().getSizeInBits() < LocVT.getSizeInBits())
7697       // f32 in 64-bit GPR.
7698       RegsToPass.push_back(std::make_pair(
7699           VA.getLocReg(), DAG.getZExtOrTrunc(ArgAsInt, dl, LocVT)));
7700     else {
7701       // f64 in two 32-bit GPRs
7702       // The 2 GPRs are marked custom and expected to be adjacent in ArgLocs.
7703       assert(Arg.getValueType() == MVT::f64 && CFlags.IsVarArg && !IsPPC64 &&
7704              "Unexpected custom register for argument!");
7705       CCValAssign &GPR1 = VA;
7706       SDValue MSWAsI64 = DAG.getNode(ISD::SRL, dl, MVT::i64, ArgAsInt,
7707                                      DAG.getConstant(32, dl, MVT::i8));
7708       RegsToPass.push_back(std::make_pair(
7709           GPR1.getLocReg(), DAG.getZExtOrTrunc(MSWAsI64, dl, MVT::i32)));
7710 
7711       if (I != E) {
7712         // If only 1 GPR was available, there will only be one custom GPR and
7713         // the argument will also pass in memory.
7714         CCValAssign &PeekArg = ArgLocs[I];
7715         if (PeekArg.isRegLoc() && PeekArg.getValNo() == PeekArg.getValNo()) {
7716           assert(PeekArg.needsCustom() && "A second custom GPR is expected.");
7717           CCValAssign &GPR2 = ArgLocs[I++];
7718           RegsToPass.push_back(std::make_pair(
7719               GPR2.getLocReg(), DAG.getZExtOrTrunc(ArgAsInt, dl, MVT::i32)));
7720         }
7721       }
7722     }
7723   }
7724 
7725   if (!MemOpChains.empty())
7726     Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, MemOpChains);
7727 
7728   // For indirect calls, we need to save the TOC base to the stack for
7729   // restoration after the call.
7730   if (CFlags.IsIndirect) {
7731     assert(!CFlags.IsTailCall && "Indirect tail-calls not supported.");
7732     const MCRegister TOCBaseReg = Subtarget.getTOCPointerRegister();
7733     const MCRegister StackPtrReg = Subtarget.getStackPointerRegister();
7734     const MVT PtrVT = Subtarget.isPPC64() ? MVT::i64 : MVT::i32;
7735     const unsigned TOCSaveOffset =
7736         Subtarget.getFrameLowering()->getTOCSaveOffset();
7737 
7738     setUsesTOCBasePtr(DAG);
7739     SDValue Val = DAG.getCopyFromReg(Chain, dl, TOCBaseReg, PtrVT);
7740     SDValue PtrOff = DAG.getIntPtrConstant(TOCSaveOffset, dl);
7741     SDValue StackPtr = DAG.getRegister(StackPtrReg, PtrVT);
7742     SDValue AddPtr = DAG.getNode(ISD::ADD, dl, PtrVT, StackPtr, PtrOff);
7743     Chain = DAG.getStore(
7744         Val.getValue(1), dl, Val, AddPtr,
7745         MachinePointerInfo::getStack(DAG.getMachineFunction(), TOCSaveOffset));
7746   }
7747 
7748   // Build a sequence of copy-to-reg nodes chained together with token chain
7749   // and flag operands which copy the outgoing args into the appropriate regs.
7750   SDValue InFlag;
7751   for (auto Reg : RegsToPass) {
7752     Chain = DAG.getCopyToReg(Chain, dl, Reg.first, Reg.second, InFlag);
7753     InFlag = Chain.getValue(1);
7754   }
7755 
7756   const int SPDiff = 0;
7757   return FinishCall(CFlags, dl, DAG, RegsToPass, InFlag, Chain, CallSeqStart,
7758                     Callee, SPDiff, NumBytes, Ins, InVals, CB);
7759 }
7760 
7761 bool
7762 PPCTargetLowering::CanLowerReturn(CallingConv::ID CallConv,
7763                                   MachineFunction &MF, bool isVarArg,
7764                                   const SmallVectorImpl<ISD::OutputArg> &Outs,
7765                                   LLVMContext &Context) const {
7766   SmallVector<CCValAssign, 16> RVLocs;
7767   CCState CCInfo(CallConv, isVarArg, MF, RVLocs, Context);
7768   return CCInfo.CheckReturn(
7769       Outs, (Subtarget.isSVR4ABI() && CallConv == CallingConv::Cold)
7770                 ? RetCC_PPC_Cold
7771                 : RetCC_PPC);
7772 }
7773 
7774 SDValue
7775 PPCTargetLowering::LowerReturn(SDValue Chain, CallingConv::ID CallConv,
7776                                bool isVarArg,
7777                                const SmallVectorImpl<ISD::OutputArg> &Outs,
7778                                const SmallVectorImpl<SDValue> &OutVals,
7779                                const SDLoc &dl, SelectionDAG &DAG) const {
7780   SmallVector<CCValAssign, 16> RVLocs;
7781   CCState CCInfo(CallConv, isVarArg, DAG.getMachineFunction(), RVLocs,
7782                  *DAG.getContext());
7783   CCInfo.AnalyzeReturn(Outs,
7784                        (Subtarget.isSVR4ABI() && CallConv == CallingConv::Cold)
7785                            ? RetCC_PPC_Cold
7786                            : RetCC_PPC);
7787 
7788   SDValue Flag;
7789   SmallVector<SDValue, 4> RetOps(1, Chain);
7790 
7791   // Copy the result values into the output registers.
7792   for (unsigned i = 0, RealResIdx = 0; i != RVLocs.size(); ++i, ++RealResIdx) {
7793     CCValAssign &VA = RVLocs[i];
7794     assert(VA.isRegLoc() && "Can only return in registers!");
7795 
7796     SDValue Arg = OutVals[RealResIdx];
7797 
7798     switch (VA.getLocInfo()) {
7799     default: llvm_unreachable("Unknown loc info!");
7800     case CCValAssign::Full: break;
7801     case CCValAssign::AExt:
7802       Arg = DAG.getNode(ISD::ANY_EXTEND, dl, VA.getLocVT(), Arg);
7803       break;
7804     case CCValAssign::ZExt:
7805       Arg = DAG.getNode(ISD::ZERO_EXTEND, dl, VA.getLocVT(), Arg);
7806       break;
7807     case CCValAssign::SExt:
7808       Arg = DAG.getNode(ISD::SIGN_EXTEND, dl, VA.getLocVT(), Arg);
7809       break;
7810     }
7811     if (Subtarget.hasSPE() && VA.getLocVT() == MVT::f64) {
7812       bool isLittleEndian = Subtarget.isLittleEndian();
7813       // Legalize ret f64 -> ret 2 x i32.
7814       SDValue SVal =
7815           DAG.getNode(PPCISD::EXTRACT_SPE, dl, MVT::i32, Arg,
7816                       DAG.getIntPtrConstant(isLittleEndian ? 0 : 1, dl));
7817       Chain = DAG.getCopyToReg(Chain, dl, VA.getLocReg(), SVal, Flag);
7818       RetOps.push_back(DAG.getRegister(VA.getLocReg(), VA.getLocVT()));
7819       SVal = DAG.getNode(PPCISD::EXTRACT_SPE, dl, MVT::i32, Arg,
7820                          DAG.getIntPtrConstant(isLittleEndian ? 1 : 0, dl));
7821       Flag = Chain.getValue(1);
7822       VA = RVLocs[++i]; // skip ahead to next loc
7823       Chain = DAG.getCopyToReg(Chain, dl, VA.getLocReg(), SVal, Flag);
7824     } else
7825       Chain = DAG.getCopyToReg(Chain, dl, VA.getLocReg(), Arg, Flag);
7826     Flag = Chain.getValue(1);
7827     RetOps.push_back(DAG.getRegister(VA.getLocReg(), VA.getLocVT()));
7828   }
7829 
7830   RetOps[0] = Chain;  // Update chain.
7831 
7832   // Add the flag if we have it.
7833   if (Flag.getNode())
7834     RetOps.push_back(Flag);
7835 
7836   return DAG.getNode(PPCISD::RET_FLAG, dl, MVT::Other, RetOps);
7837 }
7838 
7839 SDValue
7840 PPCTargetLowering::LowerGET_DYNAMIC_AREA_OFFSET(SDValue Op,
7841                                                 SelectionDAG &DAG) const {
7842   SDLoc dl(Op);
7843 
7844   // Get the correct type for integers.
7845   EVT IntVT = Op.getValueType();
7846 
7847   // Get the inputs.
7848   SDValue Chain = Op.getOperand(0);
7849   SDValue FPSIdx = getFramePointerFrameIndex(DAG);
7850   // Build a DYNAREAOFFSET node.
7851   SDValue Ops[2] = {Chain, FPSIdx};
7852   SDVTList VTs = DAG.getVTList(IntVT);
7853   return DAG.getNode(PPCISD::DYNAREAOFFSET, dl, VTs, Ops);
7854 }
7855 
7856 SDValue PPCTargetLowering::LowerSTACKRESTORE(SDValue Op,
7857                                              SelectionDAG &DAG) const {
7858   // When we pop the dynamic allocation we need to restore the SP link.
7859   SDLoc dl(Op);
7860 
7861   // Get the correct type for pointers.
7862   EVT PtrVT = getPointerTy(DAG.getDataLayout());
7863 
7864   // Construct the stack pointer operand.
7865   bool isPPC64 = Subtarget.isPPC64();
7866   unsigned SP = isPPC64 ? PPC::X1 : PPC::R1;
7867   SDValue StackPtr = DAG.getRegister(SP, PtrVT);
7868 
7869   // Get the operands for the STACKRESTORE.
7870   SDValue Chain = Op.getOperand(0);
7871   SDValue SaveSP = Op.getOperand(1);
7872 
7873   // Load the old link SP.
7874   SDValue LoadLinkSP =
7875       DAG.getLoad(PtrVT, dl, Chain, StackPtr, MachinePointerInfo());
7876 
7877   // Restore the stack pointer.
7878   Chain = DAG.getCopyToReg(LoadLinkSP.getValue(1), dl, SP, SaveSP);
7879 
7880   // Store the old link SP.
7881   return DAG.getStore(Chain, dl, LoadLinkSP, StackPtr, MachinePointerInfo());
7882 }
7883 
7884 SDValue PPCTargetLowering::getReturnAddrFrameIndex(SelectionDAG &DAG) const {
7885   MachineFunction &MF = DAG.getMachineFunction();
7886   bool isPPC64 = Subtarget.isPPC64();
7887   EVT PtrVT = getPointerTy(MF.getDataLayout());
7888 
7889   // Get current frame pointer save index.  The users of this index will be
7890   // primarily DYNALLOC instructions.
7891   PPCFunctionInfo *FI = MF.getInfo<PPCFunctionInfo>();
7892   int RASI = FI->getReturnAddrSaveIndex();
7893 
7894   // If the frame pointer save index hasn't been defined yet.
7895   if (!RASI) {
7896     // Find out what the fix offset of the frame pointer save area.
7897     int LROffset = Subtarget.getFrameLowering()->getReturnSaveOffset();
7898     // Allocate the frame index for frame pointer save area.
7899     RASI = MF.getFrameInfo().CreateFixedObject(isPPC64? 8 : 4, LROffset, false);
7900     // Save the result.
7901     FI->setReturnAddrSaveIndex(RASI);
7902   }
7903   return DAG.getFrameIndex(RASI, PtrVT);
7904 }
7905 
7906 SDValue
7907 PPCTargetLowering::getFramePointerFrameIndex(SelectionDAG & DAG) const {
7908   MachineFunction &MF = DAG.getMachineFunction();
7909   bool isPPC64 = Subtarget.isPPC64();
7910   EVT PtrVT = getPointerTy(MF.getDataLayout());
7911 
7912   // Get current frame pointer save index.  The users of this index will be
7913   // primarily DYNALLOC instructions.
7914   PPCFunctionInfo *FI = MF.getInfo<PPCFunctionInfo>();
7915   int FPSI = FI->getFramePointerSaveIndex();
7916 
7917   // If the frame pointer save index hasn't been defined yet.
7918   if (!FPSI) {
7919     // Find out what the fix offset of the frame pointer save area.
7920     int FPOffset = Subtarget.getFrameLowering()->getFramePointerSaveOffset();
7921     // Allocate the frame index for frame pointer save area.
7922     FPSI = MF.getFrameInfo().CreateFixedObject(isPPC64? 8 : 4, FPOffset, true);
7923     // Save the result.
7924     FI->setFramePointerSaveIndex(FPSI);
7925   }
7926   return DAG.getFrameIndex(FPSI, PtrVT);
7927 }
7928 
7929 SDValue PPCTargetLowering::LowerDYNAMIC_STACKALLOC(SDValue Op,
7930                                                    SelectionDAG &DAG) const {
7931   MachineFunction &MF = DAG.getMachineFunction();
7932   // Get the inputs.
7933   SDValue Chain = Op.getOperand(0);
7934   SDValue Size  = Op.getOperand(1);
7935   SDLoc dl(Op);
7936 
7937   // Get the correct type for pointers.
7938   EVT PtrVT = getPointerTy(DAG.getDataLayout());
7939   // Negate the size.
7940   SDValue NegSize = DAG.getNode(ISD::SUB, dl, PtrVT,
7941                                 DAG.getConstant(0, dl, PtrVT), Size);
7942   // Construct a node for the frame pointer save index.
7943   SDValue FPSIdx = getFramePointerFrameIndex(DAG);
7944   SDValue Ops[3] = { Chain, NegSize, FPSIdx };
7945   SDVTList VTs = DAG.getVTList(PtrVT, MVT::Other);
7946   if (hasInlineStackProbe(MF))
7947     return DAG.getNode(PPCISD::PROBED_ALLOCA, dl, VTs, Ops);
7948   return DAG.getNode(PPCISD::DYNALLOC, dl, VTs, Ops);
7949 }
7950 
7951 SDValue PPCTargetLowering::LowerEH_DWARF_CFA(SDValue Op,
7952                                                      SelectionDAG &DAG) const {
7953   MachineFunction &MF = DAG.getMachineFunction();
7954 
7955   bool isPPC64 = Subtarget.isPPC64();
7956   EVT PtrVT = getPointerTy(DAG.getDataLayout());
7957 
7958   int FI = MF.getFrameInfo().CreateFixedObject(isPPC64 ? 8 : 4, 0, false);
7959   return DAG.getFrameIndex(FI, PtrVT);
7960 }
7961 
7962 SDValue PPCTargetLowering::lowerEH_SJLJ_SETJMP(SDValue Op,
7963                                                SelectionDAG &DAG) const {
7964   SDLoc DL(Op);
7965   return DAG.getNode(PPCISD::EH_SJLJ_SETJMP, DL,
7966                      DAG.getVTList(MVT::i32, MVT::Other),
7967                      Op.getOperand(0), Op.getOperand(1));
7968 }
7969 
7970 SDValue PPCTargetLowering::lowerEH_SJLJ_LONGJMP(SDValue Op,
7971                                                 SelectionDAG &DAG) const {
7972   SDLoc DL(Op);
7973   return DAG.getNode(PPCISD::EH_SJLJ_LONGJMP, DL, MVT::Other,
7974                      Op.getOperand(0), Op.getOperand(1));
7975 }
7976 
7977 SDValue PPCTargetLowering::LowerLOAD(SDValue Op, SelectionDAG &DAG) const {
7978   if (Op.getValueType().isVector())
7979     return LowerVectorLoad(Op, DAG);
7980 
7981   assert(Op.getValueType() == MVT::i1 &&
7982          "Custom lowering only for i1 loads");
7983 
7984   // First, load 8 bits into 32 bits, then truncate to 1 bit.
7985 
7986   SDLoc dl(Op);
7987   LoadSDNode *LD = cast<LoadSDNode>(Op);
7988 
7989   SDValue Chain = LD->getChain();
7990   SDValue BasePtr = LD->getBasePtr();
7991   MachineMemOperand *MMO = LD->getMemOperand();
7992 
7993   SDValue NewLD =
7994       DAG.getExtLoad(ISD::EXTLOAD, dl, getPointerTy(DAG.getDataLayout()), Chain,
7995                      BasePtr, MVT::i8, MMO);
7996   SDValue Result = DAG.getNode(ISD::TRUNCATE, dl, MVT::i1, NewLD);
7997 
7998   SDValue Ops[] = { Result, SDValue(NewLD.getNode(), 1) };
7999   return DAG.getMergeValues(Ops, dl);
8000 }
8001 
8002 SDValue PPCTargetLowering::LowerSTORE(SDValue Op, SelectionDAG &DAG) const {
8003   if (Op.getOperand(1).getValueType().isVector())
8004     return LowerVectorStore(Op, DAG);
8005 
8006   assert(Op.getOperand(1).getValueType() == MVT::i1 &&
8007          "Custom lowering only for i1 stores");
8008 
8009   // First, zero extend to 32 bits, then use a truncating store to 8 bits.
8010 
8011   SDLoc dl(Op);
8012   StoreSDNode *ST = cast<StoreSDNode>(Op);
8013 
8014   SDValue Chain = ST->getChain();
8015   SDValue BasePtr = ST->getBasePtr();
8016   SDValue Value = ST->getValue();
8017   MachineMemOperand *MMO = ST->getMemOperand();
8018 
8019   Value = DAG.getNode(ISD::ZERO_EXTEND, dl, getPointerTy(DAG.getDataLayout()),
8020                       Value);
8021   return DAG.getTruncStore(Chain, dl, Value, BasePtr, MVT::i8, MMO);
8022 }
8023 
8024 // FIXME: Remove this once the ANDI glue bug is fixed:
8025 SDValue PPCTargetLowering::LowerTRUNCATE(SDValue Op, SelectionDAG &DAG) const {
8026   assert(Op.getValueType() == MVT::i1 &&
8027          "Custom lowering only for i1 results");
8028 
8029   SDLoc DL(Op);
8030   return DAG.getNode(PPCISD::ANDI_rec_1_GT_BIT, DL, MVT::i1, Op.getOperand(0));
8031 }
8032 
8033 SDValue PPCTargetLowering::LowerTRUNCATEVector(SDValue Op,
8034                                                SelectionDAG &DAG) const {
8035 
8036   // Implements a vector truncate that fits in a vector register as a shuffle.
8037   // We want to legalize vector truncates down to where the source fits in
8038   // a vector register (and target is therefore smaller than vector register
8039   // size).  At that point legalization will try to custom lower the sub-legal
8040   // result and get here - where we can contain the truncate as a single target
8041   // operation.
8042 
8043   // For example a trunc <2 x i16> to <2 x i8> could be visualized as follows:
8044   //   <MSB1|LSB1, MSB2|LSB2> to <LSB1, LSB2>
8045   //
8046   // We will implement it for big-endian ordering as this (where x denotes
8047   // undefined):
8048   //   < MSB1|LSB1, MSB2|LSB2, uu, uu, uu, uu, uu, uu> to
8049   //   < LSB1, LSB2, u, u, u, u, u, u, u, u, u, u, u, u, u, u>
8050   //
8051   // The same operation in little-endian ordering will be:
8052   //   <uu, uu, uu, uu, uu, uu, LSB2|MSB2, LSB1|MSB1> to
8053   //   <u, u, u, u, u, u, u, u, u, u, u, u, u, u, LSB2, LSB1>
8054 
8055   assert(Op.getValueType().isVector() && "Vector type expected.");
8056 
8057   SDLoc DL(Op);
8058   SDValue N1 = Op.getOperand(0);
8059   unsigned SrcSize = N1.getValueType().getSizeInBits();
8060   assert(SrcSize <= 128 && "Source must fit in an Altivec/VSX vector");
8061   SDValue WideSrc = SrcSize == 128 ? N1 : widenVec(DAG, N1, DL);
8062 
8063   EVT TrgVT = Op.getValueType();
8064   unsigned TrgNumElts = TrgVT.getVectorNumElements();
8065   EVT EltVT = TrgVT.getVectorElementType();
8066   unsigned WideNumElts = 128 / EltVT.getSizeInBits();
8067   EVT WideVT = EVT::getVectorVT(*DAG.getContext(), EltVT, WideNumElts);
8068 
8069   // First list the elements we want to keep.
8070   unsigned SizeMult = SrcSize / TrgVT.getSizeInBits();
8071   SmallVector<int, 16> ShuffV;
8072   if (Subtarget.isLittleEndian())
8073     for (unsigned i = 0; i < TrgNumElts; ++i)
8074       ShuffV.push_back(i * SizeMult);
8075   else
8076     for (unsigned i = 1; i <= TrgNumElts; ++i)
8077       ShuffV.push_back(i * SizeMult - 1);
8078 
8079   // Populate the remaining elements with undefs.
8080   for (unsigned i = TrgNumElts; i < WideNumElts; ++i)
8081     // ShuffV.push_back(i + WideNumElts);
8082     ShuffV.push_back(WideNumElts + 1);
8083 
8084   SDValue Conv = DAG.getNode(ISD::BITCAST, DL, WideVT, WideSrc);
8085   return DAG.getVectorShuffle(WideVT, DL, Conv, DAG.getUNDEF(WideVT), ShuffV);
8086 }
8087 
8088 /// LowerSELECT_CC - Lower floating point select_cc's into fsel instruction when
8089 /// possible.
8090 SDValue PPCTargetLowering::LowerSELECT_CC(SDValue Op, SelectionDAG &DAG) const {
8091   // Not FP? Not a fsel.
8092   if (!Op.getOperand(0).getValueType().isFloatingPoint() ||
8093       !Op.getOperand(2).getValueType().isFloatingPoint())
8094     return Op;
8095 
8096   ISD::CondCode CC = cast<CondCodeSDNode>(Op.getOperand(4))->get();
8097 
8098   EVT ResVT = Op.getValueType();
8099   EVT CmpVT = Op.getOperand(0).getValueType();
8100   SDValue LHS = Op.getOperand(0), RHS = Op.getOperand(1);
8101   SDValue TV  = Op.getOperand(2), FV  = Op.getOperand(3);
8102   SDLoc dl(Op);
8103   SDNodeFlags Flags = Op.getNode()->getFlags();
8104 
8105   // We have xsmaxcdp/xsmincdp which are OK to emit even in the
8106   // presence of infinities.
8107   if (Subtarget.hasP9Vector() && LHS == TV && RHS == FV) {
8108     switch (CC) {
8109     default:
8110       break;
8111     case ISD::SETOGT:
8112     case ISD::SETGT:
8113       return DAG.getNode(PPCISD::XSMAXCDP, dl, Op.getValueType(), LHS, RHS);
8114     case ISD::SETOLT:
8115     case ISD::SETLT:
8116       return DAG.getNode(PPCISD::XSMINCDP, dl, Op.getValueType(), LHS, RHS);
8117     }
8118   }
8119 
8120   // We might be able to do better than this under some circumstances, but in
8121   // general, fsel-based lowering of select is a finite-math-only optimization.
8122   // For more information, see section F.3 of the 2.06 ISA specification.
8123   // With ISA 3.0
8124   if ((!DAG.getTarget().Options.NoInfsFPMath && !Flags.hasNoInfs()) ||
8125       (!DAG.getTarget().Options.NoNaNsFPMath && !Flags.hasNoNaNs()))
8126     return Op;
8127 
8128   // If the RHS of the comparison is a 0.0, we don't need to do the
8129   // subtraction at all.
8130   SDValue Sel1;
8131   if (isFloatingPointZero(RHS))
8132     switch (CC) {
8133     default: break;       // SETUO etc aren't handled by fsel.
8134     case ISD::SETNE:
8135       std::swap(TV, FV);
8136       LLVM_FALLTHROUGH;
8137     case ISD::SETEQ:
8138       if (LHS.getValueType() == MVT::f32)   // Comparison is always 64-bits
8139         LHS = DAG.getNode(ISD::FP_EXTEND, dl, MVT::f64, LHS);
8140       Sel1 = DAG.getNode(PPCISD::FSEL, dl, ResVT, LHS, TV, FV);
8141       if (Sel1.getValueType() == MVT::f32)   // Comparison is always 64-bits
8142         Sel1 = DAG.getNode(ISD::FP_EXTEND, dl, MVT::f64, Sel1);
8143       return DAG.getNode(PPCISD::FSEL, dl, ResVT,
8144                          DAG.getNode(ISD::FNEG, dl, MVT::f64, LHS), Sel1, FV);
8145     case ISD::SETULT:
8146     case ISD::SETLT:
8147       std::swap(TV, FV);  // fsel is natively setge, swap operands for setlt
8148       LLVM_FALLTHROUGH;
8149     case ISD::SETOGE:
8150     case ISD::SETGE:
8151       if (LHS.getValueType() == MVT::f32)   // Comparison is always 64-bits
8152         LHS = DAG.getNode(ISD::FP_EXTEND, dl, MVT::f64, LHS);
8153       return DAG.getNode(PPCISD::FSEL, dl, ResVT, LHS, TV, FV);
8154     case ISD::SETUGT:
8155     case ISD::SETGT:
8156       std::swap(TV, FV);  // fsel is natively setge, swap operands for setlt
8157       LLVM_FALLTHROUGH;
8158     case ISD::SETOLE:
8159     case ISD::SETLE:
8160       if (LHS.getValueType() == MVT::f32)   // Comparison is always 64-bits
8161         LHS = DAG.getNode(ISD::FP_EXTEND, dl, MVT::f64, LHS);
8162       return DAG.getNode(PPCISD::FSEL, dl, ResVT,
8163                          DAG.getNode(ISD::FNEG, dl, MVT::f64, LHS), TV, FV);
8164     }
8165 
8166   SDValue Cmp;
8167   switch (CC) {
8168   default: break;       // SETUO etc aren't handled by fsel.
8169   case ISD::SETNE:
8170     std::swap(TV, FV);
8171     LLVM_FALLTHROUGH;
8172   case ISD::SETEQ:
8173     Cmp = DAG.getNode(ISD::FSUB, dl, CmpVT, LHS, RHS, Flags);
8174     if (Cmp.getValueType() == MVT::f32)   // Comparison is always 64-bits
8175       Cmp = DAG.getNode(ISD::FP_EXTEND, dl, MVT::f64, Cmp);
8176     Sel1 = DAG.getNode(PPCISD::FSEL, dl, ResVT, Cmp, TV, FV);
8177     if (Sel1.getValueType() == MVT::f32)   // Comparison is always 64-bits
8178       Sel1 = DAG.getNode(ISD::FP_EXTEND, dl, MVT::f64, Sel1);
8179     return DAG.getNode(PPCISD::FSEL, dl, ResVT,
8180                        DAG.getNode(ISD::FNEG, dl, MVT::f64, Cmp), Sel1, FV);
8181   case ISD::SETULT:
8182   case ISD::SETLT:
8183     Cmp = DAG.getNode(ISD::FSUB, dl, CmpVT, LHS, RHS, Flags);
8184     if (Cmp.getValueType() == MVT::f32)   // Comparison is always 64-bits
8185       Cmp = DAG.getNode(ISD::FP_EXTEND, dl, MVT::f64, Cmp);
8186     return DAG.getNode(PPCISD::FSEL, dl, ResVT, Cmp, FV, TV);
8187   case ISD::SETOGE:
8188   case ISD::SETGE:
8189     Cmp = DAG.getNode(ISD::FSUB, dl, CmpVT, LHS, RHS, Flags);
8190     if (Cmp.getValueType() == MVT::f32)   // Comparison is always 64-bits
8191       Cmp = DAG.getNode(ISD::FP_EXTEND, dl, MVT::f64, Cmp);
8192     return DAG.getNode(PPCISD::FSEL, dl, ResVT, Cmp, TV, FV);
8193   case ISD::SETUGT:
8194   case ISD::SETGT:
8195     Cmp = DAG.getNode(ISD::FSUB, dl, CmpVT, RHS, LHS, Flags);
8196     if (Cmp.getValueType() == MVT::f32)   // Comparison is always 64-bits
8197       Cmp = DAG.getNode(ISD::FP_EXTEND, dl, MVT::f64, Cmp);
8198     return DAG.getNode(PPCISD::FSEL, dl, ResVT, Cmp, FV, TV);
8199   case ISD::SETOLE:
8200   case ISD::SETLE:
8201     Cmp = DAG.getNode(ISD::FSUB, dl, CmpVT, RHS, LHS, Flags);
8202     if (Cmp.getValueType() == MVT::f32)   // Comparison is always 64-bits
8203       Cmp = DAG.getNode(ISD::FP_EXTEND, dl, MVT::f64, Cmp);
8204     return DAG.getNode(PPCISD::FSEL, dl, ResVT, Cmp, TV, FV);
8205   }
8206   return Op;
8207 }
8208 
8209 void PPCTargetLowering::LowerFP_TO_INTForReuse(SDValue Op, ReuseLoadInfo &RLI,
8210                                                SelectionDAG &DAG,
8211                                                const SDLoc &dl) const {
8212   assert(Op.getOperand(0).getValueType().isFloatingPoint());
8213   SDValue Src = Op.getOperand(0);
8214   if (Src.getValueType() == MVT::f32)
8215     Src = DAG.getNode(ISD::FP_EXTEND, dl, MVT::f64, Src);
8216 
8217   SDValue Tmp;
8218   switch (Op.getSimpleValueType().SimpleTy) {
8219   default: llvm_unreachable("Unhandled FP_TO_INT type in custom expander!");
8220   case MVT::i32:
8221     Tmp = DAG.getNode(
8222         Op.getOpcode() == ISD::FP_TO_SINT
8223             ? PPCISD::FCTIWZ
8224             : (Subtarget.hasFPCVT() ? PPCISD::FCTIWUZ : PPCISD::FCTIDZ),
8225         dl, MVT::f64, Src);
8226     break;
8227   case MVT::i64:
8228     assert((Op.getOpcode() == ISD::FP_TO_SINT || Subtarget.hasFPCVT()) &&
8229            "i64 FP_TO_UINT is supported only with FPCVT");
8230     Tmp = DAG.getNode(Op.getOpcode()==ISD::FP_TO_SINT ? PPCISD::FCTIDZ :
8231                                                         PPCISD::FCTIDUZ,
8232                       dl, MVT::f64, Src);
8233     break;
8234   }
8235 
8236   // Convert the FP value to an int value through memory.
8237   bool i32Stack = Op.getValueType() == MVT::i32 && Subtarget.hasSTFIWX() &&
8238     (Op.getOpcode() == ISD::FP_TO_SINT || Subtarget.hasFPCVT());
8239   SDValue FIPtr = DAG.CreateStackTemporary(i32Stack ? MVT::i32 : MVT::f64);
8240   int FI = cast<FrameIndexSDNode>(FIPtr)->getIndex();
8241   MachinePointerInfo MPI =
8242       MachinePointerInfo::getFixedStack(DAG.getMachineFunction(), FI);
8243 
8244   // Emit a store to the stack slot.
8245   SDValue Chain;
8246   Align Alignment(DAG.getEVTAlign(Tmp.getValueType()));
8247   if (i32Stack) {
8248     MachineFunction &MF = DAG.getMachineFunction();
8249     Alignment = Align(4);
8250     MachineMemOperand *MMO =
8251         MF.getMachineMemOperand(MPI, MachineMemOperand::MOStore, 4, Alignment);
8252     SDValue Ops[] = { DAG.getEntryNode(), Tmp, FIPtr };
8253     Chain = DAG.getMemIntrinsicNode(PPCISD::STFIWX, dl,
8254               DAG.getVTList(MVT::Other), Ops, MVT::i32, MMO);
8255   } else
8256     Chain = DAG.getStore(DAG.getEntryNode(), dl, Tmp, FIPtr, MPI, Alignment);
8257 
8258   // Result is a load from the stack slot.  If loading 4 bytes, make sure to
8259   // add in a bias on big endian.
8260   if (Op.getValueType() == MVT::i32 && !i32Stack) {
8261     FIPtr = DAG.getNode(ISD::ADD, dl, FIPtr.getValueType(), FIPtr,
8262                         DAG.getConstant(4, dl, FIPtr.getValueType()));
8263     MPI = MPI.getWithOffset(Subtarget.isLittleEndian() ? 0 : 4);
8264   }
8265 
8266   RLI.Chain = Chain;
8267   RLI.Ptr = FIPtr;
8268   RLI.MPI = MPI;
8269   RLI.Alignment = Alignment;
8270 }
8271 
8272 /// Custom lowers floating point to integer conversions to use
8273 /// the direct move instructions available in ISA 2.07 to avoid the
8274 /// need for load/store combinations.
8275 SDValue PPCTargetLowering::LowerFP_TO_INTDirectMove(SDValue Op,
8276                                                     SelectionDAG &DAG,
8277                                                     const SDLoc &dl) const {
8278   assert(Op.getOperand(0).getValueType().isFloatingPoint());
8279   SDValue Src = Op.getOperand(0);
8280 
8281   if (Src.getValueType() == MVT::f32)
8282     Src = DAG.getNode(ISD::FP_EXTEND, dl, MVT::f64, Src);
8283 
8284   SDValue Tmp;
8285   switch (Op.getSimpleValueType().SimpleTy) {
8286   default: llvm_unreachable("Unhandled FP_TO_INT type in custom expander!");
8287   case MVT::i32:
8288     Tmp = DAG.getNode(
8289         Op.getOpcode() == ISD::FP_TO_SINT
8290             ? PPCISD::FCTIWZ
8291             : (Subtarget.hasFPCVT() ? PPCISD::FCTIWUZ : PPCISD::FCTIDZ),
8292         dl, MVT::f64, Src);
8293     Tmp = DAG.getNode(PPCISD::MFVSR, dl, MVT::i32, Tmp);
8294     break;
8295   case MVT::i64:
8296     assert((Op.getOpcode() == ISD::FP_TO_SINT || Subtarget.hasFPCVT()) &&
8297            "i64 FP_TO_UINT is supported only with FPCVT");
8298     Tmp = DAG.getNode(Op.getOpcode()==ISD::FP_TO_SINT ? PPCISD::FCTIDZ :
8299                                                         PPCISD::FCTIDUZ,
8300                       dl, MVT::f64, Src);
8301     Tmp = DAG.getNode(PPCISD::MFVSR, dl, MVT::i64, Tmp);
8302     break;
8303   }
8304   return Tmp;
8305 }
8306 
8307 SDValue PPCTargetLowering::LowerFP_TO_INT(SDValue Op, SelectionDAG &DAG,
8308                                           const SDLoc &dl) const {
8309 
8310   // FP to INT conversions are legal for f128.
8311   if (Op->getOperand(0).getValueType() == MVT::f128)
8312     return Op;
8313 
8314   // Expand ppcf128 to i32 by hand for the benefit of llvm-gcc bootstrap on
8315   // PPC (the libcall is not available).
8316   if (Op.getOperand(0).getValueType() == MVT::ppcf128) {
8317     if (Op.getValueType() == MVT::i32) {
8318       if (Op.getOpcode() == ISD::FP_TO_SINT) {
8319         SDValue Lo = DAG.getNode(ISD::EXTRACT_ELEMENT, dl,
8320                                  MVT::f64, Op.getOperand(0),
8321                                  DAG.getIntPtrConstant(0, dl));
8322         SDValue Hi = DAG.getNode(ISD::EXTRACT_ELEMENT, dl,
8323                                  MVT::f64, Op.getOperand(0),
8324                                  DAG.getIntPtrConstant(1, dl));
8325 
8326         // Add the two halves of the long double in round-to-zero mode.
8327         SDValue Res = DAG.getNode(PPCISD::FADDRTZ, dl, MVT::f64, Lo, Hi);
8328 
8329         // Now use a smaller FP_TO_SINT.
8330         return DAG.getNode(ISD::FP_TO_SINT, dl, MVT::i32, Res);
8331       }
8332       if (Op.getOpcode() == ISD::FP_TO_UINT) {
8333         const uint64_t TwoE31[] = {0x41e0000000000000LL, 0};
8334         APFloat APF = APFloat(APFloat::PPCDoubleDouble(), APInt(128, TwoE31));
8335         SDValue Tmp = DAG.getConstantFP(APF, dl, MVT::ppcf128);
8336         //  X>=2^31 ? (int)(X-2^31)+0x80000000 : (int)X
8337         // FIXME: generated code sucks.
8338         // TODO: Are there fast-math-flags to propagate to this FSUB?
8339         SDValue True = DAG.getNode(ISD::FSUB, dl, MVT::ppcf128,
8340                                    Op.getOperand(0), Tmp);
8341         True = DAG.getNode(ISD::FP_TO_SINT, dl, MVT::i32, True);
8342         True = DAG.getNode(ISD::ADD, dl, MVT::i32, True,
8343                            DAG.getConstant(0x80000000, dl, MVT::i32));
8344         SDValue False = DAG.getNode(ISD::FP_TO_SINT, dl, MVT::i32,
8345                                     Op.getOperand(0));
8346         return DAG.getSelectCC(dl, Op.getOperand(0), Tmp, True, False,
8347                                ISD::SETGE);
8348       }
8349     }
8350 
8351     return SDValue();
8352   }
8353 
8354   if (Subtarget.hasDirectMove() && Subtarget.isPPC64())
8355     return LowerFP_TO_INTDirectMove(Op, DAG, dl);
8356 
8357   ReuseLoadInfo RLI;
8358   LowerFP_TO_INTForReuse(Op, RLI, DAG, dl);
8359 
8360   return DAG.getLoad(Op.getValueType(), dl, RLI.Chain, RLI.Ptr, RLI.MPI,
8361                      RLI.Alignment, RLI.MMOFlags(), RLI.AAInfo, RLI.Ranges);
8362 }
8363 
8364 // We're trying to insert a regular store, S, and then a load, L. If the
8365 // incoming value, O, is a load, we might just be able to have our load use the
8366 // address used by O. However, we don't know if anything else will store to
8367 // that address before we can load from it. To prevent this situation, we need
8368 // to insert our load, L, into the chain as a peer of O. To do this, we give L
8369 // the same chain operand as O, we create a token factor from the chain results
8370 // of O and L, and we replace all uses of O's chain result with that token
8371 // factor (see spliceIntoChain below for this last part).
8372 bool PPCTargetLowering::canReuseLoadAddress(SDValue Op, EVT MemVT,
8373                                             ReuseLoadInfo &RLI,
8374                                             SelectionDAG &DAG,
8375                                             ISD::LoadExtType ET) const {
8376   SDLoc dl(Op);
8377   bool ValidFPToUint = Op.getOpcode() == ISD::FP_TO_UINT &&
8378                        (Subtarget.hasFPCVT() || Op.getValueType() == MVT::i32);
8379   if (ET == ISD::NON_EXTLOAD &&
8380       (ValidFPToUint || Op.getOpcode() == ISD::FP_TO_SINT) &&
8381       isOperationLegalOrCustom(Op.getOpcode(),
8382                                Op.getOperand(0).getValueType())) {
8383 
8384     LowerFP_TO_INTForReuse(Op, RLI, DAG, dl);
8385     return true;
8386   }
8387 
8388   LoadSDNode *LD = dyn_cast<LoadSDNode>(Op);
8389   if (!LD || LD->getExtensionType() != ET || LD->isVolatile() ||
8390       LD->isNonTemporal())
8391     return false;
8392   if (LD->getMemoryVT() != MemVT)
8393     return false;
8394 
8395   RLI.Ptr = LD->getBasePtr();
8396   if (LD->isIndexed() && !LD->getOffset().isUndef()) {
8397     assert(LD->getAddressingMode() == ISD::PRE_INC &&
8398            "Non-pre-inc AM on PPC?");
8399     RLI.Ptr = DAG.getNode(ISD::ADD, dl, RLI.Ptr.getValueType(), RLI.Ptr,
8400                           LD->getOffset());
8401   }
8402 
8403   RLI.Chain = LD->getChain();
8404   RLI.MPI = LD->getPointerInfo();
8405   RLI.IsDereferenceable = LD->isDereferenceable();
8406   RLI.IsInvariant = LD->isInvariant();
8407   RLI.Alignment = LD->getAlign();
8408   RLI.AAInfo = LD->getAAInfo();
8409   RLI.Ranges = LD->getRanges();
8410 
8411   RLI.ResChain = SDValue(LD, LD->isIndexed() ? 2 : 1);
8412   return true;
8413 }
8414 
8415 // Given the head of the old chain, ResChain, insert a token factor containing
8416 // it and NewResChain, and make users of ResChain now be users of that token
8417 // factor.
8418 // TODO: Remove and use DAG::makeEquivalentMemoryOrdering() instead.
8419 void PPCTargetLowering::spliceIntoChain(SDValue ResChain,
8420                                         SDValue NewResChain,
8421                                         SelectionDAG &DAG) const {
8422   if (!ResChain)
8423     return;
8424 
8425   SDLoc dl(NewResChain);
8426 
8427   SDValue TF = DAG.getNode(ISD::TokenFactor, dl, MVT::Other,
8428                            NewResChain, DAG.getUNDEF(MVT::Other));
8429   assert(TF.getNode() != NewResChain.getNode() &&
8430          "A new TF really is required here");
8431 
8432   DAG.ReplaceAllUsesOfValueWith(ResChain, TF);
8433   DAG.UpdateNodeOperands(TF.getNode(), ResChain, NewResChain);
8434 }
8435 
8436 /// Analyze profitability of direct move
8437 /// prefer float load to int load plus direct move
8438 /// when there is no integer use of int load
8439 bool PPCTargetLowering::directMoveIsProfitable(const SDValue &Op) const {
8440   SDNode *Origin = Op.getOperand(0).getNode();
8441   if (Origin->getOpcode() != ISD::LOAD)
8442     return true;
8443 
8444   // If there is no LXSIBZX/LXSIHZX, like Power8,
8445   // prefer direct move if the memory size is 1 or 2 bytes.
8446   MachineMemOperand *MMO = cast<LoadSDNode>(Origin)->getMemOperand();
8447   if (!Subtarget.hasP9Vector() && MMO->getSize() <= 2)
8448     return true;
8449 
8450   for (SDNode::use_iterator UI = Origin->use_begin(),
8451                             UE = Origin->use_end();
8452        UI != UE; ++UI) {
8453 
8454     // Only look at the users of the loaded value.
8455     if (UI.getUse().get().getResNo() != 0)
8456       continue;
8457 
8458     if (UI->getOpcode() != ISD::SINT_TO_FP &&
8459         UI->getOpcode() != ISD::UINT_TO_FP)
8460       return true;
8461   }
8462 
8463   return false;
8464 }
8465 
8466 /// Custom lowers integer to floating point conversions to use
8467 /// the direct move instructions available in ISA 2.07 to avoid the
8468 /// need for load/store combinations.
8469 SDValue PPCTargetLowering::LowerINT_TO_FPDirectMove(SDValue Op,
8470                                                     SelectionDAG &DAG,
8471                                                     const SDLoc &dl) const {
8472   assert((Op.getValueType() == MVT::f32 ||
8473           Op.getValueType() == MVT::f64) &&
8474          "Invalid floating point type as target of conversion");
8475   assert(Subtarget.hasFPCVT() &&
8476          "Int to FP conversions with direct moves require FPCVT");
8477   SDValue FP;
8478   SDValue Src = Op.getOperand(0);
8479   bool SinglePrec = Op.getValueType() == MVT::f32;
8480   bool WordInt = Src.getSimpleValueType().SimpleTy == MVT::i32;
8481   bool Signed = Op.getOpcode() == ISD::SINT_TO_FP;
8482   unsigned ConvOp = Signed ? (SinglePrec ? PPCISD::FCFIDS : PPCISD::FCFID) :
8483                              (SinglePrec ? PPCISD::FCFIDUS : PPCISD::FCFIDU);
8484 
8485   if (WordInt) {
8486     FP = DAG.getNode(Signed ? PPCISD::MTVSRA : PPCISD::MTVSRZ,
8487                      dl, MVT::f64, Src);
8488     FP = DAG.getNode(ConvOp, dl, SinglePrec ? MVT::f32 : MVT::f64, FP);
8489   }
8490   else {
8491     FP = DAG.getNode(PPCISD::MTVSRA, dl, MVT::f64, Src);
8492     FP = DAG.getNode(ConvOp, dl, SinglePrec ? MVT::f32 : MVT::f64, FP);
8493   }
8494 
8495   return FP;
8496 }
8497 
8498 static SDValue widenVec(SelectionDAG &DAG, SDValue Vec, const SDLoc &dl) {
8499 
8500   EVT VecVT = Vec.getValueType();
8501   assert(VecVT.isVector() && "Expected a vector type.");
8502   assert(VecVT.getSizeInBits() < 128 && "Vector is already full width.");
8503 
8504   EVT EltVT = VecVT.getVectorElementType();
8505   unsigned WideNumElts = 128 / EltVT.getSizeInBits();
8506   EVT WideVT = EVT::getVectorVT(*DAG.getContext(), EltVT, WideNumElts);
8507 
8508   unsigned NumConcat = WideNumElts / VecVT.getVectorNumElements();
8509   SmallVector<SDValue, 16> Ops(NumConcat);
8510   Ops[0] = Vec;
8511   SDValue UndefVec = DAG.getUNDEF(VecVT);
8512   for (unsigned i = 1; i < NumConcat; ++i)
8513     Ops[i] = UndefVec;
8514 
8515   return DAG.getNode(ISD::CONCAT_VECTORS, dl, WideVT, Ops);
8516 }
8517 
8518 SDValue PPCTargetLowering::LowerINT_TO_FPVector(SDValue Op, SelectionDAG &DAG,
8519                                                 const SDLoc &dl) const {
8520 
8521   unsigned Opc = Op.getOpcode();
8522   assert((Opc == ISD::UINT_TO_FP || Opc == ISD::SINT_TO_FP) &&
8523          "Unexpected conversion type");
8524   assert((Op.getValueType() == MVT::v2f64 || Op.getValueType() == MVT::v4f32) &&
8525          "Supports conversions to v2f64/v4f32 only.");
8526 
8527   bool SignedConv = Opc == ISD::SINT_TO_FP;
8528   bool FourEltRes = Op.getValueType() == MVT::v4f32;
8529 
8530   SDValue Wide = widenVec(DAG, Op.getOperand(0), dl);
8531   EVT WideVT = Wide.getValueType();
8532   unsigned WideNumElts = WideVT.getVectorNumElements();
8533   MVT IntermediateVT = FourEltRes ? MVT::v4i32 : MVT::v2i64;
8534 
8535   SmallVector<int, 16> ShuffV;
8536   for (unsigned i = 0; i < WideNumElts; ++i)
8537     ShuffV.push_back(i + WideNumElts);
8538 
8539   int Stride = FourEltRes ? WideNumElts / 4 : WideNumElts / 2;
8540   int SaveElts = FourEltRes ? 4 : 2;
8541   if (Subtarget.isLittleEndian())
8542     for (int i = 0; i < SaveElts; i++)
8543       ShuffV[i * Stride] = i;
8544   else
8545     for (int i = 1; i <= SaveElts; i++)
8546       ShuffV[i * Stride - 1] = i - 1;
8547 
8548   SDValue ShuffleSrc2 =
8549       SignedConv ? DAG.getUNDEF(WideVT) : DAG.getConstant(0, dl, WideVT);
8550   SDValue Arrange = DAG.getVectorShuffle(WideVT, dl, Wide, ShuffleSrc2, ShuffV);
8551 
8552   SDValue Extend;
8553   if (SignedConv) {
8554     Arrange = DAG.getBitcast(IntermediateVT, Arrange);
8555     EVT ExtVT = Op.getOperand(0).getValueType();
8556     if (Subtarget.hasP9Altivec())
8557       ExtVT = EVT::getVectorVT(*DAG.getContext(), WideVT.getVectorElementType(),
8558                                IntermediateVT.getVectorNumElements());
8559 
8560     Extend = DAG.getNode(ISD::SIGN_EXTEND_INREG, dl, IntermediateVT, Arrange,
8561                          DAG.getValueType(ExtVT));
8562   } else
8563     Extend = DAG.getNode(ISD::BITCAST, dl, IntermediateVT, Arrange);
8564 
8565   return DAG.getNode(Opc, dl, Op.getValueType(), Extend);
8566 }
8567 
8568 SDValue PPCTargetLowering::LowerINT_TO_FP(SDValue Op,
8569                                           SelectionDAG &DAG) const {
8570   SDLoc dl(Op);
8571 
8572   EVT InVT = Op.getOperand(0).getValueType();
8573   EVT OutVT = Op.getValueType();
8574   if (OutVT.isVector() && OutVT.isFloatingPoint() &&
8575       isOperationCustom(Op.getOpcode(), InVT))
8576     return LowerINT_TO_FPVector(Op, DAG, dl);
8577 
8578   // Conversions to f128 are legal.
8579   if (Op.getValueType() == MVT::f128)
8580     return Op;
8581 
8582   if (Subtarget.hasQPX() && Op.getOperand(0).getValueType() == MVT::v4i1) {
8583     if (Op.getValueType() != MVT::v4f32 && Op.getValueType() != MVT::v4f64)
8584       return SDValue();
8585 
8586     SDValue Value = Op.getOperand(0);
8587     // The values are now known to be -1 (false) or 1 (true). To convert this
8588     // into 0 (false) and 1 (true), add 1 and then divide by 2 (multiply by 0.5).
8589     // This can be done with an fma and the 0.5 constant: (V+1.0)*0.5 = 0.5*V+0.5
8590     Value = DAG.getNode(PPCISD::QBFLT, dl, MVT::v4f64, Value);
8591 
8592     SDValue FPHalfs = DAG.getConstantFP(0.5, dl, MVT::v4f64);
8593 
8594     Value = DAG.getNode(ISD::FMA, dl, MVT::v4f64, Value, FPHalfs, FPHalfs);
8595 
8596     if (Op.getValueType() != MVT::v4f64)
8597       Value = DAG.getNode(ISD::FP_ROUND, dl,
8598                           Op.getValueType(), Value,
8599                           DAG.getIntPtrConstant(1, dl));
8600     return Value;
8601   }
8602 
8603   // Don't handle ppc_fp128 here; let it be lowered to a libcall.
8604   if (Op.getValueType() != MVT::f32 && Op.getValueType() != MVT::f64)
8605     return SDValue();
8606 
8607   if (Op.getOperand(0).getValueType() == MVT::i1)
8608     return DAG.getNode(ISD::SELECT, dl, Op.getValueType(), Op.getOperand(0),
8609                        DAG.getConstantFP(1.0, dl, Op.getValueType()),
8610                        DAG.getConstantFP(0.0, dl, Op.getValueType()));
8611 
8612   // If we have direct moves, we can do all the conversion, skip the store/load
8613   // however, without FPCVT we can't do most conversions.
8614   if (Subtarget.hasDirectMove() && directMoveIsProfitable(Op) &&
8615       Subtarget.isPPC64() && Subtarget.hasFPCVT())
8616     return LowerINT_TO_FPDirectMove(Op, DAG, dl);
8617 
8618   assert((Op.getOpcode() == ISD::SINT_TO_FP || Subtarget.hasFPCVT()) &&
8619          "UINT_TO_FP is supported only with FPCVT");
8620 
8621   // If we have FCFIDS, then use it when converting to single-precision.
8622   // Otherwise, convert to double-precision and then round.
8623   unsigned FCFOp = (Subtarget.hasFPCVT() && Op.getValueType() == MVT::f32)
8624                        ? (Op.getOpcode() == ISD::UINT_TO_FP ? PPCISD::FCFIDUS
8625                                                             : PPCISD::FCFIDS)
8626                        : (Op.getOpcode() == ISD::UINT_TO_FP ? PPCISD::FCFIDU
8627                                                             : PPCISD::FCFID);
8628   MVT FCFTy = (Subtarget.hasFPCVT() && Op.getValueType() == MVT::f32)
8629                   ? MVT::f32
8630                   : MVT::f64;
8631 
8632   if (Op.getOperand(0).getValueType() == MVT::i64) {
8633     SDValue SINT = Op.getOperand(0);
8634     // When converting to single-precision, we actually need to convert
8635     // to double-precision first and then round to single-precision.
8636     // To avoid double-rounding effects during that operation, we have
8637     // to prepare the input operand.  Bits that might be truncated when
8638     // converting to double-precision are replaced by a bit that won't
8639     // be lost at this stage, but is below the single-precision rounding
8640     // position.
8641     //
8642     // However, if -enable-unsafe-fp-math is in effect, accept double
8643     // rounding to avoid the extra overhead.
8644     if (Op.getValueType() == MVT::f32 &&
8645         !Subtarget.hasFPCVT() &&
8646         !DAG.getTarget().Options.UnsafeFPMath) {
8647 
8648       // Twiddle input to make sure the low 11 bits are zero.  (If this
8649       // is the case, we are guaranteed the value will fit into the 53 bit
8650       // mantissa of an IEEE double-precision value without rounding.)
8651       // If any of those low 11 bits were not zero originally, make sure
8652       // bit 12 (value 2048) is set instead, so that the final rounding
8653       // to single-precision gets the correct result.
8654       SDValue Round = DAG.getNode(ISD::AND, dl, MVT::i64,
8655                                   SINT, DAG.getConstant(2047, dl, MVT::i64));
8656       Round = DAG.getNode(ISD::ADD, dl, MVT::i64,
8657                           Round, DAG.getConstant(2047, dl, MVT::i64));
8658       Round = DAG.getNode(ISD::OR, dl, MVT::i64, Round, SINT);
8659       Round = DAG.getNode(ISD::AND, dl, MVT::i64,
8660                           Round, DAG.getConstant(-2048, dl, MVT::i64));
8661 
8662       // However, we cannot use that value unconditionally: if the magnitude
8663       // of the input value is small, the bit-twiddling we did above might
8664       // end up visibly changing the output.  Fortunately, in that case, we
8665       // don't need to twiddle bits since the original input will convert
8666       // exactly to double-precision floating-point already.  Therefore,
8667       // construct a conditional to use the original value if the top 11
8668       // bits are all sign-bit copies, and use the rounded value computed
8669       // above otherwise.
8670       SDValue Cond = DAG.getNode(ISD::SRA, dl, MVT::i64,
8671                                  SINT, DAG.getConstant(53, dl, MVT::i32));
8672       Cond = DAG.getNode(ISD::ADD, dl, MVT::i64,
8673                          Cond, DAG.getConstant(1, dl, MVT::i64));
8674       Cond = DAG.getSetCC(
8675           dl,
8676           getSetCCResultType(DAG.getDataLayout(), *DAG.getContext(), MVT::i64),
8677           Cond, DAG.getConstant(1, dl, MVT::i64), ISD::SETUGT);
8678 
8679       SINT = DAG.getNode(ISD::SELECT, dl, MVT::i64, Cond, Round, SINT);
8680     }
8681 
8682     ReuseLoadInfo RLI;
8683     SDValue Bits;
8684 
8685     MachineFunction &MF = DAG.getMachineFunction();
8686     if (canReuseLoadAddress(SINT, MVT::i64, RLI, DAG)) {
8687       Bits = DAG.getLoad(MVT::f64, dl, RLI.Chain, RLI.Ptr, RLI.MPI,
8688                          RLI.Alignment, RLI.MMOFlags(), RLI.AAInfo, RLI.Ranges);
8689       spliceIntoChain(RLI.ResChain, Bits.getValue(1), DAG);
8690     } else if (Subtarget.hasLFIWAX() &&
8691                canReuseLoadAddress(SINT, MVT::i32, RLI, DAG, ISD::SEXTLOAD)) {
8692       MachineMemOperand *MMO =
8693         MF.getMachineMemOperand(RLI.MPI, MachineMemOperand::MOLoad, 4,
8694                                 RLI.Alignment, RLI.AAInfo, RLI.Ranges);
8695       SDValue Ops[] = { RLI.Chain, RLI.Ptr };
8696       Bits = DAG.getMemIntrinsicNode(PPCISD::LFIWAX, dl,
8697                                      DAG.getVTList(MVT::f64, MVT::Other),
8698                                      Ops, MVT::i32, MMO);
8699       spliceIntoChain(RLI.ResChain, Bits.getValue(1), DAG);
8700     } else if (Subtarget.hasFPCVT() &&
8701                canReuseLoadAddress(SINT, MVT::i32, RLI, DAG, ISD::ZEXTLOAD)) {
8702       MachineMemOperand *MMO =
8703         MF.getMachineMemOperand(RLI.MPI, MachineMemOperand::MOLoad, 4,
8704                                 RLI.Alignment, RLI.AAInfo, RLI.Ranges);
8705       SDValue Ops[] = { RLI.Chain, RLI.Ptr };
8706       Bits = DAG.getMemIntrinsicNode(PPCISD::LFIWZX, dl,
8707                                      DAG.getVTList(MVT::f64, MVT::Other),
8708                                      Ops, MVT::i32, MMO);
8709       spliceIntoChain(RLI.ResChain, Bits.getValue(1), DAG);
8710     } else if (((Subtarget.hasLFIWAX() &&
8711                  SINT.getOpcode() == ISD::SIGN_EXTEND) ||
8712                 (Subtarget.hasFPCVT() &&
8713                  SINT.getOpcode() == ISD::ZERO_EXTEND)) &&
8714                SINT.getOperand(0).getValueType() == MVT::i32) {
8715       MachineFrameInfo &MFI = MF.getFrameInfo();
8716       EVT PtrVT = getPointerTy(DAG.getDataLayout());
8717 
8718       int FrameIdx = MFI.CreateStackObject(4, Align(4), false);
8719       SDValue FIdx = DAG.getFrameIndex(FrameIdx, PtrVT);
8720 
8721       SDValue Store =
8722           DAG.getStore(DAG.getEntryNode(), dl, SINT.getOperand(0), FIdx,
8723                        MachinePointerInfo::getFixedStack(
8724                            DAG.getMachineFunction(), FrameIdx));
8725 
8726       assert(cast<StoreSDNode>(Store)->getMemoryVT() == MVT::i32 &&
8727              "Expected an i32 store");
8728 
8729       RLI.Ptr = FIdx;
8730       RLI.Chain = Store;
8731       RLI.MPI =
8732           MachinePointerInfo::getFixedStack(DAG.getMachineFunction(), FrameIdx);
8733       RLI.Alignment = Align(4);
8734 
8735       MachineMemOperand *MMO =
8736         MF.getMachineMemOperand(RLI.MPI, MachineMemOperand::MOLoad, 4,
8737                                 RLI.Alignment, RLI.AAInfo, RLI.Ranges);
8738       SDValue Ops[] = { RLI.Chain, RLI.Ptr };
8739       Bits = DAG.getMemIntrinsicNode(SINT.getOpcode() == ISD::ZERO_EXTEND ?
8740                                      PPCISD::LFIWZX : PPCISD::LFIWAX,
8741                                      dl, DAG.getVTList(MVT::f64, MVT::Other),
8742                                      Ops, MVT::i32, MMO);
8743     } else
8744       Bits = DAG.getNode(ISD::BITCAST, dl, MVT::f64, SINT);
8745 
8746     SDValue FP = DAG.getNode(FCFOp, dl, FCFTy, Bits);
8747 
8748     if (Op.getValueType() == MVT::f32 && !Subtarget.hasFPCVT())
8749       FP = DAG.getNode(ISD::FP_ROUND, dl,
8750                        MVT::f32, FP, DAG.getIntPtrConstant(0, dl));
8751     return FP;
8752   }
8753 
8754   assert(Op.getOperand(0).getValueType() == MVT::i32 &&
8755          "Unhandled INT_TO_FP type in custom expander!");
8756   // Since we only generate this in 64-bit mode, we can take advantage of
8757   // 64-bit registers.  In particular, sign extend the input value into the
8758   // 64-bit register with extsw, store the WHOLE 64-bit value into the stack
8759   // then lfd it and fcfid it.
8760   MachineFunction &MF = DAG.getMachineFunction();
8761   MachineFrameInfo &MFI = MF.getFrameInfo();
8762   EVT PtrVT = getPointerTy(MF.getDataLayout());
8763 
8764   SDValue Ld;
8765   if (Subtarget.hasLFIWAX() || Subtarget.hasFPCVT()) {
8766     ReuseLoadInfo RLI;
8767     bool ReusingLoad;
8768     if (!(ReusingLoad = canReuseLoadAddress(Op.getOperand(0), MVT::i32, RLI,
8769                                             DAG))) {
8770       int FrameIdx = MFI.CreateStackObject(4, Align(4), false);
8771       SDValue FIdx = DAG.getFrameIndex(FrameIdx, PtrVT);
8772 
8773       SDValue Store =
8774           DAG.getStore(DAG.getEntryNode(), dl, Op.getOperand(0), FIdx,
8775                        MachinePointerInfo::getFixedStack(
8776                            DAG.getMachineFunction(), FrameIdx));
8777 
8778       assert(cast<StoreSDNode>(Store)->getMemoryVT() == MVT::i32 &&
8779              "Expected an i32 store");
8780 
8781       RLI.Ptr = FIdx;
8782       RLI.Chain = Store;
8783       RLI.MPI =
8784           MachinePointerInfo::getFixedStack(DAG.getMachineFunction(), FrameIdx);
8785       RLI.Alignment = Align(4);
8786     }
8787 
8788     MachineMemOperand *MMO =
8789       MF.getMachineMemOperand(RLI.MPI, MachineMemOperand::MOLoad, 4,
8790                               RLI.Alignment, RLI.AAInfo, RLI.Ranges);
8791     SDValue Ops[] = { RLI.Chain, RLI.Ptr };
8792     Ld = DAG.getMemIntrinsicNode(Op.getOpcode() == ISD::UINT_TO_FP ?
8793                                    PPCISD::LFIWZX : PPCISD::LFIWAX,
8794                                  dl, DAG.getVTList(MVT::f64, MVT::Other),
8795                                  Ops, MVT::i32, MMO);
8796     if (ReusingLoad)
8797       spliceIntoChain(RLI.ResChain, Ld.getValue(1), DAG);
8798   } else {
8799     assert(Subtarget.isPPC64() &&
8800            "i32->FP without LFIWAX supported only on PPC64");
8801 
8802     int FrameIdx = MFI.CreateStackObject(8, Align(8), false);
8803     SDValue FIdx = DAG.getFrameIndex(FrameIdx, PtrVT);
8804 
8805     SDValue Ext64 = DAG.getNode(ISD::SIGN_EXTEND, dl, MVT::i64,
8806                                 Op.getOperand(0));
8807 
8808     // STD the extended value into the stack slot.
8809     SDValue Store = DAG.getStore(
8810         DAG.getEntryNode(), dl, Ext64, FIdx,
8811         MachinePointerInfo::getFixedStack(DAG.getMachineFunction(), FrameIdx));
8812 
8813     // Load the value as a double.
8814     Ld = DAG.getLoad(
8815         MVT::f64, dl, Store, FIdx,
8816         MachinePointerInfo::getFixedStack(DAG.getMachineFunction(), FrameIdx));
8817   }
8818 
8819   // FCFID it and return it.
8820   SDValue FP = DAG.getNode(FCFOp, dl, FCFTy, Ld);
8821   if (Op.getValueType() == MVT::f32 && !Subtarget.hasFPCVT())
8822     FP = DAG.getNode(ISD::FP_ROUND, dl, MVT::f32, FP,
8823                      DAG.getIntPtrConstant(0, dl));
8824   return FP;
8825 }
8826 
8827 SDValue PPCTargetLowering::LowerFLT_ROUNDS_(SDValue Op,
8828                                             SelectionDAG &DAG) const {
8829   SDLoc dl(Op);
8830   /*
8831    The rounding mode is in bits 30:31 of FPSR, and has the following
8832    settings:
8833      00 Round to nearest
8834      01 Round to 0
8835      10 Round to +inf
8836      11 Round to -inf
8837 
8838   FLT_ROUNDS, on the other hand, expects the following:
8839     -1 Undefined
8840      0 Round to 0
8841      1 Round to nearest
8842      2 Round to +inf
8843      3 Round to -inf
8844 
8845   To perform the conversion, we do:
8846     ((FPSCR & 0x3) ^ ((~FPSCR & 0x3) >> 1))
8847   */
8848 
8849   MachineFunction &MF = DAG.getMachineFunction();
8850   EVT VT = Op.getValueType();
8851   EVT PtrVT = getPointerTy(MF.getDataLayout());
8852 
8853   // Save FP Control Word to register
8854   SDValue Chain = Op.getOperand(0);
8855   SDValue MFFS = DAG.getNode(PPCISD::MFFS, dl, {MVT::f64, MVT::Other}, Chain);
8856   Chain = MFFS.getValue(1);
8857 
8858   // Save FP register to stack slot
8859   int SSFI = MF.getFrameInfo().CreateStackObject(8, Align(8), false);
8860   SDValue StackSlot = DAG.getFrameIndex(SSFI, PtrVT);
8861   Chain = DAG.getStore(Chain, dl, MFFS, StackSlot, MachinePointerInfo());
8862 
8863   // Load FP Control Word from low 32 bits of stack slot.
8864   SDValue Four = DAG.getConstant(4, dl, PtrVT);
8865   SDValue Addr = DAG.getNode(ISD::ADD, dl, PtrVT, StackSlot, Four);
8866   SDValue CWD = DAG.getLoad(MVT::i32, dl, Chain, Addr, MachinePointerInfo());
8867   Chain = CWD.getValue(1);
8868 
8869   // Transform as necessary
8870   SDValue CWD1 =
8871     DAG.getNode(ISD::AND, dl, MVT::i32,
8872                 CWD, DAG.getConstant(3, dl, MVT::i32));
8873   SDValue CWD2 =
8874     DAG.getNode(ISD::SRL, dl, MVT::i32,
8875                 DAG.getNode(ISD::AND, dl, MVT::i32,
8876                             DAG.getNode(ISD::XOR, dl, MVT::i32,
8877                                         CWD, DAG.getConstant(3, dl, MVT::i32)),
8878                             DAG.getConstant(3, dl, MVT::i32)),
8879                 DAG.getConstant(1, dl, MVT::i32));
8880 
8881   SDValue RetVal =
8882     DAG.getNode(ISD::XOR, dl, MVT::i32, CWD1, CWD2);
8883 
8884   RetVal =
8885       DAG.getNode((VT.getSizeInBits() < 16 ? ISD::TRUNCATE : ISD::ZERO_EXTEND),
8886                   dl, VT, RetVal);
8887 
8888   return DAG.getMergeValues({RetVal, Chain}, dl);
8889 }
8890 
8891 SDValue PPCTargetLowering::LowerSHL_PARTS(SDValue Op, SelectionDAG &DAG) const {
8892   EVT VT = Op.getValueType();
8893   unsigned BitWidth = VT.getSizeInBits();
8894   SDLoc dl(Op);
8895   assert(Op.getNumOperands() == 3 &&
8896          VT == Op.getOperand(1).getValueType() &&
8897          "Unexpected SHL!");
8898 
8899   // Expand into a bunch of logical ops.  Note that these ops
8900   // depend on the PPC behavior for oversized shift amounts.
8901   SDValue Lo = Op.getOperand(0);
8902   SDValue Hi = Op.getOperand(1);
8903   SDValue Amt = Op.getOperand(2);
8904   EVT AmtVT = Amt.getValueType();
8905 
8906   SDValue Tmp1 = DAG.getNode(ISD::SUB, dl, AmtVT,
8907                              DAG.getConstant(BitWidth, dl, AmtVT), Amt);
8908   SDValue Tmp2 = DAG.getNode(PPCISD::SHL, dl, VT, Hi, Amt);
8909   SDValue Tmp3 = DAG.getNode(PPCISD::SRL, dl, VT, Lo, Tmp1);
8910   SDValue Tmp4 = DAG.getNode(ISD::OR , dl, VT, Tmp2, Tmp3);
8911   SDValue Tmp5 = DAG.getNode(ISD::ADD, dl, AmtVT, Amt,
8912                              DAG.getConstant(-BitWidth, dl, AmtVT));
8913   SDValue Tmp6 = DAG.getNode(PPCISD::SHL, dl, VT, Lo, Tmp5);
8914   SDValue OutHi = DAG.getNode(ISD::OR, dl, VT, Tmp4, Tmp6);
8915   SDValue OutLo = DAG.getNode(PPCISD::SHL, dl, VT, Lo, Amt);
8916   SDValue OutOps[] = { OutLo, OutHi };
8917   return DAG.getMergeValues(OutOps, dl);
8918 }
8919 
8920 SDValue PPCTargetLowering::LowerSRL_PARTS(SDValue Op, SelectionDAG &DAG) const {
8921   EVT VT = Op.getValueType();
8922   SDLoc dl(Op);
8923   unsigned BitWidth = VT.getSizeInBits();
8924   assert(Op.getNumOperands() == 3 &&
8925          VT == Op.getOperand(1).getValueType() &&
8926          "Unexpected SRL!");
8927 
8928   // Expand into a bunch of logical ops.  Note that these ops
8929   // depend on the PPC behavior for oversized shift amounts.
8930   SDValue Lo = Op.getOperand(0);
8931   SDValue Hi = Op.getOperand(1);
8932   SDValue Amt = Op.getOperand(2);
8933   EVT AmtVT = Amt.getValueType();
8934 
8935   SDValue Tmp1 = DAG.getNode(ISD::SUB, dl, AmtVT,
8936                              DAG.getConstant(BitWidth, dl, AmtVT), Amt);
8937   SDValue Tmp2 = DAG.getNode(PPCISD::SRL, dl, VT, Lo, Amt);
8938   SDValue Tmp3 = DAG.getNode(PPCISD::SHL, dl, VT, Hi, Tmp1);
8939   SDValue Tmp4 = DAG.getNode(ISD::OR, dl, VT, Tmp2, Tmp3);
8940   SDValue Tmp5 = DAG.getNode(ISD::ADD, dl, AmtVT, Amt,
8941                              DAG.getConstant(-BitWidth, dl, AmtVT));
8942   SDValue Tmp6 = DAG.getNode(PPCISD::SRL, dl, VT, Hi, Tmp5);
8943   SDValue OutLo = DAG.getNode(ISD::OR, dl, VT, Tmp4, Tmp6);
8944   SDValue OutHi = DAG.getNode(PPCISD::SRL, dl, VT, Hi, Amt);
8945   SDValue OutOps[] = { OutLo, OutHi };
8946   return DAG.getMergeValues(OutOps, dl);
8947 }
8948 
8949 SDValue PPCTargetLowering::LowerSRA_PARTS(SDValue Op, SelectionDAG &DAG) const {
8950   SDLoc dl(Op);
8951   EVT VT = Op.getValueType();
8952   unsigned BitWidth = VT.getSizeInBits();
8953   assert(Op.getNumOperands() == 3 &&
8954          VT == Op.getOperand(1).getValueType() &&
8955          "Unexpected SRA!");
8956 
8957   // Expand into a bunch of logical ops, followed by a select_cc.
8958   SDValue Lo = Op.getOperand(0);
8959   SDValue Hi = Op.getOperand(1);
8960   SDValue Amt = Op.getOperand(2);
8961   EVT AmtVT = Amt.getValueType();
8962 
8963   SDValue Tmp1 = DAG.getNode(ISD::SUB, dl, AmtVT,
8964                              DAG.getConstant(BitWidth, dl, AmtVT), Amt);
8965   SDValue Tmp2 = DAG.getNode(PPCISD::SRL, dl, VT, Lo, Amt);
8966   SDValue Tmp3 = DAG.getNode(PPCISD::SHL, dl, VT, Hi, Tmp1);
8967   SDValue Tmp4 = DAG.getNode(ISD::OR, dl, VT, Tmp2, Tmp3);
8968   SDValue Tmp5 = DAG.getNode(ISD::ADD, dl, AmtVT, Amt,
8969                              DAG.getConstant(-BitWidth, dl, AmtVT));
8970   SDValue Tmp6 = DAG.getNode(PPCISD::SRA, dl, VT, Hi, Tmp5);
8971   SDValue OutHi = DAG.getNode(PPCISD::SRA, dl, VT, Hi, Amt);
8972   SDValue OutLo = DAG.getSelectCC(dl, Tmp5, DAG.getConstant(0, dl, AmtVT),
8973                                   Tmp4, Tmp6, ISD::SETLE);
8974   SDValue OutOps[] = { OutLo, OutHi };
8975   return DAG.getMergeValues(OutOps, dl);
8976 }
8977 
8978 //===----------------------------------------------------------------------===//
8979 // Vector related lowering.
8980 //
8981 
8982 /// getCanonicalConstSplat - Build a canonical splat immediate of Val with an
8983 /// element size of SplatSize. Cast the result to VT.
8984 static SDValue getCanonicalConstSplat(uint64_t Val, unsigned SplatSize, EVT VT,
8985                                       SelectionDAG &DAG, const SDLoc &dl) {
8986   static const MVT VTys[] = { // canonical VT to use for each size.
8987     MVT::v16i8, MVT::v8i16, MVT::Other, MVT::v4i32
8988   };
8989 
8990   EVT ReqVT = VT != MVT::Other ? VT : VTys[SplatSize-1];
8991 
8992   // For a splat with all ones, turn it to vspltisb 0xFF to canonicalize.
8993   if (Val == ((1LU << (SplatSize * 8)) - 1)) {
8994     SplatSize = 1;
8995     Val = 0xFF;
8996   }
8997 
8998   EVT CanonicalVT = VTys[SplatSize-1];
8999 
9000   // Build a canonical splat for this value.
9001   return DAG.getBitcast(ReqVT, DAG.getConstant(Val, dl, CanonicalVT));
9002 }
9003 
9004 /// BuildIntrinsicOp - Return a unary operator intrinsic node with the
9005 /// specified intrinsic ID.
9006 static SDValue BuildIntrinsicOp(unsigned IID, SDValue Op, SelectionDAG &DAG,
9007                                 const SDLoc &dl, EVT DestVT = MVT::Other) {
9008   if (DestVT == MVT::Other) DestVT = Op.getValueType();
9009   return DAG.getNode(ISD::INTRINSIC_WO_CHAIN, dl, DestVT,
9010                      DAG.getConstant(IID, dl, MVT::i32), Op);
9011 }
9012 
9013 /// BuildIntrinsicOp - Return a binary operator intrinsic node with the
9014 /// specified intrinsic ID.
9015 static SDValue BuildIntrinsicOp(unsigned IID, SDValue LHS, SDValue RHS,
9016                                 SelectionDAG &DAG, const SDLoc &dl,
9017                                 EVT DestVT = MVT::Other) {
9018   if (DestVT == MVT::Other) DestVT = LHS.getValueType();
9019   return DAG.getNode(ISD::INTRINSIC_WO_CHAIN, dl, DestVT,
9020                      DAG.getConstant(IID, dl, MVT::i32), LHS, RHS);
9021 }
9022 
9023 /// BuildIntrinsicOp - Return a ternary operator intrinsic node with the
9024 /// specified intrinsic ID.
9025 static SDValue BuildIntrinsicOp(unsigned IID, SDValue Op0, SDValue Op1,
9026                                 SDValue Op2, SelectionDAG &DAG, const SDLoc &dl,
9027                                 EVT DestVT = MVT::Other) {
9028   if (DestVT == MVT::Other) DestVT = Op0.getValueType();
9029   return DAG.getNode(ISD::INTRINSIC_WO_CHAIN, dl, DestVT,
9030                      DAG.getConstant(IID, dl, MVT::i32), Op0, Op1, Op2);
9031 }
9032 
9033 /// BuildVSLDOI - Return a VECTOR_SHUFFLE that is a vsldoi of the specified
9034 /// amount.  The result has the specified value type.
9035 static SDValue BuildVSLDOI(SDValue LHS, SDValue RHS, unsigned Amt, EVT VT,
9036                            SelectionDAG &DAG, const SDLoc &dl) {
9037   // Force LHS/RHS to be the right type.
9038   LHS = DAG.getNode(ISD::BITCAST, dl, MVT::v16i8, LHS);
9039   RHS = DAG.getNode(ISD::BITCAST, dl, MVT::v16i8, RHS);
9040 
9041   int Ops[16];
9042   for (unsigned i = 0; i != 16; ++i)
9043     Ops[i] = i + Amt;
9044   SDValue T = DAG.getVectorShuffle(MVT::v16i8, dl, LHS, RHS, Ops);
9045   return DAG.getNode(ISD::BITCAST, dl, VT, T);
9046 }
9047 
9048 /// Do we have an efficient pattern in a .td file for this node?
9049 ///
9050 /// \param V - pointer to the BuildVectorSDNode being matched
9051 /// \param HasDirectMove - does this subtarget have VSR <-> GPR direct moves?
9052 ///
9053 /// There are some patterns where it is beneficial to keep a BUILD_VECTOR
9054 /// node as a BUILD_VECTOR node rather than expanding it. The patterns where
9055 /// the opposite is true (expansion is beneficial) are:
9056 /// - The node builds a vector out of integers that are not 32 or 64-bits
9057 /// - The node builds a vector out of constants
9058 /// - The node is a "load-and-splat"
9059 /// In all other cases, we will choose to keep the BUILD_VECTOR.
9060 static bool haveEfficientBuildVectorPattern(BuildVectorSDNode *V,
9061                                             bool HasDirectMove,
9062                                             bool HasP8Vector) {
9063   EVT VecVT = V->getValueType(0);
9064   bool RightType = VecVT == MVT::v2f64 ||
9065     (HasP8Vector && VecVT == MVT::v4f32) ||
9066     (HasDirectMove && (VecVT == MVT::v2i64 || VecVT == MVT::v4i32));
9067   if (!RightType)
9068     return false;
9069 
9070   bool IsSplat = true;
9071   bool IsLoad = false;
9072   SDValue Op0 = V->getOperand(0);
9073 
9074   // This function is called in a block that confirms the node is not a constant
9075   // splat. So a constant BUILD_VECTOR here means the vector is built out of
9076   // different constants.
9077   if (V->isConstant())
9078     return false;
9079   for (int i = 0, e = V->getNumOperands(); i < e; ++i) {
9080     if (V->getOperand(i).isUndef())
9081       return false;
9082     // We want to expand nodes that represent load-and-splat even if the
9083     // loaded value is a floating point truncation or conversion to int.
9084     if (V->getOperand(i).getOpcode() == ISD::LOAD ||
9085         (V->getOperand(i).getOpcode() == ISD::FP_ROUND &&
9086          V->getOperand(i).getOperand(0).getOpcode() == ISD::LOAD) ||
9087         (V->getOperand(i).getOpcode() == ISD::FP_TO_SINT &&
9088          V->getOperand(i).getOperand(0).getOpcode() == ISD::LOAD) ||
9089         (V->getOperand(i).getOpcode() == ISD::FP_TO_UINT &&
9090          V->getOperand(i).getOperand(0).getOpcode() == ISD::LOAD))
9091       IsLoad = true;
9092     // If the operands are different or the input is not a load and has more
9093     // uses than just this BV node, then it isn't a splat.
9094     if (V->getOperand(i) != Op0 ||
9095         (!IsLoad && !V->isOnlyUserOf(V->getOperand(i).getNode())))
9096       IsSplat = false;
9097   }
9098   return !(IsSplat && IsLoad);
9099 }
9100 
9101 // Lower BITCAST(f128, (build_pair i64, i64)) to BUILD_FP128.
9102 SDValue PPCTargetLowering::LowerBITCAST(SDValue Op, SelectionDAG &DAG) const {
9103 
9104   SDLoc dl(Op);
9105   SDValue Op0 = Op->getOperand(0);
9106 
9107   if ((Op.getValueType() != MVT::f128) ||
9108       (Op0.getOpcode() != ISD::BUILD_PAIR) ||
9109       (Op0.getOperand(0).getValueType() != MVT::i64) ||
9110       (Op0.getOperand(1).getValueType() != MVT::i64))
9111     return SDValue();
9112 
9113   return DAG.getNode(PPCISD::BUILD_FP128, dl, MVT::f128, Op0.getOperand(0),
9114                      Op0.getOperand(1));
9115 }
9116 
9117 static const SDValue *getNormalLoadInput(const SDValue &Op) {
9118   const SDValue *InputLoad = &Op;
9119   if (InputLoad->getOpcode() == ISD::BITCAST)
9120     InputLoad = &InputLoad->getOperand(0);
9121   if (InputLoad->getOpcode() == ISD::SCALAR_TO_VECTOR ||
9122       InputLoad->getOpcode() == PPCISD::SCALAR_TO_VECTOR_PERMUTED)
9123     InputLoad = &InputLoad->getOperand(0);
9124   if (InputLoad->getOpcode() != ISD::LOAD)
9125     return nullptr;
9126   LoadSDNode *LD = cast<LoadSDNode>(*InputLoad);
9127   return ISD::isNormalLoad(LD) ? InputLoad : nullptr;
9128 }
9129 
9130 // Convert the argument APFloat to a single precision APFloat if there is no
9131 // loss in information during the conversion to single precision APFloat and the
9132 // resulting number is not a denormal number. Return true if successful.
9133 bool llvm::convertToNonDenormSingle(APFloat &ArgAPFloat) {
9134   APFloat APFloatToConvert = ArgAPFloat;
9135   bool LosesInfo = true;
9136   APFloatToConvert.convert(APFloat::IEEEsingle(), APFloat::rmNearestTiesToEven,
9137                            &LosesInfo);
9138   bool Success = (!LosesInfo && !APFloatToConvert.isDenormal());
9139   if (Success)
9140     ArgAPFloat = APFloatToConvert;
9141   return Success;
9142 }
9143 
9144 // Bitcast the argument APInt to a double and convert it to a single precision
9145 // APFloat, bitcast the APFloat to an APInt and assign it to the original
9146 // argument if there is no loss in information during the conversion from
9147 // double to single precision APFloat and the resulting number is not a denormal
9148 // number. Return true if successful.
9149 bool llvm::convertToNonDenormSingle(APInt &ArgAPInt) {
9150   double DpValue = ArgAPInt.bitsToDouble();
9151   APFloat APFloatDp(DpValue);
9152   bool Success = convertToNonDenormSingle(APFloatDp);
9153   if (Success)
9154     ArgAPInt = APFloatDp.bitcastToAPInt();
9155   return Success;
9156 }
9157 
9158 // If this is a case we can't handle, return null and let the default
9159 // expansion code take care of it.  If we CAN select this case, and if it
9160 // selects to a single instruction, return Op.  Otherwise, if we can codegen
9161 // this case more efficiently than a constant pool load, lower it to the
9162 // sequence of ops that should be used.
9163 SDValue PPCTargetLowering::LowerBUILD_VECTOR(SDValue Op,
9164                                              SelectionDAG &DAG) const {
9165   SDLoc dl(Op);
9166   BuildVectorSDNode *BVN = dyn_cast<BuildVectorSDNode>(Op.getNode());
9167   assert(BVN && "Expected a BuildVectorSDNode in LowerBUILD_VECTOR");
9168 
9169   if (Subtarget.hasQPX() && Op.getValueType() == MVT::v4i1) {
9170     // We first build an i32 vector, load it into a QPX register,
9171     // then convert it to a floating-point vector and compare it
9172     // to a zero vector to get the boolean result.
9173     MachineFrameInfo &MFI = DAG.getMachineFunction().getFrameInfo();
9174     int FrameIdx = MFI.CreateStackObject(16, Align(16), false);
9175     MachinePointerInfo PtrInfo =
9176         MachinePointerInfo::getFixedStack(DAG.getMachineFunction(), FrameIdx);
9177     EVT PtrVT = getPointerTy(DAG.getDataLayout());
9178     SDValue FIdx = DAG.getFrameIndex(FrameIdx, PtrVT);
9179 
9180     assert(BVN->getNumOperands() == 4 &&
9181       "BUILD_VECTOR for v4i1 does not have 4 operands");
9182 
9183     bool IsConst = true;
9184     for (unsigned i = 0; i < 4; ++i) {
9185       if (BVN->getOperand(i).isUndef()) continue;
9186       if (!isa<ConstantSDNode>(BVN->getOperand(i))) {
9187         IsConst = false;
9188         break;
9189       }
9190     }
9191 
9192     if (IsConst) {
9193       Constant *One =
9194         ConstantFP::get(Type::getFloatTy(*DAG.getContext()), 1.0);
9195       Constant *NegOne =
9196         ConstantFP::get(Type::getFloatTy(*DAG.getContext()), -1.0);
9197 
9198       Constant *CV[4];
9199       for (unsigned i = 0; i < 4; ++i) {
9200         if (BVN->getOperand(i).isUndef())
9201           CV[i] = UndefValue::get(Type::getFloatTy(*DAG.getContext()));
9202         else if (isNullConstant(BVN->getOperand(i)))
9203           CV[i] = NegOne;
9204         else
9205           CV[i] = One;
9206       }
9207 
9208       Constant *CP = ConstantVector::get(CV);
9209       SDValue CPIdx =
9210           DAG.getConstantPool(CP, getPointerTy(DAG.getDataLayout()), Align(16));
9211 
9212       SDValue Ops[] = {DAG.getEntryNode(), CPIdx};
9213       SDVTList VTs = DAG.getVTList({MVT::v4i1, /*chain*/ MVT::Other});
9214       return DAG.getMemIntrinsicNode(
9215           PPCISD::QVLFSb, dl, VTs, Ops, MVT::v4f32,
9216           MachinePointerInfo::getConstantPool(DAG.getMachineFunction()));
9217     }
9218 
9219     SmallVector<SDValue, 4> Stores;
9220     for (unsigned i = 0; i < 4; ++i) {
9221       if (BVN->getOperand(i).isUndef()) continue;
9222 
9223       unsigned Offset = 4*i;
9224       SDValue Idx = DAG.getConstant(Offset, dl, FIdx.getValueType());
9225       Idx = DAG.getNode(ISD::ADD, dl, FIdx.getValueType(), FIdx, Idx);
9226 
9227       unsigned StoreSize = BVN->getOperand(i).getValueType().getStoreSize();
9228       if (StoreSize > 4) {
9229         Stores.push_back(
9230             DAG.getTruncStore(DAG.getEntryNode(), dl, BVN->getOperand(i), Idx,
9231                               PtrInfo.getWithOffset(Offset), MVT::i32));
9232       } else {
9233         SDValue StoreValue = BVN->getOperand(i);
9234         if (StoreSize < 4)
9235           StoreValue = DAG.getNode(ISD::ANY_EXTEND, dl, MVT::i32, StoreValue);
9236 
9237         Stores.push_back(DAG.getStore(DAG.getEntryNode(), dl, StoreValue, Idx,
9238                                       PtrInfo.getWithOffset(Offset)));
9239       }
9240     }
9241 
9242     SDValue StoreChain;
9243     if (!Stores.empty())
9244       StoreChain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, Stores);
9245     else
9246       StoreChain = DAG.getEntryNode();
9247 
9248     // Now load from v4i32 into the QPX register; this will extend it to
9249     // v4i64 but not yet convert it to a floating point. Nevertheless, this
9250     // is typed as v4f64 because the QPX register integer states are not
9251     // explicitly represented.
9252 
9253     SDValue Ops[] = {StoreChain,
9254                      DAG.getConstant(Intrinsic::ppc_qpx_qvlfiwz, dl, MVT::i32),
9255                      FIdx};
9256     SDVTList VTs = DAG.getVTList({MVT::v4f64, /*chain*/ MVT::Other});
9257 
9258     SDValue LoadedVect = DAG.getMemIntrinsicNode(ISD::INTRINSIC_W_CHAIN,
9259       dl, VTs, Ops, MVT::v4i32, PtrInfo);
9260     LoadedVect = DAG.getNode(ISD::INTRINSIC_WO_CHAIN, dl, MVT::v4f64,
9261       DAG.getConstant(Intrinsic::ppc_qpx_qvfcfidu, dl, MVT::i32),
9262       LoadedVect);
9263 
9264     SDValue FPZeros = DAG.getConstantFP(0.0, dl, MVT::v4f64);
9265 
9266     return DAG.getSetCC(dl, MVT::v4i1, LoadedVect, FPZeros, ISD::SETEQ);
9267   }
9268 
9269   // All other QPX vectors are handled by generic code.
9270   if (Subtarget.hasQPX())
9271     return SDValue();
9272 
9273   // Check if this is a splat of a constant value.
9274   APInt APSplatBits, APSplatUndef;
9275   unsigned SplatBitSize;
9276   bool HasAnyUndefs;
9277   bool BVNIsConstantSplat =
9278       BVN->isConstantSplat(APSplatBits, APSplatUndef, SplatBitSize,
9279                            HasAnyUndefs, 0, !Subtarget.isLittleEndian());
9280 
9281   // If it is a splat of a double, check if we can shrink it to a 32 bit
9282   // non-denormal float which when converted back to double gives us the same
9283   // double. This is to exploit the XXSPLTIDP instruction.
9284   if (BVNIsConstantSplat && Subtarget.hasPrefixInstrs() &&
9285       (SplatBitSize == 64) && (Op->getValueType(0) == MVT::v2f64) &&
9286       convertToNonDenormSingle(APSplatBits)) {
9287     SDValue SplatNode = DAG.getNode(
9288         PPCISD::XXSPLTI_SP_TO_DP, dl, MVT::v2f64,
9289         DAG.getTargetConstant(APSplatBits.getZExtValue(), dl, MVT::i32));
9290     return DAG.getBitcast(Op.getValueType(), SplatNode);
9291   }
9292 
9293   if (!BVNIsConstantSplat || SplatBitSize > 32) {
9294 
9295     const SDValue *InputLoad = getNormalLoadInput(Op.getOperand(0));
9296     // Handle load-and-splat patterns as we have instructions that will do this
9297     // in one go.
9298     if (InputLoad && DAG.isSplatValue(Op, true)) {
9299       LoadSDNode *LD = cast<LoadSDNode>(*InputLoad);
9300 
9301       // We have handling for 4 and 8 byte elements.
9302       unsigned ElementSize = LD->getMemoryVT().getScalarSizeInBits();
9303 
9304       // Checking for a single use of this load, we have to check for vector
9305       // width (128 bits) / ElementSize uses (since each operand of the
9306       // BUILD_VECTOR is a separate use of the value.
9307       if (InputLoad->getNode()->hasNUsesOfValue(128 / ElementSize, 0) &&
9308           ((Subtarget.hasVSX() && ElementSize == 64) ||
9309            (Subtarget.hasP9Vector() && ElementSize == 32))) {
9310         SDValue Ops[] = {
9311           LD->getChain(),    // Chain
9312           LD->getBasePtr(),  // Ptr
9313           DAG.getValueType(Op.getValueType()) // VT
9314         };
9315         return
9316           DAG.getMemIntrinsicNode(PPCISD::LD_SPLAT, dl,
9317                                   DAG.getVTList(Op.getValueType(), MVT::Other),
9318                                   Ops, LD->getMemoryVT(), LD->getMemOperand());
9319       }
9320     }
9321 
9322     // BUILD_VECTOR nodes that are not constant splats of up to 32-bits can be
9323     // lowered to VSX instructions under certain conditions.
9324     // Without VSX, there is no pattern more efficient than expanding the node.
9325     if (Subtarget.hasVSX() &&
9326         haveEfficientBuildVectorPattern(BVN, Subtarget.hasDirectMove(),
9327                                         Subtarget.hasP8Vector()))
9328       return Op;
9329     return SDValue();
9330   }
9331 
9332   uint64_t SplatBits = APSplatBits.getZExtValue();
9333   uint64_t SplatUndef = APSplatUndef.getZExtValue();
9334   unsigned SplatSize = SplatBitSize / 8;
9335 
9336   // First, handle single instruction cases.
9337 
9338   // All zeros?
9339   if (SplatBits == 0) {
9340     // Canonicalize all zero vectors to be v4i32.
9341     if (Op.getValueType() != MVT::v4i32 || HasAnyUndefs) {
9342       SDValue Z = DAG.getConstant(0, dl, MVT::v4i32);
9343       Op = DAG.getNode(ISD::BITCAST, dl, Op.getValueType(), Z);
9344     }
9345     return Op;
9346   }
9347 
9348   // We have XXSPLTIW for constant splats four bytes wide.
9349   // Given vector length is a multiple of 4, 2-byte splats can be replaced
9350   // with 4-byte splats. We replicate the SplatBits in case of 2-byte splat to
9351   // make a 4-byte splat element. For example: 2-byte splat of 0xABAB can be
9352   // turned into a 4-byte splat of 0xABABABAB.
9353   if (Subtarget.hasPrefixInstrs() && SplatSize == 2)
9354     return getCanonicalConstSplat((SplatBits |= SplatBits << 16), SplatSize * 2,
9355                                   Op.getValueType(), DAG, dl);
9356 
9357   if (Subtarget.hasPrefixInstrs() && SplatSize == 4)
9358     return getCanonicalConstSplat(SplatBits, SplatSize, Op.getValueType(), DAG,
9359                                   dl);
9360 
9361   // We have XXSPLTIB for constant splats one byte wide.
9362   if (Subtarget.hasP9Vector() && SplatSize == 1)
9363     return getCanonicalConstSplat(SplatBits, SplatSize, Op.getValueType(), DAG,
9364                                   dl);
9365 
9366   // If the sign extended value is in the range [-16,15], use VSPLTI[bhw].
9367   int32_t SextVal= (int32_t(SplatBits << (32-SplatBitSize)) >>
9368                     (32-SplatBitSize));
9369   if (SextVal >= -16 && SextVal <= 15)
9370     return getCanonicalConstSplat(SextVal, SplatSize, Op.getValueType(), DAG,
9371                                   dl);
9372 
9373   // Two instruction sequences.
9374 
9375   // If this value is in the range [-32,30] and is even, use:
9376   //     VSPLTI[bhw](val/2) + VSPLTI[bhw](val/2)
9377   // If this value is in the range [17,31] and is odd, use:
9378   //     VSPLTI[bhw](val-16) - VSPLTI[bhw](-16)
9379   // If this value is in the range [-31,-17] and is odd, use:
9380   //     VSPLTI[bhw](val+16) + VSPLTI[bhw](-16)
9381   // Note the last two are three-instruction sequences.
9382   if (SextVal >= -32 && SextVal <= 31) {
9383     // To avoid having these optimizations undone by constant folding,
9384     // we convert to a pseudo that will be expanded later into one of
9385     // the above forms.
9386     SDValue Elt = DAG.getConstant(SextVal, dl, MVT::i32);
9387     EVT VT = (SplatSize == 1 ? MVT::v16i8 :
9388               (SplatSize == 2 ? MVT::v8i16 : MVT::v4i32));
9389     SDValue EltSize = DAG.getConstant(SplatSize, dl, MVT::i32);
9390     SDValue RetVal = DAG.getNode(PPCISD::VADD_SPLAT, dl, VT, Elt, EltSize);
9391     if (VT == Op.getValueType())
9392       return RetVal;
9393     else
9394       return DAG.getNode(ISD::BITCAST, dl, Op.getValueType(), RetVal);
9395   }
9396 
9397   // If this is 0x8000_0000 x 4, turn into vspltisw + vslw.  If it is
9398   // 0x7FFF_FFFF x 4, turn it into not(0x8000_0000).  This is important
9399   // for fneg/fabs.
9400   if (SplatSize == 4 && SplatBits == (0x7FFFFFFF&~SplatUndef)) {
9401     // Make -1 and vspltisw -1:
9402     SDValue OnesV = getCanonicalConstSplat(-1, 4, MVT::v4i32, DAG, dl);
9403 
9404     // Make the VSLW intrinsic, computing 0x8000_0000.
9405     SDValue Res = BuildIntrinsicOp(Intrinsic::ppc_altivec_vslw, OnesV,
9406                                    OnesV, DAG, dl);
9407 
9408     // xor by OnesV to invert it.
9409     Res = DAG.getNode(ISD::XOR, dl, MVT::v4i32, Res, OnesV);
9410     return DAG.getNode(ISD::BITCAST, dl, Op.getValueType(), Res);
9411   }
9412 
9413   // Check to see if this is a wide variety of vsplti*, binop self cases.
9414   static const signed char SplatCsts[] = {
9415     -1, 1, -2, 2, -3, 3, -4, 4, -5, 5, -6, 6, -7, 7,
9416     -8, 8, -9, 9, -10, 10, -11, 11, -12, 12, -13, 13, 14, -14, 15, -15, -16
9417   };
9418 
9419   for (unsigned idx = 0; idx < array_lengthof(SplatCsts); ++idx) {
9420     // Indirect through the SplatCsts array so that we favor 'vsplti -1' for
9421     // cases which are ambiguous (e.g. formation of 0x8000_0000).  'vsplti -1'
9422     int i = SplatCsts[idx];
9423 
9424     // Figure out what shift amount will be used by altivec if shifted by i in
9425     // this splat size.
9426     unsigned TypeShiftAmt = i & (SplatBitSize-1);
9427 
9428     // vsplti + shl self.
9429     if (SextVal == (int)((unsigned)i << TypeShiftAmt)) {
9430       SDValue Res = getCanonicalConstSplat(i, SplatSize, MVT::Other, DAG, dl);
9431       static const unsigned IIDs[] = { // Intrinsic to use for each size.
9432         Intrinsic::ppc_altivec_vslb, Intrinsic::ppc_altivec_vslh, 0,
9433         Intrinsic::ppc_altivec_vslw
9434       };
9435       Res = BuildIntrinsicOp(IIDs[SplatSize-1], Res, Res, DAG, dl);
9436       return DAG.getNode(ISD::BITCAST, dl, Op.getValueType(), Res);
9437     }
9438 
9439     // vsplti + srl self.
9440     if (SextVal == (int)((unsigned)i >> TypeShiftAmt)) {
9441       SDValue Res = getCanonicalConstSplat(i, SplatSize, MVT::Other, DAG, dl);
9442       static const unsigned IIDs[] = { // Intrinsic to use for each size.
9443         Intrinsic::ppc_altivec_vsrb, Intrinsic::ppc_altivec_vsrh, 0,
9444         Intrinsic::ppc_altivec_vsrw
9445       };
9446       Res = BuildIntrinsicOp(IIDs[SplatSize-1], Res, Res, DAG, dl);
9447       return DAG.getNode(ISD::BITCAST, dl, Op.getValueType(), Res);
9448     }
9449 
9450     // vsplti + sra self.
9451     if (SextVal == (int)((unsigned)i >> TypeShiftAmt)) {
9452       SDValue Res = getCanonicalConstSplat(i, SplatSize, MVT::Other, DAG, dl);
9453       static const unsigned IIDs[] = { // Intrinsic to use for each size.
9454         Intrinsic::ppc_altivec_vsrab, Intrinsic::ppc_altivec_vsrah, 0,
9455         Intrinsic::ppc_altivec_vsraw
9456       };
9457       Res = BuildIntrinsicOp(IIDs[SplatSize-1], Res, Res, DAG, dl);
9458       return DAG.getNode(ISD::BITCAST, dl, Op.getValueType(), Res);
9459     }
9460 
9461     // vsplti + rol self.
9462     if (SextVal == (int)(((unsigned)i << TypeShiftAmt) |
9463                          ((unsigned)i >> (SplatBitSize-TypeShiftAmt)))) {
9464       SDValue Res = getCanonicalConstSplat(i, SplatSize, MVT::Other, DAG, dl);
9465       static const unsigned IIDs[] = { // Intrinsic to use for each size.
9466         Intrinsic::ppc_altivec_vrlb, Intrinsic::ppc_altivec_vrlh, 0,
9467         Intrinsic::ppc_altivec_vrlw
9468       };
9469       Res = BuildIntrinsicOp(IIDs[SplatSize-1], Res, Res, DAG, dl);
9470       return DAG.getNode(ISD::BITCAST, dl, Op.getValueType(), Res);
9471     }
9472 
9473     // t = vsplti c, result = vsldoi t, t, 1
9474     if (SextVal == (int)(((unsigned)i << 8) | (i < 0 ? 0xFF : 0))) {
9475       SDValue T = getCanonicalConstSplat(i, SplatSize, MVT::v16i8, DAG, dl);
9476       unsigned Amt = Subtarget.isLittleEndian() ? 15 : 1;
9477       return BuildVSLDOI(T, T, Amt, Op.getValueType(), DAG, dl);
9478     }
9479     // t = vsplti c, result = vsldoi t, t, 2
9480     if (SextVal == (int)(((unsigned)i << 16) | (i < 0 ? 0xFFFF : 0))) {
9481       SDValue T = getCanonicalConstSplat(i, SplatSize, MVT::v16i8, DAG, dl);
9482       unsigned Amt = Subtarget.isLittleEndian() ? 14 : 2;
9483       return BuildVSLDOI(T, T, Amt, Op.getValueType(), DAG, dl);
9484     }
9485     // t = vsplti c, result = vsldoi t, t, 3
9486     if (SextVal == (int)(((unsigned)i << 24) | (i < 0 ? 0xFFFFFF : 0))) {
9487       SDValue T = getCanonicalConstSplat(i, SplatSize, MVT::v16i8, DAG, dl);
9488       unsigned Amt = Subtarget.isLittleEndian() ? 13 : 3;
9489       return BuildVSLDOI(T, T, Amt, Op.getValueType(), DAG, dl);
9490     }
9491   }
9492 
9493   return SDValue();
9494 }
9495 
9496 /// GeneratePerfectShuffle - Given an entry in the perfect-shuffle table, emit
9497 /// the specified operations to build the shuffle.
9498 static SDValue GeneratePerfectShuffle(unsigned PFEntry, SDValue LHS,
9499                                       SDValue RHS, SelectionDAG &DAG,
9500                                       const SDLoc &dl) {
9501   unsigned OpNum = (PFEntry >> 26) & 0x0F;
9502   unsigned LHSID = (PFEntry >> 13) & ((1 << 13)-1);
9503   unsigned RHSID = (PFEntry >>  0) & ((1 << 13)-1);
9504 
9505   enum {
9506     OP_COPY = 0,  // Copy, used for things like <u,u,u,3> to say it is <0,1,2,3>
9507     OP_VMRGHW,
9508     OP_VMRGLW,
9509     OP_VSPLTISW0,
9510     OP_VSPLTISW1,
9511     OP_VSPLTISW2,
9512     OP_VSPLTISW3,
9513     OP_VSLDOI4,
9514     OP_VSLDOI8,
9515     OP_VSLDOI12
9516   };
9517 
9518   if (OpNum == OP_COPY) {
9519     if (LHSID == (1*9+2)*9+3) return LHS;
9520     assert(LHSID == ((4*9+5)*9+6)*9+7 && "Illegal OP_COPY!");
9521     return RHS;
9522   }
9523 
9524   SDValue OpLHS, OpRHS;
9525   OpLHS = GeneratePerfectShuffle(PerfectShuffleTable[LHSID], LHS, RHS, DAG, dl);
9526   OpRHS = GeneratePerfectShuffle(PerfectShuffleTable[RHSID], LHS, RHS, DAG, dl);
9527 
9528   int ShufIdxs[16];
9529   switch (OpNum) {
9530   default: llvm_unreachable("Unknown i32 permute!");
9531   case OP_VMRGHW:
9532     ShufIdxs[ 0] =  0; ShufIdxs[ 1] =  1; ShufIdxs[ 2] =  2; ShufIdxs[ 3] =  3;
9533     ShufIdxs[ 4] = 16; ShufIdxs[ 5] = 17; ShufIdxs[ 6] = 18; ShufIdxs[ 7] = 19;
9534     ShufIdxs[ 8] =  4; ShufIdxs[ 9] =  5; ShufIdxs[10] =  6; ShufIdxs[11] =  7;
9535     ShufIdxs[12] = 20; ShufIdxs[13] = 21; ShufIdxs[14] = 22; ShufIdxs[15] = 23;
9536     break;
9537   case OP_VMRGLW:
9538     ShufIdxs[ 0] =  8; ShufIdxs[ 1] =  9; ShufIdxs[ 2] = 10; ShufIdxs[ 3] = 11;
9539     ShufIdxs[ 4] = 24; ShufIdxs[ 5] = 25; ShufIdxs[ 6] = 26; ShufIdxs[ 7] = 27;
9540     ShufIdxs[ 8] = 12; ShufIdxs[ 9] = 13; ShufIdxs[10] = 14; ShufIdxs[11] = 15;
9541     ShufIdxs[12] = 28; ShufIdxs[13] = 29; ShufIdxs[14] = 30; ShufIdxs[15] = 31;
9542     break;
9543   case OP_VSPLTISW0:
9544     for (unsigned i = 0; i != 16; ++i)
9545       ShufIdxs[i] = (i&3)+0;
9546     break;
9547   case OP_VSPLTISW1:
9548     for (unsigned i = 0; i != 16; ++i)
9549       ShufIdxs[i] = (i&3)+4;
9550     break;
9551   case OP_VSPLTISW2:
9552     for (unsigned i = 0; i != 16; ++i)
9553       ShufIdxs[i] = (i&3)+8;
9554     break;
9555   case OP_VSPLTISW3:
9556     for (unsigned i = 0; i != 16; ++i)
9557       ShufIdxs[i] = (i&3)+12;
9558     break;
9559   case OP_VSLDOI4:
9560     return BuildVSLDOI(OpLHS, OpRHS, 4, OpLHS.getValueType(), DAG, dl);
9561   case OP_VSLDOI8:
9562     return BuildVSLDOI(OpLHS, OpRHS, 8, OpLHS.getValueType(), DAG, dl);
9563   case OP_VSLDOI12:
9564     return BuildVSLDOI(OpLHS, OpRHS, 12, OpLHS.getValueType(), DAG, dl);
9565   }
9566   EVT VT = OpLHS.getValueType();
9567   OpLHS = DAG.getNode(ISD::BITCAST, dl, MVT::v16i8, OpLHS);
9568   OpRHS = DAG.getNode(ISD::BITCAST, dl, MVT::v16i8, OpRHS);
9569   SDValue T = DAG.getVectorShuffle(MVT::v16i8, dl, OpLHS, OpRHS, ShufIdxs);
9570   return DAG.getNode(ISD::BITCAST, dl, VT, T);
9571 }
9572 
9573 /// lowerToVINSERTB - Return the SDValue if this VECTOR_SHUFFLE can be handled
9574 /// by the VINSERTB instruction introduced in ISA 3.0, else just return default
9575 /// SDValue.
9576 SDValue PPCTargetLowering::lowerToVINSERTB(ShuffleVectorSDNode *N,
9577                                            SelectionDAG &DAG) const {
9578   const unsigned BytesInVector = 16;
9579   bool IsLE = Subtarget.isLittleEndian();
9580   SDLoc dl(N);
9581   SDValue V1 = N->getOperand(0);
9582   SDValue V2 = N->getOperand(1);
9583   unsigned ShiftElts = 0, InsertAtByte = 0;
9584   bool Swap = false;
9585 
9586   // Shifts required to get the byte we want at element 7.
9587   unsigned LittleEndianShifts[] = {8, 7,  6,  5,  4,  3,  2,  1,
9588                                    0, 15, 14, 13, 12, 11, 10, 9};
9589   unsigned BigEndianShifts[] = {9, 10, 11, 12, 13, 14, 15, 0,
9590                                 1, 2,  3,  4,  5,  6,  7,  8};
9591 
9592   ArrayRef<int> Mask = N->getMask();
9593   int OriginalOrder[] = {0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15};
9594 
9595   // For each mask element, find out if we're just inserting something
9596   // from V2 into V1 or vice versa.
9597   // Possible permutations inserting an element from V2 into V1:
9598   //   X, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15
9599   //   0, X, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15
9600   //   ...
9601   //   0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, X
9602   // Inserting from V1 into V2 will be similar, except mask range will be
9603   // [16,31].
9604 
9605   bool FoundCandidate = false;
9606   // If both vector operands for the shuffle are the same vector, the mask
9607   // will contain only elements from the first one and the second one will be
9608   // undef.
9609   unsigned VINSERTBSrcElem = IsLE ? 8 : 7;
9610   // Go through the mask of half-words to find an element that's being moved
9611   // from one vector to the other.
9612   for (unsigned i = 0; i < BytesInVector; ++i) {
9613     unsigned CurrentElement = Mask[i];
9614     // If 2nd operand is undefined, we should only look for element 7 in the
9615     // Mask.
9616     if (V2.isUndef() && CurrentElement != VINSERTBSrcElem)
9617       continue;
9618 
9619     bool OtherElementsInOrder = true;
9620     // Examine the other elements in the Mask to see if they're in original
9621     // order.
9622     for (unsigned j = 0; j < BytesInVector; ++j) {
9623       if (j == i)
9624         continue;
9625       // If CurrentElement is from V1 [0,15], then we the rest of the Mask to be
9626       // from V2 [16,31] and vice versa.  Unless the 2nd operand is undefined,
9627       // in which we always assume we're always picking from the 1st operand.
9628       int MaskOffset =
9629           (!V2.isUndef() && CurrentElement < BytesInVector) ? BytesInVector : 0;
9630       if (Mask[j] != OriginalOrder[j] + MaskOffset) {
9631         OtherElementsInOrder = false;
9632         break;
9633       }
9634     }
9635     // If other elements are in original order, we record the number of shifts
9636     // we need to get the element we want into element 7. Also record which byte
9637     // in the vector we should insert into.
9638     if (OtherElementsInOrder) {
9639       // If 2nd operand is undefined, we assume no shifts and no swapping.
9640       if (V2.isUndef()) {
9641         ShiftElts = 0;
9642         Swap = false;
9643       } else {
9644         // Only need the last 4-bits for shifts because operands will be swapped if CurrentElement is >= 2^4.
9645         ShiftElts = IsLE ? LittleEndianShifts[CurrentElement & 0xF]
9646                          : BigEndianShifts[CurrentElement & 0xF];
9647         Swap = CurrentElement < BytesInVector;
9648       }
9649       InsertAtByte = IsLE ? BytesInVector - (i + 1) : i;
9650       FoundCandidate = true;
9651       break;
9652     }
9653   }
9654 
9655   if (!FoundCandidate)
9656     return SDValue();
9657 
9658   // Candidate found, construct the proper SDAG sequence with VINSERTB,
9659   // optionally with VECSHL if shift is required.
9660   if (Swap)
9661     std::swap(V1, V2);
9662   if (V2.isUndef())
9663     V2 = V1;
9664   if (ShiftElts) {
9665     SDValue Shl = DAG.getNode(PPCISD::VECSHL, dl, MVT::v16i8, V2, V2,
9666                               DAG.getConstant(ShiftElts, dl, MVT::i32));
9667     return DAG.getNode(PPCISD::VECINSERT, dl, MVT::v16i8, V1, Shl,
9668                        DAG.getConstant(InsertAtByte, dl, MVT::i32));
9669   }
9670   return DAG.getNode(PPCISD::VECINSERT, dl, MVT::v16i8, V1, V2,
9671                      DAG.getConstant(InsertAtByte, dl, MVT::i32));
9672 }
9673 
9674 /// lowerToVINSERTH - Return the SDValue if this VECTOR_SHUFFLE can be handled
9675 /// by the VINSERTH instruction introduced in ISA 3.0, else just return default
9676 /// SDValue.
9677 SDValue PPCTargetLowering::lowerToVINSERTH(ShuffleVectorSDNode *N,
9678                                            SelectionDAG &DAG) const {
9679   const unsigned NumHalfWords = 8;
9680   const unsigned BytesInVector = NumHalfWords * 2;
9681   // Check that the shuffle is on half-words.
9682   if (!isNByteElemShuffleMask(N, 2, 1))
9683     return SDValue();
9684 
9685   bool IsLE = Subtarget.isLittleEndian();
9686   SDLoc dl(N);
9687   SDValue V1 = N->getOperand(0);
9688   SDValue V2 = N->getOperand(1);
9689   unsigned ShiftElts = 0, InsertAtByte = 0;
9690   bool Swap = false;
9691 
9692   // Shifts required to get the half-word we want at element 3.
9693   unsigned LittleEndianShifts[] = {4, 3, 2, 1, 0, 7, 6, 5};
9694   unsigned BigEndianShifts[] = {5, 6, 7, 0, 1, 2, 3, 4};
9695 
9696   uint32_t Mask = 0;
9697   uint32_t OriginalOrderLow = 0x1234567;
9698   uint32_t OriginalOrderHigh = 0x89ABCDEF;
9699   // Now we look at mask elements 0,2,4,6,8,10,12,14.  Pack the mask into a
9700   // 32-bit space, only need 4-bit nibbles per element.
9701   for (unsigned i = 0; i < NumHalfWords; ++i) {
9702     unsigned MaskShift = (NumHalfWords - 1 - i) * 4;
9703     Mask |= ((uint32_t)(N->getMaskElt(i * 2) / 2) << MaskShift);
9704   }
9705 
9706   // For each mask element, find out if we're just inserting something
9707   // from V2 into V1 or vice versa.  Possible permutations inserting an element
9708   // from V2 into V1:
9709   //   X, 1, 2, 3, 4, 5, 6, 7
9710   //   0, X, 2, 3, 4, 5, 6, 7
9711   //   0, 1, X, 3, 4, 5, 6, 7
9712   //   0, 1, 2, X, 4, 5, 6, 7
9713   //   0, 1, 2, 3, X, 5, 6, 7
9714   //   0, 1, 2, 3, 4, X, 6, 7
9715   //   0, 1, 2, 3, 4, 5, X, 7
9716   //   0, 1, 2, 3, 4, 5, 6, X
9717   // Inserting from V1 into V2 will be similar, except mask range will be [8,15].
9718 
9719   bool FoundCandidate = false;
9720   // Go through the mask of half-words to find an element that's being moved
9721   // from one vector to the other.
9722   for (unsigned i = 0; i < NumHalfWords; ++i) {
9723     unsigned MaskShift = (NumHalfWords - 1 - i) * 4;
9724     uint32_t MaskOneElt = (Mask >> MaskShift) & 0xF;
9725     uint32_t MaskOtherElts = ~(0xF << MaskShift);
9726     uint32_t TargetOrder = 0x0;
9727 
9728     // If both vector operands for the shuffle are the same vector, the mask
9729     // will contain only elements from the first one and the second one will be
9730     // undef.
9731     if (V2.isUndef()) {
9732       ShiftElts = 0;
9733       unsigned VINSERTHSrcElem = IsLE ? 4 : 3;
9734       TargetOrder = OriginalOrderLow;
9735       Swap = false;
9736       // Skip if not the correct element or mask of other elements don't equal
9737       // to our expected order.
9738       if (MaskOneElt == VINSERTHSrcElem &&
9739           (Mask & MaskOtherElts) == (TargetOrder & MaskOtherElts)) {
9740         InsertAtByte = IsLE ? BytesInVector - (i + 1) * 2 : i * 2;
9741         FoundCandidate = true;
9742         break;
9743       }
9744     } else { // If both operands are defined.
9745       // Target order is [8,15] if the current mask is between [0,7].
9746       TargetOrder =
9747           (MaskOneElt < NumHalfWords) ? OriginalOrderHigh : OriginalOrderLow;
9748       // Skip if mask of other elements don't equal our expected order.
9749       if ((Mask & MaskOtherElts) == (TargetOrder & MaskOtherElts)) {
9750         // We only need the last 3 bits for the number of shifts.
9751         ShiftElts = IsLE ? LittleEndianShifts[MaskOneElt & 0x7]
9752                          : BigEndianShifts[MaskOneElt & 0x7];
9753         InsertAtByte = IsLE ? BytesInVector - (i + 1) * 2 : i * 2;
9754         Swap = MaskOneElt < NumHalfWords;
9755         FoundCandidate = true;
9756         break;
9757       }
9758     }
9759   }
9760 
9761   if (!FoundCandidate)
9762     return SDValue();
9763 
9764   // Candidate found, construct the proper SDAG sequence with VINSERTH,
9765   // optionally with VECSHL if shift is required.
9766   if (Swap)
9767     std::swap(V1, V2);
9768   if (V2.isUndef())
9769     V2 = V1;
9770   SDValue Conv1 = DAG.getNode(ISD::BITCAST, dl, MVT::v8i16, V1);
9771   if (ShiftElts) {
9772     // Double ShiftElts because we're left shifting on v16i8 type.
9773     SDValue Shl = DAG.getNode(PPCISD::VECSHL, dl, MVT::v16i8, V2, V2,
9774                               DAG.getConstant(2 * ShiftElts, dl, MVT::i32));
9775     SDValue Conv2 = DAG.getNode(ISD::BITCAST, dl, MVT::v8i16, Shl);
9776     SDValue Ins = DAG.getNode(PPCISD::VECINSERT, dl, MVT::v8i16, Conv1, Conv2,
9777                               DAG.getConstant(InsertAtByte, dl, MVT::i32));
9778     return DAG.getNode(ISD::BITCAST, dl, MVT::v16i8, Ins);
9779   }
9780   SDValue Conv2 = DAG.getNode(ISD::BITCAST, dl, MVT::v8i16, V2);
9781   SDValue Ins = DAG.getNode(PPCISD::VECINSERT, dl, MVT::v8i16, Conv1, Conv2,
9782                             DAG.getConstant(InsertAtByte, dl, MVT::i32));
9783   return DAG.getNode(ISD::BITCAST, dl, MVT::v16i8, Ins);
9784 }
9785 
9786 /// lowerToXXSPLTI32DX - Return the SDValue if this VECTOR_SHUFFLE can be
9787 /// handled by the XXSPLTI32DX instruction introduced in ISA 3.1, otherwise
9788 /// return the default SDValue.
9789 SDValue PPCTargetLowering::lowerToXXSPLTI32DX(ShuffleVectorSDNode *SVN,
9790                                               SelectionDAG &DAG) const {
9791   // The LHS and RHS may be bitcasts to v16i8 as we canonicalize shuffles
9792   // to v16i8. Peek through the bitcasts to get the actual operands.
9793   SDValue LHS = peekThroughBitcasts(SVN->getOperand(0));
9794   SDValue RHS = peekThroughBitcasts(SVN->getOperand(1));
9795 
9796   auto ShuffleMask = SVN->getMask();
9797   SDValue VecShuffle(SVN, 0);
9798   SDLoc DL(SVN);
9799 
9800   // Check that we have a four byte shuffle.
9801   if (!isNByteElemShuffleMask(SVN, 4, 1))
9802     return SDValue();
9803 
9804   // Canonicalize the RHS being a BUILD_VECTOR when lowering to xxsplti32dx.
9805   if (RHS->getOpcode() != ISD::BUILD_VECTOR) {
9806     std::swap(LHS, RHS);
9807     VecShuffle = DAG.getCommutedVectorShuffle(*SVN);
9808     ShuffleMask = cast<ShuffleVectorSDNode>(VecShuffle)->getMask();
9809   }
9810 
9811   // Ensure that the RHS is a vector of constants.
9812   BuildVectorSDNode *BVN = dyn_cast<BuildVectorSDNode>(RHS.getNode());
9813   if (!BVN)
9814     return SDValue();
9815 
9816   // Check if RHS is a splat of 4-bytes (or smaller).
9817   APInt APSplatValue, APSplatUndef;
9818   unsigned SplatBitSize;
9819   bool HasAnyUndefs;
9820   if (!BVN->isConstantSplat(APSplatValue, APSplatUndef, SplatBitSize,
9821                             HasAnyUndefs, 0, !Subtarget.isLittleEndian()) ||
9822       SplatBitSize > 32)
9823     return SDValue();
9824 
9825   // Check that the shuffle mask matches the semantics of XXSPLTI32DX.
9826   // The instruction splats a constant C into two words of the source vector
9827   // producing { C, Unchanged, C, Unchanged } or { Unchanged, C, Unchanged, C }.
9828   // Thus we check that the shuffle mask is the equivalent  of
9829   // <0, [4-7], 2, [4-7]> or <[4-7], 1, [4-7], 3> respectively.
9830   // Note: the check above of isNByteElemShuffleMask() ensures that the bytes
9831   // within each word are consecutive, so we only need to check the first byte.
9832   SDValue Index;
9833   bool IsLE = Subtarget.isLittleEndian();
9834   if ((ShuffleMask[0] == 0 && ShuffleMask[8] == 8) &&
9835       (ShuffleMask[4] % 4 == 0 && ShuffleMask[12] % 4 == 0 &&
9836        ShuffleMask[4] > 15 && ShuffleMask[12] > 15))
9837     Index = DAG.getTargetConstant(IsLE ? 0 : 1, DL, MVT::i32);
9838   else if ((ShuffleMask[4] == 4 && ShuffleMask[12] == 12) &&
9839            (ShuffleMask[0] % 4 == 0 && ShuffleMask[8] % 4 == 0 &&
9840             ShuffleMask[0] > 15 && ShuffleMask[8] > 15))
9841     Index = DAG.getTargetConstant(IsLE ? 1 : 0, DL, MVT::i32);
9842   else
9843     return SDValue();
9844 
9845   // If the splat is narrower than 32-bits, we need to get the 32-bit value
9846   // for XXSPLTI32DX.
9847   unsigned SplatVal = APSplatValue.getZExtValue();
9848   for (; SplatBitSize < 32; SplatBitSize <<= 1)
9849     SplatVal |= (SplatVal << SplatBitSize);
9850 
9851   SDValue SplatNode = DAG.getNode(
9852       PPCISD::XXSPLTI32DX, DL, MVT::v2i64, DAG.getBitcast(MVT::v2i64, LHS),
9853       Index, DAG.getTargetConstant(SplatVal, DL, MVT::i32));
9854   return DAG.getNode(ISD::BITCAST, DL, MVT::v16i8, SplatNode);
9855 }
9856 
9857 /// LowerROTL - Custom lowering for ROTL(v1i128) to vector_shuffle(v16i8).
9858 /// We lower ROTL(v1i128) to vector_shuffle(v16i8) only if shift amount is
9859 /// a multiple of 8. Otherwise convert it to a scalar rotation(i128)
9860 /// i.e (or (shl x, C1), (srl x, 128-C1)).
9861 SDValue PPCTargetLowering::LowerROTL(SDValue Op, SelectionDAG &DAG) const {
9862   assert(Op.getOpcode() == ISD::ROTL && "Should only be called for ISD::ROTL");
9863   assert(Op.getValueType() == MVT::v1i128 &&
9864          "Only set v1i128 as custom, other type shouldn't reach here!");
9865   SDLoc dl(Op);
9866   SDValue N0 = peekThroughBitcasts(Op.getOperand(0));
9867   SDValue N1 = peekThroughBitcasts(Op.getOperand(1));
9868   unsigned SHLAmt = N1.getConstantOperandVal(0);
9869   if (SHLAmt % 8 == 0) {
9870     SmallVector<int, 16> Mask(16, 0);
9871     std::iota(Mask.begin(), Mask.end(), 0);
9872     std::rotate(Mask.begin(), Mask.begin() + SHLAmt / 8, Mask.end());
9873     if (SDValue Shuffle =
9874             DAG.getVectorShuffle(MVT::v16i8, dl, DAG.getBitcast(MVT::v16i8, N0),
9875                                  DAG.getUNDEF(MVT::v16i8), Mask))
9876       return DAG.getNode(ISD::BITCAST, dl, MVT::v1i128, Shuffle);
9877   }
9878   SDValue ArgVal = DAG.getBitcast(MVT::i128, N0);
9879   SDValue SHLOp = DAG.getNode(ISD::SHL, dl, MVT::i128, ArgVal,
9880                               DAG.getConstant(SHLAmt, dl, MVT::i32));
9881   SDValue SRLOp = DAG.getNode(ISD::SRL, dl, MVT::i128, ArgVal,
9882                               DAG.getConstant(128 - SHLAmt, dl, MVT::i32));
9883   SDValue OROp = DAG.getNode(ISD::OR, dl, MVT::i128, SHLOp, SRLOp);
9884   return DAG.getNode(ISD::BITCAST, dl, MVT::v1i128, OROp);
9885 }
9886 
9887 /// LowerVECTOR_SHUFFLE - Return the code we lower for VECTOR_SHUFFLE.  If this
9888 /// is a shuffle we can handle in a single instruction, return it.  Otherwise,
9889 /// return the code it can be lowered into.  Worst case, it can always be
9890 /// lowered into a vperm.
9891 SDValue PPCTargetLowering::LowerVECTOR_SHUFFLE(SDValue Op,
9892                                                SelectionDAG &DAG) const {
9893   SDLoc dl(Op);
9894   SDValue V1 = Op.getOperand(0);
9895   SDValue V2 = Op.getOperand(1);
9896   ShuffleVectorSDNode *SVOp = cast<ShuffleVectorSDNode>(Op);
9897 
9898   // Any nodes that were combined in the target-independent combiner prior
9899   // to vector legalization will not be sent to the target combine. Try to
9900   // combine it here.
9901   if (SDValue NewShuffle = combineVectorShuffle(SVOp, DAG)) {
9902     if (!isa<ShuffleVectorSDNode>(NewShuffle))
9903       return NewShuffle;
9904     Op = NewShuffle;
9905     SVOp = cast<ShuffleVectorSDNode>(Op);
9906     V1 = Op.getOperand(0);
9907     V2 = Op.getOperand(1);
9908   }
9909   EVT VT = Op.getValueType();
9910   bool isLittleEndian = Subtarget.isLittleEndian();
9911 
9912   unsigned ShiftElts, InsertAtByte;
9913   bool Swap = false;
9914 
9915   // If this is a load-and-splat, we can do that with a single instruction
9916   // in some cases. However if the load has multiple uses, we don't want to
9917   // combine it because that will just produce multiple loads.
9918   const SDValue *InputLoad = getNormalLoadInput(V1);
9919   if (InputLoad && Subtarget.hasVSX() && V2.isUndef() &&
9920       (PPC::isSplatShuffleMask(SVOp, 4) || PPC::isSplatShuffleMask(SVOp, 8)) &&
9921       InputLoad->hasOneUse()) {
9922     bool IsFourByte = PPC::isSplatShuffleMask(SVOp, 4);
9923     int SplatIdx =
9924       PPC::getSplatIdxForPPCMnemonics(SVOp, IsFourByte ? 4 : 8, DAG);
9925 
9926     LoadSDNode *LD = cast<LoadSDNode>(*InputLoad);
9927     // For 4-byte load-and-splat, we need Power9.
9928     if ((IsFourByte && Subtarget.hasP9Vector()) || !IsFourByte) {
9929       uint64_t Offset = 0;
9930       if (IsFourByte)
9931         Offset = isLittleEndian ? (3 - SplatIdx) * 4 : SplatIdx * 4;
9932       else
9933         Offset = isLittleEndian ? (1 - SplatIdx) * 8 : SplatIdx * 8;
9934 
9935       // If we are loading a partial vector, it does not make sense to adjust
9936       // the base pointer. This happens with (splat (s_to_v_permuted (ld))).
9937       if (LD->getMemoryVT().getSizeInBits() == (IsFourByte ? 32 : 64))
9938         Offset = 0;
9939       SDValue BasePtr = LD->getBasePtr();
9940       if (Offset != 0)
9941         BasePtr = DAG.getNode(ISD::ADD, dl, getPointerTy(DAG.getDataLayout()),
9942                               BasePtr, DAG.getIntPtrConstant(Offset, dl));
9943       SDValue Ops[] = {
9944         LD->getChain(),    // Chain
9945         BasePtr,           // BasePtr
9946         DAG.getValueType(Op.getValueType()) // VT
9947       };
9948       SDVTList VTL =
9949         DAG.getVTList(IsFourByte ? MVT::v4i32 : MVT::v2i64, MVT::Other);
9950       SDValue LdSplt =
9951         DAG.getMemIntrinsicNode(PPCISD::LD_SPLAT, dl, VTL,
9952                                 Ops, LD->getMemoryVT(), LD->getMemOperand());
9953       if (LdSplt.getValueType() != SVOp->getValueType(0))
9954         LdSplt = DAG.getBitcast(SVOp->getValueType(0), LdSplt);
9955       return LdSplt;
9956     }
9957   }
9958   if (Subtarget.hasP9Vector() &&
9959       PPC::isXXINSERTWMask(SVOp, ShiftElts, InsertAtByte, Swap,
9960                            isLittleEndian)) {
9961     if (Swap)
9962       std::swap(V1, V2);
9963     SDValue Conv1 = DAG.getNode(ISD::BITCAST, dl, MVT::v4i32, V1);
9964     SDValue Conv2 = DAG.getNode(ISD::BITCAST, dl, MVT::v4i32, V2);
9965     if (ShiftElts) {
9966       SDValue Shl = DAG.getNode(PPCISD::VECSHL, dl, MVT::v4i32, Conv2, Conv2,
9967                                 DAG.getConstant(ShiftElts, dl, MVT::i32));
9968       SDValue Ins = DAG.getNode(PPCISD::VECINSERT, dl, MVT::v4i32, Conv1, Shl,
9969                                 DAG.getConstant(InsertAtByte, dl, MVT::i32));
9970       return DAG.getNode(ISD::BITCAST, dl, MVT::v16i8, Ins);
9971     }
9972     SDValue Ins = DAG.getNode(PPCISD::VECINSERT, dl, MVT::v4i32, Conv1, Conv2,
9973                               DAG.getConstant(InsertAtByte, dl, MVT::i32));
9974     return DAG.getNode(ISD::BITCAST, dl, MVT::v16i8, Ins);
9975   }
9976 
9977   if (Subtarget.hasPrefixInstrs()) {
9978     SDValue SplatInsertNode;
9979     if ((SplatInsertNode = lowerToXXSPLTI32DX(SVOp, DAG)))
9980       return SplatInsertNode;
9981   }
9982 
9983   if (Subtarget.hasP9Altivec()) {
9984     SDValue NewISDNode;
9985     if ((NewISDNode = lowerToVINSERTH(SVOp, DAG)))
9986       return NewISDNode;
9987 
9988     if ((NewISDNode = lowerToVINSERTB(SVOp, DAG)))
9989       return NewISDNode;
9990   }
9991 
9992   if (Subtarget.hasVSX() &&
9993       PPC::isXXSLDWIShuffleMask(SVOp, ShiftElts, Swap, isLittleEndian)) {
9994     if (Swap)
9995       std::swap(V1, V2);
9996     SDValue Conv1 = DAG.getNode(ISD::BITCAST, dl, MVT::v4i32, V1);
9997     SDValue Conv2 =
9998         DAG.getNode(ISD::BITCAST, dl, MVT::v4i32, V2.isUndef() ? V1 : V2);
9999 
10000     SDValue Shl = DAG.getNode(PPCISD::VECSHL, dl, MVT::v4i32, Conv1, Conv2,
10001                               DAG.getConstant(ShiftElts, dl, MVT::i32));
10002     return DAG.getNode(ISD::BITCAST, dl, MVT::v16i8, Shl);
10003   }
10004 
10005   if (Subtarget.hasVSX() &&
10006     PPC::isXXPERMDIShuffleMask(SVOp, ShiftElts, Swap, isLittleEndian)) {
10007     if (Swap)
10008       std::swap(V1, V2);
10009     SDValue Conv1 = DAG.getNode(ISD::BITCAST, dl, MVT::v2i64, V1);
10010     SDValue Conv2 =
10011         DAG.getNode(ISD::BITCAST, dl, MVT::v2i64, V2.isUndef() ? V1 : V2);
10012 
10013     SDValue PermDI = DAG.getNode(PPCISD::XXPERMDI, dl, MVT::v2i64, Conv1, Conv2,
10014                               DAG.getConstant(ShiftElts, dl, MVT::i32));
10015     return DAG.getNode(ISD::BITCAST, dl, MVT::v16i8, PermDI);
10016   }
10017 
10018   if (Subtarget.hasP9Vector()) {
10019      if (PPC::isXXBRHShuffleMask(SVOp)) {
10020       SDValue Conv = DAG.getNode(ISD::BITCAST, dl, MVT::v8i16, V1);
10021       SDValue ReveHWord = DAG.getNode(ISD::BSWAP, dl, MVT::v8i16, Conv);
10022       return DAG.getNode(ISD::BITCAST, dl, MVT::v16i8, ReveHWord);
10023     } else if (PPC::isXXBRWShuffleMask(SVOp)) {
10024       SDValue Conv = DAG.getNode(ISD::BITCAST, dl, MVT::v4i32, V1);
10025       SDValue ReveWord = DAG.getNode(ISD::BSWAP, dl, MVT::v4i32, Conv);
10026       return DAG.getNode(ISD::BITCAST, dl, MVT::v16i8, ReveWord);
10027     } else if (PPC::isXXBRDShuffleMask(SVOp)) {
10028       SDValue Conv = DAG.getNode(ISD::BITCAST, dl, MVT::v2i64, V1);
10029       SDValue ReveDWord = DAG.getNode(ISD::BSWAP, dl, MVT::v2i64, Conv);
10030       return DAG.getNode(ISD::BITCAST, dl, MVT::v16i8, ReveDWord);
10031     } else if (PPC::isXXBRQShuffleMask(SVOp)) {
10032       SDValue Conv = DAG.getNode(ISD::BITCAST, dl, MVT::v1i128, V1);
10033       SDValue ReveQWord = DAG.getNode(ISD::BSWAP, dl, MVT::v1i128, Conv);
10034       return DAG.getNode(ISD::BITCAST, dl, MVT::v16i8, ReveQWord);
10035     }
10036   }
10037 
10038   if (Subtarget.hasVSX()) {
10039     if (V2.isUndef() && PPC::isSplatShuffleMask(SVOp, 4)) {
10040       int SplatIdx = PPC::getSplatIdxForPPCMnemonics(SVOp, 4, DAG);
10041 
10042       SDValue Conv = DAG.getNode(ISD::BITCAST, dl, MVT::v4i32, V1);
10043       SDValue Splat = DAG.getNode(PPCISD::XXSPLT, dl, MVT::v4i32, Conv,
10044                                   DAG.getConstant(SplatIdx, dl, MVT::i32));
10045       return DAG.getNode(ISD::BITCAST, dl, MVT::v16i8, Splat);
10046     }
10047 
10048     // Left shifts of 8 bytes are actually swaps. Convert accordingly.
10049     if (V2.isUndef() && PPC::isVSLDOIShuffleMask(SVOp, 1, DAG) == 8) {
10050       SDValue Conv = DAG.getNode(ISD::BITCAST, dl, MVT::v2f64, V1);
10051       SDValue Swap = DAG.getNode(PPCISD::SWAP_NO_CHAIN, dl, MVT::v2f64, Conv);
10052       return DAG.getNode(ISD::BITCAST, dl, MVT::v16i8, Swap);
10053     }
10054   }
10055 
10056   if (Subtarget.hasQPX()) {
10057     if (VT.getVectorNumElements() != 4)
10058       return SDValue();
10059 
10060     if (V2.isUndef()) V2 = V1;
10061 
10062     int AlignIdx = PPC::isQVALIGNIShuffleMask(SVOp);
10063     if (AlignIdx != -1) {
10064       return DAG.getNode(PPCISD::QVALIGNI, dl, VT, V1, V2,
10065                          DAG.getConstant(AlignIdx, dl, MVT::i32));
10066     } else if (SVOp->isSplat()) {
10067       int SplatIdx = SVOp->getSplatIndex();
10068       if (SplatIdx >= 4) {
10069         std::swap(V1, V2);
10070         SplatIdx -= 4;
10071       }
10072 
10073       return DAG.getNode(PPCISD::QVESPLATI, dl, VT, V1,
10074                          DAG.getConstant(SplatIdx, dl, MVT::i32));
10075     }
10076 
10077     // Lower this into a qvgpci/qvfperm pair.
10078 
10079     // Compute the qvgpci literal
10080     unsigned idx = 0;
10081     for (unsigned i = 0; i < 4; ++i) {
10082       int m = SVOp->getMaskElt(i);
10083       unsigned mm = m >= 0 ? (unsigned) m : i;
10084       idx |= mm << (3-i)*3;
10085     }
10086 
10087     SDValue V3 = DAG.getNode(PPCISD::QVGPCI, dl, MVT::v4f64,
10088                              DAG.getConstant(idx, dl, MVT::i32));
10089     return DAG.getNode(PPCISD::QVFPERM, dl, VT, V1, V2, V3);
10090   }
10091 
10092   // Cases that are handled by instructions that take permute immediates
10093   // (such as vsplt*) should be left as VECTOR_SHUFFLE nodes so they can be
10094   // selected by the instruction selector.
10095   if (V2.isUndef()) {
10096     if (PPC::isSplatShuffleMask(SVOp, 1) ||
10097         PPC::isSplatShuffleMask(SVOp, 2) ||
10098         PPC::isSplatShuffleMask(SVOp, 4) ||
10099         PPC::isVPKUWUMShuffleMask(SVOp, 1, DAG) ||
10100         PPC::isVPKUHUMShuffleMask(SVOp, 1, DAG) ||
10101         PPC::isVSLDOIShuffleMask(SVOp, 1, DAG) != -1 ||
10102         PPC::isVMRGLShuffleMask(SVOp, 1, 1, DAG) ||
10103         PPC::isVMRGLShuffleMask(SVOp, 2, 1, DAG) ||
10104         PPC::isVMRGLShuffleMask(SVOp, 4, 1, DAG) ||
10105         PPC::isVMRGHShuffleMask(SVOp, 1, 1, DAG) ||
10106         PPC::isVMRGHShuffleMask(SVOp, 2, 1, DAG) ||
10107         PPC::isVMRGHShuffleMask(SVOp, 4, 1, DAG) ||
10108         (Subtarget.hasP8Altivec() && (
10109          PPC::isVPKUDUMShuffleMask(SVOp, 1, DAG) ||
10110          PPC::isVMRGEOShuffleMask(SVOp, true, 1, DAG) ||
10111          PPC::isVMRGEOShuffleMask(SVOp, false, 1, DAG)))) {
10112       return Op;
10113     }
10114   }
10115 
10116   // Altivec has a variety of "shuffle immediates" that take two vector inputs
10117   // and produce a fixed permutation.  If any of these match, do not lower to
10118   // VPERM.
10119   unsigned int ShuffleKind = isLittleEndian ? 2 : 0;
10120   if (PPC::isVPKUWUMShuffleMask(SVOp, ShuffleKind, DAG) ||
10121       PPC::isVPKUHUMShuffleMask(SVOp, ShuffleKind, DAG) ||
10122       PPC::isVSLDOIShuffleMask(SVOp, ShuffleKind, DAG) != -1 ||
10123       PPC::isVMRGLShuffleMask(SVOp, 1, ShuffleKind, DAG) ||
10124       PPC::isVMRGLShuffleMask(SVOp, 2, ShuffleKind, DAG) ||
10125       PPC::isVMRGLShuffleMask(SVOp, 4, ShuffleKind, DAG) ||
10126       PPC::isVMRGHShuffleMask(SVOp, 1, ShuffleKind, DAG) ||
10127       PPC::isVMRGHShuffleMask(SVOp, 2, ShuffleKind, DAG) ||
10128       PPC::isVMRGHShuffleMask(SVOp, 4, ShuffleKind, DAG) ||
10129       (Subtarget.hasP8Altivec() && (
10130        PPC::isVPKUDUMShuffleMask(SVOp, ShuffleKind, DAG) ||
10131        PPC::isVMRGEOShuffleMask(SVOp, true, ShuffleKind, DAG) ||
10132        PPC::isVMRGEOShuffleMask(SVOp, false, ShuffleKind, DAG))))
10133     return Op;
10134 
10135   // Check to see if this is a shuffle of 4-byte values.  If so, we can use our
10136   // perfect shuffle table to emit an optimal matching sequence.
10137   ArrayRef<int> PermMask = SVOp->getMask();
10138 
10139   unsigned PFIndexes[4];
10140   bool isFourElementShuffle = true;
10141   for (unsigned i = 0; i != 4 && isFourElementShuffle; ++i) { // Element number
10142     unsigned EltNo = 8;   // Start out undef.
10143     for (unsigned j = 0; j != 4; ++j) {  // Intra-element byte.
10144       if (PermMask[i*4+j] < 0)
10145         continue;   // Undef, ignore it.
10146 
10147       unsigned ByteSource = PermMask[i*4+j];
10148       if ((ByteSource & 3) != j) {
10149         isFourElementShuffle = false;
10150         break;
10151       }
10152 
10153       if (EltNo == 8) {
10154         EltNo = ByteSource/4;
10155       } else if (EltNo != ByteSource/4) {
10156         isFourElementShuffle = false;
10157         break;
10158       }
10159     }
10160     PFIndexes[i] = EltNo;
10161   }
10162 
10163   // If this shuffle can be expressed as a shuffle of 4-byte elements, use the
10164   // perfect shuffle vector to determine if it is cost effective to do this as
10165   // discrete instructions, or whether we should use a vperm.
10166   // For now, we skip this for little endian until such time as we have a
10167   // little-endian perfect shuffle table.
10168   if (isFourElementShuffle && !isLittleEndian) {
10169     // Compute the index in the perfect shuffle table.
10170     unsigned PFTableIndex =
10171       PFIndexes[0]*9*9*9+PFIndexes[1]*9*9+PFIndexes[2]*9+PFIndexes[3];
10172 
10173     unsigned PFEntry = PerfectShuffleTable[PFTableIndex];
10174     unsigned Cost  = (PFEntry >> 30);
10175 
10176     // Determining when to avoid vperm is tricky.  Many things affect the cost
10177     // of vperm, particularly how many times the perm mask needs to be computed.
10178     // For example, if the perm mask can be hoisted out of a loop or is already
10179     // used (perhaps because there are multiple permutes with the same shuffle
10180     // mask?) the vperm has a cost of 1.  OTOH, hoisting the permute mask out of
10181     // the loop requires an extra register.
10182     //
10183     // As a compromise, we only emit discrete instructions if the shuffle can be
10184     // generated in 3 or fewer operations.  When we have loop information
10185     // available, if this block is within a loop, we should avoid using vperm
10186     // for 3-operation perms and use a constant pool load instead.
10187     if (Cost < 3)
10188       return GeneratePerfectShuffle(PFEntry, V1, V2, DAG, dl);
10189   }
10190 
10191   // Lower this to a VPERM(V1, V2, V3) expression, where V3 is a constant
10192   // vector that will get spilled to the constant pool.
10193   if (V2.isUndef()) V2 = V1;
10194 
10195   // The SHUFFLE_VECTOR mask is almost exactly what we want for vperm, except
10196   // that it is in input element units, not in bytes.  Convert now.
10197 
10198   // For little endian, the order of the input vectors is reversed, and
10199   // the permutation mask is complemented with respect to 31.  This is
10200   // necessary to produce proper semantics with the big-endian-biased vperm
10201   // instruction.
10202   EVT EltVT = V1.getValueType().getVectorElementType();
10203   unsigned BytesPerElement = EltVT.getSizeInBits()/8;
10204 
10205   SmallVector<SDValue, 16> ResultMask;
10206   for (unsigned i = 0, e = VT.getVectorNumElements(); i != e; ++i) {
10207     unsigned SrcElt = PermMask[i] < 0 ? 0 : PermMask[i];
10208 
10209     for (unsigned j = 0; j != BytesPerElement; ++j)
10210       if (isLittleEndian)
10211         ResultMask.push_back(DAG.getConstant(31 - (SrcElt*BytesPerElement + j),
10212                                              dl, MVT::i32));
10213       else
10214         ResultMask.push_back(DAG.getConstant(SrcElt*BytesPerElement + j, dl,
10215                                              MVT::i32));
10216   }
10217 
10218   ShufflesHandledWithVPERM++;
10219   SDValue VPermMask = DAG.getBuildVector(MVT::v16i8, dl, ResultMask);
10220   LLVM_DEBUG(dbgs() << "Emitting a VPERM for the following shuffle:\n");
10221   LLVM_DEBUG(SVOp->dump());
10222   LLVM_DEBUG(dbgs() << "With the following permute control vector:\n");
10223   LLVM_DEBUG(VPermMask.dump());
10224 
10225   if (isLittleEndian)
10226     return DAG.getNode(PPCISD::VPERM, dl, V1.getValueType(),
10227                        V2, V1, VPermMask);
10228   else
10229     return DAG.getNode(PPCISD::VPERM, dl, V1.getValueType(),
10230                        V1, V2, VPermMask);
10231 }
10232 
10233 /// getVectorCompareInfo - Given an intrinsic, return false if it is not a
10234 /// vector comparison.  If it is, return true and fill in Opc/isDot with
10235 /// information about the intrinsic.
10236 static bool getVectorCompareInfo(SDValue Intrin, int &CompareOpc,
10237                                  bool &isDot, const PPCSubtarget &Subtarget) {
10238   unsigned IntrinsicID =
10239       cast<ConstantSDNode>(Intrin.getOperand(0))->getZExtValue();
10240   CompareOpc = -1;
10241   isDot = false;
10242   switch (IntrinsicID) {
10243   default:
10244     return false;
10245   // Comparison predicates.
10246   case Intrinsic::ppc_altivec_vcmpbfp_p:
10247     CompareOpc = 966;
10248     isDot = true;
10249     break;
10250   case Intrinsic::ppc_altivec_vcmpeqfp_p:
10251     CompareOpc = 198;
10252     isDot = true;
10253     break;
10254   case Intrinsic::ppc_altivec_vcmpequb_p:
10255     CompareOpc = 6;
10256     isDot = true;
10257     break;
10258   case Intrinsic::ppc_altivec_vcmpequh_p:
10259     CompareOpc = 70;
10260     isDot = true;
10261     break;
10262   case Intrinsic::ppc_altivec_vcmpequw_p:
10263     CompareOpc = 134;
10264     isDot = true;
10265     break;
10266   case Intrinsic::ppc_altivec_vcmpequd_p:
10267     if (Subtarget.hasP8Altivec()) {
10268       CompareOpc = 199;
10269       isDot = true;
10270     } else
10271       return false;
10272     break;
10273   case Intrinsic::ppc_altivec_vcmpneb_p:
10274   case Intrinsic::ppc_altivec_vcmpneh_p:
10275   case Intrinsic::ppc_altivec_vcmpnew_p:
10276   case Intrinsic::ppc_altivec_vcmpnezb_p:
10277   case Intrinsic::ppc_altivec_vcmpnezh_p:
10278   case Intrinsic::ppc_altivec_vcmpnezw_p:
10279     if (Subtarget.hasP9Altivec()) {
10280       switch (IntrinsicID) {
10281       default:
10282         llvm_unreachable("Unknown comparison intrinsic.");
10283       case Intrinsic::ppc_altivec_vcmpneb_p:
10284         CompareOpc = 7;
10285         break;
10286       case Intrinsic::ppc_altivec_vcmpneh_p:
10287         CompareOpc = 71;
10288         break;
10289       case Intrinsic::ppc_altivec_vcmpnew_p:
10290         CompareOpc = 135;
10291         break;
10292       case Intrinsic::ppc_altivec_vcmpnezb_p:
10293         CompareOpc = 263;
10294         break;
10295       case Intrinsic::ppc_altivec_vcmpnezh_p:
10296         CompareOpc = 327;
10297         break;
10298       case Intrinsic::ppc_altivec_vcmpnezw_p:
10299         CompareOpc = 391;
10300         break;
10301       }
10302       isDot = true;
10303     } else
10304       return false;
10305     break;
10306   case Intrinsic::ppc_altivec_vcmpgefp_p:
10307     CompareOpc = 454;
10308     isDot = true;
10309     break;
10310   case Intrinsic::ppc_altivec_vcmpgtfp_p:
10311     CompareOpc = 710;
10312     isDot = true;
10313     break;
10314   case Intrinsic::ppc_altivec_vcmpgtsb_p:
10315     CompareOpc = 774;
10316     isDot = true;
10317     break;
10318   case Intrinsic::ppc_altivec_vcmpgtsh_p:
10319     CompareOpc = 838;
10320     isDot = true;
10321     break;
10322   case Intrinsic::ppc_altivec_vcmpgtsw_p:
10323     CompareOpc = 902;
10324     isDot = true;
10325     break;
10326   case Intrinsic::ppc_altivec_vcmpgtsd_p:
10327     if (Subtarget.hasP8Altivec()) {
10328       CompareOpc = 967;
10329       isDot = true;
10330     } else
10331       return false;
10332     break;
10333   case Intrinsic::ppc_altivec_vcmpgtub_p:
10334     CompareOpc = 518;
10335     isDot = true;
10336     break;
10337   case Intrinsic::ppc_altivec_vcmpgtuh_p:
10338     CompareOpc = 582;
10339     isDot = true;
10340     break;
10341   case Intrinsic::ppc_altivec_vcmpgtuw_p:
10342     CompareOpc = 646;
10343     isDot = true;
10344     break;
10345   case Intrinsic::ppc_altivec_vcmpgtud_p:
10346     if (Subtarget.hasP8Altivec()) {
10347       CompareOpc = 711;
10348       isDot = true;
10349     } else
10350       return false;
10351     break;
10352 
10353   // VSX predicate comparisons use the same infrastructure
10354   case Intrinsic::ppc_vsx_xvcmpeqdp_p:
10355   case Intrinsic::ppc_vsx_xvcmpgedp_p:
10356   case Intrinsic::ppc_vsx_xvcmpgtdp_p:
10357   case Intrinsic::ppc_vsx_xvcmpeqsp_p:
10358   case Intrinsic::ppc_vsx_xvcmpgesp_p:
10359   case Intrinsic::ppc_vsx_xvcmpgtsp_p:
10360     if (Subtarget.hasVSX()) {
10361       switch (IntrinsicID) {
10362       case Intrinsic::ppc_vsx_xvcmpeqdp_p:
10363         CompareOpc = 99;
10364         break;
10365       case Intrinsic::ppc_vsx_xvcmpgedp_p:
10366         CompareOpc = 115;
10367         break;
10368       case Intrinsic::ppc_vsx_xvcmpgtdp_p:
10369         CompareOpc = 107;
10370         break;
10371       case Intrinsic::ppc_vsx_xvcmpeqsp_p:
10372         CompareOpc = 67;
10373         break;
10374       case Intrinsic::ppc_vsx_xvcmpgesp_p:
10375         CompareOpc = 83;
10376         break;
10377       case Intrinsic::ppc_vsx_xvcmpgtsp_p:
10378         CompareOpc = 75;
10379         break;
10380       }
10381       isDot = true;
10382     } else
10383       return false;
10384     break;
10385 
10386   // Normal Comparisons.
10387   case Intrinsic::ppc_altivec_vcmpbfp:
10388     CompareOpc = 966;
10389     break;
10390   case Intrinsic::ppc_altivec_vcmpeqfp:
10391     CompareOpc = 198;
10392     break;
10393   case Intrinsic::ppc_altivec_vcmpequb:
10394     CompareOpc = 6;
10395     break;
10396   case Intrinsic::ppc_altivec_vcmpequh:
10397     CompareOpc = 70;
10398     break;
10399   case Intrinsic::ppc_altivec_vcmpequw:
10400     CompareOpc = 134;
10401     break;
10402   case Intrinsic::ppc_altivec_vcmpequd:
10403     if (Subtarget.hasP8Altivec())
10404       CompareOpc = 199;
10405     else
10406       return false;
10407     break;
10408   case Intrinsic::ppc_altivec_vcmpneb:
10409   case Intrinsic::ppc_altivec_vcmpneh:
10410   case Intrinsic::ppc_altivec_vcmpnew:
10411   case Intrinsic::ppc_altivec_vcmpnezb:
10412   case Intrinsic::ppc_altivec_vcmpnezh:
10413   case Intrinsic::ppc_altivec_vcmpnezw:
10414     if (Subtarget.hasP9Altivec())
10415       switch (IntrinsicID) {
10416       default:
10417         llvm_unreachable("Unknown comparison intrinsic.");
10418       case Intrinsic::ppc_altivec_vcmpneb:
10419         CompareOpc = 7;
10420         break;
10421       case Intrinsic::ppc_altivec_vcmpneh:
10422         CompareOpc = 71;
10423         break;
10424       case Intrinsic::ppc_altivec_vcmpnew:
10425         CompareOpc = 135;
10426         break;
10427       case Intrinsic::ppc_altivec_vcmpnezb:
10428         CompareOpc = 263;
10429         break;
10430       case Intrinsic::ppc_altivec_vcmpnezh:
10431         CompareOpc = 327;
10432         break;
10433       case Intrinsic::ppc_altivec_vcmpnezw:
10434         CompareOpc = 391;
10435         break;
10436       }
10437     else
10438       return false;
10439     break;
10440   case Intrinsic::ppc_altivec_vcmpgefp:
10441     CompareOpc = 454;
10442     break;
10443   case Intrinsic::ppc_altivec_vcmpgtfp:
10444     CompareOpc = 710;
10445     break;
10446   case Intrinsic::ppc_altivec_vcmpgtsb:
10447     CompareOpc = 774;
10448     break;
10449   case Intrinsic::ppc_altivec_vcmpgtsh:
10450     CompareOpc = 838;
10451     break;
10452   case Intrinsic::ppc_altivec_vcmpgtsw:
10453     CompareOpc = 902;
10454     break;
10455   case Intrinsic::ppc_altivec_vcmpgtsd:
10456     if (Subtarget.hasP8Altivec())
10457       CompareOpc = 967;
10458     else
10459       return false;
10460     break;
10461   case Intrinsic::ppc_altivec_vcmpgtub:
10462     CompareOpc = 518;
10463     break;
10464   case Intrinsic::ppc_altivec_vcmpgtuh:
10465     CompareOpc = 582;
10466     break;
10467   case Intrinsic::ppc_altivec_vcmpgtuw:
10468     CompareOpc = 646;
10469     break;
10470   case Intrinsic::ppc_altivec_vcmpgtud:
10471     if (Subtarget.hasP8Altivec())
10472       CompareOpc = 711;
10473     else
10474       return false;
10475     break;
10476   }
10477   return true;
10478 }
10479 
10480 /// LowerINTRINSIC_WO_CHAIN - If this is an intrinsic that we want to custom
10481 /// lower, do it, otherwise return null.
10482 SDValue PPCTargetLowering::LowerINTRINSIC_WO_CHAIN(SDValue Op,
10483                                                    SelectionDAG &DAG) const {
10484   unsigned IntrinsicID =
10485     cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue();
10486 
10487   SDLoc dl(Op);
10488 
10489   if (IntrinsicID == Intrinsic::thread_pointer) {
10490     // Reads the thread pointer register, used for __builtin_thread_pointer.
10491     if (Subtarget.isPPC64())
10492       return DAG.getRegister(PPC::X13, MVT::i64);
10493     return DAG.getRegister(PPC::R2, MVT::i32);
10494   }
10495 
10496   // If this is a lowered altivec predicate compare, CompareOpc is set to the
10497   // opcode number of the comparison.
10498   int CompareOpc;
10499   bool isDot;
10500   if (!getVectorCompareInfo(Op, CompareOpc, isDot, Subtarget))
10501     return SDValue();    // Don't custom lower most intrinsics.
10502 
10503   // If this is a non-dot comparison, make the VCMP node and we are done.
10504   if (!isDot) {
10505     SDValue Tmp = DAG.getNode(PPCISD::VCMP, dl, Op.getOperand(2).getValueType(),
10506                               Op.getOperand(1), Op.getOperand(2),
10507                               DAG.getConstant(CompareOpc, dl, MVT::i32));
10508     return DAG.getNode(ISD::BITCAST, dl, Op.getValueType(), Tmp);
10509   }
10510 
10511   // Create the PPCISD altivec 'dot' comparison node.
10512   SDValue Ops[] = {
10513     Op.getOperand(2),  // LHS
10514     Op.getOperand(3),  // RHS
10515     DAG.getConstant(CompareOpc, dl, MVT::i32)
10516   };
10517   EVT VTs[] = { Op.getOperand(2).getValueType(), MVT::Glue };
10518   SDValue CompNode = DAG.getNode(PPCISD::VCMPo, dl, VTs, Ops);
10519 
10520   // Now that we have the comparison, emit a copy from the CR to a GPR.
10521   // This is flagged to the above dot comparison.
10522   SDValue Flags = DAG.getNode(PPCISD::MFOCRF, dl, MVT::i32,
10523                                 DAG.getRegister(PPC::CR6, MVT::i32),
10524                                 CompNode.getValue(1));
10525 
10526   // Unpack the result based on how the target uses it.
10527   unsigned BitNo;   // Bit # of CR6.
10528   bool InvertBit;   // Invert result?
10529   switch (cast<ConstantSDNode>(Op.getOperand(1))->getZExtValue()) {
10530   default:  // Can't happen, don't crash on invalid number though.
10531   case 0:   // Return the value of the EQ bit of CR6.
10532     BitNo = 0; InvertBit = false;
10533     break;
10534   case 1:   // Return the inverted value of the EQ bit of CR6.
10535     BitNo = 0; InvertBit = true;
10536     break;
10537   case 2:   // Return the value of the LT bit of CR6.
10538     BitNo = 2; InvertBit = false;
10539     break;
10540   case 3:   // Return the inverted value of the LT bit of CR6.
10541     BitNo = 2; InvertBit = true;
10542     break;
10543   }
10544 
10545   // Shift the bit into the low position.
10546   Flags = DAG.getNode(ISD::SRL, dl, MVT::i32, Flags,
10547                       DAG.getConstant(8 - (3 - BitNo), dl, MVT::i32));
10548   // Isolate the bit.
10549   Flags = DAG.getNode(ISD::AND, dl, MVT::i32, Flags,
10550                       DAG.getConstant(1, dl, MVT::i32));
10551 
10552   // If we are supposed to, toggle the bit.
10553   if (InvertBit)
10554     Flags = DAG.getNode(ISD::XOR, dl, MVT::i32, Flags,
10555                         DAG.getConstant(1, dl, MVT::i32));
10556   return Flags;
10557 }
10558 
10559 SDValue PPCTargetLowering::LowerINTRINSIC_VOID(SDValue Op,
10560                                                SelectionDAG &DAG) const {
10561   // SelectionDAGBuilder::visitTargetIntrinsic may insert one extra chain to
10562   // the beginning of the argument list.
10563   int ArgStart = isa<ConstantSDNode>(Op.getOperand(0)) ? 0 : 1;
10564   SDLoc DL(Op);
10565   switch (cast<ConstantSDNode>(Op.getOperand(ArgStart))->getZExtValue()) {
10566   case Intrinsic::ppc_cfence: {
10567     assert(ArgStart == 1 && "llvm.ppc.cfence must carry a chain argument.");
10568     assert(Subtarget.isPPC64() && "Only 64-bit is supported for now.");
10569     return SDValue(DAG.getMachineNode(PPC::CFENCE8, DL, MVT::Other,
10570                                       DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i64,
10571                                                   Op.getOperand(ArgStart + 1)),
10572                                       Op.getOperand(0)),
10573                    0);
10574   }
10575   default:
10576     break;
10577   }
10578   return SDValue();
10579 }
10580 
10581 // Lower scalar BSWAP64 to xxbrd.
10582 SDValue PPCTargetLowering::LowerBSWAP(SDValue Op, SelectionDAG &DAG) const {
10583   SDLoc dl(Op);
10584   // MTVSRDD
10585   Op = DAG.getNode(ISD::BUILD_VECTOR, dl, MVT::v2i64, Op.getOperand(0),
10586                    Op.getOperand(0));
10587   // XXBRD
10588   Op = DAG.getNode(ISD::BSWAP, dl, MVT::v2i64, Op);
10589   // MFVSRD
10590   int VectorIndex = 0;
10591   if (Subtarget.isLittleEndian())
10592     VectorIndex = 1;
10593   Op = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, MVT::i64, Op,
10594                    DAG.getTargetConstant(VectorIndex, dl, MVT::i32));
10595   return Op;
10596 }
10597 
10598 // ATOMIC_CMP_SWAP for i8/i16 needs to zero-extend its input since it will be
10599 // compared to a value that is atomically loaded (atomic loads zero-extend).
10600 SDValue PPCTargetLowering::LowerATOMIC_CMP_SWAP(SDValue Op,
10601                                                 SelectionDAG &DAG) const {
10602   assert(Op.getOpcode() == ISD::ATOMIC_CMP_SWAP &&
10603          "Expecting an atomic compare-and-swap here.");
10604   SDLoc dl(Op);
10605   auto *AtomicNode = cast<AtomicSDNode>(Op.getNode());
10606   EVT MemVT = AtomicNode->getMemoryVT();
10607   if (MemVT.getSizeInBits() >= 32)
10608     return Op;
10609 
10610   SDValue CmpOp = Op.getOperand(2);
10611   // If this is already correctly zero-extended, leave it alone.
10612   auto HighBits = APInt::getHighBitsSet(32, 32 - MemVT.getSizeInBits());
10613   if (DAG.MaskedValueIsZero(CmpOp, HighBits))
10614     return Op;
10615 
10616   // Clear the high bits of the compare operand.
10617   unsigned MaskVal = (1 << MemVT.getSizeInBits()) - 1;
10618   SDValue NewCmpOp =
10619     DAG.getNode(ISD::AND, dl, MVT::i32, CmpOp,
10620                 DAG.getConstant(MaskVal, dl, MVT::i32));
10621 
10622   // Replace the existing compare operand with the properly zero-extended one.
10623   SmallVector<SDValue, 4> Ops;
10624   for (int i = 0, e = AtomicNode->getNumOperands(); i < e; i++)
10625     Ops.push_back(AtomicNode->getOperand(i));
10626   Ops[2] = NewCmpOp;
10627   MachineMemOperand *MMO = AtomicNode->getMemOperand();
10628   SDVTList Tys = DAG.getVTList(MVT::i32, MVT::Other);
10629   auto NodeTy =
10630     (MemVT == MVT::i8) ? PPCISD::ATOMIC_CMP_SWAP_8 : PPCISD::ATOMIC_CMP_SWAP_16;
10631   return DAG.getMemIntrinsicNode(NodeTy, dl, Tys, Ops, MemVT, MMO);
10632 }
10633 
10634 SDValue PPCTargetLowering::LowerSCALAR_TO_VECTOR(SDValue Op,
10635                                                  SelectionDAG &DAG) const {
10636   SDLoc dl(Op);
10637   // Create a stack slot that is 16-byte aligned.
10638   MachineFrameInfo &MFI = DAG.getMachineFunction().getFrameInfo();
10639   int FrameIdx = MFI.CreateStackObject(16, Align(16), false);
10640   EVT PtrVT = getPointerTy(DAG.getDataLayout());
10641   SDValue FIdx = DAG.getFrameIndex(FrameIdx, PtrVT);
10642 
10643   // Store the input value into Value#0 of the stack slot.
10644   SDValue Store = DAG.getStore(DAG.getEntryNode(), dl, Op.getOperand(0), FIdx,
10645                                MachinePointerInfo());
10646   // Load it out.
10647   return DAG.getLoad(Op.getValueType(), dl, Store, FIdx, MachinePointerInfo());
10648 }
10649 
10650 SDValue PPCTargetLowering::LowerINSERT_VECTOR_ELT(SDValue Op,
10651                                                   SelectionDAG &DAG) const {
10652   assert(Op.getOpcode() == ISD::INSERT_VECTOR_ELT &&
10653          "Should only be called for ISD::INSERT_VECTOR_ELT");
10654 
10655   ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op.getOperand(2));
10656   // We have legal lowering for constant indices but not for variable ones.
10657   if (!C)
10658     return SDValue();
10659 
10660   EVT VT = Op.getValueType();
10661   SDLoc dl(Op);
10662   SDValue V1 = Op.getOperand(0);
10663   SDValue V2 = Op.getOperand(1);
10664   // We can use MTVSRZ + VECINSERT for v8i16 and v16i8 types.
10665   if (VT == MVT::v8i16 || VT == MVT::v16i8) {
10666     SDValue Mtvsrz = DAG.getNode(PPCISD::MTVSRZ, dl, VT, V2);
10667     unsigned BytesInEachElement = VT.getVectorElementType().getSizeInBits() / 8;
10668     unsigned InsertAtElement = C->getZExtValue();
10669     unsigned InsertAtByte = InsertAtElement * BytesInEachElement;
10670     if (Subtarget.isLittleEndian()) {
10671       InsertAtByte = (16 - BytesInEachElement) - InsertAtByte;
10672     }
10673     return DAG.getNode(PPCISD::VECINSERT, dl, VT, V1, Mtvsrz,
10674                        DAG.getConstant(InsertAtByte, dl, MVT::i32));
10675   }
10676   return Op;
10677 }
10678 
10679 SDValue PPCTargetLowering::LowerEXTRACT_VECTOR_ELT(SDValue Op,
10680                                                    SelectionDAG &DAG) const {
10681   SDLoc dl(Op);
10682   SDNode *N = Op.getNode();
10683 
10684   assert(N->getOperand(0).getValueType() == MVT::v4i1 &&
10685          "Unknown extract_vector_elt type");
10686 
10687   SDValue Value = N->getOperand(0);
10688 
10689   // The first part of this is like the store lowering except that we don't
10690   // need to track the chain.
10691 
10692   // The values are now known to be -1 (false) or 1 (true). To convert this
10693   // into 0 (false) and 1 (true), add 1 and then divide by 2 (multiply by 0.5).
10694   // This can be done with an fma and the 0.5 constant: (V+1.0)*0.5 = 0.5*V+0.5
10695   Value = DAG.getNode(PPCISD::QBFLT, dl, MVT::v4f64, Value);
10696 
10697   // FIXME: We can make this an f32 vector, but the BUILD_VECTOR code needs to
10698   // understand how to form the extending load.
10699   SDValue FPHalfs = DAG.getConstantFP(0.5, dl, MVT::v4f64);
10700 
10701   Value = DAG.getNode(ISD::FMA, dl, MVT::v4f64, Value, FPHalfs, FPHalfs);
10702 
10703   // Now convert to an integer and store.
10704   Value = DAG.getNode(ISD::INTRINSIC_WO_CHAIN, dl, MVT::v4f64,
10705     DAG.getConstant(Intrinsic::ppc_qpx_qvfctiwu, dl, MVT::i32),
10706     Value);
10707 
10708   MachineFrameInfo &MFI = DAG.getMachineFunction().getFrameInfo();
10709   int FrameIdx = MFI.CreateStackObject(16, Align(16), false);
10710   MachinePointerInfo PtrInfo =
10711       MachinePointerInfo::getFixedStack(DAG.getMachineFunction(), FrameIdx);
10712   EVT PtrVT = getPointerTy(DAG.getDataLayout());
10713   SDValue FIdx = DAG.getFrameIndex(FrameIdx, PtrVT);
10714 
10715   SDValue StoreChain = DAG.getEntryNode();
10716   SDValue Ops[] = {StoreChain,
10717                    DAG.getConstant(Intrinsic::ppc_qpx_qvstfiw, dl, MVT::i32),
10718                    Value, FIdx};
10719   SDVTList VTs = DAG.getVTList(/*chain*/ MVT::Other);
10720 
10721   StoreChain = DAG.getMemIntrinsicNode(ISD::INTRINSIC_VOID,
10722     dl, VTs, Ops, MVT::v4i32, PtrInfo);
10723 
10724   // Extract the value requested.
10725   unsigned Offset = 4*cast<ConstantSDNode>(N->getOperand(1))->getZExtValue();
10726   SDValue Idx = DAG.getConstant(Offset, dl, FIdx.getValueType());
10727   Idx = DAG.getNode(ISD::ADD, dl, FIdx.getValueType(), FIdx, Idx);
10728 
10729   SDValue IntVal =
10730       DAG.getLoad(MVT::i32, dl, StoreChain, Idx, PtrInfo.getWithOffset(Offset));
10731 
10732   if (!Subtarget.useCRBits())
10733     return IntVal;
10734 
10735   return DAG.getNode(ISD::TRUNCATE, dl, MVT::i1, IntVal);
10736 }
10737 
10738 /// Lowering for QPX v4i1 loads
10739 SDValue PPCTargetLowering::LowerVectorLoad(SDValue Op,
10740                                            SelectionDAG &DAG) const {
10741   SDLoc dl(Op);
10742   LoadSDNode *LN = cast<LoadSDNode>(Op.getNode());
10743   SDValue LoadChain = LN->getChain();
10744   SDValue BasePtr = LN->getBasePtr();
10745 
10746   if (Op.getValueType() == MVT::v4f64 ||
10747       Op.getValueType() == MVT::v4f32) {
10748     EVT MemVT = LN->getMemoryVT();
10749     unsigned Alignment = LN->getAlignment();
10750 
10751     // If this load is properly aligned, then it is legal.
10752     if (Alignment >= MemVT.getStoreSize())
10753       return Op;
10754 
10755     EVT ScalarVT = Op.getValueType().getScalarType(),
10756         ScalarMemVT = MemVT.getScalarType();
10757     unsigned Stride = ScalarMemVT.getStoreSize();
10758 
10759     SDValue Vals[4], LoadChains[4];
10760     for (unsigned Idx = 0; Idx < 4; ++Idx) {
10761       SDValue Load;
10762       if (ScalarVT != ScalarMemVT)
10763         Load = DAG.getExtLoad(LN->getExtensionType(), dl, ScalarVT, LoadChain,
10764                               BasePtr,
10765                               LN->getPointerInfo().getWithOffset(Idx * Stride),
10766                               ScalarMemVT, MinAlign(Alignment, Idx * Stride),
10767                               LN->getMemOperand()->getFlags(), LN->getAAInfo());
10768       else
10769         Load = DAG.getLoad(ScalarVT, dl, LoadChain, BasePtr,
10770                            LN->getPointerInfo().getWithOffset(Idx * Stride),
10771                            MinAlign(Alignment, Idx * Stride),
10772                            LN->getMemOperand()->getFlags(), LN->getAAInfo());
10773 
10774       if (Idx == 0 && LN->isIndexed()) {
10775         assert(LN->getAddressingMode() == ISD::PRE_INC &&
10776                "Unknown addressing mode on vector load");
10777         Load = DAG.getIndexedLoad(Load, dl, BasePtr, LN->getOffset(),
10778                                   LN->getAddressingMode());
10779       }
10780 
10781       Vals[Idx] = Load;
10782       LoadChains[Idx] = Load.getValue(1);
10783 
10784       BasePtr = DAG.getNode(ISD::ADD, dl, BasePtr.getValueType(), BasePtr,
10785                             DAG.getConstant(Stride, dl,
10786                                             BasePtr.getValueType()));
10787     }
10788 
10789     SDValue TF =  DAG.getNode(ISD::TokenFactor, dl, MVT::Other, LoadChains);
10790     SDValue Value = DAG.getBuildVector(Op.getValueType(), dl, Vals);
10791 
10792     if (LN->isIndexed()) {
10793       SDValue RetOps[] = { Value, Vals[0].getValue(1), TF };
10794       return DAG.getMergeValues(RetOps, dl);
10795     }
10796 
10797     SDValue RetOps[] = { Value, TF };
10798     return DAG.getMergeValues(RetOps, dl);
10799   }
10800 
10801   assert(Op.getValueType() == MVT::v4i1 && "Unknown load to lower");
10802   assert(LN->isUnindexed() && "Indexed v4i1 loads are not supported");
10803 
10804   // To lower v4i1 from a byte array, we load the byte elements of the
10805   // vector and then reuse the BUILD_VECTOR logic.
10806 
10807   SDValue VectElmts[4], VectElmtChains[4];
10808   for (unsigned i = 0; i < 4; ++i) {
10809     SDValue Idx = DAG.getConstant(i, dl, BasePtr.getValueType());
10810     Idx = DAG.getNode(ISD::ADD, dl, BasePtr.getValueType(), BasePtr, Idx);
10811 
10812     VectElmts[i] = DAG.getExtLoad(
10813         ISD::EXTLOAD, dl, MVT::i32, LoadChain, Idx,
10814         LN->getPointerInfo().getWithOffset(i), MVT::i8,
10815         /* Alignment = */ 1, LN->getMemOperand()->getFlags(), LN->getAAInfo());
10816     VectElmtChains[i] = VectElmts[i].getValue(1);
10817   }
10818 
10819   LoadChain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, VectElmtChains);
10820   SDValue Value = DAG.getBuildVector(MVT::v4i1, dl, VectElmts);
10821 
10822   SDValue RVals[] = { Value, LoadChain };
10823   return DAG.getMergeValues(RVals, dl);
10824 }
10825 
10826 /// Lowering for QPX v4i1 stores
10827 SDValue PPCTargetLowering::LowerVectorStore(SDValue Op,
10828                                             SelectionDAG &DAG) const {
10829   SDLoc dl(Op);
10830   StoreSDNode *SN = cast<StoreSDNode>(Op.getNode());
10831   SDValue StoreChain = SN->getChain();
10832   SDValue BasePtr = SN->getBasePtr();
10833   SDValue Value = SN->getValue();
10834 
10835   if (Value.getValueType() == MVT::v4f64 ||
10836       Value.getValueType() == MVT::v4f32) {
10837     EVT MemVT = SN->getMemoryVT();
10838     unsigned Alignment = SN->getAlignment();
10839 
10840     // If this store is properly aligned, then it is legal.
10841     if (Alignment >= MemVT.getStoreSize())
10842       return Op;
10843 
10844     EVT ScalarVT = Value.getValueType().getScalarType(),
10845         ScalarMemVT = MemVT.getScalarType();
10846     unsigned Stride = ScalarMemVT.getStoreSize();
10847 
10848     SDValue Stores[4];
10849     for (unsigned Idx = 0; Idx < 4; ++Idx) {
10850       SDValue Ex = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, ScalarVT, Value,
10851                                DAG.getVectorIdxConstant(Idx, dl));
10852       SDValue Store;
10853       if (ScalarVT != ScalarMemVT)
10854         Store =
10855             DAG.getTruncStore(StoreChain, dl, Ex, BasePtr,
10856                               SN->getPointerInfo().getWithOffset(Idx * Stride),
10857                               ScalarMemVT, MinAlign(Alignment, Idx * Stride),
10858                               SN->getMemOperand()->getFlags(), SN->getAAInfo());
10859       else
10860         Store = DAG.getStore(StoreChain, dl, Ex, BasePtr,
10861                              SN->getPointerInfo().getWithOffset(Idx * Stride),
10862                              MinAlign(Alignment, Idx * Stride),
10863                              SN->getMemOperand()->getFlags(), SN->getAAInfo());
10864 
10865       if (Idx == 0 && SN->isIndexed()) {
10866         assert(SN->getAddressingMode() == ISD::PRE_INC &&
10867                "Unknown addressing mode on vector store");
10868         Store = DAG.getIndexedStore(Store, dl, BasePtr, SN->getOffset(),
10869                                     SN->getAddressingMode());
10870       }
10871 
10872       BasePtr = DAG.getNode(ISD::ADD, dl, BasePtr.getValueType(), BasePtr,
10873                             DAG.getConstant(Stride, dl,
10874                                             BasePtr.getValueType()));
10875       Stores[Idx] = Store;
10876     }
10877 
10878     SDValue TF =  DAG.getNode(ISD::TokenFactor, dl, MVT::Other, Stores);
10879 
10880     if (SN->isIndexed()) {
10881       SDValue RetOps[] = { TF, Stores[0].getValue(1) };
10882       return DAG.getMergeValues(RetOps, dl);
10883     }
10884 
10885     return TF;
10886   }
10887 
10888   assert(SN->isUnindexed() && "Indexed v4i1 stores are not supported");
10889   assert(Value.getValueType() == MVT::v4i1 && "Unknown store to lower");
10890 
10891   // The values are now known to be -1 (false) or 1 (true). To convert this
10892   // into 0 (false) and 1 (true), add 1 and then divide by 2 (multiply by 0.5).
10893   // This can be done with an fma and the 0.5 constant: (V+1.0)*0.5 = 0.5*V+0.5
10894   Value = DAG.getNode(PPCISD::QBFLT, dl, MVT::v4f64, Value);
10895 
10896   // FIXME: We can make this an f32 vector, but the BUILD_VECTOR code needs to
10897   // understand how to form the extending load.
10898   SDValue FPHalfs = DAG.getConstantFP(0.5, dl, MVT::v4f64);
10899 
10900   Value = DAG.getNode(ISD::FMA, dl, MVT::v4f64, Value, FPHalfs, FPHalfs);
10901 
10902   // Now convert to an integer and store.
10903   Value = DAG.getNode(ISD::INTRINSIC_WO_CHAIN, dl, MVT::v4f64,
10904     DAG.getConstant(Intrinsic::ppc_qpx_qvfctiwu, dl, MVT::i32),
10905     Value);
10906 
10907   MachineFrameInfo &MFI = DAG.getMachineFunction().getFrameInfo();
10908   int FrameIdx = MFI.CreateStackObject(16, Align(16), false);
10909   MachinePointerInfo PtrInfo =
10910       MachinePointerInfo::getFixedStack(DAG.getMachineFunction(), FrameIdx);
10911   EVT PtrVT = getPointerTy(DAG.getDataLayout());
10912   SDValue FIdx = DAG.getFrameIndex(FrameIdx, PtrVT);
10913 
10914   SDValue Ops[] = {StoreChain,
10915                    DAG.getConstant(Intrinsic::ppc_qpx_qvstfiw, dl, MVT::i32),
10916                    Value, FIdx};
10917   SDVTList VTs = DAG.getVTList(/*chain*/ MVT::Other);
10918 
10919   StoreChain = DAG.getMemIntrinsicNode(ISD::INTRINSIC_VOID,
10920     dl, VTs, Ops, MVT::v4i32, PtrInfo);
10921 
10922   // Move data into the byte array.
10923   SDValue Loads[4], LoadChains[4];
10924   for (unsigned i = 0; i < 4; ++i) {
10925     unsigned Offset = 4*i;
10926     SDValue Idx = DAG.getConstant(Offset, dl, FIdx.getValueType());
10927     Idx = DAG.getNode(ISD::ADD, dl, FIdx.getValueType(), FIdx, Idx);
10928 
10929     Loads[i] = DAG.getLoad(MVT::i32, dl, StoreChain, Idx,
10930                            PtrInfo.getWithOffset(Offset));
10931     LoadChains[i] = Loads[i].getValue(1);
10932   }
10933 
10934   StoreChain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, LoadChains);
10935 
10936   SDValue Stores[4];
10937   for (unsigned i = 0; i < 4; ++i) {
10938     SDValue Idx = DAG.getConstant(i, dl, BasePtr.getValueType());
10939     Idx = DAG.getNode(ISD::ADD, dl, BasePtr.getValueType(), BasePtr, Idx);
10940 
10941     Stores[i] = DAG.getTruncStore(
10942         StoreChain, dl, Loads[i], Idx, SN->getPointerInfo().getWithOffset(i),
10943         MVT::i8, /* Alignment = */ 1, SN->getMemOperand()->getFlags(),
10944         SN->getAAInfo());
10945   }
10946 
10947   StoreChain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, Stores);
10948 
10949   return StoreChain;
10950 }
10951 
10952 SDValue PPCTargetLowering::LowerMUL(SDValue Op, SelectionDAG &DAG) const {
10953   SDLoc dl(Op);
10954   if (Op.getValueType() == MVT::v4i32) {
10955     SDValue LHS = Op.getOperand(0), RHS = Op.getOperand(1);
10956 
10957     SDValue Zero = getCanonicalConstSplat(0, 1, MVT::v4i32, DAG, dl);
10958     // +16 as shift amt.
10959     SDValue Neg16 = getCanonicalConstSplat(-16, 4, MVT::v4i32, DAG, dl);
10960     SDValue RHSSwap =   // = vrlw RHS, 16
10961       BuildIntrinsicOp(Intrinsic::ppc_altivec_vrlw, RHS, Neg16, DAG, dl);
10962 
10963     // Shrinkify inputs to v8i16.
10964     LHS = DAG.getNode(ISD::BITCAST, dl, MVT::v8i16, LHS);
10965     RHS = DAG.getNode(ISD::BITCAST, dl, MVT::v8i16, RHS);
10966     RHSSwap = DAG.getNode(ISD::BITCAST, dl, MVT::v8i16, RHSSwap);
10967 
10968     // Low parts multiplied together, generating 32-bit results (we ignore the
10969     // top parts).
10970     SDValue LoProd = BuildIntrinsicOp(Intrinsic::ppc_altivec_vmulouh,
10971                                         LHS, RHS, DAG, dl, MVT::v4i32);
10972 
10973     SDValue HiProd = BuildIntrinsicOp(Intrinsic::ppc_altivec_vmsumuhm,
10974                                       LHS, RHSSwap, Zero, DAG, dl, MVT::v4i32);
10975     // Shift the high parts up 16 bits.
10976     HiProd = BuildIntrinsicOp(Intrinsic::ppc_altivec_vslw, HiProd,
10977                               Neg16, DAG, dl);
10978     return DAG.getNode(ISD::ADD, dl, MVT::v4i32, LoProd, HiProd);
10979   } else if (Op.getValueType() == MVT::v16i8) {
10980     SDValue LHS = Op.getOperand(0), RHS = Op.getOperand(1);
10981     bool isLittleEndian = Subtarget.isLittleEndian();
10982 
10983     // Multiply the even 8-bit parts, producing 16-bit sums.
10984     SDValue EvenParts = BuildIntrinsicOp(Intrinsic::ppc_altivec_vmuleub,
10985                                            LHS, RHS, DAG, dl, MVT::v8i16);
10986     EvenParts = DAG.getNode(ISD::BITCAST, dl, MVT::v16i8, EvenParts);
10987 
10988     // Multiply the odd 8-bit parts, producing 16-bit sums.
10989     SDValue OddParts = BuildIntrinsicOp(Intrinsic::ppc_altivec_vmuloub,
10990                                           LHS, RHS, DAG, dl, MVT::v8i16);
10991     OddParts = DAG.getNode(ISD::BITCAST, dl, MVT::v16i8, OddParts);
10992 
10993     // Merge the results together.  Because vmuleub and vmuloub are
10994     // instructions with a big-endian bias, we must reverse the
10995     // element numbering and reverse the meaning of "odd" and "even"
10996     // when generating little endian code.
10997     int Ops[16];
10998     for (unsigned i = 0; i != 8; ++i) {
10999       if (isLittleEndian) {
11000         Ops[i*2  ] = 2*i;
11001         Ops[i*2+1] = 2*i+16;
11002       } else {
11003         Ops[i*2  ] = 2*i+1;
11004         Ops[i*2+1] = 2*i+1+16;
11005       }
11006     }
11007     if (isLittleEndian)
11008       return DAG.getVectorShuffle(MVT::v16i8, dl, OddParts, EvenParts, Ops);
11009     else
11010       return DAG.getVectorShuffle(MVT::v16i8, dl, EvenParts, OddParts, Ops);
11011   } else {
11012     llvm_unreachable("Unknown mul to lower!");
11013   }
11014 }
11015 
11016 SDValue PPCTargetLowering::LowerABS(SDValue Op, SelectionDAG &DAG) const {
11017 
11018   assert(Op.getOpcode() == ISD::ABS && "Should only be called for ISD::ABS");
11019 
11020   EVT VT = Op.getValueType();
11021   assert(VT.isVector() &&
11022          "Only set vector abs as custom, scalar abs shouldn't reach here!");
11023   assert((VT == MVT::v2i64 || VT == MVT::v4i32 || VT == MVT::v8i16 ||
11024           VT == MVT::v16i8) &&
11025          "Unexpected vector element type!");
11026   assert((VT != MVT::v2i64 || Subtarget.hasP8Altivec()) &&
11027          "Current subtarget doesn't support smax v2i64!");
11028 
11029   // For vector abs, it can be lowered to:
11030   // abs x
11031   // ==>
11032   // y = -x
11033   // smax(x, y)
11034 
11035   SDLoc dl(Op);
11036   SDValue X = Op.getOperand(0);
11037   SDValue Zero = DAG.getConstant(0, dl, VT);
11038   SDValue Y = DAG.getNode(ISD::SUB, dl, VT, Zero, X);
11039 
11040   // SMAX patch https://reviews.llvm.org/D47332
11041   // hasn't landed yet, so use intrinsic first here.
11042   // TODO: Should use SMAX directly once SMAX patch landed
11043   Intrinsic::ID BifID = Intrinsic::ppc_altivec_vmaxsw;
11044   if (VT == MVT::v2i64)
11045     BifID = Intrinsic::ppc_altivec_vmaxsd;
11046   else if (VT == MVT::v8i16)
11047     BifID = Intrinsic::ppc_altivec_vmaxsh;
11048   else if (VT == MVT::v16i8)
11049     BifID = Intrinsic::ppc_altivec_vmaxsb;
11050 
11051   return BuildIntrinsicOp(BifID, X, Y, DAG, dl, VT);
11052 }
11053 
11054 // Custom lowering for fpext vf32 to v2f64
11055 SDValue PPCTargetLowering::LowerFP_EXTEND(SDValue Op, SelectionDAG &DAG) const {
11056 
11057   assert(Op.getOpcode() == ISD::FP_EXTEND &&
11058          "Should only be called for ISD::FP_EXTEND");
11059 
11060   // FIXME: handle extends from half precision float vectors on P9.
11061   // We only want to custom lower an extend from v2f32 to v2f64.
11062   if (Op.getValueType() != MVT::v2f64 ||
11063       Op.getOperand(0).getValueType() != MVT::v2f32)
11064     return SDValue();
11065 
11066   SDLoc dl(Op);
11067   SDValue Op0 = Op.getOperand(0);
11068 
11069   switch (Op0.getOpcode()) {
11070   default:
11071     return SDValue();
11072   case ISD::EXTRACT_SUBVECTOR: {
11073     assert(Op0.getNumOperands() == 2 &&
11074            isa<ConstantSDNode>(Op0->getOperand(1)) &&
11075            "Node should have 2 operands with second one being a constant!");
11076 
11077     if (Op0.getOperand(0).getValueType() != MVT::v4f32)
11078       return SDValue();
11079 
11080     // Custom lower is only done for high or low doubleword.
11081     int Idx = cast<ConstantSDNode>(Op0.getOperand(1))->getZExtValue();
11082     if (Idx % 2 != 0)
11083       return SDValue();
11084 
11085     // Since input is v4f32, at this point Idx is either 0 or 2.
11086     // Shift to get the doubleword position we want.
11087     int DWord = Idx >> 1;
11088 
11089     // High and low word positions are different on little endian.
11090     if (Subtarget.isLittleEndian())
11091       DWord ^= 0x1;
11092 
11093     return DAG.getNode(PPCISD::FP_EXTEND_HALF, dl, MVT::v2f64,
11094                        Op0.getOperand(0), DAG.getConstant(DWord, dl, MVT::i32));
11095   }
11096   case ISD::FADD:
11097   case ISD::FMUL:
11098   case ISD::FSUB: {
11099     SDValue NewLoad[2];
11100     for (unsigned i = 0, ie = Op0.getNumOperands(); i != ie; ++i) {
11101       // Ensure both input are loads.
11102       SDValue LdOp = Op0.getOperand(i);
11103       if (LdOp.getOpcode() != ISD::LOAD)
11104         return SDValue();
11105       // Generate new load node.
11106       LoadSDNode *LD = cast<LoadSDNode>(LdOp);
11107       SDValue LoadOps[] = {LD->getChain(), LD->getBasePtr()};
11108       NewLoad[i] = DAG.getMemIntrinsicNode(
11109           PPCISD::LD_VSX_LH, dl, DAG.getVTList(MVT::v4f32, MVT::Other), LoadOps,
11110           LD->getMemoryVT(), LD->getMemOperand());
11111     }
11112     SDValue NewOp =
11113         DAG.getNode(Op0.getOpcode(), SDLoc(Op0), MVT::v4f32, NewLoad[0],
11114                     NewLoad[1], Op0.getNode()->getFlags());
11115     return DAG.getNode(PPCISD::FP_EXTEND_HALF, dl, MVT::v2f64, NewOp,
11116                        DAG.getConstant(0, dl, MVT::i32));
11117   }
11118   case ISD::LOAD: {
11119     LoadSDNode *LD = cast<LoadSDNode>(Op0);
11120     SDValue LoadOps[] = {LD->getChain(), LD->getBasePtr()};
11121     SDValue NewLd = DAG.getMemIntrinsicNode(
11122         PPCISD::LD_VSX_LH, dl, DAG.getVTList(MVT::v4f32, MVT::Other), LoadOps,
11123         LD->getMemoryVT(), LD->getMemOperand());
11124     return DAG.getNode(PPCISD::FP_EXTEND_HALF, dl, MVT::v2f64, NewLd,
11125                        DAG.getConstant(0, dl, MVT::i32));
11126   }
11127   }
11128   llvm_unreachable("ERROR:Should return for all cases within swtich.");
11129 }
11130 
11131 /// LowerOperation - Provide custom lowering hooks for some operations.
11132 ///
11133 SDValue PPCTargetLowering::LowerOperation(SDValue Op, SelectionDAG &DAG) const {
11134   switch (Op.getOpcode()) {
11135   default: llvm_unreachable("Wasn't expecting to be able to lower this!");
11136   case ISD::ConstantPool:       return LowerConstantPool(Op, DAG);
11137   case ISD::BlockAddress:       return LowerBlockAddress(Op, DAG);
11138   case ISD::GlobalAddress:      return LowerGlobalAddress(Op, DAG);
11139   case ISD::GlobalTLSAddress:   return LowerGlobalTLSAddress(Op, DAG);
11140   case ISD::JumpTable:          return LowerJumpTable(Op, DAG);
11141   case ISD::SETCC:              return LowerSETCC(Op, DAG);
11142   case ISD::INIT_TRAMPOLINE:    return LowerINIT_TRAMPOLINE(Op, DAG);
11143   case ISD::ADJUST_TRAMPOLINE:  return LowerADJUST_TRAMPOLINE(Op, DAG);
11144 
11145   // Variable argument lowering.
11146   case ISD::VASTART:            return LowerVASTART(Op, DAG);
11147   case ISD::VAARG:              return LowerVAARG(Op, DAG);
11148   case ISD::VACOPY:             return LowerVACOPY(Op, DAG);
11149 
11150   case ISD::STACKRESTORE:       return LowerSTACKRESTORE(Op, DAG);
11151   case ISD::DYNAMIC_STACKALLOC: return LowerDYNAMIC_STACKALLOC(Op, DAG);
11152   case ISD::GET_DYNAMIC_AREA_OFFSET:
11153     return LowerGET_DYNAMIC_AREA_OFFSET(Op, DAG);
11154 
11155   // Exception handling lowering.
11156   case ISD::EH_DWARF_CFA:       return LowerEH_DWARF_CFA(Op, DAG);
11157   case ISD::EH_SJLJ_SETJMP:     return lowerEH_SJLJ_SETJMP(Op, DAG);
11158   case ISD::EH_SJLJ_LONGJMP:    return lowerEH_SJLJ_LONGJMP(Op, DAG);
11159 
11160   case ISD::LOAD:               return LowerLOAD(Op, DAG);
11161   case ISD::STORE:              return LowerSTORE(Op, DAG);
11162   case ISD::TRUNCATE:           return LowerTRUNCATE(Op, DAG);
11163   case ISD::SELECT_CC:          return LowerSELECT_CC(Op, DAG);
11164   case ISD::FP_TO_UINT:
11165   case ISD::FP_TO_SINT:         return LowerFP_TO_INT(Op, DAG, SDLoc(Op));
11166   case ISD::UINT_TO_FP:
11167   case ISD::SINT_TO_FP:         return LowerINT_TO_FP(Op, DAG);
11168   case ISD::FLT_ROUNDS_:        return LowerFLT_ROUNDS_(Op, DAG);
11169 
11170   // Lower 64-bit shifts.
11171   case ISD::SHL_PARTS:          return LowerSHL_PARTS(Op, DAG);
11172   case ISD::SRL_PARTS:          return LowerSRL_PARTS(Op, DAG);
11173   case ISD::SRA_PARTS:          return LowerSRA_PARTS(Op, DAG);
11174 
11175   // Vector-related lowering.
11176   case ISD::BUILD_VECTOR:       return LowerBUILD_VECTOR(Op, DAG);
11177   case ISD::VECTOR_SHUFFLE:     return LowerVECTOR_SHUFFLE(Op, DAG);
11178   case ISD::INTRINSIC_WO_CHAIN: return LowerINTRINSIC_WO_CHAIN(Op, DAG);
11179   case ISD::SCALAR_TO_VECTOR:   return LowerSCALAR_TO_VECTOR(Op, DAG);
11180   case ISD::EXTRACT_VECTOR_ELT: return LowerEXTRACT_VECTOR_ELT(Op, DAG);
11181   case ISD::INSERT_VECTOR_ELT:  return LowerINSERT_VECTOR_ELT(Op, DAG);
11182   case ISD::MUL:                return LowerMUL(Op, DAG);
11183   case ISD::ABS:                return LowerABS(Op, DAG);
11184   case ISD::FP_EXTEND:          return LowerFP_EXTEND(Op, DAG);
11185   case ISD::ROTL:               return LowerROTL(Op, DAG);
11186 
11187   // For counter-based loop handling.
11188   case ISD::INTRINSIC_W_CHAIN:  return SDValue();
11189 
11190   case ISD::BITCAST:            return LowerBITCAST(Op, DAG);
11191 
11192   // Frame & Return address.
11193   case ISD::RETURNADDR:         return LowerRETURNADDR(Op, DAG);
11194   case ISD::FRAMEADDR:          return LowerFRAMEADDR(Op, DAG);
11195 
11196   case ISD::INTRINSIC_VOID:
11197     return LowerINTRINSIC_VOID(Op, DAG);
11198   case ISD::BSWAP:
11199     return LowerBSWAP(Op, DAG);
11200   case ISD::ATOMIC_CMP_SWAP:
11201     return LowerATOMIC_CMP_SWAP(Op, DAG);
11202   }
11203 }
11204 
11205 void PPCTargetLowering::ReplaceNodeResults(SDNode *N,
11206                                            SmallVectorImpl<SDValue>&Results,
11207                                            SelectionDAG &DAG) const {
11208   SDLoc dl(N);
11209   switch (N->getOpcode()) {
11210   default:
11211     llvm_unreachable("Do not know how to custom type legalize this operation!");
11212   case ISD::READCYCLECOUNTER: {
11213     SDVTList VTs = DAG.getVTList(MVT::i32, MVT::i32, MVT::Other);
11214     SDValue RTB = DAG.getNode(PPCISD::READ_TIME_BASE, dl, VTs, N->getOperand(0));
11215 
11216     Results.push_back(
11217         DAG.getNode(ISD::BUILD_PAIR, dl, MVT::i64, RTB, RTB.getValue(1)));
11218     Results.push_back(RTB.getValue(2));
11219     break;
11220   }
11221   case ISD::INTRINSIC_W_CHAIN: {
11222     if (cast<ConstantSDNode>(N->getOperand(1))->getZExtValue() !=
11223         Intrinsic::loop_decrement)
11224       break;
11225 
11226     assert(N->getValueType(0) == MVT::i1 &&
11227            "Unexpected result type for CTR decrement intrinsic");
11228     EVT SVT = getSetCCResultType(DAG.getDataLayout(), *DAG.getContext(),
11229                                  N->getValueType(0));
11230     SDVTList VTs = DAG.getVTList(SVT, MVT::Other);
11231     SDValue NewInt = DAG.getNode(N->getOpcode(), dl, VTs, N->getOperand(0),
11232                                  N->getOperand(1));
11233 
11234     Results.push_back(DAG.getNode(ISD::TRUNCATE, dl, MVT::i1, NewInt));
11235     Results.push_back(NewInt.getValue(1));
11236     break;
11237   }
11238   case ISD::VAARG: {
11239     if (!Subtarget.isSVR4ABI() || Subtarget.isPPC64())
11240       return;
11241 
11242     EVT VT = N->getValueType(0);
11243 
11244     if (VT == MVT::i64) {
11245       SDValue NewNode = LowerVAARG(SDValue(N, 1), DAG);
11246 
11247       Results.push_back(NewNode);
11248       Results.push_back(NewNode.getValue(1));
11249     }
11250     return;
11251   }
11252   case ISD::FP_TO_SINT:
11253   case ISD::FP_TO_UINT:
11254     // LowerFP_TO_INT() can only handle f32 and f64.
11255     if (N->getOperand(0).getValueType() == MVT::ppcf128)
11256       return;
11257     Results.push_back(LowerFP_TO_INT(SDValue(N, 0), DAG, dl));
11258     return;
11259   case ISD::TRUNCATE: {
11260     EVT TrgVT = N->getValueType(0);
11261     EVT OpVT = N->getOperand(0).getValueType();
11262     if (TrgVT.isVector() &&
11263         isOperationCustom(N->getOpcode(), TrgVT) &&
11264         OpVT.getSizeInBits() <= 128 &&
11265         isPowerOf2_32(OpVT.getVectorElementType().getSizeInBits()))
11266       Results.push_back(LowerTRUNCATEVector(SDValue(N, 0), DAG));
11267     return;
11268   }
11269   case ISD::BITCAST:
11270     // Don't handle bitcast here.
11271     return;
11272   case ISD::FP_EXTEND:
11273     SDValue Lowered = LowerFP_EXTEND(SDValue(N, 0), DAG);
11274     if (Lowered)
11275       Results.push_back(Lowered);
11276     return;
11277   }
11278 }
11279 
11280 //===----------------------------------------------------------------------===//
11281 //  Other Lowering Code
11282 //===----------------------------------------------------------------------===//
11283 
11284 static Instruction* callIntrinsic(IRBuilder<> &Builder, Intrinsic::ID Id) {
11285   Module *M = Builder.GetInsertBlock()->getParent()->getParent();
11286   Function *Func = Intrinsic::getDeclaration(M, Id);
11287   return Builder.CreateCall(Func, {});
11288 }
11289 
11290 // The mappings for emitLeading/TrailingFence is taken from
11291 // http://www.cl.cam.ac.uk/~pes20/cpp/cpp0xmappings.html
11292 Instruction *PPCTargetLowering::emitLeadingFence(IRBuilder<> &Builder,
11293                                                  Instruction *Inst,
11294                                                  AtomicOrdering Ord) const {
11295   if (Ord == AtomicOrdering::SequentiallyConsistent)
11296     return callIntrinsic(Builder, Intrinsic::ppc_sync);
11297   if (isReleaseOrStronger(Ord))
11298     return callIntrinsic(Builder, Intrinsic::ppc_lwsync);
11299   return nullptr;
11300 }
11301 
11302 Instruction *PPCTargetLowering::emitTrailingFence(IRBuilder<> &Builder,
11303                                                   Instruction *Inst,
11304                                                   AtomicOrdering Ord) const {
11305   if (Inst->hasAtomicLoad() && isAcquireOrStronger(Ord)) {
11306     // See http://www.cl.cam.ac.uk/~pes20/cpp/cpp0xmappings.html and
11307     // http://www.rdrop.com/users/paulmck/scalability/paper/N2745r.2011.03.04a.html
11308     // and http://www.cl.cam.ac.uk/~pes20/cppppc/ for justification.
11309     if (isa<LoadInst>(Inst) && Subtarget.isPPC64())
11310       return Builder.CreateCall(
11311           Intrinsic::getDeclaration(
11312               Builder.GetInsertBlock()->getParent()->getParent(),
11313               Intrinsic::ppc_cfence, {Inst->getType()}),
11314           {Inst});
11315     // FIXME: Can use isync for rmw operation.
11316     return callIntrinsic(Builder, Intrinsic::ppc_lwsync);
11317   }
11318   return nullptr;
11319 }
11320 
11321 MachineBasicBlock *
11322 PPCTargetLowering::EmitAtomicBinary(MachineInstr &MI, MachineBasicBlock *BB,
11323                                     unsigned AtomicSize,
11324                                     unsigned BinOpcode,
11325                                     unsigned CmpOpcode,
11326                                     unsigned CmpPred) const {
11327   // This also handles ATOMIC_SWAP, indicated by BinOpcode==0.
11328   const TargetInstrInfo *TII = Subtarget.getInstrInfo();
11329 
11330   auto LoadMnemonic = PPC::LDARX;
11331   auto StoreMnemonic = PPC::STDCX;
11332   switch (AtomicSize) {
11333   default:
11334     llvm_unreachable("Unexpected size of atomic entity");
11335   case 1:
11336     LoadMnemonic = PPC::LBARX;
11337     StoreMnemonic = PPC::STBCX;
11338     assert(Subtarget.hasPartwordAtomics() && "Call this only with size >=4");
11339     break;
11340   case 2:
11341     LoadMnemonic = PPC::LHARX;
11342     StoreMnemonic = PPC::STHCX;
11343     assert(Subtarget.hasPartwordAtomics() && "Call this only with size >=4");
11344     break;
11345   case 4:
11346     LoadMnemonic = PPC::LWARX;
11347     StoreMnemonic = PPC::STWCX;
11348     break;
11349   case 8:
11350     LoadMnemonic = PPC::LDARX;
11351     StoreMnemonic = PPC::STDCX;
11352     break;
11353   }
11354 
11355   const BasicBlock *LLVM_BB = BB->getBasicBlock();
11356   MachineFunction *F = BB->getParent();
11357   MachineFunction::iterator It = ++BB->getIterator();
11358 
11359   Register dest = MI.getOperand(0).getReg();
11360   Register ptrA = MI.getOperand(1).getReg();
11361   Register ptrB = MI.getOperand(2).getReg();
11362   Register incr = MI.getOperand(3).getReg();
11363   DebugLoc dl = MI.getDebugLoc();
11364 
11365   MachineBasicBlock *loopMBB = F->CreateMachineBasicBlock(LLVM_BB);
11366   MachineBasicBlock *loop2MBB =
11367     CmpOpcode ? F->CreateMachineBasicBlock(LLVM_BB) : nullptr;
11368   MachineBasicBlock *exitMBB = F->CreateMachineBasicBlock(LLVM_BB);
11369   F->insert(It, loopMBB);
11370   if (CmpOpcode)
11371     F->insert(It, loop2MBB);
11372   F->insert(It, exitMBB);
11373   exitMBB->splice(exitMBB->begin(), BB,
11374                   std::next(MachineBasicBlock::iterator(MI)), BB->end());
11375   exitMBB->transferSuccessorsAndUpdatePHIs(BB);
11376 
11377   MachineRegisterInfo &RegInfo = F->getRegInfo();
11378   Register TmpReg = (!BinOpcode) ? incr :
11379     RegInfo.createVirtualRegister( AtomicSize == 8 ? &PPC::G8RCRegClass
11380                                            : &PPC::GPRCRegClass);
11381 
11382   //  thisMBB:
11383   //   ...
11384   //   fallthrough --> loopMBB
11385   BB->addSuccessor(loopMBB);
11386 
11387   //  loopMBB:
11388   //   l[wd]arx dest, ptr
11389   //   add r0, dest, incr
11390   //   st[wd]cx. r0, ptr
11391   //   bne- loopMBB
11392   //   fallthrough --> exitMBB
11393 
11394   // For max/min...
11395   //  loopMBB:
11396   //   l[wd]arx dest, ptr
11397   //   cmpl?[wd] incr, dest
11398   //   bgt exitMBB
11399   //  loop2MBB:
11400   //   st[wd]cx. dest, ptr
11401   //   bne- loopMBB
11402   //   fallthrough --> exitMBB
11403 
11404   BB = loopMBB;
11405   BuildMI(BB, dl, TII->get(LoadMnemonic), dest)
11406     .addReg(ptrA).addReg(ptrB);
11407   if (BinOpcode)
11408     BuildMI(BB, dl, TII->get(BinOpcode), TmpReg).addReg(incr).addReg(dest);
11409   if (CmpOpcode) {
11410     // Signed comparisons of byte or halfword values must be sign-extended.
11411     if (CmpOpcode == PPC::CMPW && AtomicSize < 4) {
11412       Register ExtReg = RegInfo.createVirtualRegister(&PPC::GPRCRegClass);
11413       BuildMI(BB, dl, TII->get(AtomicSize == 1 ? PPC::EXTSB : PPC::EXTSH),
11414               ExtReg).addReg(dest);
11415       BuildMI(BB, dl, TII->get(CmpOpcode), PPC::CR0)
11416         .addReg(incr).addReg(ExtReg);
11417     } else
11418       BuildMI(BB, dl, TII->get(CmpOpcode), PPC::CR0)
11419         .addReg(incr).addReg(dest);
11420 
11421     BuildMI(BB, dl, TII->get(PPC::BCC))
11422       .addImm(CmpPred).addReg(PPC::CR0).addMBB(exitMBB);
11423     BB->addSuccessor(loop2MBB);
11424     BB->addSuccessor(exitMBB);
11425     BB = loop2MBB;
11426   }
11427   BuildMI(BB, dl, TII->get(StoreMnemonic))
11428     .addReg(TmpReg).addReg(ptrA).addReg(ptrB);
11429   BuildMI(BB, dl, TII->get(PPC::BCC))
11430     .addImm(PPC::PRED_NE).addReg(PPC::CR0).addMBB(loopMBB);
11431   BB->addSuccessor(loopMBB);
11432   BB->addSuccessor(exitMBB);
11433 
11434   //  exitMBB:
11435   //   ...
11436   BB = exitMBB;
11437   return BB;
11438 }
11439 
11440 MachineBasicBlock *PPCTargetLowering::EmitPartwordAtomicBinary(
11441     MachineInstr &MI, MachineBasicBlock *BB,
11442     bool is8bit, // operation
11443     unsigned BinOpcode, unsigned CmpOpcode, unsigned CmpPred) const {
11444   // If we support part-word atomic mnemonics, just use them
11445   if (Subtarget.hasPartwordAtomics())
11446     return EmitAtomicBinary(MI, BB, is8bit ? 1 : 2, BinOpcode, CmpOpcode,
11447                             CmpPred);
11448 
11449   // This also handles ATOMIC_SWAP, indicated by BinOpcode==0.
11450   const TargetInstrInfo *TII = Subtarget.getInstrInfo();
11451   // In 64 bit mode we have to use 64 bits for addresses, even though the
11452   // lwarx/stwcx are 32 bits.  With the 32-bit atomics we can use address
11453   // registers without caring whether they're 32 or 64, but here we're
11454   // doing actual arithmetic on the addresses.
11455   bool is64bit = Subtarget.isPPC64();
11456   bool isLittleEndian = Subtarget.isLittleEndian();
11457   unsigned ZeroReg = is64bit ? PPC::ZERO8 : PPC::ZERO;
11458 
11459   const BasicBlock *LLVM_BB = BB->getBasicBlock();
11460   MachineFunction *F = BB->getParent();
11461   MachineFunction::iterator It = ++BB->getIterator();
11462 
11463   Register dest = MI.getOperand(0).getReg();
11464   Register ptrA = MI.getOperand(1).getReg();
11465   Register ptrB = MI.getOperand(2).getReg();
11466   Register incr = MI.getOperand(3).getReg();
11467   DebugLoc dl = MI.getDebugLoc();
11468 
11469   MachineBasicBlock *loopMBB = F->CreateMachineBasicBlock(LLVM_BB);
11470   MachineBasicBlock *loop2MBB =
11471       CmpOpcode ? F->CreateMachineBasicBlock(LLVM_BB) : nullptr;
11472   MachineBasicBlock *exitMBB = F->CreateMachineBasicBlock(LLVM_BB);
11473   F->insert(It, loopMBB);
11474   if (CmpOpcode)
11475     F->insert(It, loop2MBB);
11476   F->insert(It, exitMBB);
11477   exitMBB->splice(exitMBB->begin(), BB,
11478                   std::next(MachineBasicBlock::iterator(MI)), BB->end());
11479   exitMBB->transferSuccessorsAndUpdatePHIs(BB);
11480 
11481   MachineRegisterInfo &RegInfo = F->getRegInfo();
11482   const TargetRegisterClass *RC =
11483       is64bit ? &PPC::G8RCRegClass : &PPC::GPRCRegClass;
11484   const TargetRegisterClass *GPRC = &PPC::GPRCRegClass;
11485 
11486   Register PtrReg = RegInfo.createVirtualRegister(RC);
11487   Register Shift1Reg = RegInfo.createVirtualRegister(GPRC);
11488   Register ShiftReg =
11489       isLittleEndian ? Shift1Reg : RegInfo.createVirtualRegister(GPRC);
11490   Register Incr2Reg = RegInfo.createVirtualRegister(GPRC);
11491   Register MaskReg = RegInfo.createVirtualRegister(GPRC);
11492   Register Mask2Reg = RegInfo.createVirtualRegister(GPRC);
11493   Register Mask3Reg = RegInfo.createVirtualRegister(GPRC);
11494   Register Tmp2Reg = RegInfo.createVirtualRegister(GPRC);
11495   Register Tmp3Reg = RegInfo.createVirtualRegister(GPRC);
11496   Register Tmp4Reg = RegInfo.createVirtualRegister(GPRC);
11497   Register TmpDestReg = RegInfo.createVirtualRegister(GPRC);
11498   Register Ptr1Reg;
11499   Register TmpReg =
11500       (!BinOpcode) ? Incr2Reg : RegInfo.createVirtualRegister(GPRC);
11501 
11502   //  thisMBB:
11503   //   ...
11504   //   fallthrough --> loopMBB
11505   BB->addSuccessor(loopMBB);
11506 
11507   // The 4-byte load must be aligned, while a char or short may be
11508   // anywhere in the word.  Hence all this nasty bookkeeping code.
11509   //   add ptr1, ptrA, ptrB [copy if ptrA==0]
11510   //   rlwinm shift1, ptr1, 3, 27, 28 [3, 27, 27]
11511   //   xori shift, shift1, 24 [16]
11512   //   rlwinm ptr, ptr1, 0, 0, 29
11513   //   slw incr2, incr, shift
11514   //   li mask2, 255 [li mask3, 0; ori mask2, mask3, 65535]
11515   //   slw mask, mask2, shift
11516   //  loopMBB:
11517   //   lwarx tmpDest, ptr
11518   //   add tmp, tmpDest, incr2
11519   //   andc tmp2, tmpDest, mask
11520   //   and tmp3, tmp, mask
11521   //   or tmp4, tmp3, tmp2
11522   //   stwcx. tmp4, ptr
11523   //   bne- loopMBB
11524   //   fallthrough --> exitMBB
11525   //   srw dest, tmpDest, shift
11526   if (ptrA != ZeroReg) {
11527     Ptr1Reg = RegInfo.createVirtualRegister(RC);
11528     BuildMI(BB, dl, TII->get(is64bit ? PPC::ADD8 : PPC::ADD4), Ptr1Reg)
11529         .addReg(ptrA)
11530         .addReg(ptrB);
11531   } else {
11532     Ptr1Reg = ptrB;
11533   }
11534   // We need use 32-bit subregister to avoid mismatch register class in 64-bit
11535   // mode.
11536   BuildMI(BB, dl, TII->get(PPC::RLWINM), Shift1Reg)
11537       .addReg(Ptr1Reg, 0, is64bit ? PPC::sub_32 : 0)
11538       .addImm(3)
11539       .addImm(27)
11540       .addImm(is8bit ? 28 : 27);
11541   if (!isLittleEndian)
11542     BuildMI(BB, dl, TII->get(PPC::XORI), ShiftReg)
11543         .addReg(Shift1Reg)
11544         .addImm(is8bit ? 24 : 16);
11545   if (is64bit)
11546     BuildMI(BB, dl, TII->get(PPC::RLDICR), PtrReg)
11547         .addReg(Ptr1Reg)
11548         .addImm(0)
11549         .addImm(61);
11550   else
11551     BuildMI(BB, dl, TII->get(PPC::RLWINM), PtrReg)
11552         .addReg(Ptr1Reg)
11553         .addImm(0)
11554         .addImm(0)
11555         .addImm(29);
11556   BuildMI(BB, dl, TII->get(PPC::SLW), Incr2Reg).addReg(incr).addReg(ShiftReg);
11557   if (is8bit)
11558     BuildMI(BB, dl, TII->get(PPC::LI), Mask2Reg).addImm(255);
11559   else {
11560     BuildMI(BB, dl, TII->get(PPC::LI), Mask3Reg).addImm(0);
11561     BuildMI(BB, dl, TII->get(PPC::ORI), Mask2Reg)
11562         .addReg(Mask3Reg)
11563         .addImm(65535);
11564   }
11565   BuildMI(BB, dl, TII->get(PPC::SLW), MaskReg)
11566       .addReg(Mask2Reg)
11567       .addReg(ShiftReg);
11568 
11569   BB = loopMBB;
11570   BuildMI(BB, dl, TII->get(PPC::LWARX), TmpDestReg)
11571       .addReg(ZeroReg)
11572       .addReg(PtrReg);
11573   if (BinOpcode)
11574     BuildMI(BB, dl, TII->get(BinOpcode), TmpReg)
11575         .addReg(Incr2Reg)
11576         .addReg(TmpDestReg);
11577   BuildMI(BB, dl, TII->get(PPC::ANDC), Tmp2Reg)
11578       .addReg(TmpDestReg)
11579       .addReg(MaskReg);
11580   BuildMI(BB, dl, TII->get(PPC::AND), Tmp3Reg).addReg(TmpReg).addReg(MaskReg);
11581   if (CmpOpcode) {
11582     // For unsigned comparisons, we can directly compare the shifted values.
11583     // For signed comparisons we shift and sign extend.
11584     Register SReg = RegInfo.createVirtualRegister(GPRC);
11585     BuildMI(BB, dl, TII->get(PPC::AND), SReg)
11586         .addReg(TmpDestReg)
11587         .addReg(MaskReg);
11588     unsigned ValueReg = SReg;
11589     unsigned CmpReg = Incr2Reg;
11590     if (CmpOpcode == PPC::CMPW) {
11591       ValueReg = RegInfo.createVirtualRegister(GPRC);
11592       BuildMI(BB, dl, TII->get(PPC::SRW), ValueReg)
11593           .addReg(SReg)
11594           .addReg(ShiftReg);
11595       Register ValueSReg = RegInfo.createVirtualRegister(GPRC);
11596       BuildMI(BB, dl, TII->get(is8bit ? PPC::EXTSB : PPC::EXTSH), ValueSReg)
11597           .addReg(ValueReg);
11598       ValueReg = ValueSReg;
11599       CmpReg = incr;
11600     }
11601     BuildMI(BB, dl, TII->get(CmpOpcode), PPC::CR0)
11602         .addReg(CmpReg)
11603         .addReg(ValueReg);
11604     BuildMI(BB, dl, TII->get(PPC::BCC))
11605         .addImm(CmpPred)
11606         .addReg(PPC::CR0)
11607         .addMBB(exitMBB);
11608     BB->addSuccessor(loop2MBB);
11609     BB->addSuccessor(exitMBB);
11610     BB = loop2MBB;
11611   }
11612   BuildMI(BB, dl, TII->get(PPC::OR), Tmp4Reg).addReg(Tmp3Reg).addReg(Tmp2Reg);
11613   BuildMI(BB, dl, TII->get(PPC::STWCX))
11614       .addReg(Tmp4Reg)
11615       .addReg(ZeroReg)
11616       .addReg(PtrReg);
11617   BuildMI(BB, dl, TII->get(PPC::BCC))
11618       .addImm(PPC::PRED_NE)
11619       .addReg(PPC::CR0)
11620       .addMBB(loopMBB);
11621   BB->addSuccessor(loopMBB);
11622   BB->addSuccessor(exitMBB);
11623 
11624   //  exitMBB:
11625   //   ...
11626   BB = exitMBB;
11627   BuildMI(*BB, BB->begin(), dl, TII->get(PPC::SRW), dest)
11628       .addReg(TmpDestReg)
11629       .addReg(ShiftReg);
11630   return BB;
11631 }
11632 
11633 llvm::MachineBasicBlock *
11634 PPCTargetLowering::emitEHSjLjSetJmp(MachineInstr &MI,
11635                                     MachineBasicBlock *MBB) const {
11636   DebugLoc DL = MI.getDebugLoc();
11637   const TargetInstrInfo *TII = Subtarget.getInstrInfo();
11638   const PPCRegisterInfo *TRI = Subtarget.getRegisterInfo();
11639 
11640   MachineFunction *MF = MBB->getParent();
11641   MachineRegisterInfo &MRI = MF->getRegInfo();
11642 
11643   const BasicBlock *BB = MBB->getBasicBlock();
11644   MachineFunction::iterator I = ++MBB->getIterator();
11645 
11646   Register DstReg = MI.getOperand(0).getReg();
11647   const TargetRegisterClass *RC = MRI.getRegClass(DstReg);
11648   assert(TRI->isTypeLegalForClass(*RC, MVT::i32) && "Invalid destination!");
11649   Register mainDstReg = MRI.createVirtualRegister(RC);
11650   Register restoreDstReg = MRI.createVirtualRegister(RC);
11651 
11652   MVT PVT = getPointerTy(MF->getDataLayout());
11653   assert((PVT == MVT::i64 || PVT == MVT::i32) &&
11654          "Invalid Pointer Size!");
11655   // For v = setjmp(buf), we generate
11656   //
11657   // thisMBB:
11658   //  SjLjSetup mainMBB
11659   //  bl mainMBB
11660   //  v_restore = 1
11661   //  b sinkMBB
11662   //
11663   // mainMBB:
11664   //  buf[LabelOffset] = LR
11665   //  v_main = 0
11666   //
11667   // sinkMBB:
11668   //  v = phi(main, restore)
11669   //
11670 
11671   MachineBasicBlock *thisMBB = MBB;
11672   MachineBasicBlock *mainMBB = MF->CreateMachineBasicBlock(BB);
11673   MachineBasicBlock *sinkMBB = MF->CreateMachineBasicBlock(BB);
11674   MF->insert(I, mainMBB);
11675   MF->insert(I, sinkMBB);
11676 
11677   MachineInstrBuilder MIB;
11678 
11679   // Transfer the remainder of BB and its successor edges to sinkMBB.
11680   sinkMBB->splice(sinkMBB->begin(), MBB,
11681                   std::next(MachineBasicBlock::iterator(MI)), MBB->end());
11682   sinkMBB->transferSuccessorsAndUpdatePHIs(MBB);
11683 
11684   // Note that the structure of the jmp_buf used here is not compatible
11685   // with that used by libc, and is not designed to be. Specifically, it
11686   // stores only those 'reserved' registers that LLVM does not otherwise
11687   // understand how to spill. Also, by convention, by the time this
11688   // intrinsic is called, Clang has already stored the frame address in the
11689   // first slot of the buffer and stack address in the third. Following the
11690   // X86 target code, we'll store the jump address in the second slot. We also
11691   // need to save the TOC pointer (R2) to handle jumps between shared
11692   // libraries, and that will be stored in the fourth slot. The thread
11693   // identifier (R13) is not affected.
11694 
11695   // thisMBB:
11696   const int64_t LabelOffset = 1 * PVT.getStoreSize();
11697   const int64_t TOCOffset   = 3 * PVT.getStoreSize();
11698   const int64_t BPOffset    = 4 * PVT.getStoreSize();
11699 
11700   // Prepare IP either in reg.
11701   const TargetRegisterClass *PtrRC = getRegClassFor(PVT);
11702   Register LabelReg = MRI.createVirtualRegister(PtrRC);
11703   Register BufReg = MI.getOperand(1).getReg();
11704 
11705   if (Subtarget.is64BitELFABI()) {
11706     setUsesTOCBasePtr(*MBB->getParent());
11707     MIB = BuildMI(*thisMBB, MI, DL, TII->get(PPC::STD))
11708               .addReg(PPC::X2)
11709               .addImm(TOCOffset)
11710               .addReg(BufReg)
11711               .cloneMemRefs(MI);
11712   }
11713 
11714   // Naked functions never have a base pointer, and so we use r1. For all
11715   // other functions, this decision must be delayed until during PEI.
11716   unsigned BaseReg;
11717   if (MF->getFunction().hasFnAttribute(Attribute::Naked))
11718     BaseReg = Subtarget.isPPC64() ? PPC::X1 : PPC::R1;
11719   else
11720     BaseReg = Subtarget.isPPC64() ? PPC::BP8 : PPC::BP;
11721 
11722   MIB = BuildMI(*thisMBB, MI, DL,
11723                 TII->get(Subtarget.isPPC64() ? PPC::STD : PPC::STW))
11724             .addReg(BaseReg)
11725             .addImm(BPOffset)
11726             .addReg(BufReg)
11727             .cloneMemRefs(MI);
11728 
11729   // Setup
11730   MIB = BuildMI(*thisMBB, MI, DL, TII->get(PPC::BCLalways)).addMBB(mainMBB);
11731   MIB.addRegMask(TRI->getNoPreservedMask());
11732 
11733   BuildMI(*thisMBB, MI, DL, TII->get(PPC::LI), restoreDstReg).addImm(1);
11734 
11735   MIB = BuildMI(*thisMBB, MI, DL, TII->get(PPC::EH_SjLj_Setup))
11736           .addMBB(mainMBB);
11737   MIB = BuildMI(*thisMBB, MI, DL, TII->get(PPC::B)).addMBB(sinkMBB);
11738 
11739   thisMBB->addSuccessor(mainMBB, BranchProbability::getZero());
11740   thisMBB->addSuccessor(sinkMBB, BranchProbability::getOne());
11741 
11742   // mainMBB:
11743   //  mainDstReg = 0
11744   MIB =
11745       BuildMI(mainMBB, DL,
11746               TII->get(Subtarget.isPPC64() ? PPC::MFLR8 : PPC::MFLR), LabelReg);
11747 
11748   // Store IP
11749   if (Subtarget.isPPC64()) {
11750     MIB = BuildMI(mainMBB, DL, TII->get(PPC::STD))
11751             .addReg(LabelReg)
11752             .addImm(LabelOffset)
11753             .addReg(BufReg);
11754   } else {
11755     MIB = BuildMI(mainMBB, DL, TII->get(PPC::STW))
11756             .addReg(LabelReg)
11757             .addImm(LabelOffset)
11758             .addReg(BufReg);
11759   }
11760   MIB.cloneMemRefs(MI);
11761 
11762   BuildMI(mainMBB, DL, TII->get(PPC::LI), mainDstReg).addImm(0);
11763   mainMBB->addSuccessor(sinkMBB);
11764 
11765   // sinkMBB:
11766   BuildMI(*sinkMBB, sinkMBB->begin(), DL,
11767           TII->get(PPC::PHI), DstReg)
11768     .addReg(mainDstReg).addMBB(mainMBB)
11769     .addReg(restoreDstReg).addMBB(thisMBB);
11770 
11771   MI.eraseFromParent();
11772   return sinkMBB;
11773 }
11774 
11775 MachineBasicBlock *
11776 PPCTargetLowering::emitEHSjLjLongJmp(MachineInstr &MI,
11777                                      MachineBasicBlock *MBB) const {
11778   DebugLoc DL = MI.getDebugLoc();
11779   const TargetInstrInfo *TII = Subtarget.getInstrInfo();
11780 
11781   MachineFunction *MF = MBB->getParent();
11782   MachineRegisterInfo &MRI = MF->getRegInfo();
11783 
11784   MVT PVT = getPointerTy(MF->getDataLayout());
11785   assert((PVT == MVT::i64 || PVT == MVT::i32) &&
11786          "Invalid Pointer Size!");
11787 
11788   const TargetRegisterClass *RC =
11789     (PVT == MVT::i64) ? &PPC::G8RCRegClass : &PPC::GPRCRegClass;
11790   Register Tmp = MRI.createVirtualRegister(RC);
11791   // Since FP is only updated here but NOT referenced, it's treated as GPR.
11792   unsigned FP  = (PVT == MVT::i64) ? PPC::X31 : PPC::R31;
11793   unsigned SP  = (PVT == MVT::i64) ? PPC::X1 : PPC::R1;
11794   unsigned BP =
11795       (PVT == MVT::i64)
11796           ? PPC::X30
11797           : (Subtarget.isSVR4ABI() && isPositionIndependent() ? PPC::R29
11798                                                               : PPC::R30);
11799 
11800   MachineInstrBuilder MIB;
11801 
11802   const int64_t LabelOffset = 1 * PVT.getStoreSize();
11803   const int64_t SPOffset    = 2 * PVT.getStoreSize();
11804   const int64_t TOCOffset   = 3 * PVT.getStoreSize();
11805   const int64_t BPOffset    = 4 * PVT.getStoreSize();
11806 
11807   Register BufReg = MI.getOperand(0).getReg();
11808 
11809   // Reload FP (the jumped-to function may not have had a
11810   // frame pointer, and if so, then its r31 will be restored
11811   // as necessary).
11812   if (PVT == MVT::i64) {
11813     MIB = BuildMI(*MBB, MI, DL, TII->get(PPC::LD), FP)
11814             .addImm(0)
11815             .addReg(BufReg);
11816   } else {
11817     MIB = BuildMI(*MBB, MI, DL, TII->get(PPC::LWZ), FP)
11818             .addImm(0)
11819             .addReg(BufReg);
11820   }
11821   MIB.cloneMemRefs(MI);
11822 
11823   // Reload IP
11824   if (PVT == MVT::i64) {
11825     MIB = BuildMI(*MBB, MI, DL, TII->get(PPC::LD), Tmp)
11826             .addImm(LabelOffset)
11827             .addReg(BufReg);
11828   } else {
11829     MIB = BuildMI(*MBB, MI, DL, TII->get(PPC::LWZ), Tmp)
11830             .addImm(LabelOffset)
11831             .addReg(BufReg);
11832   }
11833   MIB.cloneMemRefs(MI);
11834 
11835   // Reload SP
11836   if (PVT == MVT::i64) {
11837     MIB = BuildMI(*MBB, MI, DL, TII->get(PPC::LD), SP)
11838             .addImm(SPOffset)
11839             .addReg(BufReg);
11840   } else {
11841     MIB = BuildMI(*MBB, MI, DL, TII->get(PPC::LWZ), SP)
11842             .addImm(SPOffset)
11843             .addReg(BufReg);
11844   }
11845   MIB.cloneMemRefs(MI);
11846 
11847   // Reload BP
11848   if (PVT == MVT::i64) {
11849     MIB = BuildMI(*MBB, MI, DL, TII->get(PPC::LD), BP)
11850             .addImm(BPOffset)
11851             .addReg(BufReg);
11852   } else {
11853     MIB = BuildMI(*MBB, MI, DL, TII->get(PPC::LWZ), BP)
11854             .addImm(BPOffset)
11855             .addReg(BufReg);
11856   }
11857   MIB.cloneMemRefs(MI);
11858 
11859   // Reload TOC
11860   if (PVT == MVT::i64 && Subtarget.isSVR4ABI()) {
11861     setUsesTOCBasePtr(*MBB->getParent());
11862     MIB = BuildMI(*MBB, MI, DL, TII->get(PPC::LD), PPC::X2)
11863               .addImm(TOCOffset)
11864               .addReg(BufReg)
11865               .cloneMemRefs(MI);
11866   }
11867 
11868   // Jump
11869   BuildMI(*MBB, MI, DL,
11870           TII->get(PVT == MVT::i64 ? PPC::MTCTR8 : PPC::MTCTR)).addReg(Tmp);
11871   BuildMI(*MBB, MI, DL, TII->get(PVT == MVT::i64 ? PPC::BCTR8 : PPC::BCTR));
11872 
11873   MI.eraseFromParent();
11874   return MBB;
11875 }
11876 
11877 bool PPCTargetLowering::hasInlineStackProbe(MachineFunction &MF) const {
11878   // If the function specifically requests inline stack probes, emit them.
11879   if (MF.getFunction().hasFnAttribute("probe-stack"))
11880     return MF.getFunction().getFnAttribute("probe-stack").getValueAsString() ==
11881            "inline-asm";
11882   return false;
11883 }
11884 
11885 unsigned PPCTargetLowering::getStackProbeSize(MachineFunction &MF) const {
11886   const TargetFrameLowering *TFI = Subtarget.getFrameLowering();
11887   unsigned StackAlign = TFI->getStackAlignment();
11888   assert(StackAlign >= 1 && isPowerOf2_32(StackAlign) &&
11889          "Unexpected stack alignment");
11890   // The default stack probe size is 4096 if the function has no
11891   // stack-probe-size attribute.
11892   unsigned StackProbeSize = 4096;
11893   const Function &Fn = MF.getFunction();
11894   if (Fn.hasFnAttribute("stack-probe-size"))
11895     Fn.getFnAttribute("stack-probe-size")
11896         .getValueAsString()
11897         .getAsInteger(0, StackProbeSize);
11898   // Round down to the stack alignment.
11899   StackProbeSize &= ~(StackAlign - 1);
11900   return StackProbeSize ? StackProbeSize : StackAlign;
11901 }
11902 
11903 // Lower dynamic stack allocation with probing. `emitProbedAlloca` is splitted
11904 // into three phases. In the first phase, it uses pseudo instruction
11905 // PREPARE_PROBED_ALLOCA to get the future result of actual FramePointer and
11906 // FinalStackPtr. In the second phase, it generates a loop for probing blocks.
11907 // At last, it uses pseudo instruction DYNAREAOFFSET to get the future result of
11908 // MaxCallFrameSize so that it can calculate correct data area pointer.
11909 MachineBasicBlock *
11910 PPCTargetLowering::emitProbedAlloca(MachineInstr &MI,
11911                                     MachineBasicBlock *MBB) const {
11912   const bool isPPC64 = Subtarget.isPPC64();
11913   MachineFunction *MF = MBB->getParent();
11914   const TargetInstrInfo *TII = Subtarget.getInstrInfo();
11915   DebugLoc DL = MI.getDebugLoc();
11916   const unsigned ProbeSize = getStackProbeSize(*MF);
11917   const BasicBlock *ProbedBB = MBB->getBasicBlock();
11918   MachineRegisterInfo &MRI = MF->getRegInfo();
11919   // The CFG of probing stack looks as
11920   //         +-----+
11921   //         | MBB |
11922   //         +--+--+
11923   //            |
11924   //       +----v----+
11925   //  +--->+ TestMBB +---+
11926   //  |    +----+----+   |
11927   //  |         |        |
11928   //  |   +-----v----+   |
11929   //  +---+ BlockMBB |   |
11930   //      +----------+   |
11931   //                     |
11932   //       +---------+   |
11933   //       | TailMBB +<--+
11934   //       +---------+
11935   // In MBB, calculate previous frame pointer and final stack pointer.
11936   // In TestMBB, test if sp is equal to final stack pointer, if so, jump to
11937   // TailMBB. In BlockMBB, update the sp atomically and jump back to TestMBB.
11938   // TailMBB is spliced via \p MI.
11939   MachineBasicBlock *TestMBB = MF->CreateMachineBasicBlock(ProbedBB);
11940   MachineBasicBlock *TailMBB = MF->CreateMachineBasicBlock(ProbedBB);
11941   MachineBasicBlock *BlockMBB = MF->CreateMachineBasicBlock(ProbedBB);
11942 
11943   MachineFunction::iterator MBBIter = ++MBB->getIterator();
11944   MF->insert(MBBIter, TestMBB);
11945   MF->insert(MBBIter, BlockMBB);
11946   MF->insert(MBBIter, TailMBB);
11947 
11948   const TargetRegisterClass *G8RC = &PPC::G8RCRegClass;
11949   const TargetRegisterClass *GPRC = &PPC::GPRCRegClass;
11950 
11951   Register DstReg = MI.getOperand(0).getReg();
11952   Register NegSizeReg = MI.getOperand(1).getReg();
11953   Register SPReg = isPPC64 ? PPC::X1 : PPC::R1;
11954   Register FinalStackPtr = MRI.createVirtualRegister(isPPC64 ? G8RC : GPRC);
11955   Register FramePointer = MRI.createVirtualRegister(isPPC64 ? G8RC : GPRC);
11956   Register ActualNegSizeReg = MRI.createVirtualRegister(isPPC64 ? G8RC : GPRC);
11957 
11958   // Since value of NegSizeReg might be realigned in prologepilog, insert a
11959   // PREPARE_PROBED_ALLOCA pseudo instruction to get actual FramePointer and
11960   // NegSize.
11961   unsigned ProbeOpc;
11962   if (!MRI.hasOneNonDBGUse(NegSizeReg))
11963     ProbeOpc =
11964         isPPC64 ? PPC::PREPARE_PROBED_ALLOCA_64 : PPC::PREPARE_PROBED_ALLOCA_32;
11965   else
11966     // By introducing PREPARE_PROBED_ALLOCA_NEGSIZE_OPT, ActualNegSizeReg
11967     // and NegSizeReg will be allocated in the same phyreg to avoid
11968     // redundant copy when NegSizeReg has only one use which is current MI and
11969     // will be replaced by PREPARE_PROBED_ALLOCA then.
11970     ProbeOpc = isPPC64 ? PPC::PREPARE_PROBED_ALLOCA_NEGSIZE_SAME_REG_64
11971                        : PPC::PREPARE_PROBED_ALLOCA_NEGSIZE_SAME_REG_32;
11972   BuildMI(*MBB, {MI}, DL, TII->get(ProbeOpc), FramePointer)
11973       .addDef(ActualNegSizeReg)
11974       .addReg(NegSizeReg)
11975       .add(MI.getOperand(2))
11976       .add(MI.getOperand(3));
11977 
11978   // Calculate final stack pointer, which equals to SP + ActualNegSize.
11979   BuildMI(*MBB, {MI}, DL, TII->get(isPPC64 ? PPC::ADD8 : PPC::ADD4),
11980           FinalStackPtr)
11981       .addReg(SPReg)
11982       .addReg(ActualNegSizeReg);
11983 
11984   // Materialize a scratch register for update.
11985   int64_t NegProbeSize = -(int64_t)ProbeSize;
11986   assert(isInt<32>(NegProbeSize) && "Unhandled probe size!");
11987   Register ScratchReg = MRI.createVirtualRegister(isPPC64 ? G8RC : GPRC);
11988   if (!isInt<16>(NegProbeSize)) {
11989     Register TempReg = MRI.createVirtualRegister(isPPC64 ? G8RC : GPRC);
11990     BuildMI(*MBB, {MI}, DL, TII->get(isPPC64 ? PPC::LIS8 : PPC::LIS), TempReg)
11991         .addImm(NegProbeSize >> 16);
11992     BuildMI(*MBB, {MI}, DL, TII->get(isPPC64 ? PPC::ORI8 : PPC::ORI),
11993             ScratchReg)
11994         .addReg(TempReg)
11995         .addImm(NegProbeSize & 0xFFFF);
11996   } else
11997     BuildMI(*MBB, {MI}, DL, TII->get(isPPC64 ? PPC::LI8 : PPC::LI), ScratchReg)
11998         .addImm(NegProbeSize);
11999 
12000   {
12001     // Probing leading residual part.
12002     Register Div = MRI.createVirtualRegister(isPPC64 ? G8RC : GPRC);
12003     BuildMI(*MBB, {MI}, DL, TII->get(isPPC64 ? PPC::DIVD : PPC::DIVW), Div)
12004         .addReg(ActualNegSizeReg)
12005         .addReg(ScratchReg);
12006     Register Mul = MRI.createVirtualRegister(isPPC64 ? G8RC : GPRC);
12007     BuildMI(*MBB, {MI}, DL, TII->get(isPPC64 ? PPC::MULLD : PPC::MULLW), Mul)
12008         .addReg(Div)
12009         .addReg(ScratchReg);
12010     Register NegMod = MRI.createVirtualRegister(isPPC64 ? G8RC : GPRC);
12011     BuildMI(*MBB, {MI}, DL, TII->get(isPPC64 ? PPC::SUBF8 : PPC::SUBF), NegMod)
12012         .addReg(Mul)
12013         .addReg(ActualNegSizeReg);
12014     BuildMI(*MBB, {MI}, DL, TII->get(isPPC64 ? PPC::STDUX : PPC::STWUX), SPReg)
12015         .addReg(FramePointer)
12016         .addReg(SPReg)
12017         .addReg(NegMod);
12018   }
12019 
12020   {
12021     // Remaining part should be multiple of ProbeSize.
12022     Register CmpResult = MRI.createVirtualRegister(&PPC::CRRCRegClass);
12023     BuildMI(TestMBB, DL, TII->get(isPPC64 ? PPC::CMPD : PPC::CMPW), CmpResult)
12024         .addReg(SPReg)
12025         .addReg(FinalStackPtr);
12026     BuildMI(TestMBB, DL, TII->get(PPC::BCC))
12027         .addImm(PPC::PRED_EQ)
12028         .addReg(CmpResult)
12029         .addMBB(TailMBB);
12030     TestMBB->addSuccessor(BlockMBB);
12031     TestMBB->addSuccessor(TailMBB);
12032   }
12033 
12034   {
12035     // Touch the block.
12036     // |P...|P...|P...
12037     BuildMI(BlockMBB, DL, TII->get(isPPC64 ? PPC::STDUX : PPC::STWUX), SPReg)
12038         .addReg(FramePointer)
12039         .addReg(SPReg)
12040         .addReg(ScratchReg);
12041     BuildMI(BlockMBB, DL, TII->get(PPC::B)).addMBB(TestMBB);
12042     BlockMBB->addSuccessor(TestMBB);
12043   }
12044 
12045   // Calculation of MaxCallFrameSize is deferred to prologepilog, use
12046   // DYNAREAOFFSET pseudo instruction to get the future result.
12047   Register MaxCallFrameSizeReg =
12048       MRI.createVirtualRegister(isPPC64 ? G8RC : GPRC);
12049   BuildMI(TailMBB, DL,
12050           TII->get(isPPC64 ? PPC::DYNAREAOFFSET8 : PPC::DYNAREAOFFSET),
12051           MaxCallFrameSizeReg)
12052       .add(MI.getOperand(2))
12053       .add(MI.getOperand(3));
12054   BuildMI(TailMBB, DL, TII->get(isPPC64 ? PPC::ADD8 : PPC::ADD4), DstReg)
12055       .addReg(SPReg)
12056       .addReg(MaxCallFrameSizeReg);
12057 
12058   // Splice instructions after MI to TailMBB.
12059   TailMBB->splice(TailMBB->end(), MBB,
12060                   std::next(MachineBasicBlock::iterator(MI)), MBB->end());
12061   TailMBB->transferSuccessorsAndUpdatePHIs(MBB);
12062   MBB->addSuccessor(TestMBB);
12063 
12064   // Delete the pseudo instruction.
12065   MI.eraseFromParent();
12066 
12067   ++NumDynamicAllocaProbed;
12068   return TailMBB;
12069 }
12070 
12071 MachineBasicBlock *
12072 PPCTargetLowering::EmitInstrWithCustomInserter(MachineInstr &MI,
12073                                                MachineBasicBlock *BB) const {
12074   if (MI.getOpcode() == TargetOpcode::STACKMAP ||
12075       MI.getOpcode() == TargetOpcode::PATCHPOINT) {
12076     if (Subtarget.is64BitELFABI() &&
12077         MI.getOpcode() == TargetOpcode::PATCHPOINT &&
12078         !Subtarget.isUsingPCRelativeCalls()) {
12079       // Call lowering should have added an r2 operand to indicate a dependence
12080       // on the TOC base pointer value. It can't however, because there is no
12081       // way to mark the dependence as implicit there, and so the stackmap code
12082       // will confuse it with a regular operand. Instead, add the dependence
12083       // here.
12084       MI.addOperand(MachineOperand::CreateReg(PPC::X2, false, true));
12085     }
12086 
12087     return emitPatchPoint(MI, BB);
12088   }
12089 
12090   if (MI.getOpcode() == PPC::EH_SjLj_SetJmp32 ||
12091       MI.getOpcode() == PPC::EH_SjLj_SetJmp64) {
12092     return emitEHSjLjSetJmp(MI, BB);
12093   } else if (MI.getOpcode() == PPC::EH_SjLj_LongJmp32 ||
12094              MI.getOpcode() == PPC::EH_SjLj_LongJmp64) {
12095     return emitEHSjLjLongJmp(MI, BB);
12096   }
12097 
12098   const TargetInstrInfo *TII = Subtarget.getInstrInfo();
12099 
12100   // To "insert" these instructions we actually have to insert their
12101   // control-flow patterns.
12102   const BasicBlock *LLVM_BB = BB->getBasicBlock();
12103   MachineFunction::iterator It = ++BB->getIterator();
12104 
12105   MachineFunction *F = BB->getParent();
12106 
12107   if (MI.getOpcode() == PPC::SELECT_CC_I4 ||
12108       MI.getOpcode() == PPC::SELECT_CC_I8 || MI.getOpcode() == PPC::SELECT_I4 ||
12109       MI.getOpcode() == PPC::SELECT_I8) {
12110     SmallVector<MachineOperand, 2> Cond;
12111     if (MI.getOpcode() == PPC::SELECT_CC_I4 ||
12112         MI.getOpcode() == PPC::SELECT_CC_I8)
12113       Cond.push_back(MI.getOperand(4));
12114     else
12115       Cond.push_back(MachineOperand::CreateImm(PPC::PRED_BIT_SET));
12116     Cond.push_back(MI.getOperand(1));
12117 
12118     DebugLoc dl = MI.getDebugLoc();
12119     TII->insertSelect(*BB, MI, dl, MI.getOperand(0).getReg(), Cond,
12120                       MI.getOperand(2).getReg(), MI.getOperand(3).getReg());
12121   } else if (MI.getOpcode() == PPC::SELECT_CC_F4 ||
12122              MI.getOpcode() == PPC::SELECT_CC_F8 ||
12123              MI.getOpcode() == PPC::SELECT_CC_F16 ||
12124              MI.getOpcode() == PPC::SELECT_CC_QFRC ||
12125              MI.getOpcode() == PPC::SELECT_CC_QSRC ||
12126              MI.getOpcode() == PPC::SELECT_CC_QBRC ||
12127              MI.getOpcode() == PPC::SELECT_CC_VRRC ||
12128              MI.getOpcode() == PPC::SELECT_CC_VSFRC ||
12129              MI.getOpcode() == PPC::SELECT_CC_VSSRC ||
12130              MI.getOpcode() == PPC::SELECT_CC_VSRC ||
12131              MI.getOpcode() == PPC::SELECT_CC_SPE4 ||
12132              MI.getOpcode() == PPC::SELECT_CC_SPE ||
12133              MI.getOpcode() == PPC::SELECT_F4 ||
12134              MI.getOpcode() == PPC::SELECT_F8 ||
12135              MI.getOpcode() == PPC::SELECT_F16 ||
12136              MI.getOpcode() == PPC::SELECT_QFRC ||
12137              MI.getOpcode() == PPC::SELECT_QSRC ||
12138              MI.getOpcode() == PPC::SELECT_QBRC ||
12139              MI.getOpcode() == PPC::SELECT_SPE ||
12140              MI.getOpcode() == PPC::SELECT_SPE4 ||
12141              MI.getOpcode() == PPC::SELECT_VRRC ||
12142              MI.getOpcode() == PPC::SELECT_VSFRC ||
12143              MI.getOpcode() == PPC::SELECT_VSSRC ||
12144              MI.getOpcode() == PPC::SELECT_VSRC) {
12145     // The incoming instruction knows the destination vreg to set, the
12146     // condition code register to branch on, the true/false values to
12147     // select between, and a branch opcode to use.
12148 
12149     //  thisMBB:
12150     //  ...
12151     //   TrueVal = ...
12152     //   cmpTY ccX, r1, r2
12153     //   bCC copy1MBB
12154     //   fallthrough --> copy0MBB
12155     MachineBasicBlock *thisMBB = BB;
12156     MachineBasicBlock *copy0MBB = F->CreateMachineBasicBlock(LLVM_BB);
12157     MachineBasicBlock *sinkMBB = F->CreateMachineBasicBlock(LLVM_BB);
12158     DebugLoc dl = MI.getDebugLoc();
12159     F->insert(It, copy0MBB);
12160     F->insert(It, sinkMBB);
12161 
12162     // Transfer the remainder of BB and its successor edges to sinkMBB.
12163     sinkMBB->splice(sinkMBB->begin(), BB,
12164                     std::next(MachineBasicBlock::iterator(MI)), BB->end());
12165     sinkMBB->transferSuccessorsAndUpdatePHIs(BB);
12166 
12167     // Next, add the true and fallthrough blocks as its successors.
12168     BB->addSuccessor(copy0MBB);
12169     BB->addSuccessor(sinkMBB);
12170 
12171     if (MI.getOpcode() == PPC::SELECT_I4 || MI.getOpcode() == PPC::SELECT_I8 ||
12172         MI.getOpcode() == PPC::SELECT_F4 || MI.getOpcode() == PPC::SELECT_F8 ||
12173         MI.getOpcode() == PPC::SELECT_F16 ||
12174         MI.getOpcode() == PPC::SELECT_SPE4 ||
12175         MI.getOpcode() == PPC::SELECT_SPE ||
12176         MI.getOpcode() == PPC::SELECT_QFRC ||
12177         MI.getOpcode() == PPC::SELECT_QSRC ||
12178         MI.getOpcode() == PPC::SELECT_QBRC ||
12179         MI.getOpcode() == PPC::SELECT_VRRC ||
12180         MI.getOpcode() == PPC::SELECT_VSFRC ||
12181         MI.getOpcode() == PPC::SELECT_VSSRC ||
12182         MI.getOpcode() == PPC::SELECT_VSRC) {
12183       BuildMI(BB, dl, TII->get(PPC::BC))
12184           .addReg(MI.getOperand(1).getReg())
12185           .addMBB(sinkMBB);
12186     } else {
12187       unsigned SelectPred = MI.getOperand(4).getImm();
12188       BuildMI(BB, dl, TII->get(PPC::BCC))
12189           .addImm(SelectPred)
12190           .addReg(MI.getOperand(1).getReg())
12191           .addMBB(sinkMBB);
12192     }
12193 
12194     //  copy0MBB:
12195     //   %FalseValue = ...
12196     //   # fallthrough to sinkMBB
12197     BB = copy0MBB;
12198 
12199     // Update machine-CFG edges
12200     BB->addSuccessor(sinkMBB);
12201 
12202     //  sinkMBB:
12203     //   %Result = phi [ %FalseValue, copy0MBB ], [ %TrueValue, thisMBB ]
12204     //  ...
12205     BB = sinkMBB;
12206     BuildMI(*BB, BB->begin(), dl, TII->get(PPC::PHI), MI.getOperand(0).getReg())
12207         .addReg(MI.getOperand(3).getReg())
12208         .addMBB(copy0MBB)
12209         .addReg(MI.getOperand(2).getReg())
12210         .addMBB(thisMBB);
12211   } else if (MI.getOpcode() == PPC::ReadTB) {
12212     // To read the 64-bit time-base register on a 32-bit target, we read the
12213     // two halves. Should the counter have wrapped while it was being read, we
12214     // need to try again.
12215     // ...
12216     // readLoop:
12217     // mfspr Rx,TBU # load from TBU
12218     // mfspr Ry,TB  # load from TB
12219     // mfspr Rz,TBU # load from TBU
12220     // cmpw crX,Rx,Rz # check if 'old'='new'
12221     // bne readLoop   # branch if they're not equal
12222     // ...
12223 
12224     MachineBasicBlock *readMBB = F->CreateMachineBasicBlock(LLVM_BB);
12225     MachineBasicBlock *sinkMBB = F->CreateMachineBasicBlock(LLVM_BB);
12226     DebugLoc dl = MI.getDebugLoc();
12227     F->insert(It, readMBB);
12228     F->insert(It, sinkMBB);
12229 
12230     // Transfer the remainder of BB and its successor edges to sinkMBB.
12231     sinkMBB->splice(sinkMBB->begin(), BB,
12232                     std::next(MachineBasicBlock::iterator(MI)), BB->end());
12233     sinkMBB->transferSuccessorsAndUpdatePHIs(BB);
12234 
12235     BB->addSuccessor(readMBB);
12236     BB = readMBB;
12237 
12238     MachineRegisterInfo &RegInfo = F->getRegInfo();
12239     Register ReadAgainReg = RegInfo.createVirtualRegister(&PPC::GPRCRegClass);
12240     Register LoReg = MI.getOperand(0).getReg();
12241     Register HiReg = MI.getOperand(1).getReg();
12242 
12243     BuildMI(BB, dl, TII->get(PPC::MFSPR), HiReg).addImm(269);
12244     BuildMI(BB, dl, TII->get(PPC::MFSPR), LoReg).addImm(268);
12245     BuildMI(BB, dl, TII->get(PPC::MFSPR), ReadAgainReg).addImm(269);
12246 
12247     Register CmpReg = RegInfo.createVirtualRegister(&PPC::CRRCRegClass);
12248 
12249     BuildMI(BB, dl, TII->get(PPC::CMPW), CmpReg)
12250         .addReg(HiReg)
12251         .addReg(ReadAgainReg);
12252     BuildMI(BB, dl, TII->get(PPC::BCC))
12253         .addImm(PPC::PRED_NE)
12254         .addReg(CmpReg)
12255         .addMBB(readMBB);
12256 
12257     BB->addSuccessor(readMBB);
12258     BB->addSuccessor(sinkMBB);
12259   } else if (MI.getOpcode() == PPC::ATOMIC_LOAD_ADD_I8)
12260     BB = EmitPartwordAtomicBinary(MI, BB, true, PPC::ADD4);
12261   else if (MI.getOpcode() == PPC::ATOMIC_LOAD_ADD_I16)
12262     BB = EmitPartwordAtomicBinary(MI, BB, false, PPC::ADD4);
12263   else if (MI.getOpcode() == PPC::ATOMIC_LOAD_ADD_I32)
12264     BB = EmitAtomicBinary(MI, BB, 4, PPC::ADD4);
12265   else if (MI.getOpcode() == PPC::ATOMIC_LOAD_ADD_I64)
12266     BB = EmitAtomicBinary(MI, BB, 8, PPC::ADD8);
12267 
12268   else if (MI.getOpcode() == PPC::ATOMIC_LOAD_AND_I8)
12269     BB = EmitPartwordAtomicBinary(MI, BB, true, PPC::AND);
12270   else if (MI.getOpcode() == PPC::ATOMIC_LOAD_AND_I16)
12271     BB = EmitPartwordAtomicBinary(MI, BB, false, PPC::AND);
12272   else if (MI.getOpcode() == PPC::ATOMIC_LOAD_AND_I32)
12273     BB = EmitAtomicBinary(MI, BB, 4, PPC::AND);
12274   else if (MI.getOpcode() == PPC::ATOMIC_LOAD_AND_I64)
12275     BB = EmitAtomicBinary(MI, BB, 8, PPC::AND8);
12276 
12277   else if (MI.getOpcode() == PPC::ATOMIC_LOAD_OR_I8)
12278     BB = EmitPartwordAtomicBinary(MI, BB, true, PPC::OR);
12279   else if (MI.getOpcode() == PPC::ATOMIC_LOAD_OR_I16)
12280     BB = EmitPartwordAtomicBinary(MI, BB, false, PPC::OR);
12281   else if (MI.getOpcode() == PPC::ATOMIC_LOAD_OR_I32)
12282     BB = EmitAtomicBinary(MI, BB, 4, PPC::OR);
12283   else if (MI.getOpcode() == PPC::ATOMIC_LOAD_OR_I64)
12284     BB = EmitAtomicBinary(MI, BB, 8, PPC::OR8);
12285 
12286   else if (MI.getOpcode() == PPC::ATOMIC_LOAD_XOR_I8)
12287     BB = EmitPartwordAtomicBinary(MI, BB, true, PPC::XOR);
12288   else if (MI.getOpcode() == PPC::ATOMIC_LOAD_XOR_I16)
12289     BB = EmitPartwordAtomicBinary(MI, BB, false, PPC::XOR);
12290   else if (MI.getOpcode() == PPC::ATOMIC_LOAD_XOR_I32)
12291     BB = EmitAtomicBinary(MI, BB, 4, PPC::XOR);
12292   else if (MI.getOpcode() == PPC::ATOMIC_LOAD_XOR_I64)
12293     BB = EmitAtomicBinary(MI, BB, 8, PPC::XOR8);
12294 
12295   else if (MI.getOpcode() == PPC::ATOMIC_LOAD_NAND_I8)
12296     BB = EmitPartwordAtomicBinary(MI, BB, true, PPC::NAND);
12297   else if (MI.getOpcode() == PPC::ATOMIC_LOAD_NAND_I16)
12298     BB = EmitPartwordAtomicBinary(MI, BB, false, PPC::NAND);
12299   else if (MI.getOpcode() == PPC::ATOMIC_LOAD_NAND_I32)
12300     BB = EmitAtomicBinary(MI, BB, 4, PPC::NAND);
12301   else if (MI.getOpcode() == PPC::ATOMIC_LOAD_NAND_I64)
12302     BB = EmitAtomicBinary(MI, BB, 8, PPC::NAND8);
12303 
12304   else if (MI.getOpcode() == PPC::ATOMIC_LOAD_SUB_I8)
12305     BB = EmitPartwordAtomicBinary(MI, BB, true, PPC::SUBF);
12306   else if (MI.getOpcode() == PPC::ATOMIC_LOAD_SUB_I16)
12307     BB = EmitPartwordAtomicBinary(MI, BB, false, PPC::SUBF);
12308   else if (MI.getOpcode() == PPC::ATOMIC_LOAD_SUB_I32)
12309     BB = EmitAtomicBinary(MI, BB, 4, PPC::SUBF);
12310   else if (MI.getOpcode() == PPC::ATOMIC_LOAD_SUB_I64)
12311     BB = EmitAtomicBinary(MI, BB, 8, PPC::SUBF8);
12312 
12313   else if (MI.getOpcode() == PPC::ATOMIC_LOAD_MIN_I8)
12314     BB = EmitPartwordAtomicBinary(MI, BB, true, 0, PPC::CMPW, PPC::PRED_GE);
12315   else if (MI.getOpcode() == PPC::ATOMIC_LOAD_MIN_I16)
12316     BB = EmitPartwordAtomicBinary(MI, BB, false, 0, PPC::CMPW, PPC::PRED_GE);
12317   else if (MI.getOpcode() == PPC::ATOMIC_LOAD_MIN_I32)
12318     BB = EmitAtomicBinary(MI, BB, 4, 0, PPC::CMPW, PPC::PRED_GE);
12319   else if (MI.getOpcode() == PPC::ATOMIC_LOAD_MIN_I64)
12320     BB = EmitAtomicBinary(MI, BB, 8, 0, PPC::CMPD, PPC::PRED_GE);
12321 
12322   else if (MI.getOpcode() == PPC::ATOMIC_LOAD_MAX_I8)
12323     BB = EmitPartwordAtomicBinary(MI, BB, true, 0, PPC::CMPW, PPC::PRED_LE);
12324   else if (MI.getOpcode() == PPC::ATOMIC_LOAD_MAX_I16)
12325     BB = EmitPartwordAtomicBinary(MI, BB, false, 0, PPC::CMPW, PPC::PRED_LE);
12326   else if (MI.getOpcode() == PPC::ATOMIC_LOAD_MAX_I32)
12327     BB = EmitAtomicBinary(MI, BB, 4, 0, PPC::CMPW, PPC::PRED_LE);
12328   else if (MI.getOpcode() == PPC::ATOMIC_LOAD_MAX_I64)
12329     BB = EmitAtomicBinary(MI, BB, 8, 0, PPC::CMPD, PPC::PRED_LE);
12330 
12331   else if (MI.getOpcode() == PPC::ATOMIC_LOAD_UMIN_I8)
12332     BB = EmitPartwordAtomicBinary(MI, BB, true, 0, PPC::CMPLW, PPC::PRED_GE);
12333   else if (MI.getOpcode() == PPC::ATOMIC_LOAD_UMIN_I16)
12334     BB = EmitPartwordAtomicBinary(MI, BB, false, 0, PPC::CMPLW, PPC::PRED_GE);
12335   else if (MI.getOpcode() == PPC::ATOMIC_LOAD_UMIN_I32)
12336     BB = EmitAtomicBinary(MI, BB, 4, 0, PPC::CMPLW, PPC::PRED_GE);
12337   else if (MI.getOpcode() == PPC::ATOMIC_LOAD_UMIN_I64)
12338     BB = EmitAtomicBinary(MI, BB, 8, 0, PPC::CMPLD, PPC::PRED_GE);
12339 
12340   else if (MI.getOpcode() == PPC::ATOMIC_LOAD_UMAX_I8)
12341     BB = EmitPartwordAtomicBinary(MI, BB, true, 0, PPC::CMPLW, PPC::PRED_LE);
12342   else if (MI.getOpcode() == PPC::ATOMIC_LOAD_UMAX_I16)
12343     BB = EmitPartwordAtomicBinary(MI, BB, false, 0, PPC::CMPLW, PPC::PRED_LE);
12344   else if (MI.getOpcode() == PPC::ATOMIC_LOAD_UMAX_I32)
12345     BB = EmitAtomicBinary(MI, BB, 4, 0, PPC::CMPLW, PPC::PRED_LE);
12346   else if (MI.getOpcode() == PPC::ATOMIC_LOAD_UMAX_I64)
12347     BB = EmitAtomicBinary(MI, BB, 8, 0, PPC::CMPLD, PPC::PRED_LE);
12348 
12349   else if (MI.getOpcode() == PPC::ATOMIC_SWAP_I8)
12350     BB = EmitPartwordAtomicBinary(MI, BB, true, 0);
12351   else if (MI.getOpcode() == PPC::ATOMIC_SWAP_I16)
12352     BB = EmitPartwordAtomicBinary(MI, BB, false, 0);
12353   else if (MI.getOpcode() == PPC::ATOMIC_SWAP_I32)
12354     BB = EmitAtomicBinary(MI, BB, 4, 0);
12355   else if (MI.getOpcode() == PPC::ATOMIC_SWAP_I64)
12356     BB = EmitAtomicBinary(MI, BB, 8, 0);
12357   else if (MI.getOpcode() == PPC::ATOMIC_CMP_SWAP_I32 ||
12358            MI.getOpcode() == PPC::ATOMIC_CMP_SWAP_I64 ||
12359            (Subtarget.hasPartwordAtomics() &&
12360             MI.getOpcode() == PPC::ATOMIC_CMP_SWAP_I8) ||
12361            (Subtarget.hasPartwordAtomics() &&
12362             MI.getOpcode() == PPC::ATOMIC_CMP_SWAP_I16)) {
12363     bool is64bit = MI.getOpcode() == PPC::ATOMIC_CMP_SWAP_I64;
12364 
12365     auto LoadMnemonic = PPC::LDARX;
12366     auto StoreMnemonic = PPC::STDCX;
12367     switch (MI.getOpcode()) {
12368     default:
12369       llvm_unreachable("Compare and swap of unknown size");
12370     case PPC::ATOMIC_CMP_SWAP_I8:
12371       LoadMnemonic = PPC::LBARX;
12372       StoreMnemonic = PPC::STBCX;
12373       assert(Subtarget.hasPartwordAtomics() && "No support partword atomics.");
12374       break;
12375     case PPC::ATOMIC_CMP_SWAP_I16:
12376       LoadMnemonic = PPC::LHARX;
12377       StoreMnemonic = PPC::STHCX;
12378       assert(Subtarget.hasPartwordAtomics() && "No support partword atomics.");
12379       break;
12380     case PPC::ATOMIC_CMP_SWAP_I32:
12381       LoadMnemonic = PPC::LWARX;
12382       StoreMnemonic = PPC::STWCX;
12383       break;
12384     case PPC::ATOMIC_CMP_SWAP_I64:
12385       LoadMnemonic = PPC::LDARX;
12386       StoreMnemonic = PPC::STDCX;
12387       break;
12388     }
12389     Register dest = MI.getOperand(0).getReg();
12390     Register ptrA = MI.getOperand(1).getReg();
12391     Register ptrB = MI.getOperand(2).getReg();
12392     Register oldval = MI.getOperand(3).getReg();
12393     Register newval = MI.getOperand(4).getReg();
12394     DebugLoc dl = MI.getDebugLoc();
12395 
12396     MachineBasicBlock *loop1MBB = F->CreateMachineBasicBlock(LLVM_BB);
12397     MachineBasicBlock *loop2MBB = F->CreateMachineBasicBlock(LLVM_BB);
12398     MachineBasicBlock *midMBB = F->CreateMachineBasicBlock(LLVM_BB);
12399     MachineBasicBlock *exitMBB = F->CreateMachineBasicBlock(LLVM_BB);
12400     F->insert(It, loop1MBB);
12401     F->insert(It, loop2MBB);
12402     F->insert(It, midMBB);
12403     F->insert(It, exitMBB);
12404     exitMBB->splice(exitMBB->begin(), BB,
12405                     std::next(MachineBasicBlock::iterator(MI)), BB->end());
12406     exitMBB->transferSuccessorsAndUpdatePHIs(BB);
12407 
12408     //  thisMBB:
12409     //   ...
12410     //   fallthrough --> loopMBB
12411     BB->addSuccessor(loop1MBB);
12412 
12413     // loop1MBB:
12414     //   l[bhwd]arx dest, ptr
12415     //   cmp[wd] dest, oldval
12416     //   bne- midMBB
12417     // loop2MBB:
12418     //   st[bhwd]cx. newval, ptr
12419     //   bne- loopMBB
12420     //   b exitBB
12421     // midMBB:
12422     //   st[bhwd]cx. dest, ptr
12423     // exitBB:
12424     BB = loop1MBB;
12425     BuildMI(BB, dl, TII->get(LoadMnemonic), dest).addReg(ptrA).addReg(ptrB);
12426     BuildMI(BB, dl, TII->get(is64bit ? PPC::CMPD : PPC::CMPW), PPC::CR0)
12427         .addReg(oldval)
12428         .addReg(dest);
12429     BuildMI(BB, dl, TII->get(PPC::BCC))
12430         .addImm(PPC::PRED_NE)
12431         .addReg(PPC::CR0)
12432         .addMBB(midMBB);
12433     BB->addSuccessor(loop2MBB);
12434     BB->addSuccessor(midMBB);
12435 
12436     BB = loop2MBB;
12437     BuildMI(BB, dl, TII->get(StoreMnemonic))
12438         .addReg(newval)
12439         .addReg(ptrA)
12440         .addReg(ptrB);
12441     BuildMI(BB, dl, TII->get(PPC::BCC))
12442         .addImm(PPC::PRED_NE)
12443         .addReg(PPC::CR0)
12444         .addMBB(loop1MBB);
12445     BuildMI(BB, dl, TII->get(PPC::B)).addMBB(exitMBB);
12446     BB->addSuccessor(loop1MBB);
12447     BB->addSuccessor(exitMBB);
12448 
12449     BB = midMBB;
12450     BuildMI(BB, dl, TII->get(StoreMnemonic))
12451         .addReg(dest)
12452         .addReg(ptrA)
12453         .addReg(ptrB);
12454     BB->addSuccessor(exitMBB);
12455 
12456     //  exitMBB:
12457     //   ...
12458     BB = exitMBB;
12459   } else if (MI.getOpcode() == PPC::ATOMIC_CMP_SWAP_I8 ||
12460              MI.getOpcode() == PPC::ATOMIC_CMP_SWAP_I16) {
12461     // We must use 64-bit registers for addresses when targeting 64-bit,
12462     // since we're actually doing arithmetic on them.  Other registers
12463     // can be 32-bit.
12464     bool is64bit = Subtarget.isPPC64();
12465     bool isLittleEndian = Subtarget.isLittleEndian();
12466     bool is8bit = MI.getOpcode() == PPC::ATOMIC_CMP_SWAP_I8;
12467 
12468     Register dest = MI.getOperand(0).getReg();
12469     Register ptrA = MI.getOperand(1).getReg();
12470     Register ptrB = MI.getOperand(2).getReg();
12471     Register oldval = MI.getOperand(3).getReg();
12472     Register newval = MI.getOperand(4).getReg();
12473     DebugLoc dl = MI.getDebugLoc();
12474 
12475     MachineBasicBlock *loop1MBB = F->CreateMachineBasicBlock(LLVM_BB);
12476     MachineBasicBlock *loop2MBB = F->CreateMachineBasicBlock(LLVM_BB);
12477     MachineBasicBlock *midMBB = F->CreateMachineBasicBlock(LLVM_BB);
12478     MachineBasicBlock *exitMBB = F->CreateMachineBasicBlock(LLVM_BB);
12479     F->insert(It, loop1MBB);
12480     F->insert(It, loop2MBB);
12481     F->insert(It, midMBB);
12482     F->insert(It, exitMBB);
12483     exitMBB->splice(exitMBB->begin(), BB,
12484                     std::next(MachineBasicBlock::iterator(MI)), BB->end());
12485     exitMBB->transferSuccessorsAndUpdatePHIs(BB);
12486 
12487     MachineRegisterInfo &RegInfo = F->getRegInfo();
12488     const TargetRegisterClass *RC =
12489         is64bit ? &PPC::G8RCRegClass : &PPC::GPRCRegClass;
12490     const TargetRegisterClass *GPRC = &PPC::GPRCRegClass;
12491 
12492     Register PtrReg = RegInfo.createVirtualRegister(RC);
12493     Register Shift1Reg = RegInfo.createVirtualRegister(GPRC);
12494     Register ShiftReg =
12495         isLittleEndian ? Shift1Reg : RegInfo.createVirtualRegister(GPRC);
12496     Register NewVal2Reg = RegInfo.createVirtualRegister(GPRC);
12497     Register NewVal3Reg = RegInfo.createVirtualRegister(GPRC);
12498     Register OldVal2Reg = RegInfo.createVirtualRegister(GPRC);
12499     Register OldVal3Reg = RegInfo.createVirtualRegister(GPRC);
12500     Register MaskReg = RegInfo.createVirtualRegister(GPRC);
12501     Register Mask2Reg = RegInfo.createVirtualRegister(GPRC);
12502     Register Mask3Reg = RegInfo.createVirtualRegister(GPRC);
12503     Register Tmp2Reg = RegInfo.createVirtualRegister(GPRC);
12504     Register Tmp4Reg = RegInfo.createVirtualRegister(GPRC);
12505     Register TmpDestReg = RegInfo.createVirtualRegister(GPRC);
12506     Register Ptr1Reg;
12507     Register TmpReg = RegInfo.createVirtualRegister(GPRC);
12508     Register ZeroReg = is64bit ? PPC::ZERO8 : PPC::ZERO;
12509     //  thisMBB:
12510     //   ...
12511     //   fallthrough --> loopMBB
12512     BB->addSuccessor(loop1MBB);
12513 
12514     // The 4-byte load must be aligned, while a char or short may be
12515     // anywhere in the word.  Hence all this nasty bookkeeping code.
12516     //   add ptr1, ptrA, ptrB [copy if ptrA==0]
12517     //   rlwinm shift1, ptr1, 3, 27, 28 [3, 27, 27]
12518     //   xori shift, shift1, 24 [16]
12519     //   rlwinm ptr, ptr1, 0, 0, 29
12520     //   slw newval2, newval, shift
12521     //   slw oldval2, oldval,shift
12522     //   li mask2, 255 [li mask3, 0; ori mask2, mask3, 65535]
12523     //   slw mask, mask2, shift
12524     //   and newval3, newval2, mask
12525     //   and oldval3, oldval2, mask
12526     // loop1MBB:
12527     //   lwarx tmpDest, ptr
12528     //   and tmp, tmpDest, mask
12529     //   cmpw tmp, oldval3
12530     //   bne- midMBB
12531     // loop2MBB:
12532     //   andc tmp2, tmpDest, mask
12533     //   or tmp4, tmp2, newval3
12534     //   stwcx. tmp4, ptr
12535     //   bne- loop1MBB
12536     //   b exitBB
12537     // midMBB:
12538     //   stwcx. tmpDest, ptr
12539     // exitBB:
12540     //   srw dest, tmpDest, shift
12541     if (ptrA != ZeroReg) {
12542       Ptr1Reg = RegInfo.createVirtualRegister(RC);
12543       BuildMI(BB, dl, TII->get(is64bit ? PPC::ADD8 : PPC::ADD4), Ptr1Reg)
12544           .addReg(ptrA)
12545           .addReg(ptrB);
12546     } else {
12547       Ptr1Reg = ptrB;
12548     }
12549 
12550     // We need use 32-bit subregister to avoid mismatch register class in 64-bit
12551     // mode.
12552     BuildMI(BB, dl, TII->get(PPC::RLWINM), Shift1Reg)
12553         .addReg(Ptr1Reg, 0, is64bit ? PPC::sub_32 : 0)
12554         .addImm(3)
12555         .addImm(27)
12556         .addImm(is8bit ? 28 : 27);
12557     if (!isLittleEndian)
12558       BuildMI(BB, dl, TII->get(PPC::XORI), ShiftReg)
12559           .addReg(Shift1Reg)
12560           .addImm(is8bit ? 24 : 16);
12561     if (is64bit)
12562       BuildMI(BB, dl, TII->get(PPC::RLDICR), PtrReg)
12563           .addReg(Ptr1Reg)
12564           .addImm(0)
12565           .addImm(61);
12566     else
12567       BuildMI(BB, dl, TII->get(PPC::RLWINM), PtrReg)
12568           .addReg(Ptr1Reg)
12569           .addImm(0)
12570           .addImm(0)
12571           .addImm(29);
12572     BuildMI(BB, dl, TII->get(PPC::SLW), NewVal2Reg)
12573         .addReg(newval)
12574         .addReg(ShiftReg);
12575     BuildMI(BB, dl, TII->get(PPC::SLW), OldVal2Reg)
12576         .addReg(oldval)
12577         .addReg(ShiftReg);
12578     if (is8bit)
12579       BuildMI(BB, dl, TII->get(PPC::LI), Mask2Reg).addImm(255);
12580     else {
12581       BuildMI(BB, dl, TII->get(PPC::LI), Mask3Reg).addImm(0);
12582       BuildMI(BB, dl, TII->get(PPC::ORI), Mask2Reg)
12583           .addReg(Mask3Reg)
12584           .addImm(65535);
12585     }
12586     BuildMI(BB, dl, TII->get(PPC::SLW), MaskReg)
12587         .addReg(Mask2Reg)
12588         .addReg(ShiftReg);
12589     BuildMI(BB, dl, TII->get(PPC::AND), NewVal3Reg)
12590         .addReg(NewVal2Reg)
12591         .addReg(MaskReg);
12592     BuildMI(BB, dl, TII->get(PPC::AND), OldVal3Reg)
12593         .addReg(OldVal2Reg)
12594         .addReg(MaskReg);
12595 
12596     BB = loop1MBB;
12597     BuildMI(BB, dl, TII->get(PPC::LWARX), TmpDestReg)
12598         .addReg(ZeroReg)
12599         .addReg(PtrReg);
12600     BuildMI(BB, dl, TII->get(PPC::AND), TmpReg)
12601         .addReg(TmpDestReg)
12602         .addReg(MaskReg);
12603     BuildMI(BB, dl, TII->get(PPC::CMPW), PPC::CR0)
12604         .addReg(TmpReg)
12605         .addReg(OldVal3Reg);
12606     BuildMI(BB, dl, TII->get(PPC::BCC))
12607         .addImm(PPC::PRED_NE)
12608         .addReg(PPC::CR0)
12609         .addMBB(midMBB);
12610     BB->addSuccessor(loop2MBB);
12611     BB->addSuccessor(midMBB);
12612 
12613     BB = loop2MBB;
12614     BuildMI(BB, dl, TII->get(PPC::ANDC), Tmp2Reg)
12615         .addReg(TmpDestReg)
12616         .addReg(MaskReg);
12617     BuildMI(BB, dl, TII->get(PPC::OR), Tmp4Reg)
12618         .addReg(Tmp2Reg)
12619         .addReg(NewVal3Reg);
12620     BuildMI(BB, dl, TII->get(PPC::STWCX))
12621         .addReg(Tmp4Reg)
12622         .addReg(ZeroReg)
12623         .addReg(PtrReg);
12624     BuildMI(BB, dl, TII->get(PPC::BCC))
12625         .addImm(PPC::PRED_NE)
12626         .addReg(PPC::CR0)
12627         .addMBB(loop1MBB);
12628     BuildMI(BB, dl, TII->get(PPC::B)).addMBB(exitMBB);
12629     BB->addSuccessor(loop1MBB);
12630     BB->addSuccessor(exitMBB);
12631 
12632     BB = midMBB;
12633     BuildMI(BB, dl, TII->get(PPC::STWCX))
12634         .addReg(TmpDestReg)
12635         .addReg(ZeroReg)
12636         .addReg(PtrReg);
12637     BB->addSuccessor(exitMBB);
12638 
12639     //  exitMBB:
12640     //   ...
12641     BB = exitMBB;
12642     BuildMI(*BB, BB->begin(), dl, TII->get(PPC::SRW), dest)
12643         .addReg(TmpReg)
12644         .addReg(ShiftReg);
12645   } else if (MI.getOpcode() == PPC::FADDrtz) {
12646     // This pseudo performs an FADD with rounding mode temporarily forced
12647     // to round-to-zero.  We emit this via custom inserter since the FPSCR
12648     // is not modeled at the SelectionDAG level.
12649     Register Dest = MI.getOperand(0).getReg();
12650     Register Src1 = MI.getOperand(1).getReg();
12651     Register Src2 = MI.getOperand(2).getReg();
12652     DebugLoc dl = MI.getDebugLoc();
12653 
12654     MachineRegisterInfo &RegInfo = F->getRegInfo();
12655     Register MFFSReg = RegInfo.createVirtualRegister(&PPC::F8RCRegClass);
12656 
12657     // Save FPSCR value.
12658     BuildMI(*BB, MI, dl, TII->get(PPC::MFFS), MFFSReg);
12659 
12660     // Set rounding mode to round-to-zero.
12661     BuildMI(*BB, MI, dl, TII->get(PPC::MTFSB1)).addImm(31);
12662     BuildMI(*BB, MI, dl, TII->get(PPC::MTFSB0)).addImm(30);
12663 
12664     // Perform addition.
12665     BuildMI(*BB, MI, dl, TII->get(PPC::FADD), Dest).addReg(Src1).addReg(Src2);
12666 
12667     // Restore FPSCR value.
12668     BuildMI(*BB, MI, dl, TII->get(PPC::MTFSFb)).addImm(1).addReg(MFFSReg);
12669   } else if (MI.getOpcode() == PPC::ANDI_rec_1_EQ_BIT ||
12670              MI.getOpcode() == PPC::ANDI_rec_1_GT_BIT ||
12671              MI.getOpcode() == PPC::ANDI_rec_1_EQ_BIT8 ||
12672              MI.getOpcode() == PPC::ANDI_rec_1_GT_BIT8) {
12673     unsigned Opcode = (MI.getOpcode() == PPC::ANDI_rec_1_EQ_BIT8 ||
12674                        MI.getOpcode() == PPC::ANDI_rec_1_GT_BIT8)
12675                           ? PPC::ANDI8_rec
12676                           : PPC::ANDI_rec;
12677     bool IsEQ = (MI.getOpcode() == PPC::ANDI_rec_1_EQ_BIT ||
12678                  MI.getOpcode() == PPC::ANDI_rec_1_EQ_BIT8);
12679 
12680     MachineRegisterInfo &RegInfo = F->getRegInfo();
12681     Register Dest = RegInfo.createVirtualRegister(
12682         Opcode == PPC::ANDI_rec ? &PPC::GPRCRegClass : &PPC::G8RCRegClass);
12683 
12684     DebugLoc Dl = MI.getDebugLoc();
12685     BuildMI(*BB, MI, Dl, TII->get(Opcode), Dest)
12686         .addReg(MI.getOperand(1).getReg())
12687         .addImm(1);
12688     BuildMI(*BB, MI, Dl, TII->get(TargetOpcode::COPY),
12689             MI.getOperand(0).getReg())
12690         .addReg(IsEQ ? PPC::CR0EQ : PPC::CR0GT);
12691   } else if (MI.getOpcode() == PPC::TCHECK_RET) {
12692     DebugLoc Dl = MI.getDebugLoc();
12693     MachineRegisterInfo &RegInfo = F->getRegInfo();
12694     Register CRReg = RegInfo.createVirtualRegister(&PPC::CRRCRegClass);
12695     BuildMI(*BB, MI, Dl, TII->get(PPC::TCHECK), CRReg);
12696     BuildMI(*BB, MI, Dl, TII->get(TargetOpcode::COPY),
12697             MI.getOperand(0).getReg())
12698         .addReg(CRReg);
12699   } else if (MI.getOpcode() == PPC::TBEGIN_RET) {
12700     DebugLoc Dl = MI.getDebugLoc();
12701     unsigned Imm = MI.getOperand(1).getImm();
12702     BuildMI(*BB, MI, Dl, TII->get(PPC::TBEGIN)).addImm(Imm);
12703     BuildMI(*BB, MI, Dl, TII->get(TargetOpcode::COPY),
12704             MI.getOperand(0).getReg())
12705         .addReg(PPC::CR0EQ);
12706   } else if (MI.getOpcode() == PPC::SETRNDi) {
12707     DebugLoc dl = MI.getDebugLoc();
12708     Register OldFPSCRReg = MI.getOperand(0).getReg();
12709 
12710     // Save FPSCR value.
12711     BuildMI(*BB, MI, dl, TII->get(PPC::MFFS), OldFPSCRReg);
12712 
12713     // The floating point rounding mode is in the bits 62:63 of FPCSR, and has
12714     // the following settings:
12715     //   00 Round to nearest
12716     //   01 Round to 0
12717     //   10 Round to +inf
12718     //   11 Round to -inf
12719 
12720     // When the operand is immediate, using the two least significant bits of
12721     // the immediate to set the bits 62:63 of FPSCR.
12722     unsigned Mode = MI.getOperand(1).getImm();
12723     BuildMI(*BB, MI, dl, TII->get((Mode & 1) ? PPC::MTFSB1 : PPC::MTFSB0))
12724       .addImm(31);
12725 
12726     BuildMI(*BB, MI, dl, TII->get((Mode & 2) ? PPC::MTFSB1 : PPC::MTFSB0))
12727       .addImm(30);
12728   } else if (MI.getOpcode() == PPC::SETRND) {
12729     DebugLoc dl = MI.getDebugLoc();
12730 
12731     // Copy register from F8RCRegClass::SrcReg to G8RCRegClass::DestReg
12732     // or copy register from G8RCRegClass::SrcReg to F8RCRegClass::DestReg.
12733     // If the target doesn't have DirectMove, we should use stack to do the
12734     // conversion, because the target doesn't have the instructions like mtvsrd
12735     // or mfvsrd to do this conversion directly.
12736     auto copyRegFromG8RCOrF8RC = [&] (unsigned DestReg, unsigned SrcReg) {
12737       if (Subtarget.hasDirectMove()) {
12738         BuildMI(*BB, MI, dl, TII->get(TargetOpcode::COPY), DestReg)
12739           .addReg(SrcReg);
12740       } else {
12741         // Use stack to do the register copy.
12742         unsigned StoreOp = PPC::STD, LoadOp = PPC::LFD;
12743         MachineRegisterInfo &RegInfo = F->getRegInfo();
12744         const TargetRegisterClass *RC = RegInfo.getRegClass(SrcReg);
12745         if (RC == &PPC::F8RCRegClass) {
12746           // Copy register from F8RCRegClass to G8RCRegclass.
12747           assert((RegInfo.getRegClass(DestReg) == &PPC::G8RCRegClass) &&
12748                  "Unsupported RegClass.");
12749 
12750           StoreOp = PPC::STFD;
12751           LoadOp = PPC::LD;
12752         } else {
12753           // Copy register from G8RCRegClass to F8RCRegclass.
12754           assert((RegInfo.getRegClass(SrcReg) == &PPC::G8RCRegClass) &&
12755                  (RegInfo.getRegClass(DestReg) == &PPC::F8RCRegClass) &&
12756                  "Unsupported RegClass.");
12757         }
12758 
12759         MachineFrameInfo &MFI = F->getFrameInfo();
12760         int FrameIdx = MFI.CreateStackObject(8, Align(8), false);
12761 
12762         MachineMemOperand *MMOStore = F->getMachineMemOperand(
12763             MachinePointerInfo::getFixedStack(*F, FrameIdx, 0),
12764             MachineMemOperand::MOStore, MFI.getObjectSize(FrameIdx),
12765             MFI.getObjectAlign(FrameIdx));
12766 
12767         // Store the SrcReg into the stack.
12768         BuildMI(*BB, MI, dl, TII->get(StoreOp))
12769           .addReg(SrcReg)
12770           .addImm(0)
12771           .addFrameIndex(FrameIdx)
12772           .addMemOperand(MMOStore);
12773 
12774         MachineMemOperand *MMOLoad = F->getMachineMemOperand(
12775             MachinePointerInfo::getFixedStack(*F, FrameIdx, 0),
12776             MachineMemOperand::MOLoad, MFI.getObjectSize(FrameIdx),
12777             MFI.getObjectAlign(FrameIdx));
12778 
12779         // Load from the stack where SrcReg is stored, and save to DestReg,
12780         // so we have done the RegClass conversion from RegClass::SrcReg to
12781         // RegClass::DestReg.
12782         BuildMI(*BB, MI, dl, TII->get(LoadOp), DestReg)
12783           .addImm(0)
12784           .addFrameIndex(FrameIdx)
12785           .addMemOperand(MMOLoad);
12786       }
12787     };
12788 
12789     Register OldFPSCRReg = MI.getOperand(0).getReg();
12790 
12791     // Save FPSCR value.
12792     BuildMI(*BB, MI, dl, TII->get(PPC::MFFS), OldFPSCRReg);
12793 
12794     // When the operand is gprc register, use two least significant bits of the
12795     // register and mtfsf instruction to set the bits 62:63 of FPSCR.
12796     //
12797     // copy OldFPSCRTmpReg, OldFPSCRReg
12798     // (INSERT_SUBREG ExtSrcReg, (IMPLICIT_DEF ImDefReg), SrcOp, 1)
12799     // rldimi NewFPSCRTmpReg, ExtSrcReg, OldFPSCRReg, 0, 62
12800     // copy NewFPSCRReg, NewFPSCRTmpReg
12801     // mtfsf 255, NewFPSCRReg
12802     MachineOperand SrcOp = MI.getOperand(1);
12803     MachineRegisterInfo &RegInfo = F->getRegInfo();
12804     Register OldFPSCRTmpReg = RegInfo.createVirtualRegister(&PPC::G8RCRegClass);
12805 
12806     copyRegFromG8RCOrF8RC(OldFPSCRTmpReg, OldFPSCRReg);
12807 
12808     Register ImDefReg = RegInfo.createVirtualRegister(&PPC::G8RCRegClass);
12809     Register ExtSrcReg = RegInfo.createVirtualRegister(&PPC::G8RCRegClass);
12810 
12811     // The first operand of INSERT_SUBREG should be a register which has
12812     // subregisters, we only care about its RegClass, so we should use an
12813     // IMPLICIT_DEF register.
12814     BuildMI(*BB, MI, dl, TII->get(TargetOpcode::IMPLICIT_DEF), ImDefReg);
12815     BuildMI(*BB, MI, dl, TII->get(PPC::INSERT_SUBREG), ExtSrcReg)
12816       .addReg(ImDefReg)
12817       .add(SrcOp)
12818       .addImm(1);
12819 
12820     Register NewFPSCRTmpReg = RegInfo.createVirtualRegister(&PPC::G8RCRegClass);
12821     BuildMI(*BB, MI, dl, TII->get(PPC::RLDIMI), NewFPSCRTmpReg)
12822       .addReg(OldFPSCRTmpReg)
12823       .addReg(ExtSrcReg)
12824       .addImm(0)
12825       .addImm(62);
12826 
12827     Register NewFPSCRReg = RegInfo.createVirtualRegister(&PPC::F8RCRegClass);
12828     copyRegFromG8RCOrF8RC(NewFPSCRReg, NewFPSCRTmpReg);
12829 
12830     // The mask 255 means that put the 32:63 bits of NewFPSCRReg to the 32:63
12831     // bits of FPSCR.
12832     BuildMI(*BB, MI, dl, TII->get(PPC::MTFSF))
12833       .addImm(255)
12834       .addReg(NewFPSCRReg)
12835       .addImm(0)
12836       .addImm(0);
12837   } else if (MI.getOpcode() == PPC::PROBED_ALLOCA_32 ||
12838              MI.getOpcode() == PPC::PROBED_ALLOCA_64) {
12839     return emitProbedAlloca(MI, BB);
12840   } else {
12841     llvm_unreachable("Unexpected instr type to insert");
12842   }
12843 
12844   MI.eraseFromParent(); // The pseudo instruction is gone now.
12845   return BB;
12846 }
12847 
12848 //===----------------------------------------------------------------------===//
12849 // Target Optimization Hooks
12850 //===----------------------------------------------------------------------===//
12851 
12852 static int getEstimateRefinementSteps(EVT VT, const PPCSubtarget &Subtarget) {
12853   // For the estimates, convergence is quadratic, so we essentially double the
12854   // number of digits correct after every iteration. For both FRE and FRSQRTE,
12855   // the minimum architected relative accuracy is 2^-5. When hasRecipPrec(),
12856   // this is 2^-14. IEEE float has 23 digits and double has 52 digits.
12857   int RefinementSteps = Subtarget.hasRecipPrec() ? 1 : 3;
12858   if (VT.getScalarType() == MVT::f64)
12859     RefinementSteps++;
12860   return RefinementSteps;
12861 }
12862 
12863 SDValue PPCTargetLowering::getSqrtEstimate(SDValue Operand, SelectionDAG &DAG,
12864                                            int Enabled, int &RefinementSteps,
12865                                            bool &UseOneConstNR,
12866                                            bool Reciprocal) const {
12867   EVT VT = Operand.getValueType();
12868   if ((VT == MVT::f32 && Subtarget.hasFRSQRTES()) ||
12869       (VT == MVT::f64 && Subtarget.hasFRSQRTE()) ||
12870       (VT == MVT::v4f32 && Subtarget.hasAltivec()) ||
12871       (VT == MVT::v2f64 && Subtarget.hasVSX()) ||
12872       (VT == MVT::v4f32 && Subtarget.hasQPX()) ||
12873       (VT == MVT::v4f64 && Subtarget.hasQPX())) {
12874     if (RefinementSteps == ReciprocalEstimate::Unspecified)
12875       RefinementSteps = getEstimateRefinementSteps(VT, Subtarget);
12876 
12877     // The Newton-Raphson computation with a single constant does not provide
12878     // enough accuracy on some CPUs.
12879     UseOneConstNR = !Subtarget.needsTwoConstNR();
12880     return DAG.getNode(PPCISD::FRSQRTE, SDLoc(Operand), VT, Operand);
12881   }
12882   return SDValue();
12883 }
12884 
12885 SDValue PPCTargetLowering::getRecipEstimate(SDValue Operand, SelectionDAG &DAG,
12886                                             int Enabled,
12887                                             int &RefinementSteps) const {
12888   EVT VT = Operand.getValueType();
12889   if ((VT == MVT::f32 && Subtarget.hasFRES()) ||
12890       (VT == MVT::f64 && Subtarget.hasFRE()) ||
12891       (VT == MVT::v4f32 && Subtarget.hasAltivec()) ||
12892       (VT == MVT::v2f64 && Subtarget.hasVSX()) ||
12893       (VT == MVT::v4f32 && Subtarget.hasQPX()) ||
12894       (VT == MVT::v4f64 && Subtarget.hasQPX())) {
12895     if (RefinementSteps == ReciprocalEstimate::Unspecified)
12896       RefinementSteps = getEstimateRefinementSteps(VT, Subtarget);
12897     return DAG.getNode(PPCISD::FRE, SDLoc(Operand), VT, Operand);
12898   }
12899   return SDValue();
12900 }
12901 
12902 unsigned PPCTargetLowering::combineRepeatedFPDivisors() const {
12903   // Note: This functionality is used only when unsafe-fp-math is enabled, and
12904   // on cores with reciprocal estimates (which are used when unsafe-fp-math is
12905   // enabled for division), this functionality is redundant with the default
12906   // combiner logic (once the division -> reciprocal/multiply transformation
12907   // has taken place). As a result, this matters more for older cores than for
12908   // newer ones.
12909 
12910   // Combine multiple FDIVs with the same divisor into multiple FMULs by the
12911   // reciprocal if there are two or more FDIVs (for embedded cores with only
12912   // one FP pipeline) for three or more FDIVs (for generic OOO cores).
12913   switch (Subtarget.getCPUDirective()) {
12914   default:
12915     return 3;
12916   case PPC::DIR_440:
12917   case PPC::DIR_A2:
12918   case PPC::DIR_E500:
12919   case PPC::DIR_E500mc:
12920   case PPC::DIR_E5500:
12921     return 2;
12922   }
12923 }
12924 
12925 // isConsecutiveLSLoc needs to work even if all adds have not yet been
12926 // collapsed, and so we need to look through chains of them.
12927 static void getBaseWithConstantOffset(SDValue Loc, SDValue &Base,
12928                                      int64_t& Offset, SelectionDAG &DAG) {
12929   if (DAG.isBaseWithConstantOffset(Loc)) {
12930     Base = Loc.getOperand(0);
12931     Offset += cast<ConstantSDNode>(Loc.getOperand(1))->getSExtValue();
12932 
12933     // The base might itself be a base plus an offset, and if so, accumulate
12934     // that as well.
12935     getBaseWithConstantOffset(Loc.getOperand(0), Base, Offset, DAG);
12936   }
12937 }
12938 
12939 static bool isConsecutiveLSLoc(SDValue Loc, EVT VT, LSBaseSDNode *Base,
12940                             unsigned Bytes, int Dist,
12941                             SelectionDAG &DAG) {
12942   if (VT.getSizeInBits() / 8 != Bytes)
12943     return false;
12944 
12945   SDValue BaseLoc = Base->getBasePtr();
12946   if (Loc.getOpcode() == ISD::FrameIndex) {
12947     if (BaseLoc.getOpcode() != ISD::FrameIndex)
12948       return false;
12949     const MachineFrameInfo &MFI = DAG.getMachineFunction().getFrameInfo();
12950     int FI  = cast<FrameIndexSDNode>(Loc)->getIndex();
12951     int BFI = cast<FrameIndexSDNode>(BaseLoc)->getIndex();
12952     int FS  = MFI.getObjectSize(FI);
12953     int BFS = MFI.getObjectSize(BFI);
12954     if (FS != BFS || FS != (int)Bytes) return false;
12955     return MFI.getObjectOffset(FI) == (MFI.getObjectOffset(BFI) + Dist*Bytes);
12956   }
12957 
12958   SDValue Base1 = Loc, Base2 = BaseLoc;
12959   int64_t Offset1 = 0, Offset2 = 0;
12960   getBaseWithConstantOffset(Loc, Base1, Offset1, DAG);
12961   getBaseWithConstantOffset(BaseLoc, Base2, Offset2, DAG);
12962   if (Base1 == Base2 && Offset1 == (Offset2 + Dist * Bytes))
12963     return true;
12964 
12965   const TargetLowering &TLI = DAG.getTargetLoweringInfo();
12966   const GlobalValue *GV1 = nullptr;
12967   const GlobalValue *GV2 = nullptr;
12968   Offset1 = 0;
12969   Offset2 = 0;
12970   bool isGA1 = TLI.isGAPlusOffset(Loc.getNode(), GV1, Offset1);
12971   bool isGA2 = TLI.isGAPlusOffset(BaseLoc.getNode(), GV2, Offset2);
12972   if (isGA1 && isGA2 && GV1 == GV2)
12973     return Offset1 == (Offset2 + Dist*Bytes);
12974   return false;
12975 }
12976 
12977 // Like SelectionDAG::isConsecutiveLoad, but also works for stores, and does
12978 // not enforce equality of the chain operands.
12979 static bool isConsecutiveLS(SDNode *N, LSBaseSDNode *Base,
12980                             unsigned Bytes, int Dist,
12981                             SelectionDAG &DAG) {
12982   if (LSBaseSDNode *LS = dyn_cast<LSBaseSDNode>(N)) {
12983     EVT VT = LS->getMemoryVT();
12984     SDValue Loc = LS->getBasePtr();
12985     return isConsecutiveLSLoc(Loc, VT, Base, Bytes, Dist, DAG);
12986   }
12987 
12988   if (N->getOpcode() == ISD::INTRINSIC_W_CHAIN) {
12989     EVT VT;
12990     switch (cast<ConstantSDNode>(N->getOperand(1))->getZExtValue()) {
12991     default: return false;
12992     case Intrinsic::ppc_qpx_qvlfd:
12993     case Intrinsic::ppc_qpx_qvlfda:
12994       VT = MVT::v4f64;
12995       break;
12996     case Intrinsic::ppc_qpx_qvlfs:
12997     case Intrinsic::ppc_qpx_qvlfsa:
12998       VT = MVT::v4f32;
12999       break;
13000     case Intrinsic::ppc_qpx_qvlfcd:
13001     case Intrinsic::ppc_qpx_qvlfcda:
13002       VT = MVT::v2f64;
13003       break;
13004     case Intrinsic::ppc_qpx_qvlfcs:
13005     case Intrinsic::ppc_qpx_qvlfcsa:
13006       VT = MVT::v2f32;
13007       break;
13008     case Intrinsic::ppc_qpx_qvlfiwa:
13009     case Intrinsic::ppc_qpx_qvlfiwz:
13010     case Intrinsic::ppc_altivec_lvx:
13011     case Intrinsic::ppc_altivec_lvxl:
13012     case Intrinsic::ppc_vsx_lxvw4x:
13013     case Intrinsic::ppc_vsx_lxvw4x_be:
13014       VT = MVT::v4i32;
13015       break;
13016     case Intrinsic::ppc_vsx_lxvd2x:
13017     case Intrinsic::ppc_vsx_lxvd2x_be:
13018       VT = MVT::v2f64;
13019       break;
13020     case Intrinsic::ppc_altivec_lvebx:
13021       VT = MVT::i8;
13022       break;
13023     case Intrinsic::ppc_altivec_lvehx:
13024       VT = MVT::i16;
13025       break;
13026     case Intrinsic::ppc_altivec_lvewx:
13027       VT = MVT::i32;
13028       break;
13029     }
13030 
13031     return isConsecutiveLSLoc(N->getOperand(2), VT, Base, Bytes, Dist, DAG);
13032   }
13033 
13034   if (N->getOpcode() == ISD::INTRINSIC_VOID) {
13035     EVT VT;
13036     switch (cast<ConstantSDNode>(N->getOperand(1))->getZExtValue()) {
13037     default: return false;
13038     case Intrinsic::ppc_qpx_qvstfd:
13039     case Intrinsic::ppc_qpx_qvstfda:
13040       VT = MVT::v4f64;
13041       break;
13042     case Intrinsic::ppc_qpx_qvstfs:
13043     case Intrinsic::ppc_qpx_qvstfsa:
13044       VT = MVT::v4f32;
13045       break;
13046     case Intrinsic::ppc_qpx_qvstfcd:
13047     case Intrinsic::ppc_qpx_qvstfcda:
13048       VT = MVT::v2f64;
13049       break;
13050     case Intrinsic::ppc_qpx_qvstfcs:
13051     case Intrinsic::ppc_qpx_qvstfcsa:
13052       VT = MVT::v2f32;
13053       break;
13054     case Intrinsic::ppc_qpx_qvstfiw:
13055     case Intrinsic::ppc_qpx_qvstfiwa:
13056     case Intrinsic::ppc_altivec_stvx:
13057     case Intrinsic::ppc_altivec_stvxl:
13058     case Intrinsic::ppc_vsx_stxvw4x:
13059       VT = MVT::v4i32;
13060       break;
13061     case Intrinsic::ppc_vsx_stxvd2x:
13062       VT = MVT::v2f64;
13063       break;
13064     case Intrinsic::ppc_vsx_stxvw4x_be:
13065       VT = MVT::v4i32;
13066       break;
13067     case Intrinsic::ppc_vsx_stxvd2x_be:
13068       VT = MVT::v2f64;
13069       break;
13070     case Intrinsic::ppc_altivec_stvebx:
13071       VT = MVT::i8;
13072       break;
13073     case Intrinsic::ppc_altivec_stvehx:
13074       VT = MVT::i16;
13075       break;
13076     case Intrinsic::ppc_altivec_stvewx:
13077       VT = MVT::i32;
13078       break;
13079     }
13080 
13081     return isConsecutiveLSLoc(N->getOperand(3), VT, Base, Bytes, Dist, DAG);
13082   }
13083 
13084   return false;
13085 }
13086 
13087 // Return true is there is a nearyby consecutive load to the one provided
13088 // (regardless of alignment). We search up and down the chain, looking though
13089 // token factors and other loads (but nothing else). As a result, a true result
13090 // indicates that it is safe to create a new consecutive load adjacent to the
13091 // load provided.
13092 static bool findConsecutiveLoad(LoadSDNode *LD, SelectionDAG &DAG) {
13093   SDValue Chain = LD->getChain();
13094   EVT VT = LD->getMemoryVT();
13095 
13096   SmallSet<SDNode *, 16> LoadRoots;
13097   SmallVector<SDNode *, 8> Queue(1, Chain.getNode());
13098   SmallSet<SDNode *, 16> Visited;
13099 
13100   // First, search up the chain, branching to follow all token-factor operands.
13101   // If we find a consecutive load, then we're done, otherwise, record all
13102   // nodes just above the top-level loads and token factors.
13103   while (!Queue.empty()) {
13104     SDNode *ChainNext = Queue.pop_back_val();
13105     if (!Visited.insert(ChainNext).second)
13106       continue;
13107 
13108     if (MemSDNode *ChainLD = dyn_cast<MemSDNode>(ChainNext)) {
13109       if (isConsecutiveLS(ChainLD, LD, VT.getStoreSize(), 1, DAG))
13110         return true;
13111 
13112       if (!Visited.count(ChainLD->getChain().getNode()))
13113         Queue.push_back(ChainLD->getChain().getNode());
13114     } else if (ChainNext->getOpcode() == ISD::TokenFactor) {
13115       for (const SDUse &O : ChainNext->ops())
13116         if (!Visited.count(O.getNode()))
13117           Queue.push_back(O.getNode());
13118     } else
13119       LoadRoots.insert(ChainNext);
13120   }
13121 
13122   // Second, search down the chain, starting from the top-level nodes recorded
13123   // in the first phase. These top-level nodes are the nodes just above all
13124   // loads and token factors. Starting with their uses, recursively look though
13125   // all loads (just the chain uses) and token factors to find a consecutive
13126   // load.
13127   Visited.clear();
13128   Queue.clear();
13129 
13130   for (SmallSet<SDNode *, 16>::iterator I = LoadRoots.begin(),
13131        IE = LoadRoots.end(); I != IE; ++I) {
13132     Queue.push_back(*I);
13133 
13134     while (!Queue.empty()) {
13135       SDNode *LoadRoot = Queue.pop_back_val();
13136       if (!Visited.insert(LoadRoot).second)
13137         continue;
13138 
13139       if (MemSDNode *ChainLD = dyn_cast<MemSDNode>(LoadRoot))
13140         if (isConsecutiveLS(ChainLD, LD, VT.getStoreSize(), 1, DAG))
13141           return true;
13142 
13143       for (SDNode::use_iterator UI = LoadRoot->use_begin(),
13144            UE = LoadRoot->use_end(); UI != UE; ++UI)
13145         if (((isa<MemSDNode>(*UI) &&
13146             cast<MemSDNode>(*UI)->getChain().getNode() == LoadRoot) ||
13147             UI->getOpcode() == ISD::TokenFactor) && !Visited.count(*UI))
13148           Queue.push_back(*UI);
13149     }
13150   }
13151 
13152   return false;
13153 }
13154 
13155 /// This function is called when we have proved that a SETCC node can be replaced
13156 /// by subtraction (and other supporting instructions) so that the result of
13157 /// comparison is kept in a GPR instead of CR. This function is purely for
13158 /// codegen purposes and has some flags to guide the codegen process.
13159 static SDValue generateEquivalentSub(SDNode *N, int Size, bool Complement,
13160                                      bool Swap, SDLoc &DL, SelectionDAG &DAG) {
13161   assert(N->getOpcode() == ISD::SETCC && "ISD::SETCC Expected.");
13162 
13163   // Zero extend the operands to the largest legal integer. Originally, they
13164   // must be of a strictly smaller size.
13165   auto Op0 = DAG.getNode(ISD::ZERO_EXTEND, DL, MVT::i64, N->getOperand(0),
13166                          DAG.getConstant(Size, DL, MVT::i32));
13167   auto Op1 = DAG.getNode(ISD::ZERO_EXTEND, DL, MVT::i64, N->getOperand(1),
13168                          DAG.getConstant(Size, DL, MVT::i32));
13169 
13170   // Swap if needed. Depends on the condition code.
13171   if (Swap)
13172     std::swap(Op0, Op1);
13173 
13174   // Subtract extended integers.
13175   auto SubNode = DAG.getNode(ISD::SUB, DL, MVT::i64, Op0, Op1);
13176 
13177   // Move the sign bit to the least significant position and zero out the rest.
13178   // Now the least significant bit carries the result of original comparison.
13179   auto Shifted = DAG.getNode(ISD::SRL, DL, MVT::i64, SubNode,
13180                              DAG.getConstant(Size - 1, DL, MVT::i32));
13181   auto Final = Shifted;
13182 
13183   // Complement the result if needed. Based on the condition code.
13184   if (Complement)
13185     Final = DAG.getNode(ISD::XOR, DL, MVT::i64, Shifted,
13186                         DAG.getConstant(1, DL, MVT::i64));
13187 
13188   return DAG.getNode(ISD::TRUNCATE, DL, MVT::i1, Final);
13189 }
13190 
13191 SDValue PPCTargetLowering::ConvertSETCCToSubtract(SDNode *N,
13192                                                   DAGCombinerInfo &DCI) const {
13193   assert(N->getOpcode() == ISD::SETCC && "ISD::SETCC Expected.");
13194 
13195   SelectionDAG &DAG = DCI.DAG;
13196   SDLoc DL(N);
13197 
13198   // Size of integers being compared has a critical role in the following
13199   // analysis, so we prefer to do this when all types are legal.
13200   if (!DCI.isAfterLegalizeDAG())
13201     return SDValue();
13202 
13203   // If all users of SETCC extend its value to a legal integer type
13204   // then we replace SETCC with a subtraction
13205   for (SDNode::use_iterator UI = N->use_begin(),
13206        UE = N->use_end(); UI != UE; ++UI) {
13207     if (UI->getOpcode() != ISD::ZERO_EXTEND)
13208       return SDValue();
13209   }
13210 
13211   ISD::CondCode CC = cast<CondCodeSDNode>(N->getOperand(2))->get();
13212   auto OpSize = N->getOperand(0).getValueSizeInBits();
13213 
13214   unsigned Size = DAG.getDataLayout().getLargestLegalIntTypeSizeInBits();
13215 
13216   if (OpSize < Size) {
13217     switch (CC) {
13218     default: break;
13219     case ISD::SETULT:
13220       return generateEquivalentSub(N, Size, false, false, DL, DAG);
13221     case ISD::SETULE:
13222       return generateEquivalentSub(N, Size, true, true, DL, DAG);
13223     case ISD::SETUGT:
13224       return generateEquivalentSub(N, Size, false, true, DL, DAG);
13225     case ISD::SETUGE:
13226       return generateEquivalentSub(N, Size, true, false, DL, DAG);
13227     }
13228   }
13229 
13230   return SDValue();
13231 }
13232 
13233 SDValue PPCTargetLowering::DAGCombineTruncBoolExt(SDNode *N,
13234                                                   DAGCombinerInfo &DCI) const {
13235   SelectionDAG &DAG = DCI.DAG;
13236   SDLoc dl(N);
13237 
13238   assert(Subtarget.useCRBits() && "Expecting to be tracking CR bits");
13239   // If we're tracking CR bits, we need to be careful that we don't have:
13240   //   trunc(binary-ops(zext(x), zext(y)))
13241   // or
13242   //   trunc(binary-ops(binary-ops(zext(x), zext(y)), ...)
13243   // such that we're unnecessarily moving things into GPRs when it would be
13244   // better to keep them in CR bits.
13245 
13246   // Note that trunc here can be an actual i1 trunc, or can be the effective
13247   // truncation that comes from a setcc or select_cc.
13248   if (N->getOpcode() == ISD::TRUNCATE &&
13249       N->getValueType(0) != MVT::i1)
13250     return SDValue();
13251 
13252   if (N->getOperand(0).getValueType() != MVT::i32 &&
13253       N->getOperand(0).getValueType() != MVT::i64)
13254     return SDValue();
13255 
13256   if (N->getOpcode() == ISD::SETCC ||
13257       N->getOpcode() == ISD::SELECT_CC) {
13258     // If we're looking at a comparison, then we need to make sure that the
13259     // high bits (all except for the first) don't matter the result.
13260     ISD::CondCode CC =
13261       cast<CondCodeSDNode>(N->getOperand(
13262         N->getOpcode() == ISD::SETCC ? 2 : 4))->get();
13263     unsigned OpBits = N->getOperand(0).getValueSizeInBits();
13264 
13265     if (ISD::isSignedIntSetCC(CC)) {
13266       if (DAG.ComputeNumSignBits(N->getOperand(0)) != OpBits ||
13267           DAG.ComputeNumSignBits(N->getOperand(1)) != OpBits)
13268         return SDValue();
13269     } else if (ISD::isUnsignedIntSetCC(CC)) {
13270       if (!DAG.MaskedValueIsZero(N->getOperand(0),
13271                                  APInt::getHighBitsSet(OpBits, OpBits-1)) ||
13272           !DAG.MaskedValueIsZero(N->getOperand(1),
13273                                  APInt::getHighBitsSet(OpBits, OpBits-1)))
13274         return (N->getOpcode() == ISD::SETCC ? ConvertSETCCToSubtract(N, DCI)
13275                                              : SDValue());
13276     } else {
13277       // This is neither a signed nor an unsigned comparison, just make sure
13278       // that the high bits are equal.
13279       KnownBits Op1Known = DAG.computeKnownBits(N->getOperand(0));
13280       KnownBits Op2Known = DAG.computeKnownBits(N->getOperand(1));
13281 
13282       // We don't really care about what is known about the first bit (if
13283       // anything), so clear it in all masks prior to comparing them.
13284       Op1Known.Zero.clearBit(0); Op1Known.One.clearBit(0);
13285       Op2Known.Zero.clearBit(0); Op2Known.One.clearBit(0);
13286 
13287       if (Op1Known.Zero != Op2Known.Zero || Op1Known.One != Op2Known.One)
13288         return SDValue();
13289     }
13290   }
13291 
13292   // We now know that the higher-order bits are irrelevant, we just need to
13293   // make sure that all of the intermediate operations are bit operations, and
13294   // all inputs are extensions.
13295   if (N->getOperand(0).getOpcode() != ISD::AND &&
13296       N->getOperand(0).getOpcode() != ISD::OR  &&
13297       N->getOperand(0).getOpcode() != ISD::XOR &&
13298       N->getOperand(0).getOpcode() != ISD::SELECT &&
13299       N->getOperand(0).getOpcode() != ISD::SELECT_CC &&
13300       N->getOperand(0).getOpcode() != ISD::TRUNCATE &&
13301       N->getOperand(0).getOpcode() != ISD::SIGN_EXTEND &&
13302       N->getOperand(0).getOpcode() != ISD::ZERO_EXTEND &&
13303       N->getOperand(0).getOpcode() != ISD::ANY_EXTEND)
13304     return SDValue();
13305 
13306   if ((N->getOpcode() == ISD::SETCC || N->getOpcode() == ISD::SELECT_CC) &&
13307       N->getOperand(1).getOpcode() != ISD::AND &&
13308       N->getOperand(1).getOpcode() != ISD::OR  &&
13309       N->getOperand(1).getOpcode() != ISD::XOR &&
13310       N->getOperand(1).getOpcode() != ISD::SELECT &&
13311       N->getOperand(1).getOpcode() != ISD::SELECT_CC &&
13312       N->getOperand(1).getOpcode() != ISD::TRUNCATE &&
13313       N->getOperand(1).getOpcode() != ISD::SIGN_EXTEND &&
13314       N->getOperand(1).getOpcode() != ISD::ZERO_EXTEND &&
13315       N->getOperand(1).getOpcode() != ISD::ANY_EXTEND)
13316     return SDValue();
13317 
13318   SmallVector<SDValue, 4> Inputs;
13319   SmallVector<SDValue, 8> BinOps, PromOps;
13320   SmallPtrSet<SDNode *, 16> Visited;
13321 
13322   for (unsigned i = 0; i < 2; ++i) {
13323     if (((N->getOperand(i).getOpcode() == ISD::SIGN_EXTEND ||
13324           N->getOperand(i).getOpcode() == ISD::ZERO_EXTEND ||
13325           N->getOperand(i).getOpcode() == ISD::ANY_EXTEND) &&
13326           N->getOperand(i).getOperand(0).getValueType() == MVT::i1) ||
13327         isa<ConstantSDNode>(N->getOperand(i)))
13328       Inputs.push_back(N->getOperand(i));
13329     else
13330       BinOps.push_back(N->getOperand(i));
13331 
13332     if (N->getOpcode() == ISD::TRUNCATE)
13333       break;
13334   }
13335 
13336   // Visit all inputs, collect all binary operations (and, or, xor and
13337   // select) that are all fed by extensions.
13338   while (!BinOps.empty()) {
13339     SDValue BinOp = BinOps.back();
13340     BinOps.pop_back();
13341 
13342     if (!Visited.insert(BinOp.getNode()).second)
13343       continue;
13344 
13345     PromOps.push_back(BinOp);
13346 
13347     for (unsigned i = 0, ie = BinOp.getNumOperands(); i != ie; ++i) {
13348       // The condition of the select is not promoted.
13349       if (BinOp.getOpcode() == ISD::SELECT && i == 0)
13350         continue;
13351       if (BinOp.getOpcode() == ISD::SELECT_CC && i != 2 && i != 3)
13352         continue;
13353 
13354       if (((BinOp.getOperand(i).getOpcode() == ISD::SIGN_EXTEND ||
13355             BinOp.getOperand(i).getOpcode() == ISD::ZERO_EXTEND ||
13356             BinOp.getOperand(i).getOpcode() == ISD::ANY_EXTEND) &&
13357            BinOp.getOperand(i).getOperand(0).getValueType() == MVT::i1) ||
13358           isa<ConstantSDNode>(BinOp.getOperand(i))) {
13359         Inputs.push_back(BinOp.getOperand(i));
13360       } else if (BinOp.getOperand(i).getOpcode() == ISD::AND ||
13361                  BinOp.getOperand(i).getOpcode() == ISD::OR  ||
13362                  BinOp.getOperand(i).getOpcode() == ISD::XOR ||
13363                  BinOp.getOperand(i).getOpcode() == ISD::SELECT ||
13364                  BinOp.getOperand(i).getOpcode() == ISD::SELECT_CC ||
13365                  BinOp.getOperand(i).getOpcode() == ISD::TRUNCATE ||
13366                  BinOp.getOperand(i).getOpcode() == ISD::SIGN_EXTEND ||
13367                  BinOp.getOperand(i).getOpcode() == ISD::ZERO_EXTEND ||
13368                  BinOp.getOperand(i).getOpcode() == ISD::ANY_EXTEND) {
13369         BinOps.push_back(BinOp.getOperand(i));
13370       } else {
13371         // We have an input that is not an extension or another binary
13372         // operation; we'll abort this transformation.
13373         return SDValue();
13374       }
13375     }
13376   }
13377 
13378   // Make sure that this is a self-contained cluster of operations (which
13379   // is not quite the same thing as saying that everything has only one
13380   // use).
13381   for (unsigned i = 0, ie = Inputs.size(); i != ie; ++i) {
13382     if (isa<ConstantSDNode>(Inputs[i]))
13383       continue;
13384 
13385     for (SDNode::use_iterator UI = Inputs[i].getNode()->use_begin(),
13386                               UE = Inputs[i].getNode()->use_end();
13387          UI != UE; ++UI) {
13388       SDNode *User = *UI;
13389       if (User != N && !Visited.count(User))
13390         return SDValue();
13391 
13392       // Make sure that we're not going to promote the non-output-value
13393       // operand(s) or SELECT or SELECT_CC.
13394       // FIXME: Although we could sometimes handle this, and it does occur in
13395       // practice that one of the condition inputs to the select is also one of
13396       // the outputs, we currently can't deal with this.
13397       if (User->getOpcode() == ISD::SELECT) {
13398         if (User->getOperand(0) == Inputs[i])
13399           return SDValue();
13400       } else if (User->getOpcode() == ISD::SELECT_CC) {
13401         if (User->getOperand(0) == Inputs[i] ||
13402             User->getOperand(1) == Inputs[i])
13403           return SDValue();
13404       }
13405     }
13406   }
13407 
13408   for (unsigned i = 0, ie = PromOps.size(); i != ie; ++i) {
13409     for (SDNode::use_iterator UI = PromOps[i].getNode()->use_begin(),
13410                               UE = PromOps[i].getNode()->use_end();
13411          UI != UE; ++UI) {
13412       SDNode *User = *UI;
13413       if (User != N && !Visited.count(User))
13414         return SDValue();
13415 
13416       // Make sure that we're not going to promote the non-output-value
13417       // operand(s) or SELECT or SELECT_CC.
13418       // FIXME: Although we could sometimes handle this, and it does occur in
13419       // practice that one of the condition inputs to the select is also one of
13420       // the outputs, we currently can't deal with this.
13421       if (User->getOpcode() == ISD::SELECT) {
13422         if (User->getOperand(0) == PromOps[i])
13423           return SDValue();
13424       } else if (User->getOpcode() == ISD::SELECT_CC) {
13425         if (User->getOperand(0) == PromOps[i] ||
13426             User->getOperand(1) == PromOps[i])
13427           return SDValue();
13428       }
13429     }
13430   }
13431 
13432   // Replace all inputs with the extension operand.
13433   for (unsigned i = 0, ie = Inputs.size(); i != ie; ++i) {
13434     // Constants may have users outside the cluster of to-be-promoted nodes,
13435     // and so we need to replace those as we do the promotions.
13436     if (isa<ConstantSDNode>(Inputs[i]))
13437       continue;
13438     else
13439       DAG.ReplaceAllUsesOfValueWith(Inputs[i], Inputs[i].getOperand(0));
13440   }
13441 
13442   std::list<HandleSDNode> PromOpHandles;
13443   for (auto &PromOp : PromOps)
13444     PromOpHandles.emplace_back(PromOp);
13445 
13446   // Replace all operations (these are all the same, but have a different
13447   // (i1) return type). DAG.getNode will validate that the types of
13448   // a binary operator match, so go through the list in reverse so that
13449   // we've likely promoted both operands first. Any intermediate truncations or
13450   // extensions disappear.
13451   while (!PromOpHandles.empty()) {
13452     SDValue PromOp = PromOpHandles.back().getValue();
13453     PromOpHandles.pop_back();
13454 
13455     if (PromOp.getOpcode() == ISD::TRUNCATE ||
13456         PromOp.getOpcode() == ISD::SIGN_EXTEND ||
13457         PromOp.getOpcode() == ISD::ZERO_EXTEND ||
13458         PromOp.getOpcode() == ISD::ANY_EXTEND) {
13459       if (!isa<ConstantSDNode>(PromOp.getOperand(0)) &&
13460           PromOp.getOperand(0).getValueType() != MVT::i1) {
13461         // The operand is not yet ready (see comment below).
13462         PromOpHandles.emplace_front(PromOp);
13463         continue;
13464       }
13465 
13466       SDValue RepValue = PromOp.getOperand(0);
13467       if (isa<ConstantSDNode>(RepValue))
13468         RepValue = DAG.getNode(ISD::TRUNCATE, dl, MVT::i1, RepValue);
13469 
13470       DAG.ReplaceAllUsesOfValueWith(PromOp, RepValue);
13471       continue;
13472     }
13473 
13474     unsigned C;
13475     switch (PromOp.getOpcode()) {
13476     default:             C = 0; break;
13477     case ISD::SELECT:    C = 1; break;
13478     case ISD::SELECT_CC: C = 2; break;
13479     }
13480 
13481     if ((!isa<ConstantSDNode>(PromOp.getOperand(C)) &&
13482          PromOp.getOperand(C).getValueType() != MVT::i1) ||
13483         (!isa<ConstantSDNode>(PromOp.getOperand(C+1)) &&
13484          PromOp.getOperand(C+1).getValueType() != MVT::i1)) {
13485       // The to-be-promoted operands of this node have not yet been
13486       // promoted (this should be rare because we're going through the
13487       // list backward, but if one of the operands has several users in
13488       // this cluster of to-be-promoted nodes, it is possible).
13489       PromOpHandles.emplace_front(PromOp);
13490       continue;
13491     }
13492 
13493     SmallVector<SDValue, 3> Ops(PromOp.getNode()->op_begin(),
13494                                 PromOp.getNode()->op_end());
13495 
13496     // If there are any constant inputs, make sure they're replaced now.
13497     for (unsigned i = 0; i < 2; ++i)
13498       if (isa<ConstantSDNode>(Ops[C+i]))
13499         Ops[C+i] = DAG.getNode(ISD::TRUNCATE, dl, MVT::i1, Ops[C+i]);
13500 
13501     DAG.ReplaceAllUsesOfValueWith(PromOp,
13502       DAG.getNode(PromOp.getOpcode(), dl, MVT::i1, Ops));
13503   }
13504 
13505   // Now we're left with the initial truncation itself.
13506   if (N->getOpcode() == ISD::TRUNCATE)
13507     return N->getOperand(0);
13508 
13509   // Otherwise, this is a comparison. The operands to be compared have just
13510   // changed type (to i1), but everything else is the same.
13511   return SDValue(N, 0);
13512 }
13513 
13514 SDValue PPCTargetLowering::DAGCombineExtBoolTrunc(SDNode *N,
13515                                                   DAGCombinerInfo &DCI) const {
13516   SelectionDAG &DAG = DCI.DAG;
13517   SDLoc dl(N);
13518 
13519   // If we're tracking CR bits, we need to be careful that we don't have:
13520   //   zext(binary-ops(trunc(x), trunc(y)))
13521   // or
13522   //   zext(binary-ops(binary-ops(trunc(x), trunc(y)), ...)
13523   // such that we're unnecessarily moving things into CR bits that can more
13524   // efficiently stay in GPRs. Note that if we're not certain that the high
13525   // bits are set as required by the final extension, we still may need to do
13526   // some masking to get the proper behavior.
13527 
13528   // This same functionality is important on PPC64 when dealing with
13529   // 32-to-64-bit extensions; these occur often when 32-bit values are used as
13530   // the return values of functions. Because it is so similar, it is handled
13531   // here as well.
13532 
13533   if (N->getValueType(0) != MVT::i32 &&
13534       N->getValueType(0) != MVT::i64)
13535     return SDValue();
13536 
13537   if (!((N->getOperand(0).getValueType() == MVT::i1 && Subtarget.useCRBits()) ||
13538         (N->getOperand(0).getValueType() == MVT::i32 && Subtarget.isPPC64())))
13539     return SDValue();
13540 
13541   if (N->getOperand(0).getOpcode() != ISD::AND &&
13542       N->getOperand(0).getOpcode() != ISD::OR  &&
13543       N->getOperand(0).getOpcode() != ISD::XOR &&
13544       N->getOperand(0).getOpcode() != ISD::SELECT &&
13545       N->getOperand(0).getOpcode() != ISD::SELECT_CC)
13546     return SDValue();
13547 
13548   SmallVector<SDValue, 4> Inputs;
13549   SmallVector<SDValue, 8> BinOps(1, N->getOperand(0)), PromOps;
13550   SmallPtrSet<SDNode *, 16> Visited;
13551 
13552   // Visit all inputs, collect all binary operations (and, or, xor and
13553   // select) that are all fed by truncations.
13554   while (!BinOps.empty()) {
13555     SDValue BinOp = BinOps.back();
13556     BinOps.pop_back();
13557 
13558     if (!Visited.insert(BinOp.getNode()).second)
13559       continue;
13560 
13561     PromOps.push_back(BinOp);
13562 
13563     for (unsigned i = 0, ie = BinOp.getNumOperands(); i != ie; ++i) {
13564       // The condition of the select is not promoted.
13565       if (BinOp.getOpcode() == ISD::SELECT && i == 0)
13566         continue;
13567       if (BinOp.getOpcode() == ISD::SELECT_CC && i != 2 && i != 3)
13568         continue;
13569 
13570       if (BinOp.getOperand(i).getOpcode() == ISD::TRUNCATE ||
13571           isa<ConstantSDNode>(BinOp.getOperand(i))) {
13572         Inputs.push_back(BinOp.getOperand(i));
13573       } else if (BinOp.getOperand(i).getOpcode() == ISD::AND ||
13574                  BinOp.getOperand(i).getOpcode() == ISD::OR  ||
13575                  BinOp.getOperand(i).getOpcode() == ISD::XOR ||
13576                  BinOp.getOperand(i).getOpcode() == ISD::SELECT ||
13577                  BinOp.getOperand(i).getOpcode() == ISD::SELECT_CC) {
13578         BinOps.push_back(BinOp.getOperand(i));
13579       } else {
13580         // We have an input that is not a truncation or another binary
13581         // operation; we'll abort this transformation.
13582         return SDValue();
13583       }
13584     }
13585   }
13586 
13587   // The operands of a select that must be truncated when the select is
13588   // promoted because the operand is actually part of the to-be-promoted set.
13589   DenseMap<SDNode *, EVT> SelectTruncOp[2];
13590 
13591   // Make sure that this is a self-contained cluster of operations (which
13592   // is not quite the same thing as saying that everything has only one
13593   // use).
13594   for (unsigned i = 0, ie = Inputs.size(); i != ie; ++i) {
13595     if (isa<ConstantSDNode>(Inputs[i]))
13596       continue;
13597 
13598     for (SDNode::use_iterator UI = Inputs[i].getNode()->use_begin(),
13599                               UE = Inputs[i].getNode()->use_end();
13600          UI != UE; ++UI) {
13601       SDNode *User = *UI;
13602       if (User != N && !Visited.count(User))
13603         return SDValue();
13604 
13605       // If we're going to promote the non-output-value operand(s) or SELECT or
13606       // SELECT_CC, record them for truncation.
13607       if (User->getOpcode() == ISD::SELECT) {
13608         if (User->getOperand(0) == Inputs[i])
13609           SelectTruncOp[0].insert(std::make_pair(User,
13610                                     User->getOperand(0).getValueType()));
13611       } else if (User->getOpcode() == ISD::SELECT_CC) {
13612         if (User->getOperand(0) == Inputs[i])
13613           SelectTruncOp[0].insert(std::make_pair(User,
13614                                     User->getOperand(0).getValueType()));
13615         if (User->getOperand(1) == Inputs[i])
13616           SelectTruncOp[1].insert(std::make_pair(User,
13617                                     User->getOperand(1).getValueType()));
13618       }
13619     }
13620   }
13621 
13622   for (unsigned i = 0, ie = PromOps.size(); i != ie; ++i) {
13623     for (SDNode::use_iterator UI = PromOps[i].getNode()->use_begin(),
13624                               UE = PromOps[i].getNode()->use_end();
13625          UI != UE; ++UI) {
13626       SDNode *User = *UI;
13627       if (User != N && !Visited.count(User))
13628         return SDValue();
13629 
13630       // If we're going to promote the non-output-value operand(s) or SELECT or
13631       // SELECT_CC, record them for truncation.
13632       if (User->getOpcode() == ISD::SELECT) {
13633         if (User->getOperand(0) == PromOps[i])
13634           SelectTruncOp[0].insert(std::make_pair(User,
13635                                     User->getOperand(0).getValueType()));
13636       } else if (User->getOpcode() == ISD::SELECT_CC) {
13637         if (User->getOperand(0) == PromOps[i])
13638           SelectTruncOp[0].insert(std::make_pair(User,
13639                                     User->getOperand(0).getValueType()));
13640         if (User->getOperand(1) == PromOps[i])
13641           SelectTruncOp[1].insert(std::make_pair(User,
13642                                     User->getOperand(1).getValueType()));
13643       }
13644     }
13645   }
13646 
13647   unsigned PromBits = N->getOperand(0).getValueSizeInBits();
13648   bool ReallyNeedsExt = false;
13649   if (N->getOpcode() != ISD::ANY_EXTEND) {
13650     // If all of the inputs are not already sign/zero extended, then
13651     // we'll still need to do that at the end.
13652     for (unsigned i = 0, ie = Inputs.size(); i != ie; ++i) {
13653       if (isa<ConstantSDNode>(Inputs[i]))
13654         continue;
13655 
13656       unsigned OpBits =
13657         Inputs[i].getOperand(0).getValueSizeInBits();
13658       assert(PromBits < OpBits && "Truncation not to a smaller bit count?");
13659 
13660       if ((N->getOpcode() == ISD::ZERO_EXTEND &&
13661            !DAG.MaskedValueIsZero(Inputs[i].getOperand(0),
13662                                   APInt::getHighBitsSet(OpBits,
13663                                                         OpBits-PromBits))) ||
13664           (N->getOpcode() == ISD::SIGN_EXTEND &&
13665            DAG.ComputeNumSignBits(Inputs[i].getOperand(0)) <
13666              (OpBits-(PromBits-1)))) {
13667         ReallyNeedsExt = true;
13668         break;
13669       }
13670     }
13671   }
13672 
13673   // Replace all inputs, either with the truncation operand, or a
13674   // truncation or extension to the final output type.
13675   for (unsigned i = 0, ie = Inputs.size(); i != ie; ++i) {
13676     // Constant inputs need to be replaced with the to-be-promoted nodes that
13677     // use them because they might have users outside of the cluster of
13678     // promoted nodes.
13679     if (isa<ConstantSDNode>(Inputs[i]))
13680       continue;
13681 
13682     SDValue InSrc = Inputs[i].getOperand(0);
13683     if (Inputs[i].getValueType() == N->getValueType(0))
13684       DAG.ReplaceAllUsesOfValueWith(Inputs[i], InSrc);
13685     else if (N->getOpcode() == ISD::SIGN_EXTEND)
13686       DAG.ReplaceAllUsesOfValueWith(Inputs[i],
13687         DAG.getSExtOrTrunc(InSrc, dl, N->getValueType(0)));
13688     else if (N->getOpcode() == ISD::ZERO_EXTEND)
13689       DAG.ReplaceAllUsesOfValueWith(Inputs[i],
13690         DAG.getZExtOrTrunc(InSrc, dl, N->getValueType(0)));
13691     else
13692       DAG.ReplaceAllUsesOfValueWith(Inputs[i],
13693         DAG.getAnyExtOrTrunc(InSrc, dl, N->getValueType(0)));
13694   }
13695 
13696   std::list<HandleSDNode> PromOpHandles;
13697   for (auto &PromOp : PromOps)
13698     PromOpHandles.emplace_back(PromOp);
13699 
13700   // Replace all operations (these are all the same, but have a different
13701   // (promoted) return type). DAG.getNode will validate that the types of
13702   // a binary operator match, so go through the list in reverse so that
13703   // we've likely promoted both operands first.
13704   while (!PromOpHandles.empty()) {
13705     SDValue PromOp = PromOpHandles.back().getValue();
13706     PromOpHandles.pop_back();
13707 
13708     unsigned C;
13709     switch (PromOp.getOpcode()) {
13710     default:             C = 0; break;
13711     case ISD::SELECT:    C = 1; break;
13712     case ISD::SELECT_CC: C = 2; break;
13713     }
13714 
13715     if ((!isa<ConstantSDNode>(PromOp.getOperand(C)) &&
13716          PromOp.getOperand(C).getValueType() != N->getValueType(0)) ||
13717         (!isa<ConstantSDNode>(PromOp.getOperand(C+1)) &&
13718          PromOp.getOperand(C+1).getValueType() != N->getValueType(0))) {
13719       // The to-be-promoted operands of this node have not yet been
13720       // promoted (this should be rare because we're going through the
13721       // list backward, but if one of the operands has several users in
13722       // this cluster of to-be-promoted nodes, it is possible).
13723       PromOpHandles.emplace_front(PromOp);
13724       continue;
13725     }
13726 
13727     // For SELECT and SELECT_CC nodes, we do a similar check for any
13728     // to-be-promoted comparison inputs.
13729     if (PromOp.getOpcode() == ISD::SELECT ||
13730         PromOp.getOpcode() == ISD::SELECT_CC) {
13731       if ((SelectTruncOp[0].count(PromOp.getNode()) &&
13732            PromOp.getOperand(0).getValueType() != N->getValueType(0)) ||
13733           (SelectTruncOp[1].count(PromOp.getNode()) &&
13734            PromOp.getOperand(1).getValueType() != N->getValueType(0))) {
13735         PromOpHandles.emplace_front(PromOp);
13736         continue;
13737       }
13738     }
13739 
13740     SmallVector<SDValue, 3> Ops(PromOp.getNode()->op_begin(),
13741                                 PromOp.getNode()->op_end());
13742 
13743     // If this node has constant inputs, then they'll need to be promoted here.
13744     for (unsigned i = 0; i < 2; ++i) {
13745       if (!isa<ConstantSDNode>(Ops[C+i]))
13746         continue;
13747       if (Ops[C+i].getValueType() == N->getValueType(0))
13748         continue;
13749 
13750       if (N->getOpcode() == ISD::SIGN_EXTEND)
13751         Ops[C+i] = DAG.getSExtOrTrunc(Ops[C+i], dl, N->getValueType(0));
13752       else if (N->getOpcode() == ISD::ZERO_EXTEND)
13753         Ops[C+i] = DAG.getZExtOrTrunc(Ops[C+i], dl, N->getValueType(0));
13754       else
13755         Ops[C+i] = DAG.getAnyExtOrTrunc(Ops[C+i], dl, N->getValueType(0));
13756     }
13757 
13758     // If we've promoted the comparison inputs of a SELECT or SELECT_CC,
13759     // truncate them again to the original value type.
13760     if (PromOp.getOpcode() == ISD::SELECT ||
13761         PromOp.getOpcode() == ISD::SELECT_CC) {
13762       auto SI0 = SelectTruncOp[0].find(PromOp.getNode());
13763       if (SI0 != SelectTruncOp[0].end())
13764         Ops[0] = DAG.getNode(ISD::TRUNCATE, dl, SI0->second, Ops[0]);
13765       auto SI1 = SelectTruncOp[1].find(PromOp.getNode());
13766       if (SI1 != SelectTruncOp[1].end())
13767         Ops[1] = DAG.getNode(ISD::TRUNCATE, dl, SI1->second, Ops[1]);
13768     }
13769 
13770     DAG.ReplaceAllUsesOfValueWith(PromOp,
13771       DAG.getNode(PromOp.getOpcode(), dl, N->getValueType(0), Ops));
13772   }
13773 
13774   // Now we're left with the initial extension itself.
13775   if (!ReallyNeedsExt)
13776     return N->getOperand(0);
13777 
13778   // To zero extend, just mask off everything except for the first bit (in the
13779   // i1 case).
13780   if (N->getOpcode() == ISD::ZERO_EXTEND)
13781     return DAG.getNode(ISD::AND, dl, N->getValueType(0), N->getOperand(0),
13782                        DAG.getConstant(APInt::getLowBitsSet(
13783                                          N->getValueSizeInBits(0), PromBits),
13784                                        dl, N->getValueType(0)));
13785 
13786   assert(N->getOpcode() == ISD::SIGN_EXTEND &&
13787          "Invalid extension type");
13788   EVT ShiftAmountTy = getShiftAmountTy(N->getValueType(0), DAG.getDataLayout());
13789   SDValue ShiftCst =
13790       DAG.getConstant(N->getValueSizeInBits(0) - PromBits, dl, ShiftAmountTy);
13791   return DAG.getNode(
13792       ISD::SRA, dl, N->getValueType(0),
13793       DAG.getNode(ISD::SHL, dl, N->getValueType(0), N->getOperand(0), ShiftCst),
13794       ShiftCst);
13795 }
13796 
13797 SDValue PPCTargetLowering::combineSetCC(SDNode *N,
13798                                         DAGCombinerInfo &DCI) const {
13799   assert(N->getOpcode() == ISD::SETCC &&
13800          "Should be called with a SETCC node");
13801 
13802   ISD::CondCode CC = cast<CondCodeSDNode>(N->getOperand(2))->get();
13803   if (CC == ISD::SETNE || CC == ISD::SETEQ) {
13804     SDValue LHS = N->getOperand(0);
13805     SDValue RHS = N->getOperand(1);
13806 
13807     // If there is a '0 - y' pattern, canonicalize the pattern to the RHS.
13808     if (LHS.getOpcode() == ISD::SUB && isNullConstant(LHS.getOperand(0)) &&
13809         LHS.hasOneUse())
13810       std::swap(LHS, RHS);
13811 
13812     // x == 0-y --> x+y == 0
13813     // x != 0-y --> x+y != 0
13814     if (RHS.getOpcode() == ISD::SUB && isNullConstant(RHS.getOperand(0)) &&
13815         RHS.hasOneUse()) {
13816       SDLoc DL(N);
13817       SelectionDAG &DAG = DCI.DAG;
13818       EVT VT = N->getValueType(0);
13819       EVT OpVT = LHS.getValueType();
13820       SDValue Add = DAG.getNode(ISD::ADD, DL, OpVT, LHS, RHS.getOperand(1));
13821       return DAG.getSetCC(DL, VT, Add, DAG.getConstant(0, DL, OpVT), CC);
13822     }
13823   }
13824 
13825   return DAGCombineTruncBoolExt(N, DCI);
13826 }
13827 
13828 // Is this an extending load from an f32 to an f64?
13829 static bool isFPExtLoad(SDValue Op) {
13830   if (LoadSDNode *LD = dyn_cast<LoadSDNode>(Op.getNode()))
13831     return LD->getExtensionType() == ISD::EXTLOAD &&
13832       Op.getValueType() == MVT::f64;
13833   return false;
13834 }
13835 
13836 /// Reduces the number of fp-to-int conversion when building a vector.
13837 ///
13838 /// If this vector is built out of floating to integer conversions,
13839 /// transform it to a vector built out of floating point values followed by a
13840 /// single floating to integer conversion of the vector.
13841 /// Namely  (build_vector (fptosi $A), (fptosi $B), ...)
13842 /// becomes (fptosi (build_vector ($A, $B, ...)))
13843 SDValue PPCTargetLowering::
13844 combineElementTruncationToVectorTruncation(SDNode *N,
13845                                            DAGCombinerInfo &DCI) const {
13846   assert(N->getOpcode() == ISD::BUILD_VECTOR &&
13847          "Should be called with a BUILD_VECTOR node");
13848 
13849   SelectionDAG &DAG = DCI.DAG;
13850   SDLoc dl(N);
13851 
13852   SDValue FirstInput = N->getOperand(0);
13853   assert(FirstInput.getOpcode() == PPCISD::MFVSR &&
13854          "The input operand must be an fp-to-int conversion.");
13855 
13856   // This combine happens after legalization so the fp_to_[su]i nodes are
13857   // already converted to PPCSISD nodes.
13858   unsigned FirstConversion = FirstInput.getOperand(0).getOpcode();
13859   if (FirstConversion == PPCISD::FCTIDZ ||
13860       FirstConversion == PPCISD::FCTIDUZ ||
13861       FirstConversion == PPCISD::FCTIWZ ||
13862       FirstConversion == PPCISD::FCTIWUZ) {
13863     bool IsSplat = true;
13864     bool Is32Bit = FirstConversion == PPCISD::FCTIWZ ||
13865       FirstConversion == PPCISD::FCTIWUZ;
13866     EVT SrcVT = FirstInput.getOperand(0).getValueType();
13867     SmallVector<SDValue, 4> Ops;
13868     EVT TargetVT = N->getValueType(0);
13869     for (int i = 0, e = N->getNumOperands(); i < e; ++i) {
13870       SDValue NextOp = N->getOperand(i);
13871       if (NextOp.getOpcode() != PPCISD::MFVSR)
13872         return SDValue();
13873       unsigned NextConversion = NextOp.getOperand(0).getOpcode();
13874       if (NextConversion != FirstConversion)
13875         return SDValue();
13876       // If we are converting to 32-bit integers, we need to add an FP_ROUND.
13877       // This is not valid if the input was originally double precision. It is
13878       // also not profitable to do unless this is an extending load in which
13879       // case doing this combine will allow us to combine consecutive loads.
13880       if (Is32Bit && !isFPExtLoad(NextOp.getOperand(0).getOperand(0)))
13881         return SDValue();
13882       if (N->getOperand(i) != FirstInput)
13883         IsSplat = false;
13884     }
13885 
13886     // If this is a splat, we leave it as-is since there will be only a single
13887     // fp-to-int conversion followed by a splat of the integer. This is better
13888     // for 32-bit and smaller ints and neutral for 64-bit ints.
13889     if (IsSplat)
13890       return SDValue();
13891 
13892     // Now that we know we have the right type of node, get its operands
13893     for (int i = 0, e = N->getNumOperands(); i < e; ++i) {
13894       SDValue In = N->getOperand(i).getOperand(0);
13895       if (Is32Bit) {
13896         // For 32-bit values, we need to add an FP_ROUND node (if we made it
13897         // here, we know that all inputs are extending loads so this is safe).
13898         if (In.isUndef())
13899           Ops.push_back(DAG.getUNDEF(SrcVT));
13900         else {
13901           SDValue Trunc = DAG.getNode(ISD::FP_ROUND, dl,
13902                                       MVT::f32, In.getOperand(0),
13903                                       DAG.getIntPtrConstant(1, dl));
13904           Ops.push_back(Trunc);
13905         }
13906       } else
13907         Ops.push_back(In.isUndef() ? DAG.getUNDEF(SrcVT) : In.getOperand(0));
13908     }
13909 
13910     unsigned Opcode;
13911     if (FirstConversion == PPCISD::FCTIDZ ||
13912         FirstConversion == PPCISD::FCTIWZ)
13913       Opcode = ISD::FP_TO_SINT;
13914     else
13915       Opcode = ISD::FP_TO_UINT;
13916 
13917     EVT NewVT = TargetVT == MVT::v2i64 ? MVT::v2f64 : MVT::v4f32;
13918     SDValue BV = DAG.getBuildVector(NewVT, dl, Ops);
13919     return DAG.getNode(Opcode, dl, TargetVT, BV);
13920   }
13921   return SDValue();
13922 }
13923 
13924 /// Reduce the number of loads when building a vector.
13925 ///
13926 /// Building a vector out of multiple loads can be converted to a load
13927 /// of the vector type if the loads are consecutive. If the loads are
13928 /// consecutive but in descending order, a shuffle is added at the end
13929 /// to reorder the vector.
13930 static SDValue combineBVOfConsecutiveLoads(SDNode *N, SelectionDAG &DAG) {
13931   assert(N->getOpcode() == ISD::BUILD_VECTOR &&
13932          "Should be called with a BUILD_VECTOR node");
13933 
13934   SDLoc dl(N);
13935 
13936   // Return early for non byte-sized type, as they can't be consecutive.
13937   if (!N->getValueType(0).getVectorElementType().isByteSized())
13938     return SDValue();
13939 
13940   bool InputsAreConsecutiveLoads = true;
13941   bool InputsAreReverseConsecutive = true;
13942   unsigned ElemSize = N->getValueType(0).getScalarType().getStoreSize();
13943   SDValue FirstInput = N->getOperand(0);
13944   bool IsRoundOfExtLoad = false;
13945 
13946   if (FirstInput.getOpcode() == ISD::FP_ROUND &&
13947       FirstInput.getOperand(0).getOpcode() == ISD::LOAD) {
13948     LoadSDNode *LD = dyn_cast<LoadSDNode>(FirstInput.getOperand(0));
13949     IsRoundOfExtLoad = LD->getExtensionType() == ISD::EXTLOAD;
13950   }
13951   // Not a build vector of (possibly fp_rounded) loads.
13952   if ((!IsRoundOfExtLoad && FirstInput.getOpcode() != ISD::LOAD) ||
13953       N->getNumOperands() == 1)
13954     return SDValue();
13955 
13956   for (int i = 1, e = N->getNumOperands(); i < e; ++i) {
13957     // If any inputs are fp_round(extload), they all must be.
13958     if (IsRoundOfExtLoad && N->getOperand(i).getOpcode() != ISD::FP_ROUND)
13959       return SDValue();
13960 
13961     SDValue NextInput = IsRoundOfExtLoad ? N->getOperand(i).getOperand(0) :
13962       N->getOperand(i);
13963     if (NextInput.getOpcode() != ISD::LOAD)
13964       return SDValue();
13965 
13966     SDValue PreviousInput =
13967       IsRoundOfExtLoad ? N->getOperand(i-1).getOperand(0) : N->getOperand(i-1);
13968     LoadSDNode *LD1 = dyn_cast<LoadSDNode>(PreviousInput);
13969     LoadSDNode *LD2 = dyn_cast<LoadSDNode>(NextInput);
13970 
13971     // If any inputs are fp_round(extload), they all must be.
13972     if (IsRoundOfExtLoad && LD2->getExtensionType() != ISD::EXTLOAD)
13973       return SDValue();
13974 
13975     if (!isConsecutiveLS(LD2, LD1, ElemSize, 1, DAG))
13976       InputsAreConsecutiveLoads = false;
13977     if (!isConsecutiveLS(LD1, LD2, ElemSize, 1, DAG))
13978       InputsAreReverseConsecutive = false;
13979 
13980     // Exit early if the loads are neither consecutive nor reverse consecutive.
13981     if (!InputsAreConsecutiveLoads && !InputsAreReverseConsecutive)
13982       return SDValue();
13983   }
13984 
13985   assert(!(InputsAreConsecutiveLoads && InputsAreReverseConsecutive) &&
13986          "The loads cannot be both consecutive and reverse consecutive.");
13987 
13988   SDValue FirstLoadOp =
13989     IsRoundOfExtLoad ? FirstInput.getOperand(0) : FirstInput;
13990   SDValue LastLoadOp =
13991     IsRoundOfExtLoad ? N->getOperand(N->getNumOperands()-1).getOperand(0) :
13992                        N->getOperand(N->getNumOperands()-1);
13993 
13994   LoadSDNode *LD1 = dyn_cast<LoadSDNode>(FirstLoadOp);
13995   LoadSDNode *LDL = dyn_cast<LoadSDNode>(LastLoadOp);
13996   if (InputsAreConsecutiveLoads) {
13997     assert(LD1 && "Input needs to be a LoadSDNode.");
13998     return DAG.getLoad(N->getValueType(0), dl, LD1->getChain(),
13999                        LD1->getBasePtr(), LD1->getPointerInfo(),
14000                        LD1->getAlignment());
14001   }
14002   if (InputsAreReverseConsecutive) {
14003     assert(LDL && "Input needs to be a LoadSDNode.");
14004     SDValue Load = DAG.getLoad(N->getValueType(0), dl, LDL->getChain(),
14005                                LDL->getBasePtr(), LDL->getPointerInfo(),
14006                                LDL->getAlignment());
14007     SmallVector<int, 16> Ops;
14008     for (int i = N->getNumOperands() - 1; i >= 0; i--)
14009       Ops.push_back(i);
14010 
14011     return DAG.getVectorShuffle(N->getValueType(0), dl, Load,
14012                                 DAG.getUNDEF(N->getValueType(0)), Ops);
14013   }
14014   return SDValue();
14015 }
14016 
14017 // This function adds the required vector_shuffle needed to get
14018 // the elements of the vector extract in the correct position
14019 // as specified by the CorrectElems encoding.
14020 static SDValue addShuffleForVecExtend(SDNode *N, SelectionDAG &DAG,
14021                                       SDValue Input, uint64_t Elems,
14022                                       uint64_t CorrectElems) {
14023   SDLoc dl(N);
14024 
14025   unsigned NumElems = Input.getValueType().getVectorNumElements();
14026   SmallVector<int, 16> ShuffleMask(NumElems, -1);
14027 
14028   // Knowing the element indices being extracted from the original
14029   // vector and the order in which they're being inserted, just put
14030   // them at element indices required for the instruction.
14031   for (unsigned i = 0; i < N->getNumOperands(); i++) {
14032     if (DAG.getDataLayout().isLittleEndian())
14033       ShuffleMask[CorrectElems & 0xF] = Elems & 0xF;
14034     else
14035       ShuffleMask[(CorrectElems & 0xF0) >> 4] = (Elems & 0xF0) >> 4;
14036     CorrectElems = CorrectElems >> 8;
14037     Elems = Elems >> 8;
14038   }
14039 
14040   SDValue Shuffle =
14041       DAG.getVectorShuffle(Input.getValueType(), dl, Input,
14042                            DAG.getUNDEF(Input.getValueType()), ShuffleMask);
14043 
14044   EVT VT = N->getValueType(0);
14045   SDValue Conv = DAG.getBitcast(VT, Shuffle);
14046 
14047   EVT ExtVT = EVT::getVectorVT(*DAG.getContext(),
14048                                Input.getValueType().getVectorElementType(),
14049                                VT.getVectorNumElements());
14050   return DAG.getNode(ISD::SIGN_EXTEND_INREG, dl, VT, Conv,
14051                      DAG.getValueType(ExtVT));
14052 }
14053 
14054 // Look for build vector patterns where input operands come from sign
14055 // extended vector_extract elements of specific indices. If the correct indices
14056 // aren't used, add a vector shuffle to fix up the indices and create
14057 // SIGN_EXTEND_INREG node which selects the vector sign extend instructions
14058 // during instruction selection.
14059 static SDValue combineBVOfVecSExt(SDNode *N, SelectionDAG &DAG) {
14060   // This array encodes the indices that the vector sign extend instructions
14061   // extract from when extending from one type to another for both BE and LE.
14062   // The right nibble of each byte corresponds to the LE incides.
14063   // and the left nibble of each byte corresponds to the BE incides.
14064   // For example: 0x3074B8FC  byte->word
14065   // For LE: the allowed indices are: 0x0,0x4,0x8,0xC
14066   // For BE: the allowed indices are: 0x3,0x7,0xB,0xF
14067   // For example: 0x000070F8  byte->double word
14068   // For LE: the allowed indices are: 0x0,0x8
14069   // For BE: the allowed indices are: 0x7,0xF
14070   uint64_t TargetElems[] = {
14071       0x3074B8FC, // b->w
14072       0x000070F8, // b->d
14073       0x10325476, // h->w
14074       0x00003074, // h->d
14075       0x00001032, // w->d
14076   };
14077 
14078   uint64_t Elems = 0;
14079   int Index;
14080   SDValue Input;
14081 
14082   auto isSExtOfVecExtract = [&](SDValue Op) -> bool {
14083     if (!Op)
14084       return false;
14085     if (Op.getOpcode() != ISD::SIGN_EXTEND &&
14086         Op.getOpcode() != ISD::SIGN_EXTEND_INREG)
14087       return false;
14088 
14089     // A SIGN_EXTEND_INREG might be fed by an ANY_EXTEND to produce a value
14090     // of the right width.
14091     SDValue Extract = Op.getOperand(0);
14092     if (Extract.getOpcode() == ISD::ANY_EXTEND)
14093       Extract = Extract.getOperand(0);
14094     if (Extract.getOpcode() != ISD::EXTRACT_VECTOR_ELT)
14095       return false;
14096 
14097     ConstantSDNode *ExtOp = dyn_cast<ConstantSDNode>(Extract.getOperand(1));
14098     if (!ExtOp)
14099       return false;
14100 
14101     Index = ExtOp->getZExtValue();
14102     if (Input && Input != Extract.getOperand(0))
14103       return false;
14104 
14105     if (!Input)
14106       Input = Extract.getOperand(0);
14107 
14108     Elems = Elems << 8;
14109     Index = DAG.getDataLayout().isLittleEndian() ? Index : Index << 4;
14110     Elems |= Index;
14111 
14112     return true;
14113   };
14114 
14115   // If the build vector operands aren't sign extended vector extracts,
14116   // of the same input vector, then return.
14117   for (unsigned i = 0; i < N->getNumOperands(); i++) {
14118     if (!isSExtOfVecExtract(N->getOperand(i))) {
14119       return SDValue();
14120     }
14121   }
14122 
14123   // If the vector extract indicies are not correct, add the appropriate
14124   // vector_shuffle.
14125   int TgtElemArrayIdx;
14126   int InputSize = Input.getValueType().getScalarSizeInBits();
14127   int OutputSize = N->getValueType(0).getScalarSizeInBits();
14128   if (InputSize + OutputSize == 40)
14129     TgtElemArrayIdx = 0;
14130   else if (InputSize + OutputSize == 72)
14131     TgtElemArrayIdx = 1;
14132   else if (InputSize + OutputSize == 48)
14133     TgtElemArrayIdx = 2;
14134   else if (InputSize + OutputSize == 80)
14135     TgtElemArrayIdx = 3;
14136   else if (InputSize + OutputSize == 96)
14137     TgtElemArrayIdx = 4;
14138   else
14139     return SDValue();
14140 
14141   uint64_t CorrectElems = TargetElems[TgtElemArrayIdx];
14142   CorrectElems = DAG.getDataLayout().isLittleEndian()
14143                      ? CorrectElems & 0x0F0F0F0F0F0F0F0F
14144                      : CorrectElems & 0xF0F0F0F0F0F0F0F0;
14145   if (Elems != CorrectElems) {
14146     return addShuffleForVecExtend(N, DAG, Input, Elems, CorrectElems);
14147   }
14148 
14149   // Regular lowering will catch cases where a shuffle is not needed.
14150   return SDValue();
14151 }
14152 
14153 SDValue PPCTargetLowering::DAGCombineBuildVector(SDNode *N,
14154                                                  DAGCombinerInfo &DCI) const {
14155   assert(N->getOpcode() == ISD::BUILD_VECTOR &&
14156          "Should be called with a BUILD_VECTOR node");
14157 
14158   SelectionDAG &DAG = DCI.DAG;
14159   SDLoc dl(N);
14160 
14161   if (!Subtarget.hasVSX())
14162     return SDValue();
14163 
14164   // The target independent DAG combiner will leave a build_vector of
14165   // float-to-int conversions intact. We can generate MUCH better code for
14166   // a float-to-int conversion of a vector of floats.
14167   SDValue FirstInput = N->getOperand(0);
14168   if (FirstInput.getOpcode() == PPCISD::MFVSR) {
14169     SDValue Reduced = combineElementTruncationToVectorTruncation(N, DCI);
14170     if (Reduced)
14171       return Reduced;
14172   }
14173 
14174   // If we're building a vector out of consecutive loads, just load that
14175   // vector type.
14176   SDValue Reduced = combineBVOfConsecutiveLoads(N, DAG);
14177   if (Reduced)
14178     return Reduced;
14179 
14180   // If we're building a vector out of extended elements from another vector
14181   // we have P9 vector integer extend instructions. The code assumes legal
14182   // input types (i.e. it can't handle things like v4i16) so do not run before
14183   // legalization.
14184   if (Subtarget.hasP9Altivec() && !DCI.isBeforeLegalize()) {
14185     Reduced = combineBVOfVecSExt(N, DAG);
14186     if (Reduced)
14187       return Reduced;
14188   }
14189 
14190 
14191   if (N->getValueType(0) != MVT::v2f64)
14192     return SDValue();
14193 
14194   // Looking for:
14195   // (build_vector ([su]int_to_fp (extractelt 0)), [su]int_to_fp (extractelt 1))
14196   if (FirstInput.getOpcode() != ISD::SINT_TO_FP &&
14197       FirstInput.getOpcode() != ISD::UINT_TO_FP)
14198     return SDValue();
14199   if (N->getOperand(1).getOpcode() != ISD::SINT_TO_FP &&
14200       N->getOperand(1).getOpcode() != ISD::UINT_TO_FP)
14201     return SDValue();
14202   if (FirstInput.getOpcode() != N->getOperand(1).getOpcode())
14203     return SDValue();
14204 
14205   SDValue Ext1 = FirstInput.getOperand(0);
14206   SDValue Ext2 = N->getOperand(1).getOperand(0);
14207   if(Ext1.getOpcode() != ISD::EXTRACT_VECTOR_ELT ||
14208      Ext2.getOpcode() != ISD::EXTRACT_VECTOR_ELT)
14209     return SDValue();
14210 
14211   ConstantSDNode *Ext1Op = dyn_cast<ConstantSDNode>(Ext1.getOperand(1));
14212   ConstantSDNode *Ext2Op = dyn_cast<ConstantSDNode>(Ext2.getOperand(1));
14213   if (!Ext1Op || !Ext2Op)
14214     return SDValue();
14215   if (Ext1.getOperand(0).getValueType() != MVT::v4i32 ||
14216       Ext1.getOperand(0) != Ext2.getOperand(0))
14217     return SDValue();
14218 
14219   int FirstElem = Ext1Op->getZExtValue();
14220   int SecondElem = Ext2Op->getZExtValue();
14221   int SubvecIdx;
14222   if (FirstElem == 0 && SecondElem == 1)
14223     SubvecIdx = Subtarget.isLittleEndian() ? 1 : 0;
14224   else if (FirstElem == 2 && SecondElem == 3)
14225     SubvecIdx = Subtarget.isLittleEndian() ? 0 : 1;
14226   else
14227     return SDValue();
14228 
14229   SDValue SrcVec = Ext1.getOperand(0);
14230   auto NodeType = (N->getOperand(1).getOpcode() == ISD::SINT_TO_FP) ?
14231     PPCISD::SINT_VEC_TO_FP : PPCISD::UINT_VEC_TO_FP;
14232   return DAG.getNode(NodeType, dl, MVT::v2f64,
14233                      SrcVec, DAG.getIntPtrConstant(SubvecIdx, dl));
14234 }
14235 
14236 SDValue PPCTargetLowering::combineFPToIntToFP(SDNode *N,
14237                                               DAGCombinerInfo &DCI) const {
14238   assert((N->getOpcode() == ISD::SINT_TO_FP ||
14239           N->getOpcode() == ISD::UINT_TO_FP) &&
14240          "Need an int -> FP conversion node here");
14241 
14242   if (useSoftFloat() || !Subtarget.has64BitSupport())
14243     return SDValue();
14244 
14245   SelectionDAG &DAG = DCI.DAG;
14246   SDLoc dl(N);
14247   SDValue Op(N, 0);
14248 
14249   // Don't handle ppc_fp128 here or conversions that are out-of-range capable
14250   // from the hardware.
14251   if (Op.getValueType() != MVT::f32 && Op.getValueType() != MVT::f64)
14252     return SDValue();
14253   if (Op.getOperand(0).getValueType().getSimpleVT() <= MVT(MVT::i1) ||
14254       Op.getOperand(0).getValueType().getSimpleVT() > MVT(MVT::i64))
14255     return SDValue();
14256 
14257   SDValue FirstOperand(Op.getOperand(0));
14258   bool SubWordLoad = FirstOperand.getOpcode() == ISD::LOAD &&
14259     (FirstOperand.getValueType() == MVT::i8 ||
14260      FirstOperand.getValueType() == MVT::i16);
14261   if (Subtarget.hasP9Vector() && Subtarget.hasP9Altivec() && SubWordLoad) {
14262     bool Signed = N->getOpcode() == ISD::SINT_TO_FP;
14263     bool DstDouble = Op.getValueType() == MVT::f64;
14264     unsigned ConvOp = Signed ?
14265       (DstDouble ? PPCISD::FCFID  : PPCISD::FCFIDS) :
14266       (DstDouble ? PPCISD::FCFIDU : PPCISD::FCFIDUS);
14267     SDValue WidthConst =
14268       DAG.getIntPtrConstant(FirstOperand.getValueType() == MVT::i8 ? 1 : 2,
14269                             dl, false);
14270     LoadSDNode *LDN = cast<LoadSDNode>(FirstOperand.getNode());
14271     SDValue Ops[] = { LDN->getChain(), LDN->getBasePtr(), WidthConst };
14272     SDValue Ld = DAG.getMemIntrinsicNode(PPCISD::LXSIZX, dl,
14273                                          DAG.getVTList(MVT::f64, MVT::Other),
14274                                          Ops, MVT::i8, LDN->getMemOperand());
14275 
14276     // For signed conversion, we need to sign-extend the value in the VSR
14277     if (Signed) {
14278       SDValue ExtOps[] = { Ld, WidthConst };
14279       SDValue Ext = DAG.getNode(PPCISD::VEXTS, dl, MVT::f64, ExtOps);
14280       return DAG.getNode(ConvOp, dl, DstDouble ? MVT::f64 : MVT::f32, Ext);
14281     } else
14282       return DAG.getNode(ConvOp, dl, DstDouble ? MVT::f64 : MVT::f32, Ld);
14283   }
14284 
14285 
14286   // For i32 intermediate values, unfortunately, the conversion functions
14287   // leave the upper 32 bits of the value are undefined. Within the set of
14288   // scalar instructions, we have no method for zero- or sign-extending the
14289   // value. Thus, we cannot handle i32 intermediate values here.
14290   if (Op.getOperand(0).getValueType() == MVT::i32)
14291     return SDValue();
14292 
14293   assert((Op.getOpcode() == ISD::SINT_TO_FP || Subtarget.hasFPCVT()) &&
14294          "UINT_TO_FP is supported only with FPCVT");
14295 
14296   // If we have FCFIDS, then use it when converting to single-precision.
14297   // Otherwise, convert to double-precision and then round.
14298   unsigned FCFOp = (Subtarget.hasFPCVT() && Op.getValueType() == MVT::f32)
14299                        ? (Op.getOpcode() == ISD::UINT_TO_FP ? PPCISD::FCFIDUS
14300                                                             : PPCISD::FCFIDS)
14301                        : (Op.getOpcode() == ISD::UINT_TO_FP ? PPCISD::FCFIDU
14302                                                             : PPCISD::FCFID);
14303   MVT FCFTy = (Subtarget.hasFPCVT() && Op.getValueType() == MVT::f32)
14304                   ? MVT::f32
14305                   : MVT::f64;
14306 
14307   // If we're converting from a float, to an int, and back to a float again,
14308   // then we don't need the store/load pair at all.
14309   if ((Op.getOperand(0).getOpcode() == ISD::FP_TO_UINT &&
14310        Subtarget.hasFPCVT()) ||
14311       (Op.getOperand(0).getOpcode() == ISD::FP_TO_SINT)) {
14312     SDValue Src = Op.getOperand(0).getOperand(0);
14313     if (Src.getValueType() == MVT::f32) {
14314       Src = DAG.getNode(ISD::FP_EXTEND, dl, MVT::f64, Src);
14315       DCI.AddToWorklist(Src.getNode());
14316     } else if (Src.getValueType() != MVT::f64) {
14317       // Make sure that we don't pick up a ppc_fp128 source value.
14318       return SDValue();
14319     }
14320 
14321     unsigned FCTOp =
14322       Op.getOperand(0).getOpcode() == ISD::FP_TO_SINT ? PPCISD::FCTIDZ :
14323                                                         PPCISD::FCTIDUZ;
14324 
14325     SDValue Tmp = DAG.getNode(FCTOp, dl, MVT::f64, Src);
14326     SDValue FP = DAG.getNode(FCFOp, dl, FCFTy, Tmp);
14327 
14328     if (Op.getValueType() == MVT::f32 && !Subtarget.hasFPCVT()) {
14329       FP = DAG.getNode(ISD::FP_ROUND, dl,
14330                        MVT::f32, FP, DAG.getIntPtrConstant(0, dl));
14331       DCI.AddToWorklist(FP.getNode());
14332     }
14333 
14334     return FP;
14335   }
14336 
14337   return SDValue();
14338 }
14339 
14340 // expandVSXLoadForLE - Convert VSX loads (which may be intrinsics for
14341 // builtins) into loads with swaps.
14342 SDValue PPCTargetLowering::expandVSXLoadForLE(SDNode *N,
14343                                               DAGCombinerInfo &DCI) const {
14344   SelectionDAG &DAG = DCI.DAG;
14345   SDLoc dl(N);
14346   SDValue Chain;
14347   SDValue Base;
14348   MachineMemOperand *MMO;
14349 
14350   switch (N->getOpcode()) {
14351   default:
14352     llvm_unreachable("Unexpected opcode for little endian VSX load");
14353   case ISD::LOAD: {
14354     LoadSDNode *LD = cast<LoadSDNode>(N);
14355     Chain = LD->getChain();
14356     Base = LD->getBasePtr();
14357     MMO = LD->getMemOperand();
14358     // If the MMO suggests this isn't a load of a full vector, leave
14359     // things alone.  For a built-in, we have to make the change for
14360     // correctness, so if there is a size problem that will be a bug.
14361     if (MMO->getSize() < 16)
14362       return SDValue();
14363     break;
14364   }
14365   case ISD::INTRINSIC_W_CHAIN: {
14366     MemIntrinsicSDNode *Intrin = cast<MemIntrinsicSDNode>(N);
14367     Chain = Intrin->getChain();
14368     // Similarly to the store case below, Intrin->getBasePtr() doesn't get
14369     // us what we want. Get operand 2 instead.
14370     Base = Intrin->getOperand(2);
14371     MMO = Intrin->getMemOperand();
14372     break;
14373   }
14374   }
14375 
14376   MVT VecTy = N->getValueType(0).getSimpleVT();
14377 
14378   // Do not expand to PPCISD::LXVD2X + PPCISD::XXSWAPD when the load is
14379   // aligned and the type is a vector with elements up to 4 bytes
14380   if (Subtarget.needsSwapsForVSXMemOps() && MMO->getAlign() >= Align(16) &&
14381       VecTy.getScalarSizeInBits() <= 32) {
14382     return SDValue();
14383   }
14384 
14385   SDValue LoadOps[] = { Chain, Base };
14386   SDValue Load = DAG.getMemIntrinsicNode(PPCISD::LXVD2X, dl,
14387                                          DAG.getVTList(MVT::v2f64, MVT::Other),
14388                                          LoadOps, MVT::v2f64, MMO);
14389 
14390   DCI.AddToWorklist(Load.getNode());
14391   Chain = Load.getValue(1);
14392   SDValue Swap = DAG.getNode(
14393       PPCISD::XXSWAPD, dl, DAG.getVTList(MVT::v2f64, MVT::Other), Chain, Load);
14394   DCI.AddToWorklist(Swap.getNode());
14395 
14396   // Add a bitcast if the resulting load type doesn't match v2f64.
14397   if (VecTy != MVT::v2f64) {
14398     SDValue N = DAG.getNode(ISD::BITCAST, dl, VecTy, Swap);
14399     DCI.AddToWorklist(N.getNode());
14400     // Package {bitcast value, swap's chain} to match Load's shape.
14401     return DAG.getNode(ISD::MERGE_VALUES, dl, DAG.getVTList(VecTy, MVT::Other),
14402                        N, Swap.getValue(1));
14403   }
14404 
14405   return Swap;
14406 }
14407 
14408 // expandVSXStoreForLE - Convert VSX stores (which may be intrinsics for
14409 // builtins) into stores with swaps.
14410 SDValue PPCTargetLowering::expandVSXStoreForLE(SDNode *N,
14411                                                DAGCombinerInfo &DCI) const {
14412   SelectionDAG &DAG = DCI.DAG;
14413   SDLoc dl(N);
14414   SDValue Chain;
14415   SDValue Base;
14416   unsigned SrcOpnd;
14417   MachineMemOperand *MMO;
14418 
14419   switch (N->getOpcode()) {
14420   default:
14421     llvm_unreachable("Unexpected opcode for little endian VSX store");
14422   case ISD::STORE: {
14423     StoreSDNode *ST = cast<StoreSDNode>(N);
14424     Chain = ST->getChain();
14425     Base = ST->getBasePtr();
14426     MMO = ST->getMemOperand();
14427     SrcOpnd = 1;
14428     // If the MMO suggests this isn't a store of a full vector, leave
14429     // things alone.  For a built-in, we have to make the change for
14430     // correctness, so if there is a size problem that will be a bug.
14431     if (MMO->getSize() < 16)
14432       return SDValue();
14433     break;
14434   }
14435   case ISD::INTRINSIC_VOID: {
14436     MemIntrinsicSDNode *Intrin = cast<MemIntrinsicSDNode>(N);
14437     Chain = Intrin->getChain();
14438     // Intrin->getBasePtr() oddly does not get what we want.
14439     Base = Intrin->getOperand(3);
14440     MMO = Intrin->getMemOperand();
14441     SrcOpnd = 2;
14442     break;
14443   }
14444   }
14445 
14446   SDValue Src = N->getOperand(SrcOpnd);
14447   MVT VecTy = Src.getValueType().getSimpleVT();
14448 
14449   // Do not expand to PPCISD::XXSWAPD and PPCISD::STXVD2X when the load is
14450   // aligned and the type is a vector with elements up to 4 bytes
14451   if (Subtarget.needsSwapsForVSXMemOps() && MMO->getAlign() >= Align(16) &&
14452       VecTy.getScalarSizeInBits() <= 32) {
14453     return SDValue();
14454   }
14455 
14456   // All stores are done as v2f64 and possible bit cast.
14457   if (VecTy != MVT::v2f64) {
14458     Src = DAG.getNode(ISD::BITCAST, dl, MVT::v2f64, Src);
14459     DCI.AddToWorklist(Src.getNode());
14460   }
14461 
14462   SDValue Swap = DAG.getNode(PPCISD::XXSWAPD, dl,
14463                              DAG.getVTList(MVT::v2f64, MVT::Other), Chain, Src);
14464   DCI.AddToWorklist(Swap.getNode());
14465   Chain = Swap.getValue(1);
14466   SDValue StoreOps[] = { Chain, Swap, Base };
14467   SDValue Store = DAG.getMemIntrinsicNode(PPCISD::STXVD2X, dl,
14468                                           DAG.getVTList(MVT::Other),
14469                                           StoreOps, VecTy, MMO);
14470   DCI.AddToWorklist(Store.getNode());
14471   return Store;
14472 }
14473 
14474 // Handle DAG combine for STORE (FP_TO_INT F).
14475 SDValue PPCTargetLowering::combineStoreFPToInt(SDNode *N,
14476                                                DAGCombinerInfo &DCI) const {
14477 
14478   SelectionDAG &DAG = DCI.DAG;
14479   SDLoc dl(N);
14480   unsigned Opcode = N->getOperand(1).getOpcode();
14481 
14482   assert((Opcode == ISD::FP_TO_SINT || Opcode == ISD::FP_TO_UINT)
14483          && "Not a FP_TO_INT Instruction!");
14484 
14485   SDValue Val = N->getOperand(1).getOperand(0);
14486   EVT Op1VT = N->getOperand(1).getValueType();
14487   EVT ResVT = Val.getValueType();
14488 
14489   // Floating point types smaller than 32 bits are not legal on Power.
14490   if (ResVT.getScalarSizeInBits() < 32)
14491     return SDValue();
14492 
14493   // Only perform combine for conversion to i64/i32 or power9 i16/i8.
14494   bool ValidTypeForStoreFltAsInt =
14495         (Op1VT == MVT::i32 || Op1VT == MVT::i64 ||
14496          (Subtarget.hasP9Vector() && (Op1VT == MVT::i16 || Op1VT == MVT::i8)));
14497 
14498   if (ResVT == MVT::ppcf128 || !Subtarget.hasP8Vector() ||
14499       cast<StoreSDNode>(N)->isTruncatingStore() || !ValidTypeForStoreFltAsInt)
14500     return SDValue();
14501 
14502   // Extend f32 values to f64
14503   if (ResVT.getScalarSizeInBits() == 32) {
14504     Val = DAG.getNode(ISD::FP_EXTEND, dl, MVT::f64, Val);
14505     DCI.AddToWorklist(Val.getNode());
14506   }
14507 
14508   // Set signed or unsigned conversion opcode.
14509   unsigned ConvOpcode = (Opcode == ISD::FP_TO_SINT) ?
14510                           PPCISD::FP_TO_SINT_IN_VSR :
14511                           PPCISD::FP_TO_UINT_IN_VSR;
14512 
14513   Val = DAG.getNode(ConvOpcode,
14514                     dl, ResVT == MVT::f128 ? MVT::f128 : MVT::f64, Val);
14515   DCI.AddToWorklist(Val.getNode());
14516 
14517   // Set number of bytes being converted.
14518   unsigned ByteSize = Op1VT.getScalarSizeInBits() / 8;
14519   SDValue Ops[] = { N->getOperand(0), Val, N->getOperand(2),
14520                     DAG.getIntPtrConstant(ByteSize, dl, false),
14521                     DAG.getValueType(Op1VT) };
14522 
14523   Val = DAG.getMemIntrinsicNode(PPCISD::ST_VSR_SCAL_INT, dl,
14524           DAG.getVTList(MVT::Other), Ops,
14525           cast<StoreSDNode>(N)->getMemoryVT(),
14526           cast<StoreSDNode>(N)->getMemOperand());
14527 
14528   DCI.AddToWorklist(Val.getNode());
14529   return Val;
14530 }
14531 
14532 static bool isAlternatingShuffMask(const ArrayRef<int> &Mask, int NumElts) {
14533   // Check that the source of the element keeps flipping
14534   // (i.e. Mask[i] < NumElts -> Mask[i+i] >= NumElts).
14535   bool PrevElemFromFirstVec = Mask[0] < NumElts;
14536   for (int i = 1, e = Mask.size(); i < e; i++) {
14537     if (PrevElemFromFirstVec && Mask[i] < NumElts)
14538       return false;
14539     if (!PrevElemFromFirstVec && Mask[i] >= NumElts)
14540       return false;
14541     PrevElemFromFirstVec = !PrevElemFromFirstVec;
14542   }
14543   return true;
14544 }
14545 
14546 static bool isSplatBV(SDValue Op) {
14547   if (Op.getOpcode() != ISD::BUILD_VECTOR)
14548     return false;
14549   SDValue FirstOp;
14550 
14551   // Find first non-undef input.
14552   for (int i = 0, e = Op.getNumOperands(); i < e; i++) {
14553     FirstOp = Op.getOperand(i);
14554     if (!FirstOp.isUndef())
14555       break;
14556   }
14557 
14558   // All inputs are undef or the same as the first non-undef input.
14559   for (int i = 1, e = Op.getNumOperands(); i < e; i++)
14560     if (Op.getOperand(i) != FirstOp && !Op.getOperand(i).isUndef())
14561       return false;
14562   return true;
14563 }
14564 
14565 static SDValue isScalarToVec(SDValue Op) {
14566   if (Op.getOpcode() == ISD::SCALAR_TO_VECTOR)
14567     return Op;
14568   if (Op.getOpcode() != ISD::BITCAST)
14569     return SDValue();
14570   Op = Op.getOperand(0);
14571   if (Op.getOpcode() == ISD::SCALAR_TO_VECTOR)
14572     return Op;
14573   return SDValue();
14574 }
14575 
14576 static void fixupShuffleMaskForPermutedSToV(SmallVectorImpl<int> &ShuffV,
14577                                             int LHSMaxIdx, int RHSMinIdx,
14578                                             int RHSMaxIdx, int HalfVec) {
14579   for (int i = 0, e = ShuffV.size(); i < e; i++) {
14580     int Idx = ShuffV[i];
14581     if ((Idx >= 0 && Idx < LHSMaxIdx) || (Idx >= RHSMinIdx && Idx < RHSMaxIdx))
14582       ShuffV[i] += HalfVec;
14583   }
14584   return;
14585 }
14586 
14587 // Replace a SCALAR_TO_VECTOR with a SCALAR_TO_VECTOR_PERMUTED except if
14588 // the original is:
14589 // (<n x Ty> (scalar_to_vector (Ty (extract_elt <n x Ty> %a, C))))
14590 // In such a case, just change the shuffle mask to extract the element
14591 // from the permuted index.
14592 static SDValue getSToVPermuted(SDValue OrigSToV, SelectionDAG &DAG) {
14593   SDLoc dl(OrigSToV);
14594   EVT VT = OrigSToV.getValueType();
14595   assert(OrigSToV.getOpcode() == ISD::SCALAR_TO_VECTOR &&
14596          "Expecting a SCALAR_TO_VECTOR here");
14597   SDValue Input = OrigSToV.getOperand(0);
14598 
14599   if (Input.getOpcode() == ISD::EXTRACT_VECTOR_ELT) {
14600     ConstantSDNode *Idx = dyn_cast<ConstantSDNode>(Input.getOperand(1));
14601     SDValue OrigVector = Input.getOperand(0);
14602 
14603     // Can't handle non-const element indices or different vector types
14604     // for the input to the extract and the output of the scalar_to_vector.
14605     if (Idx && VT == OrigVector.getValueType()) {
14606       SmallVector<int, 16> NewMask(VT.getVectorNumElements(), -1);
14607       NewMask[VT.getVectorNumElements() / 2] = Idx->getZExtValue();
14608       return DAG.getVectorShuffle(VT, dl, OrigVector, OrigVector, NewMask);
14609     }
14610   }
14611   return DAG.getNode(PPCISD::SCALAR_TO_VECTOR_PERMUTED, dl, VT,
14612                      OrigSToV.getOperand(0));
14613 }
14614 
14615 // On little endian subtargets, combine shuffles such as:
14616 // vector_shuffle<16,1,17,3,18,5,19,7,20,9,21,11,22,13,23,15>, <zero>, %b
14617 // into:
14618 // vector_shuffle<16,0,17,1,18,2,19,3,20,4,21,5,22,6,23,7>, <zero>, %b
14619 // because the latter can be matched to a single instruction merge.
14620 // Furthermore, SCALAR_TO_VECTOR on little endian always involves a permute
14621 // to put the value into element zero. Adjust the shuffle mask so that the
14622 // vector can remain in permuted form (to prevent a swap prior to a shuffle).
14623 SDValue PPCTargetLowering::combineVectorShuffle(ShuffleVectorSDNode *SVN,
14624                                                 SelectionDAG &DAG) const {
14625   SDValue LHS = SVN->getOperand(0);
14626   SDValue RHS = SVN->getOperand(1);
14627   auto Mask = SVN->getMask();
14628   int NumElts = LHS.getValueType().getVectorNumElements();
14629   SDValue Res(SVN, 0);
14630   SDLoc dl(SVN);
14631 
14632   // None of these combines are useful on big endian systems since the ISA
14633   // already has a big endian bias.
14634   if (!Subtarget.isLittleEndian() || !Subtarget.hasVSX())
14635     return Res;
14636 
14637   // If this is not a shuffle of a shuffle and the first element comes from
14638   // the second vector, canonicalize to the commuted form. This will make it
14639   // more likely to match one of the single instruction patterns.
14640   if (Mask[0] >= NumElts && LHS.getOpcode() != ISD::VECTOR_SHUFFLE &&
14641       RHS.getOpcode() != ISD::VECTOR_SHUFFLE) {
14642     std::swap(LHS, RHS);
14643     Res = DAG.getCommutedVectorShuffle(*SVN);
14644     Mask = cast<ShuffleVectorSDNode>(Res)->getMask();
14645   }
14646 
14647   // Adjust the shuffle mask if either input vector comes from a
14648   // SCALAR_TO_VECTOR and keep the respective input vector in permuted
14649   // form (to prevent the need for a swap).
14650   SmallVector<int, 16> ShuffV(Mask.begin(), Mask.end());
14651   SDValue SToVLHS = isScalarToVec(LHS);
14652   SDValue SToVRHS = isScalarToVec(RHS);
14653   if (SToVLHS || SToVRHS) {
14654     int NumEltsIn = SToVLHS ? SToVLHS.getValueType().getVectorNumElements()
14655                             : SToVRHS.getValueType().getVectorNumElements();
14656     int NumEltsOut = ShuffV.size();
14657 
14658     // Initially assume that neither input is permuted. These will be adjusted
14659     // accordingly if either input is.
14660     int LHSMaxIdx = -1;
14661     int RHSMinIdx = -1;
14662     int RHSMaxIdx = -1;
14663     int HalfVec = LHS.getValueType().getVectorNumElements() / 2;
14664 
14665     // Get the permuted scalar to vector nodes for the source(s) that come from
14666     // ISD::SCALAR_TO_VECTOR.
14667     if (SToVLHS) {
14668       // Set up the values for the shuffle vector fixup.
14669       LHSMaxIdx = NumEltsOut / NumEltsIn;
14670       SToVLHS = getSToVPermuted(SToVLHS, DAG);
14671       if (SToVLHS.getValueType() != LHS.getValueType())
14672         SToVLHS = DAG.getBitcast(LHS.getValueType(), SToVLHS);
14673       LHS = SToVLHS;
14674     }
14675     if (SToVRHS) {
14676       RHSMinIdx = NumEltsOut;
14677       RHSMaxIdx = NumEltsOut / NumEltsIn + RHSMinIdx;
14678       SToVRHS = getSToVPermuted(SToVRHS, DAG);
14679       if (SToVRHS.getValueType() != RHS.getValueType())
14680         SToVRHS = DAG.getBitcast(RHS.getValueType(), SToVRHS);
14681       RHS = SToVRHS;
14682     }
14683 
14684     // Fix up the shuffle mask to reflect where the desired element actually is.
14685     // The minimum and maximum indices that correspond to element zero for both
14686     // the LHS and RHS are computed and will control which shuffle mask entries
14687     // are to be changed. For example, if the RHS is permuted, any shuffle mask
14688     // entries in the range [RHSMinIdx,RHSMaxIdx) will be incremented by
14689     // HalfVec to refer to the corresponding element in the permuted vector.
14690     fixupShuffleMaskForPermutedSToV(ShuffV, LHSMaxIdx, RHSMinIdx, RHSMaxIdx,
14691                                     HalfVec);
14692     Res = DAG.getVectorShuffle(SVN->getValueType(0), dl, LHS, RHS, ShuffV);
14693 
14694     // We may have simplified away the shuffle. We won't be able to do anything
14695     // further with it here.
14696     if (!isa<ShuffleVectorSDNode>(Res))
14697       return Res;
14698     Mask = cast<ShuffleVectorSDNode>(Res)->getMask();
14699   }
14700 
14701   // The common case after we commuted the shuffle is that the RHS is a splat
14702   // and we have elements coming in from the splat at indices that are not
14703   // conducive to using a merge.
14704   // Example:
14705   // vector_shuffle<0,17,1,19,2,21,3,23,4,25,5,27,6,29,7,31> t1, <zero>
14706   if (!isSplatBV(RHS))
14707     return Res;
14708 
14709   // We are looking for a mask such that all even elements are from
14710   // one vector and all odd elements from the other.
14711   if (!isAlternatingShuffMask(Mask, NumElts))
14712     return Res;
14713 
14714   // Adjust the mask so we are pulling in the same index from the splat
14715   // as the index from the interesting vector in consecutive elements.
14716   // Example (even elements from first vector):
14717   // vector_shuffle<0,16,1,17,2,18,3,19,4,20,5,21,6,22,7,23> t1, <zero>
14718   if (Mask[0] < NumElts)
14719     for (int i = 1, e = Mask.size(); i < e; i += 2)
14720       ShuffV[i] = (ShuffV[i - 1] + NumElts);
14721   // Example (odd elements from first vector):
14722   // vector_shuffle<16,0,17,1,18,2,19,3,20,4,21,5,22,6,23,7> t1, <zero>
14723   else
14724     for (int i = 0, e = Mask.size(); i < e; i += 2)
14725       ShuffV[i] = (ShuffV[i + 1] + NumElts);
14726 
14727   // If the RHS has undefs, we need to remove them since we may have created
14728   // a shuffle that adds those instead of the splat value.
14729   SDValue SplatVal = cast<BuildVectorSDNode>(RHS.getNode())->getSplatValue();
14730   RHS = DAG.getSplatBuildVector(RHS.getValueType(), dl, SplatVal);
14731 
14732   Res = DAG.getVectorShuffle(SVN->getValueType(0), dl, LHS, RHS, ShuffV);
14733   return Res;
14734 }
14735 
14736 SDValue PPCTargetLowering::combineVReverseMemOP(ShuffleVectorSDNode *SVN,
14737                                                 LSBaseSDNode *LSBase,
14738                                                 DAGCombinerInfo &DCI) const {
14739   assert((ISD::isNormalLoad(LSBase) || ISD::isNormalStore(LSBase)) &&
14740         "Not a reverse memop pattern!");
14741 
14742   auto IsElementReverse = [](const ShuffleVectorSDNode *SVN) -> bool {
14743     auto Mask = SVN->getMask();
14744     int i = 0;
14745     auto I = Mask.rbegin();
14746     auto E = Mask.rend();
14747 
14748     for (; I != E; ++I) {
14749       if (*I != i)
14750         return false;
14751       i++;
14752     }
14753     return true;
14754   };
14755 
14756   SelectionDAG &DAG = DCI.DAG;
14757   EVT VT = SVN->getValueType(0);
14758 
14759   if (!isTypeLegal(VT) || !Subtarget.isLittleEndian() || !Subtarget.hasVSX())
14760     return SDValue();
14761 
14762   // Before P9, we have PPCVSXSwapRemoval pass to hack the element order.
14763   // See comment in PPCVSXSwapRemoval.cpp.
14764   // It is conflict with PPCVSXSwapRemoval opt. So we don't do it.
14765   if (!Subtarget.hasP9Vector())
14766     return SDValue();
14767 
14768   if(!IsElementReverse(SVN))
14769     return SDValue();
14770 
14771   if (LSBase->getOpcode() == ISD::LOAD) {
14772     SDLoc dl(SVN);
14773     SDValue LoadOps[] = {LSBase->getChain(), LSBase->getBasePtr()};
14774     return DAG.getMemIntrinsicNode(
14775         PPCISD::LOAD_VEC_BE, dl, DAG.getVTList(VT, MVT::Other), LoadOps,
14776         LSBase->getMemoryVT(), LSBase->getMemOperand());
14777   }
14778 
14779   if (LSBase->getOpcode() == ISD::STORE) {
14780     SDLoc dl(LSBase);
14781     SDValue StoreOps[] = {LSBase->getChain(), SVN->getOperand(0),
14782                           LSBase->getBasePtr()};
14783     return DAG.getMemIntrinsicNode(
14784         PPCISD::STORE_VEC_BE, dl, DAG.getVTList(MVT::Other), StoreOps,
14785         LSBase->getMemoryVT(), LSBase->getMemOperand());
14786   }
14787 
14788   llvm_unreachable("Expected a load or store node here");
14789 }
14790 
14791 SDValue PPCTargetLowering::PerformDAGCombine(SDNode *N,
14792                                              DAGCombinerInfo &DCI) const {
14793   SelectionDAG &DAG = DCI.DAG;
14794   SDLoc dl(N);
14795   switch (N->getOpcode()) {
14796   default: break;
14797   case ISD::ADD:
14798     return combineADD(N, DCI);
14799   case ISD::SHL:
14800     return combineSHL(N, DCI);
14801   case ISD::SRA:
14802     return combineSRA(N, DCI);
14803   case ISD::SRL:
14804     return combineSRL(N, DCI);
14805   case ISD::MUL:
14806     return combineMUL(N, DCI);
14807   case ISD::FMA:
14808   case PPCISD::FNMSUB:
14809     return combineFMALike(N, DCI);
14810   case PPCISD::SHL:
14811     if (isNullConstant(N->getOperand(0))) // 0 << V -> 0.
14812         return N->getOperand(0);
14813     break;
14814   case PPCISD::SRL:
14815     if (isNullConstant(N->getOperand(0))) // 0 >>u V -> 0.
14816         return N->getOperand(0);
14817     break;
14818   case PPCISD::SRA:
14819     if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(N->getOperand(0))) {
14820       if (C->isNullValue() ||   //  0 >>s V -> 0.
14821           C->isAllOnesValue())    // -1 >>s V -> -1.
14822         return N->getOperand(0);
14823     }
14824     break;
14825   case ISD::SIGN_EXTEND:
14826   case ISD::ZERO_EXTEND:
14827   case ISD::ANY_EXTEND:
14828     return DAGCombineExtBoolTrunc(N, DCI);
14829   case ISD::TRUNCATE:
14830     return combineTRUNCATE(N, DCI);
14831   case ISD::SETCC:
14832     if (SDValue CSCC = combineSetCC(N, DCI))
14833       return CSCC;
14834     LLVM_FALLTHROUGH;
14835   case ISD::SELECT_CC:
14836     return DAGCombineTruncBoolExt(N, DCI);
14837   case ISD::SINT_TO_FP:
14838   case ISD::UINT_TO_FP:
14839     return combineFPToIntToFP(N, DCI);
14840   case ISD::VECTOR_SHUFFLE:
14841     if (ISD::isNormalLoad(N->getOperand(0).getNode())) {
14842       LSBaseSDNode* LSBase = cast<LSBaseSDNode>(N->getOperand(0));
14843       return combineVReverseMemOP(cast<ShuffleVectorSDNode>(N), LSBase, DCI);
14844     }
14845     return combineVectorShuffle(cast<ShuffleVectorSDNode>(N), DCI.DAG);
14846   case ISD::STORE: {
14847 
14848     EVT Op1VT = N->getOperand(1).getValueType();
14849     unsigned Opcode = N->getOperand(1).getOpcode();
14850 
14851     if (Opcode == ISD::FP_TO_SINT || Opcode == ISD::FP_TO_UINT) {
14852       SDValue Val= combineStoreFPToInt(N, DCI);
14853       if (Val)
14854         return Val;
14855     }
14856 
14857     if (Opcode == ISD::VECTOR_SHUFFLE && ISD::isNormalStore(N)) {
14858       ShuffleVectorSDNode *SVN = cast<ShuffleVectorSDNode>(N->getOperand(1));
14859       SDValue Val= combineVReverseMemOP(SVN, cast<LSBaseSDNode>(N), DCI);
14860       if (Val)
14861         return Val;
14862     }
14863 
14864     // Turn STORE (BSWAP) -> sthbrx/stwbrx.
14865     if (cast<StoreSDNode>(N)->isUnindexed() && Opcode == ISD::BSWAP &&
14866         N->getOperand(1).getNode()->hasOneUse() &&
14867         (Op1VT == MVT::i32 || Op1VT == MVT::i16 ||
14868          (Subtarget.hasLDBRX() && Subtarget.isPPC64() && Op1VT == MVT::i64))) {
14869 
14870       // STBRX can only handle simple types and it makes no sense to store less
14871       // two bytes in byte-reversed order.
14872       EVT mVT = cast<StoreSDNode>(N)->getMemoryVT();
14873       if (mVT.isExtended() || mVT.getSizeInBits() < 16)
14874         break;
14875 
14876       SDValue BSwapOp = N->getOperand(1).getOperand(0);
14877       // Do an any-extend to 32-bits if this is a half-word input.
14878       if (BSwapOp.getValueType() == MVT::i16)
14879         BSwapOp = DAG.getNode(ISD::ANY_EXTEND, dl, MVT::i32, BSwapOp);
14880 
14881       // If the type of BSWAP operand is wider than stored memory width
14882       // it need to be shifted to the right side before STBRX.
14883       if (Op1VT.bitsGT(mVT)) {
14884         int Shift = Op1VT.getSizeInBits() - mVT.getSizeInBits();
14885         BSwapOp = DAG.getNode(ISD::SRL, dl, Op1VT, BSwapOp,
14886                               DAG.getConstant(Shift, dl, MVT::i32));
14887         // Need to truncate if this is a bswap of i64 stored as i32/i16.
14888         if (Op1VT == MVT::i64)
14889           BSwapOp = DAG.getNode(ISD::TRUNCATE, dl, MVT::i32, BSwapOp);
14890       }
14891 
14892       SDValue Ops[] = {
14893         N->getOperand(0), BSwapOp, N->getOperand(2), DAG.getValueType(mVT)
14894       };
14895       return
14896         DAG.getMemIntrinsicNode(PPCISD::STBRX, dl, DAG.getVTList(MVT::Other),
14897                                 Ops, cast<StoreSDNode>(N)->getMemoryVT(),
14898                                 cast<StoreSDNode>(N)->getMemOperand());
14899     }
14900 
14901     // STORE Constant:i32<0>  ->  STORE<trunc to i32> Constant:i64<0>
14902     // So it can increase the chance of CSE constant construction.
14903     if (Subtarget.isPPC64() && !DCI.isBeforeLegalize() &&
14904         isa<ConstantSDNode>(N->getOperand(1)) && Op1VT == MVT::i32) {
14905       // Need to sign-extended to 64-bits to handle negative values.
14906       EVT MemVT = cast<StoreSDNode>(N)->getMemoryVT();
14907       uint64_t Val64 = SignExtend64(N->getConstantOperandVal(1),
14908                                     MemVT.getSizeInBits());
14909       SDValue Const64 = DAG.getConstant(Val64, dl, MVT::i64);
14910 
14911       // DAG.getTruncStore() can't be used here because it doesn't accept
14912       // the general (base + offset) addressing mode.
14913       // So we use UpdateNodeOperands and setTruncatingStore instead.
14914       DAG.UpdateNodeOperands(N, N->getOperand(0), Const64, N->getOperand(2),
14915                              N->getOperand(3));
14916       cast<StoreSDNode>(N)->setTruncatingStore(true);
14917       return SDValue(N, 0);
14918     }
14919 
14920     // For little endian, VSX stores require generating xxswapd/lxvd2x.
14921     // Not needed on ISA 3.0 based CPUs since we have a non-permuting store.
14922     if (Op1VT.isSimple()) {
14923       MVT StoreVT = Op1VT.getSimpleVT();
14924       if (Subtarget.needsSwapsForVSXMemOps() &&
14925           (StoreVT == MVT::v2f64 || StoreVT == MVT::v2i64 ||
14926            StoreVT == MVT::v4f32 || StoreVT == MVT::v4i32))
14927         return expandVSXStoreForLE(N, DCI);
14928     }
14929     break;
14930   }
14931   case ISD::LOAD: {
14932     LoadSDNode *LD = cast<LoadSDNode>(N);
14933     EVT VT = LD->getValueType(0);
14934 
14935     // For little endian, VSX loads require generating lxvd2x/xxswapd.
14936     // Not needed on ISA 3.0 based CPUs since we have a non-permuting load.
14937     if (VT.isSimple()) {
14938       MVT LoadVT = VT.getSimpleVT();
14939       if (Subtarget.needsSwapsForVSXMemOps() &&
14940           (LoadVT == MVT::v2f64 || LoadVT == MVT::v2i64 ||
14941            LoadVT == MVT::v4f32 || LoadVT == MVT::v4i32))
14942         return expandVSXLoadForLE(N, DCI);
14943     }
14944 
14945     // We sometimes end up with a 64-bit integer load, from which we extract
14946     // two single-precision floating-point numbers. This happens with
14947     // std::complex<float>, and other similar structures, because of the way we
14948     // canonicalize structure copies. However, if we lack direct moves,
14949     // then the final bitcasts from the extracted integer values to the
14950     // floating-point numbers turn into store/load pairs. Even with direct moves,
14951     // just loading the two floating-point numbers is likely better.
14952     auto ReplaceTwoFloatLoad = [&]() {
14953       if (VT != MVT::i64)
14954         return false;
14955 
14956       if (LD->getExtensionType() != ISD::NON_EXTLOAD ||
14957           LD->isVolatile())
14958         return false;
14959 
14960       //  We're looking for a sequence like this:
14961       //  t13: i64,ch = load<LD8[%ref.tmp]> t0, t6, undef:i64
14962       //      t16: i64 = srl t13, Constant:i32<32>
14963       //    t17: i32 = truncate t16
14964       //  t18: f32 = bitcast t17
14965       //    t19: i32 = truncate t13
14966       //  t20: f32 = bitcast t19
14967 
14968       if (!LD->hasNUsesOfValue(2, 0))
14969         return false;
14970 
14971       auto UI = LD->use_begin();
14972       while (UI.getUse().getResNo() != 0) ++UI;
14973       SDNode *Trunc = *UI++;
14974       while (UI.getUse().getResNo() != 0) ++UI;
14975       SDNode *RightShift = *UI;
14976       if (Trunc->getOpcode() != ISD::TRUNCATE)
14977         std::swap(Trunc, RightShift);
14978 
14979       if (Trunc->getOpcode() != ISD::TRUNCATE ||
14980           Trunc->getValueType(0) != MVT::i32 ||
14981           !Trunc->hasOneUse())
14982         return false;
14983       if (RightShift->getOpcode() != ISD::SRL ||
14984           !isa<ConstantSDNode>(RightShift->getOperand(1)) ||
14985           RightShift->getConstantOperandVal(1) != 32 ||
14986           !RightShift->hasOneUse())
14987         return false;
14988 
14989       SDNode *Trunc2 = *RightShift->use_begin();
14990       if (Trunc2->getOpcode() != ISD::TRUNCATE ||
14991           Trunc2->getValueType(0) != MVT::i32 ||
14992           !Trunc2->hasOneUse())
14993         return false;
14994 
14995       SDNode *Bitcast = *Trunc->use_begin();
14996       SDNode *Bitcast2 = *Trunc2->use_begin();
14997 
14998       if (Bitcast->getOpcode() != ISD::BITCAST ||
14999           Bitcast->getValueType(0) != MVT::f32)
15000         return false;
15001       if (Bitcast2->getOpcode() != ISD::BITCAST ||
15002           Bitcast2->getValueType(0) != MVT::f32)
15003         return false;
15004 
15005       if (Subtarget.isLittleEndian())
15006         std::swap(Bitcast, Bitcast2);
15007 
15008       // Bitcast has the second float (in memory-layout order) and Bitcast2
15009       // has the first one.
15010 
15011       SDValue BasePtr = LD->getBasePtr();
15012       if (LD->isIndexed()) {
15013         assert(LD->getAddressingMode() == ISD::PRE_INC &&
15014                "Non-pre-inc AM on PPC?");
15015         BasePtr =
15016           DAG.getNode(ISD::ADD, dl, BasePtr.getValueType(), BasePtr,
15017                       LD->getOffset());
15018       }
15019 
15020       auto MMOFlags =
15021           LD->getMemOperand()->getFlags() & ~MachineMemOperand::MOVolatile;
15022       SDValue FloatLoad = DAG.getLoad(MVT::f32, dl, LD->getChain(), BasePtr,
15023                                       LD->getPointerInfo(), LD->getAlignment(),
15024                                       MMOFlags, LD->getAAInfo());
15025       SDValue AddPtr =
15026         DAG.getNode(ISD::ADD, dl, BasePtr.getValueType(),
15027                     BasePtr, DAG.getIntPtrConstant(4, dl));
15028       SDValue FloatLoad2 = DAG.getLoad(
15029           MVT::f32, dl, SDValue(FloatLoad.getNode(), 1), AddPtr,
15030           LD->getPointerInfo().getWithOffset(4),
15031           MinAlign(LD->getAlignment(), 4), MMOFlags, LD->getAAInfo());
15032 
15033       if (LD->isIndexed()) {
15034         // Note that DAGCombine should re-form any pre-increment load(s) from
15035         // what is produced here if that makes sense.
15036         DAG.ReplaceAllUsesOfValueWith(SDValue(LD, 1), BasePtr);
15037       }
15038 
15039       DCI.CombineTo(Bitcast2, FloatLoad);
15040       DCI.CombineTo(Bitcast, FloatLoad2);
15041 
15042       DAG.ReplaceAllUsesOfValueWith(SDValue(LD, LD->isIndexed() ? 2 : 1),
15043                                     SDValue(FloatLoad2.getNode(), 1));
15044       return true;
15045     };
15046 
15047     if (ReplaceTwoFloatLoad())
15048       return SDValue(N, 0);
15049 
15050     EVT MemVT = LD->getMemoryVT();
15051     Type *Ty = MemVT.getTypeForEVT(*DAG.getContext());
15052     Align ABIAlignment = DAG.getDataLayout().getABITypeAlign(Ty);
15053     Type *STy = MemVT.getScalarType().getTypeForEVT(*DAG.getContext());
15054     Align ScalarABIAlignment = DAG.getDataLayout().getABITypeAlign(STy);
15055     if (LD->isUnindexed() && VT.isVector() &&
15056         ((Subtarget.hasAltivec() && ISD::isNON_EXTLoad(N) &&
15057           // P8 and later hardware should just use LOAD.
15058           !Subtarget.hasP8Vector() &&
15059           (VT == MVT::v16i8 || VT == MVT::v8i16 || VT == MVT::v4i32 ||
15060            VT == MVT::v4f32)) ||
15061          (Subtarget.hasQPX() && (VT == MVT::v4f64 || VT == MVT::v4f32) &&
15062           LD->getAlign() >= ScalarABIAlignment)) &&
15063         LD->getAlign() < ABIAlignment) {
15064       // This is a type-legal unaligned Altivec or QPX load.
15065       SDValue Chain = LD->getChain();
15066       SDValue Ptr = LD->getBasePtr();
15067       bool isLittleEndian = Subtarget.isLittleEndian();
15068 
15069       // This implements the loading of unaligned vectors as described in
15070       // the venerable Apple Velocity Engine overview. Specifically:
15071       // https://developer.apple.com/hardwaredrivers/ve/alignment.html
15072       // https://developer.apple.com/hardwaredrivers/ve/code_optimization.html
15073       //
15074       // The general idea is to expand a sequence of one or more unaligned
15075       // loads into an alignment-based permutation-control instruction (lvsl
15076       // or lvsr), a series of regular vector loads (which always truncate
15077       // their input address to an aligned address), and a series of
15078       // permutations.  The results of these permutations are the requested
15079       // loaded values.  The trick is that the last "extra" load is not taken
15080       // from the address you might suspect (sizeof(vector) bytes after the
15081       // last requested load), but rather sizeof(vector) - 1 bytes after the
15082       // last requested vector. The point of this is to avoid a page fault if
15083       // the base address happened to be aligned. This works because if the
15084       // base address is aligned, then adding less than a full vector length
15085       // will cause the last vector in the sequence to be (re)loaded.
15086       // Otherwise, the next vector will be fetched as you might suspect was
15087       // necessary.
15088 
15089       // We might be able to reuse the permutation generation from
15090       // a different base address offset from this one by an aligned amount.
15091       // The INTRINSIC_WO_CHAIN DAG combine will attempt to perform this
15092       // optimization later.
15093       Intrinsic::ID Intr, IntrLD, IntrPerm;
15094       MVT PermCntlTy, PermTy, LDTy;
15095       if (Subtarget.hasAltivec()) {
15096         Intr = isLittleEndian ?  Intrinsic::ppc_altivec_lvsr :
15097                                  Intrinsic::ppc_altivec_lvsl;
15098         IntrLD = Intrinsic::ppc_altivec_lvx;
15099         IntrPerm = Intrinsic::ppc_altivec_vperm;
15100         PermCntlTy = MVT::v16i8;
15101         PermTy = MVT::v4i32;
15102         LDTy = MVT::v4i32;
15103       } else {
15104         Intr =   MemVT == MVT::v4f64 ? Intrinsic::ppc_qpx_qvlpcld :
15105                                        Intrinsic::ppc_qpx_qvlpcls;
15106         IntrLD = MemVT == MVT::v4f64 ? Intrinsic::ppc_qpx_qvlfd :
15107                                        Intrinsic::ppc_qpx_qvlfs;
15108         IntrPerm = Intrinsic::ppc_qpx_qvfperm;
15109         PermCntlTy = MVT::v4f64;
15110         PermTy = MVT::v4f64;
15111         LDTy = MemVT.getSimpleVT();
15112       }
15113 
15114       SDValue PermCntl = BuildIntrinsicOp(Intr, Ptr, DAG, dl, PermCntlTy);
15115 
15116       // Create the new MMO for the new base load. It is like the original MMO,
15117       // but represents an area in memory almost twice the vector size centered
15118       // on the original address. If the address is unaligned, we might start
15119       // reading up to (sizeof(vector)-1) bytes below the address of the
15120       // original unaligned load.
15121       MachineFunction &MF = DAG.getMachineFunction();
15122       MachineMemOperand *BaseMMO =
15123         MF.getMachineMemOperand(LD->getMemOperand(),
15124                                 -(long)MemVT.getStoreSize()+1,
15125                                 2*MemVT.getStoreSize()-1);
15126 
15127       // Create the new base load.
15128       SDValue LDXIntID =
15129           DAG.getTargetConstant(IntrLD, dl, getPointerTy(MF.getDataLayout()));
15130       SDValue BaseLoadOps[] = { Chain, LDXIntID, Ptr };
15131       SDValue BaseLoad =
15132         DAG.getMemIntrinsicNode(ISD::INTRINSIC_W_CHAIN, dl,
15133                                 DAG.getVTList(PermTy, MVT::Other),
15134                                 BaseLoadOps, LDTy, BaseMMO);
15135 
15136       // Note that the value of IncOffset (which is provided to the next
15137       // load's pointer info offset value, and thus used to calculate the
15138       // alignment), and the value of IncValue (which is actually used to
15139       // increment the pointer value) are different! This is because we
15140       // require the next load to appear to be aligned, even though it
15141       // is actually offset from the base pointer by a lesser amount.
15142       int IncOffset = VT.getSizeInBits() / 8;
15143       int IncValue = IncOffset;
15144 
15145       // Walk (both up and down) the chain looking for another load at the real
15146       // (aligned) offset (the alignment of the other load does not matter in
15147       // this case). If found, then do not use the offset reduction trick, as
15148       // that will prevent the loads from being later combined (as they would
15149       // otherwise be duplicates).
15150       if (!findConsecutiveLoad(LD, DAG))
15151         --IncValue;
15152 
15153       SDValue Increment =
15154           DAG.getConstant(IncValue, dl, getPointerTy(MF.getDataLayout()));
15155       Ptr = DAG.getNode(ISD::ADD, dl, Ptr.getValueType(), Ptr, Increment);
15156 
15157       MachineMemOperand *ExtraMMO =
15158         MF.getMachineMemOperand(LD->getMemOperand(),
15159                                 1, 2*MemVT.getStoreSize()-1);
15160       SDValue ExtraLoadOps[] = { Chain, LDXIntID, Ptr };
15161       SDValue ExtraLoad =
15162         DAG.getMemIntrinsicNode(ISD::INTRINSIC_W_CHAIN, dl,
15163                                 DAG.getVTList(PermTy, MVT::Other),
15164                                 ExtraLoadOps, LDTy, ExtraMMO);
15165 
15166       SDValue TF = DAG.getNode(ISD::TokenFactor, dl, MVT::Other,
15167         BaseLoad.getValue(1), ExtraLoad.getValue(1));
15168 
15169       // Because vperm has a big-endian bias, we must reverse the order
15170       // of the input vectors and complement the permute control vector
15171       // when generating little endian code.  We have already handled the
15172       // latter by using lvsr instead of lvsl, so just reverse BaseLoad
15173       // and ExtraLoad here.
15174       SDValue Perm;
15175       if (isLittleEndian)
15176         Perm = BuildIntrinsicOp(IntrPerm,
15177                                 ExtraLoad, BaseLoad, PermCntl, DAG, dl);
15178       else
15179         Perm = BuildIntrinsicOp(IntrPerm,
15180                                 BaseLoad, ExtraLoad, PermCntl, DAG, dl);
15181 
15182       if (VT != PermTy)
15183         Perm = Subtarget.hasAltivec() ?
15184                  DAG.getNode(ISD::BITCAST, dl, VT, Perm) :
15185                  DAG.getNode(ISD::FP_ROUND, dl, VT, Perm, // QPX
15186                                DAG.getTargetConstant(1, dl, MVT::i64));
15187                                // second argument is 1 because this rounding
15188                                // is always exact.
15189 
15190       // The output of the permutation is our loaded result, the TokenFactor is
15191       // our new chain.
15192       DCI.CombineTo(N, Perm, TF);
15193       return SDValue(N, 0);
15194     }
15195     }
15196     break;
15197     case ISD::INTRINSIC_WO_CHAIN: {
15198       bool isLittleEndian = Subtarget.isLittleEndian();
15199       unsigned IID = cast<ConstantSDNode>(N->getOperand(0))->getZExtValue();
15200       Intrinsic::ID Intr = (isLittleEndian ? Intrinsic::ppc_altivec_lvsr
15201                                            : Intrinsic::ppc_altivec_lvsl);
15202       if ((IID == Intr ||
15203            IID == Intrinsic::ppc_qpx_qvlpcld  ||
15204            IID == Intrinsic::ppc_qpx_qvlpcls) &&
15205         N->getOperand(1)->getOpcode() == ISD::ADD) {
15206         SDValue Add = N->getOperand(1);
15207 
15208         int Bits = IID == Intrinsic::ppc_qpx_qvlpcld ?
15209                    5 /* 32 byte alignment */ : 4 /* 16 byte alignment */;
15210 
15211         if (DAG.MaskedValueIsZero(Add->getOperand(1),
15212                                   APInt::getAllOnesValue(Bits /* alignment */)
15213                                       .zext(Add.getScalarValueSizeInBits()))) {
15214           SDNode *BasePtr = Add->getOperand(0).getNode();
15215           for (SDNode::use_iterator UI = BasePtr->use_begin(),
15216                                     UE = BasePtr->use_end();
15217                UI != UE; ++UI) {
15218             if (UI->getOpcode() == ISD::INTRINSIC_WO_CHAIN &&
15219                 cast<ConstantSDNode>(UI->getOperand(0))->getZExtValue() == IID) {
15220               // We've found another LVSL/LVSR, and this address is an aligned
15221               // multiple of that one. The results will be the same, so use the
15222               // one we've just found instead.
15223 
15224               return SDValue(*UI, 0);
15225             }
15226           }
15227         }
15228 
15229         if (isa<ConstantSDNode>(Add->getOperand(1))) {
15230           SDNode *BasePtr = Add->getOperand(0).getNode();
15231           for (SDNode::use_iterator UI = BasePtr->use_begin(),
15232                UE = BasePtr->use_end(); UI != UE; ++UI) {
15233             if (UI->getOpcode() == ISD::ADD &&
15234                 isa<ConstantSDNode>(UI->getOperand(1)) &&
15235                 (cast<ConstantSDNode>(Add->getOperand(1))->getZExtValue() -
15236                  cast<ConstantSDNode>(UI->getOperand(1))->getZExtValue()) %
15237                 (1ULL << Bits) == 0) {
15238               SDNode *OtherAdd = *UI;
15239               for (SDNode::use_iterator VI = OtherAdd->use_begin(),
15240                    VE = OtherAdd->use_end(); VI != VE; ++VI) {
15241                 if (VI->getOpcode() == ISD::INTRINSIC_WO_CHAIN &&
15242                     cast<ConstantSDNode>(VI->getOperand(0))->getZExtValue() == IID) {
15243                   return SDValue(*VI, 0);
15244                 }
15245               }
15246             }
15247           }
15248         }
15249       }
15250 
15251       // Combine vmaxsw/h/b(a, a's negation) to abs(a)
15252       // Expose the vabsduw/h/b opportunity for down stream
15253       if (!DCI.isAfterLegalizeDAG() && Subtarget.hasP9Altivec() &&
15254           (IID == Intrinsic::ppc_altivec_vmaxsw ||
15255            IID == Intrinsic::ppc_altivec_vmaxsh ||
15256            IID == Intrinsic::ppc_altivec_vmaxsb)) {
15257         SDValue V1 = N->getOperand(1);
15258         SDValue V2 = N->getOperand(2);
15259         if ((V1.getSimpleValueType() == MVT::v4i32 ||
15260              V1.getSimpleValueType() == MVT::v8i16 ||
15261              V1.getSimpleValueType() == MVT::v16i8) &&
15262             V1.getSimpleValueType() == V2.getSimpleValueType()) {
15263           // (0-a, a)
15264           if (V1.getOpcode() == ISD::SUB &&
15265               ISD::isBuildVectorAllZeros(V1.getOperand(0).getNode()) &&
15266               V1.getOperand(1) == V2) {
15267             return DAG.getNode(ISD::ABS, dl, V2.getValueType(), V2);
15268           }
15269           // (a, 0-a)
15270           if (V2.getOpcode() == ISD::SUB &&
15271               ISD::isBuildVectorAllZeros(V2.getOperand(0).getNode()) &&
15272               V2.getOperand(1) == V1) {
15273             return DAG.getNode(ISD::ABS, dl, V1.getValueType(), V1);
15274           }
15275           // (x-y, y-x)
15276           if (V1.getOpcode() == ISD::SUB && V2.getOpcode() == ISD::SUB &&
15277               V1.getOperand(0) == V2.getOperand(1) &&
15278               V1.getOperand(1) == V2.getOperand(0)) {
15279             return DAG.getNode(ISD::ABS, dl, V1.getValueType(), V1);
15280           }
15281         }
15282       }
15283     }
15284 
15285     break;
15286   case ISD::INTRINSIC_W_CHAIN:
15287     // For little endian, VSX loads require generating lxvd2x/xxswapd.
15288     // Not needed on ISA 3.0 based CPUs since we have a non-permuting load.
15289     if (Subtarget.needsSwapsForVSXMemOps()) {
15290       switch (cast<ConstantSDNode>(N->getOperand(1))->getZExtValue()) {
15291       default:
15292         break;
15293       case Intrinsic::ppc_vsx_lxvw4x:
15294       case Intrinsic::ppc_vsx_lxvd2x:
15295         return expandVSXLoadForLE(N, DCI);
15296       }
15297     }
15298     break;
15299   case ISD::INTRINSIC_VOID:
15300     // For little endian, VSX stores require generating xxswapd/stxvd2x.
15301     // Not needed on ISA 3.0 based CPUs since we have a non-permuting store.
15302     if (Subtarget.needsSwapsForVSXMemOps()) {
15303       switch (cast<ConstantSDNode>(N->getOperand(1))->getZExtValue()) {
15304       default:
15305         break;
15306       case Intrinsic::ppc_vsx_stxvw4x:
15307       case Intrinsic::ppc_vsx_stxvd2x:
15308         return expandVSXStoreForLE(N, DCI);
15309       }
15310     }
15311     break;
15312   case ISD::BSWAP:
15313     // Turn BSWAP (LOAD) -> lhbrx/lwbrx.
15314     if (ISD::isNON_EXTLoad(N->getOperand(0).getNode()) &&
15315         N->getOperand(0).hasOneUse() &&
15316         (N->getValueType(0) == MVT::i32 || N->getValueType(0) == MVT::i16 ||
15317          (Subtarget.hasLDBRX() && Subtarget.isPPC64() &&
15318           N->getValueType(0) == MVT::i64))) {
15319       SDValue Load = N->getOperand(0);
15320       LoadSDNode *LD = cast<LoadSDNode>(Load);
15321       // Create the byte-swapping load.
15322       SDValue Ops[] = {
15323         LD->getChain(),    // Chain
15324         LD->getBasePtr(),  // Ptr
15325         DAG.getValueType(N->getValueType(0)) // VT
15326       };
15327       SDValue BSLoad =
15328         DAG.getMemIntrinsicNode(PPCISD::LBRX, dl,
15329                                 DAG.getVTList(N->getValueType(0) == MVT::i64 ?
15330                                               MVT::i64 : MVT::i32, MVT::Other),
15331                                 Ops, LD->getMemoryVT(), LD->getMemOperand());
15332 
15333       // If this is an i16 load, insert the truncate.
15334       SDValue ResVal = BSLoad;
15335       if (N->getValueType(0) == MVT::i16)
15336         ResVal = DAG.getNode(ISD::TRUNCATE, dl, MVT::i16, BSLoad);
15337 
15338       // First, combine the bswap away.  This makes the value produced by the
15339       // load dead.
15340       DCI.CombineTo(N, ResVal);
15341 
15342       // Next, combine the load away, we give it a bogus result value but a real
15343       // chain result.  The result value is dead because the bswap is dead.
15344       DCI.CombineTo(Load.getNode(), ResVal, BSLoad.getValue(1));
15345 
15346       // Return N so it doesn't get rechecked!
15347       return SDValue(N, 0);
15348     }
15349     break;
15350   case PPCISD::VCMP:
15351     // If a VCMPo node already exists with exactly the same operands as this
15352     // node, use its result instead of this node (VCMPo computes both a CR6 and
15353     // a normal output).
15354     //
15355     if (!N->getOperand(0).hasOneUse() &&
15356         !N->getOperand(1).hasOneUse() &&
15357         !N->getOperand(2).hasOneUse()) {
15358 
15359       // Scan all of the users of the LHS, looking for VCMPo's that match.
15360       SDNode *VCMPoNode = nullptr;
15361 
15362       SDNode *LHSN = N->getOperand(0).getNode();
15363       for (SDNode::use_iterator UI = LHSN->use_begin(), E = LHSN->use_end();
15364            UI != E; ++UI)
15365         if (UI->getOpcode() == PPCISD::VCMPo &&
15366             UI->getOperand(1) == N->getOperand(1) &&
15367             UI->getOperand(2) == N->getOperand(2) &&
15368             UI->getOperand(0) == N->getOperand(0)) {
15369           VCMPoNode = *UI;
15370           break;
15371         }
15372 
15373       // If there is no VCMPo node, or if the flag value has a single use, don't
15374       // transform this.
15375       if (!VCMPoNode || VCMPoNode->hasNUsesOfValue(0, 1))
15376         break;
15377 
15378       // Look at the (necessarily single) use of the flag value.  If it has a
15379       // chain, this transformation is more complex.  Note that multiple things
15380       // could use the value result, which we should ignore.
15381       SDNode *FlagUser = nullptr;
15382       for (SDNode::use_iterator UI = VCMPoNode->use_begin();
15383            FlagUser == nullptr; ++UI) {
15384         assert(UI != VCMPoNode->use_end() && "Didn't find user!");
15385         SDNode *User = *UI;
15386         for (unsigned i = 0, e = User->getNumOperands(); i != e; ++i) {
15387           if (User->getOperand(i) == SDValue(VCMPoNode, 1)) {
15388             FlagUser = User;
15389             break;
15390           }
15391         }
15392       }
15393 
15394       // If the user is a MFOCRF instruction, we know this is safe.
15395       // Otherwise we give up for right now.
15396       if (FlagUser->getOpcode() == PPCISD::MFOCRF)
15397         return SDValue(VCMPoNode, 0);
15398     }
15399     break;
15400   case ISD::BRCOND: {
15401     SDValue Cond = N->getOperand(1);
15402     SDValue Target = N->getOperand(2);
15403 
15404     if (Cond.getOpcode() == ISD::INTRINSIC_W_CHAIN &&
15405         cast<ConstantSDNode>(Cond.getOperand(1))->getZExtValue() ==
15406           Intrinsic::loop_decrement) {
15407 
15408       // We now need to make the intrinsic dead (it cannot be instruction
15409       // selected).
15410       DAG.ReplaceAllUsesOfValueWith(Cond.getValue(1), Cond.getOperand(0));
15411       assert(Cond.getNode()->hasOneUse() &&
15412              "Counter decrement has more than one use");
15413 
15414       return DAG.getNode(PPCISD::BDNZ, dl, MVT::Other,
15415                          N->getOperand(0), Target);
15416     }
15417   }
15418   break;
15419   case ISD::BR_CC: {
15420     // If this is a branch on an altivec predicate comparison, lower this so
15421     // that we don't have to do a MFOCRF: instead, branch directly on CR6.  This
15422     // lowering is done pre-legalize, because the legalizer lowers the predicate
15423     // compare down to code that is difficult to reassemble.
15424     ISD::CondCode CC = cast<CondCodeSDNode>(N->getOperand(1))->get();
15425     SDValue LHS = N->getOperand(2), RHS = N->getOperand(3);
15426 
15427     // Sometimes the promoted value of the intrinsic is ANDed by some non-zero
15428     // value. If so, pass-through the AND to get to the intrinsic.
15429     if (LHS.getOpcode() == ISD::AND &&
15430         LHS.getOperand(0).getOpcode() == ISD::INTRINSIC_W_CHAIN &&
15431         cast<ConstantSDNode>(LHS.getOperand(0).getOperand(1))->getZExtValue() ==
15432           Intrinsic::loop_decrement &&
15433         isa<ConstantSDNode>(LHS.getOperand(1)) &&
15434         !isNullConstant(LHS.getOperand(1)))
15435       LHS = LHS.getOperand(0);
15436 
15437     if (LHS.getOpcode() == ISD::INTRINSIC_W_CHAIN &&
15438         cast<ConstantSDNode>(LHS.getOperand(1))->getZExtValue() ==
15439           Intrinsic::loop_decrement &&
15440         isa<ConstantSDNode>(RHS)) {
15441       assert((CC == ISD::SETEQ || CC == ISD::SETNE) &&
15442              "Counter decrement comparison is not EQ or NE");
15443 
15444       unsigned Val = cast<ConstantSDNode>(RHS)->getZExtValue();
15445       bool isBDNZ = (CC == ISD::SETEQ && Val) ||
15446                     (CC == ISD::SETNE && !Val);
15447 
15448       // We now need to make the intrinsic dead (it cannot be instruction
15449       // selected).
15450       DAG.ReplaceAllUsesOfValueWith(LHS.getValue(1), LHS.getOperand(0));
15451       assert(LHS.getNode()->hasOneUse() &&
15452              "Counter decrement has more than one use");
15453 
15454       return DAG.getNode(isBDNZ ? PPCISD::BDNZ : PPCISD::BDZ, dl, MVT::Other,
15455                          N->getOperand(0), N->getOperand(4));
15456     }
15457 
15458     int CompareOpc;
15459     bool isDot;
15460 
15461     if (LHS.getOpcode() == ISD::INTRINSIC_WO_CHAIN &&
15462         isa<ConstantSDNode>(RHS) && (CC == ISD::SETEQ || CC == ISD::SETNE) &&
15463         getVectorCompareInfo(LHS, CompareOpc, isDot, Subtarget)) {
15464       assert(isDot && "Can't compare against a vector result!");
15465 
15466       // If this is a comparison against something other than 0/1, then we know
15467       // that the condition is never/always true.
15468       unsigned Val = cast<ConstantSDNode>(RHS)->getZExtValue();
15469       if (Val != 0 && Val != 1) {
15470         if (CC == ISD::SETEQ)      // Cond never true, remove branch.
15471           return N->getOperand(0);
15472         // Always !=, turn it into an unconditional branch.
15473         return DAG.getNode(ISD::BR, dl, MVT::Other,
15474                            N->getOperand(0), N->getOperand(4));
15475       }
15476 
15477       bool BranchOnWhenPredTrue = (CC == ISD::SETEQ) ^ (Val == 0);
15478 
15479       // Create the PPCISD altivec 'dot' comparison node.
15480       SDValue Ops[] = {
15481         LHS.getOperand(2),  // LHS of compare
15482         LHS.getOperand(3),  // RHS of compare
15483         DAG.getConstant(CompareOpc, dl, MVT::i32)
15484       };
15485       EVT VTs[] = { LHS.getOperand(2).getValueType(), MVT::Glue };
15486       SDValue CompNode = DAG.getNode(PPCISD::VCMPo, dl, VTs, Ops);
15487 
15488       // Unpack the result based on how the target uses it.
15489       PPC::Predicate CompOpc;
15490       switch (cast<ConstantSDNode>(LHS.getOperand(1))->getZExtValue()) {
15491       default:  // Can't happen, don't crash on invalid number though.
15492       case 0:   // Branch on the value of the EQ bit of CR6.
15493         CompOpc = BranchOnWhenPredTrue ? PPC::PRED_EQ : PPC::PRED_NE;
15494         break;
15495       case 1:   // Branch on the inverted value of the EQ bit of CR6.
15496         CompOpc = BranchOnWhenPredTrue ? PPC::PRED_NE : PPC::PRED_EQ;
15497         break;
15498       case 2:   // Branch on the value of the LT bit of CR6.
15499         CompOpc = BranchOnWhenPredTrue ? PPC::PRED_LT : PPC::PRED_GE;
15500         break;
15501       case 3:   // Branch on the inverted value of the LT bit of CR6.
15502         CompOpc = BranchOnWhenPredTrue ? PPC::PRED_GE : PPC::PRED_LT;
15503         break;
15504       }
15505 
15506       return DAG.getNode(PPCISD::COND_BRANCH, dl, MVT::Other, N->getOperand(0),
15507                          DAG.getConstant(CompOpc, dl, MVT::i32),
15508                          DAG.getRegister(PPC::CR6, MVT::i32),
15509                          N->getOperand(4), CompNode.getValue(1));
15510     }
15511     break;
15512   }
15513   case ISD::BUILD_VECTOR:
15514     return DAGCombineBuildVector(N, DCI);
15515   case ISD::ABS:
15516     return combineABS(N, DCI);
15517   case ISD::VSELECT:
15518     return combineVSelect(N, DCI);
15519   }
15520 
15521   return SDValue();
15522 }
15523 
15524 SDValue
15525 PPCTargetLowering::BuildSDIVPow2(SDNode *N, const APInt &Divisor,
15526                                  SelectionDAG &DAG,
15527                                  SmallVectorImpl<SDNode *> &Created) const {
15528   // fold (sdiv X, pow2)
15529   EVT VT = N->getValueType(0);
15530   if (VT == MVT::i64 && !Subtarget.isPPC64())
15531     return SDValue();
15532   if ((VT != MVT::i32 && VT != MVT::i64) ||
15533       !(Divisor.isPowerOf2() || (-Divisor).isPowerOf2()))
15534     return SDValue();
15535 
15536   SDLoc DL(N);
15537   SDValue N0 = N->getOperand(0);
15538 
15539   bool IsNegPow2 = (-Divisor).isPowerOf2();
15540   unsigned Lg2 = (IsNegPow2 ? -Divisor : Divisor).countTrailingZeros();
15541   SDValue ShiftAmt = DAG.getConstant(Lg2, DL, VT);
15542 
15543   SDValue Op = DAG.getNode(PPCISD::SRA_ADDZE, DL, VT, N0, ShiftAmt);
15544   Created.push_back(Op.getNode());
15545 
15546   if (IsNegPow2) {
15547     Op = DAG.getNode(ISD::SUB, DL, VT, DAG.getConstant(0, DL, VT), Op);
15548     Created.push_back(Op.getNode());
15549   }
15550 
15551   return Op;
15552 }
15553 
15554 //===----------------------------------------------------------------------===//
15555 // Inline Assembly Support
15556 //===----------------------------------------------------------------------===//
15557 
15558 void PPCTargetLowering::computeKnownBitsForTargetNode(const SDValue Op,
15559                                                       KnownBits &Known,
15560                                                       const APInt &DemandedElts,
15561                                                       const SelectionDAG &DAG,
15562                                                       unsigned Depth) const {
15563   Known.resetAll();
15564   switch (Op.getOpcode()) {
15565   default: break;
15566   case PPCISD::LBRX: {
15567     // lhbrx is known to have the top bits cleared out.
15568     if (cast<VTSDNode>(Op.getOperand(2))->getVT() == MVT::i16)
15569       Known.Zero = 0xFFFF0000;
15570     break;
15571   }
15572   case ISD::INTRINSIC_WO_CHAIN: {
15573     switch (cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue()) {
15574     default: break;
15575     case Intrinsic::ppc_altivec_vcmpbfp_p:
15576     case Intrinsic::ppc_altivec_vcmpeqfp_p:
15577     case Intrinsic::ppc_altivec_vcmpequb_p:
15578     case Intrinsic::ppc_altivec_vcmpequh_p:
15579     case Intrinsic::ppc_altivec_vcmpequw_p:
15580     case Intrinsic::ppc_altivec_vcmpequd_p:
15581     case Intrinsic::ppc_altivec_vcmpgefp_p:
15582     case Intrinsic::ppc_altivec_vcmpgtfp_p:
15583     case Intrinsic::ppc_altivec_vcmpgtsb_p:
15584     case Intrinsic::ppc_altivec_vcmpgtsh_p:
15585     case Intrinsic::ppc_altivec_vcmpgtsw_p:
15586     case Intrinsic::ppc_altivec_vcmpgtsd_p:
15587     case Intrinsic::ppc_altivec_vcmpgtub_p:
15588     case Intrinsic::ppc_altivec_vcmpgtuh_p:
15589     case Intrinsic::ppc_altivec_vcmpgtuw_p:
15590     case Intrinsic::ppc_altivec_vcmpgtud_p:
15591       Known.Zero = ~1U;  // All bits but the low one are known to be zero.
15592       break;
15593     }
15594   }
15595   }
15596 }
15597 
15598 Align PPCTargetLowering::getPrefLoopAlignment(MachineLoop *ML) const {
15599   switch (Subtarget.getCPUDirective()) {
15600   default: break;
15601   case PPC::DIR_970:
15602   case PPC::DIR_PWR4:
15603   case PPC::DIR_PWR5:
15604   case PPC::DIR_PWR5X:
15605   case PPC::DIR_PWR6:
15606   case PPC::DIR_PWR6X:
15607   case PPC::DIR_PWR7:
15608   case PPC::DIR_PWR8:
15609   case PPC::DIR_PWR9:
15610   case PPC::DIR_PWR10:
15611   case PPC::DIR_PWR_FUTURE: {
15612     if (!ML)
15613       break;
15614 
15615     if (!DisableInnermostLoopAlign32) {
15616       // If the nested loop is an innermost loop, prefer to a 32-byte alignment,
15617       // so that we can decrease cache misses and branch-prediction misses.
15618       // Actual alignment of the loop will depend on the hotness check and other
15619       // logic in alignBlocks.
15620       if (ML->getLoopDepth() > 1 && ML->getSubLoops().empty())
15621         return Align(32);
15622     }
15623 
15624     const PPCInstrInfo *TII = Subtarget.getInstrInfo();
15625 
15626     // For small loops (between 5 and 8 instructions), align to a 32-byte
15627     // boundary so that the entire loop fits in one instruction-cache line.
15628     uint64_t LoopSize = 0;
15629     for (auto I = ML->block_begin(), IE = ML->block_end(); I != IE; ++I)
15630       for (auto J = (*I)->begin(), JE = (*I)->end(); J != JE; ++J) {
15631         LoopSize += TII->getInstSizeInBytes(*J);
15632         if (LoopSize > 32)
15633           break;
15634       }
15635 
15636     if (LoopSize > 16 && LoopSize <= 32)
15637       return Align(32);
15638 
15639     break;
15640   }
15641   }
15642 
15643   return TargetLowering::getPrefLoopAlignment(ML);
15644 }
15645 
15646 /// getConstraintType - Given a constraint, return the type of
15647 /// constraint it is for this target.
15648 PPCTargetLowering::ConstraintType
15649 PPCTargetLowering::getConstraintType(StringRef Constraint) const {
15650   if (Constraint.size() == 1) {
15651     switch (Constraint[0]) {
15652     default: break;
15653     case 'b':
15654     case 'r':
15655     case 'f':
15656     case 'd':
15657     case 'v':
15658     case 'y':
15659       return C_RegisterClass;
15660     case 'Z':
15661       // FIXME: While Z does indicate a memory constraint, it specifically
15662       // indicates an r+r address (used in conjunction with the 'y' modifier
15663       // in the replacement string). Currently, we're forcing the base
15664       // register to be r0 in the asm printer (which is interpreted as zero)
15665       // and forming the complete address in the second register. This is
15666       // suboptimal.
15667       return C_Memory;
15668     }
15669   } else if (Constraint == "wc") { // individual CR bits.
15670     return C_RegisterClass;
15671   } else if (Constraint == "wa" || Constraint == "wd" ||
15672              Constraint == "wf" || Constraint == "ws" ||
15673              Constraint == "wi" || Constraint == "ww") {
15674     return C_RegisterClass; // VSX registers.
15675   }
15676   return TargetLowering::getConstraintType(Constraint);
15677 }
15678 
15679 /// Examine constraint type and operand type and determine a weight value.
15680 /// This object must already have been set up with the operand type
15681 /// and the current alternative constraint selected.
15682 TargetLowering::ConstraintWeight
15683 PPCTargetLowering::getSingleConstraintMatchWeight(
15684     AsmOperandInfo &info, const char *constraint) const {
15685   ConstraintWeight weight = CW_Invalid;
15686   Value *CallOperandVal = info.CallOperandVal;
15687     // If we don't have a value, we can't do a match,
15688     // but allow it at the lowest weight.
15689   if (!CallOperandVal)
15690     return CW_Default;
15691   Type *type = CallOperandVal->getType();
15692 
15693   // Look at the constraint type.
15694   if (StringRef(constraint) == "wc" && type->isIntegerTy(1))
15695     return CW_Register; // an individual CR bit.
15696   else if ((StringRef(constraint) == "wa" ||
15697             StringRef(constraint) == "wd" ||
15698             StringRef(constraint) == "wf") &&
15699            type->isVectorTy())
15700     return CW_Register;
15701   else if (StringRef(constraint) == "wi" && type->isIntegerTy(64))
15702     return CW_Register; // just hold 64-bit integers data.
15703   else if (StringRef(constraint) == "ws" && type->isDoubleTy())
15704     return CW_Register;
15705   else if (StringRef(constraint) == "ww" && type->isFloatTy())
15706     return CW_Register;
15707 
15708   switch (*constraint) {
15709   default:
15710     weight = TargetLowering::getSingleConstraintMatchWeight(info, constraint);
15711     break;
15712   case 'b':
15713     if (type->isIntegerTy())
15714       weight = CW_Register;
15715     break;
15716   case 'f':
15717     if (type->isFloatTy())
15718       weight = CW_Register;
15719     break;
15720   case 'd':
15721     if (type->isDoubleTy())
15722       weight = CW_Register;
15723     break;
15724   case 'v':
15725     if (type->isVectorTy())
15726       weight = CW_Register;
15727     break;
15728   case 'y':
15729     weight = CW_Register;
15730     break;
15731   case 'Z':
15732     weight = CW_Memory;
15733     break;
15734   }
15735   return weight;
15736 }
15737 
15738 std::pair<unsigned, const TargetRegisterClass *>
15739 PPCTargetLowering::getRegForInlineAsmConstraint(const TargetRegisterInfo *TRI,
15740                                                 StringRef Constraint,
15741                                                 MVT VT) const {
15742   if (Constraint.size() == 1) {
15743     // GCC RS6000 Constraint Letters
15744     switch (Constraint[0]) {
15745     case 'b':   // R1-R31
15746       if (VT == MVT::i64 && Subtarget.isPPC64())
15747         return std::make_pair(0U, &PPC::G8RC_NOX0RegClass);
15748       return std::make_pair(0U, &PPC::GPRC_NOR0RegClass);
15749     case 'r':   // R0-R31
15750       if (VT == MVT::i64 && Subtarget.isPPC64())
15751         return std::make_pair(0U, &PPC::G8RCRegClass);
15752       return std::make_pair(0U, &PPC::GPRCRegClass);
15753     // 'd' and 'f' constraints are both defined to be "the floating point
15754     // registers", where one is for 32-bit and the other for 64-bit. We don't
15755     // really care overly much here so just give them all the same reg classes.
15756     case 'd':
15757     case 'f':
15758       if (Subtarget.hasSPE()) {
15759         if (VT == MVT::f32 || VT == MVT::i32)
15760           return std::make_pair(0U, &PPC::GPRCRegClass);
15761         if (VT == MVT::f64 || VT == MVT::i64)
15762           return std::make_pair(0U, &PPC::SPERCRegClass);
15763       } else {
15764         if (VT == MVT::f32 || VT == MVT::i32)
15765           return std::make_pair(0U, &PPC::F4RCRegClass);
15766         if (VT == MVT::f64 || VT == MVT::i64)
15767           return std::make_pair(0U, &PPC::F8RCRegClass);
15768         if (VT == MVT::v4f64 && Subtarget.hasQPX())
15769           return std::make_pair(0U, &PPC::QFRCRegClass);
15770         if (VT == MVT::v4f32 && Subtarget.hasQPX())
15771           return std::make_pair(0U, &PPC::QSRCRegClass);
15772       }
15773       break;
15774     case 'v':
15775       if (VT == MVT::v4f64 && Subtarget.hasQPX())
15776         return std::make_pair(0U, &PPC::QFRCRegClass);
15777       if (VT == MVT::v4f32 && Subtarget.hasQPX())
15778         return std::make_pair(0U, &PPC::QSRCRegClass);
15779       if (Subtarget.hasAltivec())
15780         return std::make_pair(0U, &PPC::VRRCRegClass);
15781       break;
15782     case 'y':   // crrc
15783       return std::make_pair(0U, &PPC::CRRCRegClass);
15784     }
15785   } else if (Constraint == "wc" && Subtarget.useCRBits()) {
15786     // An individual CR bit.
15787     return std::make_pair(0U, &PPC::CRBITRCRegClass);
15788   } else if ((Constraint == "wa" || Constraint == "wd" ||
15789              Constraint == "wf" || Constraint == "wi") &&
15790              Subtarget.hasVSX()) {
15791     return std::make_pair(0U, &PPC::VSRCRegClass);
15792   } else if ((Constraint == "ws" || Constraint == "ww") && Subtarget.hasVSX()) {
15793     if (VT == MVT::f32 && Subtarget.hasP8Vector())
15794       return std::make_pair(0U, &PPC::VSSRCRegClass);
15795     else
15796       return std::make_pair(0U, &PPC::VSFRCRegClass);
15797   }
15798 
15799   // If we name a VSX register, we can't defer to the base class because it
15800   // will not recognize the correct register (their names will be VSL{0-31}
15801   // and V{0-31} so they won't match). So we match them here.
15802   if (Constraint.size() > 3 && Constraint[1] == 'v' && Constraint[2] == 's') {
15803     int VSNum = atoi(Constraint.data() + 3);
15804     assert(VSNum >= 0 && VSNum <= 63 &&
15805            "Attempted to access a vsr out of range");
15806     if (VSNum < 32)
15807       return std::make_pair(PPC::VSL0 + VSNum, &PPC::VSRCRegClass);
15808     return std::make_pair(PPC::V0 + VSNum - 32, &PPC::VSRCRegClass);
15809   }
15810   std::pair<unsigned, const TargetRegisterClass *> R =
15811       TargetLowering::getRegForInlineAsmConstraint(TRI, Constraint, VT);
15812 
15813   // r[0-9]+ are used, on PPC64, to refer to the corresponding 64-bit registers
15814   // (which we call X[0-9]+). If a 64-bit value has been requested, and a
15815   // 32-bit GPR has been selected, then 'upgrade' it to the 64-bit parent
15816   // register.
15817   // FIXME: If TargetLowering::getRegForInlineAsmConstraint could somehow use
15818   // the AsmName field from *RegisterInfo.td, then this would not be necessary.
15819   if (R.first && VT == MVT::i64 && Subtarget.isPPC64() &&
15820       PPC::GPRCRegClass.contains(R.first))
15821     return std::make_pair(TRI->getMatchingSuperReg(R.first,
15822                             PPC::sub_32, &PPC::G8RCRegClass),
15823                           &PPC::G8RCRegClass);
15824 
15825   // GCC accepts 'cc' as an alias for 'cr0', and we need to do the same.
15826   if (!R.second && StringRef("{cc}").equals_lower(Constraint)) {
15827     R.first = PPC::CR0;
15828     R.second = &PPC::CRRCRegClass;
15829   }
15830 
15831   return R;
15832 }
15833 
15834 /// LowerAsmOperandForConstraint - Lower the specified operand into the Ops
15835 /// vector.  If it is invalid, don't add anything to Ops.
15836 void PPCTargetLowering::LowerAsmOperandForConstraint(SDValue Op,
15837                                                      std::string &Constraint,
15838                                                      std::vector<SDValue>&Ops,
15839                                                      SelectionDAG &DAG) const {
15840   SDValue Result;
15841 
15842   // Only support length 1 constraints.
15843   if (Constraint.length() > 1) return;
15844 
15845   char Letter = Constraint[0];
15846   switch (Letter) {
15847   default: break;
15848   case 'I':
15849   case 'J':
15850   case 'K':
15851   case 'L':
15852   case 'M':
15853   case 'N':
15854   case 'O':
15855   case 'P': {
15856     ConstantSDNode *CST = dyn_cast<ConstantSDNode>(Op);
15857     if (!CST) return; // Must be an immediate to match.
15858     SDLoc dl(Op);
15859     int64_t Value = CST->getSExtValue();
15860     EVT TCVT = MVT::i64; // All constants taken to be 64 bits so that negative
15861                          // numbers are printed as such.
15862     switch (Letter) {
15863     default: llvm_unreachable("Unknown constraint letter!");
15864     case 'I':  // "I" is a signed 16-bit constant.
15865       if (isInt<16>(Value))
15866         Result = DAG.getTargetConstant(Value, dl, TCVT);
15867       break;
15868     case 'J':  // "J" is a constant with only the high-order 16 bits nonzero.
15869       if (isShiftedUInt<16, 16>(Value))
15870         Result = DAG.getTargetConstant(Value, dl, TCVT);
15871       break;
15872     case 'L':  // "L" is a signed 16-bit constant shifted left 16 bits.
15873       if (isShiftedInt<16, 16>(Value))
15874         Result = DAG.getTargetConstant(Value, dl, TCVT);
15875       break;
15876     case 'K':  // "K" is a constant with only the low-order 16 bits nonzero.
15877       if (isUInt<16>(Value))
15878         Result = DAG.getTargetConstant(Value, dl, TCVT);
15879       break;
15880     case 'M':  // "M" is a constant that is greater than 31.
15881       if (Value > 31)
15882         Result = DAG.getTargetConstant(Value, dl, TCVT);
15883       break;
15884     case 'N':  // "N" is a positive constant that is an exact power of two.
15885       if (Value > 0 && isPowerOf2_64(Value))
15886         Result = DAG.getTargetConstant(Value, dl, TCVT);
15887       break;
15888     case 'O':  // "O" is the constant zero.
15889       if (Value == 0)
15890         Result = DAG.getTargetConstant(Value, dl, TCVT);
15891       break;
15892     case 'P':  // "P" is a constant whose negation is a signed 16-bit constant.
15893       if (isInt<16>(-Value))
15894         Result = DAG.getTargetConstant(Value, dl, TCVT);
15895       break;
15896     }
15897     break;
15898   }
15899   }
15900 
15901   if (Result.getNode()) {
15902     Ops.push_back(Result);
15903     return;
15904   }
15905 
15906   // Handle standard constraint letters.
15907   TargetLowering::LowerAsmOperandForConstraint(Op, Constraint, Ops, DAG);
15908 }
15909 
15910 // isLegalAddressingMode - Return true if the addressing mode represented
15911 // by AM is legal for this target, for a load/store of the specified type.
15912 bool PPCTargetLowering::isLegalAddressingMode(const DataLayout &DL,
15913                                               const AddrMode &AM, Type *Ty,
15914                                               unsigned AS, Instruction *I) const {
15915   // PPC does not allow r+i addressing modes for vectors!
15916   if (Ty->isVectorTy() && AM.BaseOffs != 0)
15917     return false;
15918 
15919   // PPC allows a sign-extended 16-bit immediate field.
15920   if (AM.BaseOffs <= -(1LL << 16) || AM.BaseOffs >= (1LL << 16)-1)
15921     return false;
15922 
15923   // No global is ever allowed as a base.
15924   if (AM.BaseGV)
15925     return false;
15926 
15927   // PPC only support r+r,
15928   switch (AM.Scale) {
15929   case 0:  // "r+i" or just "i", depending on HasBaseReg.
15930     break;
15931   case 1:
15932     if (AM.HasBaseReg && AM.BaseOffs)  // "r+r+i" is not allowed.
15933       return false;
15934     // Otherwise we have r+r or r+i.
15935     break;
15936   case 2:
15937     if (AM.HasBaseReg || AM.BaseOffs)  // 2*r+r  or  2*r+i is not allowed.
15938       return false;
15939     // Allow 2*r as r+r.
15940     break;
15941   default:
15942     // No other scales are supported.
15943     return false;
15944   }
15945 
15946   return true;
15947 }
15948 
15949 SDValue PPCTargetLowering::LowerRETURNADDR(SDValue Op,
15950                                            SelectionDAG &DAG) const {
15951   MachineFunction &MF = DAG.getMachineFunction();
15952   MachineFrameInfo &MFI = MF.getFrameInfo();
15953   MFI.setReturnAddressIsTaken(true);
15954 
15955   if (verifyReturnAddressArgumentIsConstant(Op, DAG))
15956     return SDValue();
15957 
15958   SDLoc dl(Op);
15959   unsigned Depth = cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue();
15960 
15961   // Make sure the function does not optimize away the store of the RA to
15962   // the stack.
15963   PPCFunctionInfo *FuncInfo = MF.getInfo<PPCFunctionInfo>();
15964   FuncInfo->setLRStoreRequired();
15965   bool isPPC64 = Subtarget.isPPC64();
15966   auto PtrVT = getPointerTy(MF.getDataLayout());
15967 
15968   if (Depth > 0) {
15969     SDValue FrameAddr = LowerFRAMEADDR(Op, DAG);
15970     SDValue Offset =
15971         DAG.getConstant(Subtarget.getFrameLowering()->getReturnSaveOffset(), dl,
15972                         isPPC64 ? MVT::i64 : MVT::i32);
15973     return DAG.getLoad(PtrVT, dl, DAG.getEntryNode(),
15974                        DAG.getNode(ISD::ADD, dl, PtrVT, FrameAddr, Offset),
15975                        MachinePointerInfo());
15976   }
15977 
15978   // Just load the return address off the stack.
15979   SDValue RetAddrFI = getReturnAddrFrameIndex(DAG);
15980   return DAG.getLoad(PtrVT, dl, DAG.getEntryNode(), RetAddrFI,
15981                      MachinePointerInfo());
15982 }
15983 
15984 SDValue PPCTargetLowering::LowerFRAMEADDR(SDValue Op,
15985                                           SelectionDAG &DAG) const {
15986   SDLoc dl(Op);
15987   unsigned Depth = cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue();
15988 
15989   MachineFunction &MF = DAG.getMachineFunction();
15990   MachineFrameInfo &MFI = MF.getFrameInfo();
15991   MFI.setFrameAddressIsTaken(true);
15992 
15993   EVT PtrVT = getPointerTy(MF.getDataLayout());
15994   bool isPPC64 = PtrVT == MVT::i64;
15995 
15996   // Naked functions never have a frame pointer, and so we use r1. For all
15997   // other functions, this decision must be delayed until during PEI.
15998   unsigned FrameReg;
15999   if (MF.getFunction().hasFnAttribute(Attribute::Naked))
16000     FrameReg = isPPC64 ? PPC::X1 : PPC::R1;
16001   else
16002     FrameReg = isPPC64 ? PPC::FP8 : PPC::FP;
16003 
16004   SDValue FrameAddr = DAG.getCopyFromReg(DAG.getEntryNode(), dl, FrameReg,
16005                                          PtrVT);
16006   while (Depth--)
16007     FrameAddr = DAG.getLoad(Op.getValueType(), dl, DAG.getEntryNode(),
16008                             FrameAddr, MachinePointerInfo());
16009   return FrameAddr;
16010 }
16011 
16012 // FIXME? Maybe this could be a TableGen attribute on some registers and
16013 // this table could be generated automatically from RegInfo.
16014 Register PPCTargetLowering::getRegisterByName(const char* RegName, LLT VT,
16015                                               const MachineFunction &MF) const {
16016   bool isPPC64 = Subtarget.isPPC64();
16017 
16018   bool is64Bit = isPPC64 && VT == LLT::scalar(64);
16019   if (!is64Bit && VT != LLT::scalar(32))
16020     report_fatal_error("Invalid register global variable type");
16021 
16022   Register Reg = StringSwitch<Register>(RegName)
16023                      .Case("r1", is64Bit ? PPC::X1 : PPC::R1)
16024                      .Case("r2", isPPC64 ? Register() : PPC::R2)
16025                      .Case("r13", (is64Bit ? PPC::X13 : PPC::R13))
16026                      .Default(Register());
16027 
16028   if (Reg)
16029     return Reg;
16030   report_fatal_error("Invalid register name global variable");
16031 }
16032 
16033 bool PPCTargetLowering::isAccessedAsGotIndirect(SDValue GA) const {
16034   // 32-bit SVR4 ABI access everything as got-indirect.
16035   if (Subtarget.is32BitELFABI())
16036     return true;
16037 
16038   // AIX accesses everything indirectly through the TOC, which is similar to
16039   // the GOT.
16040   if (Subtarget.isAIXABI())
16041     return true;
16042 
16043   CodeModel::Model CModel = getTargetMachine().getCodeModel();
16044   // If it is small or large code model, module locals are accessed
16045   // indirectly by loading their address from .toc/.got.
16046   if (CModel == CodeModel::Small || CModel == CodeModel::Large)
16047     return true;
16048 
16049   // JumpTable and BlockAddress are accessed as got-indirect.
16050   if (isa<JumpTableSDNode>(GA) || isa<BlockAddressSDNode>(GA))
16051     return true;
16052 
16053   if (GlobalAddressSDNode *G = dyn_cast<GlobalAddressSDNode>(GA))
16054     return Subtarget.isGVIndirectSymbol(G->getGlobal());
16055 
16056   return false;
16057 }
16058 
16059 bool
16060 PPCTargetLowering::isOffsetFoldingLegal(const GlobalAddressSDNode *GA) const {
16061   // The PowerPC target isn't yet aware of offsets.
16062   return false;
16063 }
16064 
16065 bool PPCTargetLowering::getTgtMemIntrinsic(IntrinsicInfo &Info,
16066                                            const CallInst &I,
16067                                            MachineFunction &MF,
16068                                            unsigned Intrinsic) const {
16069   switch (Intrinsic) {
16070   case Intrinsic::ppc_qpx_qvlfd:
16071   case Intrinsic::ppc_qpx_qvlfs:
16072   case Intrinsic::ppc_qpx_qvlfcd:
16073   case Intrinsic::ppc_qpx_qvlfcs:
16074   case Intrinsic::ppc_qpx_qvlfiwa:
16075   case Intrinsic::ppc_qpx_qvlfiwz:
16076   case Intrinsic::ppc_altivec_lvx:
16077   case Intrinsic::ppc_altivec_lvxl:
16078   case Intrinsic::ppc_altivec_lvebx:
16079   case Intrinsic::ppc_altivec_lvehx:
16080   case Intrinsic::ppc_altivec_lvewx:
16081   case Intrinsic::ppc_vsx_lxvd2x:
16082   case Intrinsic::ppc_vsx_lxvw4x: {
16083     EVT VT;
16084     switch (Intrinsic) {
16085     case Intrinsic::ppc_altivec_lvebx:
16086       VT = MVT::i8;
16087       break;
16088     case Intrinsic::ppc_altivec_lvehx:
16089       VT = MVT::i16;
16090       break;
16091     case Intrinsic::ppc_altivec_lvewx:
16092       VT = MVT::i32;
16093       break;
16094     case Intrinsic::ppc_vsx_lxvd2x:
16095       VT = MVT::v2f64;
16096       break;
16097     case Intrinsic::ppc_qpx_qvlfd:
16098       VT = MVT::v4f64;
16099       break;
16100     case Intrinsic::ppc_qpx_qvlfs:
16101       VT = MVT::v4f32;
16102       break;
16103     case Intrinsic::ppc_qpx_qvlfcd:
16104       VT = MVT::v2f64;
16105       break;
16106     case Intrinsic::ppc_qpx_qvlfcs:
16107       VT = MVT::v2f32;
16108       break;
16109     default:
16110       VT = MVT::v4i32;
16111       break;
16112     }
16113 
16114     Info.opc = ISD::INTRINSIC_W_CHAIN;
16115     Info.memVT = VT;
16116     Info.ptrVal = I.getArgOperand(0);
16117     Info.offset = -VT.getStoreSize()+1;
16118     Info.size = 2*VT.getStoreSize()-1;
16119     Info.align = Align(1);
16120     Info.flags = MachineMemOperand::MOLoad;
16121     return true;
16122   }
16123   case Intrinsic::ppc_qpx_qvlfda:
16124   case Intrinsic::ppc_qpx_qvlfsa:
16125   case Intrinsic::ppc_qpx_qvlfcda:
16126   case Intrinsic::ppc_qpx_qvlfcsa:
16127   case Intrinsic::ppc_qpx_qvlfiwaa:
16128   case Intrinsic::ppc_qpx_qvlfiwza: {
16129     EVT VT;
16130     switch (Intrinsic) {
16131     case Intrinsic::ppc_qpx_qvlfda:
16132       VT = MVT::v4f64;
16133       break;
16134     case Intrinsic::ppc_qpx_qvlfsa:
16135       VT = MVT::v4f32;
16136       break;
16137     case Intrinsic::ppc_qpx_qvlfcda:
16138       VT = MVT::v2f64;
16139       break;
16140     case Intrinsic::ppc_qpx_qvlfcsa:
16141       VT = MVT::v2f32;
16142       break;
16143     default:
16144       VT = MVT::v4i32;
16145       break;
16146     }
16147 
16148     Info.opc = ISD::INTRINSIC_W_CHAIN;
16149     Info.memVT = VT;
16150     Info.ptrVal = I.getArgOperand(0);
16151     Info.offset = 0;
16152     Info.size = VT.getStoreSize();
16153     Info.align = Align(1);
16154     Info.flags = MachineMemOperand::MOLoad;
16155     return true;
16156   }
16157   case Intrinsic::ppc_qpx_qvstfd:
16158   case Intrinsic::ppc_qpx_qvstfs:
16159   case Intrinsic::ppc_qpx_qvstfcd:
16160   case Intrinsic::ppc_qpx_qvstfcs:
16161   case Intrinsic::ppc_qpx_qvstfiw:
16162   case Intrinsic::ppc_altivec_stvx:
16163   case Intrinsic::ppc_altivec_stvxl:
16164   case Intrinsic::ppc_altivec_stvebx:
16165   case Intrinsic::ppc_altivec_stvehx:
16166   case Intrinsic::ppc_altivec_stvewx:
16167   case Intrinsic::ppc_vsx_stxvd2x:
16168   case Intrinsic::ppc_vsx_stxvw4x: {
16169     EVT VT;
16170     switch (Intrinsic) {
16171     case Intrinsic::ppc_altivec_stvebx:
16172       VT = MVT::i8;
16173       break;
16174     case Intrinsic::ppc_altivec_stvehx:
16175       VT = MVT::i16;
16176       break;
16177     case Intrinsic::ppc_altivec_stvewx:
16178       VT = MVT::i32;
16179       break;
16180     case Intrinsic::ppc_vsx_stxvd2x:
16181       VT = MVT::v2f64;
16182       break;
16183     case Intrinsic::ppc_qpx_qvstfd:
16184       VT = MVT::v4f64;
16185       break;
16186     case Intrinsic::ppc_qpx_qvstfs:
16187       VT = MVT::v4f32;
16188       break;
16189     case Intrinsic::ppc_qpx_qvstfcd:
16190       VT = MVT::v2f64;
16191       break;
16192     case Intrinsic::ppc_qpx_qvstfcs:
16193       VT = MVT::v2f32;
16194       break;
16195     default:
16196       VT = MVT::v4i32;
16197       break;
16198     }
16199 
16200     Info.opc = ISD::INTRINSIC_VOID;
16201     Info.memVT = VT;
16202     Info.ptrVal = I.getArgOperand(1);
16203     Info.offset = -VT.getStoreSize()+1;
16204     Info.size = 2*VT.getStoreSize()-1;
16205     Info.align = Align(1);
16206     Info.flags = MachineMemOperand::MOStore;
16207     return true;
16208   }
16209   case Intrinsic::ppc_qpx_qvstfda:
16210   case Intrinsic::ppc_qpx_qvstfsa:
16211   case Intrinsic::ppc_qpx_qvstfcda:
16212   case Intrinsic::ppc_qpx_qvstfcsa:
16213   case Intrinsic::ppc_qpx_qvstfiwa: {
16214     EVT VT;
16215     switch (Intrinsic) {
16216     case Intrinsic::ppc_qpx_qvstfda:
16217       VT = MVT::v4f64;
16218       break;
16219     case Intrinsic::ppc_qpx_qvstfsa:
16220       VT = MVT::v4f32;
16221       break;
16222     case Intrinsic::ppc_qpx_qvstfcda:
16223       VT = MVT::v2f64;
16224       break;
16225     case Intrinsic::ppc_qpx_qvstfcsa:
16226       VT = MVT::v2f32;
16227       break;
16228     default:
16229       VT = MVT::v4i32;
16230       break;
16231     }
16232 
16233     Info.opc = ISD::INTRINSIC_VOID;
16234     Info.memVT = VT;
16235     Info.ptrVal = I.getArgOperand(1);
16236     Info.offset = 0;
16237     Info.size = VT.getStoreSize();
16238     Info.align = Align(1);
16239     Info.flags = MachineMemOperand::MOStore;
16240     return true;
16241   }
16242   default:
16243     break;
16244   }
16245 
16246   return false;
16247 }
16248 
16249 /// It returns EVT::Other if the type should be determined using generic
16250 /// target-independent logic.
16251 EVT PPCTargetLowering::getOptimalMemOpType(
16252     const MemOp &Op, const AttributeList &FuncAttributes) const {
16253   if (getTargetMachine().getOptLevel() != CodeGenOpt::None) {
16254     // When expanding a memset, require at least two QPX instructions to cover
16255     // the cost of loading the value to be stored from the constant pool.
16256     if (Subtarget.hasQPX() && Op.size() >= 32 &&
16257         (Op.isMemcpy() || Op.size() >= 64) && Op.isAligned(Align(32)) &&
16258         !FuncAttributes.hasFnAttribute(Attribute::NoImplicitFloat)) {
16259       return MVT::v4f64;
16260     }
16261 
16262     // We should use Altivec/VSX loads and stores when available. For unaligned
16263     // addresses, unaligned VSX loads are only fast starting with the P8.
16264     if (Subtarget.hasAltivec() && Op.size() >= 16 &&
16265         (Op.isAligned(Align(16)) ||
16266          ((Op.isMemset() && Subtarget.hasVSX()) || Subtarget.hasP8Vector())))
16267       return MVT::v4i32;
16268   }
16269 
16270   if (Subtarget.isPPC64()) {
16271     return MVT::i64;
16272   }
16273 
16274   return MVT::i32;
16275 }
16276 
16277 /// Returns true if it is beneficial to convert a load of a constant
16278 /// to just the constant itself.
16279 bool PPCTargetLowering::shouldConvertConstantLoadToIntImm(const APInt &Imm,
16280                                                           Type *Ty) const {
16281   assert(Ty->isIntegerTy());
16282 
16283   unsigned BitSize = Ty->getPrimitiveSizeInBits();
16284   return !(BitSize == 0 || BitSize > 64);
16285 }
16286 
16287 bool PPCTargetLowering::isTruncateFree(Type *Ty1, Type *Ty2) const {
16288   if (!Ty1->isIntegerTy() || !Ty2->isIntegerTy())
16289     return false;
16290   unsigned NumBits1 = Ty1->getPrimitiveSizeInBits();
16291   unsigned NumBits2 = Ty2->getPrimitiveSizeInBits();
16292   return NumBits1 == 64 && NumBits2 == 32;
16293 }
16294 
16295 bool PPCTargetLowering::isTruncateFree(EVT VT1, EVT VT2) const {
16296   if (!VT1.isInteger() || !VT2.isInteger())
16297     return false;
16298   unsigned NumBits1 = VT1.getSizeInBits();
16299   unsigned NumBits2 = VT2.getSizeInBits();
16300   return NumBits1 == 64 && NumBits2 == 32;
16301 }
16302 
16303 bool PPCTargetLowering::isZExtFree(SDValue Val, EVT VT2) const {
16304   // Generally speaking, zexts are not free, but they are free when they can be
16305   // folded with other operations.
16306   if (LoadSDNode *LD = dyn_cast<LoadSDNode>(Val)) {
16307     EVT MemVT = LD->getMemoryVT();
16308     if ((MemVT == MVT::i1 || MemVT == MVT::i8 || MemVT == MVT::i16 ||
16309          (Subtarget.isPPC64() && MemVT == MVT::i32)) &&
16310         (LD->getExtensionType() == ISD::NON_EXTLOAD ||
16311          LD->getExtensionType() == ISD::ZEXTLOAD))
16312       return true;
16313   }
16314 
16315   // FIXME: Add other cases...
16316   //  - 32-bit shifts with a zext to i64
16317   //  - zext after ctlz, bswap, etc.
16318   //  - zext after and by a constant mask
16319 
16320   return TargetLowering::isZExtFree(Val, VT2);
16321 }
16322 
16323 bool PPCTargetLowering::isFPExtFree(EVT DestVT, EVT SrcVT) const {
16324   assert(DestVT.isFloatingPoint() && SrcVT.isFloatingPoint() &&
16325          "invalid fpext types");
16326   // Extending to float128 is not free.
16327   if (DestVT == MVT::f128)
16328     return false;
16329   return true;
16330 }
16331 
16332 bool PPCTargetLowering::isLegalICmpImmediate(int64_t Imm) const {
16333   return isInt<16>(Imm) || isUInt<16>(Imm);
16334 }
16335 
16336 bool PPCTargetLowering::isLegalAddImmediate(int64_t Imm) const {
16337   return isInt<16>(Imm) || isUInt<16>(Imm);
16338 }
16339 
16340 bool PPCTargetLowering::allowsMisalignedMemoryAccesses(EVT VT,
16341                                                        unsigned,
16342                                                        unsigned,
16343                                                        MachineMemOperand::Flags,
16344                                                        bool *Fast) const {
16345   if (DisablePPCUnaligned)
16346     return false;
16347 
16348   // PowerPC supports unaligned memory access for simple non-vector types.
16349   // Although accessing unaligned addresses is not as efficient as accessing
16350   // aligned addresses, it is generally more efficient than manual expansion,
16351   // and generally only traps for software emulation when crossing page
16352   // boundaries.
16353 
16354   if (!VT.isSimple())
16355     return false;
16356 
16357   if (VT.isFloatingPoint() && !VT.isVector() &&
16358       !Subtarget.allowsUnalignedFPAccess())
16359     return false;
16360 
16361   if (VT.getSimpleVT().isVector()) {
16362     if (Subtarget.hasVSX()) {
16363       if (VT != MVT::v2f64 && VT != MVT::v2i64 &&
16364           VT != MVT::v4f32 && VT != MVT::v4i32)
16365         return false;
16366     } else {
16367       return false;
16368     }
16369   }
16370 
16371   if (VT == MVT::ppcf128)
16372     return false;
16373 
16374   if (Fast)
16375     *Fast = true;
16376 
16377   return true;
16378 }
16379 
16380 bool PPCTargetLowering::isFMAFasterThanFMulAndFAdd(const MachineFunction &MF,
16381                                                    EVT VT) const {
16382   return isFMAFasterThanFMulAndFAdd(
16383       MF.getFunction(), VT.getTypeForEVT(MF.getFunction().getContext()));
16384 }
16385 
16386 bool PPCTargetLowering::isFMAFasterThanFMulAndFAdd(const Function &F,
16387                                                    Type *Ty) const {
16388   switch (Ty->getScalarType()->getTypeID()) {
16389   case Type::FloatTyID:
16390   case Type::DoubleTyID:
16391     return true;
16392   case Type::FP128TyID:
16393     return Subtarget.hasP9Vector();
16394   default:
16395     return false;
16396   }
16397 }
16398 
16399 // FIXME: add more patterns which are not profitable to hoist.
16400 bool PPCTargetLowering::isProfitableToHoist(Instruction *I) const {
16401   if (!I->hasOneUse())
16402     return true;
16403 
16404   Instruction *User = I->user_back();
16405   assert(User && "A single use instruction with no uses.");
16406 
16407   switch (I->getOpcode()) {
16408   case Instruction::FMul: {
16409     // Don't break FMA, PowerPC prefers FMA.
16410     if (User->getOpcode() != Instruction::FSub &&
16411         User->getOpcode() != Instruction::FAdd)
16412       return true;
16413 
16414     const TargetOptions &Options = getTargetMachine().Options;
16415     const Function *F = I->getFunction();
16416     const DataLayout &DL = F->getParent()->getDataLayout();
16417     Type *Ty = User->getOperand(0)->getType();
16418 
16419     return !(
16420         isFMAFasterThanFMulAndFAdd(*F, Ty) &&
16421         isOperationLegalOrCustom(ISD::FMA, getValueType(DL, Ty)) &&
16422         (Options.AllowFPOpFusion == FPOpFusion::Fast || Options.UnsafeFPMath));
16423   }
16424   case Instruction::Load: {
16425     // Don't break "store (load float*)" pattern, this pattern will be combined
16426     // to "store (load int32)" in later InstCombine pass. See function
16427     // combineLoadToOperationType. On PowerPC, loading a float point takes more
16428     // cycles than loading a 32 bit integer.
16429     LoadInst *LI = cast<LoadInst>(I);
16430     // For the loads that combineLoadToOperationType does nothing, like
16431     // ordered load, it should be profitable to hoist them.
16432     // For swifterror load, it can only be used for pointer to pointer type, so
16433     // later type check should get rid of this case.
16434     if (!LI->isUnordered())
16435       return true;
16436 
16437     if (User->getOpcode() != Instruction::Store)
16438       return true;
16439 
16440     if (I->getType()->getTypeID() != Type::FloatTyID)
16441       return true;
16442 
16443     return false;
16444   }
16445   default:
16446     return true;
16447   }
16448   return true;
16449 }
16450 
16451 const MCPhysReg *
16452 PPCTargetLowering::getScratchRegisters(CallingConv::ID) const {
16453   // LR is a callee-save register, but we must treat it as clobbered by any call
16454   // site. Hence we include LR in the scratch registers, which are in turn added
16455   // as implicit-defs for stackmaps and patchpoints. The same reasoning applies
16456   // to CTR, which is used by any indirect call.
16457   static const MCPhysReg ScratchRegs[] = {
16458     PPC::X12, PPC::LR8, PPC::CTR8, 0
16459   };
16460 
16461   return ScratchRegs;
16462 }
16463 
16464 Register PPCTargetLowering::getExceptionPointerRegister(
16465     const Constant *PersonalityFn) const {
16466   return Subtarget.isPPC64() ? PPC::X3 : PPC::R3;
16467 }
16468 
16469 Register PPCTargetLowering::getExceptionSelectorRegister(
16470     const Constant *PersonalityFn) const {
16471   return Subtarget.isPPC64() ? PPC::X4 : PPC::R4;
16472 }
16473 
16474 bool
16475 PPCTargetLowering::shouldExpandBuildVectorWithShuffles(
16476                      EVT VT , unsigned DefinedValues) const {
16477   if (VT == MVT::v2i64)
16478     return Subtarget.hasDirectMove(); // Don't need stack ops with direct moves
16479 
16480   if (Subtarget.hasVSX() || Subtarget.hasQPX())
16481     return true;
16482 
16483   return TargetLowering::shouldExpandBuildVectorWithShuffles(VT, DefinedValues);
16484 }
16485 
16486 Sched::Preference PPCTargetLowering::getSchedulingPreference(SDNode *N) const {
16487   if (DisableILPPref || Subtarget.enableMachineScheduler())
16488     return TargetLowering::getSchedulingPreference(N);
16489 
16490   return Sched::ILP;
16491 }
16492 
16493 // Create a fast isel object.
16494 FastISel *
16495 PPCTargetLowering::createFastISel(FunctionLoweringInfo &FuncInfo,
16496                                   const TargetLibraryInfo *LibInfo) const {
16497   return PPC::createFastISel(FuncInfo, LibInfo);
16498 }
16499 
16500 // 'Inverted' means the FMA opcode after negating one multiplicand.
16501 // For example, (fma -a b c) = (fnmsub a b c)
16502 static unsigned invertFMAOpcode(unsigned Opc) {
16503   switch (Opc) {
16504   default:
16505     llvm_unreachable("Invalid FMA opcode for PowerPC!");
16506   case ISD::FMA:
16507     return PPCISD::FNMSUB;
16508   case PPCISD::FNMSUB:
16509     return ISD::FMA;
16510   }
16511 }
16512 
16513 SDValue PPCTargetLowering::getNegatedExpression(SDValue Op, SelectionDAG &DAG,
16514                                                 bool LegalOps, bool OptForSize,
16515                                                 NegatibleCost &Cost,
16516                                                 unsigned Depth) const {
16517   if (Depth > SelectionDAG::MaxRecursionDepth)
16518     return SDValue();
16519 
16520   unsigned Opc = Op.getOpcode();
16521   EVT VT = Op.getValueType();
16522   SDNodeFlags Flags = Op.getNode()->getFlags();
16523 
16524   switch (Opc) {
16525   case PPCISD::FNMSUB:
16526     // TODO: QPX subtarget is deprecated. No transformation here.
16527     if (!Op.hasOneUse() || !isTypeLegal(VT) || Subtarget.hasQPX())
16528       break;
16529 
16530     const TargetOptions &Options = getTargetMachine().Options;
16531     SDValue N0 = Op.getOperand(0);
16532     SDValue N1 = Op.getOperand(1);
16533     SDValue N2 = Op.getOperand(2);
16534     SDLoc Loc(Op);
16535 
16536     NegatibleCost N2Cost = NegatibleCost::Expensive;
16537     SDValue NegN2 =
16538         getNegatedExpression(N2, DAG, LegalOps, OptForSize, N2Cost, Depth + 1);
16539 
16540     if (!NegN2)
16541       return SDValue();
16542 
16543     // (fneg (fnmsub a b c)) => (fnmsub (fneg a) b (fneg c))
16544     // (fneg (fnmsub a b c)) => (fnmsub a (fneg b) (fneg c))
16545     // These transformations may change sign of zeroes. For example,
16546     // -(-ab-(-c))=-0 while -(-(ab-c))=+0 when a=b=c=1.
16547     if (Flags.hasNoSignedZeros() || Options.NoSignedZerosFPMath) {
16548       // Try and choose the cheaper one to negate.
16549       NegatibleCost N0Cost = NegatibleCost::Expensive;
16550       SDValue NegN0 = getNegatedExpression(N0, DAG, LegalOps, OptForSize,
16551                                            N0Cost, Depth + 1);
16552 
16553       NegatibleCost N1Cost = NegatibleCost::Expensive;
16554       SDValue NegN1 = getNegatedExpression(N1, DAG, LegalOps, OptForSize,
16555                                            N1Cost, Depth + 1);
16556 
16557       if (NegN0 && N0Cost <= N1Cost) {
16558         Cost = std::min(N0Cost, N2Cost);
16559         return DAG.getNode(Opc, Loc, VT, NegN0, N1, NegN2, Flags);
16560       } else if (NegN1) {
16561         Cost = std::min(N1Cost, N2Cost);
16562         return DAG.getNode(Opc, Loc, VT, N0, NegN1, NegN2, Flags);
16563       }
16564     }
16565 
16566     // (fneg (fnmsub a b c)) => (fma a b (fneg c))
16567     if (isOperationLegal(ISD::FMA, VT)) {
16568       Cost = N2Cost;
16569       return DAG.getNode(ISD::FMA, Loc, VT, N0, N1, NegN2, Flags);
16570     }
16571 
16572     break;
16573   }
16574 
16575   return TargetLowering::getNegatedExpression(Op, DAG, LegalOps, OptForSize,
16576                                               Cost, Depth);
16577 }
16578 
16579 // Override to enable LOAD_STACK_GUARD lowering on Linux.
16580 bool PPCTargetLowering::useLoadStackGuardNode() const {
16581   if (!Subtarget.isTargetLinux())
16582     return TargetLowering::useLoadStackGuardNode();
16583   return true;
16584 }
16585 
16586 // Override to disable global variable loading on Linux.
16587 void PPCTargetLowering::insertSSPDeclarations(Module &M) const {
16588   if (!Subtarget.isTargetLinux())
16589     return TargetLowering::insertSSPDeclarations(M);
16590 }
16591 
16592 bool PPCTargetLowering::isFPImmLegal(const APFloat &Imm, EVT VT,
16593                                      bool ForCodeSize) const {
16594   if (!VT.isSimple() || !Subtarget.hasVSX())
16595     return false;
16596 
16597   switch(VT.getSimpleVT().SimpleTy) {
16598   default:
16599     // For FP types that are currently not supported by PPC backend, return
16600     // false. Examples: f16, f80.
16601     return false;
16602   case MVT::f32:
16603   case MVT::f64:
16604     if (Subtarget.hasPrefixInstrs()) {
16605       // With prefixed instructions, we can materialize anything that can be
16606       // represented with a 32-bit immediate, not just positive zero.
16607       APFloat APFloatOfImm = Imm;
16608       return convertToNonDenormSingle(APFloatOfImm);
16609     }
16610     LLVM_FALLTHROUGH;
16611   case MVT::ppcf128:
16612     return Imm.isPosZero();
16613   }
16614 }
16615 
16616 // For vector shift operation op, fold
16617 // (op x, (and y, ((1 << numbits(x)) - 1))) -> (target op x, y)
16618 static SDValue stripModuloOnShift(const TargetLowering &TLI, SDNode *N,
16619                                   SelectionDAG &DAG) {
16620   SDValue N0 = N->getOperand(0);
16621   SDValue N1 = N->getOperand(1);
16622   EVT VT = N0.getValueType();
16623   unsigned OpSizeInBits = VT.getScalarSizeInBits();
16624   unsigned Opcode = N->getOpcode();
16625   unsigned TargetOpcode;
16626 
16627   switch (Opcode) {
16628   default:
16629     llvm_unreachable("Unexpected shift operation");
16630   case ISD::SHL:
16631     TargetOpcode = PPCISD::SHL;
16632     break;
16633   case ISD::SRL:
16634     TargetOpcode = PPCISD::SRL;
16635     break;
16636   case ISD::SRA:
16637     TargetOpcode = PPCISD::SRA;
16638     break;
16639   }
16640 
16641   if (VT.isVector() && TLI.isOperationLegal(Opcode, VT) &&
16642       N1->getOpcode() == ISD::AND)
16643     if (ConstantSDNode *Mask = isConstOrConstSplat(N1->getOperand(1)))
16644       if (Mask->getZExtValue() == OpSizeInBits - 1)
16645         return DAG.getNode(TargetOpcode, SDLoc(N), VT, N0, N1->getOperand(0));
16646 
16647   return SDValue();
16648 }
16649 
16650 SDValue PPCTargetLowering::combineSHL(SDNode *N, DAGCombinerInfo &DCI) const {
16651   if (auto Value = stripModuloOnShift(*this, N, DCI.DAG))
16652     return Value;
16653 
16654   SDValue N0 = N->getOperand(0);
16655   ConstantSDNode *CN1 = dyn_cast<ConstantSDNode>(N->getOperand(1));
16656   if (!Subtarget.isISA3_0() ||
16657       N0.getOpcode() != ISD::SIGN_EXTEND ||
16658       N0.getOperand(0).getValueType() != MVT::i32 ||
16659       CN1 == nullptr || N->getValueType(0) != MVT::i64)
16660     return SDValue();
16661 
16662   // We can't save an operation here if the value is already extended, and
16663   // the existing shift is easier to combine.
16664   SDValue ExtsSrc = N0.getOperand(0);
16665   if (ExtsSrc.getOpcode() == ISD::TRUNCATE &&
16666       ExtsSrc.getOperand(0).getOpcode() == ISD::AssertSext)
16667     return SDValue();
16668 
16669   SDLoc DL(N0);
16670   SDValue ShiftBy = SDValue(CN1, 0);
16671   // We want the shift amount to be i32 on the extswli, but the shift could
16672   // have an i64.
16673   if (ShiftBy.getValueType() == MVT::i64)
16674     ShiftBy = DCI.DAG.getConstant(CN1->getZExtValue(), DL, MVT::i32);
16675 
16676   return DCI.DAG.getNode(PPCISD::EXTSWSLI, DL, MVT::i64, N0->getOperand(0),
16677                          ShiftBy);
16678 }
16679 
16680 SDValue PPCTargetLowering::combineSRA(SDNode *N, DAGCombinerInfo &DCI) const {
16681   if (auto Value = stripModuloOnShift(*this, N, DCI.DAG))
16682     return Value;
16683 
16684   return SDValue();
16685 }
16686 
16687 SDValue PPCTargetLowering::combineSRL(SDNode *N, DAGCombinerInfo &DCI) const {
16688   if (auto Value = stripModuloOnShift(*this, N, DCI.DAG))
16689     return Value;
16690 
16691   return SDValue();
16692 }
16693 
16694 // Transform (add X, (zext(setne Z, C))) -> (addze X, (addic (addi Z, -C), -1))
16695 // Transform (add X, (zext(sete  Z, C))) -> (addze X, (subfic (addi Z, -C), 0))
16696 // When C is zero, the equation (addi Z, -C) can be simplified to Z
16697 // Requirement: -C in [-32768, 32767], X and Z are MVT::i64 types
16698 static SDValue combineADDToADDZE(SDNode *N, SelectionDAG &DAG,
16699                                  const PPCSubtarget &Subtarget) {
16700   if (!Subtarget.isPPC64())
16701     return SDValue();
16702 
16703   SDValue LHS = N->getOperand(0);
16704   SDValue RHS = N->getOperand(1);
16705 
16706   auto isZextOfCompareWithConstant = [](SDValue Op) {
16707     if (Op.getOpcode() != ISD::ZERO_EXTEND || !Op.hasOneUse() ||
16708         Op.getValueType() != MVT::i64)
16709       return false;
16710 
16711     SDValue Cmp = Op.getOperand(0);
16712     if (Cmp.getOpcode() != ISD::SETCC || !Cmp.hasOneUse() ||
16713         Cmp.getOperand(0).getValueType() != MVT::i64)
16714       return false;
16715 
16716     if (auto *Constant = dyn_cast<ConstantSDNode>(Cmp.getOperand(1))) {
16717       int64_t NegConstant = 0 - Constant->getSExtValue();
16718       // Due to the limitations of the addi instruction,
16719       // -C is required to be [-32768, 32767].
16720       return isInt<16>(NegConstant);
16721     }
16722 
16723     return false;
16724   };
16725 
16726   bool LHSHasPattern = isZextOfCompareWithConstant(LHS);
16727   bool RHSHasPattern = isZextOfCompareWithConstant(RHS);
16728 
16729   // If there is a pattern, canonicalize a zext operand to the RHS.
16730   if (LHSHasPattern && !RHSHasPattern)
16731     std::swap(LHS, RHS);
16732   else if (!LHSHasPattern && !RHSHasPattern)
16733     return SDValue();
16734 
16735   SDLoc DL(N);
16736   SDVTList VTs = DAG.getVTList(MVT::i64, MVT::Glue);
16737   SDValue Cmp = RHS.getOperand(0);
16738   SDValue Z = Cmp.getOperand(0);
16739   auto *Constant = dyn_cast<ConstantSDNode>(Cmp.getOperand(1));
16740 
16741   assert(Constant && "Constant Should not be a null pointer.");
16742   int64_t NegConstant = 0 - Constant->getSExtValue();
16743 
16744   switch(cast<CondCodeSDNode>(Cmp.getOperand(2))->get()) {
16745   default: break;
16746   case ISD::SETNE: {
16747     //                                 when C == 0
16748     //                             --> addze X, (addic Z, -1).carry
16749     //                            /
16750     // add X, (zext(setne Z, C))--
16751     //                            \    when -32768 <= -C <= 32767 && C != 0
16752     //                             --> addze X, (addic (addi Z, -C), -1).carry
16753     SDValue Add = DAG.getNode(ISD::ADD, DL, MVT::i64, Z,
16754                               DAG.getConstant(NegConstant, DL, MVT::i64));
16755     SDValue AddOrZ = NegConstant != 0 ? Add : Z;
16756     SDValue Addc = DAG.getNode(ISD::ADDC, DL, DAG.getVTList(MVT::i64, MVT::Glue),
16757                                AddOrZ, DAG.getConstant(-1ULL, DL, MVT::i64));
16758     return DAG.getNode(ISD::ADDE, DL, VTs, LHS, DAG.getConstant(0, DL, MVT::i64),
16759                        SDValue(Addc.getNode(), 1));
16760     }
16761   case ISD::SETEQ: {
16762     //                                 when C == 0
16763     //                             --> addze X, (subfic Z, 0).carry
16764     //                            /
16765     // add X, (zext(sete  Z, C))--
16766     //                            \    when -32768 <= -C <= 32767 && C != 0
16767     //                             --> addze X, (subfic (addi Z, -C), 0).carry
16768     SDValue Add = DAG.getNode(ISD::ADD, DL, MVT::i64, Z,
16769                               DAG.getConstant(NegConstant, DL, MVT::i64));
16770     SDValue AddOrZ = NegConstant != 0 ? Add : Z;
16771     SDValue Subc = DAG.getNode(ISD::SUBC, DL, DAG.getVTList(MVT::i64, MVT::Glue),
16772                                DAG.getConstant(0, DL, MVT::i64), AddOrZ);
16773     return DAG.getNode(ISD::ADDE, DL, VTs, LHS, DAG.getConstant(0, DL, MVT::i64),
16774                        SDValue(Subc.getNode(), 1));
16775     }
16776   }
16777 
16778   return SDValue();
16779 }
16780 
16781 // Transform
16782 // (add C1, (MAT_PCREL_ADDR GlobalAddr+C2)) to
16783 // (MAT_PCREL_ADDR GlobalAddr+(C1+C2))
16784 // In this case both C1 and C2 must be known constants.
16785 // C1+C2 must fit into a 34 bit signed integer.
16786 static SDValue combineADDToMAT_PCREL_ADDR(SDNode *N, SelectionDAG &DAG,
16787                                           const PPCSubtarget &Subtarget) {
16788   if (!Subtarget.isUsingPCRelativeCalls())
16789     return SDValue();
16790 
16791   // Check both Operand 0 and Operand 1 of the ADD node for the PCRel node.
16792   // If we find that node try to cast the Global Address and the Constant.
16793   SDValue LHS = N->getOperand(0);
16794   SDValue RHS = N->getOperand(1);
16795 
16796   if (LHS.getOpcode() != PPCISD::MAT_PCREL_ADDR)
16797     std::swap(LHS, RHS);
16798 
16799   if (LHS.getOpcode() != PPCISD::MAT_PCREL_ADDR)
16800     return SDValue();
16801 
16802   // Operand zero of PPCISD::MAT_PCREL_ADDR is the GA node.
16803   GlobalAddressSDNode *GSDN = dyn_cast<GlobalAddressSDNode>(LHS.getOperand(0));
16804   ConstantSDNode* ConstNode = dyn_cast<ConstantSDNode>(RHS);
16805 
16806   // Check that both casts succeeded.
16807   if (!GSDN || !ConstNode)
16808     return SDValue();
16809 
16810   int64_t NewOffset = GSDN->getOffset() + ConstNode->getSExtValue();
16811   SDLoc DL(GSDN);
16812 
16813   // The signed int offset needs to fit in 34 bits.
16814   if (!isInt<34>(NewOffset))
16815     return SDValue();
16816 
16817   // The new global address is a copy of the old global address except
16818   // that it has the updated Offset.
16819   SDValue GA =
16820       DAG.getTargetGlobalAddress(GSDN->getGlobal(), DL, GSDN->getValueType(0),
16821                                  NewOffset, GSDN->getTargetFlags());
16822   SDValue MatPCRel =
16823       DAG.getNode(PPCISD::MAT_PCREL_ADDR, DL, GSDN->getValueType(0), GA);
16824   return MatPCRel;
16825 }
16826 
16827 SDValue PPCTargetLowering::combineADD(SDNode *N, DAGCombinerInfo &DCI) const {
16828   if (auto Value = combineADDToADDZE(N, DCI.DAG, Subtarget))
16829     return Value;
16830 
16831   if (auto Value = combineADDToMAT_PCREL_ADDR(N, DCI.DAG, Subtarget))
16832     return Value;
16833 
16834   return SDValue();
16835 }
16836 
16837 // Detect TRUNCATE operations on bitcasts of float128 values.
16838 // What we are looking for here is the situtation where we extract a subset
16839 // of bits from a 128 bit float.
16840 // This can be of two forms:
16841 // 1) BITCAST of f128 feeding TRUNCATE
16842 // 2) BITCAST of f128 feeding SRL (a shift) feeding TRUNCATE
16843 // The reason this is required is because we do not have a legal i128 type
16844 // and so we want to prevent having to store the f128 and then reload part
16845 // of it.
16846 SDValue PPCTargetLowering::combineTRUNCATE(SDNode *N,
16847                                            DAGCombinerInfo &DCI) const {
16848   // If we are using CRBits then try that first.
16849   if (Subtarget.useCRBits()) {
16850     // Check if CRBits did anything and return that if it did.
16851     if (SDValue CRTruncValue = DAGCombineTruncBoolExt(N, DCI))
16852       return CRTruncValue;
16853   }
16854 
16855   SDLoc dl(N);
16856   SDValue Op0 = N->getOperand(0);
16857 
16858   // fold (truncate (abs (sub (zext a), (zext b)))) -> (vabsd a, b)
16859   if (Subtarget.hasP9Altivec() && Op0.getOpcode() == ISD::ABS) {
16860     EVT VT = N->getValueType(0);
16861     if (VT != MVT::v4i32 && VT != MVT::v8i16 && VT != MVT::v16i8)
16862       return SDValue();
16863     SDValue Sub = Op0.getOperand(0);
16864     if (Sub.getOpcode() == ISD::SUB) {
16865       SDValue SubOp0 = Sub.getOperand(0);
16866       SDValue SubOp1 = Sub.getOperand(1);
16867       if ((SubOp0.getOpcode() == ISD::ZERO_EXTEND) &&
16868           (SubOp1.getOpcode() == ISD::ZERO_EXTEND)) {
16869         return DCI.DAG.getNode(PPCISD::VABSD, dl, VT, SubOp0.getOperand(0),
16870                                SubOp1.getOperand(0),
16871                                DCI.DAG.getTargetConstant(0, dl, MVT::i32));
16872       }
16873     }
16874   }
16875 
16876   // Looking for a truncate of i128 to i64.
16877   if (Op0.getValueType() != MVT::i128 || N->getValueType(0) != MVT::i64)
16878     return SDValue();
16879 
16880   int EltToExtract = DCI.DAG.getDataLayout().isBigEndian() ? 1 : 0;
16881 
16882   // SRL feeding TRUNCATE.
16883   if (Op0.getOpcode() == ISD::SRL) {
16884     ConstantSDNode *ConstNode = dyn_cast<ConstantSDNode>(Op0.getOperand(1));
16885     // The right shift has to be by 64 bits.
16886     if (!ConstNode || ConstNode->getZExtValue() != 64)
16887       return SDValue();
16888 
16889     // Switch the element number to extract.
16890     EltToExtract = EltToExtract ? 0 : 1;
16891     // Update Op0 past the SRL.
16892     Op0 = Op0.getOperand(0);
16893   }
16894 
16895   // BITCAST feeding a TRUNCATE possibly via SRL.
16896   if (Op0.getOpcode() == ISD::BITCAST &&
16897       Op0.getValueType() == MVT::i128 &&
16898       Op0.getOperand(0).getValueType() == MVT::f128) {
16899     SDValue Bitcast = DCI.DAG.getBitcast(MVT::v2i64, Op0.getOperand(0));
16900     return DCI.DAG.getNode(
16901         ISD::EXTRACT_VECTOR_ELT, dl, MVT::i64, Bitcast,
16902         DCI.DAG.getTargetConstant(EltToExtract, dl, MVT::i32));
16903   }
16904   return SDValue();
16905 }
16906 
16907 SDValue PPCTargetLowering::combineMUL(SDNode *N, DAGCombinerInfo &DCI) const {
16908   SelectionDAG &DAG = DCI.DAG;
16909 
16910   ConstantSDNode *ConstOpOrElement = isConstOrConstSplat(N->getOperand(1));
16911   if (!ConstOpOrElement)
16912     return SDValue();
16913 
16914   // An imul is usually smaller than the alternative sequence for legal type.
16915   if (DAG.getMachineFunction().getFunction().hasMinSize() &&
16916       isOperationLegal(ISD::MUL, N->getValueType(0)))
16917     return SDValue();
16918 
16919   auto IsProfitable = [this](bool IsNeg, bool IsAddOne, EVT VT) -> bool {
16920     switch (this->Subtarget.getCPUDirective()) {
16921     default:
16922       // TODO: enhance the condition for subtarget before pwr8
16923       return false;
16924     case PPC::DIR_PWR8:
16925       //  type        mul     add    shl
16926       // scalar        4       1      1
16927       // vector        7       2      2
16928       return true;
16929     case PPC::DIR_PWR9:
16930     case PPC::DIR_PWR10:
16931     case PPC::DIR_PWR_FUTURE:
16932       //  type        mul     add    shl
16933       // scalar        5       2      2
16934       // vector        7       2      2
16935 
16936       // The cycle RATIO of related operations are showed as a table above.
16937       // Because mul is 5(scalar)/7(vector), add/sub/shl are all 2 for both
16938       // scalar and vector type. For 2 instrs patterns, add/sub + shl
16939       // are 4, it is always profitable; but for 3 instrs patterns
16940       // (mul x, -(2^N + 1)) => -(add (shl x, N), x), sub + add + shl are 6.
16941       // So we should only do it for vector type.
16942       return IsAddOne && IsNeg ? VT.isVector() : true;
16943     }
16944   };
16945 
16946   EVT VT = N->getValueType(0);
16947   SDLoc DL(N);
16948 
16949   const APInt &MulAmt = ConstOpOrElement->getAPIntValue();
16950   bool IsNeg = MulAmt.isNegative();
16951   APInt MulAmtAbs = MulAmt.abs();
16952 
16953   if ((MulAmtAbs - 1).isPowerOf2()) {
16954     // (mul x, 2^N + 1) => (add (shl x, N), x)
16955     // (mul x, -(2^N + 1)) => -(add (shl x, N), x)
16956 
16957     if (!IsProfitable(IsNeg, true, VT))
16958       return SDValue();
16959 
16960     SDValue Op0 = N->getOperand(0);
16961     SDValue Op1 =
16962         DAG.getNode(ISD::SHL, DL, VT, N->getOperand(0),
16963                     DAG.getConstant((MulAmtAbs - 1).logBase2(), DL, VT));
16964     SDValue Res = DAG.getNode(ISD::ADD, DL, VT, Op0, Op1);
16965 
16966     if (!IsNeg)
16967       return Res;
16968 
16969     return DAG.getNode(ISD::SUB, DL, VT, DAG.getConstant(0, DL, VT), Res);
16970   } else if ((MulAmtAbs + 1).isPowerOf2()) {
16971     // (mul x, 2^N - 1) => (sub (shl x, N), x)
16972     // (mul x, -(2^N - 1)) => (sub x, (shl x, N))
16973 
16974     if (!IsProfitable(IsNeg, false, VT))
16975       return SDValue();
16976 
16977     SDValue Op0 = N->getOperand(0);
16978     SDValue Op1 =
16979         DAG.getNode(ISD::SHL, DL, VT, N->getOperand(0),
16980                     DAG.getConstant((MulAmtAbs + 1).logBase2(), DL, VT));
16981 
16982     if (!IsNeg)
16983       return DAG.getNode(ISD::SUB, DL, VT, Op1, Op0);
16984     else
16985       return DAG.getNode(ISD::SUB, DL, VT, Op0, Op1);
16986 
16987   } else {
16988     return SDValue();
16989   }
16990 }
16991 
16992 // Combine fma-like op (like fnmsub) with fnegs to appropriate op. Do this
16993 // in combiner since we need to check SD flags and other subtarget features.
16994 SDValue PPCTargetLowering::combineFMALike(SDNode *N,
16995                                           DAGCombinerInfo &DCI) const {
16996   SDValue N0 = N->getOperand(0);
16997   SDValue N1 = N->getOperand(1);
16998   SDValue N2 = N->getOperand(2);
16999   SDNodeFlags Flags = N->getFlags();
17000   EVT VT = N->getValueType(0);
17001   SelectionDAG &DAG = DCI.DAG;
17002   const TargetOptions &Options = getTargetMachine().Options;
17003   unsigned Opc = N->getOpcode();
17004   bool CodeSize = DAG.getMachineFunction().getFunction().hasOptSize();
17005   bool LegalOps = !DCI.isBeforeLegalizeOps();
17006   SDLoc Loc(N);
17007 
17008   // TODO: QPX subtarget is deprecated. No transformation here.
17009   if (Subtarget.hasQPX() || !isOperationLegal(ISD::FMA, VT))
17010     return SDValue();
17011 
17012   // Allowing transformation to FNMSUB may change sign of zeroes when ab-c=0
17013   // since (fnmsub a b c)=-0 while c-ab=+0.
17014   if (!Flags.hasNoSignedZeros() && !Options.NoSignedZerosFPMath)
17015     return SDValue();
17016 
17017   // (fma (fneg a) b c) => (fnmsub a b c)
17018   // (fnmsub (fneg a) b c) => (fma a b c)
17019   if (SDValue NegN0 = getCheaperNegatedExpression(N0, DAG, LegalOps, CodeSize))
17020     return DAG.getNode(invertFMAOpcode(Opc), Loc, VT, NegN0, N1, N2, Flags);
17021 
17022   // (fma a (fneg b) c) => (fnmsub a b c)
17023   // (fnmsub a (fneg b) c) => (fma a b c)
17024   if (SDValue NegN1 = getCheaperNegatedExpression(N1, DAG, LegalOps, CodeSize))
17025     return DAG.getNode(invertFMAOpcode(Opc), Loc, VT, N0, NegN1, N2, Flags);
17026 
17027   return SDValue();
17028 }
17029 
17030 bool PPCTargetLowering::mayBeEmittedAsTailCall(const CallInst *CI) const {
17031   // Only duplicate to increase tail-calls for the 64bit SysV ABIs.
17032   if (!Subtarget.is64BitELFABI())
17033     return false;
17034 
17035   // If not a tail call then no need to proceed.
17036   if (!CI->isTailCall())
17037     return false;
17038 
17039   // If sibling calls have been disabled and tail-calls aren't guaranteed
17040   // there is no reason to duplicate.
17041   auto &TM = getTargetMachine();
17042   if (!TM.Options.GuaranteedTailCallOpt && DisableSCO)
17043     return false;
17044 
17045   // Can't tail call a function called indirectly, or if it has variadic args.
17046   const Function *Callee = CI->getCalledFunction();
17047   if (!Callee || Callee->isVarArg())
17048     return false;
17049 
17050   // Make sure the callee and caller calling conventions are eligible for tco.
17051   const Function *Caller = CI->getParent()->getParent();
17052   if (!areCallingConvEligibleForTCO_64SVR4(Caller->getCallingConv(),
17053                                            CI->getCallingConv()))
17054       return false;
17055 
17056   // If the function is local then we have a good chance at tail-calling it
17057   return getTargetMachine().shouldAssumeDSOLocal(*Caller->getParent(), Callee);
17058 }
17059 
17060 bool PPCTargetLowering::hasBitPreservingFPLogic(EVT VT) const {
17061   if (!Subtarget.hasVSX())
17062     return false;
17063   if (Subtarget.hasP9Vector() && VT == MVT::f128)
17064     return true;
17065   return VT == MVT::f32 || VT == MVT::f64 ||
17066     VT == MVT::v4f32 || VT == MVT::v2f64;
17067 }
17068 
17069 bool PPCTargetLowering::
17070 isMaskAndCmp0FoldingBeneficial(const Instruction &AndI) const {
17071   const Value *Mask = AndI.getOperand(1);
17072   // If the mask is suitable for andi. or andis. we should sink the and.
17073   if (const ConstantInt *CI = dyn_cast<ConstantInt>(Mask)) {
17074     // Can't handle constants wider than 64-bits.
17075     if (CI->getBitWidth() > 64)
17076       return false;
17077     int64_t ConstVal = CI->getZExtValue();
17078     return isUInt<16>(ConstVal) ||
17079       (isUInt<16>(ConstVal >> 16) && !(ConstVal & 0xFFFF));
17080   }
17081 
17082   // For non-constant masks, we can always use the record-form and.
17083   return true;
17084 }
17085 
17086 // Transform (abs (sub (zext a), (zext b))) to (vabsd a b 0)
17087 // Transform (abs (sub (zext a), (zext_invec b))) to (vabsd a b 0)
17088 // Transform (abs (sub (zext_invec a), (zext_invec b))) to (vabsd a b 0)
17089 // Transform (abs (sub (zext_invec a), (zext b))) to (vabsd a b 0)
17090 // Transform (abs (sub a, b) to (vabsd a b 1)) if a & b of type v4i32
17091 SDValue PPCTargetLowering::combineABS(SDNode *N, DAGCombinerInfo &DCI) const {
17092   assert((N->getOpcode() == ISD::ABS) && "Need ABS node here");
17093   assert(Subtarget.hasP9Altivec() &&
17094          "Only combine this when P9 altivec supported!");
17095   EVT VT = N->getValueType(0);
17096   if (VT != MVT::v4i32 && VT != MVT::v8i16 && VT != MVT::v16i8)
17097     return SDValue();
17098 
17099   SelectionDAG &DAG = DCI.DAG;
17100   SDLoc dl(N);
17101   if (N->getOperand(0).getOpcode() == ISD::SUB) {
17102     // Even for signed integers, if it's known to be positive (as signed
17103     // integer) due to zero-extended inputs.
17104     unsigned SubOpcd0 = N->getOperand(0)->getOperand(0).getOpcode();
17105     unsigned SubOpcd1 = N->getOperand(0)->getOperand(1).getOpcode();
17106     if ((SubOpcd0 == ISD::ZERO_EXTEND ||
17107          SubOpcd0 == ISD::ZERO_EXTEND_VECTOR_INREG) &&
17108         (SubOpcd1 == ISD::ZERO_EXTEND ||
17109          SubOpcd1 == ISD::ZERO_EXTEND_VECTOR_INREG)) {
17110       return DAG.getNode(PPCISD::VABSD, dl, N->getOperand(0).getValueType(),
17111                          N->getOperand(0)->getOperand(0),
17112                          N->getOperand(0)->getOperand(1),
17113                          DAG.getTargetConstant(0, dl, MVT::i32));
17114     }
17115 
17116     // For type v4i32, it can be optimized with xvnegsp + vabsduw
17117     if (N->getOperand(0).getValueType() == MVT::v4i32 &&
17118         N->getOperand(0).hasOneUse()) {
17119       return DAG.getNode(PPCISD::VABSD, dl, N->getOperand(0).getValueType(),
17120                          N->getOperand(0)->getOperand(0),
17121                          N->getOperand(0)->getOperand(1),
17122                          DAG.getTargetConstant(1, dl, MVT::i32));
17123     }
17124   }
17125 
17126   return SDValue();
17127 }
17128 
17129 // For type v4i32/v8ii16/v16i8, transform
17130 // from (vselect (setcc a, b, setugt), (sub a, b), (sub b, a)) to (vabsd a, b)
17131 // from (vselect (setcc a, b, setuge), (sub a, b), (sub b, a)) to (vabsd a, b)
17132 // from (vselect (setcc a, b, setult), (sub b, a), (sub a, b)) to (vabsd a, b)
17133 // from (vselect (setcc a, b, setule), (sub b, a), (sub a, b)) to (vabsd a, b)
17134 SDValue PPCTargetLowering::combineVSelect(SDNode *N,
17135                                           DAGCombinerInfo &DCI) const {
17136   assert((N->getOpcode() == ISD::VSELECT) && "Need VSELECT node here");
17137   assert(Subtarget.hasP9Altivec() &&
17138          "Only combine this when P9 altivec supported!");
17139 
17140   SelectionDAG &DAG = DCI.DAG;
17141   SDLoc dl(N);
17142   SDValue Cond = N->getOperand(0);
17143   SDValue TrueOpnd = N->getOperand(1);
17144   SDValue FalseOpnd = N->getOperand(2);
17145   EVT VT = N->getOperand(1).getValueType();
17146 
17147   if (Cond.getOpcode() != ISD::SETCC || TrueOpnd.getOpcode() != ISD::SUB ||
17148       FalseOpnd.getOpcode() != ISD::SUB)
17149     return SDValue();
17150 
17151   // ABSD only available for type v4i32/v8i16/v16i8
17152   if (VT != MVT::v4i32 && VT != MVT::v8i16 && VT != MVT::v16i8)
17153     return SDValue();
17154 
17155   // At least to save one more dependent computation
17156   if (!(Cond.hasOneUse() || TrueOpnd.hasOneUse() || FalseOpnd.hasOneUse()))
17157     return SDValue();
17158 
17159   ISD::CondCode CC = cast<CondCodeSDNode>(Cond.getOperand(2))->get();
17160 
17161   // Can only handle unsigned comparison here
17162   switch (CC) {
17163   default:
17164     return SDValue();
17165   case ISD::SETUGT:
17166   case ISD::SETUGE:
17167     break;
17168   case ISD::SETULT:
17169   case ISD::SETULE:
17170     std::swap(TrueOpnd, FalseOpnd);
17171     break;
17172   }
17173 
17174   SDValue CmpOpnd1 = Cond.getOperand(0);
17175   SDValue CmpOpnd2 = Cond.getOperand(1);
17176 
17177   // SETCC CmpOpnd1 CmpOpnd2 cond
17178   // TrueOpnd = CmpOpnd1 - CmpOpnd2
17179   // FalseOpnd = CmpOpnd2 - CmpOpnd1
17180   if (TrueOpnd.getOperand(0) == CmpOpnd1 &&
17181       TrueOpnd.getOperand(1) == CmpOpnd2 &&
17182       FalseOpnd.getOperand(0) == CmpOpnd2 &&
17183       FalseOpnd.getOperand(1) == CmpOpnd1) {
17184     return DAG.getNode(PPCISD::VABSD, dl, N->getOperand(1).getValueType(),
17185                        CmpOpnd1, CmpOpnd2,
17186                        DAG.getTargetConstant(0, dl, MVT::i32));
17187   }
17188 
17189   return SDValue();
17190 }
17191