//===-- PPCISelLowering.cpp - PPC DAG Lowering Implementation -------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file implements the PPCISelLowering class.
//
//===----------------------------------------------------------------------===//

#include "PPCISelLowering.h"
#include "MCTargetDesc/PPCPredicates.h"
#include "PPC.h"
#include "PPCCCState.h"
#include "PPCCallingConv.h"
#include "PPCFrameLowering.h"
#include "PPCInstrInfo.h"
#include "PPCMachineFunctionInfo.h"
#include "PPCPerfectShuffle.h"
#include "PPCRegisterInfo.h"
#include "PPCSubtarget.h"
#include "PPCTargetMachine.h"
#include "llvm/ADT/APFloat.h"
#include "llvm/ADT/APInt.h"
#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/None.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/SmallSet.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/ADT/StringRef.h"
#include "llvm/ADT/StringSwitch.h"
#include "llvm/CodeGen/CallingConvLower.h"
#include "llvm/CodeGen/ISDOpcodes.h"
#include "llvm/CodeGen/MachineBasicBlock.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineInstr.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineJumpTableInfo.h"
#include "llvm/CodeGen/MachineLoopInfo.h"
#include "llvm/CodeGen/MachineMemOperand.h"
#include "llvm/CodeGen/MachineModuleInfo.h"
#include "llvm/CodeGen/MachineOperand.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/RuntimeLibcalls.h"
#include "llvm/CodeGen/SelectionDAG.h"
#include "llvm/CodeGen/SelectionDAGNodes.h"
#include "llvm/CodeGen/TargetInstrInfo.h"
#include "llvm/CodeGen/TargetLowering.h"
#include "llvm/CodeGen/TargetRegisterInfo.h"
#include "llvm/CodeGen/ValueTypes.h"
#include "llvm/IR/CallSite.h"
#include "llvm/IR/CallingConv.h"
#include "llvm/IR/Constant.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/DebugLoc.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/GlobalValue.h"
#include "llvm/IR/IRBuilder.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/Intrinsics.h"
#include "llvm/IR/Module.h"
#include "llvm/IR/Type.h"
#include "llvm/IR/Use.h"
#include "llvm/IR/Value.h"
#include "llvm/MC/MCContext.h"
#include "llvm/MC/MCExpr.h"
#include "llvm/MC/MCRegisterInfo.h"
#include "llvm/MC/MCSymbolXCOFF.h"
#include "llvm/Support/AtomicOrdering.h"
#include "llvm/Support/BranchProbability.h"
#include "llvm/Support/Casting.h"
#include "llvm/Support/CodeGen.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Compiler.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/Format.h"
#include "llvm/Support/KnownBits.h"
#include "llvm/Support/MachineValueType.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Target/TargetMachine.h"
#include "llvm/Target/TargetOptions.h"
#include <algorithm>
#include <cassert>
#include <cstdint>
#include <iterator>
#include <list>
#include <utility>
#include <vector>

using namespace llvm;

#define DEBUG_TYPE "ppc-lowering"

static cl::opt<bool> DisablePPCPreinc("disable-ppc-preinc",
    cl::desc("disable preincrement load/store generation on PPC"),
    cl::Hidden);

static cl::opt<bool> DisableILPPref("disable-ppc-ilp-pref",
    cl::desc("disable setting the node scheduling preference to ILP on PPC"),
    cl::Hidden);

static cl::opt<bool> DisablePPCUnaligned("disable-ppc-unaligned",
    cl::desc("disable unaligned load/store generation on PPC"), cl::Hidden);

static cl::opt<bool> DisableSCO("disable-ppc-sco",
    cl::desc("disable sibling call optimization on ppc"), cl::Hidden);

static cl::opt<bool> DisableInnermostLoopAlign32(
    "disable-ppc-innermost-loop-align32",
    cl::desc("don't always align innermost loop to 32 bytes on ppc"),
    cl::Hidden);

static cl::opt<bool> EnableQuadPrecision("enable-ppc-quad-precision",
    cl::desc("enable quad precision float support on ppc"), cl::Hidden);

STATISTIC(NumTailCalls, "Number of tail calls");
STATISTIC(NumSiblingCalls, "Number of sibling calls");

static bool isNByteElemShuffleMask(ShuffleVectorSDNode *, unsigned, int);

static SDValue widenVec(SelectionDAG &DAG, SDValue Vec, const SDLoc &dl);

// FIXME: Remove this once the bug has been fixed!
extern cl::opt<bool> ANDIGlueBug;

PPCTargetLowering::PPCTargetLowering(const PPCTargetMachine &TM,
                                     const PPCSubtarget &STI)
    : TargetLowering(TM), Subtarget(STI) {
  // Use _setjmp/_longjmp instead of setjmp/longjmp.
  setUseUnderscoreSetJmp(true);
  setUseUnderscoreLongJmp(true);

  // On PPC32/64, arguments smaller than 4/8 bytes are extended, so all
  // arguments are at least 4/8 bytes aligned.
  bool isPPC64 = Subtarget.isPPC64();
  setMinStackArgumentAlignment(isPPC64 ? 8 : 4);

  // Set up the register classes.
  addRegisterClass(MVT::i32, &PPC::GPRCRegClass);
  if (!useSoftFloat()) {
    if (hasSPE()) {
      addRegisterClass(MVT::f32, &PPC::SPE4RCRegClass);
      addRegisterClass(MVT::f64, &PPC::SPERCRegClass);
    } else {
      addRegisterClass(MVT::f32, &PPC::F4RCRegClass);
      addRegisterClass(MVT::f64, &PPC::F8RCRegClass);
    }
  }

  // Match BITREVERSE to customized fast code sequence in the td file.
  setOperationAction(ISD::BITREVERSE, MVT::i32, Legal);
  setOperationAction(ISD::BITREVERSE, MVT::i64, Legal);

  // Sub-word ATOMIC_CMP_SWAP needs to ensure that the input is zero-extended.
  setOperationAction(ISD::ATOMIC_CMP_SWAP, MVT::i32, Custom);

  // PowerPC has an i16 but no i8 (or i1) SEXTLOAD.
  for (MVT VT : MVT::integer_valuetypes()) {
    setLoadExtAction(ISD::SEXTLOAD, VT, MVT::i1, Promote);
    setLoadExtAction(ISD::SEXTLOAD, VT, MVT::i8, Expand);
  }

  setTruncStoreAction(MVT::f64, MVT::f32, Expand);

  // PowerPC has pre-inc loads and stores.
  setIndexedLoadAction(ISD::PRE_INC, MVT::i1, Legal);
  setIndexedLoadAction(ISD::PRE_INC, MVT::i8, Legal);
  setIndexedLoadAction(ISD::PRE_INC, MVT::i16, Legal);
  setIndexedLoadAction(ISD::PRE_INC, MVT::i32, Legal);
  setIndexedLoadAction(ISD::PRE_INC, MVT::i64, Legal);
  setIndexedStoreAction(ISD::PRE_INC, MVT::i1, Legal);
  setIndexedStoreAction(ISD::PRE_INC, MVT::i8, Legal);
  setIndexedStoreAction(ISD::PRE_INC, MVT::i16, Legal);
  setIndexedStoreAction(ISD::PRE_INC, MVT::i32, Legal);
  setIndexedStoreAction(ISD::PRE_INC, MVT::i64, Legal);
  if (!Subtarget.hasSPE()) {
    setIndexedLoadAction(ISD::PRE_INC, MVT::f32, Legal);
    setIndexedLoadAction(ISD::PRE_INC, MVT::f64, Legal);
    setIndexedStoreAction(ISD::PRE_INC, MVT::f32, Legal);
    setIndexedStoreAction(ISD::PRE_INC, MVT::f64, Legal);
  }

  // PowerPC uses ADDC/ADDE/SUBC/SUBE to propagate carry.
  const MVT ScalarIntVTs[] = { MVT::i32, MVT::i64 };
  for (MVT VT : ScalarIntVTs) {
    setOperationAction(ISD::ADDC, VT, Legal);
    setOperationAction(ISD::ADDE, VT, Legal);
    setOperationAction(ISD::SUBC, VT, Legal);
    setOperationAction(ISD::SUBE, VT, Legal);
  }

  if (Subtarget.useCRBits()) {
    setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i1, Expand);

    if (isPPC64 || Subtarget.hasFPCVT()) {
      setOperationAction(ISD::SINT_TO_FP, MVT::i1, Promote);
      AddPromotedToType(ISD::SINT_TO_FP, MVT::i1,
                        isPPC64 ? MVT::i64 : MVT::i32);
      setOperationAction(ISD::UINT_TO_FP, MVT::i1, Promote);
      AddPromotedToType(ISD::UINT_TO_FP, MVT::i1,
                        isPPC64 ? MVT::i64 : MVT::i32);
    } else {
      setOperationAction(ISD::SINT_TO_FP, MVT::i1, Custom);
      setOperationAction(ISD::UINT_TO_FP, MVT::i1, Custom);
    }

    // PowerPC does not support direct load/store of condition registers.
    setOperationAction(ISD::LOAD, MVT::i1, Custom);
    setOperationAction(ISD::STORE, MVT::i1, Custom);

    // FIXME: Remove this once the ANDI glue bug is fixed:
    if (ANDIGlueBug)
      setOperationAction(ISD::TRUNCATE, MVT::i1, Custom);

    for (MVT VT : MVT::integer_valuetypes()) {
      setLoadExtAction(ISD::SEXTLOAD, VT, MVT::i1, Promote);
      setLoadExtAction(ISD::ZEXTLOAD, VT, MVT::i1, Promote);
      setTruncStoreAction(VT, MVT::i1, Expand);
    }

    addRegisterClass(MVT::i1, &PPC::CRBITRCRegClass);
  }

  // Expand ppcf128 to i32 by hand for the benefit of llvm-gcc bootstrap on
  // PPC (the libcall is not available).
  setOperationAction(ISD::FP_TO_SINT, MVT::ppcf128, Custom);
  setOperationAction(ISD::FP_TO_UINT, MVT::ppcf128, Custom);

  // We do not currently implement these libm ops for PowerPC.
  setOperationAction(ISD::FFLOOR, MVT::ppcf128, Expand);
  setOperationAction(ISD::FCEIL,  MVT::ppcf128, Expand);
  setOperationAction(ISD::FTRUNC, MVT::ppcf128, Expand);
  setOperationAction(ISD::FRINT,  MVT::ppcf128, Expand);
  setOperationAction(ISD::FNEARBYINT, MVT::ppcf128, Expand);
  setOperationAction(ISD::FREM, MVT::ppcf128, Expand);

  // PowerPC has no SREM/UREM instructions unless we are on P9.
  // On P9 we may use a hardware instruction to compute the remainder.
  // The instructions are not legalized directly because in the cases where the
  // result of both the remainder and the division is required it is more
  // efficient to compute the remainder from the result of the division rather
  // than use the remainder instruction.
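  // For example, when both q = a / b and r = a % b are needed, r can be
  // recomputed as r = a - (a / b) * b, reusing the division that is already
  // being emitted instead of issuing a separate remainder instruction.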
  if (Subtarget.isISA3_0()) {
    setOperationAction(ISD::SREM, MVT::i32, Custom);
    setOperationAction(ISD::UREM, MVT::i32, Custom);
    setOperationAction(ISD::SREM, MVT::i64, Custom);
    setOperationAction(ISD::UREM, MVT::i64, Custom);
  } else {
    setOperationAction(ISD::SREM, MVT::i32, Expand);
    setOperationAction(ISD::UREM, MVT::i32, Expand);
    setOperationAction(ISD::SREM, MVT::i64, Expand);
    setOperationAction(ISD::UREM, MVT::i64, Expand);
  }

  // Don't use SMUL_LOHI/UMUL_LOHI or SDIVREM/UDIVREM to lower SREM/UREM.
  setOperationAction(ISD::UMUL_LOHI, MVT::i32, Expand);
  setOperationAction(ISD::SMUL_LOHI, MVT::i32, Expand);
  setOperationAction(ISD::UMUL_LOHI, MVT::i64, Expand);
  setOperationAction(ISD::SMUL_LOHI, MVT::i64, Expand);
  setOperationAction(ISD::UDIVREM, MVT::i32, Expand);
  setOperationAction(ISD::SDIVREM, MVT::i32, Expand);
  setOperationAction(ISD::UDIVREM, MVT::i64, Expand);
  setOperationAction(ISD::SDIVREM, MVT::i64, Expand);

  // We don't support sin/cos/sqrt/fmod/pow.
  setOperationAction(ISD::FSIN, MVT::f64, Expand);
  setOperationAction(ISD::FCOS, MVT::f64, Expand);
  setOperationAction(ISD::FSINCOS, MVT::f64, Expand);
  setOperationAction(ISD::FREM, MVT::f64, Expand);
  setOperationAction(ISD::FPOW, MVT::f64, Expand);
  setOperationAction(ISD::FSIN, MVT::f32, Expand);
  setOperationAction(ISD::FCOS, MVT::f32, Expand);
  setOperationAction(ISD::FSINCOS, MVT::f32, Expand);
  setOperationAction(ISD::FREM, MVT::f32, Expand);
  setOperationAction(ISD::FPOW, MVT::f32, Expand);
  if (Subtarget.hasSPE()) {
    setOperationAction(ISD::FMA, MVT::f64, Expand);
    setOperationAction(ISD::FMA, MVT::f32, Expand);
  } else {
    setOperationAction(ISD::FMA, MVT::f64, Legal);
    setOperationAction(ISD::FMA, MVT::f32, Legal);
  }

  setOperationAction(ISD::FLT_ROUNDS_, MVT::i32, Custom);

  // If we're enabling GP optimizations, use hardware square root.
  if (!Subtarget.hasFSQRT() &&
      !(TM.Options.UnsafeFPMath && Subtarget.hasFRSQRTE() &&
        Subtarget.hasFRE()))
    setOperationAction(ISD::FSQRT, MVT::f64, Expand);

  if (!Subtarget.hasFSQRT() &&
      !(TM.Options.UnsafeFPMath && Subtarget.hasFRSQRTES() &&
        Subtarget.hasFRES()))
    setOperationAction(ISD::FSQRT, MVT::f32, Expand);

  if (Subtarget.hasFCPSGN()) {
    setOperationAction(ISD::FCOPYSIGN, MVT::f64, Legal);
    setOperationAction(ISD::FCOPYSIGN, MVT::f32, Legal);
  } else {
    setOperationAction(ISD::FCOPYSIGN, MVT::f64, Expand);
    setOperationAction(ISD::FCOPYSIGN, MVT::f32, Expand);
  }

  if (Subtarget.hasFPRND()) {
    setOperationAction(ISD::FFLOOR, MVT::f64, Legal);
    setOperationAction(ISD::FCEIL,  MVT::f64, Legal);
    setOperationAction(ISD::FTRUNC, MVT::f64, Legal);
    setOperationAction(ISD::FROUND, MVT::f64, Legal);

    setOperationAction(ISD::FFLOOR, MVT::f32, Legal);
    setOperationAction(ISD::FCEIL,  MVT::f32, Legal);
    setOperationAction(ISD::FTRUNC, MVT::f32, Legal);
    setOperationAction(ISD::FROUND, MVT::f32, Legal);
  }

  // PowerPC does not have BSWAP, but we can use the vector BSWAP instruction
  // xxbrd to speed up scalar BSWAP64.
  // CTPOP and CTTZ were introduced in P8 and P9, respectively.
  setOperationAction(ISD::BSWAP, MVT::i32, Expand);
  if (Subtarget.hasP9Vector())
    setOperationAction(ISD::BSWAP, MVT::i64, Custom);
  else
    setOperationAction(ISD::BSWAP, MVT::i64, Expand);
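  // A sketch of the assumed custom BSWAP64 path (illustrative only; the exact
  // sequence is chosen at lowering time): move the i64 into a VSX register,
  // byte-reverse it with xxbrd, and move it back, e.g. mtvsrd/xxbrd/mfvsrd.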
  if (Subtarget.isISA3_0()) {
    setOperationAction(ISD::CTTZ, MVT::i32, Legal);
    setOperationAction(ISD::CTTZ, MVT::i64, Legal);
  } else {
    setOperationAction(ISD::CTTZ, MVT::i32, Expand);
    setOperationAction(ISD::CTTZ, MVT::i64, Expand);
  }

  if (Subtarget.hasPOPCNTD() == PPCSubtarget::POPCNTD_Fast) {
    setOperationAction(ISD::CTPOP, MVT::i32, Legal);
    setOperationAction(ISD::CTPOP, MVT::i64, Legal);
  } else {
    setOperationAction(ISD::CTPOP, MVT::i32, Expand);
    setOperationAction(ISD::CTPOP, MVT::i64, Expand);
  }

  // PowerPC does not have ROTR.
  setOperationAction(ISD::ROTR, MVT::i32, Expand);
  setOperationAction(ISD::ROTR, MVT::i64, Expand);

  if (!Subtarget.useCRBits()) {
    // PowerPC does not have Select.
    setOperationAction(ISD::SELECT, MVT::i32, Expand);
    setOperationAction(ISD::SELECT, MVT::i64, Expand);
    setOperationAction(ISD::SELECT, MVT::f32, Expand);
    setOperationAction(ISD::SELECT, MVT::f64, Expand);
  }

  // PowerPC wants to turn select_cc of FP into fsel when possible.
  setOperationAction(ISD::SELECT_CC, MVT::f32, Custom);
  setOperationAction(ISD::SELECT_CC, MVT::f64, Custom);

  // PowerPC wants to optimize integer setcc a bit.
  if (!Subtarget.useCRBits())
    setOperationAction(ISD::SETCC, MVT::i32, Custom);

  // PowerPC does not have BRCOND, which requires SetCC.
  if (!Subtarget.useCRBits())
    setOperationAction(ISD::BRCOND, MVT::Other, Expand);

  setOperationAction(ISD::BR_JT,  MVT::Other, Expand);

  if (Subtarget.hasSPE()) {
    // SPE has built-in conversions.
    setOperationAction(ISD::FP_TO_SINT, MVT::i32, Legal);
    setOperationAction(ISD::SINT_TO_FP, MVT::i32, Legal);
    setOperationAction(ISD::UINT_TO_FP, MVT::i32, Legal);
  } else {
    // PowerPC turns FP_TO_SINT into FCTIWZ and some load/stores.
    setOperationAction(ISD::FP_TO_SINT, MVT::i32, Custom);

    // PowerPC does not have [U|S]INT_TO_FP.
    setOperationAction(ISD::SINT_TO_FP, MVT::i32, Expand);
    setOperationAction(ISD::UINT_TO_FP, MVT::i32, Expand);
  }

  if (Subtarget.hasDirectMove() && isPPC64) {
    setOperationAction(ISD::BITCAST, MVT::f32, Legal);
    setOperationAction(ISD::BITCAST, MVT::i32, Legal);
    setOperationAction(ISD::BITCAST, MVT::i64, Legal);
    setOperationAction(ISD::BITCAST, MVT::f64, Legal);
  } else {
    setOperationAction(ISD::BITCAST, MVT::f32, Expand);
    setOperationAction(ISD::BITCAST, MVT::i32, Expand);
    setOperationAction(ISD::BITCAST, MVT::i64, Expand);
    setOperationAction(ISD::BITCAST, MVT::f64, Expand);
  }

  // We cannot sextinreg(i1). Expand to shifts.
  setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i1, Expand);

  // NOTE: EH_SJLJ_SETJMP/_LONGJMP supported here is NOT intended to support
  // SjLj exception handling but a light-weight setjmp/longjmp replacement to
  // support continuations, user-level threading, etc. As a result, no other
  // SjLj exception interfaces are implemented; please don't build your own
  // exception handling based on them.
  // LLVM/Clang supports zero-cost DWARF exception handling.
  setOperationAction(ISD::EH_SJLJ_SETJMP, MVT::i32, Custom);
  setOperationAction(ISD::EH_SJLJ_LONGJMP, MVT::Other, Custom);

  // We want to legalize GlobalAddress and ConstantPool nodes into the
  // appropriate instructions to materialize the address.
  setOperationAction(ISD::GlobalAddress, MVT::i32, Custom);
  setOperationAction(ISD::GlobalTLSAddress, MVT::i32, Custom);
  setOperationAction(ISD::BlockAddress,  MVT::i32, Custom);
  setOperationAction(ISD::ConstantPool,  MVT::i32, Custom);
  setOperationAction(ISD::JumpTable,     MVT::i32, Custom);
  setOperationAction(ISD::GlobalAddress, MVT::i64, Custom);
  setOperationAction(ISD::GlobalTLSAddress, MVT::i64, Custom);
  setOperationAction(ISD::BlockAddress,  MVT::i64, Custom);
  setOperationAction(ISD::ConstantPool,  MVT::i64, Custom);
  setOperationAction(ISD::JumpTable,     MVT::i64, Custom);

  // TRAP is legal.
  setOperationAction(ISD::TRAP, MVT::Other, Legal);

  // TRAMPOLINE is custom lowered.
  setOperationAction(ISD::INIT_TRAMPOLINE, MVT::Other, Custom);
  setOperationAction(ISD::ADJUST_TRAMPOLINE, MVT::Other, Custom);

  // VASTART needs to be custom lowered to use the VarArgsFrameIndex.
  setOperationAction(ISD::VASTART, MVT::Other, Custom);

  if (Subtarget.isSVR4ABI()) {
    if (isPPC64) {
      // VAARG always uses double-word chunks, so promote anything smaller.
      setOperationAction(ISD::VAARG, MVT::i1, Promote);
      AddPromotedToType(ISD::VAARG, MVT::i1, MVT::i64);
      setOperationAction(ISD::VAARG, MVT::i8, Promote);
      AddPromotedToType(ISD::VAARG, MVT::i8, MVT::i64);
      setOperationAction(ISD::VAARG, MVT::i16, Promote);
      AddPromotedToType(ISD::VAARG, MVT::i16, MVT::i64);
      setOperationAction(ISD::VAARG, MVT::i32, Promote);
      AddPromotedToType(ISD::VAARG, MVT::i32, MVT::i64);
      setOperationAction(ISD::VAARG, MVT::Other, Expand);
    } else {
      // VAARG is custom lowered with the 32-bit SVR4 ABI.
      setOperationAction(ISD::VAARG, MVT::Other, Custom);
      setOperationAction(ISD::VAARG, MVT::i64, Custom);
    }
  } else
    setOperationAction(ISD::VAARG, MVT::Other, Expand);

  if (Subtarget.isSVR4ABI() && !isPPC64)
    // VACOPY is custom lowered with the 32-bit SVR4 ABI.
    setOperationAction(ISD::VACOPY, MVT::Other, Custom);
  else
    setOperationAction(ISD::VACOPY, MVT::Other, Expand);

  // Use the default implementation.
  setOperationAction(ISD::VAEND, MVT::Other, Expand);
  setOperationAction(ISD::STACKSAVE, MVT::Other, Expand);
  setOperationAction(ISD::STACKRESTORE, MVT::Other, Custom);
  setOperationAction(ISD::DYNAMIC_STACKALLOC, MVT::i32, Custom);
  setOperationAction(ISD::DYNAMIC_STACKALLOC, MVT::i64, Custom);
  setOperationAction(ISD::GET_DYNAMIC_AREA_OFFSET, MVT::i32, Custom);
  setOperationAction(ISD::GET_DYNAMIC_AREA_OFFSET, MVT::i64, Custom);
  setOperationAction(ISD::EH_DWARF_CFA, MVT::i32, Custom);
  setOperationAction(ISD::EH_DWARF_CFA, MVT::i64, Custom);

  // We want to custom lower some of our intrinsics.
  setOperationAction(ISD::INTRINSIC_WO_CHAIN, MVT::Other, Custom);

  // To handle counter-based loop conditions.
  setOperationAction(ISD::INTRINSIC_W_CHAIN, MVT::i1, Custom);

  setOperationAction(ISD::INTRINSIC_VOID, MVT::i8, Custom);
  setOperationAction(ISD::INTRINSIC_VOID, MVT::i16, Custom);
  setOperationAction(ISD::INTRINSIC_VOID, MVT::i32, Custom);
  setOperationAction(ISD::INTRINSIC_VOID, MVT::Other, Custom);

  // Comparisons that require checking two conditions.
  if (Subtarget.hasSPE()) {
    setCondCodeAction(ISD::SETO, MVT::f32, Expand);
    setCondCodeAction(ISD::SETO, MVT::f64, Expand);
    setCondCodeAction(ISD::SETUO, MVT::f32, Expand);
    setCondCodeAction(ISD::SETUO, MVT::f64, Expand);
  }
  setCondCodeAction(ISD::SETULT, MVT::f32, Expand);
  setCondCodeAction(ISD::SETULT, MVT::f64, Expand);
  setCondCodeAction(ISD::SETUGT, MVT::f32, Expand);
  setCondCodeAction(ISD::SETUGT, MVT::f64, Expand);
  setCondCodeAction(ISD::SETUEQ, MVT::f32, Expand);
  setCondCodeAction(ISD::SETUEQ, MVT::f64, Expand);
  setCondCodeAction(ISD::SETOGE, MVT::f32, Expand);
  setCondCodeAction(ISD::SETOGE, MVT::f64, Expand);
  setCondCodeAction(ISD::SETOLE, MVT::f32, Expand);
  setCondCodeAction(ISD::SETOLE, MVT::f64, Expand);
  setCondCodeAction(ISD::SETONE, MVT::f32, Expand);
  setCondCodeAction(ISD::SETONE, MVT::f64, Expand);

  if (Subtarget.has64BitSupport()) {
    // They also have instructions for converting between i64 and fp.
    setOperationAction(ISD::FP_TO_SINT, MVT::i64, Custom);
    setOperationAction(ISD::FP_TO_UINT, MVT::i64, Expand);
    setOperationAction(ISD::SINT_TO_FP, MVT::i64, Custom);
    setOperationAction(ISD::UINT_TO_FP, MVT::i64, Expand);
    // This is just the low 32 bits of a (signed) fp->i64 conversion.
    // We cannot do this with Promote because i64 is not a legal type.
    setOperationAction(ISD::FP_TO_UINT, MVT::i32, Custom);
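    // Conceptually, for in-range inputs:
    //   fp_to_uint i32 x  ==  lower 32 bits of (fp_to_sint i64 x)
    // which is why the custom lowering can reuse the signed i64 conversion.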

    if (Subtarget.hasLFIWAX() || Subtarget.isPPC64())
      setOperationAction(ISD::SINT_TO_FP, MVT::i32, Custom);
  } else {
    // PowerPC does not have FP_TO_UINT on 32-bit implementations.
    if (Subtarget.hasSPE())
      setOperationAction(ISD::FP_TO_UINT, MVT::i32, Legal);
    else
      setOperationAction(ISD::FP_TO_UINT, MVT::i32, Expand);
  }

  // With the instructions enabled under FPCVT, we can do everything.
  if (Subtarget.hasFPCVT()) {
    if (Subtarget.has64BitSupport()) {
      setOperationAction(ISD::FP_TO_SINT, MVT::i64, Custom);
      setOperationAction(ISD::FP_TO_UINT, MVT::i64, Custom);
      setOperationAction(ISD::SINT_TO_FP, MVT::i64, Custom);
      setOperationAction(ISD::UINT_TO_FP, MVT::i64, Custom);
    }

    setOperationAction(ISD::FP_TO_SINT, MVT::i32, Custom);
    setOperationAction(ISD::FP_TO_UINT, MVT::i32, Custom);
    setOperationAction(ISD::SINT_TO_FP, MVT::i32, Custom);
    setOperationAction(ISD::UINT_TO_FP, MVT::i32, Custom);
  }

  if (Subtarget.use64BitRegs()) {
    // 64-bit PowerPC implementations can support i64 types directly.
    addRegisterClass(MVT::i64, &PPC::G8RCRegClass);
    // BUILD_PAIR can't be handled natively, and should be expanded to shl/or.
    setOperationAction(ISD::BUILD_PAIR, MVT::i64, Expand);
    // 64-bit PowerPC wants to expand i128 shifts itself.
    setOperationAction(ISD::SHL_PARTS, MVT::i64, Custom);
    setOperationAction(ISD::SRA_PARTS, MVT::i64, Custom);
    setOperationAction(ISD::SRL_PARTS, MVT::i64, Custom);
  } else {
    // 32-bit PowerPC wants to expand i64 shifts itself.
    setOperationAction(ISD::SHL_PARTS, MVT::i32, Custom);
    setOperationAction(ISD::SRA_PARTS, MVT::i32, Custom);
    setOperationAction(ISD::SRL_PARTS, MVT::i32, Custom);
  }

  if (Subtarget.hasAltivec()) {
    // First set operation action for all vector types to expand. Then we
    // will selectively turn on ones that can be effectively codegen'd.
    for (MVT VT : MVT::vector_valuetypes()) {
      // add/sub are legal for all supported vector VT's.
      setOperationAction(ISD::ADD, VT, Legal);
      setOperationAction(ISD::SUB, VT, Legal);

      // For v2i64, these are only valid with P8Vector. This is corrected after
      // the loop.
      setOperationAction(ISD::SMAX, VT, Legal);
      setOperationAction(ISD::SMIN, VT, Legal);
      setOperationAction(ISD::UMAX, VT, Legal);
      setOperationAction(ISD::UMIN, VT, Legal);

      if (Subtarget.hasVSX()) {
        setOperationAction(ISD::FMAXNUM, VT, Legal);
        setOperationAction(ISD::FMINNUM, VT, Legal);
      }

      // Vector instructions introduced in P8
      if (Subtarget.hasP8Altivec() && (VT.SimpleTy != MVT::v1i128)) {
        setOperationAction(ISD::CTPOP, VT, Legal);
        setOperationAction(ISD::CTLZ, VT, Legal);
      } else {
        setOperationAction(ISD::CTPOP, VT, Expand);
        setOperationAction(ISD::CTLZ, VT, Expand);
      }

      // Vector instructions introduced in P9
      if (Subtarget.hasP9Altivec() && (VT.SimpleTy != MVT::v1i128))
        setOperationAction(ISD::CTTZ, VT, Legal);
      else
        setOperationAction(ISD::CTTZ, VT, Expand);

      // We promote all shuffles to v16i8.
      setOperationAction(ISD::VECTOR_SHUFFLE, VT, Promote);
      AddPromotedToType(ISD::VECTOR_SHUFFLE, VT, MVT::v16i8);

      // We promote all non-typed operations to v4i32.
      setOperationAction(ISD::AND, VT, Promote);
      AddPromotedToType(ISD::AND, VT, MVT::v4i32);
      setOperationAction(ISD::OR, VT, Promote);
      AddPromotedToType(ISD::OR, VT, MVT::v4i32);
      setOperationAction(ISD::XOR, VT, Promote);
      AddPromotedToType(ISD::XOR, VT, MVT::v4i32);
      setOperationAction(ISD::LOAD, VT, Promote);
      AddPromotedToType(ISD::LOAD, VT, MVT::v4i32);
      setOperationAction(ISD::SELECT, VT, Promote);
      AddPromotedToType(ISD::SELECT, VT, MVT::v4i32);
      setOperationAction(ISD::VSELECT, VT, Legal);
      setOperationAction(ISD::SELECT_CC, VT, Promote);
      AddPromotedToType(ISD::SELECT_CC, VT, MVT::v4i32);
      setOperationAction(ISD::STORE, VT, Promote);
      AddPromotedToType(ISD::STORE, VT, MVT::v4i32);

      // No other operations are legal.
      setOperationAction(ISD::MUL, VT, Expand);
      setOperationAction(ISD::SDIV, VT, Expand);
      setOperationAction(ISD::SREM, VT, Expand);
      setOperationAction(ISD::UDIV, VT, Expand);
      setOperationAction(ISD::UREM, VT, Expand);
      setOperationAction(ISD::FDIV, VT, Expand);
      setOperationAction(ISD::FREM, VT, Expand);
      setOperationAction(ISD::FNEG, VT, Expand);
      setOperationAction(ISD::FSQRT, VT, Expand);
      setOperationAction(ISD::FLOG, VT, Expand);
      setOperationAction(ISD::FLOG10, VT, Expand);
      setOperationAction(ISD::FLOG2, VT, Expand);
      setOperationAction(ISD::FEXP, VT, Expand);
      setOperationAction(ISD::FEXP2, VT, Expand);
      setOperationAction(ISD::FSIN, VT, Expand);
      setOperationAction(ISD::FCOS, VT, Expand);
      setOperationAction(ISD::FABS, VT, Expand);
      setOperationAction(ISD::FFLOOR, VT, Expand);
      setOperationAction(ISD::FCEIL,  VT, Expand);
      setOperationAction(ISD::FTRUNC, VT, Expand);
      setOperationAction(ISD::FRINT,  VT, Expand);
      setOperationAction(ISD::FNEARBYINT, VT, Expand);
      setOperationAction(ISD::EXTRACT_VECTOR_ELT, VT, Expand);
      setOperationAction(ISD::INSERT_VECTOR_ELT, VT, Expand);
      setOperationAction(ISD::BUILD_VECTOR, VT, Expand);
      setOperationAction(ISD::MULHU, VT, Expand);
      setOperationAction(ISD::MULHS, VT, Expand);
      setOperationAction(ISD::UMUL_LOHI, VT, Expand);
      setOperationAction(ISD::SMUL_LOHI, VT, Expand);
      setOperationAction(ISD::UDIVREM, VT, Expand);
      setOperationAction(ISD::SDIVREM, VT, Expand);
      setOperationAction(ISD::SCALAR_TO_VECTOR, VT, Expand);
      setOperationAction(ISD::FPOW, VT, Expand);
      setOperationAction(ISD::BSWAP, VT, Expand);
      setOperationAction(ISD::SIGN_EXTEND_INREG, VT, Expand);
      setOperationAction(ISD::ROTL, VT, Expand);
      setOperationAction(ISD::ROTR, VT, Expand);

      for (MVT InnerVT : MVT::vector_valuetypes()) {
        setTruncStoreAction(VT, InnerVT, Expand);
        setLoadExtAction(ISD::SEXTLOAD, VT, InnerVT, Expand);
        setLoadExtAction(ISD::ZEXTLOAD, VT, InnerVT, Expand);
        setLoadExtAction(ISD::EXTLOAD, VT, InnerVT, Expand);
      }
    }
    if (!Subtarget.hasP8Vector()) {
      setOperationAction(ISD::SMAX, MVT::v2i64, Expand);
      setOperationAction(ISD::SMIN, MVT::v2i64, Expand);
      setOperationAction(ISD::UMAX, MVT::v2i64, Expand);
      setOperationAction(ISD::UMIN, MVT::v2i64, Expand);
    }

    for (auto VT : {MVT::v2i64, MVT::v4i32, MVT::v8i16, MVT::v16i8})
      setOperationAction(ISD::ABS, VT, Custom);

    // We can custom expand all VECTOR_SHUFFLEs to VPERM; others we can handle
    // with merges, splats, etc.
    setOperationAction(ISD::VECTOR_SHUFFLE, MVT::v16i8, Custom);

    // Vector truncates to sub-word integers that fit in an Altivec/VSX
    // register are cheap, so handle them before they get expanded to scalar.
    setOperationAction(ISD::TRUNCATE, MVT::v8i8, Custom);
    setOperationAction(ISD::TRUNCATE, MVT::v4i8, Custom);
    setOperationAction(ISD::TRUNCATE, MVT::v2i8, Custom);
    setOperationAction(ISD::TRUNCATE, MVT::v4i16, Custom);
    setOperationAction(ISD::TRUNCATE, MVT::v2i16, Custom);
    setOperationAction(ISD::AND, MVT::v4i32, Legal);
    setOperationAction(ISD::OR, MVT::v4i32, Legal);
    setOperationAction(ISD::XOR, MVT::v4i32, Legal);
    setOperationAction(ISD::LOAD, MVT::v4i32, Legal);
    setOperationAction(ISD::SELECT, MVT::v4i32,
                       Subtarget.useCRBits() ? Legal : Expand);
    setOperationAction(ISD::STORE, MVT::v4i32, Legal);
    setOperationAction(ISD::FP_TO_SINT, MVT::v4i32, Legal);
    setOperationAction(ISD::FP_TO_UINT, MVT::v4i32, Legal);
    setOperationAction(ISD::SINT_TO_FP, MVT::v4i32, Legal);
    setOperationAction(ISD::UINT_TO_FP, MVT::v4i32, Legal);
    setOperationAction(ISD::FFLOOR, MVT::v4f32, Legal);
    setOperationAction(ISD::FCEIL, MVT::v4f32, Legal);
    setOperationAction(ISD::FTRUNC, MVT::v4f32, Legal);
    setOperationAction(ISD::FNEARBYINT, MVT::v4f32, Legal);

    // Without hasP8Altivec set, v2i64 SMAX isn't available.
    // But ABS custom lowering requires SMAX support.
    if (!Subtarget.hasP8Altivec())
      setOperationAction(ISD::ABS, MVT::v2i64, Expand);

    addRegisterClass(MVT::v4f32, &PPC::VRRCRegClass);
    addRegisterClass(MVT::v4i32, &PPC::VRRCRegClass);
    addRegisterClass(MVT::v8i16, &PPC::VRRCRegClass);
    addRegisterClass(MVT::v16i8, &PPC::VRRCRegClass);

    setOperationAction(ISD::MUL, MVT::v4f32, Legal);
    setOperationAction(ISD::FMA, MVT::v4f32, Legal);

    if (TM.Options.UnsafeFPMath || Subtarget.hasVSX()) {
      setOperationAction(ISD::FDIV, MVT::v4f32, Legal);
      setOperationAction(ISD::FSQRT, MVT::v4f32, Legal);
    }

    if (Subtarget.hasP8Altivec())
      setOperationAction(ISD::MUL, MVT::v4i32, Legal);
    else
      setOperationAction(ISD::MUL, MVT::v4i32, Custom);

    setOperationAction(ISD::MUL, MVT::v8i16, Custom);
    setOperationAction(ISD::MUL, MVT::v16i8, Custom);

    setOperationAction(ISD::SCALAR_TO_VECTOR, MVT::v4f32, Custom);
    setOperationAction(ISD::SCALAR_TO_VECTOR, MVT::v4i32, Custom);

    setOperationAction(ISD::BUILD_VECTOR, MVT::v16i8, Custom);
    setOperationAction(ISD::BUILD_VECTOR, MVT::v8i16, Custom);
    setOperationAction(ISD::BUILD_VECTOR, MVT::v4i32, Custom);
    setOperationAction(ISD::BUILD_VECTOR, MVT::v4f32, Custom);

    // Altivec does not contain unordered floating-point compare instructions
    setCondCodeAction(ISD::SETUO, MVT::v4f32, Expand);
    setCondCodeAction(ISD::SETUEQ, MVT::v4f32, Expand);
    setCondCodeAction(ISD::SETO,   MVT::v4f32, Expand);
    setCondCodeAction(ISD::SETONE, MVT::v4f32, Expand);

    if (Subtarget.hasVSX()) {
      setOperationAction(ISD::SCALAR_TO_VECTOR, MVT::v2f64, Legal);
      setOperationAction(ISD::EXTRACT_VECTOR_ELT, MVT::v2f64, Legal);
      if (Subtarget.hasP8Vector()) {
        setOperationAction(ISD::SCALAR_TO_VECTOR, MVT::v4f32, Legal);
        setOperationAction(ISD::EXTRACT_VECTOR_ELT, MVT::v4f32, Legal);
      }
      if (Subtarget.hasDirectMove() && isPPC64) {
        setOperationAction(ISD::SCALAR_TO_VECTOR, MVT::v16i8, Legal);
        setOperationAction(ISD::SCALAR_TO_VECTOR, MVT::v8i16, Legal);
        setOperationAction(ISD::SCALAR_TO_VECTOR, MVT::v4i32, Legal);
        setOperationAction(ISD::SCALAR_TO_VECTOR, MVT::v2i64, Legal);
        setOperationAction(ISD::EXTRACT_VECTOR_ELT, MVT::v16i8, Legal);
        setOperationAction(ISD::EXTRACT_VECTOR_ELT, MVT::v8i16, Legal);
        setOperationAction(ISD::EXTRACT_VECTOR_ELT, MVT::v4i32, Legal);
        setOperationAction(ISD::EXTRACT_VECTOR_ELT, MVT::v2i64, Legal);
      }
      setOperationAction(ISD::EXTRACT_VECTOR_ELT, MVT::v2f64, Legal);

      setOperationAction(ISD::FFLOOR, MVT::v2f64, Legal);
      setOperationAction(ISD::FCEIL, MVT::v2f64, Legal);
      setOperationAction(ISD::FTRUNC, MVT::v2f64, Legal);
      setOperationAction(ISD::FNEARBYINT, MVT::v2f64, Legal);
      setOperationAction(ISD::FROUND, MVT::v2f64, Legal);

      setOperationAction(ISD::FROUND, MVT::v4f32, Legal);

      setOperationAction(ISD::MUL, MVT::v2f64, Legal);
      setOperationAction(ISD::FMA, MVT::v2f64, Legal);

      setOperationAction(ISD::FDIV, MVT::v2f64, Legal);
      setOperationAction(ISD::FSQRT, MVT::v2f64, Legal);

      // Share the Altivec comparison restrictions.
      setCondCodeAction(ISD::SETUO, MVT::v2f64, Expand);
      setCondCodeAction(ISD::SETUEQ, MVT::v2f64, Expand);
      setCondCodeAction(ISD::SETO,   MVT::v2f64, Expand);
      setCondCodeAction(ISD::SETONE, MVT::v2f64, Expand);

      setOperationAction(ISD::LOAD, MVT::v2f64, Legal);
      setOperationAction(ISD::STORE, MVT::v2f64, Legal);

      setOperationAction(ISD::VECTOR_SHUFFLE, MVT::v2f64, Legal);

      if (Subtarget.hasP8Vector())
        addRegisterClass(MVT::f32, &PPC::VSSRCRegClass);

      addRegisterClass(MVT::f64, &PPC::VSFRCRegClass);

      addRegisterClass(MVT::v4i32, &PPC::VSRCRegClass);
      addRegisterClass(MVT::v4f32, &PPC::VSRCRegClass);
      addRegisterClass(MVT::v2f64, &PPC::VSRCRegClass);

      if (Subtarget.hasP8Altivec()) {
        setOperationAction(ISD::SHL, MVT::v2i64, Legal);
        setOperationAction(ISD::SRA, MVT::v2i64, Legal);
        setOperationAction(ISD::SRL, MVT::v2i64, Legal);

        // 128-bit shifts can be accomplished via 3 instructions for SHL and
        // SRL, but not for SRA because of the instructions available:
        // VS{RL} and VS{RL}O. However, due to direct move costs, it's not
        // worth doing.
        setOperationAction(ISD::SHL, MVT::v1i128, Expand);
        setOperationAction(ISD::SRL, MVT::v1i128, Expand);
        setOperationAction(ISD::SRA, MVT::v1i128, Expand);

        setOperationAction(ISD::SETCC, MVT::v2i64, Legal);
      } else {
        setOperationAction(ISD::SHL, MVT::v2i64, Expand);
        setOperationAction(ISD::SRA, MVT::v2i64, Expand);
        setOperationAction(ISD::SRL, MVT::v2i64, Expand);

        setOperationAction(ISD::SETCC, MVT::v2i64, Custom);

        // VSX v2i64 only supports non-arithmetic operations.
        setOperationAction(ISD::ADD, MVT::v2i64, Expand);
        setOperationAction(ISD::SUB, MVT::v2i64, Expand);
      }

      setOperationAction(ISD::LOAD, MVT::v2i64, Promote);
      AddPromotedToType(ISD::LOAD, MVT::v2i64, MVT::v2f64);
      setOperationAction(ISD::STORE, MVT::v2i64, Promote);
      AddPromotedToType(ISD::STORE, MVT::v2i64, MVT::v2f64);

      setOperationAction(ISD::VECTOR_SHUFFLE, MVT::v2i64, Legal);

      setOperationAction(ISD::SINT_TO_FP, MVT::v2i64, Legal);
      setOperationAction(ISD::UINT_TO_FP, MVT::v2i64, Legal);
      setOperationAction(ISD::FP_TO_SINT, MVT::v2i64, Legal);
      setOperationAction(ISD::FP_TO_UINT, MVT::v2i64, Legal);

      // Custom handling for partial vectors of integers converted to
      // floating point. We already have optimal handling for v2i32 through
      // the DAG combine, so those aren't necessary.
      setOperationAction(ISD::UINT_TO_FP, MVT::v2i8, Custom);
      setOperationAction(ISD::UINT_TO_FP, MVT::v4i8, Custom);
      setOperationAction(ISD::UINT_TO_FP, MVT::v2i16, Custom);
      setOperationAction(ISD::UINT_TO_FP, MVT::v4i16, Custom);
      setOperationAction(ISD::SINT_TO_FP, MVT::v2i8, Custom);
      setOperationAction(ISD::SINT_TO_FP, MVT::v4i8, Custom);
      setOperationAction(ISD::SINT_TO_FP, MVT::v2i16, Custom);
      setOperationAction(ISD::SINT_TO_FP, MVT::v4i16, Custom);

      setOperationAction(ISD::FNEG, MVT::v4f32, Legal);
      setOperationAction(ISD::FNEG, MVT::v2f64, Legal);
      setOperationAction(ISD::FABS, MVT::v4f32, Legal);
      setOperationAction(ISD::FABS, MVT::v2f64, Legal);
      setOperationAction(ISD::FCOPYSIGN, MVT::v4f32, Legal);
      setOperationAction(ISD::FCOPYSIGN, MVT::v2f64, Legal);

      if (Subtarget.hasDirectMove())
        setOperationAction(ISD::BUILD_VECTOR, MVT::v2i64, Custom);
      setOperationAction(ISD::BUILD_VECTOR, MVT::v2f64, Custom);

      addRegisterClass(MVT::v2i64, &PPC::VSRCRegClass);
    }

    if (Subtarget.hasP8Altivec()) {
      addRegisterClass(MVT::v2i64, &PPC::VRRCRegClass);
      addRegisterClass(MVT::v1i128, &PPC::VRRCRegClass);
    }

    if (Subtarget.hasP9Vector()) {
      setOperationAction(ISD::INSERT_VECTOR_ELT, MVT::v4i32, Custom);
      setOperationAction(ISD::INSERT_VECTOR_ELT, MVT::v4f32, Custom);

      // 128-bit shifts can be accomplished via 3 instructions for SHL and
      // SRL, but not for SRA because of the instructions available:
      // VS{RL} and VS{RL}O.
      setOperationAction(ISD::SHL, MVT::v1i128, Legal);
      setOperationAction(ISD::SRL, MVT::v1i128, Legal);
      setOperationAction(ISD::SRA, MVT::v1i128, Expand);
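      // An illustrative sketch (assumed, not taken from the ISA text) of the
      // 3-instruction SHL: splat the shift amount into every byte of a vector
      // (e.g. vspltb), then vslo shifts by whole octets and vsl shifts the
      // remaining 0-7 bits; SRL uses vsro/vsr analogously.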

      if (EnableQuadPrecision) {
        addRegisterClass(MVT::f128, &PPC::VRRCRegClass);
        setOperationAction(ISD::FADD, MVT::f128, Legal);
        setOperationAction(ISD::FSUB, MVT::f128, Legal);
        setOperationAction(ISD::FDIV, MVT::f128, Legal);
        setOperationAction(ISD::FMUL, MVT::f128, Legal);
        setOperationAction(ISD::FP_EXTEND, MVT::f128, Legal);
        // No extending loads to f128 on PPC.
        for (MVT FPT : MVT::fp_valuetypes())
          setLoadExtAction(ISD::EXTLOAD, MVT::f128, FPT, Expand);
        setOperationAction(ISD::FMA, MVT::f128, Legal);
        setCondCodeAction(ISD::SETULT, MVT::f128, Expand);
        setCondCodeAction(ISD::SETUGT, MVT::f128, Expand);
        setCondCodeAction(ISD::SETUEQ, MVT::f128, Expand);
        setCondCodeAction(ISD::SETOGE, MVT::f128, Expand);
        setCondCodeAction(ISD::SETOLE, MVT::f128, Expand);
        setCondCodeAction(ISD::SETONE, MVT::f128, Expand);

        setOperationAction(ISD::FTRUNC, MVT::f128, Legal);
        setOperationAction(ISD::FRINT, MVT::f128, Legal);
        setOperationAction(ISD::FFLOOR, MVT::f128, Legal);
        setOperationAction(ISD::FCEIL, MVT::f128, Legal);
        setOperationAction(ISD::FNEARBYINT, MVT::f128, Legal);
        setOperationAction(ISD::FROUND, MVT::f128, Legal);

        setOperationAction(ISD::SELECT, MVT::f128, Expand);
        setOperationAction(ISD::FP_ROUND, MVT::f64, Legal);
        setOperationAction(ISD::FP_ROUND, MVT::f32, Legal);
        setTruncStoreAction(MVT::f128, MVT::f64, Expand);
        setTruncStoreAction(MVT::f128, MVT::f32, Expand);
        setOperationAction(ISD::BITCAST, MVT::i128, Custom);
        // No implementation for these ops for PowerPC.
        setOperationAction(ISD::FSIN, MVT::f128, Expand);
        setOperationAction(ISD::FCOS, MVT::f128, Expand);
        setOperationAction(ISD::FPOW, MVT::f128, Expand);
        setOperationAction(ISD::FPOWI, MVT::f128, Expand);
        setOperationAction(ISD::FREM, MVT::f128, Expand);
      }
      setOperationAction(ISD::FP_EXTEND, MVT::v2f32, Custom);
    }

    if (Subtarget.hasP9Altivec()) {
      setOperationAction(ISD::INSERT_VECTOR_ELT, MVT::v8i16, Custom);
      setOperationAction(ISD::INSERT_VECTOR_ELT, MVT::v16i8, Custom);
    }
  }

  if (Subtarget.hasQPX()) {
    setOperationAction(ISD::FADD, MVT::v4f64, Legal);
    setOperationAction(ISD::FSUB, MVT::v4f64, Legal);
    setOperationAction(ISD::FMUL, MVT::v4f64, Legal);
    setOperationAction(ISD::FREM, MVT::v4f64, Expand);

    setOperationAction(ISD::FCOPYSIGN, MVT::v4f64, Legal);
    setOperationAction(ISD::FGETSIGN, MVT::v4f64, Expand);

    setOperationAction(ISD::LOAD, MVT::v4f64, Custom);
    setOperationAction(ISD::STORE, MVT::v4f64, Custom);

    setTruncStoreAction(MVT::v4f64, MVT::v4f32, Custom);
    setLoadExtAction(ISD::EXTLOAD, MVT::v4f64, MVT::v4f32, Custom);

    if (!Subtarget.useCRBits())
      setOperationAction(ISD::SELECT, MVT::v4f64, Expand);
    setOperationAction(ISD::VSELECT, MVT::v4f64, Legal);

    setOperationAction(ISD::EXTRACT_VECTOR_ELT, MVT::v4f64, Legal);
    setOperationAction(ISD::INSERT_VECTOR_ELT, MVT::v4f64, Expand);
    setOperationAction(ISD::CONCAT_VECTORS, MVT::v4f64, Expand);
    setOperationAction(ISD::EXTRACT_SUBVECTOR, MVT::v4f64, Expand);
    setOperationAction(ISD::VECTOR_SHUFFLE, MVT::v4f64, Custom);
    setOperationAction(ISD::SCALAR_TO_VECTOR, MVT::v4f64, Legal);
    setOperationAction(ISD::BUILD_VECTOR, MVT::v4f64, Custom);

    setOperationAction(ISD::FP_TO_SINT, MVT::v4f64, Legal);
    setOperationAction(ISD::FP_TO_UINT, MVT::v4f64, Expand);

    setOperationAction(ISD::FP_ROUND, MVT::v4f32, Legal);
    setOperationAction(ISD::FP_ROUND_INREG, MVT::v4f32, Expand);
    setOperationAction(ISD::FP_EXTEND, MVT::v4f64, Legal);

    setOperationAction(ISD::FNEG, MVT::v4f64, Legal);
    setOperationAction(ISD::FABS, MVT::v4f64, Legal);
    setOperationAction(ISD::FSIN, MVT::v4f64, Expand);
    setOperationAction(ISD::FCOS, MVT::v4f64, Expand);
    setOperationAction(ISD::FPOW, MVT::v4f64, Expand);
    setOperationAction(ISD::FLOG, MVT::v4f64, Expand);
    setOperationAction(ISD::FLOG2, MVT::v4f64, Expand);
    setOperationAction(ISD::FLOG10, MVT::v4f64, Expand);
    setOperationAction(ISD::FEXP, MVT::v4f64, Expand);
    setOperationAction(ISD::FEXP2, MVT::v4f64, Expand);

    setOperationAction(ISD::FMINNUM, MVT::v4f64, Legal);
    setOperationAction(ISD::FMAXNUM, MVT::v4f64, Legal);

    setIndexedLoadAction(ISD::PRE_INC, MVT::v4f64, Legal);
    setIndexedStoreAction(ISD::PRE_INC, MVT::v4f64, Legal);

    addRegisterClass(MVT::v4f64, &PPC::QFRCRegClass);

    setOperationAction(ISD::FADD, MVT::v4f32, Legal);
    setOperationAction(ISD::FSUB, MVT::v4f32, Legal);
    setOperationAction(ISD::FMUL, MVT::v4f32, Legal);
    setOperationAction(ISD::FREM, MVT::v4f32, Expand);

    setOperationAction(ISD::FCOPYSIGN, MVT::v4f32, Legal);
    setOperationAction(ISD::FGETSIGN, MVT::v4f32, Expand);

    setOperationAction(ISD::LOAD, MVT::v4f32, Custom);
    setOperationAction(ISD::STORE, MVT::v4f32, Custom);

    if (!Subtarget.useCRBits())
      setOperationAction(ISD::SELECT, MVT::v4f32, Expand);
    setOperationAction(ISD::VSELECT, MVT::v4f32, Legal);

    setOperationAction(ISD::EXTRACT_VECTOR_ELT, MVT::v4f32, Legal);
    setOperationAction(ISD::INSERT_VECTOR_ELT, MVT::v4f32, Expand);
    setOperationAction(ISD::CONCAT_VECTORS, MVT::v4f32, Expand);
    setOperationAction(ISD::EXTRACT_SUBVECTOR, MVT::v4f32, Expand);
    setOperationAction(ISD::VECTOR_SHUFFLE, MVT::v4f32, Custom);
    setOperationAction(ISD::SCALAR_TO_VECTOR, MVT::v4f32, Legal);
    setOperationAction(ISD::BUILD_VECTOR, MVT::v4f32, Custom);

    setOperationAction(ISD::FP_TO_SINT, MVT::v4f32, Legal);
    setOperationAction(ISD::FP_TO_UINT, MVT::v4f32, Expand);

    setOperationAction(ISD::FNEG, MVT::v4f32, Legal);
    setOperationAction(ISD::FABS, MVT::v4f32, Legal);
    setOperationAction(ISD::FSIN, MVT::v4f32, Expand);
    setOperationAction(ISD::FCOS, MVT::v4f32, Expand);
    setOperationAction(ISD::FPOW, MVT::v4f32, Expand);
    setOperationAction(ISD::FLOG, MVT::v4f32, Expand);
    setOperationAction(ISD::FLOG2, MVT::v4f32, Expand);
    setOperationAction(ISD::FLOG10, MVT::v4f32, Expand);
    setOperationAction(ISD::FEXP, MVT::v4f32, Expand);
    setOperationAction(ISD::FEXP2, MVT::v4f32, Expand);

    setOperationAction(ISD::FMINNUM, MVT::v4f32, Legal);
    setOperationAction(ISD::FMAXNUM, MVT::v4f32, Legal);

    setIndexedLoadAction(ISD::PRE_INC, MVT::v4f32, Legal);
    setIndexedStoreAction(ISD::PRE_INC, MVT::v4f32, Legal);

    addRegisterClass(MVT::v4f32, &PPC::QSRCRegClass);

    setOperationAction(ISD::AND, MVT::v4i1, Legal);
    setOperationAction(ISD::OR, MVT::v4i1, Legal);
    setOperationAction(ISD::XOR, MVT::v4i1, Legal);

    if (!Subtarget.useCRBits())
      setOperationAction(ISD::SELECT, MVT::v4i1, Expand);
    setOperationAction(ISD::VSELECT, MVT::v4i1, Legal);

    setOperationAction(ISD::LOAD, MVT::v4i1, Custom);
    setOperationAction(ISD::STORE, MVT::v4i1, Custom);

    setOperationAction(ISD::EXTRACT_VECTOR_ELT, MVT::v4i1, Custom);
    setOperationAction(ISD::INSERT_VECTOR_ELT, MVT::v4i1, Expand);
    setOperationAction(ISD::CONCAT_VECTORS, MVT::v4i1, Expand);
    setOperationAction(ISD::EXTRACT_SUBVECTOR, MVT::v4i1, Expand);
    setOperationAction(ISD::VECTOR_SHUFFLE, MVT::v4i1, Custom);
    setOperationAction(ISD::SCALAR_TO_VECTOR, MVT::v4i1, Expand);
    setOperationAction(ISD::BUILD_VECTOR, MVT::v4i1, Custom);

    setOperationAction(ISD::SINT_TO_FP, MVT::v4i1, Custom);
    setOperationAction(ISD::UINT_TO_FP, MVT::v4i1, Custom);

    addRegisterClass(MVT::v4i1, &PPC::QBRCRegClass);

    setOperationAction(ISD::FFLOOR, MVT::v4f64, Legal);
    setOperationAction(ISD::FCEIL,  MVT::v4f64, Legal);
    setOperationAction(ISD::FTRUNC, MVT::v4f64, Legal);
    setOperationAction(ISD::FROUND, MVT::v4f64, Legal);

    setOperationAction(ISD::FFLOOR, MVT::v4f32, Legal);
    setOperationAction(ISD::FCEIL,  MVT::v4f32, Legal);
    setOperationAction(ISD::FTRUNC, MVT::v4f32, Legal);
    setOperationAction(ISD::FROUND, MVT::v4f32, Legal);

    setOperationAction(ISD::FNEARBYINT, MVT::v4f64, Expand);
    setOperationAction(ISD::FNEARBYINT, MVT::v4f32, Expand);

    // These need to set FE_INEXACT, and so cannot be vectorized here.
    setOperationAction(ISD::FRINT, MVT::v4f64, Expand);
    setOperationAction(ISD::FRINT, MVT::v4f32, Expand);

    if (TM.Options.UnsafeFPMath) {
      setOperationAction(ISD::FDIV, MVT::v4f64, Legal);
      setOperationAction(ISD::FSQRT, MVT::v4f64, Legal);

      setOperationAction(ISD::FDIV, MVT::v4f32, Legal);
      setOperationAction(ISD::FSQRT, MVT::v4f32, Legal);
    } else {
      setOperationAction(ISD::FDIV, MVT::v4f64, Expand);
      setOperationAction(ISD::FSQRT, MVT::v4f64, Expand);

      setOperationAction(ISD::FDIV, MVT::v4f32, Expand);
      setOperationAction(ISD::FSQRT, MVT::v4f32, Expand);
    }
  }

  if (Subtarget.has64BitSupport())
    setOperationAction(ISD::PREFETCH, MVT::Other, Legal);

  setOperationAction(ISD::READCYCLECOUNTER, MVT::i64, isPPC64 ? Legal : Custom);

  if (!isPPC64) {
    setOperationAction(ISD::ATOMIC_LOAD,  MVT::i64, Expand);
    setOperationAction(ISD::ATOMIC_STORE, MVT::i64, Expand);
  }

  setBooleanContents(ZeroOrOneBooleanContent);

  if (Subtarget.hasAltivec()) {
    // Altivec instructions set fields to all zeros or all ones.
    setBooleanVectorContents(ZeroOrNegativeOneBooleanContent);
  }

  if (!isPPC64) {
    // These libcalls are not available in 32-bit.
    setLibcallName(RTLIB::SHL_I128, nullptr);
    setLibcallName(RTLIB::SRL_I128, nullptr);
    setLibcallName(RTLIB::SRA_I128, nullptr);
  }

  setStackPointerRegisterToSaveRestore(isPPC64 ? PPC::X1 : PPC::R1);

  // We have target-specific dag combine patterns for the following nodes:
  setTargetDAGCombine(ISD::ADD);
  setTargetDAGCombine(ISD::SHL);
  setTargetDAGCombine(ISD::SRA);
  setTargetDAGCombine(ISD::SRL);
  setTargetDAGCombine(ISD::MUL);
  setTargetDAGCombine(ISD::SINT_TO_FP);
  setTargetDAGCombine(ISD::BUILD_VECTOR);
  if (Subtarget.hasFPCVT())
    setTargetDAGCombine(ISD::UINT_TO_FP);
  setTargetDAGCombine(ISD::LOAD);
  setTargetDAGCombine(ISD::STORE);
  setTargetDAGCombine(ISD::BR_CC);
  if (Subtarget.useCRBits())
    setTargetDAGCombine(ISD::BRCOND);
  setTargetDAGCombine(ISD::BSWAP);
  setTargetDAGCombine(ISD::INTRINSIC_WO_CHAIN);
  setTargetDAGCombine(ISD::INTRINSIC_W_CHAIN);
  setTargetDAGCombine(ISD::INTRINSIC_VOID);

  setTargetDAGCombine(ISD::SIGN_EXTEND);
  setTargetDAGCombine(ISD::ZERO_EXTEND);
  setTargetDAGCombine(ISD::ANY_EXTEND);

  setTargetDAGCombine(ISD::TRUNCATE);
  setTargetDAGCombine(ISD::VECTOR_SHUFFLE);

  if (Subtarget.useCRBits()) {
    setTargetDAGCombine(ISD::TRUNCATE);
    setTargetDAGCombine(ISD::SETCC);
    setTargetDAGCombine(ISD::SELECT_CC);
  }

  // Use reciprocal estimates.
  if (TM.Options.UnsafeFPMath) {
    setTargetDAGCombine(ISD::FDIV);
    setTargetDAGCombine(ISD::FSQRT);
  }

  if (Subtarget.hasP9Altivec()) {
    setTargetDAGCombine(ISD::ABS);
    setTargetDAGCombine(ISD::VSELECT);
  }

  // Darwin long double math library functions have $LDBL128 appended.
  if (Subtarget.isDarwin()) {
    setLibcallName(RTLIB::COS_PPCF128, "cosl$LDBL128");
    setLibcallName(RTLIB::POW_PPCF128, "powl$LDBL128");
    setLibcallName(RTLIB::REM_PPCF128, "fmodl$LDBL128");
    setLibcallName(RTLIB::SIN_PPCF128, "sinl$LDBL128");
    setLibcallName(RTLIB::SQRT_PPCF128, "sqrtl$LDBL128");
    setLibcallName(RTLIB::LOG_PPCF128, "logl$LDBL128");
    setLibcallName(RTLIB::LOG2_PPCF128, "log2l$LDBL128");
    setLibcallName(RTLIB::LOG10_PPCF128, "log10l$LDBL128");
    setLibcallName(RTLIB::EXP_PPCF128, "expl$LDBL128");
    setLibcallName(RTLIB::EXP2_PPCF128, "exp2l$LDBL128");
  }

  if (EnableQuadPrecision) {
    setLibcallName(RTLIB::LOG_F128, "logf128");
    setLibcallName(RTLIB::LOG2_F128, "log2f128");
    setLibcallName(RTLIB::LOG10_F128, "log10f128");
    setLibcallName(RTLIB::EXP_F128, "expf128");
    setLibcallName(RTLIB::EXP2_F128, "exp2f128");
    setLibcallName(RTLIB::SIN_F128, "sinf128");
    setLibcallName(RTLIB::COS_F128, "cosf128");
    setLibcallName(RTLIB::POW_F128, "powf128");
    setLibcallName(RTLIB::FMIN_F128, "fminf128");
    setLibcallName(RTLIB::FMAX_F128, "fmaxf128");
    setLibcallName(RTLIB::POWI_F128, "__powikf2");
    setLibcallName(RTLIB::REM_F128, "fmodf128");
  }

  // With 32 condition bits, we don't need to sink (and duplicate) compares
  // aggressively in CodeGenPrep.
  if (Subtarget.useCRBits()) {
    setHasMultipleConditionRegisters();
    setJumpIsExpensive();
  }

  setMinFunctionAlignment(2);
  if (Subtarget.isDarwin())
    setPrefFunctionAlignment(4);

  switch (Subtarget.getDarwinDirective()) {
  default: break;
  case PPC::DIR_970:
  case PPC::DIR_A2:
  case PPC::DIR_E500:
  case PPC::DIR_E500mc:
  case PPC::DIR_E5500:
  case PPC::DIR_PWR4:
  case PPC::DIR_PWR5:
  case PPC::DIR_PWR5X:
  case PPC::DIR_PWR6:
  case PPC::DIR_PWR6X:
  case PPC::DIR_PWR7:
  case PPC::DIR_PWR8:
  case PPC::DIR_PWR9:
    setPrefFunctionAlignment(4);
    setPrefLoopAlignment(4);
    break;
  }

  if (Subtarget.enableMachineScheduler())
    setSchedulingPreference(Sched::Source);
  else
    setSchedulingPreference(Sched::Hybrid);

  computeRegisterProperties(STI.getRegisterInfo());

  // The Freescale cores do better with aggressive inlining of memcpy and
  // friends. GCC uses the same threshold of 128 bytes (= 32 word stores).
  if (Subtarget.getDarwinDirective() == PPC::DIR_E500mc ||
      Subtarget.getDarwinDirective() == PPC::DIR_E5500) {
    MaxStoresPerMemset = 32;
    MaxStoresPerMemsetOptSize = 16;
    MaxStoresPerMemcpy = 32;
    MaxStoresPerMemcpyOptSize = 8;
    MaxStoresPerMemmove = 32;
    MaxStoresPerMemmoveOptSize = 8;
  } else if (Subtarget.getDarwinDirective() == PPC::DIR_A2) {
    // The A2 also benefits from (very) aggressive inlining of memcpy and
    // friends. The overhead of the function call, even when warm, can be
    // over one hundred cycles.
    MaxStoresPerMemset = 128;
    MaxStoresPerMemcpy = 128;
    MaxStoresPerMemmove = 128;
    MaxLoadsPerMemcmp = 128;
  } else {
    MaxLoadsPerMemcmp = 8;
    MaxLoadsPerMemcmpOptSize = 4;
  }
}

/// getMaxByValAlign - Helper for getByValTypeAlignment to determine
/// the desired ByVal argument alignment.
static void getMaxByValAlign(Type *Ty, unsigned &MaxAlign,
                             unsigned MaxMaxAlign) {
  if (MaxAlign == MaxMaxAlign)
    return;
  if (VectorType *VTy = dyn_cast<VectorType>(Ty)) {
    if (MaxMaxAlign >= 32 && VTy->getBitWidth() >= 256)
      MaxAlign = 32;
    else if (VTy->getBitWidth() >= 128 && MaxAlign < 16)
      MaxAlign = 16;
  } else if (ArrayType *ATy = dyn_cast<ArrayType>(Ty)) {
    unsigned EltAlign = 0;
    getMaxByValAlign(ATy->getElementType(), EltAlign, MaxMaxAlign);
    if (EltAlign > MaxAlign)
      MaxAlign = EltAlign;
  } else if (StructType *STy = dyn_cast<StructType>(Ty)) {
    for (auto *EltTy : STy->elements()) {
      unsigned EltAlign = 0;
      getMaxByValAlign(EltTy, EltAlign, MaxMaxAlign);
      if (EltAlign > MaxAlign)
        MaxAlign = EltAlign;
      if (MaxAlign == MaxMaxAlign)
        break;
    }
  }
}
1259 
1260 /// getByValTypeAlignment - Return the desired alignment for ByVal aggregate
1261 /// function arguments in the caller parameter area.
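/// For illustration (not an exhaustive specification): on a 64-bit Altivec
/// subtarget, a struct containing a <4 x i32> member is placed on a 16-byte
/// boundary, while a struct of plain scalars keeps the default 8-byte (PPC64)
/// or 4-byte (PPC32) boundary computed below.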
1262 unsigned PPCTargetLowering::getByValTypeAlignment(Type *Ty,
1263                                                   const DataLayout &DL) const {
  // Darwin passes everything on a 4-byte boundary.
1265   if (Subtarget.isDarwin())
1266     return 4;
1267 
  // 16-byte and wider vectors are passed on a 16-byte boundary. Everything
  // else is passed on an 8-byte boundary on PPC64 and a 4-byte boundary on
  // PPC32.
1270   unsigned Align = Subtarget.isPPC64() ? 8 : 4;
1271   if (Subtarget.hasAltivec() || Subtarget.hasQPX())
1272     getMaxByValAlign(Ty, Align, Subtarget.hasQPX() ? 32 : 16);
1273   return Align;
1274 }
1275 
1276 bool PPCTargetLowering::useSoftFloat() const {
1277   return Subtarget.useSoftFloat();
1278 }
1279 
1280 bool PPCTargetLowering::hasSPE() const {
1281   return Subtarget.hasSPE();
1282 }
1283 
1284 bool PPCTargetLowering::preferIncOfAddToSubOfNot(EVT VT) const {
1285   return VT.isScalarInteger();
1286 }
1287 
1288 const char *PPCTargetLowering::getTargetNodeName(unsigned Opcode) const {
1289   switch ((PPCISD::NodeType)Opcode) {
1290   case PPCISD::FIRST_NUMBER:    break;
1291   case PPCISD::FSEL:            return "PPCISD::FSEL";
1292   case PPCISD::FCFID:           return "PPCISD::FCFID";
1293   case PPCISD::FCFIDU:          return "PPCISD::FCFIDU";
1294   case PPCISD::FCFIDS:          return "PPCISD::FCFIDS";
1295   case PPCISD::FCFIDUS:         return "PPCISD::FCFIDUS";
1296   case PPCISD::FCTIDZ:          return "PPCISD::FCTIDZ";
1297   case PPCISD::FCTIWZ:          return "PPCISD::FCTIWZ";
1298   case PPCISD::FCTIDUZ:         return "PPCISD::FCTIDUZ";
1299   case PPCISD::FCTIWUZ:         return "PPCISD::FCTIWUZ";
1300   case PPCISD::FP_TO_UINT_IN_VSR:
1301                                 return "PPCISD::FP_TO_UINT_IN_VSR,";
1302   case PPCISD::FP_TO_SINT_IN_VSR:
1303                                 return "PPCISD::FP_TO_SINT_IN_VSR";
1304   case PPCISD::FRE:             return "PPCISD::FRE";
1305   case PPCISD::FRSQRTE:         return "PPCISD::FRSQRTE";
1306   case PPCISD::STFIWX:          return "PPCISD::STFIWX";
1307   case PPCISD::VMADDFP:         return "PPCISD::VMADDFP";
1308   case PPCISD::VNMSUBFP:        return "PPCISD::VNMSUBFP";
1309   case PPCISD::VPERM:           return "PPCISD::VPERM";
1310   case PPCISD::XXSPLT:          return "PPCISD::XXSPLT";
1311   case PPCISD::VECINSERT:       return "PPCISD::VECINSERT";
1312   case PPCISD::XXREVERSE:       return "PPCISD::XXREVERSE";
1313   case PPCISD::XXPERMDI:        return "PPCISD::XXPERMDI";
1314   case PPCISD::VECSHL:          return "PPCISD::VECSHL";
1315   case PPCISD::CMPB:            return "PPCISD::CMPB";
1316   case PPCISD::Hi:              return "PPCISD::Hi";
1317   case PPCISD::Lo:              return "PPCISD::Lo";
1318   case PPCISD::TOC_ENTRY:       return "PPCISD::TOC_ENTRY";
1319   case PPCISD::ATOMIC_CMP_SWAP_8: return "PPCISD::ATOMIC_CMP_SWAP_8";
1320   case PPCISD::ATOMIC_CMP_SWAP_16: return "PPCISD::ATOMIC_CMP_SWAP_16";
1321   case PPCISD::DYNALLOC:        return "PPCISD::DYNALLOC";
1322   case PPCISD::DYNAREAOFFSET:   return "PPCISD::DYNAREAOFFSET";
1323   case PPCISD::GlobalBaseReg:   return "PPCISD::GlobalBaseReg";
1324   case PPCISD::SRL:             return "PPCISD::SRL";
1325   case PPCISD::SRA:             return "PPCISD::SRA";
1326   case PPCISD::SHL:             return "PPCISD::SHL";
1327   case PPCISD::SRA_ADDZE:       return "PPCISD::SRA_ADDZE";
1328   case PPCISD::CALL:            return "PPCISD::CALL";
1329   case PPCISD::CALL_NOP:        return "PPCISD::CALL_NOP";
1330   case PPCISD::MTCTR:           return "PPCISD::MTCTR";
1331   case PPCISD::BCTRL:           return "PPCISD::BCTRL";
1332   case PPCISD::BCTRL_LOAD_TOC:  return "PPCISD::BCTRL_LOAD_TOC";
1333   case PPCISD::RET_FLAG:        return "PPCISD::RET_FLAG";
1334   case PPCISD::READ_TIME_BASE:  return "PPCISD::READ_TIME_BASE";
1335   case PPCISD::EH_SJLJ_SETJMP:  return "PPCISD::EH_SJLJ_SETJMP";
1336   case PPCISD::EH_SJLJ_LONGJMP: return "PPCISD::EH_SJLJ_LONGJMP";
1337   case PPCISD::MFOCRF:          return "PPCISD::MFOCRF";
1338   case PPCISD::MFVSR:           return "PPCISD::MFVSR";
1339   case PPCISD::MTVSRA:          return "PPCISD::MTVSRA";
1340   case PPCISD::MTVSRZ:          return "PPCISD::MTVSRZ";
1341   case PPCISD::SINT_VEC_TO_FP:  return "PPCISD::SINT_VEC_TO_FP";
1342   case PPCISD::UINT_VEC_TO_FP:  return "PPCISD::UINT_VEC_TO_FP";
1343   case PPCISD::ANDIo_1_EQ_BIT:  return "PPCISD::ANDIo_1_EQ_BIT";
1344   case PPCISD::ANDIo_1_GT_BIT:  return "PPCISD::ANDIo_1_GT_BIT";
1345   case PPCISD::VCMP:            return "PPCISD::VCMP";
1346   case PPCISD::VCMPo:           return "PPCISD::VCMPo";
1347   case PPCISD::LBRX:            return "PPCISD::LBRX";
1348   case PPCISD::STBRX:           return "PPCISD::STBRX";
1349   case PPCISD::LFIWAX:          return "PPCISD::LFIWAX";
1350   case PPCISD::LFIWZX:          return "PPCISD::LFIWZX";
1351   case PPCISD::LXSIZX:          return "PPCISD::LXSIZX";
1352   case PPCISD::STXSIX:          return "PPCISD::STXSIX";
1353   case PPCISD::VEXTS:           return "PPCISD::VEXTS";
1354   case PPCISD::SExtVElems:      return "PPCISD::SExtVElems";
1355   case PPCISD::LXVD2X:          return "PPCISD::LXVD2X";
1356   case PPCISD::STXVD2X:         return "PPCISD::STXVD2X";
1357   case PPCISD::LOAD_VEC_BE:     return "PPCISD::LOAD_VEC_BE";
1358   case PPCISD::STORE_VEC_BE:    return "PPCISD::STORE_VEC_BE";
1359   case PPCISD::ST_VSR_SCAL_INT:
1360                                 return "PPCISD::ST_VSR_SCAL_INT";
1361   case PPCISD::COND_BRANCH:     return "PPCISD::COND_BRANCH";
1362   case PPCISD::BDNZ:            return "PPCISD::BDNZ";
1363   case PPCISD::BDZ:             return "PPCISD::BDZ";
1364   case PPCISD::MFFS:            return "PPCISD::MFFS";
1365   case PPCISD::FADDRTZ:         return "PPCISD::FADDRTZ";
1366   case PPCISD::TC_RETURN:       return "PPCISD::TC_RETURN";
1367   case PPCISD::CR6SET:          return "PPCISD::CR6SET";
1368   case PPCISD::CR6UNSET:        return "PPCISD::CR6UNSET";
1369   case PPCISD::PPC32_GOT:       return "PPCISD::PPC32_GOT";
1370   case PPCISD::PPC32_PICGOT:    return "PPCISD::PPC32_PICGOT";
1371   case PPCISD::ADDIS_GOT_TPREL_HA: return "PPCISD::ADDIS_GOT_TPREL_HA";
1372   case PPCISD::LD_GOT_TPREL_L:  return "PPCISD::LD_GOT_TPREL_L";
1373   case PPCISD::ADD_TLS:         return "PPCISD::ADD_TLS";
1374   case PPCISD::ADDIS_TLSGD_HA:  return "PPCISD::ADDIS_TLSGD_HA";
1375   case PPCISD::ADDI_TLSGD_L:    return "PPCISD::ADDI_TLSGD_L";
1376   case PPCISD::GET_TLS_ADDR:    return "PPCISD::GET_TLS_ADDR";
1377   case PPCISD::ADDI_TLSGD_L_ADDR: return "PPCISD::ADDI_TLSGD_L_ADDR";
1378   case PPCISD::ADDIS_TLSLD_HA:  return "PPCISD::ADDIS_TLSLD_HA";
1379   case PPCISD::ADDI_TLSLD_L:    return "PPCISD::ADDI_TLSLD_L";
1380   case PPCISD::GET_TLSLD_ADDR:  return "PPCISD::GET_TLSLD_ADDR";
1381   case PPCISD::ADDI_TLSLD_L_ADDR: return "PPCISD::ADDI_TLSLD_L_ADDR";
1382   case PPCISD::ADDIS_DTPREL_HA: return "PPCISD::ADDIS_DTPREL_HA";
1383   case PPCISD::ADDI_DTPREL_L:   return "PPCISD::ADDI_DTPREL_L";
1384   case PPCISD::VADD_SPLAT:      return "PPCISD::VADD_SPLAT";
1385   case PPCISD::SC:              return "PPCISD::SC";
1386   case PPCISD::CLRBHRB:         return "PPCISD::CLRBHRB";
1387   case PPCISD::MFBHRBE:         return "PPCISD::MFBHRBE";
1388   case PPCISD::RFEBB:           return "PPCISD::RFEBB";
1389   case PPCISD::XXSWAPD:         return "PPCISD::XXSWAPD";
1390   case PPCISD::SWAP_NO_CHAIN:   return "PPCISD::SWAP_NO_CHAIN";
1391   case PPCISD::VABSD:           return "PPCISD::VABSD";
1392   case PPCISD::QVFPERM:         return "PPCISD::QVFPERM";
1393   case PPCISD::QVGPCI:          return "PPCISD::QVGPCI";
1394   case PPCISD::QVALIGNI:        return "PPCISD::QVALIGNI";
1395   case PPCISD::QVESPLATI:       return "PPCISD::QVESPLATI";
1396   case PPCISD::QBFLT:           return "PPCISD::QBFLT";
1397   case PPCISD::QVLFSb:          return "PPCISD::QVLFSb";
1398   case PPCISD::BUILD_FP128:     return "PPCISD::BUILD_FP128";
1399   case PPCISD::BUILD_SPE64:     return "PPCISD::BUILD_SPE64";
1400   case PPCISD::EXTRACT_SPE:     return "PPCISD::EXTRACT_SPE";
1401   case PPCISD::EXTSWSLI:        return "PPCISD::EXTSWSLI";
1402   case PPCISD::LD_VSX_LH:       return "PPCISD::LD_VSX_LH";
1403   case PPCISD::FP_EXTEND_LH:    return "PPCISD::FP_EXTEND_LH";
1404   }
1405   return nullptr;
1406 }
1407 
1408 EVT PPCTargetLowering::getSetCCResultType(const DataLayout &DL, LLVMContext &C,
1409                                           EVT VT) const {
1410   if (!VT.isVector())
1411     return Subtarget.useCRBits() ? MVT::i1 : MVT::i32;
1412 
1413   if (Subtarget.hasQPX())
1414     return EVT::getVectorVT(C, MVT::i1, VT.getVectorNumElements());
1415 
1416   return VT.changeVectorElementTypeToInteger();
1417 }
1418 
1419 bool PPCTargetLowering::enableAggressiveFMAFusion(EVT VT) const {
1420   assert(VT.isFloatingPoint() && "Non-floating-point FMA?");
1421   return true;
1422 }
1423 
1424 //===----------------------------------------------------------------------===//
1425 // Node matching predicates, for use by the tblgen matching code.
1426 //===----------------------------------------------------------------------===//
1427 
1428 /// isFloatingPointZero - Return true if this is 0.0 or -0.0.
1429 static bool isFloatingPointZero(SDValue Op) {
1430   if (ConstantFPSDNode *CFP = dyn_cast<ConstantFPSDNode>(Op))
1431     return CFP->getValueAPF().isZero();
1432   else if (ISD::isEXTLoad(Op.getNode()) || ISD::isNON_EXTLoad(Op.getNode())) {
1433     // Maybe this has already been legalized into the constant pool?
1434     if (ConstantPoolSDNode *CP = dyn_cast<ConstantPoolSDNode>(Op.getOperand(1)))
1435       if (const ConstantFP *CFP = dyn_cast<ConstantFP>(CP->getConstVal()))
1436         return CFP->getValueAPF().isZero();
1437   }
1438   return false;
1439 }
1440 
/// isConstantOrUndef - Return true if the mask element Op is undef (encoded
/// as a negative value) or if it matches the specified value.
1443 static bool isConstantOrUndef(int Op, int Val) {
1444   return Op < 0 || Op == Val;
1445 }
1446 
1447 /// isVPKUHUMShuffleMask - Return true if this is the shuffle mask for a
1448 /// VPKUHUM instruction.
1449 /// The ShuffleKind distinguishes between big-endian operations with
1450 /// two different inputs (0), either-endian operations with two identical
1451 /// inputs (1), and little-endian operations with two different inputs (2).
1452 /// For the latter, the input operands are swapped (see PPCInstrAltivec.td).
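/// For illustration: with ShuffleKind 0 (big endian, two different inputs),
/// the only accepted v16i8 mask (modulo undefs) is
///   <1, 3, 5, 7, 9, 11, 13, 15, 17, 19, 21, 23, 25, 27, 29, 31>
/// i.e. the low byte of every halfword across both inputs.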
1453 bool PPC::isVPKUHUMShuffleMask(ShuffleVectorSDNode *N, unsigned ShuffleKind,
1454                                SelectionDAG &DAG) {
1455   bool IsLE = DAG.getDataLayout().isLittleEndian();
1456   if (ShuffleKind == 0) {
1457     if (IsLE)
1458       return false;
1459     for (unsigned i = 0; i != 16; ++i)
1460       if (!isConstantOrUndef(N->getMaskElt(i), i*2+1))
1461         return false;
1462   } else if (ShuffleKind == 2) {
1463     if (!IsLE)
1464       return false;
1465     for (unsigned i = 0; i != 16; ++i)
1466       if (!isConstantOrUndef(N->getMaskElt(i), i*2))
1467         return false;
1468   } else if (ShuffleKind == 1) {
1469     unsigned j = IsLE ? 0 : 1;
1470     for (unsigned i = 0; i != 8; ++i)
1471       if (!isConstantOrUndef(N->getMaskElt(i),    i*2+j) ||
1472           !isConstantOrUndef(N->getMaskElt(i+8),  i*2+j))
1473         return false;
1474   }
1475   return true;
1476 }
1477 
1478 /// isVPKUWUMShuffleMask - Return true if this is the shuffle mask for a
1479 /// VPKUWUM instruction.
1480 /// The ShuffleKind distinguishes between big-endian operations with
1481 /// two different inputs (0), either-endian operations with two identical
1482 /// inputs (1), and little-endian operations with two different inputs (2).
1483 /// For the latter, the input operands are swapped (see PPCInstrAltivec.td).
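/// For illustration: with ShuffleKind 2 (little endian, two swapped inputs),
/// the accepted v16i8 mask (modulo undefs) is
///   <0, 1, 4, 5, 8, 9, 12, 13, 16, 17, 20, 21, 24, 25, 28, 29>
/// i.e. the low halfword of every word across both inputs.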
1484 bool PPC::isVPKUWUMShuffleMask(ShuffleVectorSDNode *N, unsigned ShuffleKind,
1485                                SelectionDAG &DAG) {
1486   bool IsLE = DAG.getDataLayout().isLittleEndian();
1487   if (ShuffleKind == 0) {
1488     if (IsLE)
1489       return false;
1490     for (unsigned i = 0; i != 16; i += 2)
1491       if (!isConstantOrUndef(N->getMaskElt(i  ),  i*2+2) ||
1492           !isConstantOrUndef(N->getMaskElt(i+1),  i*2+3))
1493         return false;
1494   } else if (ShuffleKind == 2) {
1495     if (!IsLE)
1496       return false;
1497     for (unsigned i = 0; i != 16; i += 2)
1498       if (!isConstantOrUndef(N->getMaskElt(i  ),  i*2) ||
1499           !isConstantOrUndef(N->getMaskElt(i+1),  i*2+1))
1500         return false;
1501   } else if (ShuffleKind == 1) {
1502     unsigned j = IsLE ? 0 : 2;
1503     for (unsigned i = 0; i != 8; i += 2)
1504       if (!isConstantOrUndef(N->getMaskElt(i  ),  i*2+j)   ||
1505           !isConstantOrUndef(N->getMaskElt(i+1),  i*2+j+1) ||
1506           !isConstantOrUndef(N->getMaskElt(i+8),  i*2+j)   ||
1507           !isConstantOrUndef(N->getMaskElt(i+9),  i*2+j+1))
1508         return false;
1509   }
1510   return true;
1511 }
1512 
1513 /// isVPKUDUMShuffleMask - Return true if this is the shuffle mask for a
1514 /// VPKUDUM instruction, AND the VPKUDUM instruction exists for the
1515 /// current subtarget.
1516 ///
1517 /// The ShuffleKind distinguishes between big-endian operations with
1518 /// two different inputs (0), either-endian operations with two identical
1519 /// inputs (1), and little-endian operations with two different inputs (2).
1520 /// For the latter, the input operands are swapped (see PPCInstrAltivec.td).
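/// For illustration: with ShuffleKind 1 (identical inputs) on a big-endian
/// target, the accepted v16i8 mask (modulo undefs) is
///   <4, 5, 6, 7, 12, 13, 14, 15, 4, 5, 6, 7, 12, 13, 14, 15>
/// i.e. the low word of each doubleword of the single input, taken twice.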
1521 bool PPC::isVPKUDUMShuffleMask(ShuffleVectorSDNode *N, unsigned ShuffleKind,
1522                                SelectionDAG &DAG) {
1523   const PPCSubtarget& Subtarget =
1524       static_cast<const PPCSubtarget&>(DAG.getSubtarget());
1525   if (!Subtarget.hasP8Vector())
1526     return false;
1527 
1528   bool IsLE = DAG.getDataLayout().isLittleEndian();
1529   if (ShuffleKind == 0) {
1530     if (IsLE)
1531       return false;
1532     for (unsigned i = 0; i != 16; i += 4)
1533       if (!isConstantOrUndef(N->getMaskElt(i  ),  i*2+4) ||
1534           !isConstantOrUndef(N->getMaskElt(i+1),  i*2+5) ||
1535           !isConstantOrUndef(N->getMaskElt(i+2),  i*2+6) ||
1536           !isConstantOrUndef(N->getMaskElt(i+3),  i*2+7))
1537         return false;
1538   } else if (ShuffleKind == 2) {
1539     if (!IsLE)
1540       return false;
1541     for (unsigned i = 0; i != 16; i += 4)
1542       if (!isConstantOrUndef(N->getMaskElt(i  ),  i*2) ||
1543           !isConstantOrUndef(N->getMaskElt(i+1),  i*2+1) ||
1544           !isConstantOrUndef(N->getMaskElt(i+2),  i*2+2) ||
1545           !isConstantOrUndef(N->getMaskElt(i+3),  i*2+3))
1546         return false;
1547   } else if (ShuffleKind == 1) {
1548     unsigned j = IsLE ? 0 : 4;
1549     for (unsigned i = 0; i != 8; i += 4)
1550       if (!isConstantOrUndef(N->getMaskElt(i  ),  i*2+j)   ||
1551           !isConstantOrUndef(N->getMaskElt(i+1),  i*2+j+1) ||
1552           !isConstantOrUndef(N->getMaskElt(i+2),  i*2+j+2) ||
1553           !isConstantOrUndef(N->getMaskElt(i+3),  i*2+j+3) ||
1554           !isConstantOrUndef(N->getMaskElt(i+8),  i*2+j)   ||
1555           !isConstantOrUndef(N->getMaskElt(i+9),  i*2+j+1) ||
1556           !isConstantOrUndef(N->getMaskElt(i+10), i*2+j+2) ||
1557           !isConstantOrUndef(N->getMaskElt(i+11), i*2+j+3))
1558         return false;
1559   }
1560   return true;
1561 }
1562 
1563 /// isVMerge - Common function, used to match vmrg* shuffles.
1564 ///
1565 static bool isVMerge(ShuffleVectorSDNode *N, unsigned UnitSize,
1566                      unsigned LHSStart, unsigned RHSStart) {
1567   if (N->getValueType(0) != MVT::v16i8)
1568     return false;
1569   assert((UnitSize == 1 || UnitSize == 2 || UnitSize == 4) &&
1570          "Unsupported merge size!");
1571 
1572   for (unsigned i = 0; i != 8/UnitSize; ++i)     // Step over units
1573     for (unsigned j = 0; j != UnitSize; ++j) {   // Step over bytes within unit
1574       if (!isConstantOrUndef(N->getMaskElt(i*UnitSize*2+j),
1575                              LHSStart+j+i*UnitSize) ||
1576           !isConstantOrUndef(N->getMaskElt(i*UnitSize*2+UnitSize+j),
1577                              RHSStart+j+i*UnitSize))
1578         return false;
1579     }
1580   return true;
1581 }
1582 
1583 /// isVMRGLShuffleMask - Return true if this is a shuffle mask suitable for
1584 /// a VMRGL* instruction with the specified unit size (1,2 or 4 bytes).
1585 /// The ShuffleKind distinguishes between big-endian merges with two
1586 /// different inputs (0), either-endian merges with two identical inputs (1),
1587 /// and little-endian merges with two different inputs (2).  For the latter,
1588 /// the input operands are swapped (see PPCInstrAltivec.td).
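/// For illustration: a big-endian vmrglw (UnitSize 4) with two different
/// inputs (ShuffleKind 0) expects the v16i8 mask (modulo undefs)
///   <8, 9, 10, 11, 24, 25, 26, 27, 12, 13, 14, 15, 28, 29, 30, 31>
/// interleaving the low halves of the two inputs word by word.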
1589 bool PPC::isVMRGLShuffleMask(ShuffleVectorSDNode *N, unsigned UnitSize,
1590                              unsigned ShuffleKind, SelectionDAG &DAG) {
1591   if (DAG.getDataLayout().isLittleEndian()) {
1592     if (ShuffleKind == 1) // unary
1593       return isVMerge(N, UnitSize, 0, 0);
1594     else if (ShuffleKind == 2) // swapped
1595       return isVMerge(N, UnitSize, 0, 16);
1596     else
1597       return false;
1598   } else {
1599     if (ShuffleKind == 1) // unary
1600       return isVMerge(N, UnitSize, 8, 8);
1601     else if (ShuffleKind == 0) // normal
1602       return isVMerge(N, UnitSize, 8, 24);
1603     else
1604       return false;
1605   }
1606 }
1607 
1608 /// isVMRGHShuffleMask - Return true if this is a shuffle mask suitable for
1609 /// a VMRGH* instruction with the specified unit size (1,2 or 4 bytes).
1610 /// The ShuffleKind distinguishes between big-endian merges with two
1611 /// different inputs (0), either-endian merges with two identical inputs (1),
1612 /// and little-endian merges with two different inputs (2).  For the latter,
1613 /// the input operands are swapped (see PPCInstrAltivec.td).
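/// For illustration: a big-endian vmrghw (UnitSize 4) with two different
/// inputs (ShuffleKind 0) expects the v16i8 mask (modulo undefs)
///   <0, 1, 2, 3, 16, 17, 18, 19, 4, 5, 6, 7, 20, 21, 22, 23>
/// interleaving the high halves of the two inputs word by word.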
1614 bool PPC::isVMRGHShuffleMask(ShuffleVectorSDNode *N, unsigned UnitSize,
1615                              unsigned ShuffleKind, SelectionDAG &DAG) {
1616   if (DAG.getDataLayout().isLittleEndian()) {
1617     if (ShuffleKind == 1) // unary
1618       return isVMerge(N, UnitSize, 8, 8);
1619     else if (ShuffleKind == 2) // swapped
1620       return isVMerge(N, UnitSize, 8, 24);
1621     else
1622       return false;
1623   } else {
1624     if (ShuffleKind == 1) // unary
1625       return isVMerge(N, UnitSize, 0, 0);
1626     else if (ShuffleKind == 0) // normal
1627       return isVMerge(N, UnitSize, 0, 16);
1628     else
1629       return false;
1630   }
1631 }
1632 
1633 /**
1634  * Common function used to match vmrgew and vmrgow shuffles
1635  *
1636  * The indexOffset determines whether to look for even or odd words in
 * the shuffle mask. This is based on the endianness of the target machine.
1639  *   - Little Endian:
1640  *     - Use offset of 0 to check for odd elements
1641  *     - Use offset of 4 to check for even elements
1642  *   - Big Endian:
1643  *     - Use offset of 0 to check for even elements
1644  *     - Use offset of 4 to check for odd elements
 * A detailed description of the vector element ordering for little endian and
 * big endian can be found in "Targeting your applications - what little
 * endian and big endian IBM XL C/C++ compiler differences mean to you" at
 * http://www.ibm.com/developerworks/library/l-ibm-xl-c-cpp-compiler/index.html
1650  *
1651  * The mask to the shuffle vector instruction specifies the indices of the
1652  * elements from the two input vectors to place in the result. The elements are
1653  * numbered in array-access order, starting with the first vector. These vectors
 * are always of type v16i8, thus each vector will contain 16 elements of
 * 8 bits each. More information on the shufflevector instruction can be
 * found in the LLVM Language Reference:
 * http://llvm.org/docs/LangRef.html#shufflevector-instruction
1658  *
1659  * The RHSStartValue indicates whether the same input vectors are used (unary)
1660  * or two different input vectors are used, based on the following:
1661  *   - If the instruction uses the same vector for both inputs, the range of the
1662  *     indices will be 0 to 15. In this case, the RHSStart value passed should
1663  *     be 0.
1664  *   - If the instruction has two different vectors then the range of the
1665  *     indices will be 0 to 31. In this case, the RHSStart value passed should
1666  *     be 16 (indices 0-15 specify elements in the first vector while indices 16
1667  *     to 31 specify elements in the second vector).
1668  *
1669  * \param[in] N The shuffle vector SD Node to analyze
1670  * \param[in] IndexOffset Specifies whether to look for even or odd elements
1671  * \param[in] RHSStartValue Specifies the starting index for the righthand input
1672  * vector to the shuffle_vector instruction
1673  * \return true iff this shuffle vector represents an even or odd word merge
1674  */
1675 static bool isVMerge(ShuffleVectorSDNode *N, unsigned IndexOffset,
1676                      unsigned RHSStartValue) {
1677   if (N->getValueType(0) != MVT::v16i8)
1678     return false;
1679 
1680   for (unsigned i = 0; i < 2; ++i)
1681     for (unsigned j = 0; j < 4; ++j)
1682       if (!isConstantOrUndef(N->getMaskElt(i*4+j),
1683                              i*RHSStartValue+j+IndexOffset) ||
1684           !isConstantOrUndef(N->getMaskElt(i*4+j+8),
1685                              i*RHSStartValue+j+IndexOffset+8))
1686         return false;
1687   return true;
1688 }
1689 
1690 /**
1691  * Determine if the specified shuffle mask is suitable for the vmrgew or
1692  * vmrgow instructions.
1693  *
1694  * \param[in] N The shuffle vector SD Node to analyze
1695  * \param[in] CheckEven Check for an even merge (true) or an odd merge (false)
1696  * \param[in] ShuffleKind Identify the type of merge:
1697  *   - 0 = big-endian merge with two different inputs;
1698  *   - 1 = either-endian merge with two identical inputs;
1699  *   - 2 = little-endian merge with two different inputs (inputs are swapped for
1700  *     little-endian merges).
1701  * \param[in] DAG The current SelectionDAG
 * \return true iff this shuffle mask is suitable for a vmrgew or vmrgow
 * instruction
1703  */
1704 bool PPC::isVMRGEOShuffleMask(ShuffleVectorSDNode *N, bool CheckEven,
1705                               unsigned ShuffleKind, SelectionDAG &DAG) {
1706   if (DAG.getDataLayout().isLittleEndian()) {
1707     unsigned indexOffset = CheckEven ? 4 : 0;
1708     if (ShuffleKind == 1) // Unary
1709       return isVMerge(N, indexOffset, 0);
1710     else if (ShuffleKind == 2) // swapped
1711       return isVMerge(N, indexOffset, 16);
1712     else
1713       return false;
1714   }
1715   else {
1716     unsigned indexOffset = CheckEven ? 0 : 4;
1717     if (ShuffleKind == 1) // Unary
1718       return isVMerge(N, indexOffset, 0);
1719     else if (ShuffleKind == 0) // Normal
1720       return isVMerge(N, indexOffset, 16);
1721     else
1722       return false;
1723   }
1724   return false;
1725 }
1726 
1727 /// isVSLDOIShuffleMask - If this is a vsldoi shuffle mask, return the shift
1728 /// amount, otherwise return -1.
1729 /// The ShuffleKind distinguishes between big-endian operations with two
1730 /// different inputs (0), either-endian operations with two identical inputs
1731 /// (1), and little-endian operations with two different inputs (2).  For the
1732 /// latter, the input operands are swapped (see PPCInstrAltivec.td).
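/// For illustration: on a big-endian target with two different inputs
/// (ShuffleKind 0), the v16i8 mask <4, 5, 6, ..., 18, 19> (modulo undefs)
/// yields a shift amount of 4, matching a vsldoi by 4 bytes.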
1733 int PPC::isVSLDOIShuffleMask(SDNode *N, unsigned ShuffleKind,
1734                              SelectionDAG &DAG) {
1735   if (N->getValueType(0) != MVT::v16i8)
1736     return -1;
1737 
1738   ShuffleVectorSDNode *SVOp = cast<ShuffleVectorSDNode>(N);
1739 
1740   // Find the first non-undef value in the shuffle mask.
1741   unsigned i;
1742   for (i = 0; i != 16 && SVOp->getMaskElt(i) < 0; ++i)
1743     /*search*/;
1744 
1745   if (i == 16) return -1;  // all undef.
1746 
1747   // Otherwise, check to see if the rest of the elements are consecutively
1748   // numbered from this value.
1749   unsigned ShiftAmt = SVOp->getMaskElt(i);
1750   if (ShiftAmt < i) return -1;
1751 
1752   ShiftAmt -= i;
1753   bool isLE = DAG.getDataLayout().isLittleEndian();
1754 
1755   if ((ShuffleKind == 0 && !isLE) || (ShuffleKind == 2 && isLE)) {
1756     // Check the rest of the elements to see if they are consecutive.
1757     for (++i; i != 16; ++i)
1758       if (!isConstantOrUndef(SVOp->getMaskElt(i), ShiftAmt+i))
1759         return -1;
1760   } else if (ShuffleKind == 1) {
1761     // Check the rest of the elements to see if they are consecutive.
1762     for (++i; i != 16; ++i)
1763       if (!isConstantOrUndef(SVOp->getMaskElt(i), (ShiftAmt+i) & 15))
1764         return -1;
1765   } else
1766     return -1;
1767 
1768   if (isLE)
1769     ShiftAmt = 16 - ShiftAmt;
1770 
1771   return ShiftAmt;
1772 }
1773 
1774 /// isSplatShuffleMask - Return true if the specified VECTOR_SHUFFLE operand
1775 /// specifies a splat of a single element that is suitable for input to
1776 /// VSPLTB/VSPLTH/VSPLTW.
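/// For illustration: with EltSize 4, the v16i8 mask
///   <4, 5, 6, 7, 4, 5, 6, 7, 4, 5, 6, 7, 4, 5, 6, 7>
/// (modulo undefs) is a splat of word element 1 and is accepted here.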
1777 bool PPC::isSplatShuffleMask(ShuffleVectorSDNode *N, unsigned EltSize) {
1778   assert(N->getValueType(0) == MVT::v16i8 &&
1779          (EltSize == 1 || EltSize == 2 || EltSize == 4));
1780 
1781   // The consecutive indices need to specify an element, not part of two
1782   // different elements.  So abandon ship early if this isn't the case.
1783   if (N->getMaskElt(0) % EltSize != 0)
1784     return false;
1785 
1786   // This is a splat operation if each element of the permute is the same, and
1787   // if the value doesn't reference the second vector.
1788   unsigned ElementBase = N->getMaskElt(0);
1789 
1790   // FIXME: Handle UNDEF elements too!
1791   if (ElementBase >= 16)
1792     return false;
1793 
1794   // Check that the indices are consecutive, in the case of a multi-byte element
1795   // splatted with a v16i8 mask.
1796   for (unsigned i = 1; i != EltSize; ++i)
1797     if (N->getMaskElt(i) < 0 || N->getMaskElt(i) != (int)(i+ElementBase))
1798       return false;
1799 
1800   for (unsigned i = EltSize, e = 16; i != e; i += EltSize) {
1801     if (N->getMaskElt(i) < 0) continue;
1802     for (unsigned j = 0; j != EltSize; ++j)
1803       if (N->getMaskElt(i+j) != N->getMaskElt(j))
1804         return false;
1805   }
1806   return true;
1807 }
1808 
1809 /// Check that the mask is shuffling N byte elements. Within each N byte
1810 /// element of the mask, the indices could be either in increasing or
1811 /// decreasing order as long as they are consecutive.
1812 /// \param[in] N the shuffle vector SD Node to analyze
1813 /// \param[in] Width the element width in bytes, could be 2/4/8/16 (HalfWord/
1814 /// Word/DoubleWord/QuadWord).
/// \param[in] StepLen the delta between adjacent indices within each N-byte
/// element: 1 if the mask indices are increasing, -1 if they are decreasing.
1817 /// \return true iff the mask is shuffling N byte elements.
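/// For illustration: with Width 4 and StepLen -1, the v16i8 mask
///   <3, 2, 1, 0, 7, 6, 5, 4, 11, 10, 9, 8, 15, 14, 13, 12>
/// qualifies: each word keeps its bytes consecutive, just in decreasing
/// order (the pattern isXXBRWShuffleMask below looks for).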
1818 static bool isNByteElemShuffleMask(ShuffleVectorSDNode *N, unsigned Width,
1819                                    int StepLen) {
1820   assert((Width == 2 || Width == 4 || Width == 8 || Width == 16) &&
1821          "Unexpected element width.");
  assert((StepLen == 1 || StepLen == -1) && "Unexpected step length.");
1823 
1824   unsigned NumOfElem = 16 / Width;
1825   unsigned MaskVal[16]; //  Width is never greater than 16
1826   for (unsigned i = 0; i < NumOfElem; ++i) {
1827     MaskVal[0] = N->getMaskElt(i * Width);
1828     if ((StepLen == 1) && (MaskVal[0] % Width)) {
1829       return false;
1830     } else if ((StepLen == -1) && ((MaskVal[0] + 1) % Width)) {
1831       return false;
1832     }
1833 
1834     for (unsigned int j = 1; j < Width; ++j) {
1835       MaskVal[j] = N->getMaskElt(i * Width + j);
1836       if (MaskVal[j] != MaskVal[j-1] + StepLen) {
1837         return false;
1838       }
1839     }
1840   }
1841 
1842   return true;
1843 }
1844 
1845 bool PPC::isXXINSERTWMask(ShuffleVectorSDNode *N, unsigned &ShiftElts,
1846                           unsigned &InsertAtByte, bool &Swap, bool IsLE) {
1847   if (!isNByteElemShuffleMask(N, 4, 1))
1848     return false;
1849 
1850   // Now we look at mask elements 0,4,8,12
1851   unsigned M0 = N->getMaskElt(0) / 4;
1852   unsigned M1 = N->getMaskElt(4) / 4;
1853   unsigned M2 = N->getMaskElt(8) / 4;
1854   unsigned M3 = N->getMaskElt(12) / 4;
1855   unsigned LittleEndianShifts[] = { 2, 1, 0, 3 };
1856   unsigned BigEndianShifts[] = { 3, 0, 1, 2 };
1857 
1858   // Below, let H and L be arbitrary elements of the shuffle mask
1859   // where H is in the range [4,7] and L is in the range [0,3].
1860   // H, 1, 2, 3 or L, 5, 6, 7
1861   if ((M0 > 3 && M1 == 1 && M2 == 2 && M3 == 3) ||
1862       (M0 < 4 && M1 == 5 && M2 == 6 && M3 == 7)) {
1863     ShiftElts = IsLE ? LittleEndianShifts[M0 & 0x3] : BigEndianShifts[M0 & 0x3];
1864     InsertAtByte = IsLE ? 12 : 0;
1865     Swap = M0 < 4;
1866     return true;
1867   }
1868   // 0, H, 2, 3 or 4, L, 6, 7
1869   if ((M1 > 3 && M0 == 0 && M2 == 2 && M3 == 3) ||
1870       (M1 < 4 && M0 == 4 && M2 == 6 && M3 == 7)) {
1871     ShiftElts = IsLE ? LittleEndianShifts[M1 & 0x3] : BigEndianShifts[M1 & 0x3];
1872     InsertAtByte = IsLE ? 8 : 4;
1873     Swap = M1 < 4;
1874     return true;
1875   }
1876   // 0, 1, H, 3 or 4, 5, L, 7
1877   if ((M2 > 3 && M0 == 0 && M1 == 1 && M3 == 3) ||
1878       (M2 < 4 && M0 == 4 && M1 == 5 && M3 == 7)) {
1879     ShiftElts = IsLE ? LittleEndianShifts[M2 & 0x3] : BigEndianShifts[M2 & 0x3];
1880     InsertAtByte = IsLE ? 4 : 8;
1881     Swap = M2 < 4;
1882     return true;
1883   }
1884   // 0, 1, 2, H or 4, 5, 6, L
1885   if ((M3 > 3 && M0 == 0 && M1 == 1 && M2 == 2) ||
1886       (M3 < 4 && M0 == 4 && M1 == 5 && M2 == 6)) {
1887     ShiftElts = IsLE ? LittleEndianShifts[M3 & 0x3] : BigEndianShifts[M3 & 0x3];
1888     InsertAtByte = IsLE ? 0 : 12;
1889     Swap = M3 < 4;
1890     return true;
1891   }
1892 
1893   // If both vector operands for the shuffle are the same vector, the mask will
1894   // contain only elements from the first one and the second one will be undef.
1895   if (N->getOperand(1).isUndef()) {
1896     ShiftElts = 0;
1897     Swap = true;
1898     unsigned XXINSERTWSrcElem = IsLE ? 2 : 1;
1899     if (M0 == XXINSERTWSrcElem && M1 == 1 && M2 == 2 && M3 == 3) {
1900       InsertAtByte = IsLE ? 12 : 0;
1901       return true;
1902     }
1903     if (M0 == 0 && M1 == XXINSERTWSrcElem && M2 == 2 && M3 == 3) {
1904       InsertAtByte = IsLE ? 8 : 4;
1905       return true;
1906     }
1907     if (M0 == 0 && M1 == 1 && M2 == XXINSERTWSrcElem && M3 == 3) {
1908       InsertAtByte = IsLE ? 4 : 8;
1909       return true;
1910     }
1911     if (M0 == 0 && M1 == 1 && M2 == 2 && M3 == XXINSERTWSrcElem) {
1912       InsertAtByte = IsLE ? 0 : 12;
1913       return true;
1914     }
1915   }
1916 
1917   return false;
1918 }
1919 
1920 bool PPC::isXXSLDWIShuffleMask(ShuffleVectorSDNode *N, unsigned &ShiftElts,
1921                                bool &Swap, bool IsLE) {
1922   assert(N->getValueType(0) == MVT::v16i8 && "Shuffle vector expects v16i8");
1923   // Ensure each byte index of the word is consecutive.
1924   if (!isNByteElemShuffleMask(N, 4, 1))
1925     return false;
1926 
1927   // Now we look at mask elements 0,4,8,12, which are the beginning of words.
1928   unsigned M0 = N->getMaskElt(0) / 4;
1929   unsigned M1 = N->getMaskElt(4) / 4;
1930   unsigned M2 = N->getMaskElt(8) / 4;
1931   unsigned M3 = N->getMaskElt(12) / 4;
1932 
1933   // If both vector operands for the shuffle are the same vector, the mask will
1934   // contain only elements from the first one and the second one will be undef.
1935   if (N->getOperand(1).isUndef()) {
1936     assert(M0 < 4 && "Indexing into an undef vector?");
1937     if (M1 != (M0 + 1) % 4 || M2 != (M1 + 1) % 4 || M3 != (M2 + 1) % 4)
1938       return false;
1939 
1940     ShiftElts = IsLE ? (4 - M0) % 4 : M0;
1941     Swap = false;
1942     return true;
1943   }
1944 
1945   // Ensure each word index of the ShuffleVector Mask is consecutive.
1946   if (M1 != (M0 + 1) % 8 || M2 != (M1 + 1) % 8 || M3 != (M2 + 1) % 8)
1947     return false;
1948 
1949   if (IsLE) {
1950     if (M0 == 0 || M0 == 7 || M0 == 6 || M0 == 5) {
1951       // Input vectors don't need to be swapped if the leading element
1952       // of the result is one of the 3 left elements of the second vector
1953       // (or if there is no shift to be done at all).
1954       Swap = false;
1955       ShiftElts = (8 - M0) % 8;
1956     } else if (M0 == 4 || M0 == 3 || M0 == 2 || M0 == 1) {
1957       // Input vectors need to be swapped if the leading element
1958       // of the result is one of the 3 left elements of the first vector
1959       // (or if we're shifting by 4 - thereby simply swapping the vectors).
1960       Swap = true;
1961       ShiftElts = (4 - M0) % 4;
1962     }
1963 
1964     return true;
1965   } else {                                          // BE
1966     if (M0 == 0 || M0 == 1 || M0 == 2 || M0 == 3) {
1967       // Input vectors don't need to be swapped if the leading element
1968       // of the result is one of the 4 elements of the first vector.
1969       Swap = false;
1970       ShiftElts = M0;
1971     } else if (M0 == 4 || M0 == 5 || M0 == 6 || M0 == 7) {
1972       // Input vectors need to be swapped if the leading element
1973       // of the result is one of the 4 elements of the right vector.
1974       Swap = true;
1975       ShiftElts = M0 - 4;
1976     }
1977 
1978     return true;
1979   }
1980 }
1981 
1982 bool static isXXBRShuffleMaskHelper(ShuffleVectorSDNode *N, int Width) {
1983   assert(N->getValueType(0) == MVT::v16i8 && "Shuffle vector expects v16i8");
1984 
1985   if (!isNByteElemShuffleMask(N, Width, -1))
1986     return false;
1987 
1988   for (int i = 0; i < 16; i += Width)
1989     if (N->getMaskElt(i) != i + Width - 1)
1990       return false;
1991 
1992   return true;
1993 }
1994 
1995 bool PPC::isXXBRHShuffleMask(ShuffleVectorSDNode *N) {
1996   return isXXBRShuffleMaskHelper(N, 2);
1997 }
1998 
1999 bool PPC::isXXBRWShuffleMask(ShuffleVectorSDNode *N) {
2000   return isXXBRShuffleMaskHelper(N, 4);
2001 }
2002 
2003 bool PPC::isXXBRDShuffleMask(ShuffleVectorSDNode *N) {
2004   return isXXBRShuffleMaskHelper(N, 8);
2005 }
2006 
2007 bool PPC::isXXBRQShuffleMask(ShuffleVectorSDNode *N) {
2008   return isXXBRShuffleMaskHelper(N, 16);
2009 }
2010 
2011 /// Can node \p N be lowered to an XXPERMDI instruction? If so, set \p Swap
2012 /// if the inputs to the instruction should be swapped and set \p DM to the
2013 /// value for the immediate.
2014 /// Specifically, set \p Swap to true only if \p N can be lowered to XXPERMDI
2015 /// AND element 0 of the result comes from the first input (LE) or second input
2016 /// (BE). Set \p DM to the calculated result (0-3) only if \p N can be lowered.
/// \return true iff the given mask of shuffle node \p N is an XXPERMDI shuffle
/// mask.
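/// For illustration: on a big-endian target, the v16i8 mask
///   <0, 1, 2, 3, 4, 5, 6, 7, 24, 25, 26, 27, 28, 29, 30, 31>
/// gives M0 = 0 and M1 = 3, so Swap is false and DM = (0 << 1) + (3 & 1) = 1:
/// doubleword 0 of the first input followed by doubleword 1 of the second.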
2019 bool PPC::isXXPERMDIShuffleMask(ShuffleVectorSDNode *N, unsigned &DM,
2020                                bool &Swap, bool IsLE) {
2021   assert(N->getValueType(0) == MVT::v16i8 && "Shuffle vector expects v16i8");
2022 
2023   // Ensure each byte index of the double word is consecutive.
2024   if (!isNByteElemShuffleMask(N, 8, 1))
2025     return false;
2026 
2027   unsigned M0 = N->getMaskElt(0) / 8;
2028   unsigned M1 = N->getMaskElt(8) / 8;
2029   assert(((M0 | M1) < 4) && "A mask element out of bounds?");
2030 
2031   // If both vector operands for the shuffle are the same vector, the mask will
2032   // contain only elements from the first one and the second one will be undef.
2033   if (N->getOperand(1).isUndef()) {
2034     if ((M0 | M1) < 2) {
2035       DM = IsLE ? (((~M1) & 1) << 1) + ((~M0) & 1) : (M0 << 1) + (M1 & 1);
2036       Swap = false;
2037       return true;
2038     } else
2039       return false;
2040   }
2041 
2042   if (IsLE) {
2043     if (M0 > 1 && M1 < 2) {
2044       Swap = false;
2045     } else if (M0 < 2 && M1 > 1) {
2046       M0 = (M0 + 2) % 4;
2047       M1 = (M1 + 2) % 4;
2048       Swap = true;
2049     } else
2050       return false;
2051 
    // Note: if control flow reaches here, Swap was already set above.
2053     DM = (((~M1) & 1) << 1) + ((~M0) & 1);
2054     return true;
2055   } else { // BE
2056     if (M0 < 2 && M1 > 1) {
2057       Swap = false;
2058     } else if (M0 > 1 && M1 < 2) {
2059       M0 = (M0 + 2) % 4;
2060       M1 = (M1 + 2) % 4;
2061       Swap = true;
2062     } else
2063       return false;
2064 
    // Note: if control flow reaches here, Swap was already set above.
2066     DM = (M0 << 1) + (M1 & 1);
2067     return true;
2068   }
2069 }
2070 
2071 
2072 /// getVSPLTImmediate - Return the appropriate VSPLT* immediate to splat the
2073 /// specified isSplatShuffleMask VECTOR_SHUFFLE mask.
2074 unsigned PPC::getVSPLTImmediate(SDNode *N, unsigned EltSize,
2075                                 SelectionDAG &DAG) {
2076   ShuffleVectorSDNode *SVOp = cast<ShuffleVectorSDNode>(N);
2077   assert(isSplatShuffleMask(SVOp, EltSize));
2078   if (DAG.getDataLayout().isLittleEndian())
2079     return (16 / EltSize) - 1 - (SVOp->getMaskElt(0) / EltSize);
2080   else
2081     return SVOp->getMaskElt(0) / EltSize;
2082 }
2083 
2084 /// get_VSPLTI_elt - If this is a build_vector of constants which can be formed
2085 /// by using a vspltis[bhw] instruction of the specified element size, return
2086 /// the constant being splatted.  The ByteSize field indicates the number of
2087 /// bytes of each element [124] -> [bhw].
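/// For illustration: a v8i16 build_vector with every element equal to 5
/// matches for ByteSize 2 (vspltish 5). Queried with ByteSize 4 it fails,
/// since the leading halfword of each {5, 5} chunk is neither 0 nor -1 and
/// so the value cannot be formed by vspltisw.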
2088 SDValue PPC::get_VSPLTI_elt(SDNode *N, unsigned ByteSize, SelectionDAG &DAG) {
2089   SDValue OpVal(nullptr, 0);
2090 
2091   // If ByteSize of the splat is bigger than the element size of the
2092   // build_vector, then we have a case where we are checking for a splat where
2093   // multiple elements of the buildvector are folded together into a single
  // logical element of the splat (e.g. "vspltish 1" to splat {0,1}*8).
2095   unsigned EltSize = 16/N->getNumOperands();
2096   if (EltSize < ByteSize) {
2097     unsigned Multiple = ByteSize/EltSize;   // Number of BV entries per spltval.
2098     SDValue UniquedVals[4];
2099     assert(Multiple > 1 && Multiple <= 4 && "How can this happen?");
2100 
    // See if all of the elements in the buildvector agree across chunks.
2102     for (unsigned i = 0, e = N->getNumOperands(); i != e; ++i) {
2103       if (N->getOperand(i).isUndef()) continue;
2104       // If the element isn't a constant, bail fully out.
2105       if (!isa<ConstantSDNode>(N->getOperand(i))) return SDValue();
2106 
2107       if (!UniquedVals[i&(Multiple-1)].getNode())
2108         UniquedVals[i&(Multiple-1)] = N->getOperand(i);
2109       else if (UniquedVals[i&(Multiple-1)] != N->getOperand(i))
2110         return SDValue();  // no match.
2111     }
2112 
2113     // Okay, if we reached this point, UniquedVals[0..Multiple-1] contains
2114     // either constant or undef values that are identical for each chunk.  See
2115     // if these chunks can form into a larger vspltis*.
2116 
2117     // Check to see if all of the leading entries are either 0 or -1.  If
2118     // neither, then this won't fit into the immediate field.
2119     bool LeadingZero = true;
2120     bool LeadingOnes = true;
2121     for (unsigned i = 0; i != Multiple-1; ++i) {
2122       if (!UniquedVals[i].getNode()) continue;  // Must have been undefs.
2123 
2124       LeadingZero &= isNullConstant(UniquedVals[i]);
2125       LeadingOnes &= isAllOnesConstant(UniquedVals[i]);
2126     }
2127     // Finally, check the least significant entry.
2128     if (LeadingZero) {
2129       if (!UniquedVals[Multiple-1].getNode())
2130         return DAG.getTargetConstant(0, SDLoc(N), MVT::i32);  // 0,0,0,undef
2131       int Val = cast<ConstantSDNode>(UniquedVals[Multiple-1])->getZExtValue();
2132       if (Val < 16)                                   // 0,0,0,4 -> vspltisw(4)
2133         return DAG.getTargetConstant(Val, SDLoc(N), MVT::i32);
2134     }
2135     if (LeadingOnes) {
2136       if (!UniquedVals[Multiple-1].getNode())
2137         return DAG.getTargetConstant(~0U, SDLoc(N), MVT::i32); // -1,-1,-1,undef
      int Val = cast<ConstantSDNode>(UniquedVals[Multiple-1])->getSExtValue();
2139       if (Val >= -16)                            // -1,-1,-1,-2 -> vspltisw(-2)
2140         return DAG.getTargetConstant(Val, SDLoc(N), MVT::i32);
2141     }
2142 
2143     return SDValue();
2144   }
2145 
2146   // Check to see if this buildvec has a single non-undef value in its elements.
2147   for (unsigned i = 0, e = N->getNumOperands(); i != e; ++i) {
2148     if (N->getOperand(i).isUndef()) continue;
2149     if (!OpVal.getNode())
2150       OpVal = N->getOperand(i);
2151     else if (OpVal != N->getOperand(i))
2152       return SDValue();
2153   }
2154 
2155   if (!OpVal.getNode()) return SDValue();  // All UNDEF: use implicit def.
2156 
2157   unsigned ValSizeInBytes = EltSize;
2158   uint64_t Value = 0;
2159   if (ConstantSDNode *CN = dyn_cast<ConstantSDNode>(OpVal)) {
2160     Value = CN->getZExtValue();
2161   } else if (ConstantFPSDNode *CN = dyn_cast<ConstantFPSDNode>(OpVal)) {
2162     assert(CN->getValueType(0) == MVT::f32 && "Only one legal FP vector type!");
2163     Value = FloatToBits(CN->getValueAPF().convertToFloat());
2164   }
2165 
2166   // If the splat value is larger than the element value, then we can never do
2167   // this splat.  The only case that we could fit the replicated bits into our
2168   // immediate field for would be zero, and we prefer to use vxor for it.
2169   if (ValSizeInBytes < ByteSize) return SDValue();
2170 
2171   // If the element value is larger than the splat value, check if it consists
2172   // of a repeated bit pattern of size ByteSize.
2173   if (!APInt(ValSizeInBytes * 8, Value).isSplat(ByteSize * 8))
2174     return SDValue();
2175 
2176   // Properly sign extend the value.
2177   int MaskVal = SignExtend32(Value, ByteSize * 8);
2178 
2179   // If this is zero, don't match, zero matches ISD::isBuildVectorAllZeros.
2180   if (MaskVal == 0) return SDValue();
2181 
2182   // Finally, if this value fits in a 5 bit sext field, return it
2183   if (SignExtend32<5>(MaskVal) == MaskVal)
2184     return DAG.getTargetConstant(MaskVal, SDLoc(N), MVT::i32);
2185   return SDValue();
2186 }
2187 
2188 /// isQVALIGNIShuffleMask - If this is a qvaligni shuffle mask, return the shift
2189 /// amount, otherwise return -1.
2190 int PPC::isQVALIGNIShuffleMask(SDNode *N) {
2191   EVT VT = N->getValueType(0);
2192   if (VT != MVT::v4f64 && VT != MVT::v4f32 && VT != MVT::v4i1)
2193     return -1;
2194 
2195   ShuffleVectorSDNode *SVOp = cast<ShuffleVectorSDNode>(N);
2196 
2197   // Find the first non-undef value in the shuffle mask.
2198   unsigned i;
2199   for (i = 0; i != 4 && SVOp->getMaskElt(i) < 0; ++i)
2200     /*search*/;
2201 
2202   if (i == 4) return -1;  // all undef.
2203 
2204   // Otherwise, check to see if the rest of the elements are consecutively
2205   // numbered from this value.
2206   unsigned ShiftAmt = SVOp->getMaskElt(i);
2207   if (ShiftAmt < i) return -1;
2208   ShiftAmt -= i;
2209 
2210   // Check the rest of the elements to see if they are consecutive.
2211   for (++i; i != 4; ++i)
2212     if (!isConstantOrUndef(SVOp->getMaskElt(i), ShiftAmt+i))
2213       return -1;
2214 
2215   return ShiftAmt;
2216 }
2217 
2218 //===----------------------------------------------------------------------===//
2219 //  Addressing Mode Selection
2220 //===----------------------------------------------------------------------===//
2221 
2222 /// isIntS16Immediate - This method tests to see if the node is either a 32-bit
2223 /// or 64-bit immediate, and if the value can be accurately represented as a
/// sign extension from a 16-bit value.  If so, this returns true and sets
/// the immediate.
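/// For illustration: an i32 constant of -32768 (0xFFFF8000) matches and sets
/// Imm to -32768, while 32768 (0x00008000) does not, because the latter is
/// not the sign extension of any 16-bit value.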
2226 bool llvm::isIntS16Immediate(SDNode *N, int16_t &Imm) {
2227   if (!isa<ConstantSDNode>(N))
2228     return false;
2229 
2230   Imm = (int16_t)cast<ConstantSDNode>(N)->getZExtValue();
2231   if (N->getValueType(0) == MVT::i32)
2232     return Imm == (int32_t)cast<ConstantSDNode>(N)->getZExtValue();
2233   else
2234     return Imm == (int64_t)cast<ConstantSDNode>(N)->getZExtValue();
2235 }
2236 bool llvm::isIntS16Immediate(SDValue Op, int16_t &Imm) {
2237   return isIntS16Immediate(Op.getNode(), Imm);
2238 }
2239 
2240 
2241 /// SelectAddressEVXRegReg - Given the specified address, check to see if it can
2242 /// be represented as an indexed [r+r] operation.
2243 bool PPCTargetLowering::SelectAddressEVXRegReg(SDValue N, SDValue &Base,
2244                                                SDValue &Index,
2245                                                SelectionDAG &DAG) const {
2246   for (SDNode::use_iterator UI = N->use_begin(), E = N->use_end();
2247       UI != E; ++UI) {
2248     if (MemSDNode *Memop = dyn_cast<MemSDNode>(*UI)) {
      if (Memop->getMemoryVT() == MVT::f64) {
        Base = N.getOperand(0);
        Index = N.getOperand(1);
        return true;
      }
2254     }
2255   }
2256   return false;
2257 }
2258 
/// SelectAddressRegReg - Given the specified address, check to see if it
2260 /// can be represented as an indexed [r+r] operation.  Returns false if it
2261 /// can be more efficiently represented as [r+imm]. If \p EncodingAlignment is
2262 /// non-zero and N can be represented by a base register plus a signed 16-bit
2263 /// displacement, make a more precise judgement by checking (displacement % \p
2264 /// EncodingAlignment).
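/// For illustration, with EncodingAlignment 4 (a DS-form access): (add x, 8)
/// returns false because it is better encoded as [x + 8], while (add x, 6)
/// returns true because 6 is not a multiple of 4 and thus cannot be encoded
/// as a DS-form displacement.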
2265 bool PPCTargetLowering::SelectAddressRegReg(SDValue N, SDValue &Base,
2266                                             SDValue &Index, SelectionDAG &DAG,
2267                                             unsigned EncodingAlignment) const {
2268   int16_t imm = 0;
2269   if (N.getOpcode() == ISD::ADD) {
    // SPE f64 loads/stores can only handle 8-bit offsets, not the usual
    // 16-bit ones, so prefer the indexed [r+r] form for them.
    if (hasSPE() && SelectAddressEVXRegReg(N, Base, Index, DAG))
      return true;
2274     if (isIntS16Immediate(N.getOperand(1), imm) &&
2275         (!EncodingAlignment || !(imm % EncodingAlignment)))
2276       return false; // r+i
2277     if (N.getOperand(1).getOpcode() == PPCISD::Lo)
2278       return false;    // r+i
2279 
2280     Base = N.getOperand(0);
2281     Index = N.getOperand(1);
2282     return true;
2283   } else if (N.getOpcode() == ISD::OR) {
2284     if (isIntS16Immediate(N.getOperand(1), imm) &&
2285         (!EncodingAlignment || !(imm % EncodingAlignment)))
2286       return false; // r+i can fold it if we can.
2287 
2288     // If this is an or of disjoint bitfields, we can codegen this as an add
2289     // (for better address arithmetic) if the LHS and RHS of the OR are provably
2290     // disjoint.
2291     KnownBits LHSKnown = DAG.computeKnownBits(N.getOperand(0));
2292 
2293     if (LHSKnown.Zero.getBoolValue()) {
2294       KnownBits RHSKnown = DAG.computeKnownBits(N.getOperand(1));
2295       // If all of the bits are known zero on the LHS or RHS, the add won't
2296       // carry.
2297       if (~(LHSKnown.Zero | RHSKnown.Zero) == 0) {
2298         Base = N.getOperand(0);
2299         Index = N.getOperand(1);
2300         return true;
2301       }
2302     }
2303   }
2304 
2305   return false;
2306 }
2307 
2308 // If we happen to be doing an i64 load or store into a stack slot that has
2309 // less than a 4-byte alignment, then the frame-index elimination may need to
2310 // use an indexed load or store instruction (because the offset may not be a
2311 // multiple of 4). The extra register needed to hold the offset comes from the
2312 // register scavenger, and it is possible that the scavenger will need to use
2313 // an emergency spill slot. As a result, we need to make sure that a spill slot
2314 // is allocated when doing an i64 load/store into a less-than-4-byte-aligned
2315 // stack slot.
2316 static void fixupFuncForFI(SelectionDAG &DAG, int FrameIdx, EVT VT) {
2317   // FIXME: This does not handle the LWA case.
2318   if (VT != MVT::i64)
2319     return;
2320 
2321   // NOTE: We'll exclude negative FIs here, which come from argument
2322   // lowering, because there are no known test cases triggering this problem
2323   // using packed structures (or similar). We can remove this exclusion if
2324   // we find such a test case. The reason why this is so test-case driven is
2325   // because this entire 'fixup' is only to prevent crashes (from the
2326   // register scavenger) on not-really-valid inputs. For example, if we have:
2327   //   %a = alloca i1
2328   //   %b = bitcast i1* %a to i64*
  //   store i64 0, i64* %b
2330   // then the store should really be marked as 'align 1', but is not. If it
2331   // were marked as 'align 1' then the indexed form would have been
2332   // instruction-selected initially, and the problem this 'fixup' is preventing
2333   // won't happen regardless.
2334   if (FrameIdx < 0)
2335     return;
2336 
2337   MachineFunction &MF = DAG.getMachineFunction();
2338   MachineFrameInfo &MFI = MF.getFrameInfo();
2339 
2340   unsigned Align = MFI.getObjectAlignment(FrameIdx);
2341   if (Align >= 4)
2342     return;
2343 
2344   PPCFunctionInfo *FuncInfo = MF.getInfo<PPCFunctionInfo>();
2345   FuncInfo->setHasNonRISpills();
2346 }
2347 
2348 /// Returns true if the address N can be represented by a base register plus
2349 /// a signed 16-bit displacement [r+imm], and if it is not better
2350 /// represented as reg+reg.  If \p EncodingAlignment is non-zero, only accept
2351 /// displacements that are multiples of that value.
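/// For illustration, with EncodingAlignment 4 (a DS-form access): (add x, 8)
/// yields Disp = 8 and Base = x, while (add x, 6) is rejected here because
/// SelectAddressRegReg claims it as an [r+r] form instead.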
2352 bool PPCTargetLowering::SelectAddressRegImm(SDValue N, SDValue &Disp,
2353                                             SDValue &Base,
2354                                             SelectionDAG &DAG,
2355                                             unsigned EncodingAlignment) const {
  // FIXME: dl should come from the parent load or store, not from the address.
2357   SDLoc dl(N);
2358   // If this can be more profitably realized as r+r, fail.
2359   if (SelectAddressRegReg(N, Disp, Base, DAG, EncodingAlignment))
2360     return false;
2361 
2362   if (N.getOpcode() == ISD::ADD) {
2363     int16_t imm = 0;
2364     if (isIntS16Immediate(N.getOperand(1), imm) &&
2365         (!EncodingAlignment || (imm % EncodingAlignment) == 0)) {
2366       Disp = DAG.getTargetConstant(imm, dl, N.getValueType());
2367       if (FrameIndexSDNode *FI = dyn_cast<FrameIndexSDNode>(N.getOperand(0))) {
2368         Base = DAG.getTargetFrameIndex(FI->getIndex(), N.getValueType());
2369         fixupFuncForFI(DAG, FI->getIndex(), N.getValueType());
2370       } else {
2371         Base = N.getOperand(0);
2372       }
2373       return true; // [r+i]
2374     } else if (N.getOperand(1).getOpcode() == PPCISD::Lo) {
2375       // Match LOAD (ADD (X, Lo(G))).
2376       assert(!cast<ConstantSDNode>(N.getOperand(1).getOperand(1))->getZExtValue()
2377              && "Cannot handle constant offsets yet!");
2378       Disp = N.getOperand(1).getOperand(0);  // The global address.
2379       assert(Disp.getOpcode() == ISD::TargetGlobalAddress ||
2380              Disp.getOpcode() == ISD::TargetGlobalTLSAddress ||
2381              Disp.getOpcode() == ISD::TargetConstantPool ||
2382              Disp.getOpcode() == ISD::TargetJumpTable);
2383       Base = N.getOperand(0);
2384       return true;  // [&g+r]
2385     }
2386   } else if (N.getOpcode() == ISD::OR) {
2387     int16_t imm = 0;
2388     if (isIntS16Immediate(N.getOperand(1), imm) &&
2389         (!EncodingAlignment || (imm % EncodingAlignment) == 0)) {
2390       // If this is an or of disjoint bitfields, we can codegen this as an add
2391       // (for better address arithmetic) if the LHS and RHS of the OR are
2392       // provably disjoint.
2393       KnownBits LHSKnown = DAG.computeKnownBits(N.getOperand(0));
2394 
2395       if ((LHSKnown.Zero.getZExtValue()|~(uint64_t)imm) == ~0ULL) {
2396         // If all of the bits are known zero on the LHS or RHS, the add won't
2397         // carry.
2398         if (FrameIndexSDNode *FI =
2399               dyn_cast<FrameIndexSDNode>(N.getOperand(0))) {
2400           Base = DAG.getTargetFrameIndex(FI->getIndex(), N.getValueType());
2401           fixupFuncForFI(DAG, FI->getIndex(), N.getValueType());
2402         } else {
2403           Base = N.getOperand(0);
2404         }
2405         Disp = DAG.getTargetConstant(imm, dl, N.getValueType());
2406         return true;
2407       }
2408     }
2409   } else if (ConstantSDNode *CN = dyn_cast<ConstantSDNode>(N)) {
2410     // Loading from a constant address.
2411 
2412     // If this address fits entirely in a 16-bit sext immediate field, codegen
2413     // this as "d, 0"
2414     int16_t Imm;
2415     if (isIntS16Immediate(CN, Imm) &&
2416         (!EncodingAlignment || (Imm % EncodingAlignment) == 0)) {
2417       Disp = DAG.getTargetConstant(Imm, dl, CN->getValueType(0));
2418       Base = DAG.getRegister(Subtarget.isPPC64() ? PPC::ZERO8 : PPC::ZERO,
2419                              CN->getValueType(0));
2420       return true;
2421     }
2422 
2423     // Handle 32-bit sext immediates with LIS + addr mode.
2424     if ((CN->getValueType(0) == MVT::i32 ||
2425          (int64_t)CN->getZExtValue() == (int)CN->getZExtValue()) &&
2426         (!EncodingAlignment || (CN->getZExtValue() % EncodingAlignment) == 0)) {
2427       int Addr = (int)CN->getZExtValue();
2428 
2429       // Otherwise, break this down into an LIS + disp.
2430       Disp = DAG.getTargetConstant((short)Addr, dl, MVT::i32);
2431 
2432       Base = DAG.getTargetConstant((Addr - (signed short)Addr) >> 16, dl,
2433                                    MVT::i32);
2434       unsigned Opc = CN->getValueType(0) == MVT::i32 ? PPC::LIS : PPC::LIS8;
2435       Base = SDValue(DAG.getMachineNode(Opc, dl, CN->getValueType(0), Base), 0);
2436       return true;
2437     }
2438   }
2439 
2440   Disp = DAG.getTargetConstant(0, dl, getPointerTy(DAG.getDataLayout()));
2441   if (FrameIndexSDNode *FI = dyn_cast<FrameIndexSDNode>(N)) {
2442     Base = DAG.getTargetFrameIndex(FI->getIndex(), N.getValueType());
2443     fixupFuncForFI(DAG, FI->getIndex(), N.getValueType());
2444   } else
2445     Base = N;
2446   return true;      // [r+0]
2447 }
2448 
/// SelectAddressRegRegOnly - Given the specified address, force it to be
2450 /// represented as an indexed [r+r] operation.
2451 bool PPCTargetLowering::SelectAddressRegRegOnly(SDValue N, SDValue &Base,
2452                                                 SDValue &Index,
2453                                                 SelectionDAG &DAG) const {
2454   // Check to see if we can easily represent this as an [r+r] address.  This
2455   // will fail if it thinks that the address is more profitably represented as
2456   // reg+imm, e.g. where imm = 0.
2457   if (SelectAddressRegReg(N, Base, Index, DAG))
2458     return true;
2459 
2460   // If the address is the result of an add, we will utilize the fact that the
2461   // address calculation includes an implicit add.  However, we can reduce
2462   // register pressure if we do not materialize a constant just for use as the
  // index register.  Therefore we only split the add into separate base and
  // index operands when it is not an add of a value and a 16-bit signed
  // constant where both operands have a single use.
2465   int16_t imm = 0;
2466   if (N.getOpcode() == ISD::ADD &&
2467       (!isIntS16Immediate(N.getOperand(1), imm) ||
2468        !N.getOperand(1).hasOneUse() || !N.getOperand(0).hasOneUse())) {
2469     Base = N.getOperand(0);
2470     Index = N.getOperand(1);
2471     return true;
2472   }
2473 
2474   // Otherwise, do it the hard way, using R0 as the base register.
2475   Base = DAG.getRegister(Subtarget.isPPC64() ? PPC::ZERO8 : PPC::ZERO,
2476                          N.getValueType());
2477   Index = N;
2478   return true;
2479 }
2480 
/// Returns true if we should use a direct load into a vector register
/// (such as lxsd or lfd) instead of a load into a GPR + direct move sequence.
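/// As an illustrative (not source-verified) example, for the DAG
///   t1: i64,ch = load ...
///   t2: v2i64 = scalar_to_vector t1
/// a single lxsd into a VSX register is preferable to an ld followed by a
/// direct move (mtvsrd).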
2483 static bool usePartialVectorLoads(SDNode *N, const PPCSubtarget& ST) {
2484 
  // If there are any uses other than a scalar_to_vector, then we should
  // keep it as a scalar load -> direct move pattern to prevent multiple
  // loads.
2488   LoadSDNode *LD = dyn_cast<LoadSDNode>(N);
2489   if (!LD)
2490     return false;
2491 
2492   EVT MemVT = LD->getMemoryVT();
2493   if (!MemVT.isSimple())
2494     return false;
2495   switch(MemVT.getSimpleVT().SimpleTy) {
2496   case MVT::i64:
2497     break;
2498   case MVT::i32:
2499     if (!ST.hasP8Vector())
2500       return false;
2501     break;
2502   case MVT::i16:
2503   case MVT::i8:
2504     if (!ST.hasP9Vector())
2505       return false;
2506     break;
2507   default:
2508     return false;
2509   }
2510 
2511   SDValue LoadedVal(N, 0);
2512   if (!LoadedVal.hasOneUse())
2513     return false;
2514 
2515   for (SDNode::use_iterator UI = LD->use_begin(), UE = LD->use_end();
2516        UI != UE; ++UI)
2517     if (UI.getUse().get().getResNo() == 0 &&
2518         UI->getOpcode() != ISD::SCALAR_TO_VECTOR)
2519       return false;
2520 
2521   return true;
2522 }
2523 
/// getPreIndexedAddressParts - Returns true if the node's address can be
/// legally represented as a pre-indexed load / store address; on success,
/// the base pointer, offset pointer, and addressing mode are returned by
/// reference.
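/// For illustration (assumed, not taken from the source): a pre-incremented
/// load such as "lwzu r5, 4(r3)" loads from r3+4 and also updates r3 to
/// r3+4; this corresponds to ISD::PRE_INC with Base = r3 and Offset = 4.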
2527 bool PPCTargetLowering::getPreIndexedAddressParts(SDNode *N, SDValue &Base,
2528                                                   SDValue &Offset,
2529                                                   ISD::MemIndexedMode &AM,
2530                                                   SelectionDAG &DAG) const {
2531   if (DisablePPCPreinc) return false;
2532 
2533   bool isLoad = true;
2534   SDValue Ptr;
2535   EVT VT;
2536   unsigned Alignment;
2537   if (LoadSDNode *LD = dyn_cast<LoadSDNode>(N)) {
2538     Ptr = LD->getBasePtr();
2539     VT = LD->getMemoryVT();
2540     Alignment = LD->getAlignment();
2541   } else if (StoreSDNode *ST = dyn_cast<StoreSDNode>(N)) {
2542     Ptr = ST->getBasePtr();
2543     VT  = ST->getMemoryVT();
2544     Alignment = ST->getAlignment();
2545     isLoad = false;
2546   } else
2547     return false;
2548 
  // Do not generate pre-inc forms for specific loads that feed scalar_to_vector
  // instructions because we can fold these into a more efficient instruction
  // (such as LXSD) instead.
2552   if (isLoad && usePartialVectorLoads(N, Subtarget)) {
2553     return false;
2554   }
2555 
2556   // PowerPC doesn't have preinc load/store instructions for vectors (except
2557   // for QPX, which does have preinc r+r forms).
2558   if (VT.isVector()) {
2559     if (!Subtarget.hasQPX() || (VT != MVT::v4f64 && VT != MVT::v4f32)) {
2560       return false;
2561     } else if (SelectAddressRegRegOnly(Ptr, Offset, Base, DAG)) {
2562       AM = ISD::PRE_INC;
2563       return true;
2564     }
2565   }
2566 
2567   if (SelectAddressRegReg(Ptr, Base, Offset, DAG)) {
2568     // Common code will reject creating a pre-inc form if the base pointer
2569     // is a frame index, or if N is a store and the base pointer is either
2570     // the same as or a predecessor of the value being stored.  Check for
2571     // those situations here, and try with swapped Base/Offset instead.
2572     bool Swap = false;
2573 
2574     if (isa<FrameIndexSDNode>(Base) || isa<RegisterSDNode>(Base))
2575       Swap = true;
2576     else if (!isLoad) {
2577       SDValue Val = cast<StoreSDNode>(N)->getValue();
2578       if (Val == Base || Base.getNode()->isPredecessorOf(Val.getNode()))
2579         Swap = true;
2580     }
2581 
2582     if (Swap)
2583       std::swap(Base, Offset);
2584 
2585     AM = ISD::PRE_INC;
2586     return true;
2587   }
2588 
2589   // LDU/STU can only handle immediates that are a multiple of 4.
2590   if (VT != MVT::i64) {
2591     if (!SelectAddressRegImm(Ptr, Offset, Base, DAG, 0))
2592       return false;
2593   } else {
2594     // LDU/STU need an address with at least 4-byte alignment.
2595     if (Alignment < 4)
2596       return false;
2597 
2598     if (!SelectAddressRegImm(Ptr, Offset, Base, DAG, 4))
2599       return false;
2600   }
2601 
2602   if (LoadSDNode *LD = dyn_cast<LoadSDNode>(N)) {
2603     // PPC64 doesn't have lwau, but it does have lwaux.  Reject preinc load of
2604     // sext i32 to i64 when addr mode is r+i.
2605     if (LD->getValueType(0) == MVT::i64 && LD->getMemoryVT() == MVT::i32 &&
2606         LD->getExtensionType() == ISD::SEXTLOAD &&
2607         isa<ConstantSDNode>(Offset))
2608       return false;
2609   }
2610 
2611   AM = ISD::PRE_INC;
2612   return true;
2613 }
2614 
2615 //===----------------------------------------------------------------------===//
2616 //  LowerOperation implementation
2617 //===----------------------------------------------------------------------===//
2618 
/// Set HiOpFlags and LoOpFlags to the target MO flags used when referencing
/// labels, adding PIC and non-lazy-pointer flags as required.
2621 static void getLabelAccessInfo(bool IsPIC, const PPCSubtarget &Subtarget,
2622                                unsigned &HiOpFlags, unsigned &LoOpFlags,
2623                                const GlobalValue *GV = nullptr) {
2624   HiOpFlags = PPCII::MO_HA;
2625   LoOpFlags = PPCII::MO_LO;
2626 
  // Use the PIC base register only in the PIC relocation model.
2628   if (IsPIC) {
2629     HiOpFlags |= PPCII::MO_PIC_FLAG;
2630     LoOpFlags |= PPCII::MO_PIC_FLAG;
2631   }
2632 
2633   // If this is a reference to a global value that requires a non-lazy-ptr, make
2634   // sure that instruction lowering adds it.
2635   if (GV && Subtarget.hasLazyResolverStub(GV)) {
2636     HiOpFlags |= PPCII::MO_NLP_FLAG;
2637     LoOpFlags |= PPCII::MO_NLP_FLAG;
2638 
2639     if (GV->hasHiddenVisibility()) {
2640       HiOpFlags |= PPCII::MO_NLP_HIDDEN_FLAG;
2641       LoOpFlags |= PPCII::MO_NLP_HIDDEN_FLAG;
2642     }
2643   }
2644 }
2645 
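// Sketch of the code this produces (illustrative): for a non-PIC reference,
// the Hi/Lo pair below lowers to something like
//   lis  rT, sym@ha
//   addi rD, rT, sym@l
// and in PIC mode the PIC base register is added to the high part first.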
2646 static SDValue LowerLabelRef(SDValue HiPart, SDValue LoPart, bool isPIC,
2647                              SelectionDAG &DAG) {
2648   SDLoc DL(HiPart);
2649   EVT PtrVT = HiPart.getValueType();
2650   SDValue Zero = DAG.getConstant(0, DL, PtrVT);
2651 
2652   SDValue Hi = DAG.getNode(PPCISD::Hi, DL, PtrVT, HiPart, Zero);
2653   SDValue Lo = DAG.getNode(PPCISD::Lo, DL, PtrVT, LoPart, Zero);
2654 
2655   // With PIC, the first instruction is actually "GR+hi(&G)".
2656   if (isPIC)
2657     Hi = DAG.getNode(ISD::ADD, DL, PtrVT,
2658                      DAG.getNode(PPCISD::GlobalBaseReg, DL, PtrVT), Hi);
2659 
2660   // Generate non-pic code that has direct accesses to the constant pool.
2661   // The address of the global is just (hi(&g)+lo(&g)).
2662   return DAG.getNode(ISD::ADD, DL, PtrVT, Hi, Lo);
2663 }
2664 
2665 static void setUsesTOCBasePtr(MachineFunction &MF) {
2666   PPCFunctionInfo *FuncInfo = MF.getInfo<PPCFunctionInfo>();
2667   FuncInfo->setUsesTOCBasePtr();
2668 }
2669 
2670 static void setUsesTOCBasePtr(SelectionDAG &DAG) {
2671   setUsesTOCBasePtr(DAG.getMachineFunction());
2672 }
2673 
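// The TOC_ENTRY node built below is eventually selected to a TOC-relative
// load; on 64-bit targets this is roughly (illustrative)
//   ld rD, sym@toc(r2)
// (or an addis/ld pair for larger code models), with X2/R2 or the 32-bit
// PIC base supplying the TOC/GOT pointer.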
2674 SDValue PPCTargetLowering::getTOCEntry(SelectionDAG &DAG, const SDLoc &dl,
2675                                        SDValue GA) const {
2676   const bool Is64Bit = Subtarget.isPPC64();
2677   EVT VT = Is64Bit ? MVT::i64 : MVT::i32;
2678   SDValue Reg = Is64Bit ? DAG.getRegister(PPC::X2, VT)
2679                         : Subtarget.isAIXABI()
2680                               ? DAG.getRegister(PPC::R2, VT)
2681                               : DAG.getNode(PPCISD::GlobalBaseReg, dl, VT);
2682   SDValue Ops[] = { GA, Reg };
2683   return DAG.getMemIntrinsicNode(
2684       PPCISD::TOC_ENTRY, dl, DAG.getVTList(VT, MVT::Other), Ops, VT,
2685       MachinePointerInfo::getGOT(DAG.getMachineFunction()), 0,
2686       MachineMemOperand::MOLoad);
2687 }
2688 
2689 SDValue PPCTargetLowering::LowerConstantPool(SDValue Op,
2690                                              SelectionDAG &DAG) const {
2691   EVT PtrVT = Op.getValueType();
2692   ConstantPoolSDNode *CP = cast<ConstantPoolSDNode>(Op);
2693   const Constant *C = CP->getConstVal();
2694 
  // 64-bit SVR4 ABI code is always position-independent.
  // The actual address of the constant pool entry is stored in the TOC.
2697   if (Subtarget.isSVR4ABI() && Subtarget.isPPC64()) {
2698     setUsesTOCBasePtr(DAG);
2699     SDValue GA = DAG.getTargetConstantPool(C, PtrVT, CP->getAlignment(), 0);
2700     return getTOCEntry(DAG, SDLoc(CP), GA);
2701   }
2702 
2703   unsigned MOHiFlag, MOLoFlag;
2704   bool IsPIC = isPositionIndependent();
2705   getLabelAccessInfo(IsPIC, Subtarget, MOHiFlag, MOLoFlag);
2706 
2707   if (IsPIC && Subtarget.isSVR4ABI()) {
2708     SDValue GA = DAG.getTargetConstantPool(C, PtrVT, CP->getAlignment(),
2709                                            PPCII::MO_PIC_FLAG);
2710     return getTOCEntry(DAG, SDLoc(CP), GA);
2711   }
2712 
2713   SDValue CPIHi =
2714     DAG.getTargetConstantPool(C, PtrVT, CP->getAlignment(), 0, MOHiFlag);
2715   SDValue CPILo =
2716     DAG.getTargetConstantPool(C, PtrVT, CP->getAlignment(), 0, MOLoFlag);
2717   return LowerLabelRef(CPIHi, CPILo, IsPIC, DAG);
2718 }
2719 
// For 64-bit PowerPC, prefer the more compact relative encodings.
// This trades 32 bits per jump table entry for one or two instructions
// at the jump site.
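// With EK_LabelDifference32, each entry is emitted as a 32-bit label
// difference (roughly ".long LBB - LJTI") instead of a full 64-bit pointer,
// and the dispatch sequence adds the table base back to the loaded entry.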
2723 unsigned PPCTargetLowering::getJumpTableEncoding() const {
2724   if (isJumpTableRelative())
2725     return MachineJumpTableInfo::EK_LabelDifference32;
2726 
2727   return TargetLowering::getJumpTableEncoding();
2728 }
2729 
2730 bool PPCTargetLowering::isJumpTableRelative() const {
2731   if (Subtarget.isPPC64())
2732     return true;
2733   return TargetLowering::isJumpTableRelative();
2734 }
2735 
2736 SDValue PPCTargetLowering::getPICJumpTableRelocBase(SDValue Table,
2737                                                     SelectionDAG &DAG) const {
2738   if (!Subtarget.isPPC64())
2739     return TargetLowering::getPICJumpTableRelocBase(Table, DAG);
2740 
2741   switch (getTargetMachine().getCodeModel()) {
2742   case CodeModel::Small:
2743   case CodeModel::Medium:
2744     return TargetLowering::getPICJumpTableRelocBase(Table, DAG);
2745   default:
2746     return DAG.getNode(PPCISD::GlobalBaseReg, SDLoc(),
2747                        getPointerTy(DAG.getDataLayout()));
2748   }
2749 }
2750 
2751 const MCExpr *
2752 PPCTargetLowering::getPICJumpTableRelocBaseExpr(const MachineFunction *MF,
2753                                                 unsigned JTI,
2754                                                 MCContext &Ctx) const {
2755   if (!Subtarget.isPPC64())
2756     return TargetLowering::getPICJumpTableRelocBaseExpr(MF, JTI, Ctx);
2757 
2758   switch (getTargetMachine().getCodeModel()) {
2759   case CodeModel::Small:
2760   case CodeModel::Medium:
2761     return TargetLowering::getPICJumpTableRelocBaseExpr(MF, JTI, Ctx);
2762   default:
2763     return MCSymbolRefExpr::create(MF->getPICBaseSymbol(), Ctx);
2764   }
2765 }
2766 
2767 SDValue PPCTargetLowering::LowerJumpTable(SDValue Op, SelectionDAG &DAG) const {
2768   EVT PtrVT = Op.getValueType();
2769   JumpTableSDNode *JT = cast<JumpTableSDNode>(Op);
2770 
  // 64-bit SVR4 ABI code is always position-independent.
  // The actual address of the jump table is stored in the TOC.
2773   if (Subtarget.isSVR4ABI() && Subtarget.isPPC64()) {
2774     setUsesTOCBasePtr(DAG);
2775     SDValue GA = DAG.getTargetJumpTable(JT->getIndex(), PtrVT);
2776     return getTOCEntry(DAG, SDLoc(JT), GA);
2777   }
2778 
2779   unsigned MOHiFlag, MOLoFlag;
2780   bool IsPIC = isPositionIndependent();
2781   getLabelAccessInfo(IsPIC, Subtarget, MOHiFlag, MOLoFlag);
2782 
2783   if (IsPIC && Subtarget.isSVR4ABI()) {
2784     SDValue GA = DAG.getTargetJumpTable(JT->getIndex(), PtrVT,
2785                                         PPCII::MO_PIC_FLAG);
2786     return getTOCEntry(DAG, SDLoc(GA), GA);
2787   }
2788 
2789   SDValue JTIHi = DAG.getTargetJumpTable(JT->getIndex(), PtrVT, MOHiFlag);
2790   SDValue JTILo = DAG.getTargetJumpTable(JT->getIndex(), PtrVT, MOLoFlag);
2791   return LowerLabelRef(JTIHi, JTILo, IsPIC, DAG);
2792 }
2793 
2794 SDValue PPCTargetLowering::LowerBlockAddress(SDValue Op,
2795                                              SelectionDAG &DAG) const {
2796   EVT PtrVT = Op.getValueType();
2797   BlockAddressSDNode *BASDN = cast<BlockAddressSDNode>(Op);
2798   const BlockAddress *BA = BASDN->getBlockAddress();
2799 
2800   // 64-bit SVR4 ABI code is always position-independent.
2801   // The actual BlockAddress is stored in the TOC.
2802   if (Subtarget.isSVR4ABI() &&
2803       (Subtarget.isPPC64() || isPositionIndependent())) {
2804     if (Subtarget.isPPC64())
2805       setUsesTOCBasePtr(DAG);
2806     SDValue GA = DAG.getTargetBlockAddress(BA, PtrVT, BASDN->getOffset());
2807     return getTOCEntry(DAG, SDLoc(BASDN), GA);
2808   }
2809 
2810   unsigned MOHiFlag, MOLoFlag;
2811   bool IsPIC = isPositionIndependent();
2812   getLabelAccessInfo(IsPIC, Subtarget, MOHiFlag, MOLoFlag);
2813   SDValue TgtBAHi = DAG.getTargetBlockAddress(BA, PtrVT, 0, MOHiFlag);
2814   SDValue TgtBALo = DAG.getTargetBlockAddress(BA, PtrVT, 0, MOLoFlag);
2815   return LowerLabelRef(TgtBAHi, TgtBALo, IsPIC, DAG);
2816 }
2817 
2818 SDValue PPCTargetLowering::LowerGlobalTLSAddress(SDValue Op,
2819                                               SelectionDAG &DAG) const {
2820   // FIXME: TLS addresses currently use medium model code sequences,
2821   // which is the most useful form.  Eventually support for small and
2822   // large models could be added if users need it, at the cost of
2823   // additional complexity.
2824   GlobalAddressSDNode *GA = cast<GlobalAddressSDNode>(Op);
2825   if (DAG.getTarget().useEmulatedTLS())
2826     return LowerToTLSEmulatedModel(GA, DAG);
2827 
2828   SDLoc dl(GA);
2829   const GlobalValue *GV = GA->getGlobal();
2830   EVT PtrVT = getPointerTy(DAG.getDataLayout());
2831   bool is64bit = Subtarget.isPPC64();
2832   const Module *M = DAG.getMachineFunction().getFunction().getParent();
2833   PICLevel::Level picLevel = M->getPICLevel();
2834 
2835   const TargetMachine &TM = getTargetMachine();
2836   TLSModel::Model Model = TM.getTLSModel(GV);
2837 
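  // Local-exec: the offset from the thread pointer is known at link time.
  // The nodes below lower to roughly (illustrative, 64-bit ELF):
  //   addis rT, r13, sym@tprel@ha
  //   addi  rD, rT,  sym@tprel@l
  // with r13 (r2 on 32-bit) holding the thread pointer.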
2838   if (Model == TLSModel::LocalExec) {
2839     SDValue TGAHi = DAG.getTargetGlobalAddress(GV, dl, PtrVT, 0,
2840                                                PPCII::MO_TPREL_HA);
2841     SDValue TGALo = DAG.getTargetGlobalAddress(GV, dl, PtrVT, 0,
2842                                                PPCII::MO_TPREL_LO);
2843     SDValue TLSReg = is64bit ? DAG.getRegister(PPC::X13, MVT::i64)
2844                              : DAG.getRegister(PPC::R2, MVT::i32);
2845 
2846     SDValue Hi = DAG.getNode(PPCISD::Hi, dl, PtrVT, TGAHi, TLSReg);
2847     return DAG.getNode(PPCISD::Lo, dl, PtrVT, TGALo, Hi);
2848   }
2849 
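  // Initial-exec: the TP-relative offset is loaded from the GOT.  On 64-bit
  // ELF this lowers to roughly (illustrative):
  //   addis rT, r2, sym@got@tprel@ha
  //   ld    rT, sym@got@tprel@l(rT)
  //   add   rD, rT, r13          (carrying a sym@tls marker)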
2850   if (Model == TLSModel::InitialExec) {
2851     SDValue TGA = DAG.getTargetGlobalAddress(GV, dl, PtrVT, 0, 0);
2852     SDValue TGATLS = DAG.getTargetGlobalAddress(GV, dl, PtrVT, 0,
2853                                                 PPCII::MO_TLS);
2854     SDValue GOTPtr;
2855     if (is64bit) {
2856       setUsesTOCBasePtr(DAG);
2857       SDValue GOTReg = DAG.getRegister(PPC::X2, MVT::i64);
2858       GOTPtr = DAG.getNode(PPCISD::ADDIS_GOT_TPREL_HA, dl,
2859                            PtrVT, GOTReg, TGA);
2860     } else {
2861       if (!TM.isPositionIndependent())
2862         GOTPtr = DAG.getNode(PPCISD::PPC32_GOT, dl, PtrVT);
2863       else if (picLevel == PICLevel::SmallPIC)
2864         GOTPtr = DAG.getNode(PPCISD::GlobalBaseReg, dl, PtrVT);
2865       else
2866         GOTPtr = DAG.getNode(PPCISD::PPC32_PICGOT, dl, PtrVT);
2867     }
2868     SDValue TPOffset = DAG.getNode(PPCISD::LD_GOT_TPREL_L, dl,
2869                                    PtrVT, TGA, GOTPtr);
2870     return DAG.getNode(PPCISD::ADD_TLS, dl, PtrVT, TPOffset, TGATLS);
2871   }
2872 
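  // General-dynamic: compute the GOT slot address and call __tls_get_addr.
  // On 64-bit ELF this lowers to roughly (illustrative):
  //   addis r3, r2, sym@got@tlsgd@ha
  //   addi  r3, r3, sym@got@tlsgd@l
  //   bl    __tls_get_addr(sym@tlsgd)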
2873   if (Model == TLSModel::GeneralDynamic) {
2874     SDValue TGA = DAG.getTargetGlobalAddress(GV, dl, PtrVT, 0, 0);
2875     SDValue GOTPtr;
2876     if (is64bit) {
2877       setUsesTOCBasePtr(DAG);
2878       SDValue GOTReg = DAG.getRegister(PPC::X2, MVT::i64);
2879       GOTPtr = DAG.getNode(PPCISD::ADDIS_TLSGD_HA, dl, PtrVT,
2880                                    GOTReg, TGA);
2881     } else {
2882       if (picLevel == PICLevel::SmallPIC)
2883         GOTPtr = DAG.getNode(PPCISD::GlobalBaseReg, dl, PtrVT);
2884       else
2885         GOTPtr = DAG.getNode(PPCISD::PPC32_PICGOT, dl, PtrVT);
2886     }
2887     return DAG.getNode(PPCISD::ADDI_TLSGD_L_ADDR, dl, PtrVT,
2888                        GOTPtr, TGA, TGA);
2889   }
2890 
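  // Local-dynamic: one __tls_get_addr call yields the module's TLS base,
  // then the variable's DTP-relative offset is added.  Roughly (illustrative,
  // 64-bit ELF):
  //   addis r3, r2, sym@got@tlsld@ha
  //   addi  r3, r3, sym@got@tlsld@l
  //   bl    __tls_get_addr(sym@tlsld)
  //   addis rT, r3, sym@dtprel@ha
  //   addi  rD, rT, sym@dtprel@l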
2891   if (Model == TLSModel::LocalDynamic) {
2892     SDValue TGA = DAG.getTargetGlobalAddress(GV, dl, PtrVT, 0, 0);
2893     SDValue GOTPtr;
2894     if (is64bit) {
2895       setUsesTOCBasePtr(DAG);
2896       SDValue GOTReg = DAG.getRegister(PPC::X2, MVT::i64);
2897       GOTPtr = DAG.getNode(PPCISD::ADDIS_TLSLD_HA, dl, PtrVT,
2898                            GOTReg, TGA);
2899     } else {
2900       if (picLevel == PICLevel::SmallPIC)
2901         GOTPtr = DAG.getNode(PPCISD::GlobalBaseReg, dl, PtrVT);
2902       else
2903         GOTPtr = DAG.getNode(PPCISD::PPC32_PICGOT, dl, PtrVT);
2904     }
2905     SDValue TLSAddr = DAG.getNode(PPCISD::ADDI_TLSLD_L_ADDR, dl,
2906                                   PtrVT, GOTPtr, TGA, TGA);
2907     SDValue DtvOffsetHi = DAG.getNode(PPCISD::ADDIS_DTPREL_HA, dl,
2908                                       PtrVT, TLSAddr, TGA);
2909     return DAG.getNode(PPCISD::ADDI_DTPREL_L, dl, PtrVT, DtvOffsetHi, TGA);
2910   }
2911 
2912   llvm_unreachable("Unknown TLS model!");
2913 }
2914 
2915 SDValue PPCTargetLowering::LowerGlobalAddress(SDValue Op,
2916                                               SelectionDAG &DAG) const {
2917   EVT PtrVT = Op.getValueType();
2918   GlobalAddressSDNode *GSDN = cast<GlobalAddressSDNode>(Op);
2919   SDLoc DL(GSDN);
2920   const GlobalValue *GV = GSDN->getGlobal();
2921 
2922   // 64-bit SVR4 ABI & AIX ABI code is always position-independent.
2923   // The actual address of the GlobalValue is stored in the TOC.
2924   if ((Subtarget.isSVR4ABI() && Subtarget.isPPC64()) || Subtarget.isAIXABI()) {
2925     setUsesTOCBasePtr(DAG);
2926     SDValue GA = DAG.getTargetGlobalAddress(GV, DL, PtrVT, GSDN->getOffset());
2927     return getTOCEntry(DAG, DL, GA);
2928   }
2929 
2930   unsigned MOHiFlag, MOLoFlag;
2931   bool IsPIC = isPositionIndependent();
2932   getLabelAccessInfo(IsPIC, Subtarget, MOHiFlag, MOLoFlag, GV);
2933 
2934   if (IsPIC && Subtarget.isSVR4ABI()) {
2935     SDValue GA = DAG.getTargetGlobalAddress(GV, DL, PtrVT,
2936                                             GSDN->getOffset(),
2937                                             PPCII::MO_PIC_FLAG);
2938     return getTOCEntry(DAG, DL, GA);
2939   }
2940 
2941   SDValue GAHi =
2942     DAG.getTargetGlobalAddress(GV, DL, PtrVT, GSDN->getOffset(), MOHiFlag);
2943   SDValue GALo =
2944     DAG.getTargetGlobalAddress(GV, DL, PtrVT, GSDN->getOffset(), MOLoFlag);
2945 
2946   SDValue Ptr = LowerLabelRef(GAHi, GALo, IsPIC, DAG);
2947 
2948   // If the global reference is actually to a non-lazy-pointer, we have to do an
2949   // extra load to get the address of the global.
2950   if (MOHiFlag & PPCII::MO_NLP_FLAG)
2951     Ptr = DAG.getLoad(PtrVT, DL, DAG.getEntryNode(), Ptr, MachinePointerInfo());
2952   return Ptr;
2953 }
2954 
2955 SDValue PPCTargetLowering::LowerSETCC(SDValue Op, SelectionDAG &DAG) const {
2956   ISD::CondCode CC = cast<CondCodeSDNode>(Op.getOperand(2))->get();
2957   SDLoc dl(Op);
2958 
2959   if (Op.getValueType() == MVT::v2i64) {
2960     // When the operands themselves are v2i64 values, we need to do something
2961     // special because VSX has no underlying comparison operations for these.
2962     if (Op.getOperand(0).getValueType() == MVT::v2i64) {
      // Equality can be handled by casting to the legal type for Altivec
      // comparisons; everything else needs to be expanded.
2965       if (CC == ISD::SETEQ || CC == ISD::SETNE) {
2966         return DAG.getNode(ISD::BITCAST, dl, MVT::v2i64,
2967                  DAG.getSetCC(dl, MVT::v4i32,
2968                    DAG.getNode(ISD::BITCAST, dl, MVT::v4i32, Op.getOperand(0)),
2969                    DAG.getNode(ISD::BITCAST, dl, MVT::v4i32, Op.getOperand(1)),
2970                    CC));
2971       }
2972 
2973       return SDValue();
2974     }
2975 
2976     // We handle most of these in the usual way.
2977     return Op;
2978   }
2979 
2980   // If we're comparing for equality to zero, expose the fact that this is
2981   // implemented as a ctlz/srl pair on ppc, so that the dag combiner can
2982   // fold the new nodes.
2983   if (SDValue V = lowerCmpEqZeroToCtlzSrl(Op, DAG))
2984     return V;
2985 
2986   if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op.getOperand(1))) {
2987     // Leave comparisons against 0 and -1 alone for now, since they're usually
2988     // optimized.  FIXME: revisit this when we can custom lower all setcc
2989     // optimizations.
2990     if (C->isAllOnesValue() || C->isNullValue())
2991       return SDValue();
2992   }
2993 
2994   // If we have an integer seteq/setne, turn it into a compare against zero
2995   // by xor'ing the rhs with the lhs, which is faster than setting a
2996   // condition register, reading it back out, and masking the correct bit.  The
2997   // normal approach here uses sub to do this instead of xor.  Using xor exposes
2998   // the result to other bit-twiddling opportunities.
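  // For example (illustrative): (seteq a, b) becomes (seteq (xor a, b), 0),
  // which a later visit through lowerCmpEqZeroToCtlzSrl can reduce to a
  // cntlzw/srwi pair.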
2999   EVT LHSVT = Op.getOperand(0).getValueType();
3000   if (LHSVT.isInteger() && (CC == ISD::SETEQ || CC == ISD::SETNE)) {
3001     EVT VT = Op.getValueType();
    SDValue Xor = DAG.getNode(ISD::XOR, dl, LHSVT, Op.getOperand(0),
                              Op.getOperand(1));
    return DAG.getSetCC(dl, VT, Xor, DAG.getConstant(0, dl, LHSVT), CC);
3005   }
3006   return SDValue();
3007 }
3008 
3009 SDValue PPCTargetLowering::LowerVAARG(SDValue Op, SelectionDAG &DAG) const {
3010   SDNode *Node = Op.getNode();
3011   EVT VT = Node->getValueType(0);
3012   EVT PtrVT = getPointerTy(DAG.getDataLayout());
3013   SDValue InChain = Node->getOperand(0);
3014   SDValue VAListPtr = Node->getOperand(1);
3015   const Value *SV = cast<SrcValueSDNode>(Node->getOperand(2))->getValue();
3016   SDLoc dl(Node);
3017 
3018   assert(!Subtarget.isPPC64() && "LowerVAARG is PPC32 only");
3019 
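  // Overview of the 32-bit SVR4 lowering below: read the gpr/fpr index byte
  // out of the va_list, pick either reg_save_area + index * size or
  // overflow_arg_area as the source address, then store back the bumped
  // index (and, when spilled to the stack, the bumped overflow pointer) for
  // the next va_arg.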
3020   // gpr_index
3021   SDValue GprIndex = DAG.getExtLoad(ISD::ZEXTLOAD, dl, MVT::i32, InChain,
3022                                     VAListPtr, MachinePointerInfo(SV), MVT::i8);
3023   InChain = GprIndex.getValue(1);
3024 
3025   if (VT == MVT::i64) {
3026     // Check if GprIndex is even
3027     SDValue GprAnd = DAG.getNode(ISD::AND, dl, MVT::i32, GprIndex,
3028                                  DAG.getConstant(1, dl, MVT::i32));
3029     SDValue CC64 = DAG.getSetCC(dl, MVT::i32, GprAnd,
3030                                 DAG.getConstant(0, dl, MVT::i32), ISD::SETNE);
3031     SDValue GprIndexPlusOne = DAG.getNode(ISD::ADD, dl, MVT::i32, GprIndex,
3032                                           DAG.getConstant(1, dl, MVT::i32));
3033     // Align GprIndex to be even if it isn't
3034     GprIndex = DAG.getNode(ISD::SELECT, dl, MVT::i32, CC64, GprIndexPlusOne,
3035                            GprIndex);
3036   }
3037 
3038   // fpr index is 1 byte after gpr
3039   SDValue FprPtr = DAG.getNode(ISD::ADD, dl, PtrVT, VAListPtr,
3040                                DAG.getConstant(1, dl, MVT::i32));
3041 
3042   // fpr
3043   SDValue FprIndex = DAG.getExtLoad(ISD::ZEXTLOAD, dl, MVT::i32, InChain,
3044                                     FprPtr, MachinePointerInfo(SV), MVT::i8);
3045   InChain = FprIndex.getValue(1);
3046 
3047   SDValue RegSaveAreaPtr = DAG.getNode(ISD::ADD, dl, PtrVT, VAListPtr,
3048                                        DAG.getConstant(8, dl, MVT::i32));
3049 
3050   SDValue OverflowAreaPtr = DAG.getNode(ISD::ADD, dl, PtrVT, VAListPtr,
3051                                         DAG.getConstant(4, dl, MVT::i32));
3052 
3053   // areas
3054   SDValue OverflowArea =
3055       DAG.getLoad(MVT::i32, dl, InChain, OverflowAreaPtr, MachinePointerInfo());
3056   InChain = OverflowArea.getValue(1);
3057 
3058   SDValue RegSaveArea =
3059       DAG.getLoad(MVT::i32, dl, InChain, RegSaveAreaPtr, MachinePointerInfo());
3060   InChain = RegSaveArea.getValue(1);
3061 
  // select overflow_area if index >= 8
3063   SDValue CC = DAG.getSetCC(dl, MVT::i32, VT.isInteger() ? GprIndex : FprIndex,
3064                             DAG.getConstant(8, dl, MVT::i32), ISD::SETLT);
3065 
3066   // adjustment constant gpr_index * 4/8
3067   SDValue RegConstant = DAG.getNode(ISD::MUL, dl, MVT::i32,
3068                                     VT.isInteger() ? GprIndex : FprIndex,
3069                                     DAG.getConstant(VT.isInteger() ? 4 : 8, dl,
3070                                                     MVT::i32));
3071 
3072   // OurReg = RegSaveArea + RegConstant
3073   SDValue OurReg = DAG.getNode(ISD::ADD, dl, PtrVT, RegSaveArea,
3074                                RegConstant);
3075 
3076   // Floating types are 32 bytes into RegSaveArea
3077   if (VT.isFloatingPoint())
3078     OurReg = DAG.getNode(ISD::ADD, dl, PtrVT, OurReg,
3079                          DAG.getConstant(32, dl, MVT::i32));
3080 
3081   // increase {f,g}pr_index by 1 (or 2 if VT is i64)
3082   SDValue IndexPlus1 = DAG.getNode(ISD::ADD, dl, MVT::i32,
3083                                    VT.isInteger() ? GprIndex : FprIndex,
3084                                    DAG.getConstant(VT == MVT::i64 ? 2 : 1, dl,
3085                                                    MVT::i32));
3086 
3087   InChain = DAG.getTruncStore(InChain, dl, IndexPlus1,
3088                               VT.isInteger() ? VAListPtr : FprPtr,
3089                               MachinePointerInfo(SV), MVT::i8);
3090 
3091   // determine if we should load from reg_save_area or overflow_area
3092   SDValue Result = DAG.getNode(ISD::SELECT, dl, PtrVT, CC, OurReg, OverflowArea);
3093 
  // increase overflow_area by 4/8 if gpr/fpr index >= 8
3095   SDValue OverflowAreaPlusN = DAG.getNode(ISD::ADD, dl, PtrVT, OverflowArea,
3096                                           DAG.getConstant(VT.isInteger() ? 4 : 8,
3097                                           dl, MVT::i32));
3098 
3099   OverflowArea = DAG.getNode(ISD::SELECT, dl, MVT::i32, CC, OverflowArea,
3100                              OverflowAreaPlusN);
3101 
3102   InChain = DAG.getTruncStore(InChain, dl, OverflowArea, OverflowAreaPtr,
3103                               MachinePointerInfo(), MVT::i32);
3104 
3105   return DAG.getLoad(VT, dl, InChain, Result, MachinePointerInfo());
3106 }
3107 
3108 SDValue PPCTargetLowering::LowerVACOPY(SDValue Op, SelectionDAG &DAG) const {
3109   assert(!Subtarget.isPPC64() && "LowerVACOPY is PPC32 only");
3110 
  // We have to copy the entire va_list struct:
  // 2*sizeof(char) + 2 bytes of alignment padding + 2*sizeof(char*) = 12 bytes
3113   return DAG.getMemcpy(Op.getOperand(0), Op,
3114                        Op.getOperand(1), Op.getOperand(2),
3115                        DAG.getConstant(12, SDLoc(Op), MVT::i32), 8, false, true,
3116                        false, MachinePointerInfo(), MachinePointerInfo());
3117 }
3118 
3119 SDValue PPCTargetLowering::LowerADJUST_TRAMPOLINE(SDValue Op,
3120                                                   SelectionDAG &DAG) const {
3121   return Op.getOperand(0);
3122 }
3123 
3124 SDValue PPCTargetLowering::LowerINIT_TRAMPOLINE(SDValue Op,
3125                                                 SelectionDAG &DAG) const {
3126   SDValue Chain = Op.getOperand(0);
3127   SDValue Trmp = Op.getOperand(1); // trampoline
3128   SDValue FPtr = Op.getOperand(2); // nested function
3129   SDValue Nest = Op.getOperand(3); // 'nest' parameter value
3130   SDLoc dl(Op);
3131 
3132   EVT PtrVT = getPointerTy(DAG.getDataLayout());
3133   bool isPPC64 = (PtrVT == MVT::i64);
3134   Type *IntPtrTy = DAG.getDataLayout().getIntPtrType(*DAG.getContext());
3135 
3136   TargetLowering::ArgListTy Args;
3137   TargetLowering::ArgListEntry Entry;
3138 
3139   Entry.Ty = IntPtrTy;
3140   Entry.Node = Trmp; Args.push_back(Entry);
3141 
3142   // TrampSize == (isPPC64 ? 48 : 40);
3143   Entry.Node = DAG.getConstant(isPPC64 ? 48 : 40, dl,
3144                                isPPC64 ? MVT::i64 : MVT::i32);
3145   Args.push_back(Entry);
3146 
3147   Entry.Node = FPtr; Args.push_back(Entry);
3148   Entry.Node = Nest; Args.push_back(Entry);
3149 
3150   // Lower to a call to __trampoline_setup(Trmp, TrampSize, FPtr, ctx_reg)
3151   TargetLowering::CallLoweringInfo CLI(DAG);
3152   CLI.setDebugLoc(dl).setChain(Chain).setLibCallee(
3153       CallingConv::C, Type::getVoidTy(*DAG.getContext()),
3154       DAG.getExternalSymbol("__trampoline_setup", PtrVT), std::move(Args));
3155 
3156   std::pair<SDValue, SDValue> CallResult = LowerCallTo(CLI);
3157   return CallResult.second;
3158 }
3159 
3160 SDValue PPCTargetLowering::LowerVASTART(SDValue Op, SelectionDAG &DAG) const {
3161   MachineFunction &MF = DAG.getMachineFunction();
3162   PPCFunctionInfo *FuncInfo = MF.getInfo<PPCFunctionInfo>();
3163   EVT PtrVT = getPointerTy(MF.getDataLayout());
3164 
3165   SDLoc dl(Op);
3166 
3167   if (Subtarget.isDarwinABI() || Subtarget.isPPC64()) {
3168     // vastart just stores the address of the VarArgsFrameIndex slot into the
3169     // memory location argument.
3170     SDValue FR = DAG.getFrameIndex(FuncInfo->getVarArgsFrameIndex(), PtrVT);
3171     const Value *SV = cast<SrcValueSDNode>(Op.getOperand(2))->getValue();
3172     return DAG.getStore(Op.getOperand(0), dl, FR, Op.getOperand(1),
3173                         MachinePointerInfo(SV));
3174   }
3175 
3176   // For the 32-bit SVR4 ABI we follow the layout of the va_list struct.
3177   // We suppose the given va_list is already allocated.
3178   //
3179   // typedef struct {
3180   //  char gpr;     /* index into the array of 8 GPRs
3181   //                 * stored in the register save area
3182   //                 * gpr=0 corresponds to r3,
3183   //                 * gpr=1 to r4, etc.
3184   //                 */
3185   //  char fpr;     /* index into the array of 8 FPRs
3186   //                 * stored in the register save area
3187   //                 * fpr=0 corresponds to f1,
3188   //                 * fpr=1 to f2, etc.
3189   //                 */
3190   //  char *overflow_arg_area;
3191   //                /* location on stack that holds
3192   //                 * the next overflow argument
3193   //                 */
3194   //  char *reg_save_area;
3195   //               /* where r3:r10 and f1:f8 (if saved)
3196   //                * are stored
3197   //                */
3198   // } va_list[1];
3199 
3200   SDValue ArgGPR = DAG.getConstant(FuncInfo->getVarArgsNumGPR(), dl, MVT::i32);
3201   SDValue ArgFPR = DAG.getConstant(FuncInfo->getVarArgsNumFPR(), dl, MVT::i32);
3202   SDValue StackOffsetFI = DAG.getFrameIndex(FuncInfo->getVarArgsStackOffset(),
3203                                             PtrVT);
3204   SDValue FR = DAG.getFrameIndex(FuncInfo->getVarArgsFrameIndex(),
3205                                  PtrVT);
3206 
3207   uint64_t FrameOffset = PtrVT.getSizeInBits()/8;
3208   SDValue ConstFrameOffset = DAG.getConstant(FrameOffset, dl, PtrVT);
3209 
3210   uint64_t StackOffset = PtrVT.getSizeInBits()/8 - 1;
3211   SDValue ConstStackOffset = DAG.getConstant(StackOffset, dl, PtrVT);
3212 
3213   uint64_t FPROffset = 1;
3214   SDValue ConstFPROffset = DAG.getConstant(FPROffset, dl, PtrVT);
3215 
3216   const Value *SV = cast<SrcValueSDNode>(Op.getOperand(2))->getValue();
3217 
  // Store first byte: number of int regs
3219   SDValue firstStore =
3220       DAG.getTruncStore(Op.getOperand(0), dl, ArgGPR, Op.getOperand(1),
3221                         MachinePointerInfo(SV), MVT::i8);
3222   uint64_t nextOffset = FPROffset;
3223   SDValue nextPtr = DAG.getNode(ISD::ADD, dl, PtrVT, Op.getOperand(1),
3224                                   ConstFPROffset);
3225 
  // Store second byte: number of float regs
3227   SDValue secondStore =
3228       DAG.getTruncStore(firstStore, dl, ArgFPR, nextPtr,
3229                         MachinePointerInfo(SV, nextOffset), MVT::i8);
3230   nextOffset += StackOffset;
3231   nextPtr = DAG.getNode(ISD::ADD, dl, PtrVT, nextPtr, ConstStackOffset);
3232 
  // Store second word: arguments given on stack
3234   SDValue thirdStore = DAG.getStore(secondStore, dl, StackOffsetFI, nextPtr,
3235                                     MachinePointerInfo(SV, nextOffset));
3236   nextOffset += FrameOffset;
3237   nextPtr = DAG.getNode(ISD::ADD, dl, PtrVT, nextPtr, ConstFrameOffset);
3238 
  // Store third word: arguments given in registers
3240   return DAG.getStore(thirdStore, dl, FR, nextPtr,
3241                       MachinePointerInfo(SV, nextOffset));
3242 }
3243 
/// FPR - The set of FP registers that should be allocated for arguments
/// on Darwin.
3246 static const MCPhysReg FPR[] = {PPC::F1,  PPC::F2,  PPC::F3, PPC::F4, PPC::F5,
3247                                 PPC::F6,  PPC::F7,  PPC::F8, PPC::F9, PPC::F10,
3248                                 PPC::F11, PPC::F12, PPC::F13};
3249 
3250 /// QFPR - The set of QPX registers that should be allocated for arguments.
3251 static const MCPhysReg QFPR[] = {
3252     PPC::QF1, PPC::QF2, PPC::QF3,  PPC::QF4,  PPC::QF5,  PPC::QF6, PPC::QF7,
3253     PPC::QF8, PPC::QF9, PPC::QF10, PPC::QF11, PPC::QF12, PPC::QF13};
3254 
3255 /// CalculateStackSlotSize - Calculates the size reserved for this argument on
3256 /// the stack.
3257 static unsigned CalculateStackSlotSize(EVT ArgVT, ISD::ArgFlagsTy Flags,
3258                                        unsigned PtrByteSize) {
3259   unsigned ArgSize = ArgVT.getStoreSize();
3260   if (Flags.isByVal())
3261     ArgSize = Flags.getByValSize();
3262 
3263   // Round up to multiples of the pointer size, except for array members,
3264   // which are always packed.
3265   if (!Flags.isInConsecutiveRegs())
3266     ArgSize = ((ArgSize + PtrByteSize - 1)/PtrByteSize) * PtrByteSize;
3267 
3268   return ArgSize;
3269 }
3270 
3271 /// CalculateStackSlotAlignment - Calculates the alignment of this argument
3272 /// on the stack.
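/// For example (illustrative): with 8-byte pointers, a v4f32 Altivec
/// argument is padded to a 16-byte boundary, while a plain i64 keeps the
/// default pointer-size alignment of 8.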
3273 static unsigned CalculateStackSlotAlignment(EVT ArgVT, EVT OrigVT,
3274                                             ISD::ArgFlagsTy Flags,
3275                                             unsigned PtrByteSize) {
3276   unsigned Align = PtrByteSize;
3277 
3278   // Altivec parameters are padded to a 16 byte boundary.
3279   if (ArgVT == MVT::v4f32 || ArgVT == MVT::v4i32 ||
3280       ArgVT == MVT::v8i16 || ArgVT == MVT::v16i8 ||
3281       ArgVT == MVT::v2f64 || ArgVT == MVT::v2i64 ||
3282       ArgVT == MVT::v1i128 || ArgVT == MVT::f128)
3283     Align = 16;
3284   // QPX vector types stored in double-precision are padded to a 32 byte
3285   // boundary.
3286   else if (ArgVT == MVT::v4f64 || ArgVT == MVT::v4i1)
3287     Align = 32;
3288 
3289   // ByVal parameters are aligned as requested.
3290   if (Flags.isByVal()) {
3291     unsigned BVAlign = Flags.getByValAlign();
3292     if (BVAlign > PtrByteSize) {
      if (BVAlign % PtrByteSize != 0)
        llvm_unreachable(
            "ByVal alignment is not a multiple of the pointer size");
3296 
3297       Align = BVAlign;
3298     }
3299   }
3300 
3301   // Array members are always packed to their original alignment.
3302   if (Flags.isInConsecutiveRegs()) {
3303     // If the array member was split into multiple registers, the first
3304     // needs to be aligned to the size of the full type.  (Except for
3305     // ppcf128, which is only aligned as its f64 components.)
3306     if (Flags.isSplit() && OrigVT != MVT::ppcf128)
3307       Align = OrigVT.getStoreSize();
3308     else
3309       Align = ArgVT.getStoreSize();
3310   }
3311 
3312   return Align;
3313 }
3314 
3315 /// CalculateStackSlotUsed - Return whether this argument will use its
3316 /// stack slot (instead of being passed in registers).  ArgOffset,
3317 /// AvailableFPRs, and AvailableVRs must hold the current argument
3318 /// position, and will be updated to account for this argument.
3319 static bool CalculateStackSlotUsed(EVT ArgVT, EVT OrigVT,
3320                                    ISD::ArgFlagsTy Flags,
3321                                    unsigned PtrByteSize,
3322                                    unsigned LinkageSize,
3323                                    unsigned ParamAreaSize,
3324                                    unsigned &ArgOffset,
3325                                    unsigned &AvailableFPRs,
3326                                    unsigned &AvailableVRs, bool HasQPX) {
3327   bool UseMemory = false;
3328 
3329   // Respect alignment of argument on the stack.
3330   unsigned Align =
3331     CalculateStackSlotAlignment(ArgVT, OrigVT, Flags, PtrByteSize);
3332   ArgOffset = ((ArgOffset + Align - 1) / Align) * Align;
3333   // If there's no space left in the argument save area, we must
3334   // use memory (this check also catches zero-sized arguments).
3335   if (ArgOffset >= LinkageSize + ParamAreaSize)
3336     UseMemory = true;
3337 
3338   // Allocate argument on the stack.
3339   ArgOffset += CalculateStackSlotSize(ArgVT, Flags, PtrByteSize);
3340   if (Flags.isInConsecutiveRegsLast())
3341     ArgOffset = ((ArgOffset + PtrByteSize - 1)/PtrByteSize) * PtrByteSize;
3342   // If we overran the argument save area, we must use memory
3343   // (this check catches arguments passed partially in memory)
3344   if (ArgOffset > LinkageSize + ParamAreaSize)
3345     UseMemory = true;
3346 
3347   // However, if the argument is actually passed in an FPR or a VR,
3348   // we don't use memory after all.
3349   if (!Flags.isByVal()) {
3350     if (ArgVT == MVT::f32 || ArgVT == MVT::f64 ||
3351         // QPX registers overlap with the scalar FP registers.
3352         (HasQPX && (ArgVT == MVT::v4f32 ||
3353                     ArgVT == MVT::v4f64 ||
3354                     ArgVT == MVT::v4i1)))
3355       if (AvailableFPRs > 0) {
3356         --AvailableFPRs;
3357         return false;
3358       }
3359     if (ArgVT == MVT::v4f32 || ArgVT == MVT::v4i32 ||
3360         ArgVT == MVT::v8i16 || ArgVT == MVT::v16i8 ||
3361         ArgVT == MVT::v2f64 || ArgVT == MVT::v2i64 ||
3362         ArgVT == MVT::v1i128 || ArgVT == MVT::f128)
3363       if (AvailableVRs > 0) {
3364         --AvailableVRs;
3365         return false;
3366       }
3367   }
3368 
3369   return UseMemory;
3370 }
3371 
3372 /// EnsureStackAlignment - Round stack frame size up from NumBytes to
3373 /// ensure minimum alignment required for target.
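/// For example (illustrative): with a 16-byte target stack alignment,
/// NumBytes = 52 is rounded up to (52 + 15) & ~15 = 64.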
3374 static unsigned EnsureStackAlignment(const PPCFrameLowering *Lowering,
3375                                      unsigned NumBytes) {
3376   unsigned TargetAlign = Lowering->getStackAlignment();
3377   unsigned AlignMask = TargetAlign - 1;
3378   NumBytes = (NumBytes + AlignMask) & ~AlignMask;
3379   return NumBytes;
3380 }
3381 
3382 SDValue PPCTargetLowering::LowerFormalArguments(
3383     SDValue Chain, CallingConv::ID CallConv, bool isVarArg,
3384     const SmallVectorImpl<ISD::InputArg> &Ins, const SDLoc &dl,
3385     SelectionDAG &DAG, SmallVectorImpl<SDValue> &InVals) const {
3386   if (Subtarget.isSVR4ABI()) {
3387     if (Subtarget.isPPC64())
3388       return LowerFormalArguments_64SVR4(Chain, CallConv, isVarArg, Ins,
3389                                          dl, DAG, InVals);
3390     else
3391       return LowerFormalArguments_32SVR4(Chain, CallConv, isVarArg, Ins,
3392                                          dl, DAG, InVals);
3393   } else {
3394     return LowerFormalArguments_Darwin(Chain, CallConv, isVarArg, Ins,
3395                                        dl, DAG, InVals);
3396   }
3397 }
3398 
3399 SDValue PPCTargetLowering::LowerFormalArguments_32SVR4(
3400     SDValue Chain, CallingConv::ID CallConv, bool isVarArg,
3401     const SmallVectorImpl<ISD::InputArg> &Ins, const SDLoc &dl,
3402     SelectionDAG &DAG, SmallVectorImpl<SDValue> &InVals) const {
3403 
3404   // 32-bit SVR4 ABI Stack Frame Layout:
3405   //              +-----------------------------------+
3406   //        +-->  |            Back chain             |
3407   //        |     +-----------------------------------+
3408   //        |     | Floating-point register save area |
3409   //        |     +-----------------------------------+
3410   //        |     |    General register save area     |
3411   //        |     +-----------------------------------+
3412   //        |     |          CR save word             |
3413   //        |     +-----------------------------------+
3414   //        |     |         VRSAVE save word          |
3415   //        |     +-----------------------------------+
3416   //        |     |         Alignment padding         |
3417   //        |     +-----------------------------------+
3418   //        |     |     Vector register save area     |
3419   //        |     +-----------------------------------+
3420   //        |     |       Local variable space        |
3421   //        |     +-----------------------------------+
3422   //        |     |        Parameter list area        |
3423   //        |     +-----------------------------------+
3424   //        |     |           LR save word            |
3425   //        |     +-----------------------------------+
3426   // SP-->  +---  |            Back chain             |
3427   //              +-----------------------------------+
3428   //
3429   // Specifications:
3430   //   System V Application Binary Interface PowerPC Processor Supplement
3431   //   AltiVec Technology Programming Interface Manual
3432 
3433   MachineFunction &MF = DAG.getMachineFunction();
3434   MachineFrameInfo &MFI = MF.getFrameInfo();
3435   PPCFunctionInfo *FuncInfo = MF.getInfo<PPCFunctionInfo>();
3436 
3437   EVT PtrVT = getPointerTy(MF.getDataLayout());
3438   // Potential tail calls could cause overwriting of argument stack slots.
3439   bool isImmutable = !(getTargetMachine().Options.GuaranteedTailCallOpt &&
3440                        (CallConv == CallingConv::Fast));
3441   unsigned PtrByteSize = 4;
3442 
3443   // Assign locations to all of the incoming arguments.
3444   SmallVector<CCValAssign, 16> ArgLocs;
3445   PPCCCState CCInfo(CallConv, isVarArg, DAG.getMachineFunction(), ArgLocs,
3446                  *DAG.getContext());
3447 
3448   // Reserve space for the linkage area on the stack.
3449   unsigned LinkageSize = Subtarget.getFrameLowering()->getLinkageSize();
3450   CCInfo.AllocateStack(LinkageSize, PtrByteSize);
3451   if (useSoftFloat())
3452     CCInfo.PreAnalyzeFormalArguments(Ins);
3453 
3454   CCInfo.AnalyzeFormalArguments(Ins, CC_PPC32_SVR4);
3455   CCInfo.clearWasPPCF128();
3456 
3457   for (unsigned i = 0, e = ArgLocs.size(); i != e; ++i) {
3458     CCValAssign &VA = ArgLocs[i];
3459 
3460     // Arguments stored in registers.
3461     if (VA.isRegLoc()) {
3462       const TargetRegisterClass *RC;
3463       EVT ValVT = VA.getValVT();
3464 
3465       switch (ValVT.getSimpleVT().SimpleTy) {
3466         default:
3467           llvm_unreachable("ValVT not supported by formal arguments Lowering");
3468         case MVT::i1:
3469         case MVT::i32:
3470           RC = &PPC::GPRCRegClass;
3471           break;
3472         case MVT::f32:
3473           if (Subtarget.hasP8Vector())
3474             RC = &PPC::VSSRCRegClass;
3475           else if (Subtarget.hasSPE())
3476             RC = &PPC::SPE4RCRegClass;
3477           else
3478             RC = &PPC::F4RCRegClass;
3479           break;
3480         case MVT::f64:
3481           if (Subtarget.hasVSX())
3482             RC = &PPC::VSFRCRegClass;
3483           else if (Subtarget.hasSPE())
3484             // SPE passes doubles in GPR pairs.
3485             RC = &PPC::GPRCRegClass;
3486           else
3487             RC = &PPC::F8RCRegClass;
3488           break;
3489         case MVT::v16i8:
3490         case MVT::v8i16:
3491         case MVT::v4i32:
3492           RC = &PPC::VRRCRegClass;
3493           break;
3494         case MVT::v4f32:
3495           RC = Subtarget.hasQPX() ? &PPC::QSRCRegClass : &PPC::VRRCRegClass;
3496           break;
3497         case MVT::v2f64:
3498         case MVT::v2i64:
3499           RC = &PPC::VRRCRegClass;
3500           break;
3501         case MVT::v4f64:
3502           RC = &PPC::QFRCRegClass;
3503           break;
3504         case MVT::v4i1:
3505           RC = &PPC::QBRCRegClass;
3506           break;
3507       }
3508 
3509       SDValue ArgValue;
3510       // Transform the arguments stored in physical registers into
3511       // virtual ones.
3512       if (VA.getLocVT() == MVT::f64 && Subtarget.hasSPE()) {
3513         assert(i + 1 < e && "No second half of double precision argument");
3514         unsigned RegLo = MF.addLiveIn(VA.getLocReg(), RC);
3515         unsigned RegHi = MF.addLiveIn(ArgLocs[++i].getLocReg(), RC);
3516         SDValue ArgValueLo = DAG.getCopyFromReg(Chain, dl, RegLo, MVT::i32);
3517         SDValue ArgValueHi = DAG.getCopyFromReg(Chain, dl, RegHi, MVT::i32);
3518         if (!Subtarget.isLittleEndian())
          std::swap(ArgValueLo, ArgValueHi);
3520         ArgValue = DAG.getNode(PPCISD::BUILD_SPE64, dl, MVT::f64, ArgValueLo,
3521                                ArgValueHi);
3522       } else {
3523         unsigned Reg = MF.addLiveIn(VA.getLocReg(), RC);
3524         ArgValue = DAG.getCopyFromReg(Chain, dl, Reg,
3525                                       ValVT == MVT::i1 ? MVT::i32 : ValVT);
3526         if (ValVT == MVT::i1)
3527           ArgValue = DAG.getNode(ISD::TRUNCATE, dl, MVT::i1, ArgValue);
3528       }
3529 
3530       InVals.push_back(ArgValue);
3531     } else {
3532       // Argument stored in memory.
3533       assert(VA.isMemLoc());
3534 
      // Get the extended size of the argument type on the stack.
      unsigned ArgSize = VA.getLocVT().getStoreSize();
      // Get the actual size of the argument type.
      unsigned ObjSize = VA.getValVT().getStoreSize();
3539       unsigned ArgOffset = VA.getLocMemOffset();
3540       // Stack objects in PPC32 are right justified.
3541       ArgOffset += ArgSize - ObjSize;
3542       int FI = MFI.CreateFixedObject(ArgSize, ArgOffset, isImmutable);
3543 
3544       // Create load nodes to retrieve arguments from the stack.
3545       SDValue FIN = DAG.getFrameIndex(FI, PtrVT);
3546       InVals.push_back(
3547           DAG.getLoad(VA.getValVT(), dl, Chain, FIN, MachinePointerInfo()));
3548     }
3549   }
3550 
3551   // Assign locations to all of the incoming aggregate by value arguments.
3552   // Aggregates passed by value are stored in the local variable space of the
3553   // caller's stack frame, right above the parameter list area.
3554   SmallVector<CCValAssign, 16> ByValArgLocs;
3555   CCState CCByValInfo(CallConv, isVarArg, DAG.getMachineFunction(),
3556                       ByValArgLocs, *DAG.getContext());
3557 
3558   // Reserve stack space for the allocations in CCInfo.
3559   CCByValInfo.AllocateStack(CCInfo.getNextStackOffset(), PtrByteSize);
3560 
3561   CCByValInfo.AnalyzeFormalArguments(Ins, CC_PPC32_SVR4_ByVal);
3562 
3563   // Area that is at least reserved in the caller of this function.
3564   unsigned MinReservedArea = CCByValInfo.getNextStackOffset();
3565   MinReservedArea = std::max(MinReservedArea, LinkageSize);
3566 
3567   // Set the size that is at least reserved in caller of this function.  Tail
3568   // call optimized function's reserved stack space needs to be aligned so that
3569   // taking the difference between two stack areas will result in an aligned
3570   // stack.
3571   MinReservedArea =
3572       EnsureStackAlignment(Subtarget.getFrameLowering(), MinReservedArea);
3573   FuncInfo->setMinReservedArea(MinReservedArea);
3574 
3575   SmallVector<SDValue, 8> MemOps;
3576 
3577   // If the function takes variable number of arguments, make a frame index for
3578   // the start of the first vararg value... for expansion of llvm.va_start.
3579   if (isVarArg) {
3580     static const MCPhysReg GPArgRegs[] = {
3581       PPC::R3, PPC::R4, PPC::R5, PPC::R6,
3582       PPC::R7, PPC::R8, PPC::R9, PPC::R10,
3583     };
3584     const unsigned NumGPArgRegs = array_lengthof(GPArgRegs);
3585 
3586     static const MCPhysReg FPArgRegs[] = {
3587       PPC::F1, PPC::F2, PPC::F3, PPC::F4, PPC::F5, PPC::F6, PPC::F7,
3588       PPC::F8
3589     };
3590     unsigned NumFPArgRegs = array_lengthof(FPArgRegs);
3591 
3592     if (useSoftFloat() || hasSPE())
3593        NumFPArgRegs = 0;
3594 
3595     FuncInfo->setVarArgsNumGPR(CCInfo.getFirstUnallocated(GPArgRegs));
3596     FuncInfo->setVarArgsNumFPR(CCInfo.getFirstUnallocated(FPArgRegs));
3597 
3598     // Make room for NumGPArgRegs and NumFPArgRegs.
3599     int Depth = NumGPArgRegs * PtrVT.getSizeInBits()/8 +
3600                 NumFPArgRegs * MVT(MVT::f64).getSizeInBits()/8;
3601 
3602     FuncInfo->setVarArgsStackOffset(
3603       MFI.CreateFixedObject(PtrVT.getSizeInBits()/8,
3604                             CCInfo.getNextStackOffset(), true));
3605 
3606     FuncInfo->setVarArgsFrameIndex(MFI.CreateStackObject(Depth, 8, false));
3607     SDValue FIN = DAG.getFrameIndex(FuncInfo->getVarArgsFrameIndex(), PtrVT);
3608 
3609     // The fixed integer arguments of a variadic function are stored to the
3610     // VarArgsFrameIndex on the stack so that they may be loaded by
3611     // dereferencing the result of va_next.
3612     for (unsigned GPRIndex = 0; GPRIndex != NumGPArgRegs; ++GPRIndex) {
3613       // Get an existing live-in vreg, or add a new one.
3614       unsigned VReg = MF.getRegInfo().getLiveInVirtReg(GPArgRegs[GPRIndex]);
3615       if (!VReg)
3616         VReg = MF.addLiveIn(GPArgRegs[GPRIndex], &PPC::GPRCRegClass);
3617 
3618       SDValue Val = DAG.getCopyFromReg(Chain, dl, VReg, PtrVT);
3619       SDValue Store =
3620           DAG.getStore(Val.getValue(1), dl, Val, FIN, MachinePointerInfo());
3621       MemOps.push_back(Store);
3622       // Increment the address by four for the next argument to store
3623       SDValue PtrOff = DAG.getConstant(PtrVT.getSizeInBits()/8, dl, PtrVT);
3624       FIN = DAG.getNode(ISD::ADD, dl, PtrOff.getValueType(), FIN, PtrOff);
3625     }
3626 
3627     // FIXME 32-bit SVR4: We only need to save FP argument registers if CR bit 6
3628     // is set.
3629     // The double arguments are stored to the VarArgsFrameIndex
3630     // on the stack.
3631     for (unsigned FPRIndex = 0; FPRIndex != NumFPArgRegs; ++FPRIndex) {
3632       // Get an existing live-in vreg, or add a new one.
3633       unsigned VReg = MF.getRegInfo().getLiveInVirtReg(FPArgRegs[FPRIndex]);
3634       if (!VReg)
3635         VReg = MF.addLiveIn(FPArgRegs[FPRIndex], &PPC::F8RCRegClass);
3636 
3637       SDValue Val = DAG.getCopyFromReg(Chain, dl, VReg, MVT::f64);
3638       SDValue Store =
3639           DAG.getStore(Val.getValue(1), dl, Val, FIN, MachinePointerInfo());
3640       MemOps.push_back(Store);
3641       // Increment the address by eight for the next argument to store
3642       SDValue PtrOff = DAG.getConstant(MVT(MVT::f64).getSizeInBits()/8, dl,
3643                                          PtrVT);
3644       FIN = DAG.getNode(ISD::ADD, dl, PtrOff.getValueType(), FIN, PtrOff);
3645     }
3646   }
3647 
3648   if (!MemOps.empty())
3649     Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, MemOps);
3650 
3651   return Chain;
3652 }
3653 
3654 // PPC64 passes i8, i16, and i32 values in i64 registers. Promote
3655 // value to MVT::i64 and then truncate to the correct register size.
3656 SDValue PPCTargetLowering::extendArgForPPC64(ISD::ArgFlagsTy Flags,
3657                                              EVT ObjectVT, SelectionDAG &DAG,
3658                                              SDValue ArgVal,
3659                                              const SDLoc &dl) const {
3660   if (Flags.isSExt())
3661     ArgVal = DAG.getNode(ISD::AssertSext, dl, MVT::i64, ArgVal,
3662                          DAG.getValueType(ObjectVT));
3663   else if (Flags.isZExt())
3664     ArgVal = DAG.getNode(ISD::AssertZext, dl, MVT::i64, ArgVal,
3665                          DAG.getValueType(ObjectVT));
3666 
3667   return DAG.getNode(ISD::TRUNCATE, dl, ObjectVT, ArgVal);
3668 }
3669 
3670 SDValue PPCTargetLowering::LowerFormalArguments_64SVR4(
3671     SDValue Chain, CallingConv::ID CallConv, bool isVarArg,
3672     const SmallVectorImpl<ISD::InputArg> &Ins, const SDLoc &dl,
3673     SelectionDAG &DAG, SmallVectorImpl<SDValue> &InVals) const {
3674   // TODO: add description of PPC stack frame format, or at least some docs.
3675   //
3676   bool isELFv2ABI = Subtarget.isELFv2ABI();
3677   bool isLittleEndian = Subtarget.isLittleEndian();
3678   MachineFunction &MF = DAG.getMachineFunction();
3679   MachineFrameInfo &MFI = MF.getFrameInfo();
3680   PPCFunctionInfo *FuncInfo = MF.getInfo<PPCFunctionInfo>();
3681 
3682   assert(!(CallConv == CallingConv::Fast && isVarArg) &&
3683          "fastcc not supported on varargs functions");
3684 
3685   EVT PtrVT = getPointerTy(MF.getDataLayout());
3686   // Potential tail calls could cause overwriting of argument stack slots.
3687   bool isImmutable = !(getTargetMachine().Options.GuaranteedTailCallOpt &&
3688                        (CallConv == CallingConv::Fast));
3689   unsigned PtrByteSize = 8;
3690   unsigned LinkageSize = Subtarget.getFrameLowering()->getLinkageSize();
3691 
3692   static const MCPhysReg GPR[] = {
3693     PPC::X3, PPC::X4, PPC::X5, PPC::X6,
3694     PPC::X7, PPC::X8, PPC::X9, PPC::X10,
3695   };
3696   static const MCPhysReg VR[] = {
3697     PPC::V2, PPC::V3, PPC::V4, PPC::V5, PPC::V6, PPC::V7, PPC::V8,
3698     PPC::V9, PPC::V10, PPC::V11, PPC::V12, PPC::V13
3699   };
3700 
3701   const unsigned Num_GPR_Regs = array_lengthof(GPR);
3702   const unsigned Num_FPR_Regs = useSoftFloat() ? 0 : 13;
3703   const unsigned Num_VR_Regs  = array_lengthof(VR);
3704   const unsigned Num_QFPR_Regs = Num_FPR_Regs;
3705 
3706   // Do a first pass over the arguments to determine whether the ABI
3707   // guarantees that our caller has allocated the parameter save area
3708   // on its stack frame.  In the ELFv1 ABI, this is always the case;
3709   // in the ELFv2 ABI, it is true if this is a vararg function or if
3710   // any parameter is located in a stack slot.
3711 
3712   bool HasParameterArea = !isELFv2ABI || isVarArg;
3713   unsigned ParamAreaSize = Num_GPR_Regs * PtrByteSize;
3714   unsigned NumBytes = LinkageSize;
3715   unsigned AvailableFPRs = Num_FPR_Regs;
3716   unsigned AvailableVRs = Num_VR_Regs;
3717   for (unsigned i = 0, e = Ins.size(); i != e; ++i) {
3718     if (Ins[i].Flags.isNest())
3719       continue;
3720 
3721     if (CalculateStackSlotUsed(Ins[i].VT, Ins[i].ArgVT, Ins[i].Flags,
3722                                PtrByteSize, LinkageSize, ParamAreaSize,
3723                                NumBytes, AvailableFPRs, AvailableVRs,
3724                                Subtarget.hasQPX()))
3725       HasParameterArea = true;
3726   }
3727 
3728   // Add DAG nodes to load the arguments or copy them out of registers.  On
3729   // entry to a function on PPC, the arguments start after the linkage area,
3730   // although the first ones are often in registers.
3731 
3732   unsigned ArgOffset = LinkageSize;
3733   unsigned GPR_idx = 0, FPR_idx = 0, VR_idx = 0;
3734   unsigned &QFPR_idx = FPR_idx;
3735   SmallVector<SDValue, 8> MemOps;
3736   Function::const_arg_iterator FuncArg = MF.getFunction().arg_begin();
3737   unsigned CurArgIdx = 0;
3738   for (unsigned ArgNo = 0, e = Ins.size(); ArgNo != e; ++ArgNo) {
3739     SDValue ArgVal;
3740     bool needsLoad = false;
3741     EVT ObjectVT = Ins[ArgNo].VT;
3742     EVT OrigVT = Ins[ArgNo].ArgVT;
3743     unsigned ObjSize = ObjectVT.getStoreSize();
3744     unsigned ArgSize = ObjSize;
3745     ISD::ArgFlagsTy Flags = Ins[ArgNo].Flags;
3746     if (Ins[ArgNo].isOrigArg()) {
3747       std::advance(FuncArg, Ins[ArgNo].getOrigArgIndex() - CurArgIdx);
3748       CurArgIdx = Ins[ArgNo].getOrigArgIndex();
3749     }
    // We re-align the argument offset for each argument, except when using the
    // fast calling convention, in which case we do so only once we know the
    // argument will actually use a stack slot.
3753     unsigned CurArgOffset, Align;
3754     auto ComputeArgOffset = [&]() {
3755       /* Respect alignment of argument on the stack.  */
3756       Align = CalculateStackSlotAlignment(ObjectVT, OrigVT, Flags, PtrByteSize);
3757       ArgOffset = ((ArgOffset + Align - 1) / Align) * Align;
3758       CurArgOffset = ArgOffset;
3759     };
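    // Worked example of the rounding above: with ArgOffset == 40 and a
    // 16-byte-aligned argument (Align == 16), ((40 + 15) / 16) * 16 == 48, so
    // the argument lands at offset 48.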
3760 
3761     if (CallConv != CallingConv::Fast) {
3762       ComputeArgOffset();
3763 
3764       /* Compute GPR index associated with argument offset.  */
3765       GPR_idx = (ArgOffset - LinkageSize) / PtrByteSize;
3766       GPR_idx = std::min(GPR_idx, Num_GPR_Regs);
3767     }
3768 
3769     // FIXME the codegen can be much improved in some cases.
3770     // We do not have to keep everything in memory.
3771     if (Flags.isByVal()) {
3772       assert(Ins[ArgNo].isOrigArg() && "Byval arguments cannot be implicit");
3773 
3774       if (CallConv == CallingConv::Fast)
3775         ComputeArgOffset();
3776 
3777       // ObjSize is the true size; ArgSize is ObjSize rounded up to whole registers.
3778       ObjSize = Flags.getByValSize();
3779       ArgSize = ((ObjSize + PtrByteSize - 1)/PtrByteSize) * PtrByteSize;
3780       // Empty aggregate parameters do not take up registers.  Examples:
3781       //   struct { } a;
3782       //   union  { } b;
3783       //   int c[0];
3784       // etc.  However, we have to provide a placeholder in InVals, so
3785       // pretend we have an 8-byte item at the current address for that
3786       // purpose.
3787       if (!ObjSize) {
3788         int FI = MFI.CreateFixedObject(PtrByteSize, ArgOffset, true);
3789         SDValue FIN = DAG.getFrameIndex(FI, PtrVT);
3790         InVals.push_back(FIN);
3791         continue;
3792       }
3793 
3794       // Create a stack object covering all stack doublewords occupied
3795       // by the argument.  If the argument is (fully or partially) on
3796       // the stack, or if the argument is fully in registers but the
3797       // caller has allocated the parameter save area anyway, we can refer
3798       // directly to the caller's stack frame.  Otherwise, create a
3799       // local copy in our own frame.
3800       int FI;
3801       if (HasParameterArea ||
3802           ArgSize + ArgOffset > LinkageSize + Num_GPR_Regs * PtrByteSize)
3803         FI = MFI.CreateFixedObject(ArgSize, ArgOffset, false, true);
3804       else
3805         FI = MFI.CreateStackObject(ArgSize, Align, false);
3806       SDValue FIN = DAG.getFrameIndex(FI, PtrVT);
3807 
3808       // Handle aggregates smaller than 8 bytes.
3809       if (ObjSize < PtrByteSize) {
3810         // The value of the object is its address, which differs from the
3811         // address of the enclosing doubleword on big-endian systems.
3812         SDValue Arg = FIN;
3813         if (!isLittleEndian) {
3814           SDValue ArgOff = DAG.getConstant(PtrByteSize - ObjSize, dl, PtrVT);
3815           Arg = DAG.getNode(ISD::ADD, dl, ArgOff.getValueType(), Arg, ArgOff);
3816         }
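        // e.g. a byval 'struct { char c[3]; }' on big-endian occupies the
        // high-order 3 bytes of its doubleword, so the object's address is
        // the slot address plus (8 - 3) == 5.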
3817         InVals.push_back(Arg);
3818 
3819         if (GPR_idx != Num_GPR_Regs) {
3820           unsigned VReg = MF.addLiveIn(GPR[GPR_idx++], &PPC::G8RCRegClass);
3821           FuncInfo->addLiveInAttr(VReg, Flags);
3822           SDValue Val = DAG.getCopyFromReg(Chain, dl, VReg, PtrVT);
3823           SDValue Store;
3824 
3825           if (ObjSize==1 || ObjSize==2 || ObjSize==4) {
3826             EVT ObjType = (ObjSize == 1 ? MVT::i8 :
3827                            (ObjSize == 2 ? MVT::i16 : MVT::i32));
3828             Store = DAG.getTruncStore(Val.getValue(1), dl, Val, Arg,
3829                                       MachinePointerInfo(&*FuncArg), ObjType);
3830           } else {
3831             // For sizes that don't fit a truncating store (3, 5, 6, 7),
3832             // store the whole register as-is to the parameter save area
3833             // slot.
3834             Store = DAG.getStore(Val.getValue(1), dl, Val, FIN,
3835                                  MachinePointerInfo(&*FuncArg));
3836           }
3837 
3838           MemOps.push_back(Store);
3839         }
3840         // Whether we copied from a register or not, advance the offset
3841         // into the parameter save area by a full doubleword.
3842         ArgOffset += PtrByteSize;
3843         continue;
3844       }
3845 
3846       // The value of the object is its address, which is the address of
3847       // its first stack doubleword.
3848       InVals.push_back(FIN);
3849 
3850       // Store whatever pieces of the object are in registers to memory.
3851       for (unsigned j = 0; j < ArgSize; j += PtrByteSize) {
3852         if (GPR_idx == Num_GPR_Regs)
3853           break;
3854 
3855         unsigned VReg = MF.addLiveIn(GPR[GPR_idx], &PPC::G8RCRegClass);
3856         FuncInfo->addLiveInAttr(VReg, Flags);
3857         SDValue Val = DAG.getCopyFromReg(Chain, dl, VReg, PtrVT);
3858         SDValue Addr = FIN;
3859         if (j) {
3860           SDValue Off = DAG.getConstant(j, dl, PtrVT);
3861           Addr = DAG.getNode(ISD::ADD, dl, Off.getValueType(), Addr, Off);
3862         }
3863         SDValue Store = DAG.getStore(Val.getValue(1), dl, Val, Addr,
3864                                      MachinePointerInfo(&*FuncArg, j));
3865         MemOps.push_back(Store);
3866         ++GPR_idx;
3867       }
3868       ArgOffset += ArgSize;
3869       continue;
3870     }
3871 
3872     switch (ObjectVT.getSimpleVT().SimpleTy) {
3873     default: llvm_unreachable("Unhandled argument type!");
3874     case MVT::i1:
3875     case MVT::i32:
3876     case MVT::i64:
3877       if (Flags.isNest()) {
3878         // The 'nest' parameter, if any, is passed in R11.
3879         unsigned VReg = MF.addLiveIn(PPC::X11, &PPC::G8RCRegClass);
3880         ArgVal = DAG.getCopyFromReg(Chain, dl, VReg, MVT::i64);
3881 
3882         if (ObjectVT == MVT::i32 || ObjectVT == MVT::i1)
3883           ArgVal = extendArgForPPC64(Flags, ObjectVT, DAG, ArgVal, dl);
3884 
3885         break;
3886       }
3887 
3888       // These can be scalar arguments or elements of an integer array type
3889       // passed directly.  Clang may use those instead of "byval" aggregate
3890       // types to avoid forcing arguments to memory unnecessarily.
3891       if (GPR_idx != Num_GPR_Regs) {
3892         unsigned VReg = MF.addLiveIn(GPR[GPR_idx++], &PPC::G8RCRegClass);
3893         FuncInfo->addLiveInAttr(VReg, Flags);
3894         ArgVal = DAG.getCopyFromReg(Chain, dl, VReg, MVT::i64);
3895 
3896         if (ObjectVT == MVT::i32 || ObjectVT == MVT::i1)
3897           // PPC64 passes i8, i16, and i32 values in i64 registers. Promote
3898           // value to MVT::i64 and then truncate to the correct register size.
3899           ArgVal = extendArgForPPC64(Flags, ObjectVT, DAG, ArgVal, dl);
3900       } else {
3901         if (CallConv == CallingConv::Fast)
3902           ComputeArgOffset();
3903 
3904         needsLoad = true;
3905         ArgSize = PtrByteSize;
3906       }
3907       if (CallConv != CallingConv::Fast || needsLoad)
3908         ArgOffset += 8;
3909       break;
3910 
3911     case MVT::f32:
3912     case MVT::f64:
3913       // These can be scalar arguments or elements of a float array type
3914       // passed directly.  The latter are used to implement ELFv2 homogeneous
3915       // float aggregates.
3916       if (FPR_idx != Num_FPR_Regs) {
3917         unsigned VReg;
3918 
3919         if (ObjectVT == MVT::f32)
3920           VReg = MF.addLiveIn(FPR[FPR_idx],
3921                               Subtarget.hasP8Vector()
3922                                   ? &PPC::VSSRCRegClass
3923                                   : &PPC::F4RCRegClass);
3924         else
3925           VReg = MF.addLiveIn(FPR[FPR_idx], Subtarget.hasVSX()
3926                                                 ? &PPC::VSFRCRegClass
3927                                                 : &PPC::F8RCRegClass);
3928 
3929         ArgVal = DAG.getCopyFromReg(Chain, dl, VReg, ObjectVT);
3930         ++FPR_idx;
3931       } else if (GPR_idx != Num_GPR_Regs && CallConv != CallingConv::Fast) {
3932         // FIXME: We may want to re-enable this for CallingConv::Fast on the P8
3933         // once we support fp <-> gpr moves.
3934 
3935         // This can only ever happen in the presence of f32 array types,
3936         // since otherwise we never run out of FPRs before running out
3937         // of GPRs.
3938         unsigned VReg = MF.addLiveIn(GPR[GPR_idx++], &PPC::G8RCRegClass);
3939         FuncInfo->addLiveInAttr(VReg, Flags);
3940         ArgVal = DAG.getCopyFromReg(Chain, dl, VReg, MVT::i64);
3941 
3942         if (ObjectVT == MVT::f32) {
3943           if ((ArgOffset % PtrByteSize) == (isLittleEndian ? 4 : 0))
3944             ArgVal = DAG.getNode(ISD::SRL, dl, MVT::i64, ArgVal,
3945                                  DAG.getConstant(32, dl, MVT::i32));
3946           ArgVal = DAG.getNode(ISD::TRUNCATE, dl, MVT::i32, ArgVal);
3947         }
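        // e.g. for a 'float a[2]' element arriving in a GPR once FPRs are
        // exhausted: on big-endian, a[0] (ArgOffset % 8 == 0) sits in the
        // high 32 bits of the register, and on little-endian a[1]
        // (ArgOffset % 8 == 4) does; both need the shift right by 32 above
        // before truncation.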
3948 
3949         ArgVal = DAG.getNode(ISD::BITCAST, dl, ObjectVT, ArgVal);
3950       } else {
3951         if (CallConv == CallingConv::Fast)
3952           ComputeArgOffset();
3953 
3954         needsLoad = true;
3955       }
3956 
3957       // When passing an array of floats, the array occupies consecutive
3958       // space in the argument area; only round up to the next doubleword
3959       // at the end of the array.  Otherwise, each float takes 8 bytes.
3960       if (CallConv != CallingConv::Fast || needsLoad) {
3961         ArgSize = Flags.isInConsecutiveRegs() ? ObjSize : PtrByteSize;
3962         ArgOffset += ArgSize;
3963         if (Flags.isInConsecutiveRegsLast())
3964           ArgOffset = ((ArgOffset + PtrByteSize - 1)/PtrByteSize) * PtrByteSize;
3965       }
3966       break;
3967     case MVT::v4f32:
3968     case MVT::v4i32:
3969     case MVT::v8i16:
3970     case MVT::v16i8:
3971     case MVT::v2f64:
3972     case MVT::v2i64:
3973     case MVT::v1i128:
3974     case MVT::f128:
3975       if (!Subtarget.hasQPX()) {
3976         // These can be scalar arguments or elements of a vector array type
3977         // passed directly.  The latter are used to implement ELFv2 homogeneous
3978         // vector aggregates.
3979         if (VR_idx != Num_VR_Regs) {
3980           unsigned VReg = MF.addLiveIn(VR[VR_idx], &PPC::VRRCRegClass);
3981           ArgVal = DAG.getCopyFromReg(Chain, dl, VReg, ObjectVT);
3982           ++VR_idx;
3983         } else {
3984           if (CallConv == CallingConv::Fast)
3985             ComputeArgOffset();
3986           needsLoad = true;
3987         }
3988         if (CallConv != CallingConv::Fast || needsLoad)
3989           ArgOffset += 16;
3990         break;
3991       } // not QPX
3992 
3993       assert(ObjectVT.getSimpleVT().SimpleTy == MVT::v4f32 &&
3994              "Invalid QPX parameter type");
3995       LLVM_FALLTHROUGH;
3996 
3997     case MVT::v4f64:
3998     case MVT::v4i1:
3999       // QPX vectors are treated like their scalar floating-point subregisters
4000       // (except that they're larger).
4001       unsigned Sz = ObjectVT.getSimpleVT().SimpleTy == MVT::v4f32 ? 16 : 32;
4002       if (QFPR_idx != Num_QFPR_Regs) {
4003         const TargetRegisterClass *RC;
4004         switch (ObjectVT.getSimpleVT().SimpleTy) {
4005         case MVT::v4f64: RC = &PPC::QFRCRegClass; break;
4006         case MVT::v4f32: RC = &PPC::QSRCRegClass; break;
4007         default:         RC = &PPC::QBRCRegClass; break;
4008         }
4009 
4010         unsigned VReg = MF.addLiveIn(QFPR[QFPR_idx], RC);
4011         ArgVal = DAG.getCopyFromReg(Chain, dl, VReg, ObjectVT);
4012         ++QFPR_idx;
4013       } else {
4014         if (CallConv == CallingConv::Fast)
4015           ComputeArgOffset();
4016         needsLoad = true;
4017       }
4018       if (CallConv != CallingConv::Fast || needsLoad)
4019         ArgOffset += Sz;
4020       break;
4021     }
4022 
4023     // We need to load the argument to a virtual register if we determined
4024     // above that we ran out of physical registers of the appropriate type.
4025     if (needsLoad) {
4026       if (ObjSize < ArgSize && !isLittleEndian)
4027         CurArgOffset += ArgSize - ObjSize;
4028       int FI = MFI.CreateFixedObject(ObjSize, CurArgOffset, isImmutable);
4029       SDValue FIN = DAG.getFrameIndex(FI, PtrVT);
4030       ArgVal = DAG.getLoad(ObjectVT, dl, Chain, FIN, MachinePointerInfo());
4031     }
4032 
4033     InVals.push_back(ArgVal);
4034   }
4035 
4036   // Area that is at least reserved in the caller of this function.
4037   unsigned MinReservedArea;
4038   if (HasParameterArea)
4039     MinReservedArea = std::max(ArgOffset, LinkageSize + 8 * PtrByteSize);
4040   else
4041     MinReservedArea = LinkageSize;
4042 
4043   // Set the size that is at least reserved in the caller of this function.
4044   // Tail call optimized functions' reserved stack space needs to be aligned
4045   // so that taking the difference between two stack areas will result in an
4046   // aligned stack.
4047   MinReservedArea =
4048       EnsureStackAlignment(Subtarget.getFrameLowering(), MinReservedArea);
4049   FuncInfo->setMinReservedArea(MinReservedArea);
4050 
4051   // If the function takes a variable number of arguments, make a frame index
4052   // for the start of the first vararg value... for expansion of llvm.va_start.
4053   if (isVarArg) {
4054     int Depth = ArgOffset;
4055 
4056     FuncInfo->setVarArgsFrameIndex(
4057       MFI.CreateFixedObject(PtrByteSize, Depth, true));
4058     SDValue FIN = DAG.getFrameIndex(FuncInfo->getVarArgsFrameIndex(), PtrVT);
4059 
4060     // If this function is vararg, store any remaining integer argument regs
4061     // to their spots on the stack so that they may be loaded by dereferencing
4062     // the result of va_next.
4063     for (GPR_idx = (ArgOffset - LinkageSize) / PtrByteSize;
4064          GPR_idx < Num_GPR_Regs; ++GPR_idx) {
4065       unsigned VReg = MF.addLiveIn(GPR[GPR_idx], &PPC::G8RCRegClass);
4066       SDValue Val = DAG.getCopyFromReg(Chain, dl, VReg, PtrVT);
4067       SDValue Store =
4068           DAG.getStore(Val.getValue(1), dl, Val, FIN, MachinePointerInfo());
4069       MemOps.push_back(Store);
4070       // Increment the address by PtrByteSize for the next argument to store.
4071       SDValue PtrOff = DAG.getConstant(PtrByteSize, dl, PtrVT);
4072       FIN = DAG.getNode(ISD::ADD, dl, PtrOff.getValueType(), FIN, PtrOff);
4073     }
4074   }
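  // A sketch of why the spill above matters: for a function like
  // 'long sum(int n, ...)', n arrives in r3, and r4-r10 are dumped to their
  // home slots in the parameter save area so that va_arg can walk every
  // argument through memory.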
4075 
4076   if (!MemOps.empty())
4077     Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, MemOps);
4078 
4079   return Chain;
4080 }
4081 
4082 SDValue PPCTargetLowering::LowerFormalArguments_Darwin(
4083     SDValue Chain, CallingConv::ID CallConv, bool isVarArg,
4084     const SmallVectorImpl<ISD::InputArg> &Ins, const SDLoc &dl,
4085     SelectionDAG &DAG, SmallVectorImpl<SDValue> &InVals) const {
4086   // TODO: add description of PPC stack frame format, or at least some docs.
4087   //
4088   MachineFunction &MF = DAG.getMachineFunction();
4089   MachineFrameInfo &MFI = MF.getFrameInfo();
4090   PPCFunctionInfo *FuncInfo = MF.getInfo<PPCFunctionInfo>();
4091 
4092   EVT PtrVT = getPointerTy(MF.getDataLayout());
4093   bool isPPC64 = PtrVT == MVT::i64;
4094   // Potential tail calls could cause overwriting of argument stack slots.
4095   bool isImmutable = !(getTargetMachine().Options.GuaranteedTailCallOpt &&
4096                        (CallConv == CallingConv::Fast));
4097   unsigned PtrByteSize = isPPC64 ? 8 : 4;
4098   unsigned LinkageSize = Subtarget.getFrameLowering()->getLinkageSize();
4099   unsigned ArgOffset = LinkageSize;
4100   // Area that is at least reserved in caller of this function.
4101   unsigned MinReservedArea = ArgOffset;
4102 
4103   static const MCPhysReg GPR_32[] = {           // 32-bit registers.
4104     PPC::R3, PPC::R4, PPC::R5, PPC::R6,
4105     PPC::R7, PPC::R8, PPC::R9, PPC::R10,
4106   };
4107   static const MCPhysReg GPR_64[] = {           // 64-bit registers.
4108     PPC::X3, PPC::X4, PPC::X5, PPC::X6,
4109     PPC::X7, PPC::X8, PPC::X9, PPC::X10,
4110   };
4111   static const MCPhysReg VR[] = {
4112     PPC::V2, PPC::V3, PPC::V4, PPC::V5, PPC::V6, PPC::V7, PPC::V8,
4113     PPC::V9, PPC::V10, PPC::V11, PPC::V12, PPC::V13
4114   };
4115 
4116   const unsigned Num_GPR_Regs = array_lengthof(GPR_32);
4117   const unsigned Num_FPR_Regs = useSoftFloat() ? 0 : 13;
4118   const unsigned Num_VR_Regs  = array_lengthof(VR);
4119 
4120   unsigned GPR_idx = 0, FPR_idx = 0, VR_idx = 0;
4121 
4122   const MCPhysReg *GPR = isPPC64 ? GPR_64 : GPR_32;
4123 
4124   // In 32-bit non-varargs functions, the stack space for vectors is after the
4125   // stack space for non-vectors.  We do not use this space unless we have
4126   // too many vectors to fit in registers, something that only occurs in
4127   // constructed examples, but we still have to walk the arglist to handle
4128   // the pathological case: compute VecArgOffset as the start of the vector
4129   // parameter area.  Computing VecArgOffset is the entire point of the
4130   // following loop.
4131   unsigned VecArgOffset = ArgOffset;
4132   if (!isVarArg && !isPPC64) {
4133     for (unsigned ArgNo = 0, e = Ins.size(); ArgNo != e;
4134          ++ArgNo) {
4135       EVT ObjectVT = Ins[ArgNo].VT;
4136       ISD::ArgFlagsTy Flags = Ins[ArgNo].Flags;
4137 
4138       if (Flags.isByVal()) {
4139         // ObjSize is the true size; ArgSize is it rounded up to whole regs.
4140         unsigned ObjSize = Flags.getByValSize();
4141         unsigned ArgSize =
4142                 ((ObjSize + PtrByteSize - 1)/PtrByteSize) * PtrByteSize;
4143         VecArgOffset += ArgSize;
4144         continue;
4145       }
4146 
4147       switch(ObjectVT.getSimpleVT().SimpleTy) {
4148       default: llvm_unreachable("Unhandled argument type!");
4149       case MVT::i1:
4150       case MVT::i32:
4151       case MVT::f32:
4152         VecArgOffset += 4;
4153         break;
4154       case MVT::i64:  // PPC64
4155       case MVT::f64:
4156         // FIXME: We are guaranteed to be !isPPC64 at this point.
4157         // Does MVT::i64 apply?
4158         VecArgOffset += 8;
4159         break;
4160       case MVT::v4f32:
4161       case MVT::v4i32:
4162       case MVT::v8i16:
4163       case MVT::v16i8:
4164         // Nothing to do, we're only looking at non-vector args here.
4165         break;
4166       }
4167     }
4168   }
4169   // We've found where the vector parameter area in memory is.  Skip the
4170   // first 12 parameters; these don't use that memory.
4171   VecArgOffset = ((VecArgOffset+15)/16)*16;
4172   VecArgOffset += 12*16;
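  // Illustrative arithmetic (assuming the 24-byte linkage area of 32-bit
  // Darwin): with two fixed i32 arguments, the loop leaves VecArgOffset at
  // 24 + 8 == 32; rounding up to 16 bytes keeps it at 32, and skipping the
  // 12*16 bytes backing the first 12 vector parameters (passed in registers)
  // puts the vector parameter area at offset 224.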
4173 
4174   // Add DAG nodes to load the arguments or copy them out of registers.  On
4175   // entry to a function on PPC, the arguments start after the linkage area,
4176   // although the first ones are often in registers.
4177 
4178   SmallVector<SDValue, 8> MemOps;
4179   unsigned nAltivecParamsAtEnd = 0;
4180   Function::const_arg_iterator FuncArg = MF.getFunction().arg_begin();
4181   unsigned CurArgIdx = 0;
4182   for (unsigned ArgNo = 0, e = Ins.size(); ArgNo != e; ++ArgNo) {
4183     SDValue ArgVal;
4184     bool needsLoad = false;
4185     EVT ObjectVT = Ins[ArgNo].VT;
4186     unsigned ObjSize = ObjectVT.getSizeInBits()/8;
4187     unsigned ArgSize = ObjSize;
4188     ISD::ArgFlagsTy Flags = Ins[ArgNo].Flags;
4189     if (Ins[ArgNo].isOrigArg()) {
4190       std::advance(FuncArg, Ins[ArgNo].getOrigArgIndex() - CurArgIdx);
4191       CurArgIdx = Ins[ArgNo].getOrigArgIndex();
4192     }
4193     unsigned CurArgOffset = ArgOffset;
4194 
4195     // Varargs or 64-bit Altivec parameters are padded to a 16-byte boundary.
4196     if (ObjectVT==MVT::v4f32 || ObjectVT==MVT::v4i32 ||
4197         ObjectVT==MVT::v8i16 || ObjectVT==MVT::v16i8) {
4198       if (isVarArg || isPPC64) {
4199         MinReservedArea = ((MinReservedArea+15)/16)*16;
4200         MinReservedArea += CalculateStackSlotSize(ObjectVT,
4201                                                   Flags,
4202                                                   PtrByteSize);
4203       } else nAltivecParamsAtEnd++;
4204     } else
4205       // Calculate min reserved area.
4206       MinReservedArea += CalculateStackSlotSize(Ins[ArgNo].VT,
4207                                                 Flags,
4208                                                 PtrByteSize);
4209 
4210     // FIXME the codegen can be much improved in some cases.
4211     // We do not have to keep everything in memory.
4212     if (Flags.isByVal()) {
4213       assert(Ins[ArgNo].isOrigArg() && "Byval arguments cannot be implicit");
4214 
4215       // ObjSize is the true size; ArgSize is ObjSize rounded up to whole registers.
4216       ObjSize = Flags.getByValSize();
4217       ArgSize = ((ObjSize + PtrByteSize - 1)/PtrByteSize) * PtrByteSize;
4218       // Objects of size 1 and 2 are right-justified; everything else is
4219       // left-justified.  This means the memory address is adjusted forwards.
4220       if (ObjSize==1 || ObjSize==2) {
4221         CurArgOffset = CurArgOffset + (4 - ObjSize);
4222       }
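      // e.g. a byval 'struct { short s; }' (ObjSize == 2) is right-justified
      // in its slot, so CurArgOffset was just advanced by 4 - 2 == 2 bytes to
      // point at the object's first byte.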
4223       // The value of the object is its address.
4224       int FI = MFI.CreateFixedObject(ObjSize, CurArgOffset, false, true);
4225       SDValue FIN = DAG.getFrameIndex(FI, PtrVT);
4226       InVals.push_back(FIN);
4227       if (ObjSize==1 || ObjSize==2) {
4228         if (GPR_idx != Num_GPR_Regs) {
4229           unsigned VReg;
4230           if (isPPC64)
4231             VReg = MF.addLiveIn(GPR[GPR_idx], &PPC::G8RCRegClass);
4232           else
4233             VReg = MF.addLiveIn(GPR[GPR_idx], &PPC::GPRCRegClass);
4234           SDValue Val = DAG.getCopyFromReg(Chain, dl, VReg, PtrVT);
4235           EVT ObjType = ObjSize == 1 ? MVT::i8 : MVT::i16;
4236           SDValue Store =
4237               DAG.getTruncStore(Val.getValue(1), dl, Val, FIN,
4238                                 MachinePointerInfo(&*FuncArg), ObjType);
4239           MemOps.push_back(Store);
4240           ++GPR_idx;
4241         }
4242 
4243         ArgOffset += PtrByteSize;
4244 
4245         continue;
4246       }
4247       for (unsigned j = 0; j < ArgSize; j += PtrByteSize) {
4248         // Store whatever pieces of the object are in registers
4249         // to memory.  ArgOffset will be the address of the beginning
4250         // of the object.
4251         if (GPR_idx != Num_GPR_Regs) {
4252           unsigned VReg;
4253           if (isPPC64)
4254             VReg = MF.addLiveIn(GPR[GPR_idx], &PPC::G8RCRegClass);
4255           else
4256             VReg = MF.addLiveIn(GPR[GPR_idx], &PPC::GPRCRegClass);
4257           int FI = MFI.CreateFixedObject(PtrByteSize, ArgOffset, true);
4258           SDValue FIN = DAG.getFrameIndex(FI, PtrVT);
4259           SDValue Val = DAG.getCopyFromReg(Chain, dl, VReg, PtrVT);
4260           SDValue Store = DAG.getStore(Val.getValue(1), dl, Val, FIN,
4261                                        MachinePointerInfo(&*FuncArg, j));
4262           MemOps.push_back(Store);
4263           ++GPR_idx;
4264           ArgOffset += PtrByteSize;
4265         } else {
4266           ArgOffset += ArgSize - (ArgOffset-CurArgOffset);
4267           break;
4268         }
4269       }
4270       continue;
4271     }
4272 
4273     switch (ObjectVT.getSimpleVT().SimpleTy) {
4274     default: llvm_unreachable("Unhandled argument type!");
4275     case MVT::i1:
4276     case MVT::i32:
4277       if (!isPPC64) {
4278         if (GPR_idx != Num_GPR_Regs) {
4279           unsigned VReg = MF.addLiveIn(GPR[GPR_idx], &PPC::GPRCRegClass);
4280           ArgVal = DAG.getCopyFromReg(Chain, dl, VReg, MVT::i32);
4281 
4282           if (ObjectVT == MVT::i1)
4283             ArgVal = DAG.getNode(ISD::TRUNCATE, dl, MVT::i1, ArgVal);
4284 
4285           ++GPR_idx;
4286         } else {
4287           needsLoad = true;
4288           ArgSize = PtrByteSize;
4289         }
4290         // All int arguments reserve stack space in the Darwin ABI.
4291         ArgOffset += PtrByteSize;
4292         break;
4293       }
4294       LLVM_FALLTHROUGH;
4295     case MVT::i64:  // PPC64
4296       if (GPR_idx != Num_GPR_Regs) {
4297         unsigned VReg = MF.addLiveIn(GPR[GPR_idx], &PPC::G8RCRegClass);
4298         ArgVal = DAG.getCopyFromReg(Chain, dl, VReg, MVT::i64);
4299 
4300         if (ObjectVT == MVT::i32 || ObjectVT == MVT::i1)
4301           // PPC64 passes i8, i16, and i32 values in i64 registers. Promote
4302           // value to MVT::i64 and then truncate to the correct register size.
4303           ArgVal = extendArgForPPC64(Flags, ObjectVT, DAG, ArgVal, dl);
4304 
4305         ++GPR_idx;
4306       } else {
4307         needsLoad = true;
4308         ArgSize = PtrByteSize;
4309       }
4310       // All int arguments reserve stack space in the Darwin ABI.
4311       ArgOffset += 8;
4312       break;
4313 
4314     case MVT::f32:
4315     case MVT::f64:
4316       // Every 4 bytes of argument space consumes one of the GPRs available for
4317       // argument passing.
4318       if (GPR_idx != Num_GPR_Regs) {
4319         ++GPR_idx;
4320         if (ObjSize == 8 && GPR_idx != Num_GPR_Regs && !isPPC64)
4321           ++GPR_idx;
4322       }
4323       if (FPR_idx != Num_FPR_Regs) {
4324         unsigned VReg;
4325 
4326         if (ObjectVT == MVT::f32)
4327           VReg = MF.addLiveIn(FPR[FPR_idx], &PPC::F4RCRegClass);
4328         else
4329           VReg = MF.addLiveIn(FPR[FPR_idx], &PPC::F8RCRegClass);
4330 
4331         ArgVal = DAG.getCopyFromReg(Chain, dl, VReg, ObjectVT);
4332         ++FPR_idx;
4333       } else {
4334         needsLoad = true;
4335       }
4336 
4337       // All FP arguments reserve stack space in the Darwin ABI.
4338       ArgOffset += isPPC64 ? 8 : ObjSize;
4339       break;
4340     case MVT::v4f32:
4341     case MVT::v4i32:
4342     case MVT::v8i16:
4343     case MVT::v16i8:
4344       // Note that vector arguments in registers don't reserve stack space,
4345       // except in varargs functions.
4346       if (VR_idx != Num_VR_Regs) {
4347         unsigned VReg = MF.addLiveIn(VR[VR_idx], &PPC::VRRCRegClass);
4348         ArgVal = DAG.getCopyFromReg(Chain, dl, VReg, ObjectVT);
4349         if (isVarArg) {
4350           while ((ArgOffset % 16) != 0) {
4351             ArgOffset += PtrByteSize;
4352             if (GPR_idx != Num_GPR_Regs)
4353               GPR_idx++;
4354           }
4355           ArgOffset += 16;
4356           GPR_idx = std::min(GPR_idx+4, Num_GPR_Regs); // FIXME correct for ppc64?
4357         }
4358         ++VR_idx;
4359       } else {
4360         if (!isVarArg && !isPPC64) {
4361           // Vectors go after all the nonvectors.
4362           CurArgOffset = VecArgOffset;
4363           VecArgOffset += 16;
4364         } else {
4365           // Vectors are aligned.
4366           ArgOffset = ((ArgOffset+15)/16)*16;
4367           CurArgOffset = ArgOffset;
4368           ArgOffset += 16;
4369         }
4370         needsLoad = true;
4371       }
4372       break;
4373     }
4374 
4375     // We need to load the argument to a virtual register if we determined above
4376     // that we ran out of physical registers of the appropriate type.
4377     if (needsLoad) {
4378       int FI = MFI.CreateFixedObject(ObjSize,
4379                                      CurArgOffset + (ArgSize - ObjSize),
4380                                      isImmutable);
4381       SDValue FIN = DAG.getFrameIndex(FI, PtrVT);
4382       ArgVal = DAG.getLoad(ObjectVT, dl, Chain, FIN, MachinePointerInfo());
4383     }
4384 
4385     InVals.push_back(ArgVal);
4386   }
4387 
4388   // Allow for Altivec parameters at the end, if needed.
4389   if (nAltivecParamsAtEnd) {
4390     MinReservedArea = ((MinReservedArea+15)/16)*16;
4391     MinReservedArea += 16*nAltivecParamsAtEnd;
4392   }
4393 
4394   // Area that is at least reserved in the caller of this function.
4395   MinReservedArea = std::max(MinReservedArea, LinkageSize + 8 * PtrByteSize);
4396 
4397   // Set the size that is at least reserved in the caller of this function.
4398   // Tail call optimized functions' reserved stack space needs to be aligned
4399   // so that taking the difference between two stack areas will result in an
4400   // aligned stack.
4401   MinReservedArea =
4402       EnsureStackAlignment(Subtarget.getFrameLowering(), MinReservedArea);
4403   FuncInfo->setMinReservedArea(MinReservedArea);
4404 
4405   // If the function takes a variable number of arguments, make a frame index
4406   // for the start of the first vararg value... for expansion of llvm.va_start.
4407   if (isVarArg) {
4408     int Depth = ArgOffset;
4409 
4410     FuncInfo->setVarArgsFrameIndex(
4411       MFI.CreateFixedObject(PtrVT.getSizeInBits()/8,
4412                             Depth, true));
4413     SDValue FIN = DAG.getFrameIndex(FuncInfo->getVarArgsFrameIndex(), PtrVT);
4414 
4415     // If this function is vararg, store any remaining integer argument regs
4416     // to their spots on the stack so that they may be loaded by dereferencing
4417     // the result of va_next.
4418     for (; GPR_idx != Num_GPR_Regs; ++GPR_idx) {
4419       unsigned VReg;
4420 
4421       if (isPPC64)
4422         VReg = MF.addLiveIn(GPR[GPR_idx], &PPC::G8RCRegClass);
4423       else
4424         VReg = MF.addLiveIn(GPR[GPR_idx], &PPC::GPRCRegClass);
4425 
4426       SDValue Val = DAG.getCopyFromReg(Chain, dl, VReg, PtrVT);
4427       SDValue Store =
4428           DAG.getStore(Val.getValue(1), dl, Val, FIN, MachinePointerInfo());
4429       MemOps.push_back(Store);
4430       // Increment the address by the pointer size for the next argument to store.
4431       SDValue PtrOff = DAG.getConstant(PtrVT.getSizeInBits()/8, dl, PtrVT);
4432       FIN = DAG.getNode(ISD::ADD, dl, PtrOff.getValueType(), FIN, PtrOff);
4433     }
4434   }
4435 
4436   if (!MemOps.empty())
4437     Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, MemOps);
4438 
4439   return Chain;
4440 }
4441 
4442 /// CalculateTailCallSPDiff - Get the amount the stack pointer has to be
4443 /// adjusted to accommodate the arguments for the tailcall.
4444 static int CalculateTailCallSPDiff(SelectionDAG& DAG, bool isTailCall,
4445                                    unsigned ParamSize) {
4446 
4447   if (!isTailCall) return 0;
4448 
4449   PPCFunctionInfo *FI = DAG.getMachineFunction().getInfo<PPCFunctionInfo>();
4450   unsigned CallerMinReservedArea = FI->getMinReservedArea();
4451   int SPDiff = (int)CallerMinReservedArea - (int)ParamSize;
4452   // Remember only if the new adjustment is bigger.
4453   if (SPDiff < FI->getTailCallSPDelta())
4454     FI->setTailCallSPDelta(SPDiff);
4455 
4456   return SPDiff;
4457 }
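// A worked example of the computation above: if the caller reserved a 64-byte
// area but the tail callee's parameters need 96 bytes, SPDiff is
// 64 - 96 == -32; the stack must be grown by 32 bytes, and -32 is recorded as
// the new (more negative) TailCallSPDelta.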
4458 
4459 static bool isFunctionGlobalAddress(SDValue Callee);
4460 
4461 static bool
4462 callsShareTOCBase(const Function *Caller, SDValue Callee,
4463                   const TargetMachine &TM) {
4464   // Callee is either a GlobalAddress or an ExternalSymbol. ExternalSymbols
4465   // don't have enough information to determine if the caller and callee
4466   // share the same TOC base, so we have to pessimistically assume they
4467   // don't, for correctness.
4468   GlobalAddressSDNode *G = dyn_cast<GlobalAddressSDNode>(Callee);
4469   if (!G)
4470     return false;
4471
4472   const GlobalValue *GV = G->getGlobal();
4473   // The medium and large code models are expected to provide a sufficiently
4474   // large TOC to satisfy all data addressing needs of a module with a
4475   // single TOC. Since each module is addressed through a single TOC, we
4476   // only need to check that caller and callee don't cross DSO boundaries.
4477   if (CodeModel::Medium == TM.getCodeModel() ||
4478       CodeModel::Large == TM.getCodeModel())
4479     return TM.shouldAssumeDSOLocal(*Caller->getParent(), GV);
4480 
4481   // Otherwise we need to ensure callee and caller are in the same section,
4482   // since the linker may allocate multiple TOCs, and we don't know which
4483   // sections will belong to the same TOC base.
4484 
4485   if (!GV->isStrongDefinitionForLinker())
4486     return false;
4487 
4488   // Any explicitly-specified sections and section prefixes must also match.
4489   // Also, if we're using -ffunction-sections, then each function is always in
4490   // a different section (the same is true for COMDAT functions).
4491   if (TM.getFunctionSections() || GV->hasComdat() || Caller->hasComdat() ||
4492       GV->getSection() != Caller->getSection())
4493     return false;
4494   if (const auto *F = dyn_cast<Function>(GV)) {
4495     if (F->getSectionPrefix() != Caller->getSectionPrefix())
4496       return false;
4497   }
4498 
4499   // If the callee might be interposed, then we can't assume the ultimate call
4500   // target will be in the same section. Even in cases where we can assume that
4501   // interposition won't happen, in any case where the linker might insert a
4502   // stub to allow for interposition, we must generate code as though
4503   // interposition might occur. To understand why this matters, consider a
4504   // situation where: a -> b -> c where the arrows indicate calls. b and c are
4505   // in the same section, but a is in a different module (i.e. has a different
4506   // TOC base pointer). If the linker allows for interposition between b and c,
4507   // then it will generate a stub for the call edge between b and c which will
4508   // save the TOC pointer into the designated stack slot allocated by b. If we
4509   // return true here, and therefore allow a tail call between b and c, that
4510   // stack slot won't exist and the b -> c stub will end up saving b's TOC base
4511   // pointer into the stack slot allocated by a (where the a -> b stub saved
4512   // a's TOC base pointer). The same reasoning applies when deciding whether a
4513   // nop is needed after the call instruction in b: because the linker might
4514   // insert a stub, it might complain about a missing nop if we omit it
4515   // (although many don't complain in this case).
4516   if (!TM.shouldAssumeDSOLocal(*Caller->getParent(), GV))
4517     return false;
4518 
4519   return true;
4520 }
4521 
4522 static bool
4523 needStackSlotPassParameters(const PPCSubtarget &Subtarget,
4524                             const SmallVectorImpl<ISD::OutputArg> &Outs) {
4525   assert(Subtarget.isSVR4ABI() && Subtarget.isPPC64());
4526 
4527   const unsigned PtrByteSize = 8;
4528   const unsigned LinkageSize = Subtarget.getFrameLowering()->getLinkageSize();
4529 
4530   static const MCPhysReg GPR[] = {
4531     PPC::X3, PPC::X4, PPC::X5, PPC::X6,
4532     PPC::X7, PPC::X8, PPC::X9, PPC::X10,
4533   };
4534   static const MCPhysReg VR[] = {
4535     PPC::V2, PPC::V3, PPC::V4, PPC::V5, PPC::V6, PPC::V7, PPC::V8,
4536     PPC::V9, PPC::V10, PPC::V11, PPC::V12, PPC::V13
4537   };
4538 
4539   const unsigned NumGPRs = array_lengthof(GPR);
4540   const unsigned NumFPRs = 13;
4541   const unsigned NumVRs = array_lengthof(VR);
4542   const unsigned ParamAreaSize = NumGPRs * PtrByteSize;
4543 
4544   unsigned NumBytes = LinkageSize;
4545   unsigned AvailableFPRs = NumFPRs;
4546   unsigned AvailableVRs = NumVRs;
4547 
4548   for (const ISD::OutputArg& Param : Outs) {
4549     if (Param.Flags.isNest()) continue;
4550 
4551     if (CalculateStackSlotUsed(Param.VT, Param.ArgVT, Param.Flags,
4552                                PtrByteSize, LinkageSize, ParamAreaSize,
4553                                NumBytes, AvailableFPRs, AvailableVRs,
4554                                Subtarget.hasQPX()))
4555       return true;
4556   }
4557   return false;
4558 }
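// For instance, under the accounting above a callee taking nine pointer-sized
// integer arguments overflows the eight GPRs (r3-r10): the ninth argument
// requires a stack slot, so this function returns true.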
4559 
4560 static bool
4561 hasSameArgumentList(const Function *CallerFn, ImmutableCallSite CS) {
4562   if (CS.arg_size() != CallerFn->arg_size())
4563     return false;
4564 
4565   ImmutableCallSite::arg_iterator CalleeArgIter = CS.arg_begin();
4566   ImmutableCallSite::arg_iterator CalleeArgEnd = CS.arg_end();
4567   Function::const_arg_iterator CallerArgIter = CallerFn->arg_begin();
4568 
4569   for (; CalleeArgIter != CalleeArgEnd; ++CalleeArgIter, ++CallerArgIter) {
4570     const Value* CalleeArg = *CalleeArgIter;
4571     const Value* CallerArg = &(*CallerArgIter);
4572     if (CalleeArg == CallerArg)
4573       continue;
4574 
4575     // e.g. @caller([4 x i64] %a, [4 x i64] %b) {
4576     //        tail call @callee([4 x i64] undef, [4 x i64] %b)
4577     //      }
4578     // 1st argument of callee is undef and has the same type as caller.
4579     if (CalleeArg->getType() == CallerArg->getType() &&
4580         isa<UndefValue>(CalleeArg))
4581       continue;
4582 
4583     return false;
4584   }
4585 
4586   return true;
4587 }
4588 
4589 // Returns true if TCO is possible between the caller's and callee's
4590 // calling conventions.
4591 static bool
4592 areCallingConvEligibleForTCO_64SVR4(CallingConv::ID CallerCC,
4593                                     CallingConv::ID CalleeCC) {
4594   // Tail calls are possible with fastcc and ccc.
4595   auto isTailCallableCC = [](CallingConv::ID CC) {
4596     return CC == CallingConv::C || CC == CallingConv::Fast;
4597   };
4598   if (!isTailCallableCC(CallerCC) || !isTailCallableCC(CalleeCC))
4599     return false;
4600 
4601   // We can safely tail call both fastcc and ccc callees from a C calling
4602   // convention caller. If the caller is fastcc, we may have less stack space
4603   // than a non-fastcc caller with the same signature, so disable tail calls
4604   // in that case.
4605   return CallerCC == CallingConv::C || CallerCC == CalleeCC;
4606 }
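// A summary of the decision above, in tabular form:
//   caller ccc,    callee ccc    -> eligible
//   caller ccc,    callee fastcc -> eligible
//   caller fastcc, callee fastcc -> eligible
//   caller fastcc, callee ccc    -> not eligible (the fastcc caller may have
//                                   reserved less stack than the callee needs)
//   any other calling convention on either side -> not eligible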
4607 
4608 bool
4609 PPCTargetLowering::IsEligibleForTailCallOptimization_64SVR4(
4610                                     SDValue Callee,
4611                                     CallingConv::ID CalleeCC,
4612                                     ImmutableCallSite CS,
4613                                     bool isVarArg,
4614                                     const SmallVectorImpl<ISD::OutputArg> &Outs,
4615                                     const SmallVectorImpl<ISD::InputArg> &Ins,
4616                                     SelectionDAG& DAG) const {
4617   bool TailCallOpt = getTargetMachine().Options.GuaranteedTailCallOpt;
4618 
4619   if (DisableSCO && !TailCallOpt) return false;
4620 
4621   // Variadic argument functions are not supported.
4622   if (isVarArg) return false;
4623 
4624   auto &Caller = DAG.getMachineFunction().getFunction();
4625   // Check that the calling conventions are compatible for tco.
4626   if (!areCallingConvEligibleForTCO_64SVR4(Caller.getCallingConv(), CalleeCC))
4627     return false;
4628 
4629   // A caller with any byval parameter is not supported.
4630   if (any_of(Ins, [](const ISD::InputArg &IA) { return IA.Flags.isByVal(); }))
4631     return false;
4632 
4633   // A callee with any byval parameter is not supported either.
4634   // Note: This is a quick workaround, because in some cases, e.g.
4635   // caller's stack size > callee's stack size, we are still able to apply
4636   // sibling call optimization. For example, gcc is able to do SCO for caller1
4637   // in the following example, but not for caller2.
4638   //   struct test {
4639   //     long int a;
4640   //     char ary[56];
4641   //   } gTest;
4642   //   __attribute__((noinline)) int callee(struct test v, struct test *b) {
4643   //     b->a = v.a;
4644   //     return 0;
4645   //   }
4646   //   void caller1(struct test a, struct test c, struct test *b) {
4647   //     callee(gTest, b); }
4648   //   void caller2(struct test *b) { callee(gTest, b); }
4649   if (any_of(Outs, [](const ISD::OutputArg& OA) { return OA.Flags.isByVal(); }))
4650     return false;
4651 
4652   // If callee and caller use different calling conventions, we cannot pass
4653   // parameters on stack since offsets for the parameter area may be different.
4654   if (Caller.getCallingConv() != CalleeCC &&
4655       needStackSlotPassParameters(Subtarget, Outs))
4656     return false;
4657 
4658   // No TCO/SCO on indirect calls because the caller has to restore its TOC.
4659   if (!isFunctionGlobalAddress(Callee) &&
4660       !isa<ExternalSymbolSDNode>(Callee))
4661     return false;
4662 
4663   // If the caller and callee potentially have different TOC bases then we
4664   // cannot tail call since we need to restore the TOC pointer after the call.
4665   // ref: https://bugzilla.mozilla.org/show_bug.cgi?id=973977
4666   if (!callsShareTOCBase(&Caller, Callee, getTargetMachine()))
4667     return false;
4668 
4669   // TCO allows altering callee ABI, so we don't have to check further.
4670   if (CalleeCC == CallingConv::Fast && TailCallOpt)
4671     return true;
4672 
4673   if (DisableSCO) return false;
4674 
4675   // If the callee uses the same argument list as the caller, then we can
4676   // apply SCO in this case. If not, we need to check whether the callee
4677   // needs the stack for passing arguments.
4678   if (!hasSameArgumentList(&Caller, CS) &&
4679       needStackSlotPassParameters(Subtarget, Outs)) {
4680     return false;
4681   }
4682 
4683   return true;
4684 }
4685 
4686 /// IsEligibleForTailCallOptimization - Check whether the call is eligible
4687 /// for tail call optimization. Targets which want to do tail call
4688 /// optimization should implement this function.
4689 bool
4690 PPCTargetLowering::IsEligibleForTailCallOptimization(SDValue Callee,
4691                                                      CallingConv::ID CalleeCC,
4692                                                      bool isVarArg,
4693                                       const SmallVectorImpl<ISD::InputArg> &Ins,
4694                                                      SelectionDAG& DAG) const {
4695   if (!getTargetMachine().Options.GuaranteedTailCallOpt)
4696     return false;
4697 
4698   // Variable argument functions are not supported.
4699   if (isVarArg)
4700     return false;
4701 
4702   MachineFunction &MF = DAG.getMachineFunction();
4703   CallingConv::ID CallerCC = MF.getFunction().getCallingConv();
4704   if (CalleeCC == CallingConv::Fast && CallerCC == CalleeCC) {
4705     // Functions containing by val parameters are not supported.
4706     for (unsigned i = 0; i != Ins.size(); i++) {
4707        ISD::ArgFlagsTy Flags = Ins[i].Flags;
4708        if (Flags.isByVal()) return false;
4709     }
4710 
4711     // Non-PIC/GOT tail calls are supported.
4712     if (getTargetMachine().getRelocationModel() != Reloc::PIC_)
4713       return true;
4714 
4715     // At the moment we can only do local tail calls (in same module, hidden
4716     // or protected) if we are generating PIC.
4717     if (GlobalAddressSDNode *G = dyn_cast<GlobalAddressSDNode>(Callee))
4718       return G->getGlobal()->hasHiddenVisibility()
4719           || G->getGlobal()->hasProtectedVisibility();
4720   }
4721 
4722   return false;
4723 }
4724 
4725 /// isBLACompatibleAddress - Return the immediate to use if the specified
4726 /// 32-bit value is representable in the immediate field of a BxA instruction.
4727 static SDNode *isBLACompatibleAddress(SDValue Op, SelectionDAG &DAG) {
4728   ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op);
4729   if (!C) return nullptr;
4730 
4731   int Addr = C->getZExtValue();
4732   if ((Addr & 3) != 0 ||  // Low 2 bits are implicitly zero.
4733       SignExtend32<26>(Addr) != Addr)
4734     return nullptr;  // Top 6 bits must be the sign extension of the immediate.
4735 
4736   return DAG
4737       .getConstant(
4738           (int)C->getZExtValue() >> 2, SDLoc(Op),
4739           DAG.getTargetLoweringInfo().getPointerTy(DAG.getDataLayout()))
4740       .getNode();
4741 }
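// For example, a call to absolute address 0x2000 (word-aligned, and
// sign-extendable from 26 bits) yields the constant 0x800 (0x2000 >> 2),
// which fits the immediate field of an absolute branch; 0x2002 would fail
// the alignment check and return nullptr.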
4742 
4743 namespace {
4744 
4745 struct TailCallArgumentInfo {
4746   SDValue Arg;
4747   SDValue FrameIdxOp;
4748   int FrameIdx = 0;
4749 
4750   TailCallArgumentInfo() = default;
4751 };
4752 
4753 } // end anonymous namespace
4754 
4755 /// StoreTailCallArgumentsToStackSlot - Stores arguments to their stack slot.
4756 static void StoreTailCallArgumentsToStackSlot(
4757     SelectionDAG &DAG, SDValue Chain,
4758     const SmallVectorImpl<TailCallArgumentInfo> &TailCallArgs,
4759     SmallVectorImpl<SDValue> &MemOpChains, const SDLoc &dl) {
4760   for (unsigned i = 0, e = TailCallArgs.size(); i != e; ++i) {
4761     SDValue Arg = TailCallArgs[i].Arg;
4762     SDValue FIN = TailCallArgs[i].FrameIdxOp;
4763     int FI = TailCallArgs[i].FrameIdx;
4764     // Store relative to the frame pointer.
4765     MemOpChains.push_back(DAG.getStore(
4766         Chain, dl, Arg, FIN,
4767         MachinePointerInfo::getFixedStack(DAG.getMachineFunction(), FI)));
4768   }
4769 }
4770 
4771 /// EmitTailCallStoreFPAndRetAddr - Move the frame pointer and return address to
4772 /// the appropriate stack slot for the tail call optimized function call.
4773 static SDValue EmitTailCallStoreFPAndRetAddr(SelectionDAG &DAG, SDValue Chain,
4774                                              SDValue OldRetAddr, SDValue OldFP,
4775                                              int SPDiff, const SDLoc &dl) {
4776   if (SPDiff) {
4777     // Calculate the new stack slot for the return address.
4778     MachineFunction &MF = DAG.getMachineFunction();
4779     const PPCSubtarget &Subtarget = MF.getSubtarget<PPCSubtarget>();
4780     const PPCFrameLowering *FL = Subtarget.getFrameLowering();
4781     bool isPPC64 = Subtarget.isPPC64();
4782     int SlotSize = isPPC64 ? 8 : 4;
4783     int NewRetAddrLoc = SPDiff + FL->getReturnSaveOffset();
4784     int NewRetAddr = MF.getFrameInfo().CreateFixedObject(SlotSize,
4785                                                          NewRetAddrLoc, true);
4786     EVT VT = isPPC64 ? MVT::i64 : MVT::i32;
4787     SDValue NewRetAddrFrIdx = DAG.getFrameIndex(NewRetAddr, VT);
4788     Chain = DAG.getStore(Chain, dl, OldRetAddr, NewRetAddrFrIdx,
4789                          MachinePointerInfo::getFixedStack(MF, NewRetAddr));
4790 
4791     // When using the 32/64-bit SVR4 ABI there is no need to move the FP stack
4792     // slot as the FP is never overwritten.
4793     if (Subtarget.isDarwinABI()) {
4794       int NewFPLoc = SPDiff + FL->getFramePointerSaveOffset();
4795       int NewFPIdx = MF.getFrameInfo().CreateFixedObject(SlotSize, NewFPLoc,
4796                                                          true);
4797       SDValue NewFramePtrIdx = DAG.getFrameIndex(NewFPIdx, VT);
4798       Chain = DAG.getStore(Chain, dl, OldFP, NewFramePtrIdx,
4799                            MachinePointerInfo::getFixedStack(
4800                                DAG.getMachineFunction(), NewFPIdx));
4801     }
4802   }
4803   return Chain;
4804 }
4805 
4806 /// CalculateTailCallArgDest - Remember the argument for later processing, and
4807 /// calculate the position of the argument.
4808 static void
4809 CalculateTailCallArgDest(SelectionDAG &DAG, MachineFunction &MF, bool isPPC64,
4810                          SDValue Arg, int SPDiff, unsigned ArgOffset,
4811                      SmallVectorImpl<TailCallArgumentInfo>& TailCallArguments) {
4812   int Offset = ArgOffset + SPDiff;
4813   uint32_t OpSize = (Arg.getValueSizeInBits() + 7) / 8;
4814   int FI = MF.getFrameInfo().CreateFixedObject(OpSize, Offset, true);
4815   EVT VT = isPPC64 ? MVT::i64 : MVT::i32;
4816   SDValue FIN = DAG.getFrameIndex(FI, VT);
4817   TailCallArgumentInfo Info;
4818   Info.Arg = Arg;
4819   Info.FrameIdxOp = FIN;
4820   Info.FrameIdx = FI;
4821   TailCallArguments.push_back(Info);
4822 }
4823 
4824 /// EmitTailCallLoadFPAndRetAddr - Emit loads from the frame pointer and return
4825 /// address stack slots. Returns the chain as result and the loaded return
4826 /// address and frame pointer in LROpOut/FPOpOut. Used when tail calling.
4827 SDValue PPCTargetLowering::EmitTailCallLoadFPAndRetAddr(
4828     SelectionDAG &DAG, int SPDiff, SDValue Chain, SDValue &LROpOut,
4829     SDValue &FPOpOut, const SDLoc &dl) const {
4830   if (SPDiff) {
4831     // Load the LR and FP stack slot for later adjusting.
4832     EVT VT = Subtarget.isPPC64() ? MVT::i64 : MVT::i32;
4833     LROpOut = getReturnAddrFrameIndex(DAG);
4834     LROpOut = DAG.getLoad(VT, dl, Chain, LROpOut, MachinePointerInfo());
4835     Chain = SDValue(LROpOut.getNode(), 1);
4836 
4837     // When using the 32/64-bit SVR4 ABI there is no need to load the FP stack
4838     // slot as the FP is never overwritten.
4839     if (Subtarget.isDarwinABI()) {
4840       FPOpOut = getFramePointerFrameIndex(DAG);
4841       FPOpOut = DAG.getLoad(VT, dl, Chain, FPOpOut, MachinePointerInfo());
4842       Chain = SDValue(FPOpOut.getNode(), 1);
4843     }
4844   }
4845   return Chain;
4846 }
4847 
4848 /// CreateCopyOfByValArgument - Make a copy of an aggregate at the address
4849 /// specified by "Src" to the address "Dst".  The size and alignment of the
4850 /// copy are taken from the byval parameter attribute. The copy will be passed
4851 /// as a byval function parameter.
4852 /// Sometimes what we are copying is the end of a larger object, the part that
4853 /// does not fit in registers.
4854 static SDValue CreateCopyOfByValArgument(SDValue Src, SDValue Dst,
4855                                          SDValue Chain, ISD::ArgFlagsTy Flags,
4856                                          SelectionDAG &DAG, const SDLoc &dl) {
4857   SDValue SizeNode = DAG.getConstant(Flags.getByValSize(), dl, MVT::i32);
4858   return DAG.getMemcpy(Chain, dl, Dst, Src, SizeNode, Flags.getByValAlign(),
4859                        false, false, false, MachinePointerInfo(),
4860                        MachinePointerInfo());
4861 }
4862 
4863 /// LowerMemOpCallTo - Store the argument to the stack or remember it in case of
4864 /// tail calls.
4865 static void LowerMemOpCallTo(
4866     SelectionDAG &DAG, MachineFunction &MF, SDValue Chain, SDValue Arg,
4867     SDValue PtrOff, int SPDiff, unsigned ArgOffset, bool isPPC64,
4868     bool isTailCall, bool isVector, SmallVectorImpl<SDValue> &MemOpChains,
4869     SmallVectorImpl<TailCallArgumentInfo> &TailCallArguments, const SDLoc &dl) {
4870   EVT PtrVT = DAG.getTargetLoweringInfo().getPointerTy(DAG.getDataLayout());
4871   if (!isTailCall) {
4872     if (isVector) {
4873       SDValue StackPtr;
4874       if (isPPC64)
4875         StackPtr = DAG.getRegister(PPC::X1, MVT::i64);
4876       else
4877         StackPtr = DAG.getRegister(PPC::R1, MVT::i32);
4878       PtrOff = DAG.getNode(ISD::ADD, dl, PtrVT, StackPtr,
4879                            DAG.getConstant(ArgOffset, dl, PtrVT));
4880     }
4881     MemOpChains.push_back(
4882         DAG.getStore(Chain, dl, Arg, PtrOff, MachinePointerInfo()));
4883     // Calculate and remember argument location.
4884   } else CalculateTailCallArgDest(DAG, MF, isPPC64, Arg, SPDiff, ArgOffset,
4885                                   TailCallArguments);
4886 }
4887 
4888 static void
4889 PrepareTailCall(SelectionDAG &DAG, SDValue &InFlag, SDValue &Chain,
4890                 const SDLoc &dl, int SPDiff, unsigned NumBytes, SDValue LROp,
4891                 SDValue FPOp,
4892                 SmallVectorImpl<TailCallArgumentInfo> &TailCallArguments) {
4893   // Emit a sequence of copyto/copyfrom virtual registers for arguments that
4894   // might overwrite each other in case of tail call optimization.
4895   SmallVector<SDValue, 8> MemOpChains2;
4896   // Do not flag preceding copytoreg stuff together with the following stuff.
4897   InFlag = SDValue();
4898   StoreTailCallArgumentsToStackSlot(DAG, Chain, TailCallArguments,
4899                                     MemOpChains2, dl);
4900   if (!MemOpChains2.empty())
4901     Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, MemOpChains2);
4902 
4903   // Store the return address to the appropriate stack slot.
4904   Chain = EmitTailCallStoreFPAndRetAddr(DAG, Chain, LROp, FPOp, SPDiff, dl);
4905 
4906   // Emit callseq_end just before tailcall node.
4907   Chain = DAG.getCALLSEQ_END(Chain, DAG.getIntPtrConstant(NumBytes, dl, true),
4908                              DAG.getIntPtrConstant(0, dl, true), InFlag, dl);
4909   InFlag = Chain.getValue(1);
4910 }
4911 
4912 // Is this the global address of a function that can be called by name (as
4913 // opposed to something that must hold a descriptor for an indirect call)?
4914 static bool isFunctionGlobalAddress(SDValue Callee) {
4915   if (GlobalAddressSDNode *G = dyn_cast<GlobalAddressSDNode>(Callee)) {
4916     if (Callee.getOpcode() == ISD::GlobalTLSAddress ||
4917         Callee.getOpcode() == ISD::TargetGlobalTLSAddress)
4918       return false;
4919 
4920     return G->getGlobal()->getValueType()->isFunctionTy();
4921   }
4922 
4923   return false;
4924 }
4925 
4926 static unsigned
4927 PrepareCall(SelectionDAG &DAG, SDValue &Callee, SDValue &InFlag, SDValue &Chain,
4928             SDValue CallSeqStart, const SDLoc &dl, int SPDiff, bool isTailCall,
4929             bool isPatchPoint, bool hasNest,
4930             SmallVectorImpl<std::pair<unsigned, SDValue>> &RegsToPass,
4931             SmallVectorImpl<SDValue> &Ops, std::vector<EVT> &NodeTys,
4932             ImmutableCallSite CS, const PPCSubtarget &Subtarget) {
4933   bool isPPC64 = Subtarget.isPPC64();
4934   bool isSVR4ABI = Subtarget.isSVR4ABI();
4935   bool isELFv2ABI = Subtarget.isELFv2ABI();
4936   bool isAIXABI = Subtarget.isAIXABI();
4937 
4938   EVT PtrVT = DAG.getTargetLoweringInfo().getPointerTy(DAG.getDataLayout());
4939   NodeTys.push_back(MVT::Other);   // Returns a chain
4940   NodeTys.push_back(MVT::Glue);    // Returns a flag for retval copy to use.
4941 
4942   unsigned CallOpc = PPCISD::CALL;
4943 
4944   bool needIndirectCall = true;
4945   if (!isSVR4ABI || !isPPC64)
4946     if (SDNode *Dest = isBLACompatibleAddress(Callee, DAG)) {
4947       // If this is an absolute destination address, use the munged value.
4948       Callee = SDValue(Dest, 0);
4949       needIndirectCall = false;
4950     }
4951 
4952   // PC-relative references to external symbols should go through $stub, unless
4953   // we're building with the Leopard linker or later, which automatically
4954   // synthesizes these stubs.
4955   const TargetMachine &TM = DAG.getTarget();
4956   const Module *Mod = DAG.getMachineFunction().getFunction().getParent();
4957   const GlobalValue *GV = nullptr;
4958   if (auto *G = dyn_cast<GlobalAddressSDNode>(Callee))
4959     GV = G->getGlobal();
4960   bool Local = TM.shouldAssumeDSOLocal(*Mod, GV);
4961   bool UsePlt = !Local && Subtarget.isTargetELF() && !isPPC64;
4962 
4963   // If the callee is a GlobalAddress/ExternalSymbol node (quite common,
4964   // every direct call is), turn it into a TargetGlobalAddress /
4965   // TargetExternalSymbol node so that legalize doesn't hack it.
4966   if (isFunctionGlobalAddress(Callee)) {
4967     GlobalAddressSDNode *G = cast<GlobalAddressSDNode>(Callee);
4968 
4969     // A call to a TLS address is actually an indirect call to a
4970     // thread-specific pointer.
4971     unsigned OpFlags = 0;
4972     if (UsePlt)
4973       OpFlags = PPCII::MO_PLT;
4974 
4975     Callee = DAG.getTargetGlobalAddress(G->getGlobal(), dl,
4976                                         Callee.getValueType(), 0, OpFlags);
4977     needIndirectCall = false;
4978   }
4979 
4980   if (ExternalSymbolSDNode *S = dyn_cast<ExternalSymbolSDNode>(Callee)) {
4981     unsigned char OpFlags = 0;
4982 
4983     if (UsePlt)
4984       OpFlags = PPCII::MO_PLT;
4985 
4986     Callee = DAG.getTargetExternalSymbol(S->getSymbol(), Callee.getValueType(),
4987                                          OpFlags);
4988     needIndirectCall = false;
4989   }
4990 
4991   if (isPatchPoint) {
4992     // We'll form an invalid direct call when lowering a patchpoint; the full
4993     // sequence for an indirect call is complicated, and many of the
4994     // instructions introduced might have side effects (and, thus, can't be
4995     // removed later). The call itself will be removed as soon as the
4996     // argument/return lowering is complete, so the fact that it has the wrong
4997     // kind of operands should not really matter.
4998     needIndirectCall = false;
4999   }
5000 
5001   if (needIndirectCall) {
5002     // Otherwise, this is an indirect call.  We have to use an MTCTR/BCTRL
5003     // pair to do the call; we can't use PPCISD::CALL.
5004     SDValue MTCTROps[] = {Chain, Callee, InFlag};
5005 
5006     if (isSVR4ABI && isPPC64 && !isELFv2ABI) {
5007       // Function pointers in the 64-bit SVR4 ABI do not point to the function
5008       // entry point, but to the function descriptor (the function entry point
5009       // address is part of the function descriptor though).
5010       // The function descriptor is a three doubleword structure with the
5011       // following fields: function entry point, TOC base address and
5012       // environment pointer.
5013       // Thus for a call through a function pointer, the following actions need
5014       // to be performed:
5015       //   1. Save the TOC of the caller in the TOC save area of its stack
5016       //      frame (this is done in LowerCall_Darwin() or LowerCall_64SVR4()).
5017       //   2. Load the address of the function entry point from the function
5018       //      descriptor.
5019       //   3. Load the TOC of the callee from the function descriptor into r2.
5020       //   4. Load the environment pointer from the function descriptor into
5021       //      r11.
5022       //   5. Branch to the function entry point address.
5023       //   6. On return of the callee, the TOC of the caller needs to be
5024       //      restored (this is done in FinishCall()).
5025       //
5026       // The loads are scheduled at the beginning of the call sequence, and the
5027       // register copies are flagged together to ensure that no other
5028       // operations can be scheduled in between. E.g. without flagging the
5029       // copies together, a TOC access in the caller could be scheduled between
5030       // the assignment of the callee TOC and the branch to the callee, which
5031       // results in the TOC access going through the TOC of the callee instead
5032       // of going through the TOC of the caller, which leads to incorrect code.
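      //
      // For illustration only (a sketch consistent with the offsets used in
      // the loads below, not a type used by this code), an ELFv1 function
      // descriptor can be pictured as:
      //
      //   struct FunctionDescriptor {   // three doublewords, 24 bytes
      //     uint64_t EntryPoint;        // offset 0:  loaded into CTR below
      //     uint64_t TOCBase;           // offset 8:  loaded into r2 below
      //     uint64_t EnvPointer;        // offset 16: loaded into r11 below
      //   };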

      // Load the address of the function entry point from the function
      // descriptor.
      SDValue LDChain = CallSeqStart.getValue(CallSeqStart->getNumValues()-1);
      if (LDChain.getValueType() == MVT::Glue)
        LDChain = CallSeqStart.getValue(CallSeqStart->getNumValues()-2);

      auto MMOFlags = Subtarget.hasInvariantFunctionDescriptors()
                          ? (MachineMemOperand::MODereferenceable |
                             MachineMemOperand::MOInvariant)
                          : MachineMemOperand::MONone;

      MachinePointerInfo MPI(CS ? CS.getCalledValue() : nullptr);
      SDValue LoadFuncPtr = DAG.getLoad(MVT::i64, dl, LDChain, Callee, MPI,
                                        /* Alignment = */ 8, MMOFlags);

      // Load environment pointer into r11.
      SDValue PtrOff = DAG.getIntPtrConstant(16, dl);
      SDValue AddPtr = DAG.getNode(ISD::ADD, dl, MVT::i64, Callee, PtrOff);
      SDValue LoadEnvPtr =
          DAG.getLoad(MVT::i64, dl, LDChain, AddPtr, MPI.getWithOffset(16),
                      /* Alignment = */ 8, MMOFlags);

      SDValue TOCOff = DAG.getIntPtrConstant(8, dl);
      SDValue AddTOC = DAG.getNode(ISD::ADD, dl, MVT::i64, Callee, TOCOff);
      SDValue TOCPtr =
          DAG.getLoad(MVT::i64, dl, LDChain, AddTOC, MPI.getWithOffset(8),
                      /* Alignment = */ 8, MMOFlags);

      setUsesTOCBasePtr(DAG);
      SDValue TOCVal = DAG.getCopyToReg(Chain, dl, PPC::X2, TOCPtr,
                                        InFlag);
      Chain = TOCVal.getValue(0);
      InFlag = TOCVal.getValue(1);

      // If the function call has an explicit 'nest' parameter, it takes the
      // place of the environment pointer.
      if (!hasNest) {
        SDValue EnvVal = DAG.getCopyToReg(Chain, dl, PPC::X11, LoadEnvPtr,
                                          InFlag);

        Chain = EnvVal.getValue(0);
        InFlag = EnvVal.getValue(1);
      }

      MTCTROps[0] = Chain;
      MTCTROps[1] = LoadFuncPtr;
      MTCTROps[2] = InFlag;
    }

    Chain = DAG.getNode(PPCISD::MTCTR, dl, NodeTys,
                        makeArrayRef(MTCTROps, InFlag.getNode() ? 3 : 2));
    InFlag = Chain.getValue(1);

    NodeTys.clear();
    NodeTys.push_back(MVT::Other);
    NodeTys.push_back(MVT::Glue);
    Ops.push_back(Chain);
    CallOpc = PPCISD::BCTRL;
    Callee.setNode(nullptr);
    // Add use of X11 (holding environment pointer)
    if (isSVR4ABI && isPPC64 && !isELFv2ABI && !hasNest)
      Ops.push_back(DAG.getRegister(PPC::X11, PtrVT));
    // Add CTR register as callee so a bctr can be emitted later.
    if (isTailCall)
      Ops.push_back(DAG.getRegister(isPPC64 ? PPC::CTR8 : PPC::CTR, PtrVT));
  }

  // If this is a direct call, pass the chain and the callee.
  if (Callee.getNode()) {
    Ops.push_back(Chain);
    Ops.push_back(Callee);
  }
  // If this is a tail call, add the stack pointer delta.
  if (isTailCall)
    Ops.push_back(DAG.getConstant(SPDiff, dl, MVT::i32));

  // Add argument registers to the end of the list so that they are known live
  // into the call.
  for (unsigned i = 0, e = RegsToPass.size(); i != e; ++i)
    Ops.push_back(DAG.getRegister(RegsToPass[i].first,
                                  RegsToPass[i].second.getValueType()));

  // All calls in the AIX ABI and the 64-bit ELF ABIs need the TOC register
  // live into the call.
  // We do need to reserve R2/X2 to appease the verifier for the PATCHPOINT.
  if ((isSVR4ABI && isPPC64) || isAIXABI) {
    setUsesTOCBasePtr(DAG);

    // We cannot add R2/X2 as an operand here for PATCHPOINT, because there is
    // no way to mark dependencies as implicit here.
    // We will add the R2/X2 dependency in EmitInstrWithCustomInserter.
    if (!isPatchPoint)
      Ops.push_back(DAG.getRegister(isPPC64 ? PPC::X2
                                            : PPC::R2, PtrVT));
  }

  return CallOpc;
}

SDValue PPCTargetLowering::LowerCallResult(
    SDValue Chain, SDValue InFlag, CallingConv::ID CallConv, bool isVarArg,
    const SmallVectorImpl<ISD::InputArg> &Ins, const SDLoc &dl,
    SelectionDAG &DAG, SmallVectorImpl<SDValue> &InVals) const {
  SmallVector<CCValAssign, 16> RVLocs;
  CCState CCRetInfo(CallConv, isVarArg, DAG.getMachineFunction(), RVLocs,
                    *DAG.getContext());

  CCRetInfo.AnalyzeCallResult(
      Ins, (Subtarget.isSVR4ABI() && CallConv == CallingConv::Cold)
               ? RetCC_PPC_Cold
               : RetCC_PPC);

  // Copy all of the result registers out of their specified physregs.
  for (unsigned i = 0, e = RVLocs.size(); i != e; ++i) {
    CCValAssign &VA = RVLocs[i];
    assert(VA.isRegLoc() && "Can only return in registers!");

    SDValue Val;

    if (Subtarget.hasSPE() && VA.getLocVT() == MVT::f64) {
      SDValue Lo = DAG.getCopyFromReg(Chain, dl, VA.getLocReg(), MVT::i32,
                                      InFlag);
      Chain = Lo.getValue(1);
      InFlag = Lo.getValue(2);
      VA = RVLocs[++i]; // skip ahead to next loc
      SDValue Hi = DAG.getCopyFromReg(Chain, dl, VA.getLocReg(), MVT::i32,
                                      InFlag);
      Chain = Hi.getValue(1);
      InFlag = Hi.getValue(2);
      if (!Subtarget.isLittleEndian())
        std::swap(Lo, Hi);
      Val = DAG.getNode(PPCISD::BUILD_SPE64, dl, MVT::f64, Lo, Hi);
    } else {
      Val = DAG.getCopyFromReg(Chain, dl,
                               VA.getLocReg(), VA.getLocVT(), InFlag);
      Chain = Val.getValue(1);
      InFlag = Val.getValue(2);
    }

    switch (VA.getLocInfo()) {
    default: llvm_unreachable("Unknown loc info!");
    case CCValAssign::Full: break;
    case CCValAssign::AExt:
      Val = DAG.getNode(ISD::TRUNCATE, dl, VA.getValVT(), Val);
      break;
    case CCValAssign::ZExt:
      Val = DAG.getNode(ISD::AssertZext, dl, VA.getLocVT(), Val,
                        DAG.getValueType(VA.getValVT()));
      Val = DAG.getNode(ISD::TRUNCATE, dl, VA.getValVT(), Val);
      break;
    case CCValAssign::SExt:
      Val = DAG.getNode(ISD::AssertSext, dl, VA.getLocVT(), Val,
                        DAG.getValueType(VA.getValVT()));
      Val = DAG.getNode(ISD::TRUNCATE, dl, VA.getValVT(), Val);
      break;
    }

    InVals.push_back(Val);
  }

  return Chain;
}

SDValue PPCTargetLowering::FinishCall(
    CallingConv::ID CallConv, const SDLoc &dl, bool isTailCall, bool isVarArg,
    bool isPatchPoint, bool hasNest, SelectionDAG &DAG,
    SmallVector<std::pair<unsigned, SDValue>, 8> &RegsToPass, SDValue InFlag,
    SDValue Chain, SDValue CallSeqStart, SDValue &Callee, int SPDiff,
    unsigned NumBytes, const SmallVectorImpl<ISD::InputArg> &Ins,
    SmallVectorImpl<SDValue> &InVals, ImmutableCallSite CS) const {
  std::vector<EVT> NodeTys;
  SmallVector<SDValue, 8> Ops;
  unsigned CallOpc = PrepareCall(DAG, Callee, InFlag, Chain, CallSeqStart, dl,
                                 SPDiff, isTailCall, isPatchPoint, hasNest,
                                 RegsToPass, Ops, NodeTys, CS, Subtarget);

  // Add implicit use of CR bit 6 for 32-bit SVR4 vararg calls.
  if (isVarArg && Subtarget.isSVR4ABI() && !Subtarget.isPPC64())
    Ops.push_back(DAG.getRegister(PPC::CR1EQ, MVT::i32));

  // When performing tail call optimization the callee pops its arguments off
  // the stack. Account for this here so these bytes can be pushed back on in
  // PPCFrameLowering::eliminateCallFramePseudoInstr.
  int BytesCalleePops =
    (CallConv == CallingConv::Fast &&
     getTargetMachine().Options.GuaranteedTailCallOpt) ? NumBytes : 0;

  // Add a register mask operand representing the call-preserved registers.
  const TargetRegisterInfo *TRI = Subtarget.getRegisterInfo();
  const uint32_t *Mask =
      TRI->getCallPreservedMask(DAG.getMachineFunction(), CallConv);
  assert(Mask && "Missing call preserved mask for calling convention");
  Ops.push_back(DAG.getRegisterMask(Mask));

  if (InFlag.getNode())
    Ops.push_back(InFlag);

  // Emit tail call.
  if (isTailCall) {
    assert(((Callee.getOpcode() == ISD::Register &&
             cast<RegisterSDNode>(Callee)->getReg() == PPC::CTR) ||
            Callee.getOpcode() == ISD::TargetExternalSymbol ||
            Callee.getOpcode() == ISD::TargetGlobalAddress ||
            isa<ConstantSDNode>(Callee)) &&
           "Expecting a global address, external symbol, absolute value or "
           "register");

    DAG.getMachineFunction().getFrameInfo().setHasTailCall();
    return DAG.getNode(PPCISD::TC_RETURN, dl, MVT::Other, Ops);
  }

  // Add a NOP immediately after the branch instruction when using the 64-bit
  // SVR4 or the AIX ABI.
  // At link time, if caller and callee are in different modules and thus have
  // different TOCs, the call will be replaced with a call to a stub function
  // which saves the current TOC, loads the TOC of the callee, and branches to
  // the callee. The NOP will be replaced with a load instruction which
  // restores the TOC of the caller from the TOC save slot of the current
  // stack frame. If caller and callee belong to the same module (and have the
  // same TOC), the NOP will remain unchanged, or become some other NOP.
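  //
  // As a sketch (the stub name is illustrative, and the save slot offset is
  // ABI-dependent; see getTOCSaveOffset()), a cross-module call on 64-bit
  // ELF might go from
  //
  //   bl callee
  //   nop
  //
  // to the linker-rewritten form
  //
  //   bl callee.plt_stub
  //   ld 2, 24(1)    // reload the caller's TOC from its save slot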

  MachineFunction &MF = DAG.getMachineFunction();
  EVT PtrVT = getPointerTy(DAG.getDataLayout());
  if (!isTailCall && !isPatchPoint &&
      ((Subtarget.isSVR4ABI() && Subtarget.isPPC64()) ||
       Subtarget.isAIXABI())) {
    if (CallOpc == PPCISD::BCTRL) {
      if (Subtarget.isAIXABI())
        report_fatal_error("Indirect call on AIX is not implemented.");

      // This is a call through a function pointer.
      // Restore the caller TOC from the save area into R2.
      // See PrepareCall() for more information about calls through function
      // pointers in the 64-bit SVR4 ABI.
      // We are using a target-specific load with r2 hard coded, because the
      // result of a target-independent load would never go directly into r2,
      // since r2 is a reserved register (which prevents the register allocator
      // from allocating it), resulting in an additional register being
      // allocated and an unnecessary move instruction being generated.
      CallOpc = PPCISD::BCTRL_LOAD_TOC;

      SDValue StackPtr = DAG.getRegister(PPC::X1, PtrVT);
      unsigned TOCSaveOffset = Subtarget.getFrameLowering()->getTOCSaveOffset();
      SDValue TOCOff = DAG.getIntPtrConstant(TOCSaveOffset, dl);
      SDValue AddTOC = DAG.getNode(ISD::ADD, dl, MVT::i64, StackPtr, TOCOff);

      // The address needs to go after the chain input but before the flag (or
      // any other variadic arguments).
      Ops.insert(std::next(Ops.begin()), AddTOC);
    } else if (CallOpc == PPCISD::CALL &&
               !callsShareTOCBase(&MF.getFunction(), Callee,
                                  DAG.getTarget())) {
      // Otherwise insert NOP for non-local calls.
      CallOpc = PPCISD::CALL_NOP;
    }
  }

  if (Subtarget.isAIXABI() && isFunctionGlobalAddress(Callee)) {
    // On AIX, direct function calls reference the symbol for the function's
    // entry point, which is named by inserting a "." before the function's
    // C-linkage name.
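    // For example, a call to a C function foo() branches to the symbol
    // ".foo" rather than to "foo" itself.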
    GlobalAddressSDNode *G = cast<GlobalAddressSDNode>(Callee);
    auto &Context = DAG.getMachineFunction().getMMI().getContext();
    MCSymbol *S = Context.getOrCreateSymbol(Twine(".") +
                                            Twine(G->getGlobal()->getName()));
    Callee = DAG.getMCSymbol(S, PtrVT);
    // Replace the GlobalAddressSDNode Callee with the MCSymbolSDNode.
    Ops[1] = Callee;
  }

  Chain = DAG.getNode(CallOpc, dl, NodeTys, Ops);
  InFlag = Chain.getValue(1);

  Chain = DAG.getCALLSEQ_END(Chain, DAG.getIntPtrConstant(NumBytes, dl, true),
                             DAG.getIntPtrConstant(BytesCalleePops, dl, true),
                             InFlag, dl);
  if (!Ins.empty())
    InFlag = Chain.getValue(1);

  return LowerCallResult(Chain, InFlag, CallConv, isVarArg,
                         Ins, dl, DAG, InVals);
}

SDValue
PPCTargetLowering::LowerCall(TargetLowering::CallLoweringInfo &CLI,
                             SmallVectorImpl<SDValue> &InVals) const {
  SelectionDAG &DAG                     = CLI.DAG;
  SDLoc &dl                             = CLI.DL;
  SmallVectorImpl<ISD::OutputArg> &Outs = CLI.Outs;
  SmallVectorImpl<SDValue> &OutVals     = CLI.OutVals;
  SmallVectorImpl<ISD::InputArg> &Ins   = CLI.Ins;
  SDValue Chain                         = CLI.Chain;
  SDValue Callee                        = CLI.Callee;
  bool &isTailCall                      = CLI.IsTailCall;
  CallingConv::ID CallConv              = CLI.CallConv;
  bool isVarArg                         = CLI.IsVarArg;
  bool isPatchPoint                     = CLI.IsPatchPoint;
  ImmutableCallSite CS                  = CLI.CS;

  if (isTailCall) {
    if (Subtarget.useLongCalls() && !(CS && CS.isMustTailCall()))
      isTailCall = false;
    else if (Subtarget.isSVR4ABI() && Subtarget.isPPC64())
      isTailCall =
        IsEligibleForTailCallOptimization_64SVR4(Callee, CallConv, CS,
                                                 isVarArg, Outs, Ins, DAG);
    else
      isTailCall = IsEligibleForTailCallOptimization(Callee, CallConv, isVarArg,
                                                     Ins, DAG);
    if (isTailCall) {
      ++NumTailCalls;
      if (!getTargetMachine().Options.GuaranteedTailCallOpt)
        ++NumSiblingCalls;

      assert(isa<GlobalAddressSDNode>(Callee) &&
             "Callee should be an llvm::Function object.");
      LLVM_DEBUG(
          const GlobalValue *GV =
              cast<GlobalAddressSDNode>(Callee)->getGlobal();
          const unsigned Width =
              80 - strlen("TCO caller: ") - strlen(", callee linkage: 0, 0");
          dbgs() << "TCO caller: "
                 << left_justify(DAG.getMachineFunction().getName(), Width)
                 << ", callee linkage: " << GV->getVisibility() << ", "
                 << GV->getLinkage() << "\n");
    }
  }

  if (!isTailCall && CS && CS.isMustTailCall())
    report_fatal_error("failed to perform tail call elimination on a call "
                       "site marked musttail");

  // When long calls (i.e. indirect calls) are always used, calls are always
  // made via function pointer. If we have a function name, first translate it
  // into a pointer.
  if (Subtarget.useLongCalls() && isa<GlobalAddressSDNode>(Callee) &&
      !isTailCall)
    Callee = LowerGlobalAddress(Callee, DAG);

  if (Subtarget.isSVR4ABI() && Subtarget.isPPC64())
    return LowerCall_64SVR4(Chain, Callee, CallConv, isVarArg,
                            isTailCall, isPatchPoint, Outs, OutVals, Ins,
                            dl, DAG, InVals, CS);

  if (Subtarget.isSVR4ABI())
    return LowerCall_32SVR4(Chain, Callee, CallConv, isVarArg,
                            isTailCall, isPatchPoint, Outs, OutVals, Ins,
                            dl, DAG, InVals, CS);

  if (Subtarget.isAIXABI())
    return LowerCall_AIX(Chain, Callee, CallConv, isVarArg,
                         isTailCall, isPatchPoint, Outs, OutVals, Ins,
                         dl, DAG, InVals, CS);

  return LowerCall_Darwin(Chain, Callee, CallConv, isVarArg,
                          isTailCall, isPatchPoint, Outs, OutVals, Ins,
                          dl, DAG, InVals, CS);
}

SDValue PPCTargetLowering::LowerCall_32SVR4(
    SDValue Chain, SDValue Callee, CallingConv::ID CallConv, bool isVarArg,
    bool isTailCall, bool isPatchPoint,
    const SmallVectorImpl<ISD::OutputArg> &Outs,
    const SmallVectorImpl<SDValue> &OutVals,
    const SmallVectorImpl<ISD::InputArg> &Ins, const SDLoc &dl,
    SelectionDAG &DAG, SmallVectorImpl<SDValue> &InVals,
    ImmutableCallSite CS) const {
  // See PPCTargetLowering::LowerFormalArguments_32SVR4() for a description
  // of the 32-bit SVR4 ABI stack frame layout.

  assert((CallConv == CallingConv::C ||
          CallConv == CallingConv::Cold ||
          CallConv == CallingConv::Fast) && "Unknown calling convention!");

  unsigned PtrByteSize = 4;

  MachineFunction &MF = DAG.getMachineFunction();

  // Mark this function as potentially containing a tail call. As a
  // consequence, the frame pointer will be used for dynamic alloca and for
  // restoring the caller's stack pointer in this function's epilogue. This is
  // done because the called function in a tail call might overwrite the value
  // in this function's (MF) stack pointer stack slot 0(SP).
  if (getTargetMachine().Options.GuaranteedTailCallOpt &&
      CallConv == CallingConv::Fast)
    MF.getInfo<PPCFunctionInfo>()->setHasFastCall();

  // Count how many bytes are to be pushed on the stack, including the linkage
  // area, the parameter list area, and the part of the local variable space
  // which contains copies of aggregates which are passed by value.

  // Assign locations to all of the outgoing arguments.
  SmallVector<CCValAssign, 16> ArgLocs;
  PPCCCState CCInfo(CallConv, isVarArg, MF, ArgLocs, *DAG.getContext());

  // Reserve space for the linkage area on the stack.
  CCInfo.AllocateStack(Subtarget.getFrameLowering()->getLinkageSize(),
                       PtrByteSize);
  if (useSoftFloat())
    CCInfo.PreAnalyzeCallOperands(Outs);

  if (isVarArg) {
    // Handle fixed and variable vector arguments differently.
    // Fixed vector arguments go into registers as long as registers are
    // available. Variable vector arguments always go into memory.
    unsigned NumArgs = Outs.size();

    for (unsigned i = 0; i != NumArgs; ++i) {
      MVT ArgVT = Outs[i].VT;
      ISD::ArgFlagsTy ArgFlags = Outs[i].Flags;
      bool Result;

      if (Outs[i].IsFixed) {
        Result = CC_PPC32_SVR4(i, ArgVT, ArgVT, CCValAssign::Full, ArgFlags,
                               CCInfo);
      } else {
        Result = CC_PPC32_SVR4_VarArg(i, ArgVT, ArgVT, CCValAssign::Full,
                                      ArgFlags, CCInfo);
      }

      if (Result) {
#ifndef NDEBUG
        errs() << "Call operand #" << i << " has unhandled type "
               << EVT(ArgVT).getEVTString() << "\n";
#endif
        llvm_unreachable(nullptr);
      }
    }
  } else {
    // All arguments are treated the same.
    CCInfo.AnalyzeCallOperands(Outs, CC_PPC32_SVR4);
  }
  CCInfo.clearWasPPCF128();

  // Assign locations to all of the outgoing aggregate by value arguments.
  SmallVector<CCValAssign, 16> ByValArgLocs;
  CCState CCByValInfo(CallConv, isVarArg, MF, ByValArgLocs, *DAG.getContext());

  // Reserve stack space for the allocations in CCInfo.
  CCByValInfo.AllocateStack(CCInfo.getNextStackOffset(), PtrByteSize);

  CCByValInfo.AnalyzeCallOperands(Outs, CC_PPC32_SVR4_ByVal);

  // Size of the linkage area, the parameter list area, and the part of the
  // local variable space where copies of aggregates which are passed by
  // value are stored.
  unsigned NumBytes = CCByValInfo.getNextStackOffset();

  // Calculate by how many bytes the stack has to be adjusted in case of tail
  // call optimization.
  int SPDiff = CalculateTailCallSPDiff(DAG, isTailCall, NumBytes);

  // Adjust the stack pointer for the new arguments...
  // These operations are automatically eliminated by the prolog/epilog pass.
  Chain = DAG.getCALLSEQ_START(Chain, NumBytes, 0, dl);
  SDValue CallSeqStart = Chain;

  // Load the return address and frame pointer so they can be moved somewhere
  // else later.
  SDValue LROp, FPOp;
  Chain = EmitTailCallLoadFPAndRetAddr(DAG, SPDiff, Chain, LROp, FPOp, dl);

  // Set up a copy of the stack pointer for use loading and storing any
  // arguments that may not fit in the registers available for argument
  // passing.
  SDValue StackPtr = DAG.getRegister(PPC::R1, MVT::i32);

  SmallVector<std::pair<unsigned, SDValue>, 8> RegsToPass;
  SmallVector<TailCallArgumentInfo, 8> TailCallArguments;
  SmallVector<SDValue, 8> MemOpChains;

  bool seenFloatArg = false;
  // Walk the register/memloc assignments, inserting copies/loads.
  // i - Tracks the index into the list of registers allocated for the call.
  // RealArgIdx - Tracks the index into the list of actual function arguments.
  // j - Tracks the index into the list of byval arguments.
  for (unsigned i = 0, RealArgIdx = 0, j = 0, e = ArgLocs.size();
       i != e;
       ++i, ++RealArgIdx) {
    CCValAssign &VA = ArgLocs[i];
    SDValue Arg = OutVals[RealArgIdx];
    ISD::ArgFlagsTy Flags = Outs[RealArgIdx].Flags;

    if (Flags.isByVal()) {
      // Argument is an aggregate which is passed by value, thus we need to
      // create a copy of it in the local variable space of the current stack
      // frame (which is the stack frame of the caller) and pass the address of
      // this copy to the callee.
      assert((j < ByValArgLocs.size()) && "Index out of bounds!");
      CCValAssign &ByValVA = ByValArgLocs[j++];
      assert((VA.getValNo() == ByValVA.getValNo()) && "ValNo mismatch!");

      // Memory reserved in the local variable space of the caller's stack
      // frame.
      unsigned LocMemOffset = ByValVA.getLocMemOffset();

      SDValue PtrOff = DAG.getIntPtrConstant(LocMemOffset, dl);
      PtrOff = DAG.getNode(ISD::ADD, dl, getPointerTy(MF.getDataLayout()),
                           StackPtr, PtrOff);

      // Create a copy of the argument in the local area of the current
      // stack frame.
      SDValue MemcpyCall =
        CreateCopyOfByValArgument(Arg, PtrOff,
                                  CallSeqStart.getNode()->getOperand(0),
                                  Flags, DAG, dl);

      // This must go outside the CALLSEQ_START..END.
      SDValue NewCallSeqStart = DAG.getCALLSEQ_START(MemcpyCall, NumBytes, 0,
                                                     SDLoc(MemcpyCall));
      DAG.ReplaceAllUsesWith(CallSeqStart.getNode(),
                             NewCallSeqStart.getNode());
      Chain = CallSeqStart = NewCallSeqStart;

      // Pass the address of the aggregate copy on the stack either in a
      // physical register or in the parameter list area of the current stack
      // frame to the callee.
      Arg = PtrOff;
    }

    // When useCRBits() is true, there can be i1 arguments.
    // This is because getRegisterType(MVT::i1) => MVT::i1,
    // while for other integer types getRegisterType() => MVT::i32.
    // Extend i1 here and ensure the callee will get an i32.
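    // For example, an i1 'true' reaches the callee as the i32 value -1 after
    // SIGN_EXTEND, or as 1 after ZERO_EXTEND.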
    if (Arg.getValueType() == MVT::i1)
      Arg = DAG.getNode(Flags.isSExt() ? ISD::SIGN_EXTEND : ISD::ZERO_EXTEND,
                        dl, MVT::i32, Arg);

    if (VA.isRegLoc()) {
      seenFloatArg |= VA.getLocVT().isFloatingPoint();
      // Put argument in a physical register.
      if (Subtarget.hasSPE() && Arg.getValueType() == MVT::f64) {
        bool IsLE = Subtarget.isLittleEndian();
        SDValue SVal = DAG.getNode(PPCISD::EXTRACT_SPE, dl, MVT::i32, Arg,
                                   DAG.getIntPtrConstant(IsLE ? 0 : 1, dl));
        RegsToPass.push_back(std::make_pair(VA.getLocReg(), SVal.getValue(0)));
        SVal = DAG.getNode(PPCISD::EXTRACT_SPE, dl, MVT::i32, Arg,
                           DAG.getIntPtrConstant(IsLE ? 1 : 0, dl));
        RegsToPass.push_back(std::make_pair(ArgLocs[++i].getLocReg(),
                                            SVal.getValue(0)));
      } else
        RegsToPass.push_back(std::make_pair(VA.getLocReg(), Arg));
    } else {
      // Put argument in the parameter list area of the current stack frame.
      assert(VA.isMemLoc());
      unsigned LocMemOffset = VA.getLocMemOffset();

      if (!isTailCall) {
        SDValue PtrOff = DAG.getIntPtrConstant(LocMemOffset, dl);
        PtrOff = DAG.getNode(ISD::ADD, dl, getPointerTy(MF.getDataLayout()),
                             StackPtr, PtrOff);

        MemOpChains.push_back(
            DAG.getStore(Chain, dl, Arg, PtrOff, MachinePointerInfo()));
      } else {
        // Calculate and remember argument location.
        CalculateTailCallArgDest(DAG, MF, false, Arg, SPDiff, LocMemOffset,
                                 TailCallArguments);
      }
    }
  }

  if (!MemOpChains.empty())
    Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, MemOpChains);

  // Build a sequence of copy-to-reg nodes chained together with token chain
  // and flag operands which copy the outgoing args into the appropriate regs.
  SDValue InFlag;
  for (unsigned i = 0, e = RegsToPass.size(); i != e; ++i) {
    Chain = DAG.getCopyToReg(Chain, dl, RegsToPass[i].first,
                             RegsToPass[i].second, InFlag);
    InFlag = Chain.getValue(1);
  }

  // Set CR bit 6 to true if this is a vararg call with floating-point args
  // passed in registers.
  if (isVarArg) {
    SDVTList VTs = DAG.getVTList(MVT::Other, MVT::Glue);
    SDValue Ops[] = { Chain, InFlag };

    Chain = DAG.getNode(seenFloatArg ? PPCISD::CR6SET : PPCISD::CR6UNSET,
                        dl, VTs, makeArrayRef(Ops, InFlag.getNode() ? 2 : 1));

    InFlag = Chain.getValue(1);
  }

  if (isTailCall)
    PrepareTailCall(DAG, InFlag, Chain, dl, SPDiff, NumBytes, LROp, FPOp,
                    TailCallArguments);

  return FinishCall(CallConv, dl, isTailCall, isVarArg, isPatchPoint,
                    /* unused except on PPC64 ELFv1 */ false, DAG,
                    RegsToPass, InFlag, Chain, CallSeqStart, Callee, SPDiff,
                    NumBytes, Ins, InVals, CS);
}

// Copy an argument into memory, being careful to do this outside the
// call sequence for the call to which the argument belongs.
SDValue PPCTargetLowering::createMemcpyOutsideCallSeq(
    SDValue Arg, SDValue PtrOff, SDValue CallSeqStart, ISD::ArgFlagsTy Flags,
    SelectionDAG &DAG, const SDLoc &dl) const {
  SDValue MemcpyCall = CreateCopyOfByValArgument(Arg, PtrOff,
                        CallSeqStart.getNode()->getOperand(0),
                        Flags, DAG, dl);
  // The MEMCPY must go outside the CALLSEQ_START..END.
  int64_t FrameSize = CallSeqStart.getConstantOperandVal(1);
  SDValue NewCallSeqStart = DAG.getCALLSEQ_START(MemcpyCall, FrameSize, 0,
                                                 SDLoc(MemcpyCall));
  DAG.ReplaceAllUsesWith(CallSeqStart.getNode(),
                         NewCallSeqStart.getNode());
  return NewCallSeqStart;
}

SDValue PPCTargetLowering::LowerCall_64SVR4(
    SDValue Chain, SDValue Callee, CallingConv::ID CallConv, bool isVarArg,
    bool isTailCall, bool isPatchPoint,
    const SmallVectorImpl<ISD::OutputArg> &Outs,
    const SmallVectorImpl<SDValue> &OutVals,
    const SmallVectorImpl<ISD::InputArg> &Ins, const SDLoc &dl,
    SelectionDAG &DAG, SmallVectorImpl<SDValue> &InVals,
    ImmutableCallSite CS) const {
  bool isELFv2ABI = Subtarget.isELFv2ABI();
  bool isLittleEndian = Subtarget.isLittleEndian();
  unsigned NumOps = Outs.size();
  bool hasNest = false;
  bool IsSibCall = false;

  EVT PtrVT = getPointerTy(DAG.getDataLayout());
  unsigned PtrByteSize = 8;

  MachineFunction &MF = DAG.getMachineFunction();

  if (isTailCall && !getTargetMachine().Options.GuaranteedTailCallOpt)
    IsSibCall = true;

  // Mark this function as potentially containing a tail call. As a
  // consequence, the frame pointer will be used for dynamic alloca and for
  // restoring the caller's stack pointer in this function's epilogue. This is
  // done because the called function in a tail call might overwrite the value
  // in this function's (MF) stack pointer stack slot 0(SP).
  if (getTargetMachine().Options.GuaranteedTailCallOpt &&
      CallConv == CallingConv::Fast)
    MF.getInfo<PPCFunctionInfo>()->setHasFastCall();

  assert(!(CallConv == CallingConv::Fast && isVarArg) &&
         "fastcc not supported on varargs functions");

  // Count how many bytes are to be pushed on the stack, including the linkage
  // area, and parameter passing area.  On ELFv1, the linkage area is 48 bytes
  // reserved space for [SP][CR][LR][2 x unused][TOC]; on ELFv2, the linkage
  // area is 32 bytes reserved space for [SP][CR][LR][TOC].
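  // As a mnemonic sketch (doubleword offsets from the stack pointer; see
  // PPCFrameLowering for the authoritative layout):
  //
  //   ELFv1: 0(r1) back chain, 8 CR save, 16 LR save, 24/32 reserved,
  //          40 TOC save                                   -> 48 bytes
  //   ELFv2: 0(r1) back chain, 8 CR save, 16 LR save, 24 TOC save
  //                                                        -> 32 bytes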
  unsigned LinkageSize = Subtarget.getFrameLowering()->getLinkageSize();
  unsigned NumBytes = LinkageSize;
  unsigned GPR_idx = 0, FPR_idx = 0, VR_idx = 0;
  unsigned &QFPR_idx = FPR_idx;

  static const MCPhysReg GPR[] = {
    PPC::X3, PPC::X4, PPC::X5, PPC::X6,
    PPC::X7, PPC::X8, PPC::X9, PPC::X10,
  };
  static const MCPhysReg VR[] = {
    PPC::V2, PPC::V3, PPC::V4, PPC::V5, PPC::V6, PPC::V7, PPC::V8,
    PPC::V9, PPC::V10, PPC::V11, PPC::V12, PPC::V13
  };

  const unsigned NumGPRs = array_lengthof(GPR);
  const unsigned NumFPRs = useSoftFloat() ? 0 : 13;
  const unsigned NumVRs  = array_lengthof(VR);
  const unsigned NumQFPRs = NumFPRs;

  // On ELFv2, we can avoid allocating the parameter area if all the arguments
  // can be passed to the callee in registers.
  // For the fast calling convention, there is another check below.
  // Note: We should keep consistent with LowerFormalArguments_64SVR4().
  bool HasParameterArea =
      !isELFv2ABI || isVarArg || CallConv == CallingConv::Fast;
  if (!HasParameterArea) {
    unsigned ParamAreaSize = NumGPRs * PtrByteSize;
    unsigned AvailableFPRs = NumFPRs;
    unsigned AvailableVRs = NumVRs;
    unsigned NumBytesTmp = NumBytes;
    for (unsigned i = 0; i != NumOps; ++i) {
      if (Outs[i].Flags.isNest()) continue;
      if (CalculateStackSlotUsed(Outs[i].VT, Outs[i].ArgVT, Outs[i].Flags,
                                 PtrByteSize, LinkageSize, ParamAreaSize,
                                 NumBytesTmp, AvailableFPRs, AvailableVRs,
                                 Subtarget.hasQPX()))
        HasParameterArea = true;
    }
  }

  // When using the fast calling convention, we don't provide backing for
  // arguments that will be in registers.
  unsigned NumGPRsUsed = 0, NumFPRsUsed = 0, NumVRsUsed = 0;

  // Avoid allocating parameter area for fastcc functions if all the arguments
  // can be passed in the registers.
  if (CallConv == CallingConv::Fast)
    HasParameterArea = false;

  // Add up all the space actually used.
  for (unsigned i = 0; i != NumOps; ++i) {
    ISD::ArgFlagsTy Flags = Outs[i].Flags;
    EVT ArgVT = Outs[i].VT;
    EVT OrigVT = Outs[i].ArgVT;

    if (Flags.isNest())
      continue;

    if (CallConv == CallingConv::Fast) {
      if (Flags.isByVal()) {
        NumGPRsUsed += (Flags.getByValSize()+7)/8;
        if (NumGPRsUsed > NumGPRs)
          HasParameterArea = true;
      } else {
        switch (ArgVT.getSimpleVT().SimpleTy) {
        default: llvm_unreachable("Unexpected ValueType for argument!");
        case MVT::i1:
        case MVT::i32:
        case MVT::i64:
          if (++NumGPRsUsed <= NumGPRs)
            continue;
          break;
        case MVT::v4i32:
        case MVT::v8i16:
        case MVT::v16i8:
        case MVT::v2f64:
        case MVT::v2i64:
        case MVT::v1i128:
        case MVT::f128:
          if (++NumVRsUsed <= NumVRs)
            continue;
          break;
        case MVT::v4f32:
          // When using QPX, this is handled like a FP register, otherwise, it
          // is an Altivec register.
          if (Subtarget.hasQPX()) {
            if (++NumFPRsUsed <= NumFPRs)
              continue;
          } else {
            if (++NumVRsUsed <= NumVRs)
              continue;
          }
          break;
        case MVT::f32:
        case MVT::f64:
        case MVT::v4f64: // QPX
        case MVT::v4i1:  // QPX
          if (++NumFPRsUsed <= NumFPRs)
            continue;
          break;
        }
        HasParameterArea = true;
      }
    }

    /* Respect alignment of argument on the stack.  */
    unsigned Align =
      CalculateStackSlotAlignment(ArgVT, OrigVT, Flags, PtrByteSize);
    NumBytes = ((NumBytes + Align - 1) / Align) * Align;
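    // E.g. with NumBytes = 52 and Align = 16, this rounds NumBytes up to 64.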

    NumBytes += CalculateStackSlotSize(ArgVT, Flags, PtrByteSize);
    if (Flags.isInConsecutiveRegsLast())
      NumBytes = ((NumBytes + PtrByteSize - 1)/PtrByteSize) * PtrByteSize;
  }

  unsigned NumBytesActuallyUsed = NumBytes;

  // In the old ELFv1 ABI, the prolog code of the callee may store up to 8
  // GPR argument registers to the stack, allowing va_start to index over
  // them in memory if it is a varargs function.  Because we cannot tell if
  // this is needed on the caller side, we have to conservatively assume that
  // it is needed.  As such, make sure we have at least enough stack space
  // for the caller to store the 8 GPRs.
  // In the ELFv2 ABI, we allocate the parameter area iff a callee really
  // requires memory operands, e.g. a vararg function.
  if (HasParameterArea)
    NumBytes = std::max(NumBytes, LinkageSize + 8 * PtrByteSize);
  else
    NumBytes = LinkageSize;

  // Tail calls need the stack to be aligned.
  if (getTargetMachine().Options.GuaranteedTailCallOpt &&
      CallConv == CallingConv::Fast)
    NumBytes = EnsureStackAlignment(Subtarget.getFrameLowering(), NumBytes);

  int SPDiff = 0;

  // Calculate by how many bytes the stack has to be adjusted in case of tail
  // call optimization.
  if (!IsSibCall)
    SPDiff = CalculateTailCallSPDiff(DAG, isTailCall, NumBytes);

  // To protect arguments on the stack from being clobbered in a tail call,
  // force all the loads to happen before doing any other lowering.
  if (isTailCall)
    Chain = DAG.getStackArgumentTokenFactor(Chain);

  // Adjust the stack pointer for the new arguments...
  // These operations are automatically eliminated by the prolog/epilog pass.
  if (!IsSibCall)
    Chain = DAG.getCALLSEQ_START(Chain, NumBytes, 0, dl);
  SDValue CallSeqStart = Chain;

  // Load the return address and frame pointer so they can be moved somewhere
  // else later.
  SDValue LROp, FPOp;
  Chain = EmitTailCallLoadFPAndRetAddr(DAG, SPDiff, Chain, LROp, FPOp, dl);

  // Set up a copy of the stack pointer for use loading and storing any
  // arguments that may not fit in the registers available for argument
  // passing.
  SDValue StackPtr = DAG.getRegister(PPC::X1, MVT::i64);

  // Figure out which arguments are going to go in registers, and which in
  // memory.  Also, if this is a vararg function, floating-point arguments
  // must be stored to our stack, and loaded into integer regs as well, if
  // any integer regs are available for argument passing.
  unsigned ArgOffset = LinkageSize;

  SmallVector<std::pair<unsigned, SDValue>, 8> RegsToPass;
  SmallVector<TailCallArgumentInfo, 8> TailCallArguments;

  SmallVector<SDValue, 8> MemOpChains;
  for (unsigned i = 0; i != NumOps; ++i) {
    SDValue Arg = OutVals[i];
    ISD::ArgFlagsTy Flags = Outs[i].Flags;
    EVT ArgVT = Outs[i].VT;
    EVT OrigVT = Outs[i].ArgVT;

    // PtrOff will be used to store the current argument to the stack if a
    // register cannot be found for it.
    SDValue PtrOff;

    // We re-align the argument offset for each argument, except when using the
    // fast calling convention, where we need to make sure we do that only when
    // we'll actually use a stack slot.
    auto ComputePtrOff = [&]() {
      /* Respect alignment of argument on the stack.  */
      unsigned Align =
        CalculateStackSlotAlignment(ArgVT, OrigVT, Flags, PtrByteSize);
      ArgOffset = ((ArgOffset + Align - 1) / Align) * Align;

      PtrOff = DAG.getConstant(ArgOffset, dl, StackPtr.getValueType());

      PtrOff = DAG.getNode(ISD::ADD, dl, PtrVT, StackPtr, PtrOff);
    };

    if (CallConv != CallingConv::Fast) {
      ComputePtrOff();

      /* Compute GPR index associated with argument offset.  */
      GPR_idx = (ArgOffset - LinkageSize) / PtrByteSize;
      GPR_idx = std::min(GPR_idx, NumGPRs);
    }

    // Promote integers to 64-bit values.
    if (Arg.getValueType() == MVT::i32 || Arg.getValueType() == MVT::i1) {
      // FIXME: Should this use ANY_EXTEND if neither sext nor zext?
      unsigned ExtOp = Flags.isSExt() ? ISD::SIGN_EXTEND : ISD::ZERO_EXTEND;
      Arg = DAG.getNode(ExtOp, dl, MVT::i64, Arg);
    }

    // FIXME: memcpy is used way more than necessary.  Correctness first.
    // Note: "by value" is code for passing a structure by value, not
    // basic types.
    if (Flags.isByVal()) {
      // Note: Size includes alignment padding, so
      //   struct x { short a; char b; }
      // will have Size = 4.  With #pragma pack(1), it will have Size = 3.
      // These are the proper values we need for right-justifying the
      // aggregate in a parameter register.
      unsigned Size = Flags.getByValSize();

      // An empty aggregate parameter takes up no storage and no
      // registers.
      if (Size == 0)
        continue;

      if (CallConv == CallingConv::Fast)
        ComputePtrOff();

      // All aggregates smaller than 8 bytes must be passed right-justified.
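      // For example, the 4-byte struct x above is EXTLOADed as an i32 into a
      // GPR, which leaves it in the low-order (rightmost) bytes of the
      // 64-bit register.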
      if (Size==1 || Size==2 || Size==4) {
        EVT VT = (Size==1) ? MVT::i8 : ((Size==2) ? MVT::i16 : MVT::i32);
        if (GPR_idx != NumGPRs) {
          SDValue Load = DAG.getExtLoad(ISD::EXTLOAD, dl, PtrVT, Chain, Arg,
                                        MachinePointerInfo(), VT);
          MemOpChains.push_back(Load.getValue(1));
          RegsToPass.push_back(std::make_pair(GPR[GPR_idx++], Load));

          ArgOffset += PtrByteSize;
          continue;
        }
      }

      if (GPR_idx == NumGPRs && Size < 8) {
        SDValue AddPtr = PtrOff;
        if (!isLittleEndian) {
          SDValue Const = DAG.getConstant(PtrByteSize - Size, dl,
                                          PtrOff.getValueType());
          AddPtr = DAG.getNode(ISD::ADD, dl, PtrVT, PtrOff, Const);
        }
        Chain = CallSeqStart = createMemcpyOutsideCallSeq(Arg, AddPtr,
                                                          CallSeqStart,
                                                          Flags, DAG, dl);
        ArgOffset += PtrByteSize;
        continue;
      }
      // Copy the entire object into memory.  There are cases where
      // gcc-generated code assumes it is there, even if it could be put
      // entirely into registers.  (This is not what the doc says.)

      // FIXME: The above statement is likely due to a misunderstanding of the
      // documents.  All arguments must be copied into the parameter area BY
      // THE CALLEE in the event that the callee takes the address of any
      // formal argument.  That has not yet been implemented.  However, it is
      // reasonable to use the stack area as a staging area for the register
      // load.

      // Skip this for small aggregates, as we will use the same slot for a
      // right-justified copy, below.
      if (Size >= 8)
        Chain = CallSeqStart = createMemcpyOutsideCallSeq(Arg, PtrOff,
                                                          CallSeqStart,
                                                          Flags, DAG, dl);

      // When a register is available, pass a small aggregate right-justified.
      if (Size < 8 && GPR_idx != NumGPRs) {
        // The easiest way to get this right-justified in a register
        // is to copy the structure into the rightmost portion of a
        // local variable slot, then load the whole slot into the
        // register.
        // FIXME: The memcpy seems to produce pretty awful code for
        // small aggregates, particularly for packed ones.
        // FIXME: It would be preferable to use the slot in the
        // parameter save area instead of a new local variable.
        SDValue AddPtr = PtrOff;
        if (!isLittleEndian) {
          SDValue Const = DAG.getConstant(8 - Size, dl, PtrOff.getValueType());
          AddPtr = DAG.getNode(ISD::ADD, dl, PtrVT, PtrOff, Const);
        }
        Chain = CallSeqStart = createMemcpyOutsideCallSeq(Arg, AddPtr,
                                                          CallSeqStart,
                                                          Flags, DAG, dl);

        // Load the slot into the register.
        SDValue Load =
            DAG.getLoad(PtrVT, dl, Chain, PtrOff, MachinePointerInfo());
        MemOpChains.push_back(Load.getValue(1));
        RegsToPass.push_back(std::make_pair(GPR[GPR_idx++], Load));

        // Done with this argument.
        ArgOffset += PtrByteSize;
        continue;
      }

      // For aggregates larger than PtrByteSize, copy the pieces of the
      // object that fit into registers from the parameter save area.
      for (unsigned j=0; j<Size; j+=PtrByteSize) {
        SDValue Const = DAG.getConstant(j, dl, PtrOff.getValueType());
        SDValue AddArg = DAG.getNode(ISD::ADD, dl, PtrVT, Arg, Const);
        if (GPR_idx != NumGPRs) {
          SDValue Load =
              DAG.getLoad(PtrVT, dl, Chain, AddArg, MachinePointerInfo());
          MemOpChains.push_back(Load.getValue(1));
          RegsToPass.push_back(std::make_pair(GPR[GPR_idx++], Load));
          ArgOffset += PtrByteSize;
        } else {
          ArgOffset += ((Size - j + PtrByteSize-1)/PtrByteSize)*PtrByteSize;
          break;
        }
      }
      continue;
    }

    switch (Arg.getSimpleValueType().SimpleTy) {
    default: llvm_unreachable("Unexpected ValueType for argument!");
    case MVT::i1:
    case MVT::i32:
    case MVT::i64:
      if (Flags.isNest()) {
        // The 'nest' parameter, if any, is passed in R11.
        RegsToPass.push_back(std::make_pair(PPC::X11, Arg));
        hasNest = true;
        break;
      }

      // These can be scalar arguments or elements of an integer array type
      // passed directly.  Clang may use those instead of "byval" aggregate
      // types to avoid forcing arguments to memory unnecessarily.
      if (GPR_idx != NumGPRs) {
        RegsToPass.push_back(std::make_pair(GPR[GPR_idx++], Arg));
      } else {
        if (CallConv == CallingConv::Fast)
          ComputePtrOff();

        assert(HasParameterArea &&
               "Parameter area must exist to pass an argument in memory.");
        LowerMemOpCallTo(DAG, MF, Chain, Arg, PtrOff, SPDiff, ArgOffset,
                         true, isTailCall, false, MemOpChains,
                         TailCallArguments, dl);
        if (CallConv == CallingConv::Fast)
          ArgOffset += PtrByteSize;
      }
      if (CallConv != CallingConv::Fast)
        ArgOffset += PtrByteSize;
      break;
    case MVT::f32:
    case MVT::f64: {
      // These can be scalar arguments or elements of a float array type
      // passed directly.  The latter are used to implement ELFv2 homogeneous
      // float aggregates.

      // Named arguments go into FPRs first, and once they overflow, the
      // remaining arguments go into GPRs and then the parameter save area.
      // Unnamed arguments for vararg functions always go to GPRs and
      // then the parameter save area.  For now, put all arguments to vararg
      // routines always in both locations (FPR *and* GPR or stack slot).
      bool NeedGPROrStack = isVarArg || FPR_idx == NumFPRs;
      bool NeededLoad = false;

      // First load the argument into the next available FPR.
      if (FPR_idx != NumFPRs)
        RegsToPass.push_back(std::make_pair(FPR[FPR_idx++], Arg));

      // Next, load the argument into a GPR or stack slot if needed.
      if (!NeedGPROrStack)
        ;
      else if (GPR_idx != NumGPRs && CallConv != CallingConv::Fast) {
        // FIXME: We may want to re-enable this for CallingConv::Fast on the P8
        // once we support fp <-> gpr moves.

        // In the non-vararg case, this can only ever happen in the
        // presence of f32 array types, since otherwise we never run
        // out of FPRs before running out of GPRs.
        SDValue ArgVal;

        // Double values are always passed in a single GPR.
        if (Arg.getValueType() != MVT::f32) {
          ArgVal = DAG.getNode(ISD::BITCAST, dl, MVT::i64, Arg);

        // Non-array float values are extended and passed in a GPR.
        } else if (!Flags.isInConsecutiveRegs()) {
          ArgVal = DAG.getNode(ISD::BITCAST, dl, MVT::i32, Arg);
          ArgVal = DAG.getNode(ISD::ANY_EXTEND, dl, MVT::i64, ArgVal);

        // If we have an array of floats, we collect every odd element
        // together with its predecessor into one GPR.
        } else if (ArgOffset % PtrByteSize != 0) {
          SDValue Lo, Hi;
          Lo = DAG.getNode(ISD::BITCAST, dl, MVT::i32, OutVals[i - 1]);
          Hi = DAG.getNode(ISD::BITCAST, dl, MVT::i32, Arg);
          if (!isLittleEndian)
            std::swap(Lo, Hi);
          ArgVal = DAG.getNode(ISD::BUILD_PAIR, dl, MVT::i64, Lo, Hi);

        // The final element, if even, goes into the first half of a GPR.
        } else if (Flags.isInConsecutiveRegsLast()) {
          ArgVal = DAG.getNode(ISD::BITCAST, dl, MVT::i32, Arg);
          ArgVal = DAG.getNode(ISD::ANY_EXTEND, dl, MVT::i64, ArgVal);
          if (!isLittleEndian)
            ArgVal = DAG.getNode(ISD::SHL, dl, MVT::i64, ArgVal,
                                 DAG.getConstant(32, dl, MVT::i32));

        // Non-final even elements are skipped; they will be handled
        // together with the subsequent argument on the next go-around.
        } else
          ArgVal = SDValue();

        if (ArgVal.getNode())
          RegsToPass.push_back(std::make_pair(GPR[GPR_idx++], ArgVal));
      } else {
        if (CallConv == CallingConv::Fast)
          ComputePtrOff();

        // Single-precision floating-point values are mapped to the
        // second (rightmost) word of the stack doubleword.
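        // E.g. on big-endian targets, an f32 assigned the 8-byte slot at
        // offset 32 is actually stored at offset 36, so the callee reads it
        // from the rightmost word of that doubleword.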
6108         if (Arg.getValueType() == MVT::f32 &&
6109             !isLittleEndian && !Flags.isInConsecutiveRegs()) {
6110           SDValue ConstFour = DAG.getConstant(4, dl, PtrOff.getValueType());
6111           PtrOff = DAG.getNode(ISD::ADD, dl, PtrVT, PtrOff, ConstFour);
6112         }
6113 
6114         assert(HasParameterArea &&
6115                "Parameter area must exist to pass an argument in memory.");
6116         LowerMemOpCallTo(DAG, MF, Chain, Arg, PtrOff, SPDiff, ArgOffset,
6117                          true, isTailCall, false, MemOpChains,
6118                          TailCallArguments, dl);
6119 
6120         NeededLoad = true;
6121       }
6122       // When passing an array of floats, the array occupies consecutive
6123       // space in the argument area; only round up to the next doubleword
6124       // at the end of the array.  Otherwise, each float takes 8 bytes.
6125       if (CallConv != CallingConv::Fast || NeededLoad) {
6126         ArgOffset += (Arg.getValueType() == MVT::f32 &&
6127                       Flags.isInConsecutiveRegs()) ? 4 : 8;
6128         if (Flags.isInConsecutiveRegsLast())
6129           ArgOffset = ((ArgOffset + PtrByteSize - 1)/PtrByteSize) * PtrByteSize;
6130       }
6131       break;
6132     }
6133     case MVT::v4f32:
6134     case MVT::v4i32:
6135     case MVT::v8i16:
6136     case MVT::v16i8:
6137     case MVT::v2f64:
6138     case MVT::v2i64:
6139     case MVT::v1i128:
6140     case MVT::f128:
6141       if (!Subtarget.hasQPX()) {
6142       // These can be scalar arguments or elements of a vector array type
6143       // passed directly.  The latter are used to implement ELFv2 homogenous
6144       // vector aggregates.
6145 
6146       // For a varargs call, named arguments go into VRs or on the stack as
6147       // usual; unnamed arguments always go to the stack or the corresponding
6148       // GPRs when within range.  For now, we always put the value in both
6149       // locations (or even all three).
6150       if (isVarArg) {
6151         assert(HasParameterArea &&
6152                "Parameter area must exist if we have a varargs call.");
6153         // We could elide this store in the case where the object fits
6154         // entirely in R registers.  Maybe later.
6155         SDValue Store =
6156             DAG.getStore(Chain, dl, Arg, PtrOff, MachinePointerInfo());
6157         MemOpChains.push_back(Store);
6158         if (VR_idx != NumVRs) {
6159           SDValue Load =
6160               DAG.getLoad(MVT::v4f32, dl, Store, PtrOff, MachinePointerInfo());
6161           MemOpChains.push_back(Load.getValue(1));
6162           RegsToPass.push_back(std::make_pair(VR[VR_idx++], Load));
6163         }
6164         ArgOffset += 16;
6165         for (unsigned i=0; i<16; i+=PtrByteSize) {
6166           if (GPR_idx == NumGPRs)
6167             break;
6168           SDValue Ix = DAG.getNode(ISD::ADD, dl, PtrVT, PtrOff,
6169                                    DAG.getConstant(i, dl, PtrVT));
6170           SDValue Load =
6171               DAG.getLoad(PtrVT, dl, Store, Ix, MachinePointerInfo());
6172           MemOpChains.push_back(Load.getValue(1));
6173           RegsToPass.push_back(std::make_pair(GPR[GPR_idx++], Load));
6174         }
6175         break;
6176       }
6177 
6178       // Non-varargs Altivec params go into VRs or on the stack.
6179       if (VR_idx != NumVRs) {
6180         RegsToPass.push_back(std::make_pair(VR[VR_idx++], Arg));
6181       } else {
6182         if (CallConv == CallingConv::Fast)
6183           ComputePtrOff();
6184 
6185         assert(HasParameterArea &&
6186                "Parameter area must exist to pass an argument in memory.");
6187         LowerMemOpCallTo(DAG, MF, Chain, Arg, PtrOff, SPDiff, ArgOffset,
6188                          true, isTailCall, true, MemOpChains,
6189                          TailCallArguments, dl);
6190         if (CallConv == CallingConv::Fast)
6191           ArgOffset += 16;
6192       }
6193 
6194       if (CallConv != CallingConv::Fast)
6195         ArgOffset += 16;
6196       break;
6197       } // not QPX
6198 
6199       assert(Arg.getValueType().getSimpleVT().SimpleTy == MVT::v4f32 &&
6200              "Invalid QPX parameter type");
6201 
6202       LLVM_FALLTHROUGH;
6203     case MVT::v4f64:
6204     case MVT::v4i1: {
6205       bool IsF32 = Arg.getValueType().getSimpleVT().SimpleTy == MVT::v4f32;
6206       if (isVarArg) {
6207         assert(HasParameterArea &&
6208                "Parameter area must exist if we have a varargs call.");
6209         // We could elide this store in the case where the object fits
6210         // entirely in R registers.  Maybe later.
6211         SDValue Store =
6212             DAG.getStore(Chain, dl, Arg, PtrOff, MachinePointerInfo());
6213         MemOpChains.push_back(Store);
6214         if (QFPR_idx != NumQFPRs) {
6215           SDValue Load = DAG.getLoad(IsF32 ? MVT::v4f32 : MVT::v4f64, dl, Store,
6216                                      PtrOff, MachinePointerInfo());
6217           MemOpChains.push_back(Load.getValue(1));
6218           RegsToPass.push_back(std::make_pair(QFPR[QFPR_idx++], Load));
6219         }
6220         ArgOffset += (IsF32 ? 16 : 32);
6221         for (unsigned i = 0; i < (IsF32 ? 16U : 32U); i += PtrByteSize) {
6222           if (GPR_idx == NumGPRs)
6223             break;
6224           SDValue Ix = DAG.getNode(ISD::ADD, dl, PtrVT, PtrOff,
6225                                    DAG.getConstant(i, dl, PtrVT));
6226           SDValue Load =
6227               DAG.getLoad(PtrVT, dl, Store, Ix, MachinePointerInfo());
6228           MemOpChains.push_back(Load.getValue(1));
6229           RegsToPass.push_back(std::make_pair(GPR[GPR_idx++], Load));
6230         }
6231         break;
6232       }
6233 
6234       // Non-varargs QPX params go into registers or on the stack.
6235       if (QFPR_idx != NumQFPRs) {
6236         RegsToPass.push_back(std::make_pair(QFPR[QFPR_idx++], Arg));
6237       } else {
6238         if (CallConv == CallingConv::Fast)
6239           ComputePtrOff();
6240 
6241         assert(HasParameterArea &&
6242                "Parameter area must exist to pass an argument in memory.");
6243         LowerMemOpCallTo(DAG, MF, Chain, Arg, PtrOff, SPDiff, ArgOffset,
6244                          true, isTailCall, true, MemOpChains,
6245                          TailCallArguments, dl);
6246         if (CallConv == CallingConv::Fast)
6247           ArgOffset += (IsF32 ? 16 : 32);
6248       }
6249 
6250       if (CallConv != CallingConv::Fast)
6251         ArgOffset += (IsF32 ? 16 : 32);
6252       break;
6253       }
6254     }
6255   }
6256 
6257   assert((!HasParameterArea || NumBytesActuallyUsed == ArgOffset) &&
6258          "mismatch in size of parameter area");
6259   (void)NumBytesActuallyUsed;
6260 
6261   if (!MemOpChains.empty())
6262     Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, MemOpChains);
6263 
6264   // Check if this is an indirect call (MTCTR/BCTRL).
6265   // See PrepareCall() for more information about calls through function
6266   // pointers in the 64-bit SVR4 ABI.
6267   if (!isTailCall && !isPatchPoint &&
6268       !isFunctionGlobalAddress(Callee) &&
6269       !isa<ExternalSymbolSDNode>(Callee)) {
6270     // Load r2 into a virtual register and store it to the TOC save area.
6271     setUsesTOCBasePtr(DAG);
6272     SDValue Val = DAG.getCopyFromReg(Chain, dl, PPC::X2, MVT::i64);
6273     // TOC save area offset.
6274     unsigned TOCSaveOffset = Subtarget.getFrameLowering()->getTOCSaveOffset();
6275     SDValue PtrOff = DAG.getIntPtrConstant(TOCSaveOffset, dl);
6276     SDValue AddPtr = DAG.getNode(ISD::ADD, dl, PtrVT, StackPtr, PtrOff);
6277     Chain = DAG.getStore(
6278         Val.getValue(1), dl, Val, AddPtr,
6279         MachinePointerInfo::getStack(DAG.getMachineFunction(), TOCSaveOffset));
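    // Conceptually, the sequence around the indirect call is (using the
    // ELFv2 TOC save offset of 24; ELFv1 uses 40):
    //   std 2, 24(1)   ; save the caller's TOC pointer
    //   mtctr ...      ; move the callee's address into CTR
    //   bctrl          ; make the call
    //   ld 2, 24(1)    ; restore the TOC pointer
    // Only the save is emitted here; the restore is part of the call
    // sequence itself.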
6280     // In the ELFv2 ABI, R12 must contain the address of an indirect callee.
6281     // This does not mean the MTCTR instruction must use R12; it's easier
6282     // to model this as an extra parameter, so do that.
6283     if (isELFv2ABI && !isPatchPoint)
6284       RegsToPass.push_back(std::make_pair((unsigned)PPC::X12, Callee));
6285   }
6286 
6287   // Build a sequence of copy-to-reg nodes chained together with token chain
6288   // and flag operands which copy the outgoing args into the appropriate regs.
6289   SDValue InFlag;
6290   for (unsigned i = 0, e = RegsToPass.size(); i != e; ++i) {
6291     Chain = DAG.getCopyToReg(Chain, dl, RegsToPass[i].first,
6292                              RegsToPass[i].second, InFlag);
6293     InFlag = Chain.getValue(1);
6294   }
6295 
6296   if (isTailCall && !IsSibCall)
6297     PrepareTailCall(DAG, InFlag, Chain, dl, SPDiff, NumBytes, LROp, FPOp,
6298                     TailCallArguments);
6299 
6300   return FinishCall(CallConv, dl, isTailCall, isVarArg, isPatchPoint, hasNest,
6301                     DAG, RegsToPass, InFlag, Chain, CallSeqStart, Callee,
6302                     SPDiff, NumBytes, Ins, InVals, CS);
6303 }
6304 
6305 SDValue PPCTargetLowering::LowerCall_Darwin(
6306     SDValue Chain, SDValue Callee, CallingConv::ID CallConv, bool isVarArg,
6307     bool isTailCall, bool isPatchPoint,
6308     const SmallVectorImpl<ISD::OutputArg> &Outs,
6309     const SmallVectorImpl<SDValue> &OutVals,
6310     const SmallVectorImpl<ISD::InputArg> &Ins, const SDLoc &dl,
6311     SelectionDAG &DAG, SmallVectorImpl<SDValue> &InVals,
6312     ImmutableCallSite CS) const {
6313   unsigned NumOps = Outs.size();
6314 
6315   EVT PtrVT = getPointerTy(DAG.getDataLayout());
6316   bool isPPC64 = PtrVT == MVT::i64;
6317   unsigned PtrByteSize = isPPC64 ? 8 : 4;
6318 
6319   MachineFunction &MF = DAG.getMachineFunction();
6320 
  // Mark this function as potentially containing a tail call. As a
  // consequence, the frame pointer will be used for dynamic allocation and
  // for restoring the caller's stack pointer in this function's epilog. This
  // is done because the tail-called function might overwrite the value in
  // this function's (MF) stack pointer stack slot 0(SP).
6326   if (getTargetMachine().Options.GuaranteedTailCallOpt &&
6327       CallConv == CallingConv::Fast)
6328     MF.getInfo<PPCFunctionInfo>()->setHasFastCall();
6329 
6330   // Count how many bytes are to be pushed on the stack, including the linkage
6331   // area, and parameter passing area.  We start with 24/48 bytes, which is
6332   // prereserved space for [SP][CR][LR][3 x unused].
6333   unsigned LinkageSize = Subtarget.getFrameLowering()->getLinkageSize();
6334   unsigned NumBytes = LinkageSize;
6335 
6336   // Add up all the space actually used.
6337   // In 32-bit non-varargs calls, Altivec parameters all go at the end; usually
6338   // they all go in registers, but we must reserve stack space for them for
6339   // possible use by the caller.  In varargs or 64-bit calls, parameters are
6340   // assigned stack space in order, with padding so Altivec parameters are
6341   // 16-byte aligned.
6342   unsigned nAltivecParamsAtEnd = 0;
6343   for (unsigned i = 0; i != NumOps; ++i) {
6344     ISD::ArgFlagsTy Flags = Outs[i].Flags;
6345     EVT ArgVT = Outs[i].VT;
6346     // Varargs Altivec parameters are padded to a 16 byte boundary.
6347     if (ArgVT == MVT::v4f32 || ArgVT == MVT::v4i32 ||
6348         ArgVT == MVT::v8i16 || ArgVT == MVT::v16i8 ||
6349         ArgVT == MVT::v2f64 || ArgVT == MVT::v2i64) {
6350       if (!isVarArg && !isPPC64) {
6351         // Non-varargs Altivec parameters go after all the non-Altivec
6352         // parameters; handle those later so we know how much padding we need.
6353         nAltivecParamsAtEnd++;
6354         continue;
6355       }
      // Varargs and 64-bit Altivec parameters are padded to a 16-byte boundary.
6357       NumBytes = ((NumBytes+15)/16)*16;
6358     }
6359     NumBytes += CalculateStackSlotSize(ArgVT, Flags, PtrByteSize);
6360   }
6361 
6362   // Allow for Altivec parameters at the end, if needed.
6363   if (nAltivecParamsAtEnd) {
6364     NumBytes = ((NumBytes+15)/16)*16;
6365     NumBytes += 16*nAltivecParamsAtEnd;
6366   }
6367 
  // The prolog code of the callee may store up to 8 GPR argument registers to
  // the stack, allowing va_start to index over them in memory if the callee
  // is variadic.
6370   // Because we cannot tell if this is needed on the caller side, we have to
6371   // conservatively assume that it is needed.  As such, make sure we have at
6372   // least enough stack space for the caller to store the 8 GPRs.
6373   NumBytes = std::max(NumBytes, LinkageSize + 8 * PtrByteSize);
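  // For example, a 32-bit call with a single i32 argument needs only
  // LinkageSize (24) + 4 bytes, but we still reserve
  // max(28, 24 + 8 * 4) = 56 bytes so the callee may spill r3-r10.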
6374 
6375   // Tail call needs the stack to be aligned.
6376   if (getTargetMachine().Options.GuaranteedTailCallOpt &&
6377       CallConv == CallingConv::Fast)
6378     NumBytes = EnsureStackAlignment(Subtarget.getFrameLowering(), NumBytes);
6379 
6380   // Calculate by how many bytes the stack has to be adjusted in case of tail
6381   // call optimization.
6382   int SPDiff = CalculateTailCallSPDiff(DAG, isTailCall, NumBytes);
6383 
6384   // To protect arguments on the stack from being clobbered in a tail call,
6385   // force all the loads to happen before doing any other lowering.
6386   if (isTailCall)
6387     Chain = DAG.getStackArgumentTokenFactor(Chain);
6388 
6389   // Adjust the stack pointer for the new arguments...
6390   // These operations are automatically eliminated by the prolog/epilog pass
6391   Chain = DAG.getCALLSEQ_START(Chain, NumBytes, 0, dl);
6392   SDValue CallSeqStart = Chain;
6393 
  // Load the return address and frame pointer so they can be moved somewhere
  // else later.
6396   SDValue LROp, FPOp;
6397   Chain = EmitTailCallLoadFPAndRetAddr(DAG, SPDiff, Chain, LROp, FPOp, dl);
6398 
6399   // Set up a copy of the stack pointer for use loading and storing any
6400   // arguments that may not fit in the registers available for argument
6401   // passing.
6402   SDValue StackPtr;
6403   if (isPPC64)
6404     StackPtr = DAG.getRegister(PPC::X1, MVT::i64);
6405   else
6406     StackPtr = DAG.getRegister(PPC::R1, MVT::i32);
6407 
6408   // Figure out which arguments are going to go in registers, and which in
  // memory.  Also, if this is a vararg function, floating point arguments
6410   // must be stored to our stack, and loaded into integer regs as well, if
6411   // any integer regs are available for argument passing.
6412   unsigned ArgOffset = LinkageSize;
6413   unsigned GPR_idx = 0, FPR_idx = 0, VR_idx = 0;
6414 
6415   static const MCPhysReg GPR_32[] = {           // 32-bit registers.
6416     PPC::R3, PPC::R4, PPC::R5, PPC::R6,
6417     PPC::R7, PPC::R8, PPC::R9, PPC::R10,
6418   };
6419   static const MCPhysReg GPR_64[] = {           // 64-bit registers.
6420     PPC::X3, PPC::X4, PPC::X5, PPC::X6,
6421     PPC::X7, PPC::X8, PPC::X9, PPC::X10,
6422   };
6423   static const MCPhysReg VR[] = {
6424     PPC::V2, PPC::V3, PPC::V4, PPC::V5, PPC::V6, PPC::V7, PPC::V8,
6425     PPC::V9, PPC::V10, PPC::V11, PPC::V12, PPC::V13
6426   };
6427   const unsigned NumGPRs = array_lengthof(GPR_32);
6428   const unsigned NumFPRs = 13;
6429   const unsigned NumVRs  = array_lengthof(VR);
6430 
6431   const MCPhysReg *GPR = isPPC64 ? GPR_64 : GPR_32;
6432 
6433   SmallVector<std::pair<unsigned, SDValue>, 8> RegsToPass;
6434   SmallVector<TailCallArgumentInfo, 8> TailCallArguments;
6435 
6436   SmallVector<SDValue, 8> MemOpChains;
6437   for (unsigned i = 0; i != NumOps; ++i) {
6438     SDValue Arg = OutVals[i];
6439     ISD::ArgFlagsTy Flags = Outs[i].Flags;
6440 
6441     // PtrOff will be used to store the current argument to the stack if a
6442     // register cannot be found for it.
6443     SDValue PtrOff;
6444 
6445     PtrOff = DAG.getConstant(ArgOffset, dl, StackPtr.getValueType());
6446 
6447     PtrOff = DAG.getNode(ISD::ADD, dl, PtrVT, StackPtr, PtrOff);
6448 
6449     // On PPC64, promote integers to 64-bit values.
6450     if (isPPC64 && Arg.getValueType() == MVT::i32) {
6451       // FIXME: Should this use ANY_EXTEND if neither sext nor zext?
6452       unsigned ExtOp = Flags.isSExt() ? ISD::SIGN_EXTEND : ISD::ZERO_EXTEND;
6453       Arg = DAG.getNode(ExtOp, dl, MVT::i64, Arg);
6454     }
6455 
6456     // FIXME memcpy is used way more than necessary.  Correctness first.
6457     // Note: "by value" is code for passing a structure by value, not
6458     // basic types.
6459     if (Flags.isByVal()) {
6460       unsigned Size = Flags.getByValSize();
6461       // Very small objects are passed right-justified.  Everything else is
6462       // passed left-justified.
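      // For example, a 1-byte struct on a 32-bit target is stored at offset
      // PtrByteSize - Size == 3 within its argument word when passed in
      // memory, i.e. in the low-order byte of the word.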
6463       if (Size==1 || Size==2) {
6464         EVT VT = (Size==1) ? MVT::i8 : MVT::i16;
6465         if (GPR_idx != NumGPRs) {
6466           SDValue Load = DAG.getExtLoad(ISD::EXTLOAD, dl, PtrVT, Chain, Arg,
6467                                         MachinePointerInfo(), VT);
6468           MemOpChains.push_back(Load.getValue(1));
6469           RegsToPass.push_back(std::make_pair(GPR[GPR_idx++], Load));
6470 
6471           ArgOffset += PtrByteSize;
6472         } else {
6473           SDValue Const = DAG.getConstant(PtrByteSize - Size, dl,
6474                                           PtrOff.getValueType());
6475           SDValue AddPtr = DAG.getNode(ISD::ADD, dl, PtrVT, PtrOff, Const);
6476           Chain = CallSeqStart = createMemcpyOutsideCallSeq(Arg, AddPtr,
6477                                                             CallSeqStart,
6478                                                             Flags, DAG, dl);
6479           ArgOffset += PtrByteSize;
6480         }
6481         continue;
6482       }
6483       // Copy entire object into memory.  There are cases where gcc-generated
6484       // code assumes it is there, even if it could be put entirely into
6485       // registers.  (This is not what the doc says.)
6486       Chain = CallSeqStart = createMemcpyOutsideCallSeq(Arg, PtrOff,
6487                                                         CallSeqStart,
6488                                                         Flags, DAG, dl);
6489 
6490       // For small aggregates (Darwin only) and aggregates >= PtrByteSize,
6491       // copy the pieces of the object that fit into registers from the
6492       // parameter save area.
6493       for (unsigned j=0; j<Size; j+=PtrByteSize) {
6494         SDValue Const = DAG.getConstant(j, dl, PtrOff.getValueType());
6495         SDValue AddArg = DAG.getNode(ISD::ADD, dl, PtrVT, Arg, Const);
6496         if (GPR_idx != NumGPRs) {
6497           SDValue Load =
6498               DAG.getLoad(PtrVT, dl, Chain, AddArg, MachinePointerInfo());
6499           MemOpChains.push_back(Load.getValue(1));
6500           RegsToPass.push_back(std::make_pair(GPR[GPR_idx++], Load));
6501           ArgOffset += PtrByteSize;
6502         } else {
6503           ArgOffset += ((Size - j + PtrByteSize-1)/PtrByteSize)*PtrByteSize;
6504           break;
6505         }
6506       }
6507       continue;
6508     }
6509 
6510     switch (Arg.getSimpleValueType().SimpleTy) {
6511     default: llvm_unreachable("Unexpected ValueType for argument!");
6512     case MVT::i1:
6513     case MVT::i32:
6514     case MVT::i64:
6515       if (GPR_idx != NumGPRs) {
6516         if (Arg.getValueType() == MVT::i1)
6517           Arg = DAG.getNode(ISD::ZERO_EXTEND, dl, PtrVT, Arg);
6518 
6519         RegsToPass.push_back(std::make_pair(GPR[GPR_idx++], Arg));
6520       } else {
6521         LowerMemOpCallTo(DAG, MF, Chain, Arg, PtrOff, SPDiff, ArgOffset,
6522                          isPPC64, isTailCall, false, MemOpChains,
6523                          TailCallArguments, dl);
6524       }
6525       ArgOffset += PtrByteSize;
6526       break;
6527     case MVT::f32:
6528     case MVT::f64:
6529       if (FPR_idx != NumFPRs) {
6530         RegsToPass.push_back(std::make_pair(FPR[FPR_idx++], Arg));
6531 
6532         if (isVarArg) {
6533           SDValue Store =
6534               DAG.getStore(Chain, dl, Arg, PtrOff, MachinePointerInfo());
6535           MemOpChains.push_back(Store);
6536 
6537           // Float varargs are always shadowed in available integer registers
6538           if (GPR_idx != NumGPRs) {
6539             SDValue Load =
6540                 DAG.getLoad(PtrVT, dl, Store, PtrOff, MachinePointerInfo());
6541             MemOpChains.push_back(Load.getValue(1));
6542             RegsToPass.push_back(std::make_pair(GPR[GPR_idx++], Load));
6543           }
          if (GPR_idx != NumGPRs && Arg.getValueType() == MVT::f64 &&
              !isPPC64) {
6545             SDValue ConstFour = DAG.getConstant(4, dl, PtrOff.getValueType());
6546             PtrOff = DAG.getNode(ISD::ADD, dl, PtrVT, PtrOff, ConstFour);
6547             SDValue Load =
6548                 DAG.getLoad(PtrVT, dl, Store, PtrOff, MachinePointerInfo());
6549             MemOpChains.push_back(Load.getValue(1));
6550             RegsToPass.push_back(std::make_pair(GPR[GPR_idx++], Load));
6551           }
6552         } else {
6553           // If we have any FPRs remaining, we may also have GPRs remaining.
6554           // Args passed in FPRs consume either 1 (f32) or 2 (f64) available
6555           // GPRs.
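          // For example, a leading f64 argument on a 32-bit target lands in
          // FPR1 and shadows both R3 and R4, so the next integer argument
          // is assigned to R5.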
6556           if (GPR_idx != NumGPRs)
6557             ++GPR_idx;
6558           if (GPR_idx != NumGPRs && Arg.getValueType() == MVT::f64 &&
6559               !isPPC64)  // PPC64 has 64-bit GPR's obviously :)
6560             ++GPR_idx;
6561         }
6562       } else
6563         LowerMemOpCallTo(DAG, MF, Chain, Arg, PtrOff, SPDiff, ArgOffset,
6564                          isPPC64, isTailCall, false, MemOpChains,
6565                          TailCallArguments, dl);
6566       if (isPPC64)
6567         ArgOffset += 8;
6568       else
6569         ArgOffset += Arg.getValueType() == MVT::f32 ? 4 : 8;
6570       break;
6571     case MVT::v4f32:
6572     case MVT::v4i32:
6573     case MVT::v8i16:
6574     case MVT::v16i8:
6575       if (isVarArg) {
        // These go aligned on the stack, or in the corresponding R registers
        // when within range.  The Darwin PPC ABI doc claims they also go in
        // V registers; in fact gcc does this only for arguments that are
        // prototyped, not for those that match the ellipsis.  We do it for
        // all arguments; this seems to work.
        while (ArgOffset % 16 != 0) {
6582           ArgOffset += PtrByteSize;
6583           if (GPR_idx != NumGPRs)
6584             GPR_idx++;
6585         }
6586         // We could elide this store in the case where the object fits
6587         // entirely in R registers.  Maybe later.
6588         PtrOff = DAG.getNode(ISD::ADD, dl, PtrVT, StackPtr,
6589                              DAG.getConstant(ArgOffset, dl, PtrVT));
6590         SDValue Store =
6591             DAG.getStore(Chain, dl, Arg, PtrOff, MachinePointerInfo());
6592         MemOpChains.push_back(Store);
6593         if (VR_idx != NumVRs) {
6594           SDValue Load =
6595               DAG.getLoad(MVT::v4f32, dl, Store, PtrOff, MachinePointerInfo());
6596           MemOpChains.push_back(Load.getValue(1));
6597           RegsToPass.push_back(std::make_pair(VR[VR_idx++], Load));
6598         }
6599         ArgOffset += 16;
6600         for (unsigned i=0; i<16; i+=PtrByteSize) {
6601           if (GPR_idx == NumGPRs)
6602             break;
6603           SDValue Ix = DAG.getNode(ISD::ADD, dl, PtrVT, PtrOff,
6604                                    DAG.getConstant(i, dl, PtrVT));
6605           SDValue Load =
6606               DAG.getLoad(PtrVT, dl, Store, Ix, MachinePointerInfo());
6607           MemOpChains.push_back(Load.getValue(1));
6608           RegsToPass.push_back(std::make_pair(GPR[GPR_idx++], Load));
6609         }
6610         break;
6611       }
6612 
6613       // Non-varargs Altivec params generally go in registers, but have
6614       // stack space allocated at the end.
6615       if (VR_idx != NumVRs) {
6616         // Doesn't have GPR space allocated.
6617         RegsToPass.push_back(std::make_pair(VR[VR_idx++], Arg));
6618       } else if (nAltivecParamsAtEnd==0) {
6619         // We are emitting Altivec params in order.
6620         LowerMemOpCallTo(DAG, MF, Chain, Arg, PtrOff, SPDiff, ArgOffset,
6621                          isPPC64, isTailCall, true, MemOpChains,
6622                          TailCallArguments, dl);
6623         ArgOffset += 16;
6624       }
6625       break;
6626     }
6627   }
6628   // If all Altivec parameters fit in registers, as they usually do,
6629   // they get stack space following the non-Altivec parameters.  We
6630   // don't track this here because nobody below needs it.
  // If there are more Altivec parameters than fit in registers, emit
6632   // the stores here.
6633   if (!isVarArg && nAltivecParamsAtEnd > NumVRs) {
6634     unsigned j = 0;
    // Offset is aligned; skip the first 12 params, which go in V registers.
6636     ArgOffset = ((ArgOffset+15)/16)*16;
6637     ArgOffset += 12*16;
6638     for (unsigned i = 0; i != NumOps; ++i) {
6639       SDValue Arg = OutVals[i];
6640       EVT ArgType = Outs[i].VT;
6641       if (ArgType==MVT::v4f32 || ArgType==MVT::v4i32 ||
6642           ArgType==MVT::v8i16 || ArgType==MVT::v16i8) {
6643         if (++j > NumVRs) {
6644           SDValue PtrOff;
6645           // We are emitting Altivec params in order.
6646           LowerMemOpCallTo(DAG, MF, Chain, Arg, PtrOff, SPDiff, ArgOffset,
6647                            isPPC64, isTailCall, true, MemOpChains,
6648                            TailCallArguments, dl);
6649           ArgOffset += 16;
6650         }
6651       }
6652     }
6653   }
6654 
6655   if (!MemOpChains.empty())
6656     Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, MemOpChains);
6657 
6658   // On Darwin, R12 must contain the address of an indirect callee.  This does
6659   // not mean the MTCTR instruction must use R12; it's easier to model this as
6660   // an extra parameter, so do that.
6661   if (!isTailCall &&
6662       !isFunctionGlobalAddress(Callee) &&
6663       !isa<ExternalSymbolSDNode>(Callee) &&
6664       !isBLACompatibleAddress(Callee, DAG))
6665     RegsToPass.push_back(std::make_pair((unsigned)(isPPC64 ? PPC::X12 :
6666                                                    PPC::R12), Callee));
6667 
6668   // Build a sequence of copy-to-reg nodes chained together with token chain
6669   // and flag operands which copy the outgoing args into the appropriate regs.
6670   SDValue InFlag;
6671   for (unsigned i = 0, e = RegsToPass.size(); i != e; ++i) {
6672     Chain = DAG.getCopyToReg(Chain, dl, RegsToPass[i].first,
6673                              RegsToPass[i].second, InFlag);
6674     InFlag = Chain.getValue(1);
6675   }
6676 
6677   if (isTailCall)
6678     PrepareTailCall(DAG, InFlag, Chain, dl, SPDiff, NumBytes, LROp, FPOp,
6679                     TailCallArguments);
6680 
6681   return FinishCall(CallConv, dl, isTailCall, isVarArg, isPatchPoint,
6682                     /* unused except on PPC64 ELFv1 */ false, DAG,
6683                     RegsToPass, InFlag, Chain, CallSeqStart, Callee, SPDiff,
6684                     NumBytes, Ins, InVals, CS);
6685 }
6686 
6688 SDValue PPCTargetLowering::LowerCall_AIX(
6689     SDValue Chain, SDValue Callee, CallingConv::ID CallConv, bool isVarArg,
6690     bool isTailCall, bool isPatchPoint,
6691     const SmallVectorImpl<ISD::OutputArg> &Outs,
6692     const SmallVectorImpl<SDValue> &OutVals,
6693     const SmallVectorImpl<ISD::InputArg> &Ins, const SDLoc &dl,
6694     SelectionDAG &DAG, SmallVectorImpl<SDValue> &InVals,
6695     ImmutableCallSite CS) const {
6696 
6697   assert((CallConv == CallingConv::C || CallConv == CallingConv::Fast) &&
6698          "Unimplemented calling convention!");
6699   if (isVarArg || isPatchPoint)
6700     report_fatal_error("This call type is unimplemented on AIX.");
6701 
6702   EVT PtrVT = getPointerTy(DAG.getDataLayout());
6703   bool isPPC64 = PtrVT == MVT::i64;
6704   unsigned PtrByteSize = isPPC64 ? 8 : 4;
6705   unsigned NumOps = Outs.size();
6706 
6708   // Count how many bytes are to be pushed on the stack, including the linkage
6709   // area, parameter list area.
6710   // On XCOFF, we start with 24/48, which is reserved space for
6711   // [SP][CR][LR][2 x reserved][TOC].
6712   unsigned LinkageSize = Subtarget.getFrameLowering()->getLinkageSize();
6713 
6714   // The prolog code of the callee may store up to 8 GPR argument registers to
6715   // the stack, allowing va_start to index over them in memory if the callee
6716   // is variadic.
6717   // Because we cannot tell if this is needed on the caller side, we have to
6718   // conservatively assume that it is needed.  As such, make sure we have at
6719   // least enough stack space for the caller to store the 8 GPRs.
6720   unsigned NumBytes = LinkageSize + 8 * PtrByteSize;
6721 
6722   // Adjust the stack pointer for the new arguments...
6723   // These operations are automatically eliminated by the prolog/epilog
6724   // inserter pass.
6725   Chain = DAG.getCALLSEQ_START(Chain, NumBytes, 0, dl);
6726   SDValue CallSeqStart = Chain;
6727 
6728   static const MCPhysReg GPR_32[] = {           // 32-bit registers.
6729     PPC::R3, PPC::R4, PPC::R5, PPC::R6,
6730     PPC::R7, PPC::R8, PPC::R9, PPC::R10
6731   };
6732   static const MCPhysReg GPR_64[] = {           // 64-bit registers.
6733     PPC::X3, PPC::X4, PPC::X5, PPC::X6,
6734     PPC::X7, PPC::X8, PPC::X9, PPC::X10
6735   };
6736 
6737   const unsigned NumGPRs = isPPC64 ? array_lengthof(GPR_64)
6738                                    : array_lengthof(GPR_32);
6739   const MCPhysReg *GPR = isPPC64 ? GPR_64 : GPR_32;
6740   unsigned GPR_idx = 0;
6741 
6742   SmallVector<std::pair<unsigned, SDValue>, 8> RegsToPass;
6743 
6744   if (isTailCall)
6745     report_fatal_error("Handling of tail call is unimplemented!");
6746   int SPDiff = 0;
6747 
6748   for (unsigned i = 0; i != NumOps; ++i) {
6749     SDValue Arg = OutVals[i];
6750     ISD::ArgFlagsTy Flags = Outs[i].Flags;
6751 
6752     // Promote integers if needed.
6753     if (Arg.getValueType() == MVT::i1 ||
6754         (isPPC64 && Arg.getValueType() == MVT::i32)) {
6755       unsigned ExtOp = Flags.isSExt() ? ISD::SIGN_EXTEND : ISD::ZERO_EXTEND;
6756       Arg = DAG.getNode(ExtOp, dl, PtrVT, Arg);
6757     }
6758 
6759     // Note: "by value" is code for passing a structure by value, not
6760     // basic types.
6761     if (Flags.isByVal())
6762       report_fatal_error("Passing structure by value is unimplemented!");
6763 
6764     switch (Arg.getSimpleValueType().SimpleTy) {
6765     default: llvm_unreachable("Unexpected ValueType for argument!");
6766     case MVT::i1:
6767     case MVT::i32:
6768     case MVT::i64:
6769       if (GPR_idx != NumGPRs)
6770         RegsToPass.push_back(std::make_pair(GPR[GPR_idx++], Arg));
6771       else
6772         report_fatal_error("Handling of placing parameters on the stack is "
6773                            "unimplemented!");
6774       break;
6775     case MVT::f32:
6776     case MVT::f64:
6777     case MVT::v4f32:
6778     case MVT::v4i32:
6779     case MVT::v8i16:
6780     case MVT::v16i8:
6781     case MVT::v2f64:
6782     case MVT::v2i64:
6783     case MVT::v1i128:
6784     case MVT::f128:
6785     case MVT::v4f64:
6786     case MVT::v4i1:
6787       report_fatal_error("Handling of this parameter type is unimplemented!");
6788     }
6789   }
6790 
6791   if (!isFunctionGlobalAddress(Callee) &&
6792       !isa<ExternalSymbolSDNode>(Callee))
6793     report_fatal_error("Handling of indirect call is unimplemented!");
6794 
6795   // Build a sequence of copy-to-reg nodes chained together with token chain
6796   // and flag operands which copy the outgoing args into the appropriate regs.
6797   SDValue InFlag;
6798   for (auto Reg : RegsToPass) {
6799     Chain = DAG.getCopyToReg(Chain, dl, Reg.first, Reg.second, InFlag);
6800     InFlag = Chain.getValue(1);
6801   }
6802 
6803   return FinishCall(CallConv, dl, isTailCall, isVarArg, isPatchPoint,
6804                     /* unused except on PPC64 ELFv1 */ false, DAG,
6805                     RegsToPass, InFlag, Chain, CallSeqStart, Callee, SPDiff,
6806                     NumBytes, Ins, InVals, CS);
6807 }
6808 
6809 bool
6810 PPCTargetLowering::CanLowerReturn(CallingConv::ID CallConv,
6811                                   MachineFunction &MF, bool isVarArg,
6812                                   const SmallVectorImpl<ISD::OutputArg> &Outs,
6813                                   LLVMContext &Context) const {
6814   SmallVector<CCValAssign, 16> RVLocs;
6815   CCState CCInfo(CallConv, isVarArg, MF, RVLocs, Context);
6816   return CCInfo.CheckReturn(
6817       Outs, (Subtarget.isSVR4ABI() && CallConv == CallingConv::Cold)
6818                 ? RetCC_PPC_Cold
6819                 : RetCC_PPC);
6820 }
6821 
6822 SDValue
6823 PPCTargetLowering::LowerReturn(SDValue Chain, CallingConv::ID CallConv,
6824                                bool isVarArg,
6825                                const SmallVectorImpl<ISD::OutputArg> &Outs,
6826                                const SmallVectorImpl<SDValue> &OutVals,
6827                                const SDLoc &dl, SelectionDAG &DAG) const {
6828   SmallVector<CCValAssign, 16> RVLocs;
6829   CCState CCInfo(CallConv, isVarArg, DAG.getMachineFunction(), RVLocs,
6830                  *DAG.getContext());
6831   CCInfo.AnalyzeReturn(Outs,
6832                        (Subtarget.isSVR4ABI() && CallConv == CallingConv::Cold)
6833                            ? RetCC_PPC_Cold
6834                            : RetCC_PPC);
6835 
6836   SDValue Flag;
6837   SmallVector<SDValue, 4> RetOps(1, Chain);
6838 
6839   // Copy the result values into the output registers.
6840   for (unsigned i = 0, RealResIdx = 0; i != RVLocs.size(); ++i, ++RealResIdx) {
6841     CCValAssign &VA = RVLocs[i];
6842     assert(VA.isRegLoc() && "Can only return in registers!");
6843 
6844     SDValue Arg = OutVals[RealResIdx];
6845 
6846     switch (VA.getLocInfo()) {
6847     default: llvm_unreachable("Unknown loc info!");
6848     case CCValAssign::Full: break;
6849     case CCValAssign::AExt:
6850       Arg = DAG.getNode(ISD::ANY_EXTEND, dl, VA.getLocVT(), Arg);
6851       break;
6852     case CCValAssign::ZExt:
6853       Arg = DAG.getNode(ISD::ZERO_EXTEND, dl, VA.getLocVT(), Arg);
6854       break;
6855     case CCValAssign::SExt:
6856       Arg = DAG.getNode(ISD::SIGN_EXTEND, dl, VA.getLocVT(), Arg);
6857       break;
6858     }
6859     if (Subtarget.hasSPE() && VA.getLocVT() == MVT::f64) {
6860       bool isLittleEndian = Subtarget.isLittleEndian();
6861       // Legalize ret f64 -> ret 2 x i32.
6862       SDValue SVal =
6863           DAG.getNode(PPCISD::EXTRACT_SPE, dl, MVT::i32, Arg,
6864                       DAG.getIntPtrConstant(isLittleEndian ? 0 : 1, dl));
6865       Chain = DAG.getCopyToReg(Chain, dl, VA.getLocReg(), SVal, Flag);
6866       RetOps.push_back(DAG.getRegister(VA.getLocReg(), VA.getLocVT()));
6867       SVal = DAG.getNode(PPCISD::EXTRACT_SPE, dl, MVT::i32, Arg,
6868                          DAG.getIntPtrConstant(isLittleEndian ? 1 : 0, dl));
6869       Flag = Chain.getValue(1);
6870       VA = RVLocs[++i]; // skip ahead to next loc
6871       Chain = DAG.getCopyToReg(Chain, dl, VA.getLocReg(), SVal, Flag);
6872     } else
6873       Chain = DAG.getCopyToReg(Chain, dl, VA.getLocReg(), Arg, Flag);
6874     Flag = Chain.getValue(1);
6875     RetOps.push_back(DAG.getRegister(VA.getLocReg(), VA.getLocVT()));
6876   }
6877 
6878   const PPCRegisterInfo *TRI = Subtarget.getRegisterInfo();
6879   const MCPhysReg *I =
6880     TRI->getCalleeSavedRegsViaCopy(&DAG.getMachineFunction());
6881   if (I) {
6882     for (; *I; ++I) {
6884       if (PPC::G8RCRegClass.contains(*I))
6885         RetOps.push_back(DAG.getRegister(*I, MVT::i64));
6886       else if (PPC::F8RCRegClass.contains(*I))
6887         RetOps.push_back(DAG.getRegister(*I, MVT::getFloatingPointVT(64)));
6888       else if (PPC::CRRCRegClass.contains(*I))
6889         RetOps.push_back(DAG.getRegister(*I, MVT::i1));
6890       else if (PPC::VRRCRegClass.contains(*I))
6891         RetOps.push_back(DAG.getRegister(*I, MVT::Other));
6892       else
6893         llvm_unreachable("Unexpected register class in CSRsViaCopy!");
6894     }
6895   }
6896 
6897   RetOps[0] = Chain;  // Update chain.
6898 
6899   // Add the flag if we have it.
6900   if (Flag.getNode())
6901     RetOps.push_back(Flag);
6902 
6903   return DAG.getNode(PPCISD::RET_FLAG, dl, MVT::Other, RetOps);
6904 }
6905 
6906 SDValue
6907 PPCTargetLowering::LowerGET_DYNAMIC_AREA_OFFSET(SDValue Op,
6908                                                 SelectionDAG &DAG) const {
6909   SDLoc dl(Op);
6910 
6911   // Get the correct type for integers.
6912   EVT IntVT = Op.getValueType();
6913 
6914   // Get the inputs.
6915   SDValue Chain = Op.getOperand(0);
6916   SDValue FPSIdx = getFramePointerFrameIndex(DAG);
6917   // Build a DYNAREAOFFSET node.
6918   SDValue Ops[2] = {Chain, FPSIdx};
6919   SDVTList VTs = DAG.getVTList(IntVT);
6920   return DAG.getNode(PPCISD::DYNAREAOFFSET, dl, VTs, Ops);
6921 }
6922 
6923 SDValue PPCTargetLowering::LowerSTACKRESTORE(SDValue Op,
6924                                              SelectionDAG &DAG) const {
6925   // When we pop the dynamic allocation we need to restore the SP link.
6926   SDLoc dl(Op);
6927 
6928   // Get the correct type for pointers.
6929   EVT PtrVT = getPointerTy(DAG.getDataLayout());
6930 
6931   // Construct the stack pointer operand.
6932   bool isPPC64 = Subtarget.isPPC64();
6933   unsigned SP = isPPC64 ? PPC::X1 : PPC::R1;
6934   SDValue StackPtr = DAG.getRegister(SP, PtrVT);
6935 
6936   // Get the operands for the STACKRESTORE.
6937   SDValue Chain = Op.getOperand(0);
6938   SDValue SaveSP = Op.getOperand(1);
6939 
6940   // Load the old link SP.
6941   SDValue LoadLinkSP =
6942       DAG.getLoad(PtrVT, dl, Chain, StackPtr, MachinePointerInfo());
6943 
6944   // Restore the stack pointer.
6945   Chain = DAG.getCopyToReg(LoadLinkSP.getValue(1), dl, SP, SaveSP);
6946 
6947   // Store the old link SP.
6948   return DAG.getStore(Chain, dl, LoadLinkSP, StackPtr, MachinePointerInfo());
6949 }
6950 
6951 SDValue PPCTargetLowering::getReturnAddrFrameIndex(SelectionDAG &DAG) const {
6952   MachineFunction &MF = DAG.getMachineFunction();
6953   bool isPPC64 = Subtarget.isPPC64();
6954   EVT PtrVT = getPointerTy(MF.getDataLayout());
6955 
  // Get the current return address save index.
6958   PPCFunctionInfo *FI = MF.getInfo<PPCFunctionInfo>();
6959   int RASI = FI->getReturnAddrSaveIndex();
6960 
  // If the return address save index hasn't been defined yet.
6962   if (!RASI) {
    // Find out the fixed offset of the return address save area.
6964     int LROffset = Subtarget.getFrameLowering()->getReturnSaveOffset();
    // Allocate the frame index for the return address save area.
6966     RASI = MF.getFrameInfo().CreateFixedObject(isPPC64? 8 : 4, LROffset, false);
6967     // Save the result.
6968     FI->setReturnAddrSaveIndex(RASI);
6969   }
6970   return DAG.getFrameIndex(RASI, PtrVT);
6971 }
6972 
6973 SDValue
6974 PPCTargetLowering::getFramePointerFrameIndex(SelectionDAG & DAG) const {
6975   MachineFunction &MF = DAG.getMachineFunction();
6976   bool isPPC64 = Subtarget.isPPC64();
6977   EVT PtrVT = getPointerTy(MF.getDataLayout());
6978 
6979   // Get current frame pointer save index.  The users of this index will be
6980   // primarily DYNALLOC instructions.
6981   PPCFunctionInfo *FI = MF.getInfo<PPCFunctionInfo>();
6982   int FPSI = FI->getFramePointerSaveIndex();
6983 
6984   // If the frame pointer save index hasn't been defined yet.
6985   if (!FPSI) {
    // Find out the fixed offset of the frame pointer save area.
6987     int FPOffset = Subtarget.getFrameLowering()->getFramePointerSaveOffset();
    // Allocate the frame index for the frame pointer save area.
6989     FPSI = MF.getFrameInfo().CreateFixedObject(isPPC64? 8 : 4, FPOffset, true);
6990     // Save the result.
6991     FI->setFramePointerSaveIndex(FPSI);
6992   }
6993   return DAG.getFrameIndex(FPSI, PtrVT);
6994 }
6995 
6996 SDValue PPCTargetLowering::LowerDYNAMIC_STACKALLOC(SDValue Op,
6997                                                    SelectionDAG &DAG) const {
6998   // Get the inputs.
6999   SDValue Chain = Op.getOperand(0);
7000   SDValue Size  = Op.getOperand(1);
7001   SDLoc dl(Op);
7002 
7003   // Get the correct type for pointers.
7004   EVT PtrVT = getPointerTy(DAG.getDataLayout());
7005   // Negate the size.
7006   SDValue NegSize = DAG.getNode(ISD::SUB, dl, PtrVT,
7007                                 DAG.getConstant(0, dl, PtrVT), Size);
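  // The stack grows downward, so the allocation is expressed as adding a
  // negative size to the stack pointer within the DYNALLOC node.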
7008   // Construct a node for the frame pointer save index.
7009   SDValue FPSIdx = getFramePointerFrameIndex(DAG);
7010   // Build a DYNALLOC node.
7011   SDValue Ops[3] = { Chain, NegSize, FPSIdx };
7012   SDVTList VTs = DAG.getVTList(PtrVT, MVT::Other);
7013   return DAG.getNode(PPCISD::DYNALLOC, dl, VTs, Ops);
7014 }
7015 
7016 SDValue PPCTargetLowering::LowerEH_DWARF_CFA(SDValue Op,
7017                                                      SelectionDAG &DAG) const {
7018   MachineFunction &MF = DAG.getMachineFunction();
7019 
7020   bool isPPC64 = Subtarget.isPPC64();
7021   EVT PtrVT = getPointerTy(DAG.getDataLayout());
7022 
7023   int FI = MF.getFrameInfo().CreateFixedObject(isPPC64 ? 8 : 4, 0, false);
7024   return DAG.getFrameIndex(FI, PtrVT);
7025 }
7026 
7027 SDValue PPCTargetLowering::lowerEH_SJLJ_SETJMP(SDValue Op,
7028                                                SelectionDAG &DAG) const {
7029   SDLoc DL(Op);
7030   return DAG.getNode(PPCISD::EH_SJLJ_SETJMP, DL,
7031                      DAG.getVTList(MVT::i32, MVT::Other),
7032                      Op.getOperand(0), Op.getOperand(1));
7033 }
7034 
7035 SDValue PPCTargetLowering::lowerEH_SJLJ_LONGJMP(SDValue Op,
7036                                                 SelectionDAG &DAG) const {
7037   SDLoc DL(Op);
7038   return DAG.getNode(PPCISD::EH_SJLJ_LONGJMP, DL, MVT::Other,
7039                      Op.getOperand(0), Op.getOperand(1));
7040 }
7041 
7042 SDValue PPCTargetLowering::LowerLOAD(SDValue Op, SelectionDAG &DAG) const {
7043   if (Op.getValueType().isVector())
7044     return LowerVectorLoad(Op, DAG);
7045 
7046   assert(Op.getValueType() == MVT::i1 &&
7047          "Custom lowering only for i1 loads");
7048 
  // First, extend-load the 8 bits into a pointer-sized integer, then
  // truncate to 1 bit.
7050 
7051   SDLoc dl(Op);
7052   LoadSDNode *LD = cast<LoadSDNode>(Op);
7053 
7054   SDValue Chain = LD->getChain();
7055   SDValue BasePtr = LD->getBasePtr();
7056   MachineMemOperand *MMO = LD->getMemOperand();
7057 
7058   SDValue NewLD =
7059       DAG.getExtLoad(ISD::EXTLOAD, dl, getPointerTy(DAG.getDataLayout()), Chain,
7060                      BasePtr, MVT::i8, MMO);
7061   SDValue Result = DAG.getNode(ISD::TRUNCATE, dl, MVT::i1, NewLD);
7062 
7063   SDValue Ops[] = { Result, SDValue(NewLD.getNode(), 1) };
7064   return DAG.getMergeValues(Ops, dl);
7065 }
7066 
7067 SDValue PPCTargetLowering::LowerSTORE(SDValue Op, SelectionDAG &DAG) const {
7068   if (Op.getOperand(1).getValueType().isVector())
7069     return LowerVectorStore(Op, DAG);
7070 
7071   assert(Op.getOperand(1).getValueType() == MVT::i1 &&
7072          "Custom lowering only for i1 stores");
7073 
  // First, zero-extend to a pointer-sized integer, then use a truncating
  // store to 8 bits.
7075 
7076   SDLoc dl(Op);
7077   StoreSDNode *ST = cast<StoreSDNode>(Op);
7078 
7079   SDValue Chain = ST->getChain();
7080   SDValue BasePtr = ST->getBasePtr();
7081   SDValue Value = ST->getValue();
7082   MachineMemOperand *MMO = ST->getMemOperand();
7083 
7084   Value = DAG.getNode(ISD::ZERO_EXTEND, dl, getPointerTy(DAG.getDataLayout()),
7085                       Value);
7086   return DAG.getTruncStore(Chain, dl, Value, BasePtr, MVT::i8, MMO);
7087 }
7088 
7089 // FIXME: Remove this once the ANDI glue bug is fixed:
7090 SDValue PPCTargetLowering::LowerTRUNCATE(SDValue Op, SelectionDAG &DAG) const {
7091   assert(Op.getValueType() == MVT::i1 &&
7092          "Custom lowering only for i1 results");
7093 
7094   SDLoc DL(Op);
7095   return DAG.getNode(PPCISD::ANDIo_1_GT_BIT, DL, MVT::i1,
7096                      Op.getOperand(0));
7097 }
7098 
7099 SDValue PPCTargetLowering::LowerTRUNCATEVector(SDValue Op,
7100                                                SelectionDAG &DAG) const {
7101 
7102   // Implements a vector truncate that fits in a vector register as a shuffle.
7103   // We want to legalize vector truncates down to where the source fits in
7104   // a vector register (and target is therefore smaller than vector register
7105   // size).  At that point legalization will try to custom lower the sub-legal
7106   // result and get here - where we can contain the truncate as a single target
7107   // operation.
7108 
7109   // For example a trunc <2 x i16> to <2 x i8> could be visualized as follows:
7110   //   <MSB1|LSB1, MSB2|LSB2> to <LSB1, LSB2>
7111   //
7112   // We will implement it for big-endian ordering as this (where x denotes
7113   // undefined):
7114   //   < MSB1|LSB1, MSB2|LSB2, uu, uu, uu, uu, uu, uu> to
7115   //   < LSB1, LSB2, u, u, u, u, u, u, u, u, u, u, u, u, u, u>
7116   //
7117   // The same operation in little-endian ordering will be:
7118   //   <uu, uu, uu, uu, uu, uu, LSB2|MSB2, LSB1|MSB1> to
7119   //   <u, u, u, u, u, u, u, u, u, u, u, u, u, u, LSB2, LSB1>
7120 
7121   assert(Op.getValueType().isVector() && "Vector type expected.");
7122 
7123   SDLoc DL(Op);
7124   SDValue N1 = Op.getOperand(0);
7125   unsigned SrcSize = N1.getValueType().getSizeInBits();
7126   assert(SrcSize <= 128 && "Source must fit in an Altivec/VSX vector");
7127   SDValue WideSrc = SrcSize == 128 ? N1 : widenVec(DAG, N1, DL);
7128 
7129   EVT TrgVT = Op.getValueType();
7130   unsigned TrgNumElts = TrgVT.getVectorNumElements();
7131   EVT EltVT = TrgVT.getVectorElementType();
7132   unsigned WideNumElts = 128 / EltVT.getSizeInBits();
7133   EVT WideVT = EVT::getVectorVT(*DAG.getContext(), EltVT, WideNumElts);
7134 
7135   // First list the elements we want to keep.
7136   unsigned SizeMult = SrcSize / TrgVT.getSizeInBits();
7137   SmallVector<int, 16> ShuffV;
7138   if (Subtarget.isLittleEndian())
7139     for (unsigned i = 0; i < TrgNumElts; ++i)
7140       ShuffV.push_back(i * SizeMult);
7141   else
7142     for (unsigned i = 1; i <= TrgNumElts; ++i)
7143       ShuffV.push_back(i * SizeMult - 1);
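  // For example, for a <4 x i32> to <4 x i16> truncate, SizeMult == 2 and
  // WideVT == v8i16: the mask selects elements 0, 2, 4, 6 of the widened
  // vector on little-endian and 1, 3, 5, 7 on big-endian.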
7144 
  // Populate the remaining elements with undefs: any mask index that selects
  // from the second shuffle operand (which is UNDEF below) yields an undef
  // element, and WideNumElts + 1 always does so.
  for (unsigned i = TrgNumElts; i < WideNumElts; ++i)
    ShuffV.push_back(WideNumElts + 1);
7149 
7150   SDValue Conv = DAG.getNode(ISD::BITCAST, DL, WideVT, WideSrc);
7151   return DAG.getVectorShuffle(WideVT, DL, Conv, DAG.getUNDEF(WideVT), ShuffV);
7152 }
7153 
/// LowerSELECT_CC - Lower floating point select_cc's into the fsel
/// instruction when possible.
7156 SDValue PPCTargetLowering::LowerSELECT_CC(SDValue Op, SelectionDAG &DAG) const {
7157   // Not FP? Not a fsel.
7158   if (!Op.getOperand(0).getValueType().isFloatingPoint() ||
7159       !Op.getOperand(2).getValueType().isFloatingPoint())
7160     return Op;
7161 
7162   // We might be able to do better than this under some circumstances, but in
7163   // general, fsel-based lowering of select is a finite-math-only optimization.
7164   // For more information, see section F.3 of the 2.06 ISA specification.
7165   if (!DAG.getTarget().Options.NoInfsFPMath ||
7166       !DAG.getTarget().Options.NoNaNsFPMath)
7167     return Op;
7168   // TODO: Propagate flags from the select rather than global settings.
7169   SDNodeFlags Flags;
7170   Flags.setNoInfs(true);
7171   Flags.setNoNaNs(true);
7172 
7173   ISD::CondCode CC = cast<CondCodeSDNode>(Op.getOperand(4))->get();
7174 
7175   EVT ResVT = Op.getValueType();
7176   EVT CmpVT = Op.getOperand(0).getValueType();
7177   SDValue LHS = Op.getOperand(0), RHS = Op.getOperand(1);
7178   SDValue TV  = Op.getOperand(2), FV  = Op.getOperand(3);
7179   SDLoc dl(Op);
7180 
7181   // If the RHS of the comparison is a 0.0, we don't need to do the
7182   // subtraction at all.
7183   SDValue Sel1;
7184   if (isFloatingPointZero(RHS))
7185     switch (CC) {
7186     default: break;       // SETUO etc aren't handled by fsel.
7187     case ISD::SETNE:
7188       std::swap(TV, FV);
7189       LLVM_FALLTHROUGH;
7190     case ISD::SETEQ:
7191       if (LHS.getValueType() == MVT::f32)   // Comparison is always 64-bits
7192         LHS = DAG.getNode(ISD::FP_EXTEND, dl, MVT::f64, LHS);
7193       Sel1 = DAG.getNode(PPCISD::FSEL, dl, ResVT, LHS, TV, FV);
7194       if (Sel1.getValueType() == MVT::f32)   // Comparison is always 64-bits
7195         Sel1 = DAG.getNode(ISD::FP_EXTEND, dl, MVT::f64, Sel1);
7196       return DAG.getNode(PPCISD::FSEL, dl, ResVT,
7197                          DAG.getNode(ISD::FNEG, dl, MVT::f64, LHS), Sel1, FV);
7198     case ISD::SETULT:
7199     case ISD::SETLT:
7200       std::swap(TV, FV);  // fsel is natively setge, swap operands for setlt
7201       LLVM_FALLTHROUGH;
7202     case ISD::SETOGE:
7203     case ISD::SETGE:
7204       if (LHS.getValueType() == MVT::f32)   // Comparison is always 64-bits
7205         LHS = DAG.getNode(ISD::FP_EXTEND, dl, MVT::f64, LHS);
7206       return DAG.getNode(PPCISD::FSEL, dl, ResVT, LHS, TV, FV);
7207     case ISD::SETUGT:
7208     case ISD::SETGT:
7209       std::swap(TV, FV);  // fsel is natively setge, swap operands for setlt
7210       LLVM_FALLTHROUGH;
7211     case ISD::SETOLE:
7212     case ISD::SETLE:
7213       if (LHS.getValueType() == MVT::f32)   // Comparison is always 64-bits
7214         LHS = DAG.getNode(ISD::FP_EXTEND, dl, MVT::f64, LHS);
7215       return DAG.getNode(PPCISD::FSEL, dl, ResVT,
7216                          DAG.getNode(ISD::FNEG, dl, MVT::f64, LHS), TV, FV);
7217     }
7218 
7219   SDValue Cmp;
7220   switch (CC) {
7221   default: break;       // SETUO etc aren't handled by fsel.
7222   case ISD::SETNE:
7223     std::swap(TV, FV);
7224     LLVM_FALLTHROUGH;
7225   case ISD::SETEQ:
7226     Cmp = DAG.getNode(ISD::FSUB, dl, CmpVT, LHS, RHS, Flags);
7227     if (Cmp.getValueType() == MVT::f32)   // Comparison is always 64-bits
7228       Cmp = DAG.getNode(ISD::FP_EXTEND, dl, MVT::f64, Cmp);
7229     Sel1 = DAG.getNode(PPCISD::FSEL, dl, ResVT, Cmp, TV, FV);
7230     if (Sel1.getValueType() == MVT::f32)   // Comparison is always 64-bits
7231       Sel1 = DAG.getNode(ISD::FP_EXTEND, dl, MVT::f64, Sel1);
7232     return DAG.getNode(PPCISD::FSEL, dl, ResVT,
7233                        DAG.getNode(ISD::FNEG, dl, MVT::f64, Cmp), Sel1, FV);
7234   case ISD::SETULT:
7235   case ISD::SETLT:
7236     Cmp = DAG.getNode(ISD::FSUB, dl, CmpVT, LHS, RHS, Flags);
7237     if (Cmp.getValueType() == MVT::f32)   // Comparison is always 64-bits
7238       Cmp = DAG.getNode(ISD::FP_EXTEND, dl, MVT::f64, Cmp);
7239     return DAG.getNode(PPCISD::FSEL, dl, ResVT, Cmp, FV, TV);
7240   case ISD::SETOGE:
7241   case ISD::SETGE:
7242     Cmp = DAG.getNode(ISD::FSUB, dl, CmpVT, LHS, RHS, Flags);
7243     if (Cmp.getValueType() == MVT::f32)   // Comparison is always 64-bits
7244       Cmp = DAG.getNode(ISD::FP_EXTEND, dl, MVT::f64, Cmp);
7245     return DAG.getNode(PPCISD::FSEL, dl, ResVT, Cmp, TV, FV);
7246   case ISD::SETUGT:
7247   case ISD::SETGT:
7248     Cmp = DAG.getNode(ISD::FSUB, dl, CmpVT, RHS, LHS, Flags);
7249     if (Cmp.getValueType() == MVT::f32)   // Comparison is always 64-bits
7250       Cmp = DAG.getNode(ISD::FP_EXTEND, dl, MVT::f64, Cmp);
7251     return DAG.getNode(PPCISD::FSEL, dl, ResVT, Cmp, FV, TV);
7252   case ISD::SETOLE:
7253   case ISD::SETLE:
7254     Cmp = DAG.getNode(ISD::FSUB, dl, CmpVT, RHS, LHS, Flags);
7255     if (Cmp.getValueType() == MVT::f32)   // Comparison is always 64-bits
7256       Cmp = DAG.getNode(ISD::FP_EXTEND, dl, MVT::f64, Cmp);
7257     return DAG.getNode(PPCISD::FSEL, dl, ResVT, Cmp, TV, FV);
7258   }
7259   return Op;
7260 }
7261 
7262 void PPCTargetLowering::LowerFP_TO_INTForReuse(SDValue Op, ReuseLoadInfo &RLI,
7263                                                SelectionDAG &DAG,
7264                                                const SDLoc &dl) const {
7265   assert(Op.getOperand(0).getValueType().isFloatingPoint());
7266   SDValue Src = Op.getOperand(0);
7267   if (Src.getValueType() == MVT::f32)
7268     Src = DAG.getNode(ISD::FP_EXTEND, dl, MVT::f64, Src);
7269 
7270   SDValue Tmp;
7271   switch (Op.getSimpleValueType().SimpleTy) {
7272   default: llvm_unreachable("Unhandled FP_TO_INT type in custom expander!");
7273   case MVT::i32:
7274     Tmp = DAG.getNode(
7275         Op.getOpcode() == ISD::FP_TO_SINT
7276             ? PPCISD::FCTIWZ
7277             : (Subtarget.hasFPCVT() ? PPCISD::FCTIWUZ : PPCISD::FCTIDZ),
7278         dl, MVT::f64, Src);
7279     break;
7280   case MVT::i64:
7281     assert((Op.getOpcode() == ISD::FP_TO_SINT || Subtarget.hasFPCVT()) &&
7282            "i64 FP_TO_UINT is supported only with FPCVT");
7283     Tmp = DAG.getNode(Op.getOpcode()==ISD::FP_TO_SINT ? PPCISD::FCTIDZ :
7284                                                         PPCISD::FCTIDUZ,
7285                       dl, MVT::f64, Src);
7286     break;
7287   }
7288 
7289   // Convert the FP value to an int value through memory.
7290   bool i32Stack = Op.getValueType() == MVT::i32 && Subtarget.hasSTFIWX() &&
7291     (Op.getOpcode() == ISD::FP_TO_SINT || Subtarget.hasFPCVT());
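  // With STFIWX, the 32-bit integer result of the fctiwz can be stored
  // directly from the FPR into a 4-byte slot; otherwise we must spill the
  // full 8-byte f64 and load back the word that holds the result.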
7292   SDValue FIPtr = DAG.CreateStackTemporary(i32Stack ? MVT::i32 : MVT::f64);
7293   int FI = cast<FrameIndexSDNode>(FIPtr)->getIndex();
7294   MachinePointerInfo MPI =
7295       MachinePointerInfo::getFixedStack(DAG.getMachineFunction(), FI);
7296 
7297   // Emit a store to the stack slot.
7298   SDValue Chain;
7299   if (i32Stack) {
7300     MachineFunction &MF = DAG.getMachineFunction();
7301     MachineMemOperand *MMO =
7302       MF.getMachineMemOperand(MPI, MachineMemOperand::MOStore, 4, 4);
7303     SDValue Ops[] = { DAG.getEntryNode(), Tmp, FIPtr };
7304     Chain = DAG.getMemIntrinsicNode(PPCISD::STFIWX, dl,
7305               DAG.getVTList(MVT::Other), Ops, MVT::i32, MMO);
7306   } else
7307     Chain = DAG.getStore(DAG.getEntryNode(), dl, Tmp, FIPtr, MPI);
7308 
7309   // Result is a load from the stack slot.  If loading 4 bytes, make sure to
7310   // add in a bias on big endian.
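  // The integer result occupies the low-order word of the 8-byte slot:
  // bytes 4-7 on big-endian, bytes 0-3 on little-endian.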
7311   if (Op.getValueType() == MVT::i32 && !i32Stack) {
7312     FIPtr = DAG.getNode(ISD::ADD, dl, FIPtr.getValueType(), FIPtr,
7313                         DAG.getConstant(4, dl, FIPtr.getValueType()));
7314     MPI = MPI.getWithOffset(Subtarget.isLittleEndian() ? 0 : 4);
7315   }
7316 
7317   RLI.Chain = Chain;
7318   RLI.Ptr = FIPtr;
7319   RLI.MPI = MPI;
7320 }
7321 
7322 /// Custom lowers floating point to integer conversions to use
7323 /// the direct move instructions available in ISA 2.07 to avoid the
7324 /// need for load/store combinations.
7325 SDValue PPCTargetLowering::LowerFP_TO_INTDirectMove(SDValue Op,
7326                                                     SelectionDAG &DAG,
7327                                                     const SDLoc &dl) const {
7328   assert(Op.getOperand(0).getValueType().isFloatingPoint());
7329   SDValue Src = Op.getOperand(0);
7330 
7331   if (Src.getValueType() == MVT::f32)
7332     Src = DAG.getNode(ISD::FP_EXTEND, dl, MVT::f64, Src);
7333 
7334   SDValue Tmp;
7335   switch (Op.getSimpleValueType().SimpleTy) {
7336   default: llvm_unreachable("Unhandled FP_TO_INT type in custom expander!");
7337   case MVT::i32:
7338     Tmp = DAG.getNode(
7339         Op.getOpcode() == ISD::FP_TO_SINT
7340             ? PPCISD::FCTIWZ
7341             : (Subtarget.hasFPCVT() ? PPCISD::FCTIWUZ : PPCISD::FCTIDZ),
7342         dl, MVT::f64, Src);
7343     Tmp = DAG.getNode(PPCISD::MFVSR, dl, MVT::i32, Tmp);
7344     break;
7345   case MVT::i64:
7346     assert((Op.getOpcode() == ISD::FP_TO_SINT || Subtarget.hasFPCVT()) &&
7347            "i64 FP_TO_UINT is supported only with FPCVT");
7348     Tmp = DAG.getNode(Op.getOpcode()==ISD::FP_TO_SINT ? PPCISD::FCTIDZ :
7349                                                         PPCISD::FCTIDUZ,
7350                       dl, MVT::f64, Src);
7351     Tmp = DAG.getNode(PPCISD::MFVSR, dl, MVT::i64, Tmp);
7352     break;
7353   }
7354   return Tmp;
7355 }
7356 
7357 SDValue PPCTargetLowering::LowerFP_TO_INT(SDValue Op, SelectionDAG &DAG,
7358                                           const SDLoc &dl) const {
7359 
7360   // FP to INT conversions are legal for f128.
7361   if (EnableQuadPrecision && (Op->getOperand(0).getValueType() == MVT::f128))
7362     return Op;
7363 
7364   // Expand ppcf128 to i32 by hand for the benefit of llvm-gcc bootstrap on
7365   // PPC (the libcall is not available).
7366   if (Op.getOperand(0).getValueType() == MVT::ppcf128) {
7367     if (Op.getValueType() == MVT::i32) {
7368       if (Op.getOpcode() == ISD::FP_TO_SINT) {
7369         SDValue Lo = DAG.getNode(ISD::EXTRACT_ELEMENT, dl,
7370                                  MVT::f64, Op.getOperand(0),
7371                                  DAG.getIntPtrConstant(0, dl));
7372         SDValue Hi = DAG.getNode(ISD::EXTRACT_ELEMENT, dl,
7373                                  MVT::f64, Op.getOperand(0),
7374                                  DAG.getIntPtrConstant(1, dl));
7375 
7376         // Add the two halves of the long double in round-to-zero mode.
7377         SDValue Res = DAG.getNode(PPCISD::FADDRTZ, dl, MVT::f64, Lo, Hi);
7378 
7379         // Now use a smaller FP_TO_SINT.
7380         return DAG.getNode(ISD::FP_TO_SINT, dl, MVT::i32, Res);
7381       }
7382       if (Op.getOpcode() == ISD::FP_TO_UINT) {
7383         const uint64_t TwoE31[] = {0x41e0000000000000LL, 0};
7384         APFloat APF = APFloat(APFloat::PPCDoubleDouble(), APInt(128, TwoE31));
7385         SDValue Tmp = DAG.getConstantFP(APF, dl, MVT::ppcf128);
7386         //  X>=2^31 ? (int)(X-2^31)+0x80000000 : (int)X
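        // For example, for X = 3e9: since 3e9 >= 2^31, we compute
        // (int)(3e9 - 2147483648) = 852516352, and adding 0x80000000 wraps
        // to give 3000000000 as an unsigned result.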
7387         // FIXME: generated code sucks.
7388         // TODO: Are there fast-math-flags to propagate to this FSUB?
7389         SDValue True = DAG.getNode(ISD::FSUB, dl, MVT::ppcf128,
7390                                    Op.getOperand(0), Tmp);
7391         True = DAG.getNode(ISD::FP_TO_SINT, dl, MVT::i32, True);
7392         True = DAG.getNode(ISD::ADD, dl, MVT::i32, True,
7393                            DAG.getConstant(0x80000000, dl, MVT::i32));
7394         SDValue False = DAG.getNode(ISD::FP_TO_SINT, dl, MVT::i32,
7395                                     Op.getOperand(0));
7396         return DAG.getSelectCC(dl, Op.getOperand(0), Tmp, True, False,
7397                                ISD::SETGE);
7398       }
7399     }
7400 
7401     return SDValue();
7402   }
7403 
7404   if (Subtarget.hasDirectMove() && Subtarget.isPPC64())
7405     return LowerFP_TO_INTDirectMove(Op, DAG, dl);
7406 
7407   ReuseLoadInfo RLI;
7408   LowerFP_TO_INTForReuse(Op, RLI, DAG, dl);
7409 
7410   return DAG.getLoad(Op.getValueType(), dl, RLI.Chain, RLI.Ptr, RLI.MPI,
7411                      RLI.Alignment, RLI.MMOFlags(), RLI.AAInfo, RLI.Ranges);
7412 }
7413 
7414 // We're trying to insert a regular store, S, and then a load, L. If the
7415 // incoming value, O, is a load, we might just be able to have our load use the
7416 // address used by O. However, we don't know if anything else will store to
7417 // that address before we can load from it. To prevent this situation, we need
7418 // to insert our load, L, into the chain as a peer of O. To do this, we give L
7419 // the same chain operand as O, we create a token factor from the chain results
7420 // of O and L, and we replace all uses of O's chain result with that token
7421 // factor (see spliceIntoChain below for this last part).
7422 bool PPCTargetLowering::canReuseLoadAddress(SDValue Op, EVT MemVT,
7423                                             ReuseLoadInfo &RLI,
7424                                             SelectionDAG &DAG,
7425                                             ISD::LoadExtType ET) const {
7426   SDLoc dl(Op);
7427   if (ET == ISD::NON_EXTLOAD &&
7428       (Op.getOpcode() == ISD::FP_TO_UINT ||
7429        Op.getOpcode() == ISD::FP_TO_SINT) &&
7430       isOperationLegalOrCustom(Op.getOpcode(),
7431                                Op.getOperand(0).getValueType())) {
7432 
7433     LowerFP_TO_INTForReuse(Op, RLI, DAG, dl);
7434     return true;
7435   }
7436 
7437   LoadSDNode *LD = dyn_cast<LoadSDNode>(Op);
7438   if (!LD || LD->getExtensionType() != ET || LD->isVolatile() ||
7439       LD->isNonTemporal())
7440     return false;
7441   if (LD->getMemoryVT() != MemVT)
7442     return false;
7443 
7444   RLI.Ptr = LD->getBasePtr();
7445   if (LD->isIndexed() && !LD->getOffset().isUndef()) {
7446     assert(LD->getAddressingMode() == ISD::PRE_INC &&
7447            "Non-pre-inc AM on PPC?");
7448     RLI.Ptr = DAG.getNode(ISD::ADD, dl, RLI.Ptr.getValueType(), RLI.Ptr,
7449                           LD->getOffset());
7450   }
7451 
7452   RLI.Chain = LD->getChain();
7453   RLI.MPI = LD->getPointerInfo();
7454   RLI.IsDereferenceable = LD->isDereferenceable();
7455   RLI.IsInvariant = LD->isInvariant();
7456   RLI.Alignment = LD->getAlignment();
7457   RLI.AAInfo = LD->getAAInfo();
7458   RLI.Ranges = LD->getRanges();
7459 
7460   RLI.ResChain = SDValue(LD, LD->isIndexed() ? 2 : 1);
7461   return true;
7462 }
7463 
7464 // Given the head of the old chain, ResChain, insert a token factor containing
7465 // it and NewResChain, and make users of ResChain now be users of that token
7466 // factor.
7467 // TODO: Remove and use DAG::makeEquivalentMemoryOrdering() instead.
7468 void PPCTargetLowering::spliceIntoChain(SDValue ResChain,
7469                                         SDValue NewResChain,
7470                                         SelectionDAG &DAG) const {
7471   if (!ResChain)
7472     return;
7473 
7474   SDLoc dl(NewResChain);
7475 
7476   SDValue TF = DAG.getNode(ISD::TokenFactor, dl, MVT::Other,
7477                            NewResChain, DAG.getUNDEF(MVT::Other));
7478   assert(TF.getNode() != NewResChain.getNode() &&
7479          "A new TF really is required here");
7480 
7481   DAG.ReplaceAllUsesOfValueWith(ResChain, TF);
7482   DAG.UpdateNodeOperands(TF.getNode(), ResChain, NewResChain);
7483 }
7484 
/// Analyze the profitability of a direct move: prefer a float load over an
/// int load plus a direct move when the loaded integer value has no uses
/// other than conversions to floating point.
7488 bool PPCTargetLowering::directMoveIsProfitable(const SDValue &Op) const {
7489   SDNode *Origin = Op.getOperand(0).getNode();
7490   if (Origin->getOpcode() != ISD::LOAD)
7491     return true;
7492 
  // If there is no LXSIBZX/LXSIHZX (e.g. on Power8),
  // prefer a direct move if the memory access is only 1 or 2 bytes.
7495   MachineMemOperand *MMO = cast<LoadSDNode>(Origin)->getMemOperand();
7496   if (!Subtarget.hasP9Vector() && MMO->getSize() <= 2)
7497     return true;
7498 
7499   for (SDNode::use_iterator UI = Origin->use_begin(),
7500                             UE = Origin->use_end();
7501        UI != UE; ++UI) {
7502 
7503     // Only look at the users of the loaded value.
7504     if (UI.getUse().get().getResNo() != 0)
7505       continue;
7506 
7507     if (UI->getOpcode() != ISD::SINT_TO_FP &&
7508         UI->getOpcode() != ISD::UINT_TO_FP)
7509       return true;
7510   }
7511 
7512   return false;
7513 }
7514 
7515 /// Custom lowers integer to floating point conversions to use
7516 /// the direct move instructions available in ISA 2.07 to avoid the
7517 /// need for load/store combinations.
7518 SDValue PPCTargetLowering::LowerINT_TO_FPDirectMove(SDValue Op,
7519                                                     SelectionDAG &DAG,
7520                                                     const SDLoc &dl) const {
7521   assert((Op.getValueType() == MVT::f32 ||
7522           Op.getValueType() == MVT::f64) &&
7523          "Invalid floating point type as target of conversion");
7524   assert(Subtarget.hasFPCVT() &&
7525          "Int to FP conversions with direct moves require FPCVT");
7526   SDValue FP;
7527   SDValue Src = Op.getOperand(0);
7528   bool SinglePrec = Op.getValueType() == MVT::f32;
7529   bool WordInt = Src.getSimpleValueType().SimpleTy == MVT::i32;
7530   bool Signed = Op.getOpcode() == ISD::SINT_TO_FP;
7531   unsigned ConvOp = Signed ? (SinglePrec ? PPCISD::FCFIDS : PPCISD::FCFID) :
7532                              (SinglePrec ? PPCISD::FCFIDUS : PPCISD::FCFIDU);
7533 
  if (WordInt)
    FP = DAG.getNode(Signed ? PPCISD::MTVSRA : PPCISD::MTVSRZ,
                     dl, MVT::f64, Src);
  else
    FP = DAG.getNode(PPCISD::MTVSRA, dl, MVT::f64, Src);
  FP = DAG.getNode(ConvOp, dl, SinglePrec ? MVT::f32 : MVT::f64, FP);
7543 
7544   return FP;
7545 }
7546 
7547 static SDValue widenVec(SelectionDAG &DAG, SDValue Vec, const SDLoc &dl) {
7548 
7549   EVT VecVT = Vec.getValueType();
7550   assert(VecVT.isVector() && "Expected a vector type.");
7551   assert(VecVT.getSizeInBits() < 128 && "Vector is already full width.");
7552 
7553   EVT EltVT = VecVT.getVectorElementType();
7554   unsigned WideNumElts = 128 / EltVT.getSizeInBits();
7555   EVT WideVT = EVT::getVectorVT(*DAG.getContext(), EltVT, WideNumElts);
7556 
7557   unsigned NumConcat = WideNumElts / VecVT.getVectorNumElements();
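  // E.g. a v4i16 input gives WideNumElts = 8 and NumConcat = 2, so the
  // result is a v8i16 concat_vectors of the input with one undef v4i16.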
7558   SmallVector<SDValue, 16> Ops(NumConcat);
7559   Ops[0] = Vec;
7560   SDValue UndefVec = DAG.getUNDEF(VecVT);
7561   for (unsigned i = 1; i < NumConcat; ++i)
7562     Ops[i] = UndefVec;
7563 
7564   return DAG.getNode(ISD::CONCAT_VECTORS, dl, WideVT, Ops);
7565 }
7566 
7567 SDValue PPCTargetLowering::LowerINT_TO_FPVector(SDValue Op, SelectionDAG &DAG,
7568                                                 const SDLoc &dl) const {
7569 
7570   unsigned Opc = Op.getOpcode();
7571   assert((Opc == ISD::UINT_TO_FP || Opc == ISD::SINT_TO_FP) &&
7572          "Unexpected conversion type");
7573   assert((Op.getValueType() == MVT::v2f64 || Op.getValueType() == MVT::v4f32) &&
7574          "Supports conversions to v2f64/v4f32 only.");
7575 
7576   bool SignedConv = Opc == ISD::SINT_TO_FP;
7577   bool FourEltRes = Op.getValueType() == MVT::v4f32;
7578 
7579   SDValue Wide = widenVec(DAG, Op.getOperand(0), dl);
7580   EVT WideVT = Wide.getValueType();
7581   unsigned WideNumElts = WideVT.getVectorNumElements();
7582   MVT IntermediateVT = FourEltRes ? MVT::v4i32 : MVT::v2i64;
7583 
7584   SmallVector<int, 16> ShuffV;
7585   for (unsigned i = 0; i < WideNumElts; ++i)
7586     ShuffV.push_back(i + WideNumElts);
7587 
7588   int Stride = FourEltRes ? WideNumElts / 4 : WideNumElts / 2;
7589   int SaveElts = FourEltRes ? 4 : 2;
7590   if (Subtarget.isLittleEndian())
7591     for (int i = 0; i < SaveElts; i++)
7592       ShuffV[i * Stride] = i;
7593   else
7594     for (int i = 1; i <= SaveElts; i++)
7595       ShuffV[i * Stride - 1] = i - 1;
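  // E.g. for a (widened) v8i16 source producing v4f32 on little-endian,
  // ShuffV becomes <0, 9, 1, 11, 2, 13, 3, 15>: each source element is
  // paired with a zero (unsigned) or undef (signed) lane so the shuffle
  // result maps onto v4i32 elements below.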
7596 
7597   SDValue ShuffleSrc2 =
7598       SignedConv ? DAG.getUNDEF(WideVT) : DAG.getConstant(0, dl, WideVT);
7599   SDValue Arrange = DAG.getVectorShuffle(WideVT, dl, Wide, ShuffleSrc2, ShuffV);
7600   unsigned ExtendOp =
7601       SignedConv ? (unsigned)PPCISD::SExtVElems : (unsigned)ISD::BITCAST;
7602 
7603   SDValue Extend;
7604   if (!Subtarget.hasP9Altivec() && SignedConv) {
7605     Arrange = DAG.getBitcast(IntermediateVT, Arrange);
7606     Extend = DAG.getNode(ISD::SIGN_EXTEND_INREG, dl, IntermediateVT, Arrange,
7607                          DAG.getValueType(Op.getOperand(0).getValueType()));
7608   } else
7609     Extend = DAG.getNode(ExtendOp, dl, IntermediateVT, Arrange);
7610 
7611   return DAG.getNode(Opc, dl, Op.getValueType(), Extend);
7612 }
7613 
7614 SDValue PPCTargetLowering::LowerINT_TO_FP(SDValue Op,
7615                                           SelectionDAG &DAG) const {
7616   SDLoc dl(Op);
7617 
7618   EVT InVT = Op.getOperand(0).getValueType();
7619   EVT OutVT = Op.getValueType();
7620   if (OutVT.isVector() && OutVT.isFloatingPoint() &&
7621       isOperationCustom(Op.getOpcode(), InVT))
7622     return LowerINT_TO_FPVector(Op, DAG, dl);
7623 
7624   // Conversions to f128 are legal.
7625   if (EnableQuadPrecision && (Op.getValueType() == MVT::f128))
7626     return Op;
7627 
7628   if (Subtarget.hasQPX() && Op.getOperand(0).getValueType() == MVT::v4i1) {
7629     if (Op.getValueType() != MVT::v4f32 && Op.getValueType() != MVT::v4f64)
7630       return SDValue();
7631 
7632     SDValue Value = Op.getOperand(0);
    // The values are now known to be -1 (false) or 1 (true). To convert
    // this into 0 (false) and 1 (true), add 1 and then divide by 2
    // (multiply by 0.5). This can be done with an fma and the 0.5 constant:
    // (V+1.0)*0.5 = 0.5*V+0.5
7636     Value = DAG.getNode(PPCISD::QBFLT, dl, MVT::v4f64, Value);
7637 
7638     SDValue FPHalfs = DAG.getConstantFP(0.5, dl, MVT::v4f64);
7639 
7640     Value = DAG.getNode(ISD::FMA, dl, MVT::v4f64, Value, FPHalfs, FPHalfs);
7641 
7642     if (Op.getValueType() != MVT::v4f64)
7643       Value = DAG.getNode(ISD::FP_ROUND, dl,
7644                           Op.getValueType(), Value,
7645                           DAG.getIntPtrConstant(1, dl));
7646     return Value;
7647   }
7648 
7649   // Don't handle ppc_fp128 here; let it be lowered to a libcall.
7650   if (Op.getValueType() != MVT::f32 && Op.getValueType() != MVT::f64)
7651     return SDValue();
7652 
7653   if (Op.getOperand(0).getValueType() == MVT::i1)
7654     return DAG.getNode(ISD::SELECT, dl, Op.getValueType(), Op.getOperand(0),
7655                        DAG.getConstantFP(1.0, dl, Op.getValueType()),
7656                        DAG.getConstantFP(0.0, dl, Op.getValueType()));
7657 
  // If we have direct moves, we can do all the conversion and skip the
  // store/load; however, without FPCVT we can't do most conversions.
7660   if (Subtarget.hasDirectMove() && directMoveIsProfitable(Op) &&
7661       Subtarget.isPPC64() && Subtarget.hasFPCVT())
7662     return LowerINT_TO_FPDirectMove(Op, DAG, dl);
7663 
7664   assert((Op.getOpcode() == ISD::SINT_TO_FP || Subtarget.hasFPCVT()) &&
7665          "UINT_TO_FP is supported only with FPCVT");
7666 
7667   // If we have FCFIDS, then use it when converting to single-precision.
7668   // Otherwise, convert to double-precision and then round.
7669   unsigned FCFOp = (Subtarget.hasFPCVT() && Op.getValueType() == MVT::f32)
7670                        ? (Op.getOpcode() == ISD::UINT_TO_FP ? PPCISD::FCFIDUS
7671                                                             : PPCISD::FCFIDS)
7672                        : (Op.getOpcode() == ISD::UINT_TO_FP ? PPCISD::FCFIDU
7673                                                             : PPCISD::FCFID);
7674   MVT FCFTy = (Subtarget.hasFPCVT() && Op.getValueType() == MVT::f32)
7675                   ? MVT::f32
7676                   : MVT::f64;
7677 
7678   if (Op.getOperand(0).getValueType() == MVT::i64) {
7679     SDValue SINT = Op.getOperand(0);
7680     // When converting to single-precision, we actually need to convert
7681     // to double-precision first and then round to single-precision.
7682     // To avoid double-rounding effects during that operation, we have
7683     // to prepare the input operand.  Bits that might be truncated when
7684     // converting to double-precision are replaced by a bit that won't
7685     // be lost at this stage, but is below the single-precision rounding
7686     // position.
7687     //
7688     // However, if -enable-unsafe-fp-math is in effect, accept double
7689     // rounding to avoid the extra overhead.
7690     if (Op.getValueType() == MVT::f32 &&
7691         !Subtarget.hasFPCVT() &&
7692         !DAG.getTarget().Options.UnsafeFPMath) {
7693 
7694       // Twiddle input to make sure the low 11 bits are zero.  (If this
7695       // is the case, we are guaranteed the value will fit into the 53 bit
7696       // mantissa of an IEEE double-precision value without rounding.)
7697       // If any of those low 11 bits were not zero originally, make sure
7698       // bit 12 (value 2048) is set instead, so that the final rounding
7699       // to single-precision gets the correct result.
7700       SDValue Round = DAG.getNode(ISD::AND, dl, MVT::i64,
7701                                   SINT, DAG.getConstant(2047, dl, MVT::i64));
7702       Round = DAG.getNode(ISD::ADD, dl, MVT::i64,
7703                           Round, DAG.getConstant(2047, dl, MVT::i64));
7704       Round = DAG.getNode(ISD::OR, dl, MVT::i64, Round, SINT);
7705       Round = DAG.getNode(ISD::AND, dl, MVT::i64,
7706                           Round, DAG.getConstant(-2048, dl, MVT::i64));
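      // Net effect: if any of SINT's low 11 bits are set, the ADD carries
      // into the bit with value 2048, which survives the OR and the final
      // mask; if they are all clear, Round is exactly SINT.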
7707 
7708       // However, we cannot use that value unconditionally: if the magnitude
7709       // of the input value is small, the bit-twiddling we did above might
7710       // end up visibly changing the output.  Fortunately, in that case, we
7711       // don't need to twiddle bits since the original input will convert
7712       // exactly to double-precision floating-point already.  Therefore,
7713       // construct a conditional to use the original value if the top 11
7714       // bits are all sign-bit copies, and use the rounded value computed
7715       // above otherwise.
7716       SDValue Cond = DAG.getNode(ISD::SRA, dl, MVT::i64,
7717                                  SINT, DAG.getConstant(53, dl, MVT::i32));
7718       Cond = DAG.getNode(ISD::ADD, dl, MVT::i64,
7719                          Cond, DAG.getConstant(1, dl, MVT::i64));
7720       Cond = DAG.getSetCC(dl, MVT::i32,
7721                           Cond, DAG.getConstant(1, dl, MVT::i64), ISD::SETUGT);
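      // After the SRA by 53, a value whose top 11 bits are sign-bit copies
      // becomes 0 or -1; adding 1 yields 0 or 1, so the unsigned > 1 test
      // is true exactly when the bit-twiddled value is needed.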
7722 
7723       SINT = DAG.getNode(ISD::SELECT, dl, MVT::i64, Cond, Round, SINT);
7724     }
7725 
7726     ReuseLoadInfo RLI;
7727     SDValue Bits;
7728 
7729     MachineFunction &MF = DAG.getMachineFunction();
7730     if (canReuseLoadAddress(SINT, MVT::i64, RLI, DAG)) {
7731       Bits = DAG.getLoad(MVT::f64, dl, RLI.Chain, RLI.Ptr, RLI.MPI,
7732                          RLI.Alignment, RLI.MMOFlags(), RLI.AAInfo, RLI.Ranges);
7733       spliceIntoChain(RLI.ResChain, Bits.getValue(1), DAG);
7734     } else if (Subtarget.hasLFIWAX() &&
7735                canReuseLoadAddress(SINT, MVT::i32, RLI, DAG, ISD::SEXTLOAD)) {
7736       MachineMemOperand *MMO =
7737         MF.getMachineMemOperand(RLI.MPI, MachineMemOperand::MOLoad, 4,
7738                                 RLI.Alignment, RLI.AAInfo, RLI.Ranges);
7739       SDValue Ops[] = { RLI.Chain, RLI.Ptr };
7740       Bits = DAG.getMemIntrinsicNode(PPCISD::LFIWAX, dl,
7741                                      DAG.getVTList(MVT::f64, MVT::Other),
7742                                      Ops, MVT::i32, MMO);
7743       spliceIntoChain(RLI.ResChain, Bits.getValue(1), DAG);
7744     } else if (Subtarget.hasFPCVT() &&
7745                canReuseLoadAddress(SINT, MVT::i32, RLI, DAG, ISD::ZEXTLOAD)) {
7746       MachineMemOperand *MMO =
7747         MF.getMachineMemOperand(RLI.MPI, MachineMemOperand::MOLoad, 4,
7748                                 RLI.Alignment, RLI.AAInfo, RLI.Ranges);
7749       SDValue Ops[] = { RLI.Chain, RLI.Ptr };
7750       Bits = DAG.getMemIntrinsicNode(PPCISD::LFIWZX, dl,
7751                                      DAG.getVTList(MVT::f64, MVT::Other),
7752                                      Ops, MVT::i32, MMO);
7753       spliceIntoChain(RLI.ResChain, Bits.getValue(1), DAG);
7754     } else if (((Subtarget.hasLFIWAX() &&
7755                  SINT.getOpcode() == ISD::SIGN_EXTEND) ||
7756                 (Subtarget.hasFPCVT() &&
7757                  SINT.getOpcode() == ISD::ZERO_EXTEND)) &&
7758                SINT.getOperand(0).getValueType() == MVT::i32) {
7759       MachineFrameInfo &MFI = MF.getFrameInfo();
7760       EVT PtrVT = getPointerTy(DAG.getDataLayout());
7761 
7762       int FrameIdx = MFI.CreateStackObject(4, 4, false);
7763       SDValue FIdx = DAG.getFrameIndex(FrameIdx, PtrVT);
7764 
7765       SDValue Store =
7766           DAG.getStore(DAG.getEntryNode(), dl, SINT.getOperand(0), FIdx,
7767                        MachinePointerInfo::getFixedStack(
7768                            DAG.getMachineFunction(), FrameIdx));
7769 
7770       assert(cast<StoreSDNode>(Store)->getMemoryVT() == MVT::i32 &&
7771              "Expected an i32 store");
7772 
7773       RLI.Ptr = FIdx;
7774       RLI.Chain = Store;
7775       RLI.MPI =
7776           MachinePointerInfo::getFixedStack(DAG.getMachineFunction(), FrameIdx);
7777       RLI.Alignment = 4;
7778 
7779       MachineMemOperand *MMO =
7780         MF.getMachineMemOperand(RLI.MPI, MachineMemOperand::MOLoad, 4,
7781                                 RLI.Alignment, RLI.AAInfo, RLI.Ranges);
7782       SDValue Ops[] = { RLI.Chain, RLI.Ptr };
7783       Bits = DAG.getMemIntrinsicNode(SINT.getOpcode() == ISD::ZERO_EXTEND ?
7784                                      PPCISD::LFIWZX : PPCISD::LFIWAX,
7785                                      dl, DAG.getVTList(MVT::f64, MVT::Other),
7786                                      Ops, MVT::i32, MMO);
7787     } else
7788       Bits = DAG.getNode(ISD::BITCAST, dl, MVT::f64, SINT);
7789 
7790     SDValue FP = DAG.getNode(FCFOp, dl, FCFTy, Bits);
7791 
7792     if (Op.getValueType() == MVT::f32 && !Subtarget.hasFPCVT())
7793       FP = DAG.getNode(ISD::FP_ROUND, dl,
7794                        MVT::f32, FP, DAG.getIntPtrConstant(0, dl));
7795     return FP;
7796   }
7797 
7798   assert(Op.getOperand(0).getValueType() == MVT::i32 &&
7799          "Unhandled INT_TO_FP type in custom expander!");
7800   // Since we only generate this in 64-bit mode, we can take advantage of
7801   // 64-bit registers.  In particular, sign extend the input value into the
  // 64-bit register with extsw, store the WHOLE 64-bit value into the stack
  // slot, then lfd it and fcfid it.
7804   MachineFunction &MF = DAG.getMachineFunction();
7805   MachineFrameInfo &MFI = MF.getFrameInfo();
7806   EVT PtrVT = getPointerTy(MF.getDataLayout());
7807 
7808   SDValue Ld;
7809   if (Subtarget.hasLFIWAX() || Subtarget.hasFPCVT()) {
7810     ReuseLoadInfo RLI;
7811     bool ReusingLoad;
7812     if (!(ReusingLoad = canReuseLoadAddress(Op.getOperand(0), MVT::i32, RLI,
7813                                             DAG))) {
7814       int FrameIdx = MFI.CreateStackObject(4, 4, false);
7815       SDValue FIdx = DAG.getFrameIndex(FrameIdx, PtrVT);
7816 
7817       SDValue Store =
7818           DAG.getStore(DAG.getEntryNode(), dl, Op.getOperand(0), FIdx,
7819                        MachinePointerInfo::getFixedStack(
7820                            DAG.getMachineFunction(), FrameIdx));
7821 
7822       assert(cast<StoreSDNode>(Store)->getMemoryVT() == MVT::i32 &&
7823              "Expected an i32 store");
7824 
7825       RLI.Ptr = FIdx;
7826       RLI.Chain = Store;
7827       RLI.MPI =
7828           MachinePointerInfo::getFixedStack(DAG.getMachineFunction(), FrameIdx);
7829       RLI.Alignment = 4;
7830     }
7831 
7832     MachineMemOperand *MMO =
7833       MF.getMachineMemOperand(RLI.MPI, MachineMemOperand::MOLoad, 4,
7834                               RLI.Alignment, RLI.AAInfo, RLI.Ranges);
7835     SDValue Ops[] = { RLI.Chain, RLI.Ptr };
7836     Ld = DAG.getMemIntrinsicNode(Op.getOpcode() == ISD::UINT_TO_FP ?
7837                                    PPCISD::LFIWZX : PPCISD::LFIWAX,
7838                                  dl, DAG.getVTList(MVT::f64, MVT::Other),
7839                                  Ops, MVT::i32, MMO);
7840     if (ReusingLoad)
7841       spliceIntoChain(RLI.ResChain, Ld.getValue(1), DAG);
7842   } else {
7843     assert(Subtarget.isPPC64() &&
7844            "i32->FP without LFIWAX supported only on PPC64");
7845 
7846     int FrameIdx = MFI.CreateStackObject(8, 8, false);
7847     SDValue FIdx = DAG.getFrameIndex(FrameIdx, PtrVT);
7848 
7849     SDValue Ext64 = DAG.getNode(ISD::SIGN_EXTEND, dl, MVT::i64,
7850                                 Op.getOperand(0));
7851 
7852     // STD the extended value into the stack slot.
7853     SDValue Store = DAG.getStore(
7854         DAG.getEntryNode(), dl, Ext64, FIdx,
7855         MachinePointerInfo::getFixedStack(DAG.getMachineFunction(), FrameIdx));
7856 
7857     // Load the value as a double.
7858     Ld = DAG.getLoad(
7859         MVT::f64, dl, Store, FIdx,
7860         MachinePointerInfo::getFixedStack(DAG.getMachineFunction(), FrameIdx));
7861   }
7862 
7863   // FCFID it and return it.
7864   SDValue FP = DAG.getNode(FCFOp, dl, FCFTy, Ld);
7865   if (Op.getValueType() == MVT::f32 && !Subtarget.hasFPCVT())
7866     FP = DAG.getNode(ISD::FP_ROUND, dl, MVT::f32, FP,
7867                      DAG.getIntPtrConstant(0, dl));
7868   return FP;
7869 }
7870 
7871 SDValue PPCTargetLowering::LowerFLT_ROUNDS_(SDValue Op,
7872                                             SelectionDAG &DAG) const {
7873   SDLoc dl(Op);
7874   /*
   The rounding mode is in bits 30:31 of FPSCR, and has the following
7876    settings:
7877      00 Round to nearest
7878      01 Round to 0
7879      10 Round to +inf
7880      11 Round to -inf
7881 
7882   FLT_ROUNDS, on the other hand, expects the following:
7883     -1 Undefined
7884      0 Round to 0
7885      1 Round to nearest
7886      2 Round to +inf
7887      3 Round to -inf
7888 
7889   To perform the conversion, we do:
7890     ((FPSCR & 0x3) ^ ((~FPSCR & 0x3) >> 1))
7891   */
7892 
7893   MachineFunction &MF = DAG.getMachineFunction();
7894   EVT VT = Op.getValueType();
7895   EVT PtrVT = getPointerTy(MF.getDataLayout());
7896 
7897   // Save FP Control Word to register
7898   EVT NodeTys[] = {
7899     MVT::f64,    // return register
7900     MVT::Glue    // unused in this context
7901   };
  SDValue MFFSVal = DAG.getNode(PPCISD::MFFS, dl, NodeTys, None);
7903 
7904   // Save FP register to stack slot
7905   int SSFI = MF.getFrameInfo().CreateStackObject(8, 8, false);
7906   SDValue StackSlot = DAG.getFrameIndex(SSFI, PtrVT);
  SDValue Store = DAG.getStore(DAG.getEntryNode(), dl, MFFSVal, StackSlot,
                               MachinePointerInfo());
7909 
7910   // Load FP Control Word from low 32 bits of stack slot.
7911   SDValue Four = DAG.getConstant(4, dl, PtrVT);
7912   SDValue Addr = DAG.getNode(ISD::ADD, dl, PtrVT, StackSlot, Four);
7913   SDValue CWD = DAG.getLoad(MVT::i32, dl, Store, Addr, MachinePointerInfo());
7914 
7915   // Transform as necessary
7916   SDValue CWD1 =
7917     DAG.getNode(ISD::AND, dl, MVT::i32,
7918                 CWD, DAG.getConstant(3, dl, MVT::i32));
7919   SDValue CWD2 =
7920     DAG.getNode(ISD::SRL, dl, MVT::i32,
7921                 DAG.getNode(ISD::AND, dl, MVT::i32,
7922                             DAG.getNode(ISD::XOR, dl, MVT::i32,
7923                                         CWD, DAG.getConstant(3, dl, MVT::i32)),
7924                             DAG.getConstant(3, dl, MVT::i32)),
7925                 DAG.getConstant(1, dl, MVT::i32));
7926 
7927   SDValue RetVal =
7928     DAG.getNode(ISD::XOR, dl, MVT::i32, CWD1, CWD2);
7929 
7930   return DAG.getNode((VT.getSizeInBits() < 16 ?
7931                       ISD::TRUNCATE : ISD::ZERO_EXTEND), dl, VT, RetVal);
7932 }
7933 
7934 SDValue PPCTargetLowering::LowerSHL_PARTS(SDValue Op, SelectionDAG &DAG) const {
7935   EVT VT = Op.getValueType();
7936   unsigned BitWidth = VT.getSizeInBits();
7937   SDLoc dl(Op);
7938   assert(Op.getNumOperands() == 3 &&
7939          VT == Op.getOperand(1).getValueType() &&
7940          "Unexpected SHL!");
7941 
7942   // Expand into a bunch of logical ops.  Note that these ops
7943   // depend on the PPC behavior for oversized shift amounts.
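  // For example, with BitWidth = 32 and Amt = 40: Hi << 40 and
  // Lo >> (32 - 40) are oversized shifts yielding zero, leaving
  // OutHi = Lo << (40 - 32) and OutLo = Lo << 40 = 0, which is the
  // correct 64-bit result.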
7944   SDValue Lo = Op.getOperand(0);
7945   SDValue Hi = Op.getOperand(1);
7946   SDValue Amt = Op.getOperand(2);
7947   EVT AmtVT = Amt.getValueType();
7948 
7949   SDValue Tmp1 = DAG.getNode(ISD::SUB, dl, AmtVT,
7950                              DAG.getConstant(BitWidth, dl, AmtVT), Amt);
7951   SDValue Tmp2 = DAG.getNode(PPCISD::SHL, dl, VT, Hi, Amt);
7952   SDValue Tmp3 = DAG.getNode(PPCISD::SRL, dl, VT, Lo, Tmp1);
7953   SDValue Tmp4 = DAG.getNode(ISD::OR , dl, VT, Tmp2, Tmp3);
7954   SDValue Tmp5 = DAG.getNode(ISD::ADD, dl, AmtVT, Amt,
7955                              DAG.getConstant(-BitWidth, dl, AmtVT));
7956   SDValue Tmp6 = DAG.getNode(PPCISD::SHL, dl, VT, Lo, Tmp5);
7957   SDValue OutHi = DAG.getNode(ISD::OR, dl, VT, Tmp4, Tmp6);
7958   SDValue OutLo = DAG.getNode(PPCISD::SHL, dl, VT, Lo, Amt);
7959   SDValue OutOps[] = { OutLo, OutHi };
7960   return DAG.getMergeValues(OutOps, dl);
7961 }
7962 
7963 SDValue PPCTargetLowering::LowerSRL_PARTS(SDValue Op, SelectionDAG &DAG) const {
7964   EVT VT = Op.getValueType();
7965   SDLoc dl(Op);
7966   unsigned BitWidth = VT.getSizeInBits();
7967   assert(Op.getNumOperands() == 3 &&
7968          VT == Op.getOperand(1).getValueType() &&
7969          "Unexpected SRL!");
7970 
7971   // Expand into a bunch of logical ops.  Note that these ops
7972   // depend on the PPC behavior for oversized shift amounts.
7973   SDValue Lo = Op.getOperand(0);
7974   SDValue Hi = Op.getOperand(1);
7975   SDValue Amt = Op.getOperand(2);
7976   EVT AmtVT = Amt.getValueType();
7977 
7978   SDValue Tmp1 = DAG.getNode(ISD::SUB, dl, AmtVT,
7979                              DAG.getConstant(BitWidth, dl, AmtVT), Amt);
7980   SDValue Tmp2 = DAG.getNode(PPCISD::SRL, dl, VT, Lo, Amt);
7981   SDValue Tmp3 = DAG.getNode(PPCISD::SHL, dl, VT, Hi, Tmp1);
7982   SDValue Tmp4 = DAG.getNode(ISD::OR, dl, VT, Tmp2, Tmp3);
7983   SDValue Tmp5 = DAG.getNode(ISD::ADD, dl, AmtVT, Amt,
7984                              DAG.getConstant(-BitWidth, dl, AmtVT));
7985   SDValue Tmp6 = DAG.getNode(PPCISD::SRL, dl, VT, Hi, Tmp5);
7986   SDValue OutLo = DAG.getNode(ISD::OR, dl, VT, Tmp4, Tmp6);
7987   SDValue OutHi = DAG.getNode(PPCISD::SRL, dl, VT, Hi, Amt);
7988   SDValue OutOps[] = { OutLo, OutHi };
7989   return DAG.getMergeValues(OutOps, dl);
7990 }
7991 
7992 SDValue PPCTargetLowering::LowerSRA_PARTS(SDValue Op, SelectionDAG &DAG) const {
7993   SDLoc dl(Op);
7994   EVT VT = Op.getValueType();
7995   unsigned BitWidth = VT.getSizeInBits();
7996   assert(Op.getNumOperands() == 3 &&
7997          VT == Op.getOperand(1).getValueType() &&
7998          "Unexpected SRA!");
7999 
8000   // Expand into a bunch of logical ops, followed by a select_cc.
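  // Unlike SRL_PARTS we cannot simply OR the partial results: an oversized
  // SRA fills with sign-bit copies rather than zeros, so the low word is
  // instead chosen with a select on the sign of Amt - BitWidth.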
8001   SDValue Lo = Op.getOperand(0);
8002   SDValue Hi = Op.getOperand(1);
8003   SDValue Amt = Op.getOperand(2);
8004   EVT AmtVT = Amt.getValueType();
8005 
8006   SDValue Tmp1 = DAG.getNode(ISD::SUB, dl, AmtVT,
8007                              DAG.getConstant(BitWidth, dl, AmtVT), Amt);
8008   SDValue Tmp2 = DAG.getNode(PPCISD::SRL, dl, VT, Lo, Amt);
8009   SDValue Tmp3 = DAG.getNode(PPCISD::SHL, dl, VT, Hi, Tmp1);
8010   SDValue Tmp4 = DAG.getNode(ISD::OR, dl, VT, Tmp2, Tmp3);
8011   SDValue Tmp5 = DAG.getNode(ISD::ADD, dl, AmtVT, Amt,
8012                              DAG.getConstant(-BitWidth, dl, AmtVT));
8013   SDValue Tmp6 = DAG.getNode(PPCISD::SRA, dl, VT, Hi, Tmp5);
8014   SDValue OutHi = DAG.getNode(PPCISD::SRA, dl, VT, Hi, Amt);
8015   SDValue OutLo = DAG.getSelectCC(dl, Tmp5, DAG.getConstant(0, dl, AmtVT),
8016                                   Tmp4, Tmp6, ISD::SETLE);
8017   SDValue OutOps[] = { OutLo, OutHi };
8018   return DAG.getMergeValues(OutOps, dl);
8019 }
8020 
8021 //===----------------------------------------------------------------------===//
8022 // Vector related lowering.
8023 //
8024 
/// BuildSplatI - Build a canonical splat immediate of Val with an element
/// size of SplatSize.  Cast the result to VT.
8027 static SDValue BuildSplatI(int Val, unsigned SplatSize, EVT VT,
8028                            SelectionDAG &DAG, const SDLoc &dl) {
8029   assert(Val >= -16 && Val <= 15 && "vsplti is out of range!");
8030 
8031   static const MVT VTys[] = { // canonical VT to use for each size.
8032     MVT::v16i8, MVT::v8i16, MVT::Other, MVT::v4i32
8033   };
8034 
8035   EVT ReqVT = VT != MVT::Other ? VT : VTys[SplatSize-1];
8036 
8037   // Force vspltis[hw] -1 to vspltisb -1 to canonicalize.
8038   if (Val == -1)
8039     SplatSize = 1;
8040 
8041   EVT CanonicalVT = VTys[SplatSize-1];
8042 
8043   // Build a canonical splat for this value.
8044   return DAG.getBitcast(ReqVT, DAG.getConstant(Val, dl, CanonicalVT));
8045 }
8046 
8047 /// BuildIntrinsicOp - Return a unary operator intrinsic node with the
8048 /// specified intrinsic ID.
8049 static SDValue BuildIntrinsicOp(unsigned IID, SDValue Op, SelectionDAG &DAG,
8050                                 const SDLoc &dl, EVT DestVT = MVT::Other) {
8051   if (DestVT == MVT::Other) DestVT = Op.getValueType();
8052   return DAG.getNode(ISD::INTRINSIC_WO_CHAIN, dl, DestVT,
8053                      DAG.getConstant(IID, dl, MVT::i32), Op);
8054 }
8055 
8056 /// BuildIntrinsicOp - Return a binary operator intrinsic node with the
8057 /// specified intrinsic ID.
8058 static SDValue BuildIntrinsicOp(unsigned IID, SDValue LHS, SDValue RHS,
8059                                 SelectionDAG &DAG, const SDLoc &dl,
8060                                 EVT DestVT = MVT::Other) {
8061   if (DestVT == MVT::Other) DestVT = LHS.getValueType();
8062   return DAG.getNode(ISD::INTRINSIC_WO_CHAIN, dl, DestVT,
8063                      DAG.getConstant(IID, dl, MVT::i32), LHS, RHS);
8064 }
8065 
8066 /// BuildIntrinsicOp - Return a ternary operator intrinsic node with the
8067 /// specified intrinsic ID.
8068 static SDValue BuildIntrinsicOp(unsigned IID, SDValue Op0, SDValue Op1,
8069                                 SDValue Op2, SelectionDAG &DAG, const SDLoc &dl,
8070                                 EVT DestVT = MVT::Other) {
8071   if (DestVT == MVT::Other) DestVT = Op0.getValueType();
8072   return DAG.getNode(ISD::INTRINSIC_WO_CHAIN, dl, DestVT,
8073                      DAG.getConstant(IID, dl, MVT::i32), Op0, Op1, Op2);
8074 }
8075 
8076 /// BuildVSLDOI - Return a VECTOR_SHUFFLE that is a vsldoi of the specified
8077 /// amount.  The result has the specified value type.
8078 static SDValue BuildVSLDOI(SDValue LHS, SDValue RHS, unsigned Amt, EVT VT,
8079                            SelectionDAG &DAG, const SDLoc &dl) {
8080   // Force LHS/RHS to be the right type.
8081   LHS = DAG.getNode(ISD::BITCAST, dl, MVT::v16i8, LHS);
8082   RHS = DAG.getNode(ISD::BITCAST, dl, MVT::v16i8, RHS);
8083 
8084   int Ops[16];
8085   for (unsigned i = 0; i != 16; ++i)
8086     Ops[i] = i + Amt;
8087   SDValue T = DAG.getVectorShuffle(MVT::v16i8, dl, LHS, RHS, Ops);
8088   return DAG.getNode(ISD::BITCAST, dl, VT, T);
8089 }
8090 
8091 /// Do we have an efficient pattern in a .td file for this node?
8092 ///
8093 /// \param V - pointer to the BuildVectorSDNode being matched
8094 /// \param HasDirectMove - does this subtarget have VSR <-> GPR direct moves?
8095 ///
8096 /// There are some patterns where it is beneficial to keep a BUILD_VECTOR
8097 /// node as a BUILD_VECTOR node rather than expanding it. The patterns where
8098 /// the opposite is true (expansion is beneficial) are:
/// - The node builds a vector out of integers that are not 32 or 64 bits wide
8100 /// - The node builds a vector out of constants
8101 /// - The node is a "load-and-splat"
8102 /// In all other cases, we will choose to keep the BUILD_VECTOR.
8103 static bool haveEfficientBuildVectorPattern(BuildVectorSDNode *V,
8104                                             bool HasDirectMove,
8105                                             bool HasP8Vector) {
8106   EVT VecVT = V->getValueType(0);
8107   bool RightType = VecVT == MVT::v2f64 ||
8108     (HasP8Vector && VecVT == MVT::v4f32) ||
8109     (HasDirectMove && (VecVT == MVT::v2i64 || VecVT == MVT::v4i32));
8110   if (!RightType)
8111     return false;
8112 
8113   bool IsSplat = true;
8114   bool IsLoad = false;
8115   SDValue Op0 = V->getOperand(0);
8116 
8117   // This function is called in a block that confirms the node is not a constant
8118   // splat. So a constant BUILD_VECTOR here means the vector is built out of
8119   // different constants.
8120   if (V->isConstant())
8121     return false;
8122   for (int i = 0, e = V->getNumOperands(); i < e; ++i) {
8123     if (V->getOperand(i).isUndef())
8124       return false;
8125     // We want to expand nodes that represent load-and-splat even if the
8126     // loaded value is a floating point truncation or conversion to int.
8127     if (V->getOperand(i).getOpcode() == ISD::LOAD ||
8128         (V->getOperand(i).getOpcode() == ISD::FP_ROUND &&
8129          V->getOperand(i).getOperand(0).getOpcode() == ISD::LOAD) ||
8130         (V->getOperand(i).getOpcode() == ISD::FP_TO_SINT &&
8131          V->getOperand(i).getOperand(0).getOpcode() == ISD::LOAD) ||
8132         (V->getOperand(i).getOpcode() == ISD::FP_TO_UINT &&
8133          V->getOperand(i).getOperand(0).getOpcode() == ISD::LOAD))
8134       IsLoad = true;
8135     // If the operands are different or the input is not a load and has more
8136     // uses than just this BV node, then it isn't a splat.
8137     if (V->getOperand(i) != Op0 ||
8138         (!IsLoad && !V->isOnlyUserOf(V->getOperand(i).getNode())))
8139       IsSplat = false;
8140   }
8141   return !(IsSplat && IsLoad);
8142 }
8143 
8144 // Lower BITCAST(f128, (build_pair i64, i64)) to BUILD_FP128.
8145 SDValue PPCTargetLowering::LowerBITCAST(SDValue Op, SelectionDAG &DAG) const {
8146 
8147   SDLoc dl(Op);
8148   SDValue Op0 = Op->getOperand(0);
8149 
8150   if (!EnableQuadPrecision ||
8151       (Op.getValueType() != MVT::f128 ) ||
8152       (Op0.getOpcode() != ISD::BUILD_PAIR) ||
8153       (Op0.getOperand(0).getValueType() !=  MVT::i64) ||
8154       (Op0.getOperand(1).getValueType() != MVT::i64))
8155     return SDValue();
8156 
8157   return DAG.getNode(PPCISD::BUILD_FP128, dl, MVT::f128, Op0.getOperand(0),
8158                      Op0.getOperand(1));
8159 }
8160 
8161 // If this is a case we can't handle, return null and let the default
8162 // expansion code take care of it.  If we CAN select this case, and if it
8163 // selects to a single instruction, return Op.  Otherwise, if we can codegen
8164 // this case more efficiently than a constant pool load, lower it to the
8165 // sequence of ops that should be used.
8166 SDValue PPCTargetLowering::LowerBUILD_VECTOR(SDValue Op,
8167                                              SelectionDAG &DAG) const {
8168   SDLoc dl(Op);
8169   BuildVectorSDNode *BVN = dyn_cast<BuildVectorSDNode>(Op.getNode());
8170   assert(BVN && "Expected a BuildVectorSDNode in LowerBUILD_VECTOR");
8171 
8172   if (Subtarget.hasQPX() && Op.getValueType() == MVT::v4i1) {
8173     // We first build an i32 vector, load it into a QPX register,
8174     // then convert it to a floating-point vector and compare it
8175     // to a zero vector to get the boolean result.
8176     MachineFrameInfo &MFI = DAG.getMachineFunction().getFrameInfo();
8177     int FrameIdx = MFI.CreateStackObject(16, 16, false);
8178     MachinePointerInfo PtrInfo =
8179         MachinePointerInfo::getFixedStack(DAG.getMachineFunction(), FrameIdx);
8180     EVT PtrVT = getPointerTy(DAG.getDataLayout());
8181     SDValue FIdx = DAG.getFrameIndex(FrameIdx, PtrVT);
8182 
8183     assert(BVN->getNumOperands() == 4 &&
8184       "BUILD_VECTOR for v4i1 does not have 4 operands");
8185 
8186     bool IsConst = true;
8187     for (unsigned i = 0; i < 4; ++i) {
8188       if (BVN->getOperand(i).isUndef()) continue;
8189       if (!isa<ConstantSDNode>(BVN->getOperand(i))) {
8190         IsConst = false;
8191         break;
8192       }
8193     }
8194 
8195     if (IsConst) {
8196       Constant *One =
8197         ConstantFP::get(Type::getFloatTy(*DAG.getContext()), 1.0);
8198       Constant *NegOne =
8199         ConstantFP::get(Type::getFloatTy(*DAG.getContext()), -1.0);
8200 
8201       Constant *CV[4];
8202       for (unsigned i = 0; i < 4; ++i) {
8203         if (BVN->getOperand(i).isUndef())
8204           CV[i] = UndefValue::get(Type::getFloatTy(*DAG.getContext()));
8205         else if (isNullConstant(BVN->getOperand(i)))
8206           CV[i] = NegOne;
8207         else
8208           CV[i] = One;
8209       }
8210 
8211       Constant *CP = ConstantVector::get(CV);
8212       SDValue CPIdx = DAG.getConstantPool(CP, getPointerTy(DAG.getDataLayout()),
8213                                           16 /* alignment */);
8214 
8215       SDValue Ops[] = {DAG.getEntryNode(), CPIdx};
8216       SDVTList VTs = DAG.getVTList({MVT::v4i1, /*chain*/ MVT::Other});
8217       return DAG.getMemIntrinsicNode(
8218           PPCISD::QVLFSb, dl, VTs, Ops, MVT::v4f32,
8219           MachinePointerInfo::getConstantPool(DAG.getMachineFunction()));
8220     }
8221 
8222     SmallVector<SDValue, 4> Stores;
8223     for (unsigned i = 0; i < 4; ++i) {
8224       if (BVN->getOperand(i).isUndef()) continue;
8225 
8226       unsigned Offset = 4*i;
8227       SDValue Idx = DAG.getConstant(Offset, dl, FIdx.getValueType());
8228       Idx = DAG.getNode(ISD::ADD, dl, FIdx.getValueType(), FIdx, Idx);
8229 
8230       unsigned StoreSize = BVN->getOperand(i).getValueType().getStoreSize();
8231       if (StoreSize > 4) {
8232         Stores.push_back(
8233             DAG.getTruncStore(DAG.getEntryNode(), dl, BVN->getOperand(i), Idx,
8234                               PtrInfo.getWithOffset(Offset), MVT::i32));
8235       } else {
8236         SDValue StoreValue = BVN->getOperand(i);
8237         if (StoreSize < 4)
8238           StoreValue = DAG.getNode(ISD::ANY_EXTEND, dl, MVT::i32, StoreValue);
8239 
8240         Stores.push_back(DAG.getStore(DAG.getEntryNode(), dl, StoreValue, Idx,
8241                                       PtrInfo.getWithOffset(Offset)));
8242       }
8243     }
8244 
8245     SDValue StoreChain;
8246     if (!Stores.empty())
8247       StoreChain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, Stores);
8248     else
8249       StoreChain = DAG.getEntryNode();
8250 
8251     // Now load from v4i32 into the QPX register; this will extend it to
8252     // v4i64 but not yet convert it to a floating point. Nevertheless, this
8253     // is typed as v4f64 because the QPX register integer states are not
8254     // explicitly represented.
8255 
8256     SDValue Ops[] = {StoreChain,
8257                      DAG.getConstant(Intrinsic::ppc_qpx_qvlfiwz, dl, MVT::i32),
8258                      FIdx};
8259     SDVTList VTs = DAG.getVTList({MVT::v4f64, /*chain*/ MVT::Other});
8260 
8261     SDValue LoadedVect = DAG.getMemIntrinsicNode(ISD::INTRINSIC_W_CHAIN,
8262       dl, VTs, Ops, MVT::v4i32, PtrInfo);
8263     LoadedVect = DAG.getNode(ISD::INTRINSIC_WO_CHAIN, dl, MVT::v4f64,
8264       DAG.getConstant(Intrinsic::ppc_qpx_qvfcfidu, dl, MVT::i32),
8265       LoadedVect);
8266 
8267     SDValue FPZeros = DAG.getConstantFP(0.0, dl, MVT::v4f64);
8268 
8269     return DAG.getSetCC(dl, MVT::v4i1, LoadedVect, FPZeros, ISD::SETEQ);
8270   }
8271 
8272   // All other QPX vectors are handled by generic code.
8273   if (Subtarget.hasQPX())
8274     return SDValue();
8275 
8276   // Check if this is a splat of a constant value.
8277   APInt APSplatBits, APSplatUndef;
8278   unsigned SplatBitSize;
8279   bool HasAnyUndefs;
  if (!BVN->isConstantSplat(APSplatBits, APSplatUndef, SplatBitSize,
                            HasAnyUndefs, 0, !Subtarget.isLittleEndian()) ||
      SplatBitSize > 32) {
8283     // BUILD_VECTOR nodes that are not constant splats of up to 32-bits can be
8284     // lowered to VSX instructions under certain conditions.
8285     // Without VSX, there is no pattern more efficient than expanding the node.
8286     if (Subtarget.hasVSX() &&
8287         haveEfficientBuildVectorPattern(BVN, Subtarget.hasDirectMove(),
8288                                         Subtarget.hasP8Vector()))
8289       return Op;
8290     return SDValue();
8291   }
8292 
8293   unsigned SplatBits = APSplatBits.getZExtValue();
8294   unsigned SplatUndef = APSplatUndef.getZExtValue();
8295   unsigned SplatSize = SplatBitSize / 8;
8296 
8297   // First, handle single instruction cases.
8298 
8299   // All zeros?
8300   if (SplatBits == 0) {
8301     // Canonicalize all zero vectors to be v4i32.
8302     if (Op.getValueType() != MVT::v4i32 || HasAnyUndefs) {
8303       SDValue Z = DAG.getConstant(0, dl, MVT::v4i32);
8304       Op = DAG.getNode(ISD::BITCAST, dl, Op.getValueType(), Z);
8305     }
8306     return Op;
8307   }
8308 
8309   // We have XXSPLTIB for constant splats one byte wide
8310   if (Subtarget.hasP9Vector() && SplatSize == 1) {
8311     // This is a splat of 1-byte elements with some elements potentially undef.
8312     // Rather than trying to match undef in the SDAG patterns, ensure that all
8313     // elements are the same constant.
8314     if (HasAnyUndefs || ISD::isBuildVectorAllOnes(BVN)) {
8315       SmallVector<SDValue, 16> Ops(16, DAG.getConstant(SplatBits,
8316                                                        dl, MVT::i32));
8317       SDValue NewBV = DAG.getBuildVector(MVT::v16i8, dl, Ops);
8318       if (Op.getValueType() != MVT::v16i8)
8319         return DAG.getBitcast(Op.getValueType(), NewBV);
8320       return NewBV;
8321     }
8322 
8323     // BuildVectorSDNode::isConstantSplat() is actually pretty smart. It'll
8324     // detect that constant splats like v8i16: 0xABAB are really just splats
8325     // of a 1-byte constant. In this case, we need to convert the node to a
8326     // splat of v16i8 and a bitcast.
8327     if (Op.getValueType() != MVT::v16i8)
8328       return DAG.getBitcast(Op.getValueType(),
8329                             DAG.getConstant(SplatBits, dl, MVT::v16i8));
8330 
8331     return Op;
8332   }
8333 
8334   // If the sign extended value is in the range [-16,15], use VSPLTI[bhw].
  int32_t SextVal = (int32_t(SplatBits << (32 - SplatBitSize)) >>
                     (32 - SplatBitSize));
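  // (Shifting left by 32 - SplatBitSize and arithmetically shifting back
  // sign-extends the SplatBitSize-bit splat value to 32 bits.)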
8337   if (SextVal >= -16 && SextVal <= 15)
8338     return BuildSplatI(SextVal, SplatSize, Op.getValueType(), DAG, dl);
8339 
8340   // Two instruction sequences.
8341 
8342   // If this value is in the range [-32,30] and is even, use:
8343   //     VSPLTI[bhw](val/2) + VSPLTI[bhw](val/2)
8344   // If this value is in the range [17,31] and is odd, use:
8345   //     VSPLTI[bhw](val-16) - VSPLTI[bhw](-16)
8346   // If this value is in the range [-31,-17] and is odd, use:
8347   //     VSPLTI[bhw](val+16) + VSPLTI[bhw](-16)
8348   // Note the last two are three-instruction sequences.
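  // For example, a splat of 24 becomes vspltisw(12) + vadduwm, and a splat
  // of 27 becomes vspltisw(11) - vspltisw(-16) via vsubuwm (11 - (-16) = 27).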
8349   if (SextVal >= -32 && SextVal <= 31) {
8350     // To avoid having these optimizations undone by constant folding,
8351     // we convert to a pseudo that will be expanded later into one of
8352     // the above forms.
8353     SDValue Elt = DAG.getConstant(SextVal, dl, MVT::i32);
8354     EVT VT = (SplatSize == 1 ? MVT::v16i8 :
8355               (SplatSize == 2 ? MVT::v8i16 : MVT::v4i32));
8356     SDValue EltSize = DAG.getConstant(SplatSize, dl, MVT::i32);
8357     SDValue RetVal = DAG.getNode(PPCISD::VADD_SPLAT, dl, VT, Elt, EltSize);
8358     if (VT == Op.getValueType())
8359       return RetVal;
8360     else
8361       return DAG.getNode(ISD::BITCAST, dl, Op.getValueType(), RetVal);
8362   }
8363 
8364   // If this is 0x8000_0000 x 4, turn into vspltisw + vslw.  If it is
8365   // 0x7FFF_FFFF x 4, turn it into not(0x8000_0000).  This is important
8366   // for fneg/fabs.
8367   if (SplatSize == 4 && SplatBits == (0x7FFFFFFF&~SplatUndef)) {
8368     // Make -1 and vspltisw -1:
8369     SDValue OnesV = BuildSplatI(-1, 4, MVT::v4i32, DAG, dl);
8370 
8371     // Make the VSLW intrinsic, computing 0x8000_0000.
8372     SDValue Res = BuildIntrinsicOp(Intrinsic::ppc_altivec_vslw, OnesV,
8373                                    OnesV, DAG, dl);
8374 
8375     // xor by OnesV to invert it.
8376     Res = DAG.getNode(ISD::XOR, dl, MVT::v4i32, Res, OnesV);
8377     return DAG.getNode(ISD::BITCAST, dl, Op.getValueType(), Res);
8378   }
8379 
8380   // Check to see if this is a wide variety of vsplti*, binop self cases.
8381   static const signed char SplatCsts[] = {
8382     -1, 1, -2, 2, -3, 3, -4, 4, -5, 5, -6, 6, -7, 7,
8383     -8, 8, -9, 9, -10, 10, -11, 11, -12, 12, -13, 13, 14, -14, 15, -15, -16
8384   };
8385 
8386   for (unsigned idx = 0; idx < array_lengthof(SplatCsts); ++idx) {
    // Indirect through the SplatCsts array so that we favor 'vsplti -1' for
    // cases which are ambiguous (e.g. formation of 0x8000_0000).  'vsplti -1'
    // is placed first in the array.
    int i = SplatCsts[idx];
8390 
8391     // Figure out what shift amount will be used by altivec if shifted by i in
8392     // this splat size.
8393     unsigned TypeShiftAmt = i & (SplatBitSize-1);
8394 
8395     // vsplti + shl self.
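    // E.g. a splat of 0xC0 bytes: vspltisb(-4) gives 0xFC in each byte, and
    // vslb of that vector by itself shifts each byte left by
    // (0xFC & 0x7) = 4, producing 0xC0.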
8396     if (SextVal == (int)((unsigned)i << TypeShiftAmt)) {
8397       SDValue Res = BuildSplatI(i, SplatSize, MVT::Other, DAG, dl);
8398       static const unsigned IIDs[] = { // Intrinsic to use for each size.
8399         Intrinsic::ppc_altivec_vslb, Intrinsic::ppc_altivec_vslh, 0,
8400         Intrinsic::ppc_altivec_vslw
8401       };
8402       Res = BuildIntrinsicOp(IIDs[SplatSize-1], Res, Res, DAG, dl);
8403       return DAG.getNode(ISD::BITCAST, dl, Op.getValueType(), Res);
8404     }
8405 
8406     // vsplti + srl self.
8407     if (SextVal == (int)((unsigned)i >> TypeShiftAmt)) {
8408       SDValue Res = BuildSplatI(i, SplatSize, MVT::Other, DAG, dl);
8409       static const unsigned IIDs[] = { // Intrinsic to use for each size.
8410         Intrinsic::ppc_altivec_vsrb, Intrinsic::ppc_altivec_vsrh, 0,
8411         Intrinsic::ppc_altivec_vsrw
8412       };
8413       Res = BuildIntrinsicOp(IIDs[SplatSize-1], Res, Res, DAG, dl);
8414       return DAG.getNode(ISD::BITCAST, dl, Op.getValueType(), Res);
8415     }
8416 
8417     // vsplti + sra self.
8418     if (SextVal == (int)((unsigned)i >> TypeShiftAmt)) {
8419       SDValue Res = BuildSplatI(i, SplatSize, MVT::Other, DAG, dl);
8420       static const unsigned IIDs[] = { // Intrinsic to use for each size.
8421         Intrinsic::ppc_altivec_vsrab, Intrinsic::ppc_altivec_vsrah, 0,
8422         Intrinsic::ppc_altivec_vsraw
8423       };
8424       Res = BuildIntrinsicOp(IIDs[SplatSize-1], Res, Res, DAG, dl);
8425       return DAG.getNode(ISD::BITCAST, dl, Op.getValueType(), Res);
8426     }
8427 
8428     // vsplti + rol self.
8429     if (SextVal == (int)(((unsigned)i << TypeShiftAmt) |
8430                          ((unsigned)i >> (SplatBitSize-TypeShiftAmt)))) {
8431       SDValue Res = BuildSplatI(i, SplatSize, MVT::Other, DAG, dl);
8432       static const unsigned IIDs[] = { // Intrinsic to use for each size.
8433         Intrinsic::ppc_altivec_vrlb, Intrinsic::ppc_altivec_vrlh, 0,
8434         Intrinsic::ppc_altivec_vrlw
8435       };
8436       Res = BuildIntrinsicOp(IIDs[SplatSize-1], Res, Res, DAG, dl);
8437       return DAG.getNode(ISD::BITCAST, dl, Op.getValueType(), Res);
8438     }
8439 
8440     // t = vsplti c, result = vsldoi t, t, 1
8441     if (SextVal == (int)(((unsigned)i << 8) | (i < 0 ? 0xFF : 0))) {
8442       SDValue T = BuildSplatI(i, SplatSize, MVT::v16i8, DAG, dl);
8443       unsigned Amt = Subtarget.isLittleEndian() ? 15 : 1;
8444       return BuildVSLDOI(T, T, Amt, Op.getValueType(), DAG, dl);
8445     }
8446     // t = vsplti c, result = vsldoi t, t, 2
8447     if (SextVal == (int)(((unsigned)i << 16) | (i < 0 ? 0xFFFF : 0))) {
8448       SDValue T = BuildSplatI(i, SplatSize, MVT::v16i8, DAG, dl);
8449       unsigned Amt = Subtarget.isLittleEndian() ? 14 : 2;
8450       return BuildVSLDOI(T, T, Amt, Op.getValueType(), DAG, dl);
8451     }
8452     // t = vsplti c, result = vsldoi t, t, 3
8453     if (SextVal == (int)(((unsigned)i << 24) | (i < 0 ? 0xFFFFFF : 0))) {
8454       SDValue T = BuildSplatI(i, SplatSize, MVT::v16i8, DAG, dl);
8455       unsigned Amt = Subtarget.isLittleEndian() ? 13 : 3;
8456       return BuildVSLDOI(T, T, Amt, Op.getValueType(), DAG, dl);
8457     }
8458   }
8459 
8460   return SDValue();
8461 }
8462 
8463 /// GeneratePerfectShuffle - Given an entry in the perfect-shuffle table, emit
8464 /// the specified operations to build the shuffle.
8465 static SDValue GeneratePerfectShuffle(unsigned PFEntry, SDValue LHS,
8466                                       SDValue RHS, SelectionDAG &DAG,
8467                                       const SDLoc &dl) {
8468   unsigned OpNum = (PFEntry >> 26) & 0x0F;
8469   unsigned LHSID = (PFEntry >> 13) & ((1 << 13)-1);
8470   unsigned RHSID = (PFEntry >>  0) & ((1 << 13)-1);
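  // PFEntry packs the operator number into bits [29:26] and two 13-bit
  // shuffle operand IDs below it; each ID encodes four lane indices as
  // base-9 digits (cf. the OP_COPY checks below).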
8471 
8472   enum {
8473     OP_COPY = 0,  // Copy, used for things like <u,u,u,3> to say it is <0,1,2,3>
8474     OP_VMRGHW,
8475     OP_VMRGLW,
8476     OP_VSPLTISW0,
8477     OP_VSPLTISW1,
8478     OP_VSPLTISW2,
8479     OP_VSPLTISW3,
8480     OP_VSLDOI4,
8481     OP_VSLDOI8,
8482     OP_VSLDOI12
8483   };
8484 
8485   if (OpNum == OP_COPY) {
8486     if (LHSID == (1*9+2)*9+3) return LHS;
8487     assert(LHSID == ((4*9+5)*9+6)*9+7 && "Illegal OP_COPY!");
8488     return RHS;
8489   }
8490 
8491   SDValue OpLHS, OpRHS;
8492   OpLHS = GeneratePerfectShuffle(PerfectShuffleTable[LHSID], LHS, RHS, DAG, dl);
8493   OpRHS = GeneratePerfectShuffle(PerfectShuffleTable[RHSID], LHS, RHS, DAG, dl);
8494 
8495   int ShufIdxs[16];
8496   switch (OpNum) {
8497   default: llvm_unreachable("Unknown i32 permute!");
8498   case OP_VMRGHW:
8499     ShufIdxs[ 0] =  0; ShufIdxs[ 1] =  1; ShufIdxs[ 2] =  2; ShufIdxs[ 3] =  3;
8500     ShufIdxs[ 4] = 16; ShufIdxs[ 5] = 17; ShufIdxs[ 6] = 18; ShufIdxs[ 7] = 19;
8501     ShufIdxs[ 8] =  4; ShufIdxs[ 9] =  5; ShufIdxs[10] =  6; ShufIdxs[11] =  7;
8502     ShufIdxs[12] = 20; ShufIdxs[13] = 21; ShufIdxs[14] = 22; ShufIdxs[15] = 23;
8503     break;
8504   case OP_VMRGLW:
8505     ShufIdxs[ 0] =  8; ShufIdxs[ 1] =  9; ShufIdxs[ 2] = 10; ShufIdxs[ 3] = 11;
8506     ShufIdxs[ 4] = 24; ShufIdxs[ 5] = 25; ShufIdxs[ 6] = 26; ShufIdxs[ 7] = 27;
8507     ShufIdxs[ 8] = 12; ShufIdxs[ 9] = 13; ShufIdxs[10] = 14; ShufIdxs[11] = 15;
8508     ShufIdxs[12] = 28; ShufIdxs[13] = 29; ShufIdxs[14] = 30; ShufIdxs[15] = 31;
8509     break;
8510   case OP_VSPLTISW0:
8511     for (unsigned i = 0; i != 16; ++i)
8512       ShufIdxs[i] = (i&3)+0;
8513     break;
8514   case OP_VSPLTISW1:
8515     for (unsigned i = 0; i != 16; ++i)
8516       ShufIdxs[i] = (i&3)+4;
8517     break;
8518   case OP_VSPLTISW2:
8519     for (unsigned i = 0; i != 16; ++i)
8520       ShufIdxs[i] = (i&3)+8;
8521     break;
8522   case OP_VSPLTISW3:
8523     for (unsigned i = 0; i != 16; ++i)
8524       ShufIdxs[i] = (i&3)+12;
8525     break;
8526   case OP_VSLDOI4:
8527     return BuildVSLDOI(OpLHS, OpRHS, 4, OpLHS.getValueType(), DAG, dl);
8528   case OP_VSLDOI8:
8529     return BuildVSLDOI(OpLHS, OpRHS, 8, OpLHS.getValueType(), DAG, dl);
8530   case OP_VSLDOI12:
8531     return BuildVSLDOI(OpLHS, OpRHS, 12, OpLHS.getValueType(), DAG, dl);
8532   }
8533   EVT VT = OpLHS.getValueType();
8534   OpLHS = DAG.getNode(ISD::BITCAST, dl, MVT::v16i8, OpLHS);
8535   OpRHS = DAG.getNode(ISD::BITCAST, dl, MVT::v16i8, OpRHS);
8536   SDValue T = DAG.getVectorShuffle(MVT::v16i8, dl, OpLHS, OpRHS, ShufIdxs);
8537   return DAG.getNode(ISD::BITCAST, dl, VT, T);
8538 }
8539 
8540 /// lowerToVINSERTB - Return the SDValue if this VECTOR_SHUFFLE can be handled
8541 /// by the VINSERTB instruction introduced in ISA 3.0, else just return default
8542 /// SDValue.
8543 SDValue PPCTargetLowering::lowerToVINSERTB(ShuffleVectorSDNode *N,
8544                                            SelectionDAG &DAG) const {
8545   const unsigned BytesInVector = 16;
8546   bool IsLE = Subtarget.isLittleEndian();
8547   SDLoc dl(N);
8548   SDValue V1 = N->getOperand(0);
8549   SDValue V2 = N->getOperand(1);
8550   unsigned ShiftElts = 0, InsertAtByte = 0;
8551   bool Swap = false;
8552 
8553   // Shifts required to get the byte we want at element 7.
8554   unsigned LittleEndianShifts[] = {8, 7,  6,  5,  4,  3,  2,  1,
8555                                    0, 15, 14, 13, 12, 11, 10, 9};
8556   unsigned BigEndianShifts[] = {9, 10, 11, 12, 13, 14, 15, 0,
8557                                 1, 2,  3,  4,  5,  6,  7,  8};
8558 
8559   ArrayRef<int> Mask = N->getMask();
8560   int OriginalOrder[] = {0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15};
8561 
8562   // For each mask element, find out if we're just inserting something
8563   // from V2 into V1 or vice versa.
8564   // Possible permutations inserting an element from V2 into V1:
8565   //   X, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15
8566   //   0, X, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15
8567   //   ...
8568   //   0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, X
8569   // Inserting from V1 into V2 will be similar, except mask range will be
8570   // [16,31].
8571 
8572   bool FoundCandidate = false;
8573   // If both vector operands for the shuffle are the same vector, the mask
8574   // will contain only elements from the first one and the second one will be
8575   // undef.
8576   unsigned VINSERTBSrcElem = IsLE ? 8 : 7;
  // Go through the mask of bytes to find an element that's being moved
  // from one vector to the other.
8579   for (unsigned i = 0; i < BytesInVector; ++i) {
8580     unsigned CurrentElement = Mask[i];
8581     // If 2nd operand is undefined, we should only look for element 7 in the
8582     // Mask.
8583     if (V2.isUndef() && CurrentElement != VINSERTBSrcElem)
8584       continue;
8585 
8586     bool OtherElementsInOrder = true;
8587     // Examine the other elements in the Mask to see if they're in original
8588     // order.
8589     for (unsigned j = 0; j < BytesInVector; ++j) {
8590       if (j == i)
8591         continue;
      // If CurrentElement is from V1 [0,15], then we expect the rest of the
      // Mask to be from V2 [16,31] and vice versa, unless the 2nd operand is
      // undefined, in which case we assume we're always picking from the 1st
      // operand.
8595       int MaskOffset =
8596           (!V2.isUndef() && CurrentElement < BytesInVector) ? BytesInVector : 0;
8597       if (Mask[j] != OriginalOrder[j] + MaskOffset) {
8598         OtherElementsInOrder = false;
8599         break;
8600       }
8601     }
8602     // If other elements are in original order, we record the number of shifts
8603     // we need to get the element we want into element 7. Also record which byte
8604     // in the vector we should insert into.
8605     if (OtherElementsInOrder) {
8606       // If 2nd operand is undefined, we assume no shifts and no swapping.
8607       if (V2.isUndef()) {
8608         ShiftElts = 0;
8609         Swap = false;
8610       } else {
        // Only the low 4 bits of CurrentElement are needed to index the
        // shift tables: masking with 0xF maps elements from V2 ([16,31])
        // back into [0,15], and elements from V1 will be handled by
        // swapping the operands.
8612         ShiftElts = IsLE ? LittleEndianShifts[CurrentElement & 0xF]
8613                          : BigEndianShifts[CurrentElement & 0xF];
8614         Swap = CurrentElement < BytesInVector;
8615       }
8616       InsertAtByte = IsLE ? BytesInVector - (i + 1) : i;
8617       FoundCandidate = true;
8618       break;
8619     }
8620   }
8621 
8622   if (!FoundCandidate)
8623     return SDValue();
8624 
8625   // Candidate found, construct the proper SDAG sequence with VINSERTB,
8626   // optionally with VECSHL if shift is required.
8627   if (Swap)
8628     std::swap(V1, V2);
8629   if (V2.isUndef())
8630     V2 = V1;
8631   if (ShiftElts) {
8632     SDValue Shl = DAG.getNode(PPCISD::VECSHL, dl, MVT::v16i8, V2, V2,
8633                               DAG.getConstant(ShiftElts, dl, MVT::i32));
8634     return DAG.getNode(PPCISD::VECINSERT, dl, MVT::v16i8, V1, Shl,
8635                        DAG.getConstant(InsertAtByte, dl, MVT::i32));
8636   }
8637   return DAG.getNode(PPCISD::VECINSERT, dl, MVT::v16i8, V1, V2,
8638                      DAG.getConstant(InsertAtByte, dl, MVT::i32));
8639 }
8640 
8641 /// lowerToVINSERTH - Return the SDValue if this VECTOR_SHUFFLE can be handled
8642 /// by the VINSERTH instruction introduced in ISA 3.0, else just return default
8643 /// SDValue.
8644 SDValue PPCTargetLowering::lowerToVINSERTH(ShuffleVectorSDNode *N,
8645                                            SelectionDAG &DAG) const {
8646   const unsigned NumHalfWords = 8;
8647   const unsigned BytesInVector = NumHalfWords * 2;
8648   // Check that the shuffle is on half-words.
8649   if (!isNByteElemShuffleMask(N, 2, 1))
8650     return SDValue();
8651 
8652   bool IsLE = Subtarget.isLittleEndian();
8653   SDLoc dl(N);
8654   SDValue V1 = N->getOperand(0);
8655   SDValue V2 = N->getOperand(1);
8656   unsigned ShiftElts = 0, InsertAtByte = 0;
8657   bool Swap = false;
8658 
8659   // Shifts required to get the half-word we want at element 3.
8660   unsigned LittleEndianShifts[] = {4, 3, 2, 1, 0, 7, 6, 5};
8661   unsigned BigEndianShifts[] = {5, 6, 7, 0, 1, 2, 3, 4};
8662 
8663   uint32_t Mask = 0;
8664   uint32_t OriginalOrderLow = 0x1234567;
8665   uint32_t OriginalOrderHigh = 0x89ABCDEF;
  // Now we look at mask elements 0,2,4,6,8,10,12,14.  Pack the mask into a
  // 32-bit space, since we only need a 4-bit nibble per element.
8668   for (unsigned i = 0; i < NumHalfWords; ++i) {
8669     unsigned MaskShift = (NumHalfWords - 1 - i) * 4;
8670     Mask |= ((uint32_t)(N->getMaskElt(i * 2) / 2) << MaskShift);
8671   }
8672 
8673   // For each mask element, find out if we're just inserting something
8674   // from V2 into V1 or vice versa.  Possible permutations inserting an element
8675   // from V2 into V1:
8676   //   X, 1, 2, 3, 4, 5, 6, 7
8677   //   0, X, 2, 3, 4, 5, 6, 7
8678   //   0, 1, X, 3, 4, 5, 6, 7
8679   //   0, 1, 2, X, 4, 5, 6, 7
8680   //   0, 1, 2, 3, X, 5, 6, 7
8681   //   0, 1, 2, 3, 4, X, 6, 7
8682   //   0, 1, 2, 3, 4, 5, X, 7
8683   //   0, 1, 2, 3, 4, 5, 6, X
8684   // Inserting from V1 into V2 will be similar, except mask range will be [8,15].
8685 
8686   bool FoundCandidate = false;
8687   // Go through the mask of half-words to find an element that's being moved
8688   // from one vector to the other.
8689   for (unsigned i = 0; i < NumHalfWords; ++i) {
8690     unsigned MaskShift = (NumHalfWords - 1 - i) * 4;
8691     uint32_t MaskOneElt = (Mask >> MaskShift) & 0xF;
8692     uint32_t MaskOtherElts = ~(0xF << MaskShift);
8693     uint32_t TargetOrder = 0x0;
8694 
8695     // If both vector operands for the shuffle are the same vector, the mask
8696     // will contain only elements from the first one and the second one will be
8697     // undef.
8698     if (V2.isUndef()) {
8699       ShiftElts = 0;
8700       unsigned VINSERTHSrcElem = IsLE ? 4 : 3;
8701       TargetOrder = OriginalOrderLow;
8702       Swap = false;
      // Skip if this is not the correct element, or if the mask of the other
      // elements doesn't match our expected order.
8705       if (MaskOneElt == VINSERTHSrcElem &&
8706           (Mask & MaskOtherElts) == (TargetOrder & MaskOtherElts)) {
8707         InsertAtByte = IsLE ? BytesInVector - (i + 1) * 2 : i * 2;
8708         FoundCandidate = true;
8709         break;
8710       }
8711     } else { // If both operands are defined.
      // The target order is [8,15] if the current mask element is in [0,7],
      // and vice versa.
8713       TargetOrder =
8714           (MaskOneElt < NumHalfWords) ? OriginalOrderHigh : OriginalOrderLow;
      // Skip if the mask of the other elements doesn't match our expected
      // order.
8716       if ((Mask & MaskOtherElts) == (TargetOrder & MaskOtherElts)) {
8717         // We only need the last 3 bits for the number of shifts.
8718         ShiftElts = IsLE ? LittleEndianShifts[MaskOneElt & 0x7]
8719                          : BigEndianShifts[MaskOneElt & 0x7];
8720         InsertAtByte = IsLE ? BytesInVector - (i + 1) * 2 : i * 2;
8721         Swap = MaskOneElt < NumHalfWords;
8722         FoundCandidate = true;
8723         break;
8724       }
8725     }
8726   }
8727 
8728   if (!FoundCandidate)
8729     return SDValue();
8730 
8731   // Candidate found, construct the proper SDAG sequence with VINSERTH,
8732   // optionally with VECSHL if shift is required.
8733   if (Swap)
8734     std::swap(V1, V2);
8735   if (V2.isUndef())
8736     V2 = V1;
8737   SDValue Conv1 = DAG.getNode(ISD::BITCAST, dl, MVT::v8i16, V1);
8738   if (ShiftElts) {
8739     // Double ShiftElts because we're left shifting on v16i8 type.
8740     SDValue Shl = DAG.getNode(PPCISD::VECSHL, dl, MVT::v16i8, V2, V2,
8741                               DAG.getConstant(2 * ShiftElts, dl, MVT::i32));
8742     SDValue Conv2 = DAG.getNode(ISD::BITCAST, dl, MVT::v8i16, Shl);
8743     SDValue Ins = DAG.getNode(PPCISD::VECINSERT, dl, MVT::v8i16, Conv1, Conv2,
8744                               DAG.getConstant(InsertAtByte, dl, MVT::i32));
8745     return DAG.getNode(ISD::BITCAST, dl, MVT::v16i8, Ins);
8746   }
8747   SDValue Conv2 = DAG.getNode(ISD::BITCAST, dl, MVT::v8i16, V2);
8748   SDValue Ins = DAG.getNode(PPCISD::VECINSERT, dl, MVT::v8i16, Conv1, Conv2,
8749                             DAG.getConstant(InsertAtByte, dl, MVT::i32));
8750   return DAG.getNode(ISD::BITCAST, dl, MVT::v16i8, Ins);
8751 }
8752 
8753 /// LowerVECTOR_SHUFFLE - Return the code we lower for VECTOR_SHUFFLE.  If this
8754 /// is a shuffle we can handle in a single instruction, return it.  Otherwise,
8755 /// return the code it can be lowered into.  Worst case, it can always be
8756 /// lowered into a vperm.
8757 SDValue PPCTargetLowering::LowerVECTOR_SHUFFLE(SDValue Op,
8758                                                SelectionDAG &DAG) const {
8759   SDLoc dl(Op);
8760   SDValue V1 = Op.getOperand(0);
8761   SDValue V2 = Op.getOperand(1);
8762   ShuffleVectorSDNode *SVOp = cast<ShuffleVectorSDNode>(Op);
8763   EVT VT = Op.getValueType();
8764   bool isLittleEndian = Subtarget.isLittleEndian();
8765 
8766   unsigned ShiftElts, InsertAtByte;
8767   bool Swap = false;
8768   if (Subtarget.hasP9Vector() &&
8769       PPC::isXXINSERTWMask(SVOp, ShiftElts, InsertAtByte, Swap,
8770                            isLittleEndian)) {
8771     if (Swap)
8772       std::swap(V1, V2);
8773     SDValue Conv1 = DAG.getNode(ISD::BITCAST, dl, MVT::v4i32, V1);
8774     SDValue Conv2 = DAG.getNode(ISD::BITCAST, dl, MVT::v4i32, V2);
8775     if (ShiftElts) {
8776       SDValue Shl = DAG.getNode(PPCISD::VECSHL, dl, MVT::v4i32, Conv2, Conv2,
8777                                 DAG.getConstant(ShiftElts, dl, MVT::i32));
8778       SDValue Ins = DAG.getNode(PPCISD::VECINSERT, dl, MVT::v4i32, Conv1, Shl,
8779                                 DAG.getConstant(InsertAtByte, dl, MVT::i32));
8780       return DAG.getNode(ISD::BITCAST, dl, MVT::v16i8, Ins);
8781     }
8782     SDValue Ins = DAG.getNode(PPCISD::VECINSERT, dl, MVT::v4i32, Conv1, Conv2,
8783                               DAG.getConstant(InsertAtByte, dl, MVT::i32));
8784     return DAG.getNode(ISD::BITCAST, dl, MVT::v16i8, Ins);
8785   }
8786 
8787   if (Subtarget.hasP9Altivec()) {
8788     SDValue NewISDNode;
8789     if ((NewISDNode = lowerToVINSERTH(SVOp, DAG)))
8790       return NewISDNode;
8791 
8792     if ((NewISDNode = lowerToVINSERTB(SVOp, DAG)))
8793       return NewISDNode;
8794   }
8795 
8796   if (Subtarget.hasVSX() &&
8797       PPC::isXXSLDWIShuffleMask(SVOp, ShiftElts, Swap, isLittleEndian)) {
8798     if (Swap)
8799       std::swap(V1, V2);
8800     SDValue Conv1 = DAG.getNode(ISD::BITCAST, dl, MVT::v4i32, V1);
8801     SDValue Conv2 =
8802         DAG.getNode(ISD::BITCAST, dl, MVT::v4i32, V2.isUndef() ? V1 : V2);
8803 
8804     SDValue Shl = DAG.getNode(PPCISD::VECSHL, dl, MVT::v4i32, Conv1, Conv2,
8805                               DAG.getConstant(ShiftElts, dl, MVT::i32));
8806     return DAG.getNode(ISD::BITCAST, dl, MVT::v16i8, Shl);
8807   }
8808 
8809   if (Subtarget.hasVSX() &&
      PPC::isXXPERMDIShuffleMask(SVOp, ShiftElts, Swap, isLittleEndian)) {
8811     if (Swap)
8812       std::swap(V1, V2);
8813     SDValue Conv1 = DAG.getNode(ISD::BITCAST, dl, MVT::v2i64, V1);
8814     SDValue Conv2 =
8815         DAG.getNode(ISD::BITCAST, dl, MVT::v2i64, V2.isUndef() ? V1 : V2);
8816 
    SDValue PermDI = DAG.getNode(PPCISD::XXPERMDI, dl, MVT::v2i64, Conv1, Conv2,
                                 DAG.getConstant(ShiftElts, dl, MVT::i32));
8819     return DAG.getNode(ISD::BITCAST, dl, MVT::v16i8, PermDI);
8820   }
8821 
8822   if (Subtarget.hasP9Vector()) {
    if (PPC::isXXBRHShuffleMask(SVOp)) {
8824       SDValue Conv = DAG.getNode(ISD::BITCAST, dl, MVT::v8i16, V1);
8825       SDValue ReveHWord = DAG.getNode(PPCISD::XXREVERSE, dl, MVT::v8i16, Conv);
8826       return DAG.getNode(ISD::BITCAST, dl, MVT::v16i8, ReveHWord);
8827     } else if (PPC::isXXBRWShuffleMask(SVOp)) {
8828       SDValue Conv = DAG.getNode(ISD::BITCAST, dl, MVT::v4i32, V1);
8829       SDValue ReveWord = DAG.getNode(PPCISD::XXREVERSE, dl, MVT::v4i32, Conv);
8830       return DAG.getNode(ISD::BITCAST, dl, MVT::v16i8, ReveWord);
8831     } else if (PPC::isXXBRDShuffleMask(SVOp)) {
8832       SDValue Conv = DAG.getNode(ISD::BITCAST, dl, MVT::v2i64, V1);
8833       SDValue ReveDWord = DAG.getNode(PPCISD::XXREVERSE, dl, MVT::v2i64, Conv);
8834       return DAG.getNode(ISD::BITCAST, dl, MVT::v16i8, ReveDWord);
8835     } else if (PPC::isXXBRQShuffleMask(SVOp)) {
8836       SDValue Conv = DAG.getNode(ISD::BITCAST, dl, MVT::v1i128, V1);
8837       SDValue ReveQWord = DAG.getNode(PPCISD::XXREVERSE, dl, MVT::v1i128, Conv);
8838       return DAG.getNode(ISD::BITCAST, dl, MVT::v16i8, ReveQWord);
8839     }
8840   }
8841 
8842   if (Subtarget.hasVSX()) {
8843     if (V2.isUndef() && PPC::isSplatShuffleMask(SVOp, 4)) {
8844       int SplatIdx = PPC::getVSPLTImmediate(SVOp, 4, DAG);
8845 
8846       SDValue Conv = DAG.getNode(ISD::BITCAST, dl, MVT::v4i32, V1);
8847       SDValue Splat = DAG.getNode(PPCISD::XXSPLT, dl, MVT::v4i32, Conv,
8848                                   DAG.getConstant(SplatIdx, dl, MVT::i32));
8849       return DAG.getNode(ISD::BITCAST, dl, MVT::v16i8, Splat);
8850     }
8851 
8852     // Left shifts of 8 bytes are actually swaps. Convert accordingly.
8853     if (V2.isUndef() && PPC::isVSLDOIShuffleMask(SVOp, 1, DAG) == 8) {
8854       SDValue Conv = DAG.getNode(ISD::BITCAST, dl, MVT::v2f64, V1);
8855       SDValue Swap = DAG.getNode(PPCISD::SWAP_NO_CHAIN, dl, MVT::v2f64, Conv);
8856       return DAG.getNode(ISD::BITCAST, dl, MVT::v16i8, Swap);
8857     }
8858   }
8859 
8860   if (Subtarget.hasQPX()) {
8861     if (VT.getVectorNumElements() != 4)
8862       return SDValue();
8863 
8864     if (V2.isUndef()) V2 = V1;
8865 
8866     int AlignIdx = PPC::isQVALIGNIShuffleMask(SVOp);
8867     if (AlignIdx != -1) {
8868       return DAG.getNode(PPCISD::QVALIGNI, dl, VT, V1, V2,
8869                          DAG.getConstant(AlignIdx, dl, MVT::i32));
8870     } else if (SVOp->isSplat()) {
8871       int SplatIdx = SVOp->getSplatIndex();
8872       if (SplatIdx >= 4) {
8873         std::swap(V1, V2);
8874         SplatIdx -= 4;
8875       }
8876 
8877       return DAG.getNode(PPCISD::QVESPLATI, dl, VT, V1,
8878                          DAG.getConstant(SplatIdx, dl, MVT::i32));
8879     }
8880 
8881     // Lower this into a qvgpci/qvfperm pair.
8882 
    // Compute the qvgpci literal.
8884     unsigned idx = 0;
8885     for (unsigned i = 0; i < 4; ++i) {
8886       int m = SVOp->getMaskElt(i);
8887       unsigned mm = m >= 0 ? (unsigned) m : i;
      idx |= mm << ((3 - i) * 3);
8889     }
8890 
8891     SDValue V3 = DAG.getNode(PPCISD::QVGPCI, dl, MVT::v4f64,
8892                              DAG.getConstant(idx, dl, MVT::i32));
8893     return DAG.getNode(PPCISD::QVFPERM, dl, VT, V1, V2, V3);
8894   }
8895 
8896   // Cases that are handled by instructions that take permute immediates
8897   // (such as vsplt*) should be left as VECTOR_SHUFFLE nodes so they can be
8898   // selected by the instruction selector.
8899   if (V2.isUndef()) {
8900     if (PPC::isSplatShuffleMask(SVOp, 1) ||
8901         PPC::isSplatShuffleMask(SVOp, 2) ||
8902         PPC::isSplatShuffleMask(SVOp, 4) ||
8903         PPC::isVPKUWUMShuffleMask(SVOp, 1, DAG) ||
8904         PPC::isVPKUHUMShuffleMask(SVOp, 1, DAG) ||
8905         PPC::isVSLDOIShuffleMask(SVOp, 1, DAG) != -1 ||
8906         PPC::isVMRGLShuffleMask(SVOp, 1, 1, DAG) ||
8907         PPC::isVMRGLShuffleMask(SVOp, 2, 1, DAG) ||
8908         PPC::isVMRGLShuffleMask(SVOp, 4, 1, DAG) ||
8909         PPC::isVMRGHShuffleMask(SVOp, 1, 1, DAG) ||
8910         PPC::isVMRGHShuffleMask(SVOp, 2, 1, DAG) ||
8911         PPC::isVMRGHShuffleMask(SVOp, 4, 1, DAG) ||
8912         (Subtarget.hasP8Altivec() && (
8913          PPC::isVPKUDUMShuffleMask(SVOp, 1, DAG) ||
8914          PPC::isVMRGEOShuffleMask(SVOp, true, 1, DAG) ||
8915          PPC::isVMRGEOShuffleMask(SVOp, false, 1, DAG)))) {
8916       return Op;
8917     }
8918   }
8919 
8920   // Altivec has a variety of "shuffle immediates" that take two vector inputs
8921   // and produce a fixed permutation.  If any of these match, do not lower to
8922   // VPERM.
8923   unsigned int ShuffleKind = isLittleEndian ? 2 : 0;
8924   if (PPC::isVPKUWUMShuffleMask(SVOp, ShuffleKind, DAG) ||
8925       PPC::isVPKUHUMShuffleMask(SVOp, ShuffleKind, DAG) ||
8926       PPC::isVSLDOIShuffleMask(SVOp, ShuffleKind, DAG) != -1 ||
8927       PPC::isVMRGLShuffleMask(SVOp, 1, ShuffleKind, DAG) ||
8928       PPC::isVMRGLShuffleMask(SVOp, 2, ShuffleKind, DAG) ||
8929       PPC::isVMRGLShuffleMask(SVOp, 4, ShuffleKind, DAG) ||
8930       PPC::isVMRGHShuffleMask(SVOp, 1, ShuffleKind, DAG) ||
8931       PPC::isVMRGHShuffleMask(SVOp, 2, ShuffleKind, DAG) ||
8932       PPC::isVMRGHShuffleMask(SVOp, 4, ShuffleKind, DAG) ||
8933       (Subtarget.hasP8Altivec() && (
8934        PPC::isVPKUDUMShuffleMask(SVOp, ShuffleKind, DAG) ||
8935        PPC::isVMRGEOShuffleMask(SVOp, true, ShuffleKind, DAG) ||
8936        PPC::isVMRGEOShuffleMask(SVOp, false, ShuffleKind, DAG))))
8937     return Op;
8938 
8939   // Check to see if this is a shuffle of 4-byte values.  If so, we can use our
8940   // perfect shuffle table to emit an optimal matching sequence.
8941   ArrayRef<int> PermMask = SVOp->getMask();
8942 
8943   unsigned PFIndexes[4];
8944   bool isFourElementShuffle = true;
8945   for (unsigned i = 0; i != 4 && isFourElementShuffle; ++i) { // Element number
8946     unsigned EltNo = 8;   // Start out undef.
8947     for (unsigned j = 0; j != 4; ++j) {  // Intra-element byte.
8948       if (PermMask[i*4+j] < 0)
8949         continue;   // Undef, ignore it.
8950 
8951       unsigned ByteSource = PermMask[i*4+j];
8952       if ((ByteSource & 3) != j) {
8953         isFourElementShuffle = false;
8954         break;
8955       }
8956 
8957       if (EltNo == 8) {
8958         EltNo = ByteSource/4;
8959       } else if (EltNo != ByteSource/4) {
8960         isFourElementShuffle = false;
8961         break;
8962       }
8963     }
8964     PFIndexes[i] = EltNo;
8965   }
8966 
8967   // If this shuffle can be expressed as a shuffle of 4-byte elements, use the
8968   // perfect shuffle vector to determine if it is cost effective to do this as
8969   // discrete instructions, or whether we should use a vperm.
8970   // For now, we skip this for little endian until such time as we have a
8971   // little-endian perfect shuffle table.
8972   if (isFourElementShuffle && !isLittleEndian) {
8973     // Compute the index in the perfect shuffle table.
8974     unsigned PFTableIndex =
8975       PFIndexes[0]*9*9*9+PFIndexes[1]*9*9+PFIndexes[2]*9+PFIndexes[3];
8976 
8977     unsigned PFEntry = PerfectShuffleTable[PFTableIndex];
8978     unsigned Cost  = (PFEntry >> 30);
8979 
8980     // Determining when to avoid vperm is tricky.  Many things affect the cost
8981     // of vperm, particularly how many times the perm mask needs to be computed.
8982     // For example, if the perm mask can be hoisted out of a loop or is already
8983     // used (perhaps because there are multiple permutes with the same shuffle
8984     // mask?) the vperm has a cost of 1.  OTOH, hoisting the permute mask out of
8985     // the loop requires an extra register.
8986     //
8987     // As a compromise, we only emit discrete instructions if the shuffle can be
8988     // generated in 3 or fewer operations.  When we have loop information
8989     // available, if this block is within a loop, we should avoid using vperm
8990     // for 3-operation perms and use a constant pool load instead.
8991     if (Cost < 3)
8992       return GeneratePerfectShuffle(PFEntry, V1, V2, DAG, dl);
8993   }
8994 
8995   // Lower this to a VPERM(V1, V2, V3) expression, where V3 is a constant
8996   // vector that will get spilled to the constant pool.
8997   if (V2.isUndef()) V2 = V1;
8998 
8999   // The SHUFFLE_VECTOR mask is almost exactly what we want for vperm, except
9000   // that it is in input element units, not in bytes.  Convert now.
9001 
9002   // For little endian, the order of the input vectors is reversed, and
9003   // the permutation mask is complemented with respect to 31.  This is
9004   // necessary to produce proper semantics with the big-endian-biased vperm
9005   // instruction.
9006   EVT EltVT = V1.getValueType().getVectorElementType();
9007   unsigned BytesPerElement = EltVT.getSizeInBits()/8;
9008 
9009   SmallVector<SDValue, 16> ResultMask;
9010   for (unsigned i = 0, e = VT.getVectorNumElements(); i != e; ++i) {
9011     unsigned SrcElt = PermMask[i] < 0 ? 0 : PermMask[i];
9012 
9013     for (unsigned j = 0; j != BytesPerElement; ++j)
9014       if (isLittleEndian)
9015         ResultMask.push_back(DAG.getConstant(31 - (SrcElt*BytesPerElement + j),
9016                                              dl, MVT::i32));
9017       else
9018         ResultMask.push_back(DAG.getConstant(SrcElt*BytesPerElement + j, dl,
9019                                              MVT::i32));
9020   }
9021 
9022   SDValue VPermMask = DAG.getBuildVector(MVT::v16i8, dl, ResultMask);
9023   if (isLittleEndian)
9024     return DAG.getNode(PPCISD::VPERM, dl, V1.getValueType(),
9025                        V2, V1, VPermMask);
9026   else
9027     return DAG.getNode(PPCISD::VPERM, dl, V1.getValueType(),
9028                        V1, V2, VPermMask);
9029 }
9030 
/// getVectorCompareInfo - Given an intrinsic, return false if it is not a
/// vector comparison.  If it is, return true and fill in CompareOpc/isDot
/// with information about the intrinsic.
9034 static bool getVectorCompareInfo(SDValue Intrin, int &CompareOpc,
9035                                  bool &isDot, const PPCSubtarget &Subtarget) {
9036   unsigned IntrinsicID =
9037       cast<ConstantSDNode>(Intrin.getOperand(0))->getZExtValue();
9038   CompareOpc = -1;
9039   isDot = false;
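  // Note: the CompareOpc values below are the extended-opcode fields of the
  // corresponding compare instructions (e.g. 966 for vcmpbfp), which the
  // instruction patterns use to pick the concrete compare.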
9040   switch (IntrinsicID) {
9041   default:
9042     return false;
9043   // Comparison predicates.
9044   case Intrinsic::ppc_altivec_vcmpbfp_p:
9045     CompareOpc = 966;
9046     isDot = true;
9047     break;
9048   case Intrinsic::ppc_altivec_vcmpeqfp_p:
9049     CompareOpc = 198;
9050     isDot = true;
9051     break;
9052   case Intrinsic::ppc_altivec_vcmpequb_p:
9053     CompareOpc = 6;
9054     isDot = true;
9055     break;
9056   case Intrinsic::ppc_altivec_vcmpequh_p:
9057     CompareOpc = 70;
9058     isDot = true;
9059     break;
9060   case Intrinsic::ppc_altivec_vcmpequw_p:
9061     CompareOpc = 134;
9062     isDot = true;
9063     break;
9064   case Intrinsic::ppc_altivec_vcmpequd_p:
9065     if (Subtarget.hasP8Altivec()) {
9066       CompareOpc = 199;
9067       isDot = true;
9068     } else
9069       return false;
9070     break;
9071   case Intrinsic::ppc_altivec_vcmpneb_p:
9072   case Intrinsic::ppc_altivec_vcmpneh_p:
9073   case Intrinsic::ppc_altivec_vcmpnew_p:
9074   case Intrinsic::ppc_altivec_vcmpnezb_p:
9075   case Intrinsic::ppc_altivec_vcmpnezh_p:
9076   case Intrinsic::ppc_altivec_vcmpnezw_p:
9077     if (Subtarget.hasP9Altivec()) {
9078       switch (IntrinsicID) {
9079       default:
9080         llvm_unreachable("Unknown comparison intrinsic.");
9081       case Intrinsic::ppc_altivec_vcmpneb_p:
9082         CompareOpc = 7;
9083         break;
9084       case Intrinsic::ppc_altivec_vcmpneh_p:
9085         CompareOpc = 71;
9086         break;
9087       case Intrinsic::ppc_altivec_vcmpnew_p:
9088         CompareOpc = 135;
9089         break;
9090       case Intrinsic::ppc_altivec_vcmpnezb_p:
9091         CompareOpc = 263;
9092         break;
9093       case Intrinsic::ppc_altivec_vcmpnezh_p:
9094         CompareOpc = 327;
9095         break;
9096       case Intrinsic::ppc_altivec_vcmpnezw_p:
9097         CompareOpc = 391;
9098         break;
9099       }
9100       isDot = true;
9101     } else
9102       return false;
9103     break;
9104   case Intrinsic::ppc_altivec_vcmpgefp_p:
9105     CompareOpc = 454;
9106     isDot = true;
9107     break;
9108   case Intrinsic::ppc_altivec_vcmpgtfp_p:
9109     CompareOpc = 710;
9110     isDot = true;
9111     break;
9112   case Intrinsic::ppc_altivec_vcmpgtsb_p:
9113     CompareOpc = 774;
9114     isDot = true;
9115     break;
9116   case Intrinsic::ppc_altivec_vcmpgtsh_p:
9117     CompareOpc = 838;
9118     isDot = true;
9119     break;
9120   case Intrinsic::ppc_altivec_vcmpgtsw_p:
9121     CompareOpc = 902;
9122     isDot = true;
9123     break;
9124   case Intrinsic::ppc_altivec_vcmpgtsd_p:
9125     if (Subtarget.hasP8Altivec()) {
9126       CompareOpc = 967;
9127       isDot = true;
9128     } else
9129       return false;
9130     break;
9131   case Intrinsic::ppc_altivec_vcmpgtub_p:
9132     CompareOpc = 518;
9133     isDot = true;
9134     break;
9135   case Intrinsic::ppc_altivec_vcmpgtuh_p:
9136     CompareOpc = 582;
9137     isDot = true;
9138     break;
9139   case Intrinsic::ppc_altivec_vcmpgtuw_p:
9140     CompareOpc = 646;
9141     isDot = true;
9142     break;
9143   case Intrinsic::ppc_altivec_vcmpgtud_p:
9144     if (Subtarget.hasP8Altivec()) {
9145       CompareOpc = 711;
9146       isDot = true;
9147     } else
9148       return false;
9149     break;
9150 
  // VSX predicate comparisons use the same infrastructure.
9152   case Intrinsic::ppc_vsx_xvcmpeqdp_p:
9153   case Intrinsic::ppc_vsx_xvcmpgedp_p:
9154   case Intrinsic::ppc_vsx_xvcmpgtdp_p:
9155   case Intrinsic::ppc_vsx_xvcmpeqsp_p:
9156   case Intrinsic::ppc_vsx_xvcmpgesp_p:
9157   case Intrinsic::ppc_vsx_xvcmpgtsp_p:
9158     if (Subtarget.hasVSX()) {
9159       switch (IntrinsicID) {
9160       case Intrinsic::ppc_vsx_xvcmpeqdp_p:
9161         CompareOpc = 99;
9162         break;
9163       case Intrinsic::ppc_vsx_xvcmpgedp_p:
9164         CompareOpc = 115;
9165         break;
9166       case Intrinsic::ppc_vsx_xvcmpgtdp_p:
9167         CompareOpc = 107;
9168         break;
9169       case Intrinsic::ppc_vsx_xvcmpeqsp_p:
9170         CompareOpc = 67;
9171         break;
9172       case Intrinsic::ppc_vsx_xvcmpgesp_p:
9173         CompareOpc = 83;
9174         break;
9175       case Intrinsic::ppc_vsx_xvcmpgtsp_p:
9176         CompareOpc = 75;
9177         break;
9178       }
9179       isDot = true;
9180     } else
9181       return false;
9182     break;
9183 
9184   // Normal Comparisons.
9185   case Intrinsic::ppc_altivec_vcmpbfp:
9186     CompareOpc = 966;
9187     break;
9188   case Intrinsic::ppc_altivec_vcmpeqfp:
9189     CompareOpc = 198;
9190     break;
9191   case Intrinsic::ppc_altivec_vcmpequb:
9192     CompareOpc = 6;
9193     break;
9194   case Intrinsic::ppc_altivec_vcmpequh:
9195     CompareOpc = 70;
9196     break;
9197   case Intrinsic::ppc_altivec_vcmpequw:
9198     CompareOpc = 134;
9199     break;
9200   case Intrinsic::ppc_altivec_vcmpequd:
9201     if (Subtarget.hasP8Altivec())
9202       CompareOpc = 199;
9203     else
9204       return false;
9205     break;
9206   case Intrinsic::ppc_altivec_vcmpneb:
9207   case Intrinsic::ppc_altivec_vcmpneh:
9208   case Intrinsic::ppc_altivec_vcmpnew:
9209   case Intrinsic::ppc_altivec_vcmpnezb:
9210   case Intrinsic::ppc_altivec_vcmpnezh:
9211   case Intrinsic::ppc_altivec_vcmpnezw:
9212     if (Subtarget.hasP9Altivec())
9213       switch (IntrinsicID) {
9214       default:
9215         llvm_unreachable("Unknown comparison intrinsic.");
9216       case Intrinsic::ppc_altivec_vcmpneb:
9217         CompareOpc = 7;
9218         break;
9219       case Intrinsic::ppc_altivec_vcmpneh:
9220         CompareOpc = 71;
9221         break;
9222       case Intrinsic::ppc_altivec_vcmpnew:
9223         CompareOpc = 135;
9224         break;
9225       case Intrinsic::ppc_altivec_vcmpnezb:
9226         CompareOpc = 263;
9227         break;
9228       case Intrinsic::ppc_altivec_vcmpnezh:
9229         CompareOpc = 327;
9230         break;
9231       case Intrinsic::ppc_altivec_vcmpnezw:
9232         CompareOpc = 391;
9233         break;
9234       }
9235     else
9236       return false;
9237     break;
9238   case Intrinsic::ppc_altivec_vcmpgefp:
9239     CompareOpc = 454;
9240     break;
9241   case Intrinsic::ppc_altivec_vcmpgtfp:
9242     CompareOpc = 710;
9243     break;
9244   case Intrinsic::ppc_altivec_vcmpgtsb:
9245     CompareOpc = 774;
9246     break;
9247   case Intrinsic::ppc_altivec_vcmpgtsh:
9248     CompareOpc = 838;
9249     break;
9250   case Intrinsic::ppc_altivec_vcmpgtsw:
9251     CompareOpc = 902;
9252     break;
9253   case Intrinsic::ppc_altivec_vcmpgtsd:
9254     if (Subtarget.hasP8Altivec())
9255       CompareOpc = 967;
9256     else
9257       return false;
9258     break;
9259   case Intrinsic::ppc_altivec_vcmpgtub:
9260     CompareOpc = 518;
9261     break;
9262   case Intrinsic::ppc_altivec_vcmpgtuh:
9263     CompareOpc = 582;
9264     break;
9265   case Intrinsic::ppc_altivec_vcmpgtuw:
9266     CompareOpc = 646;
9267     break;
9268   case Intrinsic::ppc_altivec_vcmpgtud:
9269     if (Subtarget.hasP8Altivec())
9270       CompareOpc = 711;
9271     else
9272       return false;
9273     break;
9274   }
9275   return true;
9276 }
9277 
9278 /// LowerINTRINSIC_WO_CHAIN - If this is an intrinsic that we want to custom
9279 /// lower, do it, otherwise return null.
9280 SDValue PPCTargetLowering::LowerINTRINSIC_WO_CHAIN(SDValue Op,
9281                                                    SelectionDAG &DAG) const {
9282   unsigned IntrinsicID =
9283     cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue();
9284 
9285   SDLoc dl(Op);
9286 
9287   if (IntrinsicID == Intrinsic::thread_pointer) {
9288     // Reads the thread pointer register, used for __builtin_thread_pointer.
9289     if (Subtarget.isPPC64())
9290       return DAG.getRegister(PPC::X13, MVT::i64);
9291     return DAG.getRegister(PPC::R2, MVT::i32);
9292   }
9293 
9294   // If this is a lowered altivec predicate compare, CompareOpc is set to the
9295   // opcode number of the comparison.
9296   int CompareOpc;
9297   bool isDot;
9298   if (!getVectorCompareInfo(Op, CompareOpc, isDot, Subtarget))
9299     return SDValue();    // Don't custom lower most intrinsics.
9300 
9301   // If this is a non-dot comparison, make the VCMP node and we are done.
9302   if (!isDot) {
9303     SDValue Tmp = DAG.getNode(PPCISD::VCMP, dl, Op.getOperand(2).getValueType(),
9304                               Op.getOperand(1), Op.getOperand(2),
9305                               DAG.getConstant(CompareOpc, dl, MVT::i32));
9306     return DAG.getNode(ISD::BITCAST, dl, Op.getValueType(), Tmp);
9307   }
9308 
9309   // Create the PPCISD altivec 'dot' comparison node.
9310   SDValue Ops[] = {
9311     Op.getOperand(2),  // LHS
9312     Op.getOperand(3),  // RHS
9313     DAG.getConstant(CompareOpc, dl, MVT::i32)
9314   };
9315   EVT VTs[] = { Op.getOperand(2).getValueType(), MVT::Glue };
9316   SDValue CompNode = DAG.getNode(PPCISD::VCMPo, dl, VTs, Ops);
9317 
9318   // Now that we have the comparison, emit a copy from the CR to a GPR.
9319   // This is flagged to the above dot comparison.
9320   SDValue Flags = DAG.getNode(PPCISD::MFOCRF, dl, MVT::i32,
9321                                 DAG.getRegister(PPC::CR6, MVT::i32),
9322                                 CompNode.getValue(1));
9323 
9324   // Unpack the result based on how the target uses it.
9325   unsigned BitNo;   // Bit # of CR6.
9326   bool InvertBit;   // Invert result?
9327   switch (cast<ConstantSDNode>(Op.getOperand(1))->getZExtValue()) {
9328   default:  // Can't happen, don't crash on invalid number though.
9329   case 0:   // Return the value of the EQ bit of CR6.
9330     BitNo = 0; InvertBit = false;
9331     break;
9332   case 1:   // Return the inverted value of the EQ bit of CR6.
9333     BitNo = 0; InvertBit = true;
9334     break;
9335   case 2:   // Return the value of the LT bit of CR6.
9336     BitNo = 2; InvertBit = false;
9337     break;
9338   case 3:   // Return the inverted value of the LT bit of CR6.
9339     BitNo = 2; InvertBit = true;
9340     break;
9341   }
9342 
9343   // Shift the bit into the low position.
9344   Flags = DAG.getNode(ISD::SRL, dl, MVT::i32, Flags,
9345                       DAG.getConstant(8 - (3 - BitNo), dl, MVT::i32));
9346   // Isolate the bit.
9347   Flags = DAG.getNode(ISD::AND, dl, MVT::i32, Flags,
9348                       DAG.getConstant(1, dl, MVT::i32));
9349 
9350   // If we are supposed to, toggle the bit.
9351   if (InvertBit)
9352     Flags = DAG.getNode(ISD::XOR, dl, MVT::i32, Flags,
9353                         DAG.getConstant(1, dl, MVT::i32));
9354   return Flags;
9355 }
9356 
9357 SDValue PPCTargetLowering::LowerINTRINSIC_VOID(SDValue Op,
9358                                                SelectionDAG &DAG) const {
  // SelectionDAGBuilder::visitTargetIntrinsic may insert one extra chain at
  // the beginning of the argument list.
9361   int ArgStart = isa<ConstantSDNode>(Op.getOperand(0)) ? 0 : 1;
9362   SDLoc DL(Op);
9363   switch (cast<ConstantSDNode>(Op.getOperand(ArgStart))->getZExtValue()) {
9364   case Intrinsic::ppc_cfence: {
9365     assert(ArgStart == 1 && "llvm.ppc.cfence must carry a chain argument.");
9366     assert(Subtarget.isPPC64() && "Only 64-bit is supported for now.");
9367     return SDValue(DAG.getMachineNode(PPC::CFENCE8, DL, MVT::Other,
9368                                       DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i64,
9369                                                   Op.getOperand(ArgStart + 1)),
9370                                       Op.getOperand(0)),
9371                    0);
9372   }
9373   default:
9374     break;
9375   }
9376   return SDValue();
9377 }
9378 
9379 SDValue PPCTargetLowering::LowerREM(SDValue Op, SelectionDAG &DAG) const {
9380   // Check for a DIV with the same operands as this REM.
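  // If one exists, returning SDValue() lets the REM be expanded in terms of
  // that division (e.g. as sub(x, mul(div(x, y), y))) so the division is
  // shared; otherwise keep the REM node for direct instruction selection.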
9381   for (auto UI : Op.getOperand(1)->uses()) {
9382     if ((Op.getOpcode() == ISD::SREM && UI->getOpcode() == ISD::SDIV) ||
9383         (Op.getOpcode() == ISD::UREM && UI->getOpcode() == ISD::UDIV))
9384       if (UI->getOperand(0) == Op.getOperand(0) &&
9385           UI->getOperand(1) == Op.getOperand(1))
9386         return SDValue();
9387   }
9388   return Op;
9389 }
9390 
9391 // Lower scalar BSWAP64 to xxbrd.
9392 SDValue PPCTargetLowering::LowerBSWAP(SDValue Op, SelectionDAG &DAG) const {
9393   SDLoc dl(Op);
9394   // MTVSRDD
9395   Op = DAG.getNode(ISD::BUILD_VECTOR, dl, MVT::v2i64, Op.getOperand(0),
9396                    Op.getOperand(0));
9397   // XXBRD
9398   Op = DAG.getNode(PPCISD::XXREVERSE, dl, MVT::v2i64, Op);
9399   // MFVSRD
9400   int VectorIndex = 0;
9401   if (Subtarget.isLittleEndian())
9402     VectorIndex = 1;
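  // Both doublewords hold the same reversed value, so the element index is
  // chosen purely so the extract maps onto a direct move from the VSR
  // (element numbering differs between endiannesses).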
9403   Op = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, MVT::i64, Op,
9404                    DAG.getTargetConstant(VectorIndex, dl, MVT::i32));
9405   return Op;
9406 }
9407 
9408 // ATOMIC_CMP_SWAP for i8/i16 needs to zero-extend its input since it will be
9409 // compared to a value that is atomically loaded (atomic loads zero-extend).
9410 SDValue PPCTargetLowering::LowerATOMIC_CMP_SWAP(SDValue Op,
9411                                                 SelectionDAG &DAG) const {
9412   assert(Op.getOpcode() == ISD::ATOMIC_CMP_SWAP &&
9413          "Expecting an atomic compare-and-swap here.");
9414   SDLoc dl(Op);
9415   auto *AtomicNode = cast<AtomicSDNode>(Op.getNode());
9416   EVT MemVT = AtomicNode->getMemoryVT();
9417   if (MemVT.getSizeInBits() >= 32)
9418     return Op;
9419 
9420   SDValue CmpOp = Op.getOperand(2);
9421   // If this is already correctly zero-extended, leave it alone.
9422   auto HighBits = APInt::getHighBitsSet(32, 32 - MemVT.getSizeInBits());
9423   if (DAG.MaskedValueIsZero(CmpOp, HighBits))
9424     return Op;
9425 
9426   // Clear the high bits of the compare operand.
9427   unsigned MaskVal = (1 << MemVT.getSizeInBits()) - 1;
9428   SDValue NewCmpOp =
9429     DAG.getNode(ISD::AND, dl, MVT::i32, CmpOp,
9430                 DAG.getConstant(MaskVal, dl, MVT::i32));
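  // For i8 this masks with 0xFF, and for i16 with 0xFFFF, mirroring the
  // zero-extension performed by the atomic load it will be compared to.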
9431 
9432   // Replace the existing compare operand with the properly zero-extended one.
9433   SmallVector<SDValue, 4> Ops;
9434   for (int i = 0, e = AtomicNode->getNumOperands(); i < e; i++)
9435     Ops.push_back(AtomicNode->getOperand(i));
9436   Ops[2] = NewCmpOp;
9437   MachineMemOperand *MMO = AtomicNode->getMemOperand();
9438   SDVTList Tys = DAG.getVTList(MVT::i32, MVT::Other);
9439   auto NodeTy =
9440     (MemVT == MVT::i8) ? PPCISD::ATOMIC_CMP_SWAP_8 : PPCISD::ATOMIC_CMP_SWAP_16;
9441   return DAG.getMemIntrinsicNode(NodeTy, dl, Tys, Ops, MemVT, MMO);
9442 }
9443 
9444 SDValue PPCTargetLowering::LowerSCALAR_TO_VECTOR(SDValue Op,
9445                                                  SelectionDAG &DAG) const {
9446   SDLoc dl(Op);
9447   // Create a stack slot that is 16-byte aligned.
9448   MachineFrameInfo &MFI = DAG.getMachineFunction().getFrameInfo();
9449   int FrameIdx = MFI.CreateStackObject(16, 16, false);
9450   EVT PtrVT = getPointerTy(DAG.getDataLayout());
9451   SDValue FIdx = DAG.getFrameIndex(FrameIdx, PtrVT);
9452 
9453   // Store the input value into Value#0 of the stack slot.
9454   SDValue Store = DAG.getStore(DAG.getEntryNode(), dl, Op.getOperand(0), FIdx,
9455                                MachinePointerInfo());
9456   // Load it out.
9457   return DAG.getLoad(Op.getValueType(), dl, Store, FIdx, MachinePointerInfo());
9458 }
9459 
9460 SDValue PPCTargetLowering::LowerINSERT_VECTOR_ELT(SDValue Op,
9461                                                   SelectionDAG &DAG) const {
9462   assert(Op.getOpcode() == ISD::INSERT_VECTOR_ELT &&
9463          "Should only be called for ISD::INSERT_VECTOR_ELT");
9464 
9465   ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op.getOperand(2));
9466   // We have legal lowering for constant indices but not for variable ones.
9467   if (!C)
9468     return SDValue();
9469 
9470   EVT VT = Op.getValueType();
9471   SDLoc dl(Op);
9472   SDValue V1 = Op.getOperand(0);
9473   SDValue V2 = Op.getOperand(1);
9474   // We can use MTVSRZ + VECINSERT for v8i16 and v16i8 types.
9475   if (VT == MVT::v8i16 || VT == MVT::v16i8) {
9476     SDValue Mtvsrz = DAG.getNode(PPCISD::MTVSRZ, dl, VT, V2);
9477     unsigned BytesInEachElement = VT.getVectorElementType().getSizeInBits() / 8;
9478     unsigned InsertAtElement = C->getZExtValue();
9479     unsigned InsertAtByte = InsertAtElement * BytesInEachElement;
9480     if (Subtarget.isLittleEndian()) {
9481       InsertAtByte = (16 - BytesInEachElement) - InsertAtByte;
9482     }
9483     return DAG.getNode(PPCISD::VECINSERT, dl, VT, V1, Mtvsrz,
9484                        DAG.getConstant(InsertAtByte, dl, MVT::i32));
9485   }
9486   return Op;
9487 }
9488 
9489 SDValue PPCTargetLowering::LowerEXTRACT_VECTOR_ELT(SDValue Op,
9490                                                    SelectionDAG &DAG) const {
9491   SDLoc dl(Op);
9492   SDNode *N = Op.getNode();
9493 
9494   assert(N->getOperand(0).getValueType() == MVT::v4i1 &&
9495          "Unknown extract_vector_elt type");
9496 
9497   SDValue Value = N->getOperand(0);
9498 
9499   // The first part of this is like the store lowering except that we don't
9500   // need to track the chain.
9501 
9502   // The values are now known to be -1 (false) or 1 (true). To convert this
9503   // into 0 (false) and 1 (true), add 1 and then divide by 2 (multiply by 0.5).
9504   // This can be done with an fma and the 0.5 constant: (V+1.0)*0.5 = 0.5*V+0.5
9505   Value = DAG.getNode(PPCISD::QBFLT, dl, MVT::v4f64, Value);
9506 
9507   // FIXME: We can make this an f32 vector, but the BUILD_VECTOR code needs to
9508   // understand how to form the extending load.
9509   SDValue FPHalfs = DAG.getConstantFP(0.5, dl, MVT::v4f64);
9510 
9511   Value = DAG.getNode(ISD::FMA, dl, MVT::v4f64, Value, FPHalfs, FPHalfs);
9512 
9513   // Now convert to an integer and store.
9514   Value = DAG.getNode(ISD::INTRINSIC_WO_CHAIN, dl, MVT::v4f64,
9515     DAG.getConstant(Intrinsic::ppc_qpx_qvfctiwu, dl, MVT::i32),
9516     Value);
9517 
9518   MachineFrameInfo &MFI = DAG.getMachineFunction().getFrameInfo();
9519   int FrameIdx = MFI.CreateStackObject(16, 16, false);
9520   MachinePointerInfo PtrInfo =
9521       MachinePointerInfo::getFixedStack(DAG.getMachineFunction(), FrameIdx);
9522   EVT PtrVT = getPointerTy(DAG.getDataLayout());
9523   SDValue FIdx = DAG.getFrameIndex(FrameIdx, PtrVT);
9524 
9525   SDValue StoreChain = DAG.getEntryNode();
9526   SDValue Ops[] = {StoreChain,
9527                    DAG.getConstant(Intrinsic::ppc_qpx_qvstfiw, dl, MVT::i32),
9528                    Value, FIdx};
9529   SDVTList VTs = DAG.getVTList(/*chain*/ MVT::Other);
9530 
9531   StoreChain = DAG.getMemIntrinsicNode(ISD::INTRINSIC_VOID,
9532     dl, VTs, Ops, MVT::v4i32, PtrInfo);
9533 
9534   // Extract the value requested.
9535   unsigned Offset = 4*cast<ConstantSDNode>(N->getOperand(1))->getZExtValue();
9536   SDValue Idx = DAG.getConstant(Offset, dl, FIdx.getValueType());
9537   Idx = DAG.getNode(ISD::ADD, dl, FIdx.getValueType(), FIdx, Idx);
9538 
9539   SDValue IntVal =
9540       DAG.getLoad(MVT::i32, dl, StoreChain, Idx, PtrInfo.getWithOffset(Offset));
9541 
9542   if (!Subtarget.useCRBits())
9543     return IntVal;
9544 
9545   return DAG.getNode(ISD::TRUNCATE, dl, MVT::i1, IntVal);
9546 }
9547 
9548 /// Lowering for QPX v4i1 loads
9549 SDValue PPCTargetLowering::LowerVectorLoad(SDValue Op,
9550                                            SelectionDAG &DAG) const {
9551   SDLoc dl(Op);
9552   LoadSDNode *LN = cast<LoadSDNode>(Op.getNode());
9553   SDValue LoadChain = LN->getChain();
9554   SDValue BasePtr = LN->getBasePtr();
9555 
9556   if (Op.getValueType() == MVT::v4f64 ||
9557       Op.getValueType() == MVT::v4f32) {
9558     EVT MemVT = LN->getMemoryVT();
9559     unsigned Alignment = LN->getAlignment();
9560 
9561     // If this load is properly aligned, then it is legal.
9562     if (Alignment >= MemVT.getStoreSize())
9563       return Op;
9564 
9565     EVT ScalarVT = Op.getValueType().getScalarType(),
9566         ScalarMemVT = MemVT.getScalarType();
9567     unsigned Stride = ScalarMemVT.getStoreSize();
9568 
9569     SDValue Vals[4], LoadChains[4];
9570     for (unsigned Idx = 0; Idx < 4; ++Idx) {
9571       SDValue Load;
9572       if (ScalarVT != ScalarMemVT)
9573         Load = DAG.getExtLoad(LN->getExtensionType(), dl, ScalarVT, LoadChain,
9574                               BasePtr,
9575                               LN->getPointerInfo().getWithOffset(Idx * Stride),
9576                               ScalarMemVT, MinAlign(Alignment, Idx * Stride),
9577                               LN->getMemOperand()->getFlags(), LN->getAAInfo());
9578       else
9579         Load = DAG.getLoad(ScalarVT, dl, LoadChain, BasePtr,
9580                            LN->getPointerInfo().getWithOffset(Idx * Stride),
9581                            MinAlign(Alignment, Idx * Stride),
9582                            LN->getMemOperand()->getFlags(), LN->getAAInfo());
9583 
9584       if (Idx == 0 && LN->isIndexed()) {
9585         assert(LN->getAddressingMode() == ISD::PRE_INC &&
9586                "Unknown addressing mode on vector load");
9587         Load = DAG.getIndexedLoad(Load, dl, BasePtr, LN->getOffset(),
9588                                   LN->getAddressingMode());
9589       }
9590 
9591       Vals[Idx] = Load;
9592       LoadChains[Idx] = Load.getValue(1);
9593 
9594       BasePtr = DAG.getNode(ISD::ADD, dl, BasePtr.getValueType(), BasePtr,
9595                             DAG.getConstant(Stride, dl,
9596                                             BasePtr.getValueType()));
9597     }
9598 
9599     SDValue TF =  DAG.getNode(ISD::TokenFactor, dl, MVT::Other, LoadChains);
9600     SDValue Value = DAG.getBuildVector(Op.getValueType(), dl, Vals);
9601 
9602     if (LN->isIndexed()) {
9603       SDValue RetOps[] = { Value, Vals[0].getValue(1), TF };
9604       return DAG.getMergeValues(RetOps, dl);
9605     }
9606 
9607     SDValue RetOps[] = { Value, TF };
9608     return DAG.getMergeValues(RetOps, dl);
9609   }
9610 
9611   assert(Op.getValueType() == MVT::v4i1 && "Unknown load to lower");
9612   assert(LN->isUnindexed() && "Indexed v4i1 loads are not supported");
9613 
9614   // To lower v4i1 from a byte array, we load the byte elements of the
9615   // vector and then reuse the BUILD_VECTOR logic.
9616 
9617   SDValue VectElmts[4], VectElmtChains[4];
9618   for (unsigned i = 0; i < 4; ++i) {
9619     SDValue Idx = DAG.getConstant(i, dl, BasePtr.getValueType());
9620     Idx = DAG.getNode(ISD::ADD, dl, BasePtr.getValueType(), BasePtr, Idx);
9621 
9622     VectElmts[i] = DAG.getExtLoad(
9623         ISD::EXTLOAD, dl, MVT::i32, LoadChain, Idx,
9624         LN->getPointerInfo().getWithOffset(i), MVT::i8,
9625         /* Alignment = */ 1, LN->getMemOperand()->getFlags(), LN->getAAInfo());
9626     VectElmtChains[i] = VectElmts[i].getValue(1);
9627   }
9628 
9629   LoadChain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, VectElmtChains);
9630   SDValue Value = DAG.getBuildVector(MVT::v4i1, dl, VectElmts);
9631 
9632   SDValue RVals[] = { Value, LoadChain };
9633   return DAG.getMergeValues(RVals, dl);
9634 }
9635 
9636 /// Lowering for QPX v4i1 stores
9637 SDValue PPCTargetLowering::LowerVectorStore(SDValue Op,
9638                                             SelectionDAG &DAG) const {
9639   SDLoc dl(Op);
9640   StoreSDNode *SN = cast<StoreSDNode>(Op.getNode());
9641   SDValue StoreChain = SN->getChain();
9642   SDValue BasePtr = SN->getBasePtr();
9643   SDValue Value = SN->getValue();
9644 
9645   if (Value.getValueType() == MVT::v4f64 ||
9646       Value.getValueType() == MVT::v4f32) {
9647     EVT MemVT = SN->getMemoryVT();
9648     unsigned Alignment = SN->getAlignment();
9649 
9650     // If this store is properly aligned, then it is legal.
9651     if (Alignment >= MemVT.getStoreSize())
9652       return Op;
9653 
9654     EVT ScalarVT = Value.getValueType().getScalarType(),
9655         ScalarMemVT = MemVT.getScalarType();
9656     unsigned Stride = ScalarMemVT.getStoreSize();
9657 
9658     SDValue Stores[4];
9659     for (unsigned Idx = 0; Idx < 4; ++Idx) {
9660       SDValue Ex = DAG.getNode(
9661           ISD::EXTRACT_VECTOR_ELT, dl, ScalarVT, Value,
9662           DAG.getConstant(Idx, dl, getVectorIdxTy(DAG.getDataLayout())));
9663       SDValue Store;
9664       if (ScalarVT != ScalarMemVT)
9665         Store =
9666             DAG.getTruncStore(StoreChain, dl, Ex, BasePtr,
9667                               SN->getPointerInfo().getWithOffset(Idx * Stride),
9668                               ScalarMemVT, MinAlign(Alignment, Idx * Stride),
9669                               SN->getMemOperand()->getFlags(), SN->getAAInfo());
9670       else
9671         Store = DAG.getStore(StoreChain, dl, Ex, BasePtr,
9672                              SN->getPointerInfo().getWithOffset(Idx * Stride),
9673                              MinAlign(Alignment, Idx * Stride),
9674                              SN->getMemOperand()->getFlags(), SN->getAAInfo());
9675 
9676       if (Idx == 0 && SN->isIndexed()) {
9677         assert(SN->getAddressingMode() == ISD::PRE_INC &&
9678                "Unknown addressing mode on vector store");
9679         Store = DAG.getIndexedStore(Store, dl, BasePtr, SN->getOffset(),
9680                                     SN->getAddressingMode());
9681       }
9682 
9683       BasePtr = DAG.getNode(ISD::ADD, dl, BasePtr.getValueType(), BasePtr,
9684                             DAG.getConstant(Stride, dl,
9685                                             BasePtr.getValueType()));
9686       Stores[Idx] = Store;
9687     }
9688 
9689     SDValue TF =  DAG.getNode(ISD::TokenFactor, dl, MVT::Other, Stores);
9690 
9691     if (SN->isIndexed()) {
9692       SDValue RetOps[] = { TF, Stores[0].getValue(1) };
9693       return DAG.getMergeValues(RetOps, dl);
9694     }
9695 
9696     return TF;
9697   }
9698 
9699   assert(SN->isUnindexed() && "Indexed v4i1 stores are not supported");
9700   assert(Value.getValueType() == MVT::v4i1 && "Unknown store to lower");
9701 
9702   // The values are now known to be -1 (false) or 1 (true). To convert this
9703   // into 0 (false) and 1 (true), add 1 and then divide by 2 (multiply by 0.5).
9704   // This can be done with an fma and the 0.5 constant: (V+1.0)*0.5 = 0.5*V+0.5
9705   Value = DAG.getNode(PPCISD::QBFLT, dl, MVT::v4f64, Value);
9706 
9707   // FIXME: We can make this an f32 vector, but the BUILD_VECTOR code needs to
9708   // understand how to form the extending load.
9709   SDValue FPHalfs = DAG.getConstantFP(0.5, dl, MVT::v4f64);
9710 
9711   Value = DAG.getNode(ISD::FMA, dl, MVT::v4f64, Value, FPHalfs, FPHalfs);
9712 
9713   // Now convert to an integer and store.
9714   Value = DAG.getNode(ISD::INTRINSIC_WO_CHAIN, dl, MVT::v4f64,
9715     DAG.getConstant(Intrinsic::ppc_qpx_qvfctiwu, dl, MVT::i32),
9716     Value);
9717 
9718   MachineFrameInfo &MFI = DAG.getMachineFunction().getFrameInfo();
9719   int FrameIdx = MFI.CreateStackObject(16, 16, false);
9720   MachinePointerInfo PtrInfo =
9721       MachinePointerInfo::getFixedStack(DAG.getMachineFunction(), FrameIdx);
9722   EVT PtrVT = getPointerTy(DAG.getDataLayout());
9723   SDValue FIdx = DAG.getFrameIndex(FrameIdx, PtrVT);
9724 
9725   SDValue Ops[] = {StoreChain,
9726                    DAG.getConstant(Intrinsic::ppc_qpx_qvstfiw, dl, MVT::i32),
9727                    Value, FIdx};
9728   SDVTList VTs = DAG.getVTList(/*chain*/ MVT::Other);
9729 
9730   StoreChain = DAG.getMemIntrinsicNode(ISD::INTRINSIC_VOID,
9731     dl, VTs, Ops, MVT::v4i32, PtrInfo);
9732 
9733   // Move data into the byte array.
9734   SDValue Loads[4], LoadChains[4];
9735   for (unsigned i = 0; i < 4; ++i) {
9736     unsigned Offset = 4*i;
9737     SDValue Idx = DAG.getConstant(Offset, dl, FIdx.getValueType());
9738     Idx = DAG.getNode(ISD::ADD, dl, FIdx.getValueType(), FIdx, Idx);
9739 
9740     Loads[i] = DAG.getLoad(MVT::i32, dl, StoreChain, Idx,
9741                            PtrInfo.getWithOffset(Offset));
9742     LoadChains[i] = Loads[i].getValue(1);
9743   }
9744 
9745   StoreChain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, LoadChains);
9746 
9747   SDValue Stores[4];
9748   for (unsigned i = 0; i < 4; ++i) {
9749     SDValue Idx = DAG.getConstant(i, dl, BasePtr.getValueType());
9750     Idx = DAG.getNode(ISD::ADD, dl, BasePtr.getValueType(), BasePtr, Idx);
9751 
9752     Stores[i] = DAG.getTruncStore(
9753         StoreChain, dl, Loads[i], Idx, SN->getPointerInfo().getWithOffset(i),
9754         MVT::i8, /* Alignment = */ 1, SN->getMemOperand()->getFlags(),
9755         SN->getAAInfo());
9756   }
9757 
9758   StoreChain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, Stores);
9759 
9760   return StoreChain;
9761 }
9762 
9763 SDValue PPCTargetLowering::LowerMUL(SDValue Op, SelectionDAG &DAG) const {
9764   SDLoc dl(Op);
9765   if (Op.getValueType() == MVT::v4i32) {
9766     SDValue LHS = Op.getOperand(0), RHS = Op.getOperand(1);
9767 
9768     SDValue Zero  = BuildSplatI(  0, 1, MVT::v4i32, DAG, dl);
    // A splat of -16 serves as a +16 shift amount (only the low-order five
    // bits of each element are used by the rotate/shift instructions).
    SDValue Neg16 = BuildSplatI(-16, 4, MVT::v4i32, DAG, dl);
9770 
9771     SDValue RHSSwap =   // = vrlw RHS, 16
9772       BuildIntrinsicOp(Intrinsic::ppc_altivec_vrlw, RHS, Neg16, DAG, dl);
9773 
9774     // Shrinkify inputs to v8i16.
9775     LHS = DAG.getNode(ISD::BITCAST, dl, MVT::v8i16, LHS);
9776     RHS = DAG.getNode(ISD::BITCAST, dl, MVT::v8i16, RHS);
9777     RHSSwap = DAG.getNode(ISD::BITCAST, dl, MVT::v8i16, RHSSwap);
9778 
9779     // Low parts multiplied together, generating 32-bit results (we ignore the
9780     // top parts).
9781     SDValue LoProd = BuildIntrinsicOp(Intrinsic::ppc_altivec_vmulouh,
9782                                         LHS, RHS, DAG, dl, MVT::v4i32);
9783 
9784     SDValue HiProd = BuildIntrinsicOp(Intrinsic::ppc_altivec_vmsumuhm,
9785                                       LHS, RHSSwap, Zero, DAG, dl, MVT::v4i32);
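    // Per 32-bit lane this computes a*b mod 2^32 as
    //   lo(a)*lo(b) + ((hi(a)*lo(b) + lo(a)*hi(b)) << 16);
    // the hi(a)*hi(b) term would be shifted out entirely, so it is dropped.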
9786     // Shift the high parts up 16 bits.
9787     HiProd = BuildIntrinsicOp(Intrinsic::ppc_altivec_vslw, HiProd,
9788                               Neg16, DAG, dl);
9789     return DAG.getNode(ISD::ADD, dl, MVT::v4i32, LoProd, HiProd);
9790   } else if (Op.getValueType() == MVT::v8i16) {
9791     SDValue LHS = Op.getOperand(0), RHS = Op.getOperand(1);
9792 
9793     SDValue Zero = BuildSplatI(0, 1, MVT::v8i16, DAG, dl);
9794 
9795     return BuildIntrinsicOp(Intrinsic::ppc_altivec_vmladduhm,
9796                             LHS, RHS, Zero, DAG, dl);
9797   } else if (Op.getValueType() == MVT::v16i8) {
9798     SDValue LHS = Op.getOperand(0), RHS = Op.getOperand(1);
9799     bool isLittleEndian = Subtarget.isLittleEndian();
9800 
9801     // Multiply the even 8-bit parts, producing 16-bit sums.
9802     SDValue EvenParts = BuildIntrinsicOp(Intrinsic::ppc_altivec_vmuleub,
9803                                            LHS, RHS, DAG, dl, MVT::v8i16);
9804     EvenParts = DAG.getNode(ISD::BITCAST, dl, MVT::v16i8, EvenParts);
9805 
9806     // Multiply the odd 8-bit parts, producing 16-bit sums.
9807     SDValue OddParts = BuildIntrinsicOp(Intrinsic::ppc_altivec_vmuloub,
9808                                           LHS, RHS, DAG, dl, MVT::v8i16);
9809     OddParts = DAG.getNode(ISD::BITCAST, dl, MVT::v16i8, OddParts);
9810 
9811     // Merge the results together.  Because vmuleub and vmuloub are
9812     // instructions with a big-endian bias, we must reverse the
9813     // element numbering and reverse the meaning of "odd" and "even"
9814     // when generating little endian code.
9815     int Ops[16];
9816     for (unsigned i = 0; i != 8; ++i) {
9817       if (isLittleEndian) {
9818         Ops[i*2  ] = 2*i;
9819         Ops[i*2+1] = 2*i+16;
9820       } else {
9821         Ops[i*2  ] = 2*i+1;
9822         Ops[i*2+1] = 2*i+1+16;
9823       }
9824     }
9825     if (isLittleEndian)
9826       return DAG.getVectorShuffle(MVT::v16i8, dl, OddParts, EvenParts, Ops);
9827     else
9828       return DAG.getVectorShuffle(MVT::v16i8, dl, EvenParts, OddParts, Ops);
9829   } else {
9830     llvm_unreachable("Unknown mul to lower!");
9831   }
9832 }
9833 
9834 SDValue PPCTargetLowering::LowerABS(SDValue Op, SelectionDAG &DAG) const {
9835 
9836   assert(Op.getOpcode() == ISD::ABS && "Should only be called for ISD::ABS");
9837 
9838   EVT VT = Op.getValueType();
9839   assert(VT.isVector() &&
9840          "Only set vector abs as custom, scalar abs shouldn't reach here!");
9841   assert((VT == MVT::v2i64 || VT == MVT::v4i32 || VT == MVT::v8i16 ||
9842           VT == MVT::v16i8) &&
9843          "Unexpected vector element type!");
9844   assert((VT != MVT::v2i64 || Subtarget.hasP8Altivec()) &&
9845          "Current subtarget doesn't support smax v2i64!");
9846 
9847   // For vector abs, it can be lowered to:
9848   // abs x
9849   // ==>
9850   // y = -x
9851   // smax(x, y)
9852 
9853   SDLoc dl(Op);
9854   SDValue X = Op.getOperand(0);
9855   SDValue Zero = DAG.getConstant(0, dl, VT);
9856   SDValue Y = DAG.getNode(ISD::SUB, dl, VT, Zero, X);
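  // For example, a lane holding -5 yields smax(-5, 5) = 5; INT_MIN stays
  // INT_MIN, matching the usual two's-complement abs behavior.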
9857 
  // The SMAX patch (https://reviews.llvm.org/D47332) hasn't landed yet, so
  // use the intrinsics for now.
  // TODO: Use ISD::SMAX directly once the SMAX patch has landed.
9861   Intrinsic::ID BifID = Intrinsic::ppc_altivec_vmaxsw;
9862   if (VT == MVT::v2i64)
9863     BifID = Intrinsic::ppc_altivec_vmaxsd;
9864   else if (VT == MVT::v8i16)
9865     BifID = Intrinsic::ppc_altivec_vmaxsh;
9866   else if (VT == MVT::v16i8)
9867     BifID = Intrinsic::ppc_altivec_vmaxsb;
9868 
9869   return BuildIntrinsicOp(BifID, X, Y, DAG, dl, VT);
9870 }
9871 
// Custom lowering for fpext v2f32 to v2f64
9873 SDValue PPCTargetLowering::LowerFP_EXTEND(SDValue Op, SelectionDAG &DAG) const {
9874 
9875   assert(Op.getOpcode() == ISD::FP_EXTEND &&
9876          "Should only be called for ISD::FP_EXTEND");
9877 
9878   // We only want to custom lower an extend from v2f32 to v2f64.
9879   if (Op.getValueType() != MVT::v2f64 ||
9880       Op.getOperand(0).getValueType() != MVT::v2f32)
9881     return SDValue();
9882 
9883   SDLoc dl(Op);
9884   SDValue Op0 = Op.getOperand(0);
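  // Sketch of the transformation: fpext(op(load, load)) for op in
  // {fadd, fmul, fsub}, and fpext(load), are lowered by loading the f32
  // halves with LD_VSX_LH and widening the result with FP_EXTEND_LH.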
9885 
9886   switch (Op0.getOpcode()) {
9887   default:
9888     return SDValue();
9889   case ISD::FADD:
9890   case ISD::FMUL:
9891   case ISD::FSUB: {
9892     SDValue NewLoad[2];
9893     for (unsigned i = 0, ie = Op0.getNumOperands(); i != ie; ++i) {
      // Ensure both inputs are loads.
9895       SDValue LdOp = Op0.getOperand(i);
9896       if (LdOp.getOpcode() != ISD::LOAD)
9897         return SDValue();
9898       // Generate new load node.
9899       LoadSDNode *LD = cast<LoadSDNode>(LdOp);
9900       SDValue LoadOps[] = { LD->getChain(), LD->getBasePtr() };
9901       NewLoad[i] =
9902         DAG.getMemIntrinsicNode(PPCISD::LD_VSX_LH, dl,
9903                                 DAG.getVTList(MVT::v4f32, MVT::Other),
9904                                 LoadOps, LD->getMemoryVT(),
9905                                 LD->getMemOperand());
9906     }
9907     SDValue NewOp = DAG.getNode(Op0.getOpcode(), SDLoc(Op0), MVT::v4f32,
9908                               NewLoad[0], NewLoad[1],
9909                               Op0.getNode()->getFlags());
9910     return DAG.getNode(PPCISD::FP_EXTEND_LH, dl, MVT::v2f64, NewOp);
9911   }
9912   case ISD::LOAD: {
9913     LoadSDNode *LD = cast<LoadSDNode>(Op0);
9914     SDValue LoadOps[] = { LD->getChain(), LD->getBasePtr() };
9915     SDValue NewLd =
9916       DAG.getMemIntrinsicNode(PPCISD::LD_VSX_LH, dl,
9917                               DAG.getVTList(MVT::v4f32, MVT::Other),
9918                               LoadOps, LD->getMemoryVT(), LD->getMemOperand());
9919     return DAG.getNode(PPCISD::FP_EXTEND_LH, dl, MVT::v2f64, NewLd);
9920   }
9921   }
9922   llvm_unreachable("ERROR:Should return for all cases within swtich.");
9923 }
9924 
9925 /// LowerOperation - Provide custom lowering hooks for some operations.
9926 ///
9927 SDValue PPCTargetLowering::LowerOperation(SDValue Op, SelectionDAG &DAG) const {
9928   switch (Op.getOpcode()) {
9929   default: llvm_unreachable("Wasn't expecting to be able to lower this!");
9930   case ISD::ConstantPool:       return LowerConstantPool(Op, DAG);
9931   case ISD::BlockAddress:       return LowerBlockAddress(Op, DAG);
9932   case ISD::GlobalAddress:      return LowerGlobalAddress(Op, DAG);
9933   case ISD::GlobalTLSAddress:   return LowerGlobalTLSAddress(Op, DAG);
9934   case ISD::JumpTable:          return LowerJumpTable(Op, DAG);
9935   case ISD::SETCC:              return LowerSETCC(Op, DAG);
9936   case ISD::INIT_TRAMPOLINE:    return LowerINIT_TRAMPOLINE(Op, DAG);
9937   case ISD::ADJUST_TRAMPOLINE:  return LowerADJUST_TRAMPOLINE(Op, DAG);
9938 
9939   // Variable argument lowering.
9940   case ISD::VASTART:            return LowerVASTART(Op, DAG);
9941   case ISD::VAARG:              return LowerVAARG(Op, DAG);
9942   case ISD::VACOPY:             return LowerVACOPY(Op, DAG);
9943 
9944   case ISD::STACKRESTORE:       return LowerSTACKRESTORE(Op, DAG);
9945   case ISD::DYNAMIC_STACKALLOC: return LowerDYNAMIC_STACKALLOC(Op, DAG);
9946   case ISD::GET_DYNAMIC_AREA_OFFSET:
9947     return LowerGET_DYNAMIC_AREA_OFFSET(Op, DAG);
9948 
9949   // Exception handling lowering.
9950   case ISD::EH_DWARF_CFA:       return LowerEH_DWARF_CFA(Op, DAG);
9951   case ISD::EH_SJLJ_SETJMP:     return lowerEH_SJLJ_SETJMP(Op, DAG);
9952   case ISD::EH_SJLJ_LONGJMP:    return lowerEH_SJLJ_LONGJMP(Op, DAG);
9953 
9954   case ISD::LOAD:               return LowerLOAD(Op, DAG);
9955   case ISD::STORE:              return LowerSTORE(Op, DAG);
9956   case ISD::TRUNCATE:           return LowerTRUNCATE(Op, DAG);
9957   case ISD::SELECT_CC:          return LowerSELECT_CC(Op, DAG);
9958   case ISD::FP_TO_UINT:
9959   case ISD::FP_TO_SINT:         return LowerFP_TO_INT(Op, DAG, SDLoc(Op));
9960   case ISD::UINT_TO_FP:
9961   case ISD::SINT_TO_FP:         return LowerINT_TO_FP(Op, DAG);
9962   case ISD::FLT_ROUNDS_:        return LowerFLT_ROUNDS_(Op, DAG);
9963 
9964   // Lower 64-bit shifts.
9965   case ISD::SHL_PARTS:          return LowerSHL_PARTS(Op, DAG);
9966   case ISD::SRL_PARTS:          return LowerSRL_PARTS(Op, DAG);
9967   case ISD::SRA_PARTS:          return LowerSRA_PARTS(Op, DAG);
9968 
9969   // Vector-related lowering.
9970   case ISD::BUILD_VECTOR:       return LowerBUILD_VECTOR(Op, DAG);
9971   case ISD::VECTOR_SHUFFLE:     return LowerVECTOR_SHUFFLE(Op, DAG);
9972   case ISD::INTRINSIC_WO_CHAIN: return LowerINTRINSIC_WO_CHAIN(Op, DAG);
9973   case ISD::SCALAR_TO_VECTOR:   return LowerSCALAR_TO_VECTOR(Op, DAG);
9974   case ISD::EXTRACT_VECTOR_ELT: return LowerEXTRACT_VECTOR_ELT(Op, DAG);
9975   case ISD::INSERT_VECTOR_ELT:  return LowerINSERT_VECTOR_ELT(Op, DAG);
9976   case ISD::MUL:                return LowerMUL(Op, DAG);
9977   case ISD::ABS:                return LowerABS(Op, DAG);
9978   case ISD::FP_EXTEND:          return LowerFP_EXTEND(Op, DAG);
9979 
9980   // For counter-based loop handling.
9981   case ISD::INTRINSIC_W_CHAIN:  return SDValue();
9982 
9983   case ISD::BITCAST:            return LowerBITCAST(Op, DAG);
9984 
9985   // Frame & Return address.
9986   case ISD::RETURNADDR:         return LowerRETURNADDR(Op, DAG);
9987   case ISD::FRAMEADDR:          return LowerFRAMEADDR(Op, DAG);
9988 
9989   case ISD::INTRINSIC_VOID:
9990     return LowerINTRINSIC_VOID(Op, DAG);
9991   case ISD::SREM:
9992   case ISD::UREM:
9993     return LowerREM(Op, DAG);
9994   case ISD::BSWAP:
9995     return LowerBSWAP(Op, DAG);
9996   case ISD::ATOMIC_CMP_SWAP:
9997     return LowerATOMIC_CMP_SWAP(Op, DAG);
9998   }
9999 }
10000 
10001 void PPCTargetLowering::ReplaceNodeResults(SDNode *N,
10002                                            SmallVectorImpl<SDValue>&Results,
10003                                            SelectionDAG &DAG) const {
10004   SDLoc dl(N);
10005   switch (N->getOpcode()) {
10006   default:
10007     llvm_unreachable("Do not know how to custom type legalize this operation!");
10008   case ISD::READCYCLECOUNTER: {
10009     SDVTList VTs = DAG.getVTList(MVT::i32, MVT::i32, MVT::Other);
10010     SDValue RTB = DAG.getNode(PPCISD::READ_TIME_BASE, dl, VTs, N->getOperand(0));
10011 
10012     Results.push_back(RTB);
10013     Results.push_back(RTB.getValue(1));
10014     Results.push_back(RTB.getValue(2));
10015     break;
10016   }
10017   case ISD::INTRINSIC_W_CHAIN: {
10018     if (cast<ConstantSDNode>(N->getOperand(1))->getZExtValue() !=
10019         Intrinsic::loop_decrement)
10020       break;
10021 
10022     assert(N->getValueType(0) == MVT::i1 &&
10023            "Unexpected result type for CTR decrement intrinsic");
10024     EVT SVT = getSetCCResultType(DAG.getDataLayout(), *DAG.getContext(),
10025                                  N->getValueType(0));
10026     SDVTList VTs = DAG.getVTList(SVT, MVT::Other);
10027     SDValue NewInt = DAG.getNode(N->getOpcode(), dl, VTs, N->getOperand(0),
10028                                  N->getOperand(1));
10029 
10030     Results.push_back(DAG.getNode(ISD::TRUNCATE, dl, MVT::i1, NewInt));
10031     Results.push_back(NewInt.getValue(1));
10032     break;
10033   }
10034   case ISD::VAARG: {
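    // i64 VAARG is custom-legalized only for the 32-bit SVR4 ABI; every other
    // configuration uses the default legalization.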
10035     if (!Subtarget.isSVR4ABI() || Subtarget.isPPC64())
10036       return;
10037 
10038     EVT VT = N->getValueType(0);
10039 
10040     if (VT == MVT::i64) {
10041       SDValue NewNode = LowerVAARG(SDValue(N, 1), DAG);
10042 
10043       Results.push_back(NewNode);
10044       Results.push_back(NewNode.getValue(1));
10045     }
10046     return;
10047   }
10048   case ISD::FP_TO_SINT:
10049   case ISD::FP_TO_UINT:
10050     // LowerFP_TO_INT() can only handle f32 and f64.
10051     if (N->getOperand(0).getValueType() == MVT::ppcf128)
10052       return;
10053     Results.push_back(LowerFP_TO_INT(SDValue(N, 0), DAG, dl));
10054     return;
10055   case ISD::TRUNCATE: {
10056     EVT TrgVT = N->getValueType(0);
10057     EVT OpVT = N->getOperand(0).getValueType();
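    // Custom-lower only vector truncates whose source fits in a single
    // 128-bit register and whose elements have a power-of-2 size; everything
    // else falls back to the default legalization.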
10058     if (TrgVT.isVector() &&
10059         isOperationCustom(N->getOpcode(), TrgVT) &&
10060         OpVT.getSizeInBits() <= 128 &&
10061         isPowerOf2_32(OpVT.getVectorElementType().getSizeInBits()))
10062       Results.push_back(LowerTRUNCATEVector(SDValue(N, 0), DAG));
10063     return;
10064   }
10065   case ISD::BITCAST:
10066     // Don't handle bitcast here.
10067     return;
10068   }
10069 }
10070 
10071 //===----------------------------------------------------------------------===//
10072 //  Other Lowering Code
10073 //===----------------------------------------------------------------------===//
10074 
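// Emit a call to an argumentless PPC intrinsic, e.g. sync or lwsync.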
static Instruction *callIntrinsic(IRBuilder<> &Builder, Intrinsic::ID Id) {
10076   Module *M = Builder.GetInsertBlock()->getParent()->getParent();
10077   Function *Func = Intrinsic::getDeclaration(M, Id);
10078   return Builder.CreateCall(Func, {});
10079 }
10080 
// The mappings for emitLeadingFence/emitTrailingFence are taken from
// http://www.cl.cam.ac.uk/~pes20/cpp/cpp0xmappings.html
10083 Instruction *PPCTargetLowering::emitLeadingFence(IRBuilder<> &Builder,
10084                                                  Instruction *Inst,
10085                                                  AtomicOrdering Ord) const {
10086   if (Ord == AtomicOrdering::SequentiallyConsistent)
10087     return callIntrinsic(Builder, Intrinsic::ppc_sync);
10088   if (isReleaseOrStronger(Ord))
10089     return callIntrinsic(Builder, Intrinsic::ppc_lwsync);
10090   return nullptr;
10091 }
10092 
10093 Instruction *PPCTargetLowering::emitTrailingFence(IRBuilder<> &Builder,
10094                                                   Instruction *Inst,
10095                                                   AtomicOrdering Ord) const {
10096   if (Inst->hasAtomicLoad() && isAcquireOrStronger(Ord)) {
10097     // See http://www.cl.cam.ac.uk/~pes20/cpp/cpp0xmappings.html and
10098     // http://www.rdrop.com/users/paulmck/scalability/paper/N2745r.2011.03.04a.html
10099     // and http://www.cl.cam.ac.uk/~pes20/cppppc/ for justification.
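    // On PPC64, an acquire-or-stronger atomic load is fenced with ppc_cfence
    // (a control-dependency-based fence on the loaded value) instead of an
    // lwsync.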
10100     if (isa<LoadInst>(Inst) && Subtarget.isPPC64())
10101       return Builder.CreateCall(
10102           Intrinsic::getDeclaration(
10103               Builder.GetInsertBlock()->getParent()->getParent(),
10104               Intrinsic::ppc_cfence, {Inst->getType()}),
10105           {Inst});
10106     // FIXME: Can use isync for rmw operation.
10107     return callIntrinsic(Builder, Intrinsic::ppc_lwsync);
10108   }
10109   return nullptr;
10110 }
10111 
10112 MachineBasicBlock *
10113 PPCTargetLowering::EmitAtomicBinary(MachineInstr &MI, MachineBasicBlock *BB,
10114                                     unsigned AtomicSize,
10115                                     unsigned BinOpcode,
10116                                     unsigned CmpOpcode,
10117                                     unsigned CmpPred) const {
10118   // This also handles ATOMIC_SWAP, indicated by BinOpcode==0.
10119   const TargetInstrInfo *TII = Subtarget.getInstrInfo();
10120 
10121   auto LoadMnemonic = PPC::LDARX;
10122   auto StoreMnemonic = PPC::STDCX;
10123   switch (AtomicSize) {
10124   default:
10125     llvm_unreachable("Unexpected size of atomic entity");
10126   case 1:
10127     LoadMnemonic = PPC::LBARX;
10128     StoreMnemonic = PPC::STBCX;
    assert(Subtarget.hasPartwordAtomics() &&
           "Target must support partword atomics for size < 4");
10130     break;
10131   case 2:
10132     LoadMnemonic = PPC::LHARX;
10133     StoreMnemonic = PPC::STHCX;
    assert(Subtarget.hasPartwordAtomics() &&
           "Target must support partword atomics for size < 4");
10135     break;
10136   case 4:
10137     LoadMnemonic = PPC::LWARX;
10138     StoreMnemonic = PPC::STWCX;
10139     break;
10140   case 8:
10141     LoadMnemonic = PPC::LDARX;
10142     StoreMnemonic = PPC::STDCX;
10143     break;
10144   }
10145 
10146   const BasicBlock *LLVM_BB = BB->getBasicBlock();
10147   MachineFunction *F = BB->getParent();
10148   MachineFunction::iterator It = ++BB->getIterator();
10149 
10150   Register dest = MI.getOperand(0).getReg();
10151   Register ptrA = MI.getOperand(1).getReg();
10152   Register ptrB = MI.getOperand(2).getReg();
10153   Register incr = MI.getOperand(3).getReg();
10154   DebugLoc dl = MI.getDebugLoc();
10155 
10156   MachineBasicBlock *loopMBB = F->CreateMachineBasicBlock(LLVM_BB);
10157   MachineBasicBlock *loop2MBB =
10158     CmpOpcode ? F->CreateMachineBasicBlock(LLVM_BB) : nullptr;
10159   MachineBasicBlock *exitMBB = F->CreateMachineBasicBlock(LLVM_BB);
10160   F->insert(It, loopMBB);
10161   if (CmpOpcode)
10162     F->insert(It, loop2MBB);
10163   F->insert(It, exitMBB);
10164   exitMBB->splice(exitMBB->begin(), BB,
10165                   std::next(MachineBasicBlock::iterator(MI)), BB->end());
10166   exitMBB->transferSuccessorsAndUpdatePHIs(BB);
10167 
10168   MachineRegisterInfo &RegInfo = F->getRegInfo();
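  // For ATOMIC_SWAP (BinOpcode == 0) the value to store is simply incr;
  // otherwise a scratch register holds the result of the binary operation.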
  Register TmpReg = (!BinOpcode) ? incr
                                 : RegInfo.createVirtualRegister(
                                       AtomicSize == 8 ? &PPC::G8RCRegClass
                                                       : &PPC::GPRCRegClass);
10172 
10173   //  thisMBB:
10174   //   ...
10175   //   fallthrough --> loopMBB
10176   BB->addSuccessor(loopMBB);
10177 
10178   //  loopMBB:
10179   //   l[wd]arx dest, ptr
10180   //   add r0, dest, incr
10181   //   st[wd]cx. r0, ptr
10182   //   bne- loopMBB
10183   //   fallthrough --> exitMBB
10184 
  // For max/min...
  //  loopMBB:
  //   l[wd]arx dest, ptr
  //   cmpl?[wd] incr, dest
  //   bc<CmpPred> exitMBB
  //  loop2MBB:
  //   st[wd]cx. incr, ptr
  //   bne- loopMBB
  //   fallthrough --> exitMBB
10194 
10195   BB = loopMBB;
10196   BuildMI(BB, dl, TII->get(LoadMnemonic), dest)
10197     .addReg(ptrA).addReg(ptrB);
10198   if (BinOpcode)
10199     BuildMI(BB, dl, TII->get(BinOpcode), TmpReg).addReg(incr).addReg(dest);
10200   if (CmpOpcode) {
10201     // Signed comparisons of byte or halfword values must be sign-extended.
10202     if (CmpOpcode == PPC::CMPW && AtomicSize < 4) {
      unsigned ExtReg = RegInfo.createVirtualRegister(&PPC::GPRCRegClass);
10204       BuildMI(BB, dl, TII->get(AtomicSize == 1 ? PPC::EXTSB : PPC::EXTSH),
10205               ExtReg).addReg(dest);
10206       BuildMI(BB, dl, TII->get(CmpOpcode), PPC::CR0)
10207         .addReg(incr).addReg(ExtReg);
10208     } else
10209       BuildMI(BB, dl, TII->get(CmpOpcode), PPC::CR0)
10210         .addReg(incr).addReg(dest);
10211 
10212     BuildMI(BB, dl, TII->get(PPC::BCC))
10213       .addImm(CmpPred).addReg(PPC::CR0).addMBB(exitMBB);
10214     BB->addSuccessor(loop2MBB);
10215     BB->addSuccessor(exitMBB);
10216     BB = loop2MBB;
10217   }
10218   BuildMI(BB, dl, TII->get(StoreMnemonic))
10219     .addReg(TmpReg).addReg(ptrA).addReg(ptrB);
10220   BuildMI(BB, dl, TII->get(PPC::BCC))
10221     .addImm(PPC::PRED_NE).addReg(PPC::CR0).addMBB(loopMBB);
10222   BB->addSuccessor(loopMBB);
10223   BB->addSuccessor(exitMBB);
10224 
10225   //  exitMBB:
10226   //   ...
10227   BB = exitMBB;
10228   return BB;
10229 }
10230 
10231 MachineBasicBlock *PPCTargetLowering::EmitPartwordAtomicBinary(
10232     MachineInstr &MI, MachineBasicBlock *BB,
10233     bool is8bit, // operation
10234     unsigned BinOpcode, unsigned CmpOpcode, unsigned CmpPred) const {
  // If we support partword atomic mnemonics, just use them.
10236   if (Subtarget.hasPartwordAtomics())
10237     return EmitAtomicBinary(MI, BB, is8bit ? 1 : 2, BinOpcode, CmpOpcode,
10238                             CmpPred);
10239 
10240   // This also handles ATOMIC_SWAP, indicated by BinOpcode==0.
10241   const TargetInstrInfo *TII = Subtarget.getInstrInfo();
  // In 64-bit mode we have to use 64-bit registers for addresses, even though
  // lwarx/stwcx. operate on 32-bit data.  With the 32-bit atomics we can use
  // the address registers without caring whether they're 32 or 64 bits, but
  // here we're doing actual arithmetic on the addresses.
10246   bool is64bit = Subtarget.isPPC64();
10247   bool isLittleEndian = Subtarget.isLittleEndian();
10248   unsigned ZeroReg = is64bit ? PPC::ZERO8 : PPC::ZERO;
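  // In the X-form larx/stcx. instructions below, using the ZERO register as
  // RA yields an effective address of 0 + RB.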
10249 
10250   const BasicBlock *LLVM_BB = BB->getBasicBlock();
10251   MachineFunction *F = BB->getParent();
10252   MachineFunction::iterator It = ++BB->getIterator();
10253 
10254   unsigned dest = MI.getOperand(0).getReg();
10255   unsigned ptrA = MI.getOperand(1).getReg();
10256   unsigned ptrB = MI.getOperand(2).getReg();
10257   unsigned incr = MI.getOperand(3).getReg();
10258   DebugLoc dl = MI.getDebugLoc();
10259 
10260   MachineBasicBlock *loopMBB = F->CreateMachineBasicBlock(LLVM_BB);
10261   MachineBasicBlock *loop2MBB =
10262       CmpOpcode ? F->CreateMachineBasicBlock(LLVM_BB) : nullptr;
10263   MachineBasicBlock *exitMBB = F->CreateMachineBasicBlock(LLVM_BB);
10264   F->insert(It, loopMBB);
10265   if (CmpOpcode)
10266     F->insert(It, loop2MBB);
10267   F->insert(It, exitMBB);
10268   exitMBB->splice(exitMBB->begin(), BB,
10269                   std::next(MachineBasicBlock::iterator(MI)), BB->end());
10270   exitMBB->transferSuccessorsAndUpdatePHIs(BB);
10271 
10272   MachineRegisterInfo &RegInfo = F->getRegInfo();
10273   const TargetRegisterClass *RC =
10274       is64bit ? &PPC::G8RCRegClass : &PPC::GPRCRegClass;
10275   const TargetRegisterClass *GPRC = &PPC::GPRCRegClass;
10276 
10277   Register PtrReg = RegInfo.createVirtualRegister(RC);
10278   Register Shift1Reg = RegInfo.createVirtualRegister(GPRC);
10279   Register ShiftReg =
10280       isLittleEndian ? Shift1Reg : RegInfo.createVirtualRegister(GPRC);
10281   Register Incr2Reg = RegInfo.createVirtualRegister(GPRC);
10282   Register MaskReg = RegInfo.createVirtualRegister(GPRC);
10283   Register Mask2Reg = RegInfo.createVirtualRegister(GPRC);
10284   Register Mask3Reg = RegInfo.createVirtualRegister(GPRC);
10285   Register Tmp2Reg = RegInfo.createVirtualRegister(GPRC);
10286   Register Tmp3Reg = RegInfo.createVirtualRegister(GPRC);
10287   Register Tmp4Reg = RegInfo.createVirtualRegister(GPRC);
10288   Register TmpDestReg = RegInfo.createVirtualRegister(GPRC);
10289   Register Ptr1Reg;
10290   Register TmpReg =
10291       (!BinOpcode) ? Incr2Reg : RegInfo.createVirtualRegister(GPRC);
10292 
10293   //  thisMBB:
10294   //   ...
10295   //   fallthrough --> loopMBB
10296   BB->addSuccessor(loopMBB);
10297 
10298   // The 4-byte load must be aligned, while a char or short may be
10299   // anywhere in the word.  Hence all this nasty bookkeeping code.
10300   //   add ptr1, ptrA, ptrB [copy if ptrA==0]
10301   //   rlwinm shift1, ptr1, 3, 27, 28 [3, 27, 27]
10302   //   xori shift, shift1, 24 [16]
10303   //   rlwinm ptr, ptr1, 0, 0, 29
10304   //   slw incr2, incr, shift
10305   //   li mask2, 255 [li mask3, 0; ori mask2, mask3, 65535]
10306   //   slw mask, mask2, shift
10307   //  loopMBB:
10308   //   lwarx tmpDest, ptr
10309   //   add tmp, tmpDest, incr2
10310   //   andc tmp2, tmpDest, mask
10311   //   and tmp3, tmp, mask
10312   //   or tmp4, tmp3, tmp2
10313   //   stwcx. tmp4, ptr
10314   //   bne- loopMBB
10315   //   fallthrough --> exitMBB
10316   //   srw dest, tmpDest, shift
10317   if (ptrA != ZeroReg) {
10318     Ptr1Reg = RegInfo.createVirtualRegister(RC);
10319     BuildMI(BB, dl, TII->get(is64bit ? PPC::ADD8 : PPC::ADD4), Ptr1Reg)
10320         .addReg(ptrA)
10321         .addReg(ptrB);
10322   } else {
10323     Ptr1Reg = ptrB;
10324   }
  // We need to use the 32-bit subregister to avoid a register class mismatch
  // in 64-bit mode.
10327   BuildMI(BB, dl, TII->get(PPC::RLWINM), Shift1Reg)
10328       .addReg(Ptr1Reg, 0, is64bit ? PPC::sub_32 : 0)
10329       .addImm(3)
10330       .addImm(27)
10331       .addImm(is8bit ? 28 : 27);
10332   if (!isLittleEndian)
10333     BuildMI(BB, dl, TII->get(PPC::XORI), ShiftReg)
10334         .addReg(Shift1Reg)
10335         .addImm(is8bit ? 24 : 16);
10336   if (is64bit)
10337     BuildMI(BB, dl, TII->get(PPC::RLDICR), PtrReg)
10338         .addReg(Ptr1Reg)
10339         .addImm(0)
10340         .addImm(61);
10341   else
10342     BuildMI(BB, dl, TII->get(PPC::RLWINM), PtrReg)
10343         .addReg(Ptr1Reg)
10344         .addImm(0)
10345         .addImm(0)
10346         .addImm(29);
10347   BuildMI(BB, dl, TII->get(PPC::SLW), Incr2Reg).addReg(incr).addReg(ShiftReg);
10348   if (is8bit)
10349     BuildMI(BB, dl, TII->get(PPC::LI), Mask2Reg).addImm(255);
10350   else {
10351     BuildMI(BB, dl, TII->get(PPC::LI), Mask3Reg).addImm(0);
10352     BuildMI(BB, dl, TII->get(PPC::ORI), Mask2Reg)
10353         .addReg(Mask3Reg)
10354         .addImm(65535);
10355   }
10356   BuildMI(BB, dl, TII->get(PPC::SLW), MaskReg)
10357       .addReg(Mask2Reg)
10358       .addReg(ShiftReg);
10359 
10360   BB = loopMBB;
10361   BuildMI(BB, dl, TII->get(PPC::LWARX), TmpDestReg)
10362       .addReg(ZeroReg)
10363       .addReg(PtrReg);
10364   if (BinOpcode)
10365     BuildMI(BB, dl, TII->get(BinOpcode), TmpReg)
10366         .addReg(Incr2Reg)
10367         .addReg(TmpDestReg);
10368   BuildMI(BB, dl, TII->get(PPC::ANDC), Tmp2Reg)
10369       .addReg(TmpDestReg)
10370       .addReg(MaskReg);
10371   BuildMI(BB, dl, TII->get(PPC::AND), Tmp3Reg).addReg(TmpReg).addReg(MaskReg);
10372   if (CmpOpcode) {
10373     // For unsigned comparisons, we can directly compare the shifted values.
10374     // For signed comparisons we shift and sign extend.
10375     unsigned SReg = RegInfo.createVirtualRegister(GPRC);
10376     BuildMI(BB, dl, TII->get(PPC::AND), SReg)
10377         .addReg(TmpDestReg)
10378         .addReg(MaskReg);
10379     unsigned ValueReg = SReg;
10380     unsigned CmpReg = Incr2Reg;
10381     if (CmpOpcode == PPC::CMPW) {
10382       ValueReg = RegInfo.createVirtualRegister(GPRC);
10383       BuildMI(BB, dl, TII->get(PPC::SRW), ValueReg)
10384           .addReg(SReg)
10385           .addReg(ShiftReg);
10386       unsigned ValueSReg = RegInfo.createVirtualRegister(GPRC);
10387       BuildMI(BB, dl, TII->get(is8bit ? PPC::EXTSB : PPC::EXTSH), ValueSReg)
10388           .addReg(ValueReg);
10389       ValueReg = ValueSReg;
10390       CmpReg = incr;
10391     }
10392     BuildMI(BB, dl, TII->get(CmpOpcode), PPC::CR0)
10393         .addReg(CmpReg)
10394         .addReg(ValueReg);
10395     BuildMI(BB, dl, TII->get(PPC::BCC))
10396         .addImm(CmpPred)
10397         .addReg(PPC::CR0)
10398         .addMBB(exitMBB);
10399     BB->addSuccessor(loop2MBB);
10400     BB->addSuccessor(exitMBB);
10401     BB = loop2MBB;
10402   }
10403   BuildMI(BB, dl, TII->get(PPC::OR), Tmp4Reg).addReg(Tmp3Reg).addReg(Tmp2Reg);
10404   BuildMI(BB, dl, TII->get(PPC::STWCX))
10405       .addReg(Tmp4Reg)
10406       .addReg(ZeroReg)
10407       .addReg(PtrReg);
10408   BuildMI(BB, dl, TII->get(PPC::BCC))
10409       .addImm(PPC::PRED_NE)
10410       .addReg(PPC::CR0)
10411       .addMBB(loopMBB);
10412   BB->addSuccessor(loopMBB);
10413   BB->addSuccessor(exitMBB);
10414 
10415   //  exitMBB:
10416   //   ...
10417   BB = exitMBB;
10418   BuildMI(*BB, BB->begin(), dl, TII->get(PPC::SRW), dest)
10419       .addReg(TmpDestReg)
10420       .addReg(ShiftReg);
10421   return BB;
10422 }
10423 
10424 llvm::MachineBasicBlock *
10425 PPCTargetLowering::emitEHSjLjSetJmp(MachineInstr &MI,
10426                                     MachineBasicBlock *MBB) const {
10427   DebugLoc DL = MI.getDebugLoc();
10428   const TargetInstrInfo *TII = Subtarget.getInstrInfo();
10429   const PPCRegisterInfo *TRI = Subtarget.getRegisterInfo();
10430 
10431   MachineFunction *MF = MBB->getParent();
10432   MachineRegisterInfo &MRI = MF->getRegInfo();
10433 
10434   const BasicBlock *BB = MBB->getBasicBlock();
10435   MachineFunction::iterator I = ++MBB->getIterator();
10436 
10437   unsigned DstReg = MI.getOperand(0).getReg();
10438   const TargetRegisterClass *RC = MRI.getRegClass(DstReg);
10439   assert(TRI->isTypeLegalForClass(*RC, MVT::i32) && "Invalid destination!");
10440   unsigned mainDstReg = MRI.createVirtualRegister(RC);
10441   unsigned restoreDstReg = MRI.createVirtualRegister(RC);
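  // mainDstReg carries the 0 produced on the direct setjmp return path;
  // restoreDstReg carries the 1 seen when control returns via longjmp.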
10442 
10443   MVT PVT = getPointerTy(MF->getDataLayout());
10444   assert((PVT == MVT::i64 || PVT == MVT::i32) &&
10445          "Invalid Pointer Size!");
10446   // For v = setjmp(buf), we generate
10447   //
10448   // thisMBB:
10449   //  SjLjSetup mainMBB
10450   //  bl mainMBB
10451   //  v_restore = 1
10452   //  b sinkMBB
10453   //
10454   // mainMBB:
10455   //  buf[LabelOffset] = LR
10456   //  v_main = 0
10457   //
10458   // sinkMBB:
10459   //  v = phi(main, restore)
10460   //
10461 
10462   MachineBasicBlock *thisMBB = MBB;
10463   MachineBasicBlock *mainMBB = MF->CreateMachineBasicBlock(BB);
10464   MachineBasicBlock *sinkMBB = MF->CreateMachineBasicBlock(BB);
10465   MF->insert(I, mainMBB);
10466   MF->insert(I, sinkMBB);
10467 
10468   MachineInstrBuilder MIB;
10469 
10470   // Transfer the remainder of BB and its successor edges to sinkMBB.
10471   sinkMBB->splice(sinkMBB->begin(), MBB,
10472                   std::next(MachineBasicBlock::iterator(MI)), MBB->end());
10473   sinkMBB->transferSuccessorsAndUpdatePHIs(MBB);
10474 
10475   // Note that the structure of the jmp_buf used here is not compatible
10476   // with that used by libc, and is not designed to be. Specifically, it
10477   // stores only those 'reserved' registers that LLVM does not otherwise
10478   // understand how to spill. Also, by convention, by the time this
10479   // intrinsic is called, Clang has already stored the frame address in the
  // first slot of the buffer and the stack address in the third. Following
  // the X86 target code, we'll store the jump address in the second slot.
  // We also need to save the TOC pointer (R2) to handle jumps between shared
10483   // libraries, and that will be stored in the fourth slot. The thread
10484   // identifier (R13) is not affected.
10485 
10486   // thisMBB:
10487   const int64_t LabelOffset = 1 * PVT.getStoreSize();
10488   const int64_t TOCOffset   = 3 * PVT.getStoreSize();
10489   const int64_t BPOffset    = 4 * PVT.getStoreSize();
10490 
  // Prepare the IP in a register.
10492   const TargetRegisterClass *PtrRC = getRegClassFor(PVT);
10493   unsigned LabelReg = MRI.createVirtualRegister(PtrRC);
10494   unsigned BufReg = MI.getOperand(1).getReg();
10495 
10496   if (Subtarget.isPPC64() && Subtarget.isSVR4ABI()) {
10497     setUsesTOCBasePtr(*MBB->getParent());
10498     MIB = BuildMI(*thisMBB, MI, DL, TII->get(PPC::STD))
10499               .addReg(PPC::X2)
10500               .addImm(TOCOffset)
10501               .addReg(BufReg)
10502               .cloneMemRefs(MI);
10503   }
10504 
  // Naked functions never have a base pointer, and so we use r1. For all
  // other functions, this decision must be deferred until PEI.
10507   unsigned BaseReg;
10508   if (MF->getFunction().hasFnAttribute(Attribute::Naked))
10509     BaseReg = Subtarget.isPPC64() ? PPC::X1 : PPC::R1;
10510   else
10511     BaseReg = Subtarget.isPPC64() ? PPC::BP8 : PPC::BP;
10512 
10513   MIB = BuildMI(*thisMBB, MI, DL,
10514                 TII->get(Subtarget.isPPC64() ? PPC::STD : PPC::STW))
10515             .addReg(BaseReg)
10516             .addImm(BPOffset)
10517             .addReg(BufReg)
10518             .cloneMemRefs(MI);
10519 
10520   // Setup
10521   MIB = BuildMI(*thisMBB, MI, DL, TII->get(PPC::BCLalways)).addMBB(mainMBB);
10522   MIB.addRegMask(TRI->getNoPreservedMask());
10523 
10524   BuildMI(*thisMBB, MI, DL, TII->get(PPC::LI), restoreDstReg).addImm(1);
10525 
10526   MIB = BuildMI(*thisMBB, MI, DL, TII->get(PPC::EH_SjLj_Setup))
10527           .addMBB(mainMBB);
10528   MIB = BuildMI(*thisMBB, MI, DL, TII->get(PPC::B)).addMBB(sinkMBB);
10529 
10530   thisMBB->addSuccessor(mainMBB, BranchProbability::getZero());
10531   thisMBB->addSuccessor(sinkMBB, BranchProbability::getOne());
10532 
10533   // mainMBB:
10534   //  mainDstReg = 0
10535   MIB =
10536       BuildMI(mainMBB, DL,
10537               TII->get(Subtarget.isPPC64() ? PPC::MFLR8 : PPC::MFLR), LabelReg);
10538 
10539   // Store IP
10540   if (Subtarget.isPPC64()) {
10541     MIB = BuildMI(mainMBB, DL, TII->get(PPC::STD))
10542             .addReg(LabelReg)
10543             .addImm(LabelOffset)
10544             .addReg(BufReg);
10545   } else {
10546     MIB = BuildMI(mainMBB, DL, TII->get(PPC::STW))
10547             .addReg(LabelReg)
10548             .addImm(LabelOffset)
10549             .addReg(BufReg);
10550   }
10551   MIB.cloneMemRefs(MI);
10552 
10553   BuildMI(mainMBB, DL, TII->get(PPC::LI), mainDstReg).addImm(0);
10554   mainMBB->addSuccessor(sinkMBB);
10555 
10556   // sinkMBB:
10557   BuildMI(*sinkMBB, sinkMBB->begin(), DL,
10558           TII->get(PPC::PHI), DstReg)
10559     .addReg(mainDstReg).addMBB(mainMBB)
10560     .addReg(restoreDstReg).addMBB(thisMBB);
10561 
10562   MI.eraseFromParent();
10563   return sinkMBB;
10564 }
10565 
10566 MachineBasicBlock *
10567 PPCTargetLowering::emitEHSjLjLongJmp(MachineInstr &MI,
10568                                      MachineBasicBlock *MBB) const {
10569   DebugLoc DL = MI.getDebugLoc();
10570   const TargetInstrInfo *TII = Subtarget.getInstrInfo();
10571 
10572   MachineFunction *MF = MBB->getParent();
10573   MachineRegisterInfo &MRI = MF->getRegInfo();
10574 
10575   MVT PVT = getPointerTy(MF->getDataLayout());
10576   assert((PVT == MVT::i64 || PVT == MVT::i32) &&
10577          "Invalid Pointer Size!");
10578 
10579   const TargetRegisterClass *RC =
10580     (PVT == MVT::i64) ? &PPC::G8RCRegClass : &PPC::GPRCRegClass;
10581   unsigned Tmp = MRI.createVirtualRegister(RC);
10582   // Since FP is only updated here but NOT referenced, it's treated as GPR.
10583   unsigned FP  = (PVT == MVT::i64) ? PPC::X31 : PPC::R31;
10584   unsigned SP  = (PVT == MVT::i64) ? PPC::X1 : PPC::R1;
10585   unsigned BP =
10586       (PVT == MVT::i64)
10587           ? PPC::X30
10588           : (Subtarget.isSVR4ABI() && isPositionIndependent() ? PPC::R29
10589                                                               : PPC::R30);
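  // On 32-bit SVR4, position-independent code reserves R30 as the PIC base
  // register, so the base pointer lives in R29 instead.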
10590 
10591   MachineInstrBuilder MIB;
10592 
10593   const int64_t LabelOffset = 1 * PVT.getStoreSize();
10594   const int64_t SPOffset    = 2 * PVT.getStoreSize();
10595   const int64_t TOCOffset   = 3 * PVT.getStoreSize();
10596   const int64_t BPOffset    = 4 * PVT.getStoreSize();
10597 
10598   unsigned BufReg = MI.getOperand(0).getReg();
10599 
10600   // Reload FP (the jumped-to function may not have had a
10601   // frame pointer, and if so, then its r31 will be restored
10602   // as necessary).
10603   if (PVT == MVT::i64) {
10604     MIB = BuildMI(*MBB, MI, DL, TII->get(PPC::LD), FP)
10605             .addImm(0)
10606             .addReg(BufReg);
10607   } else {
10608     MIB = BuildMI(*MBB, MI, DL, TII->get(PPC::LWZ), FP)
10609             .addImm(0)
10610             .addReg(BufReg);
10611   }
10612   MIB.cloneMemRefs(MI);
10613 
10614   // Reload IP
10615   if (PVT == MVT::i64) {
10616     MIB = BuildMI(*MBB, MI, DL, TII->get(PPC::LD), Tmp)
10617             .addImm(LabelOffset)
10618             .addReg(BufReg);
10619   } else {
10620     MIB = BuildMI(*MBB, MI, DL, TII->get(PPC::LWZ), Tmp)
10621             .addImm(LabelOffset)
10622             .addReg(BufReg);
10623   }
10624   MIB.cloneMemRefs(MI);
10625 
10626   // Reload SP
10627   if (PVT == MVT::i64) {
10628     MIB = BuildMI(*MBB, MI, DL, TII->get(PPC::LD), SP)
10629             .addImm(SPOffset)
10630             .addReg(BufReg);
10631   } else {
10632     MIB = BuildMI(*MBB, MI, DL, TII->get(PPC::LWZ), SP)
10633             .addImm(SPOffset)
10634             .addReg(BufReg);
10635   }
10636   MIB.cloneMemRefs(MI);
10637 
10638   // Reload BP
10639   if (PVT == MVT::i64) {
10640     MIB = BuildMI(*MBB, MI, DL, TII->get(PPC::LD), BP)
10641             .addImm(BPOffset)
10642             .addReg(BufReg);
10643   } else {
10644     MIB = BuildMI(*MBB, MI, DL, TII->get(PPC::LWZ), BP)
10645             .addImm(BPOffset)
10646             .addReg(BufReg);
10647   }
10648   MIB.cloneMemRefs(MI);
10649 
10650   // Reload TOC
10651   if (PVT == MVT::i64 && Subtarget.isSVR4ABI()) {
10652     setUsesTOCBasePtr(*MBB->getParent());
10653     MIB = BuildMI(*MBB, MI, DL, TII->get(PPC::LD), PPC::X2)
10654               .addImm(TOCOffset)
10655               .addReg(BufReg)
10656               .cloneMemRefs(MI);
10657   }
10658 
10659   // Jump
10660   BuildMI(*MBB, MI, DL,
10661           TII->get(PVT == MVT::i64 ? PPC::MTCTR8 : PPC::MTCTR)).addReg(Tmp);
10662   BuildMI(*MBB, MI, DL, TII->get(PVT == MVT::i64 ? PPC::BCTR8 : PPC::BCTR));
10663 
10664   MI.eraseFromParent();
10665   return MBB;
10666 }
10667 
10668 MachineBasicBlock *
10669 PPCTargetLowering::EmitInstrWithCustomInserter(MachineInstr &MI,
10670                                                MachineBasicBlock *BB) const {
10671   if (MI.getOpcode() == TargetOpcode::STACKMAP ||
10672       MI.getOpcode() == TargetOpcode::PATCHPOINT) {
10673     if (Subtarget.isPPC64() && Subtarget.isSVR4ABI() &&
10674         MI.getOpcode() == TargetOpcode::PATCHPOINT) {
10675       // Call lowering should have added an r2 operand to indicate a dependence
      // on the TOC base pointer value. It can't, however, because there is no
10677       // way to mark the dependence as implicit there, and so the stackmap code
10678       // will confuse it with a regular operand. Instead, add the dependence
10679       // here.
10680       MI.addOperand(MachineOperand::CreateReg(PPC::X2, false, true));
10681     }
10682 
10683     return emitPatchPoint(MI, BB);
10684   }
10685 
10686   if (MI.getOpcode() == PPC::EH_SjLj_SetJmp32 ||
10687       MI.getOpcode() == PPC::EH_SjLj_SetJmp64) {
10688     return emitEHSjLjSetJmp(MI, BB);
10689   } else if (MI.getOpcode() == PPC::EH_SjLj_LongJmp32 ||
10690              MI.getOpcode() == PPC::EH_SjLj_LongJmp64) {
10691     return emitEHSjLjLongJmp(MI, BB);
10692   }
10693 
10694   const TargetInstrInfo *TII = Subtarget.getInstrInfo();
10695 
10696   // To "insert" these instructions we actually have to insert their
10697   // control-flow patterns.
10698   const BasicBlock *LLVM_BB = BB->getBasicBlock();
10699   MachineFunction::iterator It = ++BB->getIterator();
10700 
10701   MachineFunction *F = BB->getParent();
10702 
10703   if (MI.getOpcode() == PPC::SELECT_CC_I4 ||
10704       MI.getOpcode() == PPC::SELECT_CC_I8 || MI.getOpcode() == PPC::SELECT_I4 ||
10705       MI.getOpcode() == PPC::SELECT_I8) {
10706     SmallVector<MachineOperand, 2> Cond;
10707     if (MI.getOpcode() == PPC::SELECT_CC_I4 ||
10708         MI.getOpcode() == PPC::SELECT_CC_I8)
10709       Cond.push_back(MI.getOperand(4));
10710     else
10711       Cond.push_back(MachineOperand::CreateImm(PPC::PRED_BIT_SET));
10712     Cond.push_back(MI.getOperand(1));
10713 
10714     DebugLoc dl = MI.getDebugLoc();
10715     TII->insertSelect(*BB, MI, dl, MI.getOperand(0).getReg(), Cond,
10716                       MI.getOperand(2).getReg(), MI.getOperand(3).getReg());
10717   } else if (MI.getOpcode() == PPC::SELECT_CC_I4 ||
10718              MI.getOpcode() == PPC::SELECT_CC_I8 ||
10719              MI.getOpcode() == PPC::SELECT_CC_F4 ||
10720              MI.getOpcode() == PPC::SELECT_CC_F8 ||
10721              MI.getOpcode() == PPC::SELECT_CC_F16 ||
10722              MI.getOpcode() == PPC::SELECT_CC_QFRC ||
10723              MI.getOpcode() == PPC::SELECT_CC_QSRC ||
10724              MI.getOpcode() == PPC::SELECT_CC_QBRC ||
10725              MI.getOpcode() == PPC::SELECT_CC_VRRC ||
10726              MI.getOpcode() == PPC::SELECT_CC_VSFRC ||
10727              MI.getOpcode() == PPC::SELECT_CC_VSSRC ||
10728              MI.getOpcode() == PPC::SELECT_CC_VSRC ||
10729              MI.getOpcode() == PPC::SELECT_CC_SPE4 ||
10730              MI.getOpcode() == PPC::SELECT_CC_SPE ||
10731              MI.getOpcode() == PPC::SELECT_I4 ||
10732              MI.getOpcode() == PPC::SELECT_I8 ||
10733              MI.getOpcode() == PPC::SELECT_F4 ||
10734              MI.getOpcode() == PPC::SELECT_F8 ||
10735              MI.getOpcode() == PPC::SELECT_F16 ||
10736              MI.getOpcode() == PPC::SELECT_QFRC ||
10737              MI.getOpcode() == PPC::SELECT_QSRC ||
10738              MI.getOpcode() == PPC::SELECT_QBRC ||
10739              MI.getOpcode() == PPC::SELECT_SPE ||
10740              MI.getOpcode() == PPC::SELECT_SPE4 ||
10741              MI.getOpcode() == PPC::SELECT_VRRC ||
10742              MI.getOpcode() == PPC::SELECT_VSFRC ||
10743              MI.getOpcode() == PPC::SELECT_VSSRC ||
10744              MI.getOpcode() == PPC::SELECT_VSRC) {
10745     // The incoming instruction knows the destination vreg to set, the
10746     // condition code register to branch on, the true/false values to
10747     // select between, and a branch opcode to use.
10748 
10749     //  thisMBB:
10750     //  ...
10751     //   TrueVal = ...
10752     //   cmpTY ccX, r1, r2
10753     //   bCC copy1MBB
10754     //   fallthrough --> copy0MBB
10755     MachineBasicBlock *thisMBB = BB;
10756     MachineBasicBlock *copy0MBB = F->CreateMachineBasicBlock(LLVM_BB);
10757     MachineBasicBlock *sinkMBB = F->CreateMachineBasicBlock(LLVM_BB);
10758     DebugLoc dl = MI.getDebugLoc();
10759     F->insert(It, copy0MBB);
10760     F->insert(It, sinkMBB);
10761 
10762     // Transfer the remainder of BB and its successor edges to sinkMBB.
10763     sinkMBB->splice(sinkMBB->begin(), BB,
10764                     std::next(MachineBasicBlock::iterator(MI)), BB->end());
10765     sinkMBB->transferSuccessorsAndUpdatePHIs(BB);
10766 
10767     // Next, add the true and fallthrough blocks as its successors.
10768     BB->addSuccessor(copy0MBB);
10769     BB->addSuccessor(sinkMBB);
10770 
10771     if (MI.getOpcode() == PPC::SELECT_I4 || MI.getOpcode() == PPC::SELECT_I8 ||
10772         MI.getOpcode() == PPC::SELECT_F4 || MI.getOpcode() == PPC::SELECT_F8 ||
10773         MI.getOpcode() == PPC::SELECT_F16 ||
10774         MI.getOpcode() == PPC::SELECT_SPE4 ||
10775         MI.getOpcode() == PPC::SELECT_SPE ||
10776         MI.getOpcode() == PPC::SELECT_QFRC ||
10777         MI.getOpcode() == PPC::SELECT_QSRC ||
10778         MI.getOpcode() == PPC::SELECT_QBRC ||
10779         MI.getOpcode() == PPC::SELECT_VRRC ||
10780         MI.getOpcode() == PPC::SELECT_VSFRC ||
10781         MI.getOpcode() == PPC::SELECT_VSSRC ||
10782         MI.getOpcode() == PPC::SELECT_VSRC) {
10783       BuildMI(BB, dl, TII->get(PPC::BC))
10784           .addReg(MI.getOperand(1).getReg())
10785           .addMBB(sinkMBB);
10786     } else {
10787       unsigned SelectPred = MI.getOperand(4).getImm();
10788       BuildMI(BB, dl, TII->get(PPC::BCC))
10789           .addImm(SelectPred)
10790           .addReg(MI.getOperand(1).getReg())
10791           .addMBB(sinkMBB);
10792     }
10793 
10794     //  copy0MBB:
10795     //   %FalseValue = ...
10796     //   # fallthrough to sinkMBB
10797     BB = copy0MBB;
10798 
10799     // Update machine-CFG edges
10800     BB->addSuccessor(sinkMBB);
10801 
10802     //  sinkMBB:
10803     //   %Result = phi [ %FalseValue, copy0MBB ], [ %TrueValue, thisMBB ]
10804     //  ...
10805     BB = sinkMBB;
10806     BuildMI(*BB, BB->begin(), dl, TII->get(PPC::PHI), MI.getOperand(0).getReg())
10807         .addReg(MI.getOperand(3).getReg())
10808         .addMBB(copy0MBB)
10809         .addReg(MI.getOperand(2).getReg())
10810         .addMBB(thisMBB);
10811   } else if (MI.getOpcode() == PPC::ReadTB) {
10812     // To read the 64-bit time-base register on a 32-bit target, we read the
10813     // two halves. Should the counter have wrapped while it was being read, we
10814     // need to try again.
10815     // ...
10816     // readLoop:
10817     // mfspr Rx,TBU # load from TBU
10818     // mfspr Ry,TB  # load from TB
10819     // mfspr Rz,TBU # load from TBU
10820     // cmpw crX,Rx,Rz # check if 'old'='new'
10821     // bne readLoop   # branch if they're not equal
10822     // ...
10823 
10824     MachineBasicBlock *readMBB = F->CreateMachineBasicBlock(LLVM_BB);
10825     MachineBasicBlock *sinkMBB = F->CreateMachineBasicBlock(LLVM_BB);
10826     DebugLoc dl = MI.getDebugLoc();
10827     F->insert(It, readMBB);
10828     F->insert(It, sinkMBB);
10829 
10830     // Transfer the remainder of BB and its successor edges to sinkMBB.
10831     sinkMBB->splice(sinkMBB->begin(), BB,
10832                     std::next(MachineBasicBlock::iterator(MI)), BB->end());
10833     sinkMBB->transferSuccessorsAndUpdatePHIs(BB);
10834 
10835     BB->addSuccessor(readMBB);
10836     BB = readMBB;
10837 
10838     MachineRegisterInfo &RegInfo = F->getRegInfo();
10839     unsigned ReadAgainReg = RegInfo.createVirtualRegister(&PPC::GPRCRegClass);
10840     unsigned LoReg = MI.getOperand(0).getReg();
10841     unsigned HiReg = MI.getOperand(1).getReg();
10842 
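    // SPR 269 is TBU (the upper half of the time base) and SPR 268 is TBL
    // (the lower half).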
10843     BuildMI(BB, dl, TII->get(PPC::MFSPR), HiReg).addImm(269);
10844     BuildMI(BB, dl, TII->get(PPC::MFSPR), LoReg).addImm(268);
10845     BuildMI(BB, dl, TII->get(PPC::MFSPR), ReadAgainReg).addImm(269);
10846 
10847     unsigned CmpReg = RegInfo.createVirtualRegister(&PPC::CRRCRegClass);
10848 
10849     BuildMI(BB, dl, TII->get(PPC::CMPW), CmpReg)
10850         .addReg(HiReg)
10851         .addReg(ReadAgainReg);
10852     BuildMI(BB, dl, TII->get(PPC::BCC))
10853         .addImm(PPC::PRED_NE)
10854         .addReg(CmpReg)
10855         .addMBB(readMBB);
10856 
10857     BB->addSuccessor(readMBB);
10858     BB->addSuccessor(sinkMBB);
10859   } else if (MI.getOpcode() == PPC::ATOMIC_LOAD_ADD_I8)
10860     BB = EmitPartwordAtomicBinary(MI, BB, true, PPC::ADD4);
10861   else if (MI.getOpcode() == PPC::ATOMIC_LOAD_ADD_I16)
10862     BB = EmitPartwordAtomicBinary(MI, BB, false, PPC::ADD4);
10863   else if (MI.getOpcode() == PPC::ATOMIC_LOAD_ADD_I32)
10864     BB = EmitAtomicBinary(MI, BB, 4, PPC::ADD4);
10865   else if (MI.getOpcode() == PPC::ATOMIC_LOAD_ADD_I64)
10866     BB = EmitAtomicBinary(MI, BB, 8, PPC::ADD8);
10867 
10868   else if (MI.getOpcode() == PPC::ATOMIC_LOAD_AND_I8)
10869     BB = EmitPartwordAtomicBinary(MI, BB, true, PPC::AND);
10870   else if (MI.getOpcode() == PPC::ATOMIC_LOAD_AND_I16)
10871     BB = EmitPartwordAtomicBinary(MI, BB, false, PPC::AND);
10872   else if (MI.getOpcode() == PPC::ATOMIC_LOAD_AND_I32)
10873     BB = EmitAtomicBinary(MI, BB, 4, PPC::AND);
10874   else if (MI.getOpcode() == PPC::ATOMIC_LOAD_AND_I64)
10875     BB = EmitAtomicBinary(MI, BB, 8, PPC::AND8);
10876 
10877   else if (MI.getOpcode() == PPC::ATOMIC_LOAD_OR_I8)
10878     BB = EmitPartwordAtomicBinary(MI, BB, true, PPC::OR);
10879   else if (MI.getOpcode() == PPC::ATOMIC_LOAD_OR_I16)
10880     BB = EmitPartwordAtomicBinary(MI, BB, false, PPC::OR);
10881   else if (MI.getOpcode() == PPC::ATOMIC_LOAD_OR_I32)
10882     BB = EmitAtomicBinary(MI, BB, 4, PPC::OR);
10883   else if (MI.getOpcode() == PPC::ATOMIC_LOAD_OR_I64)
10884     BB = EmitAtomicBinary(MI, BB, 8, PPC::OR8);
10885 
10886   else if (MI.getOpcode() == PPC::ATOMIC_LOAD_XOR_I8)
10887     BB = EmitPartwordAtomicBinary(MI, BB, true, PPC::XOR);
10888   else if (MI.getOpcode() == PPC::ATOMIC_LOAD_XOR_I16)
10889     BB = EmitPartwordAtomicBinary(MI, BB, false, PPC::XOR);
10890   else if (MI.getOpcode() == PPC::ATOMIC_LOAD_XOR_I32)
10891     BB = EmitAtomicBinary(MI, BB, 4, PPC::XOR);
10892   else if (MI.getOpcode() == PPC::ATOMIC_LOAD_XOR_I64)
10893     BB = EmitAtomicBinary(MI, BB, 8, PPC::XOR8);
10894 
10895   else if (MI.getOpcode() == PPC::ATOMIC_LOAD_NAND_I8)
10896     BB = EmitPartwordAtomicBinary(MI, BB, true, PPC::NAND);
10897   else if (MI.getOpcode() == PPC::ATOMIC_LOAD_NAND_I16)
10898     BB = EmitPartwordAtomicBinary(MI, BB, false, PPC::NAND);
10899   else if (MI.getOpcode() == PPC::ATOMIC_LOAD_NAND_I32)
10900     BB = EmitAtomicBinary(MI, BB, 4, PPC::NAND);
10901   else if (MI.getOpcode() == PPC::ATOMIC_LOAD_NAND_I64)
10902     BB = EmitAtomicBinary(MI, BB, 8, PPC::NAND8);
10903 
10904   else if (MI.getOpcode() == PPC::ATOMIC_LOAD_SUB_I8)
10905     BB = EmitPartwordAtomicBinary(MI, BB, true, PPC::SUBF);
10906   else if (MI.getOpcode() == PPC::ATOMIC_LOAD_SUB_I16)
10907     BB = EmitPartwordAtomicBinary(MI, BB, false, PPC::SUBF);
10908   else if (MI.getOpcode() == PPC::ATOMIC_LOAD_SUB_I32)
10909     BB = EmitAtomicBinary(MI, BB, 4, PPC::SUBF);
10910   else if (MI.getOpcode() == PPC::ATOMIC_LOAD_SUB_I64)
10911     BB = EmitAtomicBinary(MI, BB, 8, PPC::SUBF8);
10912 
10913   else if (MI.getOpcode() == PPC::ATOMIC_LOAD_MIN_I8)
10914     BB = EmitPartwordAtomicBinary(MI, BB, true, 0, PPC::CMPW, PPC::PRED_GE);
10915   else if (MI.getOpcode() == PPC::ATOMIC_LOAD_MIN_I16)
10916     BB = EmitPartwordAtomicBinary(MI, BB, false, 0, PPC::CMPW, PPC::PRED_GE);
10917   else if (MI.getOpcode() == PPC::ATOMIC_LOAD_MIN_I32)
10918     BB = EmitAtomicBinary(MI, BB, 4, 0, PPC::CMPW, PPC::PRED_GE);
10919   else if (MI.getOpcode() == PPC::ATOMIC_LOAD_MIN_I64)
10920     BB = EmitAtomicBinary(MI, BB, 8, 0, PPC::CMPD, PPC::PRED_GE);
10921 
10922   else if (MI.getOpcode() == PPC::ATOMIC_LOAD_MAX_I8)
10923     BB = EmitPartwordAtomicBinary(MI, BB, true, 0, PPC::CMPW, PPC::PRED_LE);
10924   else if (MI.getOpcode() == PPC::ATOMIC_LOAD_MAX_I16)
10925     BB = EmitPartwordAtomicBinary(MI, BB, false, 0, PPC::CMPW, PPC::PRED_LE);
10926   else if (MI.getOpcode() == PPC::ATOMIC_LOAD_MAX_I32)
10927     BB = EmitAtomicBinary(MI, BB, 4, 0, PPC::CMPW, PPC::PRED_LE);
10928   else if (MI.getOpcode() == PPC::ATOMIC_LOAD_MAX_I64)
10929     BB = EmitAtomicBinary(MI, BB, 8, 0, PPC::CMPD, PPC::PRED_LE);
10930 
10931   else if (MI.getOpcode() == PPC::ATOMIC_LOAD_UMIN_I8)
10932     BB = EmitPartwordAtomicBinary(MI, BB, true, 0, PPC::CMPLW, PPC::PRED_GE);
10933   else if (MI.getOpcode() == PPC::ATOMIC_LOAD_UMIN_I16)
10934     BB = EmitPartwordAtomicBinary(MI, BB, false, 0, PPC::CMPLW, PPC::PRED_GE);
10935   else if (MI.getOpcode() == PPC::ATOMIC_LOAD_UMIN_I32)
10936     BB = EmitAtomicBinary(MI, BB, 4, 0, PPC::CMPLW, PPC::PRED_GE);
10937   else if (MI.getOpcode() == PPC::ATOMIC_LOAD_UMIN_I64)
10938     BB = EmitAtomicBinary(MI, BB, 8, 0, PPC::CMPLD, PPC::PRED_GE);
10939 
10940   else if (MI.getOpcode() == PPC::ATOMIC_LOAD_UMAX_I8)
10941     BB = EmitPartwordAtomicBinary(MI, BB, true, 0, PPC::CMPLW, PPC::PRED_LE);
10942   else if (MI.getOpcode() == PPC::ATOMIC_LOAD_UMAX_I16)
10943     BB = EmitPartwordAtomicBinary(MI, BB, false, 0, PPC::CMPLW, PPC::PRED_LE);
10944   else if (MI.getOpcode() == PPC::ATOMIC_LOAD_UMAX_I32)
10945     BB = EmitAtomicBinary(MI, BB, 4, 0, PPC::CMPLW, PPC::PRED_LE);
10946   else if (MI.getOpcode() == PPC::ATOMIC_LOAD_UMAX_I64)
10947     BB = EmitAtomicBinary(MI, BB, 8, 0, PPC::CMPLD, PPC::PRED_LE);
10948 
10949   else if (MI.getOpcode() == PPC::ATOMIC_SWAP_I8)
10950     BB = EmitPartwordAtomicBinary(MI, BB, true, 0);
10951   else if (MI.getOpcode() == PPC::ATOMIC_SWAP_I16)
10952     BB = EmitPartwordAtomicBinary(MI, BB, false, 0);
10953   else if (MI.getOpcode() == PPC::ATOMIC_SWAP_I32)
10954     BB = EmitAtomicBinary(MI, BB, 4, 0);
10955   else if (MI.getOpcode() == PPC::ATOMIC_SWAP_I64)
10956     BB = EmitAtomicBinary(MI, BB, 8, 0);
10957   else if (MI.getOpcode() == PPC::ATOMIC_CMP_SWAP_I32 ||
10958            MI.getOpcode() == PPC::ATOMIC_CMP_SWAP_I64 ||
10959            (Subtarget.hasPartwordAtomics() &&
10960             MI.getOpcode() == PPC::ATOMIC_CMP_SWAP_I8) ||
10961            (Subtarget.hasPartwordAtomics() &&
10962             MI.getOpcode() == PPC::ATOMIC_CMP_SWAP_I16)) {
10963     bool is64bit = MI.getOpcode() == PPC::ATOMIC_CMP_SWAP_I64;
10964 
10965     auto LoadMnemonic = PPC::LDARX;
10966     auto StoreMnemonic = PPC::STDCX;
10967     switch (MI.getOpcode()) {
10968     default:
10969       llvm_unreachable("Compare and swap of unknown size");
10970     case PPC::ATOMIC_CMP_SWAP_I8:
10971       LoadMnemonic = PPC::LBARX;
10972       StoreMnemonic = PPC::STBCX;
      assert(Subtarget.hasPartwordAtomics() &&
             "Target must support partword atomics");
10974       break;
10975     case PPC::ATOMIC_CMP_SWAP_I16:
10976       LoadMnemonic = PPC::LHARX;
10977       StoreMnemonic = PPC::STHCX;
      assert(Subtarget.hasPartwordAtomics() &&
             "Target must support partword atomics");
10979       break;
10980     case PPC::ATOMIC_CMP_SWAP_I32:
10981       LoadMnemonic = PPC::LWARX;
10982       StoreMnemonic = PPC::STWCX;
10983       break;
10984     case PPC::ATOMIC_CMP_SWAP_I64:
10985       LoadMnemonic = PPC::LDARX;
10986       StoreMnemonic = PPC::STDCX;
10987       break;
10988     }
10989     unsigned dest = MI.getOperand(0).getReg();
10990     unsigned ptrA = MI.getOperand(1).getReg();
10991     unsigned ptrB = MI.getOperand(2).getReg();
10992     unsigned oldval = MI.getOperand(3).getReg();
10993     unsigned newval = MI.getOperand(4).getReg();
10994     DebugLoc dl = MI.getDebugLoc();
10995 
10996     MachineBasicBlock *loop1MBB = F->CreateMachineBasicBlock(LLVM_BB);
10997     MachineBasicBlock *loop2MBB = F->CreateMachineBasicBlock(LLVM_BB);
10998     MachineBasicBlock *midMBB = F->CreateMachineBasicBlock(LLVM_BB);
10999     MachineBasicBlock *exitMBB = F->CreateMachineBasicBlock(LLVM_BB);
11000     F->insert(It, loop1MBB);
11001     F->insert(It, loop2MBB);
11002     F->insert(It, midMBB);
11003     F->insert(It, exitMBB);
11004     exitMBB->splice(exitMBB->begin(), BB,
11005                     std::next(MachineBasicBlock::iterator(MI)), BB->end());
11006     exitMBB->transferSuccessorsAndUpdatePHIs(BB);
11007 
11008     //  thisMBB:
11009     //   ...
    //   fallthrough --> loop1MBB
11011     BB->addSuccessor(loop1MBB);
11012 
    // loop1MBB:
    //   l[bhwd]arx dest, ptr
    //   cmp[wd] oldval, dest
    //   bne- midMBB
    // loop2MBB:
    //   st[bhwd]cx. newval, ptr
    //   bne- loop1MBB
    //   b exitMBB
    // midMBB:
    //   st[bhwd]cx. dest, ptr
    // exitMBB:
11024     BB = loop1MBB;
11025     BuildMI(BB, dl, TII->get(LoadMnemonic), dest).addReg(ptrA).addReg(ptrB);
11026     BuildMI(BB, dl, TII->get(is64bit ? PPC::CMPD : PPC::CMPW), PPC::CR0)
11027         .addReg(oldval)
11028         .addReg(dest);
11029     BuildMI(BB, dl, TII->get(PPC::BCC))
11030         .addImm(PPC::PRED_NE)
11031         .addReg(PPC::CR0)
11032         .addMBB(midMBB);
11033     BB->addSuccessor(loop2MBB);
11034     BB->addSuccessor(midMBB);
11035 
11036     BB = loop2MBB;
11037     BuildMI(BB, dl, TII->get(StoreMnemonic))
11038         .addReg(newval)
11039         .addReg(ptrA)
11040         .addReg(ptrB);
11041     BuildMI(BB, dl, TII->get(PPC::BCC))
11042         .addImm(PPC::PRED_NE)
11043         .addReg(PPC::CR0)
11044         .addMBB(loop1MBB);
11045     BuildMI(BB, dl, TII->get(PPC::B)).addMBB(exitMBB);
11046     BB->addSuccessor(loop1MBB);
11047     BB->addSuccessor(exitMBB);
11048 
11049     BB = midMBB;
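    // Storing back the unchanged value here only clears the reservation taken
    // by the earlier larx; the comparison failed, so nothing is modified.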
11050     BuildMI(BB, dl, TII->get(StoreMnemonic))
11051         .addReg(dest)
11052         .addReg(ptrA)
11053         .addReg(ptrB);
11054     BB->addSuccessor(exitMBB);
11055 
11056     //  exitMBB:
11057     //   ...
11058     BB = exitMBB;
11059   } else if (MI.getOpcode() == PPC::ATOMIC_CMP_SWAP_I8 ||
11060              MI.getOpcode() == PPC::ATOMIC_CMP_SWAP_I16) {
11061     // We must use 64-bit registers for addresses when targeting 64-bit,
11062     // since we're actually doing arithmetic on them.  Other registers
11063     // can be 32-bit.
11064     bool is64bit = Subtarget.isPPC64();
11065     bool isLittleEndian = Subtarget.isLittleEndian();
11066     bool is8bit = MI.getOpcode() == PPC::ATOMIC_CMP_SWAP_I8;
11067 
11068     unsigned dest = MI.getOperand(0).getReg();
11069     unsigned ptrA = MI.getOperand(1).getReg();
11070     unsigned ptrB = MI.getOperand(2).getReg();
11071     unsigned oldval = MI.getOperand(3).getReg();
11072     unsigned newval = MI.getOperand(4).getReg();
11073     DebugLoc dl = MI.getDebugLoc();
11074 
11075     MachineBasicBlock *loop1MBB = F->CreateMachineBasicBlock(LLVM_BB);
11076     MachineBasicBlock *loop2MBB = F->CreateMachineBasicBlock(LLVM_BB);
11077     MachineBasicBlock *midMBB = F->CreateMachineBasicBlock(LLVM_BB);
11078     MachineBasicBlock *exitMBB = F->CreateMachineBasicBlock(LLVM_BB);
11079     F->insert(It, loop1MBB);
11080     F->insert(It, loop2MBB);
11081     F->insert(It, midMBB);
11082     F->insert(It, exitMBB);
11083     exitMBB->splice(exitMBB->begin(), BB,
11084                     std::next(MachineBasicBlock::iterator(MI)), BB->end());
11085     exitMBB->transferSuccessorsAndUpdatePHIs(BB);
11086 
11087     MachineRegisterInfo &RegInfo = F->getRegInfo();
11088     const TargetRegisterClass *RC =
11089         is64bit ? &PPC::G8RCRegClass : &PPC::GPRCRegClass;
11090     const TargetRegisterClass *GPRC = &PPC::GPRCRegClass;
11091 
11092     Register PtrReg = RegInfo.createVirtualRegister(RC);
11093     Register Shift1Reg = RegInfo.createVirtualRegister(GPRC);
11094     Register ShiftReg =
11095         isLittleEndian ? Shift1Reg : RegInfo.createVirtualRegister(GPRC);
11096     Register NewVal2Reg = RegInfo.createVirtualRegister(GPRC);
11097     Register NewVal3Reg = RegInfo.createVirtualRegister(GPRC);
11098     Register OldVal2Reg = RegInfo.createVirtualRegister(GPRC);
11099     Register OldVal3Reg = RegInfo.createVirtualRegister(GPRC);
11100     Register MaskReg = RegInfo.createVirtualRegister(GPRC);
11101     Register Mask2Reg = RegInfo.createVirtualRegister(GPRC);
11102     Register Mask3Reg = RegInfo.createVirtualRegister(GPRC);
11103     Register Tmp2Reg = RegInfo.createVirtualRegister(GPRC);
11104     Register Tmp4Reg = RegInfo.createVirtualRegister(GPRC);
11105     Register TmpDestReg = RegInfo.createVirtualRegister(GPRC);
11106     Register Ptr1Reg;
11107     Register TmpReg = RegInfo.createVirtualRegister(GPRC);
11108     Register ZeroReg = is64bit ? PPC::ZERO8 : PPC::ZERO;
11109     //  thisMBB:
11110     //   ...
    //   fallthrough --> loop1MBB
11112     BB->addSuccessor(loop1MBB);
11113 
11114     // The 4-byte load must be aligned, while a char or short may be
11115     // anywhere in the word.  Hence all this nasty bookkeeping code.
11116     //   add ptr1, ptrA, ptrB [copy if ptrA==0]
11117     //   rlwinm shift1, ptr1, 3, 27, 28 [3, 27, 27]
11118     //   xori shift, shift1, 24 [16]
11119     //   rlwinm ptr, ptr1, 0, 0, 29
11120     //   slw newval2, newval, shift
    //   slw oldval2, oldval, shift
11122     //   li mask2, 255 [li mask3, 0; ori mask2, mask3, 65535]
11123     //   slw mask, mask2, shift
11124     //   and newval3, newval2, mask
11125     //   and oldval3, oldval2, mask
11126     // loop1MBB:
11127     //   lwarx tmpDest, ptr
11128     //   and tmp, tmpDest, mask
11129     //   cmpw tmp, oldval3
11130     //   bne- midMBB
11131     // loop2MBB:
11132     //   andc tmp2, tmpDest, mask
11133     //   or tmp4, tmp2, newval3
11134     //   stwcx. tmp4, ptr
11135     //   bne- loop1MBB
    //   b exitMBB
    // midMBB:
    //   stwcx. tmpDest, ptr
    // exitMBB:
    //   srw dest, tmp, shift
11141     if (ptrA != ZeroReg) {
11142       Ptr1Reg = RegInfo.createVirtualRegister(RC);
11143       BuildMI(BB, dl, TII->get(is64bit ? PPC::ADD8 : PPC::ADD4), Ptr1Reg)
11144           .addReg(ptrA)
11145           .addReg(ptrB);
11146     } else {
11147       Ptr1Reg = ptrB;
11148     }
11149 
    // We need to use the 32-bit subregister to avoid a register class
    // mismatch in 64-bit mode.
11152     BuildMI(BB, dl, TII->get(PPC::RLWINM), Shift1Reg)
11153         .addReg(Ptr1Reg, 0, is64bit ? PPC::sub_32 : 0)
11154         .addImm(3)
11155         .addImm(27)
11156         .addImm(is8bit ? 28 : 27);
11157     if (!isLittleEndian)
11158       BuildMI(BB, dl, TII->get(PPC::XORI), ShiftReg)
11159           .addReg(Shift1Reg)
11160           .addImm(is8bit ? 24 : 16);
11161     if (is64bit)
11162       BuildMI(BB, dl, TII->get(PPC::RLDICR), PtrReg)
11163           .addReg(Ptr1Reg)
11164           .addImm(0)
11165           .addImm(61);
11166     else
11167       BuildMI(BB, dl, TII->get(PPC::RLWINM), PtrReg)
11168           .addReg(Ptr1Reg)
11169           .addImm(0)
11170           .addImm(0)
11171           .addImm(29);
11172     BuildMI(BB, dl, TII->get(PPC::SLW), NewVal2Reg)
11173         .addReg(newval)
11174         .addReg(ShiftReg);
11175     BuildMI(BB, dl, TII->get(PPC::SLW), OldVal2Reg)
11176         .addReg(oldval)
11177         .addReg(ShiftReg);
11178     if (is8bit)
11179       BuildMI(BB, dl, TII->get(PPC::LI), Mask2Reg).addImm(255);
11180     else {
11181       BuildMI(BB, dl, TII->get(PPC::LI), Mask3Reg).addImm(0);
11182       BuildMI(BB, dl, TII->get(PPC::ORI), Mask2Reg)
11183           .addReg(Mask3Reg)
11184           .addImm(65535);
11185     }
11186     BuildMI(BB, dl, TII->get(PPC::SLW), MaskReg)
11187         .addReg(Mask2Reg)
11188         .addReg(ShiftReg);
11189     BuildMI(BB, dl, TII->get(PPC::AND), NewVal3Reg)
11190         .addReg(NewVal2Reg)
11191         .addReg(MaskReg);
11192     BuildMI(BB, dl, TII->get(PPC::AND), OldVal3Reg)
11193         .addReg(OldVal2Reg)
11194         .addReg(MaskReg);
11195 
11196     BB = loop1MBB;
11197     BuildMI(BB, dl, TII->get(PPC::LWARX), TmpDestReg)
11198         .addReg(ZeroReg)
11199         .addReg(PtrReg);
11200     BuildMI(BB, dl, TII->get(PPC::AND), TmpReg)
11201         .addReg(TmpDestReg)
11202         .addReg(MaskReg);
11203     BuildMI(BB, dl, TII->get(PPC::CMPW), PPC::CR0)
11204         .addReg(TmpReg)
11205         .addReg(OldVal3Reg);
11206     BuildMI(BB, dl, TII->get(PPC::BCC))
11207         .addImm(PPC::PRED_NE)
11208         .addReg(PPC::CR0)
11209         .addMBB(midMBB);
11210     BB->addSuccessor(loop2MBB);
11211     BB->addSuccessor(midMBB);
11212 
11213     BB = loop2MBB;
11214     BuildMI(BB, dl, TII->get(PPC::ANDC), Tmp2Reg)
11215         .addReg(TmpDestReg)
11216         .addReg(MaskReg);
11217     BuildMI(BB, dl, TII->get(PPC::OR), Tmp4Reg)
11218         .addReg(Tmp2Reg)
11219         .addReg(NewVal3Reg);
11220     BuildMI(BB, dl, TII->get(PPC::STWCX))
11221         .addReg(Tmp4Reg)
11222         .addReg(ZeroReg)
11223         .addReg(PtrReg);
11224     BuildMI(BB, dl, TII->get(PPC::BCC))
11225         .addImm(PPC::PRED_NE)
11226         .addReg(PPC::CR0)
11227         .addMBB(loop1MBB);
11228     BuildMI(BB, dl, TII->get(PPC::B)).addMBB(exitMBB);
11229     BB->addSuccessor(loop1MBB);
11230     BB->addSuccessor(exitMBB);
11231 
11232     BB = midMBB;
11233     BuildMI(BB, dl, TII->get(PPC::STWCX))
11234         .addReg(TmpDestReg)
11235         .addReg(ZeroReg)
11236         .addReg(PtrReg);
11237     BB->addSuccessor(exitMBB);
11238 
11239     //  exitMBB:
11240     //   ...
11241     BB = exitMBB;
11242     BuildMI(*BB, BB->begin(), dl, TII->get(PPC::SRW), dest)
11243         .addReg(TmpReg)
11244         .addReg(ShiftReg);
11245   } else if (MI.getOpcode() == PPC::FADDrtz) {
11246     // This pseudo performs an FADD with rounding mode temporarily forced
11247     // to round-to-zero.  We emit this via custom inserter since the FPSCR
11248     // is not modeled at the SelectionDAG level.
11249     unsigned Dest = MI.getOperand(0).getReg();
11250     unsigned Src1 = MI.getOperand(1).getReg();
11251     unsigned Src2 = MI.getOperand(2).getReg();
11252     DebugLoc dl = MI.getDebugLoc();
11253 
11254     MachineRegisterInfo &RegInfo = F->getRegInfo();
11255     unsigned MFFSReg = RegInfo.createVirtualRegister(&PPC::F8RCRegClass);
11256 
11257     // Save FPSCR value.
11258     BuildMI(*BB, MI, dl, TII->get(PPC::MFFS), MFFSReg);
11259 
11260     // Set rounding mode to round-to-zero.
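    // The two MTFSB ops target the RN (rounding control) field of FPSCR:
    // setting its low bit and clearing its high bit selects RN = 0b01,
    // i.e. round toward zero.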
11261     BuildMI(*BB, MI, dl, TII->get(PPC::MTFSB1)).addImm(31);
11262     BuildMI(*BB, MI, dl, TII->get(PPC::MTFSB0)).addImm(30);
11263 
11264     // Perform addition.
11265     BuildMI(*BB, MI, dl, TII->get(PPC::FADD), Dest).addReg(Src1).addReg(Src2);
11266 
11267     // Restore FPSCR value.
11268     BuildMI(*BB, MI, dl, TII->get(PPC::MTFSFb)).addImm(1).addReg(MFFSReg);
11269   } else if (MI.getOpcode() == PPC::ANDIo_1_EQ_BIT ||
11270              MI.getOpcode() == PPC::ANDIo_1_GT_BIT ||
11271              MI.getOpcode() == PPC::ANDIo_1_EQ_BIT8 ||
11272              MI.getOpcode() == PPC::ANDIo_1_GT_BIT8) {
11273     unsigned Opcode = (MI.getOpcode() == PPC::ANDIo_1_EQ_BIT8 ||
11274                        MI.getOpcode() == PPC::ANDIo_1_GT_BIT8)
11275                           ? PPC::ANDIo8
11276                           : PPC::ANDIo;
11277     bool isEQ = (MI.getOpcode() == PPC::ANDIo_1_EQ_BIT ||
11278                  MI.getOpcode() == PPC::ANDIo_1_EQ_BIT8);
11279 
11280     MachineRegisterInfo &RegInfo = F->getRegInfo();
11281     unsigned Dest = RegInfo.createVirtualRegister(
11282         Opcode == PPC::ANDIo ? &PPC::GPRCRegClass : &PPC::G8RCRegClass);
11283 
11284     DebugLoc dl = MI.getDebugLoc();
11285     BuildMI(*BB, MI, dl, TII->get(Opcode), Dest)
11286         .addReg(MI.getOperand(1).getReg())
11287         .addImm(1);
11288     BuildMI(*BB, MI, dl, TII->get(TargetOpcode::COPY),
11289             MI.getOperand(0).getReg())
11290         .addReg(isEQ ? PPC::CR0EQ : PPC::CR0GT);
11291   } else if (MI.getOpcode() == PPC::TCHECK_RET) {
11292     DebugLoc Dl = MI.getDebugLoc();
11293     MachineRegisterInfo &RegInfo = F->getRegInfo();
11294     unsigned CRReg = RegInfo.createVirtualRegister(&PPC::CRRCRegClass);
11295     BuildMI(*BB, MI, Dl, TII->get(PPC::TCHECK), CRReg);
11296     BuildMI(*BB, MI, Dl, TII->get(TargetOpcode::COPY),
11297             MI.getOperand(0).getReg())
11298         .addReg(CRReg);
11299   } else if (MI.getOpcode() == PPC::TBEGIN_RET) {
11300     DebugLoc Dl = MI.getDebugLoc();
11301     unsigned Imm = MI.getOperand(1).getImm();
11302     BuildMI(*BB, MI, Dl, TII->get(PPC::TBEGIN)).addImm(Imm);
11303     BuildMI(*BB, MI, Dl, TII->get(TargetOpcode::COPY),
11304             MI.getOperand(0).getReg())
11305         .addReg(PPC::CR0EQ);
11306   } else if (MI.getOpcode() == PPC::SETRNDi) {
11307     DebugLoc dl = MI.getDebugLoc();
11308     unsigned OldFPSCRReg = MI.getOperand(0).getReg();
11309 
11310     // Save FPSCR value.
11311     BuildMI(*BB, MI, dl, TII->get(PPC::MFFS), OldFPSCRReg);
11312 
    // The floating-point rounding mode is in bits 62:63 of FPSCR, and has
    // the following settings:
11315     //   00 Round to nearest
11316     //   01 Round to 0
11317     //   10 Round to +inf
11318     //   11 Round to -inf
11319 
    // When the operand is an immediate, use its two least significant bits to
    // set bits 62:63 of FPSCR.
11322     unsigned Mode = MI.getOperand(1).getImm();
11323     BuildMI(*BB, MI, dl, TII->get((Mode & 1) ? PPC::MTFSB1 : PPC::MTFSB0))
11324       .addImm(31);
11325 
11326     BuildMI(*BB, MI, dl, TII->get((Mode & 2) ? PPC::MTFSB1 : PPC::MTFSB0))
11327       .addImm(30);
11328   } else if (MI.getOpcode() == PPC::SETRND) {
11329     DebugLoc dl = MI.getDebugLoc();
11330 
    // Copy a register from F8RCRegClass (SrcReg) to G8RCRegClass (DestReg),
    // or vice versa. If the target doesn't have DirectMove, we have to go
    // through the stack for the conversion, because the target lacks
    // instructions like mtvsrd and mfvsrd to do it directly.
11336     auto copyRegFromG8RCOrF8RC = [&] (unsigned DestReg, unsigned SrcReg) {
11337       if (Subtarget.hasDirectMove()) {
11338         BuildMI(*BB, MI, dl, TII->get(TargetOpcode::COPY), DestReg)
11339           .addReg(SrcReg);
11340       } else {
11341         // Use stack to do the register copy.
11342         unsigned StoreOp = PPC::STD, LoadOp = PPC::LFD;
11343         MachineRegisterInfo &RegInfo = F->getRegInfo();
11344         const TargetRegisterClass *RC = RegInfo.getRegClass(SrcReg);
11345         if (RC == &PPC::F8RCRegClass) {
          // Copy a register from F8RCRegClass to G8RCRegClass.
11347           assert((RegInfo.getRegClass(DestReg) == &PPC::G8RCRegClass) &&
11348                  "Unsupported RegClass.");
11349 
11350           StoreOp = PPC::STFD;
11351           LoadOp = PPC::LD;
11352         } else {
          // Copy a register from G8RCRegClass to F8RCRegClass.
11354           assert((RegInfo.getRegClass(SrcReg) == &PPC::G8RCRegClass) &&
11355                  (RegInfo.getRegClass(DestReg) == &PPC::F8RCRegClass) &&
11356                  "Unsupported RegClass.");
11357         }
11358 
11359         MachineFrameInfo &MFI = F->getFrameInfo();
11360         int FrameIdx = MFI.CreateStackObject(8, 8, false);
11361 
11362         MachineMemOperand *MMOStore = F->getMachineMemOperand(
11363           MachinePointerInfo::getFixedStack(*F, FrameIdx, 0),
11364           MachineMemOperand::MOStore, MFI.getObjectSize(FrameIdx),
11365           MFI.getObjectAlignment(FrameIdx));
11366 
11367         // Store the SrcReg into the stack.
11368         BuildMI(*BB, MI, dl, TII->get(StoreOp))
11369           .addReg(SrcReg)
11370           .addImm(0)
11371           .addFrameIndex(FrameIdx)
11372           .addMemOperand(MMOStore);
11373 
11374         MachineMemOperand *MMOLoad = F->getMachineMemOperand(
11375           MachinePointerInfo::getFixedStack(*F, FrameIdx, 0),
11376           MachineMemOperand::MOLoad, MFI.getObjectSize(FrameIdx),
11377           MFI.getObjectAlignment(FrameIdx));
11378 
        // Load DestReg from the stack slot where SrcReg was stored, which
        // completes the register-class conversion from SrcReg to DestReg.
11382         BuildMI(*BB, MI, dl, TII->get(LoadOp), DestReg)
11383           .addImm(0)
11384           .addFrameIndex(FrameIdx)
11385           .addMemOperand(MMOLoad);
11386       }
11387     };
11388 
11389     unsigned OldFPSCRReg = MI.getOperand(0).getReg();
11390 
11391     // Save FPSCR value.
11392     BuildMI(*BB, MI, dl, TII->get(PPC::MFFS), OldFPSCRReg);
11393 
    // When the operand is a GPRC register, use its two least significant bits
    // together with the mtfsf instruction to set bits 62:63 of the FPSCR.
11396     //
11397     // copy OldFPSCRTmpReg, OldFPSCRReg
11398     // (INSERT_SUBREG ExtSrcReg, (IMPLICIT_DEF ImDefReg), SrcOp, 1)
11399     // rldimi NewFPSCRTmpReg, ExtSrcReg, OldFPSCRReg, 0, 62
11400     // copy NewFPSCRReg, NewFPSCRTmpReg
11401     // mtfsf 255, NewFPSCRReg
11402     MachineOperand SrcOp = MI.getOperand(1);
11403     MachineRegisterInfo &RegInfo = F->getRegInfo();
11404     unsigned OldFPSCRTmpReg = RegInfo.createVirtualRegister(&PPC::G8RCRegClass);
11405 
11406     copyRegFromG8RCOrF8RC(OldFPSCRTmpReg, OldFPSCRReg);
11407 
11408     unsigned ImDefReg = RegInfo.createVirtualRegister(&PPC::G8RCRegClass);
11409     unsigned ExtSrcReg = RegInfo.createVirtualRegister(&PPC::G8RCRegClass);
11410 
    // The first operand of INSERT_SUBREG should be a register that has
    // subregisters; since we only care about its register class, an
    // IMPLICIT_DEF register suffices.
11414     BuildMI(*BB, MI, dl, TII->get(TargetOpcode::IMPLICIT_DEF), ImDefReg);
11415     BuildMI(*BB, MI, dl, TII->get(PPC::INSERT_SUBREG), ExtSrcReg)
11416       .addReg(ImDefReg)
11417       .add(SrcOp)
11418       .addImm(1);
11419 
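    // The rldimi below (SH = 0, MB = 62) inserts bits 62:63 of ExtSrcReg,
    // i.e. the two low-order bits of the new rounding mode, into bits 62:63
    // of OldFPSCRTmpReg, leaving all other bits unchanged.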
11420     unsigned NewFPSCRTmpReg = RegInfo.createVirtualRegister(&PPC::G8RCRegClass);
11421     BuildMI(*BB, MI, dl, TII->get(PPC::RLDIMI), NewFPSCRTmpReg)
11422       .addReg(OldFPSCRTmpReg)
11423       .addReg(ExtSrcReg)
11424       .addImm(0)
11425       .addImm(62);
11426 
11427     unsigned NewFPSCRReg = RegInfo.createVirtualRegister(&PPC::F8RCRegClass);
11428     copyRegFromG8RCOrF8RC(NewFPSCRReg, NewFPSCRTmpReg);
11429 
    // The mask 255 indicates that bits 32:63 of NewFPSCRReg are written to
    // bits 32:63 of the FPSCR.
11432     BuildMI(*BB, MI, dl, TII->get(PPC::MTFSF))
11433       .addImm(255)
11434       .addReg(NewFPSCRReg)
11435       .addImm(0)
11436       .addImm(0);
11437   } else {
11438     llvm_unreachable("Unexpected instr type to insert");
11439   }
11440 
11441   MI.eraseFromParent(); // The pseudo instruction is gone now.
11442   return BB;
11443 }
11444 
11445 //===----------------------------------------------------------------------===//
11446 // Target Optimization Hooks
11447 //===----------------------------------------------------------------------===//
11448 
11449 static int getEstimateRefinementSteps(EVT VT, const PPCSubtarget &Subtarget) {
11450   // For the estimates, convergence is quadratic, so we essentially double the
11451   // number of digits correct after every iteration. For both FRE and FRSQRTE,
11452   // the minimum architected relative accuracy is 2^-5. When hasRecipPrec(),
  // this is 2^-14. An IEEE float has 23 mantissa bits and a double has 52.
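  // For example, with 2^-5 estimates a float needs 3 refinement steps
  // (5 -> 10 -> 20 -> 40 > 23 bits) and a double needs one more, while with
  // 2^-14 estimates 1 and 2 steps suffice, matching the values below.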
11454   int RefinementSteps = Subtarget.hasRecipPrec() ? 1 : 3;
11455   if (VT.getScalarType() == MVT::f64)
11456     RefinementSteps++;
11457   return RefinementSteps;
11458 }
11459 
11460 SDValue PPCTargetLowering::getSqrtEstimate(SDValue Operand, SelectionDAG &DAG,
11461                                            int Enabled, int &RefinementSteps,
11462                                            bool &UseOneConstNR,
11463                                            bool Reciprocal) const {
11464   EVT VT = Operand.getValueType();
11465   if ((VT == MVT::f32 && Subtarget.hasFRSQRTES()) ||
11466       (VT == MVT::f64 && Subtarget.hasFRSQRTE()) ||
11467       (VT == MVT::v4f32 && Subtarget.hasAltivec()) ||
11468       (VT == MVT::v2f64 && Subtarget.hasVSX()) ||
11469       (VT == MVT::v4f32 && Subtarget.hasQPX()) ||
11470       (VT == MVT::v4f64 && Subtarget.hasQPX())) {
11471     if (RefinementSteps == ReciprocalEstimate::Unspecified)
11472       RefinementSteps = getEstimateRefinementSteps(VT, Subtarget);
11473 
11474     // The Newton-Raphson computation with a single constant does not provide
11475     // enough accuracy on some CPUs.
11476     UseOneConstNR = !Subtarget.needsTwoConstNR();
11477     return DAG.getNode(PPCISD::FRSQRTE, SDLoc(Operand), VT, Operand);
11478   }
11479   return SDValue();
11480 }
11481 
11482 SDValue PPCTargetLowering::getRecipEstimate(SDValue Operand, SelectionDAG &DAG,
11483                                             int Enabled,
11484                                             int &RefinementSteps) const {
11485   EVT VT = Operand.getValueType();
11486   if ((VT == MVT::f32 && Subtarget.hasFRES()) ||
11487       (VT == MVT::f64 && Subtarget.hasFRE()) ||
11488       (VT == MVT::v4f32 && Subtarget.hasAltivec()) ||
11489       (VT == MVT::v2f64 && Subtarget.hasVSX()) ||
11490       (VT == MVT::v4f32 && Subtarget.hasQPX()) ||
11491       (VT == MVT::v4f64 && Subtarget.hasQPX())) {
11492     if (RefinementSteps == ReciprocalEstimate::Unspecified)
11493       RefinementSteps = getEstimateRefinementSteps(VT, Subtarget);
11494     return DAG.getNode(PPCISD::FRE, SDLoc(Operand), VT, Operand);
11495   }
11496   return SDValue();
11497 }
11498 
11499 unsigned PPCTargetLowering::combineRepeatedFPDivisors() const {
11500   // Note: This functionality is used only when unsafe-fp-math is enabled, and
11501   // on cores with reciprocal estimates (which are used when unsafe-fp-math is
11502   // enabled for division), this functionality is redundant with the default
11503   // combiner logic (once the division -> reciprocal/multiply transformation
11504   // has taken place). As a result, this matters more for older cores than for
11505   // newer ones.
11506 
11507   // Combine multiple FDIVs with the same divisor into multiple FMULs by the
11508   // reciprocal if there are two or more FDIVs (for embedded cores with only
  // one FP pipeline) or three or more FDIVs (for generic OOO cores).
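  //
  // For example, with a threshold of two, { a = x/d; b = y/d } is rewritten
  // as { r = 1.0/d; a = x*r; b = y*r }.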
11510   switch (Subtarget.getDarwinDirective()) {
11511   default:
11512     return 3;
11513   case PPC::DIR_440:
11514   case PPC::DIR_A2:
11515   case PPC::DIR_E500:
11516   case PPC::DIR_E500mc:
11517   case PPC::DIR_E5500:
11518     return 2;
11519   }
11520 }
11521 
11522 // isConsecutiveLSLoc needs to work even if all adds have not yet been
11523 // collapsed, and so we need to look through chains of them.
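// For example, (add (add x, 8), 16) yields Base = x and Offset = 24.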
11524 static void getBaseWithConstantOffset(SDValue Loc, SDValue &Base,
11525                                      int64_t& Offset, SelectionDAG &DAG) {
11526   if (DAG.isBaseWithConstantOffset(Loc)) {
11527     Base = Loc.getOperand(0);
11528     Offset += cast<ConstantSDNode>(Loc.getOperand(1))->getSExtValue();
11529 
11530     // The base might itself be a base plus an offset, and if so, accumulate
11531     // that as well.
11532     getBaseWithConstantOffset(Loc.getOperand(0), Base, Offset, DAG);
11533   }
11534 }
11535 
11536 static bool isConsecutiveLSLoc(SDValue Loc, EVT VT, LSBaseSDNode *Base,
11537                             unsigned Bytes, int Dist,
11538                             SelectionDAG &DAG) {
11539   if (VT.getSizeInBits() / 8 != Bytes)
11540     return false;
11541 
11542   SDValue BaseLoc = Base->getBasePtr();
11543   if (Loc.getOpcode() == ISD::FrameIndex) {
11544     if (BaseLoc.getOpcode() != ISD::FrameIndex)
11545       return false;
11546     const MachineFrameInfo &MFI = DAG.getMachineFunction().getFrameInfo();
11547     int FI  = cast<FrameIndexSDNode>(Loc)->getIndex();
11548     int BFI = cast<FrameIndexSDNode>(BaseLoc)->getIndex();
11549     int FS  = MFI.getObjectSize(FI);
11550     int BFS = MFI.getObjectSize(BFI);
11551     if (FS != BFS || FS != (int)Bytes) return false;
11552     return MFI.getObjectOffset(FI) == (MFI.getObjectOffset(BFI) + Dist*Bytes);
11553   }
11554 
11555   SDValue Base1 = Loc, Base2 = BaseLoc;
11556   int64_t Offset1 = 0, Offset2 = 0;
11557   getBaseWithConstantOffset(Loc, Base1, Offset1, DAG);
11558   getBaseWithConstantOffset(BaseLoc, Base2, Offset2, DAG);
11559   if (Base1 == Base2 && Offset1 == (Offset2 + Dist * Bytes))
11560     return true;
11561 
11562   const TargetLowering &TLI = DAG.getTargetLoweringInfo();
11563   const GlobalValue *GV1 = nullptr;
11564   const GlobalValue *GV2 = nullptr;
11565   Offset1 = 0;
11566   Offset2 = 0;
11567   bool isGA1 = TLI.isGAPlusOffset(Loc.getNode(), GV1, Offset1);
11568   bool isGA2 = TLI.isGAPlusOffset(BaseLoc.getNode(), GV2, Offset2);
11569   if (isGA1 && isGA2 && GV1 == GV2)
11570     return Offset1 == (Offset2 + Dist*Bytes);
11571   return false;
11572 }
11573 
11574 // Like SelectionDAG::isConsecutiveLoad, but also works for stores, and does
11575 // not enforce equality of the chain operands.
11576 static bool isConsecutiveLS(SDNode *N, LSBaseSDNode *Base,
11577                             unsigned Bytes, int Dist,
11578                             SelectionDAG &DAG) {
11579   if (LSBaseSDNode *LS = dyn_cast<LSBaseSDNode>(N)) {
11580     EVT VT = LS->getMemoryVT();
11581     SDValue Loc = LS->getBasePtr();
11582     return isConsecutiveLSLoc(Loc, VT, Base, Bytes, Dist, DAG);
11583   }
11584 
11585   if (N->getOpcode() == ISD::INTRINSIC_W_CHAIN) {
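    // For these intrinsic loads, operand 1 is the intrinsic ID and operand 2
    // is the pointer; map the intrinsic ID to the memory VT it accesses.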
11586     EVT VT;
11587     switch (cast<ConstantSDNode>(N->getOperand(1))->getZExtValue()) {
11588     default: return false;
11589     case Intrinsic::ppc_qpx_qvlfd:
11590     case Intrinsic::ppc_qpx_qvlfda:
11591       VT = MVT::v4f64;
11592       break;
11593     case Intrinsic::ppc_qpx_qvlfs:
11594     case Intrinsic::ppc_qpx_qvlfsa:
11595       VT = MVT::v4f32;
11596       break;
11597     case Intrinsic::ppc_qpx_qvlfcd:
11598     case Intrinsic::ppc_qpx_qvlfcda:
11599       VT = MVT::v2f64;
11600       break;
11601     case Intrinsic::ppc_qpx_qvlfcs:
11602     case Intrinsic::ppc_qpx_qvlfcsa:
11603       VT = MVT::v2f32;
11604       break;
11605     case Intrinsic::ppc_qpx_qvlfiwa:
11606     case Intrinsic::ppc_qpx_qvlfiwz:
11607     case Intrinsic::ppc_altivec_lvx:
11608     case Intrinsic::ppc_altivec_lvxl:
11609     case Intrinsic::ppc_vsx_lxvw4x:
11610     case Intrinsic::ppc_vsx_lxvw4x_be:
11611       VT = MVT::v4i32;
11612       break;
11613     case Intrinsic::ppc_vsx_lxvd2x:
11614     case Intrinsic::ppc_vsx_lxvd2x_be:
11615       VT = MVT::v2f64;
11616       break;
11617     case Intrinsic::ppc_altivec_lvebx:
11618       VT = MVT::i8;
11619       break;
11620     case Intrinsic::ppc_altivec_lvehx:
11621       VT = MVT::i16;
11622       break;
11623     case Intrinsic::ppc_altivec_lvewx:
11624       VT = MVT::i32;
11625       break;
11626     }
11627 
11628     return isConsecutiveLSLoc(N->getOperand(2), VT, Base, Bytes, Dist, DAG);
11629   }
11630 
11631   if (N->getOpcode() == ISD::INTRINSIC_VOID) {
11632     EVT VT;
11633     switch (cast<ConstantSDNode>(N->getOperand(1))->getZExtValue()) {
11634     default: return false;
11635     case Intrinsic::ppc_qpx_qvstfd:
11636     case Intrinsic::ppc_qpx_qvstfda:
11637       VT = MVT::v4f64;
11638       break;
11639     case Intrinsic::ppc_qpx_qvstfs:
11640     case Intrinsic::ppc_qpx_qvstfsa:
11641       VT = MVT::v4f32;
11642       break;
11643     case Intrinsic::ppc_qpx_qvstfcd:
11644     case Intrinsic::ppc_qpx_qvstfcda:
11645       VT = MVT::v2f64;
11646       break;
11647     case Intrinsic::ppc_qpx_qvstfcs:
11648     case Intrinsic::ppc_qpx_qvstfcsa:
11649       VT = MVT::v2f32;
11650       break;
11651     case Intrinsic::ppc_qpx_qvstfiw:
11652     case Intrinsic::ppc_qpx_qvstfiwa:
11653     case Intrinsic::ppc_altivec_stvx:
11654     case Intrinsic::ppc_altivec_stvxl:
11655     case Intrinsic::ppc_vsx_stxvw4x:
11656       VT = MVT::v4i32;
11657       break;
11658     case Intrinsic::ppc_vsx_stxvd2x:
11659       VT = MVT::v2f64;
11660       break;
11661     case Intrinsic::ppc_vsx_stxvw4x_be:
11662       VT = MVT::v4i32;
11663       break;
11664     case Intrinsic::ppc_vsx_stxvd2x_be:
11665       VT = MVT::v2f64;
11666       break;
11667     case Intrinsic::ppc_altivec_stvebx:
11668       VT = MVT::i8;
11669       break;
11670     case Intrinsic::ppc_altivec_stvehx:
11671       VT = MVT::i16;
11672       break;
11673     case Intrinsic::ppc_altivec_stvewx:
11674       VT = MVT::i32;
11675       break;
11676     }
11677 
11678     return isConsecutiveLSLoc(N->getOperand(3), VT, Base, Bytes, Dist, DAG);
11679   }
11680 
11681   return false;
11682 }
11683 
// Return true if there is a nearby consecutive load to the one provided
// (regardless of alignment). We search up and down the chain, looking through
11686 // token factors and other loads (but nothing else). As a result, a true result
11687 // indicates that it is safe to create a new consecutive load adjacent to the
11688 // load provided.
11689 static bool findConsecutiveLoad(LoadSDNode *LD, SelectionDAG &DAG) {
11690   SDValue Chain = LD->getChain();
11691   EVT VT = LD->getMemoryVT();
11692 
11693   SmallSet<SDNode *, 16> LoadRoots;
11694   SmallVector<SDNode *, 8> Queue(1, Chain.getNode());
11695   SmallSet<SDNode *, 16> Visited;
11696 
11697   // First, search up the chain, branching to follow all token-factor operands.
  // If we find a consecutive load, then we're done; otherwise, record all
11699   // nodes just above the top-level loads and token factors.
11700   while (!Queue.empty()) {
11701     SDNode *ChainNext = Queue.pop_back_val();
11702     if (!Visited.insert(ChainNext).second)
11703       continue;
11704 
11705     if (MemSDNode *ChainLD = dyn_cast<MemSDNode>(ChainNext)) {
11706       if (isConsecutiveLS(ChainLD, LD, VT.getStoreSize(), 1, DAG))
11707         return true;
11708 
11709       if (!Visited.count(ChainLD->getChain().getNode()))
11710         Queue.push_back(ChainLD->getChain().getNode());
11711     } else if (ChainNext->getOpcode() == ISD::TokenFactor) {
11712       for (const SDUse &O : ChainNext->ops())
11713         if (!Visited.count(O.getNode()))
11714           Queue.push_back(O.getNode());
11715     } else
11716       LoadRoots.insert(ChainNext);
11717   }
11718 
11719   // Second, search down the chain, starting from the top-level nodes recorded
11720   // in the first phase. These top-level nodes are the nodes just above all
  // loads and token factors. Starting with their uses, recursively look through
11722   // all loads (just the chain uses) and token factors to find a consecutive
11723   // load.
11724   Visited.clear();
11725   Queue.clear();
11726 
11727   for (SmallSet<SDNode *, 16>::iterator I = LoadRoots.begin(),
11728        IE = LoadRoots.end(); I != IE; ++I) {
11729     Queue.push_back(*I);
11730 
11731     while (!Queue.empty()) {
11732       SDNode *LoadRoot = Queue.pop_back_val();
11733       if (!Visited.insert(LoadRoot).second)
11734         continue;
11735 
11736       if (MemSDNode *ChainLD = dyn_cast<MemSDNode>(LoadRoot))
11737         if (isConsecutiveLS(ChainLD, LD, VT.getStoreSize(), 1, DAG))
11738           return true;
11739 
11740       for (SDNode::use_iterator UI = LoadRoot->use_begin(),
11741            UE = LoadRoot->use_end(); UI != UE; ++UI)
11742         if (((isa<MemSDNode>(*UI) &&
11743             cast<MemSDNode>(*UI)->getChain().getNode() == LoadRoot) ||
11744             UI->getOpcode() == ISD::TokenFactor) && !Visited.count(*UI))
11745           Queue.push_back(*UI);
11746     }
11747   }
11748 
11749   return false;
11750 }
11751 
11752 /// This function is called when we have proved that a SETCC node can be replaced
11753 /// by subtraction (and other supporting instructions) so that the result of
11754 /// comparison is kept in a GPR instead of CR. This function is purely for
11755 /// codegen purposes and has some flags to guide the codegen process.
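/// For example, with Size = 64, (setult x, y) becomes
/// trunc((zext(x) - zext(y)) >> 63): since both operands were zero extended
/// from a strictly smaller type, the sign bit of the subtraction is exactly
/// the unsigned less-than result.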
11756 static SDValue generateEquivalentSub(SDNode *N, int Size, bool Complement,
11757                                      bool Swap, SDLoc &DL, SelectionDAG &DAG) {
11758   assert(N->getOpcode() == ISD::SETCC && "ISD::SETCC Expected.");
11759 
  // Zero extend the operands to the largest legal integer. The original
  // operands must be of a strictly smaller size.
11762   auto Op0 = DAG.getNode(ISD::ZERO_EXTEND, DL, MVT::i64, N->getOperand(0),
11763                          DAG.getConstant(Size, DL, MVT::i32));
11764   auto Op1 = DAG.getNode(ISD::ZERO_EXTEND, DL, MVT::i64, N->getOperand(1),
11765                          DAG.getConstant(Size, DL, MVT::i32));
11766 
  // Swap the operands if required by the condition code.
11768   if (Swap)
11769     std::swap(Op0, Op1);
11770 
11771   // Subtract extended integers.
11772   auto SubNode = DAG.getNode(ISD::SUB, DL, MVT::i64, Op0, Op1);
11773 
11774   // Move the sign bit to the least significant position and zero out the rest.
  // The least significant bit now carries the original comparison's result.
11776   auto Shifted = DAG.getNode(ISD::SRL, DL, MVT::i64, SubNode,
11777                              DAG.getConstant(Size - 1, DL, MVT::i32));
11778   auto Final = Shifted;
11779 
  // Complement the result if required by the condition code.
11781   if (Complement)
11782     Final = DAG.getNode(ISD::XOR, DL, MVT::i64, Shifted,
11783                         DAG.getConstant(1, DL, MVT::i64));
11784 
11785   return DAG.getNode(ISD::TRUNCATE, DL, MVT::i1, Final);
11786 }
11787 
11788 SDValue PPCTargetLowering::ConvertSETCCToSubtract(SDNode *N,
11789                                                   DAGCombinerInfo &DCI) const {
11790   assert(N->getOpcode() == ISD::SETCC && "ISD::SETCC Expected.");
11791 
11792   SelectionDAG &DAG = DCI.DAG;
11793   SDLoc DL(N);
11794 
  // The size of the integers being compared plays a critical role in the
  // following analysis, so we prefer to do this when all types are legal.
11797   if (!DCI.isAfterLegalizeDAG())
11798     return SDValue();
11799 
  // If all users of the SETCC extend its value to a legal integer type,
  // then we replace the SETCC with a subtraction.
11802   for (SDNode::use_iterator UI = N->use_begin(),
11803        UE = N->use_end(); UI != UE; ++UI) {
11804     if (UI->getOpcode() != ISD::ZERO_EXTEND)
11805       return SDValue();
11806   }
11807 
11808   ISD::CondCode CC = cast<CondCodeSDNode>(N->getOperand(2))->get();
11809   auto OpSize = N->getOperand(0).getValueSizeInBits();
11810 
11811   unsigned Size = DAG.getDataLayout().getLargestLegalIntTypeSizeInBits();
11812 
11813   if (OpSize < Size) {
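    // ULT maps directly onto the subtraction; ULE is !UGT (swap and
    // complement), UGT swaps the operands, and UGE is !ULT (complement only).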
11814     switch (CC) {
11815     default: break;
11816     case ISD::SETULT:
11817       return generateEquivalentSub(N, Size, false, false, DL, DAG);
11818     case ISD::SETULE:
11819       return generateEquivalentSub(N, Size, true, true, DL, DAG);
11820     case ISD::SETUGT:
11821       return generateEquivalentSub(N, Size, false, true, DL, DAG);
11822     case ISD::SETUGE:
11823       return generateEquivalentSub(N, Size, true, false, DL, DAG);
11824     }
11825   }
11826 
11827   return SDValue();
11828 }
11829 
11830 SDValue PPCTargetLowering::DAGCombineTruncBoolExt(SDNode *N,
11831                                                   DAGCombinerInfo &DCI) const {
11832   SelectionDAG &DAG = DCI.DAG;
11833   SDLoc dl(N);
11834 
11835   assert(Subtarget.useCRBits() && "Expecting to be tracking CR bits");
11836   // If we're tracking CR bits, we need to be careful that we don't have:
11837   //   trunc(binary-ops(zext(x), zext(y)))
11838   // or
11839   //   trunc(binary-ops(binary-ops(zext(x), zext(y)), ...)
11840   // such that we're unnecessarily moving things into GPRs when it would be
11841   // better to keep them in CR bits.
11842 
11843   // Note that trunc here can be an actual i1 trunc, or can be the effective
11844   // truncation that comes from a setcc or select_cc.
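  // For example, an i1 trunc of (and (zext i1 a), (zext i1 b)) can be
  // rewritten as (and a, b) and computed entirely in CR bits.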
11845   if (N->getOpcode() == ISD::TRUNCATE &&
11846       N->getValueType(0) != MVT::i1)
11847     return SDValue();
11848 
11849   if (N->getOperand(0).getValueType() != MVT::i32 &&
11850       N->getOperand(0).getValueType() != MVT::i64)
11851     return SDValue();
11852 
11853   if (N->getOpcode() == ISD::SETCC ||
11854       N->getOpcode() == ISD::SELECT_CC) {
11855     // If we're looking at a comparison, then we need to make sure that the
    // high bits (all except for the first) don't affect the result.
11857     ISD::CondCode CC =
11858       cast<CondCodeSDNode>(N->getOperand(
11859         N->getOpcode() == ISD::SETCC ? 2 : 4))->get();
11860     unsigned OpBits = N->getOperand(0).getValueSizeInBits();
11861 
11862     if (ISD::isSignedIntSetCC(CC)) {
11863       if (DAG.ComputeNumSignBits(N->getOperand(0)) != OpBits ||
11864           DAG.ComputeNumSignBits(N->getOperand(1)) != OpBits)
11865         return SDValue();
11866     } else if (ISD::isUnsignedIntSetCC(CC)) {
11867       if (!DAG.MaskedValueIsZero(N->getOperand(0),
11868                                  APInt::getHighBitsSet(OpBits, OpBits-1)) ||
11869           !DAG.MaskedValueIsZero(N->getOperand(1),
11870                                  APInt::getHighBitsSet(OpBits, OpBits-1)))
11871         return (N->getOpcode() == ISD::SETCC ? ConvertSETCCToSubtract(N, DCI)
11872                                              : SDValue());
11873     } else {
11874       // This is neither a signed nor an unsigned comparison, just make sure
11875       // that the high bits are equal.
11876       KnownBits Op1Known = DAG.computeKnownBits(N->getOperand(0));
11877       KnownBits Op2Known = DAG.computeKnownBits(N->getOperand(1));
11878 
11879       // We don't really care about what is known about the first bit (if
11880       // anything), so clear it in all masks prior to comparing them.
11881       Op1Known.Zero.clearBit(0); Op1Known.One.clearBit(0);
11882       Op2Known.Zero.clearBit(0); Op2Known.One.clearBit(0);
11883 
11884       if (Op1Known.Zero != Op2Known.Zero || Op1Known.One != Op2Known.One)
11885         return SDValue();
11886     }
11887   }
11888 
11889   // We now know that the higher-order bits are irrelevant, we just need to
11890   // make sure that all of the intermediate operations are bit operations, and
11891   // all inputs are extensions.
11892   if (N->getOperand(0).getOpcode() != ISD::AND &&
11893       N->getOperand(0).getOpcode() != ISD::OR  &&
11894       N->getOperand(0).getOpcode() != ISD::XOR &&
11895       N->getOperand(0).getOpcode() != ISD::SELECT &&
11896       N->getOperand(0).getOpcode() != ISD::SELECT_CC &&
11897       N->getOperand(0).getOpcode() != ISD::TRUNCATE &&
11898       N->getOperand(0).getOpcode() != ISD::SIGN_EXTEND &&
11899       N->getOperand(0).getOpcode() != ISD::ZERO_EXTEND &&
11900       N->getOperand(0).getOpcode() != ISD::ANY_EXTEND)
11901     return SDValue();
11902 
11903   if ((N->getOpcode() == ISD::SETCC || N->getOpcode() == ISD::SELECT_CC) &&
11904       N->getOperand(1).getOpcode() != ISD::AND &&
11905       N->getOperand(1).getOpcode() != ISD::OR  &&
11906       N->getOperand(1).getOpcode() != ISD::XOR &&
11907       N->getOperand(1).getOpcode() != ISD::SELECT &&
11908       N->getOperand(1).getOpcode() != ISD::SELECT_CC &&
11909       N->getOperand(1).getOpcode() != ISD::TRUNCATE &&
11910       N->getOperand(1).getOpcode() != ISD::SIGN_EXTEND &&
11911       N->getOperand(1).getOpcode() != ISD::ZERO_EXTEND &&
11912       N->getOperand(1).getOpcode() != ISD::ANY_EXTEND)
11913     return SDValue();
11914 
11915   SmallVector<SDValue, 4> Inputs;
11916   SmallVector<SDValue, 8> BinOps, PromOps;
11917   SmallPtrSet<SDNode *, 16> Visited;
11918 
11919   for (unsigned i = 0; i < 2; ++i) {
11920     if (((N->getOperand(i).getOpcode() == ISD::SIGN_EXTEND ||
11921           N->getOperand(i).getOpcode() == ISD::ZERO_EXTEND ||
11922           N->getOperand(i).getOpcode() == ISD::ANY_EXTEND) &&
11923           N->getOperand(i).getOperand(0).getValueType() == MVT::i1) ||
11924         isa<ConstantSDNode>(N->getOperand(i)))
11925       Inputs.push_back(N->getOperand(i));
11926     else
11927       BinOps.push_back(N->getOperand(i));
11928 
11929     if (N->getOpcode() == ISD::TRUNCATE)
11930       break;
11931   }
11932 
11933   // Visit all inputs, collect all binary operations (and, or, xor and
11934   // select) that are all fed by extensions.
11935   while (!BinOps.empty()) {
11936     SDValue BinOp = BinOps.back();
11937     BinOps.pop_back();
11938 
11939     if (!Visited.insert(BinOp.getNode()).second)
11940       continue;
11941 
11942     PromOps.push_back(BinOp);
11943 
11944     for (unsigned i = 0, ie = BinOp.getNumOperands(); i != ie; ++i) {
11945       // The condition of the select is not promoted.
11946       if (BinOp.getOpcode() == ISD::SELECT && i == 0)
11947         continue;
11948       if (BinOp.getOpcode() == ISD::SELECT_CC && i != 2 && i != 3)
11949         continue;
11950 
11951       if (((BinOp.getOperand(i).getOpcode() == ISD::SIGN_EXTEND ||
11952             BinOp.getOperand(i).getOpcode() == ISD::ZERO_EXTEND ||
11953             BinOp.getOperand(i).getOpcode() == ISD::ANY_EXTEND) &&
11954            BinOp.getOperand(i).getOperand(0).getValueType() == MVT::i1) ||
11955           isa<ConstantSDNode>(BinOp.getOperand(i))) {
11956         Inputs.push_back(BinOp.getOperand(i));
11957       } else if (BinOp.getOperand(i).getOpcode() == ISD::AND ||
11958                  BinOp.getOperand(i).getOpcode() == ISD::OR  ||
11959                  BinOp.getOperand(i).getOpcode() == ISD::XOR ||
11960                  BinOp.getOperand(i).getOpcode() == ISD::SELECT ||
11961                  BinOp.getOperand(i).getOpcode() == ISD::SELECT_CC ||
11962                  BinOp.getOperand(i).getOpcode() == ISD::TRUNCATE ||
11963                  BinOp.getOperand(i).getOpcode() == ISD::SIGN_EXTEND ||
11964                  BinOp.getOperand(i).getOpcode() == ISD::ZERO_EXTEND ||
11965                  BinOp.getOperand(i).getOpcode() == ISD::ANY_EXTEND) {
11966         BinOps.push_back(BinOp.getOperand(i));
11967       } else {
11968         // We have an input that is not an extension or another binary
11969         // operation; we'll abort this transformation.
11970         return SDValue();
11971       }
11972     }
11973   }
11974 
11975   // Make sure that this is a self-contained cluster of operations (which
11976   // is not quite the same thing as saying that everything has only one
11977   // use).
11978   for (unsigned i = 0, ie = Inputs.size(); i != ie; ++i) {
11979     if (isa<ConstantSDNode>(Inputs[i]))
11980       continue;
11981 
11982     for (SDNode::use_iterator UI = Inputs[i].getNode()->use_begin(),
11983                               UE = Inputs[i].getNode()->use_end();
11984          UI != UE; ++UI) {
11985       SDNode *User = *UI;
11986       if (User != N && !Visited.count(User))
11987         return SDValue();
11988 
11989       // Make sure that we're not going to promote the non-output-value
11990       // operand(s) or SELECT or SELECT_CC.
11991       // FIXME: Although we could sometimes handle this, and it does occur in
11992       // practice that one of the condition inputs to the select is also one of
11993       // the outputs, we currently can't deal with this.
11994       if (User->getOpcode() == ISD::SELECT) {
11995         if (User->getOperand(0) == Inputs[i])
11996           return SDValue();
11997       } else if (User->getOpcode() == ISD::SELECT_CC) {
11998         if (User->getOperand(0) == Inputs[i] ||
11999             User->getOperand(1) == Inputs[i])
12000           return SDValue();
12001       }
12002     }
12003   }
12004 
12005   for (unsigned i = 0, ie = PromOps.size(); i != ie; ++i) {
12006     for (SDNode::use_iterator UI = PromOps[i].getNode()->use_begin(),
12007                               UE = PromOps[i].getNode()->use_end();
12008          UI != UE; ++UI) {
12009       SDNode *User = *UI;
12010       if (User != N && !Visited.count(User))
12011         return SDValue();
12012 
12013       // Make sure that we're not going to promote the non-output-value
12014       // operand(s) or SELECT or SELECT_CC.
12015       // FIXME: Although we could sometimes handle this, and it does occur in
12016       // practice that one of the condition inputs to the select is also one of
12017       // the outputs, we currently can't deal with this.
12018       if (User->getOpcode() == ISD::SELECT) {
12019         if (User->getOperand(0) == PromOps[i])
12020           return SDValue();
12021       } else if (User->getOpcode() == ISD::SELECT_CC) {
12022         if (User->getOperand(0) == PromOps[i] ||
12023             User->getOperand(1) == PromOps[i])
12024           return SDValue();
12025       }
12026     }
12027   }
12028 
12029   // Replace all inputs with the extension operand.
12030   for (unsigned i = 0, ie = Inputs.size(); i != ie; ++i) {
12031     // Constants may have users outside the cluster of to-be-promoted nodes,
12032     // and so we need to replace those as we do the promotions.
    if (isa<ConstantSDNode>(Inputs[i]))
      continue;

    DAG.ReplaceAllUsesOfValueWith(Inputs[i], Inputs[i].getOperand(0));
12037   }
12038 
12039   std::list<HandleSDNode> PromOpHandles;
12040   for (auto &PromOp : PromOps)
12041     PromOpHandles.emplace_back(PromOp);
12042 
12043   // Replace all operations (these are all the same, but have a different
12044   // (i1) return type). DAG.getNode will validate that the types of
12045   // a binary operator match, so go through the list in reverse so that
12046   // we've likely promoted both operands first. Any intermediate truncations or
12047   // extensions disappear.
12048   while (!PromOpHandles.empty()) {
12049     SDValue PromOp = PromOpHandles.back().getValue();
12050     PromOpHandles.pop_back();
12051 
12052     if (PromOp.getOpcode() == ISD::TRUNCATE ||
12053         PromOp.getOpcode() == ISD::SIGN_EXTEND ||
12054         PromOp.getOpcode() == ISD::ZERO_EXTEND ||
12055         PromOp.getOpcode() == ISD::ANY_EXTEND) {
12056       if (!isa<ConstantSDNode>(PromOp.getOperand(0)) &&
12057           PromOp.getOperand(0).getValueType() != MVT::i1) {
12058         // The operand is not yet ready (see comment below).
12059         PromOpHandles.emplace_front(PromOp);
12060         continue;
12061       }
12062 
12063       SDValue RepValue = PromOp.getOperand(0);
12064       if (isa<ConstantSDNode>(RepValue))
12065         RepValue = DAG.getNode(ISD::TRUNCATE, dl, MVT::i1, RepValue);
12066 
12067       DAG.ReplaceAllUsesOfValueWith(PromOp, RepValue);
12068       continue;
12069     }
12070 
12071     unsigned C;
12072     switch (PromOp.getOpcode()) {
12073     default:             C = 0; break;
12074     case ISD::SELECT:    C = 1; break;
12075     case ISD::SELECT_CC: C = 2; break;
12076     }
12077 
12078     if ((!isa<ConstantSDNode>(PromOp.getOperand(C)) &&
12079          PromOp.getOperand(C).getValueType() != MVT::i1) ||
12080         (!isa<ConstantSDNode>(PromOp.getOperand(C+1)) &&
12081          PromOp.getOperand(C+1).getValueType() != MVT::i1)) {
12082       // The to-be-promoted operands of this node have not yet been
12083       // promoted (this should be rare because we're going through the
12084       // list backward, but if one of the operands has several users in
12085       // this cluster of to-be-promoted nodes, it is possible).
12086       PromOpHandles.emplace_front(PromOp);
12087       continue;
12088     }
12089 
12090     SmallVector<SDValue, 3> Ops(PromOp.getNode()->op_begin(),
12091                                 PromOp.getNode()->op_end());
12092 
12093     // If there are any constant inputs, make sure they're replaced now.
12094     for (unsigned i = 0; i < 2; ++i)
12095       if (isa<ConstantSDNode>(Ops[C+i]))
12096         Ops[C+i] = DAG.getNode(ISD::TRUNCATE, dl, MVT::i1, Ops[C+i]);
12097 
12098     DAG.ReplaceAllUsesOfValueWith(PromOp,
12099       DAG.getNode(PromOp.getOpcode(), dl, MVT::i1, Ops));
12100   }
12101 
12102   // Now we're left with the initial truncation itself.
12103   if (N->getOpcode() == ISD::TRUNCATE)
12104     return N->getOperand(0);
12105 
12106   // Otherwise, this is a comparison. The operands to be compared have just
12107   // changed type (to i1), but everything else is the same.
12108   return SDValue(N, 0);
12109 }
12110 
12111 SDValue PPCTargetLowering::DAGCombineExtBoolTrunc(SDNode *N,
12112                                                   DAGCombinerInfo &DCI) const {
12113   SelectionDAG &DAG = DCI.DAG;
12114   SDLoc dl(N);
12115 
12116   // If we're tracking CR bits, we need to be careful that we don't have:
12117   //   zext(binary-ops(trunc(x), trunc(y)))
12118   // or
12119   //   zext(binary-ops(binary-ops(trunc(x), trunc(y)), ...)
12120   // such that we're unnecessarily moving things into CR bits that can more
12121   // efficiently stay in GPRs. Note that if we're not certain that the high
12122   // bits are set as required by the final extension, we still may need to do
12123   // some masking to get the proper behavior.
12124 
12125   // This same functionality is important on PPC64 when dealing with
12126   // 32-to-64-bit extensions; these occur often when 32-bit values are used as
12127   // the return values of functions. Because it is so similar, it is handled
12128   // here as well.
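  // For example, on PPC64, (zext (and (trunc x), (trunc y))) can become
  // (and x, y) on the original 64-bit values, with a final mask (or shift
  // pair, for sign extension) needed only if the high bits require it.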
12129 
12130   if (N->getValueType(0) != MVT::i32 &&
12131       N->getValueType(0) != MVT::i64)
12132     return SDValue();
12133 
12134   if (!((N->getOperand(0).getValueType() == MVT::i1 && Subtarget.useCRBits()) ||
12135         (N->getOperand(0).getValueType() == MVT::i32 && Subtarget.isPPC64())))
12136     return SDValue();
12137 
12138   if (N->getOperand(0).getOpcode() != ISD::AND &&
12139       N->getOperand(0).getOpcode() != ISD::OR  &&
12140       N->getOperand(0).getOpcode() != ISD::XOR &&
12141       N->getOperand(0).getOpcode() != ISD::SELECT &&
12142       N->getOperand(0).getOpcode() != ISD::SELECT_CC)
12143     return SDValue();
12144 
12145   SmallVector<SDValue, 4> Inputs;
12146   SmallVector<SDValue, 8> BinOps(1, N->getOperand(0)), PromOps;
12147   SmallPtrSet<SDNode *, 16> Visited;
12148 
12149   // Visit all inputs, collect all binary operations (and, or, xor and
12150   // select) that are all fed by truncations.
12151   while (!BinOps.empty()) {
12152     SDValue BinOp = BinOps.back();
12153     BinOps.pop_back();
12154 
12155     if (!Visited.insert(BinOp.getNode()).second)
12156       continue;
12157 
12158     PromOps.push_back(BinOp);
12159 
12160     for (unsigned i = 0, ie = BinOp.getNumOperands(); i != ie; ++i) {
12161       // The condition of the select is not promoted.
12162       if (BinOp.getOpcode() == ISD::SELECT && i == 0)
12163         continue;
12164       if (BinOp.getOpcode() == ISD::SELECT_CC && i != 2 && i != 3)
12165         continue;
12166 
12167       if (BinOp.getOperand(i).getOpcode() == ISD::TRUNCATE ||
12168           isa<ConstantSDNode>(BinOp.getOperand(i))) {
12169         Inputs.push_back(BinOp.getOperand(i));
12170       } else if (BinOp.getOperand(i).getOpcode() == ISD::AND ||
12171                  BinOp.getOperand(i).getOpcode() == ISD::OR  ||
12172                  BinOp.getOperand(i).getOpcode() == ISD::XOR ||
12173                  BinOp.getOperand(i).getOpcode() == ISD::SELECT ||
12174                  BinOp.getOperand(i).getOpcode() == ISD::SELECT_CC) {
12175         BinOps.push_back(BinOp.getOperand(i));
12176       } else {
12177         // We have an input that is not a truncation or another binary
12178         // operation; we'll abort this transformation.
12179         return SDValue();
12180       }
12181     }
12182   }
12183 
  // The operands of a select that must be truncated when the select is
  // promoted, because the operand is actually part of the to-be-promoted set.
12186   DenseMap<SDNode *, EVT> SelectTruncOp[2];
12187 
12188   // Make sure that this is a self-contained cluster of operations (which
12189   // is not quite the same thing as saying that everything has only one
12190   // use).
12191   for (unsigned i = 0, ie = Inputs.size(); i != ie; ++i) {
12192     if (isa<ConstantSDNode>(Inputs[i]))
12193       continue;
12194 
12195     for (SDNode::use_iterator UI = Inputs[i].getNode()->use_begin(),
12196                               UE = Inputs[i].getNode()->use_end();
12197          UI != UE; ++UI) {
12198       SDNode *User = *UI;
12199       if (User != N && !Visited.count(User))
12200         return SDValue();
12201 
12202       // If we're going to promote the non-output-value operand(s) or SELECT or
12203       // SELECT_CC, record them for truncation.
12204       if (User->getOpcode() == ISD::SELECT) {
12205         if (User->getOperand(0) == Inputs[i])
12206           SelectTruncOp[0].insert(std::make_pair(User,
12207                                     User->getOperand(0).getValueType()));
12208       } else if (User->getOpcode() == ISD::SELECT_CC) {
12209         if (User->getOperand(0) == Inputs[i])
12210           SelectTruncOp[0].insert(std::make_pair(User,
12211                                     User->getOperand(0).getValueType()));
12212         if (User->getOperand(1) == Inputs[i])
12213           SelectTruncOp[1].insert(std::make_pair(User,
12214                                     User->getOperand(1).getValueType()));
12215       }
12216     }
12217   }
12218 
12219   for (unsigned i = 0, ie = PromOps.size(); i != ie; ++i) {
12220     for (SDNode::use_iterator UI = PromOps[i].getNode()->use_begin(),
12221                               UE = PromOps[i].getNode()->use_end();
12222          UI != UE; ++UI) {
12223       SDNode *User = *UI;
12224       if (User != N && !Visited.count(User))
12225         return SDValue();
12226 
12227       // If we're going to promote the non-output-value operand(s) or SELECT or
12228       // SELECT_CC, record them for truncation.
12229       if (User->getOpcode() == ISD::SELECT) {
12230         if (User->getOperand(0) == PromOps[i])
12231           SelectTruncOp[0].insert(std::make_pair(User,
12232                                     User->getOperand(0).getValueType()));
12233       } else if (User->getOpcode() == ISD::SELECT_CC) {
12234         if (User->getOperand(0) == PromOps[i])
12235           SelectTruncOp[0].insert(std::make_pair(User,
12236                                     User->getOperand(0).getValueType()));
12237         if (User->getOperand(1) == PromOps[i])
12238           SelectTruncOp[1].insert(std::make_pair(User,
12239                                     User->getOperand(1).getValueType()));
12240       }
12241     }
12242   }
12243 
12244   unsigned PromBits = N->getOperand(0).getValueSizeInBits();
12245   bool ReallyNeedsExt = false;
12246   if (N->getOpcode() != ISD::ANY_EXTEND) {
12247     // If all of the inputs are not already sign/zero extended, then
12248     // we'll still need to do that at the end.
12249     for (unsigned i = 0, ie = Inputs.size(); i != ie; ++i) {
12250       if (isa<ConstantSDNode>(Inputs[i]))
12251         continue;
12252 
12253       unsigned OpBits =
12254         Inputs[i].getOperand(0).getValueSizeInBits();
12255       assert(PromBits < OpBits && "Truncation not to a smaller bit count?");
12256 
12257       if ((N->getOpcode() == ISD::ZERO_EXTEND &&
12258            !DAG.MaskedValueIsZero(Inputs[i].getOperand(0),
12259                                   APInt::getHighBitsSet(OpBits,
12260                                                         OpBits-PromBits))) ||
12261           (N->getOpcode() == ISD::SIGN_EXTEND &&
12262            DAG.ComputeNumSignBits(Inputs[i].getOperand(0)) <
12263              (OpBits-(PromBits-1)))) {
12264         ReallyNeedsExt = true;
12265         break;
12266       }
12267     }
12268   }
12269 
12270   // Replace all inputs, either with the truncation operand, or a
12271   // truncation or extension to the final output type.
12272   for (unsigned i = 0, ie = Inputs.size(); i != ie; ++i) {
12273     // Constant inputs need to be replaced with the to-be-promoted nodes that
12274     // use them because they might have users outside of the cluster of
12275     // promoted nodes.
12276     if (isa<ConstantSDNode>(Inputs[i]))
12277       continue;
12278 
12279     SDValue InSrc = Inputs[i].getOperand(0);
12280     if (Inputs[i].getValueType() == N->getValueType(0))
12281       DAG.ReplaceAllUsesOfValueWith(Inputs[i], InSrc);
12282     else if (N->getOpcode() == ISD::SIGN_EXTEND)
12283       DAG.ReplaceAllUsesOfValueWith(Inputs[i],
12284         DAG.getSExtOrTrunc(InSrc, dl, N->getValueType(0)));
12285     else if (N->getOpcode() == ISD::ZERO_EXTEND)
12286       DAG.ReplaceAllUsesOfValueWith(Inputs[i],
12287         DAG.getZExtOrTrunc(InSrc, dl, N->getValueType(0)));
12288     else
12289       DAG.ReplaceAllUsesOfValueWith(Inputs[i],
12290         DAG.getAnyExtOrTrunc(InSrc, dl, N->getValueType(0)));
12291   }
12292 
12293   std::list<HandleSDNode> PromOpHandles;
12294   for (auto &PromOp : PromOps)
12295     PromOpHandles.emplace_back(PromOp);
12296 
12297   // Replace all operations (these are all the same, but have a different
12298   // (promoted) return type). DAG.getNode will validate that the types of
12299   // a binary operator match, so go through the list in reverse so that
12300   // we've likely promoted both operands first.
12301   while (!PromOpHandles.empty()) {
12302     SDValue PromOp = PromOpHandles.back().getValue();
12303     PromOpHandles.pop_back();
12304 
12305     unsigned C;
12306     switch (PromOp.getOpcode()) {
12307     default:             C = 0; break;
12308     case ISD::SELECT:    C = 1; break;
12309     case ISD::SELECT_CC: C = 2; break;
12310     }
12311 
12312     if ((!isa<ConstantSDNode>(PromOp.getOperand(C)) &&
12313          PromOp.getOperand(C).getValueType() != N->getValueType(0)) ||
12314         (!isa<ConstantSDNode>(PromOp.getOperand(C+1)) &&
12315          PromOp.getOperand(C+1).getValueType() != N->getValueType(0))) {
12316       // The to-be-promoted operands of this node have not yet been
12317       // promoted (this should be rare because we're going through the
12318       // list backward, but if one of the operands has several users in
12319       // this cluster of to-be-promoted nodes, it is possible).
12320       PromOpHandles.emplace_front(PromOp);
12321       continue;
12322     }
12323 
12324     // For SELECT and SELECT_CC nodes, we do a similar check for any
12325     // to-be-promoted comparison inputs.
12326     if (PromOp.getOpcode() == ISD::SELECT ||
12327         PromOp.getOpcode() == ISD::SELECT_CC) {
12328       if ((SelectTruncOp[0].count(PromOp.getNode()) &&
12329            PromOp.getOperand(0).getValueType() != N->getValueType(0)) ||
12330           (SelectTruncOp[1].count(PromOp.getNode()) &&
12331            PromOp.getOperand(1).getValueType() != N->getValueType(0))) {
12332         PromOpHandles.emplace_front(PromOp);
12333         continue;
12334       }
12335     }
12336 
12337     SmallVector<SDValue, 3> Ops(PromOp.getNode()->op_begin(),
12338                                 PromOp.getNode()->op_end());
12339 
12340     // If this node has constant inputs, then they'll need to be promoted here.
12341     for (unsigned i = 0; i < 2; ++i) {
12342       if (!isa<ConstantSDNode>(Ops[C+i]))
12343         continue;
12344       if (Ops[C+i].getValueType() == N->getValueType(0))
12345         continue;
12346 
12347       if (N->getOpcode() == ISD::SIGN_EXTEND)
12348         Ops[C+i] = DAG.getSExtOrTrunc(Ops[C+i], dl, N->getValueType(0));
12349       else if (N->getOpcode() == ISD::ZERO_EXTEND)
12350         Ops[C+i] = DAG.getZExtOrTrunc(Ops[C+i], dl, N->getValueType(0));
12351       else
12352         Ops[C+i] = DAG.getAnyExtOrTrunc(Ops[C+i], dl, N->getValueType(0));
12353     }
12354 
12355     // If we've promoted the comparison inputs of a SELECT or SELECT_CC,
12356     // truncate them again to the original value type.
12357     if (PromOp.getOpcode() == ISD::SELECT ||
12358         PromOp.getOpcode() == ISD::SELECT_CC) {
12359       auto SI0 = SelectTruncOp[0].find(PromOp.getNode());
12360       if (SI0 != SelectTruncOp[0].end())
12361         Ops[0] = DAG.getNode(ISD::TRUNCATE, dl, SI0->second, Ops[0]);
12362       auto SI1 = SelectTruncOp[1].find(PromOp.getNode());
12363       if (SI1 != SelectTruncOp[1].end())
12364         Ops[1] = DAG.getNode(ISD::TRUNCATE, dl, SI1->second, Ops[1]);
12365     }
12366 
12367     DAG.ReplaceAllUsesOfValueWith(PromOp,
12368       DAG.getNode(PromOp.getOpcode(), dl, N->getValueType(0), Ops));
12369   }
12370 
12371   // Now we're left with the initial extension itself.
12372   if (!ReallyNeedsExt)
12373     return N->getOperand(0);
12374 
12375   // To zero extend, just mask off everything except for the first bit (in the
12376   // i1 case).
12377   if (N->getOpcode() == ISD::ZERO_EXTEND)
12378     return DAG.getNode(ISD::AND, dl, N->getValueType(0), N->getOperand(0),
12379                        DAG.getConstant(APInt::getLowBitsSet(
12380                                          N->getValueSizeInBits(0), PromBits),
12381                                        dl, N->getValueType(0)));
12382 
12383   assert(N->getOpcode() == ISD::SIGN_EXTEND &&
12384          "Invalid extension type");
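  // Shift the promoted bits to the top and arithmetic-shift them back down;
  // e.g., for an i1 operand and an i32 result this is (x << 31) >> 31.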
12385   EVT ShiftAmountTy = getShiftAmountTy(N->getValueType(0), DAG.getDataLayout());
12386   SDValue ShiftCst =
12387       DAG.getConstant(N->getValueSizeInBits(0) - PromBits, dl, ShiftAmountTy);
12388   return DAG.getNode(
12389       ISD::SRA, dl, N->getValueType(0),
12390       DAG.getNode(ISD::SHL, dl, N->getValueType(0), N->getOperand(0), ShiftCst),
12391       ShiftCst);
12392 }
12393 
12394 SDValue PPCTargetLowering::combineSetCC(SDNode *N,
12395                                         DAGCombinerInfo &DCI) const {
12396   assert(N->getOpcode() == ISD::SETCC &&
12397          "Should be called with a SETCC node");
12398 
12399   ISD::CondCode CC = cast<CondCodeSDNode>(N->getOperand(2))->get();
12400   if (CC == ISD::SETNE || CC == ISD::SETEQ) {
12401     SDValue LHS = N->getOperand(0);
12402     SDValue RHS = N->getOperand(1);
12403 
12404     // If there is a '0 - y' pattern, canonicalize the pattern to the RHS.
12405     if (LHS.getOpcode() == ISD::SUB && isNullConstant(LHS.getOperand(0)) &&
12406         LHS.hasOneUse())
12407       std::swap(LHS, RHS);
12408 
12409     // x == 0-y --> x+y == 0
12410     // x != 0-y --> x+y != 0
12411     if (RHS.getOpcode() == ISD::SUB && isNullConstant(RHS.getOperand(0)) &&
12412         RHS.hasOneUse()) {
12413       SDLoc DL(N);
12414       SelectionDAG &DAG = DCI.DAG;
12415       EVT VT = N->getValueType(0);
12416       EVT OpVT = LHS.getValueType();
12417       SDValue Add = DAG.getNode(ISD::ADD, DL, OpVT, LHS, RHS.getOperand(1));
12418       return DAG.getSetCC(DL, VT, Add, DAG.getConstant(0, DL, OpVT), CC);
12419     }
12420   }
12421 
12422   return DAGCombineTruncBoolExt(N, DCI);
12423 }
12424 
12425 // Is this an extending load from an f32 to an f64?
12426 static bool isFPExtLoad(SDValue Op) {
12427   if (LoadSDNode *LD = dyn_cast<LoadSDNode>(Op.getNode()))
12428     return LD->getExtensionType() == ISD::EXTLOAD &&
12429       Op.getValueType() == MVT::f64;
12430   return false;
12431 }
12432 
12433 /// Reduces the number of fp-to-int conversion when building a vector.
12434 ///
12435 /// If this vector is built out of floating to integer conversions,
12436 /// transform it to a vector built out of floating point values followed by a
12437 /// single floating to integer conversion of the vector.
12438 /// Namely  (build_vector (fptosi $A), (fptosi $B), ...)
12439 /// becomes (fptosi (build_vector ($A, $B, ...)))
12440 SDValue PPCTargetLowering::
12441 combineElementTruncationToVectorTruncation(SDNode *N,
12442                                            DAGCombinerInfo &DCI) const {
12443   assert(N->getOpcode() == ISD::BUILD_VECTOR &&
12444          "Should be called with a BUILD_VECTOR node");
12445 
12446   SelectionDAG &DAG = DCI.DAG;
12447   SDLoc dl(N);
12448 
12449   SDValue FirstInput = N->getOperand(0);
12450   assert(FirstInput.getOpcode() == PPCISD::MFVSR &&
12451          "The input operand must be an fp-to-int conversion.");
12452 
12453   // This combine happens after legalization so the fp_to_[su]i nodes are
  // already converted to PPCISD nodes.
12455   unsigned FirstConversion = FirstInput.getOperand(0).getOpcode();
12456   if (FirstConversion == PPCISD::FCTIDZ ||
12457       FirstConversion == PPCISD::FCTIDUZ ||
12458       FirstConversion == PPCISD::FCTIWZ ||
12459       FirstConversion == PPCISD::FCTIWUZ) {
12460     bool IsSplat = true;
12461     bool Is32Bit = FirstConversion == PPCISD::FCTIWZ ||
12462       FirstConversion == PPCISD::FCTIWUZ;
12463     EVT SrcVT = FirstInput.getOperand(0).getValueType();
12464     SmallVector<SDValue, 4> Ops;
12465     EVT TargetVT = N->getValueType(0);
12466     for (int i = 0, e = N->getNumOperands(); i < e; ++i) {
12467       SDValue NextOp = N->getOperand(i);
12468       if (NextOp.getOpcode() != PPCISD::MFVSR)
12469         return SDValue();
12470       unsigned NextConversion = NextOp.getOperand(0).getOpcode();
12471       if (NextConversion != FirstConversion)
12472         return SDValue();
12473       // If we are converting to 32-bit integers, we need to add an FP_ROUND.
      // This is not valid if the input was originally double precision. It is
      // also not profitable unless this is an extending load, in which case
      // doing this combine allows us to combine consecutive loads.
12477       if (Is32Bit && !isFPExtLoad(NextOp.getOperand(0).getOperand(0)))
12478         return SDValue();
12479       if (N->getOperand(i) != FirstInput)
12480         IsSplat = false;
12481     }
12482 
12483     // If this is a splat, we leave it as-is since there will be only a single
12484     // fp-to-int conversion followed by a splat of the integer. This is better
12485     // for 32-bit and smaller ints and neutral for 64-bit ints.
12486     if (IsSplat)
12487       return SDValue();
12488 
12489     // Now that we know we have the right type of node, get its operands
12490     for (int i = 0, e = N->getNumOperands(); i < e; ++i) {
12491       SDValue In = N->getOperand(i).getOperand(0);
12492       if (Is32Bit) {
12493         // For 32-bit values, we need to add an FP_ROUND node (if we made it
12494         // here, we know that all inputs are extending loads so this is safe).
12495         if (In.isUndef())
12496           Ops.push_back(DAG.getUNDEF(SrcVT));
12497         else {
12498           SDValue Trunc = DAG.getNode(ISD::FP_ROUND, dl,
12499                                       MVT::f32, In.getOperand(0),
12500                                       DAG.getIntPtrConstant(1, dl));
12501           Ops.push_back(Trunc);
12502         }
12503       } else
12504         Ops.push_back(In.isUndef() ? DAG.getUNDEF(SrcVT) : In.getOperand(0));
12505     }
12506 
12507     unsigned Opcode;
12508     if (FirstConversion == PPCISD::FCTIDZ ||
12509         FirstConversion == PPCISD::FCTIWZ)
12510       Opcode = ISD::FP_TO_SINT;
12511     else
12512       Opcode = ISD::FP_TO_UINT;
12513 
12514     EVT NewVT = TargetVT == MVT::v2i64 ? MVT::v2f64 : MVT::v4f32;
12515     SDValue BV = DAG.getBuildVector(NewVT, dl, Ops);
12516     return DAG.getNode(Opcode, dl, TargetVT, BV);
12517   }
12518   return SDValue();
12519 }
12520 
12521 /// Reduce the number of loads when building a vector.
12522 ///
12523 /// Building a vector out of multiple loads can be converted to a load
12524 /// of the vector type if the loads are consecutive. If the loads are
12525 /// consecutive but in descending order, a shuffle is added at the end
12526 /// to reorder the vector.
12527 static SDValue combineBVOfConsecutiveLoads(SDNode *N, SelectionDAG &DAG) {
12528   assert(N->getOpcode() == ISD::BUILD_VECTOR &&
12529          "Should be called with a BUILD_VECTOR node");
12530 
12531   SDLoc dl(N);
12532 
  // Return early for non-byte-sized types, as they can't be consecutive.
12534   if (!N->getValueType(0).getVectorElementType().isByteSized())
12535     return SDValue();
12536 
12537   bool InputsAreConsecutiveLoads = true;
12538   bool InputsAreReverseConsecutive = true;
12539   unsigned ElemSize = N->getValueType(0).getScalarType().getStoreSize();
12540   SDValue FirstInput = N->getOperand(0);
12541   bool IsRoundOfExtLoad = false;
12542 
12543   if (FirstInput.getOpcode() == ISD::FP_ROUND &&
12544       FirstInput.getOperand(0).getOpcode() == ISD::LOAD) {
12545     LoadSDNode *LD = dyn_cast<LoadSDNode>(FirstInput.getOperand(0));
12546     IsRoundOfExtLoad = LD->getExtensionType() == ISD::EXTLOAD;
12547   }
12548   // Not a build vector of (possibly fp_rounded) loads.
12549   if ((!IsRoundOfExtLoad && FirstInput.getOpcode() != ISD::LOAD) ||
12550       N->getNumOperands() == 1)
12551     return SDValue();
12552 
12553   for (int i = 1, e = N->getNumOperands(); i < e; ++i) {
12554     // If any inputs are fp_round(extload), they all must be.
12555     if (IsRoundOfExtLoad && N->getOperand(i).getOpcode() != ISD::FP_ROUND)
12556       return SDValue();
12557 
12558     SDValue NextInput = IsRoundOfExtLoad ? N->getOperand(i).getOperand(0) :
12559       N->getOperand(i);
12560     if (NextInput.getOpcode() != ISD::LOAD)
12561       return SDValue();
12562 
12563     SDValue PreviousInput =
12564       IsRoundOfExtLoad ? N->getOperand(i-1).getOperand(0) : N->getOperand(i-1);
12565     LoadSDNode *LD1 = dyn_cast<LoadSDNode>(PreviousInput);
12566     LoadSDNode *LD2 = dyn_cast<LoadSDNode>(NextInput);
12567 
12568     // If any inputs are fp_round(extload), they all must be.
12569     if (IsRoundOfExtLoad && LD2->getExtensionType() != ISD::EXTLOAD)
12570       return SDValue();
12571 
12572     if (!isConsecutiveLS(LD2, LD1, ElemSize, 1, DAG))
12573       InputsAreConsecutiveLoads = false;
12574     if (!isConsecutiveLS(LD1, LD2, ElemSize, 1, DAG))
12575       InputsAreReverseConsecutive = false;
12576 
12577     // Exit early if the loads are neither consecutive nor reverse consecutive.
12578     if (!InputsAreConsecutiveLoads && !InputsAreReverseConsecutive)
12579       return SDValue();
12580   }
12581 
12582   assert(!(InputsAreConsecutiveLoads && InputsAreReverseConsecutive) &&
12583          "The loads cannot be both consecutive and reverse consecutive.");
12584 
12585   SDValue FirstLoadOp =
12586     IsRoundOfExtLoad ? FirstInput.getOperand(0) : FirstInput;
12587   SDValue LastLoadOp =
12588     IsRoundOfExtLoad ? N->getOperand(N->getNumOperands()-1).getOperand(0) :
12589                        N->getOperand(N->getNumOperands()-1);
12590 
12591   LoadSDNode *LD1 = dyn_cast<LoadSDNode>(FirstLoadOp);
12592   LoadSDNode *LDL = dyn_cast<LoadSDNode>(LastLoadOp);
12593   if (InputsAreConsecutiveLoads) {
12594     assert(LD1 && "Input needs to be a LoadSDNode.");
12595     return DAG.getLoad(N->getValueType(0), dl, LD1->getChain(),
12596                        LD1->getBasePtr(), LD1->getPointerInfo(),
12597                        LD1->getAlignment());
12598   }
12599   if (InputsAreReverseConsecutive) {
12600     assert(LDL && "Input needs to be a LoadSDNode.");
12601     SDValue Load = DAG.getLoad(N->getValueType(0), dl, LDL->getChain(),
12602                                LDL->getBasePtr(), LDL->getPointerInfo(),
12603                                LDL->getAlignment());
12604     SmallVector<int, 16> Ops;
12605     for (int i = N->getNumOperands() - 1; i >= 0; i--)
12606       Ops.push_back(i);
12607 
12608     return DAG.getVectorShuffle(N->getValueType(0), dl, Load,
12609                                 DAG.getUNDEF(N->getValueType(0)), Ops);
12610   }
12611   return SDValue();
12612 }
12613 
// This function adds the vector_shuffle needed to place the extracted
// vector elements in the positions specified by the CorrectElems encoding.
12617 static SDValue addShuffleForVecExtend(SDNode *N, SelectionDAG &DAG,
12618                                       SDValue Input, uint64_t Elems,
12619                                       uint64_t CorrectElems) {
12620   SDLoc dl(N);
12621 
12622   unsigned NumElems = Input.getValueType().getVectorNumElements();
12623   SmallVector<int, 16> ShuffleMask(NumElems, -1);
12624 
12625   // Knowing the element indices being extracted from the original
12626   // vector and the order in which they're being inserted, just put
  // them at the element indices required by the instruction.
12628   for (unsigned i = 0; i < N->getNumOperands(); i++) {
12629     if (DAG.getDataLayout().isLittleEndian())
12630       ShuffleMask[CorrectElems & 0xF] = Elems & 0xF;
12631     else
12632       ShuffleMask[(CorrectElems & 0xF0) >> 4] = (Elems & 0xF0) >> 4;
12633     CorrectElems = CorrectElems >> 8;
12634     Elems = Elems >> 8;
12635   }
12636 
12637   SDValue Shuffle =
12638       DAG.getVectorShuffle(Input.getValueType(), dl, Input,
12639                            DAG.getUNDEF(Input.getValueType()), ShuffleMask);
12640 
12641   EVT Ty = N->getValueType(0);
12642   SDValue BV = DAG.getNode(PPCISD::SExtVElems, dl, Ty, Shuffle);
12643   return BV;
12644 }
12645 
// Look for build vector patterns where the input operands come from sign-
// extended vector_extract elements of specific indices. If the correct indices
// aren't used, add a vector shuffle to fix up the indices and create a new
// PPCISD::SExtVElems node, which selects the vector sign extend instructions
12650 // during instruction selection.
12651 static SDValue combineBVOfVecSExt(SDNode *N, SelectionDAG &DAG) {
12652   // This array encodes the indices that the vector sign extend instructions
12653   // extract from when extending from one type to another for both BE and LE.
  // The right nibble of each byte corresponds to the LE indices,
  // and the left nibble of each byte corresponds to the BE indices.
12656   // For example: 0x3074B8FC  byte->word
12657   // For LE: the allowed indices are: 0x0,0x4,0x8,0xC
12658   // For BE: the allowed indices are: 0x3,0x7,0xB,0xF
12659   // For example: 0x000070F8  byte->double word
12660   // For LE: the allowed indices are: 0x0,0x8
12661   // For BE: the allowed indices are: 0x7,0xF
12662   uint64_t TargetElems[] = {
12663       0x3074B8FC, // b->w
12664       0x000070F8, // b->d
12665       0x10325476, // h->w
12666       0x00003074, // h->d
12667       0x00001032, // w->d
12668   };
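  // As an illustrative decode of 0x10325476 (h->w): its bytes are
  // 0x10, 0x32, 0x54, 0x76, so the right nibbles give the LE indices
  // 0,2,4,6 and the left nibbles give the BE indices 1,3,5,7.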
12669 
12670   uint64_t Elems = 0;
12671   int Index;
12672   SDValue Input;
12673 
12674   auto isSExtOfVecExtract = [&](SDValue Op) -> bool {
12675     if (!Op)
12676       return false;
12677     if (Op.getOpcode() != ISD::SIGN_EXTEND &&
12678         Op.getOpcode() != ISD::SIGN_EXTEND_INREG)
12679       return false;
12680 
12681     // A SIGN_EXTEND_INREG might be fed by an ANY_EXTEND to produce a value
12682     // of the right width.
12683     SDValue Extract = Op.getOperand(0);
12684     if (Extract.getOpcode() == ISD::ANY_EXTEND)
12685       Extract = Extract.getOperand(0);
12686     if (Extract.getOpcode() != ISD::EXTRACT_VECTOR_ELT)
12687       return false;
12688 
12689     ConstantSDNode *ExtOp = dyn_cast<ConstantSDNode>(Extract.getOperand(1));
12690     if (!ExtOp)
12691       return false;
12692 
12693     Index = ExtOp->getZExtValue();
12694     if (Input && Input != Extract.getOperand(0))
12695       return false;
12696 
12697     if (!Input)
12698       Input = Extract.getOperand(0);
12699 
12700     Elems = Elems << 8;
12701     Index = DAG.getDataLayout().isLittleEndian() ? Index : Index << 4;
12702     Elems |= Index;
12703 
12704     return true;
12705   };
12706 
  // If the build vector operands aren't sign-extended vector extracts
  // of the same input vector, then return.
12709   for (unsigned i = 0; i < N->getNumOperands(); i++) {
12710     if (!isSExtOfVecExtract(N->getOperand(i))) {
12711       return SDValue();
12712     }
12713   }
12714 
  // If the vector extract indices are not correct, add the appropriate
  // vector_shuffle.
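  // The sum of the input and output element widths uniquely identifies the
  // extension kind: 8+32=40 (b->w), 8+64=72 (b->d), 16+32=48 (h->w),
  // 16+64=80 (h->d), and 32+64=96 (w->d).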
12717   int TgtElemArrayIdx;
12718   int InputSize = Input.getValueType().getScalarSizeInBits();
12719   int OutputSize = N->getValueType(0).getScalarSizeInBits();
12720   if (InputSize + OutputSize == 40)
12721     TgtElemArrayIdx = 0;
12722   else if (InputSize + OutputSize == 72)
12723     TgtElemArrayIdx = 1;
12724   else if (InputSize + OutputSize == 48)
12725     TgtElemArrayIdx = 2;
12726   else if (InputSize + OutputSize == 80)
12727     TgtElemArrayIdx = 3;
12728   else if (InputSize + OutputSize == 96)
12729     TgtElemArrayIdx = 4;
12730   else
12731     return SDValue();
12732 
12733   uint64_t CorrectElems = TargetElems[TgtElemArrayIdx];
12734   CorrectElems = DAG.getDataLayout().isLittleEndian()
12735                      ? CorrectElems & 0x0F0F0F0F0F0F0F0F
12736                      : CorrectElems & 0xF0F0F0F0F0F0F0F0;
12737   if (Elems != CorrectElems) {
12738     return addShuffleForVecExtend(N, DAG, Input, Elems, CorrectElems);
12739   }
12740 
12741   // Regular lowering will catch cases where a shuffle is not needed.
12742   return SDValue();
12743 }
12744 
12745 SDValue PPCTargetLowering::DAGCombineBuildVector(SDNode *N,
12746                                                  DAGCombinerInfo &DCI) const {
12747   assert(N->getOpcode() == ISD::BUILD_VECTOR &&
12748          "Should be called with a BUILD_VECTOR node");
12749 
12750   SelectionDAG &DAG = DCI.DAG;
12751   SDLoc dl(N);
12752 
12753   if (!Subtarget.hasVSX())
12754     return SDValue();
12755 
12756   // The target independent DAG combiner will leave a build_vector of
12757   // float-to-int conversions intact. We can generate MUCH better code for
12758   // a float-to-int conversion of a vector of floats.
12759   SDValue FirstInput = N->getOperand(0);
12760   if (FirstInput.getOpcode() == PPCISD::MFVSR) {
12761     SDValue Reduced = combineElementTruncationToVectorTruncation(N, DCI);
12762     if (Reduced)
12763       return Reduced;
12764   }
12765 
12766   // If we're building a vector out of consecutive loads, just load that
12767   // vector type.
12768   SDValue Reduced = combineBVOfConsecutiveLoads(N, DAG);
12769   if (Reduced)
12770     return Reduced;
12771 
12772   // If we're building a vector out of extended elements from another vector
12773   // we have P9 vector integer extend instructions. The code assumes legal
12774   // input types (i.e. it can't handle things like v4i16) so do not run before
12775   // legalization.
12776   if (Subtarget.hasP9Altivec() && !DCI.isBeforeLegalize()) {
12777     Reduced = combineBVOfVecSExt(N, DAG);
12778     if (Reduced)
12779       return Reduced;
12780   }
12781 
12783   if (N->getValueType(0) != MVT::v2f64)
12784     return SDValue();
12785 
  // Looking for:
  // (build_vector ([su]int_to_fp (extractelt 0)), ([su]int_to_fp (extractelt 1)))
12788   if (FirstInput.getOpcode() != ISD::SINT_TO_FP &&
12789       FirstInput.getOpcode() != ISD::UINT_TO_FP)
12790     return SDValue();
12791   if (N->getOperand(1).getOpcode() != ISD::SINT_TO_FP &&
12792       N->getOperand(1).getOpcode() != ISD::UINT_TO_FP)
12793     return SDValue();
12794   if (FirstInput.getOpcode() != N->getOperand(1).getOpcode())
12795     return SDValue();
12796 
12797   SDValue Ext1 = FirstInput.getOperand(0);
12798   SDValue Ext2 = N->getOperand(1).getOperand(0);
  if (Ext1.getOpcode() != ISD::EXTRACT_VECTOR_ELT ||
      Ext2.getOpcode() != ISD::EXTRACT_VECTOR_ELT)
12801     return SDValue();
12802 
12803   ConstantSDNode *Ext1Op = dyn_cast<ConstantSDNode>(Ext1.getOperand(1));
12804   ConstantSDNode *Ext2Op = dyn_cast<ConstantSDNode>(Ext2.getOperand(1));
12805   if (!Ext1Op || !Ext2Op)
12806     return SDValue();
12807   if (Ext1.getOperand(0).getValueType() != MVT::v4i32 ||
12808       Ext1.getOperand(0) != Ext2.getOperand(0))
12809     return SDValue();
12810 
12811   int FirstElem = Ext1Op->getZExtValue();
12812   int SecondElem = Ext2Op->getZExtValue();
12813   int SubvecIdx;
12814   if (FirstElem == 0 && SecondElem == 1)
12815     SubvecIdx = Subtarget.isLittleEndian() ? 1 : 0;
12816   else if (FirstElem == 2 && SecondElem == 3)
12817     SubvecIdx = Subtarget.isLittleEndian() ? 0 : 1;
12818   else
12819     return SDValue();
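  // Note that on little-endian targets the doubleword subvector numbering is
  // reversed relative to the element numbering, which is why elements {0,1}
  // select subvector 1 there but subvector 0 on big-endian.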
12820 
12821   SDValue SrcVec = Ext1.getOperand(0);
12822   auto NodeType = (N->getOperand(1).getOpcode() == ISD::SINT_TO_FP) ?
12823     PPCISD::SINT_VEC_TO_FP : PPCISD::UINT_VEC_TO_FP;
12824   return DAG.getNode(NodeType, dl, MVT::v2f64,
12825                      SrcVec, DAG.getIntPtrConstant(SubvecIdx, dl));
12826 }
12827 
12828 SDValue PPCTargetLowering::combineFPToIntToFP(SDNode *N,
12829                                               DAGCombinerInfo &DCI) const {
12830   assert((N->getOpcode() == ISD::SINT_TO_FP ||
12831           N->getOpcode() == ISD::UINT_TO_FP) &&
12832          "Need an int -> FP conversion node here");
12833 
12834   if (useSoftFloat() || !Subtarget.has64BitSupport())
12835     return SDValue();
12836 
12837   SelectionDAG &DAG = DCI.DAG;
12838   SDLoc dl(N);
12839   SDValue Op(N, 0);
12840 
  // Don't handle ppc_fp128 here, or integer source types the hardware
  // cannot convert directly (i1, or anything wider than i64).
12843   if (Op.getValueType() != MVT::f32 && Op.getValueType() != MVT::f64)
12844     return SDValue();
12845   if (Op.getOperand(0).getValueType().getSimpleVT() <= MVT(MVT::i1) ||
12846       Op.getOperand(0).getValueType().getSimpleVT() > MVT(MVT::i64))
12847     return SDValue();
12848 
12849   SDValue FirstOperand(Op.getOperand(0));
12850   bool SubWordLoad = FirstOperand.getOpcode() == ISD::LOAD &&
12851     (FirstOperand.getValueType() == MVT::i8 ||
12852      FirstOperand.getValueType() == MVT::i16);
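  // A sketch of what the P9 path below builds for, e.g.,
  // (f64 (sint_to_fp (i8 (load X)))): an LXSIZX node loading the byte
  // zero-extended into a VSR, a VEXTS node sign-extending it in the
  // register, and then the FCFID conversion, avoiding a GPR round trip.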
12853   if (Subtarget.hasP9Vector() && Subtarget.hasP9Altivec() && SubWordLoad) {
12854     bool Signed = N->getOpcode() == ISD::SINT_TO_FP;
12855     bool DstDouble = Op.getValueType() == MVT::f64;
12856     unsigned ConvOp = Signed ?
12857       (DstDouble ? PPCISD::FCFID  : PPCISD::FCFIDS) :
12858       (DstDouble ? PPCISD::FCFIDU : PPCISD::FCFIDUS);
12859     SDValue WidthConst =
12860       DAG.getIntPtrConstant(FirstOperand.getValueType() == MVT::i8 ? 1 : 2,
12861                             dl, false);
12862     LoadSDNode *LDN = cast<LoadSDNode>(FirstOperand.getNode());
12863     SDValue Ops[] = { LDN->getChain(), LDN->getBasePtr(), WidthConst };
12864     SDValue Ld = DAG.getMemIntrinsicNode(PPCISD::LXSIZX, dl,
12865                                          DAG.getVTList(MVT::f64, MVT::Other),
12866                                          Ops, MVT::i8, LDN->getMemOperand());
12867 
12868     // For signed conversion, we need to sign-extend the value in the VSR
12869     if (Signed) {
12870       SDValue ExtOps[] = { Ld, WidthConst };
12871       SDValue Ext = DAG.getNode(PPCISD::VEXTS, dl, MVT::f64, ExtOps);
12872       return DAG.getNode(ConvOp, dl, DstDouble ? MVT::f64 : MVT::f32, Ext);
12873     } else
12874       return DAG.getNode(ConvOp, dl, DstDouble ? MVT::f64 : MVT::f32, Ld);
12875   }
12876 
  // For i32 intermediate values, unfortunately, the conversion functions
  // leave the upper 32 bits of the value undefined. Within the set of
  // scalar instructions, we have no method for zero- or sign-extending the
  // value. Thus, we cannot handle i32 intermediate values here.
12882   if (Op.getOperand(0).getValueType() == MVT::i32)
12883     return SDValue();
12884 
12885   assert((Op.getOpcode() == ISD::SINT_TO_FP || Subtarget.hasFPCVT()) &&
12886          "UINT_TO_FP is supported only with FPCVT");
12887 
12888   // If we have FCFIDS, then use it when converting to single-precision.
12889   // Otherwise, convert to double-precision and then round.
12890   unsigned FCFOp = (Subtarget.hasFPCVT() && Op.getValueType() == MVT::f32)
12891                        ? (Op.getOpcode() == ISD::UINT_TO_FP ? PPCISD::FCFIDUS
12892                                                             : PPCISD::FCFIDS)
12893                        : (Op.getOpcode() == ISD::UINT_TO_FP ? PPCISD::FCFIDU
12894                                                             : PPCISD::FCFID);
12895   MVT FCFTy = (Subtarget.hasFPCVT() && Op.getValueType() == MVT::f32)
12896                   ? MVT::f32
12897                   : MVT::f64;
12898 
  // If we're converting from a float to an int and back to a float again,
  // then we don't need the store/load pair at all.
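  // For example, (f64 (sint_to_fp (fp_to_sint f64:X))) becomes
  // FCFID (FCTIDZ X) directly, with no stack temporary in between.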
12901   if ((Op.getOperand(0).getOpcode() == ISD::FP_TO_UINT &&
12902        Subtarget.hasFPCVT()) ||
12903       (Op.getOperand(0).getOpcode() == ISD::FP_TO_SINT)) {
12904     SDValue Src = Op.getOperand(0).getOperand(0);
12905     if (Src.getValueType() == MVT::f32) {
12906       Src = DAG.getNode(ISD::FP_EXTEND, dl, MVT::f64, Src);
12907       DCI.AddToWorklist(Src.getNode());
12908     } else if (Src.getValueType() != MVT::f64) {
12909       // Make sure that we don't pick up a ppc_fp128 source value.
12910       return SDValue();
12911     }
12912 
12913     unsigned FCTOp =
12914       Op.getOperand(0).getOpcode() == ISD::FP_TO_SINT ? PPCISD::FCTIDZ :
12915                                                         PPCISD::FCTIDUZ;
12916 
12917     SDValue Tmp = DAG.getNode(FCTOp, dl, MVT::f64, Src);
12918     SDValue FP = DAG.getNode(FCFOp, dl, FCFTy, Tmp);
12919 
12920     if (Op.getValueType() == MVT::f32 && !Subtarget.hasFPCVT()) {
12921       FP = DAG.getNode(ISD::FP_ROUND, dl,
12922                        MVT::f32, FP, DAG.getIntPtrConstant(0, dl));
12923       DCI.AddToWorklist(FP.getNode());
12924     }
12925 
12926     return FP;
12927   }
12928 
12929   return SDValue();
12930 }
12931 
12932 // expandVSXLoadForLE - Convert VSX loads (which may be intrinsics for
12933 // builtins) into loads with swaps.
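// For example, on a little-endian subtarget that needs swaps, a v4i32 load
// becomes roughly (v4i32 (bitcast (XXSWAPD (LXVD2X ptr)))), built below out
// of the corresponding PPCISD nodes.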
12934 SDValue PPCTargetLowering::expandVSXLoadForLE(SDNode *N,
12935                                               DAGCombinerInfo &DCI) const {
12936   SelectionDAG &DAG = DCI.DAG;
12937   SDLoc dl(N);
12938   SDValue Chain;
12939   SDValue Base;
12940   MachineMemOperand *MMO;
12941 
12942   switch (N->getOpcode()) {
12943   default:
12944     llvm_unreachable("Unexpected opcode for little endian VSX load");
12945   case ISD::LOAD: {
12946     LoadSDNode *LD = cast<LoadSDNode>(N);
12947     Chain = LD->getChain();
12948     Base = LD->getBasePtr();
12949     MMO = LD->getMemOperand();
    // If the MMO suggests this isn't a load of a full vector, leave
    // things alone.  For a built-in, we have to make the change for
    // correctness, so if there is a size problem, that will be a bug.
12953     if (MMO->getSize() < 16)
12954       return SDValue();
12955     break;
12956   }
12957   case ISD::INTRINSIC_W_CHAIN: {
12958     MemIntrinsicSDNode *Intrin = cast<MemIntrinsicSDNode>(N);
12959     Chain = Intrin->getChain();
12960     // Similarly to the store case below, Intrin->getBasePtr() doesn't get
12961     // us what we want. Get operand 2 instead.
12962     Base = Intrin->getOperand(2);
12963     MMO = Intrin->getMemOperand();
12964     break;
12965   }
12966   }
12967 
12968   MVT VecTy = N->getValueType(0).getSimpleVT();
12969 
  // Do not expand to PPCISD::LXVD2X + PPCISD::XXSWAPD when the load is
  // aligned and the type is a vector with elements up to 4 bytes.
  if (Subtarget.needsSwapsForVSXMemOps() && !(MMO->getAlignment() % 16) &&
      VecTy.getScalarSizeInBits() <= 32) {
12974     return SDValue();
12975   }
12976 
12977   SDValue LoadOps[] = { Chain, Base };
12978   SDValue Load = DAG.getMemIntrinsicNode(PPCISD::LXVD2X, dl,
12979                                          DAG.getVTList(MVT::v2f64, MVT::Other),
12980                                          LoadOps, MVT::v2f64, MMO);
12981 
12982   DCI.AddToWorklist(Load.getNode());
12983   Chain = Load.getValue(1);
12984   SDValue Swap = DAG.getNode(
12985       PPCISD::XXSWAPD, dl, DAG.getVTList(MVT::v2f64, MVT::Other), Chain, Load);
12986   DCI.AddToWorklist(Swap.getNode());
12987 
12988   // Add a bitcast if the resulting load type doesn't match v2f64.
12989   if (VecTy != MVT::v2f64) {
12990     SDValue N = DAG.getNode(ISD::BITCAST, dl, VecTy, Swap);
12991     DCI.AddToWorklist(N.getNode());
12992     // Package {bitcast value, swap's chain} to match Load's shape.
12993     return DAG.getNode(ISD::MERGE_VALUES, dl, DAG.getVTList(VecTy, MVT::Other),
12994                        N, Swap.getValue(1));
12995   }
12996 
12997   return Swap;
12998 }
12999 
13000 // expandVSXStoreForLE - Convert VSX stores (which may be intrinsics for
13001 // builtins) into stores with swaps.
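// For example, a v4i32 store on such a subtarget becomes roughly
// (STXVD2X (XXSWAPD (bitcast-to-v2f64 val)), ptr).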
13002 SDValue PPCTargetLowering::expandVSXStoreForLE(SDNode *N,
13003                                                DAGCombinerInfo &DCI) const {
13004   SelectionDAG &DAG = DCI.DAG;
13005   SDLoc dl(N);
13006   SDValue Chain;
13007   SDValue Base;
13008   unsigned SrcOpnd;
13009   MachineMemOperand *MMO;
13010 
13011   switch (N->getOpcode()) {
13012   default:
13013     llvm_unreachable("Unexpected opcode for little endian VSX store");
13014   case ISD::STORE: {
13015     StoreSDNode *ST = cast<StoreSDNode>(N);
13016     Chain = ST->getChain();
13017     Base = ST->getBasePtr();
13018     MMO = ST->getMemOperand();
13019     SrcOpnd = 1;
    // If the MMO suggests this isn't a store of a full vector, leave
    // things alone.  For a built-in, we have to make the change for
    // correctness, so if there is a size problem, that will be a bug.
13023     if (MMO->getSize() < 16)
13024       return SDValue();
13025     break;
13026   }
13027   case ISD::INTRINSIC_VOID: {
13028     MemIntrinsicSDNode *Intrin = cast<MemIntrinsicSDNode>(N);
13029     Chain = Intrin->getChain();
    // Intrin->getBasePtr() similarly does not give us what we want; use
    // operand 3 instead.
13031     Base = Intrin->getOperand(3);
13032     MMO = Intrin->getMemOperand();
13033     SrcOpnd = 2;
13034     break;
13035   }
13036   }
13037 
13038   SDValue Src = N->getOperand(SrcOpnd);
13039   MVT VecTy = Src.getValueType().getSimpleVT();
13040 
  // Do not expand to PPCISD::XXSWAPD and PPCISD::STXVD2X when the store is
  // aligned and the type is a vector with elements up to 4 bytes.
  if (Subtarget.needsSwapsForVSXMemOps() && !(MMO->getAlignment() % 16) &&
      VecTy.getScalarSizeInBits() <= 32) {
13045     return SDValue();
13046   }
13047 
13048   // All stores are done as v2f64 and possible bit cast.
13049   if (VecTy != MVT::v2f64) {
13050     Src = DAG.getNode(ISD::BITCAST, dl, MVT::v2f64, Src);
13051     DCI.AddToWorklist(Src.getNode());
13052   }
13053 
13054   SDValue Swap = DAG.getNode(PPCISD::XXSWAPD, dl,
13055                              DAG.getVTList(MVT::v2f64, MVT::Other), Chain, Src);
13056   DCI.AddToWorklist(Swap.getNode());
13057   Chain = Swap.getValue(1);
13058   SDValue StoreOps[] = { Chain, Swap, Base };
13059   SDValue Store = DAG.getMemIntrinsicNode(PPCISD::STXVD2X, dl,
13060                                           DAG.getVTList(MVT::Other),
13061                                           StoreOps, VecTy, MMO);
13062   DCI.AddToWorklist(Store.getNode());
13063   return Store;
13064 }
13065 
13066 // Handle DAG combine for STORE (FP_TO_INT F).
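// For example, (store (i32 (fp_to_sint f64:F)), ptr) becomes an
// FP_TO_SINT_IN_VSR conversion feeding a ST_VSR_SCAL_INT memory node, so the
// value is converted and stored directly from a VSR.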
13067 SDValue PPCTargetLowering::combineStoreFPToInt(SDNode *N,
13068                                                DAGCombinerInfo &DCI) const {
13070   SelectionDAG &DAG = DCI.DAG;
13071   SDLoc dl(N);
13072   unsigned Opcode = N->getOperand(1).getOpcode();
13073 
  assert((Opcode == ISD::FP_TO_SINT || Opcode == ISD::FP_TO_UINT) &&
         "Not an FP_TO_INT instruction!");
13076 
13077   SDValue Val = N->getOperand(1).getOperand(0);
13078   EVT Op1VT = N->getOperand(1).getValueType();
13079   EVT ResVT = Val.getValueType();
13080 
13081   // Floating point types smaller than 32 bits are not legal on Power.
13082   if (ResVT.getScalarSizeInBits() < 32)
13083     return SDValue();
13084 
  // Only perform the combine for conversions to i64/i32, or to i16/i8 on
  // Power9.
13086   bool ValidTypeForStoreFltAsInt =
13087         (Op1VT == MVT::i32 || Op1VT == MVT::i64 ||
13088          (Subtarget.hasP9Vector() && (Op1VT == MVT::i16 || Op1VT == MVT::i8)));
13089 
13090   if (ResVT == MVT::ppcf128 || !Subtarget.hasP8Altivec() ||
13091       cast<StoreSDNode>(N)->isTruncatingStore() || !ValidTypeForStoreFltAsInt)
13092     return SDValue();
13093 
13094   // Extend f32 values to f64
13095   if (ResVT.getScalarSizeInBits() == 32) {
13096     Val = DAG.getNode(ISD::FP_EXTEND, dl, MVT::f64, Val);
13097     DCI.AddToWorklist(Val.getNode());
13098   }
13099 
13100   // Set signed or unsigned conversion opcode.
13101   unsigned ConvOpcode = (Opcode == ISD::FP_TO_SINT) ?
13102                           PPCISD::FP_TO_SINT_IN_VSR :
13103                           PPCISD::FP_TO_UINT_IN_VSR;
13104 
13105   Val = DAG.getNode(ConvOpcode,
13106                     dl, ResVT == MVT::f128 ? MVT::f128 : MVT::f64, Val);
13107   DCI.AddToWorklist(Val.getNode());
13108 
13109   // Set number of bytes being converted.
13110   unsigned ByteSize = Op1VT.getScalarSizeInBits() / 8;
13111   SDValue Ops[] = { N->getOperand(0), Val, N->getOperand(2),
13112                     DAG.getIntPtrConstant(ByteSize, dl, false),
13113                     DAG.getValueType(Op1VT) };
13114 
13115   Val = DAG.getMemIntrinsicNode(PPCISD::ST_VSR_SCAL_INT, dl,
13116           DAG.getVTList(MVT::Other), Ops,
13117           cast<StoreSDNode>(N)->getMemoryVT(),
13118           cast<StoreSDNode>(N)->getMemOperand());
13119 
13120   DCI.AddToWorklist(Val.getNode());
13121   return Val;
13122 }
13123 
13124 SDValue PPCTargetLowering::combineVReverseMemOP(ShuffleVectorSDNode *SVN,
13125                                                 LSBaseSDNode *LSBase,
13126                                                 DAGCombinerInfo &DCI) const {
13127   assert((ISD::isNormalLoad(LSBase) || ISD::isNormalStore(LSBase)) &&
13128         "Not a reverse memop pattern!");
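  // A "reverse memop" here is (shuffle <N-1,...,1,0> (load ptr)), or a store
  // of such a shuffle; on P9 little-endian these map directly onto the
  // byte-order-correct LOAD_VEC_BE/STORE_VEC_BE nodes built below.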
13129 
13130   auto IsElementReverse = [](const ShuffleVectorSDNode *SVN) -> bool {
13131     auto Mask = SVN->getMask();
13132     int i = 0;
13133     auto I = Mask.rbegin();
13134     auto E = Mask.rend();
13135 
13136     for (; I != E; ++I) {
13137       if (*I != i)
13138         return false;
13139       i++;
13140     }
13141     return true;
13142   };
13143 
13144   SelectionDAG &DAG = DCI.DAG;
13145   EVT VT = SVN->getValueType(0);
13146 
13147   if (!isTypeLegal(VT) || !Subtarget.isLittleEndian() || !Subtarget.hasVSX())
13148     return SDValue();
13149 
  // Before P9, the PPCVSXSwapRemoval pass handles the element order instead;
  // see the comment in PPCVSXSwapRemoval.cpp. This combine conflicts with
  // that optimization, so we do not perform it there.
13153   if (!Subtarget.hasP9Vector())
13154     return SDValue();
13155 
  if (!IsElementReverse(SVN))
13157     return SDValue();
13158 
13159   if (LSBase->getOpcode() == ISD::LOAD) {
13160     SDLoc dl(SVN);
13161     SDValue LoadOps[] = {LSBase->getChain(), LSBase->getBasePtr()};
13162     return DAG.getMemIntrinsicNode(
13163         PPCISD::LOAD_VEC_BE, dl, DAG.getVTList(VT, MVT::Other), LoadOps,
13164         LSBase->getMemoryVT(), LSBase->getMemOperand());
13165   }
13166 
13167   if (LSBase->getOpcode() == ISD::STORE) {
13168     SDLoc dl(LSBase);
13169     SDValue StoreOps[] = {LSBase->getChain(), SVN->getOperand(0),
13170                           LSBase->getBasePtr()};
13171     return DAG.getMemIntrinsicNode(
13172         PPCISD::STORE_VEC_BE, dl, DAG.getVTList(MVT::Other), StoreOps,
13173         LSBase->getMemoryVT(), LSBase->getMemOperand());
13174   }
13175 
13176   llvm_unreachable("Expected a load or store node here");
13177 }
13178 
13179 SDValue PPCTargetLowering::PerformDAGCombine(SDNode *N,
13180                                              DAGCombinerInfo &DCI) const {
13181   SelectionDAG &DAG = DCI.DAG;
13182   SDLoc dl(N);
13183   switch (N->getOpcode()) {
13184   default: break;
13185   case ISD::ADD:
13186     return combineADD(N, DCI);
13187   case ISD::SHL:
13188     return combineSHL(N, DCI);
13189   case ISD::SRA:
13190     return combineSRA(N, DCI);
13191   case ISD::SRL:
13192     return combineSRL(N, DCI);
13193   case ISD::MUL:
13194     return combineMUL(N, DCI);
13195   case PPCISD::SHL:
13196     if (isNullConstant(N->getOperand(0))) // 0 << V -> 0.
13197         return N->getOperand(0);
13198     break;
13199   case PPCISD::SRL:
13200     if (isNullConstant(N->getOperand(0))) // 0 >>u V -> 0.
13201         return N->getOperand(0);
13202     break;
13203   case PPCISD::SRA:
13204     if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(N->getOperand(0))) {
13205       if (C->isNullValue() ||   //  0 >>s V -> 0.
13206           C->isAllOnesValue())    // -1 >>s V -> -1.
13207         return N->getOperand(0);
13208     }
13209     break;
13210   case ISD::SIGN_EXTEND:
13211   case ISD::ZERO_EXTEND:
13212   case ISD::ANY_EXTEND:
13213     return DAGCombineExtBoolTrunc(N, DCI);
13214   case ISD::TRUNCATE:
13215     return combineTRUNCATE(N, DCI);
13216   case ISD::SETCC:
13217     if (SDValue CSCC = combineSetCC(N, DCI))
13218       return CSCC;
13219     LLVM_FALLTHROUGH;
13220   case ISD::SELECT_CC:
13221     return DAGCombineTruncBoolExt(N, DCI);
13222   case ISD::SINT_TO_FP:
13223   case ISD::UINT_TO_FP:
13224     return combineFPToIntToFP(N, DCI);
13225   case ISD::VECTOR_SHUFFLE:
13226     if (ISD::isNormalLoad(N->getOperand(0).getNode())) {
13227       LSBaseSDNode* LSBase = cast<LSBaseSDNode>(N->getOperand(0));
13228       return combineVReverseMemOP(cast<ShuffleVectorSDNode>(N), LSBase, DCI);
13229     }
13230     break;
13231   case ISD::STORE: {
13233     EVT Op1VT = N->getOperand(1).getValueType();
13234     unsigned Opcode = N->getOperand(1).getOpcode();
13235 
13236     if (Opcode == ISD::FP_TO_SINT || Opcode == ISD::FP_TO_UINT) {
      SDValue Val = combineStoreFPToInt(N, DCI);
13238       if (Val)
13239         return Val;
13240     }
13241 
13242     if (Opcode == ISD::VECTOR_SHUFFLE && ISD::isNormalStore(N)) {
13243       ShuffleVectorSDNode *SVN = cast<ShuffleVectorSDNode>(N->getOperand(1));
      SDValue Val = combineVReverseMemOP(SVN, cast<LSBaseSDNode>(N), DCI);
13245       if (Val)
13246         return Val;
13247     }
13248 
13249     // Turn STORE (BSWAP) -> sthbrx/stwbrx.
13250     if (cast<StoreSDNode>(N)->isUnindexed() && Opcode == ISD::BSWAP &&
13251         N->getOperand(1).getNode()->hasOneUse() &&
13252         (Op1VT == MVT::i32 || Op1VT == MVT::i16 ||
13253          (Subtarget.hasLDBRX() && Subtarget.isPPC64() && Op1VT == MVT::i64))) {
13254 
      // STBRX can only handle simple types, and it makes no sense to store
      // fewer than two bytes in byte-reversed order.
13257       EVT mVT = cast<StoreSDNode>(N)->getMemoryVT();
13258       if (mVT.isExtended() || mVT.getSizeInBits() < 16)
13259         break;
13260 
13261       SDValue BSwapOp = N->getOperand(1).getOperand(0);
13262       // Do an any-extend to 32-bits if this is a half-word input.
13263       if (BSwapOp.getValueType() == MVT::i16)
13264         BSwapOp = DAG.getNode(ISD::ANY_EXTEND, dl, MVT::i32, BSwapOp);
13265 
      // If the type of the BSWAP operand is wider than the stored memory
      // width, it needs to be shifted right before the STBRX.
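      // For example, an i64 bswap stored as i16 keeps bits 63..48 of the
      // byte-swapped value: shift right by 48, truncate to i32, then sthbrx.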
13268       if (Op1VT.bitsGT(mVT)) {
13269         int Shift = Op1VT.getSizeInBits() - mVT.getSizeInBits();
13270         BSwapOp = DAG.getNode(ISD::SRL, dl, Op1VT, BSwapOp,
13271                               DAG.getConstant(Shift, dl, MVT::i32));
13272         // Need to truncate if this is a bswap of i64 stored as i32/i16.
13273         if (Op1VT == MVT::i64)
13274           BSwapOp = DAG.getNode(ISD::TRUNCATE, dl, MVT::i32, BSwapOp);
13275       }
13276 
13277       SDValue Ops[] = {
13278         N->getOperand(0), BSwapOp, N->getOperand(2), DAG.getValueType(mVT)
13279       };
13280       return
13281         DAG.getMemIntrinsicNode(PPCISD::STBRX, dl, DAG.getVTList(MVT::Other),
13282                                 Ops, cast<StoreSDNode>(N)->getMemoryVT(),
13283                                 cast<StoreSDNode>(N)->getMemOperand());
13284     }
13285 
13286     // STORE Constant:i32<0>  ->  STORE<trunc to i32> Constant:i64<0>
    // so as to increase the chance of CSE-ing the constant materialization.
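    // A negative value illustrates the sign extension below: i32<-1> is
    // rebuilt as Constant:i64<-1> and stored with a 4-byte truncating store,
    // which is byte-for-byte identical to the original i32 store.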
13288     if (Subtarget.isPPC64() && !DCI.isBeforeLegalize() &&
13289         isa<ConstantSDNode>(N->getOperand(1)) && Op1VT == MVT::i32) {
      // Need to sign-extend to 64 bits to handle negative values.
13291       EVT MemVT = cast<StoreSDNode>(N)->getMemoryVT();
13292       uint64_t Val64 = SignExtend64(N->getConstantOperandVal(1),
13293                                     MemVT.getSizeInBits());
13294       SDValue Const64 = DAG.getConstant(Val64, dl, MVT::i64);
13295 
13296       // DAG.getTruncStore() can't be used here because it doesn't accept
13297       // the general (base + offset) addressing mode.
13298       // So we use UpdateNodeOperands and setTruncatingStore instead.
13299       DAG.UpdateNodeOperands(N, N->getOperand(0), Const64, N->getOperand(2),
13300                              N->getOperand(3));
13301       cast<StoreSDNode>(N)->setTruncatingStore(true);
13302       return SDValue(N, 0);
13303     }
13304 
    // For little endian, VSX stores require generating xxswapd/stxvd2x.
13306     // Not needed on ISA 3.0 based CPUs since we have a non-permuting store.
13307     if (Op1VT.isSimple()) {
13308       MVT StoreVT = Op1VT.getSimpleVT();
13309       if (Subtarget.needsSwapsForVSXMemOps() &&
13310           (StoreVT == MVT::v2f64 || StoreVT == MVT::v2i64 ||
13311            StoreVT == MVT::v4f32 || StoreVT == MVT::v4i32))
13312         return expandVSXStoreForLE(N, DCI);
13313     }
13314     break;
13315   }
13316   case ISD::LOAD: {
13317     LoadSDNode *LD = cast<LoadSDNode>(N);
13318     EVT VT = LD->getValueType(0);
13319 
13320     // For little endian, VSX loads require generating lxvd2x/xxswapd.
13321     // Not needed on ISA 3.0 based CPUs since we have a non-permuting load.
13322     if (VT.isSimple()) {
13323       MVT LoadVT = VT.getSimpleVT();
13324       if (Subtarget.needsSwapsForVSXMemOps() &&
13325           (LoadVT == MVT::v2f64 || LoadVT == MVT::v2i64 ||
13326            LoadVT == MVT::v4f32 || LoadVT == MVT::v4i32))
13327         return expandVSXLoadForLE(N, DCI);
13328     }
13329 
13330     // We sometimes end up with a 64-bit integer load, from which we extract
13331     // two single-precision floating-point numbers. This happens with
13332     // std::complex<float>, and other similar structures, because of the way we
13333     // canonicalize structure copies. However, if we lack direct moves,
13334     // then the final bitcasts from the extracted integer values to the
13335     // floating-point numbers turn into store/load pairs. Even with direct moves,
13336     // just loading the two floating-point numbers is likely better.
13337     auto ReplaceTwoFloatLoad = [&]() {
13338       if (VT != MVT::i64)
13339         return false;
13340 
13341       if (LD->getExtensionType() != ISD::NON_EXTLOAD ||
13342           LD->isVolatile())
13343         return false;
13344 
13345       //  We're looking for a sequence like this:
13346       //  t13: i64,ch = load<LD8[%ref.tmp]> t0, t6, undef:i64
13347       //      t16: i64 = srl t13, Constant:i32<32>
13348       //    t17: i32 = truncate t16
13349       //  t18: f32 = bitcast t17
13350       //    t19: i32 = truncate t13
13351       //  t20: f32 = bitcast t19
13352 
13353       if (!LD->hasNUsesOfValue(2, 0))
13354         return false;
13355 
13356       auto UI = LD->use_begin();
13357       while (UI.getUse().getResNo() != 0) ++UI;
13358       SDNode *Trunc = *UI++;
13359       while (UI.getUse().getResNo() != 0) ++UI;
13360       SDNode *RightShift = *UI;
13361       if (Trunc->getOpcode() != ISD::TRUNCATE)
13362         std::swap(Trunc, RightShift);
13363 
13364       if (Trunc->getOpcode() != ISD::TRUNCATE ||
13365           Trunc->getValueType(0) != MVT::i32 ||
13366           !Trunc->hasOneUse())
13367         return false;
13368       if (RightShift->getOpcode() != ISD::SRL ||
13369           !isa<ConstantSDNode>(RightShift->getOperand(1)) ||
13370           RightShift->getConstantOperandVal(1) != 32 ||
13371           !RightShift->hasOneUse())
13372         return false;
13373 
13374       SDNode *Trunc2 = *RightShift->use_begin();
13375       if (Trunc2->getOpcode() != ISD::TRUNCATE ||
13376           Trunc2->getValueType(0) != MVT::i32 ||
13377           !Trunc2->hasOneUse())
13378         return false;
13379 
13380       SDNode *Bitcast = *Trunc->use_begin();
13381       SDNode *Bitcast2 = *Trunc2->use_begin();
13382 
13383       if (Bitcast->getOpcode() != ISD::BITCAST ||
13384           Bitcast->getValueType(0) != MVT::f32)
13385         return false;
13386       if (Bitcast2->getOpcode() != ISD::BITCAST ||
13387           Bitcast2->getValueType(0) != MVT::f32)
13388         return false;
13389 
13390       if (Subtarget.isLittleEndian())
13391         std::swap(Bitcast, Bitcast2);
13392 
13393       // Bitcast has the second float (in memory-layout order) and Bitcast2
13394       // has the first one.
13395 
13396       SDValue BasePtr = LD->getBasePtr();
13397       if (LD->isIndexed()) {
13398         assert(LD->getAddressingMode() == ISD::PRE_INC &&
13399                "Non-pre-inc AM on PPC?");
13400         BasePtr =
13401           DAG.getNode(ISD::ADD, dl, BasePtr.getValueType(), BasePtr,
13402                       LD->getOffset());
13403       }
13404 
13405       auto MMOFlags =
13406           LD->getMemOperand()->getFlags() & ~MachineMemOperand::MOVolatile;
13407       SDValue FloatLoad = DAG.getLoad(MVT::f32, dl, LD->getChain(), BasePtr,
13408                                       LD->getPointerInfo(), LD->getAlignment(),
13409                                       MMOFlags, LD->getAAInfo());
13410       SDValue AddPtr =
13411         DAG.getNode(ISD::ADD, dl, BasePtr.getValueType(),
13412                     BasePtr, DAG.getIntPtrConstant(4, dl));
13413       SDValue FloatLoad2 = DAG.getLoad(
13414           MVT::f32, dl, SDValue(FloatLoad.getNode(), 1), AddPtr,
13415           LD->getPointerInfo().getWithOffset(4),
13416           MinAlign(LD->getAlignment(), 4), MMOFlags, LD->getAAInfo());
13417 
13418       if (LD->isIndexed()) {
13419         // Note that DAGCombine should re-form any pre-increment load(s) from
13420         // what is produced here if that makes sense.
13421         DAG.ReplaceAllUsesOfValueWith(SDValue(LD, 1), BasePtr);
13422       }
13423 
13424       DCI.CombineTo(Bitcast2, FloatLoad);
13425       DCI.CombineTo(Bitcast, FloatLoad2);
13426 
13427       DAG.ReplaceAllUsesOfValueWith(SDValue(LD, LD->isIndexed() ? 2 : 1),
13428                                     SDValue(FloatLoad2.getNode(), 1));
13429       return true;
13430     };
13431 
13432     if (ReplaceTwoFloatLoad())
13433       return SDValue(N, 0);
13434 
13435     EVT MemVT = LD->getMemoryVT();
13436     Type *Ty = MemVT.getTypeForEVT(*DAG.getContext());
13437     unsigned ABIAlignment = DAG.getDataLayout().getABITypeAlignment(Ty);
13438     Type *STy = MemVT.getScalarType().getTypeForEVT(*DAG.getContext());
13439     unsigned ScalarABIAlignment = DAG.getDataLayout().getABITypeAlignment(STy);
13440     if (LD->isUnindexed() && VT.isVector() &&
13441         ((Subtarget.hasAltivec() && ISD::isNON_EXTLoad(N) &&
13442           // P8 and later hardware should just use LOAD.
13443           !Subtarget.hasP8Vector() && (VT == MVT::v16i8 || VT == MVT::v8i16 ||
13444                                        VT == MVT::v4i32 || VT == MVT::v4f32)) ||
13445          (Subtarget.hasQPX() && (VT == MVT::v4f64 || VT == MVT::v4f32) &&
13446           LD->getAlignment() >= ScalarABIAlignment)) &&
13447         LD->getAlignment() < ABIAlignment) {
13448       // This is a type-legal unaligned Altivec or QPX load.
13449       SDValue Chain = LD->getChain();
13450       SDValue Ptr = LD->getBasePtr();
13451       bool isLittleEndian = Subtarget.isLittleEndian();
13452 
13453       // This implements the loading of unaligned vectors as described in
13454       // the venerable Apple Velocity Engine overview. Specifically:
13455       // https://developer.apple.com/hardwaredrivers/ve/alignment.html
13456       // https://developer.apple.com/hardwaredrivers/ve/code_optimization.html
13457       //
13458       // The general idea is to expand a sequence of one or more unaligned
13459       // loads into an alignment-based permutation-control instruction (lvsl
13460       // or lvsr), a series of regular vector loads (which always truncate
13461       // their input address to an aligned address), and a series of
13462       // permutations.  The results of these permutations are the requested
13463       // loaded values.  The trick is that the last "extra" load is not taken
13464       // from the address you might suspect (sizeof(vector) bytes after the
13465       // last requested load), but rather sizeof(vector) - 1 bytes after the
13466       // last requested vector. The point of this is to avoid a page fault if
13467       // the base address happened to be aligned. This works because if the
13468       // base address is aligned, then adding less than a full vector length
13469       // will cause the last vector in the sequence to be (re)loaded.
      // Otherwise, the next vector will be fetched, as one would expect.
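      // As an illustrative big-endian expansion of a single unaligned v4f32
      // load from Ptr (with no neighboring consecutive load found):
      //   PermCntl  = lvsl Ptr
      //   BaseLoad  = lvx Ptr         ; reads from Ptr rounded down to 16
      //   ExtraLoad = lvx Ptr+15      ; covers the tail of the requested data
      //   Result    = vperm BaseLoad, ExtraLoad, PermCntl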
13472 
13473       // We might be able to reuse the permutation generation from
13474       // a different base address offset from this one by an aligned amount.
13475       // The INTRINSIC_WO_CHAIN DAG combine will attempt to perform this
13476       // optimization later.
13477       Intrinsic::ID Intr, IntrLD, IntrPerm;
13478       MVT PermCntlTy, PermTy, LDTy;
13479       if (Subtarget.hasAltivec()) {
13480         Intr = isLittleEndian ?  Intrinsic::ppc_altivec_lvsr :
13481                                  Intrinsic::ppc_altivec_lvsl;
13482         IntrLD = Intrinsic::ppc_altivec_lvx;
13483         IntrPerm = Intrinsic::ppc_altivec_vperm;
13484         PermCntlTy = MVT::v16i8;
13485         PermTy = MVT::v4i32;
13486         LDTy = MVT::v4i32;
13487       } else {
13488         Intr =   MemVT == MVT::v4f64 ? Intrinsic::ppc_qpx_qvlpcld :
13489                                        Intrinsic::ppc_qpx_qvlpcls;
13490         IntrLD = MemVT == MVT::v4f64 ? Intrinsic::ppc_qpx_qvlfd :
13491                                        Intrinsic::ppc_qpx_qvlfs;
13492         IntrPerm = Intrinsic::ppc_qpx_qvfperm;
13493         PermCntlTy = MVT::v4f64;
13494         PermTy = MVT::v4f64;
13495         LDTy = MemVT.getSimpleVT();
13496       }
13497 
13498       SDValue PermCntl = BuildIntrinsicOp(Intr, Ptr, DAG, dl, PermCntlTy);
13499 
13500       // Create the new MMO for the new base load. It is like the original MMO,
13501       // but represents an area in memory almost twice the vector size centered
13502       // on the original address. If the address is unaligned, we might start
13503       // reading up to (sizeof(vector)-1) bytes below the address of the
13504       // original unaligned load.
13505       MachineFunction &MF = DAG.getMachineFunction();
13506       MachineMemOperand *BaseMMO =
13507         MF.getMachineMemOperand(LD->getMemOperand(),
13508                                 -(long)MemVT.getStoreSize()+1,
13509                                 2*MemVT.getStoreSize()-1);
13510 
13511       // Create the new base load.
13512       SDValue LDXIntID =
13513           DAG.getTargetConstant(IntrLD, dl, getPointerTy(MF.getDataLayout()));
13514       SDValue BaseLoadOps[] = { Chain, LDXIntID, Ptr };
13515       SDValue BaseLoad =
13516         DAG.getMemIntrinsicNode(ISD::INTRINSIC_W_CHAIN, dl,
13517                                 DAG.getVTList(PermTy, MVT::Other),
13518                                 BaseLoadOps, LDTy, BaseMMO);
13519 
13520       // Note that the value of IncOffset (which is provided to the next
13521       // load's pointer info offset value, and thus used to calculate the
13522       // alignment), and the value of IncValue (which is actually used to
13523       // increment the pointer value) are different! This is because we
13524       // require the next load to appear to be aligned, even though it
13525       // is actually offset from the base pointer by a lesser amount.
13526       int IncOffset = VT.getSizeInBits() / 8;
13527       int IncValue = IncOffset;
13528 
13529       // Walk (both up and down) the chain looking for another load at the real
13530       // (aligned) offset (the alignment of the other load does not matter in
13531       // this case). If found, then do not use the offset reduction trick, as
13532       // that will prevent the loads from being later combined (as they would
13533       // otherwise be duplicates).
13534       if (!findConsecutiveLoad(LD, DAG))
13535         --IncValue;
13536 
13537       SDValue Increment =
13538           DAG.getConstant(IncValue, dl, getPointerTy(MF.getDataLayout()));
13539       Ptr = DAG.getNode(ISD::ADD, dl, Ptr.getValueType(), Ptr, Increment);
13540 
13541       MachineMemOperand *ExtraMMO =
13542         MF.getMachineMemOperand(LD->getMemOperand(),
13543                                 1, 2*MemVT.getStoreSize()-1);
13544       SDValue ExtraLoadOps[] = { Chain, LDXIntID, Ptr };
13545       SDValue ExtraLoad =
13546         DAG.getMemIntrinsicNode(ISD::INTRINSIC_W_CHAIN, dl,
13547                                 DAG.getVTList(PermTy, MVT::Other),
13548                                 ExtraLoadOps, LDTy, ExtraMMO);
13549 
13550       SDValue TF = DAG.getNode(ISD::TokenFactor, dl, MVT::Other,
13551         BaseLoad.getValue(1), ExtraLoad.getValue(1));
13552 
13553       // Because vperm has a big-endian bias, we must reverse the order
13554       // of the input vectors and complement the permute control vector
13555       // when generating little endian code.  We have already handled the
13556       // latter by using lvsr instead of lvsl, so just reverse BaseLoad
13557       // and ExtraLoad here.
13558       SDValue Perm;
13559       if (isLittleEndian)
13560         Perm = BuildIntrinsicOp(IntrPerm,
13561                                 ExtraLoad, BaseLoad, PermCntl, DAG, dl);
13562       else
13563         Perm = BuildIntrinsicOp(IntrPerm,
13564                                 BaseLoad, ExtraLoad, PermCntl, DAG, dl);
13565 
13566       if (VT != PermTy)
13567         Perm = Subtarget.hasAltivec() ?
13568                  DAG.getNode(ISD::BITCAST, dl, VT, Perm) :
13569                  DAG.getNode(ISD::FP_ROUND, dl, VT, Perm, // QPX
13570                                DAG.getTargetConstant(1, dl, MVT::i64));
13571                                // second argument is 1 because this rounding
13572                                // is always exact.
13573 
13574       // The output of the permutation is our loaded result, the TokenFactor is
13575       // our new chain.
13576       DCI.CombineTo(N, Perm, TF);
13577       return SDValue(N, 0);
13578     }
13579     }
13580     break;
13581     case ISD::INTRINSIC_WO_CHAIN: {
13582       bool isLittleEndian = Subtarget.isLittleEndian();
13583       unsigned IID = cast<ConstantSDNode>(N->getOperand(0))->getZExtValue();
13584       Intrinsic::ID Intr = (isLittleEndian ? Intrinsic::ppc_altivec_lvsr
13585                                            : Intrinsic::ppc_altivec_lvsl);
13586       if ((IID == Intr ||
13587            IID == Intrinsic::ppc_qpx_qvlpcld  ||
13588            IID == Intrinsic::ppc_qpx_qvlpcls) &&
13589         N->getOperand(1)->getOpcode() == ISD::ADD) {
13590         SDValue Add = N->getOperand(1);
13591 
13592         int Bits = IID == Intrinsic::ppc_qpx_qvlpcld ?
13593                    5 /* 32 byte alignment */ : 4 /* 16 byte alignment */;
13594 
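        // If the low Bits bits of the added offset are known zero, the
        // permute control is unchanged; e.g. lvsl (add X, 32) computes the
        // same control vector as lvsl X, since a 16-byte-aligned addend
        // cannot change the low four address bits that lvsl/lvsr inspect.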
13595         if (DAG.MaskedValueIsZero(Add->getOperand(1),
13596                                   APInt::getAllOnesValue(Bits /* alignment */)
13597                                       .zext(Add.getScalarValueSizeInBits()))) {
13598           SDNode *BasePtr = Add->getOperand(0).getNode();
13599           for (SDNode::use_iterator UI = BasePtr->use_begin(),
13600                                     UE = BasePtr->use_end();
13601                UI != UE; ++UI) {
13602             if (UI->getOpcode() == ISD::INTRINSIC_WO_CHAIN &&
13603                 cast<ConstantSDNode>(UI->getOperand(0))->getZExtValue() == IID) {
13604               // We've found another LVSL/LVSR, and this address is an aligned
13605               // multiple of that one. The results will be the same, so use the
13606               // one we've just found instead.
13607 
13608               return SDValue(*UI, 0);
13609             }
13610           }
13611         }
13612 
13613         if (isa<ConstantSDNode>(Add->getOperand(1))) {
13614           SDNode *BasePtr = Add->getOperand(0).getNode();
13615           for (SDNode::use_iterator UI = BasePtr->use_begin(),
13616                UE = BasePtr->use_end(); UI != UE; ++UI) {
13617             if (UI->getOpcode() == ISD::ADD &&
13618                 isa<ConstantSDNode>(UI->getOperand(1)) &&
13619                 (cast<ConstantSDNode>(Add->getOperand(1))->getZExtValue() -
13620                  cast<ConstantSDNode>(UI->getOperand(1))->getZExtValue()) %
13621                 (1ULL << Bits) == 0) {
13622               SDNode *OtherAdd = *UI;
13623               for (SDNode::use_iterator VI = OtherAdd->use_begin(),
13624                    VE = OtherAdd->use_end(); VI != VE; ++VI) {
13625                 if (VI->getOpcode() == ISD::INTRINSIC_WO_CHAIN &&
13626                     cast<ConstantSDNode>(VI->getOperand(0))->getZExtValue() == IID) {
13627                   return SDValue(*VI, 0);
13628                 }
13629               }
13630             }
13631           }
13632         }
13633       }
13634 
      // Combine vmaxsw/h/b(a, a's negation) into abs(a).
      // This exposes the vabsduw/h/b opportunity for downstream combines.
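      // For example, (vmaxsw (sub 0, a), a) is |a| in each lane, since
      // max(x, -x) is the absolute value elementwise (with wraparound at
      // INT_MIN, matching both vmaxs* and ISD::ABS semantics).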
13637       if (!DCI.isAfterLegalizeDAG() && Subtarget.hasP9Altivec() &&
13638           (IID == Intrinsic::ppc_altivec_vmaxsw ||
13639            IID == Intrinsic::ppc_altivec_vmaxsh ||
13640            IID == Intrinsic::ppc_altivec_vmaxsb)) {
13641         SDValue V1 = N->getOperand(1);
13642         SDValue V2 = N->getOperand(2);
13643         if ((V1.getSimpleValueType() == MVT::v4i32 ||
13644              V1.getSimpleValueType() == MVT::v8i16 ||
13645              V1.getSimpleValueType() == MVT::v16i8) &&
13646             V1.getSimpleValueType() == V2.getSimpleValueType()) {
13647           // (0-a, a)
13648           if (V1.getOpcode() == ISD::SUB &&
13649               ISD::isBuildVectorAllZeros(V1.getOperand(0).getNode()) &&
13650               V1.getOperand(1) == V2) {
13651             return DAG.getNode(ISD::ABS, dl, V2.getValueType(), V2);
13652           }
13653           // (a, 0-a)
13654           if (V2.getOpcode() == ISD::SUB &&
13655               ISD::isBuildVectorAllZeros(V2.getOperand(0).getNode()) &&
13656               V2.getOperand(1) == V1) {
13657             return DAG.getNode(ISD::ABS, dl, V1.getValueType(), V1);
13658           }
13659           // (x-y, y-x)
13660           if (V1.getOpcode() == ISD::SUB && V2.getOpcode() == ISD::SUB &&
13661               V1.getOperand(0) == V2.getOperand(1) &&
13662               V1.getOperand(1) == V2.getOperand(0)) {
13663             return DAG.getNode(ISD::ABS, dl, V1.getValueType(), V1);
13664           }
13665         }
13666       }
13667     }
13668 
13669     break;
13670   case ISD::INTRINSIC_W_CHAIN:
13671     // For little endian, VSX loads require generating lxvd2x/xxswapd.
13672     // Not needed on ISA 3.0 based CPUs since we have a non-permuting load.
13673     if (Subtarget.needsSwapsForVSXMemOps()) {
13674       switch (cast<ConstantSDNode>(N->getOperand(1))->getZExtValue()) {
13675       default:
13676         break;
13677       case Intrinsic::ppc_vsx_lxvw4x:
13678       case Intrinsic::ppc_vsx_lxvd2x:
13679         return expandVSXLoadForLE(N, DCI);
13680       }
13681     }
13682     break;
13683   case ISD::INTRINSIC_VOID:
13684     // For little endian, VSX stores require generating xxswapd/stxvd2x.
13685     // Not needed on ISA 3.0 based CPUs since we have a non-permuting store.
13686     if (Subtarget.needsSwapsForVSXMemOps()) {
13687       switch (cast<ConstantSDNode>(N->getOperand(1))->getZExtValue()) {
13688       default:
13689         break;
13690       case Intrinsic::ppc_vsx_stxvw4x:
13691       case Intrinsic::ppc_vsx_stxvd2x:
13692         return expandVSXStoreForLE(N, DCI);
13693       }
13694     }
13695     break;
13696   case ISD::BSWAP:
13697     // Turn BSWAP (LOAD) -> lhbrx/lwbrx.
13698     if (ISD::isNON_EXTLoad(N->getOperand(0).getNode()) &&
13699         N->getOperand(0).hasOneUse() &&
13700         (N->getValueType(0) == MVT::i32 || N->getValueType(0) == MVT::i16 ||
13701          (Subtarget.hasLDBRX() && Subtarget.isPPC64() &&
13702           N->getValueType(0) == MVT::i64))) {
13703       SDValue Load = N->getOperand(0);
13704       LoadSDNode *LD = cast<LoadSDNode>(Load);
13705       // Create the byte-swapping load.
13706       SDValue Ops[] = {
13707         LD->getChain(),    // Chain
13708         LD->getBasePtr(),  // Ptr
13709         DAG.getValueType(N->getValueType(0)) // VT
13710       };
13711       SDValue BSLoad =
13712         DAG.getMemIntrinsicNode(PPCISD::LBRX, dl,
13713                                 DAG.getVTList(N->getValueType(0) == MVT::i64 ?
13714                                               MVT::i64 : MVT::i32, MVT::Other),
13715                                 Ops, LD->getMemoryVT(), LD->getMemOperand());
13716 
13717       // If this is an i16 load, insert the truncate.
13718       SDValue ResVal = BSLoad;
13719       if (N->getValueType(0) == MVT::i16)
13720         ResVal = DAG.getNode(ISD::TRUNCATE, dl, MVT::i16, BSLoad);
13721 
13722       // First, combine the bswap away.  This makes the value produced by the
13723       // load dead.
13724       DCI.CombineTo(N, ResVal);
13725 
13726       // Next, combine the load away, we give it a bogus result value but a real
13727       // chain result.  The result value is dead because the bswap is dead.
13728       DCI.CombineTo(Load.getNode(), ResVal, BSLoad.getValue(1));
13729 
13730       // Return N so it doesn't get rechecked!
13731       return SDValue(N, 0);
13732     }
13733     break;
13734   case PPCISD::VCMP:
13735     // If a VCMPo node already exists with exactly the same operands as this
13736     // node, use its result instead of this node (VCMPo computes both a CR6 and
13737     // a normal output).
13738     //
13739     if (!N->getOperand(0).hasOneUse() &&
13740         !N->getOperand(1).hasOneUse() &&
13741         !N->getOperand(2).hasOneUse()) {
13742 
13743       // Scan all of the users of the LHS, looking for VCMPo's that match.
13744       SDNode *VCMPoNode = nullptr;
13745 
13746       SDNode *LHSN = N->getOperand(0).getNode();
13747       for (SDNode::use_iterator UI = LHSN->use_begin(), E = LHSN->use_end();
13748            UI != E; ++UI)
13749         if (UI->getOpcode() == PPCISD::VCMPo &&
13750             UI->getOperand(1) == N->getOperand(1) &&
13751             UI->getOperand(2) == N->getOperand(2) &&
13752             UI->getOperand(0) == N->getOperand(0)) {
13753           VCMPoNode = *UI;
13754           break;
13755         }
13756 
      // If there is no VCMPo node, or if its flag value has no uses, don't
      // transform this.
13759       if (!VCMPoNode || VCMPoNode->hasNUsesOfValue(0, 1))
13760         break;
13761 
13762       // Look at the (necessarily single) use of the flag value.  If it has a
13763       // chain, this transformation is more complex.  Note that multiple things
13764       // could use the value result, which we should ignore.
13765       SDNode *FlagUser = nullptr;
13766       for (SDNode::use_iterator UI = VCMPoNode->use_begin();
13767            FlagUser == nullptr; ++UI) {
13768         assert(UI != VCMPoNode->use_end() && "Didn't find user!");
13769         SDNode *User = *UI;
13770         for (unsigned i = 0, e = User->getNumOperands(); i != e; ++i) {
13771           if (User->getOperand(i) == SDValue(VCMPoNode, 1)) {
13772             FlagUser = User;
13773             break;
13774           }
13775         }
13776       }
13777 
      // If the user is an MFOCRF instruction, we know this is safe.
      // Otherwise, we give up for now.
13780       if (FlagUser->getOpcode() == PPCISD::MFOCRF)
13781         return SDValue(VCMPoNode, 0);
13782     }
13783     break;
13784   case ISD::BRCOND: {
13785     SDValue Cond = N->getOperand(1);
13786     SDValue Target = N->getOperand(2);
13787 
13788     if (Cond.getOpcode() == ISD::INTRINSIC_W_CHAIN &&
13789         cast<ConstantSDNode>(Cond.getOperand(1))->getZExtValue() ==
13790           Intrinsic::loop_decrement) {
13791 
13792       // We now need to make the intrinsic dead (it cannot be instruction
13793       // selected).
13794       DAG.ReplaceAllUsesOfValueWith(Cond.getValue(1), Cond.getOperand(0));
13795       assert(Cond.getNode()->hasOneUse() &&
13796              "Counter decrement has more than one use");
13797 
13798       return DAG.getNode(PPCISD::BDNZ, dl, MVT::Other,
13799                          N->getOperand(0), Target);
13800     }
13801   }
13802   break;
13803   case ISD::BR_CC: {
    // If this is a branch on an altivec predicate comparison, lower this so
    // that we don't have to do an MFOCRF: instead, branch directly on CR6.
    // This lowering is done pre-legalize, because the legalizer lowers the
    // predicate compare down to code that is difficult to reassemble.
13808     ISD::CondCode CC = cast<CondCodeSDNode>(N->getOperand(1))->get();
13809     SDValue LHS = N->getOperand(2), RHS = N->getOperand(3);
13810 
    // Sometimes the promoted value of the intrinsic is ANDed with some
    // non-zero value. If so, look through the AND to get to the intrinsic.
13813     if (LHS.getOpcode() == ISD::AND &&
13814         LHS.getOperand(0).getOpcode() == ISD::INTRINSIC_W_CHAIN &&
13815         cast<ConstantSDNode>(LHS.getOperand(0).getOperand(1))->getZExtValue() ==
13816           Intrinsic::loop_decrement &&
13817         isa<ConstantSDNode>(LHS.getOperand(1)) &&
13818         !isNullConstant(LHS.getOperand(1)))
13819       LHS = LHS.getOperand(0);
13820 
13821     if (LHS.getOpcode() == ISD::INTRINSIC_W_CHAIN &&
13822         cast<ConstantSDNode>(LHS.getOperand(1))->getZExtValue() ==
13823           Intrinsic::loop_decrement &&
13824         isa<ConstantSDNode>(RHS)) {
13825       assert((CC == ISD::SETEQ || CC == ISD::SETNE) &&
13826              "Counter decrement comparison is not EQ or NE");
13827 
13828       unsigned Val = cast<ConstantSDNode>(RHS)->getZExtValue();
13829       bool isBDNZ = (CC == ISD::SETEQ && Val) ||
13830                     (CC == ISD::SETNE && !Val);
13831 
13832       // We now need to make the intrinsic dead (it cannot be instruction
13833       // selected).
13834       DAG.ReplaceAllUsesOfValueWith(LHS.getValue(1), LHS.getOperand(0));
13835       assert(LHS.getNode()->hasOneUse() &&
13836              "Counter decrement has more than one use");
13837 
13838       return DAG.getNode(isBDNZ ? PPCISD::BDNZ : PPCISD::BDZ, dl, MVT::Other,
13839                          N->getOperand(0), N->getOperand(4));
13840     }
13841 
13842     int CompareOpc;
13843     bool isDot;
13844 
13845     if (LHS.getOpcode() == ISD::INTRINSIC_WO_CHAIN &&
13846         isa<ConstantSDNode>(RHS) && (CC == ISD::SETEQ || CC == ISD::SETNE) &&
13847         getVectorCompareInfo(LHS, CompareOpc, isDot, Subtarget)) {
13848       assert(isDot && "Can't compare against a vector result!");
13849 
13850       // If this is a comparison against something other than 0/1, then we know
13851       // that the condition is never/always true.
13852       unsigned Val = cast<ConstantSDNode>(RHS)->getZExtValue();
13853       if (Val != 0 && Val != 1) {
13854         if (CC == ISD::SETEQ)      // Cond never true, remove branch.
13855           return N->getOperand(0);
13856         // Always !=, turn it into an unconditional branch.
13857         return DAG.getNode(ISD::BR, dl, MVT::Other,
13858                            N->getOperand(0), N->getOperand(4));
13859       }
13860 
13861       bool BranchOnWhenPredTrue = (CC == ISD::SETEQ) ^ (Val == 0);
13862 
13863       // Create the PPCISD altivec 'dot' comparison node.
13864       SDValue Ops[] = {
13865         LHS.getOperand(2),  // LHS of compare
13866         LHS.getOperand(3),  // RHS of compare
13867         DAG.getConstant(CompareOpc, dl, MVT::i32)
13868       };
13869       EVT VTs[] = { LHS.getOperand(2).getValueType(), MVT::Glue };
13870       SDValue CompNode = DAG.getNode(PPCISD::VCMPo, dl, VTs, Ops);
13871 
13872       // Unpack the result based on how the target uses it.
13873       PPC::Predicate CompOpc;
13874       switch (cast<ConstantSDNode>(LHS.getOperand(1))->getZExtValue()) {
13875       default:  // Can't happen, don't crash on invalid number though.
13876       case 0:   // Branch on the value of the EQ bit of CR6.
13877         CompOpc = BranchOnWhenPredTrue ? PPC::PRED_EQ : PPC::PRED_NE;
13878         break;
13879       case 1:   // Branch on the inverted value of the EQ bit of CR6.
13880         CompOpc = BranchOnWhenPredTrue ? PPC::PRED_NE : PPC::PRED_EQ;
13881         break;
13882       case 2:   // Branch on the value of the LT bit of CR6.
13883         CompOpc = BranchOnWhenPredTrue ? PPC::PRED_LT : PPC::PRED_GE;
13884         break;
13885       case 3:   // Branch on the inverted value of the LT bit of CR6.
13886         CompOpc = BranchOnWhenPredTrue ? PPC::PRED_GE : PPC::PRED_LT;
13887         break;
13888       }
13889 
13890       return DAG.getNode(PPCISD::COND_BRANCH, dl, MVT::Other, N->getOperand(0),
13891                          DAG.getConstant(CompOpc, dl, MVT::i32),
13892                          DAG.getRegister(PPC::CR6, MVT::i32),
13893                          N->getOperand(4), CompNode.getValue(1));
13894     }
13895     break;
13896   }
13897   case ISD::BUILD_VECTOR:
13898     return DAGCombineBuildVector(N, DCI);
13899   case ISD::ABS:
13900     return combineABS(N, DCI);
13901   case ISD::VSELECT:
13902     return combineVSelect(N, DCI);
13903   }
13904 
13905   return SDValue();
13906 }
13907 
13908 SDValue
13909 PPCTargetLowering::BuildSDIVPow2(SDNode *N, const APInt &Divisor,
13910                                  SelectionDAG &DAG,
13911                                  SmallVectorImpl<SDNode *> &Created) const {
13912   // fold (sdiv X, pow2)
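  // using PPCISD::SRA_ADDZE, the sra[wd]i + addze idiom for signed division
  // by a power of two; e.g. (sdiv x, 8) becomes (SRA_ADDZE x, 3), and
  // (sdiv x, -8) additionally negates the result.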
13913   EVT VT = N->getValueType(0);
13914   if (VT == MVT::i64 && !Subtarget.isPPC64())
13915     return SDValue();
13916   if ((VT != MVT::i32 && VT != MVT::i64) ||
13917       !(Divisor.isPowerOf2() || (-Divisor).isPowerOf2()))
13918     return SDValue();
13919 
13920   SDLoc DL(N);
13921   SDValue N0 = N->getOperand(0);
13922 
13923   bool IsNegPow2 = (-Divisor).isPowerOf2();
13924   unsigned Lg2 = (IsNegPow2 ? -Divisor : Divisor).countTrailingZeros();
13925   SDValue ShiftAmt = DAG.getConstant(Lg2, DL, VT);
13926 
13927   SDValue Op = DAG.getNode(PPCISD::SRA_ADDZE, DL, VT, N0, ShiftAmt);
13928   Created.push_back(Op.getNode());
13929 
13930   if (IsNegPow2) {
13931     Op = DAG.getNode(ISD::SUB, DL, VT, DAG.getConstant(0, DL, VT), Op);
13932     Created.push_back(Op.getNode());
13933   }
13934 
13935   return Op;
13936 }
13937 
13938 //===----------------------------------------------------------------------===//
13939 // Inline Assembly Support
13940 //===----------------------------------------------------------------------===//
13941 
13942 void PPCTargetLowering::computeKnownBitsForTargetNode(const SDValue Op,
13943                                                       KnownBits &Known,
13944                                                       const APInt &DemandedElts,
13945                                                       const SelectionDAG &DAG,
13946                                                       unsigned Depth) const {
13947   Known.resetAll();
13948   switch (Op.getOpcode()) {
13949   default: break;
13950   case PPCISD::LBRX: {
13951     // lhbrx is known to have the top bits cleared out.
13952     if (cast<VTSDNode>(Op.getOperand(2))->getVT() == MVT::i16)
13953       Known.Zero = 0xFFFF0000;
13954     break;
13955   }
13956   case ISD::INTRINSIC_WO_CHAIN: {
13957     switch (cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue()) {
13958     default: break;
13959     case Intrinsic::ppc_altivec_vcmpbfp_p:
13960     case Intrinsic::ppc_altivec_vcmpeqfp_p:
13961     case Intrinsic::ppc_altivec_vcmpequb_p:
13962     case Intrinsic::ppc_altivec_vcmpequh_p:
13963     case Intrinsic::ppc_altivec_vcmpequw_p:
13964     case Intrinsic::ppc_altivec_vcmpequd_p:
13965     case Intrinsic::ppc_altivec_vcmpgefp_p:
13966     case Intrinsic::ppc_altivec_vcmpgtfp_p:
13967     case Intrinsic::ppc_altivec_vcmpgtsb_p:
13968     case Intrinsic::ppc_altivec_vcmpgtsh_p:
13969     case Intrinsic::ppc_altivec_vcmpgtsw_p:
13970     case Intrinsic::ppc_altivec_vcmpgtsd_p:
13971     case Intrinsic::ppc_altivec_vcmpgtub_p:
13972     case Intrinsic::ppc_altivec_vcmpgtuh_p:
13973     case Intrinsic::ppc_altivec_vcmpgtuw_p:
13974     case Intrinsic::ppc_altivec_vcmpgtud_p:
13975       Known.Zero = ~1U;  // All bits but the low one are known to be zero.
13976       break;
13977     }
13978   }
13979   }
13980 }
13981 
13982 unsigned PPCTargetLowering::getPrefLoopAlignment(MachineLoop *ML) const {
13983   switch (Subtarget.getDarwinDirective()) {
13984   default: break;
13985   case PPC::DIR_970:
13986   case PPC::DIR_PWR4:
13987   case PPC::DIR_PWR5:
13988   case PPC::DIR_PWR5X:
13989   case PPC::DIR_PWR6:
13990   case PPC::DIR_PWR6X:
13991   case PPC::DIR_PWR7:
13992   case PPC::DIR_PWR8:
13993   case PPC::DIR_PWR9: {
13994     if (!ML)
13995       break;
13996 
13997     if (!DisableInnermostLoopAlign32) {
      // If the nested loop is an innermost loop, prefer a 32-byte alignment
      // (the return value is the log2, hence 5) to reduce cache misses and
      // branch-prediction misses. The actual alignment of the loop will
      // depend on the hotness check and other logic in alignBlocks.
14002       if (ML->getLoopDepth() > 1 && ML->getSubLoops().empty())
14003         return 5;
14004     }
14005 
14006     const PPCInstrInfo *TII = Subtarget.getInstrInfo();
14007 
14008     // For small loops (between 5 and 8 instructions), align to a 32-byte
14009     // boundary so that the entire loop fits in one instruction-cache line.
14010     uint64_t LoopSize = 0;
14011     for (auto I = ML->block_begin(), IE = ML->block_end(); I != IE; ++I)
14012       for (auto J = (*I)->begin(), JE = (*I)->end(); J != JE; ++J) {
14013         LoopSize += TII->getInstSizeInBytes(*J);
14014         if (LoopSize > 32)
14015           break;
14016       }
14017 
14018     if (LoopSize > 16 && LoopSize <= 32)
14019       return 5;
14020 
14021     break;
14022   }
14023   }
14024 
14025   return TargetLowering::getPrefLoopAlignment(ML);
14026 }
14027 
14028 /// getConstraintType - Given a constraint, return the type of
14029 /// constraint it is for this target.
14030 PPCTargetLowering::ConstraintType
14031 PPCTargetLowering::getConstraintType(StringRef Constraint) const {
14032   if (Constraint.size() == 1) {
14033     switch (Constraint[0]) {
14034     default: break;
14035     case 'b':
14036     case 'r':
14037     case 'f':
14038     case 'd':
14039     case 'v':
14040     case 'y':
14041       return C_RegisterClass;
14042     case 'Z':
14043       // FIXME: While Z does indicate a memory constraint, it specifically
14044       // indicates an r+r address (used in conjunction with the 'y' modifier
14045       // in the replacement string). Currently, we're forcing the base
14046       // register to be r0 in the asm printer (which is interpreted as zero)
14047       // and forming the complete address in the second register. This is
14048       // suboptimal.
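      // Illustrative use (assumed example, not from this file), with the
      // operand printed via the 'y' modifier so an r+r form can be emitted:
      //   asm ("lvx %0,%y1" : "=v"(v) : "Z"(*p));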
14049       return C_Memory;
14050     }
14051   } else if (Constraint == "wc") { // individual CR bits.
14052     return C_RegisterClass;
14053   } else if (Constraint == "wa" || Constraint == "wd" ||
14054              Constraint == "wf" || Constraint == "ws" ||
14055              Constraint == "wi" || Constraint == "ww") {
14056     return C_RegisterClass; // VSX registers.
14057   }
14058   return TargetLowering::getConstraintType(Constraint);
14059 }
14060 
14061 /// Examine constraint type and operand type and determine a weight value.
14062 /// This object must already have been set up with the operand type
14063 /// and the current alternative constraint selected.
14064 TargetLowering::ConstraintWeight
14065 PPCTargetLowering::getSingleConstraintMatchWeight(
14066     AsmOperandInfo &info, const char *constraint) const {
14067   ConstraintWeight weight = CW_Invalid;
14068   Value *CallOperandVal = info.CallOperandVal;
  // If we don't have a value, we can't do a match,
  // but allow it at the lowest weight.
14071   if (!CallOperandVal)
14072     return CW_Default;
14073   Type *type = CallOperandVal->getType();
14074 
14075   // Look at the constraint type.
14076   if (StringRef(constraint) == "wc" && type->isIntegerTy(1))
14077     return CW_Register; // an individual CR bit.
14078   else if ((StringRef(constraint) == "wa" ||
14079             StringRef(constraint) == "wd" ||
14080             StringRef(constraint) == "wf") &&
14081            type->isVectorTy())
14082     return CW_Register;
14083   else if (StringRef(constraint) == "wi" && type->isIntegerTy(64))
    return CW_Register; // registers that just hold 64-bit integer data.
14085   else if (StringRef(constraint) == "ws" && type->isDoubleTy())
14086     return CW_Register;
14087   else if (StringRef(constraint) == "ww" && type->isFloatTy())
14088     return CW_Register;
14089 
14090   switch (*constraint) {
14091   default:
14092     weight = TargetLowering::getSingleConstraintMatchWeight(info, constraint);
14093     break;
14094   case 'b':
14095     if (type->isIntegerTy())
14096       weight = CW_Register;
14097     break;
14098   case 'f':
14099     if (type->isFloatTy())
14100       weight = CW_Register;
14101     break;
14102   case 'd':
14103     if (type->isDoubleTy())
14104       weight = CW_Register;
14105     break;
14106   case 'v':
14107     if (type->isVectorTy())
14108       weight = CW_Register;
14109     break;
14110   case 'y':
14111     weight = CW_Register;
14112     break;
14113   case 'Z':
14114     weight = CW_Memory;
14115     break;
14116   }
14117   return weight;
14118 }
14119 
14120 std::pair<unsigned, const TargetRegisterClass *>
14121 PPCTargetLowering::getRegForInlineAsmConstraint(const TargetRegisterInfo *TRI,
14122                                                 StringRef Constraint,
14123                                                 MVT VT) const {
14124   if (Constraint.size() == 1) {
14125     // GCC RS6000 Constraint Letters
14126     switch (Constraint[0]) {
14127     case 'b':   // R1-R31
14128       if (VT == MVT::i64 && Subtarget.isPPC64())
14129         return std::make_pair(0U, &PPC::G8RC_NOX0RegClass);
14130       return std::make_pair(0U, &PPC::GPRC_NOR0RegClass);
14131     case 'r':   // R0-R31
14132       if (VT == MVT::i64 && Subtarget.isPPC64())
14133         return std::make_pair(0U, &PPC::G8RCRegClass);
14134       return std::make_pair(0U, &PPC::GPRCRegClass);
    // 'd' and 'f' constraints are both defined to be "the floating point
    // registers", where one is for 32-bit and the other for 64-bit. We don't
    // care much about the distinction here, so give them all the same reg
    // classes.
14138     case 'd':
14139     case 'f':
14140       if (Subtarget.hasSPE()) {
14141         if (VT == MVT::f32 || VT == MVT::i32)
14142           return std::make_pair(0U, &PPC::SPE4RCRegClass);
14143         if (VT == MVT::f64 || VT == MVT::i64)
14144           return std::make_pair(0U, &PPC::SPERCRegClass);
14145       } else {
14146         if (VT == MVT::f32 || VT == MVT::i32)
14147           return std::make_pair(0U, &PPC::F4RCRegClass);
14148         if (VT == MVT::f64 || VT == MVT::i64)
14149           return std::make_pair(0U, &PPC::F8RCRegClass);
14150         if (VT == MVT::v4f64 && Subtarget.hasQPX())
14151           return std::make_pair(0U, &PPC::QFRCRegClass);
14152         if (VT == MVT::v4f32 && Subtarget.hasQPX())
14153           return std::make_pair(0U, &PPC::QSRCRegClass);
14154       }
14155       break;
14156     case 'v':
14157       if (VT == MVT::v4f64 && Subtarget.hasQPX())
14158         return std::make_pair(0U, &PPC::QFRCRegClass);
14159       if (VT == MVT::v4f32 && Subtarget.hasQPX())
14160         return std::make_pair(0U, &PPC::QSRCRegClass);
14161       if (Subtarget.hasAltivec())
14162         return std::make_pair(0U, &PPC::VRRCRegClass);
14163       break;
14164     case 'y':   // crrc
14165       return std::make_pair(0U, &PPC::CRRCRegClass);
14166     }
14167   } else if (Constraint == "wc" && Subtarget.useCRBits()) {
14168     // An individual CR bit.
14169     return std::make_pair(0U, &PPC::CRBITRCRegClass);
14170   } else if ((Constraint == "wa" || Constraint == "wd" ||
14171              Constraint == "wf" || Constraint == "wi") &&
14172              Subtarget.hasVSX()) {
14173     return std::make_pair(0U, &PPC::VSRCRegClass);
14174   } else if ((Constraint == "ws" || Constraint == "ww") && Subtarget.hasVSX()) {
14175     if (VT == MVT::f32 && Subtarget.hasP8Vector())
14176       return std::make_pair(0U, &PPC::VSSRCRegClass);
14177     else
14178       return std::make_pair(0U, &PPC::VSFRCRegClass);
14179   }
14180 
14181   std::pair<unsigned, const TargetRegisterClass *> R =
14182       TargetLowering::getRegForInlineAsmConstraint(TRI, Constraint, VT);
14183 
14184   // r[0-9]+ are used, on PPC64, to refer to the corresponding 64-bit registers
14185   // (which we call X[0-9]+). If a 64-bit value has been requested, and a
14186   // 32-bit GPR has been selected, then 'upgrade' it to the 64-bit parent
14187   // register.
14188   // FIXME: If TargetLowering::getRegForInlineAsmConstraint could somehow use
14189   // the AsmName field from *RegisterInfo.td, then this would not be necessary.
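  // For example (illustrative), an "{r5}" constraint on an i64 operand is
  // upgraded here from R5 to its 64-bit super-register X5.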
14190   if (R.first && VT == MVT::i64 && Subtarget.isPPC64() &&
14191       PPC::GPRCRegClass.contains(R.first))
14192     return std::make_pair(TRI->getMatchingSuperReg(R.first,
14193                             PPC::sub_32, &PPC::G8RCRegClass),
14194                           &PPC::G8RCRegClass);
14195 
14196   // GCC accepts 'cc' as an alias for 'cr0', and we need to do the same.
14197   if (!R.second && StringRef("{cc}").equals_lower(Constraint)) {
14198     R.first = PPC::CR0;
14199     R.second = &PPC::CRRCRegClass;
14200   }
14201 
14202   return R;
14203 }
14204 
14205 /// LowerAsmOperandForConstraint - Lower the specified operand into the Ops
14206 /// vector.  If it is invalid, don't add anything to Ops.
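/// For example (illustrative), the "I" constraint below matches only because
/// 1234 fits in a signed 16-bit immediate:
///   asm ("addi %0,%1,%2" : "=r"(d) : "r"(s), "I"(1234));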
14207 void PPCTargetLowering::LowerAsmOperandForConstraint(SDValue Op,
14208                                                      std::string &Constraint,
14209                                                      std::vector<SDValue>&Ops,
14210                                                      SelectionDAG &DAG) const {
14211   SDValue Result;
14212 
14213   // Only support length 1 constraints.
14214   if (Constraint.length() > 1) return;
14215 
14216   char Letter = Constraint[0];
14217   switch (Letter) {
14218   default: break;
14219   case 'I':
14220   case 'J':
14221   case 'K':
14222   case 'L':
14223   case 'M':
14224   case 'N':
14225   case 'O':
14226   case 'P': {
14227     ConstantSDNode *CST = dyn_cast<ConstantSDNode>(Op);
14228     if (!CST) return; // Must be an immediate to match.
14229     SDLoc dl(Op);
14230     int64_t Value = CST->getSExtValue();
14231     EVT TCVT = MVT::i64; // All constants taken to be 64 bits so that negative
14232                          // numbers are printed as such.
14233     switch (Letter) {
14234     default: llvm_unreachable("Unknown constraint letter!");
14235     case 'I':  // "I" is a signed 16-bit constant.
14236       if (isInt<16>(Value))
14237         Result = DAG.getTargetConstant(Value, dl, TCVT);
14238       break;
14239     case 'J':  // "J" is a constant with only the high-order 16 bits nonzero.
14240       if (isShiftedUInt<16, 16>(Value))
14241         Result = DAG.getTargetConstant(Value, dl, TCVT);
14242       break;
14243     case 'L':  // "L" is a signed 16-bit constant shifted left 16 bits.
14244       if (isShiftedInt<16, 16>(Value))
14245         Result = DAG.getTargetConstant(Value, dl, TCVT);
14246       break;
14247     case 'K':  // "K" is a constant with only the low-order 16 bits nonzero.
14248       if (isUInt<16>(Value))
14249         Result = DAG.getTargetConstant(Value, dl, TCVT);
14250       break;
14251     case 'M':  // "M" is a constant that is greater than 31.
14252       if (Value > 31)
14253         Result = DAG.getTargetConstant(Value, dl, TCVT);
14254       break;
14255     case 'N':  // "N" is a positive constant that is an exact power of two.
14256       if (Value > 0 && isPowerOf2_64(Value))
14257         Result = DAG.getTargetConstant(Value, dl, TCVT);
14258       break;
14259     case 'O':  // "O" is the constant zero.
14260       if (Value == 0)
14261         Result = DAG.getTargetConstant(Value, dl, TCVT);
14262       break;
14263     case 'P':  // "P" is a constant whose negation is a signed 16-bit constant.
14264       if (isInt<16>(-Value))
14265         Result = DAG.getTargetConstant(Value, dl, TCVT);
14266       break;
14267     }
14268     break;
14269   }
14270   }
14271 
14272   if (Result.getNode()) {
14273     Ops.push_back(Result);
14274     return;
14275   }
14276 
14277   // Handle standard constraint letters.
14278   TargetLowering::LowerAsmOperandForConstraint(Op, Constraint, Ops, DAG);
14279 }
14280 
14281 // isLegalAddressingMode - Return true if the addressing mode represented
14282 // by AM is legal for this target, for a load/store of the specified type.
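// In effect, this accepts r, r+i (signed 16-bit offset), r+r, and 2*r
// (treated as r+r), and rejects r+i for vectors, r+r+i, global bases, and
// any other scale.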
14283 bool PPCTargetLowering::isLegalAddressingMode(const DataLayout &DL,
14284                                               const AddrMode &AM, Type *Ty,
14285                                               unsigned AS, Instruction *I) const {
14286   // PPC does not allow r+i addressing modes for vectors!
14287   if (Ty->isVectorTy() && AM.BaseOffs != 0)
14288     return false;
14289 
14290   // PPC allows a sign-extended 16-bit immediate field.
14291   if (AM.BaseOffs <= -(1LL << 16) || AM.BaseOffs >= (1LL << 16)-1)
14292     return false;
14293 
14294   // No global is ever allowed as a base.
14295   if (AM.BaseGV)
14296     return false;
14297 
  // For indexed forms, PPC only supports r+r (no scaled index):
14299   switch (AM.Scale) {
14300   case 0:  // "r+i" or just "i", depending on HasBaseReg.
14301     break;
14302   case 1:
14303     if (AM.HasBaseReg && AM.BaseOffs)  // "r+r+i" is not allowed.
14304       return false;
14305     // Otherwise we have r+r or r+i.
14306     break;
14307   case 2:
14308     if (AM.HasBaseReg || AM.BaseOffs)  // 2*r+r  or  2*r+i is not allowed.
14309       return false;
14310     // Allow 2*r as r+r.
14311     break;
14312   default:
14313     // No other scales are supported.
14314     return false;
14315   }
14316 
14317   return true;
14318 }
14319 
14320 SDValue PPCTargetLowering::LowerRETURNADDR(SDValue Op,
14321                                            SelectionDAG &DAG) const {
14322   MachineFunction &MF = DAG.getMachineFunction();
14323   MachineFrameInfo &MFI = MF.getFrameInfo();
14324   MFI.setReturnAddressIsTaken(true);
14325 
14326   if (verifyReturnAddressArgumentIsConstant(Op, DAG))
14327     return SDValue();
14328 
14329   SDLoc dl(Op);
14330   unsigned Depth = cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue();
14331 
14332   // Make sure the function does not optimize away the store of the RA to
14333   // the stack.
14334   PPCFunctionInfo *FuncInfo = MF.getInfo<PPCFunctionInfo>();
14335   FuncInfo->setLRStoreRequired();
14336   bool isPPC64 = Subtarget.isPPC64();
14337   auto PtrVT = getPointerTy(MF.getDataLayout());
14338 
14339   if (Depth > 0) {
14340     SDValue FrameAddr = LowerFRAMEADDR(Op, DAG);
14341     SDValue Offset =
14342         DAG.getConstant(Subtarget.getFrameLowering()->getReturnSaveOffset(), dl,
14343                         isPPC64 ? MVT::i64 : MVT::i32);
14344     return DAG.getLoad(PtrVT, dl, DAG.getEntryNode(),
14345                        DAG.getNode(ISD::ADD, dl, PtrVT, FrameAddr, Offset),
14346                        MachinePointerInfo());
14347   }
14348 
14349   // Just load the return address off the stack.
14350   SDValue RetAddrFI = getReturnAddrFrameIndex(DAG);
14351   return DAG.getLoad(PtrVT, dl, DAG.getEntryNode(), RetAddrFI,
14352                      MachinePointerInfo());
14353 }
14354 
14355 SDValue PPCTargetLowering::LowerFRAMEADDR(SDValue Op,
14356                                           SelectionDAG &DAG) const {
14357   SDLoc dl(Op);
14358   unsigned Depth = cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue();
14359 
14360   MachineFunction &MF = DAG.getMachineFunction();
14361   MachineFrameInfo &MFI = MF.getFrameInfo();
14362   MFI.setFrameAddressIsTaken(true);
14363 
14364   EVT PtrVT = getPointerTy(MF.getDataLayout());
14365   bool isPPC64 = PtrVT == MVT::i64;
14366 
14367   // Naked functions never have a frame pointer, and so we use r1. For all
14368   // other functions, this decision must be delayed until during PEI.
14369   unsigned FrameReg;
14370   if (MF.getFunction().hasFnAttribute(Attribute::Naked))
14371     FrameReg = isPPC64 ? PPC::X1 : PPC::R1;
14372   else
14373     FrameReg = isPPC64 ? PPC::FP8 : PPC::FP;
14374 
14375   SDValue FrameAddr = DAG.getCopyFromReg(DAG.getEntryNode(), dl, FrameReg,
14376                                          PtrVT);
14377   while (Depth--)
14378     FrameAddr = DAG.getLoad(Op.getValueType(), dl, DAG.getEntryNode(),
14379                             FrameAddr, MachinePointerInfo());
14380   return FrameAddr;
14381 }
14382 
14383 // FIXME? Maybe this could be a TableGen attribute on some registers and
14384 // this table could be generated automatically from RegInfo.
14385 unsigned PPCTargetLowering::getRegisterByName(const char* RegName, EVT VT,
14386                                               SelectionDAG &DAG) const {
14387   bool isPPC64 = Subtarget.isPPC64();
14388   bool isDarwinABI = Subtarget.isDarwinABI();
14389 
14390   if ((isPPC64 && VT != MVT::i64 && VT != MVT::i32) ||
14391       (!isPPC64 && VT != MVT::i32))
14392     report_fatal_error("Invalid register global variable type");
14393 
14394   bool is64Bit = isPPC64 && VT == MVT::i64;
14395   unsigned Reg = StringSwitch<unsigned>(RegName)
14396                    .Case("r1", is64Bit ? PPC::X1 : PPC::R1)
14397                    .Case("r2", (isDarwinABI || isPPC64) ? 0 : PPC::R2)
14398                    .Case("r13", (!isPPC64 && isDarwinABI) ? 0 :
14399                                   (is64Bit ? PPC::X13 : PPC::R13))
14400                    .Default(0);
14401 
14402   if (Reg)
14403     return Reg;
14404   report_fatal_error("Invalid register name global variable");
14405 }
14406 
14407 bool PPCTargetLowering::isAccessedAsGotIndirect(SDValue GA) const {
  // The 32-bit SVR4 ABI accesses everything as got-indirect.
14409   if (Subtarget.isSVR4ABI() && !Subtarget.isPPC64())
14410     return true;
14411 
14412   // AIX accesses everything indirectly through the TOC, which is similar to
14413   // the GOT.
14414   if (Subtarget.isAIXABI())
14415     return true;
14416 
14417   CodeModel::Model CModel = getTargetMachine().getCodeModel();
  // Under the small and large code models, module locals are accessed
  // indirectly by loading their address from the .toc/.got.
14420   if (CModel == CodeModel::Small || CModel == CodeModel::Large)
14421     return true;
14422 
14423   // JumpTable and BlockAddress are accessed as got-indirect.
14424   if (isa<JumpTableSDNode>(GA) || isa<BlockAddressSDNode>(GA))
14425     return true;
14426 
14427   if (GlobalAddressSDNode *G = dyn_cast<GlobalAddressSDNode>(GA)) {
14428     const GlobalValue *GV = G->getGlobal();
14429     unsigned char GVFlags = Subtarget.classifyGlobalReference(GV);
14430     // The NLP flag indicates that a global access has to use an
14431     // extra indirection.
14432     if (GVFlags & PPCII::MO_NLP_FLAG)
14433       return true;
14434   }
14435 
14436   return false;
14437 }
14438 
14439 bool
14440 PPCTargetLowering::isOffsetFoldingLegal(const GlobalAddressSDNode *GA) const {
14441   // The PowerPC target isn't yet aware of offsets.
14442   return false;
14443 }
14444 
14445 bool PPCTargetLowering::getTgtMemIntrinsic(IntrinsicInfo &Info,
14446                                            const CallInst &I,
14447                                            MachineFunction &MF,
14448                                            unsigned Intrinsic) const {
14449   switch (Intrinsic) {
14450   case Intrinsic::ppc_qpx_qvlfd:
14451   case Intrinsic::ppc_qpx_qvlfs:
14452   case Intrinsic::ppc_qpx_qvlfcd:
14453   case Intrinsic::ppc_qpx_qvlfcs:
14454   case Intrinsic::ppc_qpx_qvlfiwa:
14455   case Intrinsic::ppc_qpx_qvlfiwz:
14456   case Intrinsic::ppc_altivec_lvx:
14457   case Intrinsic::ppc_altivec_lvxl:
14458   case Intrinsic::ppc_altivec_lvebx:
14459   case Intrinsic::ppc_altivec_lvehx:
14460   case Intrinsic::ppc_altivec_lvewx:
14461   case Intrinsic::ppc_vsx_lxvd2x:
14462   case Intrinsic::ppc_vsx_lxvw4x: {
14463     EVT VT;
14464     switch (Intrinsic) {
14465     case Intrinsic::ppc_altivec_lvebx:
14466       VT = MVT::i8;
14467       break;
14468     case Intrinsic::ppc_altivec_lvehx:
14469       VT = MVT::i16;
14470       break;
14471     case Intrinsic::ppc_altivec_lvewx:
14472       VT = MVT::i32;
14473       break;
14474     case Intrinsic::ppc_vsx_lxvd2x:
14475       VT = MVT::v2f64;
14476       break;
14477     case Intrinsic::ppc_qpx_qvlfd:
14478       VT = MVT::v4f64;
14479       break;
14480     case Intrinsic::ppc_qpx_qvlfs:
14481       VT = MVT::v4f32;
14482       break;
14483     case Intrinsic::ppc_qpx_qvlfcd:
14484       VT = MVT::v2f64;
14485       break;
14486     case Intrinsic::ppc_qpx_qvlfcs:
14487       VT = MVT::v2f32;
14488       break;
14489     default:
14490       VT = MVT::v4i32;
14491       break;
14492     }
14493 
14494     Info.opc = ISD::INTRINSIC_W_CHAIN;
14495     Info.memVT = VT;
14496     Info.ptrVal = I.getArgOperand(0);
14497     Info.offset = -VT.getStoreSize()+1;
14498     Info.size = 2*VT.getStoreSize()-1;
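    // Some of these loads (e.g. lvx) ignore the low-order address bits, so
    // the offset/size above conservatively cover every byte the access could
    // touch: the range [ptr - (size-1), ptr + (size-1)].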
14499     Info.align = Align(1);
14500     Info.flags = MachineMemOperand::MOLoad;
14501     return true;
14502   }
14503   case Intrinsic::ppc_qpx_qvlfda:
14504   case Intrinsic::ppc_qpx_qvlfsa:
14505   case Intrinsic::ppc_qpx_qvlfcda:
14506   case Intrinsic::ppc_qpx_qvlfcsa:
14507   case Intrinsic::ppc_qpx_qvlfiwaa:
14508   case Intrinsic::ppc_qpx_qvlfiwza: {
14509     EVT VT;
14510     switch (Intrinsic) {
14511     case Intrinsic::ppc_qpx_qvlfda:
14512       VT = MVT::v4f64;
14513       break;
14514     case Intrinsic::ppc_qpx_qvlfsa:
14515       VT = MVT::v4f32;
14516       break;
14517     case Intrinsic::ppc_qpx_qvlfcda:
14518       VT = MVT::v2f64;
14519       break;
14520     case Intrinsic::ppc_qpx_qvlfcsa:
14521       VT = MVT::v2f32;
14522       break;
14523     default:
14524       VT = MVT::v4i32;
14525       break;
14526     }
14527 
14528     Info.opc = ISD::INTRINSIC_W_CHAIN;
14529     Info.memVT = VT;
14530     Info.ptrVal = I.getArgOperand(0);
14531     Info.offset = 0;
14532     Info.size = VT.getStoreSize();
14533     Info.align = Align(1);
14534     Info.flags = MachineMemOperand::MOLoad;
14535     return true;
14536   }
14537   case Intrinsic::ppc_qpx_qvstfd:
14538   case Intrinsic::ppc_qpx_qvstfs:
14539   case Intrinsic::ppc_qpx_qvstfcd:
14540   case Intrinsic::ppc_qpx_qvstfcs:
14541   case Intrinsic::ppc_qpx_qvstfiw:
14542   case Intrinsic::ppc_altivec_stvx:
14543   case Intrinsic::ppc_altivec_stvxl:
14544   case Intrinsic::ppc_altivec_stvebx:
14545   case Intrinsic::ppc_altivec_stvehx:
14546   case Intrinsic::ppc_altivec_stvewx:
14547   case Intrinsic::ppc_vsx_stxvd2x:
14548   case Intrinsic::ppc_vsx_stxvw4x: {
14549     EVT VT;
14550     switch (Intrinsic) {
14551     case Intrinsic::ppc_altivec_stvebx:
14552       VT = MVT::i8;
14553       break;
14554     case Intrinsic::ppc_altivec_stvehx:
14555       VT = MVT::i16;
14556       break;
14557     case Intrinsic::ppc_altivec_stvewx:
14558       VT = MVT::i32;
14559       break;
14560     case Intrinsic::ppc_vsx_stxvd2x:
14561       VT = MVT::v2f64;
14562       break;
14563     case Intrinsic::ppc_qpx_qvstfd:
14564       VT = MVT::v4f64;
14565       break;
14566     case Intrinsic::ppc_qpx_qvstfs:
14567       VT = MVT::v4f32;
14568       break;
14569     case Intrinsic::ppc_qpx_qvstfcd:
14570       VT = MVT::v2f64;
14571       break;
14572     case Intrinsic::ppc_qpx_qvstfcs:
14573       VT = MVT::v2f32;
14574       break;
14575     default:
14576       VT = MVT::v4i32;
14577       break;
14578     }
14579 
14580     Info.opc = ISD::INTRINSIC_VOID;
14581     Info.memVT = VT;
14582     Info.ptrVal = I.getArgOperand(1);
14583     Info.offset = -VT.getStoreSize()+1;
14584     Info.size = 2*VT.getStoreSize()-1;
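    // As with the loads above, some of these stores (e.g. stvx) ignore the
    // low-order address bits, so the offset/size conservatively cover the
    // widest byte range the access could touch.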
14585     Info.align = Align(1);
14586     Info.flags = MachineMemOperand::MOStore;
14587     return true;
14588   }
14589   case Intrinsic::ppc_qpx_qvstfda:
14590   case Intrinsic::ppc_qpx_qvstfsa:
14591   case Intrinsic::ppc_qpx_qvstfcda:
14592   case Intrinsic::ppc_qpx_qvstfcsa:
14593   case Intrinsic::ppc_qpx_qvstfiwa: {
14594     EVT VT;
14595     switch (Intrinsic) {
14596     case Intrinsic::ppc_qpx_qvstfda:
14597       VT = MVT::v4f64;
14598       break;
14599     case Intrinsic::ppc_qpx_qvstfsa:
14600       VT = MVT::v4f32;
14601       break;
14602     case Intrinsic::ppc_qpx_qvstfcda:
14603       VT = MVT::v2f64;
14604       break;
14605     case Intrinsic::ppc_qpx_qvstfcsa:
14606       VT = MVT::v2f32;
14607       break;
14608     default:
14609       VT = MVT::v4i32;
14610       break;
14611     }
14612 
14613     Info.opc = ISD::INTRINSIC_VOID;
14614     Info.memVT = VT;
14615     Info.ptrVal = I.getArgOperand(1);
14616     Info.offset = 0;
14617     Info.size = VT.getStoreSize();
14618     Info.align = Align(1);
14619     Info.flags = MachineMemOperand::MOStore;
14620     return true;
14621   }
14622   default:
14623     break;
14624   }
14625 
14626   return false;
14627 }
14628 
/// getOptimalMemOpType - Returns the target-specific optimal type for load
/// and store operations as a result of memset, memcpy, and memmove
/// lowering. If DstAlign is zero, the destination alignment can satisfy any
/// constraint. Similarly, if SrcAlign is zero there is no need to check it
/// against an alignment requirement,
14634 /// probably because the source does not need to be loaded. If 'IsMemset' is
14635 /// true, that means it's expanding a memset. If 'ZeroMemset' is true, that
14636 /// means it's a memset of zero. 'MemcpyStrSrc' indicates whether the memcpy
14637 /// source is constant so it does not need to be loaded.
14638 /// It returns EVT::Other if the type should be determined using generic
14639 /// target-independent logic.
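/// For example, a 32-byte memcpy with 16-byte-aligned source and destination
/// on an Altivec subtarget is expanded with MVT::v4i32 operations, while
/// small copies fall back to MVT::i64 (PPC64) or MVT::i32 (PPC32).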
14640 EVT PPCTargetLowering::getOptimalMemOpType(
14641     uint64_t Size, unsigned DstAlign, unsigned SrcAlign, bool IsMemset,
14642     bool ZeroMemset, bool MemcpyStrSrc,
14643     const AttributeList &FuncAttributes) const {
14644   if (getTargetMachine().getOptLevel() != CodeGenOpt::None) {
14645     // When expanding a memset, require at least two QPX instructions to cover
14646     // the cost of loading the value to be stored from the constant pool.
14647     if (Subtarget.hasQPX() && Size >= 32 && (!IsMemset || Size >= 64) &&
14648        (!SrcAlign || SrcAlign >= 32) && (!DstAlign || DstAlign >= 32) &&
14649         !FuncAttributes.hasFnAttribute(Attribute::NoImplicitFloat)) {
14650       return MVT::v4f64;
14651     }
14652 
14653     // We should use Altivec/VSX loads and stores when available. For unaligned
14654     // addresses, unaligned VSX loads are only fast starting with the P8.
14655     if (Subtarget.hasAltivec() && Size >= 16 &&
14656         (((!SrcAlign || SrcAlign >= 16) && (!DstAlign || DstAlign >= 16)) ||
14657          ((IsMemset && Subtarget.hasVSX()) || Subtarget.hasP8Vector())))
14658       return MVT::v4i32;
14659   }
14660 
14661   if (Subtarget.isPPC64()) {
14662     return MVT::i64;
14663   }
14664 
14665   return MVT::i32;
14666 }
14667 
14668 /// Returns true if it is beneficial to convert a load of a constant
14669 /// to just the constant itself.
14670 bool PPCTargetLowering::shouldConvertConstantLoadToIntImm(const APInt &Imm,
14671                                                           Type *Ty) const {
14672   assert(Ty->isIntegerTy());
14673 
14674   unsigned BitSize = Ty->getPrimitiveSizeInBits();
14675   return !(BitSize == 0 || BitSize > 64);
14676 }
14677 
14678 bool PPCTargetLowering::isTruncateFree(Type *Ty1, Type *Ty2) const {
14679   if (!Ty1->isIntegerTy() || !Ty2->isIntegerTy())
14680     return false;
14681   unsigned NumBits1 = Ty1->getPrimitiveSizeInBits();
14682   unsigned NumBits2 = Ty2->getPrimitiveSizeInBits();
14683   return NumBits1 == 64 && NumBits2 == 32;
14684 }
14685 
14686 bool PPCTargetLowering::isTruncateFree(EVT VT1, EVT VT2) const {
14687   if (!VT1.isInteger() || !VT2.isInteger())
14688     return false;
14689   unsigned NumBits1 = VT1.getSizeInBits();
14690   unsigned NumBits2 = VT2.getSizeInBits();
14691   return NumBits1 == 64 && NumBits2 == 32;
14692 }
14693 
14694 bool PPCTargetLowering::isZExtFree(SDValue Val, EVT VT2) const {
14695   // Generally speaking, zexts are not free, but they are free when they can be
14696   // folded with other operations.
14697   if (LoadSDNode *LD = dyn_cast<LoadSDNode>(Val)) {
14698     EVT MemVT = LD->getMemoryVT();
14699     if ((MemVT == MVT::i1 || MemVT == MVT::i8 || MemVT == MVT::i16 ||
14700          (Subtarget.isPPC64() && MemVT == MVT::i32)) &&
14701         (LD->getExtensionType() == ISD::NON_EXTLOAD ||
14702          LD->getExtensionType() == ISD::ZEXTLOAD))
14703       return true;
14704   }
14705 
14706   // FIXME: Add other cases...
14707   //  - 32-bit shifts with a zext to i64
14708   //  - zext after ctlz, bswap, etc.
14709   //  - zext after and by a constant mask
14710 
14711   return TargetLowering::isZExtFree(Val, VT2);
14712 }
14713 
14714 bool PPCTargetLowering::isFPExtFree(EVT DestVT, EVT SrcVT) const {
14715   assert(DestVT.isFloatingPoint() && SrcVT.isFloatingPoint() &&
14716          "invalid fpext types");
14717   // Extending to float128 is not free.
14718   if (DestVT == MVT::f128)
14719     return false;
14720   return true;
14721 }
14722 
14723 bool PPCTargetLowering::isLegalICmpImmediate(int64_t Imm) const {
14724   return isInt<16>(Imm) || isUInt<16>(Imm);
14725 }
14726 
14727 bool PPCTargetLowering::isLegalAddImmediate(int64_t Imm) const {
14728   return isInt<16>(Imm) || isUInt<16>(Imm);
14729 }
14730 
14731 bool PPCTargetLowering::allowsMisalignedMemoryAccesses(EVT VT,
14732                                                        unsigned,
14733                                                        unsigned,
14734                                                        MachineMemOperand::Flags,
14735                                                        bool *Fast) const {
14736   if (DisablePPCUnaligned)
14737     return false;
14738 
14739   // PowerPC supports unaligned memory access for simple non-vector types.
14740   // Although accessing unaligned addresses is not as efficient as accessing
14741   // aligned addresses, it is generally more efficient than manual expansion,
  // and it generally only traps to software emulation when crossing page
14743   // boundaries.
14744 
14745   if (!VT.isSimple())
14746     return false;
14747 
14748   if (VT.getSimpleVT().isVector()) {
14749     if (Subtarget.hasVSX()) {
14750       if (VT != MVT::v2f64 && VT != MVT::v2i64 &&
14751           VT != MVT::v4f32 && VT != MVT::v4i32)
14752         return false;
14753     } else {
14754       return false;
14755     }
14756   }
14757 
14758   if (VT == MVT::ppcf128)
14759     return false;
14760 
14761   if (Fast)
14762     *Fast = true;
14763 
14764   return true;
14765 }
14766 
14767 bool PPCTargetLowering::isFMAFasterThanFMulAndFAdd(EVT VT) const {
14768   VT = VT.getScalarType();
14769 
14770   if (!VT.isSimple())
14771     return false;
14772 
14773   switch (VT.getSimpleVT().SimpleTy) {
14774   case MVT::f32:
14775   case MVT::f64:
14776     return true;
14777   case MVT::f128:
14778     return (EnableQuadPrecision && Subtarget.hasP9Vector());
14779   default:
14780     break;
14781   }
14782 
14783   return false;
14784 }
14785 
14786 const MCPhysReg *
14787 PPCTargetLowering::getScratchRegisters(CallingConv::ID) const {
14788   // LR is a callee-save register, but we must treat it as clobbered by any call
14789   // site. Hence we include LR in the scratch registers, which are in turn added
14790   // as implicit-defs for stackmaps and patchpoints. The same reasoning applies
14791   // to CTR, which is used by any indirect call.
14792   static const MCPhysReg ScratchRegs[] = {
14793     PPC::X12, PPC::LR8, PPC::CTR8, 0
14794   };
14795 
14796   return ScratchRegs;
14797 }
14798 
14799 unsigned PPCTargetLowering::getExceptionPointerRegister(
14800     const Constant *PersonalityFn) const {
14801   return Subtarget.isPPC64() ? PPC::X3 : PPC::R3;
14802 }
14803 
14804 unsigned PPCTargetLowering::getExceptionSelectorRegister(
14805     const Constant *PersonalityFn) const {
14806   return Subtarget.isPPC64() ? PPC::X4 : PPC::R4;
14807 }
14808 
14809 bool
14810 PPCTargetLowering::shouldExpandBuildVectorWithShuffles(
                     EVT VT, unsigned DefinedValues) const {
14812   if (VT == MVT::v2i64)
14813     return Subtarget.hasDirectMove(); // Don't need stack ops with direct moves
14814 
14815   if (Subtarget.hasVSX() || Subtarget.hasQPX())
14816     return true;
14817 
14818   return TargetLowering::shouldExpandBuildVectorWithShuffles(VT, DefinedValues);
14819 }
14820 
14821 Sched::Preference PPCTargetLowering::getSchedulingPreference(SDNode *N) const {
14822   if (DisableILPPref || Subtarget.enableMachineScheduler())
14823     return TargetLowering::getSchedulingPreference(N);
14824 
14825   return Sched::ILP;
14826 }
14827 
14828 // Create a fast isel object.
14829 FastISel *
14830 PPCTargetLowering::createFastISel(FunctionLoweringInfo &FuncInfo,
14831                                   const TargetLibraryInfo *LibInfo) const {
14832   return PPC::createFastISel(FuncInfo, LibInfo);
14833 }
14834 
14835 void PPCTargetLowering::initializeSplitCSR(MachineBasicBlock *Entry) const {
14836   if (Subtarget.isDarwinABI()) return;
14837   if (!Subtarget.isPPC64()) return;
14838 
14839   // Update IsSplitCSR in PPCFunctionInfo
14840   PPCFunctionInfo *PFI = Entry->getParent()->getInfo<PPCFunctionInfo>();
14841   PFI->setIsSplitCSR(true);
14842 }
14843 
14844 void PPCTargetLowering::insertCopiesSplitCSR(
14845   MachineBasicBlock *Entry,
14846   const SmallVectorImpl<MachineBasicBlock *> &Exits) const {
14847   const PPCRegisterInfo *TRI = Subtarget.getRegisterInfo();
14848   const MCPhysReg *IStart = TRI->getCalleeSavedRegsViaCopy(Entry->getParent());
14849   if (!IStart)
14850     return;
14851 
14852   const TargetInstrInfo *TII = Subtarget.getInstrInfo();
14853   MachineRegisterInfo *MRI = &Entry->getParent()->getRegInfo();
14854   MachineBasicBlock::iterator MBBI = Entry->begin();
14855   for (const MCPhysReg *I = IStart; *I; ++I) {
14856     const TargetRegisterClass *RC = nullptr;
14857     if (PPC::G8RCRegClass.contains(*I))
14858       RC = &PPC::G8RCRegClass;
14859     else if (PPC::F8RCRegClass.contains(*I))
14860       RC = &PPC::F8RCRegClass;
14861     else if (PPC::CRRCRegClass.contains(*I))
14862       RC = &PPC::CRRCRegClass;
14863     else if (PPC::VRRCRegClass.contains(*I))
14864       RC = &PPC::VRRCRegClass;
14865     else
14866       llvm_unreachable("Unexpected register class in CSRsViaCopy!");
14867 
14868     unsigned NewVR = MRI->createVirtualRegister(RC);
14869     // Create copy from CSR to a virtual register.
14870     // FIXME: this currently does not emit CFI pseudo-instructions, it works
14871     // fine for CXX_FAST_TLS since the C++-style TLS access functions should be
14872     // nounwind. If we want to generalize this later, we may need to emit
14873     // CFI pseudo-instructions.
14874     assert(Entry->getParent()->getFunction().hasFnAttribute(
14875              Attribute::NoUnwind) &&
14876            "Function should be nounwind in insertCopiesSplitCSR!");
14877     Entry->addLiveIn(*I);
14878     BuildMI(*Entry, MBBI, DebugLoc(), TII->get(TargetOpcode::COPY), NewVR)
14879       .addReg(*I);
14880 
14881     // Insert the copy-back instructions right before the terminator.
14882     for (auto *Exit : Exits)
14883       BuildMI(*Exit, Exit->getFirstTerminator(), DebugLoc(),
14884               TII->get(TargetOpcode::COPY), *I)
14885         .addReg(NewVR);
14886   }
14887 }
14888 
14889 // Override to enable LOAD_STACK_GUARD lowering on Linux.
14890 bool PPCTargetLowering::useLoadStackGuardNode() const {
14891   if (!Subtarget.isTargetLinux())
14892     return TargetLowering::useLoadStackGuardNode();
14893   return true;
14894 }
14895 
// Override to suppress the SSP guard-variable declarations on Linux, where
// the stack guard is accessed via LOAD_STACK_GUARD rather than a global.
14897 void PPCTargetLowering::insertSSPDeclarations(Module &M) const {
14898   if (!Subtarget.isTargetLinux())
14899     return TargetLowering::insertSSPDeclarations(M);
14900 }
14901 
14902 bool PPCTargetLowering::isFPImmLegal(const APFloat &Imm, EVT VT,
14903                                      bool ForCodeSize) const {
14904   if (!VT.isSimple() || !Subtarget.hasVSX())
14905     return false;
14906 
14907   switch(VT.getSimpleVT().SimpleTy) {
14908   default:
    // For FP types that are currently not supported by the PPC backend,
    // return false. Examples: f16, f80.
14911     return false;
14912   case MVT::f32:
14913   case MVT::f64:
14914   case MVT::ppcf128:
14915     return Imm.isPosZero();
14916   }
14917 }
14918 
14919 // For vector shift operation op, fold
14920 // (op x, (and y, ((1 << numbits(x)) - 1))) -> (target op x, y)
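// e.g. for v4i32: (shl x, (and y, 31)) --> (PPCISD::SHL x, y), since the
// hardware vector shifts already use the shift amount modulo the element
// width.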
14921 static SDValue stripModuloOnShift(const TargetLowering &TLI, SDNode *N,
14922                                   SelectionDAG &DAG) {
14923   SDValue N0 = N->getOperand(0);
14924   SDValue N1 = N->getOperand(1);
14925   EVT VT = N0.getValueType();
14926   unsigned OpSizeInBits = VT.getScalarSizeInBits();
14927   unsigned Opcode = N->getOpcode();
14928   unsigned TargetOpcode;
14929 
14930   switch (Opcode) {
14931   default:
14932     llvm_unreachable("Unexpected shift operation");
14933   case ISD::SHL:
14934     TargetOpcode = PPCISD::SHL;
14935     break;
14936   case ISD::SRL:
14937     TargetOpcode = PPCISD::SRL;
14938     break;
14939   case ISD::SRA:
14940     TargetOpcode = PPCISD::SRA;
14941     break;
14942   }
14943 
14944   if (VT.isVector() && TLI.isOperationLegal(Opcode, VT) &&
14945       N1->getOpcode() == ISD::AND)
14946     if (ConstantSDNode *Mask = isConstOrConstSplat(N1->getOperand(1)))
14947       if (Mask->getZExtValue() == OpSizeInBits - 1)
14948         return DAG.getNode(TargetOpcode, SDLoc(N), VT, N0, N1->getOperand(0));
14949 
14950   return SDValue();
14951 }
14952 
14953 SDValue PPCTargetLowering::combineSHL(SDNode *N, DAGCombinerInfo &DCI) const {
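  // On ISA 3.0 subtargets, fold (shl (sign_extend i32:x), c) into
  // PPCISD::EXTSWSLI, i.e. a single extswsli instead of separate extend and
  // shift instructions.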
14954   if (auto Value = stripModuloOnShift(*this, N, DCI.DAG))
14955     return Value;
14956 
14957   SDValue N0 = N->getOperand(0);
14958   ConstantSDNode *CN1 = dyn_cast<ConstantSDNode>(N->getOperand(1));
14959   if (!Subtarget.isISA3_0() ||
14960       N0.getOpcode() != ISD::SIGN_EXTEND ||
14961       N0.getOperand(0).getValueType() != MVT::i32 ||
14962       CN1 == nullptr || N->getValueType(0) != MVT::i64)
14963     return SDValue();
14964 
14965   // We can't save an operation here if the value is already extended, and
14966   // the existing shift is easier to combine.
14967   SDValue ExtsSrc = N0.getOperand(0);
14968   if (ExtsSrc.getOpcode() == ISD::TRUNCATE &&
14969       ExtsSrc.getOperand(0).getOpcode() == ISD::AssertSext)
14970     return SDValue();
14971 
14972   SDLoc DL(N0);
14973   SDValue ShiftBy = SDValue(CN1, 0);
  // We want the shift amount to be i32 on the extswsli, but the shift could
  // have an i64 shift amount.
14976   if (ShiftBy.getValueType() == MVT::i64)
14977     ShiftBy = DCI.DAG.getConstant(CN1->getZExtValue(), DL, MVT::i32);
14978 
14979   return DCI.DAG.getNode(PPCISD::EXTSWSLI, DL, MVT::i64, N0->getOperand(0),
14980                          ShiftBy);
14981 }
14982 
14983 SDValue PPCTargetLowering::combineSRA(SDNode *N, DAGCombinerInfo &DCI) const {
14984   if (auto Value = stripModuloOnShift(*this, N, DCI.DAG))
14985     return Value;
14986 
14987   return SDValue();
14988 }
14989 
14990 SDValue PPCTargetLowering::combineSRL(SDNode *N, DAGCombinerInfo &DCI) const {
14991   if (auto Value = stripModuloOnShift(*this, N, DCI.DAG))
14992     return Value;
14993 
14994   return SDValue();
14995 }
14996 
14997 // Transform (add X, (zext(setne Z, C))) -> (addze X, (addic (addi Z, -C), -1))
14998 // Transform (add X, (zext(sete  Z, C))) -> (addze X, (subfic (addi Z, -C), 0))
14999 // When C is zero, the equation (addi Z, -C) can be simplified to Z
15000 // Requirement: -C in [-32768, 32767], X and Z are MVT::i64 types
15001 static SDValue combineADDToADDZE(SDNode *N, SelectionDAG &DAG,
15002                                  const PPCSubtarget &Subtarget) {
15003   if (!Subtarget.isPPC64())
15004     return SDValue();
15005 
15006   SDValue LHS = N->getOperand(0);
15007   SDValue RHS = N->getOperand(1);
15008 
15009   auto isZextOfCompareWithConstant = [](SDValue Op) {
15010     if (Op.getOpcode() != ISD::ZERO_EXTEND || !Op.hasOneUse() ||
15011         Op.getValueType() != MVT::i64)
15012       return false;
15013 
15014     SDValue Cmp = Op.getOperand(0);
15015     if (Cmp.getOpcode() != ISD::SETCC || !Cmp.hasOneUse() ||
15016         Cmp.getOperand(0).getValueType() != MVT::i64)
15017       return false;
15018 
15019     if (auto *Constant = dyn_cast<ConstantSDNode>(Cmp.getOperand(1))) {
15020       int64_t NegConstant = 0 - Constant->getSExtValue();
15021       // Due to the limitations of the addi instruction,
      // -C is required to be in [-32768, 32767].
15023       return isInt<16>(NegConstant);
15024     }
15025 
15026     return false;
15027   };
15028 
15029   bool LHSHasPattern = isZextOfCompareWithConstant(LHS);
15030   bool RHSHasPattern = isZextOfCompareWithConstant(RHS);
15031 
15032   // If there is a pattern, canonicalize a zext operand to the RHS.
15033   if (LHSHasPattern && !RHSHasPattern)
15034     std::swap(LHS, RHS);
15035   else if (!LHSHasPattern && !RHSHasPattern)
15036     return SDValue();
15037 
15038   SDLoc DL(N);
15039   SDVTList VTs = DAG.getVTList(MVT::i64, MVT::Glue);
15040   SDValue Cmp = RHS.getOperand(0);
15041   SDValue Z = Cmp.getOperand(0);
15042   auto *Constant = dyn_cast<ConstantSDNode>(Cmp.getOperand(1));
15043 
  assert(Constant && "Constant should not be a null pointer.");
15045   int64_t NegConstant = 0 - Constant->getSExtValue();
15046 
15047   switch(cast<CondCodeSDNode>(Cmp.getOperand(2))->get()) {
15048   default: break;
15049   case ISD::SETNE: {
15050     //                                 when C == 0
15051     //                             --> addze X, (addic Z, -1).carry
15052     //                            /
15053     // add X, (zext(setne Z, C))--
15054     //                            \    when -32768 <= -C <= 32767 && C != 0
15055     //                             --> addze X, (addic (addi Z, -C), -1).carry
15056     SDValue Add = DAG.getNode(ISD::ADD, DL, MVT::i64, Z,
15057                               DAG.getConstant(NegConstant, DL, MVT::i64));
15058     SDValue AddOrZ = NegConstant != 0 ? Add : Z;
15059     SDValue Addc = DAG.getNode(ISD::ADDC, DL, DAG.getVTList(MVT::i64, MVT::Glue),
15060                                AddOrZ, DAG.getConstant(-1ULL, DL, MVT::i64));
15061     return DAG.getNode(ISD::ADDE, DL, VTs, LHS, DAG.getConstant(0, DL, MVT::i64),
15062                        SDValue(Addc.getNode(), 1));
15063     }
15064   case ISD::SETEQ: {
15065     //                                 when C == 0
15066     //                             --> addze X, (subfic Z, 0).carry
15067     //                            /
15068     // add X, (zext(sete  Z, C))--
15069     //                            \    when -32768 <= -C <= 32767 && C != 0
15070     //                             --> addze X, (subfic (addi Z, -C), 0).carry
15071     SDValue Add = DAG.getNode(ISD::ADD, DL, MVT::i64, Z,
15072                               DAG.getConstant(NegConstant, DL, MVT::i64));
15073     SDValue AddOrZ = NegConstant != 0 ? Add : Z;
15074     SDValue Subc = DAG.getNode(ISD::SUBC, DL, DAG.getVTList(MVT::i64, MVT::Glue),
15075                                DAG.getConstant(0, DL, MVT::i64), AddOrZ);
15076     return DAG.getNode(ISD::ADDE, DL, VTs, LHS, DAG.getConstant(0, DL, MVT::i64),
15077                        SDValue(Subc.getNode(), 1));
15078     }
15079   }
15080 
15081   return SDValue();
15082 }
15083 
15084 SDValue PPCTargetLowering::combineADD(SDNode *N, DAGCombinerInfo &DCI) const {
15085   if (auto Value = combineADDToADDZE(N, DCI.DAG, Subtarget))
15086     return Value;
15087 
15088   return SDValue();
15089 }
15090 
15091 // Detect TRUNCATE operations on bitcasts of float128 values.
// What we are looking for here is the situation where we extract a subset
// of bits from a 128-bit float.
15094 // This can be of two forms:
15095 // 1) BITCAST of f128 feeding TRUNCATE
15096 // 2) BITCAST of f128 feeding SRL (a shift) feeding TRUNCATE
15097 // The reason this is required is because we do not have a legal i128 type
15098 // and so we want to prevent having to store the f128 and then reload part
15099 // of it.
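// For example, on a big-endian target:
//   (i64 (trunc (srl (i128 (bitcast f128:x)), 64)))
// becomes (extract_vector_elt (v2i64 (bitcast x)), 0).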
15100 SDValue PPCTargetLowering::combineTRUNCATE(SDNode *N,
15101                                            DAGCombinerInfo &DCI) const {
15102   // If we are using CRBits then try that first.
15103   if (Subtarget.useCRBits()) {
15104     // Check if CRBits did anything and return that if it did.
15105     if (SDValue CRTruncValue = DAGCombineTruncBoolExt(N, DCI))
15106       return CRTruncValue;
15107   }
15108 
15109   SDLoc dl(N);
15110   SDValue Op0 = N->getOperand(0);
15111 
15112   // Looking for a truncate of i128 to i64.
15113   if (Op0.getValueType() != MVT::i128 || N->getValueType(0) != MVT::i64)
15114     return SDValue();
15115 
15116   int EltToExtract = DCI.DAG.getDataLayout().isBigEndian() ? 1 : 0;
15117 
15118   // SRL feeding TRUNCATE.
15119   if (Op0.getOpcode() == ISD::SRL) {
15120     ConstantSDNode *ConstNode = dyn_cast<ConstantSDNode>(Op0.getOperand(1));
15121     // The right shift has to be by 64 bits.
15122     if (!ConstNode || ConstNode->getZExtValue() != 64)
15123       return SDValue();
15124 
15125     // Switch the element number to extract.
15126     EltToExtract = EltToExtract ? 0 : 1;
15127     // Update Op0 past the SRL.
15128     Op0 = Op0.getOperand(0);
15129   }
15130 
15131   // BITCAST feeding a TRUNCATE possibly via SRL.
15132   if (Op0.getOpcode() == ISD::BITCAST &&
15133       Op0.getValueType() == MVT::i128 &&
15134       Op0.getOperand(0).getValueType() == MVT::f128) {
15135     SDValue Bitcast = DCI.DAG.getBitcast(MVT::v2i64, Op0.getOperand(0));
15136     return DCI.DAG.getNode(
15137         ISD::EXTRACT_VECTOR_ELT, dl, MVT::i64, Bitcast,
15138         DCI.DAG.getTargetConstant(EltToExtract, dl, MVT::i32));
15139   }
15140   return SDValue();
15141 }
15142 
15143 SDValue PPCTargetLowering::combineMUL(SDNode *N, DAGCombinerInfo &DCI) const {
15144   SelectionDAG &DAG = DCI.DAG;
15145 
15146   ConstantSDNode *ConstOpOrElement = isConstOrConstSplat(N->getOperand(1));
15147   if (!ConstOpOrElement)
15148     return SDValue();
15149 
  // An imul is usually smaller than the alternative sequence for a legal
  // type.
15151   if (DAG.getMachineFunction().getFunction().hasMinSize() &&
15152       isOperationLegal(ISD::MUL, N->getValueType(0)))
15153     return SDValue();
15154 
15155   auto IsProfitable = [this](bool IsNeg, bool IsAddOne, EVT VT) -> bool {
15156     switch (this->Subtarget.getDarwinDirective()) {
15157     default:
15158       // TODO: enhance the condition for subtarget before pwr8
15159       return false;
15160     case PPC::DIR_PWR8:
15161       //  type        mul     add    shl
15162       // scalar        4       1      1
15163       // vector        7       2      2
15164       return true;
15165     case PPC::DIR_PWR9:
15166       //  type        mul     add    shl
15167       // scalar        5       2      2
15168       // vector        7       2      2
15169 
      // The cycle ratios of the relevant operations are shown in the table
      // above. Because mul is 5 (scalar) / 7 (vector) while add/sub/shl are
      // all 2 for both scalar and vector types, the two-instruction patterns
      // (add/sub + shl, total cost 4) are always profitable; but the
      // three-instruction pattern (mul x, -(2^N + 1)) =>
      // -(add (shl x, N), x) costs 6 (sub + add + shl), so only do that one
      // for vector types.
15176       return IsAddOne && IsNeg ? VT.isVector() : true;
15177     }
15178   };
15179 
15180   EVT VT = N->getValueType(0);
15181   SDLoc DL(N);
15182 
15183   const APInt &MulAmt = ConstOpOrElement->getAPIntValue();
15184   bool IsNeg = MulAmt.isNegative();
15185   APInt MulAmtAbs = MulAmt.abs();
15186 
15187   if ((MulAmtAbs - 1).isPowerOf2()) {
15188     // (mul x, 2^N + 1) => (add (shl x, N), x)
15189     // (mul x, -(2^N + 1)) => -(add (shl x, N), x)
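    // e.g. (mul x, 5)  => (add (shl x, 2), x)
    //      (mul x, -5) => (sub 0, (add (shl x, 2), x))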
15190 
15191     if (!IsProfitable(IsNeg, true, VT))
15192       return SDValue();
15193 
15194     SDValue Op0 = N->getOperand(0);
15195     SDValue Op1 =
15196         DAG.getNode(ISD::SHL, DL, VT, N->getOperand(0),
15197                     DAG.getConstant((MulAmtAbs - 1).logBase2(), DL, VT));
15198     SDValue Res = DAG.getNode(ISD::ADD, DL, VT, Op0, Op1);
15199 
15200     if (!IsNeg)
15201       return Res;
15202 
15203     return DAG.getNode(ISD::SUB, DL, VT, DAG.getConstant(0, DL, VT), Res);
15204   } else if ((MulAmtAbs + 1).isPowerOf2()) {
15205     // (mul x, 2^N - 1) => (sub (shl x, N), x)
15206     // (mul x, -(2^N - 1)) => (sub x, (shl x, N))
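    // e.g. (mul x, 7)  => (sub (shl x, 3), x)
    //      (mul x, -7) => (sub x, (shl x, 3))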
15207 
15208     if (!IsProfitable(IsNeg, false, VT))
15209       return SDValue();
15210 
15211     SDValue Op0 = N->getOperand(0);
15212     SDValue Op1 =
15213         DAG.getNode(ISD::SHL, DL, VT, N->getOperand(0),
15214                     DAG.getConstant((MulAmtAbs + 1).logBase2(), DL, VT));
15215 
15216     if (!IsNeg)
15217       return DAG.getNode(ISD::SUB, DL, VT, Op1, Op0);
15218     else
15219       return DAG.getNode(ISD::SUB, DL, VT, Op0, Op1);
15220 
15221   } else {
15222     return SDValue();
15223   }
15224 }
15225 
15226 bool PPCTargetLowering::mayBeEmittedAsTailCall(const CallInst *CI) const {
  // Only duplicate to increase tail-calls for the 64-bit SysV ABIs.
15228   if (!Subtarget.isSVR4ABI() || !Subtarget.isPPC64())
15229     return false;
15230 
15231   // If not a tail call then no need to proceed.
15232   if (!CI->isTailCall())
15233     return false;
15234 
15235   // If tail calls are disabled for the caller then we are done.
15236   const Function *Caller = CI->getParent()->getParent();
15237   auto Attr = Caller->getFnAttribute("disable-tail-calls");
15238   if (Attr.getValueAsString() == "true")
15239     return false;
15240 
15241   // If sibling calls have been disabled and tail-calls aren't guaranteed
15242   // there is no reason to duplicate.
15243   auto &TM = getTargetMachine();
15244   if (!TM.Options.GuaranteedTailCallOpt && DisableSCO)
15245     return false;
15246 
15247   // Can't tail call a function called indirectly, or if it has variadic args.
15248   const Function *Callee = CI->getCalledFunction();
15249   if (!Callee || Callee->isVarArg())
15250     return false;
15251 
15252   // Make sure the callee and caller calling conventions are eligible for tco.
15253   if (!areCallingConvEligibleForTCO_64SVR4(Caller->getCallingConv(),
15254                                            CI->getCallingConv()))
15255       return false;
15256 
15257   // If the function is local then we have a good chance at tail-calling it
15258   return getTargetMachine().shouldAssumeDSOLocal(*Caller->getParent(), Callee);
15259 }
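// Illustrative IR (hypothetical names) of a call this hook would report as
// worth duplicating a return for:
//   %r = tail call i64 @callee(i64 %a)   ; direct, non-vararg, dso_local
//   ret i64 %r
// An indirect or vararg call, or a callee that may be preempted at link or
// load time, makes this hook return false.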

bool PPCTargetLowering::hasBitPreservingFPLogic(EVT VT) const {
  if (!Subtarget.hasVSX())
    return false;
  if (Subtarget.hasP9Vector() && VT == MVT::f128)
    return true;
  return VT == MVT::f32 || VT == MVT::f64 || VT == MVT::v4f32 ||
         VT == MVT::v2f64;
}

bool PPCTargetLowering::
isMaskAndCmp0FoldingBeneficial(const Instruction &AndI) const {
  const Value *Mask = AndI.getOperand(1);
  // If the mask is suitable for andi. or andis., we should sink the and.
  if (const ConstantInt *CI = dyn_cast<ConstantInt>(Mask)) {
    // We can't handle constants wider than 64 bits.
    if (CI->getBitWidth() > 64)
      return false;
    uint64_t ConstVal = CI->getZExtValue();
    return isUInt<16>(ConstVal) ||
           (isUInt<16>(ConstVal >> 16) && !(ConstVal & 0xFFFF));
  }

  // For non-constant masks, we can always use the record-form and.
  return true;
}
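// For example: mask 0x7FFF is encodable in andi., and mask 0xFFFF0000 in
// andis., so both are beneficial to sink; mask 0x1FFFF fits neither
// instruction's immediate field, so it is not.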

// Transform (abs (sub (zext a), (zext b))) to (vabsd a, b, 0)
// Transform (abs (sub (zext a), (zext_invec b))) to (vabsd a, b, 0)
// Transform (abs (sub (zext_invec a), (zext_invec b))) to (vabsd a, b, 0)
// Transform (abs (sub (zext_invec a), (zext b))) to (vabsd a, b, 0)
// Transform (abs (sub a, b)) to (vabsd a, b, 1) if a and b are of type v4i32
SDValue PPCTargetLowering::combineABS(SDNode *N, DAGCombinerInfo &DCI) const {
  assert((N->getOpcode() == ISD::ABS) && "Need ABS node here");
  assert(Subtarget.hasP9Altivec() &&
         "Only combine this when P9 Altivec is supported!");
  EVT VT = N->getValueType(0);
  if (VT != MVT::v4i32 && VT != MVT::v8i16 && VT != MVT::v16i8)
    return SDValue();

  SelectionDAG &DAG = DCI.DAG;
  SDLoc dl(N);
  if (N->getOperand(0).getOpcode() == ISD::SUB) {
    // Even though ABS is a signed operation, we can use the unsigned vabsd
    // here: zero-extended inputs are non-negative as signed integers, so
    // abs(a - b) is exactly the unsigned absolute difference of a and b.
    unsigned SubOpcd0 = N->getOperand(0)->getOperand(0).getOpcode();
    unsigned SubOpcd1 = N->getOperand(0)->getOperand(1).getOpcode();
    if ((SubOpcd0 == ISD::ZERO_EXTEND ||
         SubOpcd0 == ISD::ZERO_EXTEND_VECTOR_INREG) &&
        (SubOpcd1 == ISD::ZERO_EXTEND ||
         SubOpcd1 == ISD::ZERO_EXTEND_VECTOR_INREG)) {
      return DAG.getNode(PPCISD::VABSD, dl, N->getOperand(0).getValueType(),
                         N->getOperand(0)->getOperand(0),
                         N->getOperand(0)->getOperand(1),
                         DAG.getTargetConstant(0, dl, MVT::i32));
    }

    // For type v4i32, it can be optimized with xvnegsp + vabsduw.
    if (N->getOperand(0).getValueType() == MVT::v4i32 &&
        N->getOperand(0).hasOneUse()) {
      return DAG.getNode(PPCISD::VABSD, dl, N->getOperand(0).getValueType(),
                         N->getOperand(0)->getOperand(0),
                         N->getOperand(0)->getOperand(1),
                         DAG.getTargetConstant(1, dl, MVT::i32));
    }
  }

  return SDValue();
}
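// Illustrative DAG shapes the combine above handles, e.g. for v16i8 values
// a and b zero-extended to v8i16:
//   (abs (sub (zero_extend a), (zero_extend b)))
//     -> (PPCISD::VABSD (zero_extend a), (zero_extend b), 0)
// and for native v4i32 values a and b:
//   (abs (sub a, b)) -> (PPCISD::VABSD a, b, 1)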

// For type v4i32/v8i16/v16i8, transform
// from (vselect (setcc a, b, setugt), (sub a, b), (sub b, a)) to (vabsd a, b)
// from (vselect (setcc a, b, setuge), (sub a, b), (sub b, a)) to (vabsd a, b)
// from (vselect (setcc a, b, setult), (sub b, a), (sub a, b)) to (vabsd a, b)
// from (vselect (setcc a, b, setule), (sub b, a), (sub a, b)) to (vabsd a, b)
SDValue PPCTargetLowering::combineVSelect(SDNode *N,
                                          DAGCombinerInfo &DCI) const {
  assert((N->getOpcode() == ISD::VSELECT) && "Need VSELECT node here");
  assert(Subtarget.hasP9Altivec() &&
         "Only combine this when P9 Altivec is supported!");

  SelectionDAG &DAG = DCI.DAG;
  SDLoc dl(N);
  SDValue Cond = N->getOperand(0);
  SDValue TrueOpnd = N->getOperand(1);
  SDValue FalseOpnd = N->getOperand(2);
  EVT VT = N->getOperand(1).getValueType();

  if (Cond.getOpcode() != ISD::SETCC || TrueOpnd.getOpcode() != ISD::SUB ||
      FalseOpnd.getOpcode() != ISD::SUB)
    return SDValue();

  // vabsd is only available for types v4i32/v8i16/v16i8.
  if (VT != MVT::v4i32 && VT != MVT::v8i16 && VT != MVT::v16i8)
    return SDValue();

  // Require at least one operand to have a single use, so the combine saves
  // at least one dependent computation.
  if (!(Cond.hasOneUse() || TrueOpnd.hasOneUse() || FalseOpnd.hasOneUse()))
    return SDValue();

  ISD::CondCode CC = cast<CondCodeSDNode>(Cond.getOperand(2))->get();

  // Only unsigned comparisons can be handled here.
  switch (CC) {
  default:
    return SDValue();
  case ISD::SETUGT:
  case ISD::SETUGE:
    break;
  case ISD::SETULT:
  case ISD::SETULE:
    std::swap(TrueOpnd, FalseOpnd);
    break;
  }

  SDValue CmpOpnd1 = Cond.getOperand(0);
  SDValue CmpOpnd2 = Cond.getOperand(1);

  // SETCC CmpOpnd1 CmpOpnd2 cond
  // TrueOpnd = CmpOpnd1 - CmpOpnd2
  // FalseOpnd = CmpOpnd2 - CmpOpnd1
  if (TrueOpnd.getOperand(0) == CmpOpnd1 &&
      TrueOpnd.getOperand(1) == CmpOpnd2 &&
      FalseOpnd.getOperand(0) == CmpOpnd2 &&
      FalseOpnd.getOperand(1) == CmpOpnd1) {
    return DAG.getNode(PPCISD::VABSD, dl, N->getOperand(1).getValueType(),
                       CmpOpnd1, CmpOpnd2,
                       DAG.getTargetConstant(0, dl, MVT::i32));
  }

  return SDValue();
}
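// Illustrative example: for v4i32 values a and b,
//   (vselect (setcc a, b, setugt), (sub a, b), (sub b, a))
// becomes (PPCISD::VABSD a, b, 0), which can select to a single vabsduw
// instead of a compare, two subtracts, and a select.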