1 //===-- PPCISelLowering.cpp - PPC DAG Lowering Implementation -------------===//
2 //
3 //                     The LLVM Compiler Infrastructure
4 //
5 // This file is distributed under the University of Illinois Open Source
6 // License. See LICENSE.TXT for details.
7 //
8 //===----------------------------------------------------------------------===//
9 //
10 // This file implements the PPCISelLowering class.
11 //
12 //===----------------------------------------------------------------------===//
13 
14 #include "PPCISelLowering.h"
15 #include "MCTargetDesc/PPCPredicates.h"
16 #include "PPC.h"
17 #include "PPCCCState.h"
18 #include "PPCCallingConv.h"
19 #include "PPCFrameLowering.h"
20 #include "PPCInstrInfo.h"
21 #include "PPCMachineFunctionInfo.h"
22 #include "PPCPerfectShuffle.h"
23 #include "PPCRegisterInfo.h"
24 #include "PPCSubtarget.h"
25 #include "PPCTargetMachine.h"
26 #include "llvm/ADT/APFloat.h"
27 #include "llvm/ADT/APInt.h"
28 #include "llvm/ADT/ArrayRef.h"
29 #include "llvm/ADT/DenseMap.h"
30 #include "llvm/ADT/None.h"
31 #include "llvm/ADT/STLExtras.h"
32 #include "llvm/ADT/SmallPtrSet.h"
33 #include "llvm/ADT/SmallSet.h"
34 #include "llvm/ADT/SmallVector.h"
35 #include "llvm/ADT/Statistic.h"
36 #include "llvm/ADT/StringRef.h"
37 #include "llvm/ADT/StringSwitch.h"
38 #include "llvm/CodeGen/CallingConvLower.h"
39 #include "llvm/CodeGen/ISDOpcodes.h"
40 #include "llvm/CodeGen/MachineBasicBlock.h"
41 #include "llvm/CodeGen/MachineFrameInfo.h"
42 #include "llvm/CodeGen/MachineFunction.h"
43 #include "llvm/CodeGen/MachineInstr.h"
44 #include "llvm/CodeGen/MachineInstrBuilder.h"
45 #include "llvm/CodeGen/MachineJumpTableInfo.h"
46 #include "llvm/CodeGen/MachineLoopInfo.h"
47 #include "llvm/CodeGen/MachineMemOperand.h"
48 #include "llvm/CodeGen/MachineOperand.h"
49 #include "llvm/CodeGen/MachineRegisterInfo.h"
50 #include "llvm/CodeGen/RuntimeLibcalls.h"
51 #include "llvm/CodeGen/SelectionDAG.h"
52 #include "llvm/CodeGen/SelectionDAGNodes.h"
53 #include "llvm/CodeGen/TargetInstrInfo.h"
54 #include "llvm/CodeGen/TargetLowering.h"
55 #include "llvm/CodeGen/TargetRegisterInfo.h"
56 #include "llvm/CodeGen/ValueTypes.h"
57 #include "llvm/IR/CallSite.h"
58 #include "llvm/IR/CallingConv.h"
59 #include "llvm/IR/Constant.h"
60 #include "llvm/IR/Constants.h"
61 #include "llvm/IR/DataLayout.h"
62 #include "llvm/IR/DebugLoc.h"
63 #include "llvm/IR/DerivedTypes.h"
64 #include "llvm/IR/Function.h"
65 #include "llvm/IR/GlobalValue.h"
66 #include "llvm/IR/IRBuilder.h"
67 #include "llvm/IR/Instructions.h"
68 #include "llvm/IR/Intrinsics.h"
69 #include "llvm/IR/Module.h"
70 #include "llvm/IR/Type.h"
71 #include "llvm/IR/Use.h"
72 #include "llvm/IR/Value.h"
73 #include "llvm/MC/MCExpr.h"
74 #include "llvm/MC/MCRegisterInfo.h"
75 #include "llvm/Support/AtomicOrdering.h"
76 #include "llvm/Support/BranchProbability.h"
77 #include "llvm/Support/Casting.h"
78 #include "llvm/Support/CodeGen.h"
79 #include "llvm/Support/CommandLine.h"
80 #include "llvm/Support/Compiler.h"
81 #include "llvm/Support/Debug.h"
82 #include "llvm/Support/ErrorHandling.h"
83 #include "llvm/Support/Format.h"
84 #include "llvm/Support/KnownBits.h"
85 #include "llvm/Support/MachineValueType.h"
86 #include "llvm/Support/MathExtras.h"
87 #include "llvm/Support/raw_ostream.h"
88 #include "llvm/Target/TargetMachine.h"
89 #include "llvm/Target/TargetOptions.h"
90 #include <algorithm>
91 #include <cassert>
92 #include <cstdint>
93 #include <iterator>
94 #include <list>
95 #include <utility>
96 #include <vector>
97 
98 using namespace llvm;
99 
100 #define DEBUG_TYPE "ppc-lowering"
101 
102 static cl::opt<bool> DisablePPCPreinc("disable-ppc-preinc",
103 cl::desc("disable preincrement load/store generation on PPC"), cl::Hidden);
104 
105 static cl::opt<bool> DisableILPPref("disable-ppc-ilp-pref",
106 cl::desc("disable setting the node scheduling preference to ILP on PPC"), cl::Hidden);
107 
108 static cl::opt<bool> DisablePPCUnaligned("disable-ppc-unaligned",
109 cl::desc("disable unaligned load/store generation on PPC"), cl::Hidden);
110 
111 static cl::opt<bool> DisableSCO("disable-ppc-sco",
112 cl::desc("disable sibling call optimization on ppc"), cl::Hidden);
113 
114 static cl::opt<bool> EnableQuadPrecision("enable-ppc-quad-precision",
115 cl::desc("enable quad precision float support on ppc"), cl::Hidden);
116 
117 STATISTIC(NumTailCalls, "Number of tail calls");
118 STATISTIC(NumSiblingCalls, "Number of sibling calls");
119 
120 static bool isNByteElemShuffleMask(ShuffleVectorSDNode *, unsigned, int);
121 
122 // FIXME: Remove this once the bug has been fixed!
123 extern cl::opt<bool> ANDIGlueBug;
124 
125 PPCTargetLowering::PPCTargetLowering(const PPCTargetMachine &TM,
126                                      const PPCSubtarget &STI)
127     : TargetLowering(TM), Subtarget(STI) {
128   // Use _setjmp/_longjmp instead of setjmp/longjmp.
129   setUseUnderscoreSetJmp(true);
130   setUseUnderscoreLongJmp(true);
131 
132   // On PPC32/64, arguments smaller than 4/8 bytes are extended, so all
133   // arguments are at least 4/8 bytes aligned.
134   bool isPPC64 = Subtarget.isPPC64();
135   setMinStackArgumentAlignment(isPPC64 ? 8:4);
136 
137   // Set up the register classes.
138   addRegisterClass(MVT::i32, &PPC::GPRCRegClass);
139   if (!useSoftFloat()) {
140     if (hasSPE()) {
141       addRegisterClass(MVT::f32, &PPC::SPE4RCRegClass);
142       addRegisterClass(MVT::f64, &PPC::SPERCRegClass);
143     } else {
144       addRegisterClass(MVT::f32, &PPC::F4RCRegClass);
145       addRegisterClass(MVT::f64, &PPC::F8RCRegClass);
146     }
147   }
148 
149   // Match BITREVERSE to customized fast code sequence in the td file.
150   setOperationAction(ISD::BITREVERSE, MVT::i32, Legal);
151   setOperationAction(ISD::BITREVERSE, MVT::i64, Legal);
152 
153   // Sub-word ATOMIC_CMP_SWAP need to ensure that the input is zero-extended.
154   setOperationAction(ISD::ATOMIC_CMP_SWAP, MVT::i32, Custom);
155 
156   // PowerPC has an i16 but no i8 (or i1) SEXTLOAD.
157   for (MVT VT : MVT::integer_valuetypes()) {
158     setLoadExtAction(ISD::SEXTLOAD, VT, MVT::i1, Promote);
159     setLoadExtAction(ISD::SEXTLOAD, VT, MVT::i8, Expand);
160   }
161 
162   setTruncStoreAction(MVT::f64, MVT::f32, Expand);
163 
164   // PowerPC has pre-inc load and store's.
165   setIndexedLoadAction(ISD::PRE_INC, MVT::i1, Legal);
166   setIndexedLoadAction(ISD::PRE_INC, MVT::i8, Legal);
167   setIndexedLoadAction(ISD::PRE_INC, MVT::i16, Legal);
168   setIndexedLoadAction(ISD::PRE_INC, MVT::i32, Legal);
169   setIndexedLoadAction(ISD::PRE_INC, MVT::i64, Legal);
170   setIndexedStoreAction(ISD::PRE_INC, MVT::i1, Legal);
171   setIndexedStoreAction(ISD::PRE_INC, MVT::i8, Legal);
172   setIndexedStoreAction(ISD::PRE_INC, MVT::i16, Legal);
173   setIndexedStoreAction(ISD::PRE_INC, MVT::i32, Legal);
174   setIndexedStoreAction(ISD::PRE_INC, MVT::i64, Legal);
175   if (!Subtarget.hasSPE()) {
176     setIndexedLoadAction(ISD::PRE_INC, MVT::f32, Legal);
177     setIndexedLoadAction(ISD::PRE_INC, MVT::f64, Legal);
178     setIndexedStoreAction(ISD::PRE_INC, MVT::f32, Legal);
179     setIndexedStoreAction(ISD::PRE_INC, MVT::f64, Legal);
180   }
181 
182   // PowerPC uses ADDC/ADDE/SUBC/SUBE to propagate carry.
183   const MVT ScalarIntVTs[] = { MVT::i32, MVT::i64 };
184   for (MVT VT : ScalarIntVTs) {
185     setOperationAction(ISD::ADDC, VT, Legal);
186     setOperationAction(ISD::ADDE, VT, Legal);
187     setOperationAction(ISD::SUBC, VT, Legal);
188     setOperationAction(ISD::SUBE, VT, Legal);
189   }
190 
191   if (Subtarget.useCRBits()) {
192     setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i1, Expand);
193 
194     if (isPPC64 || Subtarget.hasFPCVT()) {
195       setOperationAction(ISD::SINT_TO_FP, MVT::i1, Promote);
196       AddPromotedToType (ISD::SINT_TO_FP, MVT::i1,
197                          isPPC64 ? MVT::i64 : MVT::i32);
198       setOperationAction(ISD::UINT_TO_FP, MVT::i1, Promote);
199       AddPromotedToType(ISD::UINT_TO_FP, MVT::i1,
200                         isPPC64 ? MVT::i64 : MVT::i32);
201     } else {
202       setOperationAction(ISD::SINT_TO_FP, MVT::i1, Custom);
203       setOperationAction(ISD::UINT_TO_FP, MVT::i1, Custom);
204     }
205 
206     // PowerPC does not support direct load/store of condition registers.
207     setOperationAction(ISD::LOAD, MVT::i1, Custom);
208     setOperationAction(ISD::STORE, MVT::i1, Custom);
209 
210     // FIXME: Remove this once the ANDI glue bug is fixed:
211     if (ANDIGlueBug)
212       setOperationAction(ISD::TRUNCATE, MVT::i1, Custom);
213 
214     for (MVT VT : MVT::integer_valuetypes()) {
215       setLoadExtAction(ISD::SEXTLOAD, VT, MVT::i1, Promote);
216       setLoadExtAction(ISD::ZEXTLOAD, VT, MVT::i1, Promote);
217       setTruncStoreAction(VT, MVT::i1, Expand);
218     }
219 
220     addRegisterClass(MVT::i1, &PPC::CRBITRCRegClass);
221   }
222 
223   // Expand ppcf128 to i32 by hand for the benefit of llvm-gcc bootstrap on
224   // PPC (the libcall is not available).
225   setOperationAction(ISD::FP_TO_SINT, MVT::ppcf128, Custom);
226   setOperationAction(ISD::FP_TO_UINT, MVT::ppcf128, Custom);
227 
228   // We do not currently implement these libm ops for PowerPC.
229   setOperationAction(ISD::FFLOOR, MVT::ppcf128, Expand);
230   setOperationAction(ISD::FCEIL,  MVT::ppcf128, Expand);
231   setOperationAction(ISD::FTRUNC, MVT::ppcf128, Expand);
232   setOperationAction(ISD::FRINT,  MVT::ppcf128, Expand);
233   setOperationAction(ISD::FNEARBYINT, MVT::ppcf128, Expand);
234   setOperationAction(ISD::FREM, MVT::ppcf128, Expand);
235 
236   // PowerPC has no SREM/UREM instructions unless we are on P9
237   // On P9 we may use a hardware instruction to compute the remainder.
238   // The instructions are not legalized directly because in the cases where the
239   // result of both the remainder and the division is required it is more
240   // efficient to compute the remainder from the result of the division rather
241   // than use the remainder instruction.
242   if (Subtarget.isISA3_0()) {
243     setOperationAction(ISD::SREM, MVT::i32, Custom);
244     setOperationAction(ISD::UREM, MVT::i32, Custom);
245     setOperationAction(ISD::SREM, MVT::i64, Custom);
246     setOperationAction(ISD::UREM, MVT::i64, Custom);
247   } else {
248     setOperationAction(ISD::SREM, MVT::i32, Expand);
249     setOperationAction(ISD::UREM, MVT::i32, Expand);
250     setOperationAction(ISD::SREM, MVT::i64, Expand);
251     setOperationAction(ISD::UREM, MVT::i64, Expand);
252   }
253 
254   if (Subtarget.hasP9Vector()) {
255     setOperationAction(ISD::ABS, MVT::v4i32, Legal);
256     setOperationAction(ISD::ABS, MVT::v8i16, Legal);
257     setOperationAction(ISD::ABS, MVT::v16i8, Legal);
258   }
259 
260   // Don't use SMUL_LOHI/UMUL_LOHI or SDIVREM/UDIVREM to lower SREM/UREM.
261   setOperationAction(ISD::UMUL_LOHI, MVT::i32, Expand);
262   setOperationAction(ISD::SMUL_LOHI, MVT::i32, Expand);
263   setOperationAction(ISD::UMUL_LOHI, MVT::i64, Expand);
264   setOperationAction(ISD::SMUL_LOHI, MVT::i64, Expand);
265   setOperationAction(ISD::UDIVREM, MVT::i32, Expand);
266   setOperationAction(ISD::SDIVREM, MVT::i32, Expand);
267   setOperationAction(ISD::UDIVREM, MVT::i64, Expand);
268   setOperationAction(ISD::SDIVREM, MVT::i64, Expand);
269 
270   // We don't support sin/cos/sqrt/fmod/pow
271   setOperationAction(ISD::FSIN , MVT::f64, Expand);
272   setOperationAction(ISD::FCOS , MVT::f64, Expand);
273   setOperationAction(ISD::FSINCOS, MVT::f64, Expand);
274   setOperationAction(ISD::FREM , MVT::f64, Expand);
275   setOperationAction(ISD::FPOW , MVT::f64, Expand);
276   setOperationAction(ISD::FSIN , MVT::f32, Expand);
277   setOperationAction(ISD::FCOS , MVT::f32, Expand);
278   setOperationAction(ISD::FSINCOS, MVT::f32, Expand);
279   setOperationAction(ISD::FREM , MVT::f32, Expand);
280   setOperationAction(ISD::FPOW , MVT::f32, Expand);
281   if (Subtarget.hasSPE()) {
282     setOperationAction(ISD::FMA  , MVT::f64, Expand);
283     setOperationAction(ISD::FMA  , MVT::f32, Expand);
284   } else {
285     setOperationAction(ISD::FMA  , MVT::f64, Legal);
286     setOperationAction(ISD::FMA  , MVT::f32, Legal);
287   }
288 
289   setOperationAction(ISD::FLT_ROUNDS_, MVT::i32, Custom);
290 
291   // If we're enabling GP optimizations, use hardware square root
292   if (!Subtarget.hasFSQRT() &&
293       !(TM.Options.UnsafeFPMath && Subtarget.hasFRSQRTE() &&
294         Subtarget.hasFRE()))
295     setOperationAction(ISD::FSQRT, MVT::f64, Expand);
296 
297   if (!Subtarget.hasFSQRT() &&
298       !(TM.Options.UnsafeFPMath && Subtarget.hasFRSQRTES() &&
299         Subtarget.hasFRES()))
300     setOperationAction(ISD::FSQRT, MVT::f32, Expand);
301 
302   if (Subtarget.hasFCPSGN()) {
303     setOperationAction(ISD::FCOPYSIGN, MVT::f64, Legal);
304     setOperationAction(ISD::FCOPYSIGN, MVT::f32, Legal);
305   } else {
306     setOperationAction(ISD::FCOPYSIGN, MVT::f64, Expand);
307     setOperationAction(ISD::FCOPYSIGN, MVT::f32, Expand);
308   }
309 
310   if (Subtarget.hasFPRND()) {
311     setOperationAction(ISD::FFLOOR, MVT::f64, Legal);
312     setOperationAction(ISD::FCEIL,  MVT::f64, Legal);
313     setOperationAction(ISD::FTRUNC, MVT::f64, Legal);
314     setOperationAction(ISD::FROUND, MVT::f64, Legal);
315 
316     setOperationAction(ISD::FFLOOR, MVT::f32, Legal);
317     setOperationAction(ISD::FCEIL,  MVT::f32, Legal);
318     setOperationAction(ISD::FTRUNC, MVT::f32, Legal);
319     setOperationAction(ISD::FROUND, MVT::f32, Legal);
320   }
321 
322   // PowerPC does not have BSWAP, but we can use vector BSWAP instruction xxbrd
323   // to speed up scalar BSWAP64.
324   // CTPOP or CTTZ were introduced in P8/P9 respectively
325   setOperationAction(ISD::BSWAP, MVT::i32  , Expand);
326   if (Subtarget.isISA3_0()) {
327     setOperationAction(ISD::BSWAP, MVT::i64  , Custom);
328     setOperationAction(ISD::CTTZ , MVT::i32  , Legal);
329     setOperationAction(ISD::CTTZ , MVT::i64  , Legal);
330   } else {
331     setOperationAction(ISD::BSWAP, MVT::i64  , Expand);
332     setOperationAction(ISD::CTTZ , MVT::i32  , Expand);
333     setOperationAction(ISD::CTTZ , MVT::i64  , Expand);
334   }
335 
336   if (Subtarget.hasPOPCNTD() == PPCSubtarget::POPCNTD_Fast) {
337     setOperationAction(ISD::CTPOP, MVT::i32  , Legal);
338     setOperationAction(ISD::CTPOP, MVT::i64  , Legal);
339   } else {
340     setOperationAction(ISD::CTPOP, MVT::i32  , Expand);
341     setOperationAction(ISD::CTPOP, MVT::i64  , Expand);
342   }
343 
344   // PowerPC does not have ROTR
345   setOperationAction(ISD::ROTR, MVT::i32   , Expand);
346   setOperationAction(ISD::ROTR, MVT::i64   , Expand);
347 
348   if (!Subtarget.useCRBits()) {
349     // PowerPC does not have Select
350     setOperationAction(ISD::SELECT, MVT::i32, Expand);
351     setOperationAction(ISD::SELECT, MVT::i64, Expand);
352     setOperationAction(ISD::SELECT, MVT::f32, Expand);
353     setOperationAction(ISD::SELECT, MVT::f64, Expand);
354   }
355 
356   // PowerPC wants to turn select_cc of FP into fsel when possible.
357   setOperationAction(ISD::SELECT_CC, MVT::f32, Custom);
358   setOperationAction(ISD::SELECT_CC, MVT::f64, Custom);
359 
360   // PowerPC wants to optimize integer setcc a bit
361   if (!Subtarget.useCRBits())
362     setOperationAction(ISD::SETCC, MVT::i32, Custom);
363 
364   // PowerPC does not have BRCOND which requires SetCC
365   if (!Subtarget.useCRBits())
366     setOperationAction(ISD::BRCOND, MVT::Other, Expand);
367 
368   setOperationAction(ISD::BR_JT,  MVT::Other, Expand);
369 
370   if (Subtarget.hasSPE()) {
371     // SPE has built-in conversions
372     setOperationAction(ISD::FP_TO_SINT, MVT::i32, Legal);
373     setOperationAction(ISD::SINT_TO_FP, MVT::i32, Legal);
374     setOperationAction(ISD::UINT_TO_FP, MVT::i32, Legal);
375   } else {
376     // PowerPC turns FP_TO_SINT into FCTIWZ and some load/stores.
377     setOperationAction(ISD::FP_TO_SINT, MVT::i32, Custom);
378 
379     // PowerPC does not have [U|S]INT_TO_FP
380     setOperationAction(ISD::SINT_TO_FP, MVT::i32, Expand);
381     setOperationAction(ISD::UINT_TO_FP, MVT::i32, Expand);
382   }
383 
384   if (Subtarget.hasDirectMove() && isPPC64) {
385     setOperationAction(ISD::BITCAST, MVT::f32, Legal);
386     setOperationAction(ISD::BITCAST, MVT::i32, Legal);
387     setOperationAction(ISD::BITCAST, MVT::i64, Legal);
388     setOperationAction(ISD::BITCAST, MVT::f64, Legal);
389   } else {
390     setOperationAction(ISD::BITCAST, MVT::f32, Expand);
391     setOperationAction(ISD::BITCAST, MVT::i32, Expand);
392     setOperationAction(ISD::BITCAST, MVT::i64, Expand);
393     setOperationAction(ISD::BITCAST, MVT::f64, Expand);
394   }
395 
396   // We cannot sextinreg(i1).  Expand to shifts.
397   setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i1, Expand);
398 
399   // NOTE: EH_SJLJ_SETJMP/_LONGJMP supported here is NOT intended to support
400   // SjLj exception handling but a light-weight setjmp/longjmp replacement to
401   // support continuation, user-level threading, and etc.. As a result, no
402   // other SjLj exception interfaces are implemented and please don't build
403   // your own exception handling based on them.
404   // LLVM/Clang supports zero-cost DWARF exception handling.
405   setOperationAction(ISD::EH_SJLJ_SETJMP, MVT::i32, Custom);
406   setOperationAction(ISD::EH_SJLJ_LONGJMP, MVT::Other, Custom);
407 
408   // We want to legalize GlobalAddress and ConstantPool nodes into the
409   // appropriate instructions to materialize the address.
410   setOperationAction(ISD::GlobalAddress, MVT::i32, Custom);
411   setOperationAction(ISD::GlobalTLSAddress, MVT::i32, Custom);
412   setOperationAction(ISD::BlockAddress,  MVT::i32, Custom);
413   setOperationAction(ISD::ConstantPool,  MVT::i32, Custom);
414   setOperationAction(ISD::JumpTable,     MVT::i32, Custom);
415   setOperationAction(ISD::GlobalAddress, MVT::i64, Custom);
416   setOperationAction(ISD::GlobalTLSAddress, MVT::i64, Custom);
417   setOperationAction(ISD::BlockAddress,  MVT::i64, Custom);
418   setOperationAction(ISD::ConstantPool,  MVT::i64, Custom);
419   setOperationAction(ISD::JumpTable,     MVT::i64, Custom);
420 
421   // TRAP is legal.
422   setOperationAction(ISD::TRAP, MVT::Other, Legal);
423 
424   // TRAMPOLINE is custom lowered.
425   setOperationAction(ISD::INIT_TRAMPOLINE, MVT::Other, Custom);
426   setOperationAction(ISD::ADJUST_TRAMPOLINE, MVT::Other, Custom);
427 
428   // VASTART needs to be custom lowered to use the VarArgsFrameIndex
429   setOperationAction(ISD::VASTART           , MVT::Other, Custom);
430 
431   if (Subtarget.isSVR4ABI()) {
432     if (isPPC64) {
433       // VAARG always uses double-word chunks, so promote anything smaller.
434       setOperationAction(ISD::VAARG, MVT::i1, Promote);
435       AddPromotedToType (ISD::VAARG, MVT::i1, MVT::i64);
436       setOperationAction(ISD::VAARG, MVT::i8, Promote);
437       AddPromotedToType (ISD::VAARG, MVT::i8, MVT::i64);
438       setOperationAction(ISD::VAARG, MVT::i16, Promote);
439       AddPromotedToType (ISD::VAARG, MVT::i16, MVT::i64);
440       setOperationAction(ISD::VAARG, MVT::i32, Promote);
441       AddPromotedToType (ISD::VAARG, MVT::i32, MVT::i64);
442       setOperationAction(ISD::VAARG, MVT::Other, Expand);
443     } else {
444       // VAARG is custom lowered with the 32-bit SVR4 ABI.
445       setOperationAction(ISD::VAARG, MVT::Other, Custom);
446       setOperationAction(ISD::VAARG, MVT::i64, Custom);
447     }
448   } else
449     setOperationAction(ISD::VAARG, MVT::Other, Expand);
450 
451   if (Subtarget.isSVR4ABI() && !isPPC64)
452     // VACOPY is custom lowered with the 32-bit SVR4 ABI.
453     setOperationAction(ISD::VACOPY            , MVT::Other, Custom);
454   else
455     setOperationAction(ISD::VACOPY            , MVT::Other, Expand);
456 
457   // Use the default implementation.
458   setOperationAction(ISD::VAEND             , MVT::Other, Expand);
459   setOperationAction(ISD::STACKSAVE         , MVT::Other, Expand);
460   setOperationAction(ISD::STACKRESTORE      , MVT::Other, Custom);
461   setOperationAction(ISD::DYNAMIC_STACKALLOC, MVT::i32  , Custom);
462   setOperationAction(ISD::DYNAMIC_STACKALLOC, MVT::i64  , Custom);
463   setOperationAction(ISD::GET_DYNAMIC_AREA_OFFSET, MVT::i32, Custom);
464   setOperationAction(ISD::GET_DYNAMIC_AREA_OFFSET, MVT::i64, Custom);
465   setOperationAction(ISD::EH_DWARF_CFA, MVT::i32, Custom);
466   setOperationAction(ISD::EH_DWARF_CFA, MVT::i64, Custom);
467 
468   // We want to custom lower some of our intrinsics.
469   setOperationAction(ISD::INTRINSIC_WO_CHAIN, MVT::Other, Custom);
470 
471   // To handle counter-based loop conditions.
472   setOperationAction(ISD::INTRINSIC_W_CHAIN, MVT::i1, Custom);
473 
474   setOperationAction(ISD::INTRINSIC_VOID, MVT::i8, Custom);
475   setOperationAction(ISD::INTRINSIC_VOID, MVT::i16, Custom);
476   setOperationAction(ISD::INTRINSIC_VOID, MVT::i32, Custom);
477   setOperationAction(ISD::INTRINSIC_VOID, MVT::Other, Custom);
478 
479   // Comparisons that require checking two conditions.
480   if (Subtarget.hasSPE()) {
481     setCondCodeAction(ISD::SETO, MVT::f32, Expand);
482     setCondCodeAction(ISD::SETO, MVT::f64, Expand);
483     setCondCodeAction(ISD::SETUO, MVT::f32, Expand);
484     setCondCodeAction(ISD::SETUO, MVT::f64, Expand);
485   }
486   setCondCodeAction(ISD::SETULT, MVT::f32, Expand);
487   setCondCodeAction(ISD::SETULT, MVT::f64, Expand);
488   setCondCodeAction(ISD::SETUGT, MVT::f32, Expand);
489   setCondCodeAction(ISD::SETUGT, MVT::f64, Expand);
490   setCondCodeAction(ISD::SETUEQ, MVT::f32, Expand);
491   setCondCodeAction(ISD::SETUEQ, MVT::f64, Expand);
492   setCondCodeAction(ISD::SETOGE, MVT::f32, Expand);
493   setCondCodeAction(ISD::SETOGE, MVT::f64, Expand);
494   setCondCodeAction(ISD::SETOLE, MVT::f32, Expand);
495   setCondCodeAction(ISD::SETOLE, MVT::f64, Expand);
496   setCondCodeAction(ISD::SETONE, MVT::f32, Expand);
497   setCondCodeAction(ISD::SETONE, MVT::f64, Expand);
498 
499   if (Subtarget.has64BitSupport()) {
500     // They also have instructions for converting between i64 and fp.
501     setOperationAction(ISD::FP_TO_SINT, MVT::i64, Custom);
502     setOperationAction(ISD::FP_TO_UINT, MVT::i64, Expand);
503     setOperationAction(ISD::SINT_TO_FP, MVT::i64, Custom);
504     setOperationAction(ISD::UINT_TO_FP, MVT::i64, Expand);
505     // This is just the low 32 bits of a (signed) fp->i64 conversion.
506     // We cannot do this with Promote because i64 is not a legal type.
507     setOperationAction(ISD::FP_TO_UINT, MVT::i32, Custom);
508 
509     if (Subtarget.hasLFIWAX() || Subtarget.isPPC64())
510       setOperationAction(ISD::SINT_TO_FP, MVT::i32, Custom);
511   } else {
512     // PowerPC does not have FP_TO_UINT on 32-bit implementations.
513     if (Subtarget.hasSPE())
514       setOperationAction(ISD::FP_TO_UINT, MVT::i32, Legal);
515     else
516       setOperationAction(ISD::FP_TO_UINT, MVT::i32, Expand);
517   }
518 
519   // With the instructions enabled under FPCVT, we can do everything.
520   if (Subtarget.hasFPCVT()) {
521     if (Subtarget.has64BitSupport()) {
522       setOperationAction(ISD::FP_TO_SINT, MVT::i64, Custom);
523       setOperationAction(ISD::FP_TO_UINT, MVT::i64, Custom);
524       setOperationAction(ISD::SINT_TO_FP, MVT::i64, Custom);
525       setOperationAction(ISD::UINT_TO_FP, MVT::i64, Custom);
526     }
527 
528     setOperationAction(ISD::FP_TO_SINT, MVT::i32, Custom);
529     setOperationAction(ISD::FP_TO_UINT, MVT::i32, Custom);
530     setOperationAction(ISD::SINT_TO_FP, MVT::i32, Custom);
531     setOperationAction(ISD::UINT_TO_FP, MVT::i32, Custom);
532   }
533 
534   if (Subtarget.use64BitRegs()) {
535     // 64-bit PowerPC implementations can support i64 types directly
536     addRegisterClass(MVT::i64, &PPC::G8RCRegClass);
537     // BUILD_PAIR can't be handled natively, and should be expanded to shl/or
538     setOperationAction(ISD::BUILD_PAIR, MVT::i64, Expand);
539     // 64-bit PowerPC wants to expand i128 shifts itself.
540     setOperationAction(ISD::SHL_PARTS, MVT::i64, Custom);
541     setOperationAction(ISD::SRA_PARTS, MVT::i64, Custom);
542     setOperationAction(ISD::SRL_PARTS, MVT::i64, Custom);
543   } else {
544     // 32-bit PowerPC wants to expand i64 shifts itself.
545     setOperationAction(ISD::SHL_PARTS, MVT::i32, Custom);
546     setOperationAction(ISD::SRA_PARTS, MVT::i32, Custom);
547     setOperationAction(ISD::SRL_PARTS, MVT::i32, Custom);
548   }
549 
550   if (Subtarget.hasAltivec()) {
551     // First set operation action for all vector types to expand. Then we
552     // will selectively turn on ones that can be effectively codegen'd.
553     for (MVT VT : MVT::vector_valuetypes()) {
554       // add/sub are legal for all supported vector VT's.
555       setOperationAction(ISD::ADD, VT, Legal);
556       setOperationAction(ISD::SUB, VT, Legal);
557 
558       // Vector instructions introduced in P8
559       if (Subtarget.hasP8Altivec() && (VT.SimpleTy != MVT::v1i128)) {
560         setOperationAction(ISD::CTPOP, VT, Legal);
561         setOperationAction(ISD::CTLZ, VT, Legal);
562       }
563       else {
564         setOperationAction(ISD::CTPOP, VT, Expand);
565         setOperationAction(ISD::CTLZ, VT, Expand);
566       }
567 
568       // Vector instructions introduced in P9
569       if (Subtarget.hasP9Altivec() && (VT.SimpleTy != MVT::v1i128))
570         setOperationAction(ISD::CTTZ, VT, Legal);
571       else
572         setOperationAction(ISD::CTTZ, VT, Expand);
573 
574       // We promote all shuffles to v16i8.
575       setOperationAction(ISD::VECTOR_SHUFFLE, VT, Promote);
576       AddPromotedToType (ISD::VECTOR_SHUFFLE, VT, MVT::v16i8);
577 
578       // We promote all non-typed operations to v4i32.
579       setOperationAction(ISD::AND   , VT, Promote);
580       AddPromotedToType (ISD::AND   , VT, MVT::v4i32);
581       setOperationAction(ISD::OR    , VT, Promote);
582       AddPromotedToType (ISD::OR    , VT, MVT::v4i32);
583       setOperationAction(ISD::XOR   , VT, Promote);
584       AddPromotedToType (ISD::XOR   , VT, MVT::v4i32);
585       setOperationAction(ISD::LOAD  , VT, Promote);
586       AddPromotedToType (ISD::LOAD  , VT, MVT::v4i32);
587       setOperationAction(ISD::SELECT, VT, Promote);
588       AddPromotedToType (ISD::SELECT, VT, MVT::v4i32);
589       setOperationAction(ISD::VSELECT, VT, Promote);
590       AddPromotedToType (ISD::VSELECT, VT, MVT::v4i32);
591       setOperationAction(ISD::SELECT_CC, VT, Promote);
592       AddPromotedToType (ISD::SELECT_CC, VT, MVT::v4i32);
593       setOperationAction(ISD::STORE, VT, Promote);
594       AddPromotedToType (ISD::STORE, VT, MVT::v4i32);
595 
596       // No other operations are legal.
597       setOperationAction(ISD::MUL , VT, Expand);
598       setOperationAction(ISD::SDIV, VT, Expand);
599       setOperationAction(ISD::SREM, VT, Expand);
600       setOperationAction(ISD::UDIV, VT, Expand);
601       setOperationAction(ISD::UREM, VT, Expand);
602       setOperationAction(ISD::FDIV, VT, Expand);
603       setOperationAction(ISD::FREM, VT, Expand);
604       setOperationAction(ISD::FNEG, VT, Expand);
605       setOperationAction(ISD::FSQRT, VT, Expand);
606       setOperationAction(ISD::FLOG, VT, Expand);
607       setOperationAction(ISD::FLOG10, VT, Expand);
608       setOperationAction(ISD::FLOG2, VT, Expand);
609       setOperationAction(ISD::FEXP, VT, Expand);
610       setOperationAction(ISD::FEXP2, VT, Expand);
611       setOperationAction(ISD::FSIN, VT, Expand);
612       setOperationAction(ISD::FCOS, VT, Expand);
613       setOperationAction(ISD::FABS, VT, Expand);
614       setOperationAction(ISD::FFLOOR, VT, Expand);
615       setOperationAction(ISD::FCEIL,  VT, Expand);
616       setOperationAction(ISD::FTRUNC, VT, Expand);
617       setOperationAction(ISD::FRINT,  VT, Expand);
618       setOperationAction(ISD::FNEARBYINT, VT, Expand);
619       setOperationAction(ISD::EXTRACT_VECTOR_ELT, VT, Expand);
620       setOperationAction(ISD::INSERT_VECTOR_ELT, VT, Expand);
621       setOperationAction(ISD::BUILD_VECTOR, VT, Expand);
622       setOperationAction(ISD::MULHU, VT, Expand);
623       setOperationAction(ISD::MULHS, VT, Expand);
624       setOperationAction(ISD::UMUL_LOHI, VT, Expand);
625       setOperationAction(ISD::SMUL_LOHI, VT, Expand);
626       setOperationAction(ISD::UDIVREM, VT, Expand);
627       setOperationAction(ISD::SDIVREM, VT, Expand);
628       setOperationAction(ISD::SCALAR_TO_VECTOR, VT, Expand);
629       setOperationAction(ISD::FPOW, VT, Expand);
630       setOperationAction(ISD::BSWAP, VT, Expand);
631       setOperationAction(ISD::SIGN_EXTEND_INREG, VT, Expand);
632       setOperationAction(ISD::ROTL, VT, Expand);
633       setOperationAction(ISD::ROTR, VT, Expand);
634 
635       for (MVT InnerVT : MVT::vector_valuetypes()) {
636         setTruncStoreAction(VT, InnerVT, Expand);
637         setLoadExtAction(ISD::SEXTLOAD, VT, InnerVT, Expand);
638         setLoadExtAction(ISD::ZEXTLOAD, VT, InnerVT, Expand);
639         setLoadExtAction(ISD::EXTLOAD, VT, InnerVT, Expand);
640       }
641     }
642 
643     // We can custom expand all VECTOR_SHUFFLEs to VPERM, others we can handle
644     // with merges, splats, etc.
645     setOperationAction(ISD::VECTOR_SHUFFLE, MVT::v16i8, Custom);
646 
647     setOperationAction(ISD::AND   , MVT::v4i32, Legal);
648     setOperationAction(ISD::OR    , MVT::v4i32, Legal);
649     setOperationAction(ISD::XOR   , MVT::v4i32, Legal);
650     setOperationAction(ISD::LOAD  , MVT::v4i32, Legal);
651     setOperationAction(ISD::SELECT, MVT::v4i32,
652                        Subtarget.useCRBits() ? Legal : Expand);
653     setOperationAction(ISD::VSELECT, MVT::v4i32, Legal);
654     setOperationAction(ISD::STORE , MVT::v4i32, Legal);
655     setOperationAction(ISD::FP_TO_SINT, MVT::v4i32, Legal);
656     setOperationAction(ISD::FP_TO_UINT, MVT::v4i32, Legal);
657     setOperationAction(ISD::SINT_TO_FP, MVT::v4i32, Legal);
658     setOperationAction(ISD::UINT_TO_FP, MVT::v4i32, Legal);
659     setOperationAction(ISD::FFLOOR, MVT::v4f32, Legal);
660     setOperationAction(ISD::FCEIL, MVT::v4f32, Legal);
661     setOperationAction(ISD::FTRUNC, MVT::v4f32, Legal);
662     setOperationAction(ISD::FNEARBYINT, MVT::v4f32, Legal);
663 
664     addRegisterClass(MVT::v4f32, &PPC::VRRCRegClass);
665     addRegisterClass(MVT::v4i32, &PPC::VRRCRegClass);
666     addRegisterClass(MVT::v8i16, &PPC::VRRCRegClass);
667     addRegisterClass(MVT::v16i8, &PPC::VRRCRegClass);
668 
669     setOperationAction(ISD::MUL, MVT::v4f32, Legal);
670     setOperationAction(ISD::FMA, MVT::v4f32, Legal);
671 
672     if (TM.Options.UnsafeFPMath || Subtarget.hasVSX()) {
673       setOperationAction(ISD::FDIV, MVT::v4f32, Legal);
674       setOperationAction(ISD::FSQRT, MVT::v4f32, Legal);
675     }
676 
677     if (Subtarget.hasP8Altivec())
678       setOperationAction(ISD::MUL, MVT::v4i32, Legal);
679     else
680       setOperationAction(ISD::MUL, MVT::v4i32, Custom);
681 
682     setOperationAction(ISD::MUL, MVT::v8i16, Custom);
683     setOperationAction(ISD::MUL, MVT::v16i8, Custom);
684 
685     setOperationAction(ISD::SCALAR_TO_VECTOR, MVT::v4f32, Custom);
686     setOperationAction(ISD::SCALAR_TO_VECTOR, MVT::v4i32, Custom);
687 
688     setOperationAction(ISD::BUILD_VECTOR, MVT::v16i8, Custom);
689     setOperationAction(ISD::BUILD_VECTOR, MVT::v8i16, Custom);
690     setOperationAction(ISD::BUILD_VECTOR, MVT::v4i32, Custom);
691     setOperationAction(ISD::BUILD_VECTOR, MVT::v4f32, Custom);
692 
693     // Altivec does not contain unordered floating-point compare instructions
694     setCondCodeAction(ISD::SETUO, MVT::v4f32, Expand);
695     setCondCodeAction(ISD::SETUEQ, MVT::v4f32, Expand);
696     setCondCodeAction(ISD::SETO,   MVT::v4f32, Expand);
697     setCondCodeAction(ISD::SETONE, MVT::v4f32, Expand);
698 
699     if (Subtarget.hasVSX()) {
700       setOperationAction(ISD::SCALAR_TO_VECTOR, MVT::v2f64, Legal);
701       setOperationAction(ISD::EXTRACT_VECTOR_ELT, MVT::v2f64, Legal);
702       if (Subtarget.hasP8Vector()) {
703         setOperationAction(ISD::SCALAR_TO_VECTOR, MVT::v4f32, Legal);
704         setOperationAction(ISD::EXTRACT_VECTOR_ELT, MVT::v4f32, Legal);
705       }
706       if (Subtarget.hasDirectMove() && isPPC64) {
707         setOperationAction(ISD::SCALAR_TO_VECTOR, MVT::v16i8, Legal);
708         setOperationAction(ISD::SCALAR_TO_VECTOR, MVT::v8i16, Legal);
709         setOperationAction(ISD::SCALAR_TO_VECTOR, MVT::v4i32, Legal);
710         setOperationAction(ISD::SCALAR_TO_VECTOR, MVT::v2i64, Legal);
711         setOperationAction(ISD::EXTRACT_VECTOR_ELT, MVT::v16i8, Legal);
712         setOperationAction(ISD::EXTRACT_VECTOR_ELT, MVT::v8i16, Legal);
713         setOperationAction(ISD::EXTRACT_VECTOR_ELT, MVT::v4i32, Legal);
714         setOperationAction(ISD::EXTRACT_VECTOR_ELT, MVT::v2i64, Legal);
715       }
716       setOperationAction(ISD::EXTRACT_VECTOR_ELT, MVT::v2f64, Legal);
717 
718       setOperationAction(ISD::FFLOOR, MVT::v2f64, Legal);
719       setOperationAction(ISD::FCEIL, MVT::v2f64, Legal);
720       setOperationAction(ISD::FTRUNC, MVT::v2f64, Legal);
721       setOperationAction(ISD::FNEARBYINT, MVT::v2f64, Legal);
722       setOperationAction(ISD::FROUND, MVT::v2f64, Legal);
723 
724       setOperationAction(ISD::FROUND, MVT::v4f32, Legal);
725 
726       setOperationAction(ISD::MUL, MVT::v2f64, Legal);
727       setOperationAction(ISD::FMA, MVT::v2f64, Legal);
728 
729       setOperationAction(ISD::FDIV, MVT::v2f64, Legal);
730       setOperationAction(ISD::FSQRT, MVT::v2f64, Legal);
731 
732       // Share the Altivec comparison restrictions.
733       setCondCodeAction(ISD::SETUO, MVT::v2f64, Expand);
734       setCondCodeAction(ISD::SETUEQ, MVT::v2f64, Expand);
735       setCondCodeAction(ISD::SETO,   MVT::v2f64, Expand);
736       setCondCodeAction(ISD::SETONE, MVT::v2f64, Expand);
737 
738       setOperationAction(ISD::LOAD, MVT::v2f64, Legal);
739       setOperationAction(ISD::STORE, MVT::v2f64, Legal);
740 
741       setOperationAction(ISD::VECTOR_SHUFFLE, MVT::v2f64, Legal);
742 
743       if (Subtarget.hasP8Vector())
744         addRegisterClass(MVT::f32, &PPC::VSSRCRegClass);
745 
746       addRegisterClass(MVT::f64, &PPC::VSFRCRegClass);
747 
748       addRegisterClass(MVT::v4i32, &PPC::VSRCRegClass);
749       addRegisterClass(MVT::v4f32, &PPC::VSRCRegClass);
750       addRegisterClass(MVT::v2f64, &PPC::VSRCRegClass);
751 
752       if (Subtarget.hasP8Altivec()) {
753         setOperationAction(ISD::SHL, MVT::v2i64, Legal);
754         setOperationAction(ISD::SRA, MVT::v2i64, Legal);
755         setOperationAction(ISD::SRL, MVT::v2i64, Legal);
756 
757         // 128 bit shifts can be accomplished via 3 instructions for SHL and
758         // SRL, but not for SRA because of the instructions available:
759         // VS{RL} and VS{RL}O. However due to direct move costs, it's not worth
760         // doing
761         setOperationAction(ISD::SHL, MVT::v1i128, Expand);
762         setOperationAction(ISD::SRL, MVT::v1i128, Expand);
763         setOperationAction(ISD::SRA, MVT::v1i128, Expand);
764 
765         setOperationAction(ISD::SETCC, MVT::v2i64, Legal);
766       }
767       else {
768         setOperationAction(ISD::SHL, MVT::v2i64, Expand);
769         setOperationAction(ISD::SRA, MVT::v2i64, Expand);
770         setOperationAction(ISD::SRL, MVT::v2i64, Expand);
771 
772         setOperationAction(ISD::SETCC, MVT::v2i64, Custom);
773 
774         // VSX v2i64 only supports non-arithmetic operations.
775         setOperationAction(ISD::ADD, MVT::v2i64, Expand);
776         setOperationAction(ISD::SUB, MVT::v2i64, Expand);
777       }
778 
779       setOperationAction(ISD::LOAD, MVT::v2i64, Promote);
780       AddPromotedToType (ISD::LOAD, MVT::v2i64, MVT::v2f64);
781       setOperationAction(ISD::STORE, MVT::v2i64, Promote);
782       AddPromotedToType (ISD::STORE, MVT::v2i64, MVT::v2f64);
783 
784       setOperationAction(ISD::VECTOR_SHUFFLE, MVT::v2i64, Legal);
785 
786       setOperationAction(ISD::SINT_TO_FP, MVT::v2i64, Legal);
787       setOperationAction(ISD::UINT_TO_FP, MVT::v2i64, Legal);
788       setOperationAction(ISD::FP_TO_SINT, MVT::v2i64, Legal);
789       setOperationAction(ISD::FP_TO_UINT, MVT::v2i64, Legal);
790 
791       // Vector operation legalization checks the result type of
792       // SIGN_EXTEND_INREG, overall legalization checks the inner type.
793       setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::v2i64, Legal);
794       setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::v2i32, Legal);
795       setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::v2i16, Custom);
796       setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::v2i8, Custom);
797 
798       setOperationAction(ISD::FNEG, MVT::v4f32, Legal);
799       setOperationAction(ISD::FNEG, MVT::v2f64, Legal);
800       setOperationAction(ISD::FABS, MVT::v4f32, Legal);
801       setOperationAction(ISD::FABS, MVT::v2f64, Legal);
802 
803       if (Subtarget.hasDirectMove())
804         setOperationAction(ISD::BUILD_VECTOR, MVT::v2i64, Custom);
805       setOperationAction(ISD::BUILD_VECTOR, MVT::v2f64, Custom);
806 
807       addRegisterClass(MVT::v2i64, &PPC::VSRCRegClass);
808     }
809 
810     if (Subtarget.hasP8Altivec()) {
811       addRegisterClass(MVT::v2i64, &PPC::VRRCRegClass);
812       addRegisterClass(MVT::v1i128, &PPC::VRRCRegClass);
813     }
814 
815     if (Subtarget.hasP9Vector()) {
816       setOperationAction(ISD::INSERT_VECTOR_ELT, MVT::v4i32, Custom);
817       setOperationAction(ISD::INSERT_VECTOR_ELT, MVT::v4f32, Custom);
818 
819       // 128 bit shifts can be accomplished via 3 instructions for SHL and
820       // SRL, but not for SRA because of the instructions available:
821       // VS{RL} and VS{RL}O.
822       setOperationAction(ISD::SHL, MVT::v1i128, Legal);
823       setOperationAction(ISD::SRL, MVT::v1i128, Legal);
824       setOperationAction(ISD::SRA, MVT::v1i128, Expand);
825 
826       if (EnableQuadPrecision) {
827         addRegisterClass(MVT::f128, &PPC::VRRCRegClass);
828         setOperationAction(ISD::FADD, MVT::f128, Legal);
829         setOperationAction(ISD::FSUB, MVT::f128, Legal);
830         setOperationAction(ISD::FDIV, MVT::f128, Legal);
831         setOperationAction(ISD::FMUL, MVT::f128, Legal);
832         setOperationAction(ISD::FP_EXTEND, MVT::f128, Legal);
833         // No extending loads to f128 on PPC.
834         for (MVT FPT : MVT::fp_valuetypes())
835           setLoadExtAction(ISD::EXTLOAD, MVT::f128, FPT, Expand);
836         setOperationAction(ISD::FMA, MVT::f128, Legal);
837         setCondCodeAction(ISD::SETULT, MVT::f128, Expand);
838         setCondCodeAction(ISD::SETUGT, MVT::f128, Expand);
839         setCondCodeAction(ISD::SETUEQ, MVT::f128, Expand);
840         setCondCodeAction(ISD::SETOGE, MVT::f128, Expand);
841         setCondCodeAction(ISD::SETOLE, MVT::f128, Expand);
842         setCondCodeAction(ISD::SETONE, MVT::f128, Expand);
843 
844         setOperationAction(ISD::FTRUNC, MVT::f128, Legal);
845         setOperationAction(ISD::FRINT, MVT::f128, Legal);
846         setOperationAction(ISD::FFLOOR, MVT::f128, Legal);
847         setOperationAction(ISD::FCEIL, MVT::f128, Legal);
848         setOperationAction(ISD::FNEARBYINT, MVT::f128, Legal);
849         setOperationAction(ISD::FROUND, MVT::f128, Legal);
850 
851         setOperationAction(ISD::SELECT, MVT::f128, Expand);
852         setOperationAction(ISD::FP_ROUND, MVT::f64, Legal);
853         setOperationAction(ISD::FP_ROUND, MVT::f32, Legal);
854         setTruncStoreAction(MVT::f128, MVT::f64, Expand);
855         setTruncStoreAction(MVT::f128, MVT::f32, Expand);
856         setOperationAction(ISD::BITCAST, MVT::i128, Custom);
857         // No implementation for these ops for PowerPC.
858         setOperationAction(ISD::FSIN , MVT::f128, Expand);
859         setOperationAction(ISD::FCOS , MVT::f128, Expand);
860         setOperationAction(ISD::FPOW, MVT::f128, Expand);
861         setOperationAction(ISD::FPOWI, MVT::f128, Expand);
862         setOperationAction(ISD::FREM, MVT::f128, Expand);
863       }
864 
865     }
866 
867     if (Subtarget.hasP9Altivec()) {
868       setOperationAction(ISD::INSERT_VECTOR_ELT, MVT::v8i16, Custom);
869       setOperationAction(ISD::INSERT_VECTOR_ELT, MVT::v16i8, Custom);
870     }
871   }
872 
873   if (Subtarget.hasQPX()) {
874     setOperationAction(ISD::FADD, MVT::v4f64, Legal);
875     setOperationAction(ISD::FSUB, MVT::v4f64, Legal);
876     setOperationAction(ISD::FMUL, MVT::v4f64, Legal);
877     setOperationAction(ISD::FREM, MVT::v4f64, Expand);
878 
879     setOperationAction(ISD::FCOPYSIGN, MVT::v4f64, Legal);
880     setOperationAction(ISD::FGETSIGN, MVT::v4f64, Expand);
881 
882     setOperationAction(ISD::LOAD  , MVT::v4f64, Custom);
883     setOperationAction(ISD::STORE , MVT::v4f64, Custom);
884 
885     setTruncStoreAction(MVT::v4f64, MVT::v4f32, Custom);
886     setLoadExtAction(ISD::EXTLOAD, MVT::v4f64, MVT::v4f32, Custom);
887 
888     if (!Subtarget.useCRBits())
889       setOperationAction(ISD::SELECT, MVT::v4f64, Expand);
890     setOperationAction(ISD::VSELECT, MVT::v4f64, Legal);
891 
892     setOperationAction(ISD::EXTRACT_VECTOR_ELT , MVT::v4f64, Legal);
893     setOperationAction(ISD::INSERT_VECTOR_ELT , MVT::v4f64, Expand);
894     setOperationAction(ISD::CONCAT_VECTORS , MVT::v4f64, Expand);
895     setOperationAction(ISD::EXTRACT_SUBVECTOR , MVT::v4f64, Expand);
896     setOperationAction(ISD::VECTOR_SHUFFLE , MVT::v4f64, Custom);
897     setOperationAction(ISD::SCALAR_TO_VECTOR, MVT::v4f64, Legal);
898     setOperationAction(ISD::BUILD_VECTOR, MVT::v4f64, Custom);
899 
900     setOperationAction(ISD::FP_TO_SINT , MVT::v4f64, Legal);
901     setOperationAction(ISD::FP_TO_UINT , MVT::v4f64, Expand);
902 
903     setOperationAction(ISD::FP_ROUND , MVT::v4f32, Legal);
904     setOperationAction(ISD::FP_ROUND_INREG , MVT::v4f32, Expand);
905     setOperationAction(ISD::FP_EXTEND, MVT::v4f64, Legal);
906 
907     setOperationAction(ISD::FNEG , MVT::v4f64, Legal);
908     setOperationAction(ISD::FABS , MVT::v4f64, Legal);
909     setOperationAction(ISD::FSIN , MVT::v4f64, Expand);
910     setOperationAction(ISD::FCOS , MVT::v4f64, Expand);
911     setOperationAction(ISD::FPOW , MVT::v4f64, Expand);
912     setOperationAction(ISD::FLOG , MVT::v4f64, Expand);
913     setOperationAction(ISD::FLOG2 , MVT::v4f64, Expand);
914     setOperationAction(ISD::FLOG10 , MVT::v4f64, Expand);
915     setOperationAction(ISD::FEXP , MVT::v4f64, Expand);
916     setOperationAction(ISD::FEXP2 , MVT::v4f64, Expand);
917 
918     setOperationAction(ISD::FMINNUM, MVT::v4f64, Legal);
919     setOperationAction(ISD::FMAXNUM, MVT::v4f64, Legal);
920 
921     setIndexedLoadAction(ISD::PRE_INC, MVT::v4f64, Legal);
922     setIndexedStoreAction(ISD::PRE_INC, MVT::v4f64, Legal);
923 
924     addRegisterClass(MVT::v4f64, &PPC::QFRCRegClass);
925 
926     setOperationAction(ISD::FADD, MVT::v4f32, Legal);
927     setOperationAction(ISD::FSUB, MVT::v4f32, Legal);
928     setOperationAction(ISD::FMUL, MVT::v4f32, Legal);
929     setOperationAction(ISD::FREM, MVT::v4f32, Expand);
930 
931     setOperationAction(ISD::FCOPYSIGN, MVT::v4f32, Legal);
932     setOperationAction(ISD::FGETSIGN, MVT::v4f32, Expand);
933 
934     setOperationAction(ISD::LOAD  , MVT::v4f32, Custom);
935     setOperationAction(ISD::STORE , MVT::v4f32, Custom);
936 
937     if (!Subtarget.useCRBits())
938       setOperationAction(ISD::SELECT, MVT::v4f32, Expand);
939     setOperationAction(ISD::VSELECT, MVT::v4f32, Legal);
940 
941     setOperationAction(ISD::EXTRACT_VECTOR_ELT , MVT::v4f32, Legal);
942     setOperationAction(ISD::INSERT_VECTOR_ELT , MVT::v4f32, Expand);
943     setOperationAction(ISD::CONCAT_VECTORS , MVT::v4f32, Expand);
944     setOperationAction(ISD::EXTRACT_SUBVECTOR , MVT::v4f32, Expand);
945     setOperationAction(ISD::VECTOR_SHUFFLE , MVT::v4f32, Custom);
946     setOperationAction(ISD::SCALAR_TO_VECTOR, MVT::v4f32, Legal);
947     setOperationAction(ISD::BUILD_VECTOR, MVT::v4f32, Custom);
948 
949     setOperationAction(ISD::FP_TO_SINT , MVT::v4f32, Legal);
950     setOperationAction(ISD::FP_TO_UINT , MVT::v4f32, Expand);
951 
952     setOperationAction(ISD::FNEG , MVT::v4f32, Legal);
953     setOperationAction(ISD::FABS , MVT::v4f32, Legal);
954     setOperationAction(ISD::FSIN , MVT::v4f32, Expand);
955     setOperationAction(ISD::FCOS , MVT::v4f32, Expand);
956     setOperationAction(ISD::FPOW , MVT::v4f32, Expand);
957     setOperationAction(ISD::FLOG , MVT::v4f32, Expand);
958     setOperationAction(ISD::FLOG2 , MVT::v4f32, Expand);
959     setOperationAction(ISD::FLOG10 , MVT::v4f32, Expand);
960     setOperationAction(ISD::FEXP , MVT::v4f32, Expand);
961     setOperationAction(ISD::FEXP2 , MVT::v4f32, Expand);
962 
963     setOperationAction(ISD::FMINNUM, MVT::v4f32, Legal);
964     setOperationAction(ISD::FMAXNUM, MVT::v4f32, Legal);
965 
966     setIndexedLoadAction(ISD::PRE_INC, MVT::v4f32, Legal);
967     setIndexedStoreAction(ISD::PRE_INC, MVT::v4f32, Legal);
968 
969     addRegisterClass(MVT::v4f32, &PPC::QSRCRegClass);
970 
971     setOperationAction(ISD::AND , MVT::v4i1, Legal);
972     setOperationAction(ISD::OR , MVT::v4i1, Legal);
973     setOperationAction(ISD::XOR , MVT::v4i1, Legal);
974 
975     if (!Subtarget.useCRBits())
976       setOperationAction(ISD::SELECT, MVT::v4i1, Expand);
977     setOperationAction(ISD::VSELECT, MVT::v4i1, Legal);
978 
979     setOperationAction(ISD::LOAD  , MVT::v4i1, Custom);
980     setOperationAction(ISD::STORE , MVT::v4i1, Custom);
981 
982     setOperationAction(ISD::EXTRACT_VECTOR_ELT , MVT::v4i1, Custom);
983     setOperationAction(ISD::INSERT_VECTOR_ELT , MVT::v4i1, Expand);
984     setOperationAction(ISD::CONCAT_VECTORS , MVT::v4i1, Expand);
985     setOperationAction(ISD::EXTRACT_SUBVECTOR , MVT::v4i1, Expand);
986     setOperationAction(ISD::VECTOR_SHUFFLE , MVT::v4i1, Custom);
987     setOperationAction(ISD::SCALAR_TO_VECTOR, MVT::v4i1, Expand);
988     setOperationAction(ISD::BUILD_VECTOR, MVT::v4i1, Custom);
989 
990     setOperationAction(ISD::SINT_TO_FP, MVT::v4i1, Custom);
991     setOperationAction(ISD::UINT_TO_FP, MVT::v4i1, Custom);
992 
993     addRegisterClass(MVT::v4i1, &PPC::QBRCRegClass);
994 
995     setOperationAction(ISD::FFLOOR, MVT::v4f64, Legal);
996     setOperationAction(ISD::FCEIL,  MVT::v4f64, Legal);
997     setOperationAction(ISD::FTRUNC, MVT::v4f64, Legal);
998     setOperationAction(ISD::FROUND, MVT::v4f64, Legal);
999 
1000     setOperationAction(ISD::FFLOOR, MVT::v4f32, Legal);
1001     setOperationAction(ISD::FCEIL,  MVT::v4f32, Legal);
1002     setOperationAction(ISD::FTRUNC, MVT::v4f32, Legal);
1003     setOperationAction(ISD::FROUND, MVT::v4f32, Legal);
1004 
1005     setOperationAction(ISD::FNEARBYINT, MVT::v4f64, Expand);
1006     setOperationAction(ISD::FNEARBYINT, MVT::v4f32, Expand);
1007 
1008     // These need to set FE_INEXACT, and so cannot be vectorized here.
1009     setOperationAction(ISD::FRINT, MVT::v4f64, Expand);
1010     setOperationAction(ISD::FRINT, MVT::v4f32, Expand);
1011 
1012     if (TM.Options.UnsafeFPMath) {
1013       setOperationAction(ISD::FDIV, MVT::v4f64, Legal);
1014       setOperationAction(ISD::FSQRT, MVT::v4f64, Legal);
1015 
1016       setOperationAction(ISD::FDIV, MVT::v4f32, Legal);
1017       setOperationAction(ISD::FSQRT, MVT::v4f32, Legal);
1018     } else {
1019       setOperationAction(ISD::FDIV, MVT::v4f64, Expand);
1020       setOperationAction(ISD::FSQRT, MVT::v4f64, Expand);
1021 
1022       setOperationAction(ISD::FDIV, MVT::v4f32, Expand);
1023       setOperationAction(ISD::FSQRT, MVT::v4f32, Expand);
1024     }
1025   }
1026 
1027   if (Subtarget.has64BitSupport())
1028     setOperationAction(ISD::PREFETCH, MVT::Other, Legal);
1029 
1030   setOperationAction(ISD::READCYCLECOUNTER, MVT::i64, isPPC64 ? Legal : Custom);
1031 
1032   if (!isPPC64) {
1033     setOperationAction(ISD::ATOMIC_LOAD,  MVT::i64, Expand);
1034     setOperationAction(ISD::ATOMIC_STORE, MVT::i64, Expand);
1035   }
1036 
1037   setBooleanContents(ZeroOrOneBooleanContent);
1038 
1039   if (Subtarget.hasAltivec()) {
1040     // Altivec instructions set fields to all zeros or all ones.
1041     setBooleanVectorContents(ZeroOrNegativeOneBooleanContent);
1042   }
1043 
1044   if (!isPPC64) {
1045     // These libcalls are not available in 32-bit.
1046     setLibcallName(RTLIB::SHL_I128, nullptr);
1047     setLibcallName(RTLIB::SRL_I128, nullptr);
1048     setLibcallName(RTLIB::SRA_I128, nullptr);
1049   }
1050 
1051   setStackPointerRegisterToSaveRestore(isPPC64 ? PPC::X1 : PPC::R1);
1052 
1053   // We have target-specific dag combine patterns for the following nodes:
1054   setTargetDAGCombine(ISD::SHL);
1055   setTargetDAGCombine(ISD::SRA);
1056   setTargetDAGCombine(ISD::SRL);
1057   setTargetDAGCombine(ISD::SINT_TO_FP);
1058   setTargetDAGCombine(ISD::BUILD_VECTOR);
1059   if (Subtarget.hasFPCVT())
1060     setTargetDAGCombine(ISD::UINT_TO_FP);
1061   setTargetDAGCombine(ISD::LOAD);
1062   setTargetDAGCombine(ISD::STORE);
1063   setTargetDAGCombine(ISD::BR_CC);
1064   if (Subtarget.useCRBits())
1065     setTargetDAGCombine(ISD::BRCOND);
1066   setTargetDAGCombine(ISD::BSWAP);
1067   setTargetDAGCombine(ISD::INTRINSIC_WO_CHAIN);
1068   setTargetDAGCombine(ISD::INTRINSIC_W_CHAIN);
1069   setTargetDAGCombine(ISD::INTRINSIC_VOID);
1070 
1071   setTargetDAGCombine(ISD::SIGN_EXTEND);
1072   setTargetDAGCombine(ISD::ZERO_EXTEND);
1073   setTargetDAGCombine(ISD::ANY_EXTEND);
1074 
1075   if (Subtarget.useCRBits()) {
1076     setTargetDAGCombine(ISD::TRUNCATE);
1077     setTargetDAGCombine(ISD::SETCC);
1078     setTargetDAGCombine(ISD::SELECT_CC);
1079   }
1080 
1081   // Use reciprocal estimates.
1082   if (TM.Options.UnsafeFPMath) {
1083     setTargetDAGCombine(ISD::FDIV);
1084     setTargetDAGCombine(ISD::FSQRT);
1085   }
1086 
1087   // Darwin long double math library functions have $LDBL128 appended.
1088   if (Subtarget.isDarwin()) {
1089     setLibcallName(RTLIB::COS_PPCF128, "cosl$LDBL128");
1090     setLibcallName(RTLIB::POW_PPCF128, "powl$LDBL128");
1091     setLibcallName(RTLIB::REM_PPCF128, "fmodl$LDBL128");
1092     setLibcallName(RTLIB::SIN_PPCF128, "sinl$LDBL128");
1093     setLibcallName(RTLIB::SQRT_PPCF128, "sqrtl$LDBL128");
1094     setLibcallName(RTLIB::LOG_PPCF128, "logl$LDBL128");
1095     setLibcallName(RTLIB::LOG2_PPCF128, "log2l$LDBL128");
1096     setLibcallName(RTLIB::LOG10_PPCF128, "log10l$LDBL128");
1097     setLibcallName(RTLIB::EXP_PPCF128, "expl$LDBL128");
1098     setLibcallName(RTLIB::EXP2_PPCF128, "exp2l$LDBL128");
1099   }
1100 
1101   if (EnableQuadPrecision) {
1102     setLibcallName(RTLIB::LOG_F128, "logf128");
1103     setLibcallName(RTLIB::LOG2_F128, "log2f128");
1104     setLibcallName(RTLIB::LOG10_F128, "log10f128");
1105     setLibcallName(RTLIB::EXP_F128, "expf128");
1106     setLibcallName(RTLIB::EXP2_F128, "exp2f128");
1107     setLibcallName(RTLIB::SIN_F128, "sinf128");
1108     setLibcallName(RTLIB::COS_F128, "cosf128");
1109     setLibcallName(RTLIB::POW_F128, "powf128");
1110     setLibcallName(RTLIB::FMIN_F128, "fminf128");
1111     setLibcallName(RTLIB::FMAX_F128, "fmaxf128");
1112     setLibcallName(RTLIB::POWI_F128, "__powikf2");
1113     setLibcallName(RTLIB::REM_F128, "fmodf128");
1114   }
1115 
1116   // With 32 condition bits, we don't need to sink (and duplicate) compares
1117   // aggressively in CodeGenPrep.
1118   if (Subtarget.useCRBits()) {
1119     setHasMultipleConditionRegisters();
1120     setJumpIsExpensive();
1121   }
1122 
1123   setMinFunctionAlignment(2);
1124   if (Subtarget.isDarwin())
1125     setPrefFunctionAlignment(4);
1126 
1127   switch (Subtarget.getDarwinDirective()) {
1128   default: break;
1129   case PPC::DIR_970:
1130   case PPC::DIR_A2:
1131   case PPC::DIR_E500:
1132   case PPC::DIR_E500mc:
1133   case PPC::DIR_E5500:
1134   case PPC::DIR_PWR4:
1135   case PPC::DIR_PWR5:
1136   case PPC::DIR_PWR5X:
1137   case PPC::DIR_PWR6:
1138   case PPC::DIR_PWR6X:
1139   case PPC::DIR_PWR7:
1140   case PPC::DIR_PWR8:
1141   case PPC::DIR_PWR9:
1142     setPrefFunctionAlignment(4);
1143     setPrefLoopAlignment(4);
1144     break;
1145   }
1146 
1147   if (Subtarget.enableMachineScheduler())
1148     setSchedulingPreference(Sched::Source);
1149   else
1150     setSchedulingPreference(Sched::Hybrid);
1151 
1152   computeRegisterProperties(STI.getRegisterInfo());
1153 
1154   // The Freescale cores do better with aggressive inlining of memcpy and
1155   // friends. GCC uses same threshold of 128 bytes (= 32 word stores).
1156   if (Subtarget.getDarwinDirective() == PPC::DIR_E500mc ||
1157       Subtarget.getDarwinDirective() == PPC::DIR_E5500) {
1158     MaxStoresPerMemset = 32;
1159     MaxStoresPerMemsetOptSize = 16;
1160     MaxStoresPerMemcpy = 32;
1161     MaxStoresPerMemcpyOptSize = 8;
1162     MaxStoresPerMemmove = 32;
1163     MaxStoresPerMemmoveOptSize = 8;
1164   } else if (Subtarget.getDarwinDirective() == PPC::DIR_A2) {
1165     // The A2 also benefits from (very) aggressive inlining of memcpy and
1166     // friends. The overhead of a the function call, even when warm, can be
1167     // over one hundred cycles.
1168     MaxStoresPerMemset = 128;
1169     MaxStoresPerMemcpy = 128;
1170     MaxStoresPerMemmove = 128;
1171     MaxLoadsPerMemcmp = 128;
1172   } else {
1173     MaxLoadsPerMemcmp = 8;
1174     MaxLoadsPerMemcmpOptSize = 4;
1175   }
1176 }
1177 
1178 /// getMaxByValAlign - Helper for getByValTypeAlignment to determine
1179 /// the desired ByVal argument alignment.
1180 static void getMaxByValAlign(Type *Ty, unsigned &MaxAlign,
1181                              unsigned MaxMaxAlign) {
1182   if (MaxAlign == MaxMaxAlign)
1183     return;
1184   if (VectorType *VTy = dyn_cast<VectorType>(Ty)) {
1185     if (MaxMaxAlign >= 32 && VTy->getBitWidth() >= 256)
1186       MaxAlign = 32;
1187     else if (VTy->getBitWidth() >= 128 && MaxAlign < 16)
1188       MaxAlign = 16;
1189   } else if (ArrayType *ATy = dyn_cast<ArrayType>(Ty)) {
1190     unsigned EltAlign = 0;
1191     getMaxByValAlign(ATy->getElementType(), EltAlign, MaxMaxAlign);
1192     if (EltAlign > MaxAlign)
1193       MaxAlign = EltAlign;
1194   } else if (StructType *STy = dyn_cast<StructType>(Ty)) {
1195     for (auto *EltTy : STy->elements()) {
1196       unsigned EltAlign = 0;
1197       getMaxByValAlign(EltTy, EltAlign, MaxMaxAlign);
1198       if (EltAlign > MaxAlign)
1199         MaxAlign = EltAlign;
1200       if (MaxAlign == MaxMaxAlign)
1201         break;
1202     }
1203   }
1204 }
1205 
1206 /// getByValTypeAlignment - Return the desired alignment for ByVal aggregate
1207 /// function arguments in the caller parameter area.
1208 unsigned PPCTargetLowering::getByValTypeAlignment(Type *Ty,
1209                                                   const DataLayout &DL) const {
1210   // Darwin passes everything on 4 byte boundary.
1211   if (Subtarget.isDarwin())
1212     return 4;
1213 
1214   // 16byte and wider vectors are passed on 16byte boundary.
1215   // The rest is 8 on PPC64 and 4 on PPC32 boundary.
1216   unsigned Align = Subtarget.isPPC64() ? 8 : 4;
1217   if (Subtarget.hasAltivec() || Subtarget.hasQPX())
1218     getMaxByValAlign(Ty, Align, Subtarget.hasQPX() ? 32 : 16);
1219   return Align;
1220 }
1221 
1222 unsigned PPCTargetLowering::getNumRegistersForCallingConv(LLVMContext &Context,
1223                                                           CallingConv:: ID CC,
1224                                                           EVT VT) const {
1225   if (Subtarget.hasSPE() && VT == MVT::f64)
1226     return 2;
1227   return PPCTargetLowering::getNumRegisters(Context, VT);
1228 }
1229 
1230 MVT PPCTargetLowering::getRegisterTypeForCallingConv(LLVMContext &Context,
1231                                                      CallingConv:: ID CC,
1232                                                      EVT VT) const {
1233   if (Subtarget.hasSPE() && VT == MVT::f64)
1234     return MVT::i32;
1235   return PPCTargetLowering::getRegisterType(Context, VT);
1236 }
1237 
1238 bool PPCTargetLowering::useSoftFloat() const {
1239   return Subtarget.useSoftFloat();
1240 }
1241 
1242 bool PPCTargetLowering::hasSPE() const {
1243   return Subtarget.hasSPE();
1244 }
1245 
1246 const char *PPCTargetLowering::getTargetNodeName(unsigned Opcode) const {
1247   switch ((PPCISD::NodeType)Opcode) {
1248   case PPCISD::FIRST_NUMBER:    break;
1249   case PPCISD::FSEL:            return "PPCISD::FSEL";
1250   case PPCISD::FCFID:           return "PPCISD::FCFID";
1251   case PPCISD::FCFIDU:          return "PPCISD::FCFIDU";
1252   case PPCISD::FCFIDS:          return "PPCISD::FCFIDS";
1253   case PPCISD::FCFIDUS:         return "PPCISD::FCFIDUS";
1254   case PPCISD::FCTIDZ:          return "PPCISD::FCTIDZ";
1255   case PPCISD::FCTIWZ:          return "PPCISD::FCTIWZ";
1256   case PPCISD::FCTIDUZ:         return "PPCISD::FCTIDUZ";
1257   case PPCISD::FCTIWUZ:         return "PPCISD::FCTIWUZ";
1258   case PPCISD::FP_TO_UINT_IN_VSR:
1259                                 return "PPCISD::FP_TO_UINT_IN_VSR,";
1260   case PPCISD::FP_TO_SINT_IN_VSR:
1261                                 return "PPCISD::FP_TO_SINT_IN_VSR";
1262   case PPCISD::FRE:             return "PPCISD::FRE";
1263   case PPCISD::FRSQRTE:         return "PPCISD::FRSQRTE";
1264   case PPCISD::STFIWX:          return "PPCISD::STFIWX";
1265   case PPCISD::VMADDFP:         return "PPCISD::VMADDFP";
1266   case PPCISD::VNMSUBFP:        return "PPCISD::VNMSUBFP";
1267   case PPCISD::VPERM:           return "PPCISD::VPERM";
1268   case PPCISD::XXSPLT:          return "PPCISD::XXSPLT";
1269   case PPCISD::VECINSERT:       return "PPCISD::VECINSERT";
1270   case PPCISD::XXREVERSE:       return "PPCISD::XXREVERSE";
1271   case PPCISD::XXPERMDI:        return "PPCISD::XXPERMDI";
1272   case PPCISD::VECSHL:          return "PPCISD::VECSHL";
1273   case PPCISD::CMPB:            return "PPCISD::CMPB";
1274   case PPCISD::Hi:              return "PPCISD::Hi";
1275   case PPCISD::Lo:              return "PPCISD::Lo";
1276   case PPCISD::TOC_ENTRY:       return "PPCISD::TOC_ENTRY";
1277   case PPCISD::ATOMIC_CMP_SWAP_8: return "PPCISD::ATOMIC_CMP_SWAP_8";
1278   case PPCISD::ATOMIC_CMP_SWAP_16: return "PPCISD::ATOMIC_CMP_SWAP_16";
1279   case PPCISD::DYNALLOC:        return "PPCISD::DYNALLOC";
1280   case PPCISD::DYNAREAOFFSET:   return "PPCISD::DYNAREAOFFSET";
1281   case PPCISD::GlobalBaseReg:   return "PPCISD::GlobalBaseReg";
1282   case PPCISD::SRL:             return "PPCISD::SRL";
1283   case PPCISD::SRA:             return "PPCISD::SRA";
1284   case PPCISD::SHL:             return "PPCISD::SHL";
1285   case PPCISD::SRA_ADDZE:       return "PPCISD::SRA_ADDZE";
1286   case PPCISD::CALL:            return "PPCISD::CALL";
1287   case PPCISD::CALL_NOP:        return "PPCISD::CALL_NOP";
1288   case PPCISD::MTCTR:           return "PPCISD::MTCTR";
1289   case PPCISD::BCTRL:           return "PPCISD::BCTRL";
1290   case PPCISD::BCTRL_LOAD_TOC:  return "PPCISD::BCTRL_LOAD_TOC";
1291   case PPCISD::RET_FLAG:        return "PPCISD::RET_FLAG";
1292   case PPCISD::READ_TIME_BASE:  return "PPCISD::READ_TIME_BASE";
1293   case PPCISD::EH_SJLJ_SETJMP:  return "PPCISD::EH_SJLJ_SETJMP";
1294   case PPCISD::EH_SJLJ_LONGJMP: return "PPCISD::EH_SJLJ_LONGJMP";
1295   case PPCISD::MFOCRF:          return "PPCISD::MFOCRF";
1296   case PPCISD::MFVSR:           return "PPCISD::MFVSR";
1297   case PPCISD::MTVSRA:          return "PPCISD::MTVSRA";
1298   case PPCISD::MTVSRZ:          return "PPCISD::MTVSRZ";
1299   case PPCISD::SINT_VEC_TO_FP:  return "PPCISD::SINT_VEC_TO_FP";
1300   case PPCISD::UINT_VEC_TO_FP:  return "PPCISD::UINT_VEC_TO_FP";
1301   case PPCISD::ANDIo_1_EQ_BIT:  return "PPCISD::ANDIo_1_EQ_BIT";
1302   case PPCISD::ANDIo_1_GT_BIT:  return "PPCISD::ANDIo_1_GT_BIT";
1303   case PPCISD::VCMP:            return "PPCISD::VCMP";
1304   case PPCISD::VCMPo:           return "PPCISD::VCMPo";
1305   case PPCISD::LBRX:            return "PPCISD::LBRX";
1306   case PPCISD::STBRX:           return "PPCISD::STBRX";
1307   case PPCISD::LFIWAX:          return "PPCISD::LFIWAX";
1308   case PPCISD::LFIWZX:          return "PPCISD::LFIWZX";
1309   case PPCISD::LXSIZX:          return "PPCISD::LXSIZX";
1310   case PPCISD::STXSIX:          return "PPCISD::STXSIX";
1311   case PPCISD::VEXTS:           return "PPCISD::VEXTS";
1312   case PPCISD::SExtVElems:      return "PPCISD::SExtVElems";
1313   case PPCISD::LXVD2X:          return "PPCISD::LXVD2X";
1314   case PPCISD::STXVD2X:         return "PPCISD::STXVD2X";
1315   case PPCISD::ST_VSR_SCAL_INT:
1316                                 return "PPCISD::ST_VSR_SCAL_INT";
1317   case PPCISD::COND_BRANCH:     return "PPCISD::COND_BRANCH";
1318   case PPCISD::BDNZ:            return "PPCISD::BDNZ";
1319   case PPCISD::BDZ:             return "PPCISD::BDZ";
1320   case PPCISD::MFFS:            return "PPCISD::MFFS";
1321   case PPCISD::FADDRTZ:         return "PPCISD::FADDRTZ";
1322   case PPCISD::TC_RETURN:       return "PPCISD::TC_RETURN";
1323   case PPCISD::CR6SET:          return "PPCISD::CR6SET";
1324   case PPCISD::CR6UNSET:        return "PPCISD::CR6UNSET";
1325   case PPCISD::PPC32_GOT:       return "PPCISD::PPC32_GOT";
1326   case PPCISD::PPC32_PICGOT:    return "PPCISD::PPC32_PICGOT";
1327   case PPCISD::ADDIS_GOT_TPREL_HA: return "PPCISD::ADDIS_GOT_TPREL_HA";
1328   case PPCISD::LD_GOT_TPREL_L:  return "PPCISD::LD_GOT_TPREL_L";
1329   case PPCISD::ADD_TLS:         return "PPCISD::ADD_TLS";
1330   case PPCISD::ADDIS_TLSGD_HA:  return "PPCISD::ADDIS_TLSGD_HA";
1331   case PPCISD::ADDI_TLSGD_L:    return "PPCISD::ADDI_TLSGD_L";
1332   case PPCISD::GET_TLS_ADDR:    return "PPCISD::GET_TLS_ADDR";
1333   case PPCISD::ADDI_TLSGD_L_ADDR: return "PPCISD::ADDI_TLSGD_L_ADDR";
1334   case PPCISD::ADDIS_TLSLD_HA:  return "PPCISD::ADDIS_TLSLD_HA";
1335   case PPCISD::ADDI_TLSLD_L:    return "PPCISD::ADDI_TLSLD_L";
1336   case PPCISD::GET_TLSLD_ADDR:  return "PPCISD::GET_TLSLD_ADDR";
1337   case PPCISD::ADDI_TLSLD_L_ADDR: return "PPCISD::ADDI_TLSLD_L_ADDR";
1338   case PPCISD::ADDIS_DTPREL_HA: return "PPCISD::ADDIS_DTPREL_HA";
1339   case PPCISD::ADDI_DTPREL_L:   return "PPCISD::ADDI_DTPREL_L";
1340   case PPCISD::VADD_SPLAT:      return "PPCISD::VADD_SPLAT";
1341   case PPCISD::SC:              return "PPCISD::SC";
1342   case PPCISD::CLRBHRB:         return "PPCISD::CLRBHRB";
1343   case PPCISD::MFBHRBE:         return "PPCISD::MFBHRBE";
1344   case PPCISD::RFEBB:           return "PPCISD::RFEBB";
1345   case PPCISD::XXSWAPD:         return "PPCISD::XXSWAPD";
1346   case PPCISD::SWAP_NO_CHAIN:   return "PPCISD::SWAP_NO_CHAIN";
1347   case PPCISD::QVFPERM:         return "PPCISD::QVFPERM";
1348   case PPCISD::QVGPCI:          return "PPCISD::QVGPCI";
1349   case PPCISD::QVALIGNI:        return "PPCISD::QVALIGNI";
1350   case PPCISD::QVESPLATI:       return "PPCISD::QVESPLATI";
1351   case PPCISD::QBFLT:           return "PPCISD::QBFLT";
1352   case PPCISD::QVLFSb:          return "PPCISD::QVLFSb";
1353   case PPCISD::BUILD_FP128:     return "PPCISD::BUILD_FP128";
1354   }
1355   return nullptr;
1356 }
1357 
1358 EVT PPCTargetLowering::getSetCCResultType(const DataLayout &DL, LLVMContext &C,
1359                                           EVT VT) const {
1360   if (!VT.isVector())
1361     return Subtarget.useCRBits() ? MVT::i1 : MVT::i32;
1362 
1363   if (Subtarget.hasQPX())
1364     return EVT::getVectorVT(C, MVT::i1, VT.getVectorNumElements());
1365 
1366   return VT.changeVectorElementTypeToInteger();
1367 }
1368 
1369 bool PPCTargetLowering::enableAggressiveFMAFusion(EVT VT) const {
1370   assert(VT.isFloatingPoint() && "Non-floating-point FMA?");
1371   return true;
1372 }
1373 
1374 //===----------------------------------------------------------------------===//
1375 // Node matching predicates, for use by the tblgen matching code.
1376 //===----------------------------------------------------------------------===//
1377 
1378 /// isFloatingPointZero - Return true if this is 0.0 or -0.0.
1379 static bool isFloatingPointZero(SDValue Op) {
1380   if (ConstantFPSDNode *CFP = dyn_cast<ConstantFPSDNode>(Op))
1381     return CFP->getValueAPF().isZero();
1382   else if (ISD::isEXTLoad(Op.getNode()) || ISD::isNON_EXTLoad(Op.getNode())) {
1383     // Maybe this has already been legalized into the constant pool?
1384     if (ConstantPoolSDNode *CP = dyn_cast<ConstantPoolSDNode>(Op.getOperand(1)))
1385       if (const ConstantFP *CFP = dyn_cast<ConstantFP>(CP->getConstVal()))
1386         return CFP->getValueAPF().isZero();
1387   }
1388   return false;
1389 }
1390 
1391 /// isConstantOrUndef - Op is either an undef node or a ConstantSDNode.  Return
1392 /// true if Op is undef or if it matches the specified value.
1393 static bool isConstantOrUndef(int Op, int Val) {
1394   return Op < 0 || Op == Val;
1395 }
1396 
1397 /// isVPKUHUMShuffleMask - Return true if this is the shuffle mask for a
1398 /// VPKUHUM instruction.
1399 /// The ShuffleKind distinguishes between big-endian operations with
1400 /// two different inputs (0), either-endian operations with two identical
1401 /// inputs (1), and little-endian operations with two different inputs (2).
1402 /// For the latter, the input operands are swapped (see PPCInstrAltivec.td).
1403 bool PPC::isVPKUHUMShuffleMask(ShuffleVectorSDNode *N, unsigned ShuffleKind,
1404                                SelectionDAG &DAG) {
1405   bool IsLE = DAG.getDataLayout().isLittleEndian();
1406   if (ShuffleKind == 0) {
1407     if (IsLE)
1408       return false;
1409     for (unsigned i = 0; i != 16; ++i)
1410       if (!isConstantOrUndef(N->getMaskElt(i), i*2+1))
1411         return false;
1412   } else if (ShuffleKind == 2) {
1413     if (!IsLE)
1414       return false;
1415     for (unsigned i = 0; i != 16; ++i)
1416       if (!isConstantOrUndef(N->getMaskElt(i), i*2))
1417         return false;
1418   } else if (ShuffleKind == 1) {
1419     unsigned j = IsLE ? 0 : 1;
1420     for (unsigned i = 0; i != 8; ++i)
1421       if (!isConstantOrUndef(N->getMaskElt(i),    i*2+j) ||
1422           !isConstantOrUndef(N->getMaskElt(i+8),  i*2+j))
1423         return false;
1424   }
1425   return true;
1426 }
1427 
1428 /// isVPKUWUMShuffleMask - Return true if this is the shuffle mask for a
1429 /// VPKUWUM instruction.
1430 /// The ShuffleKind distinguishes between big-endian operations with
1431 /// two different inputs (0), either-endian operations with two identical
1432 /// inputs (1), and little-endian operations with two different inputs (2).
1433 /// For the latter, the input operands are swapped (see PPCInstrAltivec.td).
1434 bool PPC::isVPKUWUMShuffleMask(ShuffleVectorSDNode *N, unsigned ShuffleKind,
1435                                SelectionDAG &DAG) {
1436   bool IsLE = DAG.getDataLayout().isLittleEndian();
1437   if (ShuffleKind == 0) {
1438     if (IsLE)
1439       return false;
1440     for (unsigned i = 0; i != 16; i += 2)
1441       if (!isConstantOrUndef(N->getMaskElt(i  ),  i*2+2) ||
1442           !isConstantOrUndef(N->getMaskElt(i+1),  i*2+3))
1443         return false;
1444   } else if (ShuffleKind == 2) {
1445     if (!IsLE)
1446       return false;
1447     for (unsigned i = 0; i != 16; i += 2)
1448       if (!isConstantOrUndef(N->getMaskElt(i  ),  i*2) ||
1449           !isConstantOrUndef(N->getMaskElt(i+1),  i*2+1))
1450         return false;
1451   } else if (ShuffleKind == 1) {
1452     unsigned j = IsLE ? 0 : 2;
1453     for (unsigned i = 0; i != 8; i += 2)
1454       if (!isConstantOrUndef(N->getMaskElt(i  ),  i*2+j)   ||
1455           !isConstantOrUndef(N->getMaskElt(i+1),  i*2+j+1) ||
1456           !isConstantOrUndef(N->getMaskElt(i+8),  i*2+j)   ||
1457           !isConstantOrUndef(N->getMaskElt(i+9),  i*2+j+1))
1458         return false;
1459   }
1460   return true;
1461 }
1462 
1463 /// isVPKUDUMShuffleMask - Return true if this is the shuffle mask for a
1464 /// VPKUDUM instruction, AND the VPKUDUM instruction exists for the
1465 /// current subtarget.
1466 ///
1467 /// The ShuffleKind distinguishes between big-endian operations with
1468 /// two different inputs (0), either-endian operations with two identical
1469 /// inputs (1), and little-endian operations with two different inputs (2).
1470 /// For the latter, the input operands are swapped (see PPCInstrAltivec.td).
1471 bool PPC::isVPKUDUMShuffleMask(ShuffleVectorSDNode *N, unsigned ShuffleKind,
1472                                SelectionDAG &DAG) {
1473   const PPCSubtarget& Subtarget =
1474     static_cast<const PPCSubtarget&>(DAG.getSubtarget());
1475   if (!Subtarget.hasP8Vector())
1476     return false;
1477 
1478   bool IsLE = DAG.getDataLayout().isLittleEndian();
1479   if (ShuffleKind == 0) {
1480     if (IsLE)
1481       return false;
1482     for (unsigned i = 0; i != 16; i += 4)
1483       if (!isConstantOrUndef(N->getMaskElt(i  ),  i*2+4) ||
1484           !isConstantOrUndef(N->getMaskElt(i+1),  i*2+5) ||
1485           !isConstantOrUndef(N->getMaskElt(i+2),  i*2+6) ||
1486           !isConstantOrUndef(N->getMaskElt(i+3),  i*2+7))
1487         return false;
1488   } else if (ShuffleKind == 2) {
1489     if (!IsLE)
1490       return false;
1491     for (unsigned i = 0; i != 16; i += 4)
1492       if (!isConstantOrUndef(N->getMaskElt(i  ),  i*2) ||
1493           !isConstantOrUndef(N->getMaskElt(i+1),  i*2+1) ||
1494           !isConstantOrUndef(N->getMaskElt(i+2),  i*2+2) ||
1495           !isConstantOrUndef(N->getMaskElt(i+3),  i*2+3))
1496         return false;
1497   } else if (ShuffleKind == 1) {
1498     unsigned j = IsLE ? 0 : 4;
1499     for (unsigned i = 0; i != 8; i += 4)
1500       if (!isConstantOrUndef(N->getMaskElt(i  ),  i*2+j)   ||
1501           !isConstantOrUndef(N->getMaskElt(i+1),  i*2+j+1) ||
1502           !isConstantOrUndef(N->getMaskElt(i+2),  i*2+j+2) ||
1503           !isConstantOrUndef(N->getMaskElt(i+3),  i*2+j+3) ||
1504           !isConstantOrUndef(N->getMaskElt(i+8),  i*2+j)   ||
1505           !isConstantOrUndef(N->getMaskElt(i+9),  i*2+j+1) ||
1506           !isConstantOrUndef(N->getMaskElt(i+10), i*2+j+2) ||
1507           !isConstantOrUndef(N->getMaskElt(i+11), i*2+j+3))
1508         return false;
1509   }
1510   return true;
1511 }
1512 
1513 /// isVMerge - Common function, used to match vmrg* shuffles.
1514 ///
1515 static bool isVMerge(ShuffleVectorSDNode *N, unsigned UnitSize,
1516                      unsigned LHSStart, unsigned RHSStart) {
1517   if (N->getValueType(0) != MVT::v16i8)
1518     return false;
1519   assert((UnitSize == 1 || UnitSize == 2 || UnitSize == 4) &&
1520          "Unsupported merge size!");
1521 
1522   for (unsigned i = 0; i != 8/UnitSize; ++i)     // Step over units
1523     for (unsigned j = 0; j != UnitSize; ++j) {   // Step over bytes within unit
1524       if (!isConstantOrUndef(N->getMaskElt(i*UnitSize*2+j),
1525                              LHSStart+j+i*UnitSize) ||
1526           !isConstantOrUndef(N->getMaskElt(i*UnitSize*2+UnitSize+j),
1527                              RHSStart+j+i*UnitSize))
1528         return false;
1529     }
1530   return true;
1531 }
1532 
1533 /// isVMRGLShuffleMask - Return true if this is a shuffle mask suitable for
1534 /// a VMRGL* instruction with the specified unit size (1,2 or 4 bytes).
1535 /// The ShuffleKind distinguishes between big-endian merges with two
1536 /// different inputs (0), either-endian merges with two identical inputs (1),
1537 /// and little-endian merges with two different inputs (2).  For the latter,
1538 /// the input operands are swapped (see PPCInstrAltivec.td).
1539 bool PPC::isVMRGLShuffleMask(ShuffleVectorSDNode *N, unsigned UnitSize,
1540                              unsigned ShuffleKind, SelectionDAG &DAG) {
1541   if (DAG.getDataLayout().isLittleEndian()) {
1542     if (ShuffleKind == 1) // unary
1543       return isVMerge(N, UnitSize, 0, 0);
1544     else if (ShuffleKind == 2) // swapped
1545       return isVMerge(N, UnitSize, 0, 16);
1546     else
1547       return false;
1548   } else {
1549     if (ShuffleKind == 1) // unary
1550       return isVMerge(N, UnitSize, 8, 8);
1551     else if (ShuffleKind == 0) // normal
1552       return isVMerge(N, UnitSize, 8, 24);
1553     else
1554       return false;
1555   }
1556 }
1557 
1558 /// isVMRGHShuffleMask - Return true if this is a shuffle mask suitable for
1559 /// a VMRGH* instruction with the specified unit size (1,2 or 4 bytes).
1560 /// The ShuffleKind distinguishes between big-endian merges with two
1561 /// different inputs (0), either-endian merges with two identical inputs (1),
1562 /// and little-endian merges with two different inputs (2).  For the latter,
1563 /// the input operands are swapped (see PPCInstrAltivec.td).
1564 bool PPC::isVMRGHShuffleMask(ShuffleVectorSDNode *N, unsigned UnitSize,
1565                              unsigned ShuffleKind, SelectionDAG &DAG) {
1566   if (DAG.getDataLayout().isLittleEndian()) {
1567     if (ShuffleKind == 1) // unary
1568       return isVMerge(N, UnitSize, 8, 8);
1569     else if (ShuffleKind == 2) // swapped
1570       return isVMerge(N, UnitSize, 8, 24);
1571     else
1572       return false;
1573   } else {
1574     if (ShuffleKind == 1) // unary
1575       return isVMerge(N, UnitSize, 0, 0);
1576     else if (ShuffleKind == 0) // normal
1577       return isVMerge(N, UnitSize, 0, 16);
1578     else
1579       return false;
1580   }
1581 }
1582 
1583 /**
1584  * Common function used to match vmrgew and vmrgow shuffles
1585  *
1586  * The indexOffset determines whether to look for even or odd words in
1587  * the shuffle mask. This is based on the of the endianness of the target
1588  * machine.
1589  *   - Little Endian:
1590  *     - Use offset of 0 to check for odd elements
1591  *     - Use offset of 4 to check for even elements
1592  *   - Big Endian:
1593  *     - Use offset of 0 to check for even elements
1594  *     - Use offset of 4 to check for odd elements
1595  * A detailed description of the vector element ordering for little endian and
1596  * big endian can be found at
1597  * http://www.ibm.com/developerworks/library/l-ibm-xl-c-cpp-compiler/index.html
1598  * Targeting your applications - what little endian and big endian IBM XL C/C++
1599  * compiler differences mean to you
1600  *
1601  * The mask to the shuffle vector instruction specifies the indices of the
1602  * elements from the two input vectors to place in the result. The elements are
1603  * numbered in array-access order, starting with the first vector. These vectors
1604  * are always of type v16i8, thus each vector will contain 16 elements of size
1605  * 8. More info on the shuffle vector can be found in the
1606  * http://llvm.org/docs/LangRef.html#shufflevector-instruction
1607  * Language Reference.
1608  *
1609  * The RHSStartValue indicates whether the same input vectors are used (unary)
1610  * or two different input vectors are used, based on the following:
1611  *   - If the instruction uses the same vector for both inputs, the range of the
1612  *     indices will be 0 to 15. In this case, the RHSStart value passed should
1613  *     be 0.
1614  *   - If the instruction has two different vectors then the range of the
1615  *     indices will be 0 to 31. In this case, the RHSStart value passed should
1616  *     be 16 (indices 0-15 specify elements in the first vector while indices 16
1617  *     to 31 specify elements in the second vector).
1618  *
1619  * \param[in] N The shuffle vector SD Node to analyze
1620  * \param[in] IndexOffset Specifies whether to look for even or odd elements
1621  * \param[in] RHSStartValue Specifies the starting index for the righthand input
1622  * vector to the shuffle_vector instruction
1623  * \return true iff this shuffle vector represents an even or odd word merge
1624  */
1625 static bool isVMerge(ShuffleVectorSDNode *N, unsigned IndexOffset,
1626                      unsigned RHSStartValue) {
1627   if (N->getValueType(0) != MVT::v16i8)
1628     return false;
1629 
1630   for (unsigned i = 0; i < 2; ++i)
1631     for (unsigned j = 0; j < 4; ++j)
1632       if (!isConstantOrUndef(N->getMaskElt(i*4+j),
1633                              i*RHSStartValue+j+IndexOffset) ||
1634           !isConstantOrUndef(N->getMaskElt(i*4+j+8),
1635                              i*RHSStartValue+j+IndexOffset+8))
1636         return false;
1637   return true;
1638 }
1639 
1640 /**
1641  * Determine if the specified shuffle mask is suitable for the vmrgew or
1642  * vmrgow instructions.
1643  *
1644  * \param[in] N The shuffle vector SD Node to analyze
1645  * \param[in] CheckEven Check for an even merge (true) or an odd merge (false)
1646  * \param[in] ShuffleKind Identify the type of merge:
1647  *   - 0 = big-endian merge with two different inputs;
1648  *   - 1 = either-endian merge with two identical inputs;
1649  *   - 2 = little-endian merge with two different inputs (inputs are swapped for
1650  *     little-endian merges).
1651  * \param[in] DAG The current SelectionDAG
1652  * \return true iff this shuffle mask
1653  */
1654 bool PPC::isVMRGEOShuffleMask(ShuffleVectorSDNode *N, bool CheckEven,
1655                               unsigned ShuffleKind, SelectionDAG &DAG) {
1656   if (DAG.getDataLayout().isLittleEndian()) {
1657     unsigned indexOffset = CheckEven ? 4 : 0;
1658     if (ShuffleKind == 1) // Unary
1659       return isVMerge(N, indexOffset, 0);
1660     else if (ShuffleKind == 2) // swapped
1661       return isVMerge(N, indexOffset, 16);
1662     else
1663       return false;
1664   }
1665   else {
1666     unsigned indexOffset = CheckEven ? 0 : 4;
1667     if (ShuffleKind == 1) // Unary
1668       return isVMerge(N, indexOffset, 0);
1669     else if (ShuffleKind == 0) // Normal
1670       return isVMerge(N, indexOffset, 16);
1671     else
1672       return false;
1673   }
1674   return false;
1675 }
1676 
1677 /// isVSLDOIShuffleMask - If this is a vsldoi shuffle mask, return the shift
1678 /// amount, otherwise return -1.
1679 /// The ShuffleKind distinguishes between big-endian operations with two
1680 /// different inputs (0), either-endian operations with two identical inputs
1681 /// (1), and little-endian operations with two different inputs (2).  For the
1682 /// latter, the input operands are swapped (see PPCInstrAltivec.td).
1683 int PPC::isVSLDOIShuffleMask(SDNode *N, unsigned ShuffleKind,
1684                              SelectionDAG &DAG) {
1685   if (N->getValueType(0) != MVT::v16i8)
1686     return -1;
1687 
1688   ShuffleVectorSDNode *SVOp = cast<ShuffleVectorSDNode>(N);
1689 
1690   // Find the first non-undef value in the shuffle mask.
1691   unsigned i;
1692   for (i = 0; i != 16 && SVOp->getMaskElt(i) < 0; ++i)
1693     /*search*/;
1694 
1695   if (i == 16) return -1;  // all undef.
1696 
1697   // Otherwise, check to see if the rest of the elements are consecutively
1698   // numbered from this value.
1699   unsigned ShiftAmt = SVOp->getMaskElt(i);
1700   if (ShiftAmt < i) return -1;
1701 
1702   ShiftAmt -= i;
1703   bool isLE = DAG.getDataLayout().isLittleEndian();
1704 
1705   if ((ShuffleKind == 0 && !isLE) || (ShuffleKind == 2 && isLE)) {
1706     // Check the rest of the elements to see if they are consecutive.
1707     for (++i; i != 16; ++i)
1708       if (!isConstantOrUndef(SVOp->getMaskElt(i), ShiftAmt+i))
1709         return -1;
1710   } else if (ShuffleKind == 1) {
1711     // Check the rest of the elements to see if they are consecutive.
1712     for (++i; i != 16; ++i)
1713       if (!isConstantOrUndef(SVOp->getMaskElt(i), (ShiftAmt+i) & 15))
1714         return -1;
1715   } else
1716     return -1;
1717 
1718   if (isLE)
1719     ShiftAmt = 16 - ShiftAmt;
1720 
1721   return ShiftAmt;
1722 }
1723 
1724 /// isSplatShuffleMask - Return true if the specified VECTOR_SHUFFLE operand
1725 /// specifies a splat of a single element that is suitable for input to
1726 /// VSPLTB/VSPLTH/VSPLTW.
1727 bool PPC::isSplatShuffleMask(ShuffleVectorSDNode *N, unsigned EltSize) {
1728   assert(N->getValueType(0) == MVT::v16i8 &&
1729          (EltSize == 1 || EltSize == 2 || EltSize == 4));
1730 
1731   // The consecutive indices need to specify an element, not part of two
1732   // different elements.  So abandon ship early if this isn't the case.
1733   if (N->getMaskElt(0) % EltSize != 0)
1734     return false;
1735 
1736   // This is a splat operation if each element of the permute is the same, and
1737   // if the value doesn't reference the second vector.
1738   unsigned ElementBase = N->getMaskElt(0);
1739 
1740   // FIXME: Handle UNDEF elements too!
1741   if (ElementBase >= 16)
1742     return false;
1743 
1744   // Check that the indices are consecutive, in the case of a multi-byte element
1745   // splatted with a v16i8 mask.
1746   for (unsigned i = 1; i != EltSize; ++i)
1747     if (N->getMaskElt(i) < 0 || N->getMaskElt(i) != (int)(i+ElementBase))
1748       return false;
1749 
1750   for (unsigned i = EltSize, e = 16; i != e; i += EltSize) {
1751     if (N->getMaskElt(i) < 0) continue;
1752     for (unsigned j = 0; j != EltSize; ++j)
1753       if (N->getMaskElt(i+j) != N->getMaskElt(j))
1754         return false;
1755   }
1756   return true;
1757 }
1758 
1759 /// Check that the mask is shuffling N byte elements. Within each N byte
1760 /// element of the mask, the indices could be either in increasing or
1761 /// decreasing order as long as they are consecutive.
1762 /// \param[in] N the shuffle vector SD Node to analyze
1763 /// \param[in] Width the element width in bytes, could be 2/4/8/16 (HalfWord/
1764 /// Word/DoubleWord/QuadWord).
1765 /// \param[in] StepLen the delta indices number among the N byte element, if
1766 /// the mask is in increasing/decreasing order then it is 1/-1.
1767 /// \return true iff the mask is shuffling N byte elements.
1768 static bool isNByteElemShuffleMask(ShuffleVectorSDNode *N, unsigned Width,
1769                                    int StepLen) {
1770   assert((Width == 2 || Width == 4 || Width == 8 || Width == 16) &&
1771          "Unexpected element width.");
1772   assert((StepLen == 1 || StepLen == -1) && "Unexpected element width.");
1773 
1774   unsigned NumOfElem = 16 / Width;
1775   unsigned MaskVal[16]; //  Width is never greater than 16
1776   for (unsigned i = 0; i < NumOfElem; ++i) {
1777     MaskVal[0] = N->getMaskElt(i * Width);
1778     if ((StepLen == 1) && (MaskVal[0] % Width)) {
1779       return false;
1780     } else if ((StepLen == -1) && ((MaskVal[0] + 1) % Width)) {
1781       return false;
1782     }
1783 
1784     for (unsigned int j = 1; j < Width; ++j) {
1785       MaskVal[j] = N->getMaskElt(i * Width + j);
1786       if (MaskVal[j] != MaskVal[j-1] + StepLen) {
1787         return false;
1788       }
1789     }
1790   }
1791 
1792   return true;
1793 }
1794 
1795 bool PPC::isXXINSERTWMask(ShuffleVectorSDNode *N, unsigned &ShiftElts,
1796                           unsigned &InsertAtByte, bool &Swap, bool IsLE) {
1797   if (!isNByteElemShuffleMask(N, 4, 1))
1798     return false;
1799 
1800   // Now we look at mask elements 0,4,8,12
1801   unsigned M0 = N->getMaskElt(0) / 4;
1802   unsigned M1 = N->getMaskElt(4) / 4;
1803   unsigned M2 = N->getMaskElt(8) / 4;
1804   unsigned M3 = N->getMaskElt(12) / 4;
1805   unsigned LittleEndianShifts[] = { 2, 1, 0, 3 };
1806   unsigned BigEndianShifts[] = { 3, 0, 1, 2 };
1807 
1808   // Below, let H and L be arbitrary elements of the shuffle mask
1809   // where H is in the range [4,7] and L is in the range [0,3].
1810   // H, 1, 2, 3 or L, 5, 6, 7
1811   if ((M0 > 3 && M1 == 1 && M2 == 2 && M3 == 3) ||
1812       (M0 < 4 && M1 == 5 && M2 == 6 && M3 == 7)) {
1813     ShiftElts = IsLE ? LittleEndianShifts[M0 & 0x3] : BigEndianShifts[M0 & 0x3];
1814     InsertAtByte = IsLE ? 12 : 0;
1815     Swap = M0 < 4;
1816     return true;
1817   }
1818   // 0, H, 2, 3 or 4, L, 6, 7
1819   if ((M1 > 3 && M0 == 0 && M2 == 2 && M3 == 3) ||
1820       (M1 < 4 && M0 == 4 && M2 == 6 && M3 == 7)) {
1821     ShiftElts = IsLE ? LittleEndianShifts[M1 & 0x3] : BigEndianShifts[M1 & 0x3];
1822     InsertAtByte = IsLE ? 8 : 4;
1823     Swap = M1 < 4;
1824     return true;
1825   }
1826   // 0, 1, H, 3 or 4, 5, L, 7
1827   if ((M2 > 3 && M0 == 0 && M1 == 1 && M3 == 3) ||
1828       (M2 < 4 && M0 == 4 && M1 == 5 && M3 == 7)) {
1829     ShiftElts = IsLE ? LittleEndianShifts[M2 & 0x3] : BigEndianShifts[M2 & 0x3];
1830     InsertAtByte = IsLE ? 4 : 8;
1831     Swap = M2 < 4;
1832     return true;
1833   }
1834   // 0, 1, 2, H or 4, 5, 6, L
1835   if ((M3 > 3 && M0 == 0 && M1 == 1 && M2 == 2) ||
1836       (M3 < 4 && M0 == 4 && M1 == 5 && M2 == 6)) {
1837     ShiftElts = IsLE ? LittleEndianShifts[M3 & 0x3] : BigEndianShifts[M3 & 0x3];
1838     InsertAtByte = IsLE ? 0 : 12;
1839     Swap = M3 < 4;
1840     return true;
1841   }
1842 
1843   // If both vector operands for the shuffle are the same vector, the mask will
1844   // contain only elements from the first one and the second one will be undef.
1845   if (N->getOperand(1).isUndef()) {
1846     ShiftElts = 0;
1847     Swap = true;
1848     unsigned XXINSERTWSrcElem = IsLE ? 2 : 1;
1849     if (M0 == XXINSERTWSrcElem && M1 == 1 && M2 == 2 && M3 == 3) {
1850       InsertAtByte = IsLE ? 12 : 0;
1851       return true;
1852     }
1853     if (M0 == 0 && M1 == XXINSERTWSrcElem && M2 == 2 && M3 == 3) {
1854       InsertAtByte = IsLE ? 8 : 4;
1855       return true;
1856     }
1857     if (M0 == 0 && M1 == 1 && M2 == XXINSERTWSrcElem && M3 == 3) {
1858       InsertAtByte = IsLE ? 4 : 8;
1859       return true;
1860     }
1861     if (M0 == 0 && M1 == 1 && M2 == 2 && M3 == XXINSERTWSrcElem) {
1862       InsertAtByte = IsLE ? 0 : 12;
1863       return true;
1864     }
1865   }
1866 
1867   return false;
1868 }
1869 
1870 bool PPC::isXXSLDWIShuffleMask(ShuffleVectorSDNode *N, unsigned &ShiftElts,
1871                                bool &Swap, bool IsLE) {
1872   assert(N->getValueType(0) == MVT::v16i8 && "Shuffle vector expects v16i8");
1873   // Ensure each byte index of the word is consecutive.
1874   if (!isNByteElemShuffleMask(N, 4, 1))
1875     return false;
1876 
1877   // Now we look at mask elements 0,4,8,12, which are the beginning of words.
1878   unsigned M0 = N->getMaskElt(0) / 4;
1879   unsigned M1 = N->getMaskElt(4) / 4;
1880   unsigned M2 = N->getMaskElt(8) / 4;
1881   unsigned M3 = N->getMaskElt(12) / 4;
1882 
1883   // If both vector operands for the shuffle are the same vector, the mask will
1884   // contain only elements from the first one and the second one will be undef.
1885   if (N->getOperand(1).isUndef()) {
1886     assert(M0 < 4 && "Indexing into an undef vector?");
1887     if (M1 != (M0 + 1) % 4 || M2 != (M1 + 1) % 4 || M3 != (M2 + 1) % 4)
1888       return false;
1889 
1890     ShiftElts = IsLE ? (4 - M0) % 4 : M0;
1891     Swap = false;
1892     return true;
1893   }
1894 
1895   // Ensure each word index of the ShuffleVector Mask is consecutive.
1896   if (M1 != (M0 + 1) % 8 || M2 != (M1 + 1) % 8 || M3 != (M2 + 1) % 8)
1897     return false;
1898 
1899   if (IsLE) {
1900     if (M0 == 0 || M0 == 7 || M0 == 6 || M0 == 5) {
1901       // Input vectors don't need to be swapped if the leading element
1902       // of the result is one of the 3 left elements of the second vector
1903       // (or if there is no shift to be done at all).
1904       Swap = false;
1905       ShiftElts = (8 - M0) % 8;
1906     } else if (M0 == 4 || M0 == 3 || M0 == 2 || M0 == 1) {
1907       // Input vectors need to be swapped if the leading element
1908       // of the result is one of the 3 left elements of the first vector
1909       // (or if we're shifting by 4 - thereby simply swapping the vectors).
1910       Swap = true;
1911       ShiftElts = (4 - M0) % 4;
1912     }
1913 
1914     return true;
1915   } else {                                          // BE
1916     if (M0 == 0 || M0 == 1 || M0 == 2 || M0 == 3) {
1917       // Input vectors don't need to be swapped if the leading element
1918       // of the result is one of the 4 elements of the first vector.
1919       Swap = false;
1920       ShiftElts = M0;
1921     } else if (M0 == 4 || M0 == 5 || M0 == 6 || M0 == 7) {
1922       // Input vectors need to be swapped if the leading element
1923       // of the result is one of the 4 elements of the right vector.
1924       Swap = true;
1925       ShiftElts = M0 - 4;
1926     }
1927 
1928     return true;
1929   }
1930 }
1931 
1932 bool static isXXBRShuffleMaskHelper(ShuffleVectorSDNode *N, int Width) {
1933   assert(N->getValueType(0) == MVT::v16i8 && "Shuffle vector expects v16i8");
1934 
1935   if (!isNByteElemShuffleMask(N, Width, -1))
1936     return false;
1937 
1938   for (int i = 0; i < 16; i += Width)
1939     if (N->getMaskElt(i) != i + Width - 1)
1940       return false;
1941 
1942   return true;
1943 }
1944 
1945 bool PPC::isXXBRHShuffleMask(ShuffleVectorSDNode *N) {
1946   return isXXBRShuffleMaskHelper(N, 2);
1947 }
1948 
1949 bool PPC::isXXBRWShuffleMask(ShuffleVectorSDNode *N) {
1950   return isXXBRShuffleMaskHelper(N, 4);
1951 }
1952 
1953 bool PPC::isXXBRDShuffleMask(ShuffleVectorSDNode *N) {
1954   return isXXBRShuffleMaskHelper(N, 8);
1955 }
1956 
1957 bool PPC::isXXBRQShuffleMask(ShuffleVectorSDNode *N) {
1958   return isXXBRShuffleMaskHelper(N, 16);
1959 }
1960 
1961 /// Can node \p N be lowered to an XXPERMDI instruction? If so, set \p Swap
1962 /// if the inputs to the instruction should be swapped and set \p DM to the
1963 /// value for the immediate.
1964 /// Specifically, set \p Swap to true only if \p N can be lowered to XXPERMDI
1965 /// AND element 0 of the result comes from the first input (LE) or second input
1966 /// (BE). Set \p DM to the calculated result (0-3) only if \p N can be lowered.
1967 /// \return true iff the given mask of shuffle node \p N is a XXPERMDI shuffle
1968 /// mask.
1969 bool PPC::isXXPERMDIShuffleMask(ShuffleVectorSDNode *N, unsigned &DM,
1970                                bool &Swap, bool IsLE) {
1971   assert(N->getValueType(0) == MVT::v16i8 && "Shuffle vector expects v16i8");
1972 
1973   // Ensure each byte index of the double word is consecutive.
1974   if (!isNByteElemShuffleMask(N, 8, 1))
1975     return false;
1976 
1977   unsigned M0 = N->getMaskElt(0) / 8;
1978   unsigned M1 = N->getMaskElt(8) / 8;
1979   assert(((M0 | M1) < 4) && "A mask element out of bounds?");
1980 
1981   // If both vector operands for the shuffle are the same vector, the mask will
1982   // contain only elements from the first one and the second one will be undef.
1983   if (N->getOperand(1).isUndef()) {
1984     if ((M0 | M1) < 2) {
1985       DM = IsLE ? (((~M1) & 1) << 1) + ((~M0) & 1) : (M0 << 1) + (M1 & 1);
1986       Swap = false;
1987       return true;
1988     } else
1989       return false;
1990   }
1991 
1992   if (IsLE) {
1993     if (M0 > 1 && M1 < 2) {
1994       Swap = false;
1995     } else if (M0 < 2 && M1 > 1) {
1996       M0 = (M0 + 2) % 4;
1997       M1 = (M1 + 2) % 4;
1998       Swap = true;
1999     } else
2000       return false;
2001 
2002     // Note: if control flow comes here that means Swap is already set above
2003     DM = (((~M1) & 1) << 1) + ((~M0) & 1);
2004     return true;
2005   } else { // BE
2006     if (M0 < 2 && M1 > 1) {
2007       Swap = false;
2008     } else if (M0 > 1 && M1 < 2) {
2009       M0 = (M0 + 2) % 4;
2010       M1 = (M1 + 2) % 4;
2011       Swap = true;
2012     } else
2013       return false;
2014 
2015     // Note: if control flow comes here that means Swap is already set above
2016     DM = (M0 << 1) + (M1 & 1);
2017     return true;
2018   }
2019 }
2020 
2021 
2022 /// getVSPLTImmediate - Return the appropriate VSPLT* immediate to splat the
2023 /// specified isSplatShuffleMask VECTOR_SHUFFLE mask.
2024 unsigned PPC::getVSPLTImmediate(SDNode *N, unsigned EltSize,
2025                                 SelectionDAG &DAG) {
2026   ShuffleVectorSDNode *SVOp = cast<ShuffleVectorSDNode>(N);
2027   assert(isSplatShuffleMask(SVOp, EltSize));
2028   if (DAG.getDataLayout().isLittleEndian())
2029     return (16 / EltSize) - 1 - (SVOp->getMaskElt(0) / EltSize);
2030   else
2031     return SVOp->getMaskElt(0) / EltSize;
2032 }
2033 
2034 /// get_VSPLTI_elt - If this is a build_vector of constants which can be formed
2035 /// by using a vspltis[bhw] instruction of the specified element size, return
2036 /// the constant being splatted.  The ByteSize field indicates the number of
2037 /// bytes of each element [124] -> [bhw].
2038 SDValue PPC::get_VSPLTI_elt(SDNode *N, unsigned ByteSize, SelectionDAG &DAG) {
2039   SDValue OpVal(nullptr, 0);
2040 
2041   // If ByteSize of the splat is bigger than the element size of the
2042   // build_vector, then we have a case where we are checking for a splat where
2043   // multiple elements of the buildvector are folded together into a single
2044   // logical element of the splat (e.g. "vsplish 1" to splat {0,1}*8).
2045   unsigned EltSize = 16/N->getNumOperands();
2046   if (EltSize < ByteSize) {
2047     unsigned Multiple = ByteSize/EltSize;   // Number of BV entries per spltval.
2048     SDValue UniquedVals[4];
2049     assert(Multiple > 1 && Multiple <= 4 && "How can this happen?");
2050 
2051     // See if all of the elements in the buildvector agree across.
2052     for (unsigned i = 0, e = N->getNumOperands(); i != e; ++i) {
2053       if (N->getOperand(i).isUndef()) continue;
2054       // If the element isn't a constant, bail fully out.
2055       if (!isa<ConstantSDNode>(N->getOperand(i))) return SDValue();
2056 
2057       if (!UniquedVals[i&(Multiple-1)].getNode())
2058         UniquedVals[i&(Multiple-1)] = N->getOperand(i);
2059       else if (UniquedVals[i&(Multiple-1)] != N->getOperand(i))
2060         return SDValue();  // no match.
2061     }
2062 
2063     // Okay, if we reached this point, UniquedVals[0..Multiple-1] contains
2064     // either constant or undef values that are identical for each chunk.  See
2065     // if these chunks can form into a larger vspltis*.
2066 
2067     // Check to see if all of the leading entries are either 0 or -1.  If
2068     // neither, then this won't fit into the immediate field.
2069     bool LeadingZero = true;
2070     bool LeadingOnes = true;
2071     for (unsigned i = 0; i != Multiple-1; ++i) {
2072       if (!UniquedVals[i].getNode()) continue;  // Must have been undefs.
2073 
2074       LeadingZero &= isNullConstant(UniquedVals[i]);
2075       LeadingOnes &= isAllOnesConstant(UniquedVals[i]);
2076     }
2077     // Finally, check the least significant entry.
2078     if (LeadingZero) {
2079       if (!UniquedVals[Multiple-1].getNode())
2080         return DAG.getTargetConstant(0, SDLoc(N), MVT::i32);  // 0,0,0,undef
2081       int Val = cast<ConstantSDNode>(UniquedVals[Multiple-1])->getZExtValue();
2082       if (Val < 16)                                   // 0,0,0,4 -> vspltisw(4)
2083         return DAG.getTargetConstant(Val, SDLoc(N), MVT::i32);
2084     }
2085     if (LeadingOnes) {
2086       if (!UniquedVals[Multiple-1].getNode())
2087         return DAG.getTargetConstant(~0U, SDLoc(N), MVT::i32); // -1,-1,-1,undef
2088       int Val =cast<ConstantSDNode>(UniquedVals[Multiple-1])->getSExtValue();
2089       if (Val >= -16)                            // -1,-1,-1,-2 -> vspltisw(-2)
2090         return DAG.getTargetConstant(Val, SDLoc(N), MVT::i32);
2091     }
2092 
2093     return SDValue();
2094   }
2095 
2096   // Check to see if this buildvec has a single non-undef value in its elements.
2097   for (unsigned i = 0, e = N->getNumOperands(); i != e; ++i) {
2098     if (N->getOperand(i).isUndef()) continue;
2099     if (!OpVal.getNode())
2100       OpVal = N->getOperand(i);
2101     else if (OpVal != N->getOperand(i))
2102       return SDValue();
2103   }
2104 
2105   if (!OpVal.getNode()) return SDValue();  // All UNDEF: use implicit def.
2106 
2107   unsigned ValSizeInBytes = EltSize;
2108   uint64_t Value = 0;
2109   if (ConstantSDNode *CN = dyn_cast<ConstantSDNode>(OpVal)) {
2110     Value = CN->getZExtValue();
2111   } else if (ConstantFPSDNode *CN = dyn_cast<ConstantFPSDNode>(OpVal)) {
2112     assert(CN->getValueType(0) == MVT::f32 && "Only one legal FP vector type!");
2113     Value = FloatToBits(CN->getValueAPF().convertToFloat());
2114   }
2115 
2116   // If the splat value is larger than the element value, then we can never do
2117   // this splat.  The only case that we could fit the replicated bits into our
2118   // immediate field for would be zero, and we prefer to use vxor for it.
2119   if (ValSizeInBytes < ByteSize) return SDValue();
2120 
2121   // If the element value is larger than the splat value, check if it consists
2122   // of a repeated bit pattern of size ByteSize.
2123   if (!APInt(ValSizeInBytes * 8, Value).isSplat(ByteSize * 8))
2124     return SDValue();
2125 
2126   // Properly sign extend the value.
2127   int MaskVal = SignExtend32(Value, ByteSize * 8);
2128 
2129   // If this is zero, don't match, zero matches ISD::isBuildVectorAllZeros.
2130   if (MaskVal == 0) return SDValue();
2131 
2132   // Finally, if this value fits in a 5 bit sext field, return it
2133   if (SignExtend32<5>(MaskVal) == MaskVal)
2134     return DAG.getTargetConstant(MaskVal, SDLoc(N), MVT::i32);
2135   return SDValue();
2136 }
2137 
2138 /// isQVALIGNIShuffleMask - If this is a qvaligni shuffle mask, return the shift
2139 /// amount, otherwise return -1.
2140 int PPC::isQVALIGNIShuffleMask(SDNode *N) {
2141   EVT VT = N->getValueType(0);
2142   if (VT != MVT::v4f64 && VT != MVT::v4f32 && VT != MVT::v4i1)
2143     return -1;
2144 
2145   ShuffleVectorSDNode *SVOp = cast<ShuffleVectorSDNode>(N);
2146 
2147   // Find the first non-undef value in the shuffle mask.
2148   unsigned i;
2149   for (i = 0; i != 4 && SVOp->getMaskElt(i) < 0; ++i)
2150     /*search*/;
2151 
2152   if (i == 4) return -1;  // all undef.
2153 
2154   // Otherwise, check to see if the rest of the elements are consecutively
2155   // numbered from this value.
2156   unsigned ShiftAmt = SVOp->getMaskElt(i);
2157   if (ShiftAmt < i) return -1;
2158   ShiftAmt -= i;
2159 
2160   // Check the rest of the elements to see if they are consecutive.
2161   for (++i; i != 4; ++i)
2162     if (!isConstantOrUndef(SVOp->getMaskElt(i), ShiftAmt+i))
2163       return -1;
2164 
2165   return ShiftAmt;
2166 }
2167 
2168 //===----------------------------------------------------------------------===//
2169 //  Addressing Mode Selection
2170 //===----------------------------------------------------------------------===//
2171 
2172 /// isIntS16Immediate - This method tests to see if the node is either a 32-bit
2173 /// or 64-bit immediate, and if the value can be accurately represented as a
2174 /// sign extension from a 16-bit value.  If so, this returns true and the
2175 /// immediate.
2176 bool llvm::isIntS16Immediate(SDNode *N, int16_t &Imm) {
2177   if (!isa<ConstantSDNode>(N))
2178     return false;
2179 
2180   Imm = (int16_t)cast<ConstantSDNode>(N)->getZExtValue();
2181   if (N->getValueType(0) == MVT::i32)
2182     return Imm == (int32_t)cast<ConstantSDNode>(N)->getZExtValue();
2183   else
2184     return Imm == (int64_t)cast<ConstantSDNode>(N)->getZExtValue();
2185 }
2186 bool llvm::isIntS16Immediate(SDValue Op, int16_t &Imm) {
2187   return isIntS16Immediate(Op.getNode(), Imm);
2188 }
2189 
2190 /// SelectAddressRegReg - Given the specified addressed, check to see if it
2191 /// can be represented as an indexed [r+r] operation.  Returns false if it
2192 /// can be more efficiently represented with [r+imm].
2193 bool PPCTargetLowering::SelectAddressRegReg(SDValue N, SDValue &Base,
2194                                             SDValue &Index,
2195                                             SelectionDAG &DAG) const {
2196   int16_t imm = 0;
2197   if (N.getOpcode() == ISD::ADD) {
2198     if (isIntS16Immediate(N.getOperand(1), imm))
2199       return false;    // r+i
2200     if (N.getOperand(1).getOpcode() == PPCISD::Lo)
2201       return false;    // r+i
2202 
2203     Base = N.getOperand(0);
2204     Index = N.getOperand(1);
2205     return true;
2206   } else if (N.getOpcode() == ISD::OR) {
2207     if (isIntS16Immediate(N.getOperand(1), imm))
2208       return false;    // r+i can fold it if we can.
2209 
2210     // If this is an or of disjoint bitfields, we can codegen this as an add
2211     // (for better address arithmetic) if the LHS and RHS of the OR are provably
2212     // disjoint.
2213     KnownBits LHSKnown, RHSKnown;
2214     DAG.computeKnownBits(N.getOperand(0), LHSKnown);
2215 
2216     if (LHSKnown.Zero.getBoolValue()) {
2217       DAG.computeKnownBits(N.getOperand(1), RHSKnown);
2218       // If all of the bits are known zero on the LHS or RHS, the add won't
2219       // carry.
2220       if (~(LHSKnown.Zero | RHSKnown.Zero) == 0) {
2221         Base = N.getOperand(0);
2222         Index = N.getOperand(1);
2223         return true;
2224       }
2225     }
2226   }
2227 
2228   return false;
2229 }
2230 
2231 // If we happen to be doing an i64 load or store into a stack slot that has
2232 // less than a 4-byte alignment, then the frame-index elimination may need to
2233 // use an indexed load or store instruction (because the offset may not be a
2234 // multiple of 4). The extra register needed to hold the offset comes from the
2235 // register scavenger, and it is possible that the scavenger will need to use
2236 // an emergency spill slot. As a result, we need to make sure that a spill slot
2237 // is allocated when doing an i64 load/store into a less-than-4-byte-aligned
2238 // stack slot.
2239 static void fixupFuncForFI(SelectionDAG &DAG, int FrameIdx, EVT VT) {
2240   // FIXME: This does not handle the LWA case.
2241   if (VT != MVT::i64)
2242     return;
2243 
2244   // NOTE: We'll exclude negative FIs here, which come from argument
2245   // lowering, because there are no known test cases triggering this problem
2246   // using packed structures (or similar). We can remove this exclusion if
2247   // we find such a test case. The reason why this is so test-case driven is
2248   // because this entire 'fixup' is only to prevent crashes (from the
2249   // register scavenger) on not-really-valid inputs. For example, if we have:
2250   //   %a = alloca i1
2251   //   %b = bitcast i1* %a to i64*
2252   //   store i64* a, i64 b
2253   // then the store should really be marked as 'align 1', but is not. If it
2254   // were marked as 'align 1' then the indexed form would have been
2255   // instruction-selected initially, and the problem this 'fixup' is preventing
2256   // won't happen regardless.
2257   if (FrameIdx < 0)
2258     return;
2259 
2260   MachineFunction &MF = DAG.getMachineFunction();
2261   MachineFrameInfo &MFI = MF.getFrameInfo();
2262 
2263   unsigned Align = MFI.getObjectAlignment(FrameIdx);
2264   if (Align >= 4)
2265     return;
2266 
2267   PPCFunctionInfo *FuncInfo = MF.getInfo<PPCFunctionInfo>();
2268   FuncInfo->setHasNonRISpills();
2269 }
2270 
2271 /// Returns true if the address N can be represented by a base register plus
2272 /// a signed 16-bit displacement [r+imm], and if it is not better
2273 /// represented as reg+reg.  If \p Alignment is non-zero, only accept
2274 /// displacements that are multiples of that value.
2275 bool PPCTargetLowering::SelectAddressRegImm(SDValue N, SDValue &Disp,
2276                                             SDValue &Base,
2277                                             SelectionDAG &DAG,
2278                                             unsigned Alignment) const {
2279   // FIXME dl should come from parent load or store, not from address
2280   SDLoc dl(N);
2281   // If this can be more profitably realized as r+r, fail.
2282   if (SelectAddressRegReg(N, Disp, Base, DAG))
2283     return false;
2284 
2285   if (N.getOpcode() == ISD::ADD) {
2286     int16_t imm = 0;
2287     if (isIntS16Immediate(N.getOperand(1), imm) &&
2288         (!Alignment || (imm % Alignment) == 0)) {
2289       Disp = DAG.getTargetConstant(imm, dl, N.getValueType());
2290       if (FrameIndexSDNode *FI = dyn_cast<FrameIndexSDNode>(N.getOperand(0))) {
2291         Base = DAG.getTargetFrameIndex(FI->getIndex(), N.getValueType());
2292         fixupFuncForFI(DAG, FI->getIndex(), N.getValueType());
2293       } else {
2294         Base = N.getOperand(0);
2295       }
2296       return true; // [r+i]
2297     } else if (N.getOperand(1).getOpcode() == PPCISD::Lo) {
2298       // Match LOAD (ADD (X, Lo(G))).
2299       assert(!cast<ConstantSDNode>(N.getOperand(1).getOperand(1))->getZExtValue()
2300              && "Cannot handle constant offsets yet!");
2301       Disp = N.getOperand(1).getOperand(0);  // The global address.
2302       assert(Disp.getOpcode() == ISD::TargetGlobalAddress ||
2303              Disp.getOpcode() == ISD::TargetGlobalTLSAddress ||
2304              Disp.getOpcode() == ISD::TargetConstantPool ||
2305              Disp.getOpcode() == ISD::TargetJumpTable);
2306       Base = N.getOperand(0);
2307       return true;  // [&g+r]
2308     }
2309   } else if (N.getOpcode() == ISD::OR) {
2310     int16_t imm = 0;
2311     if (isIntS16Immediate(N.getOperand(1), imm) &&
2312         (!Alignment || (imm % Alignment) == 0)) {
2313       // If this is an or of disjoint bitfields, we can codegen this as an add
2314       // (for better address arithmetic) if the LHS and RHS of the OR are
2315       // provably disjoint.
2316       KnownBits LHSKnown;
2317       DAG.computeKnownBits(N.getOperand(0), LHSKnown);
2318 
2319       if ((LHSKnown.Zero.getZExtValue()|~(uint64_t)imm) == ~0ULL) {
2320         // If all of the bits are known zero on the LHS or RHS, the add won't
2321         // carry.
2322         if (FrameIndexSDNode *FI =
2323               dyn_cast<FrameIndexSDNode>(N.getOperand(0))) {
2324           Base = DAG.getTargetFrameIndex(FI->getIndex(), N.getValueType());
2325           fixupFuncForFI(DAG, FI->getIndex(), N.getValueType());
2326         } else {
2327           Base = N.getOperand(0);
2328         }
2329         Disp = DAG.getTargetConstant(imm, dl, N.getValueType());
2330         return true;
2331       }
2332     }
2333   } else if (ConstantSDNode *CN = dyn_cast<ConstantSDNode>(N)) {
2334     // Loading from a constant address.
2335 
2336     // If this address fits entirely in a 16-bit sext immediate field, codegen
2337     // this as "d, 0"
2338     int16_t Imm;
2339     if (isIntS16Immediate(CN, Imm) && (!Alignment || (Imm % Alignment) == 0)) {
2340       Disp = DAG.getTargetConstant(Imm, dl, CN->getValueType(0));
2341       Base = DAG.getRegister(Subtarget.isPPC64() ? PPC::ZERO8 : PPC::ZERO,
2342                              CN->getValueType(0));
2343       return true;
2344     }
2345 
2346     // Handle 32-bit sext immediates with LIS + addr mode.
2347     if ((CN->getValueType(0) == MVT::i32 ||
2348          (int64_t)CN->getZExtValue() == (int)CN->getZExtValue()) &&
2349         (!Alignment || (CN->getZExtValue() % Alignment) == 0)) {
2350       int Addr = (int)CN->getZExtValue();
2351 
2352       // Otherwise, break this down into an LIS + disp.
2353       Disp = DAG.getTargetConstant((short)Addr, dl, MVT::i32);
2354 
2355       Base = DAG.getTargetConstant((Addr - (signed short)Addr) >> 16, dl,
2356                                    MVT::i32);
2357       unsigned Opc = CN->getValueType(0) == MVT::i32 ? PPC::LIS : PPC::LIS8;
2358       Base = SDValue(DAG.getMachineNode(Opc, dl, CN->getValueType(0), Base), 0);
2359       return true;
2360     }
2361   }
2362 
2363   Disp = DAG.getTargetConstant(0, dl, getPointerTy(DAG.getDataLayout()));
2364   if (FrameIndexSDNode *FI = dyn_cast<FrameIndexSDNode>(N)) {
2365     Base = DAG.getTargetFrameIndex(FI->getIndex(), N.getValueType());
2366     fixupFuncForFI(DAG, FI->getIndex(), N.getValueType());
2367   } else
2368     Base = N;
2369   return true;      // [r+0]
2370 }
2371 
2372 /// SelectAddressRegRegOnly - Given the specified addressed, force it to be
2373 /// represented as an indexed [r+r] operation.
2374 bool PPCTargetLowering::SelectAddressRegRegOnly(SDValue N, SDValue &Base,
2375                                                 SDValue &Index,
2376                                                 SelectionDAG &DAG) const {
2377   // Check to see if we can easily represent this as an [r+r] address.  This
2378   // will fail if it thinks that the address is more profitably represented as
2379   // reg+imm, e.g. where imm = 0.
2380   if (SelectAddressRegReg(N, Base, Index, DAG))
2381     return true;
2382 
2383   // If the address is the result of an add, we will utilize the fact that the
2384   // address calculation includes an implicit add.  However, we can reduce
2385   // register pressure if we do not materialize a constant just for use as the
2386   // index register.  We only get rid of the add if it is not an add of a
2387   // value and a 16-bit signed constant and both have a single use.
2388   int16_t imm = 0;
2389   if (N.getOpcode() == ISD::ADD &&
2390       (!isIntS16Immediate(N.getOperand(1), imm) ||
2391        !N.getOperand(1).hasOneUse() || !N.getOperand(0).hasOneUse())) {
2392     Base = N.getOperand(0);
2393     Index = N.getOperand(1);
2394     return true;
2395   }
2396 
2397   // Otherwise, do it the hard way, using R0 as the base register.
2398   Base = DAG.getRegister(Subtarget.isPPC64() ? PPC::ZERO8 : PPC::ZERO,
2399                          N.getValueType());
2400   Index = N;
2401   return true;
2402 }
2403 
2404 /// Returns true if we should use a direct load into vector instruction
2405 /// (such as lxsd or lfd), instead of a load into gpr + direct move sequence.
2406 static bool usePartialVectorLoads(SDNode *N) {
2407   if (!N->hasOneUse())
2408     return false;
2409 
2410   // If there are any other uses other than scalar to vector, then we should
2411   // keep it as a scalar load -> direct move pattern to prevent multiple
2412   // loads.  Currently, only check for i64 since we have lxsd/lfd to do this
2413   // efficiently, but no update equivalent.
2414   if (LoadSDNode *LD = dyn_cast<LoadSDNode>(N)) {
2415     EVT MemVT = LD->getMemoryVT();
2416     if (MemVT.isSimple() && MemVT.getSimpleVT().SimpleTy == MVT::i64) {
2417       SDNode *User = *(LD->use_begin());
2418       if (User->getOpcode() == ISD::SCALAR_TO_VECTOR)
2419         return true;
2420     }
2421   }
2422 
2423   return false;
2424 }
2425 
2426 /// getPreIndexedAddressParts - returns true by value, base pointer and
2427 /// offset pointer and addressing mode by reference if the node's address
2428 /// can be legally represented as pre-indexed load / store address.
2429 bool PPCTargetLowering::getPreIndexedAddressParts(SDNode *N, SDValue &Base,
2430                                                   SDValue &Offset,
2431                                                   ISD::MemIndexedMode &AM,
2432                                                   SelectionDAG &DAG) const {
2433   if (DisablePPCPreinc) return false;
2434 
2435   bool isLoad = true;
2436   SDValue Ptr;
2437   EVT VT;
2438   unsigned Alignment;
2439   if (LoadSDNode *LD = dyn_cast<LoadSDNode>(N)) {
2440     Ptr = LD->getBasePtr();
2441     VT = LD->getMemoryVT();
2442     Alignment = LD->getAlignment();
2443   } else if (StoreSDNode *ST = dyn_cast<StoreSDNode>(N)) {
2444     Ptr = ST->getBasePtr();
2445     VT  = ST->getMemoryVT();
2446     Alignment = ST->getAlignment();
2447     isLoad = false;
2448   } else
2449     return false;
2450 
2451   // Do not generate pre-inc forms for specific loads that feed scalar_to_vector
2452   // instructions because we can fold these into a more efficient instruction
2453   // instead, (such as LXSD).
2454   if (isLoad && usePartialVectorLoads(N)) {
2455     return false;
2456   }
2457 
2458   // PowerPC doesn't have preinc load/store instructions for vectors (except
2459   // for QPX, which does have preinc r+r forms).
2460   if (VT.isVector()) {
2461     if (!Subtarget.hasQPX() || (VT != MVT::v4f64 && VT != MVT::v4f32)) {
2462       return false;
2463     } else if (SelectAddressRegRegOnly(Ptr, Offset, Base, DAG)) {
2464       AM = ISD::PRE_INC;
2465       return true;
2466     }
2467   }
2468 
2469   if (SelectAddressRegReg(Ptr, Base, Offset, DAG)) {
2470     // Common code will reject creating a pre-inc form if the base pointer
2471     // is a frame index, or if N is a store and the base pointer is either
2472     // the same as or a predecessor of the value being stored.  Check for
2473     // those situations here, and try with swapped Base/Offset instead.
2474     bool Swap = false;
2475 
2476     if (isa<FrameIndexSDNode>(Base) || isa<RegisterSDNode>(Base))
2477       Swap = true;
2478     else if (!isLoad) {
2479       SDValue Val = cast<StoreSDNode>(N)->getValue();
2480       if (Val == Base || Base.getNode()->isPredecessorOf(Val.getNode()))
2481         Swap = true;
2482     }
2483 
2484     if (Swap)
2485       std::swap(Base, Offset);
2486 
2487     AM = ISD::PRE_INC;
2488     return true;
2489   }
2490 
2491   // LDU/STU can only handle immediates that are a multiple of 4.
2492   if (VT != MVT::i64) {
2493     if (!SelectAddressRegImm(Ptr, Offset, Base, DAG, 0))
2494       return false;
2495   } else {
2496     // LDU/STU need an address with at least 4-byte alignment.
2497     if (Alignment < 4)
2498       return false;
2499 
2500     if (!SelectAddressRegImm(Ptr, Offset, Base, DAG, 4))
2501       return false;
2502   }
2503 
2504   if (LoadSDNode *LD = dyn_cast<LoadSDNode>(N)) {
2505     // PPC64 doesn't have lwau, but it does have lwaux.  Reject preinc load of
2506     // sext i32 to i64 when addr mode is r+i.
2507     if (LD->getValueType(0) == MVT::i64 && LD->getMemoryVT() == MVT::i32 &&
2508         LD->getExtensionType() == ISD::SEXTLOAD &&
2509         isa<ConstantSDNode>(Offset))
2510       return false;
2511   }
2512 
2513   AM = ISD::PRE_INC;
2514   return true;
2515 }
2516 
2517 //===----------------------------------------------------------------------===//
2518 //  LowerOperation implementation
2519 //===----------------------------------------------------------------------===//
2520 
2521 /// Return true if we should reference labels using a PICBase, set the HiOpFlags
2522 /// and LoOpFlags to the target MO flags.
2523 static void getLabelAccessInfo(bool IsPIC, const PPCSubtarget &Subtarget,
2524                                unsigned &HiOpFlags, unsigned &LoOpFlags,
2525                                const GlobalValue *GV = nullptr) {
2526   HiOpFlags = PPCII::MO_HA;
2527   LoOpFlags = PPCII::MO_LO;
2528 
2529   // Don't use the pic base if not in PIC relocation model.
2530   if (IsPIC) {
2531     HiOpFlags |= PPCII::MO_PIC_FLAG;
2532     LoOpFlags |= PPCII::MO_PIC_FLAG;
2533   }
2534 
2535   // If this is a reference to a global value that requires a non-lazy-ptr, make
2536   // sure that instruction lowering adds it.
2537   if (GV && Subtarget.hasLazyResolverStub(GV)) {
2538     HiOpFlags |= PPCII::MO_NLP_FLAG;
2539     LoOpFlags |= PPCII::MO_NLP_FLAG;
2540 
2541     if (GV->hasHiddenVisibility()) {
2542       HiOpFlags |= PPCII::MO_NLP_HIDDEN_FLAG;
2543       LoOpFlags |= PPCII::MO_NLP_HIDDEN_FLAG;
2544     }
2545   }
2546 }
2547 
2548 static SDValue LowerLabelRef(SDValue HiPart, SDValue LoPart, bool isPIC,
2549                              SelectionDAG &DAG) {
2550   SDLoc DL(HiPart);
2551   EVT PtrVT = HiPart.getValueType();
2552   SDValue Zero = DAG.getConstant(0, DL, PtrVT);
2553 
2554   SDValue Hi = DAG.getNode(PPCISD::Hi, DL, PtrVT, HiPart, Zero);
2555   SDValue Lo = DAG.getNode(PPCISD::Lo, DL, PtrVT, LoPart, Zero);
2556 
2557   // With PIC, the first instruction is actually "GR+hi(&G)".
2558   if (isPIC)
2559     Hi = DAG.getNode(ISD::ADD, DL, PtrVT,
2560                      DAG.getNode(PPCISD::GlobalBaseReg, DL, PtrVT), Hi);
2561 
2562   // Generate non-pic code that has direct accesses to the constant pool.
2563   // The address of the global is just (hi(&g)+lo(&g)).
2564   return DAG.getNode(ISD::ADD, DL, PtrVT, Hi, Lo);
2565 }
2566 
2567 static void setUsesTOCBasePtr(MachineFunction &MF) {
2568   PPCFunctionInfo *FuncInfo = MF.getInfo<PPCFunctionInfo>();
2569   FuncInfo->setUsesTOCBasePtr();
2570 }
2571 
2572 static void setUsesTOCBasePtr(SelectionDAG &DAG) {
2573   setUsesTOCBasePtr(DAG.getMachineFunction());
2574 }
2575 
2576 static SDValue getTOCEntry(SelectionDAG &DAG, const SDLoc &dl, bool Is64Bit,
2577                            SDValue GA) {
2578   EVT VT = Is64Bit ? MVT::i64 : MVT::i32;
2579   SDValue Reg = Is64Bit ? DAG.getRegister(PPC::X2, VT) :
2580                 DAG.getNode(PPCISD::GlobalBaseReg, dl, VT);
2581 
2582   SDValue Ops[] = { GA, Reg };
2583   return DAG.getMemIntrinsicNode(
2584       PPCISD::TOC_ENTRY, dl, DAG.getVTList(VT, MVT::Other), Ops, VT,
2585       MachinePointerInfo::getGOT(DAG.getMachineFunction()), 0,
2586       MachineMemOperand::MOLoad);
2587 }
2588 
2589 SDValue PPCTargetLowering::LowerConstantPool(SDValue Op,
2590                                              SelectionDAG &DAG) const {
2591   EVT PtrVT = Op.getValueType();
2592   ConstantPoolSDNode *CP = cast<ConstantPoolSDNode>(Op);
2593   const Constant *C = CP->getConstVal();
2594 
2595   // 64-bit SVR4 ABI code is always position-independent.
2596   // The actual address of the GlobalValue is stored in the TOC.
2597   if (Subtarget.isSVR4ABI() && Subtarget.isPPC64()) {
2598     setUsesTOCBasePtr(DAG);
2599     SDValue GA = DAG.getTargetConstantPool(C, PtrVT, CP->getAlignment(), 0);
2600     return getTOCEntry(DAG, SDLoc(CP), true, GA);
2601   }
2602 
2603   unsigned MOHiFlag, MOLoFlag;
2604   bool IsPIC = isPositionIndependent();
2605   getLabelAccessInfo(IsPIC, Subtarget, MOHiFlag, MOLoFlag);
2606 
2607   if (IsPIC && Subtarget.isSVR4ABI()) {
2608     SDValue GA = DAG.getTargetConstantPool(C, PtrVT, CP->getAlignment(),
2609                                            PPCII::MO_PIC_FLAG);
2610     return getTOCEntry(DAG, SDLoc(CP), false, GA);
2611   }
2612 
2613   SDValue CPIHi =
2614     DAG.getTargetConstantPool(C, PtrVT, CP->getAlignment(), 0, MOHiFlag);
2615   SDValue CPILo =
2616     DAG.getTargetConstantPool(C, PtrVT, CP->getAlignment(), 0, MOLoFlag);
2617   return LowerLabelRef(CPIHi, CPILo, IsPIC, DAG);
2618 }
2619 
2620 // For 64-bit PowerPC, prefer the more compact relative encodings.
2621 // This trades 32 bits per jump table entry for one or two instructions
2622 // on the jump site.
2623 unsigned PPCTargetLowering::getJumpTableEncoding() const {
2624   if (isJumpTableRelative())
2625     return MachineJumpTableInfo::EK_LabelDifference32;
2626 
2627   return TargetLowering::getJumpTableEncoding();
2628 }
2629 
2630 bool PPCTargetLowering::isJumpTableRelative() const {
2631   if (Subtarget.isPPC64())
2632     return true;
2633   return TargetLowering::isJumpTableRelative();
2634 }
2635 
2636 SDValue PPCTargetLowering::getPICJumpTableRelocBase(SDValue Table,
2637                                                     SelectionDAG &DAG) const {
2638   if (!Subtarget.isPPC64())
2639     return TargetLowering::getPICJumpTableRelocBase(Table, DAG);
2640 
2641   switch (getTargetMachine().getCodeModel()) {
2642   case CodeModel::Small:
2643   case CodeModel::Medium:
2644     return TargetLowering::getPICJumpTableRelocBase(Table, DAG);
2645   default:
2646     return DAG.getNode(PPCISD::GlobalBaseReg, SDLoc(),
2647                        getPointerTy(DAG.getDataLayout()));
2648   }
2649 }
2650 
2651 const MCExpr *
2652 PPCTargetLowering::getPICJumpTableRelocBaseExpr(const MachineFunction *MF,
2653                                                 unsigned JTI,
2654                                                 MCContext &Ctx) const {
2655   if (!Subtarget.isPPC64())
2656     return TargetLowering::getPICJumpTableRelocBaseExpr(MF, JTI, Ctx);
2657 
2658   switch (getTargetMachine().getCodeModel()) {
2659   case CodeModel::Small:
2660   case CodeModel::Medium:
2661     return TargetLowering::getPICJumpTableRelocBaseExpr(MF, JTI, Ctx);
2662   default:
2663     return MCSymbolRefExpr::create(MF->getPICBaseSymbol(), Ctx);
2664   }
2665 }
2666 
2667 SDValue PPCTargetLowering::LowerJumpTable(SDValue Op, SelectionDAG &DAG) const {
2668   EVT PtrVT = Op.getValueType();
2669   JumpTableSDNode *JT = cast<JumpTableSDNode>(Op);
2670 
2671   // 64-bit SVR4 ABI code is always position-independent.
2672   // The actual address of the GlobalValue is stored in the TOC.
2673   if (Subtarget.isSVR4ABI() && Subtarget.isPPC64()) {
2674     setUsesTOCBasePtr(DAG);
2675     SDValue GA = DAG.getTargetJumpTable(JT->getIndex(), PtrVT);
2676     return getTOCEntry(DAG, SDLoc(JT), true, GA);
2677   }
2678 
2679   unsigned MOHiFlag, MOLoFlag;
2680   bool IsPIC = isPositionIndependent();
2681   getLabelAccessInfo(IsPIC, Subtarget, MOHiFlag, MOLoFlag);
2682 
2683   if (IsPIC && Subtarget.isSVR4ABI()) {
2684     SDValue GA = DAG.getTargetJumpTable(JT->getIndex(), PtrVT,
2685                                         PPCII::MO_PIC_FLAG);
2686     return getTOCEntry(DAG, SDLoc(GA), false, GA);
2687   }
2688 
2689   SDValue JTIHi = DAG.getTargetJumpTable(JT->getIndex(), PtrVT, MOHiFlag);
2690   SDValue JTILo = DAG.getTargetJumpTable(JT->getIndex(), PtrVT, MOLoFlag);
2691   return LowerLabelRef(JTIHi, JTILo, IsPIC, DAG);
2692 }
2693 
2694 SDValue PPCTargetLowering::LowerBlockAddress(SDValue Op,
2695                                              SelectionDAG &DAG) const {
2696   EVT PtrVT = Op.getValueType();
2697   BlockAddressSDNode *BASDN = cast<BlockAddressSDNode>(Op);
2698   const BlockAddress *BA = BASDN->getBlockAddress();
2699 
2700   // 64-bit SVR4 ABI code is always position-independent.
2701   // The actual BlockAddress is stored in the TOC.
2702   if (Subtarget.isSVR4ABI() && isPositionIndependent()) {
2703     if (Subtarget.isPPC64())
2704       setUsesTOCBasePtr(DAG);
2705     SDValue GA = DAG.getTargetBlockAddress(BA, PtrVT, BASDN->getOffset());
2706     return getTOCEntry(DAG, SDLoc(BASDN), Subtarget.isPPC64(), GA);
2707   }
2708 
2709   unsigned MOHiFlag, MOLoFlag;
2710   bool IsPIC = isPositionIndependent();
2711   getLabelAccessInfo(IsPIC, Subtarget, MOHiFlag, MOLoFlag);
2712   SDValue TgtBAHi = DAG.getTargetBlockAddress(BA, PtrVT, 0, MOHiFlag);
2713   SDValue TgtBALo = DAG.getTargetBlockAddress(BA, PtrVT, 0, MOLoFlag);
2714   return LowerLabelRef(TgtBAHi, TgtBALo, IsPIC, DAG);
2715 }
2716 
2717 SDValue PPCTargetLowering::LowerGlobalTLSAddress(SDValue Op,
2718                                               SelectionDAG &DAG) const {
2719   // FIXME: TLS addresses currently use medium model code sequences,
2720   // which is the most useful form.  Eventually support for small and
2721   // large models could be added if users need it, at the cost of
2722   // additional complexity.
2723   GlobalAddressSDNode *GA = cast<GlobalAddressSDNode>(Op);
2724   if (DAG.getTarget().useEmulatedTLS())
2725     return LowerToTLSEmulatedModel(GA, DAG);
2726 
2727   SDLoc dl(GA);
2728   const GlobalValue *GV = GA->getGlobal();
2729   EVT PtrVT = getPointerTy(DAG.getDataLayout());
2730   bool is64bit = Subtarget.isPPC64();
2731   const Module *M = DAG.getMachineFunction().getFunction().getParent();
2732   PICLevel::Level picLevel = M->getPICLevel();
2733 
2734   TLSModel::Model Model = getTargetMachine().getTLSModel(GV);
2735 
2736   if (Model == TLSModel::LocalExec) {
2737     SDValue TGAHi = DAG.getTargetGlobalAddress(GV, dl, PtrVT, 0,
2738                                                PPCII::MO_TPREL_HA);
2739     SDValue TGALo = DAG.getTargetGlobalAddress(GV, dl, PtrVT, 0,
2740                                                PPCII::MO_TPREL_LO);
2741     SDValue TLSReg = is64bit ? DAG.getRegister(PPC::X13, MVT::i64)
2742                              : DAG.getRegister(PPC::R2, MVT::i32);
2743 
2744     SDValue Hi = DAG.getNode(PPCISD::Hi, dl, PtrVT, TGAHi, TLSReg);
2745     return DAG.getNode(PPCISD::Lo, dl, PtrVT, TGALo, Hi);
2746   }
2747 
2748   if (Model == TLSModel::InitialExec) {
2749     SDValue TGA = DAG.getTargetGlobalAddress(GV, dl, PtrVT, 0, 0);
2750     SDValue TGATLS = DAG.getTargetGlobalAddress(GV, dl, PtrVT, 0,
2751                                                 PPCII::MO_TLS);
2752     SDValue GOTPtr;
2753     if (is64bit) {
2754       setUsesTOCBasePtr(DAG);
2755       SDValue GOTReg = DAG.getRegister(PPC::X2, MVT::i64);
2756       GOTPtr = DAG.getNode(PPCISD::ADDIS_GOT_TPREL_HA, dl,
2757                            PtrVT, GOTReg, TGA);
2758     } else
2759       GOTPtr = DAG.getNode(PPCISD::PPC32_GOT, dl, PtrVT);
2760     SDValue TPOffset = DAG.getNode(PPCISD::LD_GOT_TPREL_L, dl,
2761                                    PtrVT, TGA, GOTPtr);
2762     return DAG.getNode(PPCISD::ADD_TLS, dl, PtrVT, TPOffset, TGATLS);
2763   }
2764 
2765   if (Model == TLSModel::GeneralDynamic) {
2766     SDValue TGA = DAG.getTargetGlobalAddress(GV, dl, PtrVT, 0, 0);
2767     SDValue GOTPtr;
2768     if (is64bit) {
2769       setUsesTOCBasePtr(DAG);
2770       SDValue GOTReg = DAG.getRegister(PPC::X2, MVT::i64);
2771       GOTPtr = DAG.getNode(PPCISD::ADDIS_TLSGD_HA, dl, PtrVT,
2772                                    GOTReg, TGA);
2773     } else {
2774       if (picLevel == PICLevel::SmallPIC)
2775         GOTPtr = DAG.getNode(PPCISD::GlobalBaseReg, dl, PtrVT);
2776       else
2777         GOTPtr = DAG.getNode(PPCISD::PPC32_PICGOT, dl, PtrVT);
2778     }
2779     return DAG.getNode(PPCISD::ADDI_TLSGD_L_ADDR, dl, PtrVT,
2780                        GOTPtr, TGA, TGA);
2781   }
2782 
2783   if (Model == TLSModel::LocalDynamic) {
2784     SDValue TGA = DAG.getTargetGlobalAddress(GV, dl, PtrVT, 0, 0);
2785     SDValue GOTPtr;
2786     if (is64bit) {
2787       setUsesTOCBasePtr(DAG);
2788       SDValue GOTReg = DAG.getRegister(PPC::X2, MVT::i64);
2789       GOTPtr = DAG.getNode(PPCISD::ADDIS_TLSLD_HA, dl, PtrVT,
2790                            GOTReg, TGA);
2791     } else {
2792       if (picLevel == PICLevel::SmallPIC)
2793         GOTPtr = DAG.getNode(PPCISD::GlobalBaseReg, dl, PtrVT);
2794       else
2795         GOTPtr = DAG.getNode(PPCISD::PPC32_PICGOT, dl, PtrVT);
2796     }
2797     SDValue TLSAddr = DAG.getNode(PPCISD::ADDI_TLSLD_L_ADDR, dl,
2798                                   PtrVT, GOTPtr, TGA, TGA);
2799     SDValue DtvOffsetHi = DAG.getNode(PPCISD::ADDIS_DTPREL_HA, dl,
2800                                       PtrVT, TLSAddr, TGA);
2801     return DAG.getNode(PPCISD::ADDI_DTPREL_L, dl, PtrVT, DtvOffsetHi, TGA);
2802   }
2803 
2804   llvm_unreachable("Unknown TLS model!");
2805 }
2806 
2807 SDValue PPCTargetLowering::LowerGlobalAddress(SDValue Op,
2808                                               SelectionDAG &DAG) const {
2809   EVT PtrVT = Op.getValueType();
2810   GlobalAddressSDNode *GSDN = cast<GlobalAddressSDNode>(Op);
2811   SDLoc DL(GSDN);
2812   const GlobalValue *GV = GSDN->getGlobal();
2813 
2814   // 64-bit SVR4 ABI code is always position-independent.
2815   // The actual address of the GlobalValue is stored in the TOC.
2816   if (Subtarget.isSVR4ABI() && Subtarget.isPPC64()) {
2817     setUsesTOCBasePtr(DAG);
2818     SDValue GA = DAG.getTargetGlobalAddress(GV, DL, PtrVT, GSDN->getOffset());
2819     return getTOCEntry(DAG, DL, true, GA);
2820   }
2821 
2822   unsigned MOHiFlag, MOLoFlag;
2823   bool IsPIC = isPositionIndependent();
2824   getLabelAccessInfo(IsPIC, Subtarget, MOHiFlag, MOLoFlag, GV);
2825 
2826   if (IsPIC && Subtarget.isSVR4ABI()) {
2827     SDValue GA = DAG.getTargetGlobalAddress(GV, DL, PtrVT,
2828                                             GSDN->getOffset(),
2829                                             PPCII::MO_PIC_FLAG);
2830     return getTOCEntry(DAG, DL, false, GA);
2831   }
2832 
2833   SDValue GAHi =
2834     DAG.getTargetGlobalAddress(GV, DL, PtrVT, GSDN->getOffset(), MOHiFlag);
2835   SDValue GALo =
2836     DAG.getTargetGlobalAddress(GV, DL, PtrVT, GSDN->getOffset(), MOLoFlag);
2837 
2838   SDValue Ptr = LowerLabelRef(GAHi, GALo, IsPIC, DAG);
2839 
2840   // If the global reference is actually to a non-lazy-pointer, we have to do an
2841   // extra load to get the address of the global.
2842   if (MOHiFlag & PPCII::MO_NLP_FLAG)
2843     Ptr = DAG.getLoad(PtrVT, DL, DAG.getEntryNode(), Ptr, MachinePointerInfo());
2844   return Ptr;
2845 }
2846 
2847 SDValue PPCTargetLowering::LowerSETCC(SDValue Op, SelectionDAG &DAG) const {
2848   ISD::CondCode CC = cast<CondCodeSDNode>(Op.getOperand(2))->get();
2849   SDLoc dl(Op);
2850 
2851   if (Op.getValueType() == MVT::v2i64) {
2852     // When the operands themselves are v2i64 values, we need to do something
2853     // special because VSX has no underlying comparison operations for these.
2854     if (Op.getOperand(0).getValueType() == MVT::v2i64) {
2855       // Equality can be handled by casting to the legal type for Altivec
2856       // comparisons, everything else needs to be expanded.
2857       if (CC == ISD::SETEQ || CC == ISD::SETNE) {
2858         return DAG.getNode(ISD::BITCAST, dl, MVT::v2i64,
2859                  DAG.getSetCC(dl, MVT::v4i32,
2860                    DAG.getNode(ISD::BITCAST, dl, MVT::v4i32, Op.getOperand(0)),
2861                    DAG.getNode(ISD::BITCAST, dl, MVT::v4i32, Op.getOperand(1)),
2862                    CC));
2863       }
2864 
2865       return SDValue();
2866     }
2867 
2868     // We handle most of these in the usual way.
2869     return Op;
2870   }
2871 
2872   // If we're comparing for equality to zero, expose the fact that this is
2873   // implemented as a ctlz/srl pair on ppc, so that the dag combiner can
2874   // fold the new nodes.
2875   if (SDValue V = lowerCmpEqZeroToCtlzSrl(Op, DAG))
2876     return V;
2877 
2878   if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op.getOperand(1))) {
2879     // Leave comparisons against 0 and -1 alone for now, since they're usually
2880     // optimized.  FIXME: revisit this when we can custom lower all setcc
2881     // optimizations.
2882     if (C->isAllOnesValue() || C->isNullValue())
2883       return SDValue();
2884   }
2885 
2886   // If we have an integer seteq/setne, turn it into a compare against zero
2887   // by xor'ing the rhs with the lhs, which is faster than setting a
2888   // condition register, reading it back out, and masking the correct bit.  The
2889   // normal approach here uses sub to do this instead of xor.  Using xor exposes
2890   // the result to other bit-twiddling opportunities.
2891   EVT LHSVT = Op.getOperand(0).getValueType();
2892   if (LHSVT.isInteger() && (CC == ISD::SETEQ || CC == ISD::SETNE)) {
2893     EVT VT = Op.getValueType();
2894     SDValue Sub = DAG.getNode(ISD::XOR, dl, LHSVT, Op.getOperand(0),
2895                                 Op.getOperand(1));
2896     return DAG.getSetCC(dl, VT, Sub, DAG.getConstant(0, dl, LHSVT), CC);
2897   }
2898   return SDValue();
2899 }
2900 
2901 SDValue PPCTargetLowering::LowerVAARG(SDValue Op, SelectionDAG &DAG) const {
2902   SDNode *Node = Op.getNode();
2903   EVT VT = Node->getValueType(0);
2904   EVT PtrVT = getPointerTy(DAG.getDataLayout());
2905   SDValue InChain = Node->getOperand(0);
2906   SDValue VAListPtr = Node->getOperand(1);
2907   const Value *SV = cast<SrcValueSDNode>(Node->getOperand(2))->getValue();
2908   SDLoc dl(Node);
2909 
2910   assert(!Subtarget.isPPC64() && "LowerVAARG is PPC32 only");
2911 
2912   // gpr_index
2913   SDValue GprIndex = DAG.getExtLoad(ISD::ZEXTLOAD, dl, MVT::i32, InChain,
2914                                     VAListPtr, MachinePointerInfo(SV), MVT::i8);
2915   InChain = GprIndex.getValue(1);
2916 
2917   if (VT == MVT::i64) {
2918     // Check if GprIndex is even
2919     SDValue GprAnd = DAG.getNode(ISD::AND, dl, MVT::i32, GprIndex,
2920                                  DAG.getConstant(1, dl, MVT::i32));
2921     SDValue CC64 = DAG.getSetCC(dl, MVT::i32, GprAnd,
2922                                 DAG.getConstant(0, dl, MVT::i32), ISD::SETNE);
2923     SDValue GprIndexPlusOne = DAG.getNode(ISD::ADD, dl, MVT::i32, GprIndex,
2924                                           DAG.getConstant(1, dl, MVT::i32));
2925     // Align GprIndex to be even if it isn't
2926     GprIndex = DAG.getNode(ISD::SELECT, dl, MVT::i32, CC64, GprIndexPlusOne,
2927                            GprIndex);
2928   }
2929 
2930   // fpr index is 1 byte after gpr
2931   SDValue FprPtr = DAG.getNode(ISD::ADD, dl, PtrVT, VAListPtr,
2932                                DAG.getConstant(1, dl, MVT::i32));
2933 
2934   // fpr
2935   SDValue FprIndex = DAG.getExtLoad(ISD::ZEXTLOAD, dl, MVT::i32, InChain,
2936                                     FprPtr, MachinePointerInfo(SV), MVT::i8);
2937   InChain = FprIndex.getValue(1);
2938 
2939   SDValue RegSaveAreaPtr = DAG.getNode(ISD::ADD, dl, PtrVT, VAListPtr,
2940                                        DAG.getConstant(8, dl, MVT::i32));
2941 
2942   SDValue OverflowAreaPtr = DAG.getNode(ISD::ADD, dl, PtrVT, VAListPtr,
2943                                         DAG.getConstant(4, dl, MVT::i32));
2944 
2945   // areas
2946   SDValue OverflowArea =
2947       DAG.getLoad(MVT::i32, dl, InChain, OverflowAreaPtr, MachinePointerInfo());
2948   InChain = OverflowArea.getValue(1);
2949 
2950   SDValue RegSaveArea =
2951       DAG.getLoad(MVT::i32, dl, InChain, RegSaveAreaPtr, MachinePointerInfo());
2952   InChain = RegSaveArea.getValue(1);
2953 
2954   // select overflow_area if index > 8
2955   SDValue CC = DAG.getSetCC(dl, MVT::i32, VT.isInteger() ? GprIndex : FprIndex,
2956                             DAG.getConstant(8, dl, MVT::i32), ISD::SETLT);
2957 
2958   // adjustment constant gpr_index * 4/8
2959   SDValue RegConstant = DAG.getNode(ISD::MUL, dl, MVT::i32,
2960                                     VT.isInteger() ? GprIndex : FprIndex,
2961                                     DAG.getConstant(VT.isInteger() ? 4 : 8, dl,
2962                                                     MVT::i32));
2963 
2964   // OurReg = RegSaveArea + RegConstant
2965   SDValue OurReg = DAG.getNode(ISD::ADD, dl, PtrVT, RegSaveArea,
2966                                RegConstant);
2967 
2968   // Floating types are 32 bytes into RegSaveArea
2969   if (VT.isFloatingPoint())
2970     OurReg = DAG.getNode(ISD::ADD, dl, PtrVT, OurReg,
2971                          DAG.getConstant(32, dl, MVT::i32));
2972 
2973   // increase {f,g}pr_index by 1 (or 2 if VT is i64)
2974   SDValue IndexPlus1 = DAG.getNode(ISD::ADD, dl, MVT::i32,
2975                                    VT.isInteger() ? GprIndex : FprIndex,
2976                                    DAG.getConstant(VT == MVT::i64 ? 2 : 1, dl,
2977                                                    MVT::i32));
2978 
2979   InChain = DAG.getTruncStore(InChain, dl, IndexPlus1,
2980                               VT.isInteger() ? VAListPtr : FprPtr,
2981                               MachinePointerInfo(SV), MVT::i8);
2982 
2983   // determine if we should load from reg_save_area or overflow_area
2984   SDValue Result = DAG.getNode(ISD::SELECT, dl, PtrVT, CC, OurReg, OverflowArea);
2985 
2986   // increase overflow_area by 4/8 if gpr/fpr > 8
2987   SDValue OverflowAreaPlusN = DAG.getNode(ISD::ADD, dl, PtrVT, OverflowArea,
2988                                           DAG.getConstant(VT.isInteger() ? 4 : 8,
2989                                           dl, MVT::i32));
2990 
2991   OverflowArea = DAG.getNode(ISD::SELECT, dl, MVT::i32, CC, OverflowArea,
2992                              OverflowAreaPlusN);
2993 
2994   InChain = DAG.getTruncStore(InChain, dl, OverflowArea, OverflowAreaPtr,
2995                               MachinePointerInfo(), MVT::i32);
2996 
2997   return DAG.getLoad(VT, dl, InChain, Result, MachinePointerInfo());
2998 }
2999 
3000 SDValue PPCTargetLowering::LowerVACOPY(SDValue Op, SelectionDAG &DAG) const {
3001   assert(!Subtarget.isPPC64() && "LowerVACOPY is PPC32 only");
3002 
3003   // We have to copy the entire va_list struct:
3004   // 2*sizeof(char) + 2 Byte alignment + 2*sizeof(char*) = 12 Byte
3005   return DAG.getMemcpy(Op.getOperand(0), Op,
3006                        Op.getOperand(1), Op.getOperand(2),
3007                        DAG.getConstant(12, SDLoc(Op), MVT::i32), 8, false, true,
3008                        false, MachinePointerInfo(), MachinePointerInfo());
3009 }
3010 
3011 SDValue PPCTargetLowering::LowerADJUST_TRAMPOLINE(SDValue Op,
3012                                                   SelectionDAG &DAG) const {
3013   return Op.getOperand(0);
3014 }
3015 
3016 SDValue PPCTargetLowering::LowerINIT_TRAMPOLINE(SDValue Op,
3017                                                 SelectionDAG &DAG) const {
3018   SDValue Chain = Op.getOperand(0);
3019   SDValue Trmp = Op.getOperand(1); // trampoline
3020   SDValue FPtr = Op.getOperand(2); // nested function
3021   SDValue Nest = Op.getOperand(3); // 'nest' parameter value
3022   SDLoc dl(Op);
3023 
3024   EVT PtrVT = getPointerTy(DAG.getDataLayout());
3025   bool isPPC64 = (PtrVT == MVT::i64);
3026   Type *IntPtrTy = DAG.getDataLayout().getIntPtrType(*DAG.getContext());
3027 
3028   TargetLowering::ArgListTy Args;
3029   TargetLowering::ArgListEntry Entry;
3030 
3031   Entry.Ty = IntPtrTy;
3032   Entry.Node = Trmp; Args.push_back(Entry);
3033 
3034   // TrampSize == (isPPC64 ? 48 : 40);
3035   Entry.Node = DAG.getConstant(isPPC64 ? 48 : 40, dl,
3036                                isPPC64 ? MVT::i64 : MVT::i32);
3037   Args.push_back(Entry);
3038 
3039   Entry.Node = FPtr; Args.push_back(Entry);
3040   Entry.Node = Nest; Args.push_back(Entry);
3041 
3042   // Lower to a call to __trampoline_setup(Trmp, TrampSize, FPtr, ctx_reg)
3043   TargetLowering::CallLoweringInfo CLI(DAG);
3044   CLI.setDebugLoc(dl).setChain(Chain).setLibCallee(
3045       CallingConv::C, Type::getVoidTy(*DAG.getContext()),
3046       DAG.getExternalSymbol("__trampoline_setup", PtrVT), std::move(Args));
3047 
3048   std::pair<SDValue, SDValue> CallResult = LowerCallTo(CLI);
3049   return CallResult.second;
3050 }
3051 
3052 SDValue PPCTargetLowering::LowerVASTART(SDValue Op, SelectionDAG &DAG) const {
3053   MachineFunction &MF = DAG.getMachineFunction();
3054   PPCFunctionInfo *FuncInfo = MF.getInfo<PPCFunctionInfo>();
3055   EVT PtrVT = getPointerTy(MF.getDataLayout());
3056 
3057   SDLoc dl(Op);
3058 
3059   if (Subtarget.isDarwinABI() || Subtarget.isPPC64()) {
3060     // vastart just stores the address of the VarArgsFrameIndex slot into the
3061     // memory location argument.
3062     SDValue FR = DAG.getFrameIndex(FuncInfo->getVarArgsFrameIndex(), PtrVT);
3063     const Value *SV = cast<SrcValueSDNode>(Op.getOperand(2))->getValue();
3064     return DAG.getStore(Op.getOperand(0), dl, FR, Op.getOperand(1),
3065                         MachinePointerInfo(SV));
3066   }
3067 
3068   // For the 32-bit SVR4 ABI we follow the layout of the va_list struct.
3069   // We suppose the given va_list is already allocated.
3070   //
3071   // typedef struct {
3072   //  char gpr;     /* index into the array of 8 GPRs
3073   //                 * stored in the register save area
3074   //                 * gpr=0 corresponds to r3,
3075   //                 * gpr=1 to r4, etc.
3076   //                 */
3077   //  char fpr;     /* index into the array of 8 FPRs
3078   //                 * stored in the register save area
3079   //                 * fpr=0 corresponds to f1,
3080   //                 * fpr=1 to f2, etc.
3081   //                 */
3082   //  char *overflow_arg_area;
3083   //                /* location on stack that holds
3084   //                 * the next overflow argument
3085   //                 */
3086   //  char *reg_save_area;
3087   //               /* where r3:r10 and f1:f8 (if saved)
3088   //                * are stored
3089   //                */
3090   // } va_list[1];
3091 
3092   SDValue ArgGPR = DAG.getConstant(FuncInfo->getVarArgsNumGPR(), dl, MVT::i32);
3093   SDValue ArgFPR = DAG.getConstant(FuncInfo->getVarArgsNumFPR(), dl, MVT::i32);
3094   SDValue StackOffsetFI = DAG.getFrameIndex(FuncInfo->getVarArgsStackOffset(),
3095                                             PtrVT);
3096   SDValue FR = DAG.getFrameIndex(FuncInfo->getVarArgsFrameIndex(),
3097                                  PtrVT);
3098 
3099   uint64_t FrameOffset = PtrVT.getSizeInBits()/8;
3100   SDValue ConstFrameOffset = DAG.getConstant(FrameOffset, dl, PtrVT);
3101 
3102   uint64_t StackOffset = PtrVT.getSizeInBits()/8 - 1;
3103   SDValue ConstStackOffset = DAG.getConstant(StackOffset, dl, PtrVT);
3104 
3105   uint64_t FPROffset = 1;
3106   SDValue ConstFPROffset = DAG.getConstant(FPROffset, dl, PtrVT);
3107 
3108   const Value *SV = cast<SrcValueSDNode>(Op.getOperand(2))->getValue();
3109 
3110   // Store first byte : number of int regs
3111   SDValue firstStore =
3112       DAG.getTruncStore(Op.getOperand(0), dl, ArgGPR, Op.getOperand(1),
3113                         MachinePointerInfo(SV), MVT::i8);
3114   uint64_t nextOffset = FPROffset;
3115   SDValue nextPtr = DAG.getNode(ISD::ADD, dl, PtrVT, Op.getOperand(1),
3116                                   ConstFPROffset);
3117 
3118   // Store second byte : number of float regs
3119   SDValue secondStore =
3120       DAG.getTruncStore(firstStore, dl, ArgFPR, nextPtr,
3121                         MachinePointerInfo(SV, nextOffset), MVT::i8);
3122   nextOffset += StackOffset;
3123   nextPtr = DAG.getNode(ISD::ADD, dl, PtrVT, nextPtr, ConstStackOffset);
3124 
3125   // Store second word : arguments given on stack
3126   SDValue thirdStore = DAG.getStore(secondStore, dl, StackOffsetFI, nextPtr,
3127                                     MachinePointerInfo(SV, nextOffset));
3128   nextOffset += FrameOffset;
3129   nextPtr = DAG.getNode(ISD::ADD, dl, PtrVT, nextPtr, ConstFrameOffset);
3130 
3131   // Store third word : arguments given in registers
3132   return DAG.getStore(thirdStore, dl, FR, nextPtr,
3133                       MachinePointerInfo(SV, nextOffset));
3134 }
3135 
3136 #include "PPCGenCallingConv.inc"
3137 
3138 // Function whose sole purpose is to kill compiler warnings
3139 // stemming from unused functions included from PPCGenCallingConv.inc.
3140 CCAssignFn *PPCTargetLowering::useFastISelCCs(unsigned Flag) const {
3141   return Flag ? CC_PPC64_ELF_FIS : RetCC_PPC64_ELF_FIS;
3142 }
3143 
3144 bool llvm::CC_PPC32_SVR4_Custom_Dummy(unsigned &ValNo, MVT &ValVT, MVT &LocVT,
3145                                       CCValAssign::LocInfo &LocInfo,
3146                                       ISD::ArgFlagsTy &ArgFlags,
3147                                       CCState &State) {
3148   return true;
3149 }
3150 
3151 bool llvm::CC_PPC32_SVR4_Custom_AlignArgRegs(unsigned &ValNo, MVT &ValVT,
3152                                              MVT &LocVT,
3153                                              CCValAssign::LocInfo &LocInfo,
3154                                              ISD::ArgFlagsTy &ArgFlags,
3155                                              CCState &State) {
3156   static const MCPhysReg ArgRegs[] = {
3157     PPC::R3, PPC::R4, PPC::R5, PPC::R6,
3158     PPC::R7, PPC::R8, PPC::R9, PPC::R10,
3159   };
3160   const unsigned NumArgRegs = array_lengthof(ArgRegs);
3161 
3162   unsigned RegNum = State.getFirstUnallocated(ArgRegs);
3163 
3164   // Skip one register if the first unallocated register has an even register
3165   // number and there are still argument registers available which have not been
3166   // allocated yet. RegNum is actually an index into ArgRegs, which means we
3167   // need to skip a register if RegNum is odd.
3168   if (RegNum != NumArgRegs && RegNum % 2 == 1) {
3169     State.AllocateReg(ArgRegs[RegNum]);
3170   }
3171 
3172   // Always return false here, as this function only makes sure that the first
3173   // unallocated register has an odd register number and does not actually
3174   // allocate a register for the current argument.
3175   return false;
3176 }
3177 
3178 bool
3179 llvm::CC_PPC32_SVR4_Custom_SkipLastArgRegsPPCF128(unsigned &ValNo, MVT &ValVT,
3180                                                   MVT &LocVT,
3181                                                   CCValAssign::LocInfo &LocInfo,
3182                                                   ISD::ArgFlagsTy &ArgFlags,
3183                                                   CCState &State) {
3184   static const MCPhysReg ArgRegs[] = {
3185     PPC::R3, PPC::R4, PPC::R5, PPC::R6,
3186     PPC::R7, PPC::R8, PPC::R9, PPC::R10,
3187   };
3188   const unsigned NumArgRegs = array_lengthof(ArgRegs);
3189 
3190   unsigned RegNum = State.getFirstUnallocated(ArgRegs);
3191   int RegsLeft = NumArgRegs - RegNum;
3192 
3193   // Skip if there is not enough registers left for long double type (4 gpr regs
3194   // in soft float mode) and put long double argument on the stack.
3195   if (RegNum != NumArgRegs && RegsLeft < 4) {
3196     for (int i = 0; i < RegsLeft; i++) {
3197       State.AllocateReg(ArgRegs[RegNum + i]);
3198     }
3199   }
3200 
3201   return false;
3202 }
3203 
3204 bool llvm::CC_PPC32_SVR4_Custom_AlignFPArgRegs(unsigned &ValNo, MVT &ValVT,
3205                                                MVT &LocVT,
3206                                                CCValAssign::LocInfo &LocInfo,
3207                                                ISD::ArgFlagsTy &ArgFlags,
3208                                                CCState &State) {
3209   static const MCPhysReg ArgRegs[] = {
3210     PPC::F1, PPC::F2, PPC::F3, PPC::F4, PPC::F5, PPC::F6, PPC::F7,
3211     PPC::F8
3212   };
3213 
3214   const unsigned NumArgRegs = array_lengthof(ArgRegs);
3215 
3216   unsigned RegNum = State.getFirstUnallocated(ArgRegs);
3217 
3218   // If there is only one Floating-point register left we need to put both f64
3219   // values of a split ppc_fp128 value on the stack.
3220   if (RegNum != NumArgRegs && ArgRegs[RegNum] == PPC::F8) {
3221     State.AllocateReg(ArgRegs[RegNum]);
3222   }
3223 
3224   // Always return false here, as this function only makes sure that the two f64
3225   // values a ppc_fp128 value is split into are both passed in registers or both
3226   // passed on the stack and does not actually allocate a register for the
3227   // current argument.
3228   return false;
3229 }
3230 
3231 /// FPR - The set of FP registers that should be allocated for arguments,
3232 /// on Darwin.
3233 static const MCPhysReg FPR[] = {PPC::F1,  PPC::F2,  PPC::F3, PPC::F4, PPC::F5,
3234                                 PPC::F6,  PPC::F7,  PPC::F8, PPC::F9, PPC::F10,
3235                                 PPC::F11, PPC::F12, PPC::F13};
3236 
3237 /// QFPR - The set of QPX registers that should be allocated for arguments.
3238 static const MCPhysReg QFPR[] = {
3239     PPC::QF1, PPC::QF2, PPC::QF3,  PPC::QF4,  PPC::QF5,  PPC::QF6, PPC::QF7,
3240     PPC::QF8, PPC::QF9, PPC::QF10, PPC::QF11, PPC::QF12, PPC::QF13};
3241 
3242 /// CalculateStackSlotSize - Calculates the size reserved for this argument on
3243 /// the stack.
3244 static unsigned CalculateStackSlotSize(EVT ArgVT, ISD::ArgFlagsTy Flags,
3245                                        unsigned PtrByteSize) {
3246   unsigned ArgSize = ArgVT.getStoreSize();
3247   if (Flags.isByVal())
3248     ArgSize = Flags.getByValSize();
3249 
3250   // Round up to multiples of the pointer size, except for array members,
3251   // which are always packed.
3252   if (!Flags.isInConsecutiveRegs())
3253     ArgSize = ((ArgSize + PtrByteSize - 1)/PtrByteSize) * PtrByteSize;
3254 
3255   return ArgSize;
3256 }
3257 
3258 /// CalculateStackSlotAlignment - Calculates the alignment of this argument
3259 /// on the stack.
3260 static unsigned CalculateStackSlotAlignment(EVT ArgVT, EVT OrigVT,
3261                                             ISD::ArgFlagsTy Flags,
3262                                             unsigned PtrByteSize) {
3263   unsigned Align = PtrByteSize;
3264 
3265   // Altivec parameters are padded to a 16 byte boundary.
3266   if (ArgVT == MVT::v4f32 || ArgVT == MVT::v4i32 ||
3267       ArgVT == MVT::v8i16 || ArgVT == MVT::v16i8 ||
3268       ArgVT == MVT::v2f64 || ArgVT == MVT::v2i64 ||
3269       ArgVT == MVT::v1i128 || ArgVT == MVT::f128)
3270     Align = 16;
3271   // QPX vector types stored in double-precision are padded to a 32 byte
3272   // boundary.
3273   else if (ArgVT == MVT::v4f64 || ArgVT == MVT::v4i1)
3274     Align = 32;
3275 
3276   // ByVal parameters are aligned as requested.
3277   if (Flags.isByVal()) {
3278     unsigned BVAlign = Flags.getByValAlign();
3279     if (BVAlign > PtrByteSize) {
3280       if (BVAlign % PtrByteSize != 0)
3281           llvm_unreachable(
3282             "ByVal alignment is not a multiple of the pointer size");
3283 
3284       Align = BVAlign;
3285     }
3286   }
3287 
3288   // Array members are always packed to their original alignment.
3289   if (Flags.isInConsecutiveRegs()) {
3290     // If the array member was split into multiple registers, the first
3291     // needs to be aligned to the size of the full type.  (Except for
3292     // ppcf128, which is only aligned as its f64 components.)
3293     if (Flags.isSplit() && OrigVT != MVT::ppcf128)
3294       Align = OrigVT.getStoreSize();
3295     else
3296       Align = ArgVT.getStoreSize();
3297   }
3298 
3299   return Align;
3300 }
3301 
3302 /// CalculateStackSlotUsed - Return whether this argument will use its
3303 /// stack slot (instead of being passed in registers).  ArgOffset,
3304 /// AvailableFPRs, and AvailableVRs must hold the current argument
3305 /// position, and will be updated to account for this argument.
3306 static bool CalculateStackSlotUsed(EVT ArgVT, EVT OrigVT,
3307                                    ISD::ArgFlagsTy Flags,
3308                                    unsigned PtrByteSize,
3309                                    unsigned LinkageSize,
3310                                    unsigned ParamAreaSize,
3311                                    unsigned &ArgOffset,
3312                                    unsigned &AvailableFPRs,
3313                                    unsigned &AvailableVRs, bool HasQPX) {
3314   bool UseMemory = false;
3315 
3316   // Respect alignment of argument on the stack.
3317   unsigned Align =
3318     CalculateStackSlotAlignment(ArgVT, OrigVT, Flags, PtrByteSize);
3319   ArgOffset = ((ArgOffset + Align - 1) / Align) * Align;
3320   // If there's no space left in the argument save area, we must
3321   // use memory (this check also catches zero-sized arguments).
3322   if (ArgOffset >= LinkageSize + ParamAreaSize)
3323     UseMemory = true;
3324 
3325   // Allocate argument on the stack.
3326   ArgOffset += CalculateStackSlotSize(ArgVT, Flags, PtrByteSize);
3327   if (Flags.isInConsecutiveRegsLast())
3328     ArgOffset = ((ArgOffset + PtrByteSize - 1)/PtrByteSize) * PtrByteSize;
3329   // If we overran the argument save area, we must use memory
3330   // (this check catches arguments passed partially in memory)
3331   if (ArgOffset > LinkageSize + ParamAreaSize)
3332     UseMemory = true;
3333 
3334   // However, if the argument is actually passed in an FPR or a VR,
3335   // we don't use memory after all.
3336   if (!Flags.isByVal()) {
3337     if (ArgVT == MVT::f32 || ArgVT == MVT::f64 ||
3338         // QPX registers overlap with the scalar FP registers.
3339         (HasQPX && (ArgVT == MVT::v4f32 ||
3340                     ArgVT == MVT::v4f64 ||
3341                     ArgVT == MVT::v4i1)))
3342       if (AvailableFPRs > 0) {
3343         --AvailableFPRs;
3344         return false;
3345       }
3346     if (ArgVT == MVT::v4f32 || ArgVT == MVT::v4i32 ||
3347         ArgVT == MVT::v8i16 || ArgVT == MVT::v16i8 ||
3348         ArgVT == MVT::v2f64 || ArgVT == MVT::v2i64 ||
3349         ArgVT == MVT::v1i128 || ArgVT == MVT::f128)
3350       if (AvailableVRs > 0) {
3351         --AvailableVRs;
3352         return false;
3353       }
3354   }
3355 
3356   return UseMemory;
3357 }
3358 
3359 /// EnsureStackAlignment - Round stack frame size up from NumBytes to
3360 /// ensure minimum alignment required for target.
3361 static unsigned EnsureStackAlignment(const PPCFrameLowering *Lowering,
3362                                      unsigned NumBytes) {
3363   unsigned TargetAlign = Lowering->getStackAlignment();
3364   unsigned AlignMask = TargetAlign - 1;
3365   NumBytes = (NumBytes + AlignMask) & ~AlignMask;
3366   return NumBytes;
3367 }
3368 
3369 SDValue PPCTargetLowering::LowerFormalArguments(
3370     SDValue Chain, CallingConv::ID CallConv, bool isVarArg,
3371     const SmallVectorImpl<ISD::InputArg> &Ins, const SDLoc &dl,
3372     SelectionDAG &DAG, SmallVectorImpl<SDValue> &InVals) const {
3373   if (Subtarget.isSVR4ABI()) {
3374     if (Subtarget.isPPC64())
3375       return LowerFormalArguments_64SVR4(Chain, CallConv, isVarArg, Ins,
3376                                          dl, DAG, InVals);
3377     else
3378       return LowerFormalArguments_32SVR4(Chain, CallConv, isVarArg, Ins,
3379                                          dl, DAG, InVals);
3380   } else {
3381     return LowerFormalArguments_Darwin(Chain, CallConv, isVarArg, Ins,
3382                                        dl, DAG, InVals);
3383   }
3384 }
3385 
3386 SDValue PPCTargetLowering::LowerFormalArguments_32SVR4(
3387     SDValue Chain, CallingConv::ID CallConv, bool isVarArg,
3388     const SmallVectorImpl<ISD::InputArg> &Ins, const SDLoc &dl,
3389     SelectionDAG &DAG, SmallVectorImpl<SDValue> &InVals) const {
3390 
3391   // 32-bit SVR4 ABI Stack Frame Layout:
3392   //              +-----------------------------------+
3393   //        +-->  |            Back chain             |
3394   //        |     +-----------------------------------+
3395   //        |     | Floating-point register save area |
3396   //        |     +-----------------------------------+
3397   //        |     |    General register save area     |
3398   //        |     +-----------------------------------+
3399   //        |     |          CR save word             |
3400   //        |     +-----------------------------------+
3401   //        |     |         VRSAVE save word          |
3402   //        |     +-----------------------------------+
3403   //        |     |         Alignment padding         |
3404   //        |     +-----------------------------------+
3405   //        |     |     Vector register save area     |
3406   //        |     +-----------------------------------+
3407   //        |     |       Local variable space        |
3408   //        |     +-----------------------------------+
3409   //        |     |        Parameter list area        |
3410   //        |     +-----------------------------------+
3411   //        |     |           LR save word            |
3412   //        |     +-----------------------------------+
3413   // SP-->  +---  |            Back chain             |
3414   //              +-----------------------------------+
3415   //
3416   // Specifications:
3417   //   System V Application Binary Interface PowerPC Processor Supplement
3418   //   AltiVec Technology Programming Interface Manual
3419 
3420   MachineFunction &MF = DAG.getMachineFunction();
3421   MachineFrameInfo &MFI = MF.getFrameInfo();
3422   PPCFunctionInfo *FuncInfo = MF.getInfo<PPCFunctionInfo>();
3423 
3424   EVT PtrVT = getPointerTy(MF.getDataLayout());
3425   // Potential tail calls could cause overwriting of argument stack slots.
3426   bool isImmutable = !(getTargetMachine().Options.GuaranteedTailCallOpt &&
3427                        (CallConv == CallingConv::Fast));
3428   unsigned PtrByteSize = 4;
3429 
3430   // Assign locations to all of the incoming arguments.
3431   SmallVector<CCValAssign, 16> ArgLocs;
3432   PPCCCState CCInfo(CallConv, isVarArg, DAG.getMachineFunction(), ArgLocs,
3433                  *DAG.getContext());
3434 
3435   // Reserve space for the linkage area on the stack.
3436   unsigned LinkageSize = Subtarget.getFrameLowering()->getLinkageSize();
3437   CCInfo.AllocateStack(LinkageSize, PtrByteSize);
3438   if (useSoftFloat() || hasSPE())
3439     CCInfo.PreAnalyzeFormalArguments(Ins);
3440 
3441   CCInfo.AnalyzeFormalArguments(Ins, CC_PPC32_SVR4);
3442   CCInfo.clearWasPPCF128();
3443 
3444   for (unsigned i = 0, e = ArgLocs.size(); i != e; ++i) {
3445     CCValAssign &VA = ArgLocs[i];
3446 
3447     // Arguments stored in registers.
3448     if (VA.isRegLoc()) {
3449       const TargetRegisterClass *RC;
3450       EVT ValVT = VA.getValVT();
3451 
3452       switch (ValVT.getSimpleVT().SimpleTy) {
3453         default:
3454           llvm_unreachable("ValVT not supported by formal arguments Lowering");
3455         case MVT::i1:
3456         case MVT::i32:
3457           RC = &PPC::GPRCRegClass;
3458           break;
3459         case MVT::f32:
3460           if (Subtarget.hasP8Vector())
3461             RC = &PPC::VSSRCRegClass;
3462           else if (Subtarget.hasSPE())
3463             RC = &PPC::SPE4RCRegClass;
3464           else
3465             RC = &PPC::F4RCRegClass;
3466           break;
3467         case MVT::f64:
3468           if (Subtarget.hasVSX())
3469             RC = &PPC::VSFRCRegClass;
3470           else if (Subtarget.hasSPE())
3471             RC = &PPC::SPERCRegClass;
3472           else
3473             RC = &PPC::F8RCRegClass;
3474           break;
3475         case MVT::v16i8:
3476         case MVT::v8i16:
3477         case MVT::v4i32:
3478           RC = &PPC::VRRCRegClass;
3479           break;
3480         case MVT::v4f32:
3481           RC = Subtarget.hasQPX() ? &PPC::QSRCRegClass : &PPC::VRRCRegClass;
3482           break;
3483         case MVT::v2f64:
3484         case MVT::v2i64:
3485           RC = &PPC::VRRCRegClass;
3486           break;
3487         case MVT::v4f64:
3488           RC = &PPC::QFRCRegClass;
3489           break;
3490         case MVT::v4i1:
3491           RC = &PPC::QBRCRegClass;
3492           break;
3493       }
3494 
3495       // Transform the arguments stored in physical registers into virtual ones.
3496       unsigned Reg = MF.addLiveIn(VA.getLocReg(), RC);
3497       SDValue ArgValue = DAG.getCopyFromReg(Chain, dl, Reg,
3498                                             ValVT == MVT::i1 ? MVT::i32 : ValVT);
3499 
3500       if (ValVT == MVT::i1)
3501         ArgValue = DAG.getNode(ISD::TRUNCATE, dl, MVT::i1, ArgValue);
3502 
3503       InVals.push_back(ArgValue);
3504     } else {
3505       // Argument stored in memory.
3506       assert(VA.isMemLoc());
3507 
3508       unsigned ArgSize = VA.getLocVT().getStoreSize();
3509       int FI = MFI.CreateFixedObject(ArgSize, VA.getLocMemOffset(),
3510                                      isImmutable);
3511 
3512       // Create load nodes to retrieve arguments from the stack.
3513       SDValue FIN = DAG.getFrameIndex(FI, PtrVT);
3514       InVals.push_back(
3515           DAG.getLoad(VA.getValVT(), dl, Chain, FIN, MachinePointerInfo()));
3516     }
3517   }
3518 
3519   // Assign locations to all of the incoming aggregate by value arguments.
3520   // Aggregates passed by value are stored in the local variable space of the
3521   // caller's stack frame, right above the parameter list area.
3522   SmallVector<CCValAssign, 16> ByValArgLocs;
3523   CCState CCByValInfo(CallConv, isVarArg, DAG.getMachineFunction(),
3524                       ByValArgLocs, *DAG.getContext());
3525 
3526   // Reserve stack space for the allocations in CCInfo.
3527   CCByValInfo.AllocateStack(CCInfo.getNextStackOffset(), PtrByteSize);
3528 
3529   CCByValInfo.AnalyzeFormalArguments(Ins, CC_PPC32_SVR4_ByVal);
3530 
3531   // Area that is at least reserved in the caller of this function.
3532   unsigned MinReservedArea = CCByValInfo.getNextStackOffset();
3533   MinReservedArea = std::max(MinReservedArea, LinkageSize);
3534 
3535   // Set the size that is at least reserved in caller of this function.  Tail
3536   // call optimized function's reserved stack space needs to be aligned so that
3537   // taking the difference between two stack areas will result in an aligned
3538   // stack.
3539   MinReservedArea =
3540       EnsureStackAlignment(Subtarget.getFrameLowering(), MinReservedArea);
3541   FuncInfo->setMinReservedArea(MinReservedArea);
3542 
3543   SmallVector<SDValue, 8> MemOps;
3544 
3545   // If the function takes variable number of arguments, make a frame index for
3546   // the start of the first vararg value... for expansion of llvm.va_start.
3547   if (isVarArg) {
3548     static const MCPhysReg GPArgRegs[] = {
3549       PPC::R3, PPC::R4, PPC::R5, PPC::R6,
3550       PPC::R7, PPC::R8, PPC::R9, PPC::R10,
3551     };
3552     const unsigned NumGPArgRegs = array_lengthof(GPArgRegs);
3553 
3554     static const MCPhysReg FPArgRegs[] = {
3555       PPC::F1, PPC::F2, PPC::F3, PPC::F4, PPC::F5, PPC::F6, PPC::F7,
3556       PPC::F8
3557     };
3558     unsigned NumFPArgRegs = array_lengthof(FPArgRegs);
3559 
3560     if (useSoftFloat() || hasSPE())
3561        NumFPArgRegs = 0;
3562 
3563     FuncInfo->setVarArgsNumGPR(CCInfo.getFirstUnallocated(GPArgRegs));
3564     FuncInfo->setVarArgsNumFPR(CCInfo.getFirstUnallocated(FPArgRegs));
3565 
3566     // Make room for NumGPArgRegs and NumFPArgRegs.
3567     int Depth = NumGPArgRegs * PtrVT.getSizeInBits()/8 +
3568                 NumFPArgRegs * MVT(MVT::f64).getSizeInBits()/8;
3569 
3570     FuncInfo->setVarArgsStackOffset(
3571       MFI.CreateFixedObject(PtrVT.getSizeInBits()/8,
3572                             CCInfo.getNextStackOffset(), true));
3573 
3574     FuncInfo->setVarArgsFrameIndex(MFI.CreateStackObject(Depth, 8, false));
3575     SDValue FIN = DAG.getFrameIndex(FuncInfo->getVarArgsFrameIndex(), PtrVT);
3576 
3577     // The fixed integer arguments of a variadic function are stored to the
3578     // VarArgsFrameIndex on the stack so that they may be loaded by
3579     // dereferencing the result of va_next.
3580     for (unsigned GPRIndex = 0; GPRIndex != NumGPArgRegs; ++GPRIndex) {
3581       // Get an existing live-in vreg, or add a new one.
3582       unsigned VReg = MF.getRegInfo().getLiveInVirtReg(GPArgRegs[GPRIndex]);
3583       if (!VReg)
3584         VReg = MF.addLiveIn(GPArgRegs[GPRIndex], &PPC::GPRCRegClass);
3585 
3586       SDValue Val = DAG.getCopyFromReg(Chain, dl, VReg, PtrVT);
3587       SDValue Store =
3588           DAG.getStore(Val.getValue(1), dl, Val, FIN, MachinePointerInfo());
3589       MemOps.push_back(Store);
3590       // Increment the address by four for the next argument to store
3591       SDValue PtrOff = DAG.getConstant(PtrVT.getSizeInBits()/8, dl, PtrVT);
3592       FIN = DAG.getNode(ISD::ADD, dl, PtrOff.getValueType(), FIN, PtrOff);
3593     }
3594 
3595     // FIXME 32-bit SVR4: We only need to save FP argument registers if CR bit 6
3596     // is set.
3597     // The double arguments are stored to the VarArgsFrameIndex
3598     // on the stack.
3599     for (unsigned FPRIndex = 0; FPRIndex != NumFPArgRegs; ++FPRIndex) {
3600       // Get an existing live-in vreg, or add a new one.
3601       unsigned VReg = MF.getRegInfo().getLiveInVirtReg(FPArgRegs[FPRIndex]);
3602       if (!VReg)
3603         VReg = MF.addLiveIn(FPArgRegs[FPRIndex], &PPC::F8RCRegClass);
3604 
3605       SDValue Val = DAG.getCopyFromReg(Chain, dl, VReg, MVT::f64);
3606       SDValue Store =
3607           DAG.getStore(Val.getValue(1), dl, Val, FIN, MachinePointerInfo());
3608       MemOps.push_back(Store);
3609       // Increment the address by eight for the next argument to store
3610       SDValue PtrOff = DAG.getConstant(MVT(MVT::f64).getSizeInBits()/8, dl,
3611                                          PtrVT);
3612       FIN = DAG.getNode(ISD::ADD, dl, PtrOff.getValueType(), FIN, PtrOff);
3613     }
3614   }
3615 
3616   if (!MemOps.empty())
3617     Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, MemOps);
3618 
3619   return Chain;
3620 }
3621 
3622 // PPC64 passes i8, i16, and i32 values in i64 registers. Promote
3623 // value to MVT::i64 and then truncate to the correct register size.
3624 SDValue PPCTargetLowering::extendArgForPPC64(ISD::ArgFlagsTy Flags,
3625                                              EVT ObjectVT, SelectionDAG &DAG,
3626                                              SDValue ArgVal,
3627                                              const SDLoc &dl) const {
3628   if (Flags.isSExt())
3629     ArgVal = DAG.getNode(ISD::AssertSext, dl, MVT::i64, ArgVal,
3630                          DAG.getValueType(ObjectVT));
3631   else if (Flags.isZExt())
3632     ArgVal = DAG.getNode(ISD::AssertZext, dl, MVT::i64, ArgVal,
3633                          DAG.getValueType(ObjectVT));
3634 
3635   return DAG.getNode(ISD::TRUNCATE, dl, ObjectVT, ArgVal);
3636 }
3637 
3638 SDValue PPCTargetLowering::LowerFormalArguments_64SVR4(
3639     SDValue Chain, CallingConv::ID CallConv, bool isVarArg,
3640     const SmallVectorImpl<ISD::InputArg> &Ins, const SDLoc &dl,
3641     SelectionDAG &DAG, SmallVectorImpl<SDValue> &InVals) const {
3642   // TODO: add description of PPC stack frame format, or at least some docs.
3643   //
3644   bool isELFv2ABI = Subtarget.isELFv2ABI();
3645   bool isLittleEndian = Subtarget.isLittleEndian();
3646   MachineFunction &MF = DAG.getMachineFunction();
3647   MachineFrameInfo &MFI = MF.getFrameInfo();
3648   PPCFunctionInfo *FuncInfo = MF.getInfo<PPCFunctionInfo>();
3649 
3650   assert(!(CallConv == CallingConv::Fast && isVarArg) &&
3651          "fastcc not supported on varargs functions");
3652 
3653   EVT PtrVT = getPointerTy(MF.getDataLayout());
3654   // Potential tail calls could cause overwriting of argument stack slots.
3655   bool isImmutable = !(getTargetMachine().Options.GuaranteedTailCallOpt &&
3656                        (CallConv == CallingConv::Fast));
3657   unsigned PtrByteSize = 8;
3658   unsigned LinkageSize = Subtarget.getFrameLowering()->getLinkageSize();
3659 
3660   static const MCPhysReg GPR[] = {
3661     PPC::X3, PPC::X4, PPC::X5, PPC::X6,
3662     PPC::X7, PPC::X8, PPC::X9, PPC::X10,
3663   };
3664   static const MCPhysReg VR[] = {
3665     PPC::V2, PPC::V3, PPC::V4, PPC::V5, PPC::V6, PPC::V7, PPC::V8,
3666     PPC::V9, PPC::V10, PPC::V11, PPC::V12, PPC::V13
3667   };
3668 
3669   const unsigned Num_GPR_Regs = array_lengthof(GPR);
3670   const unsigned Num_FPR_Regs = useSoftFloat() ? 0 : 13;
3671   const unsigned Num_VR_Regs  = array_lengthof(VR);
3672   const unsigned Num_QFPR_Regs = Num_FPR_Regs;
3673 
3674   // Do a first pass over the arguments to determine whether the ABI
3675   // guarantees that our caller has allocated the parameter save area
3676   // on its stack frame.  In the ELFv1 ABI, this is always the case;
3677   // in the ELFv2 ABI, it is true if this is a vararg function or if
3678   // any parameter is located in a stack slot.
3679 
3680   bool HasParameterArea = !isELFv2ABI || isVarArg;
3681   unsigned ParamAreaSize = Num_GPR_Regs * PtrByteSize;
3682   unsigned NumBytes = LinkageSize;
3683   unsigned AvailableFPRs = Num_FPR_Regs;
3684   unsigned AvailableVRs = Num_VR_Regs;
3685   for (unsigned i = 0, e = Ins.size(); i != e; ++i) {
3686     if (Ins[i].Flags.isNest())
3687       continue;
3688 
3689     if (CalculateStackSlotUsed(Ins[i].VT, Ins[i].ArgVT, Ins[i].Flags,
3690                                PtrByteSize, LinkageSize, ParamAreaSize,
3691                                NumBytes, AvailableFPRs, AvailableVRs,
3692                                Subtarget.hasQPX()))
3693       HasParameterArea = true;
3694   }
3695 
3696   // Add DAG nodes to load the arguments or copy them out of registers.  On
3697   // entry to a function on PPC, the arguments start after the linkage area,
3698   // although the first ones are often in registers.
3699 
3700   unsigned ArgOffset = LinkageSize;
3701   unsigned GPR_idx = 0, FPR_idx = 0, VR_idx = 0;
3702   unsigned &QFPR_idx = FPR_idx;
3703   SmallVector<SDValue, 8> MemOps;
3704   Function::const_arg_iterator FuncArg = MF.getFunction().arg_begin();
3705   unsigned CurArgIdx = 0;
3706   for (unsigned ArgNo = 0, e = Ins.size(); ArgNo != e; ++ArgNo) {
3707     SDValue ArgVal;
3708     bool needsLoad = false;
3709     EVT ObjectVT = Ins[ArgNo].VT;
3710     EVT OrigVT = Ins[ArgNo].ArgVT;
3711     unsigned ObjSize = ObjectVT.getStoreSize();
3712     unsigned ArgSize = ObjSize;
3713     ISD::ArgFlagsTy Flags = Ins[ArgNo].Flags;
3714     if (Ins[ArgNo].isOrigArg()) {
3715       std::advance(FuncArg, Ins[ArgNo].getOrigArgIndex() - CurArgIdx);
3716       CurArgIdx = Ins[ArgNo].getOrigArgIndex();
3717     }
3718     // We re-align the argument offset for each argument, except when using the
3719     // fast calling convention, when we need to make sure we do that only when
3720     // we'll actually use a stack slot.
3721     unsigned CurArgOffset, Align;
3722     auto ComputeArgOffset = [&]() {
3723       /* Respect alignment of argument on the stack.  */
3724       Align = CalculateStackSlotAlignment(ObjectVT, OrigVT, Flags, PtrByteSize);
3725       ArgOffset = ((ArgOffset + Align - 1) / Align) * Align;
3726       CurArgOffset = ArgOffset;
3727     };
3728 
3729     if (CallConv != CallingConv::Fast) {
3730       ComputeArgOffset();
3731 
3732       /* Compute GPR index associated with argument offset.  */
3733       GPR_idx = (ArgOffset - LinkageSize) / PtrByteSize;
3734       GPR_idx = std::min(GPR_idx, Num_GPR_Regs);
3735     }
3736 
3737     // FIXME the codegen can be much improved in some cases.
3738     // We do not have to keep everything in memory.
3739     if (Flags.isByVal()) {
3740       assert(Ins[ArgNo].isOrigArg() && "Byval arguments cannot be implicit");
3741 
3742       if (CallConv == CallingConv::Fast)
3743         ComputeArgOffset();
3744 
3745       // ObjSize is the true size, ArgSize rounded up to multiple of registers.
3746       ObjSize = Flags.getByValSize();
3747       ArgSize = ((ObjSize + PtrByteSize - 1)/PtrByteSize) * PtrByteSize;
3748       // Empty aggregate parameters do not take up registers.  Examples:
3749       //   struct { } a;
3750       //   union  { } b;
3751       //   int c[0];
3752       // etc.  However, we have to provide a place-holder in InVals, so
3753       // pretend we have an 8-byte item at the current address for that
3754       // purpose.
3755       if (!ObjSize) {
3756         int FI = MFI.CreateFixedObject(PtrByteSize, ArgOffset, true);
3757         SDValue FIN = DAG.getFrameIndex(FI, PtrVT);
3758         InVals.push_back(FIN);
3759         continue;
3760       }
3761 
3762       // Create a stack object covering all stack doublewords occupied
3763       // by the argument.  If the argument is (fully or partially) on
3764       // the stack, or if the argument is fully in registers but the
3765       // caller has allocated the parameter save anyway, we can refer
3766       // directly to the caller's stack frame.  Otherwise, create a
3767       // local copy in our own frame.
3768       int FI;
3769       if (HasParameterArea ||
3770           ArgSize + ArgOffset > LinkageSize + Num_GPR_Regs * PtrByteSize)
3771         FI = MFI.CreateFixedObject(ArgSize, ArgOffset, false, true);
3772       else
3773         FI = MFI.CreateStackObject(ArgSize, Align, false);
3774       SDValue FIN = DAG.getFrameIndex(FI, PtrVT);
3775 
3776       // Handle aggregates smaller than 8 bytes.
3777       if (ObjSize < PtrByteSize) {
3778         // The value of the object is its address, which differs from the
3779         // address of the enclosing doubleword on big-endian systems.
3780         SDValue Arg = FIN;
3781         if (!isLittleEndian) {
3782           SDValue ArgOff = DAG.getConstant(PtrByteSize - ObjSize, dl, PtrVT);
3783           Arg = DAG.getNode(ISD::ADD, dl, ArgOff.getValueType(), Arg, ArgOff);
3784         }
3785         InVals.push_back(Arg);
3786 
3787         if (GPR_idx != Num_GPR_Regs) {
3788           unsigned VReg = MF.addLiveIn(GPR[GPR_idx++], &PPC::G8RCRegClass);
3789           FuncInfo->addLiveInAttr(VReg, Flags);
3790           SDValue Val = DAG.getCopyFromReg(Chain, dl, VReg, PtrVT);
3791           SDValue Store;
3792 
3793           if (ObjSize==1 || ObjSize==2 || ObjSize==4) {
3794             EVT ObjType = (ObjSize == 1 ? MVT::i8 :
3795                            (ObjSize == 2 ? MVT::i16 : MVT::i32));
3796             Store = DAG.getTruncStore(Val.getValue(1), dl, Val, Arg,
3797                                       MachinePointerInfo(&*FuncArg), ObjType);
3798           } else {
3799             // For sizes that don't fit a truncating store (3, 5, 6, 7),
3800             // store the whole register as-is to the parameter save area
3801             // slot.
3802             Store = DAG.getStore(Val.getValue(1), dl, Val, FIN,
3803                                  MachinePointerInfo(&*FuncArg));
3804           }
3805 
3806           MemOps.push_back(Store);
3807         }
3808         // Whether we copied from a register or not, advance the offset
3809         // into the parameter save area by a full doubleword.
3810         ArgOffset += PtrByteSize;
3811         continue;
3812       }
3813 
3814       // The value of the object is its address, which is the address of
3815       // its first stack doubleword.
3816       InVals.push_back(FIN);
3817 
3818       // Store whatever pieces of the object are in registers to memory.
3819       for (unsigned j = 0; j < ArgSize; j += PtrByteSize) {
3820         if (GPR_idx == Num_GPR_Regs)
3821           break;
3822 
3823         unsigned VReg = MF.addLiveIn(GPR[GPR_idx], &PPC::G8RCRegClass);
3824         FuncInfo->addLiveInAttr(VReg, Flags);
3825         SDValue Val = DAG.getCopyFromReg(Chain, dl, VReg, PtrVT);
3826         SDValue Addr = FIN;
3827         if (j) {
3828           SDValue Off = DAG.getConstant(j, dl, PtrVT);
3829           Addr = DAG.getNode(ISD::ADD, dl, Off.getValueType(), Addr, Off);
3830         }
3831         SDValue Store = DAG.getStore(Val.getValue(1), dl, Val, Addr,
3832                                      MachinePointerInfo(&*FuncArg, j));
3833         MemOps.push_back(Store);
3834         ++GPR_idx;
3835       }
3836       ArgOffset += ArgSize;
3837       continue;
3838     }
3839 
3840     switch (ObjectVT.getSimpleVT().SimpleTy) {
3841     default: llvm_unreachable("Unhandled argument type!");
3842     case MVT::i1:
3843     case MVT::i32:
3844     case MVT::i64:
3845       if (Flags.isNest()) {
3846         // The 'nest' parameter, if any, is passed in R11.
3847         unsigned VReg = MF.addLiveIn(PPC::X11, &PPC::G8RCRegClass);
3848         ArgVal = DAG.getCopyFromReg(Chain, dl, VReg, MVT::i64);
3849 
3850         if (ObjectVT == MVT::i32 || ObjectVT == MVT::i1)
3851           ArgVal = extendArgForPPC64(Flags, ObjectVT, DAG, ArgVal, dl);
3852 
3853         break;
3854       }
3855 
3856       // These can be scalar arguments or elements of an integer array type
3857       // passed directly.  Clang may use those instead of "byval" aggregate
3858       // types to avoid forcing arguments to memory unnecessarily.
3859       if (GPR_idx != Num_GPR_Regs) {
3860         unsigned VReg = MF.addLiveIn(GPR[GPR_idx++], &PPC::G8RCRegClass);
3861         FuncInfo->addLiveInAttr(VReg, Flags);
3862         ArgVal = DAG.getCopyFromReg(Chain, dl, VReg, MVT::i64);
3863 
3864         if (ObjectVT == MVT::i32 || ObjectVT == MVT::i1)
3865           // PPC64 passes i8, i16, and i32 values in i64 registers. Promote
3866           // value to MVT::i64 and then truncate to the correct register size.
3867           ArgVal = extendArgForPPC64(Flags, ObjectVT, DAG, ArgVal, dl);
3868       } else {
3869         if (CallConv == CallingConv::Fast)
3870           ComputeArgOffset();
3871 
3872         needsLoad = true;
3873         ArgSize = PtrByteSize;
3874       }
3875       if (CallConv != CallingConv::Fast || needsLoad)
3876         ArgOffset += 8;
3877       break;
3878 
3879     case MVT::f32:
3880     case MVT::f64:
3881       // These can be scalar arguments or elements of a float array type
3882       // passed directly.  The latter are used to implement ELFv2 homogenous
3883       // float aggregates.
3884       if (FPR_idx != Num_FPR_Regs) {
3885         unsigned VReg;
3886 
3887         if (ObjectVT == MVT::f32)
3888           VReg = MF.addLiveIn(FPR[FPR_idx],
3889                               Subtarget.hasP8Vector()
3890                                   ? &PPC::VSSRCRegClass
3891                                   : &PPC::F4RCRegClass);
3892         else
3893           VReg = MF.addLiveIn(FPR[FPR_idx], Subtarget.hasVSX()
3894                                                 ? &PPC::VSFRCRegClass
3895                                                 : &PPC::F8RCRegClass);
3896 
3897         ArgVal = DAG.getCopyFromReg(Chain, dl, VReg, ObjectVT);
3898         ++FPR_idx;
3899       } else if (GPR_idx != Num_GPR_Regs && CallConv != CallingConv::Fast) {
3900         // FIXME: We may want to re-enable this for CallingConv::Fast on the P8
3901         // once we support fp <-> gpr moves.
3902 
3903         // This can only ever happen in the presence of f32 array types,
3904         // since otherwise we never run out of FPRs before running out
3905         // of GPRs.
3906         unsigned VReg = MF.addLiveIn(GPR[GPR_idx++], &PPC::G8RCRegClass);
3907         FuncInfo->addLiveInAttr(VReg, Flags);
3908         ArgVal = DAG.getCopyFromReg(Chain, dl, VReg, MVT::i64);
3909 
3910         if (ObjectVT == MVT::f32) {
3911           if ((ArgOffset % PtrByteSize) == (isLittleEndian ? 4 : 0))
3912             ArgVal = DAG.getNode(ISD::SRL, dl, MVT::i64, ArgVal,
3913                                  DAG.getConstant(32, dl, MVT::i32));
3914           ArgVal = DAG.getNode(ISD::TRUNCATE, dl, MVT::i32, ArgVal);
3915         }
3916 
3917         ArgVal = DAG.getNode(ISD::BITCAST, dl, ObjectVT, ArgVal);
3918       } else {
3919         if (CallConv == CallingConv::Fast)
3920           ComputeArgOffset();
3921 
3922         needsLoad = true;
3923       }
3924 
3925       // When passing an array of floats, the array occupies consecutive
3926       // space in the argument area; only round up to the next doubleword
3927       // at the end of the array.  Otherwise, each float takes 8 bytes.
3928       if (CallConv != CallingConv::Fast || needsLoad) {
3929         ArgSize = Flags.isInConsecutiveRegs() ? ObjSize : PtrByteSize;
3930         ArgOffset += ArgSize;
3931         if (Flags.isInConsecutiveRegsLast())
3932           ArgOffset = ((ArgOffset + PtrByteSize - 1)/PtrByteSize) * PtrByteSize;
3933       }
3934       break;
3935     case MVT::v4f32:
3936     case MVT::v4i32:
3937     case MVT::v8i16:
3938     case MVT::v16i8:
3939     case MVT::v2f64:
3940     case MVT::v2i64:
3941     case MVT::v1i128:
3942     case MVT::f128:
3943       if (!Subtarget.hasQPX()) {
3944         // These can be scalar arguments or elements of a vector array type
3945         // passed directly.  The latter are used to implement ELFv2 homogenous
3946         // vector aggregates.
3947         if (VR_idx != Num_VR_Regs) {
3948           unsigned VReg = MF.addLiveIn(VR[VR_idx], &PPC::VRRCRegClass);
3949           ArgVal = DAG.getCopyFromReg(Chain, dl, VReg, ObjectVT);
3950           ++VR_idx;
3951         } else {
3952           if (CallConv == CallingConv::Fast)
3953             ComputeArgOffset();
3954           needsLoad = true;
3955         }
3956         if (CallConv != CallingConv::Fast || needsLoad)
3957           ArgOffset += 16;
3958         break;
3959       } // not QPX
3960 
3961       assert(ObjectVT.getSimpleVT().SimpleTy == MVT::v4f32 &&
3962              "Invalid QPX parameter type");
3963       /* fall through */
3964 
3965     case MVT::v4f64:
3966     case MVT::v4i1:
3967       // QPX vectors are treated like their scalar floating-point subregisters
3968       // (except that they're larger).
3969       unsigned Sz = ObjectVT.getSimpleVT().SimpleTy == MVT::v4f32 ? 16 : 32;
3970       if (QFPR_idx != Num_QFPR_Regs) {
3971         const TargetRegisterClass *RC;
3972         switch (ObjectVT.getSimpleVT().SimpleTy) {
3973         case MVT::v4f64: RC = &PPC::QFRCRegClass; break;
3974         case MVT::v4f32: RC = &PPC::QSRCRegClass; break;
3975         default:         RC = &PPC::QBRCRegClass; break;
3976         }
3977 
3978         unsigned VReg = MF.addLiveIn(QFPR[QFPR_idx], RC);
3979         ArgVal = DAG.getCopyFromReg(Chain, dl, VReg, ObjectVT);
3980         ++QFPR_idx;
3981       } else {
3982         if (CallConv == CallingConv::Fast)
3983           ComputeArgOffset();
3984         needsLoad = true;
3985       }
3986       if (CallConv != CallingConv::Fast || needsLoad)
3987         ArgOffset += Sz;
3988       break;
3989     }
3990 
3991     // We need to load the argument to a virtual register if we determined
3992     // above that we ran out of physical registers of the appropriate type.
3993     if (needsLoad) {
3994       if (ObjSize < ArgSize && !isLittleEndian)
3995         CurArgOffset += ArgSize - ObjSize;
3996       int FI = MFI.CreateFixedObject(ObjSize, CurArgOffset, isImmutable);
3997       SDValue FIN = DAG.getFrameIndex(FI, PtrVT);
3998       ArgVal = DAG.getLoad(ObjectVT, dl, Chain, FIN, MachinePointerInfo());
3999     }
4000 
4001     InVals.push_back(ArgVal);
4002   }
4003 
4004   // Area that is at least reserved in the caller of this function.
4005   unsigned MinReservedArea;
4006   if (HasParameterArea)
4007     MinReservedArea = std::max(ArgOffset, LinkageSize + 8 * PtrByteSize);
4008   else
4009     MinReservedArea = LinkageSize;
4010 
4011   // Set the size that is at least reserved in caller of this function.  Tail
4012   // call optimized functions' reserved stack space needs to be aligned so that
4013   // taking the difference between two stack areas will result in an aligned
4014   // stack.
4015   MinReservedArea =
4016       EnsureStackAlignment(Subtarget.getFrameLowering(), MinReservedArea);
4017   FuncInfo->setMinReservedArea(MinReservedArea);
4018 
4019   // If the function takes variable number of arguments, make a frame index for
4020   // the start of the first vararg value... for expansion of llvm.va_start.
4021   if (isVarArg) {
4022     int Depth = ArgOffset;
4023 
4024     FuncInfo->setVarArgsFrameIndex(
4025       MFI.CreateFixedObject(PtrByteSize, Depth, true));
4026     SDValue FIN = DAG.getFrameIndex(FuncInfo->getVarArgsFrameIndex(), PtrVT);
4027 
4028     // If this function is vararg, store any remaining integer argument regs
4029     // to their spots on the stack so that they may be loaded by dereferencing
4030     // the result of va_next.
4031     for (GPR_idx = (ArgOffset - LinkageSize) / PtrByteSize;
4032          GPR_idx < Num_GPR_Regs; ++GPR_idx) {
4033       unsigned VReg = MF.addLiveIn(GPR[GPR_idx], &PPC::G8RCRegClass);
4034       SDValue Val = DAG.getCopyFromReg(Chain, dl, VReg, PtrVT);
4035       SDValue Store =
4036           DAG.getStore(Val.getValue(1), dl, Val, FIN, MachinePointerInfo());
4037       MemOps.push_back(Store);
4038       // Increment the address by four for the next argument to store
4039       SDValue PtrOff = DAG.getConstant(PtrByteSize, dl, PtrVT);
4040       FIN = DAG.getNode(ISD::ADD, dl, PtrOff.getValueType(), FIN, PtrOff);
4041     }
4042   }
4043 
4044   if (!MemOps.empty())
4045     Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, MemOps);
4046 
4047   return Chain;
4048 }
4049 
4050 SDValue PPCTargetLowering::LowerFormalArguments_Darwin(
4051     SDValue Chain, CallingConv::ID CallConv, bool isVarArg,
4052     const SmallVectorImpl<ISD::InputArg> &Ins, const SDLoc &dl,
4053     SelectionDAG &DAG, SmallVectorImpl<SDValue> &InVals) const {
4054   // TODO: add description of PPC stack frame format, or at least some docs.
4055   //
4056   MachineFunction &MF = DAG.getMachineFunction();
4057   MachineFrameInfo &MFI = MF.getFrameInfo();
4058   PPCFunctionInfo *FuncInfo = MF.getInfo<PPCFunctionInfo>();
4059 
4060   EVT PtrVT = getPointerTy(MF.getDataLayout());
4061   bool isPPC64 = PtrVT == MVT::i64;
4062   // Potential tail calls could cause overwriting of argument stack slots.
4063   bool isImmutable = !(getTargetMachine().Options.GuaranteedTailCallOpt &&
4064                        (CallConv == CallingConv::Fast));
4065   unsigned PtrByteSize = isPPC64 ? 8 : 4;
4066   unsigned LinkageSize = Subtarget.getFrameLowering()->getLinkageSize();
4067   unsigned ArgOffset = LinkageSize;
4068   // Area that is at least reserved in caller of this function.
4069   unsigned MinReservedArea = ArgOffset;
4070 
4071   static const MCPhysReg GPR_32[] = {           // 32-bit registers.
4072     PPC::R3, PPC::R4, PPC::R5, PPC::R6,
4073     PPC::R7, PPC::R8, PPC::R9, PPC::R10,
4074   };
4075   static const MCPhysReg GPR_64[] = {           // 64-bit registers.
4076     PPC::X3, PPC::X4, PPC::X5, PPC::X6,
4077     PPC::X7, PPC::X8, PPC::X9, PPC::X10,
4078   };
4079   static const MCPhysReg VR[] = {
4080     PPC::V2, PPC::V3, PPC::V4, PPC::V5, PPC::V6, PPC::V7, PPC::V8,
4081     PPC::V9, PPC::V10, PPC::V11, PPC::V12, PPC::V13
4082   };
4083 
4084   const unsigned Num_GPR_Regs = array_lengthof(GPR_32);
4085   const unsigned Num_FPR_Regs = useSoftFloat() ? 0 : 13;
4086   const unsigned Num_VR_Regs  = array_lengthof( VR);
4087 
4088   unsigned GPR_idx = 0, FPR_idx = 0, VR_idx = 0;
4089 
4090   const MCPhysReg *GPR = isPPC64 ? GPR_64 : GPR_32;
4091 
4092   // In 32-bit non-varargs functions, the stack space for vectors is after the
4093   // stack space for non-vectors.  We do not use this space unless we have
4094   // too many vectors to fit in registers, something that only occurs in
4095   // constructed examples:), but we have to walk the arglist to figure
4096   // that out...for the pathological case, compute VecArgOffset as the
4097   // start of the vector parameter area.  Computing VecArgOffset is the
4098   // entire point of the following loop.
4099   unsigned VecArgOffset = ArgOffset;
4100   if (!isVarArg && !isPPC64) {
4101     for (unsigned ArgNo = 0, e = Ins.size(); ArgNo != e;
4102          ++ArgNo) {
4103       EVT ObjectVT = Ins[ArgNo].VT;
4104       ISD::ArgFlagsTy Flags = Ins[ArgNo].Flags;
4105 
4106       if (Flags.isByVal()) {
4107         // ObjSize is the true size, ArgSize rounded up to multiple of regs.
4108         unsigned ObjSize = Flags.getByValSize();
4109         unsigned ArgSize =
4110                 ((ObjSize + PtrByteSize - 1)/PtrByteSize) * PtrByteSize;
4111         VecArgOffset += ArgSize;
4112         continue;
4113       }
4114 
4115       switch(ObjectVT.getSimpleVT().SimpleTy) {
4116       default: llvm_unreachable("Unhandled argument type!");
4117       case MVT::i1:
4118       case MVT::i32:
4119       case MVT::f32:
4120         VecArgOffset += 4;
4121         break;
4122       case MVT::i64:  // PPC64
4123       case MVT::f64:
4124         // FIXME: We are guaranteed to be !isPPC64 at this point.
4125         // Does MVT::i64 apply?
4126         VecArgOffset += 8;
4127         break;
4128       case MVT::v4f32:
4129       case MVT::v4i32:
4130       case MVT::v8i16:
4131       case MVT::v16i8:
4132         // Nothing to do, we're only looking at Nonvector args here.
4133         break;
4134       }
4135     }
4136   }
4137   // We've found where the vector parameter area in memory is.  Skip the
4138   // first 12 parameters; these don't use that memory.
4139   VecArgOffset = ((VecArgOffset+15)/16)*16;
4140   VecArgOffset += 12*16;
4141 
4142   // Add DAG nodes to load the arguments or copy them out of registers.  On
4143   // entry to a function on PPC, the arguments start after the linkage area,
4144   // although the first ones are often in registers.
4145 
4146   SmallVector<SDValue, 8> MemOps;
4147   unsigned nAltivecParamsAtEnd = 0;
4148   Function::const_arg_iterator FuncArg = MF.getFunction().arg_begin();
4149   unsigned CurArgIdx = 0;
4150   for (unsigned ArgNo = 0, e = Ins.size(); ArgNo != e; ++ArgNo) {
4151     SDValue ArgVal;
4152     bool needsLoad = false;
4153     EVT ObjectVT = Ins[ArgNo].VT;
4154     unsigned ObjSize = ObjectVT.getSizeInBits()/8;
4155     unsigned ArgSize = ObjSize;
4156     ISD::ArgFlagsTy Flags = Ins[ArgNo].Flags;
4157     if (Ins[ArgNo].isOrigArg()) {
4158       std::advance(FuncArg, Ins[ArgNo].getOrigArgIndex() - CurArgIdx);
4159       CurArgIdx = Ins[ArgNo].getOrigArgIndex();
4160     }
4161     unsigned CurArgOffset = ArgOffset;
4162 
4163     // Varargs or 64 bit Altivec parameters are padded to a 16 byte boundary.
4164     if (ObjectVT==MVT::v4f32 || ObjectVT==MVT::v4i32 ||
4165         ObjectVT==MVT::v8i16 || ObjectVT==MVT::v16i8) {
4166       if (isVarArg || isPPC64) {
4167         MinReservedArea = ((MinReservedArea+15)/16)*16;
4168         MinReservedArea += CalculateStackSlotSize(ObjectVT,
4169                                                   Flags,
4170                                                   PtrByteSize);
4171       } else  nAltivecParamsAtEnd++;
4172     } else
4173       // Calculate min reserved area.
4174       MinReservedArea += CalculateStackSlotSize(Ins[ArgNo].VT,
4175                                                 Flags,
4176                                                 PtrByteSize);
4177 
4178     // FIXME the codegen can be much improved in some cases.
4179     // We do not have to keep everything in memory.
4180     if (Flags.isByVal()) {
4181       assert(Ins[ArgNo].isOrigArg() && "Byval arguments cannot be implicit");
4182 
4183       // ObjSize is the true size, ArgSize rounded up to multiple of registers.
4184       ObjSize = Flags.getByValSize();
4185       ArgSize = ((ObjSize + PtrByteSize - 1)/PtrByteSize) * PtrByteSize;
4186       // Objects of size 1 and 2 are right justified, everything else is
4187       // left justified.  This means the memory address is adjusted forwards.
4188       if (ObjSize==1 || ObjSize==2) {
4189         CurArgOffset = CurArgOffset + (4 - ObjSize);
4190       }
4191       // The value of the object is its address.
4192       int FI = MFI.CreateFixedObject(ObjSize, CurArgOffset, false, true);
4193       SDValue FIN = DAG.getFrameIndex(FI, PtrVT);
4194       InVals.push_back(FIN);
4195       if (ObjSize==1 || ObjSize==2) {
4196         if (GPR_idx != Num_GPR_Regs) {
4197           unsigned VReg;
4198           if (isPPC64)
4199             VReg = MF.addLiveIn(GPR[GPR_idx], &PPC::G8RCRegClass);
4200           else
4201             VReg = MF.addLiveIn(GPR[GPR_idx], &PPC::GPRCRegClass);
4202           SDValue Val = DAG.getCopyFromReg(Chain, dl, VReg, PtrVT);
4203           EVT ObjType = ObjSize == 1 ? MVT::i8 : MVT::i16;
4204           SDValue Store =
4205               DAG.getTruncStore(Val.getValue(1), dl, Val, FIN,
4206                                 MachinePointerInfo(&*FuncArg), ObjType);
4207           MemOps.push_back(Store);
4208           ++GPR_idx;
4209         }
4210 
4211         ArgOffset += PtrByteSize;
4212 
4213         continue;
4214       }
4215       for (unsigned j = 0; j < ArgSize; j += PtrByteSize) {
4216         // Store whatever pieces of the object are in registers
4217         // to memory.  ArgOffset will be the address of the beginning
4218         // of the object.
4219         if (GPR_idx != Num_GPR_Regs) {
4220           unsigned VReg;
4221           if (isPPC64)
4222             VReg = MF.addLiveIn(GPR[GPR_idx], &PPC::G8RCRegClass);
4223           else
4224             VReg = MF.addLiveIn(GPR[GPR_idx], &PPC::GPRCRegClass);
4225           int FI = MFI.CreateFixedObject(PtrByteSize, ArgOffset, true);
4226           SDValue FIN = DAG.getFrameIndex(FI, PtrVT);
4227           SDValue Val = DAG.getCopyFromReg(Chain, dl, VReg, PtrVT);
4228           SDValue Store = DAG.getStore(Val.getValue(1), dl, Val, FIN,
4229                                        MachinePointerInfo(&*FuncArg, j));
4230           MemOps.push_back(Store);
4231           ++GPR_idx;
4232           ArgOffset += PtrByteSize;
4233         } else {
4234           ArgOffset += ArgSize - (ArgOffset-CurArgOffset);
4235           break;
4236         }
4237       }
4238       continue;
4239     }
4240 
4241     switch (ObjectVT.getSimpleVT().SimpleTy) {
4242     default: llvm_unreachable("Unhandled argument type!");
4243     case MVT::i1:
4244     case MVT::i32:
4245       if (!isPPC64) {
4246         if (GPR_idx != Num_GPR_Regs) {
4247           unsigned VReg = MF.addLiveIn(GPR[GPR_idx], &PPC::GPRCRegClass);
4248           ArgVal = DAG.getCopyFromReg(Chain, dl, VReg, MVT::i32);
4249 
4250           if (ObjectVT == MVT::i1)
4251             ArgVal = DAG.getNode(ISD::TRUNCATE, dl, MVT::i1, ArgVal);
4252 
4253           ++GPR_idx;
4254         } else {
4255           needsLoad = true;
4256           ArgSize = PtrByteSize;
4257         }
4258         // All int arguments reserve stack space in the Darwin ABI.
4259         ArgOffset += PtrByteSize;
4260         break;
4261       }
4262       LLVM_FALLTHROUGH;
4263     case MVT::i64:  // PPC64
4264       if (GPR_idx != Num_GPR_Regs) {
4265         unsigned VReg = MF.addLiveIn(GPR[GPR_idx], &PPC::G8RCRegClass);
4266         ArgVal = DAG.getCopyFromReg(Chain, dl, VReg, MVT::i64);
4267 
4268         if (ObjectVT == MVT::i32 || ObjectVT == MVT::i1)
4269           // PPC64 passes i8, i16, and i32 values in i64 registers. Promote
4270           // value to MVT::i64 and then truncate to the correct register size.
4271           ArgVal = extendArgForPPC64(Flags, ObjectVT, DAG, ArgVal, dl);
4272 
4273         ++GPR_idx;
4274       } else {
4275         needsLoad = true;
4276         ArgSize = PtrByteSize;
4277       }
4278       // All int arguments reserve stack space in the Darwin ABI.
4279       ArgOffset += 8;
4280       break;
4281 
4282     case MVT::f32:
4283     case MVT::f64:
4284       // Every 4 bytes of argument space consumes one of the GPRs available for
4285       // argument passing.
4286       if (GPR_idx != Num_GPR_Regs) {
4287         ++GPR_idx;
4288         if (ObjSize == 8 && GPR_idx != Num_GPR_Regs && !isPPC64)
4289           ++GPR_idx;
4290       }
4291       if (FPR_idx != Num_FPR_Regs) {
4292         unsigned VReg;
4293 
4294         if (ObjectVT == MVT::f32)
4295           VReg = MF.addLiveIn(FPR[FPR_idx], &PPC::F4RCRegClass);
4296         else
4297           VReg = MF.addLiveIn(FPR[FPR_idx], &PPC::F8RCRegClass);
4298 
4299         ArgVal = DAG.getCopyFromReg(Chain, dl, VReg, ObjectVT);
4300         ++FPR_idx;
4301       } else {
4302         needsLoad = true;
4303       }
4304 
4305       // All FP arguments reserve stack space in the Darwin ABI.
4306       ArgOffset += isPPC64 ? 8 : ObjSize;
4307       break;
4308     case MVT::v4f32:
4309     case MVT::v4i32:
4310     case MVT::v8i16:
4311     case MVT::v16i8:
4312       // Note that vector arguments in registers don't reserve stack space,
4313       // except in varargs functions.
4314       if (VR_idx != Num_VR_Regs) {
4315         unsigned VReg = MF.addLiveIn(VR[VR_idx], &PPC::VRRCRegClass);
4316         ArgVal = DAG.getCopyFromReg(Chain, dl, VReg, ObjectVT);
4317         if (isVarArg) {
4318           while ((ArgOffset % 16) != 0) {
4319             ArgOffset += PtrByteSize;
4320             if (GPR_idx != Num_GPR_Regs)
4321               GPR_idx++;
4322           }
4323           ArgOffset += 16;
4324           GPR_idx = std::min(GPR_idx+4, Num_GPR_Regs); // FIXME correct for ppc64?
4325         }
4326         ++VR_idx;
4327       } else {
4328         if (!isVarArg && !isPPC64) {
4329           // Vectors go after all the nonvectors.
4330           CurArgOffset = VecArgOffset;
4331           VecArgOffset += 16;
4332         } else {
4333           // Vectors are aligned.
4334           ArgOffset = ((ArgOffset+15)/16)*16;
4335           CurArgOffset = ArgOffset;
4336           ArgOffset += 16;
4337         }
4338         needsLoad = true;
4339       }
4340       break;
4341     }
4342 
4343     // We need to load the argument to a virtual register if we determined above
4344     // that we ran out of physical registers of the appropriate type.
4345     if (needsLoad) {
4346       int FI = MFI.CreateFixedObject(ObjSize,
4347                                      CurArgOffset + (ArgSize - ObjSize),
4348                                      isImmutable);
4349       SDValue FIN = DAG.getFrameIndex(FI, PtrVT);
4350       ArgVal = DAG.getLoad(ObjectVT, dl, Chain, FIN, MachinePointerInfo());
4351     }
4352 
4353     InVals.push_back(ArgVal);
4354   }
4355 
4356   // Allow for Altivec parameters at the end, if needed.
4357   if (nAltivecParamsAtEnd) {
4358     MinReservedArea = ((MinReservedArea+15)/16)*16;
4359     MinReservedArea += 16*nAltivecParamsAtEnd;
4360   }
4361 
4362   // Area that is at least reserved in the caller of this function.
4363   MinReservedArea = std::max(MinReservedArea, LinkageSize + 8 * PtrByteSize);
4364 
4365   // Set the size that is at least reserved in caller of this function.  Tail
4366   // call optimized functions' reserved stack space needs to be aligned so that
4367   // taking the difference between two stack areas will result in an aligned
4368   // stack.
4369   MinReservedArea =
4370       EnsureStackAlignment(Subtarget.getFrameLowering(), MinReservedArea);
4371   FuncInfo->setMinReservedArea(MinReservedArea);
4372 
4373   // If the function takes variable number of arguments, make a frame index for
4374   // the start of the first vararg value... for expansion of llvm.va_start.
4375   if (isVarArg) {
4376     int Depth = ArgOffset;
4377 
4378     FuncInfo->setVarArgsFrameIndex(
4379       MFI.CreateFixedObject(PtrVT.getSizeInBits()/8,
4380                             Depth, true));
4381     SDValue FIN = DAG.getFrameIndex(FuncInfo->getVarArgsFrameIndex(), PtrVT);
4382 
4383     // If this function is vararg, store any remaining integer argument regs
4384     // to their spots on the stack so that they may be loaded by dereferencing
4385     // the result of va_next.
4386     for (; GPR_idx != Num_GPR_Regs; ++GPR_idx) {
4387       unsigned VReg;
4388 
4389       if (isPPC64)
4390         VReg = MF.addLiveIn(GPR[GPR_idx], &PPC::G8RCRegClass);
4391       else
4392         VReg = MF.addLiveIn(GPR[GPR_idx], &PPC::GPRCRegClass);
4393 
4394       SDValue Val = DAG.getCopyFromReg(Chain, dl, VReg, PtrVT);
4395       SDValue Store =
4396           DAG.getStore(Val.getValue(1), dl, Val, FIN, MachinePointerInfo());
4397       MemOps.push_back(Store);
4398       // Increment the address by four for the next argument to store
4399       SDValue PtrOff = DAG.getConstant(PtrVT.getSizeInBits()/8, dl, PtrVT);
4400       FIN = DAG.getNode(ISD::ADD, dl, PtrOff.getValueType(), FIN, PtrOff);
4401     }
4402   }
4403 
4404   if (!MemOps.empty())
4405     Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, MemOps);
4406 
4407   return Chain;
4408 }
4409 
4410 /// CalculateTailCallSPDiff - Get the amount the stack pointer has to be
4411 /// adjusted to accommodate the arguments for the tailcall.
4412 static int CalculateTailCallSPDiff(SelectionDAG& DAG, bool isTailCall,
4413                                    unsigned ParamSize) {
4414 
4415   if (!isTailCall) return 0;
4416 
4417   PPCFunctionInfo *FI = DAG.getMachineFunction().getInfo<PPCFunctionInfo>();
4418   unsigned CallerMinReservedArea = FI->getMinReservedArea();
4419   int SPDiff = (int)CallerMinReservedArea - (int)ParamSize;
4420   // Remember only if the new adjustment is bigger.
4421   if (SPDiff < FI->getTailCallSPDelta())
4422     FI->setTailCallSPDelta(SPDiff);
4423 
4424   return SPDiff;
4425 }
4426 
4427 static bool isFunctionGlobalAddress(SDValue Callee);
4428 
4429 static bool
4430 callsShareTOCBase(const Function *Caller, SDValue Callee,
4431                     const TargetMachine &TM) {
4432   // If !G, Callee can be an external symbol.
4433   GlobalAddressSDNode *G = dyn_cast<GlobalAddressSDNode>(Callee);
4434   if (!G)
4435     return false;
4436 
4437   // The medium and large code models are expected to provide a sufficiently
4438   // large TOC to provide all data addressing needs of a module with a
4439   // single TOC. Since each module will be addressed with a single TOC then we
4440   // only need to check that caller and callee don't cross dso boundaries.
4441   if (CodeModel::Medium == TM.getCodeModel() ||
4442       CodeModel::Large == TM.getCodeModel())
4443     return TM.shouldAssumeDSOLocal(*Caller->getParent(), G->getGlobal());
4444 
4445   // Otherwise we need to ensure callee and caller are in the same section,
4446   // since the linker may allocate multiple TOCs, and we don't know which
4447   // sections will belong to the same TOC base.
4448 
4449   const GlobalValue *GV = G->getGlobal();
4450   if (!GV->isStrongDefinitionForLinker())
4451     return false;
4452 
4453   // Any explicitly-specified sections and section prefixes must also match.
4454   // Also, if we're using -ffunction-sections, then each function is always in
4455   // a different section (the same is true for COMDAT functions).
4456   if (TM.getFunctionSections() || GV->hasComdat() || Caller->hasComdat() ||
4457       GV->getSection() != Caller->getSection())
4458     return false;
4459   if (const auto *F = dyn_cast<Function>(GV)) {
4460     if (F->getSectionPrefix() != Caller->getSectionPrefix())
4461       return false;
4462   }
4463 
4464   // If the callee might be interposed, then we can't assume the ultimate call
4465   // target will be in the same section. Even in cases where we can assume that
4466   // interposition won't happen, in any case where the linker might insert a
4467   // stub to allow for interposition, we must generate code as though
4468   // interposition might occur. To understand why this matters, consider a
4469   // situation where: a -> b -> c where the arrows indicate calls. b and c are
4470   // in the same section, but a is in a different module (i.e. has a different
4471   // TOC base pointer). If the linker allows for interposition between b and c,
4472   // then it will generate a stub for the call edge between b and c which will
4473   // save the TOC pointer into the designated stack slot allocated by b. If we
4474   // return true here, and therefore allow a tail call between b and c, that
4475   // stack slot won't exist and the b -> c stub will end up saving b'c TOC base
4476   // pointer into the stack slot allocated by a (where the a -> b stub saved
4477   // a's TOC base pointer). If we're not considering a tail call, but rather,
4478   // whether a nop is needed after the call instruction in b, because the linker
4479   // will insert a stub, it might complain about a missing nop if we omit it
4480   // (although many don't complain in this case).
4481   if (!TM.shouldAssumeDSOLocal(*Caller->getParent(), GV))
4482     return false;
4483 
4484   return true;
4485 }
4486 
4487 static bool
4488 needStackSlotPassParameters(const PPCSubtarget &Subtarget,
4489                             const SmallVectorImpl<ISD::OutputArg> &Outs) {
4490   assert(Subtarget.isSVR4ABI() && Subtarget.isPPC64());
4491 
4492   const unsigned PtrByteSize = 8;
4493   const unsigned LinkageSize = Subtarget.getFrameLowering()->getLinkageSize();
4494 
4495   static const MCPhysReg GPR[] = {
4496     PPC::X3, PPC::X4, PPC::X5, PPC::X6,
4497     PPC::X7, PPC::X8, PPC::X9, PPC::X10,
4498   };
4499   static const MCPhysReg VR[] = {
4500     PPC::V2, PPC::V3, PPC::V4, PPC::V5, PPC::V6, PPC::V7, PPC::V8,
4501     PPC::V9, PPC::V10, PPC::V11, PPC::V12, PPC::V13
4502   };
4503 
4504   const unsigned NumGPRs = array_lengthof(GPR);
4505   const unsigned NumFPRs = 13;
4506   const unsigned NumVRs = array_lengthof(VR);
4507   const unsigned ParamAreaSize = NumGPRs * PtrByteSize;
4508 
4509   unsigned NumBytes = LinkageSize;
4510   unsigned AvailableFPRs = NumFPRs;
4511   unsigned AvailableVRs = NumVRs;
4512 
4513   for (const ISD::OutputArg& Param : Outs) {
4514     if (Param.Flags.isNest()) continue;
4515 
4516     if (CalculateStackSlotUsed(Param.VT, Param.ArgVT, Param.Flags,
4517                                PtrByteSize, LinkageSize, ParamAreaSize,
4518                                NumBytes, AvailableFPRs, AvailableVRs,
4519                                Subtarget.hasQPX()))
4520       return true;
4521   }
4522   return false;
4523 }
4524 
4525 static bool
4526 hasSameArgumentList(const Function *CallerFn, ImmutableCallSite CS) {
4527   if (CS.arg_size() != CallerFn->arg_size())
4528     return false;
4529 
4530   ImmutableCallSite::arg_iterator CalleeArgIter = CS.arg_begin();
4531   ImmutableCallSite::arg_iterator CalleeArgEnd = CS.arg_end();
4532   Function::const_arg_iterator CallerArgIter = CallerFn->arg_begin();
4533 
4534   for (; CalleeArgIter != CalleeArgEnd; ++CalleeArgIter, ++CallerArgIter) {
4535     const Value* CalleeArg = *CalleeArgIter;
4536     const Value* CallerArg = &(*CallerArgIter);
4537     if (CalleeArg == CallerArg)
4538       continue;
4539 
4540     // e.g. @caller([4 x i64] %a, [4 x i64] %b) {
4541     //        tail call @callee([4 x i64] undef, [4 x i64] %b)
4542     //      }
4543     // 1st argument of callee is undef and has the same type as caller.
4544     if (CalleeArg->getType() == CallerArg->getType() &&
4545         isa<UndefValue>(CalleeArg))
4546       continue;
4547 
4548     return false;
4549   }
4550 
4551   return true;
4552 }
4553 
4554 // Returns true if TCO is possible between the callers and callees
4555 // calling conventions.
4556 static bool
4557 areCallingConvEligibleForTCO_64SVR4(CallingConv::ID CallerCC,
4558                                     CallingConv::ID CalleeCC) {
4559   // Tail calls are possible with fastcc and ccc.
4560   auto isTailCallableCC  = [] (CallingConv::ID CC){
4561       return  CC == CallingConv::C || CC == CallingConv::Fast;
4562   };
4563   if (!isTailCallableCC(CallerCC) || !isTailCallableCC(CalleeCC))
4564     return false;
4565 
4566   // We can safely tail call both fastcc and ccc callees from a c calling
4567   // convention caller. If the caller is fastcc, we may have less stack space
4568   // than a non-fastcc caller with the same signature so disable tail-calls in
4569   // that case.
4570   return CallerCC == CallingConv::C || CallerCC == CalleeCC;
4571 }
4572 
4573 bool
4574 PPCTargetLowering::IsEligibleForTailCallOptimization_64SVR4(
4575                                     SDValue Callee,
4576                                     CallingConv::ID CalleeCC,
4577                                     ImmutableCallSite CS,
4578                                     bool isVarArg,
4579                                     const SmallVectorImpl<ISD::OutputArg> &Outs,
4580                                     const SmallVectorImpl<ISD::InputArg> &Ins,
4581                                     SelectionDAG& DAG) const {
4582   bool TailCallOpt = getTargetMachine().Options.GuaranteedTailCallOpt;
4583 
4584   if (DisableSCO && !TailCallOpt) return false;
4585 
4586   // Variadic argument functions are not supported.
4587   if (isVarArg) return false;
4588 
4589   auto &Caller = DAG.getMachineFunction().getFunction();
4590   // Check that the calling conventions are compatible for tco.
4591   if (!areCallingConvEligibleForTCO_64SVR4(Caller.getCallingConv(), CalleeCC))
4592     return false;
4593 
4594   // Caller contains any byval parameter is not supported.
4595   if (any_of(Ins, [](const ISD::InputArg &IA) { return IA.Flags.isByVal(); }))
4596     return false;
4597 
4598   // Callee contains any byval parameter is not supported, too.
4599   // Note: This is a quick work around, because in some cases, e.g.
4600   // caller's stack size > callee's stack size, we are still able to apply
4601   // sibling call optimization. For example, gcc is able to do SCO for caller1
4602   // in the following example, but not for caller2.
4603   //   struct test {
4604   //     long int a;
4605   //     char ary[56];
4606   //   } gTest;
4607   //   __attribute__((noinline)) int callee(struct test v, struct test *b) {
4608   //     b->a = v.a;
4609   //     return 0;
4610   //   }
4611   //   void caller1(struct test a, struct test c, struct test *b) {
4612   //     callee(gTest, b); }
4613   //   void caller2(struct test *b) { callee(gTest, b); }
4614   if (any_of(Outs, [](const ISD::OutputArg& OA) { return OA.Flags.isByVal(); }))
4615     return false;
4616 
4617   // If callee and caller use different calling conventions, we cannot pass
4618   // parameters on stack since offsets for the parameter area may be different.
4619   if (Caller.getCallingConv() != CalleeCC &&
4620       needStackSlotPassParameters(Subtarget, Outs))
4621     return false;
4622 
4623   // No TCO/SCO on indirect call because Caller have to restore its TOC
4624   if (!isFunctionGlobalAddress(Callee) &&
4625       !isa<ExternalSymbolSDNode>(Callee))
4626     return false;
4627 
4628   // If the caller and callee potentially have different TOC bases then we
4629   // cannot tail call since we need to restore the TOC pointer after the call.
4630   // ref: https://bugzilla.mozilla.org/show_bug.cgi?id=973977
4631   if (!callsShareTOCBase(&Caller, Callee, getTargetMachine()))
4632     return false;
4633 
4634   // TCO allows altering callee ABI, so we don't have to check further.
4635   if (CalleeCC == CallingConv::Fast && TailCallOpt)
4636     return true;
4637 
4638   if (DisableSCO) return false;
4639 
4640   // If callee use the same argument list that caller is using, then we can
4641   // apply SCO on this case. If it is not, then we need to check if callee needs
4642   // stack for passing arguments.
4643   if (!hasSameArgumentList(&Caller, CS) &&
4644       needStackSlotPassParameters(Subtarget, Outs)) {
4645     return false;
4646   }
4647 
4648   return true;
4649 }
4650 
4651 /// IsEligibleForTailCallOptimization - Check whether the call is eligible
4652 /// for tail call optimization. Targets which want to do tail call
4653 /// optimization should implement this function.
4654 bool
4655 PPCTargetLowering::IsEligibleForTailCallOptimization(SDValue Callee,
4656                                                      CallingConv::ID CalleeCC,
4657                                                      bool isVarArg,
4658                                       const SmallVectorImpl<ISD::InputArg> &Ins,
4659                                                      SelectionDAG& DAG) const {
4660   if (!getTargetMachine().Options.GuaranteedTailCallOpt)
4661     return false;
4662 
4663   // Variable argument functions are not supported.
4664   if (isVarArg)
4665     return false;
4666 
4667   MachineFunction &MF = DAG.getMachineFunction();
4668   CallingConv::ID CallerCC = MF.getFunction().getCallingConv();
4669   if (CalleeCC == CallingConv::Fast && CallerCC == CalleeCC) {
4670     // Functions containing by val parameters are not supported.
4671     for (unsigned i = 0; i != Ins.size(); i++) {
4672        ISD::ArgFlagsTy Flags = Ins[i].Flags;
4673        if (Flags.isByVal()) return false;
4674     }
4675 
4676     // Non-PIC/GOT tail calls are supported.
4677     if (getTargetMachine().getRelocationModel() != Reloc::PIC_)
4678       return true;
4679 
4680     // At the moment we can only do local tail calls (in same module, hidden
4681     // or protected) if we are generating PIC.
4682     if (GlobalAddressSDNode *G = dyn_cast<GlobalAddressSDNode>(Callee))
4683       return G->getGlobal()->hasHiddenVisibility()
4684           || G->getGlobal()->hasProtectedVisibility();
4685   }
4686 
4687   return false;
4688 }
4689 
4690 /// isCallCompatibleAddress - Return the immediate to use if the specified
4691 /// 32-bit value is representable in the immediate field of a BxA instruction.
4692 static SDNode *isBLACompatibleAddress(SDValue Op, SelectionDAG &DAG) {
4693   ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op);
4694   if (!C) return nullptr;
4695 
4696   int Addr = C->getZExtValue();
4697   if ((Addr & 3) != 0 ||  // Low 2 bits are implicitly zero.
4698       SignExtend32<26>(Addr) != Addr)
4699     return nullptr;  // Top 6 bits have to be sext of immediate.
4700 
4701   return DAG
4702       .getConstant(
4703           (int)C->getZExtValue() >> 2, SDLoc(Op),
4704           DAG.getTargetLoweringInfo().getPointerTy(DAG.getDataLayout()))
4705       .getNode();
4706 }
4707 
4708 namespace {
4709 
4710 struct TailCallArgumentInfo {
4711   SDValue Arg;
4712   SDValue FrameIdxOp;
4713   int FrameIdx = 0;
4714 
4715   TailCallArgumentInfo() = default;
4716 };
4717 
4718 } // end anonymous namespace
4719 
4720 /// StoreTailCallArgumentsToStackSlot - Stores arguments to their stack slot.
4721 static void StoreTailCallArgumentsToStackSlot(
4722     SelectionDAG &DAG, SDValue Chain,
4723     const SmallVectorImpl<TailCallArgumentInfo> &TailCallArgs,
4724     SmallVectorImpl<SDValue> &MemOpChains, const SDLoc &dl) {
4725   for (unsigned i = 0, e = TailCallArgs.size(); i != e; ++i) {
4726     SDValue Arg = TailCallArgs[i].Arg;
4727     SDValue FIN = TailCallArgs[i].FrameIdxOp;
4728     int FI = TailCallArgs[i].FrameIdx;
4729     // Store relative to framepointer.
4730     MemOpChains.push_back(DAG.getStore(
4731         Chain, dl, Arg, FIN,
4732         MachinePointerInfo::getFixedStack(DAG.getMachineFunction(), FI)));
4733   }
4734 }
4735 
4736 /// EmitTailCallStoreFPAndRetAddr - Move the frame pointer and return address to
4737 /// the appropriate stack slot for the tail call optimized function call.
4738 static SDValue EmitTailCallStoreFPAndRetAddr(SelectionDAG &DAG, SDValue Chain,
4739                                              SDValue OldRetAddr, SDValue OldFP,
4740                                              int SPDiff, const SDLoc &dl) {
4741   if (SPDiff) {
4742     // Calculate the new stack slot for the return address.
4743     MachineFunction &MF = DAG.getMachineFunction();
4744     const PPCSubtarget &Subtarget = MF.getSubtarget<PPCSubtarget>();
4745     const PPCFrameLowering *FL = Subtarget.getFrameLowering();
4746     bool isPPC64 = Subtarget.isPPC64();
4747     int SlotSize = isPPC64 ? 8 : 4;
4748     int NewRetAddrLoc = SPDiff + FL->getReturnSaveOffset();
4749     int NewRetAddr = MF.getFrameInfo().CreateFixedObject(SlotSize,
4750                                                          NewRetAddrLoc, true);
4751     EVT VT = isPPC64 ? MVT::i64 : MVT::i32;
4752     SDValue NewRetAddrFrIdx = DAG.getFrameIndex(NewRetAddr, VT);
4753     Chain = DAG.getStore(Chain, dl, OldRetAddr, NewRetAddrFrIdx,
4754                          MachinePointerInfo::getFixedStack(MF, NewRetAddr));
4755 
4756     // When using the 32/64-bit SVR4 ABI there is no need to move the FP stack
4757     // slot as the FP is never overwritten.
4758     if (Subtarget.isDarwinABI()) {
4759       int NewFPLoc = SPDiff + FL->getFramePointerSaveOffset();
4760       int NewFPIdx = MF.getFrameInfo().CreateFixedObject(SlotSize, NewFPLoc,
4761                                                          true);
4762       SDValue NewFramePtrIdx = DAG.getFrameIndex(NewFPIdx, VT);
4763       Chain = DAG.getStore(Chain, dl, OldFP, NewFramePtrIdx,
4764                            MachinePointerInfo::getFixedStack(
4765                                DAG.getMachineFunction(), NewFPIdx));
4766     }
4767   }
4768   return Chain;
4769 }
4770 
4771 /// CalculateTailCallArgDest - Remember Argument for later processing. Calculate
4772 /// the position of the argument.
4773 static void
4774 CalculateTailCallArgDest(SelectionDAG &DAG, MachineFunction &MF, bool isPPC64,
4775                          SDValue Arg, int SPDiff, unsigned ArgOffset,
4776                      SmallVectorImpl<TailCallArgumentInfo>& TailCallArguments) {
4777   int Offset = ArgOffset + SPDiff;
4778   uint32_t OpSize = (Arg.getValueSizeInBits() + 7) / 8;
4779   int FI = MF.getFrameInfo().CreateFixedObject(OpSize, Offset, true);
4780   EVT VT = isPPC64 ? MVT::i64 : MVT::i32;
4781   SDValue FIN = DAG.getFrameIndex(FI, VT);
4782   TailCallArgumentInfo Info;
4783   Info.Arg = Arg;
4784   Info.FrameIdxOp = FIN;
4785   Info.FrameIdx = FI;
4786   TailCallArguments.push_back(Info);
4787 }
4788 
4789 /// EmitTCFPAndRetAddrLoad - Emit load from frame pointer and return address
4790 /// stack slot. Returns the chain as result and the loaded frame pointers in
4791 /// LROpOut/FPOpout. Used when tail calling.
4792 SDValue PPCTargetLowering::EmitTailCallLoadFPAndRetAddr(
4793     SelectionDAG &DAG, int SPDiff, SDValue Chain, SDValue &LROpOut,
4794     SDValue &FPOpOut, const SDLoc &dl) const {
4795   if (SPDiff) {
4796     // Load the LR and FP stack slot for later adjusting.
4797     EVT VT = Subtarget.isPPC64() ? MVT::i64 : MVT::i32;
4798     LROpOut = getReturnAddrFrameIndex(DAG);
4799     LROpOut = DAG.getLoad(VT, dl, Chain, LROpOut, MachinePointerInfo());
4800     Chain = SDValue(LROpOut.getNode(), 1);
4801 
4802     // When using the 32/64-bit SVR4 ABI there is no need to load the FP stack
4803     // slot as the FP is never overwritten.
4804     if (Subtarget.isDarwinABI()) {
4805       FPOpOut = getFramePointerFrameIndex(DAG);
4806       FPOpOut = DAG.getLoad(VT, dl, Chain, FPOpOut, MachinePointerInfo());
4807       Chain = SDValue(FPOpOut.getNode(), 1);
4808     }
4809   }
4810   return Chain;
4811 }
4812 
4813 /// CreateCopyOfByValArgument - Make a copy of an aggregate at address specified
4814 /// by "Src" to address "Dst" of size "Size".  Alignment information is
4815 /// specified by the specific parameter attribute. The copy will be passed as
4816 /// a byval function parameter.
4817 /// Sometimes what we are copying is the end of a larger object, the part that
4818 /// does not fit in registers.
4819 static SDValue CreateCopyOfByValArgument(SDValue Src, SDValue Dst,
4820                                          SDValue Chain, ISD::ArgFlagsTy Flags,
4821                                          SelectionDAG &DAG, const SDLoc &dl) {
4822   SDValue SizeNode = DAG.getConstant(Flags.getByValSize(), dl, MVT::i32);
4823   return DAG.getMemcpy(Chain, dl, Dst, Src, SizeNode, Flags.getByValAlign(),
4824                        false, false, false, MachinePointerInfo(),
4825                        MachinePointerInfo());
4826 }
4827 
4828 /// LowerMemOpCallTo - Store the argument to the stack or remember it in case of
4829 /// tail calls.
4830 static void LowerMemOpCallTo(
4831     SelectionDAG &DAG, MachineFunction &MF, SDValue Chain, SDValue Arg,
4832     SDValue PtrOff, int SPDiff, unsigned ArgOffset, bool isPPC64,
4833     bool isTailCall, bool isVector, SmallVectorImpl<SDValue> &MemOpChains,
4834     SmallVectorImpl<TailCallArgumentInfo> &TailCallArguments, const SDLoc &dl) {
4835   EVT PtrVT = DAG.getTargetLoweringInfo().getPointerTy(DAG.getDataLayout());
4836   if (!isTailCall) {
4837     if (isVector) {
4838       SDValue StackPtr;
4839       if (isPPC64)
4840         StackPtr = DAG.getRegister(PPC::X1, MVT::i64);
4841       else
4842         StackPtr = DAG.getRegister(PPC::R1, MVT::i32);
4843       PtrOff = DAG.getNode(ISD::ADD, dl, PtrVT, StackPtr,
4844                            DAG.getConstant(ArgOffset, dl, PtrVT));
4845     }
4846     MemOpChains.push_back(
4847         DAG.getStore(Chain, dl, Arg, PtrOff, MachinePointerInfo()));
4848     // Calculate and remember argument location.
4849   } else CalculateTailCallArgDest(DAG, MF, isPPC64, Arg, SPDiff, ArgOffset,
4850                                   TailCallArguments);
4851 }
4852 
4853 static void
4854 PrepareTailCall(SelectionDAG &DAG, SDValue &InFlag, SDValue &Chain,
4855                 const SDLoc &dl, int SPDiff, unsigned NumBytes, SDValue LROp,
4856                 SDValue FPOp,
4857                 SmallVectorImpl<TailCallArgumentInfo> &TailCallArguments) {
4858   // Emit a sequence of copyto/copyfrom virtual registers for arguments that
4859   // might overwrite each other in case of tail call optimization.
4860   SmallVector<SDValue, 8> MemOpChains2;
4861   // Do not flag preceding copytoreg stuff together with the following stuff.
4862   InFlag = SDValue();
4863   StoreTailCallArgumentsToStackSlot(DAG, Chain, TailCallArguments,
4864                                     MemOpChains2, dl);
4865   if (!MemOpChains2.empty())
4866     Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, MemOpChains2);
4867 
4868   // Store the return address to the appropriate stack slot.
4869   Chain = EmitTailCallStoreFPAndRetAddr(DAG, Chain, LROp, FPOp, SPDiff, dl);
4870 
4871   // Emit callseq_end just before tailcall node.
4872   Chain = DAG.getCALLSEQ_END(Chain, DAG.getIntPtrConstant(NumBytes, dl, true),
4873                              DAG.getIntPtrConstant(0, dl, true), InFlag, dl);
4874   InFlag = Chain.getValue(1);
4875 }
4876 
4877 // Is this global address that of a function that can be called by name? (as
4878 // opposed to something that must hold a descriptor for an indirect call).
4879 static bool isFunctionGlobalAddress(SDValue Callee) {
4880   if (GlobalAddressSDNode *G = dyn_cast<GlobalAddressSDNode>(Callee)) {
4881     if (Callee.getOpcode() == ISD::GlobalTLSAddress ||
4882         Callee.getOpcode() == ISD::TargetGlobalTLSAddress)
4883       return false;
4884 
4885     return G->getGlobal()->getValueType()->isFunctionTy();
4886   }
4887 
4888   return false;
4889 }
4890 
4891 static unsigned
4892 PrepareCall(SelectionDAG &DAG, SDValue &Callee, SDValue &InFlag, SDValue &Chain,
4893             SDValue CallSeqStart, const SDLoc &dl, int SPDiff, bool isTailCall,
4894             bool isPatchPoint, bool hasNest,
4895             SmallVectorImpl<std::pair<unsigned, SDValue>> &RegsToPass,
4896             SmallVectorImpl<SDValue> &Ops, std::vector<EVT> &NodeTys,
4897             ImmutableCallSite CS, const PPCSubtarget &Subtarget) {
4898   bool isPPC64 = Subtarget.isPPC64();
4899   bool isSVR4ABI = Subtarget.isSVR4ABI();
4900   bool isELFv2ABI = Subtarget.isELFv2ABI();
4901 
4902   EVT PtrVT = DAG.getTargetLoweringInfo().getPointerTy(DAG.getDataLayout());
4903   NodeTys.push_back(MVT::Other);   // Returns a chain
4904   NodeTys.push_back(MVT::Glue);    // Returns a flag for retval copy to use.
4905 
4906   unsigned CallOpc = PPCISD::CALL;
4907 
4908   bool needIndirectCall = true;
4909   if (!isSVR4ABI || !isPPC64)
4910     if (SDNode *Dest = isBLACompatibleAddress(Callee, DAG)) {
4911       // If this is an absolute destination address, use the munged value.
4912       Callee = SDValue(Dest, 0);
4913       needIndirectCall = false;
4914     }
4915 
4916   // PC-relative references to external symbols should go through $stub, unless
4917   // we're building with the leopard linker or later, which automatically
4918   // synthesizes these stubs.
4919   const TargetMachine &TM = DAG.getTarget();
4920   const Module *Mod = DAG.getMachineFunction().getFunction().getParent();
4921   const GlobalValue *GV = nullptr;
4922   if (auto *G = dyn_cast<GlobalAddressSDNode>(Callee))
4923     GV = G->getGlobal();
4924   bool Local = TM.shouldAssumeDSOLocal(*Mod, GV);
4925   bool UsePlt = !Local && Subtarget.isTargetELF() && !isPPC64;
4926 
4927   if (isFunctionGlobalAddress(Callee)) {
4928     GlobalAddressSDNode *G = cast<GlobalAddressSDNode>(Callee);
4929     // A call to a TLS address is actually an indirect call to a
4930     // thread-specific pointer.
4931     unsigned OpFlags = 0;
4932     if (UsePlt)
4933       OpFlags = PPCII::MO_PLT;
4934 
4935     // If the callee is a GlobalAddress/ExternalSymbol node (quite common,
4936     // every direct call is) turn it into a TargetGlobalAddress /
4937     // TargetExternalSymbol node so that legalize doesn't hack it.
4938     Callee = DAG.getTargetGlobalAddress(G->getGlobal(), dl,
4939                                         Callee.getValueType(), 0, OpFlags);
4940     needIndirectCall = false;
4941   }
4942 
4943   if (ExternalSymbolSDNode *S = dyn_cast<ExternalSymbolSDNode>(Callee)) {
4944     unsigned char OpFlags = 0;
4945 
4946     if (UsePlt)
4947       OpFlags = PPCII::MO_PLT;
4948 
4949     Callee = DAG.getTargetExternalSymbol(S->getSymbol(), Callee.getValueType(),
4950                                          OpFlags);
4951     needIndirectCall = false;
4952   }
4953 
4954   if (isPatchPoint) {
4955     // We'll form an invalid direct call when lowering a patchpoint; the full
4956     // sequence for an indirect call is complicated, and many of the
4957     // instructions introduced might have side effects (and, thus, can't be
4958     // removed later). The call itself will be removed as soon as the
4959     // argument/return lowering is complete, so the fact that it has the wrong
4960     // kind of operands should not really matter.
4961     needIndirectCall = false;
4962   }
4963 
4964   if (needIndirectCall) {
4965     // Otherwise, this is an indirect call.  We have to use a MTCTR/BCTRL pair
4966     // to do the call, we can't use PPCISD::CALL.
4967     SDValue MTCTROps[] = {Chain, Callee, InFlag};
4968 
4969     if (isSVR4ABI && isPPC64 && !isELFv2ABI) {
4970       // Function pointers in the 64-bit SVR4 ABI do not point to the function
4971       // entry point, but to the function descriptor (the function entry point
4972       // address is part of the function descriptor though).
4973       // The function descriptor is a three doubleword structure with the
4974       // following fields: function entry point, TOC base address and
4975       // environment pointer.
4976       // Thus for a call through a function pointer, the following actions need
4977       // to be performed:
4978       //   1. Save the TOC of the caller in the TOC save area of its stack
4979       //      frame (this is done in LowerCall_Darwin() or LowerCall_64SVR4()).
4980       //   2. Load the address of the function entry point from the function
4981       //      descriptor.
4982       //   3. Load the TOC of the callee from the function descriptor into r2.
4983       //   4. Load the environment pointer from the function descriptor into
4984       //      r11.
4985       //   5. Branch to the function entry point address.
4986       //   6. On return of the callee, the TOC of the caller needs to be
4987       //      restored (this is done in FinishCall()).
4988       //
4989       // The loads are scheduled at the beginning of the call sequence, and the
4990       // register copies are flagged together to ensure that no other
4991       // operations can be scheduled in between. E.g. without flagging the
4992       // copies together, a TOC access in the caller could be scheduled between
4993       // the assignment of the callee TOC and the branch to the callee, which
4994       // results in the TOC access going through the TOC of the callee instead
4995       // of going through the TOC of the caller, which leads to incorrect code.
4996 
4997       // Load the address of the function entry point from the function
4998       // descriptor.
4999       SDValue LDChain = CallSeqStart.getValue(CallSeqStart->getNumValues()-1);
5000       if (LDChain.getValueType() == MVT::Glue)
5001         LDChain = CallSeqStart.getValue(CallSeqStart->getNumValues()-2);
5002 
5003       auto MMOFlags = Subtarget.hasInvariantFunctionDescriptors()
5004                           ? (MachineMemOperand::MODereferenceable |
5005                              MachineMemOperand::MOInvariant)
5006                           : MachineMemOperand::MONone;
5007 
5008       MachinePointerInfo MPI(CS ? CS.getCalledValue() : nullptr);
5009       SDValue LoadFuncPtr = DAG.getLoad(MVT::i64, dl, LDChain, Callee, MPI,
5010                                         /* Alignment = */ 8, MMOFlags);
5011 
5012       // Load environment pointer into r11.
5013       SDValue PtrOff = DAG.getIntPtrConstant(16, dl);
5014       SDValue AddPtr = DAG.getNode(ISD::ADD, dl, MVT::i64, Callee, PtrOff);
5015       SDValue LoadEnvPtr =
5016           DAG.getLoad(MVT::i64, dl, LDChain, AddPtr, MPI.getWithOffset(16),
5017                       /* Alignment = */ 8, MMOFlags);
5018 
5019       SDValue TOCOff = DAG.getIntPtrConstant(8, dl);
5020       SDValue AddTOC = DAG.getNode(ISD::ADD, dl, MVT::i64, Callee, TOCOff);
5021       SDValue TOCPtr =
5022           DAG.getLoad(MVT::i64, dl, LDChain, AddTOC, MPI.getWithOffset(8),
5023                       /* Alignment = */ 8, MMOFlags);
5024 
5025       setUsesTOCBasePtr(DAG);
5026       SDValue TOCVal = DAG.getCopyToReg(Chain, dl, PPC::X2, TOCPtr,
5027                                         InFlag);
5028       Chain = TOCVal.getValue(0);
5029       InFlag = TOCVal.getValue(1);
5030 
5031       // If the function call has an explicit 'nest' parameter, it takes the
5032       // place of the environment pointer.
5033       if (!hasNest) {
5034         SDValue EnvVal = DAG.getCopyToReg(Chain, dl, PPC::X11, LoadEnvPtr,
5035                                           InFlag);
5036 
5037         Chain = EnvVal.getValue(0);
5038         InFlag = EnvVal.getValue(1);
5039       }
5040 
5041       MTCTROps[0] = Chain;
5042       MTCTROps[1] = LoadFuncPtr;
5043       MTCTROps[2] = InFlag;
5044     }
5045 
5046     Chain = DAG.getNode(PPCISD::MTCTR, dl, NodeTys,
5047                         makeArrayRef(MTCTROps, InFlag.getNode() ? 3 : 2));
5048     InFlag = Chain.getValue(1);
5049 
5050     NodeTys.clear();
5051     NodeTys.push_back(MVT::Other);
5052     NodeTys.push_back(MVT::Glue);
5053     Ops.push_back(Chain);
5054     CallOpc = PPCISD::BCTRL;
5055     Callee.setNode(nullptr);
5056     // Add use of X11 (holding environment pointer)
5057     if (isSVR4ABI && isPPC64 && !isELFv2ABI && !hasNest)
5058       Ops.push_back(DAG.getRegister(PPC::X11, PtrVT));
5059     // Add CTR register as callee so a bctr can be emitted later.
5060     if (isTailCall)
5061       Ops.push_back(DAG.getRegister(isPPC64 ? PPC::CTR8 : PPC::CTR, PtrVT));
5062   }
5063 
5064   // If this is a direct call, pass the chain and the callee.
5065   if (Callee.getNode()) {
5066     Ops.push_back(Chain);
5067     Ops.push_back(Callee);
5068   }
5069   // If this is a tail call add stack pointer delta.
5070   if (isTailCall)
5071     Ops.push_back(DAG.getConstant(SPDiff, dl, MVT::i32));
5072 
5073   // Add argument registers to the end of the list so that they are known live
5074   // into the call.
5075   for (unsigned i = 0, e = RegsToPass.size(); i != e; ++i)
5076     Ops.push_back(DAG.getRegister(RegsToPass[i].first,
5077                                   RegsToPass[i].second.getValueType()));
5078 
5079   // All calls, in both the ELF V1 and V2 ABIs, need the TOC register live
5080   // into the call.
5081   if (isSVR4ABI && isPPC64 && !isPatchPoint) {
5082     setUsesTOCBasePtr(DAG);
5083     Ops.push_back(DAG.getRegister(PPC::X2, PtrVT));
5084   }
5085 
5086   return CallOpc;
5087 }
5088 
5089 SDValue PPCTargetLowering::LowerCallResult(
5090     SDValue Chain, SDValue InFlag, CallingConv::ID CallConv, bool isVarArg,
5091     const SmallVectorImpl<ISD::InputArg> &Ins, const SDLoc &dl,
5092     SelectionDAG &DAG, SmallVectorImpl<SDValue> &InVals) const {
5093   SmallVector<CCValAssign, 16> RVLocs;
5094   CCState CCRetInfo(CallConv, isVarArg, DAG.getMachineFunction(), RVLocs,
5095                     *DAG.getContext());
5096 
5097   CCRetInfo.AnalyzeCallResult(
5098       Ins, (Subtarget.isSVR4ABI() && CallConv == CallingConv::Cold)
5099                ? RetCC_PPC_Cold
5100                : RetCC_PPC);
5101 
5102   // Copy all of the result registers out of their specified physreg.
5103   for (unsigned i = 0, e = RVLocs.size(); i != e; ++i) {
5104     CCValAssign &VA = RVLocs[i];
5105     assert(VA.isRegLoc() && "Can only return in registers!");
5106 
5107     SDValue Val = DAG.getCopyFromReg(Chain, dl,
5108                                      VA.getLocReg(), VA.getLocVT(), InFlag);
5109     Chain = Val.getValue(1);
5110     InFlag = Val.getValue(2);
5111 
5112     switch (VA.getLocInfo()) {
5113     default: llvm_unreachable("Unknown loc info!");
5114     case CCValAssign::Full: break;
5115     case CCValAssign::AExt:
5116       Val = DAG.getNode(ISD::TRUNCATE, dl, VA.getValVT(), Val);
5117       break;
5118     case CCValAssign::ZExt:
5119       Val = DAG.getNode(ISD::AssertZext, dl, VA.getLocVT(), Val,
5120                         DAG.getValueType(VA.getValVT()));
5121       Val = DAG.getNode(ISD::TRUNCATE, dl, VA.getValVT(), Val);
5122       break;
5123     case CCValAssign::SExt:
5124       Val = DAG.getNode(ISD::AssertSext, dl, VA.getLocVT(), Val,
5125                         DAG.getValueType(VA.getValVT()));
5126       Val = DAG.getNode(ISD::TRUNCATE, dl, VA.getValVT(), Val);
5127       break;
5128     }
5129 
5130     InVals.push_back(Val);
5131   }
5132 
5133   return Chain;
5134 }
5135 
5136 SDValue PPCTargetLowering::FinishCall(
5137     CallingConv::ID CallConv, const SDLoc &dl, bool isTailCall, bool isVarArg,
5138     bool isPatchPoint, bool hasNest, SelectionDAG &DAG,
5139     SmallVector<std::pair<unsigned, SDValue>, 8> &RegsToPass, SDValue InFlag,
5140     SDValue Chain, SDValue CallSeqStart, SDValue &Callee, int SPDiff,
5141     unsigned NumBytes, const SmallVectorImpl<ISD::InputArg> &Ins,
5142     SmallVectorImpl<SDValue> &InVals, ImmutableCallSite CS) const {
5143   std::vector<EVT> NodeTys;
5144   SmallVector<SDValue, 8> Ops;
5145   unsigned CallOpc = PrepareCall(DAG, Callee, InFlag, Chain, CallSeqStart, dl,
5146                                  SPDiff, isTailCall, isPatchPoint, hasNest,
5147                                  RegsToPass, Ops, NodeTys, CS, Subtarget);
5148 
5149   // Add implicit use of CR bit 6 for 32-bit SVR4 vararg calls
5150   if (isVarArg && Subtarget.isSVR4ABI() && !Subtarget.isPPC64())
5151     Ops.push_back(DAG.getRegister(PPC::CR1EQ, MVT::i32));
5152 
5153   // When performing tail call optimization the callee pops its arguments off
5154   // the stack. Account for this here so these bytes can be pushed back on in
5155   // PPCFrameLowering::eliminateCallFramePseudoInstr.
5156   int BytesCalleePops =
5157     (CallConv == CallingConv::Fast &&
5158      getTargetMachine().Options.GuaranteedTailCallOpt) ? NumBytes : 0;
5159 
5160   // Add a register mask operand representing the call-preserved registers.
5161   const TargetRegisterInfo *TRI = Subtarget.getRegisterInfo();
5162   const uint32_t *Mask =
5163       TRI->getCallPreservedMask(DAG.getMachineFunction(), CallConv);
5164   assert(Mask && "Missing call preserved mask for calling convention");
5165   Ops.push_back(DAG.getRegisterMask(Mask));
5166 
5167   if (InFlag.getNode())
5168     Ops.push_back(InFlag);
5169 
5170   // Emit tail call.
5171   if (isTailCall) {
5172     assert(((Callee.getOpcode() == ISD::Register &&
5173              cast<RegisterSDNode>(Callee)->getReg() == PPC::CTR) ||
5174             Callee.getOpcode() == ISD::TargetExternalSymbol ||
5175             Callee.getOpcode() == ISD::TargetGlobalAddress ||
5176             isa<ConstantSDNode>(Callee)) &&
5177     "Expecting an global address, external symbol, absolute value or register");
5178 
5179     DAG.getMachineFunction().getFrameInfo().setHasTailCall();
5180     return DAG.getNode(PPCISD::TC_RETURN, dl, MVT::Other, Ops);
5181   }
5182 
5183   // Add a NOP immediately after the branch instruction when using the 64-bit
5184   // SVR4 ABI. At link time, if caller and callee are in a different module and
5185   // thus have a different TOC, the call will be replaced with a call to a stub
5186   // function which saves the current TOC, loads the TOC of the callee and
5187   // branches to the callee. The NOP will be replaced with a load instruction
5188   // which restores the TOC of the caller from the TOC save slot of the current
5189   // stack frame. If caller and callee belong to the same module (and have the
5190   // same TOC), the NOP will remain unchanged.
5191 
5192   MachineFunction &MF = DAG.getMachineFunction();
5193   if (!isTailCall && Subtarget.isSVR4ABI()&& Subtarget.isPPC64() &&
5194       !isPatchPoint) {
5195     if (CallOpc == PPCISD::BCTRL) {
5196       // This is a call through a function pointer.
5197       // Restore the caller TOC from the save area into R2.
5198       // See PrepareCall() for more information about calls through function
5199       // pointers in the 64-bit SVR4 ABI.
5200       // We are using a target-specific load with r2 hard coded, because the
5201       // result of a target-independent load would never go directly into r2,
5202       // since r2 is a reserved register (which prevents the register allocator
5203       // from allocating it), resulting in an additional register being
5204       // allocated and an unnecessary move instruction being generated.
5205       CallOpc = PPCISD::BCTRL_LOAD_TOC;
5206 
5207       EVT PtrVT = getPointerTy(DAG.getDataLayout());
5208       SDValue StackPtr = DAG.getRegister(PPC::X1, PtrVT);
5209       unsigned TOCSaveOffset = Subtarget.getFrameLowering()->getTOCSaveOffset();
5210       SDValue TOCOff = DAG.getIntPtrConstant(TOCSaveOffset, dl);
5211       SDValue AddTOC = DAG.getNode(ISD::ADD, dl, MVT::i64, StackPtr, TOCOff);
5212 
5213       // The address needs to go after the chain input but before the flag (or
5214       // any other variadic arguments).
5215       Ops.insert(std::next(Ops.begin()), AddTOC);
5216     } else if (CallOpc == PPCISD::CALL &&
5217       !callsShareTOCBase(&MF.getFunction(), Callee, DAG.getTarget())) {
5218       // Otherwise insert NOP for non-local calls.
5219       CallOpc = PPCISD::CALL_NOP;
5220     }
5221   }
5222 
5223   Chain = DAG.getNode(CallOpc, dl, NodeTys, Ops);
5224   InFlag = Chain.getValue(1);
5225 
5226   Chain = DAG.getCALLSEQ_END(Chain, DAG.getIntPtrConstant(NumBytes, dl, true),
5227                              DAG.getIntPtrConstant(BytesCalleePops, dl, true),
5228                              InFlag, dl);
5229   if (!Ins.empty())
5230     InFlag = Chain.getValue(1);
5231 
5232   return LowerCallResult(Chain, InFlag, CallConv, isVarArg,
5233                          Ins, dl, DAG, InVals);
5234 }
5235 
5236 SDValue
5237 PPCTargetLowering::LowerCall(TargetLowering::CallLoweringInfo &CLI,
5238                              SmallVectorImpl<SDValue> &InVals) const {
5239   SelectionDAG &DAG                     = CLI.DAG;
5240   SDLoc &dl                             = CLI.DL;
5241   SmallVectorImpl<ISD::OutputArg> &Outs = CLI.Outs;
5242   SmallVectorImpl<SDValue> &OutVals     = CLI.OutVals;
5243   SmallVectorImpl<ISD::InputArg> &Ins   = CLI.Ins;
5244   SDValue Chain                         = CLI.Chain;
5245   SDValue Callee                        = CLI.Callee;
5246   bool &isTailCall                      = CLI.IsTailCall;
5247   CallingConv::ID CallConv              = CLI.CallConv;
5248   bool isVarArg                         = CLI.IsVarArg;
5249   bool isPatchPoint                     = CLI.IsPatchPoint;
5250   ImmutableCallSite CS                  = CLI.CS;
5251 
5252   if (isTailCall) {
5253     if (Subtarget.useLongCalls() && !(CS && CS.isMustTailCall()))
5254       isTailCall = false;
5255     else if (Subtarget.isSVR4ABI() && Subtarget.isPPC64())
5256       isTailCall =
5257         IsEligibleForTailCallOptimization_64SVR4(Callee, CallConv, CS,
5258                                                  isVarArg, Outs, Ins, DAG);
5259     else
5260       isTailCall = IsEligibleForTailCallOptimization(Callee, CallConv, isVarArg,
5261                                                      Ins, DAG);
5262     if (isTailCall) {
5263       ++NumTailCalls;
5264       if (!getTargetMachine().Options.GuaranteedTailCallOpt)
5265         ++NumSiblingCalls;
5266 
5267       assert(isa<GlobalAddressSDNode>(Callee) &&
5268              "Callee should be an llvm::Function object.");
5269       LLVM_DEBUG(
5270           const GlobalValue *GV =
5271               cast<GlobalAddressSDNode>(Callee)->getGlobal();
5272           const unsigned Width =
5273               80 - strlen("TCO caller: ") - strlen(", callee linkage: 0, 0");
5274           dbgs() << "TCO caller: "
5275                  << left_justify(DAG.getMachineFunction().getName(), Width)
5276                  << ", callee linkage: " << GV->getVisibility() << ", "
5277                  << GV->getLinkage() << "\n");
5278     }
5279   }
5280 
5281   if (!isTailCall && CS && CS.isMustTailCall())
5282     report_fatal_error("failed to perform tail call elimination on a call "
5283                        "site marked musttail");
5284 
5285   // When long calls (i.e. indirect calls) are always used, calls are always
5286   // made via function pointer. If we have a function name, first translate it
5287   // into a pointer.
5288   if (Subtarget.useLongCalls() && isa<GlobalAddressSDNode>(Callee) &&
5289       !isTailCall)
5290     Callee = LowerGlobalAddress(Callee, DAG);
5291 
5292   if (Subtarget.isSVR4ABI()) {
5293     if (Subtarget.isPPC64())
5294       return LowerCall_64SVR4(Chain, Callee, CallConv, isVarArg,
5295                               isTailCall, isPatchPoint, Outs, OutVals, Ins,
5296                               dl, DAG, InVals, CS);
5297     else
5298       return LowerCall_32SVR4(Chain, Callee, CallConv, isVarArg,
5299                               isTailCall, isPatchPoint, Outs, OutVals, Ins,
5300                               dl, DAG, InVals, CS);
5301   }
5302 
5303   return LowerCall_Darwin(Chain, Callee, CallConv, isVarArg,
5304                           isTailCall, isPatchPoint, Outs, OutVals, Ins,
5305                           dl, DAG, InVals, CS);
5306 }
5307 
5308 SDValue PPCTargetLowering::LowerCall_32SVR4(
5309     SDValue Chain, SDValue Callee, CallingConv::ID CallConv, bool isVarArg,
5310     bool isTailCall, bool isPatchPoint,
5311     const SmallVectorImpl<ISD::OutputArg> &Outs,
5312     const SmallVectorImpl<SDValue> &OutVals,
5313     const SmallVectorImpl<ISD::InputArg> &Ins, const SDLoc &dl,
5314     SelectionDAG &DAG, SmallVectorImpl<SDValue> &InVals,
5315     ImmutableCallSite CS) const {
5316   // See PPCTargetLowering::LowerFormalArguments_32SVR4() for a description
5317   // of the 32-bit SVR4 ABI stack frame layout.
5318 
5319   assert((CallConv == CallingConv::C ||
5320           CallConv == CallingConv::Cold ||
5321           CallConv == CallingConv::Fast) && "Unknown calling convention!");
5322 
5323   unsigned PtrByteSize = 4;
5324 
5325   MachineFunction &MF = DAG.getMachineFunction();
5326 
5327   // Mark this function as potentially containing a function that contains a
5328   // tail call. As a consequence the frame pointer will be used for dynamicalloc
5329   // and restoring the callers stack pointer in this functions epilog. This is
5330   // done because by tail calling the called function might overwrite the value
5331   // in this function's (MF) stack pointer stack slot 0(SP).
5332   if (getTargetMachine().Options.GuaranteedTailCallOpt &&
5333       CallConv == CallingConv::Fast)
5334     MF.getInfo<PPCFunctionInfo>()->setHasFastCall();
5335 
5336   // Count how many bytes are to be pushed on the stack, including the linkage
5337   // area, parameter list area and the part of the local variable space which
5338   // contains copies of aggregates which are passed by value.
5339 
5340   // Assign locations to all of the outgoing arguments.
5341   SmallVector<CCValAssign, 16> ArgLocs;
5342   PPCCCState CCInfo(CallConv, isVarArg, MF, ArgLocs, *DAG.getContext());
5343 
5344   // Reserve space for the linkage area on the stack.
5345   CCInfo.AllocateStack(Subtarget.getFrameLowering()->getLinkageSize(),
5346                        PtrByteSize);
5347   if (useSoftFloat())
5348     CCInfo.PreAnalyzeCallOperands(Outs);
5349 
5350   if (isVarArg) {
5351     // Handle fixed and variable vector arguments differently.
5352     // Fixed vector arguments go into registers as long as registers are
5353     // available. Variable vector arguments always go into memory.
5354     unsigned NumArgs = Outs.size();
5355 
5356     for (unsigned i = 0; i != NumArgs; ++i) {
5357       MVT ArgVT = Outs[i].VT;
5358       ISD::ArgFlagsTy ArgFlags = Outs[i].Flags;
5359       bool Result;
5360 
5361       if (Outs[i].IsFixed) {
5362         Result = CC_PPC32_SVR4(i, ArgVT, ArgVT, CCValAssign::Full, ArgFlags,
5363                                CCInfo);
5364       } else {
5365         Result = CC_PPC32_SVR4_VarArg(i, ArgVT, ArgVT, CCValAssign::Full,
5366                                       ArgFlags, CCInfo);
5367       }
5368 
5369       if (Result) {
5370 #ifndef NDEBUG
5371         errs() << "Call operand #" << i << " has unhandled type "
5372              << EVT(ArgVT).getEVTString() << "\n";
5373 #endif
5374         llvm_unreachable(nullptr);
5375       }
5376     }
5377   } else {
5378     // All arguments are treated the same.
5379     CCInfo.AnalyzeCallOperands(Outs, CC_PPC32_SVR4);
5380   }
5381   CCInfo.clearWasPPCF128();
5382 
5383   // Assign locations to all of the outgoing aggregate by value arguments.
5384   SmallVector<CCValAssign, 16> ByValArgLocs;
5385   CCState CCByValInfo(CallConv, isVarArg, MF, ByValArgLocs, *DAG.getContext());
5386 
5387   // Reserve stack space for the allocations in CCInfo.
5388   CCByValInfo.AllocateStack(CCInfo.getNextStackOffset(), PtrByteSize);
5389 
5390   CCByValInfo.AnalyzeCallOperands(Outs, CC_PPC32_SVR4_ByVal);
5391 
5392   // Size of the linkage area, parameter list area and the part of the local
5393   // space variable where copies of aggregates which are passed by value are
5394   // stored.
5395   unsigned NumBytes = CCByValInfo.getNextStackOffset();
5396 
5397   // Calculate by how many bytes the stack has to be adjusted in case of tail
5398   // call optimization.
5399   int SPDiff = CalculateTailCallSPDiff(DAG, isTailCall, NumBytes);
5400 
5401   // Adjust the stack pointer for the new arguments...
5402   // These operations are automatically eliminated by the prolog/epilog pass
5403   Chain = DAG.getCALLSEQ_START(Chain, NumBytes, 0, dl);
5404   SDValue CallSeqStart = Chain;
5405 
5406   // Load the return address and frame pointer so it can be moved somewhere else
5407   // later.
5408   SDValue LROp, FPOp;
5409   Chain = EmitTailCallLoadFPAndRetAddr(DAG, SPDiff, Chain, LROp, FPOp, dl);
5410 
5411   // Set up a copy of the stack pointer for use loading and storing any
5412   // arguments that may not fit in the registers available for argument
5413   // passing.
5414   SDValue StackPtr = DAG.getRegister(PPC::R1, MVT::i32);
5415 
5416   SmallVector<std::pair<unsigned, SDValue>, 8> RegsToPass;
5417   SmallVector<TailCallArgumentInfo, 8> TailCallArguments;
5418   SmallVector<SDValue, 8> MemOpChains;
5419 
5420   bool seenFloatArg = false;
5421   // Walk the register/memloc assignments, inserting copies/loads.
5422   for (unsigned i = 0, j = 0, e = ArgLocs.size();
5423        i != e;
5424        ++i) {
5425     CCValAssign &VA = ArgLocs[i];
5426     SDValue Arg = OutVals[i];
5427     ISD::ArgFlagsTy Flags = Outs[i].Flags;
5428 
5429     if (Flags.isByVal()) {
5430       // Argument is an aggregate which is passed by value, thus we need to
5431       // create a copy of it in the local variable space of the current stack
5432       // frame (which is the stack frame of the caller) and pass the address of
5433       // this copy to the callee.
5434       assert((j < ByValArgLocs.size()) && "Index out of bounds!");
5435       CCValAssign &ByValVA = ByValArgLocs[j++];
5436       assert((VA.getValNo() == ByValVA.getValNo()) && "ValNo mismatch!");
5437 
5438       // Memory reserved in the local variable space of the callers stack frame.
5439       unsigned LocMemOffset = ByValVA.getLocMemOffset();
5440 
5441       SDValue PtrOff = DAG.getIntPtrConstant(LocMemOffset, dl);
5442       PtrOff = DAG.getNode(ISD::ADD, dl, getPointerTy(MF.getDataLayout()),
5443                            StackPtr, PtrOff);
5444 
5445       // Create a copy of the argument in the local area of the current
5446       // stack frame.
5447       SDValue MemcpyCall =
5448         CreateCopyOfByValArgument(Arg, PtrOff,
5449                                   CallSeqStart.getNode()->getOperand(0),
5450                                   Flags, DAG, dl);
5451 
5452       // This must go outside the CALLSEQ_START..END.
5453       SDValue NewCallSeqStart = DAG.getCALLSEQ_START(MemcpyCall, NumBytes, 0,
5454                                                      SDLoc(MemcpyCall));
5455       DAG.ReplaceAllUsesWith(CallSeqStart.getNode(),
5456                              NewCallSeqStart.getNode());
5457       Chain = CallSeqStart = NewCallSeqStart;
5458 
5459       // Pass the address of the aggregate copy on the stack either in a
5460       // physical register or in the parameter list area of the current stack
5461       // frame to the callee.
5462       Arg = PtrOff;
5463     }
5464 
5465     if (VA.isRegLoc()) {
5466       if (Arg.getValueType() == MVT::i1)
5467         Arg = DAG.getNode(ISD::ZERO_EXTEND, dl, MVT::i32, Arg);
5468 
5469       seenFloatArg |= VA.getLocVT().isFloatingPoint();
5470       // Put argument in a physical register.
5471       RegsToPass.push_back(std::make_pair(VA.getLocReg(), Arg));
5472     } else {
5473       // Put argument in the parameter list area of the current stack frame.
5474       assert(VA.isMemLoc());
5475       unsigned LocMemOffset = VA.getLocMemOffset();
5476 
5477       if (!isTailCall) {
5478         SDValue PtrOff = DAG.getIntPtrConstant(LocMemOffset, dl);
5479         PtrOff = DAG.getNode(ISD::ADD, dl, getPointerTy(MF.getDataLayout()),
5480                              StackPtr, PtrOff);
5481 
5482         MemOpChains.push_back(
5483             DAG.getStore(Chain, dl, Arg, PtrOff, MachinePointerInfo()));
5484       } else {
5485         // Calculate and remember argument location.
5486         CalculateTailCallArgDest(DAG, MF, false, Arg, SPDiff, LocMemOffset,
5487                                  TailCallArguments);
5488       }
5489     }
5490   }
5491 
5492   if (!MemOpChains.empty())
5493     Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, MemOpChains);
5494 
5495   // Build a sequence of copy-to-reg nodes chained together with token chain
5496   // and flag operands which copy the outgoing args into the appropriate regs.
5497   SDValue InFlag;
5498   for (unsigned i = 0, e = RegsToPass.size(); i != e; ++i) {
5499     Chain = DAG.getCopyToReg(Chain, dl, RegsToPass[i].first,
5500                              RegsToPass[i].second, InFlag);
5501     InFlag = Chain.getValue(1);
5502   }
5503 
5504   // Set CR bit 6 to true if this is a vararg call with floating args passed in
5505   // registers.
5506   if (isVarArg) {
5507     SDVTList VTs = DAG.getVTList(MVT::Other, MVT::Glue);
5508     SDValue Ops[] = { Chain, InFlag };
5509 
5510     Chain = DAG.getNode(seenFloatArg ? PPCISD::CR6SET : PPCISD::CR6UNSET,
5511                         dl, VTs, makeArrayRef(Ops, InFlag.getNode() ? 2 : 1));
5512 
5513     InFlag = Chain.getValue(1);
5514   }
5515 
5516   if (isTailCall)
5517     PrepareTailCall(DAG, InFlag, Chain, dl, SPDiff, NumBytes, LROp, FPOp,
5518                     TailCallArguments);
5519 
5520   return FinishCall(CallConv, dl, isTailCall, isVarArg, isPatchPoint,
5521                     /* unused except on PPC64 ELFv1 */ false, DAG,
5522                     RegsToPass, InFlag, Chain, CallSeqStart, Callee, SPDiff,
5523                     NumBytes, Ins, InVals, CS);
5524 }
5525 
5526 // Copy an argument into memory, being careful to do this outside the
5527 // call sequence for the call to which the argument belongs.
5528 SDValue PPCTargetLowering::createMemcpyOutsideCallSeq(
5529     SDValue Arg, SDValue PtrOff, SDValue CallSeqStart, ISD::ArgFlagsTy Flags,
5530     SelectionDAG &DAG, const SDLoc &dl) const {
5531   SDValue MemcpyCall = CreateCopyOfByValArgument(Arg, PtrOff,
5532                         CallSeqStart.getNode()->getOperand(0),
5533                         Flags, DAG, dl);
5534   // The MEMCPY must go outside the CALLSEQ_START..END.
5535   int64_t FrameSize = CallSeqStart.getConstantOperandVal(1);
5536   SDValue NewCallSeqStart = DAG.getCALLSEQ_START(MemcpyCall, FrameSize, 0,
5537                                                  SDLoc(MemcpyCall));
5538   DAG.ReplaceAllUsesWith(CallSeqStart.getNode(),
5539                          NewCallSeqStart.getNode());
5540   return NewCallSeqStart;
5541 }
5542 
5543 SDValue PPCTargetLowering::LowerCall_64SVR4(
5544     SDValue Chain, SDValue Callee, CallingConv::ID CallConv, bool isVarArg,
5545     bool isTailCall, bool isPatchPoint,
5546     const SmallVectorImpl<ISD::OutputArg> &Outs,
5547     const SmallVectorImpl<SDValue> &OutVals,
5548     const SmallVectorImpl<ISD::InputArg> &Ins, const SDLoc &dl,
5549     SelectionDAG &DAG, SmallVectorImpl<SDValue> &InVals,
5550     ImmutableCallSite CS) const {
5551   bool isELFv2ABI = Subtarget.isELFv2ABI();
5552   bool isLittleEndian = Subtarget.isLittleEndian();
5553   unsigned NumOps = Outs.size();
5554   bool hasNest = false;
5555   bool IsSibCall = false;
5556 
5557   EVT PtrVT = getPointerTy(DAG.getDataLayout());
5558   unsigned PtrByteSize = 8;
5559 
5560   MachineFunction &MF = DAG.getMachineFunction();
5561 
5562   if (isTailCall && !getTargetMachine().Options.GuaranteedTailCallOpt)
5563     IsSibCall = true;
5564 
5565   // Mark this function as potentially containing a function that contains a
5566   // tail call. As a consequence the frame pointer will be used for dynamicalloc
5567   // and restoring the callers stack pointer in this functions epilog. This is
5568   // done because by tail calling the called function might overwrite the value
5569   // in this function's (MF) stack pointer stack slot 0(SP).
5570   if (getTargetMachine().Options.GuaranteedTailCallOpt &&
5571       CallConv == CallingConv::Fast)
5572     MF.getInfo<PPCFunctionInfo>()->setHasFastCall();
5573 
5574   assert(!(CallConv == CallingConv::Fast && isVarArg) &&
5575          "fastcc not supported on varargs functions");
5576 
5577   // Count how many bytes are to be pushed on the stack, including the linkage
5578   // area, and parameter passing area.  On ELFv1, the linkage area is 48 bytes
5579   // reserved space for [SP][CR][LR][2 x unused][TOC]; on ELFv2, the linkage
5580   // area is 32 bytes reserved space for [SP][CR][LR][TOC].
5581   unsigned LinkageSize = Subtarget.getFrameLowering()->getLinkageSize();
5582   unsigned NumBytes = LinkageSize;
5583   unsigned GPR_idx = 0, FPR_idx = 0, VR_idx = 0;
5584   unsigned &QFPR_idx = FPR_idx;
5585 
5586   static const MCPhysReg GPR[] = {
5587     PPC::X3, PPC::X4, PPC::X5, PPC::X6,
5588     PPC::X7, PPC::X8, PPC::X9, PPC::X10,
5589   };
5590   static const MCPhysReg VR[] = {
5591     PPC::V2, PPC::V3, PPC::V4, PPC::V5, PPC::V6, PPC::V7, PPC::V8,
5592     PPC::V9, PPC::V10, PPC::V11, PPC::V12, PPC::V13
5593   };
5594 
5595   const unsigned NumGPRs = array_lengthof(GPR);
5596   const unsigned NumFPRs = useSoftFloat() ? 0 : 13;
5597   const unsigned NumVRs  = array_lengthof(VR);
5598   const unsigned NumQFPRs = NumFPRs;
5599 
5600   // On ELFv2, we can avoid allocating the parameter area if all the arguments
5601   // can be passed to the callee in registers.
5602   // For the fast calling convention, there is another check below.
5603   // Note: We should keep consistent with LowerFormalArguments_64SVR4()
5604   bool HasParameterArea = !isELFv2ABI || isVarArg || CallConv == CallingConv::Fast;
5605   if (!HasParameterArea) {
5606     unsigned ParamAreaSize = NumGPRs * PtrByteSize;
5607     unsigned AvailableFPRs = NumFPRs;
5608     unsigned AvailableVRs = NumVRs;
5609     unsigned NumBytesTmp = NumBytes;
5610     for (unsigned i = 0; i != NumOps; ++i) {
5611       if (Outs[i].Flags.isNest()) continue;
5612       if (CalculateStackSlotUsed(Outs[i].VT, Outs[i].ArgVT, Outs[i].Flags,
5613                                 PtrByteSize, LinkageSize, ParamAreaSize,
5614                                 NumBytesTmp, AvailableFPRs, AvailableVRs,
5615                                 Subtarget.hasQPX()))
5616         HasParameterArea = true;
5617     }
5618   }
5619 
5620   // When using the fast calling convention, we don't provide backing for
5621   // arguments that will be in registers.
5622   unsigned NumGPRsUsed = 0, NumFPRsUsed = 0, NumVRsUsed = 0;
5623 
5624   // Avoid allocating parameter area for fastcc functions if all the arguments
5625   // can be passed in the registers.
5626   if (CallConv == CallingConv::Fast)
5627     HasParameterArea = false;
5628 
5629   // Add up all the space actually used.
5630   for (unsigned i = 0; i != NumOps; ++i) {
5631     ISD::ArgFlagsTy Flags = Outs[i].Flags;
5632     EVT ArgVT = Outs[i].VT;
5633     EVT OrigVT = Outs[i].ArgVT;
5634 
5635     if (Flags.isNest())
5636       continue;
5637 
5638     if (CallConv == CallingConv::Fast) {
5639       if (Flags.isByVal()) {
5640         NumGPRsUsed += (Flags.getByValSize()+7)/8;
5641         if (NumGPRsUsed > NumGPRs)
5642           HasParameterArea = true;
5643       } else {
5644         switch (ArgVT.getSimpleVT().SimpleTy) {
5645         default: llvm_unreachable("Unexpected ValueType for argument!");
5646         case MVT::i1:
5647         case MVT::i32:
5648         case MVT::i64:
5649           if (++NumGPRsUsed <= NumGPRs)
5650             continue;
5651           break;
5652         case MVT::v4i32:
5653         case MVT::v8i16:
5654         case MVT::v16i8:
5655         case MVT::v2f64:
5656         case MVT::v2i64:
5657         case MVT::v1i128:
5658         case MVT::f128:
5659           if (++NumVRsUsed <= NumVRs)
5660             continue;
5661           break;
5662         case MVT::v4f32:
5663           // When using QPX, this is handled like a FP register, otherwise, it
5664           // is an Altivec register.
5665           if (Subtarget.hasQPX()) {
5666             if (++NumFPRsUsed <= NumFPRs)
5667               continue;
5668           } else {
5669             if (++NumVRsUsed <= NumVRs)
5670               continue;
5671           }
5672           break;
5673         case MVT::f32:
5674         case MVT::f64:
5675         case MVT::v4f64: // QPX
5676         case MVT::v4i1:  // QPX
5677           if (++NumFPRsUsed <= NumFPRs)
5678             continue;
5679           break;
5680         }
5681         HasParameterArea = true;
5682       }
5683     }
5684 
5685     /* Respect alignment of argument on the stack.  */
5686     unsigned Align =
5687       CalculateStackSlotAlignment(ArgVT, OrigVT, Flags, PtrByteSize);
5688     NumBytes = ((NumBytes + Align - 1) / Align) * Align;
5689 
5690     NumBytes += CalculateStackSlotSize(ArgVT, Flags, PtrByteSize);
5691     if (Flags.isInConsecutiveRegsLast())
5692       NumBytes = ((NumBytes + PtrByteSize - 1)/PtrByteSize) * PtrByteSize;
5693   }
5694 
5695   unsigned NumBytesActuallyUsed = NumBytes;
5696 
5697   // In the old ELFv1 ABI,
5698   // the prolog code of the callee may store up to 8 GPR argument registers to
5699   // the stack, allowing va_start to index over them in memory if its varargs.
5700   // Because we cannot tell if this is needed on the caller side, we have to
5701   // conservatively assume that it is needed.  As such, make sure we have at
5702   // least enough stack space for the caller to store the 8 GPRs.
5703   // In the ELFv2 ABI, we allocate the parameter area iff a callee
5704   // really requires memory operands, e.g. a vararg function.
5705   if (HasParameterArea)
5706     NumBytes = std::max(NumBytes, LinkageSize + 8 * PtrByteSize);
5707   else
5708     NumBytes = LinkageSize;
5709 
5710   // Tail call needs the stack to be aligned.
5711   if (getTargetMachine().Options.GuaranteedTailCallOpt &&
5712       CallConv == CallingConv::Fast)
5713     NumBytes = EnsureStackAlignment(Subtarget.getFrameLowering(), NumBytes);
5714 
5715   int SPDiff = 0;
5716 
5717   // Calculate by how many bytes the stack has to be adjusted in case of tail
5718   // call optimization.
5719   if (!IsSibCall)
5720     SPDiff = CalculateTailCallSPDiff(DAG, isTailCall, NumBytes);
5721 
5722   // To protect arguments on the stack from being clobbered in a tail call,
5723   // force all the loads to happen before doing any other lowering.
5724   if (isTailCall)
5725     Chain = DAG.getStackArgumentTokenFactor(Chain);
5726 
5727   // Adjust the stack pointer for the new arguments...
5728   // These operations are automatically eliminated by the prolog/epilog pass
5729   if (!IsSibCall)
5730     Chain = DAG.getCALLSEQ_START(Chain, NumBytes, 0, dl);
5731   SDValue CallSeqStart = Chain;
5732 
5733   // Load the return address and frame pointer so it can be move somewhere else
5734   // later.
5735   SDValue LROp, FPOp;
5736   Chain = EmitTailCallLoadFPAndRetAddr(DAG, SPDiff, Chain, LROp, FPOp, dl);
5737 
5738   // Set up a copy of the stack pointer for use loading and storing any
5739   // arguments that may not fit in the registers available for argument
5740   // passing.
5741   SDValue StackPtr = DAG.getRegister(PPC::X1, MVT::i64);
5742 
5743   // Figure out which arguments are going to go in registers, and which in
5744   // memory.  Also, if this is a vararg function, floating point operations
5745   // must be stored to our stack, and loaded into integer regs as well, if
5746   // any integer regs are available for argument passing.
5747   unsigned ArgOffset = LinkageSize;
5748 
5749   SmallVector<std::pair<unsigned, SDValue>, 8> RegsToPass;
5750   SmallVector<TailCallArgumentInfo, 8> TailCallArguments;
5751 
5752   SmallVector<SDValue, 8> MemOpChains;
5753   for (unsigned i = 0; i != NumOps; ++i) {
5754     SDValue Arg = OutVals[i];
5755     ISD::ArgFlagsTy Flags = Outs[i].Flags;
5756     EVT ArgVT = Outs[i].VT;
5757     EVT OrigVT = Outs[i].ArgVT;
5758 
5759     // PtrOff will be used to store the current argument to the stack if a
5760     // register cannot be found for it.
5761     SDValue PtrOff;
5762 
5763     // We re-align the argument offset for each argument, except when using the
5764     // fast calling convention, when we need to make sure we do that only when
5765     // we'll actually use a stack slot.
5766     auto ComputePtrOff = [&]() {
5767       /* Respect alignment of argument on the stack.  */
5768       unsigned Align =
5769         CalculateStackSlotAlignment(ArgVT, OrigVT, Flags, PtrByteSize);
5770       ArgOffset = ((ArgOffset + Align - 1) / Align) * Align;
5771 
5772       PtrOff = DAG.getConstant(ArgOffset, dl, StackPtr.getValueType());
5773 
5774       PtrOff = DAG.getNode(ISD::ADD, dl, PtrVT, StackPtr, PtrOff);
5775     };
5776 
5777     if (CallConv != CallingConv::Fast) {
5778       ComputePtrOff();
5779 
5780       /* Compute GPR index associated with argument offset.  */
5781       GPR_idx = (ArgOffset - LinkageSize) / PtrByteSize;
5782       GPR_idx = std::min(GPR_idx, NumGPRs);
5783     }
5784 
5785     // Promote integers to 64-bit values.
5786     if (Arg.getValueType() == MVT::i32 || Arg.getValueType() == MVT::i1) {
5787       // FIXME: Should this use ANY_EXTEND if neither sext nor zext?
5788       unsigned ExtOp = Flags.isSExt() ? ISD::SIGN_EXTEND : ISD::ZERO_EXTEND;
5789       Arg = DAG.getNode(ExtOp, dl, MVT::i64, Arg);
5790     }
5791 
5792     // FIXME memcpy is used way more than necessary.  Correctness first.
5793     // Note: "by value" is code for passing a structure by value, not
5794     // basic types.
5795     if (Flags.isByVal()) {
5796       // Note: Size includes alignment padding, so
5797       //   struct x { short a; char b; }
5798       // will have Size = 4.  With #pragma pack(1), it will have Size = 3.
5799       // These are the proper values we need for right-justifying the
5800       // aggregate in a parameter register.
5801       unsigned Size = Flags.getByValSize();
5802 
5803       // An empty aggregate parameter takes up no storage and no
5804       // registers.
5805       if (Size == 0)
5806         continue;
5807 
5808       if (CallConv == CallingConv::Fast)
5809         ComputePtrOff();
5810 
5811       // All aggregates smaller than 8 bytes must be passed right-justified.
5812       if (Size==1 || Size==2 || Size==4) {
5813         EVT VT = (Size==1) ? MVT::i8 : ((Size==2) ? MVT::i16 : MVT::i32);
5814         if (GPR_idx != NumGPRs) {
5815           SDValue Load = DAG.getExtLoad(ISD::EXTLOAD, dl, PtrVT, Chain, Arg,
5816                                         MachinePointerInfo(), VT);
5817           MemOpChains.push_back(Load.getValue(1));
5818           RegsToPass.push_back(std::make_pair(GPR[GPR_idx++], Load));
5819 
5820           ArgOffset += PtrByteSize;
5821           continue;
5822         }
5823       }
5824 
5825       if (GPR_idx == NumGPRs && Size < 8) {
5826         SDValue AddPtr = PtrOff;
5827         if (!isLittleEndian) {
5828           SDValue Const = DAG.getConstant(PtrByteSize - Size, dl,
5829                                           PtrOff.getValueType());
5830           AddPtr = DAG.getNode(ISD::ADD, dl, PtrVT, PtrOff, Const);
5831         }
5832         Chain = CallSeqStart = createMemcpyOutsideCallSeq(Arg, AddPtr,
5833                                                           CallSeqStart,
5834                                                           Flags, DAG, dl);
5835         ArgOffset += PtrByteSize;
5836         continue;
5837       }
5838       // Copy entire object into memory.  There are cases where gcc-generated
5839       // code assumes it is there, even if it could be put entirely into
5840       // registers.  (This is not what the doc says.)
5841 
5842       // FIXME: The above statement is likely due to a misunderstanding of the
5843       // documents.  All arguments must be copied into the parameter area BY
5844       // THE CALLEE in the event that the callee takes the address of any
5845       // formal argument.  That has not yet been implemented.  However, it is
5846       // reasonable to use the stack area as a staging area for the register
5847       // load.
5848 
5849       // Skip this for small aggregates, as we will use the same slot for a
5850       // right-justified copy, below.
5851       if (Size >= 8)
5852         Chain = CallSeqStart = createMemcpyOutsideCallSeq(Arg, PtrOff,
5853                                                           CallSeqStart,
5854                                                           Flags, DAG, dl);
5855 
5856       // When a register is available, pass a small aggregate right-justified.
5857       if (Size < 8 && GPR_idx != NumGPRs) {
5858         // The easiest way to get this right-justified in a register
5859         // is to copy the structure into the rightmost portion of a
5860         // local variable slot, then load the whole slot into the
5861         // register.
5862         // FIXME: The memcpy seems to produce pretty awful code for
5863         // small aggregates, particularly for packed ones.
5864         // FIXME: It would be preferable to use the slot in the
5865         // parameter save area instead of a new local variable.
5866         SDValue AddPtr = PtrOff;
5867         if (!isLittleEndian) {
5868           SDValue Const = DAG.getConstant(8 - Size, dl, PtrOff.getValueType());
5869           AddPtr = DAG.getNode(ISD::ADD, dl, PtrVT, PtrOff, Const);
5870         }
5871         Chain = CallSeqStart = createMemcpyOutsideCallSeq(Arg, AddPtr,
5872                                                           CallSeqStart,
5873                                                           Flags, DAG, dl);
5874 
5875         // Load the slot into the register.
5876         SDValue Load =
5877             DAG.getLoad(PtrVT, dl, Chain, PtrOff, MachinePointerInfo());
5878         MemOpChains.push_back(Load.getValue(1));
5879         RegsToPass.push_back(std::make_pair(GPR[GPR_idx++], Load));
5880 
5881         // Done with this argument.
5882         ArgOffset += PtrByteSize;
5883         continue;
5884       }
5885 
5886       // For aggregates larger than PtrByteSize, copy the pieces of the
5887       // object that fit into registers from the parameter save area.
5888       for (unsigned j=0; j<Size; j+=PtrByteSize) {
5889         SDValue Const = DAG.getConstant(j, dl, PtrOff.getValueType());
5890         SDValue AddArg = DAG.getNode(ISD::ADD, dl, PtrVT, Arg, Const);
5891         if (GPR_idx != NumGPRs) {
5892           SDValue Load =
5893               DAG.getLoad(PtrVT, dl, Chain, AddArg, MachinePointerInfo());
5894           MemOpChains.push_back(Load.getValue(1));
5895           RegsToPass.push_back(std::make_pair(GPR[GPR_idx++], Load));
5896           ArgOffset += PtrByteSize;
5897         } else {
5898           ArgOffset += ((Size - j + PtrByteSize-1)/PtrByteSize)*PtrByteSize;
5899           break;
5900         }
5901       }
5902       continue;
5903     }
5904 
5905     switch (Arg.getSimpleValueType().SimpleTy) {
5906     default: llvm_unreachable("Unexpected ValueType for argument!");
5907     case MVT::i1:
5908     case MVT::i32:
5909     case MVT::i64:
5910       if (Flags.isNest()) {
5911         // The 'nest' parameter, if any, is passed in R11.
5912         RegsToPass.push_back(std::make_pair(PPC::X11, Arg));
5913         hasNest = true;
5914         break;
5915       }
5916 
5917       // These can be scalar arguments or elements of an integer array type
5918       // passed directly.  Clang may use those instead of "byval" aggregate
5919       // types to avoid forcing arguments to memory unnecessarily.
5920       if (GPR_idx != NumGPRs) {
5921         RegsToPass.push_back(std::make_pair(GPR[GPR_idx++], Arg));
5922       } else {
5923         if (CallConv == CallingConv::Fast)
5924           ComputePtrOff();
5925 
5926         assert(HasParameterArea &&
5927                "Parameter area must exist to pass an argument in memory.");
5928         LowerMemOpCallTo(DAG, MF, Chain, Arg, PtrOff, SPDiff, ArgOffset,
5929                          true, isTailCall, false, MemOpChains,
5930                          TailCallArguments, dl);
5931         if (CallConv == CallingConv::Fast)
5932           ArgOffset += PtrByteSize;
5933       }
5934       if (CallConv != CallingConv::Fast)
5935         ArgOffset += PtrByteSize;
5936       break;
5937     case MVT::f32:
5938     case MVT::f64: {
5939       // These can be scalar arguments or elements of a float array type
5940       // passed directly.  The latter are used to implement ELFv2 homogenous
5941       // float aggregates.
5942 
5943       // Named arguments go into FPRs first, and once they overflow, the
5944       // remaining arguments go into GPRs and then the parameter save area.
5945       // Unnamed arguments for vararg functions always go to GPRs and
5946       // then the parameter save area.  For now, put all arguments to vararg
5947       // routines always in both locations (FPR *and* GPR or stack slot).
5948       bool NeedGPROrStack = isVarArg || FPR_idx == NumFPRs;
5949       bool NeededLoad = false;
5950 
5951       // First load the argument into the next available FPR.
5952       if (FPR_idx != NumFPRs)
5953         RegsToPass.push_back(std::make_pair(FPR[FPR_idx++], Arg));
5954 
5955       // Next, load the argument into GPR or stack slot if needed.
5956       if (!NeedGPROrStack)
5957         ;
5958       else if (GPR_idx != NumGPRs && CallConv != CallingConv::Fast) {
5959         // FIXME: We may want to re-enable this for CallingConv::Fast on the P8
5960         // once we support fp <-> gpr moves.
5961 
5962         // In the non-vararg case, this can only ever happen in the
5963         // presence of f32 array types, since otherwise we never run
5964         // out of FPRs before running out of GPRs.
5965         SDValue ArgVal;
5966 
5967         // Double values are always passed in a single GPR.
5968         if (Arg.getValueType() != MVT::f32) {
5969           ArgVal = DAG.getNode(ISD::BITCAST, dl, MVT::i64, Arg);
5970 
5971         // Non-array float values are extended and passed in a GPR.
5972         } else if (!Flags.isInConsecutiveRegs()) {
5973           ArgVal = DAG.getNode(ISD::BITCAST, dl, MVT::i32, Arg);
5974           ArgVal = DAG.getNode(ISD::ANY_EXTEND, dl, MVT::i64, ArgVal);
5975 
5976         // If we have an array of floats, we collect every odd element
5977         // together with its predecessor into one GPR.
5978         } else if (ArgOffset % PtrByteSize != 0) {
5979           SDValue Lo, Hi;
5980           Lo = DAG.getNode(ISD::BITCAST, dl, MVT::i32, OutVals[i - 1]);
5981           Hi = DAG.getNode(ISD::BITCAST, dl, MVT::i32, Arg);
5982           if (!isLittleEndian)
5983             std::swap(Lo, Hi);
5984           ArgVal = DAG.getNode(ISD::BUILD_PAIR, dl, MVT::i64, Lo, Hi);
5985 
5986         // The final element, if even, goes into the first half of a GPR.
5987         } else if (Flags.isInConsecutiveRegsLast()) {
5988           ArgVal = DAG.getNode(ISD::BITCAST, dl, MVT::i32, Arg);
5989           ArgVal = DAG.getNode(ISD::ANY_EXTEND, dl, MVT::i64, ArgVal);
5990           if (!isLittleEndian)
5991             ArgVal = DAG.getNode(ISD::SHL, dl, MVT::i64, ArgVal,
5992                                  DAG.getConstant(32, dl, MVT::i32));
5993 
5994         // Non-final even elements are skipped; they will be handled
5995         // together the with subsequent argument on the next go-around.
5996         } else
5997           ArgVal = SDValue();
5998 
5999         if (ArgVal.getNode())
6000           RegsToPass.push_back(std::make_pair(GPR[GPR_idx++], ArgVal));
6001       } else {
6002         if (CallConv == CallingConv::Fast)
6003           ComputePtrOff();
6004 
6005         // Single-precision floating-point values are mapped to the
6006         // second (rightmost) word of the stack doubleword.
6007         if (Arg.getValueType() == MVT::f32 &&
6008             !isLittleEndian && !Flags.isInConsecutiveRegs()) {
6009           SDValue ConstFour = DAG.getConstant(4, dl, PtrOff.getValueType());
6010           PtrOff = DAG.getNode(ISD::ADD, dl, PtrVT, PtrOff, ConstFour);
6011         }
6012 
6013         assert(HasParameterArea &&
6014                "Parameter area must exist to pass an argument in memory.");
6015         LowerMemOpCallTo(DAG, MF, Chain, Arg, PtrOff, SPDiff, ArgOffset,
6016                          true, isTailCall, false, MemOpChains,
6017                          TailCallArguments, dl);
6018 
6019         NeededLoad = true;
6020       }
6021       // When passing an array of floats, the array occupies consecutive
6022       // space in the argument area; only round up to the next doubleword
6023       // at the end of the array.  Otherwise, each float takes 8 bytes.
6024       if (CallConv != CallingConv::Fast || NeededLoad) {
6025         ArgOffset += (Arg.getValueType() == MVT::f32 &&
6026                       Flags.isInConsecutiveRegs()) ? 4 : 8;
6027         if (Flags.isInConsecutiveRegsLast())
6028           ArgOffset = ((ArgOffset + PtrByteSize - 1)/PtrByteSize) * PtrByteSize;
6029       }
6030       break;
6031     }
6032     case MVT::v4f32:
6033     case MVT::v4i32:
6034     case MVT::v8i16:
6035     case MVT::v16i8:
6036     case MVT::v2f64:
6037     case MVT::v2i64:
6038     case MVT::v1i128:
6039     case MVT::f128:
6040       if (!Subtarget.hasQPX()) {
6041       // These can be scalar arguments or elements of a vector array type
6042       // passed directly.  The latter are used to implement ELFv2 homogenous
6043       // vector aggregates.
6044 
6045       // For a varargs call, named arguments go into VRs or on the stack as
6046       // usual; unnamed arguments always go to the stack or the corresponding
6047       // GPRs when within range.  For now, we always put the value in both
6048       // locations (or even all three).
6049       if (isVarArg) {
6050         assert(HasParameterArea &&
6051                "Parameter area must exist if we have a varargs call.");
6052         // We could elide this store in the case where the object fits
6053         // entirely in R registers.  Maybe later.
6054         SDValue Store =
6055             DAG.getStore(Chain, dl, Arg, PtrOff, MachinePointerInfo());
6056         MemOpChains.push_back(Store);
6057         if (VR_idx != NumVRs) {
6058           SDValue Load =
6059               DAG.getLoad(MVT::v4f32, dl, Store, PtrOff, MachinePointerInfo());
6060           MemOpChains.push_back(Load.getValue(1));
6061           RegsToPass.push_back(std::make_pair(VR[VR_idx++], Load));
6062         }
6063         ArgOffset += 16;
6064         for (unsigned i=0; i<16; i+=PtrByteSize) {
6065           if (GPR_idx == NumGPRs)
6066             break;
6067           SDValue Ix = DAG.getNode(ISD::ADD, dl, PtrVT, PtrOff,
6068                                    DAG.getConstant(i, dl, PtrVT));
6069           SDValue Load =
6070               DAG.getLoad(PtrVT, dl, Store, Ix, MachinePointerInfo());
6071           MemOpChains.push_back(Load.getValue(1));
6072           RegsToPass.push_back(std::make_pair(GPR[GPR_idx++], Load));
6073         }
6074         break;
6075       }
6076 
6077       // Non-varargs Altivec params go into VRs or on the stack.
6078       if (VR_idx != NumVRs) {
6079         RegsToPass.push_back(std::make_pair(VR[VR_idx++], Arg));
6080       } else {
6081         if (CallConv == CallingConv::Fast)
6082           ComputePtrOff();
6083 
6084         assert(HasParameterArea &&
6085                "Parameter area must exist to pass an argument in memory.");
6086         LowerMemOpCallTo(DAG, MF, Chain, Arg, PtrOff, SPDiff, ArgOffset,
6087                          true, isTailCall, true, MemOpChains,
6088                          TailCallArguments, dl);
6089         if (CallConv == CallingConv::Fast)
6090           ArgOffset += 16;
6091       }
6092 
6093       if (CallConv != CallingConv::Fast)
6094         ArgOffset += 16;
6095       break;
6096       } // not QPX
6097 
6098       assert(Arg.getValueType().getSimpleVT().SimpleTy == MVT::v4f32 &&
6099              "Invalid QPX parameter type");
6100 
6101       /* fall through */
6102     case MVT::v4f64:
6103     case MVT::v4i1: {
6104       bool IsF32 = Arg.getValueType().getSimpleVT().SimpleTy == MVT::v4f32;
6105       if (isVarArg) {
6106         assert(HasParameterArea &&
6107                "Parameter area must exist if we have a varargs call.");
6108         // We could elide this store in the case where the object fits
6109         // entirely in R registers.  Maybe later.
6110         SDValue Store =
6111             DAG.getStore(Chain, dl, Arg, PtrOff, MachinePointerInfo());
6112         MemOpChains.push_back(Store);
6113         if (QFPR_idx != NumQFPRs) {
6114           SDValue Load = DAG.getLoad(IsF32 ? MVT::v4f32 : MVT::v4f64, dl, Store,
6115                                      PtrOff, MachinePointerInfo());
6116           MemOpChains.push_back(Load.getValue(1));
6117           RegsToPass.push_back(std::make_pair(QFPR[QFPR_idx++], Load));
6118         }
6119         ArgOffset += (IsF32 ? 16 : 32);
6120         for (unsigned i = 0; i < (IsF32 ? 16U : 32U); i += PtrByteSize) {
6121           if (GPR_idx == NumGPRs)
6122             break;
6123           SDValue Ix = DAG.getNode(ISD::ADD, dl, PtrVT, PtrOff,
6124                                    DAG.getConstant(i, dl, PtrVT));
6125           SDValue Load =
6126               DAG.getLoad(PtrVT, dl, Store, Ix, MachinePointerInfo());
6127           MemOpChains.push_back(Load.getValue(1));
6128           RegsToPass.push_back(std::make_pair(GPR[GPR_idx++], Load));
6129         }
6130         break;
6131       }
6132 
6133       // Non-varargs QPX params go into registers or on the stack.
6134       if (QFPR_idx != NumQFPRs) {
6135         RegsToPass.push_back(std::make_pair(QFPR[QFPR_idx++], Arg));
6136       } else {
6137         if (CallConv == CallingConv::Fast)
6138           ComputePtrOff();
6139 
6140         assert(HasParameterArea &&
6141                "Parameter area must exist to pass an argument in memory.");
6142         LowerMemOpCallTo(DAG, MF, Chain, Arg, PtrOff, SPDiff, ArgOffset,
6143                          true, isTailCall, true, MemOpChains,
6144                          TailCallArguments, dl);
6145         if (CallConv == CallingConv::Fast)
6146           ArgOffset += (IsF32 ? 16 : 32);
6147       }
6148 
6149       if (CallConv != CallingConv::Fast)
6150         ArgOffset += (IsF32 ? 16 : 32);
6151       break;
6152       }
6153     }
6154   }
6155 
6156   assert((!HasParameterArea || NumBytesActuallyUsed == ArgOffset) &&
6157          "mismatch in size of parameter area");
6158   (void)NumBytesActuallyUsed;
6159 
6160   if (!MemOpChains.empty())
6161     Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, MemOpChains);
6162 
6163   // Check if this is an indirect call (MTCTR/BCTRL).
6164   // See PrepareCall() for more information about calls through function
6165   // pointers in the 64-bit SVR4 ABI.
6166   if (!isTailCall && !isPatchPoint &&
6167       !isFunctionGlobalAddress(Callee) &&
6168       !isa<ExternalSymbolSDNode>(Callee)) {
6169     // Load r2 into a virtual register and store it to the TOC save area.
6170     setUsesTOCBasePtr(DAG);
6171     SDValue Val = DAG.getCopyFromReg(Chain, dl, PPC::X2, MVT::i64);
6172     // TOC save area offset.
6173     unsigned TOCSaveOffset = Subtarget.getFrameLowering()->getTOCSaveOffset();
6174     SDValue PtrOff = DAG.getIntPtrConstant(TOCSaveOffset, dl);
6175     SDValue AddPtr = DAG.getNode(ISD::ADD, dl, PtrVT, StackPtr, PtrOff);
6176     Chain = DAG.getStore(
6177         Val.getValue(1), dl, Val, AddPtr,
6178         MachinePointerInfo::getStack(DAG.getMachineFunction(), TOCSaveOffset));
6179     // In the ELFv2 ABI, R12 must contain the address of an indirect callee.
6180     // This does not mean the MTCTR instruction must use R12; it's easier
6181     // to model this as an extra parameter, so do that.
6182     if (isELFv2ABI && !isPatchPoint)
6183       RegsToPass.push_back(std::make_pair((unsigned)PPC::X12, Callee));
6184   }
6185 
6186   // Build a sequence of copy-to-reg nodes chained together with token chain
6187   // and flag operands which copy the outgoing args into the appropriate regs.
6188   SDValue InFlag;
6189   for (unsigned i = 0, e = RegsToPass.size(); i != e; ++i) {
6190     Chain = DAG.getCopyToReg(Chain, dl, RegsToPass[i].first,
6191                              RegsToPass[i].second, InFlag);
6192     InFlag = Chain.getValue(1);
6193   }
6194 
6195   if (isTailCall && !IsSibCall)
6196     PrepareTailCall(DAG, InFlag, Chain, dl, SPDiff, NumBytes, LROp, FPOp,
6197                     TailCallArguments);
6198 
6199   return FinishCall(CallConv, dl, isTailCall, isVarArg, isPatchPoint, hasNest,
6200                     DAG, RegsToPass, InFlag, Chain, CallSeqStart, Callee,
6201                     SPDiff, NumBytes, Ins, InVals, CS);
6202 }
6203 
6204 SDValue PPCTargetLowering::LowerCall_Darwin(
6205     SDValue Chain, SDValue Callee, CallingConv::ID CallConv, bool isVarArg,
6206     bool isTailCall, bool isPatchPoint,
6207     const SmallVectorImpl<ISD::OutputArg> &Outs,
6208     const SmallVectorImpl<SDValue> &OutVals,
6209     const SmallVectorImpl<ISD::InputArg> &Ins, const SDLoc &dl,
6210     SelectionDAG &DAG, SmallVectorImpl<SDValue> &InVals,
6211     ImmutableCallSite CS) const {
6212   unsigned NumOps = Outs.size();
6213 
6214   EVT PtrVT = getPointerTy(DAG.getDataLayout());
6215   bool isPPC64 = PtrVT == MVT::i64;
6216   unsigned PtrByteSize = isPPC64 ? 8 : 4;
6217 
6218   MachineFunction &MF = DAG.getMachineFunction();
6219 
6220   // Mark this function as potentially containing a function that contains a
6221   // tail call. As a consequence the frame pointer will be used for dynamicalloc
6222   // and restoring the callers stack pointer in this functions epilog. This is
6223   // done because by tail calling the called function might overwrite the value
6224   // in this function's (MF) stack pointer stack slot 0(SP).
6225   if (getTargetMachine().Options.GuaranteedTailCallOpt &&
6226       CallConv == CallingConv::Fast)
6227     MF.getInfo<PPCFunctionInfo>()->setHasFastCall();
6228 
6229   // Count how many bytes are to be pushed on the stack, including the linkage
6230   // area, and parameter passing area.  We start with 24/48 bytes, which is
6231   // prereserved space for [SP][CR][LR][3 x unused].
6232   unsigned LinkageSize = Subtarget.getFrameLowering()->getLinkageSize();
6233   unsigned NumBytes = LinkageSize;
6234 
6235   // Add up all the space actually used.
6236   // In 32-bit non-varargs calls, Altivec parameters all go at the end; usually
6237   // they all go in registers, but we must reserve stack space for them for
6238   // possible use by the caller.  In varargs or 64-bit calls, parameters are
6239   // assigned stack space in order, with padding so Altivec parameters are
6240   // 16-byte aligned.
6241   unsigned nAltivecParamsAtEnd = 0;
6242   for (unsigned i = 0; i != NumOps; ++i) {
6243     ISD::ArgFlagsTy Flags = Outs[i].Flags;
6244     EVT ArgVT = Outs[i].VT;
6245     // Varargs Altivec parameters are padded to a 16 byte boundary.
6246     if (ArgVT == MVT::v4f32 || ArgVT == MVT::v4i32 ||
6247         ArgVT == MVT::v8i16 || ArgVT == MVT::v16i8 ||
6248         ArgVT == MVT::v2f64 || ArgVT == MVT::v2i64) {
6249       if (!isVarArg && !isPPC64) {
6250         // Non-varargs Altivec parameters go after all the non-Altivec
6251         // parameters; handle those later so we know how much padding we need.
6252         nAltivecParamsAtEnd++;
6253         continue;
6254       }
6255       // Varargs and 64-bit Altivec parameters are padded to 16 byte boundary.
6256       NumBytes = ((NumBytes+15)/16)*16;
6257     }
6258     NumBytes += CalculateStackSlotSize(ArgVT, Flags, PtrByteSize);
6259   }
6260 
6261   // Allow for Altivec parameters at the end, if needed.
6262   if (nAltivecParamsAtEnd) {
6263     NumBytes = ((NumBytes+15)/16)*16;
6264     NumBytes += 16*nAltivecParamsAtEnd;
6265   }
6266 
6267   // The prolog code of the callee may store up to 8 GPR argument registers to
6268   // the stack, allowing va_start to index over them in memory if its varargs.
6269   // Because we cannot tell if this is needed on the caller side, we have to
6270   // conservatively assume that it is needed.  As such, make sure we have at
6271   // least enough stack space for the caller to store the 8 GPRs.
6272   NumBytes = std::max(NumBytes, LinkageSize + 8 * PtrByteSize);
6273 
6274   // Tail call needs the stack to be aligned.
6275   if (getTargetMachine().Options.GuaranteedTailCallOpt &&
6276       CallConv == CallingConv::Fast)
6277     NumBytes = EnsureStackAlignment(Subtarget.getFrameLowering(), NumBytes);
6278 
6279   // Calculate by how many bytes the stack has to be adjusted in case of tail
6280   // call optimization.
6281   int SPDiff = CalculateTailCallSPDiff(DAG, isTailCall, NumBytes);
6282 
6283   // To protect arguments on the stack from being clobbered in a tail call,
6284   // force all the loads to happen before doing any other lowering.
6285   if (isTailCall)
6286     Chain = DAG.getStackArgumentTokenFactor(Chain);
6287 
6288   // Adjust the stack pointer for the new arguments...
6289   // These operations are automatically eliminated by the prolog/epilog pass
6290   Chain = DAG.getCALLSEQ_START(Chain, NumBytes, 0, dl);
6291   SDValue CallSeqStart = Chain;
6292 
6293   // Load the return address and frame pointer so it can be move somewhere else
6294   // later.
6295   SDValue LROp, FPOp;
6296   Chain = EmitTailCallLoadFPAndRetAddr(DAG, SPDiff, Chain, LROp, FPOp, dl);
6297 
6298   // Set up a copy of the stack pointer for use loading and storing any
6299   // arguments that may not fit in the registers available for argument
6300   // passing.
6301   SDValue StackPtr;
6302   if (isPPC64)
6303     StackPtr = DAG.getRegister(PPC::X1, MVT::i64);
6304   else
6305     StackPtr = DAG.getRegister(PPC::R1, MVT::i32);
6306 
6307   // Figure out which arguments are going to go in registers, and which in
6308   // memory.  Also, if this is a vararg function, floating point operations
6309   // must be stored to our stack, and loaded into integer regs as well, if
6310   // any integer regs are available for argument passing.
6311   unsigned ArgOffset = LinkageSize;
6312   unsigned GPR_idx = 0, FPR_idx = 0, VR_idx = 0;
6313 
6314   static const MCPhysReg GPR_32[] = {           // 32-bit registers.
6315     PPC::R3, PPC::R4, PPC::R5, PPC::R6,
6316     PPC::R7, PPC::R8, PPC::R9, PPC::R10,
6317   };
6318   static const MCPhysReg GPR_64[] = {           // 64-bit registers.
6319     PPC::X3, PPC::X4, PPC::X5, PPC::X6,
6320     PPC::X7, PPC::X8, PPC::X9, PPC::X10,
6321   };
6322   static const MCPhysReg VR[] = {
6323     PPC::V2, PPC::V3, PPC::V4, PPC::V5, PPC::V6, PPC::V7, PPC::V8,
6324     PPC::V9, PPC::V10, PPC::V11, PPC::V12, PPC::V13
6325   };
6326   const unsigned NumGPRs = array_lengthof(GPR_32);
6327   const unsigned NumFPRs = 13;
6328   const unsigned NumVRs  = array_lengthof(VR);
6329 
6330   const MCPhysReg *GPR = isPPC64 ? GPR_64 : GPR_32;
6331 
6332   SmallVector<std::pair<unsigned, SDValue>, 8> RegsToPass;
6333   SmallVector<TailCallArgumentInfo, 8> TailCallArguments;
6334 
6335   SmallVector<SDValue, 8> MemOpChains;
6336   for (unsigned i = 0; i != NumOps; ++i) {
6337     SDValue Arg = OutVals[i];
6338     ISD::ArgFlagsTy Flags = Outs[i].Flags;
6339 
6340     // PtrOff will be used to store the current argument to the stack if a
6341     // register cannot be found for it.
6342     SDValue PtrOff;
6343 
6344     PtrOff = DAG.getConstant(ArgOffset, dl, StackPtr.getValueType());
6345 
6346     PtrOff = DAG.getNode(ISD::ADD, dl, PtrVT, StackPtr, PtrOff);
6347 
6348     // On PPC64, promote integers to 64-bit values.
6349     if (isPPC64 && Arg.getValueType() == MVT::i32) {
6350       // FIXME: Should this use ANY_EXTEND if neither sext nor zext?
6351       unsigned ExtOp = Flags.isSExt() ? ISD::SIGN_EXTEND : ISD::ZERO_EXTEND;
6352       Arg = DAG.getNode(ExtOp, dl, MVT::i64, Arg);
6353     }
6354 
6355     // FIXME memcpy is used way more than necessary.  Correctness first.
6356     // Note: "by value" is code for passing a structure by value, not
6357     // basic types.
6358     if (Flags.isByVal()) {
6359       unsigned Size = Flags.getByValSize();
6360       // Very small objects are passed right-justified.  Everything else is
6361       // passed left-justified.
6362       if (Size==1 || Size==2) {
6363         EVT VT = (Size==1) ? MVT::i8 : MVT::i16;
6364         if (GPR_idx != NumGPRs) {
6365           SDValue Load = DAG.getExtLoad(ISD::EXTLOAD, dl, PtrVT, Chain, Arg,
6366                                         MachinePointerInfo(), VT);
6367           MemOpChains.push_back(Load.getValue(1));
6368           RegsToPass.push_back(std::make_pair(GPR[GPR_idx++], Load));
6369 
6370           ArgOffset += PtrByteSize;
6371         } else {
6372           SDValue Const = DAG.getConstant(PtrByteSize - Size, dl,
6373                                           PtrOff.getValueType());
6374           SDValue AddPtr = DAG.getNode(ISD::ADD, dl, PtrVT, PtrOff, Const);
6375           Chain = CallSeqStart = createMemcpyOutsideCallSeq(Arg, AddPtr,
6376                                                             CallSeqStart,
6377                                                             Flags, DAG, dl);
6378           ArgOffset += PtrByteSize;
6379         }
6380         continue;
6381       }
6382       // Copy entire object into memory.  There are cases where gcc-generated
6383       // code assumes it is there, even if it could be put entirely into
6384       // registers.  (This is not what the doc says.)
6385       Chain = CallSeqStart = createMemcpyOutsideCallSeq(Arg, PtrOff,
6386                                                         CallSeqStart,
6387                                                         Flags, DAG, dl);
6388 
6389       // For small aggregates (Darwin only) and aggregates >= PtrByteSize,
6390       // copy the pieces of the object that fit into registers from the
6391       // parameter save area.
6392       for (unsigned j=0; j<Size; j+=PtrByteSize) {
6393         SDValue Const = DAG.getConstant(j, dl, PtrOff.getValueType());
6394         SDValue AddArg = DAG.getNode(ISD::ADD, dl, PtrVT, Arg, Const);
6395         if (GPR_idx != NumGPRs) {
6396           SDValue Load =
6397               DAG.getLoad(PtrVT, dl, Chain, AddArg, MachinePointerInfo());
6398           MemOpChains.push_back(Load.getValue(1));
6399           RegsToPass.push_back(std::make_pair(GPR[GPR_idx++], Load));
6400           ArgOffset += PtrByteSize;
6401         } else {
6402           ArgOffset += ((Size - j + PtrByteSize-1)/PtrByteSize)*PtrByteSize;
6403           break;
6404         }
6405       }
6406       continue;
6407     }
6408 
6409     switch (Arg.getSimpleValueType().SimpleTy) {
6410     default: llvm_unreachable("Unexpected ValueType for argument!");
6411     case MVT::i1:
6412     case MVT::i32:
6413     case MVT::i64:
6414       if (GPR_idx != NumGPRs) {
6415         if (Arg.getValueType() == MVT::i1)
6416           Arg = DAG.getNode(ISD::ZERO_EXTEND, dl, PtrVT, Arg);
6417 
6418         RegsToPass.push_back(std::make_pair(GPR[GPR_idx++], Arg));
6419       } else {
6420         LowerMemOpCallTo(DAG, MF, Chain, Arg, PtrOff, SPDiff, ArgOffset,
6421                          isPPC64, isTailCall, false, MemOpChains,
6422                          TailCallArguments, dl);
6423       }
6424       ArgOffset += PtrByteSize;
6425       break;
6426     case MVT::f32:
6427     case MVT::f64:
6428       if (FPR_idx != NumFPRs) {
6429         RegsToPass.push_back(std::make_pair(FPR[FPR_idx++], Arg));
6430 
6431         if (isVarArg) {
6432           SDValue Store =
6433               DAG.getStore(Chain, dl, Arg, PtrOff, MachinePointerInfo());
6434           MemOpChains.push_back(Store);
6435 
6436           // Float varargs are always shadowed in available integer registers
6437           if (GPR_idx != NumGPRs) {
6438             SDValue Load =
6439                 DAG.getLoad(PtrVT, dl, Store, PtrOff, MachinePointerInfo());
6440             MemOpChains.push_back(Load.getValue(1));
6441             RegsToPass.push_back(std::make_pair(GPR[GPR_idx++], Load));
6442           }
6443           if (GPR_idx != NumGPRs && Arg.getValueType() == MVT::f64 && !isPPC64){
6444             SDValue ConstFour = DAG.getConstant(4, dl, PtrOff.getValueType());
6445             PtrOff = DAG.getNode(ISD::ADD, dl, PtrVT, PtrOff, ConstFour);
6446             SDValue Load =
6447                 DAG.getLoad(PtrVT, dl, Store, PtrOff, MachinePointerInfo());
6448             MemOpChains.push_back(Load.getValue(1));
6449             RegsToPass.push_back(std::make_pair(GPR[GPR_idx++], Load));
6450           }
6451         } else {
6452           // If we have any FPRs remaining, we may also have GPRs remaining.
6453           // Args passed in FPRs consume either 1 (f32) or 2 (f64) available
6454           // GPRs.
6455           if (GPR_idx != NumGPRs)
6456             ++GPR_idx;
6457           if (GPR_idx != NumGPRs && Arg.getValueType() == MVT::f64 &&
6458               !isPPC64)  // PPC64 has 64-bit GPR's obviously :)
6459             ++GPR_idx;
6460         }
6461       } else
6462         LowerMemOpCallTo(DAG, MF, Chain, Arg, PtrOff, SPDiff, ArgOffset,
6463                          isPPC64, isTailCall, false, MemOpChains,
6464                          TailCallArguments, dl);
6465       if (isPPC64)
6466         ArgOffset += 8;
6467       else
6468         ArgOffset += Arg.getValueType() == MVT::f32 ? 4 : 8;
6469       break;
6470     case MVT::v4f32:
6471     case MVT::v4i32:
6472     case MVT::v8i16:
6473     case MVT::v16i8:
6474       if (isVarArg) {
6475         // These go aligned on the stack, or in the corresponding R registers
6476         // when within range.  The Darwin PPC ABI doc claims they also go in
6477         // V registers; in fact gcc does this only for arguments that are
6478         // prototyped, not for those that match the ...  We do it for all
6479         // arguments, seems to work.
6480         while (ArgOffset % 16 !=0) {
6481           ArgOffset += PtrByteSize;
6482           if (GPR_idx != NumGPRs)
6483             GPR_idx++;
6484         }
6485         // We could elide this store in the case where the object fits
6486         // entirely in R registers.  Maybe later.
6487         PtrOff = DAG.getNode(ISD::ADD, dl, PtrVT, StackPtr,
6488                              DAG.getConstant(ArgOffset, dl, PtrVT));
6489         SDValue Store =
6490             DAG.getStore(Chain, dl, Arg, PtrOff, MachinePointerInfo());
6491         MemOpChains.push_back(Store);
6492         if (VR_idx != NumVRs) {
6493           SDValue Load =
6494               DAG.getLoad(MVT::v4f32, dl, Store, PtrOff, MachinePointerInfo());
6495           MemOpChains.push_back(Load.getValue(1));
6496           RegsToPass.push_back(std::make_pair(VR[VR_idx++], Load));
6497         }
6498         ArgOffset += 16;
6499         for (unsigned i=0; i<16; i+=PtrByteSize) {
6500           if (GPR_idx == NumGPRs)
6501             break;
6502           SDValue Ix = DAG.getNode(ISD::ADD, dl, PtrVT, PtrOff,
6503                                    DAG.getConstant(i, dl, PtrVT));
6504           SDValue Load =
6505               DAG.getLoad(PtrVT, dl, Store, Ix, MachinePointerInfo());
6506           MemOpChains.push_back(Load.getValue(1));
6507           RegsToPass.push_back(std::make_pair(GPR[GPR_idx++], Load));
6508         }
6509         break;
6510       }
6511 
6512       // Non-varargs Altivec params generally go in registers, but have
6513       // stack space allocated at the end.
6514       if (VR_idx != NumVRs) {
6515         // Doesn't have GPR space allocated.
6516         RegsToPass.push_back(std::make_pair(VR[VR_idx++], Arg));
6517       } else if (nAltivecParamsAtEnd==0) {
6518         // We are emitting Altivec params in order.
6519         LowerMemOpCallTo(DAG, MF, Chain, Arg, PtrOff, SPDiff, ArgOffset,
6520                          isPPC64, isTailCall, true, MemOpChains,
6521                          TailCallArguments, dl);
6522         ArgOffset += 16;
6523       }
6524       break;
6525     }
6526   }
6527   // If all Altivec parameters fit in registers, as they usually do,
6528   // they get stack space following the non-Altivec parameters.  We
6529   // don't track this here because nobody below needs it.
6530   // If there are more Altivec parameters than fit in registers emit
6531   // the stores here.
6532   if (!isVarArg && nAltivecParamsAtEnd > NumVRs) {
6533     unsigned j = 0;
6534     // Offset is aligned; skip 1st 12 params which go in V registers.
6535     ArgOffset = ((ArgOffset+15)/16)*16;
6536     ArgOffset += 12*16;
6537     for (unsigned i = 0; i != NumOps; ++i) {
6538       SDValue Arg = OutVals[i];
6539       EVT ArgType = Outs[i].VT;
6540       if (ArgType==MVT::v4f32 || ArgType==MVT::v4i32 ||
6541           ArgType==MVT::v8i16 || ArgType==MVT::v16i8) {
6542         if (++j > NumVRs) {
6543           SDValue PtrOff;
6544           // We are emitting Altivec params in order.
6545           LowerMemOpCallTo(DAG, MF, Chain, Arg, PtrOff, SPDiff, ArgOffset,
6546                            isPPC64, isTailCall, true, MemOpChains,
6547                            TailCallArguments, dl);
6548           ArgOffset += 16;
6549         }
6550       }
6551     }
6552   }
6553 
6554   if (!MemOpChains.empty())
6555     Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, MemOpChains);
6556 
6557   // On Darwin, R12 must contain the address of an indirect callee.  This does
6558   // not mean the MTCTR instruction must use R12; it's easier to model this as
6559   // an extra parameter, so do that.
6560   if (!isTailCall &&
6561       !isFunctionGlobalAddress(Callee) &&
6562       !isa<ExternalSymbolSDNode>(Callee) &&
6563       !isBLACompatibleAddress(Callee, DAG))
6564     RegsToPass.push_back(std::make_pair((unsigned)(isPPC64 ? PPC::X12 :
6565                                                    PPC::R12), Callee));
6566 
6567   // Build a sequence of copy-to-reg nodes chained together with token chain
6568   // and flag operands which copy the outgoing args into the appropriate regs.
6569   SDValue InFlag;
6570   for (unsigned i = 0, e = RegsToPass.size(); i != e; ++i) {
6571     Chain = DAG.getCopyToReg(Chain, dl, RegsToPass[i].first,
6572                              RegsToPass[i].second, InFlag);
6573     InFlag = Chain.getValue(1);
6574   }
6575 
6576   if (isTailCall)
6577     PrepareTailCall(DAG, InFlag, Chain, dl, SPDiff, NumBytes, LROp, FPOp,
6578                     TailCallArguments);
6579 
6580   return FinishCall(CallConv, dl, isTailCall, isVarArg, isPatchPoint,
6581                     /* unused except on PPC64 ELFv1 */ false, DAG,
6582                     RegsToPass, InFlag, Chain, CallSeqStart, Callee, SPDiff,
6583                     NumBytes, Ins, InVals, CS);
6584 }
6585 
6586 bool
6587 PPCTargetLowering::CanLowerReturn(CallingConv::ID CallConv,
6588                                   MachineFunction &MF, bool isVarArg,
6589                                   const SmallVectorImpl<ISD::OutputArg> &Outs,
6590                                   LLVMContext &Context) const {
6591   SmallVector<CCValAssign, 16> RVLocs;
6592   CCState CCInfo(CallConv, isVarArg, MF, RVLocs, Context);
6593   return CCInfo.CheckReturn(
6594       Outs, (Subtarget.isSVR4ABI() && CallConv == CallingConv::Cold)
6595                 ? RetCC_PPC_Cold
6596                 : RetCC_PPC);
6597 }
6598 
6599 SDValue
6600 PPCTargetLowering::LowerReturn(SDValue Chain, CallingConv::ID CallConv,
6601                                bool isVarArg,
6602                                const SmallVectorImpl<ISD::OutputArg> &Outs,
6603                                const SmallVectorImpl<SDValue> &OutVals,
6604                                const SDLoc &dl, SelectionDAG &DAG) const {
6605   SmallVector<CCValAssign, 16> RVLocs;
6606   CCState CCInfo(CallConv, isVarArg, DAG.getMachineFunction(), RVLocs,
6607                  *DAG.getContext());
6608   CCInfo.AnalyzeReturn(Outs,
6609                        (Subtarget.isSVR4ABI() && CallConv == CallingConv::Cold)
6610                            ? RetCC_PPC_Cold
6611                            : RetCC_PPC);
6612 
6613   SDValue Flag;
6614   SmallVector<SDValue, 4> RetOps(1, Chain);
6615 
6616   // Copy the result values into the output registers.
6617   for (unsigned i = 0; i != RVLocs.size(); ++i) {
6618     CCValAssign &VA = RVLocs[i];
6619     assert(VA.isRegLoc() && "Can only return in registers!");
6620 
6621     SDValue Arg = OutVals[i];
6622 
6623     switch (VA.getLocInfo()) {
6624     default: llvm_unreachable("Unknown loc info!");
6625     case CCValAssign::Full: break;
6626     case CCValAssign::AExt:
6627       Arg = DAG.getNode(ISD::ANY_EXTEND, dl, VA.getLocVT(), Arg);
6628       break;
6629     case CCValAssign::ZExt:
6630       Arg = DAG.getNode(ISD::ZERO_EXTEND, dl, VA.getLocVT(), Arg);
6631       break;
6632     case CCValAssign::SExt:
6633       Arg = DAG.getNode(ISD::SIGN_EXTEND, dl, VA.getLocVT(), Arg);
6634       break;
6635     }
6636 
6637     Chain = DAG.getCopyToReg(Chain, dl, VA.getLocReg(), Arg, Flag);
6638     Flag = Chain.getValue(1);
6639     RetOps.push_back(DAG.getRegister(VA.getLocReg(), VA.getLocVT()));
6640   }
6641 
6642   const PPCRegisterInfo *TRI = Subtarget.getRegisterInfo();
6643   const MCPhysReg *I =
6644     TRI->getCalleeSavedRegsViaCopy(&DAG.getMachineFunction());
6645   if (I) {
6646     for (; *I; ++I) {
6647 
6648       if (PPC::G8RCRegClass.contains(*I))
6649         RetOps.push_back(DAG.getRegister(*I, MVT::i64));
6650       else if (PPC::F8RCRegClass.contains(*I))
6651         RetOps.push_back(DAG.getRegister(*I, MVT::getFloatingPointVT(64)));
6652       else if (PPC::CRRCRegClass.contains(*I))
6653         RetOps.push_back(DAG.getRegister(*I, MVT::i1));
6654       else if (PPC::VRRCRegClass.contains(*I))
6655         RetOps.push_back(DAG.getRegister(*I, MVT::Other));
6656       else
6657         llvm_unreachable("Unexpected register class in CSRsViaCopy!");
6658     }
6659   }
6660 
6661   RetOps[0] = Chain;  // Update chain.
6662 
6663   // Add the flag if we have it.
6664   if (Flag.getNode())
6665     RetOps.push_back(Flag);
6666 
6667   return DAG.getNode(PPCISD::RET_FLAG, dl, MVT::Other, RetOps);
6668 }
6669 
6670 SDValue
6671 PPCTargetLowering::LowerGET_DYNAMIC_AREA_OFFSET(SDValue Op,
6672                                                 SelectionDAG &DAG) const {
6673   SDLoc dl(Op);
6674 
6675   // Get the correct type for integers.
6676   EVT IntVT = Op.getValueType();
6677 
6678   // Get the inputs.
6679   SDValue Chain = Op.getOperand(0);
6680   SDValue FPSIdx = getFramePointerFrameIndex(DAG);
6681   // Build a DYNAREAOFFSET node.
6682   SDValue Ops[2] = {Chain, FPSIdx};
6683   SDVTList VTs = DAG.getVTList(IntVT);
6684   return DAG.getNode(PPCISD::DYNAREAOFFSET, dl, VTs, Ops);
6685 }
6686 
6687 SDValue PPCTargetLowering::LowerSTACKRESTORE(SDValue Op,
6688                                              SelectionDAG &DAG) const {
6689   // When we pop the dynamic allocation we need to restore the SP link.
6690   SDLoc dl(Op);
6691 
6692   // Get the correct type for pointers.
6693   EVT PtrVT = getPointerTy(DAG.getDataLayout());
6694 
6695   // Construct the stack pointer operand.
6696   bool isPPC64 = Subtarget.isPPC64();
6697   unsigned SP = isPPC64 ? PPC::X1 : PPC::R1;
6698   SDValue StackPtr = DAG.getRegister(SP, PtrVT);
6699 
6700   // Get the operands for the STACKRESTORE.
6701   SDValue Chain = Op.getOperand(0);
6702   SDValue SaveSP = Op.getOperand(1);
6703 
6704   // Load the old link SP.
6705   SDValue LoadLinkSP =
6706       DAG.getLoad(PtrVT, dl, Chain, StackPtr, MachinePointerInfo());
6707 
6708   // Restore the stack pointer.
6709   Chain = DAG.getCopyToReg(LoadLinkSP.getValue(1), dl, SP, SaveSP);
6710 
6711   // Store the old link SP.
6712   return DAG.getStore(Chain, dl, LoadLinkSP, StackPtr, MachinePointerInfo());
6713 }
6714 
6715 SDValue PPCTargetLowering::getReturnAddrFrameIndex(SelectionDAG &DAG) const {
6716   MachineFunction &MF = DAG.getMachineFunction();
6717   bool isPPC64 = Subtarget.isPPC64();
6718   EVT PtrVT = getPointerTy(MF.getDataLayout());
6719 
6720   // Get current frame pointer save index.  The users of this index will be
6721   // primarily DYNALLOC instructions.
6722   PPCFunctionInfo *FI = MF.getInfo<PPCFunctionInfo>();
6723   int RASI = FI->getReturnAddrSaveIndex();
6724 
6725   // If the frame pointer save index hasn't been defined yet.
6726   if (!RASI) {
6727     // Find out what the fix offset of the frame pointer save area.
6728     int LROffset = Subtarget.getFrameLowering()->getReturnSaveOffset();
6729     // Allocate the frame index for frame pointer save area.
6730     RASI = MF.getFrameInfo().CreateFixedObject(isPPC64? 8 : 4, LROffset, false);
6731     // Save the result.
6732     FI->setReturnAddrSaveIndex(RASI);
6733   }
6734   return DAG.getFrameIndex(RASI, PtrVT);
6735 }
6736 
6737 SDValue
6738 PPCTargetLowering::getFramePointerFrameIndex(SelectionDAG & DAG) const {
6739   MachineFunction &MF = DAG.getMachineFunction();
6740   bool isPPC64 = Subtarget.isPPC64();
6741   EVT PtrVT = getPointerTy(MF.getDataLayout());
6742 
6743   // Get current frame pointer save index.  The users of this index will be
6744   // primarily DYNALLOC instructions.
6745   PPCFunctionInfo *FI = MF.getInfo<PPCFunctionInfo>();
6746   int FPSI = FI->getFramePointerSaveIndex();
6747 
6748   // If the frame pointer save index hasn't been defined yet.
6749   if (!FPSI) {
6750     // Find out what the fix offset of the frame pointer save area.
6751     int FPOffset = Subtarget.getFrameLowering()->getFramePointerSaveOffset();
6752     // Allocate the frame index for frame pointer save area.
6753     FPSI = MF.getFrameInfo().CreateFixedObject(isPPC64? 8 : 4, FPOffset, true);
6754     // Save the result.
6755     FI->setFramePointerSaveIndex(FPSI);
6756   }
6757   return DAG.getFrameIndex(FPSI, PtrVT);
6758 }
6759 
6760 SDValue PPCTargetLowering::LowerDYNAMIC_STACKALLOC(SDValue Op,
6761                                                    SelectionDAG &DAG) const {
6762   // Get the inputs.
6763   SDValue Chain = Op.getOperand(0);
6764   SDValue Size  = Op.getOperand(1);
6765   SDLoc dl(Op);
6766 
6767   // Get the correct type for pointers.
6768   EVT PtrVT = getPointerTy(DAG.getDataLayout());
6769   // Negate the size.
6770   SDValue NegSize = DAG.getNode(ISD::SUB, dl, PtrVT,
6771                                 DAG.getConstant(0, dl, PtrVT), Size);
6772   // Construct a node for the frame pointer save index.
6773   SDValue FPSIdx = getFramePointerFrameIndex(DAG);
6774   // Build a DYNALLOC node.
6775   SDValue Ops[3] = { Chain, NegSize, FPSIdx };
6776   SDVTList VTs = DAG.getVTList(PtrVT, MVT::Other);
6777   return DAG.getNode(PPCISD::DYNALLOC, dl, VTs, Ops);
6778 }
6779 
6780 SDValue PPCTargetLowering::LowerEH_DWARF_CFA(SDValue Op,
6781                                                      SelectionDAG &DAG) const {
6782   MachineFunction &MF = DAG.getMachineFunction();
6783 
6784   bool isPPC64 = Subtarget.isPPC64();
6785   EVT PtrVT = getPointerTy(DAG.getDataLayout());
6786 
6787   int FI = MF.getFrameInfo().CreateFixedObject(isPPC64 ? 8 : 4, 0, false);
6788   return DAG.getFrameIndex(FI, PtrVT);
6789 }
6790 
6791 SDValue PPCTargetLowering::lowerEH_SJLJ_SETJMP(SDValue Op,
6792                                                SelectionDAG &DAG) const {
6793   SDLoc DL(Op);
6794   return DAG.getNode(PPCISD::EH_SJLJ_SETJMP, DL,
6795                      DAG.getVTList(MVT::i32, MVT::Other),
6796                      Op.getOperand(0), Op.getOperand(1));
6797 }
6798 
6799 SDValue PPCTargetLowering::lowerEH_SJLJ_LONGJMP(SDValue Op,
6800                                                 SelectionDAG &DAG) const {
6801   SDLoc DL(Op);
6802   return DAG.getNode(PPCISD::EH_SJLJ_LONGJMP, DL, MVT::Other,
6803                      Op.getOperand(0), Op.getOperand(1));
6804 }
6805 
6806 SDValue PPCTargetLowering::LowerLOAD(SDValue Op, SelectionDAG &DAG) const {
6807   if (Op.getValueType().isVector())
6808     return LowerVectorLoad(Op, DAG);
6809 
6810   assert(Op.getValueType() == MVT::i1 &&
6811          "Custom lowering only for i1 loads");
6812 
6813   // First, load 8 bits into 32 bits, then truncate to 1 bit.
6814 
6815   SDLoc dl(Op);
6816   LoadSDNode *LD = cast<LoadSDNode>(Op);
6817 
6818   SDValue Chain = LD->getChain();
6819   SDValue BasePtr = LD->getBasePtr();
6820   MachineMemOperand *MMO = LD->getMemOperand();
6821 
6822   SDValue NewLD =
6823       DAG.getExtLoad(ISD::EXTLOAD, dl, getPointerTy(DAG.getDataLayout()), Chain,
6824                      BasePtr, MVT::i8, MMO);
6825   SDValue Result = DAG.getNode(ISD::TRUNCATE, dl, MVT::i1, NewLD);
6826 
6827   SDValue Ops[] = { Result, SDValue(NewLD.getNode(), 1) };
6828   return DAG.getMergeValues(Ops, dl);
6829 }
6830 
6831 SDValue PPCTargetLowering::LowerSTORE(SDValue Op, SelectionDAG &DAG) const {
6832   if (Op.getOperand(1).getValueType().isVector())
6833     return LowerVectorStore(Op, DAG);
6834 
6835   assert(Op.getOperand(1).getValueType() == MVT::i1 &&
6836          "Custom lowering only for i1 stores");
6837 
6838   // First, zero extend to 32 bits, then use a truncating store to 8 bits.
6839 
6840   SDLoc dl(Op);
6841   StoreSDNode *ST = cast<StoreSDNode>(Op);
6842 
6843   SDValue Chain = ST->getChain();
6844   SDValue BasePtr = ST->getBasePtr();
6845   SDValue Value = ST->getValue();
6846   MachineMemOperand *MMO = ST->getMemOperand();
6847 
6848   Value = DAG.getNode(ISD::ZERO_EXTEND, dl, getPointerTy(DAG.getDataLayout()),
6849                       Value);
6850   return DAG.getTruncStore(Chain, dl, Value, BasePtr, MVT::i8, MMO);
6851 }
6852 
6853 // FIXME: Remove this once the ANDI glue bug is fixed:
6854 SDValue PPCTargetLowering::LowerTRUNCATE(SDValue Op, SelectionDAG &DAG) const {
6855   assert(Op.getValueType() == MVT::i1 &&
6856          "Custom lowering only for i1 results");
6857 
6858   SDLoc DL(Op);
6859   return DAG.getNode(PPCISD::ANDIo_1_GT_BIT, DL, MVT::i1,
6860                      Op.getOperand(0));
6861 }
6862 
6863 /// LowerSELECT_CC - Lower floating point select_cc's into fsel instruction when
6864 /// possible.
6865 SDValue PPCTargetLowering::LowerSELECT_CC(SDValue Op, SelectionDAG &DAG) const {
6866   // Not FP? Not a fsel.
6867   if (!Op.getOperand(0).getValueType().isFloatingPoint() ||
6868       !Op.getOperand(2).getValueType().isFloatingPoint())
6869     return Op;
6870 
6871   // We might be able to do better than this under some circumstances, but in
6872   // general, fsel-based lowering of select is a finite-math-only optimization.
6873   // For more information, see section F.3 of the 2.06 ISA specification.
6874   if (!DAG.getTarget().Options.NoInfsFPMath ||
6875       !DAG.getTarget().Options.NoNaNsFPMath)
6876     return Op;
6877   // TODO: Propagate flags from the select rather than global settings.
6878   SDNodeFlags Flags;
6879   Flags.setNoInfs(true);
6880   Flags.setNoNaNs(true);
6881 
6882   ISD::CondCode CC = cast<CondCodeSDNode>(Op.getOperand(4))->get();
6883 
6884   EVT ResVT = Op.getValueType();
6885   EVT CmpVT = Op.getOperand(0).getValueType();
6886   SDValue LHS = Op.getOperand(0), RHS = Op.getOperand(1);
6887   SDValue TV  = Op.getOperand(2), FV  = Op.getOperand(3);
6888   SDLoc dl(Op);
6889 
6890   // If the RHS of the comparison is a 0.0, we don't need to do the
6891   // subtraction at all.
6892   SDValue Sel1;
6893   if (isFloatingPointZero(RHS))
6894     switch (CC) {
6895     default: break;       // SETUO etc aren't handled by fsel.
6896     case ISD::SETNE:
6897       std::swap(TV, FV);
6898       LLVM_FALLTHROUGH;
6899     case ISD::SETEQ:
6900       if (LHS.getValueType() == MVT::f32)   // Comparison is always 64-bits
6901         LHS = DAG.getNode(ISD::FP_EXTEND, dl, MVT::f64, LHS);
6902       Sel1 = DAG.getNode(PPCISD::FSEL, dl, ResVT, LHS, TV, FV);
6903       if (Sel1.getValueType() == MVT::f32)   // Comparison is always 64-bits
6904         Sel1 = DAG.getNode(ISD::FP_EXTEND, dl, MVT::f64, Sel1);
6905       return DAG.getNode(PPCISD::FSEL, dl, ResVT,
6906                          DAG.getNode(ISD::FNEG, dl, MVT::f64, LHS), Sel1, FV);
6907     case ISD::SETULT:
6908     case ISD::SETLT:
6909       std::swap(TV, FV);  // fsel is natively setge, swap operands for setlt
6910       LLVM_FALLTHROUGH;
6911     case ISD::SETOGE:
6912     case ISD::SETGE:
6913       if (LHS.getValueType() == MVT::f32)   // Comparison is always 64-bits
6914         LHS = DAG.getNode(ISD::FP_EXTEND, dl, MVT::f64, LHS);
6915       return DAG.getNode(PPCISD::FSEL, dl, ResVT, LHS, TV, FV);
6916     case ISD::SETUGT:
6917     case ISD::SETGT:
6918       std::swap(TV, FV);  // fsel is natively setge, swap operands for setlt
6919       LLVM_FALLTHROUGH;
6920     case ISD::SETOLE:
6921     case ISD::SETLE:
6922       if (LHS.getValueType() == MVT::f32)   // Comparison is always 64-bits
6923         LHS = DAG.getNode(ISD::FP_EXTEND, dl, MVT::f64, LHS);
6924       return DAG.getNode(PPCISD::FSEL, dl, ResVT,
6925                          DAG.getNode(ISD::FNEG, dl, MVT::f64, LHS), TV, FV);
6926     }
6927 
6928   SDValue Cmp;
6929   switch (CC) {
6930   default: break;       // SETUO etc aren't handled by fsel.
6931   case ISD::SETNE:
6932     std::swap(TV, FV);
6933     LLVM_FALLTHROUGH;
6934   case ISD::SETEQ:
6935     Cmp = DAG.getNode(ISD::FSUB, dl, CmpVT, LHS, RHS, Flags);
6936     if (Cmp.getValueType() == MVT::f32)   // Comparison is always 64-bits
6937       Cmp = DAG.getNode(ISD::FP_EXTEND, dl, MVT::f64, Cmp);
6938     Sel1 = DAG.getNode(PPCISD::FSEL, dl, ResVT, Cmp, TV, FV);
6939     if (Sel1.getValueType() == MVT::f32)   // Comparison is always 64-bits
6940       Sel1 = DAG.getNode(ISD::FP_EXTEND, dl, MVT::f64, Sel1);
6941     return DAG.getNode(PPCISD::FSEL, dl, ResVT,
6942                        DAG.getNode(ISD::FNEG, dl, MVT::f64, Cmp), Sel1, FV);
6943   case ISD::SETULT:
6944   case ISD::SETLT:
6945     Cmp = DAG.getNode(ISD::FSUB, dl, CmpVT, LHS, RHS, Flags);
6946     if (Cmp.getValueType() == MVT::f32)   // Comparison is always 64-bits
6947       Cmp = DAG.getNode(ISD::FP_EXTEND, dl, MVT::f64, Cmp);
6948     return DAG.getNode(PPCISD::FSEL, dl, ResVT, Cmp, FV, TV);
6949   case ISD::SETOGE:
6950   case ISD::SETGE:
6951     Cmp = DAG.getNode(ISD::FSUB, dl, CmpVT, LHS, RHS, Flags);
6952     if (Cmp.getValueType() == MVT::f32)   // Comparison is always 64-bits
6953       Cmp = DAG.getNode(ISD::FP_EXTEND, dl, MVT::f64, Cmp);
6954     return DAG.getNode(PPCISD::FSEL, dl, ResVT, Cmp, TV, FV);
6955   case ISD::SETUGT:
6956   case ISD::SETGT:
6957     Cmp = DAG.getNode(ISD::FSUB, dl, CmpVT, RHS, LHS, Flags);
6958     if (Cmp.getValueType() == MVT::f32)   // Comparison is always 64-bits
6959       Cmp = DAG.getNode(ISD::FP_EXTEND, dl, MVT::f64, Cmp);
6960     return DAG.getNode(PPCISD::FSEL, dl, ResVT, Cmp, FV, TV);
6961   case ISD::SETOLE:
6962   case ISD::SETLE:
6963     Cmp = DAG.getNode(ISD::FSUB, dl, CmpVT, RHS, LHS, Flags);
6964     if (Cmp.getValueType() == MVT::f32)   // Comparison is always 64-bits
6965       Cmp = DAG.getNode(ISD::FP_EXTEND, dl, MVT::f64, Cmp);
6966     return DAG.getNode(PPCISD::FSEL, dl, ResVT, Cmp, TV, FV);
6967   }
6968   return Op;
6969 }
6970 
6971 void PPCTargetLowering::LowerFP_TO_INTForReuse(SDValue Op, ReuseLoadInfo &RLI,
6972                                                SelectionDAG &DAG,
6973                                                const SDLoc &dl) const {
6974   assert(Op.getOperand(0).getValueType().isFloatingPoint());
6975   SDValue Src = Op.getOperand(0);
6976   if (Src.getValueType() == MVT::f32)
6977     Src = DAG.getNode(ISD::FP_EXTEND, dl, MVT::f64, Src);
6978 
6979   SDValue Tmp;
6980   switch (Op.getSimpleValueType().SimpleTy) {
6981   default: llvm_unreachable("Unhandled FP_TO_INT type in custom expander!");
6982   case MVT::i32:
6983     Tmp = DAG.getNode(
6984         Op.getOpcode() == ISD::FP_TO_SINT
6985             ? PPCISD::FCTIWZ
6986             : (Subtarget.hasFPCVT() ? PPCISD::FCTIWUZ : PPCISD::FCTIDZ),
6987         dl, MVT::f64, Src);
6988     break;
6989   case MVT::i64:
6990     assert((Op.getOpcode() == ISD::FP_TO_SINT || Subtarget.hasFPCVT()) &&
6991            "i64 FP_TO_UINT is supported only with FPCVT");
6992     Tmp = DAG.getNode(Op.getOpcode()==ISD::FP_TO_SINT ? PPCISD::FCTIDZ :
6993                                                         PPCISD::FCTIDUZ,
6994                       dl, MVT::f64, Src);
6995     break;
6996   }
6997 
6998   // Convert the FP value to an int value through memory.
6999   bool i32Stack = Op.getValueType() == MVT::i32 && Subtarget.hasSTFIWX() &&
7000     (Op.getOpcode() == ISD::FP_TO_SINT || Subtarget.hasFPCVT());
7001   SDValue FIPtr = DAG.CreateStackTemporary(i32Stack ? MVT::i32 : MVT::f64);
7002   int FI = cast<FrameIndexSDNode>(FIPtr)->getIndex();
7003   MachinePointerInfo MPI =
7004       MachinePointerInfo::getFixedStack(DAG.getMachineFunction(), FI);
7005 
7006   // Emit a store to the stack slot.
7007   SDValue Chain;
7008   if (i32Stack) {
7009     MachineFunction &MF = DAG.getMachineFunction();
7010     MachineMemOperand *MMO =
7011       MF.getMachineMemOperand(MPI, MachineMemOperand::MOStore, 4, 4);
7012     SDValue Ops[] = { DAG.getEntryNode(), Tmp, FIPtr };
7013     Chain = DAG.getMemIntrinsicNode(PPCISD::STFIWX, dl,
7014               DAG.getVTList(MVT::Other), Ops, MVT::i32, MMO);
7015   } else
7016     Chain = DAG.getStore(DAG.getEntryNode(), dl, Tmp, FIPtr, MPI);
7017 
7018   // Result is a load from the stack slot.  If loading 4 bytes, make sure to
7019   // add in a bias on big endian.
7020   if (Op.getValueType() == MVT::i32 && !i32Stack) {
7021     FIPtr = DAG.getNode(ISD::ADD, dl, FIPtr.getValueType(), FIPtr,
7022                         DAG.getConstant(4, dl, FIPtr.getValueType()));
7023     MPI = MPI.getWithOffset(Subtarget.isLittleEndian() ? 0 : 4);
7024   }
7025 
7026   RLI.Chain = Chain;
7027   RLI.Ptr = FIPtr;
7028   RLI.MPI = MPI;
7029 }
7030 
7031 /// Custom lowers floating point to integer conversions to use
7032 /// the direct move instructions available in ISA 2.07 to avoid the
7033 /// need for load/store combinations.
7034 SDValue PPCTargetLowering::LowerFP_TO_INTDirectMove(SDValue Op,
7035                                                     SelectionDAG &DAG,
7036                                                     const SDLoc &dl) const {
7037   assert(Op.getOperand(0).getValueType().isFloatingPoint());
7038   SDValue Src = Op.getOperand(0);
7039 
7040   if (Src.getValueType() == MVT::f32)
7041     Src = DAG.getNode(ISD::FP_EXTEND, dl, MVT::f64, Src);
7042 
7043   SDValue Tmp;
7044   switch (Op.getSimpleValueType().SimpleTy) {
7045   default: llvm_unreachable("Unhandled FP_TO_INT type in custom expander!");
7046   case MVT::i32:
7047     Tmp = DAG.getNode(
7048         Op.getOpcode() == ISD::FP_TO_SINT
7049             ? PPCISD::FCTIWZ
7050             : (Subtarget.hasFPCVT() ? PPCISD::FCTIWUZ : PPCISD::FCTIDZ),
7051         dl, MVT::f64, Src);
7052     Tmp = DAG.getNode(PPCISD::MFVSR, dl, MVT::i32, Tmp);
7053     break;
7054   case MVT::i64:
7055     assert((Op.getOpcode() == ISD::FP_TO_SINT || Subtarget.hasFPCVT()) &&
7056            "i64 FP_TO_UINT is supported only with FPCVT");
7057     Tmp = DAG.getNode(Op.getOpcode()==ISD::FP_TO_SINT ? PPCISD::FCTIDZ :
7058                                                         PPCISD::FCTIDUZ,
7059                       dl, MVT::f64, Src);
7060     Tmp = DAG.getNode(PPCISD::MFVSR, dl, MVT::i64, Tmp);
7061     break;
7062   }
7063   return Tmp;
7064 }
7065 
7066 SDValue PPCTargetLowering::LowerFP_TO_INT(SDValue Op, SelectionDAG &DAG,
7067                                           const SDLoc &dl) const {
7068 
7069   // FP to INT conversions are legal for f128.
7070   if (EnableQuadPrecision && (Op->getOperand(0).getValueType() == MVT::f128))
7071     return Op;
7072 
7073   // Expand ppcf128 to i32 by hand for the benefit of llvm-gcc bootstrap on
7074   // PPC (the libcall is not available).
7075   if (Op.getOperand(0).getValueType() == MVT::ppcf128) {
7076     if (Op.getValueType() == MVT::i32) {
7077       if (Op.getOpcode() == ISD::FP_TO_SINT) {
7078         SDValue Lo = DAG.getNode(ISD::EXTRACT_ELEMENT, dl,
7079                                  MVT::f64, Op.getOperand(0),
7080                                  DAG.getIntPtrConstant(0, dl));
7081         SDValue Hi = DAG.getNode(ISD::EXTRACT_ELEMENT, dl,
7082                                  MVT::f64, Op.getOperand(0),
7083                                  DAG.getIntPtrConstant(1, dl));
7084 
7085         // Add the two halves of the long double in round-to-zero mode.
7086         SDValue Res = DAG.getNode(PPCISD::FADDRTZ, dl, MVT::f64, Lo, Hi);
7087 
7088         // Now use a smaller FP_TO_SINT.
7089         return DAG.getNode(ISD::FP_TO_SINT, dl, MVT::i32, Res);
7090       }
7091       if (Op.getOpcode() == ISD::FP_TO_UINT) {
7092         const uint64_t TwoE31[] = {0x41e0000000000000LL, 0};
7093         APFloat APF = APFloat(APFloat::PPCDoubleDouble(), APInt(128, TwoE31));
7094         SDValue Tmp = DAG.getConstantFP(APF, dl, MVT::ppcf128);
7095         //  X>=2^31 ? (int)(X-2^31)+0x80000000 : (int)X
7096         // FIXME: generated code sucks.
7097         // TODO: Are there fast-math-flags to propagate to this FSUB?
7098         SDValue True = DAG.getNode(ISD::FSUB, dl, MVT::ppcf128,
7099                                    Op.getOperand(0), Tmp);
7100         True = DAG.getNode(ISD::FP_TO_SINT, dl, MVT::i32, True);
7101         True = DAG.getNode(ISD::ADD, dl, MVT::i32, True,
7102                            DAG.getConstant(0x80000000, dl, MVT::i32));
7103         SDValue False = DAG.getNode(ISD::FP_TO_SINT, dl, MVT::i32,
7104                                     Op.getOperand(0));
7105         return DAG.getSelectCC(dl, Op.getOperand(0), Tmp, True, False,
7106                                ISD::SETGE);
7107       }
7108     }
7109 
7110     return SDValue();
7111   }
7112 
7113   if (Subtarget.hasDirectMove() && Subtarget.isPPC64())
7114     return LowerFP_TO_INTDirectMove(Op, DAG, dl);
7115 
7116   ReuseLoadInfo RLI;
7117   LowerFP_TO_INTForReuse(Op, RLI, DAG, dl);
7118 
7119   return DAG.getLoad(Op.getValueType(), dl, RLI.Chain, RLI.Ptr, RLI.MPI,
7120                      RLI.Alignment, RLI.MMOFlags(), RLI.AAInfo, RLI.Ranges);
7121 }
7122 
7123 // We're trying to insert a regular store, S, and then a load, L. If the
7124 // incoming value, O, is a load, we might just be able to have our load use the
7125 // address used by O. However, we don't know if anything else will store to
7126 // that address before we can load from it. To prevent this situation, we need
7127 // to insert our load, L, into the chain as a peer of O. To do this, we give L
7128 // the same chain operand as O, we create a token factor from the chain results
7129 // of O and L, and we replace all uses of O's chain result with that token
7130 // factor (see spliceIntoChain below for this last part).
7131 bool PPCTargetLowering::canReuseLoadAddress(SDValue Op, EVT MemVT,
7132                                             ReuseLoadInfo &RLI,
7133                                             SelectionDAG &DAG,
7134                                             ISD::LoadExtType ET) const {
7135   SDLoc dl(Op);
7136   if (ET == ISD::NON_EXTLOAD &&
7137       (Op.getOpcode() == ISD::FP_TO_UINT ||
7138        Op.getOpcode() == ISD::FP_TO_SINT) &&
7139       isOperationLegalOrCustom(Op.getOpcode(),
7140                                Op.getOperand(0).getValueType())) {
7141 
7142     LowerFP_TO_INTForReuse(Op, RLI, DAG, dl);
7143     return true;
7144   }
7145 
7146   LoadSDNode *LD = dyn_cast<LoadSDNode>(Op);
7147   if (!LD || LD->getExtensionType() != ET || LD->isVolatile() ||
7148       LD->isNonTemporal())
7149     return false;
7150   if (LD->getMemoryVT() != MemVT)
7151     return false;
7152 
7153   RLI.Ptr = LD->getBasePtr();
7154   if (LD->isIndexed() && !LD->getOffset().isUndef()) {
7155     assert(LD->getAddressingMode() == ISD::PRE_INC &&
7156            "Non-pre-inc AM on PPC?");
7157     RLI.Ptr = DAG.getNode(ISD::ADD, dl, RLI.Ptr.getValueType(), RLI.Ptr,
7158                           LD->getOffset());
7159   }
7160 
7161   RLI.Chain = LD->getChain();
7162   RLI.MPI = LD->getPointerInfo();
7163   RLI.IsDereferenceable = LD->isDereferenceable();
7164   RLI.IsInvariant = LD->isInvariant();
7165   RLI.Alignment = LD->getAlignment();
7166   RLI.AAInfo = LD->getAAInfo();
7167   RLI.Ranges = LD->getRanges();
7168 
7169   RLI.ResChain = SDValue(LD, LD->isIndexed() ? 2 : 1);
7170   return true;
7171 }
7172 
7173 // Given the head of the old chain, ResChain, insert a token factor containing
7174 // it and NewResChain, and make users of ResChain now be users of that token
7175 // factor.
7176 // TODO: Remove and use DAG::makeEquivalentMemoryOrdering() instead.
7177 void PPCTargetLowering::spliceIntoChain(SDValue ResChain,
7178                                         SDValue NewResChain,
7179                                         SelectionDAG &DAG) const {
7180   if (!ResChain)
7181     return;
7182 
7183   SDLoc dl(NewResChain);
7184 
7185   SDValue TF = DAG.getNode(ISD::TokenFactor, dl, MVT::Other,
7186                            NewResChain, DAG.getUNDEF(MVT::Other));
7187   assert(TF.getNode() != NewResChain.getNode() &&
7188          "A new TF really is required here");
7189 
7190   DAG.ReplaceAllUsesOfValueWith(ResChain, TF);
7191   DAG.UpdateNodeOperands(TF.getNode(), ResChain, NewResChain);
7192 }
7193 
7194 /// Analyze profitability of direct move
7195 /// prefer float load to int load plus direct move
7196 /// when there is no integer use of int load
7197 bool PPCTargetLowering::directMoveIsProfitable(const SDValue &Op) const {
7198   SDNode *Origin = Op.getOperand(0).getNode();
7199   if (Origin->getOpcode() != ISD::LOAD)
7200     return true;
7201 
7202   // If there is no LXSIBZX/LXSIHZX, like Power8,
7203   // prefer direct move if the memory size is 1 or 2 bytes.
7204   MachineMemOperand *MMO = cast<LoadSDNode>(Origin)->getMemOperand();
7205   if (!Subtarget.hasP9Vector() && MMO->getSize() <= 2)
7206     return true;
7207 
7208   for (SDNode::use_iterator UI = Origin->use_begin(),
7209                             UE = Origin->use_end();
7210        UI != UE; ++UI) {
7211 
7212     // Only look at the users of the loaded value.
7213     if (UI.getUse().get().getResNo() != 0)
7214       continue;
7215 
7216     if (UI->getOpcode() != ISD::SINT_TO_FP &&
7217         UI->getOpcode() != ISD::UINT_TO_FP)
7218       return true;
7219   }
7220 
7221   return false;
7222 }
7223 
7224 /// Custom lowers integer to floating point conversions to use
7225 /// the direct move instructions available in ISA 2.07 to avoid the
7226 /// need for load/store combinations.
7227 SDValue PPCTargetLowering::LowerINT_TO_FPDirectMove(SDValue Op,
7228                                                     SelectionDAG &DAG,
7229                                                     const SDLoc &dl) const {
7230   assert((Op.getValueType() == MVT::f32 ||
7231           Op.getValueType() == MVT::f64) &&
7232          "Invalid floating point type as target of conversion");
7233   assert(Subtarget.hasFPCVT() &&
7234          "Int to FP conversions with direct moves require FPCVT");
7235   SDValue FP;
7236   SDValue Src = Op.getOperand(0);
7237   bool SinglePrec = Op.getValueType() == MVT::f32;
7238   bool WordInt = Src.getSimpleValueType().SimpleTy == MVT::i32;
7239   bool Signed = Op.getOpcode() == ISD::SINT_TO_FP;
7240   unsigned ConvOp = Signed ? (SinglePrec ? PPCISD::FCFIDS : PPCISD::FCFID) :
7241                              (SinglePrec ? PPCISD::FCFIDUS : PPCISD::FCFIDU);
7242 
7243   if (WordInt) {
7244     FP = DAG.getNode(Signed ? PPCISD::MTVSRA : PPCISD::MTVSRZ,
7245                      dl, MVT::f64, Src);
7246     FP = DAG.getNode(ConvOp, dl, SinglePrec ? MVT::f32 : MVT::f64, FP);
7247   }
7248   else {
7249     FP = DAG.getNode(PPCISD::MTVSRA, dl, MVT::f64, Src);
7250     FP = DAG.getNode(ConvOp, dl, SinglePrec ? MVT::f32 : MVT::f64, FP);
7251   }
7252 
7253   return FP;
7254 }
7255 
7256 SDValue PPCTargetLowering::LowerINT_TO_FP(SDValue Op,
7257                                           SelectionDAG &DAG) const {
7258   SDLoc dl(Op);
7259 
7260   // Conversions to f128 are legal.
7261   if (EnableQuadPrecision && (Op.getValueType() == MVT::f128))
7262     return Op;
7263 
7264   if (Subtarget.hasQPX() && Op.getOperand(0).getValueType() == MVT::v4i1) {
7265     if (Op.getValueType() != MVT::v4f32 && Op.getValueType() != MVT::v4f64)
7266       return SDValue();
7267 
7268     SDValue Value = Op.getOperand(0);
7269     // The values are now known to be -1 (false) or 1 (true). To convert this
7270     // into 0 (false) and 1 (true), add 1 and then divide by 2 (multiply by 0.5).
7271     // This can be done with an fma and the 0.5 constant: (V+1.0)*0.5 = 0.5*V+0.5
7272     Value = DAG.getNode(PPCISD::QBFLT, dl, MVT::v4f64, Value);
7273 
7274     SDValue FPHalfs = DAG.getConstantFP(0.5, dl, MVT::v4f64);
7275 
7276     Value = DAG.getNode(ISD::FMA, dl, MVT::v4f64, Value, FPHalfs, FPHalfs);
7277 
7278     if (Op.getValueType() != MVT::v4f64)
7279       Value = DAG.getNode(ISD::FP_ROUND, dl,
7280                           Op.getValueType(), Value,
7281                           DAG.getIntPtrConstant(1, dl));
7282     return Value;
7283   }
7284 
7285   // Don't handle ppc_fp128 here; let it be lowered to a libcall.
7286   if (Op.getValueType() != MVT::f32 && Op.getValueType() != MVT::f64)
7287     return SDValue();
7288 
7289   if (Op.getOperand(0).getValueType() == MVT::i1)
7290     return DAG.getNode(ISD::SELECT, dl, Op.getValueType(), Op.getOperand(0),
7291                        DAG.getConstantFP(1.0, dl, Op.getValueType()),
7292                        DAG.getConstantFP(0.0, dl, Op.getValueType()));
7293 
7294   // If we have direct moves, we can do all the conversion, skip the store/load
7295   // however, without FPCVT we can't do most conversions.
7296   if (Subtarget.hasDirectMove() && directMoveIsProfitable(Op) &&
7297       Subtarget.isPPC64() && Subtarget.hasFPCVT())
7298     return LowerINT_TO_FPDirectMove(Op, DAG, dl);
7299 
7300   assert((Op.getOpcode() == ISD::SINT_TO_FP || Subtarget.hasFPCVT()) &&
7301          "UINT_TO_FP is supported only with FPCVT");
7302 
7303   // If we have FCFIDS, then use it when converting to single-precision.
7304   // Otherwise, convert to double-precision and then round.
7305   unsigned FCFOp = (Subtarget.hasFPCVT() && Op.getValueType() == MVT::f32)
7306                        ? (Op.getOpcode() == ISD::UINT_TO_FP ? PPCISD::FCFIDUS
7307                                                             : PPCISD::FCFIDS)
7308                        : (Op.getOpcode() == ISD::UINT_TO_FP ? PPCISD::FCFIDU
7309                                                             : PPCISD::FCFID);
7310   MVT FCFTy = (Subtarget.hasFPCVT() && Op.getValueType() == MVT::f32)
7311                   ? MVT::f32
7312                   : MVT::f64;
7313 
7314   if (Op.getOperand(0).getValueType() == MVT::i64) {
7315     SDValue SINT = Op.getOperand(0);
7316     // When converting to single-precision, we actually need to convert
7317     // to double-precision first and then round to single-precision.
7318     // To avoid double-rounding effects during that operation, we have
7319     // to prepare the input operand.  Bits that might be truncated when
7320     // converting to double-precision are replaced by a bit that won't
7321     // be lost at this stage, but is below the single-precision rounding
7322     // position.
7323     //
7324     // However, if -enable-unsafe-fp-math is in effect, accept double
7325     // rounding to avoid the extra overhead.
7326     if (Op.getValueType() == MVT::f32 &&
7327         !Subtarget.hasFPCVT() &&
7328         !DAG.getTarget().Options.UnsafeFPMath) {
7329 
7330       // Twiddle input to make sure the low 11 bits are zero.  (If this
7331       // is the case, we are guaranteed the value will fit into the 53 bit
7332       // mantissa of an IEEE double-precision value without rounding.)
7333       // If any of those low 11 bits were not zero originally, make sure
7334       // bit 12 (value 2048) is set instead, so that the final rounding
7335       // to single-precision gets the correct result.
7336       SDValue Round = DAG.getNode(ISD::AND, dl, MVT::i64,
7337                                   SINT, DAG.getConstant(2047, dl, MVT::i64));
7338       Round = DAG.getNode(ISD::ADD, dl, MVT::i64,
7339                           Round, DAG.getConstant(2047, dl, MVT::i64));
7340       Round = DAG.getNode(ISD::OR, dl, MVT::i64, Round, SINT);
7341       Round = DAG.getNode(ISD::AND, dl, MVT::i64,
7342                           Round, DAG.getConstant(-2048, dl, MVT::i64));
7343 
7344       // However, we cannot use that value unconditionally: if the magnitude
7345       // of the input value is small, the bit-twiddling we did above might
7346       // end up visibly changing the output.  Fortunately, in that case, we
7347       // don't need to twiddle bits since the original input will convert
7348       // exactly to double-precision floating-point already.  Therefore,
7349       // construct a conditional to use the original value if the top 11
7350       // bits are all sign-bit copies, and use the rounded value computed
7351       // above otherwise.
7352       SDValue Cond = DAG.getNode(ISD::SRA, dl, MVT::i64,
7353                                  SINT, DAG.getConstant(53, dl, MVT::i32));
7354       Cond = DAG.getNode(ISD::ADD, dl, MVT::i64,
7355                          Cond, DAG.getConstant(1, dl, MVT::i64));
7356       Cond = DAG.getSetCC(dl, MVT::i32,
7357                           Cond, DAG.getConstant(1, dl, MVT::i64), ISD::SETUGT);
7358 
7359       SINT = DAG.getNode(ISD::SELECT, dl, MVT::i64, Cond, Round, SINT);
7360     }
7361 
7362     ReuseLoadInfo RLI;
7363     SDValue Bits;
7364 
7365     MachineFunction &MF = DAG.getMachineFunction();
7366     if (canReuseLoadAddress(SINT, MVT::i64, RLI, DAG)) {
7367       Bits = DAG.getLoad(MVT::f64, dl, RLI.Chain, RLI.Ptr, RLI.MPI,
7368                          RLI.Alignment, RLI.MMOFlags(), RLI.AAInfo, RLI.Ranges);
7369       spliceIntoChain(RLI.ResChain, Bits.getValue(1), DAG);
7370     } else if (Subtarget.hasLFIWAX() &&
7371                canReuseLoadAddress(SINT, MVT::i32, RLI, DAG, ISD::SEXTLOAD)) {
7372       MachineMemOperand *MMO =
7373         MF.getMachineMemOperand(RLI.MPI, MachineMemOperand::MOLoad, 4,
7374                                 RLI.Alignment, RLI.AAInfo, RLI.Ranges);
7375       SDValue Ops[] = { RLI.Chain, RLI.Ptr };
7376       Bits = DAG.getMemIntrinsicNode(PPCISD::LFIWAX, dl,
7377                                      DAG.getVTList(MVT::f64, MVT::Other),
7378                                      Ops, MVT::i32, MMO);
7379       spliceIntoChain(RLI.ResChain, Bits.getValue(1), DAG);
7380     } else if (Subtarget.hasFPCVT() &&
7381                canReuseLoadAddress(SINT, MVT::i32, RLI, DAG, ISD::ZEXTLOAD)) {
7382       MachineMemOperand *MMO =
7383         MF.getMachineMemOperand(RLI.MPI, MachineMemOperand::MOLoad, 4,
7384                                 RLI.Alignment, RLI.AAInfo, RLI.Ranges);
7385       SDValue Ops[] = { RLI.Chain, RLI.Ptr };
7386       Bits = DAG.getMemIntrinsicNode(PPCISD::LFIWZX, dl,
7387                                      DAG.getVTList(MVT::f64, MVT::Other),
7388                                      Ops, MVT::i32, MMO);
7389       spliceIntoChain(RLI.ResChain, Bits.getValue(1), DAG);
7390     } else if (((Subtarget.hasLFIWAX() &&
7391                  SINT.getOpcode() == ISD::SIGN_EXTEND) ||
7392                 (Subtarget.hasFPCVT() &&
7393                  SINT.getOpcode() == ISD::ZERO_EXTEND)) &&
7394                SINT.getOperand(0).getValueType() == MVT::i32) {
7395       MachineFrameInfo &MFI = MF.getFrameInfo();
7396       EVT PtrVT = getPointerTy(DAG.getDataLayout());
7397 
7398       int FrameIdx = MFI.CreateStackObject(4, 4, false);
7399       SDValue FIdx = DAG.getFrameIndex(FrameIdx, PtrVT);
7400 
7401       SDValue Store =
7402           DAG.getStore(DAG.getEntryNode(), dl, SINT.getOperand(0), FIdx,
7403                        MachinePointerInfo::getFixedStack(
7404                            DAG.getMachineFunction(), FrameIdx));
7405 
7406       assert(cast<StoreSDNode>(Store)->getMemoryVT() == MVT::i32 &&
7407              "Expected an i32 store");
7408 
7409       RLI.Ptr = FIdx;
7410       RLI.Chain = Store;
7411       RLI.MPI =
7412           MachinePointerInfo::getFixedStack(DAG.getMachineFunction(), FrameIdx);
7413       RLI.Alignment = 4;
7414 
7415       MachineMemOperand *MMO =
7416         MF.getMachineMemOperand(RLI.MPI, MachineMemOperand::MOLoad, 4,
7417                                 RLI.Alignment, RLI.AAInfo, RLI.Ranges);
7418       SDValue Ops[] = { RLI.Chain, RLI.Ptr };
7419       Bits = DAG.getMemIntrinsicNode(SINT.getOpcode() == ISD::ZERO_EXTEND ?
7420                                      PPCISD::LFIWZX : PPCISD::LFIWAX,
7421                                      dl, DAG.getVTList(MVT::f64, MVT::Other),
7422                                      Ops, MVT::i32, MMO);
7423     } else
7424       Bits = DAG.getNode(ISD::BITCAST, dl, MVT::f64, SINT);
7425 
7426     SDValue FP = DAG.getNode(FCFOp, dl, FCFTy, Bits);
7427 
7428     if (Op.getValueType() == MVT::f32 && !Subtarget.hasFPCVT())
7429       FP = DAG.getNode(ISD::FP_ROUND, dl,
7430                        MVT::f32, FP, DAG.getIntPtrConstant(0, dl));
7431     return FP;
7432   }
7433 
7434   assert(Op.getOperand(0).getValueType() == MVT::i32 &&
7435          "Unhandled INT_TO_FP type in custom expander!");
7436   // Since we only generate this in 64-bit mode, we can take advantage of
7437   // 64-bit registers.  In particular, sign extend the input value into the
7438   // 64-bit register with extsw, store the WHOLE 64-bit value into the stack
7439   // then lfd it and fcfid it.
7440   MachineFunction &MF = DAG.getMachineFunction();
7441   MachineFrameInfo &MFI = MF.getFrameInfo();
7442   EVT PtrVT = getPointerTy(MF.getDataLayout());
7443 
7444   SDValue Ld;
7445   if (Subtarget.hasLFIWAX() || Subtarget.hasFPCVT()) {
7446     ReuseLoadInfo RLI;
7447     bool ReusingLoad;
7448     if (!(ReusingLoad = canReuseLoadAddress(Op.getOperand(0), MVT::i32, RLI,
7449                                             DAG))) {
7450       int FrameIdx = MFI.CreateStackObject(4, 4, false);
7451       SDValue FIdx = DAG.getFrameIndex(FrameIdx, PtrVT);
7452 
7453       SDValue Store =
7454           DAG.getStore(DAG.getEntryNode(), dl, Op.getOperand(0), FIdx,
7455                        MachinePointerInfo::getFixedStack(
7456                            DAG.getMachineFunction(), FrameIdx));
7457 
7458       assert(cast<StoreSDNode>(Store)->getMemoryVT() == MVT::i32 &&
7459              "Expected an i32 store");
7460 
7461       RLI.Ptr = FIdx;
7462       RLI.Chain = Store;
7463       RLI.MPI =
7464           MachinePointerInfo::getFixedStack(DAG.getMachineFunction(), FrameIdx);
7465       RLI.Alignment = 4;
7466     }
7467 
7468     MachineMemOperand *MMO =
7469       MF.getMachineMemOperand(RLI.MPI, MachineMemOperand::MOLoad, 4,
7470                               RLI.Alignment, RLI.AAInfo, RLI.Ranges);
7471     SDValue Ops[] = { RLI.Chain, RLI.Ptr };
7472     Ld = DAG.getMemIntrinsicNode(Op.getOpcode() == ISD::UINT_TO_FP ?
7473                                    PPCISD::LFIWZX : PPCISD::LFIWAX,
7474                                  dl, DAG.getVTList(MVT::f64, MVT::Other),
7475                                  Ops, MVT::i32, MMO);
7476     if (ReusingLoad)
7477       spliceIntoChain(RLI.ResChain, Ld.getValue(1), DAG);
7478   } else {
7479     assert(Subtarget.isPPC64() &&
7480            "i32->FP without LFIWAX supported only on PPC64");
7481 
7482     int FrameIdx = MFI.CreateStackObject(8, 8, false);
7483     SDValue FIdx = DAG.getFrameIndex(FrameIdx, PtrVT);
7484 
7485     SDValue Ext64 = DAG.getNode(ISD::SIGN_EXTEND, dl, MVT::i64,
7486                                 Op.getOperand(0));
7487 
7488     // STD the extended value into the stack slot.
7489     SDValue Store = DAG.getStore(
7490         DAG.getEntryNode(), dl, Ext64, FIdx,
7491         MachinePointerInfo::getFixedStack(DAG.getMachineFunction(), FrameIdx));
7492 
7493     // Load the value as a double.
7494     Ld = DAG.getLoad(
7495         MVT::f64, dl, Store, FIdx,
7496         MachinePointerInfo::getFixedStack(DAG.getMachineFunction(), FrameIdx));
7497   }
7498 
7499   // FCFID it and return it.
7500   SDValue FP = DAG.getNode(FCFOp, dl, FCFTy, Ld);
7501   if (Op.getValueType() == MVT::f32 && !Subtarget.hasFPCVT())
7502     FP = DAG.getNode(ISD::FP_ROUND, dl, MVT::f32, FP,
7503                      DAG.getIntPtrConstant(0, dl));
7504   return FP;
7505 }
7506 
7507 SDValue PPCTargetLowering::LowerFLT_ROUNDS_(SDValue Op,
7508                                             SelectionDAG &DAG) const {
7509   SDLoc dl(Op);
7510   /*
7511    The rounding mode is in bits 30:31 of FPSR, and has the following
7512    settings:
7513      00 Round to nearest
7514      01 Round to 0
7515      10 Round to +inf
7516      11 Round to -inf
7517 
7518   FLT_ROUNDS, on the other hand, expects the following:
7519     -1 Undefined
7520      0 Round to 0
7521      1 Round to nearest
7522      2 Round to +inf
7523      3 Round to -inf
7524 
7525   To perform the conversion, we do:
7526     ((FPSCR & 0x3) ^ ((~FPSCR & 0x3) >> 1))
7527   */
7528 
7529   MachineFunction &MF = DAG.getMachineFunction();
7530   EVT VT = Op.getValueType();
7531   EVT PtrVT = getPointerTy(MF.getDataLayout());
7532 
7533   // Save FP Control Word to register
7534   EVT NodeTys[] = {
7535     MVT::f64,    // return register
7536     MVT::Glue    // unused in this context
7537   };
7538   SDValue Chain = DAG.getNode(PPCISD::MFFS, dl, NodeTys, None);
7539 
7540   // Save FP register to stack slot
7541   int SSFI = MF.getFrameInfo().CreateStackObject(8, 8, false);
7542   SDValue StackSlot = DAG.getFrameIndex(SSFI, PtrVT);
7543   SDValue Store = DAG.getStore(DAG.getEntryNode(), dl, Chain, StackSlot,
7544                                MachinePointerInfo());
7545 
7546   // Load FP Control Word from low 32 bits of stack slot.
7547   SDValue Four = DAG.getConstant(4, dl, PtrVT);
7548   SDValue Addr = DAG.getNode(ISD::ADD, dl, PtrVT, StackSlot, Four);
7549   SDValue CWD = DAG.getLoad(MVT::i32, dl, Store, Addr, MachinePointerInfo());
7550 
7551   // Transform as necessary
7552   SDValue CWD1 =
7553     DAG.getNode(ISD::AND, dl, MVT::i32,
7554                 CWD, DAG.getConstant(3, dl, MVT::i32));
7555   SDValue CWD2 =
7556     DAG.getNode(ISD::SRL, dl, MVT::i32,
7557                 DAG.getNode(ISD::AND, dl, MVT::i32,
7558                             DAG.getNode(ISD::XOR, dl, MVT::i32,
7559                                         CWD, DAG.getConstant(3, dl, MVT::i32)),
7560                             DAG.getConstant(3, dl, MVT::i32)),
7561                 DAG.getConstant(1, dl, MVT::i32));
7562 
7563   SDValue RetVal =
7564     DAG.getNode(ISD::XOR, dl, MVT::i32, CWD1, CWD2);
7565 
7566   return DAG.getNode((VT.getSizeInBits() < 16 ?
7567                       ISD::TRUNCATE : ISD::ZERO_EXTEND), dl, VT, RetVal);
7568 }
7569 
7570 SDValue PPCTargetLowering::LowerSHL_PARTS(SDValue Op, SelectionDAG &DAG) const {
7571   EVT VT = Op.getValueType();
7572   unsigned BitWidth = VT.getSizeInBits();
7573   SDLoc dl(Op);
7574   assert(Op.getNumOperands() == 3 &&
7575          VT == Op.getOperand(1).getValueType() &&
7576          "Unexpected SHL!");
7577 
7578   // Expand into a bunch of logical ops.  Note that these ops
7579   // depend on the PPC behavior for oversized shift amounts.
7580   SDValue Lo = Op.getOperand(0);
7581   SDValue Hi = Op.getOperand(1);
7582   SDValue Amt = Op.getOperand(2);
7583   EVT AmtVT = Amt.getValueType();
7584 
7585   SDValue Tmp1 = DAG.getNode(ISD::SUB, dl, AmtVT,
7586                              DAG.getConstant(BitWidth, dl, AmtVT), Amt);
7587   SDValue Tmp2 = DAG.getNode(PPCISD::SHL, dl, VT, Hi, Amt);
7588   SDValue Tmp3 = DAG.getNode(PPCISD::SRL, dl, VT, Lo, Tmp1);
7589   SDValue Tmp4 = DAG.getNode(ISD::OR , dl, VT, Tmp2, Tmp3);
7590   SDValue Tmp5 = DAG.getNode(ISD::ADD, dl, AmtVT, Amt,
7591                              DAG.getConstant(-BitWidth, dl, AmtVT));
7592   SDValue Tmp6 = DAG.getNode(PPCISD::SHL, dl, VT, Lo, Tmp5);
7593   SDValue OutHi = DAG.getNode(ISD::OR, dl, VT, Tmp4, Tmp6);
7594   SDValue OutLo = DAG.getNode(PPCISD::SHL, dl, VT, Lo, Amt);
7595   SDValue OutOps[] = { OutLo, OutHi };
7596   return DAG.getMergeValues(OutOps, dl);
7597 }
7598 
7599 SDValue PPCTargetLowering::LowerSRL_PARTS(SDValue Op, SelectionDAG &DAG) const {
7600   EVT VT = Op.getValueType();
7601   SDLoc dl(Op);
7602   unsigned BitWidth = VT.getSizeInBits();
7603   assert(Op.getNumOperands() == 3 &&
7604          VT == Op.getOperand(1).getValueType() &&
7605          "Unexpected SRL!");
7606 
7607   // Expand into a bunch of logical ops.  Note that these ops
7608   // depend on the PPC behavior for oversized shift amounts.
7609   SDValue Lo = Op.getOperand(0);
7610   SDValue Hi = Op.getOperand(1);
7611   SDValue Amt = Op.getOperand(2);
7612   EVT AmtVT = Amt.getValueType();
7613 
7614   SDValue Tmp1 = DAG.getNode(ISD::SUB, dl, AmtVT,
7615                              DAG.getConstant(BitWidth, dl, AmtVT), Amt);
7616   SDValue Tmp2 = DAG.getNode(PPCISD::SRL, dl, VT, Lo, Amt);
7617   SDValue Tmp3 = DAG.getNode(PPCISD::SHL, dl, VT, Hi, Tmp1);
7618   SDValue Tmp4 = DAG.getNode(ISD::OR, dl, VT, Tmp2, Tmp3);
7619   SDValue Tmp5 = DAG.getNode(ISD::ADD, dl, AmtVT, Amt,
7620                              DAG.getConstant(-BitWidth, dl, AmtVT));
7621   SDValue Tmp6 = DAG.getNode(PPCISD::SRL, dl, VT, Hi, Tmp5);
7622   SDValue OutLo = DAG.getNode(ISD::OR, dl, VT, Tmp4, Tmp6);
7623   SDValue OutHi = DAG.getNode(PPCISD::SRL, dl, VT, Hi, Amt);
7624   SDValue OutOps[] = { OutLo, OutHi };
7625   return DAG.getMergeValues(OutOps, dl);
7626 }
7627 
7628 SDValue PPCTargetLowering::LowerSRA_PARTS(SDValue Op, SelectionDAG &DAG) const {
7629   SDLoc dl(Op);
7630   EVT VT = Op.getValueType();
7631   unsigned BitWidth = VT.getSizeInBits();
7632   assert(Op.getNumOperands() == 3 &&
7633          VT == Op.getOperand(1).getValueType() &&
7634          "Unexpected SRA!");
7635 
7636   // Expand into a bunch of logical ops, followed by a select_cc.
7637   SDValue Lo = Op.getOperand(0);
7638   SDValue Hi = Op.getOperand(1);
7639   SDValue Amt = Op.getOperand(2);
7640   EVT AmtVT = Amt.getValueType();
7641 
7642   SDValue Tmp1 = DAG.getNode(ISD::SUB, dl, AmtVT,
7643                              DAG.getConstant(BitWidth, dl, AmtVT), Amt);
7644   SDValue Tmp2 = DAG.getNode(PPCISD::SRL, dl, VT, Lo, Amt);
7645   SDValue Tmp3 = DAG.getNode(PPCISD::SHL, dl, VT, Hi, Tmp1);
7646   SDValue Tmp4 = DAG.getNode(ISD::OR, dl, VT, Tmp2, Tmp3);
7647   SDValue Tmp5 = DAG.getNode(ISD::ADD, dl, AmtVT, Amt,
7648                              DAG.getConstant(-BitWidth, dl, AmtVT));
7649   SDValue Tmp6 = DAG.getNode(PPCISD::SRA, dl, VT, Hi, Tmp5);
7650   SDValue OutHi = DAG.getNode(PPCISD::SRA, dl, VT, Hi, Amt);
7651   SDValue OutLo = DAG.getSelectCC(dl, Tmp5, DAG.getConstant(0, dl, AmtVT),
7652                                   Tmp4, Tmp6, ISD::SETLE);
7653   SDValue OutOps[] = { OutLo, OutHi };
7654   return DAG.getMergeValues(OutOps, dl);
7655 }
7656 
7657 //===----------------------------------------------------------------------===//
7658 // Vector related lowering.
7659 //
7660 
7661 /// BuildSplatI - Build a canonical splati of Val with an element size of
7662 /// SplatSize.  Cast the result to VT.
7663 static SDValue BuildSplatI(int Val, unsigned SplatSize, EVT VT,
7664                            SelectionDAG &DAG, const SDLoc &dl) {
7665   assert(Val >= -16 && Val <= 15 && "vsplti is out of range!");
7666 
7667   static const MVT VTys[] = { // canonical VT to use for each size.
7668     MVT::v16i8, MVT::v8i16, MVT::Other, MVT::v4i32
7669   };
7670 
7671   EVT ReqVT = VT != MVT::Other ? VT : VTys[SplatSize-1];
7672 
7673   // Force vspltis[hw] -1 to vspltisb -1 to canonicalize.
7674   if (Val == -1)
7675     SplatSize = 1;
7676 
7677   EVT CanonicalVT = VTys[SplatSize-1];
7678 
7679   // Build a canonical splat for this value.
7680   return DAG.getBitcast(ReqVT, DAG.getConstant(Val, dl, CanonicalVT));
7681 }
7682 
7683 /// BuildIntrinsicOp - Return a unary operator intrinsic node with the
7684 /// specified intrinsic ID.
7685 static SDValue BuildIntrinsicOp(unsigned IID, SDValue Op, SelectionDAG &DAG,
7686                                 const SDLoc &dl, EVT DestVT = MVT::Other) {
7687   if (DestVT == MVT::Other) DestVT = Op.getValueType();
7688   return DAG.getNode(ISD::INTRINSIC_WO_CHAIN, dl, DestVT,
7689                      DAG.getConstant(IID, dl, MVT::i32), Op);
7690 }
7691 
7692 /// BuildIntrinsicOp - Return a binary operator intrinsic node with the
7693 /// specified intrinsic ID.
7694 static SDValue BuildIntrinsicOp(unsigned IID, SDValue LHS, SDValue RHS,
7695                                 SelectionDAG &DAG, const SDLoc &dl,
7696                                 EVT DestVT = MVT::Other) {
7697   if (DestVT == MVT::Other) DestVT = LHS.getValueType();
7698   return DAG.getNode(ISD::INTRINSIC_WO_CHAIN, dl, DestVT,
7699                      DAG.getConstant(IID, dl, MVT::i32), LHS, RHS);
7700 }
7701 
7702 /// BuildIntrinsicOp - Return a ternary operator intrinsic node with the
7703 /// specified intrinsic ID.
7704 static SDValue BuildIntrinsicOp(unsigned IID, SDValue Op0, SDValue Op1,
7705                                 SDValue Op2, SelectionDAG &DAG, const SDLoc &dl,
7706                                 EVT DestVT = MVT::Other) {
7707   if (DestVT == MVT::Other) DestVT = Op0.getValueType();
7708   return DAG.getNode(ISD::INTRINSIC_WO_CHAIN, dl, DestVT,
7709                      DAG.getConstant(IID, dl, MVT::i32), Op0, Op1, Op2);
7710 }
7711 
7712 /// BuildVSLDOI - Return a VECTOR_SHUFFLE that is a vsldoi of the specified
7713 /// amount.  The result has the specified value type.
7714 static SDValue BuildVSLDOI(SDValue LHS, SDValue RHS, unsigned Amt, EVT VT,
7715                            SelectionDAG &DAG, const SDLoc &dl) {
7716   // Force LHS/RHS to be the right type.
7717   LHS = DAG.getNode(ISD::BITCAST, dl, MVT::v16i8, LHS);
7718   RHS = DAG.getNode(ISD::BITCAST, dl, MVT::v16i8, RHS);
7719 
7720   int Ops[16];
7721   for (unsigned i = 0; i != 16; ++i)
7722     Ops[i] = i + Amt;
7723   SDValue T = DAG.getVectorShuffle(MVT::v16i8, dl, LHS, RHS, Ops);
7724   return DAG.getNode(ISD::BITCAST, dl, VT, T);
7725 }
7726 
7727 /// Do we have an efficient pattern in a .td file for this node?
7728 ///
7729 /// \param V - pointer to the BuildVectorSDNode being matched
7730 /// \param HasDirectMove - does this subtarget have VSR <-> GPR direct moves?
7731 ///
7732 /// There are some patterns where it is beneficial to keep a BUILD_VECTOR
7733 /// node as a BUILD_VECTOR node rather than expanding it. The patterns where
7734 /// the opposite is true (expansion is beneficial) are:
7735 /// - The node builds a vector out of integers that are not 32 or 64-bits
7736 /// - The node builds a vector out of constants
7737 /// - The node is a "load-and-splat"
7738 /// In all other cases, we will choose to keep the BUILD_VECTOR.
7739 static bool haveEfficientBuildVectorPattern(BuildVectorSDNode *V,
7740                                             bool HasDirectMove,
7741                                             bool HasP8Vector) {
7742   EVT VecVT = V->getValueType(0);
7743   bool RightType = VecVT == MVT::v2f64 ||
7744     (HasP8Vector && VecVT == MVT::v4f32) ||
7745     (HasDirectMove && (VecVT == MVT::v2i64 || VecVT == MVT::v4i32));
7746   if (!RightType)
7747     return false;
7748 
7749   bool IsSplat = true;
7750   bool IsLoad = false;
7751   SDValue Op0 = V->getOperand(0);
7752 
7753   // This function is called in a block that confirms the node is not a constant
7754   // splat. So a constant BUILD_VECTOR here means the vector is built out of
7755   // different constants.
7756   if (V->isConstant())
7757     return false;
7758   for (int i = 0, e = V->getNumOperands(); i < e; ++i) {
7759     if (V->getOperand(i).isUndef())
7760       return false;
7761     // We want to expand nodes that represent load-and-splat even if the
7762     // loaded value is a floating point truncation or conversion to int.
7763     if (V->getOperand(i).getOpcode() == ISD::LOAD ||
7764         (V->getOperand(i).getOpcode() == ISD::FP_ROUND &&
7765          V->getOperand(i).getOperand(0).getOpcode() == ISD::LOAD) ||
7766         (V->getOperand(i).getOpcode() == ISD::FP_TO_SINT &&
7767          V->getOperand(i).getOperand(0).getOpcode() == ISD::LOAD) ||
7768         (V->getOperand(i).getOpcode() == ISD::FP_TO_UINT &&
7769          V->getOperand(i).getOperand(0).getOpcode() == ISD::LOAD))
7770       IsLoad = true;
7771     // If the operands are different or the input is not a load and has more
7772     // uses than just this BV node, then it isn't a splat.
7773     if (V->getOperand(i) != Op0 ||
7774         (!IsLoad && !V->isOnlyUserOf(V->getOperand(i).getNode())))
7775       IsSplat = false;
7776   }
7777   return !(IsSplat && IsLoad);
7778 }
7779 
7780 // Lower BITCAST(f128, (build_pair i64, i64)) to BUILD_FP128.
7781 SDValue PPCTargetLowering::LowerBITCAST(SDValue Op, SelectionDAG &DAG) const {
7782 
7783   SDLoc dl(Op);
7784   SDValue Op0 = Op->getOperand(0);
7785 
7786   if (!EnableQuadPrecision ||
7787       (Op.getValueType() != MVT::f128 ) ||
7788       (Op0.getOpcode() != ISD::BUILD_PAIR) ||
7789       (Op0.getOperand(0).getValueType() !=  MVT::i64) ||
7790       (Op0.getOperand(1).getValueType() != MVT::i64))
7791     return SDValue();
7792 
7793   return DAG.getNode(PPCISD::BUILD_FP128, dl, MVT::f128, Op0.getOperand(0),
7794                      Op0.getOperand(1));
7795 }
7796 
7797 // If this is a case we can't handle, return null and let the default
7798 // expansion code take care of it.  If we CAN select this case, and if it
7799 // selects to a single instruction, return Op.  Otherwise, if we can codegen
7800 // this case more efficiently than a constant pool load, lower it to the
7801 // sequence of ops that should be used.
7802 SDValue PPCTargetLowering::LowerBUILD_VECTOR(SDValue Op,
7803                                              SelectionDAG &DAG) const {
7804   SDLoc dl(Op);
7805   BuildVectorSDNode *BVN = dyn_cast<BuildVectorSDNode>(Op.getNode());
7806   assert(BVN && "Expected a BuildVectorSDNode in LowerBUILD_VECTOR");
7807 
7808   if (Subtarget.hasQPX() && Op.getValueType() == MVT::v4i1) {
7809     // We first build an i32 vector, load it into a QPX register,
7810     // then convert it to a floating-point vector and compare it
7811     // to a zero vector to get the boolean result.
7812     MachineFrameInfo &MFI = DAG.getMachineFunction().getFrameInfo();
7813     int FrameIdx = MFI.CreateStackObject(16, 16, false);
7814     MachinePointerInfo PtrInfo =
7815         MachinePointerInfo::getFixedStack(DAG.getMachineFunction(), FrameIdx);
7816     EVT PtrVT = getPointerTy(DAG.getDataLayout());
7817     SDValue FIdx = DAG.getFrameIndex(FrameIdx, PtrVT);
7818 
7819     assert(BVN->getNumOperands() == 4 &&
7820       "BUILD_VECTOR for v4i1 does not have 4 operands");
7821 
7822     bool IsConst = true;
7823     for (unsigned i = 0; i < 4; ++i) {
7824       if (BVN->getOperand(i).isUndef()) continue;
7825       if (!isa<ConstantSDNode>(BVN->getOperand(i))) {
7826         IsConst = false;
7827         break;
7828       }
7829     }
7830 
7831     if (IsConst) {
7832       Constant *One =
7833         ConstantFP::get(Type::getFloatTy(*DAG.getContext()), 1.0);
7834       Constant *NegOne =
7835         ConstantFP::get(Type::getFloatTy(*DAG.getContext()), -1.0);
7836 
7837       Constant *CV[4];
7838       for (unsigned i = 0; i < 4; ++i) {
7839         if (BVN->getOperand(i).isUndef())
7840           CV[i] = UndefValue::get(Type::getFloatTy(*DAG.getContext()));
7841         else if (isNullConstant(BVN->getOperand(i)))
7842           CV[i] = NegOne;
7843         else
7844           CV[i] = One;
7845       }
7846 
7847       Constant *CP = ConstantVector::get(CV);
7848       SDValue CPIdx = DAG.getConstantPool(CP, getPointerTy(DAG.getDataLayout()),
7849                                           16 /* alignment */);
7850 
7851       SDValue Ops[] = {DAG.getEntryNode(), CPIdx};
7852       SDVTList VTs = DAG.getVTList({MVT::v4i1, /*chain*/ MVT::Other});
7853       return DAG.getMemIntrinsicNode(
7854           PPCISD::QVLFSb, dl, VTs, Ops, MVT::v4f32,
7855           MachinePointerInfo::getConstantPool(DAG.getMachineFunction()));
7856     }
7857 
7858     SmallVector<SDValue, 4> Stores;
7859     for (unsigned i = 0; i < 4; ++i) {
7860       if (BVN->getOperand(i).isUndef()) continue;
7861 
7862       unsigned Offset = 4*i;
7863       SDValue Idx = DAG.getConstant(Offset, dl, FIdx.getValueType());
7864       Idx = DAG.getNode(ISD::ADD, dl, FIdx.getValueType(), FIdx, Idx);
7865 
7866       unsigned StoreSize = BVN->getOperand(i).getValueType().getStoreSize();
7867       if (StoreSize > 4) {
7868         Stores.push_back(
7869             DAG.getTruncStore(DAG.getEntryNode(), dl, BVN->getOperand(i), Idx,
7870                               PtrInfo.getWithOffset(Offset), MVT::i32));
7871       } else {
7872         SDValue StoreValue = BVN->getOperand(i);
7873         if (StoreSize < 4)
7874           StoreValue = DAG.getNode(ISD::ANY_EXTEND, dl, MVT::i32, StoreValue);
7875 
7876         Stores.push_back(DAG.getStore(DAG.getEntryNode(), dl, StoreValue, Idx,
7877                                       PtrInfo.getWithOffset(Offset)));
7878       }
7879     }
7880 
7881     SDValue StoreChain;
7882     if (!Stores.empty())
7883       StoreChain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, Stores);
7884     else
7885       StoreChain = DAG.getEntryNode();
7886 
7887     // Now load from v4i32 into the QPX register; this will extend it to
7888     // v4i64 but not yet convert it to a floating point. Nevertheless, this
7889     // is typed as v4f64 because the QPX register integer states are not
7890     // explicitly represented.
7891 
7892     SDValue Ops[] = {StoreChain,
7893                      DAG.getConstant(Intrinsic::ppc_qpx_qvlfiwz, dl, MVT::i32),
7894                      FIdx};
7895     SDVTList VTs = DAG.getVTList({MVT::v4f64, /*chain*/ MVT::Other});
7896 
7897     SDValue LoadedVect = DAG.getMemIntrinsicNode(ISD::INTRINSIC_W_CHAIN,
7898       dl, VTs, Ops, MVT::v4i32, PtrInfo);
7899     LoadedVect = DAG.getNode(ISD::INTRINSIC_WO_CHAIN, dl, MVT::v4f64,
7900       DAG.getConstant(Intrinsic::ppc_qpx_qvfcfidu, dl, MVT::i32),
7901       LoadedVect);
7902 
7903     SDValue FPZeros = DAG.getConstantFP(0.0, dl, MVT::v4f64);
7904 
7905     return DAG.getSetCC(dl, MVT::v4i1, LoadedVect, FPZeros, ISD::SETEQ);
7906   }
7907 
7908   // All other QPX vectors are handled by generic code.
7909   if (Subtarget.hasQPX())
7910     return SDValue();
7911 
7912   // Check if this is a splat of a constant value.
7913   APInt APSplatBits, APSplatUndef;
7914   unsigned SplatBitSize;
7915   bool HasAnyUndefs;
7916   if (! BVN->isConstantSplat(APSplatBits, APSplatUndef, SplatBitSize,
7917                              HasAnyUndefs, 0, !Subtarget.isLittleEndian()) ||
7918       SplatBitSize > 32) {
7919     // BUILD_VECTOR nodes that are not constant splats of up to 32-bits can be
7920     // lowered to VSX instructions under certain conditions.
7921     // Without VSX, there is no pattern more efficient than expanding the node.
7922     if (Subtarget.hasVSX() &&
7923         haveEfficientBuildVectorPattern(BVN, Subtarget.hasDirectMove(),
7924                                         Subtarget.hasP8Vector()))
7925       return Op;
7926     return SDValue();
7927   }
7928 
7929   unsigned SplatBits = APSplatBits.getZExtValue();
7930   unsigned SplatUndef = APSplatUndef.getZExtValue();
7931   unsigned SplatSize = SplatBitSize / 8;
7932 
7933   // First, handle single instruction cases.
7934 
7935   // All zeros?
7936   if (SplatBits == 0) {
7937     // Canonicalize all zero vectors to be v4i32.
7938     if (Op.getValueType() != MVT::v4i32 || HasAnyUndefs) {
7939       SDValue Z = DAG.getConstant(0, dl, MVT::v4i32);
7940       Op = DAG.getNode(ISD::BITCAST, dl, Op.getValueType(), Z);
7941     }
7942     return Op;
7943   }
7944 
7945   // We have XXSPLTIB for constant splats one byte wide
7946   if (Subtarget.hasP9Vector() && SplatSize == 1) {
7947     // This is a splat of 1-byte elements with some elements potentially undef.
7948     // Rather than trying to match undef in the SDAG patterns, ensure that all
7949     // elements are the same constant.
7950     if (HasAnyUndefs || ISD::isBuildVectorAllOnes(BVN)) {
7951       SmallVector<SDValue, 16> Ops(16, DAG.getConstant(SplatBits,
7952                                                        dl, MVT::i32));
7953       SDValue NewBV = DAG.getBuildVector(MVT::v16i8, dl, Ops);
7954       if (Op.getValueType() != MVT::v16i8)
7955         return DAG.getBitcast(Op.getValueType(), NewBV);
7956       return NewBV;
7957     }
7958 
7959     // BuildVectorSDNode::isConstantSplat() is actually pretty smart. It'll
7960     // detect that constant splats like v8i16: 0xABAB are really just splats
7961     // of a 1-byte constant. In this case, we need to convert the node to a
7962     // splat of v16i8 and a bitcast.
7963     if (Op.getValueType() != MVT::v16i8)
7964       return DAG.getBitcast(Op.getValueType(),
7965                             DAG.getConstant(SplatBits, dl, MVT::v16i8));
7966 
7967     return Op;
7968   }
7969 
7970   // If the sign extended value is in the range [-16,15], use VSPLTI[bhw].
7971   int32_t SextVal= (int32_t(SplatBits << (32-SplatBitSize)) >>
7972                     (32-SplatBitSize));
7973   if (SextVal >= -16 && SextVal <= 15)
7974     return BuildSplatI(SextVal, SplatSize, Op.getValueType(), DAG, dl);
7975 
7976   // Two instruction sequences.
7977 
7978   // If this value is in the range [-32,30] and is even, use:
7979   //     VSPLTI[bhw](val/2) + VSPLTI[bhw](val/2)
7980   // If this value is in the range [17,31] and is odd, use:
7981   //     VSPLTI[bhw](val-16) - VSPLTI[bhw](-16)
7982   // If this value is in the range [-31,-17] and is odd, use:
7983   //     VSPLTI[bhw](val+16) + VSPLTI[bhw](-16)
7984   // Note the last two are three-instruction sequences.
7985   if (SextVal >= -32 && SextVal <= 31) {
7986     // To avoid having these optimizations undone by constant folding,
7987     // we convert to a pseudo that will be expanded later into one of
7988     // the above forms.
7989     SDValue Elt = DAG.getConstant(SextVal, dl, MVT::i32);
7990     EVT VT = (SplatSize == 1 ? MVT::v16i8 :
7991               (SplatSize == 2 ? MVT::v8i16 : MVT::v4i32));
7992     SDValue EltSize = DAG.getConstant(SplatSize, dl, MVT::i32);
7993     SDValue RetVal = DAG.getNode(PPCISD::VADD_SPLAT, dl, VT, Elt, EltSize);
7994     if (VT == Op.getValueType())
7995       return RetVal;
7996     else
7997       return DAG.getNode(ISD::BITCAST, dl, Op.getValueType(), RetVal);
7998   }
7999 
8000   // If this is 0x8000_0000 x 4, turn into vspltisw + vslw.  If it is
8001   // 0x7FFF_FFFF x 4, turn it into not(0x8000_0000).  This is important
8002   // for fneg/fabs.
8003   if (SplatSize == 4 && SplatBits == (0x7FFFFFFF&~SplatUndef)) {
8004     // Make -1 and vspltisw -1:
8005     SDValue OnesV = BuildSplatI(-1, 4, MVT::v4i32, DAG, dl);
8006 
8007     // Make the VSLW intrinsic, computing 0x8000_0000.
8008     SDValue Res = BuildIntrinsicOp(Intrinsic::ppc_altivec_vslw, OnesV,
8009                                    OnesV, DAG, dl);
8010 
8011     // xor by OnesV to invert it.
8012     Res = DAG.getNode(ISD::XOR, dl, MVT::v4i32, Res, OnesV);
8013     return DAG.getNode(ISD::BITCAST, dl, Op.getValueType(), Res);
8014   }
8015 
8016   // Check to see if this is a wide variety of vsplti*, binop self cases.
8017   static const signed char SplatCsts[] = {
8018     -1, 1, -2, 2, -3, 3, -4, 4, -5, 5, -6, 6, -7, 7,
8019     -8, 8, -9, 9, -10, 10, -11, 11, -12, 12, -13, 13, 14, -14, 15, -15, -16
8020   };
8021 
8022   for (unsigned idx = 0; idx < array_lengthof(SplatCsts); ++idx) {
8023     // Indirect through the SplatCsts array so that we favor 'vsplti -1' for
8024     // cases which are ambiguous (e.g. formation of 0x8000_0000).  'vsplti -1'
8025     int i = SplatCsts[idx];
8026 
8027     // Figure out what shift amount will be used by altivec if shifted by i in
8028     // this splat size.
8029     unsigned TypeShiftAmt = i & (SplatBitSize-1);
8030 
8031     // vsplti + shl self.
8032     if (SextVal == (int)((unsigned)i << TypeShiftAmt)) {
8033       SDValue Res = BuildSplatI(i, SplatSize, MVT::Other, DAG, dl);
8034       static const unsigned IIDs[] = { // Intrinsic to use for each size.
8035         Intrinsic::ppc_altivec_vslb, Intrinsic::ppc_altivec_vslh, 0,
8036         Intrinsic::ppc_altivec_vslw
8037       };
8038       Res = BuildIntrinsicOp(IIDs[SplatSize-1], Res, Res, DAG, dl);
8039       return DAG.getNode(ISD::BITCAST, dl, Op.getValueType(), Res);
8040     }
8041 
8042     // vsplti + srl self.
8043     if (SextVal == (int)((unsigned)i >> TypeShiftAmt)) {
8044       SDValue Res = BuildSplatI(i, SplatSize, MVT::Other, DAG, dl);
8045       static const unsigned IIDs[] = { // Intrinsic to use for each size.
8046         Intrinsic::ppc_altivec_vsrb, Intrinsic::ppc_altivec_vsrh, 0,
8047         Intrinsic::ppc_altivec_vsrw
8048       };
8049       Res = BuildIntrinsicOp(IIDs[SplatSize-1], Res, Res, DAG, dl);
8050       return DAG.getNode(ISD::BITCAST, dl, Op.getValueType(), Res);
8051     }
8052 
8053     // vsplti + sra self.
8054     if (SextVal == (int)((unsigned)i >> TypeShiftAmt)) {
8055       SDValue Res = BuildSplatI(i, SplatSize, MVT::Other, DAG, dl);
8056       static const unsigned IIDs[] = { // Intrinsic to use for each size.
8057         Intrinsic::ppc_altivec_vsrab, Intrinsic::ppc_altivec_vsrah, 0,
8058         Intrinsic::ppc_altivec_vsraw
8059       };
8060       Res = BuildIntrinsicOp(IIDs[SplatSize-1], Res, Res, DAG, dl);
8061       return DAG.getNode(ISD::BITCAST, dl, Op.getValueType(), Res);
8062     }
8063 
8064     // vsplti + rol self.
8065     if (SextVal == (int)(((unsigned)i << TypeShiftAmt) |
8066                          ((unsigned)i >> (SplatBitSize-TypeShiftAmt)))) {
8067       SDValue Res = BuildSplatI(i, SplatSize, MVT::Other, DAG, dl);
8068       static const unsigned IIDs[] = { // Intrinsic to use for each size.
8069         Intrinsic::ppc_altivec_vrlb, Intrinsic::ppc_altivec_vrlh, 0,
8070         Intrinsic::ppc_altivec_vrlw
8071       };
8072       Res = BuildIntrinsicOp(IIDs[SplatSize-1], Res, Res, DAG, dl);
8073       return DAG.getNode(ISD::BITCAST, dl, Op.getValueType(), Res);
8074     }
8075 
8076     // t = vsplti c, result = vsldoi t, t, 1
8077     if (SextVal == (int)(((unsigned)i << 8) | (i < 0 ? 0xFF : 0))) {
8078       SDValue T = BuildSplatI(i, SplatSize, MVT::v16i8, DAG, dl);
8079       unsigned Amt = Subtarget.isLittleEndian() ? 15 : 1;
8080       return BuildVSLDOI(T, T, Amt, Op.getValueType(), DAG, dl);
8081     }
8082     // t = vsplti c, result = vsldoi t, t, 2
8083     if (SextVal == (int)(((unsigned)i << 16) | (i < 0 ? 0xFFFF : 0))) {
8084       SDValue T = BuildSplatI(i, SplatSize, MVT::v16i8, DAG, dl);
8085       unsigned Amt = Subtarget.isLittleEndian() ? 14 : 2;
8086       return BuildVSLDOI(T, T, Amt, Op.getValueType(), DAG, dl);
8087     }
8088     // t = vsplti c, result = vsldoi t, t, 3
8089     if (SextVal == (int)(((unsigned)i << 24) | (i < 0 ? 0xFFFFFF : 0))) {
8090       SDValue T = BuildSplatI(i, SplatSize, MVT::v16i8, DAG, dl);
8091       unsigned Amt = Subtarget.isLittleEndian() ? 13 : 3;
8092       return BuildVSLDOI(T, T, Amt, Op.getValueType(), DAG, dl);
8093     }
8094   }
8095 
8096   return SDValue();
8097 }
8098 
8099 /// GeneratePerfectShuffle - Given an entry in the perfect-shuffle table, emit
8100 /// the specified operations to build the shuffle.
8101 static SDValue GeneratePerfectShuffle(unsigned PFEntry, SDValue LHS,
8102                                       SDValue RHS, SelectionDAG &DAG,
8103                                       const SDLoc &dl) {
8104   unsigned OpNum = (PFEntry >> 26) & 0x0F;
8105   unsigned LHSID = (PFEntry >> 13) & ((1 << 13)-1);
8106   unsigned RHSID = (PFEntry >>  0) & ((1 << 13)-1);
8107 
8108   enum {
8109     OP_COPY = 0,  // Copy, used for things like <u,u,u,3> to say it is <0,1,2,3>
8110     OP_VMRGHW,
8111     OP_VMRGLW,
8112     OP_VSPLTISW0,
8113     OP_VSPLTISW1,
8114     OP_VSPLTISW2,
8115     OP_VSPLTISW3,
8116     OP_VSLDOI4,
8117     OP_VSLDOI8,
8118     OP_VSLDOI12
8119   };
8120 
8121   if (OpNum == OP_COPY) {
8122     if (LHSID == (1*9+2)*9+3) return LHS;
8123     assert(LHSID == ((4*9+5)*9+6)*9+7 && "Illegal OP_COPY!");
8124     return RHS;
8125   }
8126 
8127   SDValue OpLHS, OpRHS;
8128   OpLHS = GeneratePerfectShuffle(PerfectShuffleTable[LHSID], LHS, RHS, DAG, dl);
8129   OpRHS = GeneratePerfectShuffle(PerfectShuffleTable[RHSID], LHS, RHS, DAG, dl);
8130 
8131   int ShufIdxs[16];
8132   switch (OpNum) {
8133   default: llvm_unreachable("Unknown i32 permute!");
8134   case OP_VMRGHW:
8135     ShufIdxs[ 0] =  0; ShufIdxs[ 1] =  1; ShufIdxs[ 2] =  2; ShufIdxs[ 3] =  3;
8136     ShufIdxs[ 4] = 16; ShufIdxs[ 5] = 17; ShufIdxs[ 6] = 18; ShufIdxs[ 7] = 19;
8137     ShufIdxs[ 8] =  4; ShufIdxs[ 9] =  5; ShufIdxs[10] =  6; ShufIdxs[11] =  7;
8138     ShufIdxs[12] = 20; ShufIdxs[13] = 21; ShufIdxs[14] = 22; ShufIdxs[15] = 23;
8139     break;
8140   case OP_VMRGLW:
8141     ShufIdxs[ 0] =  8; ShufIdxs[ 1] =  9; ShufIdxs[ 2] = 10; ShufIdxs[ 3] = 11;
8142     ShufIdxs[ 4] = 24; ShufIdxs[ 5] = 25; ShufIdxs[ 6] = 26; ShufIdxs[ 7] = 27;
8143     ShufIdxs[ 8] = 12; ShufIdxs[ 9] = 13; ShufIdxs[10] = 14; ShufIdxs[11] = 15;
8144     ShufIdxs[12] = 28; ShufIdxs[13] = 29; ShufIdxs[14] = 30; ShufIdxs[15] = 31;
8145     break;
8146   case OP_VSPLTISW0:
8147     for (unsigned i = 0; i != 16; ++i)
8148       ShufIdxs[i] = (i&3)+0;
8149     break;
8150   case OP_VSPLTISW1:
8151     for (unsigned i = 0; i != 16; ++i)
8152       ShufIdxs[i] = (i&3)+4;
8153     break;
8154   case OP_VSPLTISW2:
8155     for (unsigned i = 0; i != 16; ++i)
8156       ShufIdxs[i] = (i&3)+8;
8157     break;
8158   case OP_VSPLTISW3:
8159     for (unsigned i = 0; i != 16; ++i)
8160       ShufIdxs[i] = (i&3)+12;
8161     break;
8162   case OP_VSLDOI4:
8163     return BuildVSLDOI(OpLHS, OpRHS, 4, OpLHS.getValueType(), DAG, dl);
8164   case OP_VSLDOI8:
8165     return BuildVSLDOI(OpLHS, OpRHS, 8, OpLHS.getValueType(), DAG, dl);
8166   case OP_VSLDOI12:
8167     return BuildVSLDOI(OpLHS, OpRHS, 12, OpLHS.getValueType(), DAG, dl);
8168   }
8169   EVT VT = OpLHS.getValueType();
8170   OpLHS = DAG.getNode(ISD::BITCAST, dl, MVT::v16i8, OpLHS);
8171   OpRHS = DAG.getNode(ISD::BITCAST, dl, MVT::v16i8, OpRHS);
8172   SDValue T = DAG.getVectorShuffle(MVT::v16i8, dl, OpLHS, OpRHS, ShufIdxs);
8173   return DAG.getNode(ISD::BITCAST, dl, VT, T);
8174 }
8175 
8176 /// lowerToVINSERTB - Return the SDValue if this VECTOR_SHUFFLE can be handled
8177 /// by the VINSERTB instruction introduced in ISA 3.0, else just return default
8178 /// SDValue.
8179 SDValue PPCTargetLowering::lowerToVINSERTB(ShuffleVectorSDNode *N,
8180                                            SelectionDAG &DAG) const {
8181   const unsigned BytesInVector = 16;
8182   bool IsLE = Subtarget.isLittleEndian();
8183   SDLoc dl(N);
8184   SDValue V1 = N->getOperand(0);
8185   SDValue V2 = N->getOperand(1);
8186   unsigned ShiftElts = 0, InsertAtByte = 0;
8187   bool Swap = false;
8188 
8189   // Shifts required to get the byte we want at element 7.
8190   unsigned LittleEndianShifts[] = {8, 7,  6,  5,  4,  3,  2,  1,
8191                                    0, 15, 14, 13, 12, 11, 10, 9};
8192   unsigned BigEndianShifts[] = {9, 10, 11, 12, 13, 14, 15, 0,
8193                                 1, 2,  3,  4,  5,  6,  7,  8};
8194 
8195   ArrayRef<int> Mask = N->getMask();
8196   int OriginalOrder[] = {0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15};
8197 
8198   // For each mask element, find out if we're just inserting something
8199   // from V2 into V1 or vice versa.
8200   // Possible permutations inserting an element from V2 into V1:
8201   //   X, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15
8202   //   0, X, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15
8203   //   ...
8204   //   0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, X
8205   // Inserting from V1 into V2 will be similar, except mask range will be
8206   // [16,31].
8207 
8208   bool FoundCandidate = false;
8209   // If both vector operands for the shuffle are the same vector, the mask
8210   // will contain only elements from the first one and the second one will be
8211   // undef.
8212   unsigned VINSERTBSrcElem = IsLE ? 8 : 7;
8213   // Go through the mask of half-words to find an element that's being moved
8214   // from one vector to the other.
8215   for (unsigned i = 0; i < BytesInVector; ++i) {
8216     unsigned CurrentElement = Mask[i];
8217     // If 2nd operand is undefined, we should only look for element 7 in the
8218     // Mask.
8219     if (V2.isUndef() && CurrentElement != VINSERTBSrcElem)
8220       continue;
8221 
8222     bool OtherElementsInOrder = true;
8223     // Examine the other elements in the Mask to see if they're in original
8224     // order.
8225     for (unsigned j = 0; j < BytesInVector; ++j) {
8226       if (j == i)
8227         continue;
8228       // If CurrentElement is from V1 [0,15], then we the rest of the Mask to be
8229       // from V2 [16,31] and vice versa.  Unless the 2nd operand is undefined,
8230       // in which we always assume we're always picking from the 1st operand.
8231       int MaskOffset =
8232           (!V2.isUndef() && CurrentElement < BytesInVector) ? BytesInVector : 0;
8233       if (Mask[j] != OriginalOrder[j] + MaskOffset) {
8234         OtherElementsInOrder = false;
8235         break;
8236       }
8237     }
8238     // If other elements are in original order, we record the number of shifts
8239     // we need to get the element we want into element 7. Also record which byte
8240     // in the vector we should insert into.
8241     if (OtherElementsInOrder) {
8242       // If 2nd operand is undefined, we assume no shifts and no swapping.
8243       if (V2.isUndef()) {
8244         ShiftElts = 0;
8245         Swap = false;
8246       } else {
8247         // Only need the last 4-bits for shifts because operands will be swapped if CurrentElement is >= 2^4.
8248         ShiftElts = IsLE ? LittleEndianShifts[CurrentElement & 0xF]
8249                          : BigEndianShifts[CurrentElement & 0xF];
8250         Swap = CurrentElement < BytesInVector;
8251       }
8252       InsertAtByte = IsLE ? BytesInVector - (i + 1) : i;
8253       FoundCandidate = true;
8254       break;
8255     }
8256   }
8257 
8258   if (!FoundCandidate)
8259     return SDValue();
8260 
8261   // Candidate found, construct the proper SDAG sequence with VINSERTB,
8262   // optionally with VECSHL if shift is required.
8263   if (Swap)
8264     std::swap(V1, V2);
8265   if (V2.isUndef())
8266     V2 = V1;
8267   if (ShiftElts) {
8268     SDValue Shl = DAG.getNode(PPCISD::VECSHL, dl, MVT::v16i8, V2, V2,
8269                               DAG.getConstant(ShiftElts, dl, MVT::i32));
8270     return DAG.getNode(PPCISD::VECINSERT, dl, MVT::v16i8, V1, Shl,
8271                        DAG.getConstant(InsertAtByte, dl, MVT::i32));
8272   }
8273   return DAG.getNode(PPCISD::VECINSERT, dl, MVT::v16i8, V1, V2,
8274                      DAG.getConstant(InsertAtByte, dl, MVT::i32));
8275 }
8276 
8277 /// lowerToVINSERTH - Return the SDValue if this VECTOR_SHUFFLE can be handled
8278 /// by the VINSERTH instruction introduced in ISA 3.0, else just return default
8279 /// SDValue.
8280 SDValue PPCTargetLowering::lowerToVINSERTH(ShuffleVectorSDNode *N,
8281                                            SelectionDAG &DAG) const {
8282   const unsigned NumHalfWords = 8;
8283   const unsigned BytesInVector = NumHalfWords * 2;
8284   // Check that the shuffle is on half-words.
8285   if (!isNByteElemShuffleMask(N, 2, 1))
8286     return SDValue();
8287 
8288   bool IsLE = Subtarget.isLittleEndian();
8289   SDLoc dl(N);
8290   SDValue V1 = N->getOperand(0);
8291   SDValue V2 = N->getOperand(1);
8292   unsigned ShiftElts = 0, InsertAtByte = 0;
8293   bool Swap = false;
8294 
8295   // Shifts required to get the half-word we want at element 3.
8296   unsigned LittleEndianShifts[] = {4, 3, 2, 1, 0, 7, 6, 5};
8297   unsigned BigEndianShifts[] = {5, 6, 7, 0, 1, 2, 3, 4};
8298 
8299   uint32_t Mask = 0;
8300   uint32_t OriginalOrderLow = 0x1234567;
8301   uint32_t OriginalOrderHigh = 0x89ABCDEF;
8302   // Now we look at mask elements 0,2,4,6,8,10,12,14.  Pack the mask into a
8303   // 32-bit space, only need 4-bit nibbles per element.
8304   for (unsigned i = 0; i < NumHalfWords; ++i) {
8305     unsigned MaskShift = (NumHalfWords - 1 - i) * 4;
8306     Mask |= ((uint32_t)(N->getMaskElt(i * 2) / 2) << MaskShift);
8307   }
8308 
8309   // For each mask element, find out if we're just inserting something
8310   // from V2 into V1 or vice versa.  Possible permutations inserting an element
8311   // from V2 into V1:
8312   //   X, 1, 2, 3, 4, 5, 6, 7
8313   //   0, X, 2, 3, 4, 5, 6, 7
8314   //   0, 1, X, 3, 4, 5, 6, 7
8315   //   0, 1, 2, X, 4, 5, 6, 7
8316   //   0, 1, 2, 3, X, 5, 6, 7
8317   //   0, 1, 2, 3, 4, X, 6, 7
8318   //   0, 1, 2, 3, 4, 5, X, 7
8319   //   0, 1, 2, 3, 4, 5, 6, X
8320   // Inserting from V1 into V2 will be similar, except mask range will be [8,15].
8321 
8322   bool FoundCandidate = false;
8323   // Go through the mask of half-words to find an element that's being moved
8324   // from one vector to the other.
8325   for (unsigned i = 0; i < NumHalfWords; ++i) {
8326     unsigned MaskShift = (NumHalfWords - 1 - i) * 4;
8327     uint32_t MaskOneElt = (Mask >> MaskShift) & 0xF;
8328     uint32_t MaskOtherElts = ~(0xF << MaskShift);
8329     uint32_t TargetOrder = 0x0;
8330 
8331     // If both vector operands for the shuffle are the same vector, the mask
8332     // will contain only elements from the first one and the second one will be
8333     // undef.
8334     if (V2.isUndef()) {
8335       ShiftElts = 0;
8336       unsigned VINSERTHSrcElem = IsLE ? 4 : 3;
8337       TargetOrder = OriginalOrderLow;
8338       Swap = false;
8339       // Skip if not the correct element or mask of other elements don't equal
8340       // to our expected order.
8341       if (MaskOneElt == VINSERTHSrcElem &&
8342           (Mask & MaskOtherElts) == (TargetOrder & MaskOtherElts)) {
8343         InsertAtByte = IsLE ? BytesInVector - (i + 1) * 2 : i * 2;
8344         FoundCandidate = true;
8345         break;
8346       }
8347     } else { // If both operands are defined.
8348       // Target order is [8,15] if the current mask is between [0,7].
8349       TargetOrder =
8350           (MaskOneElt < NumHalfWords) ? OriginalOrderHigh : OriginalOrderLow;
8351       // Skip if mask of other elements don't equal our expected order.
8352       if ((Mask & MaskOtherElts) == (TargetOrder & MaskOtherElts)) {
8353         // We only need the last 3 bits for the number of shifts.
8354         ShiftElts = IsLE ? LittleEndianShifts[MaskOneElt & 0x7]
8355                          : BigEndianShifts[MaskOneElt & 0x7];
8356         InsertAtByte = IsLE ? BytesInVector - (i + 1) * 2 : i * 2;
8357         Swap = MaskOneElt < NumHalfWords;
8358         FoundCandidate = true;
8359         break;
8360       }
8361     }
8362   }
8363 
8364   if (!FoundCandidate)
8365     return SDValue();
8366 
8367   // Candidate found, construct the proper SDAG sequence with VINSERTH,
8368   // optionally with VECSHL if shift is required.
8369   if (Swap)
8370     std::swap(V1, V2);
8371   if (V2.isUndef())
8372     V2 = V1;
8373   SDValue Conv1 = DAG.getNode(ISD::BITCAST, dl, MVT::v8i16, V1);
8374   if (ShiftElts) {
8375     // Double ShiftElts because we're left shifting on v16i8 type.
8376     SDValue Shl = DAG.getNode(PPCISD::VECSHL, dl, MVT::v16i8, V2, V2,
8377                               DAG.getConstant(2 * ShiftElts, dl, MVT::i32));
8378     SDValue Conv2 = DAG.getNode(ISD::BITCAST, dl, MVT::v8i16, Shl);
8379     SDValue Ins = DAG.getNode(PPCISD::VECINSERT, dl, MVT::v8i16, Conv1, Conv2,
8380                               DAG.getConstant(InsertAtByte, dl, MVT::i32));
8381     return DAG.getNode(ISD::BITCAST, dl, MVT::v16i8, Ins);
8382   }
8383   SDValue Conv2 = DAG.getNode(ISD::BITCAST, dl, MVT::v8i16, V2);
8384   SDValue Ins = DAG.getNode(PPCISD::VECINSERT, dl, MVT::v8i16, Conv1, Conv2,
8385                             DAG.getConstant(InsertAtByte, dl, MVT::i32));
8386   return DAG.getNode(ISD::BITCAST, dl, MVT::v16i8, Ins);
8387 }
8388 
8389 /// LowerVECTOR_SHUFFLE - Return the code we lower for VECTOR_SHUFFLE.  If this
8390 /// is a shuffle we can handle in a single instruction, return it.  Otherwise,
8391 /// return the code it can be lowered into.  Worst case, it can always be
8392 /// lowered into a vperm.
8393 SDValue PPCTargetLowering::LowerVECTOR_SHUFFLE(SDValue Op,
8394                                                SelectionDAG &DAG) const {
8395   SDLoc dl(Op);
8396   SDValue V1 = Op.getOperand(0);
8397   SDValue V2 = Op.getOperand(1);
8398   ShuffleVectorSDNode *SVOp = cast<ShuffleVectorSDNode>(Op);
8399   EVT VT = Op.getValueType();
8400   bool isLittleEndian = Subtarget.isLittleEndian();
8401 
8402   unsigned ShiftElts, InsertAtByte;
8403   bool Swap = false;
8404   if (Subtarget.hasP9Vector() &&
8405       PPC::isXXINSERTWMask(SVOp, ShiftElts, InsertAtByte, Swap,
8406                            isLittleEndian)) {
8407     if (Swap)
8408       std::swap(V1, V2);
8409     SDValue Conv1 = DAG.getNode(ISD::BITCAST, dl, MVT::v4i32, V1);
8410     SDValue Conv2 = DAG.getNode(ISD::BITCAST, dl, MVT::v4i32, V2);
8411     if (ShiftElts) {
8412       SDValue Shl = DAG.getNode(PPCISD::VECSHL, dl, MVT::v4i32, Conv2, Conv2,
8413                                 DAG.getConstant(ShiftElts, dl, MVT::i32));
8414       SDValue Ins = DAG.getNode(PPCISD::VECINSERT, dl, MVT::v4i32, Conv1, Shl,
8415                                 DAG.getConstant(InsertAtByte, dl, MVT::i32));
8416       return DAG.getNode(ISD::BITCAST, dl, MVT::v16i8, Ins);
8417     }
8418     SDValue Ins = DAG.getNode(PPCISD::VECINSERT, dl, MVT::v4i32, Conv1, Conv2,
8419                               DAG.getConstant(InsertAtByte, dl, MVT::i32));
8420     return DAG.getNode(ISD::BITCAST, dl, MVT::v16i8, Ins);
8421   }
8422 
8423   if (Subtarget.hasP9Altivec()) {
8424     SDValue NewISDNode;
8425     if ((NewISDNode = lowerToVINSERTH(SVOp, DAG)))
8426       return NewISDNode;
8427 
8428     if ((NewISDNode = lowerToVINSERTB(SVOp, DAG)))
8429       return NewISDNode;
8430   }
8431 
8432   if (Subtarget.hasVSX() &&
8433       PPC::isXXSLDWIShuffleMask(SVOp, ShiftElts, Swap, isLittleEndian)) {
8434     if (Swap)
8435       std::swap(V1, V2);
8436     SDValue Conv1 = DAG.getNode(ISD::BITCAST, dl, MVT::v4i32, V1);
8437     SDValue Conv2 =
8438         DAG.getNode(ISD::BITCAST, dl, MVT::v4i32, V2.isUndef() ? V1 : V2);
8439 
8440     SDValue Shl = DAG.getNode(PPCISD::VECSHL, dl, MVT::v4i32, Conv1, Conv2,
8441                               DAG.getConstant(ShiftElts, dl, MVT::i32));
8442     return DAG.getNode(ISD::BITCAST, dl, MVT::v16i8, Shl);
8443   }
8444 
8445   if (Subtarget.hasVSX() &&
8446     PPC::isXXPERMDIShuffleMask(SVOp, ShiftElts, Swap, isLittleEndian)) {
8447     if (Swap)
8448       std::swap(V1, V2);
8449     SDValue Conv1 = DAG.getNode(ISD::BITCAST, dl, MVT::v2i64, V1);
8450     SDValue Conv2 =
8451         DAG.getNode(ISD::BITCAST, dl, MVT::v2i64, V2.isUndef() ? V1 : V2);
8452 
8453     SDValue PermDI = DAG.getNode(PPCISD::XXPERMDI, dl, MVT::v2i64, Conv1, Conv2,
8454                               DAG.getConstant(ShiftElts, dl, MVT::i32));
8455     return DAG.getNode(ISD::BITCAST, dl, MVT::v16i8, PermDI);
8456   }
8457 
8458   if (Subtarget.hasP9Vector()) {
8459      if (PPC::isXXBRHShuffleMask(SVOp)) {
8460       SDValue Conv = DAG.getNode(ISD::BITCAST, dl, MVT::v8i16, V1);
8461       SDValue ReveHWord = DAG.getNode(PPCISD::XXREVERSE, dl, MVT::v8i16, Conv);
8462       return DAG.getNode(ISD::BITCAST, dl, MVT::v16i8, ReveHWord);
8463     } else if (PPC::isXXBRWShuffleMask(SVOp)) {
8464       SDValue Conv = DAG.getNode(ISD::BITCAST, dl, MVT::v4i32, V1);
8465       SDValue ReveWord = DAG.getNode(PPCISD::XXREVERSE, dl, MVT::v4i32, Conv);
8466       return DAG.getNode(ISD::BITCAST, dl, MVT::v16i8, ReveWord);
8467     } else if (PPC::isXXBRDShuffleMask(SVOp)) {
8468       SDValue Conv = DAG.getNode(ISD::BITCAST, dl, MVT::v2i64, V1);
8469       SDValue ReveDWord = DAG.getNode(PPCISD::XXREVERSE, dl, MVT::v2i64, Conv);
8470       return DAG.getNode(ISD::BITCAST, dl, MVT::v16i8, ReveDWord);
8471     } else if (PPC::isXXBRQShuffleMask(SVOp)) {
8472       SDValue Conv = DAG.getNode(ISD::BITCAST, dl, MVT::v1i128, V1);
8473       SDValue ReveQWord = DAG.getNode(PPCISD::XXREVERSE, dl, MVT::v1i128, Conv);
8474       return DAG.getNode(ISD::BITCAST, dl, MVT::v16i8, ReveQWord);
8475     }
8476   }
8477 
8478   if (Subtarget.hasVSX()) {
8479     if (V2.isUndef() && PPC::isSplatShuffleMask(SVOp, 4)) {
8480       int SplatIdx = PPC::getVSPLTImmediate(SVOp, 4, DAG);
8481 
8482       SDValue Conv = DAG.getNode(ISD::BITCAST, dl, MVT::v4i32, V1);
8483       SDValue Splat = DAG.getNode(PPCISD::XXSPLT, dl, MVT::v4i32, Conv,
8484                                   DAG.getConstant(SplatIdx, dl, MVT::i32));
8485       return DAG.getNode(ISD::BITCAST, dl, MVT::v16i8, Splat);
8486     }
8487 
8488     // Left shifts of 8 bytes are actually swaps. Convert accordingly.
8489     if (V2.isUndef() && PPC::isVSLDOIShuffleMask(SVOp, 1, DAG) == 8) {
8490       SDValue Conv = DAG.getNode(ISD::BITCAST, dl, MVT::v2f64, V1);
8491       SDValue Swap = DAG.getNode(PPCISD::SWAP_NO_CHAIN, dl, MVT::v2f64, Conv);
8492       return DAG.getNode(ISD::BITCAST, dl, MVT::v16i8, Swap);
8493     }
8494   }
8495 
8496   if (Subtarget.hasQPX()) {
8497     if (VT.getVectorNumElements() != 4)
8498       return SDValue();
8499 
8500     if (V2.isUndef()) V2 = V1;
8501 
8502     int AlignIdx = PPC::isQVALIGNIShuffleMask(SVOp);
8503     if (AlignIdx != -1) {
8504       return DAG.getNode(PPCISD::QVALIGNI, dl, VT, V1, V2,
8505                          DAG.getConstant(AlignIdx, dl, MVT::i32));
8506     } else if (SVOp->isSplat()) {
8507       int SplatIdx = SVOp->getSplatIndex();
8508       if (SplatIdx >= 4) {
8509         std::swap(V1, V2);
8510         SplatIdx -= 4;
8511       }
8512 
8513       return DAG.getNode(PPCISD::QVESPLATI, dl, VT, V1,
8514                          DAG.getConstant(SplatIdx, dl, MVT::i32));
8515     }
8516 
8517     // Lower this into a qvgpci/qvfperm pair.
8518 
8519     // Compute the qvgpci literal
8520     unsigned idx = 0;
8521     for (unsigned i = 0; i < 4; ++i) {
8522       int m = SVOp->getMaskElt(i);
8523       unsigned mm = m >= 0 ? (unsigned) m : i;
8524       idx |= mm << (3-i)*3;
8525     }
8526 
8527     SDValue V3 = DAG.getNode(PPCISD::QVGPCI, dl, MVT::v4f64,
8528                              DAG.getConstant(idx, dl, MVT::i32));
8529     return DAG.getNode(PPCISD::QVFPERM, dl, VT, V1, V2, V3);
8530   }
8531 
8532   // Cases that are handled by instructions that take permute immediates
8533   // (such as vsplt*) should be left as VECTOR_SHUFFLE nodes so they can be
8534   // selected by the instruction selector.
8535   if (V2.isUndef()) {
8536     if (PPC::isSplatShuffleMask(SVOp, 1) ||
8537         PPC::isSplatShuffleMask(SVOp, 2) ||
8538         PPC::isSplatShuffleMask(SVOp, 4) ||
8539         PPC::isVPKUWUMShuffleMask(SVOp, 1, DAG) ||
8540         PPC::isVPKUHUMShuffleMask(SVOp, 1, DAG) ||
8541         PPC::isVSLDOIShuffleMask(SVOp, 1, DAG) != -1 ||
8542         PPC::isVMRGLShuffleMask(SVOp, 1, 1, DAG) ||
8543         PPC::isVMRGLShuffleMask(SVOp, 2, 1, DAG) ||
8544         PPC::isVMRGLShuffleMask(SVOp, 4, 1, DAG) ||
8545         PPC::isVMRGHShuffleMask(SVOp, 1, 1, DAG) ||
8546         PPC::isVMRGHShuffleMask(SVOp, 2, 1, DAG) ||
8547         PPC::isVMRGHShuffleMask(SVOp, 4, 1, DAG) ||
8548         (Subtarget.hasP8Altivec() && (
8549          PPC::isVPKUDUMShuffleMask(SVOp, 1, DAG) ||
8550          PPC::isVMRGEOShuffleMask(SVOp, true, 1, DAG) ||
8551          PPC::isVMRGEOShuffleMask(SVOp, false, 1, DAG)))) {
8552       return Op;
8553     }
8554   }
8555 
8556   // Altivec has a variety of "shuffle immediates" that take two vector inputs
8557   // and produce a fixed permutation.  If any of these match, do not lower to
8558   // VPERM.
8559   unsigned int ShuffleKind = isLittleEndian ? 2 : 0;
8560   if (PPC::isVPKUWUMShuffleMask(SVOp, ShuffleKind, DAG) ||
8561       PPC::isVPKUHUMShuffleMask(SVOp, ShuffleKind, DAG) ||
8562       PPC::isVSLDOIShuffleMask(SVOp, ShuffleKind, DAG) != -1 ||
8563       PPC::isVMRGLShuffleMask(SVOp, 1, ShuffleKind, DAG) ||
8564       PPC::isVMRGLShuffleMask(SVOp, 2, ShuffleKind, DAG) ||
8565       PPC::isVMRGLShuffleMask(SVOp, 4, ShuffleKind, DAG) ||
8566       PPC::isVMRGHShuffleMask(SVOp, 1, ShuffleKind, DAG) ||
8567       PPC::isVMRGHShuffleMask(SVOp, 2, ShuffleKind, DAG) ||
8568       PPC::isVMRGHShuffleMask(SVOp, 4, ShuffleKind, DAG) ||
8569       (Subtarget.hasP8Altivec() && (
8570        PPC::isVPKUDUMShuffleMask(SVOp, ShuffleKind, DAG) ||
8571        PPC::isVMRGEOShuffleMask(SVOp, true, ShuffleKind, DAG) ||
8572        PPC::isVMRGEOShuffleMask(SVOp, false, ShuffleKind, DAG))))
8573     return Op;
8574 
8575   // Check to see if this is a shuffle of 4-byte values.  If so, we can use our
8576   // perfect shuffle table to emit an optimal matching sequence.
8577   ArrayRef<int> PermMask = SVOp->getMask();
8578 
8579   unsigned PFIndexes[4];
8580   bool isFourElementShuffle = true;
8581   for (unsigned i = 0; i != 4 && isFourElementShuffle; ++i) { // Element number
8582     unsigned EltNo = 8;   // Start out undef.
8583     for (unsigned j = 0; j != 4; ++j) {  // Intra-element byte.
8584       if (PermMask[i*4+j] < 0)
8585         continue;   // Undef, ignore it.
8586 
8587       unsigned ByteSource = PermMask[i*4+j];
8588       if ((ByteSource & 3) != j) {
8589         isFourElementShuffle = false;
8590         break;
8591       }
8592 
8593       if (EltNo == 8) {
8594         EltNo = ByteSource/4;
8595       } else if (EltNo != ByteSource/4) {
8596         isFourElementShuffle = false;
8597         break;
8598       }
8599     }
8600     PFIndexes[i] = EltNo;
8601   }
8602 
8603   // If this shuffle can be expressed as a shuffle of 4-byte elements, use the
8604   // perfect shuffle vector to determine if it is cost effective to do this as
8605   // discrete instructions, or whether we should use a vperm.
8606   // For now, we skip this for little endian until such time as we have a
8607   // little-endian perfect shuffle table.
8608   if (isFourElementShuffle && !isLittleEndian) {
8609     // Compute the index in the perfect shuffle table.
8610     unsigned PFTableIndex =
8611       PFIndexes[0]*9*9*9+PFIndexes[1]*9*9+PFIndexes[2]*9+PFIndexes[3];
8612 
8613     unsigned PFEntry = PerfectShuffleTable[PFTableIndex];
8614     unsigned Cost  = (PFEntry >> 30);
8615 
8616     // Determining when to avoid vperm is tricky.  Many things affect the cost
8617     // of vperm, particularly how many times the perm mask needs to be computed.
8618     // For example, if the perm mask can be hoisted out of a loop or is already
8619     // used (perhaps because there are multiple permutes with the same shuffle
8620     // mask?) the vperm has a cost of 1.  OTOH, hoisting the permute mask out of
8621     // the loop requires an extra register.
8622     //
8623     // As a compromise, we only emit discrete instructions if the shuffle can be
8624     // generated in 3 or fewer operations.  When we have loop information
8625     // available, if this block is within a loop, we should avoid using vperm
8626     // for 3-operation perms and use a constant pool load instead.
8627     if (Cost < 3)
8628       return GeneratePerfectShuffle(PFEntry, V1, V2, DAG, dl);
8629   }
8630 
8631   // Lower this to a VPERM(V1, V2, V3) expression, where V3 is a constant
8632   // vector that will get spilled to the constant pool.
8633   if (V2.isUndef()) V2 = V1;
8634 
8635   // The SHUFFLE_VECTOR mask is almost exactly what we want for vperm, except
8636   // that it is in input element units, not in bytes.  Convert now.
8637 
8638   // For little endian, the order of the input vectors is reversed, and
8639   // the permutation mask is complemented with respect to 31.  This is
8640   // necessary to produce proper semantics with the big-endian-biased vperm
8641   // instruction.
8642   EVT EltVT = V1.getValueType().getVectorElementType();
8643   unsigned BytesPerElement = EltVT.getSizeInBits()/8;
8644 
8645   SmallVector<SDValue, 16> ResultMask;
8646   for (unsigned i = 0, e = VT.getVectorNumElements(); i != e; ++i) {
8647     unsigned SrcElt = PermMask[i] < 0 ? 0 : PermMask[i];
8648 
8649     for (unsigned j = 0; j != BytesPerElement; ++j)
8650       if (isLittleEndian)
8651         ResultMask.push_back(DAG.getConstant(31 - (SrcElt*BytesPerElement + j),
8652                                              dl, MVT::i32));
8653       else
8654         ResultMask.push_back(DAG.getConstant(SrcElt*BytesPerElement + j, dl,
8655                                              MVT::i32));
8656   }
8657 
8658   SDValue VPermMask = DAG.getBuildVector(MVT::v16i8, dl, ResultMask);
8659   if (isLittleEndian)
8660     return DAG.getNode(PPCISD::VPERM, dl, V1.getValueType(),
8661                        V2, V1, VPermMask);
8662   else
8663     return DAG.getNode(PPCISD::VPERM, dl, V1.getValueType(),
8664                        V1, V2, VPermMask);
8665 }
8666 
8667 /// getVectorCompareInfo - Given an intrinsic, return false if it is not a
8668 /// vector comparison.  If it is, return true and fill in Opc/isDot with
8669 /// information about the intrinsic.
8670 static bool getVectorCompareInfo(SDValue Intrin, int &CompareOpc,
8671                                  bool &isDot, const PPCSubtarget &Subtarget) {
8672   unsigned IntrinsicID =
8673       cast<ConstantSDNode>(Intrin.getOperand(0))->getZExtValue();
8674   CompareOpc = -1;
8675   isDot = false;
8676   switch (IntrinsicID) {
8677   default:
8678     return false;
8679   // Comparison predicates.
8680   case Intrinsic::ppc_altivec_vcmpbfp_p:
8681     CompareOpc = 966;
8682     isDot = true;
8683     break;
8684   case Intrinsic::ppc_altivec_vcmpeqfp_p:
8685     CompareOpc = 198;
8686     isDot = true;
8687     break;
8688   case Intrinsic::ppc_altivec_vcmpequb_p:
8689     CompareOpc = 6;
8690     isDot = true;
8691     break;
8692   case Intrinsic::ppc_altivec_vcmpequh_p:
8693     CompareOpc = 70;
8694     isDot = true;
8695     break;
8696   case Intrinsic::ppc_altivec_vcmpequw_p:
8697     CompareOpc = 134;
8698     isDot = true;
8699     break;
8700   case Intrinsic::ppc_altivec_vcmpequd_p:
8701     if (Subtarget.hasP8Altivec()) {
8702       CompareOpc = 199;
8703       isDot = true;
8704     } else
8705       return false;
8706     break;
8707   case Intrinsic::ppc_altivec_vcmpneb_p:
8708   case Intrinsic::ppc_altivec_vcmpneh_p:
8709   case Intrinsic::ppc_altivec_vcmpnew_p:
8710   case Intrinsic::ppc_altivec_vcmpnezb_p:
8711   case Intrinsic::ppc_altivec_vcmpnezh_p:
8712   case Intrinsic::ppc_altivec_vcmpnezw_p:
8713     if (Subtarget.hasP9Altivec()) {
8714       switch (IntrinsicID) {
8715       default:
8716         llvm_unreachable("Unknown comparison intrinsic.");
8717       case Intrinsic::ppc_altivec_vcmpneb_p:
8718         CompareOpc = 7;
8719         break;
8720       case Intrinsic::ppc_altivec_vcmpneh_p:
8721         CompareOpc = 71;
8722         break;
8723       case Intrinsic::ppc_altivec_vcmpnew_p:
8724         CompareOpc = 135;
8725         break;
8726       case Intrinsic::ppc_altivec_vcmpnezb_p:
8727         CompareOpc = 263;
8728         break;
8729       case Intrinsic::ppc_altivec_vcmpnezh_p:
8730         CompareOpc = 327;
8731         break;
8732       case Intrinsic::ppc_altivec_vcmpnezw_p:
8733         CompareOpc = 391;
8734         break;
8735       }
8736       isDot = true;
8737     } else
8738       return false;
8739     break;
8740   case Intrinsic::ppc_altivec_vcmpgefp_p:
8741     CompareOpc = 454;
8742     isDot = true;
8743     break;
8744   case Intrinsic::ppc_altivec_vcmpgtfp_p:
8745     CompareOpc = 710;
8746     isDot = true;
8747     break;
8748   case Intrinsic::ppc_altivec_vcmpgtsb_p:
8749     CompareOpc = 774;
8750     isDot = true;
8751     break;
8752   case Intrinsic::ppc_altivec_vcmpgtsh_p:
8753     CompareOpc = 838;
8754     isDot = true;
8755     break;
8756   case Intrinsic::ppc_altivec_vcmpgtsw_p:
8757     CompareOpc = 902;
8758     isDot = true;
8759     break;
8760   case Intrinsic::ppc_altivec_vcmpgtsd_p:
8761     if (Subtarget.hasP8Altivec()) {
8762       CompareOpc = 967;
8763       isDot = true;
8764     } else
8765       return false;
8766     break;
8767   case Intrinsic::ppc_altivec_vcmpgtub_p:
8768     CompareOpc = 518;
8769     isDot = true;
8770     break;
8771   case Intrinsic::ppc_altivec_vcmpgtuh_p:
8772     CompareOpc = 582;
8773     isDot = true;
8774     break;
8775   case Intrinsic::ppc_altivec_vcmpgtuw_p:
8776     CompareOpc = 646;
8777     isDot = true;
8778     break;
8779   case Intrinsic::ppc_altivec_vcmpgtud_p:
8780     if (Subtarget.hasP8Altivec()) {
8781       CompareOpc = 711;
8782       isDot = true;
8783     } else
8784       return false;
8785     break;
8786 
8787   // VSX predicate comparisons use the same infrastructure
8788   case Intrinsic::ppc_vsx_xvcmpeqdp_p:
8789   case Intrinsic::ppc_vsx_xvcmpgedp_p:
8790   case Intrinsic::ppc_vsx_xvcmpgtdp_p:
8791   case Intrinsic::ppc_vsx_xvcmpeqsp_p:
8792   case Intrinsic::ppc_vsx_xvcmpgesp_p:
8793   case Intrinsic::ppc_vsx_xvcmpgtsp_p:
8794     if (Subtarget.hasVSX()) {
8795       switch (IntrinsicID) {
8796       case Intrinsic::ppc_vsx_xvcmpeqdp_p:
8797         CompareOpc = 99;
8798         break;
8799       case Intrinsic::ppc_vsx_xvcmpgedp_p:
8800         CompareOpc = 115;
8801         break;
8802       case Intrinsic::ppc_vsx_xvcmpgtdp_p:
8803         CompareOpc = 107;
8804         break;
8805       case Intrinsic::ppc_vsx_xvcmpeqsp_p:
8806         CompareOpc = 67;
8807         break;
8808       case Intrinsic::ppc_vsx_xvcmpgesp_p:
8809         CompareOpc = 83;
8810         break;
8811       case Intrinsic::ppc_vsx_xvcmpgtsp_p:
8812         CompareOpc = 75;
8813         break;
8814       }
8815       isDot = true;
8816     } else
8817       return false;
8818     break;
8819 
8820   // Normal Comparisons.
8821   case Intrinsic::ppc_altivec_vcmpbfp:
8822     CompareOpc = 966;
8823     break;
8824   case Intrinsic::ppc_altivec_vcmpeqfp:
8825     CompareOpc = 198;
8826     break;
8827   case Intrinsic::ppc_altivec_vcmpequb:
8828     CompareOpc = 6;
8829     break;
8830   case Intrinsic::ppc_altivec_vcmpequh:
8831     CompareOpc = 70;
8832     break;
8833   case Intrinsic::ppc_altivec_vcmpequw:
8834     CompareOpc = 134;
8835     break;
8836   case Intrinsic::ppc_altivec_vcmpequd:
8837     if (Subtarget.hasP8Altivec())
8838       CompareOpc = 199;
8839     else
8840       return false;
8841     break;
8842   case Intrinsic::ppc_altivec_vcmpneb:
8843   case Intrinsic::ppc_altivec_vcmpneh:
8844   case Intrinsic::ppc_altivec_vcmpnew:
8845   case Intrinsic::ppc_altivec_vcmpnezb:
8846   case Intrinsic::ppc_altivec_vcmpnezh:
8847   case Intrinsic::ppc_altivec_vcmpnezw:
8848     if (Subtarget.hasP9Altivec())
8849       switch (IntrinsicID) {
8850       default:
8851         llvm_unreachable("Unknown comparison intrinsic.");
8852       case Intrinsic::ppc_altivec_vcmpneb:
8853         CompareOpc = 7;
8854         break;
8855       case Intrinsic::ppc_altivec_vcmpneh:
8856         CompareOpc = 71;
8857         break;
8858       case Intrinsic::ppc_altivec_vcmpnew:
8859         CompareOpc = 135;
8860         break;
8861       case Intrinsic::ppc_altivec_vcmpnezb:
8862         CompareOpc = 263;
8863         break;
8864       case Intrinsic::ppc_altivec_vcmpnezh:
8865         CompareOpc = 327;
8866         break;
8867       case Intrinsic::ppc_altivec_vcmpnezw:
8868         CompareOpc = 391;
8869         break;
8870       }
8871     else
8872       return false;
8873     break;
8874   case Intrinsic::ppc_altivec_vcmpgefp:
8875     CompareOpc = 454;
8876     break;
8877   case Intrinsic::ppc_altivec_vcmpgtfp:
8878     CompareOpc = 710;
8879     break;
8880   case Intrinsic::ppc_altivec_vcmpgtsb:
8881     CompareOpc = 774;
8882     break;
8883   case Intrinsic::ppc_altivec_vcmpgtsh:
8884     CompareOpc = 838;
8885     break;
8886   case Intrinsic::ppc_altivec_vcmpgtsw:
8887     CompareOpc = 902;
8888     break;
8889   case Intrinsic::ppc_altivec_vcmpgtsd:
8890     if (Subtarget.hasP8Altivec())
8891       CompareOpc = 967;
8892     else
8893       return false;
8894     break;
8895   case Intrinsic::ppc_altivec_vcmpgtub:
8896     CompareOpc = 518;
8897     break;
8898   case Intrinsic::ppc_altivec_vcmpgtuh:
8899     CompareOpc = 582;
8900     break;
8901   case Intrinsic::ppc_altivec_vcmpgtuw:
8902     CompareOpc = 646;
8903     break;
8904   case Intrinsic::ppc_altivec_vcmpgtud:
8905     if (Subtarget.hasP8Altivec())
8906       CompareOpc = 711;
8907     else
8908       return false;
8909     break;
8910   }
8911   return true;
8912 }
8913 
8914 /// LowerINTRINSIC_WO_CHAIN - If this is an intrinsic that we want to custom
8915 /// lower, do it, otherwise return null.
8916 SDValue PPCTargetLowering::LowerINTRINSIC_WO_CHAIN(SDValue Op,
8917                                                    SelectionDAG &DAG) const {
8918   unsigned IntrinsicID =
8919     cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue();
8920 
8921   SDLoc dl(Op);
8922 
8923   if (IntrinsicID == Intrinsic::thread_pointer) {
8924     // Reads the thread pointer register, used for __builtin_thread_pointer.
8925     if (Subtarget.isPPC64())
8926       return DAG.getRegister(PPC::X13, MVT::i64);
8927     return DAG.getRegister(PPC::R2, MVT::i32);
8928   }
8929 
8930   // We are looking for absolute values here.
8931   // The idea is to try to fit one of two patterns:
8932   //  max (a, (0-a))  OR  max ((0-a), a)
8933   if (Subtarget.hasP9Vector() &&
8934       (IntrinsicID == Intrinsic::ppc_altivec_vmaxsw ||
8935        IntrinsicID == Intrinsic::ppc_altivec_vmaxsh ||
8936        IntrinsicID == Intrinsic::ppc_altivec_vmaxsb)) {
8937     SDValue V1 = Op.getOperand(1);
8938     SDValue V2 = Op.getOperand(2);
8939     if (V1.getSimpleValueType() == V2.getSimpleValueType() &&
8940         (V1.getSimpleValueType() == MVT::v4i32 ||
8941          V1.getSimpleValueType() == MVT::v8i16 ||
8942          V1.getSimpleValueType() == MVT::v16i8)) {
8943       if ( V1.getOpcode() == ISD::SUB &&
8944            ISD::isBuildVectorAllZeros(V1.getOperand(0).getNode()) &&
8945            V1.getOperand(1) == V2 ) {
8946         // Generate the abs instruction with the operands
8947         return DAG.getNode(ISD::ABS, dl, V2.getValueType(),V2);
8948       }
8949 
8950       if ( V2.getOpcode() == ISD::SUB &&
8951            ISD::isBuildVectorAllZeros(V2.getOperand(0).getNode()) &&
8952            V2.getOperand(1) == V1 ) {
8953         // Generate the abs instruction with the operands
8954         return DAG.getNode(ISD::ABS, dl, V1.getValueType(),V1);
8955       }
8956     }
8957   }
8958 
8959   // If this is a lowered altivec predicate compare, CompareOpc is set to the
8960   // opcode number of the comparison.
8961   int CompareOpc;
8962   bool isDot;
8963   if (!getVectorCompareInfo(Op, CompareOpc, isDot, Subtarget))
8964     return SDValue();    // Don't custom lower most intrinsics.
8965 
8966   // If this is a non-dot comparison, make the VCMP node and we are done.
8967   if (!isDot) {
8968     SDValue Tmp = DAG.getNode(PPCISD::VCMP, dl, Op.getOperand(2).getValueType(),
8969                               Op.getOperand(1), Op.getOperand(2),
8970                               DAG.getConstant(CompareOpc, dl, MVT::i32));
8971     return DAG.getNode(ISD::BITCAST, dl, Op.getValueType(), Tmp);
8972   }
8973 
8974   // Create the PPCISD altivec 'dot' comparison node.
8975   SDValue Ops[] = {
8976     Op.getOperand(2),  // LHS
8977     Op.getOperand(3),  // RHS
8978     DAG.getConstant(CompareOpc, dl, MVT::i32)
8979   };
8980   EVT VTs[] = { Op.getOperand(2).getValueType(), MVT::Glue };
8981   SDValue CompNode = DAG.getNode(PPCISD::VCMPo, dl, VTs, Ops);
8982 
8983   // Now that we have the comparison, emit a copy from the CR to a GPR.
8984   // This is flagged to the above dot comparison.
8985   SDValue Flags = DAG.getNode(PPCISD::MFOCRF, dl, MVT::i32,
8986                                 DAG.getRegister(PPC::CR6, MVT::i32),
8987                                 CompNode.getValue(1));
8988 
8989   // Unpack the result based on how the target uses it.
8990   unsigned BitNo;   // Bit # of CR6.
8991   bool InvertBit;   // Invert result?
8992   switch (cast<ConstantSDNode>(Op.getOperand(1))->getZExtValue()) {
8993   default:  // Can't happen, don't crash on invalid number though.
8994   case 0:   // Return the value of the EQ bit of CR6.
8995     BitNo = 0; InvertBit = false;
8996     break;
8997   case 1:   // Return the inverted value of the EQ bit of CR6.
8998     BitNo = 0; InvertBit = true;
8999     break;
9000   case 2:   // Return the value of the LT bit of CR6.
9001     BitNo = 2; InvertBit = false;
9002     break;
9003   case 3:   // Return the inverted value of the LT bit of CR6.
9004     BitNo = 2; InvertBit = true;
9005     break;
9006   }
9007 
9008   // Shift the bit into the low position.
9009   Flags = DAG.getNode(ISD::SRL, dl, MVT::i32, Flags,
9010                       DAG.getConstant(8 - (3 - BitNo), dl, MVT::i32));
9011   // Isolate the bit.
9012   Flags = DAG.getNode(ISD::AND, dl, MVT::i32, Flags,
9013                       DAG.getConstant(1, dl, MVT::i32));
9014 
9015   // If we are supposed to, toggle the bit.
9016   if (InvertBit)
9017     Flags = DAG.getNode(ISD::XOR, dl, MVT::i32, Flags,
9018                         DAG.getConstant(1, dl, MVT::i32));
9019   return Flags;
9020 }
9021 
9022 SDValue PPCTargetLowering::LowerINTRINSIC_VOID(SDValue Op,
9023                                                SelectionDAG &DAG) const {
9024   // SelectionDAGBuilder::visitTargetIntrinsic may insert one extra chain to
9025   // the beginning of the argument list.
9026   int ArgStart = isa<ConstantSDNode>(Op.getOperand(0)) ? 0 : 1;
9027   SDLoc DL(Op);
9028   switch (cast<ConstantSDNode>(Op.getOperand(ArgStart))->getZExtValue()) {
9029   case Intrinsic::ppc_cfence: {
9030     assert(ArgStart == 1 && "llvm.ppc.cfence must carry a chain argument.");
9031     assert(Subtarget.isPPC64() && "Only 64-bit is supported for now.");
9032     return SDValue(DAG.getMachineNode(PPC::CFENCE8, DL, MVT::Other,
9033                                       DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i64,
9034                                                   Op.getOperand(ArgStart + 1)),
9035                                       Op.getOperand(0)),
9036                    0);
9037   }
9038   default:
9039     break;
9040   }
9041   return SDValue();
9042 }
9043 
9044 SDValue PPCTargetLowering::LowerREM(SDValue Op, SelectionDAG &DAG) const {
9045   // Check for a DIV with the same operands as this REM.
9046   for (auto UI : Op.getOperand(1)->uses()) {
9047     if ((Op.getOpcode() == ISD::SREM && UI->getOpcode() == ISD::SDIV) ||
9048         (Op.getOpcode() == ISD::UREM && UI->getOpcode() == ISD::UDIV))
9049       if (UI->getOperand(0) == Op.getOperand(0) &&
9050           UI->getOperand(1) == Op.getOperand(1))
9051         return SDValue();
9052   }
9053   return Op;
9054 }
9055 
9056 // Lower scalar BSWAP64 to xxbrd.
9057 SDValue PPCTargetLowering::LowerBSWAP(SDValue Op, SelectionDAG &DAG) const {
9058   SDLoc dl(Op);
9059   // MTVSRDD
9060   Op = DAG.getNode(ISD::BUILD_VECTOR, dl, MVT::v2i64, Op.getOperand(0),
9061                    Op.getOperand(0));
9062   // XXBRD
9063   Op = DAG.getNode(PPCISD::XXREVERSE, dl, MVT::v2i64, Op);
9064   // MFVSRD
9065   int VectorIndex = 0;
9066   if (Subtarget.isLittleEndian())
9067     VectorIndex = 1;
9068   Op = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, MVT::i64, Op,
9069                    DAG.getTargetConstant(VectorIndex, dl, MVT::i32));
9070   return Op;
9071 }
9072 
9073 // ATOMIC_CMP_SWAP for i8/i16 needs to zero-extend its input since it will be
9074 // compared to a value that is atomically loaded (atomic loads zero-extend).
9075 SDValue PPCTargetLowering::LowerATOMIC_CMP_SWAP(SDValue Op,
9076                                                 SelectionDAG &DAG) const {
9077   assert(Op.getOpcode() == ISD::ATOMIC_CMP_SWAP &&
9078          "Expecting an atomic compare-and-swap here.");
9079   SDLoc dl(Op);
9080   auto *AtomicNode = cast<AtomicSDNode>(Op.getNode());
9081   EVT MemVT = AtomicNode->getMemoryVT();
9082   if (MemVT.getSizeInBits() >= 32)
9083     return Op;
9084 
9085   SDValue CmpOp = Op.getOperand(2);
9086   // If this is already correctly zero-extended, leave it alone.
9087   auto HighBits = APInt::getHighBitsSet(32, 32 - MemVT.getSizeInBits());
9088   if (DAG.MaskedValueIsZero(CmpOp, HighBits))
9089     return Op;
9090 
9091   // Clear the high bits of the compare operand.
9092   unsigned MaskVal = (1 << MemVT.getSizeInBits()) - 1;
9093   SDValue NewCmpOp =
9094     DAG.getNode(ISD::AND, dl, MVT::i32, CmpOp,
9095                 DAG.getConstant(MaskVal, dl, MVT::i32));
9096 
9097   // Replace the existing compare operand with the properly zero-extended one.
9098   SmallVector<SDValue, 4> Ops;
9099   for (int i = 0, e = AtomicNode->getNumOperands(); i < e; i++)
9100     Ops.push_back(AtomicNode->getOperand(i));
9101   Ops[2] = NewCmpOp;
9102   MachineMemOperand *MMO = AtomicNode->getMemOperand();
9103   SDVTList Tys = DAG.getVTList(MVT::i32, MVT::Other);
9104   auto NodeTy =
9105     (MemVT == MVT::i8) ? PPCISD::ATOMIC_CMP_SWAP_8 : PPCISD::ATOMIC_CMP_SWAP_16;
9106   return DAG.getMemIntrinsicNode(NodeTy, dl, Tys, Ops, MemVT, MMO);
9107 }
9108 
9109 SDValue PPCTargetLowering::LowerSIGN_EXTEND_INREG(SDValue Op,
9110                                                   SelectionDAG &DAG) const {
9111   SDLoc dl(Op);
9112   // For v2i64 (VSX), we can pattern patch the v2i32 case (using fp <-> int
9113   // instructions), but for smaller types, we need to first extend up to v2i32
9114   // before doing going farther.
9115   if (Op.getValueType() == MVT::v2i64) {
9116     EVT ExtVT = cast<VTSDNode>(Op.getOperand(1))->getVT();
9117     if (ExtVT != MVT::v2i32) {
9118       Op = DAG.getNode(ISD::BITCAST, dl, MVT::v4i32, Op.getOperand(0));
9119       Op = DAG.getNode(ISD::SIGN_EXTEND_INREG, dl, MVT::v4i32, Op,
9120                        DAG.getValueType(EVT::getVectorVT(*DAG.getContext(),
9121                                         ExtVT.getVectorElementType(), 4)));
9122       Op = DAG.getNode(ISD::BITCAST, dl, MVT::v2i64, Op);
9123       Op = DAG.getNode(ISD::SIGN_EXTEND_INREG, dl, MVT::v2i64, Op,
9124                        DAG.getValueType(MVT::v2i32));
9125     }
9126 
9127     return Op;
9128   }
9129 
9130   return SDValue();
9131 }
9132 
9133 SDValue PPCTargetLowering::LowerSCALAR_TO_VECTOR(SDValue Op,
9134                                                  SelectionDAG &DAG) const {
9135   SDLoc dl(Op);
9136   // Create a stack slot that is 16-byte aligned.
9137   MachineFrameInfo &MFI = DAG.getMachineFunction().getFrameInfo();
9138   int FrameIdx = MFI.CreateStackObject(16, 16, false);
9139   EVT PtrVT = getPointerTy(DAG.getDataLayout());
9140   SDValue FIdx = DAG.getFrameIndex(FrameIdx, PtrVT);
9141 
9142   // Store the input value into Value#0 of the stack slot.
9143   SDValue Store = DAG.getStore(DAG.getEntryNode(), dl, Op.getOperand(0), FIdx,
9144                                MachinePointerInfo());
9145   // Load it out.
9146   return DAG.getLoad(Op.getValueType(), dl, Store, FIdx, MachinePointerInfo());
9147 }
9148 
9149 SDValue PPCTargetLowering::LowerINSERT_VECTOR_ELT(SDValue Op,
9150                                                   SelectionDAG &DAG) const {
9151   assert(Op.getOpcode() == ISD::INSERT_VECTOR_ELT &&
9152          "Should only be called for ISD::INSERT_VECTOR_ELT");
9153 
9154   ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op.getOperand(2));
9155   // We have legal lowering for constant indices but not for variable ones.
9156   if (!C)
9157     return SDValue();
9158 
9159   EVT VT = Op.getValueType();
9160   SDLoc dl(Op);
9161   SDValue V1 = Op.getOperand(0);
9162   SDValue V2 = Op.getOperand(1);
9163   // We can use MTVSRZ + VECINSERT for v8i16 and v16i8 types.
9164   if (VT == MVT::v8i16 || VT == MVT::v16i8) {
9165     SDValue Mtvsrz = DAG.getNode(PPCISD::MTVSRZ, dl, VT, V2);
9166     unsigned BytesInEachElement = VT.getVectorElementType().getSizeInBits() / 8;
9167     unsigned InsertAtElement = C->getZExtValue();
9168     unsigned InsertAtByte = InsertAtElement * BytesInEachElement;
9169     if (Subtarget.isLittleEndian()) {
9170       InsertAtByte = (16 - BytesInEachElement) - InsertAtByte;
9171     }
9172     return DAG.getNode(PPCISD::VECINSERT, dl, VT, V1, Mtvsrz,
9173                        DAG.getConstant(InsertAtByte, dl, MVT::i32));
9174   }
9175   return Op;
9176 }
9177 
9178 SDValue PPCTargetLowering::LowerEXTRACT_VECTOR_ELT(SDValue Op,
9179                                                    SelectionDAG &DAG) const {
9180   SDLoc dl(Op);
9181   SDNode *N = Op.getNode();
9182 
9183   assert(N->getOperand(0).getValueType() == MVT::v4i1 &&
9184          "Unknown extract_vector_elt type");
9185 
9186   SDValue Value = N->getOperand(0);
9187 
9188   // The first part of this is like the store lowering except that we don't
9189   // need to track the chain.
9190 
9191   // The values are now known to be -1 (false) or 1 (true). To convert this
9192   // into 0 (false) and 1 (true), add 1 and then divide by 2 (multiply by 0.5).
9193   // This can be done with an fma and the 0.5 constant: (V+1.0)*0.5 = 0.5*V+0.5
9194   Value = DAG.getNode(PPCISD::QBFLT, dl, MVT::v4f64, Value);
9195 
9196   // FIXME: We can make this an f32 vector, but the BUILD_VECTOR code needs to
9197   // understand how to form the extending load.
9198   SDValue FPHalfs = DAG.getConstantFP(0.5, dl, MVT::v4f64);
9199 
9200   Value = DAG.getNode(ISD::FMA, dl, MVT::v4f64, Value, FPHalfs, FPHalfs);
9201 
9202   // Now convert to an integer and store.
9203   Value = DAG.getNode(ISD::INTRINSIC_WO_CHAIN, dl, MVT::v4f64,
9204     DAG.getConstant(Intrinsic::ppc_qpx_qvfctiwu, dl, MVT::i32),
9205     Value);
9206 
9207   MachineFrameInfo &MFI = DAG.getMachineFunction().getFrameInfo();
9208   int FrameIdx = MFI.CreateStackObject(16, 16, false);
9209   MachinePointerInfo PtrInfo =
9210       MachinePointerInfo::getFixedStack(DAG.getMachineFunction(), FrameIdx);
9211   EVT PtrVT = getPointerTy(DAG.getDataLayout());
9212   SDValue FIdx = DAG.getFrameIndex(FrameIdx, PtrVT);
9213 
9214   SDValue StoreChain = DAG.getEntryNode();
9215   SDValue Ops[] = {StoreChain,
9216                    DAG.getConstant(Intrinsic::ppc_qpx_qvstfiw, dl, MVT::i32),
9217                    Value, FIdx};
9218   SDVTList VTs = DAG.getVTList(/*chain*/ MVT::Other);
9219 
9220   StoreChain = DAG.getMemIntrinsicNode(ISD::INTRINSIC_VOID,
9221     dl, VTs, Ops, MVT::v4i32, PtrInfo);
9222 
9223   // Extract the value requested.
9224   unsigned Offset = 4*cast<ConstantSDNode>(N->getOperand(1))->getZExtValue();
9225   SDValue Idx = DAG.getConstant(Offset, dl, FIdx.getValueType());
9226   Idx = DAG.getNode(ISD::ADD, dl, FIdx.getValueType(), FIdx, Idx);
9227 
9228   SDValue IntVal =
9229       DAG.getLoad(MVT::i32, dl, StoreChain, Idx, PtrInfo.getWithOffset(Offset));
9230 
9231   if (!Subtarget.useCRBits())
9232     return IntVal;
9233 
9234   return DAG.getNode(ISD::TRUNCATE, dl, MVT::i1, IntVal);
9235 }
9236 
9237 /// Lowering for QPX v4i1 loads
9238 SDValue PPCTargetLowering::LowerVectorLoad(SDValue Op,
9239                                            SelectionDAG &DAG) const {
9240   SDLoc dl(Op);
9241   LoadSDNode *LN = cast<LoadSDNode>(Op.getNode());
9242   SDValue LoadChain = LN->getChain();
9243   SDValue BasePtr = LN->getBasePtr();
9244 
9245   if (Op.getValueType() == MVT::v4f64 ||
9246       Op.getValueType() == MVT::v4f32) {
9247     EVT MemVT = LN->getMemoryVT();
9248     unsigned Alignment = LN->getAlignment();
9249 
9250     // If this load is properly aligned, then it is legal.
9251     if (Alignment >= MemVT.getStoreSize())
9252       return Op;
9253 
9254     EVT ScalarVT = Op.getValueType().getScalarType(),
9255         ScalarMemVT = MemVT.getScalarType();
9256     unsigned Stride = ScalarMemVT.getStoreSize();
9257 
9258     SDValue Vals[4], LoadChains[4];
9259     for (unsigned Idx = 0; Idx < 4; ++Idx) {
9260       SDValue Load;
9261       if (ScalarVT != ScalarMemVT)
9262         Load = DAG.getExtLoad(LN->getExtensionType(), dl, ScalarVT, LoadChain,
9263                               BasePtr,
9264                               LN->getPointerInfo().getWithOffset(Idx * Stride),
9265                               ScalarMemVT, MinAlign(Alignment, Idx * Stride),
9266                               LN->getMemOperand()->getFlags(), LN->getAAInfo());
9267       else
9268         Load = DAG.getLoad(ScalarVT, dl, LoadChain, BasePtr,
9269                            LN->getPointerInfo().getWithOffset(Idx * Stride),
9270                            MinAlign(Alignment, Idx * Stride),
9271                            LN->getMemOperand()->getFlags(), LN->getAAInfo());
9272 
9273       if (Idx == 0 && LN->isIndexed()) {
9274         assert(LN->getAddressingMode() == ISD::PRE_INC &&
9275                "Unknown addressing mode on vector load");
9276         Load = DAG.getIndexedLoad(Load, dl, BasePtr, LN->getOffset(),
9277                                   LN->getAddressingMode());
9278       }
9279 
9280       Vals[Idx] = Load;
9281       LoadChains[Idx] = Load.getValue(1);
9282 
9283       BasePtr = DAG.getNode(ISD::ADD, dl, BasePtr.getValueType(), BasePtr,
9284                             DAG.getConstant(Stride, dl,
9285                                             BasePtr.getValueType()));
9286     }
9287 
9288     SDValue TF =  DAG.getNode(ISD::TokenFactor, dl, MVT::Other, LoadChains);
9289     SDValue Value = DAG.getBuildVector(Op.getValueType(), dl, Vals);
9290 
9291     if (LN->isIndexed()) {
9292       SDValue RetOps[] = { Value, Vals[0].getValue(1), TF };
9293       return DAG.getMergeValues(RetOps, dl);
9294     }
9295 
9296     SDValue RetOps[] = { Value, TF };
9297     return DAG.getMergeValues(RetOps, dl);
9298   }
9299 
9300   assert(Op.getValueType() == MVT::v4i1 && "Unknown load to lower");
9301   assert(LN->isUnindexed() && "Indexed v4i1 loads are not supported");
9302 
9303   // To lower v4i1 from a byte array, we load the byte elements of the
9304   // vector and then reuse the BUILD_VECTOR logic.
9305 
9306   SDValue VectElmts[4], VectElmtChains[4];
9307   for (unsigned i = 0; i < 4; ++i) {
9308     SDValue Idx = DAG.getConstant(i, dl, BasePtr.getValueType());
9309     Idx = DAG.getNode(ISD::ADD, dl, BasePtr.getValueType(), BasePtr, Idx);
9310 
9311     VectElmts[i] = DAG.getExtLoad(
9312         ISD::EXTLOAD, dl, MVT::i32, LoadChain, Idx,
9313         LN->getPointerInfo().getWithOffset(i), MVT::i8,
9314         /* Alignment = */ 1, LN->getMemOperand()->getFlags(), LN->getAAInfo());
9315     VectElmtChains[i] = VectElmts[i].getValue(1);
9316   }
9317 
9318   LoadChain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, VectElmtChains);
9319   SDValue Value = DAG.getBuildVector(MVT::v4i1, dl, VectElmts);
9320 
9321   SDValue RVals[] = { Value, LoadChain };
9322   return DAG.getMergeValues(RVals, dl);
9323 }
9324 
9325 /// Lowering for QPX v4i1 stores
9326 SDValue PPCTargetLowering::LowerVectorStore(SDValue Op,
9327                                             SelectionDAG &DAG) const {
9328   SDLoc dl(Op);
9329   StoreSDNode *SN = cast<StoreSDNode>(Op.getNode());
9330   SDValue StoreChain = SN->getChain();
9331   SDValue BasePtr = SN->getBasePtr();
9332   SDValue Value = SN->getValue();
9333 
9334   if (Value.getValueType() == MVT::v4f64 ||
9335       Value.getValueType() == MVT::v4f32) {
9336     EVT MemVT = SN->getMemoryVT();
9337     unsigned Alignment = SN->getAlignment();
9338 
9339     // If this store is properly aligned, then it is legal.
9340     if (Alignment >= MemVT.getStoreSize())
9341       return Op;
9342 
9343     EVT ScalarVT = Value.getValueType().getScalarType(),
9344         ScalarMemVT = MemVT.getScalarType();
9345     unsigned Stride = ScalarMemVT.getStoreSize();
9346 
9347     SDValue Stores[4];
9348     for (unsigned Idx = 0; Idx < 4; ++Idx) {
9349       SDValue Ex = DAG.getNode(
9350           ISD::EXTRACT_VECTOR_ELT, dl, ScalarVT, Value,
9351           DAG.getConstant(Idx, dl, getVectorIdxTy(DAG.getDataLayout())));
9352       SDValue Store;
9353       if (ScalarVT != ScalarMemVT)
9354         Store =
9355             DAG.getTruncStore(StoreChain, dl, Ex, BasePtr,
9356                               SN->getPointerInfo().getWithOffset(Idx * Stride),
9357                               ScalarMemVT, MinAlign(Alignment, Idx * Stride),
9358                               SN->getMemOperand()->getFlags(), SN->getAAInfo());
9359       else
9360         Store = DAG.getStore(StoreChain, dl, Ex, BasePtr,
9361                              SN->getPointerInfo().getWithOffset(Idx * Stride),
9362                              MinAlign(Alignment, Idx * Stride),
9363                              SN->getMemOperand()->getFlags(), SN->getAAInfo());
9364 
9365       if (Idx == 0 && SN->isIndexed()) {
9366         assert(SN->getAddressingMode() == ISD::PRE_INC &&
9367                "Unknown addressing mode on vector store");
9368         Store = DAG.getIndexedStore(Store, dl, BasePtr, SN->getOffset(),
9369                                     SN->getAddressingMode());
9370       }
9371 
9372       BasePtr = DAG.getNode(ISD::ADD, dl, BasePtr.getValueType(), BasePtr,
9373                             DAG.getConstant(Stride, dl,
9374                                             BasePtr.getValueType()));
9375       Stores[Idx] = Store;
9376     }
9377 
9378     SDValue TF =  DAG.getNode(ISD::TokenFactor, dl, MVT::Other, Stores);
9379 
9380     if (SN->isIndexed()) {
9381       SDValue RetOps[] = { TF, Stores[0].getValue(1) };
9382       return DAG.getMergeValues(RetOps, dl);
9383     }
9384 
9385     return TF;
9386   }
9387 
9388   assert(SN->isUnindexed() && "Indexed v4i1 stores are not supported");
9389   assert(Value.getValueType() == MVT::v4i1 && "Unknown store to lower");
9390 
9391   // The values are now known to be -1 (false) or 1 (true). To convert this
9392   // into 0 (false) and 1 (true), add 1 and then divide by 2 (multiply by 0.5).
9393   // This can be done with an fma and the 0.5 constant: (V+1.0)*0.5 = 0.5*V+0.5
9394   Value = DAG.getNode(PPCISD::QBFLT, dl, MVT::v4f64, Value);
9395 
9396   // FIXME: We can make this an f32 vector, but the BUILD_VECTOR code needs to
9397   // understand how to form the extending load.
9398   SDValue FPHalfs = DAG.getConstantFP(0.5, dl, MVT::v4f64);
9399 
9400   Value = DAG.getNode(ISD::FMA, dl, MVT::v4f64, Value, FPHalfs, FPHalfs);
9401 
9402   // Now convert to an integer and store.
9403   Value = DAG.getNode(ISD::INTRINSIC_WO_CHAIN, dl, MVT::v4f64,
9404     DAG.getConstant(Intrinsic::ppc_qpx_qvfctiwu, dl, MVT::i32),
9405     Value);
9406 
9407   MachineFrameInfo &MFI = DAG.getMachineFunction().getFrameInfo();
9408   int FrameIdx = MFI.CreateStackObject(16, 16, false);
9409   MachinePointerInfo PtrInfo =
9410       MachinePointerInfo::getFixedStack(DAG.getMachineFunction(), FrameIdx);
9411   EVT PtrVT = getPointerTy(DAG.getDataLayout());
9412   SDValue FIdx = DAG.getFrameIndex(FrameIdx, PtrVT);
9413 
9414   SDValue Ops[] = {StoreChain,
9415                    DAG.getConstant(Intrinsic::ppc_qpx_qvstfiw, dl, MVT::i32),
9416                    Value, FIdx};
9417   SDVTList VTs = DAG.getVTList(/*chain*/ MVT::Other);
9418 
9419   StoreChain = DAG.getMemIntrinsicNode(ISD::INTRINSIC_VOID,
9420     dl, VTs, Ops, MVT::v4i32, PtrInfo);
9421 
9422   // Move data into the byte array.
9423   SDValue Loads[4], LoadChains[4];
9424   for (unsigned i = 0; i < 4; ++i) {
9425     unsigned Offset = 4*i;
9426     SDValue Idx = DAG.getConstant(Offset, dl, FIdx.getValueType());
9427     Idx = DAG.getNode(ISD::ADD, dl, FIdx.getValueType(), FIdx, Idx);
9428 
9429     Loads[i] = DAG.getLoad(MVT::i32, dl, StoreChain, Idx,
9430                            PtrInfo.getWithOffset(Offset));
9431     LoadChains[i] = Loads[i].getValue(1);
9432   }
9433 
9434   StoreChain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, LoadChains);
9435 
9436   SDValue Stores[4];
9437   for (unsigned i = 0; i < 4; ++i) {
9438     SDValue Idx = DAG.getConstant(i, dl, BasePtr.getValueType());
9439     Idx = DAG.getNode(ISD::ADD, dl, BasePtr.getValueType(), BasePtr, Idx);
9440 
9441     Stores[i] = DAG.getTruncStore(
9442         StoreChain, dl, Loads[i], Idx, SN->getPointerInfo().getWithOffset(i),
9443         MVT::i8, /* Alignment = */ 1, SN->getMemOperand()->getFlags(),
9444         SN->getAAInfo());
9445   }
9446 
9447   StoreChain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, Stores);
9448 
9449   return StoreChain;
9450 }
9451 
9452 SDValue PPCTargetLowering::LowerMUL(SDValue Op, SelectionDAG &DAG) const {
9453   SDLoc dl(Op);
9454   if (Op.getValueType() == MVT::v4i32) {
9455     SDValue LHS = Op.getOperand(0), RHS = Op.getOperand(1);
9456 
9457     SDValue Zero  = BuildSplatI(  0, 1, MVT::v4i32, DAG, dl);
9458     SDValue Neg16 = BuildSplatI(-16, 4, MVT::v4i32, DAG, dl);//+16 as shift amt.
9459 
9460     SDValue RHSSwap =   // = vrlw RHS, 16
9461       BuildIntrinsicOp(Intrinsic::ppc_altivec_vrlw, RHS, Neg16, DAG, dl);
9462 
9463     // Shrinkify inputs to v8i16.
9464     LHS = DAG.getNode(ISD::BITCAST, dl, MVT::v8i16, LHS);
9465     RHS = DAG.getNode(ISD::BITCAST, dl, MVT::v8i16, RHS);
9466     RHSSwap = DAG.getNode(ISD::BITCAST, dl, MVT::v8i16, RHSSwap);
9467 
9468     // Low parts multiplied together, generating 32-bit results (we ignore the
9469     // top parts).
9470     SDValue LoProd = BuildIntrinsicOp(Intrinsic::ppc_altivec_vmulouh,
9471                                         LHS, RHS, DAG, dl, MVT::v4i32);
9472 
9473     SDValue HiProd = BuildIntrinsicOp(Intrinsic::ppc_altivec_vmsumuhm,
9474                                       LHS, RHSSwap, Zero, DAG, dl, MVT::v4i32);
9475     // Shift the high parts up 16 bits.
9476     HiProd = BuildIntrinsicOp(Intrinsic::ppc_altivec_vslw, HiProd,
9477                               Neg16, DAG, dl);
9478     return DAG.getNode(ISD::ADD, dl, MVT::v4i32, LoProd, HiProd);
9479   } else if (Op.getValueType() == MVT::v8i16) {
9480     SDValue LHS = Op.getOperand(0), RHS = Op.getOperand(1);
9481 
9482     SDValue Zero = BuildSplatI(0, 1, MVT::v8i16, DAG, dl);
9483 
9484     return BuildIntrinsicOp(Intrinsic::ppc_altivec_vmladduhm,
9485                             LHS, RHS, Zero, DAG, dl);
9486   } else if (Op.getValueType() == MVT::v16i8) {
9487     SDValue LHS = Op.getOperand(0), RHS = Op.getOperand(1);
9488     bool isLittleEndian = Subtarget.isLittleEndian();
9489 
9490     // Multiply the even 8-bit parts, producing 16-bit sums.
9491     SDValue EvenParts = BuildIntrinsicOp(Intrinsic::ppc_altivec_vmuleub,
9492                                            LHS, RHS, DAG, dl, MVT::v8i16);
9493     EvenParts = DAG.getNode(ISD::BITCAST, dl, MVT::v16i8, EvenParts);
9494 
9495     // Multiply the odd 8-bit parts, producing 16-bit sums.
9496     SDValue OddParts = BuildIntrinsicOp(Intrinsic::ppc_altivec_vmuloub,
9497                                           LHS, RHS, DAG, dl, MVT::v8i16);
9498     OddParts = DAG.getNode(ISD::BITCAST, dl, MVT::v16i8, OddParts);
9499 
9500     // Merge the results together.  Because vmuleub and vmuloub are
9501     // instructions with a big-endian bias, we must reverse the
9502     // element numbering and reverse the meaning of "odd" and "even"
9503     // when generating little endian code.
9504     int Ops[16];
9505     for (unsigned i = 0; i != 8; ++i) {
9506       if (isLittleEndian) {
9507         Ops[i*2  ] = 2*i;
9508         Ops[i*2+1] = 2*i+16;
9509       } else {
9510         Ops[i*2  ] = 2*i+1;
9511         Ops[i*2+1] = 2*i+1+16;
9512       }
9513     }
9514     if (isLittleEndian)
9515       return DAG.getVectorShuffle(MVT::v16i8, dl, OddParts, EvenParts, Ops);
9516     else
9517       return DAG.getVectorShuffle(MVT::v16i8, dl, EvenParts, OddParts, Ops);
9518   } else {
9519     llvm_unreachable("Unknown mul to lower!");
9520   }
9521 }
9522 
9523 /// LowerOperation - Provide custom lowering hooks for some operations.
9524 ///
9525 SDValue PPCTargetLowering::LowerOperation(SDValue Op, SelectionDAG &DAG) const {
9526   switch (Op.getOpcode()) {
9527   default: llvm_unreachable("Wasn't expecting to be able to lower this!");
9528   case ISD::ConstantPool:       return LowerConstantPool(Op, DAG);
9529   case ISD::BlockAddress:       return LowerBlockAddress(Op, DAG);
9530   case ISD::GlobalAddress:      return LowerGlobalAddress(Op, DAG);
9531   case ISD::GlobalTLSAddress:   return LowerGlobalTLSAddress(Op, DAG);
9532   case ISD::JumpTable:          return LowerJumpTable(Op, DAG);
9533   case ISD::SETCC:              return LowerSETCC(Op, DAG);
9534   case ISD::INIT_TRAMPOLINE:    return LowerINIT_TRAMPOLINE(Op, DAG);
9535   case ISD::ADJUST_TRAMPOLINE:  return LowerADJUST_TRAMPOLINE(Op, DAG);
9536 
9537   // Variable argument lowering.
9538   case ISD::VASTART:            return LowerVASTART(Op, DAG);
9539   case ISD::VAARG:              return LowerVAARG(Op, DAG);
9540   case ISD::VACOPY:             return LowerVACOPY(Op, DAG);
9541 
9542   case ISD::STACKRESTORE:       return LowerSTACKRESTORE(Op, DAG);
9543   case ISD::DYNAMIC_STACKALLOC: return LowerDYNAMIC_STACKALLOC(Op, DAG);
9544   case ISD::GET_DYNAMIC_AREA_OFFSET:
9545     return LowerGET_DYNAMIC_AREA_OFFSET(Op, DAG);
9546 
9547   // Exception handling lowering.
9548   case ISD::EH_DWARF_CFA:       return LowerEH_DWARF_CFA(Op, DAG);
9549   case ISD::EH_SJLJ_SETJMP:     return lowerEH_SJLJ_SETJMP(Op, DAG);
9550   case ISD::EH_SJLJ_LONGJMP:    return lowerEH_SJLJ_LONGJMP(Op, DAG);
9551 
9552   case ISD::LOAD:               return LowerLOAD(Op, DAG);
9553   case ISD::STORE:              return LowerSTORE(Op, DAG);
9554   case ISD::TRUNCATE:           return LowerTRUNCATE(Op, DAG);
9555   case ISD::SELECT_CC:          return LowerSELECT_CC(Op, DAG);
9556   case ISD::FP_TO_UINT:
9557   case ISD::FP_TO_SINT:         return LowerFP_TO_INT(Op, DAG, SDLoc(Op));
9558   case ISD::UINT_TO_FP:
9559   case ISD::SINT_TO_FP:         return LowerINT_TO_FP(Op, DAG);
9560   case ISD::FLT_ROUNDS_:        return LowerFLT_ROUNDS_(Op, DAG);
9561 
9562   // Lower 64-bit shifts.
9563   case ISD::SHL_PARTS:          return LowerSHL_PARTS(Op, DAG);
9564   case ISD::SRL_PARTS:          return LowerSRL_PARTS(Op, DAG);
9565   case ISD::SRA_PARTS:          return LowerSRA_PARTS(Op, DAG);
9566 
9567   // Vector-related lowering.
9568   case ISD::BUILD_VECTOR:       return LowerBUILD_VECTOR(Op, DAG);
9569   case ISD::VECTOR_SHUFFLE:     return LowerVECTOR_SHUFFLE(Op, DAG);
9570   case ISD::INTRINSIC_WO_CHAIN: return LowerINTRINSIC_WO_CHAIN(Op, DAG);
9571   case ISD::SCALAR_TO_VECTOR:   return LowerSCALAR_TO_VECTOR(Op, DAG);
9572   case ISD::SIGN_EXTEND_INREG:  return LowerSIGN_EXTEND_INREG(Op, DAG);
9573   case ISD::EXTRACT_VECTOR_ELT: return LowerEXTRACT_VECTOR_ELT(Op, DAG);
9574   case ISD::INSERT_VECTOR_ELT:  return LowerINSERT_VECTOR_ELT(Op, DAG);
9575   case ISD::MUL:                return LowerMUL(Op, DAG);
9576 
9577   // For counter-based loop handling.
9578   case ISD::INTRINSIC_W_CHAIN:  return SDValue();
9579 
9580   case ISD::BITCAST:            return LowerBITCAST(Op, DAG);
9581 
9582   // Frame & Return address.
9583   case ISD::RETURNADDR:         return LowerRETURNADDR(Op, DAG);
9584   case ISD::FRAMEADDR:          return LowerFRAMEADDR(Op, DAG);
9585 
9586   case ISD::INTRINSIC_VOID:
9587     return LowerINTRINSIC_VOID(Op, DAG);
9588   case ISD::SREM:
9589   case ISD::UREM:
9590     return LowerREM(Op, DAG);
9591   case ISD::BSWAP:
9592     return LowerBSWAP(Op, DAG);
9593   case ISD::ATOMIC_CMP_SWAP:
9594     return LowerATOMIC_CMP_SWAP(Op, DAG);
9595   }
9596 }
9597 
9598 void PPCTargetLowering::ReplaceNodeResults(SDNode *N,
9599                                            SmallVectorImpl<SDValue>&Results,
9600                                            SelectionDAG &DAG) const {
9601   SDLoc dl(N);
9602   switch (N->getOpcode()) {
9603   default:
9604     llvm_unreachable("Do not know how to custom type legalize this operation!");
9605   case ISD::READCYCLECOUNTER: {
9606     SDVTList VTs = DAG.getVTList(MVT::i32, MVT::i32, MVT::Other);
9607     SDValue RTB = DAG.getNode(PPCISD::READ_TIME_BASE, dl, VTs, N->getOperand(0));
9608 
9609     Results.push_back(RTB);
9610     Results.push_back(RTB.getValue(1));
9611     Results.push_back(RTB.getValue(2));
9612     break;
9613   }
9614   case ISD::INTRINSIC_W_CHAIN: {
9615     if (cast<ConstantSDNode>(N->getOperand(1))->getZExtValue() !=
9616         Intrinsic::ppc_is_decremented_ctr_nonzero)
9617       break;
9618 
9619     assert(N->getValueType(0) == MVT::i1 &&
9620            "Unexpected result type for CTR decrement intrinsic");
9621     EVT SVT = getSetCCResultType(DAG.getDataLayout(), *DAG.getContext(),
9622                                  N->getValueType(0));
9623     SDVTList VTs = DAG.getVTList(SVT, MVT::Other);
9624     SDValue NewInt = DAG.getNode(N->getOpcode(), dl, VTs, N->getOperand(0),
9625                                  N->getOperand(1));
9626 
9627     Results.push_back(DAG.getNode(ISD::TRUNCATE, dl, MVT::i1, NewInt));
9628     Results.push_back(NewInt.getValue(1));
9629     break;
9630   }
9631   case ISD::VAARG: {
9632     if (!Subtarget.isSVR4ABI() || Subtarget.isPPC64())
9633       return;
9634 
9635     EVT VT = N->getValueType(0);
9636 
9637     if (VT == MVT::i64) {
9638       SDValue NewNode = LowerVAARG(SDValue(N, 1), DAG);
9639 
9640       Results.push_back(NewNode);
9641       Results.push_back(NewNode.getValue(1));
9642     }
9643     return;
9644   }
9645   case ISD::FP_TO_SINT:
9646   case ISD::FP_TO_UINT:
9647     // LowerFP_TO_INT() can only handle f32 and f64.
9648     if (N->getOperand(0).getValueType() == MVT::ppcf128)
9649       return;
9650     Results.push_back(LowerFP_TO_INT(SDValue(N, 0), DAG, dl));
9651     return;
9652   }
9653 }
9654 
9655 //===----------------------------------------------------------------------===//
9656 //  Other Lowering Code
9657 //===----------------------------------------------------------------------===//
9658 
9659 static Instruction* callIntrinsic(IRBuilder<> &Builder, Intrinsic::ID Id) {
9660   Module *M = Builder.GetInsertBlock()->getParent()->getParent();
9661   Function *Func = Intrinsic::getDeclaration(M, Id);
9662   return Builder.CreateCall(Func, {});
9663 }
9664 
9665 // The mappings for emitLeading/TrailingFence is taken from
9666 // http://www.cl.cam.ac.uk/~pes20/cpp/cpp0xmappings.html
9667 Instruction *PPCTargetLowering::emitLeadingFence(IRBuilder<> &Builder,
9668                                                  Instruction *Inst,
9669                                                  AtomicOrdering Ord) const {
9670   if (Ord == AtomicOrdering::SequentiallyConsistent)
9671     return callIntrinsic(Builder, Intrinsic::ppc_sync);
9672   if (isReleaseOrStronger(Ord))
9673     return callIntrinsic(Builder, Intrinsic::ppc_lwsync);
9674   return nullptr;
9675 }
9676 
9677 Instruction *PPCTargetLowering::emitTrailingFence(IRBuilder<> &Builder,
9678                                                   Instruction *Inst,
9679                                                   AtomicOrdering Ord) const {
9680   if (Inst->hasAtomicLoad() && isAcquireOrStronger(Ord)) {
9681     // See http://www.cl.cam.ac.uk/~pes20/cpp/cpp0xmappings.html and
9682     // http://www.rdrop.com/users/paulmck/scalability/paper/N2745r.2011.03.04a.html
9683     // and http://www.cl.cam.ac.uk/~pes20/cppppc/ for justification.
9684     if (isa<LoadInst>(Inst) && Subtarget.isPPC64())
9685       return Builder.CreateCall(
9686           Intrinsic::getDeclaration(
9687               Builder.GetInsertBlock()->getParent()->getParent(),
9688               Intrinsic::ppc_cfence, {Inst->getType()}),
9689           {Inst});
9690     // FIXME: Can use isync for rmw operation.
9691     return callIntrinsic(Builder, Intrinsic::ppc_lwsync);
9692   }
9693   return nullptr;
9694 }
9695 
9696 MachineBasicBlock *
9697 PPCTargetLowering::EmitAtomicBinary(MachineInstr &MI, MachineBasicBlock *BB,
9698                                     unsigned AtomicSize,
9699                                     unsigned BinOpcode,
9700                                     unsigned CmpOpcode,
9701                                     unsigned CmpPred) const {
9702   // This also handles ATOMIC_SWAP, indicated by BinOpcode==0.
9703   const TargetInstrInfo *TII = Subtarget.getInstrInfo();
9704 
9705   auto LoadMnemonic = PPC::LDARX;
9706   auto StoreMnemonic = PPC::STDCX;
9707   switch (AtomicSize) {
9708   default:
9709     llvm_unreachable("Unexpected size of atomic entity");
9710   case 1:
9711     LoadMnemonic = PPC::LBARX;
9712     StoreMnemonic = PPC::STBCX;
9713     assert(Subtarget.hasPartwordAtomics() && "Call this only with size >=4");
9714     break;
9715   case 2:
9716     LoadMnemonic = PPC::LHARX;
9717     StoreMnemonic = PPC::STHCX;
9718     assert(Subtarget.hasPartwordAtomics() && "Call this only with size >=4");
9719     break;
9720   case 4:
9721     LoadMnemonic = PPC::LWARX;
9722     StoreMnemonic = PPC::STWCX;
9723     break;
9724   case 8:
9725     LoadMnemonic = PPC::LDARX;
9726     StoreMnemonic = PPC::STDCX;
9727     break;
9728   }
9729 
9730   const BasicBlock *LLVM_BB = BB->getBasicBlock();
9731   MachineFunction *F = BB->getParent();
9732   MachineFunction::iterator It = ++BB->getIterator();
9733 
9734   unsigned dest = MI.getOperand(0).getReg();
9735   unsigned ptrA = MI.getOperand(1).getReg();
9736   unsigned ptrB = MI.getOperand(2).getReg();
9737   unsigned incr = MI.getOperand(3).getReg();
9738   DebugLoc dl = MI.getDebugLoc();
9739 
9740   MachineBasicBlock *loopMBB = F->CreateMachineBasicBlock(LLVM_BB);
9741   MachineBasicBlock *loop2MBB =
9742     CmpOpcode ? F->CreateMachineBasicBlock(LLVM_BB) : nullptr;
9743   MachineBasicBlock *exitMBB = F->CreateMachineBasicBlock(LLVM_BB);
9744   F->insert(It, loopMBB);
9745   if (CmpOpcode)
9746     F->insert(It, loop2MBB);
9747   F->insert(It, exitMBB);
9748   exitMBB->splice(exitMBB->begin(), BB,
9749                   std::next(MachineBasicBlock::iterator(MI)), BB->end());
9750   exitMBB->transferSuccessorsAndUpdatePHIs(BB);
9751 
9752   MachineRegisterInfo &RegInfo = F->getRegInfo();
9753   unsigned TmpReg = (!BinOpcode) ? incr :
9754     RegInfo.createVirtualRegister( AtomicSize == 8 ? &PPC::G8RCRegClass
9755                                            : &PPC::GPRCRegClass);
9756 
9757   //  thisMBB:
9758   //   ...
9759   //   fallthrough --> loopMBB
9760   BB->addSuccessor(loopMBB);
9761 
9762   //  loopMBB:
9763   //   l[wd]arx dest, ptr
9764   //   add r0, dest, incr
9765   //   st[wd]cx. r0, ptr
9766   //   bne- loopMBB
9767   //   fallthrough --> exitMBB
9768 
9769   // For max/min...
9770   //  loopMBB:
9771   //   l[wd]arx dest, ptr
9772   //   cmpl?[wd] incr, dest
9773   //   bgt exitMBB
9774   //  loop2MBB:
9775   //   st[wd]cx. dest, ptr
9776   //   bne- loopMBB
9777   //   fallthrough --> exitMBB
9778 
9779   BB = loopMBB;
9780   BuildMI(BB, dl, TII->get(LoadMnemonic), dest)
9781     .addReg(ptrA).addReg(ptrB);
9782   if (BinOpcode)
9783     BuildMI(BB, dl, TII->get(BinOpcode), TmpReg).addReg(incr).addReg(dest);
9784   if (CmpOpcode) {
9785     // Signed comparisons of byte or halfword values must be sign-extended.
9786     if (CmpOpcode == PPC::CMPW && AtomicSize < 4) {
9787       unsigned ExtReg =  RegInfo.createVirtualRegister(&PPC::GPRCRegClass);
9788       BuildMI(BB, dl, TII->get(AtomicSize == 1 ? PPC::EXTSB : PPC::EXTSH),
9789               ExtReg).addReg(dest);
9790       BuildMI(BB, dl, TII->get(CmpOpcode), PPC::CR0)
9791         .addReg(incr).addReg(ExtReg);
9792     } else
9793       BuildMI(BB, dl, TII->get(CmpOpcode), PPC::CR0)
9794         .addReg(incr).addReg(dest);
9795 
9796     BuildMI(BB, dl, TII->get(PPC::BCC))
9797       .addImm(CmpPred).addReg(PPC::CR0).addMBB(exitMBB);
9798     BB->addSuccessor(loop2MBB);
9799     BB->addSuccessor(exitMBB);
9800     BB = loop2MBB;
9801   }
9802   BuildMI(BB, dl, TII->get(StoreMnemonic))
9803     .addReg(TmpReg).addReg(ptrA).addReg(ptrB);
9804   BuildMI(BB, dl, TII->get(PPC::BCC))
9805     .addImm(PPC::PRED_NE).addReg(PPC::CR0).addMBB(loopMBB);
9806   BB->addSuccessor(loopMBB);
9807   BB->addSuccessor(exitMBB);
9808 
9809   //  exitMBB:
9810   //   ...
9811   BB = exitMBB;
9812   return BB;
9813 }
9814 
9815 MachineBasicBlock *
9816 PPCTargetLowering::EmitPartwordAtomicBinary(MachineInstr &MI,
9817                                             MachineBasicBlock *BB,
9818                                             bool is8bit, // operation
9819                                             unsigned BinOpcode,
9820                                             unsigned CmpOpcode,
9821                                             unsigned CmpPred) const {
9822   // If we support part-word atomic mnemonics, just use them
9823   if (Subtarget.hasPartwordAtomics())
9824     return EmitAtomicBinary(MI, BB, is8bit ? 1 : 2, BinOpcode,
9825                             CmpOpcode, CmpPred);
9826 
9827   // This also handles ATOMIC_SWAP, indicated by BinOpcode==0.
9828   const TargetInstrInfo *TII = Subtarget.getInstrInfo();
9829   // In 64 bit mode we have to use 64 bits for addresses, even though the
9830   // lwarx/stwcx are 32 bits.  With the 32-bit atomics we can use address
9831   // registers without caring whether they're 32 or 64, but here we're
9832   // doing actual arithmetic on the addresses.
9833   bool is64bit = Subtarget.isPPC64();
9834   bool isLittleEndian = Subtarget.isLittleEndian();
9835   unsigned ZeroReg = is64bit ? PPC::ZERO8 : PPC::ZERO;
9836 
9837   const BasicBlock *LLVM_BB = BB->getBasicBlock();
9838   MachineFunction *F = BB->getParent();
9839   MachineFunction::iterator It = ++BB->getIterator();
9840 
9841   unsigned dest = MI.getOperand(0).getReg();
9842   unsigned ptrA = MI.getOperand(1).getReg();
9843   unsigned ptrB = MI.getOperand(2).getReg();
9844   unsigned incr = MI.getOperand(3).getReg();
9845   DebugLoc dl = MI.getDebugLoc();
9846 
9847   MachineBasicBlock *loopMBB = F->CreateMachineBasicBlock(LLVM_BB);
9848   MachineBasicBlock *loop2MBB =
9849     CmpOpcode ? F->CreateMachineBasicBlock(LLVM_BB) : nullptr;
9850   MachineBasicBlock *exitMBB = F->CreateMachineBasicBlock(LLVM_BB);
9851   F->insert(It, loopMBB);
9852   if (CmpOpcode)
9853     F->insert(It, loop2MBB);
9854   F->insert(It, exitMBB);
9855   exitMBB->splice(exitMBB->begin(), BB,
9856                   std::next(MachineBasicBlock::iterator(MI)), BB->end());
9857   exitMBB->transferSuccessorsAndUpdatePHIs(BB);
9858 
9859   MachineRegisterInfo &RegInfo = F->getRegInfo();
9860   const TargetRegisterClass *RC = is64bit ? &PPC::G8RCRegClass
9861                                           : &PPC::GPRCRegClass;
9862   unsigned PtrReg = RegInfo.createVirtualRegister(RC);
9863   unsigned Shift1Reg = RegInfo.createVirtualRegister(RC);
9864   unsigned ShiftReg =
9865     isLittleEndian ? Shift1Reg : RegInfo.createVirtualRegister(RC);
9866   unsigned Incr2Reg = RegInfo.createVirtualRegister(RC);
9867   unsigned MaskReg = RegInfo.createVirtualRegister(RC);
9868   unsigned Mask2Reg = RegInfo.createVirtualRegister(RC);
9869   unsigned Mask3Reg = RegInfo.createVirtualRegister(RC);
9870   unsigned Tmp2Reg = RegInfo.createVirtualRegister(RC);
9871   unsigned Tmp3Reg = RegInfo.createVirtualRegister(RC);
9872   unsigned Tmp4Reg = RegInfo.createVirtualRegister(RC);
9873   unsigned TmpDestReg = RegInfo.createVirtualRegister(RC);
9874   unsigned Ptr1Reg;
9875   unsigned TmpReg = (!BinOpcode) ? Incr2Reg : RegInfo.createVirtualRegister(RC);
9876 
9877   //  thisMBB:
9878   //   ...
9879   //   fallthrough --> loopMBB
9880   BB->addSuccessor(loopMBB);
9881 
9882   // The 4-byte load must be aligned, while a char or short may be
9883   // anywhere in the word.  Hence all this nasty bookkeeping code.
9884   //   add ptr1, ptrA, ptrB [copy if ptrA==0]
9885   //   rlwinm shift1, ptr1, 3, 27, 28 [3, 27, 27]
9886   //   xori shift, shift1, 24 [16]
9887   //   rlwinm ptr, ptr1, 0, 0, 29
9888   //   slw incr2, incr, shift
9889   //   li mask2, 255 [li mask3, 0; ori mask2, mask3, 65535]
9890   //   slw mask, mask2, shift
9891   //  loopMBB:
9892   //   lwarx tmpDest, ptr
9893   //   add tmp, tmpDest, incr2
9894   //   andc tmp2, tmpDest, mask
9895   //   and tmp3, tmp, mask
9896   //   or tmp4, tmp3, tmp2
9897   //   stwcx. tmp4, ptr
9898   //   bne- loopMBB
9899   //   fallthrough --> exitMBB
9900   //   srw dest, tmpDest, shift
9901   if (ptrA != ZeroReg) {
9902     Ptr1Reg = RegInfo.createVirtualRegister(RC);
9903     BuildMI(BB, dl, TII->get(is64bit ? PPC::ADD8 : PPC::ADD4), Ptr1Reg)
9904       .addReg(ptrA).addReg(ptrB);
9905   } else {
9906     Ptr1Reg = ptrB;
9907   }
9908   BuildMI(BB, dl, TII->get(PPC::RLWINM), Shift1Reg).addReg(Ptr1Reg)
9909       .addImm(3).addImm(27).addImm(is8bit ? 28 : 27);
9910   if (!isLittleEndian)
9911     BuildMI(BB, dl, TII->get(is64bit ? PPC::XORI8 : PPC::XORI), ShiftReg)
9912         .addReg(Shift1Reg).addImm(is8bit ? 24 : 16);
9913   if (is64bit)
9914     BuildMI(BB, dl, TII->get(PPC::RLDICR), PtrReg)
9915       .addReg(Ptr1Reg).addImm(0).addImm(61);
9916   else
9917     BuildMI(BB, dl, TII->get(PPC::RLWINM), PtrReg)
9918       .addReg(Ptr1Reg).addImm(0).addImm(0).addImm(29);
9919   BuildMI(BB, dl, TII->get(PPC::SLW), Incr2Reg)
9920       .addReg(incr).addReg(ShiftReg);
9921   if (is8bit)
9922     BuildMI(BB, dl, TII->get(PPC::LI), Mask2Reg).addImm(255);
9923   else {
9924     BuildMI(BB, dl, TII->get(PPC::LI), Mask3Reg).addImm(0);
9925     BuildMI(BB, dl, TII->get(PPC::ORI),Mask2Reg).addReg(Mask3Reg).addImm(65535);
9926   }
9927   BuildMI(BB, dl, TII->get(PPC::SLW), MaskReg)
9928       .addReg(Mask2Reg).addReg(ShiftReg);
9929 
9930   BB = loopMBB;
9931   BuildMI(BB, dl, TII->get(PPC::LWARX), TmpDestReg)
9932     .addReg(ZeroReg).addReg(PtrReg);
9933   if (BinOpcode)
9934     BuildMI(BB, dl, TII->get(BinOpcode), TmpReg)
9935       .addReg(Incr2Reg).addReg(TmpDestReg);
9936   BuildMI(BB, dl, TII->get(is64bit ? PPC::ANDC8 : PPC::ANDC), Tmp2Reg)
9937     .addReg(TmpDestReg).addReg(MaskReg);
9938   BuildMI(BB, dl, TII->get(is64bit ? PPC::AND8 : PPC::AND), Tmp3Reg)
9939     .addReg(TmpReg).addReg(MaskReg);
9940   if (CmpOpcode) {
9941     // For unsigned comparisons, we can directly compare the shifted values.
9942     // For signed comparisons we shift and sign extend.
9943     unsigned SReg = RegInfo.createVirtualRegister(RC);
9944     BuildMI(BB, dl, TII->get(is64bit ? PPC::AND8 : PPC::AND), SReg)
9945       .addReg(TmpDestReg).addReg(MaskReg);
9946     unsigned ValueReg = SReg;
9947     unsigned CmpReg = Incr2Reg;
9948     if (CmpOpcode == PPC::CMPW) {
9949       ValueReg = RegInfo.createVirtualRegister(RC);
9950       BuildMI(BB, dl, TII->get(PPC::SRW), ValueReg)
9951         .addReg(SReg).addReg(ShiftReg);
9952       unsigned ValueSReg = RegInfo.createVirtualRegister(RC);
9953       BuildMI(BB, dl, TII->get(is8bit ? PPC::EXTSB : PPC::EXTSH), ValueSReg)
9954         .addReg(ValueReg);
9955       ValueReg = ValueSReg;
9956       CmpReg = incr;
9957     }
9958     BuildMI(BB, dl, TII->get(CmpOpcode), PPC::CR0)
9959       .addReg(CmpReg).addReg(ValueReg);
9960     BuildMI(BB, dl, TII->get(PPC::BCC))
9961       .addImm(CmpPred).addReg(PPC::CR0).addMBB(exitMBB);
9962     BB->addSuccessor(loop2MBB);
9963     BB->addSuccessor(exitMBB);
9964     BB = loop2MBB;
9965   }
9966   BuildMI(BB, dl, TII->get(is64bit ? PPC::OR8 : PPC::OR), Tmp4Reg)
9967     .addReg(Tmp3Reg).addReg(Tmp2Reg);
9968   BuildMI(BB, dl, TII->get(PPC::STWCX))
9969     .addReg(Tmp4Reg).addReg(ZeroReg).addReg(PtrReg);
9970   BuildMI(BB, dl, TII->get(PPC::BCC))
9971     .addImm(PPC::PRED_NE).addReg(PPC::CR0).addMBB(loopMBB);
9972   BB->addSuccessor(loopMBB);
9973   BB->addSuccessor(exitMBB);
9974 
9975   //  exitMBB:
9976   //   ...
9977   BB = exitMBB;
9978   BuildMI(*BB, BB->begin(), dl, TII->get(PPC::SRW), dest).addReg(TmpDestReg)
9979     .addReg(ShiftReg);
9980   return BB;
9981 }
9982 
9983 llvm::MachineBasicBlock *
9984 PPCTargetLowering::emitEHSjLjSetJmp(MachineInstr &MI,
9985                                     MachineBasicBlock *MBB) const {
9986   DebugLoc DL = MI.getDebugLoc();
9987   const TargetInstrInfo *TII = Subtarget.getInstrInfo();
9988   const PPCRegisterInfo *TRI = Subtarget.getRegisterInfo();
9989 
9990   MachineFunction *MF = MBB->getParent();
9991   MachineRegisterInfo &MRI = MF->getRegInfo();
9992 
9993   const BasicBlock *BB = MBB->getBasicBlock();
9994   MachineFunction::iterator I = ++MBB->getIterator();
9995 
9996   unsigned DstReg = MI.getOperand(0).getReg();
9997   const TargetRegisterClass *RC = MRI.getRegClass(DstReg);
9998   assert(TRI->isTypeLegalForClass(*RC, MVT::i32) && "Invalid destination!");
9999   unsigned mainDstReg = MRI.createVirtualRegister(RC);
10000   unsigned restoreDstReg = MRI.createVirtualRegister(RC);
10001 
10002   MVT PVT = getPointerTy(MF->getDataLayout());
10003   assert((PVT == MVT::i64 || PVT == MVT::i32) &&
10004          "Invalid Pointer Size!");
10005   // For v = setjmp(buf), we generate
10006   //
10007   // thisMBB:
10008   //  SjLjSetup mainMBB
10009   //  bl mainMBB
10010   //  v_restore = 1
10011   //  b sinkMBB
10012   //
10013   // mainMBB:
10014   //  buf[LabelOffset] = LR
10015   //  v_main = 0
10016   //
10017   // sinkMBB:
10018   //  v = phi(main, restore)
10019   //
10020 
10021   MachineBasicBlock *thisMBB = MBB;
10022   MachineBasicBlock *mainMBB = MF->CreateMachineBasicBlock(BB);
10023   MachineBasicBlock *sinkMBB = MF->CreateMachineBasicBlock(BB);
10024   MF->insert(I, mainMBB);
10025   MF->insert(I, sinkMBB);
10026 
10027   MachineInstrBuilder MIB;
10028 
10029   // Transfer the remainder of BB and its successor edges to sinkMBB.
10030   sinkMBB->splice(sinkMBB->begin(), MBB,
10031                   std::next(MachineBasicBlock::iterator(MI)), MBB->end());
10032   sinkMBB->transferSuccessorsAndUpdatePHIs(MBB);
10033 
10034   // Note that the structure of the jmp_buf used here is not compatible
10035   // with that used by libc, and is not designed to be. Specifically, it
10036   // stores only those 'reserved' registers that LLVM does not otherwise
10037   // understand how to spill. Also, by convention, by the time this
10038   // intrinsic is called, Clang has already stored the frame address in the
10039   // first slot of the buffer and stack address in the third. Following the
10040   // X86 target code, we'll store the jump address in the second slot. We also
10041   // need to save the TOC pointer (R2) to handle jumps between shared
10042   // libraries, and that will be stored in the fourth slot. The thread
10043   // identifier (R13) is not affected.
10044 
10045   // thisMBB:
10046   const int64_t LabelOffset = 1 * PVT.getStoreSize();
10047   const int64_t TOCOffset   = 3 * PVT.getStoreSize();
10048   const int64_t BPOffset    = 4 * PVT.getStoreSize();
10049 
10050   // Prepare IP either in reg.
10051   const TargetRegisterClass *PtrRC = getRegClassFor(PVT);
10052   unsigned LabelReg = MRI.createVirtualRegister(PtrRC);
10053   unsigned BufReg = MI.getOperand(1).getReg();
10054 
10055   if (Subtarget.isPPC64() && Subtarget.isSVR4ABI()) {
10056     setUsesTOCBasePtr(*MBB->getParent());
10057     MIB = BuildMI(*thisMBB, MI, DL, TII->get(PPC::STD))
10058               .addReg(PPC::X2)
10059               .addImm(TOCOffset)
10060               .addReg(BufReg)
10061               .cloneMemRefs(MI);
10062   }
10063 
10064   // Naked functions never have a base pointer, and so we use r1. For all
10065   // other functions, this decision must be delayed until during PEI.
10066   unsigned BaseReg;
10067   if (MF->getFunction().hasFnAttribute(Attribute::Naked))
10068     BaseReg = Subtarget.isPPC64() ? PPC::X1 : PPC::R1;
10069   else
10070     BaseReg = Subtarget.isPPC64() ? PPC::BP8 : PPC::BP;
10071 
10072   MIB = BuildMI(*thisMBB, MI, DL,
10073                 TII->get(Subtarget.isPPC64() ? PPC::STD : PPC::STW))
10074             .addReg(BaseReg)
10075             .addImm(BPOffset)
10076             .addReg(BufReg)
10077             .cloneMemRefs(MI);
10078 
10079   // Setup
10080   MIB = BuildMI(*thisMBB, MI, DL, TII->get(PPC::BCLalways)).addMBB(mainMBB);
10081   MIB.addRegMask(TRI->getNoPreservedMask());
10082 
10083   BuildMI(*thisMBB, MI, DL, TII->get(PPC::LI), restoreDstReg).addImm(1);
10084 
10085   MIB = BuildMI(*thisMBB, MI, DL, TII->get(PPC::EH_SjLj_Setup))
10086           .addMBB(mainMBB);
10087   MIB = BuildMI(*thisMBB, MI, DL, TII->get(PPC::B)).addMBB(sinkMBB);
10088 
10089   thisMBB->addSuccessor(mainMBB, BranchProbability::getZero());
10090   thisMBB->addSuccessor(sinkMBB, BranchProbability::getOne());
10091 
10092   // mainMBB:
10093   //  mainDstReg = 0
10094   MIB =
10095       BuildMI(mainMBB, DL,
10096               TII->get(Subtarget.isPPC64() ? PPC::MFLR8 : PPC::MFLR), LabelReg);
10097 
10098   // Store IP
10099   if (Subtarget.isPPC64()) {
10100     MIB = BuildMI(mainMBB, DL, TII->get(PPC::STD))
10101             .addReg(LabelReg)
10102             .addImm(LabelOffset)
10103             .addReg(BufReg);
10104   } else {
10105     MIB = BuildMI(mainMBB, DL, TII->get(PPC::STW))
10106             .addReg(LabelReg)
10107             .addImm(LabelOffset)
10108             .addReg(BufReg);
10109   }
10110   MIB.cloneMemRefs(MI);
10111 
10112   BuildMI(mainMBB, DL, TII->get(PPC::LI), mainDstReg).addImm(0);
10113   mainMBB->addSuccessor(sinkMBB);
10114 
10115   // sinkMBB:
10116   BuildMI(*sinkMBB, sinkMBB->begin(), DL,
10117           TII->get(PPC::PHI), DstReg)
10118     .addReg(mainDstReg).addMBB(mainMBB)
10119     .addReg(restoreDstReg).addMBB(thisMBB);
10120 
10121   MI.eraseFromParent();
10122   return sinkMBB;
10123 }
10124 
10125 MachineBasicBlock *
10126 PPCTargetLowering::emitEHSjLjLongJmp(MachineInstr &MI,
10127                                      MachineBasicBlock *MBB) const {
10128   DebugLoc DL = MI.getDebugLoc();
10129   const TargetInstrInfo *TII = Subtarget.getInstrInfo();
10130 
10131   MachineFunction *MF = MBB->getParent();
10132   MachineRegisterInfo &MRI = MF->getRegInfo();
10133 
10134   MVT PVT = getPointerTy(MF->getDataLayout());
10135   assert((PVT == MVT::i64 || PVT == MVT::i32) &&
10136          "Invalid Pointer Size!");
10137 
10138   const TargetRegisterClass *RC =
10139     (PVT == MVT::i64) ? &PPC::G8RCRegClass : &PPC::GPRCRegClass;
10140   unsigned Tmp = MRI.createVirtualRegister(RC);
10141   // Since FP is only updated here but NOT referenced, it's treated as GPR.
10142   unsigned FP  = (PVT == MVT::i64) ? PPC::X31 : PPC::R31;
10143   unsigned SP  = (PVT == MVT::i64) ? PPC::X1 : PPC::R1;
10144   unsigned BP =
10145       (PVT == MVT::i64)
10146           ? PPC::X30
10147           : (Subtarget.isSVR4ABI() && isPositionIndependent() ? PPC::R29
10148                                                               : PPC::R30);
10149 
10150   MachineInstrBuilder MIB;
10151 
10152   const int64_t LabelOffset = 1 * PVT.getStoreSize();
10153   const int64_t SPOffset    = 2 * PVT.getStoreSize();
10154   const int64_t TOCOffset   = 3 * PVT.getStoreSize();
10155   const int64_t BPOffset    = 4 * PVT.getStoreSize();
10156 
10157   unsigned BufReg = MI.getOperand(0).getReg();
10158 
10159   // Reload FP (the jumped-to function may not have had a
10160   // frame pointer, and if so, then its r31 will be restored
10161   // as necessary).
10162   if (PVT == MVT::i64) {
10163     MIB = BuildMI(*MBB, MI, DL, TII->get(PPC::LD), FP)
10164             .addImm(0)
10165             .addReg(BufReg);
10166   } else {
10167     MIB = BuildMI(*MBB, MI, DL, TII->get(PPC::LWZ), FP)
10168             .addImm(0)
10169             .addReg(BufReg);
10170   }
10171   MIB.cloneMemRefs(MI);
10172 
10173   // Reload IP
10174   if (PVT == MVT::i64) {
10175     MIB = BuildMI(*MBB, MI, DL, TII->get(PPC::LD), Tmp)
10176             .addImm(LabelOffset)
10177             .addReg(BufReg);
10178   } else {
10179     MIB = BuildMI(*MBB, MI, DL, TII->get(PPC::LWZ), Tmp)
10180             .addImm(LabelOffset)
10181             .addReg(BufReg);
10182   }
10183   MIB.cloneMemRefs(MI);
10184 
10185   // Reload SP
10186   if (PVT == MVT::i64) {
10187     MIB = BuildMI(*MBB, MI, DL, TII->get(PPC::LD), SP)
10188             .addImm(SPOffset)
10189             .addReg(BufReg);
10190   } else {
10191     MIB = BuildMI(*MBB, MI, DL, TII->get(PPC::LWZ), SP)
10192             .addImm(SPOffset)
10193             .addReg(BufReg);
10194   }
10195   MIB.cloneMemRefs(MI);
10196 
10197   // Reload BP
10198   if (PVT == MVT::i64) {
10199     MIB = BuildMI(*MBB, MI, DL, TII->get(PPC::LD), BP)
10200             .addImm(BPOffset)
10201             .addReg(BufReg);
10202   } else {
10203     MIB = BuildMI(*MBB, MI, DL, TII->get(PPC::LWZ), BP)
10204             .addImm(BPOffset)
10205             .addReg(BufReg);
10206   }
10207   MIB.cloneMemRefs(MI);
10208 
10209   // Reload TOC
10210   if (PVT == MVT::i64 && Subtarget.isSVR4ABI()) {
10211     setUsesTOCBasePtr(*MBB->getParent());
10212     MIB = BuildMI(*MBB, MI, DL, TII->get(PPC::LD), PPC::X2)
10213               .addImm(TOCOffset)
10214               .addReg(BufReg)
10215               .cloneMemRefs(MI);
10216   }
10217 
10218   // Jump
10219   BuildMI(*MBB, MI, DL,
10220           TII->get(PVT == MVT::i64 ? PPC::MTCTR8 : PPC::MTCTR)).addReg(Tmp);
10221   BuildMI(*MBB, MI, DL, TII->get(PVT == MVT::i64 ? PPC::BCTR8 : PPC::BCTR));
10222 
10223   MI.eraseFromParent();
10224   return MBB;
10225 }
10226 
10227 MachineBasicBlock *
10228 PPCTargetLowering::EmitInstrWithCustomInserter(MachineInstr &MI,
10229                                                MachineBasicBlock *BB) const {
10230   if (MI.getOpcode() == TargetOpcode::STACKMAP ||
10231       MI.getOpcode() == TargetOpcode::PATCHPOINT) {
10232     if (Subtarget.isPPC64() && Subtarget.isSVR4ABI() &&
10233         MI.getOpcode() == TargetOpcode::PATCHPOINT) {
10234       // Call lowering should have added an r2 operand to indicate a dependence
10235       // on the TOC base pointer value. It can't however, because there is no
10236       // way to mark the dependence as implicit there, and so the stackmap code
10237       // will confuse it with a regular operand. Instead, add the dependence
10238       // here.
10239       setUsesTOCBasePtr(*BB->getParent());
10240       MI.addOperand(MachineOperand::CreateReg(PPC::X2, false, true));
10241     }
10242 
10243     return emitPatchPoint(MI, BB);
10244   }
10245 
10246   if (MI.getOpcode() == PPC::EH_SjLj_SetJmp32 ||
10247       MI.getOpcode() == PPC::EH_SjLj_SetJmp64) {
10248     return emitEHSjLjSetJmp(MI, BB);
10249   } else if (MI.getOpcode() == PPC::EH_SjLj_LongJmp32 ||
10250              MI.getOpcode() == PPC::EH_SjLj_LongJmp64) {
10251     return emitEHSjLjLongJmp(MI, BB);
10252   }
10253 
10254   const TargetInstrInfo *TII = Subtarget.getInstrInfo();
10255 
10256   // To "insert" these instructions we actually have to insert their
10257   // control-flow patterns.
10258   const BasicBlock *LLVM_BB = BB->getBasicBlock();
10259   MachineFunction::iterator It = ++BB->getIterator();
10260 
10261   MachineFunction *F = BB->getParent();
10262 
10263   if (MI.getOpcode() == PPC::SELECT_CC_I4 ||
10264        MI.getOpcode() == PPC::SELECT_CC_I8 ||
10265        MI.getOpcode() == PPC::SELECT_I4 || MI.getOpcode() == PPC::SELECT_I8) {
10266     SmallVector<MachineOperand, 2> Cond;
10267     if (MI.getOpcode() == PPC::SELECT_CC_I4 ||
10268         MI.getOpcode() == PPC::SELECT_CC_I8)
10269       Cond.push_back(MI.getOperand(4));
10270     else
10271       Cond.push_back(MachineOperand::CreateImm(PPC::PRED_BIT_SET));
10272     Cond.push_back(MI.getOperand(1));
10273 
10274     DebugLoc dl = MI.getDebugLoc();
10275     TII->insertSelect(*BB, MI, dl, MI.getOperand(0).getReg(), Cond,
10276                       MI.getOperand(2).getReg(), MI.getOperand(3).getReg());
10277   } else if (MI.getOpcode() == PPC::SELECT_CC_I4 ||
10278              MI.getOpcode() == PPC::SELECT_CC_I8 ||
10279              MI.getOpcode() == PPC::SELECT_CC_F4 ||
10280              MI.getOpcode() == PPC::SELECT_CC_F8 ||
10281              MI.getOpcode() == PPC::SELECT_CC_F16 ||
10282              MI.getOpcode() == PPC::SELECT_CC_QFRC ||
10283              MI.getOpcode() == PPC::SELECT_CC_QSRC ||
10284              MI.getOpcode() == PPC::SELECT_CC_QBRC ||
10285              MI.getOpcode() == PPC::SELECT_CC_VRRC ||
10286              MI.getOpcode() == PPC::SELECT_CC_VSFRC ||
10287              MI.getOpcode() == PPC::SELECT_CC_VSSRC ||
10288              MI.getOpcode() == PPC::SELECT_CC_VSRC ||
10289              MI.getOpcode() == PPC::SELECT_CC_SPE4 ||
10290              MI.getOpcode() == PPC::SELECT_CC_SPE ||
10291              MI.getOpcode() == PPC::SELECT_I4 ||
10292              MI.getOpcode() == PPC::SELECT_I8 ||
10293              MI.getOpcode() == PPC::SELECT_F4 ||
10294              MI.getOpcode() == PPC::SELECT_F8 ||
10295              MI.getOpcode() == PPC::SELECT_F16 ||
10296              MI.getOpcode() == PPC::SELECT_QFRC ||
10297              MI.getOpcode() == PPC::SELECT_QSRC ||
10298              MI.getOpcode() == PPC::SELECT_QBRC ||
10299              MI.getOpcode() == PPC::SELECT_SPE ||
10300              MI.getOpcode() == PPC::SELECT_SPE4 ||
10301              MI.getOpcode() == PPC::SELECT_VRRC ||
10302              MI.getOpcode() == PPC::SELECT_VSFRC ||
10303              MI.getOpcode() == PPC::SELECT_VSSRC ||
10304              MI.getOpcode() == PPC::SELECT_VSRC) {
10305     // The incoming instruction knows the destination vreg to set, the
10306     // condition code register to branch on, the true/false values to
10307     // select between, and a branch opcode to use.
10308 
10309     //  thisMBB:
10310     //  ...
10311     //   TrueVal = ...
10312     //   cmpTY ccX, r1, r2
10313     //   bCC copy1MBB
10314     //   fallthrough --> copy0MBB
10315     MachineBasicBlock *thisMBB = BB;
10316     MachineBasicBlock *copy0MBB = F->CreateMachineBasicBlock(LLVM_BB);
10317     MachineBasicBlock *sinkMBB = F->CreateMachineBasicBlock(LLVM_BB);
10318     DebugLoc dl = MI.getDebugLoc();
10319     F->insert(It, copy0MBB);
10320     F->insert(It, sinkMBB);
10321 
10322     // Transfer the remainder of BB and its successor edges to sinkMBB.
10323     sinkMBB->splice(sinkMBB->begin(), BB,
10324                     std::next(MachineBasicBlock::iterator(MI)), BB->end());
10325     sinkMBB->transferSuccessorsAndUpdatePHIs(BB);
10326 
10327     // Next, add the true and fallthrough blocks as its successors.
10328     BB->addSuccessor(copy0MBB);
10329     BB->addSuccessor(sinkMBB);
10330 
10331     if (MI.getOpcode() == PPC::SELECT_I4 || MI.getOpcode() == PPC::SELECT_I8 ||
10332         MI.getOpcode() == PPC::SELECT_F4 || MI.getOpcode() == PPC::SELECT_F8 ||
10333         MI.getOpcode() == PPC::SELECT_F16 ||
10334         MI.getOpcode() == PPC::SELECT_SPE4 ||
10335         MI.getOpcode() == PPC::SELECT_SPE ||
10336         MI.getOpcode() == PPC::SELECT_QFRC ||
10337         MI.getOpcode() == PPC::SELECT_QSRC ||
10338         MI.getOpcode() == PPC::SELECT_QBRC ||
10339         MI.getOpcode() == PPC::SELECT_VRRC ||
10340         MI.getOpcode() == PPC::SELECT_VSFRC ||
10341         MI.getOpcode() == PPC::SELECT_VSSRC ||
10342         MI.getOpcode() == PPC::SELECT_VSRC) {
10343       BuildMI(BB, dl, TII->get(PPC::BC))
10344           .addReg(MI.getOperand(1).getReg())
10345           .addMBB(sinkMBB);
10346     } else {
10347       unsigned SelectPred = MI.getOperand(4).getImm();
10348       BuildMI(BB, dl, TII->get(PPC::BCC))
10349           .addImm(SelectPred)
10350           .addReg(MI.getOperand(1).getReg())
10351           .addMBB(sinkMBB);
10352     }
10353 
10354     //  copy0MBB:
10355     //   %FalseValue = ...
10356     //   # fallthrough to sinkMBB
10357     BB = copy0MBB;
10358 
10359     // Update machine-CFG edges
10360     BB->addSuccessor(sinkMBB);
10361 
10362     //  sinkMBB:
10363     //   %Result = phi [ %FalseValue, copy0MBB ], [ %TrueValue, thisMBB ]
10364     //  ...
10365     BB = sinkMBB;
10366     BuildMI(*BB, BB->begin(), dl, TII->get(PPC::PHI), MI.getOperand(0).getReg())
10367         .addReg(MI.getOperand(3).getReg())
10368         .addMBB(copy0MBB)
10369         .addReg(MI.getOperand(2).getReg())
10370         .addMBB(thisMBB);
10371   } else if (MI.getOpcode() == PPC::ReadTB) {
10372     // To read the 64-bit time-base register on a 32-bit target, we read the
10373     // two halves. Should the counter have wrapped while it was being read, we
10374     // need to try again.
10375     // ...
10376     // readLoop:
10377     // mfspr Rx,TBU # load from TBU
10378     // mfspr Ry,TB  # load from TB
10379     // mfspr Rz,TBU # load from TBU
10380     // cmpw crX,Rx,Rz # check if 'old'='new'
10381     // bne readLoop   # branch if they're not equal
10382     // ...
10383 
10384     MachineBasicBlock *readMBB = F->CreateMachineBasicBlock(LLVM_BB);
10385     MachineBasicBlock *sinkMBB = F->CreateMachineBasicBlock(LLVM_BB);
10386     DebugLoc dl = MI.getDebugLoc();
10387     F->insert(It, readMBB);
10388     F->insert(It, sinkMBB);
10389 
10390     // Transfer the remainder of BB and its successor edges to sinkMBB.
10391     sinkMBB->splice(sinkMBB->begin(), BB,
10392                     std::next(MachineBasicBlock::iterator(MI)), BB->end());
10393     sinkMBB->transferSuccessorsAndUpdatePHIs(BB);
10394 
10395     BB->addSuccessor(readMBB);
10396     BB = readMBB;
10397 
10398     MachineRegisterInfo &RegInfo = F->getRegInfo();
10399     unsigned ReadAgainReg = RegInfo.createVirtualRegister(&PPC::GPRCRegClass);
10400     unsigned LoReg = MI.getOperand(0).getReg();
10401     unsigned HiReg = MI.getOperand(1).getReg();
10402 
10403     BuildMI(BB, dl, TII->get(PPC::MFSPR), HiReg).addImm(269);
10404     BuildMI(BB, dl, TII->get(PPC::MFSPR), LoReg).addImm(268);
10405     BuildMI(BB, dl, TII->get(PPC::MFSPR), ReadAgainReg).addImm(269);
10406 
10407     unsigned CmpReg = RegInfo.createVirtualRegister(&PPC::CRRCRegClass);
10408 
10409     BuildMI(BB, dl, TII->get(PPC::CMPW), CmpReg)
10410       .addReg(HiReg).addReg(ReadAgainReg);
10411     BuildMI(BB, dl, TII->get(PPC::BCC))
10412       .addImm(PPC::PRED_NE).addReg(CmpReg).addMBB(readMBB);
10413 
10414     BB->addSuccessor(readMBB);
10415     BB->addSuccessor(sinkMBB);
10416   } else if (MI.getOpcode() == PPC::ATOMIC_LOAD_ADD_I8)
10417     BB = EmitPartwordAtomicBinary(MI, BB, true, PPC::ADD4);
10418   else if (MI.getOpcode() == PPC::ATOMIC_LOAD_ADD_I16)
10419     BB = EmitPartwordAtomicBinary(MI, BB, false, PPC::ADD4);
10420   else if (MI.getOpcode() == PPC::ATOMIC_LOAD_ADD_I32)
10421     BB = EmitAtomicBinary(MI, BB, 4, PPC::ADD4);
10422   else if (MI.getOpcode() == PPC::ATOMIC_LOAD_ADD_I64)
10423     BB = EmitAtomicBinary(MI, BB, 8, PPC::ADD8);
10424 
10425   else if (MI.getOpcode() == PPC::ATOMIC_LOAD_AND_I8)
10426     BB = EmitPartwordAtomicBinary(MI, BB, true, PPC::AND);
10427   else if (MI.getOpcode() == PPC::ATOMIC_LOAD_AND_I16)
10428     BB = EmitPartwordAtomicBinary(MI, BB, false, PPC::AND);
10429   else if (MI.getOpcode() == PPC::ATOMIC_LOAD_AND_I32)
10430     BB = EmitAtomicBinary(MI, BB, 4, PPC::AND);
10431   else if (MI.getOpcode() == PPC::ATOMIC_LOAD_AND_I64)
10432     BB = EmitAtomicBinary(MI, BB, 8, PPC::AND8);
10433 
10434   else if (MI.getOpcode() == PPC::ATOMIC_LOAD_OR_I8)
10435     BB = EmitPartwordAtomicBinary(MI, BB, true, PPC::OR);
10436   else if (MI.getOpcode() == PPC::ATOMIC_LOAD_OR_I16)
10437     BB = EmitPartwordAtomicBinary(MI, BB, false, PPC::OR);
10438   else if (MI.getOpcode() == PPC::ATOMIC_LOAD_OR_I32)
10439     BB = EmitAtomicBinary(MI, BB, 4, PPC::OR);
10440   else if (MI.getOpcode() == PPC::ATOMIC_LOAD_OR_I64)
10441     BB = EmitAtomicBinary(MI, BB, 8, PPC::OR8);
10442 
10443   else if (MI.getOpcode() == PPC::ATOMIC_LOAD_XOR_I8)
10444     BB = EmitPartwordAtomicBinary(MI, BB, true, PPC::XOR);
10445   else if (MI.getOpcode() == PPC::ATOMIC_LOAD_XOR_I16)
10446     BB = EmitPartwordAtomicBinary(MI, BB, false, PPC::XOR);
10447   else if (MI.getOpcode() == PPC::ATOMIC_LOAD_XOR_I32)
10448     BB = EmitAtomicBinary(MI, BB, 4, PPC::XOR);
10449   else if (MI.getOpcode() == PPC::ATOMIC_LOAD_XOR_I64)
10450     BB = EmitAtomicBinary(MI, BB, 8, PPC::XOR8);
10451 
10452   else if (MI.getOpcode() == PPC::ATOMIC_LOAD_NAND_I8)
10453     BB = EmitPartwordAtomicBinary(MI, BB, true, PPC::NAND);
10454   else if (MI.getOpcode() == PPC::ATOMIC_LOAD_NAND_I16)
10455     BB = EmitPartwordAtomicBinary(MI, BB, false, PPC::NAND);
10456   else if (MI.getOpcode() == PPC::ATOMIC_LOAD_NAND_I32)
10457     BB = EmitAtomicBinary(MI, BB, 4, PPC::NAND);
10458   else if (MI.getOpcode() == PPC::ATOMIC_LOAD_NAND_I64)
10459     BB = EmitAtomicBinary(MI, BB, 8, PPC::NAND8);
10460 
10461   else if (MI.getOpcode() == PPC::ATOMIC_LOAD_SUB_I8)
10462     BB = EmitPartwordAtomicBinary(MI, BB, true, PPC::SUBF);
10463   else if (MI.getOpcode() == PPC::ATOMIC_LOAD_SUB_I16)
10464     BB = EmitPartwordAtomicBinary(MI, BB, false, PPC::SUBF);
10465   else if (MI.getOpcode() == PPC::ATOMIC_LOAD_SUB_I32)
10466     BB = EmitAtomicBinary(MI, BB, 4, PPC::SUBF);
10467   else if (MI.getOpcode() == PPC::ATOMIC_LOAD_SUB_I64)
10468     BB = EmitAtomicBinary(MI, BB, 8, PPC::SUBF8);
10469 
10470   else if (MI.getOpcode() == PPC::ATOMIC_LOAD_MIN_I8)
10471     BB = EmitPartwordAtomicBinary(MI, BB, true, 0, PPC::CMPW, PPC::PRED_GE);
10472   else if (MI.getOpcode() == PPC::ATOMIC_LOAD_MIN_I16)
10473     BB = EmitPartwordAtomicBinary(MI, BB, false, 0, PPC::CMPW, PPC::PRED_GE);
10474   else if (MI.getOpcode() == PPC::ATOMIC_LOAD_MIN_I32)
10475     BB = EmitAtomicBinary(MI, BB, 4, 0, PPC::CMPW, PPC::PRED_GE);
10476   else if (MI.getOpcode() == PPC::ATOMIC_LOAD_MIN_I64)
10477     BB = EmitAtomicBinary(MI, BB, 8, 0, PPC::CMPD, PPC::PRED_GE);
10478 
10479   else if (MI.getOpcode() == PPC::ATOMIC_LOAD_MAX_I8)
10480     BB = EmitPartwordAtomicBinary(MI, BB, true, 0, PPC::CMPW, PPC::PRED_LE);
10481   else if (MI.getOpcode() == PPC::ATOMIC_LOAD_MAX_I16)
10482     BB = EmitPartwordAtomicBinary(MI, BB, false, 0, PPC::CMPW, PPC::PRED_LE);
10483   else if (MI.getOpcode() == PPC::ATOMIC_LOAD_MAX_I32)
10484     BB = EmitAtomicBinary(MI, BB, 4, 0, PPC::CMPW, PPC::PRED_LE);
10485   else if (MI.getOpcode() == PPC::ATOMIC_LOAD_MAX_I64)
10486     BB = EmitAtomicBinary(MI, BB, 8, 0, PPC::CMPD, PPC::PRED_LE);
10487 
10488   else if (MI.getOpcode() == PPC::ATOMIC_LOAD_UMIN_I8)
10489     BB = EmitPartwordAtomicBinary(MI, BB, true, 0, PPC::CMPLW, PPC::PRED_GE);
10490   else if (MI.getOpcode() == PPC::ATOMIC_LOAD_UMIN_I16)
10491     BB = EmitPartwordAtomicBinary(MI, BB, false, 0, PPC::CMPLW, PPC::PRED_GE);
10492   else if (MI.getOpcode() == PPC::ATOMIC_LOAD_UMIN_I32)
10493     BB = EmitAtomicBinary(MI, BB, 4, 0, PPC::CMPLW, PPC::PRED_GE);
10494   else if (MI.getOpcode() == PPC::ATOMIC_LOAD_UMIN_I64)
10495     BB = EmitAtomicBinary(MI, BB, 8, 0, PPC::CMPLD, PPC::PRED_GE);
10496 
10497   else if (MI.getOpcode() == PPC::ATOMIC_LOAD_UMAX_I8)
10498     BB = EmitPartwordAtomicBinary(MI, BB, true, 0, PPC::CMPLW, PPC::PRED_LE);
10499   else if (MI.getOpcode() == PPC::ATOMIC_LOAD_UMAX_I16)
10500     BB = EmitPartwordAtomicBinary(MI, BB, false, 0, PPC::CMPLW, PPC::PRED_LE);
10501   else if (MI.getOpcode() == PPC::ATOMIC_LOAD_UMAX_I32)
10502     BB = EmitAtomicBinary(MI, BB, 4, 0, PPC::CMPLW, PPC::PRED_LE);
10503   else if (MI.getOpcode() == PPC::ATOMIC_LOAD_UMAX_I64)
10504     BB = EmitAtomicBinary(MI, BB, 8, 0, PPC::CMPLD, PPC::PRED_LE);
10505 
10506   else if (MI.getOpcode() == PPC::ATOMIC_SWAP_I8)
10507     BB = EmitPartwordAtomicBinary(MI, BB, true, 0);
10508   else if (MI.getOpcode() == PPC::ATOMIC_SWAP_I16)
10509     BB = EmitPartwordAtomicBinary(MI, BB, false, 0);
10510   else if (MI.getOpcode() == PPC::ATOMIC_SWAP_I32)
10511     BB = EmitAtomicBinary(MI, BB, 4, 0);
10512   else if (MI.getOpcode() == PPC::ATOMIC_SWAP_I64)
10513     BB = EmitAtomicBinary(MI, BB, 8, 0);
10514   else if (MI.getOpcode() == PPC::ATOMIC_CMP_SWAP_I32 ||
10515            MI.getOpcode() == PPC::ATOMIC_CMP_SWAP_I64 ||
10516            (Subtarget.hasPartwordAtomics() &&
10517             MI.getOpcode() == PPC::ATOMIC_CMP_SWAP_I8) ||
10518            (Subtarget.hasPartwordAtomics() &&
10519             MI.getOpcode() == PPC::ATOMIC_CMP_SWAP_I16)) {
10520     bool is64bit = MI.getOpcode() == PPC::ATOMIC_CMP_SWAP_I64;
10521 
10522     auto LoadMnemonic = PPC::LDARX;
10523     auto StoreMnemonic = PPC::STDCX;
10524     switch (MI.getOpcode()) {
10525     default:
10526       llvm_unreachable("Compare and swap of unknown size");
10527     case PPC::ATOMIC_CMP_SWAP_I8:
10528       LoadMnemonic = PPC::LBARX;
10529       StoreMnemonic = PPC::STBCX;
10530       assert(Subtarget.hasPartwordAtomics() && "No support partword atomics.");
10531       break;
10532     case PPC::ATOMIC_CMP_SWAP_I16:
10533       LoadMnemonic = PPC::LHARX;
10534       StoreMnemonic = PPC::STHCX;
10535       assert(Subtarget.hasPartwordAtomics() && "No support partword atomics.");
10536       break;
10537     case PPC::ATOMIC_CMP_SWAP_I32:
10538       LoadMnemonic = PPC::LWARX;
10539       StoreMnemonic = PPC::STWCX;
10540       break;
10541     case PPC::ATOMIC_CMP_SWAP_I64:
10542       LoadMnemonic = PPC::LDARX;
10543       StoreMnemonic = PPC::STDCX;
10544       break;
10545     }
10546     unsigned dest = MI.getOperand(0).getReg();
10547     unsigned ptrA = MI.getOperand(1).getReg();
10548     unsigned ptrB = MI.getOperand(2).getReg();
10549     unsigned oldval = MI.getOperand(3).getReg();
10550     unsigned newval = MI.getOperand(4).getReg();
10551     DebugLoc dl = MI.getDebugLoc();
10552 
10553     MachineBasicBlock *loop1MBB = F->CreateMachineBasicBlock(LLVM_BB);
10554     MachineBasicBlock *loop2MBB = F->CreateMachineBasicBlock(LLVM_BB);
10555     MachineBasicBlock *midMBB = F->CreateMachineBasicBlock(LLVM_BB);
10556     MachineBasicBlock *exitMBB = F->CreateMachineBasicBlock(LLVM_BB);
10557     F->insert(It, loop1MBB);
10558     F->insert(It, loop2MBB);
10559     F->insert(It, midMBB);
10560     F->insert(It, exitMBB);
10561     exitMBB->splice(exitMBB->begin(), BB,
10562                     std::next(MachineBasicBlock::iterator(MI)), BB->end());
10563     exitMBB->transferSuccessorsAndUpdatePHIs(BB);
10564 
10565     //  thisMBB:
10566     //   ...
10567     //   fallthrough --> loopMBB
10568     BB->addSuccessor(loop1MBB);
10569 
10570     // loop1MBB:
10571     //   l[bhwd]arx dest, ptr
10572     //   cmp[wd] dest, oldval
10573     //   bne- midMBB
10574     // loop2MBB:
10575     //   st[bhwd]cx. newval, ptr
10576     //   bne- loopMBB
10577     //   b exitBB
10578     // midMBB:
10579     //   st[bhwd]cx. dest, ptr
10580     // exitBB:
10581     BB = loop1MBB;
10582     BuildMI(BB, dl, TII->get(LoadMnemonic), dest)
10583       .addReg(ptrA).addReg(ptrB);
10584     BuildMI(BB, dl, TII->get(is64bit ? PPC::CMPD : PPC::CMPW), PPC::CR0)
10585       .addReg(oldval).addReg(dest);
10586     BuildMI(BB, dl, TII->get(PPC::BCC))
10587       .addImm(PPC::PRED_NE).addReg(PPC::CR0).addMBB(midMBB);
10588     BB->addSuccessor(loop2MBB);
10589     BB->addSuccessor(midMBB);
10590 
10591     BB = loop2MBB;
10592     BuildMI(BB, dl, TII->get(StoreMnemonic))
10593       .addReg(newval).addReg(ptrA).addReg(ptrB);
10594     BuildMI(BB, dl, TII->get(PPC::BCC))
10595       .addImm(PPC::PRED_NE).addReg(PPC::CR0).addMBB(loop1MBB);
10596     BuildMI(BB, dl, TII->get(PPC::B)).addMBB(exitMBB);
10597     BB->addSuccessor(loop1MBB);
10598     BB->addSuccessor(exitMBB);
10599 
10600     BB = midMBB;
10601     BuildMI(BB, dl, TII->get(StoreMnemonic))
10602       .addReg(dest).addReg(ptrA).addReg(ptrB);
10603     BB->addSuccessor(exitMBB);
10604 
10605     //  exitMBB:
10606     //   ...
10607     BB = exitMBB;
10608   } else if (MI.getOpcode() == PPC::ATOMIC_CMP_SWAP_I8 ||
10609              MI.getOpcode() == PPC::ATOMIC_CMP_SWAP_I16) {
10610     // We must use 64-bit registers for addresses when targeting 64-bit,
10611     // since we're actually doing arithmetic on them.  Other registers
10612     // can be 32-bit.
10613     bool is64bit = Subtarget.isPPC64();
10614     bool isLittleEndian = Subtarget.isLittleEndian();
10615     bool is8bit = MI.getOpcode() == PPC::ATOMIC_CMP_SWAP_I8;
10616 
10617     unsigned dest = MI.getOperand(0).getReg();
10618     unsigned ptrA = MI.getOperand(1).getReg();
10619     unsigned ptrB = MI.getOperand(2).getReg();
10620     unsigned oldval = MI.getOperand(3).getReg();
10621     unsigned newval = MI.getOperand(4).getReg();
10622     DebugLoc dl = MI.getDebugLoc();
10623 
10624     MachineBasicBlock *loop1MBB = F->CreateMachineBasicBlock(LLVM_BB);
10625     MachineBasicBlock *loop2MBB = F->CreateMachineBasicBlock(LLVM_BB);
10626     MachineBasicBlock *midMBB = F->CreateMachineBasicBlock(LLVM_BB);
10627     MachineBasicBlock *exitMBB = F->CreateMachineBasicBlock(LLVM_BB);
10628     F->insert(It, loop1MBB);
10629     F->insert(It, loop2MBB);
10630     F->insert(It, midMBB);
10631     F->insert(It, exitMBB);
10632     exitMBB->splice(exitMBB->begin(), BB,
10633                     std::next(MachineBasicBlock::iterator(MI)), BB->end());
10634     exitMBB->transferSuccessorsAndUpdatePHIs(BB);
10635 
10636     MachineRegisterInfo &RegInfo = F->getRegInfo();
10637     const TargetRegisterClass *RC = is64bit ? &PPC::G8RCRegClass
10638                                             : &PPC::GPRCRegClass;
10639     unsigned PtrReg = RegInfo.createVirtualRegister(RC);
10640     unsigned Shift1Reg = RegInfo.createVirtualRegister(RC);
10641     unsigned ShiftReg =
10642       isLittleEndian ? Shift1Reg : RegInfo.createVirtualRegister(RC);
10643     unsigned NewVal2Reg = RegInfo.createVirtualRegister(RC);
10644     unsigned NewVal3Reg = RegInfo.createVirtualRegister(RC);
10645     unsigned OldVal2Reg = RegInfo.createVirtualRegister(RC);
10646     unsigned OldVal3Reg = RegInfo.createVirtualRegister(RC);
10647     unsigned MaskReg = RegInfo.createVirtualRegister(RC);
10648     unsigned Mask2Reg = RegInfo.createVirtualRegister(RC);
10649     unsigned Mask3Reg = RegInfo.createVirtualRegister(RC);
10650     unsigned Tmp2Reg = RegInfo.createVirtualRegister(RC);
10651     unsigned Tmp4Reg = RegInfo.createVirtualRegister(RC);
10652     unsigned TmpDestReg = RegInfo.createVirtualRegister(RC);
10653     unsigned Ptr1Reg;
10654     unsigned TmpReg = RegInfo.createVirtualRegister(RC);
10655     unsigned ZeroReg = is64bit ? PPC::ZERO8 : PPC::ZERO;
10656     //  thisMBB:
10657     //   ...
10658     //   fallthrough --> loopMBB
10659     BB->addSuccessor(loop1MBB);
10660 
10661     // The 4-byte load must be aligned, while a char or short may be
10662     // anywhere in the word.  Hence all this nasty bookkeeping code.
10663     //   add ptr1, ptrA, ptrB [copy if ptrA==0]
10664     //   rlwinm shift1, ptr1, 3, 27, 28 [3, 27, 27]
10665     //   xori shift, shift1, 24 [16]
10666     //   rlwinm ptr, ptr1, 0, 0, 29
10667     //   slw newval2, newval, shift
10668     //   slw oldval2, oldval,shift
10669     //   li mask2, 255 [li mask3, 0; ori mask2, mask3, 65535]
10670     //   slw mask, mask2, shift
10671     //   and newval3, newval2, mask
10672     //   and oldval3, oldval2, mask
10673     // loop1MBB:
10674     //   lwarx tmpDest, ptr
10675     //   and tmp, tmpDest, mask
10676     //   cmpw tmp, oldval3
10677     //   bne- midMBB
10678     // loop2MBB:
10679     //   andc tmp2, tmpDest, mask
10680     //   or tmp4, tmp2, newval3
10681     //   stwcx. tmp4, ptr
10682     //   bne- loop1MBB
10683     //   b exitBB
10684     // midMBB:
10685     //   stwcx. tmpDest, ptr
10686     // exitBB:
10687     //   srw dest, tmpDest, shift
10688     if (ptrA != ZeroReg) {
10689       Ptr1Reg = RegInfo.createVirtualRegister(RC);
10690       BuildMI(BB, dl, TII->get(is64bit ? PPC::ADD8 : PPC::ADD4), Ptr1Reg)
10691         .addReg(ptrA).addReg(ptrB);
10692     } else {
10693       Ptr1Reg = ptrB;
10694     }
10695     BuildMI(BB, dl, TII->get(PPC::RLWINM), Shift1Reg).addReg(Ptr1Reg)
10696         .addImm(3).addImm(27).addImm(is8bit ? 28 : 27);
10697     if (!isLittleEndian)
10698       BuildMI(BB, dl, TII->get(is64bit ? PPC::XORI8 : PPC::XORI), ShiftReg)
10699           .addReg(Shift1Reg).addImm(is8bit ? 24 : 16);
10700     if (is64bit)
10701       BuildMI(BB, dl, TII->get(PPC::RLDICR), PtrReg)
10702         .addReg(Ptr1Reg).addImm(0).addImm(61);
10703     else
10704       BuildMI(BB, dl, TII->get(PPC::RLWINM), PtrReg)
10705         .addReg(Ptr1Reg).addImm(0).addImm(0).addImm(29);
10706     BuildMI(BB, dl, TII->get(PPC::SLW), NewVal2Reg)
10707         .addReg(newval).addReg(ShiftReg);
10708     BuildMI(BB, dl, TII->get(PPC::SLW), OldVal2Reg)
10709         .addReg(oldval).addReg(ShiftReg);
10710     if (is8bit)
10711       BuildMI(BB, dl, TII->get(PPC::LI), Mask2Reg).addImm(255);
10712     else {
10713       BuildMI(BB, dl, TII->get(PPC::LI), Mask3Reg).addImm(0);
10714       BuildMI(BB, dl, TII->get(PPC::ORI), Mask2Reg)
10715         .addReg(Mask3Reg).addImm(65535);
10716     }
10717     BuildMI(BB, dl, TII->get(PPC::SLW), MaskReg)
10718         .addReg(Mask2Reg).addReg(ShiftReg);
10719     BuildMI(BB, dl, TII->get(PPC::AND), NewVal3Reg)
10720         .addReg(NewVal2Reg).addReg(MaskReg);
10721     BuildMI(BB, dl, TII->get(PPC::AND), OldVal3Reg)
10722         .addReg(OldVal2Reg).addReg(MaskReg);
10723 
10724     BB = loop1MBB;
10725     BuildMI(BB, dl, TII->get(PPC::LWARX), TmpDestReg)
10726         .addReg(ZeroReg).addReg(PtrReg);
10727     BuildMI(BB, dl, TII->get(PPC::AND),TmpReg)
10728         .addReg(TmpDestReg).addReg(MaskReg);
10729     BuildMI(BB, dl, TII->get(PPC::CMPW), PPC::CR0)
10730         .addReg(TmpReg).addReg(OldVal3Reg);
10731     BuildMI(BB, dl, TII->get(PPC::BCC))
10732         .addImm(PPC::PRED_NE).addReg(PPC::CR0).addMBB(midMBB);
10733     BB->addSuccessor(loop2MBB);
10734     BB->addSuccessor(midMBB);
10735 
10736     BB = loop2MBB;
10737     BuildMI(BB, dl, TII->get(PPC::ANDC),Tmp2Reg)
10738         .addReg(TmpDestReg).addReg(MaskReg);
10739     BuildMI(BB, dl, TII->get(PPC::OR),Tmp4Reg)
10740         .addReg(Tmp2Reg).addReg(NewVal3Reg);
10741     BuildMI(BB, dl, TII->get(PPC::STWCX)).addReg(Tmp4Reg)
10742         .addReg(ZeroReg).addReg(PtrReg);
10743     BuildMI(BB, dl, TII->get(PPC::BCC))
10744       .addImm(PPC::PRED_NE).addReg(PPC::CR0).addMBB(loop1MBB);
10745     BuildMI(BB, dl, TII->get(PPC::B)).addMBB(exitMBB);
10746     BB->addSuccessor(loop1MBB);
10747     BB->addSuccessor(exitMBB);
10748 
10749     BB = midMBB;
10750     BuildMI(BB, dl, TII->get(PPC::STWCX)).addReg(TmpDestReg)
10751       .addReg(ZeroReg).addReg(PtrReg);
10752     BB->addSuccessor(exitMBB);
10753 
10754     //  exitMBB:
10755     //   ...
10756     BB = exitMBB;
10757     BuildMI(*BB, BB->begin(), dl, TII->get(PPC::SRW),dest).addReg(TmpReg)
10758       .addReg(ShiftReg);
10759   } else if (MI.getOpcode() == PPC::FADDrtz) {
10760     // This pseudo performs an FADD with rounding mode temporarily forced
10761     // to round-to-zero.  We emit this via custom inserter since the FPSCR
10762     // is not modeled at the SelectionDAG level.
10763     unsigned Dest = MI.getOperand(0).getReg();
10764     unsigned Src1 = MI.getOperand(1).getReg();
10765     unsigned Src2 = MI.getOperand(2).getReg();
10766     DebugLoc dl = MI.getDebugLoc();
10767 
10768     MachineRegisterInfo &RegInfo = F->getRegInfo();
10769     unsigned MFFSReg = RegInfo.createVirtualRegister(&PPC::F8RCRegClass);
10770 
10771     // Save FPSCR value.
10772     BuildMI(*BB, MI, dl, TII->get(PPC::MFFS), MFFSReg);
10773 
10774     // Set rounding mode to round-to-zero.
10775     BuildMI(*BB, MI, dl, TII->get(PPC::MTFSB1)).addImm(31);
10776     BuildMI(*BB, MI, dl, TII->get(PPC::MTFSB0)).addImm(30);
10777 
10778     // Perform addition.
10779     BuildMI(*BB, MI, dl, TII->get(PPC::FADD), Dest).addReg(Src1).addReg(Src2);
10780 
10781     // Restore FPSCR value.
10782     BuildMI(*BB, MI, dl, TII->get(PPC::MTFSFb)).addImm(1).addReg(MFFSReg);
10783   } else if (MI.getOpcode() == PPC::ANDIo_1_EQ_BIT ||
10784              MI.getOpcode() == PPC::ANDIo_1_GT_BIT ||
10785              MI.getOpcode() == PPC::ANDIo_1_EQ_BIT8 ||
10786              MI.getOpcode() == PPC::ANDIo_1_GT_BIT8) {
10787     unsigned Opcode = (MI.getOpcode() == PPC::ANDIo_1_EQ_BIT8 ||
10788                        MI.getOpcode() == PPC::ANDIo_1_GT_BIT8)
10789                           ? PPC::ANDIo8
10790                           : PPC::ANDIo;
10791     bool isEQ = (MI.getOpcode() == PPC::ANDIo_1_EQ_BIT ||
10792                  MI.getOpcode() == PPC::ANDIo_1_EQ_BIT8);
10793 
10794     MachineRegisterInfo &RegInfo = F->getRegInfo();
10795     unsigned Dest = RegInfo.createVirtualRegister(Opcode == PPC::ANDIo ?
10796                                                   &PPC::GPRCRegClass :
10797                                                   &PPC::G8RCRegClass);
10798 
10799     DebugLoc dl = MI.getDebugLoc();
10800     BuildMI(*BB, MI, dl, TII->get(Opcode), Dest)
10801         .addReg(MI.getOperand(1).getReg())
10802         .addImm(1);
10803     BuildMI(*BB, MI, dl, TII->get(TargetOpcode::COPY),
10804             MI.getOperand(0).getReg())
10805         .addReg(isEQ ? PPC::CR0EQ : PPC::CR0GT);
10806   } else if (MI.getOpcode() == PPC::TCHECK_RET) {
10807     DebugLoc Dl = MI.getDebugLoc();
10808     MachineRegisterInfo &RegInfo = F->getRegInfo();
10809     unsigned CRReg = RegInfo.createVirtualRegister(&PPC::CRRCRegClass);
10810     BuildMI(*BB, MI, Dl, TII->get(PPC::TCHECK), CRReg);
10811     return BB;
10812   } else {
10813     llvm_unreachable("Unexpected instr type to insert");
10814   }
10815 
10816   MI.eraseFromParent(); // The pseudo instruction is gone now.
10817   return BB;
10818 }
10819 
10820 //===----------------------------------------------------------------------===//
10821 // Target Optimization Hooks
10822 //===----------------------------------------------------------------------===//
10823 
10824 static int getEstimateRefinementSteps(EVT VT, const PPCSubtarget &Subtarget) {
10825   // For the estimates, convergence is quadratic, so we essentially double the
10826   // number of digits correct after every iteration. For both FRE and FRSQRTE,
10827   // the minimum architected relative accuracy is 2^-5. When hasRecipPrec(),
10828   // this is 2^-14. IEEE float has 23 digits and double has 52 digits.
10829   int RefinementSteps = Subtarget.hasRecipPrec() ? 1 : 3;
10830   if (VT.getScalarType() == MVT::f64)
10831     RefinementSteps++;
10832   return RefinementSteps;
10833 }
10834 
10835 SDValue PPCTargetLowering::getSqrtEstimate(SDValue Operand, SelectionDAG &DAG,
10836                                            int Enabled, int &RefinementSteps,
10837                                            bool &UseOneConstNR,
10838                                            bool Reciprocal) const {
10839   EVT VT = Operand.getValueType();
10840   if ((VT == MVT::f32 && Subtarget.hasFRSQRTES()) ||
10841       (VT == MVT::f64 && Subtarget.hasFRSQRTE()) ||
10842       (VT == MVT::v4f32 && Subtarget.hasAltivec()) ||
10843       (VT == MVT::v2f64 && Subtarget.hasVSX()) ||
10844       (VT == MVT::v4f32 && Subtarget.hasQPX()) ||
10845       (VT == MVT::v4f64 && Subtarget.hasQPX())) {
10846     if (RefinementSteps == ReciprocalEstimate::Unspecified)
10847       RefinementSteps = getEstimateRefinementSteps(VT, Subtarget);
10848 
10849     UseOneConstNR = true;
10850     return DAG.getNode(PPCISD::FRSQRTE, SDLoc(Operand), VT, Operand);
10851   }
10852   return SDValue();
10853 }
10854 
10855 SDValue PPCTargetLowering::getRecipEstimate(SDValue Operand, SelectionDAG &DAG,
10856                                             int Enabled,
10857                                             int &RefinementSteps) const {
10858   EVT VT = Operand.getValueType();
10859   if ((VT == MVT::f32 && Subtarget.hasFRES()) ||
10860       (VT == MVT::f64 && Subtarget.hasFRE()) ||
10861       (VT == MVT::v4f32 && Subtarget.hasAltivec()) ||
10862       (VT == MVT::v2f64 && Subtarget.hasVSX()) ||
10863       (VT == MVT::v4f32 && Subtarget.hasQPX()) ||
10864       (VT == MVT::v4f64 && Subtarget.hasQPX())) {
10865     if (RefinementSteps == ReciprocalEstimate::Unspecified)
10866       RefinementSteps = getEstimateRefinementSteps(VT, Subtarget);
10867     return DAG.getNode(PPCISD::FRE, SDLoc(Operand), VT, Operand);
10868   }
10869   return SDValue();
10870 }
10871 
10872 unsigned PPCTargetLowering::combineRepeatedFPDivisors() const {
10873   // Note: This functionality is used only when unsafe-fp-math is enabled, and
10874   // on cores with reciprocal estimates (which are used when unsafe-fp-math is
10875   // enabled for division), this functionality is redundant with the default
10876   // combiner logic (once the division -> reciprocal/multiply transformation
10877   // has taken place). As a result, this matters more for older cores than for
10878   // newer ones.
10879 
10880   // Combine multiple FDIVs with the same divisor into multiple FMULs by the
10881   // reciprocal if there are two or more FDIVs (for embedded cores with only
10882   // one FP pipeline) for three or more FDIVs (for generic OOO cores).
10883   switch (Subtarget.getDarwinDirective()) {
10884   default:
10885     return 3;
10886   case PPC::DIR_440:
10887   case PPC::DIR_A2:
10888   case PPC::DIR_E500:
10889   case PPC::DIR_E500mc:
10890   case PPC::DIR_E5500:
10891     return 2;
10892   }
10893 }
10894 
10895 // isConsecutiveLSLoc needs to work even if all adds have not yet been
10896 // collapsed, and so we need to look through chains of them.
10897 static void getBaseWithConstantOffset(SDValue Loc, SDValue &Base,
10898                                      int64_t& Offset, SelectionDAG &DAG) {
10899   if (DAG.isBaseWithConstantOffset(Loc)) {
10900     Base = Loc.getOperand(0);
10901     Offset += cast<ConstantSDNode>(Loc.getOperand(1))->getSExtValue();
10902 
10903     // The base might itself be a base plus an offset, and if so, accumulate
10904     // that as well.
10905     getBaseWithConstantOffset(Loc.getOperand(0), Base, Offset, DAG);
10906   }
10907 }
10908 
10909 static bool isConsecutiveLSLoc(SDValue Loc, EVT VT, LSBaseSDNode *Base,
10910                             unsigned Bytes, int Dist,
10911                             SelectionDAG &DAG) {
10912   if (VT.getSizeInBits() / 8 != Bytes)
10913     return false;
10914 
10915   SDValue BaseLoc = Base->getBasePtr();
10916   if (Loc.getOpcode() == ISD::FrameIndex) {
10917     if (BaseLoc.getOpcode() != ISD::FrameIndex)
10918       return false;
10919     const MachineFrameInfo &MFI = DAG.getMachineFunction().getFrameInfo();
10920     int FI  = cast<FrameIndexSDNode>(Loc)->getIndex();
10921     int BFI = cast<FrameIndexSDNode>(BaseLoc)->getIndex();
10922     int FS  = MFI.getObjectSize(FI);
10923     int BFS = MFI.getObjectSize(BFI);
10924     if (FS != BFS || FS != (int)Bytes) return false;
10925     return MFI.getObjectOffset(FI) == (MFI.getObjectOffset(BFI) + Dist*Bytes);
10926   }
10927 
10928   SDValue Base1 = Loc, Base2 = BaseLoc;
10929   int64_t Offset1 = 0, Offset2 = 0;
10930   getBaseWithConstantOffset(Loc, Base1, Offset1, DAG);
10931   getBaseWithConstantOffset(BaseLoc, Base2, Offset2, DAG);
10932   if (Base1 == Base2 && Offset1 == (Offset2 + Dist * Bytes))
10933     return true;
10934 
10935   const TargetLowering &TLI = DAG.getTargetLoweringInfo();
10936   const GlobalValue *GV1 = nullptr;
10937   const GlobalValue *GV2 = nullptr;
10938   Offset1 = 0;
10939   Offset2 = 0;
10940   bool isGA1 = TLI.isGAPlusOffset(Loc.getNode(), GV1, Offset1);
10941   bool isGA2 = TLI.isGAPlusOffset(BaseLoc.getNode(), GV2, Offset2);
10942   if (isGA1 && isGA2 && GV1 == GV2)
10943     return Offset1 == (Offset2 + Dist*Bytes);
10944   return false;
10945 }
10946 
10947 // Like SelectionDAG::isConsecutiveLoad, but also works for stores, and does
10948 // not enforce equality of the chain operands.
10949 static bool isConsecutiveLS(SDNode *N, LSBaseSDNode *Base,
10950                             unsigned Bytes, int Dist,
10951                             SelectionDAG &DAG) {
10952   if (LSBaseSDNode *LS = dyn_cast<LSBaseSDNode>(N)) {
10953     EVT VT = LS->getMemoryVT();
10954     SDValue Loc = LS->getBasePtr();
10955     return isConsecutiveLSLoc(Loc, VT, Base, Bytes, Dist, DAG);
10956   }
10957 
10958   if (N->getOpcode() == ISD::INTRINSIC_W_CHAIN) {
10959     EVT VT;
10960     switch (cast<ConstantSDNode>(N->getOperand(1))->getZExtValue()) {
10961     default: return false;
10962     case Intrinsic::ppc_qpx_qvlfd:
10963     case Intrinsic::ppc_qpx_qvlfda:
10964       VT = MVT::v4f64;
10965       break;
10966     case Intrinsic::ppc_qpx_qvlfs:
10967     case Intrinsic::ppc_qpx_qvlfsa:
10968       VT = MVT::v4f32;
10969       break;
10970     case Intrinsic::ppc_qpx_qvlfcd:
10971     case Intrinsic::ppc_qpx_qvlfcda:
10972       VT = MVT::v2f64;
10973       break;
10974     case Intrinsic::ppc_qpx_qvlfcs:
10975     case Intrinsic::ppc_qpx_qvlfcsa:
10976       VT = MVT::v2f32;
10977       break;
10978     case Intrinsic::ppc_qpx_qvlfiwa:
10979     case Intrinsic::ppc_qpx_qvlfiwz:
10980     case Intrinsic::ppc_altivec_lvx:
10981     case Intrinsic::ppc_altivec_lvxl:
10982     case Intrinsic::ppc_vsx_lxvw4x:
10983     case Intrinsic::ppc_vsx_lxvw4x_be:
10984       VT = MVT::v4i32;
10985       break;
10986     case Intrinsic::ppc_vsx_lxvd2x:
10987     case Intrinsic::ppc_vsx_lxvd2x_be:
10988       VT = MVT::v2f64;
10989       break;
10990     case Intrinsic::ppc_altivec_lvebx:
10991       VT = MVT::i8;
10992       break;
10993     case Intrinsic::ppc_altivec_lvehx:
10994       VT = MVT::i16;
10995       break;
10996     case Intrinsic::ppc_altivec_lvewx:
10997       VT = MVT::i32;
10998       break;
10999     }
11000 
11001     return isConsecutiveLSLoc(N->getOperand(2), VT, Base, Bytes, Dist, DAG);
11002   }
11003 
11004   if (N->getOpcode() == ISD::INTRINSIC_VOID) {
11005     EVT VT;
11006     switch (cast<ConstantSDNode>(N->getOperand(1))->getZExtValue()) {
11007     default: return false;
11008     case Intrinsic::ppc_qpx_qvstfd:
11009     case Intrinsic::ppc_qpx_qvstfda:
11010       VT = MVT::v4f64;
11011       break;
11012     case Intrinsic::ppc_qpx_qvstfs:
11013     case Intrinsic::ppc_qpx_qvstfsa:
11014       VT = MVT::v4f32;
11015       break;
11016     case Intrinsic::ppc_qpx_qvstfcd:
11017     case Intrinsic::ppc_qpx_qvstfcda:
11018       VT = MVT::v2f64;
11019       break;
11020     case Intrinsic::ppc_qpx_qvstfcs:
11021     case Intrinsic::ppc_qpx_qvstfcsa:
11022       VT = MVT::v2f32;
11023       break;
11024     case Intrinsic::ppc_qpx_qvstfiw:
11025     case Intrinsic::ppc_qpx_qvstfiwa:
11026     case Intrinsic::ppc_altivec_stvx:
11027     case Intrinsic::ppc_altivec_stvxl:
11028     case Intrinsic::ppc_vsx_stxvw4x:
11029       VT = MVT::v4i32;
11030       break;
11031     case Intrinsic::ppc_vsx_stxvd2x:
11032       VT = MVT::v2f64;
11033       break;
11034     case Intrinsic::ppc_vsx_stxvw4x_be:
11035       VT = MVT::v4i32;
11036       break;
11037     case Intrinsic::ppc_vsx_stxvd2x_be:
11038       VT = MVT::v2f64;
11039       break;
11040     case Intrinsic::ppc_altivec_stvebx:
11041       VT = MVT::i8;
11042       break;
11043     case Intrinsic::ppc_altivec_stvehx:
11044       VT = MVT::i16;
11045       break;
11046     case Intrinsic::ppc_altivec_stvewx:
11047       VT = MVT::i32;
11048       break;
11049     }
11050 
11051     return isConsecutiveLSLoc(N->getOperand(3), VT, Base, Bytes, Dist, DAG);
11052   }
11053 
11054   return false;
11055 }
11056 
11057 // Return true is there is a nearyby consecutive load to the one provided
11058 // (regardless of alignment). We search up and down the chain, looking though
11059 // token factors and other loads (but nothing else). As a result, a true result
11060 // indicates that it is safe to create a new consecutive load adjacent to the
11061 // load provided.
11062 static bool findConsecutiveLoad(LoadSDNode *LD, SelectionDAG &DAG) {
11063   SDValue Chain = LD->getChain();
11064   EVT VT = LD->getMemoryVT();
11065 
11066   SmallSet<SDNode *, 16> LoadRoots;
11067   SmallVector<SDNode *, 8> Queue(1, Chain.getNode());
11068   SmallSet<SDNode *, 16> Visited;
11069 
11070   // First, search up the chain, branching to follow all token-factor operands.
11071   // If we find a consecutive load, then we're done, otherwise, record all
11072   // nodes just above the top-level loads and token factors.
11073   while (!Queue.empty()) {
11074     SDNode *ChainNext = Queue.pop_back_val();
11075     if (!Visited.insert(ChainNext).second)
11076       continue;
11077 
11078     if (MemSDNode *ChainLD = dyn_cast<MemSDNode>(ChainNext)) {
11079       if (isConsecutiveLS(ChainLD, LD, VT.getStoreSize(), 1, DAG))
11080         return true;
11081 
11082       if (!Visited.count(ChainLD->getChain().getNode()))
11083         Queue.push_back(ChainLD->getChain().getNode());
11084     } else if (ChainNext->getOpcode() == ISD::TokenFactor) {
11085       for (const SDUse &O : ChainNext->ops())
11086         if (!Visited.count(O.getNode()))
11087           Queue.push_back(O.getNode());
11088     } else
11089       LoadRoots.insert(ChainNext);
11090   }
11091 
11092   // Second, search down the chain, starting from the top-level nodes recorded
11093   // in the first phase. These top-level nodes are the nodes just above all
11094   // loads and token factors. Starting with their uses, recursively look though
11095   // all loads (just the chain uses) and token factors to find a consecutive
11096   // load.
11097   Visited.clear();
11098   Queue.clear();
11099 
11100   for (SmallSet<SDNode *, 16>::iterator I = LoadRoots.begin(),
11101        IE = LoadRoots.end(); I != IE; ++I) {
11102     Queue.push_back(*I);
11103 
11104     while (!Queue.empty()) {
11105       SDNode *LoadRoot = Queue.pop_back_val();
11106       if (!Visited.insert(LoadRoot).second)
11107         continue;
11108 
11109       if (MemSDNode *ChainLD = dyn_cast<MemSDNode>(LoadRoot))
11110         if (isConsecutiveLS(ChainLD, LD, VT.getStoreSize(), 1, DAG))
11111           return true;
11112 
11113       for (SDNode::use_iterator UI = LoadRoot->use_begin(),
11114            UE = LoadRoot->use_end(); UI != UE; ++UI)
11115         if (((isa<MemSDNode>(*UI) &&
11116             cast<MemSDNode>(*UI)->getChain().getNode() == LoadRoot) ||
11117             UI->getOpcode() == ISD::TokenFactor) && !Visited.count(*UI))
11118           Queue.push_back(*UI);
11119     }
11120   }
11121 
11122   return false;
11123 }
11124 
11125 /// This function is called when we have proved that a SETCC node can be replaced
11126 /// by subtraction (and other supporting instructions) so that the result of
11127 /// comparison is kept in a GPR instead of CR. This function is purely for
11128 /// codegen purposes and has some flags to guide the codegen process.
11129 static SDValue generateEquivalentSub(SDNode *N, int Size, bool Complement,
11130                                      bool Swap, SDLoc &DL, SelectionDAG &DAG) {
11131   assert(N->getOpcode() == ISD::SETCC && "ISD::SETCC Expected.");
11132 
11133   // Zero extend the operands to the largest legal integer. Originally, they
11134   // must be of a strictly smaller size.
11135   auto Op0 = DAG.getNode(ISD::ZERO_EXTEND, DL, MVT::i64, N->getOperand(0),
11136                          DAG.getConstant(Size, DL, MVT::i32));
11137   auto Op1 = DAG.getNode(ISD::ZERO_EXTEND, DL, MVT::i64, N->getOperand(1),
11138                          DAG.getConstant(Size, DL, MVT::i32));
11139 
11140   // Swap if needed. Depends on the condition code.
11141   if (Swap)
11142     std::swap(Op0, Op1);
11143 
11144   // Subtract extended integers.
11145   auto SubNode = DAG.getNode(ISD::SUB, DL, MVT::i64, Op0, Op1);
11146 
11147   // Move the sign bit to the least significant position and zero out the rest.
11148   // Now the least significant bit carries the result of original comparison.
11149   auto Shifted = DAG.getNode(ISD::SRL, DL, MVT::i64, SubNode,
11150                              DAG.getConstant(Size - 1, DL, MVT::i32));
11151   auto Final = Shifted;
11152 
11153   // Complement the result if needed. Based on the condition code.
11154   if (Complement)
11155     Final = DAG.getNode(ISD::XOR, DL, MVT::i64, Shifted,
11156                         DAG.getConstant(1, DL, MVT::i64));
11157 
11158   return DAG.getNode(ISD::TRUNCATE, DL, MVT::i1, Final);
11159 }
11160 
11161 SDValue PPCTargetLowering::ConvertSETCCToSubtract(SDNode *N,
11162                                                   DAGCombinerInfo &DCI) const {
11163   assert(N->getOpcode() == ISD::SETCC && "ISD::SETCC Expected.");
11164 
11165   SelectionDAG &DAG = DCI.DAG;
11166   SDLoc DL(N);
11167 
11168   // Size of integers being compared has a critical role in the following
11169   // analysis, so we prefer to do this when all types are legal.
11170   if (!DCI.isAfterLegalizeDAG())
11171     return SDValue();
11172 
11173   // If all users of SETCC extend its value to a legal integer type
11174   // then we replace SETCC with a subtraction
11175   for (SDNode::use_iterator UI = N->use_begin(),
11176        UE = N->use_end(); UI != UE; ++UI) {
11177     if (UI->getOpcode() != ISD::ZERO_EXTEND)
11178       return SDValue();
11179   }
11180 
11181   ISD::CondCode CC = cast<CondCodeSDNode>(N->getOperand(2))->get();
11182   auto OpSize = N->getOperand(0).getValueSizeInBits();
11183 
11184   unsigned Size = DAG.getDataLayout().getLargestLegalIntTypeSizeInBits();
11185 
11186   if (OpSize < Size) {
11187     switch (CC) {
11188     default: break;
11189     case ISD::SETULT:
11190       return generateEquivalentSub(N, Size, false, false, DL, DAG);
11191     case ISD::SETULE:
11192       return generateEquivalentSub(N, Size, true, true, DL, DAG);
11193     case ISD::SETUGT:
11194       return generateEquivalentSub(N, Size, false, true, DL, DAG);
11195     case ISD::SETUGE:
11196       return generateEquivalentSub(N, Size, true, false, DL, DAG);
11197     }
11198   }
11199 
11200   return SDValue();
11201 }
11202 
11203 SDValue PPCTargetLowering::DAGCombineTruncBoolExt(SDNode *N,
11204                                                   DAGCombinerInfo &DCI) const {
11205   SelectionDAG &DAG = DCI.DAG;
11206   SDLoc dl(N);
11207 
11208   assert(Subtarget.useCRBits() && "Expecting to be tracking CR bits");
11209   // If we're tracking CR bits, we need to be careful that we don't have:
11210   //   trunc(binary-ops(zext(x), zext(y)))
11211   // or
11212   //   trunc(binary-ops(binary-ops(zext(x), zext(y)), ...)
11213   // such that we're unnecessarily moving things into GPRs when it would be
11214   // better to keep them in CR bits.
11215 
11216   // Note that trunc here can be an actual i1 trunc, or can be the effective
11217   // truncation that comes from a setcc or select_cc.
11218   if (N->getOpcode() == ISD::TRUNCATE &&
11219       N->getValueType(0) != MVT::i1)
11220     return SDValue();
11221 
11222   if (N->getOperand(0).getValueType() != MVT::i32 &&
11223       N->getOperand(0).getValueType() != MVT::i64)
11224     return SDValue();
11225 
11226   if (N->getOpcode() == ISD::SETCC ||
11227       N->getOpcode() == ISD::SELECT_CC) {
11228     // If we're looking at a comparison, then we need to make sure that the
11229     // high bits (all except for the first) don't matter the result.
11230     ISD::CondCode CC =
11231       cast<CondCodeSDNode>(N->getOperand(
11232         N->getOpcode() == ISD::SETCC ? 2 : 4))->get();
11233     unsigned OpBits = N->getOperand(0).getValueSizeInBits();
11234 
11235     if (ISD::isSignedIntSetCC(CC)) {
11236       if (DAG.ComputeNumSignBits(N->getOperand(0)) != OpBits ||
11237           DAG.ComputeNumSignBits(N->getOperand(1)) != OpBits)
11238         return SDValue();
11239     } else if (ISD::isUnsignedIntSetCC(CC)) {
11240       if (!DAG.MaskedValueIsZero(N->getOperand(0),
11241                                  APInt::getHighBitsSet(OpBits, OpBits-1)) ||
11242           !DAG.MaskedValueIsZero(N->getOperand(1),
11243                                  APInt::getHighBitsSet(OpBits, OpBits-1)))
11244         return (N->getOpcode() == ISD::SETCC ? ConvertSETCCToSubtract(N, DCI)
11245                                              : SDValue());
11246     } else {
11247       // This is neither a signed nor an unsigned comparison, just make sure
11248       // that the high bits are equal.
11249       KnownBits Op1Known, Op2Known;
11250       DAG.computeKnownBits(N->getOperand(0), Op1Known);
11251       DAG.computeKnownBits(N->getOperand(1), Op2Known);
11252 
11253       // We don't really care about what is known about the first bit (if
11254       // anything), so clear it in all masks prior to comparing them.
11255       Op1Known.Zero.clearBit(0); Op1Known.One.clearBit(0);
11256       Op2Known.Zero.clearBit(0); Op2Known.One.clearBit(0);
11257 
11258       if (Op1Known.Zero != Op2Known.Zero || Op1Known.One != Op2Known.One)
11259         return SDValue();
11260     }
11261   }
11262 
11263   // We now know that the higher-order bits are irrelevant, we just need to
11264   // make sure that all of the intermediate operations are bit operations, and
11265   // all inputs are extensions.
11266   if (N->getOperand(0).getOpcode() != ISD::AND &&
11267       N->getOperand(0).getOpcode() != ISD::OR  &&
11268       N->getOperand(0).getOpcode() != ISD::XOR &&
11269       N->getOperand(0).getOpcode() != ISD::SELECT &&
11270       N->getOperand(0).getOpcode() != ISD::SELECT_CC &&
11271       N->getOperand(0).getOpcode() != ISD::TRUNCATE &&
11272       N->getOperand(0).getOpcode() != ISD::SIGN_EXTEND &&
11273       N->getOperand(0).getOpcode() != ISD::ZERO_EXTEND &&
11274       N->getOperand(0).getOpcode() != ISD::ANY_EXTEND)
11275     return SDValue();
11276 
11277   if ((N->getOpcode() == ISD::SETCC || N->getOpcode() == ISD::SELECT_CC) &&
11278       N->getOperand(1).getOpcode() != ISD::AND &&
11279       N->getOperand(1).getOpcode() != ISD::OR  &&
11280       N->getOperand(1).getOpcode() != ISD::XOR &&
11281       N->getOperand(1).getOpcode() != ISD::SELECT &&
11282       N->getOperand(1).getOpcode() != ISD::SELECT_CC &&
11283       N->getOperand(1).getOpcode() != ISD::TRUNCATE &&
11284       N->getOperand(1).getOpcode() != ISD::SIGN_EXTEND &&
11285       N->getOperand(1).getOpcode() != ISD::ZERO_EXTEND &&
11286       N->getOperand(1).getOpcode() != ISD::ANY_EXTEND)
11287     return SDValue();
11288 
11289   SmallVector<SDValue, 4> Inputs;
11290   SmallVector<SDValue, 8> BinOps, PromOps;
11291   SmallPtrSet<SDNode *, 16> Visited;
11292 
11293   for (unsigned i = 0; i < 2; ++i) {
11294     if (((N->getOperand(i).getOpcode() == ISD::SIGN_EXTEND ||
11295           N->getOperand(i).getOpcode() == ISD::ZERO_EXTEND ||
11296           N->getOperand(i).getOpcode() == ISD::ANY_EXTEND) &&
11297           N->getOperand(i).getOperand(0).getValueType() == MVT::i1) ||
11298         isa<ConstantSDNode>(N->getOperand(i)))
11299       Inputs.push_back(N->getOperand(i));
11300     else
11301       BinOps.push_back(N->getOperand(i));
11302 
11303     if (N->getOpcode() == ISD::TRUNCATE)
11304       break;
11305   }
11306 
11307   // Visit all inputs, collect all binary operations (and, or, xor and
11308   // select) that are all fed by extensions.
11309   while (!BinOps.empty()) {
11310     SDValue BinOp = BinOps.back();
11311     BinOps.pop_back();
11312 
11313     if (!Visited.insert(BinOp.getNode()).second)
11314       continue;
11315 
11316     PromOps.push_back(BinOp);
11317 
11318     for (unsigned i = 0, ie = BinOp.getNumOperands(); i != ie; ++i) {
11319       // The condition of the select is not promoted.
11320       if (BinOp.getOpcode() == ISD::SELECT && i == 0)
11321         continue;
11322       if (BinOp.getOpcode() == ISD::SELECT_CC && i != 2 && i != 3)
11323         continue;
11324 
11325       if (((BinOp.getOperand(i).getOpcode() == ISD::SIGN_EXTEND ||
11326             BinOp.getOperand(i).getOpcode() == ISD::ZERO_EXTEND ||
11327             BinOp.getOperand(i).getOpcode() == ISD::ANY_EXTEND) &&
11328            BinOp.getOperand(i).getOperand(0).getValueType() == MVT::i1) ||
11329           isa<ConstantSDNode>(BinOp.getOperand(i))) {
11330         Inputs.push_back(BinOp.getOperand(i));
11331       } else if (BinOp.getOperand(i).getOpcode() == ISD::AND ||
11332                  BinOp.getOperand(i).getOpcode() == ISD::OR  ||
11333                  BinOp.getOperand(i).getOpcode() == ISD::XOR ||
11334                  BinOp.getOperand(i).getOpcode() == ISD::SELECT ||
11335                  BinOp.getOperand(i).getOpcode() == ISD::SELECT_CC ||
11336                  BinOp.getOperand(i).getOpcode() == ISD::TRUNCATE ||
11337                  BinOp.getOperand(i).getOpcode() == ISD::SIGN_EXTEND ||
11338                  BinOp.getOperand(i).getOpcode() == ISD::ZERO_EXTEND ||
11339                  BinOp.getOperand(i).getOpcode() == ISD::ANY_EXTEND) {
11340         BinOps.push_back(BinOp.getOperand(i));
11341       } else {
11342         // We have an input that is not an extension or another binary
11343         // operation; we'll abort this transformation.
11344         return SDValue();
11345       }
11346     }
11347   }
11348 
11349   // Make sure that this is a self-contained cluster of operations (which
11350   // is not quite the same thing as saying that everything has only one
11351   // use).
11352   for (unsigned i = 0, ie = Inputs.size(); i != ie; ++i) {
11353     if (isa<ConstantSDNode>(Inputs[i]))
11354       continue;
11355 
11356     for (SDNode::use_iterator UI = Inputs[i].getNode()->use_begin(),
11357                               UE = Inputs[i].getNode()->use_end();
11358          UI != UE; ++UI) {
11359       SDNode *User = *UI;
11360       if (User != N && !Visited.count(User))
11361         return SDValue();
11362 
11363       // Make sure that we're not going to promote the non-output-value
11364       // operand(s) or SELECT or SELECT_CC.
11365       // FIXME: Although we could sometimes handle this, and it does occur in
11366       // practice that one of the condition inputs to the select is also one of
11367       // the outputs, we currently can't deal with this.
11368       if (User->getOpcode() == ISD::SELECT) {
11369         if (User->getOperand(0) == Inputs[i])
11370           return SDValue();
11371       } else if (User->getOpcode() == ISD::SELECT_CC) {
11372         if (User->getOperand(0) == Inputs[i] ||
11373             User->getOperand(1) == Inputs[i])
11374           return SDValue();
11375       }
11376     }
11377   }
11378 
11379   for (unsigned i = 0, ie = PromOps.size(); i != ie; ++i) {
11380     for (SDNode::use_iterator UI = PromOps[i].getNode()->use_begin(),
11381                               UE = PromOps[i].getNode()->use_end();
11382          UI != UE; ++UI) {
11383       SDNode *User = *UI;
11384       if (User != N && !Visited.count(User))
11385         return SDValue();
11386 
11387       // Make sure that we're not going to promote the non-output-value
11388       // operand(s) or SELECT or SELECT_CC.
11389       // FIXME: Although we could sometimes handle this, and it does occur in
11390       // practice that one of the condition inputs to the select is also one of
11391       // the outputs, we currently can't deal with this.
11392       if (User->getOpcode() == ISD::SELECT) {
11393         if (User->getOperand(0) == PromOps[i])
11394           return SDValue();
11395       } else if (User->getOpcode() == ISD::SELECT_CC) {
11396         if (User->getOperand(0) == PromOps[i] ||
11397             User->getOperand(1) == PromOps[i])
11398           return SDValue();
11399       }
11400     }
11401   }
11402 
11403   // Replace all inputs with the extension operand.
11404   for (unsigned i = 0, ie = Inputs.size(); i != ie; ++i) {
11405     // Constants may have users outside the cluster of to-be-promoted nodes,
11406     // and so we need to replace those as we do the promotions.
11407     if (isa<ConstantSDNode>(Inputs[i]))
11408       continue;
11409     else
11410       DAG.ReplaceAllUsesOfValueWith(Inputs[i], Inputs[i].getOperand(0));
11411   }
11412 
11413   std::list<HandleSDNode> PromOpHandles;
11414   for (auto &PromOp : PromOps)
11415     PromOpHandles.emplace_back(PromOp);
11416 
11417   // Replace all operations (these are all the same, but have a different
11418   // (i1) return type). DAG.getNode will validate that the types of
11419   // a binary operator match, so go through the list in reverse so that
11420   // we've likely promoted both operands first. Any intermediate truncations or
11421   // extensions disappear.
11422   while (!PromOpHandles.empty()) {
11423     SDValue PromOp = PromOpHandles.back().getValue();
11424     PromOpHandles.pop_back();
11425 
11426     if (PromOp.getOpcode() == ISD::TRUNCATE ||
11427         PromOp.getOpcode() == ISD::SIGN_EXTEND ||
11428         PromOp.getOpcode() == ISD::ZERO_EXTEND ||
11429         PromOp.getOpcode() == ISD::ANY_EXTEND) {
11430       if (!isa<ConstantSDNode>(PromOp.getOperand(0)) &&
11431           PromOp.getOperand(0).getValueType() != MVT::i1) {
11432         // The operand is not yet ready (see comment below).
11433         PromOpHandles.emplace_front(PromOp);
11434         continue;
11435       }
11436 
11437       SDValue RepValue = PromOp.getOperand(0);
11438       if (isa<ConstantSDNode>(RepValue))
11439         RepValue = DAG.getNode(ISD::TRUNCATE, dl, MVT::i1, RepValue);
11440 
11441       DAG.ReplaceAllUsesOfValueWith(PromOp, RepValue);
11442       continue;
11443     }
11444 
11445     unsigned C;
11446     switch (PromOp.getOpcode()) {
11447     default:             C = 0; break;
11448     case ISD::SELECT:    C = 1; break;
11449     case ISD::SELECT_CC: C = 2; break;
11450     }
11451 
11452     if ((!isa<ConstantSDNode>(PromOp.getOperand(C)) &&
11453          PromOp.getOperand(C).getValueType() != MVT::i1) ||
11454         (!isa<ConstantSDNode>(PromOp.getOperand(C+1)) &&
11455          PromOp.getOperand(C+1).getValueType() != MVT::i1)) {
11456       // The to-be-promoted operands of this node have not yet been
11457       // promoted (this should be rare because we're going through the
11458       // list backward, but if one of the operands has several users in
11459       // this cluster of to-be-promoted nodes, it is possible).
11460       PromOpHandles.emplace_front(PromOp);
11461       continue;
11462     }
11463 
11464     SmallVector<SDValue, 3> Ops(PromOp.getNode()->op_begin(),
11465                                 PromOp.getNode()->op_end());
11466 
11467     // If there are any constant inputs, make sure they're replaced now.
11468     for (unsigned i = 0; i < 2; ++i)
11469       if (isa<ConstantSDNode>(Ops[C+i]))
11470         Ops[C+i] = DAG.getNode(ISD::TRUNCATE, dl, MVT::i1, Ops[C+i]);
11471 
11472     DAG.ReplaceAllUsesOfValueWith(PromOp,
11473       DAG.getNode(PromOp.getOpcode(), dl, MVT::i1, Ops));
11474   }
11475 
11476   // Now we're left with the initial truncation itself.
11477   if (N->getOpcode() == ISD::TRUNCATE)
11478     return N->getOperand(0);
11479 
11480   // Otherwise, this is a comparison. The operands to be compared have just
11481   // changed type (to i1), but everything else is the same.
11482   return SDValue(N, 0);
11483 }
11484 
11485 SDValue PPCTargetLowering::DAGCombineExtBoolTrunc(SDNode *N,
11486                                                   DAGCombinerInfo &DCI) const {
11487   SelectionDAG &DAG = DCI.DAG;
11488   SDLoc dl(N);
11489 
11490   // If we're tracking CR bits, we need to be careful that we don't have:
11491   //   zext(binary-ops(trunc(x), trunc(y)))
11492   // or
11493   //   zext(binary-ops(binary-ops(trunc(x), trunc(y)), ...)
11494   // such that we're unnecessarily moving things into CR bits that can more
11495   // efficiently stay in GPRs. Note that if we're not certain that the high
11496   // bits are set as required by the final extension, we still may need to do
11497   // some masking to get the proper behavior.
11498 
11499   // This same functionality is important on PPC64 when dealing with
11500   // 32-to-64-bit extensions; these occur often when 32-bit values are used as
11501   // the return values of functions. Because it is so similar, it is handled
11502   // here as well.
11503 
11504   if (N->getValueType(0) != MVT::i32 &&
11505       N->getValueType(0) != MVT::i64)
11506     return SDValue();
11507 
11508   if (!((N->getOperand(0).getValueType() == MVT::i1 && Subtarget.useCRBits()) ||
11509         (N->getOperand(0).getValueType() == MVT::i32 && Subtarget.isPPC64())))
11510     return SDValue();
11511 
11512   if (N->getOperand(0).getOpcode() != ISD::AND &&
11513       N->getOperand(0).getOpcode() != ISD::OR  &&
11514       N->getOperand(0).getOpcode() != ISD::XOR &&
11515       N->getOperand(0).getOpcode() != ISD::SELECT &&
11516       N->getOperand(0).getOpcode() != ISD::SELECT_CC)
11517     return SDValue();
11518 
11519   SmallVector<SDValue, 4> Inputs;
11520   SmallVector<SDValue, 8> BinOps(1, N->getOperand(0)), PromOps;
11521   SmallPtrSet<SDNode *, 16> Visited;
11522 
11523   // Visit all inputs, collect all binary operations (and, or, xor and
11524   // select) that are all fed by truncations.
11525   while (!BinOps.empty()) {
11526     SDValue BinOp = BinOps.back();
11527     BinOps.pop_back();
11528 
11529     if (!Visited.insert(BinOp.getNode()).second)
11530       continue;
11531 
11532     PromOps.push_back(BinOp);
11533 
11534     for (unsigned i = 0, ie = BinOp.getNumOperands(); i != ie; ++i) {
11535       // The condition of the select is not promoted.
11536       if (BinOp.getOpcode() == ISD::SELECT && i == 0)
11537         continue;
11538       if (BinOp.getOpcode() == ISD::SELECT_CC && i != 2 && i != 3)
11539         continue;
11540 
11541       if (BinOp.getOperand(i).getOpcode() == ISD::TRUNCATE ||
11542           isa<ConstantSDNode>(BinOp.getOperand(i))) {
11543         Inputs.push_back(BinOp.getOperand(i));
11544       } else if (BinOp.getOperand(i).getOpcode() == ISD::AND ||
11545                  BinOp.getOperand(i).getOpcode() == ISD::OR  ||
11546                  BinOp.getOperand(i).getOpcode() == ISD::XOR ||
11547                  BinOp.getOperand(i).getOpcode() == ISD::SELECT ||
11548                  BinOp.getOperand(i).getOpcode() == ISD::SELECT_CC) {
11549         BinOps.push_back(BinOp.getOperand(i));
11550       } else {
11551         // We have an input that is not a truncation or another binary
11552         // operation; we'll abort this transformation.
11553         return SDValue();
11554       }
11555     }
11556   }
11557 
11558   // The operands of a select that must be truncated when the select is
11559   // promoted because the operand is actually part of the to-be-promoted set.
11560   DenseMap<SDNode *, EVT> SelectTruncOp[2];
11561 
11562   // Make sure that this is a self-contained cluster of operations (which
11563   // is not quite the same thing as saying that everything has only one
11564   // use).
11565   for (unsigned i = 0, ie = Inputs.size(); i != ie; ++i) {
11566     if (isa<ConstantSDNode>(Inputs[i]))
11567       continue;
11568 
11569     for (SDNode::use_iterator UI = Inputs[i].getNode()->use_begin(),
11570                               UE = Inputs[i].getNode()->use_end();
11571          UI != UE; ++UI) {
11572       SDNode *User = *UI;
11573       if (User != N && !Visited.count(User))
11574         return SDValue();
11575 
11576       // If we're going to promote the non-output-value operand(s) or SELECT or
11577       // SELECT_CC, record them for truncation.
11578       if (User->getOpcode() == ISD::SELECT) {
11579         if (User->getOperand(0) == Inputs[i])
11580           SelectTruncOp[0].insert(std::make_pair(User,
11581                                     User->getOperand(0).getValueType()));
11582       } else if (User->getOpcode() == ISD::SELECT_CC) {
11583         if (User->getOperand(0) == Inputs[i])
11584           SelectTruncOp[0].insert(std::make_pair(User,
11585                                     User->getOperand(0).getValueType()));
11586         if (User->getOperand(1) == Inputs[i])
11587           SelectTruncOp[1].insert(std::make_pair(User,
11588                                     User->getOperand(1).getValueType()));
11589       }
11590     }
11591   }
11592 
11593   for (unsigned i = 0, ie = PromOps.size(); i != ie; ++i) {
11594     for (SDNode::use_iterator UI = PromOps[i].getNode()->use_begin(),
11595                               UE = PromOps[i].getNode()->use_end();
11596          UI != UE; ++UI) {
11597       SDNode *User = *UI;
11598       if (User != N && !Visited.count(User))
11599         return SDValue();
11600 
11601       // If we're going to promote the non-output-value operand(s) or SELECT or
11602       // SELECT_CC, record them for truncation.
11603       if (User->getOpcode() == ISD::SELECT) {
11604         if (User->getOperand(0) == PromOps[i])
11605           SelectTruncOp[0].insert(std::make_pair(User,
11606                                     User->getOperand(0).getValueType()));
11607       } else if (User->getOpcode() == ISD::SELECT_CC) {
11608         if (User->getOperand(0) == PromOps[i])
11609           SelectTruncOp[0].insert(std::make_pair(User,
11610                                     User->getOperand(0).getValueType()));
11611         if (User->getOperand(1) == PromOps[i])
11612           SelectTruncOp[1].insert(std::make_pair(User,
11613                                     User->getOperand(1).getValueType()));
11614       }
11615     }
11616   }
11617 
11618   unsigned PromBits = N->getOperand(0).getValueSizeInBits();
11619   bool ReallyNeedsExt = false;
11620   if (N->getOpcode() != ISD::ANY_EXTEND) {
11621     // If all of the inputs are not already sign/zero extended, then
11622     // we'll still need to do that at the end.
11623     for (unsigned i = 0, ie = Inputs.size(); i != ie; ++i) {
11624       if (isa<ConstantSDNode>(Inputs[i]))
11625         continue;
11626 
11627       unsigned OpBits =
11628         Inputs[i].getOperand(0).getValueSizeInBits();
11629       assert(PromBits < OpBits && "Truncation not to a smaller bit count?");
11630 
11631       if ((N->getOpcode() == ISD::ZERO_EXTEND &&
11632            !DAG.MaskedValueIsZero(Inputs[i].getOperand(0),
11633                                   APInt::getHighBitsSet(OpBits,
11634                                                         OpBits-PromBits))) ||
11635           (N->getOpcode() == ISD::SIGN_EXTEND &&
11636            DAG.ComputeNumSignBits(Inputs[i].getOperand(0)) <
11637              (OpBits-(PromBits-1)))) {
11638         ReallyNeedsExt = true;
11639         break;
11640       }
11641     }
11642   }
11643 
11644   // Replace all inputs, either with the truncation operand, or a
11645   // truncation or extension to the final output type.
11646   for (unsigned i = 0, ie = Inputs.size(); i != ie; ++i) {
11647     // Constant inputs need to be replaced with the to-be-promoted nodes that
11648     // use them because they might have users outside of the cluster of
11649     // promoted nodes.
11650     if (isa<ConstantSDNode>(Inputs[i]))
11651       continue;
11652 
11653     SDValue InSrc = Inputs[i].getOperand(0);
11654     if (Inputs[i].getValueType() == N->getValueType(0))
11655       DAG.ReplaceAllUsesOfValueWith(Inputs[i], InSrc);
11656     else if (N->getOpcode() == ISD::SIGN_EXTEND)
11657       DAG.ReplaceAllUsesOfValueWith(Inputs[i],
11658         DAG.getSExtOrTrunc(InSrc, dl, N->getValueType(0)));
11659     else if (N->getOpcode() == ISD::ZERO_EXTEND)
11660       DAG.ReplaceAllUsesOfValueWith(Inputs[i],
11661         DAG.getZExtOrTrunc(InSrc, dl, N->getValueType(0)));
11662     else
11663       DAG.ReplaceAllUsesOfValueWith(Inputs[i],
11664         DAG.getAnyExtOrTrunc(InSrc, dl, N->getValueType(0)));
11665   }
11666 
11667   std::list<HandleSDNode> PromOpHandles;
11668   for (auto &PromOp : PromOps)
11669     PromOpHandles.emplace_back(PromOp);
11670 
11671   // Replace all operations (these are all the same, but have a different
11672   // (promoted) return type). DAG.getNode will validate that the types of
11673   // a binary operator match, so go through the list in reverse so that
11674   // we've likely promoted both operands first.
11675   while (!PromOpHandles.empty()) {
11676     SDValue PromOp = PromOpHandles.back().getValue();
11677     PromOpHandles.pop_back();
11678 
11679     unsigned C;
11680     switch (PromOp.getOpcode()) {
11681     default:             C = 0; break;
11682     case ISD::SELECT:    C = 1; break;
11683     case ISD::SELECT_CC: C = 2; break;
11684     }
11685 
11686     if ((!isa<ConstantSDNode>(PromOp.getOperand(C)) &&
11687          PromOp.getOperand(C).getValueType() != N->getValueType(0)) ||
11688         (!isa<ConstantSDNode>(PromOp.getOperand(C+1)) &&
11689          PromOp.getOperand(C+1).getValueType() != N->getValueType(0))) {
11690       // The to-be-promoted operands of this node have not yet been
11691       // promoted (this should be rare because we're going through the
11692       // list backward, but if one of the operands has several users in
11693       // this cluster of to-be-promoted nodes, it is possible).
11694       PromOpHandles.emplace_front(PromOp);
11695       continue;
11696     }
11697 
11698     // For SELECT and SELECT_CC nodes, we do a similar check for any
11699     // to-be-promoted comparison inputs.
11700     if (PromOp.getOpcode() == ISD::SELECT ||
11701         PromOp.getOpcode() == ISD::SELECT_CC) {
11702       if ((SelectTruncOp[0].count(PromOp.getNode()) &&
11703            PromOp.getOperand(0).getValueType() != N->getValueType(0)) ||
11704           (SelectTruncOp[1].count(PromOp.getNode()) &&
11705            PromOp.getOperand(1).getValueType() != N->getValueType(0))) {
11706         PromOpHandles.emplace_front(PromOp);
11707         continue;
11708       }
11709     }
11710 
11711     SmallVector<SDValue, 3> Ops(PromOp.getNode()->op_begin(),
11712                                 PromOp.getNode()->op_end());
11713 
11714     // If this node has constant inputs, then they'll need to be promoted here.
11715     for (unsigned i = 0; i < 2; ++i) {
11716       if (!isa<ConstantSDNode>(Ops[C+i]))
11717         continue;
11718       if (Ops[C+i].getValueType() == N->getValueType(0))
11719         continue;
11720 
11721       if (N->getOpcode() == ISD::SIGN_EXTEND)
11722         Ops[C+i] = DAG.getSExtOrTrunc(Ops[C+i], dl, N->getValueType(0));
11723       else if (N->getOpcode() == ISD::ZERO_EXTEND)
11724         Ops[C+i] = DAG.getZExtOrTrunc(Ops[C+i], dl, N->getValueType(0));
11725       else
11726         Ops[C+i] = DAG.getAnyExtOrTrunc(Ops[C+i], dl, N->getValueType(0));
11727     }
11728 
11729     // If we've promoted the comparison inputs of a SELECT or SELECT_CC,
11730     // truncate them again to the original value type.
11731     if (PromOp.getOpcode() == ISD::SELECT ||
11732         PromOp.getOpcode() == ISD::SELECT_CC) {
11733       auto SI0 = SelectTruncOp[0].find(PromOp.getNode());
11734       if (SI0 != SelectTruncOp[0].end())
11735         Ops[0] = DAG.getNode(ISD::TRUNCATE, dl, SI0->second, Ops[0]);
11736       auto SI1 = SelectTruncOp[1].find(PromOp.getNode());
11737       if (SI1 != SelectTruncOp[1].end())
11738         Ops[1] = DAG.getNode(ISD::TRUNCATE, dl, SI1->second, Ops[1]);
11739     }
11740 
11741     DAG.ReplaceAllUsesOfValueWith(PromOp,
11742       DAG.getNode(PromOp.getOpcode(), dl, N->getValueType(0), Ops));
11743   }
11744 
11745   // Now we're left with the initial extension itself.
11746   if (!ReallyNeedsExt)
11747     return N->getOperand(0);
11748 
11749   // To zero extend, just mask off everything except for the first bit (in the
11750   // i1 case).
11751   if (N->getOpcode() == ISD::ZERO_EXTEND)
11752     return DAG.getNode(ISD::AND, dl, N->getValueType(0), N->getOperand(0),
11753                        DAG.getConstant(APInt::getLowBitsSet(
11754                                          N->getValueSizeInBits(0), PromBits),
11755                                        dl, N->getValueType(0)));
11756 
11757   assert(N->getOpcode() == ISD::SIGN_EXTEND &&
11758          "Invalid extension type");
11759   EVT ShiftAmountTy = getShiftAmountTy(N->getValueType(0), DAG.getDataLayout());
11760   SDValue ShiftCst =
11761       DAG.getConstant(N->getValueSizeInBits(0) - PromBits, dl, ShiftAmountTy);
11762   return DAG.getNode(
11763       ISD::SRA, dl, N->getValueType(0),
11764       DAG.getNode(ISD::SHL, dl, N->getValueType(0), N->getOperand(0), ShiftCst),
11765       ShiftCst);
11766 }
11767 
11768 // Is this an extending load from an f32 to an f64?
11769 static bool isFPExtLoad(SDValue Op) {
11770   if (LoadSDNode *LD = dyn_cast<LoadSDNode>(Op.getNode()))
11771     return LD->getExtensionType() == ISD::EXTLOAD &&
11772       Op.getValueType() == MVT::f64;
11773   return false;
11774 }
11775 
11776 /// Reduces the number of fp-to-int conversion when building a vector.
11777 ///
11778 /// If this vector is built out of floating to integer conversions,
11779 /// transform it to a vector built out of floating point values followed by a
11780 /// single floating to integer conversion of the vector.
11781 /// Namely  (build_vector (fptosi $A), (fptosi $B), ...)
11782 /// becomes (fptosi (build_vector ($A, $B, ...)))
11783 SDValue PPCTargetLowering::
11784 combineElementTruncationToVectorTruncation(SDNode *N,
11785                                            DAGCombinerInfo &DCI) const {
11786   assert(N->getOpcode() == ISD::BUILD_VECTOR &&
11787          "Should be called with a BUILD_VECTOR node");
11788 
11789   SelectionDAG &DAG = DCI.DAG;
11790   SDLoc dl(N);
11791 
11792   SDValue FirstInput = N->getOperand(0);
11793   assert(FirstInput.getOpcode() == PPCISD::MFVSR &&
11794          "The input operand must be an fp-to-int conversion.");
11795 
11796   // This combine happens after legalization so the fp_to_[su]i nodes are
11797   // already converted to PPCSISD nodes.
11798   unsigned FirstConversion = FirstInput.getOperand(0).getOpcode();
11799   if (FirstConversion == PPCISD::FCTIDZ ||
11800       FirstConversion == PPCISD::FCTIDUZ ||
11801       FirstConversion == PPCISD::FCTIWZ ||
11802       FirstConversion == PPCISD::FCTIWUZ) {
11803     bool IsSplat = true;
11804     bool Is32Bit = FirstConversion == PPCISD::FCTIWZ ||
11805       FirstConversion == PPCISD::FCTIWUZ;
11806     EVT SrcVT = FirstInput.getOperand(0).getValueType();
11807     SmallVector<SDValue, 4> Ops;
11808     EVT TargetVT = N->getValueType(0);
11809     for (int i = 0, e = N->getNumOperands(); i < e; ++i) {
11810       SDValue NextOp = N->getOperand(i);
11811       if (NextOp.getOpcode() != PPCISD::MFVSR)
11812         return SDValue();
11813       unsigned NextConversion = NextOp.getOperand(0).getOpcode();
11814       if (NextConversion != FirstConversion)
11815         return SDValue();
11816       // If we are converting to 32-bit integers, we need to add an FP_ROUND.
11817       // This is not valid if the input was originally double precision. It is
11818       // also not profitable to do unless this is an extending load in which
11819       // case doing this combine will allow us to combine consecutive loads.
11820       if (Is32Bit && !isFPExtLoad(NextOp.getOperand(0).getOperand(0)))
11821         return SDValue();
11822       if (N->getOperand(i) != FirstInput)
11823         IsSplat = false;
11824     }
11825 
11826     // If this is a splat, we leave it as-is since there will be only a single
11827     // fp-to-int conversion followed by a splat of the integer. This is better
11828     // for 32-bit and smaller ints and neutral for 64-bit ints.
11829     if (IsSplat)
11830       return SDValue();
11831 
11832     // Now that we know we have the right type of node, get its operands
11833     for (int i = 0, e = N->getNumOperands(); i < e; ++i) {
11834       SDValue In = N->getOperand(i).getOperand(0);
11835       if (Is32Bit) {
11836         // For 32-bit values, we need to add an FP_ROUND node (if we made it
11837         // here, we know that all inputs are extending loads so this is safe).
11838         if (In.isUndef())
11839           Ops.push_back(DAG.getUNDEF(SrcVT));
11840         else {
11841           SDValue Trunc = DAG.getNode(ISD::FP_ROUND, dl,
11842                                       MVT::f32, In.getOperand(0),
11843                                       DAG.getIntPtrConstant(1, dl));
11844           Ops.push_back(Trunc);
11845         }
11846       } else
11847         Ops.push_back(In.isUndef() ? DAG.getUNDEF(SrcVT) : In.getOperand(0));
11848     }
11849 
11850     unsigned Opcode;
11851     if (FirstConversion == PPCISD::FCTIDZ ||
11852         FirstConversion == PPCISD::FCTIWZ)
11853       Opcode = ISD::FP_TO_SINT;
11854     else
11855       Opcode = ISD::FP_TO_UINT;
11856 
11857     EVT NewVT = TargetVT == MVT::v2i64 ? MVT::v2f64 : MVT::v4f32;
11858     SDValue BV = DAG.getBuildVector(NewVT, dl, Ops);
11859     return DAG.getNode(Opcode, dl, TargetVT, BV);
11860   }
11861   return SDValue();
11862 }
11863 
11864 /// Reduce the number of loads when building a vector.
11865 ///
11866 /// Building a vector out of multiple loads can be converted to a load
11867 /// of the vector type if the loads are consecutive. If the loads are
11868 /// consecutive but in descending order, a shuffle is added at the end
11869 /// to reorder the vector.
11870 static SDValue combineBVOfConsecutiveLoads(SDNode *N, SelectionDAG &DAG) {
11871   assert(N->getOpcode() == ISD::BUILD_VECTOR &&
11872          "Should be called with a BUILD_VECTOR node");
11873 
11874   SDLoc dl(N);
11875   bool InputsAreConsecutiveLoads = true;
11876   bool InputsAreReverseConsecutive = true;
11877   unsigned ElemSize = N->getValueType(0).getScalarSizeInBits() / 8;
11878   SDValue FirstInput = N->getOperand(0);
11879   bool IsRoundOfExtLoad = false;
11880 
11881   if (FirstInput.getOpcode() == ISD::FP_ROUND &&
11882       FirstInput.getOperand(0).getOpcode() == ISD::LOAD) {
11883     LoadSDNode *LD = dyn_cast<LoadSDNode>(FirstInput.getOperand(0));
11884     IsRoundOfExtLoad = LD->getExtensionType() == ISD::EXTLOAD;
11885   }
11886   // Not a build vector of (possibly fp_rounded) loads.
11887   if (!IsRoundOfExtLoad && FirstInput.getOpcode() != ISD::LOAD)
11888     return SDValue();
11889 
11890   for (int i = 1, e = N->getNumOperands(); i < e; ++i) {
11891     // If any inputs are fp_round(extload), they all must be.
11892     if (IsRoundOfExtLoad && N->getOperand(i).getOpcode() != ISD::FP_ROUND)
11893       return SDValue();
11894 
11895     SDValue NextInput = IsRoundOfExtLoad ? N->getOperand(i).getOperand(0) :
11896       N->getOperand(i);
11897     if (NextInput.getOpcode() != ISD::LOAD)
11898       return SDValue();
11899 
11900     SDValue PreviousInput =
11901       IsRoundOfExtLoad ? N->getOperand(i-1).getOperand(0) : N->getOperand(i-1);
11902     LoadSDNode *LD1 = dyn_cast<LoadSDNode>(PreviousInput);
11903     LoadSDNode *LD2 = dyn_cast<LoadSDNode>(NextInput);
11904 
11905     // If any inputs are fp_round(extload), they all must be.
11906     if (IsRoundOfExtLoad && LD2->getExtensionType() != ISD::EXTLOAD)
11907       return SDValue();
11908 
11909     if (!isConsecutiveLS(LD2, LD1, ElemSize, 1, DAG))
11910       InputsAreConsecutiveLoads = false;
11911     if (!isConsecutiveLS(LD1, LD2, ElemSize, 1, DAG))
11912       InputsAreReverseConsecutive = false;
11913 
11914     // Exit early if the loads are neither consecutive nor reverse consecutive.
11915     if (!InputsAreConsecutiveLoads && !InputsAreReverseConsecutive)
11916       return SDValue();
11917   }
11918 
11919   assert(!(InputsAreConsecutiveLoads && InputsAreReverseConsecutive) &&
11920          "The loads cannot be both consecutive and reverse consecutive.");
11921 
11922   SDValue FirstLoadOp =
11923     IsRoundOfExtLoad ? FirstInput.getOperand(0) : FirstInput;
11924   SDValue LastLoadOp =
11925     IsRoundOfExtLoad ? N->getOperand(N->getNumOperands()-1).getOperand(0) :
11926                        N->getOperand(N->getNumOperands()-1);
11927 
11928   LoadSDNode *LD1 = dyn_cast<LoadSDNode>(FirstLoadOp);
11929   LoadSDNode *LDL = dyn_cast<LoadSDNode>(LastLoadOp);
11930   if (InputsAreConsecutiveLoads) {
11931     assert(LD1 && "Input needs to be a LoadSDNode.");
11932     return DAG.getLoad(N->getValueType(0), dl, LD1->getChain(),
11933                        LD1->getBasePtr(), LD1->getPointerInfo(),
11934                        LD1->getAlignment());
11935   }
11936   if (InputsAreReverseConsecutive) {
11937     assert(LDL && "Input needs to be a LoadSDNode.");
11938     SDValue Load = DAG.getLoad(N->getValueType(0), dl, LDL->getChain(),
11939                                LDL->getBasePtr(), LDL->getPointerInfo(),
11940                                LDL->getAlignment());
11941     SmallVector<int, 16> Ops;
11942     for (int i = N->getNumOperands() - 1; i >= 0; i--)
11943       Ops.push_back(i);
11944 
11945     return DAG.getVectorShuffle(N->getValueType(0), dl, Load,
11946                                 DAG.getUNDEF(N->getValueType(0)), Ops);
11947   }
11948   return SDValue();
11949 }
11950 
11951 // This function adds the required vector_shuffle needed to get
11952 // the elements of the vector extract in the correct position
11953 // as specified by the CorrectElems encoding.
11954 static SDValue addShuffleForVecExtend(SDNode *N, SelectionDAG &DAG,
11955                                       SDValue Input, uint64_t Elems,
11956                                       uint64_t CorrectElems) {
11957   SDLoc dl(N);
11958 
11959   unsigned NumElems = Input.getValueType().getVectorNumElements();
11960   SmallVector<int, 16> ShuffleMask(NumElems, -1);
11961 
11962   // Knowing the element indices being extracted from the original
11963   // vector and the order in which they're being inserted, just put
11964   // them at element indices required for the instruction.
11965   for (unsigned i = 0; i < N->getNumOperands(); i++) {
11966     if (DAG.getDataLayout().isLittleEndian())
11967       ShuffleMask[CorrectElems & 0xF] = Elems & 0xF;
11968     else
11969       ShuffleMask[(CorrectElems & 0xF0) >> 4] = (Elems & 0xF0) >> 4;
11970     CorrectElems = CorrectElems >> 8;
11971     Elems = Elems >> 8;
11972   }
11973 
11974   SDValue Shuffle =
11975       DAG.getVectorShuffle(Input.getValueType(), dl, Input,
11976                            DAG.getUNDEF(Input.getValueType()), ShuffleMask);
11977 
11978   EVT Ty = N->getValueType(0);
11979   SDValue BV = DAG.getNode(PPCISD::SExtVElems, dl, Ty, Shuffle);
11980   return BV;
11981 }
11982 
11983 // Look for build vector patterns where input operands come from sign
11984 // extended vector_extract elements of specific indices. If the correct indices
11985 // aren't used, add a vector shuffle to fix up the indices and create a new
11986 // PPCISD:SExtVElems node which selects the vector sign extend instructions
11987 // during instruction selection.
11988 static SDValue combineBVOfVecSExt(SDNode *N, SelectionDAG &DAG) {
11989   // This array encodes the indices that the vector sign extend instructions
11990   // extract from when extending from one type to another for both BE and LE.
11991   // The right nibble of each byte corresponds to the LE incides.
11992   // and the left nibble of each byte corresponds to the BE incides.
11993   // For example: 0x3074B8FC  byte->word
11994   // For LE: the allowed indices are: 0x0,0x4,0x8,0xC
11995   // For BE: the allowed indices are: 0x3,0x7,0xB,0xF
11996   // For example: 0x000070F8  byte->double word
11997   // For LE: the allowed indices are: 0x0,0x8
11998   // For BE: the allowed indices are: 0x7,0xF
11999   uint64_t TargetElems[] = {
12000       0x3074B8FC, // b->w
12001       0x000070F8, // b->d
12002       0x10325476, // h->w
12003       0x00003074, // h->d
12004       0x00001032, // w->d
12005   };
12006 
12007   uint64_t Elems = 0;
12008   int Index;
12009   SDValue Input;
12010 
12011   auto isSExtOfVecExtract = [&](SDValue Op) -> bool {
12012     if (!Op)
12013       return false;
12014     if (Op.getOpcode() != ISD::SIGN_EXTEND &&
12015         Op.getOpcode() != ISD::SIGN_EXTEND_INREG)
12016       return false;
12017 
12018     // A SIGN_EXTEND_INREG might be fed by an ANY_EXTEND to produce a value
12019     // of the right width.
12020     SDValue Extract = Op.getOperand(0);
12021     if (Extract.getOpcode() == ISD::ANY_EXTEND)
12022       Extract = Extract.getOperand(0);
12023     if (Extract.getOpcode() != ISD::EXTRACT_VECTOR_ELT)
12024       return false;
12025 
12026     ConstantSDNode *ExtOp = dyn_cast<ConstantSDNode>(Extract.getOperand(1));
12027     if (!ExtOp)
12028       return false;
12029 
12030     Index = ExtOp->getZExtValue();
12031     if (Input && Input != Extract.getOperand(0))
12032       return false;
12033 
12034     if (!Input)
12035       Input = Extract.getOperand(0);
12036 
12037     Elems = Elems << 8;
12038     Index = DAG.getDataLayout().isLittleEndian() ? Index : Index << 4;
12039     Elems |= Index;
12040 
12041     return true;
12042   };
12043 
12044   // If the build vector operands aren't sign extended vector extracts,
12045   // of the same input vector, then return.
12046   for (unsigned i = 0; i < N->getNumOperands(); i++) {
12047     if (!isSExtOfVecExtract(N->getOperand(i))) {
12048       return SDValue();
12049     }
12050   }
12051 
12052   // If the vector extract indicies are not correct, add the appropriate
12053   // vector_shuffle.
12054   int TgtElemArrayIdx;
12055   int InputSize = Input.getValueType().getScalarSizeInBits();
12056   int OutputSize = N->getValueType(0).getScalarSizeInBits();
12057   if (InputSize + OutputSize == 40)
12058     TgtElemArrayIdx = 0;
12059   else if (InputSize + OutputSize == 72)
12060     TgtElemArrayIdx = 1;
12061   else if (InputSize + OutputSize == 48)
12062     TgtElemArrayIdx = 2;
12063   else if (InputSize + OutputSize == 80)
12064     TgtElemArrayIdx = 3;
12065   else if (InputSize + OutputSize == 96)
12066     TgtElemArrayIdx = 4;
12067   else
12068     return SDValue();
12069 
12070   uint64_t CorrectElems = TargetElems[TgtElemArrayIdx];
12071   CorrectElems = DAG.getDataLayout().isLittleEndian()
12072                      ? CorrectElems & 0x0F0F0F0F0F0F0F0F
12073                      : CorrectElems & 0xF0F0F0F0F0F0F0F0;
12074   if (Elems != CorrectElems) {
12075     return addShuffleForVecExtend(N, DAG, Input, Elems, CorrectElems);
12076   }
12077 
12078   // Regular lowering will catch cases where a shuffle is not needed.
12079   return SDValue();
12080 }
12081 
12082 SDValue PPCTargetLowering::DAGCombineBuildVector(SDNode *N,
12083                                                  DAGCombinerInfo &DCI) const {
12084   assert(N->getOpcode() == ISD::BUILD_VECTOR &&
12085          "Should be called with a BUILD_VECTOR node");
12086 
12087   SelectionDAG &DAG = DCI.DAG;
12088   SDLoc dl(N);
12089 
12090   if (!Subtarget.hasVSX())
12091     return SDValue();
12092 
12093   // The target independent DAG combiner will leave a build_vector of
12094   // float-to-int conversions intact. We can generate MUCH better code for
12095   // a float-to-int conversion of a vector of floats.
12096   SDValue FirstInput = N->getOperand(0);
12097   if (FirstInput.getOpcode() == PPCISD::MFVSR) {
12098     SDValue Reduced = combineElementTruncationToVectorTruncation(N, DCI);
12099     if (Reduced)
12100       return Reduced;
12101   }
12102 
12103   // If we're building a vector out of consecutive loads, just load that
12104   // vector type.
12105   SDValue Reduced = combineBVOfConsecutiveLoads(N, DAG);
12106   if (Reduced)
12107     return Reduced;
12108 
12109   // If we're building a vector out of extended elements from another vector
12110   // we have P9 vector integer extend instructions. The code assumes legal
12111   // input types (i.e. it can't handle things like v4i16) so do not run before
12112   // legalization.
12113   if (Subtarget.hasP9Altivec() && !DCI.isBeforeLegalize()) {
12114     Reduced = combineBVOfVecSExt(N, DAG);
12115     if (Reduced)
12116       return Reduced;
12117   }
12118 
12119 
12120   if (N->getValueType(0) != MVT::v2f64)
12121     return SDValue();
12122 
12123   // Looking for:
12124   // (build_vector ([su]int_to_fp (extractelt 0)), [su]int_to_fp (extractelt 1))
12125   if (FirstInput.getOpcode() != ISD::SINT_TO_FP &&
12126       FirstInput.getOpcode() != ISD::UINT_TO_FP)
12127     return SDValue();
12128   if (N->getOperand(1).getOpcode() != ISD::SINT_TO_FP &&
12129       N->getOperand(1).getOpcode() != ISD::UINT_TO_FP)
12130     return SDValue();
12131   if (FirstInput.getOpcode() != N->getOperand(1).getOpcode())
12132     return SDValue();
12133 
12134   SDValue Ext1 = FirstInput.getOperand(0);
12135   SDValue Ext2 = N->getOperand(1).getOperand(0);
12136   if(Ext1.getOpcode() != ISD::EXTRACT_VECTOR_ELT ||
12137      Ext2.getOpcode() != ISD::EXTRACT_VECTOR_ELT)
12138     return SDValue();
12139 
12140   ConstantSDNode *Ext1Op = dyn_cast<ConstantSDNode>(Ext1.getOperand(1));
12141   ConstantSDNode *Ext2Op = dyn_cast<ConstantSDNode>(Ext2.getOperand(1));
12142   if (!Ext1Op || !Ext2Op)
12143     return SDValue();
12144   if (Ext1.getValueType() != MVT::i32 ||
12145       Ext2.getValueType() != MVT::i32)
12146   if (Ext1.getOperand(0) != Ext2.getOperand(0))
12147     return SDValue();
12148 
12149   int FirstElem = Ext1Op->getZExtValue();
12150   int SecondElem = Ext2Op->getZExtValue();
12151   int SubvecIdx;
12152   if (FirstElem == 0 && SecondElem == 1)
12153     SubvecIdx = Subtarget.isLittleEndian() ? 1 : 0;
12154   else if (FirstElem == 2 && SecondElem == 3)
12155     SubvecIdx = Subtarget.isLittleEndian() ? 0 : 1;
12156   else
12157     return SDValue();
12158 
12159   SDValue SrcVec = Ext1.getOperand(0);
12160   auto NodeType = (N->getOperand(1).getOpcode() == ISD::SINT_TO_FP) ?
12161     PPCISD::SINT_VEC_TO_FP : PPCISD::UINT_VEC_TO_FP;
12162   return DAG.getNode(NodeType, dl, MVT::v2f64,
12163                      SrcVec, DAG.getIntPtrConstant(SubvecIdx, dl));
12164 }
12165 
12166 SDValue PPCTargetLowering::combineFPToIntToFP(SDNode *N,
12167                                               DAGCombinerInfo &DCI) const {
12168   assert((N->getOpcode() == ISD::SINT_TO_FP ||
12169           N->getOpcode() == ISD::UINT_TO_FP) &&
12170          "Need an int -> FP conversion node here");
12171 
12172   if (useSoftFloat() || !Subtarget.has64BitSupport())
12173     return SDValue();
12174 
12175   SelectionDAG &DAG = DCI.DAG;
12176   SDLoc dl(N);
12177   SDValue Op(N, 0);
12178 
12179   // Don't handle ppc_fp128 here or conversions that are out-of-range capable
12180   // from the hardware.
12181   if (Op.getValueType() != MVT::f32 && Op.getValueType() != MVT::f64)
12182     return SDValue();
12183   if (Op.getOperand(0).getValueType().getSimpleVT() <= MVT(MVT::i1) ||
12184       Op.getOperand(0).getValueType().getSimpleVT() > MVT(MVT::i64))
12185     return SDValue();
12186 
12187   SDValue FirstOperand(Op.getOperand(0));
12188   bool SubWordLoad = FirstOperand.getOpcode() == ISD::LOAD &&
12189     (FirstOperand.getValueType() == MVT::i8 ||
12190      FirstOperand.getValueType() == MVT::i16);
12191   if (Subtarget.hasP9Vector() && Subtarget.hasP9Altivec() && SubWordLoad) {
12192     bool Signed = N->getOpcode() == ISD::SINT_TO_FP;
12193     bool DstDouble = Op.getValueType() == MVT::f64;
12194     unsigned ConvOp = Signed ?
12195       (DstDouble ? PPCISD::FCFID  : PPCISD::FCFIDS) :
12196       (DstDouble ? PPCISD::FCFIDU : PPCISD::FCFIDUS);
12197     SDValue WidthConst =
12198       DAG.getIntPtrConstant(FirstOperand.getValueType() == MVT::i8 ? 1 : 2,
12199                             dl, false);
12200     LoadSDNode *LDN = cast<LoadSDNode>(FirstOperand.getNode());
12201     SDValue Ops[] = { LDN->getChain(), LDN->getBasePtr(), WidthConst };
12202     SDValue Ld = DAG.getMemIntrinsicNode(PPCISD::LXSIZX, dl,
12203                                          DAG.getVTList(MVT::f64, MVT::Other),
12204                                          Ops, MVT::i8, LDN->getMemOperand());
12205 
12206     // For signed conversion, we need to sign-extend the value in the VSR
12207     if (Signed) {
12208       SDValue ExtOps[] = { Ld, WidthConst };
12209       SDValue Ext = DAG.getNode(PPCISD::VEXTS, dl, MVT::f64, ExtOps);
12210       return DAG.getNode(ConvOp, dl, DstDouble ? MVT::f64 : MVT::f32, Ext);
12211     } else
12212       return DAG.getNode(ConvOp, dl, DstDouble ? MVT::f64 : MVT::f32, Ld);
12213   }
12214 
12215 
12216   // For i32 intermediate values, unfortunately, the conversion functions
12217   // leave the upper 32 bits of the value are undefined. Within the set of
12218   // scalar instructions, we have no method for zero- or sign-extending the
12219   // value. Thus, we cannot handle i32 intermediate values here.
12220   if (Op.getOperand(0).getValueType() == MVT::i32)
12221     return SDValue();
12222 
12223   assert((Op.getOpcode() == ISD::SINT_TO_FP || Subtarget.hasFPCVT()) &&
12224          "UINT_TO_FP is supported only with FPCVT");
12225 
12226   // If we have FCFIDS, then use it when converting to single-precision.
12227   // Otherwise, convert to double-precision and then round.
12228   unsigned FCFOp = (Subtarget.hasFPCVT() && Op.getValueType() == MVT::f32)
12229                        ? (Op.getOpcode() == ISD::UINT_TO_FP ? PPCISD::FCFIDUS
12230                                                             : PPCISD::FCFIDS)
12231                        : (Op.getOpcode() == ISD::UINT_TO_FP ? PPCISD::FCFIDU
12232                                                             : PPCISD::FCFID);
12233   MVT FCFTy = (Subtarget.hasFPCVT() && Op.getValueType() == MVT::f32)
12234                   ? MVT::f32
12235                   : MVT::f64;
12236 
12237   // If we're converting from a float, to an int, and back to a float again,
12238   // then we don't need the store/load pair at all.
12239   if ((Op.getOperand(0).getOpcode() == ISD::FP_TO_UINT &&
12240        Subtarget.hasFPCVT()) ||
12241       (Op.getOperand(0).getOpcode() == ISD::FP_TO_SINT)) {
12242     SDValue Src = Op.getOperand(0).getOperand(0);
12243     if (Src.getValueType() == MVT::f32) {
12244       Src = DAG.getNode(ISD::FP_EXTEND, dl, MVT::f64, Src);
12245       DCI.AddToWorklist(Src.getNode());
12246     } else if (Src.getValueType() != MVT::f64) {
12247       // Make sure that we don't pick up a ppc_fp128 source value.
12248       return SDValue();
12249     }
12250 
12251     unsigned FCTOp =
12252       Op.getOperand(0).getOpcode() == ISD::FP_TO_SINT ? PPCISD::FCTIDZ :
12253                                                         PPCISD::FCTIDUZ;
12254 
12255     SDValue Tmp = DAG.getNode(FCTOp, dl, MVT::f64, Src);
12256     SDValue FP = DAG.getNode(FCFOp, dl, FCFTy, Tmp);
12257 
12258     if (Op.getValueType() == MVT::f32 && !Subtarget.hasFPCVT()) {
12259       FP = DAG.getNode(ISD::FP_ROUND, dl,
12260                        MVT::f32, FP, DAG.getIntPtrConstant(0, dl));
12261       DCI.AddToWorklist(FP.getNode());
12262     }
12263 
12264     return FP;
12265   }
12266 
12267   return SDValue();
12268 }
12269 
12270 // expandVSXLoadForLE - Convert VSX loads (which may be intrinsics for
12271 // builtins) into loads with swaps.
12272 SDValue PPCTargetLowering::expandVSXLoadForLE(SDNode *N,
12273                                               DAGCombinerInfo &DCI) const {
12274   SelectionDAG &DAG = DCI.DAG;
12275   SDLoc dl(N);
12276   SDValue Chain;
12277   SDValue Base;
12278   MachineMemOperand *MMO;
12279 
12280   switch (N->getOpcode()) {
12281   default:
12282     llvm_unreachable("Unexpected opcode for little endian VSX load");
12283   case ISD::LOAD: {
12284     LoadSDNode *LD = cast<LoadSDNode>(N);
12285     Chain = LD->getChain();
12286     Base = LD->getBasePtr();
12287     MMO = LD->getMemOperand();
12288     // If the MMO suggests this isn't a load of a full vector, leave
12289     // things alone.  For a built-in, we have to make the change for
12290     // correctness, so if there is a size problem that will be a bug.
12291     if (MMO->getSize() < 16)
12292       return SDValue();
12293     break;
12294   }
12295   case ISD::INTRINSIC_W_CHAIN: {
12296     MemIntrinsicSDNode *Intrin = cast<MemIntrinsicSDNode>(N);
12297     Chain = Intrin->getChain();
12298     // Similarly to the store case below, Intrin->getBasePtr() doesn't get
12299     // us what we want. Get operand 2 instead.
12300     Base = Intrin->getOperand(2);
12301     MMO = Intrin->getMemOperand();
12302     break;
12303   }
12304   }
12305 
12306   MVT VecTy = N->getValueType(0).getSimpleVT();
12307 
12308   // Do not expand to PPCISD::LXVD2X + PPCISD::XXSWAPD when the load is
12309   // aligned and the type is a vector with elements up to 4 bytes
12310   if (Subtarget.needsSwapsForVSXMemOps() && !(MMO->getAlignment()%16)
12311       && VecTy.getScalarSizeInBits() <= 32 ) {
12312     return SDValue();
12313   }
12314 
12315   SDValue LoadOps[] = { Chain, Base };
12316   SDValue Load = DAG.getMemIntrinsicNode(PPCISD::LXVD2X, dl,
12317                                          DAG.getVTList(MVT::v2f64, MVT::Other),
12318                                          LoadOps, MVT::v2f64, MMO);
12319 
12320   DCI.AddToWorklist(Load.getNode());
12321   Chain = Load.getValue(1);
12322   SDValue Swap = DAG.getNode(
12323       PPCISD::XXSWAPD, dl, DAG.getVTList(MVT::v2f64, MVT::Other), Chain, Load);
12324   DCI.AddToWorklist(Swap.getNode());
12325 
12326   // Add a bitcast if the resulting load type doesn't match v2f64.
12327   if (VecTy != MVT::v2f64) {
12328     SDValue N = DAG.getNode(ISD::BITCAST, dl, VecTy, Swap);
12329     DCI.AddToWorklist(N.getNode());
12330     // Package {bitcast value, swap's chain} to match Load's shape.
12331     return DAG.getNode(ISD::MERGE_VALUES, dl, DAG.getVTList(VecTy, MVT::Other),
12332                        N, Swap.getValue(1));
12333   }
12334 
12335   return Swap;
12336 }
12337 
12338 // expandVSXStoreForLE - Convert VSX stores (which may be intrinsics for
12339 // builtins) into stores with swaps.
12340 SDValue PPCTargetLowering::expandVSXStoreForLE(SDNode *N,
12341                                                DAGCombinerInfo &DCI) const {
12342   SelectionDAG &DAG = DCI.DAG;
12343   SDLoc dl(N);
12344   SDValue Chain;
12345   SDValue Base;
12346   unsigned SrcOpnd;
12347   MachineMemOperand *MMO;
12348 
12349   switch (N->getOpcode()) {
12350   default:
12351     llvm_unreachable("Unexpected opcode for little endian VSX store");
12352   case ISD::STORE: {
12353     StoreSDNode *ST = cast<StoreSDNode>(N);
12354     Chain = ST->getChain();
12355     Base = ST->getBasePtr();
12356     MMO = ST->getMemOperand();
12357     SrcOpnd = 1;
12358     // If the MMO suggests this isn't a store of a full vector, leave
12359     // things alone.  For a built-in, we have to make the change for
12360     // correctness, so if there is a size problem that will be a bug.
12361     if (MMO->getSize() < 16)
12362       return SDValue();
12363     break;
12364   }
12365   case ISD::INTRINSIC_VOID: {
12366     MemIntrinsicSDNode *Intrin = cast<MemIntrinsicSDNode>(N);
12367     Chain = Intrin->getChain();
12368     // Intrin->getBasePtr() oddly does not get what we want.
12369     Base = Intrin->getOperand(3);
12370     MMO = Intrin->getMemOperand();
12371     SrcOpnd = 2;
12372     break;
12373   }
12374   }
12375 
12376   SDValue Src = N->getOperand(SrcOpnd);
12377   MVT VecTy = Src.getValueType().getSimpleVT();
12378 
12379   // Do not expand to PPCISD::XXSWAPD and PPCISD::STXVD2X when the load is
12380   // aligned and the type is a vector with elements up to 4 bytes
12381   if (Subtarget.needsSwapsForVSXMemOps() && !(MMO->getAlignment()%16)
12382       && VecTy.getScalarSizeInBits() <= 32 ) {
12383     return SDValue();
12384   }
12385 
12386   // All stores are done as v2f64 and possible bit cast.
12387   if (VecTy != MVT::v2f64) {
12388     Src = DAG.getNode(ISD::BITCAST, dl, MVT::v2f64, Src);
12389     DCI.AddToWorklist(Src.getNode());
12390   }
12391 
12392   SDValue Swap = DAG.getNode(PPCISD::XXSWAPD, dl,
12393                              DAG.getVTList(MVT::v2f64, MVT::Other), Chain, Src);
12394   DCI.AddToWorklist(Swap.getNode());
12395   Chain = Swap.getValue(1);
12396   SDValue StoreOps[] = { Chain, Swap, Base };
12397   SDValue Store = DAG.getMemIntrinsicNode(PPCISD::STXVD2X, dl,
12398                                           DAG.getVTList(MVT::Other),
12399                                           StoreOps, VecTy, MMO);
12400   DCI.AddToWorklist(Store.getNode());
12401   return Store;
12402 }
12403 
12404 // Handle DAG combine for STORE (FP_TO_INT F).
12405 SDValue PPCTargetLowering::combineStoreFPToInt(SDNode *N,
12406                                                DAGCombinerInfo &DCI) const {
12407 
12408   SelectionDAG &DAG = DCI.DAG;
12409   SDLoc dl(N);
12410   unsigned Opcode = N->getOperand(1).getOpcode();
12411 
12412   assert((Opcode == ISD::FP_TO_SINT || Opcode == ISD::FP_TO_UINT)
12413          && "Not a FP_TO_INT Instruction!");
12414 
12415   SDValue Val = N->getOperand(1).getOperand(0);
12416   EVT Op1VT = N->getOperand(1).getValueType();
12417   EVT ResVT = Val.getValueType();
12418 
12419   // Floating point types smaller than 32 bits are not legal on Power.
12420   if (ResVT.getScalarSizeInBits() < 32)
12421     return SDValue();
12422 
12423   // Only perform combine for conversion to i64/i32 or power9 i16/i8.
12424   bool ValidTypeForStoreFltAsInt =
12425         (Op1VT == MVT::i32 || Op1VT == MVT::i64 ||
12426          (Subtarget.hasP9Vector() && (Op1VT == MVT::i16 || Op1VT == MVT::i8)));
12427 
12428   if (ResVT == MVT::ppcf128 || !Subtarget.hasP8Altivec() ||
12429       cast<StoreSDNode>(N)->isTruncatingStore() || !ValidTypeForStoreFltAsInt)
12430     return SDValue();
12431 
12432   // Extend f32 values to f64
12433   if (ResVT.getScalarSizeInBits() == 32) {
12434     Val = DAG.getNode(ISD::FP_EXTEND, dl, MVT::f64, Val);
12435     DCI.AddToWorklist(Val.getNode());
12436   }
12437 
12438   // Set signed or unsigned conversion opcode.
12439   unsigned ConvOpcode = (Opcode == ISD::FP_TO_SINT) ?
12440                           PPCISD::FP_TO_SINT_IN_VSR :
12441                           PPCISD::FP_TO_UINT_IN_VSR;
12442 
12443   Val = DAG.getNode(ConvOpcode,
12444                     dl, ResVT == MVT::f128 ? MVT::f128 : MVT::f64, Val);
12445   DCI.AddToWorklist(Val.getNode());
12446 
12447   // Set number of bytes being converted.
12448   unsigned ByteSize = Op1VT.getScalarSizeInBits() / 8;
12449   SDValue Ops[] = { N->getOperand(0), Val, N->getOperand(2),
12450                     DAG.getIntPtrConstant(ByteSize, dl, false),
12451                     DAG.getValueType(Op1VT) };
12452 
12453   Val = DAG.getMemIntrinsicNode(PPCISD::ST_VSR_SCAL_INT, dl,
12454           DAG.getVTList(MVT::Other), Ops,
12455           cast<StoreSDNode>(N)->getMemoryVT(),
12456           cast<StoreSDNode>(N)->getMemOperand());
12457 
12458   DCI.AddToWorklist(Val.getNode());
12459   return Val;
12460 }
12461 
12462 SDValue PPCTargetLowering::PerformDAGCombine(SDNode *N,
12463                                              DAGCombinerInfo &DCI) const {
12464   SelectionDAG &DAG = DCI.DAG;
12465   SDLoc dl(N);
12466   switch (N->getOpcode()) {
12467   default: break;
12468   case ISD::SHL:
12469     return combineSHL(N, DCI);
12470   case ISD::SRA:
12471     return combineSRA(N, DCI);
12472   case ISD::SRL:
12473     return combineSRL(N, DCI);
12474   case PPCISD::SHL:
12475     if (isNullConstant(N->getOperand(0))) // 0 << V -> 0.
12476         return N->getOperand(0);
12477     break;
12478   case PPCISD::SRL:
12479     if (isNullConstant(N->getOperand(0))) // 0 >>u V -> 0.
12480         return N->getOperand(0);
12481     break;
12482   case PPCISD::SRA:
12483     if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(N->getOperand(0))) {
12484       if (C->isNullValue() ||   //  0 >>s V -> 0.
12485           C->isAllOnesValue())    // -1 >>s V -> -1.
12486         return N->getOperand(0);
12487     }
12488     break;
12489   case ISD::SIGN_EXTEND:
12490   case ISD::ZERO_EXTEND:
12491   case ISD::ANY_EXTEND:
12492     return DAGCombineExtBoolTrunc(N, DCI);
12493   case ISD::TRUNCATE:
12494   case ISD::SETCC:
12495   case ISD::SELECT_CC:
12496     return DAGCombineTruncBoolExt(N, DCI);
12497   case ISD::SINT_TO_FP:
12498   case ISD::UINT_TO_FP:
12499     return combineFPToIntToFP(N, DCI);
12500   case ISD::STORE: {
12501 
12502     EVT Op1VT = N->getOperand(1).getValueType();
12503     unsigned Opcode = N->getOperand(1).getOpcode();
12504 
12505     if (Opcode == ISD::FP_TO_SINT || Opcode == ISD::FP_TO_UINT) {
12506       SDValue Val= combineStoreFPToInt(N, DCI);
12507       if (Val)
12508         return Val;
12509     }
12510 
12511     // Turn STORE (BSWAP) -> sthbrx/stwbrx.
12512     if (cast<StoreSDNode>(N)->isUnindexed() && Opcode == ISD::BSWAP &&
12513         N->getOperand(1).getNode()->hasOneUse() &&
12514         (Op1VT == MVT::i32 || Op1VT == MVT::i16 ||
12515          (Subtarget.hasLDBRX() && Subtarget.isPPC64() && Op1VT == MVT::i64))) {
12516 
12517       // STBRX can only handle simple types.
12518       EVT mVT = cast<StoreSDNode>(N)->getMemoryVT();
12519       if (mVT.isExtended())
12520         break;
12521 
12522       SDValue BSwapOp = N->getOperand(1).getOperand(0);
12523       // Do an any-extend to 32-bits if this is a half-word input.
12524       if (BSwapOp.getValueType() == MVT::i16)
12525         BSwapOp = DAG.getNode(ISD::ANY_EXTEND, dl, MVT::i32, BSwapOp);
12526 
12527       // If the type of BSWAP operand is wider than stored memory width
12528       // it need to be shifted to the right side before STBRX.
12529       if (Op1VT.bitsGT(mVT)) {
12530         int Shift = Op1VT.getSizeInBits() - mVT.getSizeInBits();
12531         BSwapOp = DAG.getNode(ISD::SRL, dl, Op1VT, BSwapOp,
12532                               DAG.getConstant(Shift, dl, MVT::i32));
12533         // Need to truncate if this is a bswap of i64 stored as i32/i16.
12534         if (Op1VT == MVT::i64)
12535           BSwapOp = DAG.getNode(ISD::TRUNCATE, dl, MVT::i32, BSwapOp);
12536       }
12537 
12538       SDValue Ops[] = {
12539         N->getOperand(0), BSwapOp, N->getOperand(2), DAG.getValueType(mVT)
12540       };
12541       return
12542         DAG.getMemIntrinsicNode(PPCISD::STBRX, dl, DAG.getVTList(MVT::Other),
12543                                 Ops, cast<StoreSDNode>(N)->getMemoryVT(),
12544                                 cast<StoreSDNode>(N)->getMemOperand());
12545     }
12546 
12547     // STORE Constant:i32<0>  ->  STORE<trunc to i32> Constant:i64<0>
12548     // So it can increase the chance of CSE constant construction.
12549     if (Subtarget.isPPC64() && !DCI.isBeforeLegalize() &&
12550         isa<ConstantSDNode>(N->getOperand(1)) && Op1VT == MVT::i32) {
12551       // Need to sign-extended to 64-bits to handle negative values.
12552       EVT MemVT = cast<StoreSDNode>(N)->getMemoryVT();
12553       uint64_t Val64 = SignExtend64(N->getConstantOperandVal(1),
12554                                     MemVT.getSizeInBits());
12555       SDValue Const64 = DAG.getConstant(Val64, dl, MVT::i64);
12556 
12557       // DAG.getTruncStore() can't be used here because it doesn't accept
12558       // the general (base + offset) addressing mode.
12559       // So we use UpdateNodeOperands and setTruncatingStore instead.
12560       DAG.UpdateNodeOperands(N, N->getOperand(0), Const64, N->getOperand(2),
12561                              N->getOperand(3));
12562       cast<StoreSDNode>(N)->setTruncatingStore(true);
12563       return SDValue(N, 0);
12564     }
12565 
12566     // For little endian, VSX stores require generating xxswapd/lxvd2x.
12567     // Not needed on ISA 3.0 based CPUs since we have a non-permuting store.
12568     if (Op1VT.isSimple()) {
12569       MVT StoreVT = Op1VT.getSimpleVT();
12570       if (Subtarget.needsSwapsForVSXMemOps() &&
12571           (StoreVT == MVT::v2f64 || StoreVT == MVT::v2i64 ||
12572            StoreVT == MVT::v4f32 || StoreVT == MVT::v4i32))
12573         return expandVSXStoreForLE(N, DCI);
12574     }
12575     break;
12576   }
12577   case ISD::LOAD: {
12578     LoadSDNode *LD = cast<LoadSDNode>(N);
12579     EVT VT = LD->getValueType(0);
12580 
12581     // For little endian, VSX loads require generating lxvd2x/xxswapd.
12582     // Not needed on ISA 3.0 based CPUs since we have a non-permuting load.
12583     if (VT.isSimple()) {
12584       MVT LoadVT = VT.getSimpleVT();
12585       if (Subtarget.needsSwapsForVSXMemOps() &&
12586           (LoadVT == MVT::v2f64 || LoadVT == MVT::v2i64 ||
12587            LoadVT == MVT::v4f32 || LoadVT == MVT::v4i32))
12588         return expandVSXLoadForLE(N, DCI);
12589     }
12590 
12591     // We sometimes end up with a 64-bit integer load, from which we extract
12592     // two single-precision floating-point numbers. This happens with
12593     // std::complex<float>, and other similar structures, because of the way we
12594     // canonicalize structure copies. However, if we lack direct moves,
12595     // then the final bitcasts from the extracted integer values to the
12596     // floating-point numbers turn into store/load pairs. Even with direct moves,
12597     // just loading the two floating-point numbers is likely better.
12598     auto ReplaceTwoFloatLoad = [&]() {
12599       if (VT != MVT::i64)
12600         return false;
12601 
12602       if (LD->getExtensionType() != ISD::NON_EXTLOAD ||
12603           LD->isVolatile())
12604         return false;
12605 
12606       //  We're looking for a sequence like this:
12607       //  t13: i64,ch = load<LD8[%ref.tmp]> t0, t6, undef:i64
12608       //      t16: i64 = srl t13, Constant:i32<32>
12609       //    t17: i32 = truncate t16
12610       //  t18: f32 = bitcast t17
12611       //    t19: i32 = truncate t13
12612       //  t20: f32 = bitcast t19
12613 
12614       if (!LD->hasNUsesOfValue(2, 0))
12615         return false;
12616 
12617       auto UI = LD->use_begin();
12618       while (UI.getUse().getResNo() != 0) ++UI;
12619       SDNode *Trunc = *UI++;
12620       while (UI.getUse().getResNo() != 0) ++UI;
12621       SDNode *RightShift = *UI;
12622       if (Trunc->getOpcode() != ISD::TRUNCATE)
12623         std::swap(Trunc, RightShift);
12624 
12625       if (Trunc->getOpcode() != ISD::TRUNCATE ||
12626           Trunc->getValueType(0) != MVT::i32 ||
12627           !Trunc->hasOneUse())
12628         return false;
12629       if (RightShift->getOpcode() != ISD::SRL ||
12630           !isa<ConstantSDNode>(RightShift->getOperand(1)) ||
12631           RightShift->getConstantOperandVal(1) != 32 ||
12632           !RightShift->hasOneUse())
12633         return false;
12634 
12635       SDNode *Trunc2 = *RightShift->use_begin();
12636       if (Trunc2->getOpcode() != ISD::TRUNCATE ||
12637           Trunc2->getValueType(0) != MVT::i32 ||
12638           !Trunc2->hasOneUse())
12639         return false;
12640 
12641       SDNode *Bitcast = *Trunc->use_begin();
12642       SDNode *Bitcast2 = *Trunc2->use_begin();
12643 
12644       if (Bitcast->getOpcode() != ISD::BITCAST ||
12645           Bitcast->getValueType(0) != MVT::f32)
12646         return false;
12647       if (Bitcast2->getOpcode() != ISD::BITCAST ||
12648           Bitcast2->getValueType(0) != MVT::f32)
12649         return false;
12650 
12651       if (Subtarget.isLittleEndian())
12652         std::swap(Bitcast, Bitcast2);
12653 
12654       // Bitcast has the second float (in memory-layout order) and Bitcast2
12655       // has the first one.
12656 
12657       SDValue BasePtr = LD->getBasePtr();
12658       if (LD->isIndexed()) {
12659         assert(LD->getAddressingMode() == ISD::PRE_INC &&
12660                "Non-pre-inc AM on PPC?");
12661         BasePtr =
12662           DAG.getNode(ISD::ADD, dl, BasePtr.getValueType(), BasePtr,
12663                       LD->getOffset());
12664       }
12665 
12666       auto MMOFlags =
12667           LD->getMemOperand()->getFlags() & ~MachineMemOperand::MOVolatile;
12668       SDValue FloatLoad = DAG.getLoad(MVT::f32, dl, LD->getChain(), BasePtr,
12669                                       LD->getPointerInfo(), LD->getAlignment(),
12670                                       MMOFlags, LD->getAAInfo());
12671       SDValue AddPtr =
12672         DAG.getNode(ISD::ADD, dl, BasePtr.getValueType(),
12673                     BasePtr, DAG.getIntPtrConstant(4, dl));
12674       SDValue FloatLoad2 = DAG.getLoad(
12675           MVT::f32, dl, SDValue(FloatLoad.getNode(), 1), AddPtr,
12676           LD->getPointerInfo().getWithOffset(4),
12677           MinAlign(LD->getAlignment(), 4), MMOFlags, LD->getAAInfo());
12678 
12679       if (LD->isIndexed()) {
12680         // Note that DAGCombine should re-form any pre-increment load(s) from
12681         // what is produced here if that makes sense.
12682         DAG.ReplaceAllUsesOfValueWith(SDValue(LD, 1), BasePtr);
12683       }
12684 
12685       DCI.CombineTo(Bitcast2, FloatLoad);
12686       DCI.CombineTo(Bitcast, FloatLoad2);
12687 
12688       DAG.ReplaceAllUsesOfValueWith(SDValue(LD, LD->isIndexed() ? 2 : 1),
12689                                     SDValue(FloatLoad2.getNode(), 1));
12690       return true;
12691     };
12692 
12693     if (ReplaceTwoFloatLoad())
12694       return SDValue(N, 0);
12695 
12696     EVT MemVT = LD->getMemoryVT();
12697     Type *Ty = MemVT.getTypeForEVT(*DAG.getContext());
12698     unsigned ABIAlignment = DAG.getDataLayout().getABITypeAlignment(Ty);
12699     Type *STy = MemVT.getScalarType().getTypeForEVT(*DAG.getContext());
12700     unsigned ScalarABIAlignment = DAG.getDataLayout().getABITypeAlignment(STy);
12701     if (LD->isUnindexed() && VT.isVector() &&
12702         ((Subtarget.hasAltivec() && ISD::isNON_EXTLoad(N) &&
12703           // P8 and later hardware should just use LOAD.
12704           !Subtarget.hasP8Vector() && (VT == MVT::v16i8 || VT == MVT::v8i16 ||
12705                                        VT == MVT::v4i32 || VT == MVT::v4f32)) ||
12706          (Subtarget.hasQPX() && (VT == MVT::v4f64 || VT == MVT::v4f32) &&
12707           LD->getAlignment() >= ScalarABIAlignment)) &&
12708         LD->getAlignment() < ABIAlignment) {
12709       // This is a type-legal unaligned Altivec or QPX load.
12710       SDValue Chain = LD->getChain();
12711       SDValue Ptr = LD->getBasePtr();
12712       bool isLittleEndian = Subtarget.isLittleEndian();
12713 
12714       // This implements the loading of unaligned vectors as described in
12715       // the venerable Apple Velocity Engine overview. Specifically:
12716       // https://developer.apple.com/hardwaredrivers/ve/alignment.html
12717       // https://developer.apple.com/hardwaredrivers/ve/code_optimization.html
12718       //
12719       // The general idea is to expand a sequence of one or more unaligned
12720       // loads into an alignment-based permutation-control instruction (lvsl
12721       // or lvsr), a series of regular vector loads (which always truncate
12722       // their input address to an aligned address), and a series of
12723       // permutations.  The results of these permutations are the requested
12724       // loaded values.  The trick is that the last "extra" load is not taken
12725       // from the address you might suspect (sizeof(vector) bytes after the
12726       // last requested load), but rather sizeof(vector) - 1 bytes after the
12727       // last requested vector. The point of this is to avoid a page fault if
12728       // the base address happened to be aligned. This works because if the
12729       // base address is aligned, then adding less than a full vector length
12730       // will cause the last vector in the sequence to be (re)loaded.
12731       // Otherwise, the next vector will be fetched as you might suspect was
12732       // necessary.
12733 
12734       // We might be able to reuse the permutation generation from
12735       // a different base address offset from this one by an aligned amount.
12736       // The INTRINSIC_WO_CHAIN DAG combine will attempt to perform this
12737       // optimization later.
12738       Intrinsic::ID Intr, IntrLD, IntrPerm;
12739       MVT PermCntlTy, PermTy, LDTy;
12740       if (Subtarget.hasAltivec()) {
12741         Intr = isLittleEndian ?  Intrinsic::ppc_altivec_lvsr :
12742                                  Intrinsic::ppc_altivec_lvsl;
12743         IntrLD = Intrinsic::ppc_altivec_lvx;
12744         IntrPerm = Intrinsic::ppc_altivec_vperm;
12745         PermCntlTy = MVT::v16i8;
12746         PermTy = MVT::v4i32;
12747         LDTy = MVT::v4i32;
12748       } else {
12749         Intr =   MemVT == MVT::v4f64 ? Intrinsic::ppc_qpx_qvlpcld :
12750                                        Intrinsic::ppc_qpx_qvlpcls;
12751         IntrLD = MemVT == MVT::v4f64 ? Intrinsic::ppc_qpx_qvlfd :
12752                                        Intrinsic::ppc_qpx_qvlfs;
12753         IntrPerm = Intrinsic::ppc_qpx_qvfperm;
12754         PermCntlTy = MVT::v4f64;
12755         PermTy = MVT::v4f64;
12756         LDTy = MemVT.getSimpleVT();
12757       }
12758 
12759       SDValue PermCntl = BuildIntrinsicOp(Intr, Ptr, DAG, dl, PermCntlTy);
12760 
12761       // Create the new MMO for the new base load. It is like the original MMO,
12762       // but represents an area in memory almost twice the vector size centered
12763       // on the original address. If the address is unaligned, we might start
12764       // reading up to (sizeof(vector)-1) bytes below the address of the
12765       // original unaligned load.
12766       MachineFunction &MF = DAG.getMachineFunction();
12767       MachineMemOperand *BaseMMO =
12768         MF.getMachineMemOperand(LD->getMemOperand(),
12769                                 -(long)MemVT.getStoreSize()+1,
12770                                 2*MemVT.getStoreSize()-1);
12771 
12772       // Create the new base load.
12773       SDValue LDXIntID =
12774           DAG.getTargetConstant(IntrLD, dl, getPointerTy(MF.getDataLayout()));
12775       SDValue BaseLoadOps[] = { Chain, LDXIntID, Ptr };
12776       SDValue BaseLoad =
12777         DAG.getMemIntrinsicNode(ISD::INTRINSIC_W_CHAIN, dl,
12778                                 DAG.getVTList(PermTy, MVT::Other),
12779                                 BaseLoadOps, LDTy, BaseMMO);
12780 
12781       // Note that the value of IncOffset (which is provided to the next
12782       // load's pointer info offset value, and thus used to calculate the
12783       // alignment), and the value of IncValue (which is actually used to
12784       // increment the pointer value) are different! This is because we
12785       // require the next load to appear to be aligned, even though it
12786       // is actually offset from the base pointer by a lesser amount.
12787       int IncOffset = VT.getSizeInBits() / 8;
12788       int IncValue = IncOffset;
12789 
12790       // Walk (both up and down) the chain looking for another load at the real
12791       // (aligned) offset (the alignment of the other load does not matter in
12792       // this case). If found, then do not use the offset reduction trick, as
12793       // that will prevent the loads from being later combined (as they would
12794       // otherwise be duplicates).
12795       if (!findConsecutiveLoad(LD, DAG))
12796         --IncValue;
12797 
12798       SDValue Increment =
12799           DAG.getConstant(IncValue, dl, getPointerTy(MF.getDataLayout()));
12800       Ptr = DAG.getNode(ISD::ADD, dl, Ptr.getValueType(), Ptr, Increment);
12801 
12802       MachineMemOperand *ExtraMMO =
12803         MF.getMachineMemOperand(LD->getMemOperand(),
12804                                 1, 2*MemVT.getStoreSize()-1);
12805       SDValue ExtraLoadOps[] = { Chain, LDXIntID, Ptr };
12806       SDValue ExtraLoad =
12807         DAG.getMemIntrinsicNode(ISD::INTRINSIC_W_CHAIN, dl,
12808                                 DAG.getVTList(PermTy, MVT::Other),
12809                                 ExtraLoadOps, LDTy, ExtraMMO);
12810 
12811       SDValue TF = DAG.getNode(ISD::TokenFactor, dl, MVT::Other,
12812         BaseLoad.getValue(1), ExtraLoad.getValue(1));
12813 
12814       // Because vperm has a big-endian bias, we must reverse the order
12815       // of the input vectors and complement the permute control vector
12816       // when generating little endian code.  We have already handled the
12817       // latter by using lvsr instead of lvsl, so just reverse BaseLoad
12818       // and ExtraLoad here.
12819       SDValue Perm;
12820       if (isLittleEndian)
12821         Perm = BuildIntrinsicOp(IntrPerm,
12822                                 ExtraLoad, BaseLoad, PermCntl, DAG, dl);
12823       else
12824         Perm = BuildIntrinsicOp(IntrPerm,
12825                                 BaseLoad, ExtraLoad, PermCntl, DAG, dl);
12826 
12827       if (VT != PermTy)
12828         Perm = Subtarget.hasAltivec() ?
12829                  DAG.getNode(ISD::BITCAST, dl, VT, Perm) :
12830                  DAG.getNode(ISD::FP_ROUND, dl, VT, Perm, // QPX
12831                                DAG.getTargetConstant(1, dl, MVT::i64));
12832                                // second argument is 1 because this rounding
12833                                // is always exact.
12834 
12835       // The output of the permutation is our loaded result, the TokenFactor is
12836       // our new chain.
12837       DCI.CombineTo(N, Perm, TF);
12838       return SDValue(N, 0);
12839     }
12840     }
12841     break;
12842     case ISD::INTRINSIC_WO_CHAIN: {
12843       bool isLittleEndian = Subtarget.isLittleEndian();
12844       unsigned IID = cast<ConstantSDNode>(N->getOperand(0))->getZExtValue();
12845       Intrinsic::ID Intr = (isLittleEndian ? Intrinsic::ppc_altivec_lvsr
12846                                            : Intrinsic::ppc_altivec_lvsl);
12847       if ((IID == Intr ||
12848            IID == Intrinsic::ppc_qpx_qvlpcld  ||
12849            IID == Intrinsic::ppc_qpx_qvlpcls) &&
12850         N->getOperand(1)->getOpcode() == ISD::ADD) {
12851         SDValue Add = N->getOperand(1);
12852 
12853         int Bits = IID == Intrinsic::ppc_qpx_qvlpcld ?
12854                    5 /* 32 byte alignment */ : 4 /* 16 byte alignment */;
12855 
12856         if (DAG.MaskedValueIsZero(Add->getOperand(1),
12857                                   APInt::getAllOnesValue(Bits /* alignment */)
12858                                       .zext(Add.getScalarValueSizeInBits()))) {
12859           SDNode *BasePtr = Add->getOperand(0).getNode();
12860           for (SDNode::use_iterator UI = BasePtr->use_begin(),
12861                                     UE = BasePtr->use_end();
12862                UI != UE; ++UI) {
12863             if (UI->getOpcode() == ISD::INTRINSIC_WO_CHAIN &&
12864                 cast<ConstantSDNode>(UI->getOperand(0))->getZExtValue() == IID) {
12865               // We've found another LVSL/LVSR, and this address is an aligned
12866               // multiple of that one. The results will be the same, so use the
12867               // one we've just found instead.
12868 
12869               return SDValue(*UI, 0);
12870             }
12871           }
12872         }
12873 
12874         if (isa<ConstantSDNode>(Add->getOperand(1))) {
12875           SDNode *BasePtr = Add->getOperand(0).getNode();
12876           for (SDNode::use_iterator UI = BasePtr->use_begin(),
12877                UE = BasePtr->use_end(); UI != UE; ++UI) {
12878             if (UI->getOpcode() == ISD::ADD &&
12879                 isa<ConstantSDNode>(UI->getOperand(1)) &&
12880                 (cast<ConstantSDNode>(Add->getOperand(1))->getZExtValue() -
12881                  cast<ConstantSDNode>(UI->getOperand(1))->getZExtValue()) %
12882                 (1ULL << Bits) == 0) {
12883               SDNode *OtherAdd = *UI;
12884               for (SDNode::use_iterator VI = OtherAdd->use_begin(),
12885                    VE = OtherAdd->use_end(); VI != VE; ++VI) {
12886                 if (VI->getOpcode() == ISD::INTRINSIC_WO_CHAIN &&
12887                     cast<ConstantSDNode>(VI->getOperand(0))->getZExtValue() == IID) {
12888                   return SDValue(*VI, 0);
12889                 }
12890               }
12891             }
12892           }
12893         }
12894       }
12895     }
12896 
12897     break;
12898   case ISD::INTRINSIC_W_CHAIN:
12899     // For little endian, VSX loads require generating lxvd2x/xxswapd.
12900     // Not needed on ISA 3.0 based CPUs since we have a non-permuting load.
12901     if (Subtarget.needsSwapsForVSXMemOps()) {
12902       switch (cast<ConstantSDNode>(N->getOperand(1))->getZExtValue()) {
12903       default:
12904         break;
12905       case Intrinsic::ppc_vsx_lxvw4x:
12906       case Intrinsic::ppc_vsx_lxvd2x:
12907         return expandVSXLoadForLE(N, DCI);
12908       }
12909     }
12910     break;
12911   case ISD::INTRINSIC_VOID:
12912     // For little endian, VSX stores require generating xxswapd/stxvd2x.
12913     // Not needed on ISA 3.0 based CPUs since we have a non-permuting store.
12914     if (Subtarget.needsSwapsForVSXMemOps()) {
12915       switch (cast<ConstantSDNode>(N->getOperand(1))->getZExtValue()) {
12916       default:
12917         break;
12918       case Intrinsic::ppc_vsx_stxvw4x:
12919       case Intrinsic::ppc_vsx_stxvd2x:
12920         return expandVSXStoreForLE(N, DCI);
12921       }
12922     }
12923     break;
12924   case ISD::BSWAP:
12925     // Turn BSWAP (LOAD) -> lhbrx/lwbrx.
12926     if (ISD::isNON_EXTLoad(N->getOperand(0).getNode()) &&
12927         N->getOperand(0).hasOneUse() &&
12928         (N->getValueType(0) == MVT::i32 || N->getValueType(0) == MVT::i16 ||
12929          (Subtarget.hasLDBRX() && Subtarget.isPPC64() &&
12930           N->getValueType(0) == MVT::i64))) {
12931       SDValue Load = N->getOperand(0);
12932       LoadSDNode *LD = cast<LoadSDNode>(Load);
12933       // Create the byte-swapping load.
12934       SDValue Ops[] = {
12935         LD->getChain(),    // Chain
12936         LD->getBasePtr(),  // Ptr
12937         DAG.getValueType(N->getValueType(0)) // VT
12938       };
12939       SDValue BSLoad =
12940         DAG.getMemIntrinsicNode(PPCISD::LBRX, dl,
12941                                 DAG.getVTList(N->getValueType(0) == MVT::i64 ?
12942                                               MVT::i64 : MVT::i32, MVT::Other),
12943                                 Ops, LD->getMemoryVT(), LD->getMemOperand());
12944 
12945       // If this is an i16 load, insert the truncate.
12946       SDValue ResVal = BSLoad;
12947       if (N->getValueType(0) == MVT::i16)
12948         ResVal = DAG.getNode(ISD::TRUNCATE, dl, MVT::i16, BSLoad);
12949 
12950       // First, combine the bswap away.  This makes the value produced by the
12951       // load dead.
12952       DCI.CombineTo(N, ResVal);
12953 
12954       // Next, combine the load away, we give it a bogus result value but a real
12955       // chain result.  The result value is dead because the bswap is dead.
12956       DCI.CombineTo(Load.getNode(), ResVal, BSLoad.getValue(1));
12957 
12958       // Return N so it doesn't get rechecked!
12959       return SDValue(N, 0);
12960     }
12961     break;
12962   case PPCISD::VCMP:
12963     // If a VCMPo node already exists with exactly the same operands as this
12964     // node, use its result instead of this node (VCMPo computes both a CR6 and
12965     // a normal output).
12966     //
12967     if (!N->getOperand(0).hasOneUse() &&
12968         !N->getOperand(1).hasOneUse() &&
12969         !N->getOperand(2).hasOneUse()) {
12970 
12971       // Scan all of the users of the LHS, looking for VCMPo's that match.
12972       SDNode *VCMPoNode = nullptr;
12973 
12974       SDNode *LHSN = N->getOperand(0).getNode();
12975       for (SDNode::use_iterator UI = LHSN->use_begin(), E = LHSN->use_end();
12976            UI != E; ++UI)
12977         if (UI->getOpcode() == PPCISD::VCMPo &&
12978             UI->getOperand(1) == N->getOperand(1) &&
12979             UI->getOperand(2) == N->getOperand(2) &&
12980             UI->getOperand(0) == N->getOperand(0)) {
12981           VCMPoNode = *UI;
12982           break;
12983         }
12984 
12985       // If there is no VCMPo node, or if the flag value has a single use, don't
12986       // transform this.
12987       if (!VCMPoNode || VCMPoNode->hasNUsesOfValue(0, 1))
12988         break;
12989 
12990       // Look at the (necessarily single) use of the flag value.  If it has a
12991       // chain, this transformation is more complex.  Note that multiple things
12992       // could use the value result, which we should ignore.
12993       SDNode *FlagUser = nullptr;
12994       for (SDNode::use_iterator UI = VCMPoNode->use_begin();
12995            FlagUser == nullptr; ++UI) {
12996         assert(UI != VCMPoNode->use_end() && "Didn't find user!");
12997         SDNode *User = *UI;
12998         for (unsigned i = 0, e = User->getNumOperands(); i != e; ++i) {
12999           if (User->getOperand(i) == SDValue(VCMPoNode, 1)) {
13000             FlagUser = User;
13001             break;
13002           }
13003         }
13004       }
13005 
13006       // If the user is a MFOCRF instruction, we know this is safe.
13007       // Otherwise we give up for right now.
13008       if (FlagUser->getOpcode() == PPCISD::MFOCRF)
13009         return SDValue(VCMPoNode, 0);
13010     }
13011     break;
13012   case ISD::BRCOND: {
13013     SDValue Cond = N->getOperand(1);
13014     SDValue Target = N->getOperand(2);
13015 
13016     if (Cond.getOpcode() == ISD::INTRINSIC_W_CHAIN &&
13017         cast<ConstantSDNode>(Cond.getOperand(1))->getZExtValue() ==
13018           Intrinsic::ppc_is_decremented_ctr_nonzero) {
13019 
13020       // We now need to make the intrinsic dead (it cannot be instruction
13021       // selected).
13022       DAG.ReplaceAllUsesOfValueWith(Cond.getValue(1), Cond.getOperand(0));
13023       assert(Cond.getNode()->hasOneUse() &&
13024              "Counter decrement has more than one use");
13025 
13026       return DAG.getNode(PPCISD::BDNZ, dl, MVT::Other,
13027                          N->getOperand(0), Target);
13028     }
13029   }
13030   break;
13031   case ISD::BR_CC: {
13032     // If this is a branch on an altivec predicate comparison, lower this so
13033     // that we don't have to do a MFOCRF: instead, branch directly on CR6.  This
13034     // lowering is done pre-legalize, because the legalizer lowers the predicate
13035     // compare down to code that is difficult to reassemble.
13036     ISD::CondCode CC = cast<CondCodeSDNode>(N->getOperand(1))->get();
13037     SDValue LHS = N->getOperand(2), RHS = N->getOperand(3);
13038 
13039     // Sometimes the promoted value of the intrinsic is ANDed by some non-zero
13040     // value. If so, pass-through the AND to get to the intrinsic.
13041     if (LHS.getOpcode() == ISD::AND &&
13042         LHS.getOperand(0).getOpcode() == ISD::INTRINSIC_W_CHAIN &&
13043         cast<ConstantSDNode>(LHS.getOperand(0).getOperand(1))->getZExtValue() ==
13044           Intrinsic::ppc_is_decremented_ctr_nonzero &&
13045         isa<ConstantSDNode>(LHS.getOperand(1)) &&
13046         !isNullConstant(LHS.getOperand(1)))
13047       LHS = LHS.getOperand(0);
13048 
13049     if (LHS.getOpcode() == ISD::INTRINSIC_W_CHAIN &&
13050         cast<ConstantSDNode>(LHS.getOperand(1))->getZExtValue() ==
13051           Intrinsic::ppc_is_decremented_ctr_nonzero &&
13052         isa<ConstantSDNode>(RHS)) {
13053       assert((CC == ISD::SETEQ || CC == ISD::SETNE) &&
13054              "Counter decrement comparison is not EQ or NE");
13055 
13056       unsigned Val = cast<ConstantSDNode>(RHS)->getZExtValue();
13057       bool isBDNZ = (CC == ISD::SETEQ && Val) ||
13058                     (CC == ISD::SETNE && !Val);
13059 
13060       // We now need to make the intrinsic dead (it cannot be instruction
13061       // selected).
13062       DAG.ReplaceAllUsesOfValueWith(LHS.getValue(1), LHS.getOperand(0));
13063       assert(LHS.getNode()->hasOneUse() &&
13064              "Counter decrement has more than one use");
13065 
13066       return DAG.getNode(isBDNZ ? PPCISD::BDNZ : PPCISD::BDZ, dl, MVT::Other,
13067                          N->getOperand(0), N->getOperand(4));
13068     }
13069 
13070     int CompareOpc;
13071     bool isDot;
13072 
13073     if (LHS.getOpcode() == ISD::INTRINSIC_WO_CHAIN &&
13074         isa<ConstantSDNode>(RHS) && (CC == ISD::SETEQ || CC == ISD::SETNE) &&
13075         getVectorCompareInfo(LHS, CompareOpc, isDot, Subtarget)) {
13076       assert(isDot && "Can't compare against a vector result!");
13077 
13078       // If this is a comparison against something other than 0/1, then we know
13079       // that the condition is never/always true.
13080       unsigned Val = cast<ConstantSDNode>(RHS)->getZExtValue();
13081       if (Val != 0 && Val != 1) {
13082         if (CC == ISD::SETEQ)      // Cond never true, remove branch.
13083           return N->getOperand(0);
13084         // Always !=, turn it into an unconditional branch.
13085         return DAG.getNode(ISD::BR, dl, MVT::Other,
13086                            N->getOperand(0), N->getOperand(4));
13087       }
13088 
13089       bool BranchOnWhenPredTrue = (CC == ISD::SETEQ) ^ (Val == 0);
13090 
13091       // Create the PPCISD altivec 'dot' comparison node.
13092       SDValue Ops[] = {
13093         LHS.getOperand(2),  // LHS of compare
13094         LHS.getOperand(3),  // RHS of compare
13095         DAG.getConstant(CompareOpc, dl, MVT::i32)
13096       };
13097       EVT VTs[] = { LHS.getOperand(2).getValueType(), MVT::Glue };
13098       SDValue CompNode = DAG.getNode(PPCISD::VCMPo, dl, VTs, Ops);
13099 
13100       // Unpack the result based on how the target uses it.
13101       PPC::Predicate CompOpc;
13102       switch (cast<ConstantSDNode>(LHS.getOperand(1))->getZExtValue()) {
13103       default:  // Can't happen, don't crash on invalid number though.
13104       case 0:   // Branch on the value of the EQ bit of CR6.
13105         CompOpc = BranchOnWhenPredTrue ? PPC::PRED_EQ : PPC::PRED_NE;
13106         break;
13107       case 1:   // Branch on the inverted value of the EQ bit of CR6.
13108         CompOpc = BranchOnWhenPredTrue ? PPC::PRED_NE : PPC::PRED_EQ;
13109         break;
13110       case 2:   // Branch on the value of the LT bit of CR6.
13111         CompOpc = BranchOnWhenPredTrue ? PPC::PRED_LT : PPC::PRED_GE;
13112         break;
13113       case 3:   // Branch on the inverted value of the LT bit of CR6.
13114         CompOpc = BranchOnWhenPredTrue ? PPC::PRED_GE : PPC::PRED_LT;
13115         break;
13116       }
13117 
13118       return DAG.getNode(PPCISD::COND_BRANCH, dl, MVT::Other, N->getOperand(0),
13119                          DAG.getConstant(CompOpc, dl, MVT::i32),
13120                          DAG.getRegister(PPC::CR6, MVT::i32),
13121                          N->getOperand(4), CompNode.getValue(1));
13122     }
13123     break;
13124   }
13125   case ISD::BUILD_VECTOR:
13126     return DAGCombineBuildVector(N, DCI);
13127   }
13128 
13129   return SDValue();
13130 }
13131 
13132 SDValue
13133 PPCTargetLowering::BuildSDIVPow2(SDNode *N, const APInt &Divisor,
13134                                  SelectionDAG &DAG,
13135                                  SmallVectorImpl<SDNode *> &Created) const {
13136   // fold (sdiv X, pow2)
13137   EVT VT = N->getValueType(0);
13138   if (VT == MVT::i64 && !Subtarget.isPPC64())
13139     return SDValue();
13140   if ((VT != MVT::i32 && VT != MVT::i64) ||
13141       !(Divisor.isPowerOf2() || (-Divisor).isPowerOf2()))
13142     return SDValue();
13143 
13144   SDLoc DL(N);
13145   SDValue N0 = N->getOperand(0);
13146 
13147   bool IsNegPow2 = (-Divisor).isPowerOf2();
13148   unsigned Lg2 = (IsNegPow2 ? -Divisor : Divisor).countTrailingZeros();
13149   SDValue ShiftAmt = DAG.getConstant(Lg2, DL, VT);
13150 
13151   SDValue Op = DAG.getNode(PPCISD::SRA_ADDZE, DL, VT, N0, ShiftAmt);
13152   Created.push_back(Op.getNode());
13153 
13154   if (IsNegPow2) {
13155     Op = DAG.getNode(ISD::SUB, DL, VT, DAG.getConstant(0, DL, VT), Op);
13156     Created.push_back(Op.getNode());
13157   }
13158 
13159   return Op;
13160 }
13161 
13162 //===----------------------------------------------------------------------===//
13163 // Inline Assembly Support
13164 //===----------------------------------------------------------------------===//
13165 
13166 void PPCTargetLowering::computeKnownBitsForTargetNode(const SDValue Op,
13167                                                       KnownBits &Known,
13168                                                       const APInt &DemandedElts,
13169                                                       const SelectionDAG &DAG,
13170                                                       unsigned Depth) const {
13171   Known.resetAll();
13172   switch (Op.getOpcode()) {
13173   default: break;
13174   case PPCISD::LBRX: {
13175     // lhbrx is known to have the top bits cleared out.
13176     if (cast<VTSDNode>(Op.getOperand(2))->getVT() == MVT::i16)
13177       Known.Zero = 0xFFFF0000;
13178     break;
13179   }
13180   case ISD::INTRINSIC_WO_CHAIN: {
13181     switch (cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue()) {
13182     default: break;
13183     case Intrinsic::ppc_altivec_vcmpbfp_p:
13184     case Intrinsic::ppc_altivec_vcmpeqfp_p:
13185     case Intrinsic::ppc_altivec_vcmpequb_p:
13186     case Intrinsic::ppc_altivec_vcmpequh_p:
13187     case Intrinsic::ppc_altivec_vcmpequw_p:
13188     case Intrinsic::ppc_altivec_vcmpequd_p:
13189     case Intrinsic::ppc_altivec_vcmpgefp_p:
13190     case Intrinsic::ppc_altivec_vcmpgtfp_p:
13191     case Intrinsic::ppc_altivec_vcmpgtsb_p:
13192     case Intrinsic::ppc_altivec_vcmpgtsh_p:
13193     case Intrinsic::ppc_altivec_vcmpgtsw_p:
13194     case Intrinsic::ppc_altivec_vcmpgtsd_p:
13195     case Intrinsic::ppc_altivec_vcmpgtub_p:
13196     case Intrinsic::ppc_altivec_vcmpgtuh_p:
13197     case Intrinsic::ppc_altivec_vcmpgtuw_p:
13198     case Intrinsic::ppc_altivec_vcmpgtud_p:
13199       Known.Zero = ~1U;  // All bits but the low one are known to be zero.
13200       break;
13201     }
13202   }
13203   }
13204 }
13205 
13206 unsigned PPCTargetLowering::getPrefLoopAlignment(MachineLoop *ML) const {
13207   switch (Subtarget.getDarwinDirective()) {
13208   default: break;
13209   case PPC::DIR_970:
13210   case PPC::DIR_PWR4:
13211   case PPC::DIR_PWR5:
13212   case PPC::DIR_PWR5X:
13213   case PPC::DIR_PWR6:
13214   case PPC::DIR_PWR6X:
13215   case PPC::DIR_PWR7:
13216   case PPC::DIR_PWR8:
13217   case PPC::DIR_PWR9: {
13218     if (!ML)
13219       break;
13220 
13221     const PPCInstrInfo *TII = Subtarget.getInstrInfo();
13222 
13223     // For small loops (between 5 and 8 instructions), align to a 32-byte
13224     // boundary so that the entire loop fits in one instruction-cache line.
13225     uint64_t LoopSize = 0;
13226     for (auto I = ML->block_begin(), IE = ML->block_end(); I != IE; ++I)
13227       for (auto J = (*I)->begin(), JE = (*I)->end(); J != JE; ++J) {
13228         LoopSize += TII->getInstSizeInBytes(*J);
13229         if (LoopSize > 32)
13230           break;
13231       }
13232 
13233     if (LoopSize > 16 && LoopSize <= 32)
13234       return 5;
13235 
13236     break;
13237   }
13238   }
13239 
13240   return TargetLowering::getPrefLoopAlignment(ML);
13241 }
13242 
13243 /// getConstraintType - Given a constraint, return the type of
13244 /// constraint it is for this target.
13245 PPCTargetLowering::ConstraintType
13246 PPCTargetLowering::getConstraintType(StringRef Constraint) const {
13247   if (Constraint.size() == 1) {
13248     switch (Constraint[0]) {
13249     default: break;
13250     case 'b':
13251     case 'r':
13252     case 'f':
13253     case 'd':
13254     case 'v':
13255     case 'y':
13256       return C_RegisterClass;
13257     case 'Z':
13258       // FIXME: While Z does indicate a memory constraint, it specifically
13259       // indicates an r+r address (used in conjunction with the 'y' modifier
13260       // in the replacement string). Currently, we're forcing the base
13261       // register to be r0 in the asm printer (which is interpreted as zero)
13262       // and forming the complete address in the second register. This is
13263       // suboptimal.
13264       return C_Memory;
13265     }
13266   } else if (Constraint == "wc") { // individual CR bits.
13267     return C_RegisterClass;
13268   } else if (Constraint == "wa" || Constraint == "wd" ||
13269              Constraint == "wf" || Constraint == "ws") {
13270     return C_RegisterClass; // VSX registers.
13271   }
13272   return TargetLowering::getConstraintType(Constraint);
13273 }
13274 
13275 /// Examine constraint type and operand type and determine a weight value.
13276 /// This object must already have been set up with the operand type
13277 /// and the current alternative constraint selected.
13278 TargetLowering::ConstraintWeight
13279 PPCTargetLowering::getSingleConstraintMatchWeight(
13280     AsmOperandInfo &info, const char *constraint) const {
13281   ConstraintWeight weight = CW_Invalid;
13282   Value *CallOperandVal = info.CallOperandVal;
13283     // If we don't have a value, we can't do a match,
13284     // but allow it at the lowest weight.
13285   if (!CallOperandVal)
13286     return CW_Default;
13287   Type *type = CallOperandVal->getType();
13288 
13289   // Look at the constraint type.
13290   if (StringRef(constraint) == "wc" && type->isIntegerTy(1))
13291     return CW_Register; // an individual CR bit.
13292   else if ((StringRef(constraint) == "wa" ||
13293             StringRef(constraint) == "wd" ||
13294             StringRef(constraint) == "wf") &&
13295            type->isVectorTy())
13296     return CW_Register;
13297   else if (StringRef(constraint) == "ws" && type->isDoubleTy())
13298     return CW_Register;
13299 
13300   switch (*constraint) {
13301   default:
13302     weight = TargetLowering::getSingleConstraintMatchWeight(info, constraint);
13303     break;
13304   case 'b':
13305     if (type->isIntegerTy())
13306       weight = CW_Register;
13307     break;
13308   case 'f':
13309     if (type->isFloatTy())
13310       weight = CW_Register;
13311     break;
13312   case 'd':
13313     if (type->isDoubleTy())
13314       weight = CW_Register;
13315     break;
13316   case 'v':
13317     if (type->isVectorTy())
13318       weight = CW_Register;
13319     break;
13320   case 'y':
13321     weight = CW_Register;
13322     break;
13323   case 'Z':
13324     weight = CW_Memory;
13325     break;
13326   }
13327   return weight;
13328 }
13329 
13330 std::pair<unsigned, const TargetRegisterClass *>
13331 PPCTargetLowering::getRegForInlineAsmConstraint(const TargetRegisterInfo *TRI,
13332                                                 StringRef Constraint,
13333                                                 MVT VT) const {
13334   if (Constraint.size() == 1) {
13335     // GCC RS6000 Constraint Letters
13336     switch (Constraint[0]) {
13337     case 'b':   // R1-R31
13338       if (VT == MVT::i64 && Subtarget.isPPC64())
13339         return std::make_pair(0U, &PPC::G8RC_NOX0RegClass);
13340       return std::make_pair(0U, &PPC::GPRC_NOR0RegClass);
13341     case 'r':   // R0-R31
13342       if (VT == MVT::i64 && Subtarget.isPPC64())
13343         return std::make_pair(0U, &PPC::G8RCRegClass);
13344       return std::make_pair(0U, &PPC::GPRCRegClass);
13345     // 'd' and 'f' constraints are both defined to be "the floating point
13346     // registers", where one is for 32-bit and the other for 64-bit. We don't
13347     // really care overly much here so just give them all the same reg classes.
13348     case 'd':
13349     case 'f':
13350       if (Subtarget.hasSPE()) {
13351         if (VT == MVT::f32 || VT == MVT::i32)
13352           return std::make_pair(0U, &PPC::SPE4RCRegClass);
13353         if (VT == MVT::f64 || VT == MVT::i64)
13354           return std::make_pair(0U, &PPC::SPERCRegClass);
13355       } else {
13356         if (VT == MVT::f32 || VT == MVT::i32)
13357           return std::make_pair(0U, &PPC::F4RCRegClass);
13358         if (VT == MVT::f64 || VT == MVT::i64)
13359           return std::make_pair(0U, &PPC::F8RCRegClass);
13360         if (VT == MVT::v4f64 && Subtarget.hasQPX())
13361           return std::make_pair(0U, &PPC::QFRCRegClass);
13362         if (VT == MVT::v4f32 && Subtarget.hasQPX())
13363           return std::make_pair(0U, &PPC::QSRCRegClass);
13364       }
13365       break;
13366     case 'v':
13367       if (VT == MVT::v4f64 && Subtarget.hasQPX())
13368         return std::make_pair(0U, &PPC::QFRCRegClass);
13369       if (VT == MVT::v4f32 && Subtarget.hasQPX())
13370         return std::make_pair(0U, &PPC::QSRCRegClass);
13371       if (Subtarget.hasAltivec())
13372         return std::make_pair(0U, &PPC::VRRCRegClass);
13373       break;
13374     case 'y':   // crrc
13375       return std::make_pair(0U, &PPC::CRRCRegClass);
13376     }
13377   } else if (Constraint == "wc" && Subtarget.useCRBits()) {
13378     // An individual CR bit.
13379     return std::make_pair(0U, &PPC::CRBITRCRegClass);
13380   } else if ((Constraint == "wa" || Constraint == "wd" ||
13381              Constraint == "wf") && Subtarget.hasVSX()) {
13382     return std::make_pair(0U, &PPC::VSRCRegClass);
13383   } else if (Constraint == "ws" && Subtarget.hasVSX()) {
13384     if (VT == MVT::f32 && Subtarget.hasP8Vector())
13385       return std::make_pair(0U, &PPC::VSSRCRegClass);
13386     else
13387       return std::make_pair(0U, &PPC::VSFRCRegClass);
13388   }
13389 
13390   std::pair<unsigned, const TargetRegisterClass *> R =
13391       TargetLowering::getRegForInlineAsmConstraint(TRI, Constraint, VT);
13392 
13393   // r[0-9]+ are used, on PPC64, to refer to the corresponding 64-bit registers
13394   // (which we call X[0-9]+). If a 64-bit value has been requested, and a
13395   // 32-bit GPR has been selected, then 'upgrade' it to the 64-bit parent
13396   // register.
13397   // FIXME: If TargetLowering::getRegForInlineAsmConstraint could somehow use
13398   // the AsmName field from *RegisterInfo.td, then this would not be necessary.
13399   if (R.first && VT == MVT::i64 && Subtarget.isPPC64() &&
13400       PPC::GPRCRegClass.contains(R.first))
13401     return std::make_pair(TRI->getMatchingSuperReg(R.first,
13402                             PPC::sub_32, &PPC::G8RCRegClass),
13403                           &PPC::G8RCRegClass);
13404 
13405   // GCC accepts 'cc' as an alias for 'cr0', and we need to do the same.
13406   if (!R.second && StringRef("{cc}").equals_lower(Constraint)) {
13407     R.first = PPC::CR0;
13408     R.second = &PPC::CRRCRegClass;
13409   }
13410 
13411   return R;
13412 }
13413 
13414 /// LowerAsmOperandForConstraint - Lower the specified operand into the Ops
13415 /// vector.  If it is invalid, don't add anything to Ops.
13416 void PPCTargetLowering::LowerAsmOperandForConstraint(SDValue Op,
13417                                                      std::string &Constraint,
13418                                                      std::vector<SDValue>&Ops,
13419                                                      SelectionDAG &DAG) const {
13420   SDValue Result;
13421 
13422   // Only support length 1 constraints.
13423   if (Constraint.length() > 1) return;
13424 
13425   char Letter = Constraint[0];
13426   switch (Letter) {
13427   default: break;
13428   case 'I':
13429   case 'J':
13430   case 'K':
13431   case 'L':
13432   case 'M':
13433   case 'N':
13434   case 'O':
13435   case 'P': {
13436     ConstantSDNode *CST = dyn_cast<ConstantSDNode>(Op);
13437     if (!CST) return; // Must be an immediate to match.
13438     SDLoc dl(Op);
13439     int64_t Value = CST->getSExtValue();
13440     EVT TCVT = MVT::i64; // All constants taken to be 64 bits so that negative
13441                          // numbers are printed as such.
13442     switch (Letter) {
13443     default: llvm_unreachable("Unknown constraint letter!");
13444     case 'I':  // "I" is a signed 16-bit constant.
13445       if (isInt<16>(Value))
13446         Result = DAG.getTargetConstant(Value, dl, TCVT);
13447       break;
13448     case 'J':  // "J" is a constant with only the high-order 16 bits nonzero.
13449       if (isShiftedUInt<16, 16>(Value))
13450         Result = DAG.getTargetConstant(Value, dl, TCVT);
13451       break;
13452     case 'L':  // "L" is a signed 16-bit constant shifted left 16 bits.
13453       if (isShiftedInt<16, 16>(Value))
13454         Result = DAG.getTargetConstant(Value, dl, TCVT);
13455       break;
13456     case 'K':  // "K" is a constant with only the low-order 16 bits nonzero.
13457       if (isUInt<16>(Value))
13458         Result = DAG.getTargetConstant(Value, dl, TCVT);
13459       break;
13460     case 'M':  // "M" is a constant that is greater than 31.
13461       if (Value > 31)
13462         Result = DAG.getTargetConstant(Value, dl, TCVT);
13463       break;
13464     case 'N':  // "N" is a positive constant that is an exact power of two.
13465       if (Value > 0 && isPowerOf2_64(Value))
13466         Result = DAG.getTargetConstant(Value, dl, TCVT);
13467       break;
13468     case 'O':  // "O" is the constant zero.
13469       if (Value == 0)
13470         Result = DAG.getTargetConstant(Value, dl, TCVT);
13471       break;
13472     case 'P':  // "P" is a constant whose negation is a signed 16-bit constant.
13473       if (isInt<16>(-Value))
13474         Result = DAG.getTargetConstant(Value, dl, TCVT);
13475       break;
13476     }
13477     break;
13478   }
13479   }
13480 
13481   if (Result.getNode()) {
13482     Ops.push_back(Result);
13483     return;
13484   }
13485 
13486   // Handle standard constraint letters.
13487   TargetLowering::LowerAsmOperandForConstraint(Op, Constraint, Ops, DAG);
13488 }
13489 
13490 // isLegalAddressingMode - Return true if the addressing mode represented
13491 // by AM is legal for this target, for a load/store of the specified type.
13492 bool PPCTargetLowering::isLegalAddressingMode(const DataLayout &DL,
13493                                               const AddrMode &AM, Type *Ty,
13494                                               unsigned AS, Instruction *I) const {
13495   // PPC does not allow r+i addressing modes for vectors!
13496   if (Ty->isVectorTy() && AM.BaseOffs != 0)
13497     return false;
13498 
13499   // PPC allows a sign-extended 16-bit immediate field.
13500   if (AM.BaseOffs <= -(1LL << 16) || AM.BaseOffs >= (1LL << 16)-1)
13501     return false;
13502 
13503   // No global is ever allowed as a base.
13504   if (AM.BaseGV)
13505     return false;
13506 
13507   // PPC only support r+r,
13508   switch (AM.Scale) {
13509   case 0:  // "r+i" or just "i", depending on HasBaseReg.
13510     break;
13511   case 1:
13512     if (AM.HasBaseReg && AM.BaseOffs)  // "r+r+i" is not allowed.
13513       return false;
13514     // Otherwise we have r+r or r+i.
13515     break;
13516   case 2:
13517     if (AM.HasBaseReg || AM.BaseOffs)  // 2*r+r  or  2*r+i is not allowed.
13518       return false;
13519     // Allow 2*r as r+r.
13520     break;
13521   default:
13522     // No other scales are supported.
13523     return false;
13524   }
13525 
13526   return true;
13527 }
13528 
13529 SDValue PPCTargetLowering::LowerRETURNADDR(SDValue Op,
13530                                            SelectionDAG &DAG) const {
13531   MachineFunction &MF = DAG.getMachineFunction();
13532   MachineFrameInfo &MFI = MF.getFrameInfo();
13533   MFI.setReturnAddressIsTaken(true);
13534 
13535   if (verifyReturnAddressArgumentIsConstant(Op, DAG))
13536     return SDValue();
13537 
13538   SDLoc dl(Op);
13539   unsigned Depth = cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue();
13540 
13541   // Make sure the function does not optimize away the store of the RA to
13542   // the stack.
13543   PPCFunctionInfo *FuncInfo = MF.getInfo<PPCFunctionInfo>();
13544   FuncInfo->setLRStoreRequired();
13545   bool isPPC64 = Subtarget.isPPC64();
13546   auto PtrVT = getPointerTy(MF.getDataLayout());
13547 
13548   if (Depth > 0) {
13549     SDValue FrameAddr = LowerFRAMEADDR(Op, DAG);
13550     SDValue Offset =
13551         DAG.getConstant(Subtarget.getFrameLowering()->getReturnSaveOffset(), dl,
13552                         isPPC64 ? MVT::i64 : MVT::i32);
13553     return DAG.getLoad(PtrVT, dl, DAG.getEntryNode(),
13554                        DAG.getNode(ISD::ADD, dl, PtrVT, FrameAddr, Offset),
13555                        MachinePointerInfo());
13556   }
13557 
13558   // Just load the return address off the stack.
13559   SDValue RetAddrFI = getReturnAddrFrameIndex(DAG);
13560   return DAG.getLoad(PtrVT, dl, DAG.getEntryNode(), RetAddrFI,
13561                      MachinePointerInfo());
13562 }
13563 
13564 SDValue PPCTargetLowering::LowerFRAMEADDR(SDValue Op,
13565                                           SelectionDAG &DAG) const {
13566   SDLoc dl(Op);
13567   unsigned Depth = cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue();
13568 
13569   MachineFunction &MF = DAG.getMachineFunction();
13570   MachineFrameInfo &MFI = MF.getFrameInfo();
13571   MFI.setFrameAddressIsTaken(true);
13572 
13573   EVT PtrVT = getPointerTy(MF.getDataLayout());
13574   bool isPPC64 = PtrVT == MVT::i64;
13575 
13576   // Naked functions never have a frame pointer, and so we use r1. For all
13577   // other functions, this decision must be delayed until during PEI.
13578   unsigned FrameReg;
13579   if (MF.getFunction().hasFnAttribute(Attribute::Naked))
13580     FrameReg = isPPC64 ? PPC::X1 : PPC::R1;
13581   else
13582     FrameReg = isPPC64 ? PPC::FP8 : PPC::FP;
13583 
13584   SDValue FrameAddr = DAG.getCopyFromReg(DAG.getEntryNode(), dl, FrameReg,
13585                                          PtrVT);
13586   while (Depth--)
13587     FrameAddr = DAG.getLoad(Op.getValueType(), dl, DAG.getEntryNode(),
13588                             FrameAddr, MachinePointerInfo());
13589   return FrameAddr;
13590 }
13591 
13592 // FIXME? Maybe this could be a TableGen attribute on some registers and
13593 // this table could be generated automatically from RegInfo.
13594 unsigned PPCTargetLowering::getRegisterByName(const char* RegName, EVT VT,
13595                                               SelectionDAG &DAG) const {
13596   bool isPPC64 = Subtarget.isPPC64();
13597   bool isDarwinABI = Subtarget.isDarwinABI();
13598 
13599   if ((isPPC64 && VT != MVT::i64 && VT != MVT::i32) ||
13600       (!isPPC64 && VT != MVT::i32))
13601     report_fatal_error("Invalid register global variable type");
13602 
13603   bool is64Bit = isPPC64 && VT == MVT::i64;
13604   unsigned Reg = StringSwitch<unsigned>(RegName)
13605                    .Case("r1", is64Bit ? PPC::X1 : PPC::R1)
13606                    .Case("r2", (isDarwinABI || isPPC64) ? 0 : PPC::R2)
13607                    .Case("r13", (!isPPC64 && isDarwinABI) ? 0 :
13608                                   (is64Bit ? PPC::X13 : PPC::R13))
13609                    .Default(0);
13610 
13611   if (Reg)
13612     return Reg;
13613   report_fatal_error("Invalid register name global variable");
13614 }
13615 
13616 bool
13617 PPCTargetLowering::isOffsetFoldingLegal(const GlobalAddressSDNode *GA) const {
13618   // The PowerPC target isn't yet aware of offsets.
13619   return false;
13620 }
13621 
13622 bool PPCTargetLowering::getTgtMemIntrinsic(IntrinsicInfo &Info,
13623                                            const CallInst &I,
13624                                            MachineFunction &MF,
13625                                            unsigned Intrinsic) const {
13626   switch (Intrinsic) {
13627   case Intrinsic::ppc_qpx_qvlfd:
13628   case Intrinsic::ppc_qpx_qvlfs:
13629   case Intrinsic::ppc_qpx_qvlfcd:
13630   case Intrinsic::ppc_qpx_qvlfcs:
13631   case Intrinsic::ppc_qpx_qvlfiwa:
13632   case Intrinsic::ppc_qpx_qvlfiwz:
13633   case Intrinsic::ppc_altivec_lvx:
13634   case Intrinsic::ppc_altivec_lvxl:
13635   case Intrinsic::ppc_altivec_lvebx:
13636   case Intrinsic::ppc_altivec_lvehx:
13637   case Intrinsic::ppc_altivec_lvewx:
13638   case Intrinsic::ppc_vsx_lxvd2x:
13639   case Intrinsic::ppc_vsx_lxvw4x: {
13640     EVT VT;
13641     switch (Intrinsic) {
13642     case Intrinsic::ppc_altivec_lvebx:
13643       VT = MVT::i8;
13644       break;
13645     case Intrinsic::ppc_altivec_lvehx:
13646       VT = MVT::i16;
13647       break;
13648     case Intrinsic::ppc_altivec_lvewx:
13649       VT = MVT::i32;
13650       break;
13651     case Intrinsic::ppc_vsx_lxvd2x:
13652       VT = MVT::v2f64;
13653       break;
13654     case Intrinsic::ppc_qpx_qvlfd:
13655       VT = MVT::v4f64;
13656       break;
13657     case Intrinsic::ppc_qpx_qvlfs:
13658       VT = MVT::v4f32;
13659       break;
13660     case Intrinsic::ppc_qpx_qvlfcd:
13661       VT = MVT::v2f64;
13662       break;
13663     case Intrinsic::ppc_qpx_qvlfcs:
13664       VT = MVT::v2f32;
13665       break;
13666     default:
13667       VT = MVT::v4i32;
13668       break;
13669     }
13670 
13671     Info.opc = ISD::INTRINSIC_W_CHAIN;
13672     Info.memVT = VT;
13673     Info.ptrVal = I.getArgOperand(0);
13674     Info.offset = -VT.getStoreSize()+1;
13675     Info.size = 2*VT.getStoreSize()-1;
13676     Info.align = 1;
13677     Info.flags = MachineMemOperand::MOLoad;
13678     return true;
13679   }
13680   case Intrinsic::ppc_qpx_qvlfda:
13681   case Intrinsic::ppc_qpx_qvlfsa:
13682   case Intrinsic::ppc_qpx_qvlfcda:
13683   case Intrinsic::ppc_qpx_qvlfcsa:
13684   case Intrinsic::ppc_qpx_qvlfiwaa:
13685   case Intrinsic::ppc_qpx_qvlfiwza: {
13686     EVT VT;
13687     switch (Intrinsic) {
13688     case Intrinsic::ppc_qpx_qvlfda:
13689       VT = MVT::v4f64;
13690       break;
13691     case Intrinsic::ppc_qpx_qvlfsa:
13692       VT = MVT::v4f32;
13693       break;
13694     case Intrinsic::ppc_qpx_qvlfcda:
13695       VT = MVT::v2f64;
13696       break;
13697     case Intrinsic::ppc_qpx_qvlfcsa:
13698       VT = MVT::v2f32;
13699       break;
13700     default:
13701       VT = MVT::v4i32;
13702       break;
13703     }
13704 
13705     Info.opc = ISD::INTRINSIC_W_CHAIN;
13706     Info.memVT = VT;
13707     Info.ptrVal = I.getArgOperand(0);
13708     Info.offset = 0;
13709     Info.size = VT.getStoreSize();
13710     Info.align = 1;
13711     Info.flags = MachineMemOperand::MOLoad;
13712     return true;
13713   }
13714   case Intrinsic::ppc_qpx_qvstfd:
13715   case Intrinsic::ppc_qpx_qvstfs:
13716   case Intrinsic::ppc_qpx_qvstfcd:
13717   case Intrinsic::ppc_qpx_qvstfcs:
13718   case Intrinsic::ppc_qpx_qvstfiw:
13719   case Intrinsic::ppc_altivec_stvx:
13720   case Intrinsic::ppc_altivec_stvxl:
13721   case Intrinsic::ppc_altivec_stvebx:
13722   case Intrinsic::ppc_altivec_stvehx:
13723   case Intrinsic::ppc_altivec_stvewx:
13724   case Intrinsic::ppc_vsx_stxvd2x:
13725   case Intrinsic::ppc_vsx_stxvw4x: {
13726     EVT VT;
13727     switch (Intrinsic) {
13728     case Intrinsic::ppc_altivec_stvebx:
13729       VT = MVT::i8;
13730       break;
13731     case Intrinsic::ppc_altivec_stvehx:
13732       VT = MVT::i16;
13733       break;
13734     case Intrinsic::ppc_altivec_stvewx:
13735       VT = MVT::i32;
13736       break;
13737     case Intrinsic::ppc_vsx_stxvd2x:
13738       VT = MVT::v2f64;
13739       break;
13740     case Intrinsic::ppc_qpx_qvstfd:
13741       VT = MVT::v4f64;
13742       break;
13743     case Intrinsic::ppc_qpx_qvstfs:
13744       VT = MVT::v4f32;
13745       break;
13746     case Intrinsic::ppc_qpx_qvstfcd:
13747       VT = MVT::v2f64;
13748       break;
13749     case Intrinsic::ppc_qpx_qvstfcs:
13750       VT = MVT::v2f32;
13751       break;
13752     default:
13753       VT = MVT::v4i32;
13754       break;
13755     }
13756 
13757     Info.opc = ISD::INTRINSIC_VOID;
13758     Info.memVT = VT;
13759     Info.ptrVal = I.getArgOperand(1);
13760     Info.offset = -VT.getStoreSize()+1;
13761     Info.size = 2*VT.getStoreSize()-1;
13762     Info.align = 1;
13763     Info.flags = MachineMemOperand::MOStore;
13764     return true;
13765   }
13766   case Intrinsic::ppc_qpx_qvstfda:
13767   case Intrinsic::ppc_qpx_qvstfsa:
13768   case Intrinsic::ppc_qpx_qvstfcda:
13769   case Intrinsic::ppc_qpx_qvstfcsa:
13770   case Intrinsic::ppc_qpx_qvstfiwa: {
13771     EVT VT;
13772     switch (Intrinsic) {
13773     case Intrinsic::ppc_qpx_qvstfda:
13774       VT = MVT::v4f64;
13775       break;
13776     case Intrinsic::ppc_qpx_qvstfsa:
13777       VT = MVT::v4f32;
13778       break;
13779     case Intrinsic::ppc_qpx_qvstfcda:
13780       VT = MVT::v2f64;
13781       break;
13782     case Intrinsic::ppc_qpx_qvstfcsa:
13783       VT = MVT::v2f32;
13784       break;
13785     default:
13786       VT = MVT::v4i32;
13787       break;
13788     }
13789 
13790     Info.opc = ISD::INTRINSIC_VOID;
13791     Info.memVT = VT;
13792     Info.ptrVal = I.getArgOperand(1);
13793     Info.offset = 0;
13794     Info.size = VT.getStoreSize();
13795     Info.align = 1;
13796     Info.flags = MachineMemOperand::MOStore;
13797     return true;
13798   }
13799   default:
13800     break;
13801   }
13802 
13803   return false;
13804 }
13805 
13806 /// getOptimalMemOpType - Returns the target specific optimal type for load
13807 /// and store operations as a result of memset, memcpy, and memmove
13808 /// lowering. If DstAlign is zero that means it's safe to destination
13809 /// alignment can satisfy any constraint. Similarly if SrcAlign is zero it
13810 /// means there isn't a need to check it against alignment requirement,
13811 /// probably because the source does not need to be loaded. If 'IsMemset' is
13812 /// true, that means it's expanding a memset. If 'ZeroMemset' is true, that
13813 /// means it's a memset of zero. 'MemcpyStrSrc' indicates whether the memcpy
13814 /// source is constant so it does not need to be loaded.
13815 /// It returns EVT::Other if the type should be determined using generic
13816 /// target-independent logic.
13817 EVT PPCTargetLowering::getOptimalMemOpType(uint64_t Size,
13818                                            unsigned DstAlign, unsigned SrcAlign,
13819                                            bool IsMemset, bool ZeroMemset,
13820                                            bool MemcpyStrSrc,
13821                                            MachineFunction &MF) const {
13822   if (getTargetMachine().getOptLevel() != CodeGenOpt::None) {
13823     const Function &F = MF.getFunction();
13824     // When expanding a memset, require at least two QPX instructions to cover
13825     // the cost of loading the value to be stored from the constant pool.
13826     if (Subtarget.hasQPX() && Size >= 32 && (!IsMemset || Size >= 64) &&
13827        (!SrcAlign || SrcAlign >= 32) && (!DstAlign || DstAlign >= 32) &&
13828         !F.hasFnAttribute(Attribute::NoImplicitFloat)) {
13829       return MVT::v4f64;
13830     }
13831 
13832     // We should use Altivec/VSX loads and stores when available. For unaligned
13833     // addresses, unaligned VSX loads are only fast starting with the P8.
13834     if (Subtarget.hasAltivec() && Size >= 16 &&
13835         (((!SrcAlign || SrcAlign >= 16) && (!DstAlign || DstAlign >= 16)) ||
13836          ((IsMemset && Subtarget.hasVSX()) || Subtarget.hasP8Vector())))
13837       return MVT::v4i32;
13838   }
13839 
13840   if (Subtarget.isPPC64()) {
13841     return MVT::i64;
13842   }
13843 
13844   return MVT::i32;
13845 }
13846 
13847 /// Returns true if it is beneficial to convert a load of a constant
13848 /// to just the constant itself.
13849 bool PPCTargetLowering::shouldConvertConstantLoadToIntImm(const APInt &Imm,
13850                                                           Type *Ty) const {
13851   assert(Ty->isIntegerTy());
13852 
13853   unsigned BitSize = Ty->getPrimitiveSizeInBits();
13854   return !(BitSize == 0 || BitSize > 64);
13855 }
13856 
13857 bool PPCTargetLowering::isTruncateFree(Type *Ty1, Type *Ty2) const {
13858   if (!Ty1->isIntegerTy() || !Ty2->isIntegerTy())
13859     return false;
13860   unsigned NumBits1 = Ty1->getPrimitiveSizeInBits();
13861   unsigned NumBits2 = Ty2->getPrimitiveSizeInBits();
13862   return NumBits1 == 64 && NumBits2 == 32;
13863 }
13864 
13865 bool PPCTargetLowering::isTruncateFree(EVT VT1, EVT VT2) const {
13866   if (!VT1.isInteger() || !VT2.isInteger())
13867     return false;
13868   unsigned NumBits1 = VT1.getSizeInBits();
13869   unsigned NumBits2 = VT2.getSizeInBits();
13870   return NumBits1 == 64 && NumBits2 == 32;
13871 }
13872 
13873 bool PPCTargetLowering::isZExtFree(SDValue Val, EVT VT2) const {
13874   // Generally speaking, zexts are not free, but they are free when they can be
13875   // folded with other operations.
13876   if (LoadSDNode *LD = dyn_cast<LoadSDNode>(Val)) {
13877     EVT MemVT = LD->getMemoryVT();
13878     if ((MemVT == MVT::i1 || MemVT == MVT::i8 || MemVT == MVT::i16 ||
13879          (Subtarget.isPPC64() && MemVT == MVT::i32)) &&
13880         (LD->getExtensionType() == ISD::NON_EXTLOAD ||
13881          LD->getExtensionType() == ISD::ZEXTLOAD))
13882       return true;
13883   }
13884 
13885   // FIXME: Add other cases...
13886   //  - 32-bit shifts with a zext to i64
13887   //  - zext after ctlz, bswap, etc.
13888   //  - zext after and by a constant mask
13889 
13890   return TargetLowering::isZExtFree(Val, VT2);
13891 }
13892 
13893 bool PPCTargetLowering::isFPExtFree(EVT DestVT, EVT SrcVT) const {
13894   assert(DestVT.isFloatingPoint() && SrcVT.isFloatingPoint() &&
13895          "invalid fpext types");
13896   // Extending to float128 is not free.
13897   if (DestVT == MVT::f128)
13898     return false;
13899   return true;
13900 }
13901 
13902 bool PPCTargetLowering::isLegalICmpImmediate(int64_t Imm) const {
13903   return isInt<16>(Imm) || isUInt<16>(Imm);
13904 }
13905 
13906 bool PPCTargetLowering::isLegalAddImmediate(int64_t Imm) const {
13907   return isInt<16>(Imm) || isUInt<16>(Imm);
13908 }
13909 
13910 bool PPCTargetLowering::allowsMisalignedMemoryAccesses(EVT VT,
13911                                                        unsigned,
13912                                                        unsigned,
13913                                                        bool *Fast) const {
13914   if (DisablePPCUnaligned)
13915     return false;
13916 
13917   // PowerPC supports unaligned memory access for simple non-vector types.
13918   // Although accessing unaligned addresses is not as efficient as accessing
13919   // aligned addresses, it is generally more efficient than manual expansion,
13920   // and generally only traps for software emulation when crossing page
13921   // boundaries.
13922 
13923   if (!VT.isSimple())
13924     return false;
13925 
13926   if (VT.getSimpleVT().isVector()) {
13927     if (Subtarget.hasVSX()) {
13928       if (VT != MVT::v2f64 && VT != MVT::v2i64 &&
13929           VT != MVT::v4f32 && VT != MVT::v4i32)
13930         return false;
13931     } else {
13932       return false;
13933     }
13934   }
13935 
13936   if (VT == MVT::ppcf128)
13937     return false;
13938 
13939   if (Fast)
13940     *Fast = true;
13941 
13942   return true;
13943 }
13944 
13945 bool PPCTargetLowering::isFMAFasterThanFMulAndFAdd(EVT VT) const {
13946   VT = VT.getScalarType();
13947 
13948   if (!VT.isSimple())
13949     return false;
13950 
13951   switch (VT.getSimpleVT().SimpleTy) {
13952   case MVT::f32:
13953   case MVT::f64:
13954     return true;
13955   case MVT::f128:
13956     return (EnableQuadPrecision && Subtarget.hasP9Vector());
13957   default:
13958     break;
13959   }
13960 
13961   return false;
13962 }
13963 
13964 const MCPhysReg *
13965 PPCTargetLowering::getScratchRegisters(CallingConv::ID) const {
13966   // LR is a callee-save register, but we must treat it as clobbered by any call
13967   // site. Hence we include LR in the scratch registers, which are in turn added
13968   // as implicit-defs for stackmaps and patchpoints. The same reasoning applies
13969   // to CTR, which is used by any indirect call.
13970   static const MCPhysReg ScratchRegs[] = {
13971     PPC::X12, PPC::LR8, PPC::CTR8, 0
13972   };
13973 
13974   return ScratchRegs;
13975 }
13976 
13977 unsigned PPCTargetLowering::getExceptionPointerRegister(
13978     const Constant *PersonalityFn) const {
13979   return Subtarget.isPPC64() ? PPC::X3 : PPC::R3;
13980 }
13981 
13982 unsigned PPCTargetLowering::getExceptionSelectorRegister(
13983     const Constant *PersonalityFn) const {
13984   return Subtarget.isPPC64() ? PPC::X4 : PPC::R4;
13985 }
13986 
13987 bool
13988 PPCTargetLowering::shouldExpandBuildVectorWithShuffles(
13989                      EVT VT , unsigned DefinedValues) const {
13990   if (VT == MVT::v2i64)
13991     return Subtarget.hasDirectMove(); // Don't need stack ops with direct moves
13992 
13993   if (Subtarget.hasVSX() || Subtarget.hasQPX())
13994     return true;
13995 
13996   return TargetLowering::shouldExpandBuildVectorWithShuffles(VT, DefinedValues);
13997 }
13998 
13999 Sched::Preference PPCTargetLowering::getSchedulingPreference(SDNode *N) const {
14000   if (DisableILPPref || Subtarget.enableMachineScheduler())
14001     return TargetLowering::getSchedulingPreference(N);
14002 
14003   return Sched::ILP;
14004 }
14005 
14006 // Create a fast isel object.
14007 FastISel *
14008 PPCTargetLowering::createFastISel(FunctionLoweringInfo &FuncInfo,
14009                                   const TargetLibraryInfo *LibInfo) const {
14010   return PPC::createFastISel(FuncInfo, LibInfo);
14011 }
14012 
14013 void PPCTargetLowering::initializeSplitCSR(MachineBasicBlock *Entry) const {
14014   if (Subtarget.isDarwinABI()) return;
14015   if (!Subtarget.isPPC64()) return;
14016 
14017   // Update IsSplitCSR in PPCFunctionInfo
14018   PPCFunctionInfo *PFI = Entry->getParent()->getInfo<PPCFunctionInfo>();
14019   PFI->setIsSplitCSR(true);
14020 }
14021 
14022 void PPCTargetLowering::insertCopiesSplitCSR(
14023   MachineBasicBlock *Entry,
14024   const SmallVectorImpl<MachineBasicBlock *> &Exits) const {
14025   const PPCRegisterInfo *TRI = Subtarget.getRegisterInfo();
14026   const MCPhysReg *IStart = TRI->getCalleeSavedRegsViaCopy(Entry->getParent());
14027   if (!IStart)
14028     return;
14029 
14030   const TargetInstrInfo *TII = Subtarget.getInstrInfo();
14031   MachineRegisterInfo *MRI = &Entry->getParent()->getRegInfo();
14032   MachineBasicBlock::iterator MBBI = Entry->begin();
14033   for (const MCPhysReg *I = IStart; *I; ++I) {
14034     const TargetRegisterClass *RC = nullptr;
14035     if (PPC::G8RCRegClass.contains(*I))
14036       RC = &PPC::G8RCRegClass;
14037     else if (PPC::F8RCRegClass.contains(*I))
14038       RC = &PPC::F8RCRegClass;
14039     else if (PPC::CRRCRegClass.contains(*I))
14040       RC = &PPC::CRRCRegClass;
14041     else if (PPC::VRRCRegClass.contains(*I))
14042       RC = &PPC::VRRCRegClass;
14043     else
14044       llvm_unreachable("Unexpected register class in CSRsViaCopy!");
14045 
14046     unsigned NewVR = MRI->createVirtualRegister(RC);
14047     // Create copy from CSR to a virtual register.
14048     // FIXME: this currently does not emit CFI pseudo-instructions, it works
14049     // fine for CXX_FAST_TLS since the C++-style TLS access functions should be
14050     // nounwind. If we want to generalize this later, we may need to emit
14051     // CFI pseudo-instructions.
14052     assert(Entry->getParent()->getFunction().hasFnAttribute(
14053              Attribute::NoUnwind) &&
14054            "Function should be nounwind in insertCopiesSplitCSR!");
14055     Entry->addLiveIn(*I);
14056     BuildMI(*Entry, MBBI, DebugLoc(), TII->get(TargetOpcode::COPY), NewVR)
14057       .addReg(*I);
14058 
14059     // Insert the copy-back instructions right before the terminator
14060     for (auto *Exit : Exits)
14061       BuildMI(*Exit, Exit->getFirstTerminator(), DebugLoc(),
14062               TII->get(TargetOpcode::COPY), *I)
14063         .addReg(NewVR);
14064   }
14065 }
14066 
14067 // Override to enable LOAD_STACK_GUARD lowering on Linux.
14068 bool PPCTargetLowering::useLoadStackGuardNode() const {
14069   if (!Subtarget.isTargetLinux())
14070     return TargetLowering::useLoadStackGuardNode();
14071   return true;
14072 }
14073 
14074 // Override to disable global variable loading on Linux.
14075 void PPCTargetLowering::insertSSPDeclarations(Module &M) const {
14076   if (!Subtarget.isTargetLinux())
14077     return TargetLowering::insertSSPDeclarations(M);
14078 }
14079 
14080 bool PPCTargetLowering::isFPImmLegal(const APFloat &Imm, EVT VT) const {
14081   if (!VT.isSimple() || !Subtarget.hasVSX())
14082     return false;
14083 
14084   switch(VT.getSimpleVT().SimpleTy) {
14085   default:
14086     // For FP types that are currently not supported by PPC backend, return
14087     // false. Examples: f16, f80.
14088     return false;
14089   case MVT::f32:
14090   case MVT::f64:
14091   case MVT::ppcf128:
14092     return Imm.isPosZero();
14093   }
14094 }
14095 
14096 // For vector shift operation op, fold
14097 // (op x, (and y, ((1 << numbits(x)) - 1))) -> (target op x, y)
14098 static SDValue stripModuloOnShift(const TargetLowering &TLI, SDNode *N,
14099                                   SelectionDAG &DAG) {
14100   SDValue N0 = N->getOperand(0);
14101   SDValue N1 = N->getOperand(1);
14102   EVT VT = N0.getValueType();
14103   unsigned OpSizeInBits = VT.getScalarSizeInBits();
14104   unsigned Opcode = N->getOpcode();
14105   unsigned TargetOpcode;
14106 
14107   switch (Opcode) {
14108   default:
14109     llvm_unreachable("Unexpected shift operation");
14110   case ISD::SHL:
14111     TargetOpcode = PPCISD::SHL;
14112     break;
14113   case ISD::SRL:
14114     TargetOpcode = PPCISD::SRL;
14115     break;
14116   case ISD::SRA:
14117     TargetOpcode = PPCISD::SRA;
14118     break;
14119   }
14120 
14121   if (VT.isVector() && TLI.isOperationLegal(Opcode, VT) &&
14122       N1->getOpcode() == ISD::AND)
14123     if (ConstantSDNode *Mask = isConstOrConstSplat(N1->getOperand(1)))
14124       if (Mask->getZExtValue() == OpSizeInBits - 1)
14125         return DAG.getNode(TargetOpcode, SDLoc(N), VT, N0, N1->getOperand(0));
14126 
14127   return SDValue();
14128 }
14129 
14130 SDValue PPCTargetLowering::combineSHL(SDNode *N, DAGCombinerInfo &DCI) const {
14131   if (auto Value = stripModuloOnShift(*this, N, DCI.DAG))
14132     return Value;
14133 
14134   return SDValue();
14135 }
14136 
14137 SDValue PPCTargetLowering::combineSRA(SDNode *N, DAGCombinerInfo &DCI) const {
14138   if (auto Value = stripModuloOnShift(*this, N, DCI.DAG))
14139     return Value;
14140 
14141   return SDValue();
14142 }
14143 
14144 SDValue PPCTargetLowering::combineSRL(SDNode *N, DAGCombinerInfo &DCI) const {
14145   if (auto Value = stripModuloOnShift(*this, N, DCI.DAG))
14146     return Value;
14147 
14148   return SDValue();
14149 }
14150 
14151 bool PPCTargetLowering::mayBeEmittedAsTailCall(const CallInst *CI) const {
14152   // Only duplicate to increase tail-calls for the 64bit SysV ABIs.
14153   if (!Subtarget.isSVR4ABI() || !Subtarget.isPPC64())
14154     return false;
14155 
14156   // If not a tail call then no need to proceed.
14157   if (!CI->isTailCall())
14158     return false;
14159 
14160   // If tail calls are disabled for the caller then we are done.
14161   const Function *Caller = CI->getParent()->getParent();
14162   auto Attr = Caller->getFnAttribute("disable-tail-calls");
14163   if (Attr.getValueAsString() == "true")
14164     return false;
14165 
14166   // If sibling calls have been disabled and tail-calls aren't guaranteed
14167   // there is no reason to duplicate.
14168   auto &TM = getTargetMachine();
14169   if (!TM.Options.GuaranteedTailCallOpt && DisableSCO)
14170     return false;
14171 
14172   // Can't tail call a function called indirectly, or if it has variadic args.
14173   const Function *Callee = CI->getCalledFunction();
14174   if (!Callee || Callee->isVarArg())
14175     return false;
14176 
14177   // Make sure the callee and caller calling conventions are eligible for tco.
14178   if (!areCallingConvEligibleForTCO_64SVR4(Caller->getCallingConv(),
14179                                            CI->getCallingConv()))
14180       return false;
14181 
14182   // If the function is local then we have a good chance at tail-calling it
14183   return getTargetMachine().shouldAssumeDSOLocal(*Caller->getParent(), Callee);
14184 }
14185 
14186 bool PPCTargetLowering::
14187 isMaskAndCmp0FoldingBeneficial(const Instruction &AndI) const {
14188   const Value *Mask = AndI.getOperand(1);
14189   // If the mask is suitable for andi. or andis. we should sink the and.
14190   if (const ConstantInt *CI = dyn_cast<ConstantInt>(Mask)) {
14191     // Can't handle constants wider than 64-bits.
14192     if (CI->getBitWidth() > 64)
14193       return false;
14194     int64_t ConstVal = CI->getZExtValue();
14195     return isUInt<16>(ConstVal) ||
14196       (isUInt<16>(ConstVal >> 16) && !(ConstVal & 0xFFFF));
14197   }
14198 
14199   // For non-constant masks, we can always use the record-form and.
14200   return true;
14201 }
14202