1 //===-- PPCISelLowering.cpp - PPC DAG Lowering Implementation -------------===//
2 //
3 //                     The LLVM Compiler Infrastructure
4 //
5 // This file is distributed under the University of Illinois Open Source
6 // License. See LICENSE.TXT for details.
7 //
8 //===----------------------------------------------------------------------===//
9 //
10 // This file implements the PPCISelLowering class.
11 //
12 //===----------------------------------------------------------------------===//
13 
14 #include "PPCISelLowering.h"
15 #include "MCTargetDesc/PPCPredicates.h"
16 #include "PPCCallingConv.h"
17 #include "PPCCCState.h"
18 #include "PPCMachineFunctionInfo.h"
19 #include "PPCPerfectShuffle.h"
20 #include "PPCTargetMachine.h"
21 #include "PPCTargetObjectFile.h"
22 #include "llvm/ADT/STLExtras.h"
23 #include "llvm/ADT/Statistic.h"
24 #include "llvm/ADT/StringSwitch.h"
25 #include "llvm/ADT/Triple.h"
26 #include "llvm/CodeGen/CallingConvLower.h"
27 #include "llvm/CodeGen/MachineFrameInfo.h"
28 #include "llvm/CodeGen/MachineFunction.h"
29 #include "llvm/CodeGen/MachineInstrBuilder.h"
30 #include "llvm/CodeGen/MachineLoopInfo.h"
31 #include "llvm/CodeGen/MachineRegisterInfo.h"
32 #include "llvm/CodeGen/SelectionDAG.h"
33 #include "llvm/CodeGen/TargetLoweringObjectFileImpl.h"
34 #include "llvm/IR/CallingConv.h"
35 #include "llvm/IR/Constants.h"
36 #include "llvm/IR/DerivedTypes.h"
37 #include "llvm/IR/Function.h"
38 #include "llvm/IR/Intrinsics.h"
39 #include "llvm/Support/CommandLine.h"
40 #include "llvm/Support/ErrorHandling.h"
41 #include "llvm/Support/Format.h"
42 #include "llvm/Support/MathExtras.h"
43 #include "llvm/Support/raw_ostream.h"
44 #include "llvm/Target/TargetOptions.h"
45 #include <list>
46 
47 using namespace llvm;
48 
49 #define DEBUG_TYPE "ppc-lowering"
50 
static cl::opt<bool> DisablePPCPreinc("disable-ppc-preinc",
    cl::desc("disable preincrement load/store generation on PPC"), cl::Hidden);

static cl::opt<bool> DisableILPPref("disable-ppc-ilp-pref",
    cl::desc("disable setting the node scheduling preference to ILP on PPC"),
    cl::Hidden);

static cl::opt<bool> DisablePPCUnaligned("disable-ppc-unaligned",
    cl::desc("disable unaligned load/store generation on PPC"), cl::Hidden);

static cl::opt<bool> DisableSCO("disable-ppc-sco",
    cl::desc("disable sibling call optimization on ppc"), cl::Hidden);
62 
63 STATISTIC(NumTailCalls, "Number of tail calls");
64 STATISTIC(NumSiblingCalls, "Number of sibling calls");
65 
66 // FIXME: Remove this once the bug has been fixed!
67 extern cl::opt<bool> ANDIGlueBug;
68 
69 PPCTargetLowering::PPCTargetLowering(const PPCTargetMachine &TM,
70                                      const PPCSubtarget &STI)
71     : TargetLowering(TM), Subtarget(STI) {
72   // Use _setjmp/_longjmp instead of setjmp/longjmp.
73   setUseUnderscoreSetJmp(true);
74   setUseUnderscoreLongJmp(true);
75 
76   // On PPC32/64, arguments smaller than 4/8 bytes are extended, so all
77   // arguments are at least 4/8 bytes aligned.
78   bool isPPC64 = Subtarget.isPPC64();
  setMinStackArgumentAlignment(isPPC64 ? 8 : 4);
80 
81   // Set up the register classes.
82   addRegisterClass(MVT::i32, &PPC::GPRCRegClass);
83   if (!useSoftFloat()) {
84     addRegisterClass(MVT::f32, &PPC::F4RCRegClass);
85     addRegisterClass(MVT::f64, &PPC::F8RCRegClass);
86   }
87 
88   // PowerPC has an i16 but no i8 (or i1) SEXTLOAD
89   for (MVT VT : MVT::integer_valuetypes()) {
90     setLoadExtAction(ISD::SEXTLOAD, VT, MVT::i1, Promote);
91     setLoadExtAction(ISD::SEXTLOAD, VT, MVT::i8, Expand);
92   }
93 
94   setTruncStoreAction(MVT::f64, MVT::f32, Expand);
95 
  // PowerPC has pre-inc loads and stores.
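  // (The update forms, e.g. lwzu/stwu, write the computed effective address
  // back into the base register.)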
97   setIndexedLoadAction(ISD::PRE_INC, MVT::i1, Legal);
98   setIndexedLoadAction(ISD::PRE_INC, MVT::i8, Legal);
99   setIndexedLoadAction(ISD::PRE_INC, MVT::i16, Legal);
100   setIndexedLoadAction(ISD::PRE_INC, MVT::i32, Legal);
101   setIndexedLoadAction(ISD::PRE_INC, MVT::i64, Legal);
102   setIndexedLoadAction(ISD::PRE_INC, MVT::f32, Legal);
103   setIndexedLoadAction(ISD::PRE_INC, MVT::f64, Legal);
104   setIndexedStoreAction(ISD::PRE_INC, MVT::i1, Legal);
105   setIndexedStoreAction(ISD::PRE_INC, MVT::i8, Legal);
106   setIndexedStoreAction(ISD::PRE_INC, MVT::i16, Legal);
107   setIndexedStoreAction(ISD::PRE_INC, MVT::i32, Legal);
108   setIndexedStoreAction(ISD::PRE_INC, MVT::i64, Legal);
109   setIndexedStoreAction(ISD::PRE_INC, MVT::f32, Legal);
110   setIndexedStoreAction(ISD::PRE_INC, MVT::f64, Legal);
111 
112   if (Subtarget.useCRBits()) {
113     setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i1, Expand);
114 
115     if (isPPC64 || Subtarget.hasFPCVT()) {
116       setOperationAction(ISD::SINT_TO_FP, MVT::i1, Promote);
117       AddPromotedToType (ISD::SINT_TO_FP, MVT::i1,
118                          isPPC64 ? MVT::i64 : MVT::i32);
119       setOperationAction(ISD::UINT_TO_FP, MVT::i1, Promote);
120       AddPromotedToType(ISD::UINT_TO_FP, MVT::i1,
121                         isPPC64 ? MVT::i64 : MVT::i32);
122     } else {
123       setOperationAction(ISD::SINT_TO_FP, MVT::i1, Custom);
124       setOperationAction(ISD::UINT_TO_FP, MVT::i1, Custom);
125     }
126 
127     // PowerPC does not support direct load / store of condition registers
128     setOperationAction(ISD::LOAD, MVT::i1, Custom);
129     setOperationAction(ISD::STORE, MVT::i1, Custom);
130 
131     // FIXME: Remove this once the ANDI glue bug is fixed:
132     if (ANDIGlueBug)
133       setOperationAction(ISD::TRUNCATE, MVT::i1, Custom);
134 
135     for (MVT VT : MVT::integer_valuetypes()) {
136       setLoadExtAction(ISD::SEXTLOAD, VT, MVT::i1, Promote);
137       setLoadExtAction(ISD::ZEXTLOAD, VT, MVT::i1, Promote);
138       setTruncStoreAction(VT, MVT::i1, Expand);
139     }
140 
141     addRegisterClass(MVT::i1, &PPC::CRBITRCRegClass);
142   }
143 
144   // This is used in the ppcf128->int sequence.  Note it has different semantics
145   // from FP_ROUND:  that rounds to nearest, this rounds to zero.
146   setOperationAction(ISD::FP_ROUND_INREG, MVT::ppcf128, Custom);
147 
148   // We do not currently implement these libm ops for PowerPC.
149   setOperationAction(ISD::FFLOOR, MVT::ppcf128, Expand);
150   setOperationAction(ISD::FCEIL,  MVT::ppcf128, Expand);
151   setOperationAction(ISD::FTRUNC, MVT::ppcf128, Expand);
152   setOperationAction(ISD::FRINT,  MVT::ppcf128, Expand);
153   setOperationAction(ISD::FNEARBYINT, MVT::ppcf128, Expand);
154   setOperationAction(ISD::FREM, MVT::ppcf128, Expand);
155 
156   // PowerPC has no SREM/UREM instructions
157   setOperationAction(ISD::SREM, MVT::i32, Expand);
158   setOperationAction(ISD::UREM, MVT::i32, Expand);
159   setOperationAction(ISD::SREM, MVT::i64, Expand);
160   setOperationAction(ISD::UREM, MVT::i64, Expand);
161 
162   // Don't use SMUL_LOHI/UMUL_LOHI or SDIVREM/UDIVREM to lower SREM/UREM.
163   setOperationAction(ISD::UMUL_LOHI, MVT::i32, Expand);
164   setOperationAction(ISD::SMUL_LOHI, MVT::i32, Expand);
165   setOperationAction(ISD::UMUL_LOHI, MVT::i64, Expand);
166   setOperationAction(ISD::SMUL_LOHI, MVT::i64, Expand);
167   setOperationAction(ISD::UDIVREM, MVT::i32, Expand);
168   setOperationAction(ISD::SDIVREM, MVT::i32, Expand);
169   setOperationAction(ISD::UDIVREM, MVT::i64, Expand);
170   setOperationAction(ISD::SDIVREM, MVT::i64, Expand);
171 
172   // We don't support sin/cos/sqrt/fmod/pow
173   setOperationAction(ISD::FSIN , MVT::f64, Expand);
174   setOperationAction(ISD::FCOS , MVT::f64, Expand);
175   setOperationAction(ISD::FSINCOS, MVT::f64, Expand);
176   setOperationAction(ISD::FREM , MVT::f64, Expand);
177   setOperationAction(ISD::FPOW , MVT::f64, Expand);
178   setOperationAction(ISD::FMA  , MVT::f64, Legal);
179   setOperationAction(ISD::FSIN , MVT::f32, Expand);
180   setOperationAction(ISD::FCOS , MVT::f32, Expand);
181   setOperationAction(ISD::FSINCOS, MVT::f32, Expand);
182   setOperationAction(ISD::FREM , MVT::f32, Expand);
183   setOperationAction(ISD::FPOW , MVT::f32, Expand);
184   setOperationAction(ISD::FMA  , MVT::f32, Legal);
185 
186   setOperationAction(ISD::FLT_ROUNDS_, MVT::i32, Custom);
187 
  // Expand FSQRT unless the subtarget has a hardware square root, or unsafe
  // FP math lets us form it from the reciprocal-estimate instructions.
189   if (!Subtarget.hasFSQRT() &&
190       !(TM.Options.UnsafeFPMath && Subtarget.hasFRSQRTE() &&
191         Subtarget.hasFRE()))
192     setOperationAction(ISD::FSQRT, MVT::f64, Expand);
193 
194   if (!Subtarget.hasFSQRT() &&
195       !(TM.Options.UnsafeFPMath && Subtarget.hasFRSQRTES() &&
196         Subtarget.hasFRES()))
197     setOperationAction(ISD::FSQRT, MVT::f32, Expand);
198 
199   if (Subtarget.hasFCPSGN()) {
200     setOperationAction(ISD::FCOPYSIGN, MVT::f64, Legal);
201     setOperationAction(ISD::FCOPYSIGN, MVT::f32, Legal);
202   } else {
203     setOperationAction(ISD::FCOPYSIGN, MVT::f64, Expand);
204     setOperationAction(ISD::FCOPYSIGN, MVT::f32, Expand);
205   }
206 
207   if (Subtarget.hasFPRND()) {
208     setOperationAction(ISD::FFLOOR, MVT::f64, Legal);
209     setOperationAction(ISD::FCEIL,  MVT::f64, Legal);
210     setOperationAction(ISD::FTRUNC, MVT::f64, Legal);
211     setOperationAction(ISD::FROUND, MVT::f64, Legal);
212 
213     setOperationAction(ISD::FFLOOR, MVT::f32, Legal);
214     setOperationAction(ISD::FCEIL,  MVT::f32, Legal);
215     setOperationAction(ISD::FTRUNC, MVT::f32, Legal);
216     setOperationAction(ISD::FROUND, MVT::f32, Legal);
217   }
218 
219   // PowerPC does not have BSWAP
  // CTPOP and CTTZ were introduced in P8 and P9, respectively.
221   setOperationAction(ISD::BSWAP, MVT::i32  , Expand);
222   setOperationAction(ISD::BSWAP, MVT::i64  , Expand);
223   if (Subtarget.isISA3_0()) {
224     setOperationAction(ISD::CTTZ , MVT::i32  , Legal);
225     setOperationAction(ISD::CTTZ , MVT::i64  , Legal);
226   } else {
227     setOperationAction(ISD::CTTZ , MVT::i32  , Expand);
228     setOperationAction(ISD::CTTZ , MVT::i64  , Expand);
229   }
230 
231   if (Subtarget.hasPOPCNTD() == PPCSubtarget::POPCNTD_Fast) {
232     setOperationAction(ISD::CTPOP, MVT::i32  , Legal);
233     setOperationAction(ISD::CTPOP, MVT::i64  , Legal);
234   } else {
235     setOperationAction(ISD::CTPOP, MVT::i32  , Expand);
236     setOperationAction(ISD::CTPOP, MVT::i64  , Expand);
237   }
238 
239   // PowerPC does not have ROTR
240   setOperationAction(ISD::ROTR, MVT::i32   , Expand);
241   setOperationAction(ISD::ROTR, MVT::i64   , Expand);
242 
243   if (!Subtarget.useCRBits()) {
244     // PowerPC does not have Select
245     setOperationAction(ISD::SELECT, MVT::i32, Expand);
246     setOperationAction(ISD::SELECT, MVT::i64, Expand);
247     setOperationAction(ISD::SELECT, MVT::f32, Expand);
248     setOperationAction(ISD::SELECT, MVT::f64, Expand);
249   }
250 
251   // PowerPC wants to turn select_cc of FP into fsel when possible.
252   setOperationAction(ISD::SELECT_CC, MVT::f32, Custom);
253   setOperationAction(ISD::SELECT_CC, MVT::f64, Custom);
254 
255   // PowerPC wants to optimize integer setcc a bit
256   if (!Subtarget.useCRBits())
257     setOperationAction(ISD::SETCC, MVT::i32, Custom);
258 
  // PowerPC does not have BRCOND, which requires a SetCC.
260   if (!Subtarget.useCRBits())
261     setOperationAction(ISD::BRCOND, MVT::Other, Expand);
262 
263   setOperationAction(ISD::BR_JT,  MVT::Other, Expand);
264 
265   // PowerPC turns FP_TO_SINT into FCTIWZ and some load/stores.
266   setOperationAction(ISD::FP_TO_SINT, MVT::i32, Custom);
267 
268   // PowerPC does not have [U|S]INT_TO_FP
269   setOperationAction(ISD::SINT_TO_FP, MVT::i32, Expand);
270   setOperationAction(ISD::UINT_TO_FP, MVT::i32, Expand);
271 
272   if (Subtarget.hasDirectMove() && isPPC64) {
273     setOperationAction(ISD::BITCAST, MVT::f32, Legal);
274     setOperationAction(ISD::BITCAST, MVT::i32, Legal);
275     setOperationAction(ISD::BITCAST, MVT::i64, Legal);
276     setOperationAction(ISD::BITCAST, MVT::f64, Legal);
277   } else {
278     setOperationAction(ISD::BITCAST, MVT::f32, Expand);
279     setOperationAction(ISD::BITCAST, MVT::i32, Expand);
280     setOperationAction(ISD::BITCAST, MVT::i64, Expand);
281     setOperationAction(ISD::BITCAST, MVT::f64, Expand);
282   }
283 
284   // We cannot sextinreg(i1).  Expand to shifts.
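  // For example, an i1 sext_inreg of an i32 value expands to
  // (sra (shl x, 31), 31).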
285   setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i1, Expand);
286 
  // NOTE: EH_SJLJ_SETJMP/_LONGJMP are NOT intended to support SjLj exception
  // handling; they are a lightweight setjmp/longjmp replacement used to
  // support continuations, user-level threading, and the like. As a result,
  // no other SjLj exception interfaces are implemented, so please don't build
  // your own exception handling on top of them.
  // LLVM/Clang supports zero-cost DWARF exception handling.
293   setOperationAction(ISD::EH_SJLJ_SETJMP, MVT::i32, Custom);
294   setOperationAction(ISD::EH_SJLJ_LONGJMP, MVT::Other, Custom);
295 
296   // We want to legalize GlobalAddress and ConstantPool nodes into the
297   // appropriate instructions to materialize the address.
298   setOperationAction(ISD::GlobalAddress, MVT::i32, Custom);
299   setOperationAction(ISD::GlobalTLSAddress, MVT::i32, Custom);
300   setOperationAction(ISD::BlockAddress,  MVT::i32, Custom);
301   setOperationAction(ISD::ConstantPool,  MVT::i32, Custom);
302   setOperationAction(ISD::JumpTable,     MVT::i32, Custom);
303   setOperationAction(ISD::GlobalAddress, MVT::i64, Custom);
304   setOperationAction(ISD::GlobalTLSAddress, MVT::i64, Custom);
305   setOperationAction(ISD::BlockAddress,  MVT::i64, Custom);
306   setOperationAction(ISD::ConstantPool,  MVT::i64, Custom);
307   setOperationAction(ISD::JumpTable,     MVT::i64, Custom);
308 
309   // TRAP is legal.
310   setOperationAction(ISD::TRAP, MVT::Other, Legal);
311 
312   // TRAMPOLINE is custom lowered.
313   setOperationAction(ISD::INIT_TRAMPOLINE, MVT::Other, Custom);
314   setOperationAction(ISD::ADJUST_TRAMPOLINE, MVT::Other, Custom);
315 
316   // VASTART needs to be custom lowered to use the VarArgsFrameIndex
317   setOperationAction(ISD::VASTART           , MVT::Other, Custom);
318 
319   if (Subtarget.isSVR4ABI()) {
320     if (isPPC64) {
321       // VAARG always uses double-word chunks, so promote anything smaller.
322       setOperationAction(ISD::VAARG, MVT::i1, Promote);
323       AddPromotedToType (ISD::VAARG, MVT::i1, MVT::i64);
324       setOperationAction(ISD::VAARG, MVT::i8, Promote);
325       AddPromotedToType (ISD::VAARG, MVT::i8, MVT::i64);
326       setOperationAction(ISD::VAARG, MVT::i16, Promote);
327       AddPromotedToType (ISD::VAARG, MVT::i16, MVT::i64);
328       setOperationAction(ISD::VAARG, MVT::i32, Promote);
329       AddPromotedToType (ISD::VAARG, MVT::i32, MVT::i64);
330       setOperationAction(ISD::VAARG, MVT::Other, Expand);
331     } else {
332       // VAARG is custom lowered with the 32-bit SVR4 ABI.
333       setOperationAction(ISD::VAARG, MVT::Other, Custom);
334       setOperationAction(ISD::VAARG, MVT::i64, Custom);
335     }
336   } else
337     setOperationAction(ISD::VAARG, MVT::Other, Expand);
338 
339   if (Subtarget.isSVR4ABI() && !isPPC64)
340     // VACOPY is custom lowered with the 32-bit SVR4 ABI.
341     setOperationAction(ISD::VACOPY            , MVT::Other, Custom);
342   else
343     setOperationAction(ISD::VACOPY            , MVT::Other, Expand);
344 
345   // Use the default implementation.
346   setOperationAction(ISD::VAEND             , MVT::Other, Expand);
347   setOperationAction(ISD::STACKSAVE         , MVT::Other, Expand);
348   setOperationAction(ISD::STACKRESTORE      , MVT::Other, Custom);
349   setOperationAction(ISD::DYNAMIC_STACKALLOC, MVT::i32  , Custom);
350   setOperationAction(ISD::DYNAMIC_STACKALLOC, MVT::i64  , Custom);
351   setOperationAction(ISD::GET_DYNAMIC_AREA_OFFSET, MVT::i32, Custom);
352   setOperationAction(ISD::GET_DYNAMIC_AREA_OFFSET, MVT::i64, Custom);
353   setOperationAction(ISD::EH_DWARF_CFA, MVT::i32, Custom);
354   setOperationAction(ISD::EH_DWARF_CFA, MVT::i64, Custom);
355 
356   // We want to custom lower some of our intrinsics.
357   setOperationAction(ISD::INTRINSIC_WO_CHAIN, MVT::Other, Custom);
358 
359   // To handle counter-based loop conditions.
360   setOperationAction(ISD::INTRINSIC_W_CHAIN, MVT::i1, Custom);
361 
362   // Comparisons that require checking two conditions.
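  // (e.g. SETOLE is true when the compare result is LT or EQ, and SETUEQ when
  // it is EQ or UN, so each needs two CR-bit tests rather than one.)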
363   setCondCodeAction(ISD::SETULT, MVT::f32, Expand);
364   setCondCodeAction(ISD::SETULT, MVT::f64, Expand);
365   setCondCodeAction(ISD::SETUGT, MVT::f32, Expand);
366   setCondCodeAction(ISD::SETUGT, MVT::f64, Expand);
367   setCondCodeAction(ISD::SETUEQ, MVT::f32, Expand);
368   setCondCodeAction(ISD::SETUEQ, MVT::f64, Expand);
369   setCondCodeAction(ISD::SETOGE, MVT::f32, Expand);
370   setCondCodeAction(ISD::SETOGE, MVT::f64, Expand);
371   setCondCodeAction(ISD::SETOLE, MVT::f32, Expand);
372   setCondCodeAction(ISD::SETOLE, MVT::f64, Expand);
373   setCondCodeAction(ISD::SETONE, MVT::f32, Expand);
374   setCondCodeAction(ISD::SETONE, MVT::f64, Expand);
375 
376   if (Subtarget.has64BitSupport()) {
377     // They also have instructions for converting between i64 and fp.
378     setOperationAction(ISD::FP_TO_SINT, MVT::i64, Custom);
379     setOperationAction(ISD::FP_TO_UINT, MVT::i64, Expand);
380     setOperationAction(ISD::SINT_TO_FP, MVT::i64, Custom);
381     setOperationAction(ISD::UINT_TO_FP, MVT::i64, Expand);
382     // This is just the low 32 bits of a (signed) fp->i64 conversion.
383     // We cannot do this with Promote because i64 is not a legal type.
384     setOperationAction(ISD::FP_TO_UINT, MVT::i32, Custom);
385 
386     if (Subtarget.hasLFIWAX() || Subtarget.isPPC64())
387       setOperationAction(ISD::SINT_TO_FP, MVT::i32, Custom);
388   } else {
389     // PowerPC does not have FP_TO_UINT on 32-bit implementations.
390     setOperationAction(ISD::FP_TO_UINT, MVT::i32, Expand);
391   }
392 
393   // With the instructions enabled under FPCVT, we can do everything.
394   if (Subtarget.hasFPCVT()) {
395     if (Subtarget.has64BitSupport()) {
396       setOperationAction(ISD::FP_TO_SINT, MVT::i64, Custom);
397       setOperationAction(ISD::FP_TO_UINT, MVT::i64, Custom);
398       setOperationAction(ISD::SINT_TO_FP, MVT::i64, Custom);
399       setOperationAction(ISD::UINT_TO_FP, MVT::i64, Custom);
400     }
401 
402     setOperationAction(ISD::FP_TO_SINT, MVT::i32, Custom);
403     setOperationAction(ISD::FP_TO_UINT, MVT::i32, Custom);
404     setOperationAction(ISD::SINT_TO_FP, MVT::i32, Custom);
405     setOperationAction(ISD::UINT_TO_FP, MVT::i32, Custom);
406   }
407 
408   if (Subtarget.use64BitRegs()) {
409     // 64-bit PowerPC implementations can support i64 types directly
410     addRegisterClass(MVT::i64, &PPC::G8RCRegClass);
411     // BUILD_PAIR can't be handled natively, and should be expanded to shl/or
412     setOperationAction(ISD::BUILD_PAIR, MVT::i64, Expand);
413     // 64-bit PowerPC wants to expand i128 shifts itself.
414     setOperationAction(ISD::SHL_PARTS, MVT::i64, Custom);
415     setOperationAction(ISD::SRA_PARTS, MVT::i64, Custom);
416     setOperationAction(ISD::SRL_PARTS, MVT::i64, Custom);
417   } else {
418     // 32-bit PowerPC wants to expand i64 shifts itself.
419     setOperationAction(ISD::SHL_PARTS, MVT::i32, Custom);
420     setOperationAction(ISD::SRA_PARTS, MVT::i32, Custom);
421     setOperationAction(ISD::SRL_PARTS, MVT::i32, Custom);
422   }
423 
424   if (Subtarget.hasAltivec()) {
425     // First set operation action for all vector types to expand. Then we
426     // will selectively turn on ones that can be effectively codegen'd.
427     for (MVT VT : MVT::vector_valuetypes()) {
428       // add/sub are legal for all supported vector VT's.
429       setOperationAction(ISD::ADD, VT, Legal);
430       setOperationAction(ISD::SUB, VT, Legal);
431 
432       // Vector instructions introduced in P8
433       if (Subtarget.hasP8Altivec() && (VT.SimpleTy != MVT::v1i128)) {
434         setOperationAction(ISD::CTPOP, VT, Legal);
435         setOperationAction(ISD::CTLZ, VT, Legal);
436       }
437       else {
438         setOperationAction(ISD::CTPOP, VT, Expand);
439         setOperationAction(ISD::CTLZ, VT, Expand);
440       }
441 
442       // Vector instructions introduced in P9
443       if (Subtarget.hasP9Altivec() && (VT.SimpleTy != MVT::v1i128))
444         setOperationAction(ISD::CTTZ, VT, Legal);
445       else
446         setOperationAction(ISD::CTTZ, VT, Expand);
447 
448       // We promote all shuffles to v16i8.
449       setOperationAction(ISD::VECTOR_SHUFFLE, VT, Promote);
450       AddPromotedToType (ISD::VECTOR_SHUFFLE, VT, MVT::v16i8);
451 
452       // We promote all non-typed operations to v4i32.
453       setOperationAction(ISD::AND   , VT, Promote);
454       AddPromotedToType (ISD::AND   , VT, MVT::v4i32);
455       setOperationAction(ISD::OR    , VT, Promote);
456       AddPromotedToType (ISD::OR    , VT, MVT::v4i32);
457       setOperationAction(ISD::XOR   , VT, Promote);
458       AddPromotedToType (ISD::XOR   , VT, MVT::v4i32);
459       setOperationAction(ISD::LOAD  , VT, Promote);
460       AddPromotedToType (ISD::LOAD  , VT, MVT::v4i32);
461       setOperationAction(ISD::SELECT, VT, Promote);
462       AddPromotedToType (ISD::SELECT, VT, MVT::v4i32);
463       setOperationAction(ISD::SELECT_CC, VT, Promote);
464       AddPromotedToType (ISD::SELECT_CC, VT, MVT::v4i32);
465       setOperationAction(ISD::STORE, VT, Promote);
466       AddPromotedToType (ISD::STORE, VT, MVT::v4i32);
467 
468       // No other operations are legal.
469       setOperationAction(ISD::MUL , VT, Expand);
470       setOperationAction(ISD::SDIV, VT, Expand);
471       setOperationAction(ISD::SREM, VT, Expand);
472       setOperationAction(ISD::UDIV, VT, Expand);
473       setOperationAction(ISD::UREM, VT, Expand);
474       setOperationAction(ISD::FDIV, VT, Expand);
475       setOperationAction(ISD::FREM, VT, Expand);
476       setOperationAction(ISD::FNEG, VT, Expand);
477       setOperationAction(ISD::FSQRT, VT, Expand);
478       setOperationAction(ISD::FLOG, VT, Expand);
479       setOperationAction(ISD::FLOG10, VT, Expand);
480       setOperationAction(ISD::FLOG2, VT, Expand);
481       setOperationAction(ISD::FEXP, VT, Expand);
482       setOperationAction(ISD::FEXP2, VT, Expand);
483       setOperationAction(ISD::FSIN, VT, Expand);
484       setOperationAction(ISD::FCOS, VT, Expand);
485       setOperationAction(ISD::FABS, VT, Expand);
486       setOperationAction(ISD::FPOWI, VT, Expand);
487       setOperationAction(ISD::FFLOOR, VT, Expand);
488       setOperationAction(ISD::FCEIL,  VT, Expand);
489       setOperationAction(ISD::FTRUNC, VT, Expand);
490       setOperationAction(ISD::FRINT,  VT, Expand);
491       setOperationAction(ISD::FNEARBYINT, VT, Expand);
492       setOperationAction(ISD::EXTRACT_VECTOR_ELT, VT, Expand);
493       setOperationAction(ISD::INSERT_VECTOR_ELT, VT, Expand);
494       setOperationAction(ISD::BUILD_VECTOR, VT, Expand);
495       setOperationAction(ISD::MULHU, VT, Expand);
496       setOperationAction(ISD::MULHS, VT, Expand);
497       setOperationAction(ISD::UMUL_LOHI, VT, Expand);
498       setOperationAction(ISD::SMUL_LOHI, VT, Expand);
499       setOperationAction(ISD::UDIVREM, VT, Expand);
500       setOperationAction(ISD::SDIVREM, VT, Expand);
501       setOperationAction(ISD::SCALAR_TO_VECTOR, VT, Expand);
502       setOperationAction(ISD::FPOW, VT, Expand);
503       setOperationAction(ISD::BSWAP, VT, Expand);
504       setOperationAction(ISD::VSELECT, VT, Expand);
505       setOperationAction(ISD::SIGN_EXTEND_INREG, VT, Expand);
506       setOperationAction(ISD::ROTL, VT, Expand);
507       setOperationAction(ISD::ROTR, VT, Expand);
508 
509       for (MVT InnerVT : MVT::vector_valuetypes()) {
510         setTruncStoreAction(VT, InnerVT, Expand);
511         setLoadExtAction(ISD::SEXTLOAD, VT, InnerVT, Expand);
512         setLoadExtAction(ISD::ZEXTLOAD, VT, InnerVT, Expand);
513         setLoadExtAction(ISD::EXTLOAD, VT, InnerVT, Expand);
514       }
515     }
516 
    // We can custom expand all VECTOR_SHUFFLEs to VPERM; the others we can
    // handle with merges, splats, etc.
519     setOperationAction(ISD::VECTOR_SHUFFLE, MVT::v16i8, Custom);
520 
521     setOperationAction(ISD::AND   , MVT::v4i32, Legal);
522     setOperationAction(ISD::OR    , MVT::v4i32, Legal);
523     setOperationAction(ISD::XOR   , MVT::v4i32, Legal);
524     setOperationAction(ISD::LOAD  , MVT::v4i32, Legal);
525     setOperationAction(ISD::SELECT, MVT::v4i32,
526                        Subtarget.useCRBits() ? Legal : Expand);
527     setOperationAction(ISD::STORE , MVT::v4i32, Legal);
528     setOperationAction(ISD::FP_TO_SINT, MVT::v4i32, Legal);
529     setOperationAction(ISD::FP_TO_UINT, MVT::v4i32, Legal);
530     setOperationAction(ISD::SINT_TO_FP, MVT::v4i32, Legal);
531     setOperationAction(ISD::UINT_TO_FP, MVT::v4i32, Legal);
532     setOperationAction(ISD::FFLOOR, MVT::v4f32, Legal);
533     setOperationAction(ISD::FCEIL, MVT::v4f32, Legal);
534     setOperationAction(ISD::FTRUNC, MVT::v4f32, Legal);
535     setOperationAction(ISD::FNEARBYINT, MVT::v4f32, Legal);
536 
537     addRegisterClass(MVT::v4f32, &PPC::VRRCRegClass);
538     addRegisterClass(MVT::v4i32, &PPC::VRRCRegClass);
539     addRegisterClass(MVT::v8i16, &PPC::VRRCRegClass);
540     addRegisterClass(MVT::v16i8, &PPC::VRRCRegClass);
541 
542     setOperationAction(ISD::MUL, MVT::v4f32, Legal);
543     setOperationAction(ISD::FMA, MVT::v4f32, Legal);
544 
545     if (TM.Options.UnsafeFPMath || Subtarget.hasVSX()) {
546       setOperationAction(ISD::FDIV, MVT::v4f32, Legal);
547       setOperationAction(ISD::FSQRT, MVT::v4f32, Legal);
548     }
549 
550     if (Subtarget.hasP8Altivec())
551       setOperationAction(ISD::MUL, MVT::v4i32, Legal);
552     else
553       setOperationAction(ISD::MUL, MVT::v4i32, Custom);
554 
555     setOperationAction(ISD::MUL, MVT::v8i16, Custom);
556     setOperationAction(ISD::MUL, MVT::v16i8, Custom);
557 
558     setOperationAction(ISD::SCALAR_TO_VECTOR, MVT::v4f32, Custom);
559     setOperationAction(ISD::SCALAR_TO_VECTOR, MVT::v4i32, Custom);
560 
561     setOperationAction(ISD::BUILD_VECTOR, MVT::v16i8, Custom);
562     setOperationAction(ISD::BUILD_VECTOR, MVT::v8i16, Custom);
563     setOperationAction(ISD::BUILD_VECTOR, MVT::v4i32, Custom);
564     setOperationAction(ISD::BUILD_VECTOR, MVT::v4f32, Custom);
565 
566     // Altivec does not contain unordered floating-point compare instructions
567     setCondCodeAction(ISD::SETUO, MVT::v4f32, Expand);
568     setCondCodeAction(ISD::SETUEQ, MVT::v4f32, Expand);
569     setCondCodeAction(ISD::SETO,   MVT::v4f32, Expand);
570     setCondCodeAction(ISD::SETONE, MVT::v4f32, Expand);
571 
572     if (Subtarget.hasVSX()) {
573       setOperationAction(ISD::SCALAR_TO_VECTOR, MVT::v2f64, Legal);
574       setOperationAction(ISD::EXTRACT_VECTOR_ELT, MVT::v2f64, Legal);
575       if (Subtarget.hasP8Vector()) {
576         setOperationAction(ISD::SCALAR_TO_VECTOR, MVT::v4f32, Legal);
577         setOperationAction(ISD::EXTRACT_VECTOR_ELT, MVT::v4f32, Legal);
578       }
579       if (Subtarget.hasDirectMove() && isPPC64) {
580         setOperationAction(ISD::SCALAR_TO_VECTOR, MVT::v16i8, Legal);
581         setOperationAction(ISD::SCALAR_TO_VECTOR, MVT::v8i16, Legal);
582         setOperationAction(ISD::SCALAR_TO_VECTOR, MVT::v4i32, Legal);
583         setOperationAction(ISD::SCALAR_TO_VECTOR, MVT::v2i64, Legal);
584         setOperationAction(ISD::EXTRACT_VECTOR_ELT, MVT::v16i8, Legal);
585         setOperationAction(ISD::EXTRACT_VECTOR_ELT, MVT::v8i16, Legal);
586         setOperationAction(ISD::EXTRACT_VECTOR_ELT, MVT::v4i32, Legal);
587         setOperationAction(ISD::EXTRACT_VECTOR_ELT, MVT::v2i64, Legal);
588       }
589       setOperationAction(ISD::EXTRACT_VECTOR_ELT, MVT::v2f64, Legal);
590 
591       setOperationAction(ISD::FFLOOR, MVT::v2f64, Legal);
592       setOperationAction(ISD::FCEIL, MVT::v2f64, Legal);
593       setOperationAction(ISD::FTRUNC, MVT::v2f64, Legal);
594       setOperationAction(ISD::FNEARBYINT, MVT::v2f64, Legal);
595       setOperationAction(ISD::FROUND, MVT::v2f64, Legal);
596 
597       setOperationAction(ISD::FROUND, MVT::v4f32, Legal);
598 
599       setOperationAction(ISD::MUL, MVT::v2f64, Legal);
600       setOperationAction(ISD::FMA, MVT::v2f64, Legal);
601 
602       setOperationAction(ISD::FDIV, MVT::v2f64, Legal);
603       setOperationAction(ISD::FSQRT, MVT::v2f64, Legal);
604 
605       setOperationAction(ISD::VSELECT, MVT::v16i8, Legal);
606       setOperationAction(ISD::VSELECT, MVT::v8i16, Legal);
607       setOperationAction(ISD::VSELECT, MVT::v4i32, Legal);
608       setOperationAction(ISD::VSELECT, MVT::v4f32, Legal);
609       setOperationAction(ISD::VSELECT, MVT::v2f64, Legal);
610 
611       // Share the Altivec comparison restrictions.
612       setCondCodeAction(ISD::SETUO, MVT::v2f64, Expand);
613       setCondCodeAction(ISD::SETUEQ, MVT::v2f64, Expand);
614       setCondCodeAction(ISD::SETO,   MVT::v2f64, Expand);
615       setCondCodeAction(ISD::SETONE, MVT::v2f64, Expand);
616 
617       setOperationAction(ISD::LOAD, MVT::v2f64, Legal);
618       setOperationAction(ISD::STORE, MVT::v2f64, Legal);
619 
620       setOperationAction(ISD::VECTOR_SHUFFLE, MVT::v2f64, Legal);
621 
622       if (Subtarget.hasP8Vector())
623         addRegisterClass(MVT::f32, &PPC::VSSRCRegClass);
624 
625       addRegisterClass(MVT::f64, &PPC::VSFRCRegClass);
626 
627       addRegisterClass(MVT::v4i32, &PPC::VSRCRegClass);
628       addRegisterClass(MVT::v4f32, &PPC::VSRCRegClass);
629       addRegisterClass(MVT::v2f64, &PPC::VSRCRegClass);
630 
631       if (Subtarget.hasP8Altivec()) {
632         setOperationAction(ISD::SHL, MVT::v2i64, Legal);
633         setOperationAction(ISD::SRA, MVT::v2i64, Legal);
634         setOperationAction(ISD::SRL, MVT::v2i64, Legal);
635 
636         setOperationAction(ISD::SETCC, MVT::v2i64, Legal);
637       }
638       else {
639         setOperationAction(ISD::SHL, MVT::v2i64, Expand);
640         setOperationAction(ISD::SRA, MVT::v2i64, Expand);
641         setOperationAction(ISD::SRL, MVT::v2i64, Expand);
642 
643         setOperationAction(ISD::SETCC, MVT::v2i64, Custom);
644 
645         // VSX v2i64 only supports non-arithmetic operations.
646         setOperationAction(ISD::ADD, MVT::v2i64, Expand);
647         setOperationAction(ISD::SUB, MVT::v2i64, Expand);
648       }
649 
650       setOperationAction(ISD::LOAD, MVT::v2i64, Promote);
651       AddPromotedToType (ISD::LOAD, MVT::v2i64, MVT::v2f64);
652       setOperationAction(ISD::STORE, MVT::v2i64, Promote);
653       AddPromotedToType (ISD::STORE, MVT::v2i64, MVT::v2f64);
654 
655       setOperationAction(ISD::VECTOR_SHUFFLE, MVT::v2i64, Legal);
656 
657       setOperationAction(ISD::SINT_TO_FP, MVT::v2i64, Legal);
658       setOperationAction(ISD::UINT_TO_FP, MVT::v2i64, Legal);
659       setOperationAction(ISD::FP_TO_SINT, MVT::v2i64, Legal);
660       setOperationAction(ISD::FP_TO_UINT, MVT::v2i64, Legal);
661 
662       // Vector operation legalization checks the result type of
663       // SIGN_EXTEND_INREG, overall legalization checks the inner type.
664       setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::v2i64, Legal);
665       setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::v2i32, Legal);
666       setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::v2i16, Custom);
667       setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::v2i8, Custom);
668 
669       setOperationAction(ISD::FNEG, MVT::v4f32, Legal);
670       setOperationAction(ISD::FNEG, MVT::v2f64, Legal);
671       setOperationAction(ISD::FABS, MVT::v4f32, Legal);
672       setOperationAction(ISD::FABS, MVT::v2f64, Legal);
673 
674       addRegisterClass(MVT::v2i64, &PPC::VSRCRegClass);
675     }
676 
677     if (Subtarget.hasP8Altivec()) {
678       addRegisterClass(MVT::v2i64, &PPC::VRRCRegClass);
679       addRegisterClass(MVT::v1i128, &PPC::VRRCRegClass);
680     }
681 
682     if (Subtarget.hasP9Vector()) {
683       setOperationAction(ISD::INSERT_VECTOR_ELT, MVT::v4i32, Custom);
684       setOperationAction(ISD::INSERT_VECTOR_ELT, MVT::v4f32, Custom);
685     }
686 
687     if (Subtarget.isISA3_0() && Subtarget.hasDirectMove())
688       setOperationAction(ISD::BUILD_VECTOR, MVT::v2i64, Custom);
689   }
690 
691   if (Subtarget.hasQPX()) {
692     setOperationAction(ISD::FADD, MVT::v4f64, Legal);
693     setOperationAction(ISD::FSUB, MVT::v4f64, Legal);
694     setOperationAction(ISD::FMUL, MVT::v4f64, Legal);
695     setOperationAction(ISD::FREM, MVT::v4f64, Expand);
696 
697     setOperationAction(ISD::FCOPYSIGN, MVT::v4f64, Legal);
698     setOperationAction(ISD::FGETSIGN, MVT::v4f64, Expand);
699 
700     setOperationAction(ISD::LOAD  , MVT::v4f64, Custom);
701     setOperationAction(ISD::STORE , MVT::v4f64, Custom);
702 
703     setTruncStoreAction(MVT::v4f64, MVT::v4f32, Custom);
704     setLoadExtAction(ISD::EXTLOAD, MVT::v4f64, MVT::v4f32, Custom);
705 
706     if (!Subtarget.useCRBits())
707       setOperationAction(ISD::SELECT, MVT::v4f64, Expand);
708     setOperationAction(ISD::VSELECT, MVT::v4f64, Legal);
709 
710     setOperationAction(ISD::EXTRACT_VECTOR_ELT , MVT::v4f64, Legal);
711     setOperationAction(ISD::INSERT_VECTOR_ELT , MVT::v4f64, Expand);
712     setOperationAction(ISD::CONCAT_VECTORS , MVT::v4f64, Expand);
713     setOperationAction(ISD::EXTRACT_SUBVECTOR , MVT::v4f64, Expand);
714     setOperationAction(ISD::VECTOR_SHUFFLE , MVT::v4f64, Custom);
715     setOperationAction(ISD::SCALAR_TO_VECTOR, MVT::v4f64, Legal);
716     setOperationAction(ISD::BUILD_VECTOR, MVT::v4f64, Custom);
717 
718     setOperationAction(ISD::FP_TO_SINT , MVT::v4f64, Legal);
719     setOperationAction(ISD::FP_TO_UINT , MVT::v4f64, Expand);
720 
721     setOperationAction(ISD::FP_ROUND , MVT::v4f32, Legal);
722     setOperationAction(ISD::FP_ROUND_INREG , MVT::v4f32, Expand);
723     setOperationAction(ISD::FP_EXTEND, MVT::v4f64, Legal);
724 
725     setOperationAction(ISD::FNEG , MVT::v4f64, Legal);
726     setOperationAction(ISD::FABS , MVT::v4f64, Legal);
727     setOperationAction(ISD::FSIN , MVT::v4f64, Expand);
728     setOperationAction(ISD::FCOS , MVT::v4f64, Expand);
729     setOperationAction(ISD::FPOWI , MVT::v4f64, Expand);
730     setOperationAction(ISD::FPOW , MVT::v4f64, Expand);
731     setOperationAction(ISD::FLOG , MVT::v4f64, Expand);
732     setOperationAction(ISD::FLOG2 , MVT::v4f64, Expand);
733     setOperationAction(ISD::FLOG10 , MVT::v4f64, Expand);
734     setOperationAction(ISD::FEXP , MVT::v4f64, Expand);
735     setOperationAction(ISD::FEXP2 , MVT::v4f64, Expand);
736 
737     setOperationAction(ISD::FMINNUM, MVT::v4f64, Legal);
738     setOperationAction(ISD::FMAXNUM, MVT::v4f64, Legal);
739 
740     setIndexedLoadAction(ISD::PRE_INC, MVT::v4f64, Legal);
741     setIndexedStoreAction(ISD::PRE_INC, MVT::v4f64, Legal);
742 
743     addRegisterClass(MVT::v4f64, &PPC::QFRCRegClass);
744 
745     setOperationAction(ISD::FADD, MVT::v4f32, Legal);
746     setOperationAction(ISD::FSUB, MVT::v4f32, Legal);
747     setOperationAction(ISD::FMUL, MVT::v4f32, Legal);
748     setOperationAction(ISD::FREM, MVT::v4f32, Expand);
749 
750     setOperationAction(ISD::FCOPYSIGN, MVT::v4f32, Legal);
751     setOperationAction(ISD::FGETSIGN, MVT::v4f32, Expand);
752 
753     setOperationAction(ISD::LOAD  , MVT::v4f32, Custom);
754     setOperationAction(ISD::STORE , MVT::v4f32, Custom);
755 
756     if (!Subtarget.useCRBits())
757       setOperationAction(ISD::SELECT, MVT::v4f32, Expand);
758     setOperationAction(ISD::VSELECT, MVT::v4f32, Legal);
759 
760     setOperationAction(ISD::EXTRACT_VECTOR_ELT , MVT::v4f32, Legal);
761     setOperationAction(ISD::INSERT_VECTOR_ELT , MVT::v4f32, Expand);
762     setOperationAction(ISD::CONCAT_VECTORS , MVT::v4f32, Expand);
763     setOperationAction(ISD::EXTRACT_SUBVECTOR , MVT::v4f32, Expand);
764     setOperationAction(ISD::VECTOR_SHUFFLE , MVT::v4f32, Custom);
765     setOperationAction(ISD::SCALAR_TO_VECTOR, MVT::v4f32, Legal);
766     setOperationAction(ISD::BUILD_VECTOR, MVT::v4f32, Custom);
767 
768     setOperationAction(ISD::FP_TO_SINT , MVT::v4f32, Legal);
769     setOperationAction(ISD::FP_TO_UINT , MVT::v4f32, Expand);
770 
771     setOperationAction(ISD::FNEG , MVT::v4f32, Legal);
772     setOperationAction(ISD::FABS , MVT::v4f32, Legal);
773     setOperationAction(ISD::FSIN , MVT::v4f32, Expand);
774     setOperationAction(ISD::FCOS , MVT::v4f32, Expand);
775     setOperationAction(ISD::FPOWI , MVT::v4f32, Expand);
776     setOperationAction(ISD::FPOW , MVT::v4f32, Expand);
777     setOperationAction(ISD::FLOG , MVT::v4f32, Expand);
778     setOperationAction(ISD::FLOG2 , MVT::v4f32, Expand);
779     setOperationAction(ISD::FLOG10 , MVT::v4f32, Expand);
780     setOperationAction(ISD::FEXP , MVT::v4f32, Expand);
781     setOperationAction(ISD::FEXP2 , MVT::v4f32, Expand);
782 
783     setOperationAction(ISD::FMINNUM, MVT::v4f32, Legal);
784     setOperationAction(ISD::FMAXNUM, MVT::v4f32, Legal);
785 
786     setIndexedLoadAction(ISD::PRE_INC, MVT::v4f32, Legal);
787     setIndexedStoreAction(ISD::PRE_INC, MVT::v4f32, Legal);
788 
789     addRegisterClass(MVT::v4f32, &PPC::QSRCRegClass);
790 
791     setOperationAction(ISD::AND , MVT::v4i1, Legal);
792     setOperationAction(ISD::OR , MVT::v4i1, Legal);
793     setOperationAction(ISD::XOR , MVT::v4i1, Legal);
794 
795     if (!Subtarget.useCRBits())
796       setOperationAction(ISD::SELECT, MVT::v4i1, Expand);
797     setOperationAction(ISD::VSELECT, MVT::v4i1, Legal);
798 
799     setOperationAction(ISD::LOAD  , MVT::v4i1, Custom);
800     setOperationAction(ISD::STORE , MVT::v4i1, Custom);
801 
802     setOperationAction(ISD::EXTRACT_VECTOR_ELT , MVT::v4i1, Custom);
803     setOperationAction(ISD::INSERT_VECTOR_ELT , MVT::v4i1, Expand);
804     setOperationAction(ISD::CONCAT_VECTORS , MVT::v4i1, Expand);
805     setOperationAction(ISD::EXTRACT_SUBVECTOR , MVT::v4i1, Expand);
806     setOperationAction(ISD::VECTOR_SHUFFLE , MVT::v4i1, Custom);
807     setOperationAction(ISD::SCALAR_TO_VECTOR, MVT::v4i1, Expand);
808     setOperationAction(ISD::BUILD_VECTOR, MVT::v4i1, Custom);
809 
810     setOperationAction(ISD::SINT_TO_FP, MVT::v4i1, Custom);
811     setOperationAction(ISD::UINT_TO_FP, MVT::v4i1, Custom);
812 
813     addRegisterClass(MVT::v4i1, &PPC::QBRCRegClass);
814 
815     setOperationAction(ISD::FFLOOR, MVT::v4f64, Legal);
816     setOperationAction(ISD::FCEIL,  MVT::v4f64, Legal);
817     setOperationAction(ISD::FTRUNC, MVT::v4f64, Legal);
818     setOperationAction(ISD::FROUND, MVT::v4f64, Legal);
819 
820     setOperationAction(ISD::FFLOOR, MVT::v4f32, Legal);
821     setOperationAction(ISD::FCEIL,  MVT::v4f32, Legal);
822     setOperationAction(ISD::FTRUNC, MVT::v4f32, Legal);
823     setOperationAction(ISD::FROUND, MVT::v4f32, Legal);
824 
825     setOperationAction(ISD::FNEARBYINT, MVT::v4f64, Expand);
826     setOperationAction(ISD::FNEARBYINT, MVT::v4f32, Expand);
827 
828     // These need to set FE_INEXACT, and so cannot be vectorized here.
829     setOperationAction(ISD::FRINT, MVT::v4f64, Expand);
830     setOperationAction(ISD::FRINT, MVT::v4f32, Expand);
831 
832     if (TM.Options.UnsafeFPMath) {
833       setOperationAction(ISD::FDIV, MVT::v4f64, Legal);
834       setOperationAction(ISD::FSQRT, MVT::v4f64, Legal);
835 
836       setOperationAction(ISD::FDIV, MVT::v4f32, Legal);
837       setOperationAction(ISD::FSQRT, MVT::v4f32, Legal);
838     } else {
839       setOperationAction(ISD::FDIV, MVT::v4f64, Expand);
840       setOperationAction(ISD::FSQRT, MVT::v4f64, Expand);
841 
842       setOperationAction(ISD::FDIV, MVT::v4f32, Expand);
843       setOperationAction(ISD::FSQRT, MVT::v4f32, Expand);
844     }
845   }
846 
847   if (Subtarget.has64BitSupport())
848     setOperationAction(ISD::PREFETCH, MVT::Other, Legal);
849 
850   setOperationAction(ISD::READCYCLECOUNTER, MVT::i64, isPPC64 ? Legal : Custom);
851 
852   if (!isPPC64) {
853     setOperationAction(ISD::ATOMIC_LOAD,  MVT::i64, Expand);
854     setOperationAction(ISD::ATOMIC_STORE, MVT::i64, Expand);
855   }
856 
857   setBooleanContents(ZeroOrOneBooleanContent);
858 
859   if (Subtarget.hasAltivec()) {
860     // Altivec instructions set fields to all zeros or all ones.
861     setBooleanVectorContents(ZeroOrNegativeOneBooleanContent);
862   }
863 
864   if (!isPPC64) {
865     // These libcalls are not available in 32-bit.
866     setLibcallName(RTLIB::SHL_I128, nullptr);
867     setLibcallName(RTLIB::SRL_I128, nullptr);
868     setLibcallName(RTLIB::SRA_I128, nullptr);
869   }
870 
871   setStackPointerRegisterToSaveRestore(isPPC64 ? PPC::X1 : PPC::R1);
872 
873   // We have target-specific dag combine patterns for the following nodes:
874   setTargetDAGCombine(ISD::SINT_TO_FP);
875   setTargetDAGCombine(ISD::BUILD_VECTOR);
876   if (Subtarget.hasFPCVT())
877     setTargetDAGCombine(ISD::UINT_TO_FP);
878   setTargetDAGCombine(ISD::LOAD);
879   setTargetDAGCombine(ISD::STORE);
880   setTargetDAGCombine(ISD::BR_CC);
881   if (Subtarget.useCRBits())
882     setTargetDAGCombine(ISD::BRCOND);
883   setTargetDAGCombine(ISD::BSWAP);
884   setTargetDAGCombine(ISD::INTRINSIC_WO_CHAIN);
885   setTargetDAGCombine(ISD::INTRINSIC_W_CHAIN);
886   setTargetDAGCombine(ISD::INTRINSIC_VOID);
887 
888   setTargetDAGCombine(ISD::SIGN_EXTEND);
889   setTargetDAGCombine(ISD::ZERO_EXTEND);
890   setTargetDAGCombine(ISD::ANY_EXTEND);
891 
892   if (Subtarget.useCRBits()) {
893     setTargetDAGCombine(ISD::TRUNCATE);
894     setTargetDAGCombine(ISD::SETCC);
895     setTargetDAGCombine(ISD::SELECT_CC);
896   }
897 
898   // Use reciprocal estimates.
899   if (TM.Options.UnsafeFPMath) {
900     setTargetDAGCombine(ISD::FDIV);
901     setTargetDAGCombine(ISD::FSQRT);
902   }
903 
904   // For the estimates, convergence is quadratic, so we essentially double the
905   // number of digits correct after every iteration. For both FRE and FRSQRTE,
906   // the minimum architected relative accuracy is 2^-5. When hasRecipPrec(),
  // this is 2^-14. An IEEE float has 23 fraction bits and a double has 52.
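  // Starting from 2^-5, one step gives 2^-10, two give 2^-20, three give
  // 2^-40 (enough for float) and four give 2^-80 (enough for double).
  // Starting from 2^-14, one step suffices for float and two for double,
  // hence the step counts below.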
908   unsigned RefinementSteps = Subtarget.hasRecipPrec() ? 1 : 3,
909            RefinementSteps64 = RefinementSteps + 1;
910 
911   ReciprocalEstimates.set("sqrtf", true, RefinementSteps);
912   ReciprocalEstimates.set("vec-sqrtf", true, RefinementSteps);
913   ReciprocalEstimates.set("divf", true, RefinementSteps);
914   ReciprocalEstimates.set("vec-divf", true, RefinementSteps);
915 
916   ReciprocalEstimates.set("sqrtd", true, RefinementSteps64);
917   ReciprocalEstimates.set("vec-sqrtd", true, RefinementSteps64);
918   ReciprocalEstimates.set("divd", true, RefinementSteps64);
919   ReciprocalEstimates.set("vec-divd", true, RefinementSteps64);
920 
921   // Darwin long double math library functions have $LDBL128 appended.
922   if (Subtarget.isDarwin()) {
923     setLibcallName(RTLIB::COS_PPCF128, "cosl$LDBL128");
924     setLibcallName(RTLIB::POW_PPCF128, "powl$LDBL128");
925     setLibcallName(RTLIB::REM_PPCF128, "fmodl$LDBL128");
926     setLibcallName(RTLIB::SIN_PPCF128, "sinl$LDBL128");
927     setLibcallName(RTLIB::SQRT_PPCF128, "sqrtl$LDBL128");
928     setLibcallName(RTLIB::LOG_PPCF128, "logl$LDBL128");
929     setLibcallName(RTLIB::LOG2_PPCF128, "log2l$LDBL128");
930     setLibcallName(RTLIB::LOG10_PPCF128, "log10l$LDBL128");
931     setLibcallName(RTLIB::EXP_PPCF128, "expl$LDBL128");
932     setLibcallName(RTLIB::EXP2_PPCF128, "exp2l$LDBL128");
933   }
934 
935   // With 32 condition bits, we don't need to sink (and duplicate) compares
936   // aggressively in CodeGenPrep.
937   if (Subtarget.useCRBits()) {
938     setHasMultipleConditionRegisters();
939     setJumpIsExpensive();
940   }
941 
942   setMinFunctionAlignment(2);
943   if (Subtarget.isDarwin())
944     setPrefFunctionAlignment(4);
945 
946   switch (Subtarget.getDarwinDirective()) {
947   default: break;
948   case PPC::DIR_970:
949   case PPC::DIR_A2:
950   case PPC::DIR_E500mc:
951   case PPC::DIR_E5500:
952   case PPC::DIR_PWR4:
953   case PPC::DIR_PWR5:
954   case PPC::DIR_PWR5X:
955   case PPC::DIR_PWR6:
956   case PPC::DIR_PWR6X:
957   case PPC::DIR_PWR7:
958   case PPC::DIR_PWR8:
959   case PPC::DIR_PWR9:
960     setPrefFunctionAlignment(4);
961     setPrefLoopAlignment(4);
962     break;
963   }
964 
965   if (Subtarget.enableMachineScheduler())
966     setSchedulingPreference(Sched::Source);
967   else
968     setSchedulingPreference(Sched::Hybrid);
969 
970   computeRegisterProperties(STI.getRegisterInfo());
971 
972   // The Freescale cores do better with aggressive inlining of memcpy and
  // friends. GCC uses the same threshold of 128 bytes (= 32 word stores).
974   if (Subtarget.getDarwinDirective() == PPC::DIR_E500mc ||
975       Subtarget.getDarwinDirective() == PPC::DIR_E5500) {
976     MaxStoresPerMemset = 32;
977     MaxStoresPerMemsetOptSize = 16;
978     MaxStoresPerMemcpy = 32;
979     MaxStoresPerMemcpyOptSize = 8;
980     MaxStoresPerMemmove = 32;
981     MaxStoresPerMemmoveOptSize = 8;
982   } else if (Subtarget.getDarwinDirective() == PPC::DIR_A2) {
983     // The A2 also benefits from (very) aggressive inlining of memcpy and
    // friends. The overhead of the function call, even when warm, can be
    // over one hundred cycles.
986     MaxStoresPerMemset = 128;
987     MaxStoresPerMemcpy = 128;
988     MaxStoresPerMemmove = 128;
989   }
990 }
991 
992 /// getMaxByValAlign - Helper for getByValTypeAlignment to determine
993 /// the desired ByVal argument alignment.
994 static void getMaxByValAlign(Type *Ty, unsigned &MaxAlign,
995                              unsigned MaxMaxAlign) {
996   if (MaxAlign == MaxMaxAlign)
997     return;
998   if (VectorType *VTy = dyn_cast<VectorType>(Ty)) {
999     if (MaxMaxAlign >= 32 && VTy->getBitWidth() >= 256)
1000       MaxAlign = 32;
1001     else if (VTy->getBitWidth() >= 128 && MaxAlign < 16)
1002       MaxAlign = 16;
1003   } else if (ArrayType *ATy = dyn_cast<ArrayType>(Ty)) {
1004     unsigned EltAlign = 0;
1005     getMaxByValAlign(ATy->getElementType(), EltAlign, MaxMaxAlign);
1006     if (EltAlign > MaxAlign)
1007       MaxAlign = EltAlign;
1008   } else if (StructType *STy = dyn_cast<StructType>(Ty)) {
1009     for (auto *EltTy : STy->elements()) {
1010       unsigned EltAlign = 0;
1011       getMaxByValAlign(EltTy, EltAlign, MaxMaxAlign);
1012       if (EltAlign > MaxAlign)
1013         MaxAlign = EltAlign;
1014       if (MaxAlign == MaxMaxAlign)
1015         break;
1016     }
1017   }
1018 }
1019 
1020 /// getByValTypeAlignment - Return the desired alignment for ByVal aggregate
1021 /// function arguments in the caller parameter area.
1022 unsigned PPCTargetLowering::getByValTypeAlignment(Type *Ty,
1023                                                   const DataLayout &DL) const {
  // Darwin passes everything on a 4-byte boundary.
1025   if (Subtarget.isDarwin())
1026     return 4;
1027 
  // 16-byte and wider vectors are passed on a 16-byte boundary.
  // Everything else is passed on an 8-byte boundary on PPC64 and a 4-byte
  // boundary on PPC32.
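  // For example, with Altivec an aggregate containing a 128-bit vector is
  // aligned to 16 bytes, and with QPX a 256-bit vector raises this to 32.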
1030   unsigned Align = Subtarget.isPPC64() ? 8 : 4;
1031   if (Subtarget.hasAltivec() || Subtarget.hasQPX())
1032     getMaxByValAlign(Ty, Align, Subtarget.hasQPX() ? 32 : 16);
1033   return Align;
1034 }
1035 
1036 bool PPCTargetLowering::useSoftFloat() const {
1037   return Subtarget.useSoftFloat();
1038 }
1039 
1040 const char *PPCTargetLowering::getTargetNodeName(unsigned Opcode) const {
1041   switch ((PPCISD::NodeType)Opcode) {
1042   case PPCISD::FIRST_NUMBER:    break;
1043   case PPCISD::FSEL:            return "PPCISD::FSEL";
1044   case PPCISD::FCFID:           return "PPCISD::FCFID";
1045   case PPCISD::FCFIDU:          return "PPCISD::FCFIDU";
1046   case PPCISD::FCFIDS:          return "PPCISD::FCFIDS";
1047   case PPCISD::FCFIDUS:         return "PPCISD::FCFIDUS";
1048   case PPCISD::FCTIDZ:          return "PPCISD::FCTIDZ";
1049   case PPCISD::FCTIWZ:          return "PPCISD::FCTIWZ";
1050   case PPCISD::FCTIDUZ:         return "PPCISD::FCTIDUZ";
1051   case PPCISD::FCTIWUZ:         return "PPCISD::FCTIWUZ";
1052   case PPCISD::FRE:             return "PPCISD::FRE";
1053   case PPCISD::FRSQRTE:         return "PPCISD::FRSQRTE";
1054   case PPCISD::STFIWX:          return "PPCISD::STFIWX";
1055   case PPCISD::VMADDFP:         return "PPCISD::VMADDFP";
1056   case PPCISD::VNMSUBFP:        return "PPCISD::VNMSUBFP";
1057   case PPCISD::VPERM:           return "PPCISD::VPERM";
1058   case PPCISD::XXSPLT:          return "PPCISD::XXSPLT";
1059   case PPCISD::XXINSERT:        return "PPCISD::XXINSERT";
1060   case PPCISD::VECSHL:          return "PPCISD::VECSHL";
1061   case PPCISD::CMPB:            return "PPCISD::CMPB";
1062   case PPCISD::Hi:              return "PPCISD::Hi";
1063   case PPCISD::Lo:              return "PPCISD::Lo";
1064   case PPCISD::TOC_ENTRY:       return "PPCISD::TOC_ENTRY";
1065   case PPCISD::DYNALLOC:        return "PPCISD::DYNALLOC";
1066   case PPCISD::DYNAREAOFFSET:   return "PPCISD::DYNAREAOFFSET";
1067   case PPCISD::GlobalBaseReg:   return "PPCISD::GlobalBaseReg";
1068   case PPCISD::SRL:             return "PPCISD::SRL";
1069   case PPCISD::SRA:             return "PPCISD::SRA";
1070   case PPCISD::SHL:             return "PPCISD::SHL";
1071   case PPCISD::SRA_ADDZE:       return "PPCISD::SRA_ADDZE";
1072   case PPCISD::CALL:            return "PPCISD::CALL";
1073   case PPCISD::CALL_NOP:        return "PPCISD::CALL_NOP";
1074   case PPCISD::MTCTR:           return "PPCISD::MTCTR";
1075   case PPCISD::BCTRL:           return "PPCISD::BCTRL";
1076   case PPCISD::BCTRL_LOAD_TOC:  return "PPCISD::BCTRL_LOAD_TOC";
1077   case PPCISD::RET_FLAG:        return "PPCISD::RET_FLAG";
1078   case PPCISD::READ_TIME_BASE:  return "PPCISD::READ_TIME_BASE";
1079   case PPCISD::EH_SJLJ_SETJMP:  return "PPCISD::EH_SJLJ_SETJMP";
1080   case PPCISD::EH_SJLJ_LONGJMP: return "PPCISD::EH_SJLJ_LONGJMP";
1081   case PPCISD::MFOCRF:          return "PPCISD::MFOCRF";
1082   case PPCISD::MFVSR:           return "PPCISD::MFVSR";
1083   case PPCISD::MTVSRA:          return "PPCISD::MTVSRA";
1084   case PPCISD::MTVSRZ:          return "PPCISD::MTVSRZ";
1085   case PPCISD::SINT_VEC_TO_FP:  return "PPCISD::SINT_VEC_TO_FP";
1086   case PPCISD::UINT_VEC_TO_FP:  return "PPCISD::UINT_VEC_TO_FP";
1087   case PPCISD::ANDIo_1_EQ_BIT:  return "PPCISD::ANDIo_1_EQ_BIT";
1088   case PPCISD::ANDIo_1_GT_BIT:  return "PPCISD::ANDIo_1_GT_BIT";
1089   case PPCISD::VCMP:            return "PPCISD::VCMP";
1090   case PPCISD::VCMPo:           return "PPCISD::VCMPo";
1091   case PPCISD::LBRX:            return "PPCISD::LBRX";
1092   case PPCISD::STBRX:           return "PPCISD::STBRX";
1093   case PPCISD::LFIWAX:          return "PPCISD::LFIWAX";
1094   case PPCISD::LFIWZX:          return "PPCISD::LFIWZX";
1095   case PPCISD::LXSIZX:          return "PPCISD::LXSIZX";
1096   case PPCISD::STXSIX:          return "PPCISD::STXSIX";
1097   case PPCISD::VEXTS:           return "PPCISD::VEXTS";
1098   case PPCISD::LXVD2X:          return "PPCISD::LXVD2X";
1099   case PPCISD::STXVD2X:         return "PPCISD::STXVD2X";
1100   case PPCISD::COND_BRANCH:     return "PPCISD::COND_BRANCH";
1101   case PPCISD::BDNZ:            return "PPCISD::BDNZ";
1102   case PPCISD::BDZ:             return "PPCISD::BDZ";
1103   case PPCISD::MFFS:            return "PPCISD::MFFS";
1104   case PPCISD::FADDRTZ:         return "PPCISD::FADDRTZ";
1105   case PPCISD::TC_RETURN:       return "PPCISD::TC_RETURN";
1106   case PPCISD::CR6SET:          return "PPCISD::CR6SET";
1107   case PPCISD::CR6UNSET:        return "PPCISD::CR6UNSET";
1108   case PPCISD::PPC32_GOT:       return "PPCISD::PPC32_GOT";
1109   case PPCISD::PPC32_PICGOT:    return "PPCISD::PPC32_PICGOT";
1110   case PPCISD::ADDIS_GOT_TPREL_HA: return "PPCISD::ADDIS_GOT_TPREL_HA";
1111   case PPCISD::LD_GOT_TPREL_L:  return "PPCISD::LD_GOT_TPREL_L";
1112   case PPCISD::ADD_TLS:         return "PPCISD::ADD_TLS";
1113   case PPCISD::ADDIS_TLSGD_HA:  return "PPCISD::ADDIS_TLSGD_HA";
1114   case PPCISD::ADDI_TLSGD_L:    return "PPCISD::ADDI_TLSGD_L";
1115   case PPCISD::GET_TLS_ADDR:    return "PPCISD::GET_TLS_ADDR";
1116   case PPCISD::ADDI_TLSGD_L_ADDR: return "PPCISD::ADDI_TLSGD_L_ADDR";
1117   case PPCISD::ADDIS_TLSLD_HA:  return "PPCISD::ADDIS_TLSLD_HA";
1118   case PPCISD::ADDI_TLSLD_L:    return "PPCISD::ADDI_TLSLD_L";
1119   case PPCISD::GET_TLSLD_ADDR:  return "PPCISD::GET_TLSLD_ADDR";
1120   case PPCISD::ADDI_TLSLD_L_ADDR: return "PPCISD::ADDI_TLSLD_L_ADDR";
1121   case PPCISD::ADDIS_DTPREL_HA: return "PPCISD::ADDIS_DTPREL_HA";
1122   case PPCISD::ADDI_DTPREL_L:   return "PPCISD::ADDI_DTPREL_L";
1123   case PPCISD::VADD_SPLAT:      return "PPCISD::VADD_SPLAT";
1124   case PPCISD::SC:              return "PPCISD::SC";
1125   case PPCISD::CLRBHRB:         return "PPCISD::CLRBHRB";
1126   case PPCISD::MFBHRBE:         return "PPCISD::MFBHRBE";
1127   case PPCISD::RFEBB:           return "PPCISD::RFEBB";
1128   case PPCISD::XXSWAPD:         return "PPCISD::XXSWAPD";
1129   case PPCISD::SWAP_NO_CHAIN:   return "PPCISD::SWAP_NO_CHAIN";
1130   case PPCISD::QVFPERM:         return "PPCISD::QVFPERM";
1131   case PPCISD::QVGPCI:          return "PPCISD::QVGPCI";
1132   case PPCISD::QVALIGNI:        return "PPCISD::QVALIGNI";
1133   case PPCISD::QVESPLATI:       return "PPCISD::QVESPLATI";
1134   case PPCISD::QBFLT:           return "PPCISD::QBFLT";
1135   case PPCISD::QVLFSb:          return "PPCISD::QVLFSb";
1136   }
1137   return nullptr;
1138 }
1139 
1140 EVT PPCTargetLowering::getSetCCResultType(const DataLayout &DL, LLVMContext &C,
1141                                           EVT VT) const {
1142   if (!VT.isVector())
1143     return Subtarget.useCRBits() ? MVT::i1 : MVT::i32;
1144 
1145   if (Subtarget.hasQPX())
1146     return EVT::getVectorVT(C, MVT::i1, VT.getVectorNumElements());
1147 
1148   return VT.changeVectorElementTypeToInteger();
1149 }
1150 
1151 bool PPCTargetLowering::enableAggressiveFMAFusion(EVT VT) const {
1152   assert(VT.isFloatingPoint() && "Non-floating-point FMA?");
1153   return true;
1154 }
1155 
1156 //===----------------------------------------------------------------------===//
1157 // Node matching predicates, for use by the tblgen matching code.
1158 //===----------------------------------------------------------------------===//
1159 
1160 /// isFloatingPointZero - Return true if this is 0.0 or -0.0.
1161 static bool isFloatingPointZero(SDValue Op) {
1162   if (ConstantFPSDNode *CFP = dyn_cast<ConstantFPSDNode>(Op))
1163     return CFP->getValueAPF().isZero();
1164   else if (ISD::isEXTLoad(Op.getNode()) || ISD::isNON_EXTLoad(Op.getNode())) {
1165     // Maybe this has already been legalized into the constant pool?
1166     if (ConstantPoolSDNode *CP = dyn_cast<ConstantPoolSDNode>(Op.getOperand(1)))
1167       if (const ConstantFP *CFP = dyn_cast<ConstantFP>(CP->getConstVal()))
1168         return CFP->getValueAPF().isZero();
1169   }
1170   return false;
1171 }
1172 
/// isConstantOrUndef - Return true if the shuffle mask element Op is undef
/// (negative) or if it matches the specified value.
1175 static bool isConstantOrUndef(int Op, int Val) {
1176   return Op < 0 || Op == Val;
1177 }
1178 
1179 /// isVPKUHUMShuffleMask - Return true if this is the shuffle mask for a
1180 /// VPKUHUM instruction.
1181 /// The ShuffleKind distinguishes between big-endian operations with
1182 /// two different inputs (0), either-endian operations with two identical
1183 /// inputs (1), and little-endian operations with two different inputs (2).
1184 /// For the latter, the input operands are swapped (see PPCInstrAltivec.td).
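/// For example, the big-endian two-input form corresponds to the byte shuffle
/// mask <1, 3, 5, ..., 29, 31>, i.e. the low byte of each halfword of both
/// inputs.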
1185 bool PPC::isVPKUHUMShuffleMask(ShuffleVectorSDNode *N, unsigned ShuffleKind,
1186                                SelectionDAG &DAG) {
1187   bool IsLE = DAG.getDataLayout().isLittleEndian();
1188   if (ShuffleKind == 0) {
1189     if (IsLE)
1190       return false;
1191     for (unsigned i = 0; i != 16; ++i)
1192       if (!isConstantOrUndef(N->getMaskElt(i), i*2+1))
1193         return false;
1194   } else if (ShuffleKind == 2) {
1195     if (!IsLE)
1196       return false;
1197     for (unsigned i = 0; i != 16; ++i)
1198       if (!isConstantOrUndef(N->getMaskElt(i), i*2))
1199         return false;
1200   } else if (ShuffleKind == 1) {
1201     unsigned j = IsLE ? 0 : 1;
1202     for (unsigned i = 0; i != 8; ++i)
1203       if (!isConstantOrUndef(N->getMaskElt(i),    i*2+j) ||
1204           !isConstantOrUndef(N->getMaskElt(i+8),  i*2+j))
1205         return false;
1206   }
1207   return true;
1208 }
1209 
1210 /// isVPKUWUMShuffleMask - Return true if this is the shuffle mask for a
1211 /// VPKUWUM instruction.
1212 /// The ShuffleKind distinguishes between big-endian operations with
1213 /// two different inputs (0), either-endian operations with two identical
1214 /// inputs (1), and little-endian operations with two different inputs (2).
1215 /// For the latter, the input operands are swapped (see PPCInstrAltivec.td).
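/// For illustration, on a big-endian target (ShuffleKind 0) the expected mask
/// is <2,3,6,7,...,30,31>, i.e. the low halfword of each word of the two
/// concatenated inputs; undef mask elements are accepted anywhere.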
1216 bool PPC::isVPKUWUMShuffleMask(ShuffleVectorSDNode *N, unsigned ShuffleKind,
1217                                SelectionDAG &DAG) {
1218   bool IsLE = DAG.getDataLayout().isLittleEndian();
1219   if (ShuffleKind == 0) {
1220     if (IsLE)
1221       return false;
1222     for (unsigned i = 0; i != 16; i += 2)
1223       if (!isConstantOrUndef(N->getMaskElt(i  ),  i*2+2) ||
1224           !isConstantOrUndef(N->getMaskElt(i+1),  i*2+3))
1225         return false;
1226   } else if (ShuffleKind == 2) {
1227     if (!IsLE)
1228       return false;
1229     for (unsigned i = 0; i != 16; i += 2)
1230       if (!isConstantOrUndef(N->getMaskElt(i  ),  i*2) ||
1231           !isConstantOrUndef(N->getMaskElt(i+1),  i*2+1))
1232         return false;
1233   } else if (ShuffleKind == 1) {
1234     unsigned j = IsLE ? 0 : 2;
1235     for (unsigned i = 0; i != 8; i += 2)
1236       if (!isConstantOrUndef(N->getMaskElt(i  ),  i*2+j)   ||
1237           !isConstantOrUndef(N->getMaskElt(i+1),  i*2+j+1) ||
1238           !isConstantOrUndef(N->getMaskElt(i+8),  i*2+j)   ||
1239           !isConstantOrUndef(N->getMaskElt(i+9),  i*2+j+1))
1240         return false;
1241   }
1242   return true;
1243 }
1244 
1245 /// isVPKUDUMShuffleMask - Return true if this is the shuffle mask for a
1246 /// VPKUDUM instruction, AND the VPKUDUM instruction exists for the
1247 /// current subtarget.
1248 ///
1249 /// The ShuffleKind distinguishes between big-endian operations with
1250 /// two different inputs (0), either-endian operations with two identical
1251 /// inputs (1), and little-endian operations with two different inputs (2).
1252 /// For the latter, the input operands are swapped (see PPCInstrAltivec.td).
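/// For illustration, on a big-endian target (ShuffleKind 0) the expected mask
/// is <4,5,6,7,12,13,14,15,20,21,22,23,28,29,30,31>, i.e. the low word of
/// each doubleword of the two concatenated inputs; undef mask elements are
/// accepted anywhere.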
1253 bool PPC::isVPKUDUMShuffleMask(ShuffleVectorSDNode *N, unsigned ShuffleKind,
1254                                SelectionDAG &DAG) {
1255   const PPCSubtarget& Subtarget =
1256     static_cast<const PPCSubtarget&>(DAG.getSubtarget());
1257   if (!Subtarget.hasP8Vector())
1258     return false;
1259 
1260   bool IsLE = DAG.getDataLayout().isLittleEndian();
1261   if (ShuffleKind == 0) {
1262     if (IsLE)
1263       return false;
1264     for (unsigned i = 0; i != 16; i += 4)
1265       if (!isConstantOrUndef(N->getMaskElt(i  ),  i*2+4) ||
1266           !isConstantOrUndef(N->getMaskElt(i+1),  i*2+5) ||
1267           !isConstantOrUndef(N->getMaskElt(i+2),  i*2+6) ||
1268           !isConstantOrUndef(N->getMaskElt(i+3),  i*2+7))
1269         return false;
1270   } else if (ShuffleKind == 2) {
1271     if (!IsLE)
1272       return false;
1273     for (unsigned i = 0; i != 16; i += 4)
1274       if (!isConstantOrUndef(N->getMaskElt(i  ),  i*2) ||
1275           !isConstantOrUndef(N->getMaskElt(i+1),  i*2+1) ||
1276           !isConstantOrUndef(N->getMaskElt(i+2),  i*2+2) ||
1277           !isConstantOrUndef(N->getMaskElt(i+3),  i*2+3))
1278         return false;
1279   } else if (ShuffleKind == 1) {
1280     unsigned j = IsLE ? 0 : 4;
1281     for (unsigned i = 0; i != 8; i += 4)
1282       if (!isConstantOrUndef(N->getMaskElt(i  ),  i*2+j)   ||
1283           !isConstantOrUndef(N->getMaskElt(i+1),  i*2+j+1) ||
1284           !isConstantOrUndef(N->getMaskElt(i+2),  i*2+j+2) ||
1285           !isConstantOrUndef(N->getMaskElt(i+3),  i*2+j+3) ||
1286           !isConstantOrUndef(N->getMaskElt(i+8),  i*2+j)   ||
1287           !isConstantOrUndef(N->getMaskElt(i+9),  i*2+j+1) ||
1288           !isConstantOrUndef(N->getMaskElt(i+10), i*2+j+2) ||
1289           !isConstantOrUndef(N->getMaskElt(i+11), i*2+j+3))
1290         return false;
1291   }
1292   return true;
1293 }
1294 
1295 /// isVMerge - Common function, used to match vmrg* shuffles.
1296 ///
1297 static bool isVMerge(ShuffleVectorSDNode *N, unsigned UnitSize,
1298                      unsigned LHSStart, unsigned RHSStart) {
1299   if (N->getValueType(0) != MVT::v16i8)
1300     return false;
1301   assert((UnitSize == 1 || UnitSize == 2 || UnitSize == 4) &&
1302          "Unsupported merge size!");
1303 
1304   for (unsigned i = 0; i != 8/UnitSize; ++i)     // Step over units
1305     for (unsigned j = 0; j != UnitSize; ++j) {   // Step over bytes within unit
1306       if (!isConstantOrUndef(N->getMaskElt(i*UnitSize*2+j),
1307                              LHSStart+j+i*UnitSize) ||
1308           !isConstantOrUndef(N->getMaskElt(i*UnitSize*2+UnitSize+j),
1309                              RHSStart+j+i*UnitSize))
1310         return false;
1311     }
1312   return true;
1313 }
1314 
1315 /// isVMRGLShuffleMask - Return true if this is a shuffle mask suitable for
1316 /// a VMRGL* instruction with the specified unit size (1,2 or 4 bytes).
1317 /// The ShuffleKind distinguishes between big-endian merges with two
1318 /// different inputs (0), either-endian merges with two identical inputs (1),
1319 /// and little-endian merges with two different inputs (2).  For the latter,
1320 /// the input operands are swapped (see PPCInstrAltivec.td).
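/// For illustration, a big-endian vmrglb (UnitSize 1, ShuffleKind 0) expects
/// the mask <8,24,9,25,...,15,31>, interleaving the low halves of the two
/// inputs byte by byte; undef mask elements are accepted anywhere.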
1321 bool PPC::isVMRGLShuffleMask(ShuffleVectorSDNode *N, unsigned UnitSize,
1322                              unsigned ShuffleKind, SelectionDAG &DAG) {
1323   if (DAG.getDataLayout().isLittleEndian()) {
1324     if (ShuffleKind == 1) // unary
1325       return isVMerge(N, UnitSize, 0, 0);
1326     else if (ShuffleKind == 2) // swapped
1327       return isVMerge(N, UnitSize, 0, 16);
1328     else
1329       return false;
1330   } else {
1331     if (ShuffleKind == 1) // unary
1332       return isVMerge(N, UnitSize, 8, 8);
1333     else if (ShuffleKind == 0) // normal
1334       return isVMerge(N, UnitSize, 8, 24);
1335     else
1336       return false;
1337   }
1338 }
1339 
1340 /// isVMRGHShuffleMask - Return true if this is a shuffle mask suitable for
1341 /// a VMRGH* instruction with the specified unit size (1,2 or 4 bytes).
1342 /// The ShuffleKind distinguishes between big-endian merges with two
1343 /// different inputs (0), either-endian merges with two identical inputs (1),
1344 /// and little-endian merges with two different inputs (2).  For the latter,
1345 /// the input operands are swapped (see PPCInstrAltivec.td).
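/// For illustration, a big-endian vmrghb (UnitSize 1, ShuffleKind 0) expects
/// the mask <0,16,1,17,...,7,23>, interleaving the high halves of the two
/// inputs byte by byte; undef mask elements are accepted anywhere.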
1346 bool PPC::isVMRGHShuffleMask(ShuffleVectorSDNode *N, unsigned UnitSize,
1347                              unsigned ShuffleKind, SelectionDAG &DAG) {
1348   if (DAG.getDataLayout().isLittleEndian()) {
1349     if (ShuffleKind == 1) // unary
1350       return isVMerge(N, UnitSize, 8, 8);
1351     else if (ShuffleKind == 2) // swapped
1352       return isVMerge(N, UnitSize, 8, 24);
1353     else
1354       return false;
1355   } else {
1356     if (ShuffleKind == 1) // unary
1357       return isVMerge(N, UnitSize, 0, 0);
1358     else if (ShuffleKind == 0) // normal
1359       return isVMerge(N, UnitSize, 0, 16);
1360     else
1361       return false;
1362   }
1363 }
1364 
1365 /**
1366  * \brief Common function used to match vmrgew and vmrgow shuffles
1367  *
1368  * The indexOffset determines whether to look for even or odd words in
 * the shuffle mask. This is based on the endianness of the target
1370  * machine.
1371  *   - Little Endian:
1372  *     - Use offset of 0 to check for odd elements
1373  *     - Use offset of 4 to check for even elements
1374  *   - Big Endian:
1375  *     - Use offset of 0 to check for even elements
1376  *     - Use offset of 4 to check for odd elements
 * A detailed description of the vector element ordering for little-endian
 * and big-endian targets can be found in the IBM developerWorks article
 * "Targeting your applications - what little endian and big endian IBM XL
 * C/C++ compiler differences mean to you":
 * http://www.ibm.com/developerworks/library/l-ibm-xl-c-cpp-compiler/index.html
1382  *
1383  * The mask to the shuffle vector instruction specifies the indices of the
1384  * elements from the two input vectors to place in the result. The elements are
 * numbered in array-access order, starting with the first vector. These
 * vectors are always of type v16i8, so each vector contains 16 byte-sized
 * (i8) elements. More information on the shufflevector instruction can be
 * found in the LLVM Language Reference:
 * http://llvm.org/docs/LangRef.html#shufflevector-instruction
1390  *
1391  * The RHSStartValue indicates whether the same input vectors are used (unary)
1392  * or two different input vectors are used, based on the following:
1393  *   - If the instruction uses the same vector for both inputs, the range of the
1394  *     indices will be 0 to 15. In this case, the RHSStart value passed should
1395  *     be 0.
1396  *   - If the instruction has two different vectors then the range of the
1397  *     indices will be 0 to 31. In this case, the RHSStart value passed should
1398  *     be 16 (indices 0-15 specify elements in the first vector while indices 16
1399  *     to 31 specify elements in the second vector).
1400  *
1401  * \param[in] N The shuffle vector SD Node to analyze
1402  * \param[in] IndexOffset Specifies whether to look for even or odd elements
1403  * \param[in] RHSStartValue Specifies the starting index for the righthand input
1404  * vector to the shuffle_vector instruction
1405  * \return true iff this shuffle vector represents an even or odd word merge
1406  */
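// For illustration, a big-endian vmrgew of two different inputs
// (IndexOffset 0, RHSStartValue 16) expects the mask
// <0,1,2,3,16,17,18,19,8,9,10,11,24,25,26,27>, i.e. the even words of the
// two inputs interleaved; undef mask elements are accepted anywhere.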
1407 static bool isVMerge(ShuffleVectorSDNode *N, unsigned IndexOffset,
1408                      unsigned RHSStartValue) {
1409   if (N->getValueType(0) != MVT::v16i8)
1410     return false;
1411 
1412   for (unsigned i = 0; i < 2; ++i)
1413     for (unsigned j = 0; j < 4; ++j)
1414       if (!isConstantOrUndef(N->getMaskElt(i*4+j),
1415                              i*RHSStartValue+j+IndexOffset) ||
1416           !isConstantOrUndef(N->getMaskElt(i*4+j+8),
1417                              i*RHSStartValue+j+IndexOffset+8))
1418         return false;
1419   return true;
1420 }
1421 
1422 /**
1423  * \brief Determine if the specified shuffle mask is suitable for the vmrgew or
1424  * vmrgow instructions.
1425  *
1426  * \param[in] N The shuffle vector SD Node to analyze
1427  * \param[in] CheckEven Check for an even merge (true) or an odd merge (false)
1428  * \param[in] ShuffleKind Identify the type of merge:
1429  *   - 0 = big-endian merge with two different inputs;
1430  *   - 1 = either-endian merge with two identical inputs;
1431  *   - 2 = little-endian merge with two different inputs (inputs are swapped for
1432  *     little-endian merges).
1433  * \param[in] DAG The current SelectionDAG
 * \return true iff this shuffle mask is suitable for the vmrgew or vmrgow
 * merge selected by CheckEven and ShuffleKind
1435  */
1436 bool PPC::isVMRGEOShuffleMask(ShuffleVectorSDNode *N, bool CheckEven,
1437                               unsigned ShuffleKind, SelectionDAG &DAG) {
1438   if (DAG.getDataLayout().isLittleEndian()) {
1439     unsigned indexOffset = CheckEven ? 4 : 0;
1440     if (ShuffleKind == 1) // Unary
1441       return isVMerge(N, indexOffset, 0);
1442     else if (ShuffleKind == 2) // swapped
1443       return isVMerge(N, indexOffset, 16);
1444     else
1445       return false;
1446   }
1447   else {
1448     unsigned indexOffset = CheckEven ? 0 : 4;
1449     if (ShuffleKind == 1) // Unary
1450       return isVMerge(N, indexOffset, 0);
1451     else if (ShuffleKind == 0) // Normal
1452       return isVMerge(N, indexOffset, 16);
1453     else
1454       return false;
1455   }
1456   return false;
1457 }
1458 
1459 /// isVSLDOIShuffleMask - If this is a vsldoi shuffle mask, return the shift
1460 /// amount, otherwise return -1.
1461 /// The ShuffleKind distinguishes between big-endian operations with two
1462 /// different inputs (0), either-endian operations with two identical inputs
1463 /// (1), and little-endian operations with two different inputs (2).  For the
1464 /// latter, the input operands are swapped (see PPCInstrAltivec.td).
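/// For illustration, on a big-endian target (ShuffleKind 0) the mask
/// <3,4,5,...,18> matches vsldoi with a shift amount of 3; on a little-endian
/// target the same consecutive pattern returns 16 minus that starting value
/// (13 in this example).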
1465 int PPC::isVSLDOIShuffleMask(SDNode *N, unsigned ShuffleKind,
1466                              SelectionDAG &DAG) {
1467   if (N->getValueType(0) != MVT::v16i8)
1468     return -1;
1469 
1470   ShuffleVectorSDNode *SVOp = cast<ShuffleVectorSDNode>(N);
1471 
1472   // Find the first non-undef value in the shuffle mask.
1473   unsigned i;
1474   for (i = 0; i != 16 && SVOp->getMaskElt(i) < 0; ++i)
1475     /*search*/;
1476 
1477   if (i == 16) return -1;  // all undef.
1478 
1479   // Otherwise, check to see if the rest of the elements are consecutively
1480   // numbered from this value.
1481   unsigned ShiftAmt = SVOp->getMaskElt(i);
1482   if (ShiftAmt < i) return -1;
1483 
1484   ShiftAmt -= i;
1485   bool isLE = DAG.getDataLayout().isLittleEndian();
1486 
1487   if ((ShuffleKind == 0 && !isLE) || (ShuffleKind == 2 && isLE)) {
1488     // Check the rest of the elements to see if they are consecutive.
1489     for (++i; i != 16; ++i)
1490       if (!isConstantOrUndef(SVOp->getMaskElt(i), ShiftAmt+i))
1491         return -1;
1492   } else if (ShuffleKind == 1) {
1493     // Check the rest of the elements to see if they are consecutive.
1494     for (++i; i != 16; ++i)
1495       if (!isConstantOrUndef(SVOp->getMaskElt(i), (ShiftAmt+i) & 15))
1496         return -1;
1497   } else
1498     return -1;
1499 
1500   if (isLE)
1501     ShiftAmt = 16 - ShiftAmt;
1502 
1503   return ShiftAmt;
1504 }
1505 
1506 /// isSplatShuffleMask - Return true if the specified VECTOR_SHUFFLE operand
1507 /// specifies a splat of a single element that is suitable for input to
1508 /// VSPLTB/VSPLTH/VSPLTW.
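/// For illustration, with EltSize 4 a splat of word 1 of the input corresponds
/// to the byte mask <4,5,6,7, 4,5,6,7, 4,5,6,7, 4,5,6,7>.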
1509 bool PPC::isSplatShuffleMask(ShuffleVectorSDNode *N, unsigned EltSize) {
1510   assert(N->getValueType(0) == MVT::v16i8 &&
1511          (EltSize == 1 || EltSize == 2 || EltSize == 4));
1512 
1513   // The consecutive indices need to specify an element, not part of two
1514   // different elements.  So abandon ship early if this isn't the case.
1515   if (N->getMaskElt(0) % EltSize != 0)
1516     return false;
1517 
1518   // This is a splat operation if each element of the permute is the same, and
1519   // if the value doesn't reference the second vector.
1520   unsigned ElementBase = N->getMaskElt(0);
1521 
1522   // FIXME: Handle UNDEF elements too!
1523   if (ElementBase >= 16)
1524     return false;
1525 
1526   // Check that the indices are consecutive, in the case of a multi-byte element
1527   // splatted with a v16i8 mask.
1528   for (unsigned i = 1; i != EltSize; ++i)
1529     if (N->getMaskElt(i) < 0 || N->getMaskElt(i) != (int)(i+ElementBase))
1530       return false;
1531 
1532   for (unsigned i = EltSize, e = 16; i != e; i += EltSize) {
1533     if (N->getMaskElt(i) < 0) continue;
1534     for (unsigned j = 0; j != EltSize; ++j)
1535       if (N->getMaskElt(i+j) != N->getMaskElt(j))
1536         return false;
1537   }
1538   return true;
1539 }
1540 
1541 bool PPC::isXXINSERTWMask(ShuffleVectorSDNode *N, unsigned &ShiftElts,
1542                           unsigned &InsertAtByte, bool &Swap, bool IsLE) {
1543 
1544   // Check that the mask is shuffling words
1545   for (unsigned i = 0; i < 4; ++i) {
1546     unsigned B0 = N->getMaskElt(i*4);
1547     unsigned B1 = N->getMaskElt(i*4+1);
1548     unsigned B2 = N->getMaskElt(i*4+2);
1549     unsigned B3 = N->getMaskElt(i*4+3);
1550     if (B0 % 4)
1551       return false;
1552     if (B1 != B0+1 || B2 != B1+1 || B3 != B2+1)
1553       return false;
1554   }
1555 
1556   // Now we look at mask elements 0,4,8,12
1557   unsigned M0 = N->getMaskElt(0) / 4;
1558   unsigned M1 = N->getMaskElt(4) / 4;
1559   unsigned M2 = N->getMaskElt(8) / 4;
1560   unsigned M3 = N->getMaskElt(12) / 4;
1561   unsigned LittleEndianShifts[] = { 2, 1, 0, 3 };
1562   unsigned BigEndianShifts[] = { 3, 0, 1, 2 };
1563 
1564   // Below, let H and L be arbitrary elements of the shuffle mask
1565   // where H is in the range [4,7] and L is in the range [0,3].
1566   // H, 1, 2, 3 or L, 5, 6, 7
1567   if ((M0 > 3 && M1 == 1 && M2 == 2 && M3 == 3) ||
1568       (M0 < 4 && M1 == 5 && M2 == 6 && M3 == 7)) {
1569     ShiftElts = IsLE ? LittleEndianShifts[M0 & 0x3] : BigEndianShifts[M0 & 0x3];
1570     InsertAtByte = IsLE ? 12 : 0;
1571     Swap = M0 < 4;
1572     return true;
1573   }
1574   // 0, H, 2, 3 or 4, L, 6, 7
1575   if ((M1 > 3 && M0 == 0 && M2 == 2 && M3 == 3) ||
1576       (M1 < 4 && M0 == 4 && M2 == 6 && M3 == 7)) {
1577     ShiftElts = IsLE ? LittleEndianShifts[M1 & 0x3] : BigEndianShifts[M1 & 0x3];
1578     InsertAtByte = IsLE ? 8 : 4;
1579     Swap = M1 < 4;
1580     return true;
1581   }
1582   // 0, 1, H, 3 or 4, 5, L, 7
1583   if ((M2 > 3 && M0 == 0 && M1 == 1 && M3 == 3) ||
1584       (M2 < 4 && M0 == 4 && M1 == 5 && M3 == 7)) {
1585     ShiftElts = IsLE ? LittleEndianShifts[M2 & 0x3] : BigEndianShifts[M2 & 0x3];
1586     InsertAtByte = IsLE ? 4 : 8;
1587     Swap = M2 < 4;
1588     return true;
1589   }
1590   // 0, 1, 2, H or 4, 5, 6, L
1591   if ((M3 > 3 && M0 == 0 && M1 == 1 && M2 == 2) ||
1592       (M3 < 4 && M0 == 4 && M1 == 5 && M2 == 6)) {
1593     ShiftElts = IsLE ? LittleEndianShifts[M3 & 0x3] : BigEndianShifts[M3 & 0x3];
1594     InsertAtByte = IsLE ? 0 : 12;
1595     Swap = M3 < 4;
1596     return true;
1597   }
1598 
1599   // If both vector operands for the shuffle are the same vector, the mask will
1600   // contain only elements from the first one and the second one will be undef.
1601   if (N->getOperand(1).isUndef()) {
1602     ShiftElts = 0;
1603     Swap = true;
1604     unsigned XXINSERTWSrcElem = IsLE ? 2 : 1;
1605     if (M0 == XXINSERTWSrcElem && M1 == 1 && M2 == 2 && M3 == 3) {
1606       InsertAtByte = IsLE ? 12 : 0;
1607       return true;
1608     }
1609     if (M0 == 0 && M1 == XXINSERTWSrcElem && M2 == 2 && M3 == 3) {
1610       InsertAtByte = IsLE ? 8 : 4;
1611       return true;
1612     }
1613     if (M0 == 0 && M1 == 1 && M2 == XXINSERTWSrcElem && M3 == 3) {
1614       InsertAtByte = IsLE ? 4 : 8;
1615       return true;
1616     }
1617     if (M0 == 0 && M1 == 1 && M2 == 2 && M3 == XXINSERTWSrcElem) {
1618       InsertAtByte = IsLE ? 0 : 12;
1619       return true;
1620     }
1621   }
1622 
1623   return false;
1624 }
1625 
1626 /// getVSPLTImmediate - Return the appropriate VSPLT* immediate to splat the
1627 /// specified isSplatShuffleMask VECTOR_SHUFFLE mask.
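/// For illustration, for the EltSize-4 splat mask <4,5,6,7, 4,5,6,7, ...> this
/// returns 1 on big-endian targets and 2 on little-endian targets, since the
/// element index is counted from the other end of the register.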
1628 unsigned PPC::getVSPLTImmediate(SDNode *N, unsigned EltSize,
1629                                 SelectionDAG &DAG) {
1630   ShuffleVectorSDNode *SVOp = cast<ShuffleVectorSDNode>(N);
1631   assert(isSplatShuffleMask(SVOp, EltSize));
1632   if (DAG.getDataLayout().isLittleEndian())
1633     return (16 / EltSize) - 1 - (SVOp->getMaskElt(0) / EltSize);
1634   else
1635     return SVOp->getMaskElt(0) / EltSize;
1636 }
1637 
1638 /// get_VSPLTI_elt - If this is a build_vector of constants which can be formed
1639 /// by using a vspltis[bhw] instruction of the specified element size, return
1640 /// the constant being splatted.  The ByteSize field indicates the number of
1641 /// bytes of each element [124] -> [bhw].
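/// For illustration, a v8i16 build_vector of eight 5's queried with ByteSize 2
/// yields the constant 5 (a vspltish 5 splat), while the same vector queried
/// with ByteSize 1 fails because 0x0005 is not a repeated byte pattern.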
1642 SDValue PPC::get_VSPLTI_elt(SDNode *N, unsigned ByteSize, SelectionDAG &DAG) {
1643   SDValue OpVal(nullptr, 0);
1644 
1645   // If ByteSize of the splat is bigger than the element size of the
1646   // build_vector, then we have a case where we are checking for a splat where
1647   // multiple elements of the buildvector are folded together into a single
  // logical element of the splat (e.g. "vspltish 1" to splat {0,1}*8).
1649   unsigned EltSize = 16/N->getNumOperands();
1650   if (EltSize < ByteSize) {
1651     unsigned Multiple = ByteSize/EltSize;   // Number of BV entries per spltval.
1652     SDValue UniquedVals[4];
1653     assert(Multiple > 1 && Multiple <= 4 && "How can this happen?");
1654 
1655     // See if all of the elements in the buildvector agree across.
1656     for (unsigned i = 0, e = N->getNumOperands(); i != e; ++i) {
1657       if (N->getOperand(i).isUndef()) continue;
1658       // If the element isn't a constant, bail fully out.
1659       if (!isa<ConstantSDNode>(N->getOperand(i))) return SDValue();
1660 
1661 
1662       if (!UniquedVals[i&(Multiple-1)].getNode())
1663         UniquedVals[i&(Multiple-1)] = N->getOperand(i);
1664       else if (UniquedVals[i&(Multiple-1)] != N->getOperand(i))
1665         return SDValue();  // no match.
1666     }
1667 
1668     // Okay, if we reached this point, UniquedVals[0..Multiple-1] contains
1669     // either constant or undef values that are identical for each chunk.  See
1670     // if these chunks can form into a larger vspltis*.
1671 
1672     // Check to see if all of the leading entries are either 0 or -1.  If
1673     // neither, then this won't fit into the immediate field.
1674     bool LeadingZero = true;
1675     bool LeadingOnes = true;
1676     for (unsigned i = 0; i != Multiple-1; ++i) {
1677       if (!UniquedVals[i].getNode()) continue;  // Must have been undefs.
1678 
1679       LeadingZero &= isNullConstant(UniquedVals[i]);
1680       LeadingOnes &= isAllOnesConstant(UniquedVals[i]);
1681     }
1682     // Finally, check the least significant entry.
1683     if (LeadingZero) {
1684       if (!UniquedVals[Multiple-1].getNode())
1685         return DAG.getTargetConstant(0, SDLoc(N), MVT::i32);  // 0,0,0,undef
1686       int Val = cast<ConstantSDNode>(UniquedVals[Multiple-1])->getZExtValue();
1687       if (Val < 16)                                   // 0,0,0,4 -> vspltisw(4)
1688         return DAG.getTargetConstant(Val, SDLoc(N), MVT::i32);
1689     }
1690     if (LeadingOnes) {
1691       if (!UniquedVals[Multiple-1].getNode())
1692         return DAG.getTargetConstant(~0U, SDLoc(N), MVT::i32); // -1,-1,-1,undef
      int Val = cast<ConstantSDNode>(UniquedVals[Multiple-1])->getSExtValue();
1694       if (Val >= -16)                            // -1,-1,-1,-2 -> vspltisw(-2)
1695         return DAG.getTargetConstant(Val, SDLoc(N), MVT::i32);
1696     }
1697 
1698     return SDValue();
1699   }
1700 
1701   // Check to see if this buildvec has a single non-undef value in its elements.
1702   for (unsigned i = 0, e = N->getNumOperands(); i != e; ++i) {
1703     if (N->getOperand(i).isUndef()) continue;
1704     if (!OpVal.getNode())
1705       OpVal = N->getOperand(i);
1706     else if (OpVal != N->getOperand(i))
1707       return SDValue();
1708   }
1709 
1710   if (!OpVal.getNode()) return SDValue();  // All UNDEF: use implicit def.
1711 
1712   unsigned ValSizeInBytes = EltSize;
1713   uint64_t Value = 0;
1714   if (ConstantSDNode *CN = dyn_cast<ConstantSDNode>(OpVal)) {
1715     Value = CN->getZExtValue();
1716   } else if (ConstantFPSDNode *CN = dyn_cast<ConstantFPSDNode>(OpVal)) {
1717     assert(CN->getValueType(0) == MVT::f32 && "Only one legal FP vector type!");
1718     Value = FloatToBits(CN->getValueAPF().convertToFloat());
1719   }
1720 
1721   // If the splat value is larger than the element value, then we can never do
1722   // this splat.  The only case that we could fit the replicated bits into our
1723   // immediate field for would be zero, and we prefer to use vxor for it.
1724   if (ValSizeInBytes < ByteSize) return SDValue();
1725 
1726   // If the element value is larger than the splat value, check if it consists
1727   // of a repeated bit pattern of size ByteSize.
1728   if (!APInt(ValSizeInBytes * 8, Value).isSplat(ByteSize * 8))
1729     return SDValue();
1730 
1731   // Properly sign extend the value.
1732   int MaskVal = SignExtend32(Value, ByteSize * 8);
1733 
1734   // If this is zero, don't match, zero matches ISD::isBuildVectorAllZeros.
1735   if (MaskVal == 0) return SDValue();
1736 
1737   // Finally, if this value fits in a 5 bit sext field, return it
1738   if (SignExtend32<5>(MaskVal) == MaskVal)
1739     return DAG.getTargetConstant(MaskVal, SDLoc(N), MVT::i32);
1740   return SDValue();
1741 }
1742 
1743 /// isQVALIGNIShuffleMask - If this is a qvaligni shuffle mask, return the shift
1744 /// amount, otherwise return -1.
1745 int PPC::isQVALIGNIShuffleMask(SDNode *N) {
1746   EVT VT = N->getValueType(0);
1747   if (VT != MVT::v4f64 && VT != MVT::v4f32 && VT != MVT::v4i1)
1748     return -1;
1749 
1750   ShuffleVectorSDNode *SVOp = cast<ShuffleVectorSDNode>(N);
1751 
1752   // Find the first non-undef value in the shuffle mask.
1753   unsigned i;
1754   for (i = 0; i != 4 && SVOp->getMaskElt(i) < 0; ++i)
1755     /*search*/;
1756 
1757   if (i == 4) return -1;  // all undef.
1758 
1759   // Otherwise, check to see if the rest of the elements are consecutively
1760   // numbered from this value.
1761   unsigned ShiftAmt = SVOp->getMaskElt(i);
1762   if (ShiftAmt < i) return -1;
1763   ShiftAmt -= i;
1764 
1765   // Check the rest of the elements to see if they are consecutive.
1766   for (++i; i != 4; ++i)
1767     if (!isConstantOrUndef(SVOp->getMaskElt(i), ShiftAmt+i))
1768       return -1;
1769 
1770   return ShiftAmt;
1771 }
1772 
1773 //===----------------------------------------------------------------------===//
1774 //  Addressing Mode Selection
1775 //===----------------------------------------------------------------------===//
1776 
1777 /// isIntS16Immediate - This method tests to see if the node is either a 32-bit
1778 /// or 64-bit immediate, and if the value can be accurately represented as a
/// sign extension from a 16-bit value.  If so, this returns true and sets Imm
/// to the immediate value.
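/// For example, 32767 and -32768 are accepted, while 32768 is rejected
/// because it does not survive truncation to 16 bits followed by sign
/// extension.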
1781 static bool isIntS16Immediate(SDNode *N, short &Imm) {
1782   if (!isa<ConstantSDNode>(N))
1783     return false;
1784 
1785   Imm = (short)cast<ConstantSDNode>(N)->getZExtValue();
1786   if (N->getValueType(0) == MVT::i32)
1787     return Imm == (int32_t)cast<ConstantSDNode>(N)->getZExtValue();
1788   else
1789     return Imm == (int64_t)cast<ConstantSDNode>(N)->getZExtValue();
1790 }
1791 static bool isIntS16Immediate(SDValue Op, short &Imm) {
1792   return isIntS16Immediate(Op.getNode(), Imm);
1793 }
1794 
/// SelectAddressRegReg - Given the specified address, check to see if it
1796 /// can be represented as an indexed [r+r] operation.  Returns false if it
1797 /// can be more efficiently represented with [r+imm].
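/// For illustration, (add r3, r4) is selected as the indexed form [r3+r4],
/// whereas (add r3, 8) returns false because the constant fits in a signed
/// 16-bit immediate and [r3+8] is preferable.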
1798 bool PPCTargetLowering::SelectAddressRegReg(SDValue N, SDValue &Base,
1799                                             SDValue &Index,
1800                                             SelectionDAG &DAG) const {
1801   short imm = 0;
1802   if (N.getOpcode() == ISD::ADD) {
1803     if (isIntS16Immediate(N.getOperand(1), imm))
1804       return false;    // r+i
1805     if (N.getOperand(1).getOpcode() == PPCISD::Lo)
1806       return false;    // r+i
1807 
1808     Base = N.getOperand(0);
1809     Index = N.getOperand(1);
1810     return true;
1811   } else if (N.getOpcode() == ISD::OR) {
1812     if (isIntS16Immediate(N.getOperand(1), imm))
      return false;    // r+i -- prefer folding the immediate when we can.
1814 
1815     // If this is an or of disjoint bitfields, we can codegen this as an add
1816     // (for better address arithmetic) if the LHS and RHS of the OR are provably
1817     // disjoint.
1818     APInt LHSKnownZero, LHSKnownOne;
1819     APInt RHSKnownZero, RHSKnownOne;
1820     DAG.computeKnownBits(N.getOperand(0),
1821                          LHSKnownZero, LHSKnownOne);
1822 
1823     if (LHSKnownZero.getBoolValue()) {
1824       DAG.computeKnownBits(N.getOperand(1),
1825                            RHSKnownZero, RHSKnownOne);
1826       // If all of the bits are known zero on the LHS or RHS, the add won't
1827       // carry.
1828       if (~(LHSKnownZero | RHSKnownZero) == 0) {
1829         Base = N.getOperand(0);
1830         Index = N.getOperand(1);
1831         return true;
1832       }
1833     }
1834   }
1835 
1836   return false;
1837 }
1838 
1839 // If we happen to be doing an i64 load or store into a stack slot that has
1840 // less than a 4-byte alignment, then the frame-index elimination may need to
1841 // use an indexed load or store instruction (because the offset may not be a
1842 // multiple of 4). The extra register needed to hold the offset comes from the
1843 // register scavenger, and it is possible that the scavenger will need to use
1844 // an emergency spill slot. As a result, we need to make sure that a spill slot
1845 // is allocated when doing an i64 load/store into a less-than-4-byte-aligned
1846 // stack slot.
1847 static void fixupFuncForFI(SelectionDAG &DAG, int FrameIdx, EVT VT) {
1848   // FIXME: This does not handle the LWA case.
1849   if (VT != MVT::i64)
1850     return;
1851 
1852   // NOTE: We'll exclude negative FIs here, which come from argument
1853   // lowering, because there are no known test cases triggering this problem
1854   // using packed structures (or similar). We can remove this exclusion if
1855   // we find such a test case. The reason why this is so test-case driven is
1856   // because this entire 'fixup' is only to prevent crashes (from the
1857   // register scavenger) on not-really-valid inputs. For example, if we have:
1858   //   %a = alloca i1
1859   //   %b = bitcast i1* %a to i64*
  //   store i64 %val, i64* %b
1861   // then the store should really be marked as 'align 1', but is not. If it
1862   // were marked as 'align 1' then the indexed form would have been
1863   // instruction-selected initially, and the problem this 'fixup' is preventing
1864   // won't happen regardless.
1865   if (FrameIdx < 0)
1866     return;
1867 
1868   MachineFunction &MF = DAG.getMachineFunction();
1869   MachineFrameInfo &MFI = MF.getFrameInfo();
1870 
1871   unsigned Align = MFI.getObjectAlignment(FrameIdx);
1872   if (Align >= 4)
1873     return;
1874 
1875   PPCFunctionInfo *FuncInfo = MF.getInfo<PPCFunctionInfo>();
1876   FuncInfo->setHasNonRISpills();
1877 }
1878 
1879 /// Returns true if the address N can be represented by a base register plus
1880 /// a signed 16-bit displacement [r+imm], and if it is not better
1881 /// represented as reg+reg.  If Aligned is true, only accept displacements
1882 /// suitable for STD and friends, i.e. multiples of 4.
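/// For illustration, (add r3, 100) yields Base = r3 and Disp = 100; with
/// Aligned set, a displacement such as 2 is not folded (it is not a multiple
/// of 4) and the address instead falls back to the [r+0] form at the end of
/// this function.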
1883 bool PPCTargetLowering::SelectAddressRegImm(SDValue N, SDValue &Disp,
1884                                             SDValue &Base,
1885                                             SelectionDAG &DAG,
1886                                             bool Aligned) const {
1887   // FIXME dl should come from parent load or store, not from address
1888   SDLoc dl(N);
1889   // If this can be more profitably realized as r+r, fail.
1890   if (SelectAddressRegReg(N, Disp, Base, DAG))
1891     return false;
1892 
1893   if (N.getOpcode() == ISD::ADD) {
1894     short imm = 0;
1895     if (isIntS16Immediate(N.getOperand(1), imm) &&
1896         (!Aligned || (imm & 3) == 0)) {
1897       Disp = DAG.getTargetConstant(imm, dl, N.getValueType());
1898       if (FrameIndexSDNode *FI = dyn_cast<FrameIndexSDNode>(N.getOperand(0))) {
1899         Base = DAG.getTargetFrameIndex(FI->getIndex(), N.getValueType());
1900         fixupFuncForFI(DAG, FI->getIndex(), N.getValueType());
1901       } else {
1902         Base = N.getOperand(0);
1903       }
1904       return true; // [r+i]
1905     } else if (N.getOperand(1).getOpcode() == PPCISD::Lo) {
1906       // Match LOAD (ADD (X, Lo(G))).
1907       assert(!cast<ConstantSDNode>(N.getOperand(1).getOperand(1))->getZExtValue()
1908              && "Cannot handle constant offsets yet!");
1909       Disp = N.getOperand(1).getOperand(0);  // The global address.
1910       assert(Disp.getOpcode() == ISD::TargetGlobalAddress ||
1911              Disp.getOpcode() == ISD::TargetGlobalTLSAddress ||
1912              Disp.getOpcode() == ISD::TargetConstantPool ||
1913              Disp.getOpcode() == ISD::TargetJumpTable);
1914       Base = N.getOperand(0);
1915       return true;  // [&g+r]
1916     }
1917   } else if (N.getOpcode() == ISD::OR) {
1918     short imm = 0;
1919     if (isIntS16Immediate(N.getOperand(1), imm) &&
1920         (!Aligned || (imm & 3) == 0)) {
1921       // If this is an or of disjoint bitfields, we can codegen this as an add
1922       // (for better address arithmetic) if the LHS and RHS of the OR are
1923       // provably disjoint.
1924       APInt LHSKnownZero, LHSKnownOne;
1925       DAG.computeKnownBits(N.getOperand(0), LHSKnownZero, LHSKnownOne);
1926 
1927       if ((LHSKnownZero.getZExtValue()|~(uint64_t)imm) == ~0ULL) {
1928         // If all of the bits are known zero on the LHS or RHS, the add won't
1929         // carry.
1930         if (FrameIndexSDNode *FI =
1931               dyn_cast<FrameIndexSDNode>(N.getOperand(0))) {
1932           Base = DAG.getTargetFrameIndex(FI->getIndex(), N.getValueType());
1933           fixupFuncForFI(DAG, FI->getIndex(), N.getValueType());
1934         } else {
1935           Base = N.getOperand(0);
1936         }
1937         Disp = DAG.getTargetConstant(imm, dl, N.getValueType());
1938         return true;
1939       }
1940     }
1941   } else if (ConstantSDNode *CN = dyn_cast<ConstantSDNode>(N)) {
1942     // Loading from a constant address.
1943 
1944     // If this address fits entirely in a 16-bit sext immediate field, codegen
1945     // this as "d, 0"
1946     short Imm;
1947     if (isIntS16Immediate(CN, Imm) && (!Aligned || (Imm & 3) == 0)) {
1948       Disp = DAG.getTargetConstant(Imm, dl, CN->getValueType(0));
1949       Base = DAG.getRegister(Subtarget.isPPC64() ? PPC::ZERO8 : PPC::ZERO,
1950                              CN->getValueType(0));
1951       return true;
1952     }
1953 
1954     // Handle 32-bit sext immediates with LIS + addr mode.
1955     if ((CN->getValueType(0) == MVT::i32 ||
1956          (int64_t)CN->getZExtValue() == (int)CN->getZExtValue()) &&
1957         (!Aligned || (CN->getZExtValue() & 3) == 0)) {
1958       int Addr = (int)CN->getZExtValue();
1959 
1960       // Otherwise, break this down into an LIS + disp.
1961       Disp = DAG.getTargetConstant((short)Addr, dl, MVT::i32);
1962 
1963       Base = DAG.getTargetConstant((Addr - (signed short)Addr) >> 16, dl,
1964                                    MVT::i32);
1965       unsigned Opc = CN->getValueType(0) == MVT::i32 ? PPC::LIS : PPC::LIS8;
1966       Base = SDValue(DAG.getMachineNode(Opc, dl, CN->getValueType(0), Base), 0);
1967       return true;
1968     }
1969   }
1970 
1971   Disp = DAG.getTargetConstant(0, dl, getPointerTy(DAG.getDataLayout()));
1972   if (FrameIndexSDNode *FI = dyn_cast<FrameIndexSDNode>(N)) {
1973     Base = DAG.getTargetFrameIndex(FI->getIndex(), N.getValueType());
1974     fixupFuncForFI(DAG, FI->getIndex(), N.getValueType());
1975   } else
1976     Base = N;
1977   return true;      // [r+0]
1978 }
1979 
/// SelectAddressRegRegOnly - Given the specified address, force it to be
1981 /// represented as an indexed [r+r] operation.
1982 bool PPCTargetLowering::SelectAddressRegRegOnly(SDValue N, SDValue &Base,
1983                                                 SDValue &Index,
1984                                                 SelectionDAG &DAG) const {
1985   // Check to see if we can easily represent this as an [r+r] address.  This
1986   // will fail if it thinks that the address is more profitably represented as
1987   // reg+imm, e.g. where imm = 0.
1988   if (SelectAddressRegReg(N, Base, Index, DAG))
1989     return true;
1990 
1991   // If the operand is an addition, always emit this as [r+r], since this is
1992   // better (for code size, and execution, as the memop does the add for free)
1993   // than emitting an explicit add.
1994   if (N.getOpcode() == ISD::ADD) {
1995     Base = N.getOperand(0);
1996     Index = N.getOperand(1);
1997     return true;
1998   }
1999 
2000   // Otherwise, do it the hard way, using R0 as the base register.
2001   Base = DAG.getRegister(Subtarget.isPPC64() ? PPC::ZERO8 : PPC::ZERO,
2002                          N.getValueType());
2003   Index = N;
2004   return true;
2005 }
2006 
/// getPreIndexedAddressParts - Returns true if the node's address can be
/// legally represented as a pre-indexed load / store address; if so, the base
/// pointer, the offset, and the addressing mode are returned by reference.
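/// For illustration, an i32 load whose address is (add r3, 16) can be
/// converted to a pre-increment access with Base = r3, Offset = 16 and
/// AM = ISD::PRE_INC, which is later selected as an update-form instruction
/// (e.g. lwzu).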
2010 bool PPCTargetLowering::getPreIndexedAddressParts(SDNode *N, SDValue &Base,
2011                                                   SDValue &Offset,
2012                                                   ISD::MemIndexedMode &AM,
2013                                                   SelectionDAG &DAG) const {
2014   if (DisablePPCPreinc) return false;
2015 
2016   bool isLoad = true;
2017   SDValue Ptr;
2018   EVT VT;
2019   unsigned Alignment;
2020   if (LoadSDNode *LD = dyn_cast<LoadSDNode>(N)) {
2021     Ptr = LD->getBasePtr();
2022     VT = LD->getMemoryVT();
2023     Alignment = LD->getAlignment();
2024   } else if (StoreSDNode *ST = dyn_cast<StoreSDNode>(N)) {
2025     Ptr = ST->getBasePtr();
2026     VT  = ST->getMemoryVT();
2027     Alignment = ST->getAlignment();
2028     isLoad = false;
2029   } else
2030     return false;
2031 
2032   // PowerPC doesn't have preinc load/store instructions for vectors (except
2033   // for QPX, which does have preinc r+r forms).
2034   if (VT.isVector()) {
2035     if (!Subtarget.hasQPX() || (VT != MVT::v4f64 && VT != MVT::v4f32)) {
2036       return false;
2037     } else if (SelectAddressRegRegOnly(Ptr, Offset, Base, DAG)) {
2038       AM = ISD::PRE_INC;
2039       return true;
2040     }
2041   }
2042 
2043   if (SelectAddressRegReg(Ptr, Base, Offset, DAG)) {
2044 
2045     // Common code will reject creating a pre-inc form if the base pointer
2046     // is a frame index, or if N is a store and the base pointer is either
2047     // the same as or a predecessor of the value being stored.  Check for
2048     // those situations here, and try with swapped Base/Offset instead.
2049     bool Swap = false;
2050 
2051     if (isa<FrameIndexSDNode>(Base) || isa<RegisterSDNode>(Base))
2052       Swap = true;
2053     else if (!isLoad) {
2054       SDValue Val = cast<StoreSDNode>(N)->getValue();
2055       if (Val == Base || Base.getNode()->isPredecessorOf(Val.getNode()))
2056         Swap = true;
2057     }
2058 
2059     if (Swap)
2060       std::swap(Base, Offset);
2061 
2062     AM = ISD::PRE_INC;
2063     return true;
2064   }
2065 
2066   // LDU/STU can only handle immediates that are a multiple of 4.
2067   if (VT != MVT::i64) {
2068     if (!SelectAddressRegImm(Ptr, Offset, Base, DAG, false))
2069       return false;
2070   } else {
2071     // LDU/STU need an address with at least 4-byte alignment.
2072     if (Alignment < 4)
2073       return false;
2074 
2075     if (!SelectAddressRegImm(Ptr, Offset, Base, DAG, true))
2076       return false;
2077   }
2078 
2079   if (LoadSDNode *LD = dyn_cast<LoadSDNode>(N)) {
2080     // PPC64 doesn't have lwau, but it does have lwaux.  Reject preinc load of
2081     // sext i32 to i64 when addr mode is r+i.
2082     if (LD->getValueType(0) == MVT::i64 && LD->getMemoryVT() == MVT::i32 &&
2083         LD->getExtensionType() == ISD::SEXTLOAD &&
2084         isa<ConstantSDNode>(Offset))
2085       return false;
2086   }
2087 
2088   AM = ISD::PRE_INC;
2089   return true;
2090 }
2091 
2092 //===----------------------------------------------------------------------===//
2093 //  LowerOperation implementation
2094 //===----------------------------------------------------------------------===//
2095 
/// Set HiOpFlags and LoOpFlags to the target MO flags used for label
/// references, based on the PIC model and on whether the global (if provided)
/// requires a non-lazy-pointer stub.
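/// For illustration, a non-PIC reference built from these flags is later
/// emitted roughly as
///   addis rT, 0, sym@ha
///   addi  rD, rT, sym@l
/// (rT and rD are hypothetical registers here; the exact sequence depends on
/// how PPCISD::Hi/Lo are selected).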
2098 static void getLabelAccessInfo(bool IsPIC, const PPCSubtarget &Subtarget,
2099                                unsigned &HiOpFlags, unsigned &LoOpFlags,
2100                                const GlobalValue *GV = nullptr) {
2101   HiOpFlags = PPCII::MO_HA;
2102   LoOpFlags = PPCII::MO_LO;
2103 
  // Use the PIC base only when compiling with the PIC relocation model.
2105   if (IsPIC) {
2106     HiOpFlags |= PPCII::MO_PIC_FLAG;
2107     LoOpFlags |= PPCII::MO_PIC_FLAG;
2108   }
2109 
2110   // If this is a reference to a global value that requires a non-lazy-ptr, make
2111   // sure that instruction lowering adds it.
2112   if (GV && Subtarget.hasLazyResolverStub(GV)) {
2113     HiOpFlags |= PPCII::MO_NLP_FLAG;
2114     LoOpFlags |= PPCII::MO_NLP_FLAG;
2115 
2116     if (GV->hasHiddenVisibility()) {
2117       HiOpFlags |= PPCII::MO_NLP_HIDDEN_FLAG;
2118       LoOpFlags |= PPCII::MO_NLP_HIDDEN_FLAG;
2119     }
2120   }
2121 }
2122 
2123 static SDValue LowerLabelRef(SDValue HiPart, SDValue LoPart, bool isPIC,
2124                              SelectionDAG &DAG) {
2125   SDLoc DL(HiPart);
2126   EVT PtrVT = HiPart.getValueType();
2127   SDValue Zero = DAG.getConstant(0, DL, PtrVT);
2128 
2129   SDValue Hi = DAG.getNode(PPCISD::Hi, DL, PtrVT, HiPart, Zero);
2130   SDValue Lo = DAG.getNode(PPCISD::Lo, DL, PtrVT, LoPart, Zero);
2131 
2132   // With PIC, the first instruction is actually "GR+hi(&G)".
2133   if (isPIC)
2134     Hi = DAG.getNode(ISD::ADD, DL, PtrVT,
2135                      DAG.getNode(PPCISD::GlobalBaseReg, DL, PtrVT), Hi);
2136 
2137   // Generate non-pic code that has direct accesses to the constant pool.
2138   // The address of the global is just (hi(&g)+lo(&g)).
2139   return DAG.getNode(ISD::ADD, DL, PtrVT, Hi, Lo);
2140 }
2141 
2142 static void setUsesTOCBasePtr(MachineFunction &MF) {
2143   PPCFunctionInfo *FuncInfo = MF.getInfo<PPCFunctionInfo>();
2144   FuncInfo->setUsesTOCBasePtr();
2145 }
2146 
2147 static void setUsesTOCBasePtr(SelectionDAG &DAG) {
2148   setUsesTOCBasePtr(DAG.getMachineFunction());
2149 }
2150 
2151 static SDValue getTOCEntry(SelectionDAG &DAG, const SDLoc &dl, bool Is64Bit,
2152                            SDValue GA) {
2153   EVT VT = Is64Bit ? MVT::i64 : MVT::i32;
2154   SDValue Reg = Is64Bit ? DAG.getRegister(PPC::X2, VT) :
2155                 DAG.getNode(PPCISD::GlobalBaseReg, dl, VT);
2156 
2157   SDValue Ops[] = { GA, Reg };
2158   return DAG.getMemIntrinsicNode(
2159       PPCISD::TOC_ENTRY, dl, DAG.getVTList(VT, MVT::Other), Ops, VT,
2160       MachinePointerInfo::getGOT(DAG.getMachineFunction()), 0, false, true,
2161       false, 0);
2162 }
2163 
2164 SDValue PPCTargetLowering::LowerConstantPool(SDValue Op,
2165                                              SelectionDAG &DAG) const {
2166   EVT PtrVT = Op.getValueType();
2167   ConstantPoolSDNode *CP = cast<ConstantPoolSDNode>(Op);
2168   const Constant *C = CP->getConstVal();
2169 
2170   // 64-bit SVR4 ABI code is always position-independent.
2171   // The actual address of the GlobalValue is stored in the TOC.
2172   if (Subtarget.isSVR4ABI() && Subtarget.isPPC64()) {
2173     setUsesTOCBasePtr(DAG);
2174     SDValue GA = DAG.getTargetConstantPool(C, PtrVT, CP->getAlignment(), 0);
2175     return getTOCEntry(DAG, SDLoc(CP), true, GA);
2176   }
2177 
2178   unsigned MOHiFlag, MOLoFlag;
2179   bool IsPIC = isPositionIndependent();
2180   getLabelAccessInfo(IsPIC, Subtarget, MOHiFlag, MOLoFlag);
2181 
2182   if (IsPIC && Subtarget.isSVR4ABI()) {
2183     SDValue GA = DAG.getTargetConstantPool(C, PtrVT, CP->getAlignment(),
2184                                            PPCII::MO_PIC_FLAG);
2185     return getTOCEntry(DAG, SDLoc(CP), false, GA);
2186   }
2187 
2188   SDValue CPIHi =
2189     DAG.getTargetConstantPool(C, PtrVT, CP->getAlignment(), 0, MOHiFlag);
2190   SDValue CPILo =
2191     DAG.getTargetConstantPool(C, PtrVT, CP->getAlignment(), 0, MOLoFlag);
2192   return LowerLabelRef(CPIHi, CPILo, IsPIC, DAG);
2193 }
2194 
2195 SDValue PPCTargetLowering::LowerJumpTable(SDValue Op, SelectionDAG &DAG) const {
2196   EVT PtrVT = Op.getValueType();
2197   JumpTableSDNode *JT = cast<JumpTableSDNode>(Op);
2198 
2199   // 64-bit SVR4 ABI code is always position-independent.
2200   // The actual address of the GlobalValue is stored in the TOC.
2201   if (Subtarget.isSVR4ABI() && Subtarget.isPPC64()) {
2202     setUsesTOCBasePtr(DAG);
2203     SDValue GA = DAG.getTargetJumpTable(JT->getIndex(), PtrVT);
2204     return getTOCEntry(DAG, SDLoc(JT), true, GA);
2205   }
2206 
2207   unsigned MOHiFlag, MOLoFlag;
2208   bool IsPIC = isPositionIndependent();
2209   getLabelAccessInfo(IsPIC, Subtarget, MOHiFlag, MOLoFlag);
2210 
2211   if (IsPIC && Subtarget.isSVR4ABI()) {
2212     SDValue GA = DAG.getTargetJumpTable(JT->getIndex(), PtrVT,
2213                                         PPCII::MO_PIC_FLAG);
2214     return getTOCEntry(DAG, SDLoc(GA), false, GA);
2215   }
2216 
2217   SDValue JTIHi = DAG.getTargetJumpTable(JT->getIndex(), PtrVT, MOHiFlag);
2218   SDValue JTILo = DAG.getTargetJumpTable(JT->getIndex(), PtrVT, MOLoFlag);
2219   return LowerLabelRef(JTIHi, JTILo, IsPIC, DAG);
2220 }
2221 
2222 SDValue PPCTargetLowering::LowerBlockAddress(SDValue Op,
2223                                              SelectionDAG &DAG) const {
2224   EVT PtrVT = Op.getValueType();
2225   BlockAddressSDNode *BASDN = cast<BlockAddressSDNode>(Op);
2226   const BlockAddress *BA = BASDN->getBlockAddress();
2227 
2228   // 64-bit SVR4 ABI code is always position-independent.
2229   // The actual BlockAddress is stored in the TOC.
2230   if (Subtarget.isSVR4ABI() && Subtarget.isPPC64()) {
2231     setUsesTOCBasePtr(DAG);
2232     SDValue GA = DAG.getTargetBlockAddress(BA, PtrVT, BASDN->getOffset());
2233     return getTOCEntry(DAG, SDLoc(BASDN), true, GA);
2234   }
2235 
2236   unsigned MOHiFlag, MOLoFlag;
2237   bool IsPIC = isPositionIndependent();
2238   getLabelAccessInfo(IsPIC, Subtarget, MOHiFlag, MOLoFlag);
2239   SDValue TgtBAHi = DAG.getTargetBlockAddress(BA, PtrVT, 0, MOHiFlag);
2240   SDValue TgtBALo = DAG.getTargetBlockAddress(BA, PtrVT, 0, MOLoFlag);
2241   return LowerLabelRef(TgtBAHi, TgtBALo, IsPIC, DAG);
2242 }
2243 
2244 SDValue PPCTargetLowering::LowerGlobalTLSAddress(SDValue Op,
2245                                               SelectionDAG &DAG) const {
2246 
2247   // FIXME: TLS addresses currently use medium model code sequences,
2248   // which is the most useful form.  Eventually support for small and
2249   // large models could be added if users need it, at the cost of
2250   // additional complexity.
2251   GlobalAddressSDNode *GA = cast<GlobalAddressSDNode>(Op);
2252   if (DAG.getTarget().Options.EmulatedTLS)
2253     return LowerToTLSEmulatedModel(GA, DAG);
2254 
2255   SDLoc dl(GA);
2256   const GlobalValue *GV = GA->getGlobal();
2257   EVT PtrVT = getPointerTy(DAG.getDataLayout());
2258   bool is64bit = Subtarget.isPPC64();
2259   const Module *M = DAG.getMachineFunction().getFunction()->getParent();
2260   PICLevel::Level picLevel = M->getPICLevel();
2261 
2262   TLSModel::Model Model = getTargetMachine().getTLSModel(GV);
2263 
2264   if (Model == TLSModel::LocalExec) {
2265     SDValue TGAHi = DAG.getTargetGlobalAddress(GV, dl, PtrVT, 0,
2266                                                PPCII::MO_TPREL_HA);
2267     SDValue TGALo = DAG.getTargetGlobalAddress(GV, dl, PtrVT, 0,
2268                                                PPCII::MO_TPREL_LO);
2269     SDValue TLSReg = DAG.getRegister(is64bit ? PPC::X13 : PPC::R2,
2270                                      is64bit ? MVT::i64 : MVT::i32);
2271     SDValue Hi = DAG.getNode(PPCISD::Hi, dl, PtrVT, TGAHi, TLSReg);
2272     return DAG.getNode(PPCISD::Lo, dl, PtrVT, TGALo, Hi);
2273   }
2274 
2275   if (Model == TLSModel::InitialExec) {
2276     SDValue TGA = DAG.getTargetGlobalAddress(GV, dl, PtrVT, 0, 0);
2277     SDValue TGATLS = DAG.getTargetGlobalAddress(GV, dl, PtrVT, 0,
2278                                                 PPCII::MO_TLS);
2279     SDValue GOTPtr;
2280     if (is64bit) {
2281       setUsesTOCBasePtr(DAG);
2282       SDValue GOTReg = DAG.getRegister(PPC::X2, MVT::i64);
2283       GOTPtr = DAG.getNode(PPCISD::ADDIS_GOT_TPREL_HA, dl,
2284                            PtrVT, GOTReg, TGA);
2285     } else
2286       GOTPtr = DAG.getNode(PPCISD::PPC32_GOT, dl, PtrVT);
2287     SDValue TPOffset = DAG.getNode(PPCISD::LD_GOT_TPREL_L, dl,
2288                                    PtrVT, TGA, GOTPtr);
2289     return DAG.getNode(PPCISD::ADD_TLS, dl, PtrVT, TPOffset, TGATLS);
2290   }
2291 
2292   if (Model == TLSModel::GeneralDynamic) {
2293     SDValue TGA = DAG.getTargetGlobalAddress(GV, dl, PtrVT, 0, 0);
2294     SDValue GOTPtr;
2295     if (is64bit) {
2296       setUsesTOCBasePtr(DAG);
2297       SDValue GOTReg = DAG.getRegister(PPC::X2, MVT::i64);
2298       GOTPtr = DAG.getNode(PPCISD::ADDIS_TLSGD_HA, dl, PtrVT,
2299                                    GOTReg, TGA);
2300     } else {
2301       if (picLevel == PICLevel::SmallPIC)
2302         GOTPtr = DAG.getNode(PPCISD::GlobalBaseReg, dl, PtrVT);
2303       else
2304         GOTPtr = DAG.getNode(PPCISD::PPC32_PICGOT, dl, PtrVT);
2305     }
2306     return DAG.getNode(PPCISD::ADDI_TLSGD_L_ADDR, dl, PtrVT,
2307                        GOTPtr, TGA, TGA);
2308   }
2309 
2310   if (Model == TLSModel::LocalDynamic) {
2311     SDValue TGA = DAG.getTargetGlobalAddress(GV, dl, PtrVT, 0, 0);
2312     SDValue GOTPtr;
2313     if (is64bit) {
2314       setUsesTOCBasePtr(DAG);
2315       SDValue GOTReg = DAG.getRegister(PPC::X2, MVT::i64);
2316       GOTPtr = DAG.getNode(PPCISD::ADDIS_TLSLD_HA, dl, PtrVT,
2317                            GOTReg, TGA);
2318     } else {
2319       if (picLevel == PICLevel::SmallPIC)
2320         GOTPtr = DAG.getNode(PPCISD::GlobalBaseReg, dl, PtrVT);
2321       else
2322         GOTPtr = DAG.getNode(PPCISD::PPC32_PICGOT, dl, PtrVT);
2323     }
2324     SDValue TLSAddr = DAG.getNode(PPCISD::ADDI_TLSLD_L_ADDR, dl,
2325                                   PtrVT, GOTPtr, TGA, TGA);
2326     SDValue DtvOffsetHi = DAG.getNode(PPCISD::ADDIS_DTPREL_HA, dl,
2327                                       PtrVT, TLSAddr, TGA);
2328     return DAG.getNode(PPCISD::ADDI_DTPREL_L, dl, PtrVT, DtvOffsetHi, TGA);
2329   }
2330 
2331   llvm_unreachable("Unknown TLS model!");
2332 }
2333 
2334 SDValue PPCTargetLowering::LowerGlobalAddress(SDValue Op,
2335                                               SelectionDAG &DAG) const {
2336   EVT PtrVT = Op.getValueType();
2337   GlobalAddressSDNode *GSDN = cast<GlobalAddressSDNode>(Op);
2338   SDLoc DL(GSDN);
2339   const GlobalValue *GV = GSDN->getGlobal();
2340 
2341   // 64-bit SVR4 ABI code is always position-independent.
2342   // The actual address of the GlobalValue is stored in the TOC.
2343   if (Subtarget.isSVR4ABI() && Subtarget.isPPC64()) {
2344     setUsesTOCBasePtr(DAG);
2345     SDValue GA = DAG.getTargetGlobalAddress(GV, DL, PtrVT, GSDN->getOffset());
2346     return getTOCEntry(DAG, DL, true, GA);
2347   }
2348 
2349   unsigned MOHiFlag, MOLoFlag;
2350   bool IsPIC = isPositionIndependent();
2351   getLabelAccessInfo(IsPIC, Subtarget, MOHiFlag, MOLoFlag, GV);
2352 
2353   if (IsPIC && Subtarget.isSVR4ABI()) {
2354     SDValue GA = DAG.getTargetGlobalAddress(GV, DL, PtrVT,
2355                                             GSDN->getOffset(),
2356                                             PPCII::MO_PIC_FLAG);
2357     return getTOCEntry(DAG, DL, false, GA);
2358   }
2359 
2360   SDValue GAHi =
2361     DAG.getTargetGlobalAddress(GV, DL, PtrVT, GSDN->getOffset(), MOHiFlag);
2362   SDValue GALo =
2363     DAG.getTargetGlobalAddress(GV, DL, PtrVT, GSDN->getOffset(), MOLoFlag);
2364 
2365   SDValue Ptr = LowerLabelRef(GAHi, GALo, IsPIC, DAG);
2366 
2367   // If the global reference is actually to a non-lazy-pointer, we have to do an
2368   // extra load to get the address of the global.
2369   if (MOHiFlag & PPCII::MO_NLP_FLAG)
2370     Ptr = DAG.getLoad(PtrVT, DL, DAG.getEntryNode(), Ptr, MachinePointerInfo());
2371   return Ptr;
2372 }
2373 
2374 SDValue PPCTargetLowering::LowerSETCC(SDValue Op, SelectionDAG &DAG) const {
2375   ISD::CondCode CC = cast<CondCodeSDNode>(Op.getOperand(2))->get();
2376   SDLoc dl(Op);
2377 
2378   if (Op.getValueType() == MVT::v2i64) {
2379     // When the operands themselves are v2i64 values, we need to do something
2380     // special because VSX has no underlying comparison operations for these.
2381     if (Op.getOperand(0).getValueType() == MVT::v2i64) {
2382       // Equality can be handled by casting to the legal type for Altivec
2383       // comparisons, everything else needs to be expanded.
2384       if (CC == ISD::SETEQ || CC == ISD::SETNE) {
2385         return DAG.getNode(ISD::BITCAST, dl, MVT::v2i64,
2386                  DAG.getSetCC(dl, MVT::v4i32,
2387                    DAG.getNode(ISD::BITCAST, dl, MVT::v4i32, Op.getOperand(0)),
2388                    DAG.getNode(ISD::BITCAST, dl, MVT::v4i32, Op.getOperand(1)),
2389                    CC));
2390       }
2391 
2392       return SDValue();
2393     }
2394 
2395     // We handle most of these in the usual way.
2396     return Op;
2397   }
2398 
2399   // If we're comparing for equality to zero, expose the fact that this is
2400   // implemented as a ctlz/srl pair on ppc, so that the dag combiner can
2401   // fold the new nodes.
2402   if (SDValue V = lowerCmpEqZeroToCtlzSrl(Op, DAG))
2403     return V;
2404 
2405   if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op.getOperand(1))) {
2406     // Leave comparisons against 0 and -1 alone for now, since they're usually
2407     // optimized.  FIXME: revisit this when we can custom lower all setcc
2408     // optimizations.
2409     if (C->isAllOnesValue() || C->isNullValue())
2410       return SDValue();
2411   }
2412 
2413   // If we have an integer seteq/setne, turn it into a compare against zero
2414   // by xor'ing the rhs with the lhs, which is faster than setting a
2415   // condition register, reading it back out, and masking the correct bit.  The
2416   // normal approach here uses sub to do this instead of xor.  Using xor exposes
2417   // the result to other bit-twiddling opportunities.
2418   EVT LHSVT = Op.getOperand(0).getValueType();
2419   if (LHSVT.isInteger() && (CC == ISD::SETEQ || CC == ISD::SETNE)) {
2420     EVT VT = Op.getValueType();
2421     SDValue Sub = DAG.getNode(ISD::XOR, dl, LHSVT, Op.getOperand(0),
2422                                 Op.getOperand(1));
2423     return DAG.getSetCC(dl, VT, Sub, DAG.getConstant(0, dl, LHSVT), CC);
2424   }
2425   return SDValue();
2426 }
2427 
2428 SDValue PPCTargetLowering::LowerVAARG(SDValue Op, SelectionDAG &DAG) const {
2429   SDNode *Node = Op.getNode();
2430   EVT VT = Node->getValueType(0);
2431   EVT PtrVT = getPointerTy(DAG.getDataLayout());
2432   SDValue InChain = Node->getOperand(0);
2433   SDValue VAListPtr = Node->getOperand(1);
2434   const Value *SV = cast<SrcValueSDNode>(Node->getOperand(2))->getValue();
2435   SDLoc dl(Node);
2436 
2437   assert(!Subtarget.isPPC64() && "LowerVAARG is PPC32 only");
2438 
2439   // gpr_index
2440   SDValue GprIndex = DAG.getExtLoad(ISD::ZEXTLOAD, dl, MVT::i32, InChain,
2441                                     VAListPtr, MachinePointerInfo(SV), MVT::i8);
2442   InChain = GprIndex.getValue(1);
2443 
2444   if (VT == MVT::i64) {
2445     // Check if GprIndex is even
2446     SDValue GprAnd = DAG.getNode(ISD::AND, dl, MVT::i32, GprIndex,
2447                                  DAG.getConstant(1, dl, MVT::i32));
2448     SDValue CC64 = DAG.getSetCC(dl, MVT::i32, GprAnd,
2449                                 DAG.getConstant(0, dl, MVT::i32), ISD::SETNE);
2450     SDValue GprIndexPlusOne = DAG.getNode(ISD::ADD, dl, MVT::i32, GprIndex,
2451                                           DAG.getConstant(1, dl, MVT::i32));
2452     // Align GprIndex to be even if it isn't
2453     GprIndex = DAG.getNode(ISD::SELECT, dl, MVT::i32, CC64, GprIndexPlusOne,
2454                            GprIndex);
2455   }
2456 
2457   // fpr index is 1 byte after gpr
2458   SDValue FprPtr = DAG.getNode(ISD::ADD, dl, PtrVT, VAListPtr,
2459                                DAG.getConstant(1, dl, MVT::i32));
2460 
2461   // fpr
2462   SDValue FprIndex = DAG.getExtLoad(ISD::ZEXTLOAD, dl, MVT::i32, InChain,
2463                                     FprPtr, MachinePointerInfo(SV), MVT::i8);
2464   InChain = FprIndex.getValue(1);
2465 
2466   SDValue RegSaveAreaPtr = DAG.getNode(ISD::ADD, dl, PtrVT, VAListPtr,
2467                                        DAG.getConstant(8, dl, MVT::i32));
2468 
2469   SDValue OverflowAreaPtr = DAG.getNode(ISD::ADD, dl, PtrVT, VAListPtr,
2470                                         DAG.getConstant(4, dl, MVT::i32));
2471 
2472   // areas
2473   SDValue OverflowArea =
2474       DAG.getLoad(MVT::i32, dl, InChain, OverflowAreaPtr, MachinePointerInfo());
2475   InChain = OverflowArea.getValue(1);
2476 
2477   SDValue RegSaveArea =
2478       DAG.getLoad(MVT::i32, dl, InChain, RegSaveAreaPtr, MachinePointerInfo());
2479   InChain = RegSaveArea.getValue(1);
2480 
  // select overflow_area if index >= 8
2482   SDValue CC = DAG.getSetCC(dl, MVT::i32, VT.isInteger() ? GprIndex : FprIndex,
2483                             DAG.getConstant(8, dl, MVT::i32), ISD::SETLT);
2484 
2485   // adjustment constant gpr_index * 4/8
2486   SDValue RegConstant = DAG.getNode(ISD::MUL, dl, MVT::i32,
2487                                     VT.isInteger() ? GprIndex : FprIndex,
2488                                     DAG.getConstant(VT.isInteger() ? 4 : 8, dl,
2489                                                     MVT::i32));
2490 
2491   // OurReg = RegSaveArea + RegConstant
2492   SDValue OurReg = DAG.getNode(ISD::ADD, dl, PtrVT, RegSaveArea,
2493                                RegConstant);
2494 
2495   // Floating types are 32 bytes into RegSaveArea
2496   if (VT.isFloatingPoint())
2497     OurReg = DAG.getNode(ISD::ADD, dl, PtrVT, OurReg,
2498                          DAG.getConstant(32, dl, MVT::i32));
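       // (The first 8 * 4 == 32 bytes of the register save area hold r3-r10;
       // the saved FPRs f1-f8 follow.)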
2499 
2500   // increase {f,g}pr_index by 1 (or 2 if VT is i64)
2501   SDValue IndexPlus1 = DAG.getNode(ISD::ADD, dl, MVT::i32,
2502                                    VT.isInteger() ? GprIndex : FprIndex,
2503                                    DAG.getConstant(VT == MVT::i64 ? 2 : 1, dl,
2504                                                    MVT::i32));
2505 
2506   InChain = DAG.getTruncStore(InChain, dl, IndexPlus1,
2507                               VT.isInteger() ? VAListPtr : FprPtr,
2508                               MachinePointerInfo(SV), MVT::i8);
2509 
2510   // determine if we should load from reg_save_area or overflow_area
2511   SDValue Result = DAG.getNode(ISD::SELECT, dl, PtrVT, CC, OurReg, OverflowArea);
2512 
2513   // Advance overflow_area by 4/8 when the value came from it (index >= 8).
2514   SDValue OverflowAreaPlusN = DAG.getNode(ISD::ADD, dl, PtrVT, OverflowArea,
2515                                           DAG.getConstant(VT.isInteger() ? 4 : 8,
2516                                           dl, MVT::i32));
2517 
2518   OverflowArea = DAG.getNode(ISD::SELECT, dl, MVT::i32, CC, OverflowArea,
2519                              OverflowAreaPlusN);
2520 
2521   InChain = DAG.getTruncStore(InChain, dl, OverflowArea, OverflowAreaPtr,
2522                               MachinePointerInfo(), MVT::i32);
2523 
2524   return DAG.getLoad(VT, dl, InChain, Result, MachinePointerInfo());
2525 }
2526 
2527 SDValue PPCTargetLowering::LowerVACOPY(SDValue Op, SelectionDAG &DAG) const {
2528   assert(!Subtarget.isPPC64() && "LowerVACOPY is PPC32 only");
2529 
2530   // We have to copy the entire va_list struct:
2531   // 2*sizeof(char) + 2 bytes of padding + 2*sizeof(char*) = 12 bytes.
2532   return DAG.getMemcpy(Op.getOperand(0), Op,
2533                        Op.getOperand(1), Op.getOperand(2),
2534                        DAG.getConstant(12, SDLoc(Op), MVT::i32), 8, false, true,
2535                        false, MachinePointerInfo(), MachinePointerInfo());
2536 }
2537 
2538 SDValue PPCTargetLowering::LowerADJUST_TRAMPOLINE(SDValue Op,
2539                                                   SelectionDAG &DAG) const {
2540   return Op.getOperand(0);
2541 }
2542 
2543 SDValue PPCTargetLowering::LowerINIT_TRAMPOLINE(SDValue Op,
2544                                                 SelectionDAG &DAG) const {
2545   SDValue Chain = Op.getOperand(0);
2546   SDValue Trmp = Op.getOperand(1); // trampoline
2547   SDValue FPtr = Op.getOperand(2); // nested function
2548   SDValue Nest = Op.getOperand(3); // 'nest' parameter value
2549   SDLoc dl(Op);
2550 
2551   EVT PtrVT = getPointerTy(DAG.getDataLayout());
2552   bool isPPC64 = (PtrVT == MVT::i64);
2553   Type *IntPtrTy = DAG.getDataLayout().getIntPtrType(*DAG.getContext());
2554 
2555   TargetLowering::ArgListTy Args;
2556   TargetLowering::ArgListEntry Entry;
2557 
2558   Entry.Ty = IntPtrTy;
2559   Entry.Node = Trmp; Args.push_back(Entry);
2560 
2561   // TrampSize == (isPPC64 ? 48 : 40);
2562   Entry.Node = DAG.getConstant(isPPC64 ? 48 : 40, dl,
2563                                isPPC64 ? MVT::i64 : MVT::i32);
2564   Args.push_back(Entry);
2565 
2566   Entry.Node = FPtr; Args.push_back(Entry);
2567   Entry.Node = Nest; Args.push_back(Entry);
2568 
2569   // Lower to a call to __trampoline_setup(Trmp, TrampSize, FPtr, ctx_reg)
2570   TargetLowering::CallLoweringInfo CLI(DAG);
2571   CLI.setDebugLoc(dl).setChain(Chain)
2572     .setCallee(CallingConv::C, Type::getVoidTy(*DAG.getContext()),
2573                DAG.getExternalSymbol("__trampoline_setup", PtrVT),
2574                std::move(Args));
2575 
2576   std::pair<SDValue, SDValue> CallResult = LowerCallTo(CLI);
2577   return CallResult.second;
2578 }
2579 
2580 SDValue PPCTargetLowering::LowerVASTART(SDValue Op, SelectionDAG &DAG) const {
2581   MachineFunction &MF = DAG.getMachineFunction();
2582   PPCFunctionInfo *FuncInfo = MF.getInfo<PPCFunctionInfo>();
2583   EVT PtrVT = getPointerTy(MF.getDataLayout());
2584 
2585   SDLoc dl(Op);
2586 
2587   if (Subtarget.isDarwinABI() || Subtarget.isPPC64()) {
2588     // vastart just stores the address of the VarArgsFrameIndex slot into the
2589     // memory location argument.
2590     SDValue FR = DAG.getFrameIndex(FuncInfo->getVarArgsFrameIndex(), PtrVT);
2591     const Value *SV = cast<SrcValueSDNode>(Op.getOperand(2))->getValue();
2592     return DAG.getStore(Op.getOperand(0), dl, FR, Op.getOperand(1),
2593                         MachinePointerInfo(SV));
2594   }
2595 
2596   // For the 32-bit SVR4 ABI we follow the layout of the va_list struct.
2597   // We suppose the given va_list is already allocated.
2598   //
2599   // typedef struct {
2600   //  char gpr;     /* index into the array of 8 GPRs
2601   //                 * stored in the register save area
2602   //                 * gpr=0 corresponds to r3,
2603   //                 * gpr=1 to r4, etc.
2604   //                 */
2605   //  char fpr;     /* index into the array of 8 FPRs
2606   //                 * stored in the register save area
2607   //                 * fpr=0 corresponds to f1,
2608   //                 * fpr=1 to f2, etc.
2609   //                 */
2610   //  char *overflow_arg_area;
2611   //                /* location on stack that holds
2612   //                 * the next overflow argument
2613   //                 */
2614   //  char *reg_save_area;
2615   //               /* where r3:r10 and f1:f8 (if saved)
2616   //                * are stored
2617   //                */
2618   // } va_list[1];
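       // With 4-byte pointers this yields byte offsets 0 (gpr), 1 (fpr),
       // 4 (overflow_arg_area) and 8 (reg_save_area), matching the stores below.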
2619 
2620   SDValue ArgGPR = DAG.getConstant(FuncInfo->getVarArgsNumGPR(), dl, MVT::i32);
2621   SDValue ArgFPR = DAG.getConstant(FuncInfo->getVarArgsNumFPR(), dl, MVT::i32);
2622   SDValue StackOffsetFI = DAG.getFrameIndex(FuncInfo->getVarArgsStackOffset(),
2623                                             PtrVT);
2624   SDValue FR = DAG.getFrameIndex(FuncInfo->getVarArgsFrameIndex(),
2625                                  PtrVT);
2626 
2627   uint64_t FrameOffset = PtrVT.getSizeInBits()/8;
2628   SDValue ConstFrameOffset = DAG.getConstant(FrameOffset, dl, PtrVT);
2629 
2630   uint64_t StackOffset = PtrVT.getSizeInBits()/8 - 1;
2631   SDValue ConstStackOffset = DAG.getConstant(StackOffset, dl, PtrVT);
2632 
2633   uint64_t FPROffset = 1;
2634   SDValue ConstFPROffset = DAG.getConstant(FPROffset, dl, PtrVT);
2635 
2636   const Value *SV = cast<SrcValueSDNode>(Op.getOperand(2))->getValue();
2637 
2638   // Store first byte : number of int regs
2639   SDValue firstStore =
2640       DAG.getTruncStore(Op.getOperand(0), dl, ArgGPR, Op.getOperand(1),
2641                         MachinePointerInfo(SV), MVT::i8);
2642   uint64_t nextOffset = FPROffset;
2643   SDValue nextPtr = DAG.getNode(ISD::ADD, dl, PtrVT, Op.getOperand(1),
2644                                   ConstFPROffset);
2645 
2646   // Store second byte : number of float regs
2647   SDValue secondStore =
2648       DAG.getTruncStore(firstStore, dl, ArgFPR, nextPtr,
2649                         MachinePointerInfo(SV, nextOffset), MVT::i8);
2650   nextOffset += StackOffset;
2651   nextPtr = DAG.getNode(ISD::ADD, dl, PtrVT, nextPtr, ConstStackOffset);
2652 
2653   // Store second word : arguments given on stack
2654   SDValue thirdStore = DAG.getStore(secondStore, dl, StackOffsetFI, nextPtr,
2655                                     MachinePointerInfo(SV, nextOffset));
2656   nextOffset += FrameOffset;
2657   nextPtr = DAG.getNode(ISD::ADD, dl, PtrVT, nextPtr, ConstFrameOffset);
2658 
2659   // Store third word : arguments given in registers
2660   return DAG.getStore(thirdStore, dl, FR, nextPtr,
2661                       MachinePointerInfo(SV, nextOffset));
2662 }
2663 
2664 #include "PPCGenCallingConv.inc"
2665 
2666 // Function whose sole purpose is to kill compiler warnings
2667 // stemming from unused functions included from PPCGenCallingConv.inc.
2668 CCAssignFn *PPCTargetLowering::useFastISelCCs(unsigned Flag) const {
2669   return Flag ? CC_PPC64_ELF_FIS : RetCC_PPC64_ELF_FIS;
2670 }
2671 
2672 bool llvm::CC_PPC32_SVR4_Custom_Dummy(unsigned &ValNo, MVT &ValVT, MVT &LocVT,
2673                                       CCValAssign::LocInfo &LocInfo,
2674                                       ISD::ArgFlagsTy &ArgFlags,
2675                                       CCState &State) {
2676   return true;
2677 }
2678 
2679 bool llvm::CC_PPC32_SVR4_Custom_AlignArgRegs(unsigned &ValNo, MVT &ValVT,
2680                                              MVT &LocVT,
2681                                              CCValAssign::LocInfo &LocInfo,
2682                                              ISD::ArgFlagsTy &ArgFlags,
2683                                              CCState &State) {
2684   static const MCPhysReg ArgRegs[] = {
2685     PPC::R3, PPC::R4, PPC::R5, PPC::R6,
2686     PPC::R7, PPC::R8, PPC::R9, PPC::R10,
2687   };
2688   const unsigned NumArgRegs = array_lengthof(ArgRegs);
2689 
2690   unsigned RegNum = State.getFirstUnallocated(ArgRegs);
2691 
2692   // Skip one register if the first unallocated register has an even register
2693   // number and there are still argument registers available which have not been
2694   // allocated yet. RegNum is actually an index into ArgRegs, which means we
2695   // need to skip a register if RegNum is odd.
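       // For example, if r3 already holds a previous argument, the first
       // unallocated register is r4 (index 1, an even-numbered register); r4 is
       // allocated and skipped here so allocation for the current argument
       // starts at r5.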
2696   if (RegNum != NumArgRegs && RegNum % 2 == 1) {
2697     State.AllocateReg(ArgRegs[RegNum]);
2698   }
2699 
2700   // Always return false here, as this function only makes sure that the first
2701   // unallocated register has an odd register number and does not actually
2702   // allocate a register for the current argument.
2703   return false;
2704 }
2705 
2706 bool
2707 llvm::CC_PPC32_SVR4_Custom_SkipLastArgRegsPPCF128(unsigned &ValNo, MVT &ValVT,
2708                                                   MVT &LocVT,
2709                                                   CCValAssign::LocInfo &LocInfo,
2710                                                   ISD::ArgFlagsTy &ArgFlags,
2711                                                   CCState &State) {
2712   static const MCPhysReg ArgRegs[] = {
2713     PPC::R3, PPC::R4, PPC::R5, PPC::R6,
2714     PPC::R7, PPC::R8, PPC::R9, PPC::R10,
2715   };
2716   const unsigned NumArgRegs = array_lengthof(ArgRegs);
2717 
2718   unsigned RegNum = State.getFirstUnallocated(ArgRegs);
2719   int RegsLeft = NumArgRegs - RegNum;
2720 
2721   // If there are not enough registers left for the long double type (4 GPRs in
2722   // soft-float mode), skip the remaining ones and pass the argument on the stack.
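       // For example, if r3-r7 are already taken, only r8-r10 remain (3 < 4);
       // they are allocated here so the ppc_fp128 value is passed entirely on
       // the stack.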
2723   if (RegNum != NumArgRegs && RegsLeft < 4) {
2724     for (int i = 0; i < RegsLeft; i++) {
2725       State.AllocateReg(ArgRegs[RegNum + i]);
2726     }
2727   }
2728 
2729   return false;
2730 }
2731 
2732 bool llvm::CC_PPC32_SVR4_Custom_AlignFPArgRegs(unsigned &ValNo, MVT &ValVT,
2733                                                MVT &LocVT,
2734                                                CCValAssign::LocInfo &LocInfo,
2735                                                ISD::ArgFlagsTy &ArgFlags,
2736                                                CCState &State) {
2737   static const MCPhysReg ArgRegs[] = {
2738     PPC::F1, PPC::F2, PPC::F3, PPC::F4, PPC::F5, PPC::F6, PPC::F7,
2739     PPC::F8
2740   };
2741 
2742   const unsigned NumArgRegs = array_lengthof(ArgRegs);
2743 
2744   unsigned RegNum = State.getFirstUnallocated(ArgRegs);
2745 
2746   // If there is only one floating-point register left, we need to put both f64
2747   // halves of a split ppc_fp128 value on the stack.
2748   if (RegNum != NumArgRegs && ArgRegs[RegNum] == PPC::F8) {
2749     State.AllocateReg(ArgRegs[RegNum]);
2750   }
2751 
2752   // Always return false here, as this function only makes sure that the two f64
2753   // values a ppc_fp128 value is split into are both passed in registers or both
2754   // passed on the stack and does not actually allocate a register for the
2755   // current argument.
2756   return false;
2757 }
2758 
2759 /// FPR - The set of FP registers that should be allocated for arguments,
2760 /// on Darwin.
2761 static const MCPhysReg FPR[] = {PPC::F1,  PPC::F2,  PPC::F3, PPC::F4, PPC::F5,
2762                                 PPC::F6,  PPC::F7,  PPC::F8, PPC::F9, PPC::F10,
2763                                 PPC::F11, PPC::F12, PPC::F13};
2764 
2765 /// QFPR - The set of QPX registers that should be allocated for arguments.
2766 static const MCPhysReg QFPR[] = {
2767     PPC::QF1, PPC::QF2, PPC::QF3,  PPC::QF4,  PPC::QF5,  PPC::QF6, PPC::QF7,
2768     PPC::QF8, PPC::QF9, PPC::QF10, PPC::QF11, PPC::QF12, PPC::QF13};
2769 
2770 /// CalculateStackSlotSize - Calculates the size reserved for this argument on
2771 /// the stack.
2772 static unsigned CalculateStackSlotSize(EVT ArgVT, ISD::ArgFlagsTy Flags,
2773                                        unsigned PtrByteSize) {
2774   unsigned ArgSize = ArgVT.getStoreSize();
2775   if (Flags.isByVal())
2776     ArgSize = Flags.getByValSize();
2777 
2778   // Round up to multiples of the pointer size, except for array members,
2779   // which are always packed.
2780   if (!Flags.isInConsecutiveRegs())
2781     ArgSize = ((ArgSize + PtrByteSize - 1)/PtrByteSize) * PtrByteSize;
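       // For example, with an 8-byte pointer size a 12-byte argument is rounded
       // up to a 16-byte stack slot.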
2782 
2783   return ArgSize;
2784 }
2785 
2786 /// CalculateStackSlotAlignment - Calculates the alignment of this argument
2787 /// on the stack.
2788 static unsigned CalculateStackSlotAlignment(EVT ArgVT, EVT OrigVT,
2789                                             ISD::ArgFlagsTy Flags,
2790                                             unsigned PtrByteSize) {
2791   unsigned Align = PtrByteSize;
2792 
2793   // Altivec parameters are padded to a 16 byte boundary.
2794   if (ArgVT == MVT::v4f32 || ArgVT == MVT::v4i32 ||
2795       ArgVT == MVT::v8i16 || ArgVT == MVT::v16i8 ||
2796       ArgVT == MVT::v2f64 || ArgVT == MVT::v2i64 ||
2797       ArgVT == MVT::v1i128)
2798     Align = 16;
2799   // QPX vector types stored in double-precision are padded to a 32 byte
2800   // boundary.
2801   else if (ArgVT == MVT::v4f64 || ArgVT == MVT::v4i1)
2802     Align = 32;
2803 
2804   // ByVal parameters are aligned as requested.
2805   if (Flags.isByVal()) {
2806     unsigned BVAlign = Flags.getByValAlign();
2807     if (BVAlign > PtrByteSize) {
2808       if (BVAlign % PtrByteSize != 0)
2809         llvm_unreachable(
2810             "ByVal alignment is not a multiple of the pointer size");
2811 
2812       Align = BVAlign;
2813     }
2814   }
2815 
2816   // Array members are always packed to their original alignment.
2817   if (Flags.isInConsecutiveRegs()) {
2818     // If the array member was split into multiple registers, the first
2819     // needs to be aligned to the size of the full type.  (Except for
2820     // ppcf128, which is only aligned as its f64 components.)
2821     if (Flags.isSplit() && OrigVT != MVT::ppcf128)
2822       Align = OrigVT.getStoreSize();
2823     else
2824       Align = ArgVT.getStoreSize();
2825   }
2826 
2827   return Align;
2828 }
2829 
2830 /// CalculateStackSlotUsed - Return whether this argument will use its
2831 /// stack slot (instead of being passed in registers).  ArgOffset,
2832 /// AvailableFPRs, and AvailableVRs must hold the current argument
2833 /// position, and will be updated to account for this argument.
2834 static bool CalculateStackSlotUsed(EVT ArgVT, EVT OrigVT,
2835                                    ISD::ArgFlagsTy Flags,
2836                                    unsigned PtrByteSize,
2837                                    unsigned LinkageSize,
2838                                    unsigned ParamAreaSize,
2839                                    unsigned &ArgOffset,
2840                                    unsigned &AvailableFPRs,
2841                                    unsigned &AvailableVRs, bool HasQPX) {
2842   bool UseMemory = false;
2843 
2844   // Respect alignment of argument on the stack.
2845   unsigned Align =
2846     CalculateStackSlotAlignment(ArgVT, OrigVT, Flags, PtrByteSize);
2847   ArgOffset = ((ArgOffset + Align - 1) / Align) * Align;
2848   // If there's no space left in the argument save area, we must
2849   // use memory (this check also catches zero-sized arguments).
2850   if (ArgOffset >= LinkageSize + ParamAreaSize)
2851     UseMemory = true;
2852 
2853   // Allocate argument on the stack.
2854   ArgOffset += CalculateStackSlotSize(ArgVT, Flags, PtrByteSize);
2855   if (Flags.isInConsecutiveRegsLast())
2856     ArgOffset = ((ArgOffset + PtrByteSize - 1)/PtrByteSize) * PtrByteSize;
2857   // If we overran the argument save area, we must use memory
2858   // (this check catches arguments passed partially in memory)
2859   if (ArgOffset > LinkageSize + ParamAreaSize)
2860     UseMemory = true;
2861 
2862   // However, if the argument is actually passed in an FPR or a VR,
2863   // we don't use memory after all.
2864   if (!Flags.isByVal()) {
2865     if (ArgVT == MVT::f32 || ArgVT == MVT::f64 ||
2866         // QPX registers overlap with the scalar FP registers.
2867         (HasQPX && (ArgVT == MVT::v4f32 ||
2868                     ArgVT == MVT::v4f64 ||
2869                     ArgVT == MVT::v4i1)))
2870       if (AvailableFPRs > 0) {
2871         --AvailableFPRs;
2872         return false;
2873       }
2874     if (ArgVT == MVT::v4f32 || ArgVT == MVT::v4i32 ||
2875         ArgVT == MVT::v8i16 || ArgVT == MVT::v16i8 ||
2876         ArgVT == MVT::v2f64 || ArgVT == MVT::v2i64 ||
2877         ArgVT == MVT::v1i128)
2878       if (AvailableVRs > 0) {
2879         --AvailableVRs;
2880         return false;
2881       }
2882   }
2883 
2884   return UseMemory;
2885 }
2886 
2887 /// EnsureStackAlignment - Round stack frame size up from NumBytes to
2888 /// ensure minimum alignment required for target.
2889 static unsigned EnsureStackAlignment(const PPCFrameLowering *Lowering,
2890                                      unsigned NumBytes) {
2891   unsigned TargetAlign = Lowering->getStackAlignment();
2892   unsigned AlignMask = TargetAlign - 1;
2893   NumBytes = (NumBytes + AlignMask) & ~AlignMask;
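       // For example, with a 16-byte stack alignment a frame size of 52 bytes is
       // rounded up to 64.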
2894   return NumBytes;
2895 }
2896 
2897 SDValue PPCTargetLowering::LowerFormalArguments(
2898     SDValue Chain, CallingConv::ID CallConv, bool isVarArg,
2899     const SmallVectorImpl<ISD::InputArg> &Ins, const SDLoc &dl,
2900     SelectionDAG &DAG, SmallVectorImpl<SDValue> &InVals) const {
2901   if (Subtarget.isSVR4ABI()) {
2902     if (Subtarget.isPPC64())
2903       return LowerFormalArguments_64SVR4(Chain, CallConv, isVarArg, Ins,
2904                                          dl, DAG, InVals);
2905     else
2906       return LowerFormalArguments_32SVR4(Chain, CallConv, isVarArg, Ins,
2907                                          dl, DAG, InVals);
2908   } else {
2909     return LowerFormalArguments_Darwin(Chain, CallConv, isVarArg, Ins,
2910                                        dl, DAG, InVals);
2911   }
2912 }
2913 
2914 SDValue PPCTargetLowering::LowerFormalArguments_32SVR4(
2915     SDValue Chain, CallingConv::ID CallConv, bool isVarArg,
2916     const SmallVectorImpl<ISD::InputArg> &Ins, const SDLoc &dl,
2917     SelectionDAG &DAG, SmallVectorImpl<SDValue> &InVals) const {
2918 
2919   // 32-bit SVR4 ABI Stack Frame Layout:
2920   //              +-----------------------------------+
2921   //        +-->  |            Back chain             |
2922   //        |     +-----------------------------------+
2923   //        |     | Floating-point register save area |
2924   //        |     +-----------------------------------+
2925   //        |     |    General register save area     |
2926   //        |     +-----------------------------------+
2927   //        |     |          CR save word             |
2928   //        |     +-----------------------------------+
2929   //        |     |         VRSAVE save word          |
2930   //        |     +-----------------------------------+
2931   //        |     |         Alignment padding         |
2932   //        |     +-----------------------------------+
2933   //        |     |     Vector register save area     |
2934   //        |     +-----------------------------------+
2935   //        |     |       Local variable space        |
2936   //        |     +-----------------------------------+
2937   //        |     |        Parameter list area        |
2938   //        |     +-----------------------------------+
2939   //        |     |           LR save word            |
2940   //        |     +-----------------------------------+
2941   // SP-->  +---  |            Back chain             |
2942   //              +-----------------------------------+
2943   //
2944   // Specifications:
2945   //   System V Application Binary Interface PowerPC Processor Supplement
2946   //   AltiVec Technology Programming Interface Manual
2947 
2948   MachineFunction &MF = DAG.getMachineFunction();
2949   MachineFrameInfo &MFI = MF.getFrameInfo();
2950   PPCFunctionInfo *FuncInfo = MF.getInfo<PPCFunctionInfo>();
2951 
2952   EVT PtrVT = getPointerTy(MF.getDataLayout());
2953   // Potential tail calls could cause overwriting of argument stack slots.
2954   bool isImmutable = !(getTargetMachine().Options.GuaranteedTailCallOpt &&
2955                        (CallConv == CallingConv::Fast));
2956   unsigned PtrByteSize = 4;
2957 
2958   // Assign locations to all of the incoming arguments.
2959   SmallVector<CCValAssign, 16> ArgLocs;
2960   PPCCCState CCInfo(CallConv, isVarArg, DAG.getMachineFunction(), ArgLocs,
2961                  *DAG.getContext());
2962 
2963   // Reserve space for the linkage area on the stack.
2964   unsigned LinkageSize = Subtarget.getFrameLowering()->getLinkageSize();
2965   CCInfo.AllocateStack(LinkageSize, PtrByteSize);
2966   if (useSoftFloat())
2967     CCInfo.PreAnalyzeFormalArguments(Ins);
2968 
2969   CCInfo.AnalyzeFormalArguments(Ins, CC_PPC32_SVR4);
2970   CCInfo.clearWasPPCF128();
2971 
2972   for (unsigned i = 0, e = ArgLocs.size(); i != e; ++i) {
2973     CCValAssign &VA = ArgLocs[i];
2974 
2975     // Arguments stored in registers.
2976     if (VA.isRegLoc()) {
2977       const TargetRegisterClass *RC;
2978       EVT ValVT = VA.getValVT();
2979 
2980       switch (ValVT.getSimpleVT().SimpleTy) {
2981         default:
2982           llvm_unreachable("ValVT not supported by formal arguments Lowering");
2983         case MVT::i1:
2984         case MVT::i32:
2985           RC = &PPC::GPRCRegClass;
2986           break;
2987         case MVT::f32:
2988           if (Subtarget.hasP8Vector())
2989             RC = &PPC::VSSRCRegClass;
2990           else
2991             RC = &PPC::F4RCRegClass;
2992           break;
2993         case MVT::f64:
2994           if (Subtarget.hasVSX())
2995             RC = &PPC::VSFRCRegClass;
2996           else
2997             RC = &PPC::F8RCRegClass;
2998           break;
2999         case MVT::v16i8:
3000         case MVT::v8i16:
3001         case MVT::v4i32:
3002           RC = &PPC::VRRCRegClass;
3003           break;
3004         case MVT::v4f32:
3005           RC = Subtarget.hasQPX() ? &PPC::QSRCRegClass : &PPC::VRRCRegClass;
3006           break;
3007         case MVT::v2f64:
3008         case MVT::v2i64:
3009           RC = &PPC::VRRCRegClass;
3010           break;
3011         case MVT::v4f64:
3012           RC = &PPC::QFRCRegClass;
3013           break;
3014         case MVT::v4i1:
3015           RC = &PPC::QBRCRegClass;
3016           break;
3017       }
3018 
3019       // Transform the arguments stored in physical registers into virtual ones.
3020       unsigned Reg = MF.addLiveIn(VA.getLocReg(), RC);
3021       SDValue ArgValue = DAG.getCopyFromReg(Chain, dl, Reg,
3022                                             ValVT == MVT::i1 ? MVT::i32 : ValVT);
3023 
3024       if (ValVT == MVT::i1)
3025         ArgValue = DAG.getNode(ISD::TRUNCATE, dl, MVT::i1, ArgValue);
3026 
3027       InVals.push_back(ArgValue);
3028     } else {
3029       // Argument stored in memory.
3030       assert(VA.isMemLoc());
3031 
3032       unsigned ArgSize = VA.getLocVT().getStoreSize();
3033       int FI = MFI.CreateFixedObject(ArgSize, VA.getLocMemOffset(),
3034                                      isImmutable);
3035 
3036       // Create load nodes to retrieve arguments from the stack.
3037       SDValue FIN = DAG.getFrameIndex(FI, PtrVT);
3038       InVals.push_back(
3039           DAG.getLoad(VA.getValVT(), dl, Chain, FIN, MachinePointerInfo()));
3040     }
3041   }
3042 
3043   // Assign locations to all of the incoming aggregate by value arguments.
3044   // Aggregates passed by value are stored in the local variable space of the
3045   // caller's stack frame, right above the parameter list area.
3046   SmallVector<CCValAssign, 16> ByValArgLocs;
3047   CCState CCByValInfo(CallConv, isVarArg, DAG.getMachineFunction(),
3048                       ByValArgLocs, *DAG.getContext());
3049 
3050   // Reserve stack space for the allocations in CCInfo.
3051   CCByValInfo.AllocateStack(CCInfo.getNextStackOffset(), PtrByteSize);
3052 
3053   CCByValInfo.AnalyzeFormalArguments(Ins, CC_PPC32_SVR4_ByVal);
3054 
3055   // Area that is at least reserved in the caller of this function.
3056   unsigned MinReservedArea = CCByValInfo.getNextStackOffset();
3057   MinReservedArea = std::max(MinReservedArea, LinkageSize);
3058 
3059   // Set the size that is at least reserved in the caller of this function.  Tail
3060   // call optimized functions' reserved stack space needs to be aligned so that
3061   // taking the difference between two stack areas will result in an aligned
3062   // stack.
3063   MinReservedArea =
3064       EnsureStackAlignment(Subtarget.getFrameLowering(), MinReservedArea);
3065   FuncInfo->setMinReservedArea(MinReservedArea);
3066 
3067   SmallVector<SDValue, 8> MemOps;
3068 
3069   // If the function takes variable number of arguments, make a frame index for
3070   // the start of the first vararg value... for expansion of llvm.va_start.
3071   if (isVarArg) {
3072     static const MCPhysReg GPArgRegs[] = {
3073       PPC::R3, PPC::R4, PPC::R5, PPC::R6,
3074       PPC::R7, PPC::R8, PPC::R9, PPC::R10,
3075     };
3076     const unsigned NumGPArgRegs = array_lengthof(GPArgRegs);
3077 
3078     static const MCPhysReg FPArgRegs[] = {
3079       PPC::F1, PPC::F2, PPC::F3, PPC::F4, PPC::F5, PPC::F6, PPC::F7,
3080       PPC::F8
3081     };
3082     unsigned NumFPArgRegs = array_lengthof(FPArgRegs);
3083 
3084     if (useSoftFloat())
3085        NumFPArgRegs = 0;
3086 
3087     FuncInfo->setVarArgsNumGPR(CCInfo.getFirstUnallocated(GPArgRegs));
3088     FuncInfo->setVarArgsNumFPR(CCInfo.getFirstUnallocated(FPArgRegs));
3089 
3090     // Make room for NumGPArgRegs and NumFPArgRegs.
3091     int Depth = NumGPArgRegs * PtrVT.getSizeInBits()/8 +
3092                 NumFPArgRegs * MVT(MVT::f64).getSizeInBits()/8;
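         // With all registers in play this is 8 * 4 + 8 * 8 == 96 bytes (just
         // 32 bytes when soft-float leaves NumFPArgRegs at 0).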
3093 
3094     FuncInfo->setVarArgsStackOffset(
3095       MFI.CreateFixedObject(PtrVT.getSizeInBits()/8,
3096                             CCInfo.getNextStackOffset(), true));
3097 
3098     FuncInfo->setVarArgsFrameIndex(MFI.CreateStackObject(Depth, 8, false));
3099     SDValue FIN = DAG.getFrameIndex(FuncInfo->getVarArgsFrameIndex(), PtrVT);
3100 
3101     // The fixed integer arguments of a variadic function are stored to the
3102     // VarArgsFrameIndex on the stack so that they may be loaded by
3103     // dereferencing the result of va_next.
3104     for (unsigned GPRIndex = 0; GPRIndex != NumGPArgRegs; ++GPRIndex) {
3105       // Get an existing live-in vreg, or add a new one.
3106       unsigned VReg = MF.getRegInfo().getLiveInVirtReg(GPArgRegs[GPRIndex]);
3107       if (!VReg)
3108         VReg = MF.addLiveIn(GPArgRegs[GPRIndex], &PPC::GPRCRegClass);
3109 
3110       SDValue Val = DAG.getCopyFromReg(Chain, dl, VReg, PtrVT);
3111       SDValue Store =
3112           DAG.getStore(Val.getValue(1), dl, Val, FIN, MachinePointerInfo());
3113       MemOps.push_back(Store);
3114       // Increment the address by four for the next argument to store
3115       SDValue PtrOff = DAG.getConstant(PtrVT.getSizeInBits()/8, dl, PtrVT);
3116       FIN = DAG.getNode(ISD::ADD, dl, PtrOff.getValueType(), FIN, PtrOff);
3117     }
3118 
3119     // FIXME 32-bit SVR4: We only need to save FP argument registers if CR bit 6
3120     // is set.
3121     // The double arguments are stored to the VarArgsFrameIndex
3122     // on the stack.
3123     for (unsigned FPRIndex = 0; FPRIndex != NumFPArgRegs; ++FPRIndex) {
3124       // Get an existing live-in vreg, or add a new one.
3125       unsigned VReg = MF.getRegInfo().getLiveInVirtReg(FPArgRegs[FPRIndex]);
3126       if (!VReg)
3127         VReg = MF.addLiveIn(FPArgRegs[FPRIndex], &PPC::F8RCRegClass);
3128 
3129       SDValue Val = DAG.getCopyFromReg(Chain, dl, VReg, MVT::f64);
3130       SDValue Store =
3131           DAG.getStore(Val.getValue(1), dl, Val, FIN, MachinePointerInfo());
3132       MemOps.push_back(Store);
3133       // Increment the address by eight for the next argument to store
3134       SDValue PtrOff = DAG.getConstant(MVT(MVT::f64).getSizeInBits()/8, dl,
3135                                          PtrVT);
3136       FIN = DAG.getNode(ISD::ADD, dl, PtrOff.getValueType(), FIN, PtrOff);
3137     }
3138   }
3139 
3140   if (!MemOps.empty())
3141     Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, MemOps);
3142 
3143   return Chain;
3144 }
3145 
3146 // PPC64 passes i8, i16, and i32 values in i64 registers. Promote
3147 // value to MVT::i64 and then truncate to the correct register size.
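     // For example, a sign-extended i32 argument arrives in an i64 register; an
     // AssertSext node records the known extension before truncating back to i32.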
3148 SDValue PPCTargetLowering::extendArgForPPC64(ISD::ArgFlagsTy Flags,
3149                                              EVT ObjectVT, SelectionDAG &DAG,
3150                                              SDValue ArgVal,
3151                                              const SDLoc &dl) const {
3152   if (Flags.isSExt())
3153     ArgVal = DAG.getNode(ISD::AssertSext, dl, MVT::i64, ArgVal,
3154                          DAG.getValueType(ObjectVT));
3155   else if (Flags.isZExt())
3156     ArgVal = DAG.getNode(ISD::AssertZext, dl, MVT::i64, ArgVal,
3157                          DAG.getValueType(ObjectVT));
3158 
3159   return DAG.getNode(ISD::TRUNCATE, dl, ObjectVT, ArgVal);
3160 }
3161 
3162 SDValue PPCTargetLowering::LowerFormalArguments_64SVR4(
3163     SDValue Chain, CallingConv::ID CallConv, bool isVarArg,
3164     const SmallVectorImpl<ISD::InputArg> &Ins, const SDLoc &dl,
3165     SelectionDAG &DAG, SmallVectorImpl<SDValue> &InVals) const {
3166   // TODO: add description of PPC stack frame format, or at least some docs.
3167   //
3168   bool isELFv2ABI = Subtarget.isELFv2ABI();
3169   bool isLittleEndian = Subtarget.isLittleEndian();
3170   MachineFunction &MF = DAG.getMachineFunction();
3171   MachineFrameInfo &MFI = MF.getFrameInfo();
3172   PPCFunctionInfo *FuncInfo = MF.getInfo<PPCFunctionInfo>();
3173 
3174   assert(!(CallConv == CallingConv::Fast && isVarArg) &&
3175          "fastcc not supported on varargs functions");
3176 
3177   EVT PtrVT = getPointerTy(MF.getDataLayout());
3178   // Potential tail calls could cause overwriting of argument stack slots.
3179   bool isImmutable = !(getTargetMachine().Options.GuaranteedTailCallOpt &&
3180                        (CallConv == CallingConv::Fast));
3181   unsigned PtrByteSize = 8;
3182   unsigned LinkageSize = Subtarget.getFrameLowering()->getLinkageSize();
3183 
3184   static const MCPhysReg GPR[] = {
3185     PPC::X3, PPC::X4, PPC::X5, PPC::X6,
3186     PPC::X7, PPC::X8, PPC::X9, PPC::X10,
3187   };
3188   static const MCPhysReg VR[] = {
3189     PPC::V2, PPC::V3, PPC::V4, PPC::V5, PPC::V6, PPC::V7, PPC::V8,
3190     PPC::V9, PPC::V10, PPC::V11, PPC::V12, PPC::V13
3191   };
3192 
3193   const unsigned Num_GPR_Regs = array_lengthof(GPR);
3194   const unsigned Num_FPR_Regs = useSoftFloat() ? 0 : 13;
3195   const unsigned Num_VR_Regs  = array_lengthof(VR);
3196   const unsigned Num_QFPR_Regs = Num_FPR_Regs;
3197 
3198   // Do a first pass over the arguments to determine whether the ABI
3199   // guarantees that our caller has allocated the parameter save area
3200   // on its stack frame.  In the ELFv1 ABI, this is always the case;
3201   // in the ELFv2 ABI, it is true if this is a vararg function or if
3202   // any parameter is located in a stack slot.
3203 
3204   bool HasParameterArea = !isELFv2ABI || isVarArg;
3205   unsigned ParamAreaSize = Num_GPR_Regs * PtrByteSize;
3206   unsigned NumBytes = LinkageSize;
3207   unsigned AvailableFPRs = Num_FPR_Regs;
3208   unsigned AvailableVRs = Num_VR_Regs;
3209   for (unsigned i = 0, e = Ins.size(); i != e; ++i) {
3210     if (Ins[i].Flags.isNest())
3211       continue;
3212 
3213     if (CalculateStackSlotUsed(Ins[i].VT, Ins[i].ArgVT, Ins[i].Flags,
3214                                PtrByteSize, LinkageSize, ParamAreaSize,
3215                                NumBytes, AvailableFPRs, AvailableVRs,
3216                                Subtarget.hasQPX()))
3217       HasParameterArea = true;
3218   }
3219 
3220   // Add DAG nodes to load the arguments or copy them out of registers.  On
3221   // entry to a function on PPC, the arguments start after the linkage area,
3222   // although the first ones are often in registers.
3223 
3224   unsigned ArgOffset = LinkageSize;
3225   unsigned GPR_idx = 0, FPR_idx = 0, VR_idx = 0;
3226   unsigned &QFPR_idx = FPR_idx;
3227   SmallVector<SDValue, 8> MemOps;
3228   Function::const_arg_iterator FuncArg = MF.getFunction()->arg_begin();
3229   unsigned CurArgIdx = 0;
3230   for (unsigned ArgNo = 0, e = Ins.size(); ArgNo != e; ++ArgNo) {
3231     SDValue ArgVal;
3232     bool needsLoad = false;
3233     EVT ObjectVT = Ins[ArgNo].VT;
3234     EVT OrigVT = Ins[ArgNo].ArgVT;
3235     unsigned ObjSize = ObjectVT.getStoreSize();
3236     unsigned ArgSize = ObjSize;
3237     ISD::ArgFlagsTy Flags = Ins[ArgNo].Flags;
3238     if (Ins[ArgNo].isOrigArg()) {
3239       std::advance(FuncArg, Ins[ArgNo].getOrigArgIndex() - CurArgIdx);
3240       CurArgIdx = Ins[ArgNo].getOrigArgIndex();
3241     }
3242     // We re-align the argument offset for each argument, except when using the
3243     // fast calling convention, when we need to make sure we do that only when
3244     // we'll actually use a stack slot.
3245     unsigned CurArgOffset, Align;
3246     auto ComputeArgOffset = [&]() {
3247       /* Respect alignment of argument on the stack.  */
3248       Align = CalculateStackSlotAlignment(ObjectVT, OrigVT, Flags, PtrByteSize);
3249       ArgOffset = ((ArgOffset + Align - 1) / Align) * Align;
3250       CurArgOffset = ArgOffset;
3251     };
3252 
3253     if (CallConv != CallingConv::Fast) {
3254       ComputeArgOffset();
3255 
3256       /* Compute GPR index associated with argument offset.  */
3257       GPR_idx = (ArgOffset - LinkageSize) / PtrByteSize;
3258       GPR_idx = std::min(GPR_idx, Num_GPR_Regs);
3259     }
3260 
3261     // FIXME the codegen can be much improved in some cases.
3262     // We do not have to keep everything in memory.
3263     if (Flags.isByVal()) {
3264       assert(Ins[ArgNo].isOrigArg() && "Byval arguments cannot be implicit");
3265 
3266       if (CallConv == CallingConv::Fast)
3267         ComputeArgOffset();
3268 
3269       // ObjSize is the true size, ArgSize rounded up to multiple of registers.
3270       ObjSize = Flags.getByValSize();
3271       ArgSize = ((ObjSize + PtrByteSize - 1)/PtrByteSize) * PtrByteSize;
3272       // Empty aggregate parameters do not take up registers.  Examples:
3273       //   struct { } a;
3274       //   union  { } b;
3275       //   int c[0];
3276       // etc.  However, we have to provide a place-holder in InVals, so
3277       // pretend we have an 8-byte item at the current address for that
3278       // purpose.
3279       if (!ObjSize) {
3280         int FI = MFI.CreateFixedObject(PtrByteSize, ArgOffset, true);
3281         SDValue FIN = DAG.getFrameIndex(FI, PtrVT);
3282         InVals.push_back(FIN);
3283         continue;
3284       }
3285 
3286       // Create a stack object covering all stack doublewords occupied
3287       // by the argument.  If the argument is (fully or partially) on
3288       // the stack, or if the argument is fully in registers but the
3289       // caller has allocated the parameter save anyway, we can refer
3290       // directly to the caller's stack frame.  Otherwise, create a
3291       // local copy in our own frame.
3292       int FI;
3293       if (HasParameterArea ||
3294           ArgSize + ArgOffset > LinkageSize + Num_GPR_Regs * PtrByteSize)
3295         FI = MFI.CreateFixedObject(ArgSize, ArgOffset, false, true);
3296       else
3297         FI = MFI.CreateStackObject(ArgSize, Align, false);
3298       SDValue FIN = DAG.getFrameIndex(FI, PtrVT);
3299 
3300       // Handle aggregates smaller than 8 bytes.
3301       if (ObjSize < PtrByteSize) {
3302         // The value of the object is its address, which differs from the
3303         // address of the enclosing doubleword on big-endian systems.
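             // For example, a 3-byte aggregate occupies the last 3 bytes of its
             // doubleword on big-endian, so its address is the slot address plus 5.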
3304         SDValue Arg = FIN;
3305         if (!isLittleEndian) {
3306           SDValue ArgOff = DAG.getConstant(PtrByteSize - ObjSize, dl, PtrVT);
3307           Arg = DAG.getNode(ISD::ADD, dl, ArgOff.getValueType(), Arg, ArgOff);
3308         }
3309         InVals.push_back(Arg);
3310 
3311         if (GPR_idx != Num_GPR_Regs) {
3312           unsigned VReg = MF.addLiveIn(GPR[GPR_idx++], &PPC::G8RCRegClass);
3313           SDValue Val = DAG.getCopyFromReg(Chain, dl, VReg, PtrVT);
3314           SDValue Store;
3315 
3316           if (ObjSize==1 || ObjSize==2 || ObjSize==4) {
3317             EVT ObjType = (ObjSize == 1 ? MVT::i8 :
3318                            (ObjSize == 2 ? MVT::i16 : MVT::i32));
3319             Store = DAG.getTruncStore(Val.getValue(1), dl, Val, Arg,
3320                                       MachinePointerInfo(&*FuncArg), ObjType);
3321           } else {
3322             // For sizes that don't fit a truncating store (3, 5, 6, 7),
3323             // store the whole register as-is to the parameter save area
3324             // slot.
3325             Store = DAG.getStore(Val.getValue(1), dl, Val, FIN,
3326                                  MachinePointerInfo(&*FuncArg));
3327           }
3328 
3329           MemOps.push_back(Store);
3330         }
3331         // Whether we copied from a register or not, advance the offset
3332         // into the parameter save area by a full doubleword.
3333         ArgOffset += PtrByteSize;
3334         continue;
3335       }
3336 
3337       // The value of the object is its address, which is the address of
3338       // its first stack doubleword.
3339       InVals.push_back(FIN);
3340 
3341       // Store whatever pieces of the object are in registers to memory.
3342       for (unsigned j = 0; j < ArgSize; j += PtrByteSize) {
3343         if (GPR_idx == Num_GPR_Regs)
3344           break;
3345 
3346         unsigned VReg = MF.addLiveIn(GPR[GPR_idx], &PPC::G8RCRegClass);
3347         SDValue Val = DAG.getCopyFromReg(Chain, dl, VReg, PtrVT);
3348         SDValue Addr = FIN;
3349         if (j) {
3350           SDValue Off = DAG.getConstant(j, dl, PtrVT);
3351           Addr = DAG.getNode(ISD::ADD, dl, Off.getValueType(), Addr, Off);
3352         }
3353         SDValue Store = DAG.getStore(Val.getValue(1), dl, Val, Addr,
3354                                      MachinePointerInfo(&*FuncArg, j));
3355         MemOps.push_back(Store);
3356         ++GPR_idx;
3357       }
3358       ArgOffset += ArgSize;
3359       continue;
3360     }
3361 
3362     switch (ObjectVT.getSimpleVT().SimpleTy) {
3363     default: llvm_unreachable("Unhandled argument type!");
3364     case MVT::i1:
3365     case MVT::i32:
3366     case MVT::i64:
3367       if (Flags.isNest()) {
3368         // The 'nest' parameter, if any, is passed in R11.
3369         unsigned VReg = MF.addLiveIn(PPC::X11, &PPC::G8RCRegClass);
3370         ArgVal = DAG.getCopyFromReg(Chain, dl, VReg, MVT::i64);
3371 
3372         if (ObjectVT == MVT::i32 || ObjectVT == MVT::i1)
3373           ArgVal = extendArgForPPC64(Flags, ObjectVT, DAG, ArgVal, dl);
3374 
3375         break;
3376       }
3377 
3378       // These can be scalar arguments or elements of an integer array type
3379       // passed directly.  Clang may use those instead of "byval" aggregate
3380       // types to avoid forcing arguments to memory unnecessarily.
3381       if (GPR_idx != Num_GPR_Regs) {
3382         unsigned VReg = MF.addLiveIn(GPR[GPR_idx++], &PPC::G8RCRegClass);
3383         ArgVal = DAG.getCopyFromReg(Chain, dl, VReg, MVT::i64);
3384 
3385         if (ObjectVT == MVT::i32 || ObjectVT == MVT::i1)
3386           // PPC64 passes i8, i16, and i32 values in i64 registers. Promote
3387           // value to MVT::i64 and then truncate to the correct register size.
3388           ArgVal = extendArgForPPC64(Flags, ObjectVT, DAG, ArgVal, dl);
3389       } else {
3390         if (CallConv == CallingConv::Fast)
3391           ComputeArgOffset();
3392 
3393         needsLoad = true;
3394         ArgSize = PtrByteSize;
3395       }
3396       if (CallConv != CallingConv::Fast || needsLoad)
3397         ArgOffset += 8;
3398       break;
3399 
3400     case MVT::f32:
3401     case MVT::f64:
3402       // These can be scalar arguments or elements of a float array type
3403       // passed directly.  The latter are used to implement ELFv2 homogenous
3404       // float aggregates.
3405       if (FPR_idx != Num_FPR_Regs) {
3406         unsigned VReg;
3407 
3408         if (ObjectVT == MVT::f32)
3409           VReg = MF.addLiveIn(FPR[FPR_idx],
3410                               Subtarget.hasP8Vector()
3411                                   ? &PPC::VSSRCRegClass
3412                                   : &PPC::F4RCRegClass);
3413         else
3414           VReg = MF.addLiveIn(FPR[FPR_idx], Subtarget.hasVSX()
3415                                                 ? &PPC::VSFRCRegClass
3416                                                 : &PPC::F8RCRegClass);
3417 
3418         ArgVal = DAG.getCopyFromReg(Chain, dl, VReg, ObjectVT);
3419         ++FPR_idx;
3420       } else if (GPR_idx != Num_GPR_Regs && CallConv != CallingConv::Fast) {
3421         // FIXME: We may want to re-enable this for CallingConv::Fast on the P8
3422         // once we support fp <-> gpr moves.
3423 
3424         // This can only ever happen in the presence of f32 array types,
3425         // since otherwise we never run out of FPRs before running out
3426         // of GPRs.
3427         unsigned VReg = MF.addLiveIn(GPR[GPR_idx++], &PPC::G8RCRegClass);
3428         ArgVal = DAG.getCopyFromReg(Chain, dl, VReg, MVT::i64);
3429 
3430         if (ObjectVT == MVT::f32) {
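               // The f32 occupies one 32-bit half of the doubleword-sized GPR;
               // when it sits in the most-significant half (offset 0 on BE,
               // offset 4 on LE), shift it down before truncating.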
3431           if ((ArgOffset % PtrByteSize) == (isLittleEndian ? 4 : 0))
3432             ArgVal = DAG.getNode(ISD::SRL, dl, MVT::i64, ArgVal,
3433                                  DAG.getConstant(32, dl, MVT::i32));
3434           ArgVal = DAG.getNode(ISD::TRUNCATE, dl, MVT::i32, ArgVal);
3435         }
3436 
3437         ArgVal = DAG.getNode(ISD::BITCAST, dl, ObjectVT, ArgVal);
3438       } else {
3439         if (CallConv == CallingConv::Fast)
3440           ComputeArgOffset();
3441 
3442         needsLoad = true;
3443       }
3444 
3445       // When passing an array of floats, the array occupies consecutive
3446       // space in the argument area; only round up to the next doubleword
3447       // at the end of the array.  Otherwise, each float takes 8 bytes.
3448       if (CallConv != CallingConv::Fast || needsLoad) {
3449         ArgSize = Flags.isInConsecutiveRegs() ? ObjSize : PtrByteSize;
3450         ArgOffset += ArgSize;
3451         if (Flags.isInConsecutiveRegsLast())
3452           ArgOffset = ((ArgOffset + PtrByteSize - 1)/PtrByteSize) * PtrByteSize;
3453       }
3454       break;
3455     case MVT::v4f32:
3456     case MVT::v4i32:
3457     case MVT::v8i16:
3458     case MVT::v16i8:
3459     case MVT::v2f64:
3460     case MVT::v2i64:
3461     case MVT::v1i128:
3462       if (!Subtarget.hasQPX()) {
3463       // These can be scalar arguments or elements of a vector array type
3464       // passed directly.  The latter are used to implement ELFv2 homogenous
3465       // vector aggregates.
3466       if (VR_idx != Num_VR_Regs) {
3467         unsigned VReg = MF.addLiveIn(VR[VR_idx], &PPC::VRRCRegClass);
3468         ArgVal = DAG.getCopyFromReg(Chain, dl, VReg, ObjectVT);
3469         ++VR_idx;
3470       } else {
3471         if (CallConv == CallingConv::Fast)
3472           ComputeArgOffset();
3473 
3474         needsLoad = true;
3475       }
3476       if (CallConv != CallingConv::Fast || needsLoad)
3477         ArgOffset += 16;
3478       break;
3479       } // not QPX
3480 
3481       assert(ObjectVT.getSimpleVT().SimpleTy == MVT::v4f32 &&
3482              "Invalid QPX parameter type");
3483       /* fall through */
3484 
3485     case MVT::v4f64:
3486     case MVT::v4i1:
3487       // QPX vectors are treated like their scalar floating-point subregisters
3488       // (except that they're larger).
3489       unsigned Sz = ObjectVT.getSimpleVT().SimpleTy == MVT::v4f32 ? 16 : 32;
3490       if (QFPR_idx != Num_QFPR_Regs) {
3491         const TargetRegisterClass *RC;
3492         switch (ObjectVT.getSimpleVT().SimpleTy) {
3493         case MVT::v4f64: RC = &PPC::QFRCRegClass; break;
3494         case MVT::v4f32: RC = &PPC::QSRCRegClass; break;
3495         default:         RC = &PPC::QBRCRegClass; break;
3496         }
3497 
3498         unsigned VReg = MF.addLiveIn(QFPR[QFPR_idx], RC);
3499         ArgVal = DAG.getCopyFromReg(Chain, dl, VReg, ObjectVT);
3500         ++QFPR_idx;
3501       } else {
3502         if (CallConv == CallingConv::Fast)
3503           ComputeArgOffset();
3504         needsLoad = true;
3505       }
3506       if (CallConv != CallingConv::Fast || needsLoad)
3507         ArgOffset += Sz;
3508       break;
3509     }
3510 
3511     // We need to load the argument to a virtual register if we determined
3512     // above that we ran out of physical registers of the appropriate type.
3513     if (needsLoad) {
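           // Values smaller than their stack slot are right-justified within it
           // on big-endian targets, so point the load at the value's own bytes.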
3514       if (ObjSize < ArgSize && !isLittleEndian)
3515         CurArgOffset += ArgSize - ObjSize;
3516       int FI = MFI.CreateFixedObject(ObjSize, CurArgOffset, isImmutable);
3517       SDValue FIN = DAG.getFrameIndex(FI, PtrVT);
3518       ArgVal = DAG.getLoad(ObjectVT, dl, Chain, FIN, MachinePointerInfo());
3519     }
3520 
3521     InVals.push_back(ArgVal);
3522   }
3523 
3524   // Area that is at least reserved in the caller of this function.
3525   unsigned MinReservedArea;
3526   if (HasParameterArea)
3527     MinReservedArea = std::max(ArgOffset, LinkageSize + 8 * PtrByteSize);
3528   else
3529     MinReservedArea = LinkageSize;
3530 
3531   // Set the size that is at least reserved in caller of this function.  Tail
3532   // call optimized functions' reserved stack space needs to be aligned so that
3533   // taking the difference between two stack areas will result in an aligned
3534   // stack.
3535   MinReservedArea =
3536       EnsureStackAlignment(Subtarget.getFrameLowering(), MinReservedArea);
3537   FuncInfo->setMinReservedArea(MinReservedArea);
3538 
3539   // If the function takes variable number of arguments, make a frame index for
3540   // the start of the first vararg value... for expansion of llvm.va_start.
3541   if (isVarArg) {
3542     int Depth = ArgOffset;
3543 
3544     FuncInfo->setVarArgsFrameIndex(
3545       MFI.CreateFixedObject(PtrByteSize, Depth, true));
3546     SDValue FIN = DAG.getFrameIndex(FuncInfo->getVarArgsFrameIndex(), PtrVT);
3547 
3548     // If this function is vararg, store any remaining integer argument regs
3549     // to their spots on the stack so that they may be loaded by dereferencing
3550     // the result of va_next.
3551     for (GPR_idx = (ArgOffset - LinkageSize) / PtrByteSize;
3552          GPR_idx < Num_GPR_Regs; ++GPR_idx) {
3553       unsigned VReg = MF.addLiveIn(GPR[GPR_idx], &PPC::G8RCRegClass);
3554       SDValue Val = DAG.getCopyFromReg(Chain, dl, VReg, PtrVT);
3555       SDValue Store =
3556           DAG.getStore(Val.getValue(1), dl, Val, FIN, MachinePointerInfo());
3557       MemOps.push_back(Store);
3558       // Increment the address by four for the next argument to store
3559       SDValue PtrOff = DAG.getConstant(PtrByteSize, dl, PtrVT);
3560       FIN = DAG.getNode(ISD::ADD, dl, PtrOff.getValueType(), FIN, PtrOff);
3561     }
3562   }
3563 
3564   if (!MemOps.empty())
3565     Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, MemOps);
3566 
3567   return Chain;
3568 }
3569 
3570 SDValue PPCTargetLowering::LowerFormalArguments_Darwin(
3571     SDValue Chain, CallingConv::ID CallConv, bool isVarArg,
3572     const SmallVectorImpl<ISD::InputArg> &Ins, const SDLoc &dl,
3573     SelectionDAG &DAG, SmallVectorImpl<SDValue> &InVals) const {
3574   // TODO: add description of PPC stack frame format, or at least some docs.
3575   //
3576   MachineFunction &MF = DAG.getMachineFunction();
3577   MachineFrameInfo &MFI = MF.getFrameInfo();
3578   PPCFunctionInfo *FuncInfo = MF.getInfo<PPCFunctionInfo>();
3579 
3580   EVT PtrVT = getPointerTy(MF.getDataLayout());
3581   bool isPPC64 = PtrVT == MVT::i64;
3582   // Potential tail calls could cause overwriting of argument stack slots.
3583   bool isImmutable = !(getTargetMachine().Options.GuaranteedTailCallOpt &&
3584                        (CallConv == CallingConv::Fast));
3585   unsigned PtrByteSize = isPPC64 ? 8 : 4;
3586   unsigned LinkageSize = Subtarget.getFrameLowering()->getLinkageSize();
3587   unsigned ArgOffset = LinkageSize;
3588   // Area that is at least reserved in caller of this function.
3589   unsigned MinReservedArea = ArgOffset;
3590 
3591   static const MCPhysReg GPR_32[] = {           // 32-bit registers.
3592     PPC::R3, PPC::R4, PPC::R5, PPC::R6,
3593     PPC::R7, PPC::R8, PPC::R9, PPC::R10,
3594   };
3595   static const MCPhysReg GPR_64[] = {           // 64-bit registers.
3596     PPC::X3, PPC::X4, PPC::X5, PPC::X6,
3597     PPC::X7, PPC::X8, PPC::X9, PPC::X10,
3598   };
3599   static const MCPhysReg VR[] = {
3600     PPC::V2, PPC::V3, PPC::V4, PPC::V5, PPC::V6, PPC::V7, PPC::V8,
3601     PPC::V9, PPC::V10, PPC::V11, PPC::V12, PPC::V13
3602   };
3603 
3604   const unsigned Num_GPR_Regs = array_lengthof(GPR_32);
3605   const unsigned Num_FPR_Regs = useSoftFloat() ? 0 : 13;
3606   const unsigned Num_VR_Regs  = array_lengthof(VR);
3607 
3608   unsigned GPR_idx = 0, FPR_idx = 0, VR_idx = 0;
3609 
3610   const MCPhysReg *GPR = isPPC64 ? GPR_64 : GPR_32;
3611 
3612   // In 32-bit non-varargs functions, the stack space for vectors is after the
3613   // stack space for non-vectors.  We do not use this space unless we have
3614   // too many vectors to fit in registers, something that only occurs in
3615   // constructed examples, but we have to walk the arglist to figure that
3616   // out.  For the pathological case, compute VecArgOffset as the start of
3617   // the vector parameter area.  Computing VecArgOffset is the entire point
3618   // of the following loop.
3619   unsigned VecArgOffset = ArgOffset;
3620   if (!isVarArg && !isPPC64) {
3621     for (unsigned ArgNo = 0, e = Ins.size(); ArgNo != e;
3622          ++ArgNo) {
3623       EVT ObjectVT = Ins[ArgNo].VT;
3624       ISD::ArgFlagsTy Flags = Ins[ArgNo].Flags;
3625 
3626       if (Flags.isByVal()) {
3627         // ObjSize is the true size, ArgSize rounded up to multiple of regs.
3628         unsigned ObjSize = Flags.getByValSize();
3629         unsigned ArgSize =
3630                 ((ObjSize + PtrByteSize - 1)/PtrByteSize) * PtrByteSize;
3631         VecArgOffset += ArgSize;
3632         continue;
3633       }
3634 
3635       switch(ObjectVT.getSimpleVT().SimpleTy) {
3636       default: llvm_unreachable("Unhandled argument type!");
3637       case MVT::i1:
3638       case MVT::i32:
3639       case MVT::f32:
3640         VecArgOffset += 4;
3641         break;
3642       case MVT::i64:  // PPC64
3643       case MVT::f64:
3644         // FIXME: We are guaranteed to be !isPPC64 at this point.
3645         // Does MVT::i64 apply?
3646         VecArgOffset += 8;
3647         break;
3648       case MVT::v4f32:
3649       case MVT::v4i32:
3650       case MVT::v8i16:
3651       case MVT::v16i8:
3652         // Nothing to do, we're only looking at Nonvector args here.
3653         break;
3654       }
3655     }
3656   }
3657   // We've found where the vector parameter area in memory is.  Skip the
3658   // first 12 parameters; these don't use that memory.
3659   VecArgOffset = ((VecArgOffset+15)/16)*16;
3660   VecArgOffset += 12*16;
3661 
3662   // Add DAG nodes to load the arguments or copy them out of registers.  On
3663   // entry to a function on PPC, the arguments start after the linkage area,
3664   // although the first ones are often in registers.
3665 
3666   SmallVector<SDValue, 8> MemOps;
3667   unsigned nAltivecParamsAtEnd = 0;
3668   Function::const_arg_iterator FuncArg = MF.getFunction()->arg_begin();
3669   unsigned CurArgIdx = 0;
3670   for (unsigned ArgNo = 0, e = Ins.size(); ArgNo != e; ++ArgNo) {
3671     SDValue ArgVal;
3672     bool needsLoad = false;
3673     EVT ObjectVT = Ins[ArgNo].VT;
3674     unsigned ObjSize = ObjectVT.getSizeInBits()/8;
3675     unsigned ArgSize = ObjSize;
3676     ISD::ArgFlagsTy Flags = Ins[ArgNo].Flags;
3677     if (Ins[ArgNo].isOrigArg()) {
3678       std::advance(FuncArg, Ins[ArgNo].getOrigArgIndex() - CurArgIdx);
3679       CurArgIdx = Ins[ArgNo].getOrigArgIndex();
3680     }
3681     unsigned CurArgOffset = ArgOffset;
3682 
3683     // AltiVec parameters in varargs or 64-bit functions are padded to 16 bytes.
3684     if (ObjectVT==MVT::v4f32 || ObjectVT==MVT::v4i32 ||
3685         ObjectVT==MVT::v8i16 || ObjectVT==MVT::v16i8) {
3686       if (isVarArg || isPPC64) {
3687         MinReservedArea = ((MinReservedArea+15)/16)*16;
3688         MinReservedArea += CalculateStackSlotSize(ObjectVT,
3689                                                   Flags,
3690                                                   PtrByteSize);
3691       } else  nAltivecParamsAtEnd++;
3692     } else
3693       // Calculate min reserved area.
3694       MinReservedArea += CalculateStackSlotSize(Ins[ArgNo].VT,
3695                                                 Flags,
3696                                                 PtrByteSize);
3697 
3698     // FIXME the codegen can be much improved in some cases.
3699     // We do not have to keep everything in memory.
3700     if (Flags.isByVal()) {
3701       assert(Ins[ArgNo].isOrigArg() && "Byval arguments cannot be implicit");
3702 
3703       // ObjSize is the true size, ArgSize rounded up to multiple of registers.
3704       ObjSize = Flags.getByValSize();
3705       ArgSize = ((ObjSize + PtrByteSize - 1)/PtrByteSize) * PtrByteSize;
3706       // Objects of size 1 and 2 are right justified, everything else is
3707       // left justified.  This means the memory address is adjusted forwards.
3708       if (ObjSize==1 || ObjSize==2) {
3709         CurArgOffset = CurArgOffset + (4 - ObjSize);
3710       }
3711       // The value of the object is its address.
3712       int FI = MFI.CreateFixedObject(ObjSize, CurArgOffset, false, true);
3713       SDValue FIN = DAG.getFrameIndex(FI, PtrVT);
3714       InVals.push_back(FIN);
3715       if (ObjSize==1 || ObjSize==2) {
3716         if (GPR_idx != Num_GPR_Regs) {
3717           unsigned VReg;
3718           if (isPPC64)
3719             VReg = MF.addLiveIn(GPR[GPR_idx], &PPC::G8RCRegClass);
3720           else
3721             VReg = MF.addLiveIn(GPR[GPR_idx], &PPC::GPRCRegClass);
3722           SDValue Val = DAG.getCopyFromReg(Chain, dl, VReg, PtrVT);
3723           EVT ObjType = ObjSize == 1 ? MVT::i8 : MVT::i16;
3724           SDValue Store =
3725               DAG.getTruncStore(Val.getValue(1), dl, Val, FIN,
3726                                 MachinePointerInfo(&*FuncArg), ObjType);
3727           MemOps.push_back(Store);
3728           ++GPR_idx;
3729         }
3730 
3731         ArgOffset += PtrByteSize;
3732 
3733         continue;
3734       }
3735       for (unsigned j = 0; j < ArgSize; j += PtrByteSize) {
3736         // Store whatever pieces of the object are in registers
3737         // to memory.  ArgOffset will be the address of the beginning
3738         // of the object.
3739         if (GPR_idx != Num_GPR_Regs) {
3740           unsigned VReg;
3741           if (isPPC64)
3742             VReg = MF.addLiveIn(GPR[GPR_idx], &PPC::G8RCRegClass);
3743           else
3744             VReg = MF.addLiveIn(GPR[GPR_idx], &PPC::GPRCRegClass);
3745           int FI = MFI.CreateFixedObject(PtrByteSize, ArgOffset, true);
3746           SDValue FIN = DAG.getFrameIndex(FI, PtrVT);
3747           SDValue Val = DAG.getCopyFromReg(Chain, dl, VReg, PtrVT);
3748           SDValue Store = DAG.getStore(Val.getValue(1), dl, Val, FIN,
3749                                        MachinePointerInfo(&*FuncArg, j));
3750           MemOps.push_back(Store);
3751           ++GPR_idx;
3752           ArgOffset += PtrByteSize;
3753         } else {
3754           ArgOffset += ArgSize - (ArgOffset-CurArgOffset);
3755           break;
3756         }
3757       }
3758       continue;
3759     }
3760 
3761     switch (ObjectVT.getSimpleVT().SimpleTy) {
3762     default: llvm_unreachable("Unhandled argument type!");
3763     case MVT::i1:
3764     case MVT::i32:
3765       if (!isPPC64) {
3766         if (GPR_idx != Num_GPR_Regs) {
3767           unsigned VReg = MF.addLiveIn(GPR[GPR_idx], &PPC::GPRCRegClass);
3768           ArgVal = DAG.getCopyFromReg(Chain, dl, VReg, MVT::i32);
3769 
3770           if (ObjectVT == MVT::i1)
3771             ArgVal = DAG.getNode(ISD::TRUNCATE, dl, MVT::i1, ArgVal);
3772 
3773           ++GPR_idx;
3774         } else {
3775           needsLoad = true;
3776           ArgSize = PtrByteSize;
3777         }
3778         // All int arguments reserve stack space in the Darwin ABI.
3779         ArgOffset += PtrByteSize;
3780         break;
3781       }
3782       LLVM_FALLTHROUGH;
3783     case MVT::i64:  // PPC64
3784       if (GPR_idx != Num_GPR_Regs) {
3785         unsigned VReg = MF.addLiveIn(GPR[GPR_idx], &PPC::G8RCRegClass);
3786         ArgVal = DAG.getCopyFromReg(Chain, dl, VReg, MVT::i64);
3787 
3788         if (ObjectVT == MVT::i32 || ObjectVT == MVT::i1)
3789           // PPC64 passes i8, i16, and i32 values in i64 registers. Promote
3790           // value to MVT::i64 and then truncate to the correct register size.
3791           ArgVal = extendArgForPPC64(Flags, ObjectVT, DAG, ArgVal, dl);
3792 
3793         ++GPR_idx;
3794       } else {
3795         needsLoad = true;
3796         ArgSize = PtrByteSize;
3797       }
3798       // All int arguments reserve stack space in the Darwin ABI.
3799       ArgOffset += 8;
3800       break;
3801 
3802     case MVT::f32:
3803     case MVT::f64:
3804       // Every 4 bytes of argument space consumes one of the GPRs available for
3805       // argument passing.
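           // An 8-byte double therefore shadows two GPRs on 32-bit targets.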
3806       if (GPR_idx != Num_GPR_Regs) {
3807         ++GPR_idx;
3808         if (ObjSize == 8 && GPR_idx != Num_GPR_Regs && !isPPC64)
3809           ++GPR_idx;
3810       }
3811       if (FPR_idx != Num_FPR_Regs) {
3812         unsigned VReg;
3813 
3814         if (ObjectVT == MVT::f32)
3815           VReg = MF.addLiveIn(FPR[FPR_idx], &PPC::F4RCRegClass);
3816         else
3817           VReg = MF.addLiveIn(FPR[FPR_idx], &PPC::F8RCRegClass);
3818 
3819         ArgVal = DAG.getCopyFromReg(Chain, dl, VReg, ObjectVT);
3820         ++FPR_idx;
3821       } else {
3822         needsLoad = true;
3823       }
3824 
3825       // All FP arguments reserve stack space in the Darwin ABI.
3826       ArgOffset += isPPC64 ? 8 : ObjSize;
3827       break;
3828     case MVT::v4f32:
3829     case MVT::v4i32:
3830     case MVT::v8i16:
3831     case MVT::v16i8:
3832       // Note that vector arguments in registers don't reserve stack space,
3833       // except in varargs functions.
3834       if (VR_idx != Num_VR_Regs) {
3835         unsigned VReg = MF.addLiveIn(VR[VR_idx], &PPC::VRRCRegClass);
3836         ArgVal = DAG.getCopyFromReg(Chain, dl, VReg, ObjectVT);
3837         if (isVarArg) {
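               // Align the offset to a 16-byte boundary, consuming one GPR for
               // each pointer-sized slot skipped, then account for the vector's
               // 16 bytes and the GPRs that overlap that slot.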
3838           while ((ArgOffset % 16) != 0) {
3839             ArgOffset += PtrByteSize;
3840             if (GPR_idx != Num_GPR_Regs)
3841               GPR_idx++;
3842           }
3843           ArgOffset += 16;
3844           GPR_idx = std::min(GPR_idx+4, Num_GPR_Regs); // FIXME correct for ppc64?
3845         }
3846         ++VR_idx;
3847       } else {
3848         if (!isVarArg && !isPPC64) {
3849           // Vectors go after all the nonvectors.
3850           CurArgOffset = VecArgOffset;
3851           VecArgOffset += 16;
3852         } else {
3853           // Vectors are aligned.
3854           ArgOffset = ((ArgOffset+15)/16)*16;
3855           CurArgOffset = ArgOffset;
3856           ArgOffset += 16;
3857         }
3858         needsLoad = true;
3859       }
3860       break;
3861     }
3862 
3863     // We need to load the argument to a virtual register if we determined above
3864     // that we ran out of physical registers of the appropriate type.
3865     if (needsLoad) {
3866       int FI = MFI.CreateFixedObject(ObjSize,
3867                                      CurArgOffset + (ArgSize - ObjSize),
3868                                      isImmutable);
3869       SDValue FIN = DAG.getFrameIndex(FI, PtrVT);
3870       ArgVal = DAG.getLoad(ObjectVT, dl, Chain, FIN, MachinePointerInfo());
3871     }
3872 
3873     InVals.push_back(ArgVal);
3874   }
3875 
3876   // Allow for Altivec parameters at the end, if needed.
3877   if (nAltivecParamsAtEnd) {
3878     MinReservedArea = ((MinReservedArea+15)/16)*16;
3879     MinReservedArea += 16*nAltivecParamsAtEnd;
3880   }
3881 
3882   // Area that is at least reserved in the caller of this function.
3883   MinReservedArea = std::max(MinReservedArea, LinkageSize + 8 * PtrByteSize);
3884 
3885   // Set the size that is at least reserved in caller of this function.  Tail
3886   // call optimized functions' reserved stack space needs to be aligned so that
3887   // taking the difference between two stack areas will result in an aligned
3888   // stack.
3889   MinReservedArea =
3890       EnsureStackAlignment(Subtarget.getFrameLowering(), MinReservedArea);
3891   FuncInfo->setMinReservedArea(MinReservedArea);
3892 
3893   // If the function takes a variable number of arguments, make a frame index
3894   // for the start of the first vararg value... for expansion of llvm.va_start.
3895   if (isVarArg) {
3896     int Depth = ArgOffset;
3897 
3898     FuncInfo->setVarArgsFrameIndex(
3899       MFI.CreateFixedObject(PtrVT.getSizeInBits()/8,
3900                             Depth, true));
3901     SDValue FIN = DAG.getFrameIndex(FuncInfo->getVarArgsFrameIndex(), PtrVT);
3902 
3903     // If this function is vararg, store any remaining integer argument regs
3904     // to their spots on the stack so that they may be loaded by dereferencing
3905     // the result of va_next.
3906     for (; GPR_idx != Num_GPR_Regs; ++GPR_idx) {
3907       unsigned VReg;
3908 
3909       if (isPPC64)
3910         VReg = MF.addLiveIn(GPR[GPR_idx], &PPC::G8RCRegClass);
3911       else
3912         VReg = MF.addLiveIn(GPR[GPR_idx], &PPC::GPRCRegClass);
3913 
3914       SDValue Val = DAG.getCopyFromReg(Chain, dl, VReg, PtrVT);
3915       SDValue Store =
3916           DAG.getStore(Val.getValue(1), dl, Val, FIN, MachinePointerInfo());
3917       MemOps.push_back(Store);
3918       // Increment the address by the pointer size for the next argument to store
3919       SDValue PtrOff = DAG.getConstant(PtrVT.getSizeInBits()/8, dl, PtrVT);
3920       FIN = DAG.getNode(ISD::ADD, dl, PtrOff.getValueType(), FIN, PtrOff);
3921     }
3922   }
3923 
3924   if (!MemOps.empty())
3925     Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, MemOps);
3926 
3927   return Chain;
3928 }
3929 
3930 /// CalculateTailCallSPDiff - Get the amount the stack pointer has to be
3931 /// adjusted to accommodate the arguments for the tailcall.
3932 static int CalculateTailCallSPDiff(SelectionDAG& DAG, bool isTailCall,
3933                                    unsigned ParamSize) {
3934 
3935   if (!isTailCall) return 0;
3936 
3937   PPCFunctionInfo *FI = DAG.getMachineFunction().getInfo<PPCFunctionInfo>();
3938   unsigned CallerMinReservedArea = FI->getMinReservedArea();
3939   int SPDiff = (int)CallerMinReservedArea - (int)ParamSize;
3940   // Remember only if the new adjustment is bigger.
3941   if (SPDiff < FI->getTailCallSPDelta())
3942     FI->setTailCallSPDelta(SPDiff);
3943 
3944   return SPDiff;
3945 }
3946 
3947 static bool isFunctionGlobalAddress(SDValue Callee);
3948 
3949 static bool
3950 resideInSameModule(SDValue Callee, Reloc::Model RelMod) {
3951   // If !G, Callee can be an external symbol.
3952   GlobalAddressSDNode *G = dyn_cast<GlobalAddressSDNode>(Callee);
3953   if (!G) return false;
3954 
3955   const GlobalValue *GV = G->getGlobal();
3956 
3957   if (GV->isDeclaration()) return false;
3958 
3959   switch(GV->getLinkage()) {
3960   default: llvm_unreachable("unknown linkage type");
3961   case GlobalValue::AvailableExternallyLinkage:
3962   case GlobalValue::ExternalWeakLinkage:
3963     return false;
3964 
3965   // Callee with weak linkage is allowed if it has hidden or protected
3966   // visibility
3967   case GlobalValue::LinkOnceAnyLinkage:
3968   case GlobalValue::LinkOnceODRLinkage: // e.g. c++ inline functions
3969   case GlobalValue::WeakAnyLinkage:
3970   case GlobalValue::WeakODRLinkage:     // e.g. c++ template instantiation
3971     if (GV->hasDefaultVisibility())
3972       return false;
3973 
3974   case GlobalValue::ExternalLinkage:
3975   case GlobalValue::InternalLinkage:
3976   case GlobalValue::PrivateLinkage:
3977     break;
3978   }
3979 
3980   // With '-fPIC', calling a default-visibility function requires a 'nop' to
3981   // be inserted after the call, regardless of whether the function resides
3982   // in the same module, so we treat it as being in a different module.
3983   if (RelMod == Reloc::PIC_ && GV->hasDefaultVisibility())
3984     return false;
3985 
3986   return true;
3987 }
3988 
3989 static bool
3990 needStackSlotPassParameters(const PPCSubtarget &Subtarget,
3991                             const SmallVectorImpl<ISD::OutputArg> &Outs) {
3992   assert(Subtarget.isSVR4ABI() && Subtarget.isPPC64());
3993 
3994   const unsigned PtrByteSize = 8;
3995   const unsigned LinkageSize = Subtarget.getFrameLowering()->getLinkageSize();
3996 
3997   static const MCPhysReg GPR[] = {
3998     PPC::X3, PPC::X4, PPC::X5, PPC::X6,
3999     PPC::X7, PPC::X8, PPC::X9, PPC::X10,
4000   };
4001   static const MCPhysReg VR[] = {
4002     PPC::V2, PPC::V3, PPC::V4, PPC::V5, PPC::V6, PPC::V7, PPC::V8,
4003     PPC::V9, PPC::V10, PPC::V11, PPC::V12, PPC::V13
4004   };
4005 
4006   const unsigned NumGPRs = array_lengthof(GPR);
4007   const unsigned NumFPRs = 13;
4008   const unsigned NumVRs = array_lengthof(VR);
4009   const unsigned ParamAreaSize = NumGPRs * PtrByteSize;
4010 
4011   unsigned NumBytes = LinkageSize;
4012   unsigned AvailableFPRs = NumFPRs;
4013   unsigned AvailableVRs = NumVRs;
4014 
4015   for (const ISD::OutputArg& Param : Outs) {
4016     if (Param.Flags.isNest()) continue;
4017 
4018     if (CalculateStackSlotUsed(Param.VT, Param.ArgVT, Param.Flags,
4019                                PtrByteSize, LinkageSize, ParamAreaSize,
4020                                NumBytes, AvailableFPRs, AvailableVRs,
4021                                Subtarget.hasQPX()))
4022       return true;
4023   }
4024   return false;
4025 }
4026 
4027 static bool
4028 hasSameArgumentList(const Function *CallerFn, ImmutableCallSite *CS) {
4029   if (CS->arg_size() != CallerFn->getArgumentList().size())
4030     return false;
4031 
4032   ImmutableCallSite::arg_iterator CalleeArgIter = CS->arg_begin();
4033   ImmutableCallSite::arg_iterator CalleeArgEnd = CS->arg_end();
4034   Function::const_arg_iterator CallerArgIter = CallerFn->arg_begin();
4035 
4036   for (; CalleeArgIter != CalleeArgEnd; ++CalleeArgIter, ++CallerArgIter) {
4037     const Value* CalleeArg = *CalleeArgIter;
4038     const Value* CallerArg = &(*CallerArgIter);
4039     if (CalleeArg == CallerArg)
4040       continue;
4041 
4042     // e.g. @caller([4 x i64] %a, [4 x i64] %b) {
4043     //        tail call @callee([4 x i64] undef, [4 x i64] %b)
4044     //      }
4045     // The 1st callee argument is undef and has the same type as the caller's.
4046     if (CalleeArg->getType() == CallerArg->getType() &&
4047         isa<UndefValue>(CalleeArg))
4048       continue;
4049 
4050     return false;
4051   }
4052 
4053   return true;
4054 }
4055 
4056 bool
4057 PPCTargetLowering::IsEligibleForTailCallOptimization_64SVR4(
4058                                     SDValue Callee,
4059                                     CallingConv::ID CalleeCC,
4060                                     ImmutableCallSite *CS,
4061                                     bool isVarArg,
4062                                     const SmallVectorImpl<ISD::OutputArg> &Outs,
4063                                     const SmallVectorImpl<ISD::InputArg> &Ins,
4064                                     SelectionDAG& DAG) const {
4065   bool TailCallOpt = getTargetMachine().Options.GuaranteedTailCallOpt;
4066 
4067   if (DisableSCO && !TailCallOpt) return false;
4068 
4069   // Variadic argument functions are not supported.
4070   if (isVarArg) return false;
4071 
4072   MachineFunction &MF = DAG.getMachineFunction();
4073   CallingConv::ID CallerCC = MF.getFunction()->getCallingConv();
4074 
4075   // Tail or sibling call optimization (TCO/SCO) requires that the callee and
4076   // caller use the same calling convention.
4077   if (CallerCC != CalleeCC) return false;
4078 
4079   // Only the C and fast calling conventions are supported.
4080   if (CalleeCC != CallingConv::Fast && CalleeCC != CallingConv::C)
4081     return false;
4082 
4083   // A caller with any byval parameter is not supported.
4084   if (any_of(Ins, [](const ISD::InputArg &IA) { return IA.Flags.isByVal(); }))
4085     return false;
4086 
4087   // A callee with any byval parameter is not supported either.
4088   // Note: This is a quick workaround, because in some cases, e.g. when the
4089   // caller's stack size > callee's stack size, we are still able to apply
4090   // sibling call optimization. See: https://reviews.llvm.org/D23441#513574
4091   if (any_of(Outs, [](const ISD::OutputArg& OA) { return OA.Flags.isByVal(); }))
4092     return false;
4093 
4094   // No TCO/SCO on indirect calls because the caller has to restore its TOC.
4095   if (!isFunctionGlobalAddress(Callee) &&
4096       !isa<ExternalSymbolSDNode>(Callee))
4097     return false;
4098 
4099   // Check if Callee resides in the same module, because for now the PPC64
4100   // SVR4 ABI (ELFv1/ELFv2) doesn't allow tail calls to a symbol residing in
4101   // another module.
4102   // ref: https://bugzilla.mozilla.org/show_bug.cgi?id=973977
4103   if (!resideInSameModule(Callee, getTargetMachine().getRelocationModel()))
4104     return false;
4105 
4106   // TCO allows altering callee ABI, so we don't have to check further.
4107   if (CalleeCC == CallingConv::Fast && TailCallOpt)
4108     return true;
4109 
4110   if (DisableSCO) return false;
4111 
4112   // If the callee uses the same argument list as the caller, we can apply SCO
4113   // in this case. Otherwise, we need to check whether the callee needs stack
4114   // slots for passing arguments.
4115   if (!hasSameArgumentList(MF.getFunction(), CS) &&
4116       needStackSlotPassParameters(Subtarget, Outs)) {
4117     return false;
4118   }
4119 
4120   return true;
4121 }
4122 
4123 /// IsEligibleForTailCallOptimization - Check whether the call is eligible
4124 /// for tail call optimization. Targets which want to do tail call
4125 /// optimization should implement this function.
4126 bool
4127 PPCTargetLowering::IsEligibleForTailCallOptimization(SDValue Callee,
4128                                                      CallingConv::ID CalleeCC,
4129                                                      bool isVarArg,
4130                                       const SmallVectorImpl<ISD::InputArg> &Ins,
4131                                                      SelectionDAG& DAG) const {
4132   if (!getTargetMachine().Options.GuaranteedTailCallOpt)
4133     return false;
4134 
4135   // Variable argument functions are not supported.
4136   if (isVarArg)
4137     return false;
4138 
4139   MachineFunction &MF = DAG.getMachineFunction();
4140   CallingConv::ID CallerCC = MF.getFunction()->getCallingConv();
4141   if (CalleeCC == CallingConv::Fast && CallerCC == CalleeCC) {
4142     // Functions containing by val parameters are not supported.
4143     for (unsigned i = 0; i != Ins.size(); i++) {
4144        ISD::ArgFlagsTy Flags = Ins[i].Flags;
4145        if (Flags.isByVal()) return false;
4146     }
4147 
4148     // Non-PIC/GOT tail calls are supported.
4149     if (getTargetMachine().getRelocationModel() != Reloc::PIC_)
4150       return true;
4151 
4152     // At the moment we can only do local tail calls (in same module, hidden
4153     // or protected) if we are generating PIC.
4154     if (GlobalAddressSDNode *G = dyn_cast<GlobalAddressSDNode>(Callee))
4155       return G->getGlobal()->hasHiddenVisibility()
4156           || G->getGlobal()->hasProtectedVisibility();
4157   }
4158 
4159   return false;
4160 }
4161 
4162 /// isBLACompatibleAddress - Return the immediate to use if the specified
4163 /// 32-bit value is representable in the immediate field of a BxA instruction.
4164 static SDNode *isBLACompatibleAddress(SDValue Op, SelectionDAG &DAG) {
4165   ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op);
4166   if (!C) return nullptr;
4167 
4168   int Addr = C->getZExtValue();
4169   if ((Addr & 3) != 0 ||  // Low 2 bits are implicitly zero.
4170       SignExtend32<26>(Addr) != Addr)
4171     return nullptr;  // Top 6 bits have to be sext of immediate.
4172 
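       // Drop the two implicit zero bits; the branch immediate encodes a
       // word-aligned address.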
4173   return DAG
4174       .getConstant(
4175           (int)C->getZExtValue() >> 2, SDLoc(Op),
4176           DAG.getTargetLoweringInfo().getPointerTy(DAG.getDataLayout()))
4177       .getNode();
4178 }
4179 
4180 namespace {
4181 
4182 struct TailCallArgumentInfo {
4183   SDValue Arg;
4184   SDValue FrameIdxOp;
4185   int       FrameIdx;
4186 
4187   TailCallArgumentInfo() : FrameIdx(0) {}
4188 };
4189 }
4190 
4191 /// StoreTailCallArgumentsToStackSlot - Stores arguments to their stack slot.
4192 static void StoreTailCallArgumentsToStackSlot(
4193     SelectionDAG &DAG, SDValue Chain,
4194     const SmallVectorImpl<TailCallArgumentInfo> &TailCallArgs,
4195     SmallVectorImpl<SDValue> &MemOpChains, const SDLoc &dl) {
4196   for (unsigned i = 0, e = TailCallArgs.size(); i != e; ++i) {
4197     SDValue Arg = TailCallArgs[i].Arg;
4198     SDValue FIN = TailCallArgs[i].FrameIdxOp;
4199     int FI = TailCallArgs[i].FrameIdx;
4200     // Store relative to framepointer.
4201     MemOpChains.push_back(DAG.getStore(
4202         Chain, dl, Arg, FIN,
4203         MachinePointerInfo::getFixedStack(DAG.getMachineFunction(), FI)));
4204   }
4205 }
4206 
4207 /// EmitTailCallStoreFPAndRetAddr - Move the frame pointer and return address to
4208 /// the appropriate stack slot for the tail call optimized function call.
4209 static SDValue EmitTailCallStoreFPAndRetAddr(SelectionDAG &DAG, SDValue Chain,
4210                                              SDValue OldRetAddr, SDValue OldFP,
4211                                              int SPDiff, const SDLoc &dl) {
4212   if (SPDiff) {
4213     // Calculate the new stack slot for the return address.
4214     MachineFunction &MF = DAG.getMachineFunction();
4215     const PPCSubtarget &Subtarget = MF.getSubtarget<PPCSubtarget>();
4216     const PPCFrameLowering *FL = Subtarget.getFrameLowering();
4217     bool isPPC64 = Subtarget.isPPC64();
4218     int SlotSize = isPPC64 ? 8 : 4;
4219     int NewRetAddrLoc = SPDiff + FL->getReturnSaveOffset();
4220     int NewRetAddr = MF.getFrameInfo().CreateFixedObject(SlotSize,
4221                                                          NewRetAddrLoc, true);
4222     EVT VT = isPPC64 ? MVT::i64 : MVT::i32;
4223     SDValue NewRetAddrFrIdx = DAG.getFrameIndex(NewRetAddr, VT);
4224     Chain = DAG.getStore(Chain, dl, OldRetAddr, NewRetAddrFrIdx,
4225                          MachinePointerInfo::getFixedStack(MF, NewRetAddr));
4226 
4227     // When using the 32/64-bit SVR4 ABI there is no need to move the FP stack
4228     // slot as the FP is never overwritten.
4229     if (Subtarget.isDarwinABI()) {
4230       int NewFPLoc = SPDiff + FL->getFramePointerSaveOffset();
4231       int NewFPIdx = MF.getFrameInfo().CreateFixedObject(SlotSize, NewFPLoc,
4232                                                          true);
4233       SDValue NewFramePtrIdx = DAG.getFrameIndex(NewFPIdx, VT);
4234       Chain = DAG.getStore(Chain, dl, OldFP, NewFramePtrIdx,
4235                            MachinePointerInfo::getFixedStack(
4236                                DAG.getMachineFunction(), NewFPIdx));
4237     }
4238   }
4239   return Chain;
4240 }
4241 
4242 /// CalculateTailCallArgDest - Remember Argument for later processing. Calculate
4243 /// the position of the argument.
4244 static void
4245 CalculateTailCallArgDest(SelectionDAG &DAG, MachineFunction &MF, bool isPPC64,
4246                          SDValue Arg, int SPDiff, unsigned ArgOffset,
4247                      SmallVectorImpl<TailCallArgumentInfo>& TailCallArguments) {
4248   int Offset = ArgOffset + SPDiff;
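       // Compute the argument's size in bytes, rounding up from its size in bits.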
4249   uint32_t OpSize = (Arg.getValueSizeInBits() + 7) / 8;
4250   int FI = MF.getFrameInfo().CreateFixedObject(OpSize, Offset, true);
4251   EVT VT = isPPC64 ? MVT::i64 : MVT::i32;
4252   SDValue FIN = DAG.getFrameIndex(FI, VT);
4253   TailCallArgumentInfo Info;
4254   Info.Arg = Arg;
4255   Info.FrameIdxOp = FIN;
4256   Info.FrameIdx = FI;
4257   TailCallArguments.push_back(Info);
4258 }
4259 
4260 /// EmitTailCallLoadFPAndRetAddr - Emit loads from the frame pointer and return
4261 /// address stack slots. Returns the chain as result and the loaded values in
4262 /// LROpOut/FPOpOut. Used when tail calling.
4263 SDValue PPCTargetLowering::EmitTailCallLoadFPAndRetAddr(
4264     SelectionDAG &DAG, int SPDiff, SDValue Chain, SDValue &LROpOut,
4265     SDValue &FPOpOut, const SDLoc &dl) const {
4266   if (SPDiff) {
4267     // Load the LR and FP stack slot for later adjusting.
4268     EVT VT = Subtarget.isPPC64() ? MVT::i64 : MVT::i32;
4269     LROpOut = getReturnAddrFrameIndex(DAG);
4270     LROpOut = DAG.getLoad(VT, dl, Chain, LROpOut, MachinePointerInfo());
4271     Chain = SDValue(LROpOut.getNode(), 1);
4272 
4273     // When using the 32/64-bit SVR4 ABI there is no need to load the FP stack
4274     // slot as the FP is never overwritten.
4275     if (Subtarget.isDarwinABI()) {
4276       FPOpOut = getFramePointerFrameIndex(DAG);
4277       FPOpOut = DAG.getLoad(VT, dl, Chain, FPOpOut, MachinePointerInfo());
4278       Chain = SDValue(FPOpOut.getNode(), 1);
4279     }
4280   }
4281   return Chain;
4282 }
4283 
4284 /// CreateCopyOfByValArgument - Make a copy of an aggregate at address specified
4285 /// by "Src" to address "Dst" of size "Size".  Alignment information is
4286 /// specified by the specific parameter attribute. The copy will be passed as
4287 /// a byval function parameter.
4288 /// Sometimes what we are copying is the end of a larger object, the part that
4289 /// does not fit in registers.
4290 static SDValue CreateCopyOfByValArgument(SDValue Src, SDValue Dst,
4291                                          SDValue Chain, ISD::ArgFlagsTy Flags,
4292                                          SelectionDAG &DAG, const SDLoc &dl) {
4293   SDValue SizeNode = DAG.getConstant(Flags.getByValSize(), dl, MVT::i32);
4294   return DAG.getMemcpy(Chain, dl, Dst, Src, SizeNode, Flags.getByValAlign(),
4295                        false, false, false, MachinePointerInfo(),
4296                        MachinePointerInfo());
4297 }
4298 
4299 /// LowerMemOpCallTo - Store the argument to the stack, or remember it in case
4300 /// of tail calls.
4301 static void LowerMemOpCallTo(
4302     SelectionDAG &DAG, MachineFunction &MF, SDValue Chain, SDValue Arg,
4303     SDValue PtrOff, int SPDiff, unsigned ArgOffset, bool isPPC64,
4304     bool isTailCall, bool isVector, SmallVectorImpl<SDValue> &MemOpChains,
4305     SmallVectorImpl<TailCallArgumentInfo> &TailCallArguments, const SDLoc &dl) {
4306   EVT PtrVT = DAG.getTargetLoweringInfo().getPointerTy(DAG.getDataLayout());
4307   if (!isTailCall) {
4308     if (isVector) {
4309       SDValue StackPtr;
4310       if (isPPC64)
4311         StackPtr = DAG.getRegister(PPC::X1, MVT::i64);
4312       else
4313         StackPtr = DAG.getRegister(PPC::R1, MVT::i32);
4314       PtrOff = DAG.getNode(ISD::ADD, dl, PtrVT, StackPtr,
4315                            DAG.getConstant(ArgOffset, dl, PtrVT));
4316     }
4317     MemOpChains.push_back(
4318         DAG.getStore(Chain, dl, Arg, PtrOff, MachinePointerInfo()));
4319     // Calculate and remember argument location.
4320   } else CalculateTailCallArgDest(DAG, MF, isPPC64, Arg, SPDiff, ArgOffset,
4321                                   TailCallArguments);
4322 }
4323 
4324 static void
4325 PrepareTailCall(SelectionDAG &DAG, SDValue &InFlag, SDValue &Chain,
4326                 const SDLoc &dl, int SPDiff, unsigned NumBytes, SDValue LROp,
4327                 SDValue FPOp,
4328                 SmallVectorImpl<TailCallArgumentInfo> &TailCallArguments) {
4329   // Emit a sequence of copyto/copyfrom virtual registers for arguments that
4330   // might overwrite each other in case of tail call optimization.
4331   SmallVector<SDValue, 8> MemOpChains2;
4332   // Do not flag preceding copytoreg stuff together with the following stuff.
4333   InFlag = SDValue();
4334   StoreTailCallArgumentsToStackSlot(DAG, Chain, TailCallArguments,
4335                                     MemOpChains2, dl);
4336   if (!MemOpChains2.empty())
4337     Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, MemOpChains2);
4338 
4339   // Store the return address to the appropriate stack slot.
4340   Chain = EmitTailCallStoreFPAndRetAddr(DAG, Chain, LROp, FPOp, SPDiff, dl);
4341 
4342   // Emit callseq_end just before tailcall node.
4343   Chain = DAG.getCALLSEQ_END(Chain, DAG.getIntPtrConstant(NumBytes, dl, true),
4344                              DAG.getIntPtrConstant(0, dl, true), InFlag, dl);
4345   InFlag = Chain.getValue(1);
4346 }
4347 
4348 // Is this global address that of a function that can be called by name (as
4349 // opposed to something that must hold a descriptor for an indirect call)?
4350 static bool isFunctionGlobalAddress(SDValue Callee) {
4351   if (GlobalAddressSDNode *G = dyn_cast<GlobalAddressSDNode>(Callee)) {
4352     if (Callee.getOpcode() == ISD::GlobalTLSAddress ||
4353         Callee.getOpcode() == ISD::TargetGlobalTLSAddress)
4354       return false;
4355 
4356     return G->getGlobal()->getValueType()->isFunctionTy();
4357   }
4358 
4359   return false;
4360 }
4361 
4362 static unsigned
4363 PrepareCall(SelectionDAG &DAG, SDValue &Callee, SDValue &InFlag, SDValue &Chain,
4364             SDValue CallSeqStart, const SDLoc &dl, int SPDiff, bool isTailCall,
4365             bool isPatchPoint, bool hasNest,
4366             SmallVectorImpl<std::pair<unsigned, SDValue>> &RegsToPass,
4367             SmallVectorImpl<SDValue> &Ops, std::vector<EVT> &NodeTys,
4368             ImmutableCallSite *CS, const PPCSubtarget &Subtarget) {
4369 
4370   bool isPPC64 = Subtarget.isPPC64();
4371   bool isSVR4ABI = Subtarget.isSVR4ABI();
4372   bool isELFv2ABI = Subtarget.isELFv2ABI();
4373 
4374   EVT PtrVT = DAG.getTargetLoweringInfo().getPointerTy(DAG.getDataLayout());
4375   NodeTys.push_back(MVT::Other);   // Returns a chain
4376   NodeTys.push_back(MVT::Glue);    // Returns a flag for retval copy to use.
4377 
4378   unsigned CallOpc = PPCISD::CALL;
4379 
4380   bool needIndirectCall = true;
4381   if (!isSVR4ABI || !isPPC64)
4382     if (SDNode *Dest = isBLACompatibleAddress(Callee, DAG)) {
4383       // If this is an absolute destination address, use the munged value.
4384       Callee = SDValue(Dest, 0);
4385       needIndirectCall = false;
4386     }
4387 
4388   // PC-relative references to external symbols should go through $stub, unless
4389   // we're building with the leopard linker or later, which automatically
4390   // synthesizes these stubs.
4391   const TargetMachine &TM = DAG.getTarget();
4392   const Module *Mod = DAG.getMachineFunction().getFunction()->getParent();
4393   const GlobalValue *GV = nullptr;
4394   if (auto *G = dyn_cast<GlobalAddressSDNode>(Callee))
4395     GV = G->getGlobal();
4396   bool Local = TM.shouldAssumeDSOLocal(*Mod, GV);
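       // On 32-bit ELF, calls to symbols that are not known to be local go
       // through the PLT.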
4397   bool UsePlt = !Local && Subtarget.isTargetELF() && !isPPC64;
4398 
4399   if (isFunctionGlobalAddress(Callee)) {
4400     GlobalAddressSDNode *G = cast<GlobalAddressSDNode>(Callee);
4401     // A call to a TLS address is actually an indirect call to a
4402     // thread-specific pointer.
4403     unsigned OpFlags = 0;
4404     if (UsePlt)
4405       OpFlags = PPCII::MO_PLT;
4406 
4407     // If the callee is a GlobalAddress/ExternalSymbol node (quite common,
4408     // every direct call is) turn it into a TargetGlobalAddress /
4409     // TargetExternalSymbol node so that legalize doesn't hack it.
4410     Callee = DAG.getTargetGlobalAddress(G->getGlobal(), dl,
4411                                         Callee.getValueType(), 0, OpFlags);
4412     needIndirectCall = false;
4413   }
4414 
4415   if (ExternalSymbolSDNode *S = dyn_cast<ExternalSymbolSDNode>(Callee)) {
4416     unsigned char OpFlags = 0;
4417 
4418     if (UsePlt)
4419       OpFlags = PPCII::MO_PLT;
4420 
4421     Callee = DAG.getTargetExternalSymbol(S->getSymbol(), Callee.getValueType(),
4422                                          OpFlags);
4423     needIndirectCall = false;
4424   }
4425 
4426   if (isPatchPoint) {
4427     // We'll form an invalid direct call when lowering a patchpoint; the full
4428     // sequence for an indirect call is complicated, and many of the
4429     // instructions introduced might have side effects (and, thus, can't be
4430     // removed later). The call itself will be removed as soon as the
4431     // argument/return lowering is complete, so the fact that it has the wrong
4432     // kind of operands should not really matter.
4433     needIndirectCall = false;
4434   }
4435 
4436   if (needIndirectCall) {
4437     // Otherwise, this is an indirect call.  We have to use an MTCTR/BCTRL pair
4438     // to do the call; we can't use PPCISD::CALL.
4439     SDValue MTCTROps[] = {Chain, Callee, InFlag};
4440 
4441     if (isSVR4ABI && isPPC64 && !isELFv2ABI) {
4442       // Function pointers in the 64-bit SVR4 ABI do not point to the function
4443       // entry point, but to the function descriptor (the function entry point
4444       // address is part of the function descriptor though).
4445       // The function descriptor is a three doubleword structure with the
4446       // following fields: function entry point, TOC base address and
4447       // environment pointer.
4448       // Thus for a call through a function pointer, the following actions need
4449       // to be performed:
4450       //   1. Save the TOC of the caller in the TOC save area of its stack
4451       //      frame (this is done in LowerCall_Darwin() or LowerCall_64SVR4()).
4452       //   2. Load the address of the function entry point from the function
4453       //      descriptor.
4454       //   3. Load the TOC of the callee from the function descriptor into r2.
4455       //   4. Load the environment pointer from the function descriptor into
4456       //      r11.
4457       //   5. Branch to the function entry point address.
4458       //   6. On return of the callee, the TOC of the caller needs to be
4459       //      restored (this is done in FinishCall()).
4460       //
4461       // The loads are scheduled at the beginning of the call sequence, and the
4462       // register copies are flagged together to ensure that no other
4463       // operations can be scheduled in between. E.g. without flagging the
4464       // copies together, a TOC access in the caller could be scheduled between
4465       // the assignment of the callee TOC and the branch to the callee, which
4466       // results in the TOC access going through the TOC of the callee instead
4467       // of going through the TOC of the caller, which leads to incorrect code.
4468 
4469       // Load the address of the function entry point from the function
4470       // descriptor.
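           // Take the chain result of the CALLSEQ_START node, skipping a
           // trailing glue result if one is present.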
4471       SDValue LDChain = CallSeqStart.getValue(CallSeqStart->getNumValues()-1);
4472       if (LDChain.getValueType() == MVT::Glue)
4473         LDChain = CallSeqStart.getValue(CallSeqStart->getNumValues()-2);
4474 
4475       auto MMOFlags = Subtarget.hasInvariantFunctionDescriptors()
4476                           ? (MachineMemOperand::MODereferenceable |
4477                              MachineMemOperand::MOInvariant)
4478                           : MachineMemOperand::MONone;
4479 
4480       MachinePointerInfo MPI(CS ? CS->getCalledValue() : nullptr);
4481       SDValue LoadFuncPtr = DAG.getLoad(MVT::i64, dl, LDChain, Callee, MPI,
4482                                         /* Alignment = */ 8, MMOFlags);
4483 
4484       // Load environment pointer into r11.
4485       SDValue PtrOff = DAG.getIntPtrConstant(16, dl);
4486       SDValue AddPtr = DAG.getNode(ISD::ADD, dl, MVT::i64, Callee, PtrOff);
4487       SDValue LoadEnvPtr =
4488           DAG.getLoad(MVT::i64, dl, LDChain, AddPtr, MPI.getWithOffset(16),
4489                       /* Alignment = */ 8, MMOFlags);
4490 
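           // Load the TOC base address from the second doubleword (offset 8) of
           // the function descriptor.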
4491       SDValue TOCOff = DAG.getIntPtrConstant(8, dl);
4492       SDValue AddTOC = DAG.getNode(ISD::ADD, dl, MVT::i64, Callee, TOCOff);
4493       SDValue TOCPtr =
4494           DAG.getLoad(MVT::i64, dl, LDChain, AddTOC, MPI.getWithOffset(8),
4495                       /* Alignment = */ 8, MMOFlags);
4496 
4497       setUsesTOCBasePtr(DAG);
4498       SDValue TOCVal = DAG.getCopyToReg(Chain, dl, PPC::X2, TOCPtr,
4499                                         InFlag);
4500       Chain = TOCVal.getValue(0);
4501       InFlag = TOCVal.getValue(1);
4502 
4503       // If the function call has an explicit 'nest' parameter, it takes the
4504       // place of the environment pointer.
4505       if (!hasNest) {
4506         SDValue EnvVal = DAG.getCopyToReg(Chain, dl, PPC::X11, LoadEnvPtr,
4507                                           InFlag);
4508 
4509         Chain = EnvVal.getValue(0);
4510         InFlag = EnvVal.getValue(1);
4511       }
4512 
4513       MTCTROps[0] = Chain;
4514       MTCTROps[1] = LoadFuncPtr;
4515       MTCTROps[2] = InFlag;
4516     }
4517 
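         // Glue the MTCTR node to the preceding register copies when a glue
         // value is present (pass two or three operands accordingly).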
4518     Chain = DAG.getNode(PPCISD::MTCTR, dl, NodeTys,
4519                         makeArrayRef(MTCTROps, InFlag.getNode() ? 3 : 2));
4520     InFlag = Chain.getValue(1);
4521 
4522     NodeTys.clear();
4523     NodeTys.push_back(MVT::Other);
4524     NodeTys.push_back(MVT::Glue);
4525     Ops.push_back(Chain);
4526     CallOpc = PPCISD::BCTRL;
4527     Callee.setNode(nullptr);
4528     // Add use of X11 (holding environment pointer)
4529     if (isSVR4ABI && isPPC64 && !isELFv2ABI && !hasNest)
4530       Ops.push_back(DAG.getRegister(PPC::X11, PtrVT));
4531     // Add CTR register as callee so a bctr can be emitted later.
4532     if (isTailCall)
4533       Ops.push_back(DAG.getRegister(isPPC64 ? PPC::CTR8 : PPC::CTR, PtrVT));
4534   }
4535 
4536   // If this is a direct call, pass the chain and the callee.
4537   if (Callee.getNode()) {
4538     Ops.push_back(Chain);
4539     Ops.push_back(Callee);
4540   }
4541   // If this is a tail call add stack pointer delta.
4542   if (isTailCall)
4543     Ops.push_back(DAG.getConstant(SPDiff, dl, MVT::i32));
4544 
4545   // Add argument registers to the end of the list so that they are known live
4546   // into the call.
4547   for (unsigned i = 0, e = RegsToPass.size(); i != e; ++i)
4548     Ops.push_back(DAG.getRegister(RegsToPass[i].first,
4549                                   RegsToPass[i].second.getValueType()));
4550 
4551   // All calls, in both the ELF V1 and V2 ABIs, need the TOC register live
4552   // into the call.
4553   if (isSVR4ABI && isPPC64 && !isPatchPoint) {
4554     setUsesTOCBasePtr(DAG);
4555     Ops.push_back(DAG.getRegister(PPC::X2, PtrVT));
4556   }
4557 
4558   return CallOpc;
4559 }
4560 
4561 static
4562 bool isLocalCall(const SDValue &Callee)
4563 {
4564   if (GlobalAddressSDNode *G = dyn_cast<GlobalAddressSDNode>(Callee))
4565     return G->getGlobal()->isStrongDefinitionForLinker();
4566   return false;
4567 }
4568 
4569 SDValue PPCTargetLowering::LowerCallResult(
4570     SDValue Chain, SDValue InFlag, CallingConv::ID CallConv, bool isVarArg,
4571     const SmallVectorImpl<ISD::InputArg> &Ins, const SDLoc &dl,
4572     SelectionDAG &DAG, SmallVectorImpl<SDValue> &InVals) const {
4573 
4574   SmallVector<CCValAssign, 16> RVLocs;
4575   CCState CCRetInfo(CallConv, isVarArg, DAG.getMachineFunction(), RVLocs,
4576                     *DAG.getContext());
4577   CCRetInfo.AnalyzeCallResult(Ins, RetCC_PPC);
4578 
4579   // Copy all of the result registers out of their specified physreg.
4580   for (unsigned i = 0, e = RVLocs.size(); i != e; ++i) {
4581     CCValAssign &VA = RVLocs[i];
4582     assert(VA.isRegLoc() && "Can only return in registers!");
4583 
4584     SDValue Val = DAG.getCopyFromReg(Chain, dl,
4585                                      VA.getLocReg(), VA.getLocVT(), InFlag);
4586     Chain = Val.getValue(1);
4587     InFlag = Val.getValue(2);
4588 
4589     switch (VA.getLocInfo()) {
4590     default: llvm_unreachable("Unknown loc info!");
4591     case CCValAssign::Full: break;
4592     case CCValAssign::AExt:
4593       Val = DAG.getNode(ISD::TRUNCATE, dl, VA.getValVT(), Val);
4594       break;
4595     case CCValAssign::ZExt:
4596       Val = DAG.getNode(ISD::AssertZext, dl, VA.getLocVT(), Val,
4597                         DAG.getValueType(VA.getValVT()));
4598       Val = DAG.getNode(ISD::TRUNCATE, dl, VA.getValVT(), Val);
4599       break;
4600     case CCValAssign::SExt:
4601       Val = DAG.getNode(ISD::AssertSext, dl, VA.getLocVT(), Val,
4602                         DAG.getValueType(VA.getValVT()));
4603       Val = DAG.getNode(ISD::TRUNCATE, dl, VA.getValVT(), Val);
4604       break;
4605     }
4606 
4607     InVals.push_back(Val);
4608   }
4609 
4610   return Chain;
4611 }
4612 
4613 SDValue PPCTargetLowering::FinishCall(
4614     CallingConv::ID CallConv, const SDLoc &dl, bool isTailCall, bool isVarArg,
4615     bool isPatchPoint, bool hasNest, SelectionDAG &DAG,
4616     SmallVector<std::pair<unsigned, SDValue>, 8> &RegsToPass, SDValue InFlag,
4617     SDValue Chain, SDValue CallSeqStart, SDValue &Callee, int SPDiff,
4618     unsigned NumBytes, const SmallVectorImpl<ISD::InputArg> &Ins,
4619     SmallVectorImpl<SDValue> &InVals, ImmutableCallSite *CS) const {
4620 
4621   std::vector<EVT> NodeTys;
4622   SmallVector<SDValue, 8> Ops;
4623   unsigned CallOpc = PrepareCall(DAG, Callee, InFlag, Chain, CallSeqStart, dl,
4624                                  SPDiff, isTailCall, isPatchPoint, hasNest,
4625                                  RegsToPass, Ops, NodeTys, CS, Subtarget);
4626 
4627   // Add implicit use of CR bit 6 for 32-bit SVR4 vararg calls
4628   if (isVarArg && Subtarget.isSVR4ABI() && !Subtarget.isPPC64())
4629     Ops.push_back(DAG.getRegister(PPC::CR1EQ, MVT::i32));
4630 
4631   // When performing tail call optimization the callee pops its arguments off
4632   // the stack. Account for this here so these bytes can be pushed back on in
4633   // PPCFrameLowering::eliminateCallFramePseudoInstr.
4634   int BytesCalleePops =
4635     (CallConv == CallingConv::Fast &&
4636      getTargetMachine().Options.GuaranteedTailCallOpt) ? NumBytes : 0;
4637 
4638   // Add a register mask operand representing the call-preserved registers.
4639   const TargetRegisterInfo *TRI = Subtarget.getRegisterInfo();
4640   const uint32_t *Mask =
4641       TRI->getCallPreservedMask(DAG.getMachineFunction(), CallConv);
4642   assert(Mask && "Missing call preserved mask for calling convention");
4643   Ops.push_back(DAG.getRegisterMask(Mask));
4644 
4645   if (InFlag.getNode())
4646     Ops.push_back(InFlag);
4647 
4648   // Emit tail call.
4649   if (isTailCall) {
4650     assert(((Callee.getOpcode() == ISD::Register &&
4651              cast<RegisterSDNode>(Callee)->getReg() == PPC::CTR) ||
4652             Callee.getOpcode() == ISD::TargetExternalSymbol ||
4653             Callee.getOpcode() == ISD::TargetGlobalAddress ||
4654             isa<ConstantSDNode>(Callee)) &&
4655     "Expecting an global address, external symbol, absolute value or register");
4656 
4657     DAG.getMachineFunction().getFrameInfo().setHasTailCall();
4658     return DAG.getNode(PPCISD::TC_RETURN, dl, MVT::Other, Ops);
4659   }
4660 
4661   // Add a NOP immediately after the branch instruction when using the 64-bit
4662   // SVR4 ABI. At link time, if caller and callee are in a different module and
4663   // thus have a different TOC, the call will be replaced with a call to a stub
4664   // function which saves the current TOC, loads the TOC of the callee and
4665   // branches to the callee. The NOP will be replaced with a load instruction
4666   // which restores the TOC of the caller from the TOC save slot of the current
4667   // stack frame. If caller and callee belong to the same module (and have the
4668   // same TOC), the NOP will remain unchanged.
4669 
4670   if (!isTailCall && Subtarget.isSVR4ABI() && Subtarget.isPPC64() &&
4671       !isPatchPoint) {
4672     if (CallOpc == PPCISD::BCTRL) {
4673       // This is a call through a function pointer.
4674       // Restore the caller TOC from the save area into R2.
4675       // See PrepareCall() for more information about calls through function
4676       // pointers in the 64-bit SVR4 ABI.
4677       // We are using a target-specific load with r2 hard coded, because the
4678       // result of a target-independent load would never go directly into r2,
4679       // since r2 is a reserved register (which prevents the register allocator
4680       // from allocating it), resulting in an additional register being
4681       // allocated and an unnecessary move instruction being generated.
4682       CallOpc = PPCISD::BCTRL_LOAD_TOC;
4683 
4684       EVT PtrVT = getPointerTy(DAG.getDataLayout());
4685       SDValue StackPtr = DAG.getRegister(PPC::X1, PtrVT);
4686       unsigned TOCSaveOffset = Subtarget.getFrameLowering()->getTOCSaveOffset();
4687       SDValue TOCOff = DAG.getIntPtrConstant(TOCSaveOffset, dl);
4688       SDValue AddTOC = DAG.getNode(ISD::ADD, dl, MVT::i64, StackPtr, TOCOff);
4689 
4690       // The address needs to go after the chain input but before the flag (or
4691       // any other variadic arguments).
4692       Ops.insert(std::next(Ops.begin()), AddTOC);
4693     } else if ((CallOpc == PPCISD::CALL) &&
4694                (!isLocalCall(Callee) ||
4695                 DAG.getTarget().getRelocationModel() == Reloc::PIC_))
4696       // Otherwise insert NOP for non-local calls.
4697       CallOpc = PPCISD::CALL_NOP;
4698   }
4699 
4700   Chain = DAG.getNode(CallOpc, dl, NodeTys, Ops);
4701   InFlag = Chain.getValue(1);
4702 
4703   Chain = DAG.getCALLSEQ_END(Chain, DAG.getIntPtrConstant(NumBytes, dl, true),
4704                              DAG.getIntPtrConstant(BytesCalleePops, dl, true),
4705                              InFlag, dl);
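       // The glue result is only needed if there are return values to copy out
       // below.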
4706   if (!Ins.empty())
4707     InFlag = Chain.getValue(1);
4708 
4709   return LowerCallResult(Chain, InFlag, CallConv, isVarArg,
4710                          Ins, dl, DAG, InVals);
4711 }
4712 
4713 SDValue
4714 PPCTargetLowering::LowerCall(TargetLowering::CallLoweringInfo &CLI,
4715                              SmallVectorImpl<SDValue> &InVals) const {
4716   SelectionDAG &DAG                     = CLI.DAG;
4717   SDLoc &dl                             = CLI.DL;
4718   SmallVectorImpl<ISD::OutputArg> &Outs = CLI.Outs;
4719   SmallVectorImpl<SDValue> &OutVals     = CLI.OutVals;
4720   SmallVectorImpl<ISD::InputArg> &Ins   = CLI.Ins;
4721   SDValue Chain                         = CLI.Chain;
4722   SDValue Callee                        = CLI.Callee;
4723   bool &isTailCall                      = CLI.IsTailCall;
4724   CallingConv::ID CallConv              = CLI.CallConv;
4725   bool isVarArg                         = CLI.IsVarArg;
4726   bool isPatchPoint                     = CLI.IsPatchPoint;
4727   ImmutableCallSite *CS                 = CLI.CS;
4728 
4729   if (isTailCall) {
4730     if (Subtarget.useLongCalls() && !(CS && CS->isMustTailCall()))
4731       isTailCall = false;
4732     else if (Subtarget.isSVR4ABI() && Subtarget.isPPC64())
4733       isTailCall =
4734         IsEligibleForTailCallOptimization_64SVR4(Callee, CallConv, CS,
4735                                                  isVarArg, Outs, Ins, DAG);
4736     else
4737       isTailCall = IsEligibleForTailCallOptimization(Callee, CallConv, isVarArg,
4738                                                      Ins, DAG);
4739     if (isTailCall) {
4740       ++NumTailCalls;
4741       if (!getTargetMachine().Options.GuaranteedTailCallOpt)
4742         ++NumSiblingCalls;
4743 
4744       assert(isa<GlobalAddressSDNode>(Callee) &&
4745              "Callee should be an llvm::Function object.");
4746       DEBUG(
4747         const GlobalValue *GV = cast<GlobalAddressSDNode>(Callee)->getGlobal();
4748         const unsigned Width = 80 - strlen("TCO caller: ")
4749                                   - strlen(", callee linkage: 0, 0");
4750         dbgs() << "TCO caller: "
4751                << left_justify(DAG.getMachineFunction().getName(), Width)
4752                << ", callee linkage: "
4753                << GV->getVisibility() << ", " << GV->getLinkage() << "\n"
4754       );
4755     }
4756   }
4757 
4758   if (!isTailCall && CS && CS->isMustTailCall())
4759     report_fatal_error("failed to perform tail call elimination on a call "
4760                        "site marked musttail");
4761 
4762   // When long calls (i.e. indirect calls) are always used, calls are always
4763   // made via function pointer. If we have a function name, first translate it
4764   // into a pointer.
4765   if (Subtarget.useLongCalls() && isa<GlobalAddressSDNode>(Callee) &&
4766       !isTailCall)
4767     Callee = LowerGlobalAddress(Callee, DAG);
4768 
4769   if (Subtarget.isSVR4ABI()) {
4770     if (Subtarget.isPPC64())
4771       return LowerCall_64SVR4(Chain, Callee, CallConv, isVarArg,
4772                               isTailCall, isPatchPoint, Outs, OutVals, Ins,
4773                               dl, DAG, InVals, CS);
4774     else
4775       return LowerCall_32SVR4(Chain, Callee, CallConv, isVarArg,
4776                               isTailCall, isPatchPoint, Outs, OutVals, Ins,
4777                               dl, DAG, InVals, CS);
4778   }
4779 
4780   return LowerCall_Darwin(Chain, Callee, CallConv, isVarArg,
4781                           isTailCall, isPatchPoint, Outs, OutVals, Ins,
4782                           dl, DAG, InVals, CS);
4783 }
4784 
4785 SDValue PPCTargetLowering::LowerCall_32SVR4(
4786     SDValue Chain, SDValue Callee, CallingConv::ID CallConv, bool isVarArg,
4787     bool isTailCall, bool isPatchPoint,
4788     const SmallVectorImpl<ISD::OutputArg> &Outs,
4789     const SmallVectorImpl<SDValue> &OutVals,
4790     const SmallVectorImpl<ISD::InputArg> &Ins, const SDLoc &dl,
4791     SelectionDAG &DAG, SmallVectorImpl<SDValue> &InVals,
4792     ImmutableCallSite *CS) const {
4793   // See PPCTargetLowering::LowerFormalArguments_32SVR4() for a description
4794   // of the 32-bit SVR4 ABI stack frame layout.
4795 
4796   assert((CallConv == CallingConv::C ||
4797           CallConv == CallingConv::Fast) && "Unknown calling convention!");
4798 
4799   unsigned PtrByteSize = 4;
4800 
4801   MachineFunction &MF = DAG.getMachineFunction();
4802 
4803   // Mark this function as potentially making a tail call. As a consequence,
4804   // the frame pointer will be used for dynamic allocas and for restoring the
4805   // caller's stack pointer in this function's epilog. This is done because
4806   // the tail-called function might overwrite the value in this function's
4807   // (MF) stack pointer stack slot 0(SP).
4808   if (getTargetMachine().Options.GuaranteedTailCallOpt &&
4809       CallConv == CallingConv::Fast)
4810     MF.getInfo<PPCFunctionInfo>()->setHasFastCall();
4811 
4812   // Count how many bytes are to be pushed on the stack, including the linkage
4813   // area, parameter list area and the part of the local variable space which
4814   // contains copies of aggregates which are passed by value.
4815 
4816   // Assign locations to all of the outgoing arguments.
4817   SmallVector<CCValAssign, 16> ArgLocs;
4818   PPCCCState CCInfo(CallConv, isVarArg, MF, ArgLocs, *DAG.getContext());
4819 
4820   // Reserve space for the linkage area on the stack.
4821   CCInfo.AllocateStack(Subtarget.getFrameLowering()->getLinkageSize(),
4822                        PtrByteSize);
4823   if (useSoftFloat())
4824     CCInfo.PreAnalyzeCallOperands(Outs);
4825 
4826   if (isVarArg) {
4827     // Handle fixed and variable vector arguments differently.
4828     // Fixed vector arguments go into registers as long as registers are
4829     // available. Variable vector arguments always go into memory.
4830     unsigned NumArgs = Outs.size();
4831 
4832     for (unsigned i = 0; i != NumArgs; ++i) {
4833       MVT ArgVT = Outs[i].VT;
4834       ISD::ArgFlagsTy ArgFlags = Outs[i].Flags;
4835       bool Result;
4836 
4837       if (Outs[i].IsFixed) {
4838         Result = CC_PPC32_SVR4(i, ArgVT, ArgVT, CCValAssign::Full, ArgFlags,
4839                                CCInfo);
4840       } else {
4841         Result = CC_PPC32_SVR4_VarArg(i, ArgVT, ArgVT, CCValAssign::Full,
4842                                       ArgFlags, CCInfo);
4843       }
4844 
4845       if (Result) {
4846 #ifndef NDEBUG
4847         errs() << "Call operand #" << i << " has unhandled type "
4848              << EVT(ArgVT).getEVTString() << "\n";
4849 #endif
4850         llvm_unreachable(nullptr);
4851       }
4852     }
4853   } else {
4854     // All arguments are treated the same.
4855     CCInfo.AnalyzeCallOperands(Outs, CC_PPC32_SVR4);
4856   }
4857   CCInfo.clearWasPPCF128();
4858 
4859   // Assign locations to all of the outgoing aggregate by value arguments.
4860   SmallVector<CCValAssign, 16> ByValArgLocs;
4861   CCState CCByValInfo(CallConv, isVarArg, MF, ByValArgLocs, *DAG.getContext());
4862 
4863   // Reserve stack space for the allocations in CCInfo.
4864   CCByValInfo.AllocateStack(CCInfo.getNextStackOffset(), PtrByteSize);
4865 
4866   CCByValInfo.AnalyzeCallOperands(Outs, CC_PPC32_SVR4_ByVal);
4867 
4868   // Size of the linkage area, parameter list area, and the part of the local
4869   // variable space where copies of aggregates which are passed by value are
4870   // stored.
4871   unsigned NumBytes = CCByValInfo.getNextStackOffset();
4872 
4873   // Calculate by how many bytes the stack has to be adjusted in case of tail
4874   // call optimization.
4875   int SPDiff = CalculateTailCallSPDiff(DAG, isTailCall, NumBytes);
4876 
4877   // Adjust the stack pointer for the new arguments...
4878   // These operations are automatically eliminated by the prolog/epilog pass
4879   Chain = DAG.getCALLSEQ_START(Chain, DAG.getIntPtrConstant(NumBytes, dl, true),
4880                                dl);
4881   SDValue CallSeqStart = Chain;
4882 
4883   // Load the return address and frame pointer so they can be moved somewhere
4884   // else later.
4885   SDValue LROp, FPOp;
4886   Chain = EmitTailCallLoadFPAndRetAddr(DAG, SPDiff, Chain, LROp, FPOp, dl);
4887 
4888   // Set up a copy of the stack pointer for use loading and storing any
4889   // arguments that may not fit in the registers available for argument
4890   // passing.
4891   SDValue StackPtr = DAG.getRegister(PPC::R1, MVT::i32);
4892 
4893   SmallVector<std::pair<unsigned, SDValue>, 8> RegsToPass;
4894   SmallVector<TailCallArgumentInfo, 8> TailCallArguments;
4895   SmallVector<SDValue, 8> MemOpChains;
4896 
4897   bool seenFloatArg = false;
4898   // Walk the register/memloc assignments, inserting copies/loads.
4899   for (unsigned i = 0, j = 0, e = ArgLocs.size();
4900        i != e;
4901        ++i) {
4902     CCValAssign &VA = ArgLocs[i];
4903     SDValue Arg = OutVals[i];
4904     ISD::ArgFlagsTy Flags = Outs[i].Flags;
4905 
4906     if (Flags.isByVal()) {
4907       // Argument is an aggregate which is passed by value, thus we need to
4908       // create a copy of it in the local variable space of the current stack
4909       // frame (which is the stack frame of the caller) and pass the address of
4910       // this copy to the callee.
4911       assert((j < ByValArgLocs.size()) && "Index out of bounds!");
4912       CCValAssign &ByValVA = ByValArgLocs[j++];
4913       assert((VA.getValNo() == ByValVA.getValNo()) && "ValNo mismatch!");
4914 
4915       // Memory reserved in the local variable space of the caller's stack frame.
4916       unsigned LocMemOffset = ByValVA.getLocMemOffset();
4917 
4918       SDValue PtrOff = DAG.getIntPtrConstant(LocMemOffset, dl);
4919       PtrOff = DAG.getNode(ISD::ADD, dl, getPointerTy(MF.getDataLayout()),
4920                            StackPtr, PtrOff);
4921 
4922       // Create a copy of the argument in the local area of the current
4923       // stack frame.
4924       SDValue MemcpyCall =
4925         CreateCopyOfByValArgument(Arg, PtrOff,
4926                                   CallSeqStart.getNode()->getOperand(0),
4927                                   Flags, DAG, dl);
4928 
4929       // This must go outside the CALLSEQ_START..END.
4930       SDValue NewCallSeqStart = DAG.getCALLSEQ_START(MemcpyCall,
4931                            CallSeqStart.getNode()->getOperand(1),
4932                            SDLoc(MemcpyCall));
4933       DAG.ReplaceAllUsesWith(CallSeqStart.getNode(),
4934                              NewCallSeqStart.getNode());
4935       Chain = CallSeqStart = NewCallSeqStart;
4936 
4937       // Pass the address of the aggregate copy on the stack either in a
4938       // physical register or in the parameter list area of the current stack
4939       // frame to the callee.
4940       Arg = PtrOff;
4941     }
4942 
4943     if (VA.isRegLoc()) {
4944       if (Arg.getValueType() == MVT::i1)
4945         Arg = DAG.getNode(ISD::ZERO_EXTEND, dl, MVT::i32, Arg);
4946 
4947       seenFloatArg |= VA.getLocVT().isFloatingPoint();
4948       // Put argument in a physical register.
4949       RegsToPass.push_back(std::make_pair(VA.getLocReg(), Arg));
4950     } else {
4951       // Put argument in the parameter list area of the current stack frame.
4952       assert(VA.isMemLoc());
4953       unsigned LocMemOffset = VA.getLocMemOffset();
4954 
4955       if (!isTailCall) {
4956         SDValue PtrOff = DAG.getIntPtrConstant(LocMemOffset, dl);
4957         PtrOff = DAG.getNode(ISD::ADD, dl, getPointerTy(MF.getDataLayout()),
4958                              StackPtr, PtrOff);
4959 
4960         MemOpChains.push_back(
4961             DAG.getStore(Chain, dl, Arg, PtrOff, MachinePointerInfo()));
4962       } else {
4963         // Calculate and remember argument location.
4964         CalculateTailCallArgDest(DAG, MF, false, Arg, SPDiff, LocMemOffset,
4965                                  TailCallArguments);
4966       }
4967     }
4968   }
4969 
4970   if (!MemOpChains.empty())
4971     Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, MemOpChains);
4972 
4973   // Build a sequence of copy-to-reg nodes chained together with token chain
4974   // and flag operands which copy the outgoing args into the appropriate regs.
4975   SDValue InFlag;
4976   for (unsigned i = 0, e = RegsToPass.size(); i != e; ++i) {
4977     Chain = DAG.getCopyToReg(Chain, dl, RegsToPass[i].first,
4978                              RegsToPass[i].second, InFlag);
4979     InFlag = Chain.getValue(1);
4980   }
4981 
4982   // Set CR bit 6 to true if this is a vararg call with floating args passed in
4983   // registers.
4984   if (isVarArg) {
4985     SDVTList VTs = DAG.getVTList(MVT::Other, MVT::Glue);
4986     SDValue Ops[] = { Chain, InFlag };
4987 
4988     Chain = DAG.getNode(seenFloatArg ? PPCISD::CR6SET : PPCISD::CR6UNSET,
4989                         dl, VTs, makeArrayRef(Ops, InFlag.getNode() ? 2 : 1));
4990 
4991     InFlag = Chain.getValue(1);
4992   }
4993 
4994   if (isTailCall)
4995     PrepareTailCall(DAG, InFlag, Chain, dl, SPDiff, NumBytes, LROp, FPOp,
4996                     TailCallArguments);
4997 
4998   return FinishCall(CallConv, dl, isTailCall, isVarArg, isPatchPoint,
4999                     /* unused except on PPC64 ELFv1 */ false, DAG,
5000                     RegsToPass, InFlag, Chain, CallSeqStart, Callee, SPDiff,
5001                     NumBytes, Ins, InVals, CS);
5002 }
5003 
5004 // Copy an argument into memory, being careful to do this outside the
5005 // call sequence for the call to which the argument belongs.
5006 SDValue PPCTargetLowering::createMemcpyOutsideCallSeq(
5007     SDValue Arg, SDValue PtrOff, SDValue CallSeqStart, ISD::ArgFlagsTy Flags,
5008     SelectionDAG &DAG, const SDLoc &dl) const {
5009   SDValue MemcpyCall = CreateCopyOfByValArgument(Arg, PtrOff,
5010                         CallSeqStart.getNode()->getOperand(0),
5011                         Flags, DAG, dl);
5012   // The MEMCPY must go outside the CALLSEQ_START..END.
5013   SDValue NewCallSeqStart = DAG.getCALLSEQ_START(MemcpyCall,
5014                              CallSeqStart.getNode()->getOperand(1),
5015                              SDLoc(MemcpyCall));
5016   DAG.ReplaceAllUsesWith(CallSeqStart.getNode(),
5017                          NewCallSeqStart.getNode());
5018   return NewCallSeqStart;
5019 }
5020 
5021 SDValue PPCTargetLowering::LowerCall_64SVR4(
5022     SDValue Chain, SDValue Callee, CallingConv::ID CallConv, bool isVarArg,
5023     bool isTailCall, bool isPatchPoint,
5024     const SmallVectorImpl<ISD::OutputArg> &Outs,
5025     const SmallVectorImpl<SDValue> &OutVals,
5026     const SmallVectorImpl<ISD::InputArg> &Ins, const SDLoc &dl,
5027     SelectionDAG &DAG, SmallVectorImpl<SDValue> &InVals,
5028     ImmutableCallSite *CS) const {
5029 
5030   bool isELFv2ABI = Subtarget.isELFv2ABI();
5031   bool isLittleEndian = Subtarget.isLittleEndian();
5032   unsigned NumOps = Outs.size();
5033   bool hasNest = false;
5034   bool IsSibCall = false;
5035 
5036   EVT PtrVT = getPointerTy(DAG.getDataLayout());
5037   unsigned PtrByteSize = 8;
5038 
5039   MachineFunction &MF = DAG.getMachineFunction();
5040 
5041   if (isTailCall && !getTargetMachine().Options.GuaranteedTailCallOpt)
5042     IsSibCall = true;
5043 
5044   // Mark this function as potentially containing a function that contains a
5045   // tail call. As a consequence, the frame pointer will be used for dynamic
5046   // allocation and for restoring the caller's stack pointer in this function's
5047   // epilogue. This is done because the tail-called function might overwrite
5048   // the value in this function's (MF) stack pointer stack slot 0(SP).
5049   if (getTargetMachine().Options.GuaranteedTailCallOpt &&
5050       CallConv == CallingConv::Fast)
5051     MF.getInfo<PPCFunctionInfo>()->setHasFastCall();
5052 
5053   assert(!(CallConv == CallingConv::Fast && isVarArg) &&
5054          "fastcc not supported on varargs functions");
5055 
5056   // Count how many bytes are to be pushed on the stack, including the linkage
5057   // area, and parameter passing area.  On ELFv1, the linkage area is 48 bytes
5058   // reserved space for [SP][CR][LR][2 x unused][TOC]; on ELFv2, the linkage
5059   // area is 32 bytes reserved space for [SP][CR][LR][TOC].
5060   unsigned LinkageSize = Subtarget.getFrameLowering()->getLinkageSize();
5061   unsigned NumBytes = LinkageSize;
5062   unsigned GPR_idx = 0, FPR_idx = 0, VR_idx = 0;
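       // QPX arguments are allocated with the same numbering as the FPRs, so the
       // QPX and FPR indices alias each other (and NumQFPRs equals NumFPRs below).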
5063   unsigned &QFPR_idx = FPR_idx;
5064 
5065   static const MCPhysReg GPR[] = {
5066     PPC::X3, PPC::X4, PPC::X5, PPC::X6,
5067     PPC::X7, PPC::X8, PPC::X9, PPC::X10,
5068   };
5069   static const MCPhysReg VR[] = {
5070     PPC::V2, PPC::V3, PPC::V4, PPC::V5, PPC::V6, PPC::V7, PPC::V8,
5071     PPC::V9, PPC::V10, PPC::V11, PPC::V12, PPC::V13
5072   };
5073 
5074   const unsigned NumGPRs = array_lengthof(GPR);
5075   const unsigned NumFPRs = 13;
5076   const unsigned NumVRs  = array_lengthof(VR);
5077   const unsigned NumQFPRs = NumFPRs;
5078 
5079   // When using the fast calling convention, we don't provide backing for
5080   // arguments that will be in registers.
5081   unsigned NumGPRsUsed = 0, NumFPRsUsed = 0, NumVRsUsed = 0;
5082 
5083   // Add up all the space actually used.
5084   for (unsigned i = 0; i != NumOps; ++i) {
5085     ISD::ArgFlagsTy Flags = Outs[i].Flags;
5086     EVT ArgVT = Outs[i].VT;
5087     EVT OrigVT = Outs[i].ArgVT;
5088 
5089     if (Flags.isNest())
5090       continue;
5091 
5092     if (CallConv == CallingConv::Fast) {
5093       if (Flags.isByVal())
5094         NumGPRsUsed += (Flags.getByValSize()+7)/8;
5095       else
5096         switch (ArgVT.getSimpleVT().SimpleTy) {
5097         default: llvm_unreachable("Unexpected ValueType for argument!");
5098         case MVT::i1:
5099         case MVT::i32:
5100         case MVT::i64:
5101           if (++NumGPRsUsed <= NumGPRs)
5102             continue;
5103           break;
5104         case MVT::v4i32:
5105         case MVT::v8i16:
5106         case MVT::v16i8:
5107         case MVT::v2f64:
5108         case MVT::v2i64:
5109         case MVT::v1i128:
5110           if (++NumVRsUsed <= NumVRs)
5111             continue;
5112           break;
5113         case MVT::v4f32:
5114           // When using QPX, this is handled like a FP register, otherwise, it
5115           // is an Altivec register.
5116           if (Subtarget.hasQPX()) {
5117             if (++NumFPRsUsed <= NumFPRs)
5118               continue;
5119           } else {
5120             if (++NumVRsUsed <= NumVRs)
5121               continue;
5122           }
5123           break;
5124         case MVT::f32:
5125         case MVT::f64:
5126         case MVT::v4f64: // QPX
5127         case MVT::v4i1:  // QPX
5128           if (++NumFPRsUsed <= NumFPRs)
5129             continue;
5130           break;
5131         }
5132     }
5133 
5134     /* Respect alignment of argument on the stack.  */
5135     unsigned Align =
5136       CalculateStackSlotAlignment(ArgVT, OrigVT, Flags, PtrByteSize);
5137     NumBytes = ((NumBytes + Align - 1) / Align) * Align;
5138 
5139     NumBytes += CalculateStackSlotSize(ArgVT, Flags, PtrByteSize);
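         // The last member of a run of arguments passed in consecutive registers
         // closes out the slot; round the running total up to a pointer-size
         // boundary.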
5140     if (Flags.isInConsecutiveRegsLast())
5141       NumBytes = ((NumBytes + PtrByteSize - 1)/PtrByteSize) * PtrByteSize;
5142   }
5143 
5144   unsigned NumBytesActuallyUsed = NumBytes;
5145 
5146   // The prologue code of the callee may store up to 8 GPR argument registers to
5147   // the stack, allowing va_start to index over them in memory if it is varargs.
5148   // Because we cannot tell if this is needed on the caller side, we have to
5149   // conservatively assume that it is needed.  As such, make sure we have at
5150   // least enough stack space for the caller to store the 8 GPRs.
5151   // FIXME: On ELFv2, it may be unnecessary to allocate the parameter area.
5152   NumBytes = std::max(NumBytes, LinkageSize + 8 * PtrByteSize);
5153 
5154   // Tail call needs the stack to be aligned.
5155   if (getTargetMachine().Options.GuaranteedTailCallOpt &&
5156       CallConv == CallingConv::Fast)
5157     NumBytes = EnsureStackAlignment(Subtarget.getFrameLowering(), NumBytes);
5158 
5159   int SPDiff = 0;
5160 
5161   // Calculate by how many bytes the stack has to be adjusted in case of tail
5162   // call optimization.
5163   if (!IsSibCall)
5164     SPDiff = CalculateTailCallSPDiff(DAG, isTailCall, NumBytes);
5165 
5166   // To protect arguments on the stack from being clobbered in a tail call,
5167   // force all the loads to happen before doing any other lowering.
5168   if (isTailCall)
5169     Chain = DAG.getStackArgumentTokenFactor(Chain);
5170 
5171   // Adjust the stack pointer for the new arguments...
5172   // These operations are automatically eliminated by the prolog/epilog pass
5173   if (!IsSibCall)
5174     Chain = DAG.getCALLSEQ_START(Chain,
5175                                  DAG.getIntPtrConstant(NumBytes, dl, true), dl);
5176   SDValue CallSeqStart = Chain;
5177 
5178   // Load the return address and frame pointer so they can be moved somewhere
5179   // else later.
5180   SDValue LROp, FPOp;
5181   Chain = EmitTailCallLoadFPAndRetAddr(DAG, SPDiff, Chain, LROp, FPOp, dl);
5182 
5183   // Set up a copy of the stack pointer for use loading and storing any
5184   // arguments that may not fit in the registers available for argument
5185   // passing.
5186   SDValue StackPtr = DAG.getRegister(PPC::X1, MVT::i64);
5187 
5188   // Figure out which arguments are going to go in registers, and which in
5189   // memory.  Also, if this is a vararg function, floating-point arguments
5190   // must be stored to our stack, and loaded into integer regs as well, if
5191   // any integer regs are available for argument passing.
5192   unsigned ArgOffset = LinkageSize;
5193 
5194   SmallVector<std::pair<unsigned, SDValue>, 8> RegsToPass;
5195   SmallVector<TailCallArgumentInfo, 8> TailCallArguments;
5196 
5197   SmallVector<SDValue, 8> MemOpChains;
5198   for (unsigned i = 0; i != NumOps; ++i) {
5199     SDValue Arg = OutVals[i];
5200     ISD::ArgFlagsTy Flags = Outs[i].Flags;
5201     EVT ArgVT = Outs[i].VT;
5202     EVT OrigVT = Outs[i].ArgVT;
5203 
5204     // PtrOff will be used to store the current argument to the stack if a
5205     // register cannot be found for it.
5206     SDValue PtrOff;
5207 
5208     // We re-align the argument offset for each argument, except when using the
5209     // fast calling convention, in which case we do so only when the argument
5210     // will actually use a stack slot.
5211     auto ComputePtrOff = [&]() {
5212       /* Respect alignment of argument on the stack.  */
5213       unsigned Align =
5214         CalculateStackSlotAlignment(ArgVT, OrigVT, Flags, PtrByteSize);
5215       ArgOffset = ((ArgOffset + Align - 1) / Align) * Align;
5216 
5217       PtrOff = DAG.getConstant(ArgOffset, dl, StackPtr.getValueType());
5218 
5219       PtrOff = DAG.getNode(ISD::ADD, dl, PtrVT, StackPtr, PtrOff);
5220     };
5221 
5222     if (CallConv != CallingConv::Fast) {
5223       ComputePtrOff();
5224 
5225       /* Compute GPR index associated with argument offset.  */
5226       GPR_idx = (ArgOffset - LinkageSize) / PtrByteSize;
5227       GPR_idx = std::min(GPR_idx, NumGPRs);
5228     }
5229 
5230     // Promote integers to 64-bit values.
5231     if (Arg.getValueType() == MVT::i32 || Arg.getValueType() == MVT::i1) {
5232       // FIXME: Should this use ANY_EXTEND if neither sext nor zext?
5233       unsigned ExtOp = Flags.isSExt() ? ISD::SIGN_EXTEND : ISD::ZERO_EXTEND;
5234       Arg = DAG.getNode(ExtOp, dl, MVT::i64, Arg);
5235     }
5236 
5237     // FIXME memcpy is used way more than necessary.  Correctness first.
5238     // Note: "by value" is code for passing a structure by value, not
5239     // basic types.
5240     if (Flags.isByVal()) {
5241       // Note: Size includes alignment padding, so
5242       //   struct x { short a; char b; }
5243       // will have Size = 4.  With #pragma pack(1), it will have Size = 3.
5244       // These are the proper values we need for right-justifying the
5245       // aggregate in a parameter register.
5246       unsigned Size = Flags.getByValSize();
5247 
5248       // An empty aggregate parameter takes up no storage and no
5249       // registers.
5250       if (Size == 0)
5251         continue;
5252 
5253       if (CallConv == CallingConv::Fast)
5254         ComputePtrOff();
5255 
5256       // All aggregates smaller than 8 bytes must be passed right-justified.
5257       if (Size==1 || Size==2 || Size==4) {
5258         EVT VT = (Size==1) ? MVT::i8 : ((Size==2) ? MVT::i16 : MVT::i32);
5259         if (GPR_idx != NumGPRs) {
5260           SDValue Load = DAG.getExtLoad(ISD::EXTLOAD, dl, PtrVT, Chain, Arg,
5261                                         MachinePointerInfo(), VT);
5262           MemOpChains.push_back(Load.getValue(1));
5263           RegsToPass.push_back(std::make_pair(GPR[GPR_idx++], Load));
5264 
5265           ArgOffset += PtrByteSize;
5266           continue;
5267         }
5268       }
5269 
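           // With no GPR available, a small aggregate is copied directly into its
           // parameter save area slot (right-justified within the doubleword on
           // big-endian targets).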
5270       if (GPR_idx == NumGPRs && Size < 8) {
5271         SDValue AddPtr = PtrOff;
5272         if (!isLittleEndian) {
5273           SDValue Const = DAG.getConstant(PtrByteSize - Size, dl,
5274                                           PtrOff.getValueType());
5275           AddPtr = DAG.getNode(ISD::ADD, dl, PtrVT, PtrOff, Const);
5276         }
5277         Chain = CallSeqStart = createMemcpyOutsideCallSeq(Arg, AddPtr,
5278                                                           CallSeqStart,
5279                                                           Flags, DAG, dl);
5280         ArgOffset += PtrByteSize;
5281         continue;
5282       }
5283       // Copy entire object into memory.  There are cases where gcc-generated
5284       // code assumes it is there, even if it could be put entirely into
5285       // registers.  (This is not what the doc says.)
5286 
5287       // FIXME: The above statement is likely due to a misunderstanding of the
5288       // documents.  All arguments must be copied into the parameter area BY
5289       // THE CALLEE in the event that the callee takes the address of any
5290       // formal argument.  That has not yet been implemented.  However, it is
5291       // reasonable to use the stack area as a staging area for the register
5292       // load.
5293 
5294       // Skip this for small aggregates, as we will use the same slot for a
5295       // right-justified copy, below.
5296       if (Size >= 8)
5297         Chain = CallSeqStart = createMemcpyOutsideCallSeq(Arg, PtrOff,
5298                                                           CallSeqStart,
5299                                                           Flags, DAG, dl);
5300 
5301       // When a register is available, pass a small aggregate right-justified.
5302       if (Size < 8 && GPR_idx != NumGPRs) {
5303         // The easiest way to get this right-justified in a register
5304         // is to copy the structure into the rightmost portion of a
5305         // local variable slot, then load the whole slot into the
5306         // register.
5307         // FIXME: The memcpy seems to produce pretty awful code for
5308         // small aggregates, particularly for packed ones.
5309         // FIXME: It would be preferable to use the slot in the
5310         // parameter save area instead of a new local variable.
5311         SDValue AddPtr = PtrOff;
5312         if (!isLittleEndian) {
5313           SDValue Const = DAG.getConstant(8 - Size, dl, PtrOff.getValueType());
5314           AddPtr = DAG.getNode(ISD::ADD, dl, PtrVT, PtrOff, Const);
5315         }
5316         Chain = CallSeqStart = createMemcpyOutsideCallSeq(Arg, AddPtr,
5317                                                           CallSeqStart,
5318                                                           Flags, DAG, dl);
5319 
5320         // Load the slot into the register.
5321         SDValue Load =
5322             DAG.getLoad(PtrVT, dl, Chain, PtrOff, MachinePointerInfo());
5323         MemOpChains.push_back(Load.getValue(1));
5324         RegsToPass.push_back(std::make_pair(GPR[GPR_idx++], Load));
5325 
5326         // Done with this argument.
5327         ArgOffset += PtrByteSize;
5328         continue;
5329       }
5330 
5331       // For aggregates larger than PtrByteSize, copy the pieces of the
5332       // object that fit into registers from the parameter save area.
5333       for (unsigned j=0; j<Size; j+=PtrByteSize) {
5334         SDValue Const = DAG.getConstant(j, dl, PtrOff.getValueType());
5335         SDValue AddArg = DAG.getNode(ISD::ADD, dl, PtrVT, Arg, Const);
5336         if (GPR_idx != NumGPRs) {
5337           SDValue Load =
5338               DAG.getLoad(PtrVT, dl, Chain, AddArg, MachinePointerInfo());
5339           MemOpChains.push_back(Load.getValue(1));
5340           RegsToPass.push_back(std::make_pair(GPR[GPR_idx++], Load));
5341           ArgOffset += PtrByteSize;
5342         } else {
5343           ArgOffset += ((Size - j + PtrByteSize-1)/PtrByteSize)*PtrByteSize;
5344           break;
5345         }
5346       }
5347       continue;
5348     }
5349 
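         // Non-byval arguments: dispatch on the value type to select GPRs, FPRs,
         // VRs/QFPRs, or a parameter save area slot.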
5350     switch (Arg.getSimpleValueType().SimpleTy) {
5351     default: llvm_unreachable("Unexpected ValueType for argument!");
5352     case MVT::i1:
5353     case MVT::i32:
5354     case MVT::i64:
5355       if (Flags.isNest()) {
5356         // The 'nest' parameter, if any, is passed in R11.
5357         RegsToPass.push_back(std::make_pair(PPC::X11, Arg));
5358         hasNest = true;
5359         break;
5360       }
5361 
5362       // These can be scalar arguments or elements of an integer array type
5363       // passed directly.  Clang may use those instead of "byval" aggregate
5364       // types to avoid forcing arguments to memory unnecessarily.
5365       if (GPR_idx != NumGPRs) {
5366         RegsToPass.push_back(std::make_pair(GPR[GPR_idx++], Arg));
5367       } else {
5368         if (CallConv == CallingConv::Fast)
5369           ComputePtrOff();
5370 
5371         LowerMemOpCallTo(DAG, MF, Chain, Arg, PtrOff, SPDiff, ArgOffset,
5372                          true, isTailCall, false, MemOpChains,
5373                          TailCallArguments, dl);
5374         if (CallConv == CallingConv::Fast)
5375           ArgOffset += PtrByteSize;
5376       }
5377       if (CallConv != CallingConv::Fast)
5378         ArgOffset += PtrByteSize;
5379       break;
5380     case MVT::f32:
5381     case MVT::f64: {
5382       // These can be scalar arguments or elements of a float array type
5383       // passed directly.  The latter are used to implement ELFv2 homogeneous
5384       // float aggregates.
5385 
5386       // Named arguments go into FPRs first, and once they overflow, the
5387       // remaining arguments go into GPRs and then the parameter save area.
5388       // Unnamed arguments for vararg functions always go to GPRs and
5389       // then the parameter save area.  For now, put all arguments to vararg
5390       // routines always in both locations (FPR *and* GPR or stack slot).
5391       bool NeedGPROrStack = isVarArg || FPR_idx == NumFPRs;
5392       bool NeededLoad = false;
5393 
5394       // First load the argument into the next available FPR.
5395       if (FPR_idx != NumFPRs)
5396         RegsToPass.push_back(std::make_pair(FPR[FPR_idx++], Arg));
5397 
5398       // Next, load the argument into GPR or stack slot if needed.
5399       if (!NeedGPROrStack)
5400         ;
5401       else if (GPR_idx != NumGPRs && CallConv != CallingConv::Fast) {
5402         // FIXME: We may want to re-enable this for CallingConv::Fast on the P8
5403         // once we support fp <-> gpr moves.
5404 
5405         // In the non-vararg case, this can only ever happen in the
5406         // presence of f32 array types, since otherwise we never run
5407         // out of FPRs before running out of GPRs.
5408         SDValue ArgVal;
5409 
5410         // Double values are always passed in a single GPR.
5411         if (Arg.getValueType() != MVT::f32) {
5412           ArgVal = DAG.getNode(ISD::BITCAST, dl, MVT::i64, Arg);
5413 
5414         // Non-array float values are extended and passed in a GPR.
5415         } else if (!Flags.isInConsecutiveRegs()) {
5416           ArgVal = DAG.getNode(ISD::BITCAST, dl, MVT::i32, Arg);
5417           ArgVal = DAG.getNode(ISD::ANY_EXTEND, dl, MVT::i64, ArgVal);
5418 
5419         // If we have an array of floats, we collect every odd element
5420         // together with its predecessor into one GPR.
5421         } else if (ArgOffset % PtrByteSize != 0) {
5422           SDValue Lo, Hi;
5423           Lo = DAG.getNode(ISD::BITCAST, dl, MVT::i32, OutVals[i - 1]);
5424           Hi = DAG.getNode(ISD::BITCAST, dl, MVT::i32, Arg);
5425           if (!isLittleEndian)
5426             std::swap(Lo, Hi);
5427           ArgVal = DAG.getNode(ISD::BUILD_PAIR, dl, MVT::i64, Lo, Hi);
5428 
5429         // The final element, if even, goes into the first half of a GPR.
5430         } else if (Flags.isInConsecutiveRegsLast()) {
5431           ArgVal = DAG.getNode(ISD::BITCAST, dl, MVT::i32, Arg);
5432           ArgVal = DAG.getNode(ISD::ANY_EXTEND, dl, MVT::i64, ArgVal);
5433           if (!isLittleEndian)
5434             ArgVal = DAG.getNode(ISD::SHL, dl, MVT::i64, ArgVal,
5435                                  DAG.getConstant(32, dl, MVT::i32));
5436 
5437         // Non-final even elements are skipped; they will be handled
5438         // together with the subsequent argument on the next go-around.
5439         } else
5440           ArgVal = SDValue();
5441 
5442         if (ArgVal.getNode())
5443           RegsToPass.push_back(std::make_pair(GPR[GPR_idx++], ArgVal));
5444       } else {
5445         if (CallConv == CallingConv::Fast)
5446           ComputePtrOff();
5447 
5448         // Single-precision floating-point values are mapped to the
5449         // second (rightmost) word of the stack doubleword.
5450         if (Arg.getValueType() == MVT::f32 &&
5451             !isLittleEndian && !Flags.isInConsecutiveRegs()) {
5452           SDValue ConstFour = DAG.getConstant(4, dl, PtrOff.getValueType());
5453           PtrOff = DAG.getNode(ISD::ADD, dl, PtrVT, PtrOff, ConstFour);
5454         }
5455 
5456         LowerMemOpCallTo(DAG, MF, Chain, Arg, PtrOff, SPDiff, ArgOffset,
5457                          true, isTailCall, false, MemOpChains,
5458                          TailCallArguments, dl);
5459 
5460         NeededLoad = true;
5461       }
5462       // When passing an array of floats, the array occupies consecutive
5463       // space in the argument area; only round up to the next doubleword
5464       // at the end of the array.  Otherwise, each float takes 8 bytes.
5465       if (CallConv != CallingConv::Fast || NeededLoad) {
5466         ArgOffset += (Arg.getValueType() == MVT::f32 &&
5467                       Flags.isInConsecutiveRegs()) ? 4 : 8;
5468         if (Flags.isInConsecutiveRegsLast())
5469           ArgOffset = ((ArgOffset + PtrByteSize - 1)/PtrByteSize) * PtrByteSize;
5470       }
5471       break;
5472     }
5473     case MVT::v4f32:
5474     case MVT::v4i32:
5475     case MVT::v8i16:
5476     case MVT::v16i8:
5477     case MVT::v2f64:
5478     case MVT::v2i64:
5479     case MVT::v1i128:
5480       if (!Subtarget.hasQPX()) {
5481       // These can be scalar arguments or elements of a vector array type
5482       // passed directly.  The latter are used to implement ELFv2 homogeneous
5483       // vector aggregates.
5484 
5485       // For a varargs call, named arguments go into VRs or on the stack as
5486       // usual; unnamed arguments always go to the stack or the corresponding
5487       // GPRs when within range.  For now, we always put the value in both
5488       // locations (or even all three).
5489       if (isVarArg) {
5490         // We could elide this store in the case where the object fits
5491         // entirely in R registers.  Maybe later.
5492         SDValue Store =
5493             DAG.getStore(Chain, dl, Arg, PtrOff, MachinePointerInfo());
5494         MemOpChains.push_back(Store);
5495         if (VR_idx != NumVRs) {
5496           SDValue Load =
5497               DAG.getLoad(MVT::v4f32, dl, Store, PtrOff, MachinePointerInfo());
5498           MemOpChains.push_back(Load.getValue(1));
5499           RegsToPass.push_back(std::make_pair(VR[VR_idx++], Load));
5500         }
5501         ArgOffset += 16;
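             // Shadow the vector in GPRs as well: load each pointer-sized piece
             // of the stored value into the next available GPR.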
5502         for (unsigned i=0; i<16; i+=PtrByteSize) {
5503           if (GPR_idx == NumGPRs)
5504             break;
5505           SDValue Ix = DAG.getNode(ISD::ADD, dl, PtrVT, PtrOff,
5506                                    DAG.getConstant(i, dl, PtrVT));
5507           SDValue Load =
5508               DAG.getLoad(PtrVT, dl, Store, Ix, MachinePointerInfo());
5509           MemOpChains.push_back(Load.getValue(1));
5510           RegsToPass.push_back(std::make_pair(GPR[GPR_idx++], Load));
5511         }
5512         break;
5513       }
5514 
5515       // Non-varargs Altivec params go into VRs or on the stack.
5516       if (VR_idx != NumVRs) {
5517         RegsToPass.push_back(std::make_pair(VR[VR_idx++], Arg));
5518       } else {
5519         if (CallConv == CallingConv::Fast)
5520           ComputePtrOff();
5521 
5522         LowerMemOpCallTo(DAG, MF, Chain, Arg, PtrOff, SPDiff, ArgOffset,
5523                          true, isTailCall, true, MemOpChains,
5524                          TailCallArguments, dl);
5525         if (CallConv == CallingConv::Fast)
5526           ArgOffset += 16;
5527       }
5528 
5529       if (CallConv != CallingConv::Fast)
5530         ArgOffset += 16;
5531       break;
5532       } // not QPX
5533 
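           // With QPX enabled, the only Altivec-style type that reaches here is
           // v4f32; it falls through and is handled like the QPX types below.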
5534       assert(Arg.getValueType().getSimpleVT().SimpleTy == MVT::v4f32 &&
5535              "Invalid QPX parameter type");
5536 
5537       /* fall through */
5538     case MVT::v4f64:
5539     case MVT::v4i1: {
5540       bool IsF32 = Arg.getValueType().getSimpleVT().SimpleTy == MVT::v4f32;
5541       if (isVarArg) {
5542         // We could elide this store in the case where the object fits
5543         // entirely in R registers.  Maybe later.
5544         SDValue Store =
5545             DAG.getStore(Chain, dl, Arg, PtrOff, MachinePointerInfo());
5546         MemOpChains.push_back(Store);
5547         if (QFPR_idx != NumQFPRs) {
5548           SDValue Load = DAG.getLoad(IsF32 ? MVT::v4f32 : MVT::v4f64, dl, Store,
5549                                      PtrOff, MachinePointerInfo());
5550           MemOpChains.push_back(Load.getValue(1));
5551           RegsToPass.push_back(std::make_pair(QFPR[QFPR_idx++], Load));
5552         }
5553         ArgOffset += (IsF32 ? 16 : 32);
5554         for (unsigned i = 0; i < (IsF32 ? 16U : 32U); i += PtrByteSize) {
5555           if (GPR_idx == NumGPRs)
5556             break;
5557           SDValue Ix = DAG.getNode(ISD::ADD, dl, PtrVT, PtrOff,
5558                                    DAG.getConstant(i, dl, PtrVT));
5559           SDValue Load =
5560               DAG.getLoad(PtrVT, dl, Store, Ix, MachinePointerInfo());
5561           MemOpChains.push_back(Load.getValue(1));
5562           RegsToPass.push_back(std::make_pair(GPR[GPR_idx++], Load));
5563         }
5564         break;
5565       }
5566 
5567       // Non-varargs QPX params go into registers or on the stack.
5568       if (QFPR_idx != NumQFPRs) {
5569         RegsToPass.push_back(std::make_pair(QFPR[QFPR_idx++], Arg));
5570       } else {
5571         if (CallConv == CallingConv::Fast)
5572           ComputePtrOff();
5573 
5574         LowerMemOpCallTo(DAG, MF, Chain, Arg, PtrOff, SPDiff, ArgOffset,
5575                          true, isTailCall, true, MemOpChains,
5576                          TailCallArguments, dl);
5577         if (CallConv == CallingConv::Fast)
5578           ArgOffset += (IsF32 ? 16 : 32);
5579       }
5580 
5581       if (CallConv != CallingConv::Fast)
5582         ArgOffset += (IsF32 ? 16 : 32);
5583       break;
5584       }
5585     }
5586   }
5587 
5588   assert(NumBytesActuallyUsed == ArgOffset);
5589   (void)NumBytesActuallyUsed;
5590 
5591   if (!MemOpChains.empty())
5592     Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, MemOpChains);
5593 
5594   // Check if this is an indirect call (MTCTR/BCTRL).
5595   // See PrepareCall() for more information about calls through function
5596   // pointers in the 64-bit SVR4 ABI.
5597   if (!isTailCall && !isPatchPoint &&
5598       !isFunctionGlobalAddress(Callee) &&
5599       !isa<ExternalSymbolSDNode>(Callee)) {
5600     // Load r2 into a virtual register and store it to the TOC save area.
5601     setUsesTOCBasePtr(DAG);
5602     SDValue Val = DAG.getCopyFromReg(Chain, dl, PPC::X2, MVT::i64);
5603     // TOC save area offset.
5604     unsigned TOCSaveOffset = Subtarget.getFrameLowering()->getTOCSaveOffset();
5605     SDValue PtrOff = DAG.getIntPtrConstant(TOCSaveOffset, dl);
5606     SDValue AddPtr = DAG.getNode(ISD::ADD, dl, PtrVT, StackPtr, PtrOff);
5607     Chain = DAG.getStore(
5608         Val.getValue(1), dl, Val, AddPtr,
5609         MachinePointerInfo::getStack(DAG.getMachineFunction(), TOCSaveOffset));
5610     // In the ELFv2 ABI, R12 must contain the address of an indirect callee.
5611     // This does not mean the MTCTR instruction must use R12; it's easier
5612     // to model this as an extra parameter, so do that.
5613     if (isELFv2ABI && !isPatchPoint)
5614       RegsToPass.push_back(std::make_pair((unsigned)PPC::X12, Callee));
5615   }
5616 
5617   // Build a sequence of copy-to-reg nodes chained together with token chain
5618   // and flag operands which copy the outgoing args into the appropriate regs.
5619   SDValue InFlag;
5620   for (unsigned i = 0, e = RegsToPass.size(); i != e; ++i) {
5621     Chain = DAG.getCopyToReg(Chain, dl, RegsToPass[i].first,
5622                              RegsToPass[i].second, InFlag);
5623     InFlag = Chain.getValue(1);
5624   }
5625 
5626   if (isTailCall && !IsSibCall)
5627     PrepareTailCall(DAG, InFlag, Chain, dl, SPDiff, NumBytes, LROp, FPOp,
5628                     TailCallArguments);
5629 
5630   return FinishCall(CallConv, dl, isTailCall, isVarArg, isPatchPoint, hasNest,
5631                     DAG, RegsToPass, InFlag, Chain, CallSeqStart, Callee,
5632                     SPDiff, NumBytes, Ins, InVals, CS);
5633 }
5634 
5635 SDValue PPCTargetLowering::LowerCall_Darwin(
5636     SDValue Chain, SDValue Callee, CallingConv::ID CallConv, bool isVarArg,
5637     bool isTailCall, bool isPatchPoint,
5638     const SmallVectorImpl<ISD::OutputArg> &Outs,
5639     const SmallVectorImpl<SDValue> &OutVals,
5640     const SmallVectorImpl<ISD::InputArg> &Ins, const SDLoc &dl,
5641     SelectionDAG &DAG, SmallVectorImpl<SDValue> &InVals,
5642     ImmutableCallSite *CS) const {
5643 
5644   unsigned NumOps = Outs.size();
5645 
5646   EVT PtrVT = getPointerTy(DAG.getDataLayout());
5647   bool isPPC64 = PtrVT == MVT::i64;
5648   unsigned PtrByteSize = isPPC64 ? 8 : 4;
5649 
5650   MachineFunction &MF = DAG.getMachineFunction();
5651 
5652   // Mark this function as potentially containing a function that contains a
5653   // tail call. As a consequence, the frame pointer will be used for dynamic
5654   // allocation and for restoring the caller's stack pointer in this function's
5655   // epilogue. This is done because the tail-called function might overwrite
5656   // the value in this function's (MF) stack pointer stack slot 0(SP).
5657   if (getTargetMachine().Options.GuaranteedTailCallOpt &&
5658       CallConv == CallingConv::Fast)
5659     MF.getInfo<PPCFunctionInfo>()->setHasFastCall();
5660 
5661   // Count how many bytes are to be pushed on the stack, including the linkage
5662   // area, and parameter passing area.  We start with 24/48 bytes, which is
5663   // pre-reserved space for [SP][CR][LR][3 x unused].
5664   unsigned LinkageSize = Subtarget.getFrameLowering()->getLinkageSize();
5665   unsigned NumBytes = LinkageSize;
5666 
5667   // Add up all the space actually used.
5668   // In 32-bit non-varargs calls, Altivec parameters all go at the end; usually
5669   // they all go in registers, but we must reserve stack space for them for
5670   // possible use by the caller.  In varargs or 64-bit calls, parameters are
5671   // assigned stack space in order, with padding so Altivec parameters are
5672   // 16-byte aligned.
5673   unsigned nAltivecParamsAtEnd = 0;
5674   for (unsigned i = 0; i != NumOps; ++i) {
5675     ISD::ArgFlagsTy Flags = Outs[i].Flags;
5676     EVT ArgVT = Outs[i].VT;
5677     // Varargs Altivec parameters are padded to a 16 byte boundary.
5678     if (ArgVT == MVT::v4f32 || ArgVT == MVT::v4i32 ||
5679         ArgVT == MVT::v8i16 || ArgVT == MVT::v16i8 ||
5680         ArgVT == MVT::v2f64 || ArgVT == MVT::v2i64) {
5681       if (!isVarArg && !isPPC64) {
5682         // Non-varargs Altivec parameters go after all the non-Altivec
5683         // parameters; handle those later so we know how much padding we need.
5684         nAltivecParamsAtEnd++;
5685         continue;
5686       }
5687       // Varargs and 64-bit Altivec parameters are padded to 16 byte boundary.
5688       NumBytes = ((NumBytes+15)/16)*16;
5689     }
5690     NumBytes += CalculateStackSlotSize(ArgVT, Flags, PtrByteSize);
5691   }
5692 
5693   // Allow for Altivec parameters at the end, if needed.
5694   if (nAltivecParamsAtEnd) {
5695     NumBytes = ((NumBytes+15)/16)*16;
5696     NumBytes += 16*nAltivecParamsAtEnd;
5697   }
5698 
5699   // The prologue code of the callee may store up to 8 GPR argument registers to
5700   // the stack, allowing va_start to index over them in memory if it is varargs.
5701   // Because we cannot tell if this is needed on the caller side, we have to
5702   // conservatively assume that it is needed.  As such, make sure we have at
5703   // least enough stack space for the caller to store the 8 GPRs.
5704   NumBytes = std::max(NumBytes, LinkageSize + 8 * PtrByteSize);
5705 
5706   // Tail call needs the stack to be aligned.
5707   if (getTargetMachine().Options.GuaranteedTailCallOpt &&
5708       CallConv == CallingConv::Fast)
5709     NumBytes = EnsureStackAlignment(Subtarget.getFrameLowering(), NumBytes);
5710 
5711   // Calculate by how many bytes the stack has to be adjusted in case of tail
5712   // call optimization.
5713   int SPDiff = CalculateTailCallSPDiff(DAG, isTailCall, NumBytes);
5714 
5715   // To protect arguments on the stack from being clobbered in a tail call,
5716   // force all the loads to happen before doing any other lowering.
5717   if (isTailCall)
5718     Chain = DAG.getStackArgumentTokenFactor(Chain);
5719 
5720   // Adjust the stack pointer for the new arguments...
5721   // These operations are automatically eliminated by the prolog/epilog pass
5722   Chain = DAG.getCALLSEQ_START(Chain, DAG.getIntPtrConstant(NumBytes, dl, true),
5723                                dl);
5724   SDValue CallSeqStart = Chain;
5725 
5726   // Load the return address and frame pointer so they can be moved somewhere
5727   // else later.
5728   SDValue LROp, FPOp;
5729   Chain = EmitTailCallLoadFPAndRetAddr(DAG, SPDiff, Chain, LROp, FPOp, dl);
5730 
5731   // Set up a copy of the stack pointer for use loading and storing any
5732   // arguments that may not fit in the registers available for argument
5733   // passing.
5734   SDValue StackPtr;
5735   if (isPPC64)
5736     StackPtr = DAG.getRegister(PPC::X1, MVT::i64);
5737   else
5738     StackPtr = DAG.getRegister(PPC::R1, MVT::i32);
5739 
5740   // Figure out which arguments are going to go in registers, and which in
5741   // memory.  Also, if this is a vararg function, floating-point arguments
5742   // must be stored to our stack, and loaded into integer regs as well, if
5743   // any integer regs are available for argument passing.
5744   unsigned ArgOffset = LinkageSize;
5745   unsigned GPR_idx = 0, FPR_idx = 0, VR_idx = 0;
5746 
5747   static const MCPhysReg GPR_32[] = {           // 32-bit registers.
5748     PPC::R3, PPC::R4, PPC::R5, PPC::R6,
5749     PPC::R7, PPC::R8, PPC::R9, PPC::R10,
5750   };
5751   static const MCPhysReg GPR_64[] = {           // 64-bit registers.
5752     PPC::X3, PPC::X4, PPC::X5, PPC::X6,
5753     PPC::X7, PPC::X8, PPC::X9, PPC::X10,
5754   };
5755   static const MCPhysReg VR[] = {
5756     PPC::V2, PPC::V3, PPC::V4, PPC::V5, PPC::V6, PPC::V7, PPC::V8,
5757     PPC::V9, PPC::V10, PPC::V11, PPC::V12, PPC::V13
5758   };
5759   const unsigned NumGPRs = array_lengthof(GPR_32);
5760   const unsigned NumFPRs = 13;
5761   const unsigned NumVRs  = array_lengthof(VR);
5762 
5763   const MCPhysReg *GPR = isPPC64 ? GPR_64 : GPR_32;
5764 
5765   SmallVector<std::pair<unsigned, SDValue>, 8> RegsToPass;
5766   SmallVector<TailCallArgumentInfo, 8> TailCallArguments;
5767 
5768   SmallVector<SDValue, 8> MemOpChains;
5769   for (unsigned i = 0; i != NumOps; ++i) {
5770     SDValue Arg = OutVals[i];
5771     ISD::ArgFlagsTy Flags = Outs[i].Flags;
5772 
5773     // PtrOff will be used to store the current argument to the stack if a
5774     // register cannot be found for it.
5775     SDValue PtrOff;
5776 
5777     PtrOff = DAG.getConstant(ArgOffset, dl, StackPtr.getValueType());
5778 
5779     PtrOff = DAG.getNode(ISD::ADD, dl, PtrVT, StackPtr, PtrOff);
5780 
5781     // On PPC64, promote integers to 64-bit values.
5782     if (isPPC64 && Arg.getValueType() == MVT::i32) {
5783       // FIXME: Should this use ANY_EXTEND if neither sext nor zext?
5784       unsigned ExtOp = Flags.isSExt() ? ISD::SIGN_EXTEND : ISD::ZERO_EXTEND;
5785       Arg = DAG.getNode(ExtOp, dl, MVT::i64, Arg);
5786     }
5787 
5788     // FIXME memcpy is used way more than necessary.  Correctness first.
5789     // Note: "by value" is code for passing a structure by value, not
5790     // basic types.
5791     if (Flags.isByVal()) {
5792       unsigned Size = Flags.getByValSize();
5793       // Very small objects are passed right-justified.  Everything else is
5794       // passed left-justified.
5795       if (Size==1 || Size==2) {
5796         EVT VT = (Size==1) ? MVT::i8 : MVT::i16;
5797         if (GPR_idx != NumGPRs) {
5798           SDValue Load = DAG.getExtLoad(ISD::EXTLOAD, dl, PtrVT, Chain, Arg,
5799                                         MachinePointerInfo(), VT);
5800           MemOpChains.push_back(Load.getValue(1));
5801           RegsToPass.push_back(std::make_pair(GPR[GPR_idx++], Load));
5802 
5803           ArgOffset += PtrByteSize;
5804         } else {
5805           SDValue Const = DAG.getConstant(PtrByteSize - Size, dl,
5806                                           PtrOff.getValueType());
5807           SDValue AddPtr = DAG.getNode(ISD::ADD, dl, PtrVT, PtrOff, Const);
5808           Chain = CallSeqStart = createMemcpyOutsideCallSeq(Arg, AddPtr,
5809                                                             CallSeqStart,
5810                                                             Flags, DAG, dl);
5811           ArgOffset += PtrByteSize;
5812         }
5813         continue;
5814       }
5815       // Copy entire object into memory.  There are cases where gcc-generated
5816       // code assumes it is there, even if it could be put entirely into
5817       // registers.  (This is not what the doc says.)
5818       Chain = CallSeqStart = createMemcpyOutsideCallSeq(Arg, PtrOff,
5819                                                         CallSeqStart,
5820                                                         Flags, DAG, dl);
5821 
5822       // For small aggregates (Darwin only) and aggregates >= PtrByteSize,
5823       // copy the pieces of the object that fit into registers from the
5824       // parameter save area.
5825       for (unsigned j=0; j<Size; j+=PtrByteSize) {
5826         SDValue Const = DAG.getConstant(j, dl, PtrOff.getValueType());
5827         SDValue AddArg = DAG.getNode(ISD::ADD, dl, PtrVT, Arg, Const);
5828         if (GPR_idx != NumGPRs) {
5829           SDValue Load =
5830               DAG.getLoad(PtrVT, dl, Chain, AddArg, MachinePointerInfo());
5831           MemOpChains.push_back(Load.getValue(1));
5832           RegsToPass.push_back(std::make_pair(GPR[GPR_idx++], Load));
5833           ArgOffset += PtrByteSize;
5834         } else {
5835           ArgOffset += ((Size - j + PtrByteSize-1)/PtrByteSize)*PtrByteSize;
5836           break;
5837         }
5838       }
5839       continue;
5840     }
5841 
5842     switch (Arg.getSimpleValueType().SimpleTy) {
5843     default: llvm_unreachable("Unexpected ValueType for argument!");
5844     case MVT::i1:
5845     case MVT::i32:
5846     case MVT::i64:
5847       if (GPR_idx != NumGPRs) {
5848         if (Arg.getValueType() == MVT::i1)
5849           Arg = DAG.getNode(ISD::ZERO_EXTEND, dl, PtrVT, Arg);
5850 
5851         RegsToPass.push_back(std::make_pair(GPR[GPR_idx++], Arg));
5852       } else {
5853         LowerMemOpCallTo(DAG, MF, Chain, Arg, PtrOff, SPDiff, ArgOffset,
5854                          isPPC64, isTailCall, false, MemOpChains,
5855                          TailCallArguments, dl);
5856       }
5857       ArgOffset += PtrByteSize;
5858       break;
5859     case MVT::f32:
5860     case MVT::f64:
5861       if (FPR_idx != NumFPRs) {
5862         RegsToPass.push_back(std::make_pair(FPR[FPR_idx++], Arg));
5863 
5864         if (isVarArg) {
5865           SDValue Store =
5866               DAG.getStore(Chain, dl, Arg, PtrOff, MachinePointerInfo());
5867           MemOpChains.push_back(Store);
5868 
5869           // Float varargs are always shadowed in available integer registers
5870           if (GPR_idx != NumGPRs) {
5871             SDValue Load =
5872                 DAG.getLoad(PtrVT, dl, Store, PtrOff, MachinePointerInfo());
5873             MemOpChains.push_back(Load.getValue(1));
5874             RegsToPass.push_back(std::make_pair(GPR[GPR_idx++], Load));
5875           }
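               // On PPC32 a double consumes two GPRs; also load the second word
               // of the stored value into the next available GPR.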
5876           if (GPR_idx != NumGPRs && Arg.getValueType() == MVT::f64 && !isPPC64){
5877             SDValue ConstFour = DAG.getConstant(4, dl, PtrOff.getValueType());
5878             PtrOff = DAG.getNode(ISD::ADD, dl, PtrVT, PtrOff, ConstFour);
5879             SDValue Load =
5880                 DAG.getLoad(PtrVT, dl, Store, PtrOff, MachinePointerInfo());
5881             MemOpChains.push_back(Load.getValue(1));
5882             RegsToPass.push_back(std::make_pair(GPR[GPR_idx++], Load));
5883           }
5884         } else {
5885           // If we have any FPRs remaining, we may also have GPRs remaining.
5886           // Args passed in FPRs consume either 1 (f32) or 2 (f64) available
5887           // GPRs.
5888           if (GPR_idx != NumGPRs)
5889             ++GPR_idx;
5890           if (GPR_idx != NumGPRs && Arg.getValueType() == MVT::f64 &&
5891               !isPPC64)  // PPC64 has 64-bit GPRs, obviously :)
5892             ++GPR_idx;
5893         }
5894       } else
5895         LowerMemOpCallTo(DAG, MF, Chain, Arg, PtrOff, SPDiff, ArgOffset,
5896                          isPPC64, isTailCall, false, MemOpChains,
5897                          TailCallArguments, dl);
5898       if (isPPC64)
5899         ArgOffset += 8;
5900       else
5901         ArgOffset += Arg.getValueType() == MVT::f32 ? 4 : 8;
5902       break;
5903     case MVT::v4f32:
5904     case MVT::v4i32:
5905     case MVT::v8i16:
5906     case MVT::v16i8:
5907       if (isVarArg) {
5908         // These go aligned on the stack, or in the corresponding R registers
5909         // when within range.  The Darwin PPC ABI doc claims they also go in
5910         // V registers; in fact gcc does this only for arguments that are
5911         // prototyped, not for those that match the ellipsis (...).  We do it
5912         // for all arguments; it seems to work.
5913         while (ArgOffset % 16 !=0) {
5914           ArgOffset += PtrByteSize;
5915           if (GPR_idx != NumGPRs)
5916             GPR_idx++;
5917         }
5918         // We could elide this store in the case where the object fits
5919         // entirely in R registers.  Maybe later.
5920         PtrOff = DAG.getNode(ISD::ADD, dl, PtrVT, StackPtr,
5921                              DAG.getConstant(ArgOffset, dl, PtrVT));
5922         SDValue Store =
5923             DAG.getStore(Chain, dl, Arg, PtrOff, MachinePointerInfo());
5924         MemOpChains.push_back(Store);
5925         if (VR_idx != NumVRs) {
5926           SDValue Load =
5927               DAG.getLoad(MVT::v4f32, dl, Store, PtrOff, MachinePointerInfo());
5928           MemOpChains.push_back(Load.getValue(1));
5929           RegsToPass.push_back(std::make_pair(VR[VR_idx++], Load));
5930         }
5931         ArgOffset += 16;
5932         for (unsigned i=0; i<16; i+=PtrByteSize) {
5933           if (GPR_idx == NumGPRs)
5934             break;
5935           SDValue Ix = DAG.getNode(ISD::ADD, dl, PtrVT, PtrOff,
5936                                    DAG.getConstant(i, dl, PtrVT));
5937           SDValue Load =
5938               DAG.getLoad(PtrVT, dl, Store, Ix, MachinePointerInfo());
5939           MemOpChains.push_back(Load.getValue(1));
5940           RegsToPass.push_back(std::make_pair(GPR[GPR_idx++], Load));
5941         }
5942         break;
5943       }
5944 
5945       // Non-varargs Altivec params generally go in registers, but have
5946       // stack space allocated at the end.
5947       if (VR_idx != NumVRs) {
5948         // Doesn't have GPR space allocated.
5949         RegsToPass.push_back(std::make_pair(VR[VR_idx++], Arg));
5950       } else if (nAltivecParamsAtEnd==0) {
5951         // We are emitting Altivec params in order.
5952         LowerMemOpCallTo(DAG, MF, Chain, Arg, PtrOff, SPDiff, ArgOffset,
5953                          isPPC64, isTailCall, true, MemOpChains,
5954                          TailCallArguments, dl);
5955         ArgOffset += 16;
5956       }
5957       break;
5958     }
5959   }
5960   // If all Altivec parameters fit in registers, as they usually do,
5961   // they get stack space following the non-Altivec parameters.  We
5962   // don't track this here because nobody below needs it.
5963   // If there are more Altivec parameters than fit in registers, emit
5964   // the stores here.
5965   if (!isVarArg && nAltivecParamsAtEnd > NumVRs) {
5966     unsigned j = 0;
5967     // Offset is aligned; skip 1st 12 params which go in V registers.
5968     ArgOffset = ((ArgOffset+15)/16)*16;
5969     ArgOffset += 12*16;
5970     for (unsigned i = 0; i != NumOps; ++i) {
5971       SDValue Arg = OutVals[i];
5972       EVT ArgType = Outs[i].VT;
5973       if (ArgType==MVT::v4f32 || ArgType==MVT::v4i32 ||
5974           ArgType==MVT::v8i16 || ArgType==MVT::v16i8) {
5975         if (++j > NumVRs) {
5976           SDValue PtrOff;
5977           // We are emitting Altivec params in order.
5978           LowerMemOpCallTo(DAG, MF, Chain, Arg, PtrOff, SPDiff, ArgOffset,
5979                            isPPC64, isTailCall, true, MemOpChains,
5980                            TailCallArguments, dl);
5981           ArgOffset += 16;
5982         }
5983       }
5984     }
5985   }
5986 
5987   if (!MemOpChains.empty())
5988     Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, MemOpChains);
5989 
5990   // On Darwin, R12 must contain the address of an indirect callee.  This does
5991   // not mean the MTCTR instruction must use R12; it's easier to model this as
5992   // an extra parameter, so do that.
5993   if (!isTailCall &&
5994       !isFunctionGlobalAddress(Callee) &&
5995       !isa<ExternalSymbolSDNode>(Callee) &&
5996       !isBLACompatibleAddress(Callee, DAG))
5997     RegsToPass.push_back(std::make_pair((unsigned)(isPPC64 ? PPC::X12 :
5998                                                    PPC::R12), Callee));
5999 
6000   // Build a sequence of copy-to-reg nodes chained together with token chain
6001   // and flag operands which copy the outgoing args into the appropriate regs.
6002   SDValue InFlag;
6003   for (unsigned i = 0, e = RegsToPass.size(); i != e; ++i) {
6004     Chain = DAG.getCopyToReg(Chain, dl, RegsToPass[i].first,
6005                              RegsToPass[i].second, InFlag);
6006     InFlag = Chain.getValue(1);
6007   }
6008 
6009   if (isTailCall)
6010     PrepareTailCall(DAG, InFlag, Chain, dl, SPDiff, NumBytes, LROp, FPOp,
6011                     TailCallArguments);
6012 
6013   return FinishCall(CallConv, dl, isTailCall, isVarArg, isPatchPoint,
6014                     /* unused except on PPC64 ELFv1 */ false, DAG,
6015                     RegsToPass, InFlag, Chain, CallSeqStart, Callee, SPDiff,
6016                     NumBytes, Ins, InVals, CS);
6017 }
6018 
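     // Return true if every value in Outs can be assigned to a return register
     // under the RetCC_PPC calling-convention rules.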
6019 bool
6020 PPCTargetLowering::CanLowerReturn(CallingConv::ID CallConv,
6021                                   MachineFunction &MF, bool isVarArg,
6022                                   const SmallVectorImpl<ISD::OutputArg> &Outs,
6023                                   LLVMContext &Context) const {
6024   SmallVector<CCValAssign, 16> RVLocs;
6025   CCState CCInfo(CallConv, isVarArg, MF, RVLocs, Context);
6026   return CCInfo.CheckReturn(Outs, RetCC_PPC);
6027 }
6028 
6029 SDValue
6030 PPCTargetLowering::LowerReturn(SDValue Chain, CallingConv::ID CallConv,
6031                                bool isVarArg,
6032                                const SmallVectorImpl<ISD::OutputArg> &Outs,
6033                                const SmallVectorImpl<SDValue> &OutVals,
6034                                const SDLoc &dl, SelectionDAG &DAG) const {
6035 
6036   SmallVector<CCValAssign, 16> RVLocs;
6037   CCState CCInfo(CallConv, isVarArg, DAG.getMachineFunction(), RVLocs,
6038                  *DAG.getContext());
6039   CCInfo.AnalyzeReturn(Outs, RetCC_PPC);
6040 
6041   SDValue Flag;
6042   SmallVector<SDValue, 4> RetOps(1, Chain);
6043 
6044   // Copy the result values into the output registers.
6045   for (unsigned i = 0; i != RVLocs.size(); ++i) {
6046     CCValAssign &VA = RVLocs[i];
6047     assert(VA.isRegLoc() && "Can only return in registers!");
6048 
6049     SDValue Arg = OutVals[i];
6050 
6051     switch (VA.getLocInfo()) {
6052     default: llvm_unreachable("Unknown loc info!");
6053     case CCValAssign::Full: break;
6054     case CCValAssign::AExt:
6055       Arg = DAG.getNode(ISD::ANY_EXTEND, dl, VA.getLocVT(), Arg);
6056       break;
6057     case CCValAssign::ZExt:
6058       Arg = DAG.getNode(ISD::ZERO_EXTEND, dl, VA.getLocVT(), Arg);
6059       break;
6060     case CCValAssign::SExt:
6061       Arg = DAG.getNode(ISD::SIGN_EXTEND, dl, VA.getLocVT(), Arg);
6062       break;
6063     }
6064 
6065     Chain = DAG.getCopyToReg(Chain, dl, VA.getLocReg(), Arg, Flag);
6066     Flag = Chain.getValue(1);
6067     RetOps.push_back(DAG.getRegister(VA.getLocReg(), VA.getLocVT()));
6068   }
6069 
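       // If this function saves and restores some callee-saved registers via
       // copies (split CSR), add them to the return operands so they remain live
       // out of the function.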
6070   const PPCRegisterInfo *TRI = Subtarget.getRegisterInfo();
6071   const MCPhysReg *I =
6072     TRI->getCalleeSavedRegsViaCopy(&DAG.getMachineFunction());
6073   if (I) {
6074     for (; *I; ++I) {
6075 
6076       if (PPC::G8RCRegClass.contains(*I))
6077         RetOps.push_back(DAG.getRegister(*I, MVT::i64));
6078       else if (PPC::F8RCRegClass.contains(*I))
6079         RetOps.push_back(DAG.getRegister(*I, MVT::getFloatingPointVT(64)));
6080       else if (PPC::CRRCRegClass.contains(*I))
6081         RetOps.push_back(DAG.getRegister(*I, MVT::i1));
6082       else if (PPC::VRRCRegClass.contains(*I))
6083         RetOps.push_back(DAG.getRegister(*I, MVT::Other));
6084       else
6085         llvm_unreachable("Unexpected register class in CSRsViaCopy!");
6086     }
6087   }
6088 
6089   RetOps[0] = Chain;  // Update chain.
6090 
6091   // Add the flag if we have it.
6092   if (Flag.getNode())
6093     RetOps.push_back(Flag);
6094 
6095   return DAG.getNode(PPCISD::RET_FLAG, dl, MVT::Other, RetOps);
6096 }
6097 
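     // Lower GET_DYNAMIC_AREA_OFFSET by emitting a DYNAREAOFFSET node that takes
     // the chain and the frame pointer save frame index as operands.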
6098 SDValue
6099 PPCTargetLowering::LowerGET_DYNAMIC_AREA_OFFSET(SDValue Op,
6100                                                 SelectionDAG &DAG) const {
6101   SDLoc dl(Op);
6102 
6103   // Get the correct type for integers.
6104   EVT IntVT = Op.getValueType();
6105 
6106   // Get the inputs.
6107   SDValue Chain = Op.getOperand(0);
6108   SDValue FPSIdx = getFramePointerFrameIndex(DAG);
6109   // Build a DYNAREAOFFSET node.
6110   SDValue Ops[2] = {Chain, FPSIdx};
6111   SDVTList VTs = DAG.getVTList(IntVT);
6112   return DAG.getNode(PPCISD::DYNAREAOFFSET, dl, VTs, Ops);
6113 }
6114 
6115 SDValue PPCTargetLowering::LowerSTACKRESTORE(SDValue Op,
6116                                              SelectionDAG &DAG) const {
6117   // When we pop the dynamic allocation we need to restore the SP link.
6118   SDLoc dl(Op);
6119 
6120   // Get the correct type for pointers.
6121   EVT PtrVT = getPointerTy(DAG.getDataLayout());
6122 
6123   // Construct the stack pointer operand.
6124   bool isPPC64 = Subtarget.isPPC64();
6125   unsigned SP = isPPC64 ? PPC::X1 : PPC::R1;
6126   SDValue StackPtr = DAG.getRegister(SP, PtrVT);
6127 
6128   // Get the operands for the STACKRESTORE.
6129   SDValue Chain = Op.getOperand(0);
6130   SDValue SaveSP = Op.getOperand(1);
6131 
6132   // Load the old link SP.
6133   SDValue LoadLinkSP =
6134       DAG.getLoad(PtrVT, dl, Chain, StackPtr, MachinePointerInfo());
6135 
6136   // Restore the stack pointer.
6137   Chain = DAG.getCopyToReg(LoadLinkSP.getValue(1), dl, SP, SaveSP);
6138 
6139   // Store the old link SP.
6140   return DAG.getStore(Chain, dl, LoadLinkSP, StackPtr, MachinePointerInfo());
6141 }
6142 
6143 SDValue PPCTargetLowering::getReturnAddrFrameIndex(SelectionDAG &DAG) const {
6144   MachineFunction &MF = DAG.getMachineFunction();
6145   bool isPPC64 = Subtarget.isPPC64();
6146   EVT PtrVT = getPointerTy(MF.getDataLayout());
6147 
6148   // Get the current return address save index.  The corresponding fixed stack
6149   // object sits at the ABI-specified link register save offset.
6150   PPCFunctionInfo *FI = MF.getInfo<PPCFunctionInfo>();
6151   int RASI = FI->getReturnAddrSaveIndex();
6152 
6153   // If the return address save index hasn't been defined yet.
6154   if (!RASI) {
6155     // Find out the fixed offset of the return address save area.
6156     int LROffset = Subtarget.getFrameLowering()->getReturnSaveOffset();
6157     // Allocate the frame index for the return address save area.
6158     RASI = MF.getFrameInfo().CreateFixedObject(isPPC64? 8 : 4, LROffset, false);
6159     // Save the result.
6160     FI->setReturnAddrSaveIndex(RASI);
6161   }
6162   return DAG.getFrameIndex(RASI, PtrVT);
6163 }
6164 
6165 SDValue
6166 PPCTargetLowering::getFramePointerFrameIndex(SelectionDAG & DAG) const {
6167   MachineFunction &MF = DAG.getMachineFunction();
6168   bool isPPC64 = Subtarget.isPPC64();
6169   EVT PtrVT = getPointerTy(MF.getDataLayout());
6170 
6171   // Get the current frame pointer save index.  The users of this index will be
6172   // primarily DYNALLOC instructions.
6173   PPCFunctionInfo *FI = MF.getInfo<PPCFunctionInfo>();
6174   int FPSI = FI->getFramePointerSaveIndex();
6175 
6176   // If the frame pointer save index hasn't been defined yet.
6177   if (!FPSI) {
6178     // Find out the fixed offset of the frame pointer save area.
6179     int FPOffset = Subtarget.getFrameLowering()->getFramePointerSaveOffset();
6180     // Allocate the frame index for the frame pointer save area.
6181     FPSI = MF.getFrameInfo().CreateFixedObject(isPPC64? 8 : 4, FPOffset, true);
6182     // Save the result.
6183     FI->setFramePointerSaveIndex(FPSI);
6184   }
6185   return DAG.getFrameIndex(FPSI, PtrVT);
6186 }
6187 
6188 SDValue PPCTargetLowering::LowerDYNAMIC_STACKALLOC(SDValue Op,
6189                                                    SelectionDAG &DAG) const {
6190   // Get the inputs.
6191   SDValue Chain = Op.getOperand(0);
6192   SDValue Size  = Op.getOperand(1);
6193   SDLoc dl(Op);
6194 
6195   // Get the correct type for pointers.
6196   EVT PtrVT = getPointerTy(DAG.getDataLayout());
6197   // Negate the size.
6198   SDValue NegSize = DAG.getNode(ISD::SUB, dl, PtrVT,
6199                                 DAG.getConstant(0, dl, PtrVT), Size);
6200   // Construct a node for the frame pointer save index.
6201   SDValue FPSIdx = getFramePointerFrameIndex(DAG);
6202   // Build a DYNALLOC node.
6203   SDValue Ops[3] = { Chain, NegSize, FPSIdx };
6204   SDVTList VTs = DAG.getVTList(PtrVT, MVT::Other);
6205   return DAG.getNode(PPCISD::DYNALLOC, dl, VTs, Ops);
6206 }
6207 
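     // Lower EH_DWARF_CFA by creating a fixed stack object at offset 0 from the
     // incoming stack pointer and returning its frame index, which stands for the
     // DWARF canonical frame address.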
6208 SDValue PPCTargetLowering::LowerEH_DWARF_CFA(SDValue Op,
6209                                                      SelectionDAG &DAG) const {
6210   MachineFunction &MF = DAG.getMachineFunction();
6211 
6212   bool isPPC64 = Subtarget.isPPC64();
6213   EVT PtrVT = getPointerTy(DAG.getDataLayout());
6214 
6215   int FI = MF.getFrameInfo().CreateFixedObject(isPPC64 ? 8 : 4, 0, false);
6216   return DAG.getFrameIndex(FI, PtrVT);
6217 }
6218 
6219 SDValue PPCTargetLowering::lowerEH_SJLJ_SETJMP(SDValue Op,
6220                                                SelectionDAG &DAG) const {
6221   SDLoc DL(Op);
6222   return DAG.getNode(PPCISD::EH_SJLJ_SETJMP, DL,
6223                      DAG.getVTList(MVT::i32, MVT::Other),
6224                      Op.getOperand(0), Op.getOperand(1));
6225 }
6226 
6227 SDValue PPCTargetLowering::lowerEH_SJLJ_LONGJMP(SDValue Op,
6228                                                 SelectionDAG &DAG) const {
6229   SDLoc DL(Op);
6230   return DAG.getNode(PPCISD::EH_SJLJ_LONGJMP, DL, MVT::Other,
6231                      Op.getOperand(0), Op.getOperand(1));
6232 }
6233 
6234 SDValue PPCTargetLowering::LowerLOAD(SDValue Op, SelectionDAG &DAG) const {
6235   if (Op.getValueType().isVector())
6236     return LowerVectorLoad(Op, DAG);
6237 
6238   assert(Op.getValueType() == MVT::i1 &&
6239          "Custom lowering only for i1 loads");
6240 
6241   // First, load 8 bits into 32 bits, then truncate to 1 bit.
6242 
6243   SDLoc dl(Op);
6244   LoadSDNode *LD = cast<LoadSDNode>(Op);
6245 
6246   SDValue Chain = LD->getChain();
6247   SDValue BasePtr = LD->getBasePtr();
6248   MachineMemOperand *MMO = LD->getMemOperand();
6249 
6250   SDValue NewLD =
6251       DAG.getExtLoad(ISD::EXTLOAD, dl, getPointerTy(DAG.getDataLayout()), Chain,
6252                      BasePtr, MVT::i8, MMO);
6253   SDValue Result = DAG.getNode(ISD::TRUNCATE, dl, MVT::i1, NewLD);
6254 
6255   SDValue Ops[] = { Result, SDValue(NewLD.getNode(), 1) };
6256   return DAG.getMergeValues(Ops, dl);
6257 }
6258 
6259 SDValue PPCTargetLowering::LowerSTORE(SDValue Op, SelectionDAG &DAG) const {
6260   if (Op.getOperand(1).getValueType().isVector())
6261     return LowerVectorStore(Op, DAG);
6262 
6263   assert(Op.getOperand(1).getValueType() == MVT::i1 &&
6264          "Custom lowering only for i1 stores");
6265 
6266   // First, zero extend to 32 bits, then use a truncating store to 8 bits.
6267 
6268   SDLoc dl(Op);
6269   StoreSDNode *ST = cast<StoreSDNode>(Op);
6270 
6271   SDValue Chain = ST->getChain();
6272   SDValue BasePtr = ST->getBasePtr();
6273   SDValue Value = ST->getValue();
6274   MachineMemOperand *MMO = ST->getMemOperand();
6275 
6276   Value = DAG.getNode(ISD::ZERO_EXTEND, dl, getPointerTy(DAG.getDataLayout()),
6277                       Value);
6278   return DAG.getTruncStore(Chain, dl, Value, BasePtr, MVT::i8, MMO);
6279 }
6280 
6281 // FIXME: Remove this once the ANDI glue bug is fixed:
6282 SDValue PPCTargetLowering::LowerTRUNCATE(SDValue Op, SelectionDAG &DAG) const {
6283   assert(Op.getValueType() == MVT::i1 &&
6284          "Custom lowering only for i1 results");
6285 
6286   SDLoc DL(Op);
6287   return DAG.getNode(PPCISD::ANDIo_1_GT_BIT, DL, MVT::i1,
6288                      Op.getOperand(0));
6289 }
6290 
/// LowerSELECT_CC - Lower floating-point select_cc's into an fsel instruction
/// when possible.
6293 SDValue PPCTargetLowering::LowerSELECT_CC(SDValue Op, SelectionDAG &DAG) const {
  // Not FP? Not an fsel.
6295   if (!Op.getOperand(0).getValueType().isFloatingPoint() ||
6296       !Op.getOperand(2).getValueType().isFloatingPoint())
6297     return Op;
6298 
6299   // We might be able to do better than this under some circumstances, but in
6300   // general, fsel-based lowering of select is a finite-math-only optimization.
6301   // For more information, see section F.3 of the 2.06 ISA specification.
6302   if (!DAG.getTarget().Options.NoInfsFPMath ||
6303       !DAG.getTarget().Options.NoNaNsFPMath)
6304     return Op;
6305   // TODO: Propagate flags from the select rather than global settings.
6306   SDNodeFlags Flags;
6307   Flags.setNoInfs(true);
6308   Flags.setNoNaNs(true);
6309 
6310   ISD::CondCode CC = cast<CondCodeSDNode>(Op.getOperand(4))->get();
6311 
6312   EVT ResVT = Op.getValueType();
6313   EVT CmpVT = Op.getOperand(0).getValueType();
6314   SDValue LHS = Op.getOperand(0), RHS = Op.getOperand(1);
6315   SDValue TV  = Op.getOperand(2), FV  = Op.getOperand(3);
6316   SDLoc dl(Op);
6317 
6318   // If the RHS of the comparison is a 0.0, we don't need to do the
6319   // subtraction at all.
6320   SDValue Sel1;
6321   if (isFloatingPointZero(RHS))
6322     switch (CC) {
6323     default: break;       // SETUO etc aren't handled by fsel.
6324     case ISD::SETNE:
6325       std::swap(TV, FV);
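      // Fall through: SETNE is handled as SETEQ with the true/false operands
      // swapped.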
6326     case ISD::SETEQ:
6327       if (LHS.getValueType() == MVT::f32)   // Comparison is always 64-bits
6328         LHS = DAG.getNode(ISD::FP_EXTEND, dl, MVT::f64, LHS);
6329       Sel1 = DAG.getNode(PPCISD::FSEL, dl, ResVT, LHS, TV, FV);
6330       if (Sel1.getValueType() == MVT::f32)   // Comparison is always 64-bits
6331         Sel1 = DAG.getNode(ISD::FP_EXTEND, dl, MVT::f64, Sel1);
6332       return DAG.getNode(PPCISD::FSEL, dl, ResVT,
6333                          DAG.getNode(ISD::FNEG, dl, MVT::f64, LHS), Sel1, FV);
6334     case ISD::SETULT:
6335     case ISD::SETLT:
6336       std::swap(TV, FV);  // fsel is natively setge, swap operands for setlt
6337     case ISD::SETOGE:
6338     case ISD::SETGE:
6339       if (LHS.getValueType() == MVT::f32)   // Comparison is always 64-bits
6340         LHS = DAG.getNode(ISD::FP_EXTEND, dl, MVT::f64, LHS);
6341       return DAG.getNode(PPCISD::FSEL, dl, ResVT, LHS, TV, FV);
6342     case ISD::SETUGT:
6343     case ISD::SETGT:
      std::swap(TV, FV);  // fsel is natively setge, swap operands for setgt
6345     case ISD::SETOLE:
6346     case ISD::SETLE:
6347       if (LHS.getValueType() == MVT::f32)   // Comparison is always 64-bits
6348         LHS = DAG.getNode(ISD::FP_EXTEND, dl, MVT::f64, LHS);
6349       return DAG.getNode(PPCISD::FSEL, dl, ResVT,
6350                          DAG.getNode(ISD::FNEG, dl, MVT::f64, LHS), TV, FV);
6351     }
6352 
6353   SDValue Cmp;
6354   switch (CC) {
6355   default: break;       // SETUO etc aren't handled by fsel.
6356   case ISD::SETNE:
6357     std::swap(TV, FV);
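    // Fall through: SETNE is handled as SETEQ with the true/false operands
    // swapped.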
6358   case ISD::SETEQ:
6359     Cmp = DAG.getNode(ISD::FSUB, dl, CmpVT, LHS, RHS, &Flags);
6360     if (Cmp.getValueType() == MVT::f32)   // Comparison is always 64-bits
6361       Cmp = DAG.getNode(ISD::FP_EXTEND, dl, MVT::f64, Cmp);
6362     Sel1 = DAG.getNode(PPCISD::FSEL, dl, ResVT, Cmp, TV, FV);
6363     if (Sel1.getValueType() == MVT::f32)   // Comparison is always 64-bits
6364       Sel1 = DAG.getNode(ISD::FP_EXTEND, dl, MVT::f64, Sel1);
6365     return DAG.getNode(PPCISD::FSEL, dl, ResVT,
6366                        DAG.getNode(ISD::FNEG, dl, MVT::f64, Cmp), Sel1, FV);
6367   case ISD::SETULT:
6368   case ISD::SETLT:
6369     Cmp = DAG.getNode(ISD::FSUB, dl, CmpVT, LHS, RHS, &Flags);
6370     if (Cmp.getValueType() == MVT::f32)   // Comparison is always 64-bits
6371       Cmp = DAG.getNode(ISD::FP_EXTEND, dl, MVT::f64, Cmp);
6372     return DAG.getNode(PPCISD::FSEL, dl, ResVT, Cmp, FV, TV);
6373   case ISD::SETOGE:
6374   case ISD::SETGE:
6375     Cmp = DAG.getNode(ISD::FSUB, dl, CmpVT, LHS, RHS, &Flags);
6376     if (Cmp.getValueType() == MVT::f32)   // Comparison is always 64-bits
6377       Cmp = DAG.getNode(ISD::FP_EXTEND, dl, MVT::f64, Cmp);
6378     return DAG.getNode(PPCISD::FSEL, dl, ResVT, Cmp, TV, FV);
6379   case ISD::SETUGT:
6380   case ISD::SETGT:
6381     Cmp = DAG.getNode(ISD::FSUB, dl, CmpVT, RHS, LHS, &Flags);
6382     if (Cmp.getValueType() == MVT::f32)   // Comparison is always 64-bits
6383       Cmp = DAG.getNode(ISD::FP_EXTEND, dl, MVT::f64, Cmp);
6384     return DAG.getNode(PPCISD::FSEL, dl, ResVT, Cmp, FV, TV);
6385   case ISD::SETOLE:
6386   case ISD::SETLE:
6387     Cmp = DAG.getNode(ISD::FSUB, dl, CmpVT, RHS, LHS, &Flags);
6388     if (Cmp.getValueType() == MVT::f32)   // Comparison is always 64-bits
6389       Cmp = DAG.getNode(ISD::FP_EXTEND, dl, MVT::f64, Cmp);
6390     return DAG.getNode(PPCISD::FSEL, dl, ResVT, Cmp, TV, FV);
6391   }
6392   return Op;
6393 }
6394 
6395 void PPCTargetLowering::LowerFP_TO_INTForReuse(SDValue Op, ReuseLoadInfo &RLI,
6396                                                SelectionDAG &DAG,
6397                                                const SDLoc &dl) const {
6398   assert(Op.getOperand(0).getValueType().isFloatingPoint());
6399   SDValue Src = Op.getOperand(0);
6400   if (Src.getValueType() == MVT::f32)
6401     Src = DAG.getNode(ISD::FP_EXTEND, dl, MVT::f64, Src);
6402 
6403   SDValue Tmp;
6404   switch (Op.getSimpleValueType().SimpleTy) {
6405   default: llvm_unreachable("Unhandled FP_TO_INT type in custom expander!");
6406   case MVT::i32:
6407     Tmp = DAG.getNode(
6408         Op.getOpcode() == ISD::FP_TO_SINT
6409             ? PPCISD::FCTIWZ
6410             : (Subtarget.hasFPCVT() ? PPCISD::FCTIWUZ : PPCISD::FCTIDZ),
6411         dl, MVT::f64, Src);
6412     break;
6413   case MVT::i64:
6414     assert((Op.getOpcode() == ISD::FP_TO_SINT || Subtarget.hasFPCVT()) &&
6415            "i64 FP_TO_UINT is supported only with FPCVT");
6416     Tmp = DAG.getNode(Op.getOpcode()==ISD::FP_TO_SINT ? PPCISD::FCTIDZ :
6417                                                         PPCISD::FCTIDUZ,
6418                       dl, MVT::f64, Src);
6419     break;
6420   }
6421 
6422   // Convert the FP value to an int value through memory.
6423   bool i32Stack = Op.getValueType() == MVT::i32 && Subtarget.hasSTFIWX() &&
6424     (Op.getOpcode() == ISD::FP_TO_SINT || Subtarget.hasFPCVT());
6425   SDValue FIPtr = DAG.CreateStackTemporary(i32Stack ? MVT::i32 : MVT::f64);
6426   int FI = cast<FrameIndexSDNode>(FIPtr)->getIndex();
6427   MachinePointerInfo MPI =
6428       MachinePointerInfo::getFixedStack(DAG.getMachineFunction(), FI);
6429 
6430   // Emit a store to the stack slot.
6431   SDValue Chain;
6432   if (i32Stack) {
6433     MachineFunction &MF = DAG.getMachineFunction();
6434     MachineMemOperand *MMO =
6435       MF.getMachineMemOperand(MPI, MachineMemOperand::MOStore, 4, 4);
6436     SDValue Ops[] = { DAG.getEntryNode(), Tmp, FIPtr };
6437     Chain = DAG.getMemIntrinsicNode(PPCISD::STFIWX, dl,
6438               DAG.getVTList(MVT::Other), Ops, MVT::i32, MMO);
6439   } else
6440     Chain = DAG.getStore(DAG.getEntryNode(), dl, Tmp, FIPtr, MPI);
6441 
6442   // Result is a load from the stack slot.  If loading 4 bytes, make sure to
6443   // add in a bias on big endian.
6444   if (Op.getValueType() == MVT::i32 && !i32Stack) {
6445     FIPtr = DAG.getNode(ISD::ADD, dl, FIPtr.getValueType(), FIPtr,
6446                         DAG.getConstant(4, dl, FIPtr.getValueType()));
6447     MPI = MPI.getWithOffset(Subtarget.isLittleEndian() ? 0 : 4);
6448   }
6449 
6450   RLI.Chain = Chain;
6451   RLI.Ptr = FIPtr;
6452   RLI.MPI = MPI;
6453 }
6454 
6455 /// \brief Custom lowers floating point to integer conversions to use
6456 /// the direct move instructions available in ISA 2.07 to avoid the
6457 /// need for load/store combinations.
6458 SDValue PPCTargetLowering::LowerFP_TO_INTDirectMove(SDValue Op,
6459                                                     SelectionDAG &DAG,
6460                                                     const SDLoc &dl) const {
6461   assert(Op.getOperand(0).getValueType().isFloatingPoint());
6462   SDValue Src = Op.getOperand(0);
6463 
6464   if (Src.getValueType() == MVT::f32)
6465     Src = DAG.getNode(ISD::FP_EXTEND, dl, MVT::f64, Src);
6466 
6467   SDValue Tmp;
6468   switch (Op.getSimpleValueType().SimpleTy) {
6469   default: llvm_unreachable("Unhandled FP_TO_INT type in custom expander!");
6470   case MVT::i32:
6471     Tmp = DAG.getNode(
6472         Op.getOpcode() == ISD::FP_TO_SINT
6473             ? PPCISD::FCTIWZ
6474             : (Subtarget.hasFPCVT() ? PPCISD::FCTIWUZ : PPCISD::FCTIDZ),
6475         dl, MVT::f64, Src);
6476     Tmp = DAG.getNode(PPCISD::MFVSR, dl, MVT::i32, Tmp);
6477     break;
6478   case MVT::i64:
6479     assert((Op.getOpcode() == ISD::FP_TO_SINT || Subtarget.hasFPCVT()) &&
6480            "i64 FP_TO_UINT is supported only with FPCVT");
6481     Tmp = DAG.getNode(Op.getOpcode()==ISD::FP_TO_SINT ? PPCISD::FCTIDZ :
6482                                                         PPCISD::FCTIDUZ,
6483                       dl, MVT::f64, Src);
6484     Tmp = DAG.getNode(PPCISD::MFVSR, dl, MVT::i64, Tmp);
6485     break;
6486   }
6487   return Tmp;
6488 }
6489 
6490 SDValue PPCTargetLowering::LowerFP_TO_INT(SDValue Op, SelectionDAG &DAG,
6491                                           const SDLoc &dl) const {
6492   if (Subtarget.hasDirectMove() && Subtarget.isPPC64())
6493     return LowerFP_TO_INTDirectMove(Op, DAG, dl);
6494 
6495   ReuseLoadInfo RLI;
6496   LowerFP_TO_INTForReuse(Op, RLI, DAG, dl);
6497 
6498   return DAG.getLoad(Op.getValueType(), dl, RLI.Chain, RLI.Ptr, RLI.MPI,
6499                      RLI.Alignment, RLI.MMOFlags(), RLI.AAInfo, RLI.Ranges);
6500 }
6501 
6502 // We're trying to insert a regular store, S, and then a load, L. If the
6503 // incoming value, O, is a load, we might just be able to have our load use the
6504 // address used by O. However, we don't know if anything else will store to
6505 // that address before we can load from it. To prevent this situation, we need
6506 // to insert our load, L, into the chain as a peer of O. To do this, we give L
6507 // the same chain operand as O, we create a token factor from the chain results
6508 // of O and L, and we replace all uses of O's chain result with that token
6509 // factor (see spliceIntoChain below for this last part).
6510 bool PPCTargetLowering::canReuseLoadAddress(SDValue Op, EVT MemVT,
6511                                             ReuseLoadInfo &RLI,
6512                                             SelectionDAG &DAG,
6513                                             ISD::LoadExtType ET) const {
6514   SDLoc dl(Op);
6515   if (ET == ISD::NON_EXTLOAD &&
6516       (Op.getOpcode() == ISD::FP_TO_UINT ||
6517        Op.getOpcode() == ISD::FP_TO_SINT) &&
6518       isOperationLegalOrCustom(Op.getOpcode(),
6519                                Op.getOperand(0).getValueType())) {
6520 
6521     LowerFP_TO_INTForReuse(Op, RLI, DAG, dl);
6522     return true;
6523   }
6524 
6525   LoadSDNode *LD = dyn_cast<LoadSDNode>(Op);
6526   if (!LD || LD->getExtensionType() != ET || LD->isVolatile() ||
6527       LD->isNonTemporal())
6528     return false;
6529   if (LD->getMemoryVT() != MemVT)
6530     return false;
6531 
6532   RLI.Ptr = LD->getBasePtr();
6533   if (LD->isIndexed() && !LD->getOffset().isUndef()) {
6534     assert(LD->getAddressingMode() == ISD::PRE_INC &&
6535            "Non-pre-inc AM on PPC?");
6536     RLI.Ptr = DAG.getNode(ISD::ADD, dl, RLI.Ptr.getValueType(), RLI.Ptr,
6537                           LD->getOffset());
6538   }
6539 
6540   RLI.Chain = LD->getChain();
6541   RLI.MPI = LD->getPointerInfo();
6542   RLI.IsDereferenceable = LD->isDereferenceable();
6543   RLI.IsInvariant = LD->isInvariant();
6544   RLI.Alignment = LD->getAlignment();
6545   RLI.AAInfo = LD->getAAInfo();
6546   RLI.Ranges = LD->getRanges();
6547 
6548   RLI.ResChain = SDValue(LD, LD->isIndexed() ? 2 : 1);
6549   return true;
6550 }
6551 
6552 // Given the head of the old chain, ResChain, insert a token factor containing
6553 // it and NewResChain, and make users of ResChain now be users of that token
6554 // factor.
6555 void PPCTargetLowering::spliceIntoChain(SDValue ResChain,
6556                                         SDValue NewResChain,
6557                                         SelectionDAG &DAG) const {
6558   if (!ResChain)
6559     return;
6560 
6561   SDLoc dl(NewResChain);
6562 
6563   SDValue TF = DAG.getNode(ISD::TokenFactor, dl, MVT::Other,
6564                            NewResChain, DAG.getUNDEF(MVT::Other));
6565   assert(TF.getNode() != NewResChain.getNode() &&
6566          "A new TF really is required here");
6567 
6568   DAG.ReplaceAllUsesOfValueWith(ResChain, TF);
6569   DAG.UpdateNodeOperands(TF.getNode(), ResChain, NewResChain);
6570 }
6571 
/// \brief Analyze the profitability of a direct move.
/// Prefer a float load over an int load plus direct move when the loaded
/// integer value feeds only int-to-FP conversions; if it has any other
/// integer use, it must live in a GPR anyway, so the direct move is
/// profitable.
6575 static bool directMoveIsProfitable(const SDValue &Op) {
6576   SDNode *Origin = Op.getOperand(0).getNode();
6577   if (Origin->getOpcode() != ISD::LOAD)
6578     return true;
6579 
6580   for (SDNode::use_iterator UI = Origin->use_begin(),
6581                             UE = Origin->use_end();
6582        UI != UE; ++UI) {
6583 
6584     // Only look at the users of the loaded value.
6585     if (UI.getUse().get().getResNo() != 0)
6586       continue;
6587 
6588     if (UI->getOpcode() != ISD::SINT_TO_FP &&
6589         UI->getOpcode() != ISD::UINT_TO_FP)
6590       return true;
6591   }
6592 
6593   return false;
6594 }
6595 
6596 /// \brief Custom lowers integer to floating point conversions to use
6597 /// the direct move instructions available in ISA 2.07 to avoid the
6598 /// need for load/store combinations.
6599 SDValue PPCTargetLowering::LowerINT_TO_FPDirectMove(SDValue Op,
6600                                                     SelectionDAG &DAG,
6601                                                     const SDLoc &dl) const {
6602   assert((Op.getValueType() == MVT::f32 ||
6603           Op.getValueType() == MVT::f64) &&
6604          "Invalid floating point type as target of conversion");
6605   assert(Subtarget.hasFPCVT() &&
6606          "Int to FP conversions with direct moves require FPCVT");
6607   SDValue FP;
6608   SDValue Src = Op.getOperand(0);
6609   bool SinglePrec = Op.getValueType() == MVT::f32;
6610   bool WordInt = Src.getSimpleValueType().SimpleTy == MVT::i32;
6611   bool Signed = Op.getOpcode() == ISD::SINT_TO_FP;
6612   unsigned ConvOp = Signed ? (SinglePrec ? PPCISD::FCFIDS : PPCISD::FCFID) :
6613                              (SinglePrec ? PPCISD::FCFIDUS : PPCISD::FCFIDU);
6614 
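  // For i32 sources, MTVSRA/MTVSRZ sign- or zero-extend the word into the
  // 64-bit VSR before the conversion; i64 sources need no extension, so
  // MTVSRA is used regardless of signedness.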
6615   if (WordInt) {
6616     FP = DAG.getNode(Signed ? PPCISD::MTVSRA : PPCISD::MTVSRZ,
6617                      dl, MVT::f64, Src);
6618     FP = DAG.getNode(ConvOp, dl, SinglePrec ? MVT::f32 : MVT::f64, FP);
6619   }
6620   else {
6621     FP = DAG.getNode(PPCISD::MTVSRA, dl, MVT::f64, Src);
6622     FP = DAG.getNode(ConvOp, dl, SinglePrec ? MVT::f32 : MVT::f64, FP);
6623   }
6624 
6625   return FP;
6626 }
6627 
6628 SDValue PPCTargetLowering::LowerINT_TO_FP(SDValue Op,
6629                                           SelectionDAG &DAG) const {
6630   SDLoc dl(Op);
6631 
6632   if (Subtarget.hasQPX() && Op.getOperand(0).getValueType() == MVT::v4i1) {
6633     if (Op.getValueType() != MVT::v4f32 && Op.getValueType() != MVT::v4f64)
6634       return SDValue();
6635 
6636     SDValue Value = Op.getOperand(0);
    // The values are now known to be -1 (false) or 1 (true). To convert this
    // into 0 (false) and 1 (true), add 1 and then divide by 2 (multiply by
    // 0.5). This can be done with an fma and the 0.5 constant:
    //   (V+1.0)*0.5 = 0.5*V+0.5
6640     Value = DAG.getNode(PPCISD::QBFLT, dl, MVT::v4f64, Value);
6641 
6642     SDValue FPHalfs = DAG.getConstantFP(0.5, dl, MVT::v4f64);
6643 
6644     Value = DAG.getNode(ISD::FMA, dl, MVT::v4f64, Value, FPHalfs, FPHalfs);
6645 
6646     if (Op.getValueType() != MVT::v4f64)
6647       Value = DAG.getNode(ISD::FP_ROUND, dl,
6648                           Op.getValueType(), Value,
6649                           DAG.getIntPtrConstant(1, dl));
6650     return Value;
6651   }
6652 
6653   // Don't handle ppc_fp128 here; let it be lowered to a libcall.
6654   if (Op.getValueType() != MVT::f32 && Op.getValueType() != MVT::f64)
6655     return SDValue();
6656 
6657   if (Op.getOperand(0).getValueType() == MVT::i1)
6658     return DAG.getNode(ISD::SELECT, dl, Op.getValueType(), Op.getOperand(0),
6659                        DAG.getConstantFP(1.0, dl, Op.getValueType()),
6660                        DAG.getConstantFP(0.0, dl, Op.getValueType()));
6661 
  // If we have direct moves, we can do the entire conversion and skip the
  // store/load; however, without FPCVT we can't do most conversions.
6664   if (Subtarget.hasDirectMove() && directMoveIsProfitable(Op) &&
6665       Subtarget.isPPC64() && Subtarget.hasFPCVT())
6666     return LowerINT_TO_FPDirectMove(Op, DAG, dl);
6667 
6668   assert((Op.getOpcode() == ISD::SINT_TO_FP || Subtarget.hasFPCVT()) &&
6669          "UINT_TO_FP is supported only with FPCVT");
6670 
6671   // If we have FCFIDS, then use it when converting to single-precision.
6672   // Otherwise, convert to double-precision and then round.
6673   unsigned FCFOp = (Subtarget.hasFPCVT() && Op.getValueType() == MVT::f32)
6674                        ? (Op.getOpcode() == ISD::UINT_TO_FP ? PPCISD::FCFIDUS
6675                                                             : PPCISD::FCFIDS)
6676                        : (Op.getOpcode() == ISD::UINT_TO_FP ? PPCISD::FCFIDU
6677                                                             : PPCISD::FCFID);
6678   MVT FCFTy = (Subtarget.hasFPCVT() && Op.getValueType() == MVT::f32)
6679                   ? MVT::f32
6680                   : MVT::f64;
6681 
6682   if (Op.getOperand(0).getValueType() == MVT::i64) {
6683     SDValue SINT = Op.getOperand(0);
6684     // When converting to single-precision, we actually need to convert
6685     // to double-precision first and then round to single-precision.
6686     // To avoid double-rounding effects during that operation, we have
6687     // to prepare the input operand.  Bits that might be truncated when
6688     // converting to double-precision are replaced by a bit that won't
6689     // be lost at this stage, but is below the single-precision rounding
6690     // position.
6691     //
6692     // However, if -enable-unsafe-fp-math is in effect, accept double
6693     // rounding to avoid the extra overhead.
6694     if (Op.getValueType() == MVT::f32 &&
6695         !Subtarget.hasFPCVT() &&
6696         !DAG.getTarget().Options.UnsafeFPMath) {
6697 
6698       // Twiddle input to make sure the low 11 bits are zero.  (If this
6699       // is the case, we are guaranteed the value will fit into the 53 bit
6700       // mantissa of an IEEE double-precision value without rounding.)
6701       // If any of those low 11 bits were not zero originally, make sure
6702       // bit 12 (value 2048) is set instead, so that the final rounding
6703       // to single-precision gets the correct result.
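      // Equivalently:
      //   Round = (SINT & 0x7FF) ? ((SINT & ~0x7FFLL) | 0x800) : SINT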
6704       SDValue Round = DAG.getNode(ISD::AND, dl, MVT::i64,
6705                                   SINT, DAG.getConstant(2047, dl, MVT::i64));
6706       Round = DAG.getNode(ISD::ADD, dl, MVT::i64,
6707                           Round, DAG.getConstant(2047, dl, MVT::i64));
6708       Round = DAG.getNode(ISD::OR, dl, MVT::i64, Round, SINT);
6709       Round = DAG.getNode(ISD::AND, dl, MVT::i64,
6710                           Round, DAG.getConstant(-2048, dl, MVT::i64));
6711 
6712       // However, we cannot use that value unconditionally: if the magnitude
6713       // of the input value is small, the bit-twiddling we did above might
6714       // end up visibly changing the output.  Fortunately, in that case, we
6715       // don't need to twiddle bits since the original input will convert
6716       // exactly to double-precision floating-point already.  Therefore,
6717       // construct a conditional to use the original value if the top 11
6718       // bits are all sign-bit copies, and use the rounded value computed
6719       // above otherwise.
6720       SDValue Cond = DAG.getNode(ISD::SRA, dl, MVT::i64,
6721                                  SINT, DAG.getConstant(53, dl, MVT::i32));
6722       Cond = DAG.getNode(ISD::ADD, dl, MVT::i64,
6723                          Cond, DAG.getConstant(1, dl, MVT::i64));
6724       Cond = DAG.getSetCC(dl, MVT::i32,
6725                           Cond, DAG.getConstant(1, dl, MVT::i64), ISD::SETUGT);
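      // SINT >> 53 (arithmetic) is 0 or -1 exactly when the top bits are all
      // sign-bit copies; adding 1 maps those two cases to 0 and 1, so the
      // unsigned greater-than-1 test is true only when the rounding
      // adjustment above is actually needed.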
6726 
6727       SINT = DAG.getNode(ISD::SELECT, dl, MVT::i64, Cond, Round, SINT);
6728     }
6729 
6730     ReuseLoadInfo RLI;
6731     SDValue Bits;
6732 
6733     MachineFunction &MF = DAG.getMachineFunction();
6734     if (canReuseLoadAddress(SINT, MVT::i64, RLI, DAG)) {
6735       Bits = DAG.getLoad(MVT::f64, dl, RLI.Chain, RLI.Ptr, RLI.MPI,
6736                          RLI.Alignment, RLI.MMOFlags(), RLI.AAInfo, RLI.Ranges);
6737       spliceIntoChain(RLI.ResChain, Bits.getValue(1), DAG);
6738     } else if (Subtarget.hasLFIWAX() &&
6739                canReuseLoadAddress(SINT, MVT::i32, RLI, DAG, ISD::SEXTLOAD)) {
6740       MachineMemOperand *MMO =
6741         MF.getMachineMemOperand(RLI.MPI, MachineMemOperand::MOLoad, 4,
6742                                 RLI.Alignment, RLI.AAInfo, RLI.Ranges);
6743       SDValue Ops[] = { RLI.Chain, RLI.Ptr };
6744       Bits = DAG.getMemIntrinsicNode(PPCISD::LFIWAX, dl,
6745                                      DAG.getVTList(MVT::f64, MVT::Other),
6746                                      Ops, MVT::i32, MMO);
6747       spliceIntoChain(RLI.ResChain, Bits.getValue(1), DAG);
6748     } else if (Subtarget.hasFPCVT() &&
6749                canReuseLoadAddress(SINT, MVT::i32, RLI, DAG, ISD::ZEXTLOAD)) {
6750       MachineMemOperand *MMO =
6751         MF.getMachineMemOperand(RLI.MPI, MachineMemOperand::MOLoad, 4,
6752                                 RLI.Alignment, RLI.AAInfo, RLI.Ranges);
6753       SDValue Ops[] = { RLI.Chain, RLI.Ptr };
6754       Bits = DAG.getMemIntrinsicNode(PPCISD::LFIWZX, dl,
6755                                      DAG.getVTList(MVT::f64, MVT::Other),
6756                                      Ops, MVT::i32, MMO);
6757       spliceIntoChain(RLI.ResChain, Bits.getValue(1), DAG);
6758     } else if (((Subtarget.hasLFIWAX() &&
6759                  SINT.getOpcode() == ISD::SIGN_EXTEND) ||
6760                 (Subtarget.hasFPCVT() &&
6761                  SINT.getOpcode() == ISD::ZERO_EXTEND)) &&
6762                SINT.getOperand(0).getValueType() == MVT::i32) {
6763       MachineFrameInfo &MFI = MF.getFrameInfo();
6764       EVT PtrVT = getPointerTy(DAG.getDataLayout());
6765 
6766       int FrameIdx = MFI.CreateStackObject(4, 4, false);
6767       SDValue FIdx = DAG.getFrameIndex(FrameIdx, PtrVT);
6768 
6769       SDValue Store =
6770           DAG.getStore(DAG.getEntryNode(), dl, SINT.getOperand(0), FIdx,
6771                        MachinePointerInfo::getFixedStack(
6772                            DAG.getMachineFunction(), FrameIdx));
6773 
6774       assert(cast<StoreSDNode>(Store)->getMemoryVT() == MVT::i32 &&
6775              "Expected an i32 store");
6776 
6777       RLI.Ptr = FIdx;
6778       RLI.Chain = Store;
6779       RLI.MPI =
6780           MachinePointerInfo::getFixedStack(DAG.getMachineFunction(), FrameIdx);
6781       RLI.Alignment = 4;
6782 
6783       MachineMemOperand *MMO =
6784         MF.getMachineMemOperand(RLI.MPI, MachineMemOperand::MOLoad, 4,
6785                                 RLI.Alignment, RLI.AAInfo, RLI.Ranges);
6786       SDValue Ops[] = { RLI.Chain, RLI.Ptr };
6787       Bits = DAG.getMemIntrinsicNode(SINT.getOpcode() == ISD::ZERO_EXTEND ?
6788                                      PPCISD::LFIWZX : PPCISD::LFIWAX,
6789                                      dl, DAG.getVTList(MVT::f64, MVT::Other),
6790                                      Ops, MVT::i32, MMO);
6791     } else
6792       Bits = DAG.getNode(ISD::BITCAST, dl, MVT::f64, SINT);
6793 
6794     SDValue FP = DAG.getNode(FCFOp, dl, FCFTy, Bits);
6795 
6796     if (Op.getValueType() == MVT::f32 && !Subtarget.hasFPCVT())
6797       FP = DAG.getNode(ISD::FP_ROUND, dl,
6798                        MVT::f32, FP, DAG.getIntPtrConstant(0, dl));
6799     return FP;
6800   }
6801 
6802   assert(Op.getOperand(0).getValueType() == MVT::i32 &&
6803          "Unhandled INT_TO_FP type in custom expander!");
  // Since we only generate this in 64-bit mode, we can take advantage of
  // 64-bit registers.  In particular, sign extend the input value into the
  // 64-bit register with extsw, store the WHOLE 64-bit value into the stack
  // slot, then lfd it and fcfid it.
6808   MachineFunction &MF = DAG.getMachineFunction();
6809   MachineFrameInfo &MFI = MF.getFrameInfo();
6810   EVT PtrVT = getPointerTy(MF.getDataLayout());
6811 
6812   SDValue Ld;
6813   if (Subtarget.hasLFIWAX() || Subtarget.hasFPCVT()) {
6814     ReuseLoadInfo RLI;
6815     bool ReusingLoad;
6816     if (!(ReusingLoad = canReuseLoadAddress(Op.getOperand(0), MVT::i32, RLI,
6817                                             DAG))) {
6818       int FrameIdx = MFI.CreateStackObject(4, 4, false);
6819       SDValue FIdx = DAG.getFrameIndex(FrameIdx, PtrVT);
6820 
6821       SDValue Store =
6822           DAG.getStore(DAG.getEntryNode(), dl, Op.getOperand(0), FIdx,
6823                        MachinePointerInfo::getFixedStack(
6824                            DAG.getMachineFunction(), FrameIdx));
6825 
6826       assert(cast<StoreSDNode>(Store)->getMemoryVT() == MVT::i32 &&
6827              "Expected an i32 store");
6828 
6829       RLI.Ptr = FIdx;
6830       RLI.Chain = Store;
6831       RLI.MPI =
6832           MachinePointerInfo::getFixedStack(DAG.getMachineFunction(), FrameIdx);
6833       RLI.Alignment = 4;
6834     }
6835 
6836     MachineMemOperand *MMO =
6837       MF.getMachineMemOperand(RLI.MPI, MachineMemOperand::MOLoad, 4,
6838                               RLI.Alignment, RLI.AAInfo, RLI.Ranges);
6839     SDValue Ops[] = { RLI.Chain, RLI.Ptr };
6840     Ld = DAG.getMemIntrinsicNode(Op.getOpcode() == ISD::UINT_TO_FP ?
6841                                    PPCISD::LFIWZX : PPCISD::LFIWAX,
6842                                  dl, DAG.getVTList(MVT::f64, MVT::Other),
6843                                  Ops, MVT::i32, MMO);
6844     if (ReusingLoad)
6845       spliceIntoChain(RLI.ResChain, Ld.getValue(1), DAG);
6846   } else {
6847     assert(Subtarget.isPPC64() &&
6848            "i32->FP without LFIWAX supported only on PPC64");
6849 
6850     int FrameIdx = MFI.CreateStackObject(8, 8, false);
6851     SDValue FIdx = DAG.getFrameIndex(FrameIdx, PtrVT);
6852 
6853     SDValue Ext64 = DAG.getNode(ISD::SIGN_EXTEND, dl, MVT::i64,
6854                                 Op.getOperand(0));
6855 
6856     // STD the extended value into the stack slot.
6857     SDValue Store = DAG.getStore(
6858         DAG.getEntryNode(), dl, Ext64, FIdx,
6859         MachinePointerInfo::getFixedStack(DAG.getMachineFunction(), FrameIdx));
6860 
6861     // Load the value as a double.
6862     Ld = DAG.getLoad(
6863         MVT::f64, dl, Store, FIdx,
6864         MachinePointerInfo::getFixedStack(DAG.getMachineFunction(), FrameIdx));
6865   }
6866 
6867   // FCFID it and return it.
6868   SDValue FP = DAG.getNode(FCFOp, dl, FCFTy, Ld);
6869   if (Op.getValueType() == MVT::f32 && !Subtarget.hasFPCVT())
6870     FP = DAG.getNode(ISD::FP_ROUND, dl, MVT::f32, FP,
6871                      DAG.getIntPtrConstant(0, dl));
6872   return FP;
6873 }
6874 
6875 SDValue PPCTargetLowering::LowerFLT_ROUNDS_(SDValue Op,
6876                                             SelectionDAG &DAG) const {
6877   SDLoc dl(Op);
6878   /*
   The rounding mode is in bits 30:31 of the FPSCR, and has the following
6880    settings:
6881      00 Round to nearest
6882      01 Round to 0
6883      10 Round to +inf
6884      11 Round to -inf
6885 
6886   FLT_ROUNDS, on the other hand, expects the following:
6887     -1 Undefined
6888      0 Round to 0
6889      1 Round to nearest
6890      2 Round to +inf
6891      3 Round to -inf
6892 
6893   To perform the conversion, we do:
6894     ((FPSCR & 0x3) ^ ((~FPSCR & 0x3) >> 1))
6895   */
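  // For example, FPSCR rounding bits 0b10 (round to +inf) become
  // (2 ^ ((~2 & 3) >> 1)) = (2 ^ 0) = 2, the FLT_ROUNDS encoding of +inf.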
6896 
6897   MachineFunction &MF = DAG.getMachineFunction();
6898   EVT VT = Op.getValueType();
6899   EVT PtrVT = getPointerTy(MF.getDataLayout());
6900 
6901   // Save FP Control Word to register
6902   EVT NodeTys[] = {
6903     MVT::f64,    // return register
6904     MVT::Glue    // unused in this context
6905   };
6906   SDValue Chain = DAG.getNode(PPCISD::MFFS, dl, NodeTys, None);
6907 
6908   // Save FP register to stack slot
6909   int SSFI = MF.getFrameInfo().CreateStackObject(8, 8, false);
6910   SDValue StackSlot = DAG.getFrameIndex(SSFI, PtrVT);
6911   SDValue Store = DAG.getStore(DAG.getEntryNode(), dl, Chain, StackSlot,
6912                                MachinePointerInfo());
6913 
6914   // Load FP Control Word from low 32 bits of stack slot.
6915   SDValue Four = DAG.getConstant(4, dl, PtrVT);
6916   SDValue Addr = DAG.getNode(ISD::ADD, dl, PtrVT, StackSlot, Four);
6917   SDValue CWD = DAG.getLoad(MVT::i32, dl, Store, Addr, MachinePointerInfo());
6918 
6919   // Transform as necessary
6920   SDValue CWD1 =
6921     DAG.getNode(ISD::AND, dl, MVT::i32,
6922                 CWD, DAG.getConstant(3, dl, MVT::i32));
6923   SDValue CWD2 =
6924     DAG.getNode(ISD::SRL, dl, MVT::i32,
6925                 DAG.getNode(ISD::AND, dl, MVT::i32,
6926                             DAG.getNode(ISD::XOR, dl, MVT::i32,
6927                                         CWD, DAG.getConstant(3, dl, MVT::i32)),
6928                             DAG.getConstant(3, dl, MVT::i32)),
6929                 DAG.getConstant(1, dl, MVT::i32));
6930 
6931   SDValue RetVal =
6932     DAG.getNode(ISD::XOR, dl, MVT::i32, CWD1, CWD2);
6933 
6934   return DAG.getNode((VT.getSizeInBits() < 16 ?
6935                       ISD::TRUNCATE : ISD::ZERO_EXTEND), dl, VT, RetVal);
6936 }
6937 
6938 SDValue PPCTargetLowering::LowerSHL_PARTS(SDValue Op, SelectionDAG &DAG) const {
6939   EVT VT = Op.getValueType();
6940   unsigned BitWidth = VT.getSizeInBits();
6941   SDLoc dl(Op);
6942   assert(Op.getNumOperands() == 3 &&
6943          VT == Op.getOperand(1).getValueType() &&
6944          "Unexpected SHL!");
6945 
6946   // Expand into a bunch of logical ops.  Note that these ops
6947   // depend on the PPC behavior for oversized shift amounts.
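  // The intended result is:
  //   OutLo = Lo << Amt
  //   OutHi = (Hi << Amt) | (Lo >> (BitWidth - Amt)) | (Lo << (Amt - BitWidth))
  // PPC shifts by BitWidth or more produce zero, so the Lo term that does not
  // apply for a given Amt contributes nothing.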
6948   SDValue Lo = Op.getOperand(0);
6949   SDValue Hi = Op.getOperand(1);
6950   SDValue Amt = Op.getOperand(2);
6951   EVT AmtVT = Amt.getValueType();
6952 
6953   SDValue Tmp1 = DAG.getNode(ISD::SUB, dl, AmtVT,
6954                              DAG.getConstant(BitWidth, dl, AmtVT), Amt);
6955   SDValue Tmp2 = DAG.getNode(PPCISD::SHL, dl, VT, Hi, Amt);
6956   SDValue Tmp3 = DAG.getNode(PPCISD::SRL, dl, VT, Lo, Tmp1);
6957   SDValue Tmp4 = DAG.getNode(ISD::OR , dl, VT, Tmp2, Tmp3);
6958   SDValue Tmp5 = DAG.getNode(ISD::ADD, dl, AmtVT, Amt,
6959                              DAG.getConstant(-BitWidth, dl, AmtVT));
6960   SDValue Tmp6 = DAG.getNode(PPCISD::SHL, dl, VT, Lo, Tmp5);
6961   SDValue OutHi = DAG.getNode(ISD::OR, dl, VT, Tmp4, Tmp6);
6962   SDValue OutLo = DAG.getNode(PPCISD::SHL, dl, VT, Lo, Amt);
6963   SDValue OutOps[] = { OutLo, OutHi };
6964   return DAG.getMergeValues(OutOps, dl);
6965 }
6966 
6967 SDValue PPCTargetLowering::LowerSRL_PARTS(SDValue Op, SelectionDAG &DAG) const {
6968   EVT VT = Op.getValueType();
6969   SDLoc dl(Op);
6970   unsigned BitWidth = VT.getSizeInBits();
6971   assert(Op.getNumOperands() == 3 &&
6972          VT == Op.getOperand(1).getValueType() &&
6973          "Unexpected SRL!");
6974 
6975   // Expand into a bunch of logical ops.  Note that these ops
6976   // depend on the PPC behavior for oversized shift amounts.
6977   SDValue Lo = Op.getOperand(0);
6978   SDValue Hi = Op.getOperand(1);
6979   SDValue Amt = Op.getOperand(2);
6980   EVT AmtVT = Amt.getValueType();
6981 
6982   SDValue Tmp1 = DAG.getNode(ISD::SUB, dl, AmtVT,
6983                              DAG.getConstant(BitWidth, dl, AmtVT), Amt);
6984   SDValue Tmp2 = DAG.getNode(PPCISD::SRL, dl, VT, Lo, Amt);
6985   SDValue Tmp3 = DAG.getNode(PPCISD::SHL, dl, VT, Hi, Tmp1);
6986   SDValue Tmp4 = DAG.getNode(ISD::OR, dl, VT, Tmp2, Tmp3);
6987   SDValue Tmp5 = DAG.getNode(ISD::ADD, dl, AmtVT, Amt,
6988                              DAG.getConstant(-BitWidth, dl, AmtVT));
6989   SDValue Tmp6 = DAG.getNode(PPCISD::SRL, dl, VT, Hi, Tmp5);
6990   SDValue OutLo = DAG.getNode(ISD::OR, dl, VT, Tmp4, Tmp6);
6991   SDValue OutHi = DAG.getNode(PPCISD::SRL, dl, VT, Hi, Amt);
6992   SDValue OutOps[] = { OutLo, OutHi };
6993   return DAG.getMergeValues(OutOps, dl);
6994 }
6995 
6996 SDValue PPCTargetLowering::LowerSRA_PARTS(SDValue Op, SelectionDAG &DAG) const {
6997   SDLoc dl(Op);
6998   EVT VT = Op.getValueType();
6999   unsigned BitWidth = VT.getSizeInBits();
7000   assert(Op.getNumOperands() == 3 &&
7001          VT == Op.getOperand(1).getValueType() &&
7002          "Unexpected SRA!");
7003 
7004   // Expand into a bunch of logical ops, followed by a select_cc.
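  // OutHi is simply Hi >> Amt (arithmetic).  For OutLo, when Amt - BitWidth
  // is <= 0 the result is (Lo >> Amt) | (Hi << (BitWidth - Amt)); otherwise
  // it is Hi >> (Amt - BitWidth) (arithmetic), i.e. sign fill from Hi, which
  // is what the final select_cc chooses between.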
7005   SDValue Lo = Op.getOperand(0);
7006   SDValue Hi = Op.getOperand(1);
7007   SDValue Amt = Op.getOperand(2);
7008   EVT AmtVT = Amt.getValueType();
7009 
7010   SDValue Tmp1 = DAG.getNode(ISD::SUB, dl, AmtVT,
7011                              DAG.getConstant(BitWidth, dl, AmtVT), Amt);
7012   SDValue Tmp2 = DAG.getNode(PPCISD::SRL, dl, VT, Lo, Amt);
7013   SDValue Tmp3 = DAG.getNode(PPCISD::SHL, dl, VT, Hi, Tmp1);
7014   SDValue Tmp4 = DAG.getNode(ISD::OR, dl, VT, Tmp2, Tmp3);
7015   SDValue Tmp5 = DAG.getNode(ISD::ADD, dl, AmtVT, Amt,
7016                              DAG.getConstant(-BitWidth, dl, AmtVT));
7017   SDValue Tmp6 = DAG.getNode(PPCISD::SRA, dl, VT, Hi, Tmp5);
7018   SDValue OutHi = DAG.getNode(PPCISD::SRA, dl, VT, Hi, Amt);
7019   SDValue OutLo = DAG.getSelectCC(dl, Tmp5, DAG.getConstant(0, dl, AmtVT),
7020                                   Tmp4, Tmp6, ISD::SETLE);
7021   SDValue OutOps[] = { OutLo, OutHi };
7022   return DAG.getMergeValues(OutOps, dl);
7023 }
7024 
7025 //===----------------------------------------------------------------------===//
7026 // Vector related lowering.
7027 //
7028 
7029 /// BuildSplatI - Build a canonical splati of Val with an element size of
7030 /// SplatSize.  Cast the result to VT.
7031 static SDValue BuildSplatI(int Val, unsigned SplatSize, EVT VT,
7032                            SelectionDAG &DAG, const SDLoc &dl) {
7033   assert(Val >= -16 && Val <= 15 && "vsplti is out of range!");
7034 
7035   static const MVT VTys[] = { // canonical VT to use for each size.
7036     MVT::v16i8, MVT::v8i16, MVT::Other, MVT::v4i32
7037   };
7038 
7039   EVT ReqVT = VT != MVT::Other ? VT : VTys[SplatSize-1];
7040 
7041   // Force vspltis[hw] -1 to vspltisb -1 to canonicalize.
7042   if (Val == -1)
7043     SplatSize = 1;
7044 
7045   EVT CanonicalVT = VTys[SplatSize-1];
7046 
7047   // Build a canonical splat for this value.
7048   return DAG.getBitcast(ReqVT, DAG.getConstant(Val, dl, CanonicalVT));
7049 }
7050 
7051 /// BuildIntrinsicOp - Return a unary operator intrinsic node with the
7052 /// specified intrinsic ID.
7053 static SDValue BuildIntrinsicOp(unsigned IID, SDValue Op, SelectionDAG &DAG,
7054                                 const SDLoc &dl, EVT DestVT = MVT::Other) {
7055   if (DestVT == MVT::Other) DestVT = Op.getValueType();
7056   return DAG.getNode(ISD::INTRINSIC_WO_CHAIN, dl, DestVT,
7057                      DAG.getConstant(IID, dl, MVT::i32), Op);
7058 }
7059 
7060 /// BuildIntrinsicOp - Return a binary operator intrinsic node with the
7061 /// specified intrinsic ID.
7062 static SDValue BuildIntrinsicOp(unsigned IID, SDValue LHS, SDValue RHS,
7063                                 SelectionDAG &DAG, const SDLoc &dl,
7064                                 EVT DestVT = MVT::Other) {
7065   if (DestVT == MVT::Other) DestVT = LHS.getValueType();
7066   return DAG.getNode(ISD::INTRINSIC_WO_CHAIN, dl, DestVT,
7067                      DAG.getConstant(IID, dl, MVT::i32), LHS, RHS);
7068 }
7069 
7070 /// BuildIntrinsicOp - Return a ternary operator intrinsic node with the
7071 /// specified intrinsic ID.
7072 static SDValue BuildIntrinsicOp(unsigned IID, SDValue Op0, SDValue Op1,
7073                                 SDValue Op2, SelectionDAG &DAG, const SDLoc &dl,
7074                                 EVT DestVT = MVT::Other) {
7075   if (DestVT == MVT::Other) DestVT = Op0.getValueType();
7076   return DAG.getNode(ISD::INTRINSIC_WO_CHAIN, dl, DestVT,
7077                      DAG.getConstant(IID, dl, MVT::i32), Op0, Op1, Op2);
7078 }
7079 
7080 /// BuildVSLDOI - Return a VECTOR_SHUFFLE that is a vsldoi of the specified
7081 /// amount.  The result has the specified value type.
7082 static SDValue BuildVSLDOI(SDValue LHS, SDValue RHS, unsigned Amt, EVT VT,
7083                            SelectionDAG &DAG, const SDLoc &dl) {
7084   // Force LHS/RHS to be the right type.
7085   LHS = DAG.getNode(ISD::BITCAST, dl, MVT::v16i8, LHS);
7086   RHS = DAG.getNode(ISD::BITCAST, dl, MVT::v16i8, RHS);
7087 
7088   int Ops[16];
7089   for (unsigned i = 0; i != 16; ++i)
7090     Ops[i] = i + Amt;
7091   SDValue T = DAG.getVectorShuffle(MVT::v16i8, dl, LHS, RHS, Ops);
7092   return DAG.getNode(ISD::BITCAST, dl, VT, T);
7093 }
7094 
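// Return true if BVN is a BUILD_VECTOR of the given type whose operands are
// all the same (non-constant) value, i.e. a splat of a non-constant operand.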
7095 static bool isNonConstSplatBV(BuildVectorSDNode *BVN, EVT Type) {
7096   if (BVN->isConstant() || BVN->getValueType(0) != Type)
7097     return false;
7098   auto OpZero = BVN->getOperand(0);
7099   for (int i = 1, e = BVN->getNumOperands(); i < e; i++)
7100     if (BVN->getOperand(i) != OpZero)
7101       return false;
7102   return true;
7103 }
7104 
7105 // If this is a case we can't handle, return null and let the default
7106 // expansion code take care of it.  If we CAN select this case, and if it
7107 // selects to a single instruction, return Op.  Otherwise, if we can codegen
7108 // this case more efficiently than a constant pool load, lower it to the
7109 // sequence of ops that should be used.
7110 SDValue PPCTargetLowering::LowerBUILD_VECTOR(SDValue Op,
7111                                              SelectionDAG &DAG) const {
7112   SDLoc dl(Op);
7113   BuildVectorSDNode *BVN = dyn_cast<BuildVectorSDNode>(Op.getNode());
7114   assert(BVN && "Expected a BuildVectorSDNode in LowerBUILD_VECTOR");
7115 
7116   if (Subtarget.hasQPX() && Op.getValueType() == MVT::v4i1) {
7117     // We first build an i32 vector, load it into a QPX register,
7118     // then convert it to a floating-point vector and compare it
7119     // to a zero vector to get the boolean result.
7120     MachineFrameInfo &MFI = DAG.getMachineFunction().getFrameInfo();
7121     int FrameIdx = MFI.CreateStackObject(16, 16, false);
7122     MachinePointerInfo PtrInfo =
7123         MachinePointerInfo::getFixedStack(DAG.getMachineFunction(), FrameIdx);
7124     EVT PtrVT = getPointerTy(DAG.getDataLayout());
7125     SDValue FIdx = DAG.getFrameIndex(FrameIdx, PtrVT);
7126 
7127     assert(BVN->getNumOperands() == 4 &&
7128       "BUILD_VECTOR for v4i1 does not have 4 operands");
7129 
7130     bool IsConst = true;
7131     for (unsigned i = 0; i < 4; ++i) {
7132       if (BVN->getOperand(i).isUndef()) continue;
7133       if (!isa<ConstantSDNode>(BVN->getOperand(i))) {
7134         IsConst = false;
7135         break;
7136       }
7137     }
7138 
7139     if (IsConst) {
7140       Constant *One =
7141         ConstantFP::get(Type::getFloatTy(*DAG.getContext()), 1.0);
7142       Constant *NegOne =
7143         ConstantFP::get(Type::getFloatTy(*DAG.getContext()), -1.0);
7144 
7145       Constant *CV[4];
7146       for (unsigned i = 0; i < 4; ++i) {
7147         if (BVN->getOperand(i).isUndef())
7148           CV[i] = UndefValue::get(Type::getFloatTy(*DAG.getContext()));
7149         else if (isNullConstant(BVN->getOperand(i)))
7150           CV[i] = NegOne;
7151         else
7152           CV[i] = One;
7153       }
7154 
7155       Constant *CP = ConstantVector::get(CV);
7156       SDValue CPIdx = DAG.getConstantPool(CP, getPointerTy(DAG.getDataLayout()),
7157                                           16 /* alignment */);
7158 
7159       SDValue Ops[] = {DAG.getEntryNode(), CPIdx};
7160       SDVTList VTs = DAG.getVTList({MVT::v4i1, /*chain*/ MVT::Other});
7161       return DAG.getMemIntrinsicNode(
7162           PPCISD::QVLFSb, dl, VTs, Ops, MVT::v4f32,
7163           MachinePointerInfo::getConstantPool(DAG.getMachineFunction()));
7164     }
7165 
7166     SmallVector<SDValue, 4> Stores;
7167     for (unsigned i = 0; i < 4; ++i) {
7168       if (BVN->getOperand(i).isUndef()) continue;
7169 
7170       unsigned Offset = 4*i;
7171       SDValue Idx = DAG.getConstant(Offset, dl, FIdx.getValueType());
7172       Idx = DAG.getNode(ISD::ADD, dl, FIdx.getValueType(), FIdx, Idx);
7173 
7174       unsigned StoreSize = BVN->getOperand(i).getValueType().getStoreSize();
7175       if (StoreSize > 4) {
7176         Stores.push_back(
7177             DAG.getTruncStore(DAG.getEntryNode(), dl, BVN->getOperand(i), Idx,
7178                               PtrInfo.getWithOffset(Offset), MVT::i32));
7179       } else {
7180         SDValue StoreValue = BVN->getOperand(i);
7181         if (StoreSize < 4)
7182           StoreValue = DAG.getNode(ISD::ANY_EXTEND, dl, MVT::i32, StoreValue);
7183 
7184         Stores.push_back(DAG.getStore(DAG.getEntryNode(), dl, StoreValue, Idx,
7185                                       PtrInfo.getWithOffset(Offset)));
7186       }
7187     }
7188 
7189     SDValue StoreChain;
7190     if (!Stores.empty())
7191       StoreChain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, Stores);
7192     else
7193       StoreChain = DAG.getEntryNode();
7194 
7195     // Now load from v4i32 into the QPX register; this will extend it to
7196     // v4i64 but not yet convert it to a floating point. Nevertheless, this
7197     // is typed as v4f64 because the QPX register integer states are not
7198     // explicitly represented.
7199 
7200     SDValue Ops[] = {StoreChain,
7201                      DAG.getConstant(Intrinsic::ppc_qpx_qvlfiwz, dl, MVT::i32),
7202                      FIdx};
7203     SDVTList VTs = DAG.getVTList({MVT::v4f64, /*chain*/ MVT::Other});
7204 
7205     SDValue LoadedVect = DAG.getMemIntrinsicNode(ISD::INTRINSIC_W_CHAIN,
7206       dl, VTs, Ops, MVT::v4i32, PtrInfo);
7207     LoadedVect = DAG.getNode(ISD::INTRINSIC_WO_CHAIN, dl, MVT::v4f64,
7208       DAG.getConstant(Intrinsic::ppc_qpx_qvfcfidu, dl, MVT::i32),
7209       LoadedVect);
7210 
7211     SDValue FPZeros = DAG.getConstantFP(0.0, dl, MVT::v4f64);
7212 
7213     return DAG.getSetCC(dl, MVT::v4i1, LoadedVect, FPZeros, ISD::SETEQ);
7214   }
7215 
7216   // All other QPX vectors are handled by generic code.
7217   if (Subtarget.hasQPX())
7218     return SDValue();
7219 
7220   // Check if this is a splat of a constant value.
7221   APInt APSplatBits, APSplatUndef;
7222   unsigned SplatBitSize;
7223   bool HasAnyUndefs;
7224   if (! BVN->isConstantSplat(APSplatBits, APSplatUndef, SplatBitSize,
7225                              HasAnyUndefs, 0, !Subtarget.isLittleEndian()) ||
7226       SplatBitSize > 32) {
    // We can splat a non-constant value on CPUs that implement ISA 3.0
    // in two ways: LXVWSX (load and splat) and MTVSRWS (move and splat).
7229     auto OpZero = BVN->getOperand(0);
7230     bool CanLoadAndSplat = OpZero.getOpcode() == ISD::LOAD &&
7231       BVN->isOnlyUserOf(OpZero.getNode());
7232     if (Subtarget.isISA3_0() && !CanLoadAndSplat &&
7233         (isNonConstSplatBV(BVN, MVT::v4i32) ||
7234          isNonConstSplatBV(BVN, MVT::v2i64)))
7235       return Op;
7236     return SDValue();
7237   }
7238 
7239   unsigned SplatBits = APSplatBits.getZExtValue();
7240   unsigned SplatUndef = APSplatUndef.getZExtValue();
7241   unsigned SplatSize = SplatBitSize / 8;
7242 
7243   // First, handle single instruction cases.
7244 
7245   // All zeros?
7246   if (SplatBits == 0) {
7247     // Canonicalize all zero vectors to be v4i32.
7248     if (Op.getValueType() != MVT::v4i32 || HasAnyUndefs) {
7249       SDValue Z = DAG.getConstant(0, dl, MVT::v4i32);
7250       Op = DAG.getNode(ISD::BITCAST, dl, Op.getValueType(), Z);
7251     }
7252     return Op;
7253   }
7254 
7255   // We have XXSPLTIB for constant splats one byte wide
7256   if (Subtarget.isISA3_0() && Op.getValueType() == MVT::v16i8)
7257     return Op;
7258 
7259   // If the sign extended value is in the range [-16,15], use VSPLTI[bhw].
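  // (SplatBits holds the splat value in its low SplatBitSize bits; shifting
  // it to the top of a 32-bit word and arithmetic-shifting back sign-extends
  // it.)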
  int32_t SextVal = (int32_t(SplatBits << (32-SplatBitSize)) >>
                     (32-SplatBitSize));
7262   if (SextVal >= -16 && SextVal <= 15)
7263     return BuildSplatI(SextVal, SplatSize, Op.getValueType(), DAG, dl);
7264 
  // Two-instruction sequences.
7266 
7267   // If this value is in the range [-32,30] and is even, use:
7268   //     VSPLTI[bhw](val/2) + VSPLTI[bhw](val/2)
7269   // If this value is in the range [17,31] and is odd, use:
7270   //     VSPLTI[bhw](val-16) - VSPLTI[bhw](-16)
7271   // If this value is in the range [-31,-17] and is odd, use:
7272   //     VSPLTI[bhw](val+16) + VSPLTI[bhw](-16)
7273   // Note the last two are three-instruction sequences.
7274   if (SextVal >= -32 && SextVal <= 31) {
7275     // To avoid having these optimizations undone by constant folding,
7276     // we convert to a pseudo that will be expanded later into one of
7277     // the above forms.
7278     SDValue Elt = DAG.getConstant(SextVal, dl, MVT::i32);
7279     EVT VT = (SplatSize == 1 ? MVT::v16i8 :
7280               (SplatSize == 2 ? MVT::v8i16 : MVT::v4i32));
7281     SDValue EltSize = DAG.getConstant(SplatSize, dl, MVT::i32);
7282     SDValue RetVal = DAG.getNode(PPCISD::VADD_SPLAT, dl, VT, Elt, EltSize);
7283     if (VT == Op.getValueType())
7284       return RetVal;
7285     else
7286       return DAG.getNode(ISD::BITCAST, dl, Op.getValueType(), RetVal);
7287   }
7288 
7289   // If this is 0x8000_0000 x 4, turn into vspltisw + vslw.  If it is
7290   // 0x7FFF_FFFF x 4, turn it into not(0x8000_0000).  This is important
7291   // for fneg/fabs.
7292   if (SplatSize == 4 && SplatBits == (0x7FFFFFFF&~SplatUndef)) {
7293     // Make -1 and vspltisw -1:
7294     SDValue OnesV = BuildSplatI(-1, 4, MVT::v4i32, DAG, dl);
7295 
7296     // Make the VSLW intrinsic, computing 0x8000_0000.
7297     SDValue Res = BuildIntrinsicOp(Intrinsic::ppc_altivec_vslw, OnesV,
7298                                    OnesV, DAG, dl);
7299 
7300     // xor by OnesV to invert it.
7301     Res = DAG.getNode(ISD::XOR, dl, MVT::v4i32, Res, OnesV);
7302     return DAG.getNode(ISD::BITCAST, dl, Op.getValueType(), Res);
7303   }
7304 
7305   // Check to see if this is a wide variety of vsplti*, binop self cases.
7306   static const signed char SplatCsts[] = {
7307     -1, 1, -2, 2, -3, 3, -4, 4, -5, 5, -6, 6, -7, 7,
7308     -8, 8, -9, 9, -10, 10, -11, 11, -12, 12, -13, 13, 14, -14, 15, -15, -16
7309   };
7310 
7311   for (unsigned idx = 0; idx < array_lengthof(SplatCsts); ++idx) {
    // Indirect through the SplatCsts array so that we favor 'vsplti -1' for
    // cases which are ambiguous (e.g. formation of 0x8000_0000).
7314     int i = SplatCsts[idx];
7315 
7316     // Figure out what shift amount will be used by altivec if shifted by i in
7317     // this splat size.
7318     unsigned TypeShiftAmt = i & (SplatBitSize-1);
7319 
7320     // vsplti + shl self.
7321     if (SextVal == (int)((unsigned)i << TypeShiftAmt)) {
7322       SDValue Res = BuildSplatI(i, SplatSize, MVT::Other, DAG, dl);
7323       static const unsigned IIDs[] = { // Intrinsic to use for each size.
7324         Intrinsic::ppc_altivec_vslb, Intrinsic::ppc_altivec_vslh, 0,
7325         Intrinsic::ppc_altivec_vslw
7326       };
7327       Res = BuildIntrinsicOp(IIDs[SplatSize-1], Res, Res, DAG, dl);
7328       return DAG.getNode(ISD::BITCAST, dl, Op.getValueType(), Res);
7329     }
7330 
7331     // vsplti + srl self.
7332     if (SextVal == (int)((unsigned)i >> TypeShiftAmt)) {
7333       SDValue Res = BuildSplatI(i, SplatSize, MVT::Other, DAG, dl);
7334       static const unsigned IIDs[] = { // Intrinsic to use for each size.
7335         Intrinsic::ppc_altivec_vsrb, Intrinsic::ppc_altivec_vsrh, 0,
7336         Intrinsic::ppc_altivec_vsrw
7337       };
7338       Res = BuildIntrinsicOp(IIDs[SplatSize-1], Res, Res, DAG, dl);
7339       return DAG.getNode(ISD::BITCAST, dl, Op.getValueType(), Res);
7340     }
7341 
7342     // vsplti + sra self.
7343     if (SextVal == (int)((unsigned)i >> TypeShiftAmt)) {
7344       SDValue Res = BuildSplatI(i, SplatSize, MVT::Other, DAG, dl);
7345       static const unsigned IIDs[] = { // Intrinsic to use for each size.
7346         Intrinsic::ppc_altivec_vsrab, Intrinsic::ppc_altivec_vsrah, 0,
7347         Intrinsic::ppc_altivec_vsraw
7348       };
7349       Res = BuildIntrinsicOp(IIDs[SplatSize-1], Res, Res, DAG, dl);
7350       return DAG.getNode(ISD::BITCAST, dl, Op.getValueType(), Res);
7351     }
7352 
7353     // vsplti + rol self.
7354     if (SextVal == (int)(((unsigned)i << TypeShiftAmt) |
7355                          ((unsigned)i >> (SplatBitSize-TypeShiftAmt)))) {
7356       SDValue Res = BuildSplatI(i, SplatSize, MVT::Other, DAG, dl);
7357       static const unsigned IIDs[] = { // Intrinsic to use for each size.
7358         Intrinsic::ppc_altivec_vrlb, Intrinsic::ppc_altivec_vrlh, 0,
7359         Intrinsic::ppc_altivec_vrlw
7360       };
7361       Res = BuildIntrinsicOp(IIDs[SplatSize-1], Res, Res, DAG, dl);
7362       return DAG.getNode(ISD::BITCAST, dl, Op.getValueType(), Res);
7363     }
7364 
7365     // t = vsplti c, result = vsldoi t, t, 1
7366     if (SextVal == (int)(((unsigned)i << 8) | (i < 0 ? 0xFF : 0))) {
7367       SDValue T = BuildSplatI(i, SplatSize, MVT::v16i8, DAG, dl);
7368       unsigned Amt = Subtarget.isLittleEndian() ? 15 : 1;
7369       return BuildVSLDOI(T, T, Amt, Op.getValueType(), DAG, dl);
7370     }
7371     // t = vsplti c, result = vsldoi t, t, 2
7372     if (SextVal == (int)(((unsigned)i << 16) | (i < 0 ? 0xFFFF : 0))) {
7373       SDValue T = BuildSplatI(i, SplatSize, MVT::v16i8, DAG, dl);
7374       unsigned Amt = Subtarget.isLittleEndian() ? 14 : 2;
7375       return BuildVSLDOI(T, T, Amt, Op.getValueType(), DAG, dl);
7376     }
7377     // t = vsplti c, result = vsldoi t, t, 3
7378     if (SextVal == (int)(((unsigned)i << 24) | (i < 0 ? 0xFFFFFF : 0))) {
7379       SDValue T = BuildSplatI(i, SplatSize, MVT::v16i8, DAG, dl);
7380       unsigned Amt = Subtarget.isLittleEndian() ? 13 : 3;
7381       return BuildVSLDOI(T, T, Amt, Op.getValueType(), DAG, dl);
7382     }
7383   }
7384 
7385   return SDValue();
7386 }
7387 
7388 /// GeneratePerfectShuffle - Given an entry in the perfect-shuffle table, emit
7389 /// the specified operations to build the shuffle.
7390 static SDValue GeneratePerfectShuffle(unsigned PFEntry, SDValue LHS,
7391                                       SDValue RHS, SelectionDAG &DAG,
7392                                       const SDLoc &dl) {
7393   unsigned OpNum = (PFEntry >> 26) & 0x0F;
7394   unsigned LHSID = (PFEntry >> 13) & ((1 << 13)-1);
7395   unsigned RHSID = (PFEntry >>  0) & ((1 << 13)-1);
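  // Each 13-bit ID encodes a four-element shuffle mask as four base-9 digits
  // (0-7 select an element of the two input vectors, 8 means undef); for
  // example, (1*9+2)*9+3 below is the identity mask <0,1,2,3>.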
7396 
7397   enum {
7398     OP_COPY = 0,  // Copy, used for things like <u,u,u,3> to say it is <0,1,2,3>
7399     OP_VMRGHW,
7400     OP_VMRGLW,
7401     OP_VSPLTISW0,
7402     OP_VSPLTISW1,
7403     OP_VSPLTISW2,
7404     OP_VSPLTISW3,
7405     OP_VSLDOI4,
7406     OP_VSLDOI8,
7407     OP_VSLDOI12
7408   };
7409 
7410   if (OpNum == OP_COPY) {
7411     if (LHSID == (1*9+2)*9+3) return LHS;
7412     assert(LHSID == ((4*9+5)*9+6)*9+7 && "Illegal OP_COPY!");
7413     return RHS;
7414   }
7415 
7416   SDValue OpLHS, OpRHS;
7417   OpLHS = GeneratePerfectShuffle(PerfectShuffleTable[LHSID], LHS, RHS, DAG, dl);
7418   OpRHS = GeneratePerfectShuffle(PerfectShuffleTable[RHSID], LHS, RHS, DAG, dl);
7419 
7420   int ShufIdxs[16];
7421   switch (OpNum) {
7422   default: llvm_unreachable("Unknown i32 permute!");
7423   case OP_VMRGHW:
7424     ShufIdxs[ 0] =  0; ShufIdxs[ 1] =  1; ShufIdxs[ 2] =  2; ShufIdxs[ 3] =  3;
7425     ShufIdxs[ 4] = 16; ShufIdxs[ 5] = 17; ShufIdxs[ 6] = 18; ShufIdxs[ 7] = 19;
7426     ShufIdxs[ 8] =  4; ShufIdxs[ 9] =  5; ShufIdxs[10] =  6; ShufIdxs[11] =  7;
7427     ShufIdxs[12] = 20; ShufIdxs[13] = 21; ShufIdxs[14] = 22; ShufIdxs[15] = 23;
7428     break;
7429   case OP_VMRGLW:
7430     ShufIdxs[ 0] =  8; ShufIdxs[ 1] =  9; ShufIdxs[ 2] = 10; ShufIdxs[ 3] = 11;
7431     ShufIdxs[ 4] = 24; ShufIdxs[ 5] = 25; ShufIdxs[ 6] = 26; ShufIdxs[ 7] = 27;
7432     ShufIdxs[ 8] = 12; ShufIdxs[ 9] = 13; ShufIdxs[10] = 14; ShufIdxs[11] = 15;
7433     ShufIdxs[12] = 28; ShufIdxs[13] = 29; ShufIdxs[14] = 30; ShufIdxs[15] = 31;
7434     break;
7435   case OP_VSPLTISW0:
7436     for (unsigned i = 0; i != 16; ++i)
7437       ShufIdxs[i] = (i&3)+0;
7438     break;
7439   case OP_VSPLTISW1:
7440     for (unsigned i = 0; i != 16; ++i)
7441       ShufIdxs[i] = (i&3)+4;
7442     break;
7443   case OP_VSPLTISW2:
7444     for (unsigned i = 0; i != 16; ++i)
7445       ShufIdxs[i] = (i&3)+8;
7446     break;
7447   case OP_VSPLTISW3:
7448     for (unsigned i = 0; i != 16; ++i)
7449       ShufIdxs[i] = (i&3)+12;
7450     break;
7451   case OP_VSLDOI4:
7452     return BuildVSLDOI(OpLHS, OpRHS, 4, OpLHS.getValueType(), DAG, dl);
7453   case OP_VSLDOI8:
7454     return BuildVSLDOI(OpLHS, OpRHS, 8, OpLHS.getValueType(), DAG, dl);
7455   case OP_VSLDOI12:
7456     return BuildVSLDOI(OpLHS, OpRHS, 12, OpLHS.getValueType(), DAG, dl);
7457   }
7458   EVT VT = OpLHS.getValueType();
7459   OpLHS = DAG.getNode(ISD::BITCAST, dl, MVT::v16i8, OpLHS);
7460   OpRHS = DAG.getNode(ISD::BITCAST, dl, MVT::v16i8, OpRHS);
7461   SDValue T = DAG.getVectorShuffle(MVT::v16i8, dl, OpLHS, OpRHS, ShufIdxs);
7462   return DAG.getNode(ISD::BITCAST, dl, VT, T);
7463 }
7464 
7465 /// LowerVECTOR_SHUFFLE - Return the code we lower for VECTOR_SHUFFLE.  If this
7466 /// is a shuffle we can handle in a single instruction, return it.  Otherwise,
7467 /// return the code it can be lowered into.  Worst case, it can always be
7468 /// lowered into a vperm.
7469 SDValue PPCTargetLowering::LowerVECTOR_SHUFFLE(SDValue Op,
7470                                                SelectionDAG &DAG) const {
7471   SDLoc dl(Op);
7472   SDValue V1 = Op.getOperand(0);
7473   SDValue V2 = Op.getOperand(1);
7474   ShuffleVectorSDNode *SVOp = cast<ShuffleVectorSDNode>(Op);
7475   EVT VT = Op.getValueType();
7476   bool isLittleEndian = Subtarget.isLittleEndian();
7477 
7478   unsigned ShiftElts, InsertAtByte;
7479   bool Swap;
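  // On P9, try to match the mask as an insertion of one aligned word of V2
  // into V1: InsertAtByte is the byte offset of the insertion, ShiftElts says
  // how many words V2 must be rotated (via VECSHL of V2 with itself) so the
  // desired word lines up, and Swap indicates the inputs must be exchanged.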
7480   if (Subtarget.hasP9Vector() &&
7481       PPC::isXXINSERTWMask(SVOp, ShiftElts, InsertAtByte, Swap,
7482                            isLittleEndian)) {
7483     if (Swap)
7484       std::swap(V1, V2);
7485     SDValue Conv1 = DAG.getNode(ISD::BITCAST, dl, MVT::v4i32, V1);
7486     SDValue Conv2 = DAG.getNode(ISD::BITCAST, dl, MVT::v4i32, V2);
7487     if (ShiftElts) {
7488       SDValue Shl = DAG.getNode(PPCISD::VECSHL, dl, MVT::v4i32, Conv2, Conv2,
7489                                 DAG.getConstant(ShiftElts, dl, MVT::i32));
7490       SDValue Ins = DAG.getNode(PPCISD::XXINSERT, dl, MVT::v4i32, Conv1, Shl,
7491                                 DAG.getConstant(InsertAtByte, dl, MVT::i32));
7492       return DAG.getNode(ISD::BITCAST, dl, MVT::v16i8, Ins);
7493     }
7494     SDValue Ins = DAG.getNode(PPCISD::XXINSERT, dl, MVT::v4i32, Conv1, Conv2,
7495                               DAG.getConstant(InsertAtByte, dl, MVT::i32));
7496     return DAG.getNode(ISD::BITCAST, dl, MVT::v16i8, Ins);
7497   }
7498 
7499   if (Subtarget.hasVSX()) {
7500     if (V2.isUndef() && PPC::isSplatShuffleMask(SVOp, 4)) {
7501       int SplatIdx = PPC::getVSPLTImmediate(SVOp, 4, DAG);
7502 
7503       // If the source for the shuffle is a scalar_to_vector that came from a
7504       // 32-bit load, it will have used LXVWSX so we don't need to splat again.
7505       if (Subtarget.isISA3_0() &&
7506           ((isLittleEndian && SplatIdx == 3) ||
7507            (!isLittleEndian && SplatIdx == 0))) {
7508         SDValue Src = V1.getOperand(0);
7509         if (Src.getOpcode() == ISD::SCALAR_TO_VECTOR &&
7510             Src.getOperand(0).getOpcode() == ISD::LOAD &&
7511             Src.getOperand(0).hasOneUse())
7512           return V1;
7513       }
7514       SDValue Conv = DAG.getNode(ISD::BITCAST, dl, MVT::v4i32, V1);
7515       SDValue Splat = DAG.getNode(PPCISD::XXSPLT, dl, MVT::v4i32, Conv,
7516                                   DAG.getConstant(SplatIdx, dl, MVT::i32));
7517       return DAG.getNode(ISD::BITCAST, dl, MVT::v16i8, Splat);
7518     }
7519 
7520     // Left shifts of 8 bytes are actually swaps. Convert accordingly.
7521     if (V2.isUndef() && PPC::isVSLDOIShuffleMask(SVOp, 1, DAG) == 8) {
7522       SDValue Conv = DAG.getNode(ISD::BITCAST, dl, MVT::v2f64, V1);
7523       SDValue Swap = DAG.getNode(PPCISD::SWAP_NO_CHAIN, dl, MVT::v2f64, Conv);
7524       return DAG.getNode(ISD::BITCAST, dl, MVT::v16i8, Swap);
7525     }
7526 
7527   }
7528 
7529   if (Subtarget.hasQPX()) {
7530     if (VT.getVectorNumElements() != 4)
7531       return SDValue();
7532 
7533     if (V2.isUndef()) V2 = V1;
7534 
7535     int AlignIdx = PPC::isQVALIGNIShuffleMask(SVOp);
7536     if (AlignIdx != -1) {
7537       return DAG.getNode(PPCISD::QVALIGNI, dl, VT, V1, V2,
7538                          DAG.getConstant(AlignIdx, dl, MVT::i32));
7539     } else if (SVOp->isSplat()) {
7540       int SplatIdx = SVOp->getSplatIndex();
7541       if (SplatIdx >= 4) {
7542         std::swap(V1, V2);
7543         SplatIdx -= 4;
7544       }
7545 
7546       return DAG.getNode(PPCISD::QVESPLATI, dl, VT, V1,
7547                          DAG.getConstant(SplatIdx, dl, MVT::i32));
7548     }
7549 
7550     // Lower this into a qvgpci/qvfperm pair.
7551 
7552     // Compute the qvgpci literal
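    // Each mask element occupies a 3-bit field, with element 0 in the most
    // significant position.  E.g. the mask <2,0,3,1> yields
    // idx = (2 << 9) | (0 << 6) | (3 << 3) | 1 = 1049.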
7553     unsigned idx = 0;
7554     for (unsigned i = 0; i < 4; ++i) {
7555       int m = SVOp->getMaskElt(i);
7556       unsigned mm = m >= 0 ? (unsigned) m : i;
7557       idx |= mm << (3-i)*3;
7558     }
7559 
7560     SDValue V3 = DAG.getNode(PPCISD::QVGPCI, dl, MVT::v4f64,
7561                              DAG.getConstant(idx, dl, MVT::i32));
7562     return DAG.getNode(PPCISD::QVFPERM, dl, VT, V1, V2, V3);
7563   }
7564 
7565   // Cases that are handled by instructions that take permute immediates
7566   // (such as vsplt*) should be left as VECTOR_SHUFFLE nodes so they can be
7567   // selected by the instruction selector.
7568   if (V2.isUndef()) {
7569     if (PPC::isSplatShuffleMask(SVOp, 1) ||
7570         PPC::isSplatShuffleMask(SVOp, 2) ||
7571         PPC::isSplatShuffleMask(SVOp, 4) ||
7572         PPC::isVPKUWUMShuffleMask(SVOp, 1, DAG) ||
7573         PPC::isVPKUHUMShuffleMask(SVOp, 1, DAG) ||
7574         PPC::isVSLDOIShuffleMask(SVOp, 1, DAG) != -1 ||
7575         PPC::isVMRGLShuffleMask(SVOp, 1, 1, DAG) ||
7576         PPC::isVMRGLShuffleMask(SVOp, 2, 1, DAG) ||
7577         PPC::isVMRGLShuffleMask(SVOp, 4, 1, DAG) ||
7578         PPC::isVMRGHShuffleMask(SVOp, 1, 1, DAG) ||
7579         PPC::isVMRGHShuffleMask(SVOp, 2, 1, DAG) ||
7580         PPC::isVMRGHShuffleMask(SVOp, 4, 1, DAG) ||
7581         (Subtarget.hasP8Altivec() && (
7582          PPC::isVPKUDUMShuffleMask(SVOp, 1, DAG) ||
7583          PPC::isVMRGEOShuffleMask(SVOp, true, 1, DAG) ||
7584          PPC::isVMRGEOShuffleMask(SVOp, false, 1, DAG)))) {
7585       return Op;
7586     }
7587   }
7588 
7589   // Altivec has a variety of "shuffle immediates" that take two vector inputs
7590   // and produce a fixed permutation.  If any of these match, do not lower to
7591   // VPERM.
7592   unsigned int ShuffleKind = isLittleEndian ? 2 : 0;
7593   if (PPC::isVPKUWUMShuffleMask(SVOp, ShuffleKind, DAG) ||
7594       PPC::isVPKUHUMShuffleMask(SVOp, ShuffleKind, DAG) ||
7595       PPC::isVSLDOIShuffleMask(SVOp, ShuffleKind, DAG) != -1 ||
7596       PPC::isVMRGLShuffleMask(SVOp, 1, ShuffleKind, DAG) ||
7597       PPC::isVMRGLShuffleMask(SVOp, 2, ShuffleKind, DAG) ||
7598       PPC::isVMRGLShuffleMask(SVOp, 4, ShuffleKind, DAG) ||
7599       PPC::isVMRGHShuffleMask(SVOp, 1, ShuffleKind, DAG) ||
7600       PPC::isVMRGHShuffleMask(SVOp, 2, ShuffleKind, DAG) ||
7601       PPC::isVMRGHShuffleMask(SVOp, 4, ShuffleKind, DAG) ||
7602       (Subtarget.hasP8Altivec() && (
7603        PPC::isVPKUDUMShuffleMask(SVOp, ShuffleKind, DAG) ||
7604        PPC::isVMRGEOShuffleMask(SVOp, true, ShuffleKind, DAG) ||
7605        PPC::isVMRGEOShuffleMask(SVOp, false, ShuffleKind, DAG))))
7606     return Op;
7607 
7608   // Check to see if this is a shuffle of 4-byte values.  If so, we can use our
7609   // perfect shuffle table to emit an optimal matching sequence.
7610   ArrayRef<int> PermMask = SVOp->getMask();
7611 
7612   unsigned PFIndexes[4];
7613   bool isFourElementShuffle = true;
7614   for (unsigned i = 0; i != 4 && isFourElementShuffle; ++i) { // Element number
7615     unsigned EltNo = 8;   // Start out undef.
7616     for (unsigned j = 0; j != 4; ++j) {  // Intra-element byte.
7617       if (PermMask[i*4+j] < 0)
7618         continue;   // Undef, ignore it.
7619 
7620       unsigned ByteSource = PermMask[i*4+j];
7621       if ((ByteSource & 3) != j) {
7622         isFourElementShuffle = false;
7623         break;
7624       }
7625 
7626       if (EltNo == 8) {
7627         EltNo = ByteSource/4;
7628       } else if (EltNo != ByteSource/4) {
7629         isFourElementShuffle = false;
7630         break;
7631       }
7632     }
7633     PFIndexes[i] = EltNo;
7634   }
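  // At this point each PFIndexes[i] is either the 4-byte source element (0-7,
  // where 4-7 name elements of V2) that entirely supplies result element i,
  // or 8 if result element i is completely undef.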
7635 
7636   // If this shuffle can be expressed as a shuffle of 4-byte elements, use the
7637   // perfect shuffle vector to determine if it is cost effective to do this as
7638   // discrete instructions, or whether we should use a vperm.
7639   // For now, we skip this for little endian until such time as we have a
7640   // little-endian perfect shuffle table.
7641   if (isFourElementShuffle && !isLittleEndian) {
7642     // Compute the index in the perfect shuffle table.
7643     unsigned PFTableIndex =
7644       PFIndexes[0]*9*9*9+PFIndexes[1]*9*9+PFIndexes[2]*9+PFIndexes[3];
7645 
7646     unsigned PFEntry = PerfectShuffleTable[PFTableIndex];
7647     unsigned Cost  = (PFEntry >> 30);
7648 
7649     // Determining when to avoid vperm is tricky.  Many things affect the cost
7650     // of vperm, particularly how many times the perm mask needs to be computed.
7651     // For example, if the perm mask can be hoisted out of a loop or is already
7652     // used (perhaps because there are multiple permutes with the same shuffle
7653     // mask?) the vperm has a cost of 1.  OTOH, hoisting the permute mask out of
7654     // the loop requires an extra register.
7655     //
7656     // As a compromise, we only emit discrete instructions if the shuffle can be
7657     // generated in 3 or fewer operations.  When we have loop information
7658     // available, if this block is within a loop, we should avoid using vperm
7659     // for 3-operation perms and use a constant pool load instead.
7660     if (Cost < 3)
7661       return GeneratePerfectShuffle(PFEntry, V1, V2, DAG, dl);
7662   }
7663 
7664   // Lower this to a VPERM(V1, V2, V3) expression, where V3 is a constant
7665   // vector that will get spilled to the constant pool.
7666   if (V2.isUndef()) V2 = V1;
7667 
7668   // The SHUFFLE_VECTOR mask is almost exactly what we want for vperm, except
7669   // that it is in input element units, not in bytes.  Convert now.
7670 
7671   // For little endian, the order of the input vectors is reversed, and
7672   // the permutation mask is complemented with respect to 31.  This is
7673   // necessary to produce proper semantics with the big-endian-biased vperm
7674   // instruction.
7675   EVT EltVT = V1.getValueType().getVectorElementType();
7676   unsigned BytesPerElement = EltVT.getSizeInBits()/8;
7677 
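  // E.g. with v4i32 inputs, mask element 5 (the second element of V2) expands
  // to byte indices 20,21,22,23 for big-endian, which become 11,10,9,8 after
  // the 31-complement for little-endian.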
7678   SmallVector<SDValue, 16> ResultMask;
7679   for (unsigned i = 0, e = VT.getVectorNumElements(); i != e; ++i) {
7680     unsigned SrcElt = PermMask[i] < 0 ? 0 : PermMask[i];
7681 
7682     for (unsigned j = 0; j != BytesPerElement; ++j)
7683       if (isLittleEndian)
7684         ResultMask.push_back(DAG.getConstant(31 - (SrcElt*BytesPerElement + j),
7685                                              dl, MVT::i32));
7686       else
7687         ResultMask.push_back(DAG.getConstant(SrcElt*BytesPerElement + j, dl,
7688                                              MVT::i32));
7689   }
7690 
7691   SDValue VPermMask = DAG.getBuildVector(MVT::v16i8, dl, ResultMask);
7692   if (isLittleEndian)
7693     return DAG.getNode(PPCISD::VPERM, dl, V1.getValueType(),
7694                        V2, V1, VPermMask);
7695   else
7696     return DAG.getNode(PPCISD::VPERM, dl, V1.getValueType(),
7697                        V1, V2, VPermMask);
7698 }
7699 
/// getVectorCompareInfo - Given an intrinsic, return false if it is not a
/// vector comparison.  If it is, return true and fill in CompareOpc/isDot with
/// information about the intrinsic.
7703 static bool getVectorCompareInfo(SDValue Intrin, int &CompareOpc,
7704                                  bool &isDot, const PPCSubtarget &Subtarget) {
7705   unsigned IntrinsicID =
7706     cast<ConstantSDNode>(Intrin.getOperand(0))->getZExtValue();
7707   CompareOpc = -1;
7708   isDot = false;
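  // The CompareOpc values below are the extended-opcode fields of the
  // corresponding vcmp* (and xvcmp*) instructions; the "_p" (predicate)
  // intrinsics select the record forms, which also set CR6.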
7709   switch (IntrinsicID) {
7710   default: return false;
7711     // Comparison predicates.
7712   case Intrinsic::ppc_altivec_vcmpbfp_p:  CompareOpc = 966; isDot = 1; break;
7713   case Intrinsic::ppc_altivec_vcmpeqfp_p: CompareOpc = 198; isDot = 1; break;
7714   case Intrinsic::ppc_altivec_vcmpequb_p: CompareOpc =   6; isDot = 1; break;
7715   case Intrinsic::ppc_altivec_vcmpequh_p: CompareOpc =  70; isDot = 1; break;
7716   case Intrinsic::ppc_altivec_vcmpequw_p: CompareOpc = 134; isDot = 1; break;
7717   case Intrinsic::ppc_altivec_vcmpequd_p:
7718     if (Subtarget.hasP8Altivec()) {
7719       CompareOpc = 199;
7720       isDot = 1;
7721     } else
7722       return false;
7723 
7724     break;
7725   case Intrinsic::ppc_altivec_vcmpneb_p:
7726   case Intrinsic::ppc_altivec_vcmpneh_p:
7727   case Intrinsic::ppc_altivec_vcmpnew_p:
7728   case Intrinsic::ppc_altivec_vcmpnezb_p:
7729   case Intrinsic::ppc_altivec_vcmpnezh_p:
7730   case Intrinsic::ppc_altivec_vcmpnezw_p:
7731     if (Subtarget.hasP9Altivec()) {
7732       switch(IntrinsicID) {
7733       default: llvm_unreachable("Unknown comparison intrinsic.");
7734       case Intrinsic::ppc_altivec_vcmpneb_p: CompareOpc = 7; break;
7735       case Intrinsic::ppc_altivec_vcmpneh_p: CompareOpc = 71; break;
7736       case Intrinsic::ppc_altivec_vcmpnew_p: CompareOpc = 135; break;
7737       case Intrinsic::ppc_altivec_vcmpnezb_p: CompareOpc = 263; break;
7738       case Intrinsic::ppc_altivec_vcmpnezh_p: CompareOpc = 327; break;
7739       case Intrinsic::ppc_altivec_vcmpnezw_p: CompareOpc = 391; break;
7740       }
7741       isDot = 1;
7742     } else
7743       return false;
7744 
7745     break;
7746   case Intrinsic::ppc_altivec_vcmpgefp_p: CompareOpc = 454; isDot = 1; break;
7747   case Intrinsic::ppc_altivec_vcmpgtfp_p: CompareOpc = 710; isDot = 1; break;
7748   case Intrinsic::ppc_altivec_vcmpgtsb_p: CompareOpc = 774; isDot = 1; break;
7749   case Intrinsic::ppc_altivec_vcmpgtsh_p: CompareOpc = 838; isDot = 1; break;
7750   case Intrinsic::ppc_altivec_vcmpgtsw_p: CompareOpc = 902; isDot = 1; break;
7751   case Intrinsic::ppc_altivec_vcmpgtsd_p:
7752     if (Subtarget.hasP8Altivec()) {
7753       CompareOpc = 967;
7754       isDot = 1;
7755     } else
7756       return false;
7757 
7758     break;
7759   case Intrinsic::ppc_altivec_vcmpgtub_p: CompareOpc = 518; isDot = 1; break;
7760   case Intrinsic::ppc_altivec_vcmpgtuh_p: CompareOpc = 582; isDot = 1; break;
7761   case Intrinsic::ppc_altivec_vcmpgtuw_p: CompareOpc = 646; isDot = 1; break;
7762   case Intrinsic::ppc_altivec_vcmpgtud_p:
7763     if (Subtarget.hasP8Altivec()) {
7764       CompareOpc = 711;
7765       isDot = 1;
7766     } else
7767       return false;
7768 
7769     break;
7770     // VSX predicate comparisons use the same infrastructure
7771   case Intrinsic::ppc_vsx_xvcmpeqdp_p:
7772   case Intrinsic::ppc_vsx_xvcmpgedp_p:
7773   case Intrinsic::ppc_vsx_xvcmpgtdp_p:
7774   case Intrinsic::ppc_vsx_xvcmpeqsp_p:
7775   case Intrinsic::ppc_vsx_xvcmpgesp_p:
7776   case Intrinsic::ppc_vsx_xvcmpgtsp_p:
7777     if (Subtarget.hasVSX()) {
7778       switch (IntrinsicID) {
7779       case Intrinsic::ppc_vsx_xvcmpeqdp_p: CompareOpc = 99; break;
7780       case Intrinsic::ppc_vsx_xvcmpgedp_p: CompareOpc = 115; break;
7781       case Intrinsic::ppc_vsx_xvcmpgtdp_p: CompareOpc = 107; break;
7782       case Intrinsic::ppc_vsx_xvcmpeqsp_p: CompareOpc = 67; break;
7783       case Intrinsic::ppc_vsx_xvcmpgesp_p: CompareOpc = 83; break;
7784       case Intrinsic::ppc_vsx_xvcmpgtsp_p: CompareOpc = 75; break;
7785       }
7786       isDot = 1;
7787     }
7788     else
7789       return false;
7790 
7791     break;
7792 
7793     // Normal Comparisons.
7794   case Intrinsic::ppc_altivec_vcmpbfp:    CompareOpc = 966; isDot = 0; break;
7795   case Intrinsic::ppc_altivec_vcmpeqfp:   CompareOpc = 198; isDot = 0; break;
7796   case Intrinsic::ppc_altivec_vcmpequb:   CompareOpc =   6; isDot = 0; break;
7797   case Intrinsic::ppc_altivec_vcmpequh:   CompareOpc =  70; isDot = 0; break;
7798   case Intrinsic::ppc_altivec_vcmpequw:   CompareOpc = 134; isDot = 0; break;
7799   case Intrinsic::ppc_altivec_vcmpequd:
7800     if (Subtarget.hasP8Altivec()) {
7801       CompareOpc = 199;
7802       isDot = 0;
7803     } else
7804       return false;
7805 
7806     break;
7807   case Intrinsic::ppc_altivec_vcmpneb:
7808   case Intrinsic::ppc_altivec_vcmpneh:
7809   case Intrinsic::ppc_altivec_vcmpnew:
7810   case Intrinsic::ppc_altivec_vcmpnezb:
7811   case Intrinsic::ppc_altivec_vcmpnezh:
7812   case Intrinsic::ppc_altivec_vcmpnezw:
7813     if (Subtarget.hasP9Altivec()) {
7814       switch (IntrinsicID) {
7815       default: llvm_unreachable("Unknown comparison intrinsic.");
7816       case Intrinsic::ppc_altivec_vcmpneb: CompareOpc = 7; break;
7817       case Intrinsic::ppc_altivec_vcmpneh: CompareOpc = 71; break;
7818       case Intrinsic::ppc_altivec_vcmpnew: CompareOpc = 135; break;
7819       case Intrinsic::ppc_altivec_vcmpnezb: CompareOpc = 263; break;
7820       case Intrinsic::ppc_altivec_vcmpnezh: CompareOpc = 327; break;
7821       case Intrinsic::ppc_altivec_vcmpnezw: CompareOpc = 391; break;
7822       }
7823       isDot = 0;
7824     } else
7825       return false;
7826     break;
7827   case Intrinsic::ppc_altivec_vcmpgefp:   CompareOpc = 454; isDot = 0; break;
7828   case Intrinsic::ppc_altivec_vcmpgtfp:   CompareOpc = 710; isDot = 0; break;
7829   case Intrinsic::ppc_altivec_vcmpgtsb:   CompareOpc = 774; isDot = 0; break;
7830   case Intrinsic::ppc_altivec_vcmpgtsh:   CompareOpc = 838; isDot = 0; break;
7831   case Intrinsic::ppc_altivec_vcmpgtsw:   CompareOpc = 902; isDot = 0; break;
7832   case Intrinsic::ppc_altivec_vcmpgtsd:
7833     if (Subtarget.hasP8Altivec()) {
7834       CompareOpc = 967;
7835       isDot = 0;
7836     } else
7837       return false;
7838 
7839     break;
7840   case Intrinsic::ppc_altivec_vcmpgtub:   CompareOpc = 518; isDot = 0; break;
7841   case Intrinsic::ppc_altivec_vcmpgtuh:   CompareOpc = 582; isDot = 0; break;
7842   case Intrinsic::ppc_altivec_vcmpgtuw:   CompareOpc = 646; isDot = 0; break;
7843   case Intrinsic::ppc_altivec_vcmpgtud:
7844     if (Subtarget.hasP8Altivec()) {
7845       CompareOpc = 711;
7846       isDot = 0;
7847     } else
7848       return false;
7849 
7850     break;
7851   }
7852   return true;
7853 }
7854 
7855 /// LowerINTRINSIC_WO_CHAIN - If this is an intrinsic that we want to custom
7856 /// lower, do it, otherwise return null.
7857 SDValue PPCTargetLowering::LowerINTRINSIC_WO_CHAIN(SDValue Op,
7858                                                    SelectionDAG &DAG) const {
7859   unsigned IntrinsicID =
7860     cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue();
7861 
7862   if (IntrinsicID == Intrinsic::thread_pointer) {
7863     // Reads the thread pointer register, used for __builtin_thread_pointer.
7864     bool is64bit = Subtarget.isPPC64();
7865     return DAG.getRegister(is64bit ? PPC::X13 : PPC::R2,
7866                            is64bit ? MVT::i64 : MVT::i32);
7867   }
7868 
7869   // If this is a lowered altivec predicate compare, CompareOpc is set to the
7870   // opcode number of the comparison.
7871   SDLoc dl(Op);
7872   int CompareOpc;
7873   bool isDot;
7874   if (!getVectorCompareInfo(Op, CompareOpc, isDot, Subtarget))
7875     return SDValue();    // Don't custom lower most intrinsics.
7876 
7877   // If this is a non-dot comparison, make the VCMP node and we are done.
7878   if (!isDot) {
7879     SDValue Tmp = DAG.getNode(PPCISD::VCMP, dl, Op.getOperand(2).getValueType(),
7880                               Op.getOperand(1), Op.getOperand(2),
7881                               DAG.getConstant(CompareOpc, dl, MVT::i32));
7882     return DAG.getNode(ISD::BITCAST, dl, Op.getValueType(), Tmp);
7883   }
7884 
7885   // Create the PPCISD altivec 'dot' comparison node.
7886   SDValue Ops[] = {
7887     Op.getOperand(2),  // LHS
7888     Op.getOperand(3),  // RHS
7889     DAG.getConstant(CompareOpc, dl, MVT::i32)
7890   };
7891   EVT VTs[] = { Op.getOperand(2).getValueType(), MVT::Glue };
7892   SDValue CompNode = DAG.getNode(PPCISD::VCMPo, dl, VTs, Ops);
7893 
7894   // Now that we have the comparison, emit a copy from the CR to a GPR.
7895   // This is flagged to the above dot comparison.
7896   SDValue Flags = DAG.getNode(PPCISD::MFOCRF, dl, MVT::i32,
7897                                 DAG.getRegister(PPC::CR6, MVT::i32),
7898                                 CompNode.getValue(1));
7899 
7900   // Unpack the result based on how the target uses it.
7901   unsigned BitNo;   // Bit # of CR6.
7902   bool InvertBit;   // Invert result?
7903   switch (cast<ConstantSDNode>(Op.getOperand(1))->getZExtValue()) {
7904   default:  // Can't happen, don't crash on invalid number though.
7905   case 0:   // Return the value of the EQ bit of CR6.
7906     BitNo = 0; InvertBit = false;
7907     break;
7908   case 1:   // Return the inverted value of the EQ bit of CR6.
7909     BitNo = 0; InvertBit = true;
7910     break;
7911   case 2:   // Return the value of the LT bit of CR6.
7912     BitNo = 2; InvertBit = false;
7913     break;
7914   case 3:   // Return the inverted value of the LT bit of CR6.
7915     BitNo = 2; InvertBit = true;
7916     break;
7917   }
7918 
7919   // Shift the bit into the low position.
7920   Flags = DAG.getNode(ISD::SRL, dl, MVT::i32, Flags,
7921                       DAG.getConstant(8 - (3 - BitNo), dl, MVT::i32));
7922   // Isolate the bit.
7923   Flags = DAG.getNode(ISD::AND, dl, MVT::i32, Flags,
7924                       DAG.getConstant(1, dl, MVT::i32));
7925 
7926   // If we are supposed to, toggle the bit.
7927   if (InvertBit)
7928     Flags = DAG.getNode(ISD::XOR, dl, MVT::i32, Flags,
7929                         DAG.getConstant(1, dl, MVT::i32));
7930   return Flags;
7931 }
7932 
7933 SDValue PPCTargetLowering::LowerSIGN_EXTEND_INREG(SDValue Op,
7934                                                   SelectionDAG &DAG) const {
7935   SDLoc dl(Op);
  // For v2i64 (VSX), we can pattern match the v2i32 case (using fp <-> int
  // instructions), but for smaller types, we need to first extend up to v2i32
  // before going any further.
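  // For example, a sign_extend_inreg of v2i64 from v2i16 is emitted as a
  // sign_extend_inreg of v4i32 from v4i16 on the bitcast input, followed by
  // a sign_extend_inreg of v2i64 from v2i32 on the result.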
7939   if (Op.getValueType() == MVT::v2i64) {
7940     EVT ExtVT = cast<VTSDNode>(Op.getOperand(1))->getVT();
7941     if (ExtVT != MVT::v2i32) {
7942       Op = DAG.getNode(ISD::BITCAST, dl, MVT::v4i32, Op.getOperand(0));
7943       Op = DAG.getNode(ISD::SIGN_EXTEND_INREG, dl, MVT::v4i32, Op,
7944                        DAG.getValueType(EVT::getVectorVT(*DAG.getContext(),
7945                                         ExtVT.getVectorElementType(), 4)));
7946       Op = DAG.getNode(ISD::BITCAST, dl, MVT::v2i64, Op);
7947       Op = DAG.getNode(ISD::SIGN_EXTEND_INREG, dl, MVT::v2i64, Op,
7948                        DAG.getValueType(MVT::v2i32));
7949     }
7950 
7951     return Op;
7952   }
7953 
7954   return SDValue();
7955 }
7956 
7957 SDValue PPCTargetLowering::LowerSCALAR_TO_VECTOR(SDValue Op,
7958                                                    SelectionDAG &DAG) const {
7959   SDLoc dl(Op);
7960   // Create a stack slot that is 16-byte aligned.
7961   MachineFrameInfo &MFI = DAG.getMachineFunction().getFrameInfo();
7962   int FrameIdx = MFI.CreateStackObject(16, 16, false);
7963   EVT PtrVT = getPointerTy(DAG.getDataLayout());
7964   SDValue FIdx = DAG.getFrameIndex(FrameIdx, PtrVT);
7965 
7966   // Store the input value into Value#0 of the stack slot.
7967   SDValue Store = DAG.getStore(DAG.getEntryNode(), dl, Op.getOperand(0), FIdx,
7968                                MachinePointerInfo());
7969   // Load it out.
7970   return DAG.getLoad(Op.getValueType(), dl, Store, FIdx, MachinePointerInfo());
7971 }
7972 
7973 SDValue PPCTargetLowering::LowerINSERT_VECTOR_ELT(SDValue Op,
7974                                                   SelectionDAG &DAG) const {
7975   assert(Op.getOpcode() == ISD::INSERT_VECTOR_ELT &&
7976          "Should only be called for ISD::INSERT_VECTOR_ELT");
7977   ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op.getOperand(2));
7978   // We have legal lowering for constant indices but not for variable ones.
7979   if (C)
7980     return Op;
7981   return SDValue();
7982 }
7983 
7984 SDValue PPCTargetLowering::LowerEXTRACT_VECTOR_ELT(SDValue Op,
7985                                                    SelectionDAG &DAG) const {
7986   SDLoc dl(Op);
7987   SDNode *N = Op.getNode();
7988 
7989   assert(N->getOperand(0).getValueType() == MVT::v4i1 &&
7990          "Unknown extract_vector_elt type");
7991 
7992   SDValue Value = N->getOperand(0);
7993 
7994   // The first part of this is like the store lowering except that we don't
7995   // need to track the chain.
7996 
7997   // The values are now known to be -1 (false) or 1 (true). To convert this
7998   // into 0 (false) and 1 (true), add 1 and then divide by 2 (multiply by 0.5).
7999   // This can be done with an fma and the 0.5 constant: (V+1.0)*0.5 = 0.5*V+0.5
8000   Value = DAG.getNode(PPCISD::QBFLT, dl, MVT::v4f64, Value);
8001 
8002   // FIXME: We can make this an f32 vector, but the BUILD_VECTOR code needs to
8003   // understand how to form the extending load.
8004   SDValue FPHalfs = DAG.getConstantFP(0.5, dl, MVT::v4f64);
8005 
8006   Value = DAG.getNode(ISD::FMA, dl, MVT::v4f64, Value, FPHalfs, FPHalfs);
8007 
8008   // Now convert to an integer and store.
8009   Value = DAG.getNode(ISD::INTRINSIC_WO_CHAIN, dl, MVT::v4f64,
8010     DAG.getConstant(Intrinsic::ppc_qpx_qvfctiwu, dl, MVT::i32),
8011     Value);
8012 
8013   MachineFrameInfo &MFI = DAG.getMachineFunction().getFrameInfo();
8014   int FrameIdx = MFI.CreateStackObject(16, 16, false);
8015   MachinePointerInfo PtrInfo =
8016       MachinePointerInfo::getFixedStack(DAG.getMachineFunction(), FrameIdx);
8017   EVT PtrVT = getPointerTy(DAG.getDataLayout());
8018   SDValue FIdx = DAG.getFrameIndex(FrameIdx, PtrVT);
8019 
8020   SDValue StoreChain = DAG.getEntryNode();
8021   SDValue Ops[] = {StoreChain,
8022                    DAG.getConstant(Intrinsic::ppc_qpx_qvstfiw, dl, MVT::i32),
8023                    Value, FIdx};
8024   SDVTList VTs = DAG.getVTList(/*chain*/ MVT::Other);
8025 
8026   StoreChain = DAG.getMemIntrinsicNode(ISD::INTRINSIC_VOID,
8027     dl, VTs, Ops, MVT::v4i32, PtrInfo);
8028 
8029   // Extract the value requested.
8030   unsigned Offset = 4*cast<ConstantSDNode>(N->getOperand(1))->getZExtValue();
8031   SDValue Idx = DAG.getConstant(Offset, dl, FIdx.getValueType());
8032   Idx = DAG.getNode(ISD::ADD, dl, FIdx.getValueType(), FIdx, Idx);
8033 
8034   SDValue IntVal =
8035       DAG.getLoad(MVT::i32, dl, StoreChain, Idx, PtrInfo.getWithOffset(Offset));
8036 
8037   if (!Subtarget.useCRBits())
8038     return IntVal;
8039 
8040   return DAG.getNode(ISD::TRUNCATE, dl, MVT::i1, IntVal);
8041 }
8042 
8043 /// Lowering for QPX v4i1 loads
8044 SDValue PPCTargetLowering::LowerVectorLoad(SDValue Op,
8045                                            SelectionDAG &DAG) const {
8046   SDLoc dl(Op);
8047   LoadSDNode *LN = cast<LoadSDNode>(Op.getNode());
8048   SDValue LoadChain = LN->getChain();
8049   SDValue BasePtr = LN->getBasePtr();
8050 
8051   if (Op.getValueType() == MVT::v4f64 ||
8052       Op.getValueType() == MVT::v4f32) {
8053     EVT MemVT = LN->getMemoryVT();
8054     unsigned Alignment = LN->getAlignment();
8055 
8056     // If this load is properly aligned, then it is legal.
8057     if (Alignment >= MemVT.getStoreSize())
8058       return Op;
8059 
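    // Otherwise, split the underaligned vector load into four scalar loads
    // (extending loads when the in-memory element type is narrower than the
    // result type) and merge the chains back together with a TokenFactor.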
8060     EVT ScalarVT = Op.getValueType().getScalarType(),
8061         ScalarMemVT = MemVT.getScalarType();
8062     unsigned Stride = ScalarMemVT.getStoreSize();
8063 
8064     SDValue Vals[4], LoadChains[4];
8065     for (unsigned Idx = 0; Idx < 4; ++Idx) {
8066       SDValue Load;
8067       if (ScalarVT != ScalarMemVT)
8068         Load = DAG.getExtLoad(LN->getExtensionType(), dl, ScalarVT, LoadChain,
8069                               BasePtr,
8070                               LN->getPointerInfo().getWithOffset(Idx * Stride),
8071                               ScalarMemVT, MinAlign(Alignment, Idx * Stride),
8072                               LN->getMemOperand()->getFlags(), LN->getAAInfo());
8073       else
8074         Load = DAG.getLoad(ScalarVT, dl, LoadChain, BasePtr,
8075                            LN->getPointerInfo().getWithOffset(Idx * Stride),
8076                            MinAlign(Alignment, Idx * Stride),
8077                            LN->getMemOperand()->getFlags(), LN->getAAInfo());
8078 
8079       if (Idx == 0 && LN->isIndexed()) {
8080         assert(LN->getAddressingMode() == ISD::PRE_INC &&
8081                "Unknown addressing mode on vector load");
8082         Load = DAG.getIndexedLoad(Load, dl, BasePtr, LN->getOffset(),
8083                                   LN->getAddressingMode());
8084       }
8085 
8086       Vals[Idx] = Load;
8087       LoadChains[Idx] = Load.getValue(1);
8088 
8089       BasePtr = DAG.getNode(ISD::ADD, dl, BasePtr.getValueType(), BasePtr,
8090                             DAG.getConstant(Stride, dl,
8091                                             BasePtr.getValueType()));
8092     }
8093 
8094     SDValue TF =  DAG.getNode(ISD::TokenFactor, dl, MVT::Other, LoadChains);
8095     SDValue Value = DAG.getBuildVector(Op.getValueType(), dl, Vals);
8096 
8097     if (LN->isIndexed()) {
8098       SDValue RetOps[] = { Value, Vals[0].getValue(1), TF };
8099       return DAG.getMergeValues(RetOps, dl);
8100     }
8101 
8102     SDValue RetOps[] = { Value, TF };
8103     return DAG.getMergeValues(RetOps, dl);
8104   }
8105 
8106   assert(Op.getValueType() == MVT::v4i1 && "Unknown load to lower");
8107   assert(LN->isUnindexed() && "Indexed v4i1 loads are not supported");
8108 
8109   // To lower v4i1 from a byte array, we load the byte elements of the
8110   // vector and then reuse the BUILD_VECTOR logic.
8111 
8112   SDValue VectElmts[4], VectElmtChains[4];
8113   for (unsigned i = 0; i < 4; ++i) {
8114     SDValue Idx = DAG.getConstant(i, dl, BasePtr.getValueType());
8115     Idx = DAG.getNode(ISD::ADD, dl, BasePtr.getValueType(), BasePtr, Idx);
8116 
8117     VectElmts[i] = DAG.getExtLoad(
8118         ISD::EXTLOAD, dl, MVT::i32, LoadChain, Idx,
8119         LN->getPointerInfo().getWithOffset(i), MVT::i8,
8120         /* Alignment = */ 1, LN->getMemOperand()->getFlags(), LN->getAAInfo());
8121     VectElmtChains[i] = VectElmts[i].getValue(1);
8122   }
8123 
8124   LoadChain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, VectElmtChains);
8125   SDValue Value = DAG.getBuildVector(MVT::v4i1, dl, VectElmts);
8126 
8127   SDValue RVals[] = { Value, LoadChain };
8128   return DAG.getMergeValues(RVals, dl);
8129 }
8130 
8131 /// Lowering for QPX v4i1 stores
8132 SDValue PPCTargetLowering::LowerVectorStore(SDValue Op,
8133                                             SelectionDAG &DAG) const {
8134   SDLoc dl(Op);
8135   StoreSDNode *SN = cast<StoreSDNode>(Op.getNode());
8136   SDValue StoreChain = SN->getChain();
8137   SDValue BasePtr = SN->getBasePtr();
8138   SDValue Value = SN->getValue();
8139 
8140   if (Value.getValueType() == MVT::v4f64 ||
8141       Value.getValueType() == MVT::v4f32) {
8142     EVT MemVT = SN->getMemoryVT();
8143     unsigned Alignment = SN->getAlignment();
8144 
8145     // If this store is properly aligned, then it is legal.
8146     if (Alignment >= MemVT.getStoreSize())
8147       return Op;
8148 
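    // Otherwise, scalarize the underaligned vector store into four scalar
    // stores (truncating stores when the in-memory element type is narrower),
    // merging the resulting chains with a TokenFactor.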
8149     EVT ScalarVT = Value.getValueType().getScalarType(),
8150         ScalarMemVT = MemVT.getScalarType();
8151     unsigned Stride = ScalarMemVT.getStoreSize();
8152 
8153     SDValue Stores[4];
8154     for (unsigned Idx = 0; Idx < 4; ++Idx) {
8155       SDValue Ex = DAG.getNode(
8156           ISD::EXTRACT_VECTOR_ELT, dl, ScalarVT, Value,
8157           DAG.getConstant(Idx, dl, getVectorIdxTy(DAG.getDataLayout())));
8158       SDValue Store;
8159       if (ScalarVT != ScalarMemVT)
8160         Store =
8161             DAG.getTruncStore(StoreChain, dl, Ex, BasePtr,
8162                               SN->getPointerInfo().getWithOffset(Idx * Stride),
8163                               ScalarMemVT, MinAlign(Alignment, Idx * Stride),
8164                               SN->getMemOperand()->getFlags(), SN->getAAInfo());
8165       else
8166         Store = DAG.getStore(StoreChain, dl, Ex, BasePtr,
8167                              SN->getPointerInfo().getWithOffset(Idx * Stride),
8168                              MinAlign(Alignment, Idx * Stride),
8169                              SN->getMemOperand()->getFlags(), SN->getAAInfo());
8170 
8171       if (Idx == 0 && SN->isIndexed()) {
8172         assert(SN->getAddressingMode() == ISD::PRE_INC &&
8173                "Unknown addressing mode on vector store");
8174         Store = DAG.getIndexedStore(Store, dl, BasePtr, SN->getOffset(),
8175                                     SN->getAddressingMode());
8176       }
8177 
8178       BasePtr = DAG.getNode(ISD::ADD, dl, BasePtr.getValueType(), BasePtr,
8179                             DAG.getConstant(Stride, dl,
8180                                             BasePtr.getValueType()));
8181       Stores[Idx] = Store;
8182     }
8183 
8184     SDValue TF =  DAG.getNode(ISD::TokenFactor, dl, MVT::Other, Stores);
8185 
8186     if (SN->isIndexed()) {
8187       SDValue RetOps[] = { TF, Stores[0].getValue(1) };
8188       return DAG.getMergeValues(RetOps, dl);
8189     }
8190 
8191     return TF;
8192   }
8193 
8194   assert(SN->isUnindexed() && "Indexed v4i1 stores are not supported");
8195   assert(Value.getValueType() == MVT::v4i1 && "Unknown store to lower");
8196 
8197   // The values are now known to be -1 (false) or 1 (true). To convert this
8198   // into 0 (false) and 1 (true), add 1 and then divide by 2 (multiply by 0.5).
8199   // This can be done with an fma and the 0.5 constant: (V+1.0)*0.5 = 0.5*V+0.5
8200   Value = DAG.getNode(PPCISD::QBFLT, dl, MVT::v4f64, Value);
8201 
8202   // FIXME: We can make this an f32 vector, but the BUILD_VECTOR code needs to
8203   // understand how to form the extending load.
8204   SDValue FPHalfs = DAG.getConstantFP(0.5, dl, MVT::v4f64);
8205 
8206   Value = DAG.getNode(ISD::FMA, dl, MVT::v4f64, Value, FPHalfs, FPHalfs);
8207 
8208   // Now convert to an integer and store.
8209   Value = DAG.getNode(ISD::INTRINSIC_WO_CHAIN, dl, MVT::v4f64,
8210     DAG.getConstant(Intrinsic::ppc_qpx_qvfctiwu, dl, MVT::i32),
8211     Value);
8212 
8213   MachineFrameInfo &MFI = DAG.getMachineFunction().getFrameInfo();
8214   int FrameIdx = MFI.CreateStackObject(16, 16, false);
8215   MachinePointerInfo PtrInfo =
8216       MachinePointerInfo::getFixedStack(DAG.getMachineFunction(), FrameIdx);
8217   EVT PtrVT = getPointerTy(DAG.getDataLayout());
8218   SDValue FIdx = DAG.getFrameIndex(FrameIdx, PtrVT);
8219 
8220   SDValue Ops[] = {StoreChain,
8221                    DAG.getConstant(Intrinsic::ppc_qpx_qvstfiw, dl, MVT::i32),
8222                    Value, FIdx};
8223   SDVTList VTs = DAG.getVTList(/*chain*/ MVT::Other);
8224 
8225   StoreChain = DAG.getMemIntrinsicNode(ISD::INTRINSIC_VOID,
8226     dl, VTs, Ops, MVT::v4i32, PtrInfo);
8227 
8228   // Move data into the byte array.
8229   SDValue Loads[4], LoadChains[4];
8230   for (unsigned i = 0; i < 4; ++i) {
8231     unsigned Offset = 4*i;
8232     SDValue Idx = DAG.getConstant(Offset, dl, FIdx.getValueType());
8233     Idx = DAG.getNode(ISD::ADD, dl, FIdx.getValueType(), FIdx, Idx);
8234 
8235     Loads[i] = DAG.getLoad(MVT::i32, dl, StoreChain, Idx,
8236                            PtrInfo.getWithOffset(Offset));
8237     LoadChains[i] = Loads[i].getValue(1);
8238   }
8239 
8240   StoreChain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, LoadChains);
8241 
8242   SDValue Stores[4];
8243   for (unsigned i = 0; i < 4; ++i) {
8244     SDValue Idx = DAG.getConstant(i, dl, BasePtr.getValueType());
8245     Idx = DAG.getNode(ISD::ADD, dl, BasePtr.getValueType(), BasePtr, Idx);
8246 
8247     Stores[i] = DAG.getTruncStore(
8248         StoreChain, dl, Loads[i], Idx, SN->getPointerInfo().getWithOffset(i),
8249         MVT::i8, /* Alignment = */ 1, SN->getMemOperand()->getFlags(),
8250         SN->getAAInfo());
8251   }
8252 
8253   StoreChain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, Stores);
8254 
8255   return StoreChain;
8256 }
8257 
8258 SDValue PPCTargetLowering::LowerMUL(SDValue Op, SelectionDAG &DAG) const {
8259   SDLoc dl(Op);
8260   if (Op.getValueType() == MVT::v4i32) {
8261     SDValue LHS = Op.getOperand(0), RHS = Op.getOperand(1);
8262 
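    // Decompose each 32-bit multiply into 16-bit pieces: per lane,
    // a*b mod 2^32 = lo(a)*lo(b) + ((hi(a)*lo(b) + lo(a)*hi(b)) << 16), where
    // hi/lo denote the 16-bit halves.  vmulouh produces the lo*lo products
    // and vmsumuhm on the halfword-swapped RHS produces the two cross terms.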
8263     SDValue Zero  = BuildSplatI(  0, 1, MVT::v4i32, DAG, dl);
    // -16 acts here as a +16 shift/rotate amount (only the low 5 bits of each
    // element are used by vrlw/vslw).
    SDValue Neg16 = BuildSplatI(-16, 4, MVT::v4i32, DAG, dl);
8265 
8266     SDValue RHSSwap =   // = vrlw RHS, 16
8267       BuildIntrinsicOp(Intrinsic::ppc_altivec_vrlw, RHS, Neg16, DAG, dl);
8268 
8269     // Shrinkify inputs to v8i16.
8270     LHS = DAG.getNode(ISD::BITCAST, dl, MVT::v8i16, LHS);
8271     RHS = DAG.getNode(ISD::BITCAST, dl, MVT::v8i16, RHS);
8272     RHSSwap = DAG.getNode(ISD::BITCAST, dl, MVT::v8i16, RHSSwap);
8273 
8274     // Low parts multiplied together, generating 32-bit results (we ignore the
8275     // top parts).
8276     SDValue LoProd = BuildIntrinsicOp(Intrinsic::ppc_altivec_vmulouh,
8277                                         LHS, RHS, DAG, dl, MVT::v4i32);
8278 
8279     SDValue HiProd = BuildIntrinsicOp(Intrinsic::ppc_altivec_vmsumuhm,
8280                                       LHS, RHSSwap, Zero, DAG, dl, MVT::v4i32);
8281     // Shift the high parts up 16 bits.
8282     HiProd = BuildIntrinsicOp(Intrinsic::ppc_altivec_vslw, HiProd,
8283                               Neg16, DAG, dl);
8284     return DAG.getNode(ISD::ADD, dl, MVT::v4i32, LoProd, HiProd);
8285   } else if (Op.getValueType() == MVT::v8i16) {
8286     SDValue LHS = Op.getOperand(0), RHS = Op.getOperand(1);
8287 
8288     SDValue Zero = BuildSplatI(0, 1, MVT::v8i16, DAG, dl);
8289 
8290     return BuildIntrinsicOp(Intrinsic::ppc_altivec_vmladduhm,
8291                             LHS, RHS, Zero, DAG, dl);
8292   } else if (Op.getValueType() == MVT::v16i8) {
8293     SDValue LHS = Op.getOperand(0), RHS = Op.getOperand(1);
8294     bool isLittleEndian = Subtarget.isLittleEndian();
8295 
8296     // Multiply the even 8-bit parts, producing 16-bit sums.
8297     SDValue EvenParts = BuildIntrinsicOp(Intrinsic::ppc_altivec_vmuleub,
8298                                            LHS, RHS, DAG, dl, MVT::v8i16);
8299     EvenParts = DAG.getNode(ISD::BITCAST, dl, MVT::v16i8, EvenParts);
8300 
8301     // Multiply the odd 8-bit parts, producing 16-bit sums.
8302     SDValue OddParts = BuildIntrinsicOp(Intrinsic::ppc_altivec_vmuloub,
8303                                           LHS, RHS, DAG, dl, MVT::v8i16);
8304     OddParts = DAG.getNode(ISD::BITCAST, dl, MVT::v16i8, OddParts);
8305 
8306     // Merge the results together.  Because vmuleub and vmuloub are
8307     // instructions with a big-endian bias, we must reverse the
8308     // element numbering and reverse the meaning of "odd" and "even"
8309     // when generating little endian code.
8310     int Ops[16];
8311     for (unsigned i = 0; i != 8; ++i) {
8312       if (isLittleEndian) {
8313         Ops[i*2  ] = 2*i;
8314         Ops[i*2+1] = 2*i+16;
8315       } else {
8316         Ops[i*2  ] = 2*i+1;
8317         Ops[i*2+1] = 2*i+1+16;
8318       }
8319     }
8320     if (isLittleEndian)
8321       return DAG.getVectorShuffle(MVT::v16i8, dl, OddParts, EvenParts, Ops);
8322     else
8323       return DAG.getVectorShuffle(MVT::v16i8, dl, EvenParts, OddParts, Ops);
8324   } else {
8325     llvm_unreachable("Unknown mul to lower!");
8326   }
8327 }
8328 
8329 /// LowerOperation - Provide custom lowering hooks for some operations.
8330 ///
8331 SDValue PPCTargetLowering::LowerOperation(SDValue Op, SelectionDAG &DAG) const {
8332   switch (Op.getOpcode()) {
8333   default: llvm_unreachable("Wasn't expecting to be able to lower this!");
8334   case ISD::ConstantPool:       return LowerConstantPool(Op, DAG);
8335   case ISD::BlockAddress:       return LowerBlockAddress(Op, DAG);
8336   case ISD::GlobalAddress:      return LowerGlobalAddress(Op, DAG);
8337   case ISD::GlobalTLSAddress:   return LowerGlobalTLSAddress(Op, DAG);
8338   case ISD::JumpTable:          return LowerJumpTable(Op, DAG);
8339   case ISD::SETCC:              return LowerSETCC(Op, DAG);
8340   case ISD::INIT_TRAMPOLINE:    return LowerINIT_TRAMPOLINE(Op, DAG);
8341   case ISD::ADJUST_TRAMPOLINE:  return LowerADJUST_TRAMPOLINE(Op, DAG);
8342   case ISD::VASTART:
8343     return LowerVASTART(Op, DAG);
8344 
8345   case ISD::VAARG:
8346     return LowerVAARG(Op, DAG);
8347 
8348   case ISD::VACOPY:
8349     return LowerVACOPY(Op, DAG);
8350 
8351   case ISD::STACKRESTORE:
8352     return LowerSTACKRESTORE(Op, DAG);
8353 
8354   case ISD::DYNAMIC_STACKALLOC:
8355     return LowerDYNAMIC_STACKALLOC(Op, DAG);
8356 
8357   case ISD::GET_DYNAMIC_AREA_OFFSET:
8358     return LowerGET_DYNAMIC_AREA_OFFSET(Op, DAG);
8359 
8360   case ISD::EH_DWARF_CFA:
8361     return LowerEH_DWARF_CFA(Op, DAG);
8362 
8363   case ISD::EH_SJLJ_SETJMP:     return lowerEH_SJLJ_SETJMP(Op, DAG);
8364   case ISD::EH_SJLJ_LONGJMP:    return lowerEH_SJLJ_LONGJMP(Op, DAG);
8365 
8366   case ISD::LOAD:               return LowerLOAD(Op, DAG);
8367   case ISD::STORE:              return LowerSTORE(Op, DAG);
8368   case ISD::TRUNCATE:           return LowerTRUNCATE(Op, DAG);
8369   case ISD::SELECT_CC:          return LowerSELECT_CC(Op, DAG);
8370   case ISD::FP_TO_UINT:
8371   case ISD::FP_TO_SINT:         return LowerFP_TO_INT(Op, DAG,
8372                                                       SDLoc(Op));
8373   case ISD::UINT_TO_FP:
8374   case ISD::SINT_TO_FP:         return LowerINT_TO_FP(Op, DAG);
8375   case ISD::FLT_ROUNDS_:        return LowerFLT_ROUNDS_(Op, DAG);
8376 
8377   // Lower 64-bit shifts.
8378   case ISD::SHL_PARTS:          return LowerSHL_PARTS(Op, DAG);
8379   case ISD::SRL_PARTS:          return LowerSRL_PARTS(Op, DAG);
8380   case ISD::SRA_PARTS:          return LowerSRA_PARTS(Op, DAG);
8381 
8382   // Vector-related lowering.
8383   case ISD::BUILD_VECTOR:       return LowerBUILD_VECTOR(Op, DAG);
8384   case ISD::VECTOR_SHUFFLE:     return LowerVECTOR_SHUFFLE(Op, DAG);
8385   case ISD::INTRINSIC_WO_CHAIN: return LowerINTRINSIC_WO_CHAIN(Op, DAG);
8386   case ISD::SCALAR_TO_VECTOR:   return LowerSCALAR_TO_VECTOR(Op, DAG);
8387   case ISD::SIGN_EXTEND_INREG:  return LowerSIGN_EXTEND_INREG(Op, DAG);
8388   case ISD::EXTRACT_VECTOR_ELT: return LowerEXTRACT_VECTOR_ELT(Op, DAG);
8389   case ISD::INSERT_VECTOR_ELT:  return LowerINSERT_VECTOR_ELT(Op, DAG);
8390   case ISD::MUL:                return LowerMUL(Op, DAG);
8391 
8392   // For counter-based loop handling.
8393   case ISD::INTRINSIC_W_CHAIN:  return SDValue();
8394 
8395   // Frame & Return address.
8396   case ISD::RETURNADDR:         return LowerRETURNADDR(Op, DAG);
8397   case ISD::FRAMEADDR:          return LowerFRAMEADDR(Op, DAG);
8398   }
8399 }
8400 
8401 void PPCTargetLowering::ReplaceNodeResults(SDNode *N,
                                           SmallVectorImpl<SDValue> &Results,
8403                                            SelectionDAG &DAG) const {
8404   SDLoc dl(N);
8405   switch (N->getOpcode()) {
8406   default:
8407     llvm_unreachable("Do not know how to custom type legalize this operation!");
8408   case ISD::READCYCLECOUNTER: {
8409     SDVTList VTs = DAG.getVTList(MVT::i32, MVT::i32, MVT::Other);
8410     SDValue RTB = DAG.getNode(PPCISD::READ_TIME_BASE, dl, VTs, N->getOperand(0));
8411 
8412     Results.push_back(RTB);
8413     Results.push_back(RTB.getValue(1));
8414     Results.push_back(RTB.getValue(2));
8415     break;
8416   }
8417   case ISD::INTRINSIC_W_CHAIN: {
8418     if (cast<ConstantSDNode>(N->getOperand(1))->getZExtValue() !=
8419         Intrinsic::ppc_is_decremented_ctr_nonzero)
8420       break;
8421 
8422     assert(N->getValueType(0) == MVT::i1 &&
8423            "Unexpected result type for CTR decrement intrinsic");
8424     EVT SVT = getSetCCResultType(DAG.getDataLayout(), *DAG.getContext(),
8425                                  N->getValueType(0));
8426     SDVTList VTs = DAG.getVTList(SVT, MVT::Other);
8427     SDValue NewInt = DAG.getNode(N->getOpcode(), dl, VTs, N->getOperand(0),
8428                                  N->getOperand(1));
8429 
8430     Results.push_back(NewInt);
8431     Results.push_back(NewInt.getValue(1));
8432     break;
8433   }
8434   case ISD::VAARG: {
8435     if (!Subtarget.isSVR4ABI() || Subtarget.isPPC64())
8436       return;
8437 
8438     EVT VT = N->getValueType(0);
8439 
8440     if (VT == MVT::i64) {
8441       SDValue NewNode = LowerVAARG(SDValue(N, 1), DAG);
8442 
8443       Results.push_back(NewNode);
8444       Results.push_back(NewNode.getValue(1));
8445     }
8446     return;
8447   }
8448   case ISD::FP_ROUND_INREG: {
8449     assert(N->getValueType(0) == MVT::ppcf128);
8450     assert(N->getOperand(0).getValueType() == MVT::ppcf128);
8451     SDValue Lo = DAG.getNode(ISD::EXTRACT_ELEMENT, dl,
8452                              MVT::f64, N->getOperand(0),
8453                              DAG.getIntPtrConstant(0, dl));
8454     SDValue Hi = DAG.getNode(ISD::EXTRACT_ELEMENT, dl,
8455                              MVT::f64, N->getOperand(0),
8456                              DAG.getIntPtrConstant(1, dl));
8457 
8458     // Add the two halves of the long double in round-to-zero mode.
8459     SDValue FPreg = DAG.getNode(PPCISD::FADDRTZ, dl, MVT::f64, Lo, Hi);
8460 
8461     // We know the low half is about to be thrown away, so just use something
8462     // convenient.
8463     Results.push_back(DAG.getNode(ISD::BUILD_PAIR, dl, MVT::ppcf128,
8464                                 FPreg, FPreg));
8465     return;
8466   }
8467   case ISD::FP_TO_SINT:
8468   case ISD::FP_TO_UINT:
8469     // LowerFP_TO_INT() can only handle f32 and f64.
8470     if (N->getOperand(0).getValueType() == MVT::ppcf128)
8471       return;
8472     Results.push_back(LowerFP_TO_INT(SDValue(N, 0), DAG, dl));
8473     return;
8474   }
8475 }
8476 
8477 //===----------------------------------------------------------------------===//
8478 //  Other Lowering Code
8479 //===----------------------------------------------------------------------===//
8480 
8481 static Instruction* callIntrinsic(IRBuilder<> &Builder, Intrinsic::ID Id) {
8482   Module *M = Builder.GetInsertBlock()->getParent()->getParent();
8483   Function *Func = Intrinsic::getDeclaration(M, Id);
8484   return Builder.CreateCall(Func, {});
8485 }
8486 
// The mappings for emitLeadingFence/emitTrailingFence are taken from
// http://www.cl.cam.ac.uk/~pes20/cpp/cpp0xmappings.html
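//
// In short: sequentially consistent operations get a leading 'sync', other
// release-or-stronger operations get a leading 'lwsync', and
// acquire-or-stronger loads get a trailing 'lwsync'.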
8489 Instruction* PPCTargetLowering::emitLeadingFence(IRBuilder<> &Builder,
8490                                          AtomicOrdering Ord, bool IsStore,
8491                                          bool IsLoad) const {
8492   if (Ord == AtomicOrdering::SequentiallyConsistent)
8493     return callIntrinsic(Builder, Intrinsic::ppc_sync);
8494   if (isReleaseOrStronger(Ord))
8495     return callIntrinsic(Builder, Intrinsic::ppc_lwsync);
8496   return nullptr;
8497 }
8498 
8499 Instruction* PPCTargetLowering::emitTrailingFence(IRBuilder<> &Builder,
8500                                           AtomicOrdering Ord, bool IsStore,
8501                                           bool IsLoad) const {
8502   if (IsLoad && isAcquireOrStronger(Ord))
8503     return callIntrinsic(Builder, Intrinsic::ppc_lwsync);
8504   // FIXME: this is too conservative, a dependent branch + isync is enough.
8505   // See http://www.cl.cam.ac.uk/~pes20/cpp/cpp0xmappings.html and
8506   // http://www.rdrop.com/users/paulmck/scalability/paper/N2745r.2011.03.04a.html
8507   // and http://www.cl.cam.ac.uk/~pes20/cppppc/ for justification.
8508   return nullptr;
8509 }
8510 
8511 MachineBasicBlock *
8512 PPCTargetLowering::EmitAtomicBinary(MachineInstr &MI, MachineBasicBlock *BB,
8513                                     unsigned AtomicSize,
8514                                     unsigned BinOpcode,
8515                                     unsigned CmpOpcode,
8516                                     unsigned CmpPred) const {
8517   // This also handles ATOMIC_SWAP, indicated by BinOpcode==0.
8518   const TargetInstrInfo *TII = Subtarget.getInstrInfo();
8519 
8520   auto LoadMnemonic = PPC::LDARX;
8521   auto StoreMnemonic = PPC::STDCX;
8522   switch (AtomicSize) {
8523   default:
8524     llvm_unreachable("Unexpected size of atomic entity");
8525   case 1:
8526     LoadMnemonic = PPC::LBARX;
8527     StoreMnemonic = PPC::STBCX;
    assert(Subtarget.hasPartwordAtomics() &&
           "1-byte atomic ops require partword atomic support");
8529     break;
8530   case 2:
8531     LoadMnemonic = PPC::LHARX;
8532     StoreMnemonic = PPC::STHCX;
    assert(Subtarget.hasPartwordAtomics() &&
           "2-byte atomic ops require partword atomic support");
8534     break;
8535   case 4:
8536     LoadMnemonic = PPC::LWARX;
8537     StoreMnemonic = PPC::STWCX;
8538     break;
8539   case 8:
8540     LoadMnemonic = PPC::LDARX;
8541     StoreMnemonic = PPC::STDCX;
8542     break;
8543   }
8544 
8545   const BasicBlock *LLVM_BB = BB->getBasicBlock();
8546   MachineFunction *F = BB->getParent();
8547   MachineFunction::iterator It = ++BB->getIterator();
8548 
8549   unsigned dest = MI.getOperand(0).getReg();
8550   unsigned ptrA = MI.getOperand(1).getReg();
8551   unsigned ptrB = MI.getOperand(2).getReg();
8552   unsigned incr = MI.getOperand(3).getReg();
8553   DebugLoc dl = MI.getDebugLoc();
8554 
8555   MachineBasicBlock *loopMBB = F->CreateMachineBasicBlock(LLVM_BB);
8556   MachineBasicBlock *loop2MBB =
8557     CmpOpcode ? F->CreateMachineBasicBlock(LLVM_BB) : nullptr;
8558   MachineBasicBlock *exitMBB = F->CreateMachineBasicBlock(LLVM_BB);
8559   F->insert(It, loopMBB);
8560   if (CmpOpcode)
8561     F->insert(It, loop2MBB);
8562   F->insert(It, exitMBB);
8563   exitMBB->splice(exitMBB->begin(), BB,
8564                   std::next(MachineBasicBlock::iterator(MI)), BB->end());
8565   exitMBB->transferSuccessorsAndUpdatePHIs(BB);
8566 
8567   MachineRegisterInfo &RegInfo = F->getRegInfo();
  unsigned TmpReg = (!BinOpcode) ? incr :
    RegInfo.createVirtualRegister(AtomicSize == 8 ? &PPC::G8RCRegClass
                                                  : &PPC::GPRCRegClass);
8571 
8572   //  thisMBB:
8573   //   ...
8574   //   fallthrough --> loopMBB
8575   BB->addSuccessor(loopMBB);
8576 
8577   //  loopMBB:
8578   //   l[wd]arx dest, ptr
8579   //   add r0, dest, incr
8580   //   st[wd]cx. r0, ptr
8581   //   bne- loopMBB
8582   //   fallthrough --> exitMBB
8583 
8584   // For max/min...
8585   //  loopMBB:
8586   //   l[wd]arx dest, ptr
8587   //   cmpl?[wd] incr, dest
8588   //   bgt exitMBB
8589   //  loop2MBB:
8590   //   st[wd]cx. dest, ptr
8591   //   bne- loopMBB
8592   //   fallthrough --> exitMBB
8593 
8594   BB = loopMBB;
8595   BuildMI(BB, dl, TII->get(LoadMnemonic), dest)
8596     .addReg(ptrA).addReg(ptrB);
8597   if (BinOpcode)
8598     BuildMI(BB, dl, TII->get(BinOpcode), TmpReg).addReg(incr).addReg(dest);
8599   if (CmpOpcode) {
8600     // Signed comparisons of byte or halfword values must be sign-extended.
8601     if (CmpOpcode == PPC::CMPW && AtomicSize < 4) {
8602       unsigned ExtReg =  RegInfo.createVirtualRegister(&PPC::GPRCRegClass);
8603       BuildMI(BB, dl, TII->get(AtomicSize == 1 ? PPC::EXTSB : PPC::EXTSH),
8604               ExtReg).addReg(dest);
8605       BuildMI(BB, dl, TII->get(CmpOpcode), PPC::CR0)
8606         .addReg(incr).addReg(ExtReg);
8607     } else
8608       BuildMI(BB, dl, TII->get(CmpOpcode), PPC::CR0)
8609         .addReg(incr).addReg(dest);
8610 
8611     BuildMI(BB, dl, TII->get(PPC::BCC))
8612       .addImm(CmpPred).addReg(PPC::CR0).addMBB(exitMBB);
8613     BB->addSuccessor(loop2MBB);
8614     BB->addSuccessor(exitMBB);
8615     BB = loop2MBB;
8616   }
8617   BuildMI(BB, dl, TII->get(StoreMnemonic))
8618     .addReg(TmpReg).addReg(ptrA).addReg(ptrB);
8619   BuildMI(BB, dl, TII->get(PPC::BCC))
8620     .addImm(PPC::PRED_NE).addReg(PPC::CR0).addMBB(loopMBB);
8621   BB->addSuccessor(loopMBB);
8622   BB->addSuccessor(exitMBB);
8623 
8624   //  exitMBB:
8625   //   ...
8626   BB = exitMBB;
8627   return BB;
8628 }
8629 
8630 MachineBasicBlock *
8631 PPCTargetLowering::EmitPartwordAtomicBinary(MachineInstr &MI,
8632                                             MachineBasicBlock *BB,
8633                                             bool is8bit, // operation
8634                                             unsigned BinOpcode,
8635                                             unsigned CmpOpcode,
8636                                             unsigned CmpPred) const {
8637   // If we support part-word atomic mnemonics, just use them
8638   if (Subtarget.hasPartwordAtomics())
8639     return EmitAtomicBinary(MI, BB, is8bit ? 1 : 2, BinOpcode,
8640                             CmpOpcode, CmpPred);
8641 
8642   // This also handles ATOMIC_SWAP, indicated by BinOpcode==0.
8643   const TargetInstrInfo *TII = Subtarget.getInstrInfo();
  // In 64-bit mode we have to use 64-bit registers for addresses, even though
  // lwarx/stwcx. only operate on 32-bit values.  With the 32-bit atomics we
  // can use address registers without caring whether they're 32 or 64 bits
  // wide, but here we're doing actual arithmetic on the addresses.
8648   bool is64bit = Subtarget.isPPC64();
8649   bool isLittleEndian = Subtarget.isLittleEndian();
8650   unsigned ZeroReg = is64bit ? PPC::ZERO8 : PPC::ZERO;
8651 
8652   const BasicBlock *LLVM_BB = BB->getBasicBlock();
8653   MachineFunction *F = BB->getParent();
8654   MachineFunction::iterator It = ++BB->getIterator();
8655 
8656   unsigned dest = MI.getOperand(0).getReg();
8657   unsigned ptrA = MI.getOperand(1).getReg();
8658   unsigned ptrB = MI.getOperand(2).getReg();
8659   unsigned incr = MI.getOperand(3).getReg();
8660   DebugLoc dl = MI.getDebugLoc();
8661 
8662   MachineBasicBlock *loopMBB = F->CreateMachineBasicBlock(LLVM_BB);
8663   MachineBasicBlock *loop2MBB =
8664     CmpOpcode ? F->CreateMachineBasicBlock(LLVM_BB) : nullptr;
8665   MachineBasicBlock *exitMBB = F->CreateMachineBasicBlock(LLVM_BB);
8666   F->insert(It, loopMBB);
8667   if (CmpOpcode)
8668     F->insert(It, loop2MBB);
8669   F->insert(It, exitMBB);
8670   exitMBB->splice(exitMBB->begin(), BB,
8671                   std::next(MachineBasicBlock::iterator(MI)), BB->end());
8672   exitMBB->transferSuccessorsAndUpdatePHIs(BB);
8673 
8674   MachineRegisterInfo &RegInfo = F->getRegInfo();
8675   const TargetRegisterClass *RC = is64bit ? &PPC::G8RCRegClass
8676                                           : &PPC::GPRCRegClass;
8677   unsigned PtrReg = RegInfo.createVirtualRegister(RC);
8678   unsigned Shift1Reg = RegInfo.createVirtualRegister(RC);
8679   unsigned ShiftReg =
8680     isLittleEndian ? Shift1Reg : RegInfo.createVirtualRegister(RC);
8681   unsigned Incr2Reg = RegInfo.createVirtualRegister(RC);
8682   unsigned MaskReg = RegInfo.createVirtualRegister(RC);
8683   unsigned Mask2Reg = RegInfo.createVirtualRegister(RC);
8684   unsigned Mask3Reg = RegInfo.createVirtualRegister(RC);
8685   unsigned Tmp2Reg = RegInfo.createVirtualRegister(RC);
8686   unsigned Tmp3Reg = RegInfo.createVirtualRegister(RC);
8687   unsigned Tmp4Reg = RegInfo.createVirtualRegister(RC);
8688   unsigned TmpDestReg = RegInfo.createVirtualRegister(RC);
8689   unsigned Ptr1Reg;
8690   unsigned TmpReg = (!BinOpcode) ? Incr2Reg : RegInfo.createVirtualRegister(RC);
8691 
8692   //  thisMBB:
8693   //   ...
8694   //   fallthrough --> loopMBB
8695   BB->addSuccessor(loopMBB);
8696 
8697   // The 4-byte load must be aligned, while a char or short may be
8698   // anywhere in the word.  Hence all this nasty bookkeeping code.
8699   //   add ptr1, ptrA, ptrB [copy if ptrA==0]
8700   //   rlwinm shift1, ptr1, 3, 27, 28 [3, 27, 27]
8701   //   xori shift, shift1, 24 [16]
8702   //   rlwinm ptr, ptr1, 0, 0, 29
8703   //   slw incr2, incr, shift
8704   //   li mask2, 255 [li mask3, 0; ori mask2, mask3, 65535]
8705   //   slw mask, mask2, shift
8706   //  loopMBB:
8707   //   lwarx tmpDest, ptr
8708   //   add tmp, tmpDest, incr2
8709   //   andc tmp2, tmpDest, mask
8710   //   and tmp3, tmp, mask
8711   //   or tmp4, tmp3, tmp2
8712   //   stwcx. tmp4, ptr
8713   //   bne- loopMBB
8714   //   fallthrough --> exitMBB
8715   //   srw dest, tmpDest, shift
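  //
  // For example, an 8-bit operand at byte offset 2 within its aligned word
  // gives shift1 = 16; on big-endian shift = 16 ^ 24 = 8 (the byte occupies
  // bits 15:8 of the word), while on little-endian shift = shift1 = 16.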
8716   if (ptrA != ZeroReg) {
8717     Ptr1Reg = RegInfo.createVirtualRegister(RC);
8718     BuildMI(BB, dl, TII->get(is64bit ? PPC::ADD8 : PPC::ADD4), Ptr1Reg)
8719       .addReg(ptrA).addReg(ptrB);
8720   } else {
8721     Ptr1Reg = ptrB;
8722   }
8723   BuildMI(BB, dl, TII->get(PPC::RLWINM), Shift1Reg).addReg(Ptr1Reg)
8724       .addImm(3).addImm(27).addImm(is8bit ? 28 : 27);
8725   if (!isLittleEndian)
8726     BuildMI(BB, dl, TII->get(is64bit ? PPC::XORI8 : PPC::XORI), ShiftReg)
8727         .addReg(Shift1Reg).addImm(is8bit ? 24 : 16);
8728   if (is64bit)
8729     BuildMI(BB, dl, TII->get(PPC::RLDICR), PtrReg)
8730       .addReg(Ptr1Reg).addImm(0).addImm(61);
8731   else
8732     BuildMI(BB, dl, TII->get(PPC::RLWINM), PtrReg)
8733       .addReg(Ptr1Reg).addImm(0).addImm(0).addImm(29);
8734   BuildMI(BB, dl, TII->get(PPC::SLW), Incr2Reg)
8735       .addReg(incr).addReg(ShiftReg);
8736   if (is8bit)
8737     BuildMI(BB, dl, TII->get(PPC::LI), Mask2Reg).addImm(255);
8738   else {
8739     BuildMI(BB, dl, TII->get(PPC::LI), Mask3Reg).addImm(0);
8740     BuildMI(BB, dl, TII->get(PPC::ORI),Mask2Reg).addReg(Mask3Reg).addImm(65535);
8741   }
8742   BuildMI(BB, dl, TII->get(PPC::SLW), MaskReg)
8743       .addReg(Mask2Reg).addReg(ShiftReg);
8744 
8745   BB = loopMBB;
8746   BuildMI(BB, dl, TII->get(PPC::LWARX), TmpDestReg)
8747     .addReg(ZeroReg).addReg(PtrReg);
8748   if (BinOpcode)
8749     BuildMI(BB, dl, TII->get(BinOpcode), TmpReg)
8750       .addReg(Incr2Reg).addReg(TmpDestReg);
8751   BuildMI(BB, dl, TII->get(is64bit ? PPC::ANDC8 : PPC::ANDC), Tmp2Reg)
8752     .addReg(TmpDestReg).addReg(MaskReg);
8753   BuildMI(BB, dl, TII->get(is64bit ? PPC::AND8 : PPC::AND), Tmp3Reg)
8754     .addReg(TmpReg).addReg(MaskReg);
8755   if (CmpOpcode) {
8756     // For unsigned comparisons, we can directly compare the shifted values.
8757     // For signed comparisons we shift and sign extend.
8758     unsigned SReg = RegInfo.createVirtualRegister(RC);
8759     BuildMI(BB, dl, TII->get(is64bit ? PPC::AND8 : PPC::AND), SReg)
8760       .addReg(TmpDestReg).addReg(MaskReg);
8761     unsigned ValueReg = SReg;
8762     unsigned CmpReg = Incr2Reg;
8763     if (CmpOpcode == PPC::CMPW) {
8764       ValueReg = RegInfo.createVirtualRegister(RC);
8765       BuildMI(BB, dl, TII->get(PPC::SRW), ValueReg)
8766         .addReg(SReg).addReg(ShiftReg);
8767       unsigned ValueSReg = RegInfo.createVirtualRegister(RC);
8768       BuildMI(BB, dl, TII->get(is8bit ? PPC::EXTSB : PPC::EXTSH), ValueSReg)
8769         .addReg(ValueReg);
8770       ValueReg = ValueSReg;
8771       CmpReg = incr;
8772     }
8773     BuildMI(BB, dl, TII->get(CmpOpcode), PPC::CR0)
8774       .addReg(CmpReg).addReg(ValueReg);
8775     BuildMI(BB, dl, TII->get(PPC::BCC))
8776       .addImm(CmpPred).addReg(PPC::CR0).addMBB(exitMBB);
8777     BB->addSuccessor(loop2MBB);
8778     BB->addSuccessor(exitMBB);
8779     BB = loop2MBB;
8780   }
8781   BuildMI(BB, dl, TII->get(is64bit ? PPC::OR8 : PPC::OR), Tmp4Reg)
8782     .addReg(Tmp3Reg).addReg(Tmp2Reg);
8783   BuildMI(BB, dl, TII->get(PPC::STWCX))
8784     .addReg(Tmp4Reg).addReg(ZeroReg).addReg(PtrReg);
8785   BuildMI(BB, dl, TII->get(PPC::BCC))
8786     .addImm(PPC::PRED_NE).addReg(PPC::CR0).addMBB(loopMBB);
8787   BB->addSuccessor(loopMBB);
8788   BB->addSuccessor(exitMBB);
8789 
8790   //  exitMBB:
8791   //   ...
8792   BB = exitMBB;
8793   BuildMI(*BB, BB->begin(), dl, TII->get(PPC::SRW), dest).addReg(TmpDestReg)
8794     .addReg(ShiftReg);
8795   return BB;
8796 }
8797 
8798 llvm::MachineBasicBlock *
8799 PPCTargetLowering::emitEHSjLjSetJmp(MachineInstr &MI,
8800                                     MachineBasicBlock *MBB) const {
8801   DebugLoc DL = MI.getDebugLoc();
8802   const TargetInstrInfo *TII = Subtarget.getInstrInfo();
8803 
8804   MachineFunction *MF = MBB->getParent();
8805   MachineRegisterInfo &MRI = MF->getRegInfo();
8806 
8807   const BasicBlock *BB = MBB->getBasicBlock();
8808   MachineFunction::iterator I = ++MBB->getIterator();
8809 
8810   // Memory Reference
8811   MachineInstr::mmo_iterator MMOBegin = MI.memoperands_begin();
8812   MachineInstr::mmo_iterator MMOEnd = MI.memoperands_end();
8813 
8814   unsigned DstReg = MI.getOperand(0).getReg();
8815   const TargetRegisterClass *RC = MRI.getRegClass(DstReg);
8816   assert(RC->hasType(MVT::i32) && "Invalid destination!");
8817   unsigned mainDstReg = MRI.createVirtualRegister(RC);
8818   unsigned restoreDstReg = MRI.createVirtualRegister(RC);
8819 
8820   MVT PVT = getPointerTy(MF->getDataLayout());
8821   assert((PVT == MVT::i64 || PVT == MVT::i32) &&
8822          "Invalid Pointer Size!");
8823   // For v = setjmp(buf), we generate
8824   //
8825   // thisMBB:
8826   //  SjLjSetup mainMBB
8827   //  bl mainMBB
8828   //  v_restore = 1
8829   //  b sinkMBB
8830   //
8831   // mainMBB:
8832   //  buf[LabelOffset] = LR
8833   //  v_main = 0
8834   //
8835   // sinkMBB:
8836   //  v = phi(main, restore)
8837   //
8838 
8839   MachineBasicBlock *thisMBB = MBB;
8840   MachineBasicBlock *mainMBB = MF->CreateMachineBasicBlock(BB);
8841   MachineBasicBlock *sinkMBB = MF->CreateMachineBasicBlock(BB);
8842   MF->insert(I, mainMBB);
8843   MF->insert(I, sinkMBB);
8844 
8845   MachineInstrBuilder MIB;
8846 
8847   // Transfer the remainder of BB and its successor edges to sinkMBB.
8848   sinkMBB->splice(sinkMBB->begin(), MBB,
8849                   std::next(MachineBasicBlock::iterator(MI)), MBB->end());
8850   sinkMBB->transferSuccessorsAndUpdatePHIs(MBB);
8851 
8852   // Note that the structure of the jmp_buf used here is not compatible
8853   // with that used by libc, and is not designed to be. Specifically, it
8854   // stores only those 'reserved' registers that LLVM does not otherwise
8855   // understand how to spill. Also, by convention, by the time this
8856   // intrinsic is called, Clang has already stored the frame address in the
8857   // first slot of the buffer and stack address in the third. Following the
8858   // X86 target code, we'll store the jump address in the second slot. We also
8859   // need to save the TOC pointer (R2) to handle jumps between shared
8860   // libraries, and that will be stored in the fourth slot. The thread
8861   // identifier (R13) is not affected.
8862 
8863   // thisMBB:
8864   const int64_t LabelOffset = 1 * PVT.getStoreSize();
8865   const int64_t TOCOffset   = 3 * PVT.getStoreSize();
8866   const int64_t BPOffset    = 4 * PVT.getStoreSize();
8867 
  // Prepare the IP (the jump target for the corresponding longjmp) in a reg.
8869   const TargetRegisterClass *PtrRC = getRegClassFor(PVT);
8870   unsigned LabelReg = MRI.createVirtualRegister(PtrRC);
8871   unsigned BufReg = MI.getOperand(1).getReg();
8872 
8873   if (Subtarget.isPPC64() && Subtarget.isSVR4ABI()) {
8874     setUsesTOCBasePtr(*MBB->getParent());
8875     MIB = BuildMI(*thisMBB, MI, DL, TII->get(PPC::STD))
8876             .addReg(PPC::X2)
8877             .addImm(TOCOffset)
8878             .addReg(BufReg);
8879     MIB.setMemRefs(MMOBegin, MMOEnd);
8880   }
8881 
8882   // Naked functions never have a base pointer, and so we use r1. For all
  // other functions, this decision must be deferred until PEI.
8884   unsigned BaseReg;
8885   if (MF->getFunction()->hasFnAttribute(Attribute::Naked))
8886     BaseReg = Subtarget.isPPC64() ? PPC::X1 : PPC::R1;
8887   else
8888     BaseReg = Subtarget.isPPC64() ? PPC::BP8 : PPC::BP;
8889 
8890   MIB = BuildMI(*thisMBB, MI, DL,
8891                 TII->get(Subtarget.isPPC64() ? PPC::STD : PPC::STW))
8892             .addReg(BaseReg)
8893             .addImm(BPOffset)
8894             .addReg(BufReg);
8895   MIB.setMemRefs(MMOBegin, MMOEnd);
8896 
8897   // Setup
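  // The BCLalways below is decorated with a no-preserved-registers mask, so
  // any value live across the setjmp is forced to memory rather than left in
  // a register that the longjmp path would not restore.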
8898   MIB = BuildMI(*thisMBB, MI, DL, TII->get(PPC::BCLalways)).addMBB(mainMBB);
8899   const PPCRegisterInfo *TRI = Subtarget.getRegisterInfo();
8900   MIB.addRegMask(TRI->getNoPreservedMask());
8901 
8902   BuildMI(*thisMBB, MI, DL, TII->get(PPC::LI), restoreDstReg).addImm(1);
8903 
8904   MIB = BuildMI(*thisMBB, MI, DL, TII->get(PPC::EH_SjLj_Setup))
8905           .addMBB(mainMBB);
8906   MIB = BuildMI(*thisMBB, MI, DL, TII->get(PPC::B)).addMBB(sinkMBB);
8907 
8908   thisMBB->addSuccessor(mainMBB, BranchProbability::getZero());
8909   thisMBB->addSuccessor(sinkMBB, BranchProbability::getOne());
8910 
8911   // mainMBB:
8912   //  mainDstReg = 0
8913   MIB =
8914       BuildMI(mainMBB, DL,
8915               TII->get(Subtarget.isPPC64() ? PPC::MFLR8 : PPC::MFLR), LabelReg);
8916 
8917   // Store IP
8918   if (Subtarget.isPPC64()) {
8919     MIB = BuildMI(mainMBB, DL, TII->get(PPC::STD))
8920             .addReg(LabelReg)
8921             .addImm(LabelOffset)
8922             .addReg(BufReg);
8923   } else {
8924     MIB = BuildMI(mainMBB, DL, TII->get(PPC::STW))
8925             .addReg(LabelReg)
8926             .addImm(LabelOffset)
8927             .addReg(BufReg);
8928   }
8929 
8930   MIB.setMemRefs(MMOBegin, MMOEnd);
8931 
8932   BuildMI(mainMBB, DL, TII->get(PPC::LI), mainDstReg).addImm(0);
8933   mainMBB->addSuccessor(sinkMBB);
8934 
8935   // sinkMBB:
8936   BuildMI(*sinkMBB, sinkMBB->begin(), DL,
8937           TII->get(PPC::PHI), DstReg)
8938     .addReg(mainDstReg).addMBB(mainMBB)
8939     .addReg(restoreDstReg).addMBB(thisMBB);
8940 
8941   MI.eraseFromParent();
8942   return sinkMBB;
8943 }
8944 
8945 MachineBasicBlock *
8946 PPCTargetLowering::emitEHSjLjLongJmp(MachineInstr &MI,
8947                                      MachineBasicBlock *MBB) const {
8948   DebugLoc DL = MI.getDebugLoc();
8949   const TargetInstrInfo *TII = Subtarget.getInstrInfo();
8950 
8951   MachineFunction *MF = MBB->getParent();
8952   MachineRegisterInfo &MRI = MF->getRegInfo();
8953 
8954   // Memory Reference
8955   MachineInstr::mmo_iterator MMOBegin = MI.memoperands_begin();
8956   MachineInstr::mmo_iterator MMOEnd = MI.memoperands_end();
8957 
8958   MVT PVT = getPointerTy(MF->getDataLayout());
8959   assert((PVT == MVT::i64 || PVT == MVT::i32) &&
8960          "Invalid Pointer Size!");
8961 
8962   const TargetRegisterClass *RC =
8963     (PVT == MVT::i64) ? &PPC::G8RCRegClass : &PPC::GPRCRegClass;
8964   unsigned Tmp = MRI.createVirtualRegister(RC);
  // Since FP is only updated here but NOT referenced, it's treated as a GPR.
8966   unsigned FP  = (PVT == MVT::i64) ? PPC::X31 : PPC::R31;
8967   unsigned SP  = (PVT == MVT::i64) ? PPC::X1 : PPC::R1;
8968   unsigned BP =
8969       (PVT == MVT::i64)
8970           ? PPC::X30
8971           : (Subtarget.isSVR4ABI() && isPositionIndependent() ? PPC::R29
8972                                                               : PPC::R30);
8973 
8974   MachineInstrBuilder MIB;
8975 
8976   const int64_t LabelOffset = 1 * PVT.getStoreSize();
8977   const int64_t SPOffset    = 2 * PVT.getStoreSize();
8978   const int64_t TOCOffset   = 3 * PVT.getStoreSize();
8979   const int64_t BPOffset    = 4 * PVT.getStoreSize();
8980 
8981   unsigned BufReg = MI.getOperand(0).getReg();
8982 
8983   // Reload FP (the jumped-to function may not have had a
8984   // frame pointer, and if so, then its r31 will be restored
8985   // as necessary).
8986   if (PVT == MVT::i64) {
8987     MIB = BuildMI(*MBB, MI, DL, TII->get(PPC::LD), FP)
8988             .addImm(0)
8989             .addReg(BufReg);
8990   } else {
8991     MIB = BuildMI(*MBB, MI, DL, TII->get(PPC::LWZ), FP)
8992             .addImm(0)
8993             .addReg(BufReg);
8994   }
8995   MIB.setMemRefs(MMOBegin, MMOEnd);
8996 
8997   // Reload IP
8998   if (PVT == MVT::i64) {
8999     MIB = BuildMI(*MBB, MI, DL, TII->get(PPC::LD), Tmp)
9000             .addImm(LabelOffset)
9001             .addReg(BufReg);
9002   } else {
9003     MIB = BuildMI(*MBB, MI, DL, TII->get(PPC::LWZ), Tmp)
9004             .addImm(LabelOffset)
9005             .addReg(BufReg);
9006   }
9007   MIB.setMemRefs(MMOBegin, MMOEnd);
9008 
9009   // Reload SP
9010   if (PVT == MVT::i64) {
9011     MIB = BuildMI(*MBB, MI, DL, TII->get(PPC::LD), SP)
9012             .addImm(SPOffset)
9013             .addReg(BufReg);
9014   } else {
9015     MIB = BuildMI(*MBB, MI, DL, TII->get(PPC::LWZ), SP)
9016             .addImm(SPOffset)
9017             .addReg(BufReg);
9018   }
9019   MIB.setMemRefs(MMOBegin, MMOEnd);
9020 
9021   // Reload BP
9022   if (PVT == MVT::i64) {
9023     MIB = BuildMI(*MBB, MI, DL, TII->get(PPC::LD), BP)
9024             .addImm(BPOffset)
9025             .addReg(BufReg);
9026   } else {
9027     MIB = BuildMI(*MBB, MI, DL, TII->get(PPC::LWZ), BP)
9028             .addImm(BPOffset)
9029             .addReg(BufReg);
9030   }
9031   MIB.setMemRefs(MMOBegin, MMOEnd);
9032 
9033   // Reload TOC
9034   if (PVT == MVT::i64 && Subtarget.isSVR4ABI()) {
9035     setUsesTOCBasePtr(*MBB->getParent());
9036     MIB = BuildMI(*MBB, MI, DL, TII->get(PPC::LD), PPC::X2)
9037             .addImm(TOCOffset)
9038             .addReg(BufReg);
9039 
9040     MIB.setMemRefs(MMOBegin, MMOEnd);
9041   }
9042 
9043   // Jump
9044   BuildMI(*MBB, MI, DL,
9045           TII->get(PVT == MVT::i64 ? PPC::MTCTR8 : PPC::MTCTR)).addReg(Tmp);
9046   BuildMI(*MBB, MI, DL, TII->get(PVT == MVT::i64 ? PPC::BCTR8 : PPC::BCTR));
9047 
9048   MI.eraseFromParent();
9049   return MBB;
9050 }
9051 
9052 MachineBasicBlock *
9053 PPCTargetLowering::EmitInstrWithCustomInserter(MachineInstr &MI,
9054                                                MachineBasicBlock *BB) const {
9055   if (MI.getOpcode() == TargetOpcode::STACKMAP ||
9056       MI.getOpcode() == TargetOpcode::PATCHPOINT) {
9057     if (Subtarget.isPPC64() && Subtarget.isSVR4ABI() &&
9058         MI.getOpcode() == TargetOpcode::PATCHPOINT) {
9059       // Call lowering should have added an r2 operand to indicate a dependence
      // on the TOC base pointer value. It can't, however, because there is no
      // way to mark the dependence as implicit there, and the stackmap code
      // would confuse it with a regular operand. Instead, add the dependence
9063       // here.
9064       setUsesTOCBasePtr(*BB->getParent());
9065       MI.addOperand(MachineOperand::CreateReg(PPC::X2, false, true));
9066     }
9067 
9068     return emitPatchPoint(MI, BB);
9069   }
9070 
9071   if (MI.getOpcode() == PPC::EH_SjLj_SetJmp32 ||
9072       MI.getOpcode() == PPC::EH_SjLj_SetJmp64) {
9073     return emitEHSjLjSetJmp(MI, BB);
9074   } else if (MI.getOpcode() == PPC::EH_SjLj_LongJmp32 ||
9075              MI.getOpcode() == PPC::EH_SjLj_LongJmp64) {
9076     return emitEHSjLjLongJmp(MI, BB);
9077   }
9078 
9079   const TargetInstrInfo *TII = Subtarget.getInstrInfo();
9080 
9081   // To "insert" these instructions we actually have to insert their
9082   // control-flow patterns.
9083   const BasicBlock *LLVM_BB = BB->getBasicBlock();
9084   MachineFunction::iterator It = ++BB->getIterator();
9085 
9086   MachineFunction *F = BB->getParent();
9087 
9088   if (Subtarget.hasISEL() &&
9089       (MI.getOpcode() == PPC::SELECT_CC_I4 ||
9090        MI.getOpcode() == PPC::SELECT_CC_I8 ||
9091        MI.getOpcode() == PPC::SELECT_I4 || MI.getOpcode() == PPC::SELECT_I8)) {
9092     SmallVector<MachineOperand, 2> Cond;
9093     if (MI.getOpcode() == PPC::SELECT_CC_I4 ||
9094         MI.getOpcode() == PPC::SELECT_CC_I8)
9095       Cond.push_back(MI.getOperand(4));
9096     else
9097       Cond.push_back(MachineOperand::CreateImm(PPC::PRED_BIT_SET));
9098     Cond.push_back(MI.getOperand(1));
9099 
9100     DebugLoc dl = MI.getDebugLoc();
9101     TII->insertSelect(*BB, MI, dl, MI.getOperand(0).getReg(), Cond,
9102                       MI.getOperand(2).getReg(), MI.getOperand(3).getReg());
9103   } else if (MI.getOpcode() == PPC::SELECT_CC_I4 ||
9104              MI.getOpcode() == PPC::SELECT_CC_I8 ||
9105              MI.getOpcode() == PPC::SELECT_CC_F4 ||
9106              MI.getOpcode() == PPC::SELECT_CC_F8 ||
9107              MI.getOpcode() == PPC::SELECT_CC_QFRC ||
9108              MI.getOpcode() == PPC::SELECT_CC_QSRC ||
9109              MI.getOpcode() == PPC::SELECT_CC_QBRC ||
9110              MI.getOpcode() == PPC::SELECT_CC_VRRC ||
9111              MI.getOpcode() == PPC::SELECT_CC_VSFRC ||
9112              MI.getOpcode() == PPC::SELECT_CC_VSSRC ||
9113              MI.getOpcode() == PPC::SELECT_CC_VSRC ||
9114              MI.getOpcode() == PPC::SELECT_I4 ||
9115              MI.getOpcode() == PPC::SELECT_I8 ||
9116              MI.getOpcode() == PPC::SELECT_F4 ||
9117              MI.getOpcode() == PPC::SELECT_F8 ||
9118              MI.getOpcode() == PPC::SELECT_QFRC ||
9119              MI.getOpcode() == PPC::SELECT_QSRC ||
9120              MI.getOpcode() == PPC::SELECT_QBRC ||
9121              MI.getOpcode() == PPC::SELECT_VRRC ||
9122              MI.getOpcode() == PPC::SELECT_VSFRC ||
9123              MI.getOpcode() == PPC::SELECT_VSSRC ||
9124              MI.getOpcode() == PPC::SELECT_VSRC) {
9125     // The incoming instruction knows the destination vreg to set, the
9126     // condition code register to branch on, the true/false values to
9127     // select between, and a branch opcode to use.
9128 
9129     //  thisMBB:
9130     //  ...
9131     //   TrueVal = ...
9132     //   cmpTY ccX, r1, r2
9133     //   bCC copy1MBB
9134     //   fallthrough --> copy0MBB
9135     MachineBasicBlock *thisMBB = BB;
9136     MachineBasicBlock *copy0MBB = F->CreateMachineBasicBlock(LLVM_BB);
9137     MachineBasicBlock *sinkMBB = F->CreateMachineBasicBlock(LLVM_BB);
9138     DebugLoc dl = MI.getDebugLoc();
9139     F->insert(It, copy0MBB);
9140     F->insert(It, sinkMBB);
9141 
9142     // Transfer the remainder of BB and its successor edges to sinkMBB.
9143     sinkMBB->splice(sinkMBB->begin(), BB,
9144                     std::next(MachineBasicBlock::iterator(MI)), BB->end());
9145     sinkMBB->transferSuccessorsAndUpdatePHIs(BB);
9146 
9147     // Next, add the true and fallthrough blocks as its successors.
9148     BB->addSuccessor(copy0MBB);
9149     BB->addSuccessor(sinkMBB);
9150 
9151     if (MI.getOpcode() == PPC::SELECT_I4 || MI.getOpcode() == PPC::SELECT_I8 ||
9152         MI.getOpcode() == PPC::SELECT_F4 || MI.getOpcode() == PPC::SELECT_F8 ||
9153         MI.getOpcode() == PPC::SELECT_QFRC ||
9154         MI.getOpcode() == PPC::SELECT_QSRC ||
9155         MI.getOpcode() == PPC::SELECT_QBRC ||
9156         MI.getOpcode() == PPC::SELECT_VRRC ||
9157         MI.getOpcode() == PPC::SELECT_VSFRC ||
9158         MI.getOpcode() == PPC::SELECT_VSSRC ||
9159         MI.getOpcode() == PPC::SELECT_VSRC) {
9160       BuildMI(BB, dl, TII->get(PPC::BC))
9161           .addReg(MI.getOperand(1).getReg())
9162           .addMBB(sinkMBB);
9163     } else {
9164       unsigned SelectPred = MI.getOperand(4).getImm();
9165       BuildMI(BB, dl, TII->get(PPC::BCC))
9166           .addImm(SelectPred)
9167           .addReg(MI.getOperand(1).getReg())
9168           .addMBB(sinkMBB);
9169     }
9170 
9171     //  copy0MBB:
9172     //   %FalseValue = ...
9173     //   # fallthrough to sinkMBB
9174     BB = copy0MBB;
9175 
9176     // Update machine-CFG edges
9177     BB->addSuccessor(sinkMBB);
9178 
9179     //  sinkMBB:
9180     //   %Result = phi [ %FalseValue, copy0MBB ], [ %TrueValue, thisMBB ]
9181     //  ...
9182     BB = sinkMBB;
9183     BuildMI(*BB, BB->begin(), dl, TII->get(PPC::PHI), MI.getOperand(0).getReg())
9184         .addReg(MI.getOperand(3).getReg())
9185         .addMBB(copy0MBB)
9186         .addReg(MI.getOperand(2).getReg())
9187         .addMBB(thisMBB);
9188   } else if (MI.getOpcode() == PPC::ReadTB) {
9189     // To read the 64-bit time-base register on a 32-bit target, we read the
9190     // two halves. Should the counter have wrapped while it was being read, we
9191     // need to try again.
9192     // ...
9193     // readLoop:
9194     // mfspr Rx,TBU # load from TBU
9195     // mfspr Ry,TB  # load from TB
9196     // mfspr Rz,TBU # load from TBU
9197     // cmpw crX,Rx,Rz # check if 'old'='new'
9198     // bne readLoop   # branch if they're not equal
9199     // ...
9200 
9201     MachineBasicBlock *readMBB = F->CreateMachineBasicBlock(LLVM_BB);
9202     MachineBasicBlock *sinkMBB = F->CreateMachineBasicBlock(LLVM_BB);
9203     DebugLoc dl = MI.getDebugLoc();
9204     F->insert(It, readMBB);
9205     F->insert(It, sinkMBB);
9206 
9207     // Transfer the remainder of BB and its successor edges to sinkMBB.
9208     sinkMBB->splice(sinkMBB->begin(), BB,
9209                     std::next(MachineBasicBlock::iterator(MI)), BB->end());
9210     sinkMBB->transferSuccessorsAndUpdatePHIs(BB);
9211 
9212     BB->addSuccessor(readMBB);
9213     BB = readMBB;
9214 
9215     MachineRegisterInfo &RegInfo = F->getRegInfo();
9216     unsigned ReadAgainReg = RegInfo.createVirtualRegister(&PPC::GPRCRegClass);
9217     unsigned LoReg = MI.getOperand(0).getReg();
9218     unsigned HiReg = MI.getOperand(1).getReg();
9219 
    BuildMI(BB, dl, TII->get(PPC::MFSPR), HiReg).addImm(269);        // TBU
    BuildMI(BB, dl, TII->get(PPC::MFSPR), LoReg).addImm(268);        // TB (lower)
    BuildMI(BB, dl, TII->get(PPC::MFSPR), ReadAgainReg).addImm(269); // TBU again
9223 
9224     unsigned CmpReg = RegInfo.createVirtualRegister(&PPC::CRRCRegClass);
9225 
9226     BuildMI(BB, dl, TII->get(PPC::CMPW), CmpReg)
9227       .addReg(HiReg).addReg(ReadAgainReg);
9228     BuildMI(BB, dl, TII->get(PPC::BCC))
9229       .addImm(PPC::PRED_NE).addReg(CmpReg).addMBB(readMBB);
9230 
9231     BB->addSuccessor(readMBB);
9232     BB->addSuccessor(sinkMBB);
9233   } else if (MI.getOpcode() == PPC::ATOMIC_LOAD_ADD_I8)
9234     BB = EmitPartwordAtomicBinary(MI, BB, true, PPC::ADD4);
9235   else if (MI.getOpcode() == PPC::ATOMIC_LOAD_ADD_I16)
9236     BB = EmitPartwordAtomicBinary(MI, BB, false, PPC::ADD4);
9237   else if (MI.getOpcode() == PPC::ATOMIC_LOAD_ADD_I32)
9238     BB = EmitAtomicBinary(MI, BB, 4, PPC::ADD4);
9239   else if (MI.getOpcode() == PPC::ATOMIC_LOAD_ADD_I64)
9240     BB = EmitAtomicBinary(MI, BB, 8, PPC::ADD8);
9241 
9242   else if (MI.getOpcode() == PPC::ATOMIC_LOAD_AND_I8)
9243     BB = EmitPartwordAtomicBinary(MI, BB, true, PPC::AND);
9244   else if (MI.getOpcode() == PPC::ATOMIC_LOAD_AND_I16)
9245     BB = EmitPartwordAtomicBinary(MI, BB, false, PPC::AND);
9246   else if (MI.getOpcode() == PPC::ATOMIC_LOAD_AND_I32)
9247     BB = EmitAtomicBinary(MI, BB, 4, PPC::AND);
9248   else if (MI.getOpcode() == PPC::ATOMIC_LOAD_AND_I64)
9249     BB = EmitAtomicBinary(MI, BB, 8, PPC::AND8);
9250 
9251   else if (MI.getOpcode() == PPC::ATOMIC_LOAD_OR_I8)
9252     BB = EmitPartwordAtomicBinary(MI, BB, true, PPC::OR);
9253   else if (MI.getOpcode() == PPC::ATOMIC_LOAD_OR_I16)
9254     BB = EmitPartwordAtomicBinary(MI, BB, false, PPC::OR);
9255   else if (MI.getOpcode() == PPC::ATOMIC_LOAD_OR_I32)
9256     BB = EmitAtomicBinary(MI, BB, 4, PPC::OR);
9257   else if (MI.getOpcode() == PPC::ATOMIC_LOAD_OR_I64)
9258     BB = EmitAtomicBinary(MI, BB, 8, PPC::OR8);
9259 
9260   else if (MI.getOpcode() == PPC::ATOMIC_LOAD_XOR_I8)
9261     BB = EmitPartwordAtomicBinary(MI, BB, true, PPC::XOR);
9262   else if (MI.getOpcode() == PPC::ATOMIC_LOAD_XOR_I16)
9263     BB = EmitPartwordAtomicBinary(MI, BB, false, PPC::XOR);
9264   else if (MI.getOpcode() == PPC::ATOMIC_LOAD_XOR_I32)
9265     BB = EmitAtomicBinary(MI, BB, 4, PPC::XOR);
9266   else if (MI.getOpcode() == PPC::ATOMIC_LOAD_XOR_I64)
9267     BB = EmitAtomicBinary(MI, BB, 8, PPC::XOR8);
9268 
9269   else if (MI.getOpcode() == PPC::ATOMIC_LOAD_NAND_I8)
9270     BB = EmitPartwordAtomicBinary(MI, BB, true, PPC::NAND);
9271   else if (MI.getOpcode() == PPC::ATOMIC_LOAD_NAND_I16)
9272     BB = EmitPartwordAtomicBinary(MI, BB, false, PPC::NAND);
9273   else if (MI.getOpcode() == PPC::ATOMIC_LOAD_NAND_I32)
9274     BB = EmitAtomicBinary(MI, BB, 4, PPC::NAND);
9275   else if (MI.getOpcode() == PPC::ATOMIC_LOAD_NAND_I64)
9276     BB = EmitAtomicBinary(MI, BB, 8, PPC::NAND8);
9277 
9278   else if (MI.getOpcode() == PPC::ATOMIC_LOAD_SUB_I8)
9279     BB = EmitPartwordAtomicBinary(MI, BB, true, PPC::SUBF);
9280   else if (MI.getOpcode() == PPC::ATOMIC_LOAD_SUB_I16)
9281     BB = EmitPartwordAtomicBinary(MI, BB, false, PPC::SUBF);
9282   else if (MI.getOpcode() == PPC::ATOMIC_LOAD_SUB_I32)
9283     BB = EmitAtomicBinary(MI, BB, 4, PPC::SUBF);
9284   else if (MI.getOpcode() == PPC::ATOMIC_LOAD_SUB_I64)
9285     BB = EmitAtomicBinary(MI, BB, 8, PPC::SUBF8);
9286 
9287   else if (MI.getOpcode() == PPC::ATOMIC_LOAD_MIN_I8)
9288     BB = EmitPartwordAtomicBinary(MI, BB, true, 0, PPC::CMPW, PPC::PRED_GE);
9289   else if (MI.getOpcode() == PPC::ATOMIC_LOAD_MIN_I16)
9290     BB = EmitPartwordAtomicBinary(MI, BB, false, 0, PPC::CMPW, PPC::PRED_GE);
9291   else if (MI.getOpcode() == PPC::ATOMIC_LOAD_MIN_I32)
9292     BB = EmitAtomicBinary(MI, BB, 4, 0, PPC::CMPW, PPC::PRED_GE);
9293   else if (MI.getOpcode() == PPC::ATOMIC_LOAD_MIN_I64)
9294     BB = EmitAtomicBinary(MI, BB, 8, 0, PPC::CMPD, PPC::PRED_GE);
9295 
9296   else if (MI.getOpcode() == PPC::ATOMIC_LOAD_MAX_I8)
9297     BB = EmitPartwordAtomicBinary(MI, BB, true, 0, PPC::CMPW, PPC::PRED_LE);
9298   else if (MI.getOpcode() == PPC::ATOMIC_LOAD_MAX_I16)
9299     BB = EmitPartwordAtomicBinary(MI, BB, false, 0, PPC::CMPW, PPC::PRED_LE);
9300   else if (MI.getOpcode() == PPC::ATOMIC_LOAD_MAX_I32)
9301     BB = EmitAtomicBinary(MI, BB, 4, 0, PPC::CMPW, PPC::PRED_LE);
9302   else if (MI.getOpcode() == PPC::ATOMIC_LOAD_MAX_I64)
9303     BB = EmitAtomicBinary(MI, BB, 8, 0, PPC::CMPD, PPC::PRED_LE);
9304 
9305   else if (MI.getOpcode() == PPC::ATOMIC_LOAD_UMIN_I8)
9306     BB = EmitPartwordAtomicBinary(MI, BB, true, 0, PPC::CMPLW, PPC::PRED_GE);
9307   else if (MI.getOpcode() == PPC::ATOMIC_LOAD_UMIN_I16)
9308     BB = EmitPartwordAtomicBinary(MI, BB, false, 0, PPC::CMPLW, PPC::PRED_GE);
9309   else if (MI.getOpcode() == PPC::ATOMIC_LOAD_UMIN_I32)
9310     BB = EmitAtomicBinary(MI, BB, 4, 0, PPC::CMPLW, PPC::PRED_GE);
9311   else if (MI.getOpcode() == PPC::ATOMIC_LOAD_UMIN_I64)
9312     BB = EmitAtomicBinary(MI, BB, 8, 0, PPC::CMPLD, PPC::PRED_GE);
9313 
9314   else if (MI.getOpcode() == PPC::ATOMIC_LOAD_UMAX_I8)
9315     BB = EmitPartwordAtomicBinary(MI, BB, true, 0, PPC::CMPLW, PPC::PRED_LE);
9316   else if (MI.getOpcode() == PPC::ATOMIC_LOAD_UMAX_I16)
9317     BB = EmitPartwordAtomicBinary(MI, BB, false, 0, PPC::CMPLW, PPC::PRED_LE);
9318   else if (MI.getOpcode() == PPC::ATOMIC_LOAD_UMAX_I32)
9319     BB = EmitAtomicBinary(MI, BB, 4, 0, PPC::CMPLW, PPC::PRED_LE);
9320   else if (MI.getOpcode() == PPC::ATOMIC_LOAD_UMAX_I64)
9321     BB = EmitAtomicBinary(MI, BB, 8, 0, PPC::CMPLD, PPC::PRED_LE);
9322 
9323   else if (MI.getOpcode() == PPC::ATOMIC_SWAP_I8)
9324     BB = EmitPartwordAtomicBinary(MI, BB, true, 0);
9325   else if (MI.getOpcode() == PPC::ATOMIC_SWAP_I16)
9326     BB = EmitPartwordAtomicBinary(MI, BB, false, 0);
9327   else if (MI.getOpcode() == PPC::ATOMIC_SWAP_I32)
9328     BB = EmitAtomicBinary(MI, BB, 4, 0);
9329   else if (MI.getOpcode() == PPC::ATOMIC_SWAP_I64)
9330     BB = EmitAtomicBinary(MI, BB, 8, 0);
9331 
9332   else if (MI.getOpcode() == PPC::ATOMIC_CMP_SWAP_I32 ||
9333            MI.getOpcode() == PPC::ATOMIC_CMP_SWAP_I64 ||
9334            (Subtarget.hasPartwordAtomics() &&
9335             MI.getOpcode() == PPC::ATOMIC_CMP_SWAP_I8) ||
9336            (Subtarget.hasPartwordAtomics() &&
9337             MI.getOpcode() == PPC::ATOMIC_CMP_SWAP_I16)) {
9338     bool is64bit = MI.getOpcode() == PPC::ATOMIC_CMP_SWAP_I64;
9339 
9340     auto LoadMnemonic = PPC::LDARX;
9341     auto StoreMnemonic = PPC::STDCX;
9342     switch (MI.getOpcode()) {
9343     default:
9344       llvm_unreachable("Compare and swap of unknown size");
9345     case PPC::ATOMIC_CMP_SWAP_I8:
9346       LoadMnemonic = PPC::LBARX;
9347       StoreMnemonic = PPC::STBCX;
9348       assert(Subtarget.hasPartwordAtomics() && "No support partword atomics.");
9349       break;
9350     case PPC::ATOMIC_CMP_SWAP_I16:
9351       LoadMnemonic = PPC::LHARX;
9352       StoreMnemonic = PPC::STHCX;
9353       assert(Subtarget.hasPartwordAtomics() && "No support partword atomics.");
9354       break;
9355     case PPC::ATOMIC_CMP_SWAP_I32:
9356       LoadMnemonic = PPC::LWARX;
9357       StoreMnemonic = PPC::STWCX;
9358       break;
9359     case PPC::ATOMIC_CMP_SWAP_I64:
9360       LoadMnemonic = PPC::LDARX;
9361       StoreMnemonic = PPC::STDCX;
9362       break;
9363     }
9364     unsigned dest = MI.getOperand(0).getReg();
9365     unsigned ptrA = MI.getOperand(1).getReg();
9366     unsigned ptrB = MI.getOperand(2).getReg();
9367     unsigned oldval = MI.getOperand(3).getReg();
9368     unsigned newval = MI.getOperand(4).getReg();
9369     DebugLoc dl = MI.getDebugLoc();
9370 
9371     MachineBasicBlock *loop1MBB = F->CreateMachineBasicBlock(LLVM_BB);
9372     MachineBasicBlock *loop2MBB = F->CreateMachineBasicBlock(LLVM_BB);
9373     MachineBasicBlock *midMBB = F->CreateMachineBasicBlock(LLVM_BB);
9374     MachineBasicBlock *exitMBB = F->CreateMachineBasicBlock(LLVM_BB);
9375     F->insert(It, loop1MBB);
9376     F->insert(It, loop2MBB);
9377     F->insert(It, midMBB);
9378     F->insert(It, exitMBB);
9379     exitMBB->splice(exitMBB->begin(), BB,
9380                     std::next(MachineBasicBlock::iterator(MI)), BB->end());
9381     exitMBB->transferSuccessorsAndUpdatePHIs(BB);
9382 
9383     //  thisMBB:
9384     //   ...
9385     //   fallthrough --> loopMBB
9386     BB->addSuccessor(loop1MBB);
9387 
9388     // loop1MBB:
9389     //   l[bhwd]arx dest, ptr
9390     //   cmp[wd] dest, oldval
9391     //   bne- midMBB
9392     // loop2MBB:
9393     //   st[bhwd]cx. newval, ptr
9394     //   bne- loopMBB
9395     //   b exitBB
9396     // midMBB:
9397     //   st[bhwd]cx. dest, ptr
9398     // exitBB:
9399     BB = loop1MBB;
9400     BuildMI(BB, dl, TII->get(LoadMnemonic), dest)
9401       .addReg(ptrA).addReg(ptrB);
9402     BuildMI(BB, dl, TII->get(is64bit ? PPC::CMPD : PPC::CMPW), PPC::CR0)
9403       .addReg(oldval).addReg(dest);
9404     BuildMI(BB, dl, TII->get(PPC::BCC))
9405       .addImm(PPC::PRED_NE).addReg(PPC::CR0).addMBB(midMBB);
9406     BB->addSuccessor(loop2MBB);
9407     BB->addSuccessor(midMBB);
9408 
9409     BB = loop2MBB;
9410     BuildMI(BB, dl, TII->get(StoreMnemonic))
9411       .addReg(newval).addReg(ptrA).addReg(ptrB);
9412     BuildMI(BB, dl, TII->get(PPC::BCC))
9413       .addImm(PPC::PRED_NE).addReg(PPC::CR0).addMBB(loop1MBB);
9414     BuildMI(BB, dl, TII->get(PPC::B)).addMBB(exitMBB);
9415     BB->addSuccessor(loop1MBB);
9416     BB->addSuccessor(exitMBB);
9417 
9418     BB = midMBB;
9419     BuildMI(BB, dl, TII->get(StoreMnemonic))
9420       .addReg(dest).addReg(ptrA).addReg(ptrB);
9421     BB->addSuccessor(exitMBB);
9422 
9423     //  exitMBB:
9424     //   ...
9425     BB = exitMBB;
9426   } else if (MI.getOpcode() == PPC::ATOMIC_CMP_SWAP_I8 ||
9427              MI.getOpcode() == PPC::ATOMIC_CMP_SWAP_I16) {
9428     // We must use 64-bit registers for addresses when targeting 64-bit,
9429     // since we're actually doing arithmetic on them.  Other registers
9430     // can be 32-bit.
9431     bool is64bit = Subtarget.isPPC64();
9432     bool isLittleEndian = Subtarget.isLittleEndian();
9433     bool is8bit = MI.getOpcode() == PPC::ATOMIC_CMP_SWAP_I8;
9434 
9435     unsigned dest = MI.getOperand(0).getReg();
9436     unsigned ptrA = MI.getOperand(1).getReg();
9437     unsigned ptrB = MI.getOperand(2).getReg();
9438     unsigned oldval = MI.getOperand(3).getReg();
9439     unsigned newval = MI.getOperand(4).getReg();
9440     DebugLoc dl = MI.getDebugLoc();
9441 
9442     MachineBasicBlock *loop1MBB = F->CreateMachineBasicBlock(LLVM_BB);
9443     MachineBasicBlock *loop2MBB = F->CreateMachineBasicBlock(LLVM_BB);
9444     MachineBasicBlock *midMBB = F->CreateMachineBasicBlock(LLVM_BB);
9445     MachineBasicBlock *exitMBB = F->CreateMachineBasicBlock(LLVM_BB);
9446     F->insert(It, loop1MBB);
9447     F->insert(It, loop2MBB);
9448     F->insert(It, midMBB);
9449     F->insert(It, exitMBB);
9450     exitMBB->splice(exitMBB->begin(), BB,
9451                     std::next(MachineBasicBlock::iterator(MI)), BB->end());
9452     exitMBB->transferSuccessorsAndUpdatePHIs(BB);
9453 
9454     MachineRegisterInfo &RegInfo = F->getRegInfo();
9455     const TargetRegisterClass *RC = is64bit ? &PPC::G8RCRegClass
9456                                             : &PPC::GPRCRegClass;
9457     unsigned PtrReg = RegInfo.createVirtualRegister(RC);
9458     unsigned Shift1Reg = RegInfo.createVirtualRegister(RC);
9459     unsigned ShiftReg =
9460       isLittleEndian ? Shift1Reg : RegInfo.createVirtualRegister(RC);
9461     unsigned NewVal2Reg = RegInfo.createVirtualRegister(RC);
9462     unsigned NewVal3Reg = RegInfo.createVirtualRegister(RC);
9463     unsigned OldVal2Reg = RegInfo.createVirtualRegister(RC);
9464     unsigned OldVal3Reg = RegInfo.createVirtualRegister(RC);
9465     unsigned MaskReg = RegInfo.createVirtualRegister(RC);
9466     unsigned Mask2Reg = RegInfo.createVirtualRegister(RC);
9467     unsigned Mask3Reg = RegInfo.createVirtualRegister(RC);
9468     unsigned Tmp2Reg = RegInfo.createVirtualRegister(RC);
9469     unsigned Tmp4Reg = RegInfo.createVirtualRegister(RC);
9470     unsigned TmpDestReg = RegInfo.createVirtualRegister(RC);
9471     unsigned Ptr1Reg;
9472     unsigned TmpReg = RegInfo.createVirtualRegister(RC);
9473     unsigned ZeroReg = is64bit ? PPC::ZERO8 : PPC::ZERO;
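    // When r0 appears as the RA operand of an indexed load or store it reads
    // as the constant zero, so ZeroReg gives a plain [0 + PtrReg] access.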
9474     //  thisMBB:
9475     //   ...
9476     //   fallthrough --> loopMBB
9477     BB->addSuccessor(loop1MBB);
9478 
9479     // The 4-byte load must be aligned, while a char or short may be
9480     // anywhere in the word.  Hence all this nasty bookkeeping code.
9481     //   add ptr1, ptrA, ptrB [copy if ptrA==0]
9482     //   rlwinm shift1, ptr1, 3, 27, 28 [3, 27, 27]
9483     //   xori shift, shift1, 24 [16]
9484     //   rlwinm ptr, ptr1, 0, 0, 29
9485     //   slw newval2, newval, shift
9486     //   slw oldval2, oldval,shift
9487     //   li mask2, 255 [li mask3, 0; ori mask2, mask3, 65535]
9488     //   slw mask, mask2, shift
9489     //   and newval3, newval2, mask
9490     //   and oldval3, oldval2, mask
9491     // loop1MBB:
9492     //   lwarx tmpDest, ptr
9493     //   and tmp, tmpDest, mask
9494     //   cmpw tmp, oldval3
9495     //   bne- midMBB
9496     // loop2MBB:
9497     //   andc tmp2, tmpDest, mask
9498     //   or tmp4, tmp2, newval3
9499     //   stwcx. tmp4, ptr
9500     //   bne- loop1MBB
9501     //   b exitBB
9502     // midMBB:
9503     //   stwcx. tmpDest, ptr
9504     // exitBB:
9505     //   srw dest, tmpDest, shift
9506     if (ptrA != ZeroReg) {
9507       Ptr1Reg = RegInfo.createVirtualRegister(RC);
9508       BuildMI(BB, dl, TII->get(is64bit ? PPC::ADD8 : PPC::ADD4), Ptr1Reg)
9509         .addReg(ptrA).addReg(ptrB);
9510     } else {
9511       Ptr1Reg = ptrB;
9512     }
9513     BuildMI(BB, dl, TII->get(PPC::RLWINM), Shift1Reg).addReg(Ptr1Reg)
9514         .addImm(3).addImm(27).addImm(is8bit ? 28 : 27);
9515     if (!isLittleEndian)
9516       BuildMI(BB, dl, TII->get(is64bit ? PPC::XORI8 : PPC::XORI), ShiftReg)
9517           .addReg(Shift1Reg).addImm(is8bit ? 24 : 16);
9518     if (is64bit)
9519       BuildMI(BB, dl, TII->get(PPC::RLDICR), PtrReg)
9520         .addReg(Ptr1Reg).addImm(0).addImm(61);
9521     else
9522       BuildMI(BB, dl, TII->get(PPC::RLWINM), PtrReg)
9523         .addReg(Ptr1Reg).addImm(0).addImm(0).addImm(29);
9524     BuildMI(BB, dl, TII->get(PPC::SLW), NewVal2Reg)
9525         .addReg(newval).addReg(ShiftReg);
9526     BuildMI(BB, dl, TII->get(PPC::SLW), OldVal2Reg)
9527         .addReg(oldval).addReg(ShiftReg);
9528     if (is8bit)
9529       BuildMI(BB, dl, TII->get(PPC::LI), Mask2Reg).addImm(255);
9530     else {
9531       BuildMI(BB, dl, TII->get(PPC::LI), Mask3Reg).addImm(0);
9532       BuildMI(BB, dl, TII->get(PPC::ORI), Mask2Reg)
9533         .addReg(Mask3Reg).addImm(65535);
9534     }
9535     BuildMI(BB, dl, TII->get(PPC::SLW), MaskReg)
9536         .addReg(Mask2Reg).addReg(ShiftReg);
9537     BuildMI(BB, dl, TII->get(PPC::AND), NewVal3Reg)
9538         .addReg(NewVal2Reg).addReg(MaskReg);
9539     BuildMI(BB, dl, TII->get(PPC::AND), OldVal3Reg)
9540         .addReg(OldVal2Reg).addReg(MaskReg);
9541 
9542     BB = loop1MBB;
9543     BuildMI(BB, dl, TII->get(PPC::LWARX), TmpDestReg)
9544         .addReg(ZeroReg).addReg(PtrReg);
    BuildMI(BB, dl, TII->get(PPC::AND), TmpReg)
9546         .addReg(TmpDestReg).addReg(MaskReg);
9547     BuildMI(BB, dl, TII->get(PPC::CMPW), PPC::CR0)
9548         .addReg(TmpReg).addReg(OldVal3Reg);
9549     BuildMI(BB, dl, TII->get(PPC::BCC))
9550         .addImm(PPC::PRED_NE).addReg(PPC::CR0).addMBB(midMBB);
9551     BB->addSuccessor(loop2MBB);
9552     BB->addSuccessor(midMBB);
9553 
9554     BB = loop2MBB;
    BuildMI(BB, dl, TII->get(PPC::ANDC), Tmp2Reg)
9556         .addReg(TmpDestReg).addReg(MaskReg);
    BuildMI(BB, dl, TII->get(PPC::OR), Tmp4Reg)
9558         .addReg(Tmp2Reg).addReg(NewVal3Reg);
9559     BuildMI(BB, dl, TII->get(PPC::STWCX)).addReg(Tmp4Reg)
9560         .addReg(ZeroReg).addReg(PtrReg);
9561     BuildMI(BB, dl, TII->get(PPC::BCC))
9562       .addImm(PPC::PRED_NE).addReg(PPC::CR0).addMBB(loop1MBB);
9563     BuildMI(BB, dl, TII->get(PPC::B)).addMBB(exitMBB);
9564     BB->addSuccessor(loop1MBB);
9565     BB->addSuccessor(exitMBB);
9566 
9567     BB = midMBB;
9568     BuildMI(BB, dl, TII->get(PPC::STWCX)).addReg(TmpDestReg)
9569       .addReg(ZeroReg).addReg(PtrReg);
9570     BB->addSuccessor(exitMBB);
9571 
9572     //  exitMBB:
9573     //   ...
9574     BB = exitMBB;
    BuildMI(*BB, BB->begin(), dl, TII->get(PPC::SRW), dest).addReg(TmpReg)
9576       .addReg(ShiftReg);
9577   } else if (MI.getOpcode() == PPC::FADDrtz) {
9578     // This pseudo performs an FADD with rounding mode temporarily forced
9579     // to round-to-zero.  We emit this via custom inserter since the FPSCR
9580     // is not modeled at the SelectionDAG level.
9581     unsigned Dest = MI.getOperand(0).getReg();
9582     unsigned Src1 = MI.getOperand(1).getReg();
9583     unsigned Src2 = MI.getOperand(2).getReg();
9584     DebugLoc dl = MI.getDebugLoc();
9585 
9586     MachineRegisterInfo &RegInfo = F->getRegInfo();
9587     unsigned MFFSReg = RegInfo.createVirtualRegister(&PPC::F8RCRegClass);
9588 
9589     // Save FPSCR value.
9590     BuildMI(*BB, MI, dl, TII->get(PPC::MFFS), MFFSReg);
9591 
9592     // Set rounding mode to round-to-zero.
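    // The RN (rounding control) field occupies FPSCR bits 30-31; setting it
    // to 0b01 selects round toward zero.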
9593     BuildMI(*BB, MI, dl, TII->get(PPC::MTFSB1)).addImm(31);
9594     BuildMI(*BB, MI, dl, TII->get(PPC::MTFSB0)).addImm(30);
9595 
9596     // Perform addition.
9597     BuildMI(*BB, MI, dl, TII->get(PPC::FADD), Dest).addReg(Src1).addReg(Src2);
9598 
9599     // Restore FPSCR value.
9600     BuildMI(*BB, MI, dl, TII->get(PPC::MTFSFb)).addImm(1).addReg(MFFSReg);
9601   } else if (MI.getOpcode() == PPC::ANDIo_1_EQ_BIT ||
9602              MI.getOpcode() == PPC::ANDIo_1_GT_BIT ||
9603              MI.getOpcode() == PPC::ANDIo_1_EQ_BIT8 ||
9604              MI.getOpcode() == PPC::ANDIo_1_GT_BIT8) {
9605     unsigned Opcode = (MI.getOpcode() == PPC::ANDIo_1_EQ_BIT8 ||
9606                        MI.getOpcode() == PPC::ANDIo_1_GT_BIT8)
9607                           ? PPC::ANDIo8
9608                           : PPC::ANDIo;
9609     bool isEQ = (MI.getOpcode() == PPC::ANDIo_1_EQ_BIT ||
9610                  MI.getOpcode() == PPC::ANDIo_1_EQ_BIT8);
9611 
9612     MachineRegisterInfo &RegInfo = F->getRegInfo();
9613     unsigned Dest = RegInfo.createVirtualRegister(Opcode == PPC::ANDIo ?
9614                                                   &PPC::GPRCRegClass :
9615                                                   &PPC::G8RCRegClass);
9616 
9617     DebugLoc dl = MI.getDebugLoc();
9618     BuildMI(*BB, MI, dl, TII->get(Opcode), Dest)
9619         .addReg(MI.getOperand(1).getReg())
9620         .addImm(1);
9621     BuildMI(*BB, MI, dl, TII->get(TargetOpcode::COPY),
9622             MI.getOperand(0).getReg())
9623         .addReg(isEQ ? PPC::CR0EQ : PPC::CR0GT);
9624   } else if (MI.getOpcode() == PPC::TCHECK_RET) {
9625     DebugLoc Dl = MI.getDebugLoc();
9626     MachineRegisterInfo &RegInfo = F->getRegInfo();
9627     unsigned CRReg = RegInfo.createVirtualRegister(&PPC::CRRCRegClass);
9628     BuildMI(*BB, MI, Dl, TII->get(PPC::TCHECK), CRReg);
9629     return BB;
9630   } else {
9631     llvm_unreachable("Unexpected instr type to insert");
9632   }
9633 
9634   MI.eraseFromParent(); // The pseudo instruction is gone now.
9635   return BB;
9636 }
9637 
9638 //===----------------------------------------------------------------------===//
9639 // Target Optimization Hooks
9640 //===----------------------------------------------------------------------===//
9641 
9642 static std::string getRecipOp(const char *Base, EVT VT) {
9643   std::string RecipOp(Base);
9644   if (VT.getScalarType() == MVT::f64)
9645     RecipOp += "d";
9646   else
9647     RecipOp += "f";
9648 
9649   if (VT.isVector())
9650     RecipOp = "vec-" + RecipOp;
9651 
9652   return RecipOp;
9653 }
9654 
9655 SDValue PPCTargetLowering::getRsqrtEstimate(SDValue Operand,
9656                                             DAGCombinerInfo &DCI,
9657                                             unsigned &RefinementSteps,
9658                                             bool &UseOneConstNR) const {
9659   EVT VT = Operand.getValueType();
9660   if ((VT == MVT::f32 && Subtarget.hasFRSQRTES()) ||
9661       (VT == MVT::f64 && Subtarget.hasFRSQRTE()) ||
9662       (VT == MVT::v4f32 && Subtarget.hasAltivec()) ||
9663       (VT == MVT::v2f64 && Subtarget.hasVSX()) ||
9664       (VT == MVT::v4f32 && Subtarget.hasQPX()) ||
9665       (VT == MVT::v4f64 && Subtarget.hasQPX())) {
9666     TargetRecip Recips = getTargetRecipForFunc(DCI.DAG.getMachineFunction());
9667     std::string RecipOp = getRecipOp("sqrt", VT);
9668     if (!Recips.isEnabled(RecipOp))
9669       return SDValue();
9670 
9671     RefinementSteps = Recips.getRefinementSteps(RecipOp);
9672     UseOneConstNR = true;
9673     return DCI.DAG.getNode(PPCISD::FRSQRTE, SDLoc(Operand), VT, Operand);
9674   }
9675   return SDValue();
9676 }
9677 
9678 SDValue PPCTargetLowering::getRecipEstimate(SDValue Operand,
9679                                             DAGCombinerInfo &DCI,
9680                                             unsigned &RefinementSteps) const {
9681   EVT VT = Operand.getValueType();
9682   if ((VT == MVT::f32 && Subtarget.hasFRES()) ||
9683       (VT == MVT::f64 && Subtarget.hasFRE()) ||
9684       (VT == MVT::v4f32 && Subtarget.hasAltivec()) ||
9685       (VT == MVT::v2f64 && Subtarget.hasVSX()) ||
9686       (VT == MVT::v4f32 && Subtarget.hasQPX()) ||
9687       (VT == MVT::v4f64 && Subtarget.hasQPX())) {
9688     TargetRecip Recips = getTargetRecipForFunc(DCI.DAG.getMachineFunction());
9689     std::string RecipOp = getRecipOp("div", VT);
9690     if (!Recips.isEnabled(RecipOp))
9691       return SDValue();
9692 
9693     RefinementSteps = Recips.getRefinementSteps(RecipOp);
9694     return DCI.DAG.getNode(PPCISD::FRE, SDLoc(Operand), VT, Operand);
9695   }
9696   return SDValue();
9697 }
9698 
9699 unsigned PPCTargetLowering::combineRepeatedFPDivisors() const {
  // Note: This functionality is used only when unsafe-fp-math is enabled. On
  // cores with reciprocal estimates (which are used when unsafe-fp-math is
  // enabled for division), it is redundant with the default combiner logic
  // (once the division -> reciprocal/multiply transformation has taken
  // place). As a result, this matters more for older cores than for newer
  // ones.
9706 
  // Combine multiple FDIVs with the same divisor into multiple FMULs by the
  // reciprocal if there are two or more FDIVs (for embedded cores with only
  // one FP pipeline) or three or more FDIVs (for generic out-of-order cores).
9710   switch (Subtarget.getDarwinDirective()) {
9711   default:
9712     return 3;
9713   case PPC::DIR_440:
9714   case PPC::DIR_A2:
9715   case PPC::DIR_E500mc:
9716   case PPC::DIR_E5500:
9717     return 2;
9718   }
9719 }
9720 
9721 // isConsecutiveLSLoc needs to work even if all adds have not yet been
9722 // collapsed, and so we need to look through chains of them.
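// For example, (add (add X, 8), 16) accumulates Offset += 24 and leaves Base
// pointing at X.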
9723 static void getBaseWithConstantOffset(SDValue Loc, SDValue &Base,
9724                                      int64_t& Offset, SelectionDAG &DAG) {
9725   if (DAG.isBaseWithConstantOffset(Loc)) {
9726     Base = Loc.getOperand(0);
9727     Offset += cast<ConstantSDNode>(Loc.getOperand(1))->getSExtValue();
9728 
9729     // The base might itself be a base plus an offset, and if so, accumulate
9730     // that as well.
9731     getBaseWithConstantOffset(Loc.getOperand(0), Base, Offset, DAG);
9732   }
9733 }
9734 
9735 static bool isConsecutiveLSLoc(SDValue Loc, EVT VT, LSBaseSDNode *Base,
9736                             unsigned Bytes, int Dist,
9737                             SelectionDAG &DAG) {
9738   if (VT.getSizeInBits() / 8 != Bytes)
9739     return false;
9740 
9741   SDValue BaseLoc = Base->getBasePtr();
9742   if (Loc.getOpcode() == ISD::FrameIndex) {
9743     if (BaseLoc.getOpcode() != ISD::FrameIndex)
9744       return false;
9745     const MachineFrameInfo &MFI = DAG.getMachineFunction().getFrameInfo();
9746     int FI  = cast<FrameIndexSDNode>(Loc)->getIndex();
9747     int BFI = cast<FrameIndexSDNode>(BaseLoc)->getIndex();
9748     int FS  = MFI.getObjectSize(FI);
9749     int BFS = MFI.getObjectSize(BFI);
9750     if (FS != BFS || FS != (int)Bytes) return false;
9751     return MFI.getObjectOffset(FI) == (MFI.getObjectOffset(BFI) + Dist*Bytes);
9752   }
9753 
9754   SDValue Base1 = Loc, Base2 = BaseLoc;
9755   int64_t Offset1 = 0, Offset2 = 0;
9756   getBaseWithConstantOffset(Loc, Base1, Offset1, DAG);
9757   getBaseWithConstantOffset(BaseLoc, Base2, Offset2, DAG);
9758   if (Base1 == Base2 && Offset1 == (Offset2 + Dist * Bytes))
9759     return true;
9760 
9761   const TargetLowering &TLI = DAG.getTargetLoweringInfo();
9762   const GlobalValue *GV1 = nullptr;
9763   const GlobalValue *GV2 = nullptr;
9764   Offset1 = 0;
9765   Offset2 = 0;
9766   bool isGA1 = TLI.isGAPlusOffset(Loc.getNode(), GV1, Offset1);
9767   bool isGA2 = TLI.isGAPlusOffset(BaseLoc.getNode(), GV2, Offset2);
9768   if (isGA1 && isGA2 && GV1 == GV2)
9769     return Offset1 == (Offset2 + Dist*Bytes);
9770   return false;
9771 }
9772 
9773 // Like SelectionDAG::isConsecutiveLoad, but also works for stores, and does
9774 // not enforce equality of the chain operands.
9775 static bool isConsecutiveLS(SDNode *N, LSBaseSDNode *Base,
9776                             unsigned Bytes, int Dist,
9777                             SelectionDAG &DAG) {
9778   if (LSBaseSDNode *LS = dyn_cast<LSBaseSDNode>(N)) {
9779     EVT VT = LS->getMemoryVT();
9780     SDValue Loc = LS->getBasePtr();
9781     return isConsecutiveLSLoc(Loc, VT, Base, Bytes, Dist, DAG);
9782   }
9783 
9784   if (N->getOpcode() == ISD::INTRINSIC_W_CHAIN) {
9785     EVT VT;
9786     switch (cast<ConstantSDNode>(N->getOperand(1))->getZExtValue()) {
9787     default: return false;
9788     case Intrinsic::ppc_qpx_qvlfd:
9789     case Intrinsic::ppc_qpx_qvlfda:
9790       VT = MVT::v4f64;
9791       break;
9792     case Intrinsic::ppc_qpx_qvlfs:
9793     case Intrinsic::ppc_qpx_qvlfsa:
9794       VT = MVT::v4f32;
9795       break;
9796     case Intrinsic::ppc_qpx_qvlfcd:
9797     case Intrinsic::ppc_qpx_qvlfcda:
9798       VT = MVT::v2f64;
9799       break;
9800     case Intrinsic::ppc_qpx_qvlfcs:
9801     case Intrinsic::ppc_qpx_qvlfcsa:
9802       VT = MVT::v2f32;
9803       break;
9804     case Intrinsic::ppc_qpx_qvlfiwa:
9805     case Intrinsic::ppc_qpx_qvlfiwz:
9806     case Intrinsic::ppc_altivec_lvx:
9807     case Intrinsic::ppc_altivec_lvxl:
9808     case Intrinsic::ppc_vsx_lxvw4x:
9809       VT = MVT::v4i32;
9810       break;
9811     case Intrinsic::ppc_vsx_lxvd2x:
9812       VT = MVT::v2f64;
9813       break;
9814     case Intrinsic::ppc_altivec_lvebx:
9815       VT = MVT::i8;
9816       break;
9817     case Intrinsic::ppc_altivec_lvehx:
9818       VT = MVT::i16;
9819       break;
9820     case Intrinsic::ppc_altivec_lvewx:
9821       VT = MVT::i32;
9822       break;
9823     }
9824 
9825     return isConsecutiveLSLoc(N->getOperand(2), VT, Base, Bytes, Dist, DAG);
9826   }
9827 
9828   if (N->getOpcode() == ISD::INTRINSIC_VOID) {
9829     EVT VT;
9830     switch (cast<ConstantSDNode>(N->getOperand(1))->getZExtValue()) {
9831     default: return false;
9832     case Intrinsic::ppc_qpx_qvstfd:
9833     case Intrinsic::ppc_qpx_qvstfda:
9834       VT = MVT::v4f64;
9835       break;
9836     case Intrinsic::ppc_qpx_qvstfs:
9837     case Intrinsic::ppc_qpx_qvstfsa:
9838       VT = MVT::v4f32;
9839       break;
9840     case Intrinsic::ppc_qpx_qvstfcd:
9841     case Intrinsic::ppc_qpx_qvstfcda:
9842       VT = MVT::v2f64;
9843       break;
9844     case Intrinsic::ppc_qpx_qvstfcs:
9845     case Intrinsic::ppc_qpx_qvstfcsa:
9846       VT = MVT::v2f32;
9847       break;
9848     case Intrinsic::ppc_qpx_qvstfiw:
9849     case Intrinsic::ppc_qpx_qvstfiwa:
9850     case Intrinsic::ppc_altivec_stvx:
9851     case Intrinsic::ppc_altivec_stvxl:
9852     case Intrinsic::ppc_vsx_stxvw4x:
9853       VT = MVT::v4i32;
9854       break;
9855     case Intrinsic::ppc_vsx_stxvd2x:
9856       VT = MVT::v2f64;
9857       break;
9858     case Intrinsic::ppc_altivec_stvebx:
9859       VT = MVT::i8;
9860       break;
9861     case Intrinsic::ppc_altivec_stvehx:
9862       VT = MVT::i16;
9863       break;
9864     case Intrinsic::ppc_altivec_stvewx:
9865       VT = MVT::i32;
9866       break;
9867     }
9868 
9869     return isConsecutiveLSLoc(N->getOperand(3), VT, Base, Bytes, Dist, DAG);
9870   }
9871 
9872   return false;
9873 }
9874 
// Return true if there is a nearby consecutive load to the one provided
// (regardless of alignment). We search up and down the chain, looking through
// token factors and other loads (but nothing else). A true result therefore
// indicates that it is safe to create a new consecutive load adjacent to the
// load provided.
9880 static bool findConsecutiveLoad(LoadSDNode *LD, SelectionDAG &DAG) {
9881   SDValue Chain = LD->getChain();
9882   EVT VT = LD->getMemoryVT();
9883 
9884   SmallSet<SDNode *, 16> LoadRoots;
9885   SmallVector<SDNode *, 8> Queue(1, Chain.getNode());
9886   SmallSet<SDNode *, 16> Visited;
9887 
9888   // First, search up the chain, branching to follow all token-factor operands.
9889   // If we find a consecutive load, then we're done, otherwise, record all
9890   // nodes just above the top-level loads and token factors.
9891   while (!Queue.empty()) {
9892     SDNode *ChainNext = Queue.pop_back_val();
9893     if (!Visited.insert(ChainNext).second)
9894       continue;
9895 
9896     if (MemSDNode *ChainLD = dyn_cast<MemSDNode>(ChainNext)) {
9897       if (isConsecutiveLS(ChainLD, LD, VT.getStoreSize(), 1, DAG))
9898         return true;
9899 
9900       if (!Visited.count(ChainLD->getChain().getNode()))
9901         Queue.push_back(ChainLD->getChain().getNode());
9902     } else if (ChainNext->getOpcode() == ISD::TokenFactor) {
9903       for (const SDUse &O : ChainNext->ops())
9904         if (!Visited.count(O.getNode()))
9905           Queue.push_back(O.getNode());
9906     } else
9907       LoadRoots.insert(ChainNext);
9908   }
9909 
9910   // Second, search down the chain, starting from the top-level nodes recorded
9911   // in the first phase. These top-level nodes are the nodes just above all
  // loads and token factors. Starting with their uses, recursively look through
9913   // all loads (just the chain uses) and token factors to find a consecutive
9914   // load.
9915   Visited.clear();
9916   Queue.clear();
9917 
9918   for (SmallSet<SDNode *, 16>::iterator I = LoadRoots.begin(),
9919        IE = LoadRoots.end(); I != IE; ++I) {
9920     Queue.push_back(*I);
9921 
9922     while (!Queue.empty()) {
9923       SDNode *LoadRoot = Queue.pop_back_val();
9924       if (!Visited.insert(LoadRoot).second)
9925         continue;
9926 
9927       if (MemSDNode *ChainLD = dyn_cast<MemSDNode>(LoadRoot))
9928         if (isConsecutiveLS(ChainLD, LD, VT.getStoreSize(), 1, DAG))
9929           return true;
9930 
9931       for (SDNode::use_iterator UI = LoadRoot->use_begin(),
9932            UE = LoadRoot->use_end(); UI != UE; ++UI)
9933         if (((isa<MemSDNode>(*UI) &&
9934             cast<MemSDNode>(*UI)->getChain().getNode() == LoadRoot) ||
9935             UI->getOpcode() == ISD::TokenFactor) && !Visited.count(*UI))
9936           Queue.push_back(*UI);
9937     }
9938   }
9939 
9940   return false;
9941 }
9942 
9943 SDValue PPCTargetLowering::DAGCombineTruncBoolExt(SDNode *N,
9944                                                   DAGCombinerInfo &DCI) const {
9945   SelectionDAG &DAG = DCI.DAG;
9946   SDLoc dl(N);
9947 
9948   assert(Subtarget.useCRBits() && "Expecting to be tracking CR bits");
9949   // If we're tracking CR bits, we need to be careful that we don't have:
9950   //   trunc(binary-ops(zext(x), zext(y)))
9951   // or
  //   trunc(binary-ops(binary-ops(zext(x), zext(y)), ...))
9953   // such that we're unnecessarily moving things into GPRs when it would be
9954   // better to keep them in CR bits.
9955 
9956   // Note that trunc here can be an actual i1 trunc, or can be the effective
9957   // truncation that comes from a setcc or select_cc.
9958   if (N->getOpcode() == ISD::TRUNCATE &&
9959       N->getValueType(0) != MVT::i1)
9960     return SDValue();
9961 
9962   if (N->getOperand(0).getValueType() != MVT::i32 &&
9963       N->getOperand(0).getValueType() != MVT::i64)
9964     return SDValue();
9965 
9966   if (N->getOpcode() == ISD::SETCC ||
9967       N->getOpcode() == ISD::SELECT_CC) {
9968     // If we're looking at a comparison, then we need to make sure that the
    // high bits (all except for the first) don't affect the result.
9970     ISD::CondCode CC =
9971       cast<CondCodeSDNode>(N->getOperand(
9972         N->getOpcode() == ISD::SETCC ? 2 : 4))->get();
9973     unsigned OpBits = N->getOperand(0).getValueSizeInBits();
9974 
9975     if (ISD::isSignedIntSetCC(CC)) {
9976       if (DAG.ComputeNumSignBits(N->getOperand(0)) != OpBits ||
9977           DAG.ComputeNumSignBits(N->getOperand(1)) != OpBits)
9978         return SDValue();
9979     } else if (ISD::isUnsignedIntSetCC(CC)) {
9980       if (!DAG.MaskedValueIsZero(N->getOperand(0),
9981                                  APInt::getHighBitsSet(OpBits, OpBits-1)) ||
9982           !DAG.MaskedValueIsZero(N->getOperand(1),
9983                                  APInt::getHighBitsSet(OpBits, OpBits-1)))
9984         return SDValue();
9985     } else {
      // This is neither a signed nor an unsigned comparison; just make sure
9987       // that the high bits are equal.
9988       APInt Op1Zero, Op1One;
9989       APInt Op2Zero, Op2One;
9990       DAG.computeKnownBits(N->getOperand(0), Op1Zero, Op1One);
9991       DAG.computeKnownBits(N->getOperand(1), Op2Zero, Op2One);
9992 
9993       // We don't really care about what is known about the first bit (if
9994       // anything), so clear it in all masks prior to comparing them.
9995       Op1Zero.clearBit(0); Op1One.clearBit(0);
9996       Op2Zero.clearBit(0); Op2One.clearBit(0);
9997 
9998       if (Op1Zero != Op2Zero || Op1One != Op2One)
9999         return SDValue();
10000     }
10001   }
10002 
  // We now know that the higher-order bits are irrelevant; we just need to
10004   // make sure that all of the intermediate operations are bit operations, and
10005   // all inputs are extensions.
10006   if (N->getOperand(0).getOpcode() != ISD::AND &&
10007       N->getOperand(0).getOpcode() != ISD::OR  &&
10008       N->getOperand(0).getOpcode() != ISD::XOR &&
10009       N->getOperand(0).getOpcode() != ISD::SELECT &&
10010       N->getOperand(0).getOpcode() != ISD::SELECT_CC &&
10011       N->getOperand(0).getOpcode() != ISD::TRUNCATE &&
10012       N->getOperand(0).getOpcode() != ISD::SIGN_EXTEND &&
10013       N->getOperand(0).getOpcode() != ISD::ZERO_EXTEND &&
10014       N->getOperand(0).getOpcode() != ISD::ANY_EXTEND)
10015     return SDValue();
10016 
10017   if ((N->getOpcode() == ISD::SETCC || N->getOpcode() == ISD::SELECT_CC) &&
10018       N->getOperand(1).getOpcode() != ISD::AND &&
10019       N->getOperand(1).getOpcode() != ISD::OR  &&
10020       N->getOperand(1).getOpcode() != ISD::XOR &&
10021       N->getOperand(1).getOpcode() != ISD::SELECT &&
10022       N->getOperand(1).getOpcode() != ISD::SELECT_CC &&
10023       N->getOperand(1).getOpcode() != ISD::TRUNCATE &&
10024       N->getOperand(1).getOpcode() != ISD::SIGN_EXTEND &&
10025       N->getOperand(1).getOpcode() != ISD::ZERO_EXTEND &&
10026       N->getOperand(1).getOpcode() != ISD::ANY_EXTEND)
10027     return SDValue();
10028 
10029   SmallVector<SDValue, 4> Inputs;
10030   SmallVector<SDValue, 8> BinOps, PromOps;
10031   SmallPtrSet<SDNode *, 16> Visited;
10032 
10033   for (unsigned i = 0; i < 2; ++i) {
10034     if (((N->getOperand(i).getOpcode() == ISD::SIGN_EXTEND ||
10035           N->getOperand(i).getOpcode() == ISD::ZERO_EXTEND ||
10036           N->getOperand(i).getOpcode() == ISD::ANY_EXTEND) &&
10037           N->getOperand(i).getOperand(0).getValueType() == MVT::i1) ||
10038         isa<ConstantSDNode>(N->getOperand(i)))
10039       Inputs.push_back(N->getOperand(i));
10040     else
10041       BinOps.push_back(N->getOperand(i));
10042 
10043     if (N->getOpcode() == ISD::TRUNCATE)
10044       break;
10045   }
10046 
10047   // Visit all inputs, collect all binary operations (and, or, xor and
10048   // select) that are all fed by extensions.
10049   while (!BinOps.empty()) {
10050     SDValue BinOp = BinOps.back();
10051     BinOps.pop_back();
10052 
10053     if (!Visited.insert(BinOp.getNode()).second)
10054       continue;
10055 
10056     PromOps.push_back(BinOp);
10057 
10058     for (unsigned i = 0, ie = BinOp.getNumOperands(); i != ie; ++i) {
10059       // The condition of the select is not promoted.
10060       if (BinOp.getOpcode() == ISD::SELECT && i == 0)
10061         continue;
10062       if (BinOp.getOpcode() == ISD::SELECT_CC && i != 2 && i != 3)
10063         continue;
10064 
10065       if (((BinOp.getOperand(i).getOpcode() == ISD::SIGN_EXTEND ||
10066             BinOp.getOperand(i).getOpcode() == ISD::ZERO_EXTEND ||
10067             BinOp.getOperand(i).getOpcode() == ISD::ANY_EXTEND) &&
10068            BinOp.getOperand(i).getOperand(0).getValueType() == MVT::i1) ||
10069           isa<ConstantSDNode>(BinOp.getOperand(i))) {
10070         Inputs.push_back(BinOp.getOperand(i));
10071       } else if (BinOp.getOperand(i).getOpcode() == ISD::AND ||
10072                  BinOp.getOperand(i).getOpcode() == ISD::OR  ||
10073                  BinOp.getOperand(i).getOpcode() == ISD::XOR ||
10074                  BinOp.getOperand(i).getOpcode() == ISD::SELECT ||
10075                  BinOp.getOperand(i).getOpcode() == ISD::SELECT_CC ||
10076                  BinOp.getOperand(i).getOpcode() == ISD::TRUNCATE ||
10077                  BinOp.getOperand(i).getOpcode() == ISD::SIGN_EXTEND ||
10078                  BinOp.getOperand(i).getOpcode() == ISD::ZERO_EXTEND ||
10079                  BinOp.getOperand(i).getOpcode() == ISD::ANY_EXTEND) {
10080         BinOps.push_back(BinOp.getOperand(i));
10081       } else {
10082         // We have an input that is not an extension or another binary
10083         // operation; we'll abort this transformation.
10084         return SDValue();
10085       }
10086     }
10087   }
10088 
10089   // Make sure that this is a self-contained cluster of operations (which
10090   // is not quite the same thing as saying that everything has only one
10091   // use).
10092   for (unsigned i = 0, ie = Inputs.size(); i != ie; ++i) {
10093     if (isa<ConstantSDNode>(Inputs[i]))
10094       continue;
10095 
10096     for (SDNode::use_iterator UI = Inputs[i].getNode()->use_begin(),
10097                               UE = Inputs[i].getNode()->use_end();
10098          UI != UE; ++UI) {
10099       SDNode *User = *UI;
10100       if (User != N && !Visited.count(User))
10101         return SDValue();
10102 
      // Make sure that we're not going to promote the non-output-value
      // operand(s) of SELECT or SELECT_CC.
10105       // FIXME: Although we could sometimes handle this, and it does occur in
10106       // practice that one of the condition inputs to the select is also one of
10107       // the outputs, we currently can't deal with this.
10108       if (User->getOpcode() == ISD::SELECT) {
10109         if (User->getOperand(0) == Inputs[i])
10110           return SDValue();
10111       } else if (User->getOpcode() == ISD::SELECT_CC) {
10112         if (User->getOperand(0) == Inputs[i] ||
10113             User->getOperand(1) == Inputs[i])
10114           return SDValue();
10115       }
10116     }
10117   }
10118 
10119   for (unsigned i = 0, ie = PromOps.size(); i != ie; ++i) {
10120     for (SDNode::use_iterator UI = PromOps[i].getNode()->use_begin(),
10121                               UE = PromOps[i].getNode()->use_end();
10122          UI != UE; ++UI) {
10123       SDNode *User = *UI;
10124       if (User != N && !Visited.count(User))
10125         return SDValue();
10126 
      // Make sure that we're not going to promote the non-output-value
      // operand(s) of SELECT or SELECT_CC.
10129       // FIXME: Although we could sometimes handle this, and it does occur in
10130       // practice that one of the condition inputs to the select is also one of
10131       // the outputs, we currently can't deal with this.
10132       if (User->getOpcode() == ISD::SELECT) {
10133         if (User->getOperand(0) == PromOps[i])
10134           return SDValue();
10135       } else if (User->getOpcode() == ISD::SELECT_CC) {
10136         if (User->getOperand(0) == PromOps[i] ||
10137             User->getOperand(1) == PromOps[i])
10138           return SDValue();
10139       }
10140     }
10141   }
10142 
10143   // Replace all inputs with the extension operand.
10144   for (unsigned i = 0, ie = Inputs.size(); i != ie; ++i) {
10145     // Constants may have users outside the cluster of to-be-promoted nodes,
10146     // and so we need to replace those as we do the promotions.
10147     if (isa<ConstantSDNode>(Inputs[i]))
10148       continue;
10149     else
10150       DAG.ReplaceAllUsesOfValueWith(Inputs[i], Inputs[i].getOperand(0));
10151   }
10152 
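  // Wrap the to-be-promoted nodes in handles so that they survive the
  // replacements below; ReplaceAllUsesOfValueWith may otherwise delete or
  // re-create nodes out from under us.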
10153   std::list<HandleSDNode> PromOpHandles;
10154   for (auto &PromOp : PromOps)
10155     PromOpHandles.emplace_back(PromOp);
10156 
10157   // Replace all operations (these are all the same, but have a different
10158   // (i1) return type). DAG.getNode will validate that the types of
10159   // a binary operator match, so go through the list in reverse so that
10160   // we've likely promoted both operands first. Any intermediate truncations or
10161   // extensions disappear.
10162   while (!PromOpHandles.empty()) {
10163     SDValue PromOp = PromOpHandles.back().getValue();
10164     PromOpHandles.pop_back();
10165 
10166     if (PromOp.getOpcode() == ISD::TRUNCATE ||
10167         PromOp.getOpcode() == ISD::SIGN_EXTEND ||
10168         PromOp.getOpcode() == ISD::ZERO_EXTEND ||
10169         PromOp.getOpcode() == ISD::ANY_EXTEND) {
10170       if (!isa<ConstantSDNode>(PromOp.getOperand(0)) &&
10171           PromOp.getOperand(0).getValueType() != MVT::i1) {
10172         // The operand is not yet ready (see comment below).
10173         PromOpHandles.emplace_front(PromOp);
10174         continue;
10175       }
10176 
10177       SDValue RepValue = PromOp.getOperand(0);
10178       if (isa<ConstantSDNode>(RepValue))
10179         RepValue = DAG.getNode(ISD::TRUNCATE, dl, MVT::i1, RepValue);
10180 
10181       DAG.ReplaceAllUsesOfValueWith(PromOp, RepValue);
10182       continue;
10183     }
10184 
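    // C is the index of the first value operand to promote: SELECT carries its
    // condition in operand 0, and SELECT_CC its comparison operands in
    // operands 0 and 1.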
10185     unsigned C;
10186     switch (PromOp.getOpcode()) {
10187     default:             C = 0; break;
10188     case ISD::SELECT:    C = 1; break;
10189     case ISD::SELECT_CC: C = 2; break;
10190     }
10191 
10192     if ((!isa<ConstantSDNode>(PromOp.getOperand(C)) &&
10193          PromOp.getOperand(C).getValueType() != MVT::i1) ||
10194         (!isa<ConstantSDNode>(PromOp.getOperand(C+1)) &&
10195          PromOp.getOperand(C+1).getValueType() != MVT::i1)) {
10196       // The to-be-promoted operands of this node have not yet been
10197       // promoted (this should be rare because we're going through the
10198       // list backward, but if one of the operands has several users in
10199       // this cluster of to-be-promoted nodes, it is possible).
10200       PromOpHandles.emplace_front(PromOp);
10201       continue;
10202     }
10203 
10204     SmallVector<SDValue, 3> Ops(PromOp.getNode()->op_begin(),
10205                                 PromOp.getNode()->op_end());
10206 
10207     // If there are any constant inputs, make sure they're replaced now.
10208     for (unsigned i = 0; i < 2; ++i)
10209       if (isa<ConstantSDNode>(Ops[C+i]))
10210         Ops[C+i] = DAG.getNode(ISD::TRUNCATE, dl, MVT::i1, Ops[C+i]);
10211 
10212     DAG.ReplaceAllUsesOfValueWith(PromOp,
10213       DAG.getNode(PromOp.getOpcode(), dl, MVT::i1, Ops));
10214   }
10215 
10216   // Now we're left with the initial truncation itself.
10217   if (N->getOpcode() == ISD::TRUNCATE)
10218     return N->getOperand(0);
10219 
10220   // Otherwise, this is a comparison. The operands to be compared have just
10221   // changed type (to i1), but everything else is the same.
10222   return SDValue(N, 0);
10223 }
10224 
10225 SDValue PPCTargetLowering::DAGCombineExtBoolTrunc(SDNode *N,
10226                                                   DAGCombinerInfo &DCI) const {
10227   SelectionDAG &DAG = DCI.DAG;
10228   SDLoc dl(N);
10229 
10230   // If we're tracking CR bits, we need to be careful that we don't have:
10231   //   zext(binary-ops(trunc(x), trunc(y)))
10232   // or
  //   zext(binary-ops(binary-ops(trunc(x), trunc(y)), ...))
10234   // such that we're unnecessarily moving things into CR bits that can more
10235   // efficiently stay in GPRs. Note that if we're not certain that the high
10236   // bits are set as required by the final extension, we still may need to do
10237   // some masking to get the proper behavior.
10238 
10239   // This same functionality is important on PPC64 when dealing with
10240   // 32-to-64-bit extensions; these occur often when 32-bit values are used as
10241   // the return values of functions. Because it is so similar, it is handled
10242   // here as well.
10243 
10244   if (N->getValueType(0) != MVT::i32 &&
10245       N->getValueType(0) != MVT::i64)
10246     return SDValue();
10247 
10248   if (!((N->getOperand(0).getValueType() == MVT::i1 && Subtarget.useCRBits()) ||
10249         (N->getOperand(0).getValueType() == MVT::i32 && Subtarget.isPPC64())))
10250     return SDValue();
10251 
10252   if (N->getOperand(0).getOpcode() != ISD::AND &&
10253       N->getOperand(0).getOpcode() != ISD::OR  &&
10254       N->getOperand(0).getOpcode() != ISD::XOR &&
10255       N->getOperand(0).getOpcode() != ISD::SELECT &&
10256       N->getOperand(0).getOpcode() != ISD::SELECT_CC)
10257     return SDValue();
10258 
10259   SmallVector<SDValue, 4> Inputs;
10260   SmallVector<SDValue, 8> BinOps(1, N->getOperand(0)), PromOps;
10261   SmallPtrSet<SDNode *, 16> Visited;
10262 
  // Visit all inputs, collecting all binary operations (and, or, xor, and
  // select) that are ultimately fed by truncations.
10265   while (!BinOps.empty()) {
10266     SDValue BinOp = BinOps.back();
10267     BinOps.pop_back();
10268 
10269     if (!Visited.insert(BinOp.getNode()).second)
10270       continue;
10271 
10272     PromOps.push_back(BinOp);
10273 
10274     for (unsigned i = 0, ie = BinOp.getNumOperands(); i != ie; ++i) {
10275       // The condition of the select is not promoted.
10276       if (BinOp.getOpcode() == ISD::SELECT && i == 0)
10277         continue;
10278       if (BinOp.getOpcode() == ISD::SELECT_CC && i != 2 && i != 3)
10279         continue;
10280 
10281       if (BinOp.getOperand(i).getOpcode() == ISD::TRUNCATE ||
10282           isa<ConstantSDNode>(BinOp.getOperand(i))) {
10283         Inputs.push_back(BinOp.getOperand(i));
10284       } else if (BinOp.getOperand(i).getOpcode() == ISD::AND ||
10285                  BinOp.getOperand(i).getOpcode() == ISD::OR  ||
10286                  BinOp.getOperand(i).getOpcode() == ISD::XOR ||
10287                  BinOp.getOperand(i).getOpcode() == ISD::SELECT ||
10288                  BinOp.getOperand(i).getOpcode() == ISD::SELECT_CC) {
10289         BinOps.push_back(BinOp.getOperand(i));
10290       } else {
10291         // We have an input that is not a truncation or another binary
10292         // operation; we'll abort this transformation.
10293         return SDValue();
10294       }
10295     }
10296   }
10297 
  // For each select (or select_cc) whose condition or comparison operand will
  // be promoted as part of this cluster, record that operand's original type
  // so it can be truncated back when the select itself is promoted.
10300   DenseMap<SDNode *, EVT> SelectTruncOp[2];
10301 
10302   // Make sure that this is a self-contained cluster of operations (which
10303   // is not quite the same thing as saying that everything has only one
10304   // use).
10305   for (unsigned i = 0, ie = Inputs.size(); i != ie; ++i) {
10306     if (isa<ConstantSDNode>(Inputs[i]))
10307       continue;
10308 
10309     for (SDNode::use_iterator UI = Inputs[i].getNode()->use_begin(),
10310                               UE = Inputs[i].getNode()->use_end();
10311          UI != UE; ++UI) {
10312       SDNode *User = *UI;
10313       if (User != N && !Visited.count(User))
10314         return SDValue();
10315 
      // If we're going to promote the non-output-value operand(s) of SELECT
      // or SELECT_CC, record them for truncation.
10318       if (User->getOpcode() == ISD::SELECT) {
10319         if (User->getOperand(0) == Inputs[i])
10320           SelectTruncOp[0].insert(std::make_pair(User,
10321                                     User->getOperand(0).getValueType()));
10322       } else if (User->getOpcode() == ISD::SELECT_CC) {
10323         if (User->getOperand(0) == Inputs[i])
10324           SelectTruncOp[0].insert(std::make_pair(User,
10325                                     User->getOperand(0).getValueType()));
10326         if (User->getOperand(1) == Inputs[i])
10327           SelectTruncOp[1].insert(std::make_pair(User,
10328                                     User->getOperand(1).getValueType()));
10329       }
10330     }
10331   }
10332 
10333   for (unsigned i = 0, ie = PromOps.size(); i != ie; ++i) {
10334     for (SDNode::use_iterator UI = PromOps[i].getNode()->use_begin(),
10335                               UE = PromOps[i].getNode()->use_end();
10336          UI != UE; ++UI) {
10337       SDNode *User = *UI;
10338       if (User != N && !Visited.count(User))
10339         return SDValue();
10340 
      // If we're going to promote the non-output-value operand(s) of SELECT
      // or SELECT_CC, record them for truncation.
10343       if (User->getOpcode() == ISD::SELECT) {
10344         if (User->getOperand(0) == PromOps[i])
10345           SelectTruncOp[0].insert(std::make_pair(User,
10346                                     User->getOperand(0).getValueType()));
10347       } else if (User->getOpcode() == ISD::SELECT_CC) {
10348         if (User->getOperand(0) == PromOps[i])
10349           SelectTruncOp[0].insert(std::make_pair(User,
10350                                     User->getOperand(0).getValueType()));
10351         if (User->getOperand(1) == PromOps[i])
10352           SelectTruncOp[1].insert(std::make_pair(User,
10353                                     User->getOperand(1).getValueType()));
10354       }
10355     }
10356   }
10357 
10358   unsigned PromBits = N->getOperand(0).getValueSizeInBits();
10359   bool ReallyNeedsExt = false;
10360   if (N->getOpcode() != ISD::ANY_EXTEND) {
    // Unless all of the inputs are already sign/zero extended, we'll still
    // need to do that at the end.
10363     for (unsigned i = 0, ie = Inputs.size(); i != ie; ++i) {
10364       if (isa<ConstantSDNode>(Inputs[i]))
10365         continue;
10366 
10367       unsigned OpBits =
10368         Inputs[i].getOperand(0).getValueSizeInBits();
10369       assert(PromBits < OpBits && "Truncation not to a smaller bit count?");
10370 
10371       if ((N->getOpcode() == ISD::ZERO_EXTEND &&
10372            !DAG.MaskedValueIsZero(Inputs[i].getOperand(0),
10373                                   APInt::getHighBitsSet(OpBits,
10374                                                         OpBits-PromBits))) ||
10375           (N->getOpcode() == ISD::SIGN_EXTEND &&
10376            DAG.ComputeNumSignBits(Inputs[i].getOperand(0)) <
10377              (OpBits-(PromBits-1)))) {
10378         ReallyNeedsExt = true;
10379         break;
10380       }
10381     }
10382   }
10383 
10384   // Replace all inputs, either with the truncation operand, or a
10385   // truncation or extension to the final output type.
10386   for (unsigned i = 0, ie = Inputs.size(); i != ie; ++i) {
    // Constant inputs need to be replaced inside the to-be-promoted nodes that
    // use them because they might have users outside of the cluster of
    // promoted nodes.
10390     if (isa<ConstantSDNode>(Inputs[i]))
10391       continue;
10392 
10393     SDValue InSrc = Inputs[i].getOperand(0);
10394     if (Inputs[i].getValueType() == N->getValueType(0))
10395       DAG.ReplaceAllUsesOfValueWith(Inputs[i], InSrc);
10396     else if (N->getOpcode() == ISD::SIGN_EXTEND)
10397       DAG.ReplaceAllUsesOfValueWith(Inputs[i],
10398         DAG.getSExtOrTrunc(InSrc, dl, N->getValueType(0)));
10399     else if (N->getOpcode() == ISD::ZERO_EXTEND)
10400       DAG.ReplaceAllUsesOfValueWith(Inputs[i],
10401         DAG.getZExtOrTrunc(InSrc, dl, N->getValueType(0)));
10402     else
10403       DAG.ReplaceAllUsesOfValueWith(Inputs[i],
10404         DAG.getAnyExtOrTrunc(InSrc, dl, N->getValueType(0)));
10405   }
10406 
10407   std::list<HandleSDNode> PromOpHandles;
10408   for (auto &PromOp : PromOps)
10409     PromOpHandles.emplace_back(PromOp);
10410 
10411   // Replace all operations (these are all the same, but have a different
10412   // (promoted) return type). DAG.getNode will validate that the types of
10413   // a binary operator match, so go through the list in reverse so that
10414   // we've likely promoted both operands first.
10415   while (!PromOpHandles.empty()) {
10416     SDValue PromOp = PromOpHandles.back().getValue();
10417     PromOpHandles.pop_back();
10418 
10419     unsigned C;
10420     switch (PromOp.getOpcode()) {
10421     default:             C = 0; break;
10422     case ISD::SELECT:    C = 1; break;
10423     case ISD::SELECT_CC: C = 2; break;
10424     }
10425 
10426     if ((!isa<ConstantSDNode>(PromOp.getOperand(C)) &&
10427          PromOp.getOperand(C).getValueType() != N->getValueType(0)) ||
10428         (!isa<ConstantSDNode>(PromOp.getOperand(C+1)) &&
10429          PromOp.getOperand(C+1).getValueType() != N->getValueType(0))) {
10430       // The to-be-promoted operands of this node have not yet been
10431       // promoted (this should be rare because we're going through the
10432       // list backward, but if one of the operands has several users in
10433       // this cluster of to-be-promoted nodes, it is possible).
10434       PromOpHandles.emplace_front(PromOp);
10435       continue;
10436     }
10437 
10438     // For SELECT and SELECT_CC nodes, we do a similar check for any
10439     // to-be-promoted comparison inputs.
10440     if (PromOp.getOpcode() == ISD::SELECT ||
10441         PromOp.getOpcode() == ISD::SELECT_CC) {
10442       if ((SelectTruncOp[0].count(PromOp.getNode()) &&
10443            PromOp.getOperand(0).getValueType() != N->getValueType(0)) ||
10444           (SelectTruncOp[1].count(PromOp.getNode()) &&
10445            PromOp.getOperand(1).getValueType() != N->getValueType(0))) {
10446         PromOpHandles.emplace_front(PromOp);
10447         continue;
10448       }
10449     }
10450 
10451     SmallVector<SDValue, 3> Ops(PromOp.getNode()->op_begin(),
10452                                 PromOp.getNode()->op_end());
10453 
10454     // If this node has constant inputs, then they'll need to be promoted here.
10455     for (unsigned i = 0; i < 2; ++i) {
10456       if (!isa<ConstantSDNode>(Ops[C+i]))
10457         continue;
10458       if (Ops[C+i].getValueType() == N->getValueType(0))
10459         continue;
10460 
10461       if (N->getOpcode() == ISD::SIGN_EXTEND)
10462         Ops[C+i] = DAG.getSExtOrTrunc(Ops[C+i], dl, N->getValueType(0));
10463       else if (N->getOpcode() == ISD::ZERO_EXTEND)
10464         Ops[C+i] = DAG.getZExtOrTrunc(Ops[C+i], dl, N->getValueType(0));
10465       else
10466         Ops[C+i] = DAG.getAnyExtOrTrunc(Ops[C+i], dl, N->getValueType(0));
10467     }
10468 
10469     // If we've promoted the comparison inputs of a SELECT or SELECT_CC,
10470     // truncate them again to the original value type.
10471     if (PromOp.getOpcode() == ISD::SELECT ||
10472         PromOp.getOpcode() == ISD::SELECT_CC) {
10473       auto SI0 = SelectTruncOp[0].find(PromOp.getNode());
10474       if (SI0 != SelectTruncOp[0].end())
10475         Ops[0] = DAG.getNode(ISD::TRUNCATE, dl, SI0->second, Ops[0]);
10476       auto SI1 = SelectTruncOp[1].find(PromOp.getNode());
10477       if (SI1 != SelectTruncOp[1].end())
10478         Ops[1] = DAG.getNode(ISD::TRUNCATE, dl, SI1->second, Ops[1]);
10479     }
10480 
10481     DAG.ReplaceAllUsesOfValueWith(PromOp,
10482       DAG.getNode(PromOp.getOpcode(), dl, N->getValueType(0), Ops));
10483   }
10484 
10485   // Now we're left with the initial extension itself.
10486   if (!ReallyNeedsExt)
10487     return N->getOperand(0);
10488 
10489   // To zero extend, just mask off everything except for the first bit (in the
10490   // i1 case).
10491   if (N->getOpcode() == ISD::ZERO_EXTEND)
10492     return DAG.getNode(ISD::AND, dl, N->getValueType(0), N->getOperand(0),
10493                        DAG.getConstant(APInt::getLowBitsSet(
10494                                          N->getValueSizeInBits(0), PromBits),
10495                                        dl, N->getValueType(0)));
10496 
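  // To sign extend, shift the promoted value left so that its sign bit lands
  // in the most significant bit, then arithmetic-shift it back down.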
10497   assert(N->getOpcode() == ISD::SIGN_EXTEND &&
10498          "Invalid extension type");
10499   EVT ShiftAmountTy = getShiftAmountTy(N->getValueType(0), DAG.getDataLayout());
10500   SDValue ShiftCst =
10501       DAG.getConstant(N->getValueSizeInBits(0) - PromBits, dl, ShiftAmountTy);
10502   return DAG.getNode(
10503       ISD::SRA, dl, N->getValueType(0),
10504       DAG.getNode(ISD::SHL, dl, N->getValueType(0), N->getOperand(0), ShiftCst),
10505       ShiftCst);
10506 }
10507 
10508 SDValue PPCTargetLowering::DAGCombineBuildVector(SDNode *N,
10509                                                  DAGCombinerInfo &DCI) const {
10510   assert(N->getOpcode() == ISD::BUILD_VECTOR &&
10511          "Should be called with a BUILD_VECTOR node");
10512 
10513   SelectionDAG &DAG = DCI.DAG;
10514   SDLoc dl(N);
10515   if (N->getValueType(0) != MVT::v2f64 || !Subtarget.hasVSX())
10516     return SDValue();
10517 
10518   // Looking for:
  // (build_vector ([su]int_to_fp (extractelt 0)), ([su]int_to_fp (extractelt 1)))
10520   if (N->getOperand(0).getOpcode() != ISD::SINT_TO_FP &&
10521       N->getOperand(0).getOpcode() != ISD::UINT_TO_FP)
10522     return SDValue();
10523   if (N->getOperand(1).getOpcode() != ISD::SINT_TO_FP &&
10524       N->getOperand(1).getOpcode() != ISD::UINT_TO_FP)
10525     return SDValue();
10526   if (N->getOperand(0).getOpcode() != N->getOperand(1).getOpcode())
10527     return SDValue();
10528 
10529   SDValue Ext1 = N->getOperand(0).getOperand(0);
10530   SDValue Ext2 = N->getOperand(1).getOperand(0);
  if (Ext1.getOpcode() != ISD::EXTRACT_VECTOR_ELT ||
10532      Ext2.getOpcode() != ISD::EXTRACT_VECTOR_ELT)
10533     return SDValue();
10534 
10535   ConstantSDNode *Ext1Op = dyn_cast<ConstantSDNode>(Ext1.getOperand(1));
10536   ConstantSDNode *Ext2Op = dyn_cast<ConstantSDNode>(Ext2.getOperand(1));
10537   if (!Ext1Op || !Ext2Op)
10538     return SDValue();
  if (Ext1.getValueType() != MVT::i32 ||
      Ext2.getValueType() != MVT::i32)
    return SDValue();

  if (Ext1.getOperand(0) != Ext2.getOperand(0))
    return SDValue();
10543 
10544   int FirstElem = Ext1Op->getZExtValue();
10545   int SecondElem = Ext2Op->getZExtValue();
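  // The two extracted elements must form one aligned half of the source
  // vector; SubvecIdx tells the VEC_TO_FP node which half to convert. Element
  // numbering is reversed on little-endian targets, hence the swapped index.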
10546   int SubvecIdx;
10547   if (FirstElem == 0 && SecondElem == 1)
10548     SubvecIdx = Subtarget.isLittleEndian() ? 1 : 0;
10549   else if (FirstElem == 2 && SecondElem == 3)
10550     SubvecIdx = Subtarget.isLittleEndian() ? 0 : 1;
10551   else
10552     return SDValue();
10553 
10554   SDValue SrcVec = Ext1.getOperand(0);
10555   auto NodeType = (N->getOperand(1).getOpcode() == ISD::SINT_TO_FP) ?
10556     PPCISD::SINT_VEC_TO_FP : PPCISD::UINT_VEC_TO_FP;
10557   return DAG.getNode(NodeType, dl, MVT::v2f64,
10558                      SrcVec, DAG.getIntPtrConstant(SubvecIdx, dl));
10559 }
10560 
10561 SDValue PPCTargetLowering::combineFPToIntToFP(SDNode *N,
10562                                               DAGCombinerInfo &DCI) const {
10563   assert((N->getOpcode() == ISD::SINT_TO_FP ||
10564           N->getOpcode() == ISD::UINT_TO_FP) &&
10565          "Need an int -> FP conversion node here");
10566 
10567   if (useSoftFloat() || !Subtarget.has64BitSupport())
10568     return SDValue();
10569 
10570   SelectionDAG &DAG = DCI.DAG;
10571   SDLoc dl(N);
10572   SDValue Op(N, 0);
10573 
10574   SDValue FirstOperand(Op.getOperand(0));
10575   bool SubWordLoad = FirstOperand.getOpcode() == ISD::LOAD &&
10576     (FirstOperand.getValueType() == MVT::i8 ||
10577      FirstOperand.getValueType() == MVT::i16);
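  // On Power9, a byte or halfword can be loaded directly into a VSR
  // (zero-extended by the load) and converted there, avoiding a GPR-to-VSR
  // transfer; LXSIZX models that load and VEXTS the in-register sign extension.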
10578   if (Subtarget.hasP9Vector() && Subtarget.hasP9Altivec() && SubWordLoad) {
10579     bool Signed = N->getOpcode() == ISD::SINT_TO_FP;
10580     bool DstDouble = Op.getValueType() == MVT::f64;
10581     unsigned ConvOp = Signed ?
10582       (DstDouble ? PPCISD::FCFID  : PPCISD::FCFIDS) :
10583       (DstDouble ? PPCISD::FCFIDU : PPCISD::FCFIDUS);
10584     SDValue WidthConst =
10585       DAG.getIntPtrConstant(FirstOperand.getValueType() == MVT::i8 ? 1 : 2,
10586                             dl, false);
10587     LoadSDNode *LDN = cast<LoadSDNode>(FirstOperand.getNode());
10588     SDValue Ops[] = { LDN->getChain(), LDN->getBasePtr(), WidthConst };
10589     SDValue Ld = DAG.getMemIntrinsicNode(PPCISD::LXSIZX, dl,
10590                                          DAG.getVTList(MVT::f64, MVT::Other),
10591                                          Ops, MVT::i8, LDN->getMemOperand());
10592 
10593     // For signed conversion, we need to sign-extend the value in the VSR
10594     if (Signed) {
10595       SDValue ExtOps[] = { Ld, WidthConst };
10596       SDValue Ext = DAG.getNode(PPCISD::VEXTS, dl, MVT::f64, ExtOps);
10597       return DAG.getNode(ConvOp, dl, DstDouble ? MVT::f64 : MVT::f32, Ext);
10598     } else
10599       return DAG.getNode(ConvOp, dl, DstDouble ? MVT::f64 : MVT::f32, Ld);
10600   }
10601 
  // Don't handle ppc_fp128 or i1 conversions here.
10603   if (Op.getValueType() != MVT::f32 && Op.getValueType() != MVT::f64)
10604     return SDValue();
10605   if (Op.getOperand(0).getValueType() == MVT::i1)
10606     return SDValue();
10607 
  // For i32 intermediate values, unfortunately, the conversion functions
  // leave the upper 32 bits of the value undefined. Within the set of
  // scalar instructions, we have no method for zero- or sign-extending the
  // value. Thus, we cannot handle i32 intermediate values here.
10612   if (Op.getOperand(0).getValueType() == MVT::i32)
10613     return SDValue();
10614 
10615   assert((Op.getOpcode() == ISD::SINT_TO_FP || Subtarget.hasFPCVT()) &&
10616          "UINT_TO_FP is supported only with FPCVT");
10617 
10618   // If we have FCFIDS, then use it when converting to single-precision.
10619   // Otherwise, convert to double-precision and then round.
10620   unsigned FCFOp = (Subtarget.hasFPCVT() && Op.getValueType() == MVT::f32)
10621                        ? (Op.getOpcode() == ISD::UINT_TO_FP ? PPCISD::FCFIDUS
10622                                                             : PPCISD::FCFIDS)
10623                        : (Op.getOpcode() == ISD::UINT_TO_FP ? PPCISD::FCFIDU
10624                                                             : PPCISD::FCFID);
10625   MVT FCFTy = (Subtarget.hasFPCVT() && Op.getValueType() == MVT::f32)
10626                   ? MVT::f32
10627                   : MVT::f64;
10628 
  // If we're converting from a float to an int and back to a float again,
  // then we don't need the store/load pair at all.
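  // (The unsigned case requires fctiduz and thus the FPCVT feature; the
  // signed case can always use fctidz.)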
10631   if ((Op.getOperand(0).getOpcode() == ISD::FP_TO_UINT &&
10632        Subtarget.hasFPCVT()) ||
10633       (Op.getOperand(0).getOpcode() == ISD::FP_TO_SINT)) {
10634     SDValue Src = Op.getOperand(0).getOperand(0);
10635     if (Src.getValueType() == MVT::f32) {
10636       Src = DAG.getNode(ISD::FP_EXTEND, dl, MVT::f64, Src);
10637       DCI.AddToWorklist(Src.getNode());
10638     } else if (Src.getValueType() != MVT::f64) {
10639       // Make sure that we don't pick up a ppc_fp128 source value.
10640       return SDValue();
10641     }
10642 
10643     unsigned FCTOp =
10644       Op.getOperand(0).getOpcode() == ISD::FP_TO_SINT ? PPCISD::FCTIDZ :
10645                                                         PPCISD::FCTIDUZ;
10646 
10647     SDValue Tmp = DAG.getNode(FCTOp, dl, MVT::f64, Src);
10648     SDValue FP = DAG.getNode(FCFOp, dl, FCFTy, Tmp);
10649 
10650     if (Op.getValueType() == MVT::f32 && !Subtarget.hasFPCVT()) {
10651       FP = DAG.getNode(ISD::FP_ROUND, dl,
10652                        MVT::f32, FP, DAG.getIntPtrConstant(0, dl));
10653       DCI.AddToWorklist(FP.getNode());
10654     }
10655 
10656     return FP;
10657   }
10658 
10659   return SDValue();
10660 }
10661 
10662 // expandVSXLoadForLE - Convert VSX loads (which may be intrinsics for
10663 // builtins) into loads with swaps.
10664 SDValue PPCTargetLowering::expandVSXLoadForLE(SDNode *N,
10665                                               DAGCombinerInfo &DCI) const {
10666   SelectionDAG &DAG = DCI.DAG;
10667   SDLoc dl(N);
10668   SDValue Chain;
10669   SDValue Base;
10670   MachineMemOperand *MMO;
10671 
10672   switch (N->getOpcode()) {
10673   default:
10674     llvm_unreachable("Unexpected opcode for little endian VSX load");
10675   case ISD::LOAD: {
10676     LoadSDNode *LD = cast<LoadSDNode>(N);
10677     Chain = LD->getChain();
10678     Base = LD->getBasePtr();
10679     MMO = LD->getMemOperand();
    // If the MMO suggests this isn't a load of a full vector, leave
    // things alone.  For a built-in, we have to make the change for
    // correctness, so if there is a size problem, that is a bug.
10683     if (MMO->getSize() < 16)
10684       return SDValue();
10685     break;
10686   }
10687   case ISD::INTRINSIC_W_CHAIN: {
10688     MemIntrinsicSDNode *Intrin = cast<MemIntrinsicSDNode>(N);
10689     Chain = Intrin->getChain();
10690     // Similarly to the store case below, Intrin->getBasePtr() doesn't get
10691     // us what we want. Get operand 2 instead.
10692     Base = Intrin->getOperand(2);
10693     MMO = Intrin->getMemOperand();
10694     break;
10695   }
10696   }
10697 
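  // Emit the load as lxvd2x, which loads the two doublewords in big-endian
  // order, and follow it with an xxswapd to restore the expected little-endian
  // element order.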
10698   MVT VecTy = N->getValueType(0).getSimpleVT();
10699   SDValue LoadOps[] = { Chain, Base };
10700   SDValue Load = DAG.getMemIntrinsicNode(PPCISD::LXVD2X, dl,
10701                                          DAG.getVTList(MVT::v2f64, MVT::Other),
10702                                          LoadOps, MVT::v2f64, MMO);
10703 
10704   DCI.AddToWorklist(Load.getNode());
10705   Chain = Load.getValue(1);
10706   SDValue Swap = DAG.getNode(
10707       PPCISD::XXSWAPD, dl, DAG.getVTList(MVT::v2f64, MVT::Other), Chain, Load);
10708   DCI.AddToWorklist(Swap.getNode());
10709 
10710   // Add a bitcast if the resulting load type doesn't match v2f64.
10711   if (VecTy != MVT::v2f64) {
10712     SDValue N = DAG.getNode(ISD::BITCAST, dl, VecTy, Swap);
10713     DCI.AddToWorklist(N.getNode());
10714     // Package {bitcast value, swap's chain} to match Load's shape.
10715     return DAG.getNode(ISD::MERGE_VALUES, dl, DAG.getVTList(VecTy, MVT::Other),
10716                        N, Swap.getValue(1));
10717   }
10718 
10719   return Swap;
10720 }
10721 
10722 // expandVSXStoreForLE - Convert VSX stores (which may be intrinsics for
10723 // builtins) into stores with swaps.
10724 SDValue PPCTargetLowering::expandVSXStoreForLE(SDNode *N,
10725                                                DAGCombinerInfo &DCI) const {
10726   SelectionDAG &DAG = DCI.DAG;
10727   SDLoc dl(N);
10728   SDValue Chain;
10729   SDValue Base;
10730   unsigned SrcOpnd;
10731   MachineMemOperand *MMO;
10732 
10733   switch (N->getOpcode()) {
10734   default:
10735     llvm_unreachable("Unexpected opcode for little endian VSX store");
10736   case ISD::STORE: {
10737     StoreSDNode *ST = cast<StoreSDNode>(N);
10738     Chain = ST->getChain();
10739     Base = ST->getBasePtr();
10740     MMO = ST->getMemOperand();
10741     SrcOpnd = 1;
    // If the MMO suggests this isn't a store of a full vector, leave
    // things alone.  For a built-in, we have to make the change for
    // correctness, so if there is a size problem, that is a bug.
10745     if (MMO->getSize() < 16)
10746       return SDValue();
10747     break;
10748   }
10749   case ISD::INTRINSIC_VOID: {
10750     MemIntrinsicSDNode *Intrin = cast<MemIntrinsicSDNode>(N);
10751     Chain = Intrin->getChain();
10752     // Intrin->getBasePtr() oddly does not get what we want.
10753     Base = Intrin->getOperand(3);
10754     MMO = Intrin->getMemOperand();
10755     SrcOpnd = 2;
10756     break;
10757   }
10758   }
10759 
10760   SDValue Src = N->getOperand(SrcOpnd);
10761   MVT VecTy = Src.getValueType().getSimpleVT();
10762 
  // All stores are done as v2f64, with a bitcast inserted if needed.
10764   if (VecTy != MVT::v2f64) {
10765     Src = DAG.getNode(ISD::BITCAST, dl, MVT::v2f64, Src);
10766     DCI.AddToWorklist(Src.getNode());
10767   }
10768 
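  // Swap the doublewords first; stxvd2x then stores them so that memory ends
  // up in the expected little-endian layout.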
10769   SDValue Swap = DAG.getNode(PPCISD::XXSWAPD, dl,
10770                              DAG.getVTList(MVT::v2f64, MVT::Other), Chain, Src);
10771   DCI.AddToWorklist(Swap.getNode());
10772   Chain = Swap.getValue(1);
10773   SDValue StoreOps[] = { Chain, Swap, Base };
10774   SDValue Store = DAG.getMemIntrinsicNode(PPCISD::STXVD2X, dl,
10775                                           DAG.getVTList(MVT::Other),
10776                                           StoreOps, VecTy, MMO);
10777   DCI.AddToWorklist(Store.getNode());
10778   return Store;
10779 }
10780 
10781 SDValue PPCTargetLowering::PerformDAGCombine(SDNode *N,
10782                                              DAGCombinerInfo &DCI) const {
10783   SelectionDAG &DAG = DCI.DAG;
10784   SDLoc dl(N);
10785   switch (N->getOpcode()) {
10786   default: break;
10787   case PPCISD::SHL:
10788     if (isNullConstant(N->getOperand(0))) // 0 << V -> 0.
10789         return N->getOperand(0);
10790     break;
10791   case PPCISD::SRL:
10792     if (isNullConstant(N->getOperand(0))) // 0 >>u V -> 0.
10793         return N->getOperand(0);
10794     break;
10795   case PPCISD::SRA:
10796     if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(N->getOperand(0))) {
10797       if (C->isNullValue() ||   //  0 >>s V -> 0.
10798           C->isAllOnesValue())    // -1 >>s V -> -1.
10799         return N->getOperand(0);
10800     }
10801     break;
10802   case ISD::SIGN_EXTEND:
10803   case ISD::ZERO_EXTEND:
10804   case ISD::ANY_EXTEND:
10805     return DAGCombineExtBoolTrunc(N, DCI);
10806   case ISD::TRUNCATE:
10807   case ISD::SETCC:
10808   case ISD::SELECT_CC:
10809     return DAGCombineTruncBoolExt(N, DCI);
10810   case ISD::SINT_TO_FP:
10811   case ISD::UINT_TO_FP:
10812     return combineFPToIntToFP(N, DCI);
10813   case ISD::STORE: {
10814     EVT Op1VT = N->getOperand(1).getValueType();
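    // An i32 result can be stored with stfiwx; with Power9 vector support,
    // i8 and i16 results can be stored directly from a VSR as well (via the
    // STXSIX node below).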
10815     bool ValidTypeForStoreFltAsInt = (Op1VT == MVT::i32) ||
10816       (Subtarget.hasP9Vector() && (Op1VT == MVT::i8 || Op1VT == MVT::i16));
10817 
10818     // Turn STORE (FP_TO_SINT F) -> STFIWX(FCTIWZ(F)).
10819     if (Subtarget.hasSTFIWX() && !cast<StoreSDNode>(N)->isTruncatingStore() &&
10820         N->getOperand(1).getOpcode() == ISD::FP_TO_SINT &&
10821         ValidTypeForStoreFltAsInt &&
10822         N->getOperand(1).getOperand(0).getValueType() != MVT::ppcf128) {
10823       SDValue Val = N->getOperand(1).getOperand(0);
10824       if (Val.getValueType() == MVT::f32) {
10825         Val = DAG.getNode(ISD::FP_EXTEND, dl, MVT::f64, Val);
10826         DCI.AddToWorklist(Val.getNode());
10827       }
10828       Val = DAG.getNode(PPCISD::FCTIWZ, dl, MVT::f64, Val);
10829       DCI.AddToWorklist(Val.getNode());
10830 
10831       if (Op1VT == MVT::i32) {
10832         SDValue Ops[] = {
10833           N->getOperand(0), Val, N->getOperand(2),
10834           DAG.getValueType(N->getOperand(1).getValueType())
10835         };
10836 
10837         Val = DAG.getMemIntrinsicNode(PPCISD::STFIWX, dl,
10838                 DAG.getVTList(MVT::Other), Ops,
10839                 cast<StoreSDNode>(N)->getMemoryVT(),
10840                 cast<StoreSDNode>(N)->getMemOperand());
10841       } else {
10842         unsigned WidthInBytes =
10843           N->getOperand(1).getValueType() == MVT::i8 ? 1 : 2;
10844         SDValue WidthConst = DAG.getIntPtrConstant(WidthInBytes, dl, false);
10845 
10846         SDValue Ops[] = {
10847           N->getOperand(0), Val, N->getOperand(2), WidthConst,
10848           DAG.getValueType(N->getOperand(1).getValueType())
10849         };
10850         Val = DAG.getMemIntrinsicNode(PPCISD::STXSIX, dl,
10851                                       DAG.getVTList(MVT::Other), Ops,
10852                                       cast<StoreSDNode>(N)->getMemoryVT(),
10853                                       cast<StoreSDNode>(N)->getMemOperand());
10854       }
10855 
10856       DCI.AddToWorklist(Val.getNode());
10857       return Val;
10858     }
10859 
10860     // Turn STORE (BSWAP) -> sthbrx/stwbrx.
10861     if (cast<StoreSDNode>(N)->isUnindexed() &&
10862         N->getOperand(1).getOpcode() == ISD::BSWAP &&
10863         N->getOperand(1).getNode()->hasOneUse() &&
10864         (N->getOperand(1).getValueType() == MVT::i32 ||
10865          N->getOperand(1).getValueType() == MVT::i16 ||
10866          (Subtarget.hasLDBRX() && Subtarget.isPPC64() &&
10867           N->getOperand(1).getValueType() == MVT::i64))) {
10868       SDValue BSwapOp = N->getOperand(1).getOperand(0);
10869       // Do an any-extend to 32-bits if this is a half-word input.
10870       if (BSwapOp.getValueType() == MVT::i16)
10871         BSwapOp = DAG.getNode(ISD::ANY_EXTEND, dl, MVT::i32, BSwapOp);
10872 
10873       SDValue Ops[] = {
10874         N->getOperand(0), BSwapOp, N->getOperand(2),
10875         DAG.getValueType(N->getOperand(1).getValueType())
10876       };
10877       return
10878         DAG.getMemIntrinsicNode(PPCISD::STBRX, dl, DAG.getVTList(MVT::Other),
10879                                 Ops, cast<StoreSDNode>(N)->getMemoryVT(),
10880                                 cast<StoreSDNode>(N)->getMemOperand());
10881     }
10882 
    // For little endian, VSX stores require generating xxswapd/stxvd2x.
10884     // Not needed on ISA 3.0 based CPUs since we have a non-permuting store.
10885     EVT VT = N->getOperand(1).getValueType();
10886     if (VT.isSimple()) {
10887       MVT StoreVT = VT.getSimpleVT();
10888       if (Subtarget.needsSwapsForVSXMemOps() &&
10889           (StoreVT == MVT::v2f64 || StoreVT == MVT::v2i64 ||
10890            StoreVT == MVT::v4f32 || StoreVT == MVT::v4i32))
10891         return expandVSXStoreForLE(N, DCI);
10892     }
10893     break;
10894   }
10895   case ISD::LOAD: {
10896     LoadSDNode *LD = cast<LoadSDNode>(N);
10897     EVT VT = LD->getValueType(0);
10898 
10899     // For little endian, VSX loads require generating lxvd2x/xxswapd.
10900     // Not needed on ISA 3.0 based CPUs since we have a non-permuting load.
10901     if (VT.isSimple()) {
10902       MVT LoadVT = VT.getSimpleVT();
10903       if (Subtarget.needsSwapsForVSXMemOps() &&
10904           (LoadVT == MVT::v2f64 || LoadVT == MVT::v2i64 ||
10905            LoadVT == MVT::v4f32 || LoadVT == MVT::v4i32))
10906         return expandVSXLoadForLE(N, DCI);
10907     }
10908 
10909     // We sometimes end up with a 64-bit integer load, from which we extract
10910     // two single-precision floating-point numbers. This happens with
10911     // std::complex<float>, and other similar structures, because of the way we
10912     // canonicalize structure copies. However, if we lack direct moves,
10913     // then the final bitcasts from the extracted integer values to the
10914     // floating-point numbers turn into store/load pairs. Even with direct moves,
10915     // just loading the two floating-point numbers is likely better.
10916     auto ReplaceTwoFloatLoad = [&]() {
10917       if (VT != MVT::i64)
10918         return false;
10919 
10920       if (LD->getExtensionType() != ISD::NON_EXTLOAD ||
10921           LD->isVolatile())
10922         return false;
10923 
10924       //  We're looking for a sequence like this:
10925       //  t13: i64,ch = load<LD8[%ref.tmp]> t0, t6, undef:i64
10926       //      t16: i64 = srl t13, Constant:i32<32>
10927       //    t17: i32 = truncate t16
10928       //  t18: f32 = bitcast t17
10929       //    t19: i32 = truncate t13
10930       //  t20: f32 = bitcast t19
10931 
10932       if (!LD->hasNUsesOfValue(2, 0))
10933         return false;
10934 
10935       auto UI = LD->use_begin();
10936       while (UI.getUse().getResNo() != 0) ++UI;
10937       SDNode *Trunc = *UI++;
10938       while (UI.getUse().getResNo() != 0) ++UI;
10939       SDNode *RightShift = *UI;
10940       if (Trunc->getOpcode() != ISD::TRUNCATE)
10941         std::swap(Trunc, RightShift);
10942 
10943       if (Trunc->getOpcode() != ISD::TRUNCATE ||
10944           Trunc->getValueType(0) != MVT::i32 ||
10945           !Trunc->hasOneUse())
10946         return false;
10947       if (RightShift->getOpcode() != ISD::SRL ||
10948           !isa<ConstantSDNode>(RightShift->getOperand(1)) ||
10949           RightShift->getConstantOperandVal(1) != 32 ||
10950           !RightShift->hasOneUse())
10951         return false;
10952 
10953       SDNode *Trunc2 = *RightShift->use_begin();
10954       if (Trunc2->getOpcode() != ISD::TRUNCATE ||
10955           Trunc2->getValueType(0) != MVT::i32 ||
10956           !Trunc2->hasOneUse())
10957         return false;
10958 
10959       SDNode *Bitcast = *Trunc->use_begin();
10960       SDNode *Bitcast2 = *Trunc2->use_begin();
10961 
10962       if (Bitcast->getOpcode() != ISD::BITCAST ||
10963           Bitcast->getValueType(0) != MVT::f32)
10964         return false;
10965       if (Bitcast2->getOpcode() != ISD::BITCAST ||
10966           Bitcast2->getValueType(0) != MVT::f32)
10967         return false;
10968 
10969       if (Subtarget.isLittleEndian())
10970         std::swap(Bitcast, Bitcast2);
10971 
10972       // Bitcast has the second float (in memory-layout order) and Bitcast2
10973       // has the first one.
10974 
10975       SDValue BasePtr = LD->getBasePtr();
10976       if (LD->isIndexed()) {
10977         assert(LD->getAddressingMode() == ISD::PRE_INC &&
10978                "Non-pre-inc AM on PPC?");
10979         BasePtr =
10980           DAG.getNode(ISD::ADD, dl, BasePtr.getValueType(), BasePtr,
10981                       LD->getOffset());
10982       }
10983 
10984       auto MMOFlags =
10985           LD->getMemOperand()->getFlags() & ~MachineMemOperand::MOVolatile;
10986       SDValue FloatLoad = DAG.getLoad(MVT::f32, dl, LD->getChain(), BasePtr,
10987                                       LD->getPointerInfo(), LD->getAlignment(),
10988                                       MMOFlags, LD->getAAInfo());
10989       SDValue AddPtr =
10990         DAG.getNode(ISD::ADD, dl, BasePtr.getValueType(),
10991                     BasePtr, DAG.getIntPtrConstant(4, dl));
10992       SDValue FloatLoad2 = DAG.getLoad(
10993           MVT::f32, dl, SDValue(FloatLoad.getNode(), 1), AddPtr,
10994           LD->getPointerInfo().getWithOffset(4),
10995           MinAlign(LD->getAlignment(), 4), MMOFlags, LD->getAAInfo());
10996 
10997       if (LD->isIndexed()) {
10998         // Note that DAGCombine should re-form any pre-increment load(s) from
10999         // what is produced here if that makes sense.
11000         DAG.ReplaceAllUsesOfValueWith(SDValue(LD, 1), BasePtr);
11001       }
11002 
11003       DCI.CombineTo(Bitcast2, FloatLoad);
11004       DCI.CombineTo(Bitcast, FloatLoad2);
11005 
11006       DAG.ReplaceAllUsesOfValueWith(SDValue(LD, LD->isIndexed() ? 2 : 1),
11007                                     SDValue(FloatLoad2.getNode(), 1));
11008       return true;
11009     };
11010 
11011     if (ReplaceTwoFloatLoad())
11012       return SDValue(N, 0);
11013 
11014     EVT MemVT = LD->getMemoryVT();
11015     Type *Ty = MemVT.getTypeForEVT(*DAG.getContext());
11016     unsigned ABIAlignment = DAG.getDataLayout().getABITypeAlignment(Ty);
11017     Type *STy = MemVT.getScalarType().getTypeForEVT(*DAG.getContext());
11018     unsigned ScalarABIAlignment = DAG.getDataLayout().getABITypeAlignment(STy);
11019     if (LD->isUnindexed() && VT.isVector() &&
11020         ((Subtarget.hasAltivec() && ISD::isNON_EXTLoad(N) &&
11021           // P8 and later hardware should just use LOAD.
11022           !Subtarget.hasP8Vector() && (VT == MVT::v16i8 || VT == MVT::v8i16 ||
11023                                        VT == MVT::v4i32 || VT == MVT::v4f32)) ||
11024          (Subtarget.hasQPX() && (VT == MVT::v4f64 || VT == MVT::v4f32) &&
11025           LD->getAlignment() >= ScalarABIAlignment)) &&
11026         LD->getAlignment() < ABIAlignment) {
11027       // This is a type-legal unaligned Altivec or QPX load.
11028       SDValue Chain = LD->getChain();
11029       SDValue Ptr = LD->getBasePtr();
11030       bool isLittleEndian = Subtarget.isLittleEndian();
11031 
11032       // This implements the loading of unaligned vectors as described in
11033       // the venerable Apple Velocity Engine overview. Specifically:
11034       // https://developer.apple.com/hardwaredrivers/ve/alignment.html
11035       // https://developer.apple.com/hardwaredrivers/ve/code_optimization.html
11036       //
11037       // The general idea is to expand a sequence of one or more unaligned
11038       // loads into an alignment-based permutation-control instruction (lvsl
11039       // or lvsr), a series of regular vector loads (which always truncate
11040       // their input address to an aligned address), and a series of
11041       // permutations.  The results of these permutations are the requested
11042       // loaded values.  The trick is that the last "extra" load is not taken
11043       // from the address you might suspect (sizeof(vector) bytes after the
11044       // last requested load), but rather sizeof(vector) - 1 bytes after the
11045       // last requested vector. The point of this is to avoid a page fault if
11046       // the base address happened to be aligned. This works because if the
11047       // base address is aligned, then adding less than a full vector length
11048       // will cause the last vector in the sequence to be (re)loaded.
      // Otherwise, the next vector will be fetched as you might expect.
11051 
11052       // We might be able to reuse the permutation generation from
11053       // a different base address offset from this one by an aligned amount.
11054       // The INTRINSIC_WO_CHAIN DAG combine will attempt to perform this
11055       // optimization later.
11056       Intrinsic::ID Intr, IntrLD, IntrPerm;
11057       MVT PermCntlTy, PermTy, LDTy;
11058       if (Subtarget.hasAltivec()) {
11059         Intr = isLittleEndian ?  Intrinsic::ppc_altivec_lvsr :
11060                                  Intrinsic::ppc_altivec_lvsl;
11061         IntrLD = Intrinsic::ppc_altivec_lvx;
11062         IntrPerm = Intrinsic::ppc_altivec_vperm;
11063         PermCntlTy = MVT::v16i8;
11064         PermTy = MVT::v4i32;
11065         LDTy = MVT::v4i32;
11066       } else {
11067         Intr =   MemVT == MVT::v4f64 ? Intrinsic::ppc_qpx_qvlpcld :
11068                                        Intrinsic::ppc_qpx_qvlpcls;
11069         IntrLD = MemVT == MVT::v4f64 ? Intrinsic::ppc_qpx_qvlfd :
11070                                        Intrinsic::ppc_qpx_qvlfs;
11071         IntrPerm = Intrinsic::ppc_qpx_qvfperm;
11072         PermCntlTy = MVT::v4f64;
11073         PermTy = MVT::v4f64;
11074         LDTy = MemVT.getSimpleVT();
11075       }
11076 
11077       SDValue PermCntl = BuildIntrinsicOp(Intr, Ptr, DAG, dl, PermCntlTy);
11078 
11079       // Create the new MMO for the new base load. It is like the original MMO,
11080       // but represents an area in memory almost twice the vector size centered
11081       // on the original address. If the address is unaligned, we might start
11082       // reading up to (sizeof(vector)-1) bytes below the address of the
11083       // original unaligned load.
11084       MachineFunction &MF = DAG.getMachineFunction();
11085       MachineMemOperand *BaseMMO =
11086         MF.getMachineMemOperand(LD->getMemOperand(),
11087                                 -(long)MemVT.getStoreSize()+1,
11088                                 2*MemVT.getStoreSize()-1);
11089 
11090       // Create the new base load.
11091       SDValue LDXIntID =
11092           DAG.getTargetConstant(IntrLD, dl, getPointerTy(MF.getDataLayout()));
11093       SDValue BaseLoadOps[] = { Chain, LDXIntID, Ptr };
11094       SDValue BaseLoad =
11095         DAG.getMemIntrinsicNode(ISD::INTRINSIC_W_CHAIN, dl,
11096                                 DAG.getVTList(PermTy, MVT::Other),
11097                                 BaseLoadOps, LDTy, BaseMMO);
11098 
11099       // Note that the value of IncOffset (which is provided to the next
11100       // load's pointer info offset value, and thus used to calculate the
11101       // alignment), and the value of IncValue (which is actually used to
11102       // increment the pointer value) are different! This is because we
11103       // require the next load to appear to be aligned, even though it
11104       // is actually offset from the base pointer by a lesser amount.
11105       int IncOffset = VT.getSizeInBits() / 8;
11106       int IncValue = IncOffset;
11107 
11108       // Walk (both up and down) the chain looking for another load at the real
11109       // (aligned) offset (the alignment of the other load does not matter in
11110       // this case). If found, then do not use the offset reduction trick, as
11111       // that will prevent the loads from being later combined (as they would
11112       // otherwise be duplicates).
11113       if (!findConsecutiveLoad(LD, DAG))
11114         --IncValue;
11115 
11116       SDValue Increment =
11117           DAG.getConstant(IncValue, dl, getPointerTy(MF.getDataLayout()));
11118       Ptr = DAG.getNode(ISD::ADD, dl, Ptr.getValueType(), Ptr, Increment);
11119 
11120       MachineMemOperand *ExtraMMO =
11121         MF.getMachineMemOperand(LD->getMemOperand(),
11122                                 1, 2*MemVT.getStoreSize()-1);
11123       SDValue ExtraLoadOps[] = { Chain, LDXIntID, Ptr };
11124       SDValue ExtraLoad =
11125         DAG.getMemIntrinsicNode(ISD::INTRINSIC_W_CHAIN, dl,
11126                                 DAG.getVTList(PermTy, MVT::Other),
11127                                 ExtraLoadOps, LDTy, ExtraMMO);
11128 
11129       SDValue TF = DAG.getNode(ISD::TokenFactor, dl, MVT::Other,
11130         BaseLoad.getValue(1), ExtraLoad.getValue(1));
11131 
11132       // Because vperm has a big-endian bias, we must reverse the order
11133       // of the input vectors and complement the permute control vector
11134       // when generating little endian code.  We have already handled the
11135       // latter by using lvsr instead of lvsl, so just reverse BaseLoad
11136       // and ExtraLoad here.
11137       SDValue Perm;
11138       if (isLittleEndian)
11139         Perm = BuildIntrinsicOp(IntrPerm,
11140                                 ExtraLoad, BaseLoad, PermCntl, DAG, dl);
11141       else
11142         Perm = BuildIntrinsicOp(IntrPerm,
11143                                 BaseLoad, ExtraLoad, PermCntl, DAG, dl);
11144 
11145       if (VT != PermTy)
11146         Perm = Subtarget.hasAltivec() ?
11147                  DAG.getNode(ISD::BITCAST, dl, VT, Perm) :
11148                  DAG.getNode(ISD::FP_ROUND, dl, VT, Perm, // QPX
11149                                DAG.getTargetConstant(1, dl, MVT::i64));
11150                                // second argument is 1 because this rounding
11151                                // is always exact.
11152 
11153       // The output of the permutation is our loaded result, the TokenFactor is
11154       // our new chain.
11155       DCI.CombineTo(N, Perm, TF);
11156       return SDValue(N, 0);
11157     }
    break;
  }
  case ISD::INTRINSIC_WO_CHAIN: {
    bool isLittleEndian = Subtarget.isLittleEndian();
    unsigned IID = cast<ConstantSDNode>(N->getOperand(0))->getZExtValue();
    Intrinsic::ID Intr = (isLittleEndian ? Intrinsic::ppc_altivec_lvsr
                                         : Intrinsic::ppc_altivec_lvsl);
    if ((IID == Intr ||
         IID == Intrinsic::ppc_qpx_qvlpcld ||
         IID == Intrinsic::ppc_qpx_qvlpcls) &&
        N->getOperand(1)->getOpcode() == ISD::ADD) {
      SDValue Add = N->getOperand(1);

      int Bits = IID == Intrinsic::ppc_qpx_qvlpcld ?
                 5 /* 32 byte alignment */ : 4 /* 16 byte alignment */;

      if (DAG.MaskedValueIsZero(Add->getOperand(1),
                                APInt::getAllOnesValue(Bits /* alignment */)
                                    .zext(Add.getScalarValueSizeInBits()))) {
        SDNode *BasePtr = Add->getOperand(0).getNode();
        for (SDNode::use_iterator UI = BasePtr->use_begin(),
                                  UE = BasePtr->use_end();
             UI != UE; ++UI) {
          if (UI->getOpcode() == ISD::INTRINSIC_WO_CHAIN &&
              cast<ConstantSDNode>(UI->getOperand(0))->getZExtValue() == IID) {
            // We've found another LVSL/LVSR, and this address is an aligned
            // multiple of that one. The results will be the same, so use the
            // one we've just found instead.

            return SDValue(*UI, 0);
          }
        }
      }

      if (isa<ConstantSDNode>(Add->getOperand(1))) {
        SDNode *BasePtr = Add->getOperand(0).getNode();
        for (SDNode::use_iterator UI = BasePtr->use_begin(),
             UE = BasePtr->use_end(); UI != UE; ++UI) {
          if (UI->getOpcode() == ISD::ADD &&
              isa<ConstantSDNode>(UI->getOperand(1)) &&
              (cast<ConstantSDNode>(Add->getOperand(1))->getZExtValue() -
               cast<ConstantSDNode>(UI->getOperand(1))->getZExtValue()) %
              (1ULL << Bits) == 0) {
            SDNode *OtherAdd = *UI;
            for (SDNode::use_iterator VI = OtherAdd->use_begin(),
                 VE = OtherAdd->use_end(); VI != VE; ++VI) {
              if (VI->getOpcode() == ISD::INTRINSIC_WO_CHAIN &&
                  cast<ConstantSDNode>(VI->getOperand(0))->getZExtValue() == IID) {
                return SDValue(*VI, 0);
              }
            }
          }
        }
      }
    }
    break;
  }
11216   case ISD::INTRINSIC_W_CHAIN: {
11217     // For little endian, VSX loads require generating lxvd2x/xxswapd.
11218     // Not needed on ISA 3.0 based CPUs since we have a non-permuting load.
11219     if (Subtarget.needsSwapsForVSXMemOps()) {
11220       switch (cast<ConstantSDNode>(N->getOperand(1))->getZExtValue()) {
11221       default:
11222         break;
11223       case Intrinsic::ppc_vsx_lxvw4x:
11224       case Intrinsic::ppc_vsx_lxvd2x:
11225         return expandVSXLoadForLE(N, DCI);
11226       }
11227     }
11228     break;
11229   }
11230   case ISD::INTRINSIC_VOID: {
11231     // For little endian, VSX stores require generating xxswapd/stxvd2x.
11232     // Not needed on ISA 3.0 based CPUs since we have a non-permuting store.
11233     if (Subtarget.needsSwapsForVSXMemOps()) {
11234       switch (cast<ConstantSDNode>(N->getOperand(1))->getZExtValue()) {
11235       default:
11236         break;
11237       case Intrinsic::ppc_vsx_stxvw4x:
11238       case Intrinsic::ppc_vsx_stxvd2x:
11239         return expandVSXStoreForLE(N, DCI);
11240       }
11241     }
11242     break;
11243   }
11244   case ISD::BSWAP:
11245     // Turn BSWAP (LOAD) -> lhbrx/lwbrx.
11246     if (ISD::isNON_EXTLoad(N->getOperand(0).getNode()) &&
11247         N->getOperand(0).hasOneUse() &&
11248         (N->getValueType(0) == MVT::i32 || N->getValueType(0) == MVT::i16 ||
11249          (Subtarget.hasLDBRX() && Subtarget.isPPC64() &&
11250           N->getValueType(0) == MVT::i64))) {
11251       SDValue Load = N->getOperand(0);
11252       LoadSDNode *LD = cast<LoadSDNode>(Load);
11253       // Create the byte-swapping load.
11254       SDValue Ops[] = {
11255         LD->getChain(),    // Chain
11256         LD->getBasePtr(),  // Ptr
11257         DAG.getValueType(N->getValueType(0)) // VT
11258       };
11259       SDValue BSLoad =
11260         DAG.getMemIntrinsicNode(PPCISD::LBRX, dl,
11261                                 DAG.getVTList(N->getValueType(0) == MVT::i64 ?
11262                                               MVT::i64 : MVT::i32, MVT::Other),
11263                                 Ops, LD->getMemoryVT(), LD->getMemOperand());
11264 
11265       // If this is an i16 load, insert the truncate.
11266       SDValue ResVal = BSLoad;
11267       if (N->getValueType(0) == MVT::i16)
11268         ResVal = DAG.getNode(ISD::TRUNCATE, dl, MVT::i16, BSLoad);
11269 
11270       // First, combine the bswap away.  This makes the value produced by the
11271       // load dead.
11272       DCI.CombineTo(N, ResVal);
11273 
11274       // Next, combine the load away; we give it a bogus result value but a real
11275       // chain result.  The result value is dead because the bswap is dead.
11276       DCI.CombineTo(Load.getNode(), ResVal, BSLoad.getValue(1));
11277 
11278       // Return N so it doesn't get rechecked!
11279       return SDValue(N, 0);
11280     }
11281 
11282     break;
11283   case PPCISD::VCMP: {
11284     // If a VCMPo node already exists with exactly the same operands as this
11285     // node, use its result instead of this node (VCMPo computes both a CR6 and
11286     // a normal output).
11287     //
11288     if (!N->getOperand(0).hasOneUse() &&
11289         !N->getOperand(1).hasOneUse() &&
11290         !N->getOperand(2).hasOneUse()) {
11291 
11292       // Scan all of the users of the LHS, looking for VCMPo's that match.
11293       SDNode *VCMPoNode = nullptr;
11294 
11295       SDNode *LHSN = N->getOperand(0).getNode();
11296       for (SDNode::use_iterator UI = LHSN->use_begin(), E = LHSN->use_end();
11297            UI != E; ++UI)
11298         if (UI->getOpcode() == PPCISD::VCMPo &&
11299             UI->getOperand(1) == N->getOperand(1) &&
11300             UI->getOperand(2) == N->getOperand(2) &&
11301             UI->getOperand(0) == N->getOperand(0)) {
11302           VCMPoNode = *UI;
11303           break;
11304         }
11305 
11306       // If there is no VCMPo node, or if its flag result is unused, don't
11307       // transform this.
11308       if (!VCMPoNode || VCMPoNode->hasNUsesOfValue(0, 1))
11309         break;
11310 
11311       // Look at the (necessarily single) use of the flag value.  If it has a
11312       // chain, this transformation is more complex.  Note that multiple things
11313       // could use the value result, which we should ignore.
11314       SDNode *FlagUser = nullptr;
11315       for (SDNode::use_iterator UI = VCMPoNode->use_begin();
11316            FlagUser == nullptr; ++UI) {
11317         assert(UI != VCMPoNode->use_end() && "Didn't find user!");
11318         SDNode *User = *UI;
11319         for (unsigned i = 0, e = User->getNumOperands(); i != e; ++i) {
11320           if (User->getOperand(i) == SDValue(VCMPoNode, 1)) {
11321             FlagUser = User;
11322             break;
11323           }
11324         }
11325       }
11326 
11327       // If the user is an MFOCRF instruction, we know this is safe.
11328       // Otherwise we give up for right now.
11329       if (FlagUser->getOpcode() == PPCISD::MFOCRF)
11330         return SDValue(VCMPoNode, 0);
11331     }
11332     break;
11333   }
11334   case ISD::BRCOND: {
11335     SDValue Cond = N->getOperand(1);
11336     SDValue Target = N->getOperand(2);
11337 
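    // A conditional branch on the CTR-decrement intrinsic can be selected
    // directly as a bdnz; rewire the intrinsic's chain and emit PPCISD::BDNZ.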
11338     if (Cond.getOpcode() == ISD::INTRINSIC_W_CHAIN &&
11339         cast<ConstantSDNode>(Cond.getOperand(1))->getZExtValue() ==
11340           Intrinsic::ppc_is_decremented_ctr_nonzero) {
11341 
11342       // We now need to make the intrinsic dead (it cannot be instruction
11343       // selected).
11344       DAG.ReplaceAllUsesOfValueWith(Cond.getValue(1), Cond.getOperand(0));
11345       assert(Cond.getNode()->hasOneUse() &&
11346              "Counter decrement has more than one use");
11347 
11348       return DAG.getNode(PPCISD::BDNZ, dl, MVT::Other,
11349                          N->getOperand(0), Target);
11350     }
11351   }
11352   break;
11353   case ISD::BR_CC: {
11354     // If this is a branch on an altivec predicate comparison, lower this so
11355     // that we don't have to do an MFOCRF: instead, branch directly on CR6.  This
11356     // lowering is done pre-legalize, because the legalizer lowers the predicate
11357     // compare down to code that is difficult to reassemble.
11358     ISD::CondCode CC = cast<CondCodeSDNode>(N->getOperand(1))->get();
11359     SDValue LHS = N->getOperand(2), RHS = N->getOperand(3);
11360 
11361     // Sometimes the promoted value of the intrinsic is ANDed with some non-zero
11362     // value. If so, look through the AND to get to the intrinsic.
11363     if (LHS.getOpcode() == ISD::AND &&
11364         LHS.getOperand(0).getOpcode() == ISD::INTRINSIC_W_CHAIN &&
11365         cast<ConstantSDNode>(LHS.getOperand(0).getOperand(1))->getZExtValue() ==
11366           Intrinsic::ppc_is_decremented_ctr_nonzero &&
11367         isa<ConstantSDNode>(LHS.getOperand(1)) &&
11368         !isNullConstant(LHS.getOperand(1)))
11369       LHS = LHS.getOperand(0);
11370 
11371     if (LHS.getOpcode() == ISD::INTRINSIC_W_CHAIN &&
11372         cast<ConstantSDNode>(LHS.getOperand(1))->getZExtValue() ==
11373           Intrinsic::ppc_is_decremented_ctr_nonzero &&
11374         isa<ConstantSDNode>(RHS)) {
11375       assert((CC == ISD::SETEQ || CC == ISD::SETNE) &&
11376              "Counter decrement comparison is not EQ or NE");
11377 
11378       unsigned Val = cast<ConstantSDNode>(RHS)->getZExtValue();
11379       bool isBDNZ = (CC == ISD::SETEQ && Val) ||
11380                     (CC == ISD::SETNE && !Val);
11381 
11382       // We now need to make the intrinsic dead (it cannot be instruction
11383       // selected).
11384       DAG.ReplaceAllUsesOfValueWith(LHS.getValue(1), LHS.getOperand(0));
11385       assert(LHS.getNode()->hasOneUse() &&
11386              "Counter decrement has more than one use");
11387 
11388       return DAG.getNode(isBDNZ ? PPCISD::BDNZ : PPCISD::BDZ, dl, MVT::Other,
11389                          N->getOperand(0), N->getOperand(4));
11390     }
11391 
11392     int CompareOpc;
11393     bool isDot;
11394 
11395     if (LHS.getOpcode() == ISD::INTRINSIC_WO_CHAIN &&
11396         isa<ConstantSDNode>(RHS) && (CC == ISD::SETEQ || CC == ISD::SETNE) &&
11397         getVectorCompareInfo(LHS, CompareOpc, isDot, Subtarget)) {
11398       assert(isDot && "Can't compare against a vector result!");
11399 
11400       // If this is a comparison against something other than 0/1, then we know
11401       // that the condition is never/always true.
11402       unsigned Val = cast<ConstantSDNode>(RHS)->getZExtValue();
11403       if (Val != 0 && Val != 1) {
11404         if (CC == ISD::SETEQ)      // Cond never true, remove branch.
11405           return N->getOperand(0);
11406         // Otherwise it's always true (SETNE); turn it into an unconditional branch.
11407         return DAG.getNode(ISD::BR, dl, MVT::Other,
11408                            N->getOperand(0), N->getOperand(4));
11409       }
11410 
11411       bool BranchOnWhenPredTrue = (CC == ISD::SETEQ) ^ (Val == 0);
11412 
11413       // Create the PPCISD altivec 'dot' comparison node.
11414       SDValue Ops[] = {
11415         LHS.getOperand(2),  // LHS of compare
11416         LHS.getOperand(3),  // RHS of compare
11417         DAG.getConstant(CompareOpc, dl, MVT::i32)
11418       };
11419       EVT VTs[] = { LHS.getOperand(2).getValueType(), MVT::Glue };
11420       SDValue CompNode = DAG.getNode(PPCISD::VCMPo, dl, VTs, Ops);
11421 
11422       // Unpack the result based on how the target uses it.
11423       PPC::Predicate CompOpc;
11424       switch (cast<ConstantSDNode>(LHS.getOperand(1))->getZExtValue()) {
11425       default:  // Can't happen, don't crash on invalid number though.
11426       case 0:   // Branch on the value of the EQ bit of CR6.
11427         CompOpc = BranchOnWhenPredTrue ? PPC::PRED_EQ : PPC::PRED_NE;
11428         break;
11429       case 1:   // Branch on the inverted value of the EQ bit of CR6.
11430         CompOpc = BranchOnWhenPredTrue ? PPC::PRED_NE : PPC::PRED_EQ;
11431         break;
11432       case 2:   // Branch on the value of the LT bit of CR6.
11433         CompOpc = BranchOnWhenPredTrue ? PPC::PRED_LT : PPC::PRED_GE;
11434         break;
11435       case 3:   // Branch on the inverted value of the LT bit of CR6.
11436         CompOpc = BranchOnWhenPredTrue ? PPC::PRED_GE : PPC::PRED_LT;
11437         break;
11438       }
11439 
11440       return DAG.getNode(PPCISD::COND_BRANCH, dl, MVT::Other, N->getOperand(0),
11441                          DAG.getConstant(CompOpc, dl, MVT::i32),
11442                          DAG.getRegister(PPC::CR6, MVT::i32),
11443                          N->getOperand(4), CompNode.getValue(1));
11444     }
11445     break;
11446   }
11447   case ISD::BUILD_VECTOR:
11448     return DAGCombineBuildVector(N, DCI);
11449   }
11450 
11451   return SDValue();
11452 }
11453 
11454 SDValue
11455 PPCTargetLowering::BuildSDIVPow2(SDNode *N, const APInt &Divisor,
11456                                   SelectionDAG &DAG,
11457                                   std::vector<SDNode *> *Created) const {
11458   // fold (sdiv X, pow2)
11459   EVT VT = N->getValueType(0);
11460   if (VT == MVT::i64 && !Subtarget.isPPC64())
11461     return SDValue();
11462   if ((VT != MVT::i32 && VT != MVT::i64) ||
11463       !(Divisor.isPowerOf2() || (-Divisor).isPowerOf2()))
11464     return SDValue();
11465 
11466   SDLoc DL(N);
11467   SDValue N0 = N->getOperand(0);
11468 
11469   bool IsNegPow2 = (-Divisor).isPowerOf2();
11470   unsigned Lg2 = (IsNegPow2 ? -Divisor : Divisor).countTrailingZeros();
11471   SDValue ShiftAmt = DAG.getConstant(Lg2, DL, VT);
11472 
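  // PPCISD::SRA_ADDZE is the srawi/sradi + addze idiom: the shift sets the
  // carry when a negative dividend has bits shifted out, and addze adds the
  // carry back so the quotient is rounded toward zero.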
11473   SDValue Op = DAG.getNode(PPCISD::SRA_ADDZE, DL, VT, N0, ShiftAmt);
11474   if (Created)
11475     Created->push_back(Op.getNode());
11476 
11477   if (IsNegPow2) {
11478     Op = DAG.getNode(ISD::SUB, DL, VT, DAG.getConstant(0, DL, VT), Op);
11479     if (Created)
11480       Created->push_back(Op.getNode());
11481   }
11482 
11483   return Op;
11484 }
11485 
11486 //===----------------------------------------------------------------------===//
11487 // Inline Assembly Support
11488 //===----------------------------------------------------------------------===//
11489 
11490 void PPCTargetLowering::computeKnownBitsForTargetNode(const SDValue Op,
11491                                                       APInt &KnownZero,
11492                                                       APInt &KnownOne,
11493                                                       const SelectionDAG &DAG,
11494                                                       unsigned Depth) const {
11495   KnownZero = KnownOne = APInt(KnownZero.getBitWidth(), 0);
11496   switch (Op.getOpcode()) {
11497   default: break;
11498   case PPCISD::LBRX: {
11499     // lhbrx is known to have the top bits cleared out.
11500     if (cast<VTSDNode>(Op.getOperand(2))->getVT() == MVT::i16)
11501       KnownZero = 0xFFFF0000;
11502     break;
11503   }
11504   case ISD::INTRINSIC_WO_CHAIN: {
11505     switch (cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue()) {
11506     default: break;
11507     case Intrinsic::ppc_altivec_vcmpbfp_p:
11508     case Intrinsic::ppc_altivec_vcmpeqfp_p:
11509     case Intrinsic::ppc_altivec_vcmpequb_p:
11510     case Intrinsic::ppc_altivec_vcmpequh_p:
11511     case Intrinsic::ppc_altivec_vcmpequw_p:
11512     case Intrinsic::ppc_altivec_vcmpequd_p:
11513     case Intrinsic::ppc_altivec_vcmpgefp_p:
11514     case Intrinsic::ppc_altivec_vcmpgtfp_p:
11515     case Intrinsic::ppc_altivec_vcmpgtsb_p:
11516     case Intrinsic::ppc_altivec_vcmpgtsh_p:
11517     case Intrinsic::ppc_altivec_vcmpgtsw_p:
11518     case Intrinsic::ppc_altivec_vcmpgtsd_p:
11519     case Intrinsic::ppc_altivec_vcmpgtub_p:
11520     case Intrinsic::ppc_altivec_vcmpgtuh_p:
11521     case Intrinsic::ppc_altivec_vcmpgtuw_p:
11522     case Intrinsic::ppc_altivec_vcmpgtud_p:
11523       KnownZero = ~1U;  // All bits but the low one are known to be zero.
11524       break;
11525     }
11526   }
11527   }
11528 }
11529 
11530 unsigned PPCTargetLowering::getPrefLoopAlignment(MachineLoop *ML) const {
11531   switch (Subtarget.getDarwinDirective()) {
11532   default: break;
11533   case PPC::DIR_970:
11534   case PPC::DIR_PWR4:
11535   case PPC::DIR_PWR5:
11536   case PPC::DIR_PWR5X:
11537   case PPC::DIR_PWR6:
11538   case PPC::DIR_PWR6X:
11539   case PPC::DIR_PWR7:
11540   case PPC::DIR_PWR8:
11541   case PPC::DIR_PWR9: {
11542     if (!ML)
11543       break;
11544 
11545     const PPCInstrInfo *TII = Subtarget.getInstrInfo();
11546 
11547     // For small loops (between 5 and 8 instructions), align to a 32-byte
11548     // boundary so that the entire loop fits in one instruction-cache line.
11549     uint64_t LoopSize = 0;
11550     for (auto I = ML->block_begin(), IE = ML->block_end(); I != IE; ++I)
11551       for (auto J = (*I)->begin(), JE = (*I)->end(); J != JE; ++J) {
11552         LoopSize += TII->getInstSizeInBytes(*J);
11553         if (LoopSize > 32)
11554           break;
11555       }
11556 
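    // The preferred alignment is expressed as a log2 value, so returning 5
    // requests the 32-byte boundary described above.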
11557     if (LoopSize > 16 && LoopSize <= 32)
11558       return 5;
11559 
11560     break;
11561   }
11562   }
11563 
11564   return TargetLowering::getPrefLoopAlignment(ML);
11565 }
11566 
11567 /// getConstraintType - Given a constraint, return the type of
11568 /// constraint it is for this target.
11569 PPCTargetLowering::ConstraintType
11570 PPCTargetLowering::getConstraintType(StringRef Constraint) const {
11571   if (Constraint.size() == 1) {
11572     switch (Constraint[0]) {
11573     default: break;
11574     case 'b':
11575     case 'r':
11576     case 'f':
11577     case 'd':
11578     case 'v':
11579     case 'y':
11580       return C_RegisterClass;
11581     case 'Z':
11582       // FIXME: While Z does indicate a memory constraint, it specifically
11583       // indicates an r+r address (used in conjunction with the 'y' modifier
11584       // in the replacement string). Currently, we're forcing the base
11585       // register to be r0 in the asm printer (which is interpreted as zero)
11586       // and forming the complete address in the second register. This is
11587       // suboptimal.
11588       return C_Memory;
11589     }
11590   } else if (Constraint == "wc") { // individual CR bits.
11591     return C_RegisterClass;
11592   } else if (Constraint == "wa" || Constraint == "wd" ||
11593              Constraint == "wf" || Constraint == "ws") {
11594     return C_RegisterClass; // VSX registers.
11595   }
11596   return TargetLowering::getConstraintType(Constraint);
11597 }
11598 
11599 /// Examine constraint type and operand type and determine a weight value.
11600 /// This object must already have been set up with the operand type
11601 /// and the current alternative constraint selected.
11602 TargetLowering::ConstraintWeight
11603 PPCTargetLowering::getSingleConstraintMatchWeight(
11604     AsmOperandInfo &info, const char *constraint) const {
11605   ConstraintWeight weight = CW_Invalid;
11606   Value *CallOperandVal = info.CallOperandVal;
11607   // If we don't have a value, we can't do a match,
11608   // but allow it at the lowest weight.
11609   if (!CallOperandVal)
11610     return CW_Default;
11611   Type *type = CallOperandVal->getType();
11612 
11613   // Look at the constraint type.
11614   if (StringRef(constraint) == "wc" && type->isIntegerTy(1))
11615     return CW_Register; // an individual CR bit.
11616   else if ((StringRef(constraint) == "wa" ||
11617             StringRef(constraint) == "wd" ||
11618             StringRef(constraint) == "wf") &&
11619            type->isVectorTy())
11620     return CW_Register;
11621   else if (StringRef(constraint) == "ws" && type->isDoubleTy())
11622     return CW_Register;
11623 
11624   switch (*constraint) {
11625   default:
11626     weight = TargetLowering::getSingleConstraintMatchWeight(info, constraint);
11627     break;
11628   case 'b':
11629     if (type->isIntegerTy())
11630       weight = CW_Register;
11631     break;
11632   case 'f':
11633     if (type->isFloatTy())
11634       weight = CW_Register;
11635     break;
11636   case 'd':
11637     if (type->isDoubleTy())
11638       weight = CW_Register;
11639     break;
11640   case 'v':
11641     if (type->isVectorTy())
11642       weight = CW_Register;
11643     break;
11644   case 'y':
11645     weight = CW_Register;
11646     break;
11647   case 'Z':
11648     weight = CW_Memory;
11649     break;
11650   }
11651   return weight;
11652 }
11653 
11654 std::pair<unsigned, const TargetRegisterClass *>
11655 PPCTargetLowering::getRegForInlineAsmConstraint(const TargetRegisterInfo *TRI,
11656                                                 StringRef Constraint,
11657                                                 MVT VT) const {
11658   if (Constraint.size() == 1) {
11659     // GCC RS6000 Constraint Letters
11660     switch (Constraint[0]) {
11661     case 'b':   // R1-R31
11662       if (VT == MVT::i64 && Subtarget.isPPC64())
11663         return std::make_pair(0U, &PPC::G8RC_NOX0RegClass);
11664       return std::make_pair(0U, &PPC::GPRC_NOR0RegClass);
11665     case 'r':   // R0-R31
11666       if (VT == MVT::i64 && Subtarget.isPPC64())
11667         return std::make_pair(0U, &PPC::G8RCRegClass);
11668       return std::make_pair(0U, &PPC::GPRCRegClass);
11669     // 'd' and 'f' constraints are both defined to be "the floating point
11670     // registers", where one is for 32-bit and the other for 64-bit. We don't
11671     // really care overly much here so just give them all the same reg classes.
11672     case 'd':
11673     case 'f':
11674       if (VT == MVT::f32 || VT == MVT::i32)
11675         return std::make_pair(0U, &PPC::F4RCRegClass);
11676       if (VT == MVT::f64 || VT == MVT::i64)
11677         return std::make_pair(0U, &PPC::F8RCRegClass);
11678       if (VT == MVT::v4f64 && Subtarget.hasQPX())
11679         return std::make_pair(0U, &PPC::QFRCRegClass);
11680       if (VT == MVT::v4f32 && Subtarget.hasQPX())
11681         return std::make_pair(0U, &PPC::QSRCRegClass);
11682       break;
11683     case 'v':
11684       if (VT == MVT::v4f64 && Subtarget.hasQPX())
11685         return std::make_pair(0U, &PPC::QFRCRegClass);
11686       if (VT == MVT::v4f32 && Subtarget.hasQPX())
11687         return std::make_pair(0U, &PPC::QSRCRegClass);
11688       if (Subtarget.hasAltivec())
11689         return std::make_pair(0U, &PPC::VRRCRegClass);
      break;
11690     case 'y':   // crrc
11691       return std::make_pair(0U, &PPC::CRRCRegClass);
11692     }
11693   } else if (Constraint == "wc" && Subtarget.useCRBits()) {
11694     // An individual CR bit.
11695     return std::make_pair(0U, &PPC::CRBITRCRegClass);
11696   } else if ((Constraint == "wa" || Constraint == "wd" ||
11697              Constraint == "wf") && Subtarget.hasVSX()) {
11698     return std::make_pair(0U, &PPC::VSRCRegClass);
11699   } else if (Constraint == "ws" && Subtarget.hasVSX()) {
11700     if (VT == MVT::f32 && Subtarget.hasP8Vector())
11701       return std::make_pair(0U, &PPC::VSSRCRegClass);
11702     else
11703       return std::make_pair(0U, &PPC::VSFRCRegClass);
11704   }
11705 
11706   std::pair<unsigned, const TargetRegisterClass *> R =
11707       TargetLowering::getRegForInlineAsmConstraint(TRI, Constraint, VT);
11708 
11709   // r[0-9]+ are used, on PPC64, to refer to the corresponding 64-bit registers
11710   // (which we call X[0-9]+). If a 64-bit value has been requested, and a
11711   // 32-bit GPR has been selected, then 'upgrade' it to the 64-bit parent
11712   // register.
11713   // FIXME: If TargetLowering::getRegForInlineAsmConstraint could somehow use
11714   // the AsmName field from *RegisterInfo.td, then this would not be necessary.
11715   if (R.first && VT == MVT::i64 && Subtarget.isPPC64() &&
11716       PPC::GPRCRegClass.contains(R.first))
11717     return std::make_pair(TRI->getMatchingSuperReg(R.first,
11718                             PPC::sub_32, &PPC::G8RCRegClass),
11719                           &PPC::G8RCRegClass);
11720 
11721   // GCC accepts 'cc' as an alias for 'cr0', and we need to do the same.
11722   if (!R.second && StringRef("{cc}").equals_lower(Constraint)) {
11723     R.first = PPC::CR0;
11724     R.second = &PPC::CRRCRegClass;
11725   }
11726 
11727   return R;
11728 }
11729 
11730 /// LowerAsmOperandForConstraint - Lower the specified operand into the Ops
11731 /// vector.  If it is invalid, don't add anything to Ops.
11732 void PPCTargetLowering::LowerAsmOperandForConstraint(SDValue Op,
11733                                                      std::string &Constraint,
11734                                                      std::vector<SDValue>&Ops,
11735                                                      SelectionDAG &DAG) const {
11736   SDValue Result;
11737 
11738   // Only support length 1 constraints.
11739   if (Constraint.length() > 1) return;
11740 
11741   char Letter = Constraint[0];
11742   switch (Letter) {
11743   default: break;
11744   case 'I':
11745   case 'J':
11746   case 'K':
11747   case 'L':
11748   case 'M':
11749   case 'N':
11750   case 'O':
11751   case 'P': {
11752     ConstantSDNode *CST = dyn_cast<ConstantSDNode>(Op);
11753     if (!CST) return; // Must be an immediate to match.
11754     SDLoc dl(Op);
11755     int64_t Value = CST->getSExtValue();
11756     EVT TCVT = MVT::i64; // All constants taken to be 64 bits so that negative
11757                          // numbers are printed as such.
11758     switch (Letter) {
11759     default: llvm_unreachable("Unknown constraint letter!");
11760     case 'I':  // "I" is a signed 16-bit constant.
11761       if (isInt<16>(Value))
11762         Result = DAG.getTargetConstant(Value, dl, TCVT);
11763       break;
11764     case 'J':  // "J" is a constant with only the high-order 16 bits nonzero.
11765       if (isShiftedUInt<16, 16>(Value))
11766         Result = DAG.getTargetConstant(Value, dl, TCVT);
11767       break;
11768     case 'L':  // "L" is a signed 16-bit constant shifted left 16 bits.
11769       if (isShiftedInt<16, 16>(Value))
11770         Result = DAG.getTargetConstant(Value, dl, TCVT);
11771       break;
11772     case 'K':  // "K" is a constant with only the low-order 16 bits nonzero.
11773       if (isUInt<16>(Value))
11774         Result = DAG.getTargetConstant(Value, dl, TCVT);
11775       break;
11776     case 'M':  // "M" is a constant that is greater than 31.
11777       if (Value > 31)
11778         Result = DAG.getTargetConstant(Value, dl, TCVT);
11779       break;
11780     case 'N':  // "N" is a positive constant that is an exact power of two.
11781       if (Value > 0 && isPowerOf2_64(Value))
11782         Result = DAG.getTargetConstant(Value, dl, TCVT);
11783       break;
11784     case 'O':  // "O" is the constant zero.
11785       if (Value == 0)
11786         Result = DAG.getTargetConstant(Value, dl, TCVT);
11787       break;
11788     case 'P':  // "P" is a constant whose negation is a signed 16-bit constant.
11789       if (isInt<16>(-Value))
11790         Result = DAG.getTargetConstant(Value, dl, TCVT);
11791       break;
11792     }
11793     break;
11794   }
11795   }
11796 
11797   if (Result.getNode()) {
11798     Ops.push_back(Result);
11799     return;
11800   }
11801 
11802   // Handle standard constraint letters.
11803   TargetLowering::LowerAsmOperandForConstraint(Op, Constraint, Ops, DAG);
11804 }
11805 
11806 // isLegalAddressingMode - Return true if the addressing mode represented
11807 // by AM is legal for this target, for a load/store of the specified type.
11808 bool PPCTargetLowering::isLegalAddressingMode(const DataLayout &DL,
11809                                               const AddrMode &AM, Type *Ty,
11810                                               unsigned AS) const {
11811   // PPC does not allow r+i addressing modes for vectors!
11812   if (Ty->isVectorTy() && AM.BaseOffs != 0)
11813     return false;
11814 
11815   // PPC allows a sign-extended 16-bit immediate field.
11816   if (AM.BaseOffs <= -(1LL << 16) || AM.BaseOffs >= (1LL << 16)-1)
11817     return false;
11818 
11819   // No global is ever allowed as a base.
11820   if (AM.BaseGV)
11821     return false;
11822 
11823   // PPC only supports r+r addressing.
11824   switch (AM.Scale) {
11825   case 0:  // "r+i" or just "i", depending on HasBaseReg.
11826     break;
11827   case 1:
11828     if (AM.HasBaseReg && AM.BaseOffs)  // "r+r+i" is not allowed.
11829       return false;
11830     // Otherwise we have r+r or r+i.
11831     break;
11832   case 2:
11833     if (AM.HasBaseReg || AM.BaseOffs)  // 2*r+r  or  2*r+i is not allowed.
11834       return false;
11835     // Allow 2*r as r+r.
11836     break;
11837   default:
11838     // No other scales are supported.
11839     return false;
11840   }
11841 
11842   return true;
11843 }
11844 
11845 SDValue PPCTargetLowering::LowerRETURNADDR(SDValue Op,
11846                                            SelectionDAG &DAG) const {
11847   MachineFunction &MF = DAG.getMachineFunction();
11848   MachineFrameInfo &MFI = MF.getFrameInfo();
11849   MFI.setReturnAddressIsTaken(true);
11850 
11851   if (verifyReturnAddressArgumentIsConstant(Op, DAG))
11852     return SDValue();
11853 
11854   SDLoc dl(Op);
11855   unsigned Depth = cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue();
11856 
11857   // Make sure the function does not optimize away the store of the RA to
11858   // the stack.
11859   PPCFunctionInfo *FuncInfo = MF.getInfo<PPCFunctionInfo>();
11860   FuncInfo->setLRStoreRequired();
11861   bool isPPC64 = Subtarget.isPPC64();
11862   auto PtrVT = getPointerTy(MF.getDataLayout());
11863 
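  // For a non-zero depth, compute the address of the requested ancestor frame
  // and load the saved LR from the ABI's return-address slot in that frame.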
11864   if (Depth > 0) {
11865     SDValue FrameAddr = LowerFRAMEADDR(Op, DAG);
11866     SDValue Offset =
11867         DAG.getConstant(Subtarget.getFrameLowering()->getReturnSaveOffset(), dl,
11868                         isPPC64 ? MVT::i64 : MVT::i32);
11869     return DAG.getLoad(PtrVT, dl, DAG.getEntryNode(),
11870                        DAG.getNode(ISD::ADD, dl, PtrVT, FrameAddr, Offset),
11871                        MachinePointerInfo());
11872   }
11873 
11874   // Just load the return address off the stack.
11875   SDValue RetAddrFI = getReturnAddrFrameIndex(DAG);
11876   return DAG.getLoad(PtrVT, dl, DAG.getEntryNode(), RetAddrFI,
11877                      MachinePointerInfo());
11878 }
11879 
11880 SDValue PPCTargetLowering::LowerFRAMEADDR(SDValue Op,
11881                                           SelectionDAG &DAG) const {
11882   SDLoc dl(Op);
11883   unsigned Depth = cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue();
11884 
11885   MachineFunction &MF = DAG.getMachineFunction();
11886   MachineFrameInfo &MFI = MF.getFrameInfo();
11887   MFI.setFrameAddressIsTaken(true);
11888 
11889   EVT PtrVT = getPointerTy(MF.getDataLayout());
11890   bool isPPC64 = PtrVT == MVT::i64;
11891 
11892   // Naked functions never have a frame pointer, and so we use r1. For all
11893   // other functions, this decision must be delayed until PEI.
11894   unsigned FrameReg;
11895   if (MF.getFunction()->hasFnAttribute(Attribute::Naked))
11896     FrameReg = isPPC64 ? PPC::X1 : PPC::R1;
11897   else
11898     FrameReg = isPPC64 ? PPC::FP8 : PPC::FP;
11899 
11900   SDValue FrameAddr = DAG.getCopyFromReg(DAG.getEntryNode(), dl, FrameReg,
11901                                          PtrVT);
11902   while (Depth--)
11903     FrameAddr = DAG.getLoad(Op.getValueType(), dl, DAG.getEntryNode(),
11904                             FrameAddr, MachinePointerInfo());
11905   return FrameAddr;
11906 }
11907 
11908 // FIXME? Maybe this could be a TableGen attribute on some registers and
11909 // this table could be generated automatically from RegInfo.
11910 unsigned PPCTargetLowering::getRegisterByName(const char* RegName, EVT VT,
11911                                               SelectionDAG &DAG) const {
11912   bool isPPC64 = Subtarget.isPPC64();
11913   bool isDarwinABI = Subtarget.isDarwinABI();
11914 
11915   if ((isPPC64 && VT != MVT::i64 && VT != MVT::i32) ||
11916       (!isPPC64 && VT != MVT::i32))
11917     report_fatal_error("Invalid register global variable type");
11918 
11919   bool is64Bit = isPPC64 && VT == MVT::i64;
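  // Only a handful of registers may be used as named global register
  // variables: r1 is always accepted, r2 only on 32-bit non-Darwin targets,
  // and r13 everywhere except 32-bit Darwin; anything else is rejected below.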
11920   unsigned Reg = StringSwitch<unsigned>(RegName)
11921                    .Case("r1", is64Bit ? PPC::X1 : PPC::R1)
11922                    .Case("r2", (isDarwinABI || isPPC64) ? 0 : PPC::R2)
11923                    .Case("r13", (!isPPC64 && isDarwinABI) ? 0 :
11924                                   (is64Bit ? PPC::X13 : PPC::R13))
11925                    .Default(0);
11926 
11927   if (Reg)
11928     return Reg;
11929   report_fatal_error("Invalid register name global variable");
11930 }
11931 
11932 bool
11933 PPCTargetLowering::isOffsetFoldingLegal(const GlobalAddressSDNode *GA) const {
11934   // The PowerPC target isn't yet aware of offsets.
11935   return false;
11936 }
11937 
11938 bool PPCTargetLowering::getTgtMemIntrinsic(IntrinsicInfo &Info,
11939                                            const CallInst &I,
11940                                            unsigned Intrinsic) const {
11941 
11942   switch (Intrinsic) {
11943   case Intrinsic::ppc_qpx_qvlfd:
11944   case Intrinsic::ppc_qpx_qvlfs:
11945   case Intrinsic::ppc_qpx_qvlfcd:
11946   case Intrinsic::ppc_qpx_qvlfcs:
11947   case Intrinsic::ppc_qpx_qvlfiwa:
11948   case Intrinsic::ppc_qpx_qvlfiwz:
11949   case Intrinsic::ppc_altivec_lvx:
11950   case Intrinsic::ppc_altivec_lvxl:
11951   case Intrinsic::ppc_altivec_lvebx:
11952   case Intrinsic::ppc_altivec_lvehx:
11953   case Intrinsic::ppc_altivec_lvewx:
11954   case Intrinsic::ppc_vsx_lxvd2x:
11955   case Intrinsic::ppc_vsx_lxvw4x: {
11956     EVT VT;
11957     switch (Intrinsic) {
11958     case Intrinsic::ppc_altivec_lvebx:
11959       VT = MVT::i8;
11960       break;
11961     case Intrinsic::ppc_altivec_lvehx:
11962       VT = MVT::i16;
11963       break;
11964     case Intrinsic::ppc_altivec_lvewx:
11965       VT = MVT::i32;
11966       break;
11967     case Intrinsic::ppc_vsx_lxvd2x:
11968       VT = MVT::v2f64;
11969       break;
11970     case Intrinsic::ppc_qpx_qvlfd:
11971       VT = MVT::v4f64;
11972       break;
11973     case Intrinsic::ppc_qpx_qvlfs:
11974       VT = MVT::v4f32;
11975       break;
11976     case Intrinsic::ppc_qpx_qvlfcd:
11977       VT = MVT::v2f64;
11978       break;
11979     case Intrinsic::ppc_qpx_qvlfcs:
11980       VT = MVT::v2f32;
11981       break;
11982     default:
11983       VT = MVT::v4i32;
11984       break;
11985     }
11986 
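    // The pointer may not be the exact address accessed (lvx-style addressing
    // truncates the low-order address bits), so describe the access
    // conservatively: it may begin up to size-1 bytes below the pointer and
    // span 2*size-1 bytes.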
11987     Info.opc = ISD::INTRINSIC_W_CHAIN;
11988     Info.memVT = VT;
11989     Info.ptrVal = I.getArgOperand(0);
11990     Info.offset = -VT.getStoreSize()+1;
11991     Info.size = 2*VT.getStoreSize()-1;
11992     Info.align = 1;
11993     Info.vol = false;
11994     Info.readMem = true;
11995     Info.writeMem = false;
11996     return true;
11997   }
11998   case Intrinsic::ppc_qpx_qvlfda:
11999   case Intrinsic::ppc_qpx_qvlfsa:
12000   case Intrinsic::ppc_qpx_qvlfcda:
12001   case Intrinsic::ppc_qpx_qvlfcsa:
12002   case Intrinsic::ppc_qpx_qvlfiwaa:
12003   case Intrinsic::ppc_qpx_qvlfiwza: {
12004     EVT VT;
12005     switch (Intrinsic) {
12006     case Intrinsic::ppc_qpx_qvlfda:
12007       VT = MVT::v4f64;
12008       break;
12009     case Intrinsic::ppc_qpx_qvlfsa:
12010       VT = MVT::v4f32;
12011       break;
12012     case Intrinsic::ppc_qpx_qvlfcda:
12013       VT = MVT::v2f64;
12014       break;
12015     case Intrinsic::ppc_qpx_qvlfcsa:
12016       VT = MVT::v2f32;
12017       break;
12018     default:
12019       VT = MVT::v4i32;
12020       break;
12021     }
12022 
12023     Info.opc = ISD::INTRINSIC_W_CHAIN;
12024     Info.memVT = VT;
12025     Info.ptrVal = I.getArgOperand(0);
12026     Info.offset = 0;
12027     Info.size = VT.getStoreSize();
12028     Info.align = 1;
12029     Info.vol = false;
12030     Info.readMem = true;
12031     Info.writeMem = false;
12032     return true;
12033   }
12034   case Intrinsic::ppc_qpx_qvstfd:
12035   case Intrinsic::ppc_qpx_qvstfs:
12036   case Intrinsic::ppc_qpx_qvstfcd:
12037   case Intrinsic::ppc_qpx_qvstfcs:
12038   case Intrinsic::ppc_qpx_qvstfiw:
12039   case Intrinsic::ppc_altivec_stvx:
12040   case Intrinsic::ppc_altivec_stvxl:
12041   case Intrinsic::ppc_altivec_stvebx:
12042   case Intrinsic::ppc_altivec_stvehx:
12043   case Intrinsic::ppc_altivec_stvewx:
12044   case Intrinsic::ppc_vsx_stxvd2x:
12045   case Intrinsic::ppc_vsx_stxvw4x: {
12046     EVT VT;
12047     switch (Intrinsic) {
12048     case Intrinsic::ppc_altivec_stvebx:
12049       VT = MVT::i8;
12050       break;
12051     case Intrinsic::ppc_altivec_stvehx:
12052       VT = MVT::i16;
12053       break;
12054     case Intrinsic::ppc_altivec_stvewx:
12055       VT = MVT::i32;
12056       break;
12057     case Intrinsic::ppc_vsx_stxvd2x:
12058       VT = MVT::v2f64;
12059       break;
12060     case Intrinsic::ppc_qpx_qvstfd:
12061       VT = MVT::v4f64;
12062       break;
12063     case Intrinsic::ppc_qpx_qvstfs:
12064       VT = MVT::v4f32;
12065       break;
12066     case Intrinsic::ppc_qpx_qvstfcd:
12067       VT = MVT::v2f64;
12068       break;
12069     case Intrinsic::ppc_qpx_qvstfcs:
12070       VT = MVT::v2f32;
12071       break;
12072     default:
12073       VT = MVT::v4i32;
12074       break;
12075     }
12076 
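    // Same conservative window as the loads above: the store may touch any
    // byte of the naturally aligned region containing the pointer.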
12077     Info.opc = ISD::INTRINSIC_VOID;
12078     Info.memVT = VT;
12079     Info.ptrVal = I.getArgOperand(1);
12080     Info.offset = -VT.getStoreSize()+1;
12081     Info.size = 2*VT.getStoreSize()-1;
12082     Info.align = 1;
12083     Info.vol = false;
12084     Info.readMem = false;
12085     Info.writeMem = true;
12086     return true;
12087   }
12088   case Intrinsic::ppc_qpx_qvstfda:
12089   case Intrinsic::ppc_qpx_qvstfsa:
12090   case Intrinsic::ppc_qpx_qvstfcda:
12091   case Intrinsic::ppc_qpx_qvstfcsa:
12092   case Intrinsic::ppc_qpx_qvstfiwa: {
12093     EVT VT;
12094     switch (Intrinsic) {
12095     case Intrinsic::ppc_qpx_qvstfda:
12096       VT = MVT::v4f64;
12097       break;
12098     case Intrinsic::ppc_qpx_qvstfsa:
12099       VT = MVT::v4f32;
12100       break;
12101     case Intrinsic::ppc_qpx_qvstfcda:
12102       VT = MVT::v2f64;
12103       break;
12104     case Intrinsic::ppc_qpx_qvstfcsa:
12105       VT = MVT::v2f32;
12106       break;
12107     default:
12108       VT = MVT::v4i32;
12109       break;
12110     }
12111 
12112     Info.opc = ISD::INTRINSIC_VOID;
12113     Info.memVT = VT;
12114     Info.ptrVal = I.getArgOperand(1);
12115     Info.offset = 0;
12116     Info.size = VT.getStoreSize();
12117     Info.align = 1;
12118     Info.vol = false;
12119     Info.readMem = false;
12120     Info.writeMem = true;
12121     return true;
12122   }
12123   default:
12124     break;
12125   }
12126 
12127   return false;
12128 }
12129 
12130 /// getOptimalMemOpType - Returns the target specific optimal type for load
12131 /// and store operations as a result of memset, memcpy, and memmove
12132 /// lowering. If DstAlign is zero, that means it's safe to assume the
12133 /// destination alignment can satisfy any constraint. Similarly, if SrcAlign
12134 /// is zero, there is no need to check it against an alignment requirement,
12135 /// probably because the source does not need to be loaded. If 'IsMemset' is
12136 /// true, that means it's expanding a memset. If 'ZeroMemset' is true, that
12137 /// means it's a memset of zero. 'MemcpyStrSrc' indicates whether the memcpy
12138 /// source is constant so it does not need to be loaded.
12139 /// It returns EVT::Other if the type should be determined using generic
12140 /// target-independent logic.
12141 EVT PPCTargetLowering::getOptimalMemOpType(uint64_t Size,
12142                                            unsigned DstAlign, unsigned SrcAlign,
12143                                            bool IsMemset, bool ZeroMemset,
12144                                            bool MemcpyStrSrc,
12145                                            MachineFunction &MF) const {
12146   if (getTargetMachine().getOptLevel() != CodeGenOpt::None) {
12147     const Function *F = MF.getFunction();
12148     // When expanding a memset, require at least two QPX instructions to cover
12149     // the cost of loading the value to be stored from the constant pool.
12150     if (Subtarget.hasQPX() && Size >= 32 && (!IsMemset || Size >= 64) &&
12151        (!SrcAlign || SrcAlign >= 32) && (!DstAlign || DstAlign >= 32) &&
12152         !F->hasFnAttribute(Attribute::NoImplicitFloat)) {
12153       return MVT::v4f64;
12154     }
12155 
12156     // We should use Altivec/VSX loads and stores when available. For unaligned
12157     // addresses, unaligned VSX loads are only fast starting with the P8.
12158     if (Subtarget.hasAltivec() && Size >= 16 &&
12159         (((!SrcAlign || SrcAlign >= 16) && (!DstAlign || DstAlign >= 16)) ||
12160          ((IsMemset && Subtarget.hasVSX()) || Subtarget.hasP8Vector())))
12161       return MVT::v4i32;
12162   }
12163 
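  // Otherwise fall back to GPR-sized accesses: 64-bit chunks on PPC64 and
  // 32-bit chunks elsewhere.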
12164   if (Subtarget.isPPC64()) {
12165     return MVT::i64;
12166   }
12167 
12168   return MVT::i32;
12169 }
12170 
12171 /// \brief Returns true if it is beneficial to convert a load of a constant
12172 /// to just the constant itself.
12173 bool PPCTargetLowering::shouldConvertConstantLoadToIntImm(const APInt &Imm,
12174                                                           Type *Ty) const {
12175   assert(Ty->isIntegerTy());
12176 
12177   unsigned BitSize = Ty->getPrimitiveSizeInBits();
12178   return !(BitSize == 0 || BitSize > 64);
12179 }
12180 
12181 bool PPCTargetLowering::isTruncateFree(Type *Ty1, Type *Ty2) const {
12182   if (!Ty1->isIntegerTy() || !Ty2->isIntegerTy())
12183     return false;
12184   unsigned NumBits1 = Ty1->getPrimitiveSizeInBits();
12185   unsigned NumBits2 = Ty2->getPrimitiveSizeInBits();
12186   return NumBits1 == 64 && NumBits2 == 32;
12187 }
12188 
12189 bool PPCTargetLowering::isTruncateFree(EVT VT1, EVT VT2) const {
12190   if (!VT1.isInteger() || !VT2.isInteger())
12191     return false;
12192   unsigned NumBits1 = VT1.getSizeInBits();
12193   unsigned NumBits2 = VT2.getSizeInBits();
12194   return NumBits1 == 64 && NumBits2 == 32;
12195 }
12196 
12197 bool PPCTargetLowering::isZExtFree(SDValue Val, EVT VT2) const {
12198   // Generally speaking, zexts are not free, but they are free when they can be
12199   // folded with other operations.
12200   if (LoadSDNode *LD = dyn_cast<LoadSDNode>(Val)) {
12201     EVT MemVT = LD->getMemoryVT();
12202     if ((MemVT == MVT::i1 || MemVT == MVT::i8 || MemVT == MVT::i16 ||
12203          (Subtarget.isPPC64() && MemVT == MVT::i32)) &&
12204         (LD->getExtensionType() == ISD::NON_EXTLOAD ||
12205          LD->getExtensionType() == ISD::ZEXTLOAD))
12206       return true;
12207   }
12208 
12209   // FIXME: Add other cases...
12210   //  - 32-bit shifts with a zext to i64
12211   //  - zext after ctlz, bswap, etc.
12212   //  - zext after and by a constant mask
12213 
12214   return TargetLowering::isZExtFree(Val, VT2);
12215 }
12216 
12217 bool PPCTargetLowering::isFPExtFree(EVT VT) const {
12218   assert(VT.isFloatingPoint());
12219   return true;
12220 }
12221 
12222 bool PPCTargetLowering::isLegalICmpImmediate(int64_t Imm) const {
12223   return isInt<16>(Imm) || isUInt<16>(Imm);
12224 }
12225 
12226 bool PPCTargetLowering::isLegalAddImmediate(int64_t Imm) const {
12227   return isInt<16>(Imm) || isUInt<16>(Imm);
12228 }
12229 
12230 bool PPCTargetLowering::allowsMisalignedMemoryAccesses(EVT VT,
12231                                                        unsigned,
12232                                                        unsigned,
12233                                                        bool *Fast) const {
12234   if (DisablePPCUnaligned)
12235     return false;
12236 
12237   // PowerPC supports unaligned memory access for simple non-vector types.
12238   // Although accessing unaligned addresses is not as efficient as accessing
12239   // aligned addresses, it is generally more efficient than manual expansion,
12240   // and generally only traps for software emulation when crossing page
12241   // boundaries.
12242 
12243   if (!VT.isSimple())
12244     return false;
12245 
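  // Unaligned vector accesses are only supported through VSX, and only for
  // the 128-bit vector types it handles directly.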
12246   if (VT.getSimpleVT().isVector()) {
12247     if (Subtarget.hasVSX()) {
12248       if (VT != MVT::v2f64 && VT != MVT::v2i64 &&
12249           VT != MVT::v4f32 && VT != MVT::v4i32)
12250         return false;
12251     } else {
12252       return false;
12253     }
12254   }
12255 
12256   if (VT == MVT::ppcf128)
12257     return false;
12258 
12259   if (Fast)
12260     *Fast = true;
12261 
12262   return true;
12263 }
12264 
12265 bool PPCTargetLowering::isFMAFasterThanFMulAndFAdd(EVT VT) const {
12266   VT = VT.getScalarType();
12267 
12268   if (!VT.isSimple())
12269     return false;
12270 
12271   switch (VT.getSimpleVT().SimpleTy) {
12272   case MVT::f32:
12273   case MVT::f64:
12274     return true;
12275   default:
12276     break;
12277   }
12278 
12279   return false;
12280 }
12281 
12282 const MCPhysReg *
12283 PPCTargetLowering::getScratchRegisters(CallingConv::ID) const {
12284   // LR is a callee-save register, but we must treat it as clobbered by any call
12285   // site. Hence we include LR in the scratch registers, which are in turn added
12286   // as implicit-defs for stackmaps and patchpoints. The same reasoning applies
12287   // to CTR, which is used by any indirect call.
12288   static const MCPhysReg ScratchRegs[] = {
12289     PPC::X12, PPC::LR8, PPC::CTR8, 0
12290   };
12291 
12292   return ScratchRegs;
12293 }
12294 
12295 unsigned PPCTargetLowering::getExceptionPointerRegister(
12296     const Constant *PersonalityFn) const {
12297   return Subtarget.isPPC64() ? PPC::X3 : PPC::R3;
12298 }
12299 
12300 unsigned PPCTargetLowering::getExceptionSelectorRegister(
12301     const Constant *PersonalityFn) const {
12302   return Subtarget.isPPC64() ? PPC::X4 : PPC::R4;
12303 }
12304 
12305 bool
12306 PPCTargetLowering::shouldExpandBuildVectorWithShuffles(
12307                      EVT VT , unsigned DefinedValues) const {
12308   if (VT == MVT::v2i64)
12309     return Subtarget.hasDirectMove(); // Don't need stack ops with direct moves
12310 
12311   if (Subtarget.hasVSX() || Subtarget.hasQPX())
12312     return true;
12313 
12314   return TargetLowering::shouldExpandBuildVectorWithShuffles(VT, DefinedValues);
12315 }
12316 
12317 Sched::Preference PPCTargetLowering::getSchedulingPreference(SDNode *N) const {
12318   if (DisableILPPref || Subtarget.enableMachineScheduler())
12319     return TargetLowering::getSchedulingPreference(N);
12320 
12321   return Sched::ILP;
12322 }
12323 
12324 // Create a fast isel object.
12325 FastISel *
12326 PPCTargetLowering::createFastISel(FunctionLoweringInfo &FuncInfo,
12327                                   const TargetLibraryInfo *LibInfo) const {
12328   return PPC::createFastISel(FuncInfo, LibInfo);
12329 }
12330 
12331 void PPCTargetLowering::initializeSplitCSR(MachineBasicBlock *Entry) const {
12332   if (Subtarget.isDarwinABI()) return;
12333   if (!Subtarget.isPPC64()) return;
12334 
12335   // Update IsSplitCSR in PPCFunctionInfo
12336   PPCFunctionInfo *PFI = Entry->getParent()->getInfo<PPCFunctionInfo>();
12337   PFI->setIsSplitCSR(true);
12338 }
12339 
12340 void PPCTargetLowering::insertCopiesSplitCSR(
12341   MachineBasicBlock *Entry,
12342   const SmallVectorImpl<MachineBasicBlock *> &Exits) const {
12343   const PPCRegisterInfo *TRI = Subtarget.getRegisterInfo();
12344   const MCPhysReg *IStart = TRI->getCalleeSavedRegsViaCopy(Entry->getParent());
12345   if (!IStart)
12346     return;
12347 
12348   const TargetInstrInfo *TII = Subtarget.getInstrInfo();
12349   MachineRegisterInfo *MRI = &Entry->getParent()->getRegInfo();
12350   MachineBasicBlock::iterator MBBI = Entry->begin();
12351   for (const MCPhysReg *I = IStart; *I; ++I) {
12352     const TargetRegisterClass *RC = nullptr;
12353     if (PPC::G8RCRegClass.contains(*I))
12354       RC = &PPC::G8RCRegClass;
12355     else if (PPC::F8RCRegClass.contains(*I))
12356       RC = &PPC::F8RCRegClass;
12357     else if (PPC::CRRCRegClass.contains(*I))
12358       RC = &PPC::CRRCRegClass;
12359     else if (PPC::VRRCRegClass.contains(*I))
12360       RC = &PPC::VRRCRegClass;
12361     else
12362       llvm_unreachable("Unexpected register class in CSRsViaCopy!");
12363 
12364     unsigned NewVR = MRI->createVirtualRegister(RC);
12365     // Create copy from CSR to a virtual register.
12366     // FIXME: this currently does not emit CFI pseudo-instructions, it works
12367     // fine for CXX_FAST_TLS since the C++-style TLS access functions should be
12368     // nounwind. If we want to generalize this later, we may need to emit
12369     // CFI pseudo-instructions.
12370     assert(Entry->getParent()->getFunction()->hasFnAttribute(
12371              Attribute::NoUnwind) &&
12372            "Function should be nounwind in insertCopiesSplitCSR!");
12373     Entry->addLiveIn(*I);
12374     BuildMI(*Entry, MBBI, DebugLoc(), TII->get(TargetOpcode::COPY), NewVR)
12375       .addReg(*I);
12376 
12377     // Insert the copy-back instructions right before the terminator.
12378     for (auto *Exit : Exits)
12379       BuildMI(*Exit, Exit->getFirstTerminator(), DebugLoc(),
12380               TII->get(TargetOpcode::COPY), *I)
12381         .addReg(NewVR);
12382   }
12383 }
12384 
12385 // Override to enable LOAD_STACK_GUARD lowering on Linux.
12386 bool PPCTargetLowering::useLoadStackGuardNode() const {
12387   if (!Subtarget.isTargetLinux())
12388     return TargetLowering::useLoadStackGuardNode();
12389   return true;
12390 }
12391 
12392 // Override to skip the default SSP guard variable declarations on Linux,
// where the guard is accessed via LOAD_STACK_GUARD instead.
12393 void PPCTargetLowering::insertSSPDeclarations(Module &M) const {
12394   if (!Subtarget.isTargetLinux())
12395     return TargetLowering::insertSSPDeclarations(M);
12396 }
12397