1 //===-- PPCISelLowering.cpp - PPC DAG Lowering Implementation -------------===//
2 //
3 //                     The LLVM Compiler Infrastructure
4 //
5 // This file is distributed under the University of Illinois Open Source
6 // License. See LICENSE.TXT for details.
7 //
8 //===----------------------------------------------------------------------===//
9 //
10 // This file implements the PPCISelLowering class.
11 //
12 //===----------------------------------------------------------------------===//
13 
14 #include "PPCISelLowering.h"
15 #include "MCTargetDesc/PPCPredicates.h"
16 #include "PPC.h"
17 #include "PPCCCState.h"
18 #include "PPCCallingConv.h"
19 #include "PPCFrameLowering.h"
20 #include "PPCInstrInfo.h"
21 #include "PPCMachineFunctionInfo.h"
22 #include "PPCPerfectShuffle.h"
23 #include "PPCRegisterInfo.h"
24 #include "PPCSubtarget.h"
25 #include "PPCTargetMachine.h"
26 #include "llvm/ADT/APFloat.h"
27 #include "llvm/ADT/APInt.h"
28 #include "llvm/ADT/ArrayRef.h"
29 #include "llvm/ADT/DenseMap.h"
30 #include "llvm/ADT/None.h"
31 #include "llvm/ADT/STLExtras.h"
32 #include "llvm/ADT/SmallPtrSet.h"
33 #include "llvm/ADT/SmallSet.h"
34 #include "llvm/ADT/SmallVector.h"
35 #include "llvm/ADT/Statistic.h"
36 #include "llvm/ADT/StringRef.h"
37 #include "llvm/ADT/StringSwitch.h"
38 #include "llvm/CodeGen/CallingConvLower.h"
39 #include "llvm/CodeGen/ISDOpcodes.h"
40 #include "llvm/CodeGen/MachineBasicBlock.h"
41 #include "llvm/CodeGen/MachineFrameInfo.h"
42 #include "llvm/CodeGen/MachineFunction.h"
43 #include "llvm/CodeGen/MachineInstr.h"
44 #include "llvm/CodeGen/MachineInstrBuilder.h"
45 #include "llvm/CodeGen/MachineJumpTableInfo.h"
46 #include "llvm/CodeGen/MachineLoopInfo.h"
47 #include "llvm/CodeGen/MachineMemOperand.h"
48 #include "llvm/CodeGen/MachineOperand.h"
49 #include "llvm/CodeGen/MachineRegisterInfo.h"
50 #include "llvm/CodeGen/RuntimeLibcalls.h"
51 #include "llvm/CodeGen/SelectionDAG.h"
52 #include "llvm/CodeGen/SelectionDAGNodes.h"
53 #include "llvm/CodeGen/TargetInstrInfo.h"
54 #include "llvm/CodeGen/TargetLowering.h"
55 #include "llvm/CodeGen/TargetRegisterInfo.h"
56 #include "llvm/CodeGen/ValueTypes.h"
57 #include "llvm/IR/CallSite.h"
58 #include "llvm/IR/CallingConv.h"
59 #include "llvm/IR/Constant.h"
60 #include "llvm/IR/Constants.h"
61 #include "llvm/IR/DataLayout.h"
62 #include "llvm/IR/DebugLoc.h"
63 #include "llvm/IR/DerivedTypes.h"
64 #include "llvm/IR/Function.h"
65 #include "llvm/IR/GlobalValue.h"
66 #include "llvm/IR/IRBuilder.h"
67 #include "llvm/IR/Instructions.h"
68 #include "llvm/IR/Intrinsics.h"
69 #include "llvm/IR/Module.h"
70 #include "llvm/IR/Type.h"
71 #include "llvm/IR/Use.h"
72 #include "llvm/IR/Value.h"
73 #include "llvm/MC/MCExpr.h"
74 #include "llvm/MC/MCRegisterInfo.h"
75 #include "llvm/Support/AtomicOrdering.h"
76 #include "llvm/Support/BranchProbability.h"
77 #include "llvm/Support/Casting.h"
78 #include "llvm/Support/CodeGen.h"
79 #include "llvm/Support/CommandLine.h"
80 #include "llvm/Support/Compiler.h"
81 #include "llvm/Support/Debug.h"
82 #include "llvm/Support/ErrorHandling.h"
83 #include "llvm/Support/Format.h"
84 #include "llvm/Support/KnownBits.h"
85 #include "llvm/Support/MachineValueType.h"
86 #include "llvm/Support/MathExtras.h"
87 #include "llvm/Support/raw_ostream.h"
88 #include "llvm/Target/TargetMachine.h"
89 #include "llvm/Target/TargetOptions.h"
90 #include <algorithm>
91 #include <cassert>
92 #include <cstdint>
93 #include <iterator>
94 #include <list>
95 #include <utility>
96 #include <vector>
97 
98 using namespace llvm;
99 
100 #define DEBUG_TYPE "ppc-lowering"
101 
102 static cl::opt<bool> DisablePPCPreinc("disable-ppc-preinc",
103 cl::desc("disable preincrement load/store generation on PPC"), cl::Hidden);
104 
105 static cl::opt<bool> DisableILPPref("disable-ppc-ilp-pref",
106 cl::desc("disable setting the node scheduling preference to ILP on PPC"), cl::Hidden);
107 
108 static cl::opt<bool> DisablePPCUnaligned("disable-ppc-unaligned",
109 cl::desc("disable unaligned load/store generation on PPC"), cl::Hidden);
110 
111 static cl::opt<bool> DisableSCO("disable-ppc-sco",
112 cl::desc("disable sibling call optimization on ppc"), cl::Hidden);
113 
114 static cl::opt<bool> EnableQuadPrecision("enable-ppc-quad-precision",
115 cl::desc("enable quad precision float support on ppc"), cl::Hidden);
116 
117 STATISTIC(NumTailCalls, "Number of tail calls");
118 STATISTIC(NumSiblingCalls, "Number of sibling calls");
119 
120 static bool isNByteElemShuffleMask(ShuffleVectorSDNode *, unsigned, int);
121 
122 // FIXME: Remove this once the bug has been fixed!
123 extern cl::opt<bool> ANDIGlueBug;
124 
125 PPCTargetLowering::PPCTargetLowering(const PPCTargetMachine &TM,
126                                      const PPCSubtarget &STI)
127     : TargetLowering(TM), Subtarget(STI) {
128   // Use _setjmp/_longjmp instead of setjmp/longjmp.
129   setUseUnderscoreSetJmp(true);
130   setUseUnderscoreLongJmp(true);
131 
132   // On PPC32/64, arguments smaller than 4/8 bytes are extended, so all
133   // arguments are at least 4/8 bytes aligned.
134   bool isPPC64 = Subtarget.isPPC64();
135   setMinStackArgumentAlignment(isPPC64 ? 8:4);
136 
137   // Set up the register classes.
138   addRegisterClass(MVT::i32, &PPC::GPRCRegClass);
139   if (!useSoftFloat()) {
140     if (hasSPE()) {
141       addRegisterClass(MVT::f32, &PPC::SPE4RCRegClass);
142       addRegisterClass(MVT::f64, &PPC::SPERCRegClass);
143     } else {
144       addRegisterClass(MVT::f32, &PPC::F4RCRegClass);
145       addRegisterClass(MVT::f64, &PPC::F8RCRegClass);
146     }
147   }
148 
149   // Match BITREVERSE to customized fast code sequence in the td file.
150   setOperationAction(ISD::BITREVERSE, MVT::i32, Legal);
151   setOperationAction(ISD::BITREVERSE, MVT::i64, Legal);
152 
153   // Sub-word ATOMIC_CMP_SWAP need to ensure that the input is zero-extended.
154   setOperationAction(ISD::ATOMIC_CMP_SWAP, MVT::i32, Custom);
155 
156   // PowerPC has an i16 but no i8 (or i1) SEXTLOAD.
157   for (MVT VT : MVT::integer_valuetypes()) {
158     setLoadExtAction(ISD::SEXTLOAD, VT, MVT::i1, Promote);
159     setLoadExtAction(ISD::SEXTLOAD, VT, MVT::i8, Expand);
160   }
161 
162   setTruncStoreAction(MVT::f64, MVT::f32, Expand);
163 
164   // PowerPC has pre-inc load and store's.
165   setIndexedLoadAction(ISD::PRE_INC, MVT::i1, Legal);
166   setIndexedLoadAction(ISD::PRE_INC, MVT::i8, Legal);
167   setIndexedLoadAction(ISD::PRE_INC, MVT::i16, Legal);
168   setIndexedLoadAction(ISD::PRE_INC, MVT::i32, Legal);
169   setIndexedLoadAction(ISD::PRE_INC, MVT::i64, Legal);
170   setIndexedStoreAction(ISD::PRE_INC, MVT::i1, Legal);
171   setIndexedStoreAction(ISD::PRE_INC, MVT::i8, Legal);
172   setIndexedStoreAction(ISD::PRE_INC, MVT::i16, Legal);
173   setIndexedStoreAction(ISD::PRE_INC, MVT::i32, Legal);
174   setIndexedStoreAction(ISD::PRE_INC, MVT::i64, Legal);
175   if (!Subtarget.hasSPE()) {
176     setIndexedLoadAction(ISD::PRE_INC, MVT::f32, Legal);
177     setIndexedLoadAction(ISD::PRE_INC, MVT::f64, Legal);
178     setIndexedStoreAction(ISD::PRE_INC, MVT::f32, Legal);
179     setIndexedStoreAction(ISD::PRE_INC, MVT::f64, Legal);
180   }
181 
182   // PowerPC uses ADDC/ADDE/SUBC/SUBE to propagate carry.
183   const MVT ScalarIntVTs[] = { MVT::i32, MVT::i64 };
184   for (MVT VT : ScalarIntVTs) {
185     setOperationAction(ISD::ADDC, VT, Legal);
186     setOperationAction(ISD::ADDE, VT, Legal);
187     setOperationAction(ISD::SUBC, VT, Legal);
188     setOperationAction(ISD::SUBE, VT, Legal);
189   }
190 
191   if (Subtarget.useCRBits()) {
192     setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i1, Expand);
193 
194     if (isPPC64 || Subtarget.hasFPCVT()) {
195       setOperationAction(ISD::SINT_TO_FP, MVT::i1, Promote);
196       AddPromotedToType (ISD::SINT_TO_FP, MVT::i1,
197                          isPPC64 ? MVT::i64 : MVT::i32);
198       setOperationAction(ISD::UINT_TO_FP, MVT::i1, Promote);
199       AddPromotedToType(ISD::UINT_TO_FP, MVT::i1,
200                         isPPC64 ? MVT::i64 : MVT::i32);
201     } else {
202       setOperationAction(ISD::SINT_TO_FP, MVT::i1, Custom);
203       setOperationAction(ISD::UINT_TO_FP, MVT::i1, Custom);
204     }
205 
206     // PowerPC does not support direct load/store of condition registers.
207     setOperationAction(ISD::LOAD, MVT::i1, Custom);
208     setOperationAction(ISD::STORE, MVT::i1, Custom);
209 
210     // FIXME: Remove this once the ANDI glue bug is fixed:
211     if (ANDIGlueBug)
212       setOperationAction(ISD::TRUNCATE, MVT::i1, Custom);
213 
214     for (MVT VT : MVT::integer_valuetypes()) {
215       setLoadExtAction(ISD::SEXTLOAD, VT, MVT::i1, Promote);
216       setLoadExtAction(ISD::ZEXTLOAD, VT, MVT::i1, Promote);
217       setTruncStoreAction(VT, MVT::i1, Expand);
218     }
219 
220     addRegisterClass(MVT::i1, &PPC::CRBITRCRegClass);
221   }
222 
223   // Expand ppcf128 to i32 by hand for the benefit of llvm-gcc bootstrap on
224   // PPC (the libcall is not available).
225   setOperationAction(ISD::FP_TO_SINT, MVT::ppcf128, Custom);
226   setOperationAction(ISD::FP_TO_UINT, MVT::ppcf128, Custom);
227 
228   // We do not currently implement these libm ops for PowerPC.
229   setOperationAction(ISD::FFLOOR, MVT::ppcf128, Expand);
230   setOperationAction(ISD::FCEIL,  MVT::ppcf128, Expand);
231   setOperationAction(ISD::FTRUNC, MVT::ppcf128, Expand);
232   setOperationAction(ISD::FRINT,  MVT::ppcf128, Expand);
233   setOperationAction(ISD::FNEARBYINT, MVT::ppcf128, Expand);
234   setOperationAction(ISD::FREM, MVT::ppcf128, Expand);
235 
236   // PowerPC has no SREM/UREM instructions unless we are on P9
237   // On P9 we may use a hardware instruction to compute the remainder.
238   // The instructions are not legalized directly because in the cases where the
239   // result of both the remainder and the division is required it is more
240   // efficient to compute the remainder from the result of the division rather
241   // than use the remainder instruction.
242   if (Subtarget.isISA3_0()) {
243     setOperationAction(ISD::SREM, MVT::i32, Custom);
244     setOperationAction(ISD::UREM, MVT::i32, Custom);
245     setOperationAction(ISD::SREM, MVT::i64, Custom);
246     setOperationAction(ISD::UREM, MVT::i64, Custom);
247   } else {
248     setOperationAction(ISD::SREM, MVT::i32, Expand);
249     setOperationAction(ISD::UREM, MVT::i32, Expand);
250     setOperationAction(ISD::SREM, MVT::i64, Expand);
251     setOperationAction(ISD::UREM, MVT::i64, Expand);
252   }
253 
254   if (Subtarget.hasP9Vector()) {
255     setOperationAction(ISD::ABS, MVT::v4i32, Legal);
256     setOperationAction(ISD::ABS, MVT::v8i16, Legal);
257     setOperationAction(ISD::ABS, MVT::v16i8, Legal);
258   }
259 
260   // Don't use SMUL_LOHI/UMUL_LOHI or SDIVREM/UDIVREM to lower SREM/UREM.
261   setOperationAction(ISD::UMUL_LOHI, MVT::i32, Expand);
262   setOperationAction(ISD::SMUL_LOHI, MVT::i32, Expand);
263   setOperationAction(ISD::UMUL_LOHI, MVT::i64, Expand);
264   setOperationAction(ISD::SMUL_LOHI, MVT::i64, Expand);
265   setOperationAction(ISD::UDIVREM, MVT::i32, Expand);
266   setOperationAction(ISD::SDIVREM, MVT::i32, Expand);
267   setOperationAction(ISD::UDIVREM, MVT::i64, Expand);
268   setOperationAction(ISD::SDIVREM, MVT::i64, Expand);
269 
270   // We don't support sin/cos/sqrt/fmod/pow
271   setOperationAction(ISD::FSIN , MVT::f64, Expand);
272   setOperationAction(ISD::FCOS , MVT::f64, Expand);
273   setOperationAction(ISD::FSINCOS, MVT::f64, Expand);
274   setOperationAction(ISD::FREM , MVT::f64, Expand);
275   setOperationAction(ISD::FPOW , MVT::f64, Expand);
276   setOperationAction(ISD::FSIN , MVT::f32, Expand);
277   setOperationAction(ISD::FCOS , MVT::f32, Expand);
278   setOperationAction(ISD::FSINCOS, MVT::f32, Expand);
279   setOperationAction(ISD::FREM , MVT::f32, Expand);
280   setOperationAction(ISD::FPOW , MVT::f32, Expand);
281   if (Subtarget.hasSPE()) {
282     setOperationAction(ISD::FMA  , MVT::f64, Expand);
283     setOperationAction(ISD::FMA  , MVT::f32, Expand);
284   } else {
285     setOperationAction(ISD::FMA  , MVT::f64, Legal);
286     setOperationAction(ISD::FMA  , MVT::f32, Legal);
287   }
288 
289   setOperationAction(ISD::FLT_ROUNDS_, MVT::i32, Custom);
290 
291   // If we're enabling GP optimizations, use hardware square root
292   if (!Subtarget.hasFSQRT() &&
293       !(TM.Options.UnsafeFPMath && Subtarget.hasFRSQRTE() &&
294         Subtarget.hasFRE()))
295     setOperationAction(ISD::FSQRT, MVT::f64, Expand);
296 
297   if (!Subtarget.hasFSQRT() &&
298       !(TM.Options.UnsafeFPMath && Subtarget.hasFRSQRTES() &&
299         Subtarget.hasFRES()))
300     setOperationAction(ISD::FSQRT, MVT::f32, Expand);
301 
302   if (Subtarget.hasFCPSGN()) {
303     setOperationAction(ISD::FCOPYSIGN, MVT::f64, Legal);
304     setOperationAction(ISD::FCOPYSIGN, MVT::f32, Legal);
305   } else {
306     setOperationAction(ISD::FCOPYSIGN, MVT::f64, Expand);
307     setOperationAction(ISD::FCOPYSIGN, MVT::f32, Expand);
308   }
309 
310   if (Subtarget.hasFPRND()) {
311     setOperationAction(ISD::FFLOOR, MVT::f64, Legal);
312     setOperationAction(ISD::FCEIL,  MVT::f64, Legal);
313     setOperationAction(ISD::FTRUNC, MVT::f64, Legal);
314     setOperationAction(ISD::FROUND, MVT::f64, Legal);
315 
316     setOperationAction(ISD::FFLOOR, MVT::f32, Legal);
317     setOperationAction(ISD::FCEIL,  MVT::f32, Legal);
318     setOperationAction(ISD::FTRUNC, MVT::f32, Legal);
319     setOperationAction(ISD::FROUND, MVT::f32, Legal);
320   }
321 
322   // PowerPC does not have BSWAP, but we can use vector BSWAP instruction xxbrd
323   // to speed up scalar BSWAP64.
324   // CTPOP or CTTZ were introduced in P8/P9 respectively
325   setOperationAction(ISD::BSWAP, MVT::i32  , Expand);
326   if (Subtarget.isISA3_0()) {
327     setOperationAction(ISD::BSWAP, MVT::i64  , Custom);
328     setOperationAction(ISD::CTTZ , MVT::i32  , Legal);
329     setOperationAction(ISD::CTTZ , MVT::i64  , Legal);
330   } else {
331     setOperationAction(ISD::BSWAP, MVT::i64  , Expand);
332     setOperationAction(ISD::CTTZ , MVT::i32  , Expand);
333     setOperationAction(ISD::CTTZ , MVT::i64  , Expand);
334   }
335 
336   if (Subtarget.hasPOPCNTD() == PPCSubtarget::POPCNTD_Fast) {
337     setOperationAction(ISD::CTPOP, MVT::i32  , Legal);
338     setOperationAction(ISD::CTPOP, MVT::i64  , Legal);
339   } else {
340     setOperationAction(ISD::CTPOP, MVT::i32  , Expand);
341     setOperationAction(ISD::CTPOP, MVT::i64  , Expand);
342   }
343 
344   // PowerPC does not have ROTR
345   setOperationAction(ISD::ROTR, MVT::i32   , Expand);
346   setOperationAction(ISD::ROTR, MVT::i64   , Expand);
347 
348   if (!Subtarget.useCRBits()) {
349     // PowerPC does not have Select
350     setOperationAction(ISD::SELECT, MVT::i32, Expand);
351     setOperationAction(ISD::SELECT, MVT::i64, Expand);
352     setOperationAction(ISD::SELECT, MVT::f32, Expand);
353     setOperationAction(ISD::SELECT, MVT::f64, Expand);
354   }
355 
356   // PowerPC wants to turn select_cc of FP into fsel when possible.
357   setOperationAction(ISD::SELECT_CC, MVT::f32, Custom);
358   setOperationAction(ISD::SELECT_CC, MVT::f64, Custom);
359 
360   // PowerPC wants to optimize integer setcc a bit
361   if (!Subtarget.useCRBits())
362     setOperationAction(ISD::SETCC, MVT::i32, Custom);
363 
364   // PowerPC does not have BRCOND which requires SetCC
365   if (!Subtarget.useCRBits())
366     setOperationAction(ISD::BRCOND, MVT::Other, Expand);
367 
368   setOperationAction(ISD::BR_JT,  MVT::Other, Expand);
369 
370   if (Subtarget.hasSPE()) {
371     // SPE has built-in conversions
372     setOperationAction(ISD::FP_TO_SINT, MVT::i32, Legal);
373     setOperationAction(ISD::SINT_TO_FP, MVT::i32, Legal);
374     setOperationAction(ISD::UINT_TO_FP, MVT::i32, Legal);
375   } else {
376     // PowerPC turns FP_TO_SINT into FCTIWZ and some load/stores.
377     setOperationAction(ISD::FP_TO_SINT, MVT::i32, Custom);
378 
379     // PowerPC does not have [U|S]INT_TO_FP
380     setOperationAction(ISD::SINT_TO_FP, MVT::i32, Expand);
381     setOperationAction(ISD::UINT_TO_FP, MVT::i32, Expand);
382   }
383 
384   if (Subtarget.hasDirectMove() && isPPC64) {
385     setOperationAction(ISD::BITCAST, MVT::f32, Legal);
386     setOperationAction(ISD::BITCAST, MVT::i32, Legal);
387     setOperationAction(ISD::BITCAST, MVT::i64, Legal);
388     setOperationAction(ISD::BITCAST, MVT::f64, Legal);
389   } else {
390     setOperationAction(ISD::BITCAST, MVT::f32, Expand);
391     setOperationAction(ISD::BITCAST, MVT::i32, Expand);
392     setOperationAction(ISD::BITCAST, MVT::i64, Expand);
393     setOperationAction(ISD::BITCAST, MVT::f64, Expand);
394   }
395 
396   // We cannot sextinreg(i1).  Expand to shifts.
397   setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i1, Expand);
398 
399   // NOTE: EH_SJLJ_SETJMP/_LONGJMP supported here is NOT intended to support
400   // SjLj exception handling but a light-weight setjmp/longjmp replacement to
401   // support continuation, user-level threading, and etc.. As a result, no
402   // other SjLj exception interfaces are implemented and please don't build
403   // your own exception handling based on them.
404   // LLVM/Clang supports zero-cost DWARF exception handling.
405   setOperationAction(ISD::EH_SJLJ_SETJMP, MVT::i32, Custom);
406   setOperationAction(ISD::EH_SJLJ_LONGJMP, MVT::Other, Custom);
407 
408   // We want to legalize GlobalAddress and ConstantPool nodes into the
409   // appropriate instructions to materialize the address.
410   setOperationAction(ISD::GlobalAddress, MVT::i32, Custom);
411   setOperationAction(ISD::GlobalTLSAddress, MVT::i32, Custom);
412   setOperationAction(ISD::BlockAddress,  MVT::i32, Custom);
413   setOperationAction(ISD::ConstantPool,  MVT::i32, Custom);
414   setOperationAction(ISD::JumpTable,     MVT::i32, Custom);
415   setOperationAction(ISD::GlobalAddress, MVT::i64, Custom);
416   setOperationAction(ISD::GlobalTLSAddress, MVT::i64, Custom);
417   setOperationAction(ISD::BlockAddress,  MVT::i64, Custom);
418   setOperationAction(ISD::ConstantPool,  MVT::i64, Custom);
419   setOperationAction(ISD::JumpTable,     MVT::i64, Custom);
420 
421   // TRAP is legal.
422   setOperationAction(ISD::TRAP, MVT::Other, Legal);
423 
424   // TRAMPOLINE is custom lowered.
425   setOperationAction(ISD::INIT_TRAMPOLINE, MVT::Other, Custom);
426   setOperationAction(ISD::ADJUST_TRAMPOLINE, MVT::Other, Custom);
427 
428   // VASTART needs to be custom lowered to use the VarArgsFrameIndex
429   setOperationAction(ISD::VASTART           , MVT::Other, Custom);
430 
431   if (Subtarget.isSVR4ABI()) {
432     if (isPPC64) {
433       // VAARG always uses double-word chunks, so promote anything smaller.
434       setOperationAction(ISD::VAARG, MVT::i1, Promote);
435       AddPromotedToType (ISD::VAARG, MVT::i1, MVT::i64);
436       setOperationAction(ISD::VAARG, MVT::i8, Promote);
437       AddPromotedToType (ISD::VAARG, MVT::i8, MVT::i64);
438       setOperationAction(ISD::VAARG, MVT::i16, Promote);
439       AddPromotedToType (ISD::VAARG, MVT::i16, MVT::i64);
440       setOperationAction(ISD::VAARG, MVT::i32, Promote);
441       AddPromotedToType (ISD::VAARG, MVT::i32, MVT::i64);
442       setOperationAction(ISD::VAARG, MVT::Other, Expand);
443     } else {
444       // VAARG is custom lowered with the 32-bit SVR4 ABI.
445       setOperationAction(ISD::VAARG, MVT::Other, Custom);
446       setOperationAction(ISD::VAARG, MVT::i64, Custom);
447     }
448   } else
449     setOperationAction(ISD::VAARG, MVT::Other, Expand);
450 
451   if (Subtarget.isSVR4ABI() && !isPPC64)
452     // VACOPY is custom lowered with the 32-bit SVR4 ABI.
453     setOperationAction(ISD::VACOPY            , MVT::Other, Custom);
454   else
455     setOperationAction(ISD::VACOPY            , MVT::Other, Expand);
456 
457   // Use the default implementation.
458   setOperationAction(ISD::VAEND             , MVT::Other, Expand);
459   setOperationAction(ISD::STACKSAVE         , MVT::Other, Expand);
460   setOperationAction(ISD::STACKRESTORE      , MVT::Other, Custom);
461   setOperationAction(ISD::DYNAMIC_STACKALLOC, MVT::i32  , Custom);
462   setOperationAction(ISD::DYNAMIC_STACKALLOC, MVT::i64  , Custom);
463   setOperationAction(ISD::GET_DYNAMIC_AREA_OFFSET, MVT::i32, Custom);
464   setOperationAction(ISD::GET_DYNAMIC_AREA_OFFSET, MVT::i64, Custom);
465   setOperationAction(ISD::EH_DWARF_CFA, MVT::i32, Custom);
466   setOperationAction(ISD::EH_DWARF_CFA, MVT::i64, Custom);
467 
468   // We want to custom lower some of our intrinsics.
469   setOperationAction(ISD::INTRINSIC_WO_CHAIN, MVT::Other, Custom);
470 
471   // To handle counter-based loop conditions.
472   setOperationAction(ISD::INTRINSIC_W_CHAIN, MVT::i1, Custom);
473 
474   setOperationAction(ISD::INTRINSIC_VOID, MVT::i8, Custom);
475   setOperationAction(ISD::INTRINSIC_VOID, MVT::i16, Custom);
476   setOperationAction(ISD::INTRINSIC_VOID, MVT::i32, Custom);
477   setOperationAction(ISD::INTRINSIC_VOID, MVT::Other, Custom);
478 
479   // Comparisons that require checking two conditions.
480   if (Subtarget.hasSPE()) {
481     setCondCodeAction(ISD::SETO, MVT::f32, Expand);
482     setCondCodeAction(ISD::SETO, MVT::f64, Expand);
483     setCondCodeAction(ISD::SETUO, MVT::f32, Expand);
484     setCondCodeAction(ISD::SETUO, MVT::f64, Expand);
485   }
486   setCondCodeAction(ISD::SETULT, MVT::f32, Expand);
487   setCondCodeAction(ISD::SETULT, MVT::f64, Expand);
488   setCondCodeAction(ISD::SETUGT, MVT::f32, Expand);
489   setCondCodeAction(ISD::SETUGT, MVT::f64, Expand);
490   setCondCodeAction(ISD::SETUEQ, MVT::f32, Expand);
491   setCondCodeAction(ISD::SETUEQ, MVT::f64, Expand);
492   setCondCodeAction(ISD::SETOGE, MVT::f32, Expand);
493   setCondCodeAction(ISD::SETOGE, MVT::f64, Expand);
494   setCondCodeAction(ISD::SETOLE, MVT::f32, Expand);
495   setCondCodeAction(ISD::SETOLE, MVT::f64, Expand);
496   setCondCodeAction(ISD::SETONE, MVT::f32, Expand);
497   setCondCodeAction(ISD::SETONE, MVT::f64, Expand);
498 
499   if (Subtarget.has64BitSupport()) {
500     // They also have instructions for converting between i64 and fp.
501     setOperationAction(ISD::FP_TO_SINT, MVT::i64, Custom);
502     setOperationAction(ISD::FP_TO_UINT, MVT::i64, Expand);
503     setOperationAction(ISD::SINT_TO_FP, MVT::i64, Custom);
504     setOperationAction(ISD::UINT_TO_FP, MVT::i64, Expand);
505     // This is just the low 32 bits of a (signed) fp->i64 conversion.
506     // We cannot do this with Promote because i64 is not a legal type.
507     setOperationAction(ISD::FP_TO_UINT, MVT::i32, Custom);
508 
509     if (Subtarget.hasLFIWAX() || Subtarget.isPPC64())
510       setOperationAction(ISD::SINT_TO_FP, MVT::i32, Custom);
511   } else {
512     // PowerPC does not have FP_TO_UINT on 32-bit implementations.
513     if (Subtarget.hasSPE())
514       setOperationAction(ISD::FP_TO_UINT, MVT::i32, Legal);
515     else
516       setOperationAction(ISD::FP_TO_UINT, MVT::i32, Expand);
517   }
518 
519   // With the instructions enabled under FPCVT, we can do everything.
520   if (Subtarget.hasFPCVT()) {
521     if (Subtarget.has64BitSupport()) {
522       setOperationAction(ISD::FP_TO_SINT, MVT::i64, Custom);
523       setOperationAction(ISD::FP_TO_UINT, MVT::i64, Custom);
524       setOperationAction(ISD::SINT_TO_FP, MVT::i64, Custom);
525       setOperationAction(ISD::UINT_TO_FP, MVT::i64, Custom);
526     }
527 
528     setOperationAction(ISD::FP_TO_SINT, MVT::i32, Custom);
529     setOperationAction(ISD::FP_TO_UINT, MVT::i32, Custom);
530     setOperationAction(ISD::SINT_TO_FP, MVT::i32, Custom);
531     setOperationAction(ISD::UINT_TO_FP, MVT::i32, Custom);
532   }
533 
534   if (Subtarget.use64BitRegs()) {
535     // 64-bit PowerPC implementations can support i64 types directly
536     addRegisterClass(MVT::i64, &PPC::G8RCRegClass);
537     // BUILD_PAIR can't be handled natively, and should be expanded to shl/or
538     setOperationAction(ISD::BUILD_PAIR, MVT::i64, Expand);
539     // 64-bit PowerPC wants to expand i128 shifts itself.
540     setOperationAction(ISD::SHL_PARTS, MVT::i64, Custom);
541     setOperationAction(ISD::SRA_PARTS, MVT::i64, Custom);
542     setOperationAction(ISD::SRL_PARTS, MVT::i64, Custom);
543   } else {
544     // 32-bit PowerPC wants to expand i64 shifts itself.
545     setOperationAction(ISD::SHL_PARTS, MVT::i32, Custom);
546     setOperationAction(ISD::SRA_PARTS, MVT::i32, Custom);
547     setOperationAction(ISD::SRL_PARTS, MVT::i32, Custom);
548   }
549 
550   if (Subtarget.hasAltivec()) {
551     // First set operation action for all vector types to expand. Then we
552     // will selectively turn on ones that can be effectively codegen'd.
553     for (MVT VT : MVT::vector_valuetypes()) {
554       // add/sub are legal for all supported vector VT's.
555       setOperationAction(ISD::ADD, VT, Legal);
556       setOperationAction(ISD::SUB, VT, Legal);
557 
558       // Vector instructions introduced in P8
559       if (Subtarget.hasP8Altivec() && (VT.SimpleTy != MVT::v1i128)) {
560         setOperationAction(ISD::CTPOP, VT, Legal);
561         setOperationAction(ISD::CTLZ, VT, Legal);
562       }
563       else {
564         setOperationAction(ISD::CTPOP, VT, Expand);
565         setOperationAction(ISD::CTLZ, VT, Expand);
566       }
567 
568       // Vector instructions introduced in P9
569       if (Subtarget.hasP9Altivec() && (VT.SimpleTy != MVT::v1i128))
570         setOperationAction(ISD::CTTZ, VT, Legal);
571       else
572         setOperationAction(ISD::CTTZ, VT, Expand);
573 
574       // We promote all shuffles to v16i8.
575       setOperationAction(ISD::VECTOR_SHUFFLE, VT, Promote);
576       AddPromotedToType (ISD::VECTOR_SHUFFLE, VT, MVT::v16i8);
577 
578       // We promote all non-typed operations to v4i32.
579       setOperationAction(ISD::AND   , VT, Promote);
580       AddPromotedToType (ISD::AND   , VT, MVT::v4i32);
581       setOperationAction(ISD::OR    , VT, Promote);
582       AddPromotedToType (ISD::OR    , VT, MVT::v4i32);
583       setOperationAction(ISD::XOR   , VT, Promote);
584       AddPromotedToType (ISD::XOR   , VT, MVT::v4i32);
585       setOperationAction(ISD::LOAD  , VT, Promote);
586       AddPromotedToType (ISD::LOAD  , VT, MVT::v4i32);
587       setOperationAction(ISD::SELECT, VT, Promote);
588       AddPromotedToType (ISD::SELECT, VT, MVT::v4i32);
589       setOperationAction(ISD::SELECT_CC, VT, Promote);
590       AddPromotedToType (ISD::SELECT_CC, VT, MVT::v4i32);
591       setOperationAction(ISD::STORE, VT, Promote);
592       AddPromotedToType (ISD::STORE, VT, MVT::v4i32);
593 
594       // No other operations are legal.
595       setOperationAction(ISD::MUL , VT, Expand);
596       setOperationAction(ISD::SDIV, VT, Expand);
597       setOperationAction(ISD::SREM, VT, Expand);
598       setOperationAction(ISD::UDIV, VT, Expand);
599       setOperationAction(ISD::UREM, VT, Expand);
600       setOperationAction(ISD::FDIV, VT, Expand);
601       setOperationAction(ISD::FREM, VT, Expand);
602       setOperationAction(ISD::FNEG, VT, Expand);
603       setOperationAction(ISD::FSQRT, VT, Expand);
604       setOperationAction(ISD::FLOG, VT, Expand);
605       setOperationAction(ISD::FLOG10, VT, Expand);
606       setOperationAction(ISD::FLOG2, VT, Expand);
607       setOperationAction(ISD::FEXP, VT, Expand);
608       setOperationAction(ISD::FEXP2, VT, Expand);
609       setOperationAction(ISD::FSIN, VT, Expand);
610       setOperationAction(ISD::FCOS, VT, Expand);
611       setOperationAction(ISD::FABS, VT, Expand);
612       setOperationAction(ISD::FFLOOR, VT, Expand);
613       setOperationAction(ISD::FCEIL,  VT, Expand);
614       setOperationAction(ISD::FTRUNC, VT, Expand);
615       setOperationAction(ISD::FRINT,  VT, Expand);
616       setOperationAction(ISD::FNEARBYINT, VT, Expand);
617       setOperationAction(ISD::EXTRACT_VECTOR_ELT, VT, Expand);
618       setOperationAction(ISD::INSERT_VECTOR_ELT, VT, Expand);
619       setOperationAction(ISD::BUILD_VECTOR, VT, Expand);
620       setOperationAction(ISD::MULHU, VT, Expand);
621       setOperationAction(ISD::MULHS, VT, Expand);
622       setOperationAction(ISD::UMUL_LOHI, VT, Expand);
623       setOperationAction(ISD::SMUL_LOHI, VT, Expand);
624       setOperationAction(ISD::UDIVREM, VT, Expand);
625       setOperationAction(ISD::SDIVREM, VT, Expand);
626       setOperationAction(ISD::SCALAR_TO_VECTOR, VT, Expand);
627       setOperationAction(ISD::FPOW, VT, Expand);
628       setOperationAction(ISD::BSWAP, VT, Expand);
629       setOperationAction(ISD::VSELECT, VT, Expand);
630       setOperationAction(ISD::SIGN_EXTEND_INREG, VT, Expand);
631       setOperationAction(ISD::ROTL, VT, Expand);
632       setOperationAction(ISD::ROTR, VT, Expand);
633 
634       for (MVT InnerVT : MVT::vector_valuetypes()) {
635         setTruncStoreAction(VT, InnerVT, Expand);
636         setLoadExtAction(ISD::SEXTLOAD, VT, InnerVT, Expand);
637         setLoadExtAction(ISD::ZEXTLOAD, VT, InnerVT, Expand);
638         setLoadExtAction(ISD::EXTLOAD, VT, InnerVT, Expand);
639       }
640     }
641 
642     // We can custom expand all VECTOR_SHUFFLEs to VPERM, others we can handle
643     // with merges, splats, etc.
644     setOperationAction(ISD::VECTOR_SHUFFLE, MVT::v16i8, Custom);
645 
646     setOperationAction(ISD::AND   , MVT::v4i32, Legal);
647     setOperationAction(ISD::OR    , MVT::v4i32, Legal);
648     setOperationAction(ISD::XOR   , MVT::v4i32, Legal);
649     setOperationAction(ISD::LOAD  , MVT::v4i32, Legal);
650     setOperationAction(ISD::SELECT, MVT::v4i32,
651                        Subtarget.useCRBits() ? Legal : Expand);
652     setOperationAction(ISD::STORE , MVT::v4i32, Legal);
653     setOperationAction(ISD::FP_TO_SINT, MVT::v4i32, Legal);
654     setOperationAction(ISD::FP_TO_UINT, MVT::v4i32, Legal);
655     setOperationAction(ISD::SINT_TO_FP, MVT::v4i32, Legal);
656     setOperationAction(ISD::UINT_TO_FP, MVT::v4i32, Legal);
657     setOperationAction(ISD::FFLOOR, MVT::v4f32, Legal);
658     setOperationAction(ISD::FCEIL, MVT::v4f32, Legal);
659     setOperationAction(ISD::FTRUNC, MVT::v4f32, Legal);
660     setOperationAction(ISD::FNEARBYINT, MVT::v4f32, Legal);
661 
662     addRegisterClass(MVT::v4f32, &PPC::VRRCRegClass);
663     addRegisterClass(MVT::v4i32, &PPC::VRRCRegClass);
664     addRegisterClass(MVT::v8i16, &PPC::VRRCRegClass);
665     addRegisterClass(MVT::v16i8, &PPC::VRRCRegClass);
666 
667     setOperationAction(ISD::MUL, MVT::v4f32, Legal);
668     setOperationAction(ISD::FMA, MVT::v4f32, Legal);
669 
670     if (TM.Options.UnsafeFPMath || Subtarget.hasVSX()) {
671       setOperationAction(ISD::FDIV, MVT::v4f32, Legal);
672       setOperationAction(ISD::FSQRT, MVT::v4f32, Legal);
673     }
674 
675     if (Subtarget.hasP8Altivec())
676       setOperationAction(ISD::MUL, MVT::v4i32, Legal);
677     else
678       setOperationAction(ISD::MUL, MVT::v4i32, Custom);
679 
680     setOperationAction(ISD::MUL, MVT::v8i16, Custom);
681     setOperationAction(ISD::MUL, MVT::v16i8, Custom);
682 
683     setOperationAction(ISD::SCALAR_TO_VECTOR, MVT::v4f32, Custom);
684     setOperationAction(ISD::SCALAR_TO_VECTOR, MVT::v4i32, Custom);
685 
686     setOperationAction(ISD::BUILD_VECTOR, MVT::v16i8, Custom);
687     setOperationAction(ISD::BUILD_VECTOR, MVT::v8i16, Custom);
688     setOperationAction(ISD::BUILD_VECTOR, MVT::v4i32, Custom);
689     setOperationAction(ISD::BUILD_VECTOR, MVT::v4f32, Custom);
690 
691     // Altivec does not contain unordered floating-point compare instructions
692     setCondCodeAction(ISD::SETUO, MVT::v4f32, Expand);
693     setCondCodeAction(ISD::SETUEQ, MVT::v4f32, Expand);
694     setCondCodeAction(ISD::SETO,   MVT::v4f32, Expand);
695     setCondCodeAction(ISD::SETONE, MVT::v4f32, Expand);
696 
697     if (Subtarget.hasVSX()) {
698       setOperationAction(ISD::SCALAR_TO_VECTOR, MVT::v2f64, Legal);
699       setOperationAction(ISD::EXTRACT_VECTOR_ELT, MVT::v2f64, Legal);
700       if (Subtarget.hasP8Vector()) {
701         setOperationAction(ISD::SCALAR_TO_VECTOR, MVT::v4f32, Legal);
702         setOperationAction(ISD::EXTRACT_VECTOR_ELT, MVT::v4f32, Legal);
703       }
704       if (Subtarget.hasDirectMove() && isPPC64) {
705         setOperationAction(ISD::SCALAR_TO_VECTOR, MVT::v16i8, Legal);
706         setOperationAction(ISD::SCALAR_TO_VECTOR, MVT::v8i16, Legal);
707         setOperationAction(ISD::SCALAR_TO_VECTOR, MVT::v4i32, Legal);
708         setOperationAction(ISD::SCALAR_TO_VECTOR, MVT::v2i64, Legal);
709         setOperationAction(ISD::EXTRACT_VECTOR_ELT, MVT::v16i8, Legal);
710         setOperationAction(ISD::EXTRACT_VECTOR_ELT, MVT::v8i16, Legal);
711         setOperationAction(ISD::EXTRACT_VECTOR_ELT, MVT::v4i32, Legal);
712         setOperationAction(ISD::EXTRACT_VECTOR_ELT, MVT::v2i64, Legal);
713       }
714       setOperationAction(ISD::EXTRACT_VECTOR_ELT, MVT::v2f64, Legal);
715 
716       setOperationAction(ISD::FFLOOR, MVT::v2f64, Legal);
717       setOperationAction(ISD::FCEIL, MVT::v2f64, Legal);
718       setOperationAction(ISD::FTRUNC, MVT::v2f64, Legal);
719       setOperationAction(ISD::FNEARBYINT, MVT::v2f64, Legal);
720       setOperationAction(ISD::FROUND, MVT::v2f64, Legal);
721 
722       setOperationAction(ISD::FROUND, MVT::v4f32, Legal);
723 
724       setOperationAction(ISD::MUL, MVT::v2f64, Legal);
725       setOperationAction(ISD::FMA, MVT::v2f64, Legal);
726 
727       setOperationAction(ISD::FDIV, MVT::v2f64, Legal);
728       setOperationAction(ISD::FSQRT, MVT::v2f64, Legal);
729 
730       setOperationAction(ISD::VSELECT, MVT::v16i8, Legal);
731       setOperationAction(ISD::VSELECT, MVT::v8i16, Legal);
732       setOperationAction(ISD::VSELECT, MVT::v4i32, Legal);
733       setOperationAction(ISD::VSELECT, MVT::v4f32, Legal);
734       setOperationAction(ISD::VSELECT, MVT::v2f64, Legal);
735 
736       // Share the Altivec comparison restrictions.
737       setCondCodeAction(ISD::SETUO, MVT::v2f64, Expand);
738       setCondCodeAction(ISD::SETUEQ, MVT::v2f64, Expand);
739       setCondCodeAction(ISD::SETO,   MVT::v2f64, Expand);
740       setCondCodeAction(ISD::SETONE, MVT::v2f64, Expand);
741 
742       setOperationAction(ISD::LOAD, MVT::v2f64, Legal);
743       setOperationAction(ISD::STORE, MVT::v2f64, Legal);
744 
745       setOperationAction(ISD::VECTOR_SHUFFLE, MVT::v2f64, Legal);
746 
747       if (Subtarget.hasP8Vector())
748         addRegisterClass(MVT::f32, &PPC::VSSRCRegClass);
749 
750       addRegisterClass(MVT::f64, &PPC::VSFRCRegClass);
751 
752       addRegisterClass(MVT::v4i32, &PPC::VSRCRegClass);
753       addRegisterClass(MVT::v4f32, &PPC::VSRCRegClass);
754       addRegisterClass(MVT::v2f64, &PPC::VSRCRegClass);
755 
756       if (Subtarget.hasP8Altivec()) {
757         setOperationAction(ISD::SHL, MVT::v2i64, Legal);
758         setOperationAction(ISD::SRA, MVT::v2i64, Legal);
759         setOperationAction(ISD::SRL, MVT::v2i64, Legal);
760 
761         // 128 bit shifts can be accomplished via 3 instructions for SHL and
762         // SRL, but not for SRA because of the instructions available:
763         // VS{RL} and VS{RL}O. However due to direct move costs, it's not worth
764         // doing
765         setOperationAction(ISD::SHL, MVT::v1i128, Expand);
766         setOperationAction(ISD::SRL, MVT::v1i128, Expand);
767         setOperationAction(ISD::SRA, MVT::v1i128, Expand);
768 
769         setOperationAction(ISD::SETCC, MVT::v2i64, Legal);
770       }
771       else {
772         setOperationAction(ISD::SHL, MVT::v2i64, Expand);
773         setOperationAction(ISD::SRA, MVT::v2i64, Expand);
774         setOperationAction(ISD::SRL, MVT::v2i64, Expand);
775 
776         setOperationAction(ISD::SETCC, MVT::v2i64, Custom);
777 
778         // VSX v2i64 only supports non-arithmetic operations.
779         setOperationAction(ISD::ADD, MVT::v2i64, Expand);
780         setOperationAction(ISD::SUB, MVT::v2i64, Expand);
781       }
782 
783       setOperationAction(ISD::LOAD, MVT::v2i64, Promote);
784       AddPromotedToType (ISD::LOAD, MVT::v2i64, MVT::v2f64);
785       setOperationAction(ISD::STORE, MVT::v2i64, Promote);
786       AddPromotedToType (ISD::STORE, MVT::v2i64, MVT::v2f64);
787 
788       setOperationAction(ISD::VECTOR_SHUFFLE, MVT::v2i64, Legal);
789 
790       setOperationAction(ISD::SINT_TO_FP, MVT::v2i64, Legal);
791       setOperationAction(ISD::UINT_TO_FP, MVT::v2i64, Legal);
792       setOperationAction(ISD::FP_TO_SINT, MVT::v2i64, Legal);
793       setOperationAction(ISD::FP_TO_UINT, MVT::v2i64, Legal);
794 
795       // Vector operation legalization checks the result type of
796       // SIGN_EXTEND_INREG, overall legalization checks the inner type.
797       setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::v2i64, Legal);
798       setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::v2i32, Legal);
799       setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::v2i16, Custom);
800       setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::v2i8, Custom);
801 
802       setOperationAction(ISD::FNEG, MVT::v4f32, Legal);
803       setOperationAction(ISD::FNEG, MVT::v2f64, Legal);
804       setOperationAction(ISD::FABS, MVT::v4f32, Legal);
805       setOperationAction(ISD::FABS, MVT::v2f64, Legal);
806 
807       if (Subtarget.hasDirectMove())
808         setOperationAction(ISD::BUILD_VECTOR, MVT::v2i64, Custom);
809       setOperationAction(ISD::BUILD_VECTOR, MVT::v2f64, Custom);
810 
811       addRegisterClass(MVT::v2i64, &PPC::VSRCRegClass);
812     }
813 
814     if (Subtarget.hasP8Altivec()) {
815       addRegisterClass(MVT::v2i64, &PPC::VRRCRegClass);
816       addRegisterClass(MVT::v1i128, &PPC::VRRCRegClass);
817     }
818 
819     if (Subtarget.hasP9Vector()) {
820       setOperationAction(ISD::INSERT_VECTOR_ELT, MVT::v4i32, Custom);
821       setOperationAction(ISD::INSERT_VECTOR_ELT, MVT::v4f32, Custom);
822 
823       // 128 bit shifts can be accomplished via 3 instructions for SHL and
824       // SRL, but not for SRA because of the instructions available:
825       // VS{RL} and VS{RL}O.
826       setOperationAction(ISD::SHL, MVT::v1i128, Legal);
827       setOperationAction(ISD::SRL, MVT::v1i128, Legal);
828       setOperationAction(ISD::SRA, MVT::v1i128, Expand);
829 
830       if (EnableQuadPrecision) {
831         addRegisterClass(MVT::f128, &PPC::VRRCRegClass);
832         setOperationAction(ISD::FADD, MVT::f128, Legal);
833         setOperationAction(ISD::FSUB, MVT::f128, Legal);
834         setOperationAction(ISD::FDIV, MVT::f128, Legal);
835         setOperationAction(ISD::FMUL, MVT::f128, Legal);
836         setOperationAction(ISD::FP_EXTEND, MVT::f128, Legal);
837         // No extending loads to f128 on PPC.
838         for (MVT FPT : MVT::fp_valuetypes())
839           setLoadExtAction(ISD::EXTLOAD, MVT::f128, FPT, Expand);
840         setOperationAction(ISD::FMA, MVT::f128, Legal);
841         setCondCodeAction(ISD::SETULT, MVT::f128, Expand);
842         setCondCodeAction(ISD::SETUGT, MVT::f128, Expand);
843         setCondCodeAction(ISD::SETUEQ, MVT::f128, Expand);
844         setCondCodeAction(ISD::SETOGE, MVT::f128, Expand);
845         setCondCodeAction(ISD::SETOLE, MVT::f128, Expand);
846         setCondCodeAction(ISD::SETONE, MVT::f128, Expand);
847 
848         setOperationAction(ISD::FTRUNC, MVT::f128, Legal);
849         setOperationAction(ISD::FRINT, MVT::f128, Legal);
850         setOperationAction(ISD::FFLOOR, MVT::f128, Legal);
851         setOperationAction(ISD::FCEIL, MVT::f128, Legal);
852         setOperationAction(ISD::FNEARBYINT, MVT::f128, Legal);
853         setOperationAction(ISD::FROUND, MVT::f128, Legal);
854 
855         setOperationAction(ISD::SELECT, MVT::f128, Expand);
856         setOperationAction(ISD::FP_ROUND, MVT::f64, Legal);
857         setOperationAction(ISD::FP_ROUND, MVT::f32, Legal);
858         setTruncStoreAction(MVT::f128, MVT::f64, Expand);
859         setTruncStoreAction(MVT::f128, MVT::f32, Expand);
860         setOperationAction(ISD::BITCAST, MVT::i128, Custom);
861         // No implementation for these ops for PowerPC.
862         setOperationAction(ISD::FSIN , MVT::f128, Expand);
863         setOperationAction(ISD::FCOS , MVT::f128, Expand);
864         setOperationAction(ISD::FPOW, MVT::f128, Expand);
865         setOperationAction(ISD::FPOWI, MVT::f128, Expand);
866         setOperationAction(ISD::FREM, MVT::f128, Expand);
867       }
868 
869     }
870 
871     if (Subtarget.hasP9Altivec()) {
872       setOperationAction(ISD::INSERT_VECTOR_ELT, MVT::v8i16, Custom);
873       setOperationAction(ISD::INSERT_VECTOR_ELT, MVT::v16i8, Custom);
874     }
875   }
876 
877   if (Subtarget.hasQPX()) {
878     setOperationAction(ISD::FADD, MVT::v4f64, Legal);
879     setOperationAction(ISD::FSUB, MVT::v4f64, Legal);
880     setOperationAction(ISD::FMUL, MVT::v4f64, Legal);
881     setOperationAction(ISD::FREM, MVT::v4f64, Expand);
882 
883     setOperationAction(ISD::FCOPYSIGN, MVT::v4f64, Legal);
884     setOperationAction(ISD::FGETSIGN, MVT::v4f64, Expand);
885 
886     setOperationAction(ISD::LOAD  , MVT::v4f64, Custom);
887     setOperationAction(ISD::STORE , MVT::v4f64, Custom);
888 
889     setTruncStoreAction(MVT::v4f64, MVT::v4f32, Custom);
890     setLoadExtAction(ISD::EXTLOAD, MVT::v4f64, MVT::v4f32, Custom);
891 
892     if (!Subtarget.useCRBits())
893       setOperationAction(ISD::SELECT, MVT::v4f64, Expand);
894     setOperationAction(ISD::VSELECT, MVT::v4f64, Legal);
895 
896     setOperationAction(ISD::EXTRACT_VECTOR_ELT , MVT::v4f64, Legal);
897     setOperationAction(ISD::INSERT_VECTOR_ELT , MVT::v4f64, Expand);
898     setOperationAction(ISD::CONCAT_VECTORS , MVT::v4f64, Expand);
899     setOperationAction(ISD::EXTRACT_SUBVECTOR , MVT::v4f64, Expand);
900     setOperationAction(ISD::VECTOR_SHUFFLE , MVT::v4f64, Custom);
901     setOperationAction(ISD::SCALAR_TO_VECTOR, MVT::v4f64, Legal);
902     setOperationAction(ISD::BUILD_VECTOR, MVT::v4f64, Custom);
903 
904     setOperationAction(ISD::FP_TO_SINT , MVT::v4f64, Legal);
905     setOperationAction(ISD::FP_TO_UINT , MVT::v4f64, Expand);
906 
907     setOperationAction(ISD::FP_ROUND , MVT::v4f32, Legal);
908     setOperationAction(ISD::FP_ROUND_INREG , MVT::v4f32, Expand);
909     setOperationAction(ISD::FP_EXTEND, MVT::v4f64, Legal);
910 
911     setOperationAction(ISD::FNEG , MVT::v4f64, Legal);
912     setOperationAction(ISD::FABS , MVT::v4f64, Legal);
913     setOperationAction(ISD::FSIN , MVT::v4f64, Expand);
914     setOperationAction(ISD::FCOS , MVT::v4f64, Expand);
915     setOperationAction(ISD::FPOW , MVT::v4f64, Expand);
916     setOperationAction(ISD::FLOG , MVT::v4f64, Expand);
917     setOperationAction(ISD::FLOG2 , MVT::v4f64, Expand);
918     setOperationAction(ISD::FLOG10 , MVT::v4f64, Expand);
919     setOperationAction(ISD::FEXP , MVT::v4f64, Expand);
920     setOperationAction(ISD::FEXP2 , MVT::v4f64, Expand);
921 
922     setOperationAction(ISD::FMINNUM, MVT::v4f64, Legal);
923     setOperationAction(ISD::FMAXNUM, MVT::v4f64, Legal);
924 
925     setIndexedLoadAction(ISD::PRE_INC, MVT::v4f64, Legal);
926     setIndexedStoreAction(ISD::PRE_INC, MVT::v4f64, Legal);
927 
928     addRegisterClass(MVT::v4f64, &PPC::QFRCRegClass);
929 
930     setOperationAction(ISD::FADD, MVT::v4f32, Legal);
931     setOperationAction(ISD::FSUB, MVT::v4f32, Legal);
932     setOperationAction(ISD::FMUL, MVT::v4f32, Legal);
933     setOperationAction(ISD::FREM, MVT::v4f32, Expand);
934 
935     setOperationAction(ISD::FCOPYSIGN, MVT::v4f32, Legal);
936     setOperationAction(ISD::FGETSIGN, MVT::v4f32, Expand);
937 
938     setOperationAction(ISD::LOAD  , MVT::v4f32, Custom);
939     setOperationAction(ISD::STORE , MVT::v4f32, Custom);
940 
941     if (!Subtarget.useCRBits())
942       setOperationAction(ISD::SELECT, MVT::v4f32, Expand);
943     setOperationAction(ISD::VSELECT, MVT::v4f32, Legal);
944 
945     setOperationAction(ISD::EXTRACT_VECTOR_ELT , MVT::v4f32, Legal);
946     setOperationAction(ISD::INSERT_VECTOR_ELT , MVT::v4f32, Expand);
947     setOperationAction(ISD::CONCAT_VECTORS , MVT::v4f32, Expand);
948     setOperationAction(ISD::EXTRACT_SUBVECTOR , MVT::v4f32, Expand);
949     setOperationAction(ISD::VECTOR_SHUFFLE , MVT::v4f32, Custom);
950     setOperationAction(ISD::SCALAR_TO_VECTOR, MVT::v4f32, Legal);
951     setOperationAction(ISD::BUILD_VECTOR, MVT::v4f32, Custom);
952 
953     setOperationAction(ISD::FP_TO_SINT , MVT::v4f32, Legal);
954     setOperationAction(ISD::FP_TO_UINT , MVT::v4f32, Expand);
955 
956     setOperationAction(ISD::FNEG , MVT::v4f32, Legal);
957     setOperationAction(ISD::FABS , MVT::v4f32, Legal);
958     setOperationAction(ISD::FSIN , MVT::v4f32, Expand);
959     setOperationAction(ISD::FCOS , MVT::v4f32, Expand);
960     setOperationAction(ISD::FPOW , MVT::v4f32, Expand);
961     setOperationAction(ISD::FLOG , MVT::v4f32, Expand);
962     setOperationAction(ISD::FLOG2 , MVT::v4f32, Expand);
963     setOperationAction(ISD::FLOG10 , MVT::v4f32, Expand);
964     setOperationAction(ISD::FEXP , MVT::v4f32, Expand);
965     setOperationAction(ISD::FEXP2 , MVT::v4f32, Expand);
966 
967     setOperationAction(ISD::FMINNUM, MVT::v4f32, Legal);
968     setOperationAction(ISD::FMAXNUM, MVT::v4f32, Legal);
969 
970     setIndexedLoadAction(ISD::PRE_INC, MVT::v4f32, Legal);
971     setIndexedStoreAction(ISD::PRE_INC, MVT::v4f32, Legal);
972 
973     addRegisterClass(MVT::v4f32, &PPC::QSRCRegClass);
974 
975     setOperationAction(ISD::AND , MVT::v4i1, Legal);
976     setOperationAction(ISD::OR , MVT::v4i1, Legal);
977     setOperationAction(ISD::XOR , MVT::v4i1, Legal);
978 
979     if (!Subtarget.useCRBits())
980       setOperationAction(ISD::SELECT, MVT::v4i1, Expand);
981     setOperationAction(ISD::VSELECT, MVT::v4i1, Legal);
982 
983     setOperationAction(ISD::LOAD  , MVT::v4i1, Custom);
984     setOperationAction(ISD::STORE , MVT::v4i1, Custom);
985 
986     setOperationAction(ISD::EXTRACT_VECTOR_ELT , MVT::v4i1, Custom);
987     setOperationAction(ISD::INSERT_VECTOR_ELT , MVT::v4i1, Expand);
988     setOperationAction(ISD::CONCAT_VECTORS , MVT::v4i1, Expand);
989     setOperationAction(ISD::EXTRACT_SUBVECTOR , MVT::v4i1, Expand);
990     setOperationAction(ISD::VECTOR_SHUFFLE , MVT::v4i1, Custom);
991     setOperationAction(ISD::SCALAR_TO_VECTOR, MVT::v4i1, Expand);
992     setOperationAction(ISD::BUILD_VECTOR, MVT::v4i1, Custom);
993 
994     setOperationAction(ISD::SINT_TO_FP, MVT::v4i1, Custom);
995     setOperationAction(ISD::UINT_TO_FP, MVT::v4i1, Custom);
996 
997     addRegisterClass(MVT::v4i1, &PPC::QBRCRegClass);
998 
999     setOperationAction(ISD::FFLOOR, MVT::v4f64, Legal);
1000     setOperationAction(ISD::FCEIL,  MVT::v4f64, Legal);
1001     setOperationAction(ISD::FTRUNC, MVT::v4f64, Legal);
1002     setOperationAction(ISD::FROUND, MVT::v4f64, Legal);
1003 
1004     setOperationAction(ISD::FFLOOR, MVT::v4f32, Legal);
1005     setOperationAction(ISD::FCEIL,  MVT::v4f32, Legal);
1006     setOperationAction(ISD::FTRUNC, MVT::v4f32, Legal);
1007     setOperationAction(ISD::FROUND, MVT::v4f32, Legal);
1008 
1009     setOperationAction(ISD::FNEARBYINT, MVT::v4f64, Expand);
1010     setOperationAction(ISD::FNEARBYINT, MVT::v4f32, Expand);
1011 
1012     // These need to set FE_INEXACT, and so cannot be vectorized here.
1013     setOperationAction(ISD::FRINT, MVT::v4f64, Expand);
1014     setOperationAction(ISD::FRINT, MVT::v4f32, Expand);
1015 
1016     if (TM.Options.UnsafeFPMath) {
1017       setOperationAction(ISD::FDIV, MVT::v4f64, Legal);
1018       setOperationAction(ISD::FSQRT, MVT::v4f64, Legal);
1019 
1020       setOperationAction(ISD::FDIV, MVT::v4f32, Legal);
1021       setOperationAction(ISD::FSQRT, MVT::v4f32, Legal);
1022     } else {
1023       setOperationAction(ISD::FDIV, MVT::v4f64, Expand);
1024       setOperationAction(ISD::FSQRT, MVT::v4f64, Expand);
1025 
1026       setOperationAction(ISD::FDIV, MVT::v4f32, Expand);
1027       setOperationAction(ISD::FSQRT, MVT::v4f32, Expand);
1028     }
1029   }
1030 
1031   if (Subtarget.has64BitSupport())
1032     setOperationAction(ISD::PREFETCH, MVT::Other, Legal);
1033 
1034   setOperationAction(ISD::READCYCLECOUNTER, MVT::i64, isPPC64 ? Legal : Custom);
1035 
1036   if (!isPPC64) {
1037     setOperationAction(ISD::ATOMIC_LOAD,  MVT::i64, Expand);
1038     setOperationAction(ISD::ATOMIC_STORE, MVT::i64, Expand);
1039   }
1040 
1041   setBooleanContents(ZeroOrOneBooleanContent);
1042 
1043   if (Subtarget.hasAltivec()) {
1044     // Altivec instructions set fields to all zeros or all ones.
1045     setBooleanVectorContents(ZeroOrNegativeOneBooleanContent);
1046   }
1047 
1048   if (!isPPC64) {
1049     // These libcalls are not available in 32-bit.
1050     setLibcallName(RTLIB::SHL_I128, nullptr);
1051     setLibcallName(RTLIB::SRL_I128, nullptr);
1052     setLibcallName(RTLIB::SRA_I128, nullptr);
1053   }
1054 
1055   setStackPointerRegisterToSaveRestore(isPPC64 ? PPC::X1 : PPC::R1);
1056 
1057   // We have target-specific dag combine patterns for the following nodes:
1058   setTargetDAGCombine(ISD::SHL);
1059   setTargetDAGCombine(ISD::SRA);
1060   setTargetDAGCombine(ISD::SRL);
1061   setTargetDAGCombine(ISD::SINT_TO_FP);
1062   setTargetDAGCombine(ISD::BUILD_VECTOR);
1063   if (Subtarget.hasFPCVT())
1064     setTargetDAGCombine(ISD::UINT_TO_FP);
1065   setTargetDAGCombine(ISD::LOAD);
1066   setTargetDAGCombine(ISD::STORE);
1067   setTargetDAGCombine(ISD::BR_CC);
1068   if (Subtarget.useCRBits())
1069     setTargetDAGCombine(ISD::BRCOND);
1070   setTargetDAGCombine(ISD::BSWAP);
1071   setTargetDAGCombine(ISD::INTRINSIC_WO_CHAIN);
1072   setTargetDAGCombine(ISD::INTRINSIC_W_CHAIN);
1073   setTargetDAGCombine(ISD::INTRINSIC_VOID);
1074 
1075   setTargetDAGCombine(ISD::SIGN_EXTEND);
1076   setTargetDAGCombine(ISD::ZERO_EXTEND);
1077   setTargetDAGCombine(ISD::ANY_EXTEND);
1078 
1079   if (Subtarget.useCRBits()) {
1080     setTargetDAGCombine(ISD::TRUNCATE);
1081     setTargetDAGCombine(ISD::SETCC);
1082     setTargetDAGCombine(ISD::SELECT_CC);
1083   }
1084 
1085   // Use reciprocal estimates.
1086   if (TM.Options.UnsafeFPMath) {
1087     setTargetDAGCombine(ISD::FDIV);
1088     setTargetDAGCombine(ISD::FSQRT);
1089   }
1090 
1091   // Darwin long double math library functions have $LDBL128 appended.
1092   if (Subtarget.isDarwin()) {
1093     setLibcallName(RTLIB::COS_PPCF128, "cosl$LDBL128");
1094     setLibcallName(RTLIB::POW_PPCF128, "powl$LDBL128");
1095     setLibcallName(RTLIB::REM_PPCF128, "fmodl$LDBL128");
1096     setLibcallName(RTLIB::SIN_PPCF128, "sinl$LDBL128");
1097     setLibcallName(RTLIB::SQRT_PPCF128, "sqrtl$LDBL128");
1098     setLibcallName(RTLIB::LOG_PPCF128, "logl$LDBL128");
1099     setLibcallName(RTLIB::LOG2_PPCF128, "log2l$LDBL128");
1100     setLibcallName(RTLIB::LOG10_PPCF128, "log10l$LDBL128");
1101     setLibcallName(RTLIB::EXP_PPCF128, "expl$LDBL128");
1102     setLibcallName(RTLIB::EXP2_PPCF128, "exp2l$LDBL128");
1103   }
1104 
1105   if (EnableQuadPrecision) {
1106     setLibcallName(RTLIB::LOG_F128, "logf128");
1107     setLibcallName(RTLIB::LOG2_F128, "log2f128");
1108     setLibcallName(RTLIB::LOG10_F128, "log10f128");
1109     setLibcallName(RTLIB::EXP_F128, "expf128");
1110     setLibcallName(RTLIB::EXP2_F128, "exp2f128");
1111     setLibcallName(RTLIB::SIN_F128, "sinf128");
1112     setLibcallName(RTLIB::COS_F128, "cosf128");
1113     setLibcallName(RTLIB::POW_F128, "powf128");
1114     setLibcallName(RTLIB::FMIN_F128, "fminf128");
1115     setLibcallName(RTLIB::FMAX_F128, "fmaxf128");
1116     setLibcallName(RTLIB::POWI_F128, "__powikf2");
1117     setLibcallName(RTLIB::REM_F128, "fmodf128");
1118   }
1119 
1120   // With 32 condition bits, we don't need to sink (and duplicate) compares
1121   // aggressively in CodeGenPrep.
1122   if (Subtarget.useCRBits()) {
1123     setHasMultipleConditionRegisters();
1124     setJumpIsExpensive();
1125   }
1126 
1127   setMinFunctionAlignment(2);
1128   if (Subtarget.isDarwin())
1129     setPrefFunctionAlignment(4);
1130 
1131   switch (Subtarget.getDarwinDirective()) {
1132   default: break;
1133   case PPC::DIR_970:
1134   case PPC::DIR_A2:
1135   case PPC::DIR_E500:
1136   case PPC::DIR_E500mc:
1137   case PPC::DIR_E5500:
1138   case PPC::DIR_PWR4:
1139   case PPC::DIR_PWR5:
1140   case PPC::DIR_PWR5X:
1141   case PPC::DIR_PWR6:
1142   case PPC::DIR_PWR6X:
1143   case PPC::DIR_PWR7:
1144   case PPC::DIR_PWR8:
1145   case PPC::DIR_PWR9:
1146     setPrefFunctionAlignment(4);
1147     setPrefLoopAlignment(4);
1148     break;
1149   }
1150 
1151   if (Subtarget.enableMachineScheduler())
1152     setSchedulingPreference(Sched::Source);
1153   else
1154     setSchedulingPreference(Sched::Hybrid);
1155 
1156   computeRegisterProperties(STI.getRegisterInfo());
1157 
1158   // The Freescale cores do better with aggressive inlining of memcpy and
1159   // friends. GCC uses same threshold of 128 bytes (= 32 word stores).
1160   if (Subtarget.getDarwinDirective() == PPC::DIR_E500mc ||
1161       Subtarget.getDarwinDirective() == PPC::DIR_E5500) {
1162     MaxStoresPerMemset = 32;
1163     MaxStoresPerMemsetOptSize = 16;
1164     MaxStoresPerMemcpy = 32;
1165     MaxStoresPerMemcpyOptSize = 8;
1166     MaxStoresPerMemmove = 32;
1167     MaxStoresPerMemmoveOptSize = 8;
1168   } else if (Subtarget.getDarwinDirective() == PPC::DIR_A2) {
1169     // The A2 also benefits from (very) aggressive inlining of memcpy and
1170     // friends. The overhead of a the function call, even when warm, can be
1171     // over one hundred cycles.
1172     MaxStoresPerMemset = 128;
1173     MaxStoresPerMemcpy = 128;
1174     MaxStoresPerMemmove = 128;
1175     MaxLoadsPerMemcmp = 128;
1176   } else {
1177     MaxLoadsPerMemcmp = 8;
1178     MaxLoadsPerMemcmpOptSize = 4;
1179   }
1180 }
1181 
1182 /// getMaxByValAlign - Helper for getByValTypeAlignment to determine
1183 /// the desired ByVal argument alignment.
1184 static void getMaxByValAlign(Type *Ty, unsigned &MaxAlign,
1185                              unsigned MaxMaxAlign) {
1186   if (MaxAlign == MaxMaxAlign)
1187     return;
1188   if (VectorType *VTy = dyn_cast<VectorType>(Ty)) {
1189     if (MaxMaxAlign >= 32 && VTy->getBitWidth() >= 256)
1190       MaxAlign = 32;
1191     else if (VTy->getBitWidth() >= 128 && MaxAlign < 16)
1192       MaxAlign = 16;
1193   } else if (ArrayType *ATy = dyn_cast<ArrayType>(Ty)) {
1194     unsigned EltAlign = 0;
1195     getMaxByValAlign(ATy->getElementType(), EltAlign, MaxMaxAlign);
1196     if (EltAlign > MaxAlign)
1197       MaxAlign = EltAlign;
1198   } else if (StructType *STy = dyn_cast<StructType>(Ty)) {
1199     for (auto *EltTy : STy->elements()) {
1200       unsigned EltAlign = 0;
1201       getMaxByValAlign(EltTy, EltAlign, MaxMaxAlign);
1202       if (EltAlign > MaxAlign)
1203         MaxAlign = EltAlign;
1204       if (MaxAlign == MaxMaxAlign)
1205         break;
1206     }
1207   }
1208 }
1209 
1210 /// getByValTypeAlignment - Return the desired alignment for ByVal aggregate
1211 /// function arguments in the caller parameter area.
1212 unsigned PPCTargetLowering::getByValTypeAlignment(Type *Ty,
1213                                                   const DataLayout &DL) const {
1214   // Darwin passes everything on 4 byte boundary.
1215   if (Subtarget.isDarwin())
1216     return 4;
1217 
1218   // 16byte and wider vectors are passed on 16byte boundary.
1219   // The rest is 8 on PPC64 and 4 on PPC32 boundary.
1220   unsigned Align = Subtarget.isPPC64() ? 8 : 4;
1221   if (Subtarget.hasAltivec() || Subtarget.hasQPX())
1222     getMaxByValAlign(Ty, Align, Subtarget.hasQPX() ? 32 : 16);
1223   return Align;
1224 }
1225 
1226 unsigned PPCTargetLowering::getNumRegistersForCallingConv(LLVMContext &Context,
1227                                                           CallingConv:: ID CC,
1228                                                           EVT VT) const {
1229   if (Subtarget.hasSPE() && VT == MVT::f64)
1230     return 2;
1231   return PPCTargetLowering::getNumRegisters(Context, VT);
1232 }
1233 
1234 MVT PPCTargetLowering::getRegisterTypeForCallingConv(LLVMContext &Context,
1235                                                      CallingConv:: ID CC,
1236                                                      EVT VT) const {
1237   if (Subtarget.hasSPE() && VT == MVT::f64)
1238     return MVT::i32;
1239   return PPCTargetLowering::getRegisterType(Context, VT);
1240 }
1241 
1242 bool PPCTargetLowering::useSoftFloat() const {
1243   return Subtarget.useSoftFloat();
1244 }
1245 
1246 bool PPCTargetLowering::hasSPE() const {
1247   return Subtarget.hasSPE();
1248 }
1249 
1250 const char *PPCTargetLowering::getTargetNodeName(unsigned Opcode) const {
1251   switch ((PPCISD::NodeType)Opcode) {
1252   case PPCISD::FIRST_NUMBER:    break;
1253   case PPCISD::FSEL:            return "PPCISD::FSEL";
1254   case PPCISD::FCFID:           return "PPCISD::FCFID";
1255   case PPCISD::FCFIDU:          return "PPCISD::FCFIDU";
1256   case PPCISD::FCFIDS:          return "PPCISD::FCFIDS";
1257   case PPCISD::FCFIDUS:         return "PPCISD::FCFIDUS";
1258   case PPCISD::FCTIDZ:          return "PPCISD::FCTIDZ";
1259   case PPCISD::FCTIWZ:          return "PPCISD::FCTIWZ";
1260   case PPCISD::FCTIDUZ:         return "PPCISD::FCTIDUZ";
1261   case PPCISD::FCTIWUZ:         return "PPCISD::FCTIWUZ";
1262   case PPCISD::FP_TO_UINT_IN_VSR:
1263                                 return "PPCISD::FP_TO_UINT_IN_VSR,";
1264   case PPCISD::FP_TO_SINT_IN_VSR:
1265                                 return "PPCISD::FP_TO_SINT_IN_VSR";
1266   case PPCISD::FRE:             return "PPCISD::FRE";
1267   case PPCISD::FRSQRTE:         return "PPCISD::FRSQRTE";
1268   case PPCISD::STFIWX:          return "PPCISD::STFIWX";
1269   case PPCISD::VMADDFP:         return "PPCISD::VMADDFP";
1270   case PPCISD::VNMSUBFP:        return "PPCISD::VNMSUBFP";
1271   case PPCISD::VPERM:           return "PPCISD::VPERM";
1272   case PPCISD::XXSPLT:          return "PPCISD::XXSPLT";
1273   case PPCISD::VECINSERT:       return "PPCISD::VECINSERT";
1274   case PPCISD::XXREVERSE:       return "PPCISD::XXREVERSE";
1275   case PPCISD::XXPERMDI:        return "PPCISD::XXPERMDI";
1276   case PPCISD::VECSHL:          return "PPCISD::VECSHL";
1277   case PPCISD::CMPB:            return "PPCISD::CMPB";
1278   case PPCISD::Hi:              return "PPCISD::Hi";
1279   case PPCISD::Lo:              return "PPCISD::Lo";
1280   case PPCISD::TOC_ENTRY:       return "PPCISD::TOC_ENTRY";
1281   case PPCISD::ATOMIC_CMP_SWAP_8: return "PPCISD::ATOMIC_CMP_SWAP_8";
1282   case PPCISD::ATOMIC_CMP_SWAP_16: return "PPCISD::ATOMIC_CMP_SWAP_16";
1283   case PPCISD::DYNALLOC:        return "PPCISD::DYNALLOC";
1284   case PPCISD::DYNAREAOFFSET:   return "PPCISD::DYNAREAOFFSET";
1285   case PPCISD::GlobalBaseReg:   return "PPCISD::GlobalBaseReg";
1286   case PPCISD::SRL:             return "PPCISD::SRL";
1287   case PPCISD::SRA:             return "PPCISD::SRA";
1288   case PPCISD::SHL:             return "PPCISD::SHL";
1289   case PPCISD::SRA_ADDZE:       return "PPCISD::SRA_ADDZE";
1290   case PPCISD::CALL:            return "PPCISD::CALL";
1291   case PPCISD::CALL_NOP:        return "PPCISD::CALL_NOP";
1292   case PPCISD::MTCTR:           return "PPCISD::MTCTR";
1293   case PPCISD::BCTRL:           return "PPCISD::BCTRL";
1294   case PPCISD::BCTRL_LOAD_TOC:  return "PPCISD::BCTRL_LOAD_TOC";
1295   case PPCISD::RET_FLAG:        return "PPCISD::RET_FLAG";
1296   case PPCISD::READ_TIME_BASE:  return "PPCISD::READ_TIME_BASE";
1297   case PPCISD::EH_SJLJ_SETJMP:  return "PPCISD::EH_SJLJ_SETJMP";
1298   case PPCISD::EH_SJLJ_LONGJMP: return "PPCISD::EH_SJLJ_LONGJMP";
1299   case PPCISD::MFOCRF:          return "PPCISD::MFOCRF";
1300   case PPCISD::MFVSR:           return "PPCISD::MFVSR";
1301   case PPCISD::MTVSRA:          return "PPCISD::MTVSRA";
1302   case PPCISD::MTVSRZ:          return "PPCISD::MTVSRZ";
1303   case PPCISD::SINT_VEC_TO_FP:  return "PPCISD::SINT_VEC_TO_FP";
1304   case PPCISD::UINT_VEC_TO_FP:  return "PPCISD::UINT_VEC_TO_FP";
1305   case PPCISD::ANDIo_1_EQ_BIT:  return "PPCISD::ANDIo_1_EQ_BIT";
1306   case PPCISD::ANDIo_1_GT_BIT:  return "PPCISD::ANDIo_1_GT_BIT";
1307   case PPCISD::VCMP:            return "PPCISD::VCMP";
1308   case PPCISD::VCMPo:           return "PPCISD::VCMPo";
1309   case PPCISD::LBRX:            return "PPCISD::LBRX";
1310   case PPCISD::STBRX:           return "PPCISD::STBRX";
1311   case PPCISD::LFIWAX:          return "PPCISD::LFIWAX";
1312   case PPCISD::LFIWZX:          return "PPCISD::LFIWZX";
1313   case PPCISD::LXSIZX:          return "PPCISD::LXSIZX";
1314   case PPCISD::STXSIX:          return "PPCISD::STXSIX";
1315   case PPCISD::VEXTS:           return "PPCISD::VEXTS";
1316   case PPCISD::SExtVElems:      return "PPCISD::SExtVElems";
1317   case PPCISD::LXVD2X:          return "PPCISD::LXVD2X";
1318   case PPCISD::STXVD2X:         return "PPCISD::STXVD2X";
1319   case PPCISD::ST_VSR_SCAL_INT:
1320                                 return "PPCISD::ST_VSR_SCAL_INT";
1321   case PPCISD::COND_BRANCH:     return "PPCISD::COND_BRANCH";
1322   case PPCISD::BDNZ:            return "PPCISD::BDNZ";
1323   case PPCISD::BDZ:             return "PPCISD::BDZ";
1324   case PPCISD::MFFS:            return "PPCISD::MFFS";
1325   case PPCISD::FADDRTZ:         return "PPCISD::FADDRTZ";
1326   case PPCISD::TC_RETURN:       return "PPCISD::TC_RETURN";
1327   case PPCISD::CR6SET:          return "PPCISD::CR6SET";
1328   case PPCISD::CR6UNSET:        return "PPCISD::CR6UNSET";
1329   case PPCISD::PPC32_GOT:       return "PPCISD::PPC32_GOT";
1330   case PPCISD::PPC32_PICGOT:    return "PPCISD::PPC32_PICGOT";
1331   case PPCISD::ADDIS_GOT_TPREL_HA: return "PPCISD::ADDIS_GOT_TPREL_HA";
1332   case PPCISD::LD_GOT_TPREL_L:  return "PPCISD::LD_GOT_TPREL_L";
1333   case PPCISD::ADD_TLS:         return "PPCISD::ADD_TLS";
1334   case PPCISD::ADDIS_TLSGD_HA:  return "PPCISD::ADDIS_TLSGD_HA";
1335   case PPCISD::ADDI_TLSGD_L:    return "PPCISD::ADDI_TLSGD_L";
1336   case PPCISD::GET_TLS_ADDR:    return "PPCISD::GET_TLS_ADDR";
1337   case PPCISD::ADDI_TLSGD_L_ADDR: return "PPCISD::ADDI_TLSGD_L_ADDR";
1338   case PPCISD::ADDIS_TLSLD_HA:  return "PPCISD::ADDIS_TLSLD_HA";
1339   case PPCISD::ADDI_TLSLD_L:    return "PPCISD::ADDI_TLSLD_L";
1340   case PPCISD::GET_TLSLD_ADDR:  return "PPCISD::GET_TLSLD_ADDR";
1341   case PPCISD::ADDI_TLSLD_L_ADDR: return "PPCISD::ADDI_TLSLD_L_ADDR";
1342   case PPCISD::ADDIS_DTPREL_HA: return "PPCISD::ADDIS_DTPREL_HA";
1343   case PPCISD::ADDI_DTPREL_L:   return "PPCISD::ADDI_DTPREL_L";
1344   case PPCISD::VADD_SPLAT:      return "PPCISD::VADD_SPLAT";
1345   case PPCISD::SC:              return "PPCISD::SC";
1346   case PPCISD::CLRBHRB:         return "PPCISD::CLRBHRB";
1347   case PPCISD::MFBHRBE:         return "PPCISD::MFBHRBE";
1348   case PPCISD::RFEBB:           return "PPCISD::RFEBB";
1349   case PPCISD::XXSWAPD:         return "PPCISD::XXSWAPD";
1350   case PPCISD::SWAP_NO_CHAIN:   return "PPCISD::SWAP_NO_CHAIN";
1351   case PPCISD::QVFPERM:         return "PPCISD::QVFPERM";
1352   case PPCISD::QVGPCI:          return "PPCISD::QVGPCI";
1353   case PPCISD::QVALIGNI:        return "PPCISD::QVALIGNI";
1354   case PPCISD::QVESPLATI:       return "PPCISD::QVESPLATI";
1355   case PPCISD::QBFLT:           return "PPCISD::QBFLT";
1356   case PPCISD::QVLFSb:          return "PPCISD::QVLFSb";
1357   case PPCISD::BUILD_FP128:     return "PPCISD::BUILD_FP128";
1358   case PPCISD::EXTSWSLI:        return "PPCISD::EXTSWSLI";
1359   }
1360   return nullptr;
1361 }
1362 
1363 EVT PPCTargetLowering::getSetCCResultType(const DataLayout &DL, LLVMContext &C,
1364                                           EVT VT) const {
1365   if (!VT.isVector())
1366     return Subtarget.useCRBits() ? MVT::i1 : MVT::i32;
1367 
1368   if (Subtarget.hasQPX())
1369     return EVT::getVectorVT(C, MVT::i1, VT.getVectorNumElements());
1370 
1371   return VT.changeVectorElementTypeToInteger();
1372 }
1373 
1374 bool PPCTargetLowering::enableAggressiveFMAFusion(EVT VT) const {
1375   assert(VT.isFloatingPoint() && "Non-floating-point FMA?");
1376   return true;
1377 }
1378 
1379 //===----------------------------------------------------------------------===//
1380 // Node matching predicates, for use by the tblgen matching code.
1381 //===----------------------------------------------------------------------===//
1382 
1383 /// isFloatingPointZero - Return true if this is 0.0 or -0.0.
1384 static bool isFloatingPointZero(SDValue Op) {
1385   if (ConstantFPSDNode *CFP = dyn_cast<ConstantFPSDNode>(Op))
1386     return CFP->getValueAPF().isZero();
1387   else if (ISD::isEXTLoad(Op.getNode()) || ISD::isNON_EXTLoad(Op.getNode())) {
1388     // Maybe this has already been legalized into the constant pool?
1389     if (ConstantPoolSDNode *CP = dyn_cast<ConstantPoolSDNode>(Op.getOperand(1)))
1390       if (const ConstantFP *CFP = dyn_cast<ConstantFP>(CP->getConstVal()))
1391         return CFP->getValueAPF().isZero();
1392   }
1393   return false;
1394 }
1395 
1396 /// isConstantOrUndef - Op is either an undef node or a ConstantSDNode.  Return
1397 /// true if Op is undef or if it matches the specified value.
1398 static bool isConstantOrUndef(int Op, int Val) {
1399   return Op < 0 || Op == Val;
1400 }
1401 
1402 /// isVPKUHUMShuffleMask - Return true if this is the shuffle mask for a
1403 /// VPKUHUM instruction.
1404 /// The ShuffleKind distinguishes between big-endian operations with
1405 /// two different inputs (0), either-endian operations with two identical
1406 /// inputs (1), and little-endian operations with two different inputs (2).
1407 /// For the latter, the input operands are swapped (see PPCInstrAltivec.td).
1408 bool PPC::isVPKUHUMShuffleMask(ShuffleVectorSDNode *N, unsigned ShuffleKind,
1409                                SelectionDAG &DAG) {
1410   bool IsLE = DAG.getDataLayout().isLittleEndian();
1411   if (ShuffleKind == 0) {
1412     if (IsLE)
1413       return false;
1414     for (unsigned i = 0; i != 16; ++i)
1415       if (!isConstantOrUndef(N->getMaskElt(i), i*2+1))
1416         return false;
1417   } else if (ShuffleKind == 2) {
1418     if (!IsLE)
1419       return false;
1420     for (unsigned i = 0; i != 16; ++i)
1421       if (!isConstantOrUndef(N->getMaskElt(i), i*2))
1422         return false;
1423   } else if (ShuffleKind == 1) {
1424     unsigned j = IsLE ? 0 : 1;
1425     for (unsigned i = 0; i != 8; ++i)
1426       if (!isConstantOrUndef(N->getMaskElt(i),    i*2+j) ||
1427           !isConstantOrUndef(N->getMaskElt(i+8),  i*2+j))
1428         return false;
1429   }
1430   return true;
1431 }
1432 
1433 /// isVPKUWUMShuffleMask - Return true if this is the shuffle mask for a
1434 /// VPKUWUM instruction.
1435 /// The ShuffleKind distinguishes between big-endian operations with
1436 /// two different inputs (0), either-endian operations with two identical
1437 /// inputs (1), and little-endian operations with two different inputs (2).
1438 /// For the latter, the input operands are swapped (see PPCInstrAltivec.td).
1439 bool PPC::isVPKUWUMShuffleMask(ShuffleVectorSDNode *N, unsigned ShuffleKind,
1440                                SelectionDAG &DAG) {
1441   bool IsLE = DAG.getDataLayout().isLittleEndian();
1442   if (ShuffleKind == 0) {
1443     if (IsLE)
1444       return false;
1445     for (unsigned i = 0; i != 16; i += 2)
1446       if (!isConstantOrUndef(N->getMaskElt(i  ),  i*2+2) ||
1447           !isConstantOrUndef(N->getMaskElt(i+1),  i*2+3))
1448         return false;
1449   } else if (ShuffleKind == 2) {
1450     if (!IsLE)
1451       return false;
1452     for (unsigned i = 0; i != 16; i += 2)
1453       if (!isConstantOrUndef(N->getMaskElt(i  ),  i*2) ||
1454           !isConstantOrUndef(N->getMaskElt(i+1),  i*2+1))
1455         return false;
1456   } else if (ShuffleKind == 1) {
1457     unsigned j = IsLE ? 0 : 2;
1458     for (unsigned i = 0; i != 8; i += 2)
1459       if (!isConstantOrUndef(N->getMaskElt(i  ),  i*2+j)   ||
1460           !isConstantOrUndef(N->getMaskElt(i+1),  i*2+j+1) ||
1461           !isConstantOrUndef(N->getMaskElt(i+8),  i*2+j)   ||
1462           !isConstantOrUndef(N->getMaskElt(i+9),  i*2+j+1))
1463         return false;
1464   }
1465   return true;
1466 }
1467 
1468 /// isVPKUDUMShuffleMask - Return true if this is the shuffle mask for a
1469 /// VPKUDUM instruction, AND the VPKUDUM instruction exists for the
1470 /// current subtarget.
1471 ///
1472 /// The ShuffleKind distinguishes between big-endian operations with
1473 /// two different inputs (0), either-endian operations with two identical
1474 /// inputs (1), and little-endian operations with two different inputs (2).
1475 /// For the latter, the input operands are swapped (see PPCInstrAltivec.td).
1476 bool PPC::isVPKUDUMShuffleMask(ShuffleVectorSDNode *N, unsigned ShuffleKind,
1477                                SelectionDAG &DAG) {
1478   const PPCSubtarget& Subtarget =
1479     static_cast<const PPCSubtarget&>(DAG.getSubtarget());
1480   if (!Subtarget.hasP8Vector())
1481     return false;
1482 
1483   bool IsLE = DAG.getDataLayout().isLittleEndian();
1484   if (ShuffleKind == 0) {
1485     if (IsLE)
1486       return false;
1487     for (unsigned i = 0; i != 16; i += 4)
1488       if (!isConstantOrUndef(N->getMaskElt(i  ),  i*2+4) ||
1489           !isConstantOrUndef(N->getMaskElt(i+1),  i*2+5) ||
1490           !isConstantOrUndef(N->getMaskElt(i+2),  i*2+6) ||
1491           !isConstantOrUndef(N->getMaskElt(i+3),  i*2+7))
1492         return false;
1493   } else if (ShuffleKind == 2) {
1494     if (!IsLE)
1495       return false;
1496     for (unsigned i = 0; i != 16; i += 4)
1497       if (!isConstantOrUndef(N->getMaskElt(i  ),  i*2) ||
1498           !isConstantOrUndef(N->getMaskElt(i+1),  i*2+1) ||
1499           !isConstantOrUndef(N->getMaskElt(i+2),  i*2+2) ||
1500           !isConstantOrUndef(N->getMaskElt(i+3),  i*2+3))
1501         return false;
1502   } else if (ShuffleKind == 1) {
1503     unsigned j = IsLE ? 0 : 4;
1504     for (unsigned i = 0; i != 8; i += 4)
1505       if (!isConstantOrUndef(N->getMaskElt(i  ),  i*2+j)   ||
1506           !isConstantOrUndef(N->getMaskElt(i+1),  i*2+j+1) ||
1507           !isConstantOrUndef(N->getMaskElt(i+2),  i*2+j+2) ||
1508           !isConstantOrUndef(N->getMaskElt(i+3),  i*2+j+3) ||
1509           !isConstantOrUndef(N->getMaskElt(i+8),  i*2+j)   ||
1510           !isConstantOrUndef(N->getMaskElt(i+9),  i*2+j+1) ||
1511           !isConstantOrUndef(N->getMaskElt(i+10), i*2+j+2) ||
1512           !isConstantOrUndef(N->getMaskElt(i+11), i*2+j+3))
1513         return false;
1514   }
1515   return true;
1516 }
1517 
1518 /// isVMerge - Common function, used to match vmrg* shuffles.
1519 ///
1520 static bool isVMerge(ShuffleVectorSDNode *N, unsigned UnitSize,
1521                      unsigned LHSStart, unsigned RHSStart) {
1522   if (N->getValueType(0) != MVT::v16i8)
1523     return false;
1524   assert((UnitSize == 1 || UnitSize == 2 || UnitSize == 4) &&
1525          "Unsupported merge size!");
1526 
1527   for (unsigned i = 0; i != 8/UnitSize; ++i)     // Step over units
1528     for (unsigned j = 0; j != UnitSize; ++j) {   // Step over bytes within unit
1529       if (!isConstantOrUndef(N->getMaskElt(i*UnitSize*2+j),
1530                              LHSStart+j+i*UnitSize) ||
1531           !isConstantOrUndef(N->getMaskElt(i*UnitSize*2+UnitSize+j),
1532                              RHSStart+j+i*UnitSize))
1533         return false;
1534     }
1535   return true;
1536 }
1537 
1538 /// isVMRGLShuffleMask - Return true if this is a shuffle mask suitable for
1539 /// a VMRGL* instruction with the specified unit size (1,2 or 4 bytes).
1540 /// The ShuffleKind distinguishes between big-endian merges with two
1541 /// different inputs (0), either-endian merges with two identical inputs (1),
1542 /// and little-endian merges with two different inputs (2).  For the latter,
1543 /// the input operands are swapped (see PPCInstrAltivec.td).
1544 bool PPC::isVMRGLShuffleMask(ShuffleVectorSDNode *N, unsigned UnitSize,
1545                              unsigned ShuffleKind, SelectionDAG &DAG) {
1546   if (DAG.getDataLayout().isLittleEndian()) {
1547     if (ShuffleKind == 1) // unary
1548       return isVMerge(N, UnitSize, 0, 0);
1549     else if (ShuffleKind == 2) // swapped
1550       return isVMerge(N, UnitSize, 0, 16);
1551     else
1552       return false;
1553   } else {
1554     if (ShuffleKind == 1) // unary
1555       return isVMerge(N, UnitSize, 8, 8);
1556     else if (ShuffleKind == 0) // normal
1557       return isVMerge(N, UnitSize, 8, 24);
1558     else
1559       return false;
1560   }
1561 }
1562 
1563 /// isVMRGHShuffleMask - Return true if this is a shuffle mask suitable for
1564 /// a VMRGH* instruction with the specified unit size (1,2 or 4 bytes).
1565 /// The ShuffleKind distinguishes between big-endian merges with two
1566 /// different inputs (0), either-endian merges with two identical inputs (1),
1567 /// and little-endian merges with two different inputs (2).  For the latter,
1568 /// the input operands are swapped (see PPCInstrAltivec.td).
1569 bool PPC::isVMRGHShuffleMask(ShuffleVectorSDNode *N, unsigned UnitSize,
1570                              unsigned ShuffleKind, SelectionDAG &DAG) {
1571   if (DAG.getDataLayout().isLittleEndian()) {
1572     if (ShuffleKind == 1) // unary
1573       return isVMerge(N, UnitSize, 8, 8);
1574     else if (ShuffleKind == 2) // swapped
1575       return isVMerge(N, UnitSize, 8, 24);
1576     else
1577       return false;
1578   } else {
1579     if (ShuffleKind == 1) // unary
1580       return isVMerge(N, UnitSize, 0, 0);
1581     else if (ShuffleKind == 0) // normal
1582       return isVMerge(N, UnitSize, 0, 16);
1583     else
1584       return false;
1585   }
1586 }
1587 
1588 /**
1589  * Common function used to match vmrgew and vmrgow shuffles
1590  *
1591  * The indexOffset determines whether to look for even or odd words in
1592  * the shuffle mask. This is based on the of the endianness of the target
1593  * machine.
1594  *   - Little Endian:
1595  *     - Use offset of 0 to check for odd elements
1596  *     - Use offset of 4 to check for even elements
1597  *   - Big Endian:
1598  *     - Use offset of 0 to check for even elements
1599  *     - Use offset of 4 to check for odd elements
1600  * A detailed description of the vector element ordering for little endian and
1601  * big endian can be found at
1602  * http://www.ibm.com/developerworks/library/l-ibm-xl-c-cpp-compiler/index.html
1603  * Targeting your applications - what little endian and big endian IBM XL C/C++
1604  * compiler differences mean to you
1605  *
1606  * The mask to the shuffle vector instruction specifies the indices of the
1607  * elements from the two input vectors to place in the result. The elements are
1608  * numbered in array-access order, starting with the first vector. These vectors
1609  * are always of type v16i8, thus each vector will contain 16 elements of size
1610  * 8. More info on the shuffle vector can be found in the
1611  * http://llvm.org/docs/LangRef.html#shufflevector-instruction
1612  * Language Reference.
1613  *
1614  * The RHSStartValue indicates whether the same input vectors are used (unary)
1615  * or two different input vectors are used, based on the following:
1616  *   - If the instruction uses the same vector for both inputs, the range of the
1617  *     indices will be 0 to 15. In this case, the RHSStart value passed should
1618  *     be 0.
1619  *   - If the instruction has two different vectors then the range of the
1620  *     indices will be 0 to 31. In this case, the RHSStart value passed should
1621  *     be 16 (indices 0-15 specify elements in the first vector while indices 16
1622  *     to 31 specify elements in the second vector).
1623  *
1624  * \param[in] N The shuffle vector SD Node to analyze
1625  * \param[in] IndexOffset Specifies whether to look for even or odd elements
1626  * \param[in] RHSStartValue Specifies the starting index for the righthand input
1627  * vector to the shuffle_vector instruction
1628  * \return true iff this shuffle vector represents an even or odd word merge
1629  */
1630 static bool isVMerge(ShuffleVectorSDNode *N, unsigned IndexOffset,
1631                      unsigned RHSStartValue) {
1632   if (N->getValueType(0) != MVT::v16i8)
1633     return false;
1634 
1635   for (unsigned i = 0; i < 2; ++i)
1636     for (unsigned j = 0; j < 4; ++j)
1637       if (!isConstantOrUndef(N->getMaskElt(i*4+j),
1638                              i*RHSStartValue+j+IndexOffset) ||
1639           !isConstantOrUndef(N->getMaskElt(i*4+j+8),
1640                              i*RHSStartValue+j+IndexOffset+8))
1641         return false;
1642   return true;
1643 }
1644 
1645 /**
1646  * Determine if the specified shuffle mask is suitable for the vmrgew or
1647  * vmrgow instructions.
1648  *
1649  * \param[in] N The shuffle vector SD Node to analyze
1650  * \param[in] CheckEven Check for an even merge (true) or an odd merge (false)
1651  * \param[in] ShuffleKind Identify the type of merge:
1652  *   - 0 = big-endian merge with two different inputs;
1653  *   - 1 = either-endian merge with two identical inputs;
1654  *   - 2 = little-endian merge with two different inputs (inputs are swapped for
1655  *     little-endian merges).
1656  * \param[in] DAG The current SelectionDAG
1657  * \return true iff this shuffle mask
1658  */
1659 bool PPC::isVMRGEOShuffleMask(ShuffleVectorSDNode *N, bool CheckEven,
1660                               unsigned ShuffleKind, SelectionDAG &DAG) {
1661   if (DAG.getDataLayout().isLittleEndian()) {
1662     unsigned indexOffset = CheckEven ? 4 : 0;
1663     if (ShuffleKind == 1) // Unary
1664       return isVMerge(N, indexOffset, 0);
1665     else if (ShuffleKind == 2) // swapped
1666       return isVMerge(N, indexOffset, 16);
1667     else
1668       return false;
1669   }
1670   else {
1671     unsigned indexOffset = CheckEven ? 0 : 4;
1672     if (ShuffleKind == 1) // Unary
1673       return isVMerge(N, indexOffset, 0);
1674     else if (ShuffleKind == 0) // Normal
1675       return isVMerge(N, indexOffset, 16);
1676     else
1677       return false;
1678   }
1679   return false;
1680 }
1681 
1682 /// isVSLDOIShuffleMask - If this is a vsldoi shuffle mask, return the shift
1683 /// amount, otherwise return -1.
1684 /// The ShuffleKind distinguishes between big-endian operations with two
1685 /// different inputs (0), either-endian operations with two identical inputs
1686 /// (1), and little-endian operations with two different inputs (2).  For the
1687 /// latter, the input operands are swapped (see PPCInstrAltivec.td).
1688 int PPC::isVSLDOIShuffleMask(SDNode *N, unsigned ShuffleKind,
1689                              SelectionDAG &DAG) {
1690   if (N->getValueType(0) != MVT::v16i8)
1691     return -1;
1692 
1693   ShuffleVectorSDNode *SVOp = cast<ShuffleVectorSDNode>(N);
1694 
1695   // Find the first non-undef value in the shuffle mask.
1696   unsigned i;
1697   for (i = 0; i != 16 && SVOp->getMaskElt(i) < 0; ++i)
1698     /*search*/;
1699 
1700   if (i == 16) return -1;  // all undef.
1701 
1702   // Otherwise, check to see if the rest of the elements are consecutively
1703   // numbered from this value.
1704   unsigned ShiftAmt = SVOp->getMaskElt(i);
1705   if (ShiftAmt < i) return -1;
1706 
1707   ShiftAmt -= i;
1708   bool isLE = DAG.getDataLayout().isLittleEndian();
1709 
1710   if ((ShuffleKind == 0 && !isLE) || (ShuffleKind == 2 && isLE)) {
1711     // Check the rest of the elements to see if they are consecutive.
1712     for (++i; i != 16; ++i)
1713       if (!isConstantOrUndef(SVOp->getMaskElt(i), ShiftAmt+i))
1714         return -1;
1715   } else if (ShuffleKind == 1) {
1716     // Check the rest of the elements to see if they are consecutive.
1717     for (++i; i != 16; ++i)
1718       if (!isConstantOrUndef(SVOp->getMaskElt(i), (ShiftAmt+i) & 15))
1719         return -1;
1720   } else
1721     return -1;
1722 
1723   if (isLE)
1724     ShiftAmt = 16 - ShiftAmt;
1725 
1726   return ShiftAmt;
1727 }
1728 
1729 /// isSplatShuffleMask - Return true if the specified VECTOR_SHUFFLE operand
1730 /// specifies a splat of a single element that is suitable for input to
1731 /// VSPLTB/VSPLTH/VSPLTW.
1732 bool PPC::isSplatShuffleMask(ShuffleVectorSDNode *N, unsigned EltSize) {
1733   assert(N->getValueType(0) == MVT::v16i8 &&
1734          (EltSize == 1 || EltSize == 2 || EltSize == 4));
1735 
1736   // The consecutive indices need to specify an element, not part of two
1737   // different elements.  So abandon ship early if this isn't the case.
1738   if (N->getMaskElt(0) % EltSize != 0)
1739     return false;
1740 
1741   // This is a splat operation if each element of the permute is the same, and
1742   // if the value doesn't reference the second vector.
1743   unsigned ElementBase = N->getMaskElt(0);
1744 
1745   // FIXME: Handle UNDEF elements too!
1746   if (ElementBase >= 16)
1747     return false;
1748 
1749   // Check that the indices are consecutive, in the case of a multi-byte element
1750   // splatted with a v16i8 mask.
1751   for (unsigned i = 1; i != EltSize; ++i)
1752     if (N->getMaskElt(i) < 0 || N->getMaskElt(i) != (int)(i+ElementBase))
1753       return false;
1754 
1755   for (unsigned i = EltSize, e = 16; i != e; i += EltSize) {
1756     if (N->getMaskElt(i) < 0) continue;
1757     for (unsigned j = 0; j != EltSize; ++j)
1758       if (N->getMaskElt(i+j) != N->getMaskElt(j))
1759         return false;
1760   }
1761   return true;
1762 }
1763 
1764 /// Check that the mask is shuffling N byte elements. Within each N byte
1765 /// element of the mask, the indices could be either in increasing or
1766 /// decreasing order as long as they are consecutive.
1767 /// \param[in] N the shuffle vector SD Node to analyze
1768 /// \param[in] Width the element width in bytes, could be 2/4/8/16 (HalfWord/
1769 /// Word/DoubleWord/QuadWord).
1770 /// \param[in] StepLen the delta indices number among the N byte element, if
1771 /// the mask is in increasing/decreasing order then it is 1/-1.
1772 /// \return true iff the mask is shuffling N byte elements.
1773 static bool isNByteElemShuffleMask(ShuffleVectorSDNode *N, unsigned Width,
1774                                    int StepLen) {
1775   assert((Width == 2 || Width == 4 || Width == 8 || Width == 16) &&
1776          "Unexpected element width.");
1777   assert((StepLen == 1 || StepLen == -1) && "Unexpected element width.");
1778 
1779   unsigned NumOfElem = 16 / Width;
1780   unsigned MaskVal[16]; //  Width is never greater than 16
1781   for (unsigned i = 0; i < NumOfElem; ++i) {
1782     MaskVal[0] = N->getMaskElt(i * Width);
1783     if ((StepLen == 1) && (MaskVal[0] % Width)) {
1784       return false;
1785     } else if ((StepLen == -1) && ((MaskVal[0] + 1) % Width)) {
1786       return false;
1787     }
1788 
1789     for (unsigned int j = 1; j < Width; ++j) {
1790       MaskVal[j] = N->getMaskElt(i * Width + j);
1791       if (MaskVal[j] != MaskVal[j-1] + StepLen) {
1792         return false;
1793       }
1794     }
1795   }
1796 
1797   return true;
1798 }
1799 
1800 bool PPC::isXXINSERTWMask(ShuffleVectorSDNode *N, unsigned &ShiftElts,
1801                           unsigned &InsertAtByte, bool &Swap, bool IsLE) {
1802   if (!isNByteElemShuffleMask(N, 4, 1))
1803     return false;
1804 
1805   // Now we look at mask elements 0,4,8,12
1806   unsigned M0 = N->getMaskElt(0) / 4;
1807   unsigned M1 = N->getMaskElt(4) / 4;
1808   unsigned M2 = N->getMaskElt(8) / 4;
1809   unsigned M3 = N->getMaskElt(12) / 4;
1810   unsigned LittleEndianShifts[] = { 2, 1, 0, 3 };
1811   unsigned BigEndianShifts[] = { 3, 0, 1, 2 };
1812 
1813   // Below, let H and L be arbitrary elements of the shuffle mask
1814   // where H is in the range [4,7] and L is in the range [0,3].
1815   // H, 1, 2, 3 or L, 5, 6, 7
1816   if ((M0 > 3 && M1 == 1 && M2 == 2 && M3 == 3) ||
1817       (M0 < 4 && M1 == 5 && M2 == 6 && M3 == 7)) {
1818     ShiftElts = IsLE ? LittleEndianShifts[M0 & 0x3] : BigEndianShifts[M0 & 0x3];
1819     InsertAtByte = IsLE ? 12 : 0;
1820     Swap = M0 < 4;
1821     return true;
1822   }
1823   // 0, H, 2, 3 or 4, L, 6, 7
1824   if ((M1 > 3 && M0 == 0 && M2 == 2 && M3 == 3) ||
1825       (M1 < 4 && M0 == 4 && M2 == 6 && M3 == 7)) {
1826     ShiftElts = IsLE ? LittleEndianShifts[M1 & 0x3] : BigEndianShifts[M1 & 0x3];
1827     InsertAtByte = IsLE ? 8 : 4;
1828     Swap = M1 < 4;
1829     return true;
1830   }
1831   // 0, 1, H, 3 or 4, 5, L, 7
1832   if ((M2 > 3 && M0 == 0 && M1 == 1 && M3 == 3) ||
1833       (M2 < 4 && M0 == 4 && M1 == 5 && M3 == 7)) {
1834     ShiftElts = IsLE ? LittleEndianShifts[M2 & 0x3] : BigEndianShifts[M2 & 0x3];
1835     InsertAtByte = IsLE ? 4 : 8;
1836     Swap = M2 < 4;
1837     return true;
1838   }
1839   // 0, 1, 2, H or 4, 5, 6, L
1840   if ((M3 > 3 && M0 == 0 && M1 == 1 && M2 == 2) ||
1841       (M3 < 4 && M0 == 4 && M1 == 5 && M2 == 6)) {
1842     ShiftElts = IsLE ? LittleEndianShifts[M3 & 0x3] : BigEndianShifts[M3 & 0x3];
1843     InsertAtByte = IsLE ? 0 : 12;
1844     Swap = M3 < 4;
1845     return true;
1846   }
1847 
1848   // If both vector operands for the shuffle are the same vector, the mask will
1849   // contain only elements from the first one and the second one will be undef.
1850   if (N->getOperand(1).isUndef()) {
1851     ShiftElts = 0;
1852     Swap = true;
1853     unsigned XXINSERTWSrcElem = IsLE ? 2 : 1;
1854     if (M0 == XXINSERTWSrcElem && M1 == 1 && M2 == 2 && M3 == 3) {
1855       InsertAtByte = IsLE ? 12 : 0;
1856       return true;
1857     }
1858     if (M0 == 0 && M1 == XXINSERTWSrcElem && M2 == 2 && M3 == 3) {
1859       InsertAtByte = IsLE ? 8 : 4;
1860       return true;
1861     }
1862     if (M0 == 0 && M1 == 1 && M2 == XXINSERTWSrcElem && M3 == 3) {
1863       InsertAtByte = IsLE ? 4 : 8;
1864       return true;
1865     }
1866     if (M0 == 0 && M1 == 1 && M2 == 2 && M3 == XXINSERTWSrcElem) {
1867       InsertAtByte = IsLE ? 0 : 12;
1868       return true;
1869     }
1870   }
1871 
1872   return false;
1873 }
1874 
1875 bool PPC::isXXSLDWIShuffleMask(ShuffleVectorSDNode *N, unsigned &ShiftElts,
1876                                bool &Swap, bool IsLE) {
1877   assert(N->getValueType(0) == MVT::v16i8 && "Shuffle vector expects v16i8");
1878   // Ensure each byte index of the word is consecutive.
1879   if (!isNByteElemShuffleMask(N, 4, 1))
1880     return false;
1881 
1882   // Now we look at mask elements 0,4,8,12, which are the beginning of words.
1883   unsigned M0 = N->getMaskElt(0) / 4;
1884   unsigned M1 = N->getMaskElt(4) / 4;
1885   unsigned M2 = N->getMaskElt(8) / 4;
1886   unsigned M3 = N->getMaskElt(12) / 4;
1887 
1888   // If both vector operands for the shuffle are the same vector, the mask will
1889   // contain only elements from the first one and the second one will be undef.
1890   if (N->getOperand(1).isUndef()) {
1891     assert(M0 < 4 && "Indexing into an undef vector?");
1892     if (M1 != (M0 + 1) % 4 || M2 != (M1 + 1) % 4 || M3 != (M2 + 1) % 4)
1893       return false;
1894 
1895     ShiftElts = IsLE ? (4 - M0) % 4 : M0;
1896     Swap = false;
1897     return true;
1898   }
1899 
1900   // Ensure each word index of the ShuffleVector Mask is consecutive.
1901   if (M1 != (M0 + 1) % 8 || M2 != (M1 + 1) % 8 || M3 != (M2 + 1) % 8)
1902     return false;
1903 
1904   if (IsLE) {
1905     if (M0 == 0 || M0 == 7 || M0 == 6 || M0 == 5) {
1906       // Input vectors don't need to be swapped if the leading element
1907       // of the result is one of the 3 left elements of the second vector
1908       // (or if there is no shift to be done at all).
1909       Swap = false;
1910       ShiftElts = (8 - M0) % 8;
1911     } else if (M0 == 4 || M0 == 3 || M0 == 2 || M0 == 1) {
1912       // Input vectors need to be swapped if the leading element
1913       // of the result is one of the 3 left elements of the first vector
1914       // (or if we're shifting by 4 - thereby simply swapping the vectors).
1915       Swap = true;
1916       ShiftElts = (4 - M0) % 4;
1917     }
1918 
1919     return true;
1920   } else {                                          // BE
1921     if (M0 == 0 || M0 == 1 || M0 == 2 || M0 == 3) {
1922       // Input vectors don't need to be swapped if the leading element
1923       // of the result is one of the 4 elements of the first vector.
1924       Swap = false;
1925       ShiftElts = M0;
1926     } else if (M0 == 4 || M0 == 5 || M0 == 6 || M0 == 7) {
1927       // Input vectors need to be swapped if the leading element
1928       // of the result is one of the 4 elements of the right vector.
1929       Swap = true;
1930       ShiftElts = M0 - 4;
1931     }
1932 
1933     return true;
1934   }
1935 }
1936 
1937 bool static isXXBRShuffleMaskHelper(ShuffleVectorSDNode *N, int Width) {
1938   assert(N->getValueType(0) == MVT::v16i8 && "Shuffle vector expects v16i8");
1939 
1940   if (!isNByteElemShuffleMask(N, Width, -1))
1941     return false;
1942 
1943   for (int i = 0; i < 16; i += Width)
1944     if (N->getMaskElt(i) != i + Width - 1)
1945       return false;
1946 
1947   return true;
1948 }
1949 
1950 bool PPC::isXXBRHShuffleMask(ShuffleVectorSDNode *N) {
1951   return isXXBRShuffleMaskHelper(N, 2);
1952 }
1953 
1954 bool PPC::isXXBRWShuffleMask(ShuffleVectorSDNode *N) {
1955   return isXXBRShuffleMaskHelper(N, 4);
1956 }
1957 
1958 bool PPC::isXXBRDShuffleMask(ShuffleVectorSDNode *N) {
1959   return isXXBRShuffleMaskHelper(N, 8);
1960 }
1961 
1962 bool PPC::isXXBRQShuffleMask(ShuffleVectorSDNode *N) {
1963   return isXXBRShuffleMaskHelper(N, 16);
1964 }
1965 
1966 /// Can node \p N be lowered to an XXPERMDI instruction? If so, set \p Swap
1967 /// if the inputs to the instruction should be swapped and set \p DM to the
1968 /// value for the immediate.
1969 /// Specifically, set \p Swap to true only if \p N can be lowered to XXPERMDI
1970 /// AND element 0 of the result comes from the first input (LE) or second input
1971 /// (BE). Set \p DM to the calculated result (0-3) only if \p N can be lowered.
1972 /// \return true iff the given mask of shuffle node \p N is a XXPERMDI shuffle
1973 /// mask.
1974 bool PPC::isXXPERMDIShuffleMask(ShuffleVectorSDNode *N, unsigned &DM,
1975                                bool &Swap, bool IsLE) {
1976   assert(N->getValueType(0) == MVT::v16i8 && "Shuffle vector expects v16i8");
1977 
1978   // Ensure each byte index of the double word is consecutive.
1979   if (!isNByteElemShuffleMask(N, 8, 1))
1980     return false;
1981 
1982   unsigned M0 = N->getMaskElt(0) / 8;
1983   unsigned M1 = N->getMaskElt(8) / 8;
1984   assert(((M0 | M1) < 4) && "A mask element out of bounds?");
1985 
1986   // If both vector operands for the shuffle are the same vector, the mask will
1987   // contain only elements from the first one and the second one will be undef.
1988   if (N->getOperand(1).isUndef()) {
1989     if ((M0 | M1) < 2) {
1990       DM = IsLE ? (((~M1) & 1) << 1) + ((~M0) & 1) : (M0 << 1) + (M1 & 1);
1991       Swap = false;
1992       return true;
1993     } else
1994       return false;
1995   }
1996 
1997   if (IsLE) {
1998     if (M0 > 1 && M1 < 2) {
1999       Swap = false;
2000     } else if (M0 < 2 && M1 > 1) {
2001       M0 = (M0 + 2) % 4;
2002       M1 = (M1 + 2) % 4;
2003       Swap = true;
2004     } else
2005       return false;
2006 
2007     // Note: if control flow comes here that means Swap is already set above
2008     DM = (((~M1) & 1) << 1) + ((~M0) & 1);
2009     return true;
2010   } else { // BE
2011     if (M0 < 2 && M1 > 1) {
2012       Swap = false;
2013     } else if (M0 > 1 && M1 < 2) {
2014       M0 = (M0 + 2) % 4;
2015       M1 = (M1 + 2) % 4;
2016       Swap = true;
2017     } else
2018       return false;
2019 
2020     // Note: if control flow comes here that means Swap is already set above
2021     DM = (M0 << 1) + (M1 & 1);
2022     return true;
2023   }
2024 }
2025 
2026 
2027 /// getVSPLTImmediate - Return the appropriate VSPLT* immediate to splat the
2028 /// specified isSplatShuffleMask VECTOR_SHUFFLE mask.
2029 unsigned PPC::getVSPLTImmediate(SDNode *N, unsigned EltSize,
2030                                 SelectionDAG &DAG) {
2031   ShuffleVectorSDNode *SVOp = cast<ShuffleVectorSDNode>(N);
2032   assert(isSplatShuffleMask(SVOp, EltSize));
2033   if (DAG.getDataLayout().isLittleEndian())
2034     return (16 / EltSize) - 1 - (SVOp->getMaskElt(0) / EltSize);
2035   else
2036     return SVOp->getMaskElt(0) / EltSize;
2037 }
2038 
2039 /// get_VSPLTI_elt - If this is a build_vector of constants which can be formed
2040 /// by using a vspltis[bhw] instruction of the specified element size, return
2041 /// the constant being splatted.  The ByteSize field indicates the number of
2042 /// bytes of each element [124] -> [bhw].
2043 SDValue PPC::get_VSPLTI_elt(SDNode *N, unsigned ByteSize, SelectionDAG &DAG) {
2044   SDValue OpVal(nullptr, 0);
2045 
2046   // If ByteSize of the splat is bigger than the element size of the
2047   // build_vector, then we have a case where we are checking for a splat where
2048   // multiple elements of the buildvector are folded together into a single
2049   // logical element of the splat (e.g. "vsplish 1" to splat {0,1}*8).
2050   unsigned EltSize = 16/N->getNumOperands();
2051   if (EltSize < ByteSize) {
2052     unsigned Multiple = ByteSize/EltSize;   // Number of BV entries per spltval.
2053     SDValue UniquedVals[4];
2054     assert(Multiple > 1 && Multiple <= 4 && "How can this happen?");
2055 
2056     // See if all of the elements in the buildvector agree across.
2057     for (unsigned i = 0, e = N->getNumOperands(); i != e; ++i) {
2058       if (N->getOperand(i).isUndef()) continue;
2059       // If the element isn't a constant, bail fully out.
2060       if (!isa<ConstantSDNode>(N->getOperand(i))) return SDValue();
2061 
2062       if (!UniquedVals[i&(Multiple-1)].getNode())
2063         UniquedVals[i&(Multiple-1)] = N->getOperand(i);
2064       else if (UniquedVals[i&(Multiple-1)] != N->getOperand(i))
2065         return SDValue();  // no match.
2066     }
2067 
2068     // Okay, if we reached this point, UniquedVals[0..Multiple-1] contains
2069     // either constant or undef values that are identical for each chunk.  See
2070     // if these chunks can form into a larger vspltis*.
2071 
2072     // Check to see if all of the leading entries are either 0 or -1.  If
2073     // neither, then this won't fit into the immediate field.
2074     bool LeadingZero = true;
2075     bool LeadingOnes = true;
2076     for (unsigned i = 0; i != Multiple-1; ++i) {
2077       if (!UniquedVals[i].getNode()) continue;  // Must have been undefs.
2078 
2079       LeadingZero &= isNullConstant(UniquedVals[i]);
2080       LeadingOnes &= isAllOnesConstant(UniquedVals[i]);
2081     }
2082     // Finally, check the least significant entry.
2083     if (LeadingZero) {
2084       if (!UniquedVals[Multiple-1].getNode())
2085         return DAG.getTargetConstant(0, SDLoc(N), MVT::i32);  // 0,0,0,undef
2086       int Val = cast<ConstantSDNode>(UniquedVals[Multiple-1])->getZExtValue();
2087       if (Val < 16)                                   // 0,0,0,4 -> vspltisw(4)
2088         return DAG.getTargetConstant(Val, SDLoc(N), MVT::i32);
2089     }
2090     if (LeadingOnes) {
2091       if (!UniquedVals[Multiple-1].getNode())
2092         return DAG.getTargetConstant(~0U, SDLoc(N), MVT::i32); // -1,-1,-1,undef
2093       int Val =cast<ConstantSDNode>(UniquedVals[Multiple-1])->getSExtValue();
2094       if (Val >= -16)                            // -1,-1,-1,-2 -> vspltisw(-2)
2095         return DAG.getTargetConstant(Val, SDLoc(N), MVT::i32);
2096     }
2097 
2098     return SDValue();
2099   }
2100 
2101   // Check to see if this buildvec has a single non-undef value in its elements.
2102   for (unsigned i = 0, e = N->getNumOperands(); i != e; ++i) {
2103     if (N->getOperand(i).isUndef()) continue;
2104     if (!OpVal.getNode())
2105       OpVal = N->getOperand(i);
2106     else if (OpVal != N->getOperand(i))
2107       return SDValue();
2108   }
2109 
2110   if (!OpVal.getNode()) return SDValue();  // All UNDEF: use implicit def.
2111 
2112   unsigned ValSizeInBytes = EltSize;
2113   uint64_t Value = 0;
2114   if (ConstantSDNode *CN = dyn_cast<ConstantSDNode>(OpVal)) {
2115     Value = CN->getZExtValue();
2116   } else if (ConstantFPSDNode *CN = dyn_cast<ConstantFPSDNode>(OpVal)) {
2117     assert(CN->getValueType(0) == MVT::f32 && "Only one legal FP vector type!");
2118     Value = FloatToBits(CN->getValueAPF().convertToFloat());
2119   }
2120 
2121   // If the splat value is larger than the element value, then we can never do
2122   // this splat.  The only case that we could fit the replicated bits into our
2123   // immediate field for would be zero, and we prefer to use vxor for it.
2124   if (ValSizeInBytes < ByteSize) return SDValue();
2125 
2126   // If the element value is larger than the splat value, check if it consists
2127   // of a repeated bit pattern of size ByteSize.
2128   if (!APInt(ValSizeInBytes * 8, Value).isSplat(ByteSize * 8))
2129     return SDValue();
2130 
2131   // Properly sign extend the value.
2132   int MaskVal = SignExtend32(Value, ByteSize * 8);
2133 
2134   // If this is zero, don't match, zero matches ISD::isBuildVectorAllZeros.
2135   if (MaskVal == 0) return SDValue();
2136 
2137   // Finally, if this value fits in a 5 bit sext field, return it
2138   if (SignExtend32<5>(MaskVal) == MaskVal)
2139     return DAG.getTargetConstant(MaskVal, SDLoc(N), MVT::i32);
2140   return SDValue();
2141 }
2142 
2143 /// isQVALIGNIShuffleMask - If this is a qvaligni shuffle mask, return the shift
2144 /// amount, otherwise return -1.
2145 int PPC::isQVALIGNIShuffleMask(SDNode *N) {
2146   EVT VT = N->getValueType(0);
2147   if (VT != MVT::v4f64 && VT != MVT::v4f32 && VT != MVT::v4i1)
2148     return -1;
2149 
2150   ShuffleVectorSDNode *SVOp = cast<ShuffleVectorSDNode>(N);
2151 
2152   // Find the first non-undef value in the shuffle mask.
2153   unsigned i;
2154   for (i = 0; i != 4 && SVOp->getMaskElt(i) < 0; ++i)
2155     /*search*/;
2156 
2157   if (i == 4) return -1;  // all undef.
2158 
2159   // Otherwise, check to see if the rest of the elements are consecutively
2160   // numbered from this value.
2161   unsigned ShiftAmt = SVOp->getMaskElt(i);
2162   if (ShiftAmt < i) return -1;
2163   ShiftAmt -= i;
2164 
2165   // Check the rest of the elements to see if they are consecutive.
2166   for (++i; i != 4; ++i)
2167     if (!isConstantOrUndef(SVOp->getMaskElt(i), ShiftAmt+i))
2168       return -1;
2169 
2170   return ShiftAmt;
2171 }
2172 
2173 //===----------------------------------------------------------------------===//
2174 //  Addressing Mode Selection
2175 //===----------------------------------------------------------------------===//
2176 
2177 /// isIntS16Immediate - This method tests to see if the node is either a 32-bit
2178 /// or 64-bit immediate, and if the value can be accurately represented as a
2179 /// sign extension from a 16-bit value.  If so, this returns true and the
2180 /// immediate.
2181 bool llvm::isIntS16Immediate(SDNode *N, int16_t &Imm) {
2182   if (!isa<ConstantSDNode>(N))
2183     return false;
2184 
2185   Imm = (int16_t)cast<ConstantSDNode>(N)->getZExtValue();
2186   if (N->getValueType(0) == MVT::i32)
2187     return Imm == (int32_t)cast<ConstantSDNode>(N)->getZExtValue();
2188   else
2189     return Imm == (int64_t)cast<ConstantSDNode>(N)->getZExtValue();
2190 }
2191 bool llvm::isIntS16Immediate(SDValue Op, int16_t &Imm) {
2192   return isIntS16Immediate(Op.getNode(), Imm);
2193 }
2194 
2195 /// SelectAddressRegReg - Given the specified addressed, check to see if it
2196 /// can be represented as an indexed [r+r] operation.  Returns false if it
2197 /// can be more efficiently represented with [r+imm].
2198 bool PPCTargetLowering::SelectAddressRegReg(SDValue N, SDValue &Base,
2199                                             SDValue &Index,
2200                                             SelectionDAG &DAG) const {
2201   int16_t imm = 0;
2202   if (N.getOpcode() == ISD::ADD) {
2203     if (isIntS16Immediate(N.getOperand(1), imm))
2204       return false;    // r+i
2205     if (N.getOperand(1).getOpcode() == PPCISD::Lo)
2206       return false;    // r+i
2207 
2208     Base = N.getOperand(0);
2209     Index = N.getOperand(1);
2210     return true;
2211   } else if (N.getOpcode() == ISD::OR) {
2212     if (isIntS16Immediate(N.getOperand(1), imm))
2213       return false;    // r+i can fold it if we can.
2214 
2215     // If this is an or of disjoint bitfields, we can codegen this as an add
2216     // (for better address arithmetic) if the LHS and RHS of the OR are provably
2217     // disjoint.
2218     KnownBits LHSKnown, RHSKnown;
2219     DAG.computeKnownBits(N.getOperand(0), LHSKnown);
2220 
2221     if (LHSKnown.Zero.getBoolValue()) {
2222       DAG.computeKnownBits(N.getOperand(1), RHSKnown);
2223       // If all of the bits are known zero on the LHS or RHS, the add won't
2224       // carry.
2225       if (~(LHSKnown.Zero | RHSKnown.Zero) == 0) {
2226         Base = N.getOperand(0);
2227         Index = N.getOperand(1);
2228         return true;
2229       }
2230     }
2231   }
2232 
2233   return false;
2234 }
2235 
2236 // If we happen to be doing an i64 load or store into a stack slot that has
2237 // less than a 4-byte alignment, then the frame-index elimination may need to
2238 // use an indexed load or store instruction (because the offset may not be a
2239 // multiple of 4). The extra register needed to hold the offset comes from the
2240 // register scavenger, and it is possible that the scavenger will need to use
2241 // an emergency spill slot. As a result, we need to make sure that a spill slot
2242 // is allocated when doing an i64 load/store into a less-than-4-byte-aligned
2243 // stack slot.
2244 static void fixupFuncForFI(SelectionDAG &DAG, int FrameIdx, EVT VT) {
2245   // FIXME: This does not handle the LWA case.
2246   if (VT != MVT::i64)
2247     return;
2248 
2249   // NOTE: We'll exclude negative FIs here, which come from argument
2250   // lowering, because there are no known test cases triggering this problem
2251   // using packed structures (or similar). We can remove this exclusion if
2252   // we find such a test case. The reason why this is so test-case driven is
2253   // because this entire 'fixup' is only to prevent crashes (from the
2254   // register scavenger) on not-really-valid inputs. For example, if we have:
2255   //   %a = alloca i1
2256   //   %b = bitcast i1* %a to i64*
2257   //   store i64* a, i64 b
2258   // then the store should really be marked as 'align 1', but is not. If it
2259   // were marked as 'align 1' then the indexed form would have been
2260   // instruction-selected initially, and the problem this 'fixup' is preventing
2261   // won't happen regardless.
2262   if (FrameIdx < 0)
2263     return;
2264 
2265   MachineFunction &MF = DAG.getMachineFunction();
2266   MachineFrameInfo &MFI = MF.getFrameInfo();
2267 
2268   unsigned Align = MFI.getObjectAlignment(FrameIdx);
2269   if (Align >= 4)
2270     return;
2271 
2272   PPCFunctionInfo *FuncInfo = MF.getInfo<PPCFunctionInfo>();
2273   FuncInfo->setHasNonRISpills();
2274 }
2275 
2276 /// Returns true if the address N can be represented by a base register plus
2277 /// a signed 16-bit displacement [r+imm], and if it is not better
2278 /// represented as reg+reg.  If \p Alignment is non-zero, only accept
2279 /// displacements that are multiples of that value.
2280 bool PPCTargetLowering::SelectAddressRegImm(SDValue N, SDValue &Disp,
2281                                             SDValue &Base,
2282                                             SelectionDAG &DAG,
2283                                             unsigned Alignment) const {
2284   // FIXME dl should come from parent load or store, not from address
2285   SDLoc dl(N);
2286   // If this can be more profitably realized as r+r, fail.
2287   if (SelectAddressRegReg(N, Disp, Base, DAG))
2288     return false;
2289 
2290   if (N.getOpcode() == ISD::ADD) {
2291     int16_t imm = 0;
2292     if (isIntS16Immediate(N.getOperand(1), imm) &&
2293         (!Alignment || (imm % Alignment) == 0)) {
2294       Disp = DAG.getTargetConstant(imm, dl, N.getValueType());
2295       if (FrameIndexSDNode *FI = dyn_cast<FrameIndexSDNode>(N.getOperand(0))) {
2296         Base = DAG.getTargetFrameIndex(FI->getIndex(), N.getValueType());
2297         fixupFuncForFI(DAG, FI->getIndex(), N.getValueType());
2298       } else {
2299         Base = N.getOperand(0);
2300       }
2301       return true; // [r+i]
2302     } else if (N.getOperand(1).getOpcode() == PPCISD::Lo) {
2303       // Match LOAD (ADD (X, Lo(G))).
2304       assert(!cast<ConstantSDNode>(N.getOperand(1).getOperand(1))->getZExtValue()
2305              && "Cannot handle constant offsets yet!");
2306       Disp = N.getOperand(1).getOperand(0);  // The global address.
2307       assert(Disp.getOpcode() == ISD::TargetGlobalAddress ||
2308              Disp.getOpcode() == ISD::TargetGlobalTLSAddress ||
2309              Disp.getOpcode() == ISD::TargetConstantPool ||
2310              Disp.getOpcode() == ISD::TargetJumpTable);
2311       Base = N.getOperand(0);
2312       return true;  // [&g+r]
2313     }
2314   } else if (N.getOpcode() == ISD::OR) {
2315     int16_t imm = 0;
2316     if (isIntS16Immediate(N.getOperand(1), imm) &&
2317         (!Alignment || (imm % Alignment) == 0)) {
2318       // If this is an or of disjoint bitfields, we can codegen this as an add
2319       // (for better address arithmetic) if the LHS and RHS of the OR are
2320       // provably disjoint.
2321       KnownBits LHSKnown;
2322       DAG.computeKnownBits(N.getOperand(0), LHSKnown);
2323 
2324       if ((LHSKnown.Zero.getZExtValue()|~(uint64_t)imm) == ~0ULL) {
2325         // If all of the bits are known zero on the LHS or RHS, the add won't
2326         // carry.
2327         if (FrameIndexSDNode *FI =
2328               dyn_cast<FrameIndexSDNode>(N.getOperand(0))) {
2329           Base = DAG.getTargetFrameIndex(FI->getIndex(), N.getValueType());
2330           fixupFuncForFI(DAG, FI->getIndex(), N.getValueType());
2331         } else {
2332           Base = N.getOperand(0);
2333         }
2334         Disp = DAG.getTargetConstant(imm, dl, N.getValueType());
2335         return true;
2336       }
2337     }
2338   } else if (ConstantSDNode *CN = dyn_cast<ConstantSDNode>(N)) {
2339     // Loading from a constant address.
2340 
2341     // If this address fits entirely in a 16-bit sext immediate field, codegen
2342     // this as "d, 0"
2343     int16_t Imm;
2344     if (isIntS16Immediate(CN, Imm) && (!Alignment || (Imm % Alignment) == 0)) {
2345       Disp = DAG.getTargetConstant(Imm, dl, CN->getValueType(0));
2346       Base = DAG.getRegister(Subtarget.isPPC64() ? PPC::ZERO8 : PPC::ZERO,
2347                              CN->getValueType(0));
2348       return true;
2349     }
2350 
2351     // Handle 32-bit sext immediates with LIS + addr mode.
2352     if ((CN->getValueType(0) == MVT::i32 ||
2353          (int64_t)CN->getZExtValue() == (int)CN->getZExtValue()) &&
2354         (!Alignment || (CN->getZExtValue() % Alignment) == 0)) {
2355       int Addr = (int)CN->getZExtValue();
2356 
2357       // Otherwise, break this down into an LIS + disp.
2358       Disp = DAG.getTargetConstant((short)Addr, dl, MVT::i32);
2359 
2360       Base = DAG.getTargetConstant((Addr - (signed short)Addr) >> 16, dl,
2361                                    MVT::i32);
2362       unsigned Opc = CN->getValueType(0) == MVT::i32 ? PPC::LIS : PPC::LIS8;
2363       Base = SDValue(DAG.getMachineNode(Opc, dl, CN->getValueType(0), Base), 0);
2364       return true;
2365     }
2366   }
2367 
2368   Disp = DAG.getTargetConstant(0, dl, getPointerTy(DAG.getDataLayout()));
2369   if (FrameIndexSDNode *FI = dyn_cast<FrameIndexSDNode>(N)) {
2370     Base = DAG.getTargetFrameIndex(FI->getIndex(), N.getValueType());
2371     fixupFuncForFI(DAG, FI->getIndex(), N.getValueType());
2372   } else
2373     Base = N;
2374   return true;      // [r+0]
2375 }
2376 
2377 /// SelectAddressRegRegOnly - Given the specified addressed, force it to be
2378 /// represented as an indexed [r+r] operation.
2379 bool PPCTargetLowering::SelectAddressRegRegOnly(SDValue N, SDValue &Base,
2380                                                 SDValue &Index,
2381                                                 SelectionDAG &DAG) const {
2382   // Check to see if we can easily represent this as an [r+r] address.  This
2383   // will fail if it thinks that the address is more profitably represented as
2384   // reg+imm, e.g. where imm = 0.
2385   if (SelectAddressRegReg(N, Base, Index, DAG))
2386     return true;
2387 
2388   // If the address is the result of an add, we will utilize the fact that the
2389   // address calculation includes an implicit add.  However, we can reduce
2390   // register pressure if we do not materialize a constant just for use as the
2391   // index register.  We only get rid of the add if it is not an add of a
2392   // value and a 16-bit signed constant and both have a single use.
2393   int16_t imm = 0;
2394   if (N.getOpcode() == ISD::ADD &&
2395       (!isIntS16Immediate(N.getOperand(1), imm) ||
2396        !N.getOperand(1).hasOneUse() || !N.getOperand(0).hasOneUse())) {
2397     Base = N.getOperand(0);
2398     Index = N.getOperand(1);
2399     return true;
2400   }
2401 
2402   // Otherwise, do it the hard way, using R0 as the base register.
2403   Base = DAG.getRegister(Subtarget.isPPC64() ? PPC::ZERO8 : PPC::ZERO,
2404                          N.getValueType());
2405   Index = N;
2406   return true;
2407 }
2408 
2409 /// Returns true if we should use a direct load into vector instruction
2410 /// (such as lxsd or lfd), instead of a load into gpr + direct move sequence.
2411 static bool usePartialVectorLoads(SDNode *N) {
2412   if (!N->hasOneUse())
2413     return false;
2414 
2415   // If there are any other uses other than scalar to vector, then we should
2416   // keep it as a scalar load -> direct move pattern to prevent multiple
2417   // loads.  Currently, only check for i64 since we have lxsd/lfd to do this
2418   // efficiently, but no update equivalent.
2419   if (LoadSDNode *LD = dyn_cast<LoadSDNode>(N)) {
2420     EVT MemVT = LD->getMemoryVT();
2421     if (MemVT.isSimple() && MemVT.getSimpleVT().SimpleTy == MVT::i64) {
2422       SDNode *User = *(LD->use_begin());
2423       if (User->getOpcode() == ISD::SCALAR_TO_VECTOR)
2424         return true;
2425     }
2426   }
2427 
2428   return false;
2429 }
2430 
2431 /// getPreIndexedAddressParts - returns true by value, base pointer and
2432 /// offset pointer and addressing mode by reference if the node's address
2433 /// can be legally represented as pre-indexed load / store address.
2434 bool PPCTargetLowering::getPreIndexedAddressParts(SDNode *N, SDValue &Base,
2435                                                   SDValue &Offset,
2436                                                   ISD::MemIndexedMode &AM,
2437                                                   SelectionDAG &DAG) const {
2438   if (DisablePPCPreinc) return false;
2439 
2440   bool isLoad = true;
2441   SDValue Ptr;
2442   EVT VT;
2443   unsigned Alignment;
2444   if (LoadSDNode *LD = dyn_cast<LoadSDNode>(N)) {
2445     Ptr = LD->getBasePtr();
2446     VT = LD->getMemoryVT();
2447     Alignment = LD->getAlignment();
2448   } else if (StoreSDNode *ST = dyn_cast<StoreSDNode>(N)) {
2449     Ptr = ST->getBasePtr();
2450     VT  = ST->getMemoryVT();
2451     Alignment = ST->getAlignment();
2452     isLoad = false;
2453   } else
2454     return false;
2455 
2456   // Do not generate pre-inc forms for specific loads that feed scalar_to_vector
2457   // instructions because we can fold these into a more efficient instruction
2458   // instead, (such as LXSD).
2459   if (isLoad && usePartialVectorLoads(N)) {
2460     return false;
2461   }
2462 
2463   // PowerPC doesn't have preinc load/store instructions for vectors (except
2464   // for QPX, which does have preinc r+r forms).
2465   if (VT.isVector()) {
2466     if (!Subtarget.hasQPX() || (VT != MVT::v4f64 && VT != MVT::v4f32)) {
2467       return false;
2468     } else if (SelectAddressRegRegOnly(Ptr, Offset, Base, DAG)) {
2469       AM = ISD::PRE_INC;
2470       return true;
2471     }
2472   }
2473 
2474   if (SelectAddressRegReg(Ptr, Base, Offset, DAG)) {
2475     // Common code will reject creating a pre-inc form if the base pointer
2476     // is a frame index, or if N is a store and the base pointer is either
2477     // the same as or a predecessor of the value being stored.  Check for
2478     // those situations here, and try with swapped Base/Offset instead.
2479     bool Swap = false;
2480 
2481     if (isa<FrameIndexSDNode>(Base) || isa<RegisterSDNode>(Base))
2482       Swap = true;
2483     else if (!isLoad) {
2484       SDValue Val = cast<StoreSDNode>(N)->getValue();
2485       if (Val == Base || Base.getNode()->isPredecessorOf(Val.getNode()))
2486         Swap = true;
2487     }
2488 
2489     if (Swap)
2490       std::swap(Base, Offset);
2491 
2492     AM = ISD::PRE_INC;
2493     return true;
2494   }
2495 
2496   // LDU/STU can only handle immediates that are a multiple of 4.
2497   if (VT != MVT::i64) {
2498     if (!SelectAddressRegImm(Ptr, Offset, Base, DAG, 0))
2499       return false;
2500   } else {
2501     // LDU/STU need an address with at least 4-byte alignment.
2502     if (Alignment < 4)
2503       return false;
2504 
2505     if (!SelectAddressRegImm(Ptr, Offset, Base, DAG, 4))
2506       return false;
2507   }
2508 
2509   if (LoadSDNode *LD = dyn_cast<LoadSDNode>(N)) {
2510     // PPC64 doesn't have lwau, but it does have lwaux.  Reject preinc load of
2511     // sext i32 to i64 when addr mode is r+i.
2512     if (LD->getValueType(0) == MVT::i64 && LD->getMemoryVT() == MVT::i32 &&
2513         LD->getExtensionType() == ISD::SEXTLOAD &&
2514         isa<ConstantSDNode>(Offset))
2515       return false;
2516   }
2517 
2518   AM = ISD::PRE_INC;
2519   return true;
2520 }
2521 
2522 //===----------------------------------------------------------------------===//
2523 //  LowerOperation implementation
2524 //===----------------------------------------------------------------------===//
2525 
2526 /// Return true if we should reference labels using a PICBase, set the HiOpFlags
2527 /// and LoOpFlags to the target MO flags.
2528 static void getLabelAccessInfo(bool IsPIC, const PPCSubtarget &Subtarget,
2529                                unsigned &HiOpFlags, unsigned &LoOpFlags,
2530                                const GlobalValue *GV = nullptr) {
2531   HiOpFlags = PPCII::MO_HA;
2532   LoOpFlags = PPCII::MO_LO;
2533 
2534   // Don't use the pic base if not in PIC relocation model.
2535   if (IsPIC) {
2536     HiOpFlags |= PPCII::MO_PIC_FLAG;
2537     LoOpFlags |= PPCII::MO_PIC_FLAG;
2538   }
2539 
2540   // If this is a reference to a global value that requires a non-lazy-ptr, make
2541   // sure that instruction lowering adds it.
2542   if (GV && Subtarget.hasLazyResolverStub(GV)) {
2543     HiOpFlags |= PPCII::MO_NLP_FLAG;
2544     LoOpFlags |= PPCII::MO_NLP_FLAG;
2545 
2546     if (GV->hasHiddenVisibility()) {
2547       HiOpFlags |= PPCII::MO_NLP_HIDDEN_FLAG;
2548       LoOpFlags |= PPCII::MO_NLP_HIDDEN_FLAG;
2549     }
2550   }
2551 }
2552 
2553 static SDValue LowerLabelRef(SDValue HiPart, SDValue LoPart, bool isPIC,
2554                              SelectionDAG &DAG) {
2555   SDLoc DL(HiPart);
2556   EVT PtrVT = HiPart.getValueType();
2557   SDValue Zero = DAG.getConstant(0, DL, PtrVT);
2558 
2559   SDValue Hi = DAG.getNode(PPCISD::Hi, DL, PtrVT, HiPart, Zero);
2560   SDValue Lo = DAG.getNode(PPCISD::Lo, DL, PtrVT, LoPart, Zero);
2561 
2562   // With PIC, the first instruction is actually "GR+hi(&G)".
2563   if (isPIC)
2564     Hi = DAG.getNode(ISD::ADD, DL, PtrVT,
2565                      DAG.getNode(PPCISD::GlobalBaseReg, DL, PtrVT), Hi);
2566 
2567   // Generate non-pic code that has direct accesses to the constant pool.
2568   // The address of the global is just (hi(&g)+lo(&g)).
2569   return DAG.getNode(ISD::ADD, DL, PtrVT, Hi, Lo);
2570 }
2571 
2572 static void setUsesTOCBasePtr(MachineFunction &MF) {
2573   PPCFunctionInfo *FuncInfo = MF.getInfo<PPCFunctionInfo>();
2574   FuncInfo->setUsesTOCBasePtr();
2575 }
2576 
2577 static void setUsesTOCBasePtr(SelectionDAG &DAG) {
2578   setUsesTOCBasePtr(DAG.getMachineFunction());
2579 }
2580 
2581 static SDValue getTOCEntry(SelectionDAG &DAG, const SDLoc &dl, bool Is64Bit,
2582                            SDValue GA) {
2583   EVT VT = Is64Bit ? MVT::i64 : MVT::i32;
2584   SDValue Reg = Is64Bit ? DAG.getRegister(PPC::X2, VT) :
2585                 DAG.getNode(PPCISD::GlobalBaseReg, dl, VT);
2586 
2587   SDValue Ops[] = { GA, Reg };
2588   return DAG.getMemIntrinsicNode(
2589       PPCISD::TOC_ENTRY, dl, DAG.getVTList(VT, MVT::Other), Ops, VT,
2590       MachinePointerInfo::getGOT(DAG.getMachineFunction()), 0,
2591       MachineMemOperand::MOLoad);
2592 }
2593 
2594 SDValue PPCTargetLowering::LowerConstantPool(SDValue Op,
2595                                              SelectionDAG &DAG) const {
2596   EVT PtrVT = Op.getValueType();
2597   ConstantPoolSDNode *CP = cast<ConstantPoolSDNode>(Op);
2598   const Constant *C = CP->getConstVal();
2599 
2600   // 64-bit SVR4 ABI code is always position-independent.
2601   // The actual address of the GlobalValue is stored in the TOC.
2602   if (Subtarget.isSVR4ABI() && Subtarget.isPPC64()) {
2603     setUsesTOCBasePtr(DAG);
2604     SDValue GA = DAG.getTargetConstantPool(C, PtrVT, CP->getAlignment(), 0);
2605     return getTOCEntry(DAG, SDLoc(CP), true, GA);
2606   }
2607 
2608   unsigned MOHiFlag, MOLoFlag;
2609   bool IsPIC = isPositionIndependent();
2610   getLabelAccessInfo(IsPIC, Subtarget, MOHiFlag, MOLoFlag);
2611 
2612   if (IsPIC && Subtarget.isSVR4ABI()) {
2613     SDValue GA = DAG.getTargetConstantPool(C, PtrVT, CP->getAlignment(),
2614                                            PPCII::MO_PIC_FLAG);
2615     return getTOCEntry(DAG, SDLoc(CP), false, GA);
2616   }
2617 
2618   SDValue CPIHi =
2619     DAG.getTargetConstantPool(C, PtrVT, CP->getAlignment(), 0, MOHiFlag);
2620   SDValue CPILo =
2621     DAG.getTargetConstantPool(C, PtrVT, CP->getAlignment(), 0, MOLoFlag);
2622   return LowerLabelRef(CPIHi, CPILo, IsPIC, DAG);
2623 }
2624 
2625 // For 64-bit PowerPC, prefer the more compact relative encodings.
2626 // This trades 32 bits per jump table entry for one or two instructions
2627 // on the jump site.
2628 unsigned PPCTargetLowering::getJumpTableEncoding() const {
2629   if (isJumpTableRelative())
2630     return MachineJumpTableInfo::EK_LabelDifference32;
2631 
2632   return TargetLowering::getJumpTableEncoding();
2633 }
2634 
2635 bool PPCTargetLowering::isJumpTableRelative() const {
2636   if (Subtarget.isPPC64())
2637     return true;
2638   return TargetLowering::isJumpTableRelative();
2639 }
2640 
2641 SDValue PPCTargetLowering::getPICJumpTableRelocBase(SDValue Table,
2642                                                     SelectionDAG &DAG) const {
2643   if (!Subtarget.isPPC64())
2644     return TargetLowering::getPICJumpTableRelocBase(Table, DAG);
2645 
2646   switch (getTargetMachine().getCodeModel()) {
2647   case CodeModel::Small:
2648   case CodeModel::Medium:
2649     return TargetLowering::getPICJumpTableRelocBase(Table, DAG);
2650   default:
2651     return DAG.getNode(PPCISD::GlobalBaseReg, SDLoc(),
2652                        getPointerTy(DAG.getDataLayout()));
2653   }
2654 }
2655 
2656 const MCExpr *
2657 PPCTargetLowering::getPICJumpTableRelocBaseExpr(const MachineFunction *MF,
2658                                                 unsigned JTI,
2659                                                 MCContext &Ctx) const {
2660   if (!Subtarget.isPPC64())
2661     return TargetLowering::getPICJumpTableRelocBaseExpr(MF, JTI, Ctx);
2662 
2663   switch (getTargetMachine().getCodeModel()) {
2664   case CodeModel::Small:
2665   case CodeModel::Medium:
2666     return TargetLowering::getPICJumpTableRelocBaseExpr(MF, JTI, Ctx);
2667   default:
2668     return MCSymbolRefExpr::create(MF->getPICBaseSymbol(), Ctx);
2669   }
2670 }
2671 
2672 SDValue PPCTargetLowering::LowerJumpTable(SDValue Op, SelectionDAG &DAG) const {
2673   EVT PtrVT = Op.getValueType();
2674   JumpTableSDNode *JT = cast<JumpTableSDNode>(Op);
2675 
2676   // 64-bit SVR4 ABI code is always position-independent.
2677   // The actual address of the GlobalValue is stored in the TOC.
2678   if (Subtarget.isSVR4ABI() && Subtarget.isPPC64()) {
2679     setUsesTOCBasePtr(DAG);
2680     SDValue GA = DAG.getTargetJumpTable(JT->getIndex(), PtrVT);
2681     return getTOCEntry(DAG, SDLoc(JT), true, GA);
2682   }
2683 
2684   unsigned MOHiFlag, MOLoFlag;
2685   bool IsPIC = isPositionIndependent();
2686   getLabelAccessInfo(IsPIC, Subtarget, MOHiFlag, MOLoFlag);
2687 
2688   if (IsPIC && Subtarget.isSVR4ABI()) {
2689     SDValue GA = DAG.getTargetJumpTable(JT->getIndex(), PtrVT,
2690                                         PPCII::MO_PIC_FLAG);
2691     return getTOCEntry(DAG, SDLoc(GA), false, GA);
2692   }
2693 
2694   SDValue JTIHi = DAG.getTargetJumpTable(JT->getIndex(), PtrVT, MOHiFlag);
2695   SDValue JTILo = DAG.getTargetJumpTable(JT->getIndex(), PtrVT, MOLoFlag);
2696   return LowerLabelRef(JTIHi, JTILo, IsPIC, DAG);
2697 }
2698 
2699 SDValue PPCTargetLowering::LowerBlockAddress(SDValue Op,
2700                                              SelectionDAG &DAG) const {
2701   EVT PtrVT = Op.getValueType();
2702   BlockAddressSDNode *BASDN = cast<BlockAddressSDNode>(Op);
2703   const BlockAddress *BA = BASDN->getBlockAddress();
2704 
2705   // 64-bit SVR4 ABI code is always position-independent.
2706   // The actual BlockAddress is stored in the TOC.
2707   if (Subtarget.isSVR4ABI() && isPositionIndependent()) {
2708     if (Subtarget.isPPC64())
2709       setUsesTOCBasePtr(DAG);
2710     SDValue GA = DAG.getTargetBlockAddress(BA, PtrVT, BASDN->getOffset());
2711     return getTOCEntry(DAG, SDLoc(BASDN), Subtarget.isPPC64(), GA);
2712   }
2713 
2714   unsigned MOHiFlag, MOLoFlag;
2715   bool IsPIC = isPositionIndependent();
2716   getLabelAccessInfo(IsPIC, Subtarget, MOHiFlag, MOLoFlag);
2717   SDValue TgtBAHi = DAG.getTargetBlockAddress(BA, PtrVT, 0, MOHiFlag);
2718   SDValue TgtBALo = DAG.getTargetBlockAddress(BA, PtrVT, 0, MOLoFlag);
2719   return LowerLabelRef(TgtBAHi, TgtBALo, IsPIC, DAG);
2720 }
2721 
2722 SDValue PPCTargetLowering::LowerGlobalTLSAddress(SDValue Op,
2723                                               SelectionDAG &DAG) const {
2724   // FIXME: TLS addresses currently use medium model code sequences,
2725   // which is the most useful form.  Eventually support for small and
2726   // large models could be added if users need it, at the cost of
2727   // additional complexity.
2728   GlobalAddressSDNode *GA = cast<GlobalAddressSDNode>(Op);
2729   if (DAG.getTarget().useEmulatedTLS())
2730     return LowerToTLSEmulatedModel(GA, DAG);
2731 
2732   SDLoc dl(GA);
2733   const GlobalValue *GV = GA->getGlobal();
2734   EVT PtrVT = getPointerTy(DAG.getDataLayout());
2735   bool is64bit = Subtarget.isPPC64();
2736   const Module *M = DAG.getMachineFunction().getFunction().getParent();
2737   PICLevel::Level picLevel = M->getPICLevel();
2738 
2739   TLSModel::Model Model = getTargetMachine().getTLSModel(GV);
2740 
2741   if (Model == TLSModel::LocalExec) {
2742     SDValue TGAHi = DAG.getTargetGlobalAddress(GV, dl, PtrVT, 0,
2743                                                PPCII::MO_TPREL_HA);
2744     SDValue TGALo = DAG.getTargetGlobalAddress(GV, dl, PtrVT, 0,
2745                                                PPCII::MO_TPREL_LO);
2746     SDValue TLSReg = is64bit ? DAG.getRegister(PPC::X13, MVT::i64)
2747                              : DAG.getRegister(PPC::R2, MVT::i32);
2748 
2749     SDValue Hi = DAG.getNode(PPCISD::Hi, dl, PtrVT, TGAHi, TLSReg);
2750     return DAG.getNode(PPCISD::Lo, dl, PtrVT, TGALo, Hi);
2751   }
2752 
2753   if (Model == TLSModel::InitialExec) {
2754     SDValue TGA = DAG.getTargetGlobalAddress(GV, dl, PtrVT, 0, 0);
2755     SDValue TGATLS = DAG.getTargetGlobalAddress(GV, dl, PtrVT, 0,
2756                                                 PPCII::MO_TLS);
2757     SDValue GOTPtr;
2758     if (is64bit) {
2759       setUsesTOCBasePtr(DAG);
2760       SDValue GOTReg = DAG.getRegister(PPC::X2, MVT::i64);
2761       GOTPtr = DAG.getNode(PPCISD::ADDIS_GOT_TPREL_HA, dl,
2762                            PtrVT, GOTReg, TGA);
2763     } else
2764       GOTPtr = DAG.getNode(PPCISD::PPC32_GOT, dl, PtrVT);
2765     SDValue TPOffset = DAG.getNode(PPCISD::LD_GOT_TPREL_L, dl,
2766                                    PtrVT, TGA, GOTPtr);
2767     return DAG.getNode(PPCISD::ADD_TLS, dl, PtrVT, TPOffset, TGATLS);
2768   }
2769 
2770   if (Model == TLSModel::GeneralDynamic) {
2771     SDValue TGA = DAG.getTargetGlobalAddress(GV, dl, PtrVT, 0, 0);
2772     SDValue GOTPtr;
2773     if (is64bit) {
2774       setUsesTOCBasePtr(DAG);
2775       SDValue GOTReg = DAG.getRegister(PPC::X2, MVT::i64);
2776       GOTPtr = DAG.getNode(PPCISD::ADDIS_TLSGD_HA, dl, PtrVT,
2777                                    GOTReg, TGA);
2778     } else {
2779       if (picLevel == PICLevel::SmallPIC)
2780         GOTPtr = DAG.getNode(PPCISD::GlobalBaseReg, dl, PtrVT);
2781       else
2782         GOTPtr = DAG.getNode(PPCISD::PPC32_PICGOT, dl, PtrVT);
2783     }
2784     return DAG.getNode(PPCISD::ADDI_TLSGD_L_ADDR, dl, PtrVT,
2785                        GOTPtr, TGA, TGA);
2786   }
2787 
2788   if (Model == TLSModel::LocalDynamic) {
2789     SDValue TGA = DAG.getTargetGlobalAddress(GV, dl, PtrVT, 0, 0);
2790     SDValue GOTPtr;
2791     if (is64bit) {
2792       setUsesTOCBasePtr(DAG);
2793       SDValue GOTReg = DAG.getRegister(PPC::X2, MVT::i64);
2794       GOTPtr = DAG.getNode(PPCISD::ADDIS_TLSLD_HA, dl, PtrVT,
2795                            GOTReg, TGA);
2796     } else {
2797       if (picLevel == PICLevel::SmallPIC)
2798         GOTPtr = DAG.getNode(PPCISD::GlobalBaseReg, dl, PtrVT);
2799       else
2800         GOTPtr = DAG.getNode(PPCISD::PPC32_PICGOT, dl, PtrVT);
2801     }
2802     SDValue TLSAddr = DAG.getNode(PPCISD::ADDI_TLSLD_L_ADDR, dl,
2803                                   PtrVT, GOTPtr, TGA, TGA);
2804     SDValue DtvOffsetHi = DAG.getNode(PPCISD::ADDIS_DTPREL_HA, dl,
2805                                       PtrVT, TLSAddr, TGA);
2806     return DAG.getNode(PPCISD::ADDI_DTPREL_L, dl, PtrVT, DtvOffsetHi, TGA);
2807   }
2808 
2809   llvm_unreachable("Unknown TLS model!");
2810 }
2811 
2812 SDValue PPCTargetLowering::LowerGlobalAddress(SDValue Op,
2813                                               SelectionDAG &DAG) const {
2814   EVT PtrVT = Op.getValueType();
2815   GlobalAddressSDNode *GSDN = cast<GlobalAddressSDNode>(Op);
2816   SDLoc DL(GSDN);
2817   const GlobalValue *GV = GSDN->getGlobal();
2818 
2819   // 64-bit SVR4 ABI code is always position-independent.
2820   // The actual address of the GlobalValue is stored in the TOC.
2821   if (Subtarget.isSVR4ABI() && Subtarget.isPPC64()) {
2822     setUsesTOCBasePtr(DAG);
2823     SDValue GA = DAG.getTargetGlobalAddress(GV, DL, PtrVT, GSDN->getOffset());
2824     return getTOCEntry(DAG, DL, true, GA);
2825   }
2826 
2827   unsigned MOHiFlag, MOLoFlag;
2828   bool IsPIC = isPositionIndependent();
2829   getLabelAccessInfo(IsPIC, Subtarget, MOHiFlag, MOLoFlag, GV);
2830 
2831   if (IsPIC && Subtarget.isSVR4ABI()) {
2832     SDValue GA = DAG.getTargetGlobalAddress(GV, DL, PtrVT,
2833                                             GSDN->getOffset(),
2834                                             PPCII::MO_PIC_FLAG);
2835     return getTOCEntry(DAG, DL, false, GA);
2836   }
2837 
2838   SDValue GAHi =
2839     DAG.getTargetGlobalAddress(GV, DL, PtrVT, GSDN->getOffset(), MOHiFlag);
2840   SDValue GALo =
2841     DAG.getTargetGlobalAddress(GV, DL, PtrVT, GSDN->getOffset(), MOLoFlag);
2842 
2843   SDValue Ptr = LowerLabelRef(GAHi, GALo, IsPIC, DAG);
2844 
2845   // If the global reference is actually to a non-lazy-pointer, we have to do an
2846   // extra load to get the address of the global.
2847   if (MOHiFlag & PPCII::MO_NLP_FLAG)
2848     Ptr = DAG.getLoad(PtrVT, DL, DAG.getEntryNode(), Ptr, MachinePointerInfo());
2849   return Ptr;
2850 }
2851 
2852 SDValue PPCTargetLowering::LowerSETCC(SDValue Op, SelectionDAG &DAG) const {
2853   ISD::CondCode CC = cast<CondCodeSDNode>(Op.getOperand(2))->get();
2854   SDLoc dl(Op);
2855 
2856   if (Op.getValueType() == MVT::v2i64) {
2857     // When the operands themselves are v2i64 values, we need to do something
2858     // special because VSX has no underlying comparison operations for these.
2859     if (Op.getOperand(0).getValueType() == MVT::v2i64) {
2860       // Equality can be handled by casting to the legal type for Altivec
2861       // comparisons, everything else needs to be expanded.
2862       if (CC == ISD::SETEQ || CC == ISD::SETNE) {
2863         return DAG.getNode(ISD::BITCAST, dl, MVT::v2i64,
2864                  DAG.getSetCC(dl, MVT::v4i32,
2865                    DAG.getNode(ISD::BITCAST, dl, MVT::v4i32, Op.getOperand(0)),
2866                    DAG.getNode(ISD::BITCAST, dl, MVT::v4i32, Op.getOperand(1)),
2867                    CC));
2868       }
2869 
2870       return SDValue();
2871     }
2872 
2873     // We handle most of these in the usual way.
2874     return Op;
2875   }
2876 
2877   // If we're comparing for equality to zero, expose the fact that this is
2878   // implemented as a ctlz/srl pair on ppc, so that the dag combiner can
2879   // fold the new nodes.
2880   if (SDValue V = lowerCmpEqZeroToCtlzSrl(Op, DAG))
2881     return V;
2882 
2883   if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op.getOperand(1))) {
2884     // Leave comparisons against 0 and -1 alone for now, since they're usually
2885     // optimized.  FIXME: revisit this when we can custom lower all setcc
2886     // optimizations.
2887     if (C->isAllOnesValue() || C->isNullValue())
2888       return SDValue();
2889   }
2890 
2891   // If we have an integer seteq/setne, turn it into a compare against zero
2892   // by xor'ing the rhs with the lhs, which is faster than setting a
2893   // condition register, reading it back out, and masking the correct bit.  The
2894   // normal approach here uses sub to do this instead of xor.  Using xor exposes
2895   // the result to other bit-twiddling opportunities.
2896   EVT LHSVT = Op.getOperand(0).getValueType();
2897   if (LHSVT.isInteger() && (CC == ISD::SETEQ || CC == ISD::SETNE)) {
2898     EVT VT = Op.getValueType();
2899     SDValue Sub = DAG.getNode(ISD::XOR, dl, LHSVT, Op.getOperand(0),
2900                                 Op.getOperand(1));
2901     return DAG.getSetCC(dl, VT, Sub, DAG.getConstant(0, dl, LHSVT), CC);
2902   }
2903   return SDValue();
2904 }
2905 
2906 SDValue PPCTargetLowering::LowerVAARG(SDValue Op, SelectionDAG &DAG) const {
2907   SDNode *Node = Op.getNode();
2908   EVT VT = Node->getValueType(0);
2909   EVT PtrVT = getPointerTy(DAG.getDataLayout());
2910   SDValue InChain = Node->getOperand(0);
2911   SDValue VAListPtr = Node->getOperand(1);
2912   const Value *SV = cast<SrcValueSDNode>(Node->getOperand(2))->getValue();
2913   SDLoc dl(Node);
2914 
2915   assert(!Subtarget.isPPC64() && "LowerVAARG is PPC32 only");
2916 
2917   // gpr_index
2918   SDValue GprIndex = DAG.getExtLoad(ISD::ZEXTLOAD, dl, MVT::i32, InChain,
2919                                     VAListPtr, MachinePointerInfo(SV), MVT::i8);
2920   InChain = GprIndex.getValue(1);
2921 
2922   if (VT == MVT::i64) {
2923     // Check if GprIndex is even
2924     SDValue GprAnd = DAG.getNode(ISD::AND, dl, MVT::i32, GprIndex,
2925                                  DAG.getConstant(1, dl, MVT::i32));
2926     SDValue CC64 = DAG.getSetCC(dl, MVT::i32, GprAnd,
2927                                 DAG.getConstant(0, dl, MVT::i32), ISD::SETNE);
2928     SDValue GprIndexPlusOne = DAG.getNode(ISD::ADD, dl, MVT::i32, GprIndex,
2929                                           DAG.getConstant(1, dl, MVT::i32));
2930     // Align GprIndex to be even if it isn't
2931     GprIndex = DAG.getNode(ISD::SELECT, dl, MVT::i32, CC64, GprIndexPlusOne,
2932                            GprIndex);
2933   }
2934 
2935   // fpr index is 1 byte after gpr
2936   SDValue FprPtr = DAG.getNode(ISD::ADD, dl, PtrVT, VAListPtr,
2937                                DAG.getConstant(1, dl, MVT::i32));
2938 
2939   // fpr
2940   SDValue FprIndex = DAG.getExtLoad(ISD::ZEXTLOAD, dl, MVT::i32, InChain,
2941                                     FprPtr, MachinePointerInfo(SV), MVT::i8);
2942   InChain = FprIndex.getValue(1);
2943 
2944   SDValue RegSaveAreaPtr = DAG.getNode(ISD::ADD, dl, PtrVT, VAListPtr,
2945                                        DAG.getConstant(8, dl, MVT::i32));
2946 
2947   SDValue OverflowAreaPtr = DAG.getNode(ISD::ADD, dl, PtrVT, VAListPtr,
2948                                         DAG.getConstant(4, dl, MVT::i32));
2949 
2950   // areas
2951   SDValue OverflowArea =
2952       DAG.getLoad(MVT::i32, dl, InChain, OverflowAreaPtr, MachinePointerInfo());
2953   InChain = OverflowArea.getValue(1);
2954 
2955   SDValue RegSaveArea =
2956       DAG.getLoad(MVT::i32, dl, InChain, RegSaveAreaPtr, MachinePointerInfo());
2957   InChain = RegSaveArea.getValue(1);
2958 
2959   // select overflow_area if index > 8
2960   SDValue CC = DAG.getSetCC(dl, MVT::i32, VT.isInteger() ? GprIndex : FprIndex,
2961                             DAG.getConstant(8, dl, MVT::i32), ISD::SETLT);
2962 
2963   // adjustment constant gpr_index * 4/8
2964   SDValue RegConstant = DAG.getNode(ISD::MUL, dl, MVT::i32,
2965                                     VT.isInteger() ? GprIndex : FprIndex,
2966                                     DAG.getConstant(VT.isInteger() ? 4 : 8, dl,
2967                                                     MVT::i32));
2968 
2969   // OurReg = RegSaveArea + RegConstant
2970   SDValue OurReg = DAG.getNode(ISD::ADD, dl, PtrVT, RegSaveArea,
2971                                RegConstant);
2972 
2973   // Floating types are 32 bytes into RegSaveArea
2974   if (VT.isFloatingPoint())
2975     OurReg = DAG.getNode(ISD::ADD, dl, PtrVT, OurReg,
2976                          DAG.getConstant(32, dl, MVT::i32));
2977 
2978   // increase {f,g}pr_index by 1 (or 2 if VT is i64)
2979   SDValue IndexPlus1 = DAG.getNode(ISD::ADD, dl, MVT::i32,
2980                                    VT.isInteger() ? GprIndex : FprIndex,
2981                                    DAG.getConstant(VT == MVT::i64 ? 2 : 1, dl,
2982                                                    MVT::i32));
2983 
2984   InChain = DAG.getTruncStore(InChain, dl, IndexPlus1,
2985                               VT.isInteger() ? VAListPtr : FprPtr,
2986                               MachinePointerInfo(SV), MVT::i8);
2987 
2988   // determine if we should load from reg_save_area or overflow_area
2989   SDValue Result = DAG.getNode(ISD::SELECT, dl, PtrVT, CC, OurReg, OverflowArea);
2990 
2991   // increase overflow_area by 4/8 if gpr/fpr > 8
2992   SDValue OverflowAreaPlusN = DAG.getNode(ISD::ADD, dl, PtrVT, OverflowArea,
2993                                           DAG.getConstant(VT.isInteger() ? 4 : 8,
2994                                           dl, MVT::i32));
2995 
2996   OverflowArea = DAG.getNode(ISD::SELECT, dl, MVT::i32, CC, OverflowArea,
2997                              OverflowAreaPlusN);
2998 
2999   InChain = DAG.getTruncStore(InChain, dl, OverflowArea, OverflowAreaPtr,
3000                               MachinePointerInfo(), MVT::i32);
3001 
3002   return DAG.getLoad(VT, dl, InChain, Result, MachinePointerInfo());
3003 }
3004 
3005 SDValue PPCTargetLowering::LowerVACOPY(SDValue Op, SelectionDAG &DAG) const {
3006   assert(!Subtarget.isPPC64() && "LowerVACOPY is PPC32 only");
3007 
3008   // We have to copy the entire va_list struct:
3009   // 2*sizeof(char) + 2 Byte alignment + 2*sizeof(char*) = 12 Byte
3010   return DAG.getMemcpy(Op.getOperand(0), Op,
3011                        Op.getOperand(1), Op.getOperand(2),
3012                        DAG.getConstant(12, SDLoc(Op), MVT::i32), 8, false, true,
3013                        false, MachinePointerInfo(), MachinePointerInfo());
3014 }
3015 
3016 SDValue PPCTargetLowering::LowerADJUST_TRAMPOLINE(SDValue Op,
3017                                                   SelectionDAG &DAG) const {
3018   return Op.getOperand(0);
3019 }
3020 
3021 SDValue PPCTargetLowering::LowerINIT_TRAMPOLINE(SDValue Op,
3022                                                 SelectionDAG &DAG) const {
3023   SDValue Chain = Op.getOperand(0);
3024   SDValue Trmp = Op.getOperand(1); // trampoline
3025   SDValue FPtr = Op.getOperand(2); // nested function
3026   SDValue Nest = Op.getOperand(3); // 'nest' parameter value
3027   SDLoc dl(Op);
3028 
3029   EVT PtrVT = getPointerTy(DAG.getDataLayout());
3030   bool isPPC64 = (PtrVT == MVT::i64);
3031   Type *IntPtrTy = DAG.getDataLayout().getIntPtrType(*DAG.getContext());
3032 
3033   TargetLowering::ArgListTy Args;
3034   TargetLowering::ArgListEntry Entry;
3035 
3036   Entry.Ty = IntPtrTy;
3037   Entry.Node = Trmp; Args.push_back(Entry);
3038 
3039   // TrampSize == (isPPC64 ? 48 : 40);
3040   Entry.Node = DAG.getConstant(isPPC64 ? 48 : 40, dl,
3041                                isPPC64 ? MVT::i64 : MVT::i32);
3042   Args.push_back(Entry);
3043 
3044   Entry.Node = FPtr; Args.push_back(Entry);
3045   Entry.Node = Nest; Args.push_back(Entry);
3046 
3047   // Lower to a call to __trampoline_setup(Trmp, TrampSize, FPtr, ctx_reg)
3048   TargetLowering::CallLoweringInfo CLI(DAG);
3049   CLI.setDebugLoc(dl).setChain(Chain).setLibCallee(
3050       CallingConv::C, Type::getVoidTy(*DAG.getContext()),
3051       DAG.getExternalSymbol("__trampoline_setup", PtrVT), std::move(Args));
3052 
3053   std::pair<SDValue, SDValue> CallResult = LowerCallTo(CLI);
3054   return CallResult.second;
3055 }
3056 
3057 SDValue PPCTargetLowering::LowerVASTART(SDValue Op, SelectionDAG &DAG) const {
3058   MachineFunction &MF = DAG.getMachineFunction();
3059   PPCFunctionInfo *FuncInfo = MF.getInfo<PPCFunctionInfo>();
3060   EVT PtrVT = getPointerTy(MF.getDataLayout());
3061 
3062   SDLoc dl(Op);
3063 
3064   if (Subtarget.isDarwinABI() || Subtarget.isPPC64()) {
3065     // vastart just stores the address of the VarArgsFrameIndex slot into the
3066     // memory location argument.
3067     SDValue FR = DAG.getFrameIndex(FuncInfo->getVarArgsFrameIndex(), PtrVT);
3068     const Value *SV = cast<SrcValueSDNode>(Op.getOperand(2))->getValue();
3069     return DAG.getStore(Op.getOperand(0), dl, FR, Op.getOperand(1),
3070                         MachinePointerInfo(SV));
3071   }
3072 
3073   // For the 32-bit SVR4 ABI we follow the layout of the va_list struct.
3074   // We suppose the given va_list is already allocated.
3075   //
3076   // typedef struct {
3077   //  char gpr;     /* index into the array of 8 GPRs
3078   //                 * stored in the register save area
3079   //                 * gpr=0 corresponds to r3,
3080   //                 * gpr=1 to r4, etc.
3081   //                 */
3082   //  char fpr;     /* index into the array of 8 FPRs
3083   //                 * stored in the register save area
3084   //                 * fpr=0 corresponds to f1,
3085   //                 * fpr=1 to f2, etc.
3086   //                 */
3087   //  char *overflow_arg_area;
3088   //                /* location on stack that holds
3089   //                 * the next overflow argument
3090   //                 */
3091   //  char *reg_save_area;
3092   //               /* where r3:r10 and f1:f8 (if saved)
3093   //                * are stored
3094   //                */
3095   // } va_list[1];
3096 
3097   SDValue ArgGPR = DAG.getConstant(FuncInfo->getVarArgsNumGPR(), dl, MVT::i32);
3098   SDValue ArgFPR = DAG.getConstant(FuncInfo->getVarArgsNumFPR(), dl, MVT::i32);
3099   SDValue StackOffsetFI = DAG.getFrameIndex(FuncInfo->getVarArgsStackOffset(),
3100                                             PtrVT);
3101   SDValue FR = DAG.getFrameIndex(FuncInfo->getVarArgsFrameIndex(),
3102                                  PtrVT);
3103 
3104   uint64_t FrameOffset = PtrVT.getSizeInBits()/8;
3105   SDValue ConstFrameOffset = DAG.getConstant(FrameOffset, dl, PtrVT);
3106 
3107   uint64_t StackOffset = PtrVT.getSizeInBits()/8 - 1;
3108   SDValue ConstStackOffset = DAG.getConstant(StackOffset, dl, PtrVT);
3109 
3110   uint64_t FPROffset = 1;
3111   SDValue ConstFPROffset = DAG.getConstant(FPROffset, dl, PtrVT);
3112 
3113   const Value *SV = cast<SrcValueSDNode>(Op.getOperand(2))->getValue();
3114 
3115   // Store first byte : number of int regs
3116   SDValue firstStore =
3117       DAG.getTruncStore(Op.getOperand(0), dl, ArgGPR, Op.getOperand(1),
3118                         MachinePointerInfo(SV), MVT::i8);
3119   uint64_t nextOffset = FPROffset;
3120   SDValue nextPtr = DAG.getNode(ISD::ADD, dl, PtrVT, Op.getOperand(1),
3121                                   ConstFPROffset);
3122 
3123   // Store second byte : number of float regs
3124   SDValue secondStore =
3125       DAG.getTruncStore(firstStore, dl, ArgFPR, nextPtr,
3126                         MachinePointerInfo(SV, nextOffset), MVT::i8);
3127   nextOffset += StackOffset;
3128   nextPtr = DAG.getNode(ISD::ADD, dl, PtrVT, nextPtr, ConstStackOffset);
3129 
3130   // Store second word : arguments given on stack
3131   SDValue thirdStore = DAG.getStore(secondStore, dl, StackOffsetFI, nextPtr,
3132                                     MachinePointerInfo(SV, nextOffset));
3133   nextOffset += FrameOffset;
3134   nextPtr = DAG.getNode(ISD::ADD, dl, PtrVT, nextPtr, ConstFrameOffset);
3135 
3136   // Store third word : arguments given in registers
3137   return DAG.getStore(thirdStore, dl, FR, nextPtr,
3138                       MachinePointerInfo(SV, nextOffset));
3139 }
3140 
3141 #include "PPCGenCallingConv.inc"
3142 
3143 // Function whose sole purpose is to kill compiler warnings
3144 // stemming from unused functions included from PPCGenCallingConv.inc.
3145 CCAssignFn *PPCTargetLowering::useFastISelCCs(unsigned Flag) const {
3146   return Flag ? CC_PPC64_ELF_FIS : RetCC_PPC64_ELF_FIS;
3147 }
3148 
3149 bool llvm::CC_PPC32_SVR4_Custom_Dummy(unsigned &ValNo, MVT &ValVT, MVT &LocVT,
3150                                       CCValAssign::LocInfo &LocInfo,
3151                                       ISD::ArgFlagsTy &ArgFlags,
3152                                       CCState &State) {
3153   return true;
3154 }
3155 
3156 bool llvm::CC_PPC32_SVR4_Custom_AlignArgRegs(unsigned &ValNo, MVT &ValVT,
3157                                              MVT &LocVT,
3158                                              CCValAssign::LocInfo &LocInfo,
3159                                              ISD::ArgFlagsTy &ArgFlags,
3160                                              CCState &State) {
3161   static const MCPhysReg ArgRegs[] = {
3162     PPC::R3, PPC::R4, PPC::R5, PPC::R6,
3163     PPC::R7, PPC::R8, PPC::R9, PPC::R10,
3164   };
3165   const unsigned NumArgRegs = array_lengthof(ArgRegs);
3166 
3167   unsigned RegNum = State.getFirstUnallocated(ArgRegs);
3168 
3169   // Skip one register if the first unallocated register has an even register
3170   // number and there are still argument registers available which have not been
3171   // allocated yet. RegNum is actually an index into ArgRegs, which means we
3172   // need to skip a register if RegNum is odd.
3173   if (RegNum != NumArgRegs && RegNum % 2 == 1) {
3174     State.AllocateReg(ArgRegs[RegNum]);
3175   }
3176 
3177   // Always return false here, as this function only makes sure that the first
3178   // unallocated register has an odd register number and does not actually
3179   // allocate a register for the current argument.
3180   return false;
3181 }
3182 
3183 bool
3184 llvm::CC_PPC32_SVR4_Custom_SkipLastArgRegsPPCF128(unsigned &ValNo, MVT &ValVT,
3185                                                   MVT &LocVT,
3186                                                   CCValAssign::LocInfo &LocInfo,
3187                                                   ISD::ArgFlagsTy &ArgFlags,
3188                                                   CCState &State) {
3189   static const MCPhysReg ArgRegs[] = {
3190     PPC::R3, PPC::R4, PPC::R5, PPC::R6,
3191     PPC::R7, PPC::R8, PPC::R9, PPC::R10,
3192   };
3193   const unsigned NumArgRegs = array_lengthof(ArgRegs);
3194 
3195   unsigned RegNum = State.getFirstUnallocated(ArgRegs);
3196   int RegsLeft = NumArgRegs - RegNum;
3197 
3198   // Skip if there is not enough registers left for long double type (4 gpr regs
3199   // in soft float mode) and put long double argument on the stack.
3200   if (RegNum != NumArgRegs && RegsLeft < 4) {
3201     for (int i = 0; i < RegsLeft; i++) {
3202       State.AllocateReg(ArgRegs[RegNum + i]);
3203     }
3204   }
3205 
3206   return false;
3207 }
3208 
3209 bool llvm::CC_PPC32_SVR4_Custom_AlignFPArgRegs(unsigned &ValNo, MVT &ValVT,
3210                                                MVT &LocVT,
3211                                                CCValAssign::LocInfo &LocInfo,
3212                                                ISD::ArgFlagsTy &ArgFlags,
3213                                                CCState &State) {
3214   static const MCPhysReg ArgRegs[] = {
3215     PPC::F1, PPC::F2, PPC::F3, PPC::F4, PPC::F5, PPC::F6, PPC::F7,
3216     PPC::F8
3217   };
3218 
3219   const unsigned NumArgRegs = array_lengthof(ArgRegs);
3220 
3221   unsigned RegNum = State.getFirstUnallocated(ArgRegs);
3222 
3223   // If there is only one Floating-point register left we need to put both f64
3224   // values of a split ppc_fp128 value on the stack.
3225   if (RegNum != NumArgRegs && ArgRegs[RegNum] == PPC::F8) {
3226     State.AllocateReg(ArgRegs[RegNum]);
3227   }
3228 
3229   // Always return false here, as this function only makes sure that the two f64
3230   // values a ppc_fp128 value is split into are both passed in registers or both
3231   // passed on the stack and does not actually allocate a register for the
3232   // current argument.
3233   return false;
3234 }
3235 
3236 /// FPR - The set of FP registers that should be allocated for arguments,
3237 /// on Darwin.
3238 static const MCPhysReg FPR[] = {PPC::F1,  PPC::F2,  PPC::F3, PPC::F4, PPC::F5,
3239                                 PPC::F6,  PPC::F7,  PPC::F8, PPC::F9, PPC::F10,
3240                                 PPC::F11, PPC::F12, PPC::F13};
3241 
3242 /// QFPR - The set of QPX registers that should be allocated for arguments.
3243 static const MCPhysReg QFPR[] = {
3244     PPC::QF1, PPC::QF2, PPC::QF3,  PPC::QF4,  PPC::QF5,  PPC::QF6, PPC::QF7,
3245     PPC::QF8, PPC::QF9, PPC::QF10, PPC::QF11, PPC::QF12, PPC::QF13};
3246 
3247 /// CalculateStackSlotSize - Calculates the size reserved for this argument on
3248 /// the stack.
3249 static unsigned CalculateStackSlotSize(EVT ArgVT, ISD::ArgFlagsTy Flags,
3250                                        unsigned PtrByteSize) {
3251   unsigned ArgSize = ArgVT.getStoreSize();
3252   if (Flags.isByVal())
3253     ArgSize = Flags.getByValSize();
3254 
3255   // Round up to multiples of the pointer size, except for array members,
3256   // which are always packed.
3257   if (!Flags.isInConsecutiveRegs())
3258     ArgSize = ((ArgSize + PtrByteSize - 1)/PtrByteSize) * PtrByteSize;
3259 
3260   return ArgSize;
3261 }
3262 
3263 /// CalculateStackSlotAlignment - Calculates the alignment of this argument
3264 /// on the stack.
3265 static unsigned CalculateStackSlotAlignment(EVT ArgVT, EVT OrigVT,
3266                                             ISD::ArgFlagsTy Flags,
3267                                             unsigned PtrByteSize) {
3268   unsigned Align = PtrByteSize;
3269 
3270   // Altivec parameters are padded to a 16 byte boundary.
3271   if (ArgVT == MVT::v4f32 || ArgVT == MVT::v4i32 ||
3272       ArgVT == MVT::v8i16 || ArgVT == MVT::v16i8 ||
3273       ArgVT == MVT::v2f64 || ArgVT == MVT::v2i64 ||
3274       ArgVT == MVT::v1i128 || ArgVT == MVT::f128)
3275     Align = 16;
3276   // QPX vector types stored in double-precision are padded to a 32 byte
3277   // boundary.
3278   else if (ArgVT == MVT::v4f64 || ArgVT == MVT::v4i1)
3279     Align = 32;
3280 
3281   // ByVal parameters are aligned as requested.
3282   if (Flags.isByVal()) {
3283     unsigned BVAlign = Flags.getByValAlign();
3284     if (BVAlign > PtrByteSize) {
3285       if (BVAlign % PtrByteSize != 0)
3286           llvm_unreachable(
3287             "ByVal alignment is not a multiple of the pointer size");
3288 
3289       Align = BVAlign;
3290     }
3291   }
3292 
3293   // Array members are always packed to their original alignment.
3294   if (Flags.isInConsecutiveRegs()) {
3295     // If the array member was split into multiple registers, the first
3296     // needs to be aligned to the size of the full type.  (Except for
3297     // ppcf128, which is only aligned as its f64 components.)
3298     if (Flags.isSplit() && OrigVT != MVT::ppcf128)
3299       Align = OrigVT.getStoreSize();
3300     else
3301       Align = ArgVT.getStoreSize();
3302   }
3303 
3304   return Align;
3305 }
3306 
3307 /// CalculateStackSlotUsed - Return whether this argument will use its
3308 /// stack slot (instead of being passed in registers).  ArgOffset,
3309 /// AvailableFPRs, and AvailableVRs must hold the current argument
3310 /// position, and will be updated to account for this argument.
3311 static bool CalculateStackSlotUsed(EVT ArgVT, EVT OrigVT,
3312                                    ISD::ArgFlagsTy Flags,
3313                                    unsigned PtrByteSize,
3314                                    unsigned LinkageSize,
3315                                    unsigned ParamAreaSize,
3316                                    unsigned &ArgOffset,
3317                                    unsigned &AvailableFPRs,
3318                                    unsigned &AvailableVRs, bool HasQPX) {
3319   bool UseMemory = false;
3320 
3321   // Respect alignment of argument on the stack.
3322   unsigned Align =
3323     CalculateStackSlotAlignment(ArgVT, OrigVT, Flags, PtrByteSize);
3324   ArgOffset = ((ArgOffset + Align - 1) / Align) * Align;
3325   // If there's no space left in the argument save area, we must
3326   // use memory (this check also catches zero-sized arguments).
3327   if (ArgOffset >= LinkageSize + ParamAreaSize)
3328     UseMemory = true;
3329 
3330   // Allocate argument on the stack.
3331   ArgOffset += CalculateStackSlotSize(ArgVT, Flags, PtrByteSize);
3332   if (Flags.isInConsecutiveRegsLast())
3333     ArgOffset = ((ArgOffset + PtrByteSize - 1)/PtrByteSize) * PtrByteSize;
3334   // If we overran the argument save area, we must use memory
3335   // (this check catches arguments passed partially in memory)
3336   if (ArgOffset > LinkageSize + ParamAreaSize)
3337     UseMemory = true;
3338 
3339   // However, if the argument is actually passed in an FPR or a VR,
3340   // we don't use memory after all.
3341   if (!Flags.isByVal()) {
3342     if (ArgVT == MVT::f32 || ArgVT == MVT::f64 ||
3343         // QPX registers overlap with the scalar FP registers.
3344         (HasQPX && (ArgVT == MVT::v4f32 ||
3345                     ArgVT == MVT::v4f64 ||
3346                     ArgVT == MVT::v4i1)))
3347       if (AvailableFPRs > 0) {
3348         --AvailableFPRs;
3349         return false;
3350       }
3351     if (ArgVT == MVT::v4f32 || ArgVT == MVT::v4i32 ||
3352         ArgVT == MVT::v8i16 || ArgVT == MVT::v16i8 ||
3353         ArgVT == MVT::v2f64 || ArgVT == MVT::v2i64 ||
3354         ArgVT == MVT::v1i128 || ArgVT == MVT::f128)
3355       if (AvailableVRs > 0) {
3356         --AvailableVRs;
3357         return false;
3358       }
3359   }
3360 
3361   return UseMemory;
3362 }
3363 
3364 /// EnsureStackAlignment - Round stack frame size up from NumBytes to
3365 /// ensure minimum alignment required for target.
3366 static unsigned EnsureStackAlignment(const PPCFrameLowering *Lowering,
3367                                      unsigned NumBytes) {
3368   unsigned TargetAlign = Lowering->getStackAlignment();
3369   unsigned AlignMask = TargetAlign - 1;
3370   NumBytes = (NumBytes + AlignMask) & ~AlignMask;
3371   return NumBytes;
3372 }
3373 
3374 SDValue PPCTargetLowering::LowerFormalArguments(
3375     SDValue Chain, CallingConv::ID CallConv, bool isVarArg,
3376     const SmallVectorImpl<ISD::InputArg> &Ins, const SDLoc &dl,
3377     SelectionDAG &DAG, SmallVectorImpl<SDValue> &InVals) const {
3378   if (Subtarget.isSVR4ABI()) {
3379     if (Subtarget.isPPC64())
3380       return LowerFormalArguments_64SVR4(Chain, CallConv, isVarArg, Ins,
3381                                          dl, DAG, InVals);
3382     else
3383       return LowerFormalArguments_32SVR4(Chain, CallConv, isVarArg, Ins,
3384                                          dl, DAG, InVals);
3385   } else {
3386     return LowerFormalArguments_Darwin(Chain, CallConv, isVarArg, Ins,
3387                                        dl, DAG, InVals);
3388   }
3389 }
3390 
3391 SDValue PPCTargetLowering::LowerFormalArguments_32SVR4(
3392     SDValue Chain, CallingConv::ID CallConv, bool isVarArg,
3393     const SmallVectorImpl<ISD::InputArg> &Ins, const SDLoc &dl,
3394     SelectionDAG &DAG, SmallVectorImpl<SDValue> &InVals) const {
3395 
3396   // 32-bit SVR4 ABI Stack Frame Layout:
3397   //              +-----------------------------------+
3398   //        +-->  |            Back chain             |
3399   //        |     +-----------------------------------+
3400   //        |     | Floating-point register save area |
3401   //        |     +-----------------------------------+
3402   //        |     |    General register save area     |
3403   //        |     +-----------------------------------+
3404   //        |     |          CR save word             |
3405   //        |     +-----------------------------------+
3406   //        |     |         VRSAVE save word          |
3407   //        |     +-----------------------------------+
3408   //        |     |         Alignment padding         |
3409   //        |     +-----------------------------------+
3410   //        |     |     Vector register save area     |
3411   //        |     +-----------------------------------+
3412   //        |     |       Local variable space        |
3413   //        |     +-----------------------------------+
3414   //        |     |        Parameter list area        |
3415   //        |     +-----------------------------------+
3416   //        |     |           LR save word            |
3417   //        |     +-----------------------------------+
3418   // SP-->  +---  |            Back chain             |
3419   //              +-----------------------------------+
3420   //
3421   // Specifications:
3422   //   System V Application Binary Interface PowerPC Processor Supplement
3423   //   AltiVec Technology Programming Interface Manual
3424 
3425   MachineFunction &MF = DAG.getMachineFunction();
3426   MachineFrameInfo &MFI = MF.getFrameInfo();
3427   PPCFunctionInfo *FuncInfo = MF.getInfo<PPCFunctionInfo>();
3428 
3429   EVT PtrVT = getPointerTy(MF.getDataLayout());
3430   // Potential tail calls could cause overwriting of argument stack slots.
3431   bool isImmutable = !(getTargetMachine().Options.GuaranteedTailCallOpt &&
3432                        (CallConv == CallingConv::Fast));
3433   unsigned PtrByteSize = 4;
3434 
3435   // Assign locations to all of the incoming arguments.
3436   SmallVector<CCValAssign, 16> ArgLocs;
3437   PPCCCState CCInfo(CallConv, isVarArg, DAG.getMachineFunction(), ArgLocs,
3438                  *DAG.getContext());
3439 
3440   // Reserve space for the linkage area on the stack.
3441   unsigned LinkageSize = Subtarget.getFrameLowering()->getLinkageSize();
3442   CCInfo.AllocateStack(LinkageSize, PtrByteSize);
3443   if (useSoftFloat() || hasSPE())
3444     CCInfo.PreAnalyzeFormalArguments(Ins);
3445 
3446   CCInfo.AnalyzeFormalArguments(Ins, CC_PPC32_SVR4);
3447   CCInfo.clearWasPPCF128();
3448 
3449   for (unsigned i = 0, e = ArgLocs.size(); i != e; ++i) {
3450     CCValAssign &VA = ArgLocs[i];
3451 
3452     // Arguments stored in registers.
3453     if (VA.isRegLoc()) {
3454       const TargetRegisterClass *RC;
3455       EVT ValVT = VA.getValVT();
3456 
3457       switch (ValVT.getSimpleVT().SimpleTy) {
3458         default:
3459           llvm_unreachable("ValVT not supported by formal arguments Lowering");
3460         case MVT::i1:
3461         case MVT::i32:
3462           RC = &PPC::GPRCRegClass;
3463           break;
3464         case MVT::f32:
3465           if (Subtarget.hasP8Vector())
3466             RC = &PPC::VSSRCRegClass;
3467           else if (Subtarget.hasSPE())
3468             RC = &PPC::SPE4RCRegClass;
3469           else
3470             RC = &PPC::F4RCRegClass;
3471           break;
3472         case MVT::f64:
3473           if (Subtarget.hasVSX())
3474             RC = &PPC::VSFRCRegClass;
3475           else if (Subtarget.hasSPE())
3476             RC = &PPC::SPERCRegClass;
3477           else
3478             RC = &PPC::F8RCRegClass;
3479           break;
3480         case MVT::v16i8:
3481         case MVT::v8i16:
3482         case MVT::v4i32:
3483           RC = &PPC::VRRCRegClass;
3484           break;
3485         case MVT::v4f32:
3486           RC = Subtarget.hasQPX() ? &PPC::QSRCRegClass : &PPC::VRRCRegClass;
3487           break;
3488         case MVT::v2f64:
3489         case MVT::v2i64:
3490           RC = &PPC::VRRCRegClass;
3491           break;
3492         case MVT::v4f64:
3493           RC = &PPC::QFRCRegClass;
3494           break;
3495         case MVT::v4i1:
3496           RC = &PPC::QBRCRegClass;
3497           break;
3498       }
3499 
3500       // Transform the arguments stored in physical registers into virtual ones.
3501       unsigned Reg = MF.addLiveIn(VA.getLocReg(), RC);
3502       SDValue ArgValue = DAG.getCopyFromReg(Chain, dl, Reg,
3503                                             ValVT == MVT::i1 ? MVT::i32 : ValVT);
3504 
3505       if (ValVT == MVT::i1)
3506         ArgValue = DAG.getNode(ISD::TRUNCATE, dl, MVT::i1, ArgValue);
3507 
3508       InVals.push_back(ArgValue);
3509     } else {
3510       // Argument stored in memory.
3511       assert(VA.isMemLoc());
3512 
3513       unsigned ArgSize = VA.getLocVT().getStoreSize();
3514       int FI = MFI.CreateFixedObject(ArgSize, VA.getLocMemOffset(),
3515                                      isImmutable);
3516 
3517       // Create load nodes to retrieve arguments from the stack.
3518       SDValue FIN = DAG.getFrameIndex(FI, PtrVT);
3519       InVals.push_back(
3520           DAG.getLoad(VA.getValVT(), dl, Chain, FIN, MachinePointerInfo()));
3521     }
3522   }
3523 
3524   // Assign locations to all of the incoming aggregate by value arguments.
3525   // Aggregates passed by value are stored in the local variable space of the
3526   // caller's stack frame, right above the parameter list area.
3527   SmallVector<CCValAssign, 16> ByValArgLocs;
3528   CCState CCByValInfo(CallConv, isVarArg, DAG.getMachineFunction(),
3529                       ByValArgLocs, *DAG.getContext());
3530 
3531   // Reserve stack space for the allocations in CCInfo.
3532   CCByValInfo.AllocateStack(CCInfo.getNextStackOffset(), PtrByteSize);
3533 
3534   CCByValInfo.AnalyzeFormalArguments(Ins, CC_PPC32_SVR4_ByVal);
3535 
3536   // Area that is at least reserved in the caller of this function.
3537   unsigned MinReservedArea = CCByValInfo.getNextStackOffset();
3538   MinReservedArea = std::max(MinReservedArea, LinkageSize);
3539 
3540   // Set the size that is at least reserved in caller of this function.  Tail
3541   // call optimized function's reserved stack space needs to be aligned so that
3542   // taking the difference between two stack areas will result in an aligned
3543   // stack.
3544   MinReservedArea =
3545       EnsureStackAlignment(Subtarget.getFrameLowering(), MinReservedArea);
3546   FuncInfo->setMinReservedArea(MinReservedArea);
3547 
3548   SmallVector<SDValue, 8> MemOps;
3549 
3550   // If the function takes variable number of arguments, make a frame index for
3551   // the start of the first vararg value... for expansion of llvm.va_start.
3552   if (isVarArg) {
3553     static const MCPhysReg GPArgRegs[] = {
3554       PPC::R3, PPC::R4, PPC::R5, PPC::R6,
3555       PPC::R7, PPC::R8, PPC::R9, PPC::R10,
3556     };
3557     const unsigned NumGPArgRegs = array_lengthof(GPArgRegs);
3558 
3559     static const MCPhysReg FPArgRegs[] = {
3560       PPC::F1, PPC::F2, PPC::F3, PPC::F4, PPC::F5, PPC::F6, PPC::F7,
3561       PPC::F8
3562     };
3563     unsigned NumFPArgRegs = array_lengthof(FPArgRegs);
3564 
3565     if (useSoftFloat() || hasSPE())
3566        NumFPArgRegs = 0;
3567 
3568     FuncInfo->setVarArgsNumGPR(CCInfo.getFirstUnallocated(GPArgRegs));
3569     FuncInfo->setVarArgsNumFPR(CCInfo.getFirstUnallocated(FPArgRegs));
3570 
3571     // Make room for NumGPArgRegs and NumFPArgRegs.
3572     int Depth = NumGPArgRegs * PtrVT.getSizeInBits()/8 +
3573                 NumFPArgRegs * MVT(MVT::f64).getSizeInBits()/8;
3574 
3575     FuncInfo->setVarArgsStackOffset(
3576       MFI.CreateFixedObject(PtrVT.getSizeInBits()/8,
3577                             CCInfo.getNextStackOffset(), true));
3578 
3579     FuncInfo->setVarArgsFrameIndex(MFI.CreateStackObject(Depth, 8, false));
3580     SDValue FIN = DAG.getFrameIndex(FuncInfo->getVarArgsFrameIndex(), PtrVT);
3581 
3582     // The fixed integer arguments of a variadic function are stored to the
3583     // VarArgsFrameIndex on the stack so that they may be loaded by
3584     // dereferencing the result of va_next.
3585     for (unsigned GPRIndex = 0; GPRIndex != NumGPArgRegs; ++GPRIndex) {
3586       // Get an existing live-in vreg, or add a new one.
3587       unsigned VReg = MF.getRegInfo().getLiveInVirtReg(GPArgRegs[GPRIndex]);
3588       if (!VReg)
3589         VReg = MF.addLiveIn(GPArgRegs[GPRIndex], &PPC::GPRCRegClass);
3590 
3591       SDValue Val = DAG.getCopyFromReg(Chain, dl, VReg, PtrVT);
3592       SDValue Store =
3593           DAG.getStore(Val.getValue(1), dl, Val, FIN, MachinePointerInfo());
3594       MemOps.push_back(Store);
3595       // Increment the address by four for the next argument to store
3596       SDValue PtrOff = DAG.getConstant(PtrVT.getSizeInBits()/8, dl, PtrVT);
3597       FIN = DAG.getNode(ISD::ADD, dl, PtrOff.getValueType(), FIN, PtrOff);
3598     }
3599 
3600     // FIXME 32-bit SVR4: We only need to save FP argument registers if CR bit 6
3601     // is set.
3602     // The double arguments are stored to the VarArgsFrameIndex
3603     // on the stack.
3604     for (unsigned FPRIndex = 0; FPRIndex != NumFPArgRegs; ++FPRIndex) {
3605       // Get an existing live-in vreg, or add a new one.
3606       unsigned VReg = MF.getRegInfo().getLiveInVirtReg(FPArgRegs[FPRIndex]);
3607       if (!VReg)
3608         VReg = MF.addLiveIn(FPArgRegs[FPRIndex], &PPC::F8RCRegClass);
3609 
3610       SDValue Val = DAG.getCopyFromReg(Chain, dl, VReg, MVT::f64);
3611       SDValue Store =
3612           DAG.getStore(Val.getValue(1), dl, Val, FIN, MachinePointerInfo());
3613       MemOps.push_back(Store);
3614       // Increment the address by eight for the next argument to store
3615       SDValue PtrOff = DAG.getConstant(MVT(MVT::f64).getSizeInBits()/8, dl,
3616                                          PtrVT);
3617       FIN = DAG.getNode(ISD::ADD, dl, PtrOff.getValueType(), FIN, PtrOff);
3618     }
3619   }
3620 
3621   if (!MemOps.empty())
3622     Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, MemOps);
3623 
3624   return Chain;
3625 }
3626 
3627 // PPC64 passes i8, i16, and i32 values in i64 registers. Promote
3628 // value to MVT::i64 and then truncate to the correct register size.
3629 SDValue PPCTargetLowering::extendArgForPPC64(ISD::ArgFlagsTy Flags,
3630                                              EVT ObjectVT, SelectionDAG &DAG,
3631                                              SDValue ArgVal,
3632                                              const SDLoc &dl) const {
3633   if (Flags.isSExt())
3634     ArgVal = DAG.getNode(ISD::AssertSext, dl, MVT::i64, ArgVal,
3635                          DAG.getValueType(ObjectVT));
3636   else if (Flags.isZExt())
3637     ArgVal = DAG.getNode(ISD::AssertZext, dl, MVT::i64, ArgVal,
3638                          DAG.getValueType(ObjectVT));
3639 
3640   return DAG.getNode(ISD::TRUNCATE, dl, ObjectVT, ArgVal);
3641 }
3642 
3643 SDValue PPCTargetLowering::LowerFormalArguments_64SVR4(
3644     SDValue Chain, CallingConv::ID CallConv, bool isVarArg,
3645     const SmallVectorImpl<ISD::InputArg> &Ins, const SDLoc &dl,
3646     SelectionDAG &DAG, SmallVectorImpl<SDValue> &InVals) const {
3647   // TODO: add description of PPC stack frame format, or at least some docs.
3648   //
3649   bool isELFv2ABI = Subtarget.isELFv2ABI();
3650   bool isLittleEndian = Subtarget.isLittleEndian();
3651   MachineFunction &MF = DAG.getMachineFunction();
3652   MachineFrameInfo &MFI = MF.getFrameInfo();
3653   PPCFunctionInfo *FuncInfo = MF.getInfo<PPCFunctionInfo>();
3654 
3655   assert(!(CallConv == CallingConv::Fast && isVarArg) &&
3656          "fastcc not supported on varargs functions");
3657 
3658   EVT PtrVT = getPointerTy(MF.getDataLayout());
3659   // Potential tail calls could cause overwriting of argument stack slots.
3660   bool isImmutable = !(getTargetMachine().Options.GuaranteedTailCallOpt &&
3661                        (CallConv == CallingConv::Fast));
3662   unsigned PtrByteSize = 8;
3663   unsigned LinkageSize = Subtarget.getFrameLowering()->getLinkageSize();
3664 
3665   static const MCPhysReg GPR[] = {
3666     PPC::X3, PPC::X4, PPC::X5, PPC::X6,
3667     PPC::X7, PPC::X8, PPC::X9, PPC::X10,
3668   };
3669   static const MCPhysReg VR[] = {
3670     PPC::V2, PPC::V3, PPC::V4, PPC::V5, PPC::V6, PPC::V7, PPC::V8,
3671     PPC::V9, PPC::V10, PPC::V11, PPC::V12, PPC::V13
3672   };
3673 
3674   const unsigned Num_GPR_Regs = array_lengthof(GPR);
3675   const unsigned Num_FPR_Regs = useSoftFloat() ? 0 : 13;
3676   const unsigned Num_VR_Regs  = array_lengthof(VR);
3677   const unsigned Num_QFPR_Regs = Num_FPR_Regs;
3678 
3679   // Do a first pass over the arguments to determine whether the ABI
3680   // guarantees that our caller has allocated the parameter save area
3681   // on its stack frame.  In the ELFv1 ABI, this is always the case;
3682   // in the ELFv2 ABI, it is true if this is a vararg function or if
3683   // any parameter is located in a stack slot.
3684 
3685   bool HasParameterArea = !isELFv2ABI || isVarArg;
3686   unsigned ParamAreaSize = Num_GPR_Regs * PtrByteSize;
3687   unsigned NumBytes = LinkageSize;
3688   unsigned AvailableFPRs = Num_FPR_Regs;
3689   unsigned AvailableVRs = Num_VR_Regs;
3690   for (unsigned i = 0, e = Ins.size(); i != e; ++i) {
3691     if (Ins[i].Flags.isNest())
3692       continue;
3693 
3694     if (CalculateStackSlotUsed(Ins[i].VT, Ins[i].ArgVT, Ins[i].Flags,
3695                                PtrByteSize, LinkageSize, ParamAreaSize,
3696                                NumBytes, AvailableFPRs, AvailableVRs,
3697                                Subtarget.hasQPX()))
3698       HasParameterArea = true;
3699   }
3700 
3701   // Add DAG nodes to load the arguments or copy them out of registers.  On
3702   // entry to a function on PPC, the arguments start after the linkage area,
3703   // although the first ones are often in registers.
3704 
3705   unsigned ArgOffset = LinkageSize;
3706   unsigned GPR_idx = 0, FPR_idx = 0, VR_idx = 0;
3707   unsigned &QFPR_idx = FPR_idx;
3708   SmallVector<SDValue, 8> MemOps;
3709   Function::const_arg_iterator FuncArg = MF.getFunction().arg_begin();
3710   unsigned CurArgIdx = 0;
3711   for (unsigned ArgNo = 0, e = Ins.size(); ArgNo != e; ++ArgNo) {
3712     SDValue ArgVal;
3713     bool needsLoad = false;
3714     EVT ObjectVT = Ins[ArgNo].VT;
3715     EVT OrigVT = Ins[ArgNo].ArgVT;
3716     unsigned ObjSize = ObjectVT.getStoreSize();
3717     unsigned ArgSize = ObjSize;
3718     ISD::ArgFlagsTy Flags = Ins[ArgNo].Flags;
3719     if (Ins[ArgNo].isOrigArg()) {
3720       std::advance(FuncArg, Ins[ArgNo].getOrigArgIndex() - CurArgIdx);
3721       CurArgIdx = Ins[ArgNo].getOrigArgIndex();
3722     }
3723     // We re-align the argument offset for each argument, except when using the
3724     // fast calling convention, when we need to make sure we do that only when
3725     // we'll actually use a stack slot.
3726     unsigned CurArgOffset, Align;
3727     auto ComputeArgOffset = [&]() {
3728       /* Respect alignment of argument on the stack.  */
3729       Align = CalculateStackSlotAlignment(ObjectVT, OrigVT, Flags, PtrByteSize);
3730       ArgOffset = ((ArgOffset + Align - 1) / Align) * Align;
3731       CurArgOffset = ArgOffset;
3732     };
3733 
3734     if (CallConv != CallingConv::Fast) {
3735       ComputeArgOffset();
3736 
3737       /* Compute GPR index associated with argument offset.  */
3738       GPR_idx = (ArgOffset - LinkageSize) / PtrByteSize;
3739       GPR_idx = std::min(GPR_idx, Num_GPR_Regs);
3740     }
3741 
3742     // FIXME the codegen can be much improved in some cases.
3743     // We do not have to keep everything in memory.
3744     if (Flags.isByVal()) {
3745       assert(Ins[ArgNo].isOrigArg() && "Byval arguments cannot be implicit");
3746 
3747       if (CallConv == CallingConv::Fast)
3748         ComputeArgOffset();
3749 
3750       // ObjSize is the true size, ArgSize rounded up to multiple of registers.
3751       ObjSize = Flags.getByValSize();
3752       ArgSize = ((ObjSize + PtrByteSize - 1)/PtrByteSize) * PtrByteSize;
3753       // Empty aggregate parameters do not take up registers.  Examples:
3754       //   struct { } a;
3755       //   union  { } b;
3756       //   int c[0];
3757       // etc.  However, we have to provide a place-holder in InVals, so
3758       // pretend we have an 8-byte item at the current address for that
3759       // purpose.
3760       if (!ObjSize) {
3761         int FI = MFI.CreateFixedObject(PtrByteSize, ArgOffset, true);
3762         SDValue FIN = DAG.getFrameIndex(FI, PtrVT);
3763         InVals.push_back(FIN);
3764         continue;
3765       }
3766 
3767       // Create a stack object covering all stack doublewords occupied
3768       // by the argument.  If the argument is (fully or partially) on
3769       // the stack, or if the argument is fully in registers but the
3770       // caller has allocated the parameter save anyway, we can refer
3771       // directly to the caller's stack frame.  Otherwise, create a
3772       // local copy in our own frame.
3773       int FI;
3774       if (HasParameterArea ||
3775           ArgSize + ArgOffset > LinkageSize + Num_GPR_Regs * PtrByteSize)
3776         FI = MFI.CreateFixedObject(ArgSize, ArgOffset, false, true);
3777       else
3778         FI = MFI.CreateStackObject(ArgSize, Align, false);
3779       SDValue FIN = DAG.getFrameIndex(FI, PtrVT);
3780 
3781       // Handle aggregates smaller than 8 bytes.
3782       if (ObjSize < PtrByteSize) {
3783         // The value of the object is its address, which differs from the
3784         // address of the enclosing doubleword on big-endian systems.
3785         SDValue Arg = FIN;
3786         if (!isLittleEndian) {
3787           SDValue ArgOff = DAG.getConstant(PtrByteSize - ObjSize, dl, PtrVT);
3788           Arg = DAG.getNode(ISD::ADD, dl, ArgOff.getValueType(), Arg, ArgOff);
3789         }
3790         InVals.push_back(Arg);
3791 
3792         if (GPR_idx != Num_GPR_Regs) {
3793           unsigned VReg = MF.addLiveIn(GPR[GPR_idx++], &PPC::G8RCRegClass);
3794           FuncInfo->addLiveInAttr(VReg, Flags);
3795           SDValue Val = DAG.getCopyFromReg(Chain, dl, VReg, PtrVT);
3796           SDValue Store;
3797 
3798           if (ObjSize==1 || ObjSize==2 || ObjSize==4) {
3799             EVT ObjType = (ObjSize == 1 ? MVT::i8 :
3800                            (ObjSize == 2 ? MVT::i16 : MVT::i32));
3801             Store = DAG.getTruncStore(Val.getValue(1), dl, Val, Arg,
3802                                       MachinePointerInfo(&*FuncArg), ObjType);
3803           } else {
3804             // For sizes that don't fit a truncating store (3, 5, 6, 7),
3805             // store the whole register as-is to the parameter save area
3806             // slot.
3807             Store = DAG.getStore(Val.getValue(1), dl, Val, FIN,
3808                                  MachinePointerInfo(&*FuncArg));
3809           }
3810 
3811           MemOps.push_back(Store);
3812         }
3813         // Whether we copied from a register or not, advance the offset
3814         // into the parameter save area by a full doubleword.
3815         ArgOffset += PtrByteSize;
3816         continue;
3817       }
3818 
3819       // The value of the object is its address, which is the address of
3820       // its first stack doubleword.
3821       InVals.push_back(FIN);
3822 
3823       // Store whatever pieces of the object are in registers to memory.
3824       for (unsigned j = 0; j < ArgSize; j += PtrByteSize) {
3825         if (GPR_idx == Num_GPR_Regs)
3826           break;
3827 
3828         unsigned VReg = MF.addLiveIn(GPR[GPR_idx], &PPC::G8RCRegClass);
3829         FuncInfo->addLiveInAttr(VReg, Flags);
3830         SDValue Val = DAG.getCopyFromReg(Chain, dl, VReg, PtrVT);
3831         SDValue Addr = FIN;
3832         if (j) {
3833           SDValue Off = DAG.getConstant(j, dl, PtrVT);
3834           Addr = DAG.getNode(ISD::ADD, dl, Off.getValueType(), Addr, Off);
3835         }
3836         SDValue Store = DAG.getStore(Val.getValue(1), dl, Val, Addr,
3837                                      MachinePointerInfo(&*FuncArg, j));
3838         MemOps.push_back(Store);
3839         ++GPR_idx;
3840       }
3841       ArgOffset += ArgSize;
3842       continue;
3843     }
3844 
3845     switch (ObjectVT.getSimpleVT().SimpleTy) {
3846     default: llvm_unreachable("Unhandled argument type!");
3847     case MVT::i1:
3848     case MVT::i32:
3849     case MVT::i64:
3850       if (Flags.isNest()) {
3851         // The 'nest' parameter, if any, is passed in R11.
3852         unsigned VReg = MF.addLiveIn(PPC::X11, &PPC::G8RCRegClass);
3853         ArgVal = DAG.getCopyFromReg(Chain, dl, VReg, MVT::i64);
3854 
3855         if (ObjectVT == MVT::i32 || ObjectVT == MVT::i1)
3856           ArgVal = extendArgForPPC64(Flags, ObjectVT, DAG, ArgVal, dl);
3857 
3858         break;
3859       }
3860 
3861       // These can be scalar arguments or elements of an integer array type
3862       // passed directly.  Clang may use those instead of "byval" aggregate
3863       // types to avoid forcing arguments to memory unnecessarily.
3864       if (GPR_idx != Num_GPR_Regs) {
3865         unsigned VReg = MF.addLiveIn(GPR[GPR_idx++], &PPC::G8RCRegClass);
3866         FuncInfo->addLiveInAttr(VReg, Flags);
3867         ArgVal = DAG.getCopyFromReg(Chain, dl, VReg, MVT::i64);
3868 
3869         if (ObjectVT == MVT::i32 || ObjectVT == MVT::i1)
3870           // PPC64 passes i8, i16, and i32 values in i64 registers. Promote
3871           // value to MVT::i64 and then truncate to the correct register size.
3872           ArgVal = extendArgForPPC64(Flags, ObjectVT, DAG, ArgVal, dl);
3873       } else {
3874         if (CallConv == CallingConv::Fast)
3875           ComputeArgOffset();
3876 
3877         needsLoad = true;
3878         ArgSize = PtrByteSize;
3879       }
3880       if (CallConv != CallingConv::Fast || needsLoad)
3881         ArgOffset += 8;
3882       break;
3883 
3884     case MVT::f32:
3885     case MVT::f64:
3886       // These can be scalar arguments or elements of a float array type
3887       // passed directly.  The latter are used to implement ELFv2 homogenous
3888       // float aggregates.
3889       if (FPR_idx != Num_FPR_Regs) {
3890         unsigned VReg;
3891 
3892         if (ObjectVT == MVT::f32)
3893           VReg = MF.addLiveIn(FPR[FPR_idx],
3894                               Subtarget.hasP8Vector()
3895                                   ? &PPC::VSSRCRegClass
3896                                   : &PPC::F4RCRegClass);
3897         else
3898           VReg = MF.addLiveIn(FPR[FPR_idx], Subtarget.hasVSX()
3899                                                 ? &PPC::VSFRCRegClass
3900                                                 : &PPC::F8RCRegClass);
3901 
3902         ArgVal = DAG.getCopyFromReg(Chain, dl, VReg, ObjectVT);
3903         ++FPR_idx;
3904       } else if (GPR_idx != Num_GPR_Regs && CallConv != CallingConv::Fast) {
3905         // FIXME: We may want to re-enable this for CallingConv::Fast on the P8
3906         // once we support fp <-> gpr moves.
3907 
3908         // This can only ever happen in the presence of f32 array types,
3909         // since otherwise we never run out of FPRs before running out
3910         // of GPRs.
3911         unsigned VReg = MF.addLiveIn(GPR[GPR_idx++], &PPC::G8RCRegClass);
3912         FuncInfo->addLiveInAttr(VReg, Flags);
3913         ArgVal = DAG.getCopyFromReg(Chain, dl, VReg, MVT::i64);
3914 
3915         if (ObjectVT == MVT::f32) {
3916           if ((ArgOffset % PtrByteSize) == (isLittleEndian ? 4 : 0))
3917             ArgVal = DAG.getNode(ISD::SRL, dl, MVT::i64, ArgVal,
3918                                  DAG.getConstant(32, dl, MVT::i32));
3919           ArgVal = DAG.getNode(ISD::TRUNCATE, dl, MVT::i32, ArgVal);
3920         }
3921 
3922         ArgVal = DAG.getNode(ISD::BITCAST, dl, ObjectVT, ArgVal);
3923       } else {
3924         if (CallConv == CallingConv::Fast)
3925           ComputeArgOffset();
3926 
3927         needsLoad = true;
3928       }
3929 
3930       // When passing an array of floats, the array occupies consecutive
3931       // space in the argument area; only round up to the next doubleword
3932       // at the end of the array.  Otherwise, each float takes 8 bytes.
3933       if (CallConv != CallingConv::Fast || needsLoad) {
3934         ArgSize = Flags.isInConsecutiveRegs() ? ObjSize : PtrByteSize;
3935         ArgOffset += ArgSize;
3936         if (Flags.isInConsecutiveRegsLast())
3937           ArgOffset = ((ArgOffset + PtrByteSize - 1)/PtrByteSize) * PtrByteSize;
3938       }
3939       break;
3940     case MVT::v4f32:
3941     case MVT::v4i32:
3942     case MVT::v8i16:
3943     case MVT::v16i8:
3944     case MVT::v2f64:
3945     case MVT::v2i64:
3946     case MVT::v1i128:
3947     case MVT::f128:
3948       if (!Subtarget.hasQPX()) {
3949         // These can be scalar arguments or elements of a vector array type
3950         // passed directly.  The latter are used to implement ELFv2 homogenous
3951         // vector aggregates.
3952         if (VR_idx != Num_VR_Regs) {
3953           unsigned VReg = MF.addLiveIn(VR[VR_idx], &PPC::VRRCRegClass);
3954           ArgVal = DAG.getCopyFromReg(Chain, dl, VReg, ObjectVT);
3955           ++VR_idx;
3956         } else {
3957           if (CallConv == CallingConv::Fast)
3958             ComputeArgOffset();
3959           needsLoad = true;
3960         }
3961         if (CallConv != CallingConv::Fast || needsLoad)
3962           ArgOffset += 16;
3963         break;
3964       } // not QPX
3965 
3966       assert(ObjectVT.getSimpleVT().SimpleTy == MVT::v4f32 &&
3967              "Invalid QPX parameter type");
3968       /* fall through */
3969 
3970     case MVT::v4f64:
3971     case MVT::v4i1:
3972       // QPX vectors are treated like their scalar floating-point subregisters
3973       // (except that they're larger).
3974       unsigned Sz = ObjectVT.getSimpleVT().SimpleTy == MVT::v4f32 ? 16 : 32;
3975       if (QFPR_idx != Num_QFPR_Regs) {
3976         const TargetRegisterClass *RC;
3977         switch (ObjectVT.getSimpleVT().SimpleTy) {
3978         case MVT::v4f64: RC = &PPC::QFRCRegClass; break;
3979         case MVT::v4f32: RC = &PPC::QSRCRegClass; break;
3980         default:         RC = &PPC::QBRCRegClass; break;
3981         }
3982 
3983         unsigned VReg = MF.addLiveIn(QFPR[QFPR_idx], RC);
3984         ArgVal = DAG.getCopyFromReg(Chain, dl, VReg, ObjectVT);
3985         ++QFPR_idx;
3986       } else {
3987         if (CallConv == CallingConv::Fast)
3988           ComputeArgOffset();
3989         needsLoad = true;
3990       }
3991       if (CallConv != CallingConv::Fast || needsLoad)
3992         ArgOffset += Sz;
3993       break;
3994     }
3995 
3996     // We need to load the argument to a virtual register if we determined
3997     // above that we ran out of physical registers of the appropriate type.
3998     if (needsLoad) {
3999       if (ObjSize < ArgSize && !isLittleEndian)
4000         CurArgOffset += ArgSize - ObjSize;
4001       int FI = MFI.CreateFixedObject(ObjSize, CurArgOffset, isImmutable);
4002       SDValue FIN = DAG.getFrameIndex(FI, PtrVT);
4003       ArgVal = DAG.getLoad(ObjectVT, dl, Chain, FIN, MachinePointerInfo());
4004     }
4005 
4006     InVals.push_back(ArgVal);
4007   }
4008 
4009   // Area that is at least reserved in the caller of this function.
4010   unsigned MinReservedArea;
4011   if (HasParameterArea)
4012     MinReservedArea = std::max(ArgOffset, LinkageSize + 8 * PtrByteSize);
4013   else
4014     MinReservedArea = LinkageSize;
4015 
4016   // Set the size that is at least reserved in caller of this function.  Tail
4017   // call optimized functions' reserved stack space needs to be aligned so that
4018   // taking the difference between two stack areas will result in an aligned
4019   // stack.
4020   MinReservedArea =
4021       EnsureStackAlignment(Subtarget.getFrameLowering(), MinReservedArea);
4022   FuncInfo->setMinReservedArea(MinReservedArea);
4023 
4024   // If the function takes variable number of arguments, make a frame index for
4025   // the start of the first vararg value... for expansion of llvm.va_start.
4026   if (isVarArg) {
4027     int Depth = ArgOffset;
4028 
4029     FuncInfo->setVarArgsFrameIndex(
4030       MFI.CreateFixedObject(PtrByteSize, Depth, true));
4031     SDValue FIN = DAG.getFrameIndex(FuncInfo->getVarArgsFrameIndex(), PtrVT);
4032 
4033     // If this function is vararg, store any remaining integer argument regs
4034     // to their spots on the stack so that they may be loaded by dereferencing
4035     // the result of va_next.
4036     for (GPR_idx = (ArgOffset - LinkageSize) / PtrByteSize;
4037          GPR_idx < Num_GPR_Regs; ++GPR_idx) {
4038       unsigned VReg = MF.addLiveIn(GPR[GPR_idx], &PPC::G8RCRegClass);
4039       SDValue Val = DAG.getCopyFromReg(Chain, dl, VReg, PtrVT);
4040       SDValue Store =
4041           DAG.getStore(Val.getValue(1), dl, Val, FIN, MachinePointerInfo());
4042       MemOps.push_back(Store);
4043       // Increment the address by four for the next argument to store
4044       SDValue PtrOff = DAG.getConstant(PtrByteSize, dl, PtrVT);
4045       FIN = DAG.getNode(ISD::ADD, dl, PtrOff.getValueType(), FIN, PtrOff);
4046     }
4047   }
4048 
4049   if (!MemOps.empty())
4050     Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, MemOps);
4051 
4052   return Chain;
4053 }
4054 
4055 SDValue PPCTargetLowering::LowerFormalArguments_Darwin(
4056     SDValue Chain, CallingConv::ID CallConv, bool isVarArg,
4057     const SmallVectorImpl<ISD::InputArg> &Ins, const SDLoc &dl,
4058     SelectionDAG &DAG, SmallVectorImpl<SDValue> &InVals) const {
4059   // TODO: add description of PPC stack frame format, or at least some docs.
4060   //
4061   MachineFunction &MF = DAG.getMachineFunction();
4062   MachineFrameInfo &MFI = MF.getFrameInfo();
4063   PPCFunctionInfo *FuncInfo = MF.getInfo<PPCFunctionInfo>();
4064 
4065   EVT PtrVT = getPointerTy(MF.getDataLayout());
4066   bool isPPC64 = PtrVT == MVT::i64;
4067   // Potential tail calls could cause overwriting of argument stack slots.
4068   bool isImmutable = !(getTargetMachine().Options.GuaranteedTailCallOpt &&
4069                        (CallConv == CallingConv::Fast));
4070   unsigned PtrByteSize = isPPC64 ? 8 : 4;
4071   unsigned LinkageSize = Subtarget.getFrameLowering()->getLinkageSize();
4072   unsigned ArgOffset = LinkageSize;
4073   // Area that is at least reserved in caller of this function.
4074   unsigned MinReservedArea = ArgOffset;
4075 
4076   static const MCPhysReg GPR_32[] = {           // 32-bit registers.
4077     PPC::R3, PPC::R4, PPC::R5, PPC::R6,
4078     PPC::R7, PPC::R8, PPC::R9, PPC::R10,
4079   };
4080   static const MCPhysReg GPR_64[] = {           // 64-bit registers.
4081     PPC::X3, PPC::X4, PPC::X5, PPC::X6,
4082     PPC::X7, PPC::X8, PPC::X9, PPC::X10,
4083   };
4084   static const MCPhysReg VR[] = {
4085     PPC::V2, PPC::V3, PPC::V4, PPC::V5, PPC::V6, PPC::V7, PPC::V8,
4086     PPC::V9, PPC::V10, PPC::V11, PPC::V12, PPC::V13
4087   };
4088 
4089   const unsigned Num_GPR_Regs = array_lengthof(GPR_32);
4090   const unsigned Num_FPR_Regs = useSoftFloat() ? 0 : 13;
4091   const unsigned Num_VR_Regs  = array_lengthof( VR);
4092 
4093   unsigned GPR_idx = 0, FPR_idx = 0, VR_idx = 0;
4094 
4095   const MCPhysReg *GPR = isPPC64 ? GPR_64 : GPR_32;
4096 
4097   // In 32-bit non-varargs functions, the stack space for vectors is after the
4098   // stack space for non-vectors.  We do not use this space unless we have
4099   // too many vectors to fit in registers, something that only occurs in
4100   // constructed examples:), but we have to walk the arglist to figure
4101   // that out...for the pathological case, compute VecArgOffset as the
4102   // start of the vector parameter area.  Computing VecArgOffset is the
4103   // entire point of the following loop.
4104   unsigned VecArgOffset = ArgOffset;
4105   if (!isVarArg && !isPPC64) {
4106     for (unsigned ArgNo = 0, e = Ins.size(); ArgNo != e;
4107          ++ArgNo) {
4108       EVT ObjectVT = Ins[ArgNo].VT;
4109       ISD::ArgFlagsTy Flags = Ins[ArgNo].Flags;
4110 
4111       if (Flags.isByVal()) {
4112         // ObjSize is the true size, ArgSize rounded up to multiple of regs.
4113         unsigned ObjSize = Flags.getByValSize();
4114         unsigned ArgSize =
4115                 ((ObjSize + PtrByteSize - 1)/PtrByteSize) * PtrByteSize;
4116         VecArgOffset += ArgSize;
4117         continue;
4118       }
4119 
4120       switch(ObjectVT.getSimpleVT().SimpleTy) {
4121       default: llvm_unreachable("Unhandled argument type!");
4122       case MVT::i1:
4123       case MVT::i32:
4124       case MVT::f32:
4125         VecArgOffset += 4;
4126         break;
4127       case MVT::i64:  // PPC64
4128       case MVT::f64:
4129         // FIXME: We are guaranteed to be !isPPC64 at this point.
4130         // Does MVT::i64 apply?
4131         VecArgOffset += 8;
4132         break;
4133       case MVT::v4f32:
4134       case MVT::v4i32:
4135       case MVT::v8i16:
4136       case MVT::v16i8:
4137         // Nothing to do, we're only looking at Nonvector args here.
4138         break;
4139       }
4140     }
4141   }
4142   // We've found where the vector parameter area in memory is.  Skip the
4143   // first 12 parameters; these don't use that memory.
4144   VecArgOffset = ((VecArgOffset+15)/16)*16;
4145   VecArgOffset += 12*16;
4146 
4147   // Add DAG nodes to load the arguments or copy them out of registers.  On
4148   // entry to a function on PPC, the arguments start after the linkage area,
4149   // although the first ones are often in registers.
4150 
4151   SmallVector<SDValue, 8> MemOps;
4152   unsigned nAltivecParamsAtEnd = 0;
4153   Function::const_arg_iterator FuncArg = MF.getFunction().arg_begin();
4154   unsigned CurArgIdx = 0;
4155   for (unsigned ArgNo = 0, e = Ins.size(); ArgNo != e; ++ArgNo) {
4156     SDValue ArgVal;
4157     bool needsLoad = false;
4158     EVT ObjectVT = Ins[ArgNo].VT;
4159     unsigned ObjSize = ObjectVT.getSizeInBits()/8;
4160     unsigned ArgSize = ObjSize;
4161     ISD::ArgFlagsTy Flags = Ins[ArgNo].Flags;
4162     if (Ins[ArgNo].isOrigArg()) {
4163       std::advance(FuncArg, Ins[ArgNo].getOrigArgIndex() - CurArgIdx);
4164       CurArgIdx = Ins[ArgNo].getOrigArgIndex();
4165     }
4166     unsigned CurArgOffset = ArgOffset;
4167 
4168     // Varargs or 64 bit Altivec parameters are padded to a 16 byte boundary.
4169     if (ObjectVT==MVT::v4f32 || ObjectVT==MVT::v4i32 ||
4170         ObjectVT==MVT::v8i16 || ObjectVT==MVT::v16i8) {
4171       if (isVarArg || isPPC64) {
4172         MinReservedArea = ((MinReservedArea+15)/16)*16;
4173         MinReservedArea += CalculateStackSlotSize(ObjectVT,
4174                                                   Flags,
4175                                                   PtrByteSize);
4176       } else  nAltivecParamsAtEnd++;
4177     } else
4178       // Calculate min reserved area.
4179       MinReservedArea += CalculateStackSlotSize(Ins[ArgNo].VT,
4180                                                 Flags,
4181                                                 PtrByteSize);
4182 
4183     // FIXME the codegen can be much improved in some cases.
4184     // We do not have to keep everything in memory.
4185     if (Flags.isByVal()) {
4186       assert(Ins[ArgNo].isOrigArg() && "Byval arguments cannot be implicit");
4187 
4188       // ObjSize is the true size, ArgSize rounded up to multiple of registers.
4189       ObjSize = Flags.getByValSize();
4190       ArgSize = ((ObjSize + PtrByteSize - 1)/PtrByteSize) * PtrByteSize;
4191       // Objects of size 1 and 2 are right justified, everything else is
4192       // left justified.  This means the memory address is adjusted forwards.
4193       if (ObjSize==1 || ObjSize==2) {
4194         CurArgOffset = CurArgOffset + (4 - ObjSize);
4195       }
4196       // The value of the object is its address.
4197       int FI = MFI.CreateFixedObject(ObjSize, CurArgOffset, false, true);
4198       SDValue FIN = DAG.getFrameIndex(FI, PtrVT);
4199       InVals.push_back(FIN);
4200       if (ObjSize==1 || ObjSize==2) {
4201         if (GPR_idx != Num_GPR_Regs) {
4202           unsigned VReg;
4203           if (isPPC64)
4204             VReg = MF.addLiveIn(GPR[GPR_idx], &PPC::G8RCRegClass);
4205           else
4206             VReg = MF.addLiveIn(GPR[GPR_idx], &PPC::GPRCRegClass);
4207           SDValue Val = DAG.getCopyFromReg(Chain, dl, VReg, PtrVT);
4208           EVT ObjType = ObjSize == 1 ? MVT::i8 : MVT::i16;
4209           SDValue Store =
4210               DAG.getTruncStore(Val.getValue(1), dl, Val, FIN,
4211                                 MachinePointerInfo(&*FuncArg), ObjType);
4212           MemOps.push_back(Store);
4213           ++GPR_idx;
4214         }
4215 
4216         ArgOffset += PtrByteSize;
4217 
4218         continue;
4219       }
4220       for (unsigned j = 0; j < ArgSize; j += PtrByteSize) {
4221         // Store whatever pieces of the object are in registers
4222         // to memory.  ArgOffset will be the address of the beginning
4223         // of the object.
4224         if (GPR_idx != Num_GPR_Regs) {
4225           unsigned VReg;
4226           if (isPPC64)
4227             VReg = MF.addLiveIn(GPR[GPR_idx], &PPC::G8RCRegClass);
4228           else
4229             VReg = MF.addLiveIn(GPR[GPR_idx], &PPC::GPRCRegClass);
4230           int FI = MFI.CreateFixedObject(PtrByteSize, ArgOffset, true);
4231           SDValue FIN = DAG.getFrameIndex(FI, PtrVT);
4232           SDValue Val = DAG.getCopyFromReg(Chain, dl, VReg, PtrVT);
4233           SDValue Store = DAG.getStore(Val.getValue(1), dl, Val, FIN,
4234                                        MachinePointerInfo(&*FuncArg, j));
4235           MemOps.push_back(Store);
4236           ++GPR_idx;
4237           ArgOffset += PtrByteSize;
4238         } else {
4239           ArgOffset += ArgSize - (ArgOffset-CurArgOffset);
4240           break;
4241         }
4242       }
4243       continue;
4244     }
4245 
4246     switch (ObjectVT.getSimpleVT().SimpleTy) {
4247     default: llvm_unreachable("Unhandled argument type!");
4248     case MVT::i1:
4249     case MVT::i32:
4250       if (!isPPC64) {
4251         if (GPR_idx != Num_GPR_Regs) {
4252           unsigned VReg = MF.addLiveIn(GPR[GPR_idx], &PPC::GPRCRegClass);
4253           ArgVal = DAG.getCopyFromReg(Chain, dl, VReg, MVT::i32);
4254 
4255           if (ObjectVT == MVT::i1)
4256             ArgVal = DAG.getNode(ISD::TRUNCATE, dl, MVT::i1, ArgVal);
4257 
4258           ++GPR_idx;
4259         } else {
4260           needsLoad = true;
4261           ArgSize = PtrByteSize;
4262         }
4263         // All int arguments reserve stack space in the Darwin ABI.
4264         ArgOffset += PtrByteSize;
4265         break;
4266       }
4267       LLVM_FALLTHROUGH;
4268     case MVT::i64:  // PPC64
4269       if (GPR_idx != Num_GPR_Regs) {
4270         unsigned VReg = MF.addLiveIn(GPR[GPR_idx], &PPC::G8RCRegClass);
4271         ArgVal = DAG.getCopyFromReg(Chain, dl, VReg, MVT::i64);
4272 
4273         if (ObjectVT == MVT::i32 || ObjectVT == MVT::i1)
4274           // PPC64 passes i8, i16, and i32 values in i64 registers. Promote
4275           // value to MVT::i64 and then truncate to the correct register size.
4276           ArgVal = extendArgForPPC64(Flags, ObjectVT, DAG, ArgVal, dl);
4277 
4278         ++GPR_idx;
4279       } else {
4280         needsLoad = true;
4281         ArgSize = PtrByteSize;
4282       }
4283       // All int arguments reserve stack space in the Darwin ABI.
4284       ArgOffset += 8;
4285       break;
4286 
4287     case MVT::f32:
4288     case MVT::f64:
4289       // Every 4 bytes of argument space consumes one of the GPRs available for
4290       // argument passing.
4291       if (GPR_idx != Num_GPR_Regs) {
4292         ++GPR_idx;
4293         if (ObjSize == 8 && GPR_idx != Num_GPR_Regs && !isPPC64)
4294           ++GPR_idx;
4295       }
4296       if (FPR_idx != Num_FPR_Regs) {
4297         unsigned VReg;
4298 
4299         if (ObjectVT == MVT::f32)
4300           VReg = MF.addLiveIn(FPR[FPR_idx], &PPC::F4RCRegClass);
4301         else
4302           VReg = MF.addLiveIn(FPR[FPR_idx], &PPC::F8RCRegClass);
4303 
4304         ArgVal = DAG.getCopyFromReg(Chain, dl, VReg, ObjectVT);
4305         ++FPR_idx;
4306       } else {
4307         needsLoad = true;
4308       }
4309 
4310       // All FP arguments reserve stack space in the Darwin ABI.
4311       ArgOffset += isPPC64 ? 8 : ObjSize;
4312       break;
4313     case MVT::v4f32:
4314     case MVT::v4i32:
4315     case MVT::v8i16:
4316     case MVT::v16i8:
4317       // Note that vector arguments in registers don't reserve stack space,
4318       // except in varargs functions.
4319       if (VR_idx != Num_VR_Regs) {
4320         unsigned VReg = MF.addLiveIn(VR[VR_idx], &PPC::VRRCRegClass);
4321         ArgVal = DAG.getCopyFromReg(Chain, dl, VReg, ObjectVT);
4322         if (isVarArg) {
4323           while ((ArgOffset % 16) != 0) {
4324             ArgOffset += PtrByteSize;
4325             if (GPR_idx != Num_GPR_Regs)
4326               GPR_idx++;
4327           }
4328           ArgOffset += 16;
4329           GPR_idx = std::min(GPR_idx+4, Num_GPR_Regs); // FIXME correct for ppc64?
4330         }
4331         ++VR_idx;
4332       } else {
4333         if (!isVarArg && !isPPC64) {
4334           // Vectors go after all the nonvectors.
4335           CurArgOffset = VecArgOffset;
4336           VecArgOffset += 16;
4337         } else {
4338           // Vectors are aligned.
4339           ArgOffset = ((ArgOffset+15)/16)*16;
4340           CurArgOffset = ArgOffset;
4341           ArgOffset += 16;
4342         }
4343         needsLoad = true;
4344       }
4345       break;
4346     }
4347 
4348     // We need to load the argument to a virtual register if we determined above
4349     // that we ran out of physical registers of the appropriate type.
4350     if (needsLoad) {
4351       int FI = MFI.CreateFixedObject(ObjSize,
4352                                      CurArgOffset + (ArgSize - ObjSize),
4353                                      isImmutable);
4354       SDValue FIN = DAG.getFrameIndex(FI, PtrVT);
4355       ArgVal = DAG.getLoad(ObjectVT, dl, Chain, FIN, MachinePointerInfo());
4356     }
4357 
4358     InVals.push_back(ArgVal);
4359   }
4360 
4361   // Allow for Altivec parameters at the end, if needed.
4362   if (nAltivecParamsAtEnd) {
4363     MinReservedArea = ((MinReservedArea+15)/16)*16;
4364     MinReservedArea += 16*nAltivecParamsAtEnd;
4365   }
4366 
4367   // Area that is at least reserved in the caller of this function.
4368   MinReservedArea = std::max(MinReservedArea, LinkageSize + 8 * PtrByteSize);
4369 
4370   // Set the size that is at least reserved in caller of this function.  Tail
4371   // call optimized functions' reserved stack space needs to be aligned so that
4372   // taking the difference between two stack areas will result in an aligned
4373   // stack.
4374   MinReservedArea =
4375       EnsureStackAlignment(Subtarget.getFrameLowering(), MinReservedArea);
4376   FuncInfo->setMinReservedArea(MinReservedArea);
4377 
4378   // If the function takes variable number of arguments, make a frame index for
4379   // the start of the first vararg value... for expansion of llvm.va_start.
4380   if (isVarArg) {
4381     int Depth = ArgOffset;
4382 
4383     FuncInfo->setVarArgsFrameIndex(
4384       MFI.CreateFixedObject(PtrVT.getSizeInBits()/8,
4385                             Depth, true));
4386     SDValue FIN = DAG.getFrameIndex(FuncInfo->getVarArgsFrameIndex(), PtrVT);
4387 
4388     // If this function is vararg, store any remaining integer argument regs
4389     // to their spots on the stack so that they may be loaded by dereferencing
4390     // the result of va_next.
4391     for (; GPR_idx != Num_GPR_Regs; ++GPR_idx) {
4392       unsigned VReg;
4393 
4394       if (isPPC64)
4395         VReg = MF.addLiveIn(GPR[GPR_idx], &PPC::G8RCRegClass);
4396       else
4397         VReg = MF.addLiveIn(GPR[GPR_idx], &PPC::GPRCRegClass);
4398 
4399       SDValue Val = DAG.getCopyFromReg(Chain, dl, VReg, PtrVT);
4400       SDValue Store =
4401           DAG.getStore(Val.getValue(1), dl, Val, FIN, MachinePointerInfo());
4402       MemOps.push_back(Store);
4403       // Increment the address by four for the next argument to store
4404       SDValue PtrOff = DAG.getConstant(PtrVT.getSizeInBits()/8, dl, PtrVT);
4405       FIN = DAG.getNode(ISD::ADD, dl, PtrOff.getValueType(), FIN, PtrOff);
4406     }
4407   }
4408 
4409   if (!MemOps.empty())
4410     Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, MemOps);
4411 
4412   return Chain;
4413 }
4414 
4415 /// CalculateTailCallSPDiff - Get the amount the stack pointer has to be
4416 /// adjusted to accommodate the arguments for the tailcall.
4417 static int CalculateTailCallSPDiff(SelectionDAG& DAG, bool isTailCall,
4418                                    unsigned ParamSize) {
4419 
4420   if (!isTailCall) return 0;
4421 
4422   PPCFunctionInfo *FI = DAG.getMachineFunction().getInfo<PPCFunctionInfo>();
4423   unsigned CallerMinReservedArea = FI->getMinReservedArea();
4424   int SPDiff = (int)CallerMinReservedArea - (int)ParamSize;
4425   // Remember only if the new adjustment is bigger.
4426   if (SPDiff < FI->getTailCallSPDelta())
4427     FI->setTailCallSPDelta(SPDiff);
4428 
4429   return SPDiff;
4430 }
4431 
4432 static bool isFunctionGlobalAddress(SDValue Callee);
4433 
4434 static bool
4435 callsShareTOCBase(const Function *Caller, SDValue Callee,
4436                     const TargetMachine &TM) {
4437   // If !G, Callee can be an external symbol.
4438   GlobalAddressSDNode *G = dyn_cast<GlobalAddressSDNode>(Callee);
4439   if (!G)
4440     return false;
4441 
4442   // The medium and large code models are expected to provide a sufficiently
4443   // large TOC to provide all data addressing needs of a module with a
4444   // single TOC. Since each module will be addressed with a single TOC then we
4445   // only need to check that caller and callee don't cross dso boundaries.
4446   if (CodeModel::Medium == TM.getCodeModel() ||
4447       CodeModel::Large == TM.getCodeModel())
4448     return TM.shouldAssumeDSOLocal(*Caller->getParent(), G->getGlobal());
4449 
4450   // Otherwise we need to ensure callee and caller are in the same section,
4451   // since the linker may allocate multiple TOCs, and we don't know which
4452   // sections will belong to the same TOC base.
4453 
4454   const GlobalValue *GV = G->getGlobal();
4455   if (!GV->isStrongDefinitionForLinker())
4456     return false;
4457 
4458   // Any explicitly-specified sections and section prefixes must also match.
4459   // Also, if we're using -ffunction-sections, then each function is always in
4460   // a different section (the same is true for COMDAT functions).
4461   if (TM.getFunctionSections() || GV->hasComdat() || Caller->hasComdat() ||
4462       GV->getSection() != Caller->getSection())
4463     return false;
4464   if (const auto *F = dyn_cast<Function>(GV)) {
4465     if (F->getSectionPrefix() != Caller->getSectionPrefix())
4466       return false;
4467   }
4468 
4469   // If the callee might be interposed, then we can't assume the ultimate call
4470   // target will be in the same section. Even in cases where we can assume that
4471   // interposition won't happen, in any case where the linker might insert a
4472   // stub to allow for interposition, we must generate code as though
4473   // interposition might occur. To understand why this matters, consider a
4474   // situation where: a -> b -> c where the arrows indicate calls. b and c are
4475   // in the same section, but a is in a different module (i.e. has a different
4476   // TOC base pointer). If the linker allows for interposition between b and c,
4477   // then it will generate a stub for the call edge between b and c which will
4478   // save the TOC pointer into the designated stack slot allocated by b. If we
4479   // return true here, and therefore allow a tail call between b and c, that
4480   // stack slot won't exist and the b -> c stub will end up saving b'c TOC base
4481   // pointer into the stack slot allocated by a (where the a -> b stub saved
4482   // a's TOC base pointer). If we're not considering a tail call, but rather,
4483   // whether a nop is needed after the call instruction in b, because the linker
4484   // will insert a stub, it might complain about a missing nop if we omit it
4485   // (although many don't complain in this case).
4486   if (!TM.shouldAssumeDSOLocal(*Caller->getParent(), GV))
4487     return false;
4488 
4489   return true;
4490 }
4491 
4492 static bool
4493 needStackSlotPassParameters(const PPCSubtarget &Subtarget,
4494                             const SmallVectorImpl<ISD::OutputArg> &Outs) {
4495   assert(Subtarget.isSVR4ABI() && Subtarget.isPPC64());
4496 
4497   const unsigned PtrByteSize = 8;
4498   const unsigned LinkageSize = Subtarget.getFrameLowering()->getLinkageSize();
4499 
4500   static const MCPhysReg GPR[] = {
4501     PPC::X3, PPC::X4, PPC::X5, PPC::X6,
4502     PPC::X7, PPC::X8, PPC::X9, PPC::X10,
4503   };
4504   static const MCPhysReg VR[] = {
4505     PPC::V2, PPC::V3, PPC::V4, PPC::V5, PPC::V6, PPC::V7, PPC::V8,
4506     PPC::V9, PPC::V10, PPC::V11, PPC::V12, PPC::V13
4507   };
4508 
4509   const unsigned NumGPRs = array_lengthof(GPR);
4510   const unsigned NumFPRs = 13;
4511   const unsigned NumVRs = array_lengthof(VR);
4512   const unsigned ParamAreaSize = NumGPRs * PtrByteSize;
4513 
4514   unsigned NumBytes = LinkageSize;
4515   unsigned AvailableFPRs = NumFPRs;
4516   unsigned AvailableVRs = NumVRs;
4517 
4518   for (const ISD::OutputArg& Param : Outs) {
4519     if (Param.Flags.isNest()) continue;
4520 
4521     if (CalculateStackSlotUsed(Param.VT, Param.ArgVT, Param.Flags,
4522                                PtrByteSize, LinkageSize, ParamAreaSize,
4523                                NumBytes, AvailableFPRs, AvailableVRs,
4524                                Subtarget.hasQPX()))
4525       return true;
4526   }
4527   return false;
4528 }
4529 
4530 static bool
4531 hasSameArgumentList(const Function *CallerFn, ImmutableCallSite CS) {
4532   if (CS.arg_size() != CallerFn->arg_size())
4533     return false;
4534 
4535   ImmutableCallSite::arg_iterator CalleeArgIter = CS.arg_begin();
4536   ImmutableCallSite::arg_iterator CalleeArgEnd = CS.arg_end();
4537   Function::const_arg_iterator CallerArgIter = CallerFn->arg_begin();
4538 
4539   for (; CalleeArgIter != CalleeArgEnd; ++CalleeArgIter, ++CallerArgIter) {
4540     const Value* CalleeArg = *CalleeArgIter;
4541     const Value* CallerArg = &(*CallerArgIter);
4542     if (CalleeArg == CallerArg)
4543       continue;
4544 
4545     // e.g. @caller([4 x i64] %a, [4 x i64] %b) {
4546     //        tail call @callee([4 x i64] undef, [4 x i64] %b)
4547     //      }
4548     // 1st argument of callee is undef and has the same type as caller.
4549     if (CalleeArg->getType() == CallerArg->getType() &&
4550         isa<UndefValue>(CalleeArg))
4551       continue;
4552 
4553     return false;
4554   }
4555 
4556   return true;
4557 }
4558 
4559 // Returns true if TCO is possible between the callers and callees
4560 // calling conventions.
4561 static bool
4562 areCallingConvEligibleForTCO_64SVR4(CallingConv::ID CallerCC,
4563                                     CallingConv::ID CalleeCC) {
4564   // Tail calls are possible with fastcc and ccc.
4565   auto isTailCallableCC  = [] (CallingConv::ID CC){
4566       return  CC == CallingConv::C || CC == CallingConv::Fast;
4567   };
4568   if (!isTailCallableCC(CallerCC) || !isTailCallableCC(CalleeCC))
4569     return false;
4570 
4571   // We can safely tail call both fastcc and ccc callees from a c calling
4572   // convention caller. If the caller is fastcc, we may have less stack space
4573   // than a non-fastcc caller with the same signature so disable tail-calls in
4574   // that case.
4575   return CallerCC == CallingConv::C || CallerCC == CalleeCC;
4576 }
4577 
4578 bool
4579 PPCTargetLowering::IsEligibleForTailCallOptimization_64SVR4(
4580                                     SDValue Callee,
4581                                     CallingConv::ID CalleeCC,
4582                                     ImmutableCallSite CS,
4583                                     bool isVarArg,
4584                                     const SmallVectorImpl<ISD::OutputArg> &Outs,
4585                                     const SmallVectorImpl<ISD::InputArg> &Ins,
4586                                     SelectionDAG& DAG) const {
4587   bool TailCallOpt = getTargetMachine().Options.GuaranteedTailCallOpt;
4588 
4589   if (DisableSCO && !TailCallOpt) return false;
4590 
4591   // Variadic argument functions are not supported.
4592   if (isVarArg) return false;
4593 
4594   auto &Caller = DAG.getMachineFunction().getFunction();
4595   // Check that the calling conventions are compatible for tco.
4596   if (!areCallingConvEligibleForTCO_64SVR4(Caller.getCallingConv(), CalleeCC))
4597     return false;
4598 
4599   // Caller contains any byval parameter is not supported.
4600   if (any_of(Ins, [](const ISD::InputArg &IA) { return IA.Flags.isByVal(); }))
4601     return false;
4602 
4603   // Callee contains any byval parameter is not supported, too.
4604   // Note: This is a quick work around, because in some cases, e.g.
4605   // caller's stack size > callee's stack size, we are still able to apply
4606   // sibling call optimization. For example, gcc is able to do SCO for caller1
4607   // in the following example, but not for caller2.
4608   //   struct test {
4609   //     long int a;
4610   //     char ary[56];
4611   //   } gTest;
4612   //   __attribute__((noinline)) int callee(struct test v, struct test *b) {
4613   //     b->a = v.a;
4614   //     return 0;
4615   //   }
4616   //   void caller1(struct test a, struct test c, struct test *b) {
4617   //     callee(gTest, b); }
4618   //   void caller2(struct test *b) { callee(gTest, b); }
4619   if (any_of(Outs, [](const ISD::OutputArg& OA) { return OA.Flags.isByVal(); }))
4620     return false;
4621 
4622   // If callee and caller use different calling conventions, we cannot pass
4623   // parameters on stack since offsets for the parameter area may be different.
4624   if (Caller.getCallingConv() != CalleeCC &&
4625       needStackSlotPassParameters(Subtarget, Outs))
4626     return false;
4627 
4628   // No TCO/SCO on indirect call because Caller have to restore its TOC
4629   if (!isFunctionGlobalAddress(Callee) &&
4630       !isa<ExternalSymbolSDNode>(Callee))
4631     return false;
4632 
4633   // If the caller and callee potentially have different TOC bases then we
4634   // cannot tail call since we need to restore the TOC pointer after the call.
4635   // ref: https://bugzilla.mozilla.org/show_bug.cgi?id=973977
4636   if (!callsShareTOCBase(&Caller, Callee, getTargetMachine()))
4637     return false;
4638 
4639   // TCO allows altering callee ABI, so we don't have to check further.
4640   if (CalleeCC == CallingConv::Fast && TailCallOpt)
4641     return true;
4642 
4643   if (DisableSCO) return false;
4644 
4645   // If callee use the same argument list that caller is using, then we can
4646   // apply SCO on this case. If it is not, then we need to check if callee needs
4647   // stack for passing arguments.
4648   if (!hasSameArgumentList(&Caller, CS) &&
4649       needStackSlotPassParameters(Subtarget, Outs)) {
4650     return false;
4651   }
4652 
4653   return true;
4654 }
4655 
4656 /// IsEligibleForTailCallOptimization - Check whether the call is eligible
4657 /// for tail call optimization. Targets which want to do tail call
4658 /// optimization should implement this function.
4659 bool
4660 PPCTargetLowering::IsEligibleForTailCallOptimization(SDValue Callee,
4661                                                      CallingConv::ID CalleeCC,
4662                                                      bool isVarArg,
4663                                       const SmallVectorImpl<ISD::InputArg> &Ins,
4664                                                      SelectionDAG& DAG) const {
4665   if (!getTargetMachine().Options.GuaranteedTailCallOpt)
4666     return false;
4667 
4668   // Variable argument functions are not supported.
4669   if (isVarArg)
4670     return false;
4671 
4672   MachineFunction &MF = DAG.getMachineFunction();
4673   CallingConv::ID CallerCC = MF.getFunction().getCallingConv();
4674   if (CalleeCC == CallingConv::Fast && CallerCC == CalleeCC) {
4675     // Functions containing by val parameters are not supported.
4676     for (unsigned i = 0; i != Ins.size(); i++) {
4677        ISD::ArgFlagsTy Flags = Ins[i].Flags;
4678        if (Flags.isByVal()) return false;
4679     }
4680 
4681     // Non-PIC/GOT tail calls are supported.
4682     if (getTargetMachine().getRelocationModel() != Reloc::PIC_)
4683       return true;
4684 
4685     // At the moment we can only do local tail calls (in same module, hidden
4686     // or protected) if we are generating PIC.
4687     if (GlobalAddressSDNode *G = dyn_cast<GlobalAddressSDNode>(Callee))
4688       return G->getGlobal()->hasHiddenVisibility()
4689           || G->getGlobal()->hasProtectedVisibility();
4690   }
4691 
4692   return false;
4693 }
4694 
4695 /// isCallCompatibleAddress - Return the immediate to use if the specified
4696 /// 32-bit value is representable in the immediate field of a BxA instruction.
4697 static SDNode *isBLACompatibleAddress(SDValue Op, SelectionDAG &DAG) {
4698   ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op);
4699   if (!C) return nullptr;
4700 
4701   int Addr = C->getZExtValue();
4702   if ((Addr & 3) != 0 ||  // Low 2 bits are implicitly zero.
4703       SignExtend32<26>(Addr) != Addr)
4704     return nullptr;  // Top 6 bits have to be sext of immediate.
4705 
4706   return DAG
4707       .getConstant(
4708           (int)C->getZExtValue() >> 2, SDLoc(Op),
4709           DAG.getTargetLoweringInfo().getPointerTy(DAG.getDataLayout()))
4710       .getNode();
4711 }
4712 
4713 namespace {
4714 
4715 struct TailCallArgumentInfo {
4716   SDValue Arg;
4717   SDValue FrameIdxOp;
4718   int FrameIdx = 0;
4719 
4720   TailCallArgumentInfo() = default;
4721 };
4722 
4723 } // end anonymous namespace
4724 
4725 /// StoreTailCallArgumentsToStackSlot - Stores arguments to their stack slot.
4726 static void StoreTailCallArgumentsToStackSlot(
4727     SelectionDAG &DAG, SDValue Chain,
4728     const SmallVectorImpl<TailCallArgumentInfo> &TailCallArgs,
4729     SmallVectorImpl<SDValue> &MemOpChains, const SDLoc &dl) {
4730   for (unsigned i = 0, e = TailCallArgs.size(); i != e; ++i) {
4731     SDValue Arg = TailCallArgs[i].Arg;
4732     SDValue FIN = TailCallArgs[i].FrameIdxOp;
4733     int FI = TailCallArgs[i].FrameIdx;
4734     // Store relative to framepointer.
4735     MemOpChains.push_back(DAG.getStore(
4736         Chain, dl, Arg, FIN,
4737         MachinePointerInfo::getFixedStack(DAG.getMachineFunction(), FI)));
4738   }
4739 }
4740 
4741 /// EmitTailCallStoreFPAndRetAddr - Move the frame pointer and return address to
4742 /// the appropriate stack slot for the tail call optimized function call.
4743 static SDValue EmitTailCallStoreFPAndRetAddr(SelectionDAG &DAG, SDValue Chain,
4744                                              SDValue OldRetAddr, SDValue OldFP,
4745                                              int SPDiff, const SDLoc &dl) {
4746   if (SPDiff) {
4747     // Calculate the new stack slot for the return address.
4748     MachineFunction &MF = DAG.getMachineFunction();
4749     const PPCSubtarget &Subtarget = MF.getSubtarget<PPCSubtarget>();
4750     const PPCFrameLowering *FL = Subtarget.getFrameLowering();
4751     bool isPPC64 = Subtarget.isPPC64();
4752     int SlotSize = isPPC64 ? 8 : 4;
4753     int NewRetAddrLoc = SPDiff + FL->getReturnSaveOffset();
4754     int NewRetAddr = MF.getFrameInfo().CreateFixedObject(SlotSize,
4755                                                          NewRetAddrLoc, true);
4756     EVT VT = isPPC64 ? MVT::i64 : MVT::i32;
4757     SDValue NewRetAddrFrIdx = DAG.getFrameIndex(NewRetAddr, VT);
4758     Chain = DAG.getStore(Chain, dl, OldRetAddr, NewRetAddrFrIdx,
4759                          MachinePointerInfo::getFixedStack(MF, NewRetAddr));
4760 
4761     // When using the 32/64-bit SVR4 ABI there is no need to move the FP stack
4762     // slot as the FP is never overwritten.
4763     if (Subtarget.isDarwinABI()) {
4764       int NewFPLoc = SPDiff + FL->getFramePointerSaveOffset();
4765       int NewFPIdx = MF.getFrameInfo().CreateFixedObject(SlotSize, NewFPLoc,
4766                                                          true);
4767       SDValue NewFramePtrIdx = DAG.getFrameIndex(NewFPIdx, VT);
4768       Chain = DAG.getStore(Chain, dl, OldFP, NewFramePtrIdx,
4769                            MachinePointerInfo::getFixedStack(
4770                                DAG.getMachineFunction(), NewFPIdx));
4771     }
4772   }
4773   return Chain;
4774 }
4775 
4776 /// CalculateTailCallArgDest - Remember Argument for later processing. Calculate
4777 /// the position of the argument.
4778 static void
4779 CalculateTailCallArgDest(SelectionDAG &DAG, MachineFunction &MF, bool isPPC64,
4780                          SDValue Arg, int SPDiff, unsigned ArgOffset,
4781                      SmallVectorImpl<TailCallArgumentInfo>& TailCallArguments) {
4782   int Offset = ArgOffset + SPDiff;
4783   uint32_t OpSize = (Arg.getValueSizeInBits() + 7) / 8;
4784   int FI = MF.getFrameInfo().CreateFixedObject(OpSize, Offset, true);
4785   EVT VT = isPPC64 ? MVT::i64 : MVT::i32;
4786   SDValue FIN = DAG.getFrameIndex(FI, VT);
4787   TailCallArgumentInfo Info;
4788   Info.Arg = Arg;
4789   Info.FrameIdxOp = FIN;
4790   Info.FrameIdx = FI;
4791   TailCallArguments.push_back(Info);
4792 }
4793 
4794 /// EmitTCFPAndRetAddrLoad - Emit load from frame pointer and return address
4795 /// stack slot. Returns the chain as result and the loaded frame pointers in
4796 /// LROpOut/FPOpout. Used when tail calling.
4797 SDValue PPCTargetLowering::EmitTailCallLoadFPAndRetAddr(
4798     SelectionDAG &DAG, int SPDiff, SDValue Chain, SDValue &LROpOut,
4799     SDValue &FPOpOut, const SDLoc &dl) const {
4800   if (SPDiff) {
4801     // Load the LR and FP stack slot for later adjusting.
4802     EVT VT = Subtarget.isPPC64() ? MVT::i64 : MVT::i32;
4803     LROpOut = getReturnAddrFrameIndex(DAG);
4804     LROpOut = DAG.getLoad(VT, dl, Chain, LROpOut, MachinePointerInfo());
4805     Chain = SDValue(LROpOut.getNode(), 1);
4806 
4807     // When using the 32/64-bit SVR4 ABI there is no need to load the FP stack
4808     // slot as the FP is never overwritten.
4809     if (Subtarget.isDarwinABI()) {
4810       FPOpOut = getFramePointerFrameIndex(DAG);
4811       FPOpOut = DAG.getLoad(VT, dl, Chain, FPOpOut, MachinePointerInfo());
4812       Chain = SDValue(FPOpOut.getNode(), 1);
4813     }
4814   }
4815   return Chain;
4816 }
4817 
4818 /// CreateCopyOfByValArgument - Make a copy of an aggregate at address specified
4819 /// by "Src" to address "Dst" of size "Size".  Alignment information is
4820 /// specified by the specific parameter attribute. The copy will be passed as
4821 /// a byval function parameter.
4822 /// Sometimes what we are copying is the end of a larger object, the part that
4823 /// does not fit in registers.
4824 static SDValue CreateCopyOfByValArgument(SDValue Src, SDValue Dst,
4825                                          SDValue Chain, ISD::ArgFlagsTy Flags,
4826                                          SelectionDAG &DAG, const SDLoc &dl) {
4827   SDValue SizeNode = DAG.getConstant(Flags.getByValSize(), dl, MVT::i32);
4828   return DAG.getMemcpy(Chain, dl, Dst, Src, SizeNode, Flags.getByValAlign(),
4829                        false, false, false, MachinePointerInfo(),
4830                        MachinePointerInfo());
4831 }
4832 
4833 /// LowerMemOpCallTo - Store the argument to the stack or remember it in case of
4834 /// tail calls.
4835 static void LowerMemOpCallTo(
4836     SelectionDAG &DAG, MachineFunction &MF, SDValue Chain, SDValue Arg,
4837     SDValue PtrOff, int SPDiff, unsigned ArgOffset, bool isPPC64,
4838     bool isTailCall, bool isVector, SmallVectorImpl<SDValue> &MemOpChains,
4839     SmallVectorImpl<TailCallArgumentInfo> &TailCallArguments, const SDLoc &dl) {
4840   EVT PtrVT = DAG.getTargetLoweringInfo().getPointerTy(DAG.getDataLayout());
4841   if (!isTailCall) {
4842     if (isVector) {
4843       SDValue StackPtr;
4844       if (isPPC64)
4845         StackPtr = DAG.getRegister(PPC::X1, MVT::i64);
4846       else
4847         StackPtr = DAG.getRegister(PPC::R1, MVT::i32);
4848       PtrOff = DAG.getNode(ISD::ADD, dl, PtrVT, StackPtr,
4849                            DAG.getConstant(ArgOffset, dl, PtrVT));
4850     }
4851     MemOpChains.push_back(
4852         DAG.getStore(Chain, dl, Arg, PtrOff, MachinePointerInfo()));
4853     // Calculate and remember argument location.
4854   } else CalculateTailCallArgDest(DAG, MF, isPPC64, Arg, SPDiff, ArgOffset,
4855                                   TailCallArguments);
4856 }
4857 
4858 static void
4859 PrepareTailCall(SelectionDAG &DAG, SDValue &InFlag, SDValue &Chain,
4860                 const SDLoc &dl, int SPDiff, unsigned NumBytes, SDValue LROp,
4861                 SDValue FPOp,
4862                 SmallVectorImpl<TailCallArgumentInfo> &TailCallArguments) {
4863   // Emit a sequence of copyto/copyfrom virtual registers for arguments that
4864   // might overwrite each other in case of tail call optimization.
4865   SmallVector<SDValue, 8> MemOpChains2;
4866   // Do not flag preceding copytoreg stuff together with the following stuff.
4867   InFlag = SDValue();
4868   StoreTailCallArgumentsToStackSlot(DAG, Chain, TailCallArguments,
4869                                     MemOpChains2, dl);
4870   if (!MemOpChains2.empty())
4871     Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, MemOpChains2);
4872 
4873   // Store the return address to the appropriate stack slot.
4874   Chain = EmitTailCallStoreFPAndRetAddr(DAG, Chain, LROp, FPOp, SPDiff, dl);
4875 
4876   // Emit callseq_end just before tailcall node.
4877   Chain = DAG.getCALLSEQ_END(Chain, DAG.getIntPtrConstant(NumBytes, dl, true),
4878                              DAG.getIntPtrConstant(0, dl, true), InFlag, dl);
4879   InFlag = Chain.getValue(1);
4880 }
4881 
4882 // Is this global address that of a function that can be called by name? (as
4883 // opposed to something that must hold a descriptor for an indirect call).
4884 static bool isFunctionGlobalAddress(SDValue Callee) {
4885   if (GlobalAddressSDNode *G = dyn_cast<GlobalAddressSDNode>(Callee)) {
4886     if (Callee.getOpcode() == ISD::GlobalTLSAddress ||
4887         Callee.getOpcode() == ISD::TargetGlobalTLSAddress)
4888       return false;
4889 
4890     return G->getGlobal()->getValueType()->isFunctionTy();
4891   }
4892 
4893   return false;
4894 }
4895 
4896 static unsigned
4897 PrepareCall(SelectionDAG &DAG, SDValue &Callee, SDValue &InFlag, SDValue &Chain,
4898             SDValue CallSeqStart, const SDLoc &dl, int SPDiff, bool isTailCall,
4899             bool isPatchPoint, bool hasNest,
4900             SmallVectorImpl<std::pair<unsigned, SDValue>> &RegsToPass,
4901             SmallVectorImpl<SDValue> &Ops, std::vector<EVT> &NodeTys,
4902             ImmutableCallSite CS, const PPCSubtarget &Subtarget) {
4903   bool isPPC64 = Subtarget.isPPC64();
4904   bool isSVR4ABI = Subtarget.isSVR4ABI();
4905   bool isELFv2ABI = Subtarget.isELFv2ABI();
4906 
4907   EVT PtrVT = DAG.getTargetLoweringInfo().getPointerTy(DAG.getDataLayout());
4908   NodeTys.push_back(MVT::Other);   // Returns a chain
4909   NodeTys.push_back(MVT::Glue);    // Returns a flag for retval copy to use.
4910 
4911   unsigned CallOpc = PPCISD::CALL;
4912 
4913   bool needIndirectCall = true;
4914   if (!isSVR4ABI || !isPPC64)
4915     if (SDNode *Dest = isBLACompatibleAddress(Callee, DAG)) {
4916       // If this is an absolute destination address, use the munged value.
4917       Callee = SDValue(Dest, 0);
4918       needIndirectCall = false;
4919     }
4920 
4921   // PC-relative references to external symbols should go through $stub, unless
4922   // we're building with the leopard linker or later, which automatically
4923   // synthesizes these stubs.
4924   const TargetMachine &TM = DAG.getTarget();
4925   const Module *Mod = DAG.getMachineFunction().getFunction().getParent();
4926   const GlobalValue *GV = nullptr;
4927   if (auto *G = dyn_cast<GlobalAddressSDNode>(Callee))
4928     GV = G->getGlobal();
4929   bool Local = TM.shouldAssumeDSOLocal(*Mod, GV);
4930   bool UsePlt = !Local && Subtarget.isTargetELF() && !isPPC64;
4931 
4932   if (isFunctionGlobalAddress(Callee)) {
4933     GlobalAddressSDNode *G = cast<GlobalAddressSDNode>(Callee);
4934     // A call to a TLS address is actually an indirect call to a
4935     // thread-specific pointer.
4936     unsigned OpFlags = 0;
4937     if (UsePlt)
4938       OpFlags = PPCII::MO_PLT;
4939 
4940     // If the callee is a GlobalAddress/ExternalSymbol node (quite common,
4941     // every direct call is) turn it into a TargetGlobalAddress /
4942     // TargetExternalSymbol node so that legalize doesn't hack it.
4943     Callee = DAG.getTargetGlobalAddress(G->getGlobal(), dl,
4944                                         Callee.getValueType(), 0, OpFlags);
4945     needIndirectCall = false;
4946   }
4947 
4948   if (ExternalSymbolSDNode *S = dyn_cast<ExternalSymbolSDNode>(Callee)) {
4949     unsigned char OpFlags = 0;
4950 
4951     if (UsePlt)
4952       OpFlags = PPCII::MO_PLT;
4953 
4954     Callee = DAG.getTargetExternalSymbol(S->getSymbol(), Callee.getValueType(),
4955                                          OpFlags);
4956     needIndirectCall = false;
4957   }
4958 
4959   if (isPatchPoint) {
4960     // We'll form an invalid direct call when lowering a patchpoint; the full
4961     // sequence for an indirect call is complicated, and many of the
4962     // instructions introduced might have side effects (and, thus, can't be
4963     // removed later). The call itself will be removed as soon as the
4964     // argument/return lowering is complete, so the fact that it has the wrong
4965     // kind of operands should not really matter.
4966     needIndirectCall = false;
4967   }
4968 
4969   if (needIndirectCall) {
4970     // Otherwise, this is an indirect call.  We have to use a MTCTR/BCTRL pair
4971     // to do the call, we can't use PPCISD::CALL.
4972     SDValue MTCTROps[] = {Chain, Callee, InFlag};
4973 
4974     if (isSVR4ABI && isPPC64 && !isELFv2ABI) {
4975       // Function pointers in the 64-bit SVR4 ABI do not point to the function
4976       // entry point, but to the function descriptor (the function entry point
4977       // address is part of the function descriptor though).
4978       // The function descriptor is a three doubleword structure with the
4979       // following fields: function entry point, TOC base address and
4980       // environment pointer.
4981       // Thus for a call through a function pointer, the following actions need
4982       // to be performed:
4983       //   1. Save the TOC of the caller in the TOC save area of its stack
4984       //      frame (this is done in LowerCall_Darwin() or LowerCall_64SVR4()).
4985       //   2. Load the address of the function entry point from the function
4986       //      descriptor.
4987       //   3. Load the TOC of the callee from the function descriptor into r2.
4988       //   4. Load the environment pointer from the function descriptor into
4989       //      r11.
4990       //   5. Branch to the function entry point address.
4991       //   6. On return of the callee, the TOC of the caller needs to be
4992       //      restored (this is done in FinishCall()).
4993       //
4994       // The loads are scheduled at the beginning of the call sequence, and the
4995       // register copies are flagged together to ensure that no other
4996       // operations can be scheduled in between. E.g. without flagging the
4997       // copies together, a TOC access in the caller could be scheduled between
4998       // the assignment of the callee TOC and the branch to the callee, which
4999       // results in the TOC access going through the TOC of the callee instead
5000       // of going through the TOC of the caller, which leads to incorrect code.
5001 
5002       // Load the address of the function entry point from the function
5003       // descriptor.
5004       SDValue LDChain = CallSeqStart.getValue(CallSeqStart->getNumValues()-1);
5005       if (LDChain.getValueType() == MVT::Glue)
5006         LDChain = CallSeqStart.getValue(CallSeqStart->getNumValues()-2);
5007 
5008       auto MMOFlags = Subtarget.hasInvariantFunctionDescriptors()
5009                           ? (MachineMemOperand::MODereferenceable |
5010                              MachineMemOperand::MOInvariant)
5011                           : MachineMemOperand::MONone;
5012 
5013       MachinePointerInfo MPI(CS ? CS.getCalledValue() : nullptr);
5014       SDValue LoadFuncPtr = DAG.getLoad(MVT::i64, dl, LDChain, Callee, MPI,
5015                                         /* Alignment = */ 8, MMOFlags);
5016 
5017       // Load environment pointer into r11.
5018       SDValue PtrOff = DAG.getIntPtrConstant(16, dl);
5019       SDValue AddPtr = DAG.getNode(ISD::ADD, dl, MVT::i64, Callee, PtrOff);
5020       SDValue LoadEnvPtr =
5021           DAG.getLoad(MVT::i64, dl, LDChain, AddPtr, MPI.getWithOffset(16),
5022                       /* Alignment = */ 8, MMOFlags);
5023 
5024       SDValue TOCOff = DAG.getIntPtrConstant(8, dl);
5025       SDValue AddTOC = DAG.getNode(ISD::ADD, dl, MVT::i64, Callee, TOCOff);
5026       SDValue TOCPtr =
5027           DAG.getLoad(MVT::i64, dl, LDChain, AddTOC, MPI.getWithOffset(8),
5028                       /* Alignment = */ 8, MMOFlags);
5029 
5030       setUsesTOCBasePtr(DAG);
5031       SDValue TOCVal = DAG.getCopyToReg(Chain, dl, PPC::X2, TOCPtr,
5032                                         InFlag);
5033       Chain = TOCVal.getValue(0);
5034       InFlag = TOCVal.getValue(1);
5035 
5036       // If the function call has an explicit 'nest' parameter, it takes the
5037       // place of the environment pointer.
5038       if (!hasNest) {
5039         SDValue EnvVal = DAG.getCopyToReg(Chain, dl, PPC::X11, LoadEnvPtr,
5040                                           InFlag);
5041 
5042         Chain = EnvVal.getValue(0);
5043         InFlag = EnvVal.getValue(1);
5044       }
5045 
5046       MTCTROps[0] = Chain;
5047       MTCTROps[1] = LoadFuncPtr;
5048       MTCTROps[2] = InFlag;
5049     }
5050 
5051     Chain = DAG.getNode(PPCISD::MTCTR, dl, NodeTys,
5052                         makeArrayRef(MTCTROps, InFlag.getNode() ? 3 : 2));
5053     InFlag = Chain.getValue(1);
5054 
5055     NodeTys.clear();
5056     NodeTys.push_back(MVT::Other);
5057     NodeTys.push_back(MVT::Glue);
5058     Ops.push_back(Chain);
5059     CallOpc = PPCISD::BCTRL;
5060     Callee.setNode(nullptr);
5061     // Add use of X11 (holding environment pointer)
5062     if (isSVR4ABI && isPPC64 && !isELFv2ABI && !hasNest)
5063       Ops.push_back(DAG.getRegister(PPC::X11, PtrVT));
5064     // Add CTR register as callee so a bctr can be emitted later.
5065     if (isTailCall)
5066       Ops.push_back(DAG.getRegister(isPPC64 ? PPC::CTR8 : PPC::CTR, PtrVT));
5067   }
5068 
5069   // If this is a direct call, pass the chain and the callee.
5070   if (Callee.getNode()) {
5071     Ops.push_back(Chain);
5072     Ops.push_back(Callee);
5073   }
5074   // If this is a tail call add stack pointer delta.
5075   if (isTailCall)
5076     Ops.push_back(DAG.getConstant(SPDiff, dl, MVT::i32));
5077 
5078   // Add argument registers to the end of the list so that they are known live
5079   // into the call.
5080   for (unsigned i = 0, e = RegsToPass.size(); i != e; ++i)
5081     Ops.push_back(DAG.getRegister(RegsToPass[i].first,
5082                                   RegsToPass[i].second.getValueType()));
5083 
5084   // All calls, in both the ELF V1 and V2 ABIs, need the TOC register live
5085   // into the call.
5086   if (isSVR4ABI && isPPC64 && !isPatchPoint) {
5087     setUsesTOCBasePtr(DAG);
5088     Ops.push_back(DAG.getRegister(PPC::X2, PtrVT));
5089   }
5090 
5091   return CallOpc;
5092 }
5093 
5094 SDValue PPCTargetLowering::LowerCallResult(
5095     SDValue Chain, SDValue InFlag, CallingConv::ID CallConv, bool isVarArg,
5096     const SmallVectorImpl<ISD::InputArg> &Ins, const SDLoc &dl,
5097     SelectionDAG &DAG, SmallVectorImpl<SDValue> &InVals) const {
5098   SmallVector<CCValAssign, 16> RVLocs;
5099   CCState CCRetInfo(CallConv, isVarArg, DAG.getMachineFunction(), RVLocs,
5100                     *DAG.getContext());
5101 
5102   CCRetInfo.AnalyzeCallResult(
5103       Ins, (Subtarget.isSVR4ABI() && CallConv == CallingConv::Cold)
5104                ? RetCC_PPC_Cold
5105                : RetCC_PPC);
5106 
5107   // Copy all of the result registers out of their specified physreg.
5108   for (unsigned i = 0, e = RVLocs.size(); i != e; ++i) {
5109     CCValAssign &VA = RVLocs[i];
5110     assert(VA.isRegLoc() && "Can only return in registers!");
5111 
5112     SDValue Val = DAG.getCopyFromReg(Chain, dl,
5113                                      VA.getLocReg(), VA.getLocVT(), InFlag);
5114     Chain = Val.getValue(1);
5115     InFlag = Val.getValue(2);
5116 
5117     switch (VA.getLocInfo()) {
5118     default: llvm_unreachable("Unknown loc info!");
5119     case CCValAssign::Full: break;
5120     case CCValAssign::AExt:
5121       Val = DAG.getNode(ISD::TRUNCATE, dl, VA.getValVT(), Val);
5122       break;
5123     case CCValAssign::ZExt:
5124       Val = DAG.getNode(ISD::AssertZext, dl, VA.getLocVT(), Val,
5125                         DAG.getValueType(VA.getValVT()));
5126       Val = DAG.getNode(ISD::TRUNCATE, dl, VA.getValVT(), Val);
5127       break;
5128     case CCValAssign::SExt:
5129       Val = DAG.getNode(ISD::AssertSext, dl, VA.getLocVT(), Val,
5130                         DAG.getValueType(VA.getValVT()));
5131       Val = DAG.getNode(ISD::TRUNCATE, dl, VA.getValVT(), Val);
5132       break;
5133     }
5134 
5135     InVals.push_back(Val);
5136   }
5137 
5138   return Chain;
5139 }
5140 
5141 SDValue PPCTargetLowering::FinishCall(
5142     CallingConv::ID CallConv, const SDLoc &dl, bool isTailCall, bool isVarArg,
5143     bool isPatchPoint, bool hasNest, SelectionDAG &DAG,
5144     SmallVector<std::pair<unsigned, SDValue>, 8> &RegsToPass, SDValue InFlag,
5145     SDValue Chain, SDValue CallSeqStart, SDValue &Callee, int SPDiff,
5146     unsigned NumBytes, const SmallVectorImpl<ISD::InputArg> &Ins,
5147     SmallVectorImpl<SDValue> &InVals, ImmutableCallSite CS) const {
5148   std::vector<EVT> NodeTys;
5149   SmallVector<SDValue, 8> Ops;
5150   unsigned CallOpc = PrepareCall(DAG, Callee, InFlag, Chain, CallSeqStart, dl,
5151                                  SPDiff, isTailCall, isPatchPoint, hasNest,
5152                                  RegsToPass, Ops, NodeTys, CS, Subtarget);
5153 
5154   // Add implicit use of CR bit 6 for 32-bit SVR4 vararg calls
5155   if (isVarArg && Subtarget.isSVR4ABI() && !Subtarget.isPPC64())
5156     Ops.push_back(DAG.getRegister(PPC::CR1EQ, MVT::i32));
5157 
5158   // When performing tail call optimization the callee pops its arguments off
5159   // the stack. Account for this here so these bytes can be pushed back on in
5160   // PPCFrameLowering::eliminateCallFramePseudoInstr.
5161   int BytesCalleePops =
5162     (CallConv == CallingConv::Fast &&
5163      getTargetMachine().Options.GuaranteedTailCallOpt) ? NumBytes : 0;
5164 
5165   // Add a register mask operand representing the call-preserved registers.
5166   const TargetRegisterInfo *TRI = Subtarget.getRegisterInfo();
5167   const uint32_t *Mask =
5168       TRI->getCallPreservedMask(DAG.getMachineFunction(), CallConv);
5169   assert(Mask && "Missing call preserved mask for calling convention");
5170   Ops.push_back(DAG.getRegisterMask(Mask));
5171 
5172   if (InFlag.getNode())
5173     Ops.push_back(InFlag);
5174 
5175   // Emit tail call.
5176   if (isTailCall) {
5177     assert(((Callee.getOpcode() == ISD::Register &&
5178              cast<RegisterSDNode>(Callee)->getReg() == PPC::CTR) ||
5179             Callee.getOpcode() == ISD::TargetExternalSymbol ||
5180             Callee.getOpcode() == ISD::TargetGlobalAddress ||
5181             isa<ConstantSDNode>(Callee)) &&
5182     "Expecting an global address, external symbol, absolute value or register");
5183 
5184     DAG.getMachineFunction().getFrameInfo().setHasTailCall();
5185     return DAG.getNode(PPCISD::TC_RETURN, dl, MVT::Other, Ops);
5186   }
5187 
5188   // Add a NOP immediately after the branch instruction when using the 64-bit
5189   // SVR4 ABI. At link time, if caller and callee are in a different module and
5190   // thus have a different TOC, the call will be replaced with a call to a stub
5191   // function which saves the current TOC, loads the TOC of the callee and
5192   // branches to the callee. The NOP will be replaced with a load instruction
5193   // which restores the TOC of the caller from the TOC save slot of the current
5194   // stack frame. If caller and callee belong to the same module (and have the
5195   // same TOC), the NOP will remain unchanged.
5196 
5197   MachineFunction &MF = DAG.getMachineFunction();
5198   if (!isTailCall && Subtarget.isSVR4ABI()&& Subtarget.isPPC64() &&
5199       !isPatchPoint) {
5200     if (CallOpc == PPCISD::BCTRL) {
5201       // This is a call through a function pointer.
5202       // Restore the caller TOC from the save area into R2.
5203       // See PrepareCall() for more information about calls through function
5204       // pointers in the 64-bit SVR4 ABI.
5205       // We are using a target-specific load with r2 hard coded, because the
5206       // result of a target-independent load would never go directly into r2,
5207       // since r2 is a reserved register (which prevents the register allocator
5208       // from allocating it), resulting in an additional register being
5209       // allocated and an unnecessary move instruction being generated.
5210       CallOpc = PPCISD::BCTRL_LOAD_TOC;
5211 
5212       EVT PtrVT = getPointerTy(DAG.getDataLayout());
5213       SDValue StackPtr = DAG.getRegister(PPC::X1, PtrVT);
5214       unsigned TOCSaveOffset = Subtarget.getFrameLowering()->getTOCSaveOffset();
5215       SDValue TOCOff = DAG.getIntPtrConstant(TOCSaveOffset, dl);
5216       SDValue AddTOC = DAG.getNode(ISD::ADD, dl, MVT::i64, StackPtr, TOCOff);
5217 
5218       // The address needs to go after the chain input but before the flag (or
5219       // any other variadic arguments).
5220       Ops.insert(std::next(Ops.begin()), AddTOC);
5221     } else if (CallOpc == PPCISD::CALL &&
5222       !callsShareTOCBase(&MF.getFunction(), Callee, DAG.getTarget())) {
5223       // Otherwise insert NOP for non-local calls.
5224       CallOpc = PPCISD::CALL_NOP;
5225     }
5226   }
5227 
5228   Chain = DAG.getNode(CallOpc, dl, NodeTys, Ops);
5229   InFlag = Chain.getValue(1);
5230 
5231   Chain = DAG.getCALLSEQ_END(Chain, DAG.getIntPtrConstant(NumBytes, dl, true),
5232                              DAG.getIntPtrConstant(BytesCalleePops, dl, true),
5233                              InFlag, dl);
5234   if (!Ins.empty())
5235     InFlag = Chain.getValue(1);
5236 
5237   return LowerCallResult(Chain, InFlag, CallConv, isVarArg,
5238                          Ins, dl, DAG, InVals);
5239 }
5240 
5241 SDValue
5242 PPCTargetLowering::LowerCall(TargetLowering::CallLoweringInfo &CLI,
5243                              SmallVectorImpl<SDValue> &InVals) const {
5244   SelectionDAG &DAG                     = CLI.DAG;
5245   SDLoc &dl                             = CLI.DL;
5246   SmallVectorImpl<ISD::OutputArg> &Outs = CLI.Outs;
5247   SmallVectorImpl<SDValue> &OutVals     = CLI.OutVals;
5248   SmallVectorImpl<ISD::InputArg> &Ins   = CLI.Ins;
5249   SDValue Chain                         = CLI.Chain;
5250   SDValue Callee                        = CLI.Callee;
5251   bool &isTailCall                      = CLI.IsTailCall;
5252   CallingConv::ID CallConv              = CLI.CallConv;
5253   bool isVarArg                         = CLI.IsVarArg;
5254   bool isPatchPoint                     = CLI.IsPatchPoint;
5255   ImmutableCallSite CS                  = CLI.CS;
5256 
5257   if (isTailCall) {
5258     if (Subtarget.useLongCalls() && !(CS && CS.isMustTailCall()))
5259       isTailCall = false;
5260     else if (Subtarget.isSVR4ABI() && Subtarget.isPPC64())
5261       isTailCall =
5262         IsEligibleForTailCallOptimization_64SVR4(Callee, CallConv, CS,
5263                                                  isVarArg, Outs, Ins, DAG);
5264     else
5265       isTailCall = IsEligibleForTailCallOptimization(Callee, CallConv, isVarArg,
5266                                                      Ins, DAG);
5267     if (isTailCall) {
5268       ++NumTailCalls;
5269       if (!getTargetMachine().Options.GuaranteedTailCallOpt)
5270         ++NumSiblingCalls;
5271 
5272       assert(isa<GlobalAddressSDNode>(Callee) &&
5273              "Callee should be an llvm::Function object.");
5274       LLVM_DEBUG(
5275           const GlobalValue *GV =
5276               cast<GlobalAddressSDNode>(Callee)->getGlobal();
5277           const unsigned Width =
5278               80 - strlen("TCO caller: ") - strlen(", callee linkage: 0, 0");
5279           dbgs() << "TCO caller: "
5280                  << left_justify(DAG.getMachineFunction().getName(), Width)
5281                  << ", callee linkage: " << GV->getVisibility() << ", "
5282                  << GV->getLinkage() << "\n");
5283     }
5284   }
5285 
5286   if (!isTailCall && CS && CS.isMustTailCall())
5287     report_fatal_error("failed to perform tail call elimination on a call "
5288                        "site marked musttail");
5289 
5290   // When long calls (i.e. indirect calls) are always used, calls are always
5291   // made via function pointer. If we have a function name, first translate it
5292   // into a pointer.
5293   if (Subtarget.useLongCalls() && isa<GlobalAddressSDNode>(Callee) &&
5294       !isTailCall)
5295     Callee = LowerGlobalAddress(Callee, DAG);
5296 
5297   if (Subtarget.isSVR4ABI()) {
5298     if (Subtarget.isPPC64())
5299       return LowerCall_64SVR4(Chain, Callee, CallConv, isVarArg,
5300                               isTailCall, isPatchPoint, Outs, OutVals, Ins,
5301                               dl, DAG, InVals, CS);
5302     else
5303       return LowerCall_32SVR4(Chain, Callee, CallConv, isVarArg,
5304                               isTailCall, isPatchPoint, Outs, OutVals, Ins,
5305                               dl, DAG, InVals, CS);
5306   }
5307 
5308   return LowerCall_Darwin(Chain, Callee, CallConv, isVarArg,
5309                           isTailCall, isPatchPoint, Outs, OutVals, Ins,
5310                           dl, DAG, InVals, CS);
5311 }
5312 
5313 SDValue PPCTargetLowering::LowerCall_32SVR4(
5314     SDValue Chain, SDValue Callee, CallingConv::ID CallConv, bool isVarArg,
5315     bool isTailCall, bool isPatchPoint,
5316     const SmallVectorImpl<ISD::OutputArg> &Outs,
5317     const SmallVectorImpl<SDValue> &OutVals,
5318     const SmallVectorImpl<ISD::InputArg> &Ins, const SDLoc &dl,
5319     SelectionDAG &DAG, SmallVectorImpl<SDValue> &InVals,
5320     ImmutableCallSite CS) const {
5321   // See PPCTargetLowering::LowerFormalArguments_32SVR4() for a description
5322   // of the 32-bit SVR4 ABI stack frame layout.
5323 
5324   assert((CallConv == CallingConv::C ||
5325           CallConv == CallingConv::Cold ||
5326           CallConv == CallingConv::Fast) && "Unknown calling convention!");
5327 
5328   unsigned PtrByteSize = 4;
5329 
5330   MachineFunction &MF = DAG.getMachineFunction();
5331 
5332   // Mark this function as potentially containing a function that contains a
5333   // tail call. As a consequence the frame pointer will be used for dynamicalloc
5334   // and restoring the callers stack pointer in this functions epilog. This is
5335   // done because by tail calling the called function might overwrite the value
5336   // in this function's (MF) stack pointer stack slot 0(SP).
5337   if (getTargetMachine().Options.GuaranteedTailCallOpt &&
5338       CallConv == CallingConv::Fast)
5339     MF.getInfo<PPCFunctionInfo>()->setHasFastCall();
5340 
5341   // Count how many bytes are to be pushed on the stack, including the linkage
5342   // area, parameter list area and the part of the local variable space which
5343   // contains copies of aggregates which are passed by value.
5344 
5345   // Assign locations to all of the outgoing arguments.
5346   SmallVector<CCValAssign, 16> ArgLocs;
5347   PPCCCState CCInfo(CallConv, isVarArg, MF, ArgLocs, *DAG.getContext());
5348 
5349   // Reserve space for the linkage area on the stack.
5350   CCInfo.AllocateStack(Subtarget.getFrameLowering()->getLinkageSize(),
5351                        PtrByteSize);
5352   if (useSoftFloat())
5353     CCInfo.PreAnalyzeCallOperands(Outs);
5354 
5355   if (isVarArg) {
5356     // Handle fixed and variable vector arguments differently.
5357     // Fixed vector arguments go into registers as long as registers are
5358     // available. Variable vector arguments always go into memory.
5359     unsigned NumArgs = Outs.size();
5360 
5361     for (unsigned i = 0; i != NumArgs; ++i) {
5362       MVT ArgVT = Outs[i].VT;
5363       ISD::ArgFlagsTy ArgFlags = Outs[i].Flags;
5364       bool Result;
5365 
5366       if (Outs[i].IsFixed) {
5367         Result = CC_PPC32_SVR4(i, ArgVT, ArgVT, CCValAssign::Full, ArgFlags,
5368                                CCInfo);
5369       } else {
5370         Result = CC_PPC32_SVR4_VarArg(i, ArgVT, ArgVT, CCValAssign::Full,
5371                                       ArgFlags, CCInfo);
5372       }
5373 
5374       if (Result) {
5375 #ifndef NDEBUG
5376         errs() << "Call operand #" << i << " has unhandled type "
5377              << EVT(ArgVT).getEVTString() << "\n";
5378 #endif
5379         llvm_unreachable(nullptr);
5380       }
5381     }
5382   } else {
5383     // All arguments are treated the same.
5384     CCInfo.AnalyzeCallOperands(Outs, CC_PPC32_SVR4);
5385   }
5386   CCInfo.clearWasPPCF128();
5387 
5388   // Assign locations to all of the outgoing aggregate by value arguments.
5389   SmallVector<CCValAssign, 16> ByValArgLocs;
5390   CCState CCByValInfo(CallConv, isVarArg, MF, ByValArgLocs, *DAG.getContext());
5391 
5392   // Reserve stack space for the allocations in CCInfo.
5393   CCByValInfo.AllocateStack(CCInfo.getNextStackOffset(), PtrByteSize);
5394 
5395   CCByValInfo.AnalyzeCallOperands(Outs, CC_PPC32_SVR4_ByVal);
5396 
5397   // Size of the linkage area, parameter list area and the part of the local
5398   // space variable where copies of aggregates which are passed by value are
5399   // stored.
5400   unsigned NumBytes = CCByValInfo.getNextStackOffset();
5401 
5402   // Calculate by how many bytes the stack has to be adjusted in case of tail
5403   // call optimization.
5404   int SPDiff = CalculateTailCallSPDiff(DAG, isTailCall, NumBytes);
5405 
5406   // Adjust the stack pointer for the new arguments...
5407   // These operations are automatically eliminated by the prolog/epilog pass
5408   Chain = DAG.getCALLSEQ_START(Chain, NumBytes, 0, dl);
5409   SDValue CallSeqStart = Chain;
5410 
5411   // Load the return address and frame pointer so it can be moved somewhere else
5412   // later.
5413   SDValue LROp, FPOp;
5414   Chain = EmitTailCallLoadFPAndRetAddr(DAG, SPDiff, Chain, LROp, FPOp, dl);
5415 
5416   // Set up a copy of the stack pointer for use loading and storing any
5417   // arguments that may not fit in the registers available for argument
5418   // passing.
5419   SDValue StackPtr = DAG.getRegister(PPC::R1, MVT::i32);
5420 
5421   SmallVector<std::pair<unsigned, SDValue>, 8> RegsToPass;
5422   SmallVector<TailCallArgumentInfo, 8> TailCallArguments;
5423   SmallVector<SDValue, 8> MemOpChains;
5424 
5425   bool seenFloatArg = false;
5426   // Walk the register/memloc assignments, inserting copies/loads.
5427   for (unsigned i = 0, j = 0, e = ArgLocs.size();
5428        i != e;
5429        ++i) {
5430     CCValAssign &VA = ArgLocs[i];
5431     SDValue Arg = OutVals[i];
5432     ISD::ArgFlagsTy Flags = Outs[i].Flags;
5433 
5434     if (Flags.isByVal()) {
5435       // Argument is an aggregate which is passed by value, thus we need to
5436       // create a copy of it in the local variable space of the current stack
5437       // frame (which is the stack frame of the caller) and pass the address of
5438       // this copy to the callee.
5439       assert((j < ByValArgLocs.size()) && "Index out of bounds!");
5440       CCValAssign &ByValVA = ByValArgLocs[j++];
5441       assert((VA.getValNo() == ByValVA.getValNo()) && "ValNo mismatch!");
5442 
5443       // Memory reserved in the local variable space of the callers stack frame.
5444       unsigned LocMemOffset = ByValVA.getLocMemOffset();
5445 
5446       SDValue PtrOff = DAG.getIntPtrConstant(LocMemOffset, dl);
5447       PtrOff = DAG.getNode(ISD::ADD, dl, getPointerTy(MF.getDataLayout()),
5448                            StackPtr, PtrOff);
5449 
5450       // Create a copy of the argument in the local area of the current
5451       // stack frame.
5452       SDValue MemcpyCall =
5453         CreateCopyOfByValArgument(Arg, PtrOff,
5454                                   CallSeqStart.getNode()->getOperand(0),
5455                                   Flags, DAG, dl);
5456 
5457       // This must go outside the CALLSEQ_START..END.
5458       SDValue NewCallSeqStart = DAG.getCALLSEQ_START(MemcpyCall, NumBytes, 0,
5459                                                      SDLoc(MemcpyCall));
5460       DAG.ReplaceAllUsesWith(CallSeqStart.getNode(),
5461                              NewCallSeqStart.getNode());
5462       Chain = CallSeqStart = NewCallSeqStart;
5463 
5464       // Pass the address of the aggregate copy on the stack either in a
5465       // physical register or in the parameter list area of the current stack
5466       // frame to the callee.
5467       Arg = PtrOff;
5468     }
5469 
5470     if (VA.isRegLoc()) {
5471       if (Arg.getValueType() == MVT::i1)
5472         Arg = DAG.getNode(ISD::ZERO_EXTEND, dl, MVT::i32, Arg);
5473 
5474       seenFloatArg |= VA.getLocVT().isFloatingPoint();
5475       // Put argument in a physical register.
5476       RegsToPass.push_back(std::make_pair(VA.getLocReg(), Arg));
5477     } else {
5478       // Put argument in the parameter list area of the current stack frame.
5479       assert(VA.isMemLoc());
5480       unsigned LocMemOffset = VA.getLocMemOffset();
5481 
5482       if (!isTailCall) {
5483         SDValue PtrOff = DAG.getIntPtrConstant(LocMemOffset, dl);
5484         PtrOff = DAG.getNode(ISD::ADD, dl, getPointerTy(MF.getDataLayout()),
5485                              StackPtr, PtrOff);
5486 
5487         MemOpChains.push_back(
5488             DAG.getStore(Chain, dl, Arg, PtrOff, MachinePointerInfo()));
5489       } else {
5490         // Calculate and remember argument location.
5491         CalculateTailCallArgDest(DAG, MF, false, Arg, SPDiff, LocMemOffset,
5492                                  TailCallArguments);
5493       }
5494     }
5495   }
5496 
5497   if (!MemOpChains.empty())
5498     Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, MemOpChains);
5499 
5500   // Build a sequence of copy-to-reg nodes chained together with token chain
5501   // and flag operands which copy the outgoing args into the appropriate regs.
5502   SDValue InFlag;
5503   for (unsigned i = 0, e = RegsToPass.size(); i != e; ++i) {
5504     Chain = DAG.getCopyToReg(Chain, dl, RegsToPass[i].first,
5505                              RegsToPass[i].second, InFlag);
5506     InFlag = Chain.getValue(1);
5507   }
5508 
5509   // Set CR bit 6 to true if this is a vararg call with floating args passed in
5510   // registers.
5511   if (isVarArg) {
5512     SDVTList VTs = DAG.getVTList(MVT::Other, MVT::Glue);
5513     SDValue Ops[] = { Chain, InFlag };
5514 
5515     Chain = DAG.getNode(seenFloatArg ? PPCISD::CR6SET : PPCISD::CR6UNSET,
5516                         dl, VTs, makeArrayRef(Ops, InFlag.getNode() ? 2 : 1));
5517 
5518     InFlag = Chain.getValue(1);
5519   }
5520 
5521   if (isTailCall)
5522     PrepareTailCall(DAG, InFlag, Chain, dl, SPDiff, NumBytes, LROp, FPOp,
5523                     TailCallArguments);
5524 
5525   return FinishCall(CallConv, dl, isTailCall, isVarArg, isPatchPoint,
5526                     /* unused except on PPC64 ELFv1 */ false, DAG,
5527                     RegsToPass, InFlag, Chain, CallSeqStart, Callee, SPDiff,
5528                     NumBytes, Ins, InVals, CS);
5529 }
5530 
5531 // Copy an argument into memory, being careful to do this outside the
5532 // call sequence for the call to which the argument belongs.
5533 SDValue PPCTargetLowering::createMemcpyOutsideCallSeq(
5534     SDValue Arg, SDValue PtrOff, SDValue CallSeqStart, ISD::ArgFlagsTy Flags,
5535     SelectionDAG &DAG, const SDLoc &dl) const {
5536   SDValue MemcpyCall = CreateCopyOfByValArgument(Arg, PtrOff,
5537                         CallSeqStart.getNode()->getOperand(0),
5538                         Flags, DAG, dl);
5539   // The MEMCPY must go outside the CALLSEQ_START..END.
5540   int64_t FrameSize = CallSeqStart.getConstantOperandVal(1);
5541   SDValue NewCallSeqStart = DAG.getCALLSEQ_START(MemcpyCall, FrameSize, 0,
5542                                                  SDLoc(MemcpyCall));
5543   DAG.ReplaceAllUsesWith(CallSeqStart.getNode(),
5544                          NewCallSeqStart.getNode());
5545   return NewCallSeqStart;
5546 }
5547 
5548 SDValue PPCTargetLowering::LowerCall_64SVR4(
5549     SDValue Chain, SDValue Callee, CallingConv::ID CallConv, bool isVarArg,
5550     bool isTailCall, bool isPatchPoint,
5551     const SmallVectorImpl<ISD::OutputArg> &Outs,
5552     const SmallVectorImpl<SDValue> &OutVals,
5553     const SmallVectorImpl<ISD::InputArg> &Ins, const SDLoc &dl,
5554     SelectionDAG &DAG, SmallVectorImpl<SDValue> &InVals,
5555     ImmutableCallSite CS) const {
5556   bool isELFv2ABI = Subtarget.isELFv2ABI();
5557   bool isLittleEndian = Subtarget.isLittleEndian();
5558   unsigned NumOps = Outs.size();
5559   bool hasNest = false;
5560   bool IsSibCall = false;
5561 
5562   EVT PtrVT = getPointerTy(DAG.getDataLayout());
5563   unsigned PtrByteSize = 8;
5564 
5565   MachineFunction &MF = DAG.getMachineFunction();
5566 
5567   if (isTailCall && !getTargetMachine().Options.GuaranteedTailCallOpt)
5568     IsSibCall = true;
5569 
5570   // Mark this function as potentially containing a function that contains a
5571   // tail call. As a consequence the frame pointer will be used for dynamicalloc
5572   // and restoring the callers stack pointer in this functions epilog. This is
5573   // done because by tail calling the called function might overwrite the value
5574   // in this function's (MF) stack pointer stack slot 0(SP).
5575   if (getTargetMachine().Options.GuaranteedTailCallOpt &&
5576       CallConv == CallingConv::Fast)
5577     MF.getInfo<PPCFunctionInfo>()->setHasFastCall();
5578 
5579   assert(!(CallConv == CallingConv::Fast && isVarArg) &&
5580          "fastcc not supported on varargs functions");
5581 
5582   // Count how many bytes are to be pushed on the stack, including the linkage
5583   // area, and parameter passing area.  On ELFv1, the linkage area is 48 bytes
5584   // reserved space for [SP][CR][LR][2 x unused][TOC]; on ELFv2, the linkage
5585   // area is 32 bytes reserved space for [SP][CR][LR][TOC].
5586   unsigned LinkageSize = Subtarget.getFrameLowering()->getLinkageSize();
5587   unsigned NumBytes = LinkageSize;
5588   unsigned GPR_idx = 0, FPR_idx = 0, VR_idx = 0;
5589   unsigned &QFPR_idx = FPR_idx;
5590 
5591   static const MCPhysReg GPR[] = {
5592     PPC::X3, PPC::X4, PPC::X5, PPC::X6,
5593     PPC::X7, PPC::X8, PPC::X9, PPC::X10,
5594   };
5595   static const MCPhysReg VR[] = {
5596     PPC::V2, PPC::V3, PPC::V4, PPC::V5, PPC::V6, PPC::V7, PPC::V8,
5597     PPC::V9, PPC::V10, PPC::V11, PPC::V12, PPC::V13
5598   };
5599 
5600   const unsigned NumGPRs = array_lengthof(GPR);
5601   const unsigned NumFPRs = useSoftFloat() ? 0 : 13;
5602   const unsigned NumVRs  = array_lengthof(VR);
5603   const unsigned NumQFPRs = NumFPRs;
5604 
5605   // On ELFv2, we can avoid allocating the parameter area if all the arguments
5606   // can be passed to the callee in registers.
5607   // For the fast calling convention, there is another check below.
5608   // Note: We should keep consistent with LowerFormalArguments_64SVR4()
5609   bool HasParameterArea = !isELFv2ABI || isVarArg || CallConv == CallingConv::Fast;
5610   if (!HasParameterArea) {
5611     unsigned ParamAreaSize = NumGPRs * PtrByteSize;
5612     unsigned AvailableFPRs = NumFPRs;
5613     unsigned AvailableVRs = NumVRs;
5614     unsigned NumBytesTmp = NumBytes;
5615     for (unsigned i = 0; i != NumOps; ++i) {
5616       if (Outs[i].Flags.isNest()) continue;
5617       if (CalculateStackSlotUsed(Outs[i].VT, Outs[i].ArgVT, Outs[i].Flags,
5618                                 PtrByteSize, LinkageSize, ParamAreaSize,
5619                                 NumBytesTmp, AvailableFPRs, AvailableVRs,
5620                                 Subtarget.hasQPX()))
5621         HasParameterArea = true;
5622     }
5623   }
5624 
5625   // When using the fast calling convention, we don't provide backing for
5626   // arguments that will be in registers.
5627   unsigned NumGPRsUsed = 0, NumFPRsUsed = 0, NumVRsUsed = 0;
5628 
5629   // Avoid allocating parameter area for fastcc functions if all the arguments
5630   // can be passed in the registers.
5631   if (CallConv == CallingConv::Fast)
5632     HasParameterArea = false;
5633 
5634   // Add up all the space actually used.
5635   for (unsigned i = 0; i != NumOps; ++i) {
5636     ISD::ArgFlagsTy Flags = Outs[i].Flags;
5637     EVT ArgVT = Outs[i].VT;
5638     EVT OrigVT = Outs[i].ArgVT;
5639 
5640     if (Flags.isNest())
5641       continue;
5642 
5643     if (CallConv == CallingConv::Fast) {
5644       if (Flags.isByVal()) {
5645         NumGPRsUsed += (Flags.getByValSize()+7)/8;
5646         if (NumGPRsUsed > NumGPRs)
5647           HasParameterArea = true;
5648       } else {
5649         switch (ArgVT.getSimpleVT().SimpleTy) {
5650         default: llvm_unreachable("Unexpected ValueType for argument!");
5651         case MVT::i1:
5652         case MVT::i32:
5653         case MVT::i64:
5654           if (++NumGPRsUsed <= NumGPRs)
5655             continue;
5656           break;
5657         case MVT::v4i32:
5658         case MVT::v8i16:
5659         case MVT::v16i8:
5660         case MVT::v2f64:
5661         case MVT::v2i64:
5662         case MVT::v1i128:
5663         case MVT::f128:
5664           if (++NumVRsUsed <= NumVRs)
5665             continue;
5666           break;
5667         case MVT::v4f32:
5668           // When using QPX, this is handled like a FP register, otherwise, it
5669           // is an Altivec register.
5670           if (Subtarget.hasQPX()) {
5671             if (++NumFPRsUsed <= NumFPRs)
5672               continue;
5673           } else {
5674             if (++NumVRsUsed <= NumVRs)
5675               continue;
5676           }
5677           break;
5678         case MVT::f32:
5679         case MVT::f64:
5680         case MVT::v4f64: // QPX
5681         case MVT::v4i1:  // QPX
5682           if (++NumFPRsUsed <= NumFPRs)
5683             continue;
5684           break;
5685         }
5686         HasParameterArea = true;
5687       }
5688     }
5689 
5690     /* Respect alignment of argument on the stack.  */
5691     unsigned Align =
5692       CalculateStackSlotAlignment(ArgVT, OrigVT, Flags, PtrByteSize);
5693     NumBytes = ((NumBytes + Align - 1) / Align) * Align;
5694 
5695     NumBytes += CalculateStackSlotSize(ArgVT, Flags, PtrByteSize);
5696     if (Flags.isInConsecutiveRegsLast())
5697       NumBytes = ((NumBytes + PtrByteSize - 1)/PtrByteSize) * PtrByteSize;
5698   }
5699 
5700   unsigned NumBytesActuallyUsed = NumBytes;
5701 
5702   // In the old ELFv1 ABI,
5703   // the prolog code of the callee may store up to 8 GPR argument registers to
5704   // the stack, allowing va_start to index over them in memory if its varargs.
5705   // Because we cannot tell if this is needed on the caller side, we have to
5706   // conservatively assume that it is needed.  As such, make sure we have at
5707   // least enough stack space for the caller to store the 8 GPRs.
5708   // In the ELFv2 ABI, we allocate the parameter area iff a callee
5709   // really requires memory operands, e.g. a vararg function.
5710   if (HasParameterArea)
5711     NumBytes = std::max(NumBytes, LinkageSize + 8 * PtrByteSize);
5712   else
5713     NumBytes = LinkageSize;
5714 
5715   // Tail call needs the stack to be aligned.
5716   if (getTargetMachine().Options.GuaranteedTailCallOpt &&
5717       CallConv == CallingConv::Fast)
5718     NumBytes = EnsureStackAlignment(Subtarget.getFrameLowering(), NumBytes);
5719 
5720   int SPDiff = 0;
5721 
5722   // Calculate by how many bytes the stack has to be adjusted in case of tail
5723   // call optimization.
5724   if (!IsSibCall)
5725     SPDiff = CalculateTailCallSPDiff(DAG, isTailCall, NumBytes);
5726 
5727   // To protect arguments on the stack from being clobbered in a tail call,
5728   // force all the loads to happen before doing any other lowering.
5729   if (isTailCall)
5730     Chain = DAG.getStackArgumentTokenFactor(Chain);
5731 
5732   // Adjust the stack pointer for the new arguments...
5733   // These operations are automatically eliminated by the prolog/epilog pass
5734   if (!IsSibCall)
5735     Chain = DAG.getCALLSEQ_START(Chain, NumBytes, 0, dl);
5736   SDValue CallSeqStart = Chain;
5737 
5738   // Load the return address and frame pointer so it can be move somewhere else
5739   // later.
5740   SDValue LROp, FPOp;
5741   Chain = EmitTailCallLoadFPAndRetAddr(DAG, SPDiff, Chain, LROp, FPOp, dl);
5742 
5743   // Set up a copy of the stack pointer for use loading and storing any
5744   // arguments that may not fit in the registers available for argument
5745   // passing.
5746   SDValue StackPtr = DAG.getRegister(PPC::X1, MVT::i64);
5747 
5748   // Figure out which arguments are going to go in registers, and which in
5749   // memory.  Also, if this is a vararg function, floating point operations
5750   // must be stored to our stack, and loaded into integer regs as well, if
5751   // any integer regs are available for argument passing.
5752   unsigned ArgOffset = LinkageSize;
5753 
5754   SmallVector<std::pair<unsigned, SDValue>, 8> RegsToPass;
5755   SmallVector<TailCallArgumentInfo, 8> TailCallArguments;
5756 
5757   SmallVector<SDValue, 8> MemOpChains;
5758   for (unsigned i = 0; i != NumOps; ++i) {
5759     SDValue Arg = OutVals[i];
5760     ISD::ArgFlagsTy Flags = Outs[i].Flags;
5761     EVT ArgVT = Outs[i].VT;
5762     EVT OrigVT = Outs[i].ArgVT;
5763 
5764     // PtrOff will be used to store the current argument to the stack if a
5765     // register cannot be found for it.
5766     SDValue PtrOff;
5767 
5768     // We re-align the argument offset for each argument, except when using the
5769     // fast calling convention, when we need to make sure we do that only when
5770     // we'll actually use a stack slot.
5771     auto ComputePtrOff = [&]() {
5772       /* Respect alignment of argument on the stack.  */
5773       unsigned Align =
5774         CalculateStackSlotAlignment(ArgVT, OrigVT, Flags, PtrByteSize);
5775       ArgOffset = ((ArgOffset + Align - 1) / Align) * Align;
5776 
5777       PtrOff = DAG.getConstant(ArgOffset, dl, StackPtr.getValueType());
5778 
5779       PtrOff = DAG.getNode(ISD::ADD, dl, PtrVT, StackPtr, PtrOff);
5780     };
5781 
5782     if (CallConv != CallingConv::Fast) {
5783       ComputePtrOff();
5784 
5785       /* Compute GPR index associated with argument offset.  */
5786       GPR_idx = (ArgOffset - LinkageSize) / PtrByteSize;
5787       GPR_idx = std::min(GPR_idx, NumGPRs);
5788     }
5789 
5790     // Promote integers to 64-bit values.
5791     if (Arg.getValueType() == MVT::i32 || Arg.getValueType() == MVT::i1) {
5792       // FIXME: Should this use ANY_EXTEND if neither sext nor zext?
5793       unsigned ExtOp = Flags.isSExt() ? ISD::SIGN_EXTEND : ISD::ZERO_EXTEND;
5794       Arg = DAG.getNode(ExtOp, dl, MVT::i64, Arg);
5795     }
5796 
5797     // FIXME memcpy is used way more than necessary.  Correctness first.
5798     // Note: "by value" is code for passing a structure by value, not
5799     // basic types.
5800     if (Flags.isByVal()) {
5801       // Note: Size includes alignment padding, so
5802       //   struct x { short a; char b; }
5803       // will have Size = 4.  With #pragma pack(1), it will have Size = 3.
5804       // These are the proper values we need for right-justifying the
5805       // aggregate in a parameter register.
5806       unsigned Size = Flags.getByValSize();
5807 
5808       // An empty aggregate parameter takes up no storage and no
5809       // registers.
5810       if (Size == 0)
5811         continue;
5812 
5813       if (CallConv == CallingConv::Fast)
5814         ComputePtrOff();
5815 
5816       // All aggregates smaller than 8 bytes must be passed right-justified.
5817       if (Size==1 || Size==2 || Size==4) {
5818         EVT VT = (Size==1) ? MVT::i8 : ((Size==2) ? MVT::i16 : MVT::i32);
5819         if (GPR_idx != NumGPRs) {
5820           SDValue Load = DAG.getExtLoad(ISD::EXTLOAD, dl, PtrVT, Chain, Arg,
5821                                         MachinePointerInfo(), VT);
5822           MemOpChains.push_back(Load.getValue(1));
5823           RegsToPass.push_back(std::make_pair(GPR[GPR_idx++], Load));
5824 
5825           ArgOffset += PtrByteSize;
5826           continue;
5827         }
5828       }
5829 
5830       if (GPR_idx == NumGPRs && Size < 8) {
5831         SDValue AddPtr = PtrOff;
5832         if (!isLittleEndian) {
5833           SDValue Const = DAG.getConstant(PtrByteSize - Size, dl,
5834                                           PtrOff.getValueType());
5835           AddPtr = DAG.getNode(ISD::ADD, dl, PtrVT, PtrOff, Const);
5836         }
5837         Chain = CallSeqStart = createMemcpyOutsideCallSeq(Arg, AddPtr,
5838                                                           CallSeqStart,
5839                                                           Flags, DAG, dl);
5840         ArgOffset += PtrByteSize;
5841         continue;
5842       }
5843       // Copy entire object into memory.  There are cases where gcc-generated
5844       // code assumes it is there, even if it could be put entirely into
5845       // registers.  (This is not what the doc says.)
5846 
5847       // FIXME: The above statement is likely due to a misunderstanding of the
5848       // documents.  All arguments must be copied into the parameter area BY
5849       // THE CALLEE in the event that the callee takes the address of any
5850       // formal argument.  That has not yet been implemented.  However, it is
5851       // reasonable to use the stack area as a staging area for the register
5852       // load.
5853 
5854       // Skip this for small aggregates, as we will use the same slot for a
5855       // right-justified copy, below.
5856       if (Size >= 8)
5857         Chain = CallSeqStart = createMemcpyOutsideCallSeq(Arg, PtrOff,
5858                                                           CallSeqStart,
5859                                                           Flags, DAG, dl);
5860 
5861       // When a register is available, pass a small aggregate right-justified.
5862       if (Size < 8 && GPR_idx != NumGPRs) {
5863         // The easiest way to get this right-justified in a register
5864         // is to copy the structure into the rightmost portion of a
5865         // local variable slot, then load the whole slot into the
5866         // register.
5867         // FIXME: The memcpy seems to produce pretty awful code for
5868         // small aggregates, particularly for packed ones.
5869         // FIXME: It would be preferable to use the slot in the
5870         // parameter save area instead of a new local variable.
5871         SDValue AddPtr = PtrOff;
5872         if (!isLittleEndian) {
5873           SDValue Const = DAG.getConstant(8 - Size, dl, PtrOff.getValueType());
5874           AddPtr = DAG.getNode(ISD::ADD, dl, PtrVT, PtrOff, Const);
5875         }
5876         Chain = CallSeqStart = createMemcpyOutsideCallSeq(Arg, AddPtr,
5877                                                           CallSeqStart,
5878                                                           Flags, DAG, dl);
5879 
5880         // Load the slot into the register.
5881         SDValue Load =
5882             DAG.getLoad(PtrVT, dl, Chain, PtrOff, MachinePointerInfo());
5883         MemOpChains.push_back(Load.getValue(1));
5884         RegsToPass.push_back(std::make_pair(GPR[GPR_idx++], Load));
5885 
5886         // Done with this argument.
5887         ArgOffset += PtrByteSize;
5888         continue;
5889       }
5890 
5891       // For aggregates larger than PtrByteSize, copy the pieces of the
5892       // object that fit into registers from the parameter save area.
5893       for (unsigned j=0; j<Size; j+=PtrByteSize) {
5894         SDValue Const = DAG.getConstant(j, dl, PtrOff.getValueType());
5895         SDValue AddArg = DAG.getNode(ISD::ADD, dl, PtrVT, Arg, Const);
5896         if (GPR_idx != NumGPRs) {
5897           SDValue Load =
5898               DAG.getLoad(PtrVT, dl, Chain, AddArg, MachinePointerInfo());
5899           MemOpChains.push_back(Load.getValue(1));
5900           RegsToPass.push_back(std::make_pair(GPR[GPR_idx++], Load));
5901           ArgOffset += PtrByteSize;
5902         } else {
5903           ArgOffset += ((Size - j + PtrByteSize-1)/PtrByteSize)*PtrByteSize;
5904           break;
5905         }
5906       }
5907       continue;
5908     }
5909 
5910     switch (Arg.getSimpleValueType().SimpleTy) {
5911     default: llvm_unreachable("Unexpected ValueType for argument!");
5912     case MVT::i1:
5913     case MVT::i32:
5914     case MVT::i64:
5915       if (Flags.isNest()) {
5916         // The 'nest' parameter, if any, is passed in R11.
5917         RegsToPass.push_back(std::make_pair(PPC::X11, Arg));
5918         hasNest = true;
5919         break;
5920       }
5921 
5922       // These can be scalar arguments or elements of an integer array type
5923       // passed directly.  Clang may use those instead of "byval" aggregate
5924       // types to avoid forcing arguments to memory unnecessarily.
5925       if (GPR_idx != NumGPRs) {
5926         RegsToPass.push_back(std::make_pair(GPR[GPR_idx++], Arg));
5927       } else {
5928         if (CallConv == CallingConv::Fast)
5929           ComputePtrOff();
5930 
5931         assert(HasParameterArea &&
5932                "Parameter area must exist to pass an argument in memory.");
5933         LowerMemOpCallTo(DAG, MF, Chain, Arg, PtrOff, SPDiff, ArgOffset,
5934                          true, isTailCall, false, MemOpChains,
5935                          TailCallArguments, dl);
5936         if (CallConv == CallingConv::Fast)
5937           ArgOffset += PtrByteSize;
5938       }
5939       if (CallConv != CallingConv::Fast)
5940         ArgOffset += PtrByteSize;
5941       break;
5942     case MVT::f32:
5943     case MVT::f64: {
5944       // These can be scalar arguments or elements of a float array type
5945       // passed directly.  The latter are used to implement ELFv2 homogenous
5946       // float aggregates.
5947 
5948       // Named arguments go into FPRs first, and once they overflow, the
5949       // remaining arguments go into GPRs and then the parameter save area.
5950       // Unnamed arguments for vararg functions always go to GPRs and
5951       // then the parameter save area.  For now, put all arguments to vararg
5952       // routines always in both locations (FPR *and* GPR or stack slot).
5953       bool NeedGPROrStack = isVarArg || FPR_idx == NumFPRs;
5954       bool NeededLoad = false;
5955 
5956       // First load the argument into the next available FPR.
5957       if (FPR_idx != NumFPRs)
5958         RegsToPass.push_back(std::make_pair(FPR[FPR_idx++], Arg));
5959 
5960       // Next, load the argument into GPR or stack slot if needed.
5961       if (!NeedGPROrStack)
5962         ;
5963       else if (GPR_idx != NumGPRs && CallConv != CallingConv::Fast) {
5964         // FIXME: We may want to re-enable this for CallingConv::Fast on the P8
5965         // once we support fp <-> gpr moves.
5966 
5967         // In the non-vararg case, this can only ever happen in the
5968         // presence of f32 array types, since otherwise we never run
5969         // out of FPRs before running out of GPRs.
5970         SDValue ArgVal;
5971 
5972         // Double values are always passed in a single GPR.
5973         if (Arg.getValueType() != MVT::f32) {
5974           ArgVal = DAG.getNode(ISD::BITCAST, dl, MVT::i64, Arg);
5975 
5976         // Non-array float values are extended and passed in a GPR.
5977         } else if (!Flags.isInConsecutiveRegs()) {
5978           ArgVal = DAG.getNode(ISD::BITCAST, dl, MVT::i32, Arg);
5979           ArgVal = DAG.getNode(ISD::ANY_EXTEND, dl, MVT::i64, ArgVal);
5980 
5981         // If we have an array of floats, we collect every odd element
5982         // together with its predecessor into one GPR.
5983         } else if (ArgOffset % PtrByteSize != 0) {
5984           SDValue Lo, Hi;
5985           Lo = DAG.getNode(ISD::BITCAST, dl, MVT::i32, OutVals[i - 1]);
5986           Hi = DAG.getNode(ISD::BITCAST, dl, MVT::i32, Arg);
5987           if (!isLittleEndian)
5988             std::swap(Lo, Hi);
5989           ArgVal = DAG.getNode(ISD::BUILD_PAIR, dl, MVT::i64, Lo, Hi);
5990 
5991         // The final element, if even, goes into the first half of a GPR.
5992         } else if (Flags.isInConsecutiveRegsLast()) {
5993           ArgVal = DAG.getNode(ISD::BITCAST, dl, MVT::i32, Arg);
5994           ArgVal = DAG.getNode(ISD::ANY_EXTEND, dl, MVT::i64, ArgVal);
5995           if (!isLittleEndian)
5996             ArgVal = DAG.getNode(ISD::SHL, dl, MVT::i64, ArgVal,
5997                                  DAG.getConstant(32, dl, MVT::i32));
5998 
5999         // Non-final even elements are skipped; they will be handled
6000         // together the with subsequent argument on the next go-around.
6001         } else
6002           ArgVal = SDValue();
6003 
6004         if (ArgVal.getNode())
6005           RegsToPass.push_back(std::make_pair(GPR[GPR_idx++], ArgVal));
6006       } else {
6007         if (CallConv == CallingConv::Fast)
6008           ComputePtrOff();
6009 
6010         // Single-precision floating-point values are mapped to the
6011         // second (rightmost) word of the stack doubleword.
6012         if (Arg.getValueType() == MVT::f32 &&
6013             !isLittleEndian && !Flags.isInConsecutiveRegs()) {
6014           SDValue ConstFour = DAG.getConstant(4, dl, PtrOff.getValueType());
6015           PtrOff = DAG.getNode(ISD::ADD, dl, PtrVT, PtrOff, ConstFour);
6016         }
6017 
6018         assert(HasParameterArea &&
6019                "Parameter area must exist to pass an argument in memory.");
6020         LowerMemOpCallTo(DAG, MF, Chain, Arg, PtrOff, SPDiff, ArgOffset,
6021                          true, isTailCall, false, MemOpChains,
6022                          TailCallArguments, dl);
6023 
6024         NeededLoad = true;
6025       }
6026       // When passing an array of floats, the array occupies consecutive
6027       // space in the argument area; only round up to the next doubleword
6028       // at the end of the array.  Otherwise, each float takes 8 bytes.
6029       if (CallConv != CallingConv::Fast || NeededLoad) {
6030         ArgOffset += (Arg.getValueType() == MVT::f32 &&
6031                       Flags.isInConsecutiveRegs()) ? 4 : 8;
6032         if (Flags.isInConsecutiveRegsLast())
6033           ArgOffset = ((ArgOffset + PtrByteSize - 1)/PtrByteSize) * PtrByteSize;
6034       }
6035       break;
6036     }
6037     case MVT::v4f32:
6038     case MVT::v4i32:
6039     case MVT::v8i16:
6040     case MVT::v16i8:
6041     case MVT::v2f64:
6042     case MVT::v2i64:
6043     case MVT::v1i128:
6044     case MVT::f128:
6045       if (!Subtarget.hasQPX()) {
6046       // These can be scalar arguments or elements of a vector array type
6047       // passed directly.  The latter are used to implement ELFv2 homogenous
6048       // vector aggregates.
6049 
6050       // For a varargs call, named arguments go into VRs or on the stack as
6051       // usual; unnamed arguments always go to the stack or the corresponding
6052       // GPRs when within range.  For now, we always put the value in both
6053       // locations (or even all three).
6054       if (isVarArg) {
6055         assert(HasParameterArea &&
6056                "Parameter area must exist if we have a varargs call.");
6057         // We could elide this store in the case where the object fits
6058         // entirely in R registers.  Maybe later.
6059         SDValue Store =
6060             DAG.getStore(Chain, dl, Arg, PtrOff, MachinePointerInfo());
6061         MemOpChains.push_back(Store);
6062         if (VR_idx != NumVRs) {
6063           SDValue Load =
6064               DAG.getLoad(MVT::v4f32, dl, Store, PtrOff, MachinePointerInfo());
6065           MemOpChains.push_back(Load.getValue(1));
6066           RegsToPass.push_back(std::make_pair(VR[VR_idx++], Load));
6067         }
6068         ArgOffset += 16;
6069         for (unsigned i=0; i<16; i+=PtrByteSize) {
6070           if (GPR_idx == NumGPRs)
6071             break;
6072           SDValue Ix = DAG.getNode(ISD::ADD, dl, PtrVT, PtrOff,
6073                                    DAG.getConstant(i, dl, PtrVT));
6074           SDValue Load =
6075               DAG.getLoad(PtrVT, dl, Store, Ix, MachinePointerInfo());
6076           MemOpChains.push_back(Load.getValue(1));
6077           RegsToPass.push_back(std::make_pair(GPR[GPR_idx++], Load));
6078         }
6079         break;
6080       }
6081 
6082       // Non-varargs Altivec params go into VRs or on the stack.
6083       if (VR_idx != NumVRs) {
6084         RegsToPass.push_back(std::make_pair(VR[VR_idx++], Arg));
6085       } else {
6086         if (CallConv == CallingConv::Fast)
6087           ComputePtrOff();
6088 
6089         assert(HasParameterArea &&
6090                "Parameter area must exist to pass an argument in memory.");
6091         LowerMemOpCallTo(DAG, MF, Chain, Arg, PtrOff, SPDiff, ArgOffset,
6092                          true, isTailCall, true, MemOpChains,
6093                          TailCallArguments, dl);
6094         if (CallConv == CallingConv::Fast)
6095           ArgOffset += 16;
6096       }
6097 
6098       if (CallConv != CallingConv::Fast)
6099         ArgOffset += 16;
6100       break;
6101       } // not QPX
6102 
6103       assert(Arg.getValueType().getSimpleVT().SimpleTy == MVT::v4f32 &&
6104              "Invalid QPX parameter type");
6105 
6106       /* fall through */
6107     case MVT::v4f64:
6108     case MVT::v4i1: {
6109       bool IsF32 = Arg.getValueType().getSimpleVT().SimpleTy == MVT::v4f32;
6110       if (isVarArg) {
6111         assert(HasParameterArea &&
6112                "Parameter area must exist if we have a varargs call.");
6113         // We could elide this store in the case where the object fits
6114         // entirely in R registers.  Maybe later.
6115         SDValue Store =
6116             DAG.getStore(Chain, dl, Arg, PtrOff, MachinePointerInfo());
6117         MemOpChains.push_back(Store);
6118         if (QFPR_idx != NumQFPRs) {
6119           SDValue Load = DAG.getLoad(IsF32 ? MVT::v4f32 : MVT::v4f64, dl, Store,
6120                                      PtrOff, MachinePointerInfo());
6121           MemOpChains.push_back(Load.getValue(1));
6122           RegsToPass.push_back(std::make_pair(QFPR[QFPR_idx++], Load));
6123         }
6124         ArgOffset += (IsF32 ? 16 : 32);
6125         for (unsigned i = 0; i < (IsF32 ? 16U : 32U); i += PtrByteSize) {
6126           if (GPR_idx == NumGPRs)
6127             break;
6128           SDValue Ix = DAG.getNode(ISD::ADD, dl, PtrVT, PtrOff,
6129                                    DAG.getConstant(i, dl, PtrVT));
6130           SDValue Load =
6131               DAG.getLoad(PtrVT, dl, Store, Ix, MachinePointerInfo());
6132           MemOpChains.push_back(Load.getValue(1));
6133           RegsToPass.push_back(std::make_pair(GPR[GPR_idx++], Load));
6134         }
6135         break;
6136       }
6137 
6138       // Non-varargs QPX params go into registers or on the stack.
6139       if (QFPR_idx != NumQFPRs) {
6140         RegsToPass.push_back(std::make_pair(QFPR[QFPR_idx++], Arg));
6141       } else {
6142         if (CallConv == CallingConv::Fast)
6143           ComputePtrOff();
6144 
6145         assert(HasParameterArea &&
6146                "Parameter area must exist to pass an argument in memory.");
6147         LowerMemOpCallTo(DAG, MF, Chain, Arg, PtrOff, SPDiff, ArgOffset,
6148                          true, isTailCall, true, MemOpChains,
6149                          TailCallArguments, dl);
6150         if (CallConv == CallingConv::Fast)
6151           ArgOffset += (IsF32 ? 16 : 32);
6152       }
6153 
6154       if (CallConv != CallingConv::Fast)
6155         ArgOffset += (IsF32 ? 16 : 32);
6156       break;
6157       }
6158     }
6159   }
6160 
6161   assert((!HasParameterArea || NumBytesActuallyUsed == ArgOffset) &&
6162          "mismatch in size of parameter area");
6163   (void)NumBytesActuallyUsed;
6164 
6165   if (!MemOpChains.empty())
6166     Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, MemOpChains);
6167 
6168   // Check if this is an indirect call (MTCTR/BCTRL).
6169   // See PrepareCall() for more information about calls through function
6170   // pointers in the 64-bit SVR4 ABI.
6171   if (!isTailCall && !isPatchPoint &&
6172       !isFunctionGlobalAddress(Callee) &&
6173       !isa<ExternalSymbolSDNode>(Callee)) {
6174     // Load r2 into a virtual register and store it to the TOC save area.
6175     setUsesTOCBasePtr(DAG);
6176     SDValue Val = DAG.getCopyFromReg(Chain, dl, PPC::X2, MVT::i64);
6177     // TOC save area offset.
6178     unsigned TOCSaveOffset = Subtarget.getFrameLowering()->getTOCSaveOffset();
6179     SDValue PtrOff = DAG.getIntPtrConstant(TOCSaveOffset, dl);
6180     SDValue AddPtr = DAG.getNode(ISD::ADD, dl, PtrVT, StackPtr, PtrOff);
6181     Chain = DAG.getStore(
6182         Val.getValue(1), dl, Val, AddPtr,
6183         MachinePointerInfo::getStack(DAG.getMachineFunction(), TOCSaveOffset));
6184     // In the ELFv2 ABI, R12 must contain the address of an indirect callee.
6185     // This does not mean the MTCTR instruction must use R12; it's easier
6186     // to model this as an extra parameter, so do that.
6187     if (isELFv2ABI && !isPatchPoint)
6188       RegsToPass.push_back(std::make_pair((unsigned)PPC::X12, Callee));
6189   }
6190 
6191   // Build a sequence of copy-to-reg nodes chained together with token chain
6192   // and flag operands which copy the outgoing args into the appropriate regs.
6193   SDValue InFlag;
6194   for (unsigned i = 0, e = RegsToPass.size(); i != e; ++i) {
6195     Chain = DAG.getCopyToReg(Chain, dl, RegsToPass[i].first,
6196                              RegsToPass[i].second, InFlag);
6197     InFlag = Chain.getValue(1);
6198   }
6199 
6200   if (isTailCall && !IsSibCall)
6201     PrepareTailCall(DAG, InFlag, Chain, dl, SPDiff, NumBytes, LROp, FPOp,
6202                     TailCallArguments);
6203 
6204   return FinishCall(CallConv, dl, isTailCall, isVarArg, isPatchPoint, hasNest,
6205                     DAG, RegsToPass, InFlag, Chain, CallSeqStart, Callee,
6206                     SPDiff, NumBytes, Ins, InVals, CS);
6207 }
6208 
6209 SDValue PPCTargetLowering::LowerCall_Darwin(
6210     SDValue Chain, SDValue Callee, CallingConv::ID CallConv, bool isVarArg,
6211     bool isTailCall, bool isPatchPoint,
6212     const SmallVectorImpl<ISD::OutputArg> &Outs,
6213     const SmallVectorImpl<SDValue> &OutVals,
6214     const SmallVectorImpl<ISD::InputArg> &Ins, const SDLoc &dl,
6215     SelectionDAG &DAG, SmallVectorImpl<SDValue> &InVals,
6216     ImmutableCallSite CS) const {
6217   unsigned NumOps = Outs.size();
6218 
6219   EVT PtrVT = getPointerTy(DAG.getDataLayout());
6220   bool isPPC64 = PtrVT == MVT::i64;
6221   unsigned PtrByteSize = isPPC64 ? 8 : 4;
6222 
6223   MachineFunction &MF = DAG.getMachineFunction();
6224 
6225   // Mark this function as potentially containing a function that contains a
6226   // tail call. As a consequence the frame pointer will be used for dynamicalloc
6227   // and restoring the callers stack pointer in this functions epilog. This is
6228   // done because by tail calling the called function might overwrite the value
6229   // in this function's (MF) stack pointer stack slot 0(SP).
6230   if (getTargetMachine().Options.GuaranteedTailCallOpt &&
6231       CallConv == CallingConv::Fast)
6232     MF.getInfo<PPCFunctionInfo>()->setHasFastCall();
6233 
6234   // Count how many bytes are to be pushed on the stack, including the linkage
6235   // area, and parameter passing area.  We start with 24/48 bytes, which is
6236   // prereserved space for [SP][CR][LR][3 x unused].
6237   unsigned LinkageSize = Subtarget.getFrameLowering()->getLinkageSize();
6238   unsigned NumBytes = LinkageSize;
6239 
6240   // Add up all the space actually used.
6241   // In 32-bit non-varargs calls, Altivec parameters all go at the end; usually
6242   // they all go in registers, but we must reserve stack space for them for
6243   // possible use by the caller.  In varargs or 64-bit calls, parameters are
6244   // assigned stack space in order, with padding so Altivec parameters are
6245   // 16-byte aligned.
6246   unsigned nAltivecParamsAtEnd = 0;
6247   for (unsigned i = 0; i != NumOps; ++i) {
6248     ISD::ArgFlagsTy Flags = Outs[i].Flags;
6249     EVT ArgVT = Outs[i].VT;
6250     // Varargs Altivec parameters are padded to a 16 byte boundary.
6251     if (ArgVT == MVT::v4f32 || ArgVT == MVT::v4i32 ||
6252         ArgVT == MVT::v8i16 || ArgVT == MVT::v16i8 ||
6253         ArgVT == MVT::v2f64 || ArgVT == MVT::v2i64) {
6254       if (!isVarArg && !isPPC64) {
6255         // Non-varargs Altivec parameters go after all the non-Altivec
6256         // parameters; handle those later so we know how much padding we need.
6257         nAltivecParamsAtEnd++;
6258         continue;
6259       }
6260       // Varargs and 64-bit Altivec parameters are padded to 16 byte boundary.
6261       NumBytes = ((NumBytes+15)/16)*16;
6262     }
6263     NumBytes += CalculateStackSlotSize(ArgVT, Flags, PtrByteSize);
6264   }
6265 
6266   // Allow for Altivec parameters at the end, if needed.
6267   if (nAltivecParamsAtEnd) {
6268     NumBytes = ((NumBytes+15)/16)*16;
6269     NumBytes += 16*nAltivecParamsAtEnd;
6270   }
6271 
6272   // The prolog code of the callee may store up to 8 GPR argument registers to
6273   // the stack, allowing va_start to index over them in memory if its varargs.
6274   // Because we cannot tell if this is needed on the caller side, we have to
6275   // conservatively assume that it is needed.  As such, make sure we have at
6276   // least enough stack space for the caller to store the 8 GPRs.
6277   NumBytes = std::max(NumBytes, LinkageSize + 8 * PtrByteSize);
6278 
6279   // Tail call needs the stack to be aligned.
6280   if (getTargetMachine().Options.GuaranteedTailCallOpt &&
6281       CallConv == CallingConv::Fast)
6282     NumBytes = EnsureStackAlignment(Subtarget.getFrameLowering(), NumBytes);
6283 
6284   // Calculate by how many bytes the stack has to be adjusted in case of tail
6285   // call optimization.
6286   int SPDiff = CalculateTailCallSPDiff(DAG, isTailCall, NumBytes);
6287 
6288   // To protect arguments on the stack from being clobbered in a tail call,
6289   // force all the loads to happen before doing any other lowering.
6290   if (isTailCall)
6291     Chain = DAG.getStackArgumentTokenFactor(Chain);
6292 
6293   // Adjust the stack pointer for the new arguments...
6294   // These operations are automatically eliminated by the prolog/epilog pass
6295   Chain = DAG.getCALLSEQ_START(Chain, NumBytes, 0, dl);
6296   SDValue CallSeqStart = Chain;
6297 
6298   // Load the return address and frame pointer so it can be move somewhere else
6299   // later.
6300   SDValue LROp, FPOp;
6301   Chain = EmitTailCallLoadFPAndRetAddr(DAG, SPDiff, Chain, LROp, FPOp, dl);
6302 
6303   // Set up a copy of the stack pointer for use loading and storing any
6304   // arguments that may not fit in the registers available for argument
6305   // passing.
6306   SDValue StackPtr;
6307   if (isPPC64)
6308     StackPtr = DAG.getRegister(PPC::X1, MVT::i64);
6309   else
6310     StackPtr = DAG.getRegister(PPC::R1, MVT::i32);
6311 
6312   // Figure out which arguments are going to go in registers, and which in
6313   // memory.  Also, if this is a vararg function, floating point operations
6314   // must be stored to our stack, and loaded into integer regs as well, if
6315   // any integer regs are available for argument passing.
6316   unsigned ArgOffset = LinkageSize;
6317   unsigned GPR_idx = 0, FPR_idx = 0, VR_idx = 0;
6318 
6319   static const MCPhysReg GPR_32[] = {           // 32-bit registers.
6320     PPC::R3, PPC::R4, PPC::R5, PPC::R6,
6321     PPC::R7, PPC::R8, PPC::R9, PPC::R10,
6322   };
6323   static const MCPhysReg GPR_64[] = {           // 64-bit registers.
6324     PPC::X3, PPC::X4, PPC::X5, PPC::X6,
6325     PPC::X7, PPC::X8, PPC::X9, PPC::X10,
6326   };
6327   static const MCPhysReg VR[] = {
6328     PPC::V2, PPC::V3, PPC::V4, PPC::V5, PPC::V6, PPC::V7, PPC::V8,
6329     PPC::V9, PPC::V10, PPC::V11, PPC::V12, PPC::V13
6330   };
6331   const unsigned NumGPRs = array_lengthof(GPR_32);
6332   const unsigned NumFPRs = 13;
6333   const unsigned NumVRs  = array_lengthof(VR);
6334 
6335   const MCPhysReg *GPR = isPPC64 ? GPR_64 : GPR_32;
6336 
6337   SmallVector<std::pair<unsigned, SDValue>, 8> RegsToPass;
6338   SmallVector<TailCallArgumentInfo, 8> TailCallArguments;
6339 
6340   SmallVector<SDValue, 8> MemOpChains;
6341   for (unsigned i = 0; i != NumOps; ++i) {
6342     SDValue Arg = OutVals[i];
6343     ISD::ArgFlagsTy Flags = Outs[i].Flags;
6344 
6345     // PtrOff will be used to store the current argument to the stack if a
6346     // register cannot be found for it.
6347     SDValue PtrOff;
6348 
6349     PtrOff = DAG.getConstant(ArgOffset, dl, StackPtr.getValueType());
6350 
6351     PtrOff = DAG.getNode(ISD::ADD, dl, PtrVT, StackPtr, PtrOff);
6352 
6353     // On PPC64, promote integers to 64-bit values.
6354     if (isPPC64 && Arg.getValueType() == MVT::i32) {
6355       // FIXME: Should this use ANY_EXTEND if neither sext nor zext?
6356       unsigned ExtOp = Flags.isSExt() ? ISD::SIGN_EXTEND : ISD::ZERO_EXTEND;
6357       Arg = DAG.getNode(ExtOp, dl, MVT::i64, Arg);
6358     }
6359 
6360     // FIXME memcpy is used way more than necessary.  Correctness first.
6361     // Note: "by value" is code for passing a structure by value, not
6362     // basic types.
6363     if (Flags.isByVal()) {
6364       unsigned Size = Flags.getByValSize();
6365       // Very small objects are passed right-justified.  Everything else is
6366       // passed left-justified.
6367       if (Size==1 || Size==2) {
6368         EVT VT = (Size==1) ? MVT::i8 : MVT::i16;
6369         if (GPR_idx != NumGPRs) {
6370           SDValue Load = DAG.getExtLoad(ISD::EXTLOAD, dl, PtrVT, Chain, Arg,
6371                                         MachinePointerInfo(), VT);
6372           MemOpChains.push_back(Load.getValue(1));
6373           RegsToPass.push_back(std::make_pair(GPR[GPR_idx++], Load));
6374 
6375           ArgOffset += PtrByteSize;
6376         } else {
6377           SDValue Const = DAG.getConstant(PtrByteSize - Size, dl,
6378                                           PtrOff.getValueType());
6379           SDValue AddPtr = DAG.getNode(ISD::ADD, dl, PtrVT, PtrOff, Const);
6380           Chain = CallSeqStart = createMemcpyOutsideCallSeq(Arg, AddPtr,
6381                                                             CallSeqStart,
6382                                                             Flags, DAG, dl);
6383           ArgOffset += PtrByteSize;
6384         }
6385         continue;
6386       }
6387       // Copy entire object into memory.  There are cases where gcc-generated
6388       // code assumes it is there, even if it could be put entirely into
6389       // registers.  (This is not what the doc says.)
6390       Chain = CallSeqStart = createMemcpyOutsideCallSeq(Arg, PtrOff,
6391                                                         CallSeqStart,
6392                                                         Flags, DAG, dl);
6393 
6394       // For small aggregates (Darwin only) and aggregates >= PtrByteSize,
6395       // copy the pieces of the object that fit into registers from the
6396       // parameter save area.
6397       for (unsigned j=0; j<Size; j+=PtrByteSize) {
6398         SDValue Const = DAG.getConstant(j, dl, PtrOff.getValueType());
6399         SDValue AddArg = DAG.getNode(ISD::ADD, dl, PtrVT, Arg, Const);
6400         if (GPR_idx != NumGPRs) {
6401           SDValue Load =
6402               DAG.getLoad(PtrVT, dl, Chain, AddArg, MachinePointerInfo());
6403           MemOpChains.push_back(Load.getValue(1));
6404           RegsToPass.push_back(std::make_pair(GPR[GPR_idx++], Load));
6405           ArgOffset += PtrByteSize;
6406         } else {
6407           ArgOffset += ((Size - j + PtrByteSize-1)/PtrByteSize)*PtrByteSize;
6408           break;
6409         }
6410       }
6411       continue;
6412     }
6413 
6414     switch (Arg.getSimpleValueType().SimpleTy) {
6415     default: llvm_unreachable("Unexpected ValueType for argument!");
6416     case MVT::i1:
6417     case MVT::i32:
6418     case MVT::i64:
6419       if (GPR_idx != NumGPRs) {
6420         if (Arg.getValueType() == MVT::i1)
6421           Arg = DAG.getNode(ISD::ZERO_EXTEND, dl, PtrVT, Arg);
6422 
6423         RegsToPass.push_back(std::make_pair(GPR[GPR_idx++], Arg));
6424       } else {
6425         LowerMemOpCallTo(DAG, MF, Chain, Arg, PtrOff, SPDiff, ArgOffset,
6426                          isPPC64, isTailCall, false, MemOpChains,
6427                          TailCallArguments, dl);
6428       }
6429       ArgOffset += PtrByteSize;
6430       break;
6431     case MVT::f32:
6432     case MVT::f64:
6433       if (FPR_idx != NumFPRs) {
6434         RegsToPass.push_back(std::make_pair(FPR[FPR_idx++], Arg));
6435 
6436         if (isVarArg) {
6437           SDValue Store =
6438               DAG.getStore(Chain, dl, Arg, PtrOff, MachinePointerInfo());
6439           MemOpChains.push_back(Store);
6440 
6441           // Float varargs are always shadowed in available integer registers
6442           if (GPR_idx != NumGPRs) {
6443             SDValue Load =
6444                 DAG.getLoad(PtrVT, dl, Store, PtrOff, MachinePointerInfo());
6445             MemOpChains.push_back(Load.getValue(1));
6446             RegsToPass.push_back(std::make_pair(GPR[GPR_idx++], Load));
6447           }
6448           if (GPR_idx != NumGPRs && Arg.getValueType() == MVT::f64 && !isPPC64){
6449             SDValue ConstFour = DAG.getConstant(4, dl, PtrOff.getValueType());
6450             PtrOff = DAG.getNode(ISD::ADD, dl, PtrVT, PtrOff, ConstFour);
6451             SDValue Load =
6452                 DAG.getLoad(PtrVT, dl, Store, PtrOff, MachinePointerInfo());
6453             MemOpChains.push_back(Load.getValue(1));
6454             RegsToPass.push_back(std::make_pair(GPR[GPR_idx++], Load));
6455           }
6456         } else {
6457           // If we have any FPRs remaining, we may also have GPRs remaining.
6458           // Args passed in FPRs consume either 1 (f32) or 2 (f64) available
6459           // GPRs.
6460           if (GPR_idx != NumGPRs)
6461             ++GPR_idx;
6462           if (GPR_idx != NumGPRs && Arg.getValueType() == MVT::f64 &&
6463               !isPPC64)  // PPC64 has 64-bit GPR's obviously :)
6464             ++GPR_idx;
6465         }
6466       } else
6467         LowerMemOpCallTo(DAG, MF, Chain, Arg, PtrOff, SPDiff, ArgOffset,
6468                          isPPC64, isTailCall, false, MemOpChains,
6469                          TailCallArguments, dl);
6470       if (isPPC64)
6471         ArgOffset += 8;
6472       else
6473         ArgOffset += Arg.getValueType() == MVT::f32 ? 4 : 8;
6474       break;
6475     case MVT::v4f32:
6476     case MVT::v4i32:
6477     case MVT::v8i16:
6478     case MVT::v16i8:
6479       if (isVarArg) {
6480         // These go aligned on the stack, or in the corresponding R registers
6481         // when within range.  The Darwin PPC ABI doc claims they also go in
6482         // V registers; in fact gcc does this only for arguments that are
6483         // prototyped, not for those that match the ...  We do it for all
6484         // arguments, seems to work.
6485         while (ArgOffset % 16 !=0) {
6486           ArgOffset += PtrByteSize;
6487           if (GPR_idx != NumGPRs)
6488             GPR_idx++;
6489         }
6490         // We could elide this store in the case where the object fits
6491         // entirely in R registers.  Maybe later.
6492         PtrOff = DAG.getNode(ISD::ADD, dl, PtrVT, StackPtr,
6493                              DAG.getConstant(ArgOffset, dl, PtrVT));
6494         SDValue Store =
6495             DAG.getStore(Chain, dl, Arg, PtrOff, MachinePointerInfo());
6496         MemOpChains.push_back(Store);
6497         if (VR_idx != NumVRs) {
6498           SDValue Load =
6499               DAG.getLoad(MVT::v4f32, dl, Store, PtrOff, MachinePointerInfo());
6500           MemOpChains.push_back(Load.getValue(1));
6501           RegsToPass.push_back(std::make_pair(VR[VR_idx++], Load));
6502         }
6503         ArgOffset += 16;
6504         for (unsigned i=0; i<16; i+=PtrByteSize) {
6505           if (GPR_idx == NumGPRs)
6506             break;
6507           SDValue Ix = DAG.getNode(ISD::ADD, dl, PtrVT, PtrOff,
6508                                    DAG.getConstant(i, dl, PtrVT));
6509           SDValue Load =
6510               DAG.getLoad(PtrVT, dl, Store, Ix, MachinePointerInfo());
6511           MemOpChains.push_back(Load.getValue(1));
6512           RegsToPass.push_back(std::make_pair(GPR[GPR_idx++], Load));
6513         }
6514         break;
6515       }
6516 
6517       // Non-varargs Altivec params generally go in registers, but have
6518       // stack space allocated at the end.
6519       if (VR_idx != NumVRs) {
6520         // Doesn't have GPR space allocated.
6521         RegsToPass.push_back(std::make_pair(VR[VR_idx++], Arg));
6522       } else if (nAltivecParamsAtEnd==0) {
6523         // We are emitting Altivec params in order.
6524         LowerMemOpCallTo(DAG, MF, Chain, Arg, PtrOff, SPDiff, ArgOffset,
6525                          isPPC64, isTailCall, true, MemOpChains,
6526                          TailCallArguments, dl);
6527         ArgOffset += 16;
6528       }
6529       break;
6530     }
6531   }
6532   // If all Altivec parameters fit in registers, as they usually do,
6533   // they get stack space following the non-Altivec parameters.  We
6534   // don't track this here because nobody below needs it.
6535   // If there are more Altivec parameters than fit in registers emit
6536   // the stores here.
6537   if (!isVarArg && nAltivecParamsAtEnd > NumVRs) {
6538     unsigned j = 0;
6539     // Offset is aligned; skip 1st 12 params which go in V registers.
6540     ArgOffset = ((ArgOffset+15)/16)*16;
6541     ArgOffset += 12*16;
6542     for (unsigned i = 0; i != NumOps; ++i) {
6543       SDValue Arg = OutVals[i];
6544       EVT ArgType = Outs[i].VT;
6545       if (ArgType==MVT::v4f32 || ArgType==MVT::v4i32 ||
6546           ArgType==MVT::v8i16 || ArgType==MVT::v16i8) {
6547         if (++j > NumVRs) {
6548           SDValue PtrOff;
6549           // We are emitting Altivec params in order.
6550           LowerMemOpCallTo(DAG, MF, Chain, Arg, PtrOff, SPDiff, ArgOffset,
6551                            isPPC64, isTailCall, true, MemOpChains,
6552                            TailCallArguments, dl);
6553           ArgOffset += 16;
6554         }
6555       }
6556     }
6557   }
6558 
6559   if (!MemOpChains.empty())
6560     Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, MemOpChains);
6561 
6562   // On Darwin, R12 must contain the address of an indirect callee.  This does
6563   // not mean the MTCTR instruction must use R12; it's easier to model this as
6564   // an extra parameter, so do that.
6565   if (!isTailCall &&
6566       !isFunctionGlobalAddress(Callee) &&
6567       !isa<ExternalSymbolSDNode>(Callee) &&
6568       !isBLACompatibleAddress(Callee, DAG))
6569     RegsToPass.push_back(std::make_pair((unsigned)(isPPC64 ? PPC::X12 :
6570                                                    PPC::R12), Callee));
6571 
6572   // Build a sequence of copy-to-reg nodes chained together with token chain
6573   // and flag operands which copy the outgoing args into the appropriate regs.
6574   SDValue InFlag;
6575   for (unsigned i = 0, e = RegsToPass.size(); i != e; ++i) {
6576     Chain = DAG.getCopyToReg(Chain, dl, RegsToPass[i].first,
6577                              RegsToPass[i].second, InFlag);
6578     InFlag = Chain.getValue(1);
6579   }
6580 
6581   if (isTailCall)
6582     PrepareTailCall(DAG, InFlag, Chain, dl, SPDiff, NumBytes, LROp, FPOp,
6583                     TailCallArguments);
6584 
6585   return FinishCall(CallConv, dl, isTailCall, isVarArg, isPatchPoint,
6586                     /* unused except on PPC64 ELFv1 */ false, DAG,
6587                     RegsToPass, InFlag, Chain, CallSeqStart, Callee, SPDiff,
6588                     NumBytes, Ins, InVals, CS);
6589 }
6590 
6591 bool
6592 PPCTargetLowering::CanLowerReturn(CallingConv::ID CallConv,
6593                                   MachineFunction &MF, bool isVarArg,
6594                                   const SmallVectorImpl<ISD::OutputArg> &Outs,
6595                                   LLVMContext &Context) const {
6596   SmallVector<CCValAssign, 16> RVLocs;
6597   CCState CCInfo(CallConv, isVarArg, MF, RVLocs, Context);
6598   return CCInfo.CheckReturn(
6599       Outs, (Subtarget.isSVR4ABI() && CallConv == CallingConv::Cold)
6600                 ? RetCC_PPC_Cold
6601                 : RetCC_PPC);
6602 }
6603 
6604 SDValue
6605 PPCTargetLowering::LowerReturn(SDValue Chain, CallingConv::ID CallConv,
6606                                bool isVarArg,
6607                                const SmallVectorImpl<ISD::OutputArg> &Outs,
6608                                const SmallVectorImpl<SDValue> &OutVals,
6609                                const SDLoc &dl, SelectionDAG &DAG) const {
6610   SmallVector<CCValAssign, 16> RVLocs;
6611   CCState CCInfo(CallConv, isVarArg, DAG.getMachineFunction(), RVLocs,
6612                  *DAG.getContext());
6613   CCInfo.AnalyzeReturn(Outs,
6614                        (Subtarget.isSVR4ABI() && CallConv == CallingConv::Cold)
6615                            ? RetCC_PPC_Cold
6616                            : RetCC_PPC);
6617 
6618   SDValue Flag;
6619   SmallVector<SDValue, 4> RetOps(1, Chain);
6620 
6621   // Copy the result values into the output registers.
6622   for (unsigned i = 0; i != RVLocs.size(); ++i) {
6623     CCValAssign &VA = RVLocs[i];
6624     assert(VA.isRegLoc() && "Can only return in registers!");
6625 
6626     SDValue Arg = OutVals[i];
6627 
6628     switch (VA.getLocInfo()) {
6629     default: llvm_unreachable("Unknown loc info!");
6630     case CCValAssign::Full: break;
6631     case CCValAssign::AExt:
6632       Arg = DAG.getNode(ISD::ANY_EXTEND, dl, VA.getLocVT(), Arg);
6633       break;
6634     case CCValAssign::ZExt:
6635       Arg = DAG.getNode(ISD::ZERO_EXTEND, dl, VA.getLocVT(), Arg);
6636       break;
6637     case CCValAssign::SExt:
6638       Arg = DAG.getNode(ISD::SIGN_EXTEND, dl, VA.getLocVT(), Arg);
6639       break;
6640     }
6641 
6642     Chain = DAG.getCopyToReg(Chain, dl, VA.getLocReg(), Arg, Flag);
6643     Flag = Chain.getValue(1);
6644     RetOps.push_back(DAG.getRegister(VA.getLocReg(), VA.getLocVT()));
6645   }
6646 
6647   const PPCRegisterInfo *TRI = Subtarget.getRegisterInfo();
6648   const MCPhysReg *I =
6649     TRI->getCalleeSavedRegsViaCopy(&DAG.getMachineFunction());
6650   if (I) {
6651     for (; *I; ++I) {
6652 
6653       if (PPC::G8RCRegClass.contains(*I))
6654         RetOps.push_back(DAG.getRegister(*I, MVT::i64));
6655       else if (PPC::F8RCRegClass.contains(*I))
6656         RetOps.push_back(DAG.getRegister(*I, MVT::getFloatingPointVT(64)));
6657       else if (PPC::CRRCRegClass.contains(*I))
6658         RetOps.push_back(DAG.getRegister(*I, MVT::i1));
6659       else if (PPC::VRRCRegClass.contains(*I))
6660         RetOps.push_back(DAG.getRegister(*I, MVT::Other));
6661       else
6662         llvm_unreachable("Unexpected register class in CSRsViaCopy!");
6663     }
6664   }
6665 
6666   RetOps[0] = Chain;  // Update chain.
6667 
6668   // Add the flag if we have it.
6669   if (Flag.getNode())
6670     RetOps.push_back(Flag);
6671 
6672   return DAG.getNode(PPCISD::RET_FLAG, dl, MVT::Other, RetOps);
6673 }
6674 
6675 SDValue
6676 PPCTargetLowering::LowerGET_DYNAMIC_AREA_OFFSET(SDValue Op,
6677                                                 SelectionDAG &DAG) const {
6678   SDLoc dl(Op);
6679 
6680   // Get the correct type for integers.
6681   EVT IntVT = Op.getValueType();
6682 
6683   // Get the inputs.
6684   SDValue Chain = Op.getOperand(0);
6685   SDValue FPSIdx = getFramePointerFrameIndex(DAG);
6686   // Build a DYNAREAOFFSET node.
6687   SDValue Ops[2] = {Chain, FPSIdx};
6688   SDVTList VTs = DAG.getVTList(IntVT);
6689   return DAG.getNode(PPCISD::DYNAREAOFFSET, dl, VTs, Ops);
6690 }
6691 
6692 SDValue PPCTargetLowering::LowerSTACKRESTORE(SDValue Op,
6693                                              SelectionDAG &DAG) const {
6694   // When we pop the dynamic allocation we need to restore the SP link.
6695   SDLoc dl(Op);
6696 
6697   // Get the correct type for pointers.
6698   EVT PtrVT = getPointerTy(DAG.getDataLayout());
6699 
6700   // Construct the stack pointer operand.
6701   bool isPPC64 = Subtarget.isPPC64();
6702   unsigned SP = isPPC64 ? PPC::X1 : PPC::R1;
6703   SDValue StackPtr = DAG.getRegister(SP, PtrVT);
6704 
6705   // Get the operands for the STACKRESTORE.
6706   SDValue Chain = Op.getOperand(0);
6707   SDValue SaveSP = Op.getOperand(1);
6708 
6709   // Load the old link SP.
6710   SDValue LoadLinkSP =
6711       DAG.getLoad(PtrVT, dl, Chain, StackPtr, MachinePointerInfo());
6712 
6713   // Restore the stack pointer.
6714   Chain = DAG.getCopyToReg(LoadLinkSP.getValue(1), dl, SP, SaveSP);
6715 
6716   // Store the old link SP.
6717   return DAG.getStore(Chain, dl, LoadLinkSP, StackPtr, MachinePointerInfo());
6718 }
6719 
6720 SDValue PPCTargetLowering::getReturnAddrFrameIndex(SelectionDAG &DAG) const {
6721   MachineFunction &MF = DAG.getMachineFunction();
6722   bool isPPC64 = Subtarget.isPPC64();
6723   EVT PtrVT = getPointerTy(MF.getDataLayout());
6724 
6725   // Get current frame pointer save index.  The users of this index will be
6726   // primarily DYNALLOC instructions.
6727   PPCFunctionInfo *FI = MF.getInfo<PPCFunctionInfo>();
6728   int RASI = FI->getReturnAddrSaveIndex();
6729 
6730   // If the frame pointer save index hasn't been defined yet.
6731   if (!RASI) {
6732     // Find out what the fix offset of the frame pointer save area.
6733     int LROffset = Subtarget.getFrameLowering()->getReturnSaveOffset();
6734     // Allocate the frame index for frame pointer save area.
6735     RASI = MF.getFrameInfo().CreateFixedObject(isPPC64? 8 : 4, LROffset, false);
6736     // Save the result.
6737     FI->setReturnAddrSaveIndex(RASI);
6738   }
6739   return DAG.getFrameIndex(RASI, PtrVT);
6740 }
6741 
6742 SDValue
6743 PPCTargetLowering::getFramePointerFrameIndex(SelectionDAG & DAG) const {
6744   MachineFunction &MF = DAG.getMachineFunction();
6745   bool isPPC64 = Subtarget.isPPC64();
6746   EVT PtrVT = getPointerTy(MF.getDataLayout());
6747 
6748   // Get current frame pointer save index.  The users of this index will be
6749   // primarily DYNALLOC instructions.
6750   PPCFunctionInfo *FI = MF.getInfo<PPCFunctionInfo>();
6751   int FPSI = FI->getFramePointerSaveIndex();
6752 
6753   // If the frame pointer save index hasn't been defined yet.
6754   if (!FPSI) {
6755     // Find out what the fix offset of the frame pointer save area.
6756     int FPOffset = Subtarget.getFrameLowering()->getFramePointerSaveOffset();
6757     // Allocate the frame index for frame pointer save area.
6758     FPSI = MF.getFrameInfo().CreateFixedObject(isPPC64? 8 : 4, FPOffset, true);
6759     // Save the result.
6760     FI->setFramePointerSaveIndex(FPSI);
6761   }
6762   return DAG.getFrameIndex(FPSI, PtrVT);
6763 }
6764 
6765 SDValue PPCTargetLowering::LowerDYNAMIC_STACKALLOC(SDValue Op,
6766                                                    SelectionDAG &DAG) const {
6767   // Get the inputs.
6768   SDValue Chain = Op.getOperand(0);
6769   SDValue Size  = Op.getOperand(1);
6770   SDLoc dl(Op);
6771 
6772   // Get the correct type for pointers.
6773   EVT PtrVT = getPointerTy(DAG.getDataLayout());
6774   // Negate the size.
6775   SDValue NegSize = DAG.getNode(ISD::SUB, dl, PtrVT,
6776                                 DAG.getConstant(0, dl, PtrVT), Size);
6777   // Construct a node for the frame pointer save index.
6778   SDValue FPSIdx = getFramePointerFrameIndex(DAG);
6779   // Build a DYNALLOC node.
6780   SDValue Ops[3] = { Chain, NegSize, FPSIdx };
6781   SDVTList VTs = DAG.getVTList(PtrVT, MVT::Other);
6782   return DAG.getNode(PPCISD::DYNALLOC, dl, VTs, Ops);
6783 }
6784 
6785 SDValue PPCTargetLowering::LowerEH_DWARF_CFA(SDValue Op,
6786                                                      SelectionDAG &DAG) const {
6787   MachineFunction &MF = DAG.getMachineFunction();
6788 
6789   bool isPPC64 = Subtarget.isPPC64();
6790   EVT PtrVT = getPointerTy(DAG.getDataLayout());
6791 
6792   int FI = MF.getFrameInfo().CreateFixedObject(isPPC64 ? 8 : 4, 0, false);
6793   return DAG.getFrameIndex(FI, PtrVT);
6794 }
6795 
6796 SDValue PPCTargetLowering::lowerEH_SJLJ_SETJMP(SDValue Op,
6797                                                SelectionDAG &DAG) const {
6798   SDLoc DL(Op);
6799   return DAG.getNode(PPCISD::EH_SJLJ_SETJMP, DL,
6800                      DAG.getVTList(MVT::i32, MVT::Other),
6801                      Op.getOperand(0), Op.getOperand(1));
6802 }
6803 
6804 SDValue PPCTargetLowering::lowerEH_SJLJ_LONGJMP(SDValue Op,
6805                                                 SelectionDAG &DAG) const {
6806   SDLoc DL(Op);
6807   return DAG.getNode(PPCISD::EH_SJLJ_LONGJMP, DL, MVT::Other,
6808                      Op.getOperand(0), Op.getOperand(1));
6809 }
6810 
6811 SDValue PPCTargetLowering::LowerLOAD(SDValue Op, SelectionDAG &DAG) const {
6812   if (Op.getValueType().isVector())
6813     return LowerVectorLoad(Op, DAG);
6814 
6815   assert(Op.getValueType() == MVT::i1 &&
6816          "Custom lowering only for i1 loads");
6817 
6818   // First, load 8 bits into 32 bits, then truncate to 1 bit.
6819 
6820   SDLoc dl(Op);
6821   LoadSDNode *LD = cast<LoadSDNode>(Op);
6822 
6823   SDValue Chain = LD->getChain();
6824   SDValue BasePtr = LD->getBasePtr();
6825   MachineMemOperand *MMO = LD->getMemOperand();
6826 
6827   SDValue NewLD =
6828       DAG.getExtLoad(ISD::EXTLOAD, dl, getPointerTy(DAG.getDataLayout()), Chain,
6829                      BasePtr, MVT::i8, MMO);
6830   SDValue Result = DAG.getNode(ISD::TRUNCATE, dl, MVT::i1, NewLD);
6831 
6832   SDValue Ops[] = { Result, SDValue(NewLD.getNode(), 1) };
6833   return DAG.getMergeValues(Ops, dl);
6834 }
6835 
6836 SDValue PPCTargetLowering::LowerSTORE(SDValue Op, SelectionDAG &DAG) const {
6837   if (Op.getOperand(1).getValueType().isVector())
6838     return LowerVectorStore(Op, DAG);
6839 
6840   assert(Op.getOperand(1).getValueType() == MVT::i1 &&
6841          "Custom lowering only for i1 stores");
6842 
6843   // First, zero extend to 32 bits, then use a truncating store to 8 bits.
6844 
6845   SDLoc dl(Op);
6846   StoreSDNode *ST = cast<StoreSDNode>(Op);
6847 
6848   SDValue Chain = ST->getChain();
6849   SDValue BasePtr = ST->getBasePtr();
6850   SDValue Value = ST->getValue();
6851   MachineMemOperand *MMO = ST->getMemOperand();
6852 
6853   Value = DAG.getNode(ISD::ZERO_EXTEND, dl, getPointerTy(DAG.getDataLayout()),
6854                       Value);
6855   return DAG.getTruncStore(Chain, dl, Value, BasePtr, MVT::i8, MMO);
6856 }
6857 
6858 // FIXME: Remove this once the ANDI glue bug is fixed:
6859 SDValue PPCTargetLowering::LowerTRUNCATE(SDValue Op, SelectionDAG &DAG) const {
6860   assert(Op.getValueType() == MVT::i1 &&
6861          "Custom lowering only for i1 results");
6862 
6863   SDLoc DL(Op);
6864   return DAG.getNode(PPCISD::ANDIo_1_GT_BIT, DL, MVT::i1,
6865                      Op.getOperand(0));
6866 }
6867 
6868 /// LowerSELECT_CC - Lower floating point select_cc's into fsel instruction when
6869 /// possible.
6870 SDValue PPCTargetLowering::LowerSELECT_CC(SDValue Op, SelectionDAG &DAG) const {
6871   // Not FP? Not a fsel.
6872   if (!Op.getOperand(0).getValueType().isFloatingPoint() ||
6873       !Op.getOperand(2).getValueType().isFloatingPoint())
6874     return Op;
6875 
6876   // We might be able to do better than this under some circumstances, but in
6877   // general, fsel-based lowering of select is a finite-math-only optimization.
6878   // For more information, see section F.3 of the 2.06 ISA specification.
6879   if (!DAG.getTarget().Options.NoInfsFPMath ||
6880       !DAG.getTarget().Options.NoNaNsFPMath)
6881     return Op;
6882   // TODO: Propagate flags from the select rather than global settings.
6883   SDNodeFlags Flags;
6884   Flags.setNoInfs(true);
6885   Flags.setNoNaNs(true);
6886 
6887   ISD::CondCode CC = cast<CondCodeSDNode>(Op.getOperand(4))->get();
6888 
6889   EVT ResVT = Op.getValueType();
6890   EVT CmpVT = Op.getOperand(0).getValueType();
6891   SDValue LHS = Op.getOperand(0), RHS = Op.getOperand(1);
6892   SDValue TV  = Op.getOperand(2), FV  = Op.getOperand(3);
6893   SDLoc dl(Op);
6894 
6895   // If the RHS of the comparison is a 0.0, we don't need to do the
6896   // subtraction at all.
6897   SDValue Sel1;
6898   if (isFloatingPointZero(RHS))
6899     switch (CC) {
6900     default: break;       // SETUO etc aren't handled by fsel.
6901     case ISD::SETNE:
6902       std::swap(TV, FV);
6903       LLVM_FALLTHROUGH;
6904     case ISD::SETEQ:
6905       if (LHS.getValueType() == MVT::f32)   // Comparison is always 64-bits
6906         LHS = DAG.getNode(ISD::FP_EXTEND, dl, MVT::f64, LHS);
6907       Sel1 = DAG.getNode(PPCISD::FSEL, dl, ResVT, LHS, TV, FV);
6908       if (Sel1.getValueType() == MVT::f32)   // Comparison is always 64-bits
6909         Sel1 = DAG.getNode(ISD::FP_EXTEND, dl, MVT::f64, Sel1);
6910       return DAG.getNode(PPCISD::FSEL, dl, ResVT,
6911                          DAG.getNode(ISD::FNEG, dl, MVT::f64, LHS), Sel1, FV);
6912     case ISD::SETULT:
6913     case ISD::SETLT:
6914       std::swap(TV, FV);  // fsel is natively setge, swap operands for setlt
6915       LLVM_FALLTHROUGH;
6916     case ISD::SETOGE:
6917     case ISD::SETGE:
6918       if (LHS.getValueType() == MVT::f32)   // Comparison is always 64-bits
6919         LHS = DAG.getNode(ISD::FP_EXTEND, dl, MVT::f64, LHS);
6920       return DAG.getNode(PPCISD::FSEL, dl, ResVT, LHS, TV, FV);
6921     case ISD::SETUGT:
6922     case ISD::SETGT:
6923       std::swap(TV, FV);  // fsel is natively setge, swap operands for setlt
6924       LLVM_FALLTHROUGH;
6925     case ISD::SETOLE:
6926     case ISD::SETLE:
6927       if (LHS.getValueType() == MVT::f32)   // Comparison is always 64-bits
6928         LHS = DAG.getNode(ISD::FP_EXTEND, dl, MVT::f64, LHS);
6929       return DAG.getNode(PPCISD::FSEL, dl, ResVT,
6930                          DAG.getNode(ISD::FNEG, dl, MVT::f64, LHS), TV, FV);
6931     }
6932 
6933   SDValue Cmp;
6934   switch (CC) {
6935   default: break;       // SETUO etc aren't handled by fsel.
6936   case ISD::SETNE:
6937     std::swap(TV, FV);
6938     LLVM_FALLTHROUGH;
6939   case ISD::SETEQ:
6940     Cmp = DAG.getNode(ISD::FSUB, dl, CmpVT, LHS, RHS, Flags);
6941     if (Cmp.getValueType() == MVT::f32)   // Comparison is always 64-bits
6942       Cmp = DAG.getNode(ISD::FP_EXTEND, dl, MVT::f64, Cmp);
6943     Sel1 = DAG.getNode(PPCISD::FSEL, dl, ResVT, Cmp, TV, FV);
6944     if (Sel1.getValueType() == MVT::f32)   // Comparison is always 64-bits
6945       Sel1 = DAG.getNode(ISD::FP_EXTEND, dl, MVT::f64, Sel1);
6946     return DAG.getNode(PPCISD::FSEL, dl, ResVT,
6947                        DAG.getNode(ISD::FNEG, dl, MVT::f64, Cmp), Sel1, FV);
6948   case ISD::SETULT:
6949   case ISD::SETLT:
6950     Cmp = DAG.getNode(ISD::FSUB, dl, CmpVT, LHS, RHS, Flags);
6951     if (Cmp.getValueType() == MVT::f32)   // Comparison is always 64-bits
6952       Cmp = DAG.getNode(ISD::FP_EXTEND, dl, MVT::f64, Cmp);
6953     return DAG.getNode(PPCISD::FSEL, dl, ResVT, Cmp, FV, TV);
6954   case ISD::SETOGE:
6955   case ISD::SETGE:
6956     Cmp = DAG.getNode(ISD::FSUB, dl, CmpVT, LHS, RHS, Flags);
6957     if (Cmp.getValueType() == MVT::f32)   // Comparison is always 64-bits
6958       Cmp = DAG.getNode(ISD::FP_EXTEND, dl, MVT::f64, Cmp);
6959     return DAG.getNode(PPCISD::FSEL, dl, ResVT, Cmp, TV, FV);
6960   case ISD::SETUGT:
6961   case ISD::SETGT:
6962     Cmp = DAG.getNode(ISD::FSUB, dl, CmpVT, RHS, LHS, Flags);
6963     if (Cmp.getValueType() == MVT::f32)   // Comparison is always 64-bits
6964       Cmp = DAG.getNode(ISD::FP_EXTEND, dl, MVT::f64, Cmp);
6965     return DAG.getNode(PPCISD::FSEL, dl, ResVT, Cmp, FV, TV);
6966   case ISD::SETOLE:
6967   case ISD::SETLE:
6968     Cmp = DAG.getNode(ISD::FSUB, dl, CmpVT, RHS, LHS, Flags);
6969     if (Cmp.getValueType() == MVT::f32)   // Comparison is always 64-bits
6970       Cmp = DAG.getNode(ISD::FP_EXTEND, dl, MVT::f64, Cmp);
6971     return DAG.getNode(PPCISD::FSEL, dl, ResVT, Cmp, TV, FV);
6972   }
6973   return Op;
6974 }
6975 
6976 void PPCTargetLowering::LowerFP_TO_INTForReuse(SDValue Op, ReuseLoadInfo &RLI,
6977                                                SelectionDAG &DAG,
6978                                                const SDLoc &dl) const {
6979   assert(Op.getOperand(0).getValueType().isFloatingPoint());
6980   SDValue Src = Op.getOperand(0);
6981   if (Src.getValueType() == MVT::f32)
6982     Src = DAG.getNode(ISD::FP_EXTEND, dl, MVT::f64, Src);
6983 
6984   SDValue Tmp;
6985   switch (Op.getSimpleValueType().SimpleTy) {
6986   default: llvm_unreachable("Unhandled FP_TO_INT type in custom expander!");
6987   case MVT::i32:
6988     Tmp = DAG.getNode(
6989         Op.getOpcode() == ISD::FP_TO_SINT
6990             ? PPCISD::FCTIWZ
6991             : (Subtarget.hasFPCVT() ? PPCISD::FCTIWUZ : PPCISD::FCTIDZ),
6992         dl, MVT::f64, Src);
6993     break;
6994   case MVT::i64:
6995     assert((Op.getOpcode() == ISD::FP_TO_SINT || Subtarget.hasFPCVT()) &&
6996            "i64 FP_TO_UINT is supported only with FPCVT");
6997     Tmp = DAG.getNode(Op.getOpcode()==ISD::FP_TO_SINT ? PPCISD::FCTIDZ :
6998                                                         PPCISD::FCTIDUZ,
6999                       dl, MVT::f64, Src);
7000     break;
7001   }
7002 
7003   // Convert the FP value to an int value through memory.
7004   bool i32Stack = Op.getValueType() == MVT::i32 && Subtarget.hasSTFIWX() &&
7005     (Op.getOpcode() == ISD::FP_TO_SINT || Subtarget.hasFPCVT());
7006   SDValue FIPtr = DAG.CreateStackTemporary(i32Stack ? MVT::i32 : MVT::f64);
7007   int FI = cast<FrameIndexSDNode>(FIPtr)->getIndex();
7008   MachinePointerInfo MPI =
7009       MachinePointerInfo::getFixedStack(DAG.getMachineFunction(), FI);
7010 
7011   // Emit a store to the stack slot.
7012   SDValue Chain;
7013   if (i32Stack) {
7014     MachineFunction &MF = DAG.getMachineFunction();
7015     MachineMemOperand *MMO =
7016       MF.getMachineMemOperand(MPI, MachineMemOperand::MOStore, 4, 4);
7017     SDValue Ops[] = { DAG.getEntryNode(), Tmp, FIPtr };
7018     Chain = DAG.getMemIntrinsicNode(PPCISD::STFIWX, dl,
7019               DAG.getVTList(MVT::Other), Ops, MVT::i32, MMO);
7020   } else
7021     Chain = DAG.getStore(DAG.getEntryNode(), dl, Tmp, FIPtr, MPI);
7022 
7023   // Result is a load from the stack slot.  If loading 4 bytes, make sure to
7024   // add in a bias on big endian.
7025   if (Op.getValueType() == MVT::i32 && !i32Stack) {
7026     FIPtr = DAG.getNode(ISD::ADD, dl, FIPtr.getValueType(), FIPtr,
7027                         DAG.getConstant(4, dl, FIPtr.getValueType()));
7028     MPI = MPI.getWithOffset(Subtarget.isLittleEndian() ? 0 : 4);
7029   }
7030 
7031   RLI.Chain = Chain;
7032   RLI.Ptr = FIPtr;
7033   RLI.MPI = MPI;
7034 }
7035 
7036 /// Custom lowers floating point to integer conversions to use
7037 /// the direct move instructions available in ISA 2.07 to avoid the
7038 /// need for load/store combinations.
7039 SDValue PPCTargetLowering::LowerFP_TO_INTDirectMove(SDValue Op,
7040                                                     SelectionDAG &DAG,
7041                                                     const SDLoc &dl) const {
7042   assert(Op.getOperand(0).getValueType().isFloatingPoint());
7043   SDValue Src = Op.getOperand(0);
7044 
7045   if (Src.getValueType() == MVT::f32)
7046     Src = DAG.getNode(ISD::FP_EXTEND, dl, MVT::f64, Src);
7047 
7048   SDValue Tmp;
7049   switch (Op.getSimpleValueType().SimpleTy) {
7050   default: llvm_unreachable("Unhandled FP_TO_INT type in custom expander!");
7051   case MVT::i32:
7052     Tmp = DAG.getNode(
7053         Op.getOpcode() == ISD::FP_TO_SINT
7054             ? PPCISD::FCTIWZ
7055             : (Subtarget.hasFPCVT() ? PPCISD::FCTIWUZ : PPCISD::FCTIDZ),
7056         dl, MVT::f64, Src);
7057     Tmp = DAG.getNode(PPCISD::MFVSR, dl, MVT::i32, Tmp);
7058     break;
7059   case MVT::i64:
7060     assert((Op.getOpcode() == ISD::FP_TO_SINT || Subtarget.hasFPCVT()) &&
7061            "i64 FP_TO_UINT is supported only with FPCVT");
7062     Tmp = DAG.getNode(Op.getOpcode()==ISD::FP_TO_SINT ? PPCISD::FCTIDZ :
7063                                                         PPCISD::FCTIDUZ,
7064                       dl, MVT::f64, Src);
7065     Tmp = DAG.getNode(PPCISD::MFVSR, dl, MVT::i64, Tmp);
7066     break;
7067   }
7068   return Tmp;
7069 }
7070 
7071 SDValue PPCTargetLowering::LowerFP_TO_INT(SDValue Op, SelectionDAG &DAG,
7072                                           const SDLoc &dl) const {
7073 
7074   // FP to INT conversions are legal for f128.
7075   if (EnableQuadPrecision && (Op->getOperand(0).getValueType() == MVT::f128))
7076     return Op;
7077 
7078   // Expand ppcf128 to i32 by hand for the benefit of llvm-gcc bootstrap on
7079   // PPC (the libcall is not available).
7080   if (Op.getOperand(0).getValueType() == MVT::ppcf128) {
7081     if (Op.getValueType() == MVT::i32) {
7082       if (Op.getOpcode() == ISD::FP_TO_SINT) {
7083         SDValue Lo = DAG.getNode(ISD::EXTRACT_ELEMENT, dl,
7084                                  MVT::f64, Op.getOperand(0),
7085                                  DAG.getIntPtrConstant(0, dl));
7086         SDValue Hi = DAG.getNode(ISD::EXTRACT_ELEMENT, dl,
7087                                  MVT::f64, Op.getOperand(0),
7088                                  DAG.getIntPtrConstant(1, dl));
7089 
7090         // Add the two halves of the long double in round-to-zero mode.
7091         SDValue Res = DAG.getNode(PPCISD::FADDRTZ, dl, MVT::f64, Lo, Hi);
7092 
7093         // Now use a smaller FP_TO_SINT.
7094         return DAG.getNode(ISD::FP_TO_SINT, dl, MVT::i32, Res);
7095       }
7096       if (Op.getOpcode() == ISD::FP_TO_UINT) {
7097         const uint64_t TwoE31[] = {0x41e0000000000000LL, 0};
7098         APFloat APF = APFloat(APFloat::PPCDoubleDouble(), APInt(128, TwoE31));
7099         SDValue Tmp = DAG.getConstantFP(APF, dl, MVT::ppcf128);
7100         //  X>=2^31 ? (int)(X-2^31)+0x80000000 : (int)X
7101         // FIXME: generated code sucks.
7102         // TODO: Are there fast-math-flags to propagate to this FSUB?
7103         SDValue True = DAG.getNode(ISD::FSUB, dl, MVT::ppcf128,
7104                                    Op.getOperand(0), Tmp);
7105         True = DAG.getNode(ISD::FP_TO_SINT, dl, MVT::i32, True);
7106         True = DAG.getNode(ISD::ADD, dl, MVT::i32, True,
7107                            DAG.getConstant(0x80000000, dl, MVT::i32));
7108         SDValue False = DAG.getNode(ISD::FP_TO_SINT, dl, MVT::i32,
7109                                     Op.getOperand(0));
7110         return DAG.getSelectCC(dl, Op.getOperand(0), Tmp, True, False,
7111                                ISD::SETGE);
7112       }
7113     }
7114 
7115     return SDValue();
7116   }
7117 
7118   if (Subtarget.hasDirectMove() && Subtarget.isPPC64())
7119     return LowerFP_TO_INTDirectMove(Op, DAG, dl);
7120 
7121   ReuseLoadInfo RLI;
7122   LowerFP_TO_INTForReuse(Op, RLI, DAG, dl);
7123 
7124   return DAG.getLoad(Op.getValueType(), dl, RLI.Chain, RLI.Ptr, RLI.MPI,
7125                      RLI.Alignment, RLI.MMOFlags(), RLI.AAInfo, RLI.Ranges);
7126 }
7127 
7128 // We're trying to insert a regular store, S, and then a load, L. If the
7129 // incoming value, O, is a load, we might just be able to have our load use the
7130 // address used by O. However, we don't know if anything else will store to
7131 // that address before we can load from it. To prevent this situation, we need
7132 // to insert our load, L, into the chain as a peer of O. To do this, we give L
7133 // the same chain operand as O, we create a token factor from the chain results
7134 // of O and L, and we replace all uses of O's chain result with that token
7135 // factor (see spliceIntoChain below for this last part).
7136 bool PPCTargetLowering::canReuseLoadAddress(SDValue Op, EVT MemVT,
7137                                             ReuseLoadInfo &RLI,
7138                                             SelectionDAG &DAG,
7139                                             ISD::LoadExtType ET) const {
7140   SDLoc dl(Op);
7141   if (ET == ISD::NON_EXTLOAD &&
7142       (Op.getOpcode() == ISD::FP_TO_UINT ||
7143        Op.getOpcode() == ISD::FP_TO_SINT) &&
7144       isOperationLegalOrCustom(Op.getOpcode(),
7145                                Op.getOperand(0).getValueType())) {
7146 
7147     LowerFP_TO_INTForReuse(Op, RLI, DAG, dl);
7148     return true;
7149   }
7150 
7151   LoadSDNode *LD = dyn_cast<LoadSDNode>(Op);
7152   if (!LD || LD->getExtensionType() != ET || LD->isVolatile() ||
7153       LD->isNonTemporal())
7154     return false;
7155   if (LD->getMemoryVT() != MemVT)
7156     return false;
7157 
7158   RLI.Ptr = LD->getBasePtr();
7159   if (LD->isIndexed() && !LD->getOffset().isUndef()) {
7160     assert(LD->getAddressingMode() == ISD::PRE_INC &&
7161            "Non-pre-inc AM on PPC?");
7162     RLI.Ptr = DAG.getNode(ISD::ADD, dl, RLI.Ptr.getValueType(), RLI.Ptr,
7163                           LD->getOffset());
7164   }
7165 
7166   RLI.Chain = LD->getChain();
7167   RLI.MPI = LD->getPointerInfo();
7168   RLI.IsDereferenceable = LD->isDereferenceable();
7169   RLI.IsInvariant = LD->isInvariant();
7170   RLI.Alignment = LD->getAlignment();
7171   RLI.AAInfo = LD->getAAInfo();
7172   RLI.Ranges = LD->getRanges();
7173 
7174   RLI.ResChain = SDValue(LD, LD->isIndexed() ? 2 : 1);
7175   return true;
7176 }
7177 
7178 // Given the head of the old chain, ResChain, insert a token factor containing
7179 // it and NewResChain, and make users of ResChain now be users of that token
7180 // factor.
7181 // TODO: Remove and use DAG::makeEquivalentMemoryOrdering() instead.
7182 void PPCTargetLowering::spliceIntoChain(SDValue ResChain,
7183                                         SDValue NewResChain,
7184                                         SelectionDAG &DAG) const {
7185   if (!ResChain)
7186     return;
7187 
7188   SDLoc dl(NewResChain);
7189 
7190   SDValue TF = DAG.getNode(ISD::TokenFactor, dl, MVT::Other,
7191                            NewResChain, DAG.getUNDEF(MVT::Other));
7192   assert(TF.getNode() != NewResChain.getNode() &&
7193          "A new TF really is required here");
7194 
7195   DAG.ReplaceAllUsesOfValueWith(ResChain, TF);
7196   DAG.UpdateNodeOperands(TF.getNode(), ResChain, NewResChain);
7197 }
7198 
7199 /// Analyze profitability of direct move
7200 /// prefer float load to int load plus direct move
7201 /// when there is no integer use of int load
7202 bool PPCTargetLowering::directMoveIsProfitable(const SDValue &Op) const {
7203   SDNode *Origin = Op.getOperand(0).getNode();
7204   if (Origin->getOpcode() != ISD::LOAD)
7205     return true;
7206 
7207   // If there is no LXSIBZX/LXSIHZX, like Power8,
7208   // prefer direct move if the memory size is 1 or 2 bytes.
7209   MachineMemOperand *MMO = cast<LoadSDNode>(Origin)->getMemOperand();
7210   if (!Subtarget.hasP9Vector() && MMO->getSize() <= 2)
7211     return true;
7212 
7213   for (SDNode::use_iterator UI = Origin->use_begin(),
7214                             UE = Origin->use_end();
7215        UI != UE; ++UI) {
7216 
7217     // Only look at the users of the loaded value.
7218     if (UI.getUse().get().getResNo() != 0)
7219       continue;
7220 
7221     if (UI->getOpcode() != ISD::SINT_TO_FP &&
7222         UI->getOpcode() != ISD::UINT_TO_FP)
7223       return true;
7224   }
7225 
7226   return false;
7227 }
7228 
7229 /// Custom lowers integer to floating point conversions to use
7230 /// the direct move instructions available in ISA 2.07 to avoid the
7231 /// need for load/store combinations.
7232 SDValue PPCTargetLowering::LowerINT_TO_FPDirectMove(SDValue Op,
7233                                                     SelectionDAG &DAG,
7234                                                     const SDLoc &dl) const {
7235   assert((Op.getValueType() == MVT::f32 ||
7236           Op.getValueType() == MVT::f64) &&
7237          "Invalid floating point type as target of conversion");
7238   assert(Subtarget.hasFPCVT() &&
7239          "Int to FP conversions with direct moves require FPCVT");
7240   SDValue FP;
7241   SDValue Src = Op.getOperand(0);
7242   bool SinglePrec = Op.getValueType() == MVT::f32;
7243   bool WordInt = Src.getSimpleValueType().SimpleTy == MVT::i32;
7244   bool Signed = Op.getOpcode() == ISD::SINT_TO_FP;
7245   unsigned ConvOp = Signed ? (SinglePrec ? PPCISD::FCFIDS : PPCISD::FCFID) :
7246                              (SinglePrec ? PPCISD::FCFIDUS : PPCISD::FCFIDU);
7247 
7248   if (WordInt) {
7249     FP = DAG.getNode(Signed ? PPCISD::MTVSRA : PPCISD::MTVSRZ,
7250                      dl, MVT::f64, Src);
7251     FP = DAG.getNode(ConvOp, dl, SinglePrec ? MVT::f32 : MVT::f64, FP);
7252   }
7253   else {
7254     FP = DAG.getNode(PPCISD::MTVSRA, dl, MVT::f64, Src);
7255     FP = DAG.getNode(ConvOp, dl, SinglePrec ? MVT::f32 : MVT::f64, FP);
7256   }
7257 
7258   return FP;
7259 }
7260 
7261 SDValue PPCTargetLowering::LowerINT_TO_FP(SDValue Op,
7262                                           SelectionDAG &DAG) const {
7263   SDLoc dl(Op);
7264 
7265   // Conversions to f128 are legal.
7266   if (EnableQuadPrecision && (Op.getValueType() == MVT::f128))
7267     return Op;
7268 
7269   if (Subtarget.hasQPX() && Op.getOperand(0).getValueType() == MVT::v4i1) {
7270     if (Op.getValueType() != MVT::v4f32 && Op.getValueType() != MVT::v4f64)
7271       return SDValue();
7272 
7273     SDValue Value = Op.getOperand(0);
7274     // The values are now known to be -1 (false) or 1 (true). To convert this
7275     // into 0 (false) and 1 (true), add 1 and then divide by 2 (multiply by 0.5).
7276     // This can be done with an fma and the 0.5 constant: (V+1.0)*0.5 = 0.5*V+0.5
7277     Value = DAG.getNode(PPCISD::QBFLT, dl, MVT::v4f64, Value);
7278 
7279     SDValue FPHalfs = DAG.getConstantFP(0.5, dl, MVT::v4f64);
7280 
7281     Value = DAG.getNode(ISD::FMA, dl, MVT::v4f64, Value, FPHalfs, FPHalfs);
7282 
7283     if (Op.getValueType() != MVT::v4f64)
7284       Value = DAG.getNode(ISD::FP_ROUND, dl,
7285                           Op.getValueType(), Value,
7286                           DAG.getIntPtrConstant(1, dl));
7287     return Value;
7288   }
7289 
7290   // Don't handle ppc_fp128 here; let it be lowered to a libcall.
7291   if (Op.getValueType() != MVT::f32 && Op.getValueType() != MVT::f64)
7292     return SDValue();
7293 
7294   if (Op.getOperand(0).getValueType() == MVT::i1)
7295     return DAG.getNode(ISD::SELECT, dl, Op.getValueType(), Op.getOperand(0),
7296                        DAG.getConstantFP(1.0, dl, Op.getValueType()),
7297                        DAG.getConstantFP(0.0, dl, Op.getValueType()));
7298 
7299   // If we have direct moves, we can do all the conversion, skip the store/load
7300   // however, without FPCVT we can't do most conversions.
7301   if (Subtarget.hasDirectMove() && directMoveIsProfitable(Op) &&
7302       Subtarget.isPPC64() && Subtarget.hasFPCVT())
7303     return LowerINT_TO_FPDirectMove(Op, DAG, dl);
7304 
7305   assert((Op.getOpcode() == ISD::SINT_TO_FP || Subtarget.hasFPCVT()) &&
7306          "UINT_TO_FP is supported only with FPCVT");
7307 
7308   // If we have FCFIDS, then use it when converting to single-precision.
7309   // Otherwise, convert to double-precision and then round.
7310   unsigned FCFOp = (Subtarget.hasFPCVT() && Op.getValueType() == MVT::f32)
7311                        ? (Op.getOpcode() == ISD::UINT_TO_FP ? PPCISD::FCFIDUS
7312                                                             : PPCISD::FCFIDS)
7313                        : (Op.getOpcode() == ISD::UINT_TO_FP ? PPCISD::FCFIDU
7314                                                             : PPCISD::FCFID);
7315   MVT FCFTy = (Subtarget.hasFPCVT() && Op.getValueType() == MVT::f32)
7316                   ? MVT::f32
7317                   : MVT::f64;
7318 
7319   if (Op.getOperand(0).getValueType() == MVT::i64) {
7320     SDValue SINT = Op.getOperand(0);
7321     // When converting to single-precision, we actually need to convert
7322     // to double-precision first and then round to single-precision.
7323     // To avoid double-rounding effects during that operation, we have
7324     // to prepare the input operand.  Bits that might be truncated when
7325     // converting to double-precision are replaced by a bit that won't
7326     // be lost at this stage, but is below the single-precision rounding
7327     // position.
7328     //
7329     // However, if -enable-unsafe-fp-math is in effect, accept double
7330     // rounding to avoid the extra overhead.
7331     if (Op.getValueType() == MVT::f32 &&
7332         !Subtarget.hasFPCVT() &&
7333         !DAG.getTarget().Options.UnsafeFPMath) {
7334 
7335       // Twiddle input to make sure the low 11 bits are zero.  (If this
7336       // is the case, we are guaranteed the value will fit into the 53 bit
7337       // mantissa of an IEEE double-precision value without rounding.)
7338       // If any of those low 11 bits were not zero originally, make sure
7339       // bit 12 (value 2048) is set instead, so that the final rounding
7340       // to single-precision gets the correct result.
7341       SDValue Round = DAG.getNode(ISD::AND, dl, MVT::i64,
7342                                   SINT, DAG.getConstant(2047, dl, MVT::i64));
7343       Round = DAG.getNode(ISD::ADD, dl, MVT::i64,
7344                           Round, DAG.getConstant(2047, dl, MVT::i64));
7345       Round = DAG.getNode(ISD::OR, dl, MVT::i64, Round, SINT);
7346       Round = DAG.getNode(ISD::AND, dl, MVT::i64,
7347                           Round, DAG.getConstant(-2048, dl, MVT::i64));
7348 
7349       // However, we cannot use that value unconditionally: if the magnitude
7350       // of the input value is small, the bit-twiddling we did above might
7351       // end up visibly changing the output.  Fortunately, in that case, we
7352       // don't need to twiddle bits since the original input will convert
7353       // exactly to double-precision floating-point already.  Therefore,
7354       // construct a conditional to use the original value if the top 11
7355       // bits are all sign-bit copies, and use the rounded value computed
7356       // above otherwise.
7357       SDValue Cond = DAG.getNode(ISD::SRA, dl, MVT::i64,
7358                                  SINT, DAG.getConstant(53, dl, MVT::i32));
7359       Cond = DAG.getNode(ISD::ADD, dl, MVT::i64,
7360                          Cond, DAG.getConstant(1, dl, MVT::i64));
7361       Cond = DAG.getSetCC(dl, MVT::i32,
7362                           Cond, DAG.getConstant(1, dl, MVT::i64), ISD::SETUGT);
7363 
7364       SINT = DAG.getNode(ISD::SELECT, dl, MVT::i64, Cond, Round, SINT);
7365     }
7366 
7367     ReuseLoadInfo RLI;
7368     SDValue Bits;
7369 
7370     MachineFunction &MF = DAG.getMachineFunction();
7371     if (canReuseLoadAddress(SINT, MVT::i64, RLI, DAG)) {
7372       Bits = DAG.getLoad(MVT::f64, dl, RLI.Chain, RLI.Ptr, RLI.MPI,
7373                          RLI.Alignment, RLI.MMOFlags(), RLI.AAInfo, RLI.Ranges);
7374       spliceIntoChain(RLI.ResChain, Bits.getValue(1), DAG);
7375     } else if (Subtarget.hasLFIWAX() &&
7376                canReuseLoadAddress(SINT, MVT::i32, RLI, DAG, ISD::SEXTLOAD)) {
7377       MachineMemOperand *MMO =
7378         MF.getMachineMemOperand(RLI.MPI, MachineMemOperand::MOLoad, 4,
7379                                 RLI.Alignment, RLI.AAInfo, RLI.Ranges);
7380       SDValue Ops[] = { RLI.Chain, RLI.Ptr };
7381       Bits = DAG.getMemIntrinsicNode(PPCISD::LFIWAX, dl,
7382                                      DAG.getVTList(MVT::f64, MVT::Other),
7383                                      Ops, MVT::i32, MMO);
7384       spliceIntoChain(RLI.ResChain, Bits.getValue(1), DAG);
7385     } else if (Subtarget.hasFPCVT() &&
7386                canReuseLoadAddress(SINT, MVT::i32, RLI, DAG, ISD::ZEXTLOAD)) {
7387       MachineMemOperand *MMO =
7388         MF.getMachineMemOperand(RLI.MPI, MachineMemOperand::MOLoad, 4,
7389                                 RLI.Alignment, RLI.AAInfo, RLI.Ranges);
7390       SDValue Ops[] = { RLI.Chain, RLI.Ptr };
7391       Bits = DAG.getMemIntrinsicNode(PPCISD::LFIWZX, dl,
7392                                      DAG.getVTList(MVT::f64, MVT::Other),
7393                                      Ops, MVT::i32, MMO);
7394       spliceIntoChain(RLI.ResChain, Bits.getValue(1), DAG);
7395     } else if (((Subtarget.hasLFIWAX() &&
7396                  SINT.getOpcode() == ISD::SIGN_EXTEND) ||
7397                 (Subtarget.hasFPCVT() &&
7398                  SINT.getOpcode() == ISD::ZERO_EXTEND)) &&
7399                SINT.getOperand(0).getValueType() == MVT::i32) {
7400       MachineFrameInfo &MFI = MF.getFrameInfo();
7401       EVT PtrVT = getPointerTy(DAG.getDataLayout());
7402 
7403       int FrameIdx = MFI.CreateStackObject(4, 4, false);
7404       SDValue FIdx = DAG.getFrameIndex(FrameIdx, PtrVT);
7405 
7406       SDValue Store =
7407           DAG.getStore(DAG.getEntryNode(), dl, SINT.getOperand(0), FIdx,
7408                        MachinePointerInfo::getFixedStack(
7409                            DAG.getMachineFunction(), FrameIdx));
7410 
7411       assert(cast<StoreSDNode>(Store)->getMemoryVT() == MVT::i32 &&
7412              "Expected an i32 store");
7413 
7414       RLI.Ptr = FIdx;
7415       RLI.Chain = Store;
7416       RLI.MPI =
7417           MachinePointerInfo::getFixedStack(DAG.getMachineFunction(), FrameIdx);
7418       RLI.Alignment = 4;
7419 
7420       MachineMemOperand *MMO =
7421         MF.getMachineMemOperand(RLI.MPI, MachineMemOperand::MOLoad, 4,
7422                                 RLI.Alignment, RLI.AAInfo, RLI.Ranges);
7423       SDValue Ops[] = { RLI.Chain, RLI.Ptr };
7424       Bits = DAG.getMemIntrinsicNode(SINT.getOpcode() == ISD::ZERO_EXTEND ?
7425                                      PPCISD::LFIWZX : PPCISD::LFIWAX,
7426                                      dl, DAG.getVTList(MVT::f64, MVT::Other),
7427                                      Ops, MVT::i32, MMO);
7428     } else
7429       Bits = DAG.getNode(ISD::BITCAST, dl, MVT::f64, SINT);
7430 
7431     SDValue FP = DAG.getNode(FCFOp, dl, FCFTy, Bits);
7432 
7433     if (Op.getValueType() == MVT::f32 && !Subtarget.hasFPCVT())
7434       FP = DAG.getNode(ISD::FP_ROUND, dl,
7435                        MVT::f32, FP, DAG.getIntPtrConstant(0, dl));
7436     return FP;
7437   }
7438 
7439   assert(Op.getOperand(0).getValueType() == MVT::i32 &&
7440          "Unhandled INT_TO_FP type in custom expander!");
7441   // Since we only generate this in 64-bit mode, we can take advantage of
7442   // 64-bit registers.  In particular, sign extend the input value into the
7443   // 64-bit register with extsw, store the WHOLE 64-bit value into the stack
7444   // then lfd it and fcfid it.
7445   MachineFunction &MF = DAG.getMachineFunction();
7446   MachineFrameInfo &MFI = MF.getFrameInfo();
7447   EVT PtrVT = getPointerTy(MF.getDataLayout());
7448 
7449   SDValue Ld;
7450   if (Subtarget.hasLFIWAX() || Subtarget.hasFPCVT()) {
7451     ReuseLoadInfo RLI;
7452     bool ReusingLoad;
7453     if (!(ReusingLoad = canReuseLoadAddress(Op.getOperand(0), MVT::i32, RLI,
7454                                             DAG))) {
7455       int FrameIdx = MFI.CreateStackObject(4, 4, false);
7456       SDValue FIdx = DAG.getFrameIndex(FrameIdx, PtrVT);
7457 
7458       SDValue Store =
7459           DAG.getStore(DAG.getEntryNode(), dl, Op.getOperand(0), FIdx,
7460                        MachinePointerInfo::getFixedStack(
7461                            DAG.getMachineFunction(), FrameIdx));
7462 
7463       assert(cast<StoreSDNode>(Store)->getMemoryVT() == MVT::i32 &&
7464              "Expected an i32 store");
7465 
7466       RLI.Ptr = FIdx;
7467       RLI.Chain = Store;
7468       RLI.MPI =
7469           MachinePointerInfo::getFixedStack(DAG.getMachineFunction(), FrameIdx);
7470       RLI.Alignment = 4;
7471     }
7472 
7473     MachineMemOperand *MMO =
7474       MF.getMachineMemOperand(RLI.MPI, MachineMemOperand::MOLoad, 4,
7475                               RLI.Alignment, RLI.AAInfo, RLI.Ranges);
7476     SDValue Ops[] = { RLI.Chain, RLI.Ptr };
7477     Ld = DAG.getMemIntrinsicNode(Op.getOpcode() == ISD::UINT_TO_FP ?
7478                                    PPCISD::LFIWZX : PPCISD::LFIWAX,
7479                                  dl, DAG.getVTList(MVT::f64, MVT::Other),
7480                                  Ops, MVT::i32, MMO);
7481     if (ReusingLoad)
7482       spliceIntoChain(RLI.ResChain, Ld.getValue(1), DAG);
7483   } else {
7484     assert(Subtarget.isPPC64() &&
7485            "i32->FP without LFIWAX supported only on PPC64");
7486 
7487     int FrameIdx = MFI.CreateStackObject(8, 8, false);
7488     SDValue FIdx = DAG.getFrameIndex(FrameIdx, PtrVT);
7489 
7490     SDValue Ext64 = DAG.getNode(ISD::SIGN_EXTEND, dl, MVT::i64,
7491                                 Op.getOperand(0));
7492 
7493     // STD the extended value into the stack slot.
7494     SDValue Store = DAG.getStore(
7495         DAG.getEntryNode(), dl, Ext64, FIdx,
7496         MachinePointerInfo::getFixedStack(DAG.getMachineFunction(), FrameIdx));
7497 
7498     // Load the value as a double.
7499     Ld = DAG.getLoad(
7500         MVT::f64, dl, Store, FIdx,
7501         MachinePointerInfo::getFixedStack(DAG.getMachineFunction(), FrameIdx));
7502   }
7503 
7504   // FCFID it and return it.
7505   SDValue FP = DAG.getNode(FCFOp, dl, FCFTy, Ld);
7506   if (Op.getValueType() == MVT::f32 && !Subtarget.hasFPCVT())
7507     FP = DAG.getNode(ISD::FP_ROUND, dl, MVT::f32, FP,
7508                      DAG.getIntPtrConstant(0, dl));
7509   return FP;
7510 }
7511 
7512 SDValue PPCTargetLowering::LowerFLT_ROUNDS_(SDValue Op,
7513                                             SelectionDAG &DAG) const {
7514   SDLoc dl(Op);
7515   /*
7516    The rounding mode is in bits 30:31 of FPSR, and has the following
7517    settings:
7518      00 Round to nearest
7519      01 Round to 0
7520      10 Round to +inf
7521      11 Round to -inf
7522 
7523   FLT_ROUNDS, on the other hand, expects the following:
7524     -1 Undefined
7525      0 Round to 0
7526      1 Round to nearest
7527      2 Round to +inf
7528      3 Round to -inf
7529 
7530   To perform the conversion, we do:
7531     ((FPSCR & 0x3) ^ ((~FPSCR & 0x3) >> 1))
7532   */
7533 
7534   MachineFunction &MF = DAG.getMachineFunction();
7535   EVT VT = Op.getValueType();
7536   EVT PtrVT = getPointerTy(MF.getDataLayout());
7537 
7538   // Save FP Control Word to register
7539   EVT NodeTys[] = {
7540     MVT::f64,    // return register
7541     MVT::Glue    // unused in this context
7542   };
7543   SDValue Chain = DAG.getNode(PPCISD::MFFS, dl, NodeTys, None);
7544 
7545   // Save FP register to stack slot
7546   int SSFI = MF.getFrameInfo().CreateStackObject(8, 8, false);
7547   SDValue StackSlot = DAG.getFrameIndex(SSFI, PtrVT);
7548   SDValue Store = DAG.getStore(DAG.getEntryNode(), dl, Chain, StackSlot,
7549                                MachinePointerInfo());
7550 
7551   // Load FP Control Word from low 32 bits of stack slot.
7552   SDValue Four = DAG.getConstant(4, dl, PtrVT);
7553   SDValue Addr = DAG.getNode(ISD::ADD, dl, PtrVT, StackSlot, Four);
7554   SDValue CWD = DAG.getLoad(MVT::i32, dl, Store, Addr, MachinePointerInfo());
7555 
7556   // Transform as necessary
7557   SDValue CWD1 =
7558     DAG.getNode(ISD::AND, dl, MVT::i32,
7559                 CWD, DAG.getConstant(3, dl, MVT::i32));
7560   SDValue CWD2 =
7561     DAG.getNode(ISD::SRL, dl, MVT::i32,
7562                 DAG.getNode(ISD::AND, dl, MVT::i32,
7563                             DAG.getNode(ISD::XOR, dl, MVT::i32,
7564                                         CWD, DAG.getConstant(3, dl, MVT::i32)),
7565                             DAG.getConstant(3, dl, MVT::i32)),
7566                 DAG.getConstant(1, dl, MVT::i32));
7567 
7568   SDValue RetVal =
7569     DAG.getNode(ISD::XOR, dl, MVT::i32, CWD1, CWD2);
7570 
7571   return DAG.getNode((VT.getSizeInBits() < 16 ?
7572                       ISD::TRUNCATE : ISD::ZERO_EXTEND), dl, VT, RetVal);
7573 }
7574 
7575 SDValue PPCTargetLowering::LowerSHL_PARTS(SDValue Op, SelectionDAG &DAG) const {
7576   EVT VT = Op.getValueType();
7577   unsigned BitWidth = VT.getSizeInBits();
7578   SDLoc dl(Op);
7579   assert(Op.getNumOperands() == 3 &&
7580          VT == Op.getOperand(1).getValueType() &&
7581          "Unexpected SHL!");
7582 
7583   // Expand into a bunch of logical ops.  Note that these ops
7584   // depend on the PPC behavior for oversized shift amounts.
7585   SDValue Lo = Op.getOperand(0);
7586   SDValue Hi = Op.getOperand(1);
7587   SDValue Amt = Op.getOperand(2);
7588   EVT AmtVT = Amt.getValueType();
7589 
7590   SDValue Tmp1 = DAG.getNode(ISD::SUB, dl, AmtVT,
7591                              DAG.getConstant(BitWidth, dl, AmtVT), Amt);
7592   SDValue Tmp2 = DAG.getNode(PPCISD::SHL, dl, VT, Hi, Amt);
7593   SDValue Tmp3 = DAG.getNode(PPCISD::SRL, dl, VT, Lo, Tmp1);
7594   SDValue Tmp4 = DAG.getNode(ISD::OR , dl, VT, Tmp2, Tmp3);
7595   SDValue Tmp5 = DAG.getNode(ISD::ADD, dl, AmtVT, Amt,
7596                              DAG.getConstant(-BitWidth, dl, AmtVT));
7597   SDValue Tmp6 = DAG.getNode(PPCISD::SHL, dl, VT, Lo, Tmp5);
7598   SDValue OutHi = DAG.getNode(ISD::OR, dl, VT, Tmp4, Tmp6);
7599   SDValue OutLo = DAG.getNode(PPCISD::SHL, dl, VT, Lo, Amt);
7600   SDValue OutOps[] = { OutLo, OutHi };
7601   return DAG.getMergeValues(OutOps, dl);
7602 }
7603 
7604 SDValue PPCTargetLowering::LowerSRL_PARTS(SDValue Op, SelectionDAG &DAG) const {
7605   EVT VT = Op.getValueType();
7606   SDLoc dl(Op);
7607   unsigned BitWidth = VT.getSizeInBits();
7608   assert(Op.getNumOperands() == 3 &&
7609          VT == Op.getOperand(1).getValueType() &&
7610          "Unexpected SRL!");
7611 
7612   // Expand into a bunch of logical ops.  Note that these ops
7613   // depend on the PPC behavior for oversized shift amounts.
7614   SDValue Lo = Op.getOperand(0);
7615   SDValue Hi = Op.getOperand(1);
7616   SDValue Amt = Op.getOperand(2);
7617   EVT AmtVT = Amt.getValueType();
7618 
7619   SDValue Tmp1 = DAG.getNode(ISD::SUB, dl, AmtVT,
7620                              DAG.getConstant(BitWidth, dl, AmtVT), Amt);
7621   SDValue Tmp2 = DAG.getNode(PPCISD::SRL, dl, VT, Lo, Amt);
7622   SDValue Tmp3 = DAG.getNode(PPCISD::SHL, dl, VT, Hi, Tmp1);
7623   SDValue Tmp4 = DAG.getNode(ISD::OR, dl, VT, Tmp2, Tmp3);
7624   SDValue Tmp5 = DAG.getNode(ISD::ADD, dl, AmtVT, Amt,
7625                              DAG.getConstant(-BitWidth, dl, AmtVT));
7626   SDValue Tmp6 = DAG.getNode(PPCISD::SRL, dl, VT, Hi, Tmp5);
7627   SDValue OutLo = DAG.getNode(ISD::OR, dl, VT, Tmp4, Tmp6);
7628   SDValue OutHi = DAG.getNode(PPCISD::SRL, dl, VT, Hi, Amt);
7629   SDValue OutOps[] = { OutLo, OutHi };
7630   return DAG.getMergeValues(OutOps, dl);
7631 }
7632 
7633 SDValue PPCTargetLowering::LowerSRA_PARTS(SDValue Op, SelectionDAG &DAG) const {
7634   SDLoc dl(Op);
7635   EVT VT = Op.getValueType();
7636   unsigned BitWidth = VT.getSizeInBits();
7637   assert(Op.getNumOperands() == 3 &&
7638          VT == Op.getOperand(1).getValueType() &&
7639          "Unexpected SRA!");
7640 
7641   // Expand into a bunch of logical ops, followed by a select_cc.
7642   SDValue Lo = Op.getOperand(0);
7643   SDValue Hi = Op.getOperand(1);
7644   SDValue Amt = Op.getOperand(2);
7645   EVT AmtVT = Amt.getValueType();
7646 
7647   SDValue Tmp1 = DAG.getNode(ISD::SUB, dl, AmtVT,
7648                              DAG.getConstant(BitWidth, dl, AmtVT), Amt);
7649   SDValue Tmp2 = DAG.getNode(PPCISD::SRL, dl, VT, Lo, Amt);
7650   SDValue Tmp3 = DAG.getNode(PPCISD::SHL, dl, VT, Hi, Tmp1);
7651   SDValue Tmp4 = DAG.getNode(ISD::OR, dl, VT, Tmp2, Tmp3);
7652   SDValue Tmp5 = DAG.getNode(ISD::ADD, dl, AmtVT, Amt,
7653                              DAG.getConstant(-BitWidth, dl, AmtVT));
7654   SDValue Tmp6 = DAG.getNode(PPCISD::SRA, dl, VT, Hi, Tmp5);
7655   SDValue OutHi = DAG.getNode(PPCISD::SRA, dl, VT, Hi, Amt);
7656   SDValue OutLo = DAG.getSelectCC(dl, Tmp5, DAG.getConstant(0, dl, AmtVT),
7657                                   Tmp4, Tmp6, ISD::SETLE);
7658   SDValue OutOps[] = { OutLo, OutHi };
7659   return DAG.getMergeValues(OutOps, dl);
7660 }
7661 
7662 //===----------------------------------------------------------------------===//
7663 // Vector related lowering.
7664 //
7665 
7666 /// BuildSplatI - Build a canonical splati of Val with an element size of
7667 /// SplatSize.  Cast the result to VT.
7668 static SDValue BuildSplatI(int Val, unsigned SplatSize, EVT VT,
7669                            SelectionDAG &DAG, const SDLoc &dl) {
7670   assert(Val >= -16 && Val <= 15 && "vsplti is out of range!");
7671 
7672   static const MVT VTys[] = { // canonical VT to use for each size.
7673     MVT::v16i8, MVT::v8i16, MVT::Other, MVT::v4i32
7674   };
7675 
7676   EVT ReqVT = VT != MVT::Other ? VT : VTys[SplatSize-1];
7677 
7678   // Force vspltis[hw] -1 to vspltisb -1 to canonicalize.
7679   if (Val == -1)
7680     SplatSize = 1;
7681 
7682   EVT CanonicalVT = VTys[SplatSize-1];
7683 
7684   // Build a canonical splat for this value.
7685   return DAG.getBitcast(ReqVT, DAG.getConstant(Val, dl, CanonicalVT));
7686 }
7687 
7688 /// BuildIntrinsicOp - Return a unary operator intrinsic node with the
7689 /// specified intrinsic ID.
7690 static SDValue BuildIntrinsicOp(unsigned IID, SDValue Op, SelectionDAG &DAG,
7691                                 const SDLoc &dl, EVT DestVT = MVT::Other) {
7692   if (DestVT == MVT::Other) DestVT = Op.getValueType();
7693   return DAG.getNode(ISD::INTRINSIC_WO_CHAIN, dl, DestVT,
7694                      DAG.getConstant(IID, dl, MVT::i32), Op);
7695 }
7696 
7697 /// BuildIntrinsicOp - Return a binary operator intrinsic node with the
7698 /// specified intrinsic ID.
7699 static SDValue BuildIntrinsicOp(unsigned IID, SDValue LHS, SDValue RHS,
7700                                 SelectionDAG &DAG, const SDLoc &dl,
7701                                 EVT DestVT = MVT::Other) {
7702   if (DestVT == MVT::Other) DestVT = LHS.getValueType();
7703   return DAG.getNode(ISD::INTRINSIC_WO_CHAIN, dl, DestVT,
7704                      DAG.getConstant(IID, dl, MVT::i32), LHS, RHS);
7705 }
7706 
7707 /// BuildIntrinsicOp - Return a ternary operator intrinsic node with the
7708 /// specified intrinsic ID.
7709 static SDValue BuildIntrinsicOp(unsigned IID, SDValue Op0, SDValue Op1,
7710                                 SDValue Op2, SelectionDAG &DAG, const SDLoc &dl,
7711                                 EVT DestVT = MVT::Other) {
7712   if (DestVT == MVT::Other) DestVT = Op0.getValueType();
7713   return DAG.getNode(ISD::INTRINSIC_WO_CHAIN, dl, DestVT,
7714                      DAG.getConstant(IID, dl, MVT::i32), Op0, Op1, Op2);
7715 }
7716 
7717 /// BuildVSLDOI - Return a VECTOR_SHUFFLE that is a vsldoi of the specified
7718 /// amount.  The result has the specified value type.
7719 static SDValue BuildVSLDOI(SDValue LHS, SDValue RHS, unsigned Amt, EVT VT,
7720                            SelectionDAG &DAG, const SDLoc &dl) {
7721   // Force LHS/RHS to be the right type.
7722   LHS = DAG.getNode(ISD::BITCAST, dl, MVT::v16i8, LHS);
7723   RHS = DAG.getNode(ISD::BITCAST, dl, MVT::v16i8, RHS);
7724 
7725   int Ops[16];
7726   for (unsigned i = 0; i != 16; ++i)
7727     Ops[i] = i + Amt;
7728   SDValue T = DAG.getVectorShuffle(MVT::v16i8, dl, LHS, RHS, Ops);
7729   return DAG.getNode(ISD::BITCAST, dl, VT, T);
7730 }
7731 
7732 /// Do we have an efficient pattern in a .td file for this node?
7733 ///
7734 /// \param V - pointer to the BuildVectorSDNode being matched
7735 /// \param HasDirectMove - does this subtarget have VSR <-> GPR direct moves?
7736 ///
7737 /// There are some patterns where it is beneficial to keep a BUILD_VECTOR
7738 /// node as a BUILD_VECTOR node rather than expanding it. The patterns where
7739 /// the opposite is true (expansion is beneficial) are:
7740 /// - The node builds a vector out of integers that are not 32 or 64-bits
7741 /// - The node builds a vector out of constants
7742 /// - The node is a "load-and-splat"
7743 /// In all other cases, we will choose to keep the BUILD_VECTOR.
7744 static bool haveEfficientBuildVectorPattern(BuildVectorSDNode *V,
7745                                             bool HasDirectMove,
7746                                             bool HasP8Vector) {
7747   EVT VecVT = V->getValueType(0);
7748   bool RightType = VecVT == MVT::v2f64 ||
7749     (HasP8Vector && VecVT == MVT::v4f32) ||
7750     (HasDirectMove && (VecVT == MVT::v2i64 || VecVT == MVT::v4i32));
7751   if (!RightType)
7752     return false;
7753 
7754   bool IsSplat = true;
7755   bool IsLoad = false;
7756   SDValue Op0 = V->getOperand(0);
7757 
7758   // This function is called in a block that confirms the node is not a constant
7759   // splat. So a constant BUILD_VECTOR here means the vector is built out of
7760   // different constants.
7761   if (V->isConstant())
7762     return false;
7763   for (int i = 0, e = V->getNumOperands(); i < e; ++i) {
7764     if (V->getOperand(i).isUndef())
7765       return false;
7766     // We want to expand nodes that represent load-and-splat even if the
7767     // loaded value is a floating point truncation or conversion to int.
7768     if (V->getOperand(i).getOpcode() == ISD::LOAD ||
7769         (V->getOperand(i).getOpcode() == ISD::FP_ROUND &&
7770          V->getOperand(i).getOperand(0).getOpcode() == ISD::LOAD) ||
7771         (V->getOperand(i).getOpcode() == ISD::FP_TO_SINT &&
7772          V->getOperand(i).getOperand(0).getOpcode() == ISD::LOAD) ||
7773         (V->getOperand(i).getOpcode() == ISD::FP_TO_UINT &&
7774          V->getOperand(i).getOperand(0).getOpcode() == ISD::LOAD))
7775       IsLoad = true;
7776     // If the operands are different or the input is not a load and has more
7777     // uses than just this BV node, then it isn't a splat.
7778     if (V->getOperand(i) != Op0 ||
7779         (!IsLoad && !V->isOnlyUserOf(V->getOperand(i).getNode())))
7780       IsSplat = false;
7781   }
7782   return !(IsSplat && IsLoad);
7783 }
7784 
7785 // Lower BITCAST(f128, (build_pair i64, i64)) to BUILD_FP128.
7786 SDValue PPCTargetLowering::LowerBITCAST(SDValue Op, SelectionDAG &DAG) const {
7787 
7788   SDLoc dl(Op);
7789   SDValue Op0 = Op->getOperand(0);
7790 
7791   if (!EnableQuadPrecision ||
7792       (Op.getValueType() != MVT::f128 ) ||
7793       (Op0.getOpcode() != ISD::BUILD_PAIR) ||
7794       (Op0.getOperand(0).getValueType() !=  MVT::i64) ||
7795       (Op0.getOperand(1).getValueType() != MVT::i64))
7796     return SDValue();
7797 
7798   return DAG.getNode(PPCISD::BUILD_FP128, dl, MVT::f128, Op0.getOperand(0),
7799                      Op0.getOperand(1));
7800 }
7801 
7802 // If this is a case we can't handle, return null and let the default
7803 // expansion code take care of it.  If we CAN select this case, and if it
7804 // selects to a single instruction, return Op.  Otherwise, if we can codegen
7805 // this case more efficiently than a constant pool load, lower it to the
7806 // sequence of ops that should be used.
7807 SDValue PPCTargetLowering::LowerBUILD_VECTOR(SDValue Op,
7808                                              SelectionDAG &DAG) const {
7809   SDLoc dl(Op);
7810   BuildVectorSDNode *BVN = dyn_cast<BuildVectorSDNode>(Op.getNode());
7811   assert(BVN && "Expected a BuildVectorSDNode in LowerBUILD_VECTOR");
7812 
7813   if (Subtarget.hasQPX() && Op.getValueType() == MVT::v4i1) {
7814     // We first build an i32 vector, load it into a QPX register,
7815     // then convert it to a floating-point vector and compare it
7816     // to a zero vector to get the boolean result.
7817     MachineFrameInfo &MFI = DAG.getMachineFunction().getFrameInfo();
7818     int FrameIdx = MFI.CreateStackObject(16, 16, false);
7819     MachinePointerInfo PtrInfo =
7820         MachinePointerInfo::getFixedStack(DAG.getMachineFunction(), FrameIdx);
7821     EVT PtrVT = getPointerTy(DAG.getDataLayout());
7822     SDValue FIdx = DAG.getFrameIndex(FrameIdx, PtrVT);
7823 
7824     assert(BVN->getNumOperands() == 4 &&
7825       "BUILD_VECTOR for v4i1 does not have 4 operands");
7826 
7827     bool IsConst = true;
7828     for (unsigned i = 0; i < 4; ++i) {
7829       if (BVN->getOperand(i).isUndef()) continue;
7830       if (!isa<ConstantSDNode>(BVN->getOperand(i))) {
7831         IsConst = false;
7832         break;
7833       }
7834     }
7835 
7836     if (IsConst) {
7837       Constant *One =
7838         ConstantFP::get(Type::getFloatTy(*DAG.getContext()), 1.0);
7839       Constant *NegOne =
7840         ConstantFP::get(Type::getFloatTy(*DAG.getContext()), -1.0);
7841 
7842       Constant *CV[4];
7843       for (unsigned i = 0; i < 4; ++i) {
7844         if (BVN->getOperand(i).isUndef())
7845           CV[i] = UndefValue::get(Type::getFloatTy(*DAG.getContext()));
7846         else if (isNullConstant(BVN->getOperand(i)))
7847           CV[i] = NegOne;
7848         else
7849           CV[i] = One;
7850       }
7851 
7852       Constant *CP = ConstantVector::get(CV);
7853       SDValue CPIdx = DAG.getConstantPool(CP, getPointerTy(DAG.getDataLayout()),
7854                                           16 /* alignment */);
7855 
7856       SDValue Ops[] = {DAG.getEntryNode(), CPIdx};
7857       SDVTList VTs = DAG.getVTList({MVT::v4i1, /*chain*/ MVT::Other});
7858       return DAG.getMemIntrinsicNode(
7859           PPCISD::QVLFSb, dl, VTs, Ops, MVT::v4f32,
7860           MachinePointerInfo::getConstantPool(DAG.getMachineFunction()));
7861     }
7862 
7863     SmallVector<SDValue, 4> Stores;
7864     for (unsigned i = 0; i < 4; ++i) {
7865       if (BVN->getOperand(i).isUndef()) continue;
7866 
7867       unsigned Offset = 4*i;
7868       SDValue Idx = DAG.getConstant(Offset, dl, FIdx.getValueType());
7869       Idx = DAG.getNode(ISD::ADD, dl, FIdx.getValueType(), FIdx, Idx);
7870 
7871       unsigned StoreSize = BVN->getOperand(i).getValueType().getStoreSize();
7872       if (StoreSize > 4) {
7873         Stores.push_back(
7874             DAG.getTruncStore(DAG.getEntryNode(), dl, BVN->getOperand(i), Idx,
7875                               PtrInfo.getWithOffset(Offset), MVT::i32));
7876       } else {
7877         SDValue StoreValue = BVN->getOperand(i);
7878         if (StoreSize < 4)
7879           StoreValue = DAG.getNode(ISD::ANY_EXTEND, dl, MVT::i32, StoreValue);
7880 
7881         Stores.push_back(DAG.getStore(DAG.getEntryNode(), dl, StoreValue, Idx,
7882                                       PtrInfo.getWithOffset(Offset)));
7883       }
7884     }
7885 
7886     SDValue StoreChain;
7887     if (!Stores.empty())
7888       StoreChain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, Stores);
7889     else
7890       StoreChain = DAG.getEntryNode();
7891 
7892     // Now load from v4i32 into the QPX register; this will extend it to
7893     // v4i64 but not yet convert it to a floating point. Nevertheless, this
7894     // is typed as v4f64 because the QPX register integer states are not
7895     // explicitly represented.
7896 
7897     SDValue Ops[] = {StoreChain,
7898                      DAG.getConstant(Intrinsic::ppc_qpx_qvlfiwz, dl, MVT::i32),
7899                      FIdx};
7900     SDVTList VTs = DAG.getVTList({MVT::v4f64, /*chain*/ MVT::Other});
7901 
7902     SDValue LoadedVect = DAG.getMemIntrinsicNode(ISD::INTRINSIC_W_CHAIN,
7903       dl, VTs, Ops, MVT::v4i32, PtrInfo);
7904     LoadedVect = DAG.getNode(ISD::INTRINSIC_WO_CHAIN, dl, MVT::v4f64,
7905       DAG.getConstant(Intrinsic::ppc_qpx_qvfcfidu, dl, MVT::i32),
7906       LoadedVect);
7907 
7908     SDValue FPZeros = DAG.getConstantFP(0.0, dl, MVT::v4f64);
7909 
7910     return DAG.getSetCC(dl, MVT::v4i1, LoadedVect, FPZeros, ISD::SETEQ);
7911   }
7912 
7913   // All other QPX vectors are handled by generic code.
7914   if (Subtarget.hasQPX())
7915     return SDValue();
7916 
7917   // Check if this is a splat of a constant value.
7918   APInt APSplatBits, APSplatUndef;
7919   unsigned SplatBitSize;
7920   bool HasAnyUndefs;
7921   if (! BVN->isConstantSplat(APSplatBits, APSplatUndef, SplatBitSize,
7922                              HasAnyUndefs, 0, !Subtarget.isLittleEndian()) ||
7923       SplatBitSize > 32) {
7924     // BUILD_VECTOR nodes that are not constant splats of up to 32-bits can be
7925     // lowered to VSX instructions under certain conditions.
7926     // Without VSX, there is no pattern more efficient than expanding the node.
7927     if (Subtarget.hasVSX() &&
7928         haveEfficientBuildVectorPattern(BVN, Subtarget.hasDirectMove(),
7929                                         Subtarget.hasP8Vector()))
7930       return Op;
7931     return SDValue();
7932   }
7933 
7934   unsigned SplatBits = APSplatBits.getZExtValue();
7935   unsigned SplatUndef = APSplatUndef.getZExtValue();
7936   unsigned SplatSize = SplatBitSize / 8;
7937 
7938   // First, handle single instruction cases.
7939 
7940   // All zeros?
7941   if (SplatBits == 0) {
7942     // Canonicalize all zero vectors to be v4i32.
7943     if (Op.getValueType() != MVT::v4i32 || HasAnyUndefs) {
7944       SDValue Z = DAG.getConstant(0, dl, MVT::v4i32);
7945       Op = DAG.getNode(ISD::BITCAST, dl, Op.getValueType(), Z);
7946     }
7947     return Op;
7948   }
7949 
7950   // We have XXSPLTIB for constant splats one byte wide
7951   if (Subtarget.hasP9Vector() && SplatSize == 1) {
7952     // This is a splat of 1-byte elements with some elements potentially undef.
7953     // Rather than trying to match undef in the SDAG patterns, ensure that all
7954     // elements are the same constant.
7955     if (HasAnyUndefs || ISD::isBuildVectorAllOnes(BVN)) {
7956       SmallVector<SDValue, 16> Ops(16, DAG.getConstant(SplatBits,
7957                                                        dl, MVT::i32));
7958       SDValue NewBV = DAG.getBuildVector(MVT::v16i8, dl, Ops);
7959       if (Op.getValueType() != MVT::v16i8)
7960         return DAG.getBitcast(Op.getValueType(), NewBV);
7961       return NewBV;
7962     }
7963 
7964     // BuildVectorSDNode::isConstantSplat() is actually pretty smart. It'll
7965     // detect that constant splats like v8i16: 0xABAB are really just splats
7966     // of a 1-byte constant. In this case, we need to convert the node to a
7967     // splat of v16i8 and a bitcast.
7968     if (Op.getValueType() != MVT::v16i8)
7969       return DAG.getBitcast(Op.getValueType(),
7970                             DAG.getConstant(SplatBits, dl, MVT::v16i8));
7971 
7972     return Op;
7973   }
7974 
7975   // If the sign extended value is in the range [-16,15], use VSPLTI[bhw].
7976   int32_t SextVal= (int32_t(SplatBits << (32-SplatBitSize)) >>
7977                     (32-SplatBitSize));
7978   if (SextVal >= -16 && SextVal <= 15)
7979     return BuildSplatI(SextVal, SplatSize, Op.getValueType(), DAG, dl);
7980 
7981   // Two instruction sequences.
7982 
7983   // If this value is in the range [-32,30] and is even, use:
7984   //     VSPLTI[bhw](val/2) + VSPLTI[bhw](val/2)
7985   // If this value is in the range [17,31] and is odd, use:
7986   //     VSPLTI[bhw](val-16) - VSPLTI[bhw](-16)
7987   // If this value is in the range [-31,-17] and is odd, use:
7988   //     VSPLTI[bhw](val+16) + VSPLTI[bhw](-16)
7989   // Note the last two are three-instruction sequences.
7990   if (SextVal >= -32 && SextVal <= 31) {
7991     // To avoid having these optimizations undone by constant folding,
7992     // we convert to a pseudo that will be expanded later into one of
7993     // the above forms.
7994     SDValue Elt = DAG.getConstant(SextVal, dl, MVT::i32);
7995     EVT VT = (SplatSize == 1 ? MVT::v16i8 :
7996               (SplatSize == 2 ? MVT::v8i16 : MVT::v4i32));
7997     SDValue EltSize = DAG.getConstant(SplatSize, dl, MVT::i32);
7998     SDValue RetVal = DAG.getNode(PPCISD::VADD_SPLAT, dl, VT, Elt, EltSize);
7999     if (VT == Op.getValueType())
8000       return RetVal;
8001     else
8002       return DAG.getNode(ISD::BITCAST, dl, Op.getValueType(), RetVal);
8003   }
8004 
8005   // If this is 0x8000_0000 x 4, turn into vspltisw + vslw.  If it is
8006   // 0x7FFF_FFFF x 4, turn it into not(0x8000_0000).  This is important
8007   // for fneg/fabs.
8008   if (SplatSize == 4 && SplatBits == (0x7FFFFFFF&~SplatUndef)) {
8009     // Make -1 and vspltisw -1:
8010     SDValue OnesV = BuildSplatI(-1, 4, MVT::v4i32, DAG, dl);
8011 
8012     // Make the VSLW intrinsic, computing 0x8000_0000.
8013     SDValue Res = BuildIntrinsicOp(Intrinsic::ppc_altivec_vslw, OnesV,
8014                                    OnesV, DAG, dl);
8015 
8016     // xor by OnesV to invert it.
8017     Res = DAG.getNode(ISD::XOR, dl, MVT::v4i32, Res, OnesV);
8018     return DAG.getNode(ISD::BITCAST, dl, Op.getValueType(), Res);
8019   }
8020 
8021   // Check to see if this is a wide variety of vsplti*, binop self cases.
8022   static const signed char SplatCsts[] = {
8023     -1, 1, -2, 2, -3, 3, -4, 4, -5, 5, -6, 6, -7, 7,
8024     -8, 8, -9, 9, -10, 10, -11, 11, -12, 12, -13, 13, 14, -14, 15, -15, -16
8025   };
8026 
8027   for (unsigned idx = 0; idx < array_lengthof(SplatCsts); ++idx) {
8028     // Indirect through the SplatCsts array so that we favor 'vsplti -1' for
8029     // cases which are ambiguous (e.g. formation of 0x8000_0000).  'vsplti -1'
8030     int i = SplatCsts[idx];
8031 
8032     // Figure out what shift amount will be used by altivec if shifted by i in
8033     // this splat size.
8034     unsigned TypeShiftAmt = i & (SplatBitSize-1);
8035 
8036     // vsplti + shl self.
8037     if (SextVal == (int)((unsigned)i << TypeShiftAmt)) {
8038       SDValue Res = BuildSplatI(i, SplatSize, MVT::Other, DAG, dl);
8039       static const unsigned IIDs[] = { // Intrinsic to use for each size.
8040         Intrinsic::ppc_altivec_vslb, Intrinsic::ppc_altivec_vslh, 0,
8041         Intrinsic::ppc_altivec_vslw
8042       };
8043       Res = BuildIntrinsicOp(IIDs[SplatSize-1], Res, Res, DAG, dl);
8044       return DAG.getNode(ISD::BITCAST, dl, Op.getValueType(), Res);
8045     }
8046 
8047     // vsplti + srl self.
8048     if (SextVal == (int)((unsigned)i >> TypeShiftAmt)) {
8049       SDValue Res = BuildSplatI(i, SplatSize, MVT::Other, DAG, dl);
8050       static const unsigned IIDs[] = { // Intrinsic to use for each size.
8051         Intrinsic::ppc_altivec_vsrb, Intrinsic::ppc_altivec_vsrh, 0,
8052         Intrinsic::ppc_altivec_vsrw
8053       };
8054       Res = BuildIntrinsicOp(IIDs[SplatSize-1], Res, Res, DAG, dl);
8055       return DAG.getNode(ISD::BITCAST, dl, Op.getValueType(), Res);
8056     }
8057 
8058     // vsplti + sra self.
8059     if (SextVal == (int)((unsigned)i >> TypeShiftAmt)) {
8060       SDValue Res = BuildSplatI(i, SplatSize, MVT::Other, DAG, dl);
8061       static const unsigned IIDs[] = { // Intrinsic to use for each size.
8062         Intrinsic::ppc_altivec_vsrab, Intrinsic::ppc_altivec_vsrah, 0,
8063         Intrinsic::ppc_altivec_vsraw
8064       };
8065       Res = BuildIntrinsicOp(IIDs[SplatSize-1], Res, Res, DAG, dl);
8066       return DAG.getNode(ISD::BITCAST, dl, Op.getValueType(), Res);
8067     }
8068 
8069     // vsplti + rol self.
8070     if (SextVal == (int)(((unsigned)i << TypeShiftAmt) |
8071                          ((unsigned)i >> (SplatBitSize-TypeShiftAmt)))) {
8072       SDValue Res = BuildSplatI(i, SplatSize, MVT::Other, DAG, dl);
8073       static const unsigned IIDs[] = { // Intrinsic to use for each size.
8074         Intrinsic::ppc_altivec_vrlb, Intrinsic::ppc_altivec_vrlh, 0,
8075         Intrinsic::ppc_altivec_vrlw
8076       };
8077       Res = BuildIntrinsicOp(IIDs[SplatSize-1], Res, Res, DAG, dl);
8078       return DAG.getNode(ISD::BITCAST, dl, Op.getValueType(), Res);
8079     }
8080 
8081     // t = vsplti c, result = vsldoi t, t, 1
8082     if (SextVal == (int)(((unsigned)i << 8) | (i < 0 ? 0xFF : 0))) {
8083       SDValue T = BuildSplatI(i, SplatSize, MVT::v16i8, DAG, dl);
8084       unsigned Amt = Subtarget.isLittleEndian() ? 15 : 1;
8085       return BuildVSLDOI(T, T, Amt, Op.getValueType(), DAG, dl);
8086     }
8087     // t = vsplti c, result = vsldoi t, t, 2
8088     if (SextVal == (int)(((unsigned)i << 16) | (i < 0 ? 0xFFFF : 0))) {
8089       SDValue T = BuildSplatI(i, SplatSize, MVT::v16i8, DAG, dl);
8090       unsigned Amt = Subtarget.isLittleEndian() ? 14 : 2;
8091       return BuildVSLDOI(T, T, Amt, Op.getValueType(), DAG, dl);
8092     }
8093     // t = vsplti c, result = vsldoi t, t, 3
8094     if (SextVal == (int)(((unsigned)i << 24) | (i < 0 ? 0xFFFFFF : 0))) {
8095       SDValue T = BuildSplatI(i, SplatSize, MVT::v16i8, DAG, dl);
8096       unsigned Amt = Subtarget.isLittleEndian() ? 13 : 3;
8097       return BuildVSLDOI(T, T, Amt, Op.getValueType(), DAG, dl);
8098     }
8099   }
8100 
8101   return SDValue();
8102 }
8103 
8104 /// GeneratePerfectShuffle - Given an entry in the perfect-shuffle table, emit
8105 /// the specified operations to build the shuffle.
8106 static SDValue GeneratePerfectShuffle(unsigned PFEntry, SDValue LHS,
8107                                       SDValue RHS, SelectionDAG &DAG,
8108                                       const SDLoc &dl) {
8109   unsigned OpNum = (PFEntry >> 26) & 0x0F;
8110   unsigned LHSID = (PFEntry >> 13) & ((1 << 13)-1);
8111   unsigned RHSID = (PFEntry >>  0) & ((1 << 13)-1);
8112 
8113   enum {
8114     OP_COPY = 0,  // Copy, used for things like <u,u,u,3> to say it is <0,1,2,3>
8115     OP_VMRGHW,
8116     OP_VMRGLW,
8117     OP_VSPLTISW0,
8118     OP_VSPLTISW1,
8119     OP_VSPLTISW2,
8120     OP_VSPLTISW3,
8121     OP_VSLDOI4,
8122     OP_VSLDOI8,
8123     OP_VSLDOI12
8124   };
8125 
8126   if (OpNum == OP_COPY) {
8127     if (LHSID == (1*9+2)*9+3) return LHS;
8128     assert(LHSID == ((4*9+5)*9+6)*9+7 && "Illegal OP_COPY!");
8129     return RHS;
8130   }
8131 
8132   SDValue OpLHS, OpRHS;
8133   OpLHS = GeneratePerfectShuffle(PerfectShuffleTable[LHSID], LHS, RHS, DAG, dl);
8134   OpRHS = GeneratePerfectShuffle(PerfectShuffleTable[RHSID], LHS, RHS, DAG, dl);
8135 
8136   int ShufIdxs[16];
8137   switch (OpNum) {
8138   default: llvm_unreachable("Unknown i32 permute!");
8139   case OP_VMRGHW:
8140     ShufIdxs[ 0] =  0; ShufIdxs[ 1] =  1; ShufIdxs[ 2] =  2; ShufIdxs[ 3] =  3;
8141     ShufIdxs[ 4] = 16; ShufIdxs[ 5] = 17; ShufIdxs[ 6] = 18; ShufIdxs[ 7] = 19;
8142     ShufIdxs[ 8] =  4; ShufIdxs[ 9] =  5; ShufIdxs[10] =  6; ShufIdxs[11] =  7;
8143     ShufIdxs[12] = 20; ShufIdxs[13] = 21; ShufIdxs[14] = 22; ShufIdxs[15] = 23;
8144     break;
8145   case OP_VMRGLW:
8146     ShufIdxs[ 0] =  8; ShufIdxs[ 1] =  9; ShufIdxs[ 2] = 10; ShufIdxs[ 3] = 11;
8147     ShufIdxs[ 4] = 24; ShufIdxs[ 5] = 25; ShufIdxs[ 6] = 26; ShufIdxs[ 7] = 27;
8148     ShufIdxs[ 8] = 12; ShufIdxs[ 9] = 13; ShufIdxs[10] = 14; ShufIdxs[11] = 15;
8149     ShufIdxs[12] = 28; ShufIdxs[13] = 29; ShufIdxs[14] = 30; ShufIdxs[15] = 31;
8150     break;
8151   case OP_VSPLTISW0:
8152     for (unsigned i = 0; i != 16; ++i)
8153       ShufIdxs[i] = (i&3)+0;
8154     break;
8155   case OP_VSPLTISW1:
8156     for (unsigned i = 0; i != 16; ++i)
8157       ShufIdxs[i] = (i&3)+4;
8158     break;
8159   case OP_VSPLTISW2:
8160     for (unsigned i = 0; i != 16; ++i)
8161       ShufIdxs[i] = (i&3)+8;
8162     break;
8163   case OP_VSPLTISW3:
8164     for (unsigned i = 0; i != 16; ++i)
8165       ShufIdxs[i] = (i&3)+12;
8166     break;
8167   case OP_VSLDOI4:
8168     return BuildVSLDOI(OpLHS, OpRHS, 4, OpLHS.getValueType(), DAG, dl);
8169   case OP_VSLDOI8:
8170     return BuildVSLDOI(OpLHS, OpRHS, 8, OpLHS.getValueType(), DAG, dl);
8171   case OP_VSLDOI12:
8172     return BuildVSLDOI(OpLHS, OpRHS, 12, OpLHS.getValueType(), DAG, dl);
8173   }
8174   EVT VT = OpLHS.getValueType();
8175   OpLHS = DAG.getNode(ISD::BITCAST, dl, MVT::v16i8, OpLHS);
8176   OpRHS = DAG.getNode(ISD::BITCAST, dl, MVT::v16i8, OpRHS);
8177   SDValue T = DAG.getVectorShuffle(MVT::v16i8, dl, OpLHS, OpRHS, ShufIdxs);
8178   return DAG.getNode(ISD::BITCAST, dl, VT, T);
8179 }
8180 
8181 /// lowerToVINSERTB - Return the SDValue if this VECTOR_SHUFFLE can be handled
8182 /// by the VINSERTB instruction introduced in ISA 3.0, else just return default
8183 /// SDValue.
8184 SDValue PPCTargetLowering::lowerToVINSERTB(ShuffleVectorSDNode *N,
8185                                            SelectionDAG &DAG) const {
8186   const unsigned BytesInVector = 16;
8187   bool IsLE = Subtarget.isLittleEndian();
8188   SDLoc dl(N);
8189   SDValue V1 = N->getOperand(0);
8190   SDValue V2 = N->getOperand(1);
8191   unsigned ShiftElts = 0, InsertAtByte = 0;
8192   bool Swap = false;
8193 
8194   // Shifts required to get the byte we want at element 7.
8195   unsigned LittleEndianShifts[] = {8, 7,  6,  5,  4,  3,  2,  1,
8196                                    0, 15, 14, 13, 12, 11, 10, 9};
8197   unsigned BigEndianShifts[] = {9, 10, 11, 12, 13, 14, 15, 0,
8198                                 1, 2,  3,  4,  5,  6,  7,  8};
8199 
8200   ArrayRef<int> Mask = N->getMask();
8201   int OriginalOrder[] = {0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15};
8202 
8203   // For each mask element, find out if we're just inserting something
8204   // from V2 into V1 or vice versa.
8205   // Possible permutations inserting an element from V2 into V1:
8206   //   X, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15
8207   //   0, X, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15
8208   //   ...
8209   //   0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, X
8210   // Inserting from V1 into V2 will be similar, except mask range will be
8211   // [16,31].
8212 
8213   bool FoundCandidate = false;
8214   // If both vector operands for the shuffle are the same vector, the mask
8215   // will contain only elements from the first one and the second one will be
8216   // undef.
8217   unsigned VINSERTBSrcElem = IsLE ? 8 : 7;
8218   // Go through the mask of half-words to find an element that's being moved
8219   // from one vector to the other.
8220   for (unsigned i = 0; i < BytesInVector; ++i) {
8221     unsigned CurrentElement = Mask[i];
8222     // If 2nd operand is undefined, we should only look for element 7 in the
8223     // Mask.
8224     if (V2.isUndef() && CurrentElement != VINSERTBSrcElem)
8225       continue;
8226 
8227     bool OtherElementsInOrder = true;
8228     // Examine the other elements in the Mask to see if they're in original
8229     // order.
8230     for (unsigned j = 0; j < BytesInVector; ++j) {
8231       if (j == i)
8232         continue;
8233       // If CurrentElement is from V1 [0,15], then we the rest of the Mask to be
8234       // from V2 [16,31] and vice versa.  Unless the 2nd operand is undefined,
8235       // in which we always assume we're always picking from the 1st operand.
8236       int MaskOffset =
8237           (!V2.isUndef() && CurrentElement < BytesInVector) ? BytesInVector : 0;
8238       if (Mask[j] != OriginalOrder[j] + MaskOffset) {
8239         OtherElementsInOrder = false;
8240         break;
8241       }
8242     }
8243     // If other elements are in original order, we record the number of shifts
8244     // we need to get the element we want into element 7. Also record which byte
8245     // in the vector we should insert into.
8246     if (OtherElementsInOrder) {
8247       // If 2nd operand is undefined, we assume no shifts and no swapping.
8248       if (V2.isUndef()) {
8249         ShiftElts = 0;
8250         Swap = false;
8251       } else {
8252         // Only need the last 4-bits for shifts because operands will be swapped if CurrentElement is >= 2^4.
8253         ShiftElts = IsLE ? LittleEndianShifts[CurrentElement & 0xF]
8254                          : BigEndianShifts[CurrentElement & 0xF];
8255         Swap = CurrentElement < BytesInVector;
8256       }
8257       InsertAtByte = IsLE ? BytesInVector - (i + 1) : i;
8258       FoundCandidate = true;
8259       break;
8260     }
8261   }
8262 
8263   if (!FoundCandidate)
8264     return SDValue();
8265 
8266   // Candidate found, construct the proper SDAG sequence with VINSERTB,
8267   // optionally with VECSHL if shift is required.
8268   if (Swap)
8269     std::swap(V1, V2);
8270   if (V2.isUndef())
8271     V2 = V1;
8272   if (ShiftElts) {
8273     SDValue Shl = DAG.getNode(PPCISD::VECSHL, dl, MVT::v16i8, V2, V2,
8274                               DAG.getConstant(ShiftElts, dl, MVT::i32));
8275     return DAG.getNode(PPCISD::VECINSERT, dl, MVT::v16i8, V1, Shl,
8276                        DAG.getConstant(InsertAtByte, dl, MVT::i32));
8277   }
8278   return DAG.getNode(PPCISD::VECINSERT, dl, MVT::v16i8, V1, V2,
8279                      DAG.getConstant(InsertAtByte, dl, MVT::i32));
8280 }
8281 
8282 /// lowerToVINSERTH - Return the SDValue if this VECTOR_SHUFFLE can be handled
8283 /// by the VINSERTH instruction introduced in ISA 3.0, else just return default
8284 /// SDValue.
8285 SDValue PPCTargetLowering::lowerToVINSERTH(ShuffleVectorSDNode *N,
8286                                            SelectionDAG &DAG) const {
8287   const unsigned NumHalfWords = 8;
8288   const unsigned BytesInVector = NumHalfWords * 2;
8289   // Check that the shuffle is on half-words.
8290   if (!isNByteElemShuffleMask(N, 2, 1))
8291     return SDValue();
8292 
8293   bool IsLE = Subtarget.isLittleEndian();
8294   SDLoc dl(N);
8295   SDValue V1 = N->getOperand(0);
8296   SDValue V2 = N->getOperand(1);
8297   unsigned ShiftElts = 0, InsertAtByte = 0;
8298   bool Swap = false;
8299 
8300   // Shifts required to get the half-word we want at element 3.
8301   unsigned LittleEndianShifts[] = {4, 3, 2, 1, 0, 7, 6, 5};
8302   unsigned BigEndianShifts[] = {5, 6, 7, 0, 1, 2, 3, 4};
8303 
8304   uint32_t Mask = 0;
8305   uint32_t OriginalOrderLow = 0x1234567;
8306   uint32_t OriginalOrderHigh = 0x89ABCDEF;
8307   // Now we look at mask elements 0,2,4,6,8,10,12,14.  Pack the mask into a
8308   // 32-bit space, only need 4-bit nibbles per element.
8309   for (unsigned i = 0; i < NumHalfWords; ++i) {
8310     unsigned MaskShift = (NumHalfWords - 1 - i) * 4;
8311     Mask |= ((uint32_t)(N->getMaskElt(i * 2) / 2) << MaskShift);
8312   }
8313 
8314   // For each mask element, find out if we're just inserting something
8315   // from V2 into V1 or vice versa.  Possible permutations inserting an element
8316   // from V2 into V1:
8317   //   X, 1, 2, 3, 4, 5, 6, 7
8318   //   0, X, 2, 3, 4, 5, 6, 7
8319   //   0, 1, X, 3, 4, 5, 6, 7
8320   //   0, 1, 2, X, 4, 5, 6, 7
8321   //   0, 1, 2, 3, X, 5, 6, 7
8322   //   0, 1, 2, 3, 4, X, 6, 7
8323   //   0, 1, 2, 3, 4, 5, X, 7
8324   //   0, 1, 2, 3, 4, 5, 6, X
8325   // Inserting from V1 into V2 will be similar, except mask range will be [8,15].
8326 
8327   bool FoundCandidate = false;
8328   // Go through the mask of half-words to find an element that's being moved
8329   // from one vector to the other.
8330   for (unsigned i = 0; i < NumHalfWords; ++i) {
8331     unsigned MaskShift = (NumHalfWords - 1 - i) * 4;
8332     uint32_t MaskOneElt = (Mask >> MaskShift) & 0xF;
8333     uint32_t MaskOtherElts = ~(0xF << MaskShift);
8334     uint32_t TargetOrder = 0x0;
8335 
8336     // If both vector operands for the shuffle are the same vector, the mask
8337     // will contain only elements from the first one and the second one will be
8338     // undef.
8339     if (V2.isUndef()) {
8340       ShiftElts = 0;
8341       unsigned VINSERTHSrcElem = IsLE ? 4 : 3;
8342       TargetOrder = OriginalOrderLow;
8343       Swap = false;
8344       // Skip if not the correct element or mask of other elements don't equal
8345       // to our expected order.
8346       if (MaskOneElt == VINSERTHSrcElem &&
8347           (Mask & MaskOtherElts) == (TargetOrder & MaskOtherElts)) {
8348         InsertAtByte = IsLE ? BytesInVector - (i + 1) * 2 : i * 2;
8349         FoundCandidate = true;
8350         break;
8351       }
8352     } else { // If both operands are defined.
8353       // Target order is [8,15] if the current mask is between [0,7].
8354       TargetOrder =
8355           (MaskOneElt < NumHalfWords) ? OriginalOrderHigh : OriginalOrderLow;
8356       // Skip if mask of other elements don't equal our expected order.
8357       if ((Mask & MaskOtherElts) == (TargetOrder & MaskOtherElts)) {
8358         // We only need the last 3 bits for the number of shifts.
8359         ShiftElts = IsLE ? LittleEndianShifts[MaskOneElt & 0x7]
8360                          : BigEndianShifts[MaskOneElt & 0x7];
8361         InsertAtByte = IsLE ? BytesInVector - (i + 1) * 2 : i * 2;
8362         Swap = MaskOneElt < NumHalfWords;
8363         FoundCandidate = true;
8364         break;
8365       }
8366     }
8367   }
8368 
8369   if (!FoundCandidate)
8370     return SDValue();
8371 
8372   // Candidate found, construct the proper SDAG sequence with VINSERTH,
8373   // optionally with VECSHL if shift is required.
8374   if (Swap)
8375     std::swap(V1, V2);
8376   if (V2.isUndef())
8377     V2 = V1;
8378   SDValue Conv1 = DAG.getNode(ISD::BITCAST, dl, MVT::v8i16, V1);
8379   if (ShiftElts) {
8380     // Double ShiftElts because we're left shifting on v16i8 type.
8381     SDValue Shl = DAG.getNode(PPCISD::VECSHL, dl, MVT::v16i8, V2, V2,
8382                               DAG.getConstant(2 * ShiftElts, dl, MVT::i32));
8383     SDValue Conv2 = DAG.getNode(ISD::BITCAST, dl, MVT::v8i16, Shl);
8384     SDValue Ins = DAG.getNode(PPCISD::VECINSERT, dl, MVT::v8i16, Conv1, Conv2,
8385                               DAG.getConstant(InsertAtByte, dl, MVT::i32));
8386     return DAG.getNode(ISD::BITCAST, dl, MVT::v16i8, Ins);
8387   }
8388   SDValue Conv2 = DAG.getNode(ISD::BITCAST, dl, MVT::v8i16, V2);
8389   SDValue Ins = DAG.getNode(PPCISD::VECINSERT, dl, MVT::v8i16, Conv1, Conv2,
8390                             DAG.getConstant(InsertAtByte, dl, MVT::i32));
8391   return DAG.getNode(ISD::BITCAST, dl, MVT::v16i8, Ins);
8392 }
8393 
8394 /// LowerVECTOR_SHUFFLE - Return the code we lower for VECTOR_SHUFFLE.  If this
8395 /// is a shuffle we can handle in a single instruction, return it.  Otherwise,
8396 /// return the code it can be lowered into.  Worst case, it can always be
8397 /// lowered into a vperm.
8398 SDValue PPCTargetLowering::LowerVECTOR_SHUFFLE(SDValue Op,
8399                                                SelectionDAG &DAG) const {
8400   SDLoc dl(Op);
8401   SDValue V1 = Op.getOperand(0);
8402   SDValue V2 = Op.getOperand(1);
8403   ShuffleVectorSDNode *SVOp = cast<ShuffleVectorSDNode>(Op);
8404   EVT VT = Op.getValueType();
8405   bool isLittleEndian = Subtarget.isLittleEndian();
8406 
8407   unsigned ShiftElts, InsertAtByte;
8408   bool Swap = false;
8409   if (Subtarget.hasP9Vector() &&
8410       PPC::isXXINSERTWMask(SVOp, ShiftElts, InsertAtByte, Swap,
8411                            isLittleEndian)) {
8412     if (Swap)
8413       std::swap(V1, V2);
8414     SDValue Conv1 = DAG.getNode(ISD::BITCAST, dl, MVT::v4i32, V1);
8415     SDValue Conv2 = DAG.getNode(ISD::BITCAST, dl, MVT::v4i32, V2);
8416     if (ShiftElts) {
8417       SDValue Shl = DAG.getNode(PPCISD::VECSHL, dl, MVT::v4i32, Conv2, Conv2,
8418                                 DAG.getConstant(ShiftElts, dl, MVT::i32));
8419       SDValue Ins = DAG.getNode(PPCISD::VECINSERT, dl, MVT::v4i32, Conv1, Shl,
8420                                 DAG.getConstant(InsertAtByte, dl, MVT::i32));
8421       return DAG.getNode(ISD::BITCAST, dl, MVT::v16i8, Ins);
8422     }
8423     SDValue Ins = DAG.getNode(PPCISD::VECINSERT, dl, MVT::v4i32, Conv1, Conv2,
8424                               DAG.getConstant(InsertAtByte, dl, MVT::i32));
8425     return DAG.getNode(ISD::BITCAST, dl, MVT::v16i8, Ins);
8426   }
8427 
8428   if (Subtarget.hasP9Altivec()) {
8429     SDValue NewISDNode;
8430     if ((NewISDNode = lowerToVINSERTH(SVOp, DAG)))
8431       return NewISDNode;
8432 
8433     if ((NewISDNode = lowerToVINSERTB(SVOp, DAG)))
8434       return NewISDNode;
8435   }
8436 
8437   if (Subtarget.hasVSX() &&
8438       PPC::isXXSLDWIShuffleMask(SVOp, ShiftElts, Swap, isLittleEndian)) {
8439     if (Swap)
8440       std::swap(V1, V2);
8441     SDValue Conv1 = DAG.getNode(ISD::BITCAST, dl, MVT::v4i32, V1);
8442     SDValue Conv2 =
8443         DAG.getNode(ISD::BITCAST, dl, MVT::v4i32, V2.isUndef() ? V1 : V2);
8444 
8445     SDValue Shl = DAG.getNode(PPCISD::VECSHL, dl, MVT::v4i32, Conv1, Conv2,
8446                               DAG.getConstant(ShiftElts, dl, MVT::i32));
8447     return DAG.getNode(ISD::BITCAST, dl, MVT::v16i8, Shl);
8448   }
8449 
8450   if (Subtarget.hasVSX() &&
8451     PPC::isXXPERMDIShuffleMask(SVOp, ShiftElts, Swap, isLittleEndian)) {
8452     if (Swap)
8453       std::swap(V1, V2);
8454     SDValue Conv1 = DAG.getNode(ISD::BITCAST, dl, MVT::v2i64, V1);
8455     SDValue Conv2 =
8456         DAG.getNode(ISD::BITCAST, dl, MVT::v2i64, V2.isUndef() ? V1 : V2);
8457 
8458     SDValue PermDI = DAG.getNode(PPCISD::XXPERMDI, dl, MVT::v2i64, Conv1, Conv2,
8459                               DAG.getConstant(ShiftElts, dl, MVT::i32));
8460     return DAG.getNode(ISD::BITCAST, dl, MVT::v16i8, PermDI);
8461   }
8462 
8463   if (Subtarget.hasP9Vector()) {
8464      if (PPC::isXXBRHShuffleMask(SVOp)) {
8465       SDValue Conv = DAG.getNode(ISD::BITCAST, dl, MVT::v8i16, V1);
8466       SDValue ReveHWord = DAG.getNode(PPCISD::XXREVERSE, dl, MVT::v8i16, Conv);
8467       return DAG.getNode(ISD::BITCAST, dl, MVT::v16i8, ReveHWord);
8468     } else if (PPC::isXXBRWShuffleMask(SVOp)) {
8469       SDValue Conv = DAG.getNode(ISD::BITCAST, dl, MVT::v4i32, V1);
8470       SDValue ReveWord = DAG.getNode(PPCISD::XXREVERSE, dl, MVT::v4i32, Conv);
8471       return DAG.getNode(ISD::BITCAST, dl, MVT::v16i8, ReveWord);
8472     } else if (PPC::isXXBRDShuffleMask(SVOp)) {
8473       SDValue Conv = DAG.getNode(ISD::BITCAST, dl, MVT::v2i64, V1);
8474       SDValue ReveDWord = DAG.getNode(PPCISD::XXREVERSE, dl, MVT::v2i64, Conv);
8475       return DAG.getNode(ISD::BITCAST, dl, MVT::v16i8, ReveDWord);
8476     } else if (PPC::isXXBRQShuffleMask(SVOp)) {
8477       SDValue Conv = DAG.getNode(ISD::BITCAST, dl, MVT::v1i128, V1);
8478       SDValue ReveQWord = DAG.getNode(PPCISD::XXREVERSE, dl, MVT::v1i128, Conv);
8479       return DAG.getNode(ISD::BITCAST, dl, MVT::v16i8, ReveQWord);
8480     }
8481   }
8482 
8483   if (Subtarget.hasVSX()) {
8484     if (V2.isUndef() && PPC::isSplatShuffleMask(SVOp, 4)) {
8485       int SplatIdx = PPC::getVSPLTImmediate(SVOp, 4, DAG);
8486 
8487       SDValue Conv = DAG.getNode(ISD::BITCAST, dl, MVT::v4i32, V1);
8488       SDValue Splat = DAG.getNode(PPCISD::XXSPLT, dl, MVT::v4i32, Conv,
8489                                   DAG.getConstant(SplatIdx, dl, MVT::i32));
8490       return DAG.getNode(ISD::BITCAST, dl, MVT::v16i8, Splat);
8491     }
8492 
8493     // Left shifts of 8 bytes are actually swaps. Convert accordingly.
8494     if (V2.isUndef() && PPC::isVSLDOIShuffleMask(SVOp, 1, DAG) == 8) {
8495       SDValue Conv = DAG.getNode(ISD::BITCAST, dl, MVT::v2f64, V1);
8496       SDValue Swap = DAG.getNode(PPCISD::SWAP_NO_CHAIN, dl, MVT::v2f64, Conv);
8497       return DAG.getNode(ISD::BITCAST, dl, MVT::v16i8, Swap);
8498     }
8499   }
8500 
8501   if (Subtarget.hasQPX()) {
8502     if (VT.getVectorNumElements() != 4)
8503       return SDValue();
8504 
8505     if (V2.isUndef()) V2 = V1;
8506 
8507     int AlignIdx = PPC::isQVALIGNIShuffleMask(SVOp);
8508     if (AlignIdx != -1) {
8509       return DAG.getNode(PPCISD::QVALIGNI, dl, VT, V1, V2,
8510                          DAG.getConstant(AlignIdx, dl, MVT::i32));
8511     } else if (SVOp->isSplat()) {
8512       int SplatIdx = SVOp->getSplatIndex();
8513       if (SplatIdx >= 4) {
8514         std::swap(V1, V2);
8515         SplatIdx -= 4;
8516       }
8517 
8518       return DAG.getNode(PPCISD::QVESPLATI, dl, VT, V1,
8519                          DAG.getConstant(SplatIdx, dl, MVT::i32));
8520     }
8521 
8522     // Lower this into a qvgpci/qvfperm pair.
8523 
8524     // Compute the qvgpci literal
8525     unsigned idx = 0;
8526     for (unsigned i = 0; i < 4; ++i) {
8527       int m = SVOp->getMaskElt(i);
8528       unsigned mm = m >= 0 ? (unsigned) m : i;
8529       idx |= mm << (3-i)*3;
8530     }
8531 
8532     SDValue V3 = DAG.getNode(PPCISD::QVGPCI, dl, MVT::v4f64,
8533                              DAG.getConstant(idx, dl, MVT::i32));
8534     return DAG.getNode(PPCISD::QVFPERM, dl, VT, V1, V2, V3);
8535   }
8536 
8537   // Cases that are handled by instructions that take permute immediates
8538   // (such as vsplt*) should be left as VECTOR_SHUFFLE nodes so they can be
8539   // selected by the instruction selector.
8540   if (V2.isUndef()) {
8541     if (PPC::isSplatShuffleMask(SVOp, 1) ||
8542         PPC::isSplatShuffleMask(SVOp, 2) ||
8543         PPC::isSplatShuffleMask(SVOp, 4) ||
8544         PPC::isVPKUWUMShuffleMask(SVOp, 1, DAG) ||
8545         PPC::isVPKUHUMShuffleMask(SVOp, 1, DAG) ||
8546         PPC::isVSLDOIShuffleMask(SVOp, 1, DAG) != -1 ||
8547         PPC::isVMRGLShuffleMask(SVOp, 1, 1, DAG) ||
8548         PPC::isVMRGLShuffleMask(SVOp, 2, 1, DAG) ||
8549         PPC::isVMRGLShuffleMask(SVOp, 4, 1, DAG) ||
8550         PPC::isVMRGHShuffleMask(SVOp, 1, 1, DAG) ||
8551         PPC::isVMRGHShuffleMask(SVOp, 2, 1, DAG) ||
8552         PPC::isVMRGHShuffleMask(SVOp, 4, 1, DAG) ||
8553         (Subtarget.hasP8Altivec() && (
8554          PPC::isVPKUDUMShuffleMask(SVOp, 1, DAG) ||
8555          PPC::isVMRGEOShuffleMask(SVOp, true, 1, DAG) ||
8556          PPC::isVMRGEOShuffleMask(SVOp, false, 1, DAG)))) {
8557       return Op;
8558     }
8559   }
8560 
8561   // Altivec has a variety of "shuffle immediates" that take two vector inputs
8562   // and produce a fixed permutation.  If any of these match, do not lower to
8563   // VPERM.
8564   unsigned int ShuffleKind = isLittleEndian ? 2 : 0;
8565   if (PPC::isVPKUWUMShuffleMask(SVOp, ShuffleKind, DAG) ||
8566       PPC::isVPKUHUMShuffleMask(SVOp, ShuffleKind, DAG) ||
8567       PPC::isVSLDOIShuffleMask(SVOp, ShuffleKind, DAG) != -1 ||
8568       PPC::isVMRGLShuffleMask(SVOp, 1, ShuffleKind, DAG) ||
8569       PPC::isVMRGLShuffleMask(SVOp, 2, ShuffleKind, DAG) ||
8570       PPC::isVMRGLShuffleMask(SVOp, 4, ShuffleKind, DAG) ||
8571       PPC::isVMRGHShuffleMask(SVOp, 1, ShuffleKind, DAG) ||
8572       PPC::isVMRGHShuffleMask(SVOp, 2, ShuffleKind, DAG) ||
8573       PPC::isVMRGHShuffleMask(SVOp, 4, ShuffleKind, DAG) ||
8574       (Subtarget.hasP8Altivec() && (
8575        PPC::isVPKUDUMShuffleMask(SVOp, ShuffleKind, DAG) ||
8576        PPC::isVMRGEOShuffleMask(SVOp, true, ShuffleKind, DAG) ||
8577        PPC::isVMRGEOShuffleMask(SVOp, false, ShuffleKind, DAG))))
8578     return Op;
8579 
8580   // Check to see if this is a shuffle of 4-byte values.  If so, we can use our
8581   // perfect shuffle table to emit an optimal matching sequence.
8582   ArrayRef<int> PermMask = SVOp->getMask();
8583 
8584   unsigned PFIndexes[4];
8585   bool isFourElementShuffle = true;
8586   for (unsigned i = 0; i != 4 && isFourElementShuffle; ++i) { // Element number
8587     unsigned EltNo = 8;   // Start out undef.
8588     for (unsigned j = 0; j != 4; ++j) {  // Intra-element byte.
8589       if (PermMask[i*4+j] < 0)
8590         continue;   // Undef, ignore it.
8591 
8592       unsigned ByteSource = PermMask[i*4+j];
8593       if ((ByteSource & 3) != j) {
8594         isFourElementShuffle = false;
8595         break;
8596       }
8597 
8598       if (EltNo == 8) {
8599         EltNo = ByteSource/4;
8600       } else if (EltNo != ByteSource/4) {
8601         isFourElementShuffle = false;
8602         break;
8603       }
8604     }
8605     PFIndexes[i] = EltNo;
8606   }
8607 
8608   // If this shuffle can be expressed as a shuffle of 4-byte elements, use the
8609   // perfect shuffle vector to determine if it is cost effective to do this as
8610   // discrete instructions, or whether we should use a vperm.
8611   // For now, we skip this for little endian until such time as we have a
8612   // little-endian perfect shuffle table.
8613   if (isFourElementShuffle && !isLittleEndian) {
8614     // Compute the index in the perfect shuffle table.
8615     unsigned PFTableIndex =
8616       PFIndexes[0]*9*9*9+PFIndexes[1]*9*9+PFIndexes[2]*9+PFIndexes[3];
8617 
8618     unsigned PFEntry = PerfectShuffleTable[PFTableIndex];
8619     unsigned Cost  = (PFEntry >> 30);
8620 
8621     // Determining when to avoid vperm is tricky.  Many things affect the cost
8622     // of vperm, particularly how many times the perm mask needs to be computed.
8623     // For example, if the perm mask can be hoisted out of a loop or is already
8624     // used (perhaps because there are multiple permutes with the same shuffle
8625     // mask?) the vperm has a cost of 1.  OTOH, hoisting the permute mask out of
8626     // the loop requires an extra register.
8627     //
8628     // As a compromise, we only emit discrete instructions if the shuffle can be
8629     // generated in 3 or fewer operations.  When we have loop information
8630     // available, if this block is within a loop, we should avoid using vperm
8631     // for 3-operation perms and use a constant pool load instead.
8632     if (Cost < 3)
8633       return GeneratePerfectShuffle(PFEntry, V1, V2, DAG, dl);
8634   }
8635 
8636   // Lower this to a VPERM(V1, V2, V3) expression, where V3 is a constant
8637   // vector that will get spilled to the constant pool.
8638   if (V2.isUndef()) V2 = V1;
8639 
8640   // The SHUFFLE_VECTOR mask is almost exactly what we want for vperm, except
8641   // that it is in input element units, not in bytes.  Convert now.
8642 
8643   // For little endian, the order of the input vectors is reversed, and
8644   // the permutation mask is complemented with respect to 31.  This is
8645   // necessary to produce proper semantics with the big-endian-biased vperm
8646   // instruction.
8647   EVT EltVT = V1.getValueType().getVectorElementType();
8648   unsigned BytesPerElement = EltVT.getSizeInBits()/8;
8649 
8650   SmallVector<SDValue, 16> ResultMask;
8651   for (unsigned i = 0, e = VT.getVectorNumElements(); i != e; ++i) {
8652     unsigned SrcElt = PermMask[i] < 0 ? 0 : PermMask[i];
8653 
8654     for (unsigned j = 0; j != BytesPerElement; ++j)
8655       if (isLittleEndian)
8656         ResultMask.push_back(DAG.getConstant(31 - (SrcElt*BytesPerElement + j),
8657                                              dl, MVT::i32));
8658       else
8659         ResultMask.push_back(DAG.getConstant(SrcElt*BytesPerElement + j, dl,
8660                                              MVT::i32));
8661   }
8662 
8663   SDValue VPermMask = DAG.getBuildVector(MVT::v16i8, dl, ResultMask);
8664   if (isLittleEndian)
8665     return DAG.getNode(PPCISD::VPERM, dl, V1.getValueType(),
8666                        V2, V1, VPermMask);
8667   else
8668     return DAG.getNode(PPCISD::VPERM, dl, V1.getValueType(),
8669                        V1, V2, VPermMask);
8670 }
8671 
8672 /// getVectorCompareInfo - Given an intrinsic, return false if it is not a
8673 /// vector comparison.  If it is, return true and fill in Opc/isDot with
8674 /// information about the intrinsic.
8675 static bool getVectorCompareInfo(SDValue Intrin, int &CompareOpc,
8676                                  bool &isDot, const PPCSubtarget &Subtarget) {
8677   unsigned IntrinsicID =
8678       cast<ConstantSDNode>(Intrin.getOperand(0))->getZExtValue();
8679   CompareOpc = -1;
8680   isDot = false;
8681   switch (IntrinsicID) {
8682   default:
8683     return false;
8684   // Comparison predicates.
8685   case Intrinsic::ppc_altivec_vcmpbfp_p:
8686     CompareOpc = 966;
8687     isDot = true;
8688     break;
8689   case Intrinsic::ppc_altivec_vcmpeqfp_p:
8690     CompareOpc = 198;
8691     isDot = true;
8692     break;
8693   case Intrinsic::ppc_altivec_vcmpequb_p:
8694     CompareOpc = 6;
8695     isDot = true;
8696     break;
8697   case Intrinsic::ppc_altivec_vcmpequh_p:
8698     CompareOpc = 70;
8699     isDot = true;
8700     break;
8701   case Intrinsic::ppc_altivec_vcmpequw_p:
8702     CompareOpc = 134;
8703     isDot = true;
8704     break;
8705   case Intrinsic::ppc_altivec_vcmpequd_p:
8706     if (Subtarget.hasP8Altivec()) {
8707       CompareOpc = 199;
8708       isDot = true;
8709     } else
8710       return false;
8711     break;
8712   case Intrinsic::ppc_altivec_vcmpneb_p:
8713   case Intrinsic::ppc_altivec_vcmpneh_p:
8714   case Intrinsic::ppc_altivec_vcmpnew_p:
8715   case Intrinsic::ppc_altivec_vcmpnezb_p:
8716   case Intrinsic::ppc_altivec_vcmpnezh_p:
8717   case Intrinsic::ppc_altivec_vcmpnezw_p:
8718     if (Subtarget.hasP9Altivec()) {
8719       switch (IntrinsicID) {
8720       default:
8721         llvm_unreachable("Unknown comparison intrinsic.");
8722       case Intrinsic::ppc_altivec_vcmpneb_p:
8723         CompareOpc = 7;
8724         break;
8725       case Intrinsic::ppc_altivec_vcmpneh_p:
8726         CompareOpc = 71;
8727         break;
8728       case Intrinsic::ppc_altivec_vcmpnew_p:
8729         CompareOpc = 135;
8730         break;
8731       case Intrinsic::ppc_altivec_vcmpnezb_p:
8732         CompareOpc = 263;
8733         break;
8734       case Intrinsic::ppc_altivec_vcmpnezh_p:
8735         CompareOpc = 327;
8736         break;
8737       case Intrinsic::ppc_altivec_vcmpnezw_p:
8738         CompareOpc = 391;
8739         break;
8740       }
8741       isDot = true;
8742     } else
8743       return false;
8744     break;
8745   case Intrinsic::ppc_altivec_vcmpgefp_p:
8746     CompareOpc = 454;
8747     isDot = true;
8748     break;
8749   case Intrinsic::ppc_altivec_vcmpgtfp_p:
8750     CompareOpc = 710;
8751     isDot = true;
8752     break;
8753   case Intrinsic::ppc_altivec_vcmpgtsb_p:
8754     CompareOpc = 774;
8755     isDot = true;
8756     break;
8757   case Intrinsic::ppc_altivec_vcmpgtsh_p:
8758     CompareOpc = 838;
8759     isDot = true;
8760     break;
8761   case Intrinsic::ppc_altivec_vcmpgtsw_p:
8762     CompareOpc = 902;
8763     isDot = true;
8764     break;
8765   case Intrinsic::ppc_altivec_vcmpgtsd_p:
8766     if (Subtarget.hasP8Altivec()) {
8767       CompareOpc = 967;
8768       isDot = true;
8769     } else
8770       return false;
8771     break;
8772   case Intrinsic::ppc_altivec_vcmpgtub_p:
8773     CompareOpc = 518;
8774     isDot = true;
8775     break;
8776   case Intrinsic::ppc_altivec_vcmpgtuh_p:
8777     CompareOpc = 582;
8778     isDot = true;
8779     break;
8780   case Intrinsic::ppc_altivec_vcmpgtuw_p:
8781     CompareOpc = 646;
8782     isDot = true;
8783     break;
8784   case Intrinsic::ppc_altivec_vcmpgtud_p:
8785     if (Subtarget.hasP8Altivec()) {
8786       CompareOpc = 711;
8787       isDot = true;
8788     } else
8789       return false;
8790     break;
8791 
8792   // VSX predicate comparisons use the same infrastructure
8793   case Intrinsic::ppc_vsx_xvcmpeqdp_p:
8794   case Intrinsic::ppc_vsx_xvcmpgedp_p:
8795   case Intrinsic::ppc_vsx_xvcmpgtdp_p:
8796   case Intrinsic::ppc_vsx_xvcmpeqsp_p:
8797   case Intrinsic::ppc_vsx_xvcmpgesp_p:
8798   case Intrinsic::ppc_vsx_xvcmpgtsp_p:
8799     if (Subtarget.hasVSX()) {
8800       switch (IntrinsicID) {
8801       case Intrinsic::ppc_vsx_xvcmpeqdp_p:
8802         CompareOpc = 99;
8803         break;
8804       case Intrinsic::ppc_vsx_xvcmpgedp_p:
8805         CompareOpc = 115;
8806         break;
8807       case Intrinsic::ppc_vsx_xvcmpgtdp_p:
8808         CompareOpc = 107;
8809         break;
8810       case Intrinsic::ppc_vsx_xvcmpeqsp_p:
8811         CompareOpc = 67;
8812         break;
8813       case Intrinsic::ppc_vsx_xvcmpgesp_p:
8814         CompareOpc = 83;
8815         break;
8816       case Intrinsic::ppc_vsx_xvcmpgtsp_p:
8817         CompareOpc = 75;
8818         break;
8819       }
8820       isDot = true;
8821     } else
8822       return false;
8823     break;
8824 
8825   // Normal Comparisons.
8826   case Intrinsic::ppc_altivec_vcmpbfp:
8827     CompareOpc = 966;
8828     break;
8829   case Intrinsic::ppc_altivec_vcmpeqfp:
8830     CompareOpc = 198;
8831     break;
8832   case Intrinsic::ppc_altivec_vcmpequb:
8833     CompareOpc = 6;
8834     break;
8835   case Intrinsic::ppc_altivec_vcmpequh:
8836     CompareOpc = 70;
8837     break;
8838   case Intrinsic::ppc_altivec_vcmpequw:
8839     CompareOpc = 134;
8840     break;
8841   case Intrinsic::ppc_altivec_vcmpequd:
8842     if (Subtarget.hasP8Altivec())
8843       CompareOpc = 199;
8844     else
8845       return false;
8846     break;
8847   case Intrinsic::ppc_altivec_vcmpneb:
8848   case Intrinsic::ppc_altivec_vcmpneh:
8849   case Intrinsic::ppc_altivec_vcmpnew:
8850   case Intrinsic::ppc_altivec_vcmpnezb:
8851   case Intrinsic::ppc_altivec_vcmpnezh:
8852   case Intrinsic::ppc_altivec_vcmpnezw:
8853     if (Subtarget.hasP9Altivec())
8854       switch (IntrinsicID) {
8855       default:
8856         llvm_unreachable("Unknown comparison intrinsic.");
8857       case Intrinsic::ppc_altivec_vcmpneb:
8858         CompareOpc = 7;
8859         break;
8860       case Intrinsic::ppc_altivec_vcmpneh:
8861         CompareOpc = 71;
8862         break;
8863       case Intrinsic::ppc_altivec_vcmpnew:
8864         CompareOpc = 135;
8865         break;
8866       case Intrinsic::ppc_altivec_vcmpnezb:
8867         CompareOpc = 263;
8868         break;
8869       case Intrinsic::ppc_altivec_vcmpnezh:
8870         CompareOpc = 327;
8871         break;
8872       case Intrinsic::ppc_altivec_vcmpnezw:
8873         CompareOpc = 391;
8874         break;
8875       }
8876     else
8877       return false;
8878     break;
8879   case Intrinsic::ppc_altivec_vcmpgefp:
8880     CompareOpc = 454;
8881     break;
8882   case Intrinsic::ppc_altivec_vcmpgtfp:
8883     CompareOpc = 710;
8884     break;
8885   case Intrinsic::ppc_altivec_vcmpgtsb:
8886     CompareOpc = 774;
8887     break;
8888   case Intrinsic::ppc_altivec_vcmpgtsh:
8889     CompareOpc = 838;
8890     break;
8891   case Intrinsic::ppc_altivec_vcmpgtsw:
8892     CompareOpc = 902;
8893     break;
8894   case Intrinsic::ppc_altivec_vcmpgtsd:
8895     if (Subtarget.hasP8Altivec())
8896       CompareOpc = 967;
8897     else
8898       return false;
8899     break;
8900   case Intrinsic::ppc_altivec_vcmpgtub:
8901     CompareOpc = 518;
8902     break;
8903   case Intrinsic::ppc_altivec_vcmpgtuh:
8904     CompareOpc = 582;
8905     break;
8906   case Intrinsic::ppc_altivec_vcmpgtuw:
8907     CompareOpc = 646;
8908     break;
8909   case Intrinsic::ppc_altivec_vcmpgtud:
8910     if (Subtarget.hasP8Altivec())
8911       CompareOpc = 711;
8912     else
8913       return false;
8914     break;
8915   }
8916   return true;
8917 }
8918 
8919 /// LowerINTRINSIC_WO_CHAIN - If this is an intrinsic that we want to custom
8920 /// lower, do it, otherwise return null.
8921 SDValue PPCTargetLowering::LowerINTRINSIC_WO_CHAIN(SDValue Op,
8922                                                    SelectionDAG &DAG) const {
8923   unsigned IntrinsicID =
8924     cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue();
8925 
8926   SDLoc dl(Op);
8927 
8928   if (IntrinsicID == Intrinsic::thread_pointer) {
8929     // Reads the thread pointer register, used for __builtin_thread_pointer.
8930     if (Subtarget.isPPC64())
8931       return DAG.getRegister(PPC::X13, MVT::i64);
8932     return DAG.getRegister(PPC::R2, MVT::i32);
8933   }
8934 
8935   // We are looking for absolute values here.
8936   // The idea is to try to fit one of two patterns:
8937   //  max (a, (0-a))  OR  max ((0-a), a)
8938   if (Subtarget.hasP9Vector() &&
8939       (IntrinsicID == Intrinsic::ppc_altivec_vmaxsw ||
8940        IntrinsicID == Intrinsic::ppc_altivec_vmaxsh ||
8941        IntrinsicID == Intrinsic::ppc_altivec_vmaxsb)) {
8942     SDValue V1 = Op.getOperand(1);
8943     SDValue V2 = Op.getOperand(2);
8944     if (V1.getSimpleValueType() == V2.getSimpleValueType() &&
8945         (V1.getSimpleValueType() == MVT::v4i32 ||
8946          V1.getSimpleValueType() == MVT::v8i16 ||
8947          V1.getSimpleValueType() == MVT::v16i8)) {
8948       if ( V1.getOpcode() == ISD::SUB &&
8949            ISD::isBuildVectorAllZeros(V1.getOperand(0).getNode()) &&
8950            V1.getOperand(1) == V2 ) {
8951         // Generate the abs instruction with the operands
8952         return DAG.getNode(ISD::ABS, dl, V2.getValueType(),V2);
8953       }
8954 
8955       if ( V2.getOpcode() == ISD::SUB &&
8956            ISD::isBuildVectorAllZeros(V2.getOperand(0).getNode()) &&
8957            V2.getOperand(1) == V1 ) {
8958         // Generate the abs instruction with the operands
8959         return DAG.getNode(ISD::ABS, dl, V1.getValueType(),V1);
8960       }
8961     }
8962   }
8963 
8964   // If this is a lowered altivec predicate compare, CompareOpc is set to the
8965   // opcode number of the comparison.
8966   int CompareOpc;
8967   bool isDot;
8968   if (!getVectorCompareInfo(Op, CompareOpc, isDot, Subtarget))
8969     return SDValue();    // Don't custom lower most intrinsics.
8970 
8971   // If this is a non-dot comparison, make the VCMP node and we are done.
8972   if (!isDot) {
8973     SDValue Tmp = DAG.getNode(PPCISD::VCMP, dl, Op.getOperand(2).getValueType(),
8974                               Op.getOperand(1), Op.getOperand(2),
8975                               DAG.getConstant(CompareOpc, dl, MVT::i32));
8976     return DAG.getNode(ISD::BITCAST, dl, Op.getValueType(), Tmp);
8977   }
8978 
8979   // Create the PPCISD altivec 'dot' comparison node.
8980   SDValue Ops[] = {
8981     Op.getOperand(2),  // LHS
8982     Op.getOperand(3),  // RHS
8983     DAG.getConstant(CompareOpc, dl, MVT::i32)
8984   };
8985   EVT VTs[] = { Op.getOperand(2).getValueType(), MVT::Glue };
8986   SDValue CompNode = DAG.getNode(PPCISD::VCMPo, dl, VTs, Ops);
8987 
8988   // Now that we have the comparison, emit a copy from the CR to a GPR.
8989   // This is flagged to the above dot comparison.
8990   SDValue Flags = DAG.getNode(PPCISD::MFOCRF, dl, MVT::i32,
8991                                 DAG.getRegister(PPC::CR6, MVT::i32),
8992                                 CompNode.getValue(1));
8993 
8994   // Unpack the result based on how the target uses it.
8995   unsigned BitNo;   // Bit # of CR6.
8996   bool InvertBit;   // Invert result?
8997   switch (cast<ConstantSDNode>(Op.getOperand(1))->getZExtValue()) {
8998   default:  // Can't happen, don't crash on invalid number though.
8999   case 0:   // Return the value of the EQ bit of CR6.
9000     BitNo = 0; InvertBit = false;
9001     break;
9002   case 1:   // Return the inverted value of the EQ bit of CR6.
9003     BitNo = 0; InvertBit = true;
9004     break;
9005   case 2:   // Return the value of the LT bit of CR6.
9006     BitNo = 2; InvertBit = false;
9007     break;
9008   case 3:   // Return the inverted value of the LT bit of CR6.
9009     BitNo = 2; InvertBit = true;
9010     break;
9011   }
9012 
9013   // Shift the bit into the low position.
9014   Flags = DAG.getNode(ISD::SRL, dl, MVT::i32, Flags,
9015                       DAG.getConstant(8 - (3 - BitNo), dl, MVT::i32));
9016   // Isolate the bit.
9017   Flags = DAG.getNode(ISD::AND, dl, MVT::i32, Flags,
9018                       DAG.getConstant(1, dl, MVT::i32));
9019 
9020   // If we are supposed to, toggle the bit.
9021   if (InvertBit)
9022     Flags = DAG.getNode(ISD::XOR, dl, MVT::i32, Flags,
9023                         DAG.getConstant(1, dl, MVT::i32));
9024   return Flags;
9025 }
9026 
9027 SDValue PPCTargetLowering::LowerINTRINSIC_VOID(SDValue Op,
9028                                                SelectionDAG &DAG) const {
9029   // SelectionDAGBuilder::visitTargetIntrinsic may insert one extra chain to
9030   // the beginning of the argument list.
9031   int ArgStart = isa<ConstantSDNode>(Op.getOperand(0)) ? 0 : 1;
9032   SDLoc DL(Op);
9033   switch (cast<ConstantSDNode>(Op.getOperand(ArgStart))->getZExtValue()) {
9034   case Intrinsic::ppc_cfence: {
9035     assert(ArgStart == 1 && "llvm.ppc.cfence must carry a chain argument.");
9036     assert(Subtarget.isPPC64() && "Only 64-bit is supported for now.");
9037     return SDValue(DAG.getMachineNode(PPC::CFENCE8, DL, MVT::Other,
9038                                       DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i64,
9039                                                   Op.getOperand(ArgStart + 1)),
9040                                       Op.getOperand(0)),
9041                    0);
9042   }
9043   default:
9044     break;
9045   }
9046   return SDValue();
9047 }
9048 
9049 SDValue PPCTargetLowering::LowerREM(SDValue Op, SelectionDAG &DAG) const {
9050   // Check for a DIV with the same operands as this REM.
9051   for (auto UI : Op.getOperand(1)->uses()) {
9052     if ((Op.getOpcode() == ISD::SREM && UI->getOpcode() == ISD::SDIV) ||
9053         (Op.getOpcode() == ISD::UREM && UI->getOpcode() == ISD::UDIV))
9054       if (UI->getOperand(0) == Op.getOperand(0) &&
9055           UI->getOperand(1) == Op.getOperand(1))
9056         return SDValue();
9057   }
9058   return Op;
9059 }
9060 
9061 // Lower scalar BSWAP64 to xxbrd.
9062 SDValue PPCTargetLowering::LowerBSWAP(SDValue Op, SelectionDAG &DAG) const {
9063   SDLoc dl(Op);
9064   // MTVSRDD
9065   Op = DAG.getNode(ISD::BUILD_VECTOR, dl, MVT::v2i64, Op.getOperand(0),
9066                    Op.getOperand(0));
9067   // XXBRD
9068   Op = DAG.getNode(PPCISD::XXREVERSE, dl, MVT::v2i64, Op);
9069   // MFVSRD
9070   int VectorIndex = 0;
9071   if (Subtarget.isLittleEndian())
9072     VectorIndex = 1;
9073   Op = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, MVT::i64, Op,
9074                    DAG.getTargetConstant(VectorIndex, dl, MVT::i32));
9075   return Op;
9076 }
9077 
9078 // ATOMIC_CMP_SWAP for i8/i16 needs to zero-extend its input since it will be
9079 // compared to a value that is atomically loaded (atomic loads zero-extend).
9080 SDValue PPCTargetLowering::LowerATOMIC_CMP_SWAP(SDValue Op,
9081                                                 SelectionDAG &DAG) const {
9082   assert(Op.getOpcode() == ISD::ATOMIC_CMP_SWAP &&
9083          "Expecting an atomic compare-and-swap here.");
9084   SDLoc dl(Op);
9085   auto *AtomicNode = cast<AtomicSDNode>(Op.getNode());
9086   EVT MemVT = AtomicNode->getMemoryVT();
9087   if (MemVT.getSizeInBits() >= 32)
9088     return Op;
9089 
9090   SDValue CmpOp = Op.getOperand(2);
9091   // If this is already correctly zero-extended, leave it alone.
9092   auto HighBits = APInt::getHighBitsSet(32, 32 - MemVT.getSizeInBits());
9093   if (DAG.MaskedValueIsZero(CmpOp, HighBits))
9094     return Op;
9095 
9096   // Clear the high bits of the compare operand.
9097   unsigned MaskVal = (1 << MemVT.getSizeInBits()) - 1;
9098   SDValue NewCmpOp =
9099     DAG.getNode(ISD::AND, dl, MVT::i32, CmpOp,
9100                 DAG.getConstant(MaskVal, dl, MVT::i32));
9101 
9102   // Replace the existing compare operand with the properly zero-extended one.
9103   SmallVector<SDValue, 4> Ops;
9104   for (int i = 0, e = AtomicNode->getNumOperands(); i < e; i++)
9105     Ops.push_back(AtomicNode->getOperand(i));
9106   Ops[2] = NewCmpOp;
9107   MachineMemOperand *MMO = AtomicNode->getMemOperand();
9108   SDVTList Tys = DAG.getVTList(MVT::i32, MVT::Other);
9109   auto NodeTy =
9110     (MemVT == MVT::i8) ? PPCISD::ATOMIC_CMP_SWAP_8 : PPCISD::ATOMIC_CMP_SWAP_16;
9111   return DAG.getMemIntrinsicNode(NodeTy, dl, Tys, Ops, MemVT, MMO);
9112 }
9113 
9114 SDValue PPCTargetLowering::LowerSIGN_EXTEND_INREG(SDValue Op,
9115                                                   SelectionDAG &DAG) const {
9116   SDLoc dl(Op);
9117   // For v2i64 (VSX), we can pattern patch the v2i32 case (using fp <-> int
9118   // instructions), but for smaller types, we need to first extend up to v2i32
9119   // before doing going farther.
9120   if (Op.getValueType() == MVT::v2i64) {
9121     EVT ExtVT = cast<VTSDNode>(Op.getOperand(1))->getVT();
9122     if (ExtVT != MVT::v2i32) {
9123       Op = DAG.getNode(ISD::BITCAST, dl, MVT::v4i32, Op.getOperand(0));
9124       Op = DAG.getNode(ISD::SIGN_EXTEND_INREG, dl, MVT::v4i32, Op,
9125                        DAG.getValueType(EVT::getVectorVT(*DAG.getContext(),
9126                                         ExtVT.getVectorElementType(), 4)));
9127       Op = DAG.getNode(ISD::BITCAST, dl, MVT::v2i64, Op);
9128       Op = DAG.getNode(ISD::SIGN_EXTEND_INREG, dl, MVT::v2i64, Op,
9129                        DAG.getValueType(MVT::v2i32));
9130     }
9131 
9132     return Op;
9133   }
9134 
9135   return SDValue();
9136 }
9137 
9138 SDValue PPCTargetLowering::LowerSCALAR_TO_VECTOR(SDValue Op,
9139                                                  SelectionDAG &DAG) const {
9140   SDLoc dl(Op);
9141   // Create a stack slot that is 16-byte aligned.
9142   MachineFrameInfo &MFI = DAG.getMachineFunction().getFrameInfo();
9143   int FrameIdx = MFI.CreateStackObject(16, 16, false);
9144   EVT PtrVT = getPointerTy(DAG.getDataLayout());
9145   SDValue FIdx = DAG.getFrameIndex(FrameIdx, PtrVT);
9146 
9147   // Store the input value into Value#0 of the stack slot.
9148   SDValue Store = DAG.getStore(DAG.getEntryNode(), dl, Op.getOperand(0), FIdx,
9149                                MachinePointerInfo());
9150   // Load it out.
9151   return DAG.getLoad(Op.getValueType(), dl, Store, FIdx, MachinePointerInfo());
9152 }
9153 
9154 SDValue PPCTargetLowering::LowerINSERT_VECTOR_ELT(SDValue Op,
9155                                                   SelectionDAG &DAG) const {
9156   assert(Op.getOpcode() == ISD::INSERT_VECTOR_ELT &&
9157          "Should only be called for ISD::INSERT_VECTOR_ELT");
9158 
9159   ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op.getOperand(2));
9160   // We have legal lowering for constant indices but not for variable ones.
9161   if (!C)
9162     return SDValue();
9163 
9164   EVT VT = Op.getValueType();
9165   SDLoc dl(Op);
9166   SDValue V1 = Op.getOperand(0);
9167   SDValue V2 = Op.getOperand(1);
9168   // We can use MTVSRZ + VECINSERT for v8i16 and v16i8 types.
9169   if (VT == MVT::v8i16 || VT == MVT::v16i8) {
9170     SDValue Mtvsrz = DAG.getNode(PPCISD::MTVSRZ, dl, VT, V2);
9171     unsigned BytesInEachElement = VT.getVectorElementType().getSizeInBits() / 8;
9172     unsigned InsertAtElement = C->getZExtValue();
9173     unsigned InsertAtByte = InsertAtElement * BytesInEachElement;
9174     if (Subtarget.isLittleEndian()) {
9175       InsertAtByte = (16 - BytesInEachElement) - InsertAtByte;
9176     }
9177     return DAG.getNode(PPCISD::VECINSERT, dl, VT, V1, Mtvsrz,
9178                        DAG.getConstant(InsertAtByte, dl, MVT::i32));
9179   }
9180   return Op;
9181 }
9182 
9183 SDValue PPCTargetLowering::LowerEXTRACT_VECTOR_ELT(SDValue Op,
9184                                                    SelectionDAG &DAG) const {
9185   SDLoc dl(Op);
9186   SDNode *N = Op.getNode();
9187 
9188   assert(N->getOperand(0).getValueType() == MVT::v4i1 &&
9189          "Unknown extract_vector_elt type");
9190 
9191   SDValue Value = N->getOperand(0);
9192 
9193   // The first part of this is like the store lowering except that we don't
9194   // need to track the chain.
9195 
9196   // The values are now known to be -1 (false) or 1 (true). To convert this
9197   // into 0 (false) and 1 (true), add 1 and then divide by 2 (multiply by 0.5).
9198   // This can be done with an fma and the 0.5 constant: (V+1.0)*0.5 = 0.5*V+0.5
9199   Value = DAG.getNode(PPCISD::QBFLT, dl, MVT::v4f64, Value);
9200 
9201   // FIXME: We can make this an f32 vector, but the BUILD_VECTOR code needs to
9202   // understand how to form the extending load.
9203   SDValue FPHalfs = DAG.getConstantFP(0.5, dl, MVT::v4f64);
9204 
9205   Value = DAG.getNode(ISD::FMA, dl, MVT::v4f64, Value, FPHalfs, FPHalfs);
9206 
9207   // Now convert to an integer and store.
9208   Value = DAG.getNode(ISD::INTRINSIC_WO_CHAIN, dl, MVT::v4f64,
9209     DAG.getConstant(Intrinsic::ppc_qpx_qvfctiwu, dl, MVT::i32),
9210     Value);
9211 
9212   MachineFrameInfo &MFI = DAG.getMachineFunction().getFrameInfo();
9213   int FrameIdx = MFI.CreateStackObject(16, 16, false);
9214   MachinePointerInfo PtrInfo =
9215       MachinePointerInfo::getFixedStack(DAG.getMachineFunction(), FrameIdx);
9216   EVT PtrVT = getPointerTy(DAG.getDataLayout());
9217   SDValue FIdx = DAG.getFrameIndex(FrameIdx, PtrVT);
9218 
9219   SDValue StoreChain = DAG.getEntryNode();
9220   SDValue Ops[] = {StoreChain,
9221                    DAG.getConstant(Intrinsic::ppc_qpx_qvstfiw, dl, MVT::i32),
9222                    Value, FIdx};
9223   SDVTList VTs = DAG.getVTList(/*chain*/ MVT::Other);
9224 
9225   StoreChain = DAG.getMemIntrinsicNode(ISD::INTRINSIC_VOID,
9226     dl, VTs, Ops, MVT::v4i32, PtrInfo);
9227 
9228   // Extract the value requested.
9229   unsigned Offset = 4*cast<ConstantSDNode>(N->getOperand(1))->getZExtValue();
9230   SDValue Idx = DAG.getConstant(Offset, dl, FIdx.getValueType());
9231   Idx = DAG.getNode(ISD::ADD, dl, FIdx.getValueType(), FIdx, Idx);
9232 
9233   SDValue IntVal =
9234       DAG.getLoad(MVT::i32, dl, StoreChain, Idx, PtrInfo.getWithOffset(Offset));
9235 
9236   if (!Subtarget.useCRBits())
9237     return IntVal;
9238 
9239   return DAG.getNode(ISD::TRUNCATE, dl, MVT::i1, IntVal);
9240 }
9241 
9242 /// Lowering for QPX v4i1 loads
9243 SDValue PPCTargetLowering::LowerVectorLoad(SDValue Op,
9244                                            SelectionDAG &DAG) const {
9245   SDLoc dl(Op);
9246   LoadSDNode *LN = cast<LoadSDNode>(Op.getNode());
9247   SDValue LoadChain = LN->getChain();
9248   SDValue BasePtr = LN->getBasePtr();
9249 
9250   if (Op.getValueType() == MVT::v4f64 ||
9251       Op.getValueType() == MVT::v4f32) {
9252     EVT MemVT = LN->getMemoryVT();
9253     unsigned Alignment = LN->getAlignment();
9254 
9255     // If this load is properly aligned, then it is legal.
9256     if (Alignment >= MemVT.getStoreSize())
9257       return Op;
9258 
9259     EVT ScalarVT = Op.getValueType().getScalarType(),
9260         ScalarMemVT = MemVT.getScalarType();
9261     unsigned Stride = ScalarMemVT.getStoreSize();
9262 
9263     SDValue Vals[4], LoadChains[4];
9264     for (unsigned Idx = 0; Idx < 4; ++Idx) {
9265       SDValue Load;
9266       if (ScalarVT != ScalarMemVT)
9267         Load = DAG.getExtLoad(LN->getExtensionType(), dl, ScalarVT, LoadChain,
9268                               BasePtr,
9269                               LN->getPointerInfo().getWithOffset(Idx * Stride),
9270                               ScalarMemVT, MinAlign(Alignment, Idx * Stride),
9271                               LN->getMemOperand()->getFlags(), LN->getAAInfo());
9272       else
9273         Load = DAG.getLoad(ScalarVT, dl, LoadChain, BasePtr,
9274                            LN->getPointerInfo().getWithOffset(Idx * Stride),
9275                            MinAlign(Alignment, Idx * Stride),
9276                            LN->getMemOperand()->getFlags(), LN->getAAInfo());
9277 
9278       if (Idx == 0 && LN->isIndexed()) {
9279         assert(LN->getAddressingMode() == ISD::PRE_INC &&
9280                "Unknown addressing mode on vector load");
9281         Load = DAG.getIndexedLoad(Load, dl, BasePtr, LN->getOffset(),
9282                                   LN->getAddressingMode());
9283       }
9284 
9285       Vals[Idx] = Load;
9286       LoadChains[Idx] = Load.getValue(1);
9287 
9288       BasePtr = DAG.getNode(ISD::ADD, dl, BasePtr.getValueType(), BasePtr,
9289                             DAG.getConstant(Stride, dl,
9290                                             BasePtr.getValueType()));
9291     }
9292 
9293     SDValue TF =  DAG.getNode(ISD::TokenFactor, dl, MVT::Other, LoadChains);
9294     SDValue Value = DAG.getBuildVector(Op.getValueType(), dl, Vals);
9295 
9296     if (LN->isIndexed()) {
9297       SDValue RetOps[] = { Value, Vals[0].getValue(1), TF };
9298       return DAG.getMergeValues(RetOps, dl);
9299     }
9300 
9301     SDValue RetOps[] = { Value, TF };
9302     return DAG.getMergeValues(RetOps, dl);
9303   }
9304 
9305   assert(Op.getValueType() == MVT::v4i1 && "Unknown load to lower");
9306   assert(LN->isUnindexed() && "Indexed v4i1 loads are not supported");
9307 
9308   // To lower v4i1 from a byte array, we load the byte elements of the
9309   // vector and then reuse the BUILD_VECTOR logic.
9310 
9311   SDValue VectElmts[4], VectElmtChains[4];
9312   for (unsigned i = 0; i < 4; ++i) {
9313     SDValue Idx = DAG.getConstant(i, dl, BasePtr.getValueType());
9314     Idx = DAG.getNode(ISD::ADD, dl, BasePtr.getValueType(), BasePtr, Idx);
9315 
9316     VectElmts[i] = DAG.getExtLoad(
9317         ISD::EXTLOAD, dl, MVT::i32, LoadChain, Idx,
9318         LN->getPointerInfo().getWithOffset(i), MVT::i8,
9319         /* Alignment = */ 1, LN->getMemOperand()->getFlags(), LN->getAAInfo());
9320     VectElmtChains[i] = VectElmts[i].getValue(1);
9321   }
9322 
9323   LoadChain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, VectElmtChains);
9324   SDValue Value = DAG.getBuildVector(MVT::v4i1, dl, VectElmts);
9325 
9326   SDValue RVals[] = { Value, LoadChain };
9327   return DAG.getMergeValues(RVals, dl);
9328 }
9329 
9330 /// Lowering for QPX v4i1 stores
9331 SDValue PPCTargetLowering::LowerVectorStore(SDValue Op,
9332                                             SelectionDAG &DAG) const {
9333   SDLoc dl(Op);
9334   StoreSDNode *SN = cast<StoreSDNode>(Op.getNode());
9335   SDValue StoreChain = SN->getChain();
9336   SDValue BasePtr = SN->getBasePtr();
9337   SDValue Value = SN->getValue();
9338 
9339   if (Value.getValueType() == MVT::v4f64 ||
9340       Value.getValueType() == MVT::v4f32) {
9341     EVT MemVT = SN->getMemoryVT();
9342     unsigned Alignment = SN->getAlignment();
9343 
9344     // If this store is properly aligned, then it is legal.
9345     if (Alignment >= MemVT.getStoreSize())
9346       return Op;
9347 
9348     EVT ScalarVT = Value.getValueType().getScalarType(),
9349         ScalarMemVT = MemVT.getScalarType();
9350     unsigned Stride = ScalarMemVT.getStoreSize();
9351 
9352     SDValue Stores[4];
9353     for (unsigned Idx = 0; Idx < 4; ++Idx) {
9354       SDValue Ex = DAG.getNode(
9355           ISD::EXTRACT_VECTOR_ELT, dl, ScalarVT, Value,
9356           DAG.getConstant(Idx, dl, getVectorIdxTy(DAG.getDataLayout())));
9357       SDValue Store;
9358       if (ScalarVT != ScalarMemVT)
9359         Store =
9360             DAG.getTruncStore(StoreChain, dl, Ex, BasePtr,
9361                               SN->getPointerInfo().getWithOffset(Idx * Stride),
9362                               ScalarMemVT, MinAlign(Alignment, Idx * Stride),
9363                               SN->getMemOperand()->getFlags(), SN->getAAInfo());
9364       else
9365         Store = DAG.getStore(StoreChain, dl, Ex, BasePtr,
9366                              SN->getPointerInfo().getWithOffset(Idx * Stride),
9367                              MinAlign(Alignment, Idx * Stride),
9368                              SN->getMemOperand()->getFlags(), SN->getAAInfo());
9369 
9370       if (Idx == 0 && SN->isIndexed()) {
9371         assert(SN->getAddressingMode() == ISD::PRE_INC &&
9372                "Unknown addressing mode on vector store");
9373         Store = DAG.getIndexedStore(Store, dl, BasePtr, SN->getOffset(),
9374                                     SN->getAddressingMode());
9375       }
9376 
9377       BasePtr = DAG.getNode(ISD::ADD, dl, BasePtr.getValueType(), BasePtr,
9378                             DAG.getConstant(Stride, dl,
9379                                             BasePtr.getValueType()));
9380       Stores[Idx] = Store;
9381     }
9382 
9383     SDValue TF =  DAG.getNode(ISD::TokenFactor, dl, MVT::Other, Stores);
9384 
9385     if (SN->isIndexed()) {
9386       SDValue RetOps[] = { TF, Stores[0].getValue(1) };
9387       return DAG.getMergeValues(RetOps, dl);
9388     }
9389 
9390     return TF;
9391   }
9392 
9393   assert(SN->isUnindexed() && "Indexed v4i1 stores are not supported");
9394   assert(Value.getValueType() == MVT::v4i1 && "Unknown store to lower");
9395 
9396   // The values are now known to be -1 (false) or 1 (true). To convert this
9397   // into 0 (false) and 1 (true), add 1 and then divide by 2 (multiply by 0.5).
9398   // This can be done with an fma and the 0.5 constant: (V+1.0)*0.5 = 0.5*V+0.5
9399   Value = DAG.getNode(PPCISD::QBFLT, dl, MVT::v4f64, Value);
9400 
9401   // FIXME: We can make this an f32 vector, but the BUILD_VECTOR code needs to
9402   // understand how to form the extending load.
9403   SDValue FPHalfs = DAG.getConstantFP(0.5, dl, MVT::v4f64);
9404 
9405   Value = DAG.getNode(ISD::FMA, dl, MVT::v4f64, Value, FPHalfs, FPHalfs);
9406 
9407   // Now convert to an integer and store.
9408   Value = DAG.getNode(ISD::INTRINSIC_WO_CHAIN, dl, MVT::v4f64,
9409     DAG.getConstant(Intrinsic::ppc_qpx_qvfctiwu, dl, MVT::i32),
9410     Value);
9411 
9412   MachineFrameInfo &MFI = DAG.getMachineFunction().getFrameInfo();
9413   int FrameIdx = MFI.CreateStackObject(16, 16, false);
9414   MachinePointerInfo PtrInfo =
9415       MachinePointerInfo::getFixedStack(DAG.getMachineFunction(), FrameIdx);
9416   EVT PtrVT = getPointerTy(DAG.getDataLayout());
9417   SDValue FIdx = DAG.getFrameIndex(FrameIdx, PtrVT);
9418 
9419   SDValue Ops[] = {StoreChain,
9420                    DAG.getConstant(Intrinsic::ppc_qpx_qvstfiw, dl, MVT::i32),
9421                    Value, FIdx};
9422   SDVTList VTs = DAG.getVTList(/*chain*/ MVT::Other);
9423 
9424   StoreChain = DAG.getMemIntrinsicNode(ISD::INTRINSIC_VOID,
9425     dl, VTs, Ops, MVT::v4i32, PtrInfo);
9426 
9427   // Move data into the byte array.
9428   SDValue Loads[4], LoadChains[4];
9429   for (unsigned i = 0; i < 4; ++i) {
9430     unsigned Offset = 4*i;
9431     SDValue Idx = DAG.getConstant(Offset, dl, FIdx.getValueType());
9432     Idx = DAG.getNode(ISD::ADD, dl, FIdx.getValueType(), FIdx, Idx);
9433 
9434     Loads[i] = DAG.getLoad(MVT::i32, dl, StoreChain, Idx,
9435                            PtrInfo.getWithOffset(Offset));
9436     LoadChains[i] = Loads[i].getValue(1);
9437   }
9438 
9439   StoreChain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, LoadChains);
9440 
9441   SDValue Stores[4];
9442   for (unsigned i = 0; i < 4; ++i) {
9443     SDValue Idx = DAG.getConstant(i, dl, BasePtr.getValueType());
9444     Idx = DAG.getNode(ISD::ADD, dl, BasePtr.getValueType(), BasePtr, Idx);
9445 
9446     Stores[i] = DAG.getTruncStore(
9447         StoreChain, dl, Loads[i], Idx, SN->getPointerInfo().getWithOffset(i),
9448         MVT::i8, /* Alignment = */ 1, SN->getMemOperand()->getFlags(),
9449         SN->getAAInfo());
9450   }
9451 
9452   StoreChain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, Stores);
9453 
9454   return StoreChain;
9455 }
9456 
9457 SDValue PPCTargetLowering::LowerMUL(SDValue Op, SelectionDAG &DAG) const {
9458   SDLoc dl(Op);
9459   if (Op.getValueType() == MVT::v4i32) {
9460     SDValue LHS = Op.getOperand(0), RHS = Op.getOperand(1);
9461 
9462     SDValue Zero  = BuildSplatI(  0, 1, MVT::v4i32, DAG, dl);
9463     SDValue Neg16 = BuildSplatI(-16, 4, MVT::v4i32, DAG, dl);//+16 as shift amt.
9464 
9465     SDValue RHSSwap =   // = vrlw RHS, 16
9466       BuildIntrinsicOp(Intrinsic::ppc_altivec_vrlw, RHS, Neg16, DAG, dl);
9467 
9468     // Shrinkify inputs to v8i16.
9469     LHS = DAG.getNode(ISD::BITCAST, dl, MVT::v8i16, LHS);
9470     RHS = DAG.getNode(ISD::BITCAST, dl, MVT::v8i16, RHS);
9471     RHSSwap = DAG.getNode(ISD::BITCAST, dl, MVT::v8i16, RHSSwap);
9472 
9473     // Low parts multiplied together, generating 32-bit results (we ignore the
9474     // top parts).
9475     SDValue LoProd = BuildIntrinsicOp(Intrinsic::ppc_altivec_vmulouh,
9476                                         LHS, RHS, DAG, dl, MVT::v4i32);
9477 
9478     SDValue HiProd = BuildIntrinsicOp(Intrinsic::ppc_altivec_vmsumuhm,
9479                                       LHS, RHSSwap, Zero, DAG, dl, MVT::v4i32);
9480     // Shift the high parts up 16 bits.
9481     HiProd = BuildIntrinsicOp(Intrinsic::ppc_altivec_vslw, HiProd,
9482                               Neg16, DAG, dl);
9483     return DAG.getNode(ISD::ADD, dl, MVT::v4i32, LoProd, HiProd);
9484   } else if (Op.getValueType() == MVT::v8i16) {
9485     SDValue LHS = Op.getOperand(0), RHS = Op.getOperand(1);
9486 
9487     SDValue Zero = BuildSplatI(0, 1, MVT::v8i16, DAG, dl);
9488 
9489     return BuildIntrinsicOp(Intrinsic::ppc_altivec_vmladduhm,
9490                             LHS, RHS, Zero, DAG, dl);
9491   } else if (Op.getValueType() == MVT::v16i8) {
9492     SDValue LHS = Op.getOperand(0), RHS = Op.getOperand(1);
9493     bool isLittleEndian = Subtarget.isLittleEndian();
9494 
9495     // Multiply the even 8-bit parts, producing 16-bit sums.
9496     SDValue EvenParts = BuildIntrinsicOp(Intrinsic::ppc_altivec_vmuleub,
9497                                            LHS, RHS, DAG, dl, MVT::v8i16);
9498     EvenParts = DAG.getNode(ISD::BITCAST, dl, MVT::v16i8, EvenParts);
9499 
9500     // Multiply the odd 8-bit parts, producing 16-bit sums.
9501     SDValue OddParts = BuildIntrinsicOp(Intrinsic::ppc_altivec_vmuloub,
9502                                           LHS, RHS, DAG, dl, MVT::v8i16);
9503     OddParts = DAG.getNode(ISD::BITCAST, dl, MVT::v16i8, OddParts);
9504 
9505     // Merge the results together.  Because vmuleub and vmuloub are
9506     // instructions with a big-endian bias, we must reverse the
9507     // element numbering and reverse the meaning of "odd" and "even"
9508     // when generating little endian code.
9509     int Ops[16];
9510     for (unsigned i = 0; i != 8; ++i) {
9511       if (isLittleEndian) {
9512         Ops[i*2  ] = 2*i;
9513         Ops[i*2+1] = 2*i+16;
9514       } else {
9515         Ops[i*2  ] = 2*i+1;
9516         Ops[i*2+1] = 2*i+1+16;
9517       }
9518     }
9519     if (isLittleEndian)
9520       return DAG.getVectorShuffle(MVT::v16i8, dl, OddParts, EvenParts, Ops);
9521     else
9522       return DAG.getVectorShuffle(MVT::v16i8, dl, EvenParts, OddParts, Ops);
9523   } else {
9524     llvm_unreachable("Unknown mul to lower!");
9525   }
9526 }
9527 
9528 /// LowerOperation - Provide custom lowering hooks for some operations.
9529 ///
9530 SDValue PPCTargetLowering::LowerOperation(SDValue Op, SelectionDAG &DAG) const {
9531   switch (Op.getOpcode()) {
9532   default: llvm_unreachable("Wasn't expecting to be able to lower this!");
9533   case ISD::ConstantPool:       return LowerConstantPool(Op, DAG);
9534   case ISD::BlockAddress:       return LowerBlockAddress(Op, DAG);
9535   case ISD::GlobalAddress:      return LowerGlobalAddress(Op, DAG);
9536   case ISD::GlobalTLSAddress:   return LowerGlobalTLSAddress(Op, DAG);
9537   case ISD::JumpTable:          return LowerJumpTable(Op, DAG);
9538   case ISD::SETCC:              return LowerSETCC(Op, DAG);
9539   case ISD::INIT_TRAMPOLINE:    return LowerINIT_TRAMPOLINE(Op, DAG);
9540   case ISD::ADJUST_TRAMPOLINE:  return LowerADJUST_TRAMPOLINE(Op, DAG);
9541 
9542   // Variable argument lowering.
9543   case ISD::VASTART:            return LowerVASTART(Op, DAG);
9544   case ISD::VAARG:              return LowerVAARG(Op, DAG);
9545   case ISD::VACOPY:             return LowerVACOPY(Op, DAG);
9546 
9547   case ISD::STACKRESTORE:       return LowerSTACKRESTORE(Op, DAG);
9548   case ISD::DYNAMIC_STACKALLOC: return LowerDYNAMIC_STACKALLOC(Op, DAG);
9549   case ISD::GET_DYNAMIC_AREA_OFFSET:
9550     return LowerGET_DYNAMIC_AREA_OFFSET(Op, DAG);
9551 
9552   // Exception handling lowering.
9553   case ISD::EH_DWARF_CFA:       return LowerEH_DWARF_CFA(Op, DAG);
9554   case ISD::EH_SJLJ_SETJMP:     return lowerEH_SJLJ_SETJMP(Op, DAG);
9555   case ISD::EH_SJLJ_LONGJMP:    return lowerEH_SJLJ_LONGJMP(Op, DAG);
9556 
9557   case ISD::LOAD:               return LowerLOAD(Op, DAG);
9558   case ISD::STORE:              return LowerSTORE(Op, DAG);
9559   case ISD::TRUNCATE:           return LowerTRUNCATE(Op, DAG);
9560   case ISD::SELECT_CC:          return LowerSELECT_CC(Op, DAG);
9561   case ISD::FP_TO_UINT:
9562   case ISD::FP_TO_SINT:         return LowerFP_TO_INT(Op, DAG, SDLoc(Op));
9563   case ISD::UINT_TO_FP:
9564   case ISD::SINT_TO_FP:         return LowerINT_TO_FP(Op, DAG);
9565   case ISD::FLT_ROUNDS_:        return LowerFLT_ROUNDS_(Op, DAG);
9566 
9567   // Lower 64-bit shifts.
9568   case ISD::SHL_PARTS:          return LowerSHL_PARTS(Op, DAG);
9569   case ISD::SRL_PARTS:          return LowerSRL_PARTS(Op, DAG);
9570   case ISD::SRA_PARTS:          return LowerSRA_PARTS(Op, DAG);
9571 
9572   // Vector-related lowering.
9573   case ISD::BUILD_VECTOR:       return LowerBUILD_VECTOR(Op, DAG);
9574   case ISD::VECTOR_SHUFFLE:     return LowerVECTOR_SHUFFLE(Op, DAG);
9575   case ISD::INTRINSIC_WO_CHAIN: return LowerINTRINSIC_WO_CHAIN(Op, DAG);
9576   case ISD::SCALAR_TO_VECTOR:   return LowerSCALAR_TO_VECTOR(Op, DAG);
9577   case ISD::SIGN_EXTEND_INREG:  return LowerSIGN_EXTEND_INREG(Op, DAG);
9578   case ISD::EXTRACT_VECTOR_ELT: return LowerEXTRACT_VECTOR_ELT(Op, DAG);
9579   case ISD::INSERT_VECTOR_ELT:  return LowerINSERT_VECTOR_ELT(Op, DAG);
9580   case ISD::MUL:                return LowerMUL(Op, DAG);
9581 
9582   // For counter-based loop handling.
9583   case ISD::INTRINSIC_W_CHAIN:  return SDValue();
9584 
9585   case ISD::BITCAST:            return LowerBITCAST(Op, DAG);
9586 
9587   // Frame & Return address.
9588   case ISD::RETURNADDR:         return LowerRETURNADDR(Op, DAG);
9589   case ISD::FRAMEADDR:          return LowerFRAMEADDR(Op, DAG);
9590 
9591   case ISD::INTRINSIC_VOID:
9592     return LowerINTRINSIC_VOID(Op, DAG);
9593   case ISD::SREM:
9594   case ISD::UREM:
9595     return LowerREM(Op, DAG);
9596   case ISD::BSWAP:
9597     return LowerBSWAP(Op, DAG);
9598   case ISD::ATOMIC_CMP_SWAP:
9599     return LowerATOMIC_CMP_SWAP(Op, DAG);
9600   }
9601 }
9602 
9603 void PPCTargetLowering::ReplaceNodeResults(SDNode *N,
9604                                            SmallVectorImpl<SDValue>&Results,
9605                                            SelectionDAG &DAG) const {
9606   SDLoc dl(N);
9607   switch (N->getOpcode()) {
9608   default:
9609     llvm_unreachable("Do not know how to custom type legalize this operation!");
9610   case ISD::READCYCLECOUNTER: {
9611     SDVTList VTs = DAG.getVTList(MVT::i32, MVT::i32, MVT::Other);
9612     SDValue RTB = DAG.getNode(PPCISD::READ_TIME_BASE, dl, VTs, N->getOperand(0));
9613 
9614     Results.push_back(RTB);
9615     Results.push_back(RTB.getValue(1));
9616     Results.push_back(RTB.getValue(2));
9617     break;
9618   }
9619   case ISD::INTRINSIC_W_CHAIN: {
9620     if (cast<ConstantSDNode>(N->getOperand(1))->getZExtValue() !=
9621         Intrinsic::ppc_is_decremented_ctr_nonzero)
9622       break;
9623 
9624     assert(N->getValueType(0) == MVT::i1 &&
9625            "Unexpected result type for CTR decrement intrinsic");
9626     EVT SVT = getSetCCResultType(DAG.getDataLayout(), *DAG.getContext(),
9627                                  N->getValueType(0));
9628     SDVTList VTs = DAG.getVTList(SVT, MVT::Other);
9629     SDValue NewInt = DAG.getNode(N->getOpcode(), dl, VTs, N->getOperand(0),
9630                                  N->getOperand(1));
9631 
9632     Results.push_back(DAG.getNode(ISD::TRUNCATE, dl, MVT::i1, NewInt));
9633     Results.push_back(NewInt.getValue(1));
9634     break;
9635   }
9636   case ISD::VAARG: {
9637     if (!Subtarget.isSVR4ABI() || Subtarget.isPPC64())
9638       return;
9639 
9640     EVT VT = N->getValueType(0);
9641 
9642     if (VT == MVT::i64) {
9643       SDValue NewNode = LowerVAARG(SDValue(N, 1), DAG);
9644 
9645       Results.push_back(NewNode);
9646       Results.push_back(NewNode.getValue(1));
9647     }
9648     return;
9649   }
9650   case ISD::FP_TO_SINT:
9651   case ISD::FP_TO_UINT:
9652     // LowerFP_TO_INT() can only handle f32 and f64.
9653     if (N->getOperand(0).getValueType() == MVT::ppcf128)
9654       return;
9655     Results.push_back(LowerFP_TO_INT(SDValue(N, 0), DAG, dl));
9656     return;
9657   }
9658 }
9659 
9660 //===----------------------------------------------------------------------===//
9661 //  Other Lowering Code
9662 //===----------------------------------------------------------------------===//
9663 
9664 static Instruction* callIntrinsic(IRBuilder<> &Builder, Intrinsic::ID Id) {
9665   Module *M = Builder.GetInsertBlock()->getParent()->getParent();
9666   Function *Func = Intrinsic::getDeclaration(M, Id);
9667   return Builder.CreateCall(Func, {});
9668 }
9669 
9670 // The mappings for emitLeading/TrailingFence is taken from
9671 // http://www.cl.cam.ac.uk/~pes20/cpp/cpp0xmappings.html
9672 Instruction *PPCTargetLowering::emitLeadingFence(IRBuilder<> &Builder,
9673                                                  Instruction *Inst,
9674                                                  AtomicOrdering Ord) const {
9675   if (Ord == AtomicOrdering::SequentiallyConsistent)
9676     return callIntrinsic(Builder, Intrinsic::ppc_sync);
9677   if (isReleaseOrStronger(Ord))
9678     return callIntrinsic(Builder, Intrinsic::ppc_lwsync);
9679   return nullptr;
9680 }
9681 
9682 Instruction *PPCTargetLowering::emitTrailingFence(IRBuilder<> &Builder,
9683                                                   Instruction *Inst,
9684                                                   AtomicOrdering Ord) const {
9685   if (Inst->hasAtomicLoad() && isAcquireOrStronger(Ord)) {
9686     // See http://www.cl.cam.ac.uk/~pes20/cpp/cpp0xmappings.html and
9687     // http://www.rdrop.com/users/paulmck/scalability/paper/N2745r.2011.03.04a.html
9688     // and http://www.cl.cam.ac.uk/~pes20/cppppc/ for justification.
9689     if (isa<LoadInst>(Inst) && Subtarget.isPPC64())
9690       return Builder.CreateCall(
9691           Intrinsic::getDeclaration(
9692               Builder.GetInsertBlock()->getParent()->getParent(),
9693               Intrinsic::ppc_cfence, {Inst->getType()}),
9694           {Inst});
9695     // FIXME: Can use isync for rmw operation.
9696     return callIntrinsic(Builder, Intrinsic::ppc_lwsync);
9697   }
9698   return nullptr;
9699 }
9700 
9701 MachineBasicBlock *
9702 PPCTargetLowering::EmitAtomicBinary(MachineInstr &MI, MachineBasicBlock *BB,
9703                                     unsigned AtomicSize,
9704                                     unsigned BinOpcode,
9705                                     unsigned CmpOpcode,
9706                                     unsigned CmpPred) const {
9707   // This also handles ATOMIC_SWAP, indicated by BinOpcode==0.
9708   const TargetInstrInfo *TII = Subtarget.getInstrInfo();
9709 
9710   auto LoadMnemonic = PPC::LDARX;
9711   auto StoreMnemonic = PPC::STDCX;
9712   switch (AtomicSize) {
9713   default:
9714     llvm_unreachable("Unexpected size of atomic entity");
9715   case 1:
9716     LoadMnemonic = PPC::LBARX;
9717     StoreMnemonic = PPC::STBCX;
9718     assert(Subtarget.hasPartwordAtomics() && "Call this only with size >=4");
9719     break;
9720   case 2:
9721     LoadMnemonic = PPC::LHARX;
9722     StoreMnemonic = PPC::STHCX;
9723     assert(Subtarget.hasPartwordAtomics() && "Call this only with size >=4");
9724     break;
9725   case 4:
9726     LoadMnemonic = PPC::LWARX;
9727     StoreMnemonic = PPC::STWCX;
9728     break;
9729   case 8:
9730     LoadMnemonic = PPC::LDARX;
9731     StoreMnemonic = PPC::STDCX;
9732     break;
9733   }
9734 
9735   const BasicBlock *LLVM_BB = BB->getBasicBlock();
9736   MachineFunction *F = BB->getParent();
9737   MachineFunction::iterator It = ++BB->getIterator();
9738 
9739   unsigned dest = MI.getOperand(0).getReg();
9740   unsigned ptrA = MI.getOperand(1).getReg();
9741   unsigned ptrB = MI.getOperand(2).getReg();
9742   unsigned incr = MI.getOperand(3).getReg();
9743   DebugLoc dl = MI.getDebugLoc();
9744 
9745   MachineBasicBlock *loopMBB = F->CreateMachineBasicBlock(LLVM_BB);
9746   MachineBasicBlock *loop2MBB =
9747     CmpOpcode ? F->CreateMachineBasicBlock(LLVM_BB) : nullptr;
9748   MachineBasicBlock *exitMBB = F->CreateMachineBasicBlock(LLVM_BB);
9749   F->insert(It, loopMBB);
9750   if (CmpOpcode)
9751     F->insert(It, loop2MBB);
9752   F->insert(It, exitMBB);
9753   exitMBB->splice(exitMBB->begin(), BB,
9754                   std::next(MachineBasicBlock::iterator(MI)), BB->end());
9755   exitMBB->transferSuccessorsAndUpdatePHIs(BB);
9756 
9757   MachineRegisterInfo &RegInfo = F->getRegInfo();
9758   unsigned TmpReg = (!BinOpcode) ? incr :
9759     RegInfo.createVirtualRegister( AtomicSize == 8 ? &PPC::G8RCRegClass
9760                                            : &PPC::GPRCRegClass);
9761 
9762   //  thisMBB:
9763   //   ...
9764   //   fallthrough --> loopMBB
9765   BB->addSuccessor(loopMBB);
9766 
9767   //  loopMBB:
9768   //   l[wd]arx dest, ptr
9769   //   add r0, dest, incr
9770   //   st[wd]cx. r0, ptr
9771   //   bne- loopMBB
9772   //   fallthrough --> exitMBB
9773 
9774   // For max/min...
9775   //  loopMBB:
9776   //   l[wd]arx dest, ptr
9777   //   cmpl?[wd] incr, dest
9778   //   bgt exitMBB
9779   //  loop2MBB:
9780   //   st[wd]cx. dest, ptr
9781   //   bne- loopMBB
9782   //   fallthrough --> exitMBB
9783 
9784   BB = loopMBB;
9785   BuildMI(BB, dl, TII->get(LoadMnemonic), dest)
9786     .addReg(ptrA).addReg(ptrB);
9787   if (BinOpcode)
9788     BuildMI(BB, dl, TII->get(BinOpcode), TmpReg).addReg(incr).addReg(dest);
9789   if (CmpOpcode) {
9790     // Signed comparisons of byte or halfword values must be sign-extended.
9791     if (CmpOpcode == PPC::CMPW && AtomicSize < 4) {
9792       unsigned ExtReg =  RegInfo.createVirtualRegister(&PPC::GPRCRegClass);
9793       BuildMI(BB, dl, TII->get(AtomicSize == 1 ? PPC::EXTSB : PPC::EXTSH),
9794               ExtReg).addReg(dest);
9795       BuildMI(BB, dl, TII->get(CmpOpcode), PPC::CR0)
9796         .addReg(incr).addReg(ExtReg);
9797     } else
9798       BuildMI(BB, dl, TII->get(CmpOpcode), PPC::CR0)
9799         .addReg(incr).addReg(dest);
9800 
9801     BuildMI(BB, dl, TII->get(PPC::BCC))
9802       .addImm(CmpPred).addReg(PPC::CR0).addMBB(exitMBB);
9803     BB->addSuccessor(loop2MBB);
9804     BB->addSuccessor(exitMBB);
9805     BB = loop2MBB;
9806   }
9807   BuildMI(BB, dl, TII->get(StoreMnemonic))
9808     .addReg(TmpReg).addReg(ptrA).addReg(ptrB);
9809   BuildMI(BB, dl, TII->get(PPC::BCC))
9810     .addImm(PPC::PRED_NE).addReg(PPC::CR0).addMBB(loopMBB);
9811   BB->addSuccessor(loopMBB);
9812   BB->addSuccessor(exitMBB);
9813 
9814   //  exitMBB:
9815   //   ...
9816   BB = exitMBB;
9817   return BB;
9818 }
9819 
9820 MachineBasicBlock *
9821 PPCTargetLowering::EmitPartwordAtomicBinary(MachineInstr &MI,
9822                                             MachineBasicBlock *BB,
9823                                             bool is8bit, // operation
9824                                             unsigned BinOpcode,
9825                                             unsigned CmpOpcode,
9826                                             unsigned CmpPred) const {
9827   // If we support part-word atomic mnemonics, just use them
9828   if (Subtarget.hasPartwordAtomics())
9829     return EmitAtomicBinary(MI, BB, is8bit ? 1 : 2, BinOpcode,
9830                             CmpOpcode, CmpPred);
9831 
9832   // This also handles ATOMIC_SWAP, indicated by BinOpcode==0.
9833   const TargetInstrInfo *TII = Subtarget.getInstrInfo();
9834   // In 64 bit mode we have to use 64 bits for addresses, even though the
9835   // lwarx/stwcx are 32 bits.  With the 32-bit atomics we can use address
9836   // registers without caring whether they're 32 or 64, but here we're
9837   // doing actual arithmetic on the addresses.
9838   bool is64bit = Subtarget.isPPC64();
9839   bool isLittleEndian = Subtarget.isLittleEndian();
9840   unsigned ZeroReg = is64bit ? PPC::ZERO8 : PPC::ZERO;
9841 
9842   const BasicBlock *LLVM_BB = BB->getBasicBlock();
9843   MachineFunction *F = BB->getParent();
9844   MachineFunction::iterator It = ++BB->getIterator();
9845 
9846   unsigned dest = MI.getOperand(0).getReg();
9847   unsigned ptrA = MI.getOperand(1).getReg();
9848   unsigned ptrB = MI.getOperand(2).getReg();
9849   unsigned incr = MI.getOperand(3).getReg();
9850   DebugLoc dl = MI.getDebugLoc();
9851 
9852   MachineBasicBlock *loopMBB = F->CreateMachineBasicBlock(LLVM_BB);
9853   MachineBasicBlock *loop2MBB =
9854     CmpOpcode ? F->CreateMachineBasicBlock(LLVM_BB) : nullptr;
9855   MachineBasicBlock *exitMBB = F->CreateMachineBasicBlock(LLVM_BB);
9856   F->insert(It, loopMBB);
9857   if (CmpOpcode)
9858     F->insert(It, loop2MBB);
9859   F->insert(It, exitMBB);
9860   exitMBB->splice(exitMBB->begin(), BB,
9861                   std::next(MachineBasicBlock::iterator(MI)), BB->end());
9862   exitMBB->transferSuccessorsAndUpdatePHIs(BB);
9863 
9864   MachineRegisterInfo &RegInfo = F->getRegInfo();
9865   const TargetRegisterClass *RC = is64bit ? &PPC::G8RCRegClass
9866                                           : &PPC::GPRCRegClass;
9867   unsigned PtrReg = RegInfo.createVirtualRegister(RC);
9868   unsigned Shift1Reg = RegInfo.createVirtualRegister(RC);
9869   unsigned ShiftReg =
9870     isLittleEndian ? Shift1Reg : RegInfo.createVirtualRegister(RC);
9871   unsigned Incr2Reg = RegInfo.createVirtualRegister(RC);
9872   unsigned MaskReg = RegInfo.createVirtualRegister(RC);
9873   unsigned Mask2Reg = RegInfo.createVirtualRegister(RC);
9874   unsigned Mask3Reg = RegInfo.createVirtualRegister(RC);
9875   unsigned Tmp2Reg = RegInfo.createVirtualRegister(RC);
9876   unsigned Tmp3Reg = RegInfo.createVirtualRegister(RC);
9877   unsigned Tmp4Reg = RegInfo.createVirtualRegister(RC);
9878   unsigned TmpDestReg = RegInfo.createVirtualRegister(RC);
9879   unsigned Ptr1Reg;
9880   unsigned TmpReg = (!BinOpcode) ? Incr2Reg : RegInfo.createVirtualRegister(RC);
9881 
9882   //  thisMBB:
9883   //   ...
9884   //   fallthrough --> loopMBB
9885   BB->addSuccessor(loopMBB);
9886 
9887   // The 4-byte load must be aligned, while a char or short may be
9888   // anywhere in the word.  Hence all this nasty bookkeeping code.
9889   //   add ptr1, ptrA, ptrB [copy if ptrA==0]
9890   //   rlwinm shift1, ptr1, 3, 27, 28 [3, 27, 27]
9891   //   xori shift, shift1, 24 [16]
9892   //   rlwinm ptr, ptr1, 0, 0, 29
9893   //   slw incr2, incr, shift
9894   //   li mask2, 255 [li mask3, 0; ori mask2, mask3, 65535]
9895   //   slw mask, mask2, shift
9896   //  loopMBB:
9897   //   lwarx tmpDest, ptr
9898   //   add tmp, tmpDest, incr2
9899   //   andc tmp2, tmpDest, mask
9900   //   and tmp3, tmp, mask
9901   //   or tmp4, tmp3, tmp2
9902   //   stwcx. tmp4, ptr
9903   //   bne- loopMBB
9904   //   fallthrough --> exitMBB
9905   //   srw dest, tmpDest, shift
9906   if (ptrA != ZeroReg) {
9907     Ptr1Reg = RegInfo.createVirtualRegister(RC);
9908     BuildMI(BB, dl, TII->get(is64bit ? PPC::ADD8 : PPC::ADD4), Ptr1Reg)
9909       .addReg(ptrA).addReg(ptrB);
9910   } else {
9911     Ptr1Reg = ptrB;
9912   }
9913   BuildMI(BB, dl, TII->get(PPC::RLWINM), Shift1Reg).addReg(Ptr1Reg)
9914       .addImm(3).addImm(27).addImm(is8bit ? 28 : 27);
9915   if (!isLittleEndian)
9916     BuildMI(BB, dl, TII->get(is64bit ? PPC::XORI8 : PPC::XORI), ShiftReg)
9917         .addReg(Shift1Reg).addImm(is8bit ? 24 : 16);
9918   if (is64bit)
9919     BuildMI(BB, dl, TII->get(PPC::RLDICR), PtrReg)
9920       .addReg(Ptr1Reg).addImm(0).addImm(61);
9921   else
9922     BuildMI(BB, dl, TII->get(PPC::RLWINM), PtrReg)
9923       .addReg(Ptr1Reg).addImm(0).addImm(0).addImm(29);
9924   BuildMI(BB, dl, TII->get(PPC::SLW), Incr2Reg)
9925       .addReg(incr).addReg(ShiftReg);
9926   if (is8bit)
9927     BuildMI(BB, dl, TII->get(PPC::LI), Mask2Reg).addImm(255);
9928   else {
9929     BuildMI(BB, dl, TII->get(PPC::LI), Mask3Reg).addImm(0);
9930     BuildMI(BB, dl, TII->get(PPC::ORI),Mask2Reg).addReg(Mask3Reg).addImm(65535);
9931   }
9932   BuildMI(BB, dl, TII->get(PPC::SLW), MaskReg)
9933       .addReg(Mask2Reg).addReg(ShiftReg);
9934 
9935   BB = loopMBB;
9936   BuildMI(BB, dl, TII->get(PPC::LWARX), TmpDestReg)
9937     .addReg(ZeroReg).addReg(PtrReg);
9938   if (BinOpcode)
9939     BuildMI(BB, dl, TII->get(BinOpcode), TmpReg)
9940       .addReg(Incr2Reg).addReg(TmpDestReg);
9941   BuildMI(BB, dl, TII->get(is64bit ? PPC::ANDC8 : PPC::ANDC), Tmp2Reg)
9942     .addReg(TmpDestReg).addReg(MaskReg);
9943   BuildMI(BB, dl, TII->get(is64bit ? PPC::AND8 : PPC::AND), Tmp3Reg)
9944     .addReg(TmpReg).addReg(MaskReg);
9945   if (CmpOpcode) {
9946     // For unsigned comparisons, we can directly compare the shifted values.
9947     // For signed comparisons we shift and sign extend.
9948     unsigned SReg = RegInfo.createVirtualRegister(RC);
9949     BuildMI(BB, dl, TII->get(is64bit ? PPC::AND8 : PPC::AND), SReg)
9950       .addReg(TmpDestReg).addReg(MaskReg);
9951     unsigned ValueReg = SReg;
9952     unsigned CmpReg = Incr2Reg;
9953     if (CmpOpcode == PPC::CMPW) {
9954       ValueReg = RegInfo.createVirtualRegister(RC);
9955       BuildMI(BB, dl, TII->get(PPC::SRW), ValueReg)
9956         .addReg(SReg).addReg(ShiftReg);
9957       unsigned ValueSReg = RegInfo.createVirtualRegister(RC);
9958       BuildMI(BB, dl, TII->get(is8bit ? PPC::EXTSB : PPC::EXTSH), ValueSReg)
9959         .addReg(ValueReg);
9960       ValueReg = ValueSReg;
9961       CmpReg = incr;
9962     }
9963     BuildMI(BB, dl, TII->get(CmpOpcode), PPC::CR0)
9964       .addReg(CmpReg).addReg(ValueReg);
9965     BuildMI(BB, dl, TII->get(PPC::BCC))
9966       .addImm(CmpPred).addReg(PPC::CR0).addMBB(exitMBB);
9967     BB->addSuccessor(loop2MBB);
9968     BB->addSuccessor(exitMBB);
9969     BB = loop2MBB;
9970   }
9971   BuildMI(BB, dl, TII->get(is64bit ? PPC::OR8 : PPC::OR), Tmp4Reg)
9972     .addReg(Tmp3Reg).addReg(Tmp2Reg);
9973   BuildMI(BB, dl, TII->get(PPC::STWCX))
9974     .addReg(Tmp4Reg).addReg(ZeroReg).addReg(PtrReg);
9975   BuildMI(BB, dl, TII->get(PPC::BCC))
9976     .addImm(PPC::PRED_NE).addReg(PPC::CR0).addMBB(loopMBB);
9977   BB->addSuccessor(loopMBB);
9978   BB->addSuccessor(exitMBB);
9979 
9980   //  exitMBB:
9981   //   ...
9982   BB = exitMBB;
9983   BuildMI(*BB, BB->begin(), dl, TII->get(PPC::SRW), dest).addReg(TmpDestReg)
9984     .addReg(ShiftReg);
9985   return BB;
9986 }
9987 
9988 llvm::MachineBasicBlock *
9989 PPCTargetLowering::emitEHSjLjSetJmp(MachineInstr &MI,
9990                                     MachineBasicBlock *MBB) const {
9991   DebugLoc DL = MI.getDebugLoc();
9992   const TargetInstrInfo *TII = Subtarget.getInstrInfo();
9993   const PPCRegisterInfo *TRI = Subtarget.getRegisterInfo();
9994 
9995   MachineFunction *MF = MBB->getParent();
9996   MachineRegisterInfo &MRI = MF->getRegInfo();
9997 
9998   const BasicBlock *BB = MBB->getBasicBlock();
9999   MachineFunction::iterator I = ++MBB->getIterator();
10000 
10001   unsigned DstReg = MI.getOperand(0).getReg();
10002   const TargetRegisterClass *RC = MRI.getRegClass(DstReg);
10003   assert(TRI->isTypeLegalForClass(*RC, MVT::i32) && "Invalid destination!");
10004   unsigned mainDstReg = MRI.createVirtualRegister(RC);
10005   unsigned restoreDstReg = MRI.createVirtualRegister(RC);
10006 
10007   MVT PVT = getPointerTy(MF->getDataLayout());
10008   assert((PVT == MVT::i64 || PVT == MVT::i32) &&
10009          "Invalid Pointer Size!");
10010   // For v = setjmp(buf), we generate
10011   //
10012   // thisMBB:
10013   //  SjLjSetup mainMBB
10014   //  bl mainMBB
10015   //  v_restore = 1
10016   //  b sinkMBB
10017   //
10018   // mainMBB:
10019   //  buf[LabelOffset] = LR
10020   //  v_main = 0
10021   //
10022   // sinkMBB:
10023   //  v = phi(main, restore)
10024   //
10025 
10026   MachineBasicBlock *thisMBB = MBB;
10027   MachineBasicBlock *mainMBB = MF->CreateMachineBasicBlock(BB);
10028   MachineBasicBlock *sinkMBB = MF->CreateMachineBasicBlock(BB);
10029   MF->insert(I, mainMBB);
10030   MF->insert(I, sinkMBB);
10031 
10032   MachineInstrBuilder MIB;
10033 
10034   // Transfer the remainder of BB and its successor edges to sinkMBB.
10035   sinkMBB->splice(sinkMBB->begin(), MBB,
10036                   std::next(MachineBasicBlock::iterator(MI)), MBB->end());
10037   sinkMBB->transferSuccessorsAndUpdatePHIs(MBB);
10038 
10039   // Note that the structure of the jmp_buf used here is not compatible
10040   // with that used by libc, and is not designed to be. Specifically, it
10041   // stores only those 'reserved' registers that LLVM does not otherwise
10042   // understand how to spill. Also, by convention, by the time this
10043   // intrinsic is called, Clang has already stored the frame address in the
10044   // first slot of the buffer and stack address in the third. Following the
10045   // X86 target code, we'll store the jump address in the second slot. We also
10046   // need to save the TOC pointer (R2) to handle jumps between shared
10047   // libraries, and that will be stored in the fourth slot. The thread
10048   // identifier (R13) is not affected.
10049 
10050   // thisMBB:
10051   const int64_t LabelOffset = 1 * PVT.getStoreSize();
10052   const int64_t TOCOffset   = 3 * PVT.getStoreSize();
10053   const int64_t BPOffset    = 4 * PVT.getStoreSize();
10054 
10055   // Prepare IP either in reg.
10056   const TargetRegisterClass *PtrRC = getRegClassFor(PVT);
10057   unsigned LabelReg = MRI.createVirtualRegister(PtrRC);
10058   unsigned BufReg = MI.getOperand(1).getReg();
10059 
10060   if (Subtarget.isPPC64() && Subtarget.isSVR4ABI()) {
10061     setUsesTOCBasePtr(*MBB->getParent());
10062     MIB = BuildMI(*thisMBB, MI, DL, TII->get(PPC::STD))
10063               .addReg(PPC::X2)
10064               .addImm(TOCOffset)
10065               .addReg(BufReg)
10066               .cloneMemRefs(MI);
10067   }
10068 
10069   // Naked functions never have a base pointer, and so we use r1. For all
10070   // other functions, this decision must be delayed until during PEI.
10071   unsigned BaseReg;
10072   if (MF->getFunction().hasFnAttribute(Attribute::Naked))
10073     BaseReg = Subtarget.isPPC64() ? PPC::X1 : PPC::R1;
10074   else
10075     BaseReg = Subtarget.isPPC64() ? PPC::BP8 : PPC::BP;
10076 
10077   MIB = BuildMI(*thisMBB, MI, DL,
10078                 TII->get(Subtarget.isPPC64() ? PPC::STD : PPC::STW))
10079             .addReg(BaseReg)
10080             .addImm(BPOffset)
10081             .addReg(BufReg)
10082             .cloneMemRefs(MI);
10083 
10084   // Setup
10085   MIB = BuildMI(*thisMBB, MI, DL, TII->get(PPC::BCLalways)).addMBB(mainMBB);
10086   MIB.addRegMask(TRI->getNoPreservedMask());
10087 
10088   BuildMI(*thisMBB, MI, DL, TII->get(PPC::LI), restoreDstReg).addImm(1);
10089 
10090   MIB = BuildMI(*thisMBB, MI, DL, TII->get(PPC::EH_SjLj_Setup))
10091           .addMBB(mainMBB);
10092   MIB = BuildMI(*thisMBB, MI, DL, TII->get(PPC::B)).addMBB(sinkMBB);
10093 
10094   thisMBB->addSuccessor(mainMBB, BranchProbability::getZero());
10095   thisMBB->addSuccessor(sinkMBB, BranchProbability::getOne());
10096 
10097   // mainMBB:
10098   //  mainDstReg = 0
10099   MIB =
10100       BuildMI(mainMBB, DL,
10101               TII->get(Subtarget.isPPC64() ? PPC::MFLR8 : PPC::MFLR), LabelReg);
10102 
10103   // Store IP
10104   if (Subtarget.isPPC64()) {
10105     MIB = BuildMI(mainMBB, DL, TII->get(PPC::STD))
10106             .addReg(LabelReg)
10107             .addImm(LabelOffset)
10108             .addReg(BufReg);
10109   } else {
10110     MIB = BuildMI(mainMBB, DL, TII->get(PPC::STW))
10111             .addReg(LabelReg)
10112             .addImm(LabelOffset)
10113             .addReg(BufReg);
10114   }
10115   MIB.cloneMemRefs(MI);
10116 
10117   BuildMI(mainMBB, DL, TII->get(PPC::LI), mainDstReg).addImm(0);
10118   mainMBB->addSuccessor(sinkMBB);
10119 
10120   // sinkMBB:
10121   BuildMI(*sinkMBB, sinkMBB->begin(), DL,
10122           TII->get(PPC::PHI), DstReg)
10123     .addReg(mainDstReg).addMBB(mainMBB)
10124     .addReg(restoreDstReg).addMBB(thisMBB);
10125 
10126   MI.eraseFromParent();
10127   return sinkMBB;
10128 }
10129 
10130 MachineBasicBlock *
10131 PPCTargetLowering::emitEHSjLjLongJmp(MachineInstr &MI,
10132                                      MachineBasicBlock *MBB) const {
10133   DebugLoc DL = MI.getDebugLoc();
10134   const TargetInstrInfo *TII = Subtarget.getInstrInfo();
10135 
10136   MachineFunction *MF = MBB->getParent();
10137   MachineRegisterInfo &MRI = MF->getRegInfo();
10138 
10139   MVT PVT = getPointerTy(MF->getDataLayout());
10140   assert((PVT == MVT::i64 || PVT == MVT::i32) &&
10141          "Invalid Pointer Size!");
10142 
10143   const TargetRegisterClass *RC =
10144     (PVT == MVT::i64) ? &PPC::G8RCRegClass : &PPC::GPRCRegClass;
10145   unsigned Tmp = MRI.createVirtualRegister(RC);
10146   // Since FP is only updated here but NOT referenced, it's treated as GPR.
10147   unsigned FP  = (PVT == MVT::i64) ? PPC::X31 : PPC::R31;
10148   unsigned SP  = (PVT == MVT::i64) ? PPC::X1 : PPC::R1;
10149   unsigned BP =
10150       (PVT == MVT::i64)
10151           ? PPC::X30
10152           : (Subtarget.isSVR4ABI() && isPositionIndependent() ? PPC::R29
10153                                                               : PPC::R30);
10154 
10155   MachineInstrBuilder MIB;
10156 
10157   const int64_t LabelOffset = 1 * PVT.getStoreSize();
10158   const int64_t SPOffset    = 2 * PVT.getStoreSize();
10159   const int64_t TOCOffset   = 3 * PVT.getStoreSize();
10160   const int64_t BPOffset    = 4 * PVT.getStoreSize();
10161 
10162   unsigned BufReg = MI.getOperand(0).getReg();
10163 
10164   // Reload FP (the jumped-to function may not have had a
10165   // frame pointer, and if so, then its r31 will be restored
10166   // as necessary).
10167   if (PVT == MVT::i64) {
10168     MIB = BuildMI(*MBB, MI, DL, TII->get(PPC::LD), FP)
10169             .addImm(0)
10170             .addReg(BufReg);
10171   } else {
10172     MIB = BuildMI(*MBB, MI, DL, TII->get(PPC::LWZ), FP)
10173             .addImm(0)
10174             .addReg(BufReg);
10175   }
10176   MIB.cloneMemRefs(MI);
10177 
10178   // Reload IP
10179   if (PVT == MVT::i64) {
10180     MIB = BuildMI(*MBB, MI, DL, TII->get(PPC::LD), Tmp)
10181             .addImm(LabelOffset)
10182             .addReg(BufReg);
10183   } else {
10184     MIB = BuildMI(*MBB, MI, DL, TII->get(PPC::LWZ), Tmp)
10185             .addImm(LabelOffset)
10186             .addReg(BufReg);
10187   }
10188   MIB.cloneMemRefs(MI);
10189 
10190   // Reload SP
10191   if (PVT == MVT::i64) {
10192     MIB = BuildMI(*MBB, MI, DL, TII->get(PPC::LD), SP)
10193             .addImm(SPOffset)
10194             .addReg(BufReg);
10195   } else {
10196     MIB = BuildMI(*MBB, MI, DL, TII->get(PPC::LWZ), SP)
10197             .addImm(SPOffset)
10198             .addReg(BufReg);
10199   }
10200   MIB.cloneMemRefs(MI);
10201 
10202   // Reload BP
10203   if (PVT == MVT::i64) {
10204     MIB = BuildMI(*MBB, MI, DL, TII->get(PPC::LD), BP)
10205             .addImm(BPOffset)
10206             .addReg(BufReg);
10207   } else {
10208     MIB = BuildMI(*MBB, MI, DL, TII->get(PPC::LWZ), BP)
10209             .addImm(BPOffset)
10210             .addReg(BufReg);
10211   }
10212   MIB.cloneMemRefs(MI);
10213 
10214   // Reload TOC
10215   if (PVT == MVT::i64 && Subtarget.isSVR4ABI()) {
10216     setUsesTOCBasePtr(*MBB->getParent());
10217     MIB = BuildMI(*MBB, MI, DL, TII->get(PPC::LD), PPC::X2)
10218               .addImm(TOCOffset)
10219               .addReg(BufReg)
10220               .cloneMemRefs(MI);
10221   }
10222 
10223   // Jump
10224   BuildMI(*MBB, MI, DL,
10225           TII->get(PVT == MVT::i64 ? PPC::MTCTR8 : PPC::MTCTR)).addReg(Tmp);
10226   BuildMI(*MBB, MI, DL, TII->get(PVT == MVT::i64 ? PPC::BCTR8 : PPC::BCTR));
10227 
10228   MI.eraseFromParent();
10229   return MBB;
10230 }
10231 
10232 MachineBasicBlock *
10233 PPCTargetLowering::EmitInstrWithCustomInserter(MachineInstr &MI,
10234                                                MachineBasicBlock *BB) const {
10235   if (MI.getOpcode() == TargetOpcode::STACKMAP ||
10236       MI.getOpcode() == TargetOpcode::PATCHPOINT) {
10237     if (Subtarget.isPPC64() && Subtarget.isSVR4ABI() &&
10238         MI.getOpcode() == TargetOpcode::PATCHPOINT) {
10239       // Call lowering should have added an r2 operand to indicate a dependence
10240       // on the TOC base pointer value. It can't however, because there is no
10241       // way to mark the dependence as implicit there, and so the stackmap code
10242       // will confuse it with a regular operand. Instead, add the dependence
10243       // here.
10244       setUsesTOCBasePtr(*BB->getParent());
10245       MI.addOperand(MachineOperand::CreateReg(PPC::X2, false, true));
10246     }
10247 
10248     return emitPatchPoint(MI, BB);
10249   }
10250 
10251   if (MI.getOpcode() == PPC::EH_SjLj_SetJmp32 ||
10252       MI.getOpcode() == PPC::EH_SjLj_SetJmp64) {
10253     return emitEHSjLjSetJmp(MI, BB);
10254   } else if (MI.getOpcode() == PPC::EH_SjLj_LongJmp32 ||
10255              MI.getOpcode() == PPC::EH_SjLj_LongJmp64) {
10256     return emitEHSjLjLongJmp(MI, BB);
10257   }
10258 
10259   const TargetInstrInfo *TII = Subtarget.getInstrInfo();
10260 
10261   // To "insert" these instructions we actually have to insert their
10262   // control-flow patterns.
10263   const BasicBlock *LLVM_BB = BB->getBasicBlock();
10264   MachineFunction::iterator It = ++BB->getIterator();
10265 
10266   MachineFunction *F = BB->getParent();
10267 
10268   if (MI.getOpcode() == PPC::SELECT_CC_I4 ||
10269        MI.getOpcode() == PPC::SELECT_CC_I8 ||
10270        MI.getOpcode() == PPC::SELECT_I4 || MI.getOpcode() == PPC::SELECT_I8) {
10271     SmallVector<MachineOperand, 2> Cond;
10272     if (MI.getOpcode() == PPC::SELECT_CC_I4 ||
10273         MI.getOpcode() == PPC::SELECT_CC_I8)
10274       Cond.push_back(MI.getOperand(4));
10275     else
10276       Cond.push_back(MachineOperand::CreateImm(PPC::PRED_BIT_SET));
10277     Cond.push_back(MI.getOperand(1));
10278 
10279     DebugLoc dl = MI.getDebugLoc();
10280     TII->insertSelect(*BB, MI, dl, MI.getOperand(0).getReg(), Cond,
10281                       MI.getOperand(2).getReg(), MI.getOperand(3).getReg());
10282   } else if (MI.getOpcode() == PPC::SELECT_CC_I4 ||
10283              MI.getOpcode() == PPC::SELECT_CC_I8 ||
10284              MI.getOpcode() == PPC::SELECT_CC_F4 ||
10285              MI.getOpcode() == PPC::SELECT_CC_F8 ||
10286              MI.getOpcode() == PPC::SELECT_CC_F16 ||
10287              MI.getOpcode() == PPC::SELECT_CC_QFRC ||
10288              MI.getOpcode() == PPC::SELECT_CC_QSRC ||
10289              MI.getOpcode() == PPC::SELECT_CC_QBRC ||
10290              MI.getOpcode() == PPC::SELECT_CC_VRRC ||
10291              MI.getOpcode() == PPC::SELECT_CC_VSFRC ||
10292              MI.getOpcode() == PPC::SELECT_CC_VSSRC ||
10293              MI.getOpcode() == PPC::SELECT_CC_VSRC ||
10294              MI.getOpcode() == PPC::SELECT_CC_SPE4 ||
10295              MI.getOpcode() == PPC::SELECT_CC_SPE ||
10296              MI.getOpcode() == PPC::SELECT_I4 ||
10297              MI.getOpcode() == PPC::SELECT_I8 ||
10298              MI.getOpcode() == PPC::SELECT_F4 ||
10299              MI.getOpcode() == PPC::SELECT_F8 ||
10300              MI.getOpcode() == PPC::SELECT_F16 ||
10301              MI.getOpcode() == PPC::SELECT_QFRC ||
10302              MI.getOpcode() == PPC::SELECT_QSRC ||
10303              MI.getOpcode() == PPC::SELECT_QBRC ||
10304              MI.getOpcode() == PPC::SELECT_SPE ||
10305              MI.getOpcode() == PPC::SELECT_SPE4 ||
10306              MI.getOpcode() == PPC::SELECT_VRRC ||
10307              MI.getOpcode() == PPC::SELECT_VSFRC ||
10308              MI.getOpcode() == PPC::SELECT_VSSRC ||
10309              MI.getOpcode() == PPC::SELECT_VSRC) {
10310     // The incoming instruction knows the destination vreg to set, the
10311     // condition code register to branch on, the true/false values to
10312     // select between, and a branch opcode to use.
10313 
10314     //  thisMBB:
10315     //  ...
10316     //   TrueVal = ...
10317     //   cmpTY ccX, r1, r2
10318     //   bCC copy1MBB
10319     //   fallthrough --> copy0MBB
10320     MachineBasicBlock *thisMBB = BB;
10321     MachineBasicBlock *copy0MBB = F->CreateMachineBasicBlock(LLVM_BB);
10322     MachineBasicBlock *sinkMBB = F->CreateMachineBasicBlock(LLVM_BB);
10323     DebugLoc dl = MI.getDebugLoc();
10324     F->insert(It, copy0MBB);
10325     F->insert(It, sinkMBB);
10326 
10327     // Transfer the remainder of BB and its successor edges to sinkMBB.
10328     sinkMBB->splice(sinkMBB->begin(), BB,
10329                     std::next(MachineBasicBlock::iterator(MI)), BB->end());
10330     sinkMBB->transferSuccessorsAndUpdatePHIs(BB);
10331 
10332     // Next, add the true and fallthrough blocks as its successors.
10333     BB->addSuccessor(copy0MBB);
10334     BB->addSuccessor(sinkMBB);
10335 
10336     if (MI.getOpcode() == PPC::SELECT_I4 || MI.getOpcode() == PPC::SELECT_I8 ||
10337         MI.getOpcode() == PPC::SELECT_F4 || MI.getOpcode() == PPC::SELECT_F8 ||
10338         MI.getOpcode() == PPC::SELECT_F16 ||
10339         MI.getOpcode() == PPC::SELECT_SPE4 ||
10340         MI.getOpcode() == PPC::SELECT_SPE ||
10341         MI.getOpcode() == PPC::SELECT_QFRC ||
10342         MI.getOpcode() == PPC::SELECT_QSRC ||
10343         MI.getOpcode() == PPC::SELECT_QBRC ||
10344         MI.getOpcode() == PPC::SELECT_VRRC ||
10345         MI.getOpcode() == PPC::SELECT_VSFRC ||
10346         MI.getOpcode() == PPC::SELECT_VSSRC ||
10347         MI.getOpcode() == PPC::SELECT_VSRC) {
10348       BuildMI(BB, dl, TII->get(PPC::BC))
10349           .addReg(MI.getOperand(1).getReg())
10350           .addMBB(sinkMBB);
10351     } else {
10352       unsigned SelectPred = MI.getOperand(4).getImm();
10353       BuildMI(BB, dl, TII->get(PPC::BCC))
10354           .addImm(SelectPred)
10355           .addReg(MI.getOperand(1).getReg())
10356           .addMBB(sinkMBB);
10357     }
10358 
10359     //  copy0MBB:
10360     //   %FalseValue = ...
10361     //   # fallthrough to sinkMBB
10362     BB = copy0MBB;
10363 
10364     // Update machine-CFG edges
10365     BB->addSuccessor(sinkMBB);
10366 
10367     //  sinkMBB:
10368     //   %Result = phi [ %FalseValue, copy0MBB ], [ %TrueValue, thisMBB ]
10369     //  ...
10370     BB = sinkMBB;
10371     BuildMI(*BB, BB->begin(), dl, TII->get(PPC::PHI), MI.getOperand(0).getReg())
10372         .addReg(MI.getOperand(3).getReg())
10373         .addMBB(copy0MBB)
10374         .addReg(MI.getOperand(2).getReg())
10375         .addMBB(thisMBB);
10376   } else if (MI.getOpcode() == PPC::ReadTB) {
10377     // To read the 64-bit time-base register on a 32-bit target, we read the
10378     // two halves. Should the counter have wrapped while it was being read, we
10379     // need to try again.
10380     // ...
10381     // readLoop:
10382     // mfspr Rx,TBU # load from TBU
10383     // mfspr Ry,TB  # load from TB
10384     // mfspr Rz,TBU # load from TBU
10385     // cmpw crX,Rx,Rz # check if 'old'='new'
10386     // bne readLoop   # branch if they're not equal
10387     // ...
10388 
10389     MachineBasicBlock *readMBB = F->CreateMachineBasicBlock(LLVM_BB);
10390     MachineBasicBlock *sinkMBB = F->CreateMachineBasicBlock(LLVM_BB);
10391     DebugLoc dl = MI.getDebugLoc();
10392     F->insert(It, readMBB);
10393     F->insert(It, sinkMBB);
10394 
10395     // Transfer the remainder of BB and its successor edges to sinkMBB.
10396     sinkMBB->splice(sinkMBB->begin(), BB,
10397                     std::next(MachineBasicBlock::iterator(MI)), BB->end());
10398     sinkMBB->transferSuccessorsAndUpdatePHIs(BB);
10399 
10400     BB->addSuccessor(readMBB);
10401     BB = readMBB;
10402 
10403     MachineRegisterInfo &RegInfo = F->getRegInfo();
10404     unsigned ReadAgainReg = RegInfo.createVirtualRegister(&PPC::GPRCRegClass);
10405     unsigned LoReg = MI.getOperand(0).getReg();
10406     unsigned HiReg = MI.getOperand(1).getReg();
10407 
10408     BuildMI(BB, dl, TII->get(PPC::MFSPR), HiReg).addImm(269);
10409     BuildMI(BB, dl, TII->get(PPC::MFSPR), LoReg).addImm(268);
10410     BuildMI(BB, dl, TII->get(PPC::MFSPR), ReadAgainReg).addImm(269);
10411 
10412     unsigned CmpReg = RegInfo.createVirtualRegister(&PPC::CRRCRegClass);
10413 
10414     BuildMI(BB, dl, TII->get(PPC::CMPW), CmpReg)
10415       .addReg(HiReg).addReg(ReadAgainReg);
10416     BuildMI(BB, dl, TII->get(PPC::BCC))
10417       .addImm(PPC::PRED_NE).addReg(CmpReg).addMBB(readMBB);
10418 
10419     BB->addSuccessor(readMBB);
10420     BB->addSuccessor(sinkMBB);
10421   } else if (MI.getOpcode() == PPC::ATOMIC_LOAD_ADD_I8)
10422     BB = EmitPartwordAtomicBinary(MI, BB, true, PPC::ADD4);
10423   else if (MI.getOpcode() == PPC::ATOMIC_LOAD_ADD_I16)
10424     BB = EmitPartwordAtomicBinary(MI, BB, false, PPC::ADD4);
10425   else if (MI.getOpcode() == PPC::ATOMIC_LOAD_ADD_I32)
10426     BB = EmitAtomicBinary(MI, BB, 4, PPC::ADD4);
10427   else if (MI.getOpcode() == PPC::ATOMIC_LOAD_ADD_I64)
10428     BB = EmitAtomicBinary(MI, BB, 8, PPC::ADD8);
10429 
10430   else if (MI.getOpcode() == PPC::ATOMIC_LOAD_AND_I8)
10431     BB = EmitPartwordAtomicBinary(MI, BB, true, PPC::AND);
10432   else if (MI.getOpcode() == PPC::ATOMIC_LOAD_AND_I16)
10433     BB = EmitPartwordAtomicBinary(MI, BB, false, PPC::AND);
10434   else if (MI.getOpcode() == PPC::ATOMIC_LOAD_AND_I32)
10435     BB = EmitAtomicBinary(MI, BB, 4, PPC::AND);
10436   else if (MI.getOpcode() == PPC::ATOMIC_LOAD_AND_I64)
10437     BB = EmitAtomicBinary(MI, BB, 8, PPC::AND8);
10438 
10439   else if (MI.getOpcode() == PPC::ATOMIC_LOAD_OR_I8)
10440     BB = EmitPartwordAtomicBinary(MI, BB, true, PPC::OR);
10441   else if (MI.getOpcode() == PPC::ATOMIC_LOAD_OR_I16)
10442     BB = EmitPartwordAtomicBinary(MI, BB, false, PPC::OR);
10443   else if (MI.getOpcode() == PPC::ATOMIC_LOAD_OR_I32)
10444     BB = EmitAtomicBinary(MI, BB, 4, PPC::OR);
10445   else if (MI.getOpcode() == PPC::ATOMIC_LOAD_OR_I64)
10446     BB = EmitAtomicBinary(MI, BB, 8, PPC::OR8);
10447 
10448   else if (MI.getOpcode() == PPC::ATOMIC_LOAD_XOR_I8)
10449     BB = EmitPartwordAtomicBinary(MI, BB, true, PPC::XOR);
10450   else if (MI.getOpcode() == PPC::ATOMIC_LOAD_XOR_I16)
10451     BB = EmitPartwordAtomicBinary(MI, BB, false, PPC::XOR);
10452   else if (MI.getOpcode() == PPC::ATOMIC_LOAD_XOR_I32)
10453     BB = EmitAtomicBinary(MI, BB, 4, PPC::XOR);
10454   else if (MI.getOpcode() == PPC::ATOMIC_LOAD_XOR_I64)
10455     BB = EmitAtomicBinary(MI, BB, 8, PPC::XOR8);
10456 
10457   else if (MI.getOpcode() == PPC::ATOMIC_LOAD_NAND_I8)
10458     BB = EmitPartwordAtomicBinary(MI, BB, true, PPC::NAND);
10459   else if (MI.getOpcode() == PPC::ATOMIC_LOAD_NAND_I16)
10460     BB = EmitPartwordAtomicBinary(MI, BB, false, PPC::NAND);
10461   else if (MI.getOpcode() == PPC::ATOMIC_LOAD_NAND_I32)
10462     BB = EmitAtomicBinary(MI, BB, 4, PPC::NAND);
10463   else if (MI.getOpcode() == PPC::ATOMIC_LOAD_NAND_I64)
10464     BB = EmitAtomicBinary(MI, BB, 8, PPC::NAND8);
10465 
10466   else if (MI.getOpcode() == PPC::ATOMIC_LOAD_SUB_I8)
10467     BB = EmitPartwordAtomicBinary(MI, BB, true, PPC::SUBF);
10468   else if (MI.getOpcode() == PPC::ATOMIC_LOAD_SUB_I16)
10469     BB = EmitPartwordAtomicBinary(MI, BB, false, PPC::SUBF);
10470   else if (MI.getOpcode() == PPC::ATOMIC_LOAD_SUB_I32)
10471     BB = EmitAtomicBinary(MI, BB, 4, PPC::SUBF);
10472   else if (MI.getOpcode() == PPC::ATOMIC_LOAD_SUB_I64)
10473     BB = EmitAtomicBinary(MI, BB, 8, PPC::SUBF8);
10474 
10475   else if (MI.getOpcode() == PPC::ATOMIC_LOAD_MIN_I8)
10476     BB = EmitPartwordAtomicBinary(MI, BB, true, 0, PPC::CMPW, PPC::PRED_GE);
10477   else if (MI.getOpcode() == PPC::ATOMIC_LOAD_MIN_I16)
10478     BB = EmitPartwordAtomicBinary(MI, BB, false, 0, PPC::CMPW, PPC::PRED_GE);
10479   else if (MI.getOpcode() == PPC::ATOMIC_LOAD_MIN_I32)
10480     BB = EmitAtomicBinary(MI, BB, 4, 0, PPC::CMPW, PPC::PRED_GE);
10481   else if (MI.getOpcode() == PPC::ATOMIC_LOAD_MIN_I64)
10482     BB = EmitAtomicBinary(MI, BB, 8, 0, PPC::CMPD, PPC::PRED_GE);
10483 
10484   else if (MI.getOpcode() == PPC::ATOMIC_LOAD_MAX_I8)
10485     BB = EmitPartwordAtomicBinary(MI, BB, true, 0, PPC::CMPW, PPC::PRED_LE);
10486   else if (MI.getOpcode() == PPC::ATOMIC_LOAD_MAX_I16)
10487     BB = EmitPartwordAtomicBinary(MI, BB, false, 0, PPC::CMPW, PPC::PRED_LE);
10488   else if (MI.getOpcode() == PPC::ATOMIC_LOAD_MAX_I32)
10489     BB = EmitAtomicBinary(MI, BB, 4, 0, PPC::CMPW, PPC::PRED_LE);
10490   else if (MI.getOpcode() == PPC::ATOMIC_LOAD_MAX_I64)
10491     BB = EmitAtomicBinary(MI, BB, 8, 0, PPC::CMPD, PPC::PRED_LE);
10492 
10493   else if (MI.getOpcode() == PPC::ATOMIC_LOAD_UMIN_I8)
10494     BB = EmitPartwordAtomicBinary(MI, BB, true, 0, PPC::CMPLW, PPC::PRED_GE);
10495   else if (MI.getOpcode() == PPC::ATOMIC_LOAD_UMIN_I16)
10496     BB = EmitPartwordAtomicBinary(MI, BB, false, 0, PPC::CMPLW, PPC::PRED_GE);
10497   else if (MI.getOpcode() == PPC::ATOMIC_LOAD_UMIN_I32)
10498     BB = EmitAtomicBinary(MI, BB, 4, 0, PPC::CMPLW, PPC::PRED_GE);
10499   else if (MI.getOpcode() == PPC::ATOMIC_LOAD_UMIN_I64)
10500     BB = EmitAtomicBinary(MI, BB, 8, 0, PPC::CMPLD, PPC::PRED_GE);
10501 
10502   else if (MI.getOpcode() == PPC::ATOMIC_LOAD_UMAX_I8)
10503     BB = EmitPartwordAtomicBinary(MI, BB, true, 0, PPC::CMPLW, PPC::PRED_LE);
10504   else if (MI.getOpcode() == PPC::ATOMIC_LOAD_UMAX_I16)
10505     BB = EmitPartwordAtomicBinary(MI, BB, false, 0, PPC::CMPLW, PPC::PRED_LE);
10506   else if (MI.getOpcode() == PPC::ATOMIC_LOAD_UMAX_I32)
10507     BB = EmitAtomicBinary(MI, BB, 4, 0, PPC::CMPLW, PPC::PRED_LE);
10508   else if (MI.getOpcode() == PPC::ATOMIC_LOAD_UMAX_I64)
10509     BB = EmitAtomicBinary(MI, BB, 8, 0, PPC::CMPLD, PPC::PRED_LE);
10510 
10511   else if (MI.getOpcode() == PPC::ATOMIC_SWAP_I8)
10512     BB = EmitPartwordAtomicBinary(MI, BB, true, 0);
10513   else if (MI.getOpcode() == PPC::ATOMIC_SWAP_I16)
10514     BB = EmitPartwordAtomicBinary(MI, BB, false, 0);
10515   else if (MI.getOpcode() == PPC::ATOMIC_SWAP_I32)
10516     BB = EmitAtomicBinary(MI, BB, 4, 0);
10517   else if (MI.getOpcode() == PPC::ATOMIC_SWAP_I64)
10518     BB = EmitAtomicBinary(MI, BB, 8, 0);
10519   else if (MI.getOpcode() == PPC::ATOMIC_CMP_SWAP_I32 ||
10520            MI.getOpcode() == PPC::ATOMIC_CMP_SWAP_I64 ||
10521            (Subtarget.hasPartwordAtomics() &&
10522             MI.getOpcode() == PPC::ATOMIC_CMP_SWAP_I8) ||
10523            (Subtarget.hasPartwordAtomics() &&
10524             MI.getOpcode() == PPC::ATOMIC_CMP_SWAP_I16)) {
10525     bool is64bit = MI.getOpcode() == PPC::ATOMIC_CMP_SWAP_I64;
10526 
10527     auto LoadMnemonic = PPC::LDARX;
10528     auto StoreMnemonic = PPC::STDCX;
10529     switch (MI.getOpcode()) {
10530     default:
10531       llvm_unreachable("Compare and swap of unknown size");
10532     case PPC::ATOMIC_CMP_SWAP_I8:
10533       LoadMnemonic = PPC::LBARX;
10534       StoreMnemonic = PPC::STBCX;
10535       assert(Subtarget.hasPartwordAtomics() && "No support partword atomics.");
10536       break;
10537     case PPC::ATOMIC_CMP_SWAP_I16:
10538       LoadMnemonic = PPC::LHARX;
10539       StoreMnemonic = PPC::STHCX;
10540       assert(Subtarget.hasPartwordAtomics() && "No support partword atomics.");
10541       break;
10542     case PPC::ATOMIC_CMP_SWAP_I32:
10543       LoadMnemonic = PPC::LWARX;
10544       StoreMnemonic = PPC::STWCX;
10545       break;
10546     case PPC::ATOMIC_CMP_SWAP_I64:
10547       LoadMnemonic = PPC::LDARX;
10548       StoreMnemonic = PPC::STDCX;
10549       break;
10550     }
10551     unsigned dest = MI.getOperand(0).getReg();
10552     unsigned ptrA = MI.getOperand(1).getReg();
10553     unsigned ptrB = MI.getOperand(2).getReg();
10554     unsigned oldval = MI.getOperand(3).getReg();
10555     unsigned newval = MI.getOperand(4).getReg();
10556     DebugLoc dl = MI.getDebugLoc();
10557 
10558     MachineBasicBlock *loop1MBB = F->CreateMachineBasicBlock(LLVM_BB);
10559     MachineBasicBlock *loop2MBB = F->CreateMachineBasicBlock(LLVM_BB);
10560     MachineBasicBlock *midMBB = F->CreateMachineBasicBlock(LLVM_BB);
10561     MachineBasicBlock *exitMBB = F->CreateMachineBasicBlock(LLVM_BB);
10562     F->insert(It, loop1MBB);
10563     F->insert(It, loop2MBB);
10564     F->insert(It, midMBB);
10565     F->insert(It, exitMBB);
10566     exitMBB->splice(exitMBB->begin(), BB,
10567                     std::next(MachineBasicBlock::iterator(MI)), BB->end());
10568     exitMBB->transferSuccessorsAndUpdatePHIs(BB);
10569 
10570     //  thisMBB:
10571     //   ...
10572     //   fallthrough --> loopMBB
10573     BB->addSuccessor(loop1MBB);
10574 
10575     // loop1MBB:
10576     //   l[bhwd]arx dest, ptr
10577     //   cmp[wd] dest, oldval
10578     //   bne- midMBB
10579     // loop2MBB:
10580     //   st[bhwd]cx. newval, ptr
10581     //   bne- loopMBB
10582     //   b exitBB
10583     // midMBB:
10584     //   st[bhwd]cx. dest, ptr
10585     // exitBB:
10586     BB = loop1MBB;
10587     BuildMI(BB, dl, TII->get(LoadMnemonic), dest)
10588       .addReg(ptrA).addReg(ptrB);
10589     BuildMI(BB, dl, TII->get(is64bit ? PPC::CMPD : PPC::CMPW), PPC::CR0)
10590       .addReg(oldval).addReg(dest);
10591     BuildMI(BB, dl, TII->get(PPC::BCC))
10592       .addImm(PPC::PRED_NE).addReg(PPC::CR0).addMBB(midMBB);
10593     BB->addSuccessor(loop2MBB);
10594     BB->addSuccessor(midMBB);
10595 
10596     BB = loop2MBB;
10597     BuildMI(BB, dl, TII->get(StoreMnemonic))
10598       .addReg(newval).addReg(ptrA).addReg(ptrB);
10599     BuildMI(BB, dl, TII->get(PPC::BCC))
10600       .addImm(PPC::PRED_NE).addReg(PPC::CR0).addMBB(loop1MBB);
10601     BuildMI(BB, dl, TII->get(PPC::B)).addMBB(exitMBB);
10602     BB->addSuccessor(loop1MBB);
10603     BB->addSuccessor(exitMBB);
10604 
10605     BB = midMBB;
10606     BuildMI(BB, dl, TII->get(StoreMnemonic))
10607       .addReg(dest).addReg(ptrA).addReg(ptrB);
10608     BB->addSuccessor(exitMBB);
10609 
10610     //  exitMBB:
10611     //   ...
10612     BB = exitMBB;
10613   } else if (MI.getOpcode() == PPC::ATOMIC_CMP_SWAP_I8 ||
10614              MI.getOpcode() == PPC::ATOMIC_CMP_SWAP_I16) {
10615     // We must use 64-bit registers for addresses when targeting 64-bit,
10616     // since we're actually doing arithmetic on them.  Other registers
10617     // can be 32-bit.
10618     bool is64bit = Subtarget.isPPC64();
10619     bool isLittleEndian = Subtarget.isLittleEndian();
10620     bool is8bit = MI.getOpcode() == PPC::ATOMIC_CMP_SWAP_I8;
10621 
10622     unsigned dest = MI.getOperand(0).getReg();
10623     unsigned ptrA = MI.getOperand(1).getReg();
10624     unsigned ptrB = MI.getOperand(2).getReg();
10625     unsigned oldval = MI.getOperand(3).getReg();
10626     unsigned newval = MI.getOperand(4).getReg();
10627     DebugLoc dl = MI.getDebugLoc();
10628 
10629     MachineBasicBlock *loop1MBB = F->CreateMachineBasicBlock(LLVM_BB);
10630     MachineBasicBlock *loop2MBB = F->CreateMachineBasicBlock(LLVM_BB);
10631     MachineBasicBlock *midMBB = F->CreateMachineBasicBlock(LLVM_BB);
10632     MachineBasicBlock *exitMBB = F->CreateMachineBasicBlock(LLVM_BB);
10633     F->insert(It, loop1MBB);
10634     F->insert(It, loop2MBB);
10635     F->insert(It, midMBB);
10636     F->insert(It, exitMBB);
10637     exitMBB->splice(exitMBB->begin(), BB,
10638                     std::next(MachineBasicBlock::iterator(MI)), BB->end());
10639     exitMBB->transferSuccessorsAndUpdatePHIs(BB);
10640 
10641     MachineRegisterInfo &RegInfo = F->getRegInfo();
10642     const TargetRegisterClass *RC = is64bit ? &PPC::G8RCRegClass
10643                                             : &PPC::GPRCRegClass;
10644     unsigned PtrReg = RegInfo.createVirtualRegister(RC);
10645     unsigned Shift1Reg = RegInfo.createVirtualRegister(RC);
10646     unsigned ShiftReg =
10647       isLittleEndian ? Shift1Reg : RegInfo.createVirtualRegister(RC);
10648     unsigned NewVal2Reg = RegInfo.createVirtualRegister(RC);
10649     unsigned NewVal3Reg = RegInfo.createVirtualRegister(RC);
10650     unsigned OldVal2Reg = RegInfo.createVirtualRegister(RC);
10651     unsigned OldVal3Reg = RegInfo.createVirtualRegister(RC);
10652     unsigned MaskReg = RegInfo.createVirtualRegister(RC);
10653     unsigned Mask2Reg = RegInfo.createVirtualRegister(RC);
10654     unsigned Mask3Reg = RegInfo.createVirtualRegister(RC);
10655     unsigned Tmp2Reg = RegInfo.createVirtualRegister(RC);
10656     unsigned Tmp4Reg = RegInfo.createVirtualRegister(RC);
10657     unsigned TmpDestReg = RegInfo.createVirtualRegister(RC);
10658     unsigned Ptr1Reg;
10659     unsigned TmpReg = RegInfo.createVirtualRegister(RC);
10660     unsigned ZeroReg = is64bit ? PPC::ZERO8 : PPC::ZERO;
10661     //  thisMBB:
10662     //   ...
10663     //   fallthrough --> loopMBB
10664     BB->addSuccessor(loop1MBB);
10665 
10666     // The 4-byte load must be aligned, while a char or short may be
10667     // anywhere in the word.  Hence all this nasty bookkeeping code.
10668     //   add ptr1, ptrA, ptrB [copy if ptrA==0]
10669     //   rlwinm shift1, ptr1, 3, 27, 28 [3, 27, 27]
10670     //   xori shift, shift1, 24 [16]
10671     //   rlwinm ptr, ptr1, 0, 0, 29
10672     //   slw newval2, newval, shift
10673     //   slw oldval2, oldval,shift
10674     //   li mask2, 255 [li mask3, 0; ori mask2, mask3, 65535]
10675     //   slw mask, mask2, shift
10676     //   and newval3, newval2, mask
10677     //   and oldval3, oldval2, mask
10678     // loop1MBB:
10679     //   lwarx tmpDest, ptr
10680     //   and tmp, tmpDest, mask
10681     //   cmpw tmp, oldval3
10682     //   bne- midMBB
10683     // loop2MBB:
10684     //   andc tmp2, tmpDest, mask
10685     //   or tmp4, tmp2, newval3
10686     //   stwcx. tmp4, ptr
10687     //   bne- loop1MBB
10688     //   b exitBB
10689     // midMBB:
10690     //   stwcx. tmpDest, ptr
10691     // exitBB:
10692     //   srw dest, tmpDest, shift
10693     if (ptrA != ZeroReg) {
10694       Ptr1Reg = RegInfo.createVirtualRegister(RC);
10695       BuildMI(BB, dl, TII->get(is64bit ? PPC::ADD8 : PPC::ADD4), Ptr1Reg)
10696         .addReg(ptrA).addReg(ptrB);
10697     } else {
10698       Ptr1Reg = ptrB;
10699     }
10700     BuildMI(BB, dl, TII->get(PPC::RLWINM), Shift1Reg).addReg(Ptr1Reg)
10701         .addImm(3).addImm(27).addImm(is8bit ? 28 : 27);
10702     if (!isLittleEndian)
10703       BuildMI(BB, dl, TII->get(is64bit ? PPC::XORI8 : PPC::XORI), ShiftReg)
10704           .addReg(Shift1Reg).addImm(is8bit ? 24 : 16);
10705     if (is64bit)
10706       BuildMI(BB, dl, TII->get(PPC::RLDICR), PtrReg)
10707         .addReg(Ptr1Reg).addImm(0).addImm(61);
10708     else
10709       BuildMI(BB, dl, TII->get(PPC::RLWINM), PtrReg)
10710         .addReg(Ptr1Reg).addImm(0).addImm(0).addImm(29);
10711     BuildMI(BB, dl, TII->get(PPC::SLW), NewVal2Reg)
10712         .addReg(newval).addReg(ShiftReg);
10713     BuildMI(BB, dl, TII->get(PPC::SLW), OldVal2Reg)
10714         .addReg(oldval).addReg(ShiftReg);
10715     if (is8bit)
10716       BuildMI(BB, dl, TII->get(PPC::LI), Mask2Reg).addImm(255);
10717     else {
10718       BuildMI(BB, dl, TII->get(PPC::LI), Mask3Reg).addImm(0);
10719       BuildMI(BB, dl, TII->get(PPC::ORI), Mask2Reg)
10720         .addReg(Mask3Reg).addImm(65535);
10721     }
10722     BuildMI(BB, dl, TII->get(PPC::SLW), MaskReg)
10723         .addReg(Mask2Reg).addReg(ShiftReg);
10724     BuildMI(BB, dl, TII->get(PPC::AND), NewVal3Reg)
10725         .addReg(NewVal2Reg).addReg(MaskReg);
10726     BuildMI(BB, dl, TII->get(PPC::AND), OldVal3Reg)
10727         .addReg(OldVal2Reg).addReg(MaskReg);
10728 
10729     BB = loop1MBB;
10730     BuildMI(BB, dl, TII->get(PPC::LWARX), TmpDestReg)
10731         .addReg(ZeroReg).addReg(PtrReg);
10732     BuildMI(BB, dl, TII->get(PPC::AND),TmpReg)
10733         .addReg(TmpDestReg).addReg(MaskReg);
10734     BuildMI(BB, dl, TII->get(PPC::CMPW), PPC::CR0)
10735         .addReg(TmpReg).addReg(OldVal3Reg);
10736     BuildMI(BB, dl, TII->get(PPC::BCC))
10737         .addImm(PPC::PRED_NE).addReg(PPC::CR0).addMBB(midMBB);
10738     BB->addSuccessor(loop2MBB);
10739     BB->addSuccessor(midMBB);
10740 
10741     BB = loop2MBB;
10742     BuildMI(BB, dl, TII->get(PPC::ANDC),Tmp2Reg)
10743         .addReg(TmpDestReg).addReg(MaskReg);
10744     BuildMI(BB, dl, TII->get(PPC::OR),Tmp4Reg)
10745         .addReg(Tmp2Reg).addReg(NewVal3Reg);
10746     BuildMI(BB, dl, TII->get(PPC::STWCX)).addReg(Tmp4Reg)
10747         .addReg(ZeroReg).addReg(PtrReg);
10748     BuildMI(BB, dl, TII->get(PPC::BCC))
10749       .addImm(PPC::PRED_NE).addReg(PPC::CR0).addMBB(loop1MBB);
10750     BuildMI(BB, dl, TII->get(PPC::B)).addMBB(exitMBB);
10751     BB->addSuccessor(loop1MBB);
10752     BB->addSuccessor(exitMBB);
10753 
10754     BB = midMBB;
10755     BuildMI(BB, dl, TII->get(PPC::STWCX)).addReg(TmpDestReg)
10756       .addReg(ZeroReg).addReg(PtrReg);
10757     BB->addSuccessor(exitMBB);
10758 
10759     //  exitMBB:
10760     //   ...
10761     BB = exitMBB;
10762     BuildMI(*BB, BB->begin(), dl, TII->get(PPC::SRW),dest).addReg(TmpReg)
10763       .addReg(ShiftReg);
10764   } else if (MI.getOpcode() == PPC::FADDrtz) {
10765     // This pseudo performs an FADD with rounding mode temporarily forced
10766     // to round-to-zero.  We emit this via custom inserter since the FPSCR
10767     // is not modeled at the SelectionDAG level.
10768     unsigned Dest = MI.getOperand(0).getReg();
10769     unsigned Src1 = MI.getOperand(1).getReg();
10770     unsigned Src2 = MI.getOperand(2).getReg();
10771     DebugLoc dl = MI.getDebugLoc();
10772 
10773     MachineRegisterInfo &RegInfo = F->getRegInfo();
10774     unsigned MFFSReg = RegInfo.createVirtualRegister(&PPC::F8RCRegClass);
10775 
10776     // Save FPSCR value.
10777     BuildMI(*BB, MI, dl, TII->get(PPC::MFFS), MFFSReg);
10778 
10779     // Set rounding mode to round-to-zero.
10780     BuildMI(*BB, MI, dl, TII->get(PPC::MTFSB1)).addImm(31);
10781     BuildMI(*BB, MI, dl, TII->get(PPC::MTFSB0)).addImm(30);
10782 
10783     // Perform addition.
10784     BuildMI(*BB, MI, dl, TII->get(PPC::FADD), Dest).addReg(Src1).addReg(Src2);
10785 
10786     // Restore FPSCR value.
10787     BuildMI(*BB, MI, dl, TII->get(PPC::MTFSFb)).addImm(1).addReg(MFFSReg);
10788   } else if (MI.getOpcode() == PPC::ANDIo_1_EQ_BIT ||
10789              MI.getOpcode() == PPC::ANDIo_1_GT_BIT ||
10790              MI.getOpcode() == PPC::ANDIo_1_EQ_BIT8 ||
10791              MI.getOpcode() == PPC::ANDIo_1_GT_BIT8) {
10792     unsigned Opcode = (MI.getOpcode() == PPC::ANDIo_1_EQ_BIT8 ||
10793                        MI.getOpcode() == PPC::ANDIo_1_GT_BIT8)
10794                           ? PPC::ANDIo8
10795                           : PPC::ANDIo;
10796     bool isEQ = (MI.getOpcode() == PPC::ANDIo_1_EQ_BIT ||
10797                  MI.getOpcode() == PPC::ANDIo_1_EQ_BIT8);
10798 
10799     MachineRegisterInfo &RegInfo = F->getRegInfo();
10800     unsigned Dest = RegInfo.createVirtualRegister(Opcode == PPC::ANDIo ?
10801                                                   &PPC::GPRCRegClass :
10802                                                   &PPC::G8RCRegClass);
10803 
10804     DebugLoc dl = MI.getDebugLoc();
10805     BuildMI(*BB, MI, dl, TII->get(Opcode), Dest)
10806         .addReg(MI.getOperand(1).getReg())
10807         .addImm(1);
10808     BuildMI(*BB, MI, dl, TII->get(TargetOpcode::COPY),
10809             MI.getOperand(0).getReg())
10810         .addReg(isEQ ? PPC::CR0EQ : PPC::CR0GT);
10811   } else if (MI.getOpcode() == PPC::TCHECK_RET) {
10812     DebugLoc Dl = MI.getDebugLoc();
10813     MachineRegisterInfo &RegInfo = F->getRegInfo();
10814     unsigned CRReg = RegInfo.createVirtualRegister(&PPC::CRRCRegClass);
10815     BuildMI(*BB, MI, Dl, TII->get(PPC::TCHECK), CRReg);
10816     return BB;
10817   } else {
10818     llvm_unreachable("Unexpected instr type to insert");
10819   }
10820 
10821   MI.eraseFromParent(); // The pseudo instruction is gone now.
10822   return BB;
10823 }
10824 
10825 //===----------------------------------------------------------------------===//
10826 // Target Optimization Hooks
10827 //===----------------------------------------------------------------------===//
10828 
10829 static int getEstimateRefinementSteps(EVT VT, const PPCSubtarget &Subtarget) {
10830   // For the estimates, convergence is quadratic, so we essentially double the
10831   // number of digits correct after every iteration. For both FRE and FRSQRTE,
10832   // the minimum architected relative accuracy is 2^-5. When hasRecipPrec(),
10833   // this is 2^-14. IEEE float has 23 digits and double has 52 digits.
10834   int RefinementSteps = Subtarget.hasRecipPrec() ? 1 : 3;
10835   if (VT.getScalarType() == MVT::f64)
10836     RefinementSteps++;
10837   return RefinementSteps;
10838 }
10839 
10840 SDValue PPCTargetLowering::getSqrtEstimate(SDValue Operand, SelectionDAG &DAG,
10841                                            int Enabled, int &RefinementSteps,
10842                                            bool &UseOneConstNR,
10843                                            bool Reciprocal) const {
10844   EVT VT = Operand.getValueType();
10845   if ((VT == MVT::f32 && Subtarget.hasFRSQRTES()) ||
10846       (VT == MVT::f64 && Subtarget.hasFRSQRTE()) ||
10847       (VT == MVT::v4f32 && Subtarget.hasAltivec()) ||
10848       (VT == MVT::v2f64 && Subtarget.hasVSX()) ||
10849       (VT == MVT::v4f32 && Subtarget.hasQPX()) ||
10850       (VT == MVT::v4f64 && Subtarget.hasQPX())) {
10851     if (RefinementSteps == ReciprocalEstimate::Unspecified)
10852       RefinementSteps = getEstimateRefinementSteps(VT, Subtarget);
10853 
10854     UseOneConstNR = true;
10855     return DAG.getNode(PPCISD::FRSQRTE, SDLoc(Operand), VT, Operand);
10856   }
10857   return SDValue();
10858 }
10859 
10860 SDValue PPCTargetLowering::getRecipEstimate(SDValue Operand, SelectionDAG &DAG,
10861                                             int Enabled,
10862                                             int &RefinementSteps) const {
10863   EVT VT = Operand.getValueType();
10864   if ((VT == MVT::f32 && Subtarget.hasFRES()) ||
10865       (VT == MVT::f64 && Subtarget.hasFRE()) ||
10866       (VT == MVT::v4f32 && Subtarget.hasAltivec()) ||
10867       (VT == MVT::v2f64 && Subtarget.hasVSX()) ||
10868       (VT == MVT::v4f32 && Subtarget.hasQPX()) ||
10869       (VT == MVT::v4f64 && Subtarget.hasQPX())) {
10870     if (RefinementSteps == ReciprocalEstimate::Unspecified)
10871       RefinementSteps = getEstimateRefinementSteps(VT, Subtarget);
10872     return DAG.getNode(PPCISD::FRE, SDLoc(Operand), VT, Operand);
10873   }
10874   return SDValue();
10875 }
10876 
10877 unsigned PPCTargetLowering::combineRepeatedFPDivisors() const {
10878   // Note: This functionality is used only when unsafe-fp-math is enabled, and
10879   // on cores with reciprocal estimates (which are used when unsafe-fp-math is
10880   // enabled for division), this functionality is redundant with the default
10881   // combiner logic (once the division -> reciprocal/multiply transformation
10882   // has taken place). As a result, this matters more for older cores than for
10883   // newer ones.
10884 
10885   // Combine multiple FDIVs with the same divisor into multiple FMULs by the
10886   // reciprocal if there are two or more FDIVs (for embedded cores with only
10887   // one FP pipeline) for three or more FDIVs (for generic OOO cores).
10888   switch (Subtarget.getDarwinDirective()) {
10889   default:
10890     return 3;
10891   case PPC::DIR_440:
10892   case PPC::DIR_A2:
10893   case PPC::DIR_E500:
10894   case PPC::DIR_E500mc:
10895   case PPC::DIR_E5500:
10896     return 2;
10897   }
10898 }
10899 
10900 // isConsecutiveLSLoc needs to work even if all adds have not yet been
10901 // collapsed, and so we need to look through chains of them.
10902 static void getBaseWithConstantOffset(SDValue Loc, SDValue &Base,
10903                                      int64_t& Offset, SelectionDAG &DAG) {
10904   if (DAG.isBaseWithConstantOffset(Loc)) {
10905     Base = Loc.getOperand(0);
10906     Offset += cast<ConstantSDNode>(Loc.getOperand(1))->getSExtValue();
10907 
10908     // The base might itself be a base plus an offset, and if so, accumulate
10909     // that as well.
10910     getBaseWithConstantOffset(Loc.getOperand(0), Base, Offset, DAG);
10911   }
10912 }
10913 
10914 static bool isConsecutiveLSLoc(SDValue Loc, EVT VT, LSBaseSDNode *Base,
10915                             unsigned Bytes, int Dist,
10916                             SelectionDAG &DAG) {
10917   if (VT.getSizeInBits() / 8 != Bytes)
10918     return false;
10919 
10920   SDValue BaseLoc = Base->getBasePtr();
10921   if (Loc.getOpcode() == ISD::FrameIndex) {
10922     if (BaseLoc.getOpcode() != ISD::FrameIndex)
10923       return false;
10924     const MachineFrameInfo &MFI = DAG.getMachineFunction().getFrameInfo();
10925     int FI  = cast<FrameIndexSDNode>(Loc)->getIndex();
10926     int BFI = cast<FrameIndexSDNode>(BaseLoc)->getIndex();
10927     int FS  = MFI.getObjectSize(FI);
10928     int BFS = MFI.getObjectSize(BFI);
10929     if (FS != BFS || FS != (int)Bytes) return false;
10930     return MFI.getObjectOffset(FI) == (MFI.getObjectOffset(BFI) + Dist*Bytes);
10931   }
10932 
10933   SDValue Base1 = Loc, Base2 = BaseLoc;
10934   int64_t Offset1 = 0, Offset2 = 0;
10935   getBaseWithConstantOffset(Loc, Base1, Offset1, DAG);
10936   getBaseWithConstantOffset(BaseLoc, Base2, Offset2, DAG);
10937   if (Base1 == Base2 && Offset1 == (Offset2 + Dist * Bytes))
10938     return true;
10939 
10940   const TargetLowering &TLI = DAG.getTargetLoweringInfo();
10941   const GlobalValue *GV1 = nullptr;
10942   const GlobalValue *GV2 = nullptr;
10943   Offset1 = 0;
10944   Offset2 = 0;
10945   bool isGA1 = TLI.isGAPlusOffset(Loc.getNode(), GV1, Offset1);
10946   bool isGA2 = TLI.isGAPlusOffset(BaseLoc.getNode(), GV2, Offset2);
10947   if (isGA1 && isGA2 && GV1 == GV2)
10948     return Offset1 == (Offset2 + Dist*Bytes);
10949   return false;
10950 }
10951 
10952 // Like SelectionDAG::isConsecutiveLoad, but also works for stores, and does
10953 // not enforce equality of the chain operands.
10954 static bool isConsecutiveLS(SDNode *N, LSBaseSDNode *Base,
10955                             unsigned Bytes, int Dist,
10956                             SelectionDAG &DAG) {
10957   if (LSBaseSDNode *LS = dyn_cast<LSBaseSDNode>(N)) {
10958     EVT VT = LS->getMemoryVT();
10959     SDValue Loc = LS->getBasePtr();
10960     return isConsecutiveLSLoc(Loc, VT, Base, Bytes, Dist, DAG);
10961   }
10962 
10963   if (N->getOpcode() == ISD::INTRINSIC_W_CHAIN) {
10964     EVT VT;
10965     switch (cast<ConstantSDNode>(N->getOperand(1))->getZExtValue()) {
10966     default: return false;
10967     case Intrinsic::ppc_qpx_qvlfd:
10968     case Intrinsic::ppc_qpx_qvlfda:
10969       VT = MVT::v4f64;
10970       break;
10971     case Intrinsic::ppc_qpx_qvlfs:
10972     case Intrinsic::ppc_qpx_qvlfsa:
10973       VT = MVT::v4f32;
10974       break;
10975     case Intrinsic::ppc_qpx_qvlfcd:
10976     case Intrinsic::ppc_qpx_qvlfcda:
10977       VT = MVT::v2f64;
10978       break;
10979     case Intrinsic::ppc_qpx_qvlfcs:
10980     case Intrinsic::ppc_qpx_qvlfcsa:
10981       VT = MVT::v2f32;
10982       break;
10983     case Intrinsic::ppc_qpx_qvlfiwa:
10984     case Intrinsic::ppc_qpx_qvlfiwz:
10985     case Intrinsic::ppc_altivec_lvx:
10986     case Intrinsic::ppc_altivec_lvxl:
10987     case Intrinsic::ppc_vsx_lxvw4x:
10988     case Intrinsic::ppc_vsx_lxvw4x_be:
10989       VT = MVT::v4i32;
10990       break;
10991     case Intrinsic::ppc_vsx_lxvd2x:
10992     case Intrinsic::ppc_vsx_lxvd2x_be:
10993       VT = MVT::v2f64;
10994       break;
10995     case Intrinsic::ppc_altivec_lvebx:
10996       VT = MVT::i8;
10997       break;
10998     case Intrinsic::ppc_altivec_lvehx:
10999       VT = MVT::i16;
11000       break;
11001     case Intrinsic::ppc_altivec_lvewx:
11002       VT = MVT::i32;
11003       break;
11004     }
11005 
11006     return isConsecutiveLSLoc(N->getOperand(2), VT, Base, Bytes, Dist, DAG);
11007   }
11008 
11009   if (N->getOpcode() == ISD::INTRINSIC_VOID) {
11010     EVT VT;
11011     switch (cast<ConstantSDNode>(N->getOperand(1))->getZExtValue()) {
11012     default: return false;
11013     case Intrinsic::ppc_qpx_qvstfd:
11014     case Intrinsic::ppc_qpx_qvstfda:
11015       VT = MVT::v4f64;
11016       break;
11017     case Intrinsic::ppc_qpx_qvstfs:
11018     case Intrinsic::ppc_qpx_qvstfsa:
11019       VT = MVT::v4f32;
11020       break;
11021     case Intrinsic::ppc_qpx_qvstfcd:
11022     case Intrinsic::ppc_qpx_qvstfcda:
11023       VT = MVT::v2f64;
11024       break;
11025     case Intrinsic::ppc_qpx_qvstfcs:
11026     case Intrinsic::ppc_qpx_qvstfcsa:
11027       VT = MVT::v2f32;
11028       break;
11029     case Intrinsic::ppc_qpx_qvstfiw:
11030     case Intrinsic::ppc_qpx_qvstfiwa:
11031     case Intrinsic::ppc_altivec_stvx:
11032     case Intrinsic::ppc_altivec_stvxl:
11033     case Intrinsic::ppc_vsx_stxvw4x:
11034       VT = MVT::v4i32;
11035       break;
11036     case Intrinsic::ppc_vsx_stxvd2x:
11037       VT = MVT::v2f64;
11038       break;
11039     case Intrinsic::ppc_vsx_stxvw4x_be:
11040       VT = MVT::v4i32;
11041       break;
11042     case Intrinsic::ppc_vsx_stxvd2x_be:
11043       VT = MVT::v2f64;
11044       break;
11045     case Intrinsic::ppc_altivec_stvebx:
11046       VT = MVT::i8;
11047       break;
11048     case Intrinsic::ppc_altivec_stvehx:
11049       VT = MVT::i16;
11050       break;
11051     case Intrinsic::ppc_altivec_stvewx:
11052       VT = MVT::i32;
11053       break;
11054     }
11055 
11056     return isConsecutiveLSLoc(N->getOperand(3), VT, Base, Bytes, Dist, DAG);
11057   }
11058 
11059   return false;
11060 }
11061 
11062 // Return true is there is a nearyby consecutive load to the one provided
11063 // (regardless of alignment). We search up and down the chain, looking though
11064 // token factors and other loads (but nothing else). As a result, a true result
11065 // indicates that it is safe to create a new consecutive load adjacent to the
11066 // load provided.
11067 static bool findConsecutiveLoad(LoadSDNode *LD, SelectionDAG &DAG) {
11068   SDValue Chain = LD->getChain();
11069   EVT VT = LD->getMemoryVT();
11070 
11071   SmallSet<SDNode *, 16> LoadRoots;
11072   SmallVector<SDNode *, 8> Queue(1, Chain.getNode());
11073   SmallSet<SDNode *, 16> Visited;
11074 
11075   // First, search up the chain, branching to follow all token-factor operands.
11076   // If we find a consecutive load, then we're done, otherwise, record all
11077   // nodes just above the top-level loads and token factors.
11078   while (!Queue.empty()) {
11079     SDNode *ChainNext = Queue.pop_back_val();
11080     if (!Visited.insert(ChainNext).second)
11081       continue;
11082 
11083     if (MemSDNode *ChainLD = dyn_cast<MemSDNode>(ChainNext)) {
11084       if (isConsecutiveLS(ChainLD, LD, VT.getStoreSize(), 1, DAG))
11085         return true;
11086 
11087       if (!Visited.count(ChainLD->getChain().getNode()))
11088         Queue.push_back(ChainLD->getChain().getNode());
11089     } else if (ChainNext->getOpcode() == ISD::TokenFactor) {
11090       for (const SDUse &O : ChainNext->ops())
11091         if (!Visited.count(O.getNode()))
11092           Queue.push_back(O.getNode());
11093     } else
11094       LoadRoots.insert(ChainNext);
11095   }
11096 
11097   // Second, search down the chain, starting from the top-level nodes recorded
11098   // in the first phase. These top-level nodes are the nodes just above all
11099   // loads and token factors. Starting with their uses, recursively look though
11100   // all loads (just the chain uses) and token factors to find a consecutive
11101   // load.
11102   Visited.clear();
11103   Queue.clear();
11104 
11105   for (SmallSet<SDNode *, 16>::iterator I = LoadRoots.begin(),
11106        IE = LoadRoots.end(); I != IE; ++I) {
11107     Queue.push_back(*I);
11108 
11109     while (!Queue.empty()) {
11110       SDNode *LoadRoot = Queue.pop_back_val();
11111       if (!Visited.insert(LoadRoot).second)
11112         continue;
11113 
11114       if (MemSDNode *ChainLD = dyn_cast<MemSDNode>(LoadRoot))
11115         if (isConsecutiveLS(ChainLD, LD, VT.getStoreSize(), 1, DAG))
11116           return true;
11117 
11118       for (SDNode::use_iterator UI = LoadRoot->use_begin(),
11119            UE = LoadRoot->use_end(); UI != UE; ++UI)
11120         if (((isa<MemSDNode>(*UI) &&
11121             cast<MemSDNode>(*UI)->getChain().getNode() == LoadRoot) ||
11122             UI->getOpcode() == ISD::TokenFactor) && !Visited.count(*UI))
11123           Queue.push_back(*UI);
11124     }
11125   }
11126 
11127   return false;
11128 }
11129 
11130 /// This function is called when we have proved that a SETCC node can be replaced
11131 /// by subtraction (and other supporting instructions) so that the result of
11132 /// comparison is kept in a GPR instead of CR. This function is purely for
11133 /// codegen purposes and has some flags to guide the codegen process.
11134 static SDValue generateEquivalentSub(SDNode *N, int Size, bool Complement,
11135                                      bool Swap, SDLoc &DL, SelectionDAG &DAG) {
11136   assert(N->getOpcode() == ISD::SETCC && "ISD::SETCC Expected.");
11137 
11138   // Zero extend the operands to the largest legal integer. Originally, they
11139   // must be of a strictly smaller size.
11140   auto Op0 = DAG.getNode(ISD::ZERO_EXTEND, DL, MVT::i64, N->getOperand(0),
11141                          DAG.getConstant(Size, DL, MVT::i32));
11142   auto Op1 = DAG.getNode(ISD::ZERO_EXTEND, DL, MVT::i64, N->getOperand(1),
11143                          DAG.getConstant(Size, DL, MVT::i32));
11144 
11145   // Swap if needed. Depends on the condition code.
11146   if (Swap)
11147     std::swap(Op0, Op1);
11148 
11149   // Subtract extended integers.
11150   auto SubNode = DAG.getNode(ISD::SUB, DL, MVT::i64, Op0, Op1);
11151 
11152   // Move the sign bit to the least significant position and zero out the rest.
11153   // Now the least significant bit carries the result of original comparison.
11154   auto Shifted = DAG.getNode(ISD::SRL, DL, MVT::i64, SubNode,
11155                              DAG.getConstant(Size - 1, DL, MVT::i32));
11156   auto Final = Shifted;
11157 
11158   // Complement the result if needed. Based on the condition code.
11159   if (Complement)
11160     Final = DAG.getNode(ISD::XOR, DL, MVT::i64, Shifted,
11161                         DAG.getConstant(1, DL, MVT::i64));
11162 
11163   return DAG.getNode(ISD::TRUNCATE, DL, MVT::i1, Final);
11164 }
11165 
11166 SDValue PPCTargetLowering::ConvertSETCCToSubtract(SDNode *N,
11167                                                   DAGCombinerInfo &DCI) const {
11168   assert(N->getOpcode() == ISD::SETCC && "ISD::SETCC Expected.");
11169 
11170   SelectionDAG &DAG = DCI.DAG;
11171   SDLoc DL(N);
11172 
11173   // Size of integers being compared has a critical role in the following
11174   // analysis, so we prefer to do this when all types are legal.
11175   if (!DCI.isAfterLegalizeDAG())
11176     return SDValue();
11177 
11178   // If all users of SETCC extend its value to a legal integer type
11179   // then we replace SETCC with a subtraction
11180   for (SDNode::use_iterator UI = N->use_begin(),
11181        UE = N->use_end(); UI != UE; ++UI) {
11182     if (UI->getOpcode() != ISD::ZERO_EXTEND)
11183       return SDValue();
11184   }
11185 
11186   ISD::CondCode CC = cast<CondCodeSDNode>(N->getOperand(2))->get();
11187   auto OpSize = N->getOperand(0).getValueSizeInBits();
11188 
11189   unsigned Size = DAG.getDataLayout().getLargestLegalIntTypeSizeInBits();
11190 
11191   if (OpSize < Size) {
11192     switch (CC) {
11193     default: break;
11194     case ISD::SETULT:
11195       return generateEquivalentSub(N, Size, false, false, DL, DAG);
11196     case ISD::SETULE:
11197       return generateEquivalentSub(N, Size, true, true, DL, DAG);
11198     case ISD::SETUGT:
11199       return generateEquivalentSub(N, Size, false, true, DL, DAG);
11200     case ISD::SETUGE:
11201       return generateEquivalentSub(N, Size, true, false, DL, DAG);
11202     }
11203   }
11204 
11205   return SDValue();
11206 }
11207 
11208 SDValue PPCTargetLowering::DAGCombineTruncBoolExt(SDNode *N,
11209                                                   DAGCombinerInfo &DCI) const {
11210   SelectionDAG &DAG = DCI.DAG;
11211   SDLoc dl(N);
11212 
11213   assert(Subtarget.useCRBits() && "Expecting to be tracking CR bits");
11214   // If we're tracking CR bits, we need to be careful that we don't have:
11215   //   trunc(binary-ops(zext(x), zext(y)))
11216   // or
11217   //   trunc(binary-ops(binary-ops(zext(x), zext(y)), ...)
11218   // such that we're unnecessarily moving things into GPRs when it would be
11219   // better to keep them in CR bits.
11220 
11221   // Note that trunc here can be an actual i1 trunc, or can be the effective
11222   // truncation that comes from a setcc or select_cc.
11223   if (N->getOpcode() == ISD::TRUNCATE &&
11224       N->getValueType(0) != MVT::i1)
11225     return SDValue();
11226 
11227   if (N->getOperand(0).getValueType() != MVT::i32 &&
11228       N->getOperand(0).getValueType() != MVT::i64)
11229     return SDValue();
11230 
11231   if (N->getOpcode() == ISD::SETCC ||
11232       N->getOpcode() == ISD::SELECT_CC) {
11233     // If we're looking at a comparison, then we need to make sure that the
11234     // high bits (all except for the first) don't matter the result.
11235     ISD::CondCode CC =
11236       cast<CondCodeSDNode>(N->getOperand(
11237         N->getOpcode() == ISD::SETCC ? 2 : 4))->get();
11238     unsigned OpBits = N->getOperand(0).getValueSizeInBits();
11239 
11240     if (ISD::isSignedIntSetCC(CC)) {
11241       if (DAG.ComputeNumSignBits(N->getOperand(0)) != OpBits ||
11242           DAG.ComputeNumSignBits(N->getOperand(1)) != OpBits)
11243         return SDValue();
11244     } else if (ISD::isUnsignedIntSetCC(CC)) {
11245       if (!DAG.MaskedValueIsZero(N->getOperand(0),
11246                                  APInt::getHighBitsSet(OpBits, OpBits-1)) ||
11247           !DAG.MaskedValueIsZero(N->getOperand(1),
11248                                  APInt::getHighBitsSet(OpBits, OpBits-1)))
11249         return (N->getOpcode() == ISD::SETCC ? ConvertSETCCToSubtract(N, DCI)
11250                                              : SDValue());
11251     } else {
11252       // This is neither a signed nor an unsigned comparison, just make sure
11253       // that the high bits are equal.
11254       KnownBits Op1Known, Op2Known;
11255       DAG.computeKnownBits(N->getOperand(0), Op1Known);
11256       DAG.computeKnownBits(N->getOperand(1), Op2Known);
11257 
11258       // We don't really care about what is known about the first bit (if
11259       // anything), so clear it in all masks prior to comparing them.
11260       Op1Known.Zero.clearBit(0); Op1Known.One.clearBit(0);
11261       Op2Known.Zero.clearBit(0); Op2Known.One.clearBit(0);
11262 
11263       if (Op1Known.Zero != Op2Known.Zero || Op1Known.One != Op2Known.One)
11264         return SDValue();
11265     }
11266   }
11267 
11268   // We now know that the higher-order bits are irrelevant, we just need to
11269   // make sure that all of the intermediate operations are bit operations, and
11270   // all inputs are extensions.
11271   if (N->getOperand(0).getOpcode() != ISD::AND &&
11272       N->getOperand(0).getOpcode() != ISD::OR  &&
11273       N->getOperand(0).getOpcode() != ISD::XOR &&
11274       N->getOperand(0).getOpcode() != ISD::SELECT &&
11275       N->getOperand(0).getOpcode() != ISD::SELECT_CC &&
11276       N->getOperand(0).getOpcode() != ISD::TRUNCATE &&
11277       N->getOperand(0).getOpcode() != ISD::SIGN_EXTEND &&
11278       N->getOperand(0).getOpcode() != ISD::ZERO_EXTEND &&
11279       N->getOperand(0).getOpcode() != ISD::ANY_EXTEND)
11280     return SDValue();
11281 
11282   if ((N->getOpcode() == ISD::SETCC || N->getOpcode() == ISD::SELECT_CC) &&
11283       N->getOperand(1).getOpcode() != ISD::AND &&
11284       N->getOperand(1).getOpcode() != ISD::OR  &&
11285       N->getOperand(1).getOpcode() != ISD::XOR &&
11286       N->getOperand(1).getOpcode() != ISD::SELECT &&
11287       N->getOperand(1).getOpcode() != ISD::SELECT_CC &&
11288       N->getOperand(1).getOpcode() != ISD::TRUNCATE &&
11289       N->getOperand(1).getOpcode() != ISD::SIGN_EXTEND &&
11290       N->getOperand(1).getOpcode() != ISD::ZERO_EXTEND &&
11291       N->getOperand(1).getOpcode() != ISD::ANY_EXTEND)
11292     return SDValue();
11293 
11294   SmallVector<SDValue, 4> Inputs;
11295   SmallVector<SDValue, 8> BinOps, PromOps;
11296   SmallPtrSet<SDNode *, 16> Visited;
11297 
11298   for (unsigned i = 0; i < 2; ++i) {
11299     if (((N->getOperand(i).getOpcode() == ISD::SIGN_EXTEND ||
11300           N->getOperand(i).getOpcode() == ISD::ZERO_EXTEND ||
11301           N->getOperand(i).getOpcode() == ISD::ANY_EXTEND) &&
11302           N->getOperand(i).getOperand(0).getValueType() == MVT::i1) ||
11303         isa<ConstantSDNode>(N->getOperand(i)))
11304       Inputs.push_back(N->getOperand(i));
11305     else
11306       BinOps.push_back(N->getOperand(i));
11307 
11308     if (N->getOpcode() == ISD::TRUNCATE)
11309       break;
11310   }
11311 
11312   // Visit all inputs, collect all binary operations (and, or, xor and
11313   // select) that are all fed by extensions.
11314   while (!BinOps.empty()) {
11315     SDValue BinOp = BinOps.back();
11316     BinOps.pop_back();
11317 
11318     if (!Visited.insert(BinOp.getNode()).second)
11319       continue;
11320 
11321     PromOps.push_back(BinOp);
11322 
11323     for (unsigned i = 0, ie = BinOp.getNumOperands(); i != ie; ++i) {
11324       // The condition of the select is not promoted.
11325       if (BinOp.getOpcode() == ISD::SELECT && i == 0)
11326         continue;
11327       if (BinOp.getOpcode() == ISD::SELECT_CC && i != 2 && i != 3)
11328         continue;
11329 
11330       if (((BinOp.getOperand(i).getOpcode() == ISD::SIGN_EXTEND ||
11331             BinOp.getOperand(i).getOpcode() == ISD::ZERO_EXTEND ||
11332             BinOp.getOperand(i).getOpcode() == ISD::ANY_EXTEND) &&
11333            BinOp.getOperand(i).getOperand(0).getValueType() == MVT::i1) ||
11334           isa<ConstantSDNode>(BinOp.getOperand(i))) {
11335         Inputs.push_back(BinOp.getOperand(i));
11336       } else if (BinOp.getOperand(i).getOpcode() == ISD::AND ||
11337                  BinOp.getOperand(i).getOpcode() == ISD::OR  ||
11338                  BinOp.getOperand(i).getOpcode() == ISD::XOR ||
11339                  BinOp.getOperand(i).getOpcode() == ISD::SELECT ||
11340                  BinOp.getOperand(i).getOpcode() == ISD::SELECT_CC ||
11341                  BinOp.getOperand(i).getOpcode() == ISD::TRUNCATE ||
11342                  BinOp.getOperand(i).getOpcode() == ISD::SIGN_EXTEND ||
11343                  BinOp.getOperand(i).getOpcode() == ISD::ZERO_EXTEND ||
11344                  BinOp.getOperand(i).getOpcode() == ISD::ANY_EXTEND) {
11345         BinOps.push_back(BinOp.getOperand(i));
11346       } else {
11347         // We have an input that is not an extension or another binary
11348         // operation; we'll abort this transformation.
11349         return SDValue();
11350       }
11351     }
11352   }
11353 
11354   // Make sure that this is a self-contained cluster of operations (which
11355   // is not quite the same thing as saying that everything has only one
11356   // use).
11357   for (unsigned i = 0, ie = Inputs.size(); i != ie; ++i) {
11358     if (isa<ConstantSDNode>(Inputs[i]))
11359       continue;
11360 
11361     for (SDNode::use_iterator UI = Inputs[i].getNode()->use_begin(),
11362                               UE = Inputs[i].getNode()->use_end();
11363          UI != UE; ++UI) {
11364       SDNode *User = *UI;
11365       if (User != N && !Visited.count(User))
11366         return SDValue();
11367 
11368       // Make sure that we're not going to promote the non-output-value
11369       // operand(s) or SELECT or SELECT_CC.
11370       // FIXME: Although we could sometimes handle this, and it does occur in
11371       // practice that one of the condition inputs to the select is also one of
11372       // the outputs, we currently can't deal with this.
11373       if (User->getOpcode() == ISD::SELECT) {
11374         if (User->getOperand(0) == Inputs[i])
11375           return SDValue();
11376       } else if (User->getOpcode() == ISD::SELECT_CC) {
11377         if (User->getOperand(0) == Inputs[i] ||
11378             User->getOperand(1) == Inputs[i])
11379           return SDValue();
11380       }
11381     }
11382   }
11383 
11384   for (unsigned i = 0, ie = PromOps.size(); i != ie; ++i) {
11385     for (SDNode::use_iterator UI = PromOps[i].getNode()->use_begin(),
11386                               UE = PromOps[i].getNode()->use_end();
11387          UI != UE; ++UI) {
11388       SDNode *User = *UI;
11389       if (User != N && !Visited.count(User))
11390         return SDValue();
11391 
11392       // Make sure that we're not going to promote the non-output-value
11393       // operand(s) or SELECT or SELECT_CC.
11394       // FIXME: Although we could sometimes handle this, and it does occur in
11395       // practice that one of the condition inputs to the select is also one of
11396       // the outputs, we currently can't deal with this.
11397       if (User->getOpcode() == ISD::SELECT) {
11398         if (User->getOperand(0) == PromOps[i])
11399           return SDValue();
11400       } else if (User->getOpcode() == ISD::SELECT_CC) {
11401         if (User->getOperand(0) == PromOps[i] ||
11402             User->getOperand(1) == PromOps[i])
11403           return SDValue();
11404       }
11405     }
11406   }
11407 
11408   // Replace all inputs with the extension operand.
11409   for (unsigned i = 0, ie = Inputs.size(); i != ie; ++i) {
11410     // Constants may have users outside the cluster of to-be-promoted nodes,
11411     // and so we need to replace those as we do the promotions.
11412     if (isa<ConstantSDNode>(Inputs[i]))
11413       continue;
11414     else
11415       DAG.ReplaceAllUsesOfValueWith(Inputs[i], Inputs[i].getOperand(0));
11416   }
11417 
11418   std::list<HandleSDNode> PromOpHandles;
11419   for (auto &PromOp : PromOps)
11420     PromOpHandles.emplace_back(PromOp);
11421 
11422   // Replace all operations (these are all the same, but have a different
11423   // (i1) return type). DAG.getNode will validate that the types of
11424   // a binary operator match, so go through the list in reverse so that
11425   // we've likely promoted both operands first. Any intermediate truncations or
11426   // extensions disappear.
11427   while (!PromOpHandles.empty()) {
11428     SDValue PromOp = PromOpHandles.back().getValue();
11429     PromOpHandles.pop_back();
11430 
11431     if (PromOp.getOpcode() == ISD::TRUNCATE ||
11432         PromOp.getOpcode() == ISD::SIGN_EXTEND ||
11433         PromOp.getOpcode() == ISD::ZERO_EXTEND ||
11434         PromOp.getOpcode() == ISD::ANY_EXTEND) {
11435       if (!isa<ConstantSDNode>(PromOp.getOperand(0)) &&
11436           PromOp.getOperand(0).getValueType() != MVT::i1) {
11437         // The operand is not yet ready (see comment below).
11438         PromOpHandles.emplace_front(PromOp);
11439         continue;
11440       }
11441 
11442       SDValue RepValue = PromOp.getOperand(0);
11443       if (isa<ConstantSDNode>(RepValue))
11444         RepValue = DAG.getNode(ISD::TRUNCATE, dl, MVT::i1, RepValue);
11445 
11446       DAG.ReplaceAllUsesOfValueWith(PromOp, RepValue);
11447       continue;
11448     }
11449 
11450     unsigned C;
11451     switch (PromOp.getOpcode()) {
11452     default:             C = 0; break;
11453     case ISD::SELECT:    C = 1; break;
11454     case ISD::SELECT_CC: C = 2; break;
11455     }
11456 
11457     if ((!isa<ConstantSDNode>(PromOp.getOperand(C)) &&
11458          PromOp.getOperand(C).getValueType() != MVT::i1) ||
11459         (!isa<ConstantSDNode>(PromOp.getOperand(C+1)) &&
11460          PromOp.getOperand(C+1).getValueType() != MVT::i1)) {
11461       // The to-be-promoted operands of this node have not yet been
11462       // promoted (this should be rare because we're going through the
11463       // list backward, but if one of the operands has several users in
11464       // this cluster of to-be-promoted nodes, it is possible).
11465       PromOpHandles.emplace_front(PromOp);
11466       continue;
11467     }
11468 
11469     SmallVector<SDValue, 3> Ops(PromOp.getNode()->op_begin(),
11470                                 PromOp.getNode()->op_end());
11471 
11472     // If there are any constant inputs, make sure they're replaced now.
11473     for (unsigned i = 0; i < 2; ++i)
11474       if (isa<ConstantSDNode>(Ops[C+i]))
11475         Ops[C+i] = DAG.getNode(ISD::TRUNCATE, dl, MVT::i1, Ops[C+i]);
11476 
11477     DAG.ReplaceAllUsesOfValueWith(PromOp,
11478       DAG.getNode(PromOp.getOpcode(), dl, MVT::i1, Ops));
11479   }
11480 
11481   // Now we're left with the initial truncation itself.
11482   if (N->getOpcode() == ISD::TRUNCATE)
11483     return N->getOperand(0);
11484 
11485   // Otherwise, this is a comparison. The operands to be compared have just
11486   // changed type (to i1), but everything else is the same.
11487   return SDValue(N, 0);
11488 }
11489 
11490 SDValue PPCTargetLowering::DAGCombineExtBoolTrunc(SDNode *N,
11491                                                   DAGCombinerInfo &DCI) const {
11492   SelectionDAG &DAG = DCI.DAG;
11493   SDLoc dl(N);
11494 
11495   // If we're tracking CR bits, we need to be careful that we don't have:
11496   //   zext(binary-ops(trunc(x), trunc(y)))
11497   // or
11498   //   zext(binary-ops(binary-ops(trunc(x), trunc(y)), ...)
11499   // such that we're unnecessarily moving things into CR bits that can more
11500   // efficiently stay in GPRs. Note that if we're not certain that the high
11501   // bits are set as required by the final extension, we still may need to do
11502   // some masking to get the proper behavior.
11503 
11504   // This same functionality is important on PPC64 when dealing with
11505   // 32-to-64-bit extensions; these occur often when 32-bit values are used as
11506   // the return values of functions. Because it is so similar, it is handled
11507   // here as well.
11508 
11509   if (N->getValueType(0) != MVT::i32 &&
11510       N->getValueType(0) != MVT::i64)
11511     return SDValue();
11512 
11513   if (!((N->getOperand(0).getValueType() == MVT::i1 && Subtarget.useCRBits()) ||
11514         (N->getOperand(0).getValueType() == MVT::i32 && Subtarget.isPPC64())))
11515     return SDValue();
11516 
11517   if (N->getOperand(0).getOpcode() != ISD::AND &&
11518       N->getOperand(0).getOpcode() != ISD::OR  &&
11519       N->getOperand(0).getOpcode() != ISD::XOR &&
11520       N->getOperand(0).getOpcode() != ISD::SELECT &&
11521       N->getOperand(0).getOpcode() != ISD::SELECT_CC)
11522     return SDValue();
11523 
11524   SmallVector<SDValue, 4> Inputs;
11525   SmallVector<SDValue, 8> BinOps(1, N->getOperand(0)), PromOps;
11526   SmallPtrSet<SDNode *, 16> Visited;
11527 
11528   // Visit all inputs, collect all binary operations (and, or, xor and
11529   // select) that are all fed by truncations.
11530   while (!BinOps.empty()) {
11531     SDValue BinOp = BinOps.back();
11532     BinOps.pop_back();
11533 
11534     if (!Visited.insert(BinOp.getNode()).second)
11535       continue;
11536 
11537     PromOps.push_back(BinOp);
11538 
11539     for (unsigned i = 0, ie = BinOp.getNumOperands(); i != ie; ++i) {
11540       // The condition of the select is not promoted.
11541       if (BinOp.getOpcode() == ISD::SELECT && i == 0)
11542         continue;
11543       if (BinOp.getOpcode() == ISD::SELECT_CC && i != 2 && i != 3)
11544         continue;
11545 
11546       if (BinOp.getOperand(i).getOpcode() == ISD::TRUNCATE ||
11547           isa<ConstantSDNode>(BinOp.getOperand(i))) {
11548         Inputs.push_back(BinOp.getOperand(i));
11549       } else if (BinOp.getOperand(i).getOpcode() == ISD::AND ||
11550                  BinOp.getOperand(i).getOpcode() == ISD::OR  ||
11551                  BinOp.getOperand(i).getOpcode() == ISD::XOR ||
11552                  BinOp.getOperand(i).getOpcode() == ISD::SELECT ||
11553                  BinOp.getOperand(i).getOpcode() == ISD::SELECT_CC) {
11554         BinOps.push_back(BinOp.getOperand(i));
11555       } else {
11556         // We have an input that is not a truncation or another binary
11557         // operation; we'll abort this transformation.
11558         return SDValue();
11559       }
11560     }
11561   }
11562 
11563   // The operands of a select that must be truncated when the select is
11564   // promoted because the operand is actually part of the to-be-promoted set.
11565   DenseMap<SDNode *, EVT> SelectTruncOp[2];
11566 
11567   // Make sure that this is a self-contained cluster of operations (which
11568   // is not quite the same thing as saying that everything has only one
11569   // use).
11570   for (unsigned i = 0, ie = Inputs.size(); i != ie; ++i) {
11571     if (isa<ConstantSDNode>(Inputs[i]))
11572       continue;
11573 
11574     for (SDNode::use_iterator UI = Inputs[i].getNode()->use_begin(),
11575                               UE = Inputs[i].getNode()->use_end();
11576          UI != UE; ++UI) {
11577       SDNode *User = *UI;
11578       if (User != N && !Visited.count(User))
11579         return SDValue();
11580 
11581       // If we're going to promote the non-output-value operand(s) or SELECT or
11582       // SELECT_CC, record them for truncation.
11583       if (User->getOpcode() == ISD::SELECT) {
11584         if (User->getOperand(0) == Inputs[i])
11585           SelectTruncOp[0].insert(std::make_pair(User,
11586                                     User->getOperand(0).getValueType()));
11587       } else if (User->getOpcode() == ISD::SELECT_CC) {
11588         if (User->getOperand(0) == Inputs[i])
11589           SelectTruncOp[0].insert(std::make_pair(User,
11590                                     User->getOperand(0).getValueType()));
11591         if (User->getOperand(1) == Inputs[i])
11592           SelectTruncOp[1].insert(std::make_pair(User,
11593                                     User->getOperand(1).getValueType()));
11594       }
11595     }
11596   }
11597 
11598   for (unsigned i = 0, ie = PromOps.size(); i != ie; ++i) {
11599     for (SDNode::use_iterator UI = PromOps[i].getNode()->use_begin(),
11600                               UE = PromOps[i].getNode()->use_end();
11601          UI != UE; ++UI) {
11602       SDNode *User = *UI;
11603       if (User != N && !Visited.count(User))
11604         return SDValue();
11605 
11606       // If we're going to promote the non-output-value operand(s) or SELECT or
11607       // SELECT_CC, record them for truncation.
11608       if (User->getOpcode() == ISD::SELECT) {
11609         if (User->getOperand(0) == PromOps[i])
11610           SelectTruncOp[0].insert(std::make_pair(User,
11611                                     User->getOperand(0).getValueType()));
11612       } else if (User->getOpcode() == ISD::SELECT_CC) {
11613         if (User->getOperand(0) == PromOps[i])
11614           SelectTruncOp[0].insert(std::make_pair(User,
11615                                     User->getOperand(0).getValueType()));
11616         if (User->getOperand(1) == PromOps[i])
11617           SelectTruncOp[1].insert(std::make_pair(User,
11618                                     User->getOperand(1).getValueType()));
11619       }
11620     }
11621   }
11622 
11623   unsigned PromBits = N->getOperand(0).getValueSizeInBits();
11624   bool ReallyNeedsExt = false;
11625   if (N->getOpcode() != ISD::ANY_EXTEND) {
11626     // If all of the inputs are not already sign/zero extended, then
11627     // we'll still need to do that at the end.
11628     for (unsigned i = 0, ie = Inputs.size(); i != ie; ++i) {
11629       if (isa<ConstantSDNode>(Inputs[i]))
11630         continue;
11631 
11632       unsigned OpBits =
11633         Inputs[i].getOperand(0).getValueSizeInBits();
11634       assert(PromBits < OpBits && "Truncation not to a smaller bit count?");
11635 
11636       if ((N->getOpcode() == ISD::ZERO_EXTEND &&
11637            !DAG.MaskedValueIsZero(Inputs[i].getOperand(0),
11638                                   APInt::getHighBitsSet(OpBits,
11639                                                         OpBits-PromBits))) ||
11640           (N->getOpcode() == ISD::SIGN_EXTEND &&
11641            DAG.ComputeNumSignBits(Inputs[i].getOperand(0)) <
11642              (OpBits-(PromBits-1)))) {
11643         ReallyNeedsExt = true;
11644         break;
11645       }
11646     }
11647   }
11648 
11649   // Replace all inputs, either with the truncation operand, or a
11650   // truncation or extension to the final output type.
11651   for (unsigned i = 0, ie = Inputs.size(); i != ie; ++i) {
11652     // Constant inputs need to be replaced with the to-be-promoted nodes that
11653     // use them because they might have users outside of the cluster of
11654     // promoted nodes.
11655     if (isa<ConstantSDNode>(Inputs[i]))
11656       continue;
11657 
11658     SDValue InSrc = Inputs[i].getOperand(0);
11659     if (Inputs[i].getValueType() == N->getValueType(0))
11660       DAG.ReplaceAllUsesOfValueWith(Inputs[i], InSrc);
11661     else if (N->getOpcode() == ISD::SIGN_EXTEND)
11662       DAG.ReplaceAllUsesOfValueWith(Inputs[i],
11663         DAG.getSExtOrTrunc(InSrc, dl, N->getValueType(0)));
11664     else if (N->getOpcode() == ISD::ZERO_EXTEND)
11665       DAG.ReplaceAllUsesOfValueWith(Inputs[i],
11666         DAG.getZExtOrTrunc(InSrc, dl, N->getValueType(0)));
11667     else
11668       DAG.ReplaceAllUsesOfValueWith(Inputs[i],
11669         DAG.getAnyExtOrTrunc(InSrc, dl, N->getValueType(0)));
11670   }
11671 
11672   std::list<HandleSDNode> PromOpHandles;
11673   for (auto &PromOp : PromOps)
11674     PromOpHandles.emplace_back(PromOp);
11675 
11676   // Replace all operations (these are all the same, but have a different
11677   // (promoted) return type). DAG.getNode will validate that the types of
11678   // a binary operator match, so go through the list in reverse so that
11679   // we've likely promoted both operands first.
11680   while (!PromOpHandles.empty()) {
11681     SDValue PromOp = PromOpHandles.back().getValue();
11682     PromOpHandles.pop_back();
11683 
11684     unsigned C;
11685     switch (PromOp.getOpcode()) {
11686     default:             C = 0; break;
11687     case ISD::SELECT:    C = 1; break;
11688     case ISD::SELECT_CC: C = 2; break;
11689     }
11690 
11691     if ((!isa<ConstantSDNode>(PromOp.getOperand(C)) &&
11692          PromOp.getOperand(C).getValueType() != N->getValueType(0)) ||
11693         (!isa<ConstantSDNode>(PromOp.getOperand(C+1)) &&
11694          PromOp.getOperand(C+1).getValueType() != N->getValueType(0))) {
11695       // The to-be-promoted operands of this node have not yet been
11696       // promoted (this should be rare because we're going through the
11697       // list backward, but if one of the operands has several users in
11698       // this cluster of to-be-promoted nodes, it is possible).
11699       PromOpHandles.emplace_front(PromOp);
11700       continue;
11701     }
11702 
11703     // For SELECT and SELECT_CC nodes, we do a similar check for any
11704     // to-be-promoted comparison inputs.
11705     if (PromOp.getOpcode() == ISD::SELECT ||
11706         PromOp.getOpcode() == ISD::SELECT_CC) {
11707       if ((SelectTruncOp[0].count(PromOp.getNode()) &&
11708            PromOp.getOperand(0).getValueType() != N->getValueType(0)) ||
11709           (SelectTruncOp[1].count(PromOp.getNode()) &&
11710            PromOp.getOperand(1).getValueType() != N->getValueType(0))) {
11711         PromOpHandles.emplace_front(PromOp);
11712         continue;
11713       }
11714     }
11715 
11716     SmallVector<SDValue, 3> Ops(PromOp.getNode()->op_begin(),
11717                                 PromOp.getNode()->op_end());
11718 
11719     // If this node has constant inputs, then they'll need to be promoted here.
11720     for (unsigned i = 0; i < 2; ++i) {
11721       if (!isa<ConstantSDNode>(Ops[C+i]))
11722         continue;
11723       if (Ops[C+i].getValueType() == N->getValueType(0))
11724         continue;
11725 
11726       if (N->getOpcode() == ISD::SIGN_EXTEND)
11727         Ops[C+i] = DAG.getSExtOrTrunc(Ops[C+i], dl, N->getValueType(0));
11728       else if (N->getOpcode() == ISD::ZERO_EXTEND)
11729         Ops[C+i] = DAG.getZExtOrTrunc(Ops[C+i], dl, N->getValueType(0));
11730       else
11731         Ops[C+i] = DAG.getAnyExtOrTrunc(Ops[C+i], dl, N->getValueType(0));
11732     }
11733 
11734     // If we've promoted the comparison inputs of a SELECT or SELECT_CC,
11735     // truncate them again to the original value type.
11736     if (PromOp.getOpcode() == ISD::SELECT ||
11737         PromOp.getOpcode() == ISD::SELECT_CC) {
11738       auto SI0 = SelectTruncOp[0].find(PromOp.getNode());
11739       if (SI0 != SelectTruncOp[0].end())
11740         Ops[0] = DAG.getNode(ISD::TRUNCATE, dl, SI0->second, Ops[0]);
11741       auto SI1 = SelectTruncOp[1].find(PromOp.getNode());
11742       if (SI1 != SelectTruncOp[1].end())
11743         Ops[1] = DAG.getNode(ISD::TRUNCATE, dl, SI1->second, Ops[1]);
11744     }
11745 
11746     DAG.ReplaceAllUsesOfValueWith(PromOp,
11747       DAG.getNode(PromOp.getOpcode(), dl, N->getValueType(0), Ops));
11748   }
11749 
11750   // Now we're left with the initial extension itself.
11751   if (!ReallyNeedsExt)
11752     return N->getOperand(0);
11753 
11754   // To zero extend, just mask off everything except for the first bit (in the
11755   // i1 case).
11756   if (N->getOpcode() == ISD::ZERO_EXTEND)
11757     return DAG.getNode(ISD::AND, dl, N->getValueType(0), N->getOperand(0),
11758                        DAG.getConstant(APInt::getLowBitsSet(
11759                                          N->getValueSizeInBits(0), PromBits),
11760                                        dl, N->getValueType(0)));
11761 
11762   assert(N->getOpcode() == ISD::SIGN_EXTEND &&
11763          "Invalid extension type");
11764   EVT ShiftAmountTy = getShiftAmountTy(N->getValueType(0), DAG.getDataLayout());
11765   SDValue ShiftCst =
11766       DAG.getConstant(N->getValueSizeInBits(0) - PromBits, dl, ShiftAmountTy);
11767   return DAG.getNode(
11768       ISD::SRA, dl, N->getValueType(0),
11769       DAG.getNode(ISD::SHL, dl, N->getValueType(0), N->getOperand(0), ShiftCst),
11770       ShiftCst);
11771 }
11772 
11773 // Is this an extending load from an f32 to an f64?
11774 static bool isFPExtLoad(SDValue Op) {
11775   if (LoadSDNode *LD = dyn_cast<LoadSDNode>(Op.getNode()))
11776     return LD->getExtensionType() == ISD::EXTLOAD &&
11777       Op.getValueType() == MVT::f64;
11778   return false;
11779 }
11780 
11781 /// Reduces the number of fp-to-int conversion when building a vector.
11782 ///
11783 /// If this vector is built out of floating to integer conversions,
11784 /// transform it to a vector built out of floating point values followed by a
11785 /// single floating to integer conversion of the vector.
11786 /// Namely  (build_vector (fptosi $A), (fptosi $B), ...)
11787 /// becomes (fptosi (build_vector ($A, $B, ...)))
11788 SDValue PPCTargetLowering::
11789 combineElementTruncationToVectorTruncation(SDNode *N,
11790                                            DAGCombinerInfo &DCI) const {
11791   assert(N->getOpcode() == ISD::BUILD_VECTOR &&
11792          "Should be called with a BUILD_VECTOR node");
11793 
11794   SelectionDAG &DAG = DCI.DAG;
11795   SDLoc dl(N);
11796 
11797   SDValue FirstInput = N->getOperand(0);
11798   assert(FirstInput.getOpcode() == PPCISD::MFVSR &&
11799          "The input operand must be an fp-to-int conversion.");
11800 
11801   // This combine happens after legalization so the fp_to_[su]i nodes are
11802   // already converted to PPCSISD nodes.
11803   unsigned FirstConversion = FirstInput.getOperand(0).getOpcode();
11804   if (FirstConversion == PPCISD::FCTIDZ ||
11805       FirstConversion == PPCISD::FCTIDUZ ||
11806       FirstConversion == PPCISD::FCTIWZ ||
11807       FirstConversion == PPCISD::FCTIWUZ) {
11808     bool IsSplat = true;
11809     bool Is32Bit = FirstConversion == PPCISD::FCTIWZ ||
11810       FirstConversion == PPCISD::FCTIWUZ;
11811     EVT SrcVT = FirstInput.getOperand(0).getValueType();
11812     SmallVector<SDValue, 4> Ops;
11813     EVT TargetVT = N->getValueType(0);
11814     for (int i = 0, e = N->getNumOperands(); i < e; ++i) {
11815       SDValue NextOp = N->getOperand(i);
11816       if (NextOp.getOpcode() != PPCISD::MFVSR)
11817         return SDValue();
11818       unsigned NextConversion = NextOp.getOperand(0).getOpcode();
11819       if (NextConversion != FirstConversion)
11820         return SDValue();
11821       // If we are converting to 32-bit integers, we need to add an FP_ROUND.
11822       // This is not valid if the input was originally double precision. It is
11823       // also not profitable to do unless this is an extending load in which
11824       // case doing this combine will allow us to combine consecutive loads.
11825       if (Is32Bit && !isFPExtLoad(NextOp.getOperand(0).getOperand(0)))
11826         return SDValue();
11827       if (N->getOperand(i) != FirstInput)
11828         IsSplat = false;
11829     }
11830 
11831     // If this is a splat, we leave it as-is since there will be only a single
11832     // fp-to-int conversion followed by a splat of the integer. This is better
11833     // for 32-bit and smaller ints and neutral for 64-bit ints.
11834     if (IsSplat)
11835       return SDValue();
11836 
11837     // Now that we know we have the right type of node, get its operands
11838     for (int i = 0, e = N->getNumOperands(); i < e; ++i) {
11839       SDValue In = N->getOperand(i).getOperand(0);
11840       if (Is32Bit) {
11841         // For 32-bit values, we need to add an FP_ROUND node (if we made it
11842         // here, we know that all inputs are extending loads so this is safe).
11843         if (In.isUndef())
11844           Ops.push_back(DAG.getUNDEF(SrcVT));
11845         else {
11846           SDValue Trunc = DAG.getNode(ISD::FP_ROUND, dl,
11847                                       MVT::f32, In.getOperand(0),
11848                                       DAG.getIntPtrConstant(1, dl));
11849           Ops.push_back(Trunc);
11850         }
11851       } else
11852         Ops.push_back(In.isUndef() ? DAG.getUNDEF(SrcVT) : In.getOperand(0));
11853     }
11854 
11855     unsigned Opcode;
11856     if (FirstConversion == PPCISD::FCTIDZ ||
11857         FirstConversion == PPCISD::FCTIWZ)
11858       Opcode = ISD::FP_TO_SINT;
11859     else
11860       Opcode = ISD::FP_TO_UINT;
11861 
11862     EVT NewVT = TargetVT == MVT::v2i64 ? MVT::v2f64 : MVT::v4f32;
11863     SDValue BV = DAG.getBuildVector(NewVT, dl, Ops);
11864     return DAG.getNode(Opcode, dl, TargetVT, BV);
11865   }
11866   return SDValue();
11867 }
11868 
11869 /// Reduce the number of loads when building a vector.
11870 ///
11871 /// Building a vector out of multiple loads can be converted to a load
11872 /// of the vector type if the loads are consecutive. If the loads are
11873 /// consecutive but in descending order, a shuffle is added at the end
11874 /// to reorder the vector.
11875 static SDValue combineBVOfConsecutiveLoads(SDNode *N, SelectionDAG &DAG) {
11876   assert(N->getOpcode() == ISD::BUILD_VECTOR &&
11877          "Should be called with a BUILD_VECTOR node");
11878 
11879   SDLoc dl(N);
11880   bool InputsAreConsecutiveLoads = true;
11881   bool InputsAreReverseConsecutive = true;
11882   unsigned ElemSize = N->getValueType(0).getScalarSizeInBits() / 8;
11883   SDValue FirstInput = N->getOperand(0);
11884   bool IsRoundOfExtLoad = false;
11885 
11886   if (FirstInput.getOpcode() == ISD::FP_ROUND &&
11887       FirstInput.getOperand(0).getOpcode() == ISD::LOAD) {
11888     LoadSDNode *LD = dyn_cast<LoadSDNode>(FirstInput.getOperand(0));
11889     IsRoundOfExtLoad = LD->getExtensionType() == ISD::EXTLOAD;
11890   }
11891   // Not a build vector of (possibly fp_rounded) loads.
11892   if (!IsRoundOfExtLoad && FirstInput.getOpcode() != ISD::LOAD)
11893     return SDValue();
11894 
11895   for (int i = 1, e = N->getNumOperands(); i < e; ++i) {
11896     // If any inputs are fp_round(extload), they all must be.
11897     if (IsRoundOfExtLoad && N->getOperand(i).getOpcode() != ISD::FP_ROUND)
11898       return SDValue();
11899 
11900     SDValue NextInput = IsRoundOfExtLoad ? N->getOperand(i).getOperand(0) :
11901       N->getOperand(i);
11902     if (NextInput.getOpcode() != ISD::LOAD)
11903       return SDValue();
11904 
11905     SDValue PreviousInput =
11906       IsRoundOfExtLoad ? N->getOperand(i-1).getOperand(0) : N->getOperand(i-1);
11907     LoadSDNode *LD1 = dyn_cast<LoadSDNode>(PreviousInput);
11908     LoadSDNode *LD2 = dyn_cast<LoadSDNode>(NextInput);
11909 
11910     // If any inputs are fp_round(extload), they all must be.
11911     if (IsRoundOfExtLoad && LD2->getExtensionType() != ISD::EXTLOAD)
11912       return SDValue();
11913 
11914     if (!isConsecutiveLS(LD2, LD1, ElemSize, 1, DAG))
11915       InputsAreConsecutiveLoads = false;
11916     if (!isConsecutiveLS(LD1, LD2, ElemSize, 1, DAG))
11917       InputsAreReverseConsecutive = false;
11918 
11919     // Exit early if the loads are neither consecutive nor reverse consecutive.
11920     if (!InputsAreConsecutiveLoads && !InputsAreReverseConsecutive)
11921       return SDValue();
11922   }
11923 
11924   assert(!(InputsAreConsecutiveLoads && InputsAreReverseConsecutive) &&
11925          "The loads cannot be both consecutive and reverse consecutive.");
11926 
11927   SDValue FirstLoadOp =
11928     IsRoundOfExtLoad ? FirstInput.getOperand(0) : FirstInput;
11929   SDValue LastLoadOp =
11930     IsRoundOfExtLoad ? N->getOperand(N->getNumOperands()-1).getOperand(0) :
11931                        N->getOperand(N->getNumOperands()-1);
11932 
11933   LoadSDNode *LD1 = dyn_cast<LoadSDNode>(FirstLoadOp);
11934   LoadSDNode *LDL = dyn_cast<LoadSDNode>(LastLoadOp);
11935   if (InputsAreConsecutiveLoads) {
11936     assert(LD1 && "Input needs to be a LoadSDNode.");
11937     return DAG.getLoad(N->getValueType(0), dl, LD1->getChain(),
11938                        LD1->getBasePtr(), LD1->getPointerInfo(),
11939                        LD1->getAlignment());
11940   }
11941   if (InputsAreReverseConsecutive) {
11942     assert(LDL && "Input needs to be a LoadSDNode.");
11943     SDValue Load = DAG.getLoad(N->getValueType(0), dl, LDL->getChain(),
11944                                LDL->getBasePtr(), LDL->getPointerInfo(),
11945                                LDL->getAlignment());
11946     SmallVector<int, 16> Ops;
11947     for (int i = N->getNumOperands() - 1; i >= 0; i--)
11948       Ops.push_back(i);
11949 
11950     return DAG.getVectorShuffle(N->getValueType(0), dl, Load,
11951                                 DAG.getUNDEF(N->getValueType(0)), Ops);
11952   }
11953   return SDValue();
11954 }
11955 
11956 // This function adds the required vector_shuffle needed to get
11957 // the elements of the vector extract in the correct position
11958 // as specified by the CorrectElems encoding.
11959 static SDValue addShuffleForVecExtend(SDNode *N, SelectionDAG &DAG,
11960                                       SDValue Input, uint64_t Elems,
11961                                       uint64_t CorrectElems) {
11962   SDLoc dl(N);
11963 
11964   unsigned NumElems = Input.getValueType().getVectorNumElements();
11965   SmallVector<int, 16> ShuffleMask(NumElems, -1);
11966 
11967   // Knowing the element indices being extracted from the original
11968   // vector and the order in which they're being inserted, just put
11969   // them at element indices required for the instruction.
11970   for (unsigned i = 0; i < N->getNumOperands(); i++) {
11971     if (DAG.getDataLayout().isLittleEndian())
11972       ShuffleMask[CorrectElems & 0xF] = Elems & 0xF;
11973     else
11974       ShuffleMask[(CorrectElems & 0xF0) >> 4] = (Elems & 0xF0) >> 4;
11975     CorrectElems = CorrectElems >> 8;
11976     Elems = Elems >> 8;
11977   }
11978 
11979   SDValue Shuffle =
11980       DAG.getVectorShuffle(Input.getValueType(), dl, Input,
11981                            DAG.getUNDEF(Input.getValueType()), ShuffleMask);
11982 
11983   EVT Ty = N->getValueType(0);
11984   SDValue BV = DAG.getNode(PPCISD::SExtVElems, dl, Ty, Shuffle);
11985   return BV;
11986 }
11987 
11988 // Look for build vector patterns where input operands come from sign
11989 // extended vector_extract elements of specific indices. If the correct indices
11990 // aren't used, add a vector shuffle to fix up the indices and create a new
11991 // PPCISD:SExtVElems node which selects the vector sign extend instructions
11992 // during instruction selection.
11993 static SDValue combineBVOfVecSExt(SDNode *N, SelectionDAG &DAG) {
11994   // This array encodes the indices that the vector sign extend instructions
11995   // extract from when extending from one type to another for both BE and LE.
11996   // The right nibble of each byte corresponds to the LE incides.
11997   // and the left nibble of each byte corresponds to the BE incides.
11998   // For example: 0x3074B8FC  byte->word
11999   // For LE: the allowed indices are: 0x0,0x4,0x8,0xC
12000   // For BE: the allowed indices are: 0x3,0x7,0xB,0xF
12001   // For example: 0x000070F8  byte->double word
12002   // For LE: the allowed indices are: 0x0,0x8
12003   // For BE: the allowed indices are: 0x7,0xF
12004   uint64_t TargetElems[] = {
12005       0x3074B8FC, // b->w
12006       0x000070F8, // b->d
12007       0x10325476, // h->w
12008       0x00003074, // h->d
12009       0x00001032, // w->d
12010   };
12011 
12012   uint64_t Elems = 0;
12013   int Index;
12014   SDValue Input;
12015 
12016   auto isSExtOfVecExtract = [&](SDValue Op) -> bool {
12017     if (!Op)
12018       return false;
12019     if (Op.getOpcode() != ISD::SIGN_EXTEND &&
12020         Op.getOpcode() != ISD::SIGN_EXTEND_INREG)
12021       return false;
12022 
12023     // A SIGN_EXTEND_INREG might be fed by an ANY_EXTEND to produce a value
12024     // of the right width.
12025     SDValue Extract = Op.getOperand(0);
12026     if (Extract.getOpcode() == ISD::ANY_EXTEND)
12027       Extract = Extract.getOperand(0);
12028     if (Extract.getOpcode() != ISD::EXTRACT_VECTOR_ELT)
12029       return false;
12030 
12031     ConstantSDNode *ExtOp = dyn_cast<ConstantSDNode>(Extract.getOperand(1));
12032     if (!ExtOp)
12033       return false;
12034 
12035     Index = ExtOp->getZExtValue();
12036     if (Input && Input != Extract.getOperand(0))
12037       return false;
12038 
12039     if (!Input)
12040       Input = Extract.getOperand(0);
12041 
12042     Elems = Elems << 8;
12043     Index = DAG.getDataLayout().isLittleEndian() ? Index : Index << 4;
12044     Elems |= Index;
12045 
12046     return true;
12047   };
12048 
12049   // If the build vector operands aren't sign extended vector extracts,
12050   // of the same input vector, then return.
12051   for (unsigned i = 0; i < N->getNumOperands(); i++) {
12052     if (!isSExtOfVecExtract(N->getOperand(i))) {
12053       return SDValue();
12054     }
12055   }
12056 
12057   // If the vector extract indicies are not correct, add the appropriate
12058   // vector_shuffle.
12059   int TgtElemArrayIdx;
12060   int InputSize = Input.getValueType().getScalarSizeInBits();
12061   int OutputSize = N->getValueType(0).getScalarSizeInBits();
12062   if (InputSize + OutputSize == 40)
12063     TgtElemArrayIdx = 0;
12064   else if (InputSize + OutputSize == 72)
12065     TgtElemArrayIdx = 1;
12066   else if (InputSize + OutputSize == 48)
12067     TgtElemArrayIdx = 2;
12068   else if (InputSize + OutputSize == 80)
12069     TgtElemArrayIdx = 3;
12070   else if (InputSize + OutputSize == 96)
12071     TgtElemArrayIdx = 4;
12072   else
12073     return SDValue();
12074 
12075   uint64_t CorrectElems = TargetElems[TgtElemArrayIdx];
12076   CorrectElems = DAG.getDataLayout().isLittleEndian()
12077                      ? CorrectElems & 0x0F0F0F0F0F0F0F0F
12078                      : CorrectElems & 0xF0F0F0F0F0F0F0F0;
12079   if (Elems != CorrectElems) {
12080     return addShuffleForVecExtend(N, DAG, Input, Elems, CorrectElems);
12081   }
12082 
12083   // Regular lowering will catch cases where a shuffle is not needed.
12084   return SDValue();
12085 }
12086 
12087 SDValue PPCTargetLowering::DAGCombineBuildVector(SDNode *N,
12088                                                  DAGCombinerInfo &DCI) const {
12089   assert(N->getOpcode() == ISD::BUILD_VECTOR &&
12090          "Should be called with a BUILD_VECTOR node");
12091 
12092   SelectionDAG &DAG = DCI.DAG;
12093   SDLoc dl(N);
12094 
12095   if (!Subtarget.hasVSX())
12096     return SDValue();
12097 
12098   // The target independent DAG combiner will leave a build_vector of
12099   // float-to-int conversions intact. We can generate MUCH better code for
12100   // a float-to-int conversion of a vector of floats.
12101   SDValue FirstInput = N->getOperand(0);
12102   if (FirstInput.getOpcode() == PPCISD::MFVSR) {
12103     SDValue Reduced = combineElementTruncationToVectorTruncation(N, DCI);
12104     if (Reduced)
12105       return Reduced;
12106   }
12107 
12108   // If we're building a vector out of consecutive loads, just load that
12109   // vector type.
12110   SDValue Reduced = combineBVOfConsecutiveLoads(N, DAG);
12111   if (Reduced)
12112     return Reduced;
12113 
12114   // If we're building a vector out of extended elements from another vector
12115   // we have P9 vector integer extend instructions. The code assumes legal
12116   // input types (i.e. it can't handle things like v4i16) so do not run before
12117   // legalization.
12118   if (Subtarget.hasP9Altivec() && !DCI.isBeforeLegalize()) {
12119     Reduced = combineBVOfVecSExt(N, DAG);
12120     if (Reduced)
12121       return Reduced;
12122   }
12123 
12124 
12125   if (N->getValueType(0) != MVT::v2f64)
12126     return SDValue();
12127 
12128   // Looking for:
12129   // (build_vector ([su]int_to_fp (extractelt 0)), [su]int_to_fp (extractelt 1))
12130   if (FirstInput.getOpcode() != ISD::SINT_TO_FP &&
12131       FirstInput.getOpcode() != ISD::UINT_TO_FP)
12132     return SDValue();
12133   if (N->getOperand(1).getOpcode() != ISD::SINT_TO_FP &&
12134       N->getOperand(1).getOpcode() != ISD::UINT_TO_FP)
12135     return SDValue();
12136   if (FirstInput.getOpcode() != N->getOperand(1).getOpcode())
12137     return SDValue();
12138 
12139   SDValue Ext1 = FirstInput.getOperand(0);
12140   SDValue Ext2 = N->getOperand(1).getOperand(0);
12141   if(Ext1.getOpcode() != ISD::EXTRACT_VECTOR_ELT ||
12142      Ext2.getOpcode() != ISD::EXTRACT_VECTOR_ELT)
12143     return SDValue();
12144 
12145   ConstantSDNode *Ext1Op = dyn_cast<ConstantSDNode>(Ext1.getOperand(1));
12146   ConstantSDNode *Ext2Op = dyn_cast<ConstantSDNode>(Ext2.getOperand(1));
12147   if (!Ext1Op || !Ext2Op)
12148     return SDValue();
12149   if (Ext1.getValueType() != MVT::i32 ||
12150       Ext2.getValueType() != MVT::i32)
12151   if (Ext1.getOperand(0) != Ext2.getOperand(0))
12152     return SDValue();
12153 
12154   int FirstElem = Ext1Op->getZExtValue();
12155   int SecondElem = Ext2Op->getZExtValue();
12156   int SubvecIdx;
12157   if (FirstElem == 0 && SecondElem == 1)
12158     SubvecIdx = Subtarget.isLittleEndian() ? 1 : 0;
12159   else if (FirstElem == 2 && SecondElem == 3)
12160     SubvecIdx = Subtarget.isLittleEndian() ? 0 : 1;
12161   else
12162     return SDValue();
12163 
12164   SDValue SrcVec = Ext1.getOperand(0);
12165   auto NodeType = (N->getOperand(1).getOpcode() == ISD::SINT_TO_FP) ?
12166     PPCISD::SINT_VEC_TO_FP : PPCISD::UINT_VEC_TO_FP;
12167   return DAG.getNode(NodeType, dl, MVT::v2f64,
12168                      SrcVec, DAG.getIntPtrConstant(SubvecIdx, dl));
12169 }
12170 
12171 SDValue PPCTargetLowering::combineFPToIntToFP(SDNode *N,
12172                                               DAGCombinerInfo &DCI) const {
12173   assert((N->getOpcode() == ISD::SINT_TO_FP ||
12174           N->getOpcode() == ISD::UINT_TO_FP) &&
12175          "Need an int -> FP conversion node here");
12176 
12177   if (useSoftFloat() || !Subtarget.has64BitSupport())
12178     return SDValue();
12179 
12180   SelectionDAG &DAG = DCI.DAG;
12181   SDLoc dl(N);
12182   SDValue Op(N, 0);
12183 
12184   // Don't handle ppc_fp128 here or conversions that are out-of-range capable
12185   // from the hardware.
12186   if (Op.getValueType() != MVT::f32 && Op.getValueType() != MVT::f64)
12187     return SDValue();
12188   if (Op.getOperand(0).getValueType().getSimpleVT() <= MVT(MVT::i1) ||
12189       Op.getOperand(0).getValueType().getSimpleVT() > MVT(MVT::i64))
12190     return SDValue();
12191 
12192   SDValue FirstOperand(Op.getOperand(0));
12193   bool SubWordLoad = FirstOperand.getOpcode() == ISD::LOAD &&
12194     (FirstOperand.getValueType() == MVT::i8 ||
12195      FirstOperand.getValueType() == MVT::i16);
12196   if (Subtarget.hasP9Vector() && Subtarget.hasP9Altivec() && SubWordLoad) {
12197     bool Signed = N->getOpcode() == ISD::SINT_TO_FP;
12198     bool DstDouble = Op.getValueType() == MVT::f64;
12199     unsigned ConvOp = Signed ?
12200       (DstDouble ? PPCISD::FCFID  : PPCISD::FCFIDS) :
12201       (DstDouble ? PPCISD::FCFIDU : PPCISD::FCFIDUS);
12202     SDValue WidthConst =
12203       DAG.getIntPtrConstant(FirstOperand.getValueType() == MVT::i8 ? 1 : 2,
12204                             dl, false);
12205     LoadSDNode *LDN = cast<LoadSDNode>(FirstOperand.getNode());
12206     SDValue Ops[] = { LDN->getChain(), LDN->getBasePtr(), WidthConst };
12207     SDValue Ld = DAG.getMemIntrinsicNode(PPCISD::LXSIZX, dl,
12208                                          DAG.getVTList(MVT::f64, MVT::Other),
12209                                          Ops, MVT::i8, LDN->getMemOperand());
12210 
12211     // For signed conversion, we need to sign-extend the value in the VSR
12212     if (Signed) {
12213       SDValue ExtOps[] = { Ld, WidthConst };
12214       SDValue Ext = DAG.getNode(PPCISD::VEXTS, dl, MVT::f64, ExtOps);
12215       return DAG.getNode(ConvOp, dl, DstDouble ? MVT::f64 : MVT::f32, Ext);
12216     } else
12217       return DAG.getNode(ConvOp, dl, DstDouble ? MVT::f64 : MVT::f32, Ld);
12218   }
12219 
12220 
12221   // For i32 intermediate values, unfortunately, the conversion functions
12222   // leave the upper 32 bits of the value are undefined. Within the set of
12223   // scalar instructions, we have no method for zero- or sign-extending the
12224   // value. Thus, we cannot handle i32 intermediate values here.
12225   if (Op.getOperand(0).getValueType() == MVT::i32)
12226     return SDValue();
12227 
12228   assert((Op.getOpcode() == ISD::SINT_TO_FP || Subtarget.hasFPCVT()) &&
12229          "UINT_TO_FP is supported only with FPCVT");
12230 
12231   // If we have FCFIDS, then use it when converting to single-precision.
12232   // Otherwise, convert to double-precision and then round.
12233   unsigned FCFOp = (Subtarget.hasFPCVT() && Op.getValueType() == MVT::f32)
12234                        ? (Op.getOpcode() == ISD::UINT_TO_FP ? PPCISD::FCFIDUS
12235                                                             : PPCISD::FCFIDS)
12236                        : (Op.getOpcode() == ISD::UINT_TO_FP ? PPCISD::FCFIDU
12237                                                             : PPCISD::FCFID);
12238   MVT FCFTy = (Subtarget.hasFPCVT() && Op.getValueType() == MVT::f32)
12239                   ? MVT::f32
12240                   : MVT::f64;
12241 
12242   // If we're converting from a float, to an int, and back to a float again,
12243   // then we don't need the store/load pair at all.
12244   if ((Op.getOperand(0).getOpcode() == ISD::FP_TO_UINT &&
12245        Subtarget.hasFPCVT()) ||
12246       (Op.getOperand(0).getOpcode() == ISD::FP_TO_SINT)) {
12247     SDValue Src = Op.getOperand(0).getOperand(0);
12248     if (Src.getValueType() == MVT::f32) {
12249       Src = DAG.getNode(ISD::FP_EXTEND, dl, MVT::f64, Src);
12250       DCI.AddToWorklist(Src.getNode());
12251     } else if (Src.getValueType() != MVT::f64) {
12252       // Make sure that we don't pick up a ppc_fp128 source value.
12253       return SDValue();
12254     }
12255 
12256     unsigned FCTOp =
12257       Op.getOperand(0).getOpcode() == ISD::FP_TO_SINT ? PPCISD::FCTIDZ :
12258                                                         PPCISD::FCTIDUZ;
12259 
12260     SDValue Tmp = DAG.getNode(FCTOp, dl, MVT::f64, Src);
12261     SDValue FP = DAG.getNode(FCFOp, dl, FCFTy, Tmp);
12262 
12263     if (Op.getValueType() == MVT::f32 && !Subtarget.hasFPCVT()) {
12264       FP = DAG.getNode(ISD::FP_ROUND, dl,
12265                        MVT::f32, FP, DAG.getIntPtrConstant(0, dl));
12266       DCI.AddToWorklist(FP.getNode());
12267     }
12268 
12269     return FP;
12270   }
12271 
12272   return SDValue();
12273 }
12274 
12275 // expandVSXLoadForLE - Convert VSX loads (which may be intrinsics for
12276 // builtins) into loads with swaps.
12277 SDValue PPCTargetLowering::expandVSXLoadForLE(SDNode *N,
12278                                               DAGCombinerInfo &DCI) const {
12279   SelectionDAG &DAG = DCI.DAG;
12280   SDLoc dl(N);
12281   SDValue Chain;
12282   SDValue Base;
12283   MachineMemOperand *MMO;
12284 
12285   switch (N->getOpcode()) {
12286   default:
12287     llvm_unreachable("Unexpected opcode for little endian VSX load");
12288   case ISD::LOAD: {
12289     LoadSDNode *LD = cast<LoadSDNode>(N);
12290     Chain = LD->getChain();
12291     Base = LD->getBasePtr();
12292     MMO = LD->getMemOperand();
12293     // If the MMO suggests this isn't a load of a full vector, leave
12294     // things alone.  For a built-in, we have to make the change for
12295     // correctness, so if there is a size problem that will be a bug.
12296     if (MMO->getSize() < 16)
12297       return SDValue();
12298     break;
12299   }
12300   case ISD::INTRINSIC_W_CHAIN: {
12301     MemIntrinsicSDNode *Intrin = cast<MemIntrinsicSDNode>(N);
12302     Chain = Intrin->getChain();
12303     // Similarly to the store case below, Intrin->getBasePtr() doesn't get
12304     // us what we want. Get operand 2 instead.
12305     Base = Intrin->getOperand(2);
12306     MMO = Intrin->getMemOperand();
12307     break;
12308   }
12309   }
12310 
12311   MVT VecTy = N->getValueType(0).getSimpleVT();
12312 
12313   // Do not expand to PPCISD::LXVD2X + PPCISD::XXSWAPD when the load is
12314   // aligned and the type is a vector with elements up to 4 bytes
12315   if (Subtarget.needsSwapsForVSXMemOps() && !(MMO->getAlignment()%16)
12316       && VecTy.getScalarSizeInBits() <= 32 ) {
12317     return SDValue();
12318   }
12319 
12320   SDValue LoadOps[] = { Chain, Base };
12321   SDValue Load = DAG.getMemIntrinsicNode(PPCISD::LXVD2X, dl,
12322                                          DAG.getVTList(MVT::v2f64, MVT::Other),
12323                                          LoadOps, MVT::v2f64, MMO);
12324 
12325   DCI.AddToWorklist(Load.getNode());
12326   Chain = Load.getValue(1);
12327   SDValue Swap = DAG.getNode(
12328       PPCISD::XXSWAPD, dl, DAG.getVTList(MVT::v2f64, MVT::Other), Chain, Load);
12329   DCI.AddToWorklist(Swap.getNode());
12330 
12331   // Add a bitcast if the resulting load type doesn't match v2f64.
12332   if (VecTy != MVT::v2f64) {
12333     SDValue N = DAG.getNode(ISD::BITCAST, dl, VecTy, Swap);
12334     DCI.AddToWorklist(N.getNode());
12335     // Package {bitcast value, swap's chain} to match Load's shape.
12336     return DAG.getNode(ISD::MERGE_VALUES, dl, DAG.getVTList(VecTy, MVT::Other),
12337                        N, Swap.getValue(1));
12338   }
12339 
12340   return Swap;
12341 }
12342 
12343 // expandVSXStoreForLE - Convert VSX stores (which may be intrinsics for
12344 // builtins) into stores with swaps.
12345 SDValue PPCTargetLowering::expandVSXStoreForLE(SDNode *N,
12346                                                DAGCombinerInfo &DCI) const {
12347   SelectionDAG &DAG = DCI.DAG;
12348   SDLoc dl(N);
12349   SDValue Chain;
12350   SDValue Base;
12351   unsigned SrcOpnd;
12352   MachineMemOperand *MMO;
12353 
12354   switch (N->getOpcode()) {
12355   default:
12356     llvm_unreachable("Unexpected opcode for little endian VSX store");
12357   case ISD::STORE: {
12358     StoreSDNode *ST = cast<StoreSDNode>(N);
12359     Chain = ST->getChain();
12360     Base = ST->getBasePtr();
12361     MMO = ST->getMemOperand();
12362     SrcOpnd = 1;
12363     // If the MMO suggests this isn't a store of a full vector, leave
12364     // things alone.  For a built-in, we have to make the change for
12365     // correctness, so if there is a size problem that will be a bug.
12366     if (MMO->getSize() < 16)
12367       return SDValue();
12368     break;
12369   }
12370   case ISD::INTRINSIC_VOID: {
12371     MemIntrinsicSDNode *Intrin = cast<MemIntrinsicSDNode>(N);
12372     Chain = Intrin->getChain();
12373     // Intrin->getBasePtr() oddly does not get what we want.
12374     Base = Intrin->getOperand(3);
12375     MMO = Intrin->getMemOperand();
12376     SrcOpnd = 2;
12377     break;
12378   }
12379   }
12380 
12381   SDValue Src = N->getOperand(SrcOpnd);
12382   MVT VecTy = Src.getValueType().getSimpleVT();
12383 
12384   // Do not expand to PPCISD::XXSWAPD and PPCISD::STXVD2X when the load is
12385   // aligned and the type is a vector with elements up to 4 bytes
12386   if (Subtarget.needsSwapsForVSXMemOps() && !(MMO->getAlignment()%16)
12387       && VecTy.getScalarSizeInBits() <= 32 ) {
12388     return SDValue();
12389   }
12390 
12391   // All stores are done as v2f64 and possible bit cast.
12392   if (VecTy != MVT::v2f64) {
12393     Src = DAG.getNode(ISD::BITCAST, dl, MVT::v2f64, Src);
12394     DCI.AddToWorklist(Src.getNode());
12395   }
12396 
12397   SDValue Swap = DAG.getNode(PPCISD::XXSWAPD, dl,
12398                              DAG.getVTList(MVT::v2f64, MVT::Other), Chain, Src);
12399   DCI.AddToWorklist(Swap.getNode());
12400   Chain = Swap.getValue(1);
12401   SDValue StoreOps[] = { Chain, Swap, Base };
12402   SDValue Store = DAG.getMemIntrinsicNode(PPCISD::STXVD2X, dl,
12403                                           DAG.getVTList(MVT::Other),
12404                                           StoreOps, VecTy, MMO);
12405   DCI.AddToWorklist(Store.getNode());
12406   return Store;
12407 }
12408 
12409 // Handle DAG combine for STORE (FP_TO_INT F).
12410 SDValue PPCTargetLowering::combineStoreFPToInt(SDNode *N,
12411                                                DAGCombinerInfo &DCI) const {
12412 
12413   SelectionDAG &DAG = DCI.DAG;
12414   SDLoc dl(N);
12415   unsigned Opcode = N->getOperand(1).getOpcode();
12416 
12417   assert((Opcode == ISD::FP_TO_SINT || Opcode == ISD::FP_TO_UINT)
12418          && "Not a FP_TO_INT Instruction!");
12419 
12420   SDValue Val = N->getOperand(1).getOperand(0);
12421   EVT Op1VT = N->getOperand(1).getValueType();
12422   EVT ResVT = Val.getValueType();
12423 
12424   // Floating point types smaller than 32 bits are not legal on Power.
12425   if (ResVT.getScalarSizeInBits() < 32)
12426     return SDValue();
12427 
12428   // Only perform combine for conversion to i64/i32 or power9 i16/i8.
12429   bool ValidTypeForStoreFltAsInt =
12430         (Op1VT == MVT::i32 || Op1VT == MVT::i64 ||
12431          (Subtarget.hasP9Vector() && (Op1VT == MVT::i16 || Op1VT == MVT::i8)));
12432 
12433   if (ResVT == MVT::ppcf128 || !Subtarget.hasP8Altivec() ||
12434       cast<StoreSDNode>(N)->isTruncatingStore() || !ValidTypeForStoreFltAsInt)
12435     return SDValue();
12436 
12437   // Extend f32 values to f64
12438   if (ResVT.getScalarSizeInBits() == 32) {
12439     Val = DAG.getNode(ISD::FP_EXTEND, dl, MVT::f64, Val);
12440     DCI.AddToWorklist(Val.getNode());
12441   }
12442 
12443   // Set signed or unsigned conversion opcode.
12444   unsigned ConvOpcode = (Opcode == ISD::FP_TO_SINT) ?
12445                           PPCISD::FP_TO_SINT_IN_VSR :
12446                           PPCISD::FP_TO_UINT_IN_VSR;
12447 
12448   Val = DAG.getNode(ConvOpcode,
12449                     dl, ResVT == MVT::f128 ? MVT::f128 : MVT::f64, Val);
12450   DCI.AddToWorklist(Val.getNode());
12451 
12452   // Set number of bytes being converted.
12453   unsigned ByteSize = Op1VT.getScalarSizeInBits() / 8;
12454   SDValue Ops[] = { N->getOperand(0), Val, N->getOperand(2),
12455                     DAG.getIntPtrConstant(ByteSize, dl, false),
12456                     DAG.getValueType(Op1VT) };
12457 
12458   Val = DAG.getMemIntrinsicNode(PPCISD::ST_VSR_SCAL_INT, dl,
12459           DAG.getVTList(MVT::Other), Ops,
12460           cast<StoreSDNode>(N)->getMemoryVT(),
12461           cast<StoreSDNode>(N)->getMemOperand());
12462 
12463   DCI.AddToWorklist(Val.getNode());
12464   return Val;
12465 }
12466 
12467 SDValue PPCTargetLowering::PerformDAGCombine(SDNode *N,
12468                                              DAGCombinerInfo &DCI) const {
12469   SelectionDAG &DAG = DCI.DAG;
12470   SDLoc dl(N);
12471   switch (N->getOpcode()) {
12472   default: break;
12473   case ISD::SHL:
12474     return combineSHL(N, DCI);
12475   case ISD::SRA:
12476     return combineSRA(N, DCI);
12477   case ISD::SRL:
12478     return combineSRL(N, DCI);
12479   case PPCISD::SHL:
12480     if (isNullConstant(N->getOperand(0))) // 0 << V -> 0.
12481         return N->getOperand(0);
12482     break;
12483   case PPCISD::SRL:
12484     if (isNullConstant(N->getOperand(0))) // 0 >>u V -> 0.
12485         return N->getOperand(0);
12486     break;
12487   case PPCISD::SRA:
12488     if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(N->getOperand(0))) {
12489       if (C->isNullValue() ||   //  0 >>s V -> 0.
12490           C->isAllOnesValue())    // -1 >>s V -> -1.
12491         return N->getOperand(0);
12492     }
12493     break;
12494   case ISD::SIGN_EXTEND:
12495   case ISD::ZERO_EXTEND:
12496   case ISD::ANY_EXTEND:
12497     return DAGCombineExtBoolTrunc(N, DCI);
12498   case ISD::TRUNCATE:
12499   case ISD::SETCC:
12500   case ISD::SELECT_CC:
12501     return DAGCombineTruncBoolExt(N, DCI);
12502   case ISD::SINT_TO_FP:
12503   case ISD::UINT_TO_FP:
12504     return combineFPToIntToFP(N, DCI);
12505   case ISD::STORE: {
12506 
12507     EVT Op1VT = N->getOperand(1).getValueType();
12508     unsigned Opcode = N->getOperand(1).getOpcode();
12509 
12510     if (Opcode == ISD::FP_TO_SINT || Opcode == ISD::FP_TO_UINT) {
12511       SDValue Val= combineStoreFPToInt(N, DCI);
12512       if (Val)
12513         return Val;
12514     }
12515 
12516     // Turn STORE (BSWAP) -> sthbrx/stwbrx.
12517     if (cast<StoreSDNode>(N)->isUnindexed() && Opcode == ISD::BSWAP &&
12518         N->getOperand(1).getNode()->hasOneUse() &&
12519         (Op1VT == MVT::i32 || Op1VT == MVT::i16 ||
12520          (Subtarget.hasLDBRX() && Subtarget.isPPC64() && Op1VT == MVT::i64))) {
12521 
12522       // STBRX can only handle simple types.
12523       EVT mVT = cast<StoreSDNode>(N)->getMemoryVT();
12524       if (mVT.isExtended())
12525         break;
12526 
12527       SDValue BSwapOp = N->getOperand(1).getOperand(0);
12528       // Do an any-extend to 32-bits if this is a half-word input.
12529       if (BSwapOp.getValueType() == MVT::i16)
12530         BSwapOp = DAG.getNode(ISD::ANY_EXTEND, dl, MVT::i32, BSwapOp);
12531 
12532       // If the type of BSWAP operand is wider than stored memory width
12533       // it need to be shifted to the right side before STBRX.
12534       if (Op1VT.bitsGT(mVT)) {
12535         int Shift = Op1VT.getSizeInBits() - mVT.getSizeInBits();
12536         BSwapOp = DAG.getNode(ISD::SRL, dl, Op1VT, BSwapOp,
12537                               DAG.getConstant(Shift, dl, MVT::i32));
12538         // Need to truncate if this is a bswap of i64 stored as i32/i16.
12539         if (Op1VT == MVT::i64)
12540           BSwapOp = DAG.getNode(ISD::TRUNCATE, dl, MVT::i32, BSwapOp);
12541       }
12542 
12543       SDValue Ops[] = {
12544         N->getOperand(0), BSwapOp, N->getOperand(2), DAG.getValueType(mVT)
12545       };
12546       return
12547         DAG.getMemIntrinsicNode(PPCISD::STBRX, dl, DAG.getVTList(MVT::Other),
12548                                 Ops, cast<StoreSDNode>(N)->getMemoryVT(),
12549                                 cast<StoreSDNode>(N)->getMemOperand());
12550     }
12551 
12552     // STORE Constant:i32<0>  ->  STORE<trunc to i32> Constant:i64<0>
12553     // So it can increase the chance of CSE constant construction.
12554     if (Subtarget.isPPC64() && !DCI.isBeforeLegalize() &&
12555         isa<ConstantSDNode>(N->getOperand(1)) && Op1VT == MVT::i32) {
12556       // Need to sign-extended to 64-bits to handle negative values.
12557       EVT MemVT = cast<StoreSDNode>(N)->getMemoryVT();
12558       uint64_t Val64 = SignExtend64(N->getConstantOperandVal(1),
12559                                     MemVT.getSizeInBits());
12560       SDValue Const64 = DAG.getConstant(Val64, dl, MVT::i64);
12561 
12562       // DAG.getTruncStore() can't be used here because it doesn't accept
12563       // the general (base + offset) addressing mode.
12564       // So we use UpdateNodeOperands and setTruncatingStore instead.
12565       DAG.UpdateNodeOperands(N, N->getOperand(0), Const64, N->getOperand(2),
12566                              N->getOperand(3));
12567       cast<StoreSDNode>(N)->setTruncatingStore(true);
12568       return SDValue(N, 0);
12569     }
12570 
12571     // For little endian, VSX stores require generating xxswapd/lxvd2x.
12572     // Not needed on ISA 3.0 based CPUs since we have a non-permuting store.
12573     if (Op1VT.isSimple()) {
12574       MVT StoreVT = Op1VT.getSimpleVT();
12575       if (Subtarget.needsSwapsForVSXMemOps() &&
12576           (StoreVT == MVT::v2f64 || StoreVT == MVT::v2i64 ||
12577            StoreVT == MVT::v4f32 || StoreVT == MVT::v4i32))
12578         return expandVSXStoreForLE(N, DCI);
12579     }
12580     break;
12581   }
12582   case ISD::LOAD: {
12583     LoadSDNode *LD = cast<LoadSDNode>(N);
12584     EVT VT = LD->getValueType(0);
12585 
12586     // For little endian, VSX loads require generating lxvd2x/xxswapd.
12587     // Not needed on ISA 3.0 based CPUs since we have a non-permuting load.
12588     if (VT.isSimple()) {
12589       MVT LoadVT = VT.getSimpleVT();
12590       if (Subtarget.needsSwapsForVSXMemOps() &&
12591           (LoadVT == MVT::v2f64 || LoadVT == MVT::v2i64 ||
12592            LoadVT == MVT::v4f32 || LoadVT == MVT::v4i32))
12593         return expandVSXLoadForLE(N, DCI);
12594     }
12595 
12596     // We sometimes end up with a 64-bit integer load, from which we extract
12597     // two single-precision floating-point numbers. This happens with
12598     // std::complex<float>, and other similar structures, because of the way we
12599     // canonicalize structure copies. However, if we lack direct moves,
12600     // then the final bitcasts from the extracted integer values to the
12601     // floating-point numbers turn into store/load pairs. Even with direct moves,
12602     // just loading the two floating-point numbers is likely better.
12603     auto ReplaceTwoFloatLoad = [&]() {
12604       if (VT != MVT::i64)
12605         return false;
12606 
12607       if (LD->getExtensionType() != ISD::NON_EXTLOAD ||
12608           LD->isVolatile())
12609         return false;
12610 
12611       //  We're looking for a sequence like this:
12612       //  t13: i64,ch = load<LD8[%ref.tmp]> t0, t6, undef:i64
12613       //      t16: i64 = srl t13, Constant:i32<32>
12614       //    t17: i32 = truncate t16
12615       //  t18: f32 = bitcast t17
12616       //    t19: i32 = truncate t13
12617       //  t20: f32 = bitcast t19
12618 
12619       if (!LD->hasNUsesOfValue(2, 0))
12620         return false;
12621 
12622       auto UI = LD->use_begin();
12623       while (UI.getUse().getResNo() != 0) ++UI;
12624       SDNode *Trunc = *UI++;
12625       while (UI.getUse().getResNo() != 0) ++UI;
12626       SDNode *RightShift = *UI;
12627       if (Trunc->getOpcode() != ISD::TRUNCATE)
12628         std::swap(Trunc, RightShift);
12629 
12630       if (Trunc->getOpcode() != ISD::TRUNCATE ||
12631           Trunc->getValueType(0) != MVT::i32 ||
12632           !Trunc->hasOneUse())
12633         return false;
12634       if (RightShift->getOpcode() != ISD::SRL ||
12635           !isa<ConstantSDNode>(RightShift->getOperand(1)) ||
12636           RightShift->getConstantOperandVal(1) != 32 ||
12637           !RightShift->hasOneUse())
12638         return false;
12639 
12640       SDNode *Trunc2 = *RightShift->use_begin();
12641       if (Trunc2->getOpcode() != ISD::TRUNCATE ||
12642           Trunc2->getValueType(0) != MVT::i32 ||
12643           !Trunc2->hasOneUse())
12644         return false;
12645 
12646       SDNode *Bitcast = *Trunc->use_begin();
12647       SDNode *Bitcast2 = *Trunc2->use_begin();
12648 
12649       if (Bitcast->getOpcode() != ISD::BITCAST ||
12650           Bitcast->getValueType(0) != MVT::f32)
12651         return false;
12652       if (Bitcast2->getOpcode() != ISD::BITCAST ||
12653           Bitcast2->getValueType(0) != MVT::f32)
12654         return false;
12655 
12656       if (Subtarget.isLittleEndian())
12657         std::swap(Bitcast, Bitcast2);
12658 
12659       // Bitcast has the second float (in memory-layout order) and Bitcast2
12660       // has the first one.
12661 
12662       SDValue BasePtr = LD->getBasePtr();
12663       if (LD->isIndexed()) {
12664         assert(LD->getAddressingMode() == ISD::PRE_INC &&
12665                "Non-pre-inc AM on PPC?");
12666         BasePtr =
12667           DAG.getNode(ISD::ADD, dl, BasePtr.getValueType(), BasePtr,
12668                       LD->getOffset());
12669       }
12670 
12671       auto MMOFlags =
12672           LD->getMemOperand()->getFlags() & ~MachineMemOperand::MOVolatile;
12673       SDValue FloatLoad = DAG.getLoad(MVT::f32, dl, LD->getChain(), BasePtr,
12674                                       LD->getPointerInfo(), LD->getAlignment(),
12675                                       MMOFlags, LD->getAAInfo());
12676       SDValue AddPtr =
12677         DAG.getNode(ISD::ADD, dl, BasePtr.getValueType(),
12678                     BasePtr, DAG.getIntPtrConstant(4, dl));
12679       SDValue FloatLoad2 = DAG.getLoad(
12680           MVT::f32, dl, SDValue(FloatLoad.getNode(), 1), AddPtr,
12681           LD->getPointerInfo().getWithOffset(4),
12682           MinAlign(LD->getAlignment(), 4), MMOFlags, LD->getAAInfo());
12683 
12684       if (LD->isIndexed()) {
12685         // Note that DAGCombine should re-form any pre-increment load(s) from
12686         // what is produced here if that makes sense.
12687         DAG.ReplaceAllUsesOfValueWith(SDValue(LD, 1), BasePtr);
12688       }
12689 
12690       DCI.CombineTo(Bitcast2, FloatLoad);
12691       DCI.CombineTo(Bitcast, FloatLoad2);
12692 
12693       DAG.ReplaceAllUsesOfValueWith(SDValue(LD, LD->isIndexed() ? 2 : 1),
12694                                     SDValue(FloatLoad2.getNode(), 1));
12695       return true;
12696     };
12697 
12698     if (ReplaceTwoFloatLoad())
12699       return SDValue(N, 0);
12700 
12701     EVT MemVT = LD->getMemoryVT();
12702     Type *Ty = MemVT.getTypeForEVT(*DAG.getContext());
12703     unsigned ABIAlignment = DAG.getDataLayout().getABITypeAlignment(Ty);
12704     Type *STy = MemVT.getScalarType().getTypeForEVT(*DAG.getContext());
12705     unsigned ScalarABIAlignment = DAG.getDataLayout().getABITypeAlignment(STy);
12706     if (LD->isUnindexed() && VT.isVector() &&
12707         ((Subtarget.hasAltivec() && ISD::isNON_EXTLoad(N) &&
12708           // P8 and later hardware should just use LOAD.
12709           !Subtarget.hasP8Vector() && (VT == MVT::v16i8 || VT == MVT::v8i16 ||
12710                                        VT == MVT::v4i32 || VT == MVT::v4f32)) ||
12711          (Subtarget.hasQPX() && (VT == MVT::v4f64 || VT == MVT::v4f32) &&
12712           LD->getAlignment() >= ScalarABIAlignment)) &&
12713         LD->getAlignment() < ABIAlignment) {
12714       // This is a type-legal unaligned Altivec or QPX load.
12715       SDValue Chain = LD->getChain();
12716       SDValue Ptr = LD->getBasePtr();
12717       bool isLittleEndian = Subtarget.isLittleEndian();
12718 
12719       // This implements the loading of unaligned vectors as described in
12720       // the venerable Apple Velocity Engine overview. Specifically:
12721       // https://developer.apple.com/hardwaredrivers/ve/alignment.html
12722       // https://developer.apple.com/hardwaredrivers/ve/code_optimization.html
12723       //
12724       // The general idea is to expand a sequence of one or more unaligned
12725       // loads into an alignment-based permutation-control instruction (lvsl
12726       // or lvsr), a series of regular vector loads (which always truncate
12727       // their input address to an aligned address), and a series of
12728       // permutations.  The results of these permutations are the requested
12729       // loaded values.  The trick is that the last "extra" load is not taken
12730       // from the address you might suspect (sizeof(vector) bytes after the
12731       // last requested load), but rather sizeof(vector) - 1 bytes after the
12732       // last requested vector. The point of this is to avoid a page fault if
12733       // the base address happened to be aligned. This works because if the
12734       // base address is aligned, then adding less than a full vector length
12735       // will cause the last vector in the sequence to be (re)loaded.
12736       // Otherwise, the next vector will be fetched as you might suspect was
12737       // necessary.
12738 
12739       // We might be able to reuse the permutation generation from
12740       // a different base address offset from this one by an aligned amount.
12741       // The INTRINSIC_WO_CHAIN DAG combine will attempt to perform this
12742       // optimization later.
12743       Intrinsic::ID Intr, IntrLD, IntrPerm;
12744       MVT PermCntlTy, PermTy, LDTy;
12745       if (Subtarget.hasAltivec()) {
12746         Intr = isLittleEndian ?  Intrinsic::ppc_altivec_lvsr :
12747                                  Intrinsic::ppc_altivec_lvsl;
12748         IntrLD = Intrinsic::ppc_altivec_lvx;
12749         IntrPerm = Intrinsic::ppc_altivec_vperm;
12750         PermCntlTy = MVT::v16i8;
12751         PermTy = MVT::v4i32;
12752         LDTy = MVT::v4i32;
12753       } else {
12754         Intr =   MemVT == MVT::v4f64 ? Intrinsic::ppc_qpx_qvlpcld :
12755                                        Intrinsic::ppc_qpx_qvlpcls;
12756         IntrLD = MemVT == MVT::v4f64 ? Intrinsic::ppc_qpx_qvlfd :
12757                                        Intrinsic::ppc_qpx_qvlfs;
12758         IntrPerm = Intrinsic::ppc_qpx_qvfperm;
12759         PermCntlTy = MVT::v4f64;
12760         PermTy = MVT::v4f64;
12761         LDTy = MemVT.getSimpleVT();
12762       }
12763 
12764       SDValue PermCntl = BuildIntrinsicOp(Intr, Ptr, DAG, dl, PermCntlTy);
12765 
12766       // Create the new MMO for the new base load. It is like the original MMO,
12767       // but represents an area in memory almost twice the vector size centered
12768       // on the original address. If the address is unaligned, we might start
12769       // reading up to (sizeof(vector)-1) bytes below the address of the
12770       // original unaligned load.
12771       MachineFunction &MF = DAG.getMachineFunction();
12772       MachineMemOperand *BaseMMO =
12773         MF.getMachineMemOperand(LD->getMemOperand(),
12774                                 -(long)MemVT.getStoreSize()+1,
12775                                 2*MemVT.getStoreSize()-1);
12776 
12777       // Create the new base load.
12778       SDValue LDXIntID =
12779           DAG.getTargetConstant(IntrLD, dl, getPointerTy(MF.getDataLayout()));
12780       SDValue BaseLoadOps[] = { Chain, LDXIntID, Ptr };
12781       SDValue BaseLoad =
12782         DAG.getMemIntrinsicNode(ISD::INTRINSIC_W_CHAIN, dl,
12783                                 DAG.getVTList(PermTy, MVT::Other),
12784                                 BaseLoadOps, LDTy, BaseMMO);
12785 
12786       // Note that the value of IncOffset (which is provided to the next
12787       // load's pointer info offset value, and thus used to calculate the
12788       // alignment), and the value of IncValue (which is actually used to
12789       // increment the pointer value) are different! This is because we
12790       // require the next load to appear to be aligned, even though it
12791       // is actually offset from the base pointer by a lesser amount.
12792       int IncOffset = VT.getSizeInBits() / 8;
12793       int IncValue = IncOffset;
12794 
12795       // Walk (both up and down) the chain looking for another load at the real
12796       // (aligned) offset (the alignment of the other load does not matter in
12797       // this case). If found, then do not use the offset reduction trick, as
12798       // that will prevent the loads from being later combined (as they would
12799       // otherwise be duplicates).
12800       if (!findConsecutiveLoad(LD, DAG))
12801         --IncValue;
12802 
12803       SDValue Increment =
12804           DAG.getConstant(IncValue, dl, getPointerTy(MF.getDataLayout()));
12805       Ptr = DAG.getNode(ISD::ADD, dl, Ptr.getValueType(), Ptr, Increment);
12806 
12807       MachineMemOperand *ExtraMMO =
12808         MF.getMachineMemOperand(LD->getMemOperand(),
12809                                 1, 2*MemVT.getStoreSize()-1);
12810       SDValue ExtraLoadOps[] = { Chain, LDXIntID, Ptr };
12811       SDValue ExtraLoad =
12812         DAG.getMemIntrinsicNode(ISD::INTRINSIC_W_CHAIN, dl,
12813                                 DAG.getVTList(PermTy, MVT::Other),
12814                                 ExtraLoadOps, LDTy, ExtraMMO);
12815 
12816       SDValue TF = DAG.getNode(ISD::TokenFactor, dl, MVT::Other,
12817         BaseLoad.getValue(1), ExtraLoad.getValue(1));
12818 
12819       // Because vperm has a big-endian bias, we must reverse the order
12820       // of the input vectors and complement the permute control vector
12821       // when generating little endian code.  We have already handled the
12822       // latter by using lvsr instead of lvsl, so just reverse BaseLoad
12823       // and ExtraLoad here.
12824       SDValue Perm;
12825       if (isLittleEndian)
12826         Perm = BuildIntrinsicOp(IntrPerm,
12827                                 ExtraLoad, BaseLoad, PermCntl, DAG, dl);
12828       else
12829         Perm = BuildIntrinsicOp(IntrPerm,
12830                                 BaseLoad, ExtraLoad, PermCntl, DAG, dl);
12831 
12832       if (VT != PermTy)
12833         Perm = Subtarget.hasAltivec() ?
12834                  DAG.getNode(ISD::BITCAST, dl, VT, Perm) :
12835                  DAG.getNode(ISD::FP_ROUND, dl, VT, Perm, // QPX
12836                                DAG.getTargetConstant(1, dl, MVT::i64));
12837                                // second argument is 1 because this rounding
12838                                // is always exact.
12839 
12840       // The output of the permutation is our loaded result, the TokenFactor is
12841       // our new chain.
12842       DCI.CombineTo(N, Perm, TF);
12843       return SDValue(N, 0);
12844     }
12845     }
12846     break;
12847     case ISD::INTRINSIC_WO_CHAIN: {
12848       bool isLittleEndian = Subtarget.isLittleEndian();
12849       unsigned IID = cast<ConstantSDNode>(N->getOperand(0))->getZExtValue();
12850       Intrinsic::ID Intr = (isLittleEndian ? Intrinsic::ppc_altivec_lvsr
12851                                            : Intrinsic::ppc_altivec_lvsl);
12852       if ((IID == Intr ||
12853            IID == Intrinsic::ppc_qpx_qvlpcld  ||
12854            IID == Intrinsic::ppc_qpx_qvlpcls) &&
12855         N->getOperand(1)->getOpcode() == ISD::ADD) {
12856         SDValue Add = N->getOperand(1);
12857 
12858         int Bits = IID == Intrinsic::ppc_qpx_qvlpcld ?
12859                    5 /* 32 byte alignment */ : 4 /* 16 byte alignment */;
12860 
12861         if (DAG.MaskedValueIsZero(Add->getOperand(1),
12862                                   APInt::getAllOnesValue(Bits /* alignment */)
12863                                       .zext(Add.getScalarValueSizeInBits()))) {
12864           SDNode *BasePtr = Add->getOperand(0).getNode();
12865           for (SDNode::use_iterator UI = BasePtr->use_begin(),
12866                                     UE = BasePtr->use_end();
12867                UI != UE; ++UI) {
12868             if (UI->getOpcode() == ISD::INTRINSIC_WO_CHAIN &&
12869                 cast<ConstantSDNode>(UI->getOperand(0))->getZExtValue() == IID) {
12870               // We've found another LVSL/LVSR, and this address is an aligned
12871               // multiple of that one. The results will be the same, so use the
12872               // one we've just found instead.
12873 
12874               return SDValue(*UI, 0);
12875             }
12876           }
12877         }
12878 
12879         if (isa<ConstantSDNode>(Add->getOperand(1))) {
12880           SDNode *BasePtr = Add->getOperand(0).getNode();
12881           for (SDNode::use_iterator UI = BasePtr->use_begin(),
12882                UE = BasePtr->use_end(); UI != UE; ++UI) {
12883             if (UI->getOpcode() == ISD::ADD &&
12884                 isa<ConstantSDNode>(UI->getOperand(1)) &&
12885                 (cast<ConstantSDNode>(Add->getOperand(1))->getZExtValue() -
12886                  cast<ConstantSDNode>(UI->getOperand(1))->getZExtValue()) %
12887                 (1ULL << Bits) == 0) {
12888               SDNode *OtherAdd = *UI;
12889               for (SDNode::use_iterator VI = OtherAdd->use_begin(),
12890                    VE = OtherAdd->use_end(); VI != VE; ++VI) {
12891                 if (VI->getOpcode() == ISD::INTRINSIC_WO_CHAIN &&
12892                     cast<ConstantSDNode>(VI->getOperand(0))->getZExtValue() == IID) {
12893                   return SDValue(*VI, 0);
12894                 }
12895               }
12896             }
12897           }
12898         }
12899       }
12900     }
12901 
12902     break;
12903   case ISD::INTRINSIC_W_CHAIN:
12904     // For little endian, VSX loads require generating lxvd2x/xxswapd.
12905     // Not needed on ISA 3.0 based CPUs since we have a non-permuting load.
12906     if (Subtarget.needsSwapsForVSXMemOps()) {
12907       switch (cast<ConstantSDNode>(N->getOperand(1))->getZExtValue()) {
12908       default:
12909         break;
12910       case Intrinsic::ppc_vsx_lxvw4x:
12911       case Intrinsic::ppc_vsx_lxvd2x:
12912         return expandVSXLoadForLE(N, DCI);
12913       }
12914     }
12915     break;
12916   case ISD::INTRINSIC_VOID:
12917     // For little endian, VSX stores require generating xxswapd/stxvd2x.
12918     // Not needed on ISA 3.0 based CPUs since we have a non-permuting store.
12919     if (Subtarget.needsSwapsForVSXMemOps()) {
12920       switch (cast<ConstantSDNode>(N->getOperand(1))->getZExtValue()) {
12921       default:
12922         break;
12923       case Intrinsic::ppc_vsx_stxvw4x:
12924       case Intrinsic::ppc_vsx_stxvd2x:
12925         return expandVSXStoreForLE(N, DCI);
12926       }
12927     }
12928     break;
12929   case ISD::BSWAP:
12930     // Turn BSWAP (LOAD) -> lhbrx/lwbrx.
12931     if (ISD::isNON_EXTLoad(N->getOperand(0).getNode()) &&
12932         N->getOperand(0).hasOneUse() &&
12933         (N->getValueType(0) == MVT::i32 || N->getValueType(0) == MVT::i16 ||
12934          (Subtarget.hasLDBRX() && Subtarget.isPPC64() &&
12935           N->getValueType(0) == MVT::i64))) {
12936       SDValue Load = N->getOperand(0);
12937       LoadSDNode *LD = cast<LoadSDNode>(Load);
12938       // Create the byte-swapping load.
12939       SDValue Ops[] = {
12940         LD->getChain(),    // Chain
12941         LD->getBasePtr(),  // Ptr
12942         DAG.getValueType(N->getValueType(0)) // VT
12943       };
12944       SDValue BSLoad =
12945         DAG.getMemIntrinsicNode(PPCISD::LBRX, dl,
12946                                 DAG.getVTList(N->getValueType(0) == MVT::i64 ?
12947                                               MVT::i64 : MVT::i32, MVT::Other),
12948                                 Ops, LD->getMemoryVT(), LD->getMemOperand());
12949 
12950       // If this is an i16 load, insert the truncate.
12951       SDValue ResVal = BSLoad;
12952       if (N->getValueType(0) == MVT::i16)
12953         ResVal = DAG.getNode(ISD::TRUNCATE, dl, MVT::i16, BSLoad);
12954 
12955       // First, combine the bswap away.  This makes the value produced by the
12956       // load dead.
12957       DCI.CombineTo(N, ResVal);
12958 
12959       // Next, combine the load away, we give it a bogus result value but a real
12960       // chain result.  The result value is dead because the bswap is dead.
12961       DCI.CombineTo(Load.getNode(), ResVal, BSLoad.getValue(1));
12962 
12963       // Return N so it doesn't get rechecked!
12964       return SDValue(N, 0);
12965     }
12966     break;
12967   case PPCISD::VCMP:
12968     // If a VCMPo node already exists with exactly the same operands as this
12969     // node, use its result instead of this node (VCMPo computes both a CR6 and
12970     // a normal output).
12971     //
12972     if (!N->getOperand(0).hasOneUse() &&
12973         !N->getOperand(1).hasOneUse() &&
12974         !N->getOperand(2).hasOneUse()) {
12975 
12976       // Scan all of the users of the LHS, looking for VCMPo's that match.
12977       SDNode *VCMPoNode = nullptr;
12978 
12979       SDNode *LHSN = N->getOperand(0).getNode();
12980       for (SDNode::use_iterator UI = LHSN->use_begin(), E = LHSN->use_end();
12981            UI != E; ++UI)
12982         if (UI->getOpcode() == PPCISD::VCMPo &&
12983             UI->getOperand(1) == N->getOperand(1) &&
12984             UI->getOperand(2) == N->getOperand(2) &&
12985             UI->getOperand(0) == N->getOperand(0)) {
12986           VCMPoNode = *UI;
12987           break;
12988         }
12989 
12990       // If there is no VCMPo node, or if the flag value has a single use, don't
12991       // transform this.
12992       if (!VCMPoNode || VCMPoNode->hasNUsesOfValue(0, 1))
12993         break;
12994 
12995       // Look at the (necessarily single) use of the flag value.  If it has a
12996       // chain, this transformation is more complex.  Note that multiple things
12997       // could use the value result, which we should ignore.
12998       SDNode *FlagUser = nullptr;
12999       for (SDNode::use_iterator UI = VCMPoNode->use_begin();
13000            FlagUser == nullptr; ++UI) {
13001         assert(UI != VCMPoNode->use_end() && "Didn't find user!");
13002         SDNode *User = *UI;
13003         for (unsigned i = 0, e = User->getNumOperands(); i != e; ++i) {
13004           if (User->getOperand(i) == SDValue(VCMPoNode, 1)) {
13005             FlagUser = User;
13006             break;
13007           }
13008         }
13009       }
13010 
13011       // If the user is a MFOCRF instruction, we know this is safe.
13012       // Otherwise we give up for right now.
13013       if (FlagUser->getOpcode() == PPCISD::MFOCRF)
13014         return SDValue(VCMPoNode, 0);
13015     }
13016     break;
13017   case ISD::BRCOND: {
13018     SDValue Cond = N->getOperand(1);
13019     SDValue Target = N->getOperand(2);
13020 
13021     if (Cond.getOpcode() == ISD::INTRINSIC_W_CHAIN &&
13022         cast<ConstantSDNode>(Cond.getOperand(1))->getZExtValue() ==
13023           Intrinsic::ppc_is_decremented_ctr_nonzero) {
13024 
13025       // We now need to make the intrinsic dead (it cannot be instruction
13026       // selected).
13027       DAG.ReplaceAllUsesOfValueWith(Cond.getValue(1), Cond.getOperand(0));
13028       assert(Cond.getNode()->hasOneUse() &&
13029              "Counter decrement has more than one use");
13030 
13031       return DAG.getNode(PPCISD::BDNZ, dl, MVT::Other,
13032                          N->getOperand(0), Target);
13033     }
13034   }
13035   break;
13036   case ISD::BR_CC: {
13037     // If this is a branch on an altivec predicate comparison, lower this so
13038     // that we don't have to do a MFOCRF: instead, branch directly on CR6.  This
13039     // lowering is done pre-legalize, because the legalizer lowers the predicate
13040     // compare down to code that is difficult to reassemble.
13041     ISD::CondCode CC = cast<CondCodeSDNode>(N->getOperand(1))->get();
13042     SDValue LHS = N->getOperand(2), RHS = N->getOperand(3);
13043 
13044     // Sometimes the promoted value of the intrinsic is ANDed by some non-zero
13045     // value. If so, pass-through the AND to get to the intrinsic.
13046     if (LHS.getOpcode() == ISD::AND &&
13047         LHS.getOperand(0).getOpcode() == ISD::INTRINSIC_W_CHAIN &&
13048         cast<ConstantSDNode>(LHS.getOperand(0).getOperand(1))->getZExtValue() ==
13049           Intrinsic::ppc_is_decremented_ctr_nonzero &&
13050         isa<ConstantSDNode>(LHS.getOperand(1)) &&
13051         !isNullConstant(LHS.getOperand(1)))
13052       LHS = LHS.getOperand(0);
13053 
13054     if (LHS.getOpcode() == ISD::INTRINSIC_W_CHAIN &&
13055         cast<ConstantSDNode>(LHS.getOperand(1))->getZExtValue() ==
13056           Intrinsic::ppc_is_decremented_ctr_nonzero &&
13057         isa<ConstantSDNode>(RHS)) {
13058       assert((CC == ISD::SETEQ || CC == ISD::SETNE) &&
13059              "Counter decrement comparison is not EQ or NE");
13060 
13061       unsigned Val = cast<ConstantSDNode>(RHS)->getZExtValue();
13062       bool isBDNZ = (CC == ISD::SETEQ && Val) ||
13063                     (CC == ISD::SETNE && !Val);
13064 
13065       // We now need to make the intrinsic dead (it cannot be instruction
13066       // selected).
13067       DAG.ReplaceAllUsesOfValueWith(LHS.getValue(1), LHS.getOperand(0));
13068       assert(LHS.getNode()->hasOneUse() &&
13069              "Counter decrement has more than one use");
13070 
13071       return DAG.getNode(isBDNZ ? PPCISD::BDNZ : PPCISD::BDZ, dl, MVT::Other,
13072                          N->getOperand(0), N->getOperand(4));
13073     }
13074 
13075     int CompareOpc;
13076     bool isDot;
13077 
13078     if (LHS.getOpcode() == ISD::INTRINSIC_WO_CHAIN &&
13079         isa<ConstantSDNode>(RHS) && (CC == ISD::SETEQ || CC == ISD::SETNE) &&
13080         getVectorCompareInfo(LHS, CompareOpc, isDot, Subtarget)) {
13081       assert(isDot && "Can't compare against a vector result!");
13082 
13083       // If this is a comparison against something other than 0/1, then we know
13084       // that the condition is never/always true.
13085       unsigned Val = cast<ConstantSDNode>(RHS)->getZExtValue();
13086       if (Val != 0 && Val != 1) {
13087         if (CC == ISD::SETEQ)      // Cond never true, remove branch.
13088           return N->getOperand(0);
13089         // Always !=, turn it into an unconditional branch.
13090         return DAG.getNode(ISD::BR, dl, MVT::Other,
13091                            N->getOperand(0), N->getOperand(4));
13092       }
13093 
13094       bool BranchOnWhenPredTrue = (CC == ISD::SETEQ) ^ (Val == 0);
13095 
13096       // Create the PPCISD altivec 'dot' comparison node.
13097       SDValue Ops[] = {
13098         LHS.getOperand(2),  // LHS of compare
13099         LHS.getOperand(3),  // RHS of compare
13100         DAG.getConstant(CompareOpc, dl, MVT::i32)
13101       };
13102       EVT VTs[] = { LHS.getOperand(2).getValueType(), MVT::Glue };
13103       SDValue CompNode = DAG.getNode(PPCISD::VCMPo, dl, VTs, Ops);
13104 
13105       // Unpack the result based on how the target uses it.
13106       PPC::Predicate CompOpc;
13107       switch (cast<ConstantSDNode>(LHS.getOperand(1))->getZExtValue()) {
13108       default:  // Can't happen, don't crash on invalid number though.
13109       case 0:   // Branch on the value of the EQ bit of CR6.
13110         CompOpc = BranchOnWhenPredTrue ? PPC::PRED_EQ : PPC::PRED_NE;
13111         break;
13112       case 1:   // Branch on the inverted value of the EQ bit of CR6.
13113         CompOpc = BranchOnWhenPredTrue ? PPC::PRED_NE : PPC::PRED_EQ;
13114         break;
13115       case 2:   // Branch on the value of the LT bit of CR6.
13116         CompOpc = BranchOnWhenPredTrue ? PPC::PRED_LT : PPC::PRED_GE;
13117         break;
13118       case 3:   // Branch on the inverted value of the LT bit of CR6.
13119         CompOpc = BranchOnWhenPredTrue ? PPC::PRED_GE : PPC::PRED_LT;
13120         break;
13121       }
13122 
13123       return DAG.getNode(PPCISD::COND_BRANCH, dl, MVT::Other, N->getOperand(0),
13124                          DAG.getConstant(CompOpc, dl, MVT::i32),
13125                          DAG.getRegister(PPC::CR6, MVT::i32),
13126                          N->getOperand(4), CompNode.getValue(1));
13127     }
13128     break;
13129   }
13130   case ISD::BUILD_VECTOR:
13131     return DAGCombineBuildVector(N, DCI);
13132   }
13133 
13134   return SDValue();
13135 }
13136 
13137 SDValue
13138 PPCTargetLowering::BuildSDIVPow2(SDNode *N, const APInt &Divisor,
13139                                  SelectionDAG &DAG,
13140                                  SmallVectorImpl<SDNode *> &Created) const {
13141   // fold (sdiv X, pow2)
13142   EVT VT = N->getValueType(0);
13143   if (VT == MVT::i64 && !Subtarget.isPPC64())
13144     return SDValue();
13145   if ((VT != MVT::i32 && VT != MVT::i64) ||
13146       !(Divisor.isPowerOf2() || (-Divisor).isPowerOf2()))
13147     return SDValue();
13148 
13149   SDLoc DL(N);
13150   SDValue N0 = N->getOperand(0);
13151 
13152   bool IsNegPow2 = (-Divisor).isPowerOf2();
13153   unsigned Lg2 = (IsNegPow2 ? -Divisor : Divisor).countTrailingZeros();
13154   SDValue ShiftAmt = DAG.getConstant(Lg2, DL, VT);
13155 
13156   SDValue Op = DAG.getNode(PPCISD::SRA_ADDZE, DL, VT, N0, ShiftAmt);
13157   Created.push_back(Op.getNode());
13158 
13159   if (IsNegPow2) {
13160     Op = DAG.getNode(ISD::SUB, DL, VT, DAG.getConstant(0, DL, VT), Op);
13161     Created.push_back(Op.getNode());
13162   }
13163 
13164   return Op;
13165 }
13166 
13167 //===----------------------------------------------------------------------===//
13168 // Inline Assembly Support
13169 //===----------------------------------------------------------------------===//
13170 
13171 void PPCTargetLowering::computeKnownBitsForTargetNode(const SDValue Op,
13172                                                       KnownBits &Known,
13173                                                       const APInt &DemandedElts,
13174                                                       const SelectionDAG &DAG,
13175                                                       unsigned Depth) const {
13176   Known.resetAll();
13177   switch (Op.getOpcode()) {
13178   default: break;
13179   case PPCISD::LBRX: {
13180     // lhbrx is known to have the top bits cleared out.
13181     if (cast<VTSDNode>(Op.getOperand(2))->getVT() == MVT::i16)
13182       Known.Zero = 0xFFFF0000;
13183     break;
13184   }
13185   case ISD::INTRINSIC_WO_CHAIN: {
13186     switch (cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue()) {
13187     default: break;
13188     case Intrinsic::ppc_altivec_vcmpbfp_p:
13189     case Intrinsic::ppc_altivec_vcmpeqfp_p:
13190     case Intrinsic::ppc_altivec_vcmpequb_p:
13191     case Intrinsic::ppc_altivec_vcmpequh_p:
13192     case Intrinsic::ppc_altivec_vcmpequw_p:
13193     case Intrinsic::ppc_altivec_vcmpequd_p:
13194     case Intrinsic::ppc_altivec_vcmpgefp_p:
13195     case Intrinsic::ppc_altivec_vcmpgtfp_p:
13196     case Intrinsic::ppc_altivec_vcmpgtsb_p:
13197     case Intrinsic::ppc_altivec_vcmpgtsh_p:
13198     case Intrinsic::ppc_altivec_vcmpgtsw_p:
13199     case Intrinsic::ppc_altivec_vcmpgtsd_p:
13200     case Intrinsic::ppc_altivec_vcmpgtub_p:
13201     case Intrinsic::ppc_altivec_vcmpgtuh_p:
13202     case Intrinsic::ppc_altivec_vcmpgtuw_p:
13203     case Intrinsic::ppc_altivec_vcmpgtud_p:
13204       Known.Zero = ~1U;  // All bits but the low one are known to be zero.
13205       break;
13206     }
13207   }
13208   }
13209 }
13210 
13211 unsigned PPCTargetLowering::getPrefLoopAlignment(MachineLoop *ML) const {
13212   switch (Subtarget.getDarwinDirective()) {
13213   default: break;
13214   case PPC::DIR_970:
13215   case PPC::DIR_PWR4:
13216   case PPC::DIR_PWR5:
13217   case PPC::DIR_PWR5X:
13218   case PPC::DIR_PWR6:
13219   case PPC::DIR_PWR6X:
13220   case PPC::DIR_PWR7:
13221   case PPC::DIR_PWR8:
13222   case PPC::DIR_PWR9: {
13223     if (!ML)
13224       break;
13225 
13226     const PPCInstrInfo *TII = Subtarget.getInstrInfo();
13227 
13228     // For small loops (between 5 and 8 instructions), align to a 32-byte
13229     // boundary so that the entire loop fits in one instruction-cache line.
13230     uint64_t LoopSize = 0;
13231     for (auto I = ML->block_begin(), IE = ML->block_end(); I != IE; ++I)
13232       for (auto J = (*I)->begin(), JE = (*I)->end(); J != JE; ++J) {
13233         LoopSize += TII->getInstSizeInBytes(*J);
13234         if (LoopSize > 32)
13235           break;
13236       }
13237 
13238     if (LoopSize > 16 && LoopSize <= 32)
13239       return 5;
13240 
13241     break;
13242   }
13243   }
13244 
13245   return TargetLowering::getPrefLoopAlignment(ML);
13246 }
13247 
13248 /// getConstraintType - Given a constraint, return the type of
13249 /// constraint it is for this target.
13250 PPCTargetLowering::ConstraintType
13251 PPCTargetLowering::getConstraintType(StringRef Constraint) const {
13252   if (Constraint.size() == 1) {
13253     switch (Constraint[0]) {
13254     default: break;
13255     case 'b':
13256     case 'r':
13257     case 'f':
13258     case 'd':
13259     case 'v':
13260     case 'y':
13261       return C_RegisterClass;
13262     case 'Z':
13263       // FIXME: While Z does indicate a memory constraint, it specifically
13264       // indicates an r+r address (used in conjunction with the 'y' modifier
13265       // in the replacement string). Currently, we're forcing the base
13266       // register to be r0 in the asm printer (which is interpreted as zero)
13267       // and forming the complete address in the second register. This is
13268       // suboptimal.
13269       return C_Memory;
13270     }
13271   } else if (Constraint == "wc") { // individual CR bits.
13272     return C_RegisterClass;
13273   } else if (Constraint == "wa" || Constraint == "wd" ||
13274              Constraint == "wf" || Constraint == "ws") {
13275     return C_RegisterClass; // VSX registers.
13276   }
13277   return TargetLowering::getConstraintType(Constraint);
13278 }
13279 
13280 /// Examine constraint type and operand type and determine a weight value.
13281 /// This object must already have been set up with the operand type
13282 /// and the current alternative constraint selected.
13283 TargetLowering::ConstraintWeight
13284 PPCTargetLowering::getSingleConstraintMatchWeight(
13285     AsmOperandInfo &info, const char *constraint) const {
13286   ConstraintWeight weight = CW_Invalid;
13287   Value *CallOperandVal = info.CallOperandVal;
13288     // If we don't have a value, we can't do a match,
13289     // but allow it at the lowest weight.
13290   if (!CallOperandVal)
13291     return CW_Default;
13292   Type *type = CallOperandVal->getType();
13293 
13294   // Look at the constraint type.
13295   if (StringRef(constraint) == "wc" && type->isIntegerTy(1))
13296     return CW_Register; // an individual CR bit.
13297   else if ((StringRef(constraint) == "wa" ||
13298             StringRef(constraint) == "wd" ||
13299             StringRef(constraint) == "wf") &&
13300            type->isVectorTy())
13301     return CW_Register;
13302   else if (StringRef(constraint) == "ws" && type->isDoubleTy())
13303     return CW_Register;
13304 
13305   switch (*constraint) {
13306   default:
13307     weight = TargetLowering::getSingleConstraintMatchWeight(info, constraint);
13308     break;
13309   case 'b':
13310     if (type->isIntegerTy())
13311       weight = CW_Register;
13312     break;
13313   case 'f':
13314     if (type->isFloatTy())
13315       weight = CW_Register;
13316     break;
13317   case 'd':
13318     if (type->isDoubleTy())
13319       weight = CW_Register;
13320     break;
13321   case 'v':
13322     if (type->isVectorTy())
13323       weight = CW_Register;
13324     break;
13325   case 'y':
13326     weight = CW_Register;
13327     break;
13328   case 'Z':
13329     weight = CW_Memory;
13330     break;
13331   }
13332   return weight;
13333 }
13334 
13335 std::pair<unsigned, const TargetRegisterClass *>
13336 PPCTargetLowering::getRegForInlineAsmConstraint(const TargetRegisterInfo *TRI,
13337                                                 StringRef Constraint,
13338                                                 MVT VT) const {
13339   if (Constraint.size() == 1) {
13340     // GCC RS6000 Constraint Letters
13341     switch (Constraint[0]) {
13342     case 'b':   // R1-R31
13343       if (VT == MVT::i64 && Subtarget.isPPC64())
13344         return std::make_pair(0U, &PPC::G8RC_NOX0RegClass);
13345       return std::make_pair(0U, &PPC::GPRC_NOR0RegClass);
13346     case 'r':   // R0-R31
13347       if (VT == MVT::i64 && Subtarget.isPPC64())
13348         return std::make_pair(0U, &PPC::G8RCRegClass);
13349       return std::make_pair(0U, &PPC::GPRCRegClass);
13350     // 'd' and 'f' constraints are both defined to be "the floating point
13351     // registers", where one is for 32-bit and the other for 64-bit. We don't
13352     // really care overly much here so just give them all the same reg classes.
13353     case 'd':
13354     case 'f':
13355       if (Subtarget.hasSPE()) {
13356         if (VT == MVT::f32 || VT == MVT::i32)
13357           return std::make_pair(0U, &PPC::SPE4RCRegClass);
13358         if (VT == MVT::f64 || VT == MVT::i64)
13359           return std::make_pair(0U, &PPC::SPERCRegClass);
13360       } else {
13361         if (VT == MVT::f32 || VT == MVT::i32)
13362           return std::make_pair(0U, &PPC::F4RCRegClass);
13363         if (VT == MVT::f64 || VT == MVT::i64)
13364           return std::make_pair(0U, &PPC::F8RCRegClass);
13365         if (VT == MVT::v4f64 && Subtarget.hasQPX())
13366           return std::make_pair(0U, &PPC::QFRCRegClass);
13367         if (VT == MVT::v4f32 && Subtarget.hasQPX())
13368           return std::make_pair(0U, &PPC::QSRCRegClass);
13369       }
13370       break;
13371     case 'v':
13372       if (VT == MVT::v4f64 && Subtarget.hasQPX())
13373         return std::make_pair(0U, &PPC::QFRCRegClass);
13374       if (VT == MVT::v4f32 && Subtarget.hasQPX())
13375         return std::make_pair(0U, &PPC::QSRCRegClass);
13376       if (Subtarget.hasAltivec())
13377         return std::make_pair(0U, &PPC::VRRCRegClass);
13378       break;
13379     case 'y':   // crrc
13380       return std::make_pair(0U, &PPC::CRRCRegClass);
13381     }
13382   } else if (Constraint == "wc" && Subtarget.useCRBits()) {
13383     // An individual CR bit.
13384     return std::make_pair(0U, &PPC::CRBITRCRegClass);
13385   } else if ((Constraint == "wa" || Constraint == "wd" ||
13386              Constraint == "wf") && Subtarget.hasVSX()) {
13387     return std::make_pair(0U, &PPC::VSRCRegClass);
13388   } else if (Constraint == "ws" && Subtarget.hasVSX()) {
13389     if (VT == MVT::f32 && Subtarget.hasP8Vector())
13390       return std::make_pair(0U, &PPC::VSSRCRegClass);
13391     else
13392       return std::make_pair(0U, &PPC::VSFRCRegClass);
13393   }
13394 
13395   std::pair<unsigned, const TargetRegisterClass *> R =
13396       TargetLowering::getRegForInlineAsmConstraint(TRI, Constraint, VT);
13397 
13398   // r[0-9]+ are used, on PPC64, to refer to the corresponding 64-bit registers
13399   // (which we call X[0-9]+). If a 64-bit value has been requested, and a
13400   // 32-bit GPR has been selected, then 'upgrade' it to the 64-bit parent
13401   // register.
13402   // FIXME: If TargetLowering::getRegForInlineAsmConstraint could somehow use
13403   // the AsmName field from *RegisterInfo.td, then this would not be necessary.
13404   if (R.first && VT == MVT::i64 && Subtarget.isPPC64() &&
13405       PPC::GPRCRegClass.contains(R.first))
13406     return std::make_pair(TRI->getMatchingSuperReg(R.first,
13407                             PPC::sub_32, &PPC::G8RCRegClass),
13408                           &PPC::G8RCRegClass);
13409 
13410   // GCC accepts 'cc' as an alias for 'cr0', and we need to do the same.
13411   if (!R.second && StringRef("{cc}").equals_lower(Constraint)) {
13412     R.first = PPC::CR0;
13413     R.second = &PPC::CRRCRegClass;
13414   }
13415 
13416   return R;
13417 }
13418 
13419 /// LowerAsmOperandForConstraint - Lower the specified operand into the Ops
13420 /// vector.  If it is invalid, don't add anything to Ops.
13421 void PPCTargetLowering::LowerAsmOperandForConstraint(SDValue Op,
13422                                                      std::string &Constraint,
13423                                                      std::vector<SDValue>&Ops,
13424                                                      SelectionDAG &DAG) const {
13425   SDValue Result;
13426 
13427   // Only support length 1 constraints.
13428   if (Constraint.length() > 1) return;
13429 
13430   char Letter = Constraint[0];
13431   switch (Letter) {
13432   default: break;
13433   case 'I':
13434   case 'J':
13435   case 'K':
13436   case 'L':
13437   case 'M':
13438   case 'N':
13439   case 'O':
13440   case 'P': {
13441     ConstantSDNode *CST = dyn_cast<ConstantSDNode>(Op);
13442     if (!CST) return; // Must be an immediate to match.
13443     SDLoc dl(Op);
13444     int64_t Value = CST->getSExtValue();
13445     EVT TCVT = MVT::i64; // All constants taken to be 64 bits so that negative
13446                          // numbers are printed as such.
13447     switch (Letter) {
13448     default: llvm_unreachable("Unknown constraint letter!");
13449     case 'I':  // "I" is a signed 16-bit constant.
13450       if (isInt<16>(Value))
13451         Result = DAG.getTargetConstant(Value, dl, TCVT);
13452       break;
13453     case 'J':  // "J" is a constant with only the high-order 16 bits nonzero.
13454       if (isShiftedUInt<16, 16>(Value))
13455         Result = DAG.getTargetConstant(Value, dl, TCVT);
13456       break;
13457     case 'L':  // "L" is a signed 16-bit constant shifted left 16 bits.
13458       if (isShiftedInt<16, 16>(Value))
13459         Result = DAG.getTargetConstant(Value, dl, TCVT);
13460       break;
13461     case 'K':  // "K" is a constant with only the low-order 16 bits nonzero.
13462       if (isUInt<16>(Value))
13463         Result = DAG.getTargetConstant(Value, dl, TCVT);
13464       break;
13465     case 'M':  // "M" is a constant that is greater than 31.
13466       if (Value > 31)
13467         Result = DAG.getTargetConstant(Value, dl, TCVT);
13468       break;
13469     case 'N':  // "N" is a positive constant that is an exact power of two.
13470       if (Value > 0 && isPowerOf2_64(Value))
13471         Result = DAG.getTargetConstant(Value, dl, TCVT);
13472       break;
13473     case 'O':  // "O" is the constant zero.
13474       if (Value == 0)
13475         Result = DAG.getTargetConstant(Value, dl, TCVT);
13476       break;
13477     case 'P':  // "P" is a constant whose negation is a signed 16-bit constant.
13478       if (isInt<16>(-Value))
13479         Result = DAG.getTargetConstant(Value, dl, TCVT);
13480       break;
13481     }
13482     break;
13483   }
13484   }
13485 
13486   if (Result.getNode()) {
13487     Ops.push_back(Result);
13488     return;
13489   }
13490 
13491   // Handle standard constraint letters.
13492   TargetLowering::LowerAsmOperandForConstraint(Op, Constraint, Ops, DAG);
13493 }
13494 
13495 // isLegalAddressingMode - Return true if the addressing mode represented
13496 // by AM is legal for this target, for a load/store of the specified type.
13497 bool PPCTargetLowering::isLegalAddressingMode(const DataLayout &DL,
13498                                               const AddrMode &AM, Type *Ty,
13499                                               unsigned AS, Instruction *I) const {
13500   // PPC does not allow r+i addressing modes for vectors!
13501   if (Ty->isVectorTy() && AM.BaseOffs != 0)
13502     return false;
13503 
13504   // PPC allows a sign-extended 16-bit immediate field.
13505   if (AM.BaseOffs <= -(1LL << 16) || AM.BaseOffs >= (1LL << 16)-1)
13506     return false;
13507 
13508   // No global is ever allowed as a base.
13509   if (AM.BaseGV)
13510     return false;
13511 
13512   // PPC only support r+r,
13513   switch (AM.Scale) {
13514   case 0:  // "r+i" or just "i", depending on HasBaseReg.
13515     break;
13516   case 1:
13517     if (AM.HasBaseReg && AM.BaseOffs)  // "r+r+i" is not allowed.
13518       return false;
13519     // Otherwise we have r+r or r+i.
13520     break;
13521   case 2:
13522     if (AM.HasBaseReg || AM.BaseOffs)  // 2*r+r  or  2*r+i is not allowed.
13523       return false;
13524     // Allow 2*r as r+r.
13525     break;
13526   default:
13527     // No other scales are supported.
13528     return false;
13529   }
13530 
13531   return true;
13532 }
13533 
13534 SDValue PPCTargetLowering::LowerRETURNADDR(SDValue Op,
13535                                            SelectionDAG &DAG) const {
13536   MachineFunction &MF = DAG.getMachineFunction();
13537   MachineFrameInfo &MFI = MF.getFrameInfo();
13538   MFI.setReturnAddressIsTaken(true);
13539 
13540   if (verifyReturnAddressArgumentIsConstant(Op, DAG))
13541     return SDValue();
13542 
13543   SDLoc dl(Op);
13544   unsigned Depth = cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue();
13545 
13546   // Make sure the function does not optimize away the store of the RA to
13547   // the stack.
13548   PPCFunctionInfo *FuncInfo = MF.getInfo<PPCFunctionInfo>();
13549   FuncInfo->setLRStoreRequired();
13550   bool isPPC64 = Subtarget.isPPC64();
13551   auto PtrVT = getPointerTy(MF.getDataLayout());
13552 
13553   if (Depth > 0) {
13554     SDValue FrameAddr = LowerFRAMEADDR(Op, DAG);
13555     SDValue Offset =
13556         DAG.getConstant(Subtarget.getFrameLowering()->getReturnSaveOffset(), dl,
13557                         isPPC64 ? MVT::i64 : MVT::i32);
13558     return DAG.getLoad(PtrVT, dl, DAG.getEntryNode(),
13559                        DAG.getNode(ISD::ADD, dl, PtrVT, FrameAddr, Offset),
13560                        MachinePointerInfo());
13561   }
13562 
13563   // Just load the return address off the stack.
13564   SDValue RetAddrFI = getReturnAddrFrameIndex(DAG);
13565   return DAG.getLoad(PtrVT, dl, DAG.getEntryNode(), RetAddrFI,
13566                      MachinePointerInfo());
13567 }
13568 
13569 SDValue PPCTargetLowering::LowerFRAMEADDR(SDValue Op,
13570                                           SelectionDAG &DAG) const {
13571   SDLoc dl(Op);
13572   unsigned Depth = cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue();
13573 
13574   MachineFunction &MF = DAG.getMachineFunction();
13575   MachineFrameInfo &MFI = MF.getFrameInfo();
13576   MFI.setFrameAddressIsTaken(true);
13577 
13578   EVT PtrVT = getPointerTy(MF.getDataLayout());
13579   bool isPPC64 = PtrVT == MVT::i64;
13580 
13581   // Naked functions never have a frame pointer, and so we use r1. For all
13582   // other functions, this decision must be delayed until during PEI.
13583   unsigned FrameReg;
13584   if (MF.getFunction().hasFnAttribute(Attribute::Naked))
13585     FrameReg = isPPC64 ? PPC::X1 : PPC::R1;
13586   else
13587     FrameReg = isPPC64 ? PPC::FP8 : PPC::FP;
13588 
13589   SDValue FrameAddr = DAG.getCopyFromReg(DAG.getEntryNode(), dl, FrameReg,
13590                                          PtrVT);
13591   while (Depth--)
13592     FrameAddr = DAG.getLoad(Op.getValueType(), dl, DAG.getEntryNode(),
13593                             FrameAddr, MachinePointerInfo());
13594   return FrameAddr;
13595 }
13596 
13597 // FIXME? Maybe this could be a TableGen attribute on some registers and
13598 // this table could be generated automatically from RegInfo.
13599 unsigned PPCTargetLowering::getRegisterByName(const char* RegName, EVT VT,
13600                                               SelectionDAG &DAG) const {
13601   bool isPPC64 = Subtarget.isPPC64();
13602   bool isDarwinABI = Subtarget.isDarwinABI();
13603 
13604   if ((isPPC64 && VT != MVT::i64 && VT != MVT::i32) ||
13605       (!isPPC64 && VT != MVT::i32))
13606     report_fatal_error("Invalid register global variable type");
13607 
13608   bool is64Bit = isPPC64 && VT == MVT::i64;
13609   unsigned Reg = StringSwitch<unsigned>(RegName)
13610                    .Case("r1", is64Bit ? PPC::X1 : PPC::R1)
13611                    .Case("r2", (isDarwinABI || isPPC64) ? 0 : PPC::R2)
13612                    .Case("r13", (!isPPC64 && isDarwinABI) ? 0 :
13613                                   (is64Bit ? PPC::X13 : PPC::R13))
13614                    .Default(0);
13615 
13616   if (Reg)
13617     return Reg;
13618   report_fatal_error("Invalid register name global variable");
13619 }
13620 
13621 bool
13622 PPCTargetLowering::isOffsetFoldingLegal(const GlobalAddressSDNode *GA) const {
13623   // The PowerPC target isn't yet aware of offsets.
13624   return false;
13625 }
13626 
13627 bool PPCTargetLowering::getTgtMemIntrinsic(IntrinsicInfo &Info,
13628                                            const CallInst &I,
13629                                            MachineFunction &MF,
13630                                            unsigned Intrinsic) const {
13631   switch (Intrinsic) {
13632   case Intrinsic::ppc_qpx_qvlfd:
13633   case Intrinsic::ppc_qpx_qvlfs:
13634   case Intrinsic::ppc_qpx_qvlfcd:
13635   case Intrinsic::ppc_qpx_qvlfcs:
13636   case Intrinsic::ppc_qpx_qvlfiwa:
13637   case Intrinsic::ppc_qpx_qvlfiwz:
13638   case Intrinsic::ppc_altivec_lvx:
13639   case Intrinsic::ppc_altivec_lvxl:
13640   case Intrinsic::ppc_altivec_lvebx:
13641   case Intrinsic::ppc_altivec_lvehx:
13642   case Intrinsic::ppc_altivec_lvewx:
13643   case Intrinsic::ppc_vsx_lxvd2x:
13644   case Intrinsic::ppc_vsx_lxvw4x: {
13645     EVT VT;
13646     switch (Intrinsic) {
13647     case Intrinsic::ppc_altivec_lvebx:
13648       VT = MVT::i8;
13649       break;
13650     case Intrinsic::ppc_altivec_lvehx:
13651       VT = MVT::i16;
13652       break;
13653     case Intrinsic::ppc_altivec_lvewx:
13654       VT = MVT::i32;
13655       break;
13656     case Intrinsic::ppc_vsx_lxvd2x:
13657       VT = MVT::v2f64;
13658       break;
13659     case Intrinsic::ppc_qpx_qvlfd:
13660       VT = MVT::v4f64;
13661       break;
13662     case Intrinsic::ppc_qpx_qvlfs:
13663       VT = MVT::v4f32;
13664       break;
13665     case Intrinsic::ppc_qpx_qvlfcd:
13666       VT = MVT::v2f64;
13667       break;
13668     case Intrinsic::ppc_qpx_qvlfcs:
13669       VT = MVT::v2f32;
13670       break;
13671     default:
13672       VT = MVT::v4i32;
13673       break;
13674     }
13675 
13676     Info.opc = ISD::INTRINSIC_W_CHAIN;
13677     Info.memVT = VT;
13678     Info.ptrVal = I.getArgOperand(0);
13679     Info.offset = -VT.getStoreSize()+1;
13680     Info.size = 2*VT.getStoreSize()-1;
13681     Info.align = 1;
13682     Info.flags = MachineMemOperand::MOLoad;
13683     return true;
13684   }
13685   case Intrinsic::ppc_qpx_qvlfda:
13686   case Intrinsic::ppc_qpx_qvlfsa:
13687   case Intrinsic::ppc_qpx_qvlfcda:
13688   case Intrinsic::ppc_qpx_qvlfcsa:
13689   case Intrinsic::ppc_qpx_qvlfiwaa:
13690   case Intrinsic::ppc_qpx_qvlfiwza: {
13691     EVT VT;
13692     switch (Intrinsic) {
13693     case Intrinsic::ppc_qpx_qvlfda:
13694       VT = MVT::v4f64;
13695       break;
13696     case Intrinsic::ppc_qpx_qvlfsa:
13697       VT = MVT::v4f32;
13698       break;
13699     case Intrinsic::ppc_qpx_qvlfcda:
13700       VT = MVT::v2f64;
13701       break;
13702     case Intrinsic::ppc_qpx_qvlfcsa:
13703       VT = MVT::v2f32;
13704       break;
13705     default:
13706       VT = MVT::v4i32;
13707       break;
13708     }
13709 
13710     Info.opc = ISD::INTRINSIC_W_CHAIN;
13711     Info.memVT = VT;
13712     Info.ptrVal = I.getArgOperand(0);
13713     Info.offset = 0;
13714     Info.size = VT.getStoreSize();
13715     Info.align = 1;
13716     Info.flags = MachineMemOperand::MOLoad;
13717     return true;
13718   }
13719   case Intrinsic::ppc_qpx_qvstfd:
13720   case Intrinsic::ppc_qpx_qvstfs:
13721   case Intrinsic::ppc_qpx_qvstfcd:
13722   case Intrinsic::ppc_qpx_qvstfcs:
13723   case Intrinsic::ppc_qpx_qvstfiw:
13724   case Intrinsic::ppc_altivec_stvx:
13725   case Intrinsic::ppc_altivec_stvxl:
13726   case Intrinsic::ppc_altivec_stvebx:
13727   case Intrinsic::ppc_altivec_stvehx:
13728   case Intrinsic::ppc_altivec_stvewx:
13729   case Intrinsic::ppc_vsx_stxvd2x:
13730   case Intrinsic::ppc_vsx_stxvw4x: {
13731     EVT VT;
13732     switch (Intrinsic) {
13733     case Intrinsic::ppc_altivec_stvebx:
13734       VT = MVT::i8;
13735       break;
13736     case Intrinsic::ppc_altivec_stvehx:
13737       VT = MVT::i16;
13738       break;
13739     case Intrinsic::ppc_altivec_stvewx:
13740       VT = MVT::i32;
13741       break;
13742     case Intrinsic::ppc_vsx_stxvd2x:
13743       VT = MVT::v2f64;
13744       break;
13745     case Intrinsic::ppc_qpx_qvstfd:
13746       VT = MVT::v4f64;
13747       break;
13748     case Intrinsic::ppc_qpx_qvstfs:
13749       VT = MVT::v4f32;
13750       break;
13751     case Intrinsic::ppc_qpx_qvstfcd:
13752       VT = MVT::v2f64;
13753       break;
13754     case Intrinsic::ppc_qpx_qvstfcs:
13755       VT = MVT::v2f32;
13756       break;
13757     default:
13758       VT = MVT::v4i32;
13759       break;
13760     }
13761 
13762     Info.opc = ISD::INTRINSIC_VOID;
13763     Info.memVT = VT;
13764     Info.ptrVal = I.getArgOperand(1);
13765     Info.offset = -VT.getStoreSize()+1;
13766     Info.size = 2*VT.getStoreSize()-1;
13767     Info.align = 1;
13768     Info.flags = MachineMemOperand::MOStore;
13769     return true;
13770   }
13771   case Intrinsic::ppc_qpx_qvstfda:
13772   case Intrinsic::ppc_qpx_qvstfsa:
13773   case Intrinsic::ppc_qpx_qvstfcda:
13774   case Intrinsic::ppc_qpx_qvstfcsa:
13775   case Intrinsic::ppc_qpx_qvstfiwa: {
13776     EVT VT;
13777     switch (Intrinsic) {
13778     case Intrinsic::ppc_qpx_qvstfda:
13779       VT = MVT::v4f64;
13780       break;
13781     case Intrinsic::ppc_qpx_qvstfsa:
13782       VT = MVT::v4f32;
13783       break;
13784     case Intrinsic::ppc_qpx_qvstfcda:
13785       VT = MVT::v2f64;
13786       break;
13787     case Intrinsic::ppc_qpx_qvstfcsa:
13788       VT = MVT::v2f32;
13789       break;
13790     default:
13791       VT = MVT::v4i32;
13792       break;
13793     }
13794 
13795     Info.opc = ISD::INTRINSIC_VOID;
13796     Info.memVT = VT;
13797     Info.ptrVal = I.getArgOperand(1);
13798     Info.offset = 0;
13799     Info.size = VT.getStoreSize();
13800     Info.align = 1;
13801     Info.flags = MachineMemOperand::MOStore;
13802     return true;
13803   }
13804   default:
13805     break;
13806   }
13807 
13808   return false;
13809 }
13810 
13811 /// getOptimalMemOpType - Returns the target specific optimal type for load
13812 /// and store operations as a result of memset, memcpy, and memmove
13813 /// lowering. If DstAlign is zero that means it's safe to destination
13814 /// alignment can satisfy any constraint. Similarly if SrcAlign is zero it
13815 /// means there isn't a need to check it against alignment requirement,
13816 /// probably because the source does not need to be loaded. If 'IsMemset' is
13817 /// true, that means it's expanding a memset. If 'ZeroMemset' is true, that
13818 /// means it's a memset of zero. 'MemcpyStrSrc' indicates whether the memcpy
13819 /// source is constant so it does not need to be loaded.
13820 /// It returns EVT::Other if the type should be determined using generic
13821 /// target-independent logic.
13822 EVT PPCTargetLowering::getOptimalMemOpType(uint64_t Size,
13823                                            unsigned DstAlign, unsigned SrcAlign,
13824                                            bool IsMemset, bool ZeroMemset,
13825                                            bool MemcpyStrSrc,
13826                                            MachineFunction &MF) const {
13827   if (getTargetMachine().getOptLevel() != CodeGenOpt::None) {
13828     const Function &F = MF.getFunction();
13829     // When expanding a memset, require at least two QPX instructions to cover
13830     // the cost of loading the value to be stored from the constant pool.
13831     if (Subtarget.hasQPX() && Size >= 32 && (!IsMemset || Size >= 64) &&
13832        (!SrcAlign || SrcAlign >= 32) && (!DstAlign || DstAlign >= 32) &&
13833         !F.hasFnAttribute(Attribute::NoImplicitFloat)) {
13834       return MVT::v4f64;
13835     }
13836 
13837     // We should use Altivec/VSX loads and stores when available. For unaligned
13838     // addresses, unaligned VSX loads are only fast starting with the P8.
13839     if (Subtarget.hasAltivec() && Size >= 16 &&
13840         (((!SrcAlign || SrcAlign >= 16) && (!DstAlign || DstAlign >= 16)) ||
13841          ((IsMemset && Subtarget.hasVSX()) || Subtarget.hasP8Vector())))
13842       return MVT::v4i32;
13843   }
13844 
13845   if (Subtarget.isPPC64()) {
13846     return MVT::i64;
13847   }
13848 
13849   return MVT::i32;
13850 }
13851 
13852 /// Returns true if it is beneficial to convert a load of a constant
13853 /// to just the constant itself.
13854 bool PPCTargetLowering::shouldConvertConstantLoadToIntImm(const APInt &Imm,
13855                                                           Type *Ty) const {
13856   assert(Ty->isIntegerTy());
13857 
13858   unsigned BitSize = Ty->getPrimitiveSizeInBits();
13859   return !(BitSize == 0 || BitSize > 64);
13860 }
13861 
13862 bool PPCTargetLowering::isTruncateFree(Type *Ty1, Type *Ty2) const {
13863   if (!Ty1->isIntegerTy() || !Ty2->isIntegerTy())
13864     return false;
13865   unsigned NumBits1 = Ty1->getPrimitiveSizeInBits();
13866   unsigned NumBits2 = Ty2->getPrimitiveSizeInBits();
13867   return NumBits1 == 64 && NumBits2 == 32;
13868 }
13869 
13870 bool PPCTargetLowering::isTruncateFree(EVT VT1, EVT VT2) const {
13871   if (!VT1.isInteger() || !VT2.isInteger())
13872     return false;
13873   unsigned NumBits1 = VT1.getSizeInBits();
13874   unsigned NumBits2 = VT2.getSizeInBits();
13875   return NumBits1 == 64 && NumBits2 == 32;
13876 }
13877 
13878 bool PPCTargetLowering::isZExtFree(SDValue Val, EVT VT2) const {
13879   // Generally speaking, zexts are not free, but they are free when they can be
13880   // folded with other operations.
13881   if (LoadSDNode *LD = dyn_cast<LoadSDNode>(Val)) {
13882     EVT MemVT = LD->getMemoryVT();
13883     if ((MemVT == MVT::i1 || MemVT == MVT::i8 || MemVT == MVT::i16 ||
13884          (Subtarget.isPPC64() && MemVT == MVT::i32)) &&
13885         (LD->getExtensionType() == ISD::NON_EXTLOAD ||
13886          LD->getExtensionType() == ISD::ZEXTLOAD))
13887       return true;
13888   }
13889 
13890   // FIXME: Add other cases...
13891   //  - 32-bit shifts with a zext to i64
13892   //  - zext after ctlz, bswap, etc.
13893   //  - zext after and by a constant mask
13894 
13895   return TargetLowering::isZExtFree(Val, VT2);
13896 }
13897 
13898 bool PPCTargetLowering::isFPExtFree(EVT DestVT, EVT SrcVT) const {
13899   assert(DestVT.isFloatingPoint() && SrcVT.isFloatingPoint() &&
13900          "invalid fpext types");
13901   // Extending to float128 is not free.
13902   if (DestVT == MVT::f128)
13903     return false;
13904   return true;
13905 }
13906 
13907 bool PPCTargetLowering::isLegalICmpImmediate(int64_t Imm) const {
13908   return isInt<16>(Imm) || isUInt<16>(Imm);
13909 }
13910 
13911 bool PPCTargetLowering::isLegalAddImmediate(int64_t Imm) const {
13912   return isInt<16>(Imm) || isUInt<16>(Imm);
13913 }
13914 
13915 bool PPCTargetLowering::allowsMisalignedMemoryAccesses(EVT VT,
13916                                                        unsigned,
13917                                                        unsigned,
13918                                                        bool *Fast) const {
13919   if (DisablePPCUnaligned)
13920     return false;
13921 
13922   // PowerPC supports unaligned memory access for simple non-vector types.
13923   // Although accessing unaligned addresses is not as efficient as accessing
13924   // aligned addresses, it is generally more efficient than manual expansion,
13925   // and generally only traps for software emulation when crossing page
13926   // boundaries.
13927 
13928   if (!VT.isSimple())
13929     return false;
13930 
13931   if (VT.getSimpleVT().isVector()) {
13932     if (Subtarget.hasVSX()) {
13933       if (VT != MVT::v2f64 && VT != MVT::v2i64 &&
13934           VT != MVT::v4f32 && VT != MVT::v4i32)
13935         return false;
13936     } else {
13937       return false;
13938     }
13939   }
13940 
13941   if (VT == MVT::ppcf128)
13942     return false;
13943 
13944   if (Fast)
13945     *Fast = true;
13946 
13947   return true;
13948 }
13949 
13950 bool PPCTargetLowering::isFMAFasterThanFMulAndFAdd(EVT VT) const {
13951   VT = VT.getScalarType();
13952 
13953   if (!VT.isSimple())
13954     return false;
13955 
13956   switch (VT.getSimpleVT().SimpleTy) {
13957   case MVT::f32:
13958   case MVT::f64:
13959     return true;
13960   case MVT::f128:
13961     return (EnableQuadPrecision && Subtarget.hasP9Vector());
13962   default:
13963     break;
13964   }
13965 
13966   return false;
13967 }
13968 
13969 const MCPhysReg *
13970 PPCTargetLowering::getScratchRegisters(CallingConv::ID) const {
13971   // LR is a callee-save register, but we must treat it as clobbered by any call
13972   // site. Hence we include LR in the scratch registers, which are in turn added
13973   // as implicit-defs for stackmaps and patchpoints. The same reasoning applies
13974   // to CTR, which is used by any indirect call.
13975   static const MCPhysReg ScratchRegs[] = {
13976     PPC::X12, PPC::LR8, PPC::CTR8, 0
13977   };
13978 
13979   return ScratchRegs;
13980 }
13981 
13982 unsigned PPCTargetLowering::getExceptionPointerRegister(
13983     const Constant *PersonalityFn) const {
13984   return Subtarget.isPPC64() ? PPC::X3 : PPC::R3;
13985 }
13986 
13987 unsigned PPCTargetLowering::getExceptionSelectorRegister(
13988     const Constant *PersonalityFn) const {
13989   return Subtarget.isPPC64() ? PPC::X4 : PPC::R4;
13990 }
13991 
13992 bool
13993 PPCTargetLowering::shouldExpandBuildVectorWithShuffles(
13994                      EVT VT , unsigned DefinedValues) const {
13995   if (VT == MVT::v2i64)
13996     return Subtarget.hasDirectMove(); // Don't need stack ops with direct moves
13997 
13998   if (Subtarget.hasVSX() || Subtarget.hasQPX())
13999     return true;
14000 
14001   return TargetLowering::shouldExpandBuildVectorWithShuffles(VT, DefinedValues);
14002 }
14003 
14004 Sched::Preference PPCTargetLowering::getSchedulingPreference(SDNode *N) const {
14005   if (DisableILPPref || Subtarget.enableMachineScheduler())
14006     return TargetLowering::getSchedulingPreference(N);
14007 
14008   return Sched::ILP;
14009 }
14010 
14011 // Create a fast isel object.
14012 FastISel *
14013 PPCTargetLowering::createFastISel(FunctionLoweringInfo &FuncInfo,
14014                                   const TargetLibraryInfo *LibInfo) const {
14015   return PPC::createFastISel(FuncInfo, LibInfo);
14016 }
14017 
14018 void PPCTargetLowering::initializeSplitCSR(MachineBasicBlock *Entry) const {
14019   if (Subtarget.isDarwinABI()) return;
14020   if (!Subtarget.isPPC64()) return;
14021 
14022   // Update IsSplitCSR in PPCFunctionInfo
14023   PPCFunctionInfo *PFI = Entry->getParent()->getInfo<PPCFunctionInfo>();
14024   PFI->setIsSplitCSR(true);
14025 }
14026 
14027 void PPCTargetLowering::insertCopiesSplitCSR(
14028   MachineBasicBlock *Entry,
14029   const SmallVectorImpl<MachineBasicBlock *> &Exits) const {
14030   const PPCRegisterInfo *TRI = Subtarget.getRegisterInfo();
14031   const MCPhysReg *IStart = TRI->getCalleeSavedRegsViaCopy(Entry->getParent());
14032   if (!IStart)
14033     return;
14034 
14035   const TargetInstrInfo *TII = Subtarget.getInstrInfo();
14036   MachineRegisterInfo *MRI = &Entry->getParent()->getRegInfo();
14037   MachineBasicBlock::iterator MBBI = Entry->begin();
14038   for (const MCPhysReg *I = IStart; *I; ++I) {
14039     const TargetRegisterClass *RC = nullptr;
14040     if (PPC::G8RCRegClass.contains(*I))
14041       RC = &PPC::G8RCRegClass;
14042     else if (PPC::F8RCRegClass.contains(*I))
14043       RC = &PPC::F8RCRegClass;
14044     else if (PPC::CRRCRegClass.contains(*I))
14045       RC = &PPC::CRRCRegClass;
14046     else if (PPC::VRRCRegClass.contains(*I))
14047       RC = &PPC::VRRCRegClass;
14048     else
14049       llvm_unreachable("Unexpected register class in CSRsViaCopy!");
14050 
14051     unsigned NewVR = MRI->createVirtualRegister(RC);
14052     // Create copy from CSR to a virtual register.
14053     // FIXME: this currently does not emit CFI pseudo-instructions, it works
14054     // fine for CXX_FAST_TLS since the C++-style TLS access functions should be
14055     // nounwind. If we want to generalize this later, we may need to emit
14056     // CFI pseudo-instructions.
14057     assert(Entry->getParent()->getFunction().hasFnAttribute(
14058              Attribute::NoUnwind) &&
14059            "Function should be nounwind in insertCopiesSplitCSR!");
14060     Entry->addLiveIn(*I);
14061     BuildMI(*Entry, MBBI, DebugLoc(), TII->get(TargetOpcode::COPY), NewVR)
14062       .addReg(*I);
14063 
14064     // Insert the copy-back instructions right before the terminator
14065     for (auto *Exit : Exits)
14066       BuildMI(*Exit, Exit->getFirstTerminator(), DebugLoc(),
14067               TII->get(TargetOpcode::COPY), *I)
14068         .addReg(NewVR);
14069   }
14070 }
14071 
14072 // Override to enable LOAD_STACK_GUARD lowering on Linux.
14073 bool PPCTargetLowering::useLoadStackGuardNode() const {
14074   if (!Subtarget.isTargetLinux())
14075     return TargetLowering::useLoadStackGuardNode();
14076   return true;
14077 }
14078 
14079 // Override to disable global variable loading on Linux.
14080 void PPCTargetLowering::insertSSPDeclarations(Module &M) const {
14081   if (!Subtarget.isTargetLinux())
14082     return TargetLowering::insertSSPDeclarations(M);
14083 }
14084 
14085 bool PPCTargetLowering::isFPImmLegal(const APFloat &Imm, EVT VT) const {
14086   if (!VT.isSimple() || !Subtarget.hasVSX())
14087     return false;
14088 
14089   switch(VT.getSimpleVT().SimpleTy) {
14090   default:
14091     // For FP types that are currently not supported by PPC backend, return
14092     // false. Examples: f16, f80.
14093     return false;
14094   case MVT::f32:
14095   case MVT::f64:
14096   case MVT::ppcf128:
14097     return Imm.isPosZero();
14098   }
14099 }
14100 
14101 // For vector shift operation op, fold
14102 // (op x, (and y, ((1 << numbits(x)) - 1))) -> (target op x, y)
14103 static SDValue stripModuloOnShift(const TargetLowering &TLI, SDNode *N,
14104                                   SelectionDAG &DAG) {
14105   SDValue N0 = N->getOperand(0);
14106   SDValue N1 = N->getOperand(1);
14107   EVT VT = N0.getValueType();
14108   unsigned OpSizeInBits = VT.getScalarSizeInBits();
14109   unsigned Opcode = N->getOpcode();
14110   unsigned TargetOpcode;
14111 
14112   switch (Opcode) {
14113   default:
14114     llvm_unreachable("Unexpected shift operation");
14115   case ISD::SHL:
14116     TargetOpcode = PPCISD::SHL;
14117     break;
14118   case ISD::SRL:
14119     TargetOpcode = PPCISD::SRL;
14120     break;
14121   case ISD::SRA:
14122     TargetOpcode = PPCISD::SRA;
14123     break;
14124   }
14125 
14126   if (VT.isVector() && TLI.isOperationLegal(Opcode, VT) &&
14127       N1->getOpcode() == ISD::AND)
14128     if (ConstantSDNode *Mask = isConstOrConstSplat(N1->getOperand(1)))
14129       if (Mask->getZExtValue() == OpSizeInBits - 1)
14130         return DAG.getNode(TargetOpcode, SDLoc(N), VT, N0, N1->getOperand(0));
14131 
14132   return SDValue();
14133 }
14134 
14135 SDValue PPCTargetLowering::combineSHL(SDNode *N, DAGCombinerInfo &DCI) const {
14136   if (auto Value = stripModuloOnShift(*this, N, DCI.DAG))
14137     return Value;
14138 
14139   SDValue N0 = N->getOperand(0);
14140   ConstantSDNode *CN1 = dyn_cast<ConstantSDNode>(N->getOperand(1));
14141   if (!Subtarget.isISA3_0() ||
14142       N0.getOpcode() != ISD::SIGN_EXTEND ||
14143       N0.getOperand(0).getValueType() != MVT::i32 ||
14144       CN1 == nullptr || N->getValueType(0) != MVT::i64)
14145     return SDValue();
14146 
14147   // We can't save an operation here if the value is already extended, and
14148   // the existing shift is easier to combine.
14149   SDValue ExtsSrc = N0.getOperand(0);
14150   if (ExtsSrc.getOpcode() == ISD::TRUNCATE &&
14151       ExtsSrc.getOperand(0).getOpcode() == ISD::AssertSext)
14152     return SDValue();
14153 
14154   SDLoc DL(N0);
14155   SDValue ShiftBy = SDValue(CN1, 0);
14156   // We want the shift amount to be i32 on the extswli, but the shift could
14157   // have an i64.
14158   if (ShiftBy.getValueType() == MVT::i64)
14159     ShiftBy = DCI.DAG.getConstant(CN1->getZExtValue(), DL, MVT::i32);
14160 
14161   return DCI.DAG.getNode(PPCISD::EXTSWSLI, DL, MVT::i64, N0->getOperand(0),
14162                          ShiftBy);
14163 }
14164 
14165 SDValue PPCTargetLowering::combineSRA(SDNode *N, DAGCombinerInfo &DCI) const {
14166   if (auto Value = stripModuloOnShift(*this, N, DCI.DAG))
14167     return Value;
14168 
14169   return SDValue();
14170 }
14171 
14172 SDValue PPCTargetLowering::combineSRL(SDNode *N, DAGCombinerInfo &DCI) const {
14173   if (auto Value = stripModuloOnShift(*this, N, DCI.DAG))
14174     return Value;
14175 
14176   return SDValue();
14177 }
14178 
14179 bool PPCTargetLowering::mayBeEmittedAsTailCall(const CallInst *CI) const {
14180   // Only duplicate to increase tail-calls for the 64bit SysV ABIs.
14181   if (!Subtarget.isSVR4ABI() || !Subtarget.isPPC64())
14182     return false;
14183 
14184   // If not a tail call then no need to proceed.
14185   if (!CI->isTailCall())
14186     return false;
14187 
14188   // If tail calls are disabled for the caller then we are done.
14189   const Function *Caller = CI->getParent()->getParent();
14190   auto Attr = Caller->getFnAttribute("disable-tail-calls");
14191   if (Attr.getValueAsString() == "true")
14192     return false;
14193 
14194   // If sibling calls have been disabled and tail-calls aren't guaranteed
14195   // there is no reason to duplicate.
14196   auto &TM = getTargetMachine();
14197   if (!TM.Options.GuaranteedTailCallOpt && DisableSCO)
14198     return false;
14199 
14200   // Can't tail call a function called indirectly, or if it has variadic args.
14201   const Function *Callee = CI->getCalledFunction();
14202   if (!Callee || Callee->isVarArg())
14203     return false;
14204 
14205   // Make sure the callee and caller calling conventions are eligible for tco.
14206   if (!areCallingConvEligibleForTCO_64SVR4(Caller->getCallingConv(),
14207                                            CI->getCallingConv()))
14208       return false;
14209 
14210   // If the function is local then we have a good chance at tail-calling it
14211   return getTargetMachine().shouldAssumeDSOLocal(*Caller->getParent(), Callee);
14212 }
14213 
14214 bool PPCTargetLowering::
14215 isMaskAndCmp0FoldingBeneficial(const Instruction &AndI) const {
14216   const Value *Mask = AndI.getOperand(1);
14217   // If the mask is suitable for andi. or andis. we should sink the and.
14218   if (const ConstantInt *CI = dyn_cast<ConstantInt>(Mask)) {
14219     // Can't handle constants wider than 64-bits.
14220     if (CI->getBitWidth() > 64)
14221       return false;
14222     int64_t ConstVal = CI->getZExtValue();
14223     return isUInt<16>(ConstVal) ||
14224       (isUInt<16>(ConstVal >> 16) && !(ConstVal & 0xFFFF));
14225   }
14226 
14227   // For non-constant masks, we can always use the record-form and.
14228   return true;
14229 }
14230